nordugrid-arc-5.4.2/Makefile.am:

# /opt/local is the location for macports on MacOS X
ACLOCAL_AMFLAGS = -I m4 `test -d /opt/local/share/aclocal && echo -I /opt/local/share/aclocal`

if JAVA_SWIG_ENABLED
JAVA_SD = java
endif

if SWIG_ENABLED
SWIG_SD = swig
endif

SUBDIRS = src include $(SWIG_SD) $(JAVA_SD) python $(POSUB) debian nsis
DIST_SUBDIRS = src include swig java python po debian nsis

EXTRA_DIST = nordugrid-arc.spec mingw-nordugrid-arc.spec \
	nordugrid-arc.SlackBuild autogen.sh LICENSE NOTICE \
	selinux/nordugrid-arc-egiis.te selinux/nordugrid-arc-egiis.fc

pkgconfigdir = $(libdir)/pkgconfig
pkgconfig_DATA = arcbase.pc

nordugrid-arc-5.4.2/configure:

#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.63 for nordugrid-arc 5.4.2.
#
# Report bugs to <http://bugzilla.nordugrid.org/>.
#
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
# 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
# This configure script is free software; the Free Software Foundation
# gives unlimited permission to copy, distribute and modify it.
## --------------------- ##
## M4sh Initialization.  ##
## --------------------- ##

# Be more Bourne compatible
DUALCASE=1; export DUALCASE # for MKS sh
if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
  emulate sh
  NULLCMD=:
  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
  # is contrary to our usage.  Disable this feature.
  alias -g '${1+"$@"}'='"$@"'
  setopt NO_GLOB_SUBST
else
  case `(set -o) 2>/dev/null` in
  *posix*) set -o posix ;;
esac
fi

# PATH needs CR
# Avoid depending upon Character Ranges.
as_cr_letters='abcdefghijklmnopqrstuvwxyz'
as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
as_cr_Letters=$as_cr_letters$as_cr_LETTERS
as_cr_digits='0123456789'
as_cr_alnum=$as_cr_Letters$as_cr_digits

as_nl='
'
export as_nl
# Printing a long string crashes Solaris 7 /usr/bin/printf.
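
# A minimal build sketch, kept as comments so the generated script above and
# below is unchanged. The option set is an illustrative assumption, not the
# package's prescribed invocation: autogen.sh is shipped in EXTRA_DIST of the
# Makefile.am above, and --prefix, --disable-doc and --disable-swig-java are
# among the switches documented in this script's --help output further below.
#
#   ./autogen.sh        # only needed if the generated configure is missing
#   ./configure --prefix=/usr --disable-doc --disable-swig-java
#   make
#   make install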
as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo if (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # Support unset when possible. if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then as_unset=unset else as_unset=false fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. case $0 in *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 { (exit 1); exit 1; } fi # Work around bugs in pre-3.0 UWIN ksh. for as_var in ENV MAIL MAILPATH do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # Required to use basename. if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi # Name of the executable. as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # CDPATH. $as_unset CDPATH if test "x$CONFIG_SHELL" = x; then if (eval ":") 2>/dev/null; then as_have_required=yes else as_have_required=no fi if test $as_have_required = yes && (eval ": (as_func_return () { (exit \$1) } as_func_success () { as_func_return 0 } as_func_failure () { as_func_return 1 } as_func_ret_success () { return 0 } as_func_ret_failure () { return 1 } exitcode=0 if as_func_success; then : else exitcode=1 echo as_func_success failed. fi if as_func_failure; then exitcode=1 echo as_func_failure succeeded. fi if as_func_ret_success; then : else exitcode=1 echo as_func_ret_success failed. 
fi if as_func_ret_failure; then exitcode=1 echo as_func_ret_failure succeeded. fi if ( set x; as_func_ret_success y && test x = \"\$1\" ); then : else exitcode=1 echo positional parameters were not saved. fi test \$exitcode = 0) || { (exit 1); exit 1; } ( as_lineno_1=\$LINENO as_lineno_2=\$LINENO test \"x\$as_lineno_1\" != \"x\$as_lineno_2\" && test \"x\`expr \$as_lineno_1 + 1\`\" = \"x\$as_lineno_2\") || { (exit 1); exit 1; } ") 2> /dev/null; then : else as_candidate_shells= as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. case $as_dir in /*) for as_base in sh bash ksh sh5; do as_candidate_shells="$as_candidate_shells $as_dir/$as_base" done;; esac done IFS=$as_save_IFS for as_shell in $as_candidate_shells $SHELL; do # Try only shells that exist, to save several forks. if { test -f "$as_shell" || test -f "$as_shell.exe"; } && { ("$as_shell") 2> /dev/null <<\_ASEOF if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac fi : _ASEOF }; then CONFIG_SHELL=$as_shell as_have_required=yes if { "$as_shell" 2> /dev/null <<\_ASEOF if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac fi : (as_func_return () { (exit $1) } as_func_success () { as_func_return 0 } as_func_failure () { as_func_return 1 } as_func_ret_success () { return 0 } as_func_ret_failure () { return 1 } exitcode=0 if as_func_success; then : else exitcode=1 echo as_func_success failed. fi if as_func_failure; then exitcode=1 echo as_func_failure succeeded. fi if as_func_ret_success; then : else exitcode=1 echo as_func_ret_success failed. fi if as_func_ret_failure; then exitcode=1 echo as_func_ret_failure succeeded. fi if ( set x; as_func_ret_success y && test x = "$1" ); then : else exitcode=1 echo positional parameters were not saved. fi test $exitcode = 0) || { (exit 1); exit 1; } ( as_lineno_1=$LINENO as_lineno_2=$LINENO test "x$as_lineno_1" != "x$as_lineno_2" && test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2") || { (exit 1); exit 1; } _ASEOF }; then break fi fi done if test "x$CONFIG_SHELL" != x; then for as_var in BASH_ENV ENV do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var done export CONFIG_SHELL exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"} fi if test $as_have_required = no; then echo This script requires a shell more modern than all the echo shells that I found on your system. Please install a echo modern shell, or manually run the script under such a echo shell if you do have one. { (exit 1); exit 1; } fi fi fi (eval "as_func_return () { (exit \$1) } as_func_success () { as_func_return 0 } as_func_failure () { as_func_return 1 } as_func_ret_success () { return 0 } as_func_ret_failure () { return 1 } exitcode=0 if as_func_success; then : else exitcode=1 echo as_func_success failed. fi if as_func_failure; then exitcode=1 echo as_func_failure succeeded. fi if as_func_ret_success; then : else exitcode=1 echo as_func_ret_success failed. 
fi if as_func_ret_failure; then exitcode=1 echo as_func_ret_failure succeeded. fi if ( set x; as_func_ret_success y && test x = \"\$1\" ); then : else exitcode=1 echo positional parameters were not saved. fi test \$exitcode = 0") || { echo No shell found that supports shell functions. echo Please tell bug-autoconf@gnu.org about your system, echo including any error possibly output before this message. echo This can help us improve future autoconf versions. echo Configuration will now proceed without shell functions. } as_lineno_1=$LINENO as_lineno_2=$LINENO test "x$as_lineno_1" != "x$as_lineno_2" && test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || { # Create $as_me.lineno as a copy of $as_myself, but with $LINENO # uniformly replaced by the line number. The first 'sed' inserts a # line-number line after each line using $LINENO; the second 'sed' # does the real work. The second script uses 'N' to pair each # line-number line with the line containing $LINENO, and appends # trailing '-' during substitution so that $LINENO is not a special # case at line end. # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the # scripts with optimization help from Paolo Bonzini. Blame Lee # E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 { (exit 1); exit 1; }; } # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in -n*) case `echo 'x\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. *) ECHO_C='\c';; esac;; *) ECHO_N='-n';; esac if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -p'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -p' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -p' fi else as_ln_s='cp -p' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p=: else test -d ./-p && rmdir ./-p as_mkdir_p=false fi if test -x / >/dev/null 2>&1; then as_test_x='test -x' else if ls -dL / >/dev/null 2>&1; then as_ls_L_option=L else as_ls_L_option= fi as_test_x=' eval sh -c '\'' if test -d "$1"; then test -d "$1/."; else case $1 in -*)set "./$1";; esac; case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in ???[sx]*):;;*)false;;esac;fi '\'' sh ' fi as_executable_p=$as_test_x # Sed expression to map a string onto a valid CPP name. 
as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" # Check that we are running under the correct shell. SHELL=${CONFIG_SHELL-/bin/sh} case X$lt_ECHO in X*--fallback-echo) # Remove one level of quotation (which was required for Make). ECHO=`echo "$lt_ECHO" | sed 's,\\\\\$\\$0,'$0','` ;; esac ECHO=${lt_ECHO-echo} if test "X$1" = X--no-reexec; then # Discard the --no-reexec flag, and continue. shift elif test "X$1" = X--fallback-echo; then # Avoid inline document here, it may be left over : elif test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' ; then # Yippee, $ECHO works! : else # Restart under the correct shell. exec $SHELL "$0" --no-reexec ${1+"$@"} fi if test "X$1" = X--fallback-echo; then # used as fallback echo shift cat <<_LT_EOF $* _LT_EOF exit 0 fi # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH if test -z "$lt_ECHO"; then if test "X${echo_test_string+set}" != Xset; then # find a string as large as possible, as long as the shell can cope with it for cmd in 'sed 50q "$0"' 'sed 20q "$0"' 'sed 10q "$0"' 'sed 2q "$0"' 'echo test'; do # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ... if { echo_test_string=`eval $cmd`; } 2>/dev/null && { test "X$echo_test_string" = "X$echo_test_string"; } 2>/dev/null then break fi done fi if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' && echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then : else # The Solaris, AIX, and Digital Unix default echo programs unquote # backslashes. This makes it impossible to quote backslashes using # echo "$something" | sed 's/\\/\\\\/g' # # So, first we look for a working echo in the user's PATH. lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for dir in $PATH /usr/ucb; do IFS="$lt_save_ifs" if (test -f $dir/echo || test -f $dir/echo$ac_exeext) && test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' && echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then ECHO="$dir/echo" break fi done IFS="$lt_save_ifs" if test "X$ECHO" = Xecho; then # We didn't find a better echo, so look for alternatives. if test "X`{ print -r '\t'; } 2>/dev/null`" = 'X\t' && echo_testing_string=`{ print -r "$echo_test_string"; } 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then # This shell has a builtin print -r that does the trick. ECHO='print -r' elif { test -f /bin/ksh || test -f /bin/ksh$ac_exeext; } && test "X$CONFIG_SHELL" != X/bin/ksh; then # If we have ksh, try running configure again with it. ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh} export ORIGINAL_CONFIG_SHELL CONFIG_SHELL=/bin/ksh export CONFIG_SHELL exec $CONFIG_SHELL "$0" --no-reexec ${1+"$@"} else # Try using printf. 
ECHO='printf %s\n' if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' && echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then # Cool, printf works : elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` && test "X$echo_testing_string" = 'X\t' && echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL export CONFIG_SHELL SHELL="$CONFIG_SHELL" export SHELL ECHO="$CONFIG_SHELL $0 --fallback-echo" elif echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` && test "X$echo_testing_string" = 'X\t' && echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then ECHO="$CONFIG_SHELL $0 --fallback-echo" else # maybe with a smaller string... prev=: for cmd in 'echo test' 'sed 2q "$0"' 'sed 10q "$0"' 'sed 20q "$0"' 'sed 50q "$0"'; do if { test "X$echo_test_string" = "X`eval $cmd`"; } 2>/dev/null then break fi prev="$cmd" done if test "$prev" != 'sed 50q "$0"'; then echo_test_string=`eval $prev` export echo_test_string exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "$0" ${1+"$@"} else # Oops. We lost completely, so just stick with echo. ECHO=echo fi fi fi fi fi fi # Copy echo and quote the copy suitably for passing to libtool from # the Makefile, instead of quoting the original, which is used later. lt_ECHO=$ECHO if test "X$lt_ECHO" = "X$CONFIG_SHELL $0 --fallback-echo"; then lt_ECHO="$CONFIG_SHELL \\\$\$0 --fallback-echo" fi exec 7<&0 &1 # Name of the host. # hostname on some systems (SVR3.2, Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` # # Initializations. # ac_default_prefix=/usr/local ac_clean_files= ac_config_libobj_dir=. LIBOBJS= cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= SHELL=${CONFIG_SHELL-/bin/sh} # Identity of this package. PACKAGE_NAME='nordugrid-arc' PACKAGE_TARNAME='nordugrid-arc' PACKAGE_VERSION='5.4.2' PACKAGE_STRING='nordugrid-arc 5.4.2' PACKAGE_BUGREPORT='http://bugzilla.nordugrid.org/' ac_unique_file="Makefile.am" # Factoring default headers for most tests. 
ac_includes_default="\ #include #ifdef HAVE_SYS_TYPES_H # include #endif #ifdef HAVE_SYS_STAT_H # include #endif #ifdef STDC_HEADERS # include # include #else # ifdef HAVE_STDLIB_H # include # endif #endif #ifdef HAVE_STRING_H # if !defined STDC_HEADERS && defined HAVE_MEMORY_H # include # endif # include #endif #ifdef HAVE_STRINGS_H # include #endif #ifdef HAVE_INTTYPES_H # include #endif #ifdef HAVE_STDINT_H # include #endif #ifdef HAVE_UNISTD_H # include #endif" ac_header_list= ac_func_list= ac_subst_vars='am__EXEEXT_FALSE am__EXEEXT_TRUE LTLIBOBJS SPECDATE DATER DATE posix_shell nodename gnu_time tmp_dir pbs_log_path pbs_bin_path arc_location HED_ENABLED_FALSE HED_ENABLED_TRUE EMIES_ENABLED_FALSE EMIES_ENABLED_TRUE UNICORE_ENABLED_FALSE UNICORE_ENABLED_TRUE WSRF_CLIENT_ENABLED_FALSE WSRF_CLIENT_ENABLED_TRUE SAML_CLIENT_ENABLED_FALSE SAML_CLIENT_ENABLED_TRUE JURA_CLIENT_ENABLED_FALSE JURA_CLIENT_ENABLED_TRUE DATA_CLIENT_ENABLED_FALSE DATA_CLIENT_ENABLED_TRUE ECHO_CLIENT_ENABLED_FALSE ECHO_CLIENT_ENABLED_TRUE CREDENTIALS_CLIENT_ENABLED_FALSE CREDENTIALS_CLIENT_ENABLED_TRUE COMPUTE_CLIENT_ENABLED_FALSE COMPUTE_CLIENT_ENABLED_TRUE ACIX_TESTS_ENABLED_FALSE ACIX_TESTS_ENABLED_TRUE TRIAL ACIX_ENABLED_FALSE ACIX_ENABLED_TRUE DATADELIVERY_SERVICE_ENABLED_FALSE DATADELIVERY_SERVICE_ENABLED_TRUE CACHE_WEBSERVICE_ENABLED_FALSE CACHE_WEBSERVICE_ENABLED_TRUE CACHE_SERVICE_ENABLED_FALSE CACHE_SERVICE_ENABLED_TRUE WS_MONITOR_ENABLED_FALSE WS_MONITOR_ENABLED_TRUE LDAP_MONITOR_ENABLED_FALSE LDAP_MONITOR_ENABLED_TRUE GIIS_SERVICE_ENABLED_FALSE GIIS_SERVICE_ENABLED_TRUE LDAP_SERVICE_ENABLED_FALSE LDAP_SERVICE_ENABLED_TRUE GRIDFTPD_SERVICE_ENABLED_FALSE GRIDFTPD_SERVICE_ENABLED_TRUE A_REX_SERVICE_ENABLED_FALSE A_REX_SERVICE_ENABLED_TRUE ALTPYDOXYGEN_FALSE ALTPYDOXYGEN_TRUE PYDOXYGEN_FALSE PYDOXYGEN_TRUE DOC_ENABLED_FALSE DOC_ENABLED_TRUE DOT DOXYGEN PDFLATEX WIN32_FALSE WIN32_TRUE SOCKET_LIBS REGEX_LIBS EXTRA_LIBS WINDRES LIBRESOLV LIBOBJS DLOPEN_LIBS UUID_LIBS LDAP_ENABLED_FALSE LDAP_ENABLED_TRUE LDAP_LIBS SRM_DMC_ENABLED_FALSE SRM_DMC_ENABLED_TRUE CPPUNIT_ENABLED_FALSE CPPUNIT_ENABLED_TRUE XMLSEC_ENABLED_FALSE XMLSEC_ENABLED_TRUE XROOTD_ENABLED_FALSE XROOTD_ENABLED_TRUE S3_DMC_ENABLED_FALSE S3_DMC_ENABLED_TRUE GFAL_ENABLED_FALSE GFAL_ENABLED_TRUE MOCK_DMC_ENABLED_FALSE MOCK_DMC_ENABLED_TRUE GRIDFTP_ENABLED_FALSE GRIDFTP_ENABLED_TRUE GLOBUSUTILS_ENABLED_FALSE GLOBUSUTILS_ENABLED_TRUE XROOTD_LIBS XROOTD_CPPFLAGS S3_LIBS S3_CPPFLAGS GFAL2_LIBS GFAL2_CFLAGS LCMAPS_LIBS LCMAPS_CFLAGS LCMAPS_LOCATION LCAS_LIBS LCAS_CFLAGS LCAS_LOCATION DEFAULT_GLOBUS_LOCATION GLOBUS_OPENSSL_LIBS GLOBUS_OPENSSL_CFLAGS GLOBUS_OPENSSL_MODULE_LIBS GLOBUS_OPENSSL_MODULE_CFLAGS GLOBUS_GSI_CREDENTIAL_LIBS GLOBUS_GSI_CREDENTIAL_CFLAGS GLOBUS_GSI_CERT_UTILS_LIBS GLOBUS_GSI_CERT_UTILS_CFLAGS GLOBUS_IO_LIBS GLOBUS_IO_CFLAGS GLOBUS_FTP_CONTROL_LIBS GLOBUS_FTP_CONTROL_CFLAGS GLOBUS_FTP_CLIENT_LIBS GLOBUS_FTP_CLIENT_CFLAGS GLOBUS_GSI_CALLBACK_LIBS GLOBUS_GSI_CALLBACK_CFLAGS GLOBUS_GSS_ASSIST_LIBS GLOBUS_GSS_ASSIST_CFLAGS GLOBUS_GSSAPI_GSI_LIBS GLOBUS_GSSAPI_GSI_CFLAGS GPT_QUERY GPT_FLAVOR_CONFIGURATION GLOBUS_MAKEFILE_HEADER GLOBUS_COMMON_LIBS GLOBUS_COMMON_CFLAGS DBJSTORE_ENABLED_FALSE DBJSTORE_ENABLED_TRUE DBCXX_LIBS DBCXX_CPPFLAGS ARGUS_ENABLED_FALSE ARGUS_ENABLED_TRUE ARGUS_LIBS ARGUS_CFLAGS ZLIB_LIBS ZLIB_CFLAGS ws_monitor_prefix ldap_monitor_prefix MYSQL_LIBRARY_ENABLED_FALSE MYSQL_LIBRARY_ENABLED_TRUE MYSQL_CFLAGS MYSQL_LIBS XMLSEC_OPENSSL_LIBS XMLSEC_OPENSSL_CFLAGS XMLSEC_LIBS XMLSEC_CFLAGS MACOSX_FALSE MACOSX_TRUE 
LDNS_ENABLED_FALSE LDNS_ENABLED_TRUE LDNS_CONFIG LDNS_LIBS LDNS_CFLAGS TEST_DIR CPPUNIT_CONFIG CPPUNIT_LIBS CPPUNIT_CFLAGS CANLXX_ENABLED_FALSE CANLXX_ENABLED_TRUE CANLXX_LIBS CANLXX_CFLAGS SQLITE_ENABLED_FALSE SQLITE_ENABLED_TRUE SQLITE_LIBS SQLITE_CFLAGS NSS_ENABLED_FALSE NSS_ENABLED_TRUE NSS_LIBS NSS_CFLAGS OPENSSL_1_1_LIBS OPENSSL_1_1_CFLAGS OPENSSL_LIBS OPENSSL_CFLAGS LIBXML2_LIBS LIBXML2_CFLAGS GLIBMM_LIBS GLIBMM_CFLAGS GTHREAD_LIBS GTHREAD_CFLAGS PYLINT_ENABLED_FALSE PYLINT_ENABLED_TRUE PYLINT_ARGS_ARGUMENTS_DIFFER PYLINT_ARGS PYLINT ALTPYTHON3_FALSE ALTPYTHON3_TRUE ALTPYTHON_ENABLED_FALSE ALTPYTHON_ENABLED_TRUE ALTPYTHON_SITE_LIB ALTPYTHON_SITE_ARCH ALTPYTHON_SOABI ALTPYTHON_VERSION ALTPYTHON_LIBS ALTPYTHON_CFLAGS ALTPYTHON PYTHON_SERVICE_FALSE PYTHON_SERVICE_TRUE PYTHON_SWIG_ENABLED_FALSE PYTHON_SWIG_ENABLED_TRUE PYTHON3_FALSE PYTHON3_TRUE PYTHON_ENABLED_FALSE PYTHON_ENABLED_TRUE PYTHON_SITE_LIB PYTHON_SITE_ARCH PYTHON_SOABI PYTHON_VERSION PYTHON_LIBS PYTHON_CFLAGS PYTHON JUNIT_ENABLED_FALSE JUNIT_ENABLED_TRUE jninativedir jnidir JDK_CFLAGS JAVAC_FLAGS JAVA_FLAGS JAR_JFLAGS JAVA_IS_15_OR_ABOVE_FALSE JAVA_IS_15_OR_ABOVE_TRUE JAVA_SWIG_ENABLED_FALSE JAVA_SWIG_ENABLED_TRUE JAVA_ENABLED_FALSE JAVA_ENABLED_TRUE JAR JAVAC JAVA SWIG_ENABLED_FALSE SWIG_ENABLED_TRUE SWIG_PYTHON_NAMING SWIG2 SWIG PEDANTIC_COMPILE_FALSE PEDANTIC_COMPILE_TRUE AM_CXXFLAGS pkgconfigdir PKG_CONFIG POSUB LTLIBINTL LIBINTL INTLLIBS LTLIBICONV LIBICONV MSGMERGE XGETTEXT GMSGFMT MSGFMT USE_NLS MKINSTALLDIRS cronddir initddir SYSV_SCRIPTS_ENABLED_FALSE SYSV_SCRIPTS_ENABLED_TRUE unitsdir SYSTEMD_UNITS_ENABLED_FALSE SYSTEMD_UNITS_ENABLED_TRUE pkgdatasubdir pkgdatadir_rel_to_pkglibexecdir bindir_rel_to_pkglibexecdir sbindir_rel_to_pkglibexecdir pkglibdir_rel_to_pkglibexecdir pkglibexecsubdir pkglibsubdir libsubdir ARCXMLSEC_CFLAGS ARCXMLSEC_LIBS ARCWSSECURITY_CFLAGS ARCWSSECURITY_LIBS ARCWS_CFLAGS ARCWS_LIBS ARCINFOSYS_CFLAGS ARCINFOSYS_LIBS ARCSECURITY_CFLAGS ARCSECURITY_LIBS ARCMESSAGE_CFLAGS ARCMESSAGE_LIBS ARCLOADER_CFLAGS ARCLOADER_LIBS ARCJOB_CFLAGS ARCJOB_LIBS ARCDATA_CFLAGS ARCDATA_LIBS ARCCREDENTIAL_CFLAGS ARCCREDENTIAL_LIBS ARCCOMMON_CFLAGS ARCCOMMON_LIBS ARCCLIENT_CFLAGS ARCCLIENT_LIBS pkglibexecdir pkglibdir pkgincludedir pkgdatadir PERL CXXCPP OTOOL64 OTOOL LIPO NMEDIT DSYMUTIL lt_ECHO RANLIB AR NM ac_ct_DUMPBIN DUMPBIN LD FGREP SED LIBTOOL OBJDUMP DLLTOOL AS host_os host_vendor host_cpu host build_os build_vendor build_cpu build LN_S EGREP GREP CPP am__fastdepCC_FALSE am__fastdepCC_TRUE CCDEPMODE ac_ct_CC CFLAGS CC am__fastdepCXX_FALSE am__fastdepCXX_TRUE CXXDEPMODE AMDEPBACKSLASH AMDEP_FALSE AMDEP_TRUE am__quote am__include DEPDIR OBJEXT EXEEXT ac_ct_CXX CPPFLAGS LDFLAGS CXXFLAGS CXX debianversion fedorasetupopts fedorarelease preversion baseversion am__untar am__tar AMTAR am__leading_dot SET_MAKE AWK mkdir_p MKDIR_P INSTALL_STRIP_PROGRAM STRIP install_sh MAKEINFO AUTOHEADER AUTOMAKE AUTOCONF ACLOCAL VERSION PACKAGE CYGPATH_W am__isrc INSTALL_DATA INSTALL_SCRIPT INSTALL_PROGRAM target_alias host_alias build_alias LIBS ECHO_T ECHO_N ECHO_C DEFS mandir localedir libdir psdir pdfdir dvidir htmldir infodir docdir oldincludedir includedir localstatedir sharedstatedir sysconfdir datadir datarootdir libexecdir sbindir bindir program_transform_name prefix exec_prefix PACKAGE_BUGREPORT PACKAGE_STRING PACKAGE_VERSION PACKAGE_TARNAME PACKAGE_NAME PATH_SEPARATOR SHELL' ac_subst_files='' ac_user_opts=' enable_option_checking enable_dependency_tracking enable_static enable_shared with_pic enable_fast_install 
with_gnu_ld enable_libtool_lock with_docdir with_systemd_units_location with_sysv_scripts_location with_cron_scripts_prefix enable_nls enable_rpath with_libiconv_prefix with_libintl_prefix enable_largefile enable_all enable_all_clients enable_all_data_clients enable_all_services enable_pedantic_compile enable_swig_python enable_swig_java enable_swig enable_hed enable_java with_jdk with_jnidir with_jninativedir enable_junit enable_python with_python with_python_site_arch with_python_site_lib enable_altpython with_altpython with_altpython_site_arch with_altpython_site_lib enable_pylint enable_nss enable_sqlite enable_canlxx with_canlxx enable_cppunit enable_ldns enable_xmlsec1 with_xmlsec1 enable_mysql with_mysql enable_ldap_monitor with_ldap_monitor enable_ws_monitor with_ws_monitor with_zlib enable_argus with_argus with_dbcxx_include with_db4_library_path enable_dbjstore with_flavor with_lcas_location with_lcmaps_location enable_mock_dmc enable_gfal enable_s3 with_s3 enable_xrootd with_xrootd enable_ldap enable_doc enable_a_rex_service enable_gridftpd_service enable_ldap_service enable_giis_service enable_cache_service enable_datadelivery_service enable_acix enable_compute_client enable_credentials_client enable_echo_client enable_data_client enable_jura_client enable_saml_client enable_wsrf_client enable_unicore_client enable_emies_client ' ac_precious_vars='build_alias host_alias target_alias CXX CXXFLAGS LDFLAGS LIBS CPPFLAGS CCC CC CFLAGS CPP CXXCPP PKG_CONFIG PYTHON_CFLAGS PYTHON_LIBS ALTPYTHON_CFLAGS ALTPYTHON_LIBS GTHREAD_CFLAGS GTHREAD_LIBS GLIBMM_CFLAGS GLIBMM_LIBS LIBXML2_CFLAGS LIBXML2_LIBS OPENSSL_CFLAGS OPENSSL_LIBS OPENSSL_1_1_CFLAGS OPENSSL_1_1_LIBS NSS_CFLAGS NSS_LIBS SQLITE_CFLAGS SQLITE_LIBS CANLXX_CFLAGS CANLXX_LIBS CPPUNIT_CFLAGS CPPUNIT_LIBS LDNS_CFLAGS LDNS_LIBS XMLSEC_CFLAGS XMLSEC_LIBS XMLSEC_OPENSSL_CFLAGS XMLSEC_OPENSSL_LIBS ARGUS_CFLAGS ARGUS_LIBS GLOBUS_COMMON_CFLAGS GLOBUS_COMMON_LIBS GLOBUS_MAKEFILE_HEADER GPT_FLAVOR_CONFIGURATION GPT_QUERY GLOBUS_GSSAPI_GSI_CFLAGS GLOBUS_GSSAPI_GSI_LIBS GLOBUS_GSS_ASSIST_CFLAGS GLOBUS_GSS_ASSIST_LIBS GLOBUS_GSI_CALLBACK_CFLAGS GLOBUS_GSI_CALLBACK_LIBS GLOBUS_FTP_CLIENT_CFLAGS GLOBUS_FTP_CLIENT_LIBS GLOBUS_FTP_CONTROL_CFLAGS GLOBUS_FTP_CONTROL_LIBS GLOBUS_IO_CFLAGS GLOBUS_IO_LIBS GLOBUS_GSI_CERT_UTILS_CFLAGS GLOBUS_GSI_CERT_UTILS_LIBS GLOBUS_GSI_CREDENTIAL_CFLAGS GLOBUS_GSI_CREDENTIAL_LIBS GLOBUS_OPENSSL_MODULE_CFLAGS GLOBUS_OPENSSL_MODULE_LIBS GLOBUS_OPENSSL_CFLAGS GLOBUS_OPENSSL_LIBS GFAL2_CFLAGS GFAL2_LIBS' # Initialize some variables set by options. ac_init_help= ac_init_version=false ac_unrecognized_opts= ac_unrecognized_sep= # The variables have the same names as the options, with # dashes changed to underlines. cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. # These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. # (The list follows the same order as the GNU Coding Standards.) 
bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datarootdir='${prefix}/share' datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' pdfdir='${docdir}' psdir='${docdir}' libdir='${exec_prefix}/lib' localedir='${datarootdir}/locale' mandir='${datarootdir}/man' ac_prev= ac_dashdash= for ac_option do # If the previous option needs an argument, assign it. if test -n "$ac_prev"; then eval $ac_prev=\$ac_option ac_prev= continue fi case $ac_option in *=*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; *) ac_optarg=yes ;; esac # Accept the important Cygnus configure options, so we can diagnose typos. case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=*) datadir=$ac_optarg ;; -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ | --dataroo | --dataro | --datar) ac_prev=datarootdir ;; -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) datarootdir=$ac_optarg ;; -disable-* | --disable-*) ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && { $as_echo "$as_me: error: invalid feature name: $ac_useropt" >&2 { (exit 1); exit 1; }; } ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) docdir=$ac_optarg ;; -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ac_prev=dvidir ;; -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) dvidir=$ac_optarg ;; -enable-* | --enable-*) ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. 
expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && { $as_echo "$as_me: error: invalid feature name: $ac_useropt" >&2 { (exit 1); exit 1; }; } ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ac_prev=htmldir ;; -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ | --ht=*) htmldir=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localedir | --localedir | --localedi | --localed | --locale) ac_prev=localedir ;; -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) localedir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | --localstated \ | --localstate | --localstat | --localsta | --localst | --locals) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. 
with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | --program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ac_prev=pdfdir ;; -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) pdfdir=$ac_optarg ;; -psdir | --psdir | --psdi | --psd | --ps) ac_prev=psdir ;; -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) psdir=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; 
-sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | --targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && { $as_echo "$as_me: error: invalid package name: $ac_useropt" >&2 { (exit 1); exit 1; }; } ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=\$ac_optarg ;; -without-* | --without-*) ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && { $as_echo "$as_me: error: invalid package name: $ac_useropt" >&2 { (exit 1); exit 1; }; } ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=no ;; --x) # Obsolete; use --with-x. with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) { $as_echo "$as_me: error: unrecognized option: $ac_option Try \`$0 --help' for more information." >&2 { (exit 1); exit 1; }; } ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null && { $as_echo "$as_me: error: invalid variable name: $ac_envvar" >&2 { (exit 1); exit 1; }; } eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. 
$as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option} ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` { $as_echo "$as_me: error: missing argument to $ac_option" >&2 { (exit 1); exit 1; }; } fi if test -n "$ac_unrecognized_opts"; then case $enable_option_checking in no) ;; fatal) { $as_echo "$as_me: error: unrecognized options: $ac_unrecognized_opts" >&2 { (exit 1); exit 1; }; } ;; *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; esac fi # Check all directory arguments for consistency. for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir do eval ac_val=\$$ac_var # Remove trailing slashes. case $ac_val in */ ) ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` eval $ac_var=\$ac_val;; esac # Be sure to have absolute directory names. case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac { $as_echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2 { (exit 1); exit 1; }; } done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. # FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe $as_echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host. If a cross compiler is detected then cross compile mode will be used." >&2 elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || { $as_echo "$as_me: error: working directory cannot be determined" >&2 { (exit 1); exit 1; }; } test "X$ac_ls_di" = "X$ac_pwd_ls_di" || { $as_echo "$as_me: error: pwd does not report name of working directory" >&2 { (exit 1); exit 1; }; } # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. ac_confdir=`$as_dirname -- "$as_myself" || $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_myself" : 'X\(//\)[^/]' \| \ X"$as_myself" : 'X\(//\)$' \| \ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_myself" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` srcdir=$ac_confdir if test ! -r "$srcdir/$ac_unique_file"; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." 
{ $as_echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2 { (exit 1); exit 1; }; } fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( cd "$srcdir" && test -r "./$ac_unique_file" || { $as_echo "$as_me: error: $ac_msg" >&2 { (exit 1); exit 1; }; } pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then srcdir=. fi # Remove unnecessary trailing slashes from srcdir. # Double slashes in file names in object file debugging info # mess up M-x gdb in Emacs. case $srcdir in */) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; esac for ac_var in $ac_precious_vars; do eval ac_env_${ac_var}_set=\${${ac_var}+set} eval ac_env_${ac_var}_value=\$${ac_var} eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} eval ac_cv_env_${ac_var}_value=\$${ac_var} done # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF \`configure' configures nordugrid-arc 5.4.2 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking...' messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. 
Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] --datadir=DIR read-only architecture-independent data [DATAROOTDIR] --infodir=DIR info documentation [DATAROOTDIR/info] --localedir=DIR locale-dependent data [DATAROOTDIR/locale] --mandir=DIR man documentation [DATAROOTDIR/man] --docdir=DIR documentation root [DATAROOTDIR/doc/nordugrid-arc] --htmldir=DIR html documentation [DOCDIR] --dvidir=DIR dvi documentation [DOCDIR] --pdfdir=DIR pdf documentation [DOCDIR] --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF Program names: --program-prefix=PREFIX prepend PREFIX to installed program names --program-suffix=SUFFIX append SUFFIX to installed program names --program-transform-name=PROGRAM run sed PROGRAM on installed program names System types: --build=BUILD configure for building on BUILD [guessed] --host=HOST cross-compile to build programs to run on HOST [BUILD] _ACEOF fi if test -n "$ac_init_help"; then case $ac_init_help in short | recursive ) echo "Configuration of nordugrid-arc 5.4.2:";; esac cat <<\_ACEOF Optional Features: --disable-option-checking ignore unrecognized --enable/--with options --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] --disable-dependency-tracking speeds up one-time build --enable-dependency-tracking do not reject slow dependency extractors --enable-static[=PKGS] build static libraries [default=no] --enable-shared[=PKGS] build shared libraries [default=yes] --enable-fast-install[=PKGS] optimize for fast installation [default=yes] --disable-libtool-lock avoid locking (might break parallel builds) --disable-nls do not use Native Language Support --disable-rpath do not hardcode runtime library paths --disable-largefile omit support for large files --disable-all disables all buildable components. Can be overwritten with --enable-* for group or specific component. It is also possible to use --enable-all to overwrite defaults for most of components. --disable-all-clients disables all buildable client components. Can be overwritten with --enable-* for specific component. It is also possible to use --enable-all-clients to overwrite defaults and --enable-all. --disable-all-data-clients disables all buildable client components providing data handling abilities. Can be overwritten with --enable-* for specific component. It is also possible to use --enable-all-data-clients to overwrite defaults, --enable-all and --enable-all-clients. --disable-all-services disables all buildable service componets. Can be overwritten with --enable-* for specific component. It is also possible to use --enable-all-services to overwrite defaults and --enable-all. --enable-pedantic-compile add pedantic compiler flags --disable-swig-python disable SWIG python bindings --disable-swig-java disable SWIG java bindings --disable-swig disable all bindings through SWIG --disable-hed disable building HED libraries and plugins. 
Do not do that unless You do not want to build anything. Even in that case better use --disable-all. --disable-java disable Java components --disable-junit disable unit testing of Java bindings --disable-python disable Python components --disable-altpython enable alternative Python binding --disable-pylint disable python example checking using pylint --disable-nss disable use of the mozilla nss library --enable-sqlite enable use of the SQLite (not affected by --enable-all) --enable-canlxx enable use of EMI common authentication libraries --disable-cppunit disable cppunit-based UNIT testing of code --disable-ldns disable ldns library usage (makes ARCHERY client unavailable) --disable-xmlsec1 disable features which need xmlsec1 library --enable-mysql enable use of the MySQL client library --enable-ldap-monitor enable use of the ldap monitor --enable-ws-monitor enable use of the ws monitor --enable-argus enable use of Argus PEP V2 libraries --disable-dbjstore disable storing local jobs information in BDB --enable-mock-dmc enable mock DMC, default is disable --enable-gfal enable the GFAL support, default is disable --enable-s3 enable the S3 support, default is disable --disable-xrootd disable the xrootd support, default is enable --disable-ldap disable the LDAP support - requires OpenLDAP --disable-doc disable building documentation (requires doxygen and pdflatex) --disable-a-rex-service disable building A-Rex service --disable-gridftpd-service disable building Gridftpd service --disable-ldap-service disable building LDAP Infosystem Service --disable-giis-service disable building GIIS Service --disable-ldap-monitor disable building LDAP Monitor --disable-ws-monitor disable building WS Monitor --disable-cache-service disable building cache service --disable-datadelivery-service disable building DataDelivery service --disable-acix disable building ACIX service --disable-compute-client disable building compute (job management) client tools --disable-credentials-client disable building client tools for handling X.509 credentials --disable-echo-client disable building client tools for communicationg with Echo service --disable-data-client disable building generic client tools for handling data --disable-jura-client disable building client tool for communicating JURA --disable-saml-client disable building client tool for communicating SAML-based VOMS service --disable-wsrf-client disable building client tools for querying WSRF-enabled services. --enable-unicore-client enables building UNICORE-related plugins (not affected by --enable-all). --disable-emies-client disables building EMI ES-related client plugins. Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) --with-pic try to use only PIC/non-PIC objects [default=use both] --with-gnu-ld assume the C compiler uses GNU ld [default=no] --with-docdir=DIR Install documentation in DIR [default:], [${datadir}/doc] --with-systemd-units-location= Location of the systemd unit files. [None] --with-sysv-scripts-location= Location of the SYSV init scripts. [autodetect] --with-cron-scripts-prefix= Specify the location of the cron directory. 
[SYSCONFDIR/cron.d] --with-gnu-ld assume the C compiler uses GNU ld default=no --with-libiconv-prefix[=DIR] search for libiconv in DIR/include and DIR/lib --without-libiconv-prefix don't search for libiconv in includedir and libdir --with-libintl-prefix[=DIR] search for libintl in DIR/include and DIR/lib --without-libintl-prefix don't search for libintl in includedir and libdir --with-jdk=(JDK) path to JDK. If unset the system JDK will be used --with-jnidir=DIR Install jar in DIR [default:], [${libdir}/java] --with-jninativedir=DIR Install jar in DIR [default:], [${pkglibdir}] --with-python=(PYTHON) specify python program from PATH --with-python-site-arch=directory Direcory where Python modules will be installed - defaults is to query the Python binary --with-python-site-lib=directory Direcory where Python modules will be installed - defaults is to query the Python binary --with-altpython=(PYTHON) specify alternative python program from PATH --with-altpython-site-arch=directory Direcory where Python modules will be installed - defaults is to query the Python binary --with-altpython-site-lib=directory Direcory where Python modules will be installed - defaults is to query the Python binary --with-canlxx=PATH CANL++ installation path --with-xmlsec1=(PATH) xmlsec1 location --with-mysql=(PATH) prefix of MySQL installation. e.g. /usr/local or /usr --with-ldap-monitor=(PATH) where to install the monitor, eg /var/www/ldap-monitor or /usr/share/arc/ldap-monitor --with-ws-monitor=(PATH) where to install the monitor, eg /var/www/ws-monitor or /usr/share/arc/ws-monitor --with-zlib=PATH where zlib is installed --with-argus=PATH ARGUS PEP installation path --with-dbcxx-include=PATH Specify path to db_cxx.h --with-db4-library-path=PATH Specify path to DB4 library --with-flavor=(flavor) Specify the gpt build flavor [autodetect] --with-lcas-location= Specify the LCAS installation path. [/opt/glite] --with-lcmaps-location= Specify the LCMAPS installation path. [/opt/glite] --with-s3=(PATH) libs3 location --with-xrootd=(PATH) Xrootd location Some influential environment variables: CXX C++ compiler command CXXFLAGS C++ compiler flags LDFLAGS linker flags, e.g. -L if you have libraries in a nonstandard directory LIBS libraries to pass to the linker, e.g. -l CPPFLAGS C/C++/Objective C preprocessor flags, e.g. 
-I if you have headers in a nonstandard directory CC C compiler command CFLAGS C compiler flags CPP C preprocessor CXXCPP C++ preprocessor PKG_CONFIG path to pkg-config utility PYTHON_CFLAGS C compiler flags for PYTHON, overriding pkg-config PYTHON_LIBS linker flags for PYTHON, overriding pkg-config ALTPYTHON_CFLAGS C compiler flags for ALTPYTHON, overriding pkg-config ALTPYTHON_LIBS linker flags for ALTPYTHON, overriding pkg-config GTHREAD_CFLAGS C compiler flags for GTHREAD, overriding pkg-config GTHREAD_LIBS linker flags for GTHREAD, overriding pkg-config GLIBMM_CFLAGS C compiler flags for GLIBMM, overriding pkg-config GLIBMM_LIBS linker flags for GLIBMM, overriding pkg-config LIBXML2_CFLAGS C compiler flags for LIBXML2, overriding pkg-config LIBXML2_LIBS linker flags for LIBXML2, overriding pkg-config OPENSSL_CFLAGS C compiler flags for OPENSSL, overriding pkg-config OPENSSL_LIBS linker flags for OPENSSL, overriding pkg-config OPENSSL_1_1_CFLAGS C compiler flags for OPENSSL_1_1, overriding pkg-config OPENSSL_1_1_LIBS linker flags for OPENSSL_1_1, overriding pkg-config NSS_CFLAGS C compiler flags for NSS, overriding pkg-config NSS_LIBS linker flags for NSS, overriding pkg-config SQLITE_CFLAGS C compiler flags for SQLITE, overriding pkg-config SQLITE_LIBS linker flags for SQLITE, overriding pkg-config CANLXX_CFLAGS C compiler flags for CANLXX, overriding pkg-config CANLXX_LIBS linker flags for CANLXX, overriding pkg-config CPPUNIT_CFLAGS C compiler flags for CPPUNIT, overriding pkg-config CPPUNIT_LIBS linker flags for CPPUNIT, overriding pkg-config LDNS_CFLAGS C compiler flags for LDNS, overriding pkg-config LDNS_LIBS linker flags for LDNS, overriding pkg-config XMLSEC_CFLAGS C compiler flags for XMLSEC, overriding pkg-config XMLSEC_LIBS linker flags for XMLSEC, overriding pkg-config XMLSEC_OPENSSL_CFLAGS C compiler flags for XMLSEC_OPENSSL, overriding pkg-config XMLSEC_OPENSSL_LIBS linker flags for XMLSEC_OPENSSL, overriding pkg-config ARGUS_CFLAGS C compiler flags for ARGUS, overriding pkg-config ARGUS_LIBS linker flags for ARGUS, overriding pkg-config GLOBUS_COMMON_CFLAGS C compiler flags for GLOBUS_COMMON, overriding pkg-config GLOBUS_COMMON_LIBS linker flags for GLOBUS_COMMON, overriding pkg-config GLOBUS_MAKEFILE_HEADER path to globus-makefile-header GPT_FLAVOR_CONFIGURATION path to gpt-flavor-configuration GPT_QUERY path to gpt-query GLOBUS_GSSAPI_GSI_CFLAGS C compiler flags for GLOBUS_GSSAPI_GSI, overriding pkg-config GLOBUS_GSSAPI_GSI_LIBS linker flags for GLOBUS_GSSAPI_GSI, overriding pkg-config GLOBUS_GSS_ASSIST_CFLAGS C compiler flags for GLOBUS_GSS_ASSIST, overriding pkg-config GLOBUS_GSS_ASSIST_LIBS linker flags for GLOBUS_GSS_ASSIST, overriding pkg-config GLOBUS_GSI_CALLBACK_CFLAGS C compiler flags for GLOBUS_GSI_CALLBACK, overriding pkg-config GLOBUS_GSI_CALLBACK_LIBS linker flags for GLOBUS_GSI_CALLBACK, overriding pkg-config GLOBUS_FTP_CLIENT_CFLAGS C compiler flags for GLOBUS_FTP_CLIENT, overriding pkg-config GLOBUS_FTP_CLIENT_LIBS linker flags for GLOBUS_FTP_CLIENT, overriding pkg-config GLOBUS_FTP_CONTROL_CFLAGS C compiler flags for GLOBUS_FTP_CONTROL, overriding pkg-config GLOBUS_FTP_CONTROL_LIBS linker flags for GLOBUS_FTP_CONTROL, overriding pkg-config GLOBUS_IO_CFLAGS C compiler flags for GLOBUS_IO, overriding pkg-config GLOBUS_IO_LIBS linker flags for GLOBUS_IO, overriding pkg-config GLOBUS_GSI_CERT_UTILS_CFLAGS C compiler flags for GLOBUS_GSI_CERT_UTILS, overriding pkg-config GLOBUS_GSI_CERT_UTILS_LIBS linker flags for GLOBUS_GSI_CERT_UTILS, overriding 
pkg-config GLOBUS_GSI_CREDENTIAL_CFLAGS C compiler flags for GLOBUS_GSI_CREDENTIAL, overriding pkg-config GLOBUS_GSI_CREDENTIAL_LIBS linker flags for GLOBUS_GSI_CREDENTIAL, overriding pkg-config GLOBUS_OPENSSL_MODULE_CFLAGS C compiler flags for GLOBUS_OPENSSL_MODULE, overriding pkg-config GLOBUS_OPENSSL_MODULE_LIBS linker flags for GLOBUS_OPENSSL_MODULE, overriding pkg-config GLOBUS_OPENSSL_CFLAGS C compiler flags for GLOBUS_OPENSSL, overriding pkg-config GLOBUS_OPENSSL_LIBS linker flags for GLOBUS_OPENSSL, overriding pkg-config GFAL2_CFLAGS C compiler flags for GFAL2, overriding pkg-config GFAL2_LIBS linker flags for GFAL2, overriding pkg-config Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. Report bugs to . _ACEOF ac_status=$? fi if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue test -d "$ac_dir" || { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || continue ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix cd "$ac_dir" || { ac_status=$?; continue; } # Check for guested configure. if test -f "$ac_srcdir/configure.gnu"; then echo && $SHELL "$ac_srcdir/configure.gnu" --help=recursive elif test -f "$ac_srcdir/configure"; then echo && $SHELL "$ac_srcdir/configure" --help=recursive else $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$? cd "$ac_pwd" || { ac_status=$?; break; } done fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF nordugrid-arc configure 5.4.2 generated by GNU Autoconf 2.63 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF exit fi cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by nordugrid-arc $as_me 5.4.2, which was generated by GNU Autoconf 2.63. Invocation command line was $ $0 $@ _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. 
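# NOTE (editorial example, not emitted by autoconf): the help text above documents the
# package-specific --with-* options and the influential environment variables. A minimal
# sketch of how such an invocation might look, assuming a hypothetical build host where
# xRootD lives under /usr and a non-default install prefix is wanted; the option names are
# taken from the help text, while the prefix, paths and flag values are illustrative
# assumptions only, not project defaults:
#
#   ./configure --prefix=/opt/nordugrid \
#               --with-python=python \
#               --with-xrootd=/usr \
#               PKG_CONFIG=/usr/bin/pkg-config CXXFLAGS="-g -O2"
#
# As the help text says, these variables simply override what configure would otherwise
# detect via PATH and pkg-config.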
## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. $as_echo "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;; 2) ac_configure_args1="$ac_configure_args1 '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi ac_configure_args="$ac_configure_args '$ac_arg'" ;; esac done done $as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; } $as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; } # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. # WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Save into config.log some information that might help in debugging. { echo cat <<\_ASBOX ## ---------------- ## ## Cache variables. 
## ## ---------------- ## _ASBOX echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:$LINENO: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) $as_unset $ac_var ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo cat <<\_ASBOX ## ----------------- ## ## Output variables. ## ## ----------------- ## _ASBOX echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then cat <<\_ASBOX ## ------------------- ## ## File substitutions. ## ## ------------------- ## _ASBOX echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then cat <<\_ASBOX ## ----------- ## ## confdefs.h. ## ## ----------- ## _ASBOX echo cat confdefs.h echo fi test "$ac_signal" != 0 && $as_echo "$as_me: caught signal $ac_signal" $as_echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h # Predefined preprocessor variables. cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF # Let the site file select an alternate cache file if it wants to. # Prefer an explicitly selected file to automatically selected ones. ac_site_file1=NONE ac_site_file2=NONE if test -n "$CONFIG_SITE"; then ac_site_file1=$CONFIG_SITE elif test "x$prefix" != xNONE; then ac_site_file1=$prefix/share/config.site ac_site_file2=$prefix/etc/config.site else ac_site_file1=$ac_default_prefix/share/config.site ac_site_file2=$ac_default_prefix/etc/config.site fi for ac_site_file in "$ac_site_file1" "$ac_site_file2" do test "x$ac_site_file" = xNONE && continue if test -r "$ac_site_file"; then { $as_echo "$as_me:$LINENO: loading site script $ac_site_file" >&5 $as_echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . "$ac_site_file" fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special # files actually), so we avoid doing that. 
if test -f "$cache_file"; then { $as_echo "$as_me:$LINENO: loading cache $cache_file" >&5 $as_echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { $as_echo "$as_me:$LINENO: creating cache $cache_file" >&5 $as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi ac_header_list="$ac_header_list sys/time.h" ac_header_list="$ac_header_list unistd.h" ac_func_list="$ac_func_list alarm" # Check that the precious variables saved in the cache have kept the same # value. ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { $as_echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { $as_echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then # differences in whitespace do not lead to failure. ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then { $as_echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5 $as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else { $as_echo "$as_me:$LINENO: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 $as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi { $as_echo "$as_me:$LINENO: former value: \`$ac_old_val'" >&5 $as_echo "$as_me: former value: \`$ac_old_val'" >&2;} { $as_echo "$as_me:$LINENO: current value: \`$ac_new_val'" >&5 $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. *) ac_configure_args="$ac_configure_args '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { $as_echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5 $as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} { { $as_echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5 $as_echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;} { (exit 1); exit 1; }; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu am__api_version='1.11' ac_aux_dir= for ac_dir in "$srcdir" "$srcdir/.." 
"$srcdir/../.."; do if test -f "$ac_dir/install-sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install-sh -c" break elif test -f "$ac_dir/install.sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install.sh -c" break elif test -f "$ac_dir/shtool"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/shtool install -c" break fi done if test -z "$ac_aux_dir"; then { { $as_echo "$as_me:$LINENO: error: cannot find install-sh or install.sh in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" >&5 $as_echo "$as_me: error: cannot find install-sh or install.sh in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" >&2;} { (exit 1); exit 1; }; } fi # These three variables are undocumented and unsupported, # and are intended to be withdrawn in a future Autoconf release. # They can cause serious problems if a builder's source tree is in a directory # whose full name contains unusual characters. ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. # Find a good install program. We prefer a C program (faster), # so one script is as good as another. But avoid the broken or # incompatible versions: # SysV /etc/install, /usr/sbin/install # SunOS /usr/etc/install # IRIX /sbin/install # AIX /bin/install # AmigaOS /C/install, which installs bootblocks on floppy discs # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. # Reject install programs that cannot install multiple files. { $as_echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5 $as_echo_n "checking for a BSD-compatible install... " >&6; } if test -z "$INSTALL"; then if test "${ac_cv_path_install+set}" = set; then $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. # Account for people who put trailing slashes in PATH elements. case $as_dir/ in ./ | .// | /cC/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. # Don't use installbsd from OSF since it installs stuff as root # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then if test $ac_prog = install && grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # AIX install. It has an incompatible calling convention. : elif test $ac_prog = install && grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # program-specific install script used by HP pwplus--don't use. 
: else rm -rf conftest.one conftest.two conftest.dir echo one > conftest.one echo two > conftest.two mkdir conftest.dir if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && test -s conftest.one && test -s conftest.two && test -s conftest.dir/conftest.one && test -s conftest.dir/conftest.two then ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" break 3 fi fi fi done done ;; esac done IFS=$as_save_IFS rm -rf conftest.one conftest.two conftest.dir fi if test "${ac_cv_path_install+set}" = set; then INSTALL=$ac_cv_path_install else # As a last resort, use the slow shell script. Don't cache a # value for INSTALL within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. INSTALL=$ac_install_sh fi fi { $as_echo "$as_me:$LINENO: result: $INSTALL" >&5 $as_echo "$INSTALL" >&6; } # Use test -z because SunOS4 sh mishandles braces in ${var-val}. # It thinks the first close brace ends the variable substitution. test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' { $as_echo "$as_me:$LINENO: checking whether build environment is sane" >&5 $as_echo_n "checking whether build environment is sane... " >&6; } # Just in case sleep 1 echo timestamp > conftest.file # Reject unsafe characters in $srcdir or the absolute working directory # name. Accept space and tab only in the latter. am_lf=' ' case `pwd` in *[\\\"\#\$\&\'\`$am_lf]*) { { $as_echo "$as_me:$LINENO: error: unsafe absolute working directory name" >&5 $as_echo "$as_me: error: unsafe absolute working directory name" >&2;} { (exit 1); exit 1; }; };; esac case $srcdir in *[\\\"\#\$\&\'\`$am_lf\ \ ]*) { { $as_echo "$as_me:$LINENO: error: unsafe srcdir value: \`$srcdir'" >&5 $as_echo "$as_me: error: unsafe srcdir value: \`$srcdir'" >&2;} { (exit 1); exit 1; }; };; esac # Do `set' in a subshell so we don't clobber the current shell's # arguments. Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` if test "$*" = "X"; then # -L didn't work. set X `ls -t "$srcdir/configure" conftest.file` fi rm -f conftest.file if test "$*" != "X $srcdir/configure conftest.file" \ && test "$*" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. Such a system could not be considered "sane". { { $as_echo "$as_me:$LINENO: error: ls -t appears to fail. Make sure there is not a broken alias in your environment" >&5 $as_echo "$as_me: error: ls -t appears to fail. Make sure there is not a broken alias in your environment" >&2;} { (exit 1); exit 1; }; } fi test "$2" = conftest.file ) then # Ok. : else { { $as_echo "$as_me:$LINENO: error: newly created file is older than distributed files! Check your system clock" >&5 $as_echo "$as_me: error: newly created file is older than distributed files! 
Check your system clock" >&2;} { (exit 1); exit 1; }; } fi { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } test "$program_prefix" != NONE && program_transform_name="s&^&$program_prefix&;$program_transform_name" # Use a double $ so make ignores it. test "$program_suffix" != NONE && program_transform_name="s&\$&$program_suffix&;$program_transform_name" # Double any \ or $. # By default was `s,x,x', remove it if useless. ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"` # expand $ac_aux_dir to an absolute path am_aux_dir=`cd $ac_aux_dir && pwd` if test x"${MISSING+set}" != xset; then case $am_aux_dir in *\ * | *\ *) MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; *) MISSING="\${SHELL} $am_aux_dir/missing" ;; esac fi # Use eval to expand $SHELL if eval "$MISSING --run true"; then am_missing_run="$MISSING --run " else am_missing_run= { $as_echo "$as_me:$LINENO: WARNING: \`missing' script is too old or missing" >&5 $as_echo "$as_me: WARNING: \`missing' script is too old or missing" >&2;} fi if test x"${install_sh}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; *) install_sh="\${SHELL} $am_aux_dir/install-sh" esac fi # Installed binaries are usually stripped using `strip' when the user # run `make install-strip'. However `strip' might not be the right # tool to use in cross-compilation environments, therefore Automake # will honor the `STRIP' environment variable to overrule this program. if test "$cross_compiling" != no; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. set dummy ${ac_tool_prefix}strip; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_STRIP+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$STRIP"; then ac_cv_prog_STRIP="$STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_STRIP="${ac_tool_prefix}strip" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi STRIP=$ac_cv_prog_STRIP if test -n "$STRIP"; then { $as_echo "$as_me:$LINENO: result: $STRIP" >&5 $as_echo "$STRIP" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_STRIP"; then ac_ct_STRIP=$STRIP # Extract the first word of "strip", so it can be a program name with args. set dummy strip; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_STRIP"; then ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_STRIP="strip" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP if test -n "$ac_ct_STRIP"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_STRIP" >&5 $as_echo "$ac_ct_STRIP" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_STRIP" = x; then STRIP=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac STRIP=$ac_ct_STRIP fi else STRIP="$ac_cv_prog_STRIP" fi fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" { $as_echo "$as_me:$LINENO: checking for a thread-safe mkdir -p" >&5 $as_echo_n "checking for a thread-safe mkdir -p... " >&6; } if test -z "$MKDIR_P"; then if test "${ac_cv_path_mkdir+set}" = set; then $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in mkdir gmkdir; do for ac_exec_ext in '' $ac_executable_extensions; do { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; } || continue case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( 'mkdir (GNU coreutils) '* | \ 'mkdir (coreutils) '* | \ 'mkdir (fileutils) '4.1*) ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext break 3;; esac done done done IFS=$as_save_IFS fi if test "${ac_cv_path_mkdir+set}" = set; then MKDIR_P="$ac_cv_path_mkdir -p" else # As a last resort, use the slow shell script. Don't cache a # value for MKDIR_P within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. test -d ./--version && rmdir ./--version MKDIR_P="$ac_install_sh -d" fi fi { $as_echo "$as_me:$LINENO: result: $MKDIR_P" >&5 $as_echo "$MKDIR_P" >&6; } mkdir_p="$MKDIR_P" case $mkdir_p in [\\/$]* | ?:[\\/]*) ;; */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;; esac for ac_prog in gawk mawk nawk awk do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_AWK+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$AWK"; then ac_cv_prog_AWK="$AWK" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_AWK="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AWK=$ac_cv_prog_AWK if test -n "$AWK"; then { $as_echo "$as_me:$LINENO: result: $AWK" >&5 $as_echo "$AWK" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$AWK" && break done { $as_echo "$as_me:$LINENO: checking whether ${MAKE-make} sets \$(MAKE)" >&5 $as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... 
" >&6; } set x ${MAKE-make} ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` if { as_var=ac_cv_prog_make_${ac_make}_set; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else cat >conftest.make <<\_ACEOF SHELL = /bin/sh all: @echo '@@@%%%=$(MAKE)=@@@%%%' _ACEOF # GNU make sometimes prints "make[1]: Entering...", which would confuse us. case `${MAKE-make} -f conftest.make 2>/dev/null` in *@@@%%%=?*=@@@%%%*) eval ac_cv_prog_make_${ac_make}_set=yes;; *) eval ac_cv_prog_make_${ac_make}_set=no;; esac rm -f conftest.make fi if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } SET_MAKE= else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } SET_MAKE="MAKE=${MAKE-make}" fi rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I." am__isrc=' -I$(srcdir)' # test to see if srcdir already configured if test -f $srcdir/config.status; then { { $as_echo "$as_me:$LINENO: error: source directory already configured; run \"make distclean\" there first" >&5 $as_echo "$as_me: error: source directory already configured; run \"make distclean\" there first" >&2;} { (exit 1); exit 1; }; } fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi # Define the identity of the package. PACKAGE='nordugrid-arc' VERSION='5.4.2' cat >>confdefs.h <<_ACEOF #define PACKAGE "$PACKAGE" _ACEOF cat >>confdefs.h <<_ACEOF #define VERSION "$VERSION" _ACEOF # Some tools Automake needs. ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} # We need awk for the "check" target. The system "awk" is bad on # some platforms. # Always define AMTAR for backward compatibility. AMTAR=${AMTAR-"${am_missing_run}tar"} { $as_echo "$as_me:$LINENO: checking how to create a pax tar archive" >&5 $as_echo_n "checking how to create a pax tar archive... " >&6; } # Loop over all known methods to create a tar archive until one works. _am_tools='gnutar pax cpio none' _am_tools=${am_cv_prog_tar_pax-$_am_tools} # Do not fold the above two line into one, because Tru64 sh and # Solaris sh will not grok spaces in the rhs of `-'. for _am_tool in $_am_tools do case $_am_tool in gnutar) for _am_tar in tar gnutar gtar; do { echo "$as_me:$LINENO: $_am_tar --version" >&5 ($_am_tar --version) >&5 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && break done am__tar="$_am_tar --format=posix -chf - "'"$$tardir"' am__tar_="$_am_tar --format=posix -chf - "'"$tardir"' am__untar="$_am_tar -xf -" ;; plaintar) # Must skip GNU tar: if it does not support --format= it doesn't create # ustar tarball either. 
(tar --version) >/dev/null 2>&1 && continue am__tar='tar chf - "$$tardir"' am__tar_='tar chf - "$tardir"' am__untar='tar xf -' ;; pax) am__tar='pax -L -x pax -w "$$tardir"' am__tar_='pax -L -x pax -w "$tardir"' am__untar='pax -r' ;; cpio) am__tar='find "$$tardir" -print | cpio -o -H pax -L' am__tar_='find "$tardir" -print | cpio -o -H pax -L' am__untar='cpio -i -H pax -d' ;; none) am__tar=false am__tar_=false am__untar=false ;; esac # If the value was cached, stop now. We just wanted to have am__tar # and am__untar set. test -n "${am_cv_prog_tar_pax}" && break # tar/untar a dummy directory, and stop if the command works rm -rf conftest.dir mkdir conftest.dir echo GrepMe > conftest.dir/file { echo "$as_me:$LINENO: tardir=conftest.dir && eval $am__tar_ >conftest.tar" >&5 (tardir=conftest.dir && eval $am__tar_ >conftest.tar) >&5 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } rm -rf conftest.dir if test -s conftest.tar; then { echo "$as_me:$LINENO: $am__untar <conftest.tar" >&5 ($am__untar <conftest.tar) >&5 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } grep GrepMe conftest.dir/file >/dev/null 2>&1 && break fi done rm -rf conftest.dir if test "${am_cv_prog_tar_pax+set}" = set; then $as_echo_n "(cached) " >&6 else am_cv_prog_tar_pax=$_am_tool fi { $as_echo "$as_me:$LINENO: result: $am_cv_prog_tar_pax" >&5 $as_echo "$am_cv_prog_tar_pax" >&6; } ac_config_headers="$ac_config_headers config.h" baseversion=`echo $VERSION | sed 's/[^0-9.].*//'` preversion=`echo $VERSION | sed 's/^[0-9.]*//'` if test "x$baseversion" = "x" ; then baseversion=$VERSION preversion="" fi if test "x$preversion" = "x" ; then fedorarelease="1" fedorasetupopts="-q" debianversion="$baseversion" else fedorarelease="0.$preversion" fedorasetupopts="-q -n %{name}-%{version}$preversion" debianversion="$baseversion~$preversion" fi # This macro was introduced in autoconf 2.57g? but we currently only require 2.56 ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu if test -z "$CXX"; then if test -n "$CCC"; then CXX=$CCC else if test -n "$ac_tool_prefix"; then for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CXX+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$CXX"; then ac_cv_prog_CXX="$CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CXX=$ac_cv_prog_CXX if test -n "$CXX"; then { $as_echo "$as_me:$LINENO: result: $CXX" >&5 $as_echo "$CXX" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CXX" && break done fi if test -z "$CXX"; then ac_ct_CXX=$CXX for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_CXX+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CXX"; then ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_CXX="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CXX=$ac_cv_prog_ac_ct_CXX if test -n "$ac_ct_CXX"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_CXX" >&5 $as_echo "$ac_ct_CXX" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CXX" && break done if test "x$ac_ct_CXX" = x; then CXX="g++" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CXX=$ac_ct_CXX fi fi fi fi # Provide some information about the compiler. $as_echo "$as_me:$LINENO: checking for C++ compiler version" >&5 set X $ac_compile ac_compiler=$2 { (ac_try="$ac_compiler --version >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compiler --version >&5") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } { (ac_try="$ac_compiler -v >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compiler -v >&5") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } { (ac_try="$ac_compiler -V >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compiler -V >&5") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. 
# It will help us diagnose broken compilers, and finding out an intuition # of exeext. { $as_echo "$as_me:$LINENO: checking for C++ compiler default output file name" >&5 $as_echo_n "checking for C++ compiler default output file name... " >&6; } ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ac_rmfiles= for ac_file in $ac_files do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done rm -f $ac_rmfiles if { (ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link_default") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, # so that the user can short-circuit this test for compilers unknown to # Autoconf. for ac_file in $ac_files '' do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. break;; *.* ) if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi # We set ac_cv_exeext here because the later test for it is not # safe: cross compilers may not add the suffix if given an `-o' # argument, so we may need to know it at that point already. # Even if this section looks crufty: it has the advantage of # actually working. break;; * ) break;; esac done test "$ac_cv_exeext" = no && ac_cv_exeext= else ac_file='' fi { $as_echo "$as_me:$LINENO: result: $ac_file" >&5 $as_echo "$ac_file" >&6; } if test -z "$ac_file"; then $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: C++ compiler cannot create executables See \`config.log' for more details." >&5 $as_echo "$as_me: error: C++ compiler cannot create executables See \`config.log' for more details." >&2;} { (exit 77); exit 77; }; }; } fi ac_exeext=$ac_cv_exeext # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. { $as_echo "$as_me:$LINENO: checking whether the C++ compiler works" >&5 $as_echo_n "checking whether the C++ compiler works... " >&6; } # FIXME: These cross compiler hacks should be removed for Autoconf 3.0 # If not cross compiling, check that we can run a simple program. if test "$cross_compiling" != yes; then if { ac_try='./$ac_file' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: cannot run C++ compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details." >&5 $as_echo "$as_me: error: cannot run C++ compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; }; } fi fi fi { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. { $as_echo "$as_me:$LINENO: checking whether we are cross compiling" >&5 $as_echo_n "checking whether we are cross compiling... " >&6; } { $as_echo "$as_me:$LINENO: result: $cross_compiling" >&5 $as_echo "$cross_compiling" >&6; } { $as_echo "$as_me:$LINENO: checking for suffix of executables" >&5 $as_echo_n "checking for suffix of executables... " >&6; } if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link See \`config.log' for more details." >&5 $as_echo "$as_me: error: cannot compute suffix of executables: cannot compile and link See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; }; } fi rm -f conftest$ac_cv_exeext { $as_echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5 $as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT { $as_echo "$as_me:$LINENO: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } if test "${ac_cv_objext+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile See \`config.log' for more details." >&5 $as_echo "$as_me: error: cannot compute suffix of object files: cannot compile See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; }; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_objext" >&5 $as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT { $as_echo "$as_me:$LINENO: checking whether we are using the GNU C++ compiler" >&5 $as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } if test "${ac_cv_cxx_compiler_gnu+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_compiler_gnu=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_cxx_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:$LINENO: result: $ac_cv_cxx_compiler_gnu" >&5 $as_echo "$ac_cv_cxx_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GXX=yes else GXX= fi ac_test_CXXFLAGS=${CXXFLAGS+set} ac_save_CXXFLAGS=$CXXFLAGS { $as_echo "$as_me:$LINENO: checking whether $CXX accepts -g" >&5 $as_echo_n "checking whether $CXX accepts -g... " >&6; } if test "${ac_cv_prog_cxx_g+set}" = set; then $as_echo_n "(cached) " >&6 else ac_save_cxx_werror_flag=$ac_cxx_werror_flag ac_cxx_werror_flag=yes ac_cv_prog_cxx_g=no CXXFLAGS="-g" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then ac_cv_prog_cxx_g=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 CXXFLAGS="" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cxx_werror_flag=$ac_save_cxx_werror_flag CXXFLAGS="-g" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_prog_cxx_g=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cxx_werror_flag=$ac_save_cxx_werror_flag fi { $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cxx_g" >&5 $as_echo "$ac_cv_prog_cxx_g" >&6; } if test "$ac_test_CXXFLAGS" = set; then CXXFLAGS=$ac_save_CXXFLAGS elif test $ac_cv_prog_cxx_g = yes; then if test "$GXX" = yes; then CXXFLAGS="-g -O2" else CXXFLAGS="-g" fi else if test "$GXX" = yes; then CXXFLAGS="-O2" else CXXFLAGS= fi fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu DEPDIR="${am__leading_dot}deps" ac_config_commands="$ac_config_commands depfiles" am_make=${MAKE-make} cat > confinc << 'END' am__doit: @echo this is the am__doit target .PHONY: am__doit END # If we don't find an include directive, just comment out the code. { $as_echo "$as_me:$LINENO: checking for style of include used by $am_make" >&5 $as_echo_n "checking for style of include used by $am_make... " >&6; } am__include="#" am__quote= _am_result=none # First try GNU make style include. echo "include confinc" > confmf # Ignore all kinds of additional output from `make'. case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=include am__quote= _am_result=GNU ;; esac # Now try BSD make style include. 
if test "$am__include" = "#"; then echo '.include "confinc"' > confmf case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=.include am__quote="\"" _am_result=BSD ;; esac fi { $as_echo "$as_me:$LINENO: result: $_am_result" >&5 $as_echo "$_am_result" >&6; } rm -f confinc confmf # Check whether --enable-dependency-tracking was given. if test "${enable_dependency_tracking+set}" = set; then enableval=$enable_dependency_tracking; fi if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' fi if test "x$enable_dependency_tracking" != xno; then AMDEP_TRUE= AMDEP_FALSE='#' else AMDEP_TRUE='#' AMDEP_FALSE= fi depcc="$CXX" am_compiler_list= { $as_echo "$as_me:$LINENO: checking dependency style of $depcc" >&5 $as_echo_n "checking dependency style of $depcc... " >&6; } if test "${am_cv_CXX_dependencies_compiler_type+set}" = set; then $as_echo_n "(cached) " >&6 else if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named `D' -- because `-MD' means `put the output # in D'. mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_CXX_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` fi am__universal=false case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with # Solaris 8's {/usr,}/bin/sh. touch sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with `-c' and `-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle `-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # after this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvisualcpp | msvcmsys) # This compiler won't grok `-c -o', but also, the minuso test has # not run yet. 
These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_CXX_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_CXX_dependencies_compiler_type=none fi fi { $as_echo "$as_me:$LINENO: result: $am_cv_CXX_dependencies_compiler_type" >&5 $as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; } CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type if test "x$enable_dependency_tracking" != xno \ && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then am__fastdepCXX_TRUE= am__fastdepCXX_FALSE='#' else am__fastdepCXX_TRUE='#' am__fastdepCXX_FALSE= fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:$LINENO: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:$LINENO: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $# != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:$LINENO: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. 
set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:$LINENO: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH See \`config.log' for more details." >&5 $as_echo "$as_me: error: no acceptable C compiler found in \$PATH See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; }; } # Provide some information about the compiler. $as_echo "$as_me:$LINENO: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 { (ac_try="$ac_compiler --version >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compiler --version >&5") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } { (ac_try="$ac_compiler -v >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compiler -v >&5") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } { (ac_try="$ac_compiler -V >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compiler -V >&5") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } { $as_echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if test "${ac_cv_c_compiler_gnu+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_compiler_gnu=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if test "${ac_cv_prog_cc_g+set}" = set; then $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_prog_cc_g=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 CFLAGS="" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then : else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_prog_cc_g=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:$LINENO: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if test "${ac_cv_prog_cc_c89+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include #include #include /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_prog_cc_c89=$ac_arg else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:$LINENO: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:$LINENO: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu depcc="$CC" am_compiler_list= { $as_echo "$as_me:$LINENO: checking dependency style of $depcc" >&5 $as_echo_n "checking dependency style of $depcc... " >&6; } if test "${am_cv_CC_dependencies_compiler_type+set}" = set; then $as_echo_n "(cached) " >&6 else if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named `D' -- because `-MD' means `put the output # in D'. mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_CC_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` fi am__universal=false case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. 
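# --- Illustrative sketch, not part of the generated script: the dependency
# --- style probe described above boils down to "compile a source file with
# --- several includes, then check that a usable .Po dependency file showed
# --- up".  The function below shows that idea for one mode only; the name
# --- probe_gcc3_deps is made up, and the flags mirror the -MT/-MD/-MP/-MF
# --- pattern that automake's gcc3 depmode relies on.
probe_gcc3_deps () {
  # $1 is the compiler to test, e.g. "gcc".
  mkdir conftest.dep && cd conftest.dep || return 1
  echo '#include "conftst1.h"' >  conftest.c
  echo '#include "conftst2.h"' >> conftest.c
  : > conftst1.h
  : > conftst2.h
  # Ask for dependency output as a side effect of compilation.
  "$1" -MT conftest.o -MD -MP -MF conftest.Po -c -o conftest.o conftest.c \
      >/dev/null 2>&1 &&
    grep conftst1.h conftest.Po >/dev/null 2>&1 &&
    grep conftst2.h conftest.Po >/dev/null 2>&1
  status=$?
  cd .. && rm -rf conftest.dep
  return $status
}
# Hypothetical use: probe_gcc3_deps gcc && echo "gcc3 depmode looks usable"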
# This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with # Solaris 8's {/usr,}/bin/sh. touch sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with `-c' and `-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle `-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # after this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvisualcpp | msvcmsys) # This compiler won't grok `-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_CC_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_CC_dependencies_compiler_type=none fi fi { $as_echo "$as_me:$LINENO: result: $am_cv_CC_dependencies_compiler_type" >&5 $as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type if test "x$enable_dependency_tracking" != xno \ && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then am__fastdepCC_TRUE= am__fastdepCC_FALSE='#' else am__fastdepCC_TRUE='#' am__fastdepCC_FALSE= fi case $ac_cv_prog_cc_stdc in no) ac_cv_prog_cc_c99=no; ac_cv_prog_cc_c89=no ;; *) { $as_echo "$as_me:$LINENO: checking for $CC option to accept ISO C99" >&5 $as_echo_n "checking for $CC option to accept ISO C99... " >&6; } if test "${ac_cv_prog_cc_c99+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c99=no ac_save_CC=$CC cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include #include #include #include // Check varargs macros. These examples are taken from C99 6.10.3.5. #define debug(...) fprintf (stderr, __VA_ARGS__) #define showlist(...) puts (#__VA_ARGS__) #define report(test,...) ((test) ? 
puts (#test) : printf (__VA_ARGS__)) static void test_varargs_macros (void) { int x = 1234; int y = 5678; debug ("Flag"); debug ("X = %d\n", x); showlist (The first, second, and third items.); report (x>y, "x is %d but y is %d", x, y); } // Check long long types. #define BIG64 18446744073709551615ull #define BIG32 4294967295ul #define BIG_OK (BIG64 / BIG32 == 4294967297ull && BIG64 % BIG32 == 0) #if !BIG_OK your preprocessor is broken; #endif #if BIG_OK #else your preprocessor is broken; #endif static long long int bignum = -9223372036854775807LL; static unsigned long long int ubignum = BIG64; struct incomplete_array { int datasize; double data[]; }; struct named_init { int number; const wchar_t *name; double average; }; typedef const char *ccp; static inline int test_restrict (ccp restrict text) { // See if C++-style comments work. // Iterate through items via the restricted pointer. // Also check for declarations in for loops. for (unsigned int i = 0; *(text+i) != '\0'; ++i) continue; return 0; } // Check varargs and va_copy. static void test_varargs (const char *format, ...) { va_list args; va_start (args, format); va_list args_copy; va_copy (args_copy, args); const char *str; int number; float fnumber; while (*format) { switch (*format++) { case 's': // string str = va_arg (args_copy, const char *); break; case 'd': // int number = va_arg (args_copy, int); break; case 'f': // float fnumber = va_arg (args_copy, double); break; default: break; } } va_end (args_copy); va_end (args); } int main () { // Check bool. _Bool success = false; // Check restrict. if (test_restrict ("String literal") == 0) success = true; char *restrict newvar = "Another string"; // Check varargs. test_varargs ("s, d' f .", "string", 65, 34.234); test_varargs_macros (); // Check flexible array members. struct incomplete_array *ia = malloc (sizeof (struct incomplete_array) + (sizeof (double) * 10)); ia->datasize = 10; for (int i = 0; i < ia->datasize; ++i) ia->data[i] = i * 1.234; // Check named initializers. struct named_init ni = { .number = 34, .name = L"Test wide string", .average = 543.34343, }; ni.number = 58; int dynamic_array[ni.number]; dynamic_array[ni.number - 1] = 543; // work around unused variable warnings return (!success || bignum == 0LL || ubignum == 0uLL || newvar[0] == 'x' || dynamic_array[ni.number - 1] != 543); ; return 0; } _ACEOF for ac_arg in '' -std=gnu99 -std=c99 -c99 -AC99 -xc99=all -qlanglvl=extc99 do CC="$ac_save_CC $ac_arg" rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then ac_cv_prog_cc_c99=$ac_arg else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c99" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c99" in x) { $as_echo "$as_me:$LINENO: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:$LINENO: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c99" { $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_c99" >&5 $as_echo "$ac_cv_prog_cc_c99" >&6; } ;; esac if test "x$ac_cv_prog_cc_c99" != xno; then ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c99 else { $as_echo "$as_me:$LINENO: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if test "${ac_cv_prog_cc_c89+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include #include #include /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then ac_cv_prog_cc_c89=$ac_arg else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:$LINENO: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:$LINENO: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c89 else ac_cv_prog_cc_stdc=no fi fi ;; esac { $as_echo "$as_me:$LINENO: checking for $CC option to accept ISO Standard C" >&5 $as_echo_n "checking for $CC option to accept ISO Standard C... " >&6; } if test "${ac_cv_prog_cc_stdc+set}" = set; then $as_echo_n "(cached) " >&6 fi case $ac_cv_prog_cc_stdc in no) { $as_echo "$as_me:$LINENO: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; '') { $as_echo "$as_me:$LINENO: result: none needed" >&5 $as_echo "none needed" >&6; } ;; *) { $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_stdc" >&5 $as_echo "$ac_cv_prog_cc_stdc" >&6; } ;; esac ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:$LINENO: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... " >&6; } # On Suns, sometimes $CPP names a directory. if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if test "${ac_cv_prog_CPP+set}" = set; then $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then : else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 # Broken: fails on valid input. continue fi rm -f conftest.err conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ #include _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then # Broken: success on invalid input. continue else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.err conftest.$ac_ext if $ac_preproc_ok; then break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi { $as_echo "$as_me:$LINENO: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then : else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 # Broken: fails on valid input. continue fi rm -f conftest.err conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then # Broken: success on invalid input. continue else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. 
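# --- Illustrative sketch, not part of the generated script: the preprocessor
# --- check above is deliberately two-sided.  A usable $CPP must succeed on a
# --- valid include *and* fail on a header that does not exist; a tool that
# --- passes only the first half is rejected.  The names below are made up.
cpp_sanity_check () {
  # $1 is the preprocessor command line to test, e.g. "cc -E".
  echo '#include <limits.h>'              > conftest_ok.c
  echo '#include <no_such_header_xyz.h>'  > conftest_bad.c
  if $1 conftest_ok.c >/dev/null 2>&1; then
    if $1 conftest_bad.c >/dev/null 2>&1; then
      rc=1      # broken: "succeeds" on a nonexistent header
    else
      rc=0      # accepts valid input and rejects invalid input
    fi
  else
    rc=1        # broken: fails on valid input
  fi
  rm -f conftest_ok.c conftest_bad.c
  return $rc
}
# Hypothetical use: cpp_sanity_check "${CC:-cc} -E" && echo "preprocessor is sane"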
rm -f conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details." >&5 $as_echo "$as_me: error: C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; }; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:$LINENO: checking for grep that handles long lines and -e" >&5 $as_echo_n "checking for grep that handles long lines and -e... " >&6; } if test "${ac_cv_path_GREP+set}" = set; then $as_echo_n "(cached) " >&6 else if test -z "$GREP"; then ac_path_GREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in grep ggrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue # Check for GNU ac_path_GREP and select it if it is found. # Check for GNU $ac_path_GREP case `"$ac_path_GREP" --version 2>&1` in *GNU*) ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'GREP' >> "conftest.nl" "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break ac_count=`expr $ac_count + 1` if test $ac_count -gt ${ac_path_GREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_GREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_GREP"; then { { $as_echo "$as_me:$LINENO: error: no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5 $as_echo "$as_me: error: no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;} { (exit 1); exit 1; }; } fi else ac_cv_path_GREP=$GREP fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_path_GREP" >&5 $as_echo "$ac_cv_path_GREP" >&6; } GREP="$ac_cv_path_GREP" { $as_echo "$as_me:$LINENO: checking for egrep" >&5 $as_echo_n "checking for egrep... " >&6; } if test "${ac_cv_path_EGREP+set}" = set; then $as_echo_n "(cached) " >&6 else if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 then ac_cv_path_EGREP="$GREP -E" else if test -z "$EGREP"; then ac_path_EGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in egrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue # Check for GNU ac_path_EGREP and select it if it is found. 
# Check for GNU $ac_path_EGREP case `"$ac_path_EGREP" --version 2>&1` in *GNU*) ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'EGREP' >> "conftest.nl" "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break ac_count=`expr $ac_count + 1` if test $ac_count -gt ${ac_path_EGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_EGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_EGREP"; then { { $as_echo "$as_me:$LINENO: error: no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5 $as_echo "$as_me: error: no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;} { (exit 1); exit 1; }; } fi else ac_cv_path_EGREP=$EGREP fi fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_path_EGREP" >&5 $as_echo "$ac_cv_path_EGREP" >&6; } EGREP="$ac_cv_path_EGREP" { $as_echo "$as_me:$LINENO: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if test "${ac_cv_header_stdc+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include #include #include int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_header_stdc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. 
*/ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then : else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) ac_cv_header_stdc=no fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then cat >>confdefs.h <<\_ACEOF #define STDC_HEADERS 1 _ACEOF fi # On IRIX 5.3, sys/types and inttypes.h are conflicting. for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ inttypes.h stdint.h unistd.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then eval "$as_ac_Header=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_Header=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done if test "${ac_cv_header_minix_config_h+set}" = set; then { $as_echo "$as_me:$LINENO: checking for minix/config.h" >&5 $as_echo_n "checking for minix/config.h... " >&6; } if test "${ac_cv_header_minix_config_h+set}" = set; then $as_echo_n "(cached) " >&6 fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_minix_config_h" >&5 $as_echo "$ac_cv_header_minix_config_h" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking minix/config.h usability" >&5 $as_echo_n "checking minix/config.h usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking minix/config.h presence" >&5 $as_echo_n "checking minix/config.h presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: minix/config.h: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: minix/config.h: accepted by the compiler, rejected by the preprocessor!" 
>&2;} { $as_echo "$as_me:$LINENO: WARNING: minix/config.h: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: minix/config.h: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: minix/config.h: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: minix/config.h: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: minix/config.h: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: minix/config.h: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: minix/config.h: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: minix/config.h: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: minix/config.h: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: minix/config.h: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: minix/config.h: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: minix/config.h: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: minix/config.h: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: minix/config.h: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for minix/config.h" >&5 $as_echo_n "checking for minix/config.h... " >&6; } if test "${ac_cv_header_minix_config_h+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_header_minix_config_h=$ac_header_preproc fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_minix_config_h" >&5 $as_echo "$ac_cv_header_minix_config_h" >&6; } fi if test "x$ac_cv_header_minix_config_h" = x""yes; then MINIX=yes else MINIX= fi if test "$MINIX" = yes; then cat >>confdefs.h <<\_ACEOF #define _POSIX_SOURCE 1 _ACEOF cat >>confdefs.h <<\_ACEOF #define _POSIX_1_SOURCE 2 _ACEOF cat >>confdefs.h <<\_ACEOF #define _MINIX 1 _ACEOF fi { $as_echo "$as_me:$LINENO: checking whether it is safe to define __EXTENSIONS__" >&5 $as_echo_n "checking whether it is safe to define __EXTENSIONS__... " >&6; } if test "${ac_cv_safe_to_define___extensions__+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ # define __EXTENSIONS__ 1 $ac_includes_default int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then ac_cv_safe_to_define___extensions__=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_safe_to_define___extensions__=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_safe_to_define___extensions__" >&5 $as_echo "$ac_cv_safe_to_define___extensions__" >&6; } test $ac_cv_safe_to_define___extensions__ = yes && cat >>confdefs.h <<\_ACEOF #define __EXTENSIONS__ 1 _ACEOF cat >>confdefs.h <<\_ACEOF #define _ALL_SOURCE 1 _ACEOF cat >>confdefs.h <<\_ACEOF #define _GNU_SOURCE 1 _ACEOF cat >>confdefs.h <<\_ACEOF #define _POSIX_PTHREAD_SEMANTICS 1 _ACEOF cat >>confdefs.h <<\_ACEOF #define _TANDEM_SOURCE 1 _ACEOF for ac_prog in gawk mawk nawk awk do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_AWK+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$AWK"; then ac_cv_prog_AWK="$AWK" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_AWK="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AWK=$ac_cv_prog_AWK if test -n "$AWK"; then { $as_echo "$as_me:$LINENO: result: $AWK" >&5 $as_echo "$AWK" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$AWK" && break done # Find a good install program. We prefer a C program (faster), # so one script is as good as another. But avoid the broken or # incompatible versions: # SysV /etc/install, /usr/sbin/install # SunOS /usr/etc/install # IRIX /sbin/install # AIX /bin/install # AmigaOS /C/install, which installs bootblocks on floppy discs # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. # Reject install programs that cannot install multiple files. { $as_echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5 $as_echo_n "checking for a BSD-compatible install... " >&6; } if test -z "$INSTALL"; then if test "${ac_cv_path_install+set}" = set; then $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. # Account for people who put trailing slashes in PATH elements. case $as_dir/ in ./ | .// | /cC/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. # Don't use installbsd from OSF since it installs stuff as root # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then if test $ac_prog = install && grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # AIX install. 
It has an incompatible calling convention. : elif test $ac_prog = install && grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # program-specific install script used by HP pwplus--don't use. : else rm -rf conftest.one conftest.two conftest.dir echo one > conftest.one echo two > conftest.two mkdir conftest.dir if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && test -s conftest.one && test -s conftest.two && test -s conftest.dir/conftest.one && test -s conftest.dir/conftest.two then ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" break 3 fi fi fi done done ;; esac done IFS=$as_save_IFS rm -rf conftest.one conftest.two conftest.dir fi if test "${ac_cv_path_install+set}" = set; then INSTALL=$ac_cv_path_install else # As a last resort, use the slow shell script. Don't cache a # value for INSTALL within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. INSTALL=$ac_install_sh fi fi { $as_echo "$as_me:$LINENO: result: $INSTALL" >&5 $as_echo "$INSTALL" >&6; } # Use test -z because SunOS4 sh mishandles braces in ${var-val}. # It thinks the first close brace ends the variable substitution. test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' { $as_echo "$as_me:$LINENO: checking whether ln -s works" >&5 $as_echo_n "checking whether ln -s works... " >&6; } LN_S=$as_ln_s if test "$LN_S" = "ln -s"; then { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:$LINENO: result: no, using $LN_S" >&5 $as_echo "no, using $LN_S" >&6; } fi { $as_echo "$as_me:$LINENO: checking whether ${MAKE-make} sets \$(MAKE)" >&5 $as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } set x ${MAKE-make} ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` if { as_var=ac_cv_prog_make_${ac_make}_set; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else cat >conftest.make <<\_ACEOF SHELL = /bin/sh all: @echo '@@@%%%=$(MAKE)=@@@%%%' _ACEOF # GNU make sometimes prints "make[1]: Entering...", which would confuse us. case `${MAKE-make} -f conftest.make 2>/dev/null` in *@@@%%%=?*=@@@%%%*) eval ac_cv_prog_make_${ac_make}_set=yes;; *) eval ac_cv_prog_make_${ac_make}_set=no;; esac rm -f conftest.make fi if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } SET_MAKE= else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } SET_MAKE="MAKE=${MAKE-make}" fi # Check whether --enable-static was given. if test "${enable_static+set}" = set; then enableval=$enable_static; p=${PACKAGE-default} case $enableval in yes) enable_static=yes ;; no) enable_static=no ;; *) enable_static=no # Look at the argument we got. We use all the common list separators. lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_static=yes fi done IFS="$lt_save_ifs" ;; esac else enable_static=no fi # Make sure we can run config.sub. $SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || { { $as_echo "$as_me:$LINENO: error: cannot run $SHELL $ac_aux_dir/config.sub" >&5 $as_echo "$as_me: error: cannot run $SHELL $ac_aux_dir/config.sub" >&2;} { (exit 1); exit 1; }; } { $as_echo "$as_me:$LINENO: checking build system type" >&5 $as_echo_n "checking build system type... 
" >&6; } if test "${ac_cv_build+set}" = set; then $as_echo_n "(cached) " >&6 else ac_build_alias=$build_alias test "x$ac_build_alias" = x && ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` test "x$ac_build_alias" = x && { { $as_echo "$as_me:$LINENO: error: cannot guess build type; you must specify one" >&5 $as_echo "$as_me: error: cannot guess build type; you must specify one" >&2;} { (exit 1); exit 1; }; } ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || { { $as_echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $ac_build_alias failed" >&5 $as_echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $ac_build_alias failed" >&2;} { (exit 1); exit 1; }; } fi { $as_echo "$as_me:$LINENO: result: $ac_cv_build" >&5 $as_echo "$ac_cv_build" >&6; } case $ac_cv_build in *-*-*) ;; *) { { $as_echo "$as_me:$LINENO: error: invalid value of canonical build" >&5 $as_echo "$as_me: error: invalid value of canonical build" >&2;} { (exit 1); exit 1; }; };; esac build=$ac_cv_build ac_save_IFS=$IFS; IFS='-' set x $ac_cv_build shift build_cpu=$1 build_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: build_os=$* IFS=$ac_save_IFS case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac { $as_echo "$as_me:$LINENO: checking host system type" >&5 $as_echo_n "checking host system type... " >&6; } if test "${ac_cv_host+set}" = set; then $as_echo_n "(cached) " >&6 else if test "x$host_alias" = x; then ac_cv_host=$ac_cv_build else ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || { { $as_echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&5 $as_echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&2;} { (exit 1); exit 1; }; } fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_host" >&5 $as_echo "$ac_cv_host" >&6; } case $ac_cv_host in *-*-*) ;; *) { { $as_echo "$as_me:$LINENO: error: invalid value of canonical host" >&5 $as_echo "$as_me: error: invalid value of canonical host" >&2;} { (exit 1); exit 1; }; };; esac host=$ac_cv_host ac_save_IFS=$IFS; IFS='-' set x $ac_cv_host shift host_cpu=$1 host_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: host_os=$* IFS=$ac_save_IFS case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac enable_win32_dll=yes case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-cegcc*) if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}as", so it can be a program name with args. set dummy ${ac_tool_prefix}as; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_AS+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$AS"; then ac_cv_prog_AS="$AS" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_AS="${ac_tool_prefix}as" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AS=$ac_cv_prog_AS if test -n "$AS"; then { $as_echo "$as_me:$LINENO: result: $AS" >&5 $as_echo "$AS" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_AS"; then ac_ct_AS=$AS # Extract the first word of "as", so it can be a program name with args. set dummy as; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_AS+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_AS"; then ac_cv_prog_ac_ct_AS="$ac_ct_AS" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_AS="as" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_AS=$ac_cv_prog_ac_ct_AS if test -n "$ac_ct_AS"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_AS" >&5 $as_echo "$ac_ct_AS" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_AS" = x; then AS="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac AS=$ac_ct_AS fi else AS="$ac_cv_prog_AS" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. set dummy ${ac_tool_prefix}dlltool; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_DLLTOOL+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$DLLTOOL"; then ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DLLTOOL=$ac_cv_prog_DLLTOOL if test -n "$DLLTOOL"; then { $as_echo "$as_me:$LINENO: result: $DLLTOOL" >&5 $as_echo "$DLLTOOL" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_DLLTOOL"; then ac_ct_DLLTOOL=$DLLTOOL # Extract the first word of "dlltool", so it can be a program name with args. set dummy dlltool; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_DLLTOOL+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DLLTOOL"; then ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_DLLTOOL="dlltool" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL if test -n "$ac_ct_DLLTOOL"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_DLLTOOL" >&5 $as_echo "$ac_ct_DLLTOOL" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_DLLTOOL" = x; then DLLTOOL="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DLLTOOL=$ac_ct_DLLTOOL fi else DLLTOOL="$ac_cv_prog_DLLTOOL" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. set dummy ${ac_tool_prefix}objdump; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_OBJDUMP+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$OBJDUMP"; then ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OBJDUMP=$ac_cv_prog_OBJDUMP if test -n "$OBJDUMP"; then { $as_echo "$as_me:$LINENO: result: $OBJDUMP" >&5 $as_echo "$OBJDUMP" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OBJDUMP"; then ac_ct_OBJDUMP=$OBJDUMP # Extract the first word of "objdump", so it can be a program name with args. set dummy objdump; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_OBJDUMP+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OBJDUMP"; then ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_OBJDUMP="objdump" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP if test -n "$ac_ct_OBJDUMP"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_OBJDUMP" >&5 $as_echo "$ac_ct_OBJDUMP" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OBJDUMP" = x; then OBJDUMP="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OBJDUMP=$ac_ct_OBJDUMP fi else OBJDUMP="$ac_cv_prog_OBJDUMP" fi ;; esac test -z "$AS" && AS=as test -z "$DLLTOOL" && DLLTOOL=dlltool test -z "$OBJDUMP" && OBJDUMP=objdump case `pwd` in *\ * | *\ *) { $as_echo "$as_me:$LINENO: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 $as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; esac macro_version='2.2.6b' macro_revision='1.3017' ltmain="$ac_aux_dir/ltmain.sh" { $as_echo "$as_me:$LINENO: checking for a sed that does not truncate output" >&5 $as_echo_n "checking for a sed that does not truncate output... " >&6; } if test "${ac_cv_path_SED+set}" = set; then $as_echo_n "(cached) " >&6 else ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ for ac_i in 1 2 3 4 5 6 7; do ac_script="$ac_script$as_nl$ac_script" done echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed $as_unset ac_script || ac_script= if test -z "$SED"; then ac_path_SED_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in sed gsed; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" { test -f "$ac_path_SED" && $as_test_x "$ac_path_SED"; } || continue # Check for GNU ac_path_SED and select it if it is found. 
# Check for GNU $ac_path_SED case `"$ac_path_SED" --version 2>&1` in *GNU*) ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo '' >> "conftest.nl" "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break ac_count=`expr $ac_count + 1` if test $ac_count -gt ${ac_path_SED_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_SED="$ac_path_SED" ac_path_SED_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_SED_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_SED"; then { { $as_echo "$as_me:$LINENO: error: no acceptable sed could be found in \$PATH" >&5 $as_echo "$as_me: error: no acceptable sed could be found in \$PATH" >&2;} { (exit 1); exit 1; }; } fi else ac_cv_path_SED=$SED fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_path_SED" >&5 $as_echo "$ac_cv_path_SED" >&6; } SED="$ac_cv_path_SED" rm -f conftest.sed test -z "$SED" && SED=sed Xsed="$SED -e 1s/^X//" { $as_echo "$as_me:$LINENO: checking for fgrep" >&5 $as_echo_n "checking for fgrep... " >&6; } if test "${ac_cv_path_FGREP+set}" = set; then $as_echo_n "(cached) " >&6 else if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 then ac_cv_path_FGREP="$GREP -F" else if test -z "$FGREP"; then ac_path_FGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in fgrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext" { test -f "$ac_path_FGREP" && $as_test_x "$ac_path_FGREP"; } || continue # Check for GNU ac_path_FGREP and select it if it is found. # Check for GNU $ac_path_FGREP case `"$ac_path_FGREP" --version 2>&1` in *GNU*) ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'FGREP' >> "conftest.nl" "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break ac_count=`expr $ac_count + 1` if test $ac_count -gt ${ac_path_FGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_FGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_FGREP"; then { { $as_echo "$as_me:$LINENO: error: no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5 $as_echo "$as_me: error: no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;} { (exit 1); exit 1; }; } fi else ac_cv_path_FGREP=$FGREP fi fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_path_FGREP" >&5 $as_echo "$ac_cv_path_FGREP" >&6; } FGREP="$ac_cv_path_FGREP" test -z "$GREP" && GREP=grep # Check whether --with-gnu-ld was given. 
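# When --with-gnu-ld is given, the PATH search below only accepts a GNU ld;
# otherwise, if GCC is in use, the linker reported by "$CC -print-prog-name=ld"
# is tried first and the PATH search is only a fallback.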
if test "${with_gnu_ld+set}" = set; then withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes else with_gnu_ld=no fi ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. { $as_echo "$as_me:$LINENO: checking for ld used by $CC" >&5 $as_echo_n "checking for ld used by $CC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [\\/]* | ?:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then { $as_echo "$as_me:$LINENO: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:$LINENO: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... " >&6; } fi if test "${lt_cv_path_LD+set}" = set; then $as_echo_n "(cached) " >&6 else if test -z "$LD"; then lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$lt_cv_path_LD" -v 2>&1 &5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -z "$LD" && { { $as_echo "$as_me:$LINENO: error: no acceptable ld found in \$PATH" >&5 $as_echo "$as_me: error: no acceptable ld found in \$PATH" >&2;} { (exit 1); exit 1; }; } { $as_echo "$as_me:$LINENO: checking if the linker ($LD) is GNU ld" >&5 $as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } if test "${lt_cv_prog_gnu_ld+set}" = set; then $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU lds only accept -v. case `$LD -v 2>&1 &5 $as_echo "$lt_cv_prog_gnu_ld" >&6; } with_gnu_ld=$lt_cv_prog_gnu_ld { $as_echo "$as_me:$LINENO: checking for BSD- or MS-compatible name lister (nm)" >&5 $as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; } if test "${lt_cv_path_NM+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$NM"; then # Let the user override the test. lt_cv_path_NM="$NM" else lt_nm_to_check="${ac_tool_prefix}nm" if test -n "$ac_tool_prefix" && test "$build" = "$host"; then lt_nm_to_check="$lt_nm_to_check nm" fi for lt_tmp_nm in $lt_nm_to_check; do lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. tmp_nm="$ac_dir/$lt_tmp_nm" if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then # Check to see if the nm accepts a BSD-compat flag. 
# Adding the `sed 1q' prevents false positives on HP-UX, which says: # nm: unknown option "B" ignored # Tru64's nm complains that /dev/null is an invalid object file case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in */dev/null* | *'Invalid file or object type'*) lt_cv_path_NM="$tmp_nm -B" break ;; *) case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in */dev/null*) lt_cv_path_NM="$tmp_nm -p" break ;; *) lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but continue # so that we can try to find one that supports BSD flags ;; esac ;; esac fi done IFS="$lt_save_ifs" done : ${lt_cv_path_NM=no} fi fi { $as_echo "$as_me:$LINENO: result: $lt_cv_path_NM" >&5 $as_echo "$lt_cv_path_NM" >&6; } if test "$lt_cv_path_NM" != "no"; then NM="$lt_cv_path_NM" else # Didn't find any BSD compatible name lister, look for dumpbin. if test -n "$ac_tool_prefix"; then for ac_prog in "dumpbin -symbols" "link -dump -symbols" do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_DUMPBIN+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$DUMPBIN"; then ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DUMPBIN=$ac_cv_prog_DUMPBIN if test -n "$DUMPBIN"; then { $as_echo "$as_me:$LINENO: result: $DUMPBIN" >&5 $as_echo "$DUMPBIN" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$DUMPBIN" && break done fi if test -z "$DUMPBIN"; then ac_ct_DUMPBIN=$DUMPBIN for ac_prog in "dumpbin -symbols" "link -dump -symbols" do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_DUMPBIN+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DUMPBIN"; then ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN if test -n "$ac_ct_DUMPBIN"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_DUMPBIN" >&5 $as_echo "$ac_ct_DUMPBIN" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_DUMPBIN" && break done if test "x$ac_ct_DUMPBIN" = x; then DUMPBIN=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DUMPBIN=$ac_ct_DUMPBIN fi fi if test "$DUMPBIN" != ":"; then NM="$DUMPBIN" fi fi test -z "$NM" && NM=nm { $as_echo "$as_me:$LINENO: checking the name lister ($NM) interface" >&5 $as_echo_n "checking the name lister ($NM) interface... " >&6; } if test "${lt_cv_nm_interface+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_nm_interface="BSD nm" echo "int some_variable = 0;" > conftest.$ac_ext (eval echo "\"\$as_me:7257: $ac_compile\"" >&5) (eval "$ac_compile" 2>conftest.err) cat conftest.err >&5 (eval echo "\"\$as_me:7260: $NM \\\"conftest.$ac_objext\\\"\"" >&5) (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) cat conftest.err >&5 (eval echo "\"\$as_me:7263: output\"" >&5) cat conftest.out >&5 if $GREP 'External.*some_variable' conftest.out > /dev/null; then lt_cv_nm_interface="MS dumpbin" fi rm -f conftest* fi { $as_echo "$as_me:$LINENO: result: $lt_cv_nm_interface" >&5 $as_echo "$lt_cv_nm_interface" >&6; } # find the maximum length of command line arguments { $as_echo "$as_me:$LINENO: checking the maximum length of command line arguments" >&5 $as_echo_n "checking the maximum length of command line arguments... " >&6; } if test "${lt_cv_sys_max_cmd_len+set}" = set; then $as_echo_n "(cached) " >&6 else i=0 teststring="ABCD" case $build_os in msdosdjgpp*) # On DJGPP, this test can blow up pretty badly due to problems in libc # (any single argument exceeding 2000 bytes causes a buffer overrun # during glob expansion). Even if it were fixed, the result of this # check would be larger than it should be. lt_cv_sys_max_cmd_len=12288; # 12K is about right ;; gnu*) # Under GNU Hurd, this test is not required because there is # no limit to the length of command line arguments. # Libtool will interpret -1 as no limit whatsoever lt_cv_sys_max_cmd_len=-1; ;; cygwin* | mingw* | cegcc*) # On Win9x/ME, this test blows up -- it succeeds, but takes # about 5 minutes as the teststring grows exponentially. # Worse, since 9x/ME are not pre-emptively multitasking, # you end up with a "frozen" computer, even though with patience # the test eventually succeeds (with a max line length of 256k). # Instead, let's just punt: use the minimum linelength reported by # all of the supported platforms: 8192 (on NT/2K/XP). lt_cv_sys_max_cmd_len=8192; ;; amigaos*) # On AmigaOS with pdksh, this test takes hours, literally. # So we just punt and use a minimum line length of 8192. lt_cv_sys_max_cmd_len=8192; ;; netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) # This has been around since 386BSD, at least. Likely further. 
if test -x /sbin/sysctl; then lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` elif test -x /usr/sbin/sysctl; then lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` else lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs fi # And add a safety zone lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` ;; interix*) # We know the value 262144 and hardcode it with a safety zone (like BSD) lt_cv_sys_max_cmd_len=196608 ;; osf*) # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not # nice to cause kernel panics so lets avoid the loop below. # First set a reasonable default. lt_cv_sys_max_cmd_len=16384 # if test -x /sbin/sysconfig; then case `/sbin/sysconfig -q proc exec_disable_arg_limit` in *1*) lt_cv_sys_max_cmd_len=-1 ;; esac fi ;; sco3.2v5*) lt_cv_sys_max_cmd_len=102400 ;; sysv5* | sco5v6* | sysv4.2uw2*) kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` if test -n "$kargmax"; then lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` else lt_cv_sys_max_cmd_len=32768 fi ;; *) lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` if test -n "$lt_cv_sys_max_cmd_len"; then lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` else # Make teststring a little bigger before we do anything with it. # a 1K string should be a reasonable start. for i in 1 2 3 4 5 6 7 8 ; do teststring=$teststring$teststring done SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} # If test is not a shell built-in, we'll probably end up computing a # maximum length that is only half of the actual maximum length, but # we can't tell. while { test "X"`$SHELL $0 --fallback-echo "X$teststring$teststring" 2>/dev/null` \ = "XX$teststring$teststring"; } >/dev/null 2>&1 && test $i != 17 # 1/2 MB should be enough do i=`expr $i + 1` teststring=$teststring$teststring done # Only check the string length outside the loop. lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` teststring= # Add a significant safety factor because C++ compilers can tack on # massive amounts of additional arguments before passing them to the # linker. It appears as though 1/2 is a usable value. lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` fi ;; esac fi if test -n $lt_cv_sys_max_cmd_len ; then { $as_echo "$as_me:$LINENO: result: $lt_cv_sys_max_cmd_len" >&5 $as_echo "$lt_cv_sys_max_cmd_len" >&6; } else { $as_echo "$as_me:$LINENO: result: none" >&5 $as_echo "none" >&6; } fi max_cmd_len=$lt_cv_sys_max_cmd_len : ${CP="cp -f"} : ${MV="mv -f"} : ${RM="rm -f"} { $as_echo "$as_me:$LINENO: checking whether the shell understands some XSI constructs" >&5 $as_echo_n "checking whether the shell understands some XSI constructs... " >&6; } # Try some XSI features xsi_shell=no ( _lt_dummy="a/b/c" test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ = c,a/b,, \ && eval 'test $(( 1 + 1 )) -eq 2 \ && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ && xsi_shell=yes { $as_echo "$as_me:$LINENO: result: $xsi_shell" >&5 $as_echo "$xsi_shell" >&6; } { $as_echo "$as_me:$LINENO: checking whether the shell understands \"+=\"" >&5 $as_echo_n "checking whether the shell understands \"+=\"... 
" >&6; } lt_shell_append=no ( foo=bar; set foo baz; eval "$1+=\$2" && test "$foo" = barbaz ) \ >/dev/null 2>&1 \ && lt_shell_append=yes { $as_echo "$as_me:$LINENO: result: $lt_shell_append" >&5 $as_echo "$lt_shell_append" >&6; } if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then lt_unset=unset else lt_unset=false fi # test EBCDIC or ASCII case `echo X|tr X '\101'` in A) # ASCII based system # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr lt_SP2NL='tr \040 \012' lt_NL2SP='tr \015\012 \040\040' ;; *) # EBCDIC based system lt_SP2NL='tr \100 \n' lt_NL2SP='tr \r\n \100\100' ;; esac { $as_echo "$as_me:$LINENO: checking for $LD option to reload object files" >&5 $as_echo_n "checking for $LD option to reload object files... " >&6; } if test "${lt_cv_ld_reload_flag+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_ld_reload_flag='-r' fi { $as_echo "$as_me:$LINENO: result: $lt_cv_ld_reload_flag" >&5 $as_echo "$lt_cv_ld_reload_flag" >&6; } reload_flag=$lt_cv_ld_reload_flag case $reload_flag in "" | " "*) ;; *) reload_flag=" $reload_flag" ;; esac reload_cmds='$LD$reload_flag -o $output$reload_objs' case $host_os in darwin*) if test "$GCC" = yes; then reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' else reload_cmds='$LD$reload_flag -o $output$reload_objs' fi ;; esac if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. set dummy ${ac_tool_prefix}objdump; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_OBJDUMP+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$OBJDUMP"; then ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OBJDUMP=$ac_cv_prog_OBJDUMP if test -n "$OBJDUMP"; then { $as_echo "$as_me:$LINENO: result: $OBJDUMP" >&5 $as_echo "$OBJDUMP" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OBJDUMP"; then ac_ct_OBJDUMP=$OBJDUMP # Extract the first word of "objdump", so it can be a program name with args. set dummy objdump; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_OBJDUMP+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OBJDUMP"; then ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_OBJDUMP="objdump" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP if test -n "$ac_ct_OBJDUMP"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_OBJDUMP" >&5 $as_echo "$ac_ct_OBJDUMP" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OBJDUMP" = x; then OBJDUMP="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OBJDUMP=$ac_ct_OBJDUMP fi else OBJDUMP="$ac_cv_prog_OBJDUMP" fi test -z "$OBJDUMP" && OBJDUMP=objdump { $as_echo "$as_me:$LINENO: checking how to recognize dependent libraries" >&5 $as_echo_n "checking how to recognize dependent libraries... " >&6; } if test "${lt_cv_deplibs_check_method+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_file_magic_cmd='$MAGIC_CMD' lt_cv_file_magic_test_file= lt_cv_deplibs_check_method='unknown' # Need to set the preceding variable on all platforms that support # interlibrary dependencies. # 'none' -- dependencies not supported. # `unknown' -- same as none, but documents that we really don't know. # 'pass_all' -- all dependencies passed with no checks. # 'test_compile' -- check by making test program. # 'file_magic [[regex]]' -- check by looking for files in library path # which responds to the $file_magic_cmd with a given extended regex. # If you have `file' or equivalent on your system and you're not sure # whether `pass_all' will *always* work, you probably want this one. case $host_os in aix[4-9]*) lt_cv_deplibs_check_method=pass_all ;; beos*) lt_cv_deplibs_check_method=pass_all ;; bsdi[45]*) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' lt_cv_file_magic_cmd='/usr/bin/file -L' lt_cv_file_magic_test_file=/shlib/libc.so ;; cygwin*) # func_win32_libid is a shell function defined in ltmain.sh lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' ;; mingw* | pw32*) # Base MSYS/MinGW do not provide the 'file' command needed by # func_win32_libid shell function, so use a weaker test based on 'objdump', # unless we find 'file', for example because we are cross-compiling. if ( file / ) >/dev/null 2>&1; then lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' else lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' lt_cv_file_magic_cmd='$OBJDUMP -f' fi ;; cegcc) # use the weaker test based on 'objdump'. See mingw*. lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' lt_cv_file_magic_cmd='$OBJDUMP -f' ;; darwin* | rhapsody*) lt_cv_deplibs_check_method=pass_all ;; freebsd* | dragonfly*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then case $host_cpu in i*86 ) # Not sure whether the presence of OpenBSD here was a mistake. # Let's accept both of them until this is cleared up. 
lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` ;; esac else lt_cv_deplibs_check_method=pass_all fi ;; gnu*) lt_cv_deplibs_check_method=pass_all ;; hpux10.20* | hpux11*) lt_cv_file_magic_cmd=/usr/bin/file case $host_cpu in ia64*) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so ;; hppa*64*) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - PA-RISC [0-9].[0-9]' lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl ;; *) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9].[0-9]) shared library' lt_cv_file_magic_test_file=/usr/lib/libc.sl ;; esac ;; interix[3-9]*) # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' ;; irix5* | irix6* | nonstopux*) case $LD in *-32|*"-32 ") libmagic=32-bit;; *-n32|*"-n32 ") libmagic=N32;; *-64|*"-64 ") libmagic=64-bit;; *) libmagic=never-match;; esac lt_cv_deplibs_check_method=pass_all ;; # This must be Linux ELF. linux* | k*bsd*-gnu) lt_cv_deplibs_check_method=pass_all ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' fi ;; newos6*) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=/usr/lib/libnls.so ;; *nto* | *qnx*) lt_cv_deplibs_check_method=pass_all ;; openbsd*) if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' fi ;; osf3* | osf4* | osf5*) lt_cv_deplibs_check_method=pass_all ;; rdos*) lt_cv_deplibs_check_method=pass_all ;; solaris*) lt_cv_deplibs_check_method=pass_all ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) lt_cv_deplibs_check_method=pass_all ;; sysv4 | sysv4.3*) case $host_vendor in motorola) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` ;; ncr) lt_cv_deplibs_check_method=pass_all ;; sequent) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' ;; sni) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" lt_cv_file_magic_test_file=/lib/libc.so ;; siemens) lt_cv_deplibs_check_method=pass_all ;; pc) lt_cv_deplibs_check_method=pass_all ;; esac ;; tpf*) lt_cv_deplibs_check_method=pass_all ;; esac fi { $as_echo "$as_me:$LINENO: result: $lt_cv_deplibs_check_method" >&5 $as_echo "$lt_cv_deplibs_check_method" >&6; } file_magic_cmd=$lt_cv_file_magic_cmd deplibs_check_method=$lt_cv_deplibs_check_method test -z "$deplibs_check_method" && deplibs_check_method=unknown if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. 
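# AR, STRIP and RANLIB are resolved like the other binutils above: prefer the
# ${ac_tool_prefix}-prefixed tool, fall back to the bare name, and warn when
# cross-compiling with tools that are not prefixed with the host triplet.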
set dummy ${ac_tool_prefix}ar; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_AR+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$AR"; then ac_cv_prog_AR="$AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_AR="${ac_tool_prefix}ar" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AR=$ac_cv_prog_AR if test -n "$AR"; then { $as_echo "$as_me:$LINENO: result: $AR" >&5 $as_echo "$AR" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_AR"; then ac_ct_AR=$AR # Extract the first word of "ar", so it can be a program name with args. set dummy ar; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_AR+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_AR"; then ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_AR="ar" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_AR=$ac_cv_prog_ac_ct_AR if test -n "$ac_ct_AR"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_AR" >&5 $as_echo "$ac_ct_AR" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_AR" = x; then AR="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac AR=$ac_ct_AR fi else AR="$ac_cv_prog_AR" fi test -z "$AR" && AR=ar test -z "$AR_FLAGS" && AR_FLAGS=cru if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. set dummy ${ac_tool_prefix}strip; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_STRIP+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$STRIP"; then ac_cv_prog_STRIP="$STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_STRIP="${ac_tool_prefix}strip" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi STRIP=$ac_cv_prog_STRIP if test -n "$STRIP"; then { $as_echo "$as_me:$LINENO: result: $STRIP" >&5 $as_echo "$STRIP" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_STRIP"; then ac_ct_STRIP=$STRIP # Extract the first word of "strip", so it can be a program name with args. 
set dummy strip; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_STRIP"; then ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_STRIP="strip" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP if test -n "$ac_ct_STRIP"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_STRIP" >&5 $as_echo "$ac_ct_STRIP" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_STRIP" = x; then STRIP=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac STRIP=$ac_ct_STRIP fi else STRIP="$ac_cv_prog_STRIP" fi test -z "$STRIP" && STRIP=: if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. set dummy ${ac_tool_prefix}ranlib; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_RANLIB+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$RANLIB"; then ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi RANLIB=$ac_cv_prog_RANLIB if test -n "$RANLIB"; then { $as_echo "$as_me:$LINENO: result: $RANLIB" >&5 $as_echo "$RANLIB" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_RANLIB"; then ac_ct_RANLIB=$RANLIB # Extract the first word of "ranlib", so it can be a program name with args. set dummy ranlib; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_RANLIB+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_RANLIB"; then ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_RANLIB="ranlib" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB if test -n "$ac_ct_RANLIB"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_RANLIB" >&5 $as_echo "$ac_ct_RANLIB" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_RANLIB" = x; then RANLIB=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac RANLIB=$ac_ct_RANLIB fi else RANLIB="$ac_cv_prog_RANLIB" fi test -z "$RANLIB" && RANLIB=: # Determine commands to create old-style static archives. old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' old_postinstall_cmds='chmod 644 $oldlib' old_postuninstall_cmds= if test -n "$RANLIB"; then case $host_os in openbsd*) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib" ;; *) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib" ;; esac old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" fi # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # Check for command to grab the raw symbol name followed by C symbol from nm. { $as_echo "$as_me:$LINENO: checking command to parse $NM output from $compiler object" >&5 $as_echo_n "checking command to parse $NM output from $compiler object... " >&6; } if test "${lt_cv_sys_global_symbol_pipe+set}" = set; then $as_echo_n "(cached) " >&6 else # These are sane defaults that work on at least a few old systems. # [They come from Ultrix. What could be older than Ultrix?!! ;)] # Character class describing NM global symbol codes. symcode='[BCDEGRST]' # Regexp to match symbols that can be accessed directly from C. sympat='\([_A-Za-z][_A-Za-z0-9]*\)' # Define system-specific variables. case $host_os in aix*) symcode='[BCDT]' ;; cygwin* | mingw* | pw32* | cegcc*) symcode='[ABCDGISTW]' ;; hpux*) if test "$host_cpu" = ia64; then symcode='[ABCDEGRST]' fi ;; irix* | nonstopux*) symcode='[BCDEGRST]' ;; osf*) symcode='[BCDEGQRST]' ;; solaris*) symcode='[BDRT]' ;; sco3.2v5*) symcode='[DT]' ;; sysv4.2uw2*) symcode='[DT]' ;; sysv5* | sco5v6* | unixware* | OpenUNIX*) symcode='[ABDT]' ;; sysv4) symcode='[DFNSTU]' ;; esac # If we're using GNU nm, then use its standard symbol codes. case `$NM -V 2>&1` in *GNU* | *'with BFD'*) symcode='[ABCDGIRSTW]' ;; esac # Transform an extracted symbol line into a proper C declaration. # Some systems (esp. on ia64) link data and code symbols differently, # so use this general approach. 
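# With a BSD-style nm, the symbol pipe built below turns an output line such as
#   0000000000000601 T nm_test_func
# into "T nm_test_func nm_test_func"; the _to_cdecl transform then emits
# "extern int nm_test_func();" (data symbols become "extern char ...;").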
lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" # Transform an extracted symbol line into symbol name and symbol address lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" # Handle CRLF in mingw tool chain opt_cr= case $build_os in mingw*) opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp ;; esac # Try without a prefix underscore, then with it. for ac_symprfx in "" "_"; do # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. symxfrm="\\1 $ac_symprfx\\2 \\2" # Write the raw and C identifiers. if test "$lt_cv_nm_interface" = "MS dumpbin"; then # Fake it for dumpbin and say T for any non-static function # and D for any global variable. # Also find C++ and __fastcall symbols from MSVC++, # which start with @ or ?. lt_cv_sys_global_symbol_pipe="$AWK '"\ " {last_section=section; section=\$ 3};"\ " /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ " \$ 0!~/External *\|/{next};"\ " / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ " {if(hide[section]) next};"\ " {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\ " {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ " s[1]~/^[@?]/{print s[1], s[1]; next};"\ " s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ " ' prfx=^$ac_symprfx" else lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" fi # Check to see that the pipe works correctly. pipe_works=no rm -f conftest* cat > conftest.$ac_ext <<_LT_EOF #ifdef __cplusplus extern "C" { #endif char nm_test_var; void nm_test_func(void); void nm_test_func(void){} #ifdef __cplusplus } #endif int main(){nm_test_var='a';nm_test_func();return(0);} _LT_EOF if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then # Now try to grab the symbols. nlist=conftest.nm if { (eval echo "$as_me:$LINENO: \"$NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist\"") >&5 (eval $NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && test -s "$nlist"; then # Try sorting and uniquifying the output. if sort "$nlist" | uniq > "$nlist"T; then mv -f "$nlist"T "$nlist" else rm -f "$nlist"T fi # Make sure that we snagged all the symbols we need. if $GREP ' nm_test_var$' "$nlist" >/dev/null; then if $GREP ' nm_test_func$' "$nlist" >/dev/null; then cat <<_LT_EOF > conftest.$ac_ext #ifdef __cplusplus extern "C" { #endif _LT_EOF # Now generate the symbol file. eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' cat <<_LT_EOF >> conftest.$ac_ext /* The mapping between symbol names and symbols. 
*/ const struct { const char *name; void *address; } lt__PROGRAM__LTX_preloaded_symbols[] = { { "@PROGRAM@", (void *) 0 }, _LT_EOF $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext cat <<\_LT_EOF >> conftest.$ac_ext {0, (void *) 0} }; /* This works around a problem in FreeBSD linker */ #ifdef FREEBSD_WORKAROUND static const void *lt_preloaded_setup() { return lt__PROGRAM__LTX_preloaded_symbols; } #endif #ifdef __cplusplus } #endif _LT_EOF # Now try linking the two files. mv conftest.$ac_objext conftstm.$ac_objext lt_save_LIBS="$LIBS" lt_save_CFLAGS="$CFLAGS" LIBS="conftstm.$ac_objext" CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && test -s conftest${ac_exeext}; then pipe_works=yes fi LIBS="$lt_save_LIBS" CFLAGS="$lt_save_CFLAGS" else echo "cannot find nm_test_func in $nlist" >&5 fi else echo "cannot find nm_test_var in $nlist" >&5 fi else echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 fi else echo "$progname: failed program was:" >&5 cat conftest.$ac_ext >&5 fi rm -rf conftest* conftst* # Do not use the global_symbol_pipe unless it works. if test "$pipe_works" = yes; then break else lt_cv_sys_global_symbol_pipe= fi done fi if test -z "$lt_cv_sys_global_symbol_pipe"; then lt_cv_sys_global_symbol_to_cdecl= fi if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then { $as_echo "$as_me:$LINENO: result: failed" >&5 $as_echo "failed" >&6; } else { $as_echo "$as_me:$LINENO: result: ok" >&5 $as_echo "ok" >&6; } fi # Check whether --enable-libtool-lock was given. if test "${enable_libtool_lock+set}" = set; then enableval=$enable_libtool_lock; fi test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes # Some flags need to be propagated to the compiler or linker for good # libtool support. case $host in ia64-*-hpux*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then case `/usr/bin/file conftest.$ac_objext` in *ELF-32*) HPUX_IA64_MODE="32" ;; *ELF-64*) HPUX_IA64_MODE="64" ;; esac fi rm -rf conftest* ;; *-*-irix6*) # Find out which ABI we are using. echo '#line 8454 "configure"' > conftest.$ac_ext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then if test "$lt_cv_prog_gnu_ld" = yes; then case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -melf32bsmip" ;; *N32*) LD="${LD-ld} -melf32bmipn32" ;; *64-bit*) LD="${LD-ld} -melf64bmip" ;; esac else case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -32" ;; *N32*) LD="${LD-ld} -n32" ;; *64-bit*) LD="${LD-ld} -64" ;; esac fi fi rm -rf conftest* ;; x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then case `/usr/bin/file conftest.o` in *32-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_i386_fbsd" ;; x86_64-*linux*) LD="${LD-ld} -m elf_i386" ;; ppc64-*linux*|powerpc64-*linux*) LD="${LD-ld} -m elf32ppclinux" ;; s390x-*linux*) LD="${LD-ld} -m elf_s390" ;; sparc64-*linux*) LD="${LD-ld} -m elf32_sparc" ;; esac ;; *64-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_x86_64_fbsd" ;; x86_64-*linux*) LD="${LD-ld} -m elf_x86_64" ;; ppc*-*linux*|powerpc*-*linux*) LD="${LD-ld} -m elf64ppc" ;; s390*-*linux*|s390*-*tpf*) LD="${LD-ld} -m elf64_s390" ;; sparc*-*linux*) LD="${LD-ld} -m elf64_sparc" ;; esac ;; esac fi rm -rf conftest* ;; *-*-sco3.2v5*) # On SCO OpenServer 5, we need -belf to get full-featured binaries. SAVE_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS -belf" { $as_echo "$as_me:$LINENO: checking whether the C compiler needs -belf" >&5 $as_echo_n "checking whether the C compiler needs -belf... " >&6; } if test "${lt_cv_cc_needs_belf+set}" = set; then $as_echo_n "(cached) " >&6 else ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then lt_cv_cc_needs_belf=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 lt_cv_cc_needs_belf=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu fi { $as_echo "$as_me:$LINENO: result: $lt_cv_cc_needs_belf" >&5 $as_echo "$lt_cv_cc_needs_belf" >&6; } if test x"$lt_cv_cc_needs_belf" != x"yes"; then # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf CFLAGS="$SAVE_CFLAGS" fi ;; sparc*-*solaris*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then case `/usr/bin/file conftest.o` in *64-bit*) case $lt_cv_prog_gnu_ld in yes*) LD="${LD-ld} -m elf64_sparc" ;; *) if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then LD="${LD-ld} -64" fi ;; esac ;; esac fi rm -rf conftest* ;; esac need_locks="$enable_libtool_lock" case $host_os in rhapsody* | darwin*) if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. 
set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_DSYMUTIL+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$DSYMUTIL"; then ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DSYMUTIL=$ac_cv_prog_DSYMUTIL if test -n "$DSYMUTIL"; then { $as_echo "$as_me:$LINENO: result: $DSYMUTIL" >&5 $as_echo "$DSYMUTIL" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_DSYMUTIL"; then ac_ct_DSYMUTIL=$DSYMUTIL # Extract the first word of "dsymutil", so it can be a program name with args. set dummy dsymutil; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_DSYMUTIL+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DSYMUTIL"; then ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL if test -n "$ac_ct_DSYMUTIL"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_DSYMUTIL" >&5 $as_echo "$ac_ct_DSYMUTIL" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_DSYMUTIL" = x; then DSYMUTIL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DSYMUTIL=$ac_ct_DSYMUTIL fi else DSYMUTIL="$ac_cv_prog_DSYMUTIL" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. set dummy ${ac_tool_prefix}nmedit; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_NMEDIT+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$NMEDIT"; then ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi NMEDIT=$ac_cv_prog_NMEDIT if test -n "$NMEDIT"; then { $as_echo "$as_me:$LINENO: result: $NMEDIT" >&5 $as_echo "$NMEDIT" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_NMEDIT"; then ac_ct_NMEDIT=$NMEDIT # Extract the first word of "nmedit", so it can be a program name with args. set dummy nmedit; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_NMEDIT+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_NMEDIT"; then ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_NMEDIT="nmedit" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT if test -n "$ac_ct_NMEDIT"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_NMEDIT" >&5 $as_echo "$ac_ct_NMEDIT" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_NMEDIT" = x; then NMEDIT=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac NMEDIT=$ac_ct_NMEDIT fi else NMEDIT="$ac_cv_prog_NMEDIT" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. set dummy ${ac_tool_prefix}lipo; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_LIPO+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$LIPO"; then ac_cv_prog_LIPO="$LIPO" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_LIPO="${ac_tool_prefix}lipo" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi LIPO=$ac_cv_prog_LIPO if test -n "$LIPO"; then { $as_echo "$as_me:$LINENO: result: $LIPO" >&5 $as_echo "$LIPO" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_LIPO"; then ac_ct_LIPO=$LIPO # Extract the first word of "lipo", so it can be a program name with args. set dummy lipo; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_LIPO+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_LIPO"; then ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_LIPO="lipo" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO if test -n "$ac_ct_LIPO"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_LIPO" >&5 $as_echo "$ac_ct_LIPO" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_LIPO" = x; then LIPO=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac LIPO=$ac_ct_LIPO fi else LIPO="$ac_cv_prog_LIPO" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. set dummy ${ac_tool_prefix}otool; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_OTOOL+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$OTOOL"; then ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_OTOOL="${ac_tool_prefix}otool" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OTOOL=$ac_cv_prog_OTOOL if test -n "$OTOOL"; then { $as_echo "$as_me:$LINENO: result: $OTOOL" >&5 $as_echo "$OTOOL" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OTOOL"; then ac_ct_OTOOL=$OTOOL # Extract the first word of "otool", so it can be a program name with args. set dummy otool; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_OTOOL+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OTOOL"; then ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_OTOOL="otool" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL if test -n "$ac_ct_OTOOL"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_OTOOL" >&5 $as_echo "$ac_ct_OTOOL" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OTOOL" = x; then OTOOL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OTOOL=$ac_ct_OTOOL fi else OTOOL="$ac_cv_prog_OTOOL" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. 
set dummy ${ac_tool_prefix}otool64; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_OTOOL64+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$OTOOL64"; then ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OTOOL64=$ac_cv_prog_OTOOL64 if test -n "$OTOOL64"; then { $as_echo "$as_me:$LINENO: result: $OTOOL64" >&5 $as_echo "$OTOOL64" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OTOOL64"; then ac_ct_OTOOL64=$OTOOL64 # Extract the first word of "otool64", so it can be a program name with args. set dummy otool64; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_OTOOL64+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OTOOL64"; then ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_OTOOL64="otool64" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 if test -n "$ac_ct_OTOOL64"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_OTOOL64" >&5 $as_echo "$ac_ct_OTOOL64" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OTOOL64" = x; then OTOOL64=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OTOOL64=$ac_ct_OTOOL64 fi else OTOOL64="$ac_cv_prog_OTOOL64" fi { $as_echo "$as_me:$LINENO: checking for -single_module linker flag" >&5 $as_echo_n "checking for -single_module linker flag... " >&6; } if test "${lt_cv_apple_cc_single_mod+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_apple_cc_single_mod=no if test -z "${LT_MULTI_MODULE}"; then # By default we will add the -single_module flag. You can override # by either setting the environment variable LT_MULTI_MODULE # non-empty at configure time, or by adding -multi_module to the # link flags. rm -rf libconftest.dylib* echo "int foo(void){return 1;}" > conftest.c echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c" >&5 $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c 2>conftest.err _lt_result=$? if test -f libconftest.dylib && test ! 
-s conftest.err && test $_lt_result = 0; then lt_cv_apple_cc_single_mod=yes else cat conftest.err >&5 fi rm -rf libconftest.dylib* rm -f conftest.* fi fi { $as_echo "$as_me:$LINENO: result: $lt_cv_apple_cc_single_mod" >&5 $as_echo "$lt_cv_apple_cc_single_mod" >&6; } { $as_echo "$as_me:$LINENO: checking for -exported_symbols_list linker flag" >&5 $as_echo_n "checking for -exported_symbols_list linker flag... " >&6; } if test "${lt_cv_ld_exported_symbols_list+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_ld_exported_symbols_list=no save_LDFLAGS=$LDFLAGS echo "_main" > conftest.sym LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then lt_cv_ld_exported_symbols_list=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 lt_cv_ld_exported_symbols_list=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LDFLAGS="$save_LDFLAGS" fi { $as_echo "$as_me:$LINENO: result: $lt_cv_ld_exported_symbols_list" >&5 $as_echo "$lt_cv_ld_exported_symbols_list" >&6; } case $host_os in rhapsody* | darwin1.[012]) _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; darwin1.*) _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; darwin*) # darwin 5.x on # if running on 10.5 or later, the deployment target defaults # to the OS version, if on x86, and 10.4, the deployment # target defaults to 10.4. Don't you love it? case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in 10.0,*86*-darwin8*|10.0,*-darwin[91]*) _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; 10.[012]*) _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; 10.*) _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; esac ;; esac if test "$lt_cv_apple_cc_single_mod" = "yes"; then _lt_dar_single_mod='$single_module' fi if test "$lt_cv_ld_exported_symbols_list" = "yes"; then _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' else _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' fi if test "$DSYMUTIL" != ":"; then _lt_dsymutil='~$DSYMUTIL $lib || :' else _lt_dsymutil= fi ;; esac for ac_header in dlfcn.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then eval "$as_ac_Header=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_Header=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu if test -z "$CXX"; then if test -n "$CCC"; then CXX=$CCC else if test -n "$ac_tool_prefix"; then for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CXX+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$CXX"; then ac_cv_prog_CXX="$CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CXX=$ac_cv_prog_CXX if test -n "$CXX"; then { $as_echo "$as_me:$LINENO: result: $CXX" >&5 $as_echo "$CXX" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CXX" && break done fi if test -z "$CXX"; then ac_ct_CXX=$CXX for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_CXX+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CXX"; then ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_CXX="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CXX=$ac_cv_prog_ac_ct_CXX if test -n "$ac_ct_CXX"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_CXX" >&5 $as_echo "$ac_ct_CXX" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CXX" && break done if test "x$ac_ct_CXX" = x; then CXX="g++" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CXX=$ac_ct_CXX fi fi fi fi # Provide some information about the compiler. $as_echo "$as_me:$LINENO: checking for C++ compiler version" >&5 set X $ac_compile ac_compiler=$2 { (ac_try="$ac_compiler --version >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compiler --version >&5") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } { (ac_try="$ac_compiler -v >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compiler -v >&5") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } { (ac_try="$ac_compiler -V >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compiler -V >&5") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } { $as_echo "$as_me:$LINENO: checking whether we are using the GNU C++ compiler" >&5 $as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } if test "${ac_cv_cxx_compiler_gnu+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_compiler_gnu=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_cxx_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:$LINENO: result: $ac_cv_cxx_compiler_gnu" >&5 $as_echo "$ac_cv_cxx_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GXX=yes else GXX= fi ac_test_CXXFLAGS=${CXXFLAGS+set} ac_save_CXXFLAGS=$CXXFLAGS { $as_echo "$as_me:$LINENO: checking whether $CXX accepts -g" >&5 $as_echo_n "checking whether $CXX accepts -g... 
" >&6; } if test "${ac_cv_prog_cxx_g+set}" = set; then $as_echo_n "(cached) " >&6 else ac_save_cxx_werror_flag=$ac_cxx_werror_flag ac_cxx_werror_flag=yes ac_cv_prog_cxx_g=no CXXFLAGS="-g" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_prog_cxx_g=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 CXXFLAGS="" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cxx_werror_flag=$ac_save_cxx_werror_flag CXXFLAGS="-g" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then ac_cv_prog_cxx_g=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cxx_werror_flag=$ac_save_cxx_werror_flag fi { $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cxx_g" >&5 $as_echo "$ac_cv_prog_cxx_g" >&6; } if test "$ac_test_CXXFLAGS" = set; then CXXFLAGS=$ac_save_CXXFLAGS elif test $ac_cv_prog_cxx_g = yes; then if test "$GXX" = yes; then CXXFLAGS="-g -O2" else CXXFLAGS="-g" fi else if test "$GXX" = yes; then CXXFLAGS="-O2" else CXXFLAGS= fi fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu depcc="$CXX" am_compiler_list= { $as_echo "$as_me:$LINENO: checking dependency style of $depcc" >&5 $as_echo_n "checking dependency style of $depcc... " >&6; } if test "${am_cv_CXX_dependencies_compiler_type+set}" = set; then $as_echo_n "(cached) " >&6 else if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named `D' -- because `-MD' means `put the output # in D'. mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_CXX_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` fi am__universal=false case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with # Solaris 8's {/usr,}/bin/sh. touch sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with `-c' and `-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle `-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. 
test "$am__universal" = false || continue ;; nosideeffect) # after this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvisualcpp | msvcmsys) # This compiler won't grok `-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_CXX_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_CXX_dependencies_compiler_type=none fi fi { $as_echo "$as_me:$LINENO: result: $am_cv_CXX_dependencies_compiler_type" >&5 $as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; } CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type if test "x$enable_dependency_tracking" != xno \ && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then am__fastdepCXX_TRUE= am__fastdepCXX_FALSE='#' else am__fastdepCXX_TRUE='#' am__fastdepCXX_FALSE= fi if test -n "$CXX" && ( test "X$CXX" != "Xno" && ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || (test "X$CXX" != "Xg++"))) ; then ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu { $as_echo "$as_me:$LINENO: checking how to run the C++ preprocessor" >&5 $as_echo_n "checking how to run the C++ preprocessor... " >&6; } if test -z "$CXXCPP"; then if test "${ac_cv_prog_CXXCPP+set}" = set; then $as_echo_n "(cached) " >&6 else # Double quotes because CXXCPP needs to be expanded for CXXCPP in "$CXX -E" "/lib/cpp" do ac_preproc_ok=false for ac_cxx_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
#ifdef __STDC__
# include <limits.h>
#else
# include <assert.h>
#endif
                     Syntax error
_ACEOF
if { (ac_try="$ac_cpp conftest.$ac_ext"
case "(($ac_try" in
  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
  *) ac_try_echo=$ac_try;;
esac
eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
$as_echo "$ac_try_echo") >&5
  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
  ac_status=$?
  grep -v '^ *+' conftest.er1 >conftest.err
  rm -f conftest.er1
  cat conftest.err >&5
  $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
  (exit $ac_status); } >/dev/null && {
         test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
         test ! -s conftest.err
       }; then
  :
else
  $as_echo "$as_me: failed program was:" >&5
sed 's/^/| /' conftest.$ac_ext >&5

  # Broken: fails on valid input.
continue
fi

rm -f conftest.err conftest.$ac_ext

  # OK, works on sane cases.  Now check whether nonexistent headers
  # can be detected and how.
  cat >conftest.$ac_ext <<_ACEOF
/* confdefs.h.  */
_ACEOF
cat confdefs.h >>conftest.$ac_ext
cat >>conftest.$ac_ext <<_ACEOF
/* end confdefs.h.  */
#include <ac_nonexistent.h>
_ACEOF
if { (ac_try="$ac_cpp conftest.$ac_ext"
case "(($ac_try" in
  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
  *) ac_try_echo=$ac_try;;
esac
eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
$as_echo "$ac_try_echo") >&5
  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
  ac_status=$?
  grep -v '^ *+' conftest.er1 >conftest.err
  rm -f conftest.er1
  cat conftest.err >&5
  $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
  (exit $ac_status); } >/dev/null && {
         test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
         test ! -s conftest.err
       }; then
  # Broken: success on invalid input.
continue
else
  $as_echo "$as_me: failed program was:" >&5
sed 's/^/| /' conftest.$ac_ext >&5

  # Passes both tests.
ac_preproc_ok=:
break
fi

rm -f conftest.err conftest.$ac_ext

done
# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
rm -f conftest.err conftest.$ac_ext
if $ac_preproc_ok; then
  break
fi

    done
    ac_cv_prog_CXXCPP=$CXXCPP

fi
  CXXCPP=$ac_cv_prog_CXXCPP
else
  ac_cv_prog_CXXCPP=$CXXCPP
fi
{ $as_echo "$as_me:$LINENO: result: $CXXCPP" >&5
$as_echo "$CXXCPP" >&6; }
ac_preproc_ok=false
for ac_cxx_preproc_warn_flag in '' yes
do
  # Use a header file that comes with gcc, so configuring glibc
  # with a fresh cross-compiler works.
  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
  # <limits.h> exists even on freestanding compilers.
  # On the NeXT, cc -E runs the code through the compiler's parser,
  # not just through cpp. "Syntax error" is here to catch this case.
  cat >conftest.$ac_ext <<_ACEOF
/* confdefs.h.  */
_ACEOF
cat confdefs.h >>conftest.$ac_ext
cat >>conftest.$ac_ext <<_ACEOF
/* end confdefs.h.  */
#include <ac_nonexistent.h>
_ACEOF
if { (ac_try="$ac_cpp conftest.$ac_ext"
case "(($ac_try" in
  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
  *) ac_try_echo=$ac_try;;
esac
eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
$as_echo "$ac_try_echo") >&5
  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
  ac_status=$?
  grep -v '^ *+' conftest.er1 >conftest.err
  rm -f conftest.er1
  cat conftest.err >&5
  $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
  (exit $ac_status); } >/dev/null && {
         test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
         test ! -s conftest.err
       }; then
  # Broken: success on invalid input.
continue
else
  $as_echo "$as_me: failed program was:" >&5
sed 's/^/| /' conftest.$ac_ext >&5

  # Passes both tests.
ac_preproc_ok=:
break
fi

rm -f conftest.err conftest.$ac_ext

done
# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
rm -f conftest.err conftest.$ac_ext
if $ac_preproc_ok; then
  :
else
  { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
   _lt_caught_CXX_error=yes; }
fi

ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_c_compiler_gnu

else
  _lt_caught_CXX_error=yes
fi


# Set options


        enable_dlopen=no


# Check whether --enable-shared was given.
if test "${enable_shared+set}" = set; then
  enableval=$enable_shared; p=${PACKAGE-default}
    case $enableval in
    yes) enable_shared=yes ;;
    no) enable_shared=no ;;
    *)
      enable_shared=no
      # Look at the argument we got.  We use all the common list separators.
      lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
      for pkg in $enableval; do
        IFS="$lt_save_ifs"
        if test "X$pkg" = "X$p"; then
          enable_shared=yes
        fi
      done
      IFS="$lt_save_ifs"
      ;;
    esac
else
  enable_shared=yes
fi


# Check whether --with-pic was given.
if test "${with_pic+set}" = set; then
  withval=$with_pic; pic_mode="$withval"
else
  pic_mode=default
fi

test -z "$pic_mode" && pic_mode=default

# Check whether --enable-fast-install was given.
if test "${enable_fast_install+set}" = set; then
  enableval=$enable_fast_install; p=${PACKAGE-default}
    case $enableval in
    yes) enable_fast_install=yes ;;
    no) enable_fast_install=no ;;
    *)
      enable_fast_install=no
      # Look at the argument we got.  We use all the common list separators.
      lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
      for pkg in $enableval; do
        IFS="$lt_save_ifs"
        if test "X$pkg" = "X$p"; then
          enable_fast_install=yes
        fi
      done
      IFS="$lt_save_ifs"
      ;;
    esac
else
  enable_fast_install=yes
fi


# This can be used to rebuild libtool when needed
LIBTOOL_DEPS="$ltmain"

# Always use our own libtool.
LIBTOOL='$(SHELL) $(top_builddir)/libtool'

test -z "$LN_S" && LN_S="ln -s"

if test -n "${ZSH_VERSION+set}" ; then
   setopt NO_GLOB_SUBST
fi

{ $as_echo "$as_me:$LINENO: checking for objdir" >&5
$as_echo_n "checking for objdir... " >&6; }
if test "${lt_cv_objdir+set}" = set; then
  $as_echo_n "(cached) " >&6
else
  rm -f .libs 2>/dev/null
mkdir .libs 2>/dev/null
if test -d .libs; then
  lt_cv_objdir=.libs
else
  # MS-DOS does not allow filenames that begin with a dot.
  lt_cv_objdir=_libs
fi
rmdir .libs 2>/dev/null
fi
{ $as_echo "$as_me:$LINENO: result: $lt_cv_objdir" >&5
$as_echo "$lt_cv_objdir" >&6; }
objdir=$lt_cv_objdir

cat >>confdefs.h <<_ACEOF
#define LT_OBJDIR "$lt_cv_objdir/"
_ACEOF

case $host_os in
aix3*)
  # AIX sometimes has problems with the GCC collect2 program.
For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test "X${COLLECT_NAMES+set}" != Xset; then COLLECT_NAMES= export COLLECT_NAMES fi ;; esac # Sed substitution that helps us do robust quoting. It backslashifies # metacharacters that are still active within double-quoted strings. sed_quote_subst='s/\(["`$\\]\)/\\\1/g' # Same as above, but do not quote variable references. double_quote_subst='s/\(["`\\]\)/\\\1/g' # Sed substitution to delay expansion of an escaped shell variable in a # double_quote_subst'ed string. delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' # Sed substitution to delay expansion of an escaped single quote. delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' # Sed substitution to avoid accidental globbing in evaled expressions no_glob_subst='s/\*/\\\*/g' # Global variables: ofile=libtool can_build_shared=yes # All known linkers require a `.a' archive for static linking (except MSVC, # which needs '.lib'). libext=a with_gnu_ld="$lt_cv_prog_gnu_ld" old_CC="$CC" old_CFLAGS="$CFLAGS" # Set sane defaults for various variables test -z "$CC" && CC=cc test -z "$LTCC" && LTCC=$CC test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS test -z "$LD" && LD=ld test -z "$ac_objext" && ac_objext=o for cc_temp in $compiler""; do case $cc_temp in compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; \-*) ;; *) break;; esac done cc_basename=`$ECHO "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` # Only perform the check for file, if the check method requires it test -z "$MAGIC_CMD" && MAGIC_CMD=file case $deplibs_check_method in file_magic*) if test "$file_magic_cmd" = '$MAGIC_CMD'; then { $as_echo "$as_me:$LINENO: checking for ${ac_tool_prefix}file" >&5 $as_echo_n "checking for ${ac_tool_prefix}file... " >&6; } if test "${lt_cv_path_MAGIC_CMD+set}" = set; then $as_echo_n "(cached) " >&6 else case $MAGIC_CMD in [\\/*] | ?:[\\/]*) lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. ;; *) lt_save_MAGIC_CMD="$MAGIC_CMD" lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" for ac_dir in $ac_dummy; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f $ac_dir/${ac_tool_prefix}file; then lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. 
Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS="$lt_save_ifs" MAGIC_CMD="$lt_save_MAGIC_CMD" ;; esac fi MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if test -n "$MAGIC_CMD"; then { $as_echo "$as_me:$LINENO: result: $MAGIC_CMD" >&5 $as_echo "$MAGIC_CMD" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test -z "$lt_cv_path_MAGIC_CMD"; then if test -n "$ac_tool_prefix"; then { $as_echo "$as_me:$LINENO: checking for file" >&5 $as_echo_n "checking for file... " >&6; } if test "${lt_cv_path_MAGIC_CMD+set}" = set; then $as_echo_n "(cached) " >&6 else case $MAGIC_CMD in [\\/*] | ?:[\\/]*) lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. ;; *) lt_save_MAGIC_CMD="$MAGIC_CMD" lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" for ac_dir in $ac_dummy; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f $ac_dir/file; then lt_cv_path_MAGIC_CMD="$ac_dir/file" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS="$lt_save_ifs" MAGIC_CMD="$lt_save_MAGIC_CMD" ;; esac fi MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if test -n "$MAGIC_CMD"; then { $as_echo "$as_me:$LINENO: result: $MAGIC_CMD" >&5 $as_echo "$MAGIC_CMD" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi else MAGIC_CMD=: fi fi fi ;; esac # Use C for the default configuration in the libtool script lt_save_CC="$CC" ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu # Source file extension for C test sources. ac_ext=c # Object file extension for compiled C test sources. objext=o objext=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(){return(0);}' # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # Save the default compiler, since it gets overwritten when the other # tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. 
compiler_DEFAULT=$CC # save warnings/boilerplate of simple test code ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" >conftest.$ac_ext eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_compiler_boilerplate=`cat conftest.err` $RM conftest* ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` $RM -r conftest* ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... if test -n "$compiler"; then lt_prog_compiler_no_builtin_flag= if test "$GCC" = yes; then lt_prog_compiler_no_builtin_flag=' -fno-builtin' { $as_echo "$as_me:$LINENO: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 $as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; } if test "${lt_cv_prog_compiler_rtti_exceptions+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_rtti_exceptions=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-fno-rtti -fno-exceptions" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:10529: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:10533: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_rtti_exceptions=yes fi fi $RM conftest* fi { $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 $as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; } if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" else : fi fi lt_prog_compiler_wl= lt_prog_compiler_pic= lt_prog_compiler_static= { $as_echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 $as_echo_n "checking for $compiler option to produce PIC... " >&6; } if test "$GCC" = yes; then lt_prog_compiler_wl='-Wl,' lt_prog_compiler_static='-static' case $host_os in aix*) # All AIX code is PIC. if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor lt_prog_compiler_static='-Bstatic' fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support lt_prog_compiler_pic='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the `-m68020' flag to GCC prevents building anything better, # like `-m68040'. 
lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries lt_prog_compiler_pic='-DDLL_EXPORT' ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files lt_prog_compiler_pic='-fno-common' ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) # +Z the default ;; *) lt_prog_compiler_pic='-fPIC' ;; esac ;; interix[3-9]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; msdosdjgpp*) # Just because we use GCC doesn't mean we suddenly get shared libraries # on systems that don't support them. lt_prog_compiler_can_build_shared=no enable_shared=no ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic='-fPIC -shared' ;; sysv4*MP*) if test -d /usr/nec; then lt_prog_compiler_pic=-Kconform_pic fi ;; *) lt_prog_compiler_pic='-fPIC' ;; esac else # PORTME Check for flag to pass linker flags through the system compiler. case $host_os in aix*) lt_prog_compiler_wl='-Wl,' if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor lt_prog_compiler_static='-Bstatic' else lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' fi ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). lt_prog_compiler_pic='-DDLL_EXPORT' ;; hpux9* | hpux10* | hpux11*) lt_prog_compiler_wl='-Wl,' # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but # not for PA HP-UX. case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) lt_prog_compiler_pic='+Z' ;; esac # Is there a better lt_prog_compiler_static that works with the bundled CC? lt_prog_compiler_static='${wl}-a ${wl}archive' ;; irix5* | irix6* | nonstopux*) lt_prog_compiler_wl='-Wl,' # PIC (with -KPIC) is the default. lt_prog_compiler_static='-non_shared' ;; linux* | k*bsd*-gnu) case $cc_basename in # old Intel for x86_64 which still supported -KPIC. ecc*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-static' ;; # icc used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. icc* | ifort*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fPIC' lt_prog_compiler_static='-static' ;; # Lahey Fortran 8.1. lf95*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='--shared' lt_prog_compiler_static='--static' ;; pgcc* | pgf77* | pgf90* | pgf95*) # Portland Group compilers (*not* the Pentium gcc compiler, # which looks to be a dead project) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fpic' lt_prog_compiler_static='-Bstatic' ;; ccc*) lt_prog_compiler_wl='-Wl,' # All Alpha code is PIC. 
lt_prog_compiler_static='-non_shared' ;; xl*) # IBM XL C 8.0/Fortran 10.1 on PPC lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-qpic' lt_prog_compiler_static='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C 5.9 lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' lt_prog_compiler_wl='-Wl,' ;; *Sun\ F*) # Sun Fortran 8.3 passes all unrecognized flags to the linker lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' lt_prog_compiler_wl='' ;; esac ;; esac ;; newsos6) lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic='-fPIC -shared' ;; osf3* | osf4* | osf5*) lt_prog_compiler_wl='-Wl,' # All OSF/1 code is PIC. lt_prog_compiler_static='-non_shared' ;; rdos*) lt_prog_compiler_static='-non_shared' ;; solaris*) lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' case $cc_basename in f77* | f90* | f95*) lt_prog_compiler_wl='-Qoption ld ';; *) lt_prog_compiler_wl='-Wl,';; esac ;; sunos4*) lt_prog_compiler_wl='-Qoption ld ' lt_prog_compiler_pic='-PIC' lt_prog_compiler_static='-Bstatic' ;; sysv4 | sysv4.2uw2* | sysv4.3*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; sysv4*MP*) if test -d /usr/nec ;then lt_prog_compiler_pic='-Kconform_pic' lt_prog_compiler_static='-Bstatic' fi ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; unicos*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_can_build_shared=no ;; uts4*) lt_prog_compiler_pic='-pic' lt_prog_compiler_static='-Bstatic' ;; *) lt_prog_compiler_can_build_shared=no ;; esac fi case $host_os in # For platforms which do not support PIC, -DPIC is meaningless: *djgpp*) lt_prog_compiler_pic= ;; *) lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" ;; esac { $as_echo "$as_me:$LINENO: result: $lt_prog_compiler_pic" >&5 $as_echo "$lt_prog_compiler_pic" >&6; } # # Check to make sure the PIC flag actually works. # if test -n "$lt_prog_compiler_pic"; then { $as_echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 $as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... " >&6; } if test "${lt_cv_prog_compiler_pic_works+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic_works=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$lt_prog_compiler_pic -DPIC" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:10868: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:10872: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. 
$ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_pic_works=yes fi fi $RM conftest* fi { $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_pic_works" >&5 $as_echo "$lt_cv_prog_compiler_pic_works" >&6; } if test x"$lt_cv_prog_compiler_pic_works" = xyes; then case $lt_prog_compiler_pic in "" | " "*) ;; *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; esac else lt_prog_compiler_pic= lt_prog_compiler_can_build_shared=no fi fi # # Check to make sure the static flag actually works. # wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" { $as_echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 $as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } if test "${lt_cv_prog_compiler_static_works+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_static_works=no save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS $lt_tmp_static_flag" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&5 $ECHO "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_static_works=yes fi else lt_cv_prog_compiler_static_works=yes fi fi $RM -r conftest* LDFLAGS="$save_LDFLAGS" fi { $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_static_works" >&5 $as_echo "$lt_cv_prog_compiler_static_works" >&6; } if test x"$lt_cv_prog_compiler_static_works" = xyes; then : else lt_prog_compiler_static= fi { $as_echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if test "${lt_cv_prog_compiler_c_o+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:10973: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:10977: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o=yes fi fi chmod u+w . 
2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o" >&5 $as_echo "$lt_cv_prog_compiler_c_o" >&6; } { $as_echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if test "${lt_cv_prog_compiler_c_o+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:11028: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:11032: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o" >&5 $as_echo "$lt_cv_prog_compiler_c_o" >&6; } hard_links="nottested" if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then # do not overwrite the value of need_locks provided by the user { $as_echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 $as_echo_n "checking if we can lock with hard links... " >&6; } hard_links=yes $RM conftest* ln conftest.a conftest.b 2>/dev/null && hard_links=no touch conftest.a ln conftest.a conftest.b 2>&5 || hard_links=no ln conftest.a conftest.b 2>/dev/null && hard_links=no { $as_echo "$as_me:$LINENO: result: $hard_links" >&5 $as_echo "$hard_links" >&6; } if test "$hard_links" = no; then { $as_echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 $as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} need_locks=warn fi else need_locks=no fi { $as_echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... 
" >&6; } runpath_var= allow_undefined_flag= always_export_symbols=no archive_cmds= archive_expsym_cmds= compiler_needs_object=no enable_shared_with_static_runtimes=no export_dynamic_flag_spec= export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' hardcode_automatic=no hardcode_direct=no hardcode_direct_absolute=no hardcode_libdir_flag_spec= hardcode_libdir_flag_spec_ld= hardcode_libdir_separator= hardcode_minus_L=no hardcode_shlibpath_var=unsupported inherit_rpath=no link_all_deplibs=unknown module_cmds= module_expsym_cmds= old_archive_from_new_cmds= old_archive_from_expsyms_cmds= thread_safe_flag_spec= whole_archive_flag_spec= # include_expsyms should be a list of space-separated symbols to be *always* # included in the symbol list include_expsyms= # exclude_expsyms can be an extended regexp of symbols to exclude # it will be wrapped by ` (' and `)$', so one must not match beginning or # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', # as well as any symbol that contains `d'. exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out # platforms (ab)use it in PIC code, but their linkers get confused if # the symbol is explicitly referenced. Since portable code cannot # rely on this symbol name, it's probably fine to never include it in # preloaded symbol tables. # Exclude shared library initialization/finalization symbols. extract_expsyms_cmds= case $host_os in cygwin* | mingw* | pw32* | cegcc*) # FIXME: the MSVC++ port hasn't been tested in a loooong time # When not using gcc, we currently assume that we are using # Microsoft Visual C++. if test "$GCC" != yes; then with_gnu_ld=no fi ;; interix*) # we just hope/assume this is gcc and not c89 (= MSVC++) with_gnu_ld=yes ;; openbsd*) with_gnu_ld=no ;; esac ld_shlibs=yes if test "$with_gnu_ld" = yes; then # If archive_cmds runs LD, not CC, wlarc should be empty wlarc='${wl}' # Set some defaults for GNU ld with shared library support. These # are reset later if shared libraries are not supported. Putting them # here allows them to be overridden if necessary. runpath_var=LD_RUN_PATH hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' export_dynamic_flag_spec='${wl}--export-dynamic' # ancient GNU ld didn't support --whole-archive et. al. if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' else whole_archive_flag_spec= fi supports_anon_versioning=no case `$LD -v 2>&1` in *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... *\ 2.11.*) ;; # other 2.11 versions *) supports_anon_versioning=yes ;; esac # See if GNU ld supports shared libraries. case $host_os in aix[3-9]*) # On AIX/PPC, the GNU linker is very broken if test "$host_cpu" != ia64; then ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: the GNU linker, at least up to release 2.9.1, is reported *** to be unable to reliably create shared libraries on AIX. *** Therefore, libtool is disabling shared libraries support. If you *** really care for shared libraries, you may want to modify your PATH *** so that a non-GNU linker is found, and then restart. 
_LT_EOF fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='' ;; m68k) archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; esac ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then allow_undefined_flag=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' else ld_shlibs=no fi ;; cygwin* | mingw* | pw32* | cegcc*) # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, # as there is no search path for DLLs. hardcode_libdir_flag_spec='-L$libdir' allow_undefined_flag=unsupported always_export_symbols=no enable_shared_with_static_runtimes=yes export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file (1st line # is EXPORTS), use it as is; otherwise, prepend... archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else ld_shlibs=no fi ;; interix[3-9]*) hardcode_direct=no hardcode_shlibpath_var=no hardcode_libdir_flag_spec='${wl}-rpath,$libdir' export_dynamic_flag_spec='${wl}-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' archive_expsym_cmds='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; gnu* | linux* | tpf* | k*bsd*-gnu) tmp_diet=no if test "$host_os" = linux-dietlibc; then case $cc_basename in diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) esac fi if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ && test "$tmp_diet" = no then tmp_addflag= tmp_sharedflag='-shared' case $cc_basename,$host_cpu in pgcc*) # Portland Group C compiler whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' tmp_addflag=' $pic_flag' ;; pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' tmp_addflag=' $pic_flag -Mnomain' ;; ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 tmp_addflag=' -i_dynamic' ;; efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 tmp_addflag=' -i_dynamic -nofor_main' ;; ifc* | ifort*) # Intel Fortran compiler tmp_addflag=' -nofor_main' ;; lf95*) # Lahey Fortran 8.1 whole_archive_flag_spec= tmp_sharedflag='--shared' ;; xl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) tmp_sharedflag='-qmkshrobj' tmp_addflag= ;; esac case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C 5.9 whole_archive_flag_spec='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' compiler_needs_object=yes tmp_sharedflag='-G' ;; *Sun\ F*) # Sun Fortran 8.3 tmp_sharedflag='-G' ;; esac archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' if test "x$supports_anon_versioning" = xyes; then archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' fi case $cc_basename in xlf*) # IBM XL Fortran 10.1 on PPC cannot create shared libs itself whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' hardcode_libdir_flag_spec= hardcode_libdir_flag_spec_ld='-rpath $libdir' archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' if test "x$supports_anon_versioning" = xyes; then archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' fi ;; esac else ld_shlibs=no fi ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds='$LD -Bshareable 
$libobjs $deplibs $linker_flags -o $lib' wlarc= else archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' fi ;; solaris*) if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: The releases 2.8.* of the GNU linker cannot reliably *** create shared libraries on Solaris systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.9.1 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) case `$LD -v 2>&1` in *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not *** reliably create shared libraries on SCO systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.16.91.0.3 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF ;; *) # For security reasons, it is highly recommended that you always # use absolute paths for naming shared libraries, and exclude the # DT_RUNPATH tag from executables and libraries. But doing so # requires that you compile everything twice, which is a pain. if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; esac ;; sunos4*) archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' wlarc= hardcode_direct=yes hardcode_shlibpath_var=no ;; *) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; esac if test "$ld_shlibs" = no; then runpath_var= hardcode_libdir_flag_spec= export_dynamic_flag_spec= whole_archive_flag_spec= fi else # PORTME fill in a description of your system's linker (not GNU ld) case $host_os in aix3*) allow_undefined_flag=unsupported always_export_symbols=yes archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' # Note: this linker hardcodes the directories in LIBPATH if there # are no directories specified by -L. hardcode_minus_L=yes if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then # Neither direct hardcoding nor static linking is supported with a # broken collect2. 
hardcode_direct=unsupported fi ;; aix[4-9]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag="" else # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to AIX nm, but means don't demangle with GNU nm if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' else export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' fi aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) for ld_flag in $LDFLAGS; do if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then aix_use_runtimelinking=yes break fi done ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. archive_cmds='' hardcode_direct=yes hardcode_direct_absolute=yes hardcode_libdir_separator=':' link_all_deplibs=yes file_list_spec='${wl}-f,' if test "$GCC" = yes; then case $host_os in aix4.[012]|aix4.[012].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 hardcode_direct=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking hardcode_minus_L=yes hardcode_libdir_flag_spec='-L$libdir' hardcode_libdir_separator= fi ;; esac shared_flag='-shared' if test "$aix_use_runtimelinking" = yes; then shared_flag="$shared_flag "'${wl}-G' fi else # not using gcc if test "$host_cpu" = ia64; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test "$aix_use_runtimelinking" = yes; then shared_flag='${wl}-G' else shared_flag='${wl}-bM:SRE' fi fi fi export_dynamic_flag_spec='${wl}-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to export. always_export_symbols=yes if test "$aix_use_runtimelinking" = yes; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. allow_undefined_flag='-berok' # Determine the default libpath from the value encoded in an # empty executable. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/ p } }' aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" else if test "$host_cpu" = ia64; then hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' allow_undefined_flag="-z nodefs" archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/ p } }' aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. 
if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. no_undefined_flag=' ${wl}-bernotok' allow_undefined_flag=' ${wl}-berok' # Exported symbols can be pulled into shared objects from archives whole_archive_flag_spec='$convenience' archive_cmds_need_lc=yes # This is similar to how AIX traditionally builds its shared libraries. archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' fi fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='' ;; m68k) archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; esac ;; bsdi[45]*) export_dynamic_flag_spec=-rdynamic ;; cygwin* | mingw* | pw32* | cegcc*) # When not using gcc, we currently assume that we are using # Microsoft Visual C++. # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. hardcode_libdir_flag_spec=' ' allow_undefined_flag=unsupported # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. archive_cmds='$CC -o $lib $libobjs $compiler_flags `$ECHO "X$deplibs" | $Xsed -e '\''s/ -lc$//'\''` -link -dll~linknames=' # The linker will automatically build a .lib file if we build a DLL. old_archive_from_new_cmds='true' # FIXME: Should let the user specify the lib program. 
old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' fix_srcfile_path='`cygpath -w "$srcfile"`' enable_shared_with_static_runtimes=yes ;; darwin* | rhapsody*) archive_cmds_need_lc=no hardcode_direct=no hardcode_automatic=yes hardcode_shlibpath_var=unsupported whole_archive_flag_spec='' link_all_deplibs=yes allow_undefined_flag="$_lt_dar_allow_undefined" case $cc_basename in ifort*) _lt_dar_can_shared=yes ;; *) _lt_dar_can_shared=$GCC ;; esac if test "$_lt_dar_can_shared" = "yes"; then output_verbose_link_cmd=echo archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" archive_expsym_cmds="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" module_expsym_cmds="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" else ld_shlibs=no fi ;; dgux*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-L$libdir' hardcode_shlibpath_var=no ;; freebsd1*) ld_shlibs=no ;; # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor # support. Future versions do this automatically, but an explicit c++rt0.o # does not break anything, and helps significantly (at the cost of a little # extra space). freebsd2.2*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; # Unfortunately, older versions of FreeBSD 2 do not have this feature. freebsd2*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes hardcode_minus_L=yes hardcode_shlibpath_var=no ;; # FreeBSD 3 and greater uses gcc -shared to do shared libraries. freebsd* | dragonfly*) archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; hpux9*) if test "$GCC" = yes; then archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' else archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' fi hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: hardcode_direct=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. 
hardcode_minus_L=yes export_dynamic_flag_spec='${wl}-E' ;; hpux10*) if test "$GCC" = yes -a "$with_gnu_ld" = no; then archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' fi if test "$with_gnu_ld" = no; then hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_flag_spec_ld='+b $libdir' hardcode_libdir_separator=: hardcode_direct=yes hardcode_direct_absolute=yes export_dynamic_flag_spec='${wl}-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes fi ;; hpux11*) if test "$GCC" = yes -a "$with_gnu_ld" = no; then case $host_cpu in hppa*64*) archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ;; esac else case $host_cpu in hppa*64*) archive_cmds='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ;; esac fi if test "$with_gnu_ld" = no; then hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: case $host_cpu in hppa*64*|ia64*) hardcode_direct=no hardcode_shlibpath_var=no ;; *) hardcode_direct=yes hardcode_direct_absolute=yes export_dynamic_flag_spec='${wl}-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes ;; esac fi ;; irix5* | irix6* | nonstopux*) if test "$GCC" = yes; then archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' # Try to use the -exported_symbol ld option, if it does not # work, assume that -exports_file does not work either and # implicitly export all symbols. save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" cat >conftest.$ac_ext <<_ACEOF int foo(void) {} _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LDFLAGS="$save_LDFLAGS" else archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' fi archive_cmds_need_lc='no' hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: inherit_rpath=yes link_all_deplibs=yes ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out else archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF fi hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; newsos6) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: hardcode_shlibpath_var=no ;; *nto* | *qnx*) ;; openbsd*) if test -f /usr/libexec/ld.so; then hardcode_direct=yes hardcode_shlibpath_var=no hardcode_direct_absolute=yes if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' hardcode_libdir_flag_spec='${wl}-rpath,$libdir' export_dynamic_flag_spec='${wl}-E' else case $host_os in openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-R$libdir' ;; *) archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' hardcode_libdir_flag_spec='${wl}-rpath,$libdir' ;; esac fi else ld_shlibs=no fi ;; os2*) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes allow_undefined_flag=unsupported archive_cmds='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$ECHO DATA >> $output_objdir/$libname.def~$ECHO " SINGLE NONSHARED" >> $output_objdir/$libname.def~$ECHO EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' ;; osf3*) if test "$GCC" = yes; then allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` 
${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' else allow_undefined_flag=' -expect_unresolved \*' archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' fi archive_cmds_need_lc='no' hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: ;; osf4* | osf5*) # as osf3* with the addition of -msym flag if test "$GCC" = yes; then allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' else allow_undefined_flag=' -expect_unresolved \*' archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' # Both c and cxx compiler support -rpath directly hardcode_libdir_flag_spec='-rpath $libdir' fi archive_cmds_need_lc='no' hardcode_libdir_separator=: ;; solaris*) no_undefined_flag=' -z defs' if test "$GCC" = yes; then wlarc='${wl}' archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' else case `$CC -V 2>&1` in *"Compilers 5.0"*) wlarc='' archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' ;; *) wlarc='${wl}' archive_cmds='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ;; esac fi hardcode_libdir_flag_spec='-R$libdir' hardcode_shlibpath_var=no case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands `-z linker_flag'. GCC discards it without `$wl', # but is careful enough not to reorder. # Supported since Solaris 2.6 (maybe 2.5.1?) 
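# (Hence the two variants just below: with GCC the -z allextract / -z defaultextract
#  pair is wrapped in ${wl} -- normally -Wl, for GCC -- so it reaches the linker
#  instead of being discarded, while the native Sun compiler driver accepts the
#  bare -z flags directly.)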
if test "$GCC" = yes; then whole_archive_flag_spec='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' else whole_archive_flag_spec='-z allextract$convenience -z defaultextract' fi ;; esac link_all_deplibs=yes ;; sunos4*) if test "x$host_vendor" = xsequent; then # Use $CC to link under sequent, because it throws in some extra .o # files that make .init and .fini sections work. archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' fi hardcode_libdir_flag_spec='-L$libdir' hardcode_direct=yes hardcode_minus_L=yes hardcode_shlibpath_var=no ;; sysv4) case $host_vendor in sni) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes # is this really true??? ;; siemens) ## LD is ld it makes a PLAMLIB ## CC just makes a GrossModule. archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' reload_cmds='$CC -r -o $output$reload_objs' hardcode_direct=no ;; motorola) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=no #Motorola manual says yes, but my tests say they lie ;; esac runpath_var='LD_RUN_PATH' hardcode_shlibpath_var=no ;; sysv4.3*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_shlibpath_var=no export_dynamic_flag_spec='-Bexport' ;; sysv4*MP*) if test -d /usr/nec; then archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_shlibpath_var=no runpath_var=LD_RUN_PATH hardcode_runpath_var=yes ld_shlibs=yes fi ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) no_undefined_flag='${wl}-z,text' archive_cmds_need_lc=no hardcode_shlibpath_var=no runpath_var='LD_RUN_PATH' if test "$GCC" = yes; then archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We can NOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. 
no_undefined_flag='${wl}-z,text' allow_undefined_flag='${wl}-z,nodefs' archive_cmds_need_lc=no hardcode_shlibpath_var=no hardcode_libdir_flag_spec='${wl}-R,$libdir' hardcode_libdir_separator=':' link_all_deplibs=yes export_dynamic_flag_spec='${wl}-Bexport' runpath_var='LD_RUN_PATH' if test "$GCC" = yes; then archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; uts4*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-L$libdir' hardcode_shlibpath_var=no ;; *) ld_shlibs=no ;; esac if test x$host_vendor = xsni; then case $host in sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) export_dynamic_flag_spec='${wl}-Blargedynsym' ;; esac fi fi { $as_echo "$as_me:$LINENO: result: $ld_shlibs" >&5 $as_echo "$ld_shlibs" >&6; } test "$ld_shlibs" = no && can_build_shared=no with_gnu_ld=$with_gnu_ld # # Do we need to explicitly link libc? # case "x$archive_cmds_need_lc" in x|xyes) # Assume -lc should be added archive_cmds_need_lc=yes if test "$enable_shared" = yes && test "$GCC" = yes; then case $archive_cmds in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. { $as_echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5 $as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } $RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$lt_prog_compiler_wl pic_flag=$lt_prog_compiler_pic compiler_flags=-v linker_flags=-v verstring= output_objdir=. libname=conftest lt_save_allow_undefined_flag=$allow_undefined_flag allow_undefined_flag= if { (eval echo "$as_me:$LINENO: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\"") >&5 (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } then archive_cmds_need_lc=no else archive_cmds_need_lc=yes fi allow_undefined_flag=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* { $as_echo "$as_me:$LINENO: result: $archive_cmds_need_lc" >&5 $as_echo "$archive_cmds_need_lc" >&6; } ;; esac fi ;; esac { $as_echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 $as_echo_n "checking dynamic linker characteristics... " >&6; } if test "$GCC" = yes; then case $host_os in darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; *) lt_awk_arg="/^libraries:/" ;; esac lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e "s,=/,/,g"` if $ECHO "$lt_search_path_spec" | $GREP ';' >/dev/null ; then # if the path contains ";" then we assume it to be the separator # otherwise default to the standard path separator (i.e. 
":") - it is # assumed that no part of a normal pathname contains ";" but that should # okay in the real world where ";" in dirpaths is itself problematic. lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e 's/;/ /g'` else lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi # Ok, now we have the path, separated by spaces, we can step through it # and add multilib dir if necessary. lt_tmp_lt_search_path_spec= lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` for lt_sys_path in $lt_search_path_spec; do if test -d "$lt_sys_path/$lt_multi_os_dir"; then lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" else test -d "$lt_sys_path" && \ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" fi done lt_search_path_spec=`$ECHO $lt_tmp_lt_search_path_spec | awk ' BEGIN {RS=" "; FS="/|\n";} { lt_foo=""; lt_count=0; for (lt_i = NF; lt_i > 0; lt_i--) { if ($lt_i != "" && $lt_i != ".") { if ($lt_i == "..") { lt_count++; } else { if (lt_count == 0) { lt_foo="/" $lt_i lt_foo; } else { lt_count--; } } } } if (lt_foo != "") { lt_freq[lt_foo]++; } if (lt_freq[lt_foo] == 1) { print lt_foo; } }'` sys_lib_search_path_spec=`$ECHO $lt_search_path_spec` else sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" fi library_names_spec= libname_spec='lib$name' soname_spec= shrext_cmds=".so" postinstall_cmds= postuninstall_cmds= finish_cmds= finish_eval= shlibpath_var= shlibpath_overrides_runpath=unknown version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" need_lib_prefix=unknown hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments need_version=unknown case $host_os in aix3*) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' shlibpath_var=LIBPATH # AIX 3 has no versioning support, so we append a major version to the name. soname_spec='${libname}${release}${shared_ext}$major' ;; aix[4-9]*) version_type=linux need_lib_prefix=no need_version=no hardcode_into_libs=yes if test "$host_cpu" = ia64; then # AIX 5 supports IA64 library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH else # With GCC up to 2.95.x, collect2 would create an import file # for dependence libraries. The import file would start with # the line `#! .'. This would cause the generated library to # depend on `.', always an invalid library. This was fixed in # development snapshots of GCC prior to 3.0. case $host_os in aix4 | aix4.[01] | aix4.[01].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then : else can_build_shared=no fi ;; esac # AIX (on Power*) has no versioning support, so currently we can not hardcode correct # soname into executable. Probably we can add versioning support to # collect2, so additional links can be useful in future. if test "$aix_use_runtimelinking" = yes; then # If using run time linking (on AIX 4.2 or later) use lib.so # instead of lib.a to let people know that these are not # typical AIX shared libraries. 
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' else # We preserve .a as extension for shared libraries through AIX4.2 # and later when we are not doing run time linking. library_names_spec='${libname}${release}.a $libname.a' soname_spec='${libname}${release}${shared_ext}$major' fi shlibpath_var=LIBPATH fi ;; amigaos*) case $host_cpu in powerpc) # Since July 2007 AmigaOS4 officially supports .so libraries. # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ;; m68k) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$ECHO "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; esac ;; beos*) library_names_spec='${libname}${shared_ext}' dynamic_linker="$host_os ld.so" shlibpath_var=LIBRARY_PATH ;; bsdi[45]*) version_type=linux need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" # the default ld.so.conf also contains /usr/contrib/lib and # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow # libtool to hard-code these into programs ;; cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=".dll" need_version=no need_lib_prefix=no case $GCC,$host_os in yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \${file}`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" ;; mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' sys_lib_search_path_spec=`$CC -print-search-dirs | $GREP "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then # It is most probably a Windows format PATH printed by # mingw gcc, but we are running on Cygwin. Gcc prints its search # path with ; separators, and with drive letters. 
We can handle the # drive letters (cygwin fileutils understands them), so leave them, # especially as we might pass files found there to a mingw objdump, # which wouldn't understand a cygwinified path. Ahh. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi ;; pw32*) # pw32 DLLs use 'pw' prefix rather than 'lib' library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ;; esac ;; *) library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' ;; esac dynamic_linker='Win32 ld.exe' # FIXME: first we should search . and the directory the executable is in shlibpath_var=PATH ;; darwin* | rhapsody*) dynamic_linker="$host_os dyld" version_type=darwin need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' soname_spec='${libname}${release}${major}$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; dgux*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; freebsd1*) dynamic_linker=no ;; freebsd* | dragonfly*) # DragonFly does not have aout. When/if they implement a new # versioning mechanism, adjust this. if test -x /usr/bin/objformat; then objformat=`/usr/bin/objformat` else case $host_os in freebsd[123]*) objformat=aout ;; *) objformat=elf ;; esac fi version_type=freebsd-$objformat case $version_type in freebsd-elf*) library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' need_version=no need_lib_prefix=no ;; freebsd-*) library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' need_version=yes ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in freebsd2*) shlibpath_overrides_runpath=yes ;; freebsd3.[01]* | freebsdelf3.[01]*) shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; *) # from 4.6 on, and DragonFly shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; esac ;; gnu*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH hardcode_into_libs=yes ;; hpux9* | hpux10* | hpux11*) # Give a soname corresponding to the major version so that dld.sl refuses to # link against other versions. version_type=sunos need_lib_prefix=no need_version=no case $host_cpu in ia64*) shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' if test "X$HPUX_IA64_MODE" = X32; then sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" else sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" fi sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; hppa*64*) shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' ;; esac # HP-UX runs *really* slowly unless shared libraries are mode 555. postinstall_cmds='chmod 555 $lib' ;; interix[3-9]*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; irix5* | irix6* | nonstopux*) case $host_os in nonstopux*) version_type=nonstopux ;; *) if test "$lt_cv_prog_gnu_ld" = yes; then version_type=linux else version_type=irix fi ;; esac need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' case $host_os in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in # libtool.m4 will add one of these switches to LD *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= libmagic=32-bit;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 libmagic=N32;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 libmagic=64-bit;; *) libsuff= shlibsuff= libmagic=never-match;; esac ;; esac shlibpath_var=LD_LIBRARY${shlibsuff}_PATH shlibpath_overrides_runpath=no sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" hardcode_into_libs=yes ;; # No shared lib support for Linux oldld, aout, or coff. linux*oldld* | linux*aout* | linux*coff*) dynamic_linker=no ;; # This must be Linux ELF. 
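# Note: besides setting the usual library_names_spec/soname_spec values, the
# linux* | k*bsd*-gnu branch below also links a trivial test program with the
# hardcode flag pointing at a dummy directory (/foo) and inspects the result
# with $OBJDUMP for a DT_RUNPATH entry; binutils ld builds patched to emit
# DT_RUNPATH cause shlibpath_overrides_runpath to be set to yes.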
linux* | k*bsd*-gnu) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then shlibpath_overrides_runpath=yes fi else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LDFLAGS=$save_LDFLAGS libdir=$save_libdir # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes # Add ABI-specific directories to the system library path. sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" # Append ld.so.conf contents to the search path if test -f /etc/ld.so.conf; then lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. Since this was broken with cross compilers, # most powerpc-linux boxes support dynamic linking these days and # people can always --disable-shared, the test was removed, and we # assume the GNU/Linux dynamic linker is in use. 
dynamic_linker='GNU/Linux ld.so' ;; netbsd*) version_type=sunos need_lib_prefix=no need_version=no if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='NetBSD ld.elf_so' fi shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; newsos6) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; *nto* | *qnx*) version_type=qnx need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='ldqnx.so' ;; openbsd*) version_type=sunos sys_lib_dlsearch_path_spec="/usr/lib" need_lib_prefix=no # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. case $host_os in openbsd3.3 | openbsd3.3.*) need_version=yes ;; *) need_version=no ;; esac library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then case $host_os in openbsd2.[89] | openbsd2.[89].*) shlibpath_overrides_runpath=no ;; *) shlibpath_overrides_runpath=yes ;; esac else shlibpath_overrides_runpath=yes fi ;; os2*) libname_spec='$name' shrext_cmds=".dll" need_lib_prefix=no library_names_spec='$libname${shared_ext} $libname.a' dynamic_linker='OS/2 ld.exe' shlibpath_var=LIBPATH ;; osf3* | osf4* | osf5*) version_type=osf need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" ;; rdos*) dynamic_linker=no ;; solaris*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes # ldd complains unless libraries are executable postinstall_cmds='chmod +x $lib' ;; sunos4*) version_type=sunos library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes if test "$with_gnu_ld" = yes; then need_lib_prefix=no fi need_version=yes ;; sysv4 | sysv4.3*) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' 
soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH case $host_vendor in sni) shlibpath_overrides_runpath=no need_lib_prefix=no runpath_var=LD_RUN_PATH ;; siemens) need_lib_prefix=no ;; motorola) need_lib_prefix=no need_version=no shlibpath_overrides_runpath=no sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ;; esac ;; sysv4*MP*) if test -d /usr/nec ;then version_type=linux library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' soname_spec='$libname${shared_ext}.$major' shlibpath_var=LD_LIBRARY_PATH fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) version_type=freebsd-elf need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test "$with_gnu_ld" = yes; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ;; esac fi sys_lib_dlsearch_path_spec='/usr/lib' ;; tpf*) # TPF is a cross-target only. Preferred cross-host = GNU/Linux. version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; uts4*) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac { $as_echo "$as_me:$LINENO: result: $dynamic_linker" >&5 $as_echo "$dynamic_linker" >&6; } test "$dynamic_linker" = no && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test "$GCC" = yes; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" fi if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" fi { $as_echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 $as_echo_n "checking how to hardcode library paths into programs... " >&6; } hardcode_action= if test -n "$hardcode_libdir_flag_spec" || test -n "$runpath_var" || test "X$hardcode_automatic" = "Xyes" ; then # We can hardcode non-existent directories. if test "$hardcode_direct" != no && # If the only mechanism to avoid hardcoding is shlibpath_var, we # have to relink, otherwise we might link with an installed library # when we should be linking with a yet-to-be-installed one ## test "$_LT_TAGVAR(hardcode_shlibpath_var, )" != no && test "$hardcode_minus_L" != no; then # Linking always hardcodes the temporary library directory. hardcode_action=relink else # We can link without hardcoding, and we can hardcode nonexisting dirs. hardcode_action=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. 
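# Note: hardcode_action is reported just below and ends up as one of
# 'immediate' (library directories can be hardcoded directly at link time),
# 'relink' (the temporary build directory gets hardcoded, so objects must be
# relinked at install time, which also disables fast_install further down),
# or 'unsupported' (nothing can be hardcoded, or only existing directories can be).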
hardcode_action=unsupported fi { $as_echo "$as_me:$LINENO: result: $hardcode_action" >&5 $as_echo "$hardcode_action" >&6; } if test "$hardcode_action" = relink || test "$inherit_rpath" = yes; then # Fast installation is not supported enable_fast_install=no elif test "$shlibpath_overrides_runpath" = yes || test "$enable_shared" = no; then # Fast installation is not necessary enable_fast_install=needless fi if test "x$enable_dlopen" != xyes; then enable_dlopen=unknown enable_dlopen_self=unknown enable_dlopen_self_static=unknown else lt_cv_dlopen=no lt_cv_dlopen_libs= case $host_os in beos*) lt_cv_dlopen="load_add_on" lt_cv_dlopen_libs= lt_cv_dlopen_self=yes ;; mingw* | pw32* | cegcc*) lt_cv_dlopen="LoadLibrary" lt_cv_dlopen_libs= ;; cygwin*) lt_cv_dlopen="dlopen" lt_cv_dlopen_libs= ;; darwin*) # if libdl is installed we need to link against it { $as_echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... " >&6; } if test "${ac_cv_lib_dl_dlopen+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_dl_dlopen=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dl_dlopen=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = x""yes; then lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" else lt_cv_dlopen="dyld" lt_cv_dlopen_libs= lt_cv_dlopen_self=yes fi ;; *) { $as_echo "$as_me:$LINENO: checking for shl_load" >&5 $as_echo_n "checking for shl_load... " >&6; } if test "${ac_cv_func_shl_load+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Define shl_load to an innocuous variant, in case declares shl_load. For example, HP-UX 11i declares gettimeofday. */ #define shl_load innocuous_shl_load /* System header to define __stub macros and hopefully few prototypes, which can conflict with char shl_load (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef shl_load /* Override any GCC internal prototype to avoid an error. 
Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char shl_load (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_shl_load || defined __stub___shl_load choke me #endif int main () { return shl_load (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_func_shl_load=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_func_shl_load=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_shl_load" >&5 $as_echo "$ac_cv_func_shl_load" >&6; } if test "x$ac_cv_func_shl_load" = x""yes; then lt_cv_dlopen="shl_load" else { $as_echo "$as_me:$LINENO: checking for shl_load in -ldld" >&5 $as_echo_n "checking for shl_load in -ldld... " >&6; } if test "${ac_cv_lib_dld_shl_load+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char shl_load (); int main () { return shl_load (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_dld_shl_load=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dld_shl_load=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_dld_shl_load" >&5 $as_echo "$ac_cv_lib_dld_shl_load" >&6; } if test "x$ac_cv_lib_dld_shl_load" = x""yes; then lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld" else { $as_echo "$as_me:$LINENO: checking for dlopen" >&5 $as_echo_n "checking for dlopen... 
" >&6; } if test "${ac_cv_func_dlopen+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Define dlopen to an innocuous variant, in case declares dlopen. For example, HP-UX 11i declares gettimeofday. */ #define dlopen innocuous_dlopen /* System header to define __stub macros and hopefully few prototypes, which can conflict with char dlopen (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef dlopen /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_dlopen || defined __stub___dlopen choke me #endif int main () { return dlopen (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_func_dlopen=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_func_dlopen=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_dlopen" >&5 $as_echo "$ac_cv_func_dlopen" >&6; } if test "x$ac_cv_func_dlopen" = x""yes; then lt_cv_dlopen="dlopen" else { $as_echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... " >&6; } if test "${ac_cv_lib_dl_dlopen+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_dl_dlopen=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dl_dlopen=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = x""yes; then lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" else { $as_echo "$as_me:$LINENO: checking for dlopen in -lsvld" >&5 $as_echo_n "checking for dlopen in -lsvld... " >&6; } if test "${ac_cv_lib_svld_dlopen+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsvld $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_svld_dlopen=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_svld_dlopen=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_svld_dlopen" >&5 $as_echo "$ac_cv_lib_svld_dlopen" >&6; } if test "x$ac_cv_lib_svld_dlopen" = x""yes; then lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld" else { $as_echo "$as_me:$LINENO: checking for dld_link in -ldld" >&5 $as_echo_n "checking for dld_link in -ldld... " >&6; } if test "${ac_cv_lib_dld_dld_link+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dld_link (); int main () { return dld_link (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_dld_dld_link=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dld_dld_link=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_dld_dld_link" >&5 $as_echo "$ac_cv_lib_dld_dld_link" >&6; } if test "x$ac_cv_lib_dld_dld_link" = x""yes; then lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld" fi fi fi fi fi fi ;; esac if test "x$lt_cv_dlopen" != xno; then enable_dlopen=yes else enable_dlopen=no fi case $lt_cv_dlopen in dlopen) save_CPPFLAGS="$CPPFLAGS" test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" save_LDFLAGS="$LDFLAGS" wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" save_LIBS="$LIBS" LIBS="$lt_cv_dlopen_libs $LIBS" { $as_echo "$as_me:$LINENO: checking whether a program can dlopen itself" >&5 $as_echo_n "checking whether a program can dlopen itself... " >&6; } if test "${lt_cv_dlopen_self+set}" = set; then $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : lt_cv_dlopen_self=cross else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF #line 13831 "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include #endif #include #ifdef RTLD_GLOBAL # define LT_DLGLOBAL RTLD_GLOBAL #else # ifdef DL_GLOBAL # define LT_DLGLOBAL DL_GLOBAL # else # define LT_DLGLOBAL 0 # endif #endif /* We may have to define LT_DLLAZY_OR_NOW in the command line if we find out it does not work in some platform. */ #ifndef LT_DLLAZY_OR_NOW # ifdef RTLD_LAZY # define LT_DLLAZY_OR_NOW RTLD_LAZY # else # ifdef DL_LAZY # define LT_DLLAZY_OR_NOW DL_LAZY # else # ifdef RTLD_NOW # define LT_DLLAZY_OR_NOW RTLD_NOW # else # ifdef DL_NOW # define LT_DLLAZY_OR_NOW DL_NOW # else # define LT_DLLAZY_OR_NOW 0 # endif # endif # endif # endif #endif void fnord() { int i=42;} int main () { void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); int status = $lt_dlunknown; if (self) { if (dlsym (self,"fnord")) status = $lt_dlno_uscore; else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; /* dlclose (self); */ } else puts (dlerror ()); return status; } _LT_EOF if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then (./conftest; exit; ) >&5 2>/dev/null lt_status=$? case x$lt_status in x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; esac else : # compilation failed lt_cv_dlopen_self=no fi fi rm -fr conftest* fi { $as_echo "$as_me:$LINENO: result: $lt_cv_dlopen_self" >&5 $as_echo "$lt_cv_dlopen_self" >&6; } if test "x$lt_cv_dlopen_self" = xyes; then wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" { $as_echo "$as_me:$LINENO: checking whether a statically linked program can dlopen itself" >&5 $as_echo_n "checking whether a statically linked program can dlopen itself... 
" >&6; } if test "${lt_cv_dlopen_self_static+set}" = set; then $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : lt_cv_dlopen_self_static=cross else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF #line 13927 "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include #endif #include #ifdef RTLD_GLOBAL # define LT_DLGLOBAL RTLD_GLOBAL #else # ifdef DL_GLOBAL # define LT_DLGLOBAL DL_GLOBAL # else # define LT_DLGLOBAL 0 # endif #endif /* We may have to define LT_DLLAZY_OR_NOW in the command line if we find out it does not work in some platform. */ #ifndef LT_DLLAZY_OR_NOW # ifdef RTLD_LAZY # define LT_DLLAZY_OR_NOW RTLD_LAZY # else # ifdef DL_LAZY # define LT_DLLAZY_OR_NOW DL_LAZY # else # ifdef RTLD_NOW # define LT_DLLAZY_OR_NOW RTLD_NOW # else # ifdef DL_NOW # define LT_DLLAZY_OR_NOW DL_NOW # else # define LT_DLLAZY_OR_NOW 0 # endif # endif # endif # endif #endif void fnord() { int i=42;} int main () { void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); int status = $lt_dlunknown; if (self) { if (dlsym (self,"fnord")) status = $lt_dlno_uscore; else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; /* dlclose (self); */ } else puts (dlerror ()); return status; } _LT_EOF if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then (./conftest; exit; ) >&5 2>/dev/null lt_status=$? case x$lt_status in x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; esac else : # compilation failed lt_cv_dlopen_self_static=no fi fi rm -fr conftest* fi { $as_echo "$as_me:$LINENO: result: $lt_cv_dlopen_self_static" >&5 $as_echo "$lt_cv_dlopen_self_static" >&6; } fi CPPFLAGS="$save_CPPFLAGS" LDFLAGS="$save_LDFLAGS" LIBS="$save_LIBS" ;; esac case $lt_cv_dlopen_self in yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; *) enable_dlopen_self=unknown ;; esac case $lt_cv_dlopen_self_static in yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; *) enable_dlopen_self_static=unknown ;; esac fi striplib= old_striplib= { $as_echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5 $as_echo_n "checking whether stripping libraries is possible... " >&6; } if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" test -z "$striplib" && striplib="$STRIP --strip-unneeded" { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } else # FIXME - insert some real tests, host_os isn't really good enough case $host_os in darwin*) if test -n "$STRIP" ; then striplib="$STRIP -x" old_striplib="$STRIP -S" { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi ;; *) { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } ;; esac fi # Report which library types will actually be built { $as_echo "$as_me:$LINENO: checking if libtool supports shared libraries" >&5 $as_echo_n "checking if libtool supports shared libraries... " >&6; } { $as_echo "$as_me:$LINENO: result: $can_build_shared" >&5 $as_echo "$can_build_shared" >&6; } { $as_echo "$as_me:$LINENO: checking whether to build shared libraries" >&5 $as_echo_n "checking whether to build shared libraries... 
" >&6; } test "$can_build_shared" = "no" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test "$enable_shared" = yes && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[4-9]*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi ;; esac { $as_echo "$as_me:$LINENO: result: $enable_shared" >&5 $as_echo "$enable_shared" >&6; } { $as_echo "$as_me:$LINENO: checking whether to build static libraries" >&5 $as_echo_n "checking whether to build static libraries... " >&6; } # Make sure either enable_shared or enable_static is yes. test "$enable_shared" = yes || enable_static=yes { $as_echo "$as_me:$LINENO: result: $enable_static" >&5 $as_echo "$enable_static" >&6; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu CC="$lt_save_CC" ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu archive_cmds_need_lc_CXX=no allow_undefined_flag_CXX= always_export_symbols_CXX=no archive_expsym_cmds_CXX= compiler_needs_object_CXX=no export_dynamic_flag_spec_CXX= hardcode_direct_CXX=no hardcode_direct_absolute_CXX=no hardcode_libdir_flag_spec_CXX= hardcode_libdir_flag_spec_ld_CXX= hardcode_libdir_separator_CXX= hardcode_minus_L_CXX=no hardcode_shlibpath_var_CXX=unsupported hardcode_automatic_CXX=no inherit_rpath_CXX=no module_cmds_CXX= module_expsym_cmds_CXX= link_all_deplibs_CXX=unknown old_archive_cmds_CXX=$old_archive_cmds no_undefined_flag_CXX= whole_archive_flag_spec_CXX= enable_shared_with_static_runtimes_CXX=no # Source file extension for C++ test sources. ac_ext=cpp # Object file extension for compiled C++ test sources. objext=o objext_CXX=$objext # No sense in running all these tests if we already determined that # the CXX compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. if test "$_lt_caught_CXX_error" != yes; then # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(int, char *[]) { return(0); }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # save warnings/boilerplate of simple test code ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" >conftest.$ac_ext eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_compiler_boilerplate=`cat conftest.err` $RM conftest* ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` $RM -r conftest* # Allow CC to be a program name with arguments. 
lt_save_CC=$CC lt_save_LD=$LD lt_save_GCC=$GCC GCC=$GXX lt_save_with_gnu_ld=$with_gnu_ld lt_save_path_LD=$lt_cv_path_LD if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx else $as_unset lt_cv_prog_gnu_ld fi if test -n "${lt_cv_path_LDCXX+set}"; then lt_cv_path_LD=$lt_cv_path_LDCXX else $as_unset lt_cv_path_LD fi test -z "${LDCXX+set}" || LD=$LDCXX CC=${CXX-"c++"} compiler=$CC compiler_CXX=$CC for cc_temp in $compiler""; do case $cc_temp in compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; \-*) ;; *) break;; esac done cc_basename=`$ECHO "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` if test -n "$compiler"; then # We don't want -fno-exception when compiling C++ code, so set the # no_builtin_flag separately if test "$GXX" = yes; then lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin' else lt_prog_compiler_no_builtin_flag_CXX= fi if test "$GXX" = yes; then # Set up default GNU C++ configuration # Check whether --with-gnu-ld was given. if test "${with_gnu_ld+set}" = set; then withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes else with_gnu_ld=no fi ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. { $as_echo "$as_me:$LINENO: checking for ld used by $CC" >&5 $as_echo_n "checking for ld used by $CC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [\\/]* | ?:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then { $as_echo "$as_me:$LINENO: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:$LINENO: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... " >&6; } fi if test "${lt_cv_path_LD+set}" = set; then $as_echo_n "(cached) " >&6 else if test -z "$LD"; then lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$lt_cv_path_LD" -v 2>&1 &5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -z "$LD" && { { $as_echo "$as_me:$LINENO: error: no acceptable ld found in \$PATH" >&5 $as_echo "$as_me: error: no acceptable ld found in \$PATH" >&2;} { (exit 1); exit 1; }; } { $as_echo "$as_me:$LINENO: checking if the linker ($LD) is GNU ld" >&5 $as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } if test "${lt_cv_prog_gnu_ld+set}" = set; then $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU lds only accept -v. 
case `$LD -v 2>&1 &5 $as_echo "$lt_cv_prog_gnu_ld" >&6; } with_gnu_ld=$lt_cv_prog_gnu_ld # Check if GNU C++ uses GNU ld as the underlying linker, since the # archiving commands below assume that GNU ld is being used. if test "$with_gnu_ld" = yes; then archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' export_dynamic_flag_spec_CXX='${wl}--export-dynamic' # If archive_cmds runs LD, not CC, wlarc should be empty # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to # investigate it a little bit more. (MM) wlarc='${wl}' # ancient GNU ld didn't support --whole-archive et. al. if eval "`$CC -print-prog-name=ld` --help 2>&1" | $GREP 'no-whole-archive' > /dev/null; then whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' else whole_archive_flag_spec_CXX= fi else with_gnu_ld=no wlarc= # A generic and very simple default shared library creation # command for GNU C++ for the case where it uses the native # linker, instead of GNU ld. If possible, this setting should # overridden to take advantage of the native linker features on # the platform it is being used on. archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' fi # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' else GXX=no with_gnu_ld=no wlarc= fi # PORTME: fill in a description of your system's C++ link characteristics { $as_echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } ld_shlibs_CXX=yes case $host_os in aix3*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; aix[4-9]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag="" else aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) for ld_flag in $LDFLAGS; do case $ld_flag in *-brtl*) aix_use_runtimelinking=yes break ;; esac done ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
archive_cmds_CXX='' hardcode_direct_CXX=yes hardcode_direct_absolute_CXX=yes hardcode_libdir_separator_CXX=':' link_all_deplibs_CXX=yes file_list_spec_CXX='${wl}-f,' if test "$GXX" = yes; then case $host_os in aix4.[012]|aix4.[012].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 hardcode_direct_CXX=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking hardcode_minus_L_CXX=yes hardcode_libdir_flag_spec_CXX='-L$libdir' hardcode_libdir_separator_CXX= fi esac shared_flag='-shared' if test "$aix_use_runtimelinking" = yes; then shared_flag="$shared_flag "'${wl}-G' fi else # not using gcc if test "$host_cpu" = ia64; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test "$aix_use_runtimelinking" = yes; then shared_flag='${wl}-G' else shared_flag='${wl}-bM:SRE' fi fi fi export_dynamic_flag_spec_CXX='${wl}-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to # export. always_export_symbols_CXX=yes if test "$aix_use_runtimelinking" = yes; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. allow_undefined_flag_CXX='-berok' # Determine the default libpath from the value encoded in an empty # executable. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/ p } }' aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. 
if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" else if test "$host_cpu" = ia64; then hardcode_libdir_flag_spec_CXX='${wl}-R $libdir:/usr/lib:/lib' allow_undefined_flag_CXX="-z nodefs" archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/ p } }' aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. no_undefined_flag_CXX=' ${wl}-bernotok' allow_undefined_flag_CXX=' ${wl}-berok' # Exported symbols can be pulled into shared objects from archives whole_archive_flag_spec_CXX='$convenience' archive_cmds_need_lc_CXX=yes # This is similar to how AIX traditionally builds its shared # libraries. archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' fi fi ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then allow_undefined_flag_CXX=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. 
FIXME archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' else ld_shlibs_CXX=no fi ;; chorus*) case $cc_basename in *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; cygwin* | mingw* | pw32* | cegcc*) # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, # as there is no search path for DLLs. hardcode_libdir_flag_spec_CXX='-L$libdir' allow_undefined_flag_CXX=unsupported always_export_symbols_CXX=no enable_shared_with_static_runtimes_CXX=yes if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file (1st line # is EXPORTS), use it as is; otherwise, prepend... archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else ld_shlibs_CXX=no fi ;; darwin* | rhapsody*) archive_cmds_need_lc_CXX=no hardcode_direct_CXX=no hardcode_automatic_CXX=yes hardcode_shlibpath_var_CXX=unsupported whole_archive_flag_spec_CXX='' link_all_deplibs_CXX=yes allow_undefined_flag_CXX="$_lt_dar_allow_undefined" case $cc_basename in ifort*) _lt_dar_can_shared=yes ;; *) _lt_dar_can_shared=$GCC ;; esac if test "$_lt_dar_can_shared" = "yes"; then output_verbose_link_cmd=echo archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" module_expsym_cmds_CXX="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" if test "$lt_cv_apple_cc_single_mod" != "yes"; then archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}" archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" fi else ld_shlibs_CXX=no fi ;; dgux*) case $cc_basename in ec++*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; ghcx*) # Green Hills C++ Compiler # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; freebsd[12]*) # C++ 
shared libraries reported to be fairly broken before # switch to ELF ld_shlibs_CXX=no ;; freebsd-elf*) archive_cmds_need_lc_CXX=no ;; freebsd* | dragonfly*) # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF # conventions ld_shlibs_CXX=yes ;; gnu*) ;; hpux9*) hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' hardcode_libdir_separator_CXX=: export_dynamic_flag_spec_CXX='${wl}-E' hardcode_direct_CXX=yes hardcode_minus_L_CXX=yes # Not in the search PATH, # but as the default # location of the library. case $cc_basename in CC*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; aCC*) archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' ;; *) if test "$GXX" = yes; then archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' else # FIXME: insert proper C++ library support ld_shlibs_CXX=no fi ;; esac ;; hpux10*|hpux11*) if test $with_gnu_ld = no; then hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' hardcode_libdir_separator_CXX=: case $host_cpu in hppa*64*|ia64*) ;; *) export_dynamic_flag_spec_CXX='${wl}-E' ;; esac fi case $host_cpu in hppa*64*|ia64*) hardcode_direct_CXX=no hardcode_shlibpath_var_CXX=no ;; *) hardcode_direct_CXX=yes hardcode_direct_absolute_CXX=yes hardcode_minus_L_CXX=yes # Not in the search PATH, # but as the default # location of the library. ;; esac case $cc_basename in CC*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; aCC*) case $host_cpu in hppa*64*) archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. 
output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' ;; *) if test "$GXX" = yes; then if test $with_gnu_ld = no; then case $host_cpu in hppa*64*) archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac fi else # FIXME: insert proper C++ library support ld_shlibs_CXX=no fi ;; esac ;; interix[3-9]*) hardcode_direct_CXX=no hardcode_shlibpath_var_CXX=no hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' export_dynamic_flag_spec_CXX='${wl}-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' archive_expsym_cmds_CXX='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; irix5* | irix6*) case $cc_basename in CC*) # SGI C++ archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' # Archives containing C++ object files must be created using # "CC -ar", where "CC" is the IRIX C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs' ;; *) if test "$GXX" = yes; then if test "$with_gnu_ld" = no; then archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' else archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` -o $lib' fi fi link_all_deplibs_CXX=yes ;; esac hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator_CXX=: inherit_rpath_CXX=yes ;; linux* | k*bsd*-gnu) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. 
archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' export_dynamic_flag_spec_CXX='${wl}--export-dynamic' # Archives containing C++ object files must be created using # "CC -Bstatic", where "CC" is the KAI C++ compiler. old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;; icpc* | ecpc* ) # Intel C++ with_gnu_ld=yes # version 8.0 and above of icpc choke on multiply defined symbols # if we add $predep_objects and $postdep_objects, however 7.1 and # earlier do not add the objects themselves. case `$CC -V 2>&1` in *"Version 7."*) archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ;; *) # Version 8.0 or newer tmp_idyn= case $host_cpu in ia64*) tmp_idyn=' -i_dynamic';; esac archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ;; esac archive_cmds_need_lc_CXX=no hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' export_dynamic_flag_spec_CXX='${wl}--export-dynamic' whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive' ;; pgCC* | pgcpp*) # Portland Group C++ compiler case `$CC -V` in *pgCC\ [1-5]* | *pgcpp\ [1-5]*) prelink_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ compile_command="$compile_command `find $tpldir -name \*.o | $NL2SP`"' old_archive_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | $NL2SP`~ $RANLIB $oldlib' archive_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o 
$lib' archive_expsym_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' ;; *) # Version 6 will use weak symbols archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' ;; esac hardcode_libdir_flag_spec_CXX='${wl}--rpath ${wl}$libdir' export_dynamic_flag_spec_CXX='${wl}--export-dynamic' whole_archive_flag_spec_CXX='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' ;; cxx*) # Compaq C++ archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' runpath_var=LD_RUN_PATH hardcode_libdir_flag_spec_CXX='-rpath $libdir' hardcode_libdir_separator_CXX=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. 
output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`$ECHO "X$templist" | $Xsed -e "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' ;; xl*) # IBM XL 8.0 on PPC, with GNU ld hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' export_dynamic_flag_spec_CXX='${wl}--export-dynamic' archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' if test "x$supports_anon_versioning" = xyes; then archive_expsym_cmds_CXX='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' fi ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 no_undefined_flag_CXX=' -zdefs' archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' archive_expsym_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols' hardcode_libdir_flag_spec_CXX='-R$libdir' whole_archive_flag_spec_CXX='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' compiler_needs_object_CXX=yes # Not sure whether something based on # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 # would be better. output_verbose_link_cmd='echo' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. 
old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' ;; esac ;; esac ;; lynxos*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; m88k*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; mvs*) case $cc_basename in cxx*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds_CXX='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' wlarc= hardcode_libdir_flag_spec_CXX='-R$libdir' hardcode_direct_CXX=yes hardcode_shlibpath_var_CXX=no fi # Workaround some broken pre-1.5 toolchains output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' ;; *nto* | *qnx*) ld_shlibs_CXX=yes ;; openbsd2*) # C++ shared libraries are fairly broken ld_shlibs_CXX=no ;; openbsd*) if test -f /usr/libexec/ld.so; then hardcode_direct_CXX=yes hardcode_shlibpath_var_CXX=no hardcode_direct_absolute_CXX=yes archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' export_dynamic_flag_spec_CXX='${wl}-E' whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' fi output_verbose_link_cmd=echo else ld_shlibs_CXX=no fi ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' hardcode_libdir_separator_CXX=: # Archives containing C++ object files must be created using # the KAI C++ compiler. 
case $host in osf3*) old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;; *) old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' ;; esac ;; RCC*) # Rational C++ 2.4.1 # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; cxx*) case $host in osf3*) allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && $ECHO "X${wl}-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' ;; *) allow_undefined_flag_CXX=' -expect_unresolved \*' archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ echo "-hidden">> $lib.exp~ $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~ $RM $lib.exp' hardcode_libdir_flag_spec_CXX='-rpath $libdir' ;; esac hardcode_libdir_separator_CXX=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`$ECHO "X$templist" | $Xsed -e "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' ;; *) if test "$GXX" = yes && test "$with_gnu_ld" = no; then allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' case $host in osf3*) archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ;; *) archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ;; esac hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator_CXX=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' else # FIXME: insert proper C++ library support ld_shlibs_CXX=no fi ;; esac ;; psos*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; lcc*) # Lucid # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; solaris*) case $cc_basename in CC*) # Sun C++ 4.2, 5.x and Centerline C++ archive_cmds_need_lc_CXX=yes no_undefined_flag_CXX=' -zdefs' archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' hardcode_libdir_flag_spec_CXX='-R$libdir' hardcode_shlibpath_var_CXX=no case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands `-z linker_flag'. # Supported since Solaris 2.6 (maybe 2.5.1?) whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract' ;; esac link_all_deplibs_CXX=yes output_verbose_link_cmd='echo' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' ;; gcx*) # Green Hills C++ Compiler archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' # The C++ compiler must be used to create the archive. old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs' ;; *) # GNU C++ compiler with Solaris linker if test "$GXX" = yes && test "$with_gnu_ld" = no; then no_undefined_flag_CXX=' ${wl}-z ${wl}defs' if $CC --version | $GREP -v '^2\.7' > /dev/null; then archive_cmds_CXX='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' else # g++ 2.7 appears to require `-G' NOT `-shared' on this # platform. archive_cmds_CXX='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' fi hardcode_libdir_flag_spec_CXX='${wl}-R $wl$libdir' case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) whole_archive_flag_spec_CXX='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' ;; esac fi ;; esac ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) no_undefined_flag_CXX='${wl}-z,text' archive_cmds_need_lc_CXX=no hardcode_shlibpath_var_CXX=no runpath_var='LD_RUN_PATH' case $cc_basename in CC*) archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; *) archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We can NOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. no_undefined_flag_CXX='${wl}-z,text' allow_undefined_flag_CXX='${wl}-z,nodefs' archive_cmds_need_lc_CXX=no hardcode_shlibpath_var_CXX=no hardcode_libdir_flag_spec_CXX='${wl}-R,$libdir' hardcode_libdir_separator_CXX=':' link_all_deplibs_CXX=yes export_dynamic_flag_spec_CXX='${wl}-Bexport' runpath_var='LD_RUN_PATH' case $cc_basename in CC*) archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; *) archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; vxworks*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac { $as_echo "$as_me:$LINENO: result: $ld_shlibs_CXX" >&5 $as_echo "$ld_shlibs_CXX" >&6; } test "$ld_shlibs_CXX" = no && can_build_shared=no GCC_CXX="$GXX" LD_CXX="$LD" ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... # Dependencies to place before and after the object being linked: predep_objects_CXX= postdep_objects_CXX= predeps_CXX= postdeps_CXX= compiler_lib_search_path_CXX= cat > conftest.$ac_ext <<_LT_EOF class Foo { public: Foo (void) { a = 0; } private: int a; }; _LT_EOF if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then # Parse the compiler output and extract the necessary # objects, libraries and library flags. # Sentinel used to keep track of whether or not we are before # the conftest object file. 
pre_test_object_deps_done=no for p in `eval "$output_verbose_link_cmd"`; do case $p in -L* | -R* | -l*) # Some compilers place space between "-{L,R}" and the path. # Remove the space. if test $p = "-L" || test $p = "-R"; then prev=$p continue else prev= fi if test "$pre_test_object_deps_done" = no; then case $p in -L* | -R*) # Internal compiler library paths should come after those # provided the user. The postdeps already come after the # user supplied libs so there is no need to process them. if test -z "$compiler_lib_search_path_CXX"; then compiler_lib_search_path_CXX="${prev}${p}" else compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} ${prev}${p}" fi ;; # The "-l" case would never come before the object being # linked, so don't bother handling this case. esac else if test -z "$postdeps_CXX"; then postdeps_CXX="${prev}${p}" else postdeps_CXX="${postdeps_CXX} ${prev}${p}" fi fi ;; *.$objext) # This assumes that the test object file only shows up # once in the compiler output. if test "$p" = "conftest.$objext"; then pre_test_object_deps_done=yes continue fi if test "$pre_test_object_deps_done" = no; then if test -z "$predep_objects_CXX"; then predep_objects_CXX="$p" else predep_objects_CXX="$predep_objects_CXX $p" fi else if test -z "$postdep_objects_CXX"; then postdep_objects_CXX="$p" else postdep_objects_CXX="$postdep_objects_CXX $p" fi fi ;; *) ;; # Ignore the rest. esac done # Clean up. rm -f a.out a.exe else echo "libtool.m4: error: problem compiling CXX test program" fi $RM -f confest.$objext # PORTME: override above test on systems where it is broken case $host_os in interix[3-9]*) # Interix 3.5 installs completely hosed .la files for C++, so rather than # hack all around it, let's just trust "g++" to DTRT. predep_objects_CXX= postdep_objects_CXX= postdeps_CXX= ;; linux*) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 # The more standards-conforming stlport4 library is # incompatible with the Cstd library. Avoid specifying # it if it's in CXXFLAGS. Ignore libCrun as # -library=stlport4 depends on it. case " $CXX $CXXFLAGS " in *" -library=stlport4 "*) solaris_use_stlport4=yes ;; esac if test "$solaris_use_stlport4" != yes; then postdeps_CXX='-library=Cstd -library=Crun' fi ;; esac ;; solaris*) case $cc_basename in CC*) # The more standards-conforming stlport4 library is # incompatible with the Cstd library. Avoid specifying # it if it's in CXXFLAGS. Ignore libCrun as # -library=stlport4 depends on it. case " $CXX $CXXFLAGS " in *" -library=stlport4 "*) solaris_use_stlport4=yes ;; esac # Adding this requires a known-good setup of shared libraries for # Sun compiler versions before 5.6, else PIC objects from an old # archive will be linked into the output, leading to subtle bugs. if test "$solaris_use_stlport4" != yes; then postdeps_CXX='-library=Cstd -library=Crun' fi ;; esac ;; esac case " $postdeps_CXX " in *" -lc "*) archive_cmds_need_lc_CXX=no ;; esac compiler_lib_search_dirs_CXX= if test -n "${compiler_lib_search_path_CXX}"; then compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | ${SED} -e 's! -L! !g' -e 's!^ !!'` fi lt_prog_compiler_wl_CXX= lt_prog_compiler_pic_CXX= lt_prog_compiler_static_CXX= { $as_echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 $as_echo_n "checking for $compiler option to produce PIC... " >&6; } # C++ specific cases for pic, static, wl, etc. if test "$GXX" = yes; then lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='-static' case $host_os in aix*) # All AIX code is PIC. 
if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor lt_prog_compiler_static_CXX='-Bstatic' fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support lt_prog_compiler_pic_CXX='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the `-m68020' flag to GCC prevents building anything better, # like `-m68040'. lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries lt_prog_compiler_pic_CXX='-DDLL_EXPORT' ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files lt_prog_compiler_pic_CXX='-fno-common' ;; *djgpp*) # DJGPP does not support shared libraries at all lt_prog_compiler_pic_CXX= ;; interix[3-9]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; sysv4*MP*) if test -d /usr/nec; then lt_prog_compiler_pic_CXX=-Kconform_pic fi ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) ;; *) lt_prog_compiler_pic_CXX='-fPIC' ;; esac ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic_CXX='-fPIC -shared' ;; *) lt_prog_compiler_pic_CXX='-fPIC' ;; esac else case $host_os in aix[4-9]*) # All AIX code is PIC. if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor lt_prog_compiler_static_CXX='-Bstatic' else lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp' fi ;; chorus*) case $cc_basename in cxch68*) # Green Hills C++ Compiler # _LT_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" ;; esac ;; dgux*) case $cc_basename in ec++*) lt_prog_compiler_pic_CXX='-KPIC' ;; ghcx*) # Green Hills C++ Compiler lt_prog_compiler_pic_CXX='-pic' ;; *) ;; esac ;; freebsd* | dragonfly*) # FreeBSD uses GNU C++ ;; hpux9* | hpux10* | hpux11*) case $cc_basename in CC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' if test "$host_cpu" != ia64; then lt_prog_compiler_pic_CXX='+Z' fi ;; aCC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) lt_prog_compiler_pic_CXX='+Z' ;; esac ;; *) ;; esac ;; interix*) # This is c89, which is MS Visual C++ (no shared libs) # Anyone wants to do a port? ;; irix5* | irix6* | nonstopux*) case $cc_basename in CC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='-non_shared' # CC pic flag -KPIC is the default. ;; *) ;; esac ;; linux* | k*bsd*-gnu) case $cc_basename in KCC*) # KAI C++ Compiler lt_prog_compiler_wl_CXX='--backend -Wl,' lt_prog_compiler_pic_CXX='-fPIC' ;; ecpc* ) # old Intel C++ for x86_64 which still supported -KPIC. lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-static' ;; icpc* ) # Intel C++, used to be incompatible with GCC. 
# ICC 10 doesn't accept -KPIC any more. lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-fPIC' lt_prog_compiler_static_CXX='-static' ;; pgCC* | pgcpp*) # Portland Group C++ compiler lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-fpic' lt_prog_compiler_static_CXX='-Bstatic' ;; cxx*) # Compaq C++ # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. lt_prog_compiler_pic_CXX= lt_prog_compiler_static_CXX='-non_shared' ;; xlc* | xlC*) # IBM XL 8.0 on PPC lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-qpic' lt_prog_compiler_static_CXX='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-Bstatic' lt_prog_compiler_wl_CXX='-Qoption ld ' ;; esac ;; esac ;; lynxos*) ;; m88k*) ;; mvs*) case $cc_basename in cxx*) lt_prog_compiler_pic_CXX='-W c,exportall' ;; *) ;; esac ;; netbsd*) ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic_CXX='-fPIC -shared' ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) lt_prog_compiler_wl_CXX='--backend -Wl,' ;; RCC*) # Rational C++ 2.4.1 lt_prog_compiler_pic_CXX='-pic' ;; cxx*) # Digital/Compaq C++ lt_prog_compiler_wl_CXX='-Wl,' # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. lt_prog_compiler_pic_CXX= lt_prog_compiler_static_CXX='-non_shared' ;; *) ;; esac ;; psos*) ;; solaris*) case $cc_basename in CC*) # Sun C++ 4.2, 5.x and Centerline C++ lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-Bstatic' lt_prog_compiler_wl_CXX='-Qoption ld ' ;; gcx*) # Green Hills C++ Compiler lt_prog_compiler_pic_CXX='-PIC' ;; *) ;; esac ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x lt_prog_compiler_pic_CXX='-pic' lt_prog_compiler_static_CXX='-Bstatic' ;; lcc*) # Lucid lt_prog_compiler_pic_CXX='-pic' ;; *) ;; esac ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) case $cc_basename in CC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-Bstatic' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 lt_prog_compiler_pic_CXX='-KPIC' ;; *) ;; esac ;; vxworks*) ;; *) lt_prog_compiler_can_build_shared_CXX=no ;; esac fi case $host_os in # For platforms which do not support PIC, -DPIC is meaningless: *djgpp*) lt_prog_compiler_pic_CXX= ;; *) lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" ;; esac { $as_echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_CXX" >&5 $as_echo "$lt_prog_compiler_pic_CXX" >&6; } # # Check to make sure the PIC flag actually works. # if test -n "$lt_prog_compiler_pic_CXX"; then { $as_echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 $as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... " >&6; } if test "${lt_cv_prog_compiler_pic_works_CXX+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic_works_CXX=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. 
lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:15947: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:15951: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_pic_works_CXX=yes fi fi $RM conftest* fi { $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_pic_works_CXX" >&5 $as_echo "$lt_cv_prog_compiler_pic_works_CXX" >&6; } if test x"$lt_cv_prog_compiler_pic_works_CXX" = xyes; then case $lt_prog_compiler_pic_CXX in "" | " "*) ;; *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;; esac else lt_prog_compiler_pic_CXX= lt_prog_compiler_can_build_shared_CXX=no fi fi # # Check to make sure the static flag actually works. # wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\" { $as_echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 $as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } if test "${lt_cv_prog_compiler_static_works_CXX+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_static_works_CXX=no save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS $lt_tmp_static_flag" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&5 $ECHO "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_static_works_CXX=yes fi else lt_cv_prog_compiler_static_works_CXX=yes fi fi $RM -r conftest* LDFLAGS="$save_LDFLAGS" fi { $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_static_works_CXX" >&5 $as_echo "$lt_cv_prog_compiler_static_works_CXX" >&6; } if test x"$lt_cv_prog_compiler_static_works_CXX" = xyes; then : else lt_prog_compiler_static_CXX= fi { $as_echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if test "${lt_cv_prog_compiler_c_o_CXX+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o_CXX=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:16046: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? 
cat out/conftest.err >&5 echo "$as_me:16050: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o_CXX=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o_CXX" >&5 $as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } { $as_echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if test "${lt_cv_prog_compiler_c_o_CXX+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o_CXX=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:16098: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:16102: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o_CXX=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o_CXX" >&5 $as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } hard_links="nottested" if test "$lt_cv_prog_compiler_c_o_CXX" = no && test "$need_locks" != no; then # do not overwrite the value of need_locks provided by the user { $as_echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 $as_echo_n "checking if we can lock with hard links... 
" >&6; } hard_links=yes $RM conftest* ln conftest.a conftest.b 2>/dev/null && hard_links=no touch conftest.a ln conftest.a conftest.b 2>&5 || hard_links=no ln conftest.a conftest.b 2>/dev/null && hard_links=no { $as_echo "$as_me:$LINENO: result: $hard_links" >&5 $as_echo "$hard_links" >&6; } if test "$hard_links" = no; then { $as_echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 $as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} need_locks=warn fi else need_locks=no fi { $as_echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' case $host_os in aix[4-9]*) # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to AIX nm, but means don't demangle with GNU nm if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' else export_symbols_cmds_CXX='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' fi ;; pw32*) export_symbols_cmds_CXX="$ltdll_cmds" ;; cygwin* | mingw* | cegcc*) export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;/^.*[ ]__nm__/s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' ;; *) export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ;; esac exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' { $as_echo "$as_me:$LINENO: result: $ld_shlibs_CXX" >&5 $as_echo "$ld_shlibs_CXX" >&6; } test "$ld_shlibs_CXX" = no && can_build_shared=no with_gnu_ld_CXX=$with_gnu_ld # # Do we need to explicitly link libc? # case "x$archive_cmds_need_lc_CXX" in x|xyes) # Assume -lc should be added archive_cmds_need_lc_CXX=yes if test "$enable_shared" = yes && test "$GCC" = yes; then case $archive_cmds_CXX in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. { $as_echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5 $as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } $RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$lt_prog_compiler_wl_CXX pic_flag=$lt_prog_compiler_pic_CXX compiler_flags=-v linker_flags=-v verstring= output_objdir=. 
libname=conftest lt_save_allow_undefined_flag=$allow_undefined_flag_CXX allow_undefined_flag_CXX= if { (eval echo "$as_me:$LINENO: \"$archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\"") >&5 (eval $archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } then archive_cmds_need_lc_CXX=no else archive_cmds_need_lc_CXX=yes fi allow_undefined_flag_CXX=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* { $as_echo "$as_me:$LINENO: result: $archive_cmds_need_lc_CXX" >&5 $as_echo "$archive_cmds_need_lc_CXX" >&6; } ;; esac fi ;; esac { $as_echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 $as_echo_n "checking dynamic linker characteristics... " >&6; } library_names_spec= libname_spec='lib$name' soname_spec= shrext_cmds=".so" postinstall_cmds= postuninstall_cmds= finish_cmds= finish_eval= shlibpath_var= shlibpath_overrides_runpath=unknown version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" need_lib_prefix=unknown hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments need_version=unknown case $host_os in aix3*) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' shlibpath_var=LIBPATH # AIX 3 has no versioning support, so we append a major version to the name. soname_spec='${libname}${release}${shared_ext}$major' ;; aix[4-9]*) version_type=linux need_lib_prefix=no need_version=no hardcode_into_libs=yes if test "$host_cpu" = ia64; then # AIX 5 supports IA64 library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH else # With GCC up to 2.95.x, collect2 would create an import file # for dependence libraries. The import file would start with # the line `#! .'. This would cause the generated library to # depend on `.', always an invalid library. This was fixed in # development snapshots of GCC prior to 3.0. case $host_os in aix4 | aix4.[01] | aix4.[01].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then : else can_build_shared=no fi ;; esac # AIX (on Power*) has no versioning support, so currently we can not hardcode correct # soname into executable. Probably we can add versioning support to # collect2, so additional links can be useful in future. if test "$aix_use_runtimelinking" = yes; then # If using run time linking (on AIX 4.2 or later) use lib.so # instead of lib.a to let people know that these are not # typical AIX shared libraries. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' else # We preserve .a as extension for shared libraries through AIX4.2 # and later when we are not doing run time linking. library_names_spec='${libname}${release}.a $libname.a' soname_spec='${libname}${release}${shared_ext}$major' fi shlibpath_var=LIBPATH fi ;; amigaos*) case $host_cpu in powerpc) # Since July 2007 AmigaOS4 officially supports .so libraries. # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. 
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ;; m68k) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$ECHO "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; esac ;; beos*) library_names_spec='${libname}${shared_ext}' dynamic_linker="$host_os ld.so" shlibpath_var=LIBRARY_PATH ;; bsdi[45]*) version_type=linux need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" # the default ld.so.conf also contains /usr/contrib/lib and # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow # libtool to hard-code these into programs ;; cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=".dll" need_version=no need_lib_prefix=no case $GCC,$host_os in yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \${file}`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" ;; mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' sys_lib_search_path_spec=`$CC -print-search-dirs | $GREP "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then # It is most probably a Windows format PATH printed by # mingw gcc, but we are running on Cygwin. Gcc prints its search # path with ; separators, and with drive letters. We can handle the # drive letters (cygwin fileutils understands them), so leave them, # especially as we might pass files found there to a mingw objdump, # which wouldn't understand a cygwinified path. Ahh. 
sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi ;; pw32*) # pw32 DLLs use 'pw' prefix rather than 'lib' library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ;; esac ;; *) library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' ;; esac dynamic_linker='Win32 ld.exe' # FIXME: first we should search . and the directory the executable is in shlibpath_var=PATH ;; darwin* | rhapsody*) dynamic_linker="$host_os dyld" version_type=darwin need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' soname_spec='${libname}${release}${major}$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; dgux*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; freebsd1*) dynamic_linker=no ;; freebsd* | dragonfly*) # DragonFly does not have aout. When/if they implement a new # versioning mechanism, adjust this. if test -x /usr/bin/objformat; then objformat=`/usr/bin/objformat` else case $host_os in freebsd[123]*) objformat=aout ;; *) objformat=elf ;; esac fi version_type=freebsd-$objformat case $version_type in freebsd-elf*) library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' need_version=no need_lib_prefix=no ;; freebsd-*) library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' need_version=yes ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in freebsd2*) shlibpath_overrides_runpath=yes ;; freebsd3.[01]* | freebsdelf3.[01]*) shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; *) # from 4.6 on, and DragonFly shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; esac ;; gnu*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH hardcode_into_libs=yes ;; hpux9* | hpux10* | hpux11*) # Give a soname corresponding to the major version so that dld.sl refuses to # link against other versions. version_type=sunos need_lib_prefix=no need_version=no case $host_cpu in ia64*) shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' if test "X$HPUX_IA64_MODE" = X32; then sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" else sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" fi sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; hppa*64*) shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' ;; esac # HP-UX runs *really* slowly unless shared libraries are mode 555. postinstall_cmds='chmod 555 $lib' ;; interix[3-9]*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; irix5* | irix6* | nonstopux*) case $host_os in nonstopux*) version_type=nonstopux ;; *) if test "$lt_cv_prog_gnu_ld" = yes; then version_type=linux else version_type=irix fi ;; esac need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' case $host_os in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in # libtool.m4 will add one of these switches to LD *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= libmagic=32-bit;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 libmagic=N32;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 libmagic=64-bit;; *) libsuff= shlibsuff= libmagic=never-match;; esac ;; esac shlibpath_var=LD_LIBRARY${shlibsuff}_PATH shlibpath_overrides_runpath=no sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" hardcode_into_libs=yes ;; # No shared lib support for Linux oldld, aout, or coff. linux*oldld* | linux*aout* | linux*coff*) dynamic_linker=no ;; # This must be Linux ELF. 
linux* | k*bsd*-gnu) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$lt_prog_compiler_wl_CXX\"; \ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec_CXX\"" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then shlibpath_overrides_runpath=yes fi else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LDFLAGS=$save_LDFLAGS libdir=$save_libdir # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes # Add ABI-specific directories to the system library path. sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" # Append ld.so.conf contents to the search path if test -f /etc/ld.so.conf; then lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. Since this was broken with cross compilers, # most powerpc-linux boxes support dynamic linking these days and # people can always --disable-shared, the test was removed, and we # assume the GNU/Linux dynamic linker is in use. 
dynamic_linker='GNU/Linux ld.so' ;; netbsd*) version_type=sunos need_lib_prefix=no need_version=no if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='NetBSD ld.elf_so' fi shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; newsos6) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; *nto* | *qnx*) version_type=qnx need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='ldqnx.so' ;; openbsd*) version_type=sunos sys_lib_dlsearch_path_spec="/usr/lib" need_lib_prefix=no # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. case $host_os in openbsd3.3 | openbsd3.3.*) need_version=yes ;; *) need_version=no ;; esac library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then case $host_os in openbsd2.[89] | openbsd2.[89].*) shlibpath_overrides_runpath=no ;; *) shlibpath_overrides_runpath=yes ;; esac else shlibpath_overrides_runpath=yes fi ;; os2*) libname_spec='$name' shrext_cmds=".dll" need_lib_prefix=no library_names_spec='$libname${shared_ext} $libname.a' dynamic_linker='OS/2 ld.exe' shlibpath_var=LIBPATH ;; osf3* | osf4* | osf5*) version_type=osf need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" ;; rdos*) dynamic_linker=no ;; solaris*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes # ldd complains unless libraries are executable postinstall_cmds='chmod +x $lib' ;; sunos4*) version_type=sunos library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes if test "$with_gnu_ld" = yes; then need_lib_prefix=no fi need_version=yes ;; sysv4 | sysv4.3*) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' 
soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH case $host_vendor in sni) shlibpath_overrides_runpath=no need_lib_prefix=no runpath_var=LD_RUN_PATH ;; siemens) need_lib_prefix=no ;; motorola) need_lib_prefix=no need_version=no shlibpath_overrides_runpath=no sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ;; esac ;; sysv4*MP*) if test -d /usr/nec ;then version_type=linux library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' soname_spec='$libname${shared_ext}.$major' shlibpath_var=LD_LIBRARY_PATH fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) version_type=freebsd-elf need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test "$with_gnu_ld" = yes; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ;; esac fi sys_lib_dlsearch_path_spec='/usr/lib' ;; tpf*) # TPF is a cross-target only. Preferred cross-host = GNU/Linux. version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; uts4*) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac { $as_echo "$as_me:$LINENO: result: $dynamic_linker" >&5 $as_echo "$dynamic_linker" >&6; } test "$dynamic_linker" = no && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test "$GCC" = yes; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" fi if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" fi { $as_echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 $as_echo_n "checking how to hardcode library paths into programs... " >&6; } hardcode_action_CXX= if test -n "$hardcode_libdir_flag_spec_CXX" || test -n "$runpath_var_CXX" || test "X$hardcode_automatic_CXX" = "Xyes" ; then # We can hardcode non-existent directories. if test "$hardcode_direct_CXX" != no && # If the only mechanism to avoid hardcoding is shlibpath_var, we # have to relink, otherwise we might link with an installed library # when we should be linking with a yet-to-be-installed one ## test "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" != no && test "$hardcode_minus_L_CXX" != no; then # Linking always hardcodes the temporary library directory. hardcode_action_CXX=relink else # We can link without hardcoding, and we can hardcode nonexisting dirs. hardcode_action_CXX=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. 
hardcode_action_CXX=unsupported fi { $as_echo "$as_me:$LINENO: result: $hardcode_action_CXX" >&5 $as_echo "$hardcode_action_CXX" >&6; } if test "$hardcode_action_CXX" = relink || test "$inherit_rpath_CXX" = yes; then # Fast installation is not supported enable_fast_install=no elif test "$shlibpath_overrides_runpath" = yes || test "$enable_shared" = no; then # Fast installation is not necessary enable_fast_install=needless fi fi # test -n "$compiler" CC=$lt_save_CC LDCXX=$LD LD=$lt_save_LD GCC=$lt_save_GCC with_gnu_ld=$lt_save_with_gnu_ld lt_cv_path_LDCXX=$lt_cv_path_LD lt_cv_path_LD=$lt_save_path_LD lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld fi # test "$_lt_caught_CXX_error" != yes ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_config_commands="$ac_config_commands libtool" # Only expand once: # Extract the first word of "perl", so it can be a program name with args. set dummy perl; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_PERL+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$PERL"; then ac_cv_prog_PERL="$PERL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_PERL="perl" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_prog_PERL" && ac_cv_prog_PERL=":" fi fi PERL=$ac_cv_prog_PERL if test -n "$PERL"; then { $as_echo "$as_me:$LINENO: result: $PERL" >&5 $as_echo "$PERL" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi # EL-5 compatibility. $(mkdir_p) is now obsolete. test -n "$MKDIR_P" || MKDIR_P="$mkdir_p" # Check whether --with-docdir was given. 
if test "${with_docdir+set}" = set; then withval=$with_docdir; case $withval in yes|no) { { $as_echo "$as_me:$LINENO: error: Invalid DIR" >&5 $as_echo "$as_me: error: Invalid DIR" >&2;} { (exit 1); exit 1; }; } ;; *) docdir="$withval" ;; esac else if test -z "$docdir"; then docdir='${datadir}/doc/${PACKAGE}' fi fi # Use arc for "pkgdir" instead of nordugrid-arc (@PACKAGE@) pkgdatadir='${datadir}/arc' pkgincludedir='${includedir}/arc' pkglibdir='${libdir}/arc' pkglibexecdir='${libexecdir}/arc' ARCCLIENT_LIBS='$(top_builddir)/src/hed/libs/compute/libarccompute.la' ARCCLIENT_CFLAGS='-I$(top_srcdir)/include' ARCCOMMON_LIBS='$(top_builddir)/src/hed/libs/common/libarccommon.la' ARCCOMMON_CFLAGS='-I$(top_srcdir)/include' ARCCREDENTIAL_LIBS='$(top_builddir)/src/hed/libs/credential/libarccredential.la' ARCCREDENTIAL_CFLAGS='-I$(top_srcdir)/include' ARCDATA_LIBS='$(top_builddir)/src/hed/libs/data/libarcdata.la' ARCDATA_CFLAGS='-I$(top_srcdir)/include' ARCJOB_LIBS='$(top_builddir)/src/hed/libs/job/libarcjob.la' ARCJOB_CFLAGS='-I$(top_srcdir)/include' ARCLOADER_LIBS='$(top_builddir)/src/hed/libs/loader/libarcloader.la' ARCLOADER_CFLAGS='-I$(top_srcdir)/include' ARCMESSAGE_LIBS='$(top_builddir)/src/hed/libs/message/libarcmessage.la' ARCMESSAGE_CFLAGS='-I$(top_srcdir)/include' ARCSECURITY_LIBS='$(top_builddir)/src/hed/libs/security/libarcsecurity.la' ARCSECURITY_CFLAGS='-I$(top_srcdir)/include' ARCINFOSYS_LIBS='$(top_builddir)/src/hed/libs/infosys/libarcinfosys.la' ARCINFOSYS_CFLAGS='-I$(top_srcdir)/include' ARCWS_LIBS='$(top_builddir)/src/hed/libs/ws/libarcws.la' ARCWS_CFLAGS='-I$(top_srcdir)/include' ARCWSSECURITY_LIBS='$(top_builddir)/src/hed/libs/ws-security/libarcwssecurity.la' ARCWSSECURITY_CFLAGS='-I$(top_srcdir)/include' ARCXMLSEC_LIBS='$(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la' ARCXMLSEC_CFLAGS='-I$(top_srcdir)/include' get_relative_path() { olddir=`echo $1 | sed -e 's|/+|/|g' -e 's|^/||' -e 's|/*$|/|'` newdir=`echo $2 | sed -e 's|/+|/|g' -e 's|^/||' -e 's|/*$|/|'` O_IFS=$IFS IFS=/ relative="" common="" for i in $olddir; do if echo "$newdir" | grep -q "^$common$i/"; then common="$common$i/" else relative="../$relative" fi done IFS=$O_IFS echo $newdir | sed "s|^$common|$relative|" | sed 's|/*$||' } if test "X$prefix" = "XNONE"; then acl_final_prefix="$ac_default_prefix" else acl_final_prefix="$prefix" fi if test "X$exec_prefix" = "XNONE"; then acl_final_exec_prefix='${prefix}' else acl_final_exec_prefix="$exec_prefix" fi acl_save_prefix="$prefix" prefix="$acl_final_prefix" eval acl_final_exec_prefix=\"$acl_final_exec_prefix\" prefix="$acl_save_prefix" acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval instprefix="\"${exec_prefix}\"" eval arc_libdir="\"${libdir}\"" eval arc_bindir="\"${bindir}\"" eval arc_sbindir="\"${sbindir}\"" eval arc_pkglibdir="\"${libdir}/arc\"" eval arc_pkglibexecdir="\"${libexecdir}/arc\"" # It seems arc_datadir should be evaluated twice to be expanded fully. 
eval arc_datadir="\"${datadir}/arc\"" eval arc_datadir="\"${arc_datadir}\"" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" libsubdir=`get_relative_path "$instprefix" "$arc_libdir"` pkglibsubdir=`get_relative_path "$instprefix" "$arc_pkglibdir"` pkglibexecsubdir=`get_relative_path "$instprefix" "$arc_pkglibexecdir"` pkgdatasubdir=`get_relative_path "$instprefix" "$arc_datadir"` pkglibdir_rel_to_pkglibexecdir=`get_relative_path "$arc_pkglibexecdir" "$arc_pkglibdir"` sbindir_rel_to_pkglibexecdir=`get_relative_path "$arc_pkglibexecdir" "$arc_sbindir"` bindir_rel_to_pkglibexecdir=`get_relative_path "$arc_pkglibexecdir" "$arc_bindir"` pkgdatadir_rel_to_pkglibexecdir=`get_relative_path "$arc_pkglibexecdir" "$arc_datadir"` { $as_echo "$as_me:$LINENO: pkglib subdirectory is: $pkglibsubdir" >&5 $as_echo "$as_me: pkglib subdirectory is: $pkglibsubdir" >&6;} { $as_echo "$as_me:$LINENO: pkglibexec subdirectory is: $pkglibexecsubdir" >&5 $as_echo "$as_me: pkglibexec subdirectory is: $pkglibexecsubdir" >&6;} { $as_echo "$as_me:$LINENO: relative path of pkglib to pkglibexec is: $pkglibdir_rel_to_pkglibexecdir" >&5 $as_echo "$as_me: relative path of pkglib to pkglibexec is: $pkglibdir_rel_to_pkglibexecdir" >&6;} cat >>confdefs.h <<_ACEOF #define INSTPREFIX "${instprefix}" _ACEOF cat >>confdefs.h <<_ACEOF #define LIBSUBDIR "${libsubdir}" _ACEOF cat >>confdefs.h <<_ACEOF #define PKGLIBSUBDIR "${pkglibsubdir}" _ACEOF cat >>confdefs.h <<_ACEOF #define PKGLIBEXECSUBDIR "${pkglibexecsubdir}" _ACEOF cat >>confdefs.h <<_ACEOF #define PKGDATASUBDIR "${pkgdatasubdir}" _ACEOF # Check whether --with-systemd-units-location was given. if test "${with_systemd_units_location+set}" = set; then withval=$with_systemd_units_location; unitsdir="$withval" else unitsdir= fi if test "x$unitsdir" != "x"; then SYSTEMD_UNITS_ENABLED_TRUE= SYSTEMD_UNITS_ENABLED_FALSE='#' else SYSTEMD_UNITS_ENABLED_TRUE='#' SYSTEMD_UNITS_ENABLED_FALSE= fi { $as_echo "$as_me:$LINENO: result: $unitsdir" >&5 $as_echo "$unitsdir" >&6; } # Check whether --with-sysv-scripts-location was given. if test "${with_sysv_scripts_location+set}" = set; then withval=$with_sysv_scripts_location; initddir="$withval" else initddir= case "${host}" in *linux* | *kfreebsd* | *gnu* ) for i in init.d rc.d/init.d rc.d; do if test -d "/etc/$i" -a ! -h "/etc/$i" ; then initddir="$sysconfdir/$i" break fi done if test -z "$initddir"; then { $as_echo "$as_me:$LINENO: WARNING: could not find a suitable location for the SYSV init scripts - not installing" >&5 $as_echo "$as_me: WARNING: could not find a suitable location for the SYSV init scripts - not installing" >&2;} fi ;; esac fi if test "x$unitsdir" = "x" && test "x$initddir" != "x"; then SYSV_SCRIPTS_ENABLED_TRUE= SYSV_SCRIPTS_ENABLED_FALSE='#' else SYSV_SCRIPTS_ENABLED_TRUE='#' SYSV_SCRIPTS_ENABLED_FALSE= fi { $as_echo "$as_me:$LINENO: result: $initddir" >&5 $as_echo "$initddir" >&6; } # Check whether --with-cron-scripts-prefix was given. if test "${with_cron_scripts_prefix+set}" = set; then withval=$with_cron_scripts_prefix; cronddir="$withval" else cronddir="$sysconfdir/cron.d" fi # gettext MKINSTALLDIRS= if test -n "$ac_aux_dir"; then case "$ac_aux_dir" in /*) MKINSTALLDIRS="$ac_aux_dir/mkinstalldirs" ;; *) MKINSTALLDIRS="\$(top_builddir)/$ac_aux_dir/mkinstalldirs" ;; esac fi if test -z "$MKINSTALLDIRS"; then MKINSTALLDIRS="\$(top_srcdir)/mkinstalldirs" fi { $as_echo "$as_me:$LINENO: checking whether NLS is requested" >&5 $as_echo_n "checking whether NLS is requested... 
" >&6; } # Check whether --enable-nls was given. if test "${enable_nls+set}" = set; then enableval=$enable_nls; USE_NLS=$enableval else USE_NLS=yes fi { $as_echo "$as_me:$LINENO: result: $USE_NLS" >&5 $as_echo "$USE_NLS" >&6; } # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi # Find out how to test for executable files. Don't use a zero-byte file, # as systems may use methods other than mode bits to determine executability. cat >conf$$.file <<_ASEOF #! /bin/sh exit 0 _ASEOF chmod +x conf$$.file if test -x conf$$.file >/dev/null 2>&1; then ac_executable_p="test -x" else ac_executable_p="test -f" fi rm -f conf$$.file # Extract the first word of "msgfmt", so it can be a program name with args. set dummy msgfmt; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_MSGFMT+set}" = set; then $as_echo_n "(cached) " >&6 else case "$MSGFMT" in [\\/]* | ?:[\\/]*) ac_cv_path_MSGFMT="$MSGFMT" # Let the user override the test with a path. ;; *) ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$ac_save_IFS" test -z "$ac_dir" && ac_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then if $ac_dir/$ac_word --statistics /dev/null >/dev/null 2>&1 && (if $ac_dir/$ac_word --statistics /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then ac_cv_path_MSGFMT="$ac_dir/$ac_word$ac_exec_ext" break 2 fi fi done done IFS="$ac_save_IFS" test -z "$ac_cv_path_MSGFMT" && ac_cv_path_MSGFMT=":" ;; esac fi MSGFMT="$ac_cv_path_MSGFMT" if test "$MSGFMT" != ":"; then { $as_echo "$as_me:$LINENO: result: $MSGFMT" >&5 $as_echo "$MSGFMT" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "gmsgfmt", so it can be a program name with args. set dummy gmsgfmt; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_GMSGFMT+set}" = set; then $as_echo_n "(cached) " >&6 else case $GMSGFMT in [\\/]* | ?:[\\/]*) ac_cv_path_GMSGFMT="$GMSGFMT" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_GMSGFMT="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_GMSGFMT" && ac_cv_path_GMSGFMT="$MSGFMT" ;; esac fi GMSGFMT=$ac_cv_path_GMSGFMT if test -n "$GMSGFMT"; then { $as_echo "$as_me:$LINENO: result: $GMSGFMT" >&5 $as_echo "$GMSGFMT" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi # Find out how to test for executable files. 
Don't use a zero-byte file, # as systems may use methods other than mode bits to determine executability. cat >conf$$.file <<_ASEOF #! /bin/sh exit 0 _ASEOF chmod +x conf$$.file if test -x conf$$.file >/dev/null 2>&1; then ac_executable_p="test -x" else ac_executable_p="test -f" fi rm -f conf$$.file # Extract the first word of "xgettext", so it can be a program name with args. set dummy xgettext; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_XGETTEXT+set}" = set; then $as_echo_n "(cached) " >&6 else case "$XGETTEXT" in [\\/]* | ?:[\\/]*) ac_cv_path_XGETTEXT="$XGETTEXT" # Let the user override the test with a path. ;; *) ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$ac_save_IFS" test -z "$ac_dir" && ac_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then if $ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null >/dev/null 2>&1 && (if $ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then ac_cv_path_XGETTEXT="$ac_dir/$ac_word$ac_exec_ext" break 2 fi fi done done IFS="$ac_save_IFS" test -z "$ac_cv_path_XGETTEXT" && ac_cv_path_XGETTEXT=":" ;; esac fi XGETTEXT="$ac_cv_path_XGETTEXT" if test "$XGETTEXT" != ":"; then { $as_echo "$as_me:$LINENO: result: $XGETTEXT" >&5 $as_echo "$XGETTEXT" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi rm -f messages.po # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi # Find out how to test for executable files. Don't use a zero-byte file, # as systems may use methods other than mode bits to determine executability. cat >conf$$.file <<_ASEOF #! /bin/sh exit 0 _ASEOF chmod +x conf$$.file if test -x conf$$.file >/dev/null 2>&1; then ac_executable_p="test -x" else ac_executable_p="test -f" fi rm -f conf$$.file # Extract the first word of "msgmerge", so it can be a program name with args. set dummy msgmerge; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_MSGMERGE+set}" = set; then $as_echo_n "(cached) " >&6 else case "$MSGMERGE" in [\\/]* | ?:[\\/]*) ac_cv_path_MSGMERGE="$MSGMERGE" # Let the user override the test with a path. ;; *) ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$ac_save_IFS" test -z "$ac_dir" && ac_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then if $ac_dir/$ac_word --update -q /dev/null /dev/null >/dev/null 2>&1; then ac_cv_path_MSGMERGE="$ac_dir/$ac_word$ac_exec_ext" break 2 fi fi done done IFS="$ac_save_IFS" test -z "$ac_cv_path_MSGMERGE" && ac_cv_path_MSGMERGE=":" ;; esac fi MSGMERGE="$ac_cv_path_MSGMERGE" if test "$MSGMERGE" != ":"; then { $as_echo "$as_me:$LINENO: result: $MSGMERGE" >&5 $as_echo "$MSGMERGE" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "$GMSGFMT" != ":"; then if $GMSGFMT --statistics /dev/null >/dev/null 2>&1 && (if $GMSGFMT --statistics /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then : ; else GMSGFMT=`echo "$GMSGFMT" | sed -e 's,^.*/,,'` { $as_echo "$as_me:$LINENO: result: found $GMSGFMT program is not GNU msgfmt; ignore it" >&5 $as_echo "found $GMSGFMT program is not GNU msgfmt; ignore it" >&6; } GMSGFMT=":" fi fi if test "$XGETTEXT" != ":"; then if $XGETTEXT --omit-header --copyright-holder= --msgid-bugs-address= /dev/null >/dev/null 2>&1 && (if $XGETTEXT --omit-header --copyright-holder= --msgid-bugs-address= /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then : ; else { $as_echo "$as_me:$LINENO: result: found xgettext program is not GNU xgettext; ignore it" >&5 $as_echo "found xgettext program is not GNU xgettext; ignore it" >&6; } XGETTEXT=":" fi rm -f messages.po fi ac_config_commands="$ac_config_commands default-1" # Check whether --with-gnu-ld was given. if test "${with_gnu_ld+set}" = set; then withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes else with_gnu_ld=no fi # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. { $as_echo "$as_me:$LINENO: checking for ld used by GCC" >&5 $as_echo_n "checking for ld used by GCC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [\\/]* | [A-Za-z]:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the path of ld ac_prog=`echo $ac_prog| sed 's%\\\\%/%g'` while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then { $as_echo "$as_me:$LINENO: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:$LINENO: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... " >&6; } fi if test "${acl_cv_path_LD+set}" = set; then $as_echo_n "(cached) " >&6 else if test -z "$LD"; then IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR-:}" for ac_dir in $PATH; do test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then acl_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. 
I'd rather use --version, # but apparently some GNU ld's only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. if "$acl_cv_path_LD" -v 2>&1 < /dev/null | egrep '(GNU|with BFD)' > /dev/null; then test "$with_gnu_ld" != no && break else test "$with_gnu_ld" != yes && break fi fi done IFS="$ac_save_ifs" else acl_cv_path_LD="$LD" # Let the user override the test with a path. fi fi LD="$acl_cv_path_LD" if test -n "$LD"; then { $as_echo "$as_me:$LINENO: result: $LD" >&5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -z "$LD" && { { $as_echo "$as_me:$LINENO: error: no acceptable ld found in \$PATH" >&5 $as_echo "$as_me: error: no acceptable ld found in \$PATH" >&2;} { (exit 1); exit 1; }; } { $as_echo "$as_me:$LINENO: checking if the linker ($LD) is GNU ld" >&5 $as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } if test "${acl_cv_prog_gnu_ld+set}" = set; then $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU ld's only accept -v. if $LD -v 2>&1 &5; then acl_cv_prog_gnu_ld=yes else acl_cv_prog_gnu_ld=no fi fi { $as_echo "$as_me:$LINENO: result: $acl_cv_prog_gnu_ld" >&5 $as_echo "$acl_cv_prog_gnu_ld" >&6; } with_gnu_ld=$acl_cv_prog_gnu_ld { $as_echo "$as_me:$LINENO: checking for shared library run path origin" >&5 $as_echo_n "checking for shared library run path origin... " >&6; } if test "${acl_cv_rpath+set}" = set; then $as_echo_n "(cached) " >&6 else CC="$CC" GCC="$GCC" LDFLAGS="$LDFLAGS" LD="$LD" with_gnu_ld="$with_gnu_ld" \ ${CONFIG_SHELL-/bin/sh} "$ac_aux_dir/config.rpath" "$host" > conftest.sh . ./conftest.sh rm -f ./conftest.sh acl_cv_rpath=done fi { $as_echo "$as_me:$LINENO: result: $acl_cv_rpath" >&5 $as_echo "$acl_cv_rpath" >&6; } wl="$acl_cv_wl" libext="$acl_cv_libext" shlibext="$acl_cv_shlibext" hardcode_libdir_flag_spec="$acl_cv_hardcode_libdir_flag_spec" hardcode_libdir_separator="$acl_cv_hardcode_libdir_separator" hardcode_direct="$acl_cv_hardcode_direct" hardcode_minus_L="$acl_cv_hardcode_minus_L" # Check whether --enable-rpath was given. if test "${enable_rpath+set}" = set; then enableval=$enable_rpath; : else enable_rpath=yes fi use_additional=yes acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" # Check whether --with-libiconv-prefix was given. 
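# In the block below --with-libiconv-prefix=DIR makes the iconv search also
# look in DIR/include and DIR/lib, while --without-libiconv-prefix skips the
# additional directories.  Illustrative invocation (the prefix is an example
# only):
#   ./configure --with-libiconv-prefix=/opt/libiconv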
if test "${with_libiconv_prefix+set}" = set; then withval=$with_libiconv_prefix; if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" else additional_includedir="$withval/include" additional_libdir="$withval/lib" fi fi fi LIBICONV= LTLIBICONV= INCICONV= rpathdirs= ltrpathdirs= names_already_handled= names_next_round='iconv ' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= for name in $names_this_round; do already_handled= for n in $names_already_handled; do if test "$n" = "$name"; then already_handled=yes break fi done if test -z "$already_handled"; then names_already_handled="$names_already_handled $name" uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./-|ABCDEFGHIJKLMNOPQRSTUVWXYZ___|'` eval value=\"\$HAVE_LIB$uppername\" if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" test -z "$value" || LIBICONV="${LIBICONV}${LIBICONV:+ }$value" eval value=\"\$LTLIB$uppername\" test -z "$value" || LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }$value" else : fi else found_dir= found_la= found_so= found_a= if test $use_additional = yes; then if test -n "$shlibext" && test -f "$additional_libdir/lib$name.$shlibext"; then found_dir="$additional_libdir" found_so="$additional_libdir/lib$name.$shlibext" if test -f "$additional_libdir/lib$name.la"; then found_la="$additional_libdir/lib$name.la" fi else if test -f "$additional_libdir/lib$name.$libext"; then found_dir="$additional_libdir" found_a="$additional_libdir/lib$name.$libext" if test -f "$additional_libdir/lib$name.la"; then found_la="$additional_libdir/lib$name.la" fi fi fi fi if test "X$found_dir" = "X"; then for x in $LDFLAGS $LTLIBICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" case "$x" in -L*) dir=`echo "X$x" | sed -e 's/^X-L//'` if test -n "$shlibext" && test -f "$dir/lib$name.$shlibext"; then found_dir="$dir" found_so="$dir/lib$name.$shlibext" if test -f "$dir/lib$name.la"; then found_la="$dir/lib$name.la" fi else if test -f "$dir/lib$name.$libext"; then found_dir="$dir" found_a="$dir/lib$name.$libext" if test -f "$dir/lib$name.la"; then found_la="$dir/lib$name.la" fi fi fi ;; esac if test "X$found_dir" != "X"; then break fi done fi if test "X$found_dir" != "X"; then LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/lib"; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" else haveit= for x in $ltrpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $found_dir" fi if test "$hardcode_direct" = yes; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" else if test -n "$hardcode_libdir_flag_spec" && test "$hardcode_minus_L" = no; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" haveit= for x in $rpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $found_dir" fi else haveit= for x in $LDFLAGS $LIBICONV; do acl_save_prefix="$prefix" 
prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then LIBICONV="${LIBICONV}${LIBICONV:+ }-L$found_dir" fi if test "$hardcode_minus_L" != no; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" else LIBICONV="${LIBICONV}${LIBICONV:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_a" else LIBICONV="${LIBICONV}${LIBICONV:+ }-L$found_dir -l$name" fi fi additional_includedir= case "$found_dir" in */lib | */lib/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e 's,/lib/*$,,'` additional_includedir="$basedir/include" ;; esac if test "X$additional_includedir" != "X"; then if test "X$additional_includedir" != "X/usr/include"; then haveit= if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux*) haveit=yes;; esac fi fi if test -z "$haveit"; then for x in $CPPFLAGS $INCICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then INCICONV="${INCICONV}${INCICONV:+ }-I$additional_includedir" fi fi fi fi fi if test -n "$found_la"; then save_libdir="$libdir" case "$found_la" in */* | *\\*) . "$found_la" ;; *) . "./$found_la" ;; esac libdir="$save_libdir" for dep in $dependency_libs; do case "$dep" in -L*) additional_libdir=`echo "X$dep" | sed -e 's/^X-L//'` if test "X$additional_libdir" != "X/usr/lib"; then haveit= if test "X$additional_libdir" = "X/usr/local/lib"; then if test -n "$GCC"; then case $host_os in linux*) haveit=yes;; esac fi fi if test -z "$haveit"; then haveit= for x in $LDFLAGS $LIBICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then LIBICONV="${LIBICONV}${LIBICONV:+ }-L$additional_libdir" fi fi haveit= for x in $LDFLAGS $LTLIBICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-L$additional_libdir" fi fi fi fi ;; -R*) dir=`echo "X$dep" | sed -e 's/^X-R//'` if test "$enable_rpath" != no; then haveit= for x in $rpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $dir" fi haveit= for x in $ltrpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $dir" fi fi ;; -l*) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` ;; *.la) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) LIBICONV="${LIBICONV}${LIBICONV:+ }$dep" LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }$dep" 
;; esac done fi else LIBICONV="${LIBICONV}${LIBICONV:+ }-l$name" LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-l$name" fi fi fi done done if test "X$rpathdirs" != "X"; then if test -n "$hardcode_libdir_separator"; then alldirs= for found_dir in $rpathdirs; do alldirs="${alldirs}${alldirs:+$hardcode_libdir_separator}$found_dir" done acl_save_libdir="$libdir" libdir="$alldirs" eval flag=\"$hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBICONV="${LIBICONV}${LIBICONV:+ }$flag" else for found_dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBICONV="${LIBICONV}${LIBICONV:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then for found_dir in $ltrpathdirs; do LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-R$found_dir" done fi { $as_echo "$as_me:$LINENO: checking whether NLS is requested" >&5 $as_echo_n "checking whether NLS is requested... " >&6; } # Check whether --enable-nls was given. if test "${enable_nls+set}" = set; then enableval=$enable_nls; USE_NLS=$enableval else USE_NLS=yes fi { $as_echo "$as_me:$LINENO: result: $USE_NLS" >&5 $as_echo "$USE_NLS" >&6; } LIBINTL= LTLIBINTL= POSUB= if test "$USE_NLS" = "yes"; then gt_use_preinstalled_gnugettext=no { $as_echo "$as_me:$LINENO: checking for GNU gettext in libc" >&5 $as_echo_n "checking for GNU gettext in libc... " >&6; } if test "${gt_cv_func_gnugettext1_libc+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include extern int _nl_msg_cat_cntr; extern int *_nl_domain_bindings; int main () { bindtextdomain ("", ""); return (int) gettext ("") + _nl_msg_cat_cntr + *_nl_domain_bindings ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then gt_cv_func_gnugettext1_libc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 gt_cv_func_gnugettext1_libc=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $gt_cv_func_gnugettext1_libc" >&5 $as_echo "$gt_cv_func_gnugettext1_libc" >&6; } if test "$gt_cv_func_gnugettext1_libc" != "yes"; then am_save_CPPFLAGS="$CPPFLAGS" for element in $INCICONV; do haveit= for x in $CPPFLAGS; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X$element"; then haveit=yes break fi done if test -z "$haveit"; then CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }$element" fi done { $as_echo "$as_me:$LINENO: checking for iconv" >&5 $as_echo_n "checking for iconv... 
" >&6; } if test "${am_cv_func_iconv+set}" = set; then $as_echo_n "(cached) " >&6 else am_cv_func_iconv="no, consider installing GNU libiconv" am_cv_lib_iconv=no cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include int main () { iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then am_cv_func_iconv=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext if test "$am_cv_func_iconv" != yes; then am_save_LIBS="$LIBS" LIBS="$LIBS $LIBICONV" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include int main () { iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then am_cv_lib_iconv=yes am_cv_func_iconv=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS="$am_save_LIBS" fi fi { $as_echo "$as_me:$LINENO: result: $am_cv_func_iconv" >&5 $as_echo "$am_cv_func_iconv" >&6; } if test "$am_cv_func_iconv" = yes; then cat >>confdefs.h <<\_ACEOF #define HAVE_ICONV 1 _ACEOF fi if test "$am_cv_lib_iconv" = yes; then { $as_echo "$as_me:$LINENO: checking how to link with libiconv" >&5 $as_echo_n "checking how to link with libiconv... " >&6; } { $as_echo "$as_me:$LINENO: result: $LIBICONV" >&5 $as_echo "$LIBICONV" >&6; } else CPPFLAGS="$am_save_CPPFLAGS" LIBICONV= LTLIBICONV= fi use_additional=yes acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" # Check whether --with-libintl-prefix was given. 
if test "${with_libintl_prefix+set}" = set; then withval=$with_libintl_prefix; if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" else additional_includedir="$withval/include" additional_libdir="$withval/lib" fi fi fi LIBINTL= LTLIBINTL= INCINTL= rpathdirs= ltrpathdirs= names_already_handled= names_next_round='intl ' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= for name in $names_this_round; do already_handled= for n in $names_already_handled; do if test "$n" = "$name"; then already_handled=yes break fi done if test -z "$already_handled"; then names_already_handled="$names_already_handled $name" uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./-|ABCDEFGHIJKLMNOPQRSTUVWXYZ___|'` eval value=\"\$HAVE_LIB$uppername\" if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" test -z "$value" || LIBINTL="${LIBINTL}${LIBINTL:+ }$value" eval value=\"\$LTLIB$uppername\" test -z "$value" || LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }$value" else : fi else found_dir= found_la= found_so= found_a= if test $use_additional = yes; then if test -n "$shlibext" && test -f "$additional_libdir/lib$name.$shlibext"; then found_dir="$additional_libdir" found_so="$additional_libdir/lib$name.$shlibext" if test -f "$additional_libdir/lib$name.la"; then found_la="$additional_libdir/lib$name.la" fi else if test -f "$additional_libdir/lib$name.$libext"; then found_dir="$additional_libdir" found_a="$additional_libdir/lib$name.$libext" if test -f "$additional_libdir/lib$name.la"; then found_la="$additional_libdir/lib$name.la" fi fi fi fi if test "X$found_dir" = "X"; then for x in $LDFLAGS $LTLIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" case "$x" in -L*) dir=`echo "X$x" | sed -e 's/^X-L//'` if test -n "$shlibext" && test -f "$dir/lib$name.$shlibext"; then found_dir="$dir" found_so="$dir/lib$name.$shlibext" if test -f "$dir/lib$name.la"; then found_la="$dir/lib$name.la" fi else if test -f "$dir/lib$name.$libext"; then found_dir="$dir" found_a="$dir/lib$name.$libext" if test -f "$dir/lib$name.la"; then found_la="$dir/lib$name.la" fi fi fi ;; esac if test "X$found_dir" != "X"; then break fi done fi if test "X$found_dir" != "X"; then LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/lib"; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" else haveit= for x in $ltrpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $found_dir" fi if test "$hardcode_direct" = yes; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" else if test -n "$hardcode_libdir_flag_spec" && test "$hardcode_minus_L" = no; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" haveit= for x in $rpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $found_dir" fi else haveit= for x in $LDFLAGS $LIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" 
acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then LIBINTL="${LIBINTL}${LIBINTL:+ }-L$found_dir" fi if test "$hardcode_minus_L" != no; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" else LIBINTL="${LIBINTL}${LIBINTL:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_a" else LIBINTL="${LIBINTL}${LIBINTL:+ }-L$found_dir -l$name" fi fi additional_includedir= case "$found_dir" in */lib | */lib/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e 's,/lib/*$,,'` additional_includedir="$basedir/include" ;; esac if test "X$additional_includedir" != "X"; then if test "X$additional_includedir" != "X/usr/include"; then haveit= if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux*) haveit=yes;; esac fi fi if test -z "$haveit"; then for x in $CPPFLAGS $INCINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then INCINTL="${INCINTL}${INCINTL:+ }-I$additional_includedir" fi fi fi fi fi if test -n "$found_la"; then save_libdir="$libdir" case "$found_la" in */* | *\\*) . "$found_la" ;; *) . "./$found_la" ;; esac libdir="$save_libdir" for dep in $dependency_libs; do case "$dep" in -L*) additional_libdir=`echo "X$dep" | sed -e 's/^X-L//'` if test "X$additional_libdir" != "X/usr/lib"; then haveit= if test "X$additional_libdir" = "X/usr/local/lib"; then if test -n "$GCC"; then case $host_os in linux*) haveit=yes;; esac fi fi if test -z "$haveit"; then haveit= for x in $LDFLAGS $LIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then LIBINTL="${LIBINTL}${LIBINTL:+ }-L$additional_libdir" fi fi haveit= for x in $LDFLAGS $LTLIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-L$additional_libdir" fi fi fi fi ;; -R*) dir=`echo "X$dep" | sed -e 's/^X-R//'` if test "$enable_rpath" != no; then haveit= for x in $rpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $dir" fi haveit= for x in $ltrpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $dir" fi fi ;; -l*) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` ;; *.la) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) LIBINTL="${LIBINTL}${LIBINTL:+ }$dep" LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }$dep" ;; esac done fi else LIBINTL="${LIBINTL}${LIBINTL:+ 
}-l$name" LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-l$name" fi fi fi done done if test "X$rpathdirs" != "X"; then if test -n "$hardcode_libdir_separator"; then alldirs= for found_dir in $rpathdirs; do alldirs="${alldirs}${alldirs:+$hardcode_libdir_separator}$found_dir" done acl_save_libdir="$libdir" libdir="$alldirs" eval flag=\"$hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBINTL="${LIBINTL}${LIBINTL:+ }$flag" else for found_dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBINTL="${LIBINTL}${LIBINTL:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then for found_dir in $ltrpathdirs; do LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-R$found_dir" done fi { $as_echo "$as_me:$LINENO: checking for GNU gettext in libintl" >&5 $as_echo_n "checking for GNU gettext in libintl... " >&6; } if test "${gt_cv_func_gnugettext1_libintl+set}" = set; then $as_echo_n "(cached) " >&6 else gt_save_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS $INCINTL" gt_save_LIBS="$LIBS" LIBS="$LIBS $LIBINTL" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include extern int _nl_msg_cat_cntr; extern #ifdef __cplusplus "C" #endif const char *_nl_expand_alias (); int main () { bindtextdomain ("", ""); return (int) gettext ("") + _nl_msg_cat_cntr + *_nl_expand_alias (0) ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then gt_cv_func_gnugettext1_libintl=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 gt_cv_func_gnugettext1_libintl=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext if test "$gt_cv_func_gnugettext1_libintl" != yes && test -n "$LIBICONV"; then LIBS="$LIBS $LIBICONV" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include extern int _nl_msg_cat_cntr; extern #ifdef __cplusplus "C" #endif const char *_nl_expand_alias (); int main () { bindtextdomain ("", ""); return (int) gettext ("") + _nl_msg_cat_cntr + *_nl_expand_alias (0) ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then LIBINTL="$LIBINTL $LIBICONV" LTLIBINTL="$LTLIBINTL $LTLIBICONV" gt_cv_func_gnugettext1_libintl=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext fi CPPFLAGS="$gt_save_CPPFLAGS" LIBS="$gt_save_LIBS" fi { $as_echo "$as_me:$LINENO: result: $gt_cv_func_gnugettext1_libintl" >&5 $as_echo "$gt_cv_func_gnugettext1_libintl" >&6; } fi if test "$gt_cv_func_gnugettext1_libc" = "yes" \ || { test "$gt_cv_func_gnugettext1_libintl" = "yes" \ && test "$PACKAGE" != gettext-runtime \ && test "$PACKAGE" != gettext-tools; }; then gt_use_preinstalled_gnugettext=yes else LIBINTL= LTLIBINTL= INCINTL= fi if test "$gt_use_preinstalled_gnugettext" = "yes" \ || test "$nls_cv_use_gnu_gettext" = "yes"; then cat >>confdefs.h <<\_ACEOF #define ENABLE_NLS 1 _ACEOF else USE_NLS=no fi fi { $as_echo "$as_me:$LINENO: checking whether to use NLS" >&5 $as_echo_n "checking whether to use NLS... " >&6; } { $as_echo "$as_me:$LINENO: result: $USE_NLS" >&5 $as_echo "$USE_NLS" >&6; } if test "$USE_NLS" = "yes"; then { $as_echo "$as_me:$LINENO: checking where the gettext function comes from" >&5 $as_echo_n "checking where the gettext function comes from... " >&6; } if test "$gt_use_preinstalled_gnugettext" = "yes"; then if test "$gt_cv_func_gnugettext1_libintl" = "yes"; then gt_source="external libintl" else gt_source="libc" fi else gt_source="included intl directory" fi { $as_echo "$as_me:$LINENO: result: $gt_source" >&5 $as_echo "$gt_source" >&6; } fi if test "$USE_NLS" = "yes"; then if test "$gt_use_preinstalled_gnugettext" = "yes"; then if test "$gt_cv_func_gnugettext1_libintl" = "yes"; then { $as_echo "$as_me:$LINENO: checking how to link with libintl" >&5 $as_echo_n "checking how to link with libintl... " >&6; } { $as_echo "$as_me:$LINENO: result: $LIBINTL" >&5 $as_echo "$LIBINTL" >&6; } for element in $INCINTL; do haveit= for x in $CPPFLAGS; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X$element"; then haveit=yes break fi done if test -z "$haveit"; then CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }$element" fi done fi cat >>confdefs.h <<\_ACEOF #define HAVE_GETTEXT 1 _ACEOF cat >>confdefs.h <<\_ACEOF #define HAVE_DCGETTEXT 1 _ACEOF fi POSUB=po fi INTLLIBS="$LIBINTL" # Using Autoconf 2.60 or later you will get a warning during configure: # config.status: WARNING: 'po/Makefile.in.in' seems to ignore the --datarootdir setting # This warning can be removed by bumping the gettext version requirement below from 0.12 to at least 0.15 # See more: info Autoconf "Changed Directory Variables" [ -r $srcdir/po/POTFILES.in ] || touch $srcdir/po/POTFILES.in # Portable 64bit file offsets # Check whether --enable-largefile was given. if test "${enable_largefile+set}" = set; then enableval=$enable_largefile; fi if test "$enable_largefile" != no; then { $as_echo "$as_me:$LINENO: checking for special C compiler options needed for large files" >&5 $as_echo_n "checking for special C compiler options needed for large files... 
" >&6; } if test "${ac_cv_sys_largefile_CC+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_sys_largefile_CC=no if test "$GCC" != yes; then ac_save_CC=$CC while :; do # IRIX 6.2 and later do not support large files by default, # so use the C compiler's -n32 option if that helps. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then break else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext CC="$CC -n32" rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_sys_largefile_CC=' -n32'; break else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext break done CC=$ac_save_CC rm -f conftest.$ac_ext fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_sys_largefile_CC" >&5 $as_echo "$ac_cv_sys_largefile_CC" >&6; } if test "$ac_cv_sys_largefile_CC" != no; then CC=$CC$ac_cv_sys_largefile_CC fi { $as_echo "$as_me:$LINENO: checking for _FILE_OFFSET_BITS value needed for large files" >&5 $as_echo_n "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; } if test "${ac_cv_sys_file_offset_bits+set}" = set; then $as_echo_n "(cached) " >&6 else while :; do cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 
1 : -1]; int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_sys_file_offset_bits=no; break else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #define _FILE_OFFSET_BITS 64 #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_sys_file_offset_bits=64; break else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_sys_file_offset_bits=unknown break done fi { $as_echo "$as_me:$LINENO: result: $ac_cv_sys_file_offset_bits" >&5 $as_echo "$ac_cv_sys_file_offset_bits" >&6; } case $ac_cv_sys_file_offset_bits in #( no | unknown) ;; *) cat >>confdefs.h <<_ACEOF #define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits _ACEOF ;; esac rm -rf conftest* if test $ac_cv_sys_file_offset_bits = unknown; then { $as_echo "$as_me:$LINENO: checking for _LARGE_FILES value needed for large files" >&5 $as_echo_n "checking for _LARGE_FILES value needed for large files... " >&6; } if test "${ac_cv_sys_large_files+set}" = set; then $as_echo_n "(cached) " >&6 else while :; do cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? 
grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_sys_large_files=no; break else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #define _LARGE_FILES 1 #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_sys_large_files=1; break else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_sys_large_files=unknown break done fi { $as_echo "$as_me:$LINENO: result: $ac_cv_sys_large_files" >&5 $as_echo "$ac_cv_sys_large_files" >&6; } case $ac_cv_sys_large_files in #( no | unknown) ;; *) cat >>confdefs.h <<_ACEOF #define _LARGE_FILES $ac_cv_sys_large_files _ACEOF ;; esac rm -rf conftest* fi fi # pkg-config needed for many checks if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}pkg-config", so it can be a program name with args. set dummy ${ac_tool_prefix}pkg-config; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_PKG_CONFIG+set}" = set; then $as_echo_n "(cached) " >&6 else case $PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_PKG_CONFIG="$PKG_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PKG_CONFIG=$ac_cv_path_PKG_CONFIG if test -n "$PKG_CONFIG"; then { $as_echo "$as_me:$LINENO: result: $PKG_CONFIG" >&5 $as_echo "$PKG_CONFIG" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_path_PKG_CONFIG"; then ac_pt_PKG_CONFIG=$PKG_CONFIG # Extract the first word of "pkg-config", so it can be a program name with args. set dummy pkg-config; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if test "${ac_cv_path_ac_pt_PKG_CONFIG+set}" = set; then $as_echo_n "(cached) " >&6 else case $ac_pt_PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_ac_pt_PKG_CONFIG="$ac_pt_PKG_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_ac_pt_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi ac_pt_PKG_CONFIG=$ac_cv_path_ac_pt_PKG_CONFIG if test -n "$ac_pt_PKG_CONFIG"; then { $as_echo "$as_me:$LINENO: result: $ac_pt_PKG_CONFIG" >&5 $as_echo "$ac_pt_PKG_CONFIG" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_pt_PKG_CONFIG" = x; then PKG_CONFIG="no" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac PKG_CONFIG=$ac_pt_PKG_CONFIG fi else PKG_CONFIG="$ac_cv_path_PKG_CONFIG" fi if test "x$PKG_CONFIG" = "xno"; then { { $as_echo "$as_me:$LINENO: error: *** pkg-config not found" >&5 $as_echo "$as_me: error: *** pkg-config not found" >&2;} { (exit 1); exit 1; }; } else pkgconfigdir=${libdir}/pkgconfig fi # Default enable/disable switches # Features enables_ldap=yes enables_mysql=no enables_swig_python=yes enables_swig_java=yes # Features directly related to components enables_cppunit=yes enables_java=yes enables_junit=yes enables_python=yes enables_altpython=yes enables_pylint=yes enables_mock_dmc=no enables_gfal=no enables_s3=no enables_xrootd=yes enables_argus=no enables_xmlsec1=yes enables_dbjstore=yes enables_ldns=yes # Libraries and plugins # Currently no fine-grained choice is supported. # Also this variable is used to check if source # build is needed at all because no component can # be built without HED. enables_hed=yes # Services enables_a_rex_service=yes enables_gridftpd_service=yes enables_ldap_service=yes enables_giis_service=yes enables_cache_service=yes enables_datadelivery_service=yes enables_ldap_monitor=yes enables_ws_monitor=yes # Clients enables_compute_client=yes enables_credentials_client=yes enables_echo_client=yes enables_data_client=yes enables_jura_client=yes enables_saml_client=yes enables_wsrf_client=yes enables_unicore_client=no enables_emies_client=yes # Documentation enables_doc=yes # ACIX cache index enables_acix=yes # Handle group enable/disable switches # Check whether --enable-all was given. 
if test "${enable_all+set}" = set; then enableval=$enable_all; enables_a_rex_service=$enableval enables_gridftpd_service=$enableval enables_ldap_service=$enableval enables_giis_service=$enableval enables_ldap_monitor=$enableval enables_ws_monitor=$enableval enables_cache_service=$enableval enables_datadelivery_service=$enableval enables_compute_client=$enableval enables_credentials_client=$enableval enables_echo_client=$enableval enables_data_client=$enableval enables_jura_client=$enableval enables_saml_client=$enableval enables_wsrf_client=$enableval enables_emies_client=$enableval enables_hed=$enableval enables_java=$enableval enables_junit=$enableval enables_python=$enableval enables_altpython=$enableval enables_pylint=$enableval enables_mock_dmc=$enableval enables_gfal=$enableval enables_s3=$enableval enables_xrootd=$enableval enables_xmlsec1=$enableval enables_argus=$enableval enables_cppunit=$enableval enables_doc=$enableval enables_acix=$enableval enables_dbjstore=$enableval enables_ldns=$enableval fi # Check whether --enable-all-clients was given. if test "${enable_all_clients+set}" = set; then enableval=$enable_all_clients; enables_compute_client=$enableval enables_credentials_client=$enableval enables_echo_client=$enableval enables_data_client=$enableval enables_jura_client=$enableval enables_saml_client=$enableval enables_wsrf_client=$enableval enables_emies_client=$enableval enables_doc=$enableval fi # Check whether --enable-all-data-clients was given. if test "${enable_all_data_clients+set}" = set; then enableval=$enable_all_data_clients; enables_data_client=$enableval fi # Check whether --enable-all-services was given. if test "${enable_all_services+set}" = set; then enableval=$enable_all_services; enables_a_rex_service=$enableval enables_gridftpd_service=$enableval enables_ldap_service=$enableval enables_giis_service=$enableval enables_ldap_monitor=$enableval enables_cache_service=$enableval enables_datadelivery_service=$enableval enables_acix=$enableval fi # Be pedantic about compiler warnings. # Check whether --enable-pedantic-compile was given. if test "${enable_pedantic_compile+set}" = set; then enableval=$enable_pedantic_compile; enables_pedantic_compile="yes" else enables_pedantic_compile="no" fi if test "x$enables_pedantic_compile" = "xyes"; then # This check need to be enhanced. It won't work in case of cross-compilation # and if path to compiler is explicitly specified. if test x"$CXX" = x"g++"; then # GNU C/C++ flags AM_CXXFLAGS="-Wall -Wextra -Werror -Wno-sign-compare -Wno-unused" SAVE_CPPFLAGS=$CPPFLAGS ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu CPPFLAGS="$CPPFLAGS -Wno-unused-result" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then AM_CXXFLAGS="$AM_CXXFLAGS -Wno-unused-result" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { $as_echo "$as_me:$LINENO: compilation flag -Wno-unused-result is not supported" >&5 $as_echo "$as_me: compilation flag -Wno-unused-result is not supported" >&6;} fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu CPPFLAGS=$SAVE_CPPFLAGS else # TODO: set generic flags for generic compiler AM_CXXFLAGS="" fi fi if test "x$enables_pedantic_compile" = "xyes"; then PEDANTIC_COMPILE_TRUE= PEDANTIC_COMPILE_FALSE='#' else PEDANTIC_COMPILE_TRUE='#' PEDANTIC_COMPILE_FALSE= fi # Enable/disable switches for third-party. # Swig # Check whether --enable-swig-python was given. if test "${enable_swig_python+set}" = set; then enableval=$enable_swig_python; enables_swig_python=$enableval fi # Check whether --enable-swig-java was given. if test "${enable_swig_java+set}" = set; then enableval=$enable_swig_java; enables_swig_java=$enableval fi # Check whether --enable-swig was given. if test "${enable_swig+set}" = set; then enableval=$enable_swig; enables_swig_python=$enableval enables_swig_java=$enableval fi if test "$enables_swig_python" = "yes" || test "$enables_swig_java" = "yes"; then for ac_prog in swig do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_SWIG+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$SWIG"; then ac_cv_prog_SWIG="$SWIG" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_SWIG="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi SWIG=$ac_cv_prog_SWIG if test -n "$SWIG"; then { $as_echo "$as_me:$LINENO: result: $SWIG" >&5 $as_echo "$SWIG" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$SWIG" && break done if test "x$SWIG" = "x"; then enables_swig="no" else swigver=`$SWIG -version 2>&1 | grep Version | sed 's/.* //'` swigver1=`echo $swigver | cut -d. -f1` swigver2=`echo $swigver | cut -d. -f2` swigver3=`echo $swigver | cut -d. -f3` if test $swigver1 -lt 1 || ( test $swigver1 -eq 1 && ( \ test $swigver2 -lt 3 || ( test $swigver2 -eq 3 && ( \ test $swigver3 -lt 25 ) ) ) ) ; then { $as_echo "$as_me:$LINENO: swig is too old (< 1.3.25)" >&5 $as_echo "$as_me: swig is too old (< 1.3.25)" >&6;} SWIG="" enables_swig="no" elif test $swigver1 -eq 1 && test $swigver2 -eq 3 && test $swigver3 -eq 38 ; then { $as_echo "$as_me:$LINENO: swig version 1.3.38 has bug which prevents it from being used for this software. Please upgrade or downgrade." >&5 $as_echo "$as_me: swig version 1.3.38 has bug which prevents it from being used for this software. Please upgrade or downgrade." 
>&6;} SWIG="" enables_swig="no" else SWIG2="no" if test $swigver1 -ge 2 then SWIG2="yes" fi SWIG_PYTHON_NAMING="SwigPy" # In SWIG version 1.3.37 naming was changed from "PySwig" to "SwigPy". if test $swigver1 -lt 1 || ( test $swigver1 -eq 1 && ( \ test $swigver2 -lt 3 || ( test $swigver2 -eq 3 && ( \ test $swigver3 -lt 37 ) ) ) ) ; then SWIG_PYTHON_NAMING="PySwig" fi fi fi else SWIG="" fi if test "x$enables_swig" = "xyes"; then SWIG_ENABLED_TRUE= SWIG_ENABLED_FALSE='#' else SWIG_ENABLED_TRUE='#' SWIG_ENABLED_FALSE= fi # Check whether --enable-hed was given. if test "${enable_hed+set}" = set; then enableval=$enable_hed; enables_hed=$enableval fi # Java if test "$enables_hed" = "yes"; then JAVAC_FLAGS= JDK_CFLAGS= # Check whether --enable-java was given. if test "${enable_java+set}" = set; then enableval=$enable_java; enables_java=$enableval enables_swig_java=$enableval fi if test "$enables_java" = "yes"; then # Check whether --with-jdk was given. if test "${with_jdk+set}" = set; then withval=$with_jdk; fi JPATH= if test "x$with_jdk" != "x"; then # User specified JDK! JPATH=$with_jdk else # Look for system JDK. for i in /usr/lib/jvm/java-*-openjdk* /usr/lib/jvm/java-*-icedtea* /usr/lib/jvm/java-*-gcj* /usr/lib/jvm/java-*-sun* /usr/lib/gcc/*-redhat-linux/*; do if test -f $i/include/jni.h; then JPATH=$i break fi done fi if test "x$JPATH" != "x"; then JDK_CFLAGS="-I$JPATH/include" # Any extra includes? Look for them. JAVA_EXTRA_INCLUDE= case "${host}" in *-pc-mingw32 | *-pc-cygwin) JAVA_EXTRA_INCLUDE="win32" ;; *linux* | *kfreebsd* | *gnu* ) JAVA_EXTRA_INCLUDE="linux" ;; *solaris*) JAVA_EXTRA_INCLUDE="solaris" ;; esac if test "x$JAVA_EXTRA_INCLUDE" != "x" && test -d $JPATH/include/$JAVA_EXTRA_INCLUDE; then JDK_CFLAGS="$JDK_CFLAGS $JDK_CFLAGS/$JAVA_EXTRA_INCLUDE" fi SAVE_CPPFLAGS=$CPPFLAGS CPPFLAGS="$CPPFLAGS $JDK_CFLAGS" for ac_header in jni.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? 
{ $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... 
" >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF jni_h="yes" else jni_h="no" fi done CPPFLAGS=$SAVE_CPPFLAGS for ac_prog in java gij do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_JAVA+set}" = set; then $as_echo_n "(cached) " >&6 else case $JAVA in [\\/]* | ?:[\\/]*) ac_cv_path_JAVA="$JAVA" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_dummy="$JPATH/bin:/usr/bin" for as_dir in $as_dummy do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_JAVA="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi JAVA=$ac_cv_path_JAVA if test -n "$JAVA"; then { $as_echo "$as_me:$LINENO: result: $JAVA" >&5 $as_echo "$JAVA" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$JAVA" && break done for ac_prog in javac gcj ecj do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_JAVAC+set}" = set; then $as_echo_n "(cached) " >&6 else case $JAVAC in [\\/]* | ?:[\\/]*) ac_cv_path_JAVAC="$JAVAC" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_dummy="$JPATH/bin:/usr/bin" for as_dir in $as_dummy do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_JAVAC="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi JAVAC=$ac_cv_path_JAVAC if test -n "$JAVAC"; then { $as_echo "$as_me:$LINENO: result: $JAVAC" >&5 $as_echo "$JAVAC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$JAVAC" && break done for ac_prog in fastjar jar do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_JAR+set}" = set; then $as_echo_n "(cached) " >&6 else case $JAR in [\\/]* | ?:[\\/]*) ac_cv_path_JAR="$JAR" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_dummy="$JPATH/bin:/usr/bin" for as_dir in $as_dummy do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_JAR="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi JAR=$ac_cv_path_JAR if test -n "$JAR"; then { $as_echo "$as_me:$LINENO: result: $JAR" >&5 $as_echo "$JAR" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$JAR" && break done break fi if test "x$with_jdk" = "x" && test "x$JDK_CFLAGS" = "x"; then # If JDK was not found in any of the above locations try system locations. for ac_header in jni.h JavaVM/jni.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? 
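# Editor's note (illustrative comment, not emitted by autoconf): the case
# below keys on "compiler result:preprocessor result:warn flag". "yes:no:"
# (compiles but the preprocessor rejects it) trusts the compiler, while
# "no:yes:*" (present but not compilable) warns and falls back to the
# preprocessor result. The surrounding loop then sets jni_h="yes" and stops
# at the first header that is accepted.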
case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF jni_h="yes"; break else jni_h="no" fi done for ac_prog in java gij do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_JAVA+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$JAVA"; then ac_cv_prog_JAVA="$JAVA" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_JAVA="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi JAVA=$ac_cv_prog_JAVA if test -n "$JAVA"; then { $as_echo "$as_me:$LINENO: result: $JAVA" >&5 $as_echo "$JAVA" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$JAVA" && break done for ac_prog in javac gcj ecj do # Extract the first word of "$ac_prog", so it can be a program name with args. 
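# Editor's note (illustrative comment, not emitted by autoconf): "set dummy
# $ac_prog; ac_word=$2" picks the first word of the candidate so an entry may
# carry arguments, and a cached ac_cv_prog_JAVAC or a pre-set JAVAC variable
# short-circuits the search; e.g. running "JAVAC=ecj ./configure"
# (hypothetical) would be honoured here without walking $PATH.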
set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_JAVAC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$JAVAC"; then ac_cv_prog_JAVAC="$JAVAC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_JAVAC="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi JAVAC=$ac_cv_prog_JAVAC if test -n "$JAVAC"; then { $as_echo "$as_me:$LINENO: result: $JAVAC" >&5 $as_echo "$JAVAC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$JAVAC" && break done for ac_prog in fastjar jar do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_JAR+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$JAR"; then ac_cv_prog_JAR="$JAR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_JAR="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi JAR=$ac_cv_prog_JAR if test -n "$JAR"; then { $as_echo "$as_me:$LINENO: result: $JAR" >&5 $as_echo "$JAR" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$JAR" && break done fi # Set absolute limit on Java heap size allocation instead of letting # the java virtual machine and compiler (on Linux) allocate a quarter # of the total memory. JAVA_VM_FLAGS="-Xms50M -Xmx50M" JAVAC_VM_FLAGS="-J-Xms50M -J-Xmx50M" JAR_JFLAGS="" # Old versions of fastjar does not ignore -J options if test "x$JAR" != "x" && test "x`basename $JAR`" = "xjar"; then JAR_JFLAGS="$JAVAC_VM_FLAGS" fi if test "x$JAVAC" != "x" && test "x`basename $JAVAC`" = "xgcj"; then JAVAC_FLAGS="-C -fsource=1.5 -ftarget=1.5 --classpath=." elif test "x$JAVAC" != "x" && test "x`basename $JAVAC`" = "xjavac"; then JAVAC_FLAGS="$JAVAC_VM_FLAGS -source 1.5 -target 1.5 -cp ." 
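# Editor's note (illustrative comment, not emitted by autoconf): both branches
# above pin the generated bytecode to Java 1.5 (gcj via -fsource/-ftarget,
# javac via -source/-target); the javac branch additionally passes the
# -J-Xms50M/-J-Xmx50M options defined earlier to cap the compiler JVM heap
# at 50 MB.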
fi if test "x$JAVA" != "x" && test "x`basename $JAVA`" = "xjava"; then JAVA_FLAGS="$JAVA_VM_FLAGS" fi # Check if version is 1.5 (Iterable was first introduced in 1.5) echo "public abstract class TestIterable implements Iterable< Object > {}" > TestIterable.java if $JAVAC $JAVAC_FLAGS TestIterable.java >/dev/null 2>&1; then java_is_15_or_above="yes" fi rm -f TestIterable.java TestIterable.class if test "x$JAVAC" = "x"; then { $as_echo "$as_me:$LINENO: Missing Java compiler - skipping Java components" >&5 $as_echo "$as_me: Missing Java compiler - skipping Java components" >&6;} enables_java="no" elif test "x$JAR" = "x"; then { $as_echo "$as_me:$LINENO: Missing Java archiver - skipping Java components" >&5 $as_echo "$as_me: Missing Java archiver - skipping Java components" >&6;} enables_java="no" elif test "x$jni_h" != "xyes"; then { $as_echo "$as_me:$LINENO: Missing Java headers - skipping Java components" >&5 $as_echo "$as_me: Missing Java headers - skipping Java components" >&6;} enables_java="no" else { $as_echo "$as_me:$LINENO: Java available: $JAVAC" >&5 $as_echo "$as_me: Java available: $JAVAC" >&6;} fi if test "x$enables_java" != "xyes"; then { $as_echo "$as_me:$LINENO: Missing Java - skipping Java bindings" >&5 $as_echo "$as_me: Missing Java - skipping Java bindings" >&6;} enables_swig_java="no" elif ! test -f java/arc_wrap.cpp && test "x$enables_swig_java" != "xyes"; then { $as_echo "$as_me:$LINENO: Missing pre-compiled Java wrapper and SWIG - skipping Java bindings" >&5 $as_echo "$as_me: Missing pre-compiled Java wrapper and SWIG - skipping Java bindings" >&6;} enables_swig_java="no" fi fi fi { $as_echo "$as_me:$LINENO: Java enabled: $enables_java" >&5 $as_echo "$as_me: Java enabled: $enables_java" >&6;} { $as_echo "$as_me:$LINENO: Java SWIG binding enabled: $enables_swig_java" >&5 $as_echo "$as_me: Java SWIG binding enabled: $enables_swig_java" >&6;} if test "x$enables_java" = "xyes"; then JAVA_ENABLED_TRUE= JAVA_ENABLED_FALSE='#' else JAVA_ENABLED_TRUE='#' JAVA_ENABLED_FALSE= fi if test "x$enables_swig_java" = "xyes"; then JAVA_SWIG_ENABLED_TRUE= JAVA_SWIG_ENABLED_FALSE='#' else JAVA_SWIG_ENABLED_TRUE='#' JAVA_SWIG_ENABLED_FALSE= fi if test "x$java_is_15_or_above" = "xyes"; then JAVA_IS_15_OR_ABOVE_TRUE= JAVA_IS_15_OR_ABOVE_FALSE='#' else JAVA_IS_15_OR_ABOVE_TRUE='#' JAVA_IS_15_OR_ABOVE_FALSE= fi # Check whether --with-jnidir was given. if test "${with_jnidir+set}" = set; then withval=$with_jnidir; case $withval in yes|no) { { $as_echo "$as_me:$LINENO: error: Invalid DIR" >&5 $as_echo "$as_me: error: Invalid DIR" >&2;} { (exit 1); exit 1; }; } ;; *) jnidir="$withval" ;; esac else if test -z "$jnidir"; then jnidir='${libdir}/java' fi fi # Check whether --with-jninativedir was given. if test "${with_jninativedir+set}" = set; then withval=$with_jninativedir; case $withval in yes|no) { { $as_echo "$as_me:$LINENO: error: Invalid DIR" >&5 $as_echo "$as_me: error: Invalid DIR" >&2;} { (exit 1); exit 1; }; } ;; *) jninativedir="$withval" ;; esac else if test -z "$jninativedir"; then jninativedir='${pkglibdir}' fi fi # Try to find junit - used for unit testing of Java bindings. if test "$enables_hed" = "yes"; then # Check whether --enable-junit was given. if test "${enable_junit+set}" = set; then enableval=$enable_junit; enables_junit=$enableval fi if test "$enables_java" = "yes" && test "$enables_junit" = "yes"; then if ! 
test -f /usr/share/java/junit.jar; then enables_junit="no" fi else enables_junit="no" fi fi if test "x$enables_junit" = "xyes"; then JUNIT_ENABLED_TRUE= JUNIT_ENABLED_FALSE='#' else JUNIT_ENABLED_TRUE='#' JUNIT_ENABLED_FALSE= fi { $as_echo "$as_me:$LINENO: Java unit testing enabled: $enables_junit" >&5 $as_echo "$as_me: Java unit testing enabled: $enables_junit" >&6;} # Python if test "$enables_hed" = "yes"; then # Check whether --enable-python was given. if test "${enable_python+set}" = set; then enableval=$enable_python; enables_python=$enableval enables_swig_python=$enableval fi if test "$enables_python" = "yes"; then # Check whether --with-python was given. if test "${with_python+set}" = set; then withval=$with_python; fi # We do not look for python binary when cross-compiling # but we need to make the variable non-empty if test "${build}" = "${host}"; then for ac_prog in $with_python python do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_PYTHON+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$PYTHON"; then ac_cv_prog_PYTHON="$PYTHON" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_PYTHON="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi PYTHON=$ac_cv_prog_PYTHON if test -n "$PYTHON"; then { $as_echo "$as_me:$LINENO: result: $PYTHON" >&5 $as_echo "$PYTHON" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$PYTHON" && break done else PYTHON=python fi if test "X$PYTHON" != "X"; then PYNAME=`basename $PYTHON` if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}pkg-config", so it can be a program name with args. set dummy ${ac_tool_prefix}pkg-config; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_PKG_CONFIG+set}" = set; then $as_echo_n "(cached) " >&6 else case $PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_PKG_CONFIG="$PKG_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PKG_CONFIG=$ac_cv_path_PKG_CONFIG if test -n "$PKG_CONFIG"; then { $as_echo "$as_me:$LINENO: result: $PKG_CONFIG" >&5 $as_echo "$PKG_CONFIG" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_path_PKG_CONFIG"; then ac_pt_PKG_CONFIG=$PKG_CONFIG # Extract the first word of "pkg-config", so it can be a program name with args. set dummy pkg-config; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if test "${ac_cv_path_ac_pt_PKG_CONFIG+set}" = set; then $as_echo_n "(cached) " >&6 else case $ac_pt_PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_ac_pt_PKG_CONFIG="$ac_pt_PKG_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_ac_pt_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi ac_pt_PKG_CONFIG=$ac_cv_path_ac_pt_PKG_CONFIG if test -n "$ac_pt_PKG_CONFIG"; then { $as_echo "$as_me:$LINENO: result: $ac_pt_PKG_CONFIG" >&5 $as_echo "$ac_pt_PKG_CONFIG" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_pt_PKG_CONFIG" = x; then PKG_CONFIG="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac PKG_CONFIG=$ac_pt_PKG_CONFIG fi else PKG_CONFIG="$ac_cv_path_PKG_CONFIG" fi fi if test -n "$PKG_CONFIG"; then _pkg_min_version=0.9.0 { $as_echo "$as_me:$LINENO: checking pkg-config is at least version $_pkg_min_version" >&5 $as_echo_n "checking pkg-config is at least version $_pkg_min_version... " >&6; } if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } PKG_CONFIG="" fi fi pkg_failed=no { $as_echo "$as_me:$LINENO: checking for PYTHON" >&5 $as_echo_n "checking for PYTHON... " >&6; } if test -n "$PYTHON_CFLAGS"; then pkg_cv_PYTHON_CFLAGS="$PYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\"") >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_PYTHON_CFLAGS=`$PKG_CONFIG --cflags "$PYNAME" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$PYTHON_LIBS"; then pkg_cv_PYTHON_LIBS="$PYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\"") >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then pkg_cv_PYTHON_LIBS=`$PKG_CONFIG --libs "$PYNAME" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then PYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "$PYNAME" 2>&1` else PYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors "$PYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$PYTHON_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } PYTHON_VERSION=`$PYTHON -c 'import sys; print(sys.version[:3])'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[0])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for Py_Initialize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$PYTHON_VERSION... " >&6; } if { as_var=$as_ac_Lib; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then eval "$as_ac_Lib=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_Lib=no" fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi ac_res=`eval 'as_val=${'$as_ac_Lib'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_Lib'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then { $as_echo "$as_me:$LINENO: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS" else LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for Py_Finalize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$PYTHON_VERSION... " >&6; } if { as_var=$as_ac_Lib; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then eval "$as_ac_Lib=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_Lib=no" fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi ac_res=`eval 'as_val=${'$as_ac_Lib'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_Lib'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then { $as_echo "$as_me:$LINENO: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS" else PYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS elif test $pkg_failed = untried; then PYTHON_VERSION=`$PYTHON -c 'import sys; print(sys.version[:3])'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[0])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for Py_Initialize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$PYTHON_VERSION... " >&6; } if { as_var=$as_ac_Lib; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then eval "$as_ac_Lib=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_Lib=no" fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi ac_res=`eval 'as_val=${'$as_ac_Lib'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_Lib'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then { $as_echo "$as_me:$LINENO: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS" else LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for Py_Finalize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$PYTHON_VERSION... " >&6; } if { as_var=$as_ac_Lib; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then eval "$as_ac_Lib=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_Lib=no" fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi ac_res=`eval 'as_val=${'$as_ac_Lib'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_Lib'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then { $as_echo "$as_me:$LINENO: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS" else PYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS else PYTHON_CFLAGS=$pkg_cv_PYTHON_CFLAGS PYTHON_LIBS=$pkg_cv_PYTHON_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` fi if test "${build}" = "${host}"; then PYTHON_SOABI=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SOABI'))" | sed s/None//` else PYTHON_SOABI="" fi # Check whether --with-python-site-arch was given. if test "${with_python_site_arch+set}" = set; then withval=$with_python_site_arch; fi if test "X$PYTHON_SITE_ARCH" = "X"; then if test "${build}" = "${host}"; then PYTHON_SITE_ARCH=`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_lib(1,0,"${prefix}"))'` else PYTHON_SITE_ARCH="${libdir}/python${PYTHON_VERSION}/site-packages" fi fi # Check whether --with-python-site-lib was given. if test "${with_python_site_lib+set}" = set; then withval=$with_python_site_lib; fi if test "X$PYTHON_SITE_LIB" = "X"; then if test "${build}" = "${host}"; then PYTHON_SITE_LIB=`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_lib(0,0,"${prefix}"))'` else PYTHON_SITE_LIB="${libdir}/python${PYTHON_VERSION}/site-packages" fi fi SAVE_LDFLAGS=$LDFLAGS SAVE_CPPFLAGS=$CPPFLAGS LDFLAGS="$LDFLAGS $PYTHON_LIBS" CPPFLAGS="$CPPFLAGS $PYTHON_CFLAGS" if test "${ac_cv_header_Python_h+set}" = set; then { $as_echo "$as_me:$LINENO: checking for Python.h" >&5 $as_echo_n "checking for Python.h... " >&6; } if test "${ac_cv_header_Python_h+set}" = set; then $as_echo_n "(cached) " >&6 fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_Python_h" >&5 $as_echo "$ac_cv_header_Python_h" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking Python.h usability" >&5 $as_echo_n "checking Python.h usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking Python.h presence" >&5 $as_echo_n "checking Python.h presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: Python.h: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: Python.h: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: Python.h: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: Python.h: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: Python.h: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: Python.h: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: Python.h: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: Python.h: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: Python.h: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: Python.h: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: Python.h: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: Python.h: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: Python.h: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: Python.h: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: Python.h: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: Python.h: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for Python.h" >&5 $as_echo_n "checking for Python.h... 
" >&6; } if test "${ac_cv_header_Python_h+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_header_Python_h=$ac_header_preproc fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_Python_h" >&5 $as_echo "$ac_cv_header_Python_h" >&6; } fi if test "x$ac_cv_header_Python_h" = x""yes; then pythonh="yes" else pythonh="no" fi cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include int main () { Py_InitializeEx(0) ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then { $as_echo "$as_me:$LINENO: Python includes functionality of skipping initialization registration of signal handlers" >&5 $as_echo "$as_me: Python includes functionality of skipping initialization registration of signal handlers" >&6;} cat >>confdefs.h <<\_ACEOF #define HAVE_PYTHON_INITIALIZE_EX /**/ _ACEOF enables_python_service="yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { $as_echo "$as_me:$LINENO: Python does not include functionality of skipping initialization registration of signal handlers, since its version is below 2.4" >&5 $as_echo "$as_me: Python does not include functionality of skipping initialization registration of signal handlers, since its version is below 2.4" >&6;} enables_python_service="no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext LDFLAGS=$SAVE_LDFLAGS CPPFLAGS=$SAVE_CPPFLAGS fi if test "X$PYTHON" = "X"; then { $as_echo "$as_me:$LINENO: Missing Python - skipping Python components" >&5 $as_echo "$as_me: Missing Python - skipping Python components" >&6;} enables_python=no elif test "X$PYTHON_SITE_ARCH" = "X" || test "X$PYTHON_SITE_LIB" = "X"; then { $as_echo "$as_me:$LINENO: Missing python site packages location - skipping Python components" >&5 $as_echo "$as_me: Missing python site packages location - skipping Python components" >&6;} enables_python=no else { $as_echo "$as_me:$LINENO: Python available: $PYTHON_VERSION" >&5 $as_echo "$as_me: Python available: $PYTHON_VERSION" >&6;} fi if test "x$enables_python" != "xyes"; then { $as_echo "$as_me:$LINENO: Missing Python - skipping Python bindings" >&5 $as_echo "$as_me: Missing Python - skipping Python bindings" >&6;} enables_swig_python=no elif test "X$PYTHON_LIBS" = "X"; then { $as_echo "$as_me:$LINENO: Missing Python library - skipping Python bindings" >&5 $as_echo "$as_me: Missing Python library - skipping Python bindings" >&6;} enables_swig_python=no elif test "X$pythonh" != "Xyes"; then { $as_echo "$as_me:$LINENO: Missing Python header - skipping Python bindings" >&5 $as_echo "$as_me: Missing Python header - skipping Python bindings" >&6;} enables_swig_python=no elif ! 
test -f python/arc_wrap.cpp && test "x$enables_swig_python" != "xyes"; then { $as_echo "$as_me:$LINENO: Missing pre-compiled Python wrapper and SWIG - skipping Python bindings" >&5 $as_echo "$as_me: Missing pre-compiled Python wrapper and SWIG - skipping Python bindings" >&6;} enables_swig_python=no fi fi fi { $as_echo "$as_me:$LINENO: Python enabled: $enables_python" >&5 $as_echo "$as_me: Python enabled: $enables_python" >&6;} { $as_echo "$as_me:$LINENO: Python SWIG bindings enabled: $enables_swig_python" >&5 $as_echo "$as_me: Python SWIG bindings enabled: $enables_swig_python" >&6;} if test "x$enables_python" = "xyes"; then PYTHON_ENABLED_TRUE= PYTHON_ENABLED_FALSE='#' else PYTHON_ENABLED_TRUE='#' PYTHON_ENABLED_FALSE= fi if test "x$enables_python" = "xyes" && test "x$PYTHON_MAJOR" = "x3"; then PYTHON3_TRUE= PYTHON3_FALSE='#' else PYTHON3_TRUE='#' PYTHON3_FALSE= fi if test "x$enables_swig_python" = "xyes"; then PYTHON_SWIG_ENABLED_TRUE= PYTHON_SWIG_ENABLED_FALSE='#' else PYTHON_SWIG_ENABLED_TRUE='#' PYTHON_SWIG_ENABLED_FALSE= fi if test "x$enables_swig_python" = "xyes" && test "x$enables_python_service" = "xyes"; then PYTHON_SERVICE_TRUE= PYTHON_SERVICE_FALSE='#' else PYTHON_SERVICE_TRUE='#' PYTHON_SERVICE_FALSE= fi # Alternative Python if test "$enables_hed" = "yes"; then # Check whether --enable-altpython was given. if test "${enable_altpython+set}" = set; then enableval=$enable_altpython; enables_altpython=$enableval fi if test "$enables_altpython" = "yes"; then # Check whether --with-altpython was given. if test "${with_altpython+set}" = set; then withval=$with_altpython; fi for ac_prog in $with_altpython do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ALTPYTHON+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ALTPYTHON"; then ac_cv_prog_ALTPYTHON="$ALTPYTHON" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ALTPYTHON="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ALTPYTHON=$ac_cv_prog_ALTPYTHON if test -n "$ALTPYTHON"; then { $as_echo "$as_me:$LINENO: result: $ALTPYTHON" >&5 $as_echo "$ALTPYTHON" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ALTPYTHON" && break done if test "X$ALTPYTHON" != "X"; then ALTPYNAME=`basename $ALTPYTHON` pkg_failed=no { $as_echo "$as_me:$LINENO: checking for ALTPYTHON" >&5 $as_echo_n "checking for ALTPYTHON... " >&6; } if test -n "$ALTPYTHON_CFLAGS"; then pkg_cv_ALTPYTHON_CFLAGS="$ALTPYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\"") >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then pkg_cv_ALTPYTHON_CFLAGS=`$PKG_CONFIG --cflags "$ALTPYNAME" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$ALTPYTHON_LIBS"; then pkg_cv_ALTPYTHON_LIBS="$ALTPYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\"") >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_ALTPYTHON_LIBS=`$PKG_CONFIG --libs "$ALTPYNAME" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "$ALTPYNAME" 2>&1` else ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors "$ALTPYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$ALTPYTHON_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(sys.version[:3])'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[0])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for Py_Initialize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$ALTPYTHON_VERSION... " >&6; } if { as_var=$as_ac_Lib; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then eval "$as_ac_Lib=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_Lib=no" fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi ac_res=`eval 'as_val=${'$as_ac_Lib'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_Lib'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then { $as_echo "$as_me:$LINENO: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for Py_Finalize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$ALTPYTHON_VERSION... " >&6; } if { as_var=$as_ac_Lib; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then eval "$as_ac_Lib=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_Lib=no" fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi ac_res=`eval 'as_val=${'$as_ac_Lib'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_Lib'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then { $as_echo "$as_me:$LINENO: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else ALTPYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS elif test $pkg_failed = untried; then ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(sys.version[:3])'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[0])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for Py_Initialize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$ALTPYTHON_VERSION... " >&6; } if { as_var=$as_ac_Lib; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then eval "$as_ac_Lib=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_Lib=no" fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi ac_res=`eval 'as_val=${'$as_ac_Lib'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_Lib'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then { $as_echo "$as_me:$LINENO: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for Py_Finalize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$ALTPYTHON_VERSION... " >&6; } if { as_var=$as_ac_Lib; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then eval "$as_ac_Lib=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_Lib=no" fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi ac_res=`eval 'as_val=${'$as_ac_Lib'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_Lib'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then { $as_echo "$as_me:$LINENO: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else ALTPYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS else ALTPYTHON_CFLAGS=$pkg_cv_ALTPYTHON_CFLAGS ALTPYTHON_LIBS=$pkg_cv_ALTPYTHON_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` fi ALTPYTHON_SOABI=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SOABI'))" | sed s/None//` # Check whether --with-altpython-site-arch was given. if test "${with_altpython_site_arch+set}" = set; then withval=$with_altpython_site_arch; fi if test "X$ALTPYTHON_SITE_ARCH" = "X"; then ALTPYTHON_SITE_ARCH=`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_lib(1,0,"${prefix}"))'` fi # Check whether --with-altpython-site-lib was given. if test "${with_altpython_site_lib+set}" = set; then withval=$with_altpython_site_lib; fi if test "X$ALTPYTHON_SITE_LIB" = "X"; then ALTPYTHON_SITE_LIB=`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_lib(0,0,"${prefix}"))'` fi SAVE_LDFLAGS=$LDFLAGS SAVE_CPPFLAGS=$CPPFLAGS LDFLAGS="$LDFLAGS $ALTPYTHON_LIBS" CPPFLAGS="$CPPFLAGS $ALTPYTHON_CFLAGS" if test "${ac_cv_header_Python_h+set}" = set; then { $as_echo "$as_me:$LINENO: checking for Python.h" >&5 $as_echo_n "checking for Python.h... " >&6; } if test "${ac_cv_header_Python_h+set}" = set; then $as_echo_n "(cached) " >&6 fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_Python_h" >&5 $as_echo "$ac_cv_header_Python_h" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking Python.h usability" >&5 $as_echo_n "checking Python.h usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? 
{ $as_echo "$as_me:$LINENO: checking Python.h presence" >&5 $as_echo_n "checking Python.h presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: Python.h: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: Python.h: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: Python.h: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: Python.h: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: Python.h: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: Python.h: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: Python.h: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: Python.h: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: Python.h: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: Python.h: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: Python.h: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: Python.h: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: Python.h: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: Python.h: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: Python.h: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: Python.h: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for Python.h" >&5 $as_echo_n "checking for Python.h... 
" >&6; } if test "${ac_cv_header_Python_h+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_header_Python_h=$ac_header_preproc fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_Python_h" >&5 $as_echo "$ac_cv_header_Python_h" >&6; } fi if test "x$ac_cv_header_Python_h" = x""yes; then altpythonh="yes" else altpythonh="no" fi LDFLAGS=$SAVE_LDFLAGS CPPFLAGS=$SAVE_CPPFLAGS fi if test "X$ALTPYTHON" = "X"; then { $as_echo "$as_me:$LINENO: Missing alternative Python - skipping alternative Python" >&5 $as_echo "$as_me: Missing alternative Python - skipping alternative Python" >&6;} enables_altpython=no elif test "X$ALTPYTHON_LIBS" = "X"; then { $as_echo "$as_me:$LINENO: Missing alternative Python library - skipping alternative Python bindings" >&5 $as_echo "$as_me: Missing alternative Python library - skipping alternative Python bindings" >&6;} enables_altpython=no elif test "X$altpythonh" != "Xyes"; then { $as_echo "$as_me:$LINENO: Missing alternative Python header - skipping alternative Python bindings" >&5 $as_echo "$as_me: Missing alternative Python header - skipping alternative Python bindings" >&6;} enables_altpython=no elif test "X$ALTPYTHON_SITE_ARCH" = "X" || test "X$ALTPYTHON_SITE_LIB" = "X"; then { $as_echo "$as_me:$LINENO: Missing python site packages location - skipping Python bindings" >&5 $as_echo "$as_me: Missing python site packages location - skipping Python bindings" >&6;} enables_altpython=no else { $as_echo "$as_me:$LINENO: Alternative Python available: $ALTPYTHON_VERSION" >&5 $as_echo "$as_me: Alternative Python available: $ALTPYTHON_VERSION" >&6;} fi if test "x$enables_altpython" != "xyes"; then { $as_echo "$as_me:$LINENO: Missing alternative Python - skipping alternative Python bindings" >&5 $as_echo "$as_me: Missing alternative Python - skipping alternative Python bindings" >&6;} enables_altpython=no elif ! test -f python/arc_wrap.cpp && test "x$enables_swig_python" != "xyes"; then { $as_echo "$as_me:$LINENO: Missing pre-compiled Python wrapper and SWIG - skipping alternative Python bindings" >&5 $as_echo "$as_me: Missing pre-compiled Python wrapper and SWIG - skipping alternative Python bindings" >&6;} enables_altpython=no fi fi fi { $as_echo "$as_me:$LINENO: Alternative Python enabled: $enables_altpython" >&5 $as_echo "$as_me: Alternative Python enabled: $enables_altpython" >&6;} if test "x$enables_altpython" = "xyes"; then ALTPYTHON_ENABLED_TRUE= ALTPYTHON_ENABLED_FALSE='#' else ALTPYTHON_ENABLED_TRUE='#' ALTPYTHON_ENABLED_FALSE= fi if test "x$enables_altpython" = "xyes" && test "x$ALTPYTHON_MAJOR" = "x3"; then ALTPYTHON3_TRUE= ALTPYTHON3_FALSE='#' else ALTPYTHON3_TRUE='#' ALTPYTHON3_FALSE= fi # check for pylint if test "$enables_hed" = "yes"; then # Check whether --enable-pylint was given. if test "${enable_pylint+set}" = set; then enableval=$enable_pylint; enables_pylint=$enableval fi for ac_prog in pylint do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_PYLINT+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$PYLINT"; then ac_cv_prog_PYLINT="$PYLINT" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_PYLINT="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi PYLINT=$ac_cv_prog_PYLINT if test -n "$PYLINT"; then { $as_echo "$as_me:$LINENO: result: $PYLINT" >&5 $as_echo "$PYLINT" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$PYLINT" && break done if test "x$PYLINT" = "x"; then enables_pylint="no" else PYLINT_VERSION=`$PYLINT --version 2> /dev/null | sed -n 's/^pylint \([0-9.]*\).*/\1/p'` # Check if pylint supports the following arguments, otherwise disable pylint (python example checking). # Do not generate report # Disable convention and recommendation messages - we are only interested in fatals, errors and warnings. PYLINT_ARGS="--reports=no --disable=C,R" if $PYLINT $PYLINT_ARGS /dev/null > /dev/null 2>&1 ; then { $as_echo "$as_me:$LINENO: pylint version $PYLINT_VERSION found - version ok" >&5 $as_echo "$as_me: pylint version $PYLINT_VERSION found - version ok" >&6;} enables_pylint="yes" else { $as_echo "$as_me:$LINENO: pylint version $PYLINT_VERSION found - bad version" >&5 $as_echo "$as_me: pylint version $PYLINT_VERSION found - bad version" >&6;} enables_pylint="no" PYLINT_ARGS="" fi fi # Check if the --disable=W0221 option is supported # W0221: Disable arguments differ messages since Swig uses tuple syntax (*args). if test "$enables_pylint" = "yes"; then PYLINT_ARGS_ARGUMENTS_DIFFER="--disable=W0221" if ! $PYLINT $PYLINT_ARGS $PYLINT_ARGS_ARGUMENTS_DIFFER /dev/null > /dev/null 2>&1 ; then PYLINT_ARGS_ARGUMENTS_DIFFER="" fi fi fi if test "x$enables_pylint" = "xyes"; then PYLINT_ENABLED_TRUE= PYLINT_ENABLED_FALSE='#' else PYLINT_ENABLED_TRUE='#' PYLINT_ENABLED_FALSE= fi { $as_echo "$as_me:$LINENO: Python example checking with pylint enabled: $enables_pylint" >&5 $as_echo "$as_me: Python example checking with pylint enabled: $enables_pylint" >&6;} # check gthread if test "$enables_hed" = "yes"; then pkg_failed=no { $as_echo "$as_me:$LINENO: checking for GTHREAD" >&5 $as_echo_n "checking for GTHREAD... " >&6; } if test -n "$GTHREAD_CFLAGS"; then pkg_cv_GTHREAD_CFLAGS="$GTHREAD_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"gthread-2.0 >= 2.4.7\"") >&5 ($PKG_CONFIG --exists --print-errors "gthread-2.0 >= 2.4.7") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GTHREAD_CFLAGS=`$PKG_CONFIG --cflags "gthread-2.0 >= 2.4.7" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GTHREAD_LIBS"; then pkg_cv_GTHREAD_LIBS="$GTHREAD_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"gthread-2.0 >= 2.4.7\"") >&5 ($PKG_CONFIG --exists --print-errors "gthread-2.0 >= 2.4.7") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GTHREAD_LIBS=`$PKG_CONFIG --libs "gthread-2.0 >= 2.4.7" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GTHREAD_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "gthread-2.0 >= 2.4.7" 2>&1` else GTHREAD_PKG_ERRORS=`$PKG_CONFIG --print-errors "gthread-2.0 >= 2.4.7" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GTHREAD_PKG_ERRORS" >&5 { { $as_echo "$as_me:$LINENO: error: Package requirements (gthread-2.0 >= 2.4.7) were not met: $GTHREAD_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables GTHREAD_CFLAGS and GTHREAD_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. " >&5 $as_echo "$as_me: error: Package requirements (gthread-2.0 >= 2.4.7) were not met: $GTHREAD_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables GTHREAD_CFLAGS and GTHREAD_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. " >&2;} { (exit 1); exit 1; }; } elif test $pkg_failed = untried; then { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables GTHREAD_CFLAGS and GTHREAD_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see . See \`config.log' for more details." >&5 $as_echo "$as_me: error: The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables GTHREAD_CFLAGS and GTHREAD_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see . See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; }; } else GTHREAD_CFLAGS=$pkg_cv_GTHREAD_CFLAGS GTHREAD_LIBS=$pkg_cv_GTHREAD_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } : fi fi # check glibmm # check for giomm which became a part of glibmm as of version 2.16 if test "$enables_hed" = "yes"; then "$PKG_CONFIG" giomm-2.4 if test "$?" = '1'; then pkg_failed=no { $as_echo "$as_me:$LINENO: checking for GLIBMM" >&5 $as_echo_n "checking for GLIBMM... " >&6; } if test -n "$GLIBMM_CFLAGS"; then pkg_cv_GLIBMM_CFLAGS="$GLIBMM_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"glibmm-2.4 >= 2.4.7\"") >&5 ($PKG_CONFIG --exists --print-errors "glibmm-2.4 >= 2.4.7") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLIBMM_CFLAGS=`$PKG_CONFIG --cflags "glibmm-2.4 >= 2.4.7" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLIBMM_LIBS"; then pkg_cv_GLIBMM_LIBS="$GLIBMM_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"glibmm-2.4 >= 2.4.7\"") >&5 ($PKG_CONFIG --exists --print-errors "glibmm-2.4 >= 2.4.7") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLIBMM_LIBS=`$PKG_CONFIG --libs "glibmm-2.4 >= 2.4.7" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLIBMM_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "glibmm-2.4 >= 2.4.7" 2>&1` else GLIBMM_PKG_ERRORS=`$PKG_CONFIG --print-errors "glibmm-2.4 >= 2.4.7" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLIBMM_PKG_ERRORS" >&5 { { $as_echo "$as_me:$LINENO: error: Package requirements (glibmm-2.4 >= 2.4.7) were not met: $GLIBMM_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables GLIBMM_CFLAGS and GLIBMM_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. " >&5 $as_echo "$as_me: error: Package requirements (glibmm-2.4 >= 2.4.7) were not met: $GLIBMM_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables GLIBMM_CFLAGS and GLIBMM_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. " >&2;} { (exit 1); exit 1; }; } elif test $pkg_failed = untried; then { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables GLIBMM_CFLAGS and GLIBMM_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see . See \`config.log' for more details." >&5 $as_echo "$as_me: error: The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables GLIBMM_CFLAGS and GLIBMM_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see . See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; }; } else GLIBMM_CFLAGS=$pkg_cv_GLIBMM_CFLAGS GLIBMM_LIBS=$pkg_cv_GLIBMM_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } : fi else pkg_failed=no { $as_echo "$as_me:$LINENO: checking for GLIBMM" >&5 $as_echo_n "checking for GLIBMM... " >&6; } if test -n "$GLIBMM_CFLAGS"; then pkg_cv_GLIBMM_CFLAGS="$GLIBMM_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"giomm-2.4\"") >&5 ($PKG_CONFIG --exists --print-errors "giomm-2.4") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLIBMM_CFLAGS=`$PKG_CONFIG --cflags "giomm-2.4" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLIBMM_LIBS"; then pkg_cv_GLIBMM_LIBS="$GLIBMM_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"giomm-2.4\"") >&5 ($PKG_CONFIG --exists --print-errors "giomm-2.4") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLIBMM_LIBS=`$PKG_CONFIG --libs "giomm-2.4" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLIBMM_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "giomm-2.4" 2>&1` else GLIBMM_PKG_ERRORS=`$PKG_CONFIG --print-errors "giomm-2.4" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLIBMM_PKG_ERRORS" >&5 { { $as_echo "$as_me:$LINENO: error: Package requirements (giomm-2.4) were not met: $GLIBMM_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables GLIBMM_CFLAGS and GLIBMM_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. " >&5 $as_echo "$as_me: error: Package requirements (giomm-2.4) were not met: $GLIBMM_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables GLIBMM_CFLAGS and GLIBMM_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. " >&2;} { (exit 1); exit 1; }; } elif test $pkg_failed = untried; then { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables GLIBMM_CFLAGS and GLIBMM_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see . See \`config.log' for more details." >&5 $as_echo "$as_me: error: The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables GLIBMM_CFLAGS and GLIBMM_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see . See \`config.log' for more details." 
>&2;} { (exit 1); exit 1; }; }; } else GLIBMM_CFLAGS=$pkg_cv_GLIBMM_CFLAGS GLIBMM_LIBS=$pkg_cv_GLIBMM_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } : fi cat >>confdefs.h <<\_ACEOF #define HAVE_GIOMM /**/ _ACEOF fi SAVE_CPPFLAGS=$CPPFLAGS ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu CPPFLAGS="$CPPFLAGS $GLIBMM_CFLAGS" if test "${ac_cv_header_glibmm_optioncontext_h+set}" = set; then { $as_echo "$as_me:$LINENO: checking for glibmm/optioncontext.h" >&5 $as_echo_n "checking for glibmm/optioncontext.h... " >&6; } if test "${ac_cv_header_glibmm_optioncontext_h+set}" = set; then $as_echo_n "(cached) " >&6 fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_glibmm_optioncontext_h" >&5 $as_echo "$ac_cv_header_glibmm_optioncontext_h" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking glibmm/optioncontext.h usability" >&5 $as_echo_n "checking glibmm/optioncontext.h usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking glibmm/optioncontext.h presence" >&5 $as_echo_n "checking glibmm/optioncontext.h presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_cxx_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: glibmm/optioncontext.h: accepted by the compiler, rejected by the preprocessor!" 
>&5 $as_echo "$as_me: WARNING: glibmm/optioncontext.h: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: glibmm/optioncontext.h: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: glibmm/optioncontext.h: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: glibmm/optioncontext.h: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: glibmm/optioncontext.h: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: glibmm/optioncontext.h: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: glibmm/optioncontext.h: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: glibmm/optioncontext.h: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: glibmm/optioncontext.h: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: glibmm/optioncontext.h: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: glibmm/optioncontext.h: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: glibmm/optioncontext.h: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: glibmm/optioncontext.h: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: glibmm/optioncontext.h: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: glibmm/optioncontext.h: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for glibmm/optioncontext.h" >&5 $as_echo_n "checking for glibmm/optioncontext.h... " >&6; } if test "${ac_cv_header_glibmm_optioncontext_h+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_header_glibmm_optioncontext_h=$ac_header_preproc fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_glibmm_optioncontext_h" >&5 $as_echo "$ac_cv_header_glibmm_optioncontext_h" >&6; } fi if test "x$ac_cv_header_glibmm_optioncontext_h" = x""yes; then cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include int main () { Glib::OptionContext ctx; ctx.set_summary("summary") ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then cat >>confdefs.h <<\_ACEOF #define HAVE_GLIBMM_OPTIONCONTEXT_SET_SUMMARY /**/ _ACEOF { $as_echo "$as_me:$LINENO: using glibmm command line parsing" >&5 $as_echo "$as_me: using glibmm command line parsing" >&6;} else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { $as_echo "$as_me:$LINENO: using getopt_long command line parsing" >&5 $as_echo "$as_me: using getopt_long command line parsing" >&6;} fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include int main () { Glib::OptionContext ctx; ctx.get_help(); ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then cat >>confdefs.h <<\_ACEOF #define HAVE_GLIBMM_OPTIONCONTEXT_GET_HELP /**/ _ACEOF else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include int main () { Glib::SignalChildWatch watch = Glib::signal_child_watch(); ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then glibmm_childwatch=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 glibmm_childwatch=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "$glibmm_childwatch" = yes; then cat >>confdefs.h <<\_ACEOF #define HAVE_GLIBMM_CHILDWATCH /**/ _ACEOF else { $as_echo "$as_me:$LINENO: WARNING: glibmm has no API for controlling children processes - result of external processes may be inconsistent" >&5 $as_echo "$as_me: WARNING: glibmm has no API for controlling children processes - result of external processes may be inconsistent" >&6;} fi cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include int main () { Glib::ModuleFlags flags = Glib::MODULE_BIND_LOCAL; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then glibmm_bind_local=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 glibmm_bind_local=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "$glibmm_bind_local" = yes; then cat >>confdefs.h <<\_ACEOF #define HAVE_GLIBMM_BIND_LOCAL /**/ _ACEOF else { $as_echo "$as_me:$LINENO: WARNING: glibmm has no way to limit scope of symbols of shared libraries. Make sure external libraries used by plugins have no conflicting symbols. HINT: use Globus compiled against system OpenSSL library." >&5 $as_echo "$as_me: WARNING: glibmm has no way to limit scope of symbols of shared libraries. Make sure external libraries used by plugins have no conflicting symbols. HINT: use Globus compiled against system OpenSSL library." >&6;} fi cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include int main () { Glib::getenv(""); ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then glibmm_getenv=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 glibmm_getenv=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "$glibmm_getenv" = yes; then cat >>confdefs.h <<\_ACEOF #define HAVE_GLIBMM_GETENV /**/ _ACEOF else { $as_echo "$as_me:$LINENO: WARNING: glibmm has no support for getenv. Usage of libc getenv is unsafe in multi-threaded applications." >&5 $as_echo "$as_me: WARNING: glibmm has no support for getenv. Usage of libc getenv is unsafe in multi-threaded applications." >&6;} fi cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include int main () { Glib::setenv("", ""); ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then glibmm_setenv=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 glibmm_setenv=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "$glibmm_setenv" = yes; then cat >>confdefs.h <<\_ACEOF #define HAVE_GLIBMM_SETENV /**/ _ACEOF else { $as_echo "$as_me:$LINENO: WARNING: glibmm has no support for setenv. Usage of libc setenv may be unsafe in multi-threaded applications." >&5 $as_echo "$as_me: WARNING: glibmm has no support for setenv. Usage of libc setenv may be unsafe in multi-threaded applications." 
>&6;} fi cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include int main () { Glib::unsetenv(""); ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then glibmm_unsetenv=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 glibmm_unsetenv=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "$glibmm_unsetenv" = yes; then cat >>confdefs.h <<\_ACEOF #define HAVE_GLIBMM_UNSETENV /**/ _ACEOF else { $as_echo "$as_me:$LINENO: WARNING: glibmm has no support for unsetenv. Usage of libc unsetenv may be unsafe in multi-threaded applications." >&5 $as_echo "$as_me: WARNING: glibmm has no support for unsetenv. Usage of libc unsetenv may be unsafe in multi-threaded applications." >&6;} fi cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include int main () { Glib::listenv(); ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then glibmm_listenv=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 glibmm_listenv=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "$glibmm_listenv" = yes; then cat >>confdefs.h <<\_ACEOF #define HAVE_GLIBMM_LISTENV /**/ _ACEOF else { $as_echo "$as_me:$LINENO: WARNING: glibmm has no support for listenv. Usage of libc environ is unsafe in multi-threaded applications." >&5 $as_echo "$as_me: WARNING: glibmm has no support for listenv. Usage of libc environ is unsafe in multi-threaded applications." >&6;} fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu CPPFLAGS=$SAVE_CPPFLAGS fi # check libxml if test "$enables_hed" = "yes"; then pkg_failed=no { $as_echo "$as_me:$LINENO: checking for LIBXML2" >&5 $as_echo_n "checking for LIBXML2... " >&6; } if test -n "$LIBXML2_CFLAGS"; then pkg_cv_LIBXML2_CFLAGS="$LIBXML2_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"libxml-2.0 >= 2.4.0\"") >&5 ($PKG_CONFIG --exists --print-errors "libxml-2.0 >= 2.4.0") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then pkg_cv_LIBXML2_CFLAGS=`$PKG_CONFIG --cflags "libxml-2.0 >= 2.4.0" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$LIBXML2_LIBS"; then pkg_cv_LIBXML2_LIBS="$LIBXML2_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"libxml-2.0 >= 2.4.0\"") >&5 ($PKG_CONFIG --exists --print-errors "libxml-2.0 >= 2.4.0") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_LIBXML2_LIBS=`$PKG_CONFIG --libs "libxml-2.0 >= 2.4.0" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then LIBXML2_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "libxml-2.0 >= 2.4.0" 2>&1` else LIBXML2_PKG_ERRORS=`$PKG_CONFIG --print-errors "libxml-2.0 >= 2.4.0" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$LIBXML2_PKG_ERRORS" >&5 { { $as_echo "$as_me:$LINENO: error: Package requirements (libxml-2.0 >= 2.4.0) were not met: $LIBXML2_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables LIBXML2_CFLAGS and LIBXML2_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. " >&5 $as_echo "$as_me: error: Package requirements (libxml-2.0 >= 2.4.0) were not met: $LIBXML2_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables LIBXML2_CFLAGS and LIBXML2_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. " >&2;} { (exit 1); exit 1; }; } elif test $pkg_failed = untried; then { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables LIBXML2_CFLAGS and LIBXML2_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see . See \`config.log' for more details." >&5 $as_echo "$as_me: error: The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables LIBXML2_CFLAGS and LIBXML2_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see . See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; }; } else LIBXML2_CFLAGS=$pkg_cv_LIBXML2_CFLAGS LIBXML2_LIBS=$pkg_cv_LIBXML2_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } : fi fi # check openssl if test "$enables_hed" = "yes"; then pkg_failed=no { $as_echo "$as_me:$LINENO: checking for OPENSSL" >&5 $as_echo_n "checking for OPENSSL... 
" >&6; } if test -n "$OPENSSL_CFLAGS"; then pkg_cv_OPENSSL_CFLAGS="$OPENSSL_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"openssl >= 1.0.0\"") >&5 ($PKG_CONFIG --exists --print-errors "openssl >= 1.0.0") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_OPENSSL_CFLAGS=`$PKG_CONFIG --cflags "openssl >= 1.0.0" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$OPENSSL_LIBS"; then pkg_cv_OPENSSL_LIBS="$OPENSSL_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"openssl >= 1.0.0\"") >&5 ($PKG_CONFIG --exists --print-errors "openssl >= 1.0.0") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_OPENSSL_LIBS=`$PKG_CONFIG --libs "openssl >= 1.0.0" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then OPENSSL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "openssl >= 1.0.0" 2>&1` else OPENSSL_PKG_ERRORS=`$PKG_CONFIG --print-errors "openssl >= 1.0.0" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$OPENSSL_PKG_ERRORS" >&5 { { $as_echo "$as_me:$LINENO: error: Package requirements (openssl >= 1.0.0) were not met: $OPENSSL_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables OPENSSL_CFLAGS and OPENSSL_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. " >&5 $as_echo "$as_me: error: Package requirements (openssl >= 1.0.0) were not met: $OPENSSL_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables OPENSSL_CFLAGS and OPENSSL_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. " >&2;} { (exit 1); exit 1; }; } elif test $pkg_failed = untried; then { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables OPENSSL_CFLAGS and OPENSSL_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see . See \`config.log' for more details." >&5 $as_echo "$as_me: error: The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables OPENSSL_CFLAGS and OPENSSL_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see . See \`config.log' for more details." 
>&2;} { (exit 1); exit 1; }; }; } else OPENSSL_CFLAGS=$pkg_cv_OPENSSL_CFLAGS OPENSSL_LIBS=$pkg_cv_OPENSSL_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } : fi pkg_failed=no { $as_echo "$as_me:$LINENO: checking for OPENSSL_1_1" >&5 $as_echo_n "checking for OPENSSL_1_1... " >&6; } if test -n "$OPENSSL_1_1_CFLAGS"; then pkg_cv_OPENSSL_1_1_CFLAGS="$OPENSSL_1_1_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"openssl >= 1.1.0\"") >&5 ($PKG_CONFIG --exists --print-errors "openssl >= 1.1.0") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_OPENSSL_1_1_CFLAGS=`$PKG_CONFIG --cflags "openssl >= 1.1.0" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$OPENSSL_1_1_LIBS"; then pkg_cv_OPENSSL_1_1_LIBS="$OPENSSL_1_1_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"openssl >= 1.1.0\"") >&5 ($PKG_CONFIG --exists --print-errors "openssl >= 1.1.0") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_OPENSSL_1_1_LIBS=`$PKG_CONFIG --libs "openssl >= 1.1.0" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then OPENSSL_1_1_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "openssl >= 1.1.0" 2>&1` else OPENSSL_1_1_PKG_ERRORS=`$PKG_CONFIG --print-errors "openssl >= 1.1.0" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$OPENSSL_1_1_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:$LINENO: OpenSSL is pre-1.1" >&5 $as_echo "$as_me: OpenSSL is pre-1.1" >&6;} elif test $pkg_failed = untried; then { $as_echo "$as_me:$LINENO: OpenSSL is pre-1.1" >&5 $as_echo "$as_me: OpenSSL is pre-1.1" >&6;} else OPENSSL_1_1_CFLAGS=$pkg_cv_OPENSSL_1_1_CFLAGS OPENSSL_1_1_LIBS=$pkg_cv_OPENSSL_1_1_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } OPENSSL_CFLAGS="$OPENSSL_CFLAGS -DOPENSSL_API_COMPAT=0x10100000L" { $as_echo "$as_me:$LINENO: Forcing off deprecated functions for OpenSSL >= 1.1" >&5 $as_echo "$as_me: Forcing off deprecated functions for OpenSSL >= 1.1" >&6;} fi fi # Check for available *_method functions in OpenSSL SAVE_CPPFLAGS=$CPPFLAGS SAVE_LIBS=$LIBS CPPFLAGS="$CPPFLAGS $OPENSSL_CFLAGS" LIBS="$LIBS $OPENSSL_LIBS" ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include void _test(void) { (void)SSLv3_method(); } int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then cat >>confdefs.h <<\_ACEOF #define HAVE_SSLV3_METHOD 1 _ACEOF else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { $as_echo "$as_me:$LINENO: No SSLv3_method function avialable" >&5 $as_echo "$as_me: No SSLv3_method function avialable" >&6;} fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include void _test(void) { (void)TLSv1_method(); } int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then cat >>confdefs.h <<\_ACEOF #define HAVE_TLSV1_METHOD 1 _ACEOF else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { $as_echo "$as_me:$LINENO: No TLSv1_method function avialable" >&5 $as_echo "$as_me: No TLSv1_method function avialable" >&6;} fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include void _test(void) { (void)TLSv1_1_method(); } int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then cat >>confdefs.h <<\_ACEOF #define HAVE_TLSV1_1_METHOD 1 _ACEOF else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { $as_echo "$as_me:$LINENO: No TLSv1_1_method function avialable" >&5 $as_echo "$as_me: No TLSv1_1_method function avialable" >&6;} fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include void _test(void) { (void)TLSv1_2_method(); } int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then cat >>confdefs.h <<\_ACEOF #define HAVE_TLSV1_2_METHOD 1 _ACEOF else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { $as_echo "$as_me:$LINENO: No TLSv1_2_method function avialable" >&5 $as_echo "$as_me: No TLSv1_2_method function avialable" >&6;} fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include void _test(void) { (void)TLS_method(); } int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then cat >>confdefs.h <<\_ACEOF #define HAVE_TLS_METHOD 1 _ACEOF else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { $as_echo "$as_me:$LINENO: No TLS_method function avialable" >&5 $as_echo "$as_me: No TLS_method function avialable" >&6;} fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include void _test(void) { (void)DTLSv1_method(); } int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then cat >>confdefs.h <<\_ACEOF #define HAVE_DTLSV1_METHOD 1 _ACEOF else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { $as_echo "$as_me:$LINENO: No DTLSv1_method function avialable" >&5 $as_echo "$as_me: No DTLSv1_method function avialable" >&6;} fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include void _test(void) { (void)DTLSv1_2_method(); } int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then cat >>confdefs.h <<\_ACEOF #define HAVE_DTLSV1_2_METHOD 1 _ACEOF else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { $as_echo "$as_me:$LINENO: No DTLSv1_2_method function avialable" >&5 $as_echo "$as_me: No DTLSv1_2_method function avialable" >&6;} fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include void _test(void) { (void)DTLS_method(); } int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then cat >>confdefs.h <<\_ACEOF #define HAVE_DTLS_METHOD 1 _ACEOF else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { $as_echo "$as_me:$LINENO: No DTLS_method function avialable" >&5 $as_echo "$as_me: No DTLS_method function avialable" >&6;} fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu CPPFLAGS=$SAVE_CPPFLAGS LIBS=$SAVE_LIBS #check mozilla nss enables_nss=yes NSS_INSTALLED=no # Check whether --enable-nss was given. if test "${enable_nss+set}" = set; then enableval=$enable_nss; enables_nss="$enableval" fi if test "$enables_nss" = "yes"; then pkg_failed=no { $as_echo "$as_me:$LINENO: checking for NSS" >&5 $as_echo_n "checking for NSS... " >&6; } if test -n "$NSS_CFLAGS"; then pkg_cv_NSS_CFLAGS="$NSS_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"nss >= 3.10\"") >&5 ($PKG_CONFIG --exists --print-errors "nss >= 3.10") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_NSS_CFLAGS=`$PKG_CONFIG --cflags "nss >= 3.10" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$NSS_LIBS"; then pkg_cv_NSS_LIBS="$NSS_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"nss >= 3.10\"") >&5 ($PKG_CONFIG --exists --print-errors "nss >= 3.10") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then pkg_cv_NSS_LIBS=`$PKG_CONFIG --libs "nss >= 3.10" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then NSS_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "nss >= 3.10" 2>&1` else NSS_PKG_ERRORS=`$PKG_CONFIG --print-errors "nss >= 3.10" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$NSS_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:$LINENO: WARNING: Cannot locate nss lib" >&5 $as_echo "$as_me: WARNING: Cannot locate nss lib" >&2;} NSS_INSTALLED=no enables_nss=no elif test $pkg_failed = untried; then { $as_echo "$as_me:$LINENO: WARNING: Cannot locate nss lib" >&5 $as_echo "$as_me: WARNING: Cannot locate nss lib" >&2;} NSS_INSTALLED=no enables_nss=no else NSS_CFLAGS=$pkg_cv_NSS_CFLAGS NSS_LIBS=$pkg_cv_NSS_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } NSS_INSTALLED=yes fi if test "x$NSS_INSTALLED" = "xyes" ; then cat >>confdefs.h <<\_ACEOF #define HAVE_NSS /**/ _ACEOF fi fi if test x$NSS_INSTALLED = xyes; then NSS_ENABLED_TRUE= NSS_ENABLED_FALSE='#' else NSS_ENABLED_TRUE='#' NSS_ENABLED_FALSE= fi #check SQLite enables_sqlite=no SQLITE_INSTALLED=no # Check whether --enable-sqlite was given. if test "${enable_sqlite+set}" = set; then enableval=$enable_sqlite; enables_sqlite="$enableval" fi if test "$enables_sqlite" = "yes"; then pkg_failed=no { $as_echo "$as_me:$LINENO: checking for SQLITE" >&5 $as_echo_n "checking for SQLITE... " >&6; } if test -n "$SQLITE_CFLAGS"; then pkg_cv_SQLITE_CFLAGS="$SQLITE_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"sqlite3 >= 3.6\"") >&5 ($PKG_CONFIG --exists --print-errors "sqlite3 >= 3.6") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_SQLITE_CFLAGS=`$PKG_CONFIG --cflags "sqlite3 >= 3.6" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$SQLITE_LIBS"; then pkg_cv_SQLITE_LIBS="$SQLITE_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"sqlite3 >= 3.6\"") >&5 ($PKG_CONFIG --exists --print-errors "sqlite3 >= 3.6") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then pkg_cv_SQLITE_LIBS=`$PKG_CONFIG --libs "sqlite3 >= 3.6" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then SQLITE_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "sqlite3 >= 3.6" 2>&1` else SQLITE_PKG_ERRORS=`$PKG_CONFIG --print-errors "sqlite3 >= 3.6" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$SQLITE_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:$LINENO: WARNING: Cannot locate SQLite newer than 3.6" >&5 $as_echo "$as_me: WARNING: Cannot locate SQLite newer than 3.6" >&2;} SQLITE_INSTALLED=no enables_sqlite=no elif test $pkg_failed = untried; then { $as_echo "$as_me:$LINENO: WARNING: Cannot locate SQLite newer than 3.6" >&5 $as_echo "$as_me: WARNING: Cannot locate SQLite newer than 3.6" >&2;} SQLITE_INSTALLED=no enables_sqlite=no else SQLITE_CFLAGS=$pkg_cv_SQLITE_CFLAGS SQLITE_LIBS=$pkg_cv_SQLITE_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } SQLITE_INSTALLED=yes fi if test "x$SQLITE_INSTALLED" = "xyes" ; then cat >>confdefs.h <<\_ACEOF #define HAVE_SQLITE /**/ _ACEOF # Check for finction available since 3.8 SAVE_CFLAGS=$CFLAGS SAVE_LIBS=$LIBS CFLAGS="$CFLAGS $SQLITE_CFLAGS" LIBS="$LIBS $SQLITE_LIBS" for ac_func in sqlite3_errstr do as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for $ac_func" >&5 $as_echo_n "checking for $ac_func... " >&6; } if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Define $ac_func to an innocuous variant, in case declares $ac_func. For example, HP-UX 11i declares gettimeofday. */ #define $ac_func innocuous_$ac_func /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $ac_func (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $ac_func /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $ac_func (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$ac_func || defined __stub___$ac_func choke me #endif int main () { return $ac_func (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then eval "$as_ac_var=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_var=no" fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext fi ac_res=`eval 'as_val=${'$as_ac_var'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_var'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done CFLAGS=$SAVE_CFLAGS LIBS=$SAVE_LIBS fi fi if test x$SQLITE_INSTALLED = xyes; then SQLITE_ENABLED_TRUE= SQLITE_ENABLED_FALSE='#' else SQLITE_ENABLED_TRUE='#' SQLITE_ENABLED_FALSE= fi #check emi common authentiation library enables_canlxx=no # Check whether --enable-canlxx was given. if test "${enable_canlxx+set}" = set; then enableval=$enable_canlxx; enables_canlxx="$enableval" fi if test "x$enables_canlxx" = "xyes"; then # Check whether --with-canlxx was given. if test "${with_canlxx+set}" = set; then withval=$with_canlxx; if test "x$PKG_CONFIG_PATH" != "x"; then PKG_CONFIG_PATH="$withval/lib/pkgconfig:$PKG_CONFIG_PATH" else PKG_CONFIG_PATH="$withval/lib/pkgconfig" fi fi { $as_echo "$as_me:$LINENO: PKG_CONFIG_PATH for CANL++ is: $PKG_CONFIG_PATH" >&5 $as_echo "$as_me: PKG_CONFIG_PATH for CANL++ is: $PKG_CONFIG_PATH" >&6;} pkg_failed=no { $as_echo "$as_me:$LINENO: checking for CANLXX" >&5 $as_echo_n "checking for CANLXX... " >&6; } if test -n "$CANLXX_CFLAGS"; then pkg_cv_CANLXX_CFLAGS="$CANLXX_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"canl-c++\"") >&5 ($PKG_CONFIG --exists --print-errors "canl-c++") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_CANLXX_CFLAGS=`$PKG_CONFIG --cflags "canl-c++" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$CANLXX_LIBS"; then pkg_cv_CANLXX_LIBS="$CANLXX_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"canl-c++\"") >&5 ($PKG_CONFIG --exists --print-errors "canl-c++") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then pkg_cv_CANLXX_LIBS=`$PKG_CONFIG --libs "canl-c++" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then CANLXX_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "canl-c++" 2>&1` else CANLXX_PKG_ERRORS=`$PKG_CONFIG --print-errors "canl-c++" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$CANLXX_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:$LINENO: Failed to find EMI common authentication libraries" >&5 $as_echo "$as_me: Failed to find EMI common authentication libraries" >&6;} enables_canlxx=no elif test $pkg_failed = untried; then { $as_echo "$as_me:$LINENO: Failed to find EMI common authentication libraries" >&5 $as_echo "$as_me: Failed to find EMI common authentication libraries" >&6;} enables_canlxx=no else CANLXX_CFLAGS=$pkg_cv_CANLXX_CFLAGS CANLXX_LIBS=$pkg_cv_CANLXX_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } : fi fi if test "x$enables_canlxx" = "xyes" ; then cat >>confdefs.h <<\_ACEOF #define HAVE_CANLXX /**/ _ACEOF fi if test "x$enables_canlxx" = "xyes"; then CANLXX_ENABLED_TRUE= CANLXX_ENABLED_FALSE='#' else CANLXX_ENABLED_TRUE='#' CANLXX_ENABLED_FALSE= fi # check cppunit if test "$enables_hed" = "yes"; then # Check whether --enable-cppunit was given. if test "${enable_cppunit+set}" = set; then enableval=$enable_cppunit; enables_cppunit=$enableval fi if test "$enables_cppunit" = "yes"; then pkg_failed=no { $as_echo "$as_me:$LINENO: checking for CPPUNIT" >&5 $as_echo_n "checking for CPPUNIT... " >&6; } if test -n "$CPPUNIT_CFLAGS"; then pkg_cv_CPPUNIT_CFLAGS="$CPPUNIT_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"cppunit\"") >&5 ($PKG_CONFIG --exists --print-errors "cppunit") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_CPPUNIT_CFLAGS=`$PKG_CONFIG --cflags "cppunit" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$CPPUNIT_LIBS"; then pkg_cv_CPPUNIT_LIBS="$CPPUNIT_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"cppunit\"") >&5 ($PKG_CONFIG --exists --print-errors "cppunit") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_CPPUNIT_LIBS=`$PKG_CONFIG --libs "cppunit" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then CPPUNIT_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "cppunit" 2>&1` else CPPUNIT_PKG_ERRORS=`$PKG_CONFIG --print-errors "cppunit" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$CPPUNIT_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } # Extract the first word of "cppunit-config", so it can be a program name with args. set dummy cppunit-config; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if test "${ac_cv_path_CPPUNIT_CONFIG+set}" = set; then $as_echo_n "(cached) " >&6 else case $CPPUNIT_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_CPPUNIT_CONFIG="$CPPUNIT_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_CPPUNIT_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_CPPUNIT_CONFIG" && ac_cv_path_CPPUNIT_CONFIG="no" ;; esac fi CPPUNIT_CONFIG=$ac_cv_path_CPPUNIT_CONFIG if test -n "$CPPUNIT_CONFIG"; then { $as_echo "$as_me:$LINENO: result: $CPPUNIT_CONFIG" >&5 $as_echo "$CPPUNIT_CONFIG" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$CPPUNIT_CONFIG" = "xno"; then { $as_echo "$as_me:$LINENO: WARNING: cppunit-config not found - no UNIT testing will be performed" >&5 $as_echo "$as_me: WARNING: cppunit-config not found - no UNIT testing will be performed" >&2;} CPPUNIT_CFLAGS= CPPUNIT_LIBS= enables_cppunit="no" else CPPUNIT_CFLAGS="`$CPPUNIT_CONFIG --cflags`" CPPUNIT_LIBS="`$CPPUNIT_CONFIG --libs`" fi elif test $pkg_failed = untried; then # Extract the first word of "cppunit-config", so it can be a program name with args. set dummy cppunit-config; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_CPPUNIT_CONFIG+set}" = set; then $as_echo_n "(cached) " >&6 else case $CPPUNIT_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_CPPUNIT_CONFIG="$CPPUNIT_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_CPPUNIT_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_CPPUNIT_CONFIG" && ac_cv_path_CPPUNIT_CONFIG="no" ;; esac fi CPPUNIT_CONFIG=$ac_cv_path_CPPUNIT_CONFIG if test -n "$CPPUNIT_CONFIG"; then { $as_echo "$as_me:$LINENO: result: $CPPUNIT_CONFIG" >&5 $as_echo "$CPPUNIT_CONFIG" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$CPPUNIT_CONFIG" = "xno"; then { $as_echo "$as_me:$LINENO: WARNING: cppunit-config not found - no UNIT testing will be performed" >&5 $as_echo "$as_me: WARNING: cppunit-config not found - no UNIT testing will be performed" >&2;} CPPUNIT_CFLAGS= CPPUNIT_LIBS= enables_cppunit="no" else CPPUNIT_CFLAGS="`$CPPUNIT_CONFIG --cflags`" CPPUNIT_LIBS="`$CPPUNIT_CONFIG --libs`" fi else CPPUNIT_CFLAGS=$pkg_cv_CPPUNIT_CFLAGS CPPUNIT_LIBS=$pkg_cv_CPPUNIT_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } : fi if test "x$CPPUNIT_CONFIG" != "xno" || test "x$CPPUNIT_PKG_ERRORS" != "x" then TEST_DIR=test else enables_cppunit=no TEST_DIR= fi fi else enables_cppunit="no" fi # check ldns library if test "$enables_compute_client" = "yes"; then # Check whether --enable-ldns was given. 
if test "${enable_ldns+set}" = set; then enableval=$enable_ldns; enables_ldns=$enableval fi if test "$enables_ldns" = "yes"; then pkg_failed=no { $as_echo "$as_me:$LINENO: checking for LDNS" >&5 $as_echo_n "checking for LDNS... " >&6; } if test -n "$LDNS_CFLAGS"; then pkg_cv_LDNS_CFLAGS="$LDNS_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"ldns\"") >&5 ($PKG_CONFIG --exists --print-errors "ldns") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_LDNS_CFLAGS=`$PKG_CONFIG --cflags "ldns" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$LDNS_LIBS"; then pkg_cv_LDNS_LIBS="$LDNS_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"ldns\"") >&5 ($PKG_CONFIG --exists --print-errors "ldns") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_LDNS_LIBS=`$PKG_CONFIG --libs "ldns" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then LDNS_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "ldns" 2>&1` else LDNS_PKG_ERRORS=`$PKG_CONFIG --print-errors "ldns" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$LDNS_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } # Extract the first word of "ldns-config", so it can be a program name with args. set dummy ldns-config; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_LDNS_CONFIG+set}" = set; then $as_echo_n "(cached) " >&6 else case $LDNS_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_LDNS_CONFIG="$LDNS_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_LDNS_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_LDNS_CONFIG" && ac_cv_path_LDNS_CONFIG="no" ;; esac fi LDNS_CONFIG=$ac_cv_path_LDNS_CONFIG if test -n "$LDNS_CONFIG"; then { $as_echo "$as_me:$LINENO: result: $LDNS_CONFIG" >&5 $as_echo "$LDNS_CONFIG" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$LDNS_CONFIG" = "xno"; then if test "${ac_cv_header_ldns_ldns_h+set}" = set; then { $as_echo "$as_me:$LINENO: checking for ldns/ldns.h" >&5 $as_echo_n "checking for ldns/ldns.h... " >&6; } if test "${ac_cv_header_ldns_ldns_h+set}" = set; then $as_echo_n "(cached) " >&6 fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_ldns_ldns_h" >&5 $as_echo "$ac_cv_header_ldns_ldns_h" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking ldns/ldns.h usability" >&5 $as_echo_n "checking ldns/ldns.h usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ $ac_includes_default #include _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking ldns/ldns.h presence" >&5 $as_echo_n "checking ldns/ldns.h presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: ldns/ldns.h: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: ldns/ldns.h: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: ldns/ldns.h: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: ldns/ldns.h: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: ldns/ldns.h: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: ldns/ldns.h: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: ldns/ldns.h: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: ldns/ldns.h: check for missing prerequisite headers?" 
>&2;} { $as_echo "$as_me:$LINENO: WARNING: ldns/ldns.h: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: ldns/ldns.h: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: ldns/ldns.h: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: ldns/ldns.h: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: ldns/ldns.h: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: ldns/ldns.h: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: ldns/ldns.h: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: ldns/ldns.h: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for ldns/ldns.h" >&5 $as_echo_n "checking for ldns/ldns.h... " >&6; } if test "${ac_cv_header_ldns_ldns_h+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_header_ldns_ldns_h=$ac_header_preproc fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_ldns_ldns_h" >&5 $as_echo "$ac_cv_header_ldns_ldns_h" >&6; } fi if test "x$ac_cv_header_ldns_ldns_h" = x""yes; then { $as_echo "$as_me:$LINENO: checking for ldns_dname_new_frm_str in -lldns" >&5 $as_echo_n "checking for ldns_dname_new_frm_str in -lldns... " >&6; } if test "${ac_cv_lib_ldns_ldns_dname_new_frm_str+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lldns $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char ldns_dname_new_frm_str (); int main () { return ldns_dname_new_frm_str (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_ldns_ldns_dname_new_frm_str=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_ldns_ldns_dname_new_frm_str=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_ldns_ldns_dname_new_frm_str" >&5 $as_echo "$ac_cv_lib_ldns_ldns_dname_new_frm_str" >&6; } if test "x$ac_cv_lib_ldns_ldns_dname_new_frm_str" = x""yes; then LDNS_CFLAGS="$LDNS_CFLAGS" LDNS_LIBS="$LDNS_LIBS -lldns" else enables_ldns="no" fi else enables_ldns="no" fi else LDNS_CFLAGS="`$LDNS_CONFIG --cflags`" LDNS_LIBS="`$LDNS_CONFIG --libs`" fi elif test $pkg_failed = untried; then # Extract the first word of "ldns-config", so it can be a program name with args. set dummy ldns-config; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_LDNS_CONFIG+set}" = set; then $as_echo_n "(cached) " >&6 else case $LDNS_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_LDNS_CONFIG="$LDNS_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_LDNS_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_LDNS_CONFIG" && ac_cv_path_LDNS_CONFIG="no" ;; esac fi LDNS_CONFIG=$ac_cv_path_LDNS_CONFIG if test -n "$LDNS_CONFIG"; then { $as_echo "$as_me:$LINENO: result: $LDNS_CONFIG" >&5 $as_echo "$LDNS_CONFIG" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$LDNS_CONFIG" = "xno"; then if test "${ac_cv_header_ldns_ldns_h+set}" = set; then { $as_echo "$as_me:$LINENO: checking for ldns/ldns.h" >&5 $as_echo_n "checking for ldns/ldns.h... " >&6; } if test "${ac_cv_header_ldns_ldns_h+set}" = set; then $as_echo_n "(cached) " >&6 fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_ldns_ldns_h" >&5 $as_echo "$ac_cv_header_ldns_ldns_h" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking ldns/ldns.h usability" >&5 $as_echo_n "checking ldns/ldns.h usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking ldns/ldns.h presence" >&5 $as_echo_n "checking ldns/ldns.h presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: ldns/ldns.h: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: ldns/ldns.h: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: ldns/ldns.h: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: ldns/ldns.h: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: ldns/ldns.h: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: ldns/ldns.h: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: ldns/ldns.h: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: ldns/ldns.h: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: ldns/ldns.h: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: ldns/ldns.h: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: ldns/ldns.h: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: ldns/ldns.h: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: ldns/ldns.h: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: ldns/ldns.h: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: ldns/ldns.h: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: ldns/ldns.h: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for ldns/ldns.h" >&5 $as_echo_n "checking for ldns/ldns.h... 
" >&6; } if test "${ac_cv_header_ldns_ldns_h+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_header_ldns_ldns_h=$ac_header_preproc fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_ldns_ldns_h" >&5 $as_echo "$ac_cv_header_ldns_ldns_h" >&6; } fi if test "x$ac_cv_header_ldns_ldns_h" = x""yes; then { $as_echo "$as_me:$LINENO: checking for ldns_dname_new_frm_str in -lldns" >&5 $as_echo_n "checking for ldns_dname_new_frm_str in -lldns... " >&6; } if test "${ac_cv_lib_ldns_ldns_dname_new_frm_str+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lldns $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char ldns_dname_new_frm_str (); int main () { return ldns_dname_new_frm_str (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_ldns_ldns_dname_new_frm_str=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_ldns_ldns_dname_new_frm_str=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_ldns_ldns_dname_new_frm_str" >&5 $as_echo "$ac_cv_lib_ldns_ldns_dname_new_frm_str" >&6; } if test "x$ac_cv_lib_ldns_ldns_dname_new_frm_str" = x""yes; then LDNS_CFLAGS="$LDNS_CFLAGS" LDNS_LIBS="$LDNS_LIBS -lldns" else enables_ldns="no" fi else enables_ldns="no" fi else LDNS_CFLAGS="`$LDNS_CONFIG --cflags`" LDNS_LIBS="`$LDNS_CONFIG --libs`" fi else LDNS_CFLAGS=$pkg_cv_LDNS_CFLAGS LDNS_LIBS=$pkg_cv_LDNS_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } : fi if test "$enables_ldns" = "no"; then { $as_echo "$as_me:$LINENO: WARNING: ldns library was not found. Compute clients will be built without ARCHERY support." >&5 $as_echo "$as_me: WARNING: ldns library was not found. Compute clients will be built without ARCHERY support." 
>&2;} fi fi else enables_ldns="no" fi if test "x$enables_ldns" = "xyes" ; then cat >>confdefs.h <<\_ACEOF #define HAVE_LDNS /**/ _ACEOF else LDNS_CFLAGS= LDNS_LIBS= fi if test "x$enables_ldns" = "xyes"; then LDNS_ENABLED_TRUE= LDNS_ENABLED_FALSE='#' else LDNS_ENABLED_TRUE='#' LDNS_ENABLED_FALSE= fi ############################## # # Check xmlsec1 # ############################# MACOSX="" case "${host}" in *darwin*) MACOSX="yes" ;; esac if test "x$MACOSX" = "xyes"; then cat >>confdefs.h <<\_ACEOF #define _MACOSX /**/ _ACEOF fi if test "x$MACOSX" = "xyes"; then MACOSX_TRUE= MACOSX_FALSE='#' else MACOSX_TRUE='#' MACOSX_FALSE= fi if test "$enables_hed" = "yes"; then XMLSEC_MIN_VERSION="1.2.4" XMLSEC_OPENSSL_MIN_VERSION="1.2.4" XMLSEC_CONFIG="${XMLSEC1_CONFIG:-xmlsec1-config}" XMLSEC_CFLAGS="" XMLSEC_LIBS="" XMLSEC_INSTALLED=no # Check whether --enable-xmlsec1 was given. if test "${enable_xmlsec1+set}" = set; then enableval=$enable_xmlsec1; enables_xmlsec1=$enableval fi if test "x$enables_xmlsec1" = "xyes"; then # Check whether --with-xmlsec1 was given. if test "${with_xmlsec1+set}" = set; then withval=$with_xmlsec1; fi if test "x$with_xmlsec1" = "x" ; then pkg_failed=no { $as_echo "$as_me:$LINENO: checking for XMLSEC" >&5 $as_echo_n "checking for XMLSEC... " >&6; } if test -n "$XMLSEC_CFLAGS"; then pkg_cv_XMLSEC_CFLAGS="$XMLSEC_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"xmlsec1 >= \$XMLSEC_MIN_VERSION\"") >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1 >= $XMLSEC_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_XMLSEC_CFLAGS=`$PKG_CONFIG --cflags "xmlsec1 >= $XMLSEC_MIN_VERSION" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$XMLSEC_LIBS"; then pkg_cv_XMLSEC_LIBS="$XMLSEC_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"xmlsec1 >= \$XMLSEC_MIN_VERSION\"") >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1 >= $XMLSEC_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_XMLSEC_LIBS=`$PKG_CONFIG --libs "xmlsec1 >= $XMLSEC_MIN_VERSION" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then XMLSEC_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "xmlsec1 >= $XMLSEC_MIN_VERSION" 2>&1` else XMLSEC_PKG_ERRORS=`$PKG_CONFIG --print-errors "xmlsec1 >= $XMLSEC_MIN_VERSION" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$XMLSEC_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } XMLSEC_INSTALLED=no elif test $pkg_failed = untried; then XMLSEC_INSTALLED=no else XMLSEC_CFLAGS=$pkg_cv_XMLSEC_CFLAGS XMLSEC_LIBS=$pkg_cv_XMLSEC_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } XMLSEC_INSTALLED=yes fi if test "x$XMLSEC_INSTALLED" = "xyes" ; then pkg_failed=no { $as_echo "$as_me:$LINENO: checking for XMLSEC_OPENSSL" >&5 $as_echo_n "checking for XMLSEC_OPENSSL... 
" >&6; } if test -n "$XMLSEC_OPENSSL_CFLAGS"; then pkg_cv_XMLSEC_OPENSSL_CFLAGS="$XMLSEC_OPENSSL_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"xmlsec1-openssl >= \$XMLSEC_OPENSSL_MIN_VERSION\"") >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_XMLSEC_OPENSSL_CFLAGS=`$PKG_CONFIG --cflags "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$XMLSEC_OPENSSL_LIBS"; then pkg_cv_XMLSEC_OPENSSL_LIBS="$XMLSEC_OPENSSL_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"xmlsec1-openssl >= \$XMLSEC_OPENSSL_MIN_VERSION\"") >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_XMLSEC_OPENSSL_LIBS=`$PKG_CONFIG --libs "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then XMLSEC_OPENSSL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>&1` else XMLSEC_OPENSSL_PKG_ERRORS=`$PKG_CONFIG --print-errors "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$XMLSEC_OPENSSL_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } XMLSEC_INSTALLED=no elif test $pkg_failed = untried; then XMLSEC_INSTALLED=no else XMLSEC_OPENSSL_CFLAGS=$pkg_cv_XMLSEC_OPENSSL_CFLAGS XMLSEC_OPENSSL_LIBS=$pkg_cv_XMLSEC_OPENSSL_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } XMLSEC_INSTALLED=yes fi fi # Find number of backslashes in XMLSEC_CFLAGS n=$(echo $XMLSEC_CFLAGS|sed 's/.*-DXMLSEC_CRYPTO=\([^ ]*\).*/\1/'|tr -d '[A-Za-z0-1\n"]'| wc -c) # Fixes due to bugs in pkg-config and/or xmlsec1 # # 0: Indicates a bug in pkg-config which removes the escaping of the quotes # 2: Correct value with escaped quotes # 6: Old xmlsec1 version which used 3 back-slashes to escape quotes # See eg. https://bugzilla.redhat.com/show_bug.cgi?id=675334 # Make sure that the quotes are escaped with single backslash if test $n = 0 -o $n = 6; then { $as_echo "$as_me:$LINENO: Working around bad combination of pkgconfig and xmlsec1 with $n back-slashes" >&5 $as_echo "$as_me: Working around bad combination of pkgconfig and xmlsec1 with $n back-slashes" >&6;} XMLSEC_CFLAGS=$(echo $XMLSEC_CFLAGS|sed 's/\(.*-DXMLSEC_CRYPTO=\)\\*"\([^ \\"]*\)\\*" \(.*\)/\1\\"\2\\" \3/') XMLSEC_OPENSSL_CFLAGS=$(echo $XMLSEC_OPENSSL_CFLAGS|sed 's/\(.*-DXMLSEC_CRYPTO=\)\\*"\([^ \\"]*\)\\*" \(.*\)/\1\\"\2\\" \3/') fi fi if test "x$XMLSEC_INSTALLED" = "xno" -a "x$MACOSX" != "xyes"; then { $as_echo "$as_me:$LINENO: checking for xmlsec1 libraries >= $XMLSEC_MIN_VERSION" >&5 $as_echo_n "checking for xmlsec1 libraries >= $XMLSEC_MIN_VERSION... " >&6; } if test "x$with_xmlsec1" != "x" ; then XMLSEC_CONFIG=$with_xmlsec1/bin/$XMLSEC_CONFIG fi "$XMLSEC_CONFIG" --version 2>/dev/null 1>/dev/null if test "$?" 
!= '0' ; then { $as_echo "$as_me:$LINENO: WARNING: Could not find xmlsec1 anywhere; The xml security related functionality will not be compiled" >&5 $as_echo "$as_me: WARNING: Could not find xmlsec1 anywhere; The xml security related functionality will not be compiled" >&2;} else vers=`$XMLSEC_CONFIG --version 2>/dev/null | awk -F. '{ printf "%d", ($1 * 1000 + $2) * 1000 + $3;}'` minvers=`echo $XMLSEC_MIN_VERSION | awk -F. '{ printf "%d", ($1 * 1000 + $2) * 1000 + $3;}'` if test "$vers" -ge "$minvers" ; then XMLSEC_LIBS="`$XMLSEC_CONFIG --libs`" XMLSEC_CFLAGS="`$XMLSEC_CONFIG --cflags`" #check the xmlsec1-openssl here if test "x$PKG_CONFIG_PATH" != "x"; then PKG_CONFIG_PATH="$with_xmlsec1/lib/pkgconfig:$PKG_CONFIG_PATH" else PKG_CONFIG_PATH="$with_xmlsec1/lib/pkgconfig" fi pkg_failed=no { $as_echo "$as_me:$LINENO: checking for XMLSEC_OPENSSL" >&5 $as_echo_n "checking for XMLSEC_OPENSSL... " >&6; } if test -n "$XMLSEC_OPENSSL_CFLAGS"; then pkg_cv_XMLSEC_OPENSSL_CFLAGS="$XMLSEC_OPENSSL_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"xmlsec1-openssl >= \$XMLSEC_OPENSSL_MIN_VERSION\"") >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_XMLSEC_OPENSSL_CFLAGS=`$PKG_CONFIG --cflags "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$XMLSEC_OPENSSL_LIBS"; then pkg_cv_XMLSEC_OPENSSL_LIBS="$XMLSEC_OPENSSL_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"xmlsec1-openssl >= \$XMLSEC_OPENSSL_MIN_VERSION\"") >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then pkg_cv_XMLSEC_OPENSSL_LIBS=`$PKG_CONFIG --libs "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then XMLSEC_OPENSSL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>&1` else XMLSEC_OPENSSL_PKG_ERRORS=`$PKG_CONFIG --print-errors "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$XMLSEC_OPENSSL_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } XMLSEC_INSTALLED=no elif test $pkg_failed = untried; then XMLSEC_INSTALLED=no else XMLSEC_OPENSSL_CFLAGS=$pkg_cv_XMLSEC_OPENSSL_CFLAGS XMLSEC_OPENSSL_LIBS=$pkg_cv_XMLSEC_OPENSSL_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } XMLSEC_INSTALLED=yes fi else { $as_echo "$as_me:$LINENO: WARNING: You need at least xmlsec1 $XMLSEC_MIN_VERSION for this version of arc1" >&5 $as_echo "$as_me: WARNING: You need at least xmlsec1 $XMLSEC_MIN_VERSION for this version of arc1" >&2;} fi fi elif test "x$XMLSEC_INSTALLED" = "xno" -a "x$MACOSX" = "xyes"; then #MACOSX has no "ldd" which is needed by xmlsec1-config, so here simply we use PKG_CHECK_MODULES if test "x$PKG_CONFIG_PATH" != "x"; then PKG_CONFIG_PATH="$with_xmlsec1/lib/pkgconfig:$PKG_CONFIG_PATH" else PKG_CONFIG_PATH="$with_xmlsec1/lib/pkgconfig" fi pkg_failed=no { $as_echo "$as_me:$LINENO: checking for XMLSEC" >&5 $as_echo_n "checking for XMLSEC... " >&6; } if test -n "$XMLSEC_CFLAGS"; then pkg_cv_XMLSEC_CFLAGS="$XMLSEC_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"xmlsec1 >= \$XMLSEC_MIN_VERSION\"") >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1 >= $XMLSEC_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_XMLSEC_CFLAGS=`$PKG_CONFIG --cflags "xmlsec1 >= $XMLSEC_MIN_VERSION" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$XMLSEC_LIBS"; then pkg_cv_XMLSEC_LIBS="$XMLSEC_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"xmlsec1 >= \$XMLSEC_MIN_VERSION\"") >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1 >= $XMLSEC_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then pkg_cv_XMLSEC_LIBS=`$PKG_CONFIG --libs "xmlsec1 >= $XMLSEC_MIN_VERSION" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then XMLSEC_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "xmlsec1 >= $XMLSEC_MIN_VERSION" 2>&1` else XMLSEC_PKG_ERRORS=`$PKG_CONFIG --print-errors "xmlsec1 >= $XMLSEC_MIN_VERSION" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$XMLSEC_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } XMLSEC_INSTALLED=no elif test $pkg_failed = untried; then XMLSEC_INSTALLED=no else XMLSEC_CFLAGS=$pkg_cv_XMLSEC_CFLAGS XMLSEC_LIBS=$pkg_cv_XMLSEC_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } XMLSEC_INSTALLED=yes fi if test "x$XMLSEC_INSTALLED" = "xyes" ; then pkg_failed=no { $as_echo "$as_me:$LINENO: checking for XMLSEC_OPENSSL" >&5 $as_echo_n "checking for XMLSEC_OPENSSL... " >&6; } if test -n "$XMLSEC_OPENSSL_CFLAGS"; then pkg_cv_XMLSEC_OPENSSL_CFLAGS="$XMLSEC_OPENSSL_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"xmlsec1-openssl >= \$XMLSEC_OPENSSL_MIN_VERSION\"") >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_XMLSEC_OPENSSL_CFLAGS=`$PKG_CONFIG --cflags "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$XMLSEC_OPENSSL_LIBS"; then pkg_cv_XMLSEC_OPENSSL_LIBS="$XMLSEC_OPENSSL_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"xmlsec1-openssl >= \$XMLSEC_OPENSSL_MIN_VERSION\"") >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then pkg_cv_XMLSEC_OPENSSL_LIBS=`$PKG_CONFIG --libs "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then XMLSEC_OPENSSL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>&1` else XMLSEC_OPENSSL_PKG_ERRORS=`$PKG_CONFIG --print-errors "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$XMLSEC_OPENSSL_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } XMLSEC_INSTALLED=no elif test $pkg_failed = untried; then XMLSEC_INSTALLED=no else XMLSEC_OPENSSL_CFLAGS=$pkg_cv_XMLSEC_OPENSSL_CFLAGS XMLSEC_OPENSSL_LIBS=$pkg_cv_XMLSEC_OPENSSL_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } XMLSEC_INSTALLED=yes fi fi fi #AC_SUBST(XMLSEC_CONFIG) #AC_SUBST(XMLSEC_MIN_VERSION) enables_xmlsec1="$XMLSEC_INSTALLED" fi else enables_xmlsec1="no" fi ######################### # # Check libmysqlclient # ######################### MYSQL_INSTALLED=no if test "$enables_hed" = "yes"; then MYSQL_CONFIG="mysql_config" MYSQL_CFLAGS="" MYSQL_LIBS="" # Check whether --enable-mysql was given. if test "${enable_mysql+set}" = set; then enableval=$enable_mysql; enables_mysql="$enableval" fi # Ask user for path to libmysqlclient if test "x$enables_mysql" = "xyes"; then # Check whether --with-mysql was given. if test "${with_mysql+set}" = set; then withval=$with_mysql; fi { $as_echo "$as_me:$LINENO: checking for mysql client library" >&5 $as_echo_n "checking for mysql client library... " >&6; } if test "x$with_mysql" != "x" ; then MYSQL_CONFIG=$with_mysql/bin/$MYSQL_CONFIG fi if ! $MYSQL_CONFIG --version > /dev/null 2>&1 ; then { { $as_echo "$as_me:$LINENO: error: Could not find mysql C library anywhere (see config.log for details)." >&5 $as_echo "$as_me: error: Could not find mysql C library anywhere (see config.log for details)." >&2;} { (exit 1); exit 1; }; } fi MYSQL_LIBS="`$MYSQL_CONFIG --libs`" MYSQL_CFLAGS="`$MYSQL_CONFIG --cflags`" MYSQL_INSTALLED="yes" enables_mysql=$MYSQL_INSTALLED fi { $as_echo "$as_me:$LINENO: MySQL client library enabled: $MYSQL_INSTALLED" >&5 $as_echo "$as_me: MySQL client library enabled: $MYSQL_INSTALLED" >&6;} fi if test "x$MYSQL_INSTALLED" = "xyes"; then MYSQL_LIBRARY_ENABLED_TRUE= MYSQL_LIBRARY_ENABLED_FALSE='#' else MYSQL_LIBRARY_ENABLED_TRUE='#' MYSQL_LIBRARY_ENABLED_FALSE= fi # Check ldap-monitor # Check whether --enable-ldap_monitor was given. if test "${enable_ldap_monitor+set}" = set; then enableval=$enable_ldap_monitor; enables_ldap_monitor="$enableval" fi if test "x$enables_ldap_monitor" = "xyes"; then # Check whether --with-ldap_monitor was given. if test "${with_ldap_monitor+set}" = set; then withval=$with_ldap_monitor; fi { $as_echo "$as_me:$LINENO: checking for ldap-monitor installation path" >&5 $as_echo_n "checking for ldap-monitor installation path... " >&6; } if test "x$with_ldap_monitor" != "x" ; then ldap_monitor_prefix=$with_ldap_monitor else ldap_monitor_prefix=${datadir}/arc/ldap-monitor fi { $as_echo "$as_me:$LINENO: result: $ldap_monitor_prefix" >&5 $as_echo "$ldap_monitor_prefix" >&6; } fi # Check ws-monitor # Check whether --enable-ws_monitor was given. 
if test "${enable_ws_monitor+set}" = set; then enableval=$enable_ws_monitor; enables_ws_monitor="$enableval" fi if test "x$enables_ws_monitor" = "xyes"; then # Check whether --with-ws_monitor was given. if test "${with_ws_monitor+set}" = set; then withval=$with_ws_monitor; fi { $as_echo "$as_me:$LINENO: checking for ws-monitor installation path" >&5 $as_echo_n "checking for ws-monitor installation path... " >&6; } if test "x$with_ws_monitor" != "x" ; then ws_monitor_prefix=$with_ws_monitor else ws_monitor_prefix=${datadir}/arc/ws-monitor fi { $as_echo "$as_me:$LINENO: result: $ws_monitor_prefix" >&5 $as_echo "$ws_monitor_prefix" >&6; } fi # check zlib ZLIB_CFLAGS= ZLIB_LDFLAGS= ZLIB_LIBS= if test "$enables_hed" = "yes"; then SAVE_CPPFLAGS=$CPPFLAGS SAVE_LDFLAGS=$LDFLAGS # Check whether --with-zlib was given. if test "${with_zlib+set}" = set; then withval=$with_zlib; if test -d "$withval"; then ZLIB_CFLAGS="${CPPFLAGS} -I$withval/include" ZLIB_LDFLAGS="${LDFLAGS} -L$withval/lib" fi fi CPPFLAGS="$CPPFLAGS $ZLIB_CFLAGS" LDFLAGS="$LDFLAGS $ZLIB_LDFLAGS" if test "${ac_cv_header_zlib_h+set}" = set; then { $as_echo "$as_me:$LINENO: checking for zlib.h" >&5 $as_echo_n "checking for zlib.h... " >&6; } if test "${ac_cv_header_zlib_h+set}" = set; then $as_echo_n "(cached) " >&6 fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_zlib_h" >&5 $as_echo "$ac_cv_header_zlib_h" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking zlib.h usability" >&5 $as_echo_n "checking zlib.h usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking zlib.h presence" >&5 $as_echo_n "checking zlib.h presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! 
-s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: zlib.h: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: zlib.h: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: zlib.h: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: zlib.h: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: zlib.h: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: zlib.h: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: zlib.h: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: zlib.h: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: zlib.h: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: zlib.h: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: zlib.h: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: zlib.h: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: zlib.h: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: zlib.h: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: zlib.h: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: zlib.h: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for zlib.h" >&5 $as_echo_n "checking for zlib.h... " >&6; } if test "${ac_cv_header_zlib_h+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_header_zlib_h=$ac_header_preproc fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_zlib_h" >&5 $as_echo "$ac_cv_header_zlib_h" >&6; } fi if test "x$ac_cv_header_zlib_h" = x""yes; then ZLIB_CFLAGS="$ZLIB_CFLAGS" else { { $as_echo "$as_me:$LINENO: error: unable to find zlib header files" >&5 $as_echo "$as_me: error: unable to find zlib header files" >&2;} { (exit 1); exit 1; }; } fi { $as_echo "$as_me:$LINENO: checking for deflateInit2_ in -lz" >&5 $as_echo_n "checking for deflateInit2_ in -lz... " >&6; } if test "${ac_cv_lib_z_deflateInit2_+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lz $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char deflateInit2_ (); int main () { return deflateInit2_ (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_z_deflateInit2_=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_z_deflateInit2_=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_z_deflateInit2_" >&5 $as_echo "$ac_cv_lib_z_deflateInit2_" >&6; } if test "x$ac_cv_lib_z_deflateInit2_" = x""yes; then ZLIB_LIBS="$ZLIB_LDFLAGS -lz" else { { $as_echo "$as_me:$LINENO: error: unable to link with zlib library" >&5 $as_echo "$as_me: error: unable to link with zlib library" >&2;} { (exit 1); exit 1; }; } fi CPPFLAGS=$SAVE_CPPFLAGS LDFLAGS=$SAVE_LDFLAGS fi # check ARGUS ARGUS_CFLAGS= ARGUS_LIBS= # Check whether --enable-argus was given. if test "${enable_argus+set}" = set; then enableval=$enable_argus; enables_argus="$enableval" fi if test "x$enables_argus" = "xyes"; then # Check whether --with-argus was given. if test "${with_argus+set}" = set; then withval=$with_argus; if test "x$PKG_CONFIG_PATH" != "x"; then PKG_CONFIG_PATH="$withval/lib/pkgconfig:$PKG_CONFIG_PATH" else PKG_CONFIG_PATH="$withval/lib/pkgconfig" fi fi pkg_failed=no { $as_echo "$as_me:$LINENO: checking for ARGUS" >&5 $as_echo_n "checking for ARGUS... " >&6; } if test -n "$ARGUS_CFLAGS"; then pkg_cv_ARGUS_CFLAGS="$ARGUS_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"libargus-pep >= 2.0.0\"") >&5 ($PKG_CONFIG --exists --print-errors "libargus-pep >= 2.0.0") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_ARGUS_CFLAGS=`$PKG_CONFIG --cflags "libargus-pep >= 2.0.0" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$ARGUS_LIBS"; then pkg_cv_ARGUS_LIBS="$ARGUS_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"libargus-pep >= 2.0.0\"") >&5 ($PKG_CONFIG --exists --print-errors "libargus-pep >= 2.0.0") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then pkg_cv_ARGUS_LIBS=`$PKG_CONFIG --libs "libargus-pep >= 2.0.0" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then ARGUS_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "libargus-pep >= 2.0.0" 2>&1` else ARGUS_PKG_ERRORS=`$PKG_CONFIG --print-errors "libargus-pep >= 2.0.0" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$ARGUS_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:$LINENO: Failed to find Argus PEP libraries with version >= 2" >&5 $as_echo "$as_me: Failed to find Argus PEP libraries with version >= 2" >&6;} enables_argus=no elif test $pkg_failed = untried; then { $as_echo "$as_me:$LINENO: Failed to find Argus PEP libraries with version >= 2" >&5 $as_echo "$as_me: Failed to find Argus PEP libraries with version >= 2" >&6;} enables_argus=no else ARGUS_CFLAGS=$pkg_cv_ARGUS_CFLAGS ARGUS_LIBS=$pkg_cv_ARGUS_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } : fi fi if test "x$enables_argus" = "xyes"; then ARGUS_ENABLED_TRUE= ARGUS_ENABLED_FALSE='#' else ARGUS_ENABLED_TRUE='#' ARGUS_ENABLED_FALSE= fi ############################################### # # Check for Berkeley DB C++ # ############################################### DBCXX_LIBS="" DBCXX_CPPFLAGS= if test "$enables_hed" = "yes"; then # # Allow the user to specify db_cxx.h location (we will still check though) # dbcxx_include_paths= # Check whether --with-dbcxx-include was given. if test "${with_dbcxx_include+set}" = set; then withval=$with_dbcxx_include; if test "x$withval" = "xyes" ; then { { $as_echo "$as_me:$LINENO: error: --with-dbcxx-include requires PATH argument" >&5 $as_echo "$as_me: error: --with-dbcxx-include requires PATH argument" >&2;} { (exit 1); exit 1; }; } fi if test "x$withval" != "xno" ; then dbcxx_include_paths=$withval fi fi # # Allow the user to specify DB4 library location (we will still check though) # db4_library_path= # Check whether --with-db4-library-path was given. if test "${with_db4_library_path+set}" = set; then withval=$with_db4_library_path; if test "x$withval" = "xyes" ; then { { $as_echo "$as_me:$LINENO: error: --with-db4-library-path requires PATH argument" >&5 $as_echo "$as_me: error: --with-db4-library-path requires PATH argument" >&2;} { (exit 1); exit 1; }; } fi if test "x$withval" != "xno" ; then db4_library_path=$withval fi fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu # # If user did not specify location we start by searching at the standard locations # if test "x$dbcxx_include_paths" = "x" then { $as_echo "$as_me:$LINENO: Looking for db_cxx.h in standard locations" >&5 $as_echo "$as_me: Looking for db_cxx.h in standard locations" >&6;} for ac_header in db_cxx.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... 
" >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_cxx_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" 
>&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF HAVE_DBCXX=yes else HAVE_DBCXX=no fi done # If the user did not provide a location we have some good suggestions dbcxx_include_paths="/usr/include/db4 /usr/include/db44 /usr/include/db43" else HAVE_DBCXX=no fi # # Now Look for db_cxx.h in non-standard locations # if test "$HAVE_DBCXX" = no then for dbcxx_dir in $dbcxx_include_paths do SAVE_CPPFLAGS=$CPPFLAGS DBCXX_CPPFLAGS=-I$dbcxx_dir CPPFLAGS="$CPPFLAGS $DBCXX_CPPFLAGS" # Disable Autoconf caching unset ac_cv_header_db_cxx_h { $as_echo "$as_me:$LINENO: Looking for db_cxx.h in $dbcxx_dir" >&5 $as_echo "$as_me: Looking for db_cxx.h in $dbcxx_dir" >&6;} for ac_header in db_cxx.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_cxx_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... 
" >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF HAVE_DBCXX=yes else HAVE_DBCXX=no fi done CPPFLAGS=$SAVE_CPPFLAGS # If a db_cxx.h was found we break and keep the current value of DBCXX_CPPFLAGS if test "$HAVE_DBCXX" = yes then break fi DBCXX_CPPFLAGS= done fi if test "x$db4_library_path" != "x" then db4_library_path="-L$db4_library_path" fi if test "$HAVE_DBCXX" = no then DBCXX_LIBS="" else SAVE_LDFLAGS=$LDFLAGS SAVE_CXXFLAGS=$CXXFLAGS case $host in *-*-mingw*) CXXFLAGS="-I$dbcxx_dir $CXXFLAGS" ;; *) # pthread needed for RH9 LDFLAGS="$LDFLAGS -lpthread" ;; esac LDFLAGS="$LDFLAGS $db4_library_path" for db_ver in "" -4.7 -4.3 -4.2 do as_ac_Lib=`$as_echo "ac_cv_lib_db_cxx$db_ver''_main" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for main in -ldb_cxx$db_ver" >&5 $as_echo_n "checking for main in -ldb_cxx$db_ver... " >&6; } if { as_var=$as_ac_Lib; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldb_cxx$db_ver $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { return main (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then eval "$as_ac_Lib=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_Lib=no" fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi ac_res=`eval 'as_val=${'$as_ac_Lib'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_Lib'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then DBCXX_LIBS="$db4_library_path -ldb_cxx$db_ver" else DBCXX_LIBS="" fi if test "$DBCXX_LIBS" = "" then { $as_echo "$as_me:$LINENO: WARNING: BerkeleyDB library libdb_cxx$db_ver was not found!" >&5 $as_echo "$as_me: WARNING: BerkeleyDB library libdb_cxx$db_ver was not found!" >&2;} else break fi done if test "$DBCXX_LIBS" = "" then { $as_echo "$as_me:$LINENO: WARNING: No BerkeleyDB library found!" >&5 $as_echo "$as_me: WARNING: No BerkeleyDB library found!" >&2;} fi LDFLAGS=$SAVE_LDFLAGS CXXFLAGS=$SAVE_CXXFLAGS fi if test ! 
"x$DBCXX_LIBS" = "x" then # Mingw need -I$dbcxx_dir cat >>confdefs.h <<\_ACEOF #define HAVE_DBCXX /**/ _ACEOF SAVE_CXXFLAGS=$CXXFLAGS CXXFLAGS="-I$dbcxx_dir $CXXFLAGS" { $as_echo "$as_me:$LINENO: checking whether the Berkeley DB has DbDeadlockException" >&5 $as_echo_n "checking whether the Berkeley DB has DbDeadlockException... " >&6; } if test "${ac_cv_dbcxx_dbdeadlockexception+set}" = set; then $as_echo_n "(cached) " >&6 else ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include int main () { try { } catch(DbDeadlockException&) { }; return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_dbcxx_have_dbdeadlockexception=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_dbcxx_have_dbdeadlockexception=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu fi { $as_echo "$as_me:$LINENO: result: $ac_cv_dbcxx_dbdeadlockexception" >&5 $as_echo "$ac_cv_dbcxx_dbdeadlockexception" >&6; } if test "$ac_cv_dbcxx_have_dbdeadlockexception" = yes; then cat >>confdefs.h <<\_ACEOF #define HAVE_DBDEADLOCKEXCEPTION /**/ _ACEOF fi CXXFLAGS=$SAVE_CXXFLAGS fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu fi # DBJSTORE (storing jobs information in BDB) # Check whether --enable-dbjstore was given. 
if test "${enable_dbjstore+set}" = set; then enableval=$enable_dbjstore; enables_dbjstore=$enableval fi if test "$enables_dbjstore" = "yes"; then if test "x$DBCXX_LIBS" = "x" ; then { $as_echo "$as_me:$LINENO: For storing jobs in BDB C++ API is needed (dbcxx) - disabling" >&5 $as_echo "$as_me: For storing jobs in BDB C++ API is needed (dbcxx) - disabling" >&6;} enables_dbjstore="no" fi fi { $as_echo "$as_me:$LINENO: Storing jobs in BDB enabled: $enables_dbjstore" >&5 $as_echo "$as_me: Storing jobs in BDB enabled: $enables_dbjstore" >&6;} if test "x$enables_dbjstore" = "xyes"; then DBJSTORE_ENABLED_TRUE= DBJSTORE_ENABLED_FALSE='#' else DBJSTORE_ENABLED_TRUE='#' DBJSTORE_ENABLED_FALSE= fi if test "x$enables_dbjstore" = "xyes"; then cat >>confdefs.h <<\_ACEOF #define DBJSTORE_ENABLED /**/ _ACEOF fi # globus/gpt packages if test "$enables_hed" = "yes"; then if test "x$ac_cv_env_GLOBUS_MAKEFILE_HEADER_set" != "xset"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}globus-makefile-header", so it can be a program name with args. set dummy ${ac_tool_prefix}globus-makefile-header; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_GLOBUS_MAKEFILE_HEADER+set}" = set; then $as_echo_n "(cached) " >&6 else case $GLOBUS_MAKEFILE_HEADER in [\\/]* | ?:[\\/]*) ac_cv_path_GLOBUS_MAKEFILE_HEADER="$GLOBUS_MAKEFILE_HEADER" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_dummy="$PATH:/opt/globus/bin" for as_dir in $as_dummy do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_GLOBUS_MAKEFILE_HEADER="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi GLOBUS_MAKEFILE_HEADER=$ac_cv_path_GLOBUS_MAKEFILE_HEADER if test -n "$GLOBUS_MAKEFILE_HEADER"; then { $as_echo "$as_me:$LINENO: result: $GLOBUS_MAKEFILE_HEADER" >&5 $as_echo "$GLOBUS_MAKEFILE_HEADER" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_path_GLOBUS_MAKEFILE_HEADER"; then ac_pt_GLOBUS_MAKEFILE_HEADER=$GLOBUS_MAKEFILE_HEADER # Extract the first word of "globus-makefile-header", so it can be a program name with args. set dummy globus-makefile-header; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_ac_pt_GLOBUS_MAKEFILE_HEADER+set}" = set; then $as_echo_n "(cached) " >&6 else case $ac_pt_GLOBUS_MAKEFILE_HEADER in [\\/]* | ?:[\\/]*) ac_cv_path_ac_pt_GLOBUS_MAKEFILE_HEADER="$ac_pt_GLOBUS_MAKEFILE_HEADER" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_dummy="$PATH:/opt/globus/bin" for as_dir in $as_dummy do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_ac_pt_GLOBUS_MAKEFILE_HEADER="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi ac_pt_GLOBUS_MAKEFILE_HEADER=$ac_cv_path_ac_pt_GLOBUS_MAKEFILE_HEADER if test -n "$ac_pt_GLOBUS_MAKEFILE_HEADER"; then { $as_echo "$as_me:$LINENO: result: $ac_pt_GLOBUS_MAKEFILE_HEADER" >&5 $as_echo "$ac_pt_GLOBUS_MAKEFILE_HEADER" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_pt_GLOBUS_MAKEFILE_HEADER" = x; then GLOBUS_MAKEFILE_HEADER="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac GLOBUS_MAKEFILE_HEADER=$ac_pt_GLOBUS_MAKEFILE_HEADER fi else GLOBUS_MAKEFILE_HEADER="$ac_cv_path_GLOBUS_MAKEFILE_HEADER" fi fi if test -f "$GLOBUS_MAKEFILE_HEADER" && test "x$GLOBUS_LOCATION" = "x"; then GLOBUS_LOCATION=`dirname $GLOBUS_MAKEFILE_HEADER` GLOBUS_LOCATION=`dirname $GLOBUS_LOCATION` export GLOBUS_LOCATION fi if test "x$ac_cv_env_GPT_FLAVOR_CONFIGURATION_set" != "xset"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gpt-flavor-configuration", so it can be a program name with args. set dummy ${ac_tool_prefix}gpt-flavor-configuration; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_GPT_FLAVOR_CONFIGURATION+set}" = set; then $as_echo_n "(cached) " >&6 else case $GPT_FLAVOR_CONFIGURATION in [\\/]* | ?:[\\/]*) ac_cv_path_GPT_FLAVOR_CONFIGURATION="$GPT_FLAVOR_CONFIGURATION" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_dummy="$PATH:/usr/sbin:/opt/gpt/sbin" for as_dir in $as_dummy do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_GPT_FLAVOR_CONFIGURATION="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi GPT_FLAVOR_CONFIGURATION=$ac_cv_path_GPT_FLAVOR_CONFIGURATION if test -n "$GPT_FLAVOR_CONFIGURATION"; then { $as_echo "$as_me:$LINENO: result: $GPT_FLAVOR_CONFIGURATION" >&5 $as_echo "$GPT_FLAVOR_CONFIGURATION" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_path_GPT_FLAVOR_CONFIGURATION"; then ac_pt_GPT_FLAVOR_CONFIGURATION=$GPT_FLAVOR_CONFIGURATION # Extract the first word of "gpt-flavor-configuration", so it can be a program name with args. set dummy gpt-flavor-configuration; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_ac_pt_GPT_FLAVOR_CONFIGURATION+set}" = set; then $as_echo_n "(cached) " >&6 else case $ac_pt_GPT_FLAVOR_CONFIGURATION in [\\/]* | ?:[\\/]*) ac_cv_path_ac_pt_GPT_FLAVOR_CONFIGURATION="$ac_pt_GPT_FLAVOR_CONFIGURATION" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_dummy="$PATH:/usr/sbin:/opt/gpt/sbin" for as_dir in $as_dummy do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_ac_pt_GPT_FLAVOR_CONFIGURATION="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi ac_pt_GPT_FLAVOR_CONFIGURATION=$ac_cv_path_ac_pt_GPT_FLAVOR_CONFIGURATION if test -n "$ac_pt_GPT_FLAVOR_CONFIGURATION"; then { $as_echo "$as_me:$LINENO: result: $ac_pt_GPT_FLAVOR_CONFIGURATION" >&5 $as_echo "$ac_pt_GPT_FLAVOR_CONFIGURATION" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_pt_GPT_FLAVOR_CONFIGURATION" = x; then GPT_FLAVOR_CONFIGURATION="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac GPT_FLAVOR_CONFIGURATION=$ac_pt_GPT_FLAVOR_CONFIGURATION fi else GPT_FLAVOR_CONFIGURATION="$ac_cv_path_GPT_FLAVOR_CONFIGURATION" fi fi if test -f "$GPT_FLAVOR_CONFIGURATION" && test "x$GPT_LOCATION" = "x"; then GPT_LOCATION=`dirname $GPT_FLAVOR_CONFIGURATION` GPT_LOCATION=`dirname $GPT_LOCATION` export GPT_LOCATION fi { $as_echo "$as_me:$LINENO: checking for gpt flavor" >&5 $as_echo_n "checking for gpt flavor... " >&6; } # Check whether --with-flavor was given. if test "${with_flavor+set}" = set; then withval=$with_flavor; GPT_FLAVOR=$withval else if test -n "$GPT_FLAVOR_CONFIGURATION" ; then GPT_FLAVOR=`$GPT_FLAVOR_CONFIGURATION | \\ grep '^[a-zA-Z].*:$' | cut -f1 -d: | grep thr | tail -1` fi fi if test -n "$GPT_FLAVOR"; then { $as_echo "$as_me:$LINENO: result: $GPT_FLAVOR" >&5 $as_echo "$GPT_FLAVOR" >&6; } else { $as_echo "$as_me:$LINENO: result: none detected, is globus_core-devel installed?" >&5 $as_echo "none detected, is globus_core-devel installed?" >&6; } fi if test "x$ac_cv_env_GPT_QUERY_set" != "xset"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gpt-query", so it can be a program name with args. set dummy ${ac_tool_prefix}gpt-query; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_GPT_QUERY+set}" = set; then $as_echo_n "(cached) " >&6 else case $GPT_QUERY in [\\/]* | ?:[\\/]*) ac_cv_path_GPT_QUERY="$GPT_QUERY" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_dummy="$PATH:/usr/sbin:/opt/gpt/sbin" for as_dir in $as_dummy do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_GPT_QUERY="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi GPT_QUERY=$ac_cv_path_GPT_QUERY if test -n "$GPT_QUERY"; then { $as_echo "$as_me:$LINENO: result: $GPT_QUERY" >&5 $as_echo "$GPT_QUERY" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_path_GPT_QUERY"; then ac_pt_GPT_QUERY=$GPT_QUERY # Extract the first word of "gpt-query", so it can be a program name with args. set dummy gpt-query; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if test "${ac_cv_path_ac_pt_GPT_QUERY+set}" = set; then $as_echo_n "(cached) " >&6 else case $ac_pt_GPT_QUERY in [\\/]* | ?:[\\/]*) ac_cv_path_ac_pt_GPT_QUERY="$ac_pt_GPT_QUERY" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_dummy="$PATH:/usr/sbin:/opt/gpt/sbin" for as_dir in $as_dummy do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_ac_pt_GPT_QUERY="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi ac_pt_GPT_QUERY=$ac_cv_path_ac_pt_GPT_QUERY if test -n "$ac_pt_GPT_QUERY"; then { $as_echo "$as_me:$LINENO: result: $ac_pt_GPT_QUERY" >&5 $as_echo "$ac_pt_GPT_QUERY" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_pt_GPT_QUERY" = x; then GPT_QUERY="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac GPT_QUERY=$ac_pt_GPT_QUERY fi else GPT_QUERY="$ac_cv_path_GPT_QUERY" fi fi if test -f "$GPT_QUERY" && test "x$GPT_LOCATION" = "x"; then GPT_LOCATION=`dirname $GPT_QUERY` GPT_LOCATION=`dirname $GPT_LOCATION` export GPT_LOCATION fi pkg_failed=no { $as_echo "$as_me:$LINENO: checking for GLOBUS_COMMON" >&5 $as_echo_n "checking for GLOBUS_COMMON... " >&6; } if test -n "$GLOBUS_COMMON_CFLAGS"; then pkg_cv_GLOBUS_COMMON_CFLAGS="$GLOBUS_COMMON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-common\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-common") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_COMMON_CFLAGS=`$PKG_CONFIG --cflags "globus-common" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_COMMON_LIBS"; then pkg_cv_GLOBUS_COMMON_LIBS="$GLOBUS_COMMON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-common\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-common") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_COMMON_LIBS=`$PKG_CONFIG --libs "globus-common" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_COMMON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "globus-common" 2>&1` else GLOBUS_COMMON_PKG_ERRORS=`$PKG_CONFIG --print-errors "globus-common" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_COMMON_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:$LINENO: checking for globus_common" >&5 $as_echo_n "checking for globus_common... 
" >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_common_version=`$GPT_QUERY globus_common-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_common_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_common | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_common_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_common_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_common_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_common_version" >&5 $as_echo "$gpt_cv_globus_common_version" >&6; } GLOBUS_COMMON_VERSION=$gpt_cv_globus_common_version GLOBUS_COMMON_LIBS=$gpt_cv_globus_common_libs GLOBUS_COMMON_CFLAGS=$gpt_cv_globus_common_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:$LINENO: checking for globus_common" >&5 $as_echo_n "checking for globus_common... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_common_version=`$GPT_QUERY globus_common-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_common_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_common | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_common_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_common_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_common_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_common_version" >&5 $as_echo "$gpt_cv_globus_common_version" >&6; } GLOBUS_COMMON_VERSION=$gpt_cv_globus_common_version GLOBUS_COMMON_LIBS=$gpt_cv_globus_common_libs GLOBUS_COMMON_CFLAGS=$gpt_cv_globus_common_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_COMMON_CFLAGS=$pkg_cv_GLOBUS_COMMON_CFLAGS GLOBUS_COMMON_LIBS=$pkg_cv_GLOBUS_COMMON_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_COMMON_VERSION=`$PKG_CONFIG --modversion globus-common` fi pkg_failed=no { $as_echo "$as_me:$LINENO: checking for GLOBUS_GSSAPI_GSI" >&5 $as_echo_n "checking for GLOBUS_GSSAPI_GSI... " >&6; } if test -n "$GLOBUS_GSSAPI_GSI_CFLAGS"; then pkg_cv_GLOBUS_GSSAPI_GSI_CFLAGS="$GLOBUS_GSSAPI_GSI_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-gssapi-gsi\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-gssapi-gsi") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_GSSAPI_GSI_CFLAGS=`$PKG_CONFIG --cflags "globus-gssapi-gsi" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_GSSAPI_GSI_LIBS"; then pkg_cv_GLOBUS_GSSAPI_GSI_LIBS="$GLOBUS_GSSAPI_GSI_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-gssapi-gsi\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-gssapi-gsi") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_GSSAPI_GSI_LIBS=`$PKG_CONFIG --libs "globus-gssapi-gsi" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_GSSAPI_GSI_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "globus-gssapi-gsi" 2>&1` else GLOBUS_GSSAPI_GSI_PKG_ERRORS=`$PKG_CONFIG --print-errors "globus-gssapi-gsi" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_GSSAPI_GSI_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:$LINENO: checking for globus_gssapi_gsi" >&5 $as_echo_n "checking for globus_gssapi_gsi... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gssapi_gsi_version=`$GPT_QUERY globus_gssapi_gsi-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gssapi_gsi_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gssapi_gsi | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gssapi_gsi_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gssapi_gsi_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gssapi_gsi_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_gssapi_gsi_version" >&5 $as_echo "$gpt_cv_globus_gssapi_gsi_version" >&6; } GLOBUS_GSSAPI_GSI_VERSION=$gpt_cv_globus_gssapi_gsi_version GLOBUS_GSSAPI_GSI_LIBS=$gpt_cv_globus_gssapi_gsi_libs GLOBUS_GSSAPI_GSI_CFLAGS=$gpt_cv_globus_gssapi_gsi_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:$LINENO: checking for globus_gssapi_gsi" >&5 $as_echo_n "checking for globus_gssapi_gsi... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gssapi_gsi_version=`$GPT_QUERY globus_gssapi_gsi-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gssapi_gsi_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gssapi_gsi | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gssapi_gsi_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gssapi_gsi_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gssapi_gsi_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_gssapi_gsi_version" >&5 $as_echo "$gpt_cv_globus_gssapi_gsi_version" >&6; } GLOBUS_GSSAPI_GSI_VERSION=$gpt_cv_globus_gssapi_gsi_version GLOBUS_GSSAPI_GSI_LIBS=$gpt_cv_globus_gssapi_gsi_libs GLOBUS_GSSAPI_GSI_CFLAGS=$gpt_cv_globus_gssapi_gsi_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_GSSAPI_GSI_CFLAGS=$pkg_cv_GLOBUS_GSSAPI_GSI_CFLAGS GLOBUS_GSSAPI_GSI_LIBS=$pkg_cv_GLOBUS_GSSAPI_GSI_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_GSSAPI_GSI_VERSION=`$PKG_CONFIG --modversion globus-gssapi-gsi` fi pkg_failed=no { $as_echo "$as_me:$LINENO: checking for GLOBUS_GSS_ASSIST" >&5 $as_echo_n "checking for GLOBUS_GSS_ASSIST... 
" >&6; } if test -n "$GLOBUS_GSS_ASSIST_CFLAGS"; then pkg_cv_GLOBUS_GSS_ASSIST_CFLAGS="$GLOBUS_GSS_ASSIST_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-gss-assist\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-gss-assist") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_GSS_ASSIST_CFLAGS=`$PKG_CONFIG --cflags "globus-gss-assist" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_GSS_ASSIST_LIBS"; then pkg_cv_GLOBUS_GSS_ASSIST_LIBS="$GLOBUS_GSS_ASSIST_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-gss-assist\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-gss-assist") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_GSS_ASSIST_LIBS=`$PKG_CONFIG --libs "globus-gss-assist" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_GSS_ASSIST_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "globus-gss-assist" 2>&1` else GLOBUS_GSS_ASSIST_PKG_ERRORS=`$PKG_CONFIG --print-errors "globus-gss-assist" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_GSS_ASSIST_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:$LINENO: checking for globus_gss_assist" >&5 $as_echo_n "checking for globus_gss_assist... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gss_assist_version=`$GPT_QUERY globus_gss_assist-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gss_assist_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gss_assist | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gss_assist_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gss_assist_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gss_assist_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_gss_assist_version" >&5 $as_echo "$gpt_cv_globus_gss_assist_version" >&6; } GLOBUS_GSS_ASSIST_VERSION=$gpt_cv_globus_gss_assist_version GLOBUS_GSS_ASSIST_LIBS=$gpt_cv_globus_gss_assist_libs GLOBUS_GSS_ASSIST_CFLAGS=$gpt_cv_globus_gss_assist_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:$LINENO: checking for globus_gss_assist" >&5 $as_echo_n "checking for globus_gss_assist... 
" >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gss_assist_version=`$GPT_QUERY globus_gss_assist-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gss_assist_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gss_assist | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gss_assist_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gss_assist_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gss_assist_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_gss_assist_version" >&5 $as_echo "$gpt_cv_globus_gss_assist_version" >&6; } GLOBUS_GSS_ASSIST_VERSION=$gpt_cv_globus_gss_assist_version GLOBUS_GSS_ASSIST_LIBS=$gpt_cv_globus_gss_assist_libs GLOBUS_GSS_ASSIST_CFLAGS=$gpt_cv_globus_gss_assist_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_GSS_ASSIST_CFLAGS=$pkg_cv_GLOBUS_GSS_ASSIST_CFLAGS GLOBUS_GSS_ASSIST_LIBS=$pkg_cv_GLOBUS_GSS_ASSIST_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_GSS_ASSIST_VERSION=`$PKG_CONFIG --modversion globus-gss-assist` fi pkg_failed=no { $as_echo "$as_me:$LINENO: checking for GLOBUS_GSI_CALLBACK" >&5 $as_echo_n "checking for GLOBUS_GSI_CALLBACK... " >&6; } if test -n "$GLOBUS_GSI_CALLBACK_CFLAGS"; then pkg_cv_GLOBUS_GSI_CALLBACK_CFLAGS="$GLOBUS_GSI_CALLBACK_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-gsi-callback\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-gsi-callback") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_GSI_CALLBACK_CFLAGS=`$PKG_CONFIG --cflags "globus-gsi-callback" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_GSI_CALLBACK_LIBS"; then pkg_cv_GLOBUS_GSI_CALLBACK_LIBS="$GLOBUS_GSI_CALLBACK_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-gsi-callback\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-gsi-callback") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_GSI_CALLBACK_LIBS=`$PKG_CONFIG --libs "globus-gsi-callback" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_GSI_CALLBACK_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "globus-gsi-callback" 2>&1` else GLOBUS_GSI_CALLBACK_PKG_ERRORS=`$PKG_CONFIG --print-errors "globus-gsi-callback" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_GSI_CALLBACK_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:$LINENO: checking for globus_gsi_callback" >&5 $as_echo_n "checking for globus_gsi_callback... 
" >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gsi_callback_version=`$GPT_QUERY globus_gsi_callback-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gsi_callback_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gsi_callback | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gsi_callback_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gsi_callback_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gsi_callback_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_gsi_callback_version" >&5 $as_echo "$gpt_cv_globus_gsi_callback_version" >&6; } GLOBUS_GSI_CALLBACK_VERSION=$gpt_cv_globus_gsi_callback_version GLOBUS_GSI_CALLBACK_LIBS=$gpt_cv_globus_gsi_callback_libs GLOBUS_GSI_CALLBACK_CFLAGS=$gpt_cv_globus_gsi_callback_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:$LINENO: checking for globus_gsi_callback" >&5 $as_echo_n "checking for globus_gsi_callback... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gsi_callback_version=`$GPT_QUERY globus_gsi_callback-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gsi_callback_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gsi_callback | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gsi_callback_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gsi_callback_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gsi_callback_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_gsi_callback_version" >&5 $as_echo "$gpt_cv_globus_gsi_callback_version" >&6; } GLOBUS_GSI_CALLBACK_VERSION=$gpt_cv_globus_gsi_callback_version GLOBUS_GSI_CALLBACK_LIBS=$gpt_cv_globus_gsi_callback_libs GLOBUS_GSI_CALLBACK_CFLAGS=$gpt_cv_globus_gsi_callback_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_GSI_CALLBACK_CFLAGS=$pkg_cv_GLOBUS_GSI_CALLBACK_CFLAGS GLOBUS_GSI_CALLBACK_LIBS=$pkg_cv_GLOBUS_GSI_CALLBACK_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_GSI_CALLBACK_VERSION=`$PKG_CONFIG --modversion globus-gsi-callback` fi pkg_failed=no { $as_echo "$as_me:$LINENO: checking for GLOBUS_FTP_CLIENT" >&5 $as_echo_n "checking for GLOBUS_FTP_CLIENT... " >&6; } if test -n "$GLOBUS_FTP_CLIENT_CFLAGS"; then pkg_cv_GLOBUS_FTP_CLIENT_CFLAGS="$GLOBUS_FTP_CLIENT_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-ftp-client\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-ftp-client") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_FTP_CLIENT_CFLAGS=`$PKG_CONFIG --cflags "globus-ftp-client" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_FTP_CLIENT_LIBS"; then pkg_cv_GLOBUS_FTP_CLIENT_LIBS="$GLOBUS_FTP_CLIENT_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-ftp-client\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-ftp-client") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_FTP_CLIENT_LIBS=`$PKG_CONFIG --libs "globus-ftp-client" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_FTP_CLIENT_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "globus-ftp-client" 2>&1` else GLOBUS_FTP_CLIENT_PKG_ERRORS=`$PKG_CONFIG --print-errors "globus-ftp-client" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_FTP_CLIENT_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:$LINENO: checking for globus_ftp_client" >&5 $as_echo_n "checking for globus_ftp_client... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_ftp_client_version=`$GPT_QUERY globus_ftp_client-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_ftp_client_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_ftp_client | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_ftp_client_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_ftp_client_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_ftp_client_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_ftp_client_version" >&5 $as_echo "$gpt_cv_globus_ftp_client_version" >&6; } GLOBUS_FTP_CLIENT_VERSION=$gpt_cv_globus_ftp_client_version GLOBUS_FTP_CLIENT_LIBS=$gpt_cv_globus_ftp_client_libs GLOBUS_FTP_CLIENT_CFLAGS=$gpt_cv_globus_ftp_client_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:$LINENO: checking for globus_ftp_client" >&5 $as_echo_n "checking for globus_ftp_client... 
" >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_ftp_client_version=`$GPT_QUERY globus_ftp_client-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_ftp_client_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_ftp_client | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_ftp_client_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_ftp_client_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_ftp_client_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_ftp_client_version" >&5 $as_echo "$gpt_cv_globus_ftp_client_version" >&6; } GLOBUS_FTP_CLIENT_VERSION=$gpt_cv_globus_ftp_client_version GLOBUS_FTP_CLIENT_LIBS=$gpt_cv_globus_ftp_client_libs GLOBUS_FTP_CLIENT_CFLAGS=$gpt_cv_globus_ftp_client_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_FTP_CLIENT_CFLAGS=$pkg_cv_GLOBUS_FTP_CLIENT_CFLAGS GLOBUS_FTP_CLIENT_LIBS=$pkg_cv_GLOBUS_FTP_CLIENT_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_FTP_CLIENT_VERSION=`$PKG_CONFIG --modversion globus-ftp-client` fi pkg_failed=no { $as_echo "$as_me:$LINENO: checking for GLOBUS_FTP_CONTROL" >&5 $as_echo_n "checking for GLOBUS_FTP_CONTROL... " >&6; } if test -n "$GLOBUS_FTP_CONTROL_CFLAGS"; then pkg_cv_GLOBUS_FTP_CONTROL_CFLAGS="$GLOBUS_FTP_CONTROL_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-ftp-control\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-ftp-control") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_FTP_CONTROL_CFLAGS=`$PKG_CONFIG --cflags "globus-ftp-control" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_FTP_CONTROL_LIBS"; then pkg_cv_GLOBUS_FTP_CONTROL_LIBS="$GLOBUS_FTP_CONTROL_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-ftp-control\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-ftp-control") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_FTP_CONTROL_LIBS=`$PKG_CONFIG --libs "globus-ftp-control" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_FTP_CONTROL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "globus-ftp-control" 2>&1` else GLOBUS_FTP_CONTROL_PKG_ERRORS=`$PKG_CONFIG --print-errors "globus-ftp-control" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_FTP_CONTROL_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:$LINENO: checking for globus_ftp_control" >&5 $as_echo_n "checking for globus_ftp_control... 
" >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_ftp_control_version=`$GPT_QUERY globus_ftp_control-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_ftp_control_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_ftp_control | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_ftp_control_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_ftp_control_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_ftp_control_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_ftp_control_version" >&5 $as_echo "$gpt_cv_globus_ftp_control_version" >&6; } GLOBUS_FTP_CONTROL_VERSION=$gpt_cv_globus_ftp_control_version GLOBUS_FTP_CONTROL_LIBS=$gpt_cv_globus_ftp_control_libs GLOBUS_FTP_CONTROL_CFLAGS=$gpt_cv_globus_ftp_control_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:$LINENO: checking for globus_ftp_control" >&5 $as_echo_n "checking for globus_ftp_control... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_ftp_control_version=`$GPT_QUERY globus_ftp_control-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_ftp_control_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_ftp_control | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_ftp_control_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_ftp_control_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_ftp_control_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_ftp_control_version" >&5 $as_echo "$gpt_cv_globus_ftp_control_version" >&6; } GLOBUS_FTP_CONTROL_VERSION=$gpt_cv_globus_ftp_control_version GLOBUS_FTP_CONTROL_LIBS=$gpt_cv_globus_ftp_control_libs GLOBUS_FTP_CONTROL_CFLAGS=$gpt_cv_globus_ftp_control_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_FTP_CONTROL_CFLAGS=$pkg_cv_GLOBUS_FTP_CONTROL_CFLAGS GLOBUS_FTP_CONTROL_LIBS=$pkg_cv_GLOBUS_FTP_CONTROL_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_FTP_CONTROL_VERSION=`$PKG_CONFIG --modversion globus-ftp-control` fi pkg_failed=no { $as_echo "$as_me:$LINENO: checking for GLOBUS_IO" >&5 $as_echo_n "checking for GLOBUS_IO... " >&6; } if test -n "$GLOBUS_IO_CFLAGS"; then pkg_cv_GLOBUS_IO_CFLAGS="$GLOBUS_IO_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-io\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-io") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_IO_CFLAGS=`$PKG_CONFIG --cflags "globus-io" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_IO_LIBS"; then pkg_cv_GLOBUS_IO_LIBS="$GLOBUS_IO_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-io\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-io") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_IO_LIBS=`$PKG_CONFIG --libs "globus-io" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_IO_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "globus-io" 2>&1` else GLOBUS_IO_PKG_ERRORS=`$PKG_CONFIG --print-errors "globus-io" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_IO_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:$LINENO: checking for globus_io" >&5 $as_echo_n "checking for globus_io... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_io_version=`$GPT_QUERY globus_io-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_io_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_io | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_io_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_io_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_io_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_io_version" >&5 $as_echo "$gpt_cv_globus_io_version" >&6; } GLOBUS_IO_VERSION=$gpt_cv_globus_io_version GLOBUS_IO_LIBS=$gpt_cv_globus_io_libs GLOBUS_IO_CFLAGS=$gpt_cv_globus_io_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:$LINENO: checking for globus_io" >&5 $as_echo_n "checking for globus_io... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_io_version=`$GPT_QUERY globus_io-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_io_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_io | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_io_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_io_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_io_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_io_version" >&5 $as_echo "$gpt_cv_globus_io_version" >&6; } GLOBUS_IO_VERSION=$gpt_cv_globus_io_version GLOBUS_IO_LIBS=$gpt_cv_globus_io_libs GLOBUS_IO_CFLAGS=$gpt_cv_globus_io_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_IO_CFLAGS=$pkg_cv_GLOBUS_IO_CFLAGS GLOBUS_IO_LIBS=$pkg_cv_GLOBUS_IO_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_IO_VERSION=`$PKG_CONFIG --modversion globus-io` fi pkg_failed=no { $as_echo "$as_me:$LINENO: checking for GLOBUS_GSI_CERT_UTILS" >&5 $as_echo_n "checking for GLOBUS_GSI_CERT_UTILS... " >&6; } if test -n "$GLOBUS_GSI_CERT_UTILS_CFLAGS"; then pkg_cv_GLOBUS_GSI_CERT_UTILS_CFLAGS="$GLOBUS_GSI_CERT_UTILS_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-gsi-cert-utils\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-gsi-cert-utils") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_GSI_CERT_UTILS_CFLAGS=`$PKG_CONFIG --cflags "globus-gsi-cert-utils" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_GSI_CERT_UTILS_LIBS"; then pkg_cv_GLOBUS_GSI_CERT_UTILS_LIBS="$GLOBUS_GSI_CERT_UTILS_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-gsi-cert-utils\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-gsi-cert-utils") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_GSI_CERT_UTILS_LIBS=`$PKG_CONFIG --libs "globus-gsi-cert-utils" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_GSI_CERT_UTILS_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "globus-gsi-cert-utils" 2>&1` else GLOBUS_GSI_CERT_UTILS_PKG_ERRORS=`$PKG_CONFIG --print-errors "globus-gsi-cert-utils" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_GSI_CERT_UTILS_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:$LINENO: checking for globus_gsi_cert_utils" >&5 $as_echo_n "checking for globus_gsi_cert_utils... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gsi_cert_utils_version=`$GPT_QUERY globus_gsi_cert_utils-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gsi_cert_utils_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gsi_cert_utils | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gsi_cert_utils_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gsi_cert_utils_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gsi_cert_utils_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_gsi_cert_utils_version" >&5 $as_echo "$gpt_cv_globus_gsi_cert_utils_version" >&6; } GLOBUS_GSI_CERT_UTILS_VERSION=$gpt_cv_globus_gsi_cert_utils_version GLOBUS_GSI_CERT_UTILS_LIBS=$gpt_cv_globus_gsi_cert_utils_libs GLOBUS_GSI_CERT_UTILS_CFLAGS=$gpt_cv_globus_gsi_cert_utils_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:$LINENO: checking for globus_gsi_cert_utils" >&5 $as_echo_n "checking for globus_gsi_cert_utils... 
" >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gsi_cert_utils_version=`$GPT_QUERY globus_gsi_cert_utils-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gsi_cert_utils_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gsi_cert_utils | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gsi_cert_utils_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gsi_cert_utils_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gsi_cert_utils_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_gsi_cert_utils_version" >&5 $as_echo "$gpt_cv_globus_gsi_cert_utils_version" >&6; } GLOBUS_GSI_CERT_UTILS_VERSION=$gpt_cv_globus_gsi_cert_utils_version GLOBUS_GSI_CERT_UTILS_LIBS=$gpt_cv_globus_gsi_cert_utils_libs GLOBUS_GSI_CERT_UTILS_CFLAGS=$gpt_cv_globus_gsi_cert_utils_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_GSI_CERT_UTILS_CFLAGS=$pkg_cv_GLOBUS_GSI_CERT_UTILS_CFLAGS GLOBUS_GSI_CERT_UTILS_LIBS=$pkg_cv_GLOBUS_GSI_CERT_UTILS_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_GSI_CERT_UTILS_VERSION=`$PKG_CONFIG --modversion globus-gsi-cert-utils` fi pkg_failed=no { $as_echo "$as_me:$LINENO: checking for GLOBUS_GSI_CREDENTIAL" >&5 $as_echo_n "checking for GLOBUS_GSI_CREDENTIAL... " >&6; } if test -n "$GLOBUS_GSI_CREDENTIAL_CFLAGS"; then pkg_cv_GLOBUS_GSI_CREDENTIAL_CFLAGS="$GLOBUS_GSI_CREDENTIAL_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-gsi-credential\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-gsi-credential") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_GSI_CREDENTIAL_CFLAGS=`$PKG_CONFIG --cflags "globus-gsi-credential" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_GSI_CREDENTIAL_LIBS"; then pkg_cv_GLOBUS_GSI_CREDENTIAL_LIBS="$GLOBUS_GSI_CREDENTIAL_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-gsi-credential\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-gsi-credential") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_GSI_CREDENTIAL_LIBS=`$PKG_CONFIG --libs "globus-gsi-credential" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_GSI_CREDENTIAL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "globus-gsi-credential" 2>&1` else GLOBUS_GSI_CREDENTIAL_PKG_ERRORS=`$PKG_CONFIG --print-errors "globus-gsi-credential" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_GSI_CREDENTIAL_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:$LINENO: checking for globus_gsi_credential" >&5 $as_echo_n "checking for globus_gsi_credential... 
" >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gsi_credential_version=`$GPT_QUERY globus_gsi_credential-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gsi_credential_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gsi_credential | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gsi_credential_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gsi_credential_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gsi_credential_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_gsi_credential_version" >&5 $as_echo "$gpt_cv_globus_gsi_credential_version" >&6; } GLOBUS_GSI_CREDENTIAL_VERSION=$gpt_cv_globus_gsi_credential_version GLOBUS_GSI_CREDENTIAL_LIBS=$gpt_cv_globus_gsi_credential_libs GLOBUS_GSI_CREDENTIAL_CFLAGS=$gpt_cv_globus_gsi_credential_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:$LINENO: checking for globus_gsi_credential" >&5 $as_echo_n "checking for globus_gsi_credential... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gsi_credential_version=`$GPT_QUERY globus_gsi_credential-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gsi_credential_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gsi_credential | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gsi_credential_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gsi_credential_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gsi_credential_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_gsi_credential_version" >&5 $as_echo "$gpt_cv_globus_gsi_credential_version" >&6; } GLOBUS_GSI_CREDENTIAL_VERSION=$gpt_cv_globus_gsi_credential_version GLOBUS_GSI_CREDENTIAL_LIBS=$gpt_cv_globus_gsi_credential_libs GLOBUS_GSI_CREDENTIAL_CFLAGS=$gpt_cv_globus_gsi_credential_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_GSI_CREDENTIAL_CFLAGS=$pkg_cv_GLOBUS_GSI_CREDENTIAL_CFLAGS GLOBUS_GSI_CREDENTIAL_LIBS=$pkg_cv_GLOBUS_GSI_CREDENTIAL_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_GSI_CREDENTIAL_VERSION=`$PKG_CONFIG --modversion globus-gsi-credential` fi pkg_failed=no { $as_echo "$as_me:$LINENO: checking for GLOBUS_OPENSSL_MODULE" >&5 $as_echo_n "checking for GLOBUS_OPENSSL_MODULE... " >&6; } if test -n "$GLOBUS_OPENSSL_MODULE_CFLAGS"; then pkg_cv_GLOBUS_OPENSSL_MODULE_CFLAGS="$GLOBUS_OPENSSL_MODULE_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-openssl-module\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-openssl-module") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_OPENSSL_MODULE_CFLAGS=`$PKG_CONFIG --cflags "globus-openssl-module" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_OPENSSL_MODULE_LIBS"; then pkg_cv_GLOBUS_OPENSSL_MODULE_LIBS="$GLOBUS_OPENSSL_MODULE_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-openssl-module\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-openssl-module") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_OPENSSL_MODULE_LIBS=`$PKG_CONFIG --libs "globus-openssl-module" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_OPENSSL_MODULE_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "globus-openssl-module" 2>&1` else GLOBUS_OPENSSL_MODULE_PKG_ERRORS=`$PKG_CONFIG --print-errors "globus-openssl-module" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_OPENSSL_MODULE_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:$LINENO: checking for globus_openssl_module" >&5 $as_echo_n "checking for globus_openssl_module... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_openssl_module_version=`$GPT_QUERY globus_openssl_module-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_openssl_module_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_openssl_module | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_openssl_module_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_openssl_module_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_openssl_module_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_openssl_module_version" >&5 $as_echo "$gpt_cv_globus_openssl_module_version" >&6; } GLOBUS_OPENSSL_MODULE_VERSION=$gpt_cv_globus_openssl_module_version GLOBUS_OPENSSL_MODULE_LIBS=$gpt_cv_globus_openssl_module_libs GLOBUS_OPENSSL_MODULE_CFLAGS=$gpt_cv_globus_openssl_module_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:$LINENO: checking for globus_openssl_module" >&5 $as_echo_n "checking for globus_openssl_module... 
" >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_openssl_module_version=`$GPT_QUERY globus_openssl_module-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_openssl_module_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_openssl_module | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_openssl_module_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_openssl_module_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_openssl_module_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_openssl_module_version" >&5 $as_echo "$gpt_cv_globus_openssl_module_version" >&6; } GLOBUS_OPENSSL_MODULE_VERSION=$gpt_cv_globus_openssl_module_version GLOBUS_OPENSSL_MODULE_LIBS=$gpt_cv_globus_openssl_module_libs GLOBUS_OPENSSL_MODULE_CFLAGS=$gpt_cv_globus_openssl_module_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_OPENSSL_MODULE_CFLAGS=$pkg_cv_GLOBUS_OPENSSL_MODULE_CFLAGS GLOBUS_OPENSSL_MODULE_LIBS=$pkg_cv_GLOBUS_OPENSSL_MODULE_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_OPENSSL_MODULE_VERSION=`$PKG_CONFIG --modversion globus-openssl-module` fi # Check for new globus thread model selection SAVE_CFLAGS=$CFLAGS SAVE_LIBS=$LIBS CFLAGS="$CFLAGS $GLOBUS_COMMON_CFLAGS" LIBS="$LIBS $GLOBUS_COMMON_LIBS" for ac_func in globus_thread_set_model do as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for $ac_func" >&5 $as_echo_n "checking for $ac_func... " >&6; } if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Define $ac_func to an innocuous variant, in case declares $ac_func. For example, HP-UX 11i declares gettimeofday. */ #define $ac_func innocuous_$ac_func /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $ac_func (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $ac_func /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $ac_func (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$ac_func || defined __stub___$ac_func choke me #endif int main () { return $ac_func (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then eval "$as_ac_var=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_var=no" fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext fi ac_res=`eval 'as_val=${'$as_ac_var'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_var'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done CFLAGS=$SAVE_CFLAGS LIBS=$SAVE_LIBS # Check for gridftp-v2 SAVE_CFLAGS=$CFLAGS SAVE_LIBS=$LIBS CFLAGS="$CFLAGS $GLOBUS_FTP_CLIENT_CFLAGS" LIBS="$LIBS $GLOBUS_FTP_CLIENT_LIBS" for ac_func in globus_ftp_client_handleattr_set_gridftp2 do as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for $ac_func" >&5 $as_echo_n "checking for $ac_func... " >&6; } if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Define $ac_func to an innocuous variant, in case <limits.h> declares $ac_func. For example, HP-UX 11i <limits.h> declares gettimeofday. */ #define $ac_func innocuous_$ac_func /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $ac_func (); below. Prefer <limits.h> to <assert.h> if __STDC__ is defined, since <limits.h> exists even on freestanding compilers. */ #ifdef __STDC__ # include <limits.h> #else # include <assert.h> #endif #undef $ac_func /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $ac_func (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$ac_func || defined __stub___$ac_func choke me #endif int main () { return $ac_func (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test !
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then eval "$as_ac_var=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_var=no" fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext fi ac_res=`eval 'as_val=${'$as_ac_var'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_var'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done CFLAGS=$SAVE_CFLAGS LIBS=$SAVE_LIBS globus_openssl_detected= pkg_failed=no { $as_echo "$as_me:$LINENO: checking for GLOBUS_OPENSSL" >&5 $as_echo_n "checking for GLOBUS_OPENSSL... " >&6; } if test -n "$GLOBUS_OPENSSL_CFLAGS"; then pkg_cv_GLOBUS_OPENSSL_CFLAGS="$GLOBUS_OPENSSL_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-openssl\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-openssl") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_OPENSSL_CFLAGS=`$PKG_CONFIG --cflags "globus-openssl" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_OPENSSL_LIBS"; then pkg_cv_GLOBUS_OPENSSL_LIBS="$GLOBUS_OPENSSL_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"globus-openssl\"") >&5 ($PKG_CONFIG --exists --print-errors "globus-openssl") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GLOBUS_OPENSSL_LIBS=`$PKG_CONFIG --libs "globus-openssl" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_OPENSSL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "globus-openssl" 2>&1` else GLOBUS_OPENSSL_PKG_ERRORS=`$PKG_CONFIG --print-errors "globus-openssl" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_OPENSSL_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:$LINENO: checking for globus_openssl" >&5 $as_echo_n "checking for globus_openssl... 
" >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_openssl_version=`$GPT_QUERY globus_openssl-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_openssl_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_openssl | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_openssl_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_openssl_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_openssl_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_openssl_version" >&5 $as_echo "$gpt_cv_globus_openssl_version" >&6; } GLOBUS_OPENSSL_VERSION=$gpt_cv_globus_openssl_version GLOBUS_OPENSSL_LIBS=$gpt_cv_globus_openssl_libs GLOBUS_OPENSSL_CFLAGS=$gpt_cv_globus_openssl_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:$LINENO: checking for globus_openssl" >&5 $as_echo_n "checking for globus_openssl... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_openssl_version=`$GPT_QUERY globus_openssl-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_openssl_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_openssl | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_openssl_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_openssl_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_openssl_version"; then { $as_echo "$as_me:$LINENO: result: $gpt_cv_globus_openssl_version" >&5 $as_echo "$gpt_cv_globus_openssl_version" >&6; } GLOBUS_OPENSSL_VERSION=$gpt_cv_globus_openssl_version GLOBUS_OPENSSL_LIBS=$gpt_cv_globus_openssl_libs GLOBUS_OPENSSL_CFLAGS=$gpt_cv_globus_openssl_cflags else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_OPENSSL_CFLAGS=$pkg_cv_GLOBUS_OPENSSL_CFLAGS GLOBUS_OPENSSL_LIBS=$pkg_cv_GLOBUS_OPENSSL_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_OPENSSL_VERSION=`$PKG_CONFIG --modversion globus-openssl` fi if test ! "x$GLOBUS_OPENSSL_LIBS" = "x" ; then globus_openssl_detected=`echo "$GLOBUS_OPENSSL_LIBS" | grep "lssl_$GPT_FLAVOR"` if test ! "x$globus_openssl_detected" = "x" ; then globus_openssl_detected="yes" fi fi if test "x$globus_openssl_detected" = "xyes" ; then { $as_echo "$as_me:$LINENO: result: Globus own OpenSSL library detected. In order to avoid runtime conflicts following components will be disabled: GridFTP DMC, SRM DMC, GSI MCC. To enable these components use Globus compiled for system OpenSSL. " >&5 $as_echo " Globus own OpenSSL library detected. In order to avoid runtime conflicts following components will be disabled: GridFTP DMC, SRM DMC, GSI MCC. To enable these components use Globus compiled for system OpenSSL. 
" >&6; } GLOBUS_FTP_CLIENT_VERSION= GLOBUS_FTP_CONTROL_VERSION= GLOBUS_IO_VERSION= GLOBUS_GSSAPI_GSI_VERSION= fi if test "x$GLOBUS_IO_VERSION" = "x"; then IO_VERSION_MAJOR=0 else IO_VERSION_MAJOR=`echo "$GLOBUS_IO_VERSION" | sed 's/^\([^.]*\).*/\1/'`; fi cat >>confdefs.h <<_ACEOF #define GLOBUS_IO_VERSION $IO_VERSION_MAJOR _ACEOF if test "x$GLOBUS_GSSAPI_GSI_VERSION" = "x"; then GLOBUS_GSSAPI_GSI_VERSION_MAJOR=0 GLOBUS_GSSAPI_GSI_VERSION_MINOR=0 else GLOBUS_GSSAPI_GSI_VERSION_MAJOR=`echo "$GLOBUS_GSSAPI_GSI_VERSION" | sed 's/^\([^.]*\).*/\1/'`; GLOBUS_GSSAPI_GSI_VERSION_MINOR=`echo "$GLOBUS_GSSAPI_GSI_VERSION" | sed 's/^[^.]*\.\([^.]*\).*/\1/'`; fi if test "$GLOBUS_GSSAPI_GSI_VERSION_MAJOR" -lt "12"; then GLOBUS_GSSAPI_GSI_OLD_OPENSSL=1 elif test "$GLOBUS_GSSAPI_GSI_VERSION_MAJOR" -eq "12"; then if test "$GLOBUS_GSSAPI_GSI_VERSION_MINOR" -lt "2"; then GLOBUS_GSSAPI_GSI_OLD_OPENSSL=1 else GLOBUS_GSSAPI_GSI_OLD_OPENSSL=0 fi else GLOBUS_GSSAPI_GSI_OLD_OPENSSL=0 fi cat >>confdefs.h <<_ACEOF #define GLOBUS_GSSAPI_GSI_VERSION $GSSAPI_GSI_VERSION_MAJOR _ACEOF cat >>confdefs.h <<_ACEOF #define GLOBUS_GSSAPI_GSI_OLD_OPENSSL $GLOBUS_GSSAPI_GSI_OLD_OPENSSL _ACEOF { $as_echo "$as_me:$LINENO: checking for DEFAULT_GLOBUS_LOCATION" >&5 $as_echo_n "checking for DEFAULT_GLOBUS_LOCATION... " >&6; } # GLOBUS_LOCATION is set by GPT macros DEFAULT_GLOBUS_LOCATION="$GLOBUS_LOCATION" { $as_echo "$as_me:$LINENO: result: $DEFAULT_GLOBUS_LOCATION" >&5 $as_echo "$DEFAULT_GLOBUS_LOCATION" >&6; } #check lcas DEFAULT_LCAS_LOCATION=/opt/glite LCAS_LOCATION= LCAS_CFLAGS= LCAS_LIBS= # Check whether --with-lcas-location was given. if test "${with_lcas_location+set}" = set; then withval=$with_lcas_location; LCAS_LOCATION=$with_lcas_location if test ! -d $LCAS_LOCATION; then { $as_echo "$as_me:$LINENO: WARNING: LCAS_LOCATION ($LCAS_LOCATION) does not exist" >&5 $as_echo "$as_me: WARNING: LCAS_LOCATION ($LCAS_LOCATION) does not exist" >&2;} LCAS_LOCATION= fi else if test "x$LCAS_LOCATION" = "x"; then LCAS_LOCATION=$DEFAULT_LCAS_LOCATION fi if test ! -d $LCAS_LOCATION; then LCAS_LOCATION= fi fi if test "x$LCAS_LOCATION" != "x"; then LCAS_CFLAGS=$LCAS_LOCATION/include/glite/security/lcas if test ! -d $LCAS_CFLAGS; then LCAS_CFLAGS=$LCAS_LOCATION/include/lcas if test ! -d $LCAS_CFLAGS; then LCAS_CFLAGS=$LCAS_LOCATION/include fi fi LCAS_CFLAGS=-I$LCAS_CFLAGS SAVE_CPPFLAGS=$CPPFLAGS CPPFLAGS="$LCAS_CFLAGS $GLOBUS_GSSAPI_GSI_CFLAGS" for ac_header in lcas.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" 
>&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF LCAS_LDFLAGS= if test -d $LCAS_LOCATION/lib64; then LCAS_LDFLAGS="-L$LCAS_LOCATION/lib64 $GLOBUS_GSSAPI_GSI_LIBS" else LCAS_LDFLAGS="-L$LCAS_LOCATION/lib $GLOBUS_GSSAPI_GSI_LIBS" fi SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $LCAS_LDFLAGS" { $as_echo "$as_me:$LINENO: checking for lcas_init in -llcas" >&5 $as_echo_n "checking for lcas_init in -llcas... " >&6; } if test "${ac_cv_lib_lcas_lcas_init+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-llcas $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char lcas_init (); int main () { return lcas_init (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_lcas_lcas_init=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_lcas_lcas_init=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_lcas_lcas_init" >&5 $as_echo "$ac_cv_lib_lcas_lcas_init" >&6; } if test "x$ac_cv_lib_lcas_lcas_init" = x""yes; then LCAS_LIBS="$LCAS_LDFLAGS -llcas" else LCAS_LOCATION="" fi LDFLAGS=$SAVE_LDFLAGS else LCAS_LOCATION="" fi done CPPFLAGS=$SAVE_CPPFLAGS fi if test "x$LCAS_LOCATION" != "x"; then cat >>confdefs.h <<\_ACEOF #define HAVE_LCAS /**/ _ACEOF fi #check lcmaps DEFAULT_LCMAPS_LOCATION=/opt/glite LCMAPS_LOCATION= LCMAPS_CFLAGS= LCMAPS_LIBS= # Check whether --with-lcmaps-location was given. if test "${with_lcmaps_location+set}" = set; then withval=$with_lcmaps_location; LCMAPS_LOCATION=$with_lcmaps_location if test ! -d $LCMAPS_LOCATION; then { $as_echo "$as_me:$LINENO: WARNING: LCMAPS_LOCATION ($LCMAPS_LOCATION) does not exist" >&5 $as_echo "$as_me: WARNING: LCMAPS_LOCATION ($LCMAPS_LOCATION) does not exist" >&2;} LCMAPS_LOCATION= fi else if test "x$LCMAPS_LOCATION" = "x"; then LCMAPS_LOCATION=$DEFAULT_LCMAPS_LOCATION fi if test ! -d $LCMAPS_LOCATION; then LCMAPS_LOCATION= fi fi if test "x$LCMAPS_LOCATION" != "x"; then LCMAPS_CFLAGS=$LCMAPS_LOCATION/include/glite/security/lcmaps if test ! -d $LCMAPS_CFLAGS; then LCMAPS_CFLAGS=$LCMAPS_LOCATION/include/lcmaps if test ! -d $LCMAPS_CFLAGS; then LCMAPS_CFLAGS=$LCMAPS_LOCATION/include fi fi LCMAPS_CFLAGS=-I$LCMAPS_CFLAGS SAVE_CPPFLAGS=$CPPFLAGS CPPFLAGS="$LCMAPS_CFLAGS $GLOBUS_GSSAPI_GSI_CFLAGS" for ac_header in lcmaps.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? 
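# (The preprocessor-only test below complements the compile test above; if the two disagree, the warnings further down are printed before a cached result for lcmaps.h is recorded.)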
{ $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... 
" >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF LCMAPS_LDFLAGS= if test -d $LCMAPS_LOCATION/lib64; then LCMAPS_LDFLAGS="-L$LCMAPS_LOCATION/lib64 $GLOBUS_GSSAPI_GSI_LIBS" else LCMAPS_LDFLAGS="-L$LCMAPS_LOCATION/lib $GLOBUS_GSSAPI_GSI_LIBS" fi SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $LCMAPS_LDFLAGS" { $as_echo "$as_me:$LINENO: checking for lcmaps_init in -llcmaps" >&5 $as_echo_n "checking for lcmaps_init in -llcmaps... " >&6; } if test "${ac_cv_lib_lcmaps_lcmaps_init+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-llcmaps $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char lcmaps_init (); int main () { return lcmaps_init (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_lcmaps_lcmaps_init=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_lcmaps_lcmaps_init=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_lcmaps_lcmaps_init" >&5 $as_echo "$ac_cv_lib_lcmaps_lcmaps_init" >&6; } if test "x$ac_cv_lib_lcmaps_lcmaps_init" = x""yes; then LCMAPS_LIBS="$LCMAPS_LDFLAGS -llcmaps" else LCMAPS_LOCATION="" fi LDFLAGS=$SAVE_LDFLAGS else LCMAPS_LOCATION="" fi done CPPFLAGS=$SAVE_CPPFLAGS fi if test "x$LCMAPS_LOCATION" != "x"; then cat >>confdefs.h <<\_ACEOF #define HAVE_LCMAPS /**/ _ACEOF fi # Check if mock DMC is enabled # Check whether --enable-mock-dmc was given. if test "${enable_mock_dmc+set}" = set; then enableval=$enable_mock_dmc; enables_mock_dmc="$enableval" fi # Check for GFAL2 # Check whether --enable-gfal was given. if test "${enable_gfal+set}" = set; then enableval=$enable_gfal; enables_gfal="$enableval" fi if test "x$enables_gfal" = "xyes"; then pkg_failed=no { $as_echo "$as_me:$LINENO: checking for GFAL2" >&5 $as_echo_n "checking for GFAL2... 
" >&6; } if test -n "$GFAL2_CFLAGS"; then pkg_cv_GFAL2_CFLAGS="$GFAL2_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"gfal_transfer\"") >&5 ($PKG_CONFIG --exists --print-errors "gfal_transfer") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GFAL2_CFLAGS=`$PKG_CONFIG --cflags "gfal_transfer" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GFAL2_LIBS"; then pkg_cv_GFAL2_LIBS="$GFAL2_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"gfal_transfer\"") >&5 ($PKG_CONFIG --exists --print-errors "gfal_transfer") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_GFAL2_LIBS=`$PKG_CONFIG --libs "gfal_transfer" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GFAL2_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "gfal_transfer" 2>&1` else GFAL2_PKG_ERRORS=`$PKG_CONFIG --print-errors "gfal_transfer" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GFAL2_PKG_ERRORS" >&5 { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } enables_gfal="no" elif test $pkg_failed = untried; then enables_gfal="no" else GFAL2_CFLAGS=$pkg_cv_GFAL2_CFLAGS GFAL2_LIBS=$pkg_cv_GFAL2_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } : fi fi # Check for S3 # Check whether --enable-s3 was given. if test "${enable_s3+set}" = set; then enableval=$enable_s3; enables_s3="$enableval" fi if test "x$enables_s3" = "xyes"; then # Check whether --with-s3 was given. if test "${with_s3+set}" = set; then withval=$with_s3; fi if test ! "x$with_s3" = "x" ; then S3_LOCATION="$with_s3" S3_CPPFLAGS="-I$S3_LOCATION/include" if test -d $S3_LOCATION/lib64; then S3_LDFLAGS="-L$S3_LOCATION/lib64" else S3_LDFLAGS="-L$S3_LOCATION/lib" fi fi SAVE_CPPFLAGS=$CPPFLAGS CPPFLAGS="$CPPFLAGS $S3_CPPFLAGS" if test "${ac_cv_header_libs3_h+set}" = set; then { $as_echo "$as_me:$LINENO: checking for libs3.h" >&5 $as_echo_n "checking for libs3.h... " >&6; } if test "${ac_cv_header_libs3_h+set}" = set; then $as_echo_n "(cached) " >&6 fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_libs3_h" >&5 $as_echo "$ac_cv_header_libs3_h" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking libs3.h usability" >&5 $as_echo_n "checking libs3.h usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking libs3.h presence" >&5 $as_echo_n "checking libs3.h presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: libs3.h: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: libs3.h: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: libs3.h: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: libs3.h: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: libs3.h: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: libs3.h: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: libs3.h: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: libs3.h: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: libs3.h: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: libs3.h: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: libs3.h: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: libs3.h: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: libs3.h: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: libs3.h: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: libs3.h: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: libs3.h: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for libs3.h" >&5 $as_echo_n "checking for libs3.h... 
" >&6; } if test "${ac_cv_header_libs3_h+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_header_libs3_h=$ac_header_preproc fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_libs3_h" >&5 $as_echo "$ac_cv_header_libs3_h" >&6; } fi if test "x$ac_cv_header_libs3_h" = x""yes; then : else enables_s3="no" fi CPPFLAGS=$SAVE_CPPFLAGS SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $S3_LDFLAGS" { $as_echo "$as_me:$LINENO: checking for S3_initialize in -ls3" >&5 $as_echo_n "checking for S3_initialize in -ls3... " >&6; } if test "${ac_cv_lib_s3_S3_initialize+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ls3 $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char S3_initialize (); int main () { return S3_initialize (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_s3_S3_initialize=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_s3_S3_initialize=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_s3_S3_initialize" >&5 $as_echo "$ac_cv_lib_s3_S3_initialize" >&6; } if test "x$ac_cv_lib_s3_S3_initialize" = x""yes; then S3_LIBS="$S3_LDFLAGS -ls3" else enables_s3="no" fi LDFLAGS=$SAVE_LDFLAGS if test x$enables_s3 = xyes then if s3 help 2>&1 | grep -q -- '--timeout' then cat >>confdefs.h <<\_ACEOF #define HAVE_S3_TIMEOUT /**/ _ACEOF fi fi fi # Check for xrootd (c++) ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu # Check whether --enable-xrootd was given. if test "${enable_xrootd+set}" = set; then enableval=$enable_xrootd; enables_xrootd="$enableval" fi if test "x$enables_xrootd" = "xyes"; then XROOTD_CPPFLAGS="-I/usr/include/xrootd" # Check whether --with-xrootd was given. if test "${with_xrootd+set}" = set; then withval=$with_xrootd; fi if test ! "x$with_xrootd" = "x" ; then XROOTD_LOCATION="$with_xrootd" XROOTD_CPPFLAGS="-I$XROOTD_LOCATION/include/xrootd" if test -d $XROOTD_LOCATION/lib64; then XROOTD_LDFLAGS="-L$XROOTD_LOCATION/lib64" else XROOTD_LDFLAGS="-L$XROOTD_LOCATION/lib" fi fi SAVE_CPPFLAGS=$CPPFLAGS CPPFLAGS="$CPPFLAGS $XROOTD_CPPFLAGS" if test "${ac_cv_header_XrdPosix_XrdPosixXrootd_hh+set}" = set; then { $as_echo "$as_me:$LINENO: checking for XrdPosix/XrdPosixXrootd.hh" >&5 $as_echo_n "checking for XrdPosix/XrdPosixXrootd.hh... 
" >&6; } if test "${ac_cv_header_XrdPosix_XrdPosixXrootd_hh+set}" = set; then $as_echo_n "(cached) " >&6 fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_XrdPosix_XrdPosixXrootd_hh" >&5 $as_echo "$ac_cv_header_XrdPosix_XrdPosixXrootd_hh" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking XrdPosix/XrdPosixXrootd.hh usability" >&5 $as_echo_n "checking XrdPosix/XrdPosixXrootd.hh usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking XrdPosix/XrdPosixXrootd.hh presence" >&5 $as_echo_n "checking XrdPosix/XrdPosixXrootd.hh presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_cxx_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: XrdPosix/XrdPosixXrootd.hh: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: XrdPosix/XrdPosixXrootd.hh: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: XrdPosix/XrdPosixXrootd.hh: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: XrdPosix/XrdPosixXrootd.hh: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: XrdPosix/XrdPosixXrootd.hh: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: XrdPosix/XrdPosixXrootd.hh: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: XrdPosix/XrdPosixXrootd.hh: check for missing prerequisite headers?" 
>&5 $as_echo "$as_me: WARNING: XrdPosix/XrdPosixXrootd.hh: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: XrdPosix/XrdPosixXrootd.hh: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: XrdPosix/XrdPosixXrootd.hh: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: XrdPosix/XrdPosixXrootd.hh: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: XrdPosix/XrdPosixXrootd.hh: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: XrdPosix/XrdPosixXrootd.hh: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: XrdPosix/XrdPosixXrootd.hh: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: XrdPosix/XrdPosixXrootd.hh: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: XrdPosix/XrdPosixXrootd.hh: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for XrdPosix/XrdPosixXrootd.hh" >&5 $as_echo_n "checking for XrdPosix/XrdPosixXrootd.hh... " >&6; } if test "${ac_cv_header_XrdPosix_XrdPosixXrootd_hh+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_header_XrdPosix_XrdPosixXrootd_hh=$ac_header_preproc fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_XrdPosix_XrdPosixXrootd_hh" >&5 $as_echo "$ac_cv_header_XrdPosix_XrdPosixXrootd_hh" >&6; } fi if test "x$ac_cv_header_XrdPosix_XrdPosixXrootd_hh" = x""yes; then : else enables_xrootd="no" #include fi CPPFLAGS=$SAVE_CPPFLAGS SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $XROOTD_LDFLAGS" { $as_echo "$as_me:$LINENO: checking for _init in -lXrdPosix" >&5 $as_echo_n "checking for _init in -lXrdPosix... " >&6; } if test "${ac_cv_lib_XrdPosix__init+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lXrdPosix $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char _init (); int main () { return _init (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_XrdPosix__init=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_XrdPosix__init=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_XrdPosix__init" >&5 $as_echo "$ac_cv_lib_XrdPosix__init" >&6; } if test "x$ac_cv_lib_XrdPosix__init" = x""yes; then XROOTD_LIBS="$XROOTD_LDFLAGS -lXrdPosix" else enables_xrootd="no" fi LDFLAGS=$SAVE_LDFLAGS fi fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu # Setup conditionals if test -n "$GLOBUS_COMMON_VERSION"; then GLOBUSUTILS_ENABLED_TRUE= GLOBUSUTILS_ENABLED_FALSE='#' else GLOBUSUTILS_ENABLED_TRUE='#' GLOBUSUTILS_ENABLED_FALSE= fi if test -n "$GLOBUS_FTP_CLIENT_VERSION"; then GRIDFTP_ENABLED_TRUE= GRIDFTP_ENABLED_FALSE='#' else GRIDFTP_ENABLED_TRUE='#' GRIDFTP_ENABLED_FALSE= fi if test x$enables_mock_dmc = xyes; then MOCK_DMC_ENABLED_TRUE= MOCK_DMC_ENABLED_FALSE='#' else MOCK_DMC_ENABLED_TRUE='#' MOCK_DMC_ENABLED_FALSE= fi if test x$enables_gfal = xyes; then GFAL_ENABLED_TRUE= GFAL_ENABLED_FALSE='#' else GFAL_ENABLED_TRUE='#' GFAL_ENABLED_FALSE= fi if test x$enables_s3 = xyes; then S3_DMC_ENABLED_TRUE= S3_DMC_ENABLED_FALSE='#' else S3_DMC_ENABLED_TRUE='#' S3_DMC_ENABLED_FALSE= fi if test x$enables_xrootd = xyes; then XROOTD_ENABLED_TRUE= XROOTD_ENABLED_FALSE='#' else XROOTD_ENABLED_TRUE='#' XROOTD_ENABLED_FALSE= fi if test x$XMLSEC_INSTALLED = xyes; then XMLSEC_ENABLED_TRUE= XMLSEC_ENABLED_FALSE='#' else XMLSEC_ENABLED_TRUE='#' XMLSEC_ENABLED_FALSE= fi if test x$enables_cppunit = xyes; then CPPUNIT_ENABLED_TRUE= CPPUNIT_ENABLED_FALSE='#' else CPPUNIT_ENABLED_TRUE='#' CPPUNIT_ENABLED_FALSE= fi enables_srm_dmc=no if test "$enables_hed" = "yes"; then enables_srm_dmc=yes fi if test "$enables_giis_service" = "yes"; then if test "$enables_ldap_service" = "no"; then enables_ldap_service="yes" fi fi if test "x$enables_srm_dmc" = "xyes"; then SRM_DMC_ENABLED_TRUE= SRM_DMC_ENABLED_FALSE='#' else SRM_DMC_ENABLED_TRUE='#' SRM_DMC_ENABLED_FALSE= fi # Setup defines if test -n "$GLOBUS_COMMON_VERSION"; then cat >>confdefs.h <<\_ACEOF #define HAVE_GLOBUS /**/ _ACEOF fi if test x"$XMLSEC_INSTALLED" = xyes; then cat >>confdefs.h <<\_ACEOF #define HAVE_XMLSEC /**/ _ACEOF fi # Setup messages for reporting enables_gridftp=no if test -n "$GLOBUS_FTP_CLIENT_VERSION" ; then enables_gridftp=yes; fi enables_dbcxx=no if test -n "$DBCXX_LIBS" ; then enables_dbcxx=yes; fi # Check for LDAP if test "$enables_hed" = "yes"; then LDAP=no # Check whether --enable-ldap was given. if test "${enable_ldap+set}" = set; then enableval=$enable_ldap; enables_ldap="$enableval" fi if test "x$enables_ldap" = "xyes"; then if test "${ac_cv_header_ldap_h+set}" = set; then { $as_echo "$as_me:$LINENO: checking for ldap.h" >&5 $as_echo_n "checking for ldap.h... " >&6; } if test "${ac_cv_header_ldap_h+set}" = set; then $as_echo_n "(cached) " >&6 fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_ldap_h" >&5 $as_echo "$ac_cv_header_ldap_h" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking ldap.h usability" >&5 $as_echo_n "checking ldap.h usability... 
" >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking ldap.h presence" >&5 $as_echo_n "checking ldap.h presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: ldap.h: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: ldap.h: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: ldap.h: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: ldap.h: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: ldap.h: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: ldap.h: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: ldap.h: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: ldap.h: check for missing prerequisite headers?" 
>&2;} { $as_echo "$as_me:$LINENO: WARNING: ldap.h: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: ldap.h: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: ldap.h: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: ldap.h: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: ldap.h: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: ldap.h: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: ldap.h: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: ldap.h: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for ldap.h" >&5 $as_echo_n "checking for ldap.h... " >&6; } if test "${ac_cv_header_ldap_h+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_header_ldap_h=$ac_header_preproc fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_ldap_h" >&5 $as_echo "$ac_cv_header_ldap_h" >&6; } fi if test "x$ac_cv_header_ldap_h" = x""yes; then LDAP=yes SAVE_LDFLAGS=$LDFLAGS case "${host}" in *-*-mingw32): ;; *): LDFLAGS=-lpthread ;; esac { $as_echo "$as_me:$LINENO: checking for ldap_first_message in -lldap_r" >&5 $as_echo_n "checking for ldap_first_message in -lldap_r... " >&6; } if test "${ac_cv_lib_ldap_r_ldap_first_message+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lldap_r $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char ldap_first_message (); int main () { return ldap_first_message (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_ldap_r_ldap_first_message=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_ldap_r_ldap_first_message=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_ldap_r_ldap_first_message" >&5 $as_echo "$ac_cv_lib_ldap_r_ldap_first_message" >&6; } if test "x$ac_cv_lib_ldap_r_ldap_first_message" = x""yes; then { $as_echo "$as_me:$LINENO: checking for ldap_initialize in -lldap_r" >&5 $as_echo_n "checking for ldap_initialize in -lldap_r... 
" >&6; } if test "${ac_cv_lib_ldap_r_ldap_initialize+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lldap_r $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char ldap_initialize (); int main () { return ldap_initialize (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_ldap_r_ldap_initialize=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_ldap_r_ldap_initialize=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_ldap_r_ldap_initialize" >&5 $as_echo "$ac_cv_lib_ldap_r_ldap_initialize" >&6; } if test "x$ac_cv_lib_ldap_r_ldap_initialize" = x""yes; then cat >>confdefs.h <<\_ACEOF #define HAVE_LDAP_INITIALIZE /**/ _ACEOF fi LDAP_LIBS=-lldap_r else { $as_echo "$as_me:$LINENO: checking for ldap_first_message in -lldap" >&5 $as_echo_n "checking for ldap_first_message in -lldap... " >&6; } if test "${ac_cv_lib_ldap_ldap_first_message+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lldap $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char ldap_first_message (); int main () { return ldap_first_message (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_ldap_ldap_first_message=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_ldap_ldap_first_message=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_ldap_ldap_first_message" >&5 $as_echo "$ac_cv_lib_ldap_ldap_first_message" >&6; } if test "x$ac_cv_lib_ldap_ldap_first_message" = x""yes; then { $as_echo "$as_me:$LINENO: checking for ldap_initialize in -lldap" >&5 $as_echo_n "checking for ldap_initialize in -lldap... " >&6; } if test "${ac_cv_lib_ldap_ldap_initialize+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lldap $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char ldap_initialize (); int main () { return ldap_initialize (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_ldap_ldap_initialize=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_ldap_ldap_initialize=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_ldap_ldap_initialize" >&5 $as_echo "$ac_cv_lib_ldap_ldap_initialize" >&6; } if test "x$ac_cv_lib_ldap_ldap_initialize" = x""yes; then cat >>confdefs.h <<\_ACEOF #define HAVE_LDAP_INITIALIZE /**/ _ACEOF fi LDAP_LIBS=-lldap else LDAP=no fi fi { $as_echo "$as_me:$LINENO: checking for ber_init in -llber" >&5 $as_echo_n "checking for ber_init in -llber... " >&6; } if test "${ac_cv_lib_lber_ber_init+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-llber $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char ber_init (); int main () { return ber_init (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? 
grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_lber_ber_init=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_lber_ber_init=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_lber_ber_init" >&5 $as_echo "$ac_cv_lib_lber_ber_init" >&6; } if test "x$ac_cv_lib_lber_ber_init" = x""yes; then LDAP_LIBS="$LDAP_LIBS -llber" fi LDFLAGS=$SAVE_LDFLAGS else LDAP=no # Try native LDAP on Win32 if OpenLDAP fails case "${host}" in *-*-mingw32) { $as_echo "$as_me:$LINENO: checking for winldap.h" >&5 $as_echo_n "checking for winldap.h... " >&6; } if test "${ac_cv_header_winldap_h+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <windows.h> #include <winldap.h> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_header_winldap_h=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_header_winldap_h=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_winldap_h" >&5 $as_echo "$ac_cv_header_winldap_h" >&6; } if test "x$ac_cv_header_winldap_h" = x""yes; then { $as_echo "$as_me:$LINENO: checking for ldap_init in -lwldap32" >&5 $as_echo_n "checking for ldap_init in -lwldap32... " >&6; } if test "${ac_cv_lib_wldap32_ldap_init+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lwldap32 $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char ldap_init (); int main () { return ldap_init (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test !
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_wldap32_ldap_init=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_wldap32_ldap_init=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_wldap32_ldap_init" >&5 $as_echo "$ac_cv_lib_wldap32_ldap_init" >&6; } if test "x$ac_cv_lib_wldap32_ldap_init" = x""yes; then LDAP=yes LDAP_LIBS="-lwldap32" cat >>confdefs.h <<\_ACEOF #define USE_WIN32_LDAP_API /**/ _ACEOF fi fi ;; esac fi enables_ldap="$LDAP" fi else enables_ldap="no" fi if test x$LDAP = xyes; then LDAP_ENABLED_TRUE= LDAP_ENABLED_FALSE='#' else LDAP_ENABLED_TRUE='#' LDAP_ENABLED_FALSE= fi if test "x$LDAP" = "xyes"; then cat >>confdefs.h <<\_ACEOF #define HAVE_LDAP /**/ _ACEOF fi # Check for the uuid lib UUID_LIBS="" if test "$enables_hed" = "yes"; then if test "${ac_cv_header_uuid_uuid_h+set}" = set; then { $as_echo "$as_me:$LINENO: checking for uuid/uuid.h" >&5 $as_echo_n "checking for uuid/uuid.h... " >&6; } if test "${ac_cv_header_uuid_uuid_h+set}" = set; then $as_echo_n "(cached) " >&6 fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_uuid_uuid_h" >&5 $as_echo "$ac_cv_header_uuid_uuid_h" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking uuid/uuid.h usability" >&5 $as_echo_n "checking uuid/uuid.h usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking uuid/uuid.h presence" >&5 $as_echo_n "checking uuid/uuid.h presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! 
-s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: uuid/uuid.h: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: uuid/uuid.h: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: uuid/uuid.h: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: uuid/uuid.h: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: uuid/uuid.h: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: uuid/uuid.h: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: uuid/uuid.h: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: uuid/uuid.h: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: uuid/uuid.h: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: uuid/uuid.h: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: uuid/uuid.h: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: uuid/uuid.h: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: uuid/uuid.h: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: uuid/uuid.h: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: uuid/uuid.h: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: uuid/uuid.h: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for uuid/uuid.h" >&5 $as_echo_n "checking for uuid/uuid.h... " >&6; } if test "${ac_cv_header_uuid_uuid_h+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_header_uuid_uuid_h=$ac_header_preproc fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_uuid_uuid_h" >&5 $as_echo "$ac_cv_header_uuid_uuid_h" >&6; } fi if test "x$ac_cv_header_uuid_uuid_h" = x""yes; then { $as_echo "$as_me:$LINENO: checking for uuid_generate" >&5 $as_echo_n "checking for uuid_generate... " >&6; } if test "${ac_cv_func_uuid_generate+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Define uuid_generate to an innocuous variant, in case declares uuid_generate. For example, HP-UX 11i declares gettimeofday. */ #define uuid_generate innocuous_uuid_generate /* System header to define __stub macros and hopefully few prototypes, which can conflict with char uuid_generate (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef uuid_generate /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char uuid_generate (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_uuid_generate || defined __stub___uuid_generate choke me #endif int main () { return uuid_generate (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_func_uuid_generate=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_func_uuid_generate=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_uuid_generate" >&5 $as_echo "$ac_cv_func_uuid_generate" >&6; } if test "x$ac_cv_func_uuid_generate" = x""yes; then UUID_LIBS= else { $as_echo "$as_me:$LINENO: checking for uuid_generate in -luuid" >&5 $as_echo_n "checking for uuid_generate in -luuid... " >&6; } if test "${ac_cv_lib_uuid_uuid_generate+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-luuid $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char uuid_generate (); int main () { return uuid_generate (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_uuid_uuid_generate=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_uuid_uuid_generate=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_uuid_uuid_generate" >&5 $as_echo "$ac_cv_lib_uuid_uuid_generate" >&6; } if test "x$ac_cv_lib_uuid_uuid_generate" = x""yes; then UUID_LIBS=-luuid else { $as_echo "$as_me:$LINENO: Can't find library containing uuid implementation" >&5 $as_echo "$as_me: Can't find library containing uuid implementation" >&6;} fi fi else { $as_echo "$as_me:$LINENO: Can't find uuid header" >&5 $as_echo "$as_me: Can't find uuid header" >&6;} fi LIBS="$LIBS $UUID_LIBS" fi # Check for dlopen DLOPEN_LIBS="" if test "$enables_hed" = "yes"; then { $as_echo "$as_me:$LINENO: checking for dlopen" >&5 $as_echo_n "checking for dlopen... " >&6; } if test "${ac_cv_func_dlopen+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Define dlopen to an innocuous variant, in case <limits.h> declares dlopen. For example, HP-UX 11i <limits.h> declares gettimeofday. */ #define dlopen innocuous_dlopen /* System header to define __stub macros and hopefully few prototypes, which can conflict with char dlopen (); below. Prefer <limits.h> to <assert.h> if __STDC__ is defined, since <limits.h> exists even on freestanding compilers. */ #ifdef __STDC__ # include <limits.h> #else # include <assert.h> #endif #undef dlopen /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_dlopen || defined __stub___dlopen choke me #endif int main () { return dlopen (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_func_dlopen=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_func_dlopen=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_dlopen" >&5 $as_echo "$ac_cv_func_dlopen" >&6; } if test "x$ac_cv_func_dlopen" = x""yes; then DLOPEN_LIBS= else { $as_echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... 
" >&6; } if test "${ac_cv_lib_dl_dlopen+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_dl_dlopen=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dl_dlopen=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = x""yes; then DLOPEN_LIBS=-ldl else { $as_echo "$as_me:$LINENO: Can't find library containing dlopen implementation" >&5 $as_echo "$as_me: Can't find library containing dlopen implementation" >&6;} fi fi fi # Check for clock_gettime { $as_echo "$as_me:$LINENO: checking for library containing clock_gettime" >&5 $as_echo_n "checking for library containing clock_gettime... " >&6; } if test "${ac_cv_search_clock_gettime+set}" = set; then $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char clock_gettime (); int main () { return clock_gettime (); ; return 0; } _ACEOF for ac_lib in '' rt; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_search_clock_gettime=$ac_res else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext if test "${ac_cv_search_clock_gettime+set}" = set; then break fi done if test "${ac_cv_search_clock_gettime+set}" = set; then : else ac_cv_search_clock_gettime=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_search_clock_gettime" >&5 $as_echo "$ac_cv_search_clock_gettime" >&6; } ac_res=$ac_cv_search_clock_gettime if test "$ac_res" != no; then test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi # check for fsusage if test "$enables_hed" = "yes"; then for ac_header in sys/param.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! 
-s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_header in sys/vfs.h sys/fs_types.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" 
>&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_header in sys/mount.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #if HAVE_SYS_PARAM_H #include #endif #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then eval "$as_ac_Header=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_Header=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:$LINENO: checking how to get file system space usage" >&5 $as_echo "$as_me: checking how to get file system space usage" >&6;} ac_fsusage_space=no # Perform only the link test since it seems there are no variants of the # statvfs function. This check is more than just AC_CHECK_FUNCS(statvfs) # because that got a false positive on SCO OSR5. 
Adding the declaration # of a `struct statvfs' causes this test to fail (as it should) on such # systems. That system is reported to work fine with STAT_STATFS4 which # is what it gets when this test fails. if test $ac_fsusage_space = no; then # SVR4 { $as_echo "$as_me:$LINENO: checking for statvfs function (SVR4)" >&5 $as_echo_n "checking for statvfs function (SVR4)... " >&6; } if test "${fu_cv_sys_stat_statvfs+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #if defined __GLIBC__ && !defined __BEOS__ Do not use statvfs on systems with GNU libc, because that function stats all preceding entries in /proc/mounts, and that makes df hang if even one of the corresponding file systems is hard-mounted, but not available. statvfs in GNU libc on BeOS operates differently: it only makes a system call. #endif #ifdef __osf__ "Do not use Tru64's statvfs implementation" #endif #include int main () { struct statvfs fsd; statvfs (0, &fsd); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then fu_cv_sys_stat_statvfs=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fu_cv_sys_stat_statvfs=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $fu_cv_sys_stat_statvfs" >&5 $as_echo "$fu_cv_sys_stat_statvfs" >&6; } if test $fu_cv_sys_stat_statvfs = yes; then ac_fsusage_space=yes cat >>confdefs.h <<\_ACEOF #define STAT_STATVFS 1 _ACEOF fi fi if test $ac_fsusage_space = no; then # DEC Alpha running OSF/1 { $as_echo "$as_me:$LINENO: checking for 3-argument statfs function (DEC OSF/1)" >&5 $as_echo_n "checking for 3-argument statfs function (DEC OSF/1)... " >&6; } if test "${fu_cv_sys_stat_statfs3_osf1+set}" = set; then $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then fu_cv_sys_stat_statfs3_osf1=no else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include #include int main () { struct statfs fsd; fsd.f_fsize = 0; return statfs (".", &fsd, sizeof (struct statfs)) != 0; } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
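# Annotation: the probes from here down form a fallback cascade, in the usual
# coreutils/gnulib-style order, for measuring free disk space: statvfs (SVR4),
# then 3-argument statfs (Tru64/OSF/1), 2-argument statfs with f_bsize (AIX,
# 4.3BSD), 4-argument statfs (SVR3), 2-argument statfs with f_fsize (4.4BSD,
# NetBSD), struct fs_data (Ultrix), and finally reading the superblock (SVR2).
# The first variant that links/runs defines one STAT_* macro in confdefs.h and
# short-circuits the remaining tests via ac_fsusage_space=yes.
# Illustrative sketch only, not part of the generated script: code built with
# STAT_STATVFS would typically query space roughly like this:
#   #include <sys/statvfs.h>
#   /* example helper, not taken from the ARC sources */
#   unsigned long long free_bytes (const char *path)
#   {
#     struct statvfs fsd;
#     if (statvfs (path, &fsd) != 0)
#       return 0;                 /* caller decides how to report errors */
#     return (unsigned long long) fsd.f_bavail * fsd.f_frsize;
#   }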
= $ac_status" >&5 (exit $ac_status); }; }; then fu_cv_sys_stat_statfs3_osf1=yes else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) fu_cv_sys_stat_statfs3_osf1=no fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi { $as_echo "$as_me:$LINENO: result: $fu_cv_sys_stat_statfs3_osf1" >&5 $as_echo "$fu_cv_sys_stat_statfs3_osf1" >&6; } if test $fu_cv_sys_stat_statfs3_osf1 = yes; then ac_fsusage_space=yes cat >>confdefs.h <<\_ACEOF #define STAT_STATFS3_OSF1 1 _ACEOF fi fi if test $ac_fsusage_space = no; then # AIX { $as_echo "$as_me:$LINENO: checking for two-argument statfs with statfs.bsize member (AIX, 4.3BSD)" >&5 $as_echo_n "checking for two-argument statfs with statfs.bsize member (AIX, 4.3BSD)... " >&6; } if test "${fu_cv_sys_stat_statfs2_bsize+set}" = set; then $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then fu_cv_sys_stat_statfs2_bsize=no else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #ifdef HAVE_SYS_PARAM_H #include #endif #ifdef HAVE_SYS_MOUNT_H #include #endif #ifdef HAVE_SYS_VFS_H #include #endif int main () { struct statfs fsd; fsd.f_bsize = 0; return statfs (".", &fsd) != 0; } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then fu_cv_sys_stat_statfs2_bsize=yes else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) fu_cv_sys_stat_statfs2_bsize=no fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi { $as_echo "$as_me:$LINENO: result: $fu_cv_sys_stat_statfs2_bsize" >&5 $as_echo "$fu_cv_sys_stat_statfs2_bsize" >&6; } if test $fu_cv_sys_stat_statfs2_bsize = yes; then ac_fsusage_space=yes cat >>confdefs.h <<\_ACEOF #define STAT_STATFS2_BSIZE 1 _ACEOF fi fi if test $ac_fsusage_space = no; then # SVR3 { $as_echo "$as_me:$LINENO: checking for four-argument statfs (AIX-3.2.5, SVR3)" >&5 $as_echo_n "checking for four-argument statfs (AIX-3.2.5, SVR3)... " >&6; } if test "${fu_cv_sys_stat_statfs4+set}" = set; then $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then fu_cv_sys_stat_statfs4=no else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ #include #include int main () { struct statfs fsd; return statfs (".", &fsd, sizeof fsd, 0) != 0; } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then fu_cv_sys_stat_statfs4=yes else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) fu_cv_sys_stat_statfs4=no fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi { $as_echo "$as_me:$LINENO: result: $fu_cv_sys_stat_statfs4" >&5 $as_echo "$fu_cv_sys_stat_statfs4" >&6; } if test $fu_cv_sys_stat_statfs4 = yes; then ac_fsusage_space=yes cat >>confdefs.h <<\_ACEOF #define STAT_STATFS4 1 _ACEOF fi fi if test $ac_fsusage_space = no; then # 4.4BSD and NetBSD { $as_echo "$as_me:$LINENO: checking for two-argument statfs with statfs.fsize member (4.4BSD and NetBSD)" >&5 $as_echo_n "checking for two-argument statfs with statfs.fsize member (4.4BSD and NetBSD)... " >&6; } if test "${fu_cv_sys_stat_statfs2_fsize+set}" = set; then $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then fu_cv_sys_stat_statfs2_fsize=no else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #ifdef HAVE_SYS_PARAM_H #include #endif #ifdef HAVE_SYS_MOUNT_H #include #endif int main () { struct statfs fsd; fsd.f_fsize = 0; return statfs (".", &fsd) != 0; } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then fu_cv_sys_stat_statfs2_fsize=yes else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) fu_cv_sys_stat_statfs2_fsize=no fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi { $as_echo "$as_me:$LINENO: result: $fu_cv_sys_stat_statfs2_fsize" >&5 $as_echo "$fu_cv_sys_stat_statfs2_fsize" >&6; } if test $fu_cv_sys_stat_statfs2_fsize = yes; then ac_fsusage_space=yes cat >>confdefs.h <<\_ACEOF #define STAT_STATFS2_FSIZE 1 _ACEOF fi fi if test $ac_fsusage_space = no; then # Ultrix { $as_echo "$as_me:$LINENO: checking for two-argument statfs with struct fs_data (Ultrix)" >&5 $as_echo_n "checking for two-argument statfs with struct fs_data (Ultrix)... " >&6; } if test "${fu_cv_sys_stat_fs_data+set}" = set; then $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then fu_cv_sys_stat_fs_data=no else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #ifdef HAVE_SYS_PARAM_H #include #endif #ifdef HAVE_SYS_MOUNT_H #include #endif #ifdef HAVE_SYS_FS_TYPES_H #include #endif int main () { struct fs_data fsd; /* Ultrix's statfs returns 1 for success, 0 for not mounted, -1 for failure. */ return statfs (".", &fsd) != 1; } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then fu_cv_sys_stat_fs_data=yes else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) fu_cv_sys_stat_fs_data=no fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi { $as_echo "$as_me:$LINENO: result: $fu_cv_sys_stat_fs_data" >&5 $as_echo "$fu_cv_sys_stat_fs_data" >&6; } if test $fu_cv_sys_stat_fs_data = yes; then ac_fsusage_space=yes cat >>confdefs.h <<\_ACEOF #define STAT_STATFS2_FS_DATA 1 _ACEOF fi fi if test $ac_fsusage_space = no; then # SVR2 cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! 
-s conftest.err }; then cat >>confdefs.h <<\_ACEOF #define STAT_READ_FILSYS 1 _ACEOF ac_fsusage_space=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f conftest.err conftest.$ac_ext fi if test $ac_fsusage_space = yes; then gl_cv_fs_space=yes else gl_cv_fs_space=no fi if test $gl_cv_fs_space = yes; then case " $LIBOBJS " in *" fsusage.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS fsusage.$ac_objext" ;; esac for ac_header in dustat.h sys/fs/s5param.h sys/filsys.h sys/statfs.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" 
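# Annotation: once one of the space-usage variants above has matched,
# fsusage.$ac_objext is appended to LIBOBJS and the extra headers that the
# fsusage replacement source may need (dustat.h, sys/fs/s5param.h,
# sys/filsys.h, sys/statfs.h) are probed with the usual paired "usability"
# (compile) and "presence" (preprocess) tests; a final compile test sets
# STATFS_TRUNCATES_BLOCK_COUNTS for a Sun-specific statfs layout that
# truncates large block counts (detected via the f_spare member).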
>&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:$LINENO: checking for statfs that truncates block counts" >&5 $as_echo_n "checking for statfs that truncates block counts... " >&6; } if test "${fu_cv_sys_truncating_statfs+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #if !defined(sun) && !defined(__sun) choke -- this is a workaround for a Sun-specific problem #endif #include #include int main () { struct statfs t; long c = *(t.f_spare); if (c) return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then fu_cv_sys_truncating_statfs=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fu_cv_sys_truncating_statfs=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi if test $fu_cv_sys_truncating_statfs = yes; then cat >>confdefs.h <<\_ACEOF #define STATFS_TRUNCATES_BLOCK_COUNTS 1 _ACEOF fi { $as_echo "$as_me:$LINENO: result: $fu_cv_sys_truncating_statfs" >&5 $as_echo "$fu_cv_sys_truncating_statfs" >&6; } fi fi if test "$enables_hed" = "yes"; then # Checks for header files. ac_header_dirent=no for ac_hdr in dirent.h sys/ndir.h sys/dir.h ndir.h; do as_ac_Header=`$as_echo "ac_cv_header_dirent_$ac_hdr" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for $ac_hdr that defines DIR" >&5 $as_echo_n "checking for $ac_hdr that defines DIR... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include <$ac_hdr> int main () { if ((DIR *) 0) return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then eval "$as_ac_Header=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_Header=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_hdr" | $as_tr_cpp` 1 _ACEOF ac_header_dirent=$ac_hdr; break fi done # Two versions of opendir et al. are in -ldir and -lx on SCO Xenix. if test $ac_header_dirent = dirent.h; then { $as_echo "$as_me:$LINENO: checking for library containing opendir" >&5 $as_echo_n "checking for library containing opendir... " >&6; } if test "${ac_cv_search_opendir+set}" = set; then $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char opendir (); int main () { return opendir (); ; return 0; } _ACEOF for ac_lib in '' dir; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? 
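# Annotation: when HED is enabled (enables_hed=yes) the script first looks for
# a directory-stream header that defines DIR (dirent.h, sys/ndir.h, sys/dir.h
# or ndir.h), then searches around this point for the library providing
# opendir: none, -ldir, or -lx (the SCO Xenix split), appending whichever
# links to LIBS.
# Illustrative sketch only, assuming dirent.h was found (HAVE_DIRENT_H):
#   #include <stdio.h>
#   #include <dirent.h>
#   /* example helper, not taken from the ARC sources */
#   static void list_dir (const char *path)
#   {
#     DIR *d = opendir (path);
#     struct dirent *e;
#     if (!d)
#       return;
#     while ((e = readdir (d)) != NULL)
#       printf ("%s\n", e->d_name);
#     closedir (d);
#   }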
grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_search_opendir=$ac_res else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext if test "${ac_cv_search_opendir+set}" = set; then break fi done if test "${ac_cv_search_opendir+set}" = set; then : else ac_cv_search_opendir=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_search_opendir" >&5 $as_echo "$ac_cv_search_opendir" >&6; } ac_res=$ac_cv_search_opendir if test "$ac_res" != no; then test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi else { $as_echo "$as_me:$LINENO: checking for library containing opendir" >&5 $as_echo_n "checking for library containing opendir... " >&6; } if test "${ac_cv_search_opendir+set}" = set; then $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char opendir (); int main () { return opendir (); ; return 0; } _ACEOF for ac_lib in '' x; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_search_opendir=$ac_res else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext if test "${ac_cv_search_opendir+set}" = set; then break fi done if test "${ac_cv_search_opendir+set}" = set; then : else ac_cv_search_opendir=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_search_opendir" >&5 $as_echo "$ac_cv_search_opendir" >&6; } ac_res=$ac_cv_search_opendir if test "$ac_res" != no; then test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi fi { $as_echo "$as_me:$LINENO: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if test "${ac_cv_header_stdc+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ #include #include #include #include int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_header_stdc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
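# Annotation: the surrounding ANSI C header-files probe (STDC_HEADERS) combines
# a compile test of stdlib.h/stdarg.h/string.h/float.h, grep checks that
# string.h declares mem* and stdlib.h declares free, and, when not
# cross-compiling, a runtime check of the ctype macros.
# Illustrative sketch only of one conventional consumer pattern (the fallback
# header is an assumption for illustration, not taken from the ARC sources):
#   #if STDC_HEADERS
#   # include <stdlib.h>
#   # include <string.h>
#   #else
#   # include <strings.h>        /* assumption: pre-ANSI fallback */
#   #endif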
= $ac_status" >&5 (exit $ac_status); }; }; then : else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) ac_cv_header_stdc=no fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then cat >>confdefs.h <<\_ACEOF #define STDC_HEADERS 1 _ACEOF fi { $as_echo "$as_me:$LINENO: checking for sys/wait.h that is POSIX.1 compatible" >&5 $as_echo_n "checking for sys/wait.h that is POSIX.1 compatible... " >&6; } if test "${ac_cv_header_sys_wait_h+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include #ifndef WEXITSTATUS # define WEXITSTATUS(stat_val) ((unsigned int) (stat_val) >> 8) #endif #ifndef WIFEXITED # define WIFEXITED(stat_val) (((stat_val) & 255) == 0) #endif int main () { int s; wait (&s); s = WIFEXITED (s) ? WEXITSTATUS (s) : 1; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_header_sys_wait_h=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_header_sys_wait_h=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_sys_wait_h" >&5 $as_echo "$ac_cv_header_sys_wait_h" >&6; } if test $ac_cv_header_sys_wait_h = yes; then cat >>confdefs.h <<\_ACEOF #define HAVE_SYS_WAIT_H 1 _ACEOF fi for ac_header in arpa/inet.h fcntl.h float.h limits.h netdb.h netinet/in.h sasl.h sasl/sasl.h stdint.h stdlib.h string.h sys/file.h sys/socket.h sys/vfs.h unistd.h uuid/uuid.h getopt.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? 
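# Annotation: the sys/wait.h probe above defines HAVE_SYS_WAIT_H only when the
# header is POSIX.1 compatible; consumers are still expected to supply the
# classic fallbacks, mirroring the test program itself.
# Illustrative sketch only:
#   #include <sys/types.h>
#   #if HAVE_SYS_WAIT_H
#   # include <sys/wait.h>
#   #endif
#   #ifndef WEXITSTATUS
#   # define WEXITSTATUS(stat_val) ((unsigned int) (stat_val) >> 8)
#   #endif
#   #ifndef WIFEXITED
#   # define WIFEXITED(stat_val) (((stat_val) & 255) == 0)
#   #endif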
grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" 
>&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:$LINENO: checking whether the compiler implements namespaces" >&5 $as_echo_n "checking whether the compiler implements namespaces... " >&6; } if test "${ac_cv_cxx_namespaces+set}" = set; then $as_echo_n "(cached) " >&6 else ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ namespace Outer { namespace Inner { int i = 0; }} int main () { using namespace Outer::Inner; return i; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! 
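# Annotation: the C++ probes around this point temporarily switch the tools to
# C++ (ac_ext=cpp) and check two old portability points: whether nested
# namespaces compile (HAVE_NAMESPACES) and whether std::stringstream from
# <sstream> is available (HAVE_SSTREAM); the compiler settings are restored to
# C afterwards.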
-s conftest.err } && test -s conftest.$ac_objext; then ac_cv_cxx_namespaces=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_cxx_namespaces=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu fi { $as_echo "$as_me:$LINENO: result: $ac_cv_cxx_namespaces" >&5 $as_echo "$ac_cv_cxx_namespaces" >&6; } if test "$ac_cv_cxx_namespaces" = yes; then cat >>confdefs.h <<\_ACEOF #define HAVE_NAMESPACES /**/ _ACEOF fi { $as_echo "$as_me:$LINENO: checking whether the compiler has stringstream" >&5 $as_echo_n "checking whether the compiler has stringstream... " >&6; } if test "${ac_cv_cxx_have_sstream+set}" = set; then $as_echo_n "(cached) " >&6 else ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #ifdef HAVE_NAMESPACES using namespace std; #endif int main () { stringstream message; message << "Hello"; return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_cxx_have_sstream=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_cxx_have_sstream=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu fi { $as_echo "$as_me:$LINENO: result: $ac_cv_cxx_have_sstream" >&5 $as_echo "$ac_cv_cxx_have_sstream" >&6; } if test "$ac_cv_cxx_have_sstream" = yes; then cat >>confdefs.h <<\_ACEOF #define HAVE_SSTREAM /**/ _ACEOF fi # Checks for typedefs, structures, and compiler characteristics. { $as_echo "$as_me:$LINENO: checking for stdbool.h that conforms to C99" >&5 $as_echo_n "checking for stdbool.h that conforms to C99... " >&6; } if test "${ac_cv_header_stdbool_h+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #ifndef bool "error: bool is not defined" #endif #ifndef false "error: false is not defined" #endif #if false "error: false is not 0" #endif #ifndef true "error: true is not defined" #endif #if true != 1 "error: true is not 1" #endif #ifndef __bool_true_false_are_defined "error: __bool_true_false_are_defined is not defined" #endif struct s { _Bool s: 1; _Bool t; } s; char a[true == 1 ? 1 : -1]; char b[false == 0 ? 1 : -1]; char c[__bool_true_false_are_defined == 1 ? 
1 : -1]; char d[(bool) 0.5 == true ? 1 : -1]; bool e = &s; char f[(_Bool) 0.0 == false ? 1 : -1]; char g[true]; char h[sizeof (_Bool)]; char i[sizeof s.t]; enum { j = false, k = true, l = false * true, m = true * 256 }; /* The following fails for HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */ _Bool n[m]; char o[sizeof n == m * sizeof n[0] ? 1 : -1]; char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1]; # if defined __xlc__ || defined __GNUC__ /* Catch a bug in IBM AIX xlc compiler version 6.0.0.0 reported by James Lemley on 2005-10-05; see http://lists.gnu.org/archive/html/bug-coreutils/2005-10/msg00086.html This test is not quite right, since xlc is allowed to reject this program, as the initializer for xlcbug is not one of the forms that C requires support for. However, doing the test right would require a runtime test, and that would make cross-compilation harder. Let us hope that IBM fixes the xlc bug, and also adds support for this kind of constant expression. In the meantime, this test will reject xlc, which is OK, since our stdbool.h substitute should suffice. We also test this with GCC, where it should work, to detect more quickly whether someone messes up the test in the future. */ char digs[] = "0123456789"; int xlcbug = 1 / (&(digs + 5)[-2 + (bool) 1] == &digs[4] ? 1 : -1); # endif /* Catch a bug in an HP-UX C compiler. See http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html */ _Bool q = true; _Bool *pq = &q; int main () { *pq |= q; *pq |= ! q; /* Refer to every declared value, to avoid compiler optimizations. */ return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l + !m + !n + !o + !p + !q + !pq); ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_header_stdbool_h=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_header_stdbool_h=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_stdbool_h" >&5 $as_echo "$ac_cv_header_stdbool_h" >&6; } { $as_echo "$as_me:$LINENO: checking for _Bool" >&5 $as_echo_n "checking for _Bool... " >&6; } if test "${ac_cv_type__Bool+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_type__Bool=no cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { if (sizeof (_Bool)) return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { if (sizeof ((_Bool))) return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_type__Bool=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_type__Bool" >&5 $as_echo "$ac_cv_type__Bool" >&6; } if test "x$ac_cv_type__Bool" = x""yes; then cat >>confdefs.h <<_ACEOF #define HAVE__BOOL 1 _ACEOF fi if test $ac_cv_header_stdbool_h = yes; then cat >>confdefs.h <<\_ACEOF #define HAVE_STDBOOL_H 1 _ACEOF fi { $as_echo "$as_me:$LINENO: checking for an ANSI C-conforming const" >&5 $as_echo_n "checking for an ANSI C-conforming const... " >&6; } if test "${ac_cv_c_const+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { /* FIXME: Include the comments suggested by Paul. */ #ifndef __cplusplus /* Ultrix mips cc rejects this. */ typedef int charset[2]; const charset cs; /* SunOS 4.1.1 cc rejects this. */ char const *const *pcpcc; char **ppc; /* NEC SVR4.0.2 mips cc rejects this. */ struct point {int x, y;}; static struct point const zero = {0,0}; /* AIX XL C 1.02.0.0 rejects this. It does not let you subtract one const X* pointer from another in an arm of an if-expression whose if-part is not a constant expression */ const char *g = "string"; pcpcc = &g + (g ? g-g : 0); /* HPUX 7.0 cc rejects these. */ ++pcpcc; ppc = (char**) pcpcc; pcpcc = (char const *const *) ppc; { /* SCO 3.2v4 cc rejects this. */ char *t; char const *s = 0 ? (char *) 0 : (char const *) 0; *t++ = 0; if (s) return 0; } { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */ int x[] = {25, 17}; const int *foo = &x[0]; ++foo; } { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */ typedef const int *iptr; iptr p = 0; ++p; } { /* AIX XL C 1.02.0.0 rejects this saying "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */ struct s { int j; const int *ap[3]; }; struct s *b; b->j = 5; } { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */ const int foo = 10; if (!foo) return 0; } return !cs[0] && !zero.x; #endif ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? 
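# Annotation: the stdbool.h program above deliberately pokes at known compiler
# bugs (an IBM xlc 6.0 issue and an HP-UX compiler bug, both cited in its
# comments) before HAVE_STDBOOL_H is defined; _Bool is then probed on its own
# so HAVE__BOOL can be set even when the header is rejected. The adjacent ANSI
# `const' test defines const away entirely (#define const) on pre-ANSI
# compilers.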
= $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_c_const=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_c_const=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_c_const" >&5 $as_echo "$ac_cv_c_const" >&6; } if test $ac_cv_c_const = no; then cat >>confdefs.h <<\_ACEOF #define const /**/ _ACEOF fi { $as_echo "$as_me:$LINENO: checking for uid_t in sys/types.h" >&5 $as_echo_n "checking for uid_t in sys/types.h... " >&6; } if test "${ac_cv_type_uid_t+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "uid_t" >/dev/null 2>&1; then ac_cv_type_uid_t=yes else ac_cv_type_uid_t=no fi rm -f conftest* fi { $as_echo "$as_me:$LINENO: result: $ac_cv_type_uid_t" >&5 $as_echo "$ac_cv_type_uid_t" >&6; } if test $ac_cv_type_uid_t = no; then cat >>confdefs.h <<\_ACEOF #define uid_t int _ACEOF cat >>confdefs.h <<\_ACEOF #define gid_t int _ACEOF fi { $as_echo "$as_me:$LINENO: checking for inline" >&5 $as_echo_n "checking for inline... " >&6; } if test "${ac_cv_c_inline+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_c_inline=no for ac_kw in inline __inline__ __inline; do cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #ifndef __cplusplus typedef int foo_t; static $ac_kw foo_t static_foo () {return 0; } $ac_kw foo_t foo () {return 0; } #endif _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_c_inline=$ac_kw else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext test "$ac_cv_c_inline" != no && break done fi { $as_echo "$as_me:$LINENO: result: $ac_cv_c_inline" >&5 $as_echo "$ac_cv_c_inline" >&6; } case $ac_cv_c_inline in inline | yes) ;; *) case $ac_cv_c_inline in no) ac_val=;; *) ac_val=$ac_cv_c_inline;; esac cat >>confdefs.h <<_ACEOF #ifndef __cplusplus #define inline $ac_val #endif _ACEOF ;; esac { $as_echo "$as_me:$LINENO: checking for mode_t" >&5 $as_echo_n "checking for mode_t... " >&6; } if test "${ac_cv_type_mode_t+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_type_mode_t=no cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { if (sizeof (mode_t)) return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? 
grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { if (sizeof ((mode_t))) return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_type_mode_t=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_type_mode_t" >&5 $as_echo "$ac_cv_type_mode_t" >&6; } if test "x$ac_cv_type_mode_t" = x""yes; then : else cat >>confdefs.h <<_ACEOF #define mode_t int _ACEOF fi { $as_echo "$as_me:$LINENO: checking for off_t" >&5 $as_echo_n "checking for off_t... " >&6; } if test "${ac_cv_type_off_t+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_type_off_t=no cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { if (sizeof (off_t)) return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { if (sizeof ((off_t))) return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then : else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_type_off_t=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_type_off_t" >&5 $as_echo "$ac_cv_type_off_t" >&6; } if test "x$ac_cv_type_off_t" = x""yes; then : else cat >>confdefs.h <<_ACEOF #define off_t long int _ACEOF fi { $as_echo "$as_me:$LINENO: checking for pid_t" >&5 $as_echo_n "checking for pid_t... " >&6; } if test "${ac_cv_type_pid_t+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_type_pid_t=no cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { if (sizeof (pid_t)) return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { if (sizeof ((pid_t))) return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_type_pid_t=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_type_pid_t" >&5 $as_echo "$ac_cv_type_pid_t" >&6; } if test "x$ac_cv_type_pid_t" = x""yes; then : else cat >>confdefs.h <<_ACEOF #define pid_t int _ACEOF fi { $as_echo "$as_me:$LINENO: checking for size_t" >&5 $as_echo_n "checking for size_t... " >&6; } if test "${ac_cv_type_size_t+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_type_size_t=no cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { if (sizeof (size_t)) return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? 
grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { if (sizeof ((size_t))) return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_type_size_t=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_type_size_t" >&5 $as_echo "$ac_cv_type_size_t" >&6; } if test "x$ac_cv_type_size_t" = x""yes; then : else cat >>confdefs.h <<_ACEOF #define size_t unsigned int _ACEOF fi { $as_echo "$as_me:$LINENO: checking for struct stat.st_blksize" >&5 $as_echo_n "checking for struct stat.st_blksize... " >&6; } if test "${ac_cv_member_struct_stat_st_blksize+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { static struct stat ac_aggr; if (ac_aggr.st_blksize) return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_member_struct_stat_st_blksize=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { static struct stat ac_aggr; if (sizeof ac_aggr.st_blksize) return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then ac_cv_member_struct_stat_st_blksize=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_member_struct_stat_st_blksize=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_member_struct_stat_st_blksize" >&5 $as_echo "$ac_cv_member_struct_stat_st_blksize" >&6; } if test "x$ac_cv_member_struct_stat_st_blksize" = x""yes; then cat >>confdefs.h <<_ACEOF #define HAVE_STRUCT_STAT_ST_BLKSIZE 1 _ACEOF fi { $as_echo "$as_me:$LINENO: checking whether time.h and sys/time.h may both be included" >&5 $as_echo_n "checking whether time.h and sys/time.h may both be included... " >&6; } if test "${ac_cv_header_time+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <sys/types.h> #include <sys/time.h> #include <time.h> int main () { if ((struct tm *) 0) return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_header_time=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_header_time=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_time" >&5 $as_echo "$ac_cv_header_time" >&6; } if test $ac_cv_header_time = yes; then cat >>confdefs.h <<\_ACEOF #define TIME_WITH_SYS_TIME 1 _ACEOF fi { $as_echo "$as_me:$LINENO: checking whether struct tm is in sys/time.h or time.h" >&5 $as_echo_n "checking whether struct tm is in sys/time.h or time.h... " >&6; } if test "${ac_cv_struct_tm+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <sys/types.h> #include <time.h> int main () { struct tm tm; int *p = &tm.tm_sec; return !p; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_struct_tm=time.h else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_struct_tm=sys/time.h fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_struct_tm" >&5 $as_echo "$ac_cv_struct_tm" >&6; } if test $ac_cv_struct_tm = sys/time.h; then cat >>confdefs.h <<\_ACEOF #define TM_IN_SYS_TIME 1 _ACEOF fi { $as_echo "$as_me:$LINENO: checking for ptrdiff_t" >&5 $as_echo_n "checking for ptrdiff_t... 
" >&6; } if test "${ac_cv_type_ptrdiff_t+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_type_ptrdiff_t=no cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { if (sizeof (ptrdiff_t)) return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { if (sizeof ((ptrdiff_t))) return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_type_ptrdiff_t=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_type_ptrdiff_t" >&5 $as_echo "$ac_cv_type_ptrdiff_t" >&6; } if test "x$ac_cv_type_ptrdiff_t" = x""yes; then cat >>confdefs.h <<_ACEOF #define HAVE_PTRDIFF_T 1 _ACEOF fi # Checks for library functions. for ac_header in unistd.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... 
" >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:$LINENO: checking for working chown" >&5 $as_echo_n "checking for working chown... " >&6; } if test "${ac_cv_func_chown_works+set}" = set; then $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then ac_cv_func_chown_works=no else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include int main () { char *f = "conftest.chown"; struct stat before, after; if (creat (f, 0600) < 0) return 1; if (stat (f, &before) < 0) return 1; if (chown (f, (uid_t) -1, (gid_t) -1) == -1) return 1; if (stat (f, &after) < 0) return 1; return ! (before.st_uid == after.st_uid && before.st_gid == after.st_gid); ; return 0; } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_func_chown_works=yes else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) ac_cv_func_chown_works=no fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi rm -f conftest.chown fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_chown_works" >&5 $as_echo "$ac_cv_func_chown_works" >&6; } if test $ac_cv_func_chown_works = yes; then cat >>confdefs.h <<\_ACEOF #define HAVE_CHOWN 1 _ACEOF fi { $as_echo "$as_me:$LINENO: checking whether closedir returns void" >&5 $as_echo_n "checking whether closedir returns void... " >&6; } if test "${ac_cv_func_closedir_void+set}" = set; then $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then ac_cv_func_closedir_void=yes else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header_dirent> #ifndef __cplusplus int closedir (); #endif int main () { return closedir (opendir (".")) != 0; ; return 0; } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_func_closedir_void=no else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) ac_cv_func_closedir_void=yes fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_closedir_void" >&5 $as_echo "$ac_cv_func_closedir_void" >&6; } if test $ac_cv_func_closedir_void = yes; then cat >>confdefs.h <<\_ACEOF #define CLOSEDIR_VOID 1 _ACEOF fi { $as_echo "$as_me:$LINENO: checking for error_at_line" >&5 $as_echo_n "checking for error_at_line... " >&6; } if test "${ac_cv_lib_error_at_line+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include int main () { error_at_line (0, 0, "", 0, "an error occurred"); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_error_at_line=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_error_at_line=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_error_at_line" >&5 $as_echo "$ac_cv_lib_error_at_line" >&6; } if test $ac_cv_lib_error_at_line = no; then case " $LIBOBJS " in *" error.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS error.$ac_objext" ;; esac fi for ac_header in vfork.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" 
>&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_func in fork vfork do as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for $ac_func" >&5 $as_echo_n "checking for $ac_func... " >&6; } if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Define $ac_func to an innocuous variant, in case declares $ac_func. For example, HP-UX 11i declares gettimeofday. */ #define $ac_func innocuous_$ac_func /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $ac_func (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $ac_func /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $ac_func (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$ac_func || defined __stub___$ac_func choke me #endif int main () { return $ac_func (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then eval "$as_ac_var=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_var=no" fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext fi ac_res=`eval 'as_val=${'$as_ac_var'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_var'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done if test "x$ac_cv_func_fork" = xyes; then { $as_echo "$as_me:$LINENO: checking for working fork" >&5 $as_echo_n "checking for working fork... " >&6; } if test "${ac_cv_func_fork_works+set}" = set; then $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then ac_cv_func_fork_works=cross else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { /* By Ruediger Kuhlmann. */ return fork () < 0; ; return 0; } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_func_fork_works=yes else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) ac_cv_func_fork_works=no fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_fork_works" >&5 $as_echo "$ac_cv_func_fork_works" >&6; } else ac_cv_func_fork_works=$ac_cv_func_fork fi if test "x$ac_cv_func_fork_works" = xcross; then case $host in *-*-amigaos* | *-*-msdosdjgpp*) # Override, as these systems have only a dummy fork() stub ac_cv_func_fork_works=no ;; *) ac_cv_func_fork_works=yes ;; esac { $as_echo "$as_me:$LINENO: WARNING: result $ac_cv_func_fork_works guessed because of cross compilation" >&5 $as_echo "$as_me: WARNING: result $ac_cv_func_fork_works guessed because of cross compilation" >&2;} fi ac_cv_func_vfork_works=$ac_cv_func_vfork if test "x$ac_cv_func_vfork" = xyes; then { $as_echo "$as_me:$LINENO: checking for working vfork" >&5 $as_echo_n "checking for working vfork... " >&6; } if test "${ac_cv_func_vfork_works+set}" = set; then $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then ac_cv_func_vfork_works=cross else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Thanks to Paul Eggert for this test. 
*/ $ac_includes_default #include #ifdef HAVE_VFORK_H # include #endif /* On some sparc systems, changes by the child to local and incoming argument registers are propagated back to the parent. The compiler is told about this with #include , but some compilers (e.g. gcc -O) don't grok . Test for this by using a static variable whose address is put into a register that is clobbered by the vfork. */ static void #ifdef __cplusplus sparc_address_test (int arg) # else sparc_address_test (arg) int arg; #endif { static pid_t child; if (!child) { child = vfork (); if (child < 0) { perror ("vfork"); _exit(2); } if (!child) { arg = getpid(); write(-1, "", 0); _exit (arg); } } } int main () { pid_t parent = getpid (); pid_t child; sparc_address_test (0); child = vfork (); if (child == 0) { /* Here is another test for sparc vfork register problems. This test uses lots of local variables, at least as many local variables as main has allocated so far including compiler temporaries. 4 locals are enough for gcc 1.40.3 on a Solaris 4.1.3 sparc, but we use 8 to be safe. A buggy compiler should reuse the register of parent for one of the local variables, since it will think that parent can't possibly be used any more in this routine. Assigning to the local variable will thus munge parent in the parent process. */ pid_t p = getpid(), p1 = getpid(), p2 = getpid(), p3 = getpid(), p4 = getpid(), p5 = getpid(), p6 = getpid(), p7 = getpid(); /* Convince the compiler that p..p7 are live; otherwise, it might use the same hardware register for all 8 local variables. */ if (p != p1 || p != p2 || p != p3 || p != p4 || p != p5 || p != p6 || p != p7) _exit(1); /* On some systems (e.g. IRIX 3.3), vfork doesn't separate parent from child file descriptors. If the child closes a descriptor before it execs or exits, this munges the parent's descriptor as well. Test for this by closing stdout in the child. */ _exit(close(fileno(stdout)) != 0); } else { int status; struct stat st; while (wait(&status) != child) ; return ( /* Was there some problem with vforking? */ child < 0 /* Did the child fail? (This shouldn't happen.) */ || status /* Did the vfork/compiler bug occur? */ || parent != getpid() /* Did the file descriptor bug occur? */ || fstat(fileno(stdout), &st) != 0 ); } } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_func_vfork_works=yes else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) ac_cv_func_vfork_works=no fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_vfork_works" >&5 $as_echo "$ac_cv_func_vfork_works" >&6; } fi; if test "x$ac_cv_func_fork_works" = xcross; then ac_cv_func_vfork_works=$ac_cv_func_vfork { $as_echo "$as_me:$LINENO: WARNING: result $ac_cv_func_vfork_works guessed because of cross compilation" >&5 $as_echo "$as_me: WARNING: result $ac_cv_func_vfork_works guessed because of cross compilation" >&2;} fi if test "x$ac_cv_func_vfork_works" = xyes; then cat >>confdefs.h <<\_ACEOF #define HAVE_WORKING_VFORK 1 _ACEOF else cat >>confdefs.h <<\_ACEOF #define vfork fork _ACEOF fi if test "x$ac_cv_func_fork_works" = xyes; then cat >>confdefs.h <<\_ACEOF #define HAVE_WORKING_FORK 1 _ACEOF fi { $as_echo "$as_me:$LINENO: checking whether lstat dereferences a symlink specified with a trailing slash" >&5 $as_echo_n "checking whether lstat dereferences a symlink specified with a trailing slash... " >&6; } if test "${ac_cv_func_lstat_dereferences_slashed_symlink+set}" = set; then $as_echo_n "(cached) " >&6 else rm -f conftest.sym conftest.file echo >conftest.file if test "$as_ln_s" = "ln -s" && ln -s conftest.file conftest.sym; then if test "$cross_compiling" = yes; then ac_cv_func_lstat_dereferences_slashed_symlink=no else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { struct stat sbuf; /* Linux will dereference the symlink and fail. That is better in the sense that it means we will not have to compile and use the lstat wrapper. */ return lstat ("conftest.sym/", &sbuf) == 0; ; return 0; } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_func_lstat_dereferences_slashed_symlink=yes else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) ac_cv_func_lstat_dereferences_slashed_symlink=no fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi else # If the `ln -s' command failed, then we probably don't even # have an lstat function. 
ac_cv_func_lstat_dereferences_slashed_symlink=no fi rm -f conftest.sym conftest.file fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_lstat_dereferences_slashed_symlink" >&5 $as_echo "$ac_cv_func_lstat_dereferences_slashed_symlink" >&6; } test $ac_cv_func_lstat_dereferences_slashed_symlink = yes && cat >>confdefs.h <<_ACEOF #define LSTAT_FOLLOWS_SLASHED_SYMLINK 1 _ACEOF if test $ac_cv_func_lstat_dereferences_slashed_symlink = no; then case " $LIBOBJS " in *" lstat.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS lstat.$ac_objext" ;; esac fi { $as_echo "$as_me:$LINENO: checking whether lstat accepts an empty string" >&5 $as_echo_n "checking whether lstat accepts an empty string... " >&6; } if test "${ac_cv_func_lstat_empty_string_bug+set}" = set; then $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then ac_cv_func_lstat_empty_string_bug=yes else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { struct stat sbuf; return lstat ("", &sbuf) == 0; ; return 0; } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_func_lstat_empty_string_bug=no else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) ac_cv_func_lstat_empty_string_bug=yes fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_lstat_empty_string_bug" >&5 $as_echo "$ac_cv_func_lstat_empty_string_bug" >&6; } if test $ac_cv_func_lstat_empty_string_bug = yes; then case " $LIBOBJS " in *" lstat.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS lstat.$ac_objext" ;; esac cat >>confdefs.h <<_ACEOF #define HAVE_LSTAT_EMPTY_STRING_BUG 1 _ACEOF fi { $as_echo "$as_me:$LINENO: checking whether lstat dereferences a symlink specified with a trailing slash" >&5 $as_echo_n "checking whether lstat dereferences a symlink specified with a trailing slash... " >&6; } if test "${ac_cv_func_lstat_dereferences_slashed_symlink+set}" = set; then $as_echo_n "(cached) " >&6 else rm -f conftest.sym conftest.file echo >conftest.file if test "$as_ln_s" = "ln -s" && ln -s conftest.file conftest.sym; then if test "$cross_compiling" = yes; then ac_cv_func_lstat_dereferences_slashed_symlink=no else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { struct stat sbuf; /* Linux will dereference the symlink and fail. That is better in the sense that it means we will not have to compile and use the lstat wrapper. 
*/ return lstat ("conftest.sym/", &sbuf) == 0; ; return 0; } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_func_lstat_dereferences_slashed_symlink=yes else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) ac_cv_func_lstat_dereferences_slashed_symlink=no fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi else # If the `ln -s' command failed, then we probably don't even # have an lstat function. ac_cv_func_lstat_dereferences_slashed_symlink=no fi rm -f conftest.sym conftest.file fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_lstat_dereferences_slashed_symlink" >&5 $as_echo "$ac_cv_func_lstat_dereferences_slashed_symlink" >&6; } test $ac_cv_func_lstat_dereferences_slashed_symlink = yes && cat >>confdefs.h <<_ACEOF #define LSTAT_FOLLOWS_SLASHED_SYMLINK 1 _ACEOF if test $ac_cv_func_lstat_dereferences_slashed_symlink = no; then case " $LIBOBJS " in *" lstat.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS lstat.$ac_objext" ;; esac fi { $as_echo "$as_me:$LINENO: checking for working memcmp" >&5 $as_echo_n "checking for working memcmp... " >&6; } if test "${ac_cv_func_memcmp_working+set}" = set; then $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then ac_cv_func_memcmp_working=no else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { /* Some versions of memcmp are not 8-bit clean. */ char c0 = '\100', c1 = '\200', c2 = '\201'; if (memcmp(&c0, &c2, 1) >= 0 || memcmp(&c1, &c2, 1) >= 0) return 1; /* The Next x86 OpenStep bug shows up only when comparing 16 bytes or more and with at least one buffer not starting on a 4-byte boundary. William Lewis provided this test program. */ { char foo[21]; char bar[21]; int i; for (i = 0; i < 4; i++) { char *a = foo + i; char *b = bar + i; strcpy (a, "--------01111111"); strcpy (b, "--------10000000"); if (memcmp (a, b, 16) >= 0) return 1; } return 0; } ; return 0; } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_func_memcmp_working=yes else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) ac_cv_func_memcmp_working=no fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_memcmp_working" >&5 $as_echo "$ac_cv_func_memcmp_working" >&6; } test $ac_cv_func_memcmp_working = no && case " $LIBOBJS " in *" memcmp.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS memcmp.$ac_objext" ;; esac for ac_header in $ac_header_list do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? 
case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_func in $ac_func_list do as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for $ac_func" >&5 $as_echo_n "checking for $ac_func... " >&6; } if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Define $ac_func to an innocuous variant, in case declares $ac_func. For example, HP-UX 11i declares gettimeofday. */ #define $ac_func innocuous_$ac_func /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $ac_func (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $ac_func /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char $ac_func (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$ac_func || defined __stub___$ac_func choke me #endif int main () { return $ac_func (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then eval "$as_ac_var=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_var=no" fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext fi ac_res=`eval 'as_val=${'$as_ac_var'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_var'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:$LINENO: checking for working mktime" >&5 $as_echo_n "checking for working mktime... " >&6; } if test "${ac_cv_func_working_mktime+set}" = set; then $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then ac_cv_func_working_mktime=no else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Test program from Paul Eggert and Tony Leneis. */ #ifdef TIME_WITH_SYS_TIME # include # include #else # ifdef HAVE_SYS_TIME_H # include # else # include # endif #endif #include #include #ifdef HAVE_UNISTD_H # include #endif #ifndef HAVE_ALARM # define alarm(X) /* empty */ #endif /* Work around redefinition to rpl_putenv by other config tests. */ #undef putenv static time_t time_t_max; static time_t time_t_min; /* Values we'll use to set the TZ environment variable. */ static char *tz_strings[] = { (char *) 0, "TZ=GMT0", "TZ=JST-9", "TZ=EST+3EDT+2,M10.1.0/00:00:00,M2.3.0/00:00:00" }; #define N_STRINGS (sizeof (tz_strings) / sizeof (tz_strings[0])) /* Return 0 if mktime fails to convert a date in the spring-forward gap. Based on a problem report from Andreas Jaeger. */ static int spring_forward_gap () { /* glibc (up to about 1998-10-07) failed this test. */ struct tm tm; /* Use the portable POSIX.1 specification "TZ=PST8PDT,M4.1.0,M10.5.0" instead of "TZ=America/Vancouver" in order to detect the bug even on systems that don't support the Olson extension, or don't have the full zoneinfo tables installed. */ putenv ("TZ=PST8PDT,M4.1.0,M10.5.0"); tm.tm_year = 98; tm.tm_mon = 3; tm.tm_mday = 5; tm.tm_hour = 2; tm.tm_min = 0; tm.tm_sec = 0; tm.tm_isdst = -1; return mktime (&tm) != (time_t) -1; } static int mktime_test1 (now) time_t now; { struct tm *lt; return ! 
(lt = localtime (&now)) || mktime (lt) == now; } static int mktime_test (now) time_t now; { return (mktime_test1 (now) && mktime_test1 ((time_t) (time_t_max - now)) && mktime_test1 ((time_t) (time_t_min + now))); } static int irix_6_4_bug () { /* Based on code from Ariel Faigon. */ struct tm tm; tm.tm_year = 96; tm.tm_mon = 3; tm.tm_mday = 0; tm.tm_hour = 0; tm.tm_min = 0; tm.tm_sec = 0; tm.tm_isdst = -1; mktime (&tm); return tm.tm_mon == 2 && tm.tm_mday == 31; } static int bigtime_test (j) int j; { struct tm tm; time_t now; tm.tm_year = tm.tm_mon = tm.tm_mday = tm.tm_hour = tm.tm_min = tm.tm_sec = j; now = mktime (&tm); if (now != (time_t) -1) { struct tm *lt = localtime (&now); if (! (lt && lt->tm_year == tm.tm_year && lt->tm_mon == tm.tm_mon && lt->tm_mday == tm.tm_mday && lt->tm_hour == tm.tm_hour && lt->tm_min == tm.tm_min && lt->tm_sec == tm.tm_sec && lt->tm_yday == tm.tm_yday && lt->tm_wday == tm.tm_wday && ((lt->tm_isdst < 0 ? -1 : 0 < lt->tm_isdst) == (tm.tm_isdst < 0 ? -1 : 0 < tm.tm_isdst)))) return 0; } return 1; } static int year_2050_test () { /* The correct answer for 2050-02-01 00:00:00 in Pacific time, ignoring leap seconds. */ unsigned long int answer = 2527315200UL; struct tm tm; time_t t; tm.tm_year = 2050 - 1900; tm.tm_mon = 2 - 1; tm.tm_mday = 1; tm.tm_hour = tm.tm_min = tm.tm_sec = 0; tm.tm_isdst = -1; /* Use the portable POSIX.1 specification "TZ=PST8PDT,M4.1.0,M10.5.0" instead of "TZ=America/Vancouver" in order to detect the bug even on systems that don't support the Olson extension, or don't have the full zoneinfo tables installed. */ putenv ("TZ=PST8PDT,M4.1.0,M10.5.0"); t = mktime (&tm); /* Check that the result is either a failure, or close enough to the correct answer that we can assume the discrepancy is due to leap seconds. */ return (t == (time_t) -1 || (0 < t && answer - 120 <= t && t <= answer + 120)); } int main () { time_t t, delta; int i, j; /* This test makes some buggy mktime implementations loop. Give up after 60 seconds; a mktime slower than that isn't worth using anyway. */ alarm (60); for (;;) { t = (time_t_max << 1) + 1; if (t <= time_t_max) break; time_t_max = t; } time_t_min = - ((time_t) ~ (time_t) 0 == (time_t) -1) - time_t_max; delta = time_t_max / 997; /* a suitable prime number */ for (i = 0; i < N_STRINGS; i++) { if (tz_strings[i]) putenv (tz_strings[i]); for (t = 0; t <= time_t_max - delta; t += delta) if (! mktime_test (t)) return 1; if (! (mktime_test ((time_t) 1) && mktime_test ((time_t) (60 * 60)) && mktime_test ((time_t) (60 * 60 * 24)))) return 1; for (j = 1; ; j <<= 1) if (! bigtime_test (j)) return 1; else if (INT_MAX / 2 < j) break; if (! bigtime_test (INT_MAX)) return 1; } return ! (irix_6_4_bug () && spring_forward_gap () && year_2050_test ()); } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_func_working_mktime=yes else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) ac_cv_func_working_mktime=no fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_working_mktime" >&5 $as_echo "$ac_cv_func_working_mktime" >&6; } if test $ac_cv_func_working_mktime = no; then case " $LIBOBJS " in *" mktime.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS mktime.$ac_objext" ;; esac fi # GNU compatible *ALLOC functions are available on Windows # The test will however fail when cross-compiling with mingw case "${host}" in *-*-mingw32) : ;; *) for ac_header in stdlib.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! 
-s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:$LINENO: checking for GNU libc compatible malloc" >&5 $as_echo_n "checking for GNU libc compatible malloc... " >&6; } if test "${ac_cv_func_malloc_0_nonnull+set}" = set; then $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then ac_cv_func_malloc_0_nonnull=no else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #if defined STDC_HEADERS || defined HAVE_STDLIB_H # include #else char *malloc (); #endif int main () { return ! malloc (0); ; return 0; } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? 
$as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_func_malloc_0_nonnull=yes else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) ac_cv_func_malloc_0_nonnull=no fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_malloc_0_nonnull" >&5 $as_echo "$ac_cv_func_malloc_0_nonnull" >&6; } if test $ac_cv_func_malloc_0_nonnull = yes; then cat >>confdefs.h <<\_ACEOF #define HAVE_MALLOC 1 _ACEOF else cat >>confdefs.h <<\_ACEOF #define HAVE_MALLOC 0 _ACEOF case " $LIBOBJS " in *" malloc.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS malloc.$ac_objext" ;; esac cat >>confdefs.h <<\_ACEOF #define malloc rpl_malloc _ACEOF fi for ac_header in stdlib.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? 
grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:$LINENO: checking for GNU libc compatible realloc" >&5 $as_echo_n "checking for GNU libc compatible realloc... " >&6; } if test "${ac_cv_func_realloc_0_nonnull+set}" = set; then $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then ac_cv_func_realloc_0_nonnull=no else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #if defined STDC_HEADERS || defined HAVE_STDLIB_H # include #else char *realloc (); #endif int main () { return ! 
realloc (0, 0); ; return 0; } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_func_realloc_0_nonnull=yes else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) ac_cv_func_realloc_0_nonnull=no fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_realloc_0_nonnull" >&5 $as_echo "$ac_cv_func_realloc_0_nonnull" >&6; } if test $ac_cv_func_realloc_0_nonnull = yes; then cat >>confdefs.h <<\_ACEOF #define HAVE_REALLOC 1 _ACEOF else cat >>confdefs.h <<\_ACEOF #define HAVE_REALLOC 0 _ACEOF case " $LIBOBJS " in *" realloc.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS realloc.$ac_objext" ;; esac cat >>confdefs.h <<\_ACEOF #define realloc rpl_realloc _ACEOF fi ;; esac for ac_header in sys/select.h sys/socket.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:$LINENO: checking types of arguments for select" >&5 $as_echo_n "checking types of arguments for select... 
" >&6; } if test "${ac_cv_func_select_args+set}" = set; then $as_echo_n "(cached) " >&6 else for ac_arg234 in 'fd_set *' 'int *' 'void *'; do for ac_arg1 in 'int' 'size_t' 'unsigned long int' 'unsigned int'; do for ac_arg5 in 'struct timeval *' 'const struct timeval *'; do cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #ifdef HAVE_SYS_SELECT_H # include #endif #ifdef HAVE_SYS_SOCKET_H # include #endif int main () { extern int select ($ac_arg1, $ac_arg234, $ac_arg234, $ac_arg234, $ac_arg5); ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_func_select_args="$ac_arg1,$ac_arg234,$ac_arg5"; break 3 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext done done done # Provide a safe default value. : ${ac_cv_func_select_args='int,int *,struct timeval *'} fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_select_args" >&5 $as_echo "$ac_cv_func_select_args" >&6; } ac_save_IFS=$IFS; IFS=',' set dummy `echo "$ac_cv_func_select_args" | sed 's/\*/\*/g'` IFS=$ac_save_IFS shift cat >>confdefs.h <<_ACEOF #define SELECT_TYPE_ARG1 $1 _ACEOF cat >>confdefs.h <<_ACEOF #define SELECT_TYPE_ARG234 ($2) _ACEOF cat >>confdefs.h <<_ACEOF #define SELECT_TYPE_ARG5 ($3) _ACEOF rm -f conftest* { $as_echo "$as_me:$LINENO: checking return type of signal handlers" >&5 $as_echo_n "checking return type of signal handlers... " >&6; } if test "${ac_cv_type_signal+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include int main () { return *(signal (0, 0)) (0) == 1; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_type_signal=int else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_type_signal=void fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_type_signal" >&5 $as_echo "$ac_cv_type_signal" >&6; } cat >>confdefs.h <<_ACEOF #define RETSIGTYPE $ac_cv_type_signal _ACEOF { $as_echo "$as_me:$LINENO: checking whether strerror_r is declared" >&5 $as_echo_n "checking whether strerror_r is declared... " >&6; } if test "${ac_cv_have_decl_strerror_r+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. 
*/ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { #ifndef strerror_r (void) strerror_r; #endif ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_have_decl_strerror_r=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_have_decl_strerror_r=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_have_decl_strerror_r" >&5 $as_echo "$ac_cv_have_decl_strerror_r" >&6; } if test "x$ac_cv_have_decl_strerror_r" = x""yes; then cat >>confdefs.h <<_ACEOF #define HAVE_DECL_STRERROR_R 1 _ACEOF else cat >>confdefs.h <<_ACEOF #define HAVE_DECL_STRERROR_R 0 _ACEOF fi for ac_func in strerror_r do as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for $ac_func" >&5 $as_echo_n "checking for $ac_func... " >&6; } if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Define $ac_func to an innocuous variant, in case declares $ac_func. For example, HP-UX 11i declares gettimeofday. */ #define $ac_func innocuous_$ac_func /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $ac_func (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $ac_func /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $ac_func (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$ac_func || defined __stub___$ac_func choke me #endif int main () { return $ac_func (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then eval "$as_ac_var=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_var=no" fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext fi ac_res=`eval 'as_val=${'$as_ac_var'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_var'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:$LINENO: checking whether strerror_r returns char *" >&5 $as_echo_n "checking whether strerror_r returns char *... " >&6; } if test "${ac_cv_func_strerror_r_char_p+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_func_strerror_r_char_p=no if test $ac_cv_have_decl_strerror_r = yes; then cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { char buf[100]; char x = *strerror_r (0, buf, sizeof buf); char *p = strerror_r (0, buf, sizeof buf); return !p || x; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_func_strerror_r_char_p=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext else # strerror_r is not declared. Choose between # systems that have relatively inaccessible declarations for the # function. BeOS and DEC UNIX 4.0 fall in this category, but the # former has a strerror_r that returns char*, while the latter # has a strerror_r that returns `int'. # This test should segfault on the DEC system. if test "$cross_compiling" = yes; then : else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default extern char *strerror_r (); int main () { char buf[100]; char x = *strerror_r (0, buf, sizeof buf); return ! isalpha (x); ; return 0; } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_func_strerror_r_char_p=yes else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_strerror_r_char_p" >&5 $as_echo "$ac_cv_func_strerror_r_char_p" >&6; } if test $ac_cv_func_strerror_r_char_p = yes; then cat >>confdefs.h <<\_ACEOF #define STRERROR_R_CHAR_P 1 _ACEOF fi { $as_echo "$as_me:$LINENO: checking whether stat accepts an empty string" >&5 $as_echo_n "checking whether stat accepts an empty string... " >&6; } if test "${ac_cv_func_stat_empty_string_bug+set}" = set; then $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then ac_cv_func_stat_empty_string_bug=yes else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default int main () { struct stat sbuf; return stat ("", &sbuf) == 0; ; return 0; } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then ac_cv_func_stat_empty_string_bug=no else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) ac_cv_func_stat_empty_string_bug=yes fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_stat_empty_string_bug" >&5 $as_echo "$ac_cv_func_stat_empty_string_bug" >&6; } if test $ac_cv_func_stat_empty_string_bug = yes; then case " $LIBOBJS " in *" stat.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS stat.$ac_objext" ;; esac cat >>confdefs.h <<_ACEOF #define HAVE_STAT_EMPTY_STRING_BUG 1 _ACEOF fi for ac_func in acl dup2 floor ftruncate gethostname getdomainname getpid gmtime_r lchown localtime_r memchr memmove memset mkdir mkfifo regcomp rmdir select setenv socket strcasecmp strchr strcspn strdup strerror strncasecmp strstr strtol strtoul strtoull timegm tzset unsetenv getopt_long_only getgrouplist mkdtemp posix_fallocate readdir_r mkstemp mktemp do as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for $ac_func" >&5 $as_echo_n "checking for $ac_func... " >&6; } if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Define $ac_func to an innocuous variant, in case declares $ac_func. For example, HP-UX 11i declares gettimeofday. 
*/ #define $ac_func innocuous_$ac_func /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $ac_func (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $ac_func /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $ac_func (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$ac_func || defined __stub___$ac_func choke me #endif int main () { return $ac_func (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then eval "$as_ac_var=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_var=no" fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext fi ac_res=`eval 'as_val=${'$as_ac_var'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_var'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:$LINENO: checking for res_query in -lresolv" >&5 $as_echo_n "checking for res_query in -lresolv... " >&6; } if test "${ac_cv_lib_resolv_res_query+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lresolv $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char res_query (); int main () { return res_query (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_resolv_res_query=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_resolv_res_query=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_resolv_res_query" >&5 $as_echo "$ac_cv_lib_resolv_res_query" >&6; } if test "x$ac_cv_lib_resolv_res_query" = x""yes; then LIBRESOLV=-lresolv else LIBRESOLV= fi { $as_echo "$as_me:$LINENO: checking for __dn_skipname in -lresolv" >&5 $as_echo_n "checking for __dn_skipname in -lresolv... " >&6; } if test "${ac_cv_lib_resolv___dn_skipname+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lresolv $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char __dn_skipname (); int main () { return __dn_skipname (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_resolv___dn_skipname=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_resolv___dn_skipname=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_resolv___dn_skipname" >&5 $as_echo "$ac_cv_lib_resolv___dn_skipname" >&6; } if test "x$ac_cv_lib_resolv___dn_skipname" = x""yes; then LIBRESOLV=-lresolv else LIBRESOLV= fi { $as_echo "$as_me:$LINENO: checking for gethostbyname in -lnsl" >&5 $as_echo_n "checking for gethostbyname in -lnsl... " >&6; } if test "${ac_cv_lib_nsl_gethostbyname+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lnsl $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char gethostbyname (); int main () { return gethostbyname (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? 
grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_nsl_gethostbyname=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_nsl_gethostbyname=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_nsl_gethostbyname" >&5 $as_echo "$ac_cv_lib_nsl_gethostbyname" >&6; } if test "x$ac_cv_lib_nsl_gethostbyname" = x""yes; then LIBRESOLV="$LIBRESOLV -lnsl" fi { $as_echo "$as_me:$LINENO: checking for getdomainname in -lnsl" >&5 $as_echo_n "checking for getdomainname in -lnsl... " >&6; } if test "${ac_cv_lib_nsl_getdomainname+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lnsl $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char getdomainname (); int main () { return getdomainname (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_nsl_getdomainname=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_nsl_getdomainname=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_nsl_getdomainname" >&5 $as_echo "$ac_cv_lib_nsl_getdomainname" >&6; } if test "x$ac_cv_lib_nsl_getdomainname" = x""yes; then cat >>confdefs.h <<_ACEOF #define HAVE_LIBNSL 1 _ACEOF LIBS="-lnsl $LIBS" fi fi # check for platfom specific extra libraries and flags EXTRA_LIBS="" REGEX_LIBS="" SOCKET_LIBS="" WIN32="" case "${host}" in *-*-mingw32) WIN32="yes" REGEX_LIBS="-lregex" SOCKET_LIBS="-lws2_32" EXTRA_LIBS="-lole32" # its required to libtool generate .dlls on win32 using mingw LDFLAGS="$LDFLAGS -no-undefined" cat >>confdefs.h <<\_ACEOF #define HAVE_GETDOMAINNAME /**/ _ACEOF for ac_prog in windres.exe ${host}-windres do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_WINDRES+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$WINDRES"; then ac_cv_prog_WINDRES="$WINDRES" # Let the user override the test. 
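# (Descriptive note, not generated by autoconf: the PATH search below is skipped
#  whenever WINDRES is already set in the environment, so a cross toolchain's
#  resource compiler can be forced with, e.g., `WINDRES=i686-w64-mingw32-windres ./configure`;
#  that tool name is only an illustrative example and depends on the installed toolchain.)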
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_WINDRES="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi WINDRES=$ac_cv_prog_WINDRES if test -n "$WINDRES"; then { $as_echo "$as_me:$LINENO: result: $WINDRES" >&5 $as_echo "$WINDRES" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$WINDRES" && break done ;; *solaris*) SOCKET_LIBS="-lsocket" CFLAGS="$CFLAGS -D_POSIX_PTHREAD_SEMANTICS" CXXFLAGS="$CXXFLAGS -D_POSIX_PTHREAD_SEMANTICS" ;; *) cat >>confdefs.h <<\_ACEOF #define HAVE_GETDOMAINNAME /**/ _ACEOF ;; esac if test "x$WIN32" = "xyes" ; then WIN32_TRUE= WIN32_FALSE='#' else WIN32_TRUE='#' WIN32_FALSE= fi case " $LDFLAGS " in " -Wl,--no-undefined ") ;; " -Wl,-no-undefined ") ;; " -Wl,-z -Wl,defs ") ;; " -Wl,-z,defs ") ;; *) case "${host}" in *darwin*);; *solaris*);; *) LDFLAGS="$LDFLAGS -Wl,--no-undefined" ;; esac ;; esac for ac_prog in pdflatex do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_PDFLATEX+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$PDFLATEX"; then ac_cv_prog_PDFLATEX="$PDFLATEX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_PDFLATEX="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi PDFLATEX=$ac_cv_prog_PDFLATEX if test -n "$PDFLATEX"; then { $as_echo "$as_me:$LINENO: result: $PDFLATEX" >&5 $as_echo "$PDFLATEX" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$PDFLATEX" && break done for ac_prog in doxygen do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_DOXYGEN+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$DOXYGEN"; then ac_cv_prog_DOXYGEN="$DOXYGEN" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_DOXYGEN="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DOXYGEN=$ac_cv_prog_DOXYGEN if test -n "$DOXYGEN"; then { $as_echo "$as_me:$LINENO: result: $DOXYGEN" >&5 $as_echo "$DOXYGEN" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$DOXYGEN" && break done for ac_prog in dot do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if test "${ac_cv_prog_DOT+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$DOT"; then ac_cv_prog_DOT="$DOT" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_DOT="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DOT=$ac_cv_prog_DOT if test -n "$DOT"; then { $as_echo "$as_me:$LINENO: result: $DOT" >&5 $as_echo "$DOT" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$DOT" && break done # Check if user asks to skip documentation build # Check whether --enable-doc was given. if test "${enable_doc+set}" = set; then enableval=$enable_doc; enables_doc=$enableval fi #if test "x$enables_doc" = "xyes"; then # There is no point disabling docs due to missing tools since the pdf # files are both in svn and in the dist tarball # if test "x$PDFLATEX" = "x"; then # enables_doc="no" # AC_MSG_NOTICE([WARNING: Missing pdflatex - documentation won't be built]) # elif test "x$DOXYGEN" = "x"; then # enables_doc="no" # AC_MSG_NOTICE([WARNING: Missing doxygen - documentation won't be built]) # elif test "x$DOT" = "x"; then # enables_doc="no" # AC_MSG_NOTICE([WARNING: Missing dot - documentation won't be built]) # fi #fi { $as_echo "$as_me:$LINENO: Documentation enabled: $enables_doc" >&5 $as_echo "$as_me: Documentation enabled: $enables_doc" >&6;} if test "x$enables_doc" = "xyes"; then DOC_ENABLED_TRUE= DOC_ENABLED_FALSE='#' else DOC_ENABLED_TRUE='#' DOC_ENABLED_FALSE= fi if test -f python/python/arc/index.xml -o "x$DOXYGEN" != "x"; then PYDOXYGEN_TRUE= PYDOXYGEN_FALSE='#' else PYDOXYGEN_TRUE='#' PYDOXYGEN_FALSE= fi if test -f python/altpython/arc/index.xml -o "x$DOXYGEN" != "x"; then ALTPYDOXYGEN_TRUE= ALTPYDOXYGEN_FALSE='#' else ALTPYDOXYGEN_TRUE='#' ALTPYDOXYGEN_FALSE= fi # Check for explicitly and implicitely disabled services if test "x$WIN32" = "xyes" ; then { $as_echo "$as_me:$LINENO: In WIN32 environment many (all) services are not supported yet" >&5 $as_echo "$as_me: In WIN32 environment many (all) services are not supported yet" >&6;} fi # A-Rex # Check whether --enable-a_rex_service was given. if test "${enable_a_rex_service+set}" = set; then enableval=$enable_a_rex_service; enables_a_rex_service=$enableval fi if test "$enables_a_rex_service" = "yes"; then if test "x$WIN32" = "xyes" ; then { $as_echo "$as_me:$LINENO: A-Rex can't be built for WIN32 environment - disabling" >&5 $as_echo "$as_me: A-Rex can't be built for WIN32 environment - disabling" >&6;} enables_a_rex_service="no" elif test "x$DBCXX_LIBS" = "x" ; then { $as_echo "$as_me:$LINENO: A-Rex can't be built without C++ API for DB4.x - disabling" >&5 $as_echo "$as_me: A-Rex can't be built without C++ API for DB4.x - disabling" >&6;} enables_a_rex_service="no" fi fi { $as_echo "$as_me:$LINENO: A-Rex service enabled: $enables_a_rex_service" >&5 $as_echo "$as_me: A-Rex service enabled: $enables_a_rex_service" >&6;} if test "x$enables_a_rex_service" = "xyes"; then A_REX_SERVICE_ENABLED_TRUE= A_REX_SERVICE_ENABLED_FALSE='#' else A_REX_SERVICE_ENABLED_TRUE='#' A_REX_SERVICE_ENABLED_FALSE= fi # Gridftpd # Check whether --enable-gridftpd_service was given. 
if test "${enable_gridftpd_service+set}" = set; then enableval=$enable_gridftpd_service; enables_gridftpd_service=$enableval fi if test "$enables_gridftpd_service" = "yes"; then if test "x$WIN32" = "xyes" ; then { $as_echo "$as_me:$LINENO: Gridftpd can not be built for WIN32 environment - disabling" >&5 $as_echo "$as_me: Gridftpd can not be built for WIN32 environment - disabling" >&6;} enables_gridftpd_service="no" fi gridftpd_service_globus_pkgs="globus-common globus-io globus-gsi-credential globus-openssl-module globus-ftp-control" gridftpd_service_globus_pkgs_missing="" for pkg in $gridftpd_service_globus_pkgs do var=`echo '$'$pkg|tr '\-a-z' '_A-Z'|sed 's/$/_VERSION/'` if test -z "`eval echo $var`" then gridftpd_service_globus_pkgs_missing="$gridftpd_service_globus_pkgs_missing $pkg" fi done if test -n "$gridftpd_service_globus_pkgs_missing" ; then { $as_echo "$as_me:$LINENO: GridFTP service can not be built (missing development packages for$gridftpd_service_globus_pkgs_missing) - disabling" >&5 $as_echo "$as_me: GridFTP service can not be built (missing development packages for$gridftpd_service_globus_pkgs_missing) - disabling" >&6;} enables_gridftpd_service="no" fi #check for struct statfs for ac_func in fstatfs do as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for $ac_func" >&5 $as_echo_n "checking for $ac_func... " >&6; } if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Define $ac_func to an innocuous variant, in case declares $ac_func. For example, HP-UX 11i declares gettimeofday. */ #define $ac_func innocuous_$ac_func /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $ac_func (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $ac_func /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $ac_func (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$ac_func || defined __stub___$ac_func choke me #endif int main () { return $ac_func (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then eval "$as_ac_var=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_var=no" fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext fi ac_res=`eval 'as_val=${'$as_ac_var'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_var'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done for ac_header in sys/param.h sys/statfs.h sys/mount.h sys/vfs.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? 
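# (Descriptive note, not generated by autoconf: when the compiler and the preprocessor
#  disagree about a header, the case statement below keeps the compiler's verdict for the
#  "accepted by compiler, rejected by preprocessor" situation and, with a warning pointing
#  at http://bugzilla.nordugrid.org/, falls back to the preprocessor's result otherwise.)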
case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:$LINENO: checking for struct statfs.f_type" >&5 $as_echo_n "checking for struct statfs.f_type... " >&6; } if test "${ac_cv_member_struct_statfs_f_type+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #if HAVE_SYS_STATFS_H #include <sys/statfs.h> #endif #if HAVE_SYS_MOUNT_H #include <sys/mount.h> #endif #if HAVE_SYS_VFS_H #include <sys/vfs.h> #endif int main () { static struct statfs ac_aggr; if (ac_aggr.f_type) return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test !
-s conftest.err } && test -s conftest.$ac_objext; then ac_cv_member_struct_statfs_f_type=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #if HAVE_SYS_STATFS_H #include <sys/statfs.h> #endif #if HAVE_SYS_MOUNT_H #include <sys/mount.h> #endif #if HAVE_SYS_VFS_H #include <sys/vfs.h> #endif int main () { static struct statfs ac_aggr; if (sizeof ac_aggr.f_type) return 0; ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_member_struct_statfs_f_type=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_member_struct_statfs_f_type=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_member_struct_statfs_f_type" >&5 $as_echo "$ac_cv_member_struct_statfs_f_type" >&6; } if test "x$ac_cv_member_struct_statfs_f_type" = x""yes; then cat >>confdefs.h <<_ACEOF #define HAVE_STRUCT_STATFS_F_TYPE 1 _ACEOF fi fi { $as_echo "$as_me:$LINENO: Gridftpd service enabled: $enables_gridftpd_service" >&5 $as_echo "$as_me: Gridftpd service enabled: $enables_gridftpd_service" >&6;} if test "x$enables_gridftpd_service" = "xyes"; then GRIDFTPD_SERVICE_ENABLED_TRUE= GRIDFTPD_SERVICE_ENABLED_FALSE='#' else GRIDFTPD_SERVICE_ENABLED_TRUE='#' GRIDFTPD_SERVICE_ENABLED_FALSE= fi # LDAP service # Check whether --enable-ldap_service was given. if test "${enable_ldap_service+set}" = set; then enableval=$enable_ldap_service; enables_ldap_service=$enableval fi if test "$enables_ldap_service" = "yes"; then if test "x$WIN32" = "xyes" ; then { $as_echo "$as_me:$LINENO: LDAP infosystem can't be built for WIN32 environment - disabling" >&5 $as_echo "$as_me: LDAP infosystem can't be built for WIN32 environment - disabling" >&6;} enables_ldap_service="no" fi fi { $as_echo "$as_me:$LINENO: LDAP Infosystem service enabled: $enables_ldap_service" >&5 $as_echo "$as_me: LDAP Infosystem service enabled: $enables_ldap_service" >&6;} if test "x$enables_ldap_service" = "xyes"; then LDAP_SERVICE_ENABLED_TRUE= LDAP_SERVICE_ENABLED_FALSE='#' else LDAP_SERVICE_ENABLED_TRUE='#' LDAP_SERVICE_ENABLED_FALSE= fi # GIIS service # Check whether --enable-giis_service was given. if test "${enable_giis_service+set}" = set; then enableval=$enable_giis_service; enables_giis_service=$enableval fi if test "$enables_giis_service" = "yes"; then if test "x$WIN32" = "xyes" ; then { $as_echo "$as_me:$LINENO: GIIS can't be built for WIN32 environment - disabling" >&5 $as_echo "$as_me: GIIS can't be built for WIN32 environment - disabling" >&6;} enables_giis_service="no" fi for ac_header in lber.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... 
" >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" 
>&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF else enables_giis_service=no fi done for ac_header in ldap_features.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... 
" >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF else enables_giis_service=no fi done fi if test "$enables_ldap_service" = "no"; then if test "$enables_giis_service" = "yes"; then enables_giis_service="no" { $as_echo "$as_me:$LINENO: WARNING: GIIS service can't be enabled without LDAP Infosys, disabling GIIS" >&5 $as_echo "$as_me: WARNING: GIIS service can't be enabled without LDAP Infosys, disabling GIIS" >&6;} fi fi { $as_echo "$as_me:$LINENO: GIIS service enabled: $enables_giis_service" >&5 $as_echo "$as_me: GIIS service enabled: $enables_giis_service" >&6;} if test "x$enables_giis_service" = "xyes"; then GIIS_SERVICE_ENABLED_TRUE= GIIS_SERVICE_ENABLED_FALSE='#' else GIIS_SERVICE_ENABLED_TRUE='#' GIIS_SERVICE_ENABLED_FALSE= fi # LDAP monitor # Check whether --enable-ldap_monitor was given. if test "${enable_ldap_monitor+set}" = set; then enableval=$enable_ldap_monitor; enables_ldap_monitor=$enableval fi { $as_echo "$as_me:$LINENO: LDAP Monitor enabled: $enables_ldap_monitor" >&5 $as_echo "$as_me: LDAP Monitor enabled: $enables_ldap_monitor" >&6;} if test "x$enables_ldap_monitor" = "xyes"; then LDAP_MONITOR_ENABLED_TRUE= LDAP_MONITOR_ENABLED_FALSE='#' else LDAP_MONITOR_ENABLED_TRUE='#' LDAP_MONITOR_ENABLED_FALSE= fi # WS monitor # Check whether --enable-ws_monitor was given. if test "${enable_ws_monitor+set}" = set; then enableval=$enable_ws_monitor; enables_ws_monitor=$enableval fi { $as_echo "$as_me:$LINENO: WS Monitor enabled: $enables_ws_monitor" >&5 $as_echo "$as_me: WS Monitor enabled: $enables_ws_monitor" >&6;} if test "x$enables_ws_monitor" = "xyes"; then WS_MONITOR_ENABLED_TRUE= WS_MONITOR_ENABLED_FALSE='#' else WS_MONITOR_ENABLED_TRUE='#' WS_MONITOR_ENABLED_FALSE= fi # Cache service # Check whether --enable-cache_service was given. if test "${enable_cache_service+set}" = set; then enableval=$enable_cache_service; enables_cache_service=$enableval fi if test "$enables_cache_service" = "yes"; then if test "x$WIN32" = "xyes" ; then enables_cache_service="no" { $as_echo "$as_me:$LINENO: Cache service can't be built for WIN32 environment - disabling" >&5 $as_echo "$as_me: Cache service can't be built for WIN32 environment - disabling" >&6;} elif test ! "x$enables_a_rex_service" = "xyes" ; then enables_cache_service="no" { $as_echo "$as_me:$LINENO: Cache service can't be built without A-REX - disabling" >&5 $as_echo "$as_me: Cache service can't be built without A-REX - disabling" >&6;} fi fi { $as_echo "$as_me:$LINENO: Cache service enabled: $enables_cache_service" >&5 $as_echo "$as_me: Cache service enabled: $enables_cache_service" >&6;} if test "x$enables_cache_service" = "xyes"; then CACHE_SERVICE_ENABLED_TRUE= CACHE_SERVICE_ENABLED_FALSE='#' else CACHE_SERVICE_ENABLED_TRUE='#' CACHE_SERVICE_ENABLED_FALSE= fi if test "x$enables_cache_service" = "xyes"; then CACHE_WEBSERVICE_ENABLED_TRUE= CACHE_WEBSERVICE_ENABLED_FALSE='#' else CACHE_WEBSERVICE_ENABLED_TRUE='#' CACHE_WEBSERVICE_ENABLED_FALSE= fi # DataDelivery service # Check whether --enable-datadelivery_service was given. 
if test "${enable_datadelivery_service+set}" = set; then enableval=$enable_datadelivery_service; enables_datadelivery_service=$enableval fi if test "$enables_datadelivery_service" = "yes"; then if test "x$WIN32" = "xyes" ; then enables_datadelivery_service="no" { $as_echo "$as_me:$LINENO: DataDelivery service can't be built for WIN32 environment - disabling" >&5 $as_echo "$as_me: DataDelivery service can't be built for WIN32 environment - disabling" >&6;} fi fi { $as_echo "$as_me:$LINENO: DataDelivery service enabled: $enables_datadelivery_service" >&5 $as_echo "$as_me: DataDelivery service enabled: $enables_datadelivery_service" >&6;} if test "x$enables_datadelivery_service" = "xyes"; then DATADELIVERY_SERVICE_ENABLED_TRUE= DATADELIVERY_SERVICE_ENABLED_FALSE='#' else DATADELIVERY_SERVICE_ENABLED_TRUE='#' DATADELIVERY_SERVICE_ENABLED_FALSE= fi # ACIX service # Check whether --enable-acix was given. if test "${enable_acix+set}" = set; then enableval=$enable_acix; enables_acix=$enableval fi if test "x$PYTHON_VERSION" = "x2.2" -o "x$PYTHON_VERSION" = "x2.3" then enables_acix="no" fi { $as_echo "$as_me:$LINENO: ACIX enabled: $enables_acix" >&5 $as_echo "$as_me: ACIX enabled: $enables_acix" >&6;} if test "x$enables_acix" = "xyes"; then ACIX_ENABLED_TRUE= ACIX_ENABLED_FALSE='#' else ACIX_ENABLED_TRUE='#' ACIX_ENABLED_FALSE= fi # trial command (from python-twisted-core) is used for acix unittests for ac_prog in trial do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_TRIAL+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$TRIAL"; then ac_cv_prog_TRIAL="$TRIAL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_TRIAL="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi TRIAL=$ac_cv_prog_TRIAL if test -n "$TRIAL"; then { $as_echo "$as_me:$LINENO: result: $TRIAL" >&5 $as_echo "$TRIAL" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$TRIAL" && break done if test "x$TRIAL" = "x"; then { $as_echo "$as_me:$LINENO: trial not found - ACIX unit tests will be skipped" >&5 $as_echo "$as_me: trial not found - ACIX unit tests will be skipped" >&6;} fi # unit tests also require python >=2.6 if test "x$TRIAL" != "x" && test "x$PYTHON_VERSION" != "x2.4" && test "x$PYTHON_VERSION" != "x2.5"; then ACIX_TESTS_ENABLED_TRUE= ACIX_TESTS_ENABLED_FALSE='#' else ACIX_TESTS_ENABLED_TRUE='#' ACIX_TESTS_ENABLED_FALSE= fi # Check for explicitly and implicitely disabled clients # Check whether --enable-compute_client was given. if test "${enable_compute_client+set}" = set; then enableval=$enable_compute_client; enables_compute_client=$enableval fi { $as_echo "$as_me:$LINENO: Compute client tools enabled: $enables_compute_client" >&5 $as_echo "$as_me: Compute client tools enabled: $enables_compute_client" >&6;} if test "x$enables_compute_client" = "xyes"; then COMPUTE_CLIENT_ENABLED_TRUE= COMPUTE_CLIENT_ENABLED_FALSE='#' else COMPUTE_CLIENT_ENABLED_TRUE='#' COMPUTE_CLIENT_ENABLED_FALSE= fi # Check whether --enable-credentials_client was given. 
if test "${enable_credentials_client+set}" = set; then enableval=$enable_credentials_client; enables_credentials_client=$enableval fi { $as_echo "$as_me:$LINENO: Credentials client tools enabled: $enables_credentials_client" >&5 $as_echo "$as_me: Credentials client tools enabled: $enables_credentials_client" >&6;} if test "x$enables_credentials_client" = "xyes"; then CREDENTIALS_CLIENT_ENABLED_TRUE= CREDENTIALS_CLIENT_ENABLED_FALSE='#' else CREDENTIALS_CLIENT_ENABLED_TRUE='#' CREDENTIALS_CLIENT_ENABLED_FALSE= fi # Check whether --enable-echo_client was given. if test "${enable_echo_client+set}" = set; then enableval=$enable_echo_client; enables_echo_client=$enableval fi { $as_echo "$as_me:$LINENO: Echo client tool enabled: $enables_echo_client" >&5 $as_echo "$as_me: Echo client tool enabled: $enables_echo_client" >&6;} if test "x$enables_echo_client" = "xyes"; then ECHO_CLIENT_ENABLED_TRUE= ECHO_CLIENT_ENABLED_FALSE='#' else ECHO_CLIENT_ENABLED_TRUE='#' ECHO_CLIENT_ENABLED_FALSE= fi # Check whether --enable-data_client was given. if test "${enable_data_client+set}" = set; then enableval=$enable_data_client; enables_data_client=$enableval fi { $as_echo "$as_me:$LINENO: Data client tools enabled: $enables_data_client" >&5 $as_echo "$as_me: Data client tools enabled: $enables_data_client" >&6;} if test "x$enables_data_client" = "xyes"; then DATA_CLIENT_ENABLED_TRUE= DATA_CLIENT_ENABLED_FALSE='#' else DATA_CLIENT_ENABLED_TRUE='#' DATA_CLIENT_ENABLED_FALSE= fi # Check whether --enable-jura_client was given. if test "${enable_jura_client+set}" = set; then enableval=$enable_jura_client; enables_jura_client=$enableval fi { $as_echo "$as_me:$LINENO: JURA client tool enabled: $enables_jura_client" >&5 $as_echo "$as_me: JURA client tool enabled: $enables_jura_client" >&6;} if test "x$enables_jura_client" = "xyes"; then JURA_CLIENT_ENABLED_TRUE= JURA_CLIENT_ENABLED_FALSE='#' else JURA_CLIENT_ENABLED_TRUE='#' JURA_CLIENT_ENABLED_FALSE= fi # Check whether --enable-saml_client was given. if test "${enable_saml_client+set}" = set; then enableval=$enable_saml_client; enables_saml_client=$enableval fi if test "$enables_saml_client" = "yes"; then if test "$enables_xmlsec1" != "yes" ; then enables_saml_client="no" { $as_echo "$as_me:$LINENO: SAML client requires xmlsec1 - disabling" >&5 $as_echo "$as_me: SAML client requires xmlsec1 - disabling" >&6;} fi fi { $as_echo "$as_me:$LINENO: SAML client tool enabled: $enables_saml_client" >&5 $as_echo "$as_me: SAML client tool enabled: $enables_saml_client" >&6;} if test "x$enables_saml_client" = "xyes"; then SAML_CLIENT_ENABLED_TRUE= SAML_CLIENT_ENABLED_FALSE='#' else SAML_CLIENT_ENABLED_TRUE='#' SAML_CLIENT_ENABLED_FALSE= fi # Check whether --enable-wsrf_client was given. if test "${enable_wsrf_client+set}" = set; then enableval=$enable_wsrf_client; enables_wsrf_client=$enableval fi { $as_echo "$as_me:$LINENO: WSRF client tool enabled: $enables_wsrf_client" >&5 $as_echo "$as_me: WSRF client tool enabled: $enables_wsrf_client" >&6;} if test "x$enables_wsrf_client" = "xyes"; then WSRF_CLIENT_ENABLED_TRUE= WSRF_CLIENT_ENABLED_FALSE='#' else WSRF_CLIENT_ENABLED_TRUE='#' WSRF_CLIENT_ENABLED_FALSE= fi # Check whether --enable-unicore_client was given. 
if test "${enable_unicore_client+set}" = set; then enableval=$enable_unicore_client; enables_unicore_client=$enableval fi { $as_echo "$as_me:$LINENO: UNICORE plugin(s) enabled: $enables_unicore_client" >&5 $as_echo "$as_me: UNICORE plugin(s) enabled: $enables_unicore_client" >&6;} if test "x$enables_unicore_client" = "xyes"; then UNICORE_ENABLED_TRUE= UNICORE_ENABLED_FALSE='#' else UNICORE_ENABLED_TRUE='#' UNICORE_ENABLED_FALSE= fi # Check whether --enable-emies_client was given. if test "${enable_emies_client+set}" = set; then enableval=$enable_emies_client; enables_emies_client=$enableval fi { $as_echo "$as_me:$LINENO: EMI ES plugin(s) enabled: $enables_emies_client" >&5 $as_echo "$as_me: EMI ES plugin(s) enabled: $enables_emies_client" >&6;} if test "x$enables_emies_client" = "xyes"; then EMIES_ENABLED_TRUE= EMIES_ENABLED_FALSE='#' else EMIES_ENABLED_TRUE='#' EMIES_ENABLED_FALSE= fi # Check for consistency among disabled components if test "$enables_hed" = "no"; then if test "$enables_a_rex_service" = "yes" -o \ "$enables_cache_service" = "yes" -o \ "$enables_datadelivery_service" = "yes" -o \ "$enables_compute_client" = "yes" -o \ "$enables_credentials_client" = "yes" -o \ "$enables_echo_client" = "yes" -o \ "$enables_data_client" = "yes" -o \ "$enables_jura_client" = "yes" -o \ "$enables_saml_client" = "yes" -o \ "$enables_wsrf_client" = "yes" -o \ ; then { { $as_echo "$as_me:$LINENO: error: HED is needed for building any of the client or service tools. Please enable HED by using --enable-hed." >&5 $as_echo "$as_me: error: HED is needed for building any of the client or service tools. Please enable HED by using --enable-hed." >&2;} { (exit 1); exit 1; }; } fi fi if test "x$enables_hed" = "xyes"; then HED_ENABLED_TRUE= HED_ENABLED_FALSE='#' else HED_ENABLED_TRUE='#' HED_ENABLED_FALSE= fi # A-Rex specific hack for backend scripts pbs_bin_path=/usr/bin pbs_log_path=/var/spool/pbs/server_logs tmp_dir=/tmp gnu_time=/usr/bin/time case "${host}" in *solaris* | *darwin* ) # hostname -f does not work on Solaris, OS X nodename="hostname" ;; *) nodename="/bin/hostname -f" ;; esac arc_location=$prefix # Shell for the job control scripts case $host_os in solaris* ) posix_shell='/usr/xpg4/bin/sh' ;; * ) posix_shell='/bin/sh' ;; esac DATE=`date +%Y-%m-%d` #DATER=`date -R` DATER=`date +'%a, %d %b %Y %H:%M:%S %z'` SPECDATE=`LANG=C date +"%a %b %d %Y"` ac_config_files="$ac_config_files Makefile src/Makefile src/external/Makefile src/external/cJSON/Makefile src/hed/Makefile src/hed/libs/compute/Makefile src/hed/libs/compute/test/Makefile src/hed/libs/compute/examples/Makefile src/hed/libs/common/Makefile src/hed/libs/common/test/Makefile src/hed/libs/communication/Makefile src/hed/libs/credential/Makefile src/hed/libs/credential/test/Makefile src/hed/libs/credentialmod/Makefile src/hed/libs/crypto/Makefile src/hed/libs/cryptomod/Makefile src/hed/libs/data/Makefile src/hed/libs/data/cache-clean src/hed/libs/data/cache-clean.1 src/hed/libs/data/cache-list src/hed/libs/data/cache-list.1 src/hed/libs/data/test/Makefile src/hed/libs/data/examples/Makefile src/hed/libs/Makefile src/hed/libs/loader/Makefile src/hed/libs/loader/schema/Makefile src/hed/libs/loader/test/Makefile src/hed/libs/message/Makefile src/hed/libs/message/test/Makefile src/hed/libs/security/Makefile src/hed/libs/security/ArcPDP/Makefile src/hed/libs/security/ArcPDP/attr/Makefile src/hed/libs/security/ArcPDP/policy/Makefile src/hed/libs/security/ArcPDP/alg/Makefile src/hed/libs/security/ArcPDP/fn/Makefile 
src/hed/libs/credentialstore/Makefile src/hed/libs/wsrf/Makefile src/hed/libs/ws-addressing/Makefile src/hed/libs/ws-security/Makefile src/hed/libs/ws-security/test/Makefile src/hed/libs/infosys/Makefile src/hed/libs/infosys/schema/Makefile src/hed/libs/infosys/test/Makefile src/hed/libs/delegation/Makefile src/hed/libs/delegation/test/Makefile src/hed/libs/ws/Makefile src/hed/libs/xmlsec/Makefile src/hed/libs/globusutils/Makefile src/hed/daemon/Makefile src/hed/daemon/scripts/Makefile src/hed/daemon/schema/Makefile src/hed/daemon/unix/Makefile src/hed/daemon/win32/Makefile src/hed/mcc/Makefile src/hed/mcc/soap/Makefile src/hed/mcc/tcp/Makefile src/hed/mcc/tcp/schema/Makefile src/hed/mcc/http/Makefile src/hed/mcc/http/schema/Makefile src/hed/mcc/tls/Makefile src/hed/mcc/tls/schema/Makefile src/hed/mcc/msgvalidator/Makefile src/hed/mcc/msgvalidator/schema/Makefile src/hed/acc/Makefile src/hed/acc/ARC0/Makefile src/hed/acc/ARC1/Makefile src/hed/acc/ARC1/test/Makefile src/hed/acc/EMIES/Makefile src/hed/acc/EMIES/arcemiestest.1 src/hed/acc/EMIES/schema/Makefile src/hed/acc/CREAM/Makefile src/hed/acc/UNICORE/Makefile src/hed/acc/Broker/Makefile src/hed/acc/Broker/test/Makefile src/hed/acc/PythonBroker/Makefile src/hed/acc/JobDescriptionParser/Makefile src/hed/acc/JobDescriptionParser/test/Makefile src/hed/acc/SER/Makefile src/hed/acc/ldap/Makefile src/hed/acc/TEST/Makefile src/hed/dmc/Makefile src/hed/dmc/file/Makefile src/hed/dmc/gridftp/Makefile src/hed/dmc/http/Makefile src/hed/dmc/ldap/Makefile src/hed/dmc/srm/Makefile src/hed/dmc/srm/srmclient/Makefile src/hed/dmc/gfal/Makefile src/hed/dmc/xrootd/Makefile src/hed/dmc/mock/Makefile src/hed/dmc/acix/Makefile src/hed/dmc/rucio/Makefile src/hed/dmc/s3/Makefile src/hed/profiles/general/general.xml src/hed/shc/Makefile src/hed/shc/arcpdp/Makefile src/hed/shc/arcpdp/schema/Makefile src/hed/shc/xacmlpdp/Makefile src/hed/shc/xacmlpdp/schema/Makefile src/hed/shc/delegationpdp/Makefile src/hed/shc/delegationpdp/schema/Makefile src/hed/shc/gaclpdp/Makefile src/hed/shc/pdpserviceinvoker/Makefile src/hed/shc/pdpserviceinvoker/schema/Makefile src/hed/shc/allowpdp/Makefile src/hed/shc/denypdp/Makefile src/hed/shc/simplelistpdp/Makefile src/hed/shc/simplelistpdp/schema/Makefile src/hed/shc/arcauthzsh/Makefile src/hed/shc/arcauthzsh/schema/Makefile src/hed/shc/usernametokensh/Makefile src/hed/shc/usernametokensh/schema/Makefile src/hed/shc/x509tokensh/Makefile src/hed/shc/x509tokensh/schema/Makefile src/hed/shc/samltokensh/Makefile src/hed/shc/samltokensh/schema/Makefile src/hed/shc/saml2sso_assertionconsumersh/Makefile src/hed/shc/delegationsh/Makefile src/hed/shc/delegationsh/schema/Makefile src/hed/shc/legacy/Makefile src/hed/shc/legacy/schema/Makefile src/hed/identitymap/Makefile src/hed/identitymap/schema/Makefile src/libs/Makefile src/libs/data-staging/Makefile src/libs/data-staging/test/Makefile src/libs/data-staging/examples/Makefile src/services/Makefile src/services/a-rex/Makefile src/services/a-rex/a-rex src/services/a-rex/a-rex.service src/services/a-rex/a-rex-start src/services/a-rex/a-rex-backtrace-collect src/services/a-rex/a-rex-backtrace-collect.8 src/services/a-rex/perferator src/services/a-rex/grid-manager/arc-vomsac-check.8 src/services/a-rex/grid-manager/arc-blahp-logger.8 src/services/a-rex/grid-manager/gm-jobs.8 src/services/a-rex/grid-manager/gm-delegations-converter.8 src/services/a-rex/delegation/Makefile src/services/a-rex/grid-manager/Makefile src/services/a-rex/grid-manager/conf/Makefile 
src/services/a-rex/grid-manager/files/Makefile src/services/a-rex/grid-manager/jobs/Makefile src/services/a-rex/grid-manager/jobplugin/Makefile src/services/a-rex/grid-manager/loaders/Makefile src/services/a-rex/grid-manager/log/Makefile src/services/a-rex/grid-manager/mail/Makefile src/services/a-rex/grid-manager/misc/Makefile src/services/a-rex/grid-manager/run/Makefile src/services/a-rex/grid-manager/arc-config-check.1 src/services/a-rex/infoproviders/Makefile src/services/a-rex/infoproviders/CEinfo.pl src/services/a-rex/infoproviders/PerfData.pl src/services/a-rex/infoproviders/test/Makefile src/services/a-rex/jura/Makefile src/services/a-rex/jura/jura.1 src/services/a-rex/jura/ssm/Makefile src/services/a-rex/ldif/Makefile src/services/a-rex/lrms/Makefile src/services/a-rex/lrms/submit_common.sh src/services/a-rex/lrms/scan_common.sh src/services/a-rex/lrms/condor/Makefile src/services/a-rex/lrms/condor/scan-condor-job src/services/a-rex/lrms/condor/cancel-condor-job src/services/a-rex/lrms/condor/submit-condor-job src/services/a-rex/lrms/fork/Makefile src/services/a-rex/lrms/fork/scan-fork-job src/services/a-rex/lrms/fork/submit-fork-job src/services/a-rex/lrms/fork/cancel-fork-job src/services/a-rex/lrms/ll/Makefile src/services/a-rex/lrms/ll/submit-ll-job src/services/a-rex/lrms/ll/cancel-ll-job src/services/a-rex/lrms/ll/scan-ll-job src/services/a-rex/lrms/lsf/Makefile src/services/a-rex/lrms/lsf/submit-lsf-job src/services/a-rex/lrms/lsf/cancel-lsf-job src/services/a-rex/lrms/lsf/scan-lsf-job src/services/a-rex/lrms/pbs/Makefile src/services/a-rex/lrms/pbs/submit-pbs-job src/services/a-rex/lrms/pbs/cancel-pbs-job src/services/a-rex/lrms/pbs/scan-pbs-job src/services/a-rex/lrms/pbs/configure-pbs-env.sh src/services/a-rex/lrms/sge/Makefile src/services/a-rex/lrms/sge/submit-sge-job src/services/a-rex/lrms/sge/scan-sge-job src/services/a-rex/lrms/sge/cancel-sge-job src/services/a-rex/lrms/slurm/Makefile src/services/a-rex/lrms/slurm/submit-SLURM-job src/services/a-rex/lrms/slurm/scan-SLURM-job src/services/a-rex/lrms/slurm/cancel-SLURM-job src/services/a-rex/lrms/dgbridge/Makefile src/services/a-rex/lrms/dgbridge/submit-DGBridge-job src/services/a-rex/lrms/dgbridge/scan-DGBridge-job src/services/a-rex/lrms/dgbridge/cancel-DGBridge-job src/services/a-rex/lrms/boinc/Makefile src/services/a-rex/lrms/boinc/submit-boinc-job src/services/a-rex/lrms/boinc/scan-boinc-job src/services/a-rex/lrms/boinc/cancel-boinc-job src/services/a-rex/schema/Makefile src/services/acix/Makefile src/services/acix/cacheserver/Makefile src/services/acix/cacheserver/acix-cache.service src/services/acix/cacheserver/test/Makefile src/services/acix/core/Makefile src/services/acix/core/test/Makefile src/services/acix/indexserver/Makefile src/services/acix/indexserver/acix-index.service src/services/acix/indexserver/test/Makefile src/services/cache_service/Makefile src/services/cache_service/arc-cache-service src/services/cache_service/arc-cache-service.service src/services/cache_service/arc-cache-service-start src/services/data-staging/Makefile src/services/data-staging/arc-datadelivery-service src/services/data-staging/arc-datadelivery-service.service src/services/data-staging/arc-datadelivery-service-start src/services/gridftpd/Makefile src/services/gridftpd/gridftpd.init src/services/gridftpd/gridftpd.service src/services/gridftpd/gridftpd-start src/services/gridftpd/gridftpd.8 src/services/gridftpd/auth/Makefile src/services/gridftpd/conf/Makefile src/services/gridftpd/misc/Makefile 
src/services/gridftpd/run/Makefile src/services/gridftpd/fileplugin/Makefile src/services/ldap-infosys/Makefile src/services/ldap-infosys/create-bdii-config src/services/ldap-infosys/create-inforeg-config src/services/ldap-infosys/create-slapd-config src/services/ldap-infosys/nordugrid-arc-bdii src/services/ldap-infosys/nordugrid-arc-egiis src/services/ldap-infosys/nordugrid-arc-inforeg src/services/ldap-infosys/nordugrid-arc-ldap-infosys src/services/ldap-infosys/nordugrid-arc-slapd src/services/ldap-infosys/giis/Makefile src/services/ldap-infosys/giis/arc-infoindex-relay.8 src/services/ldap-infosys/giis/arc-infoindex-server.8 src/services/ldap-monitor/Makefile src/services/ldap-monitor/ldap-monitor src/services/ldap-monitor/README src/services/ldap-monitor/man/Makefile src/services/ldap-monitor/man/ldap-monitor.7 src/services/ldap-monitor/includes/Makefile src/services/ldap-monitor/mon-icons/Makefile src/services/ldap-monitor/lang/Makefile src/services/ws-monitor/Makefile src/services/ws-monitor/ws-monitor src/services/ws-monitor/README src/services/ws-monitor/man/Makefile src/services/ws-monitor/man/ws-monitor.7 src/services/ws-monitor/includes/Makefile src/services/ws-monitor/mon-icons/Makefile src/services/ws-monitor/lang/Makefile src/services/examples/Makefile src/services/examples/echo_java/Makefile src/services/examples/echo_python/Makefile src/services/wrappers/Makefile src/services/wrappers/java/Makefile src/services/wrappers/java/schema/Makefile src/services/wrappers/python/Makefile src/services/wrappers/python/schema/Makefile src/clients/Makefile src/clients/data/Makefile src/clients/data/arccp.1 src/clients/data/arcls.1 src/clients/data/arcrm.1 src/clients/data/arcmkdir.1 src/clients/data/arcrename.1 src/clients/echo/Makefile src/clients/echo/arcecho.1 src/clients/credentials/Makefile src/clients/credentials/arcproxy.1 src/clients/saml/Makefile src/clients/saml/saml_assertion_init.1 src/clients/compute/Makefile src/clients/compute/arcstat.1 src/clients/compute/arcinfo.1 src/clients/compute/arcsub.1 src/clients/compute/arcclean.1 src/clients/compute/arckill.1 src/clients/compute/arcget.1 src/clients/compute/arccat.1 src/clients/compute/arcresub.1 src/clients/compute/arcsync.1 src/clients/compute/arcrenew.1 src/clients/compute/arcresume.1 src/clients/compute/arctest.1 src/clients/wsrf/arcwsrf.1 src/clients/wsrf/Makefile src/tests/Makefile src/tests/echo/Makefile src/tests/echo/perftest.1 src/tests/echo/echo_service.xml.example src/tests/echo/schema/Makefile src/tests/policy-delegation/Makefile src/tests/delegation/Makefile src/tests/translator/Makefile src/tests/xpath/Makefile src/tests/arcpolicy/Makefile src/tests/perf/Makefile src/tests/perf/arcperftest.1 src/tests/client/Makefile src/utils/hed/wsdl2hed.1 src/utils/hed/arcplugin.1 src/utils/hed/Makefile src/utils/gridmap/nordugridmap.cron src/utils/gridmap/nordugridmap.8 src/utils/gridmap/Makefile src/utils/Makefile src/doc/Makefile src/doc/arc.conf.5 swig/Makefile java/Makefile java/test/Makefile java/test/strip_test_file_name_and_run_junit java/examples/Makefile python/Makefile python/Doxyfile.api python/python/Makefile python/python/arc/Makefile python/altpython/Makefile python/altpython/arc/Makefile python/test/Makefile python/test/python/Makefile python/test/altpython/Makefile python/examples/Makefile po/Makefile.in include/Makefile debian/Makefile debian/changelog.deb nordugrid-arc.spec mingw-nordugrid-arc.spec src/hed/daemon/arched.8 src/hed/daemon/scripts/arched src/hed/daemon/scripts/arched.service 
src/hed/daemon/scripts/arched-start arcbase.pc nsis/Makefile nsis/arc.nsis src/doxygen/Makefile" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. _ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:$LINENO: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) $as_unset $ac_var ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes (double-quote # substitution turns \\\\ into \\, and sed turns \\ into \). sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then test "x$cache_file" != "x/dev/null" && { $as_echo "$as_me:$LINENO: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} cat confcache >$cache_file else { $as_echo "$as_me:$LINENO: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' DEFS=-DHAVE_CONFIG_H ac_libobjs= ac_ltlibobjs= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`$as_echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. ac_libobjs="$ac_libobjs \${LIBOBJDIR}$ac_i\$U.$ac_objext" ac_ltlibobjs="$ac_ltlibobjs \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs if test -n "$EXEEXT"; then am__EXEEXT_TRUE= am__EXEEXT_FALSE='#' else am__EXEEXT_TRUE='#' am__EXEEXT_FALSE= fi if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"AMDEP\" was never defined. Usually this means the macro was only invoked conditionally." 
>&5 $as_echo "$as_me: error: conditional \"AMDEP\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"am__fastdepCXX\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"am__fastdepCXX\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"am__fastdepCC\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"am__fastdepCC\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"am__fastdepCXX\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"am__fastdepCXX\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${SYSTEMD_UNITS_ENABLED_TRUE}" && test -z "${SYSTEMD_UNITS_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"SYSTEMD_UNITS_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"SYSTEMD_UNITS_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${SYSV_SCRIPTS_ENABLED_TRUE}" && test -z "${SYSV_SCRIPTS_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"SYSV_SCRIPTS_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"SYSV_SCRIPTS_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${PEDANTIC_COMPILE_TRUE}" && test -z "${PEDANTIC_COMPILE_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"PEDANTIC_COMPILE\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"PEDANTIC_COMPILE\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${SWIG_ENABLED_TRUE}" && test -z "${SWIG_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"SWIG_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"SWIG_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${JAVA_ENABLED_TRUE}" && test -z "${JAVA_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"JAVA_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"JAVA_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." 
>&2;} { (exit 1); exit 1; }; } fi if test -z "${JAVA_SWIG_ENABLED_TRUE}" && test -z "${JAVA_SWIG_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"JAVA_SWIG_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"JAVA_SWIG_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${JAVA_IS_15_OR_ABOVE_TRUE}" && test -z "${JAVA_IS_15_OR_ABOVE_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"JAVA_IS_15_OR_ABOVE\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"JAVA_IS_15_OR_ABOVE\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${JUNIT_ENABLED_TRUE}" && test -z "${JUNIT_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"JUNIT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"JUNIT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${PYTHON_ENABLED_TRUE}" && test -z "${PYTHON_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"PYTHON_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"PYTHON_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${PYTHON3_TRUE}" && test -z "${PYTHON3_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"PYTHON3\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"PYTHON3\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${PYTHON_SWIG_ENABLED_TRUE}" && test -z "${PYTHON_SWIG_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"PYTHON_SWIG_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"PYTHON_SWIG_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${PYTHON_SERVICE_TRUE}" && test -z "${PYTHON_SERVICE_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"PYTHON_SERVICE\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"PYTHON_SERVICE\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${ALTPYTHON_ENABLED_TRUE}" && test -z "${ALTPYTHON_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"ALTPYTHON_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"ALTPYTHON_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${ALTPYTHON3_TRUE}" && test -z "${ALTPYTHON3_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"ALTPYTHON3\" was never defined. Usually this means the macro was only invoked conditionally." 
>&5 $as_echo "$as_me: error: conditional \"ALTPYTHON3\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${PYLINT_ENABLED_TRUE}" && test -z "${PYLINT_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"PYLINT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"PYLINT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${NSS_ENABLED_TRUE}" && test -z "${NSS_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"NSS_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"NSS_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${SQLITE_ENABLED_TRUE}" && test -z "${SQLITE_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"SQLITE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"SQLITE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${CANLXX_ENABLED_TRUE}" && test -z "${CANLXX_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"CANLXX_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"CANLXX_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${LDNS_ENABLED_TRUE}" && test -z "${LDNS_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"LDNS_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"LDNS_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${MACOSX_TRUE}" && test -z "${MACOSX_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"MACOSX\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"MACOSX\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${MYSQL_LIBRARY_ENABLED_TRUE}" && test -z "${MYSQL_LIBRARY_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"MYSQL_LIBRARY_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"MYSQL_LIBRARY_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${ARGUS_ENABLED_TRUE}" && test -z "${ARGUS_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"ARGUS_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"ARGUS_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${DBJSTORE_ENABLED_TRUE}" && test -z "${DBJSTORE_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"DBJSTORE_ENABLED\" was never defined. 
Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"DBJSTORE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${GLOBUSUTILS_ENABLED_TRUE}" && test -z "${GLOBUSUTILS_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"GLOBUSUTILS_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"GLOBUSUTILS_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${GRIDFTP_ENABLED_TRUE}" && test -z "${GRIDFTP_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"GRIDFTP_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"GRIDFTP_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${MOCK_DMC_ENABLED_TRUE}" && test -z "${MOCK_DMC_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"MOCK_DMC_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"MOCK_DMC_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${GFAL_ENABLED_TRUE}" && test -z "${GFAL_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"GFAL_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"GFAL_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${S3_DMC_ENABLED_TRUE}" && test -z "${S3_DMC_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"S3_DMC_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"S3_DMC_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${XROOTD_ENABLED_TRUE}" && test -z "${XROOTD_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"XROOTD_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"XROOTD_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${XMLSEC_ENABLED_TRUE}" && test -z "${XMLSEC_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"XMLSEC_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"XMLSEC_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${CPPUNIT_ENABLED_TRUE}" && test -z "${CPPUNIT_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"CPPUNIT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"CPPUNIT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." 
>&2;} { (exit 1); exit 1; }; } fi if test -z "${SRM_DMC_ENABLED_TRUE}" && test -z "${SRM_DMC_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"SRM_DMC_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"SRM_DMC_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${LDAP_ENABLED_TRUE}" && test -z "${LDAP_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"LDAP_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"LDAP_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${WIN32_TRUE}" && test -z "${WIN32_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"WIN32\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"WIN32\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${DOC_ENABLED_TRUE}" && test -z "${DOC_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"DOC_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"DOC_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${PYDOXYGEN_TRUE}" && test -z "${PYDOXYGEN_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"PYDOXYGEN\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"PYDOXYGEN\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${ALTPYDOXYGEN_TRUE}" && test -z "${ALTPYDOXYGEN_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"ALTPYDOXYGEN\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"ALTPYDOXYGEN\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${A_REX_SERVICE_ENABLED_TRUE}" && test -z "${A_REX_SERVICE_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"A_REX_SERVICE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"A_REX_SERVICE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${GRIDFTPD_SERVICE_ENABLED_TRUE}" && test -z "${GRIDFTPD_SERVICE_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"GRIDFTPD_SERVICE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"GRIDFTPD_SERVICE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${LDAP_SERVICE_ENABLED_TRUE}" && test -z "${LDAP_SERVICE_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"LDAP_SERVICE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." 
>&5 $as_echo "$as_me: error: conditional \"LDAP_SERVICE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${GIIS_SERVICE_ENABLED_TRUE}" && test -z "${GIIS_SERVICE_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"GIIS_SERVICE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"GIIS_SERVICE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${LDAP_MONITOR_ENABLED_TRUE}" && test -z "${LDAP_MONITOR_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"LDAP_MONITOR_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"LDAP_MONITOR_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${WS_MONITOR_ENABLED_TRUE}" && test -z "${WS_MONITOR_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"WS_MONITOR_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"WS_MONITOR_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${CACHE_SERVICE_ENABLED_TRUE}" && test -z "${CACHE_SERVICE_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"CACHE_SERVICE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"CACHE_SERVICE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${CACHE_WEBSERVICE_ENABLED_TRUE}" && test -z "${CACHE_WEBSERVICE_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"CACHE_WEBSERVICE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"CACHE_WEBSERVICE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${DATADELIVERY_SERVICE_ENABLED_TRUE}" && test -z "${DATADELIVERY_SERVICE_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"DATADELIVERY_SERVICE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"DATADELIVERY_SERVICE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${ACIX_ENABLED_TRUE}" && test -z "${ACIX_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"ACIX_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"ACIX_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${ACIX_TESTS_ENABLED_TRUE}" && test -z "${ACIX_TESTS_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"ACIX_TESTS_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"ACIX_TESTS_ENABLED\" was never defined. 
Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${COMPUTE_CLIENT_ENABLED_TRUE}" && test -z "${COMPUTE_CLIENT_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"COMPUTE_CLIENT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"COMPUTE_CLIENT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${CREDENTIALS_CLIENT_ENABLED_TRUE}" && test -z "${CREDENTIALS_CLIENT_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"CREDENTIALS_CLIENT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"CREDENTIALS_CLIENT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${ECHO_CLIENT_ENABLED_TRUE}" && test -z "${ECHO_CLIENT_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"ECHO_CLIENT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"ECHO_CLIENT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${DATA_CLIENT_ENABLED_TRUE}" && test -z "${DATA_CLIENT_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"DATA_CLIENT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"DATA_CLIENT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${JURA_CLIENT_ENABLED_TRUE}" && test -z "${JURA_CLIENT_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"JURA_CLIENT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"JURA_CLIENT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${SAML_CLIENT_ENABLED_TRUE}" && test -z "${SAML_CLIENT_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"SAML_CLIENT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"SAML_CLIENT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${WSRF_CLIENT_ENABLED_TRUE}" && test -z "${WSRF_CLIENT_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"WSRF_CLIENT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"WSRF_CLIENT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${UNICORE_ENABLED_TRUE}" && test -z "${UNICORE_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"UNICORE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"UNICORE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." 
>&2;} { (exit 1); exit 1; }; } fi if test -z "${EMIES_ENABLED_TRUE}" && test -z "${EMIES_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"EMIES_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"EMIES_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${HED_ENABLED_TRUE}" && test -z "${HED_ENABLED_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"HED_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"HED_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi : ${CONFIG_STATUS=./config.status} ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { $as_echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5 $as_echo "$as_me: creating $CONFIG_STATUS" >&6;} cat >$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ## --------------------- ## ## M4sh Initialization. ## ## --------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac fi # PATH needs CR # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo if (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # Support unset when possible. if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then as_unset=unset else as_unset=false fi # IFS # We need space, tab and new line, in precisely that order. 
Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. case $0 in *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 { (exit 1); exit 1; } fi # Work around bugs in pre-3.0 UWIN ksh. for as_var in ENV MAIL MAILPATH do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # Required to use basename. if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi # Name of the executable. as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # CDPATH. $as_unset CDPATH as_lineno_1=$LINENO as_lineno_2=$LINENO test "x$as_lineno_1" != "x$as_lineno_2" && test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || { # Create $as_me.lineno as a copy of $as_myself, but with $LINENO # uniformly replaced by the line number. The first 'sed' inserts a # line-number line after each line using $LINENO; the second 'sed' # does the real work. The second script uses 'N' to pair each # line-number line with the line containing $LINENO, and appends # trailing '-' during substitution so that $LINENO is not a special # case at line end. # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the # scripts with optimization help from Paolo Bonzini. Blame Lee # E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 { (exit 1); exit 1; }; } # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in -n*) case `echo 'x\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. 
*) ECHO_C='\c';; esac;; *) ECHO_N='-n';; esac if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -p'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -p' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -p' fi else as_ln_s='cp -p' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p=: else test -d ./-p && rmdir ./-p as_mkdir_p=false fi if test -x / >/dev/null 2>&1; then as_test_x='test -x' else if ls -dL / >/dev/null 2>&1; then as_ls_L_option=L else as_ls_L_option= fi as_test_x=' eval sh -c '\'' if test -d "$1"; then test -d "$1/."; else case $1 in -*)set "./$1";; esac; case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in ???[sx]*):;;*)false;;esac;fi '\'' sh ' fi as_executable_p=$as_test_x # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 # Save the log message, to keep $[0] and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by nordugrid-arc $as_me 5.4.2, which was generated by GNU Autoconf 2.63. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF case $ac_config_files in *" "*) set x $ac_config_files; shift; ac_config_files=$*;; esac case $ac_config_headers in *" "*) set x $ac_config_headers; shift; ac_config_headers=$*;; esac cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # Files that config.status was made for. config_files="$ac_config_files" config_headers="$ac_config_headers" config_commands="$ac_config_commands" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ \`$as_me' instantiates files from templates according to the current configuration. Usage: $0 [OPTION]... [FILE]... -h, --help print this help, then exit -V, --version print version number and configuration settings, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE --header=FILE[:TEMPLATE] instantiate the configuration header FILE Configuration files: $config_files Configuration headers: $config_headers Configuration commands: $config_commands Report bugs to ." _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_version="\\ nordugrid-arc config.status 5.4.2 configured by $0, generated by GNU Autoconf 2.63, with options \\"`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\" Copyright (C) 2008 Free Software Foundation, Inc. 
This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." ac_pwd='$ac_pwd' srcdir='$srcdir' INSTALL='$INSTALL' MKDIR_P='$MKDIR_P' AWK='$AWK' test -n "\$AWK" || AWK=awk _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # The default lists apply if the user does not specify any file. ac_need_defaults=: while test $# != 0 do case $1 in --*=*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac CONFIG_FILES="$CONFIG_FILES '$ac_optarg'" ac_need_defaults=false;; --header | --heade | --head | --hea ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac CONFIG_HEADERS="$CONFIG_HEADERS '$ac_optarg'" ac_need_defaults=false;; --he | --h) # Conflict between --help and --header { $as_echo "$as_me: error: ambiguous option: $1 Try \`$0 --help' for more information." >&2 { (exit 1); exit 1; }; };; --help | --hel | -h ) $as_echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) { $as_echo "$as_me: error: unrecognized option: $1 Try \`$0 --help' for more information." >&2 { (exit 1); exit 1; }; } ;; *) ac_config_targets="$ac_config_targets $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 if \$ac_cs_recheck; then set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion shift \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 CONFIG_SHELL='$SHELL' export CONFIG_SHELL exec "\$@" fi _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. ## _ASBOX $as_echo "$ac_log" } >&5 _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # # INIT-COMMANDS # AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir" # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. 
(unset CDPATH) >/dev/null 2>&1 && unset CDPATH sed_quote_subst='$sed_quote_subst' double_quote_subst='$double_quote_subst' delay_variable_subst='$delay_variable_subst' enable_static='`$ECHO "X$enable_static" | $Xsed -e "$delay_single_quote_subst"`' AS='`$ECHO "X$AS" | $Xsed -e "$delay_single_quote_subst"`' DLLTOOL='`$ECHO "X$DLLTOOL" | $Xsed -e "$delay_single_quote_subst"`' OBJDUMP='`$ECHO "X$OBJDUMP" | $Xsed -e "$delay_single_quote_subst"`' macro_version='`$ECHO "X$macro_version" | $Xsed -e "$delay_single_quote_subst"`' macro_revision='`$ECHO "X$macro_revision" | $Xsed -e "$delay_single_quote_subst"`' enable_shared='`$ECHO "X$enable_shared" | $Xsed -e "$delay_single_quote_subst"`' pic_mode='`$ECHO "X$pic_mode" | $Xsed -e "$delay_single_quote_subst"`' enable_fast_install='`$ECHO "X$enable_fast_install" | $Xsed -e "$delay_single_quote_subst"`' host_alias='`$ECHO "X$host_alias" | $Xsed -e "$delay_single_quote_subst"`' host='`$ECHO "X$host" | $Xsed -e "$delay_single_quote_subst"`' host_os='`$ECHO "X$host_os" | $Xsed -e "$delay_single_quote_subst"`' build_alias='`$ECHO "X$build_alias" | $Xsed -e "$delay_single_quote_subst"`' build='`$ECHO "X$build" | $Xsed -e "$delay_single_quote_subst"`' build_os='`$ECHO "X$build_os" | $Xsed -e "$delay_single_quote_subst"`' SED='`$ECHO "X$SED" | $Xsed -e "$delay_single_quote_subst"`' Xsed='`$ECHO "X$Xsed" | $Xsed -e "$delay_single_quote_subst"`' GREP='`$ECHO "X$GREP" | $Xsed -e "$delay_single_quote_subst"`' EGREP='`$ECHO "X$EGREP" | $Xsed -e "$delay_single_quote_subst"`' FGREP='`$ECHO "X$FGREP" | $Xsed -e "$delay_single_quote_subst"`' LD='`$ECHO "X$LD" | $Xsed -e "$delay_single_quote_subst"`' NM='`$ECHO "X$NM" | $Xsed -e "$delay_single_quote_subst"`' LN_S='`$ECHO "X$LN_S" | $Xsed -e "$delay_single_quote_subst"`' max_cmd_len='`$ECHO "X$max_cmd_len" | $Xsed -e "$delay_single_quote_subst"`' ac_objext='`$ECHO "X$ac_objext" | $Xsed -e "$delay_single_quote_subst"`' exeext='`$ECHO "X$exeext" | $Xsed -e "$delay_single_quote_subst"`' lt_unset='`$ECHO "X$lt_unset" | $Xsed -e "$delay_single_quote_subst"`' lt_SP2NL='`$ECHO "X$lt_SP2NL" | $Xsed -e "$delay_single_quote_subst"`' lt_NL2SP='`$ECHO "X$lt_NL2SP" | $Xsed -e "$delay_single_quote_subst"`' reload_flag='`$ECHO "X$reload_flag" | $Xsed -e "$delay_single_quote_subst"`' reload_cmds='`$ECHO "X$reload_cmds" | $Xsed -e "$delay_single_quote_subst"`' deplibs_check_method='`$ECHO "X$deplibs_check_method" | $Xsed -e "$delay_single_quote_subst"`' file_magic_cmd='`$ECHO "X$file_magic_cmd" | $Xsed -e "$delay_single_quote_subst"`' AR='`$ECHO "X$AR" | $Xsed -e "$delay_single_quote_subst"`' AR_FLAGS='`$ECHO "X$AR_FLAGS" | $Xsed -e "$delay_single_quote_subst"`' STRIP='`$ECHO "X$STRIP" | $Xsed -e "$delay_single_quote_subst"`' RANLIB='`$ECHO "X$RANLIB" | $Xsed -e "$delay_single_quote_subst"`' old_postinstall_cmds='`$ECHO "X$old_postinstall_cmds" | $Xsed -e "$delay_single_quote_subst"`' old_postuninstall_cmds='`$ECHO "X$old_postuninstall_cmds" | $Xsed -e "$delay_single_quote_subst"`' old_archive_cmds='`$ECHO "X$old_archive_cmds" | $Xsed -e "$delay_single_quote_subst"`' CC='`$ECHO "X$CC" | $Xsed -e "$delay_single_quote_subst"`' CFLAGS='`$ECHO "X$CFLAGS" | $Xsed -e "$delay_single_quote_subst"`' compiler='`$ECHO "X$compiler" | $Xsed -e "$delay_single_quote_subst"`' GCC='`$ECHO "X$GCC" | $Xsed -e "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_pipe='`$ECHO "X$lt_cv_sys_global_symbol_pipe" | $Xsed -e "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_cdecl='`$ECHO "X$lt_cv_sys_global_symbol_to_cdecl" | $Xsed -e 
"$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "X$lt_cv_sys_global_symbol_to_c_name_address" | $Xsed -e "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "X$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $Xsed -e "$delay_single_quote_subst"`' objdir='`$ECHO "X$objdir" | $Xsed -e "$delay_single_quote_subst"`' SHELL='`$ECHO "X$SHELL" | $Xsed -e "$delay_single_quote_subst"`' ECHO='`$ECHO "X$ECHO" | $Xsed -e "$delay_single_quote_subst"`' MAGIC_CMD='`$ECHO "X$MAGIC_CMD" | $Xsed -e "$delay_single_quote_subst"`' lt_prog_compiler_no_builtin_flag='`$ECHO "X$lt_prog_compiler_no_builtin_flag" | $Xsed -e "$delay_single_quote_subst"`' lt_prog_compiler_wl='`$ECHO "X$lt_prog_compiler_wl" | $Xsed -e "$delay_single_quote_subst"`' lt_prog_compiler_pic='`$ECHO "X$lt_prog_compiler_pic" | $Xsed -e "$delay_single_quote_subst"`' lt_prog_compiler_static='`$ECHO "X$lt_prog_compiler_static" | $Xsed -e "$delay_single_quote_subst"`' lt_cv_prog_compiler_c_o='`$ECHO "X$lt_cv_prog_compiler_c_o" | $Xsed -e "$delay_single_quote_subst"`' need_locks='`$ECHO "X$need_locks" | $Xsed -e "$delay_single_quote_subst"`' DSYMUTIL='`$ECHO "X$DSYMUTIL" | $Xsed -e "$delay_single_quote_subst"`' NMEDIT='`$ECHO "X$NMEDIT" | $Xsed -e "$delay_single_quote_subst"`' LIPO='`$ECHO "X$LIPO" | $Xsed -e "$delay_single_quote_subst"`' OTOOL='`$ECHO "X$OTOOL" | $Xsed -e "$delay_single_quote_subst"`' OTOOL64='`$ECHO "X$OTOOL64" | $Xsed -e "$delay_single_quote_subst"`' libext='`$ECHO "X$libext" | $Xsed -e "$delay_single_quote_subst"`' shrext_cmds='`$ECHO "X$shrext_cmds" | $Xsed -e "$delay_single_quote_subst"`' extract_expsyms_cmds='`$ECHO "X$extract_expsyms_cmds" | $Xsed -e "$delay_single_quote_subst"`' archive_cmds_need_lc='`$ECHO "X$archive_cmds_need_lc" | $Xsed -e "$delay_single_quote_subst"`' enable_shared_with_static_runtimes='`$ECHO "X$enable_shared_with_static_runtimes" | $Xsed -e "$delay_single_quote_subst"`' export_dynamic_flag_spec='`$ECHO "X$export_dynamic_flag_spec" | $Xsed -e "$delay_single_quote_subst"`' whole_archive_flag_spec='`$ECHO "X$whole_archive_flag_spec" | $Xsed -e "$delay_single_quote_subst"`' compiler_needs_object='`$ECHO "X$compiler_needs_object" | $Xsed -e "$delay_single_quote_subst"`' old_archive_from_new_cmds='`$ECHO "X$old_archive_from_new_cmds" | $Xsed -e "$delay_single_quote_subst"`' old_archive_from_expsyms_cmds='`$ECHO "X$old_archive_from_expsyms_cmds" | $Xsed -e "$delay_single_quote_subst"`' archive_cmds='`$ECHO "X$archive_cmds" | $Xsed -e "$delay_single_quote_subst"`' archive_expsym_cmds='`$ECHO "X$archive_expsym_cmds" | $Xsed -e "$delay_single_quote_subst"`' module_cmds='`$ECHO "X$module_cmds" | $Xsed -e "$delay_single_quote_subst"`' module_expsym_cmds='`$ECHO "X$module_expsym_cmds" | $Xsed -e "$delay_single_quote_subst"`' with_gnu_ld='`$ECHO "X$with_gnu_ld" | $Xsed -e "$delay_single_quote_subst"`' allow_undefined_flag='`$ECHO "X$allow_undefined_flag" | $Xsed -e "$delay_single_quote_subst"`' no_undefined_flag='`$ECHO "X$no_undefined_flag" | $Xsed -e "$delay_single_quote_subst"`' hardcode_libdir_flag_spec='`$ECHO "X$hardcode_libdir_flag_spec" | $Xsed -e "$delay_single_quote_subst"`' hardcode_libdir_flag_spec_ld='`$ECHO "X$hardcode_libdir_flag_spec_ld" | $Xsed -e "$delay_single_quote_subst"`' hardcode_libdir_separator='`$ECHO "X$hardcode_libdir_separator" | $Xsed -e "$delay_single_quote_subst"`' hardcode_direct='`$ECHO "X$hardcode_direct" | $Xsed -e "$delay_single_quote_subst"`' hardcode_direct_absolute='`$ECHO 
"X$hardcode_direct_absolute" | $Xsed -e "$delay_single_quote_subst"`' hardcode_minus_L='`$ECHO "X$hardcode_minus_L" | $Xsed -e "$delay_single_quote_subst"`' hardcode_shlibpath_var='`$ECHO "X$hardcode_shlibpath_var" | $Xsed -e "$delay_single_quote_subst"`' hardcode_automatic='`$ECHO "X$hardcode_automatic" | $Xsed -e "$delay_single_quote_subst"`' inherit_rpath='`$ECHO "X$inherit_rpath" | $Xsed -e "$delay_single_quote_subst"`' link_all_deplibs='`$ECHO "X$link_all_deplibs" | $Xsed -e "$delay_single_quote_subst"`' fix_srcfile_path='`$ECHO "X$fix_srcfile_path" | $Xsed -e "$delay_single_quote_subst"`' always_export_symbols='`$ECHO "X$always_export_symbols" | $Xsed -e "$delay_single_quote_subst"`' export_symbols_cmds='`$ECHO "X$export_symbols_cmds" | $Xsed -e "$delay_single_quote_subst"`' exclude_expsyms='`$ECHO "X$exclude_expsyms" | $Xsed -e "$delay_single_quote_subst"`' include_expsyms='`$ECHO "X$include_expsyms" | $Xsed -e "$delay_single_quote_subst"`' prelink_cmds='`$ECHO "X$prelink_cmds" | $Xsed -e "$delay_single_quote_subst"`' file_list_spec='`$ECHO "X$file_list_spec" | $Xsed -e "$delay_single_quote_subst"`' variables_saved_for_relink='`$ECHO "X$variables_saved_for_relink" | $Xsed -e "$delay_single_quote_subst"`' need_lib_prefix='`$ECHO "X$need_lib_prefix" | $Xsed -e "$delay_single_quote_subst"`' need_version='`$ECHO "X$need_version" | $Xsed -e "$delay_single_quote_subst"`' version_type='`$ECHO "X$version_type" | $Xsed -e "$delay_single_quote_subst"`' runpath_var='`$ECHO "X$runpath_var" | $Xsed -e "$delay_single_quote_subst"`' shlibpath_var='`$ECHO "X$shlibpath_var" | $Xsed -e "$delay_single_quote_subst"`' shlibpath_overrides_runpath='`$ECHO "X$shlibpath_overrides_runpath" | $Xsed -e "$delay_single_quote_subst"`' libname_spec='`$ECHO "X$libname_spec" | $Xsed -e "$delay_single_quote_subst"`' library_names_spec='`$ECHO "X$library_names_spec" | $Xsed -e "$delay_single_quote_subst"`' soname_spec='`$ECHO "X$soname_spec" | $Xsed -e "$delay_single_quote_subst"`' postinstall_cmds='`$ECHO "X$postinstall_cmds" | $Xsed -e "$delay_single_quote_subst"`' postuninstall_cmds='`$ECHO "X$postuninstall_cmds" | $Xsed -e "$delay_single_quote_subst"`' finish_cmds='`$ECHO "X$finish_cmds" | $Xsed -e "$delay_single_quote_subst"`' finish_eval='`$ECHO "X$finish_eval" | $Xsed -e "$delay_single_quote_subst"`' hardcode_into_libs='`$ECHO "X$hardcode_into_libs" | $Xsed -e "$delay_single_quote_subst"`' sys_lib_search_path_spec='`$ECHO "X$sys_lib_search_path_spec" | $Xsed -e "$delay_single_quote_subst"`' sys_lib_dlsearch_path_spec='`$ECHO "X$sys_lib_dlsearch_path_spec" | $Xsed -e "$delay_single_quote_subst"`' hardcode_action='`$ECHO "X$hardcode_action" | $Xsed -e "$delay_single_quote_subst"`' enable_dlopen='`$ECHO "X$enable_dlopen" | $Xsed -e "$delay_single_quote_subst"`' enable_dlopen_self='`$ECHO "X$enable_dlopen_self" | $Xsed -e "$delay_single_quote_subst"`' enable_dlopen_self_static='`$ECHO "X$enable_dlopen_self_static" | $Xsed -e "$delay_single_quote_subst"`' old_striplib='`$ECHO "X$old_striplib" | $Xsed -e "$delay_single_quote_subst"`' striplib='`$ECHO "X$striplib" | $Xsed -e "$delay_single_quote_subst"`' compiler_lib_search_dirs='`$ECHO "X$compiler_lib_search_dirs" | $Xsed -e "$delay_single_quote_subst"`' predep_objects='`$ECHO "X$predep_objects" | $Xsed -e "$delay_single_quote_subst"`' postdep_objects='`$ECHO "X$postdep_objects" | $Xsed -e "$delay_single_quote_subst"`' predeps='`$ECHO "X$predeps" | $Xsed -e "$delay_single_quote_subst"`' postdeps='`$ECHO "X$postdeps" | $Xsed -e "$delay_single_quote_subst"`' 
compiler_lib_search_path='`$ECHO "X$compiler_lib_search_path" | $Xsed -e "$delay_single_quote_subst"`' LD_CXX='`$ECHO "X$LD_CXX" | $Xsed -e "$delay_single_quote_subst"`' old_archive_cmds_CXX='`$ECHO "X$old_archive_cmds_CXX" | $Xsed -e "$delay_single_quote_subst"`' compiler_CXX='`$ECHO "X$compiler_CXX" | $Xsed -e "$delay_single_quote_subst"`' GCC_CXX='`$ECHO "X$GCC_CXX" | $Xsed -e "$delay_single_quote_subst"`' lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "X$lt_prog_compiler_no_builtin_flag_CXX" | $Xsed -e "$delay_single_quote_subst"`' lt_prog_compiler_wl_CXX='`$ECHO "X$lt_prog_compiler_wl_CXX" | $Xsed -e "$delay_single_quote_subst"`' lt_prog_compiler_pic_CXX='`$ECHO "X$lt_prog_compiler_pic_CXX" | $Xsed -e "$delay_single_quote_subst"`' lt_prog_compiler_static_CXX='`$ECHO "X$lt_prog_compiler_static_CXX" | $Xsed -e "$delay_single_quote_subst"`' lt_cv_prog_compiler_c_o_CXX='`$ECHO "X$lt_cv_prog_compiler_c_o_CXX" | $Xsed -e "$delay_single_quote_subst"`' archive_cmds_need_lc_CXX='`$ECHO "X$archive_cmds_need_lc_CXX" | $Xsed -e "$delay_single_quote_subst"`' enable_shared_with_static_runtimes_CXX='`$ECHO "X$enable_shared_with_static_runtimes_CXX" | $Xsed -e "$delay_single_quote_subst"`' export_dynamic_flag_spec_CXX='`$ECHO "X$export_dynamic_flag_spec_CXX" | $Xsed -e "$delay_single_quote_subst"`' whole_archive_flag_spec_CXX='`$ECHO "X$whole_archive_flag_spec_CXX" | $Xsed -e "$delay_single_quote_subst"`' compiler_needs_object_CXX='`$ECHO "X$compiler_needs_object_CXX" | $Xsed -e "$delay_single_quote_subst"`' old_archive_from_new_cmds_CXX='`$ECHO "X$old_archive_from_new_cmds_CXX" | $Xsed -e "$delay_single_quote_subst"`' old_archive_from_expsyms_cmds_CXX='`$ECHO "X$old_archive_from_expsyms_cmds_CXX" | $Xsed -e "$delay_single_quote_subst"`' archive_cmds_CXX='`$ECHO "X$archive_cmds_CXX" | $Xsed -e "$delay_single_quote_subst"`' archive_expsym_cmds_CXX='`$ECHO "X$archive_expsym_cmds_CXX" | $Xsed -e "$delay_single_quote_subst"`' module_cmds_CXX='`$ECHO "X$module_cmds_CXX" | $Xsed -e "$delay_single_quote_subst"`' module_expsym_cmds_CXX='`$ECHO "X$module_expsym_cmds_CXX" | $Xsed -e "$delay_single_quote_subst"`' with_gnu_ld_CXX='`$ECHO "X$with_gnu_ld_CXX" | $Xsed -e "$delay_single_quote_subst"`' allow_undefined_flag_CXX='`$ECHO "X$allow_undefined_flag_CXX" | $Xsed -e "$delay_single_quote_subst"`' no_undefined_flag_CXX='`$ECHO "X$no_undefined_flag_CXX" | $Xsed -e "$delay_single_quote_subst"`' hardcode_libdir_flag_spec_CXX='`$ECHO "X$hardcode_libdir_flag_spec_CXX" | $Xsed -e "$delay_single_quote_subst"`' hardcode_libdir_flag_spec_ld_CXX='`$ECHO "X$hardcode_libdir_flag_spec_ld_CXX" | $Xsed -e "$delay_single_quote_subst"`' hardcode_libdir_separator_CXX='`$ECHO "X$hardcode_libdir_separator_CXX" | $Xsed -e "$delay_single_quote_subst"`' hardcode_direct_CXX='`$ECHO "X$hardcode_direct_CXX" | $Xsed -e "$delay_single_quote_subst"`' hardcode_direct_absolute_CXX='`$ECHO "X$hardcode_direct_absolute_CXX" | $Xsed -e "$delay_single_quote_subst"`' hardcode_minus_L_CXX='`$ECHO "X$hardcode_minus_L_CXX" | $Xsed -e "$delay_single_quote_subst"`' hardcode_shlibpath_var_CXX='`$ECHO "X$hardcode_shlibpath_var_CXX" | $Xsed -e "$delay_single_quote_subst"`' hardcode_automatic_CXX='`$ECHO "X$hardcode_automatic_CXX" | $Xsed -e "$delay_single_quote_subst"`' inherit_rpath_CXX='`$ECHO "X$inherit_rpath_CXX" | $Xsed -e "$delay_single_quote_subst"`' link_all_deplibs_CXX='`$ECHO "X$link_all_deplibs_CXX" | $Xsed -e "$delay_single_quote_subst"`' fix_srcfile_path_CXX='`$ECHO "X$fix_srcfile_path_CXX" | $Xsed -e "$delay_single_quote_subst"`' 
always_export_symbols_CXX='`$ECHO "X$always_export_symbols_CXX" | $Xsed -e "$delay_single_quote_subst"`' export_symbols_cmds_CXX='`$ECHO "X$export_symbols_cmds_CXX" | $Xsed -e "$delay_single_quote_subst"`' exclude_expsyms_CXX='`$ECHO "X$exclude_expsyms_CXX" | $Xsed -e "$delay_single_quote_subst"`' include_expsyms_CXX='`$ECHO "X$include_expsyms_CXX" | $Xsed -e "$delay_single_quote_subst"`' prelink_cmds_CXX='`$ECHO "X$prelink_cmds_CXX" | $Xsed -e "$delay_single_quote_subst"`' file_list_spec_CXX='`$ECHO "X$file_list_spec_CXX" | $Xsed -e "$delay_single_quote_subst"`' hardcode_action_CXX='`$ECHO "X$hardcode_action_CXX" | $Xsed -e "$delay_single_quote_subst"`' compiler_lib_search_dirs_CXX='`$ECHO "X$compiler_lib_search_dirs_CXX" | $Xsed -e "$delay_single_quote_subst"`' predep_objects_CXX='`$ECHO "X$predep_objects_CXX" | $Xsed -e "$delay_single_quote_subst"`' postdep_objects_CXX='`$ECHO "X$postdep_objects_CXX" | $Xsed -e "$delay_single_quote_subst"`' predeps_CXX='`$ECHO "X$predeps_CXX" | $Xsed -e "$delay_single_quote_subst"`' postdeps_CXX='`$ECHO "X$postdeps_CXX" | $Xsed -e "$delay_single_quote_subst"`' compiler_lib_search_path_CXX='`$ECHO "X$compiler_lib_search_path_CXX" | $Xsed -e "$delay_single_quote_subst"`' LTCC='$LTCC' LTCFLAGS='$LTCFLAGS' compiler='$compiler_DEFAULT' # Quote evaled strings. for var in SED \ GREP \ EGREP \ FGREP \ LD \ NM \ LN_S \ lt_SP2NL \ lt_NL2SP \ reload_flag \ deplibs_check_method \ file_magic_cmd \ AR \ AR_FLAGS \ STRIP \ RANLIB \ CC \ CFLAGS \ compiler \ lt_cv_sys_global_symbol_pipe \ lt_cv_sys_global_symbol_to_cdecl \ lt_cv_sys_global_symbol_to_c_name_address \ lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ SHELL \ ECHO \ lt_prog_compiler_no_builtin_flag \ lt_prog_compiler_wl \ lt_prog_compiler_pic \ lt_prog_compiler_static \ lt_cv_prog_compiler_c_o \ need_locks \ DSYMUTIL \ NMEDIT \ LIPO \ OTOOL \ OTOOL64 \ shrext_cmds \ export_dynamic_flag_spec \ whole_archive_flag_spec \ compiler_needs_object \ with_gnu_ld \ allow_undefined_flag \ no_undefined_flag \ hardcode_libdir_flag_spec \ hardcode_libdir_flag_spec_ld \ hardcode_libdir_separator \ fix_srcfile_path \ exclude_expsyms \ include_expsyms \ file_list_spec \ variables_saved_for_relink \ libname_spec \ library_names_spec \ soname_spec \ finish_eval \ old_striplib \ striplib \ compiler_lib_search_dirs \ predep_objects \ postdep_objects \ predeps \ postdeps \ compiler_lib_search_path \ LD_CXX \ compiler_CXX \ lt_prog_compiler_no_builtin_flag_CXX \ lt_prog_compiler_wl_CXX \ lt_prog_compiler_pic_CXX \ lt_prog_compiler_static_CXX \ lt_cv_prog_compiler_c_o_CXX \ export_dynamic_flag_spec_CXX \ whole_archive_flag_spec_CXX \ compiler_needs_object_CXX \ with_gnu_ld_CXX \ allow_undefined_flag_CXX \ no_undefined_flag_CXX \ hardcode_libdir_flag_spec_CXX \ hardcode_libdir_flag_spec_ld_CXX \ hardcode_libdir_separator_CXX \ fix_srcfile_path_CXX \ exclude_expsyms_CXX \ include_expsyms_CXX \ file_list_spec_CXX \ compiler_lib_search_dirs_CXX \ predep_objects_CXX \ postdep_objects_CXX \ predeps_CXX \ postdeps_CXX \ compiler_lib_search_path_CXX; do case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in *[\\\\\\\`\\"\\\$]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done # Double-quote double-evaled strings. 
for var in reload_cmds \ old_postinstall_cmds \ old_postuninstall_cmds \ old_archive_cmds \ extract_expsyms_cmds \ old_archive_from_new_cmds \ old_archive_from_expsyms_cmds \ archive_cmds \ archive_expsym_cmds \ module_cmds \ module_expsym_cmds \ export_symbols_cmds \ prelink_cmds \ postinstall_cmds \ postuninstall_cmds \ finish_cmds \ sys_lib_search_path_spec \ sys_lib_dlsearch_path_spec \ old_archive_cmds_CXX \ old_archive_from_new_cmds_CXX \ old_archive_from_expsyms_cmds_CXX \ archive_cmds_CXX \ archive_expsym_cmds_CXX \ module_cmds_CXX \ module_expsym_cmds_CXX \ export_symbols_cmds_CXX \ prelink_cmds_CXX; do case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in *[\\\\\\\`\\"\\\$]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done # Fix-up fallback echo if it was mangled by the above quoting rules. case \$lt_ECHO in *'\\\$0 --fallback-echo"') lt_ECHO=\`\$ECHO "X\$lt_ECHO" | \$Xsed -e 's/\\\\\\\\\\\\\\\$0 --fallback-echo"\$/\$0 --fallback-echo"/'\` ;; esac ac_aux_dir='$ac_aux_dir' xsi_shell='$xsi_shell' lt_shell_append='$lt_shell_append' # See if we are running on zsh, and set the options which allow our # commands through without removal of \ escapes INIT. if test -n "\${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi PACKAGE='$PACKAGE' VERSION='$VERSION' TIMESTAMP='$TIMESTAMP' RM='$RM' ofile='$ofile' # Capture the value of obsolete ALL_LINGUAS because we need it to compute # POFILES, GMOFILES, UPDATEPOFILES, DUMMYPOFILES, CATALOGS. But hide it # from automake. eval 'OBSOLETE_ALL_LINGUAS''="$ALL_LINGUAS"' # Capture the value of LINGUAS because we need it to compute CATALOGS. LINGUAS="${LINGUAS-%UNSET%}" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Handling of arguments. 
for ac_config_target in $ac_config_targets do case $ac_config_target in "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; "default-1") CONFIG_COMMANDS="$CONFIG_COMMANDS default-1" ;; "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;; "src/external/Makefile") CONFIG_FILES="$CONFIG_FILES src/external/Makefile" ;; "src/external/cJSON/Makefile") CONFIG_FILES="$CONFIG_FILES src/external/cJSON/Makefile" ;; "src/hed/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/Makefile" ;; "src/hed/libs/compute/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/compute/Makefile" ;; "src/hed/libs/compute/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/compute/test/Makefile" ;; "src/hed/libs/compute/examples/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/compute/examples/Makefile" ;; "src/hed/libs/common/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/common/Makefile" ;; "src/hed/libs/common/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/common/test/Makefile" ;; "src/hed/libs/communication/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/communication/Makefile" ;; "src/hed/libs/credential/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/credential/Makefile" ;; "src/hed/libs/credential/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/credential/test/Makefile" ;; "src/hed/libs/credentialmod/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/credentialmod/Makefile" ;; "src/hed/libs/crypto/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/crypto/Makefile" ;; "src/hed/libs/cryptomod/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/cryptomod/Makefile" ;; "src/hed/libs/data/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/data/Makefile" ;; "src/hed/libs/data/cache-clean") CONFIG_FILES="$CONFIG_FILES src/hed/libs/data/cache-clean" ;; "src/hed/libs/data/cache-clean.1") CONFIG_FILES="$CONFIG_FILES src/hed/libs/data/cache-clean.1" ;; "src/hed/libs/data/cache-list") CONFIG_FILES="$CONFIG_FILES src/hed/libs/data/cache-list" ;; "src/hed/libs/data/cache-list.1") CONFIG_FILES="$CONFIG_FILES src/hed/libs/data/cache-list.1" ;; "src/hed/libs/data/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/data/test/Makefile" ;; "src/hed/libs/data/examples/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/data/examples/Makefile" ;; "src/hed/libs/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/Makefile" ;; "src/hed/libs/loader/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/loader/Makefile" ;; "src/hed/libs/loader/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/loader/schema/Makefile" ;; "src/hed/libs/loader/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/loader/test/Makefile" ;; "src/hed/libs/message/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/message/Makefile" ;; "src/hed/libs/message/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/message/test/Makefile" ;; "src/hed/libs/security/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/security/Makefile" ;; "src/hed/libs/security/ArcPDP/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/security/ArcPDP/Makefile" ;; "src/hed/libs/security/ArcPDP/attr/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/security/ArcPDP/attr/Makefile" ;; "src/hed/libs/security/ArcPDP/policy/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/security/ArcPDP/policy/Makefile" ;; "src/hed/libs/security/ArcPDP/alg/Makefile") 
CONFIG_FILES="$CONFIG_FILES src/hed/libs/security/ArcPDP/alg/Makefile" ;; "src/hed/libs/security/ArcPDP/fn/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/security/ArcPDP/fn/Makefile" ;; "src/hed/libs/credentialstore/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/credentialstore/Makefile" ;; "src/hed/libs/wsrf/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/wsrf/Makefile" ;; "src/hed/libs/ws-addressing/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/ws-addressing/Makefile" ;; "src/hed/libs/ws-security/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/ws-security/Makefile" ;; "src/hed/libs/ws-security/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/ws-security/test/Makefile" ;; "src/hed/libs/infosys/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/infosys/Makefile" ;; "src/hed/libs/infosys/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/infosys/schema/Makefile" ;; "src/hed/libs/infosys/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/infosys/test/Makefile" ;; "src/hed/libs/delegation/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/delegation/Makefile" ;; "src/hed/libs/delegation/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/delegation/test/Makefile" ;; "src/hed/libs/ws/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/ws/Makefile" ;; "src/hed/libs/xmlsec/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/xmlsec/Makefile" ;; "src/hed/libs/globusutils/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/globusutils/Makefile" ;; "src/hed/daemon/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/daemon/Makefile" ;; "src/hed/daemon/scripts/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/daemon/scripts/Makefile" ;; "src/hed/daemon/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/daemon/schema/Makefile" ;; "src/hed/daemon/unix/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/daemon/unix/Makefile" ;; "src/hed/daemon/win32/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/daemon/win32/Makefile" ;; "src/hed/mcc/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/Makefile" ;; "src/hed/mcc/soap/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/soap/Makefile" ;; "src/hed/mcc/tcp/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/tcp/Makefile" ;; "src/hed/mcc/tcp/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/tcp/schema/Makefile" ;; "src/hed/mcc/http/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/http/Makefile" ;; "src/hed/mcc/http/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/http/schema/Makefile" ;; "src/hed/mcc/tls/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/tls/Makefile" ;; "src/hed/mcc/tls/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/tls/schema/Makefile" ;; "src/hed/mcc/msgvalidator/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/msgvalidator/Makefile" ;; "src/hed/mcc/msgvalidator/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/msgvalidator/schema/Makefile" ;; "src/hed/acc/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/Makefile" ;; "src/hed/acc/ARC0/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/ARC0/Makefile" ;; "src/hed/acc/ARC1/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/ARC1/Makefile" ;; "src/hed/acc/ARC1/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/ARC1/test/Makefile" ;; "src/hed/acc/EMIES/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/EMIES/Makefile" ;; "src/hed/acc/EMIES/arcemiestest.1") CONFIG_FILES="$CONFIG_FILES src/hed/acc/EMIES/arcemiestest.1" ;; "src/hed/acc/EMIES/schema/Makefile") CONFIG_FILES="$CONFIG_FILES 
src/hed/acc/EMIES/schema/Makefile" ;; "src/hed/acc/CREAM/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/CREAM/Makefile" ;; "src/hed/acc/UNICORE/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/UNICORE/Makefile" ;; "src/hed/acc/Broker/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/Broker/Makefile" ;; "src/hed/acc/Broker/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/Broker/test/Makefile" ;; "src/hed/acc/PythonBroker/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/PythonBroker/Makefile" ;; "src/hed/acc/JobDescriptionParser/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/JobDescriptionParser/Makefile" ;; "src/hed/acc/JobDescriptionParser/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/JobDescriptionParser/test/Makefile" ;; "src/hed/acc/SER/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/SER/Makefile" ;; "src/hed/acc/ldap/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/ldap/Makefile" ;; "src/hed/acc/TEST/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/TEST/Makefile" ;; "src/hed/dmc/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/Makefile" ;; "src/hed/dmc/file/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/file/Makefile" ;; "src/hed/dmc/gridftp/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/gridftp/Makefile" ;; "src/hed/dmc/http/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/http/Makefile" ;; "src/hed/dmc/ldap/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/ldap/Makefile" ;; "src/hed/dmc/srm/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/srm/Makefile" ;; "src/hed/dmc/srm/srmclient/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/srm/srmclient/Makefile" ;; "src/hed/dmc/gfal/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/gfal/Makefile" ;; "src/hed/dmc/xrootd/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/xrootd/Makefile" ;; "src/hed/dmc/mock/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/mock/Makefile" ;; "src/hed/dmc/acix/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/acix/Makefile" ;; "src/hed/dmc/rucio/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/rucio/Makefile" ;; "src/hed/dmc/s3/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/s3/Makefile" ;; "src/hed/profiles/general/general.xml") CONFIG_FILES="$CONFIG_FILES src/hed/profiles/general/general.xml" ;; "src/hed/shc/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/Makefile" ;; "src/hed/shc/arcpdp/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/arcpdp/Makefile" ;; "src/hed/shc/arcpdp/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/arcpdp/schema/Makefile" ;; "src/hed/shc/xacmlpdp/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/xacmlpdp/Makefile" ;; "src/hed/shc/xacmlpdp/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/xacmlpdp/schema/Makefile" ;; "src/hed/shc/delegationpdp/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/delegationpdp/Makefile" ;; "src/hed/shc/delegationpdp/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/delegationpdp/schema/Makefile" ;; "src/hed/shc/gaclpdp/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/gaclpdp/Makefile" ;; "src/hed/shc/pdpserviceinvoker/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/pdpserviceinvoker/Makefile" ;; "src/hed/shc/pdpserviceinvoker/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/pdpserviceinvoker/schema/Makefile" ;; "src/hed/shc/allowpdp/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/allowpdp/Makefile" ;; "src/hed/shc/denypdp/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/denypdp/Makefile" ;; "src/hed/shc/simplelistpdp/Makefile") 
CONFIG_FILES="$CONFIG_FILES src/hed/shc/simplelistpdp/Makefile" ;; "src/hed/shc/simplelistpdp/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/simplelistpdp/schema/Makefile" ;; "src/hed/shc/arcauthzsh/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/arcauthzsh/Makefile" ;; "src/hed/shc/arcauthzsh/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/arcauthzsh/schema/Makefile" ;; "src/hed/shc/usernametokensh/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/usernametokensh/Makefile" ;; "src/hed/shc/usernametokensh/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/usernametokensh/schema/Makefile" ;; "src/hed/shc/x509tokensh/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/x509tokensh/Makefile" ;; "src/hed/shc/x509tokensh/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/x509tokensh/schema/Makefile" ;; "src/hed/shc/samltokensh/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/samltokensh/Makefile" ;; "src/hed/shc/samltokensh/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/samltokensh/schema/Makefile" ;; "src/hed/shc/saml2sso_assertionconsumersh/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/saml2sso_assertionconsumersh/Makefile" ;; "src/hed/shc/delegationsh/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/delegationsh/Makefile" ;; "src/hed/shc/delegationsh/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/delegationsh/schema/Makefile" ;; "src/hed/shc/legacy/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/legacy/Makefile" ;; "src/hed/shc/legacy/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/legacy/schema/Makefile" ;; "src/hed/identitymap/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/identitymap/Makefile" ;; "src/hed/identitymap/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/identitymap/schema/Makefile" ;; "src/libs/Makefile") CONFIG_FILES="$CONFIG_FILES src/libs/Makefile" ;; "src/libs/data-staging/Makefile") CONFIG_FILES="$CONFIG_FILES src/libs/data-staging/Makefile" ;; "src/libs/data-staging/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/libs/data-staging/test/Makefile" ;; "src/libs/data-staging/examples/Makefile") CONFIG_FILES="$CONFIG_FILES src/libs/data-staging/examples/Makefile" ;; "src/services/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/Makefile" ;; "src/services/a-rex/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/Makefile" ;; "src/services/a-rex/a-rex") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/a-rex" ;; "src/services/a-rex/a-rex.service") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/a-rex.service" ;; "src/services/a-rex/a-rex-start") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/a-rex-start" ;; "src/services/a-rex/a-rex-backtrace-collect") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/a-rex-backtrace-collect" ;; "src/services/a-rex/a-rex-backtrace-collect.8") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/a-rex-backtrace-collect.8" ;; "src/services/a-rex/perferator") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/perferator" ;; "src/services/a-rex/grid-manager/arc-vomsac-check.8") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/arc-vomsac-check.8" ;; "src/services/a-rex/grid-manager/arc-blahp-logger.8") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/arc-blahp-logger.8" ;; "src/services/a-rex/grid-manager/gm-jobs.8") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/gm-jobs.8" ;; "src/services/a-rex/grid-manager/gm-delegations-converter.8") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/gm-delegations-converter.8" ;; 
"src/services/a-rex/delegation/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/delegation/Makefile" ;; "src/services/a-rex/grid-manager/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/Makefile" ;; "src/services/a-rex/grid-manager/conf/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/conf/Makefile" ;; "src/services/a-rex/grid-manager/files/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/files/Makefile" ;; "src/services/a-rex/grid-manager/jobs/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/jobs/Makefile" ;; "src/services/a-rex/grid-manager/jobplugin/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/jobplugin/Makefile" ;; "src/services/a-rex/grid-manager/loaders/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/loaders/Makefile" ;; "src/services/a-rex/grid-manager/log/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/log/Makefile" ;; "src/services/a-rex/grid-manager/mail/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/mail/Makefile" ;; "src/services/a-rex/grid-manager/misc/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/misc/Makefile" ;; "src/services/a-rex/grid-manager/run/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/run/Makefile" ;; "src/services/a-rex/grid-manager/arc-config-check.1") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/arc-config-check.1" ;; "src/services/a-rex/infoproviders/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/infoproviders/Makefile" ;; "src/services/a-rex/infoproviders/CEinfo.pl") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/infoproviders/CEinfo.pl" ;; "src/services/a-rex/infoproviders/PerfData.pl") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/infoproviders/PerfData.pl" ;; "src/services/a-rex/infoproviders/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/infoproviders/test/Makefile" ;; "src/services/a-rex/jura/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/jura/Makefile" ;; "src/services/a-rex/jura/jura.1") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/jura/jura.1" ;; "src/services/a-rex/jura/ssm/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/jura/ssm/Makefile" ;; "src/services/a-rex/ldif/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/ldif/Makefile" ;; "src/services/a-rex/lrms/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/Makefile" ;; "src/services/a-rex/lrms/submit_common.sh") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/submit_common.sh" ;; "src/services/a-rex/lrms/scan_common.sh") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/scan_common.sh" ;; "src/services/a-rex/lrms/condor/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/condor/Makefile" ;; "src/services/a-rex/lrms/condor/scan-condor-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/condor/scan-condor-job" ;; "src/services/a-rex/lrms/condor/cancel-condor-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/condor/cancel-condor-job" ;; "src/services/a-rex/lrms/condor/submit-condor-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/condor/submit-condor-job" ;; "src/services/a-rex/lrms/fork/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/fork/Makefile" ;; "src/services/a-rex/lrms/fork/scan-fork-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/fork/scan-fork-job" ;; "src/services/a-rex/lrms/fork/submit-fork-job") 
CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/fork/submit-fork-job" ;; "src/services/a-rex/lrms/fork/cancel-fork-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/fork/cancel-fork-job" ;; "src/services/a-rex/lrms/ll/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/ll/Makefile" ;; "src/services/a-rex/lrms/ll/submit-ll-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/ll/submit-ll-job" ;; "src/services/a-rex/lrms/ll/cancel-ll-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/ll/cancel-ll-job" ;; "src/services/a-rex/lrms/ll/scan-ll-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/ll/scan-ll-job" ;; "src/services/a-rex/lrms/lsf/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/lsf/Makefile" ;; "src/services/a-rex/lrms/lsf/submit-lsf-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/lsf/submit-lsf-job" ;; "src/services/a-rex/lrms/lsf/cancel-lsf-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/lsf/cancel-lsf-job" ;; "src/services/a-rex/lrms/lsf/scan-lsf-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/lsf/scan-lsf-job" ;; "src/services/a-rex/lrms/pbs/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/pbs/Makefile" ;; "src/services/a-rex/lrms/pbs/submit-pbs-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/pbs/submit-pbs-job" ;; "src/services/a-rex/lrms/pbs/cancel-pbs-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/pbs/cancel-pbs-job" ;; "src/services/a-rex/lrms/pbs/scan-pbs-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/pbs/scan-pbs-job" ;; "src/services/a-rex/lrms/pbs/configure-pbs-env.sh") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/pbs/configure-pbs-env.sh" ;; "src/services/a-rex/lrms/sge/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/sge/Makefile" ;; "src/services/a-rex/lrms/sge/submit-sge-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/sge/submit-sge-job" ;; "src/services/a-rex/lrms/sge/scan-sge-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/sge/scan-sge-job" ;; "src/services/a-rex/lrms/sge/cancel-sge-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/sge/cancel-sge-job" ;; "src/services/a-rex/lrms/slurm/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/slurm/Makefile" ;; "src/services/a-rex/lrms/slurm/submit-SLURM-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/slurm/submit-SLURM-job" ;; "src/services/a-rex/lrms/slurm/scan-SLURM-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/slurm/scan-SLURM-job" ;; "src/services/a-rex/lrms/slurm/cancel-SLURM-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/slurm/cancel-SLURM-job" ;; "src/services/a-rex/lrms/dgbridge/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/dgbridge/Makefile" ;; "src/services/a-rex/lrms/dgbridge/submit-DGBridge-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/dgbridge/submit-DGBridge-job" ;; "src/services/a-rex/lrms/dgbridge/scan-DGBridge-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/dgbridge/scan-DGBridge-job" ;; "src/services/a-rex/lrms/dgbridge/cancel-DGBridge-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/dgbridge/cancel-DGBridge-job" ;; "src/services/a-rex/lrms/boinc/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/boinc/Makefile" ;; "src/services/a-rex/lrms/boinc/submit-boinc-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/boinc/submit-boinc-job" ;; "src/services/a-rex/lrms/boinc/scan-boinc-job") CONFIG_FILES="$CONFIG_FILES 
src/services/a-rex/lrms/boinc/scan-boinc-job" ;; "src/services/a-rex/lrms/boinc/cancel-boinc-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/boinc/cancel-boinc-job" ;; "src/services/a-rex/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/schema/Makefile" ;; "src/services/acix/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/acix/Makefile" ;; "src/services/acix/cacheserver/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/acix/cacheserver/Makefile" ;; "src/services/acix/cacheserver/acix-cache.service") CONFIG_FILES="$CONFIG_FILES src/services/acix/cacheserver/acix-cache.service" ;; "src/services/acix/cacheserver/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/acix/cacheserver/test/Makefile" ;; "src/services/acix/core/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/acix/core/Makefile" ;; "src/services/acix/core/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/acix/core/test/Makefile" ;; "src/services/acix/indexserver/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/acix/indexserver/Makefile" ;; "src/services/acix/indexserver/acix-index.service") CONFIG_FILES="$CONFIG_FILES src/services/acix/indexserver/acix-index.service" ;; "src/services/acix/indexserver/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/acix/indexserver/test/Makefile" ;; "src/services/cache_service/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/cache_service/Makefile" ;; "src/services/cache_service/arc-cache-service") CONFIG_FILES="$CONFIG_FILES src/services/cache_service/arc-cache-service" ;; "src/services/cache_service/arc-cache-service.service") CONFIG_FILES="$CONFIG_FILES src/services/cache_service/arc-cache-service.service" ;; "src/services/cache_service/arc-cache-service-start") CONFIG_FILES="$CONFIG_FILES src/services/cache_service/arc-cache-service-start" ;; "src/services/data-staging/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/data-staging/Makefile" ;; "src/services/data-staging/arc-datadelivery-service") CONFIG_FILES="$CONFIG_FILES src/services/data-staging/arc-datadelivery-service" ;; "src/services/data-staging/arc-datadelivery-service.service") CONFIG_FILES="$CONFIG_FILES src/services/data-staging/arc-datadelivery-service.service" ;; "src/services/data-staging/arc-datadelivery-service-start") CONFIG_FILES="$CONFIG_FILES src/services/data-staging/arc-datadelivery-service-start" ;; "src/services/gridftpd/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/gridftpd/Makefile" ;; "src/services/gridftpd/gridftpd.init") CONFIG_FILES="$CONFIG_FILES src/services/gridftpd/gridftpd.init" ;; "src/services/gridftpd/gridftpd.service") CONFIG_FILES="$CONFIG_FILES src/services/gridftpd/gridftpd.service" ;; "src/services/gridftpd/gridftpd-start") CONFIG_FILES="$CONFIG_FILES src/services/gridftpd/gridftpd-start" ;; "src/services/gridftpd/gridftpd.8") CONFIG_FILES="$CONFIG_FILES src/services/gridftpd/gridftpd.8" ;; "src/services/gridftpd/auth/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/gridftpd/auth/Makefile" ;; "src/services/gridftpd/conf/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/gridftpd/conf/Makefile" ;; "src/services/gridftpd/misc/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/gridftpd/misc/Makefile" ;; "src/services/gridftpd/run/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/gridftpd/run/Makefile" ;; "src/services/gridftpd/fileplugin/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/gridftpd/fileplugin/Makefile" ;; "src/services/ldap-infosys/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/ldap-infosys/Makefile" ;; 
"src/services/ldap-infosys/create-bdii-config") CONFIG_FILES="$CONFIG_FILES src/services/ldap-infosys/create-bdii-config" ;; "src/services/ldap-infosys/create-inforeg-config") CONFIG_FILES="$CONFIG_FILES src/services/ldap-infosys/create-inforeg-config" ;; "src/services/ldap-infosys/create-slapd-config") CONFIG_FILES="$CONFIG_FILES src/services/ldap-infosys/create-slapd-config" ;; "src/services/ldap-infosys/nordugrid-arc-bdii") CONFIG_FILES="$CONFIG_FILES src/services/ldap-infosys/nordugrid-arc-bdii" ;; "src/services/ldap-infosys/nordugrid-arc-egiis") CONFIG_FILES="$CONFIG_FILES src/services/ldap-infosys/nordugrid-arc-egiis" ;; "src/services/ldap-infosys/nordugrid-arc-inforeg") CONFIG_FILES="$CONFIG_FILES src/services/ldap-infosys/nordugrid-arc-inforeg" ;; "src/services/ldap-infosys/nordugrid-arc-ldap-infosys") CONFIG_FILES="$CONFIG_FILES src/services/ldap-infosys/nordugrid-arc-ldap-infosys" ;; "src/services/ldap-infosys/nordugrid-arc-slapd") CONFIG_FILES="$CONFIG_FILES src/services/ldap-infosys/nordugrid-arc-slapd" ;; "src/services/ldap-infosys/giis/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/ldap-infosys/giis/Makefile" ;; "src/services/ldap-infosys/giis/arc-infoindex-relay.8") CONFIG_FILES="$CONFIG_FILES src/services/ldap-infosys/giis/arc-infoindex-relay.8" ;; "src/services/ldap-infosys/giis/arc-infoindex-server.8") CONFIG_FILES="$CONFIG_FILES src/services/ldap-infosys/giis/arc-infoindex-server.8" ;; "src/services/ldap-monitor/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/ldap-monitor/Makefile" ;; "src/services/ldap-monitor/ldap-monitor") CONFIG_FILES="$CONFIG_FILES src/services/ldap-monitor/ldap-monitor" ;; "src/services/ldap-monitor/README") CONFIG_FILES="$CONFIG_FILES src/services/ldap-monitor/README" ;; "src/services/ldap-monitor/man/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/ldap-monitor/man/Makefile" ;; "src/services/ldap-monitor/man/ldap-monitor.7") CONFIG_FILES="$CONFIG_FILES src/services/ldap-monitor/man/ldap-monitor.7" ;; "src/services/ldap-monitor/includes/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/ldap-monitor/includes/Makefile" ;; "src/services/ldap-monitor/mon-icons/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/ldap-monitor/mon-icons/Makefile" ;; "src/services/ldap-monitor/lang/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/ldap-monitor/lang/Makefile" ;; "src/services/ws-monitor/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/ws-monitor/Makefile" ;; "src/services/ws-monitor/ws-monitor") CONFIG_FILES="$CONFIG_FILES src/services/ws-monitor/ws-monitor" ;; "src/services/ws-monitor/README") CONFIG_FILES="$CONFIG_FILES src/services/ws-monitor/README" ;; "src/services/ws-monitor/man/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/ws-monitor/man/Makefile" ;; "src/services/ws-monitor/man/ws-monitor.7") CONFIG_FILES="$CONFIG_FILES src/services/ws-monitor/man/ws-monitor.7" ;; "src/services/ws-monitor/includes/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/ws-monitor/includes/Makefile" ;; "src/services/ws-monitor/mon-icons/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/ws-monitor/mon-icons/Makefile" ;; "src/services/ws-monitor/lang/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/ws-monitor/lang/Makefile" ;; "src/services/examples/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/examples/Makefile" ;; "src/services/examples/echo_java/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/examples/echo_java/Makefile" ;; "src/services/examples/echo_python/Makefile") CONFIG_FILES="$CONFIG_FILES 
src/services/examples/echo_python/Makefile" ;; "src/services/wrappers/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/wrappers/Makefile" ;; "src/services/wrappers/java/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/wrappers/java/Makefile" ;; "src/services/wrappers/java/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/wrappers/java/schema/Makefile" ;; "src/services/wrappers/python/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/wrappers/python/Makefile" ;; "src/services/wrappers/python/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/wrappers/python/schema/Makefile" ;; "src/clients/Makefile") CONFIG_FILES="$CONFIG_FILES src/clients/Makefile" ;; "src/clients/data/Makefile") CONFIG_FILES="$CONFIG_FILES src/clients/data/Makefile" ;; "src/clients/data/arccp.1") CONFIG_FILES="$CONFIG_FILES src/clients/data/arccp.1" ;; "src/clients/data/arcls.1") CONFIG_FILES="$CONFIG_FILES src/clients/data/arcls.1" ;; "src/clients/data/arcrm.1") CONFIG_FILES="$CONFIG_FILES src/clients/data/arcrm.1" ;; "src/clients/data/arcmkdir.1") CONFIG_FILES="$CONFIG_FILES src/clients/data/arcmkdir.1" ;; "src/clients/data/arcrename.1") CONFIG_FILES="$CONFIG_FILES src/clients/data/arcrename.1" ;; "src/clients/echo/Makefile") CONFIG_FILES="$CONFIG_FILES src/clients/echo/Makefile" ;; "src/clients/echo/arcecho.1") CONFIG_FILES="$CONFIG_FILES src/clients/echo/arcecho.1" ;; "src/clients/credentials/Makefile") CONFIG_FILES="$CONFIG_FILES src/clients/credentials/Makefile" ;; "src/clients/credentials/arcproxy.1") CONFIG_FILES="$CONFIG_FILES src/clients/credentials/arcproxy.1" ;; "src/clients/saml/Makefile") CONFIG_FILES="$CONFIG_FILES src/clients/saml/Makefile" ;; "src/clients/saml/saml_assertion_init.1") CONFIG_FILES="$CONFIG_FILES src/clients/saml/saml_assertion_init.1" ;; "src/clients/compute/Makefile") CONFIG_FILES="$CONFIG_FILES src/clients/compute/Makefile" ;; "src/clients/compute/arcstat.1") CONFIG_FILES="$CONFIG_FILES src/clients/compute/arcstat.1" ;; "src/clients/compute/arcinfo.1") CONFIG_FILES="$CONFIG_FILES src/clients/compute/arcinfo.1" ;; "src/clients/compute/arcsub.1") CONFIG_FILES="$CONFIG_FILES src/clients/compute/arcsub.1" ;; "src/clients/compute/arcclean.1") CONFIG_FILES="$CONFIG_FILES src/clients/compute/arcclean.1" ;; "src/clients/compute/arckill.1") CONFIG_FILES="$CONFIG_FILES src/clients/compute/arckill.1" ;; "src/clients/compute/arcget.1") CONFIG_FILES="$CONFIG_FILES src/clients/compute/arcget.1" ;; "src/clients/compute/arccat.1") CONFIG_FILES="$CONFIG_FILES src/clients/compute/arccat.1" ;; "src/clients/compute/arcresub.1") CONFIG_FILES="$CONFIG_FILES src/clients/compute/arcresub.1" ;; "src/clients/compute/arcsync.1") CONFIG_FILES="$CONFIG_FILES src/clients/compute/arcsync.1" ;; "src/clients/compute/arcrenew.1") CONFIG_FILES="$CONFIG_FILES src/clients/compute/arcrenew.1" ;; "src/clients/compute/arcresume.1") CONFIG_FILES="$CONFIG_FILES src/clients/compute/arcresume.1" ;; "src/clients/compute/arctest.1") CONFIG_FILES="$CONFIG_FILES src/clients/compute/arctest.1" ;; "src/clients/wsrf/arcwsrf.1") CONFIG_FILES="$CONFIG_FILES src/clients/wsrf/arcwsrf.1" ;; "src/clients/wsrf/Makefile") CONFIG_FILES="$CONFIG_FILES src/clients/wsrf/Makefile" ;; "src/tests/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/Makefile" ;; "src/tests/echo/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/echo/Makefile" ;; "src/tests/echo/perftest.1") CONFIG_FILES="$CONFIG_FILES src/tests/echo/perftest.1" ;; "src/tests/echo/echo_service.xml.example") CONFIG_FILES="$CONFIG_FILES 
src/tests/echo/echo_service.xml.example" ;; "src/tests/echo/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/echo/schema/Makefile" ;; "src/tests/policy-delegation/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/policy-delegation/Makefile" ;; "src/tests/delegation/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/delegation/Makefile" ;; "src/tests/translator/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/translator/Makefile" ;; "src/tests/xpath/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/xpath/Makefile" ;; "src/tests/arcpolicy/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/arcpolicy/Makefile" ;; "src/tests/perf/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/perf/Makefile" ;; "src/tests/perf/arcperftest.1") CONFIG_FILES="$CONFIG_FILES src/tests/perf/arcperftest.1" ;; "src/tests/client/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/client/Makefile" ;; "src/utils/hed/wsdl2hed.1") CONFIG_FILES="$CONFIG_FILES src/utils/hed/wsdl2hed.1" ;; "src/utils/hed/arcplugin.1") CONFIG_FILES="$CONFIG_FILES src/utils/hed/arcplugin.1" ;; "src/utils/hed/Makefile") CONFIG_FILES="$CONFIG_FILES src/utils/hed/Makefile" ;; "src/utils/gridmap/nordugridmap.cron") CONFIG_FILES="$CONFIG_FILES src/utils/gridmap/nordugridmap.cron" ;; "src/utils/gridmap/nordugridmap.8") CONFIG_FILES="$CONFIG_FILES src/utils/gridmap/nordugridmap.8" ;; "src/utils/gridmap/Makefile") CONFIG_FILES="$CONFIG_FILES src/utils/gridmap/Makefile" ;; "src/utils/Makefile") CONFIG_FILES="$CONFIG_FILES src/utils/Makefile" ;; "src/doc/Makefile") CONFIG_FILES="$CONFIG_FILES src/doc/Makefile" ;; "src/doc/arc.conf.5") CONFIG_FILES="$CONFIG_FILES src/doc/arc.conf.5" ;; "swig/Makefile") CONFIG_FILES="$CONFIG_FILES swig/Makefile" ;; "java/Makefile") CONFIG_FILES="$CONFIG_FILES java/Makefile" ;; "java/test/Makefile") CONFIG_FILES="$CONFIG_FILES java/test/Makefile" ;; "java/test/strip_test_file_name_and_run_junit") CONFIG_FILES="$CONFIG_FILES java/test/strip_test_file_name_and_run_junit" ;; "java/examples/Makefile") CONFIG_FILES="$CONFIG_FILES java/examples/Makefile" ;; "python/Makefile") CONFIG_FILES="$CONFIG_FILES python/Makefile" ;; "python/Doxyfile.api") CONFIG_FILES="$CONFIG_FILES python/Doxyfile.api" ;; "python/python/Makefile") CONFIG_FILES="$CONFIG_FILES python/python/Makefile" ;; "python/python/arc/Makefile") CONFIG_FILES="$CONFIG_FILES python/python/arc/Makefile" ;; "python/altpython/Makefile") CONFIG_FILES="$CONFIG_FILES python/altpython/Makefile" ;; "python/altpython/arc/Makefile") CONFIG_FILES="$CONFIG_FILES python/altpython/arc/Makefile" ;; "python/test/Makefile") CONFIG_FILES="$CONFIG_FILES python/test/Makefile" ;; "python/test/python/Makefile") CONFIG_FILES="$CONFIG_FILES python/test/python/Makefile" ;; "python/test/altpython/Makefile") CONFIG_FILES="$CONFIG_FILES python/test/altpython/Makefile" ;; "python/examples/Makefile") CONFIG_FILES="$CONFIG_FILES python/examples/Makefile" ;; "po/Makefile.in") CONFIG_FILES="$CONFIG_FILES po/Makefile.in" ;; "include/Makefile") CONFIG_FILES="$CONFIG_FILES include/Makefile" ;; "debian/Makefile") CONFIG_FILES="$CONFIG_FILES debian/Makefile" ;; "debian/changelog.deb") CONFIG_FILES="$CONFIG_FILES debian/changelog.deb" ;; "nordugrid-arc.spec") CONFIG_FILES="$CONFIG_FILES nordugrid-arc.spec" ;; "mingw-nordugrid-arc.spec") CONFIG_FILES="$CONFIG_FILES mingw-nordugrid-arc.spec" ;; "src/hed/daemon/arched.8") CONFIG_FILES="$CONFIG_FILES src/hed/daemon/arched.8" ;; "src/hed/daemon/scripts/arched") CONFIG_FILES="$CONFIG_FILES src/hed/daemon/scripts/arched" ;; "src/hed/daemon/scripts/arched.service") 
CONFIG_FILES="$CONFIG_FILES src/hed/daemon/scripts/arched.service" ;; "src/hed/daemon/scripts/arched-start") CONFIG_FILES="$CONFIG_FILES src/hed/daemon/scripts/arched-start" ;; "arcbase.pc") CONFIG_FILES="$CONFIG_FILES arcbase.pc" ;; "nsis/Makefile") CONFIG_FILES="$CONFIG_FILES nsis/Makefile" ;; "nsis/arc.nsis") CONFIG_FILES="$CONFIG_FILES nsis/arc.nsis" ;; "src/doxygen/Makefile") CONFIG_FILES="$CONFIG_FILES src/doxygen/Makefile" ;; *) { { $as_echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5 $as_echo "$as_me: error: invalid argument: $ac_config_target" >&2;} { (exit 1); exit 1; }; };; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands fi # Have a temporary directory for convenience. Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Hook for its removal unless debugging. # Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= trap 'exit_status=$? { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status ' 0 trap '{ (exit 1); exit 1; }' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || { $as_echo "$as_me: cannot create a temporary directory in ." >&2 { (exit 1); exit 1; } } # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. # This happens for instance with `./config.status config.h'. if test -n "$CONFIG_FILES"; then ac_cr=' ' ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\\r' else ac_cs_awk_cr=$ac_cr fi echo 'BEGIN {' >"$tmp/subs1.awk" && _ACEOF { echo "cat >conf$$subs.awk <<_ACEOF" && echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && echo "_ACEOF" } >conf$$subs.sh || { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 $as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} { (exit 1); exit 1; }; } ac_delim_num=`echo "$ac_subst_vars" | grep -c '$'` ac_delim='%!_!# ' for ac_last_try in false false false false false :; do . ./conf$$subs.sh || { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 $as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} { (exit 1); exit 1; }; } ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` if test $ac_delim_n = $ac_delim_num; then break elif $ac_last_try; then { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 $as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} { (exit 1); exit 1; }; } else ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" fi done rm -f conf$$subs.sh cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>"\$tmp/subs1.awk" <<\\_ACAWK && _ACEOF sed -n ' h s/^/S["/; s/!.*/"]=/ p g s/^[^!]*!// :repl t repl s/'"$ac_delim"'$// t delim :nl h s/\(.\{148\}\).*/\1/ t more1 s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ p n b repl :more1 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t nl :delim h s/\(.\{148\}\).*/\1/ t more2 s/["\\]/\\&/g; s/^/"/; s/$/"/ p b :more2 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t delim ' >$CONFIG_STATUS || ac_write_fail=1 rm -f conf$$subs.awk cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACAWK cat >>"\$tmp/subs1.awk" <<_ACAWK && for (key in S) S_is_set[key] = 1 FS = "" } { line = $ 0 nfields = split(line, field, "@") substed = 0 len = length(field[1]) for (i = 2; i < nfields; i++) { key = field[i] keylen = length(key) if (S_is_set[key]) { value = S[key] line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) len += length(value) + length(field[++i]) substed = 1 } else len += 1 + keylen } print line } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \ || { { $as_echo "$as_me:$LINENO: error: could not setup config files machinery" >&5 $as_echo "$as_me: error: could not setup config files machinery" >&2;} { (exit 1); exit 1; }; } _ACEOF # VPATH may cause trouble with some makes, so we remove $(srcdir), # ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=/{ s/:*\$(srcdir):*/:/ s/:*\${srcdir}:*/:/ s/:*@srcdir@:*/:/ s/^\([^=]*=[ ]*\):*/\1/ s/:*$// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 fi # test -n "$CONFIG_FILES" # Set up the scripts for CONFIG_HEADERS section. # No need to generate them if there are no CONFIG_HEADERS. # This happens for instance with `./config.status Makefile'. if test -n "$CONFIG_HEADERS"; then cat >"$tmp/defines.awk" <<\_ACAWK || BEGIN { _ACEOF # Transform confdefs.h into an awk script `defines.awk', embedded as # here-document in config.status, that substitutes the proper values into # config.h.in to produce config.h. # Create a delimiter string that does not exist in confdefs.h, to ease # handling of long lines. ac_delim='%!_!# ' for ac_last_try in false false :; do ac_t=`sed -n "/$ac_delim/p" confdefs.h` if test -z "$ac_t"; then break elif $ac_last_try; then { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_HEADERS" >&5 $as_echo "$as_me: error: could not make $CONFIG_HEADERS" >&2;} { (exit 1); exit 1; }; } else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done # For the awk script, D is an array of macro values keyed by name, # likewise P contains macro parameters if any. Preserve backslash # newline sequences. 
ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* sed -n ' s/.\{148\}/&'"$ac_delim"'/g t rset :rset s/^[ ]*#[ ]*define[ ][ ]*/ / t def d :def s/\\$// t bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3"/p s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p d :bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3\\\\\\n"\\/p t cont s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p t cont d :cont n s/.\{148\}/&'"$ac_delim"'/g t clear :clear s/\\$// t bsnlc s/["\\]/\\&/g; s/^/"/; s/$/"/p d :bsnlc s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p b cont ' >$CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 for (key in D) D_is_set[key] = 1 FS = "" } /^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { line = \$ 0 split(line, arg, " ") if (arg[1] == "#") { defundef = arg[2] mac1 = arg[3] } else { defundef = substr(arg[1], 2) mac1 = arg[2] } split(mac1, mac2, "(") #) macro = mac2[1] prefix = substr(line, 1, index(line, defundef) - 1) if (D_is_set[macro]) { # Preserve the white space surrounding the "#". print prefix "define", macro P[macro] D[macro] next } else { # Replace #undef with comments. This is necessary, for example, # in the case of _POSIX_SOURCE, which is predefined and required # on some systems where configure will not decide to define it. if (defundef == "undef") { print "/*", prefix defundef, macro, "*/" next } } } { print } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 { { $as_echo "$as_me:$LINENO: error: could not setup config headers machinery" >&5 $as_echo "$as_me: error: could not setup config headers machinery" >&2;} { (exit 1); exit 1; }; } fi # test -n "$CONFIG_HEADERS" eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) { { $as_echo "$as_me:$LINENO: error: invalid tag $ac_tag" >&5 $as_echo "$as_me: error: invalid tag $ac_tag" >&2;} { (exit 1); exit 1; }; };; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || { { $as_echo "$as_me:$LINENO: error: cannot find input file: $ac_f" >&5 $as_echo "$as_me: error: cannot find input file: $ac_f" >&2;} { (exit 1); exit 1; }; };; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac ac_file_inputs="$ac_file_inputs '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" { $as_echo "$as_me:$LINENO: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. 
case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`$as_echo "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$tmp/stdin" \ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 $as_echo "$as_me: error: could not create $ac_file" >&2;} { (exit 1); exit 1; }; } ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` { as_dir="$ac_dir" case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || { { $as_echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5 $as_echo "$as_me: error: cannot create directory $as_dir" >&2;} { (exit 1); exit 1; }; }; } ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # case $INSTALL in [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; esac ac_MKDIR_P=$MKDIR_P case $MKDIR_P in [\\/$]* | ?:[\\/]* ) ;; */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; esac _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. 
ac_datarootdir_hack=; ac_datarootdir_seen= ac_sed_dataroot=' /datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p ' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { $as_echo "$as_me:$LINENO: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 $as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_sed_extra="$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s|@configure_input@|$ac_sed_conf_input|;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@top_build_prefix@&$ac_top_build_prefix&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t s&@INSTALL@&$ac_INSTALL&;t t s&@MKDIR_P@&$ac_MKDIR_P&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 $as_echo "$as_me: error: could not create $ac_file" >&2;} { (exit 1); exit 1; }; } test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } && { $as_echo "$as_me:$LINENO: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined." >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined." 
>&2;} rm -f "$tmp/stdin" case $ac_file in -) cat "$tmp/out" && rm -f "$tmp/out";; *) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";; esac \ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 $as_echo "$as_me: error: could not create $ac_file" >&2;} { (exit 1); exit 1; }; } ;; :H) # # CONFIG_HEADER # if test x"$ac_file" != x-; then { $as_echo "/* $configure_input */" \ && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" } >"$tmp/config.h" \ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 $as_echo "$as_me: error: could not create $ac_file" >&2;} { (exit 1); exit 1; }; } if diff "$ac_file" "$tmp/config.h" >/dev/null 2>&1; then { $as_echo "$as_me:$LINENO: $ac_file is unchanged" >&5 $as_echo "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" mv "$tmp/config.h" "$ac_file" \ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 $as_echo "$as_me: error: could not create $ac_file" >&2;} { (exit 1); exit 1; }; } fi else $as_echo "/* $configure_input */" \ && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" \ || { { $as_echo "$as_me:$LINENO: error: could not create -" >&5 $as_echo "$as_me: error: could not create -" >&2;} { (exit 1); exit 1; }; } fi # Compute "$ac_file"'s index in $config_headers. _am_arg="$ac_file" _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in $_am_arg | $_am_arg:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || $as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$_am_arg" : 'X\(//\)[^/]' \| \ X"$_am_arg" : 'X\(//\)$' \| \ X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$_am_arg" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'`/stamp-h$_am_stamp_count ;; :C) { $as_echo "$as_me:$LINENO: executing $ac_file commands" >&5 $as_echo "$as_me: executing $ac_file commands" >&6;} ;; esac case $ac_file$ac_mode in "depfiles":C) test x"$AMDEP_TRUE" != x"" || { # Autoconf 2.62 quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. case $CONFIG_FILES in *\'*) eval set x "$CONFIG_FILES" ;; *) set x $CONFIG_FILES ;; esac shift for mf do # Strip MF so we end up with the name of the file. mf=`echo "$mf" | sed -e 's/:.*$//'` # Check whether this is an Automake generated Makefile or not. # We used to match only the files named `Makefile.in', but # some people rename them; so instead we look at the file content. # Grep'ing the first line is not enough: some people post-process # each Makefile.in and add a new line on top of each file to say so. # Grep'ing the whole file is not good either: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then dirpart=`$as_dirname -- "$mf" || $as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$mf" : 'X\(//\)[^/]' \| \ X"$mf" : 'X\(//\)$' \| \ X"$mf" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$mf" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` else continue fi # Extract the definition of DEPDIR, am__include, and am__quote # from the Makefile without running `make'. 
DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` test -z "$DEPDIR" && continue am__include=`sed -n 's/^am__include = //p' < "$mf"` test -z "am__include" && continue am__quote=`sed -n 's/^am__quote = //p' < "$mf"` # When using ansi2knr, U may be empty or an underscore; expand it U=`sed -n 's/^U = //p' < "$mf"` # Find all dependency output files, they are included files with # $(DEPDIR) in their names. We invoke sed twice because it is the # simplest approach to changing $(DEPDIR) to its actual value in the # expansion. for file in `sed -n " s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do # Make sure the directory exists. test -f "$dirpart/$file" && continue fdir=`$as_dirname -- "$file" || $as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$file" : 'X\(//\)[^/]' \| \ X"$file" : 'X\(//\)$' \| \ X"$file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` { as_dir=$dirpart/$fdir case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || { { $as_echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5 $as_echo "$as_me: error: cannot create directory $as_dir" >&2;} { (exit 1); exit 1; }; }; } # echo "creating $dirpart/$file" echo '# dummy' > "$dirpart/$file" done done } ;; "libtool":C) # See if we are running on zsh, and set the options which allow our # commands through without removal of \ escapes. if test -n "${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi cfgfile="${ofile}T" trap "$RM \"$cfgfile\"; exit 1" 1 2 15 $RM "$cfgfile" cat <<_LT_EOF >> "$cfgfile" #! $SHELL # `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. # Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION # Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: # NOTE: Changes made to this file will be lost: look at ltmain.sh. # # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, # 2006, 2007, 2008 Free Software Foundation, Inc. # Written by Gordon Matzigkeit, 1996 # # This file is part of GNU Libtool. # # GNU Libtool is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of # the License, or (at your option) any later version. # # As a special exception to the GNU General Public License, # if you distribute this file as part of a program or library that # is built using GNU Libtool, you may include this file under the # same distribution terms that you use for the rest of that program. 
# # GNU Libtool is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Libtool; see the file COPYING. If not, a copy # can be downloaded from http://www.gnu.org/licenses/gpl.html, or # obtained by writing to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # The names of the tagged configurations supported by this script. available_tags="CXX " # ### BEGIN LIBTOOL CONFIG # Whether or not to build static libraries. build_old_libs=$enable_static # Assembler program. AS=$AS # DLL creation program. DLLTOOL=$DLLTOOL # Object dumper program. OBJDUMP=$OBJDUMP # Which release of libtool.m4 was used? macro_version=$macro_version macro_revision=$macro_revision # Whether or not to build shared libraries. build_libtool_libs=$enable_shared # What type of objects to build. pic_mode=$pic_mode # Whether or not to optimize for fast installation. fast_install=$enable_fast_install # The host system. host_alias=$host_alias host=$host host_os=$host_os # The build system. build_alias=$build_alias build=$build build_os=$build_os # A sed program that does not truncate output. SED=$lt_SED # Sed that helps us avoid accidentally triggering echo(1) options like -n. Xsed="\$SED -e 1s/^X//" # A grep program that handles long lines. GREP=$lt_GREP # An ERE matcher. EGREP=$lt_EGREP # A literal string matcher. FGREP=$lt_FGREP # A BSD- or MS-compatible name lister. NM=$lt_NM # Whether we need soft or hard links. LN_S=$lt_LN_S # What is the maximum length of a command? max_cmd_len=$max_cmd_len # Object file suffix (normally "o"). objext=$ac_objext # Executable file suffix (normally ""). exeext=$exeext # whether the shell understands "unset". lt_unset=$lt_unset # turn spaces into newlines. SP2NL=$lt_lt_SP2NL # turn newlines into spaces. NL2SP=$lt_lt_NL2SP # How to create reloadable object files. reload_flag=$lt_reload_flag reload_cmds=$lt_reload_cmds # Method to check whether dependent libraries are shared objects. deplibs_check_method=$lt_deplibs_check_method # Command to use when deplibs_check_method == "file_magic". file_magic_cmd=$lt_file_magic_cmd # The archiver. AR=$lt_AR AR_FLAGS=$lt_AR_FLAGS # A symbol stripping program. STRIP=$lt_STRIP # Commands used to install an old-style archive. RANLIB=$lt_RANLIB old_postinstall_cmds=$lt_old_postinstall_cmds old_postuninstall_cmds=$lt_old_postuninstall_cmds # A C compiler. LTCC=$lt_CC # LTCC compiler flags. LTCFLAGS=$lt_CFLAGS # Take the output of nm and produce a listing of raw symbols and C names. global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe # Transform the output of nm in a proper C declaration. global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl # Transform the output of nm in a C name address pair. global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address # Transform the output of nm in a C name address pair when lib prefix is needed. global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix # The name of the directory that contains temporary libtool files. objdir=$objdir # Shell to use when invoking shell scripts. SHELL=$lt_SHELL # An echo program that does not interpret backslashes. ECHO=$lt_ECHO # Used to examine libraries when file_magic_cmd begins with "file". 
MAGIC_CMD=$MAGIC_CMD # Must we lock files when doing compilation? need_locks=$lt_need_locks # Tool to manipulate archived DWARF debug symbol files on Mac OS X. DSYMUTIL=$lt_DSYMUTIL # Tool to change global to local symbols on Mac OS X. NMEDIT=$lt_NMEDIT # Tool to manipulate fat objects and archives on Mac OS X. LIPO=$lt_LIPO # ldd/readelf like tool for Mach-O binaries on Mac OS X. OTOOL=$lt_OTOOL # ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. OTOOL64=$lt_OTOOL64 # Old archive suffix (normally "a"). libext=$libext # Shared library suffix (normally ".so"). shrext_cmds=$lt_shrext_cmds # The commands to extract the exported symbol list from a shared archive. extract_expsyms_cmds=$lt_extract_expsyms_cmds # Variables whose values should be saved in libtool wrapper scripts and # restored at link time. variables_saved_for_relink=$lt_variables_saved_for_relink # Do we need the "lib" prefix for modules? need_lib_prefix=$need_lib_prefix # Do we need a version for libraries? need_version=$need_version # Library versioning type. version_type=$version_type # Shared library runtime path variable. runpath_var=$runpath_var # Shared library path variable. shlibpath_var=$shlibpath_var # Is shlibpath searched before the hard-coded library search path? shlibpath_overrides_runpath=$shlibpath_overrides_runpath # Format of library name prefix. libname_spec=$lt_libname_spec # List of archive names. First name is the real one, the rest are links. # The last name is the one that the linker finds with -lNAME library_names_spec=$lt_library_names_spec # The coded name of the library, if different from the real name. soname_spec=$lt_soname_spec # Command to use after installation of a shared archive. postinstall_cmds=$lt_postinstall_cmds # Command to use after uninstallation of a shared archive. postuninstall_cmds=$lt_postuninstall_cmds # Commands used to finish a libtool library installation in a directory. finish_cmds=$lt_finish_cmds # As "finish_cmds", except a single script fragment to be evaled but # not shown. finish_eval=$lt_finish_eval # Whether we should hardcode library paths into libraries. hardcode_into_libs=$hardcode_into_libs # Compile-time system search path for libraries. sys_lib_search_path_spec=$lt_sys_lib_search_path_spec # Run-time system search path for libraries. sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec # Whether dlopen is supported. dlopen_support=$enable_dlopen # Whether dlopen of programs is supported. dlopen_self=$enable_dlopen_self # Whether dlopen of statically linked programs is supported. dlopen_self_static=$enable_dlopen_self_static # Commands to strip libraries. old_striplib=$lt_old_striplib striplib=$lt_striplib # The linker used to build libraries. LD=$lt_LD # Commands used to build an old-style archive. old_archive_cmds=$lt_old_archive_cmds # A language specific compiler. CC=$lt_compiler # Is the compiler the GNU compiler? with_gcc=$GCC # Compiler flag to turn off builtin functions. no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag # How to pass a linker flag through the compiler. wl=$lt_lt_prog_compiler_wl # Additional compiler flags for building library objects. pic_flag=$lt_lt_prog_compiler_pic # Compiler flag to prevent dynamic linking. link_static_flag=$lt_lt_prog_compiler_static # Does compiler simultaneously support -c and -o options? compiler_c_o=$lt_lt_cv_prog_compiler_c_o # Whether or not to add -lc for building shared libraries. 
build_libtool_need_lc=$archive_cmds_need_lc # Whether or not to disallow shared libs when runtime libs are static. allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes # Compiler flag to allow reflexive dlopens. export_dynamic_flag_spec=$lt_export_dynamic_flag_spec # Compiler flag to generate shared objects directly from archives. whole_archive_flag_spec=$lt_whole_archive_flag_spec # Whether the compiler copes with passing no objects directly. compiler_needs_object=$lt_compiler_needs_object # Create an old-style archive from a shared archive. old_archive_from_new_cmds=$lt_old_archive_from_new_cmds # Create a temporary old-style archive to link instead of a shared archive. old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds # Commands used to build a shared archive. archive_cmds=$lt_archive_cmds archive_expsym_cmds=$lt_archive_expsym_cmds # Commands used to build a loadable module if different from building # a shared archive. module_cmds=$lt_module_cmds module_expsym_cmds=$lt_module_expsym_cmds # Whether we are building with GNU ld or not. with_gnu_ld=$lt_with_gnu_ld # Flag that allows shared libraries with undefined symbols to be built. allow_undefined_flag=$lt_allow_undefined_flag # Flag that enforces no undefined symbols. no_undefined_flag=$lt_no_undefined_flag # Flag to hardcode \$libdir into a binary during linking. # This must work even if \$libdir does not exist hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec # If ld is used when linking, flag to hardcode \$libdir into a binary # during linking. This must work even if \$libdir does not exist. hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld # Whether we need a single "-rpath" flag with a separated argument. hardcode_libdir_separator=$lt_hardcode_libdir_separator # Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes # DIR into the resulting binary. hardcode_direct=$hardcode_direct # Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes # DIR into the resulting binary and the resulting library dependency is # "absolute",i.e impossible to change by setting \${shlibpath_var} if the # library is relocated. hardcode_direct_absolute=$hardcode_direct_absolute # Set to "yes" if using the -LDIR flag during linking hardcodes DIR # into the resulting binary. hardcode_minus_L=$hardcode_minus_L # Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR # into the resulting binary. hardcode_shlibpath_var=$hardcode_shlibpath_var # Set to "yes" if building a shared library automatically hardcodes DIR # into the library and all subsequent libraries and executables linked # against it. hardcode_automatic=$hardcode_automatic # Set to yes if linker adds runtime paths of dependent libraries # to runtime path list. inherit_rpath=$inherit_rpath # Whether libtool must link a program against all its dependency libraries. link_all_deplibs=$link_all_deplibs # Fix the shell variable \$srcfile for the compiler. fix_srcfile_path=$lt_fix_srcfile_path # Set to "yes" if exported symbols are required. always_export_symbols=$always_export_symbols # The commands to list exported symbols. export_symbols_cmds=$lt_export_symbols_cmds # Symbols that should not be listed in the preloaded symbols. exclude_expsyms=$lt_exclude_expsyms # Symbols that must always be exported. include_expsyms=$lt_include_expsyms # Commands necessary for linking programs (against libraries) with templates. prelink_cmds=$lt_prelink_cmds # Specify filename containing input files. 
file_list_spec=$lt_file_list_spec # How to hardcode a shared library path into an executable. hardcode_action=$hardcode_action # The directories searched by this compiler when creating a shared library. compiler_lib_search_dirs=$lt_compiler_lib_search_dirs # Dependencies to place before and after the objects being linked to # create a shared library. predep_objects=$lt_predep_objects postdep_objects=$lt_postdep_objects predeps=$lt_predeps postdeps=$lt_postdeps # The library search path used internally by the compiler when linking # a shared library. compiler_lib_search_path=$lt_compiler_lib_search_path # ### END LIBTOOL CONFIG _LT_EOF case $host_os in aix3*) cat <<\_LT_EOF >> "$cfgfile" # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test "X${COLLECT_NAMES+set}" != Xset; then COLLECT_NAMES= export COLLECT_NAMES fi _LT_EOF ;; esac ltmain="$ac_aux_dir/ltmain.sh" # We use sed instead of cat because bash on DJGPP gets confused if # if finds mixed CR/LF and LF-only lines. Since sed operates in # text mode, it properly converts lines to CR/LF. This bash problem # is reportedly fixed, but why not run on old versions too? sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ || (rm -f "$cfgfile"; exit 1) case $xsi_shell in yes) cat << \_LT_EOF >> "$cfgfile" # func_dirname file append nondir_replacement # Compute the dirname of FILE. If nonempty, add APPEND to the result, # otherwise set result to NONDIR_REPLACEMENT. func_dirname () { case ${1} in */*) func_dirname_result="${1%/*}${2}" ;; * ) func_dirname_result="${3}" ;; esac } # func_basename file func_basename () { func_basename_result="${1##*/}" } # func_dirname_and_basename file append nondir_replacement # perform func_basename and func_dirname in a single function # call: # dirname: Compute the dirname of FILE. If nonempty, # add APPEND to the result, otherwise set result # to NONDIR_REPLACEMENT. # value returned in "$func_dirname_result" # basename: Compute filename of FILE. # value retuned in "$func_basename_result" # Implementation must be kept synchronized with func_dirname # and func_basename. For efficiency, we do not delegate to # those functions but instead duplicate the functionality here. func_dirname_and_basename () { case ${1} in */*) func_dirname_result="${1%/*}${2}" ;; * ) func_dirname_result="${3}" ;; esac func_basename_result="${1##*/}" } # func_stripname prefix suffix name # strip PREFIX and SUFFIX off of NAME. # PREFIX and SUFFIX must not contain globbing or regex special # characters, hashes, percent signs, but SUFFIX may contain a leading # dot (in which case that matches only a dot). func_stripname () { # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are # positional parameters, so assign one to ordinary parameter first. func_stripname_result=${3} func_stripname_result=${func_stripname_result#"${1}"} func_stripname_result=${func_stripname_result%"${2}"} } # func_opt_split func_opt_split () { func_opt_split_opt=${1%%=*} func_opt_split_arg=${1#*=} } # func_lo2o object func_lo2o () { case ${1} in *.lo) func_lo2o_result=${1%.lo}.${objext} ;; *) func_lo2o_result=${1} ;; esac } # func_xform libobj-or-source func_xform () { func_xform_result=${1%.*}.lo } # func_arith arithmetic-term... func_arith () { func_arith_result=$(( $* )) } # func_len string # STRING may not start with a hyphen. func_len () { func_len_result=${#1} } _LT_EOF ;; *) # Bourne compatible functions. 
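# Illustration only (comments, no effect on the emitted script): the
# Bourne-shell fallbacks written out below reproduce with sed what the XSI
# variants above do with parameter expansion.  With hypothetical inputs:
#
#   func_stripname 'lib' '.la' 'libfoo.la'   # -> func_stripname_result=foo
#   func_dirname   'a/b/c.lo' '.extra' 'X'   # -> func_dirname_result=a/b.extra
#   func_lo2o      'c.lo'                    # -> func_lo2o_result=c.o (objext=o)
#
# i.e. prefix/suffix stripping, dirname-plus-append, and .lo -> .o mapping.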
cat << \_LT_EOF >> "$cfgfile" # func_dirname file append nondir_replacement # Compute the dirname of FILE. If nonempty, add APPEND to the result, # otherwise set result to NONDIR_REPLACEMENT. func_dirname () { # Extract subdirectory from the argument. func_dirname_result=`$ECHO "X${1}" | $Xsed -e "$dirname"` if test "X$func_dirname_result" = "X${1}"; then func_dirname_result="${3}" else func_dirname_result="$func_dirname_result${2}" fi } # func_basename file func_basename () { func_basename_result=`$ECHO "X${1}" | $Xsed -e "$basename"` } # func_stripname prefix suffix name # strip PREFIX and SUFFIX off of NAME. # PREFIX and SUFFIX must not contain globbing or regex special # characters, hashes, percent signs, but SUFFIX may contain a leading # dot (in which case that matches only a dot). # func_strip_suffix prefix name func_stripname () { case ${2} in .*) func_stripname_result=`$ECHO "X${3}" \ | $Xsed -e "s%^${1}%%" -e "s%\\\\${2}\$%%"`;; *) func_stripname_result=`$ECHO "X${3}" \ | $Xsed -e "s%^${1}%%" -e "s%${2}\$%%"`;; esac } # sed scripts: my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' my_sed_long_arg='1s/^-[^=]*=//' # func_opt_split func_opt_split () { func_opt_split_opt=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_opt"` func_opt_split_arg=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_arg"` } # func_lo2o object func_lo2o () { func_lo2o_result=`$ECHO "X${1}" | $Xsed -e "$lo2o"` } # func_xform libobj-or-source func_xform () { func_xform_result=`$ECHO "X${1}" | $Xsed -e 's/\.[^.]*$/.lo/'` } # func_arith arithmetic-term... func_arith () { func_arith_result=`expr "$@"` } # func_len string # STRING may not start with a hyphen. func_len () { func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` } _LT_EOF esac case $lt_shell_append in yes) cat << \_LT_EOF >> "$cfgfile" # func_append var value # Append VALUE to the end of shell variable VAR. func_append () { eval "$1+=\$2" } _LT_EOF ;; *) cat << \_LT_EOF >> "$cfgfile" # func_append var value # Append VALUE to the end of shell variable VAR. func_append () { eval "$1=\$$1\$2" } _LT_EOF ;; esac sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ || (rm -f "$cfgfile"; exit 1) mv -f "$cfgfile" "$ofile" || (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") chmod +x "$ofile" cat <<_LT_EOF >> "$ofile" # ### BEGIN LIBTOOL TAG CONFIG: CXX # The linker used to build libraries. LD=$lt_LD_CXX # Commands used to build an old-style archive. old_archive_cmds=$lt_old_archive_cmds_CXX # A language specific compiler. CC=$lt_compiler_CXX # Is the compiler the GNU compiler? with_gcc=$GCC_CXX # Compiler flag to turn off builtin functions. no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX # How to pass a linker flag through the compiler. wl=$lt_lt_prog_compiler_wl_CXX # Additional compiler flags for building library objects. pic_flag=$lt_lt_prog_compiler_pic_CXX # Compiler flag to prevent dynamic linking. link_static_flag=$lt_lt_prog_compiler_static_CXX # Does compiler simultaneously support -c and -o options? compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX # Whether or not to add -lc for building shared libraries. build_libtool_need_lc=$archive_cmds_need_lc_CXX # Whether or not to disallow shared libs when runtime libs are static. allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX # Compiler flag to allow reflexive dlopens. export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX # Compiler flag to generate shared objects directly from archives. 
whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX # Whether the compiler copes with passing no objects directly. compiler_needs_object=$lt_compiler_needs_object_CXX # Create an old-style archive from a shared archive. old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX # Create a temporary old-style archive to link instead of a shared archive. old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX # Commands used to build a shared archive. archive_cmds=$lt_archive_cmds_CXX archive_expsym_cmds=$lt_archive_expsym_cmds_CXX # Commands used to build a loadable module if different from building # a shared archive. module_cmds=$lt_module_cmds_CXX module_expsym_cmds=$lt_module_expsym_cmds_CXX # Whether we are building with GNU ld or not. with_gnu_ld=$lt_with_gnu_ld_CXX # Flag that allows shared libraries with undefined symbols to be built. allow_undefined_flag=$lt_allow_undefined_flag_CXX # Flag that enforces no undefined symbols. no_undefined_flag=$lt_no_undefined_flag_CXX # Flag to hardcode \$libdir into a binary during linking. # This must work even if \$libdir does not exist hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX # If ld is used when linking, flag to hardcode \$libdir into a binary # during linking. This must work even if \$libdir does not exist. hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_CXX # Whether we need a single "-rpath" flag with a separated argument. hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX # Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes # DIR into the resulting binary. hardcode_direct=$hardcode_direct_CXX # Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes # DIR into the resulting binary and the resulting library dependency is # "absolute",i.e impossible to change by setting \${shlibpath_var} if the # library is relocated. hardcode_direct_absolute=$hardcode_direct_absolute_CXX # Set to "yes" if using the -LDIR flag during linking hardcodes DIR # into the resulting binary. hardcode_minus_L=$hardcode_minus_L_CXX # Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR # into the resulting binary. hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX # Set to "yes" if building a shared library automatically hardcodes DIR # into the library and all subsequent libraries and executables linked # against it. hardcode_automatic=$hardcode_automatic_CXX # Set to yes if linker adds runtime paths of dependent libraries # to runtime path list. inherit_rpath=$inherit_rpath_CXX # Whether libtool must link a program against all its dependency libraries. link_all_deplibs=$link_all_deplibs_CXX # Fix the shell variable \$srcfile for the compiler. fix_srcfile_path=$lt_fix_srcfile_path_CXX # Set to "yes" if exported symbols are required. always_export_symbols=$always_export_symbols_CXX # The commands to list exported symbols. export_symbols_cmds=$lt_export_symbols_cmds_CXX # Symbols that should not be listed in the preloaded symbols. exclude_expsyms=$lt_exclude_expsyms_CXX # Symbols that must always be exported. include_expsyms=$lt_include_expsyms_CXX # Commands necessary for linking programs (against libraries) with templates. prelink_cmds=$lt_prelink_cmds_CXX # Specify filename containing input files. file_list_spec=$lt_file_list_spec_CXX # How to hardcode a shared library path into an executable. hardcode_action=$hardcode_action_CXX # The directories searched by this compiler when creating a shared library. 
compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX # Dependencies to place before and after the objects being linked to # create a shared library. predep_objects=$lt_predep_objects_CXX postdep_objects=$lt_postdep_objects_CXX predeps=$lt_predeps_CXX postdeps=$lt_postdeps_CXX # The library search path used internally by the compiler when linking # a shared library. compiler_lib_search_path=$lt_compiler_lib_search_path_CXX # ### END LIBTOOL TAG CONFIG: CXX _LT_EOF ;; "default-1":C) for ac_file in $CONFIG_FILES; do # Support "outfile[:infile[:infile...]]" case "$ac_file" in *:*) ac_file=`echo "$ac_file"|sed 's%:.*%%'` ;; esac # PO directories have a Makefile.in generated from Makefile.in.in. case "$ac_file" in */Makefile.in) # Adjust a relative srcdir. ac_dir=`echo "$ac_file"|sed 's%/[^/][^/]*$%%'` ac_dir_suffix="/`echo "$ac_dir"|sed 's%^\./%%'`" ac_dots=`echo "$ac_dir_suffix"|sed 's%/[^/]*%../%g'` # In autoconf-2.13 it is called $ac_given_srcdir. # In autoconf-2.50 it is called $srcdir. test -n "$ac_given_srcdir" || ac_given_srcdir="$srcdir" case "$ac_given_srcdir" in .) top_srcdir=`echo $ac_dots|sed 's%/$%%'` ;; /*) top_srcdir="$ac_given_srcdir" ;; *) top_srcdir="$ac_dots$ac_given_srcdir" ;; esac if test -f "$ac_given_srcdir/$ac_dir/POTFILES.in"; then rm -f "$ac_dir/POTFILES" test -n "$as_me" && echo "$as_me: creating $ac_dir/POTFILES" || echo "creating $ac_dir/POTFILES" cat "$ac_given_srcdir/$ac_dir/POTFILES.in" | sed -e "/^#/d" -e "/^[ ]*\$/d" -e "s,.*, $top_srcdir/& \\\\," | sed -e "\$s/\(.*\) \\\\/\1/" > "$ac_dir/POTFILES" POMAKEFILEDEPS="POTFILES.in" # ALL_LINGUAS, POFILES, GMOFILES, UPDATEPOFILES, DUMMYPOFILES depend # on $ac_dir but don't depend on user-specified configuration # parameters. if test -f "$ac_given_srcdir/$ac_dir/LINGUAS"; then # The LINGUAS file contains the set of available languages. if test -n "$OBSOLETE_ALL_LINGUAS"; then test -n "$as_me" && echo "$as_me: setting ALL_LINGUAS in configure.in is obsolete" || echo "setting ALL_LINGUAS in configure.in is obsolete" fi ALL_LINGUAS_=`sed -e "/^#/d" "$ac_given_srcdir/$ac_dir/LINGUAS"` # Hide the ALL_LINGUAS assigment from automake. eval 'ALL_LINGUAS''=$ALL_LINGUAS_' POMAKEFILEDEPS="$POMAKEFILEDEPS LINGUAS" else # The set of available languages was given in configure.in. eval 'ALL_LINGUAS''=$OBSOLETE_ALL_LINGUAS' fi case "$ac_given_srcdir" in .) srcdirpre= ;; *) srcdirpre='$(srcdir)/' ;; esac POFILES= GMOFILES= UPDATEPOFILES= DUMMYPOFILES= for lang in $ALL_LINGUAS; do POFILES="$POFILES $srcdirpre$lang.po" GMOFILES="$GMOFILES $srcdirpre$lang.gmo" UPDATEPOFILES="$UPDATEPOFILES $lang.po-update" DUMMYPOFILES="$DUMMYPOFILES $lang.nop" done # CATALOGS depends on both $ac_dir and the user's LINGUAS # environment variable. INST_LINGUAS= if test -n "$ALL_LINGUAS"; then for presentlang in $ALL_LINGUAS; do useit=no if test "%UNSET%" != "$LINGUAS"; then desiredlanguages="$LINGUAS" else desiredlanguages="$ALL_LINGUAS" fi for desiredlang in $desiredlanguages; do # Use the presentlang catalog if desiredlang is # a. equal to presentlang, or # b. a variant of presentlang (because in this case, # presentlang can be used as a fallback for messages # which are not translated in the desiredlang catalog). 
case "$desiredlang" in "$presentlang"*) useit=yes;; esac done if test $useit = yes; then INST_LINGUAS="$INST_LINGUAS $presentlang" fi done fi CATALOGS= if test -n "$INST_LINGUAS"; then for lang in $INST_LINGUAS; do CATALOGS="$CATALOGS $lang.gmo" done fi test -n "$as_me" && echo "$as_me: creating $ac_dir/Makefile" || echo "creating $ac_dir/Makefile" sed -e "/^POTFILES =/r $ac_dir/POTFILES" -e "/^# Makevars/r $ac_given_srcdir/$ac_dir/Makevars" -e "s|@POFILES@|$POFILES|g" -e "s|@GMOFILES@|$GMOFILES|g" -e "s|@UPDATEPOFILES@|$UPDATEPOFILES|g" -e "s|@DUMMYPOFILES@|$DUMMYPOFILES|g" -e "s|@CATALOGS@|$CATALOGS|g" -e "s|@POMAKEFILEDEPS@|$POMAKEFILEDEPS|g" "$ac_dir/Makefile.in" > "$ac_dir/Makefile" for f in "$ac_given_srcdir/$ac_dir"/Rules-*; do if test -f "$f"; then case "$f" in *.orig | *.bak | *~) ;; *) cat "$f" >> "$ac_dir/Makefile" ;; esac fi done fi ;; esac done ;; esac done # for ac_tag { (exit 0); exit 0; } _ACEOF chmod +x $CONFIG_STATUS ac_clean_files=$ac_clean_files_save test $ac_write_fail = 0 || { { $as_echo "$as_me:$LINENO: error: write failure creating $CONFIG_STATUS" >&5 $as_echo "$as_me: error: write failure creating $CONFIG_STATUS" >&2;} { (exit 1); exit 1; }; } # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. 
$ac_cs_success || { (exit 1); exit 1; } fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { $as_echo "$as_me:$LINENO: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi { $as_echo "$as_me:$LINENO: result: Unit testing: ${enables_cppunit} Java binding: ${enables_swig_java} Python binding: ${enables_swig_python} ($PYTHON_VERSION) Alt.Python binding: ${enables_altpython} ($ALTPYTHON_VERSION) Available third-party features: GridFTP: ${enables_gridftp} GFAL: ${enables_gfal} S3: ${enables_s3} Xrootd: ${enables_xrootd} MYSQL CLIENT LIB: ${enables_mysql} LDAP: ${enables_ldap} xmlsec1: ${enables_xmlsec1} ARGUS: ${enables_argus} NSS: ${enables_nss} CANL++: ${enables_canlxx} BDB C++: ${enables_dbcxx} SQLite: ${enables_sqlite} LDNS: ${enables_ldns} Enabled features: Local jobs info in BDB: ${enables_dbjstore} Included components: HED: ${enables_hed} A-REX service: ${enables_a_rex_service} GRIDFTPD service: ${enables_gridftpd_service} LDAP Info service: ${enables_ldap_service} GIIS service: ${enables_giis_service} CACHE service: ${enables_cache_service} DATADELIVERY service: ${enables_datadelivery_service} ACIX service: ${enables_acix} COMPUTE clients: ${enables_compute_client} DATA clients: ${enables_data_client} CREDENTIAL clients: ${enables_credentials_client} ECHO client: ${enables_echo_client} JURA client: ${enables_jura_client} SAML VOMS client: ${enables_saml_client} WSRF client: ${enables_wsrf_client} UNICORE client (ACC): ${enables_unicore_client} EMI ES client (ACC): ${enables_emies_client} SRM client (DMC): ${enables_srm_dmc} Documentation: ${enables_doc} Monitoring: LDAP Monitor ${enables_ldap_monitor} WS Monitor ${enables_ws_monitor} " >&5 $as_echo " Unit testing: ${enables_cppunit} Java binding: ${enables_swig_java} Python binding: ${enables_swig_python} ($PYTHON_VERSION) Alt.Python binding: ${enables_altpython} ($ALTPYTHON_VERSION) Available third-party features: GridFTP: ${enables_gridftp} GFAL: ${enables_gfal} S3: ${enables_s3} Xrootd: ${enables_xrootd} MYSQL CLIENT LIB: ${enables_mysql} LDAP: ${enables_ldap} xmlsec1: ${enables_xmlsec1} ARGUS: ${enables_argus} NSS: ${enables_nss} CANL++: ${enables_canlxx} BDB C++: ${enables_dbcxx} SQLite: ${enables_sqlite} LDNS: ${enables_ldns} Enabled features: Local jobs info in BDB: ${enables_dbjstore} Included components: HED: ${enables_hed} A-REX service: ${enables_a_rex_service} GRIDFTPD service: ${enables_gridftpd_service} LDAP Info service: ${enables_ldap_service} GIIS service: ${enables_giis_service} CACHE service: ${enables_cache_service} DATADELIVERY service: ${enables_datadelivery_service} ACIX service: ${enables_acix} COMPUTE clients: ${enables_compute_client} DATA clients: ${enables_data_client} CREDENTIAL clients: ${enables_credentials_client} ECHO client: ${enables_echo_client} JURA client: ${enables_jura_client} SAML VOMS client: ${enables_saml_client} WSRF client: ${enables_wsrf_client} UNICORE client (ACC): ${enables_unicore_client} EMI ES client (ACC): ${enables_emies_client} SRM client (DMC): ${enables_srm_dmc} Documentation: ${enables_doc} Monitoring: LDAP Monitor ${enables_ldap_monitor} WS Monitor ${enables_ws_monitor} " >&6; } nordugrid-arc-5.4.2/PaxHeaders.7502/nordugrid-arc.spec.in0000644000000000000000000000012713214315410021213 xustar000000000000000027 mtime=1513200392.861335 30 atime=1513200652.946661221 30 ctime=1513200658.606730445 
nordugrid-arc-5.4.2/nordugrid-arc.spec.in0000644000175000002070000016412613214315410021267 0ustar00mockbuildmock00000000000000%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")} %{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(0)")} %{!?_pkgdocdir: %global _pkgdocdir %{_docdir}/%{name}-%{version}} %{!?_jnidir: %global _jnidir %{_libdir}/java} %if %{?rhel}%{!?rhel:0} == 5 %global __python26 %{_bindir}/python2.6 %{!?python26_sitearch: %global python26_sitearch %(%{__python26} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")} %{!?python26_sitelib: %global python26_sitelib %(%{__python26} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(0)")} # Disable the default python byte compilation %global __os_install_post %(echo '%{__os_install_post}' | sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g') %endif %if %{?filter_setup:1}%{!?filter_setup:0} %filter_provides_in %{python_sitearch}.*\.so$ %if %{?rhel}%{!?rhel:0} == 5 %filter_provides_in %{python26_sitearch}.*\.so$ %endif %if %{?fedora}%{!?fedora:0} >= 13 %filter_provides_in %{python3_sitearch}.*\.so$ %endif %filter_setup %endif # # Build dependency descrepancies across platforms # %if %{?suse_version:1}%{!?suse_version:0} %global glibmm2_devel glibmm2-devel %global openldap_devel openldap2-devel %else %global glibmm2_devel glibmm24-devel %global openldap_devel openldap-devel %endif %if %{?fedora}%{!?fedora:5} > 4 && %{?suse_version:0}%{!?suse_version:1} %global nss_devel nss-devel %else %global nss_devel mozilla-nss-devel %endif %if %{?fedora}%{!?fedora:0} >= 12 || %{?rhel}%{!?rhel:0} >= 6 || %{?suse_version:1}%{!?suse_version:0} %global libuuid_devel libuuid-devel %else %global libuuid_devel e2fsprogs-devel %endif # # xROOTd # %if %{?fedora}%{!?fedora:0} >= 12 || %{?rhel}%{!?rhel:0} %global with_xrootd %{!?_without_xrootd:1}%{?_without_xrootd:0} %else %global with_xrootd 0 %endif # # Java # %if %{?fedora}%{!?fedora:0} >= 7 || %{?rhel}%{!?rhel:0} >= 5 %if %{?rhel}%{!?rhel:0} == 5 %ifarch ppc # RHEL 5 ppc only has java 1.4 %global with_java 0 %global with_junit 0 %else %global with_java %{!?_without_java:1}%{?_without_java:0} %global with_junit %{!?_without_junit:1}%{?_without_junit:0} %endif %else %global with_java %{!?_without_java:1}%{?_without_java:0} %global with_junit %{!?_without_junit:1}%{?_without_junit:0} %endif %else %global with_java 0 %global with_junit 0 %endif %global with_gcj %{!?_with_gcj:0}%{?_with_gcj:1} # # Python # %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 || %{?suse_version:1}%{!?suse_version:0} %if %{?rhel}%{!?rhel:0} == 6 %ifarch ppc64 # The python module doesn't build on RHEL6 ppc64 (.toc1 section overflow) %global with_python 0 %else %global with_python 1 %endif %else %global with_python 1 %endif %else %global with_python 0 %endif %if %{with_python} %if %{?fedora}%{!?fedora:0} || %{?suse_version:1}%{!?suse_version:0} %global with_pylint %{!?_without_pylint:1}%{?_without_pylint:0} %else %global with_pylint 0 %endif %else %global with_pylint 0 %endif %if %{?fedora}%{!?fedora:0} >= 7 || %{?rhel}%{!?rhel:0} >= 5 || %{?suse_version}%{!?suse_version:0} >= 1110 %global with_acix 1 %else %global with_acix 0 %endif %if %{?fedora}%{!?fedora:0} >= 21 || %{?rhel}%{!?rhel:0} >= 5 %global with_s3 1 %else %global with_s3 0 %endif %if %{?fedora}%{!?fedora:0} >= 21 || 
%{?rhel}%{!?rhel:0} >= 5 %global with_gfal 1 %else %global with_gfal 0 %endif %global with_canl 0 %if %{?fedora}%{!?fedora:0} || %{?rhel}%{!?rhel:0} %global with_xmlsec1 %{!?_without_xmlsec1:1}%{?_without_xmlsec1:0} %else %global with_xmlsec1 0 %endif # # CA utils # %if %{?suse_version:1}%{!?suse_version:0} %global with_cautils 0 %else %global with_cautils 1 %endif # SQLite %if %{?fedora}%{!?fedora:0} >= 11 || %{?rhel}%{!?rhel:0} >= 6 %global with_sqlite 1 %else %global with_sqlite 0 %endif # LDNS %if %{?fedora}%{!?fedora:0} >= 13 || %{?rhel}%{!?rhel:0} >= 5 %global with_ldns 1 %else %global with_ldns 0 %endif %global pkgdir arc %if %{?fedora}%{!?fedora:0} >= 25 || %{?rhel}%{!?rhel:0} >= 8 %global use_systemd 1 %else %global use_systemd 0 %endif # # Macros for scripts # # Stop and disable service on package removal %if %{use_systemd} %define stop_on_removal() %{expand:%%systemd_preun %(sed 's/[^ ]*/&.service/g' <<< '%{?*}')} %else %if %{?stop_on_removal:0}%{!?stop_on_removal:1} %global stop_on_removal() if [ $1 -eq 0 ]; then for s in %*; do service $s stop > /dev/null 2>&1 || : ; done; for s in %*; do /sbin/chkconfig --del $s; done; fi %endif %endif # Enable a service %if %{use_systemd} %define enable_service() %{expand:%%systemd_post %(sed 's/[^ ]*/&.service/g' <<< '%{?*}')} %else %if %{?suse_version:1}%{!?suse_version:0} %define enable_service() %{expand:%%fillup_and_insserv -f %{?*}} %else %define enable_service() for s in %{?*}; do /sbin/chkconfig --add $s ; done %endif %endif # Conditionally restart service on package update %if %{use_systemd} %define condrestart_on_update() %{expand:%%systemd_postun_with_restart %(sed 's/[^ ]*/&.service/g' <<< '%{?*}')} %else %if %{?suse_version:1}%{!?suse_version:0} %define condrestart_on_update() %{expand:%%restart_on_update %{?*}} %{expand:%%insserv_cleanup} %else %define condrestart_on_update() if [ $1 -ge 1 ]; then for s in %{?*}; do service $s condrestart > /dev/null 2>&1 || : ; done; fi %endif %endif # Standard service requirements %if %{use_systemd} %define service_post_requires systemd-units %define service_preun_requires systemd-units %define service_postun_requires systemd-units %else %if %{?suse_version:1}%{!?suse_version:0} %define service_post_requires %{insserv_prereq} %define service_preun_requires %{insserv_prereq} %define service_postun_requires %{insserv_prereq} %else %define service_post_requires chkconfig %define service_preun_requires chkconfig, initscripts %define service_postun_requires initscripts %endif %endif Name: @PACKAGE@ Version: @baseversion@ Release: @fedorarelease@%{?dist} Summary: Advanced Resource Connector Grid Middleware Group: System Environment/Daemons License: ASL 2.0 URL: http://www.nordugrid.org/ Source: http://download.nordugrid.org/packages/%{name}/releases/%{version}@preversion@/src/%{name}-%{version}@preversion@.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) # Packages dropped without replacements Obsoletes: nordugrid-arc-chelonia < 2.0.0 Obsoletes: nordugrid-arc-hopi < 2.0.0 Obsoletes: nordugrid-arc-isis < 2.0.0 Obsoletes: nordugrid-arc-janitor < 2.0.0 Obsoletes: nordugrid-arc-doxygen < 4.0.0 BuildRequires: cppunit-devel BuildRequires: pkgconfig %if %{?fedora}%{!?fedora:0} >= 12 || %{?rhel}%{!?rhel:0} >= 6 || %{?suse_version:1}%{!?suse_version:0} BuildRequires: libuuid-devel %else BuildRequires: e2fsprogs-devel %endif BuildRequires: gettext BuildRequires: python-devel %if %{?rhel}%{!?rhel:0} == 5 BuildRequires: python26-devel %endif %if %{?fedora}%{!?fedora:0} >= 13 
BuildRequires: python3-devel %endif %if %{with_pylint} BuildRequires: pylint %endif BuildRequires: %{glibmm2_devel} BuildRequires: glib2-devel BuildRequires: libxml2-devel BuildRequires: openssl BuildRequires: openssl-devel %if %{with_xmlsec1} BuildRequires: xmlsec1-devel >= 1.2.4 BuildRequires: xmlsec1-openssl-devel >= 1.2.4 %endif BuildRequires: %{nss_devel} BuildRequires: %{openldap_devel} BuildRequires: globus-common-devel BuildRequires: globus-ftp-client-devel BuildRequires: globus-ftp-control-devel %if %{?fedora}%{!?fedora:0} >= 23 || %{?rhel}%{!?rhel:0} >= 5 BuildRequires: globus-gssapi-gsi-devel >= 12.2 %else BuildRequires: globus-gssapi-gsi-devel < 12.2 %endif %if %{with_canl} BuildRequires: canl-c++-devel %endif %if %{with_xrootd} %if %{?fedora}%{!?fedora:0} >= 17 || %{?rhel}%{!?rhel:0} >= 5 BuildRequires: xrootd-client-devel %else BuildRequires: xrootd-devel %endif %endif %if %{with_gfal} BuildRequires: gfal2-devel %endif %if %{with_s3} BuildRequires: libs3-devel %endif %if %{?suse_version}%{!?suse_version:0} == 1110 BuildRequires: db43-devel %else %if %{?fedora}%{!?fedora:0} >= 15 || %{?rhel}%{!?rhel:0} >= 7 BuildRequires: libdb-cxx-devel %else %if %{?fedora}%{!?fedora:0} == 14 BuildRequires: libdb-devel %else BuildRequires: db4-devel %endif %endif %endif %if %{with_java} BuildRequires: java-devel >= 1.5.0 BuildRequires: jpackage-utils %if %{with_gcj} BuildRequires: java-gcj-compat-devel %endif %if %{with_junit} BuildRequires: junit %endif %endif # Needed for Boinc backend testing during make check BuildRequires: perl(DBI) # Needed for infoprovider testing during make check BuildRequires: perl(XML::Simple) # Needed for LRMS testing during make check BuildRequires: perl(Test::Harness) BuildRequires: perl(Test::Simple) # Needed to run ACIX unit tests %if %{with_acix} %if %{?suse_version:1}%{!?suse_version:0} BuildRequires: python-twisted %else BuildRequires: python-twisted-core %endif BuildRequires: python-twisted-web %if %{?suse_version:1}%{!?suse_version:0} BuildRequires: python-openssl %else BuildRequires: pyOpenSSL %endif %if %{?rhel}%{!?rhel:0} == 5 BuildRequires: python-hashlib %endif %endif %if %{?fedora}%{!?fedora:0} >= 6 || %{?rhel}%{!?rhel:0} >= 5 # The compilation of the selinux module fails on Fedora 5 BuildRequires: selinux-policy-devel BuildRequires: checkpolicy %endif BuildRequires: swig %if %{?suse_version:1}%{!?suse_version:0} # Needed for python/doxy2swig.py BuildRequires: python-xml %endif %if %{?fedora}%{!?fedora:0} >= 4 || %{?rhel}%{!?rhel:0} >= 5 BuildRequires: libtool-ltdl-devel %else BuildRequires: libtool %endif %if %{with_sqlite} BuildRequires: sqlite-devel >= 3.6 %endif %if %{with_ldns} BuildRequires: ldns-devel >= 1.6.8 %endif %description NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). The Advanced Resource Connector (ARC) brings computing resources together across institutional boundaries. This concept is commonly referred to as a "computational grid". Historically, grids address the organization of distributed storage of data and parallel computation, but could be expanded to arbitrary services. Just like the web, ARC has its roots in the IT infrastructure that was erected to analyze the experiments for high energy physics at CERN. The first release, ARC-0.x, was dependent on Globus, the current release keeps that compatibility but can also be used independently. 
%package client Summary: ARC command line interface Group: Applications/Internet Requires: %{name} = %{version}-%{release} Requires: %{name}-plugins-needed = %{version}-%{release} %description client NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). For the regular user of any ARC-based computational grid, this client package contains (or depends on) all packages that are needed to submit jobs, query their status and retrieve results. %package hed Summary: ARC Hosting Environment Daemon Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description hed NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). The ARC Hosting Environment Daemon (HED). This daemon is a container for ARC services. %package gridftpd Summary: ARC gridftp server Group: System Environment/Daemons Requires: %{name} = %{version}-%{release} Requires: %{name}-plugins-globus Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description gridftpd NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains the ARC gridftp server which has a plugin framework. Current plugins include: fileplugin : Standard gridftp server based on Globus globus-ftp-control jobplugin : Classical ARC job submission interface %package cache-service Summary: ARC cache service Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: %{name}-hed = %{version}-%{release} Requires: %{name}-plugins-needed = %{version}-%{release} Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description cache-service NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains the ARC cache service. It provides a way to perform some operations on the A-REX cache remotely. It can be especially helpful for data management within a pilot job framework. %package datadelivery-service Summary: ARC data delivery service Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: %{name}-hed = %{version}-%{release} Requires: %{name}-plugins-needed = %{version}-%{release} Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description datadelivery-service NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains the ARC data delivery service. 
%package ldap-infosys Summary: ARC LDAP information service Group: System Environment/Libraries %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif Requires: openldap-servers Requires: glue-schema >= 2.0.10 Requires: bdii Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %if %{?fedora}%{!?fedora:0} >= 23 || %{?rhel}%{!?rhel:0} >= 8 Requires(post): policycoreutils-python-utils Requires(postun): policycoreutils-python-utils %else %if %{?fedora}%{!?fedora:0} >= 11 || %{?rhel}%{!?rhel:0} >= 6 Requires(post): policycoreutils-python Requires(postun): policycoreutils-python %else %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 Requires(post): policycoreutils Requires(postun): policycoreutils %endif %endif %endif %description ldap-infosys NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains the LDAP based information system for ARC. This package is not self-contained. it should be pulled in by either nordugrid-arc-arex to be a part of a local information system or by nordugrid-arc-egiis to be a part of a EGIIS service. %package aris Summary: ARC local information system Group: System Environment/Libraries %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif Requires: %{name}-ldap-infosys = %{version}-%{release} Requires: openldap-servers Requires: glue-schema >= 2.0.10 Requires: bdii Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %if %{?fedora}%{!?fedora:0} >= 23 || %{?rhel}%{!?rhel:0} >= 8 Requires(post): policycoreutils-python-utils Requires(postun): policycoreutils-python-utils %else %if %{?fedora}%{!?fedora:0} >= 11 || %{?rhel}%{!?rhel:0} >= 6 Requires(post): policycoreutils-python Requires(postun): policycoreutils-python %else %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 Requires(post): policycoreutils Requires(postun): policycoreutils %endif %endif %endif %description aris NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains the LDAP based information system for ARC. This package is not self-contained but is closely connected to nordugrid-arc-arex. %package egiis Summary: ARC EGIIS service Group: System Environment/Libraries Requires: %{name}-ldap-infosys = %{version}-%{release} Requires: openldap-servers Requires: glue-schema >= 2.0.10 Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %if %{?fedora}%{!?fedora:0} >= 6 || %{?rhel}%{!?rhel:0} >= 5 Requires(post): policycoreutils Requires(postun): policycoreutils %endif %description egiis NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). The EGIIS is the Information Index Service used by ARC, This service is used to set up a ldap-based server that local information systems register to. 
%package ldap-monitor Summary: ARC LDAP monitor service Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: php Requires: php-gd Requires: php-ldap %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif %description ldap-monitor NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains the LDAP monitor system for ARC. This package is self-contained. It is used to set up a web-based monitor which pulls information from a EGIIS and shows it graphically. %package ws-monitor Summary: ARC WS monitor service Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif %description ws-monitor NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains the WS monitor system for ARC. This package is self-contained. It is used to set up a web-based monitor which pulls information from ISIS and shows it graphically. %package arex Summary: ARC Remote EXecution service Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: %{name}-hed = %{version}-%{release} Requires: %{name}-ldap-infosys = %{version}-%{release} Requires: %{name}-plugins-needed = %{version}-%{release} %if %{with_python} Requires: python2-%{name} = %{version}-%{release} %endif Requires: perl(XML::Simple) %if %{?fedora}%{!?fedora:0} >= 26 || %{?rhel}%{!?rhel:0} >= 8 Requires: python2-stomppy %else Requires: stomppy %endif Requires: python-ldap Requires: python-dirq Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description arex NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). The ARC Remote EXecution service (AREX) provides a service for the execution of compute jobs and the transfer of input or output data. %package plugins-needed Summary: ARC base plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %description plugins-needed NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). ARC base plugins. This includes the Message Chain Components (MCCs) and Data Manager Components (DMCs). %package plugins-globus Summary: ARC Globus plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %if %{?fedora}%{!?fedora:0} >= 23 || %{?rhel}%{!?rhel:0} >= 5 Requires: globus-gssapi-gsi >= 12.2 %else Requires: globus-gssapi-gsi < 12.2 %endif %description plugins-globus NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). ARC Globus plugins. This includes the Globus dependent Data Manager Components (DMCs). %if %{with_canl} %package arcproxyalt Summary: ARC proxy client based on canl Group: Applications/Internet Requires: %{name} = %{version}-%{release} %description arcproxyalt NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). 
This package includes a preview of the arcproxy command based on canl. %endif %if %{with_xrootd} %package plugins-xrootd Summary: ARC xrootd plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %description plugins-xrootd NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). ARC xrootd plugins. These allow access to data through the xrootd protocol. %endif %if %{with_gfal} %package plugins-gfal Summary: ARC GFAL plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %description plugins-gfal NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). ARC plugins for GFAL2. This allows third-party transfer and adds support for several extra transfer protocols (rfio, dcap, gsidcap). Specific protocol support is provided by separate gfal2-plugin packages. %endif %if %{with_s3} %package plugins-s3 Summary: ARC S3 plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %description plugins-s3 NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). ARC plugins for S3. These allow access to data through the S3 protocol. %endif %if %{with_acix} %package acix-core Summary: ARC cache index - core Group: System Environment/Libraries %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif Requires: python-twisted-core Requires: python-twisted-web Requires: pyOpenSSL %if %{?rhel}%{!?rhel:0} == 5 Requires: python-hashlib %endif %description acix-core NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). Core components of the ARC Cache Index (ACIX). %package acix-cache Summary: ARC cache index - cache server Group: System Environment/Libraries %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif Requires: python-twisted-core Requires: python-twisted-web Requires: pyOpenSSL %if %{?rhel}%{!?rhel:0} == 5 Requires: python-hashlib %endif Requires: %{name}-acix-core = %{version}-%{release} Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description acix-cache NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). Cache server component of the ARC Cache Index (ACIX), usually installed alongside A-REX. This component collects information on the content of an A-REX cache. %package acix-index Summary: ARC cache index - index server Group: System Environment/Libraries %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif Requires: python-twisted-core Requires: python-twisted-web Requires: pyOpenSSL %if %{?rhel}%{!?rhel:0} == 5 Requires: python-hashlib %endif Requires: %{name}-acix-core = %{version}-%{release} Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description acix-index NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). 
Index server component of the ARC Cache Index (ACIX), usually installed independently of any A-REX installation. This component pulls cache content from cache servers and can be queried by clients for the location of cached files. %endif %package devel Summary: ARC development files Group: Development/Libraries Requires: %{name} = %{version}-%{release} Requires: %{glibmm2_devel} Requires: glib2-devel Requires: libxml2-devel Requires: openssl-devel %description devel NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). Header files and libraries needed to develop applications using ARC. %if %{with_python} %package -n python2-%{name} Summary: ARC Python wrapper Group: Development/Libraries %{?python_provide:%python_provide python2-%{name}} Provides: %{name}-python = %{version}-%{release} Obsoletes: %{name}-python < 5.3.3 Requires: %{name} = %{version}-%{release} %description -n python2-%{name} NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). Python bindings for ARC. %endif %if %{?rhel}%{!?rhel:0} == 5 %package -n python26-%{name} Summary: ARC Python wrapper Group: Development/Libraries %{?python_provide:%python_provide python26-%{name}} Provides: %{name}-python26 = %{version}-%{release} Obsoletes: %{name}-python26 < 5.3.3 Requires: %{name} = %{version}-%{release} Requires: python(abi) = 2.6 %description -n python26-%{name} NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). Python bindings for ARC. %endif %if %{?fedora}%{!?fedora:0} >= 13 %package -n python3-%{name} Summary: ARC Python wrapper Group: Development/Libraries %{?python_provide:%python_provide python3-%{name}} Provides: %{name}-python3 = %{version}-%{release} Obsoletes: %{name}-python3 < 5.3.3 Requires: %{name} = %{version}-%{release} %description -n python3-%{name} NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). Python bindings for ARC. %endif %if %{with_java} %package java Summary: ARC Java wrapper Group: Development/Libraries Requires: %{name} = %{version}-%{release} %if %{?fedora}%{!?fedora:0} >= 20 || %{?rhel}%{!?rhel:0} >= 7 Requires: java-headless %else Requires: java %endif Requires: jpackage-utils %if %{with_gcj} Requires(post): java-gcj-compat Requires(postun): java-gcj-compat %endif %description java NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). Java bindings for ARC. %endif %package gridmap-utils Summary: NorduGrid authorization tools Group: Applications/Internet Requires: perl(Crypt::SSLeay) Requires: perl(SOAP::Lite) Requires: perl(Crypt::OpenSSL::X509) Requires: crontabs %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif %description gridmap-utils NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains the ARC authorization machinery. A prominent tool distributed in this package is the nordugridmap script to collect user information from the virtual organizations. 
%if %{with_cautils} %package ca-utils Summary: NorduGrid authentication tools Group: Applications/Internet Requires: fetch-crl Requires(post): fetch-crl Requires(post): chkconfig %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif %description ca-utils NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). The nordugrid-arc-ca-utils packages has been obsoleted. The functionality of the grid-update-crls tool is provided by the fetch-crl tool in the fetch-crl package. %endif %package misc-utils Summary: NorduGrid misc tools Group: Applications/Internet Requires: %{name} = %{version}-%{release} Requires: %{name}-plugins-needed = %{version}-%{release} %description misc-utils NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains utilities for various tasks including testing. The package is usually not required by users or sysadmins but is mainly for developers. %prep %setup @fedorasetupopts@ %if %{?fedora}%{!?fedora:0} <= 9 && %{?rhel}%{!?rhel:0} <= 5 # Older versions of SELinux does not have policy for open cd selinux sed -e 's/ open / /' -e /open/d -i %{name}-egiis.te cd - %endif %build if pkg-config --atleast-version 2.6 sigc++-2.0 ; then if [ `echo __GNUC__ | gcc -E - | tail -1` -lt 6 ] ; then # Workaround for too new libsigc++/glibmm, too old gcc combination export CXXFLAGS="%{optflags} -std=c++11" fi fi %configure --disable-static \ %if ! %{with_java} --disable-java \ %endif %if ! %{with_python} --disable-swig-python \ %endif %if ! %{with_acix} --disable-acix \ %endif %if %{with_gfal} --enable-gfal \ %endif %if %{with_s3} --enable-s3 \ %endif %if %{?rhel}%{!?rhel:0} == 5 --with-altpython=python26 \ %endif %if %{?fedora}%{!?fedora:0} >= 13 --with-altpython=python3 \ %endif %if %{with_canl} --enable-canlxx \ %endif %if ! %{with_xrootd} --disable-xrootd \ %endif %if %{with_sqlite} --enable-sqlite \ %endif %if ! %{with_ldns} --disable-ldns \ %endif %if %{use_systemd} --with-systemd-units-location=%{_unitdir} \ %endif --disable-doc \ --with-docdir=%{_pkgdocdir} \ --with-jnidir=%{_jnidir} make %{?_smp_mflags} %if %{?fedora}%{!?fedora:0} >= 6 || %{?rhel}%{!?rhel:0} >= 5 cd selinux make -f /usr/share/selinux/devel/Makefile cd - %endif %check make check %install rm -rf $RPM_BUILD_ROOT make install DESTDIR=$RPM_BUILD_ROOT # Install Logrotate. mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d install -p -m 644 debian/%{name}-arex.logrotate \ $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/%{name}-arex install -p -m 644 debian/%{name}-gridftpd.logrotate \ $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/%{name}-gridftpd install -p -m 644 debian/%{name}-aris.logrotate \ $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/%{name}-aris install -p -m 644 debian/%{name}-datadelivery-service.logrotate \ $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/%{name}-datadelivery-service find $RPM_BUILD_ROOT -type f -name \*.la -exec rm -fv '{}' ';' # The py-compile script in the source tarball is old (RHEL 6) # It does the wrong thing for python 3 - remove and let rpmbuild do it right find $RPM_BUILD_ROOT -type f -name \*.pyc -exec rm -fv '{}' ';' find $RPM_BUILD_ROOT -type f -name \*.pyo -exec rm -fv '{}' ';' # libarcglobusutils is not part of the ARC api. 
find $RPM_BUILD_ROOT -name libarcglobusutils.so -exec rm -fv '{}' ';' # Create log directory mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/log/arc # Create spool directories for Jura mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/spool/arc mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/spool/arc/ssm mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/spool/arc/urs %find_lang %{name} %if %{with_java} && %{with_gcj} %{_bindir}/aot-compile-rpm %endif %if %{?fedora}%{!?fedora:0} >= 6 || %{?rhel}%{!?rhel:0} >= 5 cd selinux mkdir -p $RPM_BUILD_ROOT%{_datadir}/selinux/packages/%{name} install -p -m 644 %{name}-egiis.pp \ $RPM_BUILD_ROOT%{_datadir}/selinux/packages/%{name} cd - %endif %if %{?rhel}%{!?rhel:0} == 5 %{__python} -c 'import compileall; compileall.compile_dir("'"$RPM_BUILD_ROOT"'", 10, "%{python_sitearch}", 1)' > /dev/null %{__python} -O -c 'import compileall; compileall.compile_dir("'"$RPM_BUILD_ROOT"'", 10, "%{python_sitearch}", 1)' > /dev/null %{__python26} -c 'import compileall; compileall.compile_dir("'"$RPM_BUILD_ROOT%{python26_sitearch}"'", 10, "%{python26_sitearch}", 1)' > /dev/null %{__python26} -O -c 'import compileall; compileall.compile_dir("'"$RPM_BUILD_ROOT%{python26_sitearch}"'", 10, "%{python26_sitearch}", 1)' > /dev/null %endif # Remove examples and let RPM package them under /usr/share/doc using the doc macro rm -rf $RPM_BUILD_ROOT%{_datadir}/%{pkgdir}/examples make -C src/libs/data-staging/examples DESTDIR=$PWD/docdir/devel pkgdatadir= install-exampleDATA make -C src/hed/libs/compute/examples DESTDIR=$PWD/docdir/devel pkgdatadir= install-exampleDATA make -C src/hed/libs/data/examples DESTDIR=$PWD/docdir/devel pkgdatadir= install-exampleDATA make -C src/hed/acc/PythonBroker DESTDIR=$PWD/docdir/python pkgdatadir= install-exampleDATA make -C python/examples DESTDIR=$PWD/docdir/python pkgdatadir= install-exampleDATA make -C java/examples DESTDIR=$PWD/docdir/java pkgdatadir= install-exampleDATA make -C src/clients DESTDIR=$PWD/docdir/client pkgdatadir= install-exampleDATA make -C src/tests/echo DESTDIR=$PWD/docdir/hed pkgdatadir= install-exampleDATA make -C src/hed DESTDIR=$PWD/docdir/hed pkgdatadir= install-profileDATA # arc.conf.reference needs special handling make -C src/doc DESTDIR=$RPM_BUILD_ROOT install-exampleDATA # Link to arc.conf.reference from doc rm -f $PWD/docdir/arc.conf.reference ln -s %{_datadir}/%{pkgdir}/examples/arc.conf.reference $PWD/docdir/arc.conf.reference %clean rm -rf $RPM_BUILD_ROOT %post -p /sbin/ldconfig %postun -p /sbin/ldconfig %post plugins-globus -p /sbin/ldconfig %postun plugins-globus -p /sbin/ldconfig %post hed %enable_service arched %preun hed %stop_on_removal arched %postun hed %condrestart_on_update arched %post arex %enable_service a-rex %preun arex %stop_on_removal a-rex %postun arex %condrestart_on_update a-rex %post gridftpd %enable_service gridftpd %preun gridftpd %stop_on_removal gridftpd %postun gridftpd %condrestart_on_update gridftpd %post cache-service %enable_service arc-cache-service %preun cache-service %stop_on_removal arc-cache-service %postun cache-service %condrestart_on_update arc-cache-service %post datadelivery-service %enable_service arc-datadelivery-service %preun datadelivery-service %stop_on_removal arc-datadelivery-service %postun datadelivery-service %condrestart_on_update arc-datadelivery-service %post ldap-infosys %enable_service nordugrid-arc-slapd nordugrid-arc-inforeg %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 semanage port -a -t ldap_port_t -p tcp 2135 2>/dev/null || : semanage fcontext -a -t slapd_etc_t 
"/var/run/arc/infosys/bdii-slapd\.conf" 2>/dev/null || : %endif %preun ldap-infosys %stop_on_removal nordugrid-arc-slapd nordugrid-arc-inforeg %postun ldap-infosys %condrestart_on_update nordugrid-arc-slapd nordugrid-arc-inforeg %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 if [ $1 -eq 0 ]; then semanage port -d -t ldap_port_t -p tcp 2135 2>/dev/null || : semanage fcontext -d -t slapd_etc_t "/var/run/arc/infosys/bdii-slapd\.conf" 2>/dev/null || : fi %endif %post aris %enable_service nordugrid-arc-bdii %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 semanage fcontext -a -t slapd_db_t "/var/lib/arc/bdii/db(/.*)?" 2>/dev/null || : semanage fcontext -a -t slapd_var_run_t "/var/run/arc/bdii/db(/.*)?" 2>/dev/null || : # Remove selinux labels for old arc bdii var dir semanage fcontext -d -t slapd_db_t "/var/run/arc/bdii(/.*)?" 2>/dev/null || : %endif %preun aris %stop_on_removal nordugrid-arc-bdii %postun aris %condrestart_on_update nordugrid-arc-bdii %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 if [ $1 -eq 0 ]; then semanage fcontext -d -t slapd_db_t "/var/lib/arc/bdii/db(/.*)?" 2>/dev/null || : semanage fcontext -d -t slapd_var_run_t "/var/run/arc/bdii/db(/.*)?" 2>/dev/null || : fi %endif %triggerun aris -- bdii %if %{?suse_version:1}%{!?suse_version:0} FIRST_ARG=1 %restart_on_update nordugrid-arc-bdii %else service nordugrid-arc-bdii condrestart > /dev/null 2>&1 || : %endif %post egiis %enable_service nordugrid-arc-egiis %if %{?fedora}%{!?fedora:0} >= 6 || %{?rhel}%{!?rhel:0} >= 5 /usr/sbin/semodule -i %{_datadir}/selinux/packages/%{name}/%{name}-egiis.pp > /dev/null 2>&1 || : /sbin/restorecon %{_sbindir}/arc-infoindex-relay %endif %preun egiis %stop_on_removal nordugrid-arc-egiis %postun egiis %condrestart_on_update nordugrid-arc-egiis %if %{?fedora}%{!?fedora:0} >= 6 || %{?rhel}%{!?rhel:0} >= 5 if [ $1 -eq 0 ] ; then /usr/sbin/semodule -r %{name}-egiis > /dev/null 2>&1 || : fi %endif %if %{with_acix} %post acix-cache %enable_service acix-cache %preun acix-cache %stop_on_removal acix-cache %postun acix-cache %condrestart_on_update acix-cache %post acix-index %enable_service acix-index %preun acix-index %stop_on_removal acix-index %postun acix-index %condrestart_on_update acix-index %endif %if %{with_java} && %{with_gcj} %post java [ -x %{_bindir}/rebuild-gcj-db ] && %{_bindir}/rebuild-gcj-db %endif %if %{with_java} && %{with_gcj} %postun java [ -x %{_bindir}/rebuild-gcj-db ] && %{_bindir}/rebuild-gcj-db %endif %if %{with_cautils} %post ca-utils # Enable and start CRL updates via cron for backward compatibility /sbin/chkconfig fetch-crl-cron on service fetch-crl-cron start > /dev/null 2>&1 %endif %files -f %{name}.lang %defattr(-,root,root,-) %doc docdir/arc.conf.reference README AUTHORS LICENSE NOTICE ChangeLog %{_libdir}/libarccompute.so.* %{_libdir}/libarccommunication.so.* %{_libdir}/libarccommon.so.* %{_libdir}/libarccredential.so.* %{_libdir}/libarccredentialstore.so.* %{_libdir}/libarccrypto.so.* %{_libdir}/libarcdata.so.* %{_libdir}/libarcdatastaging.so.* %{_libdir}/libarcloader.so.* %{_libdir}/libarcmessage.so.* %{_libdir}/libarcsecurity.so.* %{_libdir}/libarcinfosys.so.* %{_libdir}/libarcws.so.* %{_libdir}/libarcwssecurity.so.* %if %{with_xmlsec1} %{_libdir}/libarcxmlsec.so.* %endif %dir %{_libdir}/%{pkgdir} # We need to have libmodcrypto.so close to libarccrypto %{_libdir}/%{pkgdir}/libmodcrypto.so %{_libdir}/%{pkgdir}/libmodcrypto.apd # We need to have libmodcredential.so close to libarccredential %{_libdir}/%{pkgdir}/libmodcredential.so 
%{_libdir}/%{pkgdir}/libmodcredential.apd %{_libdir}/%{pkgdir}/arc-file-access %{_libdir}/%{pkgdir}/arc-hostname-resolver %{_libdir}/%{pkgdir}/DataStagingDelivery %dir %{_libexecdir}/%{pkgdir} %dir %{_datadir}/%{pkgdir} %dir %{_datadir}/%{pkgdir}/examples %{_datadir}/%{pkgdir}/examples/arc.conf.reference %dir %{_datadir}/%{pkgdir}/test-jobs %{_datadir}/%{pkgdir}/test-jobs/test-job-* %{_datadir}/%{pkgdir}/schema %files client %defattr(-,root,root,-) %doc docdir/client/* %{_bindir}/arccat %{_bindir}/arcclean %{_bindir}/arccp %{_bindir}/arcecho %{_bindir}/arcget %{_bindir}/arcinfo %{_bindir}/arckill %{_bindir}/arcls %{_bindir}/arcmkdir %{_bindir}/arcrename %{_bindir}/arcproxy %{_bindir}/arcrenew %{_bindir}/arcresub %{_bindir}/arcresume %{_bindir}/arcrm %{_bindir}/arcstat %{_bindir}/arcsub %{_bindir}/arcsync %{_bindir}/arctest %dir %{_sysconfdir}/%{pkgdir} %config(noreplace) %{_sysconfdir}/%{pkgdir}/client.conf %doc %{_mandir}/man1/arccat.1* %doc %{_mandir}/man1/arcclean.1* %doc %{_mandir}/man1/arccp.1* %doc %{_mandir}/man1/arcecho.1* %doc %{_mandir}/man1/arcget.1* %doc %{_mandir}/man1/arcinfo.1* %doc %{_mandir}/man1/arckill.1* %doc %{_mandir}/man1/arcls.1* %doc %{_mandir}/man1/arcmkdir.1* %doc %{_mandir}/man1/arcrename.1* %doc %{_mandir}/man1/arcproxy.1* %doc %{_mandir}/man1/arcrenew.1* %doc %{_mandir}/man1/arcresub.1* %doc %{_mandir}/man1/arcresume.1* %doc %{_mandir}/man1/arcrm.1* %doc %{_mandir}/man1/arcstat.1* %doc %{_mandir}/man1/arcsub.1* %doc %{_mandir}/man1/arcsync.1* %doc %{_mandir}/man1/arctest.1* %files hed %defattr(-,root,root,-) %doc docdir/hed/* %if %{use_systemd} %{_unitdir}/arched.service %{_datadir}/%{pkgdir}/arched-start %else %{_initrddir}/arched %endif %{_sbindir}/arched %{_libdir}/%{pkgdir}/libecho.so %{_libdir}/%{pkgdir}/libecho.apd %{_datadir}/%{pkgdir}/profiles %doc %{_mandir}/man8/arched.8* %doc %{_mandir}/man5/arc.conf.5* %files gridftpd %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/gridftpd.service %{_datadir}/%{pkgdir}/gridftpd-start %else %{_initrddir}/gridftpd %endif %{_sbindir}/gridftpd %{_libdir}/%{pkgdir}/jobplugin.* %{_libdir}/%{pkgdir}/fileplugin.* %config(noreplace) %{_sysconfdir}/logrotate.d/%{name}-gridftpd %doc %{_mandir}/man8/gridftpd.8* %files ldap-infosys %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/nordugrid-arc-slapd.service %{_unitdir}/nordugrid-arc-inforeg.service %else %{_initrddir}/nordugrid-arc-ldap-infosys %{_initrddir}/nordugrid-arc-slapd %{_initrddir}/nordugrid-arc-inforeg %endif %{_datadir}/%{pkgdir}/create-slapd-config %{_datadir}/%{pkgdir}/create-inforeg-config %{_datadir}/%{pkgdir}/config_parser_compat.sh %{_datadir}/%{pkgdir}/grid-info-soft-register %{_datadir}/%{pkgdir}/ldap-schema %files aris %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/nordugrid-arc-bdii.service %else %{_initrddir}/nordugrid-arc-bdii %endif %{_datadir}/%{pkgdir}/create-bdii-config %{_datadir}/%{pkgdir}/glue-generator.pl %{_datadir}/%{pkgdir}/glite-info-provider-ldap %{_datadir}/%{pkgdir}/PerfData.pl %{_datadir}/%{pkgdir}/ConfigParser.pm %config(noreplace) %{_sysconfdir}/logrotate.d/%{name}-aris %files egiis %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/nordugrid-arc-egiis.service %else %{_initrddir}/nordugrid-arc-egiis %endif %{_sbindir}/arc-infoindex-relay %{_sbindir}/arc-infoindex-server %{_mandir}/man8/arc-infoindex-relay.8* %{_mandir}/man8/arc-infoindex-server.8* %{_libdir}/%{pkgdir}/arc-infoindex-slapd-wrapper.so %{_libdir}/%{pkgdir}/arc-infoindex-slapd-wrapper.apd %if %{?fedora}%{!?fedora:0} >= 6 || %{?rhel}%{!?rhel:0} >= 5 
%{_datadir}/selinux/packages/%{name} %endif %files ldap-monitor %defattr(-,root,root,-) %{_datadir}/%{pkgdir}/ldap-monitor %doc %{_mandir}/man7/ldap-monitor.7* %files ws-monitor %defattr(-,root,root,-) %{_datadir}/%{pkgdir}/ws-monitor %doc %{_mandir}/man7/ws-monitor.7* %files cache-service %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/arc-cache-service.service %{_datadir}/%{pkgdir}/arc-cache-service-start %else %{_initrddir}/arc-cache-service %endif %{_libdir}/%{pkgdir}/libcacheservice.so %{_libdir}/%{pkgdir}/libcacheservice.apd %files datadelivery-service %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/arc-datadelivery-service.service %{_datadir}/%{pkgdir}/arc-datadelivery-service-start %else %{_initrddir}/arc-datadelivery-service %endif %{_libdir}/%{pkgdir}/libdatadeliveryservice.so %{_libdir}/%{pkgdir}/libdatadeliveryservice.apd %config(noreplace) %{_sysconfdir}/logrotate.d/%{name}-datadelivery-service %files arex %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/a-rex.service %{_datadir}/%{pkgdir}/a-rex-start %else %{_initrddir}/a-rex %endif %{_libexecdir}/%{pkgdir}/cache-clean %{_libexecdir}/%{pkgdir}/cache-list %{_libexecdir}/%{pkgdir}/jura %{_libexecdir}/%{pkgdir}/downloader %if %{with_sqlite} %{_libexecdir}/%{pkgdir}/gm-delegations-converter %doc %{_mandir}/man8/gm-delegations-converter.8* %endif %{_libexecdir}/%{pkgdir}/gm-jobs %{_libexecdir}/%{pkgdir}/gm-kick %{_libexecdir}/%{pkgdir}/smtp-send %{_libexecdir}/%{pkgdir}/smtp-send.sh %{_libexecdir}/%{pkgdir}/uploader %{_libexecdir}/%{pkgdir}/inputcheck %{_libexecdir}/%{pkgdir}/arc-vomsac-check %{_libexecdir}/%{pkgdir}/arc-config-check %{_libexecdir}/%{pkgdir}/arc-blahp-logger %{_datadir}/%{pkgdir}/cancel-*-job %{_datadir}/%{pkgdir}/scan-*-job %{_datadir}/%{pkgdir}/submit-*-job %{_datadir}/%{pkgdir}/DGAuthplug.py* %{_datadir}/%{pkgdir}/DGBridgeDataPlugin.py* %{_datadir}/%{pkgdir}/DGLog2XML.py* %{_libdir}/%{pkgdir}/libarex.so %{_libdir}/%{pkgdir}/libarex.apd %{_datadir}/%{pkgdir}/CEinfo.pl %{_datadir}/%{pkgdir}/ARC0mod.pm %{_datadir}/%{pkgdir}/FORKmod.pm %{_datadir}/%{pkgdir}/Fork.pm %{_datadir}/%{pkgdir}/SGEmod.pm %{_datadir}/%{pkgdir}/SGE.pm %{_datadir}/%{pkgdir}/LL.pm %{_datadir}/%{pkgdir}/LSF.pm %{_datadir}/%{pkgdir}/PBS.pm %{_datadir}/%{pkgdir}/Condor.pm %{_datadir}/%{pkgdir}/SLURMmod.pm %{_datadir}/%{pkgdir}/SLURM.pm %{_datadir}/%{pkgdir}/DGBridge.pm %{_datadir}/%{pkgdir}/Boinc.pm %{_datadir}/%{pkgdir}/XmlPrinter.pm %{_datadir}/%{pkgdir}/InfosysHelper.pm %{_datadir}/%{pkgdir}/LdifPrinter.pm %{_datadir}/%{pkgdir}/GLUE2xmlPrinter.pm %{_datadir}/%{pkgdir}/GLUE2ldifPrinter.pm %{_datadir}/%{pkgdir}/NGldifPrinter.pm %{_datadir}/%{pkgdir}/ARC0ClusterInfo.pm %{_datadir}/%{pkgdir}/ARC1ClusterInfo.pm %{_datadir}/%{pkgdir}/ConfigCentral.pm %{_datadir}/%{pkgdir}/GMJobsInfo.pm %{_datadir}/%{pkgdir}/HostInfo.pm %{_datadir}/%{pkgdir}/RTEInfo.pm %{_datadir}/%{pkgdir}/InfoChecker.pm %{_datadir}/%{pkgdir}/IniParser.pm %{_datadir}/%{pkgdir}/LRMSInfo.pm %{_datadir}/%{pkgdir}/Sysinfo.pm %{_datadir}/%{pkgdir}/LogUtils.pm %{_datadir}/%{pkgdir}/condor_env.pm %{_datadir}/%{pkgdir}/cancel_common.sh %{_datadir}/%{pkgdir}/config_parser.sh %{_datadir}/%{pkgdir}/configure-*-env.sh %{_datadir}/%{pkgdir}/submit_common.sh %{_datadir}/%{pkgdir}/scan_common.sh %{_datadir}/%{pkgdir}/perferator %doc %{_mandir}/man1/cache-clean.1* %doc %{_mandir}/man1/cache-list.1* %doc %{_mandir}/man1/jura.1* %doc %{_mandir}/man8/gm-jobs.8* %doc %{_mandir}/man1/arc-config-check.1* %doc %{_mandir}/man8/arc-vomsac-check.8* %doc %{_mandir}/man8/arc-blahp-logger.8* 
%doc %{_mandir}/man8/a-rex-backtrace-collect.8* %config(noreplace) %{_sysconfdir}/logrotate.d/%{name}-arex %dir %{_localstatedir}/log/arc %dir %{_localstatedir}/spool/arc %dir %{_localstatedir}/spool/arc/ssm %dir %{_localstatedir}/spool/arc/urs %{_libexecdir}/%{pkgdir}/ssmsend %dir %{_datadir}/%{pkgdir}/ssm %{_datadir}/%{pkgdir}/ssm/__init__.py* %{_datadir}/%{pkgdir}/ssm/crypto.py* %{_datadir}/%{pkgdir}/ssm/ssm2.py* %{_datadir}/%{pkgdir}/ssm/brokers.py* %{_datadir}/%{pkgdir}/ssm/sender.cfg %{_sbindir}/a-rex-backtrace-collect %if %{with_acix} %files acix-core %defattr(-,root,root,-) %dir %{python_sitelib}/acix %{python_sitelib}/acix/__init__.py* %{python_sitelib}/acix/core %files acix-cache %defattr(-,root,root,-) %{python_sitelib}/acix/cacheserver %if %{use_systemd} %{_unitdir}/acix-cache.service %{_datadir}/%{pkgdir}/acix-cache-start %else %{_initrddir}/acix-cache %endif %files acix-index %defattr(-,root,root,-) %{python_sitelib}/acix/indexserver %if %{use_systemd} %{_unitdir}/acix-index.service %{_datadir}/%{pkgdir}/acix-index-start %else %{_initrddir}/acix-index %endif %endif %files devel %defattr(-,root,root,-) %doc docdir/devel/* src/hed/shc/arcpdp/*.xsd %{_includedir}/%{pkgdir} %{_libdir}/lib*.so %{_bindir}/wsdl2hed %doc %{_mandir}/man1/wsdl2hed.1* %{_bindir}/arcplugin %doc %{_mandir}/man1/arcplugin.1* %{_libdir}/pkgconfig/arcbase.pc %files plugins-needed %defattr(-,root,root,-) %dir %{_libdir}/%{pkgdir}/test %{_libdir}/%{pkgdir}/test/libaccTEST.so %{_libdir}/%{pkgdir}/libaccARC1.so %{_libdir}/%{pkgdir}/libaccBroker.so %{_libdir}/%{pkgdir}/libaccCREAM.so %{_libdir}/%{pkgdir}/libaccEMIES.so %{_libdir}/%{pkgdir}/libaccJobDescriptionParser.so %{_libdir}/%{pkgdir}/libaccSER.so %{_libdir}/%{pkgdir}/libaccldap.so %{_libdir}/%{pkgdir}/libarcshc.so %{_libdir}/%{pkgdir}/libarcshclegacy.so %{_libdir}/%{pkgdir}/libdmcfile.so %{_libdir}/%{pkgdir}/libdmchttp.so %{_libdir}/%{pkgdir}/libdmcldap.so %{_libdir}/%{pkgdir}/libdmcsrm.so %{_libdir}/%{pkgdir}/libdmcrucio.so %{_libdir}/%{pkgdir}/libdmcacix.so %{_libdir}/%{pkgdir}/libidentitymap.so %{_libdir}/%{pkgdir}/libarguspdpclient.so %{_libdir}/%{pkgdir}/libmcchttp.so %{_libdir}/%{pkgdir}/libmccmsgvalidator.so %{_libdir}/%{pkgdir}/libmccsoap.so %{_libdir}/%{pkgdir}/libmcctcp.so %{_libdir}/%{pkgdir}/libmcctls.so %{_libdir}/%{pkgdir}/libaccARC1.apd %{_libdir}/%{pkgdir}/libaccBroker.apd %{_libdir}/%{pkgdir}/libaccCREAM.apd %{_libdir}/%{pkgdir}/libaccEMIES.apd %{_libdir}/%{pkgdir}/libaccJobDescriptionParser.apd %{_libdir}/%{pkgdir}/libaccSER.apd %{_libdir}/%{pkgdir}/libaccldap.apd %{_libdir}/%{pkgdir}/test/libaccTEST.apd %{_libdir}/%{pkgdir}/libarcshc.apd %{_libdir}/%{pkgdir}/libarcshclegacy.apd %{_libdir}/%{pkgdir}/libdmcfile.apd %{_libdir}/%{pkgdir}/libdmchttp.apd %{_libdir}/%{pkgdir}/libdmcldap.apd %{_libdir}/%{pkgdir}/libdmcsrm.apd %{_libdir}/%{pkgdir}/libdmcrucio.apd %{_libdir}/%{pkgdir}/libdmcacix.apd %{_libdir}/%{pkgdir}/libidentitymap.apd %{_libdir}/%{pkgdir}/libarguspdpclient.apd %{_libdir}/%{pkgdir}/libmcchttp.apd %{_libdir}/%{pkgdir}/libmccsoap.apd %{_libdir}/%{pkgdir}/libmcctcp.apd %{_libdir}/%{pkgdir}/libmccmsgvalidator.apd %{_libdir}/%{pkgdir}/libmcctls.apd %files plugins-globus %defattr(-,root,root,-) %{_libdir}/%{pkgdir}/libaccARC0.so %{_libdir}/%{pkgdir}/libdmcgridftp.so %{_libdir}/%{pkgdir}/libaccARC0.apd %{_libdir}/%{pkgdir}/libdmcgridftp.apd %{_libdir}/libarcglobusutils.so.* %{_libexecdir}/%{pkgdir}/arc-lcas %{_libexecdir}/%{pkgdir}/arc-lcmaps %if %{with_canl} %files arcproxyalt %defattr(-,root,root,-) %{_bindir}/arcproxyalt %endif %if 
%{with_xrootd} %files plugins-xrootd %defattr(-,root,root,-) %{_libdir}/%{pkgdir}/libdmcxrootd.so %{_libdir}/%{pkgdir}/libdmcxrootd.apd %endif %if %{with_gfal} %files plugins-gfal %defattr(-,root,root,-) %{_libdir}/%{pkgdir}/libdmcgfal.so %{_libdir}/%{pkgdir}/libdmcgfal.apd %endif %if %{with_s3} %files plugins-s3 %defattr(-,root,root,-) %{_libdir}/%{pkgdir}/libdmcs3.so %{_libdir}/%{pkgdir}/libdmcs3.apd %endif %if %{with_python} %files -n python2-%{name} %defattr(-,root,root,-) %doc docdir/python/* %dir %{python_sitearch}/%{pkgdir} %{python_sitearch}/_arc.@PYTHON_SOABI@*so %{python_sitearch}/arc %{_libdir}/%{pkgdir}/libaccPythonBroker.so %{_libdir}/%{pkgdir}/libaccPythonBroker.apd %{_libdir}/%{pkgdir}/libpythonservice.so %{_libdir}/%{pkgdir}/libpythonservice.apd %endif %if %{?rhel}%{!?rhel:0} == 5 %files -n python26-%{name} %defattr(-,root,root,-) %{python26_sitearch}/_arc.@ALTPYTHON_SOABI@*so %{python26_sitearch}/%{pkgdir} %endif %if %{?fedora}%{!?fedora:0} >= 13 %files -n python3-%{name} %defattr(-,root,root,-) %{python3_sitearch}/_arc.@ALTPYTHON_SOABI@*so %{python3_sitearch}/%{pkgdir} %endif %if %{with_java} %files java %defattr(-,root,root,-) %doc docdir/java/* %{_libdir}/%{pkgdir}/libjarc.so %{_jnidir}/arc.jar %{_libdir}/%{pkgdir}/libjavaservice.so %{_libdir}/%{pkgdir}/libjavaservice.apd %if %{with_gcj} %{_libdir}/gcj/%{name} %endif %endif %files gridmap-utils %defattr(-,root,root,-) %{_sbindir}/nordugridmap %config(noreplace) %{_sysconfdir}/cron.d/nordugridmap %doc src/utils/gridmap/nordugridmap.conf %doc %{_mandir}/man8/nordugridmap.8* %if %{with_cautils} %files ca-utils %defattr(-,root,root,-) %endif %files misc-utils %defattr(-,root,root,-) %{_bindir}/arcemiestest %{_bindir}/arcwsrf %{_bindir}/arcperftest %if %{with_xmlsec1} %{_bindir}/saml_assertion_init %doc %{_mandir}/man1/saml_assertion_init.1* %endif %doc %{_mandir}/man1/arcemiestest.1* %doc %{_mandir}/man1/arcwsrf.1* %doc %{_mandir}/man1/arcperftest.1* %changelog * Wed Dec 13 2017 Anders Waananen - 5.4.2-1 - 5.4.2 Final Release * Thu Oct 12 2017 Anders Waananen - 5.4.1-1 - 5.4.1 Final Release * Mon Sep 18 2017 Anders Waananen - 5.4.0-1 - 5.4.0 Final Release * Wed Jul 05 2017 Anders Waananen - 5.3.2-1 - 5.3.2 Final Release * Tue May 30 2017 Anders Waananen - 5.3.1-1 - 5.3.1 Final Release * Thu Apr 06 2017 Anders Waananen - 5.3.0-1 - 5.3.0 Final Release * Tue Feb 07 2017 Anders Waananen - 5.2.2-1 - 5.2.2 Final Release * Thu Dec 15 2016 Anders Waananen - 5.2.1-1 - 5.2.1 Final Release * Tue Oct 25 2016 Anders Waananen - 5.2.0-1 - 5.2.0 Final Release * Wed Aug 24 2016 Anders Waananen - 5.1.3-1 - 5.1.3 Final Release * Fri Jul 01 2016 Anders Waananen - 5.1.2-1 - 5.1.2 Final Release * Wed May 25 2016 Anders Waananen - 5.1.1-1 - 5.1.1 Final Release * Thu May 19 2016 Anders Waananen - 5.1.0-1 - 5.1.0 Final Release * Fri Jan 08 2016 Anders Waananen - 5.0.5-1 - 5.0.5 Final Release * Thu Nov 19 2015 Anders Waananen - 5.0.4-1 - 5.0.4 Final Release * Sun Sep 27 2015 Anders Waananen - 5.0.3-1 - 5.0.3 Final Release * Mon Jun 29 2015 Anders Waananen - 5.0.2-1 - 5.0.2 Final Release * Thu Jun 25 2015 Anders Waananen - 5.0.1-1 - 5.0.1 Final Release * Fri Mar 27 2015 Anders Waananen - 5.0.0-1 - 5.0.0 Final Release * Thu Mar 19 2015 Anders Waananen - 5.0.0-0.rc5 - 5.0.0 Release candidate 5 * Wed Feb 18 2015 Anders Waananen - 5.0.0-0.rc4 - 5.0.0 Release candidate 4 * Fri Feb 06 2015 Anders Waananen - 5.0.0-0.rc3 - 5.0.0 Release candidate 3 * Thu Feb 05 2015 Anders Waananen - 5.0.0-0.rc2 - 5.0.0 Release candidate 2 * Thu Jan 08 2015 Anders Waananen - 
5.0.0-0.rc1 - 5.0.0 Release candidate 1 * Thu Aug 14 2014 Anders Waananen - 4.2.0-1 - 4.2.0 Final release * Tue Apr 29 2014 Anders Waananen - 4.1.0-1 - 4.1.0 Final release * Mon Mar 24 2014 Anders Waananen - 4.1.0-0.rc1 - 4.1.0 Release candidate 1 * Wed Nov 27 2013 Anders Waananen - 4.0.0-1 - 4.0.0 Final Release * Wed Nov 06 2013 Anders Waananen - 4.0.0-0.rc2 - 4.0.0 release candidate 2 * Wed Oct 23 2013 Anders Waananen - 4.0.0-0.rc1 - 4.0.0 release candidate 1 * Fri Jul 19 2013 Anders Waananen - 3.0.3-1 - 3.0.3 Final Release * Wed Jun 12 2013 Anders Waananen - 3.0.2-1 - 3.0.2 Final Release * Mon Apr 29 2013 Anders Waananen - 3.0.1-1 - 3.0.1 Final Release * Fri Apr 12 2013 Anders Waananen - 3.0.1-0.rc2 - 3.0.1 release candidate 2 * Fri Apr 12 2013 Anders Waananen - 3.0.1-0.rc1 - 3.0.1 release candidate 1 * Fri Feb 22 2013 Anders Waananen - 3.0.0-1 - 3.0.0 Final Release * Wed Feb 20 2013 Anders Waananen - 3.0.0-0.rc6 - 3.0.0 release candidate 6 * Wed Feb 06 2013 Anders Waananen - 3.0.0-0.rc5 - 3.0.0 release candidate 5 * Sat Feb 02 2013 Anders Waananen - 3.0.0-0.rc4 - 3.0.0 release candidate 4 * Wed Jan 30 2013 Anders Waananen - 3.0.0-0.rc3 - 3.0.0 release candidate 3 * Mon Jan 28 2013 Anders Waananen - 3.0.0-0.rc2 - 3.0.0 release candidate 2 * Thu Dec 06 2012 Anders Waananen - 3.0.0-0.rc1 - 3.0.0 release candidate 1 * Thu Nov 08 2012 Anders Waananen - 2.0.1-1 - 2.0.1 Final Release * Thu Oct 25 2012 Anders Waananen - 2.0.1-0.rc2 - 2.0.1 release candidate 2 * Fri Aug 24 2012 Anders Waananen - 2.0.1-0.rc1 - 2.0.1 release candidate 1 * Wed May 23 2012 Mattias Ellert - 2.0.0-1 - 2.0.0 Final Release * Wed Apr 11 2012 Mattias Ellert - 2.0.0-0.rc4.1 - 2.0.0 release candidate 4.1 * Mon Apr 02 2012 Mattias Ellert - 2.0.0-0.rc4 - 2.0.0 release candidate 4 * Thu Mar 29 2012 Mattias Ellert - 2.0.0-0.rc3.2 - 2.0.0 release candidate 3.2 * Tue Mar 27 2012 Mattias Ellert - 2.0.0-0.rc3.1 - 2.0.0 release candidate 3.1 * Mon Mar 05 2012 Mattias Ellert - 2.0.0-0.rc3 - 2.0.0 release candidate 3 * Wed Feb 15 2012 Mattias Ellert - 2.0.0-0.rc2 - 2.0.0 release candidate 2 * Wed Feb 15 2012 Mattias Ellert - 1.1.1-1 - 1.1.1 Final Release * Mon Oct 03 2011 Mattias Ellert - 1.1.0-1 - 1.1.0 Final Release * Sun Sep 25 2011 Mattias Ellert - 1.1.0-0.rc2 - 1.1.0 release candidate 2 * Sun Sep 11 2011 Mattias Ellert - 1.1.0-0.rc1 - 1.1.0 release candidate 1 * Sat Jul 23 2011 Mattias Ellert - 1.0.1-1 - 1.0.1 Final Release * Tue Jul 19 2011 Mattias Ellert - 1.0.1-0.rc4 - 1.0.1 release candidate 4 * Sat Jun 18 2011 Mattias Ellert - 1.0.1-0.rc1 - 1.0.1 release candidate 1 * Mon Apr 18 2011 Mattias Ellert - 1.0.0-1 - 1.0.0 Final Release * Wed Apr 06 2011 Mattias Ellert - 1.0.0-0.b5 - 1.0.0 beta 5 * Wed Mar 23 2011 Mattias Ellert - 1.0.0-0.b4 - 1.0.0 beta 4 * Thu Mar 10 2011 Mattias Ellert - 1.0.0-0.b3 - 1.0.0 beta 3 * Mon Mar 07 2011 Mattias Ellert - 1.0.0-0.b2 - 1.0.0 Beta 2 * Mon Feb 14 2011 Mattias Ellert - 1.0.0-0.b1 - rename nordugrid-arc-nox → nordugrid-arc - 1.0.0 Beta 1 * Tue Dec 21 2010 Mattias Ellert - 1.2.1-1 - 1.2.1 Final Release * Tue Dec 21 2010 Mattias Ellert - 1.2.1-0.rc2 - 1.2.1 Release Candidate 2 * Wed Dec 08 2010 Mattias Ellert - 1.2.1-0.rc1 - 1.2.1 Release Candidate 1 * Fri Oct 22 2010 Mattias Ellert - 1.2.0-1 - 1.2.0 Final Release * Thu Sep 30 2010 Mattias Ellert - 1.2.0-0.rc2 - 1.2.0 Release Candidate 2 * Mon Sep 13 2010 Mattias Ellert - 1.2.0-0.rc1 - 1.2.0 Release Candidate 1 * Wed May 05 2010 Mattias Ellert - 1.1.0-1 - 1.1.0 Final Release * Mon Mar 08 2010 Mattias Ellert - 1.1.0-0.rc6 - 1.1.0 Release 
Candidate 6 * Fri Feb 26 2010 Mattias Ellert - 1.1.0-0.rc5.1 - Rebuild for Globus Toolkit 5 * Fri Feb 26 2010 Mattias Ellert - 1.1.0-0.rc5 - 1.1.0 Release Candidate 5 * Wed Feb 24 2010 Mattias Ellert - 1.1.0-0.rc4 - 1.1.0 Release Candidate 4 * Mon Feb 22 2010 Mattias Ellert - 1.1.0-0.rc3 - 1.1.0 Release Candidate 3 * Mon Feb 15 2010 Mattias Ellert - 1.1.0-0.rc2 - 1.1.0 Release Candidate 2 * Thu Feb 11 2010 Mattias Ellert - 1.1.0-0.rc1 - 1.1.0 Release Candidate 1 * Sun Nov 29 2009 Mattias Ellert - 1.0.0-1 - 1.0.0 Final Release * Thu Nov 19 2009 Mattias Ellert - 1.0.0-0.rc7 - 1.0.0 Release Candidate 7 * Thu Nov 12 2009 Mattias Ellert - 1.0.0-0.rc6 - 1.0.0 Release Candidate 6 * Wed Nov 04 2009 Mattias Ellert - 1.0.0-0.rc5 - rename nordugrid-arc1 → nordugrid-arc-nox - 1.0.0 Release candidate 5 * Mon Oct 26 2009 Mattias Ellert - 0.9.4-0.rc4 - 0.9.4 Release candidate 4 * Thu Oct 22 2009 Mattias Ellert - 0.9.4-0.rc3 - 0.9.4 Release candidate 3 * Wed Oct 14 2009 Mattias Ellert - 0.9.4-0.rc2 - 0.9.4 Release candidate 2 * Sun Sep 27 2009 Mattias Ellert - 0.9.3-1 - 0.9.3 Final release * Thu Sep 17 2009 Mattias Ellert - 0.9.3-0.rc3 - 0.9.3 Release candidate 3 * Wed Jan 14 2009 Anders Wäänänen - 0.9.2-1 - 0.9.2 * Fri Oct 10 2008 Anders Wäänänen - 0.9.1-1 - Initial build nordugrid-arc-5.4.2/PaxHeaders.7502/nsis0000644000000000000000000000013213214316034016070 xustar000000000000000030 mtime=1513200668.713854059 30 atime=1513200668.715854084 30 ctime=1513200668.713854059 nordugrid-arc-5.4.2/nsis/0000755000175000002070000000000013214316034016213 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/nsis/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711304206670020207 xustar000000000000000027 mtime=1259408824.547861 30 atime=1513200592.358920209 30 ctime=1513200668.705853961 nordugrid-arc-5.4.2/nsis/Makefile.am0000644000175000002070000000010511304206670020245 0ustar00mockbuildmock00000000000000EXTRA_DIST = arc.nsis EnvVarUpdate.nsh license.txt README.txt vomses nordugrid-arc-5.4.2/nsis/PaxHeaders.7502/README.txt0000644000000000000000000000012411304206670017646 xustar000000000000000027 mtime=1259408824.547861 27 atime=1513200573.748692 30 ctime=1513200668.712854047 nordugrid-arc-5.4.2/nsis/README.txt0000644000175000002070000000225611304206670017720 0ustar00mockbuildmock00000000000000You need to do these steps after the ARC installation ----------------------------------------------------- Install Grid certificate and private key: On Windows XP: Copy your usercert.pem file to: C:\Documents and Settings\\.globus\usercert.pem Copy your userkey.pem file to: C:\Documents and Settings\\.globus\userkey.pem On Windows Vista, 7: Copy your usercert.pem file to: C:\Users\\.globus\usercert.pem Copy your userkey.pem file to: C:\Users\\.globus\userkey.pem Other information ----------------- 1. Default client.conf, jobs.xml directory location (these directories are hidden by default): On Windows XP: C:\Documents and Settings\\Application Data\.arc On Windows Vista, 7: C:\Users\\AppData\Roaming\.arc 2. The .globus directory creation can be problematic because the Explorer of the Windows does not like the "." at the beginning of the directory name. 
Solution for this: cmd (command line interface of Windows) mkdir "C:\Documents and Settings\\.globus" or mkdir "C:\Users\\.globus" nordugrid-arc-5.4.2/nsis/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315720020213 xustar000000000000000030 mtime=1513200592.388920576 30 atime=1513200653.058662591 30 ctime=1513200668.707853986 nordugrid-arc-5.4.2/nsis/Makefile.in0000644000175000002070000003762113214315720020272 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = nsis DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/arc.nsis.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = arc.nsis CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = 
@ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV 
= @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ 
pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ EXTRA_DIST = arc.nsis EnvVarUpdate.nsh license.txt README.txt vomses all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign nsis/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign nsis/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): arc.nsis: $(top_builddir)/config.status $(srcdir)/arc.nsis.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/nsis/PaxHeaders.7502/license.txt0000644000000000000000000000012411304206670020333 xustar000000000000000027 mtime=1259408824.547861 27 atime=1513200573.743692 30 ctime=1513200668.711854035 nordugrid-arc-5.4.2/nsis/license.txt0000644000175000002070000002613611304206670020410 0ustar00mockbuildmock00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. nordugrid-arc-5.4.2/nsis/PaxHeaders.7502/arc.nsis0000644000000000000000000000013113214316015017606 xustar000000000000000029 mtime=1513200653.08066286 30 atime=1513200658.445728476 30 ctime=1513200668.709854011 nordugrid-arc-5.4.2/nsis/arc.nsis0000644000175000002070000005561413214316015017670 0ustar00mockbuildmock00000000000000Name "NorduGrid ARC" RequestExecutionLevel admin #NOTE: You still need to check user rights with UserInfo! !include LogicLib.nsh Function .onInit UserInfo::GetAccountType pop $0 ${If} $0 != "admin" ;Require admin rights on NT4+ MessageBox mb_iconstop "Administrator rights required!" 
SetErrorLevel 740 ;ERROR_ELEVATION_REQUIRED Quit ${EndIf} FunctionEnd OutFile "nordugrid-arc-5.4.2-1.exe" # Fix lnk removals RequestExecutionLevel user !define COMPANYNAME "NorduGrid" InstallDir "$PROGRAMFILES\${COMPANYNAME}" InstallDirRegKey HKLM SOFTWARE\${COMPANYNAME}\ARC "Install_Dir" ShowInstDetails hide ShowUninstDetails hide XPStyle on !include EnvVarUpdate.nsh ; include for some of the windows messages defines !include "winmessages.nsh" ; HKLM (all users) vs HKCU (current user) defines !define env_hklm 'HKLM "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"' !define env_hkcu 'HKCU "Environment"' !define USE_GUI #!define USE_PYTHON #!define USE_GLOBUS !ifndef SYS_PATH !define SYS_PATH "/usr" !endif !ifndef ARC_LOCATION !define ARC_LOCATION "/opt/arc" !endif !ifndef GLOBUS_LOCATION !define GLOBUS_LOCATION "/opt/globus" !endif !define CERT_PATH "/etc/grid-security/certificates" !define MISC_PATH "." !define PYVER 2.6 PageEx license LicenseData ${MISC_PATH}/license.txt LicenseForceSelection checkbox PageExEnd Page components Page directory Page instfiles UninstPage uninstConfirm UninstPage instfiles ComponentText "Select which optional components you want to install." DirText "Please select the installation folder." Section "NorduGrid ARC clients" SectionIn RO SetOutPath "$INSTDIR" File "${MISC_PATH}/README.txt" SetOutPath "$INSTDIR\share\arc\examples" File "${ARC_LOCATION}/share/arc/examples/client.conf" SetOutPath "$INSTDIR\bin" File "${ARC_LOCATION}/bin/arccat.exe" File "${ARC_LOCATION}/bin/arcclean.exe" File "${ARC_LOCATION}/bin/arccp.exe" File "${ARC_LOCATION}/bin/arcecho.exe" File "${ARC_LOCATION}/bin/arcget.exe" File "${ARC_LOCATION}/bin/arcinfo.exe" File "${ARC_LOCATION}/bin/arckill.exe" File "${ARC_LOCATION}/bin/arcls.exe" File "${ARC_LOCATION}/bin/arcmkdir.exe" File "${ARC_LOCATION}/bin/arcproxy.exe" File "${ARC_LOCATION}/bin/arcproxyalt.exe" File "${ARC_LOCATION}/bin/arcrename.exe" File "${ARC_LOCATION}/bin/arcrenew.exe" File "${ARC_LOCATION}/bin/arcresub.exe" File "${ARC_LOCATION}/bin/arcresume.exe" File "${ARC_LOCATION}/bin/arcrm.exe" File "${ARC_LOCATION}/bin/arcstat.exe" File "${ARC_LOCATION}/bin/arcsub.exe" File "${ARC_LOCATION}/bin/arcsync.exe" File "${ARC_LOCATION}/bin/arctest.exe" # File "${ARC_LOCATION}/bin/arcwsrf.exe" # File "${ARC_LOCATION}/bin/arcemiestest.exe" File "${ARC_LOCATION}/bin/libarccommunication-3.dll" File "${ARC_LOCATION}/bin/libarccompute-3.dll" File "${ARC_LOCATION}/bin/libarccommon-3.dll" File "${ARC_LOCATION}/bin/libarccredential-3.dll" File "${ARC_LOCATION}/bin/libarccredentialstore-3.dll" File "${ARC_LOCATION}/bin/libarccrypto-3.dll" File "${ARC_LOCATION}/bin/libarcdata-3.dll" File "${ARC_LOCATION}/bin/libarcdatastaging-3.dll" File "${ARC_LOCATION}/bin/libarcloader-3.dll" File "${ARC_LOCATION}/bin/libarcmessage-3.dll" File "${ARC_LOCATION}/bin/libarcsecurity-3.dll" File "${ARC_LOCATION}/bin/libarcinfosys-0.dll" File "${ARC_LOCATION}/bin/libarcws-3.dll" File "${ARC_LOCATION}/bin/libarcwssecurity-3.dll" File "${ARC_LOCATION}/bin/libarcxmlsec-3.dll" !ifdef USE_GLOBUS File "${ARC_LOCATION}/bin/libarcglobusutils-2.dll" !endif ; File "${ARC_LOCATION}/bin/perftest.exe" ; File "${ARC_LOCATION}/bin/saml_assertion_init.exe" # Needed by libcanl_c++-1.dll File "${SYS_PATH}/bin/libwinpthread-1.dll" # Needed by arcproxyalt.exe File "${SYS_PATH}/bin/libcanl_c++-1.dll" # Needed by libcanl_c++-1.dll File "${SYS_PATH}/bin/libdb_cxx-5.3.dll" File "${SYS_PATH}/bin/libcrypto-10.dll" File "${SYS_PATH}/bin/libgcc_s_sjlj-1.dll" File 
"${SYS_PATH}/bin/libglib-2.0-0.dll" File "${SYS_PATH}/bin/libglibmm-2.4-1.dll" File "${SYS_PATH}/bin/libgmodule-2.0-0.dll" File "${SYS_PATH}/bin/libgnurx-0.dll" File "${SYS_PATH}/bin/libgobject-2.0-0.dll" File "${SYS_PATH}/bin/libgthread-2.0-0.dll" File "${SYS_PATH}/bin/iconv.dll" File "${SYS_PATH}/bin/libintl-8.dll" File "${SYS_PATH}/bin/libgio-2.0-0.dll" File "${SYS_PATH}/bin/libgiomm-2.4-1.dll" File "${SYS_PATH}/bin/libsigc-2.0-0.dll" File "${SYS_PATH}/bin/libssl-10.dll" File "${SYS_PATH}/bin/libstdc++-6.dll" File "${SYS_PATH}/bin/libxml2-2.dll" File "${SYS_PATH}/bin/libxmlsec1.dll" File "${SYS_PATH}/bin/libxmlsec1-openssl.dll" File "${SYS_PATH}/bin/libxslt-1.dll" File "${SYS_PATH}/bin/zlib1.dll" File "${SYS_PATH}/bin/libffi-6.dll" File "${SYS_PATH}/bin/libltdl-7.dll" File "${SYS_PATH}/bin/openssl.exe" !ifdef USE_GUI File "${ARC_LOCATION}/bin/arccert-ui.exe" File "${ARC_LOCATION}/bin/arcproxy-ui.exe" File "${ARC_LOCATION}/bin/arcstat-ui.exe" File "${ARC_LOCATION}/bin/arcstorage-ui.exe" File "${ARC_LOCATION}/bin/arcsub-ui.exe" File "${SYS_PATH}/bin/QtCore4.dll" File "${SYS_PATH}/bin/QtGui4.dll" File "${SYS_PATH}/bin/QtNetwork4.dll" File "${SYS_PATH}/bin/QtWebKit4.dll" File "${SYS_PATH}/bin/libpng16-16.dll" File "${SYS_PATH}/bin/libsqlite3-0.dll" !endif ; Python !ifdef USE_PYTHON File "${SYS_PATH}/bin/python.exe" File "${SYS_PATH}/bin/python${PYVER}.exe" File "${SYS_PATH}/bin/libpython${PYVER}.dll" File "${SYS_PATH}/bin/pydoc" File "${SYS_PATH}/bin/readline.dll" File "${SYS_PATH}/bin/libtermcap-0.dll" File "${SYS_PATH}/bin/libdb-4.7.dll" SetOutPath "$INSTDIR\lib\python${PYVER}" File /r "${SYS_PATH}/lib/python${PYVER}/**" SetOutPath "$INSTDIR\lib\python${PYVER}\site-packages" File /oname=_arc.pyd "${SYS_PATH}/lib/python${PYVER}/site-packages/_arc.dll" !endif ; Globus ARC dependencies !ifdef USE_GLOBUS SetOutPath "$INSTDIR\bin" File "${GLOBUS_LOCATION}/bin/libglobus_common-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gsi_callback-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gsi_cert_utils-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gsi_credential-1.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gsi_proxy_core-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gsi_sysconfig-1.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gssapi_error-2.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gssapi_gsi-4.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gss_assist-3.dll" File "${GLOBUS_LOCATION}/bin/libglobus_io-3.dll" File "${GLOBUS_LOCATION}/bin/libglobus_openssl-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_openssl_error-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_proxy_ssl-1.dll" File "${GLOBUS_LOCATION}/bin/libglobus_xio-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_ftp_control-1.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gsi_callback-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gsi_credential-1.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gsi_proxy_core-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gssapi_gsi-4.dll" File "${GLOBUS_LOCATION}/bin/libglobus_oldgaa-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_callout-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_ftp_client-2.dll" File "${GLOBUS_LOCATION}/bin/libglobus_ftp_control-1.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gssapi_error-2.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gss_assist-3.dll" File "${GLOBUS_LOCATION}/bin/libglobus_io-3.dll" ; Globus plugins SetOutPath "$INSTDIR\lib" File "${GLOBUS_LOCATION}/lib/libglobus_xio_gsi_driver-0.dll" File "${GLOBUS_LOCATION}/lib/libglobus_xio_popen_driver-0.dll" !endif SetOutPath "$INSTDIR\lib\arc" File 
"${ARC_LOCATION}/lib/arc/libaccARC1.dll" File "${ARC_LOCATION}/lib/arc/libaccBroker.dll" File "${ARC_LOCATION}/lib/arc/libaccCREAM.dll" File "${ARC_LOCATION}/lib/arc/libaccEMIES.dll" File "${ARC_LOCATION}/lib/arc/libaccSER.dll" File "${ARC_LOCATION}/lib/arc/libaccldap.dll" ; File "${ARC_LOCATION}/lib/arc/libaccUNICORE.dll" File "${ARC_LOCATION}/lib/arc/libaccJobDescriptionParser.dll" !ifdef USE_PYTHON File "${ARC_LOCATION}/lib/arc/libaccPythonBroker.dll" !endif File "${ARC_LOCATION}/lib/arc/libarcshc.dll" File "${ARC_LOCATION}/lib/arc/libarcshclegacy.dll" File "${ARC_LOCATION}/lib/arc/libdmcacix.dll" File "${ARC_LOCATION}/lib/arc/libdmcfile.dll" File "${ARC_LOCATION}/lib/arc/libdmchttp.dll" File "${ARC_LOCATION}/lib/arc/libdmcrucio.dll" File "${ARC_LOCATION}/lib/arc/libdmcsrm.dll" File "${ARC_LOCATION}/lib/arc/libidentitymap.dll" File "${ARC_LOCATION}/lib/arc/libmcchttp.dll" File "${ARC_LOCATION}/lib/arc/libmccmsgvalidator.dll" File "${ARC_LOCATION}/lib/arc/libmccsoap.dll" File "${ARC_LOCATION}/lib/arc/libmcctcp.dll" File "${ARC_LOCATION}/lib/arc/libmcctls.dll" File "${ARC_LOCATION}/lib/arc/libmodcrypto.dll" File "${ARC_LOCATION}/lib/arc/libmodcredential.dll" ; Extra stuff now we have Globus !ifdef USE_GLOBUS File "${ARC_LOCATION}/lib/arc/libaccARC0.dll" File "${ARC_LOCATION}/lib/arc/libdmcgridftp.dll" File "${ARC_LOCATION}/lib/arc/libdmcldap.dll" !endif ; Testing stuff SetOutPath "$INSTDIR\lib\arc\test" File "${ARC_LOCATION}/lib/arc/test/libaccTEST.dll" ;; Fix for bug #1563 ; SetOutPath "$INSTDIR\lib" ; File /oname=libarccrypto.dll "${ARC_LOCATION}/bin/libarccrypto-0.dll" SetOutPath "$INSTDIR\etc\arc" File "${ARC_LOCATION}/etc/arc/client.conf" SetOutPath "$INSTDIR\etc\grid-security" File "${MISC_PATH}/vomses" SectionEnd Section "NSS Utilities" SetOutPath "$INSTDIR\bin" ; NSPR File "${SYS_PATH}/bin/libnspr4.dll" File "${SYS_PATH}/bin/libplc4.dll" File "${SYS_PATH}/bin/libplds4.dll" ; NSS File "${SYS_PATH}/bin/freebl3.dll" File "${SYS_PATH}/bin/nss3.dll" File "${SYS_PATH}/bin/nssckbi.dll" File "${SYS_PATH}/bin/nssdbm3.dll" File "${SYS_PATH}/bin/nssutil3.dll" File "${SYS_PATH}/bin/smime3.dll" File "${SYS_PATH}/bin/softokn3.dll" File "${SYS_PATH}/bin/ssl3.dll" File "${SYS_PATH}/bin/certutil.exe" File "${SYS_PATH}/bin/cmsutil.exe" File "${SYS_PATH}/bin/crlutil.exe" File "${SYS_PATH}/bin/modutil.exe" File "${SYS_PATH}/bin/pk12util.exe" File "${SYS_PATH}/bin/signtool.exe" File "${SYS_PATH}/bin/signver.exe" File "${SYS_PATH}/bin/ssltap.exe" SectionEnd Section "IGTF Certificates" SetOutPath "$INSTDIR\etc\grid-security\certificates" File "${CERT_PATH}/*.signing_policy" File "${CERT_PATH}/*.info" File "${CERT_PATH}/*.namespaces" File "${CERT_PATH}/*.crl_url" File "${CERT_PATH}/*.0" SectionEnd Section "Uninstall" SetShellVarContext all Delete /rebootok "$INSTDIR\Uninstall NorduGrid ARC.exe" Delete "$SMPROGRAMS\${COMPANYNAME}\Uninstall NorduGrid ARC.lnk" Delete "$SMPROGRAMS\${COMPANYNAME}\ARC Job Manager.lnk" Delete "$SMPROGRAMS\${COMPANYNAME}\ARC Certificate Utility.lnk" Delete "$SMPROGRAMS\${COMPANYNAME}\ARC Proxy Generator.lnk" Delete "$SMPROGRAMS\${COMPANYNAME}\ARC Job Submission Tool.lnk" Delete "$SMPROGRAMS\${COMPANYNAME}\ARC Storage Explorer.lnk" rmDir "$SMPROGRAMS\${COMPANYNAME}" Delete /rebootok "$INSTDIR\lib\arc\test\libaccTEST.dll" RMDir "$INSTDIR\lib\arc\test" Delete /rebootok "$INSTDIR\lib\arc\libmodcrypto.dll" Delete /rebootok "$INSTDIR\lib\arc\libmodcredential.dll" Delete /rebootok "$INSTDIR\lib\arc\libmcctls.dll" Delete /rebootok "$INSTDIR\lib\arc\libmcctcp.dll" Delete /rebootok 
"$INSTDIR\lib\arc\libmccsoap.dll" Delete /rebootok "$INSTDIR\lib\arc\libmccmsgvalidator.dll" Delete /rebootok "$INSTDIR\lib\arc\libmcchttp.dll" Delete /rebootok "$INSTDIR\lib\arc\libidentitymap.dll" Delete /rebootok "$INSTDIR\lib\arc\libdmcsrm.dll" Delete /rebootok "$INSTDIR\lib\arc\libdmcrucio.dll" Delete /rebootok "$INSTDIR\lib\arc\libdmchttp.dll" Delete /rebootok "$INSTDIR\lib\arc\libdmcfile.dll" Delete /rebootok "$INSTDIR\lib\arc\libdmcacix.dll" Delete /rebootok "$INSTDIR\lib\arc\libarcshc.dll" Delete /rebootok "$INSTDIR\lib\arc\libarcshclegacy.dll" !ifdef USE_PYTHON Delete /rebootok "$INSTDIR\lib\arc\libaccPythonBroker.dll" !endif Delete /rebootok "$INSTDIR\lib\arc\libaccJobDescriptionParser.dll" ; Delete /rebootok "$INSTDIR\lib\arc\libaccUNICORE.dll" Delete /rebootok "$INSTDIR\lib\arc\libaccldap.dll" Delete /rebootok "$INSTDIR\lib\arc\libaccSER.dll" Delete /rebootok "$INSTDIR\lib\arc\libaccEMIES.dll" Delete /rebootok "$INSTDIR\lib\arc\libaccCREAM.dll" Delete /rebootok "$INSTDIR\lib\arc\libaccBroker.dll" Delete /rebootok "$INSTDIR\lib\arc\libaccARC1.dll" !ifdef USE_GLOBUS Delete /rebootok "$INSTDIR\lib\arc\libaccARC0.dll" Delete /rebootok "$INSTDIR\lib\arc\libdmcldap.dll" Delete /rebootok "$INSTDIR\lib\arc\libdmcgridftp.dll" !endif !ifdef USE_PYTHON Delete /rebootok "$INSTDIR\lib\arc\libpythonservice.dll" !endif Delete /rebootok "$INSTDIR\lib\arc\libecho.dll" Delete /rebootok "$INSTDIR\lib\arc\libslcs.dll" RMDir "$INSTDIR\lib\arc" ; Delete /rebootok "$INSTDIR\lib\libarccrypto.dll" RMDir "$INSTDIR\lib" !ifdef USE_PYTHON RmDir /r "$INSTDIR\lib\python${PYVER}" Delete /rebootok "$INSTDIR\bin\libdb-4.7.dll" Delete /rebootok "$INSTDIR\bin\libtermcap-0.dll" Delete /rebootok "$INSTDIR\bin\readline.dll" Delete /rebootok "$INSTDIR\bin\pydoc" Delete /rebootok "$INSTDIR\bin\libpython${PYVER}.dll" Delete /rebootok "$INSTDIR\bin\python${PYVER}.exe" Delete /rebootok "$INSTDIR\bin\python.exe" !endif !ifdef USE_GUI Delete /rebootok "$INSTDIR\bin\libsqlite3-0.dll" Delete /rebootok "$INSTDIR\bin\libpng16-16.dll" Delete /rebootok "$INSTDIR\bin\QtWebKit4.dll" Delete /rebootok "$INSTDIR\bin\QtNetwork4.dll" Delete /rebootok "$INSTDIR\bin\QtGui4.dll" Delete /rebootok "$INSTDIR\bin\QtCore4.dll" Delete /rebootok "$INSTDIR\bin\arcsub-ui.exe" Delete /rebootok "$INSTDIR\bin\arcstorage-ui.exe" Delete /rebootok "$INSTDIR\bin\arcstat-ui.exe" Delete /rebootok "$INSTDIR\bin\arcproxy-ui.exe" Delete /rebootok "$INSTDIR\bin\arccert-ui.exe" !endif Delete /rebootok "$INSTDIR\bin\openssl.exe" Delete /rebootok "$INSTDIR\bin\libltdl-7.dll" Delete /rebootok "$INSTDIR\bin\libffi-6.dll" Delete /rebootok "$INSTDIR\bin\zlib1.dll" Delete /rebootok "$INSTDIR\bin\libxslt-1.dll" Delete /rebootok "$INSTDIR\bin\libxmlsec1-openssl.dll" Delete /rebootok "$INSTDIR\bin\libxmlsec1.dll" Delete /rebootok "$INSTDIR\bin\libxml2-2.dll" Delete /rebootok "$INSTDIR\bin\libssl-10.dll" Delete /rebootok "$INSTDIR\bin\libgiomm-2.4-1.dll" Delete /rebootok "$INSTDIR\bin\libgio-2.0-0.dll" Delete /rebootok "$INSTDIR\bin\libstdc++-6.dll" Delete /rebootok "$INSTDIR\bin\libsigc-2.0-0.dll" Delete /rebootok "$INSTDIR\bin\libintl-8.dll" Delete /rebootok "$INSTDIR\bin\iconv.dll" Delete /rebootok "$INSTDIR\bin\libgthread-2.0-0.dll" Delete /rebootok "$INSTDIR\bin\libgobject-2.0-0.dll" Delete /rebootok "$INSTDIR\bin\libgnurx-0.dll" Delete /rebootok "$INSTDIR\bin\libgmodule-2.0-0.dll" Delete /rebootok "$INSTDIR\bin\libglibmm-2.4-1.dll" Delete /rebootok "$INSTDIR\bin\libglib-2.0-0.dll" Delete /rebootok "$INSTDIR\bin\libgcc_s_sjlj-1.dll" Delete /rebootok 
"$INSTDIR\bin\libcrypto-10.dll" Delete /rebootok "$INSTDIR\bin\libwinpthread-1.dll" Delete /rebootok "$INSTDIR\bin\libdb_cxx-5.3.dll" Delete /rebootok "$INSTDIR\bin\libcanl_c++-1.dll" ; Delete /rebootok "$INSTDIR\bin\saml_assertion_init.exe" ; Delete /rebootok "$INSTDIR\bin\perftest.exe" !ifdef USE_GLOBUS Delete /rebootok "$INSTDIR\bin\libarcglobusutils-2.dll" !endif Delete /rebootok "$INSTDIR\bin\libarcxmlsec-3.dll" Delete /rebootok "$INSTDIR\bin\libarcwssecurity-3.dll" Delete /rebootok "$INSTDIR\bin\libarcws-3.dll" Delete /rebootok "$INSTDIR\bin\libarcinfosys-0.dll" Delete /rebootok "$INSTDIR\bin\libarcsecurity-3.dll" Delete /rebootok "$INSTDIR\bin\libarcmessage-3.dll" Delete /rebootok "$INSTDIR\bin\libarcloader-3.dll" Delete /rebootok "$INSTDIR\bin\libarcdatastaging-3.dll" Delete /rebootok "$INSTDIR\bin\libarcdata-3.dll" Delete /rebootok "$INSTDIR\bin\libarccredential-3.dll" Delete /rebootok "$INSTDIR\bin\libarccredentialstore-3.dll" Delete /rebootok "$INSTDIR\bin\libarccommon-3.dll" Delete /rebootok "$INSTDIR\bin\libarccompute-3.dll" Delete /rebootok "$INSTDIR\bin\libarccommunication-3.dll" Delete /rebootok "$INSTDIR\bin\libarccrypto-3.dll" # Delete /rebootok "$INSTDIR\bin\arcemiestest.exe" # Delete /rebootok "$INSTDIR\bin\arcwsrf.exe" Delete /rebootok "$INSTDIR\bin\arctest.exe" Delete /rebootok "$INSTDIR\bin\arcsync.exe" Delete /rebootok "$INSTDIR\bin\arcsub.exe" Delete /rebootok "$INSTDIR\bin\arcstat.exe" Delete /rebootok "$INSTDIR\bin\arcrm.exe" Delete /rebootok "$INSTDIR\bin\arcresume.exe" Delete /rebootok "$INSTDIR\bin\arcresub.exe" Delete /rebootok "$INSTDIR\bin\arcrenew.exe" Delete /rebootok "$INSTDIR\bin\arcrename.exe" Delete /rebootok "$INSTDIR\bin\arcproxyalt.exe" Delete /rebootok "$INSTDIR\bin\arcproxy.exe" Delete /rebootok "$INSTDIR\bin\arcmkdir.exe" Delete /rebootok "$INSTDIR\bin\arcls.exe" Delete /rebootok "$INSTDIR\bin\arckill.exe" Delete /rebootok "$INSTDIR\bin\arcinfo.exe" Delete /rebootok "$INSTDIR\bin\arcget.exe" Delete /rebootok "$INSTDIR\bin\arcecho.exe" Delete /rebootok "$INSTDIR\bin\arccp.exe" Delete /rebootok "$INSTDIR\bin\arcclean.exe" Delete /rebootok "$INSTDIR\bin\arccat.exe" ; NSS Delete /rebootok "$INSTDIR\bin\ssl3.dll" Delete /rebootok "$INSTDIR\bin\softokn3.dll" Delete /rebootok "$INSTDIR\bin\smime3.dll" Delete /rebootok "$INSTDIR\bin\nssutil3.dll" Delete /rebootok "$INSTDIR\bin\nssdbm3.dll" Delete /rebootok "$INSTDIR\bin\nssckbi.dll" Delete /rebootok "$INSTDIR\bin\nss3.dll" Delete /rebootok "$INSTDIR\bin\freebl3.dll" ; NSPR Delete /rebootok "$INSTDIR\bin\libplds4.dll" Delete /rebootok "$INSTDIR\bin\libplc4.dll" Delete /rebootok "$INSTDIR\bin\libnspr4.dll" Delete /rebootok "$INSTDIR\bin\/ssltap.exe" Delete /rebootok "$INSTDIR\bin\/signver.exe" Delete /rebootok "$INSTDIR\bin\/signtool.exe" Delete /rebootok "$INSTDIR\bin\/pk12util.exe" Delete /rebootok "$INSTDIR\bin\/modutil.exe" Delete /rebootok "$INSTDIR\bin\/crlutil.exe" Delete /rebootok "$INSTDIR\bin\/cmsutil.exe" Delete /rebootok "$INSTDIR\bin\/certutil.exe" !ifdef USE_GLOBUS Delete /rebootok "$INSTDIR\bin\libglobus_common-0.dll" Delete /rebootok "$INSTDIR\bin\libglobus_ftp_control-1.dll" Delete /rebootok "$INSTDIR\bin\libglobus_gsi_callback-0.dll" Delete /rebootok "$INSTDIR\bin\libglobus_gsi_cert_utils-0.dll" Delete /rebootok "$INSTDIR\bin\libglobus_gsi_credential-1.dll" Delete /rebootok "$INSTDIR\bin\libglobus_gsi_proxy_core-0.dll" Delete /rebootok "$INSTDIR\bin\libglobus_gsi_sysconfig-1.dll" Delete /rebootok "$INSTDIR\bin\libglobus_gssapi_error-2.dll" Delete /rebootok 
"$INSTDIR\bin\libglobus_gssapi_gsi-4.dll" Delete /rebootok "$INSTDIR\bin\libglobus_gss_assist-3.dll" Delete /rebootok "$INSTDIR\bin\libglobus_io-3.dll" Delete /rebootok "$INSTDIR\bin\libglobus_oldgaa-0.dll" Delete /rebootok "$INSTDIR\bin\libglobus_openssl-0.dll" Delete /rebootok "$INSTDIR\bin\libglobus_openssl_error-0.dll" Delete /rebootok "$INSTDIR\bin\libglobus_proxy_ssl-1.dll" Delete /rebootok "$INSTDIR\bin\libglobus_xio-0.dll" Delete /rebootok "$INSTDIR\bin\libglobus_callout-0.dll" Delete /rebootok "$INSTDIR\bin\libglobus_ftp_client-2.dll" Delete /rebootok "$INSTDIR\bin\libglobus_ftp_control-1.dll" Delete /rebootok "$INSTDIR\bin\libglobus_gssapi_error-2.dll" Delete /rebootok "$INSTDIR\bin\libglobus_gss_assist-3.dll" Delete /rebootok "$INSTDIR\bin\libglobus_io-3.dll" !endif RMDir "$INSTDIR\bin" !ifdef USE_GLOBUS Delete /rebootok "$INSTDIR\lib\libglobus_xio_gsi_driver-0.dll" Delete /rebootok "$INSTDIR\lib\libglobus_xio_popen_driver-0.dll" !endif RMDir "$INSTDIR\lib" Delete /rebootok "$INSTDIR\share\arc\examples\client.conf" RMDir "$INSTDIR\share\arc\examples" RMDir "$INSTDIR\share\arc" RMDir "$INSTDIR\share" RMDir "$INSTDIR\sbin" Delete /rebootok "$INSTDIR\etc\grid-security\certificates\*" Delete /rebootok "$INSTDIR\etc\grid-security\vomses" RMDir "$INSTDIR\etc\grid-security\certificates" RMDir "$INSTDIR\etc\grid-security" Delete /rebootok "$INSTDIR\etc\arc\client.conf" RMDir "$INSTDIR\etc\arc" RMDir "$INSTDIR\etc" Delete /rebootok "$INSTDIR\README.txt" RMDir "$INSTDIR" ${un.EnvVarUpdate} $0 "PATH" "R" "HKLM" "$INSTDIR\bin" ${un.EnvVarUpdate} $0 "PATH" "R" "HKLM" "$INSTDIR\lib" ${un.EnvVarUpdate} $0 "PATH" "R" "HKLM" "$INSTDIR\lib\arc" ${un.EnvVarUpdate} $0 "PATH" "R" "HKLM" "$INSTDIR\sbin" !ifdef USE_PYTHON ${un.EnvVarUpdate} $0 "PYTHONPATH" "R" "HKLM" "$INSTDIR\lib\python${PYVER}\site-packages" !endif DeleteRegValue ${env_hklm} ARC_LOCATION DeleteRegValue ${env_hklm} GLOBUS_LOCATION DeleteRegValue ${env_hklm} X509_CERT_DIR ; DeleteRegValue ${env_hklm} X509_USER_CERT ; DeleteRegValue ${env_hklm} X509_USER_KEY ; DeleteRegValue ${env_hklm} X509_USER_PROXY SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000 SectionEnd Section -post SetShellVarContext all WriteUninstaller "$INSTDIR\Uninstall NorduGrid ARC.exe" # Start Menu createDirectory "$SMPROGRAMS\${COMPANYNAME}" createShortCut "$SMPROGRAMS\${COMPANYNAME}\ARC Storage Explorer.lnk" "$INSTDIR\bin\arcstorage-ui.exe" "" "" "" \ SW_SHOWNORMAL ALT|CONTROL|SHIFT|F5 "Browse Grid storage resources" createShortCut "$SMPROGRAMS\${COMPANYNAME}\ARC Job Submission Tool.lnk" "$INSTDIR\bin\arcsub-ui.exe" "" "" "" \ SW_SHOWNORMAL ALT|CONTROL|SHIFT|F5 "Submit jobs to the Grid" createShortCut "$SMPROGRAMS\${COMPANYNAME}\ARC Proxy Generator.lnk" "$INSTDIR\bin\arcproxy-ui.exe" "" "" "" \ SW_SHOWNORMAL ALT|CONTROL|SHIFT|F5 "Create shortlived X509 proxy certificate" createShortCut "$SMPROGRAMS\${COMPANYNAME}\ARC Certificate Utility.lnk" "$INSTDIR\bin\arccert-ui.exe" "" "" "" \ SW_SHOWNORMAL ALT|CONTROL|SHIFT|F5 "Converts between X509 and PKCS12 certificates" createShortCut "$SMPROGRAMS\${COMPANYNAME}\ARC Job Manager.lnk" "$INSTDIR\bin\arcstat-ui.exe" "" "" "" \ SW_SHOWNORMAL ALT|CONTROL|SHIFT|F5 "Manage running Grid jobs" createShortCut "$SMPROGRAMS\${COMPANYNAME}\Uninstall NorduGrid ARC.lnk" "$INSTDIR\\Uninstall NorduGrid ARC.exe" WriteRegExpandStr ${env_hklm} ARC_LOCATION $INSTDIR WriteRegExpandStr ${env_hklm} GLOBUS_LOCATION $INSTDIR WriteRegExpandStr ${env_hklm} X509_CERT_DIR $INSTDIR\etc\grid-security\certificates ; 
WriteRegExpandStr ${env_hklm} X509_USER_CERT "$PROFILE\.globus\usercert.pem" ; WriteRegExpandStr ${env_hklm} X509_USER_KEY "$PROFILE\.globus\userkey.pem" ; WriteRegExpandStr ${env_hklm} X509_USER_PROXY "$TEMP\x509up_u0" SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000 ${EnvVarUpdate} $0 "PATH" "A" "HKLM" "$INSTDIR\bin" ; Append the new one ${EnvVarUpdate} $0 "PATH" "A" "HKLM" "$INSTDIR\lib" ; Append the new one ${EnvVarUpdate} $0 "PATH" "A" "HKLM" "$INSTDIR\lib\arc" ; Append the new one ${EnvVarUpdate} $0 "PATH" "A" "HKLM" "$INSTDIR\sbin" ; Append the new one !if USE_PYTHON ${EnvVarUpdate} $0 "PYTHONPATH" "A" "HKLM" "$INSTDIR\lib\python${PYVER}\site-packages" !endif SectionEnd Function .onInstSuccess MessageBox MB_YESNO "NorduGrid ARC successfully installed. View README file?" IDNO NoReadme Exec "notepad.exe $INSTDIR/README.txt" NoReadme: FunctionEnd nordugrid-arc-5.4.2/nsis/PaxHeaders.7502/EnvVarUpdate.nsh0000644000000000000000000000012411304206670021226 xustar000000000000000027 mtime=1259408824.547861 27 atime=1513200573.746692 30 ctime=1513200668.710854023 nordugrid-arc-5.4.2/nsis/EnvVarUpdate.nsh0000644000175000002070000002461011304206670021276 0ustar00mockbuildmock00000000000000/** * EnvVarUpdate.nsh * : Environmental Variables: append, prepend, and remove entries * * WARNING: If you use StrFunc.nsh header then include it before this file * with all required definitions. This is to avoid conflicts * * Usage: * ${EnvVarUpdate} "ResultVar" "EnvVarName" "Action" "RegLoc" "PathString" * * Credits: * Version 1.0 * * Cal Turney (turnec2) * * Amir Szekely (KiCHiK) and e-circ for developing the forerunners of this * function: AddToPath, un.RemoveFromPath, AddToEnvVar, un.RemoveFromEnvVar, * WriteEnvStr, and un.DeleteEnvStr * * Diego Pedroso (deguix) for StrTok * * Kevin English (kenglish_hi) for StrContains * * Hendri Adriaens (Smile2Me), Diego Pedroso (deguix), and Dan Fuhry * (dandaman32) for StrReplace * * Version 1.1 (compatibility with StrFunc.nsh) * * techtonik * * http://nsis.sourceforge.net/Environmental_Variables:_append%2C_prepend%2C_and_remove_entries * */ !ifndef ENVVARUPDATE_FUNCTION !define ENVVARUPDATE_FUNCTION !verbose push !verbose 3 !include "LogicLib.nsh" !include "WinMessages.NSH" !include "StrFunc.nsh" ; ---- Fix for conflict if StrFunc.nsh is already includes in main file ----------------------- !macro _IncludeStrFunction StrFuncName !ifndef ${StrFuncName}_INCLUDED ${${StrFuncName}} !endif !ifndef Un${StrFuncName}_INCLUDED ${Un${StrFuncName}} !endif !define un.${StrFuncName} "${Un${StrFuncName}}" !macroend !insertmacro _IncludeStrFunction StrTok !insertmacro _IncludeStrFunction StrStr !insertmacro _IncludeStrFunction StrRep ; ---------------------------------- Macro Definitions ---------------------------------------- !macro _EnvVarUpdateConstructor ResultVar EnvVarName Action Regloc PathString Push "${EnvVarName}" Push "${Action}" Push "${RegLoc}" Push "${PathString}" Call EnvVarUpdate Pop "${ResultVar}" !macroend !define EnvVarUpdate '!insertmacro "_EnvVarUpdateConstructor"' !macro _unEnvVarUpdateConstructor ResultVar EnvVarName Action Regloc PathString Push "${EnvVarName}" Push "${Action}" Push "${RegLoc}" Push "${PathString}" Call un.EnvVarUpdate Pop "${ResultVar}" !macroend !define un.EnvVarUpdate '!insertmacro "_unEnvVarUpdateConstructor"' ; ---------------------------------- Macro Definitions end------------------------------------- ;----------------------------------- EnvVarUpdate start---------------------------------------- 
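; Example usage (an illustrative sketch only; the paths below are placeholders and
; follow the Usage line above and the argument checks in Step 1 of the function):
;   ${EnvVarUpdate} $0 "PATH" "A" "HKLM" "$INSTDIR\bin"      ; append to the machine-wide PATH
;   ${un.EnvVarUpdate} $0 "PATH" "R" "HKLM" "$INSTDIR\bin"   ; remove it again from the uninstaller
; $0 receives the updated value of the variable; Action is "A" (append),
; "P" (prepend) or "R" (remove); RegLoc is "HKLM" or "HKCU".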
!define hklm_all_users 'HKLM "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"' !define hkcu_current_user 'HKCU "Environment"' !macro EnvVarUpdate UN Function ${UN}EnvVarUpdate Push $0 Exch 4 Exch $1 Exch 3 Exch $2 Exch 2 Exch $3 Exch Exch $4 Push $5 Push $6 Push $7 Push $8 Push $9 Push $R0 /* After this point: ------------------------- $0 = ResultVar (returned) $1 = EnvVarName (input) $2 = Action (input) $3 = RegLoc (input) $4 = PathString (input) $5 = Orig EnvVar (read from registry) $6 = Len of $0 (temp) $7 = tempstr1 (temp) $8 = Entry counter (temp) $9 = tempstr2 (temp) $R0 = tempChar (temp) */ ; Step 1: Read contents of EnvVarName from RegLoc ; ; Check for empty EnvVarName ${If} $1 == "" SetErrors DetailPrint "ERROR: EnvVarName is blank" Goto EnvVarUpdate_Restore_Vars ${EndIf} ; Check for valid Action ${If} $2 != "A" ${AndIf} $2 != "P" ${AndIf} $2 != "R" SetErrors DetailPrint "ERROR: Invalid Action - must be A, P, or R" Goto EnvVarUpdate_Restore_Vars ${EndIf} ${If} $3 == HKLM ReadRegStr $5 ${hklm_all_users} $1 ; Get EnvVarName from all users into $5 ${ElseIf} $3 == HKCU ReadRegStr $5 ${hkcu_current_user} $1 ; Read EnvVarName from current user into $5 ${Else} SetErrors DetailPrint 'ERROR: RegLoc is [$3] but must be "HKLM" or "HKCU"' Goto EnvVarUpdate_Restore_Vars ${EndIf} ; Check for empty PathString ${If} $4 == "" SetErrors DetailPrint "ERROR: PathString is blank" Goto EnvVarUpdate_Restore_Vars ${EndIf} ; Make sure we've got some work to do ${If} $5 == "" ${AndIf} $2 == "R" SetErrors DetailPrint "$1 is empty - Nothing to remove" Goto EnvVarUpdate_Restore_Vars ${EndIf} ; Step 2: Scrub EnvVar ; StrCpy $0 $5 ; Copy the contents to $0 ; Remove spaces around semicolons (NOTE: spaces before the 1st entry or ; after the last one are not removed here but instead in Step 3) ${If} $0 != "" ; If EnvVar is not empty ... ${Do} ${${UN}StrStr} $7 $0 " ;" ${If} $7 == "" ${ExitDo} ${EndIf} ${${UN}StrRep} $0 $0 " ;" ";" ; Remove space before ';' ${Loop} ${Do} ${${UN}StrStr} $7 $0 "; " ${If} $7 == "" ${ExitDo} ${EndIf} ${${UN}StrRep} $0 $0 "; " ";" ; Remove space after ';' ${Loop} ${Do} ${${UN}StrStr} $7 $0 ";;" ${If} $7 == "" ${ExitDo} ${EndIf} ${${UN}StrRep} $0 $0 ";;" ";" ${Loop} ; Remove a leading or trailing semicolon from EnvVar StrCpy $7 $0 1 0 ${If} $7 == ";" StrCpy $0 $0 "" 1 ; Change ';' to '' ${EndIf} StrLen $6 $0 IntOp $6 $6 - 1 StrCpy $7 $0 1 $6 ${If} $7 == ";" StrCpy $0 $0 $6 ; Change ';' to '' ${EndIf} ; DetailPrint "Scrubbed $1: [$0]" ; Uncomment to debug ${EndIf} /* Step 3. Remove all instances of the target path/string (even if "A" or "P") $6 = bool flag (1 = found and removed PathString) $7 = a string (e.g. path) delimited by semicolon(s) $8 = entry counter starting at 0 $9 = copy of $0 $R0 = tempChar */ ${If} $5 != "" ; If EnvVar is not empty ...
StrCpy $9 $0 StrCpy $0 "" StrCpy $8 0 StrCpy $6 0 ${Do} ${${UN}StrTok} $7 $9 ";" $8 "0" ; $7 = next entry, $8 = entry counter ${If} $7 == "" ; If we've run out of entries, ${ExitDo} ; were done ${EndIf} ; ; Remove leading and trailing spaces from this entry (critical step for Action=Remove) ${Do} StrCpy $R0 $7 1 ${If} $R0 != " " ${ExitDo} ${EndIf} StrCpy $7 $7 "" 1 ; Remove leading space ${Loop} ${Do} StrCpy $R0 $7 1 -1 ${If} $R0 != " " ${ExitDo} ${EndIf} StrCpy $7 $7 -1 ; Remove trailing space ${Loop} ${If} $7 == $4 ; If string matches, remove it by not appending it StrCpy $6 1 ; Set 'found' flag ${ElseIf} $7 != $4 ; If string does NOT match ${AndIf} $0 == "" ; and the 1st string being added to $0, StrCpy $0 $7 ; copy it to $0 without a prepended semicolon ${ElseIf} $7 != $4 ; If string does NOT match ${AndIf} $0 != "" ; and this is NOT the 1st string to be added to $0, StrCpy $0 $0;$7 ; append path to $0 with a prepended semicolon ${EndIf} ; IntOp $8 $8 + 1 ; Bump counter ${Loop} ; Check for duplicates until we run out of paths ${EndIf} ; Step 4: Perform the requested Action ; ${If} $2 != "R" ; If Append or Prepend ${If} $6 == 1 ; And if we found the target DetailPrint "Target is already present in $1. It will be removed and" ${EndIf} ${If} $0 == "" ; If EnvVar is (now) empty StrCpy $0 $4 ; just copy PathString to EnvVar ${If} $6 == 0 ; If found flag is either 0 ${OrIf} $6 == "" ; or blank (if EnvVarName is empty) DetailPrint "$1 was empty and has been updated with the target" ${EndIf} ${ElseIf} $2 == "A" ; If Append (and EnvVar is not empty), StrCpy $0 $0;$4 ; append PathString ${If} $6 == 1 DetailPrint "appended to $1" ${Else} DetailPrint "Target was appended to $1" ${EndIf} ${Else} ; If Prepend (and EnvVar is not empty), StrCpy $0 $4;$0 ; prepend PathString ${If} $6 == 1 DetailPrint "prepended to $1" ${Else} DetailPrint "Target was prepended to $1" ${EndIf} ${EndIf} ${Else} ; If Action = Remove ${If} $6 == 1 ; and we found the target DetailPrint "Target was found and removed from $1" ${Else} DetailPrint "Target was NOT found in $1 (nothing to remove)" ${EndIf} ${If} $0 == "" DetailPrint "$1 is now empty" ${EndIf} ${EndIf} ; Step 5: Update the registry at RegLoc with the updated EnvVar and announce the change ; ClearErrors ${If} $3 == HKLM WriteRegExpandStr ${hklm_all_users} $1 $0 ; Write it in all users section ${ElseIf} $3 == HKCU WriteRegExpandStr ${hkcu_current_user} $1 $0 ; Write it to current user section ${EndIf} IfErrors 0 +4 MessageBox MB_OK|MB_ICONEXCLAMATION "Could not write updated $1 to $3" DetailPrint "Could not write updated $1 to $3" Goto EnvVarUpdate_Restore_Vars ; "Export" our change SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000 EnvVarUpdate_Restore_Vars: ; ; Restore the user's variables and return ResultVar Pop $R0 Pop $9 Pop $8 Pop $7 Pop $6 Pop $5 Pop $4 Pop $3 Pop $2 Pop $1 Push $0 ; Push my $0 (ResultVar) Exch Pop $0 ; Restore his $0 FunctionEnd !macroend ; EnvVarUpdate UN !insertmacro EnvVarUpdate "" !insertmacro EnvVarUpdate "un." 
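; The macro above is expanded twice: once to generate the installer-side
; EnvVarUpdate function and once with the "un." prefix to generate
; un.EnvVarUpdate, the variant that can be called from uninstaller sections.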
;----------------------------------- EnvVarUpdate end---------------------------------------- !verbose pop !endif nordugrid-arc-5.4.2/nsis/PaxHeaders.7502/vomses0000644000000000000000000000012411776207441017421 xustar000000000000000027 mtime=1341722401.572961 27 atime=1513200573.743692 30 ctime=1513200668.713854059 nordugrid-arc-5.4.2/nsis/vomses0000644000175000002070000000162011776207441017465 0ustar00mockbuildmock00000000000000"gin.ggf.org" "kuiken.nikhef.nl" "15050" "/O=dutchgrid/O=hosts/OU=nikhef.nl/CN=kuiken.nikhef.nl" "gin.ggf.org" "pamela" "voms.cnaf.infn.it" "15013" "/C=IT/O=INFN/OU=Host/L=CNAF/CN=voms.cnaf.infn.it" "pamela" "atlas-old" "voms.cern.ch" "15001" "/C=CH/O=CERN/OU=GRID/CN=host/voms.cern.ch" "atlas-old" "atlas-lcg" "lcg-voms.cern.ch" "15001" "/C=CH/O=CERN/OU=GRID/CN=host/lcg-voms.cern.ch" "atlas-lcg" "knowarc.eu" "arthur.hep.lu.se" "15001" "/O=Grid/O=NorduGrid/CN=host/arthur.hep.lu.se" "knowarc.eu" "atlas" "voms.cern.ch" "15001" "/DC=ch/DC=cern/OU=computers/CN=voms.cern.ch" "atlas" "nordugrid.org" "voms.uninett.no" "15015" "/O=Grid/O=NorduGrid/CN=host/voms.ndgf.org" "nordugrid.org" "playground.knowarc.eu" "arthur.hep.lu.se" "15002" "/O=Grid/O=NorduGrid/CN=host/arthur.hep.lu.se" "playground.knowarc.eu" "dteam" "voms.hellasgrid.gr" "15004" "/C=GR/O=HellasGrid/OU=hellasgrid.gr/CN=voms.hellasgrid.gr" "dteam" nordugrid-arc-5.4.2/nsis/PaxHeaders.7502/arc.nsis.in0000644000000000000000000000012712442365766020243 xustar000000000000000027 mtime=1418324982.290198 30 atime=1513200653.075662798 30 ctime=1513200668.708853998 nordugrid-arc-5.4.2/nsis/arc.nsis.in0000644000175000002070000005562012442365766020315 0ustar00mockbuildmock00000000000000Name "NorduGrid ARC" RequestExecutionLevel admin #NOTE: You still need to check user rights with UserInfo! !include LogicLib.nsh Function .onInit UserInfo::GetAccountType pop $0 ${If} $0 != "admin" ;Require admin rights on NT4+ MessageBox mb_iconstop "Administrator rights required!" SetErrorLevel 740 ;ERROR_ELEVATION_REQUIRED Quit ${EndIf} FunctionEnd OutFile "nordugrid-arc-@VERSION@-1.exe" # Fix lnk removals RequestExecutionLevel user !define COMPANYNAME "NorduGrid" InstallDir "$PROGRAMFILES\${COMPANYNAME}" InstallDirRegKey HKLM SOFTWARE\${COMPANYNAME}\ARC "Install_Dir" ShowInstDetails hide ShowUninstDetails hide XPStyle on !include EnvVarUpdate.nsh ; include for some of the windows messages defines !include "winmessages.nsh" ; HKLM (all users) vs HKCU (current user) defines !define env_hklm 'HKLM "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"' !define env_hkcu 'HKCU "Environment"' !define USE_GUI #!define USE_PYTHON #!define USE_GLOBUS !ifndef SYS_PATH !define SYS_PATH "/usr" !endif !ifndef ARC_LOCATION !define ARC_LOCATION "/opt/arc" !endif !ifndef GLOBUS_LOCATION !define GLOBUS_LOCATION "/opt/globus" !endif !define CERT_PATH "/etc/grid-security/certificates" !define MISC_PATH "." !define PYVER 2.6 PageEx license LicenseData ${MISC_PATH}/license.txt LicenseForceSelection checkbox PageExEnd Page components Page directory Page instfiles UninstPage uninstConfirm UninstPage instfiles ComponentText "Select which optional components you want to install." DirText "Please select the installation folder." 
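; The ${env_hklm} define and the EnvVarUpdate include above are used in
; "Section -post" and "Section Uninstall" below to publish machine-wide
; environment variables, e.g. (taken from Section -post):
;   WriteRegExpandStr ${env_hklm} ARC_LOCATION $INSTDIR
;   SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
; The broadcast asks running applications that handle the message to re-read the
; environment so the new values take effect without a reboot.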
Section "NorduGrid ARC clients" SectionIn RO SetOutPath "$INSTDIR" File "${MISC_PATH}/README.txt" SetOutPath "$INSTDIR\share\arc\examples" File "${ARC_LOCATION}/share/arc/examples/client.conf" SetOutPath "$INSTDIR\bin" File "${ARC_LOCATION}/bin/arccat.exe" File "${ARC_LOCATION}/bin/arcclean.exe" File "${ARC_LOCATION}/bin/arccp.exe" File "${ARC_LOCATION}/bin/arcecho.exe" File "${ARC_LOCATION}/bin/arcget.exe" File "${ARC_LOCATION}/bin/arcinfo.exe" File "${ARC_LOCATION}/bin/arckill.exe" File "${ARC_LOCATION}/bin/arcls.exe" File "${ARC_LOCATION}/bin/arcmkdir.exe" File "${ARC_LOCATION}/bin/arcproxy.exe" File "${ARC_LOCATION}/bin/arcproxyalt.exe" File "${ARC_LOCATION}/bin/arcrename.exe" File "${ARC_LOCATION}/bin/arcrenew.exe" File "${ARC_LOCATION}/bin/arcresub.exe" File "${ARC_LOCATION}/bin/arcresume.exe" File "${ARC_LOCATION}/bin/arcrm.exe" File "${ARC_LOCATION}/bin/arcstat.exe" File "${ARC_LOCATION}/bin/arcsub.exe" File "${ARC_LOCATION}/bin/arcsync.exe" File "${ARC_LOCATION}/bin/arctest.exe" # File "${ARC_LOCATION}/bin/arcwsrf.exe" # File "${ARC_LOCATION}/bin/arcemiestest.exe" File "${ARC_LOCATION}/bin/libarccommunication-3.dll" File "${ARC_LOCATION}/bin/libarccompute-3.dll" File "${ARC_LOCATION}/bin/libarccommon-3.dll" File "${ARC_LOCATION}/bin/libarccredential-3.dll" File "${ARC_LOCATION}/bin/libarccredentialstore-3.dll" File "${ARC_LOCATION}/bin/libarccrypto-3.dll" File "${ARC_LOCATION}/bin/libarcdata-3.dll" File "${ARC_LOCATION}/bin/libarcdatastaging-3.dll" File "${ARC_LOCATION}/bin/libarcloader-3.dll" File "${ARC_LOCATION}/bin/libarcmessage-3.dll" File "${ARC_LOCATION}/bin/libarcsecurity-3.dll" File "${ARC_LOCATION}/bin/libarcinfosys-0.dll" File "${ARC_LOCATION}/bin/libarcws-3.dll" File "${ARC_LOCATION}/bin/libarcwssecurity-3.dll" File "${ARC_LOCATION}/bin/libarcxmlsec-3.dll" !ifdef USE_GLOBUS File "${ARC_LOCATION}/bin/libarcglobusutils-2.dll" !endif ; File "${ARC_LOCATION}/bin/perftest.exe" ; File "${ARC_LOCATION}/bin/saml_assertion_init.exe" # Needed by libcanl_c++-1.dll File "${SYS_PATH}/bin/libwinpthread-1.dll" # Needed by arcproxyalt.exe File "${SYS_PATH}/bin/libcanl_c++-1.dll" # Needed by libcanl_c++-1.dll File "${SYS_PATH}/bin/libdb_cxx-5.3.dll" File "${SYS_PATH}/bin/libcrypto-10.dll" File "${SYS_PATH}/bin/libgcc_s_sjlj-1.dll" File "${SYS_PATH}/bin/libglib-2.0-0.dll" File "${SYS_PATH}/bin/libglibmm-2.4-1.dll" File "${SYS_PATH}/bin/libgmodule-2.0-0.dll" File "${SYS_PATH}/bin/libgnurx-0.dll" File "${SYS_PATH}/bin/libgobject-2.0-0.dll" File "${SYS_PATH}/bin/libgthread-2.0-0.dll" File "${SYS_PATH}/bin/iconv.dll" File "${SYS_PATH}/bin/libintl-8.dll" File "${SYS_PATH}/bin/libgio-2.0-0.dll" File "${SYS_PATH}/bin/libgiomm-2.4-1.dll" File "${SYS_PATH}/bin/libsigc-2.0-0.dll" File "${SYS_PATH}/bin/libssl-10.dll" File "${SYS_PATH}/bin/libstdc++-6.dll" File "${SYS_PATH}/bin/libxml2-2.dll" File "${SYS_PATH}/bin/libxmlsec1.dll" File "${SYS_PATH}/bin/libxmlsec1-openssl.dll" File "${SYS_PATH}/bin/libxslt-1.dll" File "${SYS_PATH}/bin/zlib1.dll" File "${SYS_PATH}/bin/libffi-6.dll" File "${SYS_PATH}/bin/libltdl-7.dll" File "${SYS_PATH}/bin/openssl.exe" !ifdef USE_GUI File "${ARC_LOCATION}/bin/arccert-ui.exe" File "${ARC_LOCATION}/bin/arcproxy-ui.exe" File "${ARC_LOCATION}/bin/arcstat-ui.exe" File "${ARC_LOCATION}/bin/arcstorage-ui.exe" File "${ARC_LOCATION}/bin/arcsub-ui.exe" File "${SYS_PATH}/bin/QtCore4.dll" File "${SYS_PATH}/bin/QtGui4.dll" File "${SYS_PATH}/bin/QtNetwork4.dll" File "${SYS_PATH}/bin/QtWebKit4.dll" File "${SYS_PATH}/bin/libpng16-16.dll" File "${SYS_PATH}/bin/libsqlite3-0.dll" !endif 
; Python !ifdef USE_PYTHON File "${SYS_PATH}/bin/python.exe" File "${SYS_PATH}/bin/python${PYVER}.exe" File "${SYS_PATH}/bin/libpython${PYVER}.dll" File "${SYS_PATH}/bin/pydoc" File "${SYS_PATH}/bin/readline.dll" File "${SYS_PATH}/bin/libtermcap-0.dll" File "${SYS_PATH}/bin/libdb-4.7.dll" SetOutPath "$INSTDIR\lib\python${PYVER}" File /r "${SYS_PATH}/lib/python${PYVER}/**" SetOutPath "$INSTDIR\lib\python${PYVER}\site-packages" File /oname=_arc.pyd "${SYS_PATH}/lib/python${PYVER}/site-packages/_arc.dll" !endif ; Globus ARC dependencies !ifdef USE_GLOBUS SetOutPath "$INSTDIR\bin" File "${GLOBUS_LOCATION}/bin/libglobus_common-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gsi_callback-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gsi_cert_utils-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gsi_credential-1.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gsi_proxy_core-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gsi_sysconfig-1.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gssapi_error-2.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gssapi_gsi-4.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gss_assist-3.dll" File "${GLOBUS_LOCATION}/bin/libglobus_io-3.dll" File "${GLOBUS_LOCATION}/bin/libglobus_openssl-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_openssl_error-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_proxy_ssl-1.dll" File "${GLOBUS_LOCATION}/bin/libglobus_xio-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_ftp_control-1.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gsi_callback-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gsi_credential-1.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gsi_proxy_core-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gssapi_gsi-4.dll" File "${GLOBUS_LOCATION}/bin/libglobus_oldgaa-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_callout-0.dll" File "${GLOBUS_LOCATION}/bin/libglobus_ftp_client-2.dll" File "${GLOBUS_LOCATION}/bin/libglobus_ftp_control-1.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gssapi_error-2.dll" File "${GLOBUS_LOCATION}/bin/libglobus_gss_assist-3.dll" File "${GLOBUS_LOCATION}/bin/libglobus_io-3.dll" ; Globus plugins SetOutPath "$INSTDIR\lib" File "${GLOBUS_LOCATION}/lib/libglobus_xio_gsi_driver-0.dll" File "${GLOBUS_LOCATION}/lib/libglobus_xio_popen_driver-0.dll" !endif SetOutPath "$INSTDIR\lib\arc" File "${ARC_LOCATION}/lib/arc/libaccARC1.dll" File "${ARC_LOCATION}/lib/arc/libaccBroker.dll" File "${ARC_LOCATION}/lib/arc/libaccCREAM.dll" File "${ARC_LOCATION}/lib/arc/libaccEMIES.dll" File "${ARC_LOCATION}/lib/arc/libaccSER.dll" File "${ARC_LOCATION}/lib/arc/libaccldap.dll" ; File "${ARC_LOCATION}/lib/arc/libaccUNICORE.dll" File "${ARC_LOCATION}/lib/arc/libaccJobDescriptionParser.dll" !ifdef USE_PYTHON File "${ARC_LOCATION}/lib/arc/libaccPythonBroker.dll" !endif File "${ARC_LOCATION}/lib/arc/libarcshc.dll" File "${ARC_LOCATION}/lib/arc/libarcshclegacy.dll" File "${ARC_LOCATION}/lib/arc/libdmcacix.dll" File "${ARC_LOCATION}/lib/arc/libdmcfile.dll" File "${ARC_LOCATION}/lib/arc/libdmchttp.dll" File "${ARC_LOCATION}/lib/arc/libdmcrucio.dll" File "${ARC_LOCATION}/lib/arc/libdmcsrm.dll" File "${ARC_LOCATION}/lib/arc/libidentitymap.dll" File "${ARC_LOCATION}/lib/arc/libmcchttp.dll" File "${ARC_LOCATION}/lib/arc/libmccmsgvalidator.dll" File "${ARC_LOCATION}/lib/arc/libmccsoap.dll" File "${ARC_LOCATION}/lib/arc/libmcctcp.dll" File "${ARC_LOCATION}/lib/arc/libmcctls.dll" File "${ARC_LOCATION}/lib/arc/libmodcrypto.dll" File "${ARC_LOCATION}/lib/arc/libmodcredential.dll" ; Extra stuff now we have Globus !ifdef USE_GLOBUS File "${ARC_LOCATION}/lib/arc/libaccARC0.dll" File 
"${ARC_LOCATION}/lib/arc/libdmcgridftp.dll" File "${ARC_LOCATION}/lib/arc/libdmcldap.dll" !endif ; Testing stuff SetOutPath "$INSTDIR\lib\arc\test" File "${ARC_LOCATION}/lib/arc/test/libaccTEST.dll" ;; Fix for bug #1563 ; SetOutPath "$INSTDIR\lib" ; File /oname=libarccrypto.dll "${ARC_LOCATION}/bin/libarccrypto-0.dll" SetOutPath "$INSTDIR\etc\arc" File "${ARC_LOCATION}/etc/arc/client.conf" SetOutPath "$INSTDIR\etc\grid-security" File "${MISC_PATH}/vomses" SectionEnd Section "NSS Utilities" SetOutPath "$INSTDIR\bin" ; NSPR File "${SYS_PATH}/bin/libnspr4.dll" File "${SYS_PATH}/bin/libplc4.dll" File "${SYS_PATH}/bin/libplds4.dll" ; NSS File "${SYS_PATH}/bin/freebl3.dll" File "${SYS_PATH}/bin/nss3.dll" File "${SYS_PATH}/bin/nssckbi.dll" File "${SYS_PATH}/bin/nssdbm3.dll" File "${SYS_PATH}/bin/nssutil3.dll" File "${SYS_PATH}/bin/smime3.dll" File "${SYS_PATH}/bin/softokn3.dll" File "${SYS_PATH}/bin/ssl3.dll" File "${SYS_PATH}/bin/certutil.exe" File "${SYS_PATH}/bin/cmsutil.exe" File "${SYS_PATH}/bin/crlutil.exe" File "${SYS_PATH}/bin/modutil.exe" File "${SYS_PATH}/bin/pk12util.exe" File "${SYS_PATH}/bin/signtool.exe" File "${SYS_PATH}/bin/signver.exe" File "${SYS_PATH}/bin/ssltap.exe" SectionEnd Section "IGTF Certificates" SetOutPath "$INSTDIR\etc\grid-security\certificates" File "${CERT_PATH}/*.signing_policy" File "${CERT_PATH}/*.info" File "${CERT_PATH}/*.namespaces" File "${CERT_PATH}/*.crl_url" File "${CERT_PATH}/*.0" SectionEnd Section "Uninstall" SetShellVarContext all Delete /rebootok "$INSTDIR\Uninstall NorduGrid ARC.exe" Delete "$SMPROGRAMS\${COMPANYNAME}\Uninstall NorduGrid ARC.lnk" Delete "$SMPROGRAMS\${COMPANYNAME}\ARC Job Manager.lnk" Delete "$SMPROGRAMS\${COMPANYNAME}\ARC Certificate Utility.lnk" Delete "$SMPROGRAMS\${COMPANYNAME}\ARC Proxy Generator.lnk" Delete "$SMPROGRAMS\${COMPANYNAME}\ARC Job Submission Tool.lnk" Delete "$SMPROGRAMS\${COMPANYNAME}\ARC Storage Explorer.lnk" rmDir "$SMPROGRAMS\${COMPANYNAME}" Delete /rebootok "$INSTDIR\lib\arc\test\libaccTEST.dll" RMDir "$INSTDIR\lib\arc\test" Delete /rebootok "$INSTDIR\lib\arc\libmodcrypto.dll" Delete /rebootok "$INSTDIR\lib\arc\libmodcredential.dll" Delete /rebootok "$INSTDIR\lib\arc\libmcctls.dll" Delete /rebootok "$INSTDIR\lib\arc\libmcctcp.dll" Delete /rebootok "$INSTDIR\lib\arc\libmccsoap.dll" Delete /rebootok "$INSTDIR\lib\arc\libmccmsgvalidator.dll" Delete /rebootok "$INSTDIR\lib\arc\libmcchttp.dll" Delete /rebootok "$INSTDIR\lib\arc\libidentitymap.dll" Delete /rebootok "$INSTDIR\lib\arc\libdmcsrm.dll" Delete /rebootok "$INSTDIR\lib\arc\libdmcrucio.dll" Delete /rebootok "$INSTDIR\lib\arc\libdmchttp.dll" Delete /rebootok "$INSTDIR\lib\arc\libdmcfile.dll" Delete /rebootok "$INSTDIR\lib\arc\libdmcacix.dll" Delete /rebootok "$INSTDIR\lib\arc\libarcshc.dll" Delete /rebootok "$INSTDIR\lib\arc\libarcshclegacy.dll" !ifdef USE_PYTHON Delete /rebootok "$INSTDIR\lib\arc\libaccPythonBroker.dll" !endif Delete /rebootok "$INSTDIR\lib\arc\libaccJobDescriptionParser.dll" ; Delete /rebootok "$INSTDIR\lib\arc\libaccUNICORE.dll" Delete /rebootok "$INSTDIR\lib\arc\libaccldap.dll" Delete /rebootok "$INSTDIR\lib\arc\libaccSER.dll" Delete /rebootok "$INSTDIR\lib\arc\libaccEMIES.dll" Delete /rebootok "$INSTDIR\lib\arc\libaccCREAM.dll" Delete /rebootok "$INSTDIR\lib\arc\libaccBroker.dll" Delete /rebootok "$INSTDIR\lib\arc\libaccARC1.dll" !ifdef USE_GLOBUS Delete /rebootok "$INSTDIR\lib\arc\libaccARC0.dll" Delete /rebootok "$INSTDIR\lib\arc\libdmcldap.dll" Delete /rebootok "$INSTDIR\lib\arc\libdmcgridftp.dll" !endif !ifdef USE_PYTHON Delete 
/rebootok "$INSTDIR\lib\arc\libpythonservice.dll" !endif Delete /rebootok "$INSTDIR\lib\arc\libecho.dll" Delete /rebootok "$INSTDIR\lib\arc\libslcs.dll" RMDir "$INSTDIR\lib\arc" ; Delete /rebootok "$INSTDIR\lib\libarccrypto.dll" RMDir "$INSTDIR\lib" !ifdef USE_PYTHON RmDir /r "$INSTDIR\lib\python${PYVER}" Delete /rebootok "$INSTDIR\bin\libdb-4.7.dll" Delete /rebootok "$INSTDIR\bin\libtermcap-0.dll" Delete /rebootok "$INSTDIR\bin\readline.dll" Delete /rebootok "$INSTDIR\bin\pydoc" Delete /rebootok "$INSTDIR\bin\libpython${PYVER}.dll" Delete /rebootok "$INSTDIR\bin\python${PYVER}.exe" Delete /rebootok "$INSTDIR\bin\python.exe" !endif !ifdef USE_GUI Delete /rebootok "$INSTDIR\bin\libsqlite3-0.dll" Delete /rebootok "$INSTDIR\bin\libpng16-16.dll" Delete /rebootok "$INSTDIR\bin\QtWebKit4.dll" Delete /rebootok "$INSTDIR\bin\QtNetwork4.dll" Delete /rebootok "$INSTDIR\bin\QtGui4.dll" Delete /rebootok "$INSTDIR\bin\QtCore4.dll" Delete /rebootok "$INSTDIR\bin\arcsub-ui.exe" Delete /rebootok "$INSTDIR\bin\arcstorage-ui.exe" Delete /rebootok "$INSTDIR\bin\arcstat-ui.exe" Delete /rebootok "$INSTDIR\bin\arcproxy-ui.exe" Delete /rebootok "$INSTDIR\bin\arccert-ui.exe" !endif Delete /rebootok "$INSTDIR\bin\openssl.exe" Delete /rebootok "$INSTDIR\bin\libltdl-7.dll" Delete /rebootok "$INSTDIR\bin\libffi-6.dll" Delete /rebootok "$INSTDIR\bin\zlib1.dll" Delete /rebootok "$INSTDIR\bin\libxslt-1.dll" Delete /rebootok "$INSTDIR\bin\libxmlsec1-openssl.dll" Delete /rebootok "$INSTDIR\bin\libxmlsec1.dll" Delete /rebootok "$INSTDIR\bin\libxml2-2.dll" Delete /rebootok "$INSTDIR\bin\libssl-10.dll" Delete /rebootok "$INSTDIR\bin\libgiomm-2.4-1.dll" Delete /rebootok "$INSTDIR\bin\libgio-2.0-0.dll" Delete /rebootok "$INSTDIR\bin\libstdc++-6.dll" Delete /rebootok "$INSTDIR\bin\libsigc-2.0-0.dll" Delete /rebootok "$INSTDIR\bin\libintl-8.dll" Delete /rebootok "$INSTDIR\bin\iconv.dll" Delete /rebootok "$INSTDIR\bin\libgthread-2.0-0.dll" Delete /rebootok "$INSTDIR\bin\libgobject-2.0-0.dll" Delete /rebootok "$INSTDIR\bin\libgnurx-0.dll" Delete /rebootok "$INSTDIR\bin\libgmodule-2.0-0.dll" Delete /rebootok "$INSTDIR\bin\libglibmm-2.4-1.dll" Delete /rebootok "$INSTDIR\bin\libglib-2.0-0.dll" Delete /rebootok "$INSTDIR\bin\libgcc_s_sjlj-1.dll" Delete /rebootok "$INSTDIR\bin\libcrypto-10.dll" Delete /rebootok "$INSTDIR\bin\libwinpthread-1.dll" Delete /rebootok "$INSTDIR\bin\libdb_cxx-5.3.dll" Delete /rebootok "$INSTDIR\bin\libcanl_c++-1.dll" ; Delete /rebootok "$INSTDIR\bin\saml_assertion_init.exe" ; Delete /rebootok "$INSTDIR\bin\perftest.exe" !ifdef USE_GLOBUS Delete /rebootok "$INSTDIR\bin\libarcglobusutils-2.dll" !endif Delete /rebootok "$INSTDIR\bin\libarcxmlsec-3.dll" Delete /rebootok "$INSTDIR\bin\libarcwssecurity-3.dll" Delete /rebootok "$INSTDIR\bin\libarcws-3.dll" Delete /rebootok "$INSTDIR\bin\libarcinfosys-0.dll" Delete /rebootok "$INSTDIR\bin\libarcsecurity-3.dll" Delete /rebootok "$INSTDIR\bin\libarcmessage-3.dll" Delete /rebootok "$INSTDIR\bin\libarcloader-3.dll" Delete /rebootok "$INSTDIR\bin\libarcdatastaging-3.dll" Delete /rebootok "$INSTDIR\bin\libarcdata-3.dll" Delete /rebootok "$INSTDIR\bin\libarccredential-3.dll" Delete /rebootok "$INSTDIR\bin\libarccredentialstore-3.dll" Delete /rebootok "$INSTDIR\bin\libarccommon-3.dll" Delete /rebootok "$INSTDIR\bin\libarccompute-3.dll" Delete /rebootok "$INSTDIR\bin\libarccommunication-3.dll" Delete /rebootok "$INSTDIR\bin\libarccrypto-3.dll" # Delete /rebootok "$INSTDIR\bin\arcemiestest.exe" # Delete /rebootok "$INSTDIR\bin\arcwsrf.exe" Delete /rebootok 
"$INSTDIR\bin\arctest.exe" Delete /rebootok "$INSTDIR\bin\arcsync.exe" Delete /rebootok "$INSTDIR\bin\arcsub.exe" Delete /rebootok "$INSTDIR\bin\arcstat.exe" Delete /rebootok "$INSTDIR\bin\arcrm.exe" Delete /rebootok "$INSTDIR\bin\arcresume.exe" Delete /rebootok "$INSTDIR\bin\arcresub.exe" Delete /rebootok "$INSTDIR\bin\arcrenew.exe" Delete /rebootok "$INSTDIR\bin\arcrename.exe" Delete /rebootok "$INSTDIR\bin\arcproxyalt.exe" Delete /rebootok "$INSTDIR\bin\arcproxy.exe" Delete /rebootok "$INSTDIR\bin\arcmkdir.exe" Delete /rebootok "$INSTDIR\bin\arcls.exe" Delete /rebootok "$INSTDIR\bin\arckill.exe" Delete /rebootok "$INSTDIR\bin\arcinfo.exe" Delete /rebootok "$INSTDIR\bin\arcget.exe" Delete /rebootok "$INSTDIR\bin\arcecho.exe" Delete /rebootok "$INSTDIR\bin\arccp.exe" Delete /rebootok "$INSTDIR\bin\arcclean.exe" Delete /rebootok "$INSTDIR\bin\arccat.exe" ; NSS Delete /rebootok "$INSTDIR\bin\ssl3.dll" Delete /rebootok "$INSTDIR\bin\softokn3.dll" Delete /rebootok "$INSTDIR\bin\smime3.dll" Delete /rebootok "$INSTDIR\bin\nssutil3.dll" Delete /rebootok "$INSTDIR\bin\nssdbm3.dll" Delete /rebootok "$INSTDIR\bin\nssckbi.dll" Delete /rebootok "$INSTDIR\bin\nss3.dll" Delete /rebootok "$INSTDIR\bin\freebl3.dll" ; NSPR Delete /rebootok "$INSTDIR\bin\libplds4.dll" Delete /rebootok "$INSTDIR\bin\libplc4.dll" Delete /rebootok "$INSTDIR\bin\libnspr4.dll" Delete /rebootok "$INSTDIR\bin\/ssltap.exe" Delete /rebootok "$INSTDIR\bin\/signver.exe" Delete /rebootok "$INSTDIR\bin\/signtool.exe" Delete /rebootok "$INSTDIR\bin\/pk12util.exe" Delete /rebootok "$INSTDIR\bin\/modutil.exe" Delete /rebootok "$INSTDIR\bin\/crlutil.exe" Delete /rebootok "$INSTDIR\bin\/cmsutil.exe" Delete /rebootok "$INSTDIR\bin\/certutil.exe" !ifdef USE_GLOBUS Delete /rebootok "$INSTDIR\bin\libglobus_common-0.dll" Delete /rebootok "$INSTDIR\bin\libglobus_ftp_control-1.dll" Delete /rebootok "$INSTDIR\bin\libglobus_gsi_callback-0.dll" Delete /rebootok "$INSTDIR\bin\libglobus_gsi_cert_utils-0.dll" Delete /rebootok "$INSTDIR\bin\libglobus_gsi_credential-1.dll" Delete /rebootok "$INSTDIR\bin\libglobus_gsi_proxy_core-0.dll" Delete /rebootok "$INSTDIR\bin\libglobus_gsi_sysconfig-1.dll" Delete /rebootok "$INSTDIR\bin\libglobus_gssapi_error-2.dll" Delete /rebootok "$INSTDIR\bin\libglobus_gssapi_gsi-4.dll" Delete /rebootok "$INSTDIR\bin\libglobus_gss_assist-3.dll" Delete /rebootok "$INSTDIR\bin\libglobus_io-3.dll" Delete /rebootok "$INSTDIR\bin\libglobus_oldgaa-0.dll" Delete /rebootok "$INSTDIR\bin\libglobus_openssl-0.dll" Delete /rebootok "$INSTDIR\bin\libglobus_openssl_error-0.dll" Delete /rebootok "$INSTDIR\bin\libglobus_proxy_ssl-1.dll" Delete /rebootok "$INSTDIR\bin\libglobus_xio-0.dll" Delete /rebootok "$INSTDIR\bin\libglobus_callout-0.dll" Delete /rebootok "$INSTDIR\bin\libglobus_ftp_client-2.dll" Delete /rebootok "$INSTDIR\bin\libglobus_ftp_control-1.dll" Delete /rebootok "$INSTDIR\bin\libglobus_gssapi_error-2.dll" Delete /rebootok "$INSTDIR\bin\libglobus_gss_assist-3.dll" Delete /rebootok "$INSTDIR\bin\libglobus_io-3.dll" !endif RMDir "$INSTDIR\bin" !ifdef USE_GLOBUS Delete /rebootok "$INSTDIR\lib\libglobus_xio_gsi_driver-0.dll" Delete /rebootok "$INSTDIR\lib\libglobus_xio_popen_driver-0.dll" !endif RMDir "$INSTDIR\lib" Delete /rebootok "$INSTDIR\share\arc\examples\client.conf" RMDir "$INSTDIR\share\arc\examples" RMDir "$INSTDIR\share\arc" RMDir "$INSTDIR\share" RMDir "$INSTDIR\sbin" Delete /rebootok "$INSTDIR\etc\grid-security\certificates\*" Delete /rebootok "$INSTDIR\etc\grid-security\vomses" RMDir 
"$INSTDIR\etc\grid-security\certificates" RMDir "$INSTDIR\etc\grid-security" Delete /rebootok "$INSTDIR\etc\arc\client.conf" RMDir "$INSTDIR\etc\arc" RMDir "$INSTDIR\etc" Delete /rebootok "$INSTDIR\README.txt" RMDir "$INSTDIR" ${un.EnvVarUpdate} $0 "PATH" "R" "HKLM" "$INSTDIR\bin" ${un.EnvVarUpdate} $0 "PATH" "R" "HKLM" "$INSTDIR\lib" ${un.EnvVarUpdate} $0 "PATH" "R" "HKLM" "$INSTDIR\lib\arc" ${un.EnvVarUpdate} $0 "PATH" "R" "HKLM" "$INSTDIR\sbin" !ifdef USE_PYTHON ${un.EnvVarUpdate} $0 "PYTHONPATH" "R" "HKLM" "$INSTDIR\lib\python${PYVER}\site-packages" !endif DeleteRegValue ${env_hklm} ARC_LOCATION DeleteRegValue ${env_hklm} GLOBUS_LOCATION DeleteRegValue ${env_hklm} X509_CERT_DIR ; DeleteRegValue ${env_hklm} X509_USER_CERT ; DeleteRegValue ${env_hklm} X509_USER_KEY ; DeleteRegValue ${env_hklm} X509_USER_PROXY SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000 SectionEnd Section -post SetShellVarContext all WriteUninstaller "$INSTDIR\Uninstall NorduGrid ARC.exe" # Start Menu createDirectory "$SMPROGRAMS\${COMPANYNAME}" createShortCut "$SMPROGRAMS\${COMPANYNAME}\ARC Storage Explorer.lnk" "$INSTDIR\bin\arcstorage-ui.exe" "" "" "" \ SW_SHOWNORMAL ALT|CONTROL|SHIFT|F5 "Browse Grid storage resources" createShortCut "$SMPROGRAMS\${COMPANYNAME}\ARC Job Submission Tool.lnk" "$INSTDIR\bin\arcsub-ui.exe" "" "" "" \ SW_SHOWNORMAL ALT|CONTROL|SHIFT|F5 "Submit jobs to the Grid" createShortCut "$SMPROGRAMS\${COMPANYNAME}\ARC Proxy Generator.lnk" "$INSTDIR\bin\arcproxy-ui.exe" "" "" "" \ SW_SHOWNORMAL ALT|CONTROL|SHIFT|F5 "Create shortlived X509 proxy certificate" createShortCut "$SMPROGRAMS\${COMPANYNAME}\ARC Certificate Utility.lnk" "$INSTDIR\bin\arccert-ui.exe" "" "" "" \ SW_SHOWNORMAL ALT|CONTROL|SHIFT|F5 "Converts between X509 and PKCS12 certificates" createShortCut "$SMPROGRAMS\${COMPANYNAME}\ARC Job Manager.lnk" "$INSTDIR\bin\arcstat-ui.exe" "" "" "" \ SW_SHOWNORMAL ALT|CONTROL|SHIFT|F5 "Manage running Grid jobs" createShortCut "$SMPROGRAMS\${COMPANYNAME}\Uninstall NorduGrid ARC.lnk" "$INSTDIR\\Uninstall NorduGrid ARC.exe" WriteRegExpandStr ${env_hklm} ARC_LOCATION $INSTDIR WriteRegExpandStr ${env_hklm} GLOBUS_LOCATION $INSTDIR WriteRegExpandStr ${env_hklm} X509_CERT_DIR $INSTDIR\etc\grid-security\certificates ; WriteRegExpandStr ${env_hklm} X509_USER_CERT "$PROFILE\.globus\usercert.pem" ; WriteRegExpandStr ${env_hklm} X509_USER_KEY "$PROFILE\.globus\userkey.pem" ; WriteRegExpandStr ${env_hklm} X509_USER_PROXY "$TEMP\x509up_u0" SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000 ${EnvVarUpdate} $0 "PATH" "A" "HKLM" "$INSTDIR\bin" ; Append the new one ${EnvVarUpdate} $0 "PATH" "A" "HKLM" "$INSTDIR\lib" ; Append the new one ${EnvVarUpdate} $0 "PATH" "A" "HKLM" "$INSTDIR\lib\arc" ; Append the new one ${EnvVarUpdate} $0 "PATH" "A" "HKLM" "$INSTDIR\sbin" ; Append the new one !if USE_PYTHON ${EnvVarUpdate} $0 "PYTHONPATH" "A" "HKLM" "$INSTDIR\lib\python${PYVER}\site-packages" !endif SectionEnd Function .onInstSuccess MessageBox MB_YESNO "NorduGrid ARC successfully installed. View README file?" 
IDNO NoReadme Exec "notepad.exe $INSTDIR/README.txt" NoReadme: FunctionEnd nordugrid-arc-5.4.2/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315736017246 xustar000000000000000030 mtime=1513200606.594094311 30 atime=1513200647.719597292 30 ctime=1513200658.602730397 nordugrid-arc-5.4.2/Makefile.in0000644000175000002070000007725713214315736017336 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ # /opt/local is the location for macports on MacOS X VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = . DIST_COMMON = README $(am__configure_deps) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in $(srcdir)/arcbase.pc.in \ $(srcdir)/config.h.in $(srcdir)/mingw-nordugrid-arc.spec.in \ $(srcdir)/nordugrid-arc.spec.in $(top_srcdir)/configure \ $(top_srcdir)/src/hed/profiles/general/general.xml.in \ ABOUT-NLS AUTHORS ChangeLog NEWS config.guess config.rpath \ config.sub depcomp install-sh ltmain.sh missing mkinstalldirs \ py-compile ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ configure.lineno config.status.lineno mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = config.h CONFIG_CLEAN_FILES = src/hed/profiles/general/general.xml \ nordugrid-arc.spec mingw-nordugrid-arc.spec arcbase.pc CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive am__vpath_adj_setup = srcdirstrip=`echo 
"$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkgconfigdir)" DATA = $(pkgconfig_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir dist dist-all distcheck ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) distdir = $(PACKAGE)-$(VERSION) top_distdir = $(distdir) am__remove_distdir = \ { test ! -d "$(distdir)" \ || { find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \ && rm -fr "$(distdir)"; }; } am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" DIST_ARCHIVES = $(distdir).tar.gz GZIP_ENV = --best distuninstallcheck_listfiles = find . -type f -print distcleancheck_listfiles = find . 
-type f -print pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = 
@GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ 
build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = $(libdir)/pkgconfig pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ ACLOCAL_AMFLAGS = -I m4 `test -d /opt/local/share/aclocal && echo -I /opt/local/share/aclocal` @JAVA_SWIG_ENABLED_TRUE@JAVA_SD = java @SWIG_ENABLED_TRUE@SWIG_SD = swig SUBDIRS = src include $(SWIG_SD) $(JAVA_SD) python $(POSUB) debian nsis DIST_SUBDIRS = src include swig java python po debian nsis EXTRA_DIST = nordugrid-arc.spec mingw-nordugrid-arc.spec \ nordugrid-arc.SlackBuild autogen.sh LICENSE NOTICE \ selinux/nordugrid-arc-egiis.te selinux/nordugrid-arc-egiis.fc pkgconfig_DATA = arcbase.pc all: config.h $(MAKE) $(AM_MAKEFLAGS) all-recursive .SUFFIXES: am--refresh: @: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \ $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \ && exit 0; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ echo ' $(SHELL) ./config.status'; \ $(SHELL) ./config.status;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) $(SHELL) ./config.status --recheck $(top_srcdir)/configure: $(am__configure_deps) $(am__cd) $(srcdir) && $(AUTOCONF) $(ACLOCAL_M4): $(am__aclocal_m4_deps) $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) $(am__aclocal_m4_deps): config.h: stamp-h1 @if test ! 
-f $@; then \ rm -f stamp-h1; \ $(MAKE) $(AM_MAKEFLAGS) stamp-h1; \ else :; fi stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status @rm -f stamp-h1 cd $(top_builddir) && $(SHELL) ./config.status config.h $(srcdir)/config.h.in: $(am__configure_deps) ($(am__cd) $(top_srcdir) && $(AUTOHEADER)) rm -f stamp-h1 touch $@ distclean-hdr: -rm -f config.h stamp-h1 src/hed/profiles/general/general.xml: $(top_builddir)/config.status $(top_srcdir)/src/hed/profiles/general/general.xml.in cd $(top_builddir) && $(SHELL) ./config.status $@ nordugrid-arc.spec: $(top_builddir)/config.status $(srcdir)/nordugrid-arc.spec.in cd $(top_builddir) && $(SHELL) ./config.status $@ mingw-nordugrid-arc.spec: $(top_builddir)/config.status $(srcdir)/mingw-nordugrid-arc.spec.in cd $(top_builddir) && $(SHELL) ./config.status $@ arcbase.pc: $(top_builddir)/config.status $(srcdir)/arcbase.pc.in cd $(top_builddir) && $(SHELL) ./config.status $@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs distclean-libtool: -rm -f libtool config.lt install-pkgconfigDATA: $(pkgconfig_DATA) @$(NORMAL_INSTALL) test -z "$(pkgconfigdir)" || $(MKDIR_P) "$(DESTDIR)$(pkgconfigdir)" @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgconfigdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgconfigdir)" || exit $$?; \ done uninstall-pkgconfigDATA: @$(NORMAL_UNINSTALL) @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgconfigdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgconfigdir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) $(am__remove_distdir) test -d "$(distdir)" || mkdir "$(distdir)" @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done -test -n "$(am__skip_mode_fix)" \ || find "$(distdir)" -type d ! -perm -755 \ -exec chmod u+rwx,go+rx {} \; -o \ ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ ! -type d ! 
-perm -400 -exec chmod a+r {} \; -o \ ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ || chmod -R a+r "$(distdir)" dist-gzip: distdir tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz $(am__remove_distdir) dist-bzip2: distdir tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2 $(am__remove_distdir) dist-lzma: distdir tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma $(am__remove_distdir) dist-xz: distdir tardir=$(distdir) && $(am__tar) | xz -c >$(distdir).tar.xz $(am__remove_distdir) dist-tarZ: distdir tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z $(am__remove_distdir) dist-shar: distdir shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz $(am__remove_distdir) dist-zip: distdir -rm -f $(distdir).zip zip -rq $(distdir).zip $(distdir) $(am__remove_distdir) dist dist-all: distdir tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz $(am__remove_distdir) # This target untars the dist file and tries a VPATH configuration. Then # it guarantees that the distribution is self-contained by making another # tarfile. distcheck: dist case '$(DIST_ARCHIVES)' in \ *.tar.gz*) \ GZIP=$(GZIP_ENV) gzip -dc $(distdir).tar.gz | $(am__untar) ;;\ *.tar.bz2*) \ bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ *.tar.lzma*) \ lzma -dc $(distdir).tar.lzma | $(am__untar) ;;\ *.tar.xz*) \ xz -dc $(distdir).tar.xz | $(am__untar) ;;\ *.tar.Z*) \ uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ *.shar.gz*) \ GZIP=$(GZIP_ENV) gzip -dc $(distdir).shar.gz | unshar ;;\ *.zip*) \ unzip $(distdir).zip ;;\ esac chmod -R a-w $(distdir); chmod u+w $(distdir) mkdir $(distdir)/_build mkdir $(distdir)/_inst chmod a-w $(distdir) test -d $(distdir)/_build || exit 0; \ dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ && am__cwd=`pwd` \ && $(am__cd) $(distdir)/_build \ && ../configure --srcdir=.. --prefix="$$dc_install_base" \ $(DISTCHECK_CONFIGURE_FLAGS) \ && $(MAKE) $(AM_MAKEFLAGS) \ && $(MAKE) $(AM_MAKEFLAGS) dvi \ && $(MAKE) $(AM_MAKEFLAGS) check \ && $(MAKE) $(AM_MAKEFLAGS) install \ && $(MAKE) $(AM_MAKEFLAGS) installcheck \ && $(MAKE) $(AM_MAKEFLAGS) uninstall \ && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ distuninstallcheck \ && chmod -R a-w "$$dc_install_base" \ && ({ \ (cd ../.. && umask 077 && mkdir "$$dc_destdir") \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ } || { rm -rf "$$dc_destdir"; exit 1; }) \ && rm -rf "$$dc_destdir" \ && $(MAKE) $(AM_MAKEFLAGS) dist \ && rm -rf $(DIST_ARCHIVES) \ && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \ && cd "$$am__cwd" \ || exit 1 $(am__remove_distdir) @(echo "$(distdir) archives ready for distribution: "; \ list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' distuninstallcheck: @$(am__cd) '$(distuninstallcheck_dir)' \ && test `$(distuninstallcheck_listfiles) | wc -l` -le 1 \ || { echo "ERROR: files left after uninstall:" ; \ if test -n "$(DESTDIR)"; then \ echo " (check DESTDIR support)"; \ fi ; \ $(distuninstallcheck_listfiles) ; \ exit 1; } >&2 distcleancheck: distclean @if test '$(srcdir)' = . 
; then \ echo "ERROR: distcleancheck can only run from a VPATH build" ; \ exit 1 ; \ fi @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ || { echo "ERROR: files left in build directory after distclean:" ; \ $(distcleancheck_listfiles) ; \ exit 1; } >&2 check-am: all-am check: check-recursive all-am: Makefile $(DATA) config.h installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkgconfigdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f $(am__CONFIG_DISTCLEAN_FILES) -rm -f Makefile distclean-am: clean-am distclean-generic distclean-hdr \ distclean-libtool distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-pkgconfigDATA install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f $(am__CONFIG_DISTCLEAN_FILES) -rm -rf $(top_srcdir)/autom4te.cache -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-pkgconfigDATA .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) all \ ctags-recursive install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am am--refresh check check-am clean clean-generic \ clean-libtool ctags ctags-recursive dist dist-all dist-bzip2 \ dist-gzip dist-lzma dist-shar dist-tarZ dist-xz dist-zip \ distcheck distclean distclean-generic distclean-hdr \ distclean-libtool distclean-tags distcleancheck distdir \ distuninstallcheck dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkgconfigDATA install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am uninstall-pkgconfigDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. 
# Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/PaxHeaders.7502/config.h.in0000644000000000000000000000013213214315716017222 xustar000000000000000030 mtime=1513200590.137893045 30 atime=1513200653.108663202 30 ctime=1513200658.604730421 nordugrid-arc-5.4.2/config.h.in0000644000175000002070000004120613214315716017273 0ustar00mockbuildmock00000000000000/* config.h.in. Generated from configure.ac by autoheader. */ /* Define to 1 if the `closedir' function returns void instead of `int'. */ #undef CLOSEDIR_VOID /* define if to build job information in BDB storage */ #undef DBJSTORE_ENABLED /* Define to 1 if translation of program messages to the user's native language is requested. */ #undef ENABLE_NLS /* Globus GSSAPI GSI is for OpenSSL post-1.1 */ #undef GLOBUS_GSSAPI_GSI_OLD_OPENSSL /* Globus GSSAPI GSI version */ #undef GLOBUS_GSSAPI_GSI_VERSION /* Globus IO version */ #undef GLOBUS_IO_VERSION /* Define to 1 if you have the `acl' function. */ #undef HAVE_ACL /* Define to 1 if you have the `alarm' function. */ #undef HAVE_ALARM /* Define to 1 if you have the header file. */ #undef HAVE_ARPA_INET_H /* define if CANL++ is enabled and available */ #undef HAVE_CANLXX /* Define to 1 if your system has a working `chown' function. */ #undef HAVE_CHOWN /* define if Berkeley DB C++ binding is available */ #undef HAVE_DBCXX /* define if the Berkeley DB has DbDeadLockException */ #undef HAVE_DBDEADLOCKEXCEPTION /* Define to 1 if you have the header file. */ #undef HAVE_DB_CXX_H /* Define if the GNU dcgettext() function is already present or preinstalled. */ #undef HAVE_DCGETTEXT /* Define to 1 if you have the declaration of `strerror_r', and to 0 if you don't. */ #undef HAVE_DECL_STRERROR_R /* Define to 1 if you have the header file, and it defines `DIR'. */ #undef HAVE_DIRENT_H /* Define to 1 if you have the header file. */ #undef HAVE_DLFCN_H /* define if DTLSv1_2_method is available */ #undef HAVE_DTLSV1_2_METHOD /* define if DTLSv1_method is available */ #undef HAVE_DTLSV1_METHOD /* define if DTLS_method is available */ #undef HAVE_DTLS_METHOD /* Define to 1 if you have the `dup2' function. */ #undef HAVE_DUP2 /* Define to 1 if you have the header file. */ #undef HAVE_DUSTAT_H /* Define to 1 if you have the header file. */ #undef HAVE_FCNTL_H /* Define to 1 if you have the header file. */ #undef HAVE_FLOAT_H /* Define to 1 if you have the `floor' function. */ #undef HAVE_FLOOR /* Define to 1 if you have the `fork' function. */ #undef HAVE_FORK /* Define to 1 if you have the `fstatfs' function. */ #undef HAVE_FSTATFS /* Define to 1 if you have the `ftruncate' function. */ #undef HAVE_FTRUNCATE /* let's suppose the unix-alike (except solaris) has gethostname method */ #undef HAVE_GETDOMAINNAME /* Define to 1 if you have the `getgrouplist' function. */ #undef HAVE_GETGROUPLIST /* Define to 1 if you have the `gethostname' function. */ #undef HAVE_GETHOSTNAME /* Define to 1 if you have the header file. */ #undef HAVE_GETOPT_H /* Define to 1 if you have the `getopt_long_only' function. */ #undef HAVE_GETOPT_LONG_ONLY /* Define to 1 if you have the `getpid' function. */ #undef HAVE_GETPID /* Define if the GNU gettext() function is already present or preinstalled. 
*/ #undef HAVE_GETTEXT /* define if giomm is supported in glibmm */ #undef HAVE_GIOMM /* define if glibmm have support local symbol resolution in shared libraries */ #undef HAVE_GLIBMM_BIND_LOCAL /* define if glibmm have support for controling state of children processes */ #undef HAVE_GLIBMM_CHILDWATCH /* define if glibmm have getenv operations */ #undef HAVE_GLIBMM_GETENV /* define if glibmm have listenv operations */ #undef HAVE_GLIBMM_LISTENV /* define if glibmm has Glib::OptionContext::get_help() */ #undef HAVE_GLIBMM_OPTIONCONTEXT_GET_HELP /* define if glibmm has Glib::OptionContext::set_summary() */ #undef HAVE_GLIBMM_OPTIONCONTEXT_SET_SUMMARY /* define if glibmm have setenv operations */ #undef HAVE_GLIBMM_SETENV /* define if glibmm have unsetenv operations */ #undef HAVE_GLIBMM_UNSETENV /* define if GLOBUS is available */ #undef HAVE_GLOBUS /* Define to 1 if you have the `globus_ftp_client_handleattr_set_gridftp2' function. */ #undef HAVE_GLOBUS_FTP_CLIENT_HANDLEATTR_SET_GRIDFTP2 /* Define to 1 if you have the `globus_thread_set_model' function. */ #undef HAVE_GLOBUS_THREAD_SET_MODEL /* Define to 1 if you have the `gmtime_r' function. */ #undef HAVE_GMTIME_R /* Define if you have the iconv() function. */ #undef HAVE_ICONV /* Define to 1 if you have the header file. */ #undef HAVE_INTTYPES_H /* Define to 1 if you have the header file. */ #undef HAVE_JAVAVM_JNI_H /* Define to 1 if you have the header file. */ #undef HAVE_JNI_H /* Define to 1 if you have the header file. */ #undef HAVE_LBER_H /* define if lcas is available */ #undef HAVE_LCAS /* Define to 1 if you have the header file. */ #undef HAVE_LCAS_H /* Define to 1 if you have the `lchown' function. */ #undef HAVE_LCHOWN /* define if lcmaps is available */ #undef HAVE_LCMAPS /* Define to 1 if you have the header file. */ #undef HAVE_LCMAPS_H /* Define if OpenLDAP is available */ #undef HAVE_LDAP /* Define to 1 if you have the header file. */ #undef HAVE_LDAP_FEATURES_H /* Define if you have ldap_initialize function */ #undef HAVE_LDAP_INITIALIZE /* define if LDNS is enabled and available */ #undef HAVE_LDNS /* Define to 1 if you have the `nsl' library (-lnsl). */ #undef HAVE_LIBNSL /* Define to 1 if you have the header file. */ #undef HAVE_LIMITS_H /* Define to 1 if you have the `localtime_r' function. */ #undef HAVE_LOCALTIME_R /* Define to 1 if `lstat' has the bug that it succeeds when given the zero-length file name argument. */ #undef HAVE_LSTAT_EMPTY_STRING_BUG /* Define to 1 if your system has a GNU libc compatible `malloc' function, and to 0 otherwise. */ #undef HAVE_MALLOC /* Define to 1 if you have the `memchr' function. */ #undef HAVE_MEMCHR /* Define to 1 if you have the `memmove' function. */ #undef HAVE_MEMMOVE /* Define to 1 if you have the header file. */ #undef HAVE_MEMORY_H /* Define to 1 if you have the `memset' function. */ #undef HAVE_MEMSET /* Define to 1 if you have the `mkdir' function. */ #undef HAVE_MKDIR /* Define to 1 if you have the `mkdtemp' function. */ #undef HAVE_MKDTEMP /* Define to 1 if you have the `mkfifo' function. */ #undef HAVE_MKFIFO /* Define to 1 if you have the `[mkstemp]' function. */ #undef HAVE_MKSTEMP /* Define to 1 if you have the `mktemp' function. */ #undef HAVE_MKTEMP /* define if the compiler implements namespaces */ #undef HAVE_NAMESPACES /* Define to 1 if you have the header file, and it defines `DIR'. */ #undef HAVE_NDIR_H /* Define to 1 if you have the header file. */ #undef HAVE_NETDB_H /* Define to 1 if you have the header file. 
*/ #undef HAVE_NETINET_IN_H /* define if NSS is enabled and available */ #undef HAVE_NSS /* Define to 1 if you have the `posix_fallocate' function. */ #undef HAVE_POSIX_FALLOCATE /* Define to 1 if the system has the type `ptrdiff_t'. */ #undef HAVE_PTRDIFF_T /* Define if you have Py_InitializeEx function */ #undef HAVE_PYTHON_INITIALIZE_EX /* Define to 1 if you have the `readdir_r' function. */ #undef HAVE_READDIR_R /* Define to 1 if your system has a GNU libc compatible `realloc' function, and to 0 otherwise. */ #undef HAVE_REALLOC /* Define to 1 if you have the `regcomp' function. */ #undef HAVE_REGCOMP /* Define to 1 if you have the `rmdir' function. */ #undef HAVE_RMDIR /* Define if S3 API has timeouts */ #undef HAVE_S3_TIMEOUT /* Define to 1 if you have the header file. */ #undef HAVE_SASL_H /* Define to 1 if you have the header file. */ #undef HAVE_SASL_SASL_H /* Define to 1 if you have the `select' function. */ #undef HAVE_SELECT /* Define to 1 if you have the `setenv' function. */ #undef HAVE_SETENV /* Define to 1 if you have the `socket' function. */ #undef HAVE_SOCKET /* define if SQLite is enabled and available */ #undef HAVE_SQLITE /* Define to 1 if you have the `sqlite3_errstr' function. */ #undef HAVE_SQLITE3_ERRSTR /* define if SSLv3_method is available */ #undef HAVE_SSLV3_METHOD /* define if the compiler has stringstream */ #undef HAVE_SSTREAM /* Define to 1 if `stat' has the bug that it succeeds when given the zero-length file name argument. */ #undef HAVE_STAT_EMPTY_STRING_BUG /* Define to 1 if stdbool.h conforms to C99. */ #undef HAVE_STDBOOL_H /* Define to 1 if you have the header file. */ #undef HAVE_STDINT_H /* Define to 1 if you have the header file. */ #undef HAVE_STDLIB_H /* Define to 1 if you have the `strcasecmp' function. */ #undef HAVE_STRCASECMP /* Define to 1 if you have the `strchr' function. */ #undef HAVE_STRCHR /* Define to 1 if you have the `strcspn' function. */ #undef HAVE_STRCSPN /* Define to 1 if you have the `strdup' function. */ #undef HAVE_STRDUP /* Define to 1 if you have the `strerror' function. */ #undef HAVE_STRERROR /* Define to 1 if you have the `strerror_r' function. */ #undef HAVE_STRERROR_R /* Define to 1 if you have the header file. */ #undef HAVE_STRINGS_H /* Define to 1 if you have the header file. */ #undef HAVE_STRING_H /* Define to 1 if you have the `strncasecmp' function. */ #undef HAVE_STRNCASECMP /* Define to 1 if you have the `strstr' function. */ #undef HAVE_STRSTR /* Define to 1 if you have the `strtol' function. */ #undef HAVE_STRTOL /* Define to 1 if you have the `strtoul' function. */ #undef HAVE_STRTOUL /* Define to 1 if you have the `strtoull' function. */ #undef HAVE_STRTOULL /* Define to 1 if `f_type' is member of `struct statfs'. */ #undef HAVE_STRUCT_STATFS_F_TYPE /* Define to 1 if `st_blksize' is member of `struct stat'. */ #undef HAVE_STRUCT_STAT_ST_BLKSIZE /* Define to 1 if you have the header file, and it defines `DIR'. */ #undef HAVE_SYS_DIR_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_FILE_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_FILSYS_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_FS_S5PARAM_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_FS_TYPES_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_MOUNT_H /* Define to 1 if you have the header file, and it defines `DIR'. */ #undef HAVE_SYS_NDIR_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_PARAM_H /* Define to 1 if you have the header file. 
*/ #undef HAVE_SYS_SELECT_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_SOCKET_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_STATFS_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_STAT_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_TIME_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_TYPES_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_VFS_H /* Define to 1 if you have that is POSIX.1 compatible. */ #undef HAVE_SYS_WAIT_H /* Define to 1 if you have the `timegm' function. */ #undef HAVE_TIMEGM /* define if TLSv1_1_method is available */ #undef HAVE_TLSV1_1_METHOD /* define if TLSv1_2_method is available */ #undef HAVE_TLSV1_2_METHOD /* define if TLSv1_method is available */ #undef HAVE_TLSV1_METHOD /* define if TLS_method is available */ #undef HAVE_TLS_METHOD /* Define to 1 if you have the `tzset' function. */ #undef HAVE_TZSET /* Define to 1 if you have the header file. */ #undef HAVE_UNISTD_H /* Define to 1 if you have the `unsetenv' function. */ #undef HAVE_UNSETENV /* Define to 1 if you have the header file. */ #undef HAVE_UUID_UUID_H /* Define to 1 if you have the `vfork' function. */ #undef HAVE_VFORK /* Define to 1 if you have the header file. */ #undef HAVE_VFORK_H /* Define to 1 if `fork' works. */ #undef HAVE_WORKING_FORK /* Define to 1 if `vfork' works. */ #undef HAVE_WORKING_VFORK /* define if XMLSEC package is available */ #undef HAVE_XMLSEC /* Define to 1 if the system has the type `_Bool'. */ #undef HAVE__BOOL /* installation prefix */ #undef INSTPREFIX /* library installation subdirectory */ #undef LIBSUBDIR /* Define to 1 if `lstat' dereferences a symlink specified with a trailing slash. */ #undef LSTAT_FOLLOWS_SLASHED_SYMLINK /* Define to the sub-directory in which libtool stores uninstalled libraries. */ #undef LT_OBJDIR /* Name of package */ #undef PACKAGE /* Define to the address where bug reports for this package should be sent. */ #undef PACKAGE_BUGREPORT /* Define to the full name of this package. */ #undef PACKAGE_NAME /* Define to the full name and version of this package. */ #undef PACKAGE_STRING /* Define to the one symbol short name of this package. */ #undef PACKAGE_TARNAME /* Define to the version of this package. */ #undef PACKAGE_VERSION /* package data subdirectory */ #undef PKGDATASUBDIR /* helper programs installation subdirectory */ #undef PKGLIBEXECSUBDIR /* plugin installation subdirectory */ #undef PKGLIBSUBDIR /* Define as the return type of signal handlers (`int' or `void'). */ #undef RETSIGTYPE /* Define to the type of arg 1 for `select'. */ #undef SELECT_TYPE_ARG1 /* Define to the type of args 2, 3 and 4 for `select'. */ #undef SELECT_TYPE_ARG234 /* Define to the type of arg 5 for `select'. */ #undef SELECT_TYPE_ARG5 /* Define if the block counts reported by statfs may be truncated to 2GB and the correct values may be stored in the f_spare array. (SunOS 4.1.2, 4.1.3, and 4.1.3_U1 are reported to have this problem. SunOS 4.1.1 seems not to be affected.) */ #undef STATFS_TRUNCATES_BLOCK_COUNTS /* Define if there is no specific function for reading file systems usage information and you have the header file. (SVR2) */ #undef STAT_READ_FILSYS /* Define if statfs takes 2 args and struct statfs has a field named f_bsize. (4.3BSD, SunOS 4, HP-UX, AIX PS/2) */ #undef STAT_STATFS2_BSIZE /* Define if statfs takes 2 args and struct statfs has a field named f_fsize. 
(4.4BSD, NetBSD) */ #undef STAT_STATFS2_FSIZE /* Define if statfs takes 2 args and the second argument has type struct fs_data. (Ultrix) */ #undef STAT_STATFS2_FS_DATA /* Define if statfs takes 3 args. (DEC Alpha running OSF/1) */ #undef STAT_STATFS3_OSF1 /* Define if statfs takes 4 args. (SVR3, Dynix, Irix, Dolphin) */ #undef STAT_STATFS4 /* Define if there is a function named statvfs. (SVR4) */ #undef STAT_STATVFS /* Define to 1 if you have the ANSI C header files. */ #undef STDC_HEADERS /* Define to 1 if strerror_r returns char *. */ #undef STRERROR_R_CHAR_P /* Define to 1 if you can safely include both and . */ #undef TIME_WITH_SYS_TIME /* Define to 1 if your declares `struct tm'. */ #undef TM_IN_SYS_TIME /* Enable extensions on AIX 3, Interix. */ #ifndef _ALL_SOURCE # undef _ALL_SOURCE #endif /* Enable GNU extensions on systems that have them. */ #ifndef _GNU_SOURCE # undef _GNU_SOURCE #endif /* Enable threading extensions on Solaris. */ #ifndef _POSIX_PTHREAD_SEMANTICS # undef _POSIX_PTHREAD_SEMANTICS #endif /* Enable extensions on HP NonStop. */ #ifndef _TANDEM_SOURCE # undef _TANDEM_SOURCE #endif /* Enable general extensions on Solaris. */ #ifndef __EXTENSIONS__ # undef __EXTENSIONS__ #endif /* Define if using WIN32 LDAP API */ #undef USE_WIN32_LDAP_API /* Version number of package */ #undef VERSION /* Number of bits in a file offset, on hosts where this is settable. */ #undef _FILE_OFFSET_BITS /* Define for large files, on AIX-style hosts. */ #undef _LARGE_FILES /* Define if compiling for MacOSX */ #undef _MACOSX /* Define to 1 if on MINIX. */ #undef _MINIX /* Define to 2 if the system does not provide POSIX.1 features except with this defined. */ #undef _POSIX_1_SOURCE /* Define to 1 if you need to in order for `stat' and other things to work. */ #undef _POSIX_SOURCE /* Define to empty if `const' does not conform to ANSI C. */ #undef const /* Define to `int' if doesn't define. */ #undef gid_t /* Define to `__inline__' or `__inline' if that's what the C compiler calls it, or to nothing if 'inline' is not supported under any name. */ #ifndef __cplusplus #undef inline #endif /* Define to rpl_malloc if the replacement function should be used. */ #undef malloc /* Define to `int' if does not define. */ #undef mode_t /* Define to `long int' if does not define. */ #undef off_t /* Define to `int' if does not define. */ #undef pid_t /* Define to rpl_realloc if the replacement function should be used. */ #undef realloc /* Define to `unsigned int' if does not define. */ #undef size_t /* Define to `int' if doesn't define. */ #undef uid_t /* Define as `fork' if `vfork' does not work. */ #undef vfork #ifdef WIN32 #include #endif nordugrid-arc-5.4.2/PaxHeaders.7502/mkinstalldirs0000644000000000000000000000013213214315736020004 xustar000000000000000030 mtime=1513200606.552093797 30 atime=1513200606.552093797 30 ctime=1513200658.620730617 nordugrid-arc-5.4.2/mkinstalldirs0000755000175000002070000000672213214315736020064 0ustar00mockbuildmock00000000000000#! /bin/sh # mkinstalldirs --- make directory hierarchy scriptversion=2009-04-28.21; # UTC # Original author: Noah Friedman # Created: 1993-05-16 # Public domain. # # This file is maintained in Automake, please report # bugs to or send patches to # . nl=' ' IFS=" "" $nl" errstatus=0 dirmode= usage="\ Usage: mkinstalldirs [-h] [--help] [--version] [-m MODE] DIR ... Create each directory DIR (with mode MODE, if specified), including all leading file name components. Report bugs to ." 
# process command line arguments while test $# -gt 0 ; do case $1 in -h | --help | --h*) # -h for help echo "$usage" exit $? ;; -m) # -m PERM arg shift test $# -eq 0 && { echo "$usage" 1>&2; exit 1; } dirmode=$1 shift ;; --version) echo "$0 $scriptversion" exit $? ;; --) # stop option processing shift break ;; -*) # unknown option echo "$usage" 1>&2 exit 1 ;; *) # first non-opt arg break ;; esac done for file do if test -d "$file"; then shift else break fi done case $# in 0) exit 0 ;; esac # Solaris 8's mkdir -p isn't thread-safe. If you mkdir -p a/b and # mkdir -p a/c at the same time, both will detect that a is missing, # one will create a, then the other will try to create a and die with # a "File exists" error. This is a problem when calling mkinstalldirs # from a parallel make. We use --version in the probe to restrict # ourselves to GNU mkdir, which is thread-safe. case $dirmode in '') if mkdir -p --version . >/dev/null 2>&1 && test ! -d ./--version; then echo "mkdir -p -- $*" exec mkdir -p -- "$@" else # On NextStep and OpenStep, the `mkdir' command does not # recognize any option. It will interpret all options as # directories to create, and then abort because `.' already # exists. test -d ./-p && rmdir ./-p test -d ./--version && rmdir ./--version fi ;; *) if mkdir -m "$dirmode" -p --version . >/dev/null 2>&1 && test ! -d ./--version; then echo "mkdir -m $dirmode -p -- $*" exec mkdir -m "$dirmode" -p -- "$@" else # Clean up after NextStep and OpenStep mkdir. for d in ./-m ./-p ./--version "./$dirmode"; do test -d $d && rmdir $d done fi ;; esac for file do case $file in /*) pathcomp=/ ;; *) pathcomp= ;; esac oIFS=$IFS IFS=/ set fnord $file shift IFS=$oIFS for d do test "x$d" = x && continue pathcomp=$pathcomp$d case $pathcomp in -*) pathcomp=./$pathcomp ;; esac if test ! -d "$pathcomp"; then echo "mkdir $pathcomp" mkdir "$pathcomp" || lasterr=$? if test ! -d "$pathcomp"; then errstatus=$lasterr else if test ! -z "$dirmode"; then echo "chmod $dirmode $pathcomp" lasterr= chmod "$dirmode" "$pathcomp" || lasterr=$? if test ! 
-z "$lasterr"; then errstatus=$lasterr fi fi fi fi pathcomp=$pathcomp/ done done exit $errstatus # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: nordugrid-arc-5.4.2/PaxHeaders.7502/nordugrid-arc.spec0000644000000000000000000000013213214316014020603 xustar000000000000000030 mtime=1513200652.956661343 30 atime=1513200658.418728146 30 ctime=1513200658.623730653 nordugrid-arc-5.4.2/nordugrid-arc.spec0000644000175000002070000016375513214316014020672 0ustar00mockbuildmock00000000000000%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")} %{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(0)")} %{!?_pkgdocdir: %global _pkgdocdir %{_docdir}/%{name}-%{version}} %{!?_jnidir: %global _jnidir %{_libdir}/java} %if %{?rhel}%{!?rhel:0} == 5 %global __python26 %{_bindir}/python2.6 %{!?python26_sitearch: %global python26_sitearch %(%{__python26} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")} %{!?python26_sitelib: %global python26_sitelib %(%{__python26} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(0)")} # Disable the default python byte compilation %global __os_install_post %(echo '%{__os_install_post}' | sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g') %endif %if %{?filter_setup:1}%{!?filter_setup:0} %filter_provides_in %{python_sitearch}.*\.so$ %if %{?rhel}%{!?rhel:0} == 5 %filter_provides_in %{python26_sitearch}.*\.so$ %endif %if %{?fedora}%{!?fedora:0} >= 13 %filter_provides_in %{python3_sitearch}.*\.so$ %endif %filter_setup %endif # # Build dependency descrepancies across platforms # %if %{?suse_version:1}%{!?suse_version:0} %global glibmm2_devel glibmm2-devel %global openldap_devel openldap2-devel %else %global glibmm2_devel glibmm24-devel %global openldap_devel openldap-devel %endif %if %{?fedora}%{!?fedora:5} > 4 && %{?suse_version:0}%{!?suse_version:1} %global nss_devel nss-devel %else %global nss_devel mozilla-nss-devel %endif %if %{?fedora}%{!?fedora:0} >= 12 || %{?rhel}%{!?rhel:0} >= 6 || %{?suse_version:1}%{!?suse_version:0} %global libuuid_devel libuuid-devel %else %global libuuid_devel e2fsprogs-devel %endif # # xROOTd # %if %{?fedora}%{!?fedora:0} >= 12 || %{?rhel}%{!?rhel:0} %global with_xrootd %{!?_without_xrootd:1}%{?_without_xrootd:0} %else %global with_xrootd 0 %endif # # Java # %if %{?fedora}%{!?fedora:0} >= 7 || %{?rhel}%{!?rhel:0} >= 5 %if %{?rhel}%{!?rhel:0} == 5 %ifarch ppc # RHEL 5 ppc only has java 1.4 %global with_java 0 %global with_junit 0 %else %global with_java %{!?_without_java:1}%{?_without_java:0} %global with_junit %{!?_without_junit:1}%{?_without_junit:0} %endif %else %global with_java %{!?_without_java:1}%{?_without_java:0} %global with_junit %{!?_without_junit:1}%{?_without_junit:0} %endif %else %global with_java 0 %global with_junit 0 %endif %global with_gcj %{!?_with_gcj:0}%{?_with_gcj:1} # # Python # %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 || %{?suse_version:1}%{!?suse_version:0} %if %{?rhel}%{!?rhel:0} == 6 %ifarch ppc64 # The python module doesn't build on RHEL6 ppc64 (.toc1 section overflow) %global with_python 0 %else %global with_python 1 %endif %else %global with_python 1 %endif %else %global 
with_python 0 %endif %if %{with_python} %if %{?fedora}%{!?fedora:0} || %{?suse_version:1}%{!?suse_version:0} %global with_pylint %{!?_without_pylint:1}%{?_without_pylint:0} %else %global with_pylint 0 %endif %else %global with_pylint 0 %endif %if %{?fedora}%{!?fedora:0} >= 7 || %{?rhel}%{!?rhel:0} >= 5 || %{?suse_version}%{!?suse_version:0} >= 1110 %global with_acix 1 %else %global with_acix 0 %endif %if %{?fedora}%{!?fedora:0} >= 21 || %{?rhel}%{!?rhel:0} >= 5 %global with_s3 1 %else %global with_s3 0 %endif %if %{?fedora}%{!?fedora:0} >= 21 || %{?rhel}%{!?rhel:0} >= 5 %global with_gfal 1 %else %global with_gfal 0 %endif %global with_canl 0 %if %{?fedora}%{!?fedora:0} || %{?rhel}%{!?rhel:0} %global with_xmlsec1 %{!?_without_xmlsec1:1}%{?_without_xmlsec1:0} %else %global with_xmlsec1 0 %endif # # CA utils # %if %{?suse_version:1}%{!?suse_version:0} %global with_cautils 0 %else %global with_cautils 1 %endif # SQLite %if %{?fedora}%{!?fedora:0} >= 11 || %{?rhel}%{!?rhel:0} >= 6 %global with_sqlite 1 %else %global with_sqlite 0 %endif # LDNS %if %{?fedora}%{!?fedora:0} >= 13 || %{?rhel}%{!?rhel:0} >= 5 %global with_ldns 1 %else %global with_ldns 0 %endif %global pkgdir arc %if %{?fedora}%{!?fedora:0} >= 25 || %{?rhel}%{!?rhel:0} >= 8 %global use_systemd 1 %else %global use_systemd 0 %endif # # Macros for scripts # # Stop and disable service on package removal %if %{use_systemd} %define stop_on_removal() %{expand:%%systemd_preun %(sed 's/[^ ]*/&.service/g' <<< '%{?*}')} %else %if %{?stop_on_removal:0}%{!?stop_on_removal:1} %global stop_on_removal() if [ $1 -eq 0 ]; then for s in %*; do service $s stop > /dev/null 2>&1 || : ; done; for s in %*; do /sbin/chkconfig --del $s; done; fi %endif %endif # Enable a service %if %{use_systemd} %define enable_service() %{expand:%%systemd_post %(sed 's/[^ ]*/&.service/g' <<< '%{?*}')} %else %if %{?suse_version:1}%{!?suse_version:0} %define enable_service() %{expand:%%fillup_and_insserv -f %{?*}} %else %define enable_service() for s in %{?*}; do /sbin/chkconfig --add $s ; done %endif %endif # Conditionally restart service on package update %if %{use_systemd} %define condrestart_on_update() %{expand:%%systemd_postun_with_restart %(sed 's/[^ ]*/&.service/g' <<< '%{?*}')} %else %if %{?suse_version:1}%{!?suse_version:0} %define condrestart_on_update() %{expand:%%restart_on_update %{?*}} %{expand:%%insserv_cleanup} %else %define condrestart_on_update() if [ $1 -ge 1 ]; then for s in %{?*}; do service $s condrestart > /dev/null 2>&1 || : ; done; fi %endif %endif # Standard service requirements %if %{use_systemd} %define service_post_requires systemd-units %define service_preun_requires systemd-units %define service_postun_requires systemd-units %else %if %{?suse_version:1}%{!?suse_version:0} %define service_post_requires %{insserv_prereq} %define service_preun_requires %{insserv_prereq} %define service_postun_requires %{insserv_prereq} %else %define service_post_requires chkconfig %define service_preun_requires chkconfig, initscripts %define service_postun_requires initscripts %endif %endif Name: nordugrid-arc Version: 5.4.2 Release: 1%{?dist} Summary: Advanced Resource Connector Grid Middleware Group: System Environment/Daemons License: ASL 2.0 URL: http://www.nordugrid.org/ Source: http://download.nordugrid.org/packages/%{name}/releases/%{version}/src/%{name}-%{version}.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) # Packages dropped without replacements Obsoletes: nordugrid-arc-chelonia < 2.0.0 Obsoletes: nordugrid-arc-hopi < 
2.0.0 Obsoletes: nordugrid-arc-isis < 2.0.0 Obsoletes: nordugrid-arc-janitor < 2.0.0 Obsoletes: nordugrid-arc-doxygen < 4.0.0 BuildRequires: cppunit-devel BuildRequires: pkgconfig %if %{?fedora}%{!?fedora:0} >= 12 || %{?rhel}%{!?rhel:0} >= 6 || %{?suse_version:1}%{!?suse_version:0} BuildRequires: libuuid-devel %else BuildRequires: e2fsprogs-devel %endif BuildRequires: gettext BuildRequires: python-devel %if %{?rhel}%{!?rhel:0} == 5 BuildRequires: python26-devel %endif %if %{?fedora}%{!?fedora:0} >= 13 BuildRequires: python3-devel %endif %if %{with_pylint} BuildRequires: pylint %endif BuildRequires: %{glibmm2_devel} BuildRequires: glib2-devel BuildRequires: libxml2-devel BuildRequires: openssl BuildRequires: openssl-devel %if %{with_xmlsec1} BuildRequires: xmlsec1-devel >= 1.2.4 BuildRequires: xmlsec1-openssl-devel >= 1.2.4 %endif BuildRequires: %{nss_devel} BuildRequires: %{openldap_devel} BuildRequires: globus-common-devel BuildRequires: globus-ftp-client-devel BuildRequires: globus-ftp-control-devel %if %{?fedora}%{!?fedora:0} >= 23 || %{?rhel}%{!?rhel:0} >= 5 BuildRequires: globus-gssapi-gsi-devel >= 12.2 %else BuildRequires: globus-gssapi-gsi-devel < 12.2 %endif %if %{with_canl} BuildRequires: canl-c++-devel %endif %if %{with_xrootd} %if %{?fedora}%{!?fedora:0} >= 17 || %{?rhel}%{!?rhel:0} >= 5 BuildRequires: xrootd-client-devel %else BuildRequires: xrootd-devel %endif %endif %if %{with_gfal} BuildRequires: gfal2-devel %endif %if %{with_s3} BuildRequires: libs3-devel %endif %if %{?suse_version}%{!?suse_version:0} == 1110 BuildRequires: db43-devel %else %if %{?fedora}%{!?fedora:0} >= 15 || %{?rhel}%{!?rhel:0} >= 7 BuildRequires: libdb-cxx-devel %else %if %{?fedora}%{!?fedora:0} == 14 BuildRequires: libdb-devel %else BuildRequires: db4-devel %endif %endif %endif %if %{with_java} BuildRequires: java-devel >= 1.5.0 BuildRequires: jpackage-utils %if %{with_gcj} BuildRequires: java-gcj-compat-devel %endif %if %{with_junit} BuildRequires: junit %endif %endif # Needed for Boinc backend testing during make check BuildRequires: perl(DBI) # Needed for infoprovider testing during make check BuildRequires: perl(XML::Simple) # Needed for LRMS testing during make check BuildRequires: perl(Test::Harness) BuildRequires: perl(Test::Simple) # Needed to run ACIX unit tests %if %{with_acix} %if %{?suse_version:1}%{!?suse_version:0} BuildRequires: python-twisted %else BuildRequires: python-twisted-core %endif BuildRequires: python-twisted-web %if %{?suse_version:1}%{!?suse_version:0} BuildRequires: python-openssl %else BuildRequires: pyOpenSSL %endif %if %{?rhel}%{!?rhel:0} == 5 BuildRequires: python-hashlib %endif %endif %if %{?fedora}%{!?fedora:0} >= 6 || %{?rhel}%{!?rhel:0} >= 5 # The compilation of the selinux module fails on Fedora 5 BuildRequires: selinux-policy-devel BuildRequires: checkpolicy %endif BuildRequires: swig %if %{?suse_version:1}%{!?suse_version:0} # Needed for python/doxy2swig.py BuildRequires: python-xml %endif %if %{?fedora}%{!?fedora:0} >= 4 || %{?rhel}%{!?rhel:0} >= 5 BuildRequires: libtool-ltdl-devel %else BuildRequires: libtool %endif %if %{with_sqlite} BuildRequires: sqlite-devel >= 3.6 %endif %if %{with_ldns} BuildRequires: ldns-devel >= 1.6.8 %endif %description NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). The Advanced Resource Connector (ARC) brings computing resources together across institutional boundaries. 
This concept is commonly referred to as a "computational grid". Historically, grids address the organization of distributed storage of data and parallel computation, but could be expanded to arbitrary services. Just like the web, ARC has its roots in the IT infrastructure that was erected to analyze the experiments for high energy physics at CERN. The first release, ARC-0.x, was dependent on Globus, the current release keeps that compatibility but can also be used independently. %package client Summary: ARC command line interface Group: Applications/Internet Requires: %{name} = %{version}-%{release} Requires: %{name}-plugins-needed = %{version}-%{release} %description client NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). For the regular user of any ARC-based computational grid, this client package contains (or depends on) all packages that are needed to submit jobs, query their status and retrieve results. %package hed Summary: ARC Hosting Environment Daemon Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description hed NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). The ARC Hosting Environment Daemon (HED). This daemon is a container for ARC services. %package gridftpd Summary: ARC gridftp server Group: System Environment/Daemons Requires: %{name} = %{version}-%{release} Requires: %{name}-plugins-globus Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description gridftpd NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains the ARC gridftp server which has a plugin framework. Current plugins include: fileplugin : Standard gridftp server based on Globus globus-ftp-control jobplugin : Classical ARC job submission interface %package cache-service Summary: ARC cache service Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: %{name}-hed = %{version}-%{release} Requires: %{name}-plugins-needed = %{version}-%{release} Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description cache-service NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains the ARC cache service. It provides a way to perform some operations on the A-REX cache remotely. It can be especially helpful for data management within a pilot job framework. %package datadelivery-service Summary: ARC data delivery service Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: %{name}-hed = %{version}-%{release} Requires: %{name}-plugins-needed = %{version}-%{release} Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description datadelivery-service NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). 
This package contains the ARC data delivery service. %package ldap-infosys Summary: ARC LDAP information service Group: System Environment/Libraries %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif Requires: openldap-servers Requires: glue-schema >= 2.0.10 Requires: bdii Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %if %{?fedora}%{!?fedora:0} >= 23 || %{?rhel}%{!?rhel:0} >= 8 Requires(post): policycoreutils-python-utils Requires(postun): policycoreutils-python-utils %else %if %{?fedora}%{!?fedora:0} >= 11 || %{?rhel}%{!?rhel:0} >= 6 Requires(post): policycoreutils-python Requires(postun): policycoreutils-python %else %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 Requires(post): policycoreutils Requires(postun): policycoreutils %endif %endif %endif %description ldap-infosys NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains the LDAP based information system for ARC. This package is not self-contained. It should be pulled in by either nordugrid-arc-arex to be a part of a local information system or by nordugrid-arc-egiis to be a part of an EGIIS service. %package aris Summary: ARC local information system Group: System Environment/Libraries %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif Requires: %{name}-ldap-infosys = %{version}-%{release} Requires: openldap-servers Requires: glue-schema >= 2.0.10 Requires: bdii Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %if %{?fedora}%{!?fedora:0} >= 23 || %{?rhel}%{!?rhel:0} >= 8 Requires(post): policycoreutils-python-utils Requires(postun): policycoreutils-python-utils %else %if %{?fedora}%{!?fedora:0} >= 11 || %{?rhel}%{!?rhel:0} >= 6 Requires(post): policycoreutils-python Requires(postun): policycoreutils-python %else %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 Requires(post): policycoreutils Requires(postun): policycoreutils %endif %endif %endif %description aris NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains the LDAP based information system for ARC. This package is not self-contained but is closely connected to nordugrid-arc-arex. %package egiis Summary: ARC EGIIS service Group: System Environment/Libraries Requires: %{name}-ldap-infosys = %{version}-%{release} Requires: openldap-servers Requires: glue-schema >= 2.0.10 Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %if %{?fedora}%{!?fedora:0} >= 6 || %{?rhel}%{!?rhel:0} >= 5 Requires(post): policycoreutils Requires(postun): policycoreutils %endif %description egiis NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). The EGIIS is the Information Index Service used by ARC. This service is used to set up an LDAP-based server that local information systems register to. 
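# Illustrative sketch only: how a client might query such an LDAP-based
# information service with the standard OpenLDAP tools. The host name is
# made up; port 2135 is the infosys port this spec later registers with
# SELinux, and the Mds-Vo-name=local,o=grid base DN is the conventional
# ARC/MDS layout, quoted here as an assumption rather than taken from this
# file.
#
#   ldapsearch -x -H ldap://arc.example.org:2135 \
#       -b 'Mds-Vo-name=local,o=grid' '(objectClass=*)'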
%package ldap-monitor Summary: ARC LDAP monitor service Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: php Requires: php-gd Requires: php-ldap %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif %description ldap-monitor NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains the LDAP monitor system for ARC. This package is self-contained. It is used to set up a web-based monitor which pulls information from a EGIIS and shows it graphically. %package ws-monitor Summary: ARC WS monitor service Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif %description ws-monitor NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains the WS monitor system for ARC. This package is self-contained. It is used to set up a web-based monitor which pulls information from ISIS and shows it graphically. %package arex Summary: ARC Remote EXecution service Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: %{name}-hed = %{version}-%{release} Requires: %{name}-ldap-infosys = %{version}-%{release} Requires: %{name}-plugins-needed = %{version}-%{release} %if %{with_python} Requires: python2-%{name} = %{version}-%{release} %endif Requires: perl(XML::Simple) %if %{?fedora}%{!?fedora:0} >= 26 || %{?rhel}%{!?rhel:0} >= 8 Requires: python2-stomppy %else Requires: stomppy %endif Requires: python-ldap Requires: python-dirq Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description arex NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). The ARC Remote EXecution service (AREX) provides a service for the execution of compute jobs and the transfer of input or output data. %package plugins-needed Summary: ARC base plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %description plugins-needed NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). ARC base plugins. This includes the Message Chain Components (MCCs) and Data Manager Components (DMCs). %package plugins-globus Summary: ARC Globus plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %if %{?fedora}%{!?fedora:0} >= 23 || %{?rhel}%{!?rhel:0} >= 5 Requires: globus-gssapi-gsi >= 12.2 %else Requires: globus-gssapi-gsi < 12.2 %endif %description plugins-globus NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). ARC Globus plugins. This includes the Globus dependent Data Manager Components (DMCs). %if %{with_canl} %package arcproxyalt Summary: ARC proxy client based on canl Group: Applications/Internet Requires: %{name} = %{version}-%{release} %description arcproxyalt NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). 
This package includes a preview of the arcproxy command based on canl. %endif %if %{with_xrootd} %package plugins-xrootd Summary: ARC xrootd plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %description plugins-xrootd NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). ARC xrootd plugins. These allow access to data through the xrootd protocol. %endif %if %{with_gfal} %package plugins-gfal Summary: ARC GFAL plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %description plugins-gfal NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). ARC plugins for GFAL2. This allows third-party transfer and adds support for several extra transfer protocols (rfio, dcap, gsidcap). Specific protocol support is provided by separate gfal2-plugin packages. %endif %if %{with_s3} %package plugins-s3 Summary: ARC S3 plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %description plugins-s3 NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). ARC plugins for S3. These allow access to data through the S3 protocol. %endif %if %{with_acix} %package acix-core Summary: ARC cache index - core Group: System Environment/Libraries %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif Requires: python-twisted-core Requires: python-twisted-web Requires: pyOpenSSL %if %{?rhel}%{!?rhel:0} == 5 Requires: python-hashlib %endif %description acix-core NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). Core components of the ARC Cache Index (ACIX). %package acix-cache Summary: ARC cache index - cache server Group: System Environment/Libraries %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif Requires: python-twisted-core Requires: python-twisted-web Requires: pyOpenSSL %if %{?rhel}%{!?rhel:0} == 5 Requires: python-hashlib %endif Requires: %{name}-acix-core = %{version}-%{release} Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description acix-cache NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). Cache server component of the ARC Cache Index (ACIX), usually installed alongside A-REX. This component collects information on the content of an A-REX cache. %package acix-index Summary: ARC cache index - index server Group: System Environment/Libraries %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif Requires: python-twisted-core Requires: python-twisted-web Requires: pyOpenSSL %if %{?rhel}%{!?rhel:0} == 5 Requires: python-hashlib %endif Requires: %{name}-acix-core = %{version}-%{release} Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description acix-index NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). 
Index server component of the ARC Cache Index (ACIX), usually installed independently of any A-REX installation. This component pulls cache content from cache servers and can be queried by clients for the location of cached files. %endif %package devel Summary: ARC development files Group: Development/Libraries Requires: %{name} = %{version}-%{release} Requires: %{glibmm2_devel} Requires: glib2-devel Requires: libxml2-devel Requires: openssl-devel %description devel NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). Header files and libraries needed to develop applications using ARC. %if %{with_python} %package -n python2-%{name} Summary: ARC Python wrapper Group: Development/Libraries %{?python_provide:%python_provide python2-%{name}} Provides: %{name}-python = %{version}-%{release} Obsoletes: %{name}-python < 5.3.3 Requires: %{name} = %{version}-%{release} %description -n python2-%{name} NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). Python bindings for ARC. %endif %if %{?rhel}%{!?rhel:0} == 5 %package -n python26-%{name} Summary: ARC Python wrapper Group: Development/Libraries %{?python_provide:%python_provide python26-%{name}} Provides: %{name}-python26 = %{version}-%{release} Obsoletes: %{name}-python26 < 5.3.3 Requires: %{name} = %{version}-%{release} Requires: python(abi) = 2.6 %description -n python26-%{name} NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). Python bindings for ARC. %endif %if %{?fedora}%{!?fedora:0} >= 13 %package -n python3-%{name} Summary: ARC Python wrapper Group: Development/Libraries %{?python_provide:%python_provide python3-%{name}} Provides: %{name}-python3 = %{version}-%{release} Obsoletes: %{name}-python3 < 5.3.3 Requires: %{name} = %{version}-%{release} %description -n python3-%{name} NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). Python bindings for ARC. %endif %if %{with_java} %package java Summary: ARC Java wrapper Group: Development/Libraries Requires: %{name} = %{version}-%{release} %if %{?fedora}%{!?fedora:0} >= 20 || %{?rhel}%{!?rhel:0} >= 7 Requires: java-headless %else Requires: java %endif Requires: jpackage-utils %if %{with_gcj} Requires(post): java-gcj-compat Requires(postun): java-gcj-compat %endif %description java NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). Java bindings for ARC. %endif %package gridmap-utils Summary: NorduGrid authorization tools Group: Applications/Internet Requires: perl(Crypt::SSLeay) Requires: perl(SOAP::Lite) Requires: perl(Crypt::OpenSSL::X509) Requires: crontabs %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif %description gridmap-utils NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains the ARC authorization machinery. A prominent tool distributed in this package is the nordugridmap script to collect user information from the virtual organizations. 
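# Illustrative sketch only: the gridmap-utils package requires crontabs
# because sites normally run nordugridmap periodically. A hypothetical
# cron.d entry is shown below; the installation path (/usr/sbin) and the
# schedule are assumptions, not taken from this package's own configuration.
#
#   # /etc/cron.d/nordugridmap (hypothetical)
#   30 */6 * * *  root  /usr/sbin/nordugridmap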
%if %{with_cautils} %package ca-utils Summary: NorduGrid authentication tools Group: Applications/Internet Requires: fetch-crl Requires(post): fetch-crl Requires(post): chkconfig %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif %description ca-utils NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). The nordugrid-arc-ca-utils package has been obsoleted. The functionality of the grid-update-crls tool is provided by the fetch-crl tool in the fetch-crl package. %endif %package misc-utils Summary: NorduGrid misc tools Group: Applications/Internet Requires: %{name} = %{version}-%{release} Requires: %{name}-plugins-needed = %{version}-%{release} %description misc-utils NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains utilities for various tasks including testing. The package is usually not required by users or sysadmins but is mainly for developers. %prep %setup -q %if %{?fedora}%{!?fedora:0} <= 9 && %{?rhel}%{!?rhel:0} <= 5 # Older versions of SELinux do not have a policy for open cd selinux sed -e 's/ open / /' -e /open/d -i %{name}-egiis.te cd - %endif %build if pkg-config --atleast-version 2.6 sigc++-2.0 ; then if [ `echo __GNUC__ | gcc -E - | tail -1` -lt 6 ] ; then # Workaround for too new libsigc++/glibmm, too old gcc combination export CXXFLAGS="%{optflags} -std=c++11" fi fi %configure --disable-static \ %if ! %{with_java} --disable-java \ %endif %if ! %{with_python} --disable-swig-python \ %endif %if ! %{with_acix} --disable-acix \ %endif %if %{with_gfal} --enable-gfal \ %endif %if %{with_s3} --enable-s3 \ %endif %if %{?rhel}%{!?rhel:0} == 5 --with-altpython=python26 \ %endif %if %{?fedora}%{!?fedora:0} >= 13 --with-altpython=python3 \ %endif %if %{with_canl} --enable-canlxx \ %endif %if ! %{with_xrootd} --disable-xrootd \ %endif %if %{with_sqlite} --enable-sqlite \ %endif %if ! %{with_ldns} --disable-ldns \ %endif %if %{use_systemd} --with-systemd-units-location=%{_unitdir} \ %endif --disable-doc \ --with-docdir=%{_pkgdocdir} \ --with-jnidir=%{_jnidir} make %{?_smp_mflags} %if %{?fedora}%{!?fedora:0} >= 6 || %{?rhel}%{!?rhel:0} >= 5 cd selinux make -f /usr/share/selinux/devel/Makefile cd - %endif %check make check %install rm -rf $RPM_BUILD_ROOT make install DESTDIR=$RPM_BUILD_ROOT # Install Logrotate. mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d install -p -m 644 debian/%{name}-arex.logrotate \ $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/%{name}-arex install -p -m 644 debian/%{name}-gridftpd.logrotate \ $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/%{name}-gridftpd install -p -m 644 debian/%{name}-aris.logrotate \ $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/%{name}-aris install -p -m 644 debian/%{name}-datadelivery-service.logrotate \ $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/%{name}-datadelivery-service find $RPM_BUILD_ROOT -type f -name \*.la -exec rm -fv '{}' ';' # The py-compile script in the source tarball is old (RHEL 6) # It does the wrong thing for python 3 - remove and let rpmbuild do it right find $RPM_BUILD_ROOT -type f -name \*.pyc -exec rm -fv '{}' ';' find $RPM_BUILD_ROOT -type f -name \*.pyo -exec rm -fv '{}' ';' # libarcglobusutils is not part of the ARC API.
find $RPM_BUILD_ROOT -name libarcglobusutils.so -exec rm -fv '{}' ';' # Create log directory mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/log/arc # Create spool directories for Jura mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/spool/arc mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/spool/arc/ssm mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/spool/arc/urs %find_lang %{name} %if %{with_java} && %{with_gcj} %{_bindir}/aot-compile-rpm %endif %if %{?fedora}%{!?fedora:0} >= 6 || %{?rhel}%{!?rhel:0} >= 5 cd selinux mkdir -p $RPM_BUILD_ROOT%{_datadir}/selinux/packages/%{name} install -p -m 644 %{name}-egiis.pp \ $RPM_BUILD_ROOT%{_datadir}/selinux/packages/%{name} cd - %endif %if %{?rhel}%{!?rhel:0} == 5 %{__python} -c 'import compileall; compileall.compile_dir("'"$RPM_BUILD_ROOT"'", 10, "%{python_sitearch}", 1)' > /dev/null %{__python} -O -c 'import compileall; compileall.compile_dir("'"$RPM_BUILD_ROOT"'", 10, "%{python_sitearch}", 1)' > /dev/null %{__python26} -c 'import compileall; compileall.compile_dir("'"$RPM_BUILD_ROOT%{python26_sitearch}"'", 10, "%{python26_sitearch}", 1)' > /dev/null %{__python26} -O -c 'import compileall; compileall.compile_dir("'"$RPM_BUILD_ROOT%{python26_sitearch}"'", 10, "%{python26_sitearch}", 1)' > /dev/null %endif # Remove examples and let RPM package them under /usr/share/doc using the doc macro rm -rf $RPM_BUILD_ROOT%{_datadir}/%{pkgdir}/examples make -C src/libs/data-staging/examples DESTDIR=$PWD/docdir/devel pkgdatadir= install-exampleDATA make -C src/hed/libs/compute/examples DESTDIR=$PWD/docdir/devel pkgdatadir= install-exampleDATA make -C src/hed/libs/data/examples DESTDIR=$PWD/docdir/devel pkgdatadir= install-exampleDATA make -C src/hed/acc/PythonBroker DESTDIR=$PWD/docdir/python pkgdatadir= install-exampleDATA make -C python/examples DESTDIR=$PWD/docdir/python pkgdatadir= install-exampleDATA make -C java/examples DESTDIR=$PWD/docdir/java pkgdatadir= install-exampleDATA make -C src/clients DESTDIR=$PWD/docdir/client pkgdatadir= install-exampleDATA make -C src/tests/echo DESTDIR=$PWD/docdir/hed pkgdatadir= install-exampleDATA make -C src/hed DESTDIR=$PWD/docdir/hed pkgdatadir= install-profileDATA # arc.conf.reference needs special handling make -C src/doc DESTDIR=$RPM_BUILD_ROOT install-exampleDATA # Link to arc.conf.reference from doc rm -f $PWD/docdir/arc.conf.reference ln -s %{_datadir}/%{pkgdir}/examples/arc.conf.reference $PWD/docdir/arc.conf.reference %clean rm -rf $RPM_BUILD_ROOT %post -p /sbin/ldconfig %postun -p /sbin/ldconfig %post plugins-globus -p /sbin/ldconfig %postun plugins-globus -p /sbin/ldconfig %post hed %enable_service arched %preun hed %stop_on_removal arched %postun hed %condrestart_on_update arched %post arex %enable_service a-rex %preun arex %stop_on_removal a-rex %postun arex %condrestart_on_update a-rex %post gridftpd %enable_service gridftpd %preun gridftpd %stop_on_removal gridftpd %postun gridftpd %condrestart_on_update gridftpd %post cache-service %enable_service arc-cache-service %preun cache-service %stop_on_removal arc-cache-service %postun cache-service %condrestart_on_update arc-cache-service %post datadelivery-service %enable_service arc-datadelivery-service %preun datadelivery-service %stop_on_removal arc-datadelivery-service %postun datadelivery-service %condrestart_on_update arc-datadelivery-service %post ldap-infosys %enable_service nordugrid-arc-slapd nordugrid-arc-inforeg %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 semanage port -a -t ldap_port_t -p tcp 2135 2>/dev/null || : semanage fcontext -a -t slapd_etc_t 
"/var/run/arc/infosys/bdii-slapd\.conf" 2>/dev/null || : %endif %preun ldap-infosys %stop_on_removal nordugrid-arc-slapd nordugrid-arc-inforeg %postun ldap-infosys %condrestart_on_update nordugrid-arc-slapd nordugrid-arc-inforeg %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 if [ $1 -eq 0 ]; then semanage port -d -t ldap_port_t -p tcp 2135 2>/dev/null || : semanage fcontext -d -t slapd_etc_t "/var/run/arc/infosys/bdii-slapd\.conf" 2>/dev/null || : fi %endif %post aris %enable_service nordugrid-arc-bdii %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 semanage fcontext -a -t slapd_db_t "/var/lib/arc/bdii/db(/.*)?" 2>/dev/null || : semanage fcontext -a -t slapd_var_run_t "/var/run/arc/bdii/db(/.*)?" 2>/dev/null || : # Remove selinux labels for old arc bdii var dir semanage fcontext -d -t slapd_db_t "/var/run/arc/bdii(/.*)?" 2>/dev/null || : %endif %preun aris %stop_on_removal nordugrid-arc-bdii %postun aris %condrestart_on_update nordugrid-arc-bdii %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 if [ $1 -eq 0 ]; then semanage fcontext -d -t slapd_db_t "/var/lib/arc/bdii/db(/.*)?" 2>/dev/null || : semanage fcontext -d -t slapd_var_run_t "/var/run/arc/bdii/db(/.*)?" 2>/dev/null || : fi %endif %triggerun aris -- bdii %if %{?suse_version:1}%{!?suse_version:0} FIRST_ARG=1 %restart_on_update nordugrid-arc-bdii %else service nordugrid-arc-bdii condrestart > /dev/null 2>&1 || : %endif %post egiis %enable_service nordugrid-arc-egiis %if %{?fedora}%{!?fedora:0} >= 6 || %{?rhel}%{!?rhel:0} >= 5 /usr/sbin/semodule -i %{_datadir}/selinux/packages/%{name}/%{name}-egiis.pp > /dev/null 2>&1 || : /sbin/restorecon %{_sbindir}/arc-infoindex-relay %endif %preun egiis %stop_on_removal nordugrid-arc-egiis %postun egiis %condrestart_on_update nordugrid-arc-egiis %if %{?fedora}%{!?fedora:0} >= 6 || %{?rhel}%{!?rhel:0} >= 5 if [ $1 -eq 0 ] ; then /usr/sbin/semodule -r %{name}-egiis > /dev/null 2>&1 || : fi %endif %if %{with_acix} %post acix-cache %enable_service acix-cache %preun acix-cache %stop_on_removal acix-cache %postun acix-cache %condrestart_on_update acix-cache %post acix-index %enable_service acix-index %preun acix-index %stop_on_removal acix-index %postun acix-index %condrestart_on_update acix-index %endif %if %{with_java} && %{with_gcj} %post java [ -x %{_bindir}/rebuild-gcj-db ] && %{_bindir}/rebuild-gcj-db %endif %if %{with_java} && %{with_gcj} %postun java [ -x %{_bindir}/rebuild-gcj-db ] && %{_bindir}/rebuild-gcj-db %endif %if %{with_cautils} %post ca-utils # Enable and start CRL updates via cron for backward compatibility /sbin/chkconfig fetch-crl-cron on service fetch-crl-cron start > /dev/null 2>&1 %endif %files -f %{name}.lang %defattr(-,root,root,-) %doc docdir/arc.conf.reference README AUTHORS LICENSE NOTICE ChangeLog %{_libdir}/libarccompute.so.* %{_libdir}/libarccommunication.so.* %{_libdir}/libarccommon.so.* %{_libdir}/libarccredential.so.* %{_libdir}/libarccredentialstore.so.* %{_libdir}/libarccrypto.so.* %{_libdir}/libarcdata.so.* %{_libdir}/libarcdatastaging.so.* %{_libdir}/libarcloader.so.* %{_libdir}/libarcmessage.so.* %{_libdir}/libarcsecurity.so.* %{_libdir}/libarcinfosys.so.* %{_libdir}/libarcws.so.* %{_libdir}/libarcwssecurity.so.* %if %{with_xmlsec1} %{_libdir}/libarcxmlsec.so.* %endif %dir %{_libdir}/%{pkgdir} # We need to have libmodcrypto.so close to libarccrypto %{_libdir}/%{pkgdir}/libmodcrypto.so %{_libdir}/%{pkgdir}/libmodcrypto.apd # We need to have libmodcredential.so close to libarccredential %{_libdir}/%{pkgdir}/libmodcredential.so 
%{_libdir}/%{pkgdir}/libmodcredential.apd %{_libdir}/%{pkgdir}/arc-file-access %{_libdir}/%{pkgdir}/arc-hostname-resolver %{_libdir}/%{pkgdir}/DataStagingDelivery %dir %{_libexecdir}/%{pkgdir} %dir %{_datadir}/%{pkgdir} %dir %{_datadir}/%{pkgdir}/examples %{_datadir}/%{pkgdir}/examples/arc.conf.reference %dir %{_datadir}/%{pkgdir}/test-jobs %{_datadir}/%{pkgdir}/test-jobs/test-job-* %{_datadir}/%{pkgdir}/schema %files client %defattr(-,root,root,-) %doc docdir/client/* %{_bindir}/arccat %{_bindir}/arcclean %{_bindir}/arccp %{_bindir}/arcecho %{_bindir}/arcget %{_bindir}/arcinfo %{_bindir}/arckill %{_bindir}/arcls %{_bindir}/arcmkdir %{_bindir}/arcrename %{_bindir}/arcproxy %{_bindir}/arcrenew %{_bindir}/arcresub %{_bindir}/arcresume %{_bindir}/arcrm %{_bindir}/arcstat %{_bindir}/arcsub %{_bindir}/arcsync %{_bindir}/arctest %dir %{_sysconfdir}/%{pkgdir} %config(noreplace) %{_sysconfdir}/%{pkgdir}/client.conf %doc %{_mandir}/man1/arccat.1* %doc %{_mandir}/man1/arcclean.1* %doc %{_mandir}/man1/arccp.1* %doc %{_mandir}/man1/arcecho.1* %doc %{_mandir}/man1/arcget.1* %doc %{_mandir}/man1/arcinfo.1* %doc %{_mandir}/man1/arckill.1* %doc %{_mandir}/man1/arcls.1* %doc %{_mandir}/man1/arcmkdir.1* %doc %{_mandir}/man1/arcrename.1* %doc %{_mandir}/man1/arcproxy.1* %doc %{_mandir}/man1/arcrenew.1* %doc %{_mandir}/man1/arcresub.1* %doc %{_mandir}/man1/arcresume.1* %doc %{_mandir}/man1/arcrm.1* %doc %{_mandir}/man1/arcstat.1* %doc %{_mandir}/man1/arcsub.1* %doc %{_mandir}/man1/arcsync.1* %doc %{_mandir}/man1/arctest.1* %files hed %defattr(-,root,root,-) %doc docdir/hed/* %if %{use_systemd} %{_unitdir}/arched.service %{_datadir}/%{pkgdir}/arched-start %else %{_initrddir}/arched %endif %{_sbindir}/arched %{_libdir}/%{pkgdir}/libecho.so %{_libdir}/%{pkgdir}/libecho.apd %{_datadir}/%{pkgdir}/profiles %doc %{_mandir}/man8/arched.8* %doc %{_mandir}/man5/arc.conf.5* %files gridftpd %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/gridftpd.service %{_datadir}/%{pkgdir}/gridftpd-start %else %{_initrddir}/gridftpd %endif %{_sbindir}/gridftpd %{_libdir}/%{pkgdir}/jobplugin.* %{_libdir}/%{pkgdir}/fileplugin.* %config(noreplace) %{_sysconfdir}/logrotate.d/%{name}-gridftpd %doc %{_mandir}/man8/gridftpd.8* %files ldap-infosys %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/nordugrid-arc-slapd.service %{_unitdir}/nordugrid-arc-inforeg.service %else %{_initrddir}/nordugrid-arc-ldap-infosys %{_initrddir}/nordugrid-arc-slapd %{_initrddir}/nordugrid-arc-inforeg %endif %{_datadir}/%{pkgdir}/create-slapd-config %{_datadir}/%{pkgdir}/create-inforeg-config %{_datadir}/%{pkgdir}/config_parser_compat.sh %{_datadir}/%{pkgdir}/grid-info-soft-register %{_datadir}/%{pkgdir}/ldap-schema %files aris %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/nordugrid-arc-bdii.service %else %{_initrddir}/nordugrid-arc-bdii %endif %{_datadir}/%{pkgdir}/create-bdii-config %{_datadir}/%{pkgdir}/glue-generator.pl %{_datadir}/%{pkgdir}/glite-info-provider-ldap %{_datadir}/%{pkgdir}/PerfData.pl %{_datadir}/%{pkgdir}/ConfigParser.pm %config(noreplace) %{_sysconfdir}/logrotate.d/%{name}-aris %files egiis %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/nordugrid-arc-egiis.service %else %{_initrddir}/nordugrid-arc-egiis %endif %{_sbindir}/arc-infoindex-relay %{_sbindir}/arc-infoindex-server %{_mandir}/man8/arc-infoindex-relay.8* %{_mandir}/man8/arc-infoindex-server.8* %{_libdir}/%{pkgdir}/arc-infoindex-slapd-wrapper.so %{_libdir}/%{pkgdir}/arc-infoindex-slapd-wrapper.apd %if %{?fedora}%{!?fedora:0} >= 6 || %{?rhel}%{!?rhel:0} >= 5 
%{_datadir}/selinux/packages/%{name} %endif %files ldap-monitor %defattr(-,root,root,-) %{_datadir}/%{pkgdir}/ldap-monitor %doc %{_mandir}/man7/ldap-monitor.7* %files ws-monitor %defattr(-,root,root,-) %{_datadir}/%{pkgdir}/ws-monitor %doc %{_mandir}/man7/ws-monitor.7* %files cache-service %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/arc-cache-service.service %{_datadir}/%{pkgdir}/arc-cache-service-start %else %{_initrddir}/arc-cache-service %endif %{_libdir}/%{pkgdir}/libcacheservice.so %{_libdir}/%{pkgdir}/libcacheservice.apd %files datadelivery-service %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/arc-datadelivery-service.service %{_datadir}/%{pkgdir}/arc-datadelivery-service-start %else %{_initrddir}/arc-datadelivery-service %endif %{_libdir}/%{pkgdir}/libdatadeliveryservice.so %{_libdir}/%{pkgdir}/libdatadeliveryservice.apd %config(noreplace) %{_sysconfdir}/logrotate.d/%{name}-datadelivery-service %files arex %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/a-rex.service %{_datadir}/%{pkgdir}/a-rex-start %else %{_initrddir}/a-rex %endif %{_libexecdir}/%{pkgdir}/cache-clean %{_libexecdir}/%{pkgdir}/cache-list %{_libexecdir}/%{pkgdir}/jura %{_libexecdir}/%{pkgdir}/downloader %if %{with_sqlite} %{_libexecdir}/%{pkgdir}/gm-delegations-converter %doc %{_mandir}/man8/gm-delegations-converter.8* %endif %{_libexecdir}/%{pkgdir}/gm-jobs %{_libexecdir}/%{pkgdir}/gm-kick %{_libexecdir}/%{pkgdir}/smtp-send %{_libexecdir}/%{pkgdir}/smtp-send.sh %{_libexecdir}/%{pkgdir}/uploader %{_libexecdir}/%{pkgdir}/inputcheck %{_libexecdir}/%{pkgdir}/arc-vomsac-check %{_libexecdir}/%{pkgdir}/arc-config-check %{_libexecdir}/%{pkgdir}/arc-blahp-logger %{_datadir}/%{pkgdir}/cancel-*-job %{_datadir}/%{pkgdir}/scan-*-job %{_datadir}/%{pkgdir}/submit-*-job %{_datadir}/%{pkgdir}/DGAuthplug.py* %{_datadir}/%{pkgdir}/DGBridgeDataPlugin.py* %{_datadir}/%{pkgdir}/DGLog2XML.py* %{_libdir}/%{pkgdir}/libarex.so %{_libdir}/%{pkgdir}/libarex.apd %{_datadir}/%{pkgdir}/CEinfo.pl %{_datadir}/%{pkgdir}/ARC0mod.pm %{_datadir}/%{pkgdir}/FORKmod.pm %{_datadir}/%{pkgdir}/Fork.pm %{_datadir}/%{pkgdir}/SGEmod.pm %{_datadir}/%{pkgdir}/SGE.pm %{_datadir}/%{pkgdir}/LL.pm %{_datadir}/%{pkgdir}/LSF.pm %{_datadir}/%{pkgdir}/PBS.pm %{_datadir}/%{pkgdir}/Condor.pm %{_datadir}/%{pkgdir}/SLURMmod.pm %{_datadir}/%{pkgdir}/SLURM.pm %{_datadir}/%{pkgdir}/DGBridge.pm %{_datadir}/%{pkgdir}/Boinc.pm %{_datadir}/%{pkgdir}/XmlPrinter.pm %{_datadir}/%{pkgdir}/InfosysHelper.pm %{_datadir}/%{pkgdir}/LdifPrinter.pm %{_datadir}/%{pkgdir}/GLUE2xmlPrinter.pm %{_datadir}/%{pkgdir}/GLUE2ldifPrinter.pm %{_datadir}/%{pkgdir}/NGldifPrinter.pm %{_datadir}/%{pkgdir}/ARC0ClusterInfo.pm %{_datadir}/%{pkgdir}/ARC1ClusterInfo.pm %{_datadir}/%{pkgdir}/ConfigCentral.pm %{_datadir}/%{pkgdir}/GMJobsInfo.pm %{_datadir}/%{pkgdir}/HostInfo.pm %{_datadir}/%{pkgdir}/RTEInfo.pm %{_datadir}/%{pkgdir}/InfoChecker.pm %{_datadir}/%{pkgdir}/IniParser.pm %{_datadir}/%{pkgdir}/LRMSInfo.pm %{_datadir}/%{pkgdir}/Sysinfo.pm %{_datadir}/%{pkgdir}/LogUtils.pm %{_datadir}/%{pkgdir}/condor_env.pm %{_datadir}/%{pkgdir}/cancel_common.sh %{_datadir}/%{pkgdir}/config_parser.sh %{_datadir}/%{pkgdir}/configure-*-env.sh %{_datadir}/%{pkgdir}/submit_common.sh %{_datadir}/%{pkgdir}/scan_common.sh %{_datadir}/%{pkgdir}/perferator %doc %{_mandir}/man1/cache-clean.1* %doc %{_mandir}/man1/cache-list.1* %doc %{_mandir}/man1/jura.1* %doc %{_mandir}/man8/gm-jobs.8* %doc %{_mandir}/man1/arc-config-check.1* %doc %{_mandir}/man8/arc-vomsac-check.8* %doc %{_mandir}/man8/arc-blahp-logger.8* 
%doc %{_mandir}/man8/a-rex-backtrace-collect.8* %config(noreplace) %{_sysconfdir}/logrotate.d/%{name}-arex %dir %{_localstatedir}/log/arc %dir %{_localstatedir}/spool/arc %dir %{_localstatedir}/spool/arc/ssm %dir %{_localstatedir}/spool/arc/urs %{_libexecdir}/%{pkgdir}/ssmsend %dir %{_datadir}/%{pkgdir}/ssm %{_datadir}/%{pkgdir}/ssm/__init__.py* %{_datadir}/%{pkgdir}/ssm/crypto.py* %{_datadir}/%{pkgdir}/ssm/ssm2.py* %{_datadir}/%{pkgdir}/ssm/brokers.py* %{_datadir}/%{pkgdir}/ssm/sender.cfg %{_sbindir}/a-rex-backtrace-collect %if %{with_acix} %files acix-core %defattr(-,root,root,-) %dir %{python_sitelib}/acix %{python_sitelib}/acix/__init__.py* %{python_sitelib}/acix/core %files acix-cache %defattr(-,root,root,-) %{python_sitelib}/acix/cacheserver %if %{use_systemd} %{_unitdir}/acix-cache.service %{_datadir}/%{pkgdir}/acix-cache-start %else %{_initrddir}/acix-cache %endif %files acix-index %defattr(-,root,root,-) %{python_sitelib}/acix/indexserver %if %{use_systemd} %{_unitdir}/acix-index.service %{_datadir}/%{pkgdir}/acix-index-start %else %{_initrddir}/acix-index %endif %endif %files devel %defattr(-,root,root,-) %doc docdir/devel/* src/hed/shc/arcpdp/*.xsd %{_includedir}/%{pkgdir} %{_libdir}/lib*.so %{_bindir}/wsdl2hed %doc %{_mandir}/man1/wsdl2hed.1* %{_bindir}/arcplugin %doc %{_mandir}/man1/arcplugin.1* %{_libdir}/pkgconfig/arcbase.pc %files plugins-needed %defattr(-,root,root,-) %dir %{_libdir}/%{pkgdir}/test %{_libdir}/%{pkgdir}/test/libaccTEST.so %{_libdir}/%{pkgdir}/libaccARC1.so %{_libdir}/%{pkgdir}/libaccBroker.so %{_libdir}/%{pkgdir}/libaccCREAM.so %{_libdir}/%{pkgdir}/libaccEMIES.so %{_libdir}/%{pkgdir}/libaccJobDescriptionParser.so %{_libdir}/%{pkgdir}/libaccSER.so %{_libdir}/%{pkgdir}/libaccldap.so %{_libdir}/%{pkgdir}/libarcshc.so %{_libdir}/%{pkgdir}/libarcshclegacy.so %{_libdir}/%{pkgdir}/libdmcfile.so %{_libdir}/%{pkgdir}/libdmchttp.so %{_libdir}/%{pkgdir}/libdmcldap.so %{_libdir}/%{pkgdir}/libdmcsrm.so %{_libdir}/%{pkgdir}/libdmcrucio.so %{_libdir}/%{pkgdir}/libdmcacix.so %{_libdir}/%{pkgdir}/libidentitymap.so %{_libdir}/%{pkgdir}/libarguspdpclient.so %{_libdir}/%{pkgdir}/libmcchttp.so %{_libdir}/%{pkgdir}/libmccmsgvalidator.so %{_libdir}/%{pkgdir}/libmccsoap.so %{_libdir}/%{pkgdir}/libmcctcp.so %{_libdir}/%{pkgdir}/libmcctls.so %{_libdir}/%{pkgdir}/libaccARC1.apd %{_libdir}/%{pkgdir}/libaccBroker.apd %{_libdir}/%{pkgdir}/libaccCREAM.apd %{_libdir}/%{pkgdir}/libaccEMIES.apd %{_libdir}/%{pkgdir}/libaccJobDescriptionParser.apd %{_libdir}/%{pkgdir}/libaccSER.apd %{_libdir}/%{pkgdir}/libaccldap.apd %{_libdir}/%{pkgdir}/test/libaccTEST.apd %{_libdir}/%{pkgdir}/libarcshc.apd %{_libdir}/%{pkgdir}/libarcshclegacy.apd %{_libdir}/%{pkgdir}/libdmcfile.apd %{_libdir}/%{pkgdir}/libdmchttp.apd %{_libdir}/%{pkgdir}/libdmcldap.apd %{_libdir}/%{pkgdir}/libdmcsrm.apd %{_libdir}/%{pkgdir}/libdmcrucio.apd %{_libdir}/%{pkgdir}/libdmcacix.apd %{_libdir}/%{pkgdir}/libidentitymap.apd %{_libdir}/%{pkgdir}/libarguspdpclient.apd %{_libdir}/%{pkgdir}/libmcchttp.apd %{_libdir}/%{pkgdir}/libmccsoap.apd %{_libdir}/%{pkgdir}/libmcctcp.apd %{_libdir}/%{pkgdir}/libmccmsgvalidator.apd %{_libdir}/%{pkgdir}/libmcctls.apd %files plugins-globus %defattr(-,root,root,-) %{_libdir}/%{pkgdir}/libaccARC0.so %{_libdir}/%{pkgdir}/libdmcgridftp.so %{_libdir}/%{pkgdir}/libaccARC0.apd %{_libdir}/%{pkgdir}/libdmcgridftp.apd %{_libdir}/libarcglobusutils.so.* %{_libexecdir}/%{pkgdir}/arc-lcas %{_libexecdir}/%{pkgdir}/arc-lcmaps %if %{with_canl} %files arcproxyalt %defattr(-,root,root,-) %{_bindir}/arcproxyalt %endif %if 
%{with_xrootd} %files plugins-xrootd %defattr(-,root,root,-) %{_libdir}/%{pkgdir}/libdmcxrootd.so %{_libdir}/%{pkgdir}/libdmcxrootd.apd %endif %if %{with_gfal} %files plugins-gfal %defattr(-,root,root,-) %{_libdir}/%{pkgdir}/libdmcgfal.so %{_libdir}/%{pkgdir}/libdmcgfal.apd %endif %if %{with_s3} %files plugins-s3 %defattr(-,root,root,-) %{_libdir}/%{pkgdir}/libdmcs3.so %{_libdir}/%{pkgdir}/libdmcs3.apd %endif %if %{with_python} %files -n python2-%{name} %defattr(-,root,root,-) %doc docdir/python/* %dir %{python_sitearch}/%{pkgdir} %{python_sitearch}/_arc.*so %{python_sitearch}/arc %{_libdir}/%{pkgdir}/libaccPythonBroker.so %{_libdir}/%{pkgdir}/libaccPythonBroker.apd %{_libdir}/%{pkgdir}/libpythonservice.so %{_libdir}/%{pkgdir}/libpythonservice.apd %endif %if %{?rhel}%{!?rhel:0} == 5 %files -n python26-%{name} %defattr(-,root,root,-) %{python26_sitearch}/_arc.*so %{python26_sitearch}/%{pkgdir} %endif %if %{?fedora}%{!?fedora:0} >= 13 %files -n python3-%{name} %defattr(-,root,root,-) %{python3_sitearch}/_arc.*so %{python3_sitearch}/%{pkgdir} %endif %if %{with_java} %files java %defattr(-,root,root,-) %doc docdir/java/* %{_libdir}/%{pkgdir}/libjarc.so %{_jnidir}/arc.jar %{_libdir}/%{pkgdir}/libjavaservice.so %{_libdir}/%{pkgdir}/libjavaservice.apd %if %{with_gcj} %{_libdir}/gcj/%{name} %endif %endif %files gridmap-utils %defattr(-,root,root,-) %{_sbindir}/nordugridmap %config(noreplace) %{_sysconfdir}/cron.d/nordugridmap %doc src/utils/gridmap/nordugridmap.conf %doc %{_mandir}/man8/nordugridmap.8* %if %{with_cautils} %files ca-utils %defattr(-,root,root,-) %endif %files misc-utils %defattr(-,root,root,-) %{_bindir}/arcemiestest %{_bindir}/arcwsrf %{_bindir}/arcperftest %if %{with_xmlsec1} %{_bindir}/saml_assertion_init %doc %{_mandir}/man1/saml_assertion_init.1* %endif %doc %{_mandir}/man1/arcemiestest.1* %doc %{_mandir}/man1/arcwsrf.1* %doc %{_mandir}/man1/arcperftest.1* %changelog * Wed Dec 13 2017 Anders Waananen - 5.4.2-1 - 5.4.2 Final Release * Thu Oct 12 2017 Anders Waananen - 5.4.1-1 - 5.4.1 Final Release * Mon Sep 18 2017 Anders Waananen - 5.4.0-1 - 5.4.0 Final Release * Wed Jul 05 2017 Anders Waananen - 5.3.2-1 - 5.3.2 Final Release * Tue May 30 2017 Anders Waananen - 5.3.1-1 - 5.3.1 Final Release * Thu Apr 06 2017 Anders Waananen - 5.3.0-1 - 5.3.0 Final Release * Tue Feb 07 2017 Anders Waananen - 5.2.2-1 - 5.2.2 Final Release * Thu Dec 15 2016 Anders Waananen - 5.2.1-1 - 5.2.1 Final Release * Tue Oct 25 2016 Anders Waananen - 5.2.0-1 - 5.2.0 Final Release * Wed Aug 24 2016 Anders Waananen - 5.1.3-1 - 5.1.3 Final Release * Fri Jul 01 2016 Anders Waananen - 5.1.2-1 - 5.1.2 Final Release * Wed May 25 2016 Anders Waananen - 5.1.1-1 - 5.1.1 Final Release * Thu May 19 2016 Anders Waananen - 5.1.0-1 - 5.1.0 Final Release * Fri Jan 08 2016 Anders Waananen - 5.0.5-1 - 5.0.5 Final Release * Thu Nov 19 2015 Anders Waananen - 5.0.4-1 - 5.0.4 Final Release * Sun Sep 27 2015 Anders Waananen - 5.0.3-1 - 5.0.3 Final Release * Mon Jun 29 2015 Anders Waananen - 5.0.2-1 - 5.0.2 Final Release * Thu Jun 25 2015 Anders Waananen - 5.0.1-1 - 5.0.1 Final Release * Fri Mar 27 2015 Anders Waananen - 5.0.0-1 - 5.0.0 Final Release * Thu Mar 19 2015 Anders Waananen - 5.0.0-0.rc5 - 5.0.0 Release candidate 5 * Wed Feb 18 2015 Anders Waananen - 5.0.0-0.rc4 - 5.0.0 Release candidate 4 * Fri Feb 06 2015 Anders Waananen - 5.0.0-0.rc3 - 5.0.0 Release candidate 3 * Thu Feb 05 2015 Anders Waananen - 5.0.0-0.rc2 - 5.0.0 Release candidate 2 * Thu Jan 08 2015 Anders Waananen - 5.0.0-0.rc1 - 5.0.0 Release candidate 1 * Thu 
Aug 14 2014 Anders Waananen - 4.2.0-1 - 4.2.0 Final release * Tue Apr 29 2014 Anders Waananen - 4.1.0-1 - 4.1.0 Final release * Mon Mar 24 2014 Anders Waananen - 4.1.0-0.rc1 - 4.1.0 Release candidate 1 * Wed Nov 27 2013 Anders Waananen - 4.0.0-1 - 4.0.0 Final Release * Wed Nov 06 2013 Anders Waananen - 4.0.0-0.rc2 - 4.0.0 release candidate 2 * Wed Oct 23 2013 Anders Waananen - 4.0.0-0.rc1 - 4.0.0 release candidate 1 * Fri Jul 19 2013 Anders Waananen - 3.0.3-1 - 3.0.3 Final Release * Wed Jun 12 2013 Anders Waananen - 3.0.2-1 - 3.0.2 Final Release * Mon Apr 29 2013 Anders Waananen - 3.0.1-1 - 3.0.1 Final Release * Fri Apr 12 2013 Anders Waananen - 3.0.1-0.rc2 - 3.0.1 release candidate 2 * Fri Apr 12 2013 Anders Waananen - 3.0.1-0.rc1 - 3.0.1 release candidate 1 * Fri Feb 22 2013 Anders Waananen - 3.0.0-1 - 3.0.0 Final Release * Wed Feb 20 2013 Anders Waananen - 3.0.0-0.rc6 - 3.0.0 release candidate 6 * Wed Feb 06 2013 Anders Waananen - 3.0.0-0.rc5 - 3.0.0 release candidate 5 * Sat Feb 02 2013 Anders Waananen - 3.0.0-0.rc4 - 3.0.0 release candidate 4 * Wed Jan 30 2013 Anders Waananen - 3.0.0-0.rc3 - 3.0.0 release candidate 3 * Mon Jan 28 2013 Anders Waananen - 3.0.0-0.rc2 - 3.0.0 release candidate 2 * Thu Dec 06 2012 Anders Waananen - 3.0.0-0.rc1 - 3.0.0 release candidate 1 * Thu Nov 08 2012 Anders Waananen - 2.0.1-1 - 2.0.1 Final Release * Thu Oct 25 2012 Anders Waananen - 2.0.1-0.rc2 - 2.0.1 release candidate 2 * Fri Aug 24 2012 Anders Waananen - 2.0.1-0.rc1 - 2.0.1 release candidate 1 * Wed May 23 2012 Mattias Ellert - 2.0.0-1 - 2.0.0 Final Release * Wed Apr 11 2012 Mattias Ellert - 2.0.0-0.rc4.1 - 2.0.0 release candidate 4.1 * Mon Apr 02 2012 Mattias Ellert - 2.0.0-0.rc4 - 2.0.0 release candidate 4 * Thu Mar 29 2012 Mattias Ellert - 2.0.0-0.rc3.2 - 2.0.0 release candidate 3.2 * Tue Mar 27 2012 Mattias Ellert - 2.0.0-0.rc3.1 - 2.0.0 release candidate 3.1 * Mon Mar 05 2012 Mattias Ellert - 2.0.0-0.rc3 - 2.0.0 release candidate 3 * Wed Feb 15 2012 Mattias Ellert - 2.0.0-0.rc2 - 2.0.0 release candidate 2 * Wed Feb 15 2012 Mattias Ellert - 1.1.1-1 - 1.1.1 Final Release * Mon Oct 03 2011 Mattias Ellert - 1.1.0-1 - 1.1.0 Final Release * Sun Sep 25 2011 Mattias Ellert - 1.1.0-0.rc2 - 1.1.0 release candidate 2 * Sun Sep 11 2011 Mattias Ellert - 1.1.0-0.rc1 - 1.1.0 release candidate 1 * Sat Jul 23 2011 Mattias Ellert - 1.0.1-1 - 1.0.1 Final Release * Tue Jul 19 2011 Mattias Ellert - 1.0.1-0.rc4 - 1.0.1 release candidate 4 * Sat Jun 18 2011 Mattias Ellert - 1.0.1-0.rc1 - 1.0.1 release candidate 1 * Mon Apr 18 2011 Mattias Ellert - 1.0.0-1 - 1.0.0 Final Release * Wed Apr 06 2011 Mattias Ellert - 1.0.0-0.b5 - 1.0.0 beta 5 * Wed Mar 23 2011 Mattias Ellert - 1.0.0-0.b4 - 1.0.0 beta 4 * Thu Mar 10 2011 Mattias Ellert - 1.0.0-0.b3 - 1.0.0 beta 3 * Mon Mar 07 2011 Mattias Ellert - 1.0.0-0.b2 - 1.0.0 Beta 2 * Mon Feb 14 2011 Mattias Ellert - 1.0.0-0.b1 - rename nordugrid-arc-nox → nordugrid-arc - 1.0.0 Beta 1 * Tue Dec 21 2010 Mattias Ellert - 1.2.1-1 - 1.2.1 Final Release * Tue Dec 21 2010 Mattias Ellert - 1.2.1-0.rc2 - 1.2.1 Release Candidate 2 * Wed Dec 08 2010 Mattias Ellert - 1.2.1-0.rc1 - 1.2.1 Release Candidate 1 * Fri Oct 22 2010 Mattias Ellert - 1.2.0-1 - 1.2.0 Final Release * Thu Sep 30 2010 Mattias Ellert - 1.2.0-0.rc2 - 1.2.0 Release Candidate 2 * Mon Sep 13 2010 Mattias Ellert - 1.2.0-0.rc1 - 1.2.0 Release Candidate 1 * Wed May 05 2010 Mattias Ellert - 1.1.0-1 - 1.1.0 Final Release * Mon Mar 08 2010 Mattias Ellert - 1.1.0-0.rc6 - 1.1.0 Release Candidate 6 * Fri Feb 26 2010 Mattias Ellert - 
1.1.0-0.rc5.1 - Rebuild for Globus Toolkit 5 * Fri Feb 26 2010 Mattias Ellert - 1.1.0-0.rc5 - 1.1.0 Release Candidate 5 * Wed Feb 24 2010 Mattias Ellert - 1.1.0-0.rc4 - 1.1.0 Release Candidate 4 * Mon Feb 22 2010 Mattias Ellert - 1.1.0-0.rc3 - 1.1.0 Release Candidate 3 * Mon Feb 15 2010 Mattias Ellert - 1.1.0-0.rc2 - 1.1.0 Release Candidate 2 * Thu Feb 11 2010 Mattias Ellert - 1.1.0-0.rc1 - 1.1.0 Release Candidate 1 * Sun Nov 29 2009 Mattias Ellert - 1.0.0-1 - 1.0.0 Final Release * Thu Nov 19 2009 Mattias Ellert - 1.0.0-0.rc7 - 1.0.0 Release Candidate 7 * Thu Nov 12 2009 Mattias Ellert - 1.0.0-0.rc6 - 1.0.0 Release Candidate 6 * Wed Nov 04 2009 Mattias Ellert - 1.0.0-0.rc5 - rename nordugrid-arc1 → nordugrid-arc-nox - 1.0.0 Release candidate 5 * Mon Oct 26 2009 Mattias Ellert - 0.9.4-0.rc4 - 0.9.4 Release candidate 4 * Thu Oct 22 2009 Mattias Ellert - 0.9.4-0.rc3 - 0.9.4 Release candidate 3 * Wed Oct 14 2009 Mattias Ellert - 0.9.4-0.rc2 - 0.9.4 Release candidate 2 * Sun Sep 27 2009 Mattias Ellert - 0.9.3-1 - 0.9.3 Final release * Thu Sep 17 2009 Mattias Ellert - 0.9.3-0.rc3 - 0.9.3 Release candidate 3 * Wed Jan 14 2009 Anders Wäänänen - 0.9.2-1 - 0.9.2 * Fri Oct 10 2008 Anders Wäänänen - 0.9.1-1 - Initial build nordugrid-arc-5.4.2/PaxHeaders.7502/config.rpath0000644000000000000000000000013213214315702017477 xustar000000000000000030 mtime=1513200578.027744933 30 atime=1513200621.461276143 30 ctime=1513200658.614730543 nordugrid-arc-5.4.2/config.rpath0000755000175000002070000003521713214315702017560 0ustar00mockbuildmock00000000000000#! /bin/sh # Output a system dependent set of variables, describing how to set the # run time search path of shared libraries in an executable. # # Copyright 1996-2003 Free Software Foundation, Inc. # Taken from GNU libtool, 2001 # Originally by Gordon Matzigkeit , 1996 # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # # The first argument passed to this file is the canonical host specification, # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM # or # CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM # The environment variables CC, GCC, LDFLAGS, LD, with_gnu_ld # should be set by the caller. # # The set of defined variables is at the end of this script. # Known limitations: # - On IRIX 6.5 with CC="cc", the run time search patch must not be longer # than 256 bytes, otherwise the compiler driver will dump core. The only # known workaround is to choose shorter directory names for the build # directory and/or the installation directory. # All known linkers require a `.a' archive for static linking (except M$VC, # which needs '.lib'). 
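# A minimal usage sketch (an assumption based on the header comments above,
# not taken from the callers in this tree): a configure-time caller exports
# the toolchain variables named above, passes the canonical host triplet as
# the first argument, and then reads back the variable assignments this
# script defines at its end, roughly:
#
#   CC=gcc GCC=yes LD=/usr/bin/ld with_gnu_ld=yes \
#     sh ./config.rpath x86_64-pc-linux-gnu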
libext=a shrext=.so host="$1" host_cpu=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'` host_vendor=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'` host_os=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'` # Code taken from libtool.m4's AC_LIBTOOL_PROG_COMPILER_PIC. wl= if test "$GCC" = yes; then wl='-Wl,' else case "$host_os" in aix*) wl='-Wl,' ;; mingw* | pw32* | os2*) ;; hpux9* | hpux10* | hpux11*) wl='-Wl,' ;; irix5* | irix6* | nonstopux*) wl='-Wl,' ;; newsos6) ;; linux*) case $CC in icc|ecc) wl='-Wl,' ;; ccc) wl='-Wl,' ;; esac ;; osf3* | osf4* | osf5*) wl='-Wl,' ;; sco3.2v5*) ;; solaris*) wl='-Wl,' ;; sunos4*) wl='-Qoption ld ' ;; sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) wl='-Wl,' ;; sysv4*MP*) ;; uts4*) ;; esac fi # Code taken from libtool.m4's AC_LIBTOOL_PROG_LD_SHLIBS. hardcode_libdir_flag_spec= hardcode_libdir_separator= hardcode_direct=no hardcode_minus_L=no case "$host_os" in cygwin* | mingw* | pw32*) # FIXME: the MSVC++ port hasn't been tested in a loooong time # When not using gcc, we currently assume that we are using # Microsoft Visual C++. if test "$GCC" != yes; then with_gnu_ld=no fi ;; openbsd*) with_gnu_ld=no ;; esac ld_shlibs=yes if test "$with_gnu_ld" = yes; then case "$host_os" in aix3* | aix4* | aix5*) # On AIX/PPC, the GNU linker is very broken if test "$host_cpu" != ia64; then ld_shlibs=no fi ;; amigaos*) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes # Samuel A. Falvo II reports # that the semantics of dynamic libraries on AmigaOS, at least up # to version 4, is to share data among multiple programs linked # with the same dynamic library. Since this doesn't match the # behavior of shared libraries on other platforms, we can use # them. ld_shlibs=no ;; beos*) if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then : else ld_shlibs=no fi ;; cygwin* | mingw* | pw32*) # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. hardcode_libdir_flag_spec='-L$libdir' if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then : else ld_shlibs=no fi ;; netbsd*) ;; solaris* | sysv5*) if $LD -v 2>&1 | egrep 'BFD 2\.8' > /dev/null; then ld_shlibs=no elif $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then : else ld_shlibs=no fi ;; sunos4*) hardcode_direct=yes ;; *) if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then : else ld_shlibs=no fi ;; esac if test "$ld_shlibs" = yes; then # Unlike libtool, we use -rpath here, not --rpath, since the documented # option of GNU ld is called -rpath, not --rpath. hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' fi else case "$host_os" in aix3*) # Note: this linker hardcodes the directories in LIBPATH if there # are no directories specified by -L. hardcode_minus_L=yes if test "$GCC" = yes; then # Neither direct hardcoding nor static linking is supported with a # broken collect2. hardcode_direct=unsupported fi ;; aix4* | aix5*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no else aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. 
case $host_os in aix4.[23]|aix4.[23].*|aix5*) for ld_flag in $LDFLAGS; do if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then aix_use_runtimelinking=yes break fi done esac fi hardcode_direct=yes hardcode_libdir_separator=':' if test "$GCC" = yes; then case $host_os in aix4.[012]|aix4.[012].*) collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && \ strings "$collect2name" | grep resolve_lib_name >/dev/null then # We have reworked collect2 hardcode_direct=yes else # We have old collect2 hardcode_direct=unsupported hardcode_minus_L=yes hardcode_libdir_flag_spec='-L$libdir' hardcode_libdir_separator= fi esac fi # Begin _LT_AC_SYS_LIBPATH_AIX. echo 'int main () { return 0; }' > conftest.c ${CC} ${LDFLAGS} conftest.c -o conftest aix_libpath=`dump -H conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } }'` if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } }'` fi if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib" fi rm -f conftest.c conftest # End _LT_AC_SYS_LIBPATH_AIX. if test "$aix_use_runtimelinking" = yes; then hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" else if test "$host_cpu" = ia64; then hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' else hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" fi fi ;; amigaos*) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes # see comment about different semantics on the GNU ld section ld_shlibs=no ;; bsdi4*) ;; cygwin* | mingw* | pw32*) # When not using gcc, we currently assume that we are using # Microsoft Visual C++. # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. hardcode_libdir_flag_spec=' ' libext=lib ;; darwin* | rhapsody*) if $CC -v 2>&1 | grep 'Apple' >/dev/null ; then hardcode_direct=no fi ;; dgux*) hardcode_libdir_flag_spec='-L$libdir' ;; freebsd1*) ld_shlibs=no ;; freebsd2.2*) hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes ;; freebsd2*) hardcode_direct=yes hardcode_minus_L=yes ;; freebsd*) hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes ;; hpux9*) hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: hardcode_direct=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes ;; hpux10* | hpux11*) if test "$with_gnu_ld" = no; then case "$host_cpu" in hppa*64*) hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: hardcode_direct=no ;; ia64*) hardcode_libdir_flag_spec='-L$libdir' hardcode_direct=no # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes ;; *) hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: hardcode_direct=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. 
hardcode_minus_L=yes ;; esac fi ;; irix5* | irix6* | nonstopux*) hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: ;; netbsd*) hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes ;; newsos6) hardcode_direct=yes hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: ;; openbsd*) hardcode_direct=yes if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then hardcode_libdir_flag_spec='${wl}-rpath,$libdir' else case "$host_os" in openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) hardcode_libdir_flag_spec='-R$libdir' ;; *) hardcode_libdir_flag_spec='${wl}-rpath,$libdir' ;; esac fi ;; os2*) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; osf3*) hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: ;; osf4* | osf5*) if test "$GCC" = yes; then hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' else # Both cc and cxx compiler support -rpath directly hardcode_libdir_flag_spec='-rpath $libdir' fi hardcode_libdir_separator=: ;; sco3.2v5*) ;; solaris*) hardcode_libdir_flag_spec='-R$libdir' ;; sunos4*) hardcode_libdir_flag_spec='-L$libdir' hardcode_direct=yes hardcode_minus_L=yes ;; sysv4) case $host_vendor in sni) hardcode_direct=yes # is this really true??? ;; siemens) hardcode_direct=no ;; motorola) hardcode_direct=no #Motorola manual says yes, but my tests say they lie ;; esac ;; sysv4.3*) ;; sysv4*MP*) if test -d /usr/nec; then ld_shlibs=yes fi ;; sysv4.2uw2*) hardcode_direct=yes hardcode_minus_L=no ;; sysv5OpenUNIX8* | sysv5UnixWare7* | sysv5uw[78]* | unixware7*) ;; sysv5*) hardcode_libdir_flag_spec= ;; uts4*) hardcode_libdir_flag_spec='-L$libdir' ;; *) ld_shlibs=no ;; esac fi # Check dynamic linker characteristics # Code taken from libtool.m4's AC_LIBTOOL_SYS_DYNAMIC_LINKER. 
libname_spec='lib$name' case "$host_os" in aix3*) ;; aix4* | aix5*) ;; amigaos*) ;; beos*) ;; bsdi4*) ;; cygwin* | mingw* | pw32*) shrext=.dll ;; darwin* | rhapsody*) shrext=.dylib ;; dgux*) ;; freebsd1*) ;; freebsd*) ;; gnu*) ;; hpux9* | hpux10* | hpux11*) case "$host_cpu" in ia64*) shrext=.so ;; hppa*64*) shrext=.sl ;; *) shrext=.sl ;; esac ;; irix5* | irix6* | nonstopux*) case "$host_os" in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= ;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 ;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 ;; *) libsuff= shlibsuff= ;; esac ;; esac ;; linux*oldld* | linux*aout* | linux*coff*) ;; linux*) ;; netbsd*) ;; newsos6) ;; nto-qnx) ;; openbsd*) ;; os2*) libname_spec='$name' shrext=.dll ;; osf3* | osf4* | osf5*) ;; sco3.2v5*) ;; solaris*) ;; sunos4*) ;; sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) ;; sysv4*MP*) ;; uts4*) ;; esac sed_quote_subst='s/\(["`$\\]\)/\\\1/g' escaped_wl=`echo "X$wl" | sed -e 's/^X//' -e "$sed_quote_subst"` shlibext=`echo "$shrext" | sed -e 's,^\.,,'` escaped_hardcode_libdir_flag_spec=`echo "X$hardcode_libdir_flag_spec" | sed -e 's/^X//' -e "$sed_quote_subst"` sed -e 's/^\([a-zA-Z0-9_]*\)=/acl_cv_\1=/' < POTFILES.in.2 ; \ if diff $(srcdir)/POTFILES.in POTFILES.in.2 >/dev/null 2>&1 ; then \ rm -f POTFILES.in.2 ; \ else \ mv POTFILES.in.2 $(srcdir)/POTFILES.in ; \ fi DISTFILES.extra1 = Rules-POTFILES nordugrid-arc-5.4.2/po/PaxHeaders.7502/hu.po0000644000000000000000000000013113214316034016562 xustar000000000000000030 mtime=1513200668.286848837 29 atime=1513200668.50785154 30 ctime=1513200668.603852714 nordugrid-arc-5.4.2/po/hu.po0000644000175000002070000221376213214316034016646 0ustar00mockbuildmock00000000000000# Translation of Arc.po to Hungarian # Gábor Rőczei , 2010. # Translation file for the Advanced Resource Connector (Arc) msgid "" msgstr "" "Project-Id-Version: Arc\n" "Report-Msgid-Bugs-To: support@nordugrid.org\n" "POT-Creation-Date: 2017-12-13 22:31+0100\n" "PO-Revision-Date: 2010-07-05 12:25+0100\n" "Last-Translator: Gábor Rőczei \n" "Language-Team: Hungarian\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Language: \n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "X-Poedit-Language: Hungarian\n" "X-Poedit-Country: HUNGARY\n" "X-Poedit-SourceCharset: utf-8\n" #: src/clients/compute/arccat.cpp:35 src/clients/compute/arcclean.cpp:34 #: src/clients/compute/arcget.cpp:35 src/clients/compute/arckill.cpp:33 #: src/clients/compute/arcrenew.cpp:32 src/clients/compute/arcresub.cpp:35 #: src/clients/compute/arcresume.cpp:32 src/clients/compute/arcstat.cpp:34 msgid "[job ...]" msgstr "" #: src/clients/compute/arccat.cpp:36 msgid "" "The arccat command performs the cat command on the stdout, stderr or grid\n" "manager's error log of the job." 
msgstr "" #: src/clients/compute/arccat.cpp:43 src/clients/compute/arcclean.cpp:41 #: src/clients/compute/arcget.cpp:42 src/clients/compute/arcinfo.cpp:45 #: src/clients/compute/arckill.cpp:40 src/clients/compute/arcrenew.cpp:37 #: src/clients/compute/arcresub.cpp:40 src/clients/compute/arcresume.cpp:37 #: src/clients/compute/arcstat.cpp:42 src/clients/compute/arcsub.cpp:54 #: src/clients/compute/arcsync.cpp:147 src/clients/compute/arctest.cpp:62 #: src/clients/credentials/arcproxy.cpp:475 #: src/clients/credentials/arcproxyalt.cpp:461 src/clients/data/arccp.cpp:641 #: src/clients/data/arcls.cpp:346 src/clients/data/arcmkdir.cpp:125 #: src/clients/data/arcrename.cpp:136 src/clients/data/arcrm.cpp:151 #: src/clients/echo/arcecho.cpp:61 src/clients/saml/saml_assertion_init.cpp:62 #: src/clients/wsrf/arcwsrf.cpp:74 src/hed/daemon/unix/main_unix.cpp:346 #: src/hed/daemon/win32/main_win32.cpp:148 #: src/services/a-rex/jura/jura.cpp:109 #, c-format msgid "%s version %s" msgstr "%s verzió %s" #: src/clients/compute/arccat.cpp:52 src/clients/compute/arcclean.cpp:50 #: src/clients/compute/arcget.cpp:51 src/clients/compute/arcinfo.cpp:53 #: src/clients/compute/arckill.cpp:49 src/clients/compute/arcrenew.cpp:46 #: src/clients/compute/arcresub.cpp:49 src/clients/compute/arcresume.cpp:46 #: src/clients/compute/arcstat.cpp:51 src/clients/compute/arcsub.cpp:63 #: src/clients/compute/arcsync.cpp:156 src/clients/compute/arctest.cpp:84 #: src/clients/credentials/arcproxy.cpp:483 #: src/clients/credentials/arcproxyalt.cpp:469 src/clients/data/arccp.cpp:648 #: src/clients/data/arcls.cpp:354 src/clients/data/arcmkdir.cpp:133 #: src/clients/data/arcrename.cpp:144 src/clients/data/arcrm.cpp:160 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:172 #, c-format msgid "Running command: %s" msgstr "" #: src/clients/compute/arccat.cpp:63 src/clients/compute/arcclean.cpp:61 #: src/clients/compute/arcget.cpp:62 src/clients/compute/arcinfo.cpp:65 #: src/clients/compute/arckill.cpp:60 src/clients/compute/arcrenew.cpp:57 #: src/clients/compute/arcresub.cpp:53 src/clients/compute/arcresume.cpp:50 #: src/clients/compute/arcstat.cpp:62 src/clients/compute/arcsub.cpp:67 #: src/clients/compute/arcsync.cpp:167 src/clients/compute/arctest.cpp:88 #: src/clients/data/arccp.cpp:671 src/clients/data/arcls.cpp:376 #: src/clients/data/arcmkdir.cpp:155 src/clients/data/arcrename.cpp:166 #: src/clients/data/arcrm.cpp:182 src/clients/echo/arcecho.cpp:72 #: src/clients/wsrf/arcwsrf.cpp:101 msgid "Failed configuration initialization" msgstr "Nem sikerült betölteni a konfigurációt" #: src/clients/compute/arccat.cpp:76 src/clients/compute/arcclean.cpp:74 #: src/clients/compute/arcget.cpp:88 src/clients/compute/arckill.cpp:73 #: src/clients/compute/arcrenew.cpp:70 src/clients/compute/arcresub.cpp:81 #: src/clients/compute/arcresume.cpp:70 src/clients/compute/arcstat.cpp:71 #, fuzzy, c-format msgid "Cannot read specified jobid file: %s" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/clients/compute/arccat.cpp:87 src/clients/compute/arcclean.cpp:85 #: src/clients/compute/arcget.cpp:99 src/clients/compute/arckill.cpp:84 #: src/clients/compute/arcrenew.cpp:81 src/clients/compute/arcresub.cpp:95 #: src/clients/compute/arcresume.cpp:81 src/clients/compute/arcstat.cpp:105 msgid "No jobs given" msgstr "Nem adott meg feladatot" #: src/clients/compute/arccat.cpp:100 src/clients/compute/arcclean.cpp:98 #: src/clients/compute/arcget.cpp:112 src/clients/compute/arckill.cpp:97 #: src/clients/compute/arcrenew.cpp:94 
src/clients/compute/arcresub.cpp:105 #: src/clients/compute/arcresume.cpp:94 src/clients/compute/arcstat.cpp:117 #, fuzzy, c-format msgid "Job list file (%s) doesn't exist" msgstr "Az XML konfigurációs fájl: %s nem létezik" #: src/clients/compute/arccat.cpp:107 src/clients/compute/arcclean.cpp:105 #: src/clients/compute/arcget.cpp:119 src/clients/compute/arckill.cpp:104 #: src/clients/compute/arcrenew.cpp:101 src/clients/compute/arcresub.cpp:112 #: src/clients/compute/arcresume.cpp:101 src/clients/compute/arcstat.cpp:124 #: src/clients/compute/arctest.cpp:298 #, fuzzy, c-format msgid "Unable to read job information from file (%s)" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/clients/compute/arccat.cpp:116 src/clients/compute/arcclean.cpp:113 #: src/clients/compute/arcget.cpp:127 src/clients/compute/arckill.cpp:112 #: src/clients/compute/arcrenew.cpp:110 src/clients/compute/arcresub.cpp:120 #: src/clients/compute/arcresume.cpp:110 src/clients/compute/arcstat.cpp:133 #, c-format msgid "Warning: Job not found in job list: %s" msgstr "" #: src/clients/compute/arccat.cpp:129 src/clients/compute/arcclean.cpp:168 #: src/clients/compute/arcget.cpp:140 src/clients/compute/arckill.cpp:124 #: src/clients/compute/arcrenew.cpp:122 src/clients/compute/arcresub.cpp:132 #: src/clients/compute/arcresume.cpp:122 #, fuzzy msgid "No jobs" msgstr "Nem adott meg feladatot" #: src/clients/compute/arccat.cpp:144 #, c-format msgid "Could not create temporary file \"%s\"" msgstr "" #: src/clients/compute/arccat.cpp:145 src/clients/compute/arccat.cpp:151 #, c-format msgid "Cannot create output of %s for any jobs" msgstr "" #: src/clients/compute/arccat.cpp:152 #, fuzzy, c-format msgid "Invalid destination URL %s" msgstr "Érvénytelen URL: %s" #: src/clients/compute/arccat.cpp:170 #, c-format msgid "Job deleted: %s" msgstr "" #: src/clients/compute/arccat.cpp:180 #, c-format msgid "Job has not started yet: %s" msgstr "" #: src/clients/compute/arccat.cpp:188 #, c-format msgid "Cannot determine the %s location: %s" msgstr "" #: src/clients/compute/arccat.cpp:196 #, c-format msgid "Cannot create output of %s for job (%s): Invalid source %s" msgstr "" #: src/clients/compute/arccat.cpp:206 #, c-format msgid "Catting %s for job %s" msgstr "" #: src/clients/compute/arcclean.cpp:35 #, fuzzy msgid "The arcclean command removes a job from the computing resource." msgstr "Az arcclean parancs eltávolít egy feladatot a távoli klaszterröl" #: src/clients/compute/arcclean.cpp:137 msgid "" "You are about to remove jobs from the job list for which no information " "could be\n" "found. NOTE: Recently submitted jobs might not have appeared in the " "information\n" "system, and this action will also remove such jobs." msgstr "" #: src/clients/compute/arcclean.cpp:140 msgid "Are you sure you want to clean jobs missing information?" msgstr "" #: src/clients/compute/arcclean.cpp:141 src/clients/compute/arcsync.cpp:189 msgid "y" msgstr "y" #: src/clients/compute/arcclean.cpp:141 src/clients/compute/arcsync.cpp:189 msgid "n" msgstr "n" #: src/clients/compute/arcclean.cpp:146 msgid "Jobs missing information will not be cleaned!" 
msgstr "" #: src/clients/compute/arcclean.cpp:162 src/clients/compute/arcresub.cpp:155 #: src/clients/compute/arcsub.cpp:208 src/clients/compute/arctest.cpp:302 #, fuzzy, c-format msgid "Warning: Failed to write job information to file (%s)" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/clients/compute/arcclean.cpp:163 msgid "" " Run 'arcclean -s Undefined' to remove cleaned jobs from job list" msgstr "" #: src/clients/compute/arcclean.cpp:172 #, c-format msgid "Jobs processed: %d, deleted: %d" msgstr "" #: src/clients/compute/arcget.cpp:36 msgid "The arcget command is used for retrieving the results from a job." msgstr "" "Az arcget parancsot arra lehet használni, hogy a feladat eredményeit " "megjelenítse" #: src/clients/compute/arcget.cpp:76 #, fuzzy, c-format msgid "Job download directory from user configuration file: %s " msgstr "voms szerver fájljának az elérési útvonala" #: src/clients/compute/arcget.cpp:79 #, fuzzy msgid "Job download directory will be created in present working directory. " msgstr "" "könyvtár letöltése (a feladat könyvtára ebben a könyvtárban fog létrejönni)" #: src/clients/compute/arcget.cpp:83 #, c-format msgid "Job download directory: %s " msgstr "" #: src/clients/compute/arcget.cpp:150 #, c-format msgid "Unable to create directory for storing results (%s) - %s" msgstr "" #: src/clients/compute/arcget.cpp:160 #, c-format msgid "Results stored at: %s" msgstr "" #: src/clients/compute/arcget.cpp:172 src/clients/compute/arckill.cpp:140 msgid "Warning: Some jobs were not removed from server" msgstr "" #: src/clients/compute/arcget.cpp:173 src/clients/compute/arcget.cpp:180 msgid " Use arclean to remove retrieved jobs from job list" msgstr "" #: src/clients/compute/arcget.cpp:179 src/clients/compute/arckill.cpp:147 #: src/clients/compute/arcresub.cpp:185 #, c-format msgid "Warning: Failed removing jobs from file (%s)" msgstr "" #: src/clients/compute/arcget.cpp:184 #, c-format msgid "" "Jobs processed: %d, successfully retrieved: %d, successfully cleaned: %d" msgstr "" #: src/clients/compute/arcget.cpp:188 #, c-format msgid "Jobs processed: %d, successfully retrieved: %d" msgstr "" #: src/clients/compute/arcinfo.cpp:34 msgid "[resource ...]" msgstr "" #: src/clients/compute/arcinfo.cpp:35 #, fuzzy msgid "" "The arcinfo command is used for obtaining the status of computing resources " "on the Grid." msgstr "" "Az arcinfo paranccsal lehet lekérdezni az egyes klaszterek állapotát a grid-" "ben." #: src/clients/compute/arcinfo.cpp:142 msgid "Information endpoint" msgstr "" #: src/clients/compute/arcinfo.cpp:149 msgid "Submission endpoint" msgstr "" #: src/clients/compute/arcinfo.cpp:151 msgid "status" msgstr "" #: src/clients/compute/arcinfo.cpp:153 #, fuzzy msgid "interface" msgstr "Felhasználó oldali hiba" #: src/clients/compute/arcinfo.cpp:172 msgid "ERROR: Failed to retrieve information from the following endpoints:" msgstr "" #: src/clients/compute/arcinfo.cpp:185 msgid "ERROR: Failed to retrieve information" msgstr "" #: src/clients/compute/arcinfo.cpp:187 msgid "from the following endpoints:" msgstr "" #: src/clients/compute/arckill.cpp:34 msgid "The arckill command is used to kill running jobs." 
msgstr "Az arckill paranccsal lehet megölni egy futó feladatot" #: src/clients/compute/arckill.cpp:141 msgid " Use arcclean to remove retrieved jobs from job list" msgstr "" #: src/clients/compute/arckill.cpp:148 msgid "" " Run 'arcclean -s Undefined' to remove killed jobs from job list" msgstr "" #: src/clients/compute/arckill.cpp:151 #, c-format msgid "Jobs processed: %d, successfully killed: %d, successfully cleaned: %d" msgstr "" #: src/clients/compute/arckill.cpp:153 #, c-format msgid "Jobs processed: %d, successfully killed: %d" msgstr "" #: src/clients/compute/arcrenew.cpp:128 #, c-format msgid "Jobs processed: %d, renewed: %d" msgstr "" #: src/clients/compute/arcresub.cpp:75 msgid "--same and --not-same cannot be specified together." msgstr "" #: src/clients/compute/arcresub.cpp:144 #, fuzzy msgid "No jobs to resubmit with the specified status" msgstr "Nincs megadva feladat leírás bemeneti adatként" #: src/clients/compute/arcresub.cpp:151 src/clients/compute/arcsub.cpp:194 #, c-format msgid "Job submitted with jobid: %s" msgstr "Feladat elküldve ezzel az azonítóval: %s" #: src/clients/compute/arcresub.cpp:156 msgid " To recover missing jobs, run arcsync" msgstr "" #: src/clients/compute/arcresub.cpp:161 #, fuzzy, c-format msgid "Cannot write jobids to file (%s)" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/clients/compute/arcresub.cpp:172 #, c-format msgid "" "Resubmission of job (%s) succeeded, but killing the job failed - it will " "still appear in the job list" msgstr "" #: src/clients/compute/arcresub.cpp:181 #, c-format msgid "" "Resubmission of job (%s) succeeded, but cleaning the job failed - it will " "still appear in the job list" msgstr "" #: src/clients/compute/arcresub.cpp:186 msgid " Use arcclean to remove non-existing jobs" msgstr "" #: src/clients/compute/arcresub.cpp:193 #, fuzzy msgid "Job resubmission summary:" msgstr "Job küldési összefoglaló" #: src/clients/compute/arcresub.cpp:195 #, fuzzy, c-format msgid "%d of %d jobs were resubmitted" msgstr "%d %d feladatból elküldve" #: src/clients/compute/arcresub.cpp:197 #, fuzzy, c-format msgid "The following %d were not resubmitted" msgstr "%d nem lett elküldve" #: src/clients/compute/arcresume.cpp:128 #, c-format msgid "Jobs processed: %d, resumed: %d" msgstr "" #: src/clients/compute/arcstat.cpp:35 #, fuzzy msgid "" "The arcstat command is used for obtaining the status of jobs that have\n" "been submitted to Grid enabled resources." msgstr "" "Az arcstat paranccsal lehet lekérdezni azon feladatok állapotát,\n" "amelyek el lettek küldve a grid-ben lévő klaszterre." #: src/clients/compute/arcstat.cpp:79 msgid "The 'sort' and 'rsort' flags cannot be specified at the same time." msgstr "A 'sort' vagy az 'rsort' kapcsolókat nem lehet egyszerre használni" #: src/clients/compute/arcstat.cpp:149 msgid "No jobs found, try later" msgstr "" #: src/clients/compute/arcstat.cpp:176 #, c-format msgid "Status of %d jobs was queried, %d jobs returned information" msgstr "" #: src/clients/compute/arcsub.cpp:46 msgid "[filename ...]" msgstr "[fájlnév ...]" #: src/clients/compute/arcsub.cpp:47 #, fuzzy msgid "" "The arcsub command is used for submitting jobs to Grid enabled computing\n" "resources." msgstr "Az arcsub paranccsal lehet feladatot küldeni a grid-be." 
#: src/clients/compute/arcsub.cpp:99 msgid "No job description input specified" msgstr "Nincs megadva feladat leírás bemeneti adatként" #: src/clients/compute/arcsub.cpp:112 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:499 #, c-format msgid "Can not open job description file: %s" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/clients/compute/arcsub.cpp:140 src/clients/compute/arcsub.cpp:168 msgid "Invalid JobDescription:" msgstr "Érvénytelen feladat leírás:" #: src/clients/compute/arcsub.cpp:200 #, fuzzy, c-format msgid "Cannot write job IDs to file (%s)" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/clients/compute/arcsub.cpp:205 src/clients/compute/arcsync.cpp:66 #, c-format msgid "Warning: Unable to open job list file (%s), unknown format" msgstr "" #: src/clients/compute/arcsub.cpp:210 src/clients/compute/arctest.cpp:304 msgid "To recover missing jobs, run arcsync" msgstr "" #: src/clients/compute/arcsub.cpp:217 msgid "Job submission summary:" msgstr "Job küldési összefoglaló" #: src/clients/compute/arcsub.cpp:219 #, c-format msgid "%d of %d jobs were submitted" msgstr "%d %d feladatból elküldve" #: src/clients/compute/arcsub.cpp:221 #, c-format msgid "The following %d were not submitted" msgstr "%d nem lett elküldve" #: src/clients/compute/arcsub.cpp:228 msgid "Job nr." msgstr "" #: src/clients/compute/arcsub.cpp:268 #, c-format msgid "Removing endpoint %s: It has an unrequested interface (%s)." msgstr "" #: src/clients/compute/arcsub.cpp:282 #, fuzzy, c-format msgid "ERROR: Unable to load broker %s" msgstr "Nem sikerült betölteni a %s bróker modult" #: src/clients/compute/arcsub.cpp:286 #, fuzzy msgid "" "ERROR: Job submission aborted because no resource returned any information" msgstr "" "Feladat küldés sikertelen, mert egyetlen klaszter sem adott vissza " "információt magáról" #: src/clients/compute/arcsub.cpp:290 msgid "ERROR: One or multiple job descriptions was not submitted." msgstr "" #: src/clients/compute/arcsub.cpp:304 #, c-format msgid "" "A computing resource using the GridFTP interface was requested, but\n" "%sthe corresponding plugin could not be loaded. Is the plugin installed?\n" "%sIf not, please install the package 'nordugrid-arc-plugins-globus'.\n" "%sDepending on your type of installation the package name might differ." msgstr "" #: src/clients/compute/arcsub.cpp:338 src/clients/compute/arctest.cpp:236 msgid "" "Unable to adapt job description to any resource, no resource information " "could be obtained." msgstr "" #: src/clients/compute/arcsub.cpp:339 src/clients/compute/arctest.cpp:237 msgid "Original job description is listed below:" msgstr "" #: src/clients/compute/arcsub.cpp:351 #, c-format msgid "Dumping job description aborted: Unable to load broker %s" msgstr "" #: src/clients/compute/arcsub.cpp:368 src/clients/compute/arctest.cpp:317 #, fuzzy, c-format msgid "" "Unable to prepare job description according to needs of the target resource " "(%s)." msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." #: src/clients/compute/arcsub.cpp:384 src/clients/compute/arctest.cpp:330 #, fuzzy, c-format msgid "" "An error occurred during the generation of job description to be sent to %s" msgstr "Egy hiba lépett fel a feladat leírás elkészítése közben." 
#: src/clients/compute/arcsub.cpp:388 src/clients/compute/arctest.cpp:334 #, c-format msgid "Job description to be sent to %s:" msgstr "Feladat leírás elküldve ide: %s" #: src/clients/compute/arcsub.cpp:406 #, fuzzy msgid "" "Unable to prepare job description according to needs of the target resource." msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." #: src/clients/compute/arcsync.cpp:76 msgid "Found the following jobs:" msgstr "" #: src/clients/compute/arcsync.cpp:86 msgid "Total number of jobs found: " msgstr "" #: src/clients/compute/arcsync.cpp:98 msgid "Found the following new jobs:" msgstr "" #: src/clients/compute/arcsync.cpp:108 msgid "Total number of new jobs found: " msgstr "" #: src/clients/compute/arcsync.cpp:113 #, c-format msgid "ERROR: Failed to write job information to file (%s)" msgstr "" #: src/clients/compute/arcsync.cpp:140 msgid "" "The arcsync command synchronizes your local job list with the information " "at\n" "the given resources or index servers." msgstr "" #: src/clients/compute/arcsync.cpp:183 msgid "" "Synchronizing the local list of active jobs with the information in the\n" "information system can result in some inconsistencies. Very recently " "submitted\n" "jobs might not yet be present, whereas jobs very recently scheduled for\n" "deletion can still be present." msgstr "" #: src/clients/compute/arcsync.cpp:188 msgid "Are you sure you want to synchronize your local job list?" msgstr "" #: src/clients/compute/arcsync.cpp:193 msgid "Cancelling synchronization request" msgstr "" #: src/clients/compute/arcsync.cpp:203 msgid "" "No services specified. Please configure default services in the client " "configuration,or specify a cluster or index (-c or -g options, see arcsync -" "h)." msgstr "" #: src/clients/compute/arctest.cpp:55 msgid " " msgstr "" #: src/clients/compute/arctest.cpp:56 #, fuzzy msgid "The arctest command is used for testing clusters as resources." msgstr "" "Az arcget parancsot arra lehet használni, hogy a feladat eredményeit " "megjelenítse" #: src/clients/compute/arctest.cpp:68 msgid "" "Nothing to do:\n" "you have to either specify a test job id with -J (--job)\n" "or query information about the certificates with -E (--certificate)\n" msgstr "" #: src/clients/compute/arctest.cpp:75 msgid "" "For the 1st test job you also have to specify a runtime value with -r (--" "runtime) option." 
msgstr "" #: src/clients/compute/arctest.cpp:109 #, fuzzy msgid "Certificate information:" msgstr "verzió információ kiírása" #: src/clients/compute/arctest.cpp:113 #, fuzzy msgid "No user-certificate found" msgstr "publikus kulcs elérési útvonala" #: src/clients/compute/arctest.cpp:116 #, fuzzy, c-format msgid "Certificate: %s" msgstr "Célállomás: %s" #: src/clients/compute/arctest.cpp:118 #, fuzzy, c-format msgid "Subject name: %s" msgstr "Tárgy: %s" #: src/clients/compute/arctest.cpp:119 #, fuzzy, c-format msgid "Valid until: %s" msgstr "A proxy eddig érvényes: %s" #: src/clients/compute/arctest.cpp:123 msgid "Unable to determine certificate information" msgstr "" #: src/clients/compute/arctest.cpp:127 #, fuzzy msgid "Proxy certificate information:" msgstr "verzió információ kiírása" #: src/clients/compute/arctest.cpp:129 msgid "No proxy found" msgstr "" #: src/clients/compute/arctest.cpp:132 #, fuzzy, c-format msgid "Proxy: %s" msgstr "Proxy elérési útvonal: %s" #: src/clients/compute/arctest.cpp:133 #, fuzzy, c-format msgid "Proxy-subject: %s" msgstr "Tárgy: %s" #: src/clients/compute/arctest.cpp:135 #, fuzzy msgid "Valid for: Proxy expired" msgstr "Nem használható tovább a proxy: Lejárt a proxy" #: src/clients/compute/arctest.cpp:137 #, fuzzy msgid "Valid for: Proxy not valid" msgstr "Nem használható tovább a proxy: Nem érvényes a proxy" #: src/clients/compute/arctest.cpp:139 #, fuzzy, c-format msgid "Valid for: %s" msgstr "Érvénytelen URL: %s" #: src/clients/compute/arctest.cpp:144 #, c-format msgid "Certificate issuer: %s" msgstr "" #: src/clients/compute/arctest.cpp:148 #, fuzzy msgid "CA-certificates installed:" msgstr "publikus kulcs elérési útvonala" #: src/clients/compute/arctest.cpp:170 msgid "Unable to detect if issuer certificate is installed." msgstr "" #: src/clients/compute/arctest.cpp:173 msgid "Your issuer's certificate is not installed" msgstr "" #: src/clients/compute/arctest.cpp:191 #, c-format msgid "No test-job, with ID \"%d\"" msgstr "" #: src/clients/compute/arctest.cpp:209 #, c-format msgid "Unable to load broker %s" msgstr "Nem sikerült betölteni a %s bróker modult" #: src/clients/compute/arctest.cpp:212 #: src/hed/libs/compute/BrokerPlugin.cpp:106 #, c-format msgid "Broker %s loaded" msgstr "%s bróker betöltve" #: src/clients/compute/arctest.cpp:234 #, fuzzy msgid "Test aborted because no resource returned any information" msgstr "" "Feladat küldés sikertelen, mert egyetlen klaszter sem adott vissza " "információt magáról" #: src/clients/compute/arctest.cpp:247 msgid "" "ERROR: Test aborted because no suitable resources were found for the test-job" msgstr "" #: src/clients/compute/arctest.cpp:249 msgid "" "ERROR: Dumping job description aborted because no suitable resources were " "found for the test-job" msgstr "" #: src/clients/compute/arctest.cpp:258 #, c-format msgid "Submitting test-job %d:" msgstr "" #: src/clients/compute/arctest.cpp:262 #, c-format msgid "Client version: nordugrid-arc-%s" msgstr "" #: src/clients/compute/arctest.cpp:269 #, fuzzy, c-format msgid "Cannot write jobid (%s) to file (%s)" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/clients/compute/arctest.cpp:270 #, fuzzy, c-format msgid "Test submitted with jobid: %s" msgstr "Feladat elküldve ezzel az azonítóval: %s" #: src/clients/compute/arctest.cpp:285 #, c-format msgid "Computing service: %s" msgstr "" #: src/clients/compute/arctest.cpp:291 #, fuzzy msgid "Test failed, no more possible targets" msgstr "A feladat küldés meghiusúlt, mert nincs több szabad várakozó sor." 
#: src/clients/compute/utils.cpp:118 #, c-format msgid "Types of execution services %s is able to submit jobs to:" msgstr "" #: src/clients/compute/utils.cpp:121 #, c-format msgid "Types of registry services which %s is able collect information from:" msgstr "" #: src/clients/compute/utils.cpp:124 #, c-format msgid "" "Types of local information services which %s is able collect information " "from:" msgstr "" #: src/clients/compute/utils.cpp:127 #, c-format msgid "" "Types of local information services which %s is able collect job information " "from:" msgstr "" #: src/clients/compute/utils.cpp:130 #, c-format msgid "Types of services %s is able to manage jobs at:" msgstr "" #: src/clients/compute/utils.cpp:133 #, fuzzy, c-format msgid "Job description languages supported by %s:" msgstr "Feladat leírás elküldve ide: %s" #: src/clients/compute/utils.cpp:136 #, c-format msgid "Brokers available to %s:" msgstr "" #: src/clients/compute/utils.cpp:159 #, c-format msgid "" "Default broker (%s) is not available. When using %s a broker should be " "specified explicitly (-b option)." msgstr "" #: src/clients/compute/utils.cpp:169 msgid "Proxy expired. Job submission aborted. Please run 'arcproxy'!" msgstr "" #: src/clients/compute/utils.cpp:174 msgid "" "Cannot find any proxy. This application currently cannot run without a " "proxy.\n" " If you have the proxy file in a non-default location,\n" " please make sure the path is specified in the client configuration file.\n" " If you don't have a proxy yet, please run 'arcproxy'!" msgstr "" #: src/clients/compute/utils.cpp:278 msgid "" "select one or more computing elements: name can be an alias for a single CE, " "a group of CEs or a URL" msgstr "" #: src/clients/compute/utils.cpp:280 src/clients/compute/utils.cpp:297 #: src/clients/compute/utils.cpp:404 msgid "name" msgstr "" #: src/clients/compute/utils.cpp:285 msgid "" "the computing element specified by URL at the command line should be queried " "using this information interface (possible options: org.nordugrid.ldapng, " "org.nordugrid.ldapglue2, org.nordugrid.wsrfglue2, org.ogf.glue.emies." 
"resourceinfo)" msgstr "" #: src/clients/compute/utils.cpp:289 #, fuzzy msgid "interfacename" msgstr "Felhasználó oldali hiba" #: src/clients/compute/utils.cpp:295 msgid "" "selecting a computing element for the new jobs with a URL or an alias, or " "selecting a group of computing elements with the name of the group" msgstr "" #: src/clients/compute/utils.cpp:303 msgid "force migration, ignore kill failure" msgstr "migráció kikényszerítése, megölési hiba figyelmen kívül hagyása" #: src/clients/compute/utils.cpp:309 msgid "keep the files on the server (do not clean)" msgstr "fájlok megőrzése a szerveren (nincs törlés)" #: src/clients/compute/utils.cpp:315 msgid "do not ask for verification" msgstr "ne kérjen ellenőrzést" #: src/clients/compute/utils.cpp:319 #, fuzzy msgid "truncate the joblist before synchronizing" msgstr "feladat lista megcsonkult a szinkronizáció előtt" #: src/clients/compute/utils.cpp:325 src/clients/data/arcls.cpp:287 msgid "long format (more information)" msgstr "részletes formátum (több információ)" #: src/clients/compute/utils.cpp:331 msgid "print a list of services configured in the client.conf" msgstr "" #: src/clients/compute/utils.cpp:337 msgid "show the stdout of the job (default)" msgstr "" #: src/clients/compute/utils.cpp:341 msgid "show the stderr of the job" msgstr "" #: src/clients/compute/utils.cpp:345 msgid "show the CE's error log of the job" msgstr "" #: src/clients/compute/utils.cpp:351 msgid "" "download directory (the job directory will be created in this directory)" msgstr "" "könyvtár letöltése (a feladat könyvtára ebben a könyvtárban fog létrejönni)" #: src/clients/compute/utils.cpp:353 msgid "dirname" msgstr "könyvtárnév" #: src/clients/compute/utils.cpp:357 msgid "use the jobname instead of the short ID as the job directory name" msgstr "" #: src/clients/compute/utils.cpp:362 msgid "force download (overwrite existing job directory)" msgstr "" #: src/clients/compute/utils.cpp:368 msgid "instead of the status only the IDs of the selected jobs will be printed" msgstr "" #: src/clients/compute/utils.cpp:372 msgid "sort jobs according to jobid, submissiontime or jobname" msgstr "" "feladatok rendezése az azonosítójuk, az elküldés ideje vagy a neve alapján" #: src/clients/compute/utils.cpp:373 src/clients/compute/utils.cpp:376 msgid "order" msgstr "sorrend" #: src/clients/compute/utils.cpp:375 msgid "reverse sorting of jobs according to jobid, submissiontime or jobname" msgstr "" "feladatok rendezésének megfordítása az azonosítójuk, az elküldés ideje vagy " "a neve alapján" #: src/clients/compute/utils.cpp:379 msgid "show jobs where status information is unavailable" msgstr "" #: src/clients/compute/utils.cpp:385 #, fuzzy msgid "resubmit to the same resource" msgstr "újraküldés ugyanarra a klaszterre" #: src/clients/compute/utils.cpp:389 #, fuzzy msgid "do not resubmit to the same resource" msgstr "újraküldés ugyanarra a klaszterre" #: src/clients/compute/utils.cpp:395 msgid "" "remove the job from the local list of jobs even if the job is not found in " "the infosys" msgstr "" "feladat eltávolítása a helyi listából ha az nem található az információs " "rendszerben" #: src/clients/compute/utils.cpp:402 msgid "" "select one or more registries: name can be an alias for a single registry, a " "group of registries or a URL" msgstr "" #: src/clients/compute/utils.cpp:410 msgid "submit test job given by the number" msgstr "" #: src/clients/compute/utils.cpp:411 src/clients/compute/utils.cpp:415 msgid "int" msgstr "" #: src/clients/compute/utils.cpp:414 
msgid "test job runtime specified by the number" msgstr "" #: src/clients/compute/utils.cpp:421 msgid "only select jobs whose status is statusstr" msgstr "" #: src/clients/compute/utils.cpp:422 msgid "statusstr" msgstr "" #: src/clients/compute/utils.cpp:428 msgid "all jobs" msgstr "" #: src/clients/compute/utils.cpp:434 msgid "jobdescription string describing the job to be submitted" msgstr "a feladat leíró szöveg tartalmazza magát az elküldendő feladatot" #: src/clients/compute/utils.cpp:436 src/clients/compute/utils.cpp:442 #: src/clients/credentials/arcproxy.cpp:369 #: src/clients/credentials/arcproxy.cpp:376 #: src/clients/credentials/arcproxy.cpp:394 #: src/clients/credentials/arcproxy.cpp:401 #: src/clients/credentials/arcproxy.cpp:419 #: src/clients/credentials/arcproxy.cpp:423 #: src/clients/credentials/arcproxy.cpp:438 #: src/clients/credentials/arcproxy.cpp:448 #: src/clients/credentials/arcproxy.cpp:452 #: src/clients/credentials/arcproxyalt.cpp:369 #: src/clients/credentials/arcproxyalt.cpp:376 #: src/clients/credentials/arcproxyalt.cpp:399 #: src/clients/credentials/arcproxyalt.cpp:415 #: src/clients/credentials/arcproxyalt.cpp:419 #: src/clients/credentials/arcproxyalt.cpp:429 #: src/clients/credentials/arcproxyalt.cpp:433 msgid "string" msgstr "szöveg" #: src/clients/compute/utils.cpp:440 msgid "jobdescription file describing the job to be submitted" msgstr "a feladat leíró fájl tartalmazza magát az elküldendő feladatot" #: src/clients/compute/utils.cpp:448 msgid "select broker method (list available brokers with --listplugins flag)" msgstr "" #: src/clients/compute/utils.cpp:449 msgid "broker" msgstr "bróker" #: src/clients/compute/utils.cpp:452 msgid "the IDs of the submitted jobs will be appended to this file" msgstr "" #: src/clients/compute/utils.cpp:453 src/clients/compute/utils.cpp:475 #: src/clients/compute/utils.cpp:512 src/clients/compute/utils.cpp:520 #: src/clients/credentials/arcproxy.cpp:461 #: src/clients/credentials/arcproxyalt.cpp:447 src/clients/data/arccp.cpp:627 #: src/clients/data/arcls.cpp:332 src/clients/data/arcmkdir.cpp:111 #: src/clients/data/arcrename.cpp:122 src/clients/data/arcrm.cpp:137 #: src/clients/echo/arcecho.cpp:47 src/clients/wsrf/arcwsrf.cpp:57 msgid "filename" msgstr "fájlnév" #: src/clients/compute/utils.cpp:457 msgid "" "only use this interface for submitting (e.g. org.nordugrid.gridftpjob, org." 
"ogf.glue.emies.activitycreation, org.ogf.bes)" msgstr "" #: src/clients/compute/utils.cpp:459 src/clients/compute/utils.cpp:501 msgid "InterfaceName" msgstr "" #: src/clients/compute/utils.cpp:466 msgid "skip the service with the given URL during service discovery" msgstr "" #: src/clients/compute/utils.cpp:467 src/clients/compute/utils.cpp:480 #: src/clients/data/arccp.cpp:607 msgid "URL" msgstr "" #: src/clients/compute/utils.cpp:474 msgid "a file containing a list of jobIDs" msgstr "" #: src/clients/compute/utils.cpp:479 msgid "skip jobs which are on a computing element with a given URL" msgstr "" #: src/clients/compute/utils.cpp:485 msgid "submit jobs as dry run (no submission to batch system)" msgstr "" #: src/clients/compute/utils.cpp:488 msgid "submit directly - no resource discovery or matchmaking" msgstr "" #: src/clients/compute/utils.cpp:492 msgid "" "do not submit - dump job description in the language accepted by the target" msgstr "" "nincs küldés - azon feladat leíró formátumban megjelenítése, amit a távoli " "klaszter elfogad" #: src/clients/compute/utils.cpp:499 msgid "" "only get information about executon targets which support this job " "submission interface (e.g. org.nordugrid.gridftpjob, org.ogf.glue.emies." "activitycreation, org.ogf.bes)" msgstr "" #: src/clients/compute/utils.cpp:506 msgid "prints info about installed user- and CA-certificates" msgstr "" #: src/clients/compute/utils.cpp:511 #, c-format msgid "the file storing information about active jobs (default %s)" msgstr "" #: src/clients/compute/utils.cpp:519 src/clients/credentials/arcproxy.cpp:460 #: src/clients/credentials/arcproxyalt.cpp:446 src/clients/data/arccp.cpp:626 #: src/clients/data/arcls.cpp:331 src/clients/data/arcmkdir.cpp:110 #: src/clients/data/arcrename.cpp:121 src/clients/data/arcrm.cpp:136 #: src/clients/echo/arcecho.cpp:46 src/clients/wsrf/arcwsrf.cpp:56 msgid "configuration file (default ~/.arc/client.conf)" msgstr "konfigurációs fájl (alapbeállítás ~/.arc/client.conf)" #: src/clients/compute/utils.cpp:522 src/clients/credentials/arcproxy.cpp:455 #: src/clients/credentials/arcproxyalt.cpp:441 src/clients/data/arccp.cpp:621 #: src/clients/data/arcls.cpp:326 src/clients/data/arcmkdir.cpp:105 #: src/clients/data/arcrename.cpp:116 src/clients/data/arcrm.cpp:131 #: src/clients/echo/arcecho.cpp:41 src/clients/wsrf/arcwsrf.cpp:51 msgid "timeout in seconds (default 20)" msgstr "időkorlát másodpercben (alapbeállítás 20)" #: src/clients/compute/utils.cpp:523 src/clients/credentials/arcproxy.cpp:456 #: src/clients/credentials/arcproxyalt.cpp:442 src/clients/data/arccp.cpp:622 #: src/clients/data/arcls.cpp:327 src/clients/data/arcmkdir.cpp:106 #: src/clients/data/arcrename.cpp:117 src/clients/data/arcrm.cpp:132 #: src/clients/echo/arcecho.cpp:42 src/clients/wsrf/arcwsrf.cpp:52 msgid "seconds" msgstr "másodpercek" #: src/clients/compute/utils.cpp:526 msgid "list the available plugins" msgstr "" #: src/clients/compute/utils.cpp:530 src/clients/credentials/arcproxy.cpp:465 #: src/clients/credentials/arcproxyalt.cpp:451 src/clients/data/arccp.cpp:631 #: src/clients/data/arcls.cpp:336 src/clients/data/arcmkdir.cpp:115 #: src/clients/data/arcrename.cpp:126 src/clients/data/arcrm.cpp:141 #: src/clients/echo/arcecho.cpp:51 src/clients/saml/saml_assertion_init.cpp:52 #: src/clients/wsrf/arcwsrf.cpp:61 #: src/hed/libs/compute/test_jobdescription.cpp:38 #: src/services/a-rex/grid-manager/inputcheck.cpp:81 msgid "FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG" msgstr "FATAL, ERROR, WARNING, INFO, VERBOSE vagy 
DEBUG" #: src/clients/compute/utils.cpp:531 src/clients/credentials/arcproxy.cpp:466 #: src/clients/credentials/arcproxyalt.cpp:452 src/clients/data/arccp.cpp:632 #: src/clients/data/arcls.cpp:337 src/clients/data/arcmkdir.cpp:116 #: src/clients/data/arcrename.cpp:127 src/clients/data/arcrm.cpp:142 #: src/clients/echo/arcecho.cpp:52 src/clients/saml/saml_assertion_init.cpp:53 #: src/clients/wsrf/arcwsrf.cpp:62 #: src/hed/libs/compute/test_jobdescription.cpp:39 #: src/services/a-rex/grid-manager/inputcheck.cpp:82 msgid "debuglevel" msgstr "logolási szint" #: src/clients/compute/utils.cpp:533 src/clients/credentials/arcproxy.cpp:469 #: src/clients/credentials/arcproxyalt.cpp:455 src/clients/data/arccp.cpp:635 #: src/clients/data/arcls.cpp:340 src/clients/data/arcmkdir.cpp:119 #: src/clients/data/arcrename.cpp:130 src/clients/data/arcrm.cpp:145 #: src/clients/echo/arcecho.cpp:55 src/clients/saml/saml_assertion_init.cpp:56 #: src/clients/wsrf/arcwsrf.cpp:65 msgid "print version information" msgstr "verzió információ kiírása" #: src/clients/credentials/arcproxy.cpp:172 #: src/hed/libs/credential/ARCProxyUtil.cpp:1248 #, fuzzy, c-format msgid "There are %d user certificates existing in the NSS database" msgstr "%d darab publikus tanúsítvány van a válasz üzenetben" #: src/clients/credentials/arcproxy.cpp:188 #: src/hed/libs/credential/ARCProxyUtil.cpp:1264 #, c-format msgid "Number %d is with nickname: %s%s" msgstr "" #: src/clients/credentials/arcproxy.cpp:197 #: src/hed/libs/credential/ARCProxyUtil.cpp:1273 #, c-format msgid " expiration time: %s " msgstr "" #: src/clients/credentials/arcproxy.cpp:201 #: src/hed/libs/credential/ARCProxyUtil.cpp:1277 #, c-format msgid " certificate dn: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:202 #: src/hed/libs/credential/ARCProxyUtil.cpp:1278 #, c-format msgid " issuer dn: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:203 #: src/hed/libs/credential/ARCProxyUtil.cpp:1279 #, c-format msgid " serial number: %d" msgstr "" #: src/clients/credentials/arcproxy.cpp:207 #: src/hed/libs/credential/ARCProxyUtil.cpp:1283 #, c-format msgid "Please choose the one you would use (1-%d): " msgstr "" #: src/clients/credentials/arcproxy.cpp:272 #: src/clients/credentials/arcproxyalt.cpp:317 #, fuzzy msgid "" "The arcproxy command creates a proxy from a key/certificate pair which can\n" "then be used to access grid resources." msgstr "" "Az arcproxy parancs proxy-t készít a publikus/privát kulcsból,\n" "hogy tudja használni az ARC köztesréteget" #: src/clients/credentials/arcproxy.cpp:274 msgid "" "Supported constraints are:\n" " validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start " "from now)\n" " validityEnd=time\n" " validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod and " "validityEnd\n" " not specified, the default is 12 hours for local proxy, and 168 hours " "for delegated\n" " proxy on myproxy server)\n" " vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, the " "default\n" " is the minimum value of 12 hours and validityPeriod)\n" " myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy " "server,\n" " e.g. 43200 or 12h or 12H; if not specified, the default is the minimum " "value of\n" " 12 hours and validityPeriod (which is lifetime of the delegated proxy on " "myproxy server))\n" " proxyPolicy=policy content\n" " proxyPolicyFile=policy file\n" " keybits=number - length of the key to generate. 
Default is 1024 bits.\n" " Special value 'inherit' is to use key length of signing certificate.\n" " signingAlgorithm=name - signing algorithm to use for signing public key of " "proxy.\n" " Possible values are sha1, sha2 (alias for sha256), sha224, sha256, " "sha384, sha512\n" " and inherit (use algorithm of signing certificate). Default is inherit.\n" " With old systems, only sha1 is acceptable.\n" "\n" "Supported information item names are:\n" " subject - subject name of proxy certificate.\n" " identity - identity subject name of proxy certificate.\n" " issuer - issuer subject name of proxy certificate.\n" " ca - subject name of CA ehich issued initial certificate\n" " path - file system path to file containing proxy.\n" " type - type of proxy certificate.\n" " validityStart - timestamp when proxy validity starts.\n" " validityEnd - timestamp when proxy validity ends.\n" " validityPeriod - duration of proxy validity in seconds.\n" " validityLeft - duration of proxy validity left in seconds.\n" " vomsVO - VO name represented by VOMS attribute\n" " vomsSubject - subject of certificate for which VOMS attribute is issued\n" " vomsIssuer - subject of service which issued VOMS certificate\n" " vomsACvalidityStart - timestamp when VOMS attribute validity starts.\n" " vomsACvalidityEnd - timestamp when VOMS attribute validity ends.\n" " vomsACvalidityPeriod - duration of VOMS attribute validity in seconds.\n" " vomsACvalidityLeft - duration of VOMS attribute validity left in seconds.\n" " proxyPolicy\n" " keybits - size of proxy certificate key in bits.\n" " signingAlgorithm - algorithm used to sign proxy certificate.\n" "Items are printed in requested order and are separated by newline.\n" "If item has multiple values they are printed in same line separated by |.\n" "\n" "Supported password destinations are:\n" " key - for reading private key\n" " myproxy - for accessing credentials at MyProxy service\n" " myproxynew - for creating credentials at MyProxy service\n" " all - for any purspose.\n" "\n" "Supported password sources are:\n" " quoted string (\"password\") - explicitly specified password\n" " int - interactively request password from console\n" " stdin - read password from standard input delimited by newline\n" " file:filename - read password from file named filename\n" " stream:# - read password from input stream number #.\n" " Currently only 0 (standard input) is supported.\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:334 #: src/clients/credentials/arcproxyalt.cpp:334 #, fuzzy msgid "path to the proxy file" msgstr "proxy fájl elérési útvonala" #: src/clients/credentials/arcproxy.cpp:335 #: src/clients/credentials/arcproxy.cpp:339 #: src/clients/credentials/arcproxy.cpp:343 #: src/clients/credentials/arcproxy.cpp:347 #: src/clients/credentials/arcproxy.cpp:351 #: src/clients/credentials/arcproxy.cpp:355 #: src/clients/credentials/arcproxyalt.cpp:335 #: src/clients/credentials/arcproxyalt.cpp:339 #: src/clients/credentials/arcproxyalt.cpp:343 #: src/clients/credentials/arcproxyalt.cpp:347 #: src/clients/credentials/arcproxyalt.cpp:351 #: src/clients/credentials/arcproxyalt.cpp:355 src/clients/data/arccp.cpp:584 #: src/clients/saml/saml_assertion_init.cpp:48 msgid "path" msgstr "elérési útvonal" #: src/clients/credentials/arcproxy.cpp:338 #: src/clients/credentials/arcproxyalt.cpp:338 msgid "" "path to the certificate file, it can be either PEM, DER, or PKCS12 formated" msgstr "" #: src/clients/credentials/arcproxy.cpp:342 #: src/clients/credentials/arcproxyalt.cpp:342 msgid "" 
"path to the private key file, if the certificate is in PKCS12 format, then " "no need to give private key" msgstr "" #: src/clients/credentials/arcproxy.cpp:346 #: src/clients/credentials/arcproxyalt.cpp:346 #, fuzzy msgid "" "path to the trusted certificate directory, only needed for the VOMS client " "functionality" msgstr "megbízható tanúsítványok könyvtára, csak a voms kliensek használják" #: src/clients/credentials/arcproxy.cpp:350 #: src/clients/credentials/arcproxyalt.cpp:350 #, fuzzy msgid "" "path to the top directory of VOMS *.lsc files, only needed for the VOMS " "client functionality" msgstr "megbízható tanúsítványok könyvtára, csak a voms kliensek használják" #: src/clients/credentials/arcproxy.cpp:354 #: src/clients/credentials/arcproxyalt.cpp:354 #, fuzzy msgid "path to the VOMS server configuration file" msgstr "voms szerver fájljának az elérési útvonala" #: src/clients/credentials/arcproxy.cpp:358 #: src/clients/credentials/arcproxyalt.cpp:358 msgid "" "voms<:command>. Specify VOMS server (More than one VOMS server \n" " can be specified like this: --voms VOa:command1 --voms VOb:" "command2). \n" " :command is optional, and is used to ask for specific " "attributes(e.g: roles)\n" " command options are:\n" " all --- put all of this DN's attributes into AC; \n" " list ---list all of the DN's attribute, will not create AC " "extension; \n" " /Role=yourRole --- specify the role, if this DN \n" " has such a role, the role will be put into " "AC \n" " /voname/groupname/Role=yourRole --- specify the VO, group and " "role; if this DN \n" " has such a role, the role will be put into " "AC \n" msgstr "" #: src/clients/credentials/arcproxy.cpp:372 #: src/clients/credentials/arcproxyalt.cpp:372 msgid "" "group<:role>. Specify ordering of attributes \n" " Example: --order /knowarc.eu/coredev:Developer,/knowarc.eu/" "testers:Tester \n" " or: --order /knowarc.eu/coredev:Developer --order /knowarc.eu/" "testers:Tester \n" " Note that it does not make sense to specify the order if you have two or " "more different VOMS servers specified" msgstr "" #: src/clients/credentials/arcproxy.cpp:379 #: src/clients/credentials/arcproxyalt.cpp:379 msgid "use GSI communication protocol for contacting VOMS services" msgstr "GSI kommunikációs protokoll használata a VOMS szolgáltatás eléréséhez" #: src/clients/credentials/arcproxy.cpp:382 #: src/clients/credentials/arcproxyalt.cpp:382 msgid "" "use HTTP communication protocol for contacting VOMS services that provide " "RESTful access \n" " Note for RESTful access, 'list' command and multiple VOMS " "server are not supported\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:388 msgid "" "this option is not functional (old GSI proxies are not supported anymore)" msgstr "" #: src/clients/credentials/arcproxy.cpp:391 msgid "print all information about this proxy." msgstr "" #: src/clients/credentials/arcproxy.cpp:394 msgid "print selected information about this proxy." msgstr "" #: src/clients/credentials/arcproxy.cpp:397 #: src/clients/credentials/arcproxyalt.cpp:395 msgid "remove proxy" msgstr "" #: src/clients/credentials/arcproxy.cpp:400 msgid "" "username to MyProxy server (if missing subject of user certificate is used)" msgstr "" #: src/clients/credentials/arcproxy.cpp:405 #: src/clients/credentials/arcproxyalt.cpp:402 msgid "" "don't prompt for a credential passphrase, when retrieve a \n" " credential from on MyProxy server. 
\n" " The precondition of this choice is the credential is PUT onto\n" " the MyProxy server without a passphrase by using -R (--" "retrievable_by_cert) \n" " option when being PUTing onto Myproxy server. \n" " This option is specific for the GET command when contacting " "Myproxy server." msgstr "" #: src/clients/credentials/arcproxy.cpp:416 #: src/clients/credentials/arcproxyalt.cpp:412 msgid "" "Allow specified entity to retrieve credential without passphrase.\n" " This option is specific for the PUT command when contacting " "Myproxy server." msgstr "" #: src/clients/credentials/arcproxy.cpp:422 #: src/clients/credentials/arcproxyalt.cpp:418 #, fuzzy msgid "hostname[:port] of MyProxy server" msgstr "szervernév[:port] myproxy szerveré" #: src/clients/credentials/arcproxy.cpp:427 msgid "" "command to MyProxy server. The command can be PUT, GET, INFO, NEWPASS or " "DESTROY.\n" " PUT -- put a delegated credentials to the MyProxy server; \n" " GET -- get a delegated credentials from the MyProxy server; \n" " INFO -- get and present information about credentials stored " "at the MyProxy server; \n" " NEWPASS -- change password protecting credentials stored at " "the MyProxy server; \n" " DESTROY -- wipe off credentials stored at the MyProxy " "server; \n" " Local credentials (certificate and key) are not necessary " "except in case of PUT. \n" " MyProxy functionality can be used together with VOMS " "functionality.\n" " --voms and --vomses can be used for Get command if VOMS " "attributes\n" " is required to be included in the proxy.\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:442 msgid "" "use NSS credential database in default Mozilla profiles, \n" " including Firefox, Seamonkey and Thunderbird.\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:447 #: src/clients/credentials/arcproxyalt.cpp:432 msgid "proxy constraints" msgstr "" #: src/clients/credentials/arcproxy.cpp:451 msgid "password destination=password source" msgstr "" #: src/clients/credentials/arcproxy.cpp:500 #: src/clients/credentials/arcproxy.cpp:1161 #: src/clients/credentials/arcproxyalt.cpp:513 #: src/clients/credentials/arcproxyalt.cpp:556 #, fuzzy msgid "Failed configuration initialization." msgstr "Nem sikerült betölteni a konfigurációt" #: src/clients/credentials/arcproxy.cpp:518 #: src/clients/credentials/arcproxyalt.cpp:563 msgid "" "Failed to find certificate and/or private key or files have improper " "permissions or ownership." msgstr "" #: src/clients/credentials/arcproxy.cpp:519 #: src/clients/credentials/arcproxy.cpp:531 #: src/clients/credentials/arcproxyalt.cpp:564 #: src/clients/credentials/arcproxyalt.cpp:574 msgid "You may try to increase verbosity to get more information." msgstr "" #: src/clients/credentials/arcproxy.cpp:527 #: src/clients/credentials/arcproxyalt.cpp:570 #, fuzzy msgid "Failed to find CA certificates" msgstr "Nem sikerült listázni a meta adatokat" #: src/clients/credentials/arcproxy.cpp:528 #: src/clients/credentials/arcproxyalt.cpp:571 msgid "" "Cannot find the CA certificates directory path, please set environment " "variable X509_CERT_DIR, or cacertificatesdirectory in a configuration file." msgstr "" #: src/clients/credentials/arcproxy.cpp:532 #: src/clients/credentials/arcproxyalt.cpp:575 msgid "" "The CA certificates directory is required for contacting VOMS and MyProxy " "servers." 
msgstr "" #: src/clients/credentials/arcproxy.cpp:544 msgid "" "$X509_VOMS_FILE, and $X509_VOMSES are not set;\n" "User has not specified the location for vomses information;\n" "There is also not vomses location information in user's configuration file;\n" "Can not find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses,\n" "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/" "vomses,\n" "/etc/vomses, /etc/grid-security/vomses, and the location at the " "corresponding sub-directory" msgstr "" #: src/clients/credentials/arcproxy.cpp:589 #: src/clients/credentials/arcproxyalt.cpp:604 src/clients/echo/arcecho.cpp:84 msgid "Wrong number of arguments!" msgstr "" #: src/clients/credentials/arcproxy.cpp:597 #: src/clients/credentials/arcproxy.cpp:618 #: src/clients/credentials/arcproxyalt.cpp:612 #: src/clients/credentials/arcproxyalt.cpp:632 msgid "" "Cannot find the path of the proxy file, please setup environment " "X509_USER_PROXY, or proxypath in a configuration file" msgstr "" #: src/clients/credentials/arcproxy.cpp:604 #: src/clients/credentials/arcproxyalt.cpp:621 #, fuzzy, c-format msgid "Cannot remove proxy file at %s" msgstr "proxy fájl elérési útvonala" #: src/clients/credentials/arcproxy.cpp:606 #: src/clients/credentials/arcproxyalt.cpp:617 #, c-format msgid "Cannot remove proxy file at %s, because it's not there" msgstr "" #: src/clients/credentials/arcproxy.cpp:624 #: src/clients/credentials/arcproxyalt.cpp:638 #, c-format msgid "" "Cannot find file at %s for getting the proxy. Please make sure this file " "exists." msgstr "" #: src/clients/credentials/arcproxy.cpp:630 #: src/clients/credentials/arcproxyalt.cpp:651 #, c-format msgid "Subject: %s" msgstr "Tárgy: %s" #: src/clients/credentials/arcproxy.cpp:631 #: src/clients/credentials/arcproxyalt.cpp:652 #, fuzzy, c-format msgid "Issuer: %s" msgstr "Válasz: %s" #: src/clients/credentials/arcproxy.cpp:632 #: src/clients/credentials/arcproxyalt.cpp:653 #, c-format msgid "Identity: %s" msgstr "Azonosító: %s" #: src/clients/credentials/arcproxy.cpp:634 #: src/clients/credentials/arcproxyalt.cpp:657 msgid "Time left for proxy: Proxy expired" msgstr "Nem használható tovább a proxy: Lejárt a proxy" #: src/clients/credentials/arcproxy.cpp:636 #: src/clients/credentials/arcproxyalt.cpp:659 #, fuzzy msgid "Time left for proxy: Proxy not valid yet" msgstr "Nem használható tovább a proxy: Nem érvényes a proxy" #: src/clients/credentials/arcproxy.cpp:638 #: src/clients/credentials/arcproxyalt.cpp:661 #, c-format msgid "Time left for proxy: %s" msgstr "Ennyi ideig érvényes még a proxy: %s" #: src/clients/credentials/arcproxy.cpp:639 #: src/clients/credentials/arcproxyalt.cpp:663 #, c-format msgid "Proxy path: %s" msgstr "Proxy elérési útvonal: %s" #: src/clients/credentials/arcproxy.cpp:640 #, c-format msgid "Proxy type: %s" msgstr "Proxy típusa: %s" #: src/clients/credentials/arcproxy.cpp:641 #, fuzzy, c-format msgid "Proxy key length: %i" msgstr "Proxy elérési útvonal: %s" #: src/clients/credentials/arcproxy.cpp:642 #, fuzzy, c-format msgid "Proxy signature: %s" msgstr "Proxy elérési útvonal: %s" #: src/clients/credentials/arcproxy.cpp:651 #: src/clients/credentials/arcproxyalt.cpp:675 #, fuzzy msgid "AC extension information for VO " msgstr "verzió információ kiírása" #: src/clients/credentials/arcproxy.cpp:654 #: src/clients/credentials/arcproxyalt.cpp:678 msgid "Error detected while parsing this AC" msgstr "" #: src/clients/credentials/arcproxy.cpp:667 #: src/clients/credentials/arcproxyalt.cpp:691 msgid "AC 
is invalid: " msgstr "" #: src/clients/credentials/arcproxy.cpp:720 #: src/clients/credentials/arcproxyalt.cpp:732 #, fuzzy msgid "Time left for AC: AC is not valid yet" msgstr "Nem használható tovább a proxy: Nem érvényes a proxy" #: src/clients/credentials/arcproxy.cpp:722 #: src/clients/credentials/arcproxyalt.cpp:734 #, fuzzy msgid "Time left for AC: AC has expired" msgstr "Nem használható tovább a proxy: Lejárt a proxy" #: src/clients/credentials/arcproxy.cpp:724 #: src/clients/credentials/arcproxyalt.cpp:736 #, fuzzy, c-format msgid "Time left for AC: %s" msgstr "Ennyi ideig érvényes még a proxy: %s" #: src/clients/credentials/arcproxy.cpp:815 #, c-format msgid "Information item '%s' is not known" msgstr "" #: src/clients/credentials/arcproxy.cpp:824 #: src/clients/credentials/arcproxyalt.cpp:746 msgid "" "Cannot find the user certificate path, please setup environment " "X509_USER_CERT, or certificatepath in a configuration file" msgstr "" #: src/clients/credentials/arcproxy.cpp:828 #: src/clients/credentials/arcproxyalt.cpp:750 msgid "" "Cannot find the user private key path, please setup environment " "X509_USER_KEY, or keypath in a configuration file" msgstr "" #: src/clients/credentials/arcproxy.cpp:852 #, c-format msgid "" "Cannot parse password source expression %s it must be of type=source format" msgstr "" #: src/clients/credentials/arcproxy.cpp:869 #, c-format msgid "" "Cannot parse password type %s. Currently supported values are " "'key','myproxy','myproxynew' and 'all'." msgstr "" #: src/clients/credentials/arcproxy.cpp:884 #, c-format msgid "" "Cannot parse password source %s it must be of source_type or source_type:" "data format. Supported source types are int,stdin,stream,file." msgstr "" #: src/clients/credentials/arcproxy.cpp:898 msgid "Only standard input is currently supported for password source." msgstr "" #: src/clients/credentials/arcproxy.cpp:903 #, c-format msgid "" "Cannot parse password source type %s. Supported source types are int,stdin," "stream,file." msgstr "" #: src/clients/credentials/arcproxy.cpp:942 #: src/clients/credentials/arcproxyalt.cpp:782 msgid "The start, end and period can't be set simultaneously" msgstr "" #: src/clients/credentials/arcproxy.cpp:948 #: src/clients/credentials/arcproxyalt.cpp:788 #, c-format msgid "The start time that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:955 #: src/clients/credentials/arcproxyalt.cpp:795 #, c-format msgid "The period that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:962 #: src/clients/credentials/arcproxyalt.cpp:802 #, c-format msgid "The end time that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:971 #: src/clients/credentials/arcproxyalt.cpp:811 #, c-format msgid "The end time that you set: %s is before start time:%s." msgstr "" #: src/clients/credentials/arcproxy.cpp:982 #: src/clients/credentials/arcproxyalt.cpp:822 #, c-format msgid "WARNING: The start time that you set: %s is before current time: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:985 #: src/clients/credentials/arcproxyalt.cpp:825 #, c-format msgid "WARNING: The end time that you set: %s is before current time: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:995 #: src/clients/credentials/arcproxyalt.cpp:835 #, c-format msgid "The VOMS AC period that you set: %s can't be recognized." 
msgstr "" #: src/clients/credentials/arcproxy.cpp:1013 #: src/clients/credentials/arcproxyalt.cpp:853 #, c-format msgid "The MyProxy period that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1028 #, c-format msgid "The keybits constraint is wrong: %s." msgstr "" #: src/clients/credentials/arcproxy.cpp:1042 #: src/clients/credentials/arcproxyalt.cpp:476 #: src/hed/libs/credential/ARCProxyUtil.cpp:1303 msgid "The NSS database can not be detected in the Firefox profile" msgstr "" #: src/clients/credentials/arcproxy.cpp:1051 #: src/hed/libs/credential/ARCProxyUtil.cpp:1311 #, c-format msgid "" "There are %d NSS base directories where the certificate, key, and module " "databases live" msgstr "" #: src/clients/credentials/arcproxy.cpp:1053 #: src/hed/libs/credential/ARCProxyUtil.cpp:1315 #, c-format msgid "Number %d is: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1055 #, c-format msgid "Please choose the NSS database you would like to use (1-%d): " msgstr "" #: src/clients/credentials/arcproxy.cpp:1071 #: src/clients/credentials/arcproxyalt.cpp:482 #: src/hed/libs/credential/ARCProxyUtil.cpp:1329 #, c-format msgid "NSS database to be accessed: %s\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:1142 #: src/hed/libs/credential/ARCProxyUtil.cpp:1503 #, c-format msgid "Certificate to use is: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1190 #: src/clients/credentials/arcproxy.cpp:1306 #: src/clients/credentials/arcproxyalt.cpp:539 #: src/clients/credentials/arcproxyalt.cpp:955 #: src/hed/libs/credential/ARCProxyUtil.cpp:1560 msgid "Proxy generation succeeded" msgstr "Proxy készítés sikeres" #: src/clients/credentials/arcproxy.cpp:1191 #: src/clients/credentials/arcproxy.cpp:1307 #: src/clients/credentials/arcproxyalt.cpp:540 #: src/clients/credentials/arcproxyalt.cpp:956 #: src/hed/libs/credential/ARCProxyUtil.cpp:1561 #, c-format msgid "Your proxy is valid until: %s" msgstr "A proxy eddig érvényes: %s" #: src/clients/credentials/arcproxy.cpp:1210 msgid "" "The old GSI proxies are not supported anymore. Please do not use -O/--old " "option." msgstr "" #: src/clients/credentials/arcproxy.cpp:1229 src/hed/mcc/tls/MCCTLS.cpp:167 #: src/hed/mcc/tls/MCCTLS.cpp:200 src/hed/mcc/tls/MCCTLS.cpp:226 msgid "VOMS attribute parsing failed" msgstr "VOMS attribútumok értelmezése sikertelen" #: src/clients/credentials/arcproxy.cpp:1231 msgid "Myproxy server did not return proxy with VOMS AC included" msgstr "" #: src/clients/credentials/arcproxy.cpp:1252 #: src/clients/credentials/arcproxyalt.cpp:892 #: src/hed/libs/credential/ARCProxyUtil.cpp:341 #, fuzzy msgid "Proxy generation failed: No valid certificate found." msgstr "" "Proxy készítés sikertelen: Nem sikerült leellenőrizni a publikus kulcsot" #: src/clients/credentials/arcproxy.cpp:1258 #: src/clients/credentials/arcproxyalt.cpp:899 #: src/hed/libs/credential/ARCProxyUtil.cpp:348 #, fuzzy msgid "Proxy generation failed: No valid private key found." msgstr "" "Proxy készítés sikertelen: Nem sikerült leellenőrizni a publikus kulcsot" #: src/clients/credentials/arcproxy.cpp:1263 #: src/clients/credentials/arcproxyalt.cpp:902 #: src/hed/libs/credential/ARCProxyUtil.cpp:173 #, c-format msgid "Your identity: %s" msgstr "Azonosítód: %s" #: src/clients/credentials/arcproxy.cpp:1265 #: src/clients/credentials/arcproxyalt.cpp:907 #: src/hed/libs/credential/ARCProxyUtil.cpp:356 msgid "Proxy generation failed: Certificate has expired." msgstr "Proxy készítés sikertelen: A publikus kulcs érvényessége lejárt." 
#: src/clients/credentials/arcproxy.cpp:1269 #: src/clients/credentials/arcproxyalt.cpp:911 #: src/hed/libs/credential/ARCProxyUtil.cpp:361 msgid "Proxy generation failed: Certificate is not valid yet." msgstr "Proxy készítés sikertelen: A publikus kulcs még nem érvényes." #: src/clients/credentials/arcproxy.cpp:1280 #, fuzzy msgid "Proxy generation failed: Failed to create temporary file." msgstr "" "Proxy készítés sikertelen: Nem sikerült leellenőrizni a publikus kulcsot" #: src/clients/credentials/arcproxy.cpp:1288 #, fuzzy msgid "Proxy generation failed: Failed to retrieve VOMS information." msgstr "" "Proxy készítés sikertelen: Nem sikerült leellenőrizni a publikus kulcsot" #: src/clients/credentials/arcproxy_myproxy.cpp:100 #: src/clients/credentials/arcproxyalt.cpp:1312 #: src/hed/libs/credential/ARCProxyUtil.cpp:844 msgid "Succeeded to get info from MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:144 #: src/clients/credentials/arcproxyalt.cpp:1368 #: src/hed/libs/credential/ARCProxyUtil.cpp:900 msgid "Succeeded to change password on MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:185 #: src/clients/credentials/arcproxyalt.cpp:1417 #: src/hed/libs/credential/ARCProxyUtil.cpp:949 msgid "Succeeded to destroy credential on MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:265 #: src/clients/credentials/arcproxyalt.cpp:1506 #: src/hed/libs/credential/ARCProxyUtil.cpp:1038 #, c-format msgid "Succeeded to get a proxy in %s from MyProxy server %s" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:318 #: src/clients/credentials/arcproxyalt.cpp:1565 #: src/hed/libs/credential/ARCProxyUtil.cpp:1097 msgid "Succeeded to put a proxy onto MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_proxy.cpp:93 #: src/clients/credentials/arcproxyalt.cpp:1222 #: src/hed/libs/credential/ARCProxyUtil.cpp:403 #: src/hed/libs/credential/ARCProxyUtil.cpp:1410 msgid "Failed to add VOMS AC extension. Your proxy may be incomplete." msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:63 msgid "" "Failed to process VOMS configuration or no suitable configuration lines " "found."
msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:75 #, fuzzy, c-format msgid "Failed to parse requested VOMS lifetime: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/clients/credentials/arcproxy_voms.cpp:93 #: src/clients/credentials/arcproxyalt.cpp:1051 #: src/hed/libs/credential/ARCProxyUtil.cpp:640 #, fuzzy, c-format msgid "Cannot get VOMS server address information from vomses line: \"%s\"" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/clients/credentials/arcproxy_voms.cpp:97 #: src/clients/credentials/arcproxy_voms.cpp:99 #: src/clients/credentials/arcproxyalt.cpp:1061 #: src/clients/credentials/arcproxyalt.cpp:1063 #: src/hed/libs/credential/ARCProxyUtil.cpp:650 #: src/hed/libs/credential/ARCProxyUtil.cpp:652 #, c-format msgid "Contacting VOMS server (named %s): %s on port: %s" msgstr "VOMS szerver elérése (neve: %s): %s ezen a porton: %s" #: src/clients/credentials/arcproxy_voms.cpp:105 #, c-format msgid "Failed to parse requested VOMS server port number: %s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:122 msgid "List functionality is not supported for RESTful VOMS interface" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:132 #: src/clients/credentials/arcproxy_voms.cpp:188 #, c-format msgid "" "The VOMS server with the information:\n" "\t%s\n" "can not be reached, please make sure it is available." msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:133 #: src/clients/credentials/arcproxy_voms.cpp:138 #: src/clients/credentials/arcproxy_voms.cpp:189 #: src/clients/credentials/arcproxy_voms.cpp:194 #, c-format msgid "" "Collected error is:\n" "\t%s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:137 #: src/clients/credentials/arcproxy_voms.cpp:193 #, fuzzy, c-format msgid "No valid response from VOMS server: %s" msgstr "Nincs válasz a voms szervertől" #: src/clients/credentials/arcproxy_voms.cpp:155 msgid "List functionality is not supported for legacy VOMS interface" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:167 #, c-format msgid "Failed to parse VOMS command: %s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:204 #, c-format msgid "" "There are %d servers with the same name: %s in your vomses file, but all of " "them can not be reached, or can not return valid message." msgstr "" #: src/clients/credentials/arcproxyalt.cpp:108 #: src/hed/libs/credential/ARCProxyUtil.cpp:258 #, fuzzy, c-format msgid "OpenSSL error -- %s" msgstr "OpenSSL hiba -- %s" #: src/clients/credentials/arcproxyalt.cpp:109 #: src/hed/libs/credential/ARCProxyUtil.cpp:259 #, c-format msgid "Library : %s" msgstr "Könyvtár : %s" #: src/clients/credentials/arcproxyalt.cpp:110 #: src/hed/libs/credential/ARCProxyUtil.cpp:260 #, c-format msgid "Function : %s" msgstr "Funkció: %s" #: src/clients/credentials/arcproxyalt.cpp:111 #: src/hed/libs/credential/ARCProxyUtil.cpp:261 #, c-format msgid "Reason : %s" msgstr "Indok : %s" #: src/clients/credentials/arcproxyalt.cpp:167 #: src/hed/libs/credential/ARCProxyUtil.cpp:317 msgid "User interface error" msgstr "Felhasználó oldali hiba" #: src/clients/credentials/arcproxyalt.cpp:173 #: src/hed/libs/credential/ARCProxyUtil.cpp:323 msgid "Aborted!" msgstr "Megszakítva!" #: src/clients/credentials/arcproxyalt.cpp:319 msgid "" "Supported constraints are:\n" " validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start " "from now)\n" " validityEnd=time\n" " validityPeriod=time (e.g. 
43200 or 12h or 12H; if both validityPeriod and " "validityEnd\n" " not specified, the default is 12 hours for local proxy, and 168 hours for " "delegated\n" " proxy on myproxy server)\n" " vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, the " "default\n" " is the minimum value of 12 hours and validityPeriod)\n" " myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy " "server,\n" " e.g. 43200 or 12h or 12H; if not specified, the default is the minimum " "value of\n" " 12 hours and validityPeriod (which is lifetime of the delegated proxy on " "myproxy server))\n" " proxyPolicy=policy content\n" " proxyPolicyFile=policy file" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:388 msgid "" "print all information about this proxy. \n" " In order to show the Identity (DN without CN as suffix for " "proxy) \n" " of the certificate, the 'trusted certdir' is needed." msgstr "" #: src/clients/credentials/arcproxyalt.cpp:398 #, fuzzy msgid "username to MyProxy server" msgstr "myproxy szerverhez szükséges felhasználónév" #: src/clients/credentials/arcproxyalt.cpp:422 msgid "" "command to MyProxy server. The command can be PUT or GET.\n" " PUT/put/Put -- put a delegated credential to the MyProxy " "server; \n" " GET/get/Get -- get a delegated credential from the MyProxy " "server, \n" " credential (certificate and key) is not needed in this case. \n" " MyProxy functionality can be used together with VOMS\n" " functionality.\n" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:437 msgid "use NSS credential database in the Firefox profile" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1040 #: src/hed/libs/credential/ARCProxyUtil.cpp:629 #, fuzzy, c-format msgid "There are %d commands to the same VOMS server %s" msgstr "%d számú parancs van ugyanahoz a voms szerverhez: %s" #: src/clients/credentials/arcproxyalt.cpp:1094 #: src/hed/libs/credential/ARCProxyUtil.cpp:683 #, fuzzy, c-format msgid "Try to get attribute from VOMS server with order: %s" msgstr "Attribútumok lekérdezés a voms szervertől ebben a sorrendben: %s" #: src/clients/credentials/arcproxyalt.cpp:1097 #: src/hed/libs/credential/ARCProxyUtil.cpp:686 #, c-format msgid "Message sent to VOMS server %s is: %s" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1116 #: src/hed/libs/credential/ARCProxyUtil.cpp:705 #, c-format msgid "" "The VOMS server with the information:\n" "\t%s\n" "can not be reached, please make sure it is available" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1120 #: src/hed/libs/credential/ARCProxyUtil.cpp:709 #, fuzzy msgid "No HTTP response from VOMS server" msgstr "Nincs válasz a voms szervertől" #: src/clients/credentials/arcproxyalt.cpp:1125 #: src/clients/credentials/arcproxyalt.cpp:1151 #: src/hed/libs/credential/ARCProxyUtil.cpp:714 #: src/hed/libs/credential/ARCProxyUtil.cpp:740 #, fuzzy, c-format msgid "Returned message from VOMS server: %s" msgstr "Ez a válasz érkezett a voms szervertől: %s" #: src/clients/credentials/arcproxyalt.cpp:1137 #: src/hed/libs/credential/ARCProxyUtil.cpp:726 #, c-format msgid "" "The VOMS server with the information:\n" "\t%s\"\n" "can not be reached, please make sure it is available" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1141 #: src/hed/libs/credential/ARCProxyUtil.cpp:730 #, fuzzy msgid "No stream response from VOMS server" msgstr "Nincs válasz a voms szervertől" #: src/clients/credentials/arcproxyalt.cpp:1163 #: src/hed/libs/credential/ARCProxyUtil.cpp:752 #, c-format msgid "" "The validity duration of VOMS 
AC is shortened from %s to %s, due to the " "validity constraint on voms server side.\n" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1166 #: src/hed/libs/credential/ARCProxyUtil.cpp:755 #, c-format msgid "" "Cannot get any AC or attributes info from VOMS server: %s;\n" " Returned message from VOMS server: %s\n" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1171 #: src/hed/libs/credential/ARCProxyUtil.cpp:760 #, fuzzy, c-format msgid "Returned message from VOMS server %s is: %s\n" msgstr "Ez a válasz érkezett a voms szervertől: %s" #: src/clients/credentials/arcproxyalt.cpp:1193 #: src/hed/libs/credential/ARCProxyUtil.cpp:782 #, c-format msgid "The attribute information from VOMS server: %s is list as following:" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1205 #: src/hed/libs/credential/ARCProxyUtil.cpp:794 #, c-format msgid "" "There are %d servers with the same name: %s in your vomses file, but all of " "them can not be reached, or can not return valid message. But proxy without " "VOMS AC extension will still be generated." msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1220 #, fuzzy, c-format msgid "Failed to add extension: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/clients/credentials/arcproxyalt.cpp:1238 #: src/hed/libs/credential/ARCProxyUtil.cpp:443 #: src/hed/libs/credential/Credential.cpp:884 #, c-format msgid "Error: can't open policy file: %s" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1248 #: src/hed/libs/credential/ARCProxyUtil.cpp:453 #: src/hed/libs/credential/Credential.cpp:897 #, c-format msgid "Error: policy location: %s is not a regular file" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1600 msgid "" "$X509_VOMS_FILE, and $X509_VOMSES are not set;\n" "User has not specify the location for vomses information;\n" "There is also not vomses location information in user's configuration file;\n" "Cannot find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses, " "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/" "vomses, /etc/vomses, /etc/grid-security/vomses, and the location at the " "corresponding sub-directory" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1640 #: src/hed/libs/credential/ARCProxyUtil.cpp:552 #, c-format msgid "VOMS line contains wrong number of tokens (%u expected): \"%s\"" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1684 #: src/hed/libs/credential/ARCProxyUtil.cpp:596 #, fuzzy, c-format msgid "Cannot get VOMS server %s information from the vomses files" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/clients/credentials/test2myproxyserver_get.cpp:89 #: src/clients/credentials/test2myproxyserver_get.cpp:131 #: src/clients/credentials/test2myproxyserver_put.cpp:88 #: src/clients/credentials/test2myproxyserver_put.cpp:182 #: src/clients/credentials/test2vomsserver.cpp:101 msgid "No stream response" msgstr "Nincs válasz" #: src/clients/credentials/test2myproxyserver_get.cpp:104 #: src/clients/credentials/test2myproxyserver_get.cpp:143 #: src/clients/credentials/test2myproxyserver_get.cpp:190 #: src/clients/credentials/test2myproxyserver_put.cpp:103 #: src/clients/credentials/test2myproxyserver_put.cpp:116 #: src/clients/credentials/test2myproxyserver_put.cpp:194 #, c-format msgid "Returned msg from myproxy server: %s %d" msgstr "Ezt a választ kaptam a myproxy szervertől: %s %d" #: src/clients/credentials/test2myproxyserver_get.cpp:149 #, c-format msgid "There are %d certificates in the returned msg" msgstr "%d darab publikus 
tanúsítvány van a válasz üzenetben" #: src/clients/credentials/test2myproxyserver_put.cpp:135 msgid "Delegate proxy failed" msgstr "Proxy delegáció sikertelen" #: src/clients/credentials/test2vomsserver.cpp:116 #, c-format msgid "Returned msg from voms server: %s " msgstr "Ezt a választ kaptam a voms szervertől: %s" #: src/clients/data/arccp.cpp:77 src/clients/data/arccp.cpp:330 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:426 #, c-format msgid "Current transfer FAILED: %s" msgstr "Az aktuális átvitel MEGSZAKADT: %s" #: src/clients/data/arccp.cpp:79 src/clients/data/arccp.cpp:135 #: src/clients/data/arccp.cpp:332 src/clients/data/arcls.cpp:224 #: src/clients/data/arcmkdir.cpp:73 src/clients/data/arcrename.cpp:89 #: src/clients/data/arcrm.cpp:95 msgid "This seems like a temporary error, please try again later" msgstr "" #: src/clients/data/arccp.cpp:87 src/clients/data/arccp.cpp:96 #, fuzzy, c-format msgid "Unable to copy %s" msgstr "Nem sikerült betölteni a %s bróker modult" #: src/clients/data/arccp.cpp:88 src/clients/data/arccp.cpp:97 #: src/clients/data/arcls.cpp:150 src/clients/data/arcls.cpp:159 #: src/clients/data/arcmkdir.cpp:55 src/clients/data/arcmkdir.cpp:64 #: src/clients/data/arcrename.cpp:67 src/clients/data/arcrename.cpp:76 #: src/clients/data/arcrm.cpp:68 src/clients/data/arcrm.cpp:80 msgid "Invalid credentials, please check proxy and/or CA certificates" msgstr "" #: src/clients/data/arccp.cpp:94 src/clients/data/arcls.cpp:156 #: src/clients/data/arcmkdir.cpp:61 src/clients/data/arcrename.cpp:73 #: src/clients/data/arcrm.cpp:77 msgid "Proxy expired" msgstr "" #: src/clients/data/arccp.cpp:112 src/clients/data/arccp.cpp:116 #: src/clients/data/arccp.cpp:149 src/clients/data/arccp.cpp:153 #: src/clients/data/arccp.cpp:358 src/clients/data/arccp.cpp:363 #: src/clients/data/arcls.cpp:123 src/clients/data/arcmkdir.cpp:28 #: src/clients/data/arcrename.cpp:29 src/clients/data/arcrename.cpp:33 #: src/clients/data/arcrm.cpp:36 #, c-format msgid "Invalid URL: %s" msgstr "Érvénytelen URL: %s" #: src/clients/data/arccp.cpp:128 msgid "Third party transfer is not supported for these endpoints" msgstr "" #: src/clients/data/arccp.cpp:130 msgid "" "Protocol(s) not supported - please check that the relevant gfal2\n" " plugins are installed (gfal2-plugin-* packages)" msgstr "" #: src/clients/data/arccp.cpp:133 #, c-format msgid "Transfer FAILED: %s" msgstr "" #: src/clients/data/arccp.cpp:161 src/clients/data/arccp.cpp:187 #: src/clients/data/arccp.cpp:374 src/clients/data/arccp.cpp:402 #, c-format msgid "Can't read list of sources from file %s" msgstr "Nem tudom olvasni a forrásokat a fájlból: %s" #: src/clients/data/arccp.cpp:166 src/clients/data/arccp.cpp:202 #: src/clients/data/arccp.cpp:379 src/clients/data/arccp.cpp:418 #, c-format msgid "Can't read list of destinations from file %s" msgstr "Nem tudom olvasni a célállomásokat a fájlból: %s" #: src/clients/data/arccp.cpp:171 src/clients/data/arccp.cpp:385 msgid "Numbers of sources and destinations do not match" msgstr "A forrás- és célállomások száma nem egyezik meg" #: src/clients/data/arccp.cpp:216 msgid "Fileset registration is not supported yet" msgstr "A fileset regisztráció még nem támogatott" #: src/clients/data/arccp.cpp:222 src/clients/data/arccp.cpp:295 #: src/clients/data/arccp.cpp:456 #, c-format msgid "Unsupported source url: %s" msgstr "Nem támogatott forrás url: %s" #: src/clients/data/arccp.cpp:226 src/clients/data/arccp.cpp:299 #, c-format msgid "Unsupported destination url: %s" msgstr "Nem támogatott cél url: %s" #:
src/clients/data/arccp.cpp:233 msgid "" "For registration source must be ordinary URL and destination must be " "indexing service" msgstr "" #: src/clients/data/arccp.cpp:243 #, c-format msgid "Could not obtain information about source: %s" msgstr "" #: src/clients/data/arccp.cpp:250 msgid "" "Metadata of source does not match existing destination. Use the --force " "option to override this." msgstr "" #: src/clients/data/arccp.cpp:262 msgid "Failed to accept new file/destination" msgstr "" #: src/clients/data/arccp.cpp:268 src/clients/data/arccp.cpp:274 #, c-format msgid "Failed to register new file/destination: %s" msgstr "" #: src/clients/data/arccp.cpp:436 msgid "Fileset copy to single object is not supported yet" msgstr "" #: src/clients/data/arccp.cpp:446 msgid "Can't extract object's name from source url" msgstr "" #: src/clients/data/arccp.cpp:465 #, c-format msgid "%s. Cannot copy fileset" msgstr "" #: src/clients/data/arccp.cpp:475 src/hed/libs/compute/ExecutionTarget.cpp:254 #: src/hed/libs/compute/ExecutionTarget.cpp:326 #, c-format msgid "Name: %s" msgstr "Név: %s" #: src/clients/data/arccp.cpp:478 #, c-format msgid "Source: %s" msgstr "Forrás: %s" #: src/clients/data/arccp.cpp:479 #, c-format msgid "Destination: %s" msgstr "Célállomás: %s" #: src/clients/data/arccp.cpp:485 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:433 msgid "Current transfer complete" msgstr "Az aktuális átvitel sikeres" #: src/clients/data/arccp.cpp:488 msgid "Some transfers failed" msgstr "" #: src/clients/data/arccp.cpp:498 #, c-format msgid "Directory: %s" msgstr "" #: src/clients/data/arccp.cpp:518 msgid "Transfer complete" msgstr "" #: src/clients/data/arccp.cpp:537 msgid "source destination" msgstr "" #: src/clients/data/arccp.cpp:538 msgid "" "The arccp command copies files to, from and between grid storage elements." msgstr "" #: src/clients/data/arccp.cpp:543 msgid "" "use passive transfer (off by default if secure is on, on by default if " "secure is not requested)" msgstr "" #: src/clients/data/arccp.cpp:549 msgid "do not try to force passive transfer" msgstr "" #: src/clients/data/arccp.cpp:554 msgid "" "if the destination is an indexing service and not the same as the source and " "the destination is already registered, then the copy is normally not done. " "However, if this option is specified the source is assumed to be a replica " "of the destination created in an uncontrolled way and the copy is done like " "in case of replication. Using this option also skips validation of completed " "transfers." msgstr "" #: src/clients/data/arccp.cpp:567 msgid "show progress indicator" msgstr "" #: src/clients/data/arccp.cpp:572 msgid "" "do not transfer, but register source into destination. destination must be a " "meta-url." 
msgstr "" #: src/clients/data/arccp.cpp:578 msgid "use secure transfer (insecure by default)" msgstr "" #: src/clients/data/arccp.cpp:583 msgid "path to local cache (use to put file into cache)" msgstr "" #: src/clients/data/arccp.cpp:588 src/clients/data/arcls.cpp:300 msgid "operate recursively" msgstr "" #: src/clients/data/arccp.cpp:593 src/clients/data/arcls.cpp:305 msgid "operate recursively up to specified level" msgstr "" #: src/clients/data/arccp.cpp:594 src/clients/data/arcls.cpp:306 msgid "level" msgstr "" #: src/clients/data/arccp.cpp:598 msgid "number of retries before failing file transfer" msgstr "" #: src/clients/data/arccp.cpp:599 msgid "number" msgstr "szám" #: src/clients/data/arccp.cpp:603 msgid "" "physical location to write to when destination is an indexing service. Must " "be specified for indexing services which do not automatically generate " "physical locations. Can be specified multiple times - locations will be " "tried in order until one succeeds." msgstr "" #: src/clients/data/arccp.cpp:611 msgid "" "perform third party transfer, where the destination pulls from the source " "(only available with GFAL plugin)" msgstr "" #: src/clients/data/arccp.cpp:617 src/clients/data/arcls.cpp:322 #: src/clients/data/arcmkdir.cpp:101 src/clients/data/arcrename.cpp:112 #: src/clients/data/arcrm.cpp:127 msgid "list the available plugins (protocols supported)" msgstr "" #: src/clients/data/arccp.cpp:656 src/clients/data/arcls.cpp:362 #: src/clients/data/arcmkdir.cpp:141 src/clients/data/arcrename.cpp:152 #: src/clients/data/arcrm.cpp:168 msgid "Protocol plugins available:" msgstr "" #: src/clients/data/arccp.cpp:681 src/clients/data/arcls.cpp:387 #: src/clients/data/arcmkdir.cpp:165 src/clients/data/arcrename.cpp:175 #: src/clients/data/arcrm.cpp:193 msgid "Wrong number of parameters specified" msgstr "" #: src/clients/data/arccp.cpp:686 msgid "Options 'p' and 'n' can't be used simultaneously" msgstr "" #: src/clients/data/arcls.cpp:129 src/clients/data/arcmkdir.cpp:34 #: src/clients/data/arcrm.cpp:43 #, c-format msgid "Can't read list of locations from file %s" msgstr "" #: src/clients/data/arcls.cpp:144 src/clients/data/arcmkdir.cpp:49 #: src/clients/data/arcrename.cpp:61 msgid "Unsupported URL given" msgstr "" #: src/clients/data/arcls.cpp:149 src/clients/data/arcls.cpp:158 #, fuzzy, c-format msgid "Unable to list content of %s" msgstr "Nem sikerült betölteni a %s bróker modult" #: src/clients/data/arcls.cpp:227 msgid "Warning: Failed listing files but some information is obtained" msgstr "" #: src/clients/data/arcls.cpp:281 src/clients/data/arcmkdir.cpp:90 msgid "url" msgstr "url" #: src/clients/data/arcls.cpp:282 msgid "" "The arcls command is used for listing files in grid storage elements and " "file\n" "index catalogues." 
msgstr "" #: src/clients/data/arcls.cpp:291 msgid "show URLs of file locations" msgstr "" #: src/clients/data/arcls.cpp:295 msgid "display all available metadata" msgstr "" #: src/clients/data/arcls.cpp:309 msgid "" "show only description of requested object, do not list content of directories" msgstr "" #: src/clients/data/arcls.cpp:313 msgid "treat requested object as directory and always try to list content" msgstr "" #: src/clients/data/arcls.cpp:317 msgid "check readability of object, does not show any information about object" msgstr "" #: src/clients/data/arcls.cpp:392 msgid "Incompatible options --nolist and --forcelist requested" msgstr "" #: src/clients/data/arcls.cpp:397 msgid "Requesting recursion and --nolist has no sense" msgstr "" #: src/clients/data/arcmkdir.cpp:54 src/clients/data/arcmkdir.cpp:63 #, fuzzy, c-format msgid "Unable to create directory %s" msgstr "Nem sikerült elküldeni a kérést" #: src/clients/data/arcmkdir.cpp:91 msgid "" "The arcmkdir command creates directories on grid storage elements and " "catalogs." msgstr "" #: src/clients/data/arcmkdir.cpp:96 msgid "make parent directories as needed" msgstr "" #: src/clients/data/arcrename.cpp:41 msgid "Both URLs must have the same protocol, host and port" msgstr "" #: src/clients/data/arcrename.cpp:51 msgid "Cannot rename to or from root directory" msgstr "" #: src/clients/data/arcrename.cpp:55 msgid "Cannot rename to the same URL" msgstr "" #: src/clients/data/arcrename.cpp:66 src/clients/data/arcrename.cpp:75 #, fuzzy, c-format msgid "Unable to rename %s" msgstr "Nem sikerült elküldeni a kérést" #: src/clients/data/arcrename.cpp:106 msgid "old_url new_url" msgstr "" #: src/clients/data/arcrename.cpp:107 #, fuzzy msgid "The arcrename command renames files on grid storage elements." msgstr "Az arcclean parancs eltávolít egy feladatot a távoli klaszterröl" #: src/clients/data/arcrm.cpp:58 #, fuzzy, c-format msgid "Unsupported URL given: %s" msgstr "Nem támogatott url: %s" #: src/clients/data/arcrm.cpp:67 src/clients/data/arcrm.cpp:79 #, fuzzy, c-format msgid "Unable to remove file %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/clients/data/arcrm.cpp:115 #, fuzzy msgid "url [url ...]" msgstr "[klaszter ...]" #: src/clients/data/arcrm.cpp:116 msgid "The arcrm command deletes files and on grid storage elements." msgstr "" #: src/clients/data/arcrm.cpp:121 msgid "" "remove logical file name registration even if not all physical instances " "were removed" msgstr "" #: src/clients/echo/arcecho.cpp:32 msgid "service message" msgstr "" #: src/clients/echo/arcecho.cpp:33 msgid "The arcecho command is a client for the ARC echo service." msgstr "" #: src/clients/echo/arcecho.cpp:35 msgid "" "The service argument is a URL to an ARC echo service.\n" "The message argument is the message the service should return." 
msgstr "" #: src/clients/echo/arcecho.cpp:105 src/hed/dmc/arc/DataPointARC.cpp:169 #: src/hed/dmc/arc/DataPointARC.cpp:222 src/hed/dmc/arc/DataPointARC.cpp:304 #: src/hed/dmc/arc/DataPointARC.cpp:415 src/hed/dmc/arc/DataPointARC.cpp:510 #: src/hed/dmc/arc/DataPointARC.cpp:574 src/hed/dmc/arc/DataPointARC.cpp:624 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:100 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:174 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:240 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:326 #, c-format msgid "" "Request:\n" "%s" msgstr "" "Kérés:\n" "%s" #: src/clients/echo/arcecho.cpp:119 src/hed/dmc/arc/DataPointARC.cpp:182 #: src/hed/dmc/arc/DataPointARC.cpp:235 src/hed/dmc/arc/DataPointARC.cpp:320 #: src/hed/dmc/arc/DataPointARC.cpp:431 src/hed/dmc/arc/DataPointARC.cpp:524 #: src/hed/dmc/arc/DataPointARC.cpp:587 src/hed/dmc/arc/DataPointARC.cpp:638 #: src/hed/dmc/srm/srmclient/SRMClient.cpp:164 msgid "No SOAP response" msgstr "" #: src/clients/echo/arcecho.cpp:124 src/hed/acc/UNICORE/UNICOREClient.cpp:531 #: src/hed/dmc/arc/DataPointARC.cpp:187 src/hed/dmc/arc/DataPointARC.cpp:240 #: src/hed/dmc/arc/DataPointARC.cpp:325 src/hed/dmc/arc/DataPointARC.cpp:436 #: src/hed/dmc/arc/DataPointARC.cpp:529 src/hed/dmc/arc/DataPointARC.cpp:592 #: src/hed/dmc/arc/DataPointARC.cpp:643 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:119 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:193 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:267 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:346 #, c-format msgid "" "Response:\n" "%s" msgstr "" "Válasz:\n" "%s" #: src/clients/saml/saml_assertion_init.cpp:43 msgid "service_url" msgstr "" #: src/clients/saml/saml_assertion_init.cpp:47 msgid "path to config file" msgstr "a konfigurációs fájl elérési útvonala" #: src/clients/saml/saml_assertion_init.cpp:140 msgid "SOAP Request failed: No response" msgstr "" #: src/clients/saml/saml_assertion_init.cpp:144 msgid "SOAP Request failed: Error" msgstr "" #: src/clients/saml/saml_assertion_init.cpp:150 msgid "No in SOAP response" msgstr "" #: src/clients/saml/saml_assertion_init.cpp:156 msgid "No in SAML response" msgstr "" #: src/clients/saml/saml_assertion_init.cpp:168 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:321 msgid "Succeeded to verify the signature under " msgstr "" #: src/clients/saml/saml_assertion_init.cpp:171 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:324 msgid "Failed to verify the signature under " msgstr "" #: src/clients/wsrf/arcwsrf.cpp:39 msgid "URL [query]" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:40 msgid "" "The arcwsrf command is used for obtaining the WS-ResourceProperties of\n" "services." 
msgstr "" #: src/clients/wsrf/arcwsrf.cpp:46 msgid "Request for specific Resource Property" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:47 msgid "[-]name" msgstr "[-]név" #: src/clients/wsrf/arcwsrf.cpp:80 msgid "Missing URL" msgstr "Hiányzik az URL" #: src/clients/wsrf/arcwsrf.cpp:85 msgid "Too many parameters" msgstr "Túl sok paraméter" #: src/clients/wsrf/arcwsrf.cpp:123 #, fuzzy msgid "Query is not a valid XML" msgstr "A lekérdezés nem XML helyes" #: src/clients/wsrf/arcwsrf.cpp:138 msgid "Failed to create WSRP request" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:145 msgid "Specified URL is not valid" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:157 msgid "Failed to send request" msgstr "Nem sikerült elküldeni a kérést" #: src/clients/wsrf/arcwsrf.cpp:161 msgid "Failed to obtain SOAP response" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:167 msgid "SOAP fault received" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:142 #, c-format msgid "Connect: Failed to init handle: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:148 #, c-format msgid "Failed to enable IPv6: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:158 src/hed/acc/ARC0/FTPControl.cpp:172 #, c-format msgid "Connect: Failed to connect: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:165 #, c-format msgid "Connect: Connecting timed out after %d ms" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:185 #, c-format msgid "Connect: Failed to init auth info handle: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:196 src/hed/acc/ARC0/FTPControl.cpp:210 #, c-format msgid "Connect: Failed authentication: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:203 #, c-format msgid "Connect: Authentication timed out after %d ms" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:224 src/hed/acc/ARC0/FTPControl.cpp:256 #, c-format msgid "SendCommand: Command: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:229 src/hed/acc/ARC0/FTPControl.cpp:240 #: src/hed/acc/ARC0/FTPControl.cpp:260 src/hed/acc/ARC0/FTPControl.cpp:271 #, c-format msgid "SendCommand: Failed: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:235 src/hed/acc/ARC0/FTPControl.cpp:266 #, c-format msgid "SendCommand: Timed out after %d ms" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:243 src/hed/acc/ARC0/FTPControl.cpp:276 #, fuzzy, c-format msgid "SendCommand: Response: %s" msgstr "Válasz: %s" #: src/hed/acc/ARC0/FTPControl.cpp:293 msgid "SendData: Failed sending EPSV and PASV commands" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:298 src/hed/acc/ARC0/FTPControl.cpp:304 #: src/hed/acc/ARC0/FTPControl.cpp:320 #, c-format msgid "SendData: Server PASV response parsing failed: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:330 src/hed/acc/ARC0/FTPControl.cpp:336 #: src/hed/acc/ARC0/FTPControl.cpp:343 src/hed/acc/ARC0/FTPControl.cpp:350 #, c-format msgid "SendData: Server EPSV response parsing failed: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:357 #, c-format msgid "SendData: Server EPSV response port parsing failed: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:366 #, c-format msgid "SendData: Failed to apply local address to data connection: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:372 #, c-format msgid "SendData: Can't parse host and/or port in response to EPSV/PASV: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:377 #, c-format msgid "SendData: Data channel: %d.%d.%d.%d:%d" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:393 #, c-format msgid "SendData: Data channel: [%s]:%d" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:398 #, c-format msgid "SendData: Local port failed: %s" msgstr "" 
#: src/hed/acc/ARC0/FTPControl.cpp:422 msgid "SendData: Failed sending DCAU command" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:427 msgid "SendData: Failed sending TYPE command" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:436 #, c-format msgid "SendData: Local type failed: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:446 #, c-format msgid "SendData: Failed sending STOR command: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:454 src/hed/acc/ARC0/FTPControl.cpp:475 #, c-format msgid "SendData: Data connect write failed: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:461 src/hed/acc/ARC0/FTPControl.cpp:469 #, c-format msgid "SendData: Data connect write timed out after %d ms" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:487 src/hed/acc/ARC0/FTPControl.cpp:507 #, c-format msgid "SendData: Data write failed: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:493 src/hed/acc/ARC0/FTPControl.cpp:501 #, c-format msgid "SendData: Data write timed out after %d ms" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:527 src/hed/acc/ARC0/FTPControl.cpp:538 #, c-format msgid "Disconnect: Failed aborting - ignoring: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:530 #, c-format msgid "Disconnect: Data close timed out after %d ms" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:541 #, c-format msgid "Disconnect: Abort timed out after %d ms" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:549 #, c-format msgid "Disconnect: Failed quitting - ignoring: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:552 #, c-format msgid "Disconnect: Quitting timed out after %d ms" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:561 #, c-format msgid "Disconnect: Failed closing - ignoring: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:567 #, c-format msgid "Disconnect: Closing timed out after %d ms" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:582 msgid "Disconnect: waiting for globus handle to settle" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:596 msgid "Disconnect: globus handle is stuck." msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:604 #, c-format msgid "Disconnect: Failed destroying handle: %s. Can't handle such situation." msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:607 msgid "Disconnect: handle destroyed." msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:47 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:44 msgid "" "Missing reference to factory and/or module. It is unsafe to use Globus in " "non-persistent mode - SubmitterPlugin for ARC0 is disabled. Report to " "developers." msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:62 #, c-format msgid "Unable to query job information (%s), invalid URL provided (%s)" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:74 #, c-format msgid "Jobs left to query: %d" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:83 #, c-format msgid "Querying batch with %d jobs" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:99 msgid "Can't create information handle - is the ARC LDAP DMC plugin available?" 
msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:132 #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:47 #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:36 #, c-format msgid "Job information not found in the information system: %s" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:134 msgid "" "This job was very recently submitted and might not yet have reached the " "information system" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:236 #, c-format msgid "Cleaning job: %s" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:240 msgid "Failed to connect for job cleaning" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:252 msgid "Failed sending CWD command for job cleaning" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:259 msgid "Failed sending RMD command for job cleaning" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:266 msgid "Failed to disconnect after job cleaning" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:273 msgid "Job cleaning successful" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:284 #, c-format msgid "Cancelling job: %s" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:288 msgid "Failed to connect for job cancelling" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:300 msgid "Failed sending CWD command for job cancelling" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:307 msgid "Failed sending DELE command for job cancelling" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:314 msgid "Failed to disconnect after job cancelling" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:322 msgid "Job cancelling successful" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:333 #, c-format msgid "Renewing credentials for job: %s" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:337 msgid "Failed to connect for credential renewal" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:349 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:356 msgid "Failed sending CWD command for credentials renewal" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:362 msgid "Failed to disconnect after credentials renewal" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:369 msgid "Renewal of credentials was successful" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:381 #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:111 #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:192 #, c-format msgid "Job %s does not report a resumable state" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:391 #, c-format msgid "Illegal jobID specified (%s)" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:398 #, c-format msgid "HER: %s" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:404 #, c-format msgid "Could not create temporary file: %s" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:437 #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:131 #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:210 msgid "Job resuming successful" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:473 #, c-format msgid "Trying to retrieve job description of %s from computing resource" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:478 #, fuzzy, c-format msgid "invalid jobID: %s" msgstr "Érvénytelen URL: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:520 msgid "clientxrsl found" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:523 msgid "could not find start of clientxrsl" msgstr "" #: 
src/hed/acc/ARC0/JobControllerPluginARC0.cpp:528 msgid "could not find end of clientxrsl" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:541 #, c-format msgid "Job description: %s" msgstr "Feladat leírás: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:544 msgid "clientxrsl not found" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:550 #, fuzzy, c-format msgid "Invalid JobDescription: %s" msgstr "Érvénytelen feladat leírás:" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:553 msgid "Valid JobDescription found" msgstr "" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:60 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:204 msgid "Submit: Failed to connect" msgstr "" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:68 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:212 msgid "Submit: Failed sending CWD command" msgstr "" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:79 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:223 msgid "Submit: Failed sending CWD new command" msgstr "" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:106 #, fuzzy msgid "Failed to prepare job description." msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:116 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:260 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:63 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:158 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:87 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:401 #, fuzzy, c-format msgid "Unable to submit job. Job description is not valid in the %s format: %s" msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:123 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:267 msgid "Submit: Failed sending job description" msgstr "" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:138 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:282 msgid "Submit: Failed uploading local input files" msgstr "" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:193 msgid "" "Submit: service has no suitable information interface - need org.nordugrid." "ldapng" msgstr "" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:250 #, fuzzy msgid "Failed to prepare job description to target resources." msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." #: src/hed/acc/ARC1/AREXClient.cpp:58 msgid "Creating an A-REX client" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:61 msgid "Unable to create SOAP client used by AREXClient." msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:85 #, fuzzy msgid "Failed locating credentials." msgstr "Nem sikerült listázni a meta adatokat" #: src/hed/acc/ARC1/AREXClient.cpp:94 msgid "Failed initiate client connection." msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:102 msgid "Client connection has no entry point." msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:113 src/hed/acc/EMIES/EMIESClient.cpp:130 #: src/hed/acc/UNICORE/UNICOREClient.cpp:191 #: src/hed/acc/UNICORE/UNICOREClient.cpp:222 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:505 #: src/services/a-rex/test.cpp:86 msgid "Initiating delegation procedure" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:115 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:507 msgid "Failed to initiate delegation credentials" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:128 msgid "Re-creating an A-REX client" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:146 msgid "AREXClient was not created properly." 
msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:151 src/hed/acc/EMIES/EMIESClient.cpp:174 #, c-format msgid "Processing a %s request" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:173 src/hed/acc/CREAM/CREAMClient.cpp:134 #: src/hed/acc/EMIES/EMIESClient.cpp:180 #, c-format msgid "%s request failed" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:181 src/hed/acc/EMIES/EMIESClient.cpp:189 #, c-format msgid "No response from %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:190 src/hed/acc/EMIES/EMIESClient.cpp:198 #, c-format msgid "%s request to %s failed with response: %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:195 src/hed/acc/EMIES/EMIESClient.cpp:213 #, c-format msgid "XML response: %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:204 #, c-format msgid "%s request to %s failed. No expected response." msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:218 #, c-format msgid "Creating and sending submit request to %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:234 src/hed/acc/ARC1/AREXClient.cpp:482 #: src/hed/acc/EMIES/EMIESClient.cpp:302 src/hed/acc/EMIES/EMIESClient.cpp:405 #: src/hed/acc/UNICORE/UNICOREClient.cpp:160 #, c-format msgid "Job description to be sent: %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:248 src/hed/acc/EMIES/EMIESClient.cpp:491 #: src/hed/acc/EMIES/EMIESClient.cpp:525 src/hed/acc/EMIES/EMIESClient.cpp:581 #, c-format msgid "Creating and sending job information query request to %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:293 src/hed/acc/ARC1/AREXClient.cpp:336 #, c-format msgid "Unable to retrieve status of job (%s)" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:346 src/hed/acc/EMIES/EMIESClient.cpp:821 #, c-format msgid "Creating and sending service information query request to %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:366 #, c-format msgid "Creating and sending ISIS information query request to %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:383 #, c-format msgid "Service %s of type %s ignored" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:386 msgid "No execution services registered in the index service" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:392 #, c-format msgid "Creating and sending terminate request to %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:403 #: src/hed/acc/UNICORE/UNICOREClient.cpp:619 #: src/hed/acc/UNICORE/UNICOREClient.cpp:692 msgid "Job termination failed" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:414 #, c-format msgid "Creating and sending clean request to %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:444 #, c-format msgid "Creating and sending job description retrieval request to %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:464 #, c-format msgid "Creating and sending job migrate request to %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:498 src/hed/acc/EMIES/EMIESClient.cpp:932 #, c-format msgid "Creating and sending job resume request to %s" msgstr "" #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:100 msgid "Renewal of ARC1 jobs is not supported" msgstr "" #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:117 #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:197 #, c-format msgid "Resuming job: %s at state: %s (%s)" msgstr "" #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:183 #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:103 #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:139 #, c-format msgid "Failed retrieving job description for job: %s" msgstr "" #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:42 #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:69 msgid "Failed retrieving job status information" msgstr 
"" #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:52 msgid "Cleaning of BES jobs is not supported" msgstr "" #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:78 msgid "Renewal of BES jobs is not supported" msgstr "" #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:86 msgid "Resuming BES jobs is not supported" msgstr "" #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:49 msgid "Collecting Job (A-REX jobs) information." msgstr "" #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:53 #, c-format msgid "Failed retrieving job IDs: Unsupported url (%s) given" msgstr "" #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:61 msgid "Failed retrieving job IDs" msgstr "" #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:64 msgid "" "Error encoutered during job ID retrieval. All job IDs might not have been " "retrieved" msgstr "" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:53 #, fuzzy msgid "Failed to prepare job description" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:78 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:173 msgid "No job identifier returned by BES service" msgstr "" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:99 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:194 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:310 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:77 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:169 msgid "Failed uploading local input files" msgstr "" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:148 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:53 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:136 #, fuzzy msgid "Failed to prepare job description to target resources" msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:271 msgid "Failed adapting job description to target resources" msgstr "" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:282 #, c-format msgid "" "Unable to migrate job. Job description is not valid in the %s format: %s" msgstr "" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:295 msgid "No job identifier returned by A-REX" msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:50 msgid "Querying WSRF GLUE2 computing info endpoint." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:95 #: src/hed/libs/compute/GLUE2.cpp:53 msgid "The Service doesn't advertise its Type." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:100 msgid "The Service doesn't advertise its Quality Level." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:120 #, c-format msgid "Generating A-REX target: %s" msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:145 #: src/hed/libs/compute/GLUE2.cpp:99 msgid "The ComputingEndpoint has no URL." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:151 #: src/hed/libs/compute/GLUE2.cpp:104 msgid "The Service advertises no Health State." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:178 msgid "The Service doesn't advertise its Interface." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:210 msgid "The Service doesn't advertise its Serving State." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:278 #: src/hed/libs/compute/GLUE2.cpp:247 #, c-format msgid "" "The \"FreeSlotsWithDuration\" attribute published by \"%s\" is wrongly " "formatted. Ignoring it." 
msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:279 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE2.cpp:220 #: src/hed/libs/compute/GLUE2.cpp:248 #, c-format msgid "Wrong format of the \"FreeSlotsWithDuration\" = \"%s\" (\"%s\")" msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:398 #: src/hed/libs/compute/GLUE2.cpp:417 #, c-format msgid "" "Couldn't parse benchmark XML:\n" "%s" msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:467 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPNG.cpp:426 #, c-format msgid "Unable to parse the %s.%s value from execution service (%s)." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:468 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPNG.cpp:427 #, c-format msgid "Value of %s.%s is \"%s\"" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:14 msgid "Sorting according to free slots in queue" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:15 msgid "Random sorting" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:16 msgid "Sorting according to specified benchmark (default \"specint2000\")" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:17 msgid "Sorting according to input data availability at target" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:18 msgid "Performs neither sorting nor matching" msgstr "" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:24 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of waiting " "jobs" msgstr "" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:27 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of total slots" msgstr "" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:30 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of free slots" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:114 msgid "Creating a CREAM client" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:117 msgid "Unable to create SOAP client used by CREAMClient." 
msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:128 msgid "CREAMClient not created properly" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:139 #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:52 #: src/hed/acc/UNICORE/UNICOREClient.cpp:207 #: src/hed/acc/UNICORE/UNICOREClient.cpp:299 #: src/hed/acc/UNICORE/UNICOREClient.cpp:376 #: src/hed/acc/UNICORE/UNICOREClient.cpp:455 #: src/hed/acc/UNICORE/UNICOREClient.cpp:488 #: src/hed/acc/UNICORE/UNICOREClient.cpp:565 #: src/hed/acc/UNICORE/UNICOREClient.cpp:641 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:155 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:188 #: src/tests/client/test_ClientInterface.cpp:88 #: src/tests/client/test_ClientSAML2SSO.cpp:81 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:100 #: src/tests/echo/test_clientinterface.cpp:82 #: src/tests/echo/test_clientinterface.cpp:149 #: src/tests/echo/test_clientinterface.py:29 msgid "There was no SOAP response" msgstr "Nincs SOAP-os válasz" #: src/hed/acc/CREAM/CREAMClient.cpp:148 src/hed/acc/CREAM/CREAMClient.cpp:353 #: src/hed/acc/CREAM/CREAMClient.cpp:374 src/hed/acc/CREAM/CREAMClient.cpp:395 #: src/hed/acc/CREAM/CREAMClient.cpp:414 src/hed/acc/CREAM/CREAMClient.cpp:465 #: src/hed/acc/CREAM/CREAMClient.cpp:494 msgid "Empty response" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:167 #, c-format msgid "Request failed: %s" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:175 src/hed/acc/CREAM/CREAMClient.cpp:428 #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:35 #: src/hed/acc/UNICORE/UNICOREClient.cpp:359 msgid "Creating and sending a status request" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:200 msgid "Unable to retrieve job status." msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:340 #: src/hed/acc/UNICORE/UNICOREClient.cpp:549 #: src/hed/acc/UNICORE/UNICOREClient.cpp:628 msgid "Creating and sending request to terminate a job" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:361 msgid "Creating and sending request to clean a job" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:382 #, fuzzy msgid "Creating and sending request to resume a job" msgstr "SOAP kérés készítése és küldése" #: src/hed/acc/CREAM/CREAMClient.cpp:403 #, fuzzy msgid "Creating and sending request to list jobs" msgstr "SOAP kérés készítése és küldése" #: src/hed/acc/CREAM/CREAMClient.cpp:450 msgid "Creating and sending job register request" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:470 src/hed/acc/CREAM/CREAMClient.cpp:499 msgid "No job ID in response" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:480 msgid "Creating and sending job start request" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:508 msgid "Creating delegation" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:520 msgid "Malformed response: missing getProxyReqReturn" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:531 #, c-format msgid "Delegatable credentials expired: %s" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:541 msgid "Failed signing certificate request" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:561 msgid "Failed putting signed delegation certificate to service" msgstr "" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:52 #, c-format msgid "Failed cleaning job: %s" msgstr "" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:70 #, c-format msgid "Failed canceling job: %s" msgstr "" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:84 msgid "Renewal of CREAM jobs is not supported" msgstr "" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:98 #, fuzzy, c-format msgid "Failed resuming 
job: %s" msgstr "Nem sikerült listázni a fájlokat" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:40 msgid "Failed creating signed delegation certificate" msgstr "" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:61 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:153 #: src/hed/acc/UNICORE/UNICOREClient.cpp:115 #, fuzzy, c-format msgid "Unable to submit job. Job description is not valid in the %s format" msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:69 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:161 msgid "Failed registering job" msgstr "" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:85 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:177 msgid "Failed starting job" msgstr "" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:123 msgid "Failed creating singed delegation certificate" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:79 msgid "Creating an EMI ES client" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:82 msgid "Unable to create SOAP client used by EMIESClient." msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:157 msgid "Re-creating an EMI ES client" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:223 #, c-format msgid "%s request to %s failed. Unexpected response: %s." msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:237 src/hed/acc/EMIES/EMIESClient.cpp:344 #, fuzzy, c-format msgid "Creating and sending job submit request to %s" msgstr "SOAP kérés készítése és küldése" #: src/hed/acc/EMIES/EMIESClient.cpp:415 src/hed/acc/EMIES/EMIESClient.cpp:598 #: src/hed/acc/EMIES/EMIESClient.cpp:1087 #, c-format msgid "New limit for vector queries returned by EMI ES service: %d" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:423 src/hed/acc/EMIES/EMIESClient.cpp:606 #: src/hed/acc/EMIES/EMIESClient.cpp:1095 #, c-format msgid "" "Error: Service returned a limit higher or equal to current limit (current: %" "d; returned: %d)" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:764 #, fuzzy, c-format msgid "Creating and sending service information request to %s" msgstr "SOAP kérés készítése és küldése" #: src/hed/acc/EMIES/EMIESClient.cpp:869 src/hed/acc/EMIES/EMIESClient.cpp:890 #, fuzzy, c-format msgid "Creating and sending job clean request to %s" msgstr "SOAP kérés készítése és küldése" #: src/hed/acc/EMIES/EMIESClient.cpp:911 #, fuzzy, c-format msgid "Creating and sending job suspend request to %s" msgstr "SOAP kérés készítése és küldése" #: src/hed/acc/EMIES/EMIESClient.cpp:953 #, fuzzy, c-format msgid "Creating and sending job restart request to %s" msgstr "SOAP kérés készítése és küldése" #: src/hed/acc/EMIES/EMIESClient.cpp:1010 #, fuzzy, c-format msgid "Creating and sending job notify request to %s" msgstr "SOAP kérés készítése és küldése" #: src/hed/acc/EMIES/EMIESClient.cpp:1065 #, fuzzy, c-format msgid "Creating and sending notify request to %s" msgstr "SOAP kérés készítése és küldése" #: src/hed/acc/EMIES/EMIESClient.cpp:1155 #, fuzzy, c-format msgid "Creating and sending job list request to %s" msgstr "SOAP kérés készítése és küldése" #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:156 #, c-format msgid "Job %s has no delegation associated. Can't renew such job." msgstr "" #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:170 #, c-format msgid "Job %s failed to renew delegation %s - %s." 
msgstr "" #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:247 #, c-format msgid "Failed retrieving information for job: %s" msgstr "" #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:319 msgid "Retrieving job description of EMI ES jobs is not supported" msgstr "" #: src/hed/acc/EMIES/JobListRetrieverPluginEMIES.cpp:61 #, c-format msgid "Listing jobs succeeded, %d jobs found" msgstr "" #: src/hed/acc/EMIES/JobListRetrieverPluginEMIES.cpp:77 #: src/hed/acc/ldap/JobListRetrieverPluginLDAPNG.cpp:102 #, c-format msgid "" "Skipping retrieved job (%s) because it was submitted via another interface (%" "s)." msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:47 msgid "" "Failed to delegate credentials to server - no delegation interface found" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:54 #, c-format msgid "Failed to delegate credentials to server - %s" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:77 #, fuzzy msgid "Failed preparing job description" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:95 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:406 #, fuzzy msgid "Unable to submit job. Job description is not valid XML" msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:154 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:482 msgid "No valid job identifier returned by EMI ES" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:180 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:499 msgid "Job failed on service side" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:190 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:509 msgid "Failed to obtain state of job" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:205 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:521 msgid "Failed to wait for job to allow stage in" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:228 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:541 msgid "Failed to obtain valid stagein URL for input files" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:248 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:558 #, c-format msgid "Failed uploading local input files to %s" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:269 #, c-format msgid "Failed to submit job description: EMIESFault(%s , %s)" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:278 #, c-format msgid "Failed to submit job description: UnexpectedError(%s)" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:315 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:574 #, fuzzy msgid "Failed to notify service" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:367 #, fuzzy msgid "Failed preparing job description to target resources" msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:475 #, fuzzy, c-format msgid "Failed to submit job description: %s" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/hed/acc/EMIES/TargetInformationRetrieverPluginEMIES.cpp:54 msgid "Collecting EMI-ES GLUE2 computing info endpoint information." 
msgstr "" #: src/hed/acc/EMIES/TargetInformationRetrieverPluginEMIES.cpp:74 msgid "Generating EMIES targets" msgstr "" #: src/hed/acc/EMIES/TargetInformationRetrieverPluginEMIES.cpp:83 #, c-format msgid "Generated EMIES target: %s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:75 #: src/hed/acc/EMIES/TestEMIESClient.cpp:79 #, c-format msgid "Query returned unexpected element: %s:%s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:85 #, c-format msgid "Element validation according to GLUE2 schema failed: %s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:114 #, fuzzy msgid "Resource query failed" msgstr "Státusz lekérdezés sikertelen" #: src/hed/acc/EMIES/TestEMIESClient.cpp:132 msgid "Submission failed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:143 #, fuzzy msgid "Obtaining status failed" msgstr "Státusz lekérdezés sikertelen" #: src/hed/acc/EMIES/TestEMIESClient.cpp:153 #, fuzzy msgid "Obtaining information failed" msgstr "verzió információ kiírása" #: src/hed/acc/EMIES/TestEMIESClient.cpp:170 msgid "Cleaning failed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:177 msgid "Notify failed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:184 msgid "Kill failed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:190 msgid "List failed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:201 #, c-format msgid "Fetching resource description from %s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:204 #: src/hed/acc/EMIES/TestEMIESClient.cpp:273 #: src/hed/acc/EMIES/TestEMIESClient.cpp:283 #: src/hed/acc/EMIES/TestEMIESClient.cpp:294 #, fuzzy, c-format msgid "Failed to obtain resource description: %s" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/hed/acc/EMIES/TestEMIESClient.cpp:213 #: src/hed/acc/EMIES/TestEMIESClient.cpp:217 #, c-format msgid "Resource description contains unexpected element: %s:%s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:223 msgid "Resource description validation according to GLUE2 schema failed: " msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:228 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:560 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:819 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:133 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:173 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1218 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1252 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1434 #: src/hed/identitymap/ArgusPDPClient.cpp:444 #: src/hed/identitymap/ArgusPEPClient.cpp:98 #: src/hed/identitymap/ArgusPEPClient.cpp:345 #: src/hed/libs/common/Thread.cpp:193 src/hed/libs/common/Thread.cpp:196 #: src/hed/libs/common/Thread.cpp:199 #: src/hed/libs/credential/Credential.cpp:1055 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:72 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:88 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:104 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:124 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:134 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:142 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:151 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:160 #: src/hed/mcc/tls/PayloadTLSMCC.cpp:82 src/hed/shc/arcpdp/ArcPDP.cpp:235 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:293 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:247 #: src/services/a-rex/delegation/DelegationStore.cpp:44 #: src/services/a-rex/delegation/DelegationStore.cpp:49 #: src/services/a-rex/delegation/DelegationStore.cpp:54 #: src/services/a-rex/delegation/DelegationStore.cpp:88 #: src/services/a-rex/delegation/DelegationStore.cpp:94 #: 
src/services/a-rex/grid-manager/inputcheck.cpp:33 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:552 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:620 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:645 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:656 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:667 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:678 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:686 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:692 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:697 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:702 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:707 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:715 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:723 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:734 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:741 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:780 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:790 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:798 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:824 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:893 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:906 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:923 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:935 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1239 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1244 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1273 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1286 #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:373 #, c-format msgid "%s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:248 #, fuzzy msgid "Resource description is empty" msgstr "Feladat leírás: %s" #: src/hed/acc/EMIES/TestEMIESClient.cpp:255 #, c-format msgid "Resource description provides URL for interface %s: %s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:260 msgid "Resource description provides no URLs for interfaces" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:263 msgid "Resource description validation passed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:266 #, c-format msgid "Requesting ComputingService elements of resource description at %s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:271 msgid "Performing /Services/ComputingService query" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:275 #: src/hed/acc/EMIES/TestEMIESClient.cpp:285 #: src/hed/acc/EMIES/TestEMIESClient.cpp:296 msgid "Query returned no elements." 
msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:281 msgid "Performing /ComputingService query" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:292 msgid "Performing /* query" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:302 #, fuzzy msgid "All queries failed" msgstr "Státusz lekérdezés sikertelen" #: src/hed/acc/EMIES/TestEMIESClient.cpp:332 #, c-format msgid "" "Number of ComputingService elements obtained from full document and XPath " "qury do not match: %d != %d" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:335 msgid "Resource description query validation passed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:337 #, fuzzy, c-format msgid "Unsupported command: %s" msgstr "Nem támogatott url: %s" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:75 #, c-format msgid "[ADLParser] Unsupported EMI ES state %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:95 #, c-format msgid "[ADLParser] Unsupported internal state %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:105 #, c-format msgid "[ADLParser] Optional for %s elements are not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:114 #, c-format msgid "[ADLParser] %s element must be boolean." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:126 #, c-format msgid "[ADLParser] Code in FailIfExitCodeNotEqualTo in %s is not valid number." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:364 msgid "[ADLParser] Root element is not ActivityDescription " msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:411 msgid "[ADLParser] priority is too large - using max value 100" msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:454 #, c-format msgid "[ADLParser] Unsupported URL %s for RemoteLogging." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:473 #, c-format msgid "[ADLParser] Wrong time %s in ExpirationTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:502 #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:505 msgid "[ADLParser] Only email Prorocol for Notification is supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:563 msgid "[ADLParser] Missing or wrong value in ProcessesPerSlot." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:568 msgid "[ADLParser] Missing or wrong value in ThreadsPerProcess." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:574 msgid "" "[ADLParser] Missing Name element or value in ParallelEnvironment/Option " "element." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:591 #, fuzzy msgid "[ADLParser] NetworkInfo is not supported yet." msgstr "A fileset regisztcáció nem támogatott még" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:605 #, c-format msgid "[ADLParser] NodeAccess value %s is not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:613 msgid "[ADLParser] Missing or wrong value in NumberOfSlots." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:620 msgid "" "[ADLParser] The NumberOfSlots element should be specified, when the value of " "useNumberOfSlots attribute of SlotsPerHost element is \"true\"." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:627 msgid "[ADLParser] Missing or wrong value in SlotsPerHost." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:656 msgid "[ADLParser] Missing or wrong value in IndividualPhysicalMemory." 
msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:666 msgid "[ADLParser] Missing or wrong value in IndividualVirtualMemory." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:676 msgid "[ADLParser] Missing or wrong value in DiskSpaceRequirement." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:690 #, fuzzy msgid "[ADLParser] Benchmark is not supported yet." msgstr "A fileset regisztcáció nem támogatott még" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:698 msgid "[ADLParser] Missing or wrong value in IndividualCPUTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:706 msgid "[ADLParser] Missing or wrong value in TotalCPUTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:715 msgid "[ADLParser] Missing or wrong value in WallTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:735 msgid "[ADLParser] Missing or empty Name in InputFile." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:746 #, c-format msgid "[ADLParser] Wrong URI specified in Source - %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:768 msgid "[ADLParser] Missing or empty Name in OutputFile." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:774 #, c-format msgid "[ADLParser] Wrong URI specified in Target - %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:787 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:792 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:846 #, c-format msgid "Location URI for file %s is invalid" msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:812 #, c-format msgid "[ADLParser] CreationFlag value %s is not supported." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:75 #, c-format msgid "Unknown operator '%s' in attribute require in Version element" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:121 #, c-format msgid "Multiple '%s' elements are not supported." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:136 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:143 #, c-format msgid "The 'exclusiveBound' attribute to the '%s' element is not supported." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:150 msgid "The 'epsilon' attribute to the 'Exact' element is not supported." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:178 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:195 #, c-format msgid "Parsing error: Value of %s element can't be parsed as number" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:183 #, c-format msgid "" "Parsing error: Elements (%s) representing upper range have different values" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:200 #, c-format msgid "" "Parsing error: Elements (%s) representing lower range have different values" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:209 #, c-format msgid "" "Parsing error: Value of lower range (%s) is greater than value of upper " "range (%s)" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:296 msgid "[ARCJSDLParser] Not a JSDL - missing JobDescription element" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:377 #, c-format msgid "" "[ARCJSDLParser] Error during the parsing: missed the name attributes of the " "\"%s\" Environment" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:424 msgid "[ARCJSDLParser] RemoteLogging URL is wrongly formatted." 
msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:440 msgid "[ARCJSDLParser] priority is too large - using max value 100" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:718 msgid "Lower bounded range is not supported for the 'TotalCPUCount' element." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:736 msgid "" "Parsing the \"require\" attribute of the \"QueueName\" nordugrid-JSDL " "element failed. An invalid comparison operator was used, only \"ne\" or \"eq" "\" are allowed." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:787 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:841 #, c-format msgid "No URI element found in Location for file %s" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:873 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:323 #, c-format msgid "String successfully parsed as %s." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:53 #, c-format msgid "[JDLParser] Semicolon (;) is not allowed inside brackets, at '%s;'." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:137 #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:141 #, c-format msgid "[JDLParser] This kind of JDL descriptor is not supported yet: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:144 #, c-format msgid "[JDLParser] Attribute named %s has unknown value: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:224 msgid "Not enough outputsandboxdesturi elements!" msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:306 msgid "" "[JDLParser] Environment variable has been defined without any equals sign." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:503 #, c-format msgid "[JDLParser]: Unknown attribute name: '%s', with value: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:539 msgid "The inputsandboxbaseuri JDL attribute specifies an invalid URL." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:616 msgid "[JDLParser] Syntax error found during the split function." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:620 msgid "[JDLParser] Lines count is zero or other funny error has occurred." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:628 msgid "" "[JDLParser] JDL syntax error. There is at least one equals sign missing " "where it would be expected." 
msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:641 #, c-format msgid "String successfully parsed as %s" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:42 msgid "Left operand for RSL concatenation does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:50 msgid "Right operand for RSL concatenation does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:161 msgid "Multi-request operator only allowed at top level" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:186 msgid "RSL substitution is not a sequence" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:192 msgid "RSL substitution sequence is not of length 2" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:211 msgid "RSL substitution variable name does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:220 msgid "RSL substitution variable value does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:313 msgid "End of comment not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:324 msgid "Junk at end of RSL" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:424 msgid "End of single quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:441 msgid "End of double quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:460 #, c-format msgid "End of user delimiter (%s) quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:518 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:546 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:625 msgid "')' expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:528 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:609 msgid "'(' expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:536 msgid "Variable name expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:541 #, c-format msgid "Variable name (%s) contains invalid character (%s)" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:557 #, fuzzy msgid "Broken string" msgstr "szöveg" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:570 msgid "No left operand for concatenation operator" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:574 msgid "No right operand for concatenation operator" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:638 msgid "Attribute name expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:643 #, c-format msgid "Attribute name (%s) contains invalid character (%s)" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:649 msgid "Relation operator expected" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:86 msgid "Error parsing the internally set executables attribute." 
msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:102 #, c-format msgid "" "File '%s' in the 'executables' attribute is not present in the 'inputfiles' " "attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:120 msgid "The value of the ftpthreads attribute must be a number from 1 to 10" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:177 msgid "'stdout' attribute must specified when 'join' attribute is specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:181 msgid "" "Attribute 'join' cannot be specified when both 'stdout' and 'stderr' " "attributes is specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:200 #, fuzzy msgid "Attributes 'gridtime' and 'cputime' cannot be specified together" msgstr "A 'sort' vagy az 'rsort' kapcsolókat nem lehet egyszerre használni" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:204 msgid "Attributes 'gridtime' and 'walltime' cannot be specified together" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:226 msgid "" "When specifying 'countpernode' attribute, 'count' attribute must also be " "specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:229 msgid "Value of 'countpernode' attribute must be an integer" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:287 #, fuzzy msgid "No RSL content in job description found" msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:293 msgid "Multi-request job description not allowed in GRIDMANAGER dialect" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:304 msgid "No execuable path specified in GRIDMANAGER dialect" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:311 msgid "'action' attribute not allowed in user-side job description" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:314 msgid "Executable path not specified ('executable' attribute)" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:332 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:350 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:368 #, c-format msgid "Attribute '%s' multiply defined" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:336 #, c-format msgid "Value of attribute '%s' expected to be single value" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:341 #, c-format msgid "Value of attribute '%s' expected to be a string" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:357 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:387 #, c-format msgid "Value of attribute '%s' is not a string" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:375 #, c-format msgid "Value of attribute '%s' is not sequence" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:379 #, c-format msgid "" "Value of attribute '%s' has wrong sequence length: Expected %d, found %d" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:511 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1368 msgid "Unexpected RSL type" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:576 msgid "At least two values are needed for the 'inputfiles' attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:581 msgid "First value of 'inputfiles' attribute (filename) cannot be empty" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:606 #, fuzzy, c-format msgid "Invalid URL '%s' for input file '%s'" msgstr "Érvénytelen URL: %s" #: 
src/hed/acc/JobDescriptionParser/XRSLParser.cpp:615 #, fuzzy, c-format msgid "Invalid URL option syntax in option '%s' for input file '%s'" msgstr "Érvénytelen URL: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:625 #, fuzzy, c-format msgid "Invalid URL: '%s' in input file '%s'" msgstr "Érvénytelen URL: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:694 msgid "At least two values are needed for the 'outputfiles' attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:699 msgid "First value of 'outputfiles' attribute (filename) cannot be empty" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:711 #, fuzzy, c-format msgid "Invalid URL '%s' for output file '%s'" msgstr "Érvénytelen URL: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:720 #, fuzzy, c-format msgid "Invalid URL option syntax in option '%s' for output file '%s'" msgstr "Érvénytelen URL: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:730 #, fuzzy, c-format msgid "Invalid URL: '%s' in output file '%s'" msgstr "Érvénytelen URL: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:761 #, c-format msgid "" "Invalid comparison operator '%s' used at 'queue' attribute in 'GRIDMANAGER' " "dialect, only \"=\" is allowed" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:767 #, c-format msgid "" "Invalid comparison operator '%s' used at 'queue' attribute, only \"!=\" or " "\"=\" are allowed." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1029 msgid "The value of the acl XRSL attribute isn't valid XML." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1043 msgid "The cluster XRSL attribute is currently unsupported." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1059 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it must contain an email " "address" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1067 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it must only contain email " "addresses after state flag(s)" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1070 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it contains unknown state " "flags" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1118 msgid "priority is too large - using max value 100" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1151 #, c-format msgid "Invalid nodeaccess value: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1194 msgid "Value of 'count' attribute must be an integer" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1224 msgid "Value of 'exclusiveexecution' attribute must either be 'yes' or 'no'" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1270 #, c-format msgid "Invalid action value %s" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1360 #, c-format msgid "The specified Globus attribute (%s) is not supported. %s ignored." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1364 #, c-format msgid "Unknown XRSL attribute: %s - Ignoring it." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1378 #, c-format msgid "Wrong language requested: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1384 msgid "Missing executable" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1702 msgid "" "Cannot output XRSL representation: The Resources.SlotRequirement." "NumberOfSlots attribute must be specified when the Resources.SlotRequirement." 
"SlotsPerHost attribute is specified." msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:65 #: src/services/wrappers/python/pythonwrapper.cpp:96 msgid "Failed to initialize main Python thread" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:71 #: src/services/wrappers/python/pythonwrapper.cpp:101 msgid "Main Python thread was not initialized" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:81 #, fuzzy, c-format msgid "Loading Python broker (%i)" msgstr "PythonBroker betöltése" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:104 #: src/services/wrappers/python/pythonwrapper.cpp:139 msgid "Main Python thread is not initialized" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:108 msgid "PythonBroker init" msgstr "PythonBroker betöltése" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:116 msgid "" "Invalid class name. The broker argument for the PythonBroker should be\n" " Filename.Class.args (args is optional), for example SampleBroker." "MyBroker" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:122 #, fuzzy, c-format msgid "Class name: %s" msgstr "osztály neve: %s" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:123 #, fuzzy, c-format msgid "Module name: %s" msgstr "modul neve: %s" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:132 #: src/services/wrappers/python/pythonwrapper.cpp:183 msgid "Cannot convert ARC module name to Python string" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:140 #: src/services/wrappers/python/pythonwrapper.cpp:191 #, fuzzy msgid "Cannot import ARC module" msgstr "Nem tudom importálni az arc modult" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:149 #: src/services/wrappers/python/pythonwrapper.cpp:201 #: src/services/wrappers/python/pythonwrapper.cpp:426 #: src/services/wrappers/python/pythonwrapper.cpp:526 #, fuzzy msgid "Cannot get dictionary of ARC module" msgstr "Nem tudom importálni az arc modult" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:158 msgid "Cannot find ARC UserConfig class" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:166 msgid "UserConfig class is not an object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:173 #, fuzzy msgid "Cannot find ARC JobDescription class" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:181 msgid "JobDescription class is not an object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:188 msgid "Cannot find ARC ExecutionTarget class" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:196 msgid "ExecutionTarget class is not an object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:207 #: src/services/wrappers/python/pythonwrapper.cpp:162 msgid "Cannot convert module name to Python string" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:215 #: src/services/wrappers/python/pythonwrapper.cpp:169 msgid "Cannot import module" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:224 msgid "Cannot get dictionary of custom broker module" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:233 msgid "Cannot find custom broker class" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:241 #, c-format msgid "%s class is not an object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:247 msgid "Cannot create UserConfig argument" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:255 msgid "Cannot convert UserConfig to Python object" msgstr "" #: 
src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:263 #: src/services/wrappers/python/pythonwrapper.cpp:258 msgid "Cannot create argument of the constructor" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:272 #: src/services/wrappers/python/pythonwrapper.cpp:266 msgid "Cannot create instance of Python class" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:278 #, c-format msgid "Python broker constructor called (%d)" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:302 #, c-format msgid "Python broker destructor called (%d)" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:311 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:328 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:361 msgid "Cannot create ExecutionTarget argument" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:319 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:336 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:369 #, c-format msgid "Cannot convert ExecutionTarget (%s) to python object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:393 msgid "Cannot create JobDescription argument" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:401 msgid "Cannot convert JobDescription to python object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:422 msgid "Do sorting using user created python broker" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:53 msgid "Cannot initialize ARCHERY domain name for query" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:60 msgid "Cannot create resolver from /etc/resolv.conf" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:68 msgid "Cannot query service endpoint TXT records from DNS" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:79 msgid "Cannot parse service endpoint TXT records." msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:121 #, c-format msgid "Wrong service record field \"%s\" found in the \"%s\"" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:126 #, c-format msgid "Malformed ARCHERY record found (endpoint url is not defined): %s" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:131 #, c-format msgid "Malformed ARCHERY record found (endpoint type is not defined): %s" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:135 #, c-format msgid "Found service endpoint %s (type %s)" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:150 #, c-format msgid "" "Status for service endpoint \"%s\" is set to inactive in ARCHERY. Skipping." 
msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginEMIR.cpp:101 #, c-format msgid "Found %u service endpoints from the index service at %s" msgstr "" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:102 msgid "Cleaning of UNICORE jobs is not supported" msgstr "" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:131 msgid "Canceling of UNICORE jobs is not supported" msgstr "" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:139 msgid "Renewal of UNICORE jobs is not supported" msgstr "" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:147 msgid "Resumation of UNICORE jobs is not supported" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:67 msgid "Creating a UNICORE client" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:90 src/services/a-rex/test.cpp:154 #: src/services/a-rex/test.cpp:227 src/services/a-rex/test.cpp:275 #: src/services/a-rex/test.cpp:323 src/services/a-rex/test.cpp:371 #: src/tests/client/test_ClientInterface.cpp:73 #: src/tests/client/test_ClientSAML2SSO.cpp:47 #: src/tests/client/test_ClientSAML2SSO.cpp:71 #: src/tests/count/test_client.cpp:64 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:85 #: src/tests/echo/echo_test4axis2c/test_client.cpp:56 #: src/tests/echo/test.cpp:62 src/tests/echo/test_client.cpp:72 #: src/tests/echo/test_clientinterface.cpp:67 #: src/tests/echo/test_clientinterface.cpp:107 #: src/tests/echo/test_clientinterface.cpp:136 #: src/tests/echo/test_clientinterface.py:19 msgid "Creating and sending request" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:182 msgid "Failed to find delegation credentials in client configuration" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:194 #: src/hed/acc/UNICORE/UNICOREClient.cpp:224 src/services/a-rex/test.cpp:88 msgid "Failed to initiate delegation" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:203 #: src/hed/acc/UNICORE/UNICOREClient.cpp:236 msgid "Submission request failed" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:239 msgid "Submission request succeed" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:241 msgid "There was no response to a submission request" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:248 msgid "A response to a submission request was not a SOAP message" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:255 #: src/hed/acc/UNICORE/UNICOREClient.cpp:336 #: src/hed/acc/UNICORE/UNICOREClient.cpp:414 #: src/hed/acc/UNICORE/UNICOREClient.cpp:527 #: src/hed/acc/UNICORE/UNICOREClient.cpp:603 #: src/hed/acc/UNICORE/UNICOREClient.cpp:677 msgid "There is no connection chain configured" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:276 #: src/hed/acc/UNICORE/UNICOREClient.cpp:348 #, c-format msgid "Submission returned failure: %s" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:277 #: src/hed/acc/UNICORE/UNICOREClient.cpp:349 #, c-format msgid "Submission failed, service returned: %s" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:284 msgid "Creating and sending a start job request" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:317 msgid "A start job request failed" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:320 msgid "A start job request succeeded" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:322 msgid "There was no response to a start job request" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:329 msgid "The response of a start job request was not a SOAP message" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:395 msgid "A status request failed" msgstr "Státusz lekérdezés sikertelen" #: 
src/hed/acc/UNICORE/UNICOREClient.cpp:398 msgid "A status request succeed" msgstr "Státusz lekérdezés sikeres" #: src/hed/acc/UNICORE/UNICOREClient.cpp:400 msgid "There was no response to a status request" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:408 msgid "The response of a status request was not a SOAP message" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:433 msgid "The job status could not be retrieved" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:444 msgid "Creating and sending an index service query" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:472 msgid "Creating and sending a service status request" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:508 msgid "A service status request failed" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:511 msgid "A service status request succeeded" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:513 msgid "There was no response to a service status request" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:520 msgid "The response of a service status request was not a SOAP message" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:537 msgid "The service status could not be retrieved" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:584 msgid "A job termination request failed" msgstr "A feladat megszakítása sikertelen" #: src/hed/acc/UNICORE/UNICOREClient.cpp:587 msgid "A job termination request succeed" msgstr "A feladat megszakítása sikeres" #: src/hed/acc/UNICORE/UNICOREClient.cpp:589 msgid "There was no response to a job termination request" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:596 msgid "The response of a job termination request was not a SOAP message" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:658 msgid "A job cleaning request failed" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:661 msgid "A job cleaning request succeed" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:663 msgid "There was no response to a job cleaning request" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:670 msgid "The response of a job cleaning request was not a SOAP message" msgstr "" #: src/hed/acc/ldap/Extractor.h:22 #, c-format msgid "Extractor[%s] (%s): %s = %s" msgstr "" #: src/hed/acc/ldap/Extractor.h:113 src/hed/acc/ldap/Extractor.h:130 #, c-format msgid "Extractor[%s] (%s): %s contains %s" msgstr "" #: src/hed/acc/ldap/JobListRetrieverPluginLDAPGLUE2.cpp:54 #, c-format msgid "Adding endpoint '%s' with interface name %s" msgstr "" #: src/hed/acc/ldap/JobListRetrieverPluginLDAPNG.cpp:63 #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginBDII.cpp:43 #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginEGIIS.cpp:46 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE1.cpp:49 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE2.cpp:47 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPNG.cpp:59 msgid "Can't create information handle - is the ARC ldap DMC plugin available?" msgstr "" #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginBDII.cpp:85 msgid "Adding CREAM computing service" msgstr "" #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginEGIIS.cpp:79 #, c-format msgid "Unknown entry in EGIIS (%s)" msgstr "" #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginEGIIS.cpp:87 msgid "" "Entry in EGIIS is missing one or more of the attributes 'Mds-Service-type', " "'Mds-Service-hn', 'Mds-Service-port' and/or 'Mds-Service-Ldap-suffix'" msgstr "" #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE2.cpp:219 msgid "" "The \"FreeSlotsWithDuration\" attribute is wrongly formatted. 
Ignoring it." msgstr "" #: src/hed/daemon/unix/daemon.cpp:74 #, c-format msgid "Daemonization fork failed: %s" msgstr "" #: src/hed/daemon/unix/daemon.cpp:82 msgid "Watchdog (re)starting application" msgstr "" #: src/hed/daemon/unix/daemon.cpp:87 #, c-format msgid "Watchdog fork failed: %s" msgstr "" #: src/hed/daemon/unix/daemon.cpp:94 msgid "Watchdog starting monitoring" msgstr "" #: src/hed/daemon/unix/daemon.cpp:120 #, c-format msgid "Watchdog detected application exit due to signal %u" msgstr "" #: src/hed/daemon/unix/daemon.cpp:122 #, c-format msgid "Watchdog detected application exited with code %u" msgstr "" #: src/hed/daemon/unix/daemon.cpp:124 msgid "Watchdog detected application exit" msgstr "" #: src/hed/daemon/unix/daemon.cpp:133 msgid "" "Watchdog exiting because application was purposely killed or exited itself" msgstr "" #: src/hed/daemon/unix/daemon.cpp:140 msgid "Watchdog detected application timeout or error - killing process" msgstr "" #: src/hed/daemon/unix/daemon.cpp:151 msgid "Watchdog failed to wait till application exited - sending KILL" msgstr "" #: src/hed/daemon/unix/daemon.cpp:163 msgid "Watchdog failed to kill application - giving up and exiting" msgstr "" #: src/hed/daemon/unix/daemon.cpp:184 msgid "Shutdown daemon" msgstr "Démon leállítása" #: src/hed/daemon/unix/main_unix.cpp:43 src/hed/daemon/win32/main_win32.cpp:27 msgid "shutdown" msgstr "leállítás" #: src/hed/daemon/unix/main_unix.cpp:46 msgid "exit" msgstr "kilép" #: src/hed/daemon/unix/main_unix.cpp:92 src/hed/daemon/win32/main_win32.cpp:53 msgid "No server config part of config file" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:163 #: src/hed/daemon/win32/main_win32.cpp:91 #, c-format msgid "Unknown log level %s" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:173 #: src/hed/daemon/win32/main_win32.cpp:100 #, c-format msgid "Failed to open log file: %s" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:206 #: src/hed/daemon/win32/main_win32.cpp:126 msgid "Start foreground" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:255 #, c-format msgid "XML config file %s does not exist" msgstr "Az XML konfigurációs fájl: %s nem létezik" #: src/hed/daemon/unix/main_unix.cpp:259 src/hed/daemon/unix/main_unix.cpp:274 #: src/hed/daemon/win32/main_win32.cpp:154 #, c-format msgid "Failed to load service configuration from file %s" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:265 #, c-format msgid "INI config file %s does not exist" msgstr "Az INI konfigurációs fájl: %s nem létezik" #: src/hed/daemon/unix/main_unix.cpp:270 msgid "Error evaluating profile" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:286 msgid "Error loading generated configuration" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:292 msgid "Error evaulating profile" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:297 msgid "Failed to load service configuration from any default config file" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:358 msgid "Schema validation error" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:373 #: src/hed/daemon/win32/main_win32.cpp:159 msgid "Configuration root element is not " msgstr "" #: src/hed/daemon/unix/main_unix.cpp:389 #, c-format msgid "Cannot switch to group (%s)" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:399 #, c-format msgid "Cannot switch to primary group for user (%s)" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:404 #, c-format msgid "Cannot switch to user (%s)" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:422 #: src/hed/daemon/win32/main_win32.cpp:176 msgid "Failed to load service side MCCs" msgstr 
"" #: src/hed/daemon/unix/main_unix.cpp:424 #: src/hed/daemon/win32/main_win32.cpp:178 src/services/a-rex/test.cpp:41 #: src/tests/count/test_service.cpp:32 src/tests/echo/test.cpp:30 #: src/tests/echo/test_service.cpp:29 msgid "Service side MCCs are loaded" msgstr "A szolgáltatás oldali MCC-k betöltődtek" #: src/hed/daemon/unix/main_unix.cpp:431 #: src/hed/daemon/win32/main_win32.cpp:185 msgid "Unexpected arguments supplied" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:93 #: src/hed/dmc/acix/DataPointACIX.cpp:342 #: src/hed/dmc/rucio/DataPointRucio.cpp:220 #: src/hed/dmc/rucio/DataPointRucio.cpp:438 #, c-format msgid "No locations found for %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:121 #, c-format msgid "Found none or multiple URLs (%s) in ACIX URL: %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:131 #, c-format msgid "Cannot handle URL %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:138 #, c-format msgid "Could not resolve original source of %s: out of time" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:144 #, c-format msgid "Could not resolve original source of %s: %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:160 #, c-format msgid "Querying ACIX server at %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:161 #, c-format msgid "Calling acix with query %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:167 #, fuzzy, c-format msgid "Failed to query ACIX: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/acix/DataPointACIX.cpp:171 #: src/hed/dmc/acix/DataPointACIX.cpp:308 #, fuzzy, c-format msgid "Failed to parse ACIX response: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/acix/DataPointACIX.cpp:298 #, c-format msgid "ACIX returned %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:319 #, c-format msgid "No locations for %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:325 #, c-format msgid "%s: ACIX Location: %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:327 #, c-format msgid "%s: Location %s not accessible remotely, skipping" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:68 #, c-format msgid "" "checingBartenderURL: Response:\n" "%s" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:154 src/hed/dmc/arc/DataPointARC.cpp:206 #: src/hed/dmc/arc/DataPointARC.cpp:278 src/hed/dmc/arc/DataPointARC.cpp:375 #: src/hed/dmc/arc/DataPointARC.cpp:548 src/hed/dmc/arc/DataPointARC.cpp:609 msgid "Hostname is not implemented for arc protocol" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:245 src/hed/dmc/arc/DataPointARC.cpp:330 #: src/hed/dmc/arc/DataPointARC.cpp:441 src/hed/dmc/arc/DataPointARC.cpp:534 #, c-format msgid "" "nd:\n" "%s" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:263 msgid "Not a collection" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:282 src/hed/dmc/srm/DataPointSRM.cpp:316 msgid "StartReading" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:338 src/hed/dmc/arc/DataPointARC.cpp:449 #: src/hed/dmc/arc/DataPointARC.cpp:601 #, c-format msgid "Recieved transfer URL: %s" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:378 src/hed/dmc/srm/DataPointSRM.cpp:518 msgid "StartWriting" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:490 #, c-format msgid "Calculated checksum: %s" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:554 msgid "Check" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:648 #, c-format msgid "Deleted %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:86 #, c-format msgid "Unknown channel %s for stdio protocol" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:93 #, fuzzy, c-format msgid "Failed to open stdio channel %s" 
msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/file/DataPointFile.cpp:94 #, c-format msgid "Failed to open stdio channel %d" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:335 #, c-format msgid "fsync of file %s failed: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:340 #: src/hed/dmc/file/DataPointFile.cpp:347 #, c-format msgid "closing file %s failed: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:366 #, c-format msgid "File is not accessible: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:372 #: src/hed/dmc/file/DataPointFile.cpp:459 #, c-format msgid "Can't stat file: %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:420 #: src/hed/dmc/file/DataPointFile.cpp:426 #, c-format msgid "Can't stat stdio channel %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:474 #, fuzzy, c-format msgid "%s is not a directory" msgstr "könyvtár" #: src/hed/dmc/file/DataPointFile.cpp:489 src/hed/dmc/s3/DataPointS3.cpp:461 #: src/hed/dmc/s3/DataPointS3.cpp:571 #, c-format msgid "Failed to read object %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:502 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:412 #, c-format msgid "File is not accessible %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:508 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:418 #, c-format msgid "Can't delete directory %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:515 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:425 #, c-format msgid "Can't delete file %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:525 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:335 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1470 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:440 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:460 #: src/services/a-rex/jura/JobLogFile.cpp:657 #: src/services/a-rex/jura/JobLogFile.cpp:1274 #, c-format msgid "Creating directory %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:533 src/hed/dmc/srm/DataPointSRM.cpp:160 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:474 #, fuzzy, c-format msgid "Renaming %s to %s" msgstr "Indok : %s" #: src/hed/dmc/file/DataPointFile.cpp:535 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:483 #, c-format msgid "Can't rename file %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:566 #, c-format msgid "Failed to open %s for reading: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:581 #: src/hed/dmc/file/DataPointFile.cpp:719 #, c-format msgid "Failed to switch user id to %d/%d" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:587 #, c-format msgid "Failed to create/open file %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:603 #, fuzzy msgid "Failed to create thread" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/file/DataPointFile.cpp:683 #, c-format msgid "Invalid url: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:692 src/hed/libs/data/FileCache.cpp:603 #, c-format msgid "Failed to create directory %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:708 #: src/hed/dmc/file/DataPointFile.cpp:727 #, fuzzy, c-format msgid "Failed to create file %s: %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/hed/dmc/file/DataPointFile.cpp:739 #, c-format msgid "setting file %s to size %llu" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:759 #, c-format msgid "Failed to preallocate space for %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:800 src/hed/libs/data/FileCache.cpp:981 #, fuzzy, c-format msgid "Failed to clean up file %s: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/file/DataPointFile.cpp:809 #, c-format msgid "Error 
during file validation. Can't stat file %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:813 #, c-format msgid "" "Error during file validation: Local file size %llu does not match source " "file size %llu for file %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:53 #, c-format msgid "Using proxy %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:54 #, c-format msgid "Using key %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:55 #, c-format msgid "Using cert %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:113 msgid "Locations are missing in destination LFC URL" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:119 #, c-format msgid "Duplicate replica found in LFC: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:121 #, c-format msgid "Adding location: %s - %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:129 #: src/hed/libs/data/DataPointIndex.cpp:161 #, c-format msgid "Add location: url: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:130 #: src/hed/libs/data/DataPointIndex.cpp:162 #, c-format msgid "Add location: metadata: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:150 #: src/hed/dmc/gfal/DataPointGFAL.cpp:310 #, fuzzy, c-format msgid "gfal_open failed: %s" msgstr "Fájl feltöltve %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:163 #: src/hed/dmc/gfal/DataPointGFAL.cpp:223 #: src/hed/dmc/gfal/DataPointGFAL.cpp:249 #: src/hed/dmc/gfal/DataPointGFAL.cpp:324 #: src/hed/dmc/gfal/DataPointGFAL.cpp:403 #: src/hed/dmc/gfal/DataPointGFAL.cpp:430 #, c-format msgid "gfal_close failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:195 #, fuzzy, c-format msgid "gfal_read failed: %s" msgstr "Fájl feltöltve %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:237 msgid "StopReading starts waiting for transfer_condition." msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:239 msgid "StopReading finished waiting for transfer_condition." msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:271 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:66 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:71 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:42 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:47 #, c-format msgid "No locations defined for %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:278 #, fuzzy, c-format msgid "Failed to set LFC replicas: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/gfal/DataPointGFAL.cpp:304 #, c-format msgid "gfal_mkdir failed (%s), trying to write anyway" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:359 #, c-format msgid "DataPointGFAL::write_file got position %d and offset %d, has to seek" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:388 #, c-format msgid "gfal_write failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:418 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:302 msgid "StopWriting starts waiting for transfer_condition." msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:420 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:304 msgid "StopWriting finished waiting for transfer_condition." 
msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:451 #, c-format msgid "gfal_stat failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:496 #, c-format msgid "gfal_listxattr failed, no replica information can be obtained: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:537 #, fuzzy, c-format msgid "gfal_opendir failed: %s" msgstr "Fájl feltöltve %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:549 #, c-format msgid "List will stat the URL %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:556 #, fuzzy, c-format msgid "gfal_closedir failed: %s" msgstr "Fájl feltöltve %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:584 #, c-format msgid "gfal_rmdir failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:587 #, c-format msgid "gfal_unlink failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:604 #, c-format msgid "gfal_mkdir failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:619 #, c-format msgid "gfal_rename failed: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:19 #, c-format msgid "Failed to obtain bytes transferred: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:42 #, c-format msgid "Failed to get initiate GFAL2 parameter handle: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:49 #, c-format msgid "Failed to get initiate new GFAL2 context: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:56 #, c-format msgid "Failed to set GFAL2 monitor callback: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:64 #, c-format msgid "Failed to set overwrite option in GFAL2: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:72 #, c-format msgid "Failed to set GFAL2 transfer timeout, will use default: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:84 msgid "Transfer failed" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:92 #, fuzzy msgid "Transfer succeeded" msgstr "Proxy készítés sikeres" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:38 msgid "ftp_complete_callback: success" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:44 #, c-format msgid "ftp_complete_callback: error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:60 msgid "ftp_check_callback" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:62 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:90 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:116 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:135 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:305 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:340 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:678 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:847 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:879 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:913 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1052 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1104 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1114 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1122 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1130 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1138 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1144 #: src/services/gridftpd/commands.cpp:1225 #: src/services/gridftpd/dataread.cpp:76 #: src/services/gridftpd/dataread.cpp:173 #: src/services/gridftpd/datawrite.cpp:59 #: src/services/gridftpd/datawrite.cpp:146 #, c-format msgid "Globus error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:73 msgid "Excessive data received while checking file access" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:89 msgid "Registration of Globus FTP buffer failed - cancel check" msgstr "" #: 
src/hed/dmc/gridftp/DataPointGridFTP.cpp:115 msgid "check_ftp: globus_ftp_client_size failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:119 msgid "check_ftp: timeout waiting for size" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:124 msgid "check_ftp: failed to get file's size" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:127 #, c-format msgid "check_ftp: obtained size: %lli" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:134 msgid "check_ftp: globus_ftp_client_modification_time failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:138 msgid "check_ftp: timeout waiting for modification_time" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:143 msgid "check_ftp: failed to get file's modification time" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:148 #, c-format msgid "check_ftp: obtained modification date: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:167 msgid "check_ftp: globus_ftp_client_get failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:174 msgid "check_ftp: globus_ftp_client_register_read" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:185 msgid "check_ftp: timeout waiting for partial get" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:215 #, c-format msgid "File delete failed, attempting directory delete for %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:225 msgid "delete_ftp: globus_ftp_client_delete failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:231 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:252 msgid "delete_ftp: timeout waiting for delete" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:246 msgid "delete_ftp: globus_ftp_client_rmdir failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:301 #, c-format msgid "mkdir_ftp: making %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:309 msgid "mkdir_ftp: timeout waiting for mkdir" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:344 msgid "Timeout waiting for mkdir" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:370 msgid "start_reading_ftp" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:374 msgid "start_reading_ftp: globus_ftp_client_get" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:388 msgid "start_reading_ftp: globus_ftp_client_get failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:399 msgid "start_reading_ftp: globus_thread_create failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:418 msgid "stop_reading_ftp: aborting connection" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:425 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:647 #, c-format msgid "Failed to abort transfer of ftp file: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:426 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:648 msgid "Assuming transfer is already aborted or failed." 
msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:433 msgid "stop_reading_ftp: waiting for transfer to finish" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:435 #, c-format msgid "stop_reading_ftp: exiting: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:449 msgid "ftp_read_thread: get and register buffers" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:455 #, c-format msgid "ftp_read_thread: for_read failed - aborting: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:465 #, c-format msgid "ftp_read_thread: data callback failed - aborting: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:477 #, c-format msgid "ftp_read_thread: Globus error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:490 #, c-format msgid "ftp_read_thread: too many registration failures - abort: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:495 #, c-format msgid "ftp_read_thread: failed to register Globus buffer - will try later: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:508 msgid "ftp_read_thread: waiting for eof" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:512 msgid "ftp_read_thread: waiting for buffers released" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:516 msgid "ftp_read_thread: failed to release buffers - leaking" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:521 msgid "ftp_read_thread: exiting" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:539 #, c-format msgid "ftp_read_callback: failure: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:542 msgid "ftp_read_callback: success" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:558 msgid "Failed to get ftp file" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:594 msgid "start_writing_ftp: mkdir" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:597 msgid "start_writing_ftp: mkdir failed - still trying to write" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:599 msgid "start_writing_ftp: put" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:613 msgid "start_writing_ftp: put failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:623 msgid "start_writing_ftp: globus_thread_create failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:640 msgid "StopWriting: aborting connection" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:664 #, c-format msgid "StopWriting: Calculated checksum %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:668 #, c-format msgid "StopWriting: looking for checksum of %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:677 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:912 msgid "list_files_ftp: globus_ftp_client_cksm failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:681 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:916 msgid "list_files_ftp: timeout waiting for cksum" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:688 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:923 msgid "list_files_ftp: no checksum information possible" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:691 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:926 #, c-format msgid "list_files_ftp: checksum %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:694 msgid "" "Checksum type returned by server is different to requested type, cannot " "compare" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:696 #, c-format msgid "Calculated checksum %s matches checksum reported by server" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:699 #, c-format msgid "" "Checksum 
mismatch between calculated checksum %s and checksum reported by " "server %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:721 msgid "ftp_write_thread: get and register buffers" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:725 msgid "ftp_write_thread: for_write failed - aborting" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:743 msgid "ftp_write_thread: data callback failed - aborting" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:759 msgid "ftp_write_thread: waiting for eof" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:763 msgid "ftp_write_thread: waiting for buffers released" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:771 msgid "ftp_write_thread: failed to release buffers - leaking" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:776 msgid "ftp_write_thread: exiting" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:799 #, c-format msgid "ftp_write_callback: failure: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:802 #, c-format msgid "ftp_write_callback: success %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:817 msgid "Failed to store ftp file" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:825 msgid "ftp_put_complete_callback: success" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:841 #, c-format msgid "list_files_ftp: looking for size of %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:845 msgid "list_files_ftp: globus_ftp_client_size failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:851 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:852 msgid "list_files_ftp: timeout waiting for size" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:858 msgid "list_files_ftp: failed to get file's size" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:870 #, c-format msgid "list_files_ftp: looking for modification time of %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:876 msgid "list_files_ftp: globus_ftp_client_modification_time failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:883 msgid "list_files_ftp: timeout waiting for modification_time" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:891 msgid "list_files_ftp: failed to get file's modification time" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:903 #, c-format msgid "list_files_ftp: looking for checksum of %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:942 #, c-format msgid "Failed to obtain stat from FTP: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:948 msgid "No results returned from stat" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:954 #, c-format msgid "Wrong number of objects (%i) for stat from ftp: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:968 #, c-format msgid "Unexpected path %s returned from server" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1007 #, c-format msgid "Failed to obtain listing from FTP: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1050 msgid "Rename: globus_ftp_client_move failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1056 msgid "Rename: timeout waiting for operation to complete" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1103 msgid "init_handle: globus_ftp_client_handleattr_init failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1112 msgid "init_handle: globus_ftp_client_handleattr_set_gridftp2 failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1121 msgid "init_handle: globus_ftp_client_handle_init failed" msgstr "" #: 
src/hed/dmc/gridftp/DataPointGridFTP.cpp:1128 msgid "init_handle: globus_ftp_client_operationattr_init failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1136 msgid "init_handle: globus_ftp_client_operationattr_set_allow_ipv6 failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1142 msgid "init_handle: globus_ftp_client_operationattr_set_delayed_pasv failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1190 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1218 #, c-format msgid "globus_ftp_client_operationattr_set_authorization: error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1217 msgid "Failed to set credentials for GridFTP transfer" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1223 msgid "Using secure data transfer" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1228 msgid "Using insecure data transfer" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1255 msgid "~DataPoint: destroy ftp_handle" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1258 msgid "~DataPoint: destroy ftp_handle failed - retrying" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1276 msgid "~DataPoint: failed to destroy ftp_handle - leaking" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1296 msgid "" "Missing reference to factory and/or module. It is unsafe to use Globus in " "non-persistent mode - (Grid)FTP code is disabled. Report to developers." msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:224 src/hed/dmc/gridftp/Lister.cpp:292 #: src/hed/dmc/gridftp/Lister.cpp:387 src/hed/dmc/gridftp/Lister.cpp:737 #: src/hed/dmc/gridftp/Lister.cpp:775 #, c-format msgid "Failure: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:226 src/hed/dmc/gridftp/Lister.cpp:246 #: src/hed/dmc/gridftp/Lister.cpp:471 src/hed/dmc/gridftp/Lister.cpp:478 #: src/hed/dmc/gridftp/Lister.cpp:500 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:163 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:196 #, c-format msgid "Response: %s" msgstr "Válasz: %s" #: src/hed/dmc/gridftp/Lister.cpp:291 msgid "Error getting list of files (in list)" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:293 msgid "Assuming - file not found" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:310 #, c-format msgid "list record: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:365 msgid "Failed reading list of files" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:401 msgid "Failed reading data" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:429 #, c-format msgid "Command: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:433 src/hed/dmc/gridftp/Lister.cpp:474 #: src/hed/mcc/http/PayloadHTTP.cpp:991 msgid "Memory allocation error" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:441 #, c-format msgid "%s failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:445 msgid "Command is being sent" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:450 msgid "Waiting for response" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:455 msgid "Callback got failure" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:541 msgid "Failed in globus_cond_init" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:545 msgid "Failed in globus_mutex_init" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:552 msgid "Failed allocating memory for handle" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:557 msgid "Failed in globus_ftp_control_handle_init" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:565 #, fuzzy msgid "Failed to enable IPv6" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/gridftp/Lister.cpp:576 src/services/gridftpd/commands.cpp:983 msgid "Closing 
connection" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:583 src/hed/dmc/gridftp/Lister.cpp:598 msgid "Timeout waiting for Globus callback - leaking connection" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:608 msgid "Closed successfully" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:610 msgid "Closing may have failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:637 msgid "Waiting for globus handle to settle" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:642 #, c-format msgid "Handle is not in proper state %u/%u" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:648 msgid "Globus handle is stuck" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:664 #, c-format msgid "Failed destroying handle: %s. Can't handle such situation." msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:687 #, c-format msgid "EPSV failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:691 msgid "EPSV failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:698 #, c-format msgid "PASV failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:702 msgid "PASV failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:735 msgid "Failed to apply local address to data connection" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:749 msgid "Can't parse host and/or port in response to EPSV/PASV" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:754 #, c-format msgid "Data channel: %d.%d.%d.%d:%d" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:769 #, c-format msgid "Data channel: [%s]:%d" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:773 msgid "Obtained host and address are not acceptable" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:783 msgid "Failed to open data channel" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:801 #, c-format msgid "Unsupported protocol in url %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:813 msgid "Reusing connection" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:837 #, c-format msgid "Failed connecting to server %s:%d" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:843 #, c-format msgid "Failed to connect to server %s:%d" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:859 #, fuzzy msgid "Missing authentication information" msgstr "verzió információ kiírása" #: src/hed/dmc/gridftp/Lister.cpp:868 src/hed/dmc/gridftp/Lister.cpp:882 #, c-format msgid "Bad authentication information: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:891 src/hed/dmc/gridftp/Lister.cpp:906 #, fuzzy, c-format msgid "Failed authenticating: %s" msgstr "Nem sikerült listázni a fájlokat" #: src/hed/dmc/gridftp/Lister.cpp:898 msgid "Failed authenticating" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:933 src/hed/dmc/gridftp/Lister.cpp:1089 #, c-format msgid "DCAU failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:937 src/hed/dmc/gridftp/Lister.cpp:1094 msgid "DCAU failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:957 msgid "MLST is not supported - trying LIST" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:973 #, c-format msgid "Immediate completion expected: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:977 msgid "Immediate completion expected" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:990 #, c-format msgid "Missing information in reply: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1024 #, c-format msgid "Missing final reply: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1048 #, c-format msgid "Unexpected immediate completion: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1060 #, c-format msgid "LIST/MLST failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1065 msgid "LIST/MLST failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1115 msgid "MLSD is not supported - trying 
NLST" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1129 #, c-format msgid "Immediate completion: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1137 #, c-format msgid "NLST/MLSD failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1143 msgid "NLST/MLSD failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1164 #, c-format msgid "Data transfer aborted: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1169 msgid "Data transfer aborted" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1181 msgid "Failed to transfer data" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:388 #: src/hed/dmc/http/DataPointHTTP.cpp:517 #: src/hed/dmc/http/DataPointHTTP.cpp:598 #: src/hed/dmc/http/DataPointHTTP.cpp:1000 #: src/hed/dmc/http/DataPointHTTP.cpp:1141 #: src/hed/dmc/http/DataPointHTTP.cpp:1286 #, c-format msgid "Redirecting to %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:670 #, c-format msgid "Stat: obtained size %llu" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:674 #, c-format msgid "Stat: obtained modification time %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:903 #, c-format msgid "Check: obtained size %llu" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:905 #, c-format msgid "Check: obtained modification time %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:1017 #: src/hed/dmc/http/DataPointHTTP.cpp:1161 #, c-format msgid "HTTP failure %u - %s" msgstr "" #: src/hed/dmc/ldap/DataPointLDAP.cpp:36 msgid "" "Missing reference to factory and/or module. Currently safe unloading of LDAP " "DMC is not supported. Report to developers." msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:175 msgid "SASL Interaction" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:223 #, c-format msgid "Challenge: %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:227 #, c-format msgid "Default: %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:303 #, c-format msgid "LDAPQuery: Initializing connection to %s:%d" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:307 #, c-format msgid "LDAP connection already open to %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:325 #, c-format msgid "Could not open LDAP connection to %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:346 #, c-format msgid "Failed to create ldap bind thread (%s)" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:353 #, c-format msgid "Ldap bind timeout (%s)" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:360 #, c-format msgid "Failed to bind to ldap server (%s)" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:381 #, c-format msgid "Could not set LDAP network timeout (%s)" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:389 #, c-format msgid "Could not set LDAP timelimit (%s)" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:396 #, c-format msgid "Could not set LDAP protocol version (%s)" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:472 #, c-format msgid "LDAPQuery: Querying %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:474 #, c-format msgid " base dn: %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:476 #, c-format msgid " filter: %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:478 msgid " attributes:" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:481 #: src/services/gridftpd/misc/ldapquery.cpp:399 #, c-format msgid " %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:526 src/hed/dmc/ldap/LDAPQuery.cpp:598 #, c-format msgid "%s (%s)" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:550 #, c-format msgid "LDAPQuery: Getting results from %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:553 #, c-format msgid "Error: no LDAP query started to %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:593 #, c-format 
msgid "LDAP query timed out: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:23 #, c-format msgid "Replacing existing token for %s in Rucio token cache" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:36 #, c-format msgid "Found existing token for %s in Rucio token cache with expiry time %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:39 #, c-format msgid "Rucio token for %s has expired or is about to expire" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:101 #, c-format msgid "Extracted nickname %s from credentials to use for RUCIO_ACCOUNT" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:104 msgid "Failed to extract VOMS nickname from proxy" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:106 #, c-format msgid "Using Rucio account %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:146 #, c-format msgid "" "Bad path for %s: Rucio supports read/write at /objectstores and read-only " "at /replicas" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:162 #, c-format msgid "Can't handle URL %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:303 #, c-format msgid "Acquired auth token for %s: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:357 #, c-format msgid "Rucio returned %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:382 #, fuzzy, c-format msgid "Failed to parse Rucio response: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/rucio/DataPointRucio.cpp:388 #, c-format msgid "Filename not returned in Rucio response: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:394 #, c-format msgid "Unexpected name returned in Rucio response: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:400 #, c-format msgid "No RSE information returned in Rucio response: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:423 #, c-format msgid "No filesize information returned in Rucio response for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:426 #, c-format msgid "%s: size %llu" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:430 #, c-format msgid "No checksum information returned in Rucio response for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:433 #, c-format msgid "%s: checksum %s" msgstr "" #: src/hed/dmc/s3/DataPointS3.cpp:648 #, fuzzy, c-format msgid "Failed to write object %s: %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/hed/dmc/srm/DataPointSRM.cpp:67 #, c-format msgid "Check: looking for metadata: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:79 #, c-format msgid "Check: obtained size: %lli" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:85 #, c-format msgid "Check: obtained checksum: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:89 #, c-format msgid "Check: obtained modification date: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:93 msgid "Check: obtained access latency: low (ONLINE)" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:97 msgid "Check: obtained access latency: high (NEARLINE)" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:119 #, c-format msgid "Remove: deleting: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:139 #, c-format msgid "Creating directory: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:190 src/hed/dmc/srm/DataPointSRM.cpp:243 msgid "Calling PrepareReading when request was already prepared!" 
msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:212 #, c-format msgid "File %s is NEARLINE, will make request to bring online" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:222 #, c-format msgid "Bring online request %s is still in queue, should wait" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:227 #, c-format msgid "Bring online request %s finished successfully, file is now ONLINE" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:234 #, c-format msgid "" "Bad logic for %s - bringOnline returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:263 src/hed/dmc/srm/DataPointSRM.cpp:424 msgid "None of the requested transfer protocols are supported" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:278 #, c-format msgid "Get request %s is still in queue, should wait %i seconds" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:286 src/hed/dmc/srm/DataPointSRM.cpp:488 #, c-format msgid "Checking URL returned by SRM: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:301 src/hed/dmc/srm/DataPointSRM.cpp:503 #, c-format msgid "SRM returned no useful Transfer URLs: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:308 #, c-format msgid "" "Bad logic for %s - getTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:318 msgid "StartReading: File was not prepared properly" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:332 src/hed/dmc/srm/DataPointSRM.cpp:534 #, c-format msgid "TURL %s cannot be handled" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:340 src/hed/dmc/srm/DataPointSRM.cpp:542 #, c-format msgid "Redirecting to new URL: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:404 msgid "Calling PrepareWriting when request was already prepared!" 
msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:435 msgid "No space token specified" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:441 msgid "Warning: Using SRM protocol v1 which does not support space tokens" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:444 #, c-format msgid "Using space token description %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:450 #, c-format msgid "Error looking up space tokens matching description %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:457 #, c-format msgid "No space tokens found matching description %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:464 #, c-format msgid "Using space token %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:480 #, c-format msgid "Put request %s is still in queue, should wait %i seconds" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:510 #, c-format msgid "" "Bad logic for %s - putTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:520 msgid "StartWriting: File was not prepared properly" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:593 #, c-format msgid "FinishWriting: looking for metadata: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:610 #, c-format msgid "FinishWriting: obtained checksum: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:613 #, c-format msgid "" "Calculated/supplied transfer checksum %s matches checksum reported by SRM " "destination %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:616 #, c-format msgid "" "Checksum mismatch between calculated/supplied checksum (%s) and checksum " "reported by SRM destination (%s)" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:619 #, c-format msgid "" "Checksum type of SRM (%s) and calculated/supplied checksum (%s) differ, " "cannot compare" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:620 src/hed/dmc/srm/DataPointSRM.cpp:621 msgid "No checksum information from server" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:622 src/hed/dmc/srm/DataPointSRM.cpp:623 msgid "No checksum verification possible" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:629 #, fuzzy msgid "Failed to release completed request" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/srm/DataPointSRM.cpp:673 src/hed/dmc/srm/DataPointSRM.cpp:740 #, c-format msgid "ListFiles: looking for metadata: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:806 #, c-format msgid "plugin for transport protocol %s is not installed" msgstr "" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:55 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:94 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:146 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:185 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:225 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:263 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:307 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:369 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:442 msgid "SRM did not return any information" msgstr "" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:320 #, c-format msgid "File could not be moved to Running state: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:376 msgid "SRM did not return any useful information" msgstr "" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:454 msgid "File could not be moved to Done state" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:92 msgid "Could not determine version of server" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:98 #, c-format msgid "Server SRM version: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:103 #, c-format msgid "Server implementation: 
%s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:140 #, c-format msgid "Adding space token %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:167 msgid "No request tokens found" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:180 #, c-format msgid "Adding request token %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:241 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:646 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:832 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1389 #, c-format msgid "%s: File request %s in SRM queue. Sleeping for %i seconds" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:279 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:331 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:702 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:768 #, c-format msgid "File is ready! TURL is %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:363 #, c-format msgid "Setting userRequestDescription to %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:418 #, c-format msgid "%s: Bring online request %s in SRM queue. Sleeping for %i seconds" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:461 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1164 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1198 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1232 msgid "No request token specified!" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:528 msgid "Request is reported as ABORTED, but all files are done" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:534 msgid "Request is reported as ABORTED, since it was cancelled" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:540 #, c-format msgid "Request is reported as ABORTED. Reason: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:677 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:749 #, c-format msgid "Path %s is invalid, creating required directories" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:682 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:754 #, c-format msgid "Error creating required directories for %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:855 msgid "Too many files in one request - please try again with fewer files" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:903 msgid "" "Directory size is too large to list in one call, will have to call multiple " "times" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:940 msgid "" "Failure in parsing response from server - some information may be inaccurate" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:946 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:635 #: src/services/a-rex/jura/ApelDestination.cpp:215 #: src/services/a-rex/jura/LutsDestination.cpp:192 #: src/services/gridftpd/misc/ldapquery.cpp:183 #: src/services/gridftpd/misc/ldapquery.cpp:186 #: src/services/gridftpd/misc/ldapquery.cpp:392 #: src/services/gridftpd/misc/ldapquery.cpp:623 #: src/services/gridftpd/misc/ldapquery.cpp:632 #, c-format msgid "%s: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:979 #, c-format msgid "" "Directory size is larger than %i files, will have to call multiple times" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1189 #, c-format msgid "Files associated with request token %s released successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1223 #, c-format msgid "Files associated with request token %s put done successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1258 #, c-format msgid "Files associated with request token %s aborted 
successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1275 #, c-format msgid "" "Failed to find metadata info on %s for determining file or directory delete" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1281 msgid "Type is file, calling srmRm" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1285 msgid "Type is dir, calling srmRmDir" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1289 msgid "File type is not available, attempting file delete" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1292 msgid "File delete failed, attempting directory delete" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1317 #, c-format msgid "File %s removed successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1344 #, c-format msgid "Directory %s removed successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1459 #, c-format msgid "Checking for existence of %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1462 #, c-format msgid "File already exists: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1499 #, c-format msgid "Error creating directory %s: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:82 #, c-format msgid "Attempting to contact %s on port %i" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:88 #, c-format msgid "Storing port %i for %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:102 #, c-format msgid "No port succeeded for %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:112 #, c-format msgid "URL %s disagrees with stored SRM info, testing new info" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:118 #, c-format msgid "Replacing old SRM info with new for URL %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:140 #, fuzzy, c-format msgid "SOAP request: %s" msgstr "Kérés: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:147 #: src/hed/dmc/srm/srmclient/SRMClient.cpp:176 #, c-format msgid "SOAP fault: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:148 msgid "Reconnecting" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:158 #, c-format msgid "SRM Client status: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:171 #: src/hed/identitymap/ArgusPDPClient.cpp:250 #, fuzzy, c-format msgid "SOAP response: %s" msgstr "Válasz: %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:76 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:162 #, fuzzy, c-format msgid "Failed to acquire lock on file %s" msgstr "Nem sikerült listázni a fájlokat" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:81 #, fuzzy, c-format msgid "Error reading info from file %s:%s" msgstr "Nem tudom olvasni a forrásokat a fájlból: %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:95 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:187 #, c-format msgid "Bad or old format detected in file %s, in line %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:100 #, c-format msgid "Cannot convert string %s to int in line %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:203 #, c-format msgid "Error writing srm info file %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:91 #, c-format msgid "Reading %u bytes from byte %llu" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:93 #, c-format msgid "Read %i bytes" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:124 #, c-format msgid "Could not open file %s for reading: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:139 #, c-format msgid "Unable to find file size of %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:203 #, c-format msgid 
"DataPointXrootd::write_file got position %d and offset %d, has to seek" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:226 #, c-format msgid "xrootd write failed: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:235 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:309 #, c-format msgid "xrootd close failed: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:258 #, c-format msgid "Failed to open %s, trying to create parent directories" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:271 #, c-format msgid "xrootd open failed: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:285 #, fuzzy, c-format msgid "close failed: %s" msgstr "Fájl feltöltve %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:327 #, c-format msgid "Read access not allowed for %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:346 #, c-format msgid "Could not stat file %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:381 #, fuzzy, c-format msgid "Failed to open directory %s: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:396 #, c-format msgid "Error while reading dir %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:446 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:464 #, c-format msgid "Error creating required dirs: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:158 msgid "PDPD location is missing" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:161 #, fuzzy, c-format msgid "PDPD location: %s" msgstr "Célállomás: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:166 #: src/hed/identitymap/ArgusPEPClient.cpp:129 msgid "Conversion mode is set to SUBJECT" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:169 #: src/hed/identitymap/ArgusPEPClient.cpp:132 msgid "Conversion mode is set to CREAM" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:172 #: src/hed/identitymap/ArgusPEPClient.cpp:135 msgid "Conversion mode is set to EMI" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:175 #: src/hed/identitymap/ArgusPEPClient.cpp:138 #, c-format msgid "Unknown conversion mode %s, using default" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:242 #, c-format msgid "Failed to contact PDP server: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:245 #, fuzzy, c-format msgid "There was no SOAP response return from PDP server: %s" msgstr "Nincs SOAP-os válasz" #: src/hed/identitymap/ArgusPDPClient.cpp:360 #: src/hed/identitymap/ArgusPEPClient.cpp:286 #, c-format msgid "Have %i requests to process" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:362 msgid "Creating a client to Argus PDP service" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:375 #, fuzzy, c-format msgid "XACML authorisation request: %s" msgstr "A feladat megszakítása sikeres" #: src/hed/identitymap/ArgusPDPClient.cpp:386 #, c-format msgid "XACML authorisation response: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:426 #: src/hed/identitymap/ArgusPEPClient.cpp:333 #, c-format msgid "%s is not authorized to do action %s in resource %s " msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:429 #: src/hed/identitymap/ArgusPDPClient.cpp:434 #: src/hed/identitymap/ArgusPEPClient.cpp:336 msgid "Not authorized" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:439 #: src/hed/identitymap/ArgusPEPClient.cpp:341 #: src/hed/identitymap/IdentityMap.cpp:219 #: src/hed/shc/legacy/LegacyMap.cpp:215 #, c-format msgid "Grid identity is mapped to local identity '%s'" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:566 #: src/hed/identitymap/ArgusPEPClient.cpp:655 
msgid "Doing CREAM request" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:580 #: src/hed/identitymap/ArgusPDPClient.cpp:748 #: src/hed/identitymap/ArgusPEPClient.cpp:683 #, c-format msgid "Adding profile-id value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:592 #: src/hed/identitymap/ArgusPDPClient.cpp:759 #: src/hed/identitymap/ArgusPEPClient.cpp:694 #, c-format msgid "Adding subject-id value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:600 #: src/hed/identitymap/ArgusPDPClient.cpp:767 #: src/hed/identitymap/ArgusPEPClient.cpp:704 #, c-format msgid "Adding subject-issuer value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:607 #: src/hed/identitymap/ArgusPEPClient.cpp:713 #, c-format msgid "Adding virtual-organization value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:620 #: src/hed/identitymap/ArgusPEPClient.cpp:730 #, c-format msgid "Adding FQAN value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:629 #: src/hed/identitymap/ArgusPEPClient.cpp:739 #, c-format msgid "Adding FQAN/primary value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:639 #: src/hed/identitymap/ArgusPEPClient.cpp:750 #, c-format msgid "Adding cert chain value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:648 #: src/hed/identitymap/ArgusPDPClient.cpp:840 #, c-format msgid "Adding resource-id value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:662 #: src/hed/identitymap/ArgusPDPClient.cpp:863 #: src/hed/identitymap/ArgusPEPClient.cpp:775 #, c-format msgid "Adding action-id value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:666 #: src/hed/identitymap/ArgusPEPClient.cpp:786 #, c-format msgid "CREAM request generation failed: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:732 msgid "Doing EMI request" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:774 #, c-format msgid "Adding Virtual Organization value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:797 #, c-format msgid "Adding VOMS group value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:803 #, c-format msgid "Adding VOMS primary group value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:822 #, c-format msgid "Adding VOMS role value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:829 #, c-format msgid "Adding VOMS primary role value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:846 #, c-format msgid "Adding resource-owner value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:867 #, c-format msgid "EMI request generation failed: %s" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:119 msgid "PEPD location is missing" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:122 #, fuzzy, c-format msgid "PEPD location: %s" msgstr "Célállomás: %s" #: src/hed/identitymap/ArgusPEPClient.cpp:126 msgid "Conversion mode is set to DIRECT" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:331 #, c-format msgid "" "Not authorized according to request:\n" "%s" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:361 msgid "Subject of request is null \n" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:366 #, c-format msgid "Can not create XACML SubjectAttribute: %s\n" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:375 msgid "Can not create XACML Resource \n" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:381 #, c-format msgid "Can not create XACML ResourceAttribute: %s\n" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:390 msgid "Can not create XACML Action\n" msgstr "" #: 
src/hed/identitymap/ArgusPEPClient.cpp:397 #, c-format msgid "Can not create XACML ActionAttribute: %s\n" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:407 msgid "Can not create XACML request\n" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:539 #, c-format msgid "Converting to CREAM action - namespace: %s, operation: %s" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:760 #, c-format msgid "Adding resource-id value: %s" msgstr "" #: src/hed/identitymap/IdentityMap.cpp:196 #, c-format msgid "PDP: %s can not be loaded" msgstr "" #: src/hed/libs/common/ArcLocation.cpp:68 #, c-format msgid "" "Can not determine the install location. Using %s. Please set ARC_LOCATION if " "this is not correct." msgstr "" #: src/hed/libs/common/DateTime.cpp:86 src/hed/libs/common/DateTime.cpp:631 #: src/hed/libs/common/StringConv.h:25 msgid "Empty string" msgstr "" #: src/hed/libs/common/DateTime.cpp:107 #, c-format msgid "Can not parse date: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:130 #, c-format msgid "Can not parse time: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:160 #, c-format msgid "Can not parse time zone offset: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:180 src/hed/libs/common/DateTime.cpp:199 #: src/hed/libs/common/DateTime.cpp:252 src/hed/libs/common/DateTime.cpp:291 #, c-format msgid "Illegal time format: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:230 src/hed/libs/common/DateTime.cpp:283 #, c-format msgid "Can not parse month: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:647 src/hed/libs/common/DateTime.cpp:688 #, c-format msgid "Invalid ISO duration format: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:752 #, c-format msgid "Invalid period string: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:874 msgid "hour" msgid_plural "hours" msgstr[0] "" msgstr[1] "" #: src/hed/libs/common/DateTime.cpp:880 msgid "minute" msgid_plural "minutes" msgstr[0] "" msgstr[1] "" #: src/hed/libs/common/DateTime.cpp:886 msgid "second" msgid_plural "seconds" msgstr[0] "" msgstr[1] "" #: src/hed/libs/common/FileLock.cpp:48 msgid "Cannot determine hostname from gethostname()" msgstr "" #: src/hed/libs/common/FileLock.cpp:97 #, c-format msgid "EACCES Error opening lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:102 #, c-format msgid "Error opening lock file %s in initial check: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:109 #, c-format msgid "Error creating temporary file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:118 #, c-format msgid "Could not create link to lock file %s as it already exists" msgstr "" #: src/hed/libs/common/FileLock.cpp:129 #, c-format msgid "Could not create lock file %s as it already exists" msgstr "" #: src/hed/libs/common/FileLock.cpp:133 #, c-format msgid "Error creating lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:138 #, c-format msgid "Error writing to lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:146 #, c-format msgid "Error linking tmp file %s to lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:155 #, c-format msgid "Error in lock file %s, even though linking did not return an error" msgstr "" #: src/hed/libs/common/FileLock.cpp:164 #, c-format msgid "%li seconds since lock file %s was created" msgstr "" #: src/hed/libs/common/FileLock.cpp:167 #, c-format msgid "Timeout has expired, will remove lock file %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:171 #, c-format msgid "Failed to remove stale lock file %s: %s" msgstr "" #: 
src/hed/libs/common/FileLock.cpp:183 #, c-format msgid "This process already owns the lock on %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:189 #, c-format msgid "" "The process owning the lock on %s is no longer running, will remove lock" msgstr "" #: src/hed/libs/common/FileLock.cpp:191 #, fuzzy, c-format msgid "Failed to remove file %s: %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/hed/libs/common/FileLock.cpp:200 #, c-format msgid "The file %s is currently locked with a valid lock" msgstr "" #: src/hed/libs/common/FileLock.cpp:215 #, c-format msgid "Failed to unlock file with lock %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:227 #, c-format msgid "Lock file %s doesn't exist" msgstr "" #: src/hed/libs/common/FileLock.cpp:229 #, c-format msgid "Error listing lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:235 #, c-format msgid "Found unexpected empty lock file %s. Must go back to acquire()" msgstr "" #: src/hed/libs/common/FileLock.cpp:241 #, c-format msgid "Error reading lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:245 #, c-format msgid "Error with formatting in lock file %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:255 #, c-format msgid "Lock %s is owned by a different host (%s)" msgstr "" #: src/hed/libs/common/FileLock.cpp:264 #, c-format msgid "Badly formatted pid %s in lock file %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:267 #, c-format msgid "Another process (%s) owns the lock on file %s" msgstr "" #: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(empty)" msgstr "" #: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(null)" msgstr "" #: src/hed/libs/common/Logger.cpp:60 msgid "Invalid log level. Using default " msgstr "" #: src/hed/libs/common/Logger.cpp:125 msgid "Invalid old log level. Using default " msgstr "" #: src/hed/libs/common/OptionParser.cpp:107 #, c-format msgid "Cannot parse integer value '%s' for -%c" msgstr "" #: src/hed/libs/common/OptionParser.cpp:265 msgid "Use -? to get usage description" msgstr "" #: src/hed/libs/common/OptionParser.cpp:342 msgid "Usage:" msgstr "" #: src/hed/libs/common/OptionParser.cpp:345 msgid "OPTION..." msgstr "" #: src/hed/libs/common/OptionParser.cpp:351 msgid "Help Options:" msgstr "" #: src/hed/libs/common/OptionParser.cpp:352 msgid "Show help options" msgstr "" #: src/hed/libs/common/OptionParser.cpp:354 msgid "Application Options:" msgstr "" #: src/hed/libs/common/Profile.cpp:199 src/hed/libs/common/Profile.cpp:273 #: src/hed/libs/common/Profile.cpp:404 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"inisections\" " "attribute cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:205 src/hed/libs/common/Profile.cpp:279 #: src/hed/libs/common/Profile.cpp:411 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initag\" attribute " "cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:419 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initype\" " "attribute cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:422 #, c-format msgid "" "Element \"%s\" in the profile ignored: the \"inidefaultvalue\" attribute " "cannot be specified when the \"inisections\" and \"initag\" attributes have " "not been specified." 
msgstr "" #: src/hed/libs/common/Profile.cpp:497 #, c-format msgid "" "In the configuration profile the 'initype' attribute on the \"%s\" element " "has a invalid value \"%s\"." msgstr "" #: src/hed/libs/common/StringConv.h:31 #, c-format msgid "Conversion failed: %s" msgstr "" #: src/hed/libs/common/StringConv.h:35 #, c-format msgid "Full string not used: %s" msgstr "" #: src/hed/libs/common/Thread.cpp:212 msgid "Maximum number of threads running - puting new request into queue" msgstr "" #: src/hed/libs/common/Thread.cpp:253 #, c-format msgid "Thread exited with Glib error: %s" msgstr "" #: src/hed/libs/common/Thread.cpp:255 #, c-format msgid "Thread exited with Glib exception: %s" msgstr "" #: src/hed/libs/common/Thread.cpp:257 #, c-format msgid "Thread exited with generic exception: %s" msgstr "" #: src/hed/libs/common/URL.cpp:121 #, c-format msgid "URL is not valid: %s" msgstr "" #: src/hed/libs/common/URL.cpp:192 #, c-format msgid "Illegal URL - path must be absolute: %s" msgstr "" #: src/hed/libs/common/URL.cpp:197 #, c-format msgid "Illegal URL - no hostname given: %s" msgstr "" #: src/hed/libs/common/URL.cpp:286 #, c-format msgid "Illegal URL - path must be absolute or empty: %s" msgstr "" #: src/hed/libs/common/URL.cpp:302 #, c-format msgid "Illegal URL - no closing ] for IPv6 address found: %s" msgstr "" #: src/hed/libs/common/URL.cpp:310 #, c-format msgid "" "Illegal URL - closing ] for IPv6 address is followed by illegal token: %s" msgstr "" #: src/hed/libs/common/URL.cpp:326 #, c-format msgid "Invalid port number in %s" msgstr "" #: src/hed/libs/common/URL.cpp:425 #, c-format msgid "Unknown LDAP scope %s - using base" msgstr "" #: src/hed/libs/common/URL.cpp:587 msgid "Attempt to assign relative path to URL - making it absolute" msgstr "" #: src/hed/libs/common/URL.cpp:686 #, c-format msgid "URL option %s does not have format name=value" msgstr "" #: src/hed/libs/common/URL.cpp:1151 #, c-format msgid "urllist %s contains invalid URL: %s" msgstr "" #: src/hed/libs/common/URL.cpp:1156 #, c-format msgid "URL protocol is not urllist: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:30 #: src/hed/libs/common/UserConfig.cpp:681 #: src/hed/libs/common/UserConfig.cpp:690 #: src/hed/libs/common/UserConfig.cpp:696 #: src/hed/libs/common/UserConfig.cpp:718 #: src/hed/libs/common/UserConfig.cpp:728 #: src/hed/libs/common/UserConfig.cpp:740 #: src/hed/libs/common/UserConfig.cpp:760 #, c-format msgid "Multiple %s attributes in configuration file (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:82 #, fuzzy, c-format msgid "Wrong ownership of certificate file: %s" msgstr "publikus kulcs elérési útvonala" #: src/hed/libs/common/UserConfig.cpp:84 #, fuzzy, c-format msgid "Wrong permissions of certificate file: %s" msgstr "publikus kulcs elérési útvonala" #: src/hed/libs/common/UserConfig.cpp:86 #, c-format msgid "Can not access certificate file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:93 #, c-format msgid "Wrong ownership of key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:95 #, c-format msgid "Wrong permissions of key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:97 #, c-format msgid "Can not access key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:104 #, c-format msgid "Wrong ownership of proxy file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:106 #, c-format msgid "Wrong permissions of proxy file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:108 #, c-format msgid "Can not access proxy file: %s" msgstr "" #: 
src/hed/libs/common/UserConfig.cpp:119 msgid "computing" msgstr "" #: src/hed/libs/common/UserConfig.cpp:121 msgid "index" msgstr "" #: src/hed/libs/common/UserConfig.cpp:165 #: src/hed/libs/common/UserConfig.cpp:171 #: src/hed/libs/common/UserConfig.cpp:223 #: src/hed/libs/common/UserConfig.cpp:229 #, c-format msgid "System configuration file (%s) contains errors." msgstr "" #: src/hed/libs/common/UserConfig.cpp:176 #: src/hed/libs/common/UserConfig.cpp:234 #, c-format msgid "System configuration file (%s or %s) does not exist." msgstr "" #: src/hed/libs/common/UserConfig.cpp:178 #: src/hed/libs/common/UserConfig.cpp:180 #: src/hed/libs/common/UserConfig.cpp:236 #: src/hed/libs/common/UserConfig.cpp:238 #, c-format msgid "System configuration file (%s) does not exist." msgstr "" #: src/hed/libs/common/UserConfig.cpp:187 #: src/hed/libs/common/UserConfig.cpp:199 #: src/hed/libs/common/UserConfig.cpp:245 #: src/hed/libs/common/UserConfig.cpp:257 #, c-format msgid "User configuration file (%s) contains errors." msgstr "" #: src/hed/libs/common/UserConfig.cpp:192 #: src/hed/libs/common/UserConfig.cpp:250 msgid "No configuration file could be loaded." msgstr "" #: src/hed/libs/common/UserConfig.cpp:195 #: src/hed/libs/common/UserConfig.cpp:253 #, c-format msgid "User configuration file (%s) does not exist or cannot be loaded." msgstr "" #: src/hed/libs/common/UserConfig.cpp:310 #, c-format msgid "" "Unable to parse the specified verbosity (%s) to one of the allowed levels" msgstr "" #: src/hed/libs/common/UserConfig.cpp:322 #, c-format msgid "" "Unsupported job list type '%s', using 'BDB'. Supported types are: BDB, " "SQLITE, XML." msgstr "" #: src/hed/libs/common/UserConfig.cpp:503 #, c-format msgid "Certificate and key ('%s' and '%s') not found in any of the paths: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:505 #, c-format msgid "" "If the proxy or certificate/key does exist, you can manually specify the " "locations via environment variables '%s'/'%s' or '%s', or the '%s'/'%s' or '%" "s' attributes in the client configuration file (e.g. '%s')" msgstr "" #: src/hed/libs/common/UserConfig.cpp:522 #, c-format msgid "" "Can not access CA certificates directory: %s. The certificates will not be " "verified." msgstr "" #: src/hed/libs/common/UserConfig.cpp:532 #, c-format msgid "" "Can not access CA certificate directory: %s. The certificates will not be " "verified." msgstr "" #: src/hed/libs/common/UserConfig.cpp:558 #, c-format msgid "" "Can not find CA certificates directory in default locations:\n" "~/.arc/certificates, ~/.globus/certificates,\n" "%s/etc/certificates, %s/etc/grid-security/certificates,\n" "%s/share/certificates, /etc/grid-security/certificates.\n" "The certificate will not be verified.\n" "If the CA certificates directory does exist, please manually specify the " "locations via env\n" "X509_CERT_DIR, or the cacertificatesdirectory item in client.conf\n" msgstr "" #: src/hed/libs/common/UserConfig.cpp:579 #, c-format msgid "Using proxy file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:582 #, c-format msgid "Using certificate file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:583 #, c-format msgid "Using key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:587 #, c-format msgid "Using CA certificate directory: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:600 #: src/hed/libs/common/UserConfig.cpp:606 #, c-format msgid "Can not access VOMSES file/directory: %s." 
msgstr "" #: src/hed/libs/common/UserConfig.cpp:612 #, c-format msgid "Can not access VOMS file/directory: %s." msgstr "" #: src/hed/libs/common/UserConfig.cpp:631 msgid "" "Can not find voms service configuration file (vomses) in default locations: " "~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/" "grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses" msgstr "" #: src/hed/libs/common/UserConfig.cpp:644 #, c-format msgid "Loading configuration (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:678 #, c-format msgid "" "The value of the timeout attribute in the configuration file (%s) was only " "partially parsed" msgstr "" #: src/hed/libs/common/UserConfig.cpp:703 msgid "" "The brokerarguments attribute can only be used in conjunction with the " "brokername attribute" msgstr "" #: src/hed/libs/common/UserConfig.cpp:715 #, c-format msgid "" "The value of the keysize attribute in the configuration file (%s) was only " "partially parsed" msgstr "" #: src/hed/libs/common/UserConfig.cpp:735 #, c-format msgid "" "Could not convert the slcs attribute value (%s) to an URL instance in " "configuration file (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:781 #, c-format msgid "Specified overlay file (%s) does not exist." msgstr "" #: src/hed/libs/common/UserConfig.cpp:785 #, c-format msgid "" "Unknown attribute %s in common section of configuration file (%s), ignoring " "it" msgstr "" #: src/hed/libs/common/UserConfig.cpp:826 #, c-format msgid "Unknown section %s, ignoring it" msgstr "" #: src/hed/libs/common/UserConfig.cpp:830 #, c-format msgid "Configuration (%s) loaded" msgstr "" #: src/hed/libs/common/UserConfig.cpp:833 #, c-format msgid "Could not load configuration (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:928 #, c-format msgid "UserConfiguration saved to file (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:941 #, c-format msgid "Unable to create %s directory." msgstr "" #: src/hed/libs/common/UserConfig.cpp:950 #, c-format msgid "Configuration example file created (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:952 #, c-format msgid "Unable to copy example configuration from existing configuration (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:956 #, c-format msgid "Cannot copy example configuration (%s), it is not a regular file" msgstr "" #: src/hed/libs/common/UserConfig.cpp:961 #, c-format msgid "Example configuration (%s) not created." msgstr "" #: src/hed/libs/common/UserConfig.cpp:966 #, c-format msgid "The default configuration file (%s) is not a regular file." 
msgstr "" #: src/hed/libs/common/UserConfig.cpp:984 #, c-format msgid "%s directory created" msgstr "" #: src/hed/libs/common/UserConfig.cpp:986 #: src/hed/libs/common/UserConfig.cpp:1025 src/hed/libs/data/DataMover.cpp:684 #, c-format msgid "Failed to create directory %s" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:58 msgid "This VERBOSE message should not be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:62 msgid "This INFO message should be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:73 msgid "This VERBOSE message should now be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:79 msgid "This INFO message should also be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:93 msgid "This message goes to initial destination" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:108 msgid "This message goes to per-thread destination" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:80 msgid "Request failed: No response from SPService" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:84 #: src/hed/libs/communication/ClientSAML2SSO.cpp:137 msgid "Request failed: response from SPService is not as expected" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:92 #, c-format msgid "Authentication Request URL: %s" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:133 msgid "Request failed: No response from IdP" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:184 msgid "Request failed: No response from IdP when doing redirecting" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:188 msgid "" "Request failed: response from IdP is not as expected when doing redirecting" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:245 msgid "Request failed: No response from IdP when doing authentication" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:249 msgid "" "Request failed: response from IdP is not as expected when doing " "authentication" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:294 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:300 msgid "Succeeded to verify the signature under " msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:296 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:303 msgid "Failed to verify the signature under " msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:310 msgid "" "Request failed: No response from SP Service when sending SAML assertion to SP" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:314 msgid "" "Request failed: response from SP Service is not as expected when sending " "SAML assertion to SP" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:325 #, c-format msgid "IdP return some error message: %s" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:353 #: src/hed/libs/communication/ClientSAML2SSO.cpp:398 msgid "SAML2SSO process failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:54 msgid "Creating delegation credential to ARC delegation service" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:64 #: src/hed/libs/communication/ClientX509Delegation.cpp:267 msgid "DelegateCredentialsInit failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:68 #: src/hed/libs/communication/ClientX509Delegation.cpp:122 #: src/hed/libs/communication/ClientX509Delegation.cpp:157 #: src/hed/libs/communication/ClientX509Delegation.cpp:212 #: src/hed/libs/communication/ClientX509Delegation.cpp:271 msgid "There is no SOAP response" msgstr "" #: 
src/hed/libs/communication/ClientX509Delegation.cpp:73 msgid "There is no X509 request in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:78 msgid "There is no Format request in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:86 msgid "There is no Id or X509 request value in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:99 #: src/hed/libs/communication/ClientX509Delegation.cpp:187 msgid "DelegateProxy failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:118 msgid "UpdateCredentials failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:126 msgid "There is no UpdateCredentialsResponse in response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:134 #: src/hed/libs/communication/ClientX509Delegation.cpp:162 #: src/hed/libs/communication/ClientX509Delegation.cpp:217 #: src/hed/libs/communication/ClientX509Delegation.cpp:302 msgid "There is no SOAP connection chain configured" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:140 msgid "Creating delegation to CREAM delegation service" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:153 msgid "Delegation getProxyReq request failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:173 msgid "Creating delegation to CREAM delegation service failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:208 msgid "Delegation putProxy request failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:222 msgid "Creating delegation to CREAM delegation failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:237 msgid "Getting delegation credential from ARC delegation service" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:276 msgid "There is no Delegated X509 token in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:281 msgid "There is no Format delegated token in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:289 msgid "There is no Id or X509 token value in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:298 #, c-format msgid "" "Get delegated credential from delegation service: \n" " %s" msgstr "" #: src/hed/libs/compute/Broker.cpp:62 #, c-format msgid "Performing matchmaking against target (%s)." msgstr "" #: src/hed/libs/compute/Broker.cpp:72 #, c-format msgid "Matchmaking, ExecutionTarget: %s matches job description" msgstr "" #: src/hed/libs/compute/Broker.cpp:145 #, c-format msgid "" "The CA issuer (%s) of the credentials (%s) is not trusted by the target (%s)." msgstr "" #: src/hed/libs/compute/Broker.cpp:153 src/hed/libs/compute/Broker.cpp:171 #, c-format msgid "ComputingShareName of ExecutionTarget (%s) is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:157 src/hed/libs/compute/Broker.cpp:162 #, c-format msgid "ComputingShare (%s) explicitly rejected" msgstr "" #: src/hed/libs/compute/Broker.cpp:175 src/hed/libs/compute/Broker.cpp:180 #, c-format msgid "ComputingShare (%s) does not match selected queue (%s)" msgstr "" #: src/hed/libs/compute/Broker.cpp:189 #, c-format msgid "" "ProcessingStartTime (%s) specified in job description is inside the targets " "downtime period [ %s - %s ]." msgstr "" #: src/hed/libs/compute/Broker.cpp:194 #, c-format msgid "The downtime of the target (%s) is not published. Keeping target." 
msgstr "" #: src/hed/libs/compute/Broker.cpp:200 #, c-format msgid "HealthState of ExecutionTarget (%s) is not OK (%s)" msgstr "" #: src/hed/libs/compute/Broker.cpp:205 #, c-format msgid "Matchmaking, ExecutionTarget: %s, HealthState is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:212 #, c-format msgid "" "Matchmaking, Computing endpoint requirement not satisfied. ExecutionTarget: %" "s" msgstr "" #: src/hed/libs/compute/Broker.cpp:217 #, c-format msgid "Matchmaking, ExecutionTarget: %s, ImplementationName is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:243 #, c-format msgid "" "Matchmaking, %s (%d) is %s than %s (%d) published by the ExecutionTarget." msgstr "" #: src/hed/libs/compute/Broker.cpp:272 #, c-format msgid "" "Matchmaking, The %s scaled %s (%d) is %s than the %s (%d) published by the " "ExecutionTarget." msgstr "" #: src/hed/libs/compute/Broker.cpp:284 #, c-format msgid "Matchmaking, Benchmark %s is not published by the ExecutionTarget." msgstr "" #: src/hed/libs/compute/Broker.cpp:299 #, c-format msgid "" "Matchmaking, MaxTotalCPUTime problem, ExecutionTarget: %d (MaxTotalCPUTime), " "JobDescription: %d (TotalCPUTime)" msgstr "" #: src/hed/libs/compute/Broker.cpp:306 #, c-format msgid "" "Matchmaking, MaxCPUTime problem, ExecutionTarget: %d (MaxCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" msgstr "" #: src/hed/libs/compute/Broker.cpp:311 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxTotalCPUTime or MaxCPUTime not " "defined, assuming no CPU time limit" msgstr "" #: src/hed/libs/compute/Broker.cpp:317 #, c-format msgid "" "Matchmaking, MinCPUTime problem, ExecutionTarget: %d (MinCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" msgstr "" #: src/hed/libs/compute/Broker.cpp:322 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MinCPUTime not defined, assuming no CPU " "time limit" msgstr "" #: src/hed/libs/compute/Broker.cpp:330 #, c-format msgid "" "Matchmaking, MainMemorySize problem, ExecutionTarget: %d (MainMemorySize), " "JobDescription: %d (IndividualPhysicalMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:336 #, c-format msgid "" "Matchmaking, MaxMainMemory problem, ExecutionTarget: %d (MaxMainMemory), " "JobDescription: %d (IndividualPhysicalMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:341 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxMainMemory and MainMemorySize are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:349 #, c-format msgid "" "Matchmaking, MaxVirtualMemory problem, ExecutionTarget: %d " "(MaxVirtualMemory), JobDescription: %d (IndividualVirtualMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:354 #, c-format msgid "Matchmaking, ExecutionTarget: %s, MaxVirtualMemory is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:362 #, c-format msgid "" "Matchmaking, Platform problem, ExecutionTarget: %s (Platform) " "JobDescription: %s (Platform)" msgstr "" #: src/hed/libs/compute/Broker.cpp:367 #, c-format msgid "Matchmaking, ExecutionTarget: %s, Platform is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:375 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, OperatingSystem requirements not satisfied" msgstr "" #: src/hed/libs/compute/Broker.cpp:380 #, c-format msgid "Matchmaking, ExecutionTarget: %s, OperatingSystem is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:388 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, RunTimeEnvironment requirements not " "satisfied" msgstr "" #: 
src/hed/libs/compute/Broker.cpp:393 #, c-format msgid "Matchmaking, ExecutionTarget: %s, ApplicationEnvironments not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:402 #, c-format msgid "" "Matchmaking, NetworkInfo demand not fulfilled, ExecutionTarget does not " "support %s, specified in the JobDescription." msgstr "" #: src/hed/libs/compute/Broker.cpp:406 #, c-format msgid "Matchmaking, ExecutionTarget: %s, NetworkInfo is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:414 #, c-format msgid "" "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (SessionDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:421 #, c-format msgid "" "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (SessionDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:427 src/hed/libs/compute/Broker.cpp:448 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxDiskSpace and WorkingAreaFree are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:435 #, c-format msgid "" "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (DiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:442 #, c-format msgid "" "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (DiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:456 #, c-format msgid "" "Matchmaking, CacheTotal problem, ExecutionTarget: %d MB (CacheTotal); " "JobDescription: %d MB (CacheDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:461 #, c-format msgid "Matchmaking, ExecutionTarget: %s, CacheTotal is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:469 #, c-format msgid "" "Matchmaking, TotalSlots problem, ExecutionTarget: %d (TotalSlots) " "JobDescription: %d (NumberOfProcesses)" msgstr "" #: src/hed/libs/compute/Broker.cpp:475 #, c-format msgid "" "Matchmaking, MaxSlotsPerJob problem, ExecutionTarget: %d (MaxSlotsPerJob) " "JobDescription: %d (NumberOfProcesses)" msgstr "" #: src/hed/libs/compute/Broker.cpp:481 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, TotalSlots and MaxSlotsPerJob are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:489 #, c-format msgid "" "Matchmaking, WorkingAreaLifeTime problem, ExecutionTarget: %s " "(WorkingAreaLifeTime) JobDescription: %s (SessionLifeTime)" msgstr "" #: src/hed/libs/compute/Broker.cpp:494 #, c-format msgid "Matchmaking, ExecutionTarget: %s, WorkingAreaLifeTime is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:502 #, c-format msgid "" "Matchmaking, ConnectivityIn problem, ExecutionTarget: %s (ConnectivityIn) " "JobDescription: %s (InBound)" msgstr "" #: src/hed/libs/compute/Broker.cpp:509 #, c-format msgid "" "Matchmaking, ConnectivityOut problem, ExecutionTarget: %s (ConnectivityOut) " "JobDescription: %s (OutBound)" msgstr "" #: src/hed/libs/compute/Broker.cpp:532 msgid "Unable to sort added jobs. The BrokerPlugin plugin has not been loaded." msgstr "" #: src/hed/libs/compute/Broker.cpp:549 msgid "Unable to match target, marking it as not matching. Broker not valid." msgstr "" #: src/hed/libs/compute/Broker.cpp:585 msgid "Unable to sort ExecutionTarget objects - Invalid Broker object." msgstr "" #: src/hed/libs/compute/Broker.cpp:609 msgid "" "Unable to register job submission. Can't get JobDescription object from " "Broker, Broker is invalid." 
msgstr "" #: src/hed/libs/compute/BrokerPlugin.cpp:89 #, c-format msgid "Broker plugin \"%s\" not found." msgstr "" #: src/hed/libs/compute/BrokerPlugin.cpp:96 #, fuzzy, c-format msgid "Unable to load BrokerPlugin (%s)" msgstr "Nem sikerült betölteni a %s bróker modult" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:26 #, c-format msgid "Uniq is replacing service coming from %s with service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:30 #, c-format msgid "Uniq is ignoring service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:37 #, c-format msgid "Uniq is adding service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:60 #, c-format msgid "Adding endpoint (%s) to TargetInformationRetriever" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:63 #, c-format msgid "Adding endpoint (%s) to ServiceEndpointRetriever" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:66 #, c-format msgid "" "Adding endpoint (%s) to both ServiceEndpointRetriever and " "TargetInformationRetriever" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:42 #, c-format msgid "The plugin %s does not support any interfaces, skipping it." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:47 #, c-format msgid "" "The first supported interface of the plugin %s is an empty string, skipping " "the plugin." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:95 #, c-format msgid "Interface on endpoint (%s) %s." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:101 #: src/hed/libs/compute/EntityRetriever.cpp:133 #: src/hed/libs/compute/EntityRetriever.cpp:425 #, c-format msgid "Ignoring endpoint (%s), it is already registered in retriever." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:110 #, c-format msgid "Service Loop: Endpoint %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:112 #, c-format msgid " This endpoint (%s) is STARTED or SUCCESSFUL" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:115 #, c-format msgid "" "Suspending querying of endpoint (%s) since the service at the endpoint is " "already being queried, or has been queried." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:122 #: src/hed/libs/compute/EntityRetriever.cpp:237 #, c-format msgid " Status of endpoint (%s) is %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:126 #, c-format msgid "Setting status (STARTED) for endpoint: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:145 #, c-format msgid "Starting thread to query the endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:147 #: src/hed/libs/compute/EntityRetriever.cpp:289 #, c-format msgid "Failed to start querying the endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:174 #, c-format msgid "Found a registry, will query it recursively: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:211 #, c-format msgid "Setting status (%s) for endpoint: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:231 msgid "Checking for suspended endpoints which should be started." 
msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:241 #, c-format msgid "Found started or successful endpoint (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:253 #, c-format msgid "Found suspended endpoint (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:264 #, c-format msgid "Trying to start suspended endpoint (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:284 #, c-format msgid "" "Starting querying of suspended endpoint (%s) - no other endpoints for this " "service is being queried or has been queried successfully." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:351 #, c-format msgid "Calling plugin %s to query endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:373 #, c-format msgid "" "The interface of this endpoint (%s) is unspecified, will try all possible " "plugins" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:389 #, c-format msgid "Problem loading plugin %s, skipping it." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:393 #, c-format msgid "The endpoint (%s) is not supported by this plugin (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:414 #, c-format msgid "" "New endpoint is created (%s) from the one with the unspecified interface (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:432 #, c-format msgid "Starting sub-thread to query the endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:434 #, c-format msgid "" "Failed to start querying the endpoint on %s (unable to create sub-thread)" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:32 #, c-format msgid "Found %s %s (it was loaded already)" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:41 #, c-format msgid "" "Unable to locate the \"%s\" plugin. Please refer to installation " "instructions and check if package providing support for %s plugin is " "installed" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:42 #, c-format msgid "%s plugin \"%s\" not found." msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:49 #: src/hed/libs/compute/JobControllerPlugin.cpp:100 #: src/hed/libs/compute/JobControllerPlugin.cpp:109 #: src/hed/libs/compute/SubmitterPlugin.cpp:158 #: src/hed/libs/compute/SubmitterPlugin.cpp:168 #, c-format msgid "" "Unable to locate the \"%s\" plugin. Please refer to installation " "instructions and check if package providing support for \"%s\" plugin is " "installed" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:50 #, fuzzy, c-format msgid "%s %s could not be created." msgstr "A feladatot nem sikerült megölni vagy letörölni" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:55 #, fuzzy, c-format msgid "Loaded %s %s" msgstr "Feltöltve %s" #: src/hed/libs/compute/ExecutionTarget.cpp:51 #, c-format msgid "" "Skipping ComputingEndpoint '%s', because it has '%s' interface instead of " "the requested '%s'." 
msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:237 #, fuzzy, c-format msgid "Address: %s" msgstr "Válasz: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:238 #, fuzzy, c-format msgid "Place: %s" msgstr "Név: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:239 #, fuzzy, c-format msgid "Country: %s" msgstr "Forrás: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:240 #, c-format msgid "Postal code: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:241 #, c-format msgid "Latitude: %f" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:242 #, c-format msgid "Longitude: %f" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:248 #, c-format msgid "Owner: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:255 #, c-format msgid "ID: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:256 #, fuzzy, c-format msgid "Type: %s" msgstr "Proxy típusa: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:261 #, fuzzy, c-format msgid "URL: %s" msgstr "Érvénytelen URL: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:262 #, fuzzy, c-format msgid "Interface: %s" msgstr "Forrás: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:264 #, fuzzy msgid "Interface versions:" msgstr "Felhasználó oldali hiba" #: src/hed/libs/compute/ExecutionTarget.cpp:269 msgid "Interface extensions:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:274 msgid "Capabilities:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:278 #, c-format msgid "Technology: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:280 msgid "Supported Profiles:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:284 #, fuzzy, c-format msgid "Implementor: %s" msgstr "modul neve: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:285 #, fuzzy, c-format msgid "Implementation name: %s" msgstr "modul neve: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:286 #, c-format msgid "Quality level: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:287 #, c-format msgid "Health state: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:288 #, fuzzy, c-format msgid "Health state info: %s" msgstr "Célállomás: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:289 #, c-format msgid "Serving state: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:290 #, c-format msgid "Issuer CA: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:292 msgid "Trusted CAs:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:296 #, c-format msgid "Downtime starts: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:297 #, c-format msgid "Downtime ends: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:298 #, fuzzy, c-format msgid "Staging: %s" msgstr "Célállomás: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:300 #, fuzzy msgid "Job descriptions:" msgstr "Feladat leírás: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:312 #, fuzzy, c-format msgid "Scheme: %s" msgstr "Forrás: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:315 #, fuzzy, c-format msgid "Rule: %s" msgstr "Kérés: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:327 #, fuzzy, c-format msgid "Mapping queue: %s" msgstr "Kérés: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:328 #, c-format msgid "Max wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:329 #, c-format msgid "Max total wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:330 #, c-format msgid "Min wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:331 #, c-format msgid "Default wall-time: %s" msgstr "" #: 
src/hed/libs/compute/ExecutionTarget.cpp:332 #, c-format msgid "Max CPU time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:333 #, c-format msgid "Min CPU time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:334 #, c-format msgid "Default CPU time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:335 #, c-format msgid "Max total jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:336 #, c-format msgid "Max running jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:337 #, c-format msgid "Max waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:338 #, c-format msgid "Max pre-LRMS waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:339 #, c-format msgid "Max user running jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:340 #, c-format msgid "Max slots per job: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:341 #, c-format msgid "Max stage in streams: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:342 #, c-format msgid "Max stage out streams: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:343 #, c-format msgid "Scheduling policy: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:344 #, c-format msgid "Max memory: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:345 #, c-format msgid "Max virtual memory: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:346 #, c-format msgid "Max disk space: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:347 #, c-format msgid "Default Storage Service: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:348 msgid "Supports preemption" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:349 msgid "Doesn't support preemption" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:350 #, c-format msgid "Total jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:351 #, c-format msgid "Running jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:352 #, c-format msgid "Local running jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:353 #, c-format msgid "Waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:354 #, c-format msgid "Local waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:355 #, c-format msgid "Suspended jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:356 #, c-format msgid "Local suspended jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:357 #, c-format msgid "Staging jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:358 #, c-format msgid "Pre-LRMS waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:359 #, c-format msgid "Estimated average waiting time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:360 #, c-format msgid "Estimated worst waiting time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:361 #, c-format msgid "Free slots: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:363 msgid "Free slots grouped according to time limits (limit: free slots):" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:366 #, c-format msgid " %s: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:367 #, c-format msgid " unspecified: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:370 #, c-format msgid "Used slots: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:371 #, fuzzy, c-format msgid "Requested slots: %i" msgstr "Kérés: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:372 #, 
c-format msgid "Reservation policy: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:379 #, fuzzy, c-format msgid "Resource manager: %s" msgstr "modul neve: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:380 #, c-format msgid " (%s)" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:383 #, c-format msgid "Total physical CPUs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:384 #, c-format msgid "Total logical CPUs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:385 #, c-format msgid "Total slots: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:386 msgid "Supports advance reservations" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:387 msgid "Doesn't support advance reservations" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:388 msgid "Supports bulk submission" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:389 msgid "Doesn't support bulk Submission" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:390 msgid "Homogeneous resource" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:391 msgid "Non-homogeneous resource" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:393 #, fuzzy msgid "Network information:" msgstr "verzió információ kiírása" #: src/hed/libs/compute/ExecutionTarget.cpp:398 msgid "Working area is shared among jobs" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:399 msgid "Working area is not shared among jobs" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:400 #, c-format msgid "Working area total size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:401 #, c-format msgid "Working area free size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:402 #, c-format msgid "Working area life time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:403 #, c-format msgid "Cache area total size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:404 #, c-format msgid "Cache area free size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:410 #, fuzzy, c-format msgid "Platform: %s" msgstr "Név: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:411 msgid "Execution environment supports inbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:412 msgid "Execution environment does not support inbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:413 msgid "Execution environment supports outbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:414 msgid "Execution environment does not support outbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:415 msgid "Execution environment is a virtual machine" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:416 msgid "Execution environment is a physical machine" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:417 #, c-format msgid "CPU vendor: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:418 #, c-format msgid "CPU model: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:419 #, fuzzy, c-format msgid "CPU version: %s" msgstr "%s verzió %s" #: src/hed/libs/compute/ExecutionTarget.cpp:420 #, c-format msgid "CPU clock speed: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:421 #, c-format msgid "Main memory size: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:422 #, c-format msgid "OS family: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:423 #, fuzzy, c-format msgid "OS name: %s" msgstr "Név: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:424 #, fuzzy, c-format msgid "OS 
version: %s" msgstr "%s verzió %s" #: src/hed/libs/compute/ExecutionTarget.cpp:431 msgid "Computing service:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:455 #, c-format msgid "%d Endpoints" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:460 #, fuzzy msgid "Endpoint Information:" msgstr "verzió információ kiírása" #: src/hed/libs/compute/ExecutionTarget.cpp:472 #, c-format msgid "%d Batch Systems" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:477 msgid "Batch System Information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:483 msgid "Installed application environments:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:496 #, c-format msgid "%d Shares" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:501 #, fuzzy msgid "Share Information:" msgstr "verzió információ kiírása" #: src/hed/libs/compute/ExecutionTarget.cpp:507 #, c-format msgid "%d mapping policies" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:511 msgid "Mapping policy:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:527 #, c-format msgid "Execution Target on Computing Service: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:529 #, c-format msgid " Computing endpoint URL: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:531 #, c-format msgid " Computing endpoint interface name: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:533 #: src/hed/libs/compute/Job.cpp:580 #, c-format msgid " Queue: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:536 #, c-format msgid " Mapping queue: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:539 #, c-format msgid " Health state: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:544 msgid "Service information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:549 msgid " Installed application environments:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:556 #, fuzzy msgid "Batch system information:" msgstr "verzió információ kiírása" #: src/hed/libs/compute/ExecutionTarget.cpp:559 msgid "Queue information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:566 #, fuzzy msgid " Benchmark information:" msgstr "verzió információ kiírása" #: src/hed/libs/compute/GLUE2.cpp:58 msgid "The ComputingService doesn't advertise its Quality Level." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:117 msgid "The ComputingEndpoint doesn't advertise its Quality Level." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:128 msgid "The ComputingService doesn't advertise its Interface." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:160 msgid "The ComputingEndpoint doesn't advertise its Serving State." msgstr "" #: src/hed/libs/compute/Job.cpp:329 msgid "Unable to detect format of job record." 
msgstr "" #: src/hed/libs/compute/Job.cpp:550 #, c-format msgid "Job: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:552 #, c-format msgid " Name: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:553 #, fuzzy, c-format msgid " State: %s" msgstr "Név: %s" #: src/hed/libs/compute/Job.cpp:556 #, c-format msgid " Specific state: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:560 src/hed/libs/compute/Job.cpp:584 #, c-format msgid " Waiting Position: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:564 #, c-format msgid " Exit Code: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:568 #, fuzzy, c-format msgid " Job Error: %s" msgstr "Feladat leírás: %s" #: src/hed/libs/compute/Job.cpp:573 #, c-format msgid " Owner: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:577 #, c-format msgid " Other Messages: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:582 #, fuzzy, c-format msgid " Requested Slots: %d" msgstr "Kérés: %s" #: src/hed/libs/compute/Job.cpp:587 #, c-format msgid " Stdin: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:589 #, c-format msgid " Stdout: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:591 #, c-format msgid " Stderr: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:593 #, c-format msgid " Computing Service Log Directory: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:596 #, c-format msgid " Submitted: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:599 #, c-format msgid " End Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:602 #, c-format msgid " Submitted from: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:605 #, c-format msgid " Submitting client: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:608 #, c-format msgid " Requested CPU Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:612 #, c-format msgid " Used CPU Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:615 #, c-format msgid " Used Wall Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:618 #, c-format msgid " Used Memory: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:622 #, c-format msgid " Results were deleted: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:623 #, c-format msgid " Results must be retrieved before: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:627 #, c-format msgid " Proxy valid until: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:631 #, c-format msgid " Entry valid from: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:634 #, c-format msgid " Entry valid for: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:638 msgid " Old job IDs:" msgstr "" #: src/hed/libs/compute/Job.cpp:646 #, c-format msgid " ID on service: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:647 #, c-format msgid " Service information URL: %s (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:648 #, c-format msgid " Job status URL: %s (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:649 #, c-format msgid " Job management URL: %s (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:650 #, c-format msgid " Stagein directory URL: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:651 #, c-format msgid " Stageout directory URL: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:652 #, c-format msgid " Session directory URL: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:654 msgid " Delegation IDs:" msgstr "" #: src/hed/libs/compute/Job.cpp:670 #, c-format msgid "Unable to handle job (%s), no interface specified." 
msgstr "" #: src/hed/libs/compute/Job.cpp:675 #, c-format msgid "" "Unable to handle job (%s), no plugin associated with the specified interface " "(%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:697 #, c-format msgid "Invalid download destination path specified (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:702 #, c-format msgid "" "Unable to download job (%s), no JobControllerPlugin plugin was set to handle " "the job." msgstr "" #: src/hed/libs/compute/Job.cpp:706 #, c-format msgid "Downloading job: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:710 #, c-format msgid "" "Cant retrieve job files for job (%s) - unable to determine URL of stage out " "directory" msgstr "" #: src/hed/libs/compute/Job.cpp:715 #, c-format msgid "Invalid stage out path specified (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:722 #, c-format msgid "%s directory exist! Skipping job." msgstr "" #: src/hed/libs/compute/Job.cpp:728 #, c-format msgid "Unable to retrieve list of job files to download for job %s" msgstr "" #: src/hed/libs/compute/Job.cpp:733 #, c-format msgid "No files to retrieve for job %s" msgstr "" #: src/hed/libs/compute/Job.cpp:739 #, c-format msgid "Failed to create directory %s! Skipping job." msgstr "" #: src/hed/libs/compute/Job.cpp:752 #, fuzzy, c-format msgid "Failed downloading %s to %s, destination already exist" msgstr "Nem sikerült feloldani a célállomást" #: src/hed/libs/compute/Job.cpp:758 #, c-format msgid "Failed downloading %s to %s, unable to remove existing destination" msgstr "" #: src/hed/libs/compute/Job.cpp:764 #, c-format msgid "Failed downloading %s to %s" msgstr "" #: src/hed/libs/compute/Job.cpp:777 src/hed/libs/compute/Job.cpp:782 #, c-format msgid "Unable to list files at %s" msgstr "" #: src/hed/libs/compute/Job.cpp:824 msgid "Now copying (from -> to)" msgstr "" #: src/hed/libs/compute/Job.cpp:825 #, c-format msgid " %s -> %s" msgstr "" #: src/hed/libs/compute/Job.cpp:841 #, c-format msgid "Unable to initialise connection to source: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:852 #, c-format msgid "Unable to initialise connection to destination: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:871 #, c-format msgid "File download failed: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:910 src/hed/libs/compute/Job.cpp:939 #: src/hed/libs/compute/Job.cpp:971 src/hed/libs/compute/Job.cpp:1004 #, c-format msgid "Waiting for lock on file %s" msgstr "" #: src/hed/libs/compute/JobControllerPlugin.cpp:101 #, c-format msgid "JobControllerPlugin plugin \"%s\" not found." 
msgstr "" #: src/hed/libs/compute/JobControllerPlugin.cpp:110 #, fuzzy, c-format msgid "JobControllerPlugin %s could not be created" msgstr "A feladat vezérlő modult nem sikerült betölteni" #: src/hed/libs/compute/JobControllerPlugin.cpp:115 #, fuzzy, c-format msgid "Loaded JobControllerPlugin %s" msgstr "A feladat vezérlő modult nem sikerült betölteni" #: src/hed/libs/compute/JobDescription.cpp:22 #, c-format msgid ": %d" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:24 #, c-format msgid ": %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:138 msgid " --- DRY RUN --- " msgstr "" #: src/hed/libs/compute/JobDescription.cpp:148 #, fuzzy, c-format msgid " Annotation: %s" msgstr "Célállomás: %s" #: src/hed/libs/compute/JobDescription.cpp:154 #, c-format msgid " Old activity ID: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:160 #, c-format msgid " Argument: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:171 #, c-format msgid " RemoteLogging (optional): %s (%s)" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:174 #, c-format msgid " RemoteLogging: %s (%s)" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:182 #, c-format msgid " Environment.name: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:183 #, c-format msgid " Environment: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:196 #, c-format msgid " PreExecutable.Argument: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:199 #: src/hed/libs/compute/JobDescription.cpp:217 #, c-format msgid " Exit code for successful execution: %d" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:202 #: src/hed/libs/compute/JobDescription.cpp:220 msgid " No exit code for successful execution specified." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:214 #, c-format msgid " PostExecutable.Argument: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:230 #, c-format msgid " Access control: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:234 #, c-format msgid " Processing start time: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:237 msgid " Notify:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:251 #, c-format msgid " Credential service: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:261 msgid " Operating system requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:279 msgid " Computing endpoint requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:292 msgid " Node access: inbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:295 msgid " Node access: outbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:298 msgid " Node access: inbound and outbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:308 msgid " Job requires exclusive execution" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:311 msgid " Job does not require exclusive execution" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:316 msgid " Run time environment requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:328 msgid " Inputfile element:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:329 #: src/hed/libs/compute/JobDescription.cpp:351 #, c-format msgid " Name: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:331 msgid " Is executable: true" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:335 #, fuzzy, c-format msgid " Sources: %s" msgstr "Forrás: %s" #: src/hed/libs/compute/JobDescription.cpp:337 #, c-format msgid " Sources.DelegationID: %s" msgstr "" #: 
src/hed/libs/compute/JobDescription.cpp:341 #, c-format msgid " Sources.Options: %s = %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:350 msgid " Outputfile element:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:354 #, c-format msgid " Targets: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:356 #, c-format msgid " Targets.DelegationID: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:360 #, c-format msgid " Targets.Options: %s = %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:367 #, c-format msgid " DelegationID element: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:374 #, c-format msgid " Other attributes: [%s], %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:440 msgid "Empty job description source string" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:473 #, fuzzy msgid "No job description parsers available" msgstr "Nincs megadva feladat leírás bemeneti adatként" #: src/hed/libs/compute/JobDescription.cpp:475 #, c-format msgid "" "No job description parsers suitable for handling '%s' language are available" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:483 #, c-format msgid "%s parsing error" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:499 #, fuzzy msgid "No job description parser was able to interpret job description" msgstr "Feladat leírás elküldve ide: %s" #: src/hed/libs/compute/JobDescription.cpp:509 msgid "" "Job description language is not specified, unable to output description." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:521 #, fuzzy, c-format msgid "Generating %s job description output" msgstr "Egy hiba lépett fel a feladat leírás elkészítése közben." #: src/hed/libs/compute/JobDescription.cpp:537 #, c-format msgid "Language (%s) not recognized by any job description parsers." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:550 #, c-format msgid "Two input files have identical name '%s'." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:569 #: src/hed/libs/compute/JobDescription.cpp:582 #, fuzzy, c-format msgid "Cannot stat local input file '%s'" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/hed/libs/compute/JobDescription.cpp:602 #, c-format msgid "Cannot find local input file '%s' (%s)" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:644 msgid "Unable to select runtime environment" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:651 msgid "Unable to select middleware" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:658 msgid "Unable to select operating system." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:677 #, c-format msgid "No test-job with ID %d found." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:689 #, c-format msgid "Test was defined with ID %d, but some error occurred during parsing it." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:693 #, fuzzy, c-format msgid "No jobdescription resulted at %d test" msgstr "Feladat leírás elküldve ide: %s" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:52 #, c-format msgid "JobDescriptionParserPlugin plugin \"%s\" not found." 
msgstr "" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:59 #, c-format msgid "JobDescriptionParserPlugin %s could not be created" msgstr "" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:64 #, c-format msgid "Loaded JobDescriptionParserPlugin %s" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:172 #, fuzzy msgid "Unable to create temporary directory" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:180 #, c-format msgid "Unable to create data base environment (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:190 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:194 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:198 #, c-format msgid "Unable to set duplicate flags for secondary key DB (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:204 #, c-format msgid "Unable to create job database (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:208 #, c-format msgid "Unable to create DB for secondary name keys (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:212 #, c-format msgid "Unable to create DB for secondary endpoint keys (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:216 #, c-format msgid "Unable to create DB for secondary service info keys (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:221 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:225 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:229 #, c-format msgid "Unable to associate secondary DB with primary DB (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:232 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:92 #, c-format msgid "Job database created successfully (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:274 #, c-format msgid "Error from BDB: %s: %s" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:277 #, c-format msgid "Error from BDB: %s" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:297 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:137 #: src/hed/libs/compute/JobInformationStorageXML.cpp:27 #, c-format msgid "" "Job list file cannot be created: The parent directory (%s) doesn't exist." msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:301 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:141 #: src/hed/libs/compute/JobInformationStorageXML.cpp:31 #, c-format msgid "Job list file cannot be created: %s is not a directory" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:308 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:148 #: src/hed/libs/compute/JobInformationStorageXML.cpp:38 #, c-format msgid "Job list file (%s) is not a regular file" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:346 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:405 #, c-format msgid "Unable to write key/value pair to job database (%s): Key \"%s\"" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:572 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:435 #: src/hed/libs/compute/JobInformationStorageXML.cpp:137 #, c-format msgid "Unable to truncate job database (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:603 msgid "" "ENOENT: The file or directory does not exist, Or a nonexistent re_source " "file was specified." 
msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:606 msgid "" "DB_OLD_VERSION: The database cannot be opened without being first upgraded." msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:609 msgid "EEXIST: DB_CREATE and DB_EXCL were specified and the database exists." msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:611 msgid "EINVAL" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:614 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:468 #, fuzzy, c-format msgid "Unable to determine error (%d)" msgstr "Nem sikerült betölteni a %s bróker modult" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:57 #, fuzzy, c-format msgid "Unable to create data base (%s)" msgstr "Nem sikerült listázni a meta adatokat" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:72 #, fuzzy, c-format msgid "Unable to create jobs table in data base (%s)" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:80 #, fuzzy, c-format msgid "Unable to create index for jobs table in data base (%s)" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:88 #, fuzzy, c-format msgid "Failed checking database (%s)" msgstr "Nem sikerült listázni a fájlokat" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:113 #, fuzzy, c-format msgid "Error from SQLite: %s: %s" msgstr "Nem tudom olvasni a forrásokat a fájlból: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:116 #, c-format msgid "Error from SQLite: %s" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:246 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:253 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:260 #, c-format msgid "Unable to write records into job database (%s): Id \"%s\"" msgstr "" #: src/hed/libs/compute/JobInformationStorageXML.cpp:51 #: src/hed/libs/compute/JobInformationStorageXML.cpp:223 #: src/hed/libs/compute/JobInformationStorageXML.cpp:264 #, fuzzy, c-format msgid "Waiting for lock on job list file %s" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/hed/libs/compute/JobInformationStorageXML.cpp:162 #, c-format msgid "Will remove %s on service %s." msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:36 msgid "Ignoring job, the job ID is empty" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:41 #, c-format msgid "Ignoring job (%s), the management interface name is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:46 #, c-format msgid "Ignoring job (%s), the job management URL is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:51 #, c-format msgid "Ignoring job (%s), the status interface name is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:56 #, c-format msgid "Ignoring job (%s), the job status URL is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:65 #, c-format msgid "Ignoring job (%s), unable to load JobControllerPlugin for %s" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:72 #, c-format msgid "" "Ignoring job (%s), already tried and were unable to load JobControllerPlugin" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:385 #, fuzzy, c-format msgid "Job resubmission failed: Unable to load broker (%s)" msgstr "A feladat küldés meghiusúlt, mert nincs több szabad várakozó sor." 
#: src/hed/libs/compute/JobSupervisor.cpp:400 #, fuzzy msgid "Job resubmission aborted because no resource returned any information" msgstr "" "Feladat küldés sikertelen, mert egyetlen klaszter sem adott vissza " "információt magáról" #: src/hed/libs/compute/JobSupervisor.cpp:421 #, c-format msgid "Unable to resubmit job (%s), unable to parse obtained job description" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:443 #, c-format msgid "" "Unable to resubmit job (%s), target information retrieval failed for target: " "%s" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:469 #, c-format msgid "Unable to resubmit job (%s), no targets applicable for submission" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:504 #, c-format msgid "" "Unable to migrate job (%s), job description could not be retrieved remotely" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:524 #, fuzzy msgid "Job migration aborted, no resource returned any information" msgstr "" "Feladat küldés sikertelen, mert egyetlen klaszter sem adott vissza " "információt magáról" #: src/hed/libs/compute/JobSupervisor.cpp:536 #, fuzzy, c-format msgid "Job migration aborted, unable to load broker (%s)" msgstr "Nem sikerült betölteni a %s bróker modult" #: src/hed/libs/compute/JobSupervisor.cpp:552 #, c-format msgid "Unable to migrate job (%s), unable to parse obtained job description" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:573 #, c-format msgid "Unable to load submission plugin for %s interface" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:583 #, fuzzy, c-format msgid "Job migration failed for job (%s), no applicable targets" msgstr "A feladat küldés meghiusúlt, mert nincs több szabad várakozó sor." #: src/hed/libs/compute/Software.cpp:65 src/hed/libs/compute/Software.cpp:94 #: src/hed/libs/compute/Software.cpp:113 #, c-format msgid "%s > %s => false" msgstr "" #: src/hed/libs/compute/Software.cpp:70 src/hed/libs/compute/Software.cpp:83 #: src/hed/libs/compute/Software.cpp:107 #, c-format msgid "%s > %s => true" msgstr "" #: src/hed/libs/compute/Software.cpp:90 src/hed/libs/compute/Software.cpp:102 #, c-format msgid "%s > %s => false: %s contains non numbers in the version part." msgstr "" #: src/hed/libs/compute/Software.cpp:206 src/hed/libs/compute/Software.cpp:217 #, c-format msgid "Requirement \"%s %s\" NOT satisfied." msgstr "" #: src/hed/libs/compute/Software.cpp:212 #, c-format msgid "Requirement \"%s %s\" satisfied." msgstr "" #: src/hed/libs/compute/Software.cpp:221 #, c-format msgid "Requirement \"%s %s\" satisfied by \"%s\"." msgstr "" #: src/hed/libs/compute/Software.cpp:226 msgid "All requirements satisfied." msgstr "" #: src/hed/libs/compute/Submitter.cpp:83 #, fuzzy, c-format msgid "Trying to submit directly to endpoint (%s)" msgstr "Feladat újraküldésének megkisérlése ide: %s" #: src/hed/libs/compute/Submitter.cpp:88 #, c-format msgid "Interface (%s) specified, submitting only to that interface" msgstr "" #: src/hed/libs/compute/Submitter.cpp:106 msgid "Trying all available interfaces" msgstr "" #: src/hed/libs/compute/Submitter.cpp:112 #, c-format msgid "Trying to submit endpoint (%s) using interface (%s) with plugin (%s)." msgstr "" #: src/hed/libs/compute/Submitter.cpp:116 #, c-format msgid "" "Unable to load plugin (%s) for interface (%s) when trying to submit job " "description." msgstr "" #: src/hed/libs/compute/Submitter.cpp:130 #, c-format msgid "No more interfaces to try for endpoint %s." 
msgstr "" #: src/hed/libs/compute/Submitter.cpp:336 #, c-format msgid "Target %s does not match requested interface(s)." msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:54 msgid "No stagein URL is provided" msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:73 #, fuzzy, c-format msgid "Failed uploading file %s to %s: %s" msgstr "Nem sikerült listázni a fájlokat" #: src/hed/libs/compute/SubmitterPlugin.cpp:103 #, c-format msgid "Trying to migrate to %s: Migration to a %s interface is not supported." msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:159 #, c-format msgid "SubmitterPlugin plugin \"%s\" not found." msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:169 #, c-format msgid "SubmitterPlugin %s could not be created" msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:174 #, c-format msgid "Loaded SubmitterPlugin %s" msgstr "" #: src/hed/libs/compute/examples/basic_job_submission.cpp:28 #, fuzzy msgid "Invalid job description" msgstr "Érvénytelen feladat leírás:" #: src/hed/libs/compute/examples/basic_job_submission.cpp:47 #, fuzzy msgid "Failed to submit job" msgstr "Feladat újraküldésének megkisérlése ide: %s" #: src/hed/libs/compute/examples/basic_job_submission.cpp:54 #, c-format msgid "Failed to write to local job list %s" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:20 msgid "[job description ...]" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:21 msgid "" "This tiny tool can be used for testing the JobDescription's conversion " "abilities." msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:23 msgid "" "The job description also can be a file or a string in JDL, POSIX JSDL, JSDL, " "or XRSL format." msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:27 msgid "" "define the requested format (nordugrid:jsdl, egee:jdl, nordugrid:xrsl, emies:" "adl)" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:28 msgid "format" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:33 msgid "show the original job description" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:43 msgid "Use --help option for detailed usage information" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:50 #, fuzzy msgid " [ JobDescription tester ] " msgstr "Feladat leírás elküldve ide: %s" #: src/hed/libs/compute/test_jobdescription.cpp:74 msgid " [ Parsing the original text ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:80 #, fuzzy msgid "Unable to parse." 
msgstr "Nem sikerült betölteni a %s bróker modult" #: src/hed/libs/compute/test_jobdescription.cpp:89 msgid " [ egee:jdl ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:91 msgid " [ emies:adl ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:93 msgid " [ nordugrid:jsdl ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:95 msgid " [ nordugrid:xrsl ] " msgstr "" #: src/hed/libs/credential/ARCProxyUtil.cpp:138 msgid "VOMS command is empty" msgstr "" #: src/hed/libs/credential/ARCProxyUtil.cpp:424 #: src/hed/libs/credential/ARCProxyUtil.cpp:1431 #, fuzzy msgid "Failed to sign proxy" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/credential/ARCProxyUtil.cpp:1317 #, c-format msgid "Please choose the NSS database you would use (1-%d): " msgstr "" #: src/hed/libs/credential/ARCProxyUtil.cpp:1353 #: src/hed/libs/credential/ARCProxyUtil.cpp:1460 #, fuzzy msgid "Failed to generate X509 request with NSS" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/credential/ARCProxyUtil.cpp:1364 #: src/hed/libs/credential/ARCProxyUtil.cpp:1471 #: src/hed/libs/credential/ARCProxyUtil.cpp:1512 msgid "Failed to create X509 certificate with NSS" msgstr "" #: src/hed/libs/credential/ARCProxyUtil.cpp:1376 #: src/hed/libs/credential/ARCProxyUtil.cpp:1483 #: src/hed/libs/credential/ARCProxyUtil.cpp:1536 msgid "Failed to export X509 certificate from NSS DB" msgstr "" #: src/hed/libs/credential/ARCProxyUtil.cpp:1519 msgid "Failed to import X509 certificate into NSS DB" msgstr "" #: src/hed/libs/credential/ARCProxyUtil.cpp:1528 msgid "Failed to initialize the credential configuration" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:166 #, c-format msgid "Error number in store context: %i" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:167 msgid "Self-signed certificate" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:170 #, c-format msgid "The certificate with subject %s is not valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:173 #, c-format msgid "" "Can not find issuer certificate for the certificate with subject %s and " "hash: %lu" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:176 #, c-format msgid "Certificate with subject %s has expired" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:179 #, c-format msgid "" "Untrusted self-signed certificate in chain with subject %s and hash: %lu" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:181 #, c-format msgid "Certificate verification error: %s" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:193 msgid "Can not get the certificate type" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:233 msgid "Couldn't verify availability of CRL" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:246 msgid "In the available CRL the lastUpdate field is not valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:253 msgid "The available CRL is not yet valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:262 msgid "In the available CRL, the nextUpdate field is not valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:268 msgid "The available CRL has expired" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:291 #, c-format msgid "Certificate with serial number %s and subject \"%s\" is revoked" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:309 msgid "" "Directory of trusted CAs is not specified/found; Using current path as the " "CA direcroty" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:318 msgid "Can't allocate memory for CA policy path" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:364 #, 
c-format msgid "Certificate has unknown extension with numeric ID %u and SN %s" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:378 #: src/hed/libs/credential/Credential.cpp:1693 msgid "" "Can not convert DER encoded PROXY_CERT_INFO_EXTENSION extension to internal " "format" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:424 msgid "Trying to check X509 cert with check_cert_type" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:465 msgid "Can't convert DER encoded PROXYCERTINFO extension to internal form" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:469 msgid "Can't get policy from PROXYCERTINFO extension" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:473 msgid "Can't get policy language from PROXYCERTINFO extension" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:505 msgid "The subject does not match the issuer name + proxy CN entry" msgstr "" #: src/hed/libs/credential/Credential.cpp:73 #, c-format msgid "OpenSSL error string: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:196 msgid "Can't get the first byte of input to determine its format" msgstr "" #: src/hed/libs/credential/Credential.cpp:210 msgid "Can't reset the input" msgstr "" #: src/hed/libs/credential/Credential.cpp:236 #: src/hed/libs/credential/Credential.cpp:273 msgid "Can't get the first byte of input BIO to get its format" msgstr "" #: src/hed/libs/credential/Credential.cpp:248 msgid "Can not read certificate/key string" msgstr "" #: src/hed/libs/credential/Credential.cpp:456 #, c-format msgid "Can not find certificate file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:461 #, c-format msgid "Can not read certificate file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:499 msgid "Can not read certificate string" msgstr "" #: src/hed/libs/credential/Credential.cpp:519 msgid "Certificate format is PEM" msgstr "" #: src/hed/libs/credential/Credential.cpp:546 msgid "Certificate format is DER" msgstr "" #: src/hed/libs/credential/Credential.cpp:575 msgid "Certificate format is PKCS" msgstr "" #: src/hed/libs/credential/Credential.cpp:602 msgid "Certificate format is unknown" msgstr "" #: src/hed/libs/credential/Credential.cpp:610 #, c-format msgid "Can not find key file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:615 #, c-format msgid "Can not open key file %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:634 msgid "Can not read key string" msgstr "" #: src/hed/libs/credential/Credential.cpp:697 #: src/hed/libs/credential/VOMSUtil.cpp:258 msgid "Failed to lock arccredential library in memory" msgstr "" #: src/hed/libs/credential/Credential.cpp:712 msgid "Certificate verification succeeded" msgstr "" #: src/hed/libs/credential/Credential.cpp:716 msgid "Certificate verification failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:731 #: src/hed/libs/credential/Credential.cpp:751 #: src/hed/libs/credential/Credential.cpp:771 #: src/hed/libs/credential/Credential.cpp:1003 #: src/hed/libs/credential/Credential.cpp:2314 #: src/hed/libs/credential/Credential.cpp:2345 msgid "Failed to initialize extensions member for Credential" msgstr "" #: src/hed/libs/credential/Credential.cpp:814 #, c-format msgid "Unsupported proxy policy language is requested - %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:826 #, c-format msgid "Unsupported proxy version is requested - %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:837 msgid "If you specify a policy you also need to specify a policy language" msgstr "" #: src/hed/libs/credential/Credential.cpp:1008 msgid 
"Certificate/Proxy path is empty" msgstr "" #: src/hed/libs/credential/Credential.cpp:1067 #: src/hed/libs/credential/Credential.cpp:2856 msgid "Failed to duplicate extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1071 #, fuzzy msgid "Failed to add extension into credential extensions" msgstr "Nem sikerült betölteni a konfigurációt" #: src/hed/libs/credential/Credential.cpp:1082 #, fuzzy msgid "Certificate information collection failed" msgstr "verzió információ kiírása" #: src/hed/libs/credential/Credential.cpp:1124 #: src/hed/libs/credential/Credential.cpp:1129 msgid "Can not convert string into ASN1_OBJECT" msgstr "" #: src/hed/libs/credential/Credential.cpp:1141 msgid "Can not create extension for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:1180 #: src/hed/libs/credential/Credential.cpp:1348 msgid "BN_set_word failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1189 #: src/hed/libs/credential/Credential.cpp:1357 msgid "RSA_generate_key_ex failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1198 #: src/hed/libs/credential/Credential.cpp:1365 msgid "BN_new || RSA_new failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1209 msgid "Created RSA key, proceeding with request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1214 msgid "pkey and rsa_key exist!" msgstr "" #: src/hed/libs/credential/Credential.cpp:1217 msgid "Generate new X509 request!" msgstr "" #: src/hed/libs/credential/Credential.cpp:1222 msgid "Setting subject name!" msgstr "" #: src/hed/libs/credential/Credential.cpp:1230 #: src/hed/libs/credential/Credential.cpp:1444 msgid "PEM_write_bio_X509_REQ failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1260 #: src/hed/libs/credential/Credential.cpp:1301 #: src/hed/libs/credential/Credential.cpp:1476 #: src/hed/libs/credential/Credential.cpp:1496 msgid "Can not create BIO for request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1278 msgid "Failed to write request into string" msgstr "" #: src/hed/libs/credential/Credential.cpp:1305 #: src/hed/libs/credential/Credential.cpp:1310 #: src/hed/libs/credential/Credential.cpp:1500 msgid "Can not set writable file for request BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:1316 #: src/hed/libs/credential/Credential.cpp:1505 msgid "Wrote request into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1318 #: src/hed/libs/credential/Credential.cpp:1508 msgid "Failed to write request into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1338 msgid "The credential's private key has already been initialized" msgstr "" #: src/hed/libs/credential/Credential.cpp:1386 msgid "" "Can not duplicate the subject name for the self-signing proxy certificate " "request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1396 msgid "Can not create a new X509_NAME_ENTRY for the proxy certificate request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1414 #: src/hed/libs/credential/Credential.cpp:1421 #: src/hed/libs/credential/Credential.cpp:1943 #: src/hed/libs/credential/Credential.cpp:1951 msgid "" "Can not convert PROXY_CERT_INFO_EXTENSION struct from internal to DER " "encoded format" msgstr "" #: src/hed/libs/credential/Credential.cpp:1451 msgid "Can't convert X509 request from internal to DER encoded format" msgstr "" #: src/hed/libs/credential/Credential.cpp:1461 msgid "Can not generate X509 request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1463 msgid "Can not set private key" msgstr "" #: 
src/hed/libs/credential/Credential.cpp:1561 msgid "Failed to get private key" msgstr "" #: src/hed/libs/credential/Credential.cpp:1580 msgid "Failed to get public key from RSA object" msgstr "" #: src/hed/libs/credential/Credential.cpp:1588 msgid "Failed to get public key from X509 object" msgstr "" #: src/hed/libs/credential/Credential.cpp:1595 msgid "Failed to get public key" msgstr "" #: src/hed/libs/credential/Credential.cpp:1634 #, c-format msgid "Certiticate chain number %d" msgstr "" #: src/hed/libs/credential/Credential.cpp:1662 msgid "NULL BIO passed to InquireRequest" msgstr "" #: src/hed/libs/credential/Credential.cpp:1665 msgid "PEM_read_bio_X509_REQ failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1669 msgid "d2i_X509_REQ_bio failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1702 msgid "Can not get policy from PROXY_CERT_INFO_EXTENSION extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1706 msgid "Can not get policy language from PROXY_CERT_INFO_EXTENSION extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1722 #, c-format msgid "Cert Type: %d" msgstr "" #: src/hed/libs/credential/Credential.cpp:1735 #: src/hed/libs/credential/Credential.cpp:1754 msgid "Can not create BIO for parsing request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1740 msgid "Read request from a string" msgstr "" #: src/hed/libs/credential/Credential.cpp:1743 msgid "Failed to read request from a string" msgstr "" #: src/hed/libs/credential/Credential.cpp:1758 msgid "Can not set readable file for request BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:1763 msgid "Read request from a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1766 msgid "Failed to read request from a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1806 msgid "Can not convert private key to DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:1924 msgid "Credential is not initialized" msgstr "" #: src/hed/libs/credential/Credential.cpp:1930 #, fuzzy msgid "Failed to duplicate X509 structure" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/credential/Credential.cpp:1935 msgid "Failed to initialize X509 structure" msgstr "" #: src/hed/libs/credential/Credential.cpp:1958 msgid "Can not create extension for PROXY_CERT_INFO" msgstr "" #: src/hed/libs/credential/Credential.cpp:1962 #: src/hed/libs/credential/Credential.cpp:2010 msgid "Can not add X509 extension to proxy cert" msgstr "" #: src/hed/libs/credential/Credential.cpp:1978 msgid "Can not convert keyUsage struct from DER encoded format" msgstr "" #: src/hed/libs/credential/Credential.cpp:1990 #: src/hed/libs/credential/Credential.cpp:1999 msgid "Can not convert keyUsage struct from internal to DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2006 msgid "Can not create extension for keyUsage" msgstr "" #: src/hed/libs/credential/Credential.cpp:2019 msgid "Can not get extended KeyUsage extension from issuer certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2024 msgid "Can not copy extended KeyUsage extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:2029 msgid "Can not add X509 extended KeyUsage extension to new proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2039 msgid "Can not compute digest of public key" msgstr "" #: src/hed/libs/credential/Credential.cpp:2050 msgid "Can not copy the subject name from issuer for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2056 msgid "Can not create name entry 
CN for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2061 msgid "Can not set CN in proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2069 msgid "Can not set issuer's subject for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2074 msgid "Can not set version number for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2082 msgid "Can not set serial number for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2088 msgid "Can not duplicate serial number for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2094 msgid "Can not set the lifetime for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2098 msgid "Can not set pubkey for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2114 #: src/hed/libs/credential/Credential.cpp:2744 msgid "The credential to be signed is NULL" msgstr "" #: src/hed/libs/credential/Credential.cpp:2118 #: src/hed/libs/credential/Credential.cpp:2748 msgid "The credential to be signed contains no request" msgstr "" #: src/hed/libs/credential/Credential.cpp:2122 #: src/hed/libs/credential/Credential.cpp:2752 msgid "The BIO for output is NULL" msgstr "" #: src/hed/libs/credential/Credential.cpp:2136 #: src/hed/libs/credential/Credential.cpp:2759 msgid "Error when extracting public key from request" msgstr "" #: src/hed/libs/credential/Credential.cpp:2141 #: src/hed/libs/credential/Credential.cpp:2763 msgid "Failed to verify the request" msgstr "" #: src/hed/libs/credential/Credential.cpp:2145 msgid "Failed to add issuer's extension into proxy" msgstr "" #: src/hed/libs/credential/Credential.cpp:2169 msgid "Failed to find extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:2181 msgid "Can not get the issuer's private key" msgstr "" #: src/hed/libs/credential/Credential.cpp:2188 #: src/hed/libs/credential/Credential.cpp:2796 msgid "There is no digest in issuer's private key object" msgstr "" #: src/hed/libs/credential/Credential.cpp:2193 #: src/hed/libs/credential/Credential.cpp:2800 #, c-format msgid "%s is an unsupported digest type" msgstr "" #: src/hed/libs/credential/Credential.cpp:2204 #, c-format msgid "" "The signing algorithm %s is not allowed,it should be SHA1 or SHA2 to sign " "certificate requests" msgstr "" #: src/hed/libs/credential/Credential.cpp:2210 msgid "Failed to sign the proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2212 msgid "Succeeded to sign the proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2217 msgid "Failed to verify the signed certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2219 msgid "Succeeded to verify the signed certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2224 #: src/hed/libs/credential/Credential.cpp:2233 msgid "Output the proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2227 msgid "Can not convert signed proxy cert into PEM format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2236 msgid "Can not convert signed proxy cert into DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2252 #: src/hed/libs/credential/Credential.cpp:2275 msgid "Can not create BIO for signed proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2279 msgid "Can not set writable file for signed proxy certificate BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:2284 msgid "Wrote signed proxy certificate into a file" msgstr "" #: 
src/hed/libs/credential/Credential.cpp:2287 msgid "Failed to write signed proxy certificate into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:2323 #: src/hed/libs/credential/Credential.cpp:2363 #, c-format msgid "ERROR:%s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2371 #, c-format msgid "SSL error: %s, libs: %s, func: %s, reason: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2416 #, c-format msgid "unable to load number from: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2421 msgid "error converting number from bin to BIGNUM" msgstr "" #: src/hed/libs/credential/Credential.cpp:2448 msgid "file name too long" msgstr "" #: src/hed/libs/credential/Credential.cpp:2471 msgid "error converting serial to ASN.1 format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2504 #, c-format msgid "load serial from %s failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2509 msgid "add_word failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2514 #, c-format msgid "save serial to %s failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2534 msgid "Error initialising X509 store" msgstr "" #: src/hed/libs/credential/Credential.cpp:2541 msgid "Out of memory when generate random serial" msgstr "" #: src/hed/libs/credential/Credential.cpp:2553 msgid "CA certificate and CA private key do not match" msgstr "" #: src/hed/libs/credential/Credential.cpp:2577 #, c-format msgid "Failed to load extension section: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2614 msgid "malloc error" msgstr "" #: src/hed/libs/credential/Credential.cpp:2618 msgid "Subject does not start with '/'" msgstr "" #: src/hed/libs/credential/Credential.cpp:2634 #: src/hed/libs/credential/Credential.cpp:2655 msgid "escape character at end of string" msgstr "" #: src/hed/libs/credential/Credential.cpp:2646 #, c-format msgid "" "end of string encountered while processing type of subject name element #%d" msgstr "" #: src/hed/libs/credential/Credential.cpp:2683 #, c-format msgid "Subject Attribute %s has no known NID, skipped" msgstr "" #: src/hed/libs/credential/Credential.cpp:2687 #, c-format msgid "No value provided for Subject Attribute %s skipped" msgstr "" #: src/hed/libs/credential/Credential.cpp:2729 msgid "Failed to set the pubkey for X509 object by using pubkey from X509_REQ" msgstr "" #: src/hed/libs/credential/Credential.cpp:2739 msgid "The private key for signing is not initialized" msgstr "" #: src/hed/libs/credential/Credential.cpp:2819 #, c-format msgid "Error when loading the extension config file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2823 #, c-format msgid "Error when loading the extension config file: %s on line: %d" msgstr "" #: src/hed/libs/credential/Credential.cpp:2872 msgid "Can not sign a EEC" msgstr "" #: src/hed/libs/credential/Credential.cpp:2876 msgid "Output EEC certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2879 msgid "Can not convert signed EEC cert into DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2893 #: src/hed/libs/credential/Credential.cpp:2912 msgid "Can not create BIO for signed EEC certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2916 msgid "Can not set writable file for signed EEC certificate BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:2921 msgid "Wrote signed EEC certificate into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:2924 msgid "Failed to write signed EEC certificate into a file" msgstr "" #: 
src/hed/libs/credential/NSSUtil.cpp:147 msgid "Error writing raw certificate" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:224 msgid "Failed to add RFC proxy OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:227 #, c-format msgid "Succeeded to add RFC proxy OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:233 msgid "Failed to add anyLanguage OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:236 #: src/hed/libs/credential/NSSUtil.cpp:254 #, c-format msgid "Succeeded to add anyLanguage OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:242 msgid "Failed to add inheritAll OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:245 #, c-format msgid "Succeeded to add inheritAll OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:251 msgid "Failed to add Independent OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:260 msgid "Failed to add VOMS AC sequence OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:263 #, c-format msgid "Succeeded to add VOMS AC sequence OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:292 #, c-format msgid "NSS initialization failed on certificate database: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:303 msgid "Succeeded to initialize NSS" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:325 #, c-format msgid "Failed to read attribute %x from private key." msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:377 msgid "Succeeded to get credential" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:378 #, fuzzy msgid "Failed to get credential" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/credential/NSSUtil.cpp:440 msgid "p12 file is empty" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:450 msgid "Unable to write to p12 file" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:466 #, fuzzy msgid "Failed to open pk12 file" msgstr "Nem sikerült listázni a fájlokat" #: src/hed/libs/credential/NSSUtil.cpp:501 msgid "Failed to allocate p12 context" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1211 msgid "Failed to find issuer certificate for proxy certificate" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1362 #, c-format msgid "Failed to authenticate to PKCS11 slot %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1368 #, c-format msgid "Failed to find certificates by nickname: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1373 #, c-format msgid "No user certificate by nickname %s found" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1386 #: src/hed/libs/credential/NSSUtil.cpp:1422 msgid "Certificate does not have a slot" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1392 msgid "Failed to create export context" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1407 msgid "PKCS12 output password not provided" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1414 msgid "PKCS12 add password integrity failed" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1435 #, fuzzy msgid "Failed to create key or certificate safe" msgstr "publikus kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:1451 #, fuzzy msgid "Failed to add certificate and key" msgstr "publikus kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:1460 #, c-format msgid "Failed to initialize PKCS12 file: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1465 #, fuzzy msgid "Failed to encode PKCS12" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/credential/NSSUtil.cpp:1468 msgid "Succeeded to export PKCS12" msgstr "" #: 
src/hed/libs/credential/NSSUtil.cpp:1496 #, c-format msgid "" "There is no certificate named %s found, the certificate could be removed " "when generating CSR" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1502 #, fuzzy msgid "Failed to delete certificate" msgstr "publikus kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:1516 msgid "The name of the private key to delete is empty" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1521 #: src/hed/libs/credential/NSSUtil.cpp:1605 #, c-format msgid "Failed to authenticate to token %s." msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1528 #, c-format msgid "No private key with nickname %s exist in NSS database" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1561 #, fuzzy msgid "Failed to delete private key and certificate" msgstr "A privát és publikus kulcs tárolására szolgáló könyvtár" #: src/hed/libs/credential/NSSUtil.cpp:1571 #, fuzzy msgid "Failed to delete private key" msgstr "privát kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:1582 #, c-format msgid "Can not find key with name: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1616 #, c-format msgid "Failed to delete private key that attaches to certificate: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1637 msgid "Can not read PEM private key: probably bad password" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1639 msgid "Can not read PEM private key: failed to decrypt" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1641 #: src/hed/libs/credential/NSSUtil.cpp:1643 msgid "Can not read PEM private key: failed to obtain password" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1644 msgid "Can not read PEM private key" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1651 msgid "Failed to convert EVP_PKEY to PKCS8" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1688 #, fuzzy msgid "Failed to load private key" msgstr "privát kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:1689 msgid "Succeeded to load PrivateKeyInfo" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1692 msgid "Failed to convert PrivateKeyInfo to EVP_PKEY" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1693 msgid "Succeeded to convert PrivateKeyInfo to EVP_PKEY" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1730 #, fuzzy msgid "Failed to import private key" msgstr "privát kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:1733 msgid "Succeeded to import private key" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1746 #: src/hed/libs/credential/NSSUtil.cpp:1788 #: src/hed/libs/credential/NSSUtil.cpp:2920 msgid "Failed to authenticate to key database" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1755 msgid "Succeeded to generate public/private key pair" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1757 msgid "Failed to generate public/private key pair" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1762 #, fuzzy msgid "Failed to export private key" msgstr "privát kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:1829 msgid "Failed to create subject name" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1845 #, fuzzy msgid "Failed to create certificate request" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/credential/NSSUtil.cpp:1858 msgid "Failed to call PORT_NewArena" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1866 msgid "Failed to encode the certificate request with DER format" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1873 msgid "Unknown key or hash type" msgstr "" #: 
src/hed/libs/credential/NSSUtil.cpp:1879 #, fuzzy msgid "Failed to sign the certificate request" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/credential/NSSUtil.cpp:1895 msgid "Failed to output the certificate request as ASCII format" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1904 msgid "Failed to output the certificate request as DER format" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1913 #, c-format msgid "Succeeded to output the certificate request into %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1952 #: src/hed/libs/credential/NSSUtil.cpp:1989 msgid "Failed to read data from input file" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1968 msgid "Input is without trailer\n" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1979 msgid "Failed to convert ASCII to DER" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2030 msgid "Certificate request is invalid" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2252 #, c-format msgid "The policy language: %s is not supported" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2260 #: src/hed/libs/credential/NSSUtil.cpp:2285 #: src/hed/libs/credential/NSSUtil.cpp:2308 #: src/hed/libs/credential/NSSUtil.cpp:2330 #, fuzzy msgid "Failed to new arena" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/credential/NSSUtil.cpp:2269 #: src/hed/libs/credential/NSSUtil.cpp:2294 msgid "Failed to create path length" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2272 #: src/hed/libs/credential/NSSUtil.cpp:2297 #: src/hed/libs/credential/NSSUtil.cpp:2317 #: src/hed/libs/credential/NSSUtil.cpp:2339 msgid "Failed to create policy language" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2738 #, c-format msgid "Failed to parse certificate request from CSR file %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2745 #, c-format msgid "Can not find certificate with name %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2776 #, fuzzy, c-format msgid "Proxy subject: %s" msgstr "Tárgy: %s" #: src/hed/libs/credential/NSSUtil.cpp:2795 msgid "Failed to start certificate extension" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2800 msgid "Failed to add key usage extension" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2805 msgid "Failed to add proxy certificate information extension" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2809 msgid "Failed to add voms AC extension" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2829 #, fuzzy msgid "Failed to retrieve private key for issuer" msgstr "privát kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:2836 msgid "Unknown key or hash type of issuer" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2842 msgid "Failed to set signature algorithm ID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2854 #, fuzzy msgid "Failed to encode certificate" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/credential/NSSUtil.cpp:2860 msgid "Failed to allocate item for certificate data" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2866 msgid "Failed to sign encoded certificate data" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2875 #, c-format msgid "Failed to open file %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2886 #, c-format msgid "Succeeded to output certificate to %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2927 #, fuzzy, c-format msgid "Failed to open input certificate file %s" msgstr "publikus kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:2945 #, fuzzy msgid "Failed to read input certificate file" msgstr 
"publikus kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:2950 #, fuzzy msgid "Failed to get certificate from certificate file" msgstr "publikus kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:2957 msgid "Failed to allocate certificate trust" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2962 #, fuzzy msgid "Failed to decode trust string" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/credential/NSSUtil.cpp:2971 #: src/hed/libs/credential/NSSUtil.cpp:2988 #, c-format msgid "Failed to authenticate to token %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2976 #: src/hed/libs/credential/NSSUtil.cpp:2993 msgid "Failed to add certificate to token or database" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2979 #: src/hed/libs/credential/NSSUtil.cpp:2982 msgid "Succeeded to import certificate" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2996 #: src/hed/libs/credential/NSSUtil.cpp:2999 #, c-format msgid "Succeeded to change trusts to: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:3026 #, fuzzy, c-format msgid "Failed to import private key from file: %s" msgstr "privát kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:3028 #, fuzzy, c-format msgid "Failed to import certificate from file: %s" msgstr "publikus kulcs elérési útvonala" #: src/hed/libs/credential/VOMSConfig.cpp:142 #, c-format msgid "" "ERROR: VOMS configuration line contains too many tokens. Expecting 5 or 6. " "Line was: %s" msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:158 #, c-format msgid "" "ERROR: file tree is too deep while scanning VOMS configuration. Max allowed " "nesting is %i." msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:176 #, c-format msgid "ERROR: failed to read file %s while scanning VOMS configuration." msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:181 #, c-format msgid "" "ERROR: VOMS configuration file %s contains too many lines. Max supported " "number is %i." msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:188 #, c-format msgid "" "ERROR: VOMS configuration file %s contains too long line(s). Max supported " "length is %i characters." 
msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:185 #, fuzzy, c-format msgid "Failed to create OpenSSL object %s %s - %u %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/hed/libs/credential/VOMSUtil.cpp:193 #, fuzzy, c-format msgid "Failed to obtain OpenSSL identifier for %s" msgstr "publikus kulcs elérési útvonala" #: src/hed/libs/credential/VOMSUtil.cpp:346 #, c-format msgid "VOMS: create FQAN: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:384 #, c-format msgid "VOMS: create attribute: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:670 msgid "VOMS: Can not allocate memory for parsing AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:678 msgid "VOMS: Can not allocate memory for storing the order of AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:704 msgid "VOMS: Can not parse AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:734 msgid "VOMS: CA directory or CA file must be provided" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:798 msgid "VOMS: failed to verify AC signature" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:867 #, c-format msgid "VOMS: trust chain to check: %s " msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:875 #, c-format msgid "" "VOMS: the DN in certificate: %s does not match that in trusted DN list: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:881 #, c-format msgid "" "VOMS: the Issuer identity in certificate: %s does not match that in trusted " "DN list: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:916 #, fuzzy, c-format msgid "VOMS: The lsc file %s does not exist" msgstr "Az XML konfigurációs fájl: %s nem létezik" #: src/hed/libs/credential/VOMSUtil.cpp:922 #, c-format msgid "VOMS: The lsc file %s can not be open" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:970 msgid "" "VOMS: there is no constraints of trusted voms DNs, the certificates stack in " "AC will not be checked." 
msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1003 msgid "VOMS: unable to match certificate chain against VOMS trusted DNs" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1023 msgid "VOMS: AC signature verification failed" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1032 msgid "VOMS: unable to verify certificate chain" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1038 #, c-format msgid "VOMS: cannot validate AC issuer for VO %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1061 #, c-format msgid "VOMS: directory for trusted service certificates: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1087 #, c-format msgid "VOMS: Cannot find certificate of AC issuer for VO %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1109 msgid "VOMS: Can not find AC_ATTR with IETFATTR type" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1116 msgid "VOMS: case of multiple IETFATTR attributes not supported" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1126 msgid "VOMS: case of multiple policyAuthority not supported" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1142 msgid "VOMS: the format of policyAuthority is unsupported - expecting URI" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1151 msgid "" "VOMS: the format of IETFATTRVAL is not supported - expecting OCTET STRING" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1228 msgid "VOMS: the grantor attribute is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1246 msgid "VOMS: the attribute name is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1252 #, c-format msgid "VOMS: the attribute value for %s is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1257 msgid "VOMS: the attribute qualifier is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1289 msgid "" "VOMS: both idcenoRevAvail and authorityKeyIdentifier certificate extensions " "must be present" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1323 #, c-format msgid "VOMS: FQDN of this host %s does not match any target in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1328 msgid "VOMS: the only supported critical extension of the AC is idceTargets" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1343 msgid "VOMS: failed to parse attributes from AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1387 msgid "VOMS: authorityKey is wrong" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1415 msgid "VOMS: missing AC parts" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1432 msgid "VOMS: unsupported time format format in AC - expecting GENERALIZED TIME" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1438 msgid "VOMS: AC is not yet valid" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1445 msgid "VOMS: AC has expired" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1460 msgid "VOMS: AC is not complete - missing Serial or Issuer information" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1465 #, c-format msgid "VOMS: the holder serial number is: %lx" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1466 #, c-format msgid "VOMS: the serial number in AC is: %lx" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1469 #, c-format msgid "" "VOMS: the holder serial number %lx is not the same as the serial number in " "AC %lx, the holder certificate that is used to create a voms proxy could be " "a proxy certificate with a different serial number as the original EEC cert" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1478 msgid "VOMS: the holder information in AC is wrong" msgstr "" #: 
src/hed/libs/credential/VOMSUtil.cpp:1500 #, c-format msgid "VOMS: DN of holder in AC: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1501 #, c-format msgid "VOMS: DN of holder: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1502 #, c-format msgid "VOMS: DN of issuer: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1509 msgid "" "VOMS: the holder name in AC is not related to the distinguished name in " "holder certificate" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1521 #: src/hed/libs/credential/VOMSUtil.cpp:1528 msgid "VOMS: the holder issuerUID is not the same as that in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1541 msgid "VOMS: the holder issuer name is not the same as that in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1551 msgid "VOMS: the issuer information in AC is wrong" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1559 #, c-format msgid "VOMS: the issuer name %s is not the same as that in AC - %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1567 msgid "" "VOMS: the serial number of AC INFO is too long - expecting no more than 20 " "octets" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1597 #: src/hed/libs/credential/VOMSUtil.cpp:1605 #: src/hed/libs/credential/VOMSUtil.cpp:1613 #: src/hed/libs/credential/VOMSUtil.cpp:1621 #: src/hed/libs/credential/VOMSUtil.cpp:1644 msgid "VOMS: unable to extract VO name from AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1635 #, c-format msgid "VOMS: unable to determine hostname of AC from VO name: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1654 msgid "VOMS: can not verify the signature of the AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1660 msgid "VOMS: problems while parsing information in AC" msgstr "" #: src/hed/libs/credential/test/VOMSUtilTest.cpp:128 #, c-format msgid "Line %d.%d of the attributes returned: %s" msgstr "" #: src/hed/libs/credentialstore/ClientVOMS.cpp:149 msgid "voms" msgstr "" #: src/hed/libs/credentialstore/CredentialStore.cpp:194 #: src/hed/libs/credentialstore/CredentialStore.cpp:245 #: src/hed/libs/credentialstore/CredentialStore.cpp:273 #: src/hed/libs/credentialstore/CredentialStore.cpp:336 #: src/hed/libs/credentialstore/CredentialStore.cpp:376 #: src/hed/libs/credentialstore/CredentialStore.cpp:406 #, c-format msgid "MyProxy failure: %s" msgstr "" #: src/hed/libs/crypto/OpenSSL.cpp:68 #, c-format msgid "SSL error: %d - %s:%s:%s" msgstr "" #: src/hed/libs/crypto/OpenSSL.cpp:81 msgid "SSL locks not initialized" msgstr "" #: src/hed/libs/crypto/OpenSSL.cpp:85 #, c-format msgid "wrong SSL lock requested: %i of %i: %i - %s" msgstr "" #: src/hed/libs/crypto/OpenSSL.cpp:112 msgid "Failed to lock arccrypto library in memory" msgstr "" #: src/hed/libs/crypto/OpenSSL.cpp:117 src/hed/libs/crypto/OpenSSL.cpp:128 msgid "Failed to initialize OpenSSL library" msgstr "" #: src/hed/libs/crypto/OpenSSL.cpp:150 msgid "Number of OpenSSL locks changed - reinitializing" msgstr "" #: src/hed/libs/data/DataMover.cpp:111 msgid "No locations found - probably no more physical instances" msgstr "" #: src/hed/libs/data/DataMover.cpp:117 src/hed/libs/data/FileCache.cpp:673 #: src/libs/data-staging/Processor.cpp:458 #: src/libs/data-staging/Processor.cpp:472 #, c-format msgid "Removing %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:130 msgid "This instance was already deleted" msgstr "" #: src/hed/libs/data/DataMover.cpp:136 msgid "Failed to delete physical file" msgstr "" #: src/hed/libs/data/DataMover.cpp:147 #, c-format msgid "Removing metadata in 
%s" msgstr "" #: src/hed/libs/data/DataMover.cpp:151 msgid "Failed to delete meta-information" msgstr "" #: src/hed/libs/data/DataMover.cpp:165 msgid "Failed to remove all physical instances" msgstr "" #: src/hed/libs/data/DataMover.cpp:169 #, c-format msgid "Removing logical file from metadata %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:172 msgid "Failed to delete logical file" msgstr "" #: src/hed/libs/data/DataMover.cpp:179 msgid "Failed to remove instance" msgstr "" #: src/hed/libs/data/DataMover.cpp:228 msgid "DataMover::Transfer : starting new thread" msgstr "" #: src/hed/libs/data/DataMover.cpp:256 #, c-format msgid "Transfer from %s to %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:258 msgid "Not valid source" msgstr "" #: src/hed/libs/data/DataMover.cpp:263 msgid "Not valid destination" msgstr "" #: src/hed/libs/data/DataMover.cpp:283 #: src/services/cache_service/CacheService.cpp:294 #, c-format msgid "Couldn't handle certificate: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:293 src/hed/libs/data/DataMover.cpp:591 #: src/libs/data-staging/Processor.cpp:137 #, c-format msgid "File %s is cached (%s) - checking permissions" msgstr "" #: src/hed/libs/data/DataMover.cpp:297 src/hed/libs/data/DataMover.cpp:610 #: src/hed/libs/data/DataMover.cpp:672 src/libs/data-staging/Processor.cpp:156 msgid "Permission checking passed" msgstr "" #: src/hed/libs/data/DataMover.cpp:298 src/hed/libs/data/DataMover.cpp:630 #: src/hed/libs/data/DataMover.cpp:1136 msgid "Linking/copying cached file" msgstr "" #: src/hed/libs/data/DataMover.cpp:323 #, c-format msgid "No locations for source found: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:327 #, c-format msgid "Failed to resolve source: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:339 src/hed/libs/data/DataMover.cpp:407 #, c-format msgid "No locations for destination found: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:344 src/hed/libs/data/DataMover.cpp:411 #, c-format msgid "Failed to resolve destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:359 #, c-format msgid "No locations for destination different from source found: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:380 #, c-format msgid "DataMover::Transfer: trying to destroy/overwrite destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:391 #, c-format msgid "Failed to delete %s but will still try to copy" msgstr "" #: src/hed/libs/data/DataMover.cpp:394 #, c-format msgid "Failed to delete %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:421 #, c-format msgid "Deleted but still have locations at %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:433 msgid "DataMover: cycle" msgstr "" #: src/hed/libs/data/DataMover.cpp:435 msgid "DataMover: no retries requested - exit" msgstr "" #: src/hed/libs/data/DataMover.cpp:440 msgid "DataMover: source out of tries - exit" msgstr "" #: src/hed/libs/data/DataMover.cpp:442 msgid "DataMover: destination out of tries - exit" msgstr "" #: src/hed/libs/data/DataMover.cpp:450 #, c-format msgid "Real transfer from %s to %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:477 #, c-format msgid "Creating buffer: %lli x %i" msgstr "" #: src/hed/libs/data/DataMover.cpp:493 #, c-format msgid "DataMove::Transfer: no checksum calculation for %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:498 #, c-format msgid "DataMove::Transfer: using supplied checksum %s:%s" msgstr "" #: src/hed/libs/data/DataMover.cpp:522 #, c-format msgid "DataMove::Transfer: will calculate %s checksum" msgstr "" #: src/hed/libs/data/DataMover.cpp:527 msgid 
"Buffer creation failed !" msgstr "" #: src/hed/libs/data/DataMover.cpp:550 #, c-format msgid "URL is mapped to: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:580 src/hed/libs/data/DataMover.cpp:639 #: src/libs/data-staging/Processor.cpp:91 msgid "Cached file is locked - should retry" msgstr "" #: src/hed/libs/data/DataMover.cpp:585 src/libs/data-staging/Processor.cpp:110 msgid "Failed to initiate cache" msgstr "" #: src/hed/libs/data/DataMover.cpp:602 #: src/services/cache_service/CacheService.cpp:366 #, c-format msgid "Permission checking failed: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:604 src/hed/libs/data/DataMover.cpp:664 #: src/hed/libs/data/DataMover.cpp:686 src/hed/libs/data/DataMover.cpp:697 msgid "source.next_location" msgstr "" #: src/hed/libs/data/DataMover.cpp:618 src/libs/data-staging/Processor.cpp:161 #, c-format msgid "Source modification date: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:619 src/libs/data-staging/Processor.cpp:162 #, c-format msgid "Cache creation date: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:625 src/libs/data-staging/Processor.cpp:167 msgid "Cached file is outdated, will re-download" msgstr "" #: src/hed/libs/data/DataMover.cpp:629 src/libs/data-staging/Processor.cpp:173 msgid "Cached copy is still valid" msgstr "" #: src/hed/libs/data/DataMover.cpp:657 msgid "URL is mapped to local access - checking permissions on original URL" msgstr "" #: src/hed/libs/data/DataMover.cpp:661 #, c-format msgid "Permission checking on original URL failed: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:674 msgid "Linking local file" msgstr "" #: src/hed/libs/data/DataMover.cpp:694 #, c-format msgid "Failed to make symbolic link %s to %s : %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:703 #, c-format msgid "Failed to change owner of symbolic link %s to %i" msgstr "" #: src/hed/libs/data/DataMover.cpp:715 #, c-format msgid "cache file: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:741 #, fuzzy, c-format msgid "Failed to stat source %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/data/DataMover.cpp:743 src/hed/libs/data/DataMover.cpp:758 #: src/hed/libs/data/DataMover.cpp:795 src/hed/libs/data/DataMover.cpp:814 #: src/hed/libs/data/DataMover.cpp:982 src/hed/libs/data/DataMover.cpp:1014 #: src/hed/libs/data/DataMover.cpp:1024 src/hed/libs/data/DataMover.cpp:1101 msgid "(Re)Trying next source" msgstr "" #: src/hed/libs/data/DataMover.cpp:756 #, c-format msgid "Meta info of source and location do not match for %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:770 #, c-format msgid "" "Replica %s has high latency, but no more sources exist so will use this one" msgstr "" #: src/hed/libs/data/DataMover.cpp:774 #, c-format msgid "Replica %s has high latency, trying next source" msgstr "" #: src/hed/libs/data/DataMover.cpp:789 #, fuzzy, c-format msgid "Failed to prepare source: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/data/DataMover.cpp:805 #, c-format msgid "Failed to start reading from source: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:826 msgid "Metadata of source and destination are different" msgstr "" #: src/hed/libs/data/DataMover.cpp:847 #, c-format msgid "Failed to preregister destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:852 src/hed/libs/data/DataMover.cpp:1125 msgid "destination.next_location" msgstr "" #: src/hed/libs/data/DataMover.cpp:866 #, fuzzy, c-format msgid "Failed to prepare destination: %s" msgstr "Nem támogatott url: %s" #: src/hed/libs/data/DataMover.cpp:873 
src/hed/libs/data/DataMover.cpp:897 #: src/hed/libs/data/DataMover.cpp:1122 #, c-format msgid "" "Failed to unregister preregistered lfn. You may need to unregister it " "manually: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:877 src/hed/libs/data/DataMover.cpp:900 #: src/hed/libs/data/DataMover.cpp:991 src/hed/libs/data/DataMover.cpp:1007 #: src/hed/libs/data/DataMover.cpp:1030 src/hed/libs/data/DataMover.cpp:1077 msgid "(Re)Trying next destination" msgstr "" #: src/hed/libs/data/DataMover.cpp:889 #, c-format msgid "Failed to start writing to destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:913 msgid "Failed to start writing to cache" msgstr "" #: src/hed/libs/data/DataMover.cpp:921 src/hed/libs/data/DataMover.cpp:969 #: src/hed/libs/data/DataMover.cpp:1148 msgid "" "Failed to unregister preregistered lfn. You may need to unregister it " "manually" msgstr "" #: src/hed/libs/data/DataMover.cpp:929 msgid "Waiting for buffer" msgstr "" #: src/hed/libs/data/DataMover.cpp:936 #, c-format msgid "Failed updating timestamp on cache lock file %s for file %s: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:941 #, c-format msgid "buffer: read EOF : %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:942 #, c-format msgid "buffer: write EOF: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:943 #, c-format msgid "buffer: error : %s, read: %s, write: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:944 msgid "Closing read channel" msgstr "" #: src/hed/libs/data/DataMover.cpp:950 msgid "Closing write channel" msgstr "" #: src/hed/libs/data/DataMover.cpp:958 msgid "Failed to complete writing to destination" msgstr "" #: src/hed/libs/data/DataMover.cpp:974 msgid "Transfer cancelled successfully" msgstr "" #: src/hed/libs/data/DataMover.cpp:1019 msgid "Cause of failure unclear - choosing randomly" msgstr "" #: src/hed/libs/data/DataMover.cpp:1062 #, c-format msgid "" "Checksum mismatch between checksum given as meta option (%s:%s) and " "calculated checksum (%s)" msgstr "" #: src/hed/libs/data/DataMover.cpp:1070 msgid "" "Failed to unregister preregistered lfn, You may need to unregister it " "manually" msgstr "" #: src/hed/libs/data/DataMover.cpp:1074 msgid "Failed to delete destination, retry may fail" msgstr "" #: src/hed/libs/data/DataMover.cpp:1084 msgid "Cannot compare empty checksum" msgstr "" #: src/hed/libs/data/DataMover.cpp:1091 #: src/libs/data-staging/DataStagingDelivery.cpp:456 msgid "Checksum type of source and calculated checksum differ, cannot compare" msgstr "" #: src/hed/libs/data/DataMover.cpp:1093 #, c-format msgid "Checksum mismatch between calcuated checksum %s and source checksum %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:1106 #: src/libs/data-staging/DataStagingDelivery.cpp:472 #, c-format msgid "Calculated transfer checksum %s matches source checksum" msgstr "" #: src/hed/libs/data/DataMover.cpp:1112 #: src/libs/data-staging/DataStagingDelivery.cpp:475 msgid "Checksum not computed" msgstr "" #: src/hed/libs/data/DataMover.cpp:1118 #, c-format msgid "Failed to postregister destination %s" msgstr "" #: src/hed/libs/data/DataPoint.cpp:83 #, c-format msgid "Invalid URL option: %s" msgstr "" #: src/hed/libs/data/DataPoint.cpp:254 #, c-format msgid "Skipping invalid URL option %s" msgstr "" #: src/hed/libs/data/DataPoint.cpp:269 msgid "" "Third party transfer was requested but the corresponding plugin could\n" " not be loaded. Is the GFAL plugin installed? If not, please install " "the\n" " packages 'nordugrid-arc-plugins-gfal' and 'gfal2-all'. 
Depending on\n" " your type of installation the package names might differ." msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:91 #, c-format msgid "Can't handle location %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:183 msgid "Sorting replicas according to URL map" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:187 #, c-format msgid "Replica %s is mapped" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:195 #, c-format msgid "Sorting replicas according to preferred pattern %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:218 #: src/hed/libs/data/DataPointIndex.cpp:236 #, c-format msgid "Excluding replica %s matching pattern !%s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:229 #, c-format msgid "Replica %s matches host pattern %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:247 #, c-format msgid "Replica %s matches pattern %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:263 #, c-format msgid "Replica %s doesn't match preferred pattern or URL map" msgstr "" #: src/hed/libs/data/DataStatus.cpp:12 msgid "Operation completed successfully" msgstr "" #: src/hed/libs/data/DataStatus.cpp:13 #, fuzzy msgid "Source is invalid URL" msgstr "A lekérdezés nem XML helyes" #: src/hed/libs/data/DataStatus.cpp:14 #, fuzzy msgid "Destination is invalid URL" msgstr "Célállomás: %s" #: src/hed/libs/data/DataStatus.cpp:15 msgid "Resolving of index service for source failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:16 msgid "Resolving of index service for destination failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:17 msgid "Can't read from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:18 msgid "Can't write to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:19 msgid "Failed while reading from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:20 msgid "Failed while writing to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:21 msgid "Failed while transferring data" msgstr "" #: src/hed/libs/data/DataStatus.cpp:22 msgid "Failed while finishing reading from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:23 msgid "Failed while finishing writing to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:24 msgid "First stage of registration to index service failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:25 msgid "Last stage of registration to index service failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:26 msgid "Unregistering from index service failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:27 msgid "Error in caching procedure" msgstr "" #: src/hed/libs/data/DataStatus.cpp:28 msgid "Error due to expiration of provided credentials" msgstr "" #: src/hed/libs/data/DataStatus.cpp:29 msgid "Delete error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:30 msgid "No valid location available" msgstr "" #: src/hed/libs/data/DataStatus.cpp:31 msgid "Location already exists" msgstr "" #: src/hed/libs/data/DataStatus.cpp:32 msgid "Operation not supported for this kind of URL" msgstr "" #: src/hed/libs/data/DataStatus.cpp:33 msgid "Feature is not implemented" msgstr "" #: src/hed/libs/data/DataStatus.cpp:34 msgid "Already reading from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:35 #, fuzzy msgid "Already writing to destination" msgstr "Nem sikerült feloldani a célállomást" #: src/hed/libs/data/DataStatus.cpp:36 msgid "Read access check failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:37 #, fuzzy msgid "Directory listing failed" msgstr "Nem sikerült listázni a fájlokat" #: src/hed/libs/data/DataStatus.cpp:38 
msgid "Object is not suitable for listing" msgstr "" #: src/hed/libs/data/DataStatus.cpp:39 msgid "Failed to obtain information about file" msgstr "" #: src/hed/libs/data/DataStatus.cpp:40 msgid "No such file or directory" msgstr "" #: src/hed/libs/data/DataStatus.cpp:41 msgid "Object not initialized (internal error)" msgstr "" #: src/hed/libs/data/DataStatus.cpp:42 msgid "Operating System error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:43 msgid "Failed to stage file(s)" msgstr "" #: src/hed/libs/data/DataStatus.cpp:44 msgid "Inconsistent metadata" msgstr "" #: src/hed/libs/data/DataStatus.cpp:45 #, fuzzy msgid "Failed to prepare source" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/data/DataStatus.cpp:46 msgid "Should wait for source to be prepared" msgstr "" #: src/hed/libs/data/DataStatus.cpp:47 #, fuzzy msgid "Failed to prepare destination" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/data/DataStatus.cpp:48 msgid "Should wait for destination to be prepared" msgstr "" #: src/hed/libs/data/DataStatus.cpp:49 msgid "Failed to finalize reading from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:50 msgid "Failed to finalize writing to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:51 #, fuzzy msgid "Failed to create directory" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/data/DataStatus.cpp:52 #, fuzzy msgid "Failed to rename URL" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/data/DataStatus.cpp:53 msgid "Data was already cached" msgstr "" #: src/hed/libs/data/DataStatus.cpp:54 msgid "Operation cancelled successfully" msgstr "" #: src/hed/libs/data/DataStatus.cpp:55 #, fuzzy msgid "Generic error" msgstr "Felhasználó oldali hiba" #: src/hed/libs/data/DataStatus.cpp:56 src/hed/libs/data/DataStatus.cpp:69 msgid "Unknown error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:60 msgid "No error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:61 msgid "Transfer timed out" msgstr "" #: src/hed/libs/data/DataStatus.cpp:62 msgid "Checksum mismatch" msgstr "" #: src/hed/libs/data/DataStatus.cpp:63 msgid "Bad logic" msgstr "" #: src/hed/libs/data/DataStatus.cpp:64 msgid "All results obtained are invalid" msgstr "" #: src/hed/libs/data/DataStatus.cpp:65 #, fuzzy msgid "Temporary service error" msgstr "Felhasználó oldali hiba" #: src/hed/libs/data/DataStatus.cpp:66 #, fuzzy msgid "Permanent service error" msgstr "Felhasználó oldali hiba" #: src/hed/libs/data/DataStatus.cpp:67 msgid "Error switching uid" msgstr "" #: src/hed/libs/data/DataStatus.cpp:68 msgid "Request timed out" msgstr "" #: src/hed/libs/data/FileCache.cpp:101 msgid "No cache directory specified" msgstr "" #: src/hed/libs/data/FileCache.cpp:118 msgid "No usable caches" msgstr "" #: src/hed/libs/data/FileCache.cpp:127 msgid "No remote cache directory specified" msgstr "" #: src/hed/libs/data/FileCache.cpp:149 msgid "No draining cache directory specified" msgstr "" #: src/hed/libs/data/FileCache.cpp:177 #, c-format msgid "Failed to create cache directory for file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:187 #, c-format msgid "Failed to create any cache directories for %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:194 #, c-format msgid "Failed to change permissions on %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:206 #, c-format msgid "Failed to delete stale cache file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:209 #, fuzzy, c-format msgid "Failed to release lock on file %s" msgstr "Nem sikerült listázni a fájlokat" #: 
src/hed/libs/data/FileCache.cpp:248 #, c-format msgid "Found file %s in remote cache at %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:265 #, c-format msgid "Failed to delete stale remote cache file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:269 #, c-format msgid "Failed to release lock on remote cache file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:287 src/hed/libs/data/FileCache.cpp:339 #, c-format msgid "Failed to obtain lock on cache file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:294 src/hed/libs/data/FileCache.cpp:348 #: src/hed/libs/data/FileCache.cpp:408 #, c-format msgid "Error removing cache file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:296 src/hed/libs/data/FileCache.cpp:314 #: src/hed/libs/data/FileCache.cpp:318 src/hed/libs/data/FileCache.cpp:350 #: src/hed/libs/data/FileCache.cpp:361 #, c-format msgid "Failed to remove lock on %s. Some manual intervention may be required" msgstr "" #: src/hed/libs/data/FileCache.cpp:301 #, c-format msgid "Replicating file %s to local cache file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:304 src/hed/libs/data/FileCache.cpp:611 #, c-format msgid "Failed to copy file %s to %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:309 #, c-format msgid "" "Replicating file %s from remote cache failed due to source being deleted or " "modified" msgstr "" #: src/hed/libs/data/FileCache.cpp:311 #, c-format msgid "Failed to delete bad copy of remote cache file %s at %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:333 #, c-format msgid "Failed looking up attributes of cached file: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:380 src/hed/libs/data/FileCache.cpp:414 #, c-format msgid "Failed to unlock file %s: %s. Manual intervention may be required" msgstr "" #: src/hed/libs/data/FileCache.cpp:397 #, c-format msgid "Invalid lock on file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:403 #, fuzzy, c-format msgid "Failed to remove .meta file %s: %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/hed/libs/data/FileCache.cpp:468 #, fuzzy, c-format msgid "Cache not found for file %s" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/hed/libs/data/FileCache.cpp:478 #, c-format msgid "" "Cache file %s was modified in the last second, sleeping 1 second to avoid " "race condition" msgstr "" #: src/hed/libs/data/FileCache.cpp:484 src/hed/libs/data/FileCache.cpp:819 #, c-format msgid "Cache file %s does not exist" msgstr "" #: src/hed/libs/data/FileCache.cpp:503 #, c-format msgid "Cache file for %s not found in any local or remote cache" msgstr "" #: src/hed/libs/data/FileCache.cpp:507 #, c-format msgid "Using remote cache file %s for url %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:510 src/hed/libs/data/FileCache.cpp:821 #, c-format msgid "Error accessing cache file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:516 #, c-format msgid "Cannot create directory %s for per-job hard links" msgstr "" #: src/hed/libs/data/FileCache.cpp:521 #, c-format msgid "Cannot change permission of %s: %s " msgstr "" #: src/hed/libs/data/FileCache.cpp:525 #, c-format msgid "Cannot change owner of %s: %s " msgstr "" #: src/hed/libs/data/FileCache.cpp:539 #, c-format msgid "Failed to remove existing hard link at %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:543 src/hed/libs/data/FileCache.cpp:554 #, c-format msgid "Failed to create hard link from %s to %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:549 #, c-format msgid "Cache file %s not found" msgstr "" #: src/hed/libs/data/FileCache.cpp:564 
#, c-format msgid "Failed to change permissions or set owner of hard link %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:572 #, c-format msgid "Failed to release lock on cache file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:583 #, c-format msgid "Cache file %s was locked during link/copy, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:588 #, c-format msgid "Cache file %s was deleted during link/copy, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:593 #, c-format msgid "Cache file %s was modified while linking, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:617 #, c-format msgid "Failed to set executable bit on file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:622 #, c-format msgid "Failed to set executable bit on file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:636 #, c-format msgid "Failed to remove existing symbolic link at %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:640 src/hed/libs/data/FileCache.cpp:645 #, c-format msgid "Failed to create symbolic link from %s to %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:675 #, c-format msgid "Failed to remove cache per-job dir %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:694 src/hed/libs/data/FileCache.cpp:771 #, c-format msgid "Error reading meta file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:699 src/hed/libs/data/FileCache.cpp:776 #, c-format msgid "Error opening meta file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:704 src/hed/libs/data/FileCache.cpp:780 #, c-format msgid "meta file %s is empty" msgstr "" #: src/hed/libs/data/FileCache.cpp:713 #, c-format msgid "" "File %s is already cached at %s under a different URL: %s - will not add DN " "to cached list" msgstr "" #: src/hed/libs/data/FileCache.cpp:733 #, c-format msgid "Bad format detected in file %s, in line %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:750 #, c-format msgid "Could not acquire lock on meta file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:754 #, c-format msgid "Error opening meta file for writing %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:790 #, c-format msgid "DN %s is cached and is valid until %s for URL %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:794 #, c-format msgid "DN %s is cached but has expired for URL %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:845 #, c-format msgid "Failed to acquire lock on cache meta file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:850 #, fuzzy, c-format msgid "Failed to create cache meta file %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/hed/libs/data/FileCache.cpp:865 #, fuzzy, c-format msgid "Failed to read cache meta file %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/hed/libs/data/FileCache.cpp:870 #, c-format msgid "Cache meta file %s is empty, will recreate" msgstr "" #: src/hed/libs/data/FileCache.cpp:875 #, c-format msgid "Cache meta file %s possibly corrupted, will recreate" msgstr "" #: src/hed/libs/data/FileCache.cpp:883 #, c-format msgid "" "File %s is already cached at %s under a different URL: %s - this file will " "not be cached" msgstr "" #: src/hed/libs/data/FileCache.cpp:893 #, c-format msgid "Error looking up attributes of cache meta file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:955 #, c-format msgid "Using cache %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:969 #, c-format msgid "Error getting info from statvfs for the path %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:975 #, c-format msgid "Cache %s: Free space %f GB" msgstr "" #: 
src/hed/libs/data/URLMap.cpp:33 #, c-format msgid "Can't use URL %s" msgstr "" #: src/hed/libs/data/URLMap.cpp:39 #, c-format msgid "file %s is not accessible" msgstr "" #: src/hed/libs/data/URLMap.cpp:49 #, c-format msgid "Mapping %s to %s" msgstr "" #: src/hed/libs/data/examples/simple_copy.cpp:17 msgid "Usage: copy source destination" msgstr "" #: src/hed/libs/data/examples/simple_copy.cpp:42 #, c-format msgid "Copy failed: %s" msgstr "" #: src/hed/libs/globusutils/GSSCredential.cpp:41 #, c-format msgid "Failed to read proxy file: %s" msgstr "" #: src/hed/libs/globusutils/GSSCredential.cpp:49 #, c-format msgid "Failed to read certificate file: %s" msgstr "" #: src/hed/libs/globusutils/GSSCredential.cpp:56 #, c-format msgid "Failed to read private key file: %s" msgstr "" #: src/hed/libs/globusutils/GSSCredential.cpp:82 #, c-format msgid "" "Failed to convert GSI credential to GSS credential (major: %d, minor: %d)%s:%" "s" msgstr "" #: src/hed/libs/globusutils/GSSCredential.cpp:94 #, c-format msgid "Failed to release GSS credential (major: %d, minor: %d):%s:%s" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:26 msgid "Initialize ISIS handler" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:31 #, c-format msgid "Can't recognize URL: %s" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:48 msgid "Initialize ISIS handler succeeded" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:52 #, c-format msgid "Remove ISIS (%s) from list" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:72 #, c-format msgid "getISISList from %s" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:73 #, c-format msgid "Key %s, Cert: %s, CA: %s" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:105 #, c-format msgid "ISIS (%s) is not available or not valid response. (%d. reconnection)" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:107 #, c-format msgid "Connection to the ISIS (%s) is success and get the list of ISIS." msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:133 #, c-format msgid "GetISISList add this (%s) ISIS into the list." msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:145 #, c-format msgid "Chosen ISIS for communication: %s" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:152 msgid "Get ISIS from list of ISIS handler" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:156 msgid "Here is the end of the infinite calling loop." msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:163 msgid "There is no more ISIS available. The list of ISIS's is already empty." 
msgstr "" #: src/hed/libs/infosys/InfoCache.cpp:47 #, c-format msgid "cannot create directory: %s" msgstr "" #: src/hed/libs/infosys/InfoCache.cpp:60 #, c-format msgid "Cache configuration: %s" msgstr "" #: src/hed/libs/infosys/InfoCache.cpp:63 msgid "Missing cache root in configuration" msgstr "" #: src/hed/libs/infosys/InfoCache.cpp:67 msgid "Missing service ID" msgstr "" #: src/hed/libs/infosys/InfoCache.cpp:70 #, c-format msgid "Cache root: %s" msgstr "" #: src/hed/libs/infosys/InfoCache.cpp:76 #, c-format msgid "Cache directory: %s" msgstr "" #: src/hed/libs/infosys/InfoCache.cpp:143 #: src/hed/libs/infosys/InfoCache.cpp:162 #: src/hed/libs/infosys/InfoCache.cpp:181 #: src/hed/libs/infosys/InfoCache.cpp:206 msgid "InfoCache object is not set up" msgstr "" #: src/hed/libs/infosys/InfoCache.cpp:147 #: src/hed/libs/infosys/InfoCache.cpp:166 #, c-format msgid "Invalid path in Set(): %s" msgstr "" #: src/hed/libs/infosys/InfoCache.cpp:185 #, c-format msgid "Invalid path in Get(): %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:23 #, c-format msgid "" "InfoRegistrar thread waiting %d seconds for the all Registers elements " "creation." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:69 #, c-format msgid "" "InfoRegister created with config:\n" "%s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:79 #, c-format msgid "InfoRegister to be registered in Registrar %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:81 msgid "Discarding Registrar because the \"URL\" element is missing or empty." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:133 #, c-format msgid "InfoRegistrar id \"%s\" has been found." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:140 #, c-format msgid "InfoRegistrar id \"%s\" was not found. New registrar created" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:182 #, c-format msgid "" "Configuration error. Retry: \"%s\" is not a valid value. Default value will " "be used." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:188 #, c-format msgid "Retry: %d" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:197 #, c-format msgid "Key: %s, cert: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:226 msgid "The service won't be registered." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:231 msgid "Configuration error. Missing mandatory \"Period\" element." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:236 msgid "Configuration error. Missing mandatory \"Endpoint\" element." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:241 msgid "Configuration error. Missing mandatory \"Expiration\" element." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:248 #, c-format msgid "" "Service was already registered to the InfoRegistrar connecting to infosys %s." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:284 #, c-format msgid "" "Service is successfully added to the InfoRegistrar connecting to infosys %s." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:301 msgid "Unregistred Service can not be removed." 
msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:348 #: src/hed/libs/infosys/InfoRegister.cpp:411 #, c-format msgid "Key: %s, Cert: %s, Proxy: %s, CADir: %s CAPath" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:359 #: src/hed/libs/infosys/InfoRegister.cpp:646 #, c-format msgid "Response from the ISIS: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:363 #, c-format msgid "Failed to remove registration from %s ISIS" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:366 #, c-format msgid "Successfuly removed registration from ISIS (%s)" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:372 #, c-format msgid "Failed to remove registration from ISIS (%s) - %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:379 #: src/hed/libs/infosys/InfoRegister.cpp:656 #, c-format msgid "Retry connecting to the ISIS (%s) %d time(s)." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:385 #, c-format msgid "ISIS (%s) is not available." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:389 #: src/hed/libs/infosys/InfoRegister.cpp:439 #, c-format msgid "Service removed from InfoRegistrar connecting to infosys %s." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:420 #, c-format msgid "Failed to remove registration from %s EMIRegistry" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:423 #, c-format msgid "Successfuly removed registration from EMIRegistry (%s)" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:429 #: src/hed/libs/infosys/InfoRegister.cpp:957 #, c-format msgid "Retry connecting to the EMIRegistry (%s) %d time(s)." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:435 #, c-format msgid "EMIRegistry (%s) is not available." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:476 #: src/hed/libs/infosys/InfoRegister.cpp:684 #, c-format msgid "Registration starts: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:477 #: src/hed/libs/infosys/InfoRegister.cpp:685 #, c-format msgid "reg_.size(): %d" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:480 #: src/hed/libs/infosys/InfoRegister.cpp:688 msgid "Registrant has no proper URL specified. Registration end." 
msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:510 #: src/hed/libs/infosys/InfoRegister.cpp:713 msgid "Create RegEntry XML element" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:542 #: src/hed/libs/infosys/InfoRegister.cpp:745 msgid "ServiceID attribute calculated from Endpoint Reference" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:546 #: src/hed/libs/infosys/InfoRegister.cpp:749 msgid "Generation Time attribute calculated from current time" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:553 #: src/hed/libs/infosys/InfoRegister.cpp:756 #, c-format msgid "ServiceID stored: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:559 #: src/hed/libs/infosys/InfoRegister.cpp:762 #, c-format msgid "Missing service document provided by the service %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:565 #: src/hed/libs/infosys/InfoRegister.cpp:768 #, c-format msgid "" "Missing MetaServiceAdvertisment or Expiration values provided by the service " "%s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:572 #: src/hed/libs/infosys/InfoRegister.cpp:775 #, c-format msgid "Missing Type value provided by the service %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:580 #: src/hed/libs/infosys/InfoRegister.cpp:783 #, c-format msgid "Missing Endpoint Reference value provided by the service %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:598 #, c-format msgid "Registering to %s ISIS" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:621 #: src/hed/libs/infosys/InfoRegister.cpp:822 #, c-format msgid "Key: %s, Cert: %s, Proxy: %s, CADir: %s, CAFile" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:625 #, c-format msgid "Sent RegEntries: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:639 #, c-format msgid "Error during registration to %s ISIS" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:648 #, c-format msgid "Successful registration to ISIS (%s)" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:652 #, c-format msgid "Failed to register to ISIS (%s) - %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:668 #: src/hed/libs/infosys/InfoRegister.cpp:967 #, c-format msgid "Registration ends: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:669 #: src/hed/libs/infosys/InfoRegister.cpp:968 #, c-format msgid "Waiting period is %d second(s)." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:679 #: src/hed/libs/infosys/InfoRegister.cpp:978 #, c-format msgid "Registration exit: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:801 #, c-format msgid "Registering to %s EMIRegistry" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:927 #, fuzzy, c-format msgid "Sent entry: %s" msgstr "Azonosító: %s" #: src/hed/libs/infosys/InfoRegister.cpp:940 #, c-format msgid "Error during %s to %s EMIRegistry" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:943 #, c-format msgid "Successful %s to EMIRegistry (%s)" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:949 #, c-format msgid "Failed to %s to EMIRegistry (%s) - %d" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:25 msgid "Module Manager Init" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:68 msgid "" "Busy plugins found while unloading Module Manager. Waiting for them to be " "released." 
msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:202 #, c-format msgid "Found %s in cache" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:209 #, c-format msgid "Could not locate module %s in following paths:" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:213 #, c-format msgid "\t%s" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:227 #, c-format msgid "Loaded %s" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:271 msgid "Module Manager Init by ModuleManager::setCfg" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:307 #: src/hed/libs/loader/ModuleManager.cpp:320 #, c-format msgid "%s made persistent" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:311 #, c-format msgid "Not found %s in cache" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:325 msgid "Specified module not found in cache" msgstr "" #: src/hed/libs/loader/Plugin.cpp:369 src/hed/libs/loader/Plugin.cpp:574 #, c-format msgid "Could not find loadable module descriptor by name %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:375 src/hed/libs/loader/Plugin.cpp:581 #, c-format msgid "Could not find loadable module by name %s (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:381 src/hed/libs/loader/Plugin.cpp:492 #: src/hed/libs/loader/Plugin.cpp:586 #, c-format msgid "Module %s is not an ARC plugin (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:398 src/hed/libs/loader/Plugin.cpp:502 #: src/hed/libs/loader/Plugin.cpp:608 #, c-format msgid "Module %s failed to reload (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:462 src/hed/libs/loader/Plugin.cpp:475 #, c-format msgid "Loadable module %s contains no requested plugin %s of kind %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:480 #, c-format msgid "Could not find loadable module descriptor by names %s and %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:486 #, c-format msgid "Could not find loadable module by names %s and %s (%s)" msgstr "" #: src/hed/libs/message/MCC.cpp:77 src/hed/libs/message/Service.cpp:25 #, c-format msgid "No security processing/check requested for '%s'" msgstr "" #: src/hed/libs/message/MCC.cpp:85 msgid "Security processing/check failed" msgstr "" #: src/hed/libs/message/MCC.cpp:89 msgid "Security processing/check passed" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:16 msgid "Chain(s) configuration failed" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:133 msgid "SecHandler configuration is not defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:156 msgid "SecHandler has no configuration" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:162 msgid "SecHandler has no name attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:172 #, c-format msgid "Security Handler %s(%s) could not be created" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:176 #, c-format msgid "SecHandler: %s(%s)" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:188 msgid "Component has no name attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:193 msgid "Component has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:202 #, c-format msgid "Component %s(%s) could not be created" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:232 #, c-format msgid "Component's %s(%s) next has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:287 #, c-format msgid "Loaded MCC %s(%s)" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:305 #, c-format msgid "Plexer's (%s) next has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:315 #, c-format msgid "Loaded Plexer %s" msgstr "" #: 
src/hed/libs/message/MCCLoader.cpp:323 msgid "Service has no Name attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:329 msgid "Service has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:338 #, c-format msgid "Service %s(%s) could not be created" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:345 #, c-format msgid "Loaded Service %s(%s)" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:387 #, c-format msgid "Linking MCC %s(%s) to MCC (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:398 #, c-format msgid "Linking MCC %s(%s) to Service (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:407 #, c-format msgid "Linking MCC %s(%s) to Plexer (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:412 #, c-format msgid "MCC %s(%s) - next %s(%s) has no target" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:431 #, c-format msgid "Linking Plexer %s to MCC (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:442 #, c-format msgid "Linking Plexer %s to Service (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:451 #, c-format msgid "Linking Plexer %s to Plexer (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:457 #, c-format msgid "Plexer (%s) - next %s(%s) has no target" msgstr "" #: src/hed/libs/message/Plexer.cpp:31 #, c-format msgid "Bad label: \"%s\"" msgstr "" #: src/hed/libs/message/Plexer.cpp:47 #, c-format msgid "Operation on path \"%s\"" msgstr "" #: src/hed/libs/message/Plexer.cpp:60 #, c-format msgid "No next MCC or Service at path \"%s\"" msgstr "" #: src/hed/libs/message/Service.cpp:33 #, c-format msgid "Security processing/check for '%s' failed" msgstr "" #: src/hed/libs/message/Service.cpp:37 #, c-format msgid "Security processing/check for '%s' passed" msgstr "" #: src/hed/libs/message/Service.cpp:43 msgid "Empty registration collector" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:136 #, c-format msgid "Can not load ARC evaluator object: %s" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:187 #, c-format msgid "Can not load ARC request object: %s" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:228 #, c-format msgid "Can not load policy object: %s" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:276 msgid "Can not load policy object" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:324 msgid "Can not load request object" msgstr "" #: src/hed/libs/security/ArcPDP/PolicyParser.cpp:119 msgid "Can not generate policy object" msgstr "" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:37 #, c-format msgid "Id= %s,Type= %s,Issuer= %s,Value= %s" msgstr "" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:40 #, c-format msgid "No Attribute exists, which can deal with type: %s" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:157 #, c-format msgid "HTTP Error: %d %s" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:222 msgid "Cannot create http payload" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:288 msgid "No next element in the chain" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:297 #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:253 msgid "next element of the chain returned error status" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:306 msgid "next element of the chain returned no payload" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:318 msgid "next element of the chain returned invalid/unsupported payload" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:393 msgid "Error to flush output payload" msgstr "" #: 
src/hed/mcc/http/PayloadHTTP.cpp:306 #, c-format msgid "<< %s" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:355 src/hed/mcc/http/PayloadHTTP.cpp:457 #, c-format msgid "< %s" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:576 #, fuzzy msgid "Failed to parse HTTP header" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/mcc/http/PayloadHTTP.cpp:837 msgid "Invalid HTTP object can't produce result" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:950 #, c-format msgid "> %s" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:975 msgid "Failed to write header to output stream" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:1000 src/hed/mcc/http/PayloadHTTP.cpp:1006 #: src/hed/mcc/http/PayloadHTTP.cpp:1012 src/hed/mcc/http/PayloadHTTP.cpp:1022 #: src/hed/mcc/http/PayloadHTTP.cpp:1034 src/hed/mcc/http/PayloadHTTP.cpp:1039 #: src/hed/mcc/http/PayloadHTTP.cpp:1044 src/hed/mcc/http/PayloadHTTP.cpp:1052 #: src/hed/mcc/http/PayloadHTTP.cpp:1059 msgid "Failed to write body to output stream" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:31 msgid "Skipping service: no ServicePath found!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:37 msgid "Skipping service: no SchemaPath found!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:89 msgid "Parser Context creation failed!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:98 msgid "Cannot parse schema!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:110 msgid "Empty payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:119 msgid "Could not convert payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:125 #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:212 msgid "Could not create PayloadSOAP!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:196 msgid "Empty input payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:205 msgid "Could not convert incoming payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:232 msgid "Missing schema! Skipping validation..." msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:237 msgid "Could not validate message!" 
msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:245 #: src/hed/mcc/soap/MCCSOAP.cpp:222 msgid "empty next chain element" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:257 #: src/hed/mcc/soap/MCCSOAP.cpp:238 msgid "next element of the chain returned empty payload" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:265 msgid "next element of the chain returned invalid payload" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:192 msgid "empty input payload" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:198 msgid "incoming message is not SOAP" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:215 src/hed/mcc/soap/MCCSOAP.cpp:372 msgid "Security check failed in SOAP MCC for incoming message" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:230 #, c-format msgid "next element of the chain returned error status: %s" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:249 msgid "next element of the chain returned unknown payload - passing through" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:252 src/hed/mcc/soap/MCCSOAP.cpp:265 #: src/hed/mcc/soap/MCCSOAP.cpp:317 msgid "Security check failed in SOAP MCC for outgoing message" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:104 src/hed/mcc/tcp/MCCTCP.cpp:636 msgid "Cannot initialize winsock library" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:119 msgid "Missing Port in Listen element" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:128 msgid "Version in Listen element can't be recognized" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:137 #, c-format msgid "Failed to obtain local address for port %s - %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:139 #, c-format msgid "Failed to obtain local address for %s:%s - %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:146 #, c-format msgid "Trying to listen on TCP port %s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:148 #, c-format msgid "Trying to listen on %s:%s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:154 #, c-format msgid "Failed to create socket for for listening at TCP port %s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:156 #, c-format msgid "Failed to create socket for for listening at %s:%s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:171 #, c-format msgid "" "Failed to limit socket to IPv6 at TCP port %s - may cause errors for IPv4 at " "same port" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:173 #, c-format msgid "" "Failed to limit socket to IPv6 at %s:%s - may cause errors for IPv4 at same " "port" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:181 #, c-format msgid "Failed to bind socket for TCP port %s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:183 #, c-format msgid "Failed to bind socket for %s:%s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:198 #, c-format msgid "Failed to listen at TCP port %s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:200 #, c-format msgid "Failed to listen at %s:%s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:217 #, c-format msgid "Listening on TCP port %s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:219 #, c-format msgid "Listening on %s:%s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:226 #, c-format msgid "Failed to start listening on any address for %s:%s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:228 #, c-format msgid "Failed to start listening on any address for %s:%s(IPv%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:234 msgid "No listening ports initiated" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:245 #, c-format msgid "Setting connections limit to %i, connections over limit will be %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:249 msgid "Failed to start thread for listening" msgstr "" #: 
src/hed/mcc/tcp/MCCTCP.cpp:285 msgid "Failed to start thread for communication" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:315 msgid "Failed while waiting for connection request" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:337 msgid "Failed to accept connection request" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:346 msgid "Too many connections - dropping new one" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:353 msgid "Too many connections - waiting for old to close" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:577 msgid "next chain element called" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:592 msgid "Only Raw Buffer payload is supported for output" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:600 src/hed/mcc/tcp/MCCTCP.cpp:709 #: src/hed/mcc/tls/MCCTLS.cpp:545 msgid "Failed to send content of buffer" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:612 msgid "TCP executor is removed" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:614 #, c-format msgid "Sockets do not match on exit %i != %i" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:642 msgid "No Connect element specified" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:648 msgid "Missing Port in Connect element" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:654 msgid "Missing Host in Connect element" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:685 msgid "TCP client process called" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:71 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:87 #, c-format msgid "Failed to resolve %s (%s)" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:97 #, c-format msgid "Trying to connect %s(%s):%d" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:101 #, c-format msgid "Failed to create socket for connecting to %s(%s):%d - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:114 #, c-format msgid "" "Failed to get TCP socket options for connection to %s(%s):%d - timeout won't " "work - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:121 #, c-format msgid "Failed to connect to %s(%s):%i - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:131 #, c-format msgid "Timeout connecting to %s(%s):%i - %i s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:139 #, c-format msgid "Failed while waiting for connection to %s(%s):%i - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:149 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:158 #, c-format msgid "Failed to connect to %s(%s):%i" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:215 msgid "" "Received message out-of-band (not critical, ERROR level is just for " "debugging purposes)" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:39 msgid "Independent proxy - no rights granted" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:43 msgid "Proxy with all rights inherited" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:51 msgid "Proxy with empty policy - fail on unrecognized policy" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:56 #, c-format msgid "Proxy with specific policy: %s" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:60 msgid "Proxy with ARC Policy" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:62 msgid "Proxy with unknown policy - fail on unrecognized policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:77 #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:128 #, c-format msgid "Was expecting %s at the beginning of \"%s\"" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:82 #, c-format msgid "We only support CAs in Globus signing policy - %s is not supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:87 #, c-format msgid "We only support X509 CAs in Globus signing policy - %s 
is not supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:92 msgid "Missing CA subject in Globus signing policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:102 msgid "Negative rights are not supported in Globus signing policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:106 #, c-format msgid "Unknown rights in Globus signing policy - %s" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:111 #, c-format msgid "" "Only globus rights are supported in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:116 #, c-format msgid "" "Only signing rights are supported in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:133 #, c-format msgid "" "We only support subjects conditions in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:138 #, c-format msgid "" "We only support globus conditions in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:144 msgid "Missing condition subjects in Globus signing policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:220 msgid "Unknown element in Globus signing policy" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:222 #, fuzzy msgid "Critical VOMS attribute processing failed" msgstr "VOMS attribútumok értelmezése sikertelen" #: src/hed/mcc/tls/MCCTLS.cpp:230 #, fuzzy msgid "VOMS attribute validation failed" msgstr "VOMS attribútumok értelmezése sikertelen" #: src/hed/mcc/tls/MCCTLS.cpp:232 msgid "VOMS attribute is ignored due to processing/validation error" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:424 #, fuzzy, c-format msgid "Failed to establish connection: %s" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/hed/mcc/tls/MCCTLS.cpp:442 src/hed/mcc/tls/MCCTLS.cpp:524 #, c-format msgid "Peer name: %s" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:444 src/hed/mcc/tls/MCCTLS.cpp:526 #, c-format msgid "Identity name: %s" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:446 src/hed/mcc/tls/MCCTLS.cpp:528 #, c-format msgid "CA name: %s" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:452 msgid "Failed to process security attributes in TLS MCC for incoming message" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:461 msgid "Security check failed in TLS MCC for incoming message" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:534 msgid "Security check failed for outgoing TLS message" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:563 msgid "Security check failed for incoming TLS message" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:93 #, fuzzy msgid "" "Failed to allocate memory for certificate subject while matching policy." msgstr "publikus kulcs elérési útvonala" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:98 msgid "" "Failed to retrieve link to TLS stream. Additional policy matching is skipped." 
msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:128 #, c-format msgid "Certificate %s already expired" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:136 #, c-format msgid "Certificate %s will expire in %s" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:157 msgid "Failed to store application data" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:185 msgid "Failed to retrieve application data from OpenSSL" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:257 src/hed/mcc/tls/PayloadTLSMCC.cpp:351 msgid "Can not create the SSL Context object" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:270 src/hed/mcc/tls/PayloadTLSMCC.cpp:369 msgid "Can't set OpenSSL verify flags" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:289 src/hed/mcc/tls/PayloadTLSMCC.cpp:383 msgid "Can not create the SSL object" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:308 msgid "Failed to establish SSL connection" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:311 src/hed/mcc/tls/PayloadTLSMCC.cpp:398 #, c-format msgid "Using cipher: %s" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:395 msgid "Failed to accept SSL connection" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:448 #, c-format msgid "Failed to shut down SSL: %s" msgstr "" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:47 msgid "" "ArcAuthZ: failed to initiate all PDPs - this instance will be non-functional" msgstr "" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:70 msgid "PDP: missing name attribute" msgstr "" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:74 #, c-format msgid "PDP: %s (%s)" msgstr "" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:79 #, c-format msgid "PDP: %s (%s) can not be loaded" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluationCtx.cpp:251 #, fuzzy, c-format msgid "There are %d RequestItems" msgstr "%d darab publikus tanúsítvány van a válasz üzenetben" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:60 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:55 msgid "Can not parse classname for FunctionFactory from configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:68 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:63 msgid "Can not parse classname for AttributeFactory from configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:76 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:71 msgid "" "Can not parse classname for CombiningAlgorithmFactory from configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:84 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:79 msgid "Can not parse classname for Request from configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:93 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:88 msgid "Can not parse classname for Policy from configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:105 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:100 msgid "Can not dynamically produce AttributeFactory" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:110 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:105 msgid "Can not dynamically produce FnFactory" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:115 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:110 msgid "Can not dynamically produce AlgFacroty" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:126 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:31 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:37 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:121 msgid "Can not create PolicyStore object" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:177 src/hed/shc/test.cpp:183 #: src/hed/shc/testinterface_arc.cpp:102 #: src/hed/shc/testinterface_xacml.cpp:54 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:172 msgid "Can not dynamically produce Request" 
msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:261 #, c-format msgid "Result value (0=Permit, 1=Deny, 2=Indeterminate, 3=Not_Applicable): %d" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:110 msgid "Can not find ArcPDPContext" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:139 src/hed/shc/xacmlpdp/XACMLPDP.cpp:117 msgid "Evaluator does not support loadable Combining Algorithms" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:143 src/hed/shc/xacmlpdp/XACMLPDP.cpp:121 #, c-format msgid "Evaluator does not support specified Combining Algorithm - %s" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:155 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:84 #: src/hed/shc/gaclpdp/GACLPDP.cpp:118 src/hed/shc/test.cpp:94 #: src/hed/shc/testinterface_arc.cpp:37 src/hed/shc/testinterface_xacml.cpp:37 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:133 msgid "Can not dynamically produce Evaluator" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:158 msgid "Evaluator for ArcPDP was not loaded" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:165 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:57 #: src/hed/shc/gaclpdp/GACLPDP.cpp:128 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:87 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:143 src/tests/echo/echo.cpp:108 msgid "Missing security object in message" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:173 src/hed/shc/arcpdp/ArcPDP.cpp:181 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:137 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:143 #: src/hed/shc/gaclpdp/GACLPDP.cpp:136 src/hed/shc/gaclpdp/GACLPDP.cpp:144 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:95 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:103 #: src/tests/echo/echo.cpp:116 src/tests/echo/echo.cpp:123 msgid "Failed to convert security information to ARC request" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:189 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:150 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:111 #, c-format msgid "ARC Auth. 
request: %s" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:192 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:153 #: src/hed/shc/gaclpdp/GACLPDP.cpp:155 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:114 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:170 msgid "No requested security information was collected" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:199 msgid "Not authorized from arc.pdp - failed to get reponse from Evaluator" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:245 msgid "Authorized by arc.pdp" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:246 msgid "" "Not authorized by arc.pdp - some of the RequestItem elements do not satisfy " "Policy" msgstr "" #: src/hed/shc/arcpdp/ArcPolicy.cpp:56 src/hed/shc/arcpdp/ArcPolicy.cpp:70 #: src/hed/shc/gaclpdp/GACLPolicy.cpp:46 src/hed/shc/gaclpdp/GACLPolicy.cpp:59 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:48 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:64 msgid "Policy is empty" msgstr "" #: src/hed/shc/arcpdp/ArcPolicy.cpp:114 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:115 #, c-format msgid "PolicyId: %s Alg inside this policy is:-- %s" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:75 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:129 msgid "No delegation policies in this context and message - passing through" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:95 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:109 msgid "Failed to convert security information to ARC policy" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:116 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:123 #, c-format msgid "ARC delegation policy: %s" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:161 msgid "No authorization response was returned" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:164 #, c-format msgid "There are %d requests, which satisfy at least one policy" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:183 msgid "Delegation authorization passed" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:185 msgid "Delegation authorization failed" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:63 msgid "" "Missing CertificatePath element or ProxyPath element, or " " is missing" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:68 msgid "" "Missing or empty KeyPath element, or is missing" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:74 msgid "Missing or empty CertificatePath or CACertificatesDir element" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:81 #, c-format msgid "Delegation role not supported: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:90 #, c-format msgid "Delegation type not supported: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:115 msgid "Failed to acquire delegation context" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:143 #: src/hed/shc/delegationsh/DelegationSH.cpp:254 msgid "Can't create delegation context" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:149 msgid "Delegation handler with delegatee role starts to process" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:152 #: src/services/a-rex/arex.cpp:554 #: src/services/cache_service/CacheService.cpp:529 #: src/services/data-staging/DataDeliveryService.cpp:630 msgid "process: POST" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:159 #: src/services/a-rex/arex.cpp:561 #: src/services/cache_service/CacheService.cpp:538 #: src/services/data-staging/DataDeliveryService.cpp:639 #: src/services/wrappers/java/javawrapper.cpp:140 #: 
src/services/wrappers/python/pythonwrapper.cpp:413 msgid "input is not SOAP" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:166 #, c-format msgid "Delegation service: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:181 #: src/hed/shc/delegationsh/DelegationSH.cpp:188 #, c-format msgid "Can not get the delegation credential: %s from delegation service: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:204 #: src/hed/shc/delegationsh/DelegationSH.cpp:268 #, c-format msgid "Delegated credential identity: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:205 #, c-format msgid "" "The delegated credential got from delegation service is stored into path: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:218 msgid "The endpoint of delgation service should be configured" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:228 #: src/hed/shc/delegationsh/DelegationSH.cpp:340 msgid "Delegation handler with delegatee role ends" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:260 msgid "Delegation handler with delegator role starts to process" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:269 #, c-format msgid "The delegated credential got from path: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:290 #, c-format msgid "Can not create delegation crendential to delegation service: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:328 #: src/services/wrappers/java/javawrapper.cpp:144 msgid "output is not SOAP" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:339 #, c-format msgid "" "Succeeded to send DelegationService: %s and DelegationID: %s info to peer " "service" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:345 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:220 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:101 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:94 msgid "Incoming Message is not SOAP" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:352 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:341 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:123 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:108 msgid "Outgoing Message is not SOAP" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:356 msgid "Delegation handler is not configured" msgstr "" #: src/hed/shc/gaclpdp/GACLPDP.cpp:121 msgid "Evaluator for GACLPDP was not loaded" msgstr "" #: src/hed/shc/gaclpdp/GACLPDP.cpp:152 #, c-format msgid "GACL Auth. 
request: %s" msgstr "" #: src/hed/shc/gaclpdp/GACLPolicy.cpp:50 src/hed/shc/gaclpdp/GACLPolicy.cpp:63 msgid "Policy is not gacl" msgstr "" #: src/hed/shc/legacy/ConfigParser.cpp:13 #, fuzzy msgid "Configuration file not specified" msgstr "Nincs megadva feladat leírás bemeneti adatként" #: src/hed/shc/legacy/ConfigParser.cpp:18 #: src/hed/shc/legacy/ConfigParser.cpp:53 #: src/hed/shc/legacy/ConfigParser.cpp:58 msgid "Configuration file can not be read" msgstr "" #: src/hed/shc/legacy/ConfigParser.cpp:68 #, c-format msgid "Configuration file is broken - block name is too short: %s" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:31 src/hed/shc/legacy/LegacyPDP.cpp:96 msgid "Configuration file not specified in ConfigBlock" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:40 src/hed/shc/legacy/LegacyPDP.cpp:105 msgid "BlockName is empty" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:99 #, c-format msgid "Failed processing user mapping command: unixmap %s" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:106 #, c-format msgid "Failed processing user mapping command: unixgroup %s" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:113 #, c-format msgid "Failed processing user mapping command: unixvo %s" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:173 msgid "LegacyMap: no configurations blocks defined" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:195 src/hed/shc/legacy/LegacyPDP.cpp:210 msgid "" "LegacyPDP: there is no ARCLEGACY Sec Attribute defined. Probably ARC Legacy " "Sec Handler is not configured or failed." msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:200 src/hed/shc/legacy/LegacyPDP.cpp:215 msgid "LegacyPDP: ARC Legacy Sec Attribute not recognized." msgstr "" #: src/hed/shc/legacy/LegacyPDP.cpp:115 #, fuzzy, c-format msgid "Failed to parse configuration file %s" msgstr "voms szerver fájljának az elérési útvonala" #: src/hed/shc/legacy/LegacyPDP.cpp:121 #, c-format msgid "Block %s not found in configuration file %s" msgstr "" #: src/hed/shc/legacy/LegacySecHandler.cpp:36 #: src/hed/shc/legacy/LegacySecHandler.cpp:110 msgid "LegacySecHandler: configuration file not specified" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:146 src/hed/shc/legacy/arc_lcmaps.cpp:163 #, c-format msgid "" "Failed to convert GSI credential to GSS credential (major: %d, minor: %d)" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:172 src/hed/shc/legacy/arc_lcmaps.cpp:189 msgid "Missing subject name" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:177 src/hed/shc/legacy/arc_lcmaps.cpp:194 #, fuzzy msgid "Missing path of credentials file" msgstr "kérési fájl elérési útvonala" #: src/hed/shc/legacy/arc_lcas.cpp:182 msgid "Missing name of LCAS library" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:199 #, c-format msgid "Can't load LCAS library %s: %s" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:209 #, c-format msgid "Can't find LCAS functions in a library %s" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:219 msgid "Failed to initialize LCAS" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:234 msgid "Failed to terminate LCAS" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:199 msgid "Missing name of LCMAPS library" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:213 msgid "Can't read policy names" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:224 #, c-format msgid "Can't load LCMAPS library %s: %s" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:236 #, c-format msgid "Can't find LCMAPS functions in a library %s" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:248 msgid "LCMAPS has lcmaps_run" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:249 
msgid "LCMAPS has getCredentialData" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:253 msgid "Failed to initialize LCMAPS" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:296 #, c-format msgid "LCMAPS returned invalid GID: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:299 msgid "LCMAPS did not return any GID" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:302 #, c-format msgid "LCMAPS returned UID which has no username: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:305 #, c-format msgid "LCMAPS returned invalid UID: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:308 msgid "LCMAPS did not return any UID" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:318 msgid "Failed to terminate LCMAPS" msgstr "" #: src/hed/shc/legacy/auth.cpp:293 #, c-format msgid "Credentials stored in temporary file %s" msgstr "" #: src/hed/shc/legacy/auth.cpp:302 #, c-format msgid "Assigned to authorization group %s" msgstr "" #: src/hed/shc/legacy/auth.cpp:307 #, c-format msgid "Assigned to VO %s" msgstr "" #: src/hed/shc/legacy/auth_file.cpp:24 #: src/services/gridftpd/auth/auth_file.cpp:24 #, c-format msgid "Failed to read file %s" msgstr "" #: src/hed/shc/legacy/auth_ldap.cpp:22 #, fuzzy msgid "LDAP authorization is not supported anymore" msgstr "A fileset regisztcáció nem támogatott még" #: src/hed/shc/legacy/auth_plugin.cpp:44 src/hed/shc/legacy/unixmap.cpp:260 #: src/services/gridftpd/auth/auth_plugin.cpp:70 #: src/services/gridftpd/auth/unixmap.cpp:251 #, c-format msgid "Plugin %s returned: %u" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:48 src/hed/shc/legacy/unixmap.cpp:264 #, c-format msgid "Plugin %s timeout after %u seconds" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:51 src/hed/shc/legacy/unixmap.cpp:267 #, c-format msgid "Plugin %s failed to start" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:53 src/hed/shc/legacy/unixmap.cpp:269 #, c-format msgid "Plugin %s printed: %s" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:54 src/hed/shc/legacy/unixmap.cpp:270 #, c-format msgid "Plugin %s error: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:39 #: src/services/gridftpd/auth/auth_voms.cpp:45 msgid "Missing VO in configuration" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:44 #: src/services/gridftpd/auth/auth_voms.cpp:51 msgid "Missing group in configuration" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:49 #: src/services/gridftpd/auth/auth_voms.cpp:57 msgid "Missing role in configuration" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:54 #: src/services/gridftpd/auth/auth_voms.cpp:63 msgid "Missing capabilities in configuration" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:58 #: src/services/gridftpd/auth/auth_voms.cpp:67 #, fuzzy, c-format msgid "Rule: vo: %s" msgstr "Kérés: %s" #: src/hed/shc/legacy/auth_voms.cpp:59 #: src/services/gridftpd/auth/auth_voms.cpp:68 #, c-format msgid "Rule: group: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:60 #: src/services/gridftpd/auth/auth_voms.cpp:69 #, fuzzy, c-format msgid "Rule: role: %s" msgstr "modul neve: %s" #: src/hed/shc/legacy/auth_voms.cpp:61 #: src/services/gridftpd/auth/auth_voms.cpp:70 #, c-format msgid "Rule: capabilities: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:64 #: src/services/gridftpd/auth/auth_voms.cpp:77 #, c-format msgid "Match vo: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:71 #, c-format msgid "Matched: %s %s %s %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:86 #: src/services/gridftpd/auth/auth_voms.cpp:98 msgid "Matched nothing" msgstr "" #: src/hed/shc/legacy/simplemap.cpp:71 
src/hed/shc/legacy/simplemap.cpp:76 #: src/services/gridftpd/auth/simplemap.cpp:63 #: src/services/gridftpd/auth/simplemap.cpp:68 #, c-format msgid "SimpleMap: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:49 src/hed/shc/legacy/unixmap.cpp:54 #: src/hed/shc/legacy/unixmap.cpp:98 src/hed/shc/legacy/unixmap.cpp:103 #: src/hed/shc/legacy/unixmap.cpp:147 src/hed/shc/legacy/unixmap.cpp:152 #: src/services/gridftpd/auth/unixmap.cpp:47 #: src/services/gridftpd/auth/unixmap.cpp:52 #: src/services/gridftpd/auth/unixmap.cpp:96 #: src/services/gridftpd/auth/unixmap.cpp:101 #: src/services/gridftpd/auth/unixmap.cpp:145 #: src/services/gridftpd/auth/unixmap.cpp:150 msgid "User name mapping command is empty" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:61 src/services/gridftpd/auth/unixmap.cpp:59 #, c-format msgid "User name mapping has empty group: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:72 src/hed/shc/legacy/unixmap.cpp:121 #: src/hed/shc/legacy/unixmap.cpp:169 #: src/services/gridftpd/auth/unixmap.cpp:70 #: src/services/gridftpd/auth/unixmap.cpp:119 #: src/services/gridftpd/auth/unixmap.cpp:167 #, c-format msgid "User name mapping has empty command: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:110 #: src/services/gridftpd/auth/unixmap.cpp:108 #, c-format msgid "User name mapping has empty VO: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:159 #: src/services/gridftpd/auth/unixmap.cpp:157 #, c-format msgid "User name mapping has empty name: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:208 src/hed/shc/legacy/unixmap.cpp:213 #: src/hed/shc/legacy/unixmap.cpp:229 src/hed/shc/legacy/unixmap.cpp:235 #: src/services/gridftpd/auth/unixmap.cpp:212 #: src/services/gridftpd/auth/unixmap.cpp:217 #: src/services/gridftpd/auth/unixmap.cpp:233 msgid "Plugin (user mapping) command is empty" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:219 #: src/services/gridftpd/auth/unixmap.cpp:223 #, c-format msgid "Plugin (user mapping) timeout is not a number: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:223 #: src/services/gridftpd/auth/unixmap.cpp:227 #, c-format msgid "Plugin (user mapping) timeout is wrong number: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:257 #: src/services/gridftpd/auth/unixmap.cpp:248 #, c-format msgid "Plugin %s returned too much: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:278 msgid "User subject match is missing user subject." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:282 #: src/services/gridftpd/auth/unixmap.cpp:266 #, c-format msgid "Mapfile at %s can't be opened." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:305 #: src/services/gridftpd/auth/unixmap.cpp:290 msgid "User pool call is missing user subject." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:310 #: src/services/gridftpd/auth/unixmap.cpp:295 #, c-format msgid "User pool at %s can't be opened." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:315 #: src/services/gridftpd/auth/unixmap.cpp:300 #, c-format msgid "User pool at %s failed to perform user mapping." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:332 #: src/services/gridftpd/auth/unixmap.cpp:317 #, c-format msgid "User name direct mapping is missing user name: %s." 
msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:48 msgid "Creating a pdpservice client" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:80 msgid "Arc policy can not been carried by SAML2.0 profile of XACML" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:152 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:185 msgid "Policy Decision Service invocation failed" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:170 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:204 msgid "Authorized from remote pdp service" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:171 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:205 msgid "Unauthorized from remote pdp service" msgstr "" #: src/hed/shc/saml2sso_assertionconsumersh/SAML2SSO_AssertionConsumerSH.cpp:69 msgid "Can not get SAMLAssertion SecAttr from message context" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:152 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:44 msgid "Missing or empty CertificatePath element" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:157 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:49 msgid "Missing or empty KeyPath element" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:163 msgid "" "Both of CACertificatePath and CACertificatesDir elements missing or empty" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:175 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:61 msgid "" "Missing or empty CertificatePath or CACertificatesDir element; will only " "check the signature, will not do message authentication" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:179 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:65 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:65 #, c-format msgid "Processing type not supported: %s" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:199 msgid "Failed to parse SAML Token from incoming SOAP" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:209 msgid "Failed to authenticate SAML Token inside the incoming SOAP" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:212 msgid "Succeeded to authenticate SAMLToken" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:275 #, fuzzy, c-format msgid "No response from AA service %s" msgstr "Nincs válasz a voms szervertől" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:279 #, c-format msgid "SOAP Request to AA service %s failed" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:287 msgid "Cannot find content under response soap message" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:291 msgid "Cannot find under response soap message:" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:308 msgid "The Response is not going to this end" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:315 msgid "The StatusCode is Success" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:335 msgid "Failed to generate SAML Token for outgoing SOAP" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:345 msgid "SAML Token handler is not configured" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:29 #, c-format msgid "Access list location: %s" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:39 msgid "" "No policy file or DNs specified for simplelist.pdp, please set location " "attribute or at least one DN element for simplelist PDP node in " "configuration." 
msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:42 #, fuzzy, c-format msgid "Subject to match: %s" msgstr "Tárgy: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:45 #, fuzzy, c-format msgid "Policy subject: %s" msgstr "Tárgy: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:47 #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:73 #, c-format msgid "Authorized from simplelist.pdp: %s" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:54 msgid "" "The policy file setup for simplelist.pdp does not exist, please check " "location attribute for simplelist PDP node in service configuration" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:61 #, fuzzy, c-format msgid "Policy line: %s" msgstr "Proxy típusa: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:79 #, c-format msgid "Not authorized from simplelist.pdp: %s" msgstr "" #: src/hed/shc/test.cpp:27 src/hed/shc/testinterface_arc.cpp:26 #: src/hed/shc/testinterface_xacml.cpp:26 msgid "Start test" msgstr "" #: src/hed/shc/test.cpp:101 msgid "Input request from a file: Request.xml" msgstr "" #: src/hed/shc/test.cpp:107 src/hed/shc/test.cpp:197 #: src/hed/shc/testinterface_arc.cpp:124 #, c-format msgid "There is %d subjects, which satisfy at least one policy" msgstr "" #: src/hed/shc/test.cpp:121 #, c-format msgid "Attribute Value (1): %s" msgstr "" #: src/hed/shc/test.cpp:132 msgid "Input request from code" msgstr "" #: src/hed/shc/test.cpp:211 #, c-format msgid "Attribute Value (2): %s" msgstr "" #: src/hed/shc/testinterface_arc.cpp:75 src/hed/shc/testinterface_xacml.cpp:46 msgid "Can not dynamically produce Policy" msgstr "" #: src/hed/shc/testinterface_arc.cpp:138 #, c-format msgid "Attribute Value inside Subject: %s" msgstr "" #: src/hed/shc/testinterface_arc.cpp:148 msgid "The request has passed the policy evaluation" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:43 msgid "Missing or empty PasswordSource element" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:54 #, c-format msgid "Password encoding type not supported: %s" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:59 msgid "Missing or empty Username element" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:79 msgid "The payload of incoming message is empty" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:84 msgid "Failed to cast PayloadSOAP from incoming payload" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:89 msgid "Failed to parse Username Token from incoming SOAP" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:95 msgid "Failed to authenticate Username Token inside the incoming SOAP" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:98 msgid "Succeeded to authenticate UsernameToken" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:108 msgid "The payload of outgoing message is empty" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:113 msgid "Failed to cast PayloadSOAP from outgoing payload" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:119 msgid "Failed to generate Username Token for outgoing SOAP" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:127 msgid "Username Token handler is not configured" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:81 msgid "Failed to parse X509 Token from incoming SOAP" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:85 msgid "Failed to verify X509 Token inside the incoming SOAP" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:89 msgid "Failed to 
authenticate X509 Token inside the incoming SOAP" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:92 msgid "Succeeded to authenticate X509Token" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:102 msgid "Failed to generate X509 Token for outgoing SOAP" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:112 msgid "X509 Token handler is not configured" msgstr "" #: src/hed/shc/xacmlpdp/XACMLApply.cpp:29 msgid "Can not create function: FunctionId does not exist" msgstr "" #: src/hed/shc/xacmlpdp/XACMLApply.cpp:33 #: src/hed/shc/xacmlpdp/XACMLTarget.cpp:40 #, c-format msgid "Can not create function %s" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:88 msgid "Can not find XACMLPDPContext" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:136 msgid "Evaluator for XACMLPDP was not loaded" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:151 src/hed/shc/xacmlpdp/XACMLPDP.cpp:159 msgid "Failed to convert security information to XACML request" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:167 #, c-format msgid "XACML request: %s" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:179 msgid "Authorized from xacml.pdp" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:180 msgid "UnAuthorized from xacml.pdp" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:55 msgid "Can not find element with proper namespace" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:132 msgid "No target available inside the policy" msgstr "" #: src/hed/shc/xacmlpdp/XACMLRequest.cpp:34 msgid "Request is empty" msgstr "" #: src/hed/shc/xacmlpdp/XACMLRequest.cpp:39 msgid "Can not find element with proper namespace" msgstr "" #: src/hed/shc/xacmlpdp/XACMLRule.cpp:35 msgid "Invalid Effect" msgstr "" #: src/hed/shc/xacmlpdp/XACMLRule.cpp:48 msgid "No target available inside the rule" msgstr "" #: src/libs/data-staging/DTR.cpp:86 src/libs/data-staging/DTR.cpp:90 #, c-format msgid "Could not handle endpoint %s" msgstr "" #: src/libs/data-staging/DTR.cpp:100 msgid "Source is the same as destination" msgstr "" #: src/libs/data-staging/DTR.cpp:174 #, fuzzy, c-format msgid "Invalid ID: %s" msgstr "Érvénytelen URL: %s" #: src/libs/data-staging/DTR.cpp:211 #, c-format msgid "%s->%s" msgstr "" #: src/libs/data-staging/DTR.cpp:330 #, c-format msgid "No callback for %s defined" msgstr "" #: src/libs/data-staging/DTR.cpp:345 #, c-format msgid "NULL callback for %s" msgstr "" #: src/libs/data-staging/DTR.cpp:348 #, c-format msgid "Request to push to unknown owner - %u" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:48 #: src/libs/data-staging/DataDelivery.cpp:72 msgid "Received invalid DTR" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:54 #, c-format msgid "Delivery received new DTR %s with source: %s, destination: %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:68 msgid "Received no DTR" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:80 #, c-format msgid "Cancelling DTR %s with source: %s, destination: %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:91 #, c-format msgid "DTR %s requested cancel but no active transfer" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:147 #, c-format msgid "Cleaning up after failure: deleting %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:188 #: src/libs/data-staging/DataDelivery.cpp:263 #: src/libs/data-staging/DataDelivery.cpp:303 #: src/libs/data-staging/DataDelivery.cpp:323 msgid "Failed to delete delivery object or deletion timed out" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:254 #, c-format msgid "Transfer finished: %llu bytes transferred %s" msgstr "" 
#: src/libs/data-staging/DataDelivery.cpp:329 msgid "Data delivery loop exited" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:137 #, c-format msgid "Bad checksum format %s" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:210 #, c-format msgid "DataDelivery: %s" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:222 #, c-format msgid "DataStagingDelivery exited with code %i" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:241 #, c-format msgid "Transfer killed after %i seconds without communication" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:67 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:315 #, c-format msgid "Connecting to Delivery service at %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:94 #, c-format msgid "Failed to set up credential delegation with %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:106 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:332 #, c-format msgid "Could not connect to service %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:114 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:340 #, fuzzy, c-format msgid "No SOAP response from Delivery service %s" msgstr "Nincs válasz a voms szervertől" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:128 #, fuzzy, c-format msgid "Failed to start transfer request: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:135 #, c-format msgid "Bad format in XML response from service at %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:143 #, c-format msgid "Could not make new transfer request: %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:148 #, c-format msgid "Started remote Delivery at %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:181 #, fuzzy, c-format msgid "Failed to send cancel request: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:188 #, fuzzy msgid "Failed to cancel: No SOAP response" msgstr "Nincs SOAP-os válasz" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:202 #, fuzzy, c-format msgid "Failed to cancel transfer request: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:209 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:290 #, c-format msgid "Bad format in XML response: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:216 #, fuzzy, c-format msgid "Failed to cancel: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:260 #, fuzzy msgid "No SOAP response from delivery service" msgstr "Nincs válasz a voms szervertől" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:281 #, fuzzy, c-format msgid "Failed to query state: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:355 #, c-format msgid "SOAP fault from delivery service at %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:363 #, c-format msgid "Bad format in XML response from delivery service at %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:371 #, c-format msgid "Error pinging delivery service at %s: %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:379 #, c-format msgid "Dir %s allowed at service %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:473 #, c-format msgid "" "DataDelivery log tail:\n" "%s" msgstr "" #: 
src/libs/data-staging/DataDeliveryRemoteComm.cpp:486 #, fuzzy msgid "Failed locating credentials" msgstr "Nem sikerült listázni a meta adatokat" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:491 #, fuzzy msgid "Failed to initiate client connection" msgstr "Nem sikerült betölteni a konfigurációt" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:497 msgid "Client connection has no entry point" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:134 msgid "Unexpected arguments" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:137 msgid "Source URL missing" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:140 #, fuzzy msgid "Destination URL missing" msgstr "Célállomás: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:144 #, fuzzy, c-format msgid "Source URL not valid: %s" msgstr "A lekérdezés nem XML helyes" #: src/libs/data-staging/DataStagingDelivery.cpp:148 #, fuzzy, c-format msgid "Destination URL not valid: %s" msgstr "Célállomás: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:205 #, c-format msgid "Unknown transfer option: %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:230 #, c-format msgid "Source URL not supported: %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:235 #: src/libs/data-staging/DataStagingDelivery.cpp:254 msgid "No credentials supplied" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:249 #, fuzzy, c-format msgid "Destination URL not supported: %s" msgstr "A fileset regisztcáció nem támogatott még" #: src/libs/data-staging/DataStagingDelivery.cpp:298 #, c-format msgid "Will calculate %s checksum" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:309 msgid "Cannot use supplied --size option" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:458 #, c-format msgid "Checksum mismatch between calculated checksum %s and source checksum %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:468 #, fuzzy, c-format msgid "Failed cleaning up destination %s" msgstr "Nem sikerült feloldani a célállomást" #: src/libs/data-staging/Processor.cpp:60 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:418 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:435 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:331 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:348 #: src/services/cache_service/CacheService.cpp:114 msgid "Error creating cache" msgstr "" #: src/libs/data-staging/Processor.cpp:86 #, c-format msgid "Forcing re-download of file %s" msgstr "" #: src/libs/data-staging/Processor.cpp:103 #, c-format msgid "Will wait around %is" msgstr "" #: src/libs/data-staging/Processor.cpp:123 #, c-format msgid "Force-checking source of cache file %s" msgstr "" #: src/libs/data-staging/Processor.cpp:126 #, c-format msgid "Source check requested but failed: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:146 msgid "Permission checking failed, will try downloading without using cache" msgstr "" #: src/libs/data-staging/Processor.cpp:177 #, c-format msgid "Will download to cache file %s" msgstr "" #: src/libs/data-staging/Processor.cpp:199 msgid "Looking up source replicas" msgstr "" #: src/libs/data-staging/Processor.cpp:217 #: src/libs/data-staging/Processor.cpp:329 #, c-format msgid "Skipping replica on local host %s" msgstr "" #: src/libs/data-staging/Processor.cpp:225 #: src/libs/data-staging/Processor.cpp:337 #, c-format msgid "No locations left for %s" msgstr "" #: src/libs/data-staging/Processor.cpp:248 #: src/libs/data-staging/Processor.cpp:496 #, 
fuzzy msgid "Resolving destination replicas" msgstr "Nem sikerült feloldani a célállomást" #: src/libs/data-staging/Processor.cpp:266 msgid "No locations for destination different from source found" msgstr "" #: src/libs/data-staging/Processor.cpp:278 msgid "Pre-registering destination in index service" msgstr "" #: src/libs/data-staging/Processor.cpp:305 msgid "Resolving source replicas in bulk" msgstr "" #: src/libs/data-staging/Processor.cpp:319 #, c-format msgid "No replicas found for %s" msgstr "" #: src/libs/data-staging/Processor.cpp:361 #, c-format msgid "Checking %s" msgstr "" #: src/libs/data-staging/Processor.cpp:370 #: src/libs/data-staging/Processor.cpp:429 msgid "Metadata of replica and index service differ" msgstr "" #: src/libs/data-staging/Processor.cpp:378 #, c-format msgid "Failed checking source replica %s: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:405 msgid "Querying source replicas in bulk" msgstr "" #: src/libs/data-staging/Processor.cpp:417 #, c-format msgid "Failed checking source replica: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:423 msgid "Failed checking source replica" msgstr "" #: src/libs/data-staging/Processor.cpp:464 msgid "Finding existing destination replicas" msgstr "" #: src/libs/data-staging/Processor.cpp:476 #, c-format msgid "Failed to delete replica %s: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:490 #, fuzzy, c-format msgid "Unregistering %s" msgstr "Figyelmen kívül hagyás: %s" #: src/libs/data-staging/Processor.cpp:501 #, fuzzy msgid "Pre-registering destination" msgstr "Nem sikerült feloldani a célállomást" #: src/libs/data-staging/Processor.cpp:507 #, c-format msgid "Failed to pre-clean destination: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:531 msgid "Preparing to stage source" msgstr "" #: src/libs/data-staging/Processor.cpp:544 #, c-format msgid "Source is not ready, will wait %u seconds" msgstr "" #: src/libs/data-staging/Processor.cpp:550 msgid "No physical files found for source" msgstr "" #: src/libs/data-staging/Processor.cpp:569 #, fuzzy msgid "Preparing to stage destination" msgstr "Nem sikerült feloldani a célállomást" #: src/libs/data-staging/Processor.cpp:582 #, c-format msgid "Destination is not ready, will wait %u seconds" msgstr "" #: src/libs/data-staging/Processor.cpp:588 msgid "No physical files found for destination" msgstr "" #: src/libs/data-staging/Processor.cpp:615 msgid "Releasing source" msgstr "" #: src/libs/data-staging/Processor.cpp:619 #, c-format msgid "There was a problem during post-transfer source handling: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:624 #, fuzzy msgid "Releasing destination" msgstr "Nem sikerült feloldani a célállomást" #: src/libs/data-staging/Processor.cpp:628 #, c-format msgid "" "There was a problem during post-transfer destination handling after error: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:632 #, c-format msgid "Error with post-transfer destination handling: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:659 msgid "Removing pre-registered destination in index service" msgstr "" #: src/libs/data-staging/Processor.cpp:662 #, c-format msgid "" "Failed to unregister pre-registered destination %s: %s. 
You may need to " "unregister it manually" msgstr "" #: src/libs/data-staging/Processor.cpp:668 msgid "Registering destination replica" msgstr "" #: src/libs/data-staging/Processor.cpp:671 #, fuzzy, c-format msgid "Failed to register destination replica: %s" msgstr "Nem támogatott url: %s" #: src/libs/data-staging/Processor.cpp:674 #, c-format msgid "" "Failed to unregister pre-registered destination %s. You may need to " "unregister it manually" msgstr "" #: src/libs/data-staging/Processor.cpp:705 msgid "Error creating cache. Stale locks may remain." msgstr "" #: src/libs/data-staging/Processor.cpp:740 #, c-format msgid "Linking/copying cached file to %s" msgstr "" #: src/libs/data-staging/Processor.cpp:761 #, fuzzy, c-format msgid "Failed linking cache file to %s" msgstr "Nem sikerült listázni a fájlokat" #: src/libs/data-staging/Processor.cpp:765 #, c-format msgid "Error linking cache file to %s." msgstr "" #: src/libs/data-staging/Processor.cpp:787 #: src/libs/data-staging/Processor.cpp:794 msgid "Adding to bulk request" msgstr "" #: src/libs/data-staging/Scheduler.cpp:174 #, c-format msgid "Using next %s replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:181 #, c-format msgid "No more %s replicas" msgstr "" #: src/libs/data-staging/Scheduler.cpp:183 msgid "Will clean up pre-registered destination" msgstr "" #: src/libs/data-staging/Scheduler.cpp:187 msgid "Will release cache locks" msgstr "" #: src/libs/data-staging/Scheduler.cpp:190 msgid "Moving to end of data staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:199 #, c-format msgid "Source is mapped to %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:203 msgid "Cannot link to source which can be modified, will copy instead" msgstr "" #: src/libs/data-staging/Scheduler.cpp:213 msgid "Cannot link to a remote destination. Will not use mapped URL" msgstr "" #: src/libs/data-staging/Scheduler.cpp:216 msgid "Linking mapped file" msgstr "" #: src/libs/data-staging/Scheduler.cpp:223 #, c-format msgid "Failed to create link: %s. 
Will not use mapped URL" msgstr "" #: src/libs/data-staging/Scheduler.cpp:235 msgid "Linking mapped file - can't link on Windows" msgstr "" #: src/libs/data-staging/Scheduler.cpp:251 #, c-format msgid "" "Scheduler received new DTR %s with source: %s, destination: %s, assigned to " "transfer share %s with priority %d" msgstr "" #: src/libs/data-staging/Scheduler.cpp:258 msgid "" "File is not cacheable, was requested not to be cached or no cache available, " "skipping cache check" msgstr "" #: src/libs/data-staging/Scheduler.cpp:264 msgid "File is cacheable, will check cache" msgstr "" #: src/libs/data-staging/Scheduler.cpp:267 #: src/libs/data-staging/Scheduler.cpp:293 #, c-format msgid "File is currently being cached, will wait %is" msgstr "" #: src/libs/data-staging/Scheduler.cpp:286 msgid "Timed out while waiting for cache lock" msgstr "" #: src/libs/data-staging/Scheduler.cpp:297 msgid "Checking cache again" msgstr "" #: src/libs/data-staging/Scheduler.cpp:317 #, fuzzy msgid "Destination file is in cache" msgstr "Célállomás: %s" #: src/libs/data-staging/Scheduler.cpp:321 msgid "Source and/or destination is index service, will resolve replicas" msgstr "" #: src/libs/data-staging/Scheduler.cpp:324 msgid "" "Neither source nor destination are index services, will skip resolving " "replicas" msgstr "" #: src/libs/data-staging/Scheduler.cpp:334 msgid "Problem with index service, will release cache lock" msgstr "" #: src/libs/data-staging/Scheduler.cpp:338 msgid "Problem with index service, will proceed to end of data staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:348 msgid "Checking source file is present" msgstr "" #: src/libs/data-staging/Scheduler.cpp:356 msgid "Error with source file, moving to next replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:378 #, c-format msgid "Replica %s has long latency, trying next replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:380 #, c-format msgid "No more replicas, will use %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:383 #, c-format msgid "Checking replica %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:393 msgid "Overwrite requested - will pre-clean destination" msgstr "" #: src/libs/data-staging/Scheduler.cpp:396 msgid "No overwrite requested or allowed, skipping pre-cleaning" msgstr "" #: src/libs/data-staging/Scheduler.cpp:404 msgid "Pre-clean failed, will still try to copy" msgstr "" #: src/libs/data-staging/Scheduler.cpp:411 #, fuzzy msgid "Source or destination requires staging" msgstr "A feladat megszakítása sikertelen" #: src/libs/data-staging/Scheduler.cpp:415 msgid "No need to stage source or destination, skipping staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:445 msgid "Staging request timed out, will release request" msgstr "" #: src/libs/data-staging/Scheduler.cpp:449 msgid "Querying status of staging request" msgstr "" #: src/libs/data-staging/Scheduler.cpp:458 #, fuzzy msgid "Releasing requests" msgstr "Nem sikerült elküldeni a kérést" #: src/libs/data-staging/Scheduler.cpp:475 msgid "DTR is ready for transfer, moving to delivery queue" msgstr "" #: src/libs/data-staging/Scheduler.cpp:490 #, c-format msgid "Transfer failed: %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:500 msgid "Releasing request(s) made during staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:503 msgid "Neither source nor destination were staged, skipping releasing requests" msgstr "" #: src/libs/data-staging/Scheduler.cpp:515 msgid "Trying next replica" msgstr "" #: 
src/libs/data-staging/Scheduler.cpp:519 #, c-format msgid "Will %s in destination index service" msgstr "" #: src/libs/data-staging/Scheduler.cpp:523 msgid "Destination is not index service, skipping replica registration" msgstr "" #: src/libs/data-staging/Scheduler.cpp:536 msgid "Error registering replica, moving to end of data staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:544 msgid "Will process cache" msgstr "" #: src/libs/data-staging/Scheduler.cpp:548 msgid "File is not cacheable, skipping cache processing" msgstr "" #: src/libs/data-staging/Scheduler.cpp:562 msgid "Cancellation complete" msgstr "" #: src/libs/data-staging/Scheduler.cpp:576 msgid "Will wait 10s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:582 msgid "Error in cache processing, will retry without caching" msgstr "" #: src/libs/data-staging/Scheduler.cpp:591 msgid "Will retry without caching" msgstr "" #: src/libs/data-staging/Scheduler.cpp:609 msgid "Proxy has expired" msgstr "" #: src/libs/data-staging/Scheduler.cpp:620 #, c-format msgid "%i retries left, will wait until %s before next attempt" msgstr "" #: src/libs/data-staging/Scheduler.cpp:636 msgid "Out of retries" msgstr "" #: src/libs/data-staging/Scheduler.cpp:638 msgid "Permanent failure" msgstr "" #: src/libs/data-staging/Scheduler.cpp:644 msgid "Finished successfully" msgstr "" #: src/libs/data-staging/Scheduler.cpp:654 msgid "Returning to generator" msgstr "" #: src/libs/data-staging/Scheduler.cpp:820 #, c-format msgid "File is smaller than %llu bytes, will use local delivery" msgstr "" #: src/libs/data-staging/Scheduler.cpp:874 #, c-format msgid "Delivery service at %s can copy to %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:882 #, c-format msgid "Delivery service at %s can copy from %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:895 msgid "Could not find any useable delivery service, forcing local transfer" msgstr "" #: src/libs/data-staging/Scheduler.cpp:911 #, c-format msgid "Not using delivery service at %s because it is full" msgstr "" #: src/libs/data-staging/Scheduler.cpp:938 #, c-format msgid "Not using delivery service %s due to previous failure" msgstr "" #: src/libs/data-staging/Scheduler.cpp:948 msgid "No remote delivery services are useable, forcing local delivery" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1149 msgid "Cancelling active transfer" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1165 msgid "Processing thread timed out. 
Restarting DTR" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1233 #, fuzzy msgid "Will use bulk request" msgstr "Nem sikerült elküldeni a kérést" #: src/libs/data-staging/Scheduler.cpp:1255 msgid "No delivery endpoints available, will try later" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1274 msgid "Scheduler received NULL DTR" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1284 msgid "Scheduler received invalid DTR" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1373 msgid "Scheduler starting up" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1374 msgid "Scheduler configuration:" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1375 #, c-format msgid " Pre-processor slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1376 #, c-format msgid " Delivery slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1377 #, c-format msgid " Post-processor slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1378 #, c-format msgid " Emergency slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1379 #, c-format msgid " Prepared slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1380 #, c-format msgid "" " Shares configuration:\n" "%s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1383 msgid " Delivery service: LOCAL" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1384 #, c-format msgid " Delivery service: %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1389 msgid "Failed to create DTR dump thread" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1406 #: src/services/data-staging/DataDeliveryService.cpp:513 #, c-format msgid "DTR %s cancelled" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:15 msgid "Shutting down scheduler" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:17 msgid "Scheduler stopped, exiting" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:23 #, c-format msgid "Received DTR %s back from scheduler in state %s" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:32 #, fuzzy msgid "Generator started" msgstr "A feltöltő elindult" #: src/libs/data-staging/examples/Generator.cpp:33 msgid "Starting DTR threads" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:46 #, fuzzy msgid "No valid credentials found, exiting" msgstr "Nem tudom másolni a %s fájlt: Nem érvényes tanúsítvány" #: src/libs/data-staging/examples/Generator.cpp:58 #, fuzzy, c-format msgid "Problem creating dtr (source %s, destination %s)" msgstr "Nem sikerült feloldani a célállomást" #: src/services/a-rex/arex.cpp:446 #, c-format msgid "Using cached local account '%s'" msgstr "" #: src/services/a-rex/arex.cpp:457 msgid "Will not map to 'root' account by default" msgstr "" #: src/services/a-rex/arex.cpp:470 msgid "No local account name specified" msgstr "" #: src/services/a-rex/arex.cpp:473 #, c-format msgid "Using local account '%s'" msgstr "" #: src/services/a-rex/arex.cpp:494 msgid "Failed to acquire grid-manager's configuration" msgstr "" #: src/services/a-rex/arex.cpp:519 #: src/services/cache_service/CacheService.cpp:572 #: src/services/data-staging/DataDeliveryService.cpp:687 #, c-format msgid "SOAP operation is not supported: %s" msgstr "" #: src/services/a-rex/arex.cpp:532 #, fuzzy, c-format msgid "Connection from %s: %s" msgstr "Funkció: %s" #: src/services/a-rex/arex.cpp:534 #, c-format msgid "process: method: %s" msgstr "" #: src/services/a-rex/arex.cpp:535 #, c-format msgid "process: endpoint: %s" msgstr "" #: src/services/a-rex/arex.cpp:546 #, c-format msgid "process: id: %s" msgstr "" #: 
src/services/a-rex/arex.cpp:547 #, c-format msgid "process: subpath: %s" msgstr "" #: src/services/a-rex/arex.cpp:567 #: src/services/cache_service/CacheService.cpp:546 #: src/services/data-staging/DataDeliveryService.cpp:647 #: src/tests/echo/echo.cpp:98 #, c-format msgid "process: request=%s" msgstr "" #: src/services/a-rex/arex.cpp:572 #: src/services/cache_service/CacheService.cpp:551 #: src/services/data-staging/DataDeliveryService.cpp:652 #: src/tests/count/count.cpp:69 msgid "input does not define operation" msgstr "" #: src/services/a-rex/arex.cpp:575 #: src/services/cache_service/CacheService.cpp:554 #: src/services/data-staging/DataDeliveryService.cpp:655 #: src/tests/count/count.cpp:72 #, c-format msgid "process: operation: %s" msgstr "" #: src/services/a-rex/arex.cpp:591 src/services/a-rex/arex.cpp:804 #: src/services/a-rex/arex.cpp:823 src/services/a-rex/arex.cpp:837 #: src/services/a-rex/arex.cpp:847 src/services/a-rex/arex.cpp:862 #: src/services/cache_service/CacheService.cpp:588 #: src/services/data-staging/DataDeliveryService.cpp:703 msgid "Security Handlers processing failed" msgstr "" #: src/services/a-rex/arex.cpp:598 msgid "Can't obtain configuration" msgstr "" #: src/services/a-rex/arex.cpp:614 msgid "process: factory endpoint" msgstr "" #: src/services/a-rex/arex.cpp:798 src/services/a-rex/arex.cpp:815 #: src/services/cache_service/CacheService.cpp:583 #: src/services/data-staging/DataDeliveryService.cpp:698 #: src/tests/echo/echo.cpp:166 #, c-format msgid "process: response=%s" msgstr "" #: src/services/a-rex/arex.cpp:800 msgid "process: response is not SOAP" msgstr "" #: src/services/a-rex/arex.cpp:830 msgid "process: GET" msgstr "" #: src/services/a-rex/arex.cpp:831 #, c-format msgid "GET: id %s path %s" msgstr "" #: src/services/a-rex/arex.cpp:854 msgid "process: PUT" msgstr "" #: src/services/a-rex/arex.cpp:869 #, c-format msgid "process: method %s is not supported" msgstr "" #: src/services/a-rex/arex.cpp:872 msgid "process: method is not defined" msgstr "" #: src/services/a-rex/arex.cpp:908 msgid "Failed to run Grid Manager thread" msgstr "" #: src/services/a-rex/arex.cpp:972 #, c-format msgid "Storing configuration in temporary file %s" msgstr "" #: src/services/a-rex/arex.cpp:977 msgid "Failed to process service configuration" msgstr "" #: src/services/a-rex/arex.cpp:985 #, c-format msgid "Failed to process configuration in %s" msgstr "" #: src/services/a-rex/arex.cpp:991 msgid "No control directory set in configuration" msgstr "" #: src/services/a-rex/arex.cpp:995 msgid "No session directory set in configuration" msgstr "" #: src/services/a-rex/arex.cpp:999 msgid "No LRMS set in configuration" msgstr "" #: src/services/a-rex/arex.cpp:1004 #, c-format msgid "Failed to create control directory %s" msgstr "" #: src/services/a-rex/arex.cpp:1033 #, c-format msgid "Provided LRMSName is not a valid URL: %s" msgstr "" #: src/services/a-rex/arex.cpp:1035 msgid "" "No LRMSName is provided. This is needed if you wish to completely comply " "with the BES specifications." 
msgstr "" #: src/services/a-rex/cachecheck.cpp:34 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:539 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:843 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:424 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:337 #, c-format msgid "Error with cache configuration: %s" msgstr "" #: src/services/a-rex/cachecheck.cpp:50 #: src/services/cache_service/CacheService.cpp:305 msgid "Error with cache configuration" msgstr "" #: src/services/a-rex/cachecheck.cpp:75 #: src/services/cache_service/CacheService.cpp:135 #: src/services/cache_service/CacheService.cpp:330 #, c-format msgid "Looking up URL %s" msgstr "" #: src/services/a-rex/cachecheck.cpp:77 #: src/services/cache_service/CacheService.cpp:144 #, fuzzy, c-format msgid "Cache file is %s" msgstr "Fájl feltöltve %s" #: src/services/a-rex/change_activity_status.cpp:33 #, c-format msgid "" "ChangeActivityStatus: request = \n" "%s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:38 msgid "ChangeActivityStatus: no ActivityIdentifier found" msgstr "" #: src/services/a-rex/change_activity_status.cpp:47 msgid "ChangeActivityStatus: EPR contains no JobID" msgstr "" #: src/services/a-rex/change_activity_status.cpp:57 #, c-format msgid "ChangeActivityStatus: no job found: %s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:73 msgid "ChangeActivityStatus: missing NewStatus element" msgstr "" #: src/services/a-rex/change_activity_status.cpp:87 msgid "ChangeActivityStatus: Failed to accept delegation" msgstr "" #: src/services/a-rex/change_activity_status.cpp:103 msgid "ChangeActivityStatus: old BES state does not match" msgstr "" #: src/services/a-rex/change_activity_status.cpp:110 msgid "ChangeActivityStatus: old A-REX state does not match" msgstr "" #: src/services/a-rex/change_activity_status.cpp:137 msgid "ChangeActivityStatus: Failed to update credentials" msgstr "" #: src/services/a-rex/change_activity_status.cpp:143 msgid "ChangeActivityStatus: Failed to resume job" msgstr "" #: src/services/a-rex/change_activity_status.cpp:149 #, c-format msgid "ChangeActivityStatus: State change not allowed: from %s/%s to %s/%s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:168 #, c-format msgid "" "ChangeActivityStatus: response = \n" "%s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:213 #: src/services/a-rex/change_activity_status.cpp:217 #, c-format msgid "EMIES:PauseActivity: job %s - %s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:262 #: src/services/a-rex/change_activity_status.cpp:266 #, c-format msgid "EMIES:ResumeActivity: job %s - %s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:311 #: src/services/a-rex/change_activity_status.cpp:316 #, c-format msgid "EMIES:CancelActivity: job %s - %s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:324 #, c-format msgid "job %s cancelled successfully" msgstr "" #: src/services/a-rex/change_activity_status.cpp:370 #: src/services/a-rex/change_activity_status.cpp:385 #, c-format msgid "EMIES:WipeActivity: job %s - %s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:389 #, c-format msgid "job %s (will be) cleaned successfully" msgstr "" #: src/services/a-rex/change_activity_status.cpp:435 #: src/services/a-rex/change_activity_status.cpp:440 #, c-format msgid "EMIES:RestartActivity: job %s - %s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:444 #, c-format msgid "job %s restarted successfully" msgstr "" #: 
src/services/a-rex/create_activity.cpp:35 #, c-format msgid "" "CreateActivity: request = \n" "%s" msgstr "" #: src/services/a-rex/create_activity.cpp:40 msgid "CreateActivity: no job description found" msgstr "" #: src/services/a-rex/create_activity.cpp:48 msgid "CreateActivity: max jobs total limit reached" msgstr "" #: src/services/a-rex/create_activity.cpp:67 msgid "CreateActivity: Failed to accept delegation" msgstr "" #: src/services/a-rex/create_activity.cpp:100 #, c-format msgid "CreateActivity: Failed to create new job: %s" msgstr "" #: src/services/a-rex/create_activity.cpp:102 msgid "CreateActivity: Failed to create new job" msgstr "" #: src/services/a-rex/create_activity.cpp:117 msgid "CreateActivity finished successfully" msgstr "" #: src/services/a-rex/create_activity.cpp:121 #, c-format msgid "" "CreateActivity: response = \n" "%s" msgstr "" #: src/services/a-rex/create_activity.cpp:159 #, c-format msgid "" "EMIES:CreateActivity: request = \n" "%s" msgstr "" #: src/services/a-rex/create_activity.cpp:165 msgid "EMIES:CreateActivity: too many activity descriptions" msgstr "" #: src/services/a-rex/create_activity.cpp:175 msgid "EMIES:CreateActivity: no job description found" msgstr "" #: src/services/a-rex/create_activity.cpp:182 msgid "EMIES:CreateActivity: max jobs total limit reached" msgstr "" #: src/services/a-rex/create_activity.cpp:208 #, c-format msgid "ES:CreateActivity: Failed to create new job: %s" msgstr "" #: src/services/a-rex/create_activity.cpp:224 msgid "EMIES:CreateActivity finished successfully" msgstr "" #: src/services/a-rex/create_activity.cpp:225 #, fuzzy, c-format msgid "New job accepted with id %s" msgstr "Feladat elküldve ezzel az azonítóval: %s" #: src/services/a-rex/create_activity.cpp:229 #, c-format msgid "" "EMIES:CreateActivity: response = \n" "%s" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:55 msgid "Wiping and re-creating whole storage" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:210 #: src/services/a-rex/delegation/DelegationStore.cpp:311 #, c-format msgid "DelegationStore: TouchConsumer failed to create file %s" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:271 msgid "DelegationStore: PeriodicCheckConsumers failed to resume iterator" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:291 #, c-format msgid "" "DelegationStore: PeriodicCheckConsumers failed to remove old delegation %s - " "%s" msgstr "" #: src/services/a-rex/get.cpp:112 #, c-format msgid "Get: there is no job %s - %s" msgstr "" #: src/services/a-rex/get.cpp:123 #, c-format msgid "Get: can't process file %s" msgstr "" #: src/services/a-rex/get.cpp:167 #, c-format msgid "Head: there is no job %s - %s" msgstr "" #: src/services/a-rex/get.cpp:178 #, c-format msgid "Head: can't process file %s" msgstr "" #: src/services/a-rex/get.cpp:190 #, c-format msgid "http_get: start=%llu, end=%llu, burl=%s, hpath=%s" msgstr "" #: src/services/a-rex/get.cpp:357 #, fuzzy msgid "Failed to extract credential information" msgstr "Nem sikerült betölteni a konfigurációt" #: src/services/a-rex/get.cpp:360 #, c-format msgid "Checking cache permissions: DN: %s" msgstr "" #: src/services/a-rex/get.cpp:361 #, c-format msgid "Checking cache permissions: VO: %s" msgstr "" #: src/services/a-rex/get.cpp:363 #, c-format msgid "Checking cache permissions: VOMS attr: %s" msgstr "" #: src/services/a-rex/get.cpp:373 #, c-format msgid "Cache access allowed to %s by DN %s" msgstr "" #: src/services/a-rex/get.cpp:376 #, c-format msgid "DN %s doesn't 
match %s" msgstr "" #: src/services/a-rex/get.cpp:379 #, c-format msgid "Cache access allowed to %s by VO %s" msgstr "" #: src/services/a-rex/get.cpp:382 #, c-format msgid "VO %s doesn't match %s" msgstr "" #: src/services/a-rex/get.cpp:388 src/services/a-rex/get.cpp:407 #, c-format msgid "Bad credential value %s in cache access rules" msgstr "" #: src/services/a-rex/get.cpp:396 src/services/a-rex/get.cpp:415 #, c-format msgid "VOMS attr %s matches %s" msgstr "" #: src/services/a-rex/get.cpp:397 #, c-format msgid "Cache access allowed to %s by VO %s and role %s" msgstr "" #: src/services/a-rex/get.cpp:400 src/services/a-rex/get.cpp:419 #, c-format msgid "VOMS attr %s doesn't match %s" msgstr "" #: src/services/a-rex/get.cpp:416 #, c-format msgid "Cache access allowed to %s by VO %s and group %s" msgstr "" #: src/services/a-rex/get.cpp:422 #, c-format msgid "Unknown credential type %s for URL pattern %s" msgstr "" #: src/services/a-rex/get.cpp:428 #, c-format msgid "No match found in cache access rules for %s" msgstr "" #: src/services/a-rex/get.cpp:438 #, c-format msgid "Get from cache: Looking in cache for %s" msgstr "" #: src/services/a-rex/get.cpp:441 #, fuzzy, c-format msgid "Get from cache: Invalid URL %s" msgstr "Érvénytelen URL: %s" #: src/services/a-rex/get.cpp:458 msgid "Get from cache: Error in cache configuration" msgstr "" #: src/services/a-rex/get.cpp:467 msgid "Get from cache: File not in cache" msgstr "" #: src/services/a-rex/get.cpp:470 #, c-format msgid "Get from cache: could not access cached file: %s" msgstr "" #: src/services/a-rex/get.cpp:480 msgid "Get from cache: Cached file is locked" msgstr "" #: src/services/a-rex/get_activity_documents.cpp:29 #, c-format msgid "" "GetActivityDocuments: request = \n" "%s" msgstr "" #: src/services/a-rex/get_activity_documents.cpp:40 msgid "GetActivityDocuments: non-AREX job requested" msgstr "" #: src/services/a-rex/get_activity_documents.cpp:49 #: src/services/a-rex/get_activity_documents.cpp:60 #, c-format msgid "GetActivityDocuments: job %s - %s" msgstr "" #: src/services/a-rex/get_activity_documents.cpp:72 #, c-format msgid "" "GetActivityDocuments: response = \n" "%s" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:35 #, c-format msgid "" "GetActivityStatuses: request = \n" "%s" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:50 #, c-format msgid "GetActivityStatuses: unknown verbosity level requested: %s" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:62 #, c-format msgid "GetActivityStatuses: job %s - can't understand EPR" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:71 #, c-format msgid "GetActivityStatuses: job %s - %s" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:105 #, c-format msgid "" "GetActivityStatuses: response = \n" "%s" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:306 #: src/services/a-rex/get_activity_statuses.cpp:400 #, c-format msgid "EMIES:GetActivityStatus: job %s - %s" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:526 #, c-format msgid "EMIES:GetActivityInfo: job %s - failed to retrieve GLUE2 information" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:578 #: src/services/a-rex/get_activity_statuses.cpp:585 #, c-format msgid "EMIES:NotifyService: job %s - %s" msgstr "" #: src/services/a-rex/get_factory_attributes_document.cpp:37 #, c-format msgid "" "GetFactoryAttributesDocument: request = \n" "%s" msgstr "" #: src/services/a-rex/get_factory_attributes_document.cpp:62 #, c-format msgid "" 
"GetFactoryAttributesDocument: response = \n" "%s" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:98 #, c-format msgid "" "Cannot create directories for log file %s. Messages will be logged to this " "log" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:104 #, c-format msgid "" "Cannot open cache log file %s: %s. Cache cleaning messages will be logged to " "this log" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:110 #: src/services/a-rex/grid-manager/log/JobLog.cpp:139 #, c-format msgid "Running command %s" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:114 msgid "Failed to start cache clean script" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:115 msgid "Cache cleaning script failed" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:177 msgid "Starting jobs processing thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:178 #, c-format msgid "Used configuration file %s" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:187 #, c-format msgid "" "Error adding communication interface in %s. Maybe another instance of A-REX " "is already running." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:190 #, c-format msgid "" "Error adding communication interface in %s. Maybe permissions are not " "suitable." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:199 #, c-format msgid "" "Error initiating delegation database in %s. Maybe permissions are not " "suitable. Returned error is: %s." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:211 msgid "Failed to start new thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:268 msgid "Failed to start new thread: cache won't be cleaned" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:273 msgid "Picking up left jobs" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:277 msgid "Starting data staging threads" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:281 msgid "Failed to start data staging threads, exiting Grid Manager thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:290 msgid "Starting jobs' monitoring" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:303 #, fuzzy, c-format msgid "Failed to open heartbeat file %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/grid-manager/GridManager.cpp:335 #, c-format msgid "Orphan delegation lock detected (%s) - cleaning" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:340 msgid "Failed to obtain delegation locks for cleaning orphaned locks" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:346 msgid "Waking up" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:349 msgid "Stopping jobs processing thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:352 msgid "Exiting jobs processing thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:365 msgid "Shutting down job processing" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:370 msgid "Shutting down data staging threads" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:24 #, c-format msgid "" "Usage: %s -I -U -P -L [-c " "] [-p ] [-d ]" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:74 msgid "Job ID argument is required." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:80 msgid "Path to user's proxy file should be specified." 
msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:86 msgid "User name should be specified." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:92 msgid "Path to .local job status file is required." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:100 msgid "Generating ceID prefix from hostname automatically" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:103 msgid "" "Cannot determine hostname from gethostname() to generate ceID automatically." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:112 #, c-format msgid "ceID prefix is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:120 #, c-format msgid "Getting currect timestamp for BLAH parser log: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:129 msgid "Parsing .local file to obtain job-specific identifiers and info" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:139 #, c-format msgid "globalid is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:142 #, c-format msgid "headnode is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:145 #, c-format msgid "interface is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:149 msgid "There is no local LRMS ID. Message will not be written to BLAH log." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:152 #, c-format msgid "localid is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:155 #, c-format msgid "queue name is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:158 #, c-format msgid "owner subject is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:160 msgid "" "Job did not finished successfully. Message will not be written to BLAH log." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:165 #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:119 msgid "Can not read information from the local job status file" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:181 #, c-format msgid "" "Unsupported submission interface %s. Seems arc-blahp-logger need to be " "updated accordingly :-) Please submit the bug to bugzilla." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:191 msgid "Parsing VOMS AC to get FQANs information" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:210 msgid "No FQAN found. 
Using NULL as userFQAN value" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:224 #, c-format msgid "Assembling BLAH parser log entry: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:229 #, c-format msgid "Writing the info the the BLAH parser log: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:237 #, fuzzy, c-format msgid "Cannot open BLAH log file '%s'" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:24 #, c-format msgid "" "Usage: %s [-N] -P -L [-c ] [-d " "]" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:90 msgid "User proxy file is required but is not specified" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:95 msgid "Local job status file is required" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:114 #, c-format msgid "Making the decision for the queue %s" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:127 #, fuzzy, c-format msgid "Can not parse the configuration file %s" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:144 #, c-format msgid "Can not find queue '%s' in the configuration file" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:168 msgid "No access policy to check, returning success" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:182 #, fuzzy, c-format msgid "CA certificates directory %s does not exist" msgstr "Az XML konfigurációs fájl: %s nem létezik" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:202 #, fuzzy msgid "User proxy certificate is not valid" msgstr "Proxy készítés sikertelen: A publikus kulcs érvénytelen." #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:207 #, c-format msgid "Getting VOMS AC for: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:221 #, c-format msgid "Checking a match for '%s'" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:228 #, c-format msgid "FQAN '%s' IS a match to '%s'" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:232 #, c-format msgid "Queue '%s' usage is prohibited to FQAN '%s' by the site access policy" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:236 #, c-format msgid "FQAN '%s' IS NOT a match to '%s'" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:242 #, c-format msgid "" "Queue '%s' usage with provided FQANs is prohibited by the site access policy" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:35 #, c-format msgid "Missing cancel-%s-job - job cancellation may not work" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:39 #, c-format msgid "Missing submit-%s-job - job submission to LRMS may not work" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:43 #, c-format msgid "Missing scan-%s-job - may miss when job finished executing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:57 #, c-format msgid "Wrong option in %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:71 #, fuzzy, c-format msgid "Can't read configuration file at %s" msgstr "Nem tudom olvasni a célállomásokat a fájlból: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:80 #, fuzzy, c-format msgid "Can't interpret configuration file %s as XML" msgstr "voms szerver fájljának az elérési útvonala" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:118 #, fuzzy, 
c-format msgid "Can't recognize type of configuration file at %s" msgstr "Nem tudom olvasni a célállomásokat a fájlból: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:121 msgid "Could not determine configuration type or configuration is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:169 msgid "No queue name given in queue block name" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:176 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:499 #, fuzzy msgid "forcedefaultvoms parameter is empty" msgstr "Túl sok paraméter" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:185 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:198 #, fuzzy msgid "authorizedvo parameter is empty" msgstr "Túl sok paraméter" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:276 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:604 #, c-format msgid "Wrong number in jobreport_period: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:280 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:608 #, c-format msgid "Wrong number in jobreport_period: %d, minimal value: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:299 msgid "Missing file name in jobreport_logfile" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:316 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:323 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:330 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:337 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:344 #, c-format msgid "Wrong number in maxjobs: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:351 #, c-format msgid "Wrong number in wakeupperiod: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:360 #, fuzzy msgid "mail parameter is empty" msgstr "Túl sok paraméter" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:366 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:370 msgid "Wrong number in defaultttl command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:376 msgid "Wrong number in maxrerun command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:382 msgid "defaultlrms is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:398 msgid "State name for plugin is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:402 msgid "Options for plugin are missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:405 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:726 #, c-format msgid "Failed to register plugin for state %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:413 msgid "Wrong number for timeout in plugin command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:429 msgid "Wrong option in fixdirectories" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:441 msgid "Wrong option in delegationdb" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:456 msgid "Session root directory is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:459 msgid "Junk in sessiondir command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:471 msgid "Missing directory in control command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:476 msgid "" "'control' configuration option is no longer supported, please use " "'controldir' instead" msgstr "" #: 
src/services/a-rex/grid-manager/conf/CoreConfig.cpp:481 msgid "User for helper program is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:484 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:875 msgid "Only user '.' for helper program is supported" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:487 msgid "Helper program is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:632 msgid "Value for maxJobsTracked is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:636 msgid "Value for maxJobsRun is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:640 msgid "Value for maxJobsTotal is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:644 msgid "Value for maxJobsPerDN is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:648 msgid "Value for wakeupPeriod is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:652 msgid "Value for maxScripts is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:664 msgid "serviceMail is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:678 msgid "Type in LRMS is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:691 msgid "LRMS is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:704 msgid "State name for authPlugin is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:709 msgid "Command for authPlugin is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:723 #, c-format msgid "Registering plugin for state %s; options: %s; command: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:739 msgid "Command for localCred is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:745 msgid "Timeout for localCred is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:750 msgid "Timeout for localCred is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:778 msgid "Control element must be present" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:783 msgid "controlDir is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:790 msgid "sessionRootDir is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:800 msgid "Attribute drain for sessionRootDir is incorrect boolean" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:809 msgid "The fixDirectories element is incorrect value" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:816 msgid "The delegationDB element is incorrect value" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:821 msgid "The maxReruns element is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:825 msgid "The noRootPower element is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:829 msgid "The defaultTTL element is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:833 msgid "The defaultTTR element is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:866 msgid "Command in helperUtility is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:871 msgid "Username in helperUtility is empty" msgstr "" #: 
src/services/a-rex/grid-manager/conf/GMConfig.cpp:106 #, c-format msgid "\tSession root dir : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:107 #, c-format msgid "\tControl dir : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:108 #, c-format msgid "\tdefault LRMS : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:109 #, c-format msgid "\tdefault queue : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:110 #, c-format msgid "\tdefault ttl : %u" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:115 msgid "No valid caches found in configuration, caching is disabled" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:120 #, c-format msgid "\tCache : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:122 #, c-format msgid "\tCache link dir : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:126 #, c-format msgid "\tRemote cache : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:128 #, c-format msgid "\tRemote cache link: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:130 msgid "\tCache cleaning enabled" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:131 msgid "\tCache cleaning disabled" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:308 msgid "" "Globus location variable substitution is not supported anymore. Please " "specify path directly." msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:412 #, c-format msgid "Starting helper process: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:422 #, c-format msgid "Helper process start failed: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:429 #, c-format msgid "Stopping helper process %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:14 #, c-format msgid "wrong boolean in %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:20 #, c-format msgid "wrong number in %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:49 msgid "Can't read configuration file" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:58 #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:32 msgid "Can't interpret configuration file as XML" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:62 #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:69 msgid "Configuration error" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:75 #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:103 msgid "Can't recognize type of configuration file" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:95 msgid "Bad number in maxdelivery" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:101 msgid "Bad number in maxemergency" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:107 msgid "Bad number in maxprocessor" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:113 msgid "Bad number in maxprepared" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:119 msgid "Bad number in maxtransfertries" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:128 msgid "Bad number in speedcontrol" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:139 #, c-format msgid "Bad number in definedshare %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:148 #, c-format msgid "Bad URL in deliveryservice: %s" 
msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:159 msgid "Bad number in remotesizelimit" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:189 msgid "Bad value for debug" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:203 #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:315 msgid "Bad URL in acix_endpoint" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:258 #, c-format msgid "Bad URL in deliveryService: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:272 msgid "Bad value for logLevel" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:25 msgid "Can't open configuration file" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:47 msgid "Value for 'link' element in mapURL is incorrect" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:53 msgid "Missing 'from' element in mapURL" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:57 msgid "Missing 'to' element in mapURL" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:84 msgid "Not enough parameters in copyurl" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:93 msgid "Not enough parameters in linkurl" msgstr "" #: src/services/a-rex/grid-manager/files/ControlFileContent.cpp:179 #, c-format msgid "Wrong directory in %s" msgstr "" #: src/services/a-rex/grid-manager/files/ControlFileHandling.cpp:100 #, c-format msgid "Failed setting file owner: %s" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:33 msgid "gm-delegations-converter changes format of delegation database." msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:38 #: src/services/a-rex/grid-manager/gm_jobs.cpp:106 #: src/services/a-rex/grid-manager/gm_kick.cpp:24 msgid "use specified configuration file" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:39 #: src/services/a-rex/grid-manager/gm_jobs.cpp:107 #: src/services/a-rex/grid-manager/gm_kick.cpp:25 msgid "file" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:43 #: src/services/a-rex/grid-manager/gm_jobs.cpp:111 msgid "read information from specified control directory" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:44 #: src/services/a-rex/grid-manager/gm_jobs.cpp:112 msgid "dir" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:48 msgid "convert from specified input database format [bdb|sqlite]" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:49 #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:54 msgid "database format" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:53 msgid "convert into specified output database format [bdb|sqlite]" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:96 msgid "gm-jobs displays information on current jobs in the system." 
msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:101 msgid "display more information on each job" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:116 msgid "print summary of jobs in each transfer share" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:121 msgid "do not print list of jobs" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:126 msgid "do not print number of jobs in each state" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:131 msgid "print state of the service" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:136 msgid "show only jobs of user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:137 #: src/services/a-rex/grid-manager/gm_jobs.cpp:147 #: src/services/a-rex/grid-manager/gm_jobs.cpp:157 #, fuzzy msgid "dn" msgstr "n" #: src/services/a-rex/grid-manager/gm_jobs.cpp:141 msgid "request to cancel job(s) with specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:142 #: src/services/a-rex/grid-manager/gm_jobs.cpp:152 #: src/services/a-rex/grid-manager/gm_jobs.cpp:162 #: src/services/a-rex/grid-manager/gm_jobs.cpp:172 msgid "id" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:146 msgid "" "request to cancel jobs belonging to user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:151 msgid "request to clean job(s) with specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:156 msgid "" "request to clean jobs belonging to user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:161 msgid "show only jobs with specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:166 msgid "print list of available delegation IDs" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:171 msgid "print delegation token of specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:176 msgid "print main delegation token of specified Job ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:177 msgid "job id" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:181 msgid "" "output requested elements (jobs list, delegation ids and tokens) to file" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:182 #, fuzzy msgid "file name" msgstr "fájlnév" #: src/services/a-rex/grid-manager/gm_kick.cpp:18 msgid "" "gm-kick wakes up the A-REX corresponding to the given control file. If no " "file is given it uses the control directory found in the configuration file." msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:39 #, c-format msgid "Failed to acquire source: %s" msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:44 #, c-format msgid "Failed to resolve %s" msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:61 #, c-format msgid "Failed to check %s" msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:75 msgid "job_description_file [proxy_file]" msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:76 msgid "" "inputcheck checks that input files specified in the job description are " "available and accessible using the credentials in the given proxy file." 
msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:88 msgid "Wrong number of arguments given" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:105 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1009 #, fuzzy msgid "Failed to run plugin" msgstr "Nem sikerült elküldeni a kérést" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:109 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1015 #, fuzzy, c-format msgid "Plugin failed: %s" msgstr "Fájl feltöltve %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:158 msgid "empty argument to remotegmdirs" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:165 msgid "bad arguments to remotegmdirs" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:177 msgid "Wrong number in maxjobdesc" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:185 #: src/services/gridftpd/fileplugin/fileplugin.cpp:186 #, c-format msgid "Unsupported configuration command: %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:199 #, c-format msgid "Mapped user:group (%s:%s) not found" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:202 msgid "Failed processing grid-manager configuration" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:205 msgid "" "Cannot use multiple session directories and remotegmdirs at the same time" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:228 msgid "This user is denied to submit new jobs." msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:239 msgid "No control or remote control directories defined in configuration" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:290 #, c-format msgid "Job submission user: %s (%i:%i)" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:294 msgid "Job plugin was not initialised" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:312 msgid "No delegated credentials were passed" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:385 #, c-format msgid "Cancelling job %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:436 #, c-format msgid "Cleaning job %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:472 msgid "Request to open file with storing in progress" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:505 #: src/services/gridftpd/fileplugin/fileplugin.cpp:343 #, c-format msgid "Retrieving file %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:556 #, c-format msgid "Accepting submission of new job or modification request: %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:577 #: src/services/gridftpd/fileplugin/fileplugin.cpp:383 #: src/services/gridftpd/fileplugin/fileplugin.cpp:420 #, c-format msgid "Storing file %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:599 #, c-format msgid "Unknown open mode %i" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:716 #, c-format msgid "action(%s) != request" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:767 #, fuzzy msgid "Failed writing job description" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:954 msgid "Failed writing local description" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:964 msgid "Failed writing 
ACL" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:980 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:987 #: src/services/a-rex/job.cpp:587 #, c-format msgid "Failed to run external plugin: %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:991 #: src/services/a-rex/job.cpp:591 #, c-format msgid "Plugin response: %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:993 msgid "Failed to run external plugin" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1025 #, c-format msgid "Failed to create session directory %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1035 msgid "Failed writing status" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1049 #, fuzzy, c-format msgid "Failed to lock delegated credentials: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1300 #, c-format msgid "Renewing proxy for job %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1312 #, c-format msgid "New proxy expires at %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1315 msgid "Failed to write 'local' information" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1321 msgid "Failed to renew proxy" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1324 msgid "New proxy expiry time is not later than old proxy, not renewing proxy" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1367 #, c-format msgid "Checking file %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1415 msgid "ID contains forbidden characters" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1462 #: src/services/a-rex/job.cpp:781 #, c-format msgid "Failed to create file in %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1487 msgid "Out of tries while allocating new job ID" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1568 #, c-format msgid "Failed to read job's local description for job %s from %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1650 msgid "No non-draining control or session directories available" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1666 #, c-format msgid "Using control directory %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1667 #, c-format msgid "Using session directory %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:26 #, c-format msgid "Failed to read job's ACL for job %s from %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:69 #, c-format msgid "Failed to parse user policy for job %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:74 #, c-format msgid "Failed to load policy evaluator for policy of job %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:126 #, c-format msgid "Unknown ACL policy %s for job %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:121 msgid "Exiting Generator thread" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:211 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:225 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:238 msgid "DTRGenerator is not running!" 
msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:214 #, c-format msgid "Received DTR %s during Generator shutdown - may not be processed" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:314 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:324 #, c-format msgid "%s: Trying to remove job from data staging which is still active" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:333 #, c-format msgid "%s: Trying remove job from data staging which does not exist" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:344 #, fuzzy, c-format msgid "%s: Invalid DTR" msgstr "Érvénytelen URL: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:354 #, c-format msgid "%s: Received DTR %s to copy file %s in state %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:366 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:982 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:281 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:304 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:634 #, c-format msgid "%s: Failed reading local information" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:377 #, c-format msgid "%s: DTR %s to copy file %s failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:383 #, c-format msgid "%s: Cancelling other DTRs" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:394 #, c-format msgid "%s: DTR %s to copy to %s failed but is not mandatory" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:404 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:645 #, c-format msgid "%s: Failed to read list of output files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:418 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:555 #, c-format msgid "%s: Failed to read dynamic output files in %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:420 #, c-format msgid "%s: Going through files in list %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:424 #, c-format msgid "%s: Removing %s from dynamic output file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:428 #, c-format msgid "%s: Failed to write back dynamic output files in %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:444 #, c-format msgid "%s: Failed to write list of output files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:448 #, c-format msgid "%s: Failed to write list of output status files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:460 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:656 #, c-format msgid "%s: Failed to read list of input files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:479 #, fuzzy, c-format msgid "%s: Failed to write list of input files" msgstr "Nem sikerült listázni a fájlokat" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:491 #, c-format msgid "%s: Received DTR with two remote endpoints!" 
msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:502 #: src/services/cache_service/CacheServiceGenerator.cpp:108 #, fuzzy, c-format msgid "No active job id %s" msgstr "Feladat migrálásra került ezzel az azonosítóval: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:543 #, c-format msgid "%s: Failed to read list of output files, can't clean up session dir" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:567 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:578 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:692 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:817 #, c-format msgid "%s: Failed to clean up session dir" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:576 #, c-format msgid "%s: Failed to read list of input files, can't clean up session dir" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:588 #, c-format msgid "%s: All %s %s successfully" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:592 #, c-format msgid "%s: Some %s failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:607 #, c-format msgid "%s: Received data staging request to %s files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:667 #, c-format msgid "%s: Duplicate file in list of input files: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:714 #, c-format msgid "%s: Reading output files from user generated list in %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:716 #, c-format msgid "%s: Error reading user generated output file list in %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:744 #, c-format msgid "%s: Failed to list output directory %s: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:763 #, c-format msgid "%s: Adding new output file %s: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:785 #, c-format msgid "%s: Two identical output destinations: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:798 #, c-format msgid "%s: Cannot upload two different files %s and %s to same LFN: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:828 #, c-format msgid "%s: Received job in a bad state: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:901 #, c-format msgid "" "%s: Destination file %s was possibly left unfinished from previous A-REX " "run, will overwrite" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:988 #, c-format msgid "%s: Failed writing local information" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1005 #, c-format msgid "%s: Cancelling active DTRs" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1028 #, fuzzy, c-format msgid "%s: Can't read list of input files" msgstr "Nem tudom olvasni a célállomásokat a fájlból: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1043 #, c-format msgid "%s: Checking user uploadable file: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1048 #, fuzzy, c-format msgid "%s: User has uploaded file %s" msgstr "Fájl feltöltve %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1055 #, fuzzy, c-format msgid "%s: Failed writing changed input file." 
msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1059 #, c-format msgid "%s: Critical error for uploadable file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1076 #, c-format msgid "%s: Uploadable files timed out" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1132 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1158 #, c-format msgid "%s: Can't convert checksum %s to int for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1139 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1153 #, c-format msgid "%s: Can't convert filesize %s to int for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1148 #, c-format msgid "%s: Invalid size/checksum information (%s) for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1170 #, c-format msgid "%s: Invalid file: %s is too big." msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1186 #, c-format msgid "%s: Failed to switch user ID to %d/%d to read file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1192 #, c-format msgid "%s: Failed to open file %s for reading" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1200 #, c-format msgid "%s: Error accessing file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1212 #, c-format msgid "%s: Error reading file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1227 #, c-format msgid "%s: File %s has wrong checksum: %llu. Expected %lli" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1233 #, c-format msgid "%s: Checksum %llu verified for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1245 msgid "" "Found unfinished DTR transfers. It is possible the previous A-REX process " "did not shut down normally" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1252 #, c-format msgid "Found DTR %s for file %s left in transferring state from previous run" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:131 #, c-format msgid "Replacing queue '%s' with '%s'" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:225 #, c-format msgid "Bad name for stdout: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:233 #, c-format msgid "Bad name for stderr: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:291 #, c-format msgid "Bad name for runtime environment: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:336 msgid "Job description file could not be read." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:387 msgid "Bad name for executable: " msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:401 #, c-format msgid "Bad name for executable: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:107 #, c-format msgid "" "%s: Failed reading .local and changing state, job and A-REX may be left in " "an inconsistent state" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:169 #, c-format msgid "Current jobs in system (PREPARING to FINISHING) per-DN (%i entries)" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:171 #, c-format msgid "%s: %i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:177 #, c-format msgid "%s: Destroying" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:182 #, c-format msgid "%s: Can't read state - no comments, just cleaning" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:194 #, c-format msgid "%s: Cleaning control and session directories" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:200 #, c-format msgid "%s: This job may be still running - canceling" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:203 #, c-format msgid "%s: Cancellation failed (probably job finished) - cleaning anyway" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:210 #, c-format msgid "%s: Cancellation probably succeeded - cleaning" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:273 #, c-format msgid "%s: Failed writing list of output files: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:315 #, c-format msgid "%s: Failed creating grami file" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:319 #, c-format msgid "%s: Failed setting executable permissions" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:331 #, c-format msgid "%s: state SUBMIT: starting child: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:334 #, c-format msgid "%s: state CANCELING: starting child: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:336 #, c-format msgid "%s: Job has completed already. No action taken to cancel" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:347 #, c-format msgid "%s: Failed running submission process" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:349 #, c-format msgid "%s: Failed running cancellation process" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:355 #, c-format msgid "%s: LRMS scripts limit of %u is reached - suspending submit/cancel" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:374 #, c-format msgid "" "%s: Job submission to LRMS takes too long, but ID is already obtained. " "Pretending submission is done." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:380 #, c-format msgid "" "%s: Job cancellation takes too long, but diagnostic collection seems to be " "done. Pretending cancellation succeeded." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:388 #, c-format msgid "%s: Job submission to LRMS takes too long. Failing." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:394 #, c-format msgid "%s: Job cancellation takes too long. Failing." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:404 #, c-format msgid "%s: state SUBMIT: child exited with code %i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:409 #, c-format msgid "%s: state CANCELING: child exited with code %i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:417 #, c-format msgid "%s: Job submission to LRMS failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:420 #, c-format msgid "%s: Failed to cancel running job" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:434 #, c-format msgid "%s: Failed obtaining lrms id" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:447 #, c-format msgid "%s: Failed writing local information: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:457 #, c-format msgid "%s: state CANCELING: timeout waiting for cancellation" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:463 #, c-format msgid "%s: state CANCELING: job diagnostics collected" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:531 #, c-format msgid "%s: State: %s: still in data staging" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:550 #, c-format msgid "%s: Job is not allowed to be rerun anymore" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:560 #, c-format msgid "%s: Job failed in unknown state. Won't rerun." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:581 #, fuzzy, c-format msgid "%s: Reprocessing job description failed" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:588 #, c-format msgid "%s: Failed to read reprocessed list of output files" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:592 #, c-format msgid "%s: Failed to read reprocessed list of input files" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:676 #, c-format msgid "%s: Reading status of new job failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:689 #, c-format msgid "%s: State: ACCEPTED: parsing job description" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:691 #, c-format msgid "%s: Processing job description failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:708 #, c-format msgid "%s: %s: New job belongs to %i/%i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:718 #, c-format msgid "Failed to get DN information from .local file for job %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:732 #, c-format msgid "%s: State: ACCEPTED" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:739 #, c-format msgid "%s: State: ACCEPTED: dryrun" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:751 #, c-format msgid "%s: State: ACCEPTED: has process time %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:757 #, c-format msgid "%s: State: ACCEPTED: moving to PREPARING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:774 #, c-format msgid "%s: State: PREPARING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:778 #, c-format msgid "%s: Failed obtaining local job information." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:829 #, c-format msgid "%s: State: SUBMIT" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:844 #, c-format msgid "%s: State: CANCELING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:860 #, c-format msgid "%s: State: INLRMS" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:868 #, c-format msgid "%s: Job finished" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:872 #, c-format msgid "%s: State: INLRMS: exit message is %i %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:893 #, c-format msgid "%s: State: FINISHING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:922 #, c-format msgid "%s: Job is requested to clean - deleting" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:961 #, c-format msgid "%s: Can't rerun on request" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:963 #, c-format msgid "%s: Can't rerun on request - not a suitable state" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:973 #, c-format msgid "%s: Job is too old - deleting" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1012 #, c-format msgid "%s: Job is ancient - delete rest of information" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1041 #, c-format msgid "%s: Canceling job because of user request" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1107 #, c-format msgid "%s: Job failure detected" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1136 #, c-format msgid "%s: State: %s from %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1163 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1171 #, c-format msgid "%s: Plugin at state %s : %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1177 #, c-format msgid "%s: Plugin execution failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1202 #, c-format msgid "%s: Delete request due to internal problems" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1284 #, fuzzy, c-format msgid "Failed to move file %s to %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1292 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1375 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1464 #, c-format msgid "Failed reading control directory: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1334 #, c-format msgid "Failed reading control directory: %s: %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:153 #, c-format msgid "Invalid checksum in %s for %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:162 #, c-format msgid "Invalid file size in %s for %s " msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:182 #, c-format msgid "Invalid file: %s is too big." msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:196 #, fuzzy, c-format msgid "Error accessing file %s" msgstr "Nem tudom olvasni a forrásokat a fájlból: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:205 #, fuzzy, c-format msgid "Error reading file %s" msgstr "Nem tudom olvasni a forrásokat a fájlból: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:216 #, c-format msgid "File %s has wrong CRC." 
msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:240 #, fuzzy, c-format msgid "Failed downloading file %s - %s" msgstr "Nem sikerült listázni a fájlokat" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:246 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:132 #, fuzzy msgid "Retrying" msgstr "szöveg" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:249 #, fuzzy, c-format msgid "Downloaded file %s" msgstr "Fájl feltöltve %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:330 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:243 #, c-format msgid "Wrong number of threads: %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:336 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:249 #, fuzzy, c-format msgid "Wrong number of files: %s" msgstr "publikus kulcs elérési útvonala" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:342 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:358 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:365 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:372 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:379 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:255 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:271 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:278 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:285 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:292 #, fuzzy, c-format msgid "Bad number: %s" msgstr "osztály neve: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:346 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:352 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:259 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:265 msgid "Specified user can't be handled" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:384 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:297 #, fuzzy, c-format msgid "Unsupported option: %c" msgstr "Nem támogatott url: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:388 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:301 #, c-format msgid "Missing parameter for option %c" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:392 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:305 #, fuzzy msgid "Undefined processing error" msgstr "Felhasználó oldali hiba" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:399 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:312 #, fuzzy msgid "Missing job id" msgstr "Hiányzik az URL" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:401 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:314 #, fuzzy msgid "Missing control directory" msgstr "könyvtár" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:403 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:316 #, fuzzy msgid "Missing session directory" msgstr "könyvtár" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:446 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:359 #, c-format msgid "Minimal speed: %llu B/s during %i s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:448 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:361 #, c-format msgid "Minimal average speed: %llu B/s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:450 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:363 #, c-format msgid "Maximal inactivity time: %i s" msgstr "" #: 
src/services/a-rex/grid-manager/loaders/downloader.cpp:455 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:368 msgid "Won't use more than 10 threads" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:462 #, fuzzy msgid "Downloader started" msgstr "A feltöltő elindult" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:488 #, fuzzy msgid "Can't read list of input files" msgstr "Nem tudom olvasni a célállomásokat a fájlból: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:495 #, fuzzy, c-format msgid "Error: duplicate file in list of input files: %s" msgstr "Nem sikerült listázni a fájlokat" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:518 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:402 #, fuzzy msgid "Can't read list of output files" msgstr "Nem tudom olvasni a célállomásokat a fájlból: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:523 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:449 #, fuzzy msgid "Can't remove junk files" msgstr "proxy fájl elérési útvonala" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:531 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:463 #, fuzzy msgid "Can't read job local description" msgstr "Érvénytelen feladat leírás:" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:553 #, c-format msgid "Local source for download: %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:567 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:571 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:512 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:518 #, c-format msgid "Can't accept URL: %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:586 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:535 #, fuzzy, c-format msgid "Failed to initiate file transfer: %s - %s" msgstr "Nem sikerült elküldeni a kérést" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:603 #, fuzzy, c-format msgid "Downloaded %s" msgstr "Feltöltve %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:628 #, fuzzy, c-format msgid "Failed to download (but may be retried) %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:633 #, fuzzy, c-format msgid "Failed to download %s" msgstr "Nem sikerült elküldeni a kérést" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:642 msgid "Some downloads failed" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:646 msgid "Some downloads failed, but may be retried" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:650 #, fuzzy msgid "Failed writing changed input file" msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:662 #, fuzzy, c-format msgid "Checking user uploadable file: %s" msgstr "Fájl feltöltve %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:668 #, fuzzy, c-format msgid "User has uploaded file %s" msgstr "Fájl feltöltve %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:673 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:706 #, fuzzy msgid "Failed writing changed input file." 
msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:677 #, fuzzy, c-format msgid "Critical error for uploadable file %s" msgstr "Fájl feltöltve %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:694 #, c-format msgid "No changes in uploadable files for %u seconds" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:695 #, fuzzy msgid "Uploadable files timed out" msgstr "Fájl feltöltve %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:761 #, fuzzy, c-format msgid "Leaving downloader (%i)" msgstr "PythonBroker betöltése" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:126 #, fuzzy, c-format msgid "Failed uploading file %s - %s" msgstr "Nem sikerült listázni a fájlokat" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:135 #, c-format msgid "Uploaded file %s" msgstr "Fájl feltöltve %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:373 msgid "Uploader started" msgstr "A feltöltő elindult" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:409 #, c-format msgid "Reading output files from user generated list in %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:411 #, fuzzy, c-format msgid "Error reading user generated output file list in %s" msgstr "Nem tudom olvasni a forrásokat a fájlból: %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:424 #, fuzzy, c-format msgid "Two identical output destinations: %s" msgstr "Nem sikerült feloldani a célállomást" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:433 #, c-format msgid "Cannot upload two different files %s and %s to same LFN: %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:496 #, fuzzy, c-format msgid "Local destination for uploader %s" msgstr "Nem sikerült feloldani a célállomást" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:556 #, c-format msgid "Uploaded %s" msgstr "Feltöltve %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:580 #, fuzzy msgid "Failed writing output status file" msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:587 #, fuzzy, c-format msgid "Failed to upload (but may be retried) %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:591 #, fuzzy, c-format msgid "Failed to upload %s" msgstr "Nem sikerült elküldeni a kérést" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:600 msgid "Some uploads failed" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:616 #, fuzzy, c-format msgid "Writing back dynamic output file %s" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:618 #, c-format msgid "Failed to rewrite output file list %s. 
Job resuming may not work" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:630 msgid "Some uploads failed, but (some) may be retried" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:637 #, fuzzy msgid "Failed writing changed output file" msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:649 #, c-format msgid "Leaving uploader (%i)" msgstr "" #: src/services/a-rex/grid-manager/log/JobLog.cpp:118 #, fuzzy msgid ": Logger name is not specified" msgstr "Nincs megadva feladat leírás bemeneti adatként" #: src/services/a-rex/grid-manager/log/JobLog.cpp:130 msgid ": Failure creating slot for reporter child process" msgstr "" #: src/services/a-rex/grid-manager/log/JobLog.cpp:143 msgid ": Failure starting reporter child process" msgstr "" #: src/services/a-rex/grid-manager/log/JobsMetrics.cpp:130 #, c-format msgid ": Metrics tool returned error code %i: %s" msgstr "" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:29 msgid "Failed reading local information" msgstr "" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:79 #, c-format msgid "Running mailer command (%s)" msgstr "" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:81 msgid "Failed running mailer" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:71 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:24 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:55 #, c-format msgid "%s: Failure creating slot for child process" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:79 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:30 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:61 #, c-format msgid "%s: Failure creating data storage for child process" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:123 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:41 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:72 #, c-format msgid "%s: Failure starting child process" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:140 #, c-format msgid "%s: Failed to run plugin" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:143 #, c-format msgid "%s: Plugin failed" msgstr "" #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:46 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:77 #, c-format msgid "%s: Failure waiting for child process to finish" msgstr "" #: src/services/a-rex/information_collector.cpp:45 #, c-format msgid "Resource information provider: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:51 #, fuzzy msgid "Resource information provider failed" msgstr "Státusz lekérdezés sikertelen" #: src/services/a-rex/information_collector.cpp:55 #, c-format msgid "" "Resource information provider failed with exit status: %i\n" "%s" msgstr "" #: src/services/a-rex/information_collector.cpp:57 #, c-format msgid "" "Resource information provider log:\n" "%s" msgstr "" #: src/services/a-rex/information_collector.cpp:61 msgid "No new informational document assigned" msgstr "Nem jött létre új információs dokumentum" #: src/services/a-rex/information_collector.cpp:63 #, c-format msgid "Obtained XML: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:77 msgid "Informational document is empty" msgstr "" #: src/services/a-rex/information_collector.cpp:88 msgid "Passing service's information from collector to registrator" msgstr "" #: src/services/a-rex/information_collector.cpp:144 #, c-format msgid "" "Registered 
static information: \n" " doc: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:147 #, c-format msgid "" "Information Registered without static attributes: \n" " doc: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:324 msgid "OptimizedInformationContainer failed to create temporary file" msgstr "" #: src/services/a-rex/information_collector.cpp:327 #, c-format msgid "OptimizedInformationContainer created temporary file: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:333 msgid "" "OptimizedInformationContainer failed to store XML document to temporary file" msgstr "" #: src/services/a-rex/information_collector.cpp:342 msgid "OptimizedInformationContainer failed to parse XML" msgstr "" #: src/services/a-rex/information_collector.cpp:353 #, fuzzy msgid "OptimizedInformationContainer failed to rename temprary file" msgstr "" "Proxy készítés sikertelen: Nem sikerült leellenőrizni a publikus kulcsot" #: src/services/a-rex/job.cpp:53 #, c-format msgid "Cannot handle local user %s" msgstr "" #: src/services/a-rex/job.cpp:101 #, c-format msgid "%s: Failed to parse user policy" msgstr "" #: src/services/a-rex/job.cpp:106 #, c-format msgid "%s: Failed to load evaluator for user policy " msgstr "" #: src/services/a-rex/job.cpp:211 #, c-format msgid "%s: Unknown user policy '%s'" msgstr "" #: src/services/a-rex/job.cpp:473 src/services/a-rex/job.cpp:497 #, c-format msgid "Credential expires at %s" msgstr "" #: src/services/a-rex/job.cpp:475 src/services/a-rex/job.cpp:499 #, c-format msgid "Credential handling exception: %s" msgstr "" #: src/services/a-rex/job.cpp:789 #, c-format msgid "Out of tries while allocating new job ID in %s" msgstr "" #: src/services/a-rex/job.cpp:1006 msgid "No non-draining session dirs available" msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:89 #: src/services/a-rex/jura/CARDestination.cpp:49 #: src/services/a-rex/jura/LutsDestination.cpp:71 msgid "ServiceURL missing" msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:97 #: src/services/a-rex/jura/CARDestination.cpp:56 #: src/services/a-rex/jura/LutsDestination.cpp:89 #, c-format msgid "Protocol is %s, should be https" msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:133 #: src/services/a-rex/jura/ApelDestination.cpp:158 #: src/services/a-rex/jura/CARDestination.cpp:95 #: src/services/a-rex/jura/LutsDestination.cpp:120 #: src/services/a-rex/jura/LutsDestination.cpp:144 #, c-format msgid "Ignoring incomplete log file \"%s\"" msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:182 #: src/services/a-rex/jura/CARDestination.cpp:119 #: src/services/a-rex/jura/LutsDestination.cpp:166 #, c-format msgid "Logging UR set of %d URs." msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:185 #: src/services/a-rex/jura/CARDestination.cpp:122 #: src/services/a-rex/jura/Destination.cpp:61 #: src/services/a-rex/jura/LutsDestination.cpp:169 #, c-format msgid "UR set dump: %s" msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:271 #: src/services/a-rex/jura/Destination.cpp:96 #, c-format msgid "Backup file (%s) created." msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:316 #, c-format msgid "APEL message file (%s) created." msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:370 #: src/services/a-rex/jura/CARAggregation.cpp:208 #, c-format msgid "system retval: %d" msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:73 #, c-format msgid "Aggregation record (%s) not exist, initialize it..." 
msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:77 #, c-format msgid "Aggregation record (%s) initialization successful." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:82 #, c-format msgid "Some error happens during the Aggregation record (%s) initialization." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:88 #, c-format msgid "Aggregation record (%s) read from file successful." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:100 #, c-format msgid "Aggregation record (%s) stored successful." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:103 #, c-format msgid "Some error happens during the Aggregation record (%s) storing." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:156 #, c-format msgid "APEL aggregation message file (%s) created." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:228 #, fuzzy, c-format msgid "year: %s" msgstr "Név: %s" #: src/services/a-rex/jura/CARAggregation.cpp:229 #, fuzzy, c-format msgid "moth: %s" msgstr "Proxy elérési útvonal: %s" #: src/services/a-rex/jura/CARAggregation.cpp:230 #, fuzzy, c-format msgid "queue: %s" msgstr "Kérés: %s" #: src/services/a-rex/jura/CARAggregation.cpp:238 #: src/services/a-rex/jura/CARAggregation.cpp:404 #, fuzzy, c-format msgid "query: %s" msgstr "Kérés: %s" #: src/services/a-rex/jura/CARAggregation.cpp:241 #, c-format msgid "list size: %d" msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:359 #, c-format msgid "XML: %s" msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:361 msgid "UPDATE Aggregation Record called." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:415 #: src/services/a-rex/jura/CARAggregation.cpp:465 msgid "Does not sending empty aggregation/synch message." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:548 #, c-format msgid "synch message: %s" msgstr "" #: src/services/a-rex/jura/Destination.cpp:123 #, c-format msgid "Sent jobIDs: (nr. of job(s) %d)" msgstr "" #: src/services/a-rex/jura/Destinations.cpp:27 msgid "Unable to create adapter for the specific reporting destination type" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:92 #, c-format msgid "Insert filter element: <%s,%s>" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:105 #, c-format msgid "Not set filter for this URL (%s)." msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:111 #, c-format msgid "Current job's VO name: %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:114 #, c-format msgid "VO filter for host: %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:204 #: src/services/a-rex/jura/JobLogFile.cpp:698 #, c-format msgid "Read archive file %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:209 #: src/services/a-rex/jura/JobLogFile.cpp:703 #, c-format msgid "" "Could not read archive file %s for job log file %s (%s), generating new " "Usage Record" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:298 #: src/services/a-rex/jura/JobLogFile.cpp:827 #, c-format msgid "" "Missing required Usage Record element \"RecordIdentity\", in job log file %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:345 #, c-format msgid "VO (%s) not set for this (%s) SGAS server by VO filter." msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:378 #, c-format msgid "[VO filter] Job log will be not send. %s." 
msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:454 #: src/services/a-rex/jura/JobLogFile.cpp:970 #, c-format msgid "Missing required element \"Status\" in job log file %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:663 #: src/services/a-rex/jura/JobLogFile.cpp:1280 #, c-format msgid "Failed to create archive directory %s: %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:670 #: src/services/a-rex/jura/JobLogFile.cpp:1287 #, c-format msgid "Archiving Usage Record to file %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:676 #: src/services/a-rex/jura/JobLogFile.cpp:1293 #, c-format msgid "Failed to write file %s: %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:1062 #, c-format msgid "Missing required element \"CpuDuration\" in job log file %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:1082 #, c-format msgid "Set non standard bechmark type: %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:1095 #, c-format msgid "Ignored incoming benchmark value: %s, Use float value!" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:1324 #, c-format msgid "Failed to delete file %s:%s" msgstr "" #: src/services/a-rex/jura/LutsDestination.cpp:223 #, c-format msgid "UsageRecords registration response: %s" msgstr "" #: src/services/a-rex/jura/ReReporter.cpp:53 #, c-format msgid "Initialised, archived job log dir: %s" msgstr "" #: src/services/a-rex/jura/ReReporter.cpp:73 #, c-format msgid "Incoming time range: %s" msgstr "" #: src/services/a-rex/jura/ReReporter.cpp:92 #, c-format msgid "Requested time range: %d.%d.%d. 0:00 - %d.%d.%d. %d:%d " msgstr "" #: src/services/a-rex/jura/ReReporter.cpp:98 #: src/services/a-rex/jura/UsageReporter.cpp:45 msgid "Interactive mode." msgstr "" #: src/services/a-rex/jura/ReReporter.cpp:127 #: src/services/a-rex/jura/UsageReporter.cpp:68 #, c-format msgid "Could not open log directory \"%s\": %s" msgstr "" #: src/services/a-rex/jura/ReReporter.cpp:167 #: src/services/a-rex/jura/UsageReporter.cpp:193 #, c-format msgid "Error reading log directory \"%s\": %s" msgstr "" #: src/services/a-rex/jura/ReReporter.cpp:179 #: src/services/a-rex/jura/UsageReporter.cpp:205 #, c-format msgid "Finished, job log dir: %s" msgstr "" #: src/services/a-rex/jura/UsageReporter.cpp:39 #, c-format msgid "Initialised, job log dir: %s" msgstr "" #: src/services/a-rex/jura/UsageReporter.cpp:41 #, c-format msgid "Expiration time: %d seconds" msgstr "" #: src/services/a-rex/jura/UsageReporter.cpp:80 #, c-format msgid "Could not open output directory \"%s\": %s" msgstr "" #: src/services/a-rex/jura/UsageReporter.cpp:125 #, c-format msgid "Removing outdated job log file %s" msgstr "" #: src/services/a-rex/migrate_activity.cpp:37 #, c-format msgid "" "MigrateActivity: request = \n" "%s" msgstr "" #: src/services/a-rex/migrate_activity.cpp:42 msgid "MigrateActivitys: no ActivityIdentifier found" msgstr "" #: src/services/a-rex/migrate_activity.cpp:51 msgid "MigrateActivity: EPR contains no JobID" msgstr "" #: src/services/a-rex/migrate_activity.cpp:69 msgid "MigrateActivity: Failed to accept delegation" msgstr "" #: src/services/a-rex/migrate_activity.cpp:130 msgid "MigrateActivity: no job description found" msgstr "" #: src/services/a-rex/migrate_activity.cpp:153 #, c-format msgid "Migration XML sent to AREXJob: %s" msgstr "" #: src/services/a-rex/migrate_activity.cpp:180 #, c-format msgid "MigrateActivity: Failed to migrate new job: %s" msgstr "" #: src/services/a-rex/migrate_activity.cpp:182 msgid "MigrateActivity: Failed to migrate new job" msgstr "" #: 
src/services/a-rex/migrate_activity.cpp:198 msgid "MigrateActivity finished successfully" msgstr "" #: src/services/a-rex/migrate_activity.cpp:202 #, c-format msgid "" "MigrateActivity: response = \n" "%s" msgstr "" #: src/services/a-rex/put.cpp:37 #, c-format msgid "Put: there is no job: %s - %s" msgstr "" #: src/services/a-rex/put.cpp:43 #, c-format msgid "Put: there is no payload for file %s in job: %s" msgstr "" #: src/services/a-rex/put.cpp:56 #, c-format msgid "Put: unrecognized payload for file %s in job: %s" msgstr "" #: src/services/a-rex/put.cpp:76 src/services/a-rex/put.cpp:130 #, c-format msgid "Put: failed to create file %s for job %s - %s" msgstr "" #: src/services/a-rex/put.cpp:85 #, c-format msgid "Put: failed to set position of file %s for job %s to %Lu - %s" msgstr "" #: src/services/a-rex/put.cpp:91 #, c-format msgid "Put: failed to allocate memory for file %s in job %s" msgstr "" #: src/services/a-rex/put.cpp:103 #, c-format msgid "Put: failed to write to file %s for job %s - %s" msgstr "" #: src/services/a-rex/terminate_activities.cpp:29 #, c-format msgid "" "TerminateActivities: request = \n" "%s" msgstr "" #: src/services/a-rex/terminate_activities.cpp:40 msgid "TerminateActivities: non-AREX job requested" msgstr "" #: src/services/a-rex/terminate_activities.cpp:49 #, c-format msgid "TerminateActivities: job %s - %s" msgstr "" #: src/services/a-rex/terminate_activities.cpp:69 #, c-format msgid "" "TerminateActivities: response = \n" "%s" msgstr "" #: src/services/a-rex/test.cpp:34 src/tests/count/test_service.cpp:25 #: src/tests/echo/test.cpp:23 src/tests/echo/test_service.cpp:22 msgid "Creating service side chain" msgstr "" #: src/services/a-rex/test.cpp:37 src/tests/count/test_service.cpp:28 #: src/tests/echo/test.cpp:26 src/tests/echo/test_service.cpp:25 msgid "Failed to load service configuration" msgstr "" #: src/services/a-rex/test.cpp:43 src/services/a-rex/test_cache_check.cpp:24 #: src/tests/count/test_client.cpp:23 #: src/tests/echo/echo_test4axis2c/test_client.cpp:20 #: src/tests/echo/test_client.cpp:21 msgid "Creating client side chain" msgstr "" #: src/services/a-rex/test.cpp:49 src/tests/count/test_client.cpp:53 #: src/tests/echo/echo_test4axis2c/test_client.cpp:43 #: src/tests/echo/test_client.cpp:59 msgid "Failed to load client configuration" msgstr "" #: src/services/a-rex/test.cpp:53 src/tests/count/test_client.cpp:57 #: src/tests/echo/echo_test4axis2c/test_client.cpp:47 #: src/tests/echo/test.cpp:58 src/tests/echo/test_client.cpp:63 msgid "Client side MCCs are loaded" msgstr "" #: src/services/a-rex/test.cpp:56 src/tests/count/test_client.cpp:60 #: src/tests/echo/echo_test4axis2c/test_client.cpp:50 #: src/tests/echo/test_client.cpp:66 msgid "Client chain does not have entry point" msgstr "" #: src/services/a-rex/test.cpp:112 src/services/a-rex/test.cpp:191 #: src/services/a-rex/test.cpp:248 src/services/a-rex/test.cpp:296 #: src/services/a-rex/test.cpp:344 src/services/a-rex/test.cpp:392 #: src/tests/count/test_client.cpp:87 #: src/tests/echo/echo_test4axis2c/test_client.cpp:74 #: src/tests/echo/test.cpp:74 src/tests/echo/test_client.cpp:90 msgid "Request failed" msgstr "" #: src/services/a-rex/test.cpp:115 src/services/a-rex/test.cpp:194 #: src/services/a-rex/test.cpp:251 src/services/a-rex/test.cpp:299 #: src/services/a-rex/test.cpp:347 src/services/a-rex/test.cpp:395 #: src/tests/echo/test.cpp:82 msgid "Request succeed!!!" 
msgstr "" #: src/services/a-rex/test.cpp:117 src/services/a-rex/test.cpp:196 #: src/services/a-rex/test.cpp:253 src/services/a-rex/test.cpp:301 #: src/services/a-rex/test.cpp:349 src/services/a-rex/test.cpp:397 #: src/tests/count/test_client.cpp:93 #: src/tests/echo/echo_test4axis2c/test_client.cpp:80 #: src/tests/echo/test.cpp:79 src/tests/echo/test_client.cpp:96 msgid "There is no response" msgstr "" #: src/services/a-rex/test.cpp:125 src/services/a-rex/test.cpp:204 #: src/services/a-rex/test.cpp:261 src/services/a-rex/test.cpp:309 #: src/services/a-rex/test.cpp:357 src/services/a-rex/test.cpp:405 #: src/tests/count/test_client.cpp:100 #: src/tests/echo/echo_test4axis2c/test_client.cpp:87 #: src/tests/echo/test_client.cpp:103 msgid "Response is not SOAP" msgstr "" #: src/services/a-rex/test.cpp:136 msgid "Response is not expected WS-RP" msgstr "" #: src/services/a-rex/update_credentials.cpp:29 #, c-format msgid "" "UpdateCredentials: request = \n" "%s" msgstr "" #: src/services/a-rex/update_credentials.cpp:35 msgid "UpdateCredentials: missing Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:43 msgid "UpdateCredentials: wrong number of Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:51 msgid "UpdateCredentials: wrong number of elements inside Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:60 msgid "UpdateCredentials: EPR contains no JobID" msgstr "" #: src/services/a-rex/update_credentials.cpp:70 #, c-format msgid "UpdateCredentials: no job found: %s" msgstr "" #: src/services/a-rex/update_credentials.cpp:77 msgid "UpdateCredentials: failed to update credentials" msgstr "" #: src/services/a-rex/update_credentials.cpp:85 #, c-format msgid "" "UpdateCredentials: response = \n" "%s" msgstr "" #: src/services/cache_service/CacheService.cpp:52 msgid "No A-REX config file found in cache service configuration" msgstr "" #: src/services/cache_service/CacheService.cpp:56 #, c-format msgid "Using A-REX config file %s" msgstr "" #: src/services/cache_service/CacheService.cpp:60 #, c-format msgid "Failed to process A-REX configuration in %s" msgstr "" #: src/services/cache_service/CacheService.cpp:65 msgid "No caches defined in configuration" msgstr "" #: src/services/cache_service/CacheService.cpp:139 msgid "Empty filename returned from FileCache" msgstr "" #: src/services/cache_service/CacheService.cpp:151 #, c-format msgid "Problem accessing cache file %s: %s" msgstr "" #: src/services/cache_service/CacheService.cpp:200 #: src/services/cache_service/CacheService.cpp:472 msgid "No job ID supplied" msgstr "" #: src/services/cache_service/CacheService.cpp:209 #, c-format msgid "Bad number in priority element: %s" msgstr "" #: src/services/cache_service/CacheService.cpp:218 msgid "No username supplied" msgstr "" #: src/services/cache_service/CacheService.cpp:225 #, c-format msgid "Supplied username %s does not match mapped username %s" msgstr "" #: src/services/cache_service/CacheService.cpp:239 msgid "No session directory found" msgstr "" #: src/services/cache_service/CacheService.cpp:243 #, c-format msgid "Using session dir %s" msgstr "" #: src/services/cache_service/CacheService.cpp:247 #, fuzzy, c-format msgid "Failed to stat session dir %s" msgstr "Nem sikerült elküldeni a kérést" #: src/services/cache_service/CacheService.cpp:252 #, c-format msgid "Session dir %s is owned by %i, but current mapped user is %i" msgstr "" #: src/services/cache_service/CacheService.cpp:279 #, fuzzy, c-format msgid "Failed to access proxy of given job id %s at %s" 
msgstr "Nem sikerült elküldeni a kérést" #: src/services/cache_service/CacheService.cpp:297 #, c-format msgid "DN is %s" msgstr "" #: src/services/cache_service/CacheService.cpp:373 #, c-format msgid "Permission checking passed for url %s" msgstr "" #: src/services/cache_service/CacheService.cpp:398 #: src/services/cache_service/CacheServiceGenerator.cpp:138 #, c-format msgid "Failed to move %s to %s: %s" msgstr "" #: src/services/cache_service/CacheService.cpp:437 #, c-format msgid "Starting new DTR for %s" msgstr "" #: src/services/cache_service/CacheService.cpp:439 #, c-format msgid "Failed to start new DTR for %s" msgstr "" #: src/services/cache_service/CacheService.cpp:486 #, c-format msgid "Job %s: all files downloaded successfully" msgstr "" #: src/services/cache_service/CacheService.cpp:495 #, c-format msgid "Job %s: Some downloads failed" msgstr "" #: src/services/cache_service/CacheService.cpp:501 #, c-format msgid "Job %s: files still downloading" msgstr "" #: src/services/cache_service/CacheService.cpp:514 msgid "CacheService: Unauthorized" msgstr "" #: src/services/cache_service/CacheService.cpp:523 msgid "No local user mapping found" msgstr "" #: src/services/cache_service/CacheService.cpp:530 #: src/services/data-staging/DataDeliveryService.cpp:631 #, fuzzy, c-format msgid "Identity is %s" msgstr "Azonosító: %s" #: src/services/cache_service/CacheService.cpp:595 msgid "Only POST is supported in CacheService" msgstr "" #: src/services/cache_service/CacheServiceGenerator.cpp:88 #, c-format msgid "DTR %s finished with state %s" msgstr "" #: src/services/cache_service/CacheServiceGenerator.cpp:127 #, c-format msgid "Could not determine session directory from filename %s" msgstr "" #: src/services/cache_service/CacheServiceGenerator.cpp:168 #, c-format msgid "Invalid DTR for source %s, destination %s" msgstr "" #: src/services/cache_service/CacheServiceGenerator.cpp:210 #, c-format msgid "DTRs still running for job %s" msgstr "" #: src/services/cache_service/CacheServiceGenerator.cpp:219 #, c-format msgid "All DTRs finished for job %s" msgstr "" #: src/services/cache_service/CacheServiceGenerator.cpp:226 #, c-format msgid "Job %s not found" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:58 #, c-format msgid "Archiving DTR %s, state ERROR" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:62 #, c-format msgid "Archiving DTR %s, state %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:166 msgid "No delegation token in request" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:174 msgid "Failed to accept delegation" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:203 #: src/services/data-staging/DataDeliveryService.cpp:210 #, fuzzy msgid "ErrorDescription" msgstr "Feladat leírás: %s" #: src/services/data-staging/DataDeliveryService.cpp:215 #, c-format msgid "All %u process slots used" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:230 #, c-format msgid "Received retry for DTR %s still in transfer" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:237 #, c-format msgid "Replacing DTR %s in state %s with new request" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:248 #, c-format msgid "Storing temp proxy at %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:256 #, c-format msgid "Failed to create temp proxy at %s: %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:263 #, c-format msgid "Failed to change owner of temp proxy at %s to 
%i:%i: %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:289 #, fuzzy msgid "Invalid DTR" msgstr "Érvénytelen URL: %s" #: src/services/data-staging/DataDeliveryService.cpp:294 #, c-format msgid "Failed to remove temporary proxy %s: %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:395 #, c-format msgid "No such DTR %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:406 #, c-format msgid "DTR %s failed: %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:417 #, c-format msgid "DTR %s finished successfully" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:427 #, c-format msgid "DTR %s still in progress (%lluB transferred)" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:488 #, c-format msgid "No active DTR %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:498 #, c-format msgid "DTR %s was already cancelled" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:507 #, fuzzy, c-format msgid "DTR %s could not be cancelled" msgstr "A feladatot nem sikerült megölni vagy letörölni" #: src/services/data-staging/DataDeliveryService.cpp:551 #, fuzzy, c-format msgid "Failed to get load average: %s" msgstr "privát kulcs elérési útvonala" #: src/services/data-staging/DataDeliveryService.cpp:575 msgid "Invalid configuration - no allowed IP address specified" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:579 msgid "Invalid configuration - no allowed dirs specified" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:590 msgid "Failed to start archival thread" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:615 msgid "Shutting down data delivery service" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:624 msgid "Unauthorized" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:710 msgid "Only POST is supported in DataDeliveryService" msgstr "" #: src/services/examples/echo_python/EchoService.py:12 msgid "EchoService (python) constructor called" msgstr "" #: src/services/examples/echo_python/EchoService.py:17 #, python-format msgid "EchoService (python) has prefix %(prefix)s and suffix %(suffix)s" msgstr "" #: src/services/examples/echo_python/EchoService.py:24 msgid "EchoService (python) destructor called" msgstr "" #: src/services/examples/echo_python/EchoService.py:54 msgid "EchoService (python) thread test starting" msgstr "" #: src/services/examples/echo_python/EchoService.py:65 #, python-format msgid "EchoService (python) thread test, iteration %(iteration)s %(status)s" msgstr "" #: src/services/examples/echo_python/EchoService.py:91 msgid "EchoService (python) 'Process' called" msgstr "" #: src/services/examples/echo_python/EchoService.py:95 #, python-format msgid "inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:96 #, python-format msgid "inmsg.Attributes().getAll() = %s " msgstr "" #: src/services/examples/echo_python/EchoService.py:97 #, python-format msgid "EchoService (python) got: %s " msgstr "" #: src/services/examples/echo_python/EchoService.py:102 #, python-format msgid "EchoService (python) request_namespace: %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:108 #: src/services/examples/echo_python/EchoService.py:177 #, fuzzy, python-format msgid "outpayload %s" msgstr "Feltöltve %s" #: src/services/examples/echo_python/EchoService.py:137 msgid "Calling https://localhost:60000/Echo using ClientSOAP" msgstr "" #: 
src/services/examples/echo_python/EchoService.py:140 msgid "Calling http://localhost:60000/Echo using ClientSOAP" msgstr "" #: src/services/examples/echo_python/EchoService.py:146 #: src/services/examples/echo_python/EchoService.py:161 #, fuzzy, python-format msgid "new_payload %s" msgstr "Feltöltve %s" #: src/services/examples/echo_python/EchoService.py:155 msgid "Calling http://localhost:60000/Echo using httplib" msgstr "" #: src/services/examples/echo_python/EchoService.py:171 msgid "Start waiting 10 sec..." msgstr "" #: src/services/examples/echo_python/EchoService.py:173 msgid "Waiting ends." msgstr "" #: src/services/gridftpd/auth/auth.cpp:312 #, c-format msgid "Unknown authorization command %s" msgstr "" #: src/services/gridftpd/auth/auth.cpp:330 #, c-format msgid "" "The [vo] section labeled '%s' has no file associated and can't be used for " "matching" msgstr "" #: src/services/gridftpd/auth/auth_ldap.cpp:56 #, c-format msgid "Connecting to %s:%i" msgstr "" #: src/services/gridftpd/auth/auth_ldap.cpp:57 #, c-format msgid "Querying at %s" msgstr "" #: src/services/gridftpd/auth/auth_ldap.cpp:62 #, c-format msgid "Failed to query LDAP server %s" msgstr "" #: src/services/gridftpd/auth/auth_ldap.cpp:69 #, c-format msgid "Failed to get results from LDAP server %s" msgstr "" #: src/services/gridftpd/auth/auth_ldap.cpp:83 #, fuzzy msgid "LDAP authorization is not supported" msgstr "A fileset regisztcáció nem támogatott még" #: src/services/gridftpd/auth/auth_plugin.cpp:73 #: src/services/gridftpd/auth/unixmap.cpp:254 #, c-format msgid "Plugin %s failed to run" msgstr "" #: src/services/gridftpd/auth/auth_plugin.cpp:75 #: src/services/gridftpd/auth/unixmap.cpp:256 #, c-format msgid "Plugin %s printed: %u" msgstr "" #: src/services/gridftpd/auth/auth_plugin.cpp:76 #: src/services/gridftpd/auth/unixmap.cpp:257 #, c-format msgid "Plugin %s error: %u" msgstr "" #: src/services/gridftpd/auth/auth_voms.cpp:28 #, c-format msgid "VOMS proxy processing returns: %i - %s" msgstr "" #: src/services/gridftpd/auth/auth_voms.cpp:120 #, c-format msgid "VOMS trust chains: %s" msgstr "" #: src/services/gridftpd/commands.cpp:46 #, c-format msgid "response: %s" msgstr "" #: src/services/gridftpd/commands.cpp:50 #, c-format msgid "Send response failed: %s" msgstr "" #: src/services/gridftpd/commands.cpp:80 msgid "Response sending error" msgstr "" #: src/services/gridftpd/commands.cpp:93 msgid "Closed connection" msgstr "" #: src/services/gridftpd/commands.cpp:131 #, c-format msgid "Socket conversion failed: %s" msgstr "" #: src/services/gridftpd/commands.cpp:141 #, c-format msgid "Failed to obtain own address: %s" msgstr "" #: src/services/gridftpd/commands.cpp:149 #, c-format msgid "Failed to recognize own address type (IPv4 ir IPv6) - %u" msgstr "" #: src/services/gridftpd/commands.cpp:159 #, c-format msgid "Accepted connection on [%s]:%u" msgstr "" #: src/services/gridftpd/commands.cpp:161 #, c-format msgid "Accepted connection on %u.%u.%u.%u:%u" msgstr "" #: src/services/gridftpd/commands.cpp:196 msgid "Accept failed" msgstr "" #: src/services/gridftpd/commands.cpp:204 #: src/services/gridftpd/listener.cpp:415 #, c-format msgid "Accept failed: %s" msgstr "" #: src/services/gridftpd/commands.cpp:219 #, c-format msgid "Accepted connection from [%s]:%u" msgstr "" #: src/services/gridftpd/commands.cpp:221 #, c-format msgid "Accepted connection from %u.%u.%u.%u:%u" msgstr "" #: src/services/gridftpd/commands.cpp:230 msgid "Authenticate in commands failed" msgstr "" #: src/services/gridftpd/commands.cpp:239 
msgid "Authentication failure" msgstr "" #: src/services/gridftpd/commands.cpp:247 #, c-format msgid "User subject: %s" msgstr "" #: src/services/gridftpd/commands.cpp:248 #, c-format msgid "Encrypted: %s" msgstr "" #: src/services/gridftpd/commands.cpp:254 msgid "User has no proper configuration associated" msgstr "" #: src/services/gridftpd/commands.cpp:262 msgid "" "User has empty virtual directory tree.\n" "Either user has no authorised plugins or there are no plugins configured at " "all." msgstr "" #: src/services/gridftpd/commands.cpp:279 msgid "Read commands in authenticate failed" msgstr "" #: src/services/gridftpd/commands.cpp:410 msgid "Control connection (probably) closed" msgstr "" #: src/services/gridftpd/commands.cpp:444 #: src/services/gridftpd/commands.cpp:723 msgid "Command EPRT" msgstr "" #: src/services/gridftpd/commands.cpp:445 #, fuzzy, c-format msgid "Failed to parse remote addres %s" msgstr "Nem sikerült elküldeni a kérést" #: src/services/gridftpd/commands.cpp:467 #, c-format msgid "Command USER %s" msgstr "" #: src/services/gridftpd/commands.cpp:474 msgid "Command CDUP" msgstr "" #: src/services/gridftpd/commands.cpp:480 #, c-format msgid "Command CWD %s" msgstr "" #: src/services/gridftpd/commands.cpp:496 #, c-format msgid "Command MKD %s" msgstr "" #: src/services/gridftpd/commands.cpp:516 #, c-format msgid "Command SIZE %s" msgstr "" #: src/services/gridftpd/commands.cpp:531 #, c-format msgid "Command SBUF: %i" msgstr "" #: src/services/gridftpd/commands.cpp:552 #, c-format msgid "Command MLST %s" msgstr "" #: src/services/gridftpd/commands.cpp:575 #, c-format msgid "Command DELE %s" msgstr "" #: src/services/gridftpd/commands.cpp:590 #, c-format msgid "Command RMD %s" msgstr "" #: src/services/gridftpd/commands.cpp:604 #, c-format msgid "Command TYPE %c" msgstr "" #: src/services/gridftpd/commands.cpp:615 #, c-format msgid "Command MODE %c" msgstr "" #: src/services/gridftpd/commands.cpp:627 msgid "Command ABOR" msgstr "" #: src/services/gridftpd/commands.cpp:640 #, c-format msgid "Command REST %s" msgstr "" #: src/services/gridftpd/commands.cpp:653 #, c-format msgid "Command EPSV %s" msgstr "" #: src/services/gridftpd/commands.cpp:655 msgid "Command SPAS" msgstr "" #: src/services/gridftpd/commands.cpp:657 msgid "Command PASV" msgstr "" #: src/services/gridftpd/commands.cpp:678 msgid "local_pasv failed" msgstr "" #: src/services/gridftpd/commands.cpp:702 msgid "local_spas failed" msgstr "" #: src/services/gridftpd/commands.cpp:725 msgid "Command PORT" msgstr "" #: src/services/gridftpd/commands.cpp:728 msgid "active_data is disabled" msgstr "" #: src/services/gridftpd/commands.cpp:737 msgid "local_port failed" msgstr "" #: src/services/gridftpd/commands.cpp:750 #, c-format msgid "Command MLSD %s" msgstr "" #: src/services/gridftpd/commands.cpp:752 #, c-format msgid "Command NLST %s" msgstr "" #: src/services/gridftpd/commands.cpp:754 #, c-format msgid "Command LIST %s" msgstr "" #: src/services/gridftpd/commands.cpp:805 #, c-format msgid "Command ERET %s" msgstr "" #: src/services/gridftpd/commands.cpp:835 #, c-format msgid "Command RETR %s" msgstr "" #: src/services/gridftpd/commands.cpp:864 #, c-format msgid "Command STOR %s" msgstr "" #: src/services/gridftpd/commands.cpp:892 #, c-format msgid "Command ALLO %i" msgstr "" #: src/services/gridftpd/commands.cpp:915 msgid "Command OPTS" msgstr "" #: src/services/gridftpd/commands.cpp:918 msgid "Command OPTS RETR" msgstr "" #: src/services/gridftpd/commands.cpp:928 #, c-format msgid "Option: %s" msgstr "" #: 
src/services/gridftpd/commands.cpp:972 msgid "Command NOOP" msgstr "" #: src/services/gridftpd/commands.cpp:976 msgid "Command QUIT" msgstr "" #: src/services/gridftpd/commands.cpp:986 msgid "Failed to close, deleting client" msgstr "" #: src/services/gridftpd/commands.cpp:1000 #, c-format msgid "Command DCAU: %i '%s'" msgstr "" #: src/services/gridftpd/commands.cpp:1028 #, c-format msgid "Command PBZS: %s" msgstr "" #: src/services/gridftpd/commands.cpp:1036 #, c-format msgid "Setting pbsz to %lu" msgstr "" #: src/services/gridftpd/commands.cpp:1052 #, c-format msgid "Command PROT: %s" msgstr "" #: src/services/gridftpd/commands.cpp:1077 #, c-format msgid "Command MDTM %s" msgstr "" #: src/services/gridftpd/commands.cpp:1099 #, c-format msgid "Raw command: %s" msgstr "" #: src/services/gridftpd/commands.cpp:1147 msgid "Failed to allocate memory for buffer" msgstr "" #: src/services/gridftpd/commands.cpp:1154 #, c-format msgid "Allocated %u buffers %llu bytes each." msgstr "" #: src/services/gridftpd/commands.cpp:1161 msgid "abort_callback: start" msgstr "" #: src/services/gridftpd/commands.cpp:1164 #, c-format msgid "abort_callback: Globus error: %s" msgstr "" #: src/services/gridftpd/commands.cpp:1178 msgid "make_abort: start" msgstr "" #: src/services/gridftpd/commands.cpp:1190 msgid "Failed to abort data connection - ignoring and recovering" msgstr "" #: src/services/gridftpd/commands.cpp:1198 msgid "make_abort: wait for abort flag to be reset" msgstr "" #: src/services/gridftpd/commands.cpp:1208 msgid "make_abort: leaving" msgstr "" #: src/services/gridftpd/commands.cpp:1223 msgid "check_abort: have Globus error" msgstr "" #: src/services/gridftpd/commands.cpp:1224 msgid "Abort request caused by transfer error" msgstr "" #: src/services/gridftpd/commands.cpp:1227 msgid "check_abort: sending 426" msgstr "" #: src/services/gridftpd/commands.cpp:1248 msgid "Abort request caused by error in transfer function" msgstr "" #: src/services/gridftpd/commands.cpp:1330 msgid "Failed to start timer thread - timeout won't work" msgstr "" #: src/services/gridftpd/commands.cpp:1382 msgid "Killing connection due to timeout" msgstr "" #: src/services/gridftpd/conf/conf_vo.cpp:25 #: src/services/gridftpd/conf/conf_vo.cpp:51 #: src/services/gridftpd/conf/conf_vo.cpp:69 #: src/services/gridftpd/conf/conf_vo.cpp:81 msgid "" "Configuration section [vo] is missing name. Check for presence of name= or " "vo= option." 
msgstr "" #: src/services/gridftpd/conf/daemon.cpp:60 #: src/services/gridftpd/conf/daemon.cpp:183 #, c-format msgid "No such user: %s" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:72 #: src/services/gridftpd/conf/daemon.cpp:195 #, c-format msgid "No such group: %s" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:85 #: src/services/gridftpd/conf/daemon.cpp:208 #, c-format msgid "Improper debug level '%s'" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:127 msgid "Missing option for command daemon" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:132 msgid "Wrong option in daemon" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:142 #, c-format msgid "Improper size of log '%s'" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:151 #, c-format msgid "Improper number of logs '%s'" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:157 #, c-format msgid "Improper argument for logsize '%s'" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:164 msgid "Missing option for command logreopen" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:169 msgid "Wrong option in logreopen" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:253 #, fuzzy, c-format msgid "Failed to open log file %s" msgstr "Nem sikerült listázni a fájlokat" #: src/services/gridftpd/conf/environment.cpp:175 msgid "" "Central configuration file is missing at guessed location:\n" " /etc/arc.conf\n" "Use ARC_CONFIG variable for non-standard location" msgstr "" #: src/services/gridftpd/datalist.cpp:101 msgid "Closing channel (list)" msgstr "" #: src/services/gridftpd/datalist.cpp:157 msgid "Data channel connected (list)" msgstr "" #: src/services/gridftpd/dataread.cpp:24 msgid "data_connect_retrieve_callback" msgstr "" #: src/services/gridftpd/dataread.cpp:30 msgid "Data channel connected (retrieve)" msgstr "" #: src/services/gridftpd/dataread.cpp:37 msgid "data_connect_retrieve_callback: allocate_data_buffer" msgstr "" #: src/services/gridftpd/dataread.cpp:40 msgid "data_connect_retrieve_callback: allocate_data_buffer failed" msgstr "" #: src/services/gridftpd/dataread.cpp:48 #, c-format msgid "data_connect_retrieve_callback: check for buffer %u" msgstr "" #: src/services/gridftpd/dataread.cpp:61 #, c-format msgid "Closing channel (retrieve) due to local read error :%s" msgstr "" #: src/services/gridftpd/dataread.cpp:75 #: src/services/gridftpd/dataread.cpp:172 msgid "Buffer registration failed" msgstr "" #: src/services/gridftpd/dataread.cpp:88 msgid "data_retrieve_callback" msgstr "" #: src/services/gridftpd/dataread.cpp:96 #, c-format msgid "Data channel (retrieve) %i %i %i" msgstr "" #: src/services/gridftpd/dataread.cpp:104 msgid "Closing channel (retrieve)" msgstr "" #: src/services/gridftpd/dataread.cpp:110 #: src/services/gridftpd/datawrite.cpp:128 #, c-format msgid "Time spent waiting for network: %.3f ms" msgstr "" #: src/services/gridftpd/dataread.cpp:111 #: src/services/gridftpd/datawrite.cpp:129 #, c-format msgid "Time spent waiting for disc: %.3f ms" msgstr "" #: src/services/gridftpd/dataread.cpp:122 msgid "data_retrieve_callback: lost buffer" msgstr "" #: src/services/gridftpd/dataread.cpp:158 #, c-format msgid "Closing channel (retrieve) due to local read error: %s" msgstr "" #: src/services/gridftpd/datawrite.cpp:24 msgid "data_connect_store_callback" msgstr "" #: src/services/gridftpd/datawrite.cpp:30 msgid "Data channel connected (store)" msgstr "" #: src/services/gridftpd/datawrite.cpp:57 msgid "Failed to register any buffer" msgstr "" #: src/services/gridftpd/datawrite.cpp:76 #, c-format msgid 
"Data channel (store) %i %i %i" msgstr "" #: src/services/gridftpd/datawrite.cpp:89 msgid "data_store_callback: lost buffer" msgstr "" #: src/services/gridftpd/datawrite.cpp:105 #, c-format msgid "Closing channel (store) due to error: %s" msgstr "" #: src/services/gridftpd/datawrite.cpp:115 msgid "Closing channel (store)" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:55 msgid "Can't parse access rights in configuration line" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:61 msgid "Can't parse user:group in configuration line" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:68 msgid "Can't recognize user in configuration line" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:77 msgid "Can't recognize group in configuration line" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:84 #: src/services/gridftpd/fileplugin/fileplugin.cpp:89 msgid "Can't parse or:and in configuration line" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:116 msgid "Can't parse configuration line" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:120 #, c-format msgid "Bad directory name: %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:137 msgid "Can't parse create arguments in configuration line" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:146 msgid "Can't parse mkdir arguments in configuration line" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:163 #, c-format msgid "Bad subcommand in configuration line: %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:175 msgid "Bad mount directory specified" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:177 #, c-format msgid "Mount point %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:215 #: src/services/gridftpd/fileplugin/fileplugin.cpp:273 #, c-format msgid "mkdir failed: %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:226 #, c-format msgid "Warning: mount point %s creation failed." 
msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:329 #, c-format msgid "plugin: open: %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:377 #: src/services/gridftpd/fileplugin/fileplugin.cpp:414 msgid "Not enough space to store file" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:428 #, c-format msgid "open: changing owner for %s, %i, %i" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:435 #, c-format msgid "open: owner: %i %i" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:444 #: src/services/gridftpd/fileplugin/fileplugin.cpp:484 #, c-format msgid "Unknown open mode %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:449 msgid "plugin: close" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:490 msgid "plugin: read" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:496 msgid "Error while reading file" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:506 msgid "plugin: write" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:517 msgid "Zero bytes written to file" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:725 #, c-format msgid "plugin: checkdir: %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:728 #, c-format msgid "plugin: checkdir: access: %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:737 #, c-format msgid "plugin: checkdir: access: allowed: %s" msgstr "" #: src/services/gridftpd/fileroot.cpp:14 #, c-format msgid "No plugin is configured or authorised for requested path %s" msgstr "" #: src/services/gridftpd/fileroot.cpp:19 msgid "FilePlugin: more unload than load" msgstr "" #: src/services/gridftpd/fileroot.cpp:34 #, c-format msgid "Can't load plugin %s for access point %s" msgstr "" #: src/services/gridftpd/fileroot.cpp:39 src/services/gridftpd/fileroot.cpp:43 #, c-format msgid "Plugin %s for access point %s is broken." msgstr "" #: src/services/gridftpd/fileroot.cpp:47 #, c-format msgid "Plugin %s for access point %s acquire failed (should never happen)." 
msgstr "" #: src/services/gridftpd/fileroot.cpp:54 #, c-format msgid "Destructor with dlclose (%s)" msgstr "" #: src/services/gridftpd/fileroot.cpp:77 #, c-format msgid "FileNode: operator= (%s <- %s) %lu <- %lu" msgstr "" #: src/services/gridftpd/fileroot.cpp:79 msgid "Copying with dlclose" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:32 #: src/services/gridftpd/fileroot_config.cpp:596 msgid "configuration file not found" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:51 msgid "Wrong port number in configuration" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:60 msgid "Wrong maxconnections number in configuration" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:69 msgid "Wrong defaultbuffer number in configuration" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:78 msgid "Wrong maxbuffer number in configuration" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:110 #: src/services/gridftpd/fileroot_config.cpp:118 #, c-format msgid "Can't resolve host %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:152 #: src/services/gridftpd/fileroot_config.cpp:455 #, c-format msgid "couldn't open file %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:167 #: src/services/gridftpd/fileroot_config.cpp:183 #: src/services/gridftpd/fileroot_config.cpp:469 #, c-format msgid "improper attribute for encryption command: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:177 #: src/services/gridftpd/fileroot_config.cpp:479 #: src/services/gridftpd/fileroot_config.cpp:622 msgid "unknown (non-gridmap) user is not allowed" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:207 #: src/services/gridftpd/fileroot_config.cpp:547 #, c-format msgid "Failed processing authorization group %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:216 msgid "couldn't process VO configuration" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:223 #: src/services/gridftpd/fileroot_config.cpp:231 #: src/services/gridftpd/fileroot_config.cpp:239 #: src/services/gridftpd/fileroot_config.cpp:500 #: src/services/gridftpd/fileroot_config.cpp:508 #: src/services/gridftpd/fileroot_config.cpp:516 #, fuzzy, c-format msgid "failed while processing configuration command: %s %s" msgstr "voms szerver fájljának az elérési útvonala" #: src/services/gridftpd/fileroot_config.cpp:281 #, c-format msgid "can't parse configuration line: %s %s %s %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:286 #, c-format msgid "bad directory in plugin command: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:298 #: src/services/gridftpd/fileroot_config.cpp:405 #, c-format msgid "Already have directory: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:307 #: src/services/gridftpd/fileroot_config.cpp:408 #, c-format msgid "Registering directory: %s with plugin: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:312 #: src/services/gridftpd/fileroot_config.cpp:421 #, c-format msgid "file node creation failed: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:330 #, c-format msgid "improper attribute for allowactvedata command: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:335 #, c-format msgid "unsupported configuration command: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:359 msgid "Could not determine hostname from gethostname()" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:375 msgid "unnamed group" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:384 msgid "undefined plugin" msgstr "" #: 
src/services/gridftpd/fileroot_config.cpp:388 msgid "undefined virtual plugin path" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:393 #, c-format msgid "bad directory for plugin: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:485 #, c-format msgid "improper attribute for allowunknown command: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:607 msgid "failed to process client identification" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:641 #, c-format msgid "Registering dummy directory: %s" msgstr "" #: src/services/gridftpd/listener.cpp:57 #: src/services/gridftpd/listener.cpp:466 msgid "Activation failed" msgstr "" #: src/services/gridftpd/listener.cpp:66 #: src/services/gridftpd/listener.cpp:172 msgid "Child exited" msgstr "" #: src/services/gridftpd/listener.cpp:78 msgid "Globus connection error" msgstr "" #: src/services/gridftpd/listener.cpp:80 #: src/services/gridftpd/listener.cpp:424 msgid "New connection" msgstr "" #: src/services/gridftpd/listener.cpp:87 msgid "Server stopped" msgstr "" #: src/services/gridftpd/listener.cpp:157 msgid "Error: failed to set handler for SIGTERM" msgstr "" #: src/services/gridftpd/listener.cpp:161 msgid "Starting controlled process" msgstr "" #: src/services/gridftpd/listener.cpp:164 msgid "fork failed" msgstr "" #: src/services/gridftpd/listener.cpp:169 msgid "wait failed - killing child" msgstr "" #: src/services/gridftpd/listener.cpp:174 msgid "Killed with signal: " msgstr "" #: src/services/gridftpd/listener.cpp:176 msgid "Restarting after segmentation violation." msgstr "" #: src/services/gridftpd/listener.cpp:177 msgid "Waiting 1 minute" msgstr "" #: src/services/gridftpd/listener.cpp:239 msgid "Error: failed to set handler for SIGCHLD" msgstr "" #: src/services/gridftpd/listener.cpp:256 msgid "Missing argument" msgstr "" #: src/services/gridftpd/listener.cpp:257 msgid "Unknown option" msgstr "" #: src/services/gridftpd/listener.cpp:264 msgid "Wrong port number" msgstr "" #: src/services/gridftpd/listener.cpp:274 msgid "Wrong number of connections" msgstr "" #: src/services/gridftpd/listener.cpp:281 msgid "Wrong buffer size" msgstr "" #: src/services/gridftpd/listener.cpp:288 msgid "Wrong maximal buffer size" msgstr "" #: src/services/gridftpd/listener.cpp:300 msgid "Failed reading configuration" msgstr "" #: src/services/gridftpd/listener.cpp:331 #, c-format msgid "Failed to obtain local address: %s" msgstr "" #: src/services/gridftpd/listener.cpp:338 #, c-format msgid "Failed to create socket(%s): %s" msgstr "" #: src/services/gridftpd/listener.cpp:352 #, c-format msgid "Failed to limit socket to IPv6: %s" msgstr "" #: src/services/gridftpd/listener.cpp:359 #, c-format msgid "Failed to bind socket(%s): %s" msgstr "" #: src/services/gridftpd/listener.cpp:364 #, c-format msgid "Failed to listen on socket(%s): %s" msgstr "" #: src/services/gridftpd/listener.cpp:371 msgid "Not listening to anything" msgstr "" #: src/services/gridftpd/listener.cpp:374 #, c-format msgid "Some addresses failed. Listening on %u of %u." 
msgstr "" #: src/services/gridftpd/listener.cpp:382 #: src/services/gridftpd/listener.cpp:477 msgid "Listen started" msgstr "" #: src/services/gridftpd/listener.cpp:395 msgid "No valid handles left for listening" msgstr "" #: src/services/gridftpd/listener.cpp:401 #, fuzzy, c-format msgid "Select failed: %s" msgstr "Fájl feltöltve %s" #: src/services/gridftpd/listener.cpp:422 #, c-format msgid "Have connections: %i, max: %i" msgstr "" #: src/services/gridftpd/listener.cpp:427 #, c-format msgid "Fork failed: %s" msgstr "" #: src/services/gridftpd/listener.cpp:445 msgid "Refusing connection: Connection limit exceeded" msgstr "" #: src/services/gridftpd/listener.cpp:471 msgid "Init failed" msgstr "" #: src/services/gridftpd/listener.cpp:474 msgid "Listen failed" msgstr "" #: src/services/gridftpd/listener.cpp:488 msgid "Listen finished" msgstr "" #: src/services/gridftpd/listener.cpp:493 msgid "Stopping server" msgstr "" #: src/services/gridftpd/listener.cpp:497 msgid "Destroying handle" msgstr "" #: src/services/gridftpd/listener.cpp:500 msgid "Deactivating modules" msgstr "" #: src/services/gridftpd/listener.cpp:508 msgid "Exiting" msgstr "" #: src/services/gridftpd/misc/ldapquery.cpp:253 #, c-format msgid "%s: %s:%i" msgstr "" #: src/services/gridftpd/misc/ldapquery.cpp:390 #: src/services/gridftpd/misc/ldapquery.cpp:468 #, c-format msgid "%s %s" msgstr "" #: src/services/gridftpd/misc/ldapquery.cpp:394 #, c-format msgid " %s: %s" msgstr "" #: src/services/gridftpd/misc/ldapquery.cpp:396 #, c-format msgid " %s:" msgstr "" #: src/services/gridftpd/userspec.cpp:48 #, c-format msgid "Mapfile is missing at %s" msgstr "" #: src/services/gridftpd/userspec.cpp:89 #: src/services/gridftpd/userspec.cpp:215 msgid "There is no local mapping for user" msgstr "" #: src/services/gridftpd/userspec.cpp:92 #: src/services/gridftpd/userspec.cpp:219 msgid "There is no local name for user" msgstr "" #: src/services/gridftpd/userspec.cpp:142 #: src/services/gridftpd/userspec.cpp:233 msgid "No proxy provided" msgstr "" #: src/services/gridftpd/userspec.cpp:144 #, c-format msgid "Proxy/credentials stored at %s" msgstr "" #: src/services/gridftpd/userspec.cpp:147 #: src/services/gridftpd/userspec.cpp:238 #, c-format msgid "Initially mapped to local user: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:150 #: src/services/gridftpd/userspec.cpp:340 #, c-format msgid "Local user %s does not exist" msgstr "" #: src/services/gridftpd/userspec.cpp:155 #: src/services/gridftpd/userspec.cpp:246 #, c-format msgid "Initially mapped to local group: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:158 #: src/services/gridftpd/userspec.cpp:249 #: src/services/gridftpd/userspec.cpp:349 #, c-format msgid "Local group %s does not exist" msgstr "" #: src/services/gridftpd/userspec.cpp:167 #: src/services/gridftpd/userspec.cpp:258 msgid "Running user has no name" msgstr "" #: src/services/gridftpd/userspec.cpp:170 #: src/services/gridftpd/userspec.cpp:261 #, c-format msgid "Mapped to running user: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:180 #: src/services/gridftpd/userspec.cpp:271 #, c-format msgid "Mapped to local id: %i" msgstr "" #: src/services/gridftpd/userspec.cpp:185 #: src/services/gridftpd/userspec.cpp:276 #, c-format msgid "No group %i for mapped user" msgstr "" #: src/services/gridftpd/userspec.cpp:194 #: src/services/gridftpd/userspec.cpp:285 #, c-format msgid "Mapped to local group id: %i" msgstr "" #: src/services/gridftpd/userspec.cpp:195 #: src/services/gridftpd/userspec.cpp:286 #, c-format 
msgid "Mapped to local group name: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:196 #: src/services/gridftpd/userspec.cpp:287 #, c-format msgid "Mapped user's home: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:235 #, c-format msgid "Proxy stored at %s" msgstr "" #: src/services/gridftpd/userspec.cpp:241 msgid "Local user does not exist" msgstr "" #: src/services/gridftpd/userspec.cpp:317 #, c-format msgid "Undefined control sequence: %%%s" msgstr "" #: src/services/gridftpd/userspec.cpp:354 #, c-format msgid "Remapped to local user: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:355 #, c-format msgid "Remapped to local id: %i" msgstr "" #: src/services/gridftpd/userspec.cpp:356 #, c-format msgid "Remapped to local group id: %i" msgstr "" #: src/services/gridftpd/userspec.cpp:357 #, c-format msgid "Remapped to local group name: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:358 #, c-format msgid "Remapped user's home: %s" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:33 #, c-format msgid "config: %s, class name: %s" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:42 msgid "libjvm.so not loadable - check your LD_LIBRARY_PATH" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:52 msgid "libjvm.so does not contain the expected symbols" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:66 msgid "JVM started" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:71 #, c-format msgid "There is no service: %s in your Java class search path" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:79 msgid "There is no constructor function" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:86 #, c-format msgid "%s constructed" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:90 msgid "Destroy JVM" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:183 msgid "Cannot find MCC_Status object" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:197 msgid "Java object returned NULL status" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:107 #, c-format msgid "Loading %u-th Python service" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:111 #, c-format msgid "Initialized %u-th Python service" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:147 msgid "Invalid class name" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:152 #, c-format msgid "class name: %s" msgstr "osztály neve: %s" #: src/services/wrappers/python/pythonwrapper.cpp:153 #, c-format msgid "module name: %s" msgstr "modul neve: %s" #: src/services/wrappers/python/pythonwrapper.cpp:210 msgid "Cannot find ARC Config class" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:217 msgid "Config class is not an object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:225 msgid "Cannot get dictionary of module" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:234 msgid "Cannot find service class" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:243 msgid "Cannot create config argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:250 msgid "Cannot convert config to Python object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:273 #, c-format msgid "%s is not an object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:279 msgid "Message class is not an object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:287 msgid "Python Wrapper constructor succeeded" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:303 #, c-format msgid 
"Python Wrapper destructor (%d)" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:336 msgid "Python interpreter locked" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:340 msgid "Python interpreter released" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:400 msgid "Python wrapper process called" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:409 msgid "Failed to create input SOAP container" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:419 msgid "Cannot create inmsg argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:433 msgid "Cannot find ARC Message class" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:439 msgid "Cannot convert inmsg to Python object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:448 msgid "Failed to create SOAP containers" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:454 msgid "Cannot create outmsg argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:460 msgid "Cannot convert outmsg to Python object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:516 msgid "Failed to create XMLNode container" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:533 msgid "Cannot find ARC XMLNode class" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:539 msgid "Cannot create doc argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:545 msgid "Cannot convert doc to Python object" msgstr "" #: src/tests/client/test_ClientInterface.cpp:36 #: src/tests/client/test_ClientSAML2SSO.cpp:68 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:78 #: src/tests/echo/test_clientinterface.cpp:41 #: src/tests/echo/test_clientinterface.cpp:132 #: src/tests/echo/test_clientinterface.py:9 msgid "Creating a soap client" msgstr "" #: src/tests/client/test_ClientInterface.cpp:84 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:97 #: src/tests/echo/test_clientinterface.cpp:78 msgid "SOAP invokation failed" msgstr "" #: src/tests/client/test_ClientSAML2SSO.cpp:44 #: src/tests/echo/test_clientinterface.cpp:100 msgid "Creating a http client" msgstr "" #: src/tests/client/test_ClientSAML2SSO.cpp:55 #: src/tests/echo/test_clientinterface.cpp:117 msgid "HTTP with SAML2SSO invokation failed" msgstr "" #: src/tests/client/test_ClientSAML2SSO.cpp:59 #: src/tests/echo/test_clientinterface.cpp:121 msgid "There was no HTTP response" msgstr "" #: src/tests/client/test_ClientSAML2SSO.cpp:77 #: src/tests/echo/test_clientinterface.cpp:145 msgid "SOAP with SAML2SSO invokation failed" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:37 #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:38 #: src/tests/delegation/test_delegation_client.cpp:45 #: src/tests/delegation/test_delegation_client.cpp:76 #: src/tests/echo/test_clientinterface.cpp:172 #: src/tests/echo/test_clientinterface.cpp:194 msgid "Creating a delegation soap client" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:46 #: src/tests/delegation/test_delegation_client.cpp:51 #: src/tests/echo/test_clientinterface.cpp:178 msgid "Delegation to ARC delegation service failed" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:50 #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:49 #: src/tests/delegation/test_delegation_client.cpp:56 #: src/tests/delegation/test_delegation_client.cpp:88 #: src/tests/echo/test_clientinterface.cpp:182 #: src/tests/echo/test_clientinterface.cpp:205 #, c-format msgid 
"Delegation ID: %s" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:55 #, c-format msgid "Can not get the delegation credential: %s from delegation service:%s" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:58 #, c-format msgid "Delegated credential from delegation service: %s" msgstr "" #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:45 #: src/tests/delegation/test_delegation_client.cpp:83 #: src/tests/echo/test_clientinterface.cpp:201 msgid "Delegation to gridsite delegation service failed" msgstr "" #: src/tests/count/count.cpp:58 msgid "Input is not SOAP" msgstr "" #: src/tests/count/count.cpp:89 src/tests/echo/echo.cpp:83 msgid "echo: Unauthorized" msgstr "" #: src/tests/count/count.cpp:98 src/tests/count/count.cpp:104 #, c-format msgid "Request is not supported - %s" msgstr "" #: src/tests/count/test_service.cpp:33 src/tests/echo/test_service.cpp:30 msgid "Service is waiting for requests" msgstr "" #: src/tests/echo/test.cpp:32 msgid "Creating client interface" msgstr "" #: src/tests/echo/test_clientinterface.py:27 msgid "SOAP invocation failed" msgstr "" #~ msgid "use GSI proxy (RFC 3820 compliant proxy is default)" #~ msgstr "" #~ "GSI proxy használata (RFC 3820-nak megfelelő proxy, ez az alapbeállítás)" #, fuzzy #~ msgid "Unable to copy %s: No valid credentials found" #~ msgstr "Nem tudom másolni a %s fájlt: Nem érvényes tanúsítvány" #, fuzzy #~ msgid "Unable to create directory %s: No valid credentials found" #~ msgstr "Nem tudom másolni a %s fájlt: Nem érvényes tanúsítvány" #, fuzzy #~ msgid "Unable to rename %s: No valid credentials found" #~ msgstr "Nem tudom másolni a %s fájlt: Nem érvényes tanúsítvány" #, fuzzy #~ msgid "Unable to remove file %s: No valid credentials found" #~ msgstr "Nem tudom másolni a %s fájlt: Nem érvényes tanúsítvány" #~ msgid "explicitly select or reject a specific cluster" #~ msgstr "egy klaszter egyértelmű kiválasztása vagy tiltása" #~ msgid "explicitly select or reject an index server" #~ msgstr "egy index szerver egyértelmű kiválasztása vagy tiltása" #~ msgid "" #~ "The arcmigrate command is used for migrating queued jobs to another " #~ "cluster.\n" #~ "Note that migration is only supported between ARC1 clusters." #~ msgstr "" #~ "Az arcmigrate paraccsot arra lehet használni, hogy egy várakozó sorban\n" #~ "lévő feladatot átmozgassunk egy másik klaszterre. 
Jelenleg csak az ARC1-" #~ "es\n" #~ "klaszterek esetén lehet csak használni" #~ msgid "explicitly select or reject a cluster to migrate to" #~ msgstr "egy klaszter egyértelmű kiválasztása vagy tiltása migráció esetére" #~ msgid "select broker method (Random (default), FastestQueue, or custom)" #~ msgstr "" #~ "bróker kiválasztása (Random (alapbeállítás), FastestQueue vagy saját)" #~ msgid "file where the jobs will be stored" #~ msgstr "azon fájl, ahol a feladat azonosítók tárolásra kerülnek" #~ msgid "explicitly select or reject a specific cluster for the new job" #~ msgstr "" #~ "egy klaszter egyértelmű kiválasztása vagy tiltása új feladat számára" #~ msgid "No jobs to resubmit" #~ msgstr "Nem sikerült újraküldeni a feladatot" #~ msgid "Submission to %s failed, trying next target" #~ msgstr "" #~ "Feladat küldés erre a klaszterre nem sikerült: %s, megpróbálom a " #~ "következőt" #~ msgid "Job resubmitted with new jobid: %s" #~ msgstr "Feladat újraküldve ezzel az azonosítóval: %s" #~ msgid "service_url request_file" #~ msgstr "szolgáltatás_url kérési_fájl" #~ msgid "url of the policy decision service" #~ msgstr "az eljárásmódot eldöntő szolgáltatás url-je" #~ msgid "URL of SLCS service" #~ msgstr "SLCS szolgáltatás URL-je" #~ msgid "Identity provider name" #~ msgstr "Azonító szolgáltatás neve" #~ msgid "User account to identity provider" #~ msgstr "Felhasználói név az azonosító szolgáltató részére" #~ msgid "Password for user account to identity provider" #~ msgstr "" #~ "A felhasználói névhez tartozó jelszó az azonosító szolgáltató részére" #~ msgid "Key size of the private key (512, 1024, 2048)" #~ msgstr "A privát kulcs mérete (512, 1024, 2048)" #~ msgid "Private key passphrase" #~ msgstr "Privát kulcs jelszava" #~ msgid "passphrase" #~ msgstr "jelszó" #~ msgid "period" #~ msgstr "periódus" #~ msgid "Current transfer FAILED: %s - %s" #~ msgstr "Az aktuális átvitel MEGSZAKADT: %s - %s" nordugrid-arc-5.4.2/po/PaxHeaders.7502/remove-potcdate.sin0000644000000000000000000000013113214315702021420 xustar000000000000000030 mtime=1513200578.408749593 29 atime=1513200665.63381639 30 ctime=1513200668.587852519 nordugrid-arc-5.4.2/po/remove-potcdate.sin0000644000175000002070000000066013214315702021471 0ustar00mockbuildmock00000000000000# Sed script that remove the POT-Creation-Date line in the header entry # from a POT file. # # The distinction between the first and the following occurrences of the # pattern is achieved by looking at the hold space. /^"POT-Creation-Date: .*"$/{ x # Test if the hold space is empty. s/P/P/ ta # Yes it was empty. First occurrence. Remove the line. g d bb :a # The hold space was nonempty. Following occurrences. Do nothing. 
x :b } nordugrid-arc-5.4.2/po/PaxHeaders.7502/stamp-po0000644000000000000000000000013213214316034017272 xustar000000000000000030 mtime=1513200668.584852482 30 atime=1513200668.584852482 30 ctime=1513200668.597852641 nordugrid-arc-5.4.2/po/stamp-po0000644000175000002070000000001213214316034017331 0ustar00mockbuildmock00000000000000timestamp nordugrid-arc-5.4.2/po/PaxHeaders.7502/boldquot.sed0000644000000000000000000000013113214315702020135 xustar000000000000000029 mtime=1513200578.36274903 30 atime=1513200578.361749018 30 ctime=1513200668.590852555 nordugrid-arc-5.4.2/po/boldquot.sed0000644000175000002070000000033113214315702020201 0ustar00mockbuildmock00000000000000s/"\([^"]*\)"/“\1”/g s/`\([^`']*\)'/‘\1’/g s/ '\([^`']*\)' / ‘\1’ /g s/ '\([^`']*\)'$/ ‘\1’/g s/^'\([^`']*\)' /‘\1’ /g s/“”/""/g s/“/“/g s/”/”/g s/‘/‘/g s/’/’/g nordugrid-arc-5.4.2/po/PaxHeaders.7502/LINGUAS0000644000000000000000000000012511413403300016626 xustar000000000000000027 mtime=1278084800.930071 30 atime=1513200658.490729027 28 ctime=1513200668.6108528 nordugrid-arc-5.4.2/po/LINGUAS0000644000175000002070000000001411413403300016665 0ustar00mockbuildmock00000000000000ru sv de hu nordugrid-arc-5.4.2/po/PaxHeaders.7502/de.po0000644000000000000000000000013213214316034016537 xustar000000000000000030 mtime=1513200668.040845828 30 atime=1513200668.455850904 30 ctime=1513200668.602852702 nordugrid-arc-5.4.2/po/de.po0000644000175000002070000253157413214316034016626 0ustar00mockbuildmock00000000000000# translation of Arc.po to Russian # Oxana Smirnova , 2007. # Translation file for the Advanced Resource Connector (Arc) msgid "" msgstr "" "Project-Id-Version: Arc\n" "Report-Msgid-Bugs-To: support@nordugrid.org\n" "POT-Creation-Date: 2017-12-13 22:31+0100\n" "PO-Revision-Date: 2010-02-25 19:18+0100\n" "Last-Translator: Steffen Möller \n" "Language-Team: German\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Language: \n" "X-Generator: KBabel 1.11.4\n" "Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%" "10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" "X-Poedit-Language: Russian\n" "X-Poedit-KeywordsList: msg:2;IString:1;istring:1\n" "X-Poedit-Basepath: /home/oxana/CVSROOT/ARC1\n" "X-Poedit-SearchPath-0: src\n" #: src/clients/compute/arccat.cpp:35 src/clients/compute/arcclean.cpp:34 #: src/clients/compute/arcget.cpp:35 src/clients/compute/arckill.cpp:33 #: src/clients/compute/arcrenew.cpp:32 src/clients/compute/arcresub.cpp:35 #: src/clients/compute/arcresume.cpp:32 src/clients/compute/arcstat.cpp:34 msgid "[job ...]" msgstr "[Job ...]" #: src/clients/compute/arccat.cpp:36 msgid "" "The arccat command performs the cat command on the stdout, stderr or grid\n" "manager's error log of the job." 
msgstr "" "Эта команда предназначена для вывода на экран сообщений стандартного\n" "выхода, стандартной ошибки или ошибок системы при исполнении задачи" #: src/clients/compute/arccat.cpp:43 src/clients/compute/arcclean.cpp:41 #: src/clients/compute/arcget.cpp:42 src/clients/compute/arcinfo.cpp:45 #: src/clients/compute/arckill.cpp:40 src/clients/compute/arcrenew.cpp:37 #: src/clients/compute/arcresub.cpp:40 src/clients/compute/arcresume.cpp:37 #: src/clients/compute/arcstat.cpp:42 src/clients/compute/arcsub.cpp:54 #: src/clients/compute/arcsync.cpp:147 src/clients/compute/arctest.cpp:62 #: src/clients/credentials/arcproxy.cpp:475 #: src/clients/credentials/arcproxyalt.cpp:461 src/clients/data/arccp.cpp:641 #: src/clients/data/arcls.cpp:346 src/clients/data/arcmkdir.cpp:125 #: src/clients/data/arcrename.cpp:136 src/clients/data/arcrm.cpp:151 #: src/clients/echo/arcecho.cpp:61 src/clients/saml/saml_assertion_init.cpp:62 #: src/clients/wsrf/arcwsrf.cpp:74 src/hed/daemon/unix/main_unix.cpp:346 #: src/hed/daemon/win32/main_win32.cpp:148 #: src/services/a-rex/jura/jura.cpp:109 #, c-format msgid "%s version %s" msgstr "%s version %s" #: src/clients/compute/arccat.cpp:52 src/clients/compute/arcclean.cpp:50 #: src/clients/compute/arcget.cpp:51 src/clients/compute/arcinfo.cpp:53 #: src/clients/compute/arckill.cpp:49 src/clients/compute/arcrenew.cpp:46 #: src/clients/compute/arcresub.cpp:49 src/clients/compute/arcresume.cpp:46 #: src/clients/compute/arcstat.cpp:51 src/clients/compute/arcsub.cpp:63 #: src/clients/compute/arcsync.cpp:156 src/clients/compute/arctest.cpp:84 #: src/clients/credentials/arcproxy.cpp:483 #: src/clients/credentials/arcproxyalt.cpp:469 src/clients/data/arccp.cpp:648 #: src/clients/data/arcls.cpp:354 src/clients/data/arcmkdir.cpp:133 #: src/clients/data/arcrename.cpp:144 src/clients/data/arcrm.cpp:160 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:172 #, fuzzy, c-format msgid "Running command: %s" msgstr "Kommando: %s" #: src/clients/compute/arccat.cpp:63 src/clients/compute/arcclean.cpp:61 #: src/clients/compute/arcget.cpp:62 src/clients/compute/arcinfo.cpp:65 #: src/clients/compute/arckill.cpp:60 src/clients/compute/arcrenew.cpp:57 #: src/clients/compute/arcresub.cpp:53 src/clients/compute/arcresume.cpp:50 #: src/clients/compute/arcstat.cpp:62 src/clients/compute/arcsub.cpp:67 #: src/clients/compute/arcsync.cpp:167 src/clients/compute/arctest.cpp:88 #: src/clients/data/arccp.cpp:671 src/clients/data/arcls.cpp:376 #: src/clients/data/arcmkdir.cpp:155 src/clients/data/arcrename.cpp:166 #: src/clients/data/arcrm.cpp:182 src/clients/echo/arcecho.cpp:72 #: src/clients/wsrf/arcwsrf.cpp:101 #, fuzzy msgid "Failed configuration initialization" msgstr "Fehler bei Initialisierung der Konfiguration" #: src/clients/compute/arccat.cpp:76 src/clients/compute/arcclean.cpp:74 #: src/clients/compute/arcget.cpp:88 src/clients/compute/arckill.cpp:73 #: src/clients/compute/arcrenew.cpp:70 src/clients/compute/arcresub.cpp:81 #: src/clients/compute/arcresume.cpp:70 src/clients/compute/arcstat.cpp:71 #, fuzzy, c-format msgid "Cannot read specified jobid file: %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/clients/compute/arccat.cpp:87 src/clients/compute/arcclean.cpp:85 #: src/clients/compute/arcget.cpp:99 src/clients/compute/arckill.cpp:84 #: src/clients/compute/arcrenew.cpp:81 src/clients/compute/arcresub.cpp:95 #: src/clients/compute/arcresume.cpp:81 src/clients/compute/arcstat.cpp:105 msgid "No jobs given" msgstr "Keine Jobs angegeben" #: src/clients/compute/arccat.cpp:100 
src/clients/compute/arcclean.cpp:98 #: src/clients/compute/arcget.cpp:112 src/clients/compute/arckill.cpp:97 #: src/clients/compute/arcrenew.cpp:94 src/clients/compute/arcresub.cpp:105 #: src/clients/compute/arcresume.cpp:94 src/clients/compute/arcstat.cpp:117 #, fuzzy, c-format msgid "Job list file (%s) doesn't exist" msgstr "Lock-Datei %s existiert nicht" #: src/clients/compute/arccat.cpp:107 src/clients/compute/arcclean.cpp:105 #: src/clients/compute/arcget.cpp:119 src/clients/compute/arckill.cpp:104 #: src/clients/compute/arcrenew.cpp:101 src/clients/compute/arcresub.cpp:112 #: src/clients/compute/arcresume.cpp:101 src/clients/compute/arcstat.cpp:124 #: src/clients/compute/arctest.cpp:298 #, fuzzy, c-format msgid "Unable to read job information from file (%s)" msgstr "Konnte job information nicht beziehen für job: %s" #: src/clients/compute/arccat.cpp:116 src/clients/compute/arcclean.cpp:113 #: src/clients/compute/arcget.cpp:127 src/clients/compute/arckill.cpp:112 #: src/clients/compute/arcrenew.cpp:110 src/clients/compute/arcresub.cpp:120 #: src/clients/compute/arcresume.cpp:110 src/clients/compute/arcstat.cpp:133 #, fuzzy, c-format msgid "Warning: Job not found in job list: %s" msgstr "Kann Job ID nicht finden: %s" #: src/clients/compute/arccat.cpp:129 src/clients/compute/arcclean.cpp:168 #: src/clients/compute/arcget.cpp:140 src/clients/compute/arckill.cpp:124 #: src/clients/compute/arcrenew.cpp:122 src/clients/compute/arcresub.cpp:132 #: src/clients/compute/arcresume.cpp:122 #, fuzzy msgid "No jobs" msgstr "NO Job" #: src/clients/compute/arccat.cpp:144 #, c-format msgid "Could not create temporary file \"%s\"" msgstr "" #: src/clients/compute/arccat.cpp:145 src/clients/compute/arccat.cpp:151 #, fuzzy, c-format msgid "Cannot create output of %s for any jobs" msgstr "Kann Verzeichnis \"%s\" für cache nicht anlegen" #: src/clients/compute/arccat.cpp:152 #, fuzzy, c-format msgid "Invalid destination URL %s" msgstr "Ungültige URL: %s" #: src/clients/compute/arccat.cpp:170 #, c-format msgid "Job deleted: %s" msgstr "" #: src/clients/compute/arccat.cpp:180 #, c-format msgid "Job has not started yet: %s" msgstr "" #: src/clients/compute/arccat.cpp:188 #, fuzzy, c-format msgid "Cannot determine the %s location: %s" msgstr "Kann Funktion %s nicht anlegen" #: src/clients/compute/arccat.cpp:196 #, c-format msgid "Cannot create output of %s for job (%s): Invalid source %s" msgstr "" #: src/clients/compute/arccat.cpp:206 #, c-format msgid "Catting %s for job %s" msgstr "" #: src/clients/compute/arcclean.cpp:35 #, fuzzy msgid "The arcclean command removes a job from the computing resource." msgstr "Das arcclean Kommando entfernt einen Job von einem entfernten Cluster." #: src/clients/compute/arcclean.cpp:137 msgid "" "You are about to remove jobs from the job list for which no information " "could be\n" "found. NOTE: Recently submitted jobs might not have appeared in the " "information\n" "system, and this action will also remove such jobs." msgstr "" #: src/clients/compute/arcclean.cpp:140 #, fuzzy msgid "Are you sure you want to clean jobs missing information?" msgstr "Soll die lokale job list wirklich synchronisiert werden?" #: src/clients/compute/arcclean.cpp:141 src/clients/compute/arcsync.cpp:189 msgid "y" msgstr "j" #: src/clients/compute/arcclean.cpp:141 src/clients/compute/arcsync.cpp:189 msgid "n" msgstr "n" #: src/clients/compute/arcclean.cpp:146 #, fuzzy msgid "Jobs missing information will not be cleaned!" 
msgstr "Fehler bei Bezug von Information für Job: %s" #: src/clients/compute/arcclean.cpp:162 src/clients/compute/arcresub.cpp:155 #: src/clients/compute/arcsub.cpp:208 src/clients/compute/arctest.cpp:302 #, fuzzy, c-format msgid "Warning: Failed to write job information to file (%s)" msgstr "Konnte job information nicht beziehen für job: %s" #: src/clients/compute/arcclean.cpp:163 msgid "" " Run 'arcclean -s Undefined' to remove cleaned jobs from job list" msgstr "" #: src/clients/compute/arcclean.cpp:172 #, c-format msgid "Jobs processed: %d, deleted: %d" msgstr "" #: src/clients/compute/arcget.cpp:36 msgid "The arcget command is used for retrieving the results from a job." msgstr "Mit arcget erhält man die Ergebnisse eines Jobs." #: src/clients/compute/arcget.cpp:76 #, fuzzy, c-format msgid "Job download directory from user configuration file: %s " msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/clients/compute/arcget.cpp:79 #, fuzzy msgid "Job download directory will be created in present working directory. " msgstr "" "Download-Verzeichnis (das Job-Verzeichnis wird in diesem Verzeichnis " "abgelegt)" #: src/clients/compute/arcget.cpp:83 #, fuzzy, c-format msgid "Job download directory: %s " msgstr "Fehler beim Öffnen von Verzeichs: %s" #: src/clients/compute/arcget.cpp:150 #, fuzzy, c-format msgid "Unable to create directory for storing results (%s) - %s" msgstr "Fehler bei Anlegen von Datei %s zum Schreiben: %s" #: src/clients/compute/arcget.cpp:160 #, c-format msgid "Results stored at: %s" msgstr "" #: src/clients/compute/arcget.cpp:172 src/clients/compute/arckill.cpp:140 msgid "Warning: Some jobs were not removed from server" msgstr "" #: src/clients/compute/arcget.cpp:173 src/clients/compute/arcget.cpp:180 msgid " Use arclean to remove retrieved jobs from job list" msgstr "" #: src/clients/compute/arcget.cpp:179 src/clients/compute/arckill.cpp:147 #: src/clients/compute/arcresub.cpp:185 #, fuzzy, c-format msgid "Warning: Failed removing jobs from file (%s)" msgstr "Warnung: Fehler bei Bezug von Attributen von %s: %s" #: src/clients/compute/arcget.cpp:184 #, c-format msgid "" "Jobs processed: %d, successfully retrieved: %d, successfully cleaned: %d" msgstr "" #: src/clients/compute/arcget.cpp:188 #, c-format msgid "Jobs processed: %d, successfully retrieved: %d" msgstr "" #: src/clients/compute/arcinfo.cpp:34 #, fuzzy msgid "[resource ...]" msgstr "[Job ...]" #: src/clients/compute/arcinfo.cpp:35 #, fuzzy msgid "" "The arcinfo command is used for obtaining the status of computing resources " "on the Grid." msgstr "Mit arcinfo wird der Zustand von Clustern auf dem Grid bestimmt." #: src/clients/compute/arcinfo.cpp:142 msgid "Information endpoint" msgstr "" #: src/clients/compute/arcinfo.cpp:149 #, fuzzy msgid "Submission endpoint" msgstr "Submission ergab Fehler: %s" #: src/clients/compute/arcinfo.cpp:151 #, fuzzy msgid "status" msgstr "statusstr" #: src/clients/compute/arcinfo.cpp:153 #, fuzzy msgid "interface" msgstr "Benutzungsschnittstellenfehler" #: src/clients/compute/arcinfo.cpp:172 #, fuzzy msgid "ERROR: Failed to retrieve information from the following endpoints:" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/clients/compute/arcinfo.cpp:185 #, fuzzy msgid "ERROR: Failed to retrieve information" msgstr "Konnte Job Status Information nicht beziehen." #: src/clients/compute/arcinfo.cpp:187 msgid "from the following endpoints:" msgstr "" #: src/clients/compute/arckill.cpp:34 msgid "The arckill command is used to kill running jobs." 
msgstr "Mit arckill lassen sich laufenden Prozesse beenden." #: src/clients/compute/arckill.cpp:141 msgid " Use arcclean to remove retrieved jobs from job list" msgstr "" #: src/clients/compute/arckill.cpp:148 msgid "" " Run 'arcclean -s Undefined' to remove killed jobs from job list" msgstr "" #: src/clients/compute/arckill.cpp:151 #, c-format msgid "Jobs processed: %d, successfully killed: %d, successfully cleaned: %d" msgstr "" #: src/clients/compute/arckill.cpp:153 #, fuzzy, c-format msgid "Jobs processed: %d, successfully killed: %d" msgstr "Job resumed erfolgreich" #: src/clients/compute/arcrenew.cpp:128 #, c-format msgid "Jobs processed: %d, renewed: %d" msgstr "" #: src/clients/compute/arcresub.cpp:75 msgid "--same and --not-same cannot be specified together." msgstr "" #: src/clients/compute/arcresub.cpp:144 #, fuzzy msgid "No jobs to resubmit with the specified status" msgstr "Keine Job Beschreibung als Eingabe benötigt" #: src/clients/compute/arcresub.cpp:151 src/clients/compute/arcsub.cpp:194 #, c-format msgid "Job submitted with jobid: %s" msgstr "Job hochgeladen mit Job ID: %s" #: src/clients/compute/arcresub.cpp:156 msgid " To recover missing jobs, run arcsync" msgstr "" #: src/clients/compute/arcresub.cpp:161 #, fuzzy, c-format msgid "Cannot write jobids to file (%s)" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/clients/compute/arcresub.cpp:172 #, c-format msgid "" "Resubmission of job (%s) succeeded, but killing the job failed - it will " "still appear in the job list" msgstr "" #: src/clients/compute/arcresub.cpp:181 #, c-format msgid "" "Resubmission of job (%s) succeeded, but cleaning the job failed - it will " "still appear in the job list" msgstr "" #: src/clients/compute/arcresub.cpp:186 msgid " Use arcclean to remove non-existing jobs" msgstr "" #: src/clients/compute/arcresub.cpp:193 #, fuzzy msgid "Job resubmission summary:" msgstr "Job Hochladen Zusammenfassung:" #: src/clients/compute/arcresub.cpp:195 #, fuzzy, c-format msgid "%d of %d jobs were resubmitted" msgstr "%d von %s Jobs wurden hochgeladen" #: src/clients/compute/arcresub.cpp:197 #, fuzzy, c-format msgid "The following %d were not resubmitted" msgstr "Die folgenden %d wurden nicht hochgeladen" #: src/clients/compute/arcresume.cpp:128 #, c-format msgid "Jobs processed: %d, resumed: %d" msgstr "" #: src/clients/compute/arcstat.cpp:35 #, fuzzy msgid "" "The arcstat command is used for obtaining the status of jobs that have\n" "been submitted to Grid enabled resources." msgstr "" "Эта команда используется для вывода информации о состоянии\n" "задач, отправленных на Грид, и о состоянии вычислительных\n" "ресурсов Грид " #: src/clients/compute/arcstat.cpp:79 msgid "The 'sort' and 'rsort' flags cannot be specified at the same time." msgstr "" #: src/clients/compute/arcstat.cpp:149 #, fuzzy msgid "No jobs found, try later" msgstr "Keine Jobs zu bearbeiten" #: src/clients/compute/arcstat.cpp:176 #, c-format msgid "Status of %d jobs was queried, %d jobs returned information" msgstr "" #: src/clients/compute/arcsub.cpp:46 msgid "[filename ...]" msgstr "[dateiname ...]" #: src/clients/compute/arcsub.cpp:47 #, fuzzy msgid "" "The arcsub command is used for submitting jobs to Grid enabled computing\n" "resources." 
msgstr "Mit dem arcsub Kommando werden Jobs den entfernten Clustern zugewiesen" #: src/clients/compute/arcsub.cpp:99 #, fuzzy msgid "No job description input specified" msgstr "Keine Job Beschreibung als Eingabe benötigt" #: src/clients/compute/arcsub.cpp:112 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:499 #, c-format msgid "Can not open job description file: %s" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/clients/compute/arcsub.cpp:140 src/clients/compute/arcsub.cpp:168 msgid "Invalid JobDescription:" msgstr "Ungültige JobDescription:" #: src/clients/compute/arcsub.cpp:200 #, fuzzy, c-format msgid "Cannot write job IDs to file (%s)" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/clients/compute/arcsub.cpp:205 src/clients/compute/arcsync.cpp:66 #, c-format msgid "Warning: Unable to open job list file (%s), unknown format" msgstr "" #: src/clients/compute/arcsub.cpp:210 src/clients/compute/arctest.cpp:304 msgid "To recover missing jobs, run arcsync" msgstr "" #: src/clients/compute/arcsub.cpp:217 msgid "Job submission summary:" msgstr "Job Hochladen Zusammenfassung:" #: src/clients/compute/arcsub.cpp:219 #, fuzzy, c-format msgid "%d of %d jobs were submitted" msgstr "%d von %s Jobs wurden hochgeladen" #: src/clients/compute/arcsub.cpp:221 #, fuzzy, c-format msgid "The following %d were not submitted" msgstr "Die folgenden %d wurden nicht hochgeladen" #: src/clients/compute/arcsub.cpp:228 msgid "Job nr." msgstr "" #: src/clients/compute/arcsub.cpp:268 #, c-format msgid "Removing endpoint %s: It has an unrequested interface (%s)." msgstr "" #: src/clients/compute/arcsub.cpp:282 #, fuzzy, c-format msgid "ERROR: Unable to load broker %s" msgstr "Konnter Broker %s nicht laden" #: src/clients/compute/arcsub.cpp:286 #, fuzzy msgid "" "ERROR: Job submission aborted because no resource returned any information" msgstr "" "Hochladen des Jobs abgebrochen, da keine Cluster entsprechende Informationen " "anboten" #: src/clients/compute/arcsub.cpp:290 msgid "ERROR: One or multiple job descriptions was not submitted." msgstr "" #: src/clients/compute/arcsub.cpp:304 #, c-format msgid "" "A computing resource using the GridFTP interface was requested, but\n" "%sthe corresponding plugin could not be loaded. Is the plugin installed?\n" "%sIf not, please install the package 'nordugrid-arc-plugins-globus'.\n" "%sDepending on your type of installation the package name might differ." msgstr "" #: src/clients/compute/arcsub.cpp:338 src/clients/compute/arctest.cpp:236 msgid "" "Unable to adapt job description to any resource, no resource information " "could be obtained." msgstr "" #: src/clients/compute/arcsub.cpp:339 src/clients/compute/arctest.cpp:237 #, fuzzy msgid "Original job description is listed below:" msgstr "" " -o, -stdout вывести файл стандартого выхода задачи (по\n" " умолчанию)" #: src/clients/compute/arcsub.cpp:351 #, c-format msgid "Dumping job description aborted: Unable to load broker %s" msgstr "" #: src/clients/compute/arcsub.cpp:368 src/clients/compute/arctest.cpp:317 #, fuzzy, c-format msgid "" "Unable to prepare job description according to needs of the target resource " "(%s)." msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #: src/clients/compute/arcsub.cpp:384 src/clients/compute/arctest.cpp:330 #, fuzzy, c-format msgid "" "An error occurred during the generation of job description to be sent to %s" msgstr "Eine fehler geschah während des Generieres der Job Beschreibung." 
#: src/clients/compute/arcsub.cpp:388 src/clients/compute/arctest.cpp:334 #, fuzzy, c-format msgid "Job description to be sent to %s:" msgstr "Zu sendende Job-Beschreibung : %s" #: src/clients/compute/arcsub.cpp:406 #, fuzzy msgid "" "Unable to prepare job description according to needs of the target resource." msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #: src/clients/compute/arcsync.cpp:76 msgid "Found the following jobs:" msgstr "" #: src/clients/compute/arcsync.cpp:86 msgid "Total number of jobs found: " msgstr "" #: src/clients/compute/arcsync.cpp:98 msgid "Found the following new jobs:" msgstr "" #: src/clients/compute/arcsync.cpp:108 msgid "Total number of new jobs found: " msgstr "" #: src/clients/compute/arcsync.cpp:113 #, fuzzy, c-format msgid "ERROR: Failed to write job information to file (%s)" msgstr "Konnte job information nicht beziehen für job: %s" #: src/clients/compute/arcsync.cpp:140 #, fuzzy msgid "" "The arcsync command synchronizes your local job list with the information " "at\n" "the given resources or index servers." msgstr "" "Das Kommando synchronisierte Ihre lokale Jobliste mit der Information eines " "Clusters oder Index-Servers" #: src/clients/compute/arcsync.cpp:183 #, fuzzy msgid "" "Synchronizing the local list of active jobs with the information in the\n" "information system can result in some inconsistencies. Very recently " "submitted\n" "jobs might not yet be present, whereas jobs very recently scheduled for\n" "deletion can still be present." msgstr "" "Synchronisiere lokale Liste aktiver Jobs mit der Information im MDS. Dies " "mag\n" "zu Inkonsistenzen führen. Gerade erst hochgeladene Jobs sind vielleicht " "noch\n" "nicht dem MDB bekannt, während für die Löschung ausgewählte Jobs noch ange-\n" "zeigt werden." #: src/clients/compute/arcsync.cpp:188 msgid "Are you sure you want to synchronize your local job list?" msgstr "Soll die lokale job list wirklich synchronisiert werden?" #: src/clients/compute/arcsync.cpp:193 msgid "Cancelling synchronization request" msgstr "Abbruch der Synchronisationsanfrage" #: src/clients/compute/arcsync.cpp:203 msgid "" "No services specified. Please configure default services in the client " "configuration,or specify a cluster or index (-c or -g options, see arcsync -" "h)." msgstr "" #: src/clients/compute/arctest.cpp:55 msgid " " msgstr "" #: src/clients/compute/arctest.cpp:56 #, fuzzy msgid "The arctest command is used for testing clusters as resources." msgstr "Mit arcget erhält man die Ergebnisse eines Jobs." #: src/clients/compute/arctest.cpp:68 msgid "" "Nothing to do:\n" "you have to either specify a test job id with -J (--job)\n" "or query information about the certificates with -E (--certificate)\n" msgstr "" #: src/clients/compute/arctest.cpp:75 msgid "" "For the 1st test job you also have to specify a runtime value with -r (--" "runtime) option." 
msgstr "" #: src/clients/compute/arctest.cpp:109 #, fuzzy msgid "Certificate information:" msgstr "Ungültige Authentisierungs-Information" #: src/clients/compute/arctest.cpp:113 #, fuzzy msgid "No user-certificate found" msgstr "Pfad zu Zertifikat-Datei" #: src/clients/compute/arctest.cpp:116 #, fuzzy, c-format msgid "Certificate: %s" msgstr "Voreinstellung: %s" #: src/clients/compute/arctest.cpp:118 #, fuzzy, c-format msgid "Subject name: %s" msgstr "Subjekt: %s" #: src/clients/compute/arctest.cpp:119 #, fuzzy, c-format msgid "Valid until: %s" msgstr "Ungültige url: %s" #: src/clients/compute/arctest.cpp:123 #, fuzzy msgid "Unable to determine certificate information" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/clients/compute/arctest.cpp:127 #, fuzzy msgid "Proxy certificate information:" msgstr "Ungültige Authentisierungs-Information" #: src/clients/compute/arctest.cpp:129 msgid "No proxy found" msgstr "" #: src/clients/compute/arctest.cpp:132 #, fuzzy, c-format msgid "Proxy: %s" msgstr "Proxy Pfad: %s" #: src/clients/compute/arctest.cpp:133 #, fuzzy, c-format msgid "Proxy-subject: %s" msgstr "Subjekt: %s" #: src/clients/compute/arctest.cpp:135 #, fuzzy msgid "Valid for: Proxy expired" msgstr "Zeit verbleibend für Proxy: Proxy abgelaufen" #: src/clients/compute/arctest.cpp:137 #, fuzzy msgid "Valid for: Proxy not valid" msgstr "Zeit verbleibend für Proxy: Proxy ungültig" #: src/clients/compute/arctest.cpp:139 #, fuzzy, c-format msgid "Valid for: %s" msgstr "Ungültige url: %s" #: src/clients/compute/arctest.cpp:144 #, c-format msgid "Certificate issuer: %s" msgstr "" #: src/clients/compute/arctest.cpp:148 #, fuzzy msgid "CA-certificates installed:" msgstr "Pfad zu Zertifikat-Datei" #: src/clients/compute/arctest.cpp:170 msgid "Unable to detect if issuer certificate is installed." 
msgstr "" #: src/clients/compute/arctest.cpp:173 msgid "Your issuer's certificate is not installed" msgstr "" #: src/clients/compute/arctest.cpp:191 #, c-format msgid "No test-job, with ID \"%d\"" msgstr "" #: src/clients/compute/arctest.cpp:209 #, fuzzy, c-format msgid "Unable to load broker %s" msgstr "Konnter Broker %s nicht laden" #: src/clients/compute/arctest.cpp:212 #: src/hed/libs/compute/BrokerPlugin.cpp:106 #, c-format msgid "Broker %s loaded" msgstr "Broker %s geladen" #: src/clients/compute/arctest.cpp:234 #, fuzzy msgid "Test aborted because no resource returned any information" msgstr "Job Migration abgebrochen, da kein Cluster Informationen lieferte" #: src/clients/compute/arctest.cpp:247 msgid "" "ERROR: Test aborted because no suitable resources were found for the test-job" msgstr "" #: src/clients/compute/arctest.cpp:249 msgid "" "ERROR: Dumping job description aborted because no suitable resources were " "found for the test-job" msgstr "" #: src/clients/compute/arctest.cpp:258 #, c-format msgid "Submitting test-job %d:" msgstr "" #: src/clients/compute/arctest.cpp:262 #, c-format msgid "Client version: nordugrid-arc-%s" msgstr "" #: src/clients/compute/arctest.cpp:269 #, fuzzy, c-format msgid "Cannot write jobid (%s) to file (%s)" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/clients/compute/arctest.cpp:270 #, fuzzy, c-format msgid "Test submitted with jobid: %s" msgstr "Job hochgeladen mit Job ID: %s" #: src/clients/compute/arctest.cpp:285 #, fuzzy, c-format msgid "Computing service: %s" msgstr "Delegation service: %s" #: src/clients/compute/arctest.cpp:291 #, fuzzy msgid "Test failed, no more possible targets" msgstr "Hochladen des Jobs schlug fehl, keine weiteren Ziele verfügbar" #: src/clients/compute/utils.cpp:118 #, c-format msgid "Types of execution services %s is able to submit jobs to:" msgstr "" #: src/clients/compute/utils.cpp:121 #, c-format msgid "Types of registry services which %s is able collect information from:" msgstr "" #: src/clients/compute/utils.cpp:124 #, c-format msgid "" "Types of local information services which %s is able collect information " "from:" msgstr "" #: src/clients/compute/utils.cpp:127 #, c-format msgid "" "Types of local information services which %s is able collect job information " "from:" msgstr "" #: src/clients/compute/utils.cpp:130 #, c-format msgid "Types of services %s is able to manage jobs at:" msgstr "" #: src/clients/compute/utils.cpp:133 #, fuzzy, c-format msgid "Job description languages supported by %s:" msgstr "Zu sendende Job-Beschreibung : %s" #: src/clients/compute/utils.cpp:136 #, c-format msgid "Brokers available to %s:" msgstr "" #: src/clients/compute/utils.cpp:159 #, c-format msgid "" "Default broker (%s) is not available. When using %s a broker should be " "specified explicitly (-b option)." msgstr "" #: src/clients/compute/utils.cpp:169 msgid "Proxy expired. Job submission aborted. Please run 'arcproxy'!" msgstr "" #: src/clients/compute/utils.cpp:174 msgid "" "Cannot find any proxy. This application currently cannot run without a " "proxy.\n" " If you have the proxy file in a non-default location,\n" " please make sure the path is specified in the client configuration file.\n" " If you don't have a proxy yet, please run 'arcproxy'!" 
msgstr "" #: src/clients/compute/utils.cpp:278 msgid "" "select one or more computing elements: name can be an alias for a single CE, " "a group of CEs or a URL" msgstr "" #: src/clients/compute/utils.cpp:280 src/clients/compute/utils.cpp:297 #: src/clients/compute/utils.cpp:404 msgid "name" msgstr "" #: src/clients/compute/utils.cpp:285 msgid "" "the computing element specified by URL at the command line should be queried " "using this information interface (possible options: org.nordugrid.ldapng, " "org.nordugrid.ldapglue2, org.nordugrid.wsrfglue2, org.ogf.glue.emies." "resourceinfo)" msgstr "" #: src/clients/compute/utils.cpp:289 #, fuzzy msgid "interfacename" msgstr "Benutzungsschnittstellenfehler" #: src/clients/compute/utils.cpp:295 msgid "" "selecting a computing element for the new jobs with a URL or an alias, or " "selecting a group of computing elements with the name of the group" msgstr "" #: src/clients/compute/utils.cpp:303 msgid "force migration, ignore kill failure" msgstr "" "erzwinge Migration, ignoriere ein Fehlschlagen des Abbruchs bereits " "laufender Jobs" #: src/clients/compute/utils.cpp:309 msgid "keep the files on the server (do not clean)" msgstr "behalte die Dateien auf dem Server (dort nicht löschen)" #: src/clients/compute/utils.cpp:315 msgid "do not ask for verification" msgstr "frage nicht nach Verifikation" #: src/clients/compute/utils.cpp:319 #, fuzzy msgid "truncate the joblist before synchronizing" msgstr "kürze Jobliste vor Synchronisation" #: src/clients/compute/utils.cpp:325 src/clients/data/arcls.cpp:287 msgid "long format (more information)" msgstr "ausführliche Ausgabe" #: src/clients/compute/utils.cpp:331 msgid "print a list of services configured in the client.conf" msgstr "" #: src/clients/compute/utils.cpp:337 msgid "show the stdout of the job (default)" msgstr "Zeige stdout des Jobs (Voreinstellung)" #: src/clients/compute/utils.cpp:341 msgid "show the stderr of the job" msgstr "zeige stderr des Jobs" #: src/clients/compute/utils.cpp:345 #, fuzzy msgid "show the CE's error log of the job" msgstr "zeige den error log des Grid Manager für diesen Job" #: src/clients/compute/utils.cpp:351 msgid "" "download directory (the job directory will be created in this directory)" msgstr "" "Download-Verzeichnis (das Job-Verzeichnis wird in diesem Verzeichnis " "abgelegt)" #: src/clients/compute/utils.cpp:353 msgid "dirname" msgstr "Verzeichnisname" #: src/clients/compute/utils.cpp:357 msgid "use the jobname instead of the short ID as the job directory name" msgstr "" #: src/clients/compute/utils.cpp:362 msgid "force download (overwrite existing job directory)" msgstr "" #: src/clients/compute/utils.cpp:368 msgid "instead of the status only the IDs of the selected jobs will be printed" msgstr "" #: src/clients/compute/utils.cpp:372 msgid "sort jobs according to jobid, submissiontime or jobname" msgstr "" #: src/clients/compute/utils.cpp:373 src/clients/compute/utils.cpp:376 msgid "order" msgstr "" #: src/clients/compute/utils.cpp:375 msgid "reverse sorting of jobs according to jobid, submissiontime or jobname" msgstr "" #: src/clients/compute/utils.cpp:379 msgid "show jobs where status information is unavailable" msgstr "" #: src/clients/compute/utils.cpp:385 #, fuzzy msgid "resubmit to the same resource" msgstr "Erneut zu demselben Cluster submitten" #: src/clients/compute/utils.cpp:389 #, fuzzy msgid "do not resubmit to the same resource" msgstr "Erneut zu demselben Cluster submitten" #: src/clients/compute/utils.cpp:395 msgid "" "remove the job from the 
local list of jobs even if the job is not found in " "the infosys" msgstr "" "entferne Job aus lokaler Liste selbst wenn der Job dem Infosys nicht bekannt " "ist" #: src/clients/compute/utils.cpp:402 msgid "" "select one or more registries: name can be an alias for a single registry, a " "group of registries or a URL" msgstr "" #: src/clients/compute/utils.cpp:410 msgid "submit test job given by the number" msgstr "" #: src/clients/compute/utils.cpp:411 src/clients/compute/utils.cpp:415 #, fuzzy msgid "int" msgstr "Minuten" #: src/clients/compute/utils.cpp:414 msgid "test job runtime specified by the number" msgstr "" #: src/clients/compute/utils.cpp:421 msgid "only select jobs whose status is statusstr" msgstr "Selektiere Jobs mit Status statusstr" #: src/clients/compute/utils.cpp:422 msgid "statusstr" msgstr "statusstr" #: src/clients/compute/utils.cpp:428 msgid "all jobs" msgstr "alle Jobs" #: src/clients/compute/utils.cpp:434 msgid "jobdescription string describing the job to be submitted" msgstr "Zeichenkette mit Job-Beschreibung wird hochgeladen" #: src/clients/compute/utils.cpp:436 src/clients/compute/utils.cpp:442 #: src/clients/credentials/arcproxy.cpp:369 #: src/clients/credentials/arcproxy.cpp:376 #: src/clients/credentials/arcproxy.cpp:394 #: src/clients/credentials/arcproxy.cpp:401 #: src/clients/credentials/arcproxy.cpp:419 #: src/clients/credentials/arcproxy.cpp:423 #: src/clients/credentials/arcproxy.cpp:438 #: src/clients/credentials/arcproxy.cpp:448 #: src/clients/credentials/arcproxy.cpp:452 #: src/clients/credentials/arcproxyalt.cpp:369 #: src/clients/credentials/arcproxyalt.cpp:376 #: src/clients/credentials/arcproxyalt.cpp:399 #: src/clients/credentials/arcproxyalt.cpp:415 #: src/clients/credentials/arcproxyalt.cpp:419 #: src/clients/credentials/arcproxyalt.cpp:429 #: src/clients/credentials/arcproxyalt.cpp:433 msgid "string" msgstr "Zeichenkette" #: src/clients/compute/utils.cpp:440 msgid "jobdescription file describing the job to be submitted" msgstr "Datei mit Job-Beschreibung wird hochgeladen" #: src/clients/compute/utils.cpp:448 msgid "select broker method (list available brokers with --listplugins flag)" msgstr "" #: src/clients/compute/utils.cpp:449 msgid "broker" msgstr "Broker" #: src/clients/compute/utils.cpp:452 msgid "the IDs of the submitted jobs will be appended to this file" msgstr "" #: src/clients/compute/utils.cpp:453 src/clients/compute/utils.cpp:475 #: src/clients/compute/utils.cpp:512 src/clients/compute/utils.cpp:520 #: src/clients/credentials/arcproxy.cpp:461 #: src/clients/credentials/arcproxyalt.cpp:447 src/clients/data/arccp.cpp:627 #: src/clients/data/arcls.cpp:332 src/clients/data/arcmkdir.cpp:111 #: src/clients/data/arcrename.cpp:122 src/clients/data/arcrm.cpp:137 #: src/clients/echo/arcecho.cpp:47 src/clients/wsrf/arcwsrf.cpp:57 msgid "filename" msgstr "Dateiname" #: src/clients/compute/utils.cpp:457 msgid "" "only use this interface for submitting (e.g. org.nordugrid.gridftpjob, org." "ogf.glue.emies.activitycreation, org.ogf.bes)" msgstr "" #: src/clients/compute/utils.cpp:459 src/clients/compute/utils.cpp:501 #, fuzzy msgid "InterfaceName" msgstr "Interaktiver Modus." 
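# Translator note (illustrative only, not extracted from the sources): the
# option strings above describe the shared compute-client command line.
# Assuming the usual short flags (-c computing element, -b broker, -o job-ID
# file), a submission exercising several of them could look like:
#   arcsub -c ce.example.org -b Random -o submitted.jobids job.xrsl
# Host name, broker name and file names are placeholders.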
#: src/clients/compute/utils.cpp:466 msgid "skip the service with the given URL during service discovery" msgstr "" #: src/clients/compute/utils.cpp:467 src/clients/compute/utils.cpp:480 #: src/clients/data/arccp.cpp:607 msgid "URL" msgstr "" #: src/clients/compute/utils.cpp:474 #, fuzzy msgid "a file containing a list of jobIDs" msgstr "Datei mit Liste aller Jobs" #: src/clients/compute/utils.cpp:479 msgid "skip jobs which are on a computing element with a given URL" msgstr "" #: src/clients/compute/utils.cpp:485 msgid "submit jobs as dry run (no submission to batch system)" msgstr "" #: src/clients/compute/utils.cpp:488 msgid "submit directly - no resource discovery or matchmaking" msgstr "" #: src/clients/compute/utils.cpp:492 msgid "" "do not submit - dump job description in the language accepted by the target" msgstr "" #: src/clients/compute/utils.cpp:499 msgid "" "only get information about executon targets which support this job " "submission interface (e.g. org.nordugrid.gridftpjob, org.ogf.glue.emies." "activitycreation, org.ogf.bes)" msgstr "" #: src/clients/compute/utils.cpp:506 msgid "prints info about installed user- and CA-certificates" msgstr "" #: src/clients/compute/utils.cpp:511 #, fuzzy, c-format msgid "the file storing information about active jobs (default %s)" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/clients/compute/utils.cpp:519 src/clients/credentials/arcproxy.cpp:460 #: src/clients/credentials/arcproxyalt.cpp:446 src/clients/data/arccp.cpp:626 #: src/clients/data/arcls.cpp:331 src/clients/data/arcmkdir.cpp:110 #: src/clients/data/arcrename.cpp:121 src/clients/data/arcrm.cpp:136 #: src/clients/echo/arcecho.cpp:46 src/clients/wsrf/arcwsrf.cpp:56 msgid "configuration file (default ~/.arc/client.conf)" msgstr "Konfigurationsdatei (Vorteinstellung ~/.arc/client.conf)" #: src/clients/compute/utils.cpp:522 src/clients/credentials/arcproxy.cpp:455 #: src/clients/credentials/arcproxyalt.cpp:441 src/clients/data/arccp.cpp:621 #: src/clients/data/arcls.cpp:326 src/clients/data/arcmkdir.cpp:105 #: src/clients/data/arcrename.cpp:116 src/clients/data/arcrm.cpp:131 #: src/clients/echo/arcecho.cpp:41 src/clients/wsrf/arcwsrf.cpp:51 msgid "timeout in seconds (default 20)" msgstr "Zeitüberschreitung nach Sekunden (Voreinstellung 20)" #: src/clients/compute/utils.cpp:523 src/clients/credentials/arcproxy.cpp:456 #: src/clients/credentials/arcproxyalt.cpp:442 src/clients/data/arccp.cpp:622 #: src/clients/data/arcls.cpp:327 src/clients/data/arcmkdir.cpp:106 #: src/clients/data/arcrename.cpp:117 src/clients/data/arcrm.cpp:132 #: src/clients/echo/arcecho.cpp:42 src/clients/wsrf/arcwsrf.cpp:52 msgid "seconds" msgstr "Sekunden" #: src/clients/compute/utils.cpp:526 msgid "list the available plugins" msgstr "" #: src/clients/compute/utils.cpp:530 src/clients/credentials/arcproxy.cpp:465 #: src/clients/credentials/arcproxyalt.cpp:451 src/clients/data/arccp.cpp:631 #: src/clients/data/arcls.cpp:336 src/clients/data/arcmkdir.cpp:115 #: src/clients/data/arcrename.cpp:126 src/clients/data/arcrm.cpp:141 #: src/clients/echo/arcecho.cpp:51 src/clients/saml/saml_assertion_init.cpp:52 #: src/clients/wsrf/arcwsrf.cpp:61 #: src/hed/libs/compute/test_jobdescription.cpp:38 #: src/services/a-rex/grid-manager/inputcheck.cpp:81 msgid "FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG" msgstr "FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG" #: src/clients/compute/utils.cpp:531 src/clients/credentials/arcproxy.cpp:466 #: src/clients/credentials/arcproxyalt.cpp:452 src/clients/data/arccp.cpp:632 
#: src/clients/data/arcls.cpp:337 src/clients/data/arcmkdir.cpp:116 #: src/clients/data/arcrename.cpp:127 src/clients/data/arcrm.cpp:142 #: src/clients/echo/arcecho.cpp:52 src/clients/saml/saml_assertion_init.cpp:53 #: src/clients/wsrf/arcwsrf.cpp:62 #: src/hed/libs/compute/test_jobdescription.cpp:39 #: src/services/a-rex/grid-manager/inputcheck.cpp:82 #, fuzzy msgid "debuglevel" msgstr "debuglevel" #: src/clients/compute/utils.cpp:533 src/clients/credentials/arcproxy.cpp:469 #: src/clients/credentials/arcproxyalt.cpp:455 src/clients/data/arccp.cpp:635 #: src/clients/data/arcls.cpp:340 src/clients/data/arcmkdir.cpp:119 #: src/clients/data/arcrename.cpp:130 src/clients/data/arcrm.cpp:145 #: src/clients/echo/arcecho.cpp:55 src/clients/saml/saml_assertion_init.cpp:56 #: src/clients/wsrf/arcwsrf.cpp:65 msgid "print version information" msgstr "Angabe des aktuellen Versionsbezeichners" #: src/clients/credentials/arcproxy.cpp:172 #: src/hed/libs/credential/ARCProxyUtil.cpp:1248 #, fuzzy, c-format msgid "There are %d user certificates existing in the NSS database" msgstr "Es sind %d Zertifikate in der zurückgelieferten Nachricht." #: src/clients/credentials/arcproxy.cpp:188 #: src/hed/libs/credential/ARCProxyUtil.cpp:1264 #, c-format msgid "Number %d is with nickname: %s%s" msgstr "" #: src/clients/credentials/arcproxy.cpp:197 #: src/hed/libs/credential/ARCProxyUtil.cpp:1273 #, c-format msgid " expiration time: %s " msgstr "" #: src/clients/credentials/arcproxy.cpp:201 #: src/hed/libs/credential/ARCProxyUtil.cpp:1277 #, fuzzy, c-format msgid " certificate dn: %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/clients/credentials/arcproxy.cpp:202 #: src/hed/libs/credential/ARCProxyUtil.cpp:1278 #, fuzzy, c-format msgid " issuer dn: %s" msgstr " base dn: %s" #: src/clients/credentials/arcproxy.cpp:203 #: src/hed/libs/credential/ARCProxyUtil.cpp:1279 #, c-format msgid " serial number: %d" msgstr "" #: src/clients/credentials/arcproxy.cpp:207 #: src/hed/libs/credential/ARCProxyUtil.cpp:1283 #, c-format msgid "Please choose the one you would use (1-%d): " msgstr "" #: src/clients/credentials/arcproxy.cpp:272 #: src/clients/credentials/arcproxyalt.cpp:317 #, fuzzy msgid "" "The arcproxy command creates a proxy from a key/certificate pair which can\n" "then be used to access grid resources." msgstr "" "Команда arcproxy создаёт доверенности из пары закрытый/открытый ключ\n" "для использования на Гриде" #: src/clients/credentials/arcproxy.cpp:274 msgid "" "Supported constraints are:\n" " validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start " "from now)\n" " validityEnd=time\n" " validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod and " "validityEnd\n" " not specified, the default is 12 hours for local proxy, and 168 hours " "for delegated\n" " proxy on myproxy server)\n" " vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, the " "default\n" " is the minimum value of 12 hours and validityPeriod)\n" " myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy " "server,\n" " e.g. 43200 or 12h or 12H; if not specified, the default is the minimum " "value of\n" " 12 hours and validityPeriod (which is lifetime of the delegated proxy on " "myproxy server))\n" " proxyPolicy=policy content\n" " proxyPolicyFile=policy file\n" " keybits=number - length of the key to generate. 
Default is 1024 bits.\n" " Special value 'inherit' is to use key length of signing certificate.\n" " signingAlgorithm=name - signing algorithm to use for signing public key of " "proxy.\n" " Possible values are sha1, sha2 (alias for sha256), sha224, sha256, " "sha384, sha512\n" " and inherit (use algorithm of signing certificate). Default is inherit.\n" " With old systems, only sha1 is acceptable.\n" "\n" "Supported information item names are:\n" " subject - subject name of proxy certificate.\n" " identity - identity subject name of proxy certificate.\n" " issuer - issuer subject name of proxy certificate.\n" " ca - subject name of CA ehich issued initial certificate\n" " path - file system path to file containing proxy.\n" " type - type of proxy certificate.\n" " validityStart - timestamp when proxy validity starts.\n" " validityEnd - timestamp when proxy validity ends.\n" " validityPeriod - duration of proxy validity in seconds.\n" " validityLeft - duration of proxy validity left in seconds.\n" " vomsVO - VO name represented by VOMS attribute\n" " vomsSubject - subject of certificate for which VOMS attribute is issued\n" " vomsIssuer - subject of service which issued VOMS certificate\n" " vomsACvalidityStart - timestamp when VOMS attribute validity starts.\n" " vomsACvalidityEnd - timestamp when VOMS attribute validity ends.\n" " vomsACvalidityPeriod - duration of VOMS attribute validity in seconds.\n" " vomsACvalidityLeft - duration of VOMS attribute validity left in seconds.\n" " proxyPolicy\n" " keybits - size of proxy certificate key in bits.\n" " signingAlgorithm - algorithm used to sign proxy certificate.\n" "Items are printed in requested order and are separated by newline.\n" "If item has multiple values they are printed in same line separated by |.\n" "\n" "Supported password destinations are:\n" " key - for reading private key\n" " myproxy - for accessing credentials at MyProxy service\n" " myproxynew - for creating credentials at MyProxy service\n" " all - for any purspose.\n" "\n" "Supported password sources are:\n" " quoted string (\"password\") - explicitly specified password\n" " int - interactively request password from console\n" " stdin - read password from standard input delimited by newline\n" " file:filename - read password from file named filename\n" " stream:# - read password from input stream number #.\n" " Currently only 0 (standard input) is supported.\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:334 #: src/clients/credentials/arcproxyalt.cpp:334 #, fuzzy msgid "path to the proxy file" msgstr "Pfad zu Proxy-Datei" #: src/clients/credentials/arcproxy.cpp:335 #: src/clients/credentials/arcproxy.cpp:339 #: src/clients/credentials/arcproxy.cpp:343 #: src/clients/credentials/arcproxy.cpp:347 #: src/clients/credentials/arcproxy.cpp:351 #: src/clients/credentials/arcproxy.cpp:355 #: src/clients/credentials/arcproxyalt.cpp:335 #: src/clients/credentials/arcproxyalt.cpp:339 #: src/clients/credentials/arcproxyalt.cpp:343 #: src/clients/credentials/arcproxyalt.cpp:347 #: src/clients/credentials/arcproxyalt.cpp:351 #: src/clients/credentials/arcproxyalt.cpp:355 src/clients/data/arccp.cpp:584 #: src/clients/saml/saml_assertion_init.cpp:48 msgid "path" msgstr "Pfad" #: src/clients/credentials/arcproxy.cpp:338 #: src/clients/credentials/arcproxyalt.cpp:338 msgid "" "path to the certificate file, it can be either PEM, DER, or PKCS12 formated" msgstr "" #: src/clients/credentials/arcproxy.cpp:342 #: src/clients/credentials/arcproxyalt.cpp:342 msgid "" "path to the 
private key file, if the certificate is in PKCS12 format, then " "no need to give private key" msgstr "" #: src/clients/credentials/arcproxy.cpp:346 #: src/clients/credentials/arcproxyalt.cpp:346 #, fuzzy msgid "" "path to the trusted certificate directory, only needed for the VOMS client " "functionality" msgstr "" "путь к каталогу с доверяемыми сертификатами, используется\n" " только клиентом VOMS" #: src/clients/credentials/arcproxy.cpp:350 #: src/clients/credentials/arcproxyalt.cpp:350 #, fuzzy msgid "" "path to the top directory of VOMS *.lsc files, only needed for the VOMS " "client functionality" msgstr "" "путь к каталогу с доверяемыми сертификатами, используется\n" " только клиентом VOMS" #: src/clients/credentials/arcproxy.cpp:354 #: src/clients/credentials/arcproxyalt.cpp:354 #, fuzzy msgid "path to the VOMS server configuration file" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/clients/credentials/arcproxy.cpp:358 #: src/clients/credentials/arcproxyalt.cpp:358 #, fuzzy msgid "" "voms<:command>. Specify VOMS server (More than one VOMS server \n" " can be specified like this: --voms VOa:command1 --voms VOb:" "command2). \n" " :command is optional, and is used to ask for specific " "attributes(e.g: roles)\n" " command options are:\n" " all --- put all of this DN's attributes into AC; \n" " list ---list all of the DN's attribute, will not create AC " "extension; \n" " /Role=yourRole --- specify the role, if this DN \n" " has such a role, the role will be put into " "AC \n" " /voname/groupname/Role=yourRole --- specify the VO, group and " "role; if this DN \n" " has such a role, the role will be put into " "AC \n" msgstr "" "voms<:инструкция>. Описание сервера VOMS (несколько серверов задаются\n" " следующим образом: --voms VOa:инструкция1 --voms VOb:" "инструкция2).\n" " <:инструкция> не обязательна и служит для запроса " "дополнительных\n" " атрибутов (например, ролей)\n" " Инструкции:\n" " all --- добавить все атрибуты, доступные данному " "пользователю;\n" " list --- перечислить все атрибуты, доступные данному " "пользователю,\n" " без создания расширения AC; \n" " /Role=вашаРоль --- указать желаемую роль; если данный " "пользователь\n" " может играть такую роль, она будет " "добавлена;\n" " /voname/groupname/Role=вашаРоль --- указать ВО, группу и роль; " "если\n" " данный пользователь может играть такую " "роль, она\n" " будет добавлена.\n" #: src/clients/credentials/arcproxy.cpp:372 #: src/clients/credentials/arcproxyalt.cpp:372 msgid "" "group<:role>. Specify ordering of attributes \n" " Example: --order /knowarc.eu/coredev:Developer,/knowarc.eu/" "testers:Tester \n" " or: --order /knowarc.eu/coredev:Developer --order /knowarc.eu/" "testers:Tester \n" " Note that it does not make sense to specify the order if you have two or " "more different VOMS servers specified" msgstr "" #: src/clients/credentials/arcproxy.cpp:379 #: src/clients/credentials/arcproxyalt.cpp:379 msgid "use GSI communication protocol for contacting VOMS services" msgstr "" #: src/clients/credentials/arcproxy.cpp:382 #: src/clients/credentials/arcproxyalt.cpp:382 msgid "" "use HTTP communication protocol for contacting VOMS services that provide " "RESTful access \n" " Note for RESTful access, 'list' command and multiple VOMS " "server are not supported\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:388 msgid "" "this option is not functional (old GSI proxies are not supported anymore)" msgstr "" #: src/clients/credentials/arcproxy.cpp:391 msgid "print all information about this proxy." 
msgstr "" #: src/clients/credentials/arcproxy.cpp:394 msgid "print selected information about this proxy." msgstr "" #: src/clients/credentials/arcproxy.cpp:397 #: src/clients/credentials/arcproxyalt.cpp:395 msgid "remove proxy" msgstr "" #: src/clients/credentials/arcproxy.cpp:400 msgid "" "username to MyProxy server (if missing subject of user certificate is used)" msgstr "" #: src/clients/credentials/arcproxy.cpp:405 #: src/clients/credentials/arcproxyalt.cpp:402 msgid "" "don't prompt for a credential passphrase, when retrieve a \n" " credential from on MyProxy server. \n" " The precondition of this choice is the credential is PUT onto\n" " the MyProxy server without a passphrase by using -R (--" "retrievable_by_cert) \n" " option when being PUTing onto Myproxy server. \n" " This option is specific for the GET command when contacting " "Myproxy server." msgstr "" #: src/clients/credentials/arcproxy.cpp:416 #: src/clients/credentials/arcproxyalt.cpp:412 msgid "" "Allow specified entity to retrieve credential without passphrase.\n" " This option is specific for the PUT command when contacting " "Myproxy server." msgstr "" #: src/clients/credentials/arcproxy.cpp:422 #: src/clients/credentials/arcproxyalt.cpp:418 #, fuzzy msgid "hostname[:port] of MyProxy server" msgstr "Nutzername bei myproxy Server" #: src/clients/credentials/arcproxy.cpp:427 msgid "" "command to MyProxy server. The command can be PUT, GET, INFO, NEWPASS or " "DESTROY.\n" " PUT -- put a delegated credentials to the MyProxy server; \n" " GET -- get a delegated credentials from the MyProxy server; \n" " INFO -- get and present information about credentials stored " "at the MyProxy server; \n" " NEWPASS -- change password protecting credentials stored at " "the MyProxy server; \n" " DESTROY -- wipe off credentials stored at the MyProxy " "server; \n" " Local credentials (certificate and key) are not necessary " "except in case of PUT. \n" " MyProxy functionality can be used together with VOMS " "functionality.\n" " --voms and --vomses can be used for Get command if VOMS " "attributes\n" " is required to be included in the proxy.\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:442 msgid "" "use NSS credential database in default Mozilla profiles, \n" " including Firefox, Seamonkey and Thunderbird.\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:447 #: src/clients/credentials/arcproxyalt.cpp:432 #, fuzzy msgid "proxy constraints" msgstr "Proxy constraints" #: src/clients/credentials/arcproxy.cpp:451 msgid "password destination=password source" msgstr "" #: src/clients/credentials/arcproxy.cpp:500 #: src/clients/credentials/arcproxy.cpp:1161 #: src/clients/credentials/arcproxyalt.cpp:513 #: src/clients/credentials/arcproxyalt.cpp:556 #, fuzzy msgid "Failed configuration initialization." msgstr "Fehler bei Initialisierung der Konfiguration" #: src/clients/credentials/arcproxy.cpp:518 #: src/clients/credentials/arcproxyalt.cpp:563 msgid "" "Failed to find certificate and/or private key or files have improper " "permissions or ownership." msgstr "" #: src/clients/credentials/arcproxy.cpp:519 #: src/clients/credentials/arcproxy.cpp:531 #: src/clients/credentials/arcproxyalt.cpp:564 #: src/clients/credentials/arcproxyalt.cpp:574 msgid "You may try to increase verbosity to get more information." 
msgstr "" #: src/clients/credentials/arcproxy.cpp:527 #: src/clients/credentials/arcproxyalt.cpp:570 #, fuzzy msgid "Failed to find CA certificates" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/clients/credentials/arcproxy.cpp:528 #: src/clients/credentials/arcproxyalt.cpp:571 #, fuzzy msgid "" "Cannot find the CA certificates directory path, please set environment " "variable X509_CERT_DIR, or cacertificatesdirectory in a configuration file." msgstr "" "Kann den Pfad zum CA Zertifikat-Verzeichnis nicht ermitteln. Bitte setzen " "Sie die Umgebungsvariable X509_CERT_DIR oder den Eintrag zu " "cacertificatesdirectory in der Konfigurationsdatei" #: src/clients/credentials/arcproxy.cpp:532 #: src/clients/credentials/arcproxyalt.cpp:575 msgid "" "The CA certificates directory is required for contacting VOMS and MyProxy " "servers." msgstr "" #: src/clients/credentials/arcproxy.cpp:544 msgid "" "$X509_VOMS_FILE, and $X509_VOMSES are not set;\n" "User has not specified the location for vomses information;\n" "There is also not vomses location information in user's configuration file;\n" "Can not find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses,\n" "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/" "vomses,\n" "/etc/vomses, /etc/grid-security/vomses, and the location at the " "corresponding sub-directory" msgstr "" #: src/clients/credentials/arcproxy.cpp:589 #: src/clients/credentials/arcproxyalt.cpp:604 src/clients/echo/arcecho.cpp:84 msgid "Wrong number of arguments!" msgstr "" #: src/clients/credentials/arcproxy.cpp:597 #: src/clients/credentials/arcproxy.cpp:618 #: src/clients/credentials/arcproxyalt.cpp:612 #: src/clients/credentials/arcproxyalt.cpp:632 #, fuzzy msgid "" "Cannot find the path of the proxy file, please setup environment " "X509_USER_PROXY, or proxypath in a configuration file" msgstr "" "Kann den Pfad zum CA Zertifikat-Verzeichnis nicht ermitteln. Bitte setzen " "Sie die Umgebungsvariable X509_CERT_DIR oder den Eintrag zu " "cacertificatesdirectory in der Konfigurationsdatei" #: src/clients/credentials/arcproxy.cpp:604 #: src/clients/credentials/arcproxyalt.cpp:621 #, fuzzy, c-format msgid "Cannot remove proxy file at %s" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/clients/credentials/arcproxy.cpp:606 #: src/clients/credentials/arcproxyalt.cpp:617 #, c-format msgid "Cannot remove proxy file at %s, because it's not there" msgstr "" #: src/clients/credentials/arcproxy.cpp:624 #: src/clients/credentials/arcproxyalt.cpp:638 #, c-format msgid "" "Cannot find file at %s for getting the proxy. Please make sure this file " "exists." msgstr "" "Kann Datei nicht bei %s finden, um den Proxy zu erhalten. Bitte stellen Sie " "sicher, dass diese Datei existiert." 
#: src/clients/credentials/arcproxy.cpp:630 #: src/clients/credentials/arcproxyalt.cpp:651 #, c-format msgid "Subject: %s" msgstr "Subjekt: %s" #: src/clients/credentials/arcproxy.cpp:631 #: src/clients/credentials/arcproxyalt.cpp:652 #, fuzzy, c-format msgid "Issuer: %s" msgstr "Anfrage: %s" #: src/clients/credentials/arcproxy.cpp:632 #: src/clients/credentials/arcproxyalt.cpp:653 #, c-format msgid "Identity: %s" msgstr "Identität: %s" #: src/clients/credentials/arcproxy.cpp:634 #: src/clients/credentials/arcproxyalt.cpp:657 #, fuzzy msgid "Time left for proxy: Proxy expired" msgstr "Zeit verbleibend für Proxy: Proxy abgelaufen" #: src/clients/credentials/arcproxy.cpp:636 #: src/clients/credentials/arcproxyalt.cpp:659 #, fuzzy msgid "Time left for proxy: Proxy not valid yet" msgstr "Zeit verbleibend für Proxy: Proxy ungültig" #: src/clients/credentials/arcproxy.cpp:638 #: src/clients/credentials/arcproxyalt.cpp:661 #, fuzzy, c-format msgid "Time left for proxy: %s" msgstr "Fehler bei Lesen von Proxy-Datei: %s" #: src/clients/credentials/arcproxy.cpp:639 #: src/clients/credentials/arcproxyalt.cpp:663 #, c-format msgid "Proxy path: %s" msgstr "Proxy Pfad: %s" #: src/clients/credentials/arcproxy.cpp:640 #, c-format msgid "Proxy type: %s" msgstr "Proxy Typ: %s" #: src/clients/credentials/arcproxy.cpp:641 #, fuzzy, c-format msgid "Proxy key length: %i" msgstr "Proxy Pfad: %s" #: src/clients/credentials/arcproxy.cpp:642 #, fuzzy, c-format msgid "Proxy signature: %s" msgstr "Nach Signatur: %s" #: src/clients/credentials/arcproxy.cpp:651 #: src/clients/credentials/arcproxyalt.cpp:675 #, fuzzy msgid "AC extension information for VO " msgstr "Fehler bei Bezug von Information für Job: %s" #: src/clients/credentials/arcproxy.cpp:654 #: src/clients/credentials/arcproxyalt.cpp:678 msgid "Error detected while parsing this AC" msgstr "" #: src/clients/credentials/arcproxy.cpp:667 #: src/clients/credentials/arcproxyalt.cpp:691 msgid "AC is invalid: " msgstr "" #: src/clients/credentials/arcproxy.cpp:720 #: src/clients/credentials/arcproxyalt.cpp:732 #, fuzzy msgid "Time left for AC: AC is not valid yet" msgstr "Zeit verbleibend für Proxy: Proxy ungültig" #: src/clients/credentials/arcproxy.cpp:722 #: src/clients/credentials/arcproxyalt.cpp:734 #, fuzzy msgid "Time left for AC: AC has expired" msgstr "Zeit verbleibend für Proxy: Proxy abgelaufen" #: src/clients/credentials/arcproxy.cpp:724 #: src/clients/credentials/arcproxyalt.cpp:736 #, fuzzy, c-format msgid "Time left for AC: %s" msgstr "Fehler bei Lesen von Proxy-Datei: %s" #: src/clients/credentials/arcproxy.cpp:815 #, c-format msgid "Information item '%s' is not known" msgstr "" #: src/clients/credentials/arcproxy.cpp:824 #: src/clients/credentials/arcproxyalt.cpp:746 #, fuzzy msgid "" "Cannot find the user certificate path, please setup environment " "X509_USER_CERT, or certificatepath in a configuration file" msgstr "" "Kann den Pfad zum CA Zertifikat-Verzeichnis nicht ermitteln. Bitte setzen " "Sie die Umgebungsvariable X509_CERT_DIR oder den Eintrag zu " "cacertificatesdirectory in der Konfigurationsdatei" #: src/clients/credentials/arcproxy.cpp:828 #: src/clients/credentials/arcproxyalt.cpp:750 #, fuzzy msgid "" "Cannot find the user private key path, please setup environment " "X509_USER_KEY, or keypath in a configuration file" msgstr "" "Kann den Pfad zum CA Zertifikat-Verzeichnis nicht ermitteln. 
Bitte setzen " "Sie die Umgebungsvariable X509_CERT_DIR oder den Eintrag zu " "cacertificatesdirectory in der Konfigurationsdatei" #: src/clients/credentials/arcproxy.cpp:852 #, c-format msgid "" "Cannot parse password source expression %s it must be of type=source format" msgstr "" #: src/clients/credentials/arcproxy.cpp:869 #, c-format msgid "" "Cannot parse password type %s. Currently supported values are " "'key','myproxy','myproxynew' and 'all'." msgstr "" #: src/clients/credentials/arcproxy.cpp:884 #, c-format msgid "" "Cannot parse password source %s it must be of source_type or source_type:" "data format. Supported source types are int,stdin,stream,file." msgstr "" #: src/clients/credentials/arcproxy.cpp:898 msgid "Only standard input is currently supported for password source." msgstr "" #: src/clients/credentials/arcproxy.cpp:903 #, c-format msgid "" "Cannot parse password source type %s. Supported source types are int,stdin," "stream,file." msgstr "" #: src/clients/credentials/arcproxy.cpp:942 #: src/clients/credentials/arcproxyalt.cpp:782 msgid "The start, end and period can't be set simultaneously" msgstr "" #: src/clients/credentials/arcproxy.cpp:948 #: src/clients/credentials/arcproxyalt.cpp:788 #, c-format msgid "The start time that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:955 #: src/clients/credentials/arcproxyalt.cpp:795 #, c-format msgid "The period that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:962 #: src/clients/credentials/arcproxyalt.cpp:802 #, c-format msgid "The end time that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:971 #: src/clients/credentials/arcproxyalt.cpp:811 #, c-format msgid "The end time that you set: %s is before start time:%s." msgstr "" #: src/clients/credentials/arcproxy.cpp:982 #: src/clients/credentials/arcproxyalt.cpp:822 #, c-format msgid "WARNING: The start time that you set: %s is before current time: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:985 #: src/clients/credentials/arcproxyalt.cpp:825 #, c-format msgid "WARNING: The end time that you set: %s is before current time: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:995 #: src/clients/credentials/arcproxyalt.cpp:835 #, c-format msgid "The VOMS AC period that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1013 #: src/clients/credentials/arcproxyalt.cpp:853 #, c-format msgid "The MyProxy period that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1028 #, c-format msgid "The keybits constraint is wrong: %s." 
msgstr "" #: src/clients/credentials/arcproxy.cpp:1042 #: src/clients/credentials/arcproxyalt.cpp:476 #: src/hed/libs/credential/ARCProxyUtil.cpp:1303 msgid "The NSS database can not be detected in the Firefox profile" msgstr "" #: src/clients/credentials/arcproxy.cpp:1051 #: src/hed/libs/credential/ARCProxyUtil.cpp:1311 #, c-format msgid "" "There are %d NSS base directories where the certificate, key, and module " "databases live" msgstr "" #: src/clients/credentials/arcproxy.cpp:1053 #: src/hed/libs/credential/ARCProxyUtil.cpp:1315 #, c-format msgid "Number %d is: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1055 #, c-format msgid "Please choose the NSS database you would like to use (1-%d): " msgstr "" #: src/clients/credentials/arcproxy.cpp:1071 #: src/clients/credentials/arcproxyalt.cpp:482 #: src/hed/libs/credential/ARCProxyUtil.cpp:1329 #, c-format msgid "NSS database to be accessed: %s\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:1142 #: src/hed/libs/credential/ARCProxyUtil.cpp:1503 #, fuzzy, c-format msgid "Certificate to use is: %s" msgstr "Verbindung zu %s schlug fehl: %s" #: src/clients/credentials/arcproxy.cpp:1190 #: src/clients/credentials/arcproxy.cpp:1306 #: src/clients/credentials/arcproxyalt.cpp:539 #: src/clients/credentials/arcproxyalt.cpp:955 #: src/hed/libs/credential/ARCProxyUtil.cpp:1560 msgid "Proxy generation succeeded" msgstr "Proxy erfolgreich angelegt" #: src/clients/credentials/arcproxy.cpp:1191 #: src/clients/credentials/arcproxy.cpp:1307 #: src/clients/credentials/arcproxyalt.cpp:540 #: src/clients/credentials/arcproxyalt.cpp:956 #: src/hed/libs/credential/ARCProxyUtil.cpp:1561 #, c-format msgid "Your proxy is valid until: %s" msgstr "Ihr Proxy ist gültig bis: %s" #: src/clients/credentials/arcproxy.cpp:1210 msgid "" "The old GSI proxies are not supported anymore. Please do not use -O/--old " "option." msgstr "" #: src/clients/credentials/arcproxy.cpp:1229 src/hed/mcc/tls/MCCTLS.cpp:167 #: src/hed/mcc/tls/MCCTLS.cpp:200 src/hed/mcc/tls/MCCTLS.cpp:226 msgid "VOMS attribute parsing failed" msgstr "Konnte VOMS Attribut nicht herauslesen" #: src/clients/credentials/arcproxy.cpp:1231 msgid "Myproxy server did not return proxy with VOMS AC included" msgstr "" #: src/clients/credentials/arcproxy.cpp:1252 #: src/clients/credentials/arcproxyalt.cpp:892 #: src/hed/libs/credential/ARCProxyUtil.cpp:341 msgid "Proxy generation failed: No valid certificate found." msgstr "" #: src/clients/credentials/arcproxy.cpp:1258 #: src/clients/credentials/arcproxyalt.cpp:899 #: src/hed/libs/credential/ARCProxyUtil.cpp:348 msgid "Proxy generation failed: No valid private key found." msgstr "" #: src/clients/credentials/arcproxy.cpp:1263 #: src/clients/credentials/arcproxyalt.cpp:902 #: src/hed/libs/credential/ARCProxyUtil.cpp:173 #, c-format msgid "Your identity: %s" msgstr "Ihre Identität: %s" #: src/clients/credentials/arcproxy.cpp:1265 #: src/clients/credentials/arcproxyalt.cpp:907 #: src/hed/libs/credential/ARCProxyUtil.cpp:356 msgid "Proxy generation failed: Certificate has expired." msgstr "" #: src/clients/credentials/arcproxy.cpp:1269 #: src/clients/credentials/arcproxyalt.cpp:911 #: src/hed/libs/credential/ARCProxyUtil.cpp:361 msgid "Proxy generation failed: Certificate is not valid yet." msgstr "" #: src/clients/credentials/arcproxy.cpp:1280 #, fuzzy msgid "Proxy generation failed: Failed to create temporary file." 
msgstr "Fehler bei Erstellen von Info-Datei %s: %s" #: src/clients/credentials/arcproxy.cpp:1288 msgid "Proxy generation failed: Failed to retrieve VOMS information." msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:100 #: src/clients/credentials/arcproxyalt.cpp:1312 #: src/hed/libs/credential/ARCProxyUtil.cpp:844 msgid "Succeeded to get info from MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:144 #: src/clients/credentials/arcproxyalt.cpp:1368 #: src/hed/libs/credential/ARCProxyUtil.cpp:900 msgid "Succeeded to change password on MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:185 #: src/clients/credentials/arcproxyalt.cpp:1417 #: src/hed/libs/credential/ARCProxyUtil.cpp:949 msgid "Succeeded to destroy credential on MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:265 #: src/clients/credentials/arcproxyalt.cpp:1506 #: src/hed/libs/credential/ARCProxyUtil.cpp:1038 #, fuzzy, c-format msgid "Succeeded to get a proxy in %s from MyProxy server %s" msgstr "Zurückerhaltene Nachricht von myproxy Server: %s" #: src/clients/credentials/arcproxy_myproxy.cpp:318 #: src/clients/credentials/arcproxyalt.cpp:1565 #: src/hed/libs/credential/ARCProxyUtil.cpp:1097 msgid "Succeeded to put a proxy onto MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_proxy.cpp:93 #: src/clients/credentials/arcproxyalt.cpp:1222 #: src/hed/libs/credential/ARCProxyUtil.cpp:403 #: src/hed/libs/credential/ARCProxyUtil.cpp:1410 msgid "Failed to add VOMS AC extension. Your proxy may be incomplete." msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:63 #, fuzzy msgid "" "Failed to process VOMS configuration or no suitable configuration lines " "found." msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/clients/credentials/arcproxy_voms.cpp:75 #, fuzzy, c-format msgid "Failed to parse requested VOMS lifetime: %s" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/clients/credentials/arcproxy_voms.cpp:93 #: src/clients/credentials/arcproxyalt.cpp:1051 #: src/hed/libs/credential/ARCProxyUtil.cpp:640 #, c-format msgid "Cannot get VOMS server address information from vomses line: \"%s\"" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:97 #: src/clients/credentials/arcproxy_voms.cpp:99 #: src/clients/credentials/arcproxyalt.cpp:1061 #: src/clients/credentials/arcproxyalt.cpp:1063 #: src/hed/libs/credential/ARCProxyUtil.cpp:650 #: src/hed/libs/credential/ARCProxyUtil.cpp:652 #, c-format msgid "Contacting VOMS server (named %s): %s on port: %s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:105 #, c-format msgid "Failed to parse requested VOMS server port number: %s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:122 msgid "List functionality is not supported for RESTful VOMS interface" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:132 #: src/clients/credentials/arcproxy_voms.cpp:188 #, c-format msgid "" "The VOMS server with the information:\n" "\t%s\n" "can not be reached, please make sure it is available." 
msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:133 #: src/clients/credentials/arcproxy_voms.cpp:138 #: src/clients/credentials/arcproxy_voms.cpp:189 #: src/clients/credentials/arcproxy_voms.cpp:194 #, c-format msgid "" "Collected error is:\n" "\t%s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:137 #: src/clients/credentials/arcproxy_voms.cpp:193 #, fuzzy, c-format msgid "No valid response from VOMS server: %s" msgstr "Frühe Antwort vom Server" #: src/clients/credentials/arcproxy_voms.cpp:155 msgid "List functionality is not supported for legacy VOMS interface" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:167 #, fuzzy, c-format msgid "Failed to parse VOMS command: %s" msgstr "Fehler bei Anlegen von GSI Context: %s" #: src/clients/credentials/arcproxy_voms.cpp:204 #, c-format msgid "" "There are %d servers with the same name: %s in your vomses file, but all of " "them can not be reached, or can not return valid message." msgstr "" #: src/clients/credentials/arcproxyalt.cpp:108 #: src/hed/libs/credential/ARCProxyUtil.cpp:258 #, fuzzy, c-format msgid "OpenSSL error -- %s" msgstr "OpenSSL Fehler -- %s" #: src/clients/credentials/arcproxyalt.cpp:109 #: src/hed/libs/credential/ARCProxyUtil.cpp:259 #, c-format msgid "Library : %s" msgstr "Bibliothek : %s" #: src/clients/credentials/arcproxyalt.cpp:110 #: src/hed/libs/credential/ARCProxyUtil.cpp:260 #, c-format msgid "Function : %s" msgstr "Funktion : %s" #: src/clients/credentials/arcproxyalt.cpp:111 #: src/hed/libs/credential/ARCProxyUtil.cpp:261 #, c-format msgid "Reason : %s" msgstr "Grund : %s" #: src/clients/credentials/arcproxyalt.cpp:167 #: src/hed/libs/credential/ARCProxyUtil.cpp:317 msgid "User interface error" msgstr "Benutzungsschnittstellenfehler" #: src/clients/credentials/arcproxyalt.cpp:173 #: src/hed/libs/credential/ARCProxyUtil.cpp:323 msgid "Aborted!" msgstr "Abbruch!" #: src/clients/credentials/arcproxyalt.cpp:319 #, fuzzy msgid "" "Supported constraints are:\n" " validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start " "from now)\n" " validityEnd=time\n" " validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod and " "validityEnd\n" " not specified, the default is 12 hours for local proxy, and 168 hours for " "delegated\n" " proxy on myproxy server)\n" " vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, the " "default\n" " is the minimum value of 12 hours and validityPeriod)\n" " myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy " "server,\n" " e.g. 43200 or 12h or 12H; if not specified, the default is the minimum " "value of\n" " 12 hours and validityPeriod (which is lifetime of the delegated proxy on " "myproxy server))\n" " proxyPolicy=policy content\n" " proxyPolicyFile=policy file" msgstr "" "Поддерживаемые ограничения:\n" " validityStart=время (например, 2008-05-29T10:20:30Z; если не указано, то " "начинается немедленно)\n" " validityEnd=время\n" " validityPeriod=время (например, 43200, или 12h, или 12H; если не указаны " "ни validityPeriod,\n" " ни validityEnd, то срок действия по умолчанию составляет 12 часов)\n" " vomsACvalidityPeriod=время (например, 43200, или 12h, или 12H; если не " "указано, то используется\n" " значение validityPeriod)\n" " proxyPolicy=содержимое норматива\n" " proxyPolicyFile=файл норматива" #: src/clients/credentials/arcproxyalt.cpp:388 #, fuzzy msgid "" "print all information about this proxy. 
\n" " In order to show the Identity (DN without CN as suffix for " "proxy) \n" " of the certificate, the 'trusted certdir' is needed." msgstr "" "вывести всю информацию о данной доверенности. \n" " Для вывода персональной информации (DN без CN как суффикс " "доверенности) \n" " из сертификата, необходим 'trusted certdir'." #: src/clients/credentials/arcproxyalt.cpp:398 #, fuzzy msgid "username to MyProxy server" msgstr "Nutzername bei myproxy Server" #: src/clients/credentials/arcproxyalt.cpp:422 #, fuzzy msgid "" "command to MyProxy server. The command can be PUT or GET.\n" " PUT/put/Put -- put a delegated credential to the MyProxy " "server; \n" " GET/get/Get -- get a delegated credential from the MyProxy " "server, \n" " credential (certificate and key) is not needed in this case. \n" " MyProxy functionality can be used together with VOMS\n" " functionality.\n" msgstr "" "инструкция серверу MyProxy. Возможны две инструкции: PUT и GET:\n" " PUT/put -- сохранить делегированный сертификат на сервере " "MyProxy;\n" " GET/get -- получить делегированный сертификат с сервера " "MyProxy,\n" " в этом случае не требуются личные сертификаты и " "ключи.\n" " Инструкции MyProxy и VOMS могут использоваться одновременно.\n" #: src/clients/credentials/arcproxyalt.cpp:437 msgid "use NSS credential database in the Firefox profile" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1040 #: src/hed/libs/credential/ARCProxyUtil.cpp:629 #, fuzzy, c-format msgid "There are %d commands to the same VOMS server %s" msgstr "Es gibt %d Kommandos an denselben VOMS Server %s\n" #: src/clients/credentials/arcproxyalt.cpp:1094 #: src/hed/libs/credential/ARCProxyUtil.cpp:683 #, c-format msgid "Try to get attribute from VOMS server with order: %s" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1097 #: src/hed/libs/credential/ARCProxyUtil.cpp:686 #, fuzzy, c-format msgid "Message sent to VOMS server %s is: %s" msgstr "Warnung: kann nicht verbinden zu RLS server %s: %s" #: src/clients/credentials/arcproxyalt.cpp:1116 #: src/hed/libs/credential/ARCProxyUtil.cpp:705 #, c-format msgid "" "The VOMS server with the information:\n" "\t%s\n" "can not be reached, please make sure it is available" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1120 #: src/hed/libs/credential/ARCProxyUtil.cpp:709 #, fuzzy msgid "No HTTP response from VOMS server" msgstr "Frühe Antwort vom Server" #: src/clients/credentials/arcproxyalt.cpp:1125 #: src/clients/credentials/arcproxyalt.cpp:1151 #: src/hed/libs/credential/ARCProxyUtil.cpp:714 #: src/hed/libs/credential/ARCProxyUtil.cpp:740 #, fuzzy, c-format msgid "Returned message from VOMS server: %s" msgstr "Zurückerhaltene Nachricht von myproxy Server: %s" #: src/clients/credentials/arcproxyalt.cpp:1137 #: src/hed/libs/credential/ARCProxyUtil.cpp:726 #, c-format msgid "" "The VOMS server with the information:\n" "\t%s\"\n" "can not be reached, please make sure it is available" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1141 #: src/hed/libs/credential/ARCProxyUtil.cpp:730 #, fuzzy msgid "No stream response from VOMS server" msgstr "Frühe Antwort vom Server" #: src/clients/credentials/arcproxyalt.cpp:1163 #: src/hed/libs/credential/ARCProxyUtil.cpp:752 #, c-format msgid "" "The validity duration of VOMS AC is shortened from %s to %s, due to the " "validity constraint on voms server side.\n" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1166 #: src/hed/libs/credential/ARCProxyUtil.cpp:755 #, c-format msgid "" "Cannot get any AC or attributes info from VOMS server: %s;\n" 
" Returned message from VOMS server: %s\n" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1171 #: src/hed/libs/credential/ARCProxyUtil.cpp:760 #, fuzzy, c-format msgid "Returned message from VOMS server %s is: %s\n" msgstr "Zurückerhaltene Nachricht von myproxy Server: %s" #: src/clients/credentials/arcproxyalt.cpp:1193 #: src/hed/libs/credential/ARCProxyUtil.cpp:782 #, c-format msgid "The attribute information from VOMS server: %s is list as following:" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1205 #: src/hed/libs/credential/ARCProxyUtil.cpp:794 #, c-format msgid "" "There are %d servers with the same name: %s in your vomses file, but all of " "them can not be reached, or can not return valid message. But proxy without " "VOMS AC extension will still be generated." msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1220 #, fuzzy, c-format msgid "Failed to add extension: %s" msgstr "Fehler bei Lesen von Objekt %s: %s" #: src/clients/credentials/arcproxyalt.cpp:1238 #: src/hed/libs/credential/ARCProxyUtil.cpp:443 #: src/hed/libs/credential/Credential.cpp:884 #, c-format msgid "Error: can't open policy file: %s" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1248 #: src/hed/libs/credential/ARCProxyUtil.cpp:453 #: src/hed/libs/credential/Credential.cpp:897 #, c-format msgid "Error: policy location: %s is not a regular file" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1600 msgid "" "$X509_VOMS_FILE, and $X509_VOMSES are not set;\n" "User has not specify the location for vomses information;\n" "There is also not vomses location information in user's configuration file;\n" "Cannot find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses, " "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/" "vomses, /etc/vomses, /etc/grid-security/vomses, and the location at the " "corresponding sub-directory" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1640 #: src/hed/libs/credential/ARCProxyUtil.cpp:552 #, c-format msgid "VOMS line contains wrong number of tokens (%u expected): \"%s\"" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1684 #: src/hed/libs/credential/ARCProxyUtil.cpp:596 #, c-format msgid "Cannot get VOMS server %s information from the vomses files" msgstr "" #: src/clients/credentials/test2myproxyserver_get.cpp:89 #: src/clients/credentials/test2myproxyserver_get.cpp:131 #: src/clients/credentials/test2myproxyserver_put.cpp:88 #: src/clients/credentials/test2myproxyserver_put.cpp:182 #: src/clients/credentials/test2vomsserver.cpp:101 msgid "No stream response" msgstr "" #: src/clients/credentials/test2myproxyserver_get.cpp:104 #: src/clients/credentials/test2myproxyserver_get.cpp:143 #: src/clients/credentials/test2myproxyserver_get.cpp:190 #: src/clients/credentials/test2myproxyserver_put.cpp:103 #: src/clients/credentials/test2myproxyserver_put.cpp:116 #: src/clients/credentials/test2myproxyserver_put.cpp:194 #, c-format msgid "Returned msg from myproxy server: %s %d" msgstr "" #: src/clients/credentials/test2myproxyserver_get.cpp:149 #, c-format msgid "There are %d certificates in the returned msg" msgstr "Es sind %d Zertifikate in der zurückgelieferten Nachricht." 
#: src/clients/credentials/test2myproxyserver_put.cpp:135 msgid "Delegate proxy failed" msgstr "" #: src/clients/credentials/test2vomsserver.cpp:116 #, c-format msgid "Returned msg from voms server: %s " msgstr "" #: src/clients/data/arccp.cpp:77 src/clients/data/arccp.cpp:330 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:426 #, c-format msgid "Current transfer FAILED: %s" msgstr "Aktueller Transfer SCHLUG FEHL: %s" #: src/clients/data/arccp.cpp:79 src/clients/data/arccp.cpp:135 #: src/clients/data/arccp.cpp:332 src/clients/data/arcls.cpp:224 #: src/clients/data/arcmkdir.cpp:73 src/clients/data/arcrename.cpp:89 #: src/clients/data/arcrm.cpp:95 msgid "This seems like a temporary error, please try again later" msgstr "" "Dies scheint ein vorübergehender Fehler zu sein, bitte später nochmal " "probieren" #: src/clients/data/arccp.cpp:87 src/clients/data/arccp.cpp:96 #, fuzzy, c-format msgid "Unable to copy %s" msgstr "Konnter Broker %s nicht laden" #: src/clients/data/arccp.cpp:88 src/clients/data/arccp.cpp:97 #: src/clients/data/arcls.cpp:150 src/clients/data/arcls.cpp:159 #: src/clients/data/arcmkdir.cpp:55 src/clients/data/arcmkdir.cpp:64 #: src/clients/data/arcrename.cpp:67 src/clients/data/arcrename.cpp:76 #: src/clients/data/arcrm.cpp:68 src/clients/data/arcrm.cpp:80 msgid "Invalid credentials, please check proxy and/or CA certificates" msgstr "" #: src/clients/data/arccp.cpp:94 src/clients/data/arcls.cpp:156 #: src/clients/data/arcmkdir.cpp:61 src/clients/data/arcrename.cpp:73 #: src/clients/data/arcrm.cpp:77 #, fuzzy msgid "Proxy expired" msgstr "Proxy store:" #: src/clients/data/arccp.cpp:112 src/clients/data/arccp.cpp:116 #: src/clients/data/arccp.cpp:149 src/clients/data/arccp.cpp:153 #: src/clients/data/arccp.cpp:358 src/clients/data/arccp.cpp:363 #: src/clients/data/arcls.cpp:123 src/clients/data/arcmkdir.cpp:28 #: src/clients/data/arcrename.cpp:29 src/clients/data/arcrename.cpp:33 #: src/clients/data/arcrm.cpp:36 #, c-format msgid "Invalid URL: %s" msgstr "Ungültige URL: %s" #: src/clients/data/arccp.cpp:128 msgid "Third party transfer is not supported for these endpoints" msgstr "" #: src/clients/data/arccp.cpp:130 msgid "" "Protocol(s) not supported - please check that the relevant gfal2\n" " plugins are installed (gfal2-plugin-* packages)" msgstr "" #: src/clients/data/arccp.cpp:133 #, c-format msgid "Transfer FAILED: %s" msgstr "Transfer FEHLER: %s" #: src/clients/data/arccp.cpp:161 src/clients/data/arccp.cpp:187 #: src/clients/data/arccp.cpp:374 src/clients/data/arccp.cpp:402 #, c-format msgid "Can't read list of sources from file %s" msgstr "" #: src/clients/data/arccp.cpp:166 src/clients/data/arccp.cpp:202 #: src/clients/data/arccp.cpp:379 src/clients/data/arccp.cpp:418 #, c-format msgid "Can't read list of destinations from file %s" msgstr "" #: src/clients/data/arccp.cpp:171 src/clients/data/arccp.cpp:385 msgid "Numbers of sources and destinations do not match" msgstr "" #: src/clients/data/arccp.cpp:216 msgid "Fileset registration is not supported yet" msgstr "" #: src/clients/data/arccp.cpp:222 src/clients/data/arccp.cpp:295 #: src/clients/data/arccp.cpp:456 #, c-format msgid "Unsupported source url: %s" msgstr "Nicht unterstützte URL für Quelle: %s" #: src/clients/data/arccp.cpp:226 src/clients/data/arccp.cpp:299 #, c-format msgid "Unsupported destination url: %s" msgstr "Nicht unterstützte URL für Ziel: %s" #: src/clients/data/arccp.cpp:233 msgid "" "For registration source must be ordinary URL and destination must be " "indexing service" msgstr "" #: 
src/clients/data/arccp.cpp:243 #, fuzzy, c-format msgid "Could not obtain information about source: %s" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/clients/data/arccp.cpp:250 msgid "" "Metadata of source does not match existing destination. Use the --force " "option to override this." msgstr "" #: src/clients/data/arccp.cpp:262 msgid "Failed to accept new file/destination" msgstr "" #: src/clients/data/arccp.cpp:268 src/clients/data/arccp.cpp:274 #, fuzzy, c-format msgid "Failed to register new file/destination: %s" msgstr "Fehler bei Schreiben zu Datei %s: %s" #: src/clients/data/arccp.cpp:436 msgid "Fileset copy to single object is not supported yet" msgstr "" #: src/clients/data/arccp.cpp:446 msgid "Can't extract object's name from source url" msgstr "" #: src/clients/data/arccp.cpp:465 #, fuzzy, c-format msgid "%s. Cannot copy fileset" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/clients/data/arccp.cpp:475 src/hed/libs/compute/ExecutionTarget.cpp:254 #: src/hed/libs/compute/ExecutionTarget.cpp:326 #, c-format msgid "Name: %s" msgstr "Name: %s" #: src/clients/data/arccp.cpp:478 #, c-format msgid "Source: %s" msgstr "Quelle: %s" #: src/clients/data/arccp.cpp:479 #, c-format msgid "Destination: %s" msgstr "Ziel: %s" #: src/clients/data/arccp.cpp:485 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:433 msgid "Current transfer complete" msgstr "Aktueller Transfer vollständig" #: src/clients/data/arccp.cpp:488 msgid "Some transfers failed" msgstr "Einige Transfers schlugen fehl" #: src/clients/data/arccp.cpp:498 #, c-format msgid "Directory: %s" msgstr "Verzeichnis: %s" #: src/clients/data/arccp.cpp:518 msgid "Transfer complete" msgstr "Transfer vollständig" #: src/clients/data/arccp.cpp:537 msgid "source destination" msgstr "Quelle Ziel" #: src/clients/data/arccp.cpp:538 msgid "" "The arccp command copies files to, from and between grid storage elements." msgstr "" "Mit arccp werden Dateien zu, von und zwischen grid storage Elementen kopiert." #: src/clients/data/arccp.cpp:543 msgid "" "use passive transfer (off by default if secure is on, on by default if " "secure is not requested)" msgstr "" #: src/clients/data/arccp.cpp:549 msgid "do not try to force passive transfer" msgstr "versuche nicht, passiven Transfer zu erzwingen" #: src/clients/data/arccp.cpp:554 msgid "" "if the destination is an indexing service and not the same as the source and " "the destination is already registered, then the copy is normally not done. " "However, if this option is specified the source is assumed to be a replica " "of the destination created in an uncontrolled way and the copy is done like " "in case of replication. Using this option also skips validation of completed " "transfers." msgstr "" #: src/clients/data/arccp.cpp:567 msgid "show progress indicator" msgstr "zeige Fortschrittsanzeige" #: src/clients/data/arccp.cpp:572 #, fuzzy msgid "" "do not transfer, but register source into destination. destination must be a " "meta-url."
msgstr "" "transferiere Datei nicht, registriere sie nur - Zeil muss eine nicht-" "existierende Meta-URL sein" #: src/clients/data/arccp.cpp:578 msgid "use secure transfer (insecure by default)" msgstr "Nutze sicheren Transfer (unsicher ist Voreinstellung)" #: src/clients/data/arccp.cpp:583 msgid "path to local cache (use to put file into cache)" msgstr "" #: src/clients/data/arccp.cpp:588 src/clients/data/arcls.cpp:300 #, fuzzy msgid "operate recursively" msgstr "arbeite rekursiv bis zu einer festgelegten Tiefe" #: src/clients/data/arccp.cpp:593 src/clients/data/arcls.cpp:305 msgid "operate recursively up to specified level" msgstr "arbeite rekursiv bis zu einer festgelegten Tiefe" #: src/clients/data/arccp.cpp:594 src/clients/data/arcls.cpp:306 msgid "level" msgstr "Tiefe" #: src/clients/data/arccp.cpp:598 msgid "number of retries before failing file transfer" msgstr "Anzahl von Wiederholungen bis zu einem Abbruch der Dateiübertragung" #: src/clients/data/arccp.cpp:599 msgid "number" msgstr "Nummer" #: src/clients/data/arccp.cpp:603 msgid "" "physical location to write to when destination is an indexing service. Must " "be specified for indexing services which do not automatically generate " "physical locations. Can be specified multiple times - locations will be " "tried in order until one succeeds." msgstr "" #: src/clients/data/arccp.cpp:611 msgid "" "perform third party transfer, where the destination pulls from the source " "(only available with GFAL plugin)" msgstr "" #: src/clients/data/arccp.cpp:617 src/clients/data/arcls.cpp:322 #: src/clients/data/arcmkdir.cpp:101 src/clients/data/arcrename.cpp:112 #: src/clients/data/arcrm.cpp:127 msgid "list the available plugins (protocols supported)" msgstr "" #: src/clients/data/arccp.cpp:656 src/clients/data/arcls.cpp:362 #: src/clients/data/arcmkdir.cpp:141 src/clients/data/arcrename.cpp:152 #: src/clients/data/arcrm.cpp:168 msgid "Protocol plugins available:" msgstr "" #: src/clients/data/arccp.cpp:681 src/clients/data/arcls.cpp:387 #: src/clients/data/arcmkdir.cpp:165 src/clients/data/arcrename.cpp:175 #: src/clients/data/arcrm.cpp:193 msgid "Wrong number of parameters specified" msgstr "Falsche Anzahl an Parametern übertragen" #: src/clients/data/arccp.cpp:686 msgid "Options 'p' and 'n' can't be used simultaneously" msgstr "" #: src/clients/data/arcls.cpp:129 src/clients/data/arcmkdir.cpp:34 #: src/clients/data/arcrm.cpp:43 #, c-format msgid "Can't read list of locations from file %s" msgstr "" #: src/clients/data/arcls.cpp:144 src/clients/data/arcmkdir.cpp:49 #: src/clients/data/arcrename.cpp:61 #, fuzzy msgid "Unsupported URL given" msgstr "Nicht-unterstützte URL angegeben" #: src/clients/data/arcls.cpp:149 src/clients/data/arcls.cpp:158 #, fuzzy, c-format msgid "Unable to list content of %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/clients/data/arcls.cpp:227 msgid "Warning: Failed listing files but some information is obtained" msgstr "" #: src/clients/data/arcls.cpp:281 src/clients/data/arcmkdir.cpp:90 msgid "url" msgstr "URL" #: src/clients/data/arcls.cpp:282 msgid "" "The arcls command is used for listing files in grid storage elements and " "file\n" "index catalogues." 
msgstr "" "Mit arcls werden Verzeichniss auf grid storage Elementen und Datei Index " "Katalogen angegeben" #: src/clients/data/arcls.cpp:291 msgid "show URLs of file locations" msgstr "zeige URLs von Datei-Lokalisationen" #: src/clients/data/arcls.cpp:295 msgid "display all available metadata" msgstr "zeige alle verfügbare Metadaten" #: src/clients/data/arcls.cpp:309 msgid "" "show only description of requested object, do not list content of directories" msgstr "" #: src/clients/data/arcls.cpp:313 msgid "treat requested object as directory and always try to list content" msgstr "" #: src/clients/data/arcls.cpp:317 msgid "check readability of object, does not show any information about object" msgstr "" #: src/clients/data/arcls.cpp:392 msgid "Incompatible options --nolist and --forcelist requested" msgstr "" #: src/clients/data/arcls.cpp:397 msgid "Requesting recursion and --nolist has no sense" msgstr "" #: src/clients/data/arcmkdir.cpp:54 src/clients/data/arcmkdir.cpp:63 #, fuzzy, c-format msgid "Unable to create directory %s" msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/clients/data/arcmkdir.cpp:91 #, fuzzy msgid "" "The arcmkdir command creates directories on grid storage elements and " "catalogs." msgstr "" "Mit arcls werden Verzeichniss auf grid storage Elementen und Datei Index " "Katalogen angegeben" #: src/clients/data/arcmkdir.cpp:96 msgid "make parent directories as needed" msgstr "" #: src/clients/data/arcrename.cpp:41 msgid "Both URLs must have the same protocol, host and port" msgstr "" #: src/clients/data/arcrename.cpp:51 #, fuzzy msgid "Cannot rename to or from root directory" msgstr "Konnte classname für Policy nicht von Konfiguration parsen" #: src/clients/data/arcrename.cpp:55 #, fuzzy msgid "Cannot rename to the same URL" msgstr "Kann doc Argument nicht anlegen" #: src/clients/data/arcrename.cpp:66 src/clients/data/arcrename.cpp:75 #, fuzzy, c-format msgid "Unable to rename %s" msgstr "Fehler bei Erstellen von GUID in RLS: %s" #: src/clients/data/arcrename.cpp:106 msgid "old_url new_url" msgstr "" #: src/clients/data/arcrename.cpp:107 #, fuzzy msgid "The arcrename command renames files on grid storage elements." msgstr "" "Mit arccp werden Dateien zu, von und zwischen grid storage Elementen kopiert." #: src/clients/data/arcrm.cpp:58 #, fuzzy, c-format msgid "Unsupported URL given: %s" msgstr "Nicht-unterstützte URL angegeben" #: src/clients/data/arcrm.cpp:67 src/clients/data/arcrm.cpp:79 #, fuzzy, c-format msgid "Unable to remove file %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/clients/data/arcrm.cpp:115 #, fuzzy msgid "url [url ...]" msgstr "[Cluster ...]" #: src/clients/data/arcrm.cpp:116 msgid "The arcrm command deletes files and on grid storage elements." msgstr "" #: src/clients/data/arcrm.cpp:121 msgid "" "remove logical file name registration even if not all physical instances " "were removed" msgstr "" #: src/clients/echo/arcecho.cpp:32 #, fuzzy msgid "service message" msgstr "Service Nachricht" #: src/clients/echo/arcecho.cpp:33 msgid "The arcecho command is a client for the ARC echo service." msgstr "" #: src/clients/echo/arcecho.cpp:35 msgid "" "The service argument is a URL to an ARC echo service.\n" "The message argument is the message the service should return." 
msgstr "" #: src/clients/echo/arcecho.cpp:105 src/hed/dmc/arc/DataPointARC.cpp:169 #: src/hed/dmc/arc/DataPointARC.cpp:222 src/hed/dmc/arc/DataPointARC.cpp:304 #: src/hed/dmc/arc/DataPointARC.cpp:415 src/hed/dmc/arc/DataPointARC.cpp:510 #: src/hed/dmc/arc/DataPointARC.cpp:574 src/hed/dmc/arc/DataPointARC.cpp:624 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:100 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:174 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:240 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:326 #, c-format msgid "" "Request:\n" "%s" msgstr "" "Anfrage:\n" "%s" #: src/clients/echo/arcecho.cpp:119 src/hed/dmc/arc/DataPointARC.cpp:182 #: src/hed/dmc/arc/DataPointARC.cpp:235 src/hed/dmc/arc/DataPointARC.cpp:320 #: src/hed/dmc/arc/DataPointARC.cpp:431 src/hed/dmc/arc/DataPointARC.cpp:524 #: src/hed/dmc/arc/DataPointARC.cpp:587 src/hed/dmc/arc/DataPointARC.cpp:638 #: src/hed/dmc/srm/srmclient/SRMClient.cpp:164 msgid "No SOAP response" msgstr "Keine SOAP Antwort" #: src/clients/echo/arcecho.cpp:124 src/hed/acc/UNICORE/UNICOREClient.cpp:531 #: src/hed/dmc/arc/DataPointARC.cpp:187 src/hed/dmc/arc/DataPointARC.cpp:240 #: src/hed/dmc/arc/DataPointARC.cpp:325 src/hed/dmc/arc/DataPointARC.cpp:436 #: src/hed/dmc/arc/DataPointARC.cpp:529 src/hed/dmc/arc/DataPointARC.cpp:592 #: src/hed/dmc/arc/DataPointARC.cpp:643 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:119 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:193 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:267 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:346 #, c-format msgid "" "Response:\n" "%s" msgstr "" "Antwort:\n" "%s" #: src/clients/saml/saml_assertion_init.cpp:43 msgid "service_url" msgstr "" #: src/clients/saml/saml_assertion_init.cpp:47 msgid "path to config file" msgstr "Pfad zu Konfigurationsdatei" #: src/clients/saml/saml_assertion_init.cpp:140 msgid "SOAP Request failed: No response" msgstr "" #: src/clients/saml/saml_assertion_init.cpp:144 msgid "SOAP Request failed: Error" msgstr "" #: src/clients/saml/saml_assertion_init.cpp:150 msgid "No in SOAP response" msgstr "" #: src/clients/saml/saml_assertion_init.cpp:156 msgid "No in SAML response" msgstr "" #: src/clients/saml/saml_assertion_init.cpp:168 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:321 msgid "Succeeded to verify the signature under " msgstr "Erfolgreiche Überprüfung der Signatur unter " #: src/clients/saml/saml_assertion_init.cpp:171 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:324 msgid "Failed to verify the signature under " msgstr "Fehler bei der Überprüfung der Signatur unter " #: src/clients/wsrf/arcwsrf.cpp:39 msgid "URL [query]" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:40 msgid "" "The arcwsrf command is used for obtaining the WS-ResourceProperties of\n" "services." msgstr "" #: src/clients/wsrf/arcwsrf.cpp:46 msgid "Request for specific Resource Property" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:47 msgid "[-]name" msgstr "[-]Name" #: src/clients/wsrf/arcwsrf.cpp:80 msgid "Missing URL" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:85 msgid "Too many parameters" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:123 #, fuzzy msgid "Query is not a valid XML" msgstr "" "Указанный URL\n" " %1 \n" " содержит ошибки." 
#: src/clients/wsrf/arcwsrf.cpp:138 msgid "Failed to create WSRP request" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:145 msgid "Specified URL is not valid" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:157 msgid "Failed to send request" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:161 msgid "Failed to obtain SOAP response" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:167 msgid "SOAP fault received" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:142 #, c-format msgid "Connect: Failed to init handle: %s" msgstr "Connect: Konnte init handle nicht initialisieren: %s" #: src/hed/acc/ARC0/FTPControl.cpp:148 #, fuzzy, c-format msgid "Failed to enable IPv6: %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/acc/ARC0/FTPControl.cpp:158 src/hed/acc/ARC0/FTPControl.cpp:172 #, c-format msgid "Connect: Failed to connect: %s" msgstr "Connect: Verbindung zu %s schlug fehl" #: src/hed/acc/ARC0/FTPControl.cpp:165 #, c-format msgid "Connect: Connecting timed out after %d ms" msgstr "Connect: Zeitüberschreitung der Verbindung nach %d ms" #: src/hed/acc/ARC0/FTPControl.cpp:185 #, c-format msgid "Connect: Failed to init auth info handle: %s" msgstr "Connect: Konnte auth info handle nicht initialisieren: %s" #: src/hed/acc/ARC0/FTPControl.cpp:196 src/hed/acc/ARC0/FTPControl.cpp:210 #, c-format msgid "Connect: Failed authentication: %s" msgstr "Connect: Authentikation fehlgeschlagen: %s" #: src/hed/acc/ARC0/FTPControl.cpp:203 #, c-format msgid "Connect: Authentication timed out after %d ms" msgstr "Connect: Zeitüberschreitung der Authentikation nach %d ms" #: src/hed/acc/ARC0/FTPControl.cpp:224 src/hed/acc/ARC0/FTPControl.cpp:256 #, fuzzy, c-format msgid "SendCommand: Command: %s" msgstr "SendCommand: Fehler: %s" #: src/hed/acc/ARC0/FTPControl.cpp:229 src/hed/acc/ARC0/FTPControl.cpp:240 #: src/hed/acc/ARC0/FTPControl.cpp:260 src/hed/acc/ARC0/FTPControl.cpp:271 #, c-format msgid "SendCommand: Failed: %s" msgstr "SendCommand: Fehler: %s" #: src/hed/acc/ARC0/FTPControl.cpp:235 src/hed/acc/ARC0/FTPControl.cpp:266 #, c-format msgid "SendCommand: Timed out after %d ms" msgstr "SendCommand: Zeitüberschreitung nach %d ms" #: src/hed/acc/ARC0/FTPControl.cpp:243 src/hed/acc/ARC0/FTPControl.cpp:276 #, fuzzy, c-format msgid "SendCommand: Response: %s" msgstr "SendCommand: Fehler: %s" #: src/hed/acc/ARC0/FTPControl.cpp:293 #, fuzzy msgid "SendData: Failed sending EPSV and PASV commands" msgstr "SendData: Fehler bei Senden von PASV Kommando" #: src/hed/acc/ARC0/FTPControl.cpp:298 src/hed/acc/ARC0/FTPControl.cpp:304 #: src/hed/acc/ARC0/FTPControl.cpp:320 #, fuzzy, c-format msgid "SendData: Server PASV response parsing failed: %s" msgstr "SendData: Server PASV Antwort konnte nicht geparst werden: %s" #: src/hed/acc/ARC0/FTPControl.cpp:330 src/hed/acc/ARC0/FTPControl.cpp:336 #: src/hed/acc/ARC0/FTPControl.cpp:343 src/hed/acc/ARC0/FTPControl.cpp:350 #, fuzzy, c-format msgid "SendData: Server EPSV response parsing failed: %s" msgstr "SendData: Server PASV Antwort konnte nicht geparst werden: %s" #: src/hed/acc/ARC0/FTPControl.cpp:357 #, fuzzy, c-format msgid "SendData: Server EPSV response port parsing failed: %s" msgstr "SendData: Server PASV Antwort konnte nicht geparst werden: %s" #: src/hed/acc/ARC0/FTPControl.cpp:366 #, c-format msgid "SendData: Failed to apply local address to data connection: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:372 #, fuzzy, c-format msgid "SendData: Can't parse host and/or port in response to EPSV/PASV: %s" msgstr "Kann host and port nicht aus Antwort zu PASV herauslesen" #: 
src/hed/acc/ARC0/FTPControl.cpp:377 #, fuzzy, c-format msgid "SendData: Data channel: %d.%d.%d.%d:%d" msgstr "Datenkanal: %d.%d.%d.%d %d" #: src/hed/acc/ARC0/FTPControl.cpp:393 #, fuzzy, c-format msgid "SendData: Data channel: [%s]:%d" msgstr "SendData: Fehler bei Datenverbindung zum Schreiben: %s" #: src/hed/acc/ARC0/FTPControl.cpp:398 #, c-format msgid "SendData: Local port failed: %s" msgstr "SendData: Lokaler port schlug fehl: %s" #: src/hed/acc/ARC0/FTPControl.cpp:422 msgid "SendData: Failed sending DCAU command" msgstr "SendData: Fehler bei Senden von DCAU Kommando" #: src/hed/acc/ARC0/FTPControl.cpp:427 msgid "SendData: Failed sending TYPE command" msgstr "SendData: Fehler bei Senden von TYPE Kommando" #: src/hed/acc/ARC0/FTPControl.cpp:436 #, c-format msgid "SendData: Local type failed: %s" msgstr "SendData: Lokaler type schlug fehl: %s" #: src/hed/acc/ARC0/FTPControl.cpp:446 #, c-format msgid "SendData: Failed sending STOR command: %s" msgstr "SendData: Fehler bei Senden von STOR Kommando: %s" #: src/hed/acc/ARC0/FTPControl.cpp:454 src/hed/acc/ARC0/FTPControl.cpp:475 #, c-format msgid "SendData: Data connect write failed: %s" msgstr "SendData: Fehler bei Datenverbindung zum Schreiben: %s" #: src/hed/acc/ARC0/FTPControl.cpp:461 src/hed/acc/ARC0/FTPControl.cpp:469 #, c-format msgid "SendData: Data connect write timed out after %d ms" msgstr "" "SendData: Zeitüberschreitung bei Datenverbindung zum Schreiben nach %d ms" #: src/hed/acc/ARC0/FTPControl.cpp:487 src/hed/acc/ARC0/FTPControl.cpp:507 #, c-format msgid "SendData: Data write failed: %s" msgstr "SendData: Schreiben von Daten schlug fehl: %s" #: src/hed/acc/ARC0/FTPControl.cpp:493 src/hed/acc/ARC0/FTPControl.cpp:501 #, c-format msgid "SendData: Data write timed out after %d ms" msgstr "SendData: Zeitüberschreitung beim Schreiben nach %d ms" #: src/hed/acc/ARC0/FTPControl.cpp:527 src/hed/acc/ARC0/FTPControl.cpp:538 #, fuzzy, c-format msgid "Disconnect: Failed aborting - ignoring: %s" msgstr "Disconnect: Fehler beim Schließen der Verbindung - ignoriert: %s" #: src/hed/acc/ARC0/FTPControl.cpp:530 #, fuzzy, c-format msgid "Disconnect: Data close timed out after %d ms" msgstr "Disconnect: Zeitüberschreitung vom Schließen nach %d ms" #: src/hed/acc/ARC0/FTPControl.cpp:541 #, fuzzy, c-format msgid "Disconnect: Abort timed out after %d ms" msgstr "Disconnect: Zeitüberschreitung vom Schließen nach %d ms" #: src/hed/acc/ARC0/FTPControl.cpp:549 #, fuzzy, c-format msgid "Disconnect: Failed quitting - ignoring: %s" msgstr "Disconnect: Fehler beim Schließen der Verbindung - ignoriert: %s" #: src/hed/acc/ARC0/FTPControl.cpp:552 #, c-format msgid "Disconnect: Quitting timed out after %d ms" msgstr "Disconnect: Zeitüberschreitung beim Verlassen nach %d ms" #: src/hed/acc/ARC0/FTPControl.cpp:561 #, c-format msgid "Disconnect: Failed closing - ignoring: %s" msgstr "Disconnect: Fehler beim Schließen der Verbindung - ignoriert: %s" #: src/hed/acc/ARC0/FTPControl.cpp:567 #, c-format msgid "Disconnect: Closing timed out after %d ms" msgstr "Disconnect: Zeitüberschreitung vom Schließen nach %d ms" #: src/hed/acc/ARC0/FTPControl.cpp:582 #, fuzzy msgid "Disconnect: waiting for globus handle to settle" msgstr "Disconnect: Konnte handle nicht freigeben: %s" #: src/hed/acc/ARC0/FTPControl.cpp:596 #, fuzzy msgid "Disconnect: globus handle is stuck." msgstr "Disconnect: Konnte handle nicht freigeben: %s" #: src/hed/acc/ARC0/FTPControl.cpp:604 #, fuzzy, c-format msgid "Disconnect: Failed destroying handle: %s. Can't handle such situation." 
msgstr "Disconnect: Konnte handle nicht freigeben: %s" #: src/hed/acc/ARC0/FTPControl.cpp:607 #, fuzzy msgid "Disconnect: handle destroyed." msgstr "Disconnect: Konnte handle nicht freigeben: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:47 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:44 #, fuzzy msgid "" "Missing reference to factory and/or module. It is unsafe to use Globus in " "non-persistent mode - SubmitterPlugin for ARC0 is disabled. Report to " "developers." msgstr "" "Fehlende Referenz zu factory und/doer Module. Es ist unsicher, Globus im " "nicht-persistenten Modus zu nutzen - (Grid)FTP code wurde disabled. Bitte " "die Entwickler informieren." #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:62 #, c-format msgid "Unable to query job information (%s), invalid URL provided (%s)" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:74 #, c-format msgid "Jobs left to query: %d" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:83 #, c-format msgid "Querying batch with %d jobs" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:99 #, fuzzy msgid "Can't create information handle - is the ARC LDAP DMC plugin available?" msgstr "" "Kann information handle nicht anlegen - ist das ARC LDAP DMC plugin " "verfügbar?" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:132 #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:47 #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:36 #, c-format msgid "Job information not found in the information system: %s" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:134 msgid "" "This job was very recently submitted and might not yet have reached the " "information system" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:236 #, c-format msgid "Cleaning job: %s" msgstr "Aufräumen von Job: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:240 msgid "Failed to connect for job cleaning" msgstr "Konnte nicht verbinden, um Job aufzuräumen" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:252 msgid "Failed sending CWD command for job cleaning" msgstr "Konnte CWD Kommando nicht senden um Job aufzuräumen" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:259 msgid "Failed sending RMD command for job cleaning" msgstr "Konnte RMD Kommando nicht senden um Job aufzuräumen" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:266 msgid "Failed to disconnect after job cleaning" msgstr "Konnte Verbindung nicht trennen nach Aufräumen von Job" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:273 msgid "Job cleaning successful" msgstr "Job erfolgreich aufgeräumt." 
#: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:284 #, fuzzy, c-format msgid "Cancelling job: %s" msgstr "Aufräumen von Job: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:288 #, fuzzy msgid "Failed to connect for job cancelling" msgstr "Konnte nicht verbinden, um Job aufzuräumen" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:300 msgid "Failed sending CWD command for job cancelling" msgstr "Fehler beim Senden von CWD für den Abbruch eines Jobs" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:307 msgid "Failed sending DELE command for job cancelling" msgstr "Fehler beim Senden von DELE für den Abbruch eines Jobs" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:314 msgid "Failed to disconnect after job cancelling" msgstr "Fehler beim Trennen der Verbindung nach Abbruch von Job" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:322 msgid "Job cancelling successful" msgstr "Job erfolgreich abgebrochen" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:333 #, c-format msgid "Renewing credentials for job: %s" msgstr "Erneuern der credentials für Job %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:337 msgid "Failed to connect for credential renewal" msgstr "Fehler beim Verbinden für Erneuerung von credentials" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:349 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:356 msgid "Failed sending CWD command for credentials renewal" msgstr "Fehler beim Senden von CWD Kommando für Erneuerung von credentials" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:362 msgid "Failed to disconnect after credentials renewal" msgstr "Fehler beim Trennen der Verbindung nach Erneuerung der credentials" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:369 msgid "Renewal of credentials was successful" msgstr "Erneuerung der Credentials war erfolgreich" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:381 #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:111 #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:192 #, c-format msgid "Job %s does not report a resumable state" msgstr "Job %s berichtet nicht von einem resumable Zustand" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:391 #, fuzzy, c-format msgid "Illegal jobID specified (%s)" msgstr "Ungültige Job ID angegeben" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:398 #, c-format msgid "HER: %s" msgstr "HER: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:404 #, c-format msgid "Could not create temporary file: %s" msgstr "Konnte temporäre Datei nicht anlegen: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:437 #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:131 #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:210 msgid "Job resuming successful" msgstr "Job erfolgreich resumed."
#: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:473 #, fuzzy, c-format msgid "Trying to retrieve job description of %s from computing resource" msgstr "Versuche Job Beschreibung von %s von Cluster zu beziehen" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:478 #, fuzzy, c-format msgid "invalid jobID: %s" msgstr "ungültige Job ID: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:520 msgid "clientxrsl found" msgstr "clientxrsl gefunden" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:523 msgid "could not find start of clientxrsl" msgstr "konnte Start von clientxrsl nicht finden" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:528 msgid "could not find end of clientxrsl" msgstr "konnte Ende von clientxrsl nicht finden" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:541 #, c-format msgid "Job description: %s" msgstr "Job Beschreibung: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:544 msgid "clientxrsl not found" msgstr "clientxrsl nicht gefunden" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:550 #, fuzzy, c-format msgid "Invalid JobDescription: %s" msgstr "Ungültige JobDescription:" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:553 #, fuzzy msgid "Valid JobDescription found" msgstr "Gültige JobDescription gefunden" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:60 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:204 msgid "Submit: Failed to connect" msgstr "Submit: Verbindungsfehler" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:68 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:212 msgid "Submit: Failed sending CWD command" msgstr "Submit: Konnte CWD Kommando nicht senden" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:79 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:223 msgid "Submit: Failed sending CWD new command" msgstr "Submit: Konnte CWD new Kommando nicht senden" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:106 #, fuzzy msgid "Failed to prepare job description." msgstr "Fehler beim Bezug der Job Beschreibung von Job: %s" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:116 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:260 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:63 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:158 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:87 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:401 #, fuzzy, c-format msgid "Unable to submit job. Job description is not valid in the %s format: %s" msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:123 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:267 msgid "Submit: Failed sending job description" msgstr "Submit: Fehler bei Senden von Job Beschreibung" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:138 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:282 msgid "Submit: Failed uploading local input files" msgstr "Submit: Hochladen der lokalen Inputfiles schlug fehl" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:193 msgid "" "Submit: service has no suitable information interface - need org.nordugrid." "ldapng" msgstr "" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:250 #, fuzzy msgid "Failed to prepare job description to target resources." msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #: src/hed/acc/ARC1/AREXClient.cpp:58 msgid "Creating an A-REX client" msgstr "Lege A-REX client an." #: src/hed/acc/ARC1/AREXClient.cpp:61 #, fuzzy msgid "Unable to create SOAP client used by AREXClient." msgstr "Konnte SOAP client nicht für AREXClient anlegen." #: src/hed/acc/ARC1/AREXClient.cpp:85 #, fuzzy msgid "Failed locating credentials."
msgstr "Fehler bei der Initialisierung der delegation credentials" #: src/hed/acc/ARC1/AREXClient.cpp:94 #, fuzzy msgid "Failed initiate client connection." msgstr "Fehler bei Schließen von Verbindung 1" #: src/hed/acc/ARC1/AREXClient.cpp:102 msgid "Client connection has no entry point." msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:113 src/hed/acc/EMIES/EMIESClient.cpp:130 #: src/hed/acc/UNICORE/UNICOREClient.cpp:191 #: src/hed/acc/UNICORE/UNICOREClient.cpp:222 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:505 #: src/services/a-rex/test.cpp:86 msgid "Initiating delegation procedure" msgstr "Initialisierung der Delegations-Prozedur" #: src/hed/acc/ARC1/AREXClient.cpp:115 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:507 msgid "Failed to initiate delegation credentials" msgstr "Fehler bei der Initialisierung der delegation credentials" #: src/hed/acc/ARC1/AREXClient.cpp:128 #, fuzzy msgid "Re-creating an A-REX client" msgstr "Lege A-REX client an." #: src/hed/acc/ARC1/AREXClient.cpp:146 msgid "AREXClient was not created properly." msgstr "AREXClient wurde nicht richtig angelegt." #: src/hed/acc/ARC1/AREXClient.cpp:151 src/hed/acc/EMIES/EMIESClient.cpp:174 #, c-format msgid "Processing a %s request" msgstr "Verarbeite %s Anfrage" #: src/hed/acc/ARC1/AREXClient.cpp:173 src/hed/acc/CREAM/CREAMClient.cpp:134 #: src/hed/acc/EMIES/EMIESClient.cpp:180 #, c-format msgid "%s request failed" msgstr "Anfrage %s schlug fehl" #: src/hed/acc/ARC1/AREXClient.cpp:181 src/hed/acc/EMIES/EMIESClient.cpp:189 #, c-format msgid "No response from %s" msgstr "Keine Antwort von %s" #: src/hed/acc/ARC1/AREXClient.cpp:190 src/hed/acc/EMIES/EMIESClient.cpp:198 #, c-format msgid "%s request to %s failed with response: %s" msgstr "%s Anfrage an %s schlug fehl mit Antwort %s" #: src/hed/acc/ARC1/AREXClient.cpp:195 src/hed/acc/EMIES/EMIESClient.cpp:213 #, c-format msgid "XML response: %s" msgstr "XML Antwort: %s" #: src/hed/acc/ARC1/AREXClient.cpp:204 #, fuzzy, c-format msgid "%s request to %s failed. No expected response." msgstr "Anfrage %s an %s schlug fehl. Leere Anwort." 
#: src/hed/acc/ARC1/AREXClient.cpp:218 #, c-format msgid "Creating and sending submit request to %s" msgstr "Erstelle und sende submit Anfrage an %s" #: src/hed/acc/ARC1/AREXClient.cpp:234 src/hed/acc/ARC1/AREXClient.cpp:482 #: src/hed/acc/EMIES/EMIESClient.cpp:302 src/hed/acc/EMIES/EMIESClient.cpp:405 #: src/hed/acc/UNICORE/UNICOREClient.cpp:160 #, c-format msgid "Job description to be sent: %s" msgstr "Zu sendende Job-Beschreibung : %s" #: src/hed/acc/ARC1/AREXClient.cpp:248 src/hed/acc/EMIES/EMIESClient.cpp:491 #: src/hed/acc/EMIES/EMIESClient.cpp:525 src/hed/acc/EMIES/EMIESClient.cpp:581 #, fuzzy, c-format msgid "Creating and sending job information query request to %s" msgstr "Erstelle und sende job information query request an %s" #: src/hed/acc/ARC1/AREXClient.cpp:293 src/hed/acc/ARC1/AREXClient.cpp:336 #, fuzzy, c-format msgid "Unable to retrieve status of job (%s)" msgstr "Konnte status Beschreibung des jobs (%s) nicht erhalten: " #: src/hed/acc/ARC1/AREXClient.cpp:346 src/hed/acc/EMIES/EMIESClient.cpp:821 #, fuzzy, c-format msgid "Creating and sending service information query request to %s" msgstr "Erstelle und sende service information query request an %s" #: src/hed/acc/ARC1/AREXClient.cpp:366 #, fuzzy, c-format msgid "Creating and sending ISIS information query request to %s" msgstr "Erstelle und send ISIS information query request an %s" #: src/hed/acc/ARC1/AREXClient.cpp:383 #, fuzzy, c-format msgid "Service %s of type %s ignored" msgstr "Service %s des Typ %s wurde ignoriert" #: src/hed/acc/ARC1/AREXClient.cpp:386 msgid "No execution services registered in the index service" msgstr "Keine execution services in index service registriert" #: src/hed/acc/ARC1/AREXClient.cpp:392 #, fuzzy, c-format msgid "Creating and sending terminate request to %s" msgstr "Erstelle und sende terminate request an %s" #: src/hed/acc/ARC1/AREXClient.cpp:403 #: src/hed/acc/UNICORE/UNICOREClient.cpp:619 #: src/hed/acc/UNICORE/UNICOREClient.cpp:692 msgid "Job termination failed" msgstr "Beendigung des Jobs schlug fehl" #: src/hed/acc/ARC1/AREXClient.cpp:414 #, fuzzy, c-format msgid "Creating and sending clean request to %s" msgstr "Erstelle und sende clean request an %s" #: src/hed/acc/ARC1/AREXClient.cpp:444 #, fuzzy, c-format msgid "Creating and sending job description retrieval request to %s" msgstr "Erstelle und sende job description retrieval request an %s" #: src/hed/acc/ARC1/AREXClient.cpp:464 #, fuzzy, c-format msgid "Creating and sending job migrate request to %s" msgstr "Erstelle und sende job migrate request an %s" #: src/hed/acc/ARC1/AREXClient.cpp:498 src/hed/acc/EMIES/EMIESClient.cpp:932 #, fuzzy, c-format msgid "Creating and sending job resume request to %s" msgstr "Erstelle und sende job resume request an %s" #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:100 #, fuzzy msgid "Renewal of ARC1 jobs is not supported" msgstr "Das Erneuern von ARC1 Jobs wird nicht unterstützt" #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:117 #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:197 #, fuzzy, c-format msgid "Resuming job: %s at state: %s (%s)" msgstr "Resuming Job: %s in Zustand: %s" #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:183 #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:103 #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:139 #, fuzzy, c-format msgid "Failed retrieving job description for job: %s" msgstr "Fehler beim Bezug der Job Beschreibung von Job: %s" #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:42 #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:69 msgid "Failed 
retrieving job status information" msgstr "Konnte Job Status Information nicht beziehen." #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:52 #, fuzzy msgid "Cleaning of BES jobs is not supported" msgstr "Das Löschen von BES Jobs wird nicht unterstützt" #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:78 #, fuzzy msgid "Renewal of BES jobs is not supported" msgstr "Das Erneuern von BES Jobs wird nicht unterstützt" #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:86 #, fuzzy msgid "Resuming BES jobs is not supported" msgstr "Ein Resume von BES jobs wird nicht unterstützt" #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:49 msgid "Collecting Job (A-REX jobs) information." msgstr "" #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:53 #, c-format msgid "Failed retrieving job IDs: Unsupported url (%s) given" msgstr "Konnte job IDs nicht bestimmen: Nicht unterstützte URL erhalten (%s)" #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:61 msgid "Failed retrieving job IDs" msgstr "Konnt job IDs nicht erhalten." #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:64 msgid "" "Error encoutered during job ID retrieval. All job IDs might not have been " "retrieved" msgstr "" "Fehler beim Bestimmen der job ID. Womöglich wurde keine job ID erhalten." #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:53 #, fuzzy msgid "Failed to prepare job description" msgstr "Fehler beim Bezug der Job Beschreibung von Job: %s" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:78 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:173 msgid "No job identifier returned by BES service" msgstr "Kein Job identifier von BES service zurückerhalten" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:99 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:194 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:310 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:77 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:169 msgid "Failed uploading local input files" msgstr "Konnte lokale Inputdateien nicht hochladen" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:148 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:53 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:136 #, fuzzy msgid "Failed to prepare job description to target resources" msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:271 #, fuzzy msgid "Failed adapting job description to target resources" msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:282 #, c-format msgid "" "Unable to migrate job. Job description is not valid in the %s format: %s" msgstr "" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:295 msgid "No job identifier returned by A-REX" msgstr "A-REX lieferte keinen Job Identifikator zurück" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:50 msgid "Querying WSRF GLUE2 computing info endpoint." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:95 #: src/hed/libs/compute/GLUE2.cpp:53 msgid "The Service doesn't advertise its Type." msgstr "Der Service gibt seinen Typ nicht an." #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:100 msgid "The Service doesn't advertise its Quality Level." msgstr "Der Service gibt seinen Quality Level nicht an." 
#: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:120 #, fuzzy, c-format msgid "Generating A-REX target: %s" msgstr "Generiere A-REX target: %s" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:145 #: src/hed/libs/compute/GLUE2.cpp:99 msgid "The ComputingEndpoint has no URL." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:151 #: src/hed/libs/compute/GLUE2.cpp:104 msgid "The Service advertises no Health State." msgstr "Der Service gibt keinen Health State an." #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:178 msgid "The Service doesn't advertise its Interface." msgstr "Der Service gibt sein Interface nicht an." #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:210 msgid "The Service doesn't advertise its Serving State." msgstr "Der Service gibt seinen Serving State nicht an." #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:278 #: src/hed/libs/compute/GLUE2.cpp:247 #, c-format msgid "" "The \"FreeSlotsWithDuration\" attribute published by \"%s\" is wrongly " "formatted. Ignoring it." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:279 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE2.cpp:220 #: src/hed/libs/compute/GLUE2.cpp:248 #, c-format msgid "Wrong format of the \"FreeSlotsWithDuration\" = \"%s\" (\"%s\")" msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:398 #: src/hed/libs/compute/GLUE2.cpp:417 #, c-format msgid "" "Couldn't parse benchmark XML:\n" "%s" msgstr "" "Konnte benchmark XML nicht parsen:\n" "%s" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:467 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPNG.cpp:426 #, c-format msgid "Unable to parse the %s.%s value from execution service (%s)."
msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:468 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPNG.cpp:427 #, c-format msgid "Value of %s.%s is \"%s\"" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:14 msgid "Sorting according to free slots in queue" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:15 msgid "Random sorting" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:16 msgid "Sorting according to specified benchmark (default \"specint2000\")" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:17 msgid "Sorting according to input data availability at target" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:18 msgid "Performs neither sorting nor matching" msgstr "" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:24 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of waiting " "jobs" msgstr "" "Ziel %s entfernt durch FastestQueueBroker, die Anzahl wartender Jobs wird " "nicht genannt" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:27 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of total slots" msgstr "" "Ziel %s entfernt durch FastestQueueBroker, die Anzahl vorhandener slots wird " "nicht genannt" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:30 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of free slots" msgstr "" "Ziel %s entfernt durch FastestQueueBroker, die Anzahl freier slots wird " "nicht genannt" #: src/hed/acc/CREAM/CREAMClient.cpp:114 msgid "Creating a CREAM client" msgstr "Anlegen eines CREAM client" #: src/hed/acc/CREAM/CREAMClient.cpp:117 msgid "Unable to create SOAP client used by CREAMClient." msgstr "Konnte SOAP client nicht anlegen für CREAMClient." 
#: src/hed/acc/CREAM/CREAMClient.cpp:128 #, fuzzy msgid "CREAMClient not created properly" msgstr "CREAMClient nicht richtig angelegt" #: src/hed/acc/CREAM/CREAMClient.cpp:139 #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:52 #: src/hed/acc/UNICORE/UNICOREClient.cpp:207 #: src/hed/acc/UNICORE/UNICOREClient.cpp:299 #: src/hed/acc/UNICORE/UNICOREClient.cpp:376 #: src/hed/acc/UNICORE/UNICOREClient.cpp:455 #: src/hed/acc/UNICORE/UNICOREClient.cpp:488 #: src/hed/acc/UNICORE/UNICOREClient.cpp:565 #: src/hed/acc/UNICORE/UNICOREClient.cpp:641 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:155 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:188 #: src/tests/client/test_ClientInterface.cpp:88 #: src/tests/client/test_ClientSAML2SSO.cpp:81 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:100 #: src/tests/echo/test_clientinterface.cpp:82 #: src/tests/echo/test_clientinterface.cpp:149 #: src/tests/echo/test_clientinterface.py:29 msgid "There was no SOAP response" msgstr "Keine SOAP response erhalten" #: src/hed/acc/CREAM/CREAMClient.cpp:148 src/hed/acc/CREAM/CREAMClient.cpp:353 #: src/hed/acc/CREAM/CREAMClient.cpp:374 src/hed/acc/CREAM/CREAMClient.cpp:395 #: src/hed/acc/CREAM/CREAMClient.cpp:414 src/hed/acc/CREAM/CREAMClient.cpp:465 #: src/hed/acc/CREAM/CREAMClient.cpp:494 #, fuzzy msgid "Empty response" msgstr "Leere Antwort" #: src/hed/acc/CREAM/CREAMClient.cpp:167 #, fuzzy, c-format msgid "Request failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/acc/CREAM/CREAMClient.cpp:175 src/hed/acc/CREAM/CREAMClient.cpp:428 #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:35 #: src/hed/acc/UNICORE/UNICOREClient.cpp:359 msgid "Creating and sending a status request" msgstr "Erstellen und senden einer Status-Anfrage" #: src/hed/acc/CREAM/CREAMClient.cpp:200 #, fuzzy msgid "Unable to retrieve job status." msgstr "Konnte Job Status-Informationen nicht erhalten." 
#: src/hed/acc/CREAM/CREAMClient.cpp:340 #: src/hed/acc/UNICORE/UNICOREClient.cpp:549 #: src/hed/acc/UNICORE/UNICOREClient.cpp:628 msgid "Creating and sending request to terminate a job" msgstr "Erstellen und senden von Anfrage, einen Job zu beenden" #: src/hed/acc/CREAM/CREAMClient.cpp:361 msgid "Creating and sending request to clean a job" msgstr "Erstellen und senden einer Anfrage, einen Job zu löschen" #: src/hed/acc/CREAM/CREAMClient.cpp:382 #, fuzzy msgid "Creating and sending request to resume a job" msgstr "Erstellen und senden von Anfrage, einen Job zu beenden" #: src/hed/acc/CREAM/CREAMClient.cpp:403 #, fuzzy msgid "Creating and sending request to list jobs" msgstr "Erstellen und senden einer Anfrage, einen Job zu löschen" #: src/hed/acc/CREAM/CREAMClient.cpp:450 msgid "Creating and sending job register request" msgstr "Erstellen und senden einer Anfrage, einen Job zu registrieren" #: src/hed/acc/CREAM/CREAMClient.cpp:470 src/hed/acc/CREAM/CREAMClient.cpp:499 #, fuzzy msgid "No job ID in response" msgstr "Keine Job ID in Antwort" #: src/hed/acc/CREAM/CREAMClient.cpp:480 msgid "Creating and sending job start request" msgstr "Erstellen und senden einer Anfrage, einen Job zu starten" #: src/hed/acc/CREAM/CREAMClient.cpp:508 #, fuzzy msgid "Creating delegation" msgstr "Erstelle Delegation" #: src/hed/acc/CREAM/CREAMClient.cpp:520 msgid "Malformed response: missing getProxyReqReturn" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:531 #, fuzzy, c-format msgid "Delegatable credentials expired: %s" msgstr "" "Делегированные параметры доступа:\n" " %s" #: src/hed/acc/CREAM/CREAMClient.cpp:541 #, fuzzy msgid "Failed signing certificate request" msgstr "Fehler beim Signieren der Anfrage nach Ausstellen eines Zertifikats" #: src/hed/acc/CREAM/CREAMClient.cpp:561 #, fuzzy msgid "Failed putting signed delegation certificate to service" msgstr "" "Der Transfer des signierten delegation certificate zu Service schlug fehl" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:52 #, fuzzy, c-format msgid "Failed cleaning job: %s" msgstr "Löschen fehlgeschlagen von job: %s" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:70 #, fuzzy, c-format msgid "Failed canceling job: %s" msgstr "Abbruch fehlgeschlagen von job: %s" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:84 #, fuzzy msgid "Renewal of CREAM jobs is not supported" msgstr "Erneuerung von CREAM jobs wird nicht unterstützt" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:98 #, fuzzy, c-format msgid "Failed resuming job: %s" msgstr "Löschen fehlgeschlagen von job: %s" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:40 #, fuzzy msgid "Failed creating signed delegation certificate" msgstr "Erstellen eines signed delegation certificate ist fehlgeschlagen" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:61 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:153 #: src/hed/acc/UNICORE/UNICOREClient.cpp:115 #, fuzzy, c-format msgid "Unable to submit job.
Job description is not valid in the %s format" msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:69 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:161 #, fuzzy msgid "Failed registering job" msgstr "Konnte job nicht registrieren" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:85 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:177 msgid "Failed starting job" msgstr "Konnte job nicht starten" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:123 #, fuzzy msgid "Failed creating singed delegation certificate" msgstr "Erstellen eines singed delegation certificate ist fehlgeschlagen" #: src/hed/acc/EMIES/EMIESClient.cpp:79 #, fuzzy msgid "Creating an EMI ES client" msgstr "Anlegen eines CREAM client" #: src/hed/acc/EMIES/EMIESClient.cpp:82 #, fuzzy msgid "Unable to create SOAP client used by EMIESClient." msgstr "Konnte SOAP client nicht anlegen für CREAMClient." #: src/hed/acc/EMIES/EMIESClient.cpp:157 #, fuzzy msgid "Re-creating an EMI ES client" msgstr "Anlegen eines CREAM client" #: src/hed/acc/EMIES/EMIESClient.cpp:223 #, fuzzy, c-format msgid "%s request to %s failed. Unexpected response: %s." msgstr "%s Anfrage an %s schlug fehl mit Antwort %s" #: src/hed/acc/EMIES/EMIESClient.cpp:237 src/hed/acc/EMIES/EMIESClient.cpp:344 #, fuzzy, c-format msgid "Creating and sending job submit request to %s" msgstr "Erstelle und sende submit Anfrage an %s" #: src/hed/acc/EMIES/EMIESClient.cpp:415 src/hed/acc/EMIES/EMIESClient.cpp:598 #: src/hed/acc/EMIES/EMIESClient.cpp:1087 #, fuzzy, c-format msgid "New limit for vector queries returned by EMI ES service: %d" msgstr "Kein Job identifier von BES service zurückerhalten" #: src/hed/acc/EMIES/EMIESClient.cpp:423 src/hed/acc/EMIES/EMIESClient.cpp:606 #: src/hed/acc/EMIES/EMIESClient.cpp:1095 #, c-format msgid "" "Error: Service returned a limit higher or equal to current limit (current: %" "d; returned: %d)" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:764 #, fuzzy, c-format msgid "Creating and sending service information request to %s" msgstr "Erstelle und sende service information query request an %s" #: src/hed/acc/EMIES/EMIESClient.cpp:869 src/hed/acc/EMIES/EMIESClient.cpp:890 #, fuzzy, c-format msgid "Creating and sending job clean request to %s" msgstr "Erstelle und sende clean request an %s" #: src/hed/acc/EMIES/EMIESClient.cpp:911 #, fuzzy, c-format msgid "Creating and sending job suspend request to %s" msgstr "Erstelle und sende job resume request an %s" #: src/hed/acc/EMIES/EMIESClient.cpp:953 #, fuzzy, c-format msgid "Creating and sending job restart request to %s" msgstr "Erstelle und sende job resume request an %s" #: src/hed/acc/EMIES/EMIESClient.cpp:1010 #, fuzzy, c-format msgid "Creating and sending job notify request to %s" msgstr "Erstelle und sende job migrate request an %s" #: src/hed/acc/EMIES/EMIESClient.cpp:1065 #, fuzzy, c-format msgid "Creating and sending notify request to %s" msgstr "Erstelle und sende clean request an %s" #: src/hed/acc/EMIES/EMIESClient.cpp:1155 #, fuzzy, c-format msgid "Creating and sending job list request to %s" msgstr "Erstelle und sende job migrate request an %s" #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:156 #, c-format msgid "Job %s has no delegation associated. Can't renew such job." msgstr "" #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:170 #, fuzzy, c-format msgid "Job %s failed to renew delegation %s - %s." 
msgstr "Initiierung der Delegation fehlgeschlagen" #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:247 #, fuzzy, c-format msgid "Failed retrieving information for job: %s" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:319 #, fuzzy msgid "Retrieving job description of EMI ES jobs is not supported" msgstr "Resume von CREAM jobs wird nicht unterstützt" #: src/hed/acc/EMIES/JobListRetrieverPluginEMIES.cpp:61 #, c-format msgid "Listing jobs succeeded, %d jobs found" msgstr "" #: src/hed/acc/EMIES/JobListRetrieverPluginEMIES.cpp:77 #: src/hed/acc/ldap/JobListRetrieverPluginLDAPNG.cpp:102 #, c-format msgid "" "Skipping retrieved job (%s) because it was submitted via another interface (%" "s)." msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:47 #, fuzzy msgid "" "Failed to delegate credentials to server - no delegation interface found" msgstr "Konnte delegation credentials in Client Konfiguration nicht finden" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:54 #, fuzzy, c-format msgid "Failed to delegate credentials to server - %s" msgstr "Konnte delegation credentatials nicht zerstören für job: %s" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:77 #, fuzzy msgid "Failed preparing job description" msgstr "Submit: Fehler bei Senden von Job Beschreibung" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:95 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:406 #, fuzzy msgid "Unable to submit job. Job description is not valid XML" msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:154 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:482 #, fuzzy msgid "No valid job identifier returned by EMI ES" msgstr "A-REX lieferte keinen Job Identifikator zurück" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:180 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:499 #, fuzzy msgid "Job failed on service side" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:190 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:509 #, fuzzy msgid "Failed to obtain state of job" msgstr "Konnte Listing nicht via FTP beziehen: %s" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:205 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:521 #, fuzzy msgid "Failed to wait for job to allow stage in" msgstr "Konnte nicht verbinden, um Job aufzuräumen" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:228 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:541 #, fuzzy msgid "Failed to obtain valid stagein URL for input files" msgstr "Konnte Listing nicht via FTP beziehen: %s" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:248 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:558 #, fuzzy, c-format msgid "Failed uploading local input files to %s" msgstr "Konnte lokale Inputdateien nicht hochladen" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:269 #, fuzzy, c-format msgid "Failed to submit job description: EMIESFault(%s , %s)" msgstr "Fehler beim Bezug der Job Beschreibung von Job: %s" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:278 #, fuzzy, c-format msgid "Failed to submit job description: UnexpectedError(%s)" msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:315 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:574 #, fuzzy msgid "Failed to notify service" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:367 #, fuzzy msgid "Failed preparing job description to target resources" msgstr 
"Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:475 #, fuzzy, c-format msgid "Failed to submit job description: %s" msgstr "Fehler beim Bezug der Job Beschreibung von Job: %s" #: src/hed/acc/EMIES/TargetInformationRetrieverPluginEMIES.cpp:54 msgid "Collecting EMI-ES GLUE2 computing info endpoint information." msgstr "" #: src/hed/acc/EMIES/TargetInformationRetrieverPluginEMIES.cpp:74 #, fuzzy msgid "Generating EMIES targets" msgstr "Generiere A-REX target: %s" #: src/hed/acc/EMIES/TargetInformationRetrieverPluginEMIES.cpp:83 #, fuzzy, c-format msgid "Generated EMIES target: %s" msgstr "Generiere A-REX target: %s" #: src/hed/acc/EMIES/TestEMIESClient.cpp:75 #: src/hed/acc/EMIES/TestEMIESClient.cpp:79 #, c-format msgid "Query returned unexpected element: %s:%s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:85 #, c-format msgid "Element validation according to GLUE2 schema failed: %s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:114 #, fuzzy msgid "Resource query failed" msgstr "Anfrage %s schlug fehl" #: src/hed/acc/EMIES/TestEMIESClient.cpp:132 #, fuzzy msgid "Submission failed" msgstr "Submission von Anfrage schlug fehl" #: src/hed/acc/EMIES/TestEMIESClient.cpp:143 #, fuzzy msgid "Obtaining status failed" msgstr "Die Job Terminierungs-Anfrage schlug fehl" #: src/hed/acc/EMIES/TestEMIESClient.cpp:153 #, fuzzy msgid "Obtaining information failed" msgstr "SOAP Aufruf fehlgeschlagen" #: src/hed/acc/EMIES/TestEMIESClient.cpp:170 #, fuzzy msgid "Cleaning failed" msgstr "Delegation nicht erfolgreich: " #: src/hed/acc/EMIES/TestEMIESClient.cpp:177 #, fuzzy msgid "Notify failed" msgstr "Schreibfehler" #: src/hed/acc/EMIES/TestEMIESClient.cpp:184 #, fuzzy msgid "Kill failed" msgstr "%s fehlgeschlagen" #: src/hed/acc/EMIES/TestEMIESClient.cpp:190 #, fuzzy msgid "List failed" msgstr "%s fehlgeschlagen" #: src/hed/acc/EMIES/TestEMIESClient.cpp:201 #, fuzzy, c-format msgid "Fetching resource description from %s" msgstr "Setzer userRequestDescription zu %s" #: src/hed/acc/EMIES/TestEMIESClient.cpp:204 #: src/hed/acc/EMIES/TestEMIESClient.cpp:273 #: src/hed/acc/EMIES/TestEMIESClient.cpp:283 #: src/hed/acc/EMIES/TestEMIESClient.cpp:294 #, fuzzy, c-format msgid "Failed to obtain resource description: %s" msgstr "Fehler beim Bezug der Job Beschreibung von Job: %s" #: src/hed/acc/EMIES/TestEMIESClient.cpp:213 #: src/hed/acc/EMIES/TestEMIESClient.cpp:217 #, c-format msgid "Resource description contains unexpected element: %s:%s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:223 msgid "Resource description validation according to GLUE2 schema failed: " msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:228 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:560 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:819 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:133 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:173 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1218 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1252 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1434 #: src/hed/identitymap/ArgusPDPClient.cpp:444 #: src/hed/identitymap/ArgusPEPClient.cpp:98 #: src/hed/identitymap/ArgusPEPClient.cpp:345 #: src/hed/libs/common/Thread.cpp:193 src/hed/libs/common/Thread.cpp:196 #: src/hed/libs/common/Thread.cpp:199 #: src/hed/libs/credential/Credential.cpp:1055 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:72 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:88 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:104 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:124 #: 
src/hed/mcc/tcp/PayloadTCPSocket.cpp:134 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:142 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:151 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:160 #: src/hed/mcc/tls/PayloadTLSMCC.cpp:82 src/hed/shc/arcpdp/ArcPDP.cpp:235 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:293 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:247 #: src/services/a-rex/delegation/DelegationStore.cpp:44 #: src/services/a-rex/delegation/DelegationStore.cpp:49 #: src/services/a-rex/delegation/DelegationStore.cpp:54 #: src/services/a-rex/delegation/DelegationStore.cpp:88 #: src/services/a-rex/delegation/DelegationStore.cpp:94 #: src/services/a-rex/grid-manager/inputcheck.cpp:33 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:552 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:620 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:645 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:656 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:667 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:678 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:686 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:692 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:697 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:702 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:707 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:715 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:723 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:734 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:741 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:780 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:790 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:798 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:824 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:893 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:906 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:923 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:935 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1239 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1244 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1273 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1286 #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:373 #, c-format msgid "%s" msgstr "%s" #: src/hed/acc/EMIES/TestEMIESClient.cpp:248 #, fuzzy msgid "Resource description is empty" msgstr "Anfrage ist leer" #: src/hed/acc/EMIES/TestEMIESClient.cpp:255 #, c-format msgid "Resource description provides URL for interface %s: %s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:260 msgid "Resource description provides no URLs for interfaces" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:263 #, fuzzy msgid "Resource description validation passed" msgstr "Quelle Ziel" #: src/hed/acc/EMIES/TestEMIESClient.cpp:266 #, c-format msgid "Requesting ComputingService elements of resource description at %s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:271 msgid "Performing /Services/ComputingService query" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:275 #: src/hed/acc/EMIES/TestEMIESClient.cpp:285 #: src/hed/acc/EMIES/TestEMIESClient.cpp:296 msgid "Query returned no elements." 
msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:281 msgid "Performing /ComputingService query" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:292 msgid "Performing /* query" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:302 #, fuzzy msgid "All queries failed" msgstr "Anfrage %s schlug fehl" #: src/hed/acc/EMIES/TestEMIESClient.cpp:332 #, c-format msgid "" "Number of ComputingService elements obtained from full document and XPath " "qury do not match: %d != %d" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:335 msgid "Resource description query validation passed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:337 #, fuzzy, c-format msgid "Unsupported command: %s" msgstr "Nicht unterstützte URL für Quelle: %s" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:75 #, c-format msgid "[ADLParser] Unsupported EMI ES state %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:95 #, c-format msgid "[ADLParser] Unsupported internal state %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:105 #, c-format msgid "[ADLParser] Optional for %s elements are not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:114 #, c-format msgid "[ADLParser] %s element must be boolean." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:126 #, c-format msgid "[ADLParser] Code in FailIfExitCodeNotEqualTo in %s is not valid number." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:364 msgid "[ADLParser] Root element is not ActivityDescription " msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:411 msgid "[ADLParser] priority is too large - using max value 100" msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:454 #, c-format msgid "[ADLParser] Unsupported URL %s for RemoteLogging." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:473 #, c-format msgid "[ADLParser] Wrong time %s in ExpirationTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:502 #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:505 msgid "[ADLParser] Only email Prorocol for Notification is supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:563 msgid "[ADLParser] Missing or wrong value in ProcessesPerSlot." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:568 msgid "[ADLParser] Missing or wrong value in ThreadsPerProcess." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:574 msgid "" "[ADLParser] Missing Name element or value in ParallelEnvironment/Option " "element." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:591 msgid "[ADLParser] NetworkInfo is not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:605 #, c-format msgid "[ADLParser] NodeAccess value %s is not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:613 msgid "[ADLParser] Missing or wrong value in NumberOfSlots." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:620 msgid "" "[ADLParser] The NumberOfSlots element should be specified, when the value of " "useNumberOfSlots attribute of SlotsPerHost element is \"true\"." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:627 msgid "[ADLParser] Missing or wrong value in SlotsPerHost." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:656 msgid "[ADLParser] Missing or wrong value in IndividualPhysicalMemory." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:666 msgid "[ADLParser] Missing or wrong value in IndividualVirtualMemory." 
msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:676 msgid "[ADLParser] Missing or wrong value in DiskSpaceRequirement." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:690 msgid "[ADLParser] Benchmark is not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:698 msgid "[ADLParser] Missing or wrong value in IndividualCPUTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:706 msgid "[ADLParser] Missing or wrong value in TotalCPUTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:715 msgid "[ADLParser] Missing or wrong value in WallTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:735 msgid "[ADLParser] Missing or empty Name in InputFile." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:746 #, c-format msgid "[ADLParser] Wrong URI specified in Source - %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:768 msgid "[ADLParser] Missing or empty Name in OutputFile." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:774 #, c-format msgid "[ADLParser] Wrong URI specified in Target - %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:787 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:792 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:846 #, c-format msgid "Location URI for file %s is invalid" msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:812 #, c-format msgid "[ADLParser] CreationFlag value %s is not supported." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:75 #, c-format msgid "Unknown operator '%s' in attribute require in Version element" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:121 #, c-format msgid "Multiple '%s' elements are not supported." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:136 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:143 #, c-format msgid "The 'exclusiveBound' attribute to the '%s' element is not supported." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:150 msgid "The 'epsilon' attribute to the 'Exact' element is not supported." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:178 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:195 #, c-format msgid "Parsing error: Value of %s element can't be parsed as number" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:183 #, c-format msgid "" "Parsing error: Elements (%s) representing upper range have different values" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:200 #, c-format msgid "" "Parsing error: Elements (%s) representing lower range have different values" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:209 #, c-format msgid "" "Parsing error: Value of lower range (%s) is greater than value of upper " "range (%s)" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:296 msgid "[ARCJSDLParser] Not a JSDL - missing JobDescription element" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:377 #, c-format msgid "" "[ARCJSDLParser] Error during the parsing: missed the name attributes of the " "\"%s\" Environment" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:424 msgid "[ARCJSDLParser] RemoteLogging URL is wrongly formatted." 
msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:440 msgid "[ARCJSDLParser] priority is too large - using max value 100" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:718 msgid "Lower bounded range is not supported for the 'TotalCPUCount' element." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:736 msgid "" "Parsing the \"require\" attribute of the \"QueueName\" nordugrid-JSDL " "element failed. An invalid comparison operator was used, only \"ne\" or \"eq" "\" are allowed." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:787 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:841 #, c-format msgid "No URI element found in Location for file %s" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:873 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:323 #, c-format msgid "String successfully parsed as %s." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:53 #, c-format msgid "[JDLParser] Semicolon (;) is not allowed inside brackets, at '%s;'." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:137 #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:141 #, c-format msgid "[JDLParser] This kind of JDL descriptor is not supported yet: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:144 #, c-format msgid "[JDLParser] Attribute named %s has unknown value: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:224 msgid "Not enough outputsandboxdesturi elements!" msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:306 msgid "" "[JDLParser] Environment variable has been defined without any equals sign." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:503 #, c-format msgid "[JDLParser]: Unknown attribute name: '%s', with value: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:539 msgid "The inputsandboxbaseuri JDL attribute specifies an invalid URL." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:616 msgid "[JDLParser] Syntax error found during the split function." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:620 msgid "[JDLParser] Lines count is zero or other funny error has occurred." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:628 msgid "" "[JDLParser] JDL syntax error. There is at least one equals sign missing " "where it would be expected." 
msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:641 #, fuzzy, c-format msgid "String successfully parsed as %s" msgstr "erfolgreich angelegt, ID: %s" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:42 msgid "Left operand for RSL concatenation does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:50 msgid "Right operand for RSL concatenation does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:161 msgid "Multi-request operator only allowed at top level" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:186 msgid "RSL substitution is not a sequence" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:192 msgid "RSL substitution sequence is not of length 2" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:211 msgid "RSL substitution variable name does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:220 msgid "RSL substitution variable value does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:313 #, fuzzy msgid "End of comment not found" msgstr "clientxrsl nicht gefunden" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:324 msgid "Junk at end of RSL" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:424 msgid "End of single quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:441 msgid "End of double quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:460 #, c-format msgid "End of user delimiter (%s) quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:518 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:546 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:625 msgid "')' expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:528 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:609 msgid "'(' expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:536 msgid "Variable name expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:541 #, c-format msgid "Variable name (%s) contains invalid character (%s)" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:557 #, fuzzy msgid "Broken string" msgstr "Zeichenkette" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:570 msgid "No left operand for concatenation operator" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:574 msgid "No right operand for concatenation operator" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:638 msgid "Attribute name expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:643 #, c-format msgid "Attribute name (%s) contains invalid character (%s)" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:649 #, fuzzy msgid "Relation operator expected" msgstr "Sofortige Vervollständigung: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:86 msgid "Error parsing the internally set executables attribute." 
msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:102 #, c-format msgid "" "File '%s' in the 'executables' attribute is not present in the 'inputfiles' " "attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:120 msgid "The value of the ftpthreads attribute must be a number from 1 to 10" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:177 msgid "'stdout' attribute must specified when 'join' attribute is specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:181 msgid "" "Attribute 'join' cannot be specified when both 'stdout' and 'stderr' " "attributes is specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:200 msgid "Attributes 'gridtime' and 'cputime' cannot be specified together" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:204 msgid "Attributes 'gridtime' and 'walltime' cannot be specified together" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:226 msgid "" "When specifying 'countpernode' attribute, 'count' attribute must also be " "specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:229 msgid "Value of 'countpernode' attribute must be an integer" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:287 #, fuzzy msgid "No RSL content in job description found" msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:293 msgid "Multi-request job description not allowed in GRIDMANAGER dialect" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:304 msgid "No execuable path specified in GRIDMANAGER dialect" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:311 msgid "'action' attribute not allowed in user-side job description" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:314 msgid "Executable path not specified ('executable' attribute)" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:332 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:350 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:368 #, c-format msgid "Attribute '%s' multiply defined" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:336 #, c-format msgid "Value of attribute '%s' expected to be single value" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:341 #, c-format msgid "Value of attribute '%s' expected to be a string" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:357 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:387 #, c-format msgid "Value of attribute '%s' is not a string" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:375 #, c-format msgid "Value of attribute '%s' is not sequence" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:379 #, c-format msgid "" "Value of attribute '%s' has wrong sequence length: Expected %d, found %d" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:511 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1368 msgid "Unexpected RSL type" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:576 msgid "At least two values are needed for the 'inputfiles' attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:581 msgid "First value of 'inputfiles' attribute (filename) cannot be empty" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:606 #, fuzzy, c-format msgid "Invalid URL '%s' for input file '%s'" msgstr "Ungültige URL Option: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:615 #, fuzzy, c-format msgid "Invalid URL option 
syntax in option '%s' for input file '%s'" msgstr "Ungültige URL Option: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:625 #, fuzzy, c-format msgid "Invalid URL: '%s' in input file '%s'" msgstr "Ungültige URL Option: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:694 msgid "At least two values are needed for the 'outputfiles' attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:699 msgid "First value of 'outputfiles' attribute (filename) cannot be empty" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:711 #, fuzzy, c-format msgid "Invalid URL '%s' for output file '%s'" msgstr "Ungültige URL Option: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:720 #, fuzzy, c-format msgid "Invalid URL option syntax in option '%s' for output file '%s'" msgstr "Ungültige URL Option: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:730 #, fuzzy, c-format msgid "Invalid URL: '%s' in output file '%s'" msgstr "Ungültige URL Option: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:761 #, c-format msgid "" "Invalid comparison operator '%s' used at 'queue' attribute in 'GRIDMANAGER' " "dialect, only \"=\" is allowed" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:767 #, c-format msgid "" "Invalid comparison operator '%s' used at 'queue' attribute, only \"!=\" or " "\"=\" are allowed." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1029 msgid "The value of the acl XRSL attribute isn't valid XML." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1043 msgid "The cluster XRSL attribute is currently unsupported." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1059 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it must contain an email " "address" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1067 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it must only contain email " "addresses after state flag(s)" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1070 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it contains unknown state " "flags" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1118 msgid "priority is too large - using max value 100" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1151 #, c-format msgid "Invalid nodeaccess value: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1194 msgid "Value of 'count' attribute must be an integer" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1224 msgid "Value of 'exclusiveexecution' attribute must either be 'yes' or 'no'" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1270 #, c-format msgid "Invalid action value %s" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1360 #, c-format msgid "The specified Globus attribute (%s) is not supported. %s ignored." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1364 #, c-format msgid "Unknown XRSL attribute: %s - Ignoring it." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1378 #, fuzzy, c-format msgid "Wrong language requested: %s" msgstr "*** Client Anfrage: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1384 msgid "Missing executable" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1702 msgid "" "Cannot output XRSL representation: The Resources.SlotRequirement." "NumberOfSlots attribute must be specified when the Resources.SlotRequirement." "SlotsPerHost attribute is specified." 
msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:65 #: src/services/wrappers/python/pythonwrapper.cpp:96 msgid "Failed to initialize main Python thread" msgstr "Fehler bei Initialisierung des main Python Threads" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:71 #: src/services/wrappers/python/pythonwrapper.cpp:101 msgid "Main Python thread was not initialized" msgstr "Main Python Thread wurde nicht initialisiert" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:81 #, fuzzy, c-format msgid "Loading Python broker (%i)" msgstr "Lade python broker (%i)" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:104 #: src/services/wrappers/python/pythonwrapper.cpp:139 #, fuzzy msgid "Main Python thread is not initialized" msgstr "Main python thread wurde nicht initialisiert" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:108 #, fuzzy msgid "PythonBroker init" msgstr "PythonBroker init" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:116 msgid "" "Invalid class name. The broker argument for the PythonBroker should be\n" " Filename.Class.args (args is optional), for example SampleBroker." "MyBroker" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:122 #, fuzzy, c-format msgid "Class name: %s" msgstr "Klassenname: %s" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:123 #, fuzzy, c-format msgid "Module name: %s" msgstr "Modulname: %s" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:132 #: src/services/wrappers/python/pythonwrapper.cpp:183 #, fuzzy msgid "Cannot convert ARC module name to Python string" msgstr "Kann Modul name nicht zu Python Zeichenkette konvertieren" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:140 #: src/services/wrappers/python/pythonwrapper.cpp:191 #, fuzzy msgid "Cannot import ARC module" msgstr "Kann Modul nicht importieren" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:149 #: src/services/wrappers/python/pythonwrapper.cpp:201 #: src/services/wrappers/python/pythonwrapper.cpp:426 #: src/services/wrappers/python/pythonwrapper.cpp:526 #, fuzzy msgid "Cannot get dictionary of ARC module" msgstr "Kann auf Wörterbuch des arc Moduls nicht zugreifen" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:158 #, fuzzy msgid "Cannot find ARC UserConfig class" msgstr "Kann UserConfig Klasse nicht finden" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:166 #, fuzzy msgid "UserConfig class is not an object" msgstr "UserConfig Klasse ist kein Objekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:173 #, fuzzy msgid "Cannot find ARC JobDescription class" msgstr "Kann arc JobDescription Klasse nicht finden" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:181 #, fuzzy msgid "JobDescription class is not an object" msgstr "JobDescription Klasse ist kein Objekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:188 #, fuzzy msgid "Cannot find ARC ExecutionTarget class" msgstr "Kann arc ExecutionTarget Klasse nicht finden" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:196 #, fuzzy msgid "ExecutionTarget class is not an object" msgstr "ExecutionTarget Klasse ist kein Objekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:207 #: src/services/wrappers/python/pythonwrapper.cpp:162 msgid "Cannot convert module name to Python string" msgstr "Kann Modul name nicht zu Python Zeichenkette konvertieren" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:215 #: src/services/wrappers/python/pythonwrapper.cpp:169 msgid "Cannot import module" msgstr "Kann Modul nicht importieren" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:224 #, fuzzy 
msgid "Cannot get dictionary of custom broker module" msgstr "Kann auf Wörterbuch von custom broker Modul nicht zugreifen" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:233 #, fuzzy msgid "Cannot find custom broker class" msgstr "Kann custom broker Klasse nicht finden" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:241 #, fuzzy, c-format msgid "%s class is not an object" msgstr "Klasse %s ist kein Objekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:247 #, fuzzy msgid "Cannot create UserConfig argument" msgstr "Kann UserConfig Argument nicht anlegen" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:255 #, fuzzy msgid "Cannot convert UserConfig to Python object" msgstr "Kann UserConfig nicht zu python Objekt konvertieren" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:263 #: src/services/wrappers/python/pythonwrapper.cpp:258 msgid "Cannot create argument of the constructor" msgstr "Kann Argument für den Konstruktor nicht anlegen" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:272 #: src/services/wrappers/python/pythonwrapper.cpp:266 #, fuzzy msgid "Cannot create instance of Python class" msgstr "Kann Instanz von Python Klasse nicht anlegen" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:278 #, fuzzy, c-format msgid "Python broker constructor called (%d)" msgstr "Python broker Kontruktor aufgerufen (%d)" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:302 #, fuzzy, c-format msgid "Python broker destructor called (%d)" msgstr "Python broker Destruktor aufgerufen (%d)" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:311 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:328 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:361 #, fuzzy msgid "Cannot create ExecutionTarget argument" msgstr "Kann ExecutionTarget Argument nicht anlegen" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:319 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:336 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:369 #, fuzzy, c-format msgid "Cannot convert ExecutionTarget (%s) to python object" msgstr "Kann ExecutionTarget nicht zu Python Objekt konvertieren" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:393 #, fuzzy msgid "Cannot create JobDescription argument" msgstr "Kann JobDescription Argument nicht anlegen." #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:401 #, fuzzy msgid "Cannot convert JobDescription to python object" msgstr "Kann JobDescription nicht zu Python Objekt konvertieren" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:422 msgid "Do sorting using user created python broker" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:53 msgid "Cannot initialize ARCHERY domain name for query" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:60 #, fuzzy msgid "Cannot create resolver from /etc/resolv.conf" msgstr "Konnte classname für Policy nicht von Konfiguration parsen" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:68 msgid "Cannot query service endpoint TXT records from DNS" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:79 msgid "Cannot parse service endpoint TXT records." 
msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:121 #, c-format msgid "Wrong service record field \"%s\" found in the \"%s\"" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:126 #, c-format msgid "Malformed ARCHERY record found (endpoint url is not defined): %s" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:131 #, c-format msgid "Malformed ARCHERY record found (endpoint type is not defined): %s" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:135 #, fuzzy, c-format msgid "Found service endpoint %s (type %s)" msgstr "Fand %u execution services des index service %s" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:150 #, c-format msgid "" "Status for service endpoint \"%s\" is set to inactive in ARCHERY. Skipping." msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginEMIR.cpp:101 #, fuzzy, c-format msgid "Found %u service endpoints from the index service at %s" msgstr "Fand %u execution services des index service %s" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:102 #, fuzzy msgid "Cleaning of UNICORE jobs is not supported" msgstr "Das Löschen von BES Jobs wird nicht unterstützt" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:131 #, fuzzy msgid "Canceling of UNICORE jobs is not supported" msgstr "Erneuerung von UNICORE Jobs wird nicht unterstützt" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:139 #, fuzzy msgid "Renewal of UNICORE jobs is not supported" msgstr "Erneuerung von UNICORE Jobs wird nicht unterstützt" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:147 #, fuzzy msgid "Resumation of UNICORE jobs is not supported" msgstr "Resume von UNICORE jobs wird nicht unterstützt" #: src/hed/acc/UNICORE/UNICOREClient.cpp:67 msgid "Creating a UNICORE client" msgstr "Erstellen von UNICORE client" #: src/hed/acc/UNICORE/UNICOREClient.cpp:90 src/services/a-rex/test.cpp:154 #: src/services/a-rex/test.cpp:227 src/services/a-rex/test.cpp:275 #: src/services/a-rex/test.cpp:323 src/services/a-rex/test.cpp:371 #: src/tests/client/test_ClientInterface.cpp:73 #: src/tests/client/test_ClientSAML2SSO.cpp:47 #: src/tests/client/test_ClientSAML2SSO.cpp:71 #: src/tests/count/test_client.cpp:64 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:85 #: src/tests/echo/echo_test4axis2c/test_client.cpp:56 #: src/tests/echo/test.cpp:62 src/tests/echo/test_client.cpp:72 #: src/tests/echo/test_clientinterface.cpp:67 #: src/tests/echo/test_clientinterface.cpp:107 #: src/tests/echo/test_clientinterface.cpp:136 #: src/tests/echo/test_clientinterface.py:19 msgid "Creating and sending request" msgstr "Erstellen und senden von Anfrage" #: src/hed/acc/UNICORE/UNICOREClient.cpp:182 msgid "Failed to find delegation credentials in client configuration" msgstr "Konnte delegation credentials in Client Konfiguration nicht finden" #: src/hed/acc/UNICORE/UNICOREClient.cpp:194 #: src/hed/acc/UNICORE/UNICOREClient.cpp:224 src/services/a-rex/test.cpp:88 msgid "Failed to initiate delegation" msgstr "Initiierung der Delegation fehlgeschlagen" #: src/hed/acc/UNICORE/UNICOREClient.cpp:203 #: src/hed/acc/UNICORE/UNICOREClient.cpp:236 msgid "Submission request failed" msgstr "Submission von Anfrage schlug fehl" #: src/hed/acc/UNICORE/UNICOREClient.cpp:239 msgid "Submission request succeed" msgstr "Submission von Anfrage ist erfolgt" #: src/hed/acc/UNICORE/UNICOREClient.cpp:241 msgid "There was no response to a submission request" msgstr "Keine Antwort zu submission request erhalten" #: 
src/hed/acc/UNICORE/UNICOREClient.cpp:248 msgid "A response to a submission request was not a SOAP message" msgstr "Eine Antwort zu submission request war keine SOAP message" #: src/hed/acc/UNICORE/UNICOREClient.cpp:255 #: src/hed/acc/UNICORE/UNICOREClient.cpp:336 #: src/hed/acc/UNICORE/UNICOREClient.cpp:414 #: src/hed/acc/UNICORE/UNICOREClient.cpp:527 #: src/hed/acc/UNICORE/UNICOREClient.cpp:603 #: src/hed/acc/UNICORE/UNICOREClient.cpp:677 #, fuzzy msgid "There is no connection chain configured" msgstr "Es ist keine connection chain konfiguriert" #: src/hed/acc/UNICORE/UNICOREClient.cpp:276 #: src/hed/acc/UNICORE/UNICOREClient.cpp:348 #, fuzzy, c-format msgid "Submission returned failure: %s" msgstr "Submission ergab Fehler: %s" #: src/hed/acc/UNICORE/UNICOREClient.cpp:277 #: src/hed/acc/UNICORE/UNICOREClient.cpp:349 #, fuzzy, c-format msgid "Submission failed, service returned: %s" msgstr "Submission fehlgeschlagen, Service erwiderte: %s" #: src/hed/acc/UNICORE/UNICOREClient.cpp:284 #, fuzzy msgid "Creating and sending a start job request" msgstr "Erstelle und sende eine start job Anfrage" #: src/hed/acc/UNICORE/UNICOREClient.cpp:317 #, fuzzy msgid "A start job request failed" msgstr "Eine start job Anfrage schlug fehl" #: src/hed/acc/UNICORE/UNICOREClient.cpp:320 #, fuzzy msgid "A start job request succeeded" msgstr "Eine start job Anfrage war erfolgreich" #: src/hed/acc/UNICORE/UNICOREClient.cpp:322 #, fuzzy msgid "There was no response to a start job request" msgstr "Keine Antwort zu start job Anfrage erhalten" #: src/hed/acc/UNICORE/UNICOREClient.cpp:329 #, fuzzy msgid "The response of a start job request was not a SOAP message" msgstr "Die Antwort zu start job Anfrage war keine SOAP Nachricht" #: src/hed/acc/UNICORE/UNICOREClient.cpp:395 msgid "A status request failed" msgstr "Eine Anfrage nach dem Status schlug fehl" #: src/hed/acc/UNICORE/UNICOREClient.cpp:398 msgid "A status request succeed" msgstr "Die Anfrage nach dem Status war erfolgreich" #: src/hed/acc/UNICORE/UNICOREClient.cpp:400 msgid "There was no response to a status request" msgstr "Es gab keine Antwort zu einer Status Anfrage" #: src/hed/acc/UNICORE/UNICOREClient.cpp:408 msgid "The response of a status request was not a SOAP message" msgstr "Die Antwort auf eine Status Anfrage war keine SOAP Nachricht" #: src/hed/acc/UNICORE/UNICOREClient.cpp:433 msgid "The job status could not be retrieved" msgstr "Der Job Status konnte nicht ermittelt werden" #: src/hed/acc/UNICORE/UNICOREClient.cpp:444 #, fuzzy msgid "Creating and sending an index service query" msgstr "Erstellen und senden einer Index Service Anfrage" #: src/hed/acc/UNICORE/UNICOREClient.cpp:472 msgid "Creating and sending a service status request" msgstr "Erstellen und senden einer Service Status Anfrage" #: src/hed/acc/UNICORE/UNICOREClient.cpp:508 #, fuzzy msgid "A service status request failed" msgstr "Eine Service Status Anfrage schlug fehl" #: src/hed/acc/UNICORE/UNICOREClient.cpp:511 #, fuzzy msgid "A service status request succeeded" msgstr "Eine Service Status Anfrage war erfolgreich" #: src/hed/acc/UNICORE/UNICOREClient.cpp:513 msgid "There was no response to a service status request" msgstr "Es gab keine Antwort zu einer Service Status Anfrage" #: src/hed/acc/UNICORE/UNICOREClient.cpp:520 msgid "The response of a service status request was not a SOAP message" msgstr "Die Antwort zu einer Service Status Anfrage war keine SOAP message" #: src/hed/acc/UNICORE/UNICOREClient.cpp:537 msgid "The service status could not be retrieved" msgstr "Der Service Status 
konnte nicht ermittelt werden" #: src/hed/acc/UNICORE/UNICOREClient.cpp:584 #, fuzzy msgid "A job termination request failed" msgstr "Die Job Terminierungs-Anfrage schlug fehl" #: src/hed/acc/UNICORE/UNICOREClient.cpp:587 #, fuzzy msgid "A job termination request succeed" msgstr "Eine Job Terminierungs-Anfrage war erfolgreich" #: src/hed/acc/UNICORE/UNICOREClient.cpp:589 msgid "There was no response to a job termination request" msgstr "Es gab keine Antwort zu einer Job Terminierungs-Anfrage" #: src/hed/acc/UNICORE/UNICOREClient.cpp:596 msgid "The response of a job termination request was not a SOAP message" msgstr "" "Die Antwort zu einer Job Terminierungs-Anfrage war keine SOAP Nachricht" #: src/hed/acc/UNICORE/UNICOREClient.cpp:658 #, fuzzy msgid "A job cleaning request failed" msgstr "Die Job Terminierungs-Anfrage schlug fehl" #: src/hed/acc/UNICORE/UNICOREClient.cpp:661 #, fuzzy msgid "A job cleaning request succeed" msgstr "Die Job Löschen-Anfrage war erfolgreich" #: src/hed/acc/UNICORE/UNICOREClient.cpp:663 msgid "There was no response to a job cleaning request" msgstr "Keine Antwort auf eine Job Löschen-Anfrage erhalten" #: src/hed/acc/UNICORE/UNICOREClient.cpp:670 msgid "The response of a job cleaning request was not a SOAP message" msgstr "Die Antwort auf eine Job Löschen-Anfrage war keine SOAP Nachricht" #: src/hed/acc/ldap/Extractor.h:22 #, c-format msgid "Extractor[%s] (%s): %s = %s" msgstr "" #: src/hed/acc/ldap/Extractor.h:113 src/hed/acc/ldap/Extractor.h:130 #, c-format msgid "Extractor[%s] (%s): %s contains %s" msgstr "" #: src/hed/acc/ldap/JobListRetrieverPluginLDAPGLUE2.cpp:54 #, c-format msgid "Adding endpoint '%s' with interface name %s" msgstr "" #: src/hed/acc/ldap/JobListRetrieverPluginLDAPNG.cpp:63 #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginBDII.cpp:43 #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginEGIIS.cpp:46 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE1.cpp:49 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE2.cpp:47 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPNG.cpp:59 msgid "Can't create information handle - is the ARC ldap DMC plugin available?" msgstr "" "Kann information handle nicht anlegen - ist das ARC LDAP DMC plugin " "verfügbar?" #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginBDII.cpp:85 msgid "Adding CREAM computing service" msgstr "" #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginEGIIS.cpp:79 #, fuzzy, c-format msgid "Unknown entry in EGIIS (%s)" msgstr "unbekannter return code %s" #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginEGIIS.cpp:87 msgid "" "Entry in EGIIS is missing one or more of the attributes 'Mds-Service-type', " "'Mds-Service-hn', 'Mds-Service-port' and/or 'Mds-Service-Ldap-suffix'" msgstr "" #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE2.cpp:219 msgid "" "The \"FreeSlotsWithDuration\" attribute is wrongly formatted. Ignoring it." 
msgstr "" #: src/hed/daemon/unix/daemon.cpp:74 #, c-format msgid "Daemonization fork failed: %s" msgstr "" #: src/hed/daemon/unix/daemon.cpp:82 msgid "Watchdog (re)starting application" msgstr "" #: src/hed/daemon/unix/daemon.cpp:87 #, fuzzy, c-format msgid "Watchdog fork failed: %s" msgstr "Verbindung zu %s schlug fehl: %s" #: src/hed/daemon/unix/daemon.cpp:94 msgid "Watchdog starting monitoring" msgstr "" #: src/hed/daemon/unix/daemon.cpp:120 #, c-format msgid "Watchdog detected application exit due to signal %u" msgstr "" #: src/hed/daemon/unix/daemon.cpp:122 #, c-format msgid "Watchdog detected application exited with code %u" msgstr "" #: src/hed/daemon/unix/daemon.cpp:124 msgid "Watchdog detected application exit" msgstr "" #: src/hed/daemon/unix/daemon.cpp:133 msgid "" "Watchdog exiting because application was purposely killed or exited itself" msgstr "" #: src/hed/daemon/unix/daemon.cpp:140 msgid "Watchdog detected application timeout or error - killing process" msgstr "" #: src/hed/daemon/unix/daemon.cpp:151 msgid "Watchdog failed to wait till application exited - sending KILL" msgstr "" #: src/hed/daemon/unix/daemon.cpp:163 msgid "Watchdog failed to kill application - giving up and exiting" msgstr "" #: src/hed/daemon/unix/daemon.cpp:184 msgid "Shutdown daemon" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:43 src/hed/daemon/win32/main_win32.cpp:27 msgid "shutdown" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:46 msgid "exit" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:92 src/hed/daemon/win32/main_win32.cpp:53 msgid "No server config part of config file" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:163 #: src/hed/daemon/win32/main_win32.cpp:91 #, c-format msgid "Unknown log level %s" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:173 #: src/hed/daemon/win32/main_win32.cpp:100 #, c-format msgid "Failed to open log file: %s" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:206 #: src/hed/daemon/win32/main_win32.cpp:126 msgid "Start foreground" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:255 #, c-format msgid "XML config file %s does not exist" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:259 src/hed/daemon/unix/main_unix.cpp:274 #: src/hed/daemon/win32/main_win32.cpp:154 #, c-format msgid "Failed to load service configuration from file %s" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:265 #, c-format msgid "INI config file %s does not exist" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:270 msgid "Error evaluating profile" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:286 #, fuzzy msgid "Error loading generated configuration" msgstr "" "\n" "%s: ошибка чтения входного файла '%s': %s\n" #: src/hed/daemon/unix/main_unix.cpp:292 msgid "Error evaulating profile" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:297 msgid "Failed to load service configuration from any default config file" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:358 msgid "Schema validation error" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:373 #: src/hed/daemon/win32/main_win32.cpp:159 msgid "Configuration root element is not " msgstr "" #: src/hed/daemon/unix/main_unix.cpp:389 #, c-format msgid "Cannot switch to group (%s)" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:399 #, c-format msgid "Cannot switch to primary group for user (%s)" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:404 #, c-format msgid "Cannot switch to user (%s)" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:422 #: src/hed/daemon/win32/main_win32.cpp:176 #, fuzzy msgid "Failed to load service side MCCs" msgstr "Fehler bei Lesen 
von Zertifikat-Datei: %s" #: src/hed/daemon/unix/main_unix.cpp:424 #: src/hed/daemon/win32/main_win32.cpp:178 src/services/a-rex/test.cpp:41 #: src/tests/count/test_service.cpp:32 src/tests/echo/test.cpp:30 #: src/tests/echo/test_service.cpp:29 msgid "Service side MCCs are loaded" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:431 #: src/hed/daemon/win32/main_win32.cpp:185 msgid "Unexpected arguments supplied" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:93 #: src/hed/dmc/acix/DataPointACIX.cpp:342 #: src/hed/dmc/rucio/DataPointRucio.cpp:220 #: src/hed/dmc/rucio/DataPointRucio.cpp:438 #, fuzzy, c-format msgid "No locations found for %s" msgstr "Keine locations gefunden für %s" #: src/hed/dmc/acix/DataPointACIX.cpp:121 #, c-format msgid "Found none or multiple URLs (%s) in ACIX URL: %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:131 #, fuzzy, c-format msgid "Cannot handle URL %s" msgstr "" "Kann Owner von %s nicht ändernНевозможно изменить владельца папки %1.\n" "Ошибка: %2" #: src/hed/dmc/acix/DataPointACIX.cpp:138 #, fuzzy, c-format msgid "Could not resolve original source of %s: out of time" msgstr "Konnte temporäre Datei nicht anlegen: %s" #: src/hed/dmc/acix/DataPointACIX.cpp:144 #, fuzzy, c-format msgid "Could not resolve original source of %s: %s" msgstr "Konnte temporäre Datei nicht anlegen: %s" #: src/hed/dmc/acix/DataPointACIX.cpp:160 #, c-format msgid "Querying ACIX server at %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:161 #, c-format msgid "Calling acix with query %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:167 #, fuzzy, c-format msgid "Failed to query ACIX: %s" msgstr "Fehler bei Lesen von Proxy-Datei: %s" #: src/hed/dmc/acix/DataPointACIX.cpp:171 #: src/hed/dmc/acix/DataPointACIX.cpp:308 #, fuzzy, c-format msgid "Failed to parse ACIX response: %s" msgstr "Fehler bei Anlegen von GSI Context: %s" #: src/hed/dmc/acix/DataPointACIX.cpp:298 #, fuzzy, c-format msgid "ACIX returned %s" msgstr "XACML Anfrage: %s" #: src/hed/dmc/acix/DataPointACIX.cpp:319 #, fuzzy, c-format msgid "No locations for %s" msgstr "Keine locations gefunden für %s" #: src/hed/dmc/acix/DataPointACIX.cpp:325 #, fuzzy, c-format msgid "%s: ACIX Location: %s" msgstr "Zugriffslist location: %s" #: src/hed/dmc/acix/DataPointACIX.cpp:327 #, c-format msgid "%s: Location %s not accessible remotely, skipping" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:68 #, c-format msgid "" "checingBartenderURL: Response:\n" "%s" msgstr "" "checkingBartenderURL: Response:\n" "%s" #: src/hed/dmc/arc/DataPointARC.cpp:154 src/hed/dmc/arc/DataPointARC.cpp:206 #: src/hed/dmc/arc/DataPointARC.cpp:278 src/hed/dmc/arc/DataPointARC.cpp:375 #: src/hed/dmc/arc/DataPointARC.cpp:548 src/hed/dmc/arc/DataPointARC.cpp:609 msgid "Hostname is not implemented for arc protocol" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:245 src/hed/dmc/arc/DataPointARC.cpp:330 #: src/hed/dmc/arc/DataPointARC.cpp:441 src/hed/dmc/arc/DataPointARC.cpp:534 #, c-format msgid "" "nd:\n" "%s" msgstr "" "nd:\n" "%s" #: src/hed/dmc/arc/DataPointARC.cpp:263 #, fuzzy msgid "Not a collection" msgstr "Nicht verbunden" #: src/hed/dmc/arc/DataPointARC.cpp:282 src/hed/dmc/srm/DataPointSRM.cpp:316 msgid "StartReading" msgstr "StartReading" #: src/hed/dmc/arc/DataPointARC.cpp:338 src/hed/dmc/arc/DataPointARC.cpp:449 #: src/hed/dmc/arc/DataPointARC.cpp:601 #, c-format msgid "Recieved transfer URL: %s" msgstr "Erhielt transfer URL: %s" #: src/hed/dmc/arc/DataPointARC.cpp:378 src/hed/dmc/srm/DataPointSRM.cpp:518 msgid "StartWriting" msgstr "StartWriting" #: 
src/hed/dmc/arc/DataPointARC.cpp:490 #, fuzzy, c-format msgid "Calculated checksum: %s" msgstr "Errechnete checksum: %s" #: src/hed/dmc/arc/DataPointARC.cpp:554 msgid "Check" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:648 #, c-format msgid "Deleted %s" msgstr "Löschte %s" #: src/hed/dmc/file/DataPointFile.cpp:86 #, c-format msgid "Unknown channel %s for stdio protocol" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:93 #, fuzzy, c-format msgid "Failed to open stdio channel %s" msgstr "Fehler bei Öffnen von Datenkanal" #: src/hed/dmc/file/DataPointFile.cpp:94 #, fuzzy, c-format msgid "Failed to open stdio channel %d" msgstr "Fehler bei Öffnen von Datenkanal" #: src/hed/dmc/file/DataPointFile.cpp:335 #, fuzzy, c-format msgid "fsync of file %s failed: %s" msgstr "Verbindung zu %s schlug fehl: %s" #: src/hed/dmc/file/DataPointFile.cpp:340 #: src/hed/dmc/file/DataPointFile.cpp:347 #, fuzzy, c-format msgid "closing file %s failed: %s" msgstr "Verbindung zu %s schlug fehl: %s" #: src/hed/dmc/file/DataPointFile.cpp:366 #, c-format msgid "File is not accessible: %s" msgstr "Datei ist nicht zugreifbar: %s" #: src/hed/dmc/file/DataPointFile.cpp:372 #: src/hed/dmc/file/DataPointFile.cpp:459 #, fuzzy, c-format msgid "Can't stat file: %s: %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/hed/dmc/file/DataPointFile.cpp:420 #: src/hed/dmc/file/DataPointFile.cpp:426 #, fuzzy, c-format msgid "Can't stat stdio channel %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/hed/dmc/file/DataPointFile.cpp:474 #, fuzzy, c-format msgid "%s is not a directory" msgstr "Klasse %s ist kein Objekt" #: src/hed/dmc/file/DataPointFile.cpp:489 src/hed/dmc/s3/DataPointS3.cpp:461 #: src/hed/dmc/s3/DataPointS3.cpp:571 #, c-format msgid "Failed to read object %s: %s" msgstr "Fehler bei Lesen von Objekt %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:502 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:412 #, fuzzy, c-format msgid "File is not accessible %s: %s" msgstr "Datei ist nicht zugreifbar: %s" #: src/hed/dmc/file/DataPointFile.cpp:508 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:418 #, fuzzy, c-format msgid "Can't delete directory %s: %s" msgstr "Kann Verzeichnis nicht löschen: %s - %s" #: src/hed/dmc/file/DataPointFile.cpp:515 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:425 #, fuzzy, c-format msgid "Can't delete file %s: %s" msgstr "Kann Datei nicht löschen: %s - %s" #: src/hed/dmc/file/DataPointFile.cpp:525 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:335 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1470 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:440 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:460 #: src/services/a-rex/jura/JobLogFile.cpp:657 #: src/services/a-rex/jura/JobLogFile.cpp:1274 #, c-format msgid "Creating directory %s" msgstr "Lege Verzeichnis %s an" #: src/hed/dmc/file/DataPointFile.cpp:533 src/hed/dmc/srm/DataPointSRM.cpp:160 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:474 #, fuzzy, c-format msgid "Renaming %s to %s" msgstr "Benenne %s in %s um" #: src/hed/dmc/file/DataPointFile.cpp:535 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:483 #, fuzzy, c-format msgid "Can't rename file %s: %s" msgstr "Kann Datei nicht löschen: %s - %s" #: src/hed/dmc/file/DataPointFile.cpp:566 #, fuzzy, c-format msgid "Failed to open %s for reading: %s" msgstr "Fehler bei Öffnen von Datei %s zum Lesen: %s" #: src/hed/dmc/file/DataPointFile.cpp:581 #: src/hed/dmc/file/DataPointFile.cpp:719 #, fuzzy, c-format msgid "Failed to switch user id to %d/%d" msgstr "Fehler bei Senden zu %d von %s" 
#: src/hed/dmc/file/DataPointFile.cpp:587 #, fuzzy, c-format msgid "Failed to create/open file %s: %s" msgstr "Fehler bei Anlegen/Öffnen von Datei %s (%d)" #: src/hed/dmc/file/DataPointFile.cpp:603 #, fuzzy msgid "Failed to create thread" msgstr "Fehler bei Anlegen von ldap bind thread (%s)" #: src/hed/dmc/file/DataPointFile.cpp:683 #, c-format msgid "Invalid url: %s" msgstr "Ungültige url: %s" #: src/hed/dmc/file/DataPointFile.cpp:692 src/hed/libs/data/FileCache.cpp:603 #, fuzzy, c-format msgid "Failed to create directory %s: %s" msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/hed/dmc/file/DataPointFile.cpp:708 #: src/hed/dmc/file/DataPointFile.cpp:727 #, fuzzy, c-format msgid "Failed to create file %s: %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:739 #, c-format msgid "setting file %s to size %llu" msgstr "Setze Datei %s zu Größe %llu" #: src/hed/dmc/file/DataPointFile.cpp:759 #, fuzzy, c-format msgid "Failed to preallocate space for %s" msgstr "Fehler bei Reservieren von Platz" #: src/hed/dmc/file/DataPointFile.cpp:800 src/hed/libs/data/FileCache.cpp:981 #, fuzzy, c-format msgid "Failed to clean up file %s: %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:809 #, fuzzy, c-format msgid "Error during file validation. Can't stat file %s: %s" msgstr "Fehler bei Lesen von gültiger und existierender Lock-Datei %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:813 #, c-format msgid "" "Error during file validation: Local file size %llu does not match source " "file size %llu for file %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:53 #, fuzzy, c-format msgid "Using proxy %s" msgstr "Nutze space token %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:54 #, fuzzy, c-format msgid "Using key %s" msgstr "Nutze space token %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:55 #, fuzzy, c-format msgid "Using cert %s" msgstr "Nutze space token %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:113 msgid "Locations are missing in destination LFC URL" msgstr "Locations fehlen in destination LFC URL" #: src/hed/dmc/gfal/DataPointGFAL.cpp:119 #, c-format msgid "Duplicate replica found in LFC: %s" msgstr "Doppelte replica gefunden in LFC: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:121 #, c-format msgid "Adding location: %s - %s" msgstr "Füge location hinzu: %s - %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:129 #: src/hed/libs/data/DataPointIndex.cpp:161 #, fuzzy, c-format msgid "Add location: url: %s" msgstr "Füge location hinzu: url: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:130 #: src/hed/libs/data/DataPointIndex.cpp:162 #, fuzzy, c-format msgid "Add location: metadata: %s" msgstr "Füge location hinzu: Metadaten: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:150 #: src/hed/dmc/gfal/DataPointGFAL.cpp:310 #, fuzzy, c-format msgid "gfal_open failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:163 #: src/hed/dmc/gfal/DataPointGFAL.cpp:223 #: src/hed/dmc/gfal/DataPointGFAL.cpp:249 #: src/hed/dmc/gfal/DataPointGFAL.cpp:324 #: src/hed/dmc/gfal/DataPointGFAL.cpp:403 #: src/hed/dmc/gfal/DataPointGFAL.cpp:430 #, fuzzy, c-format msgid "gfal_close failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:195 #, fuzzy, c-format msgid "gfal_read failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:237 #, fuzzy msgid "StopReading starts waiting for transfer_condition." 
msgstr "stop_reading_ftp: warte auf Beenden von Transfer" #: src/hed/dmc/gfal/DataPointGFAL.cpp:239 #, fuzzy msgid "StopReading finished waiting for transfer_condition." msgstr "stop_reading_ftp: warte auf Beenden von Transfer" #: src/hed/dmc/gfal/DataPointGFAL.cpp:271 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:66 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:71 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:42 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:47 #, fuzzy, c-format msgid "No locations defined for %s" msgstr "Keine locations gefunden für %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:278 #, fuzzy, c-format msgid "Failed to set LFC replicas: %s" msgstr "Fehler beim Entfernen von LFC Verzeichnis: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:304 #, fuzzy, c-format msgid "gfal_mkdir failed (%s), trying to write anyway" msgstr "start_writing_ftp: mkdir fehlgeschlagen - versuche weiter zu schreiben" #: src/hed/dmc/gfal/DataPointGFAL.cpp:359 #, c-format msgid "DataPointGFAL::write_file got position %d and offset %d, has to seek" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:388 #, fuzzy, c-format msgid "gfal_write failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:418 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:302 #, fuzzy msgid "StopWriting starts waiting for transfer_condition." msgstr "StopWriting: Abbruch der Verbindung" #: src/hed/dmc/gfal/DataPointGFAL.cpp:420 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:304 #, fuzzy msgid "StopWriting finished waiting for transfer_condition." msgstr "stop_reading_ftp: warte auf Beenden von Transfer" #: src/hed/dmc/gfal/DataPointGFAL.cpp:451 #, fuzzy, c-format msgid "gfal_stat failed: %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:496 #, c-format msgid "gfal_listxattr failed, no replica information can be obtained: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:537 #, fuzzy, c-format msgid "gfal_opendir failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:549 #, c-format msgid "List will stat the URL %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:556 #, fuzzy, c-format msgid "gfal_closedir failed: %s" msgstr "Verbindung zu %s schlug fehl: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:584 #, fuzzy, c-format msgid "gfal_rmdir failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:587 #, fuzzy, c-format msgid "gfal_unlink failed: %s" msgstr "globus_io_cancel ist fehlgeschlagen: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:604 #, fuzzy, c-format msgid "gfal_mkdir failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:619 #, fuzzy, c-format msgid "gfal_rename failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:19 #, fuzzy, c-format msgid "Failed to obtain bytes transferred: %s" msgstr "Konnte Listing nicht via FTP beziehen: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:42 #, fuzzy, c-format msgid "Failed to get initiate GFAL2 parameter handle: %s" msgstr "Connect: Konnte init handle nicht initialisieren: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:49 #, fuzzy, c-format msgid "Failed to get initiate new GFAL2 context: %s" msgstr "Fehler bei Anlegen von GSI Context: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:56 #, fuzzy, c-format msgid "Failed to set GFAL2 monitor callback: %s" msgstr "Fehler bei Anlegen von soft link: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:64 #, fuzzy, c-format msgid 
"Failed to set overwrite option in GFAL2: %s" msgstr "Fehler beim Entfernen der location vom LFC: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:72 #, c-format msgid "Failed to set GFAL2 transfer timeout, will use default: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:84 #, fuzzy msgid "Transfer failed" msgstr "Einige Transfers schlugen fehl" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:92 #, fuzzy msgid "Transfer succeeded" msgstr "Transfer vollständig" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:38 msgid "ftp_complete_callback: success" msgstr "ftp_complete_callback: erfolgreich" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:44 #, c-format msgid "ftp_complete_callback: error: %s" msgstr "ftp_complete_callback: Fehler: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:60 #, fuzzy msgid "ftp_check_callback" msgstr "ftp_check_callback" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:62 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:90 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:116 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:135 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:305 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:340 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:678 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:847 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:879 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:913 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1052 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1104 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1114 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1122 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1130 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1138 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1144 #: src/services/gridftpd/commands.cpp:1225 #: src/services/gridftpd/dataread.cpp:76 #: src/services/gridftpd/dataread.cpp:173 #: src/services/gridftpd/datawrite.cpp:59 #: src/services/gridftpd/datawrite.cpp:146 #, c-format msgid "Globus error: %s" msgstr "Globus Fehler: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:73 msgid "Excessive data received while checking file access" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:89 #, fuzzy msgid "Registration of Globus FTP buffer failed - cancel check" msgstr "Registrierung von Globus FTP buffer fehlgeschlagen - breche check ab" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:115 msgid "check_ftp: globus_ftp_client_size failed" msgstr "check_ftp: globus_ftp_client_size fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:119 msgid "check_ftp: timeout waiting for size" msgstr "check_ftp: Zeitüberschreitung bei Warten für Größe" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:124 msgid "check_ftp: failed to get file's size" msgstr "check_ftp: konnten Dateigröße nicht bestimmen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:127 #, fuzzy, c-format msgid "check_ftp: obtained size: %lli" msgstr "Check: erhielt Größe: %lli" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:134 msgid "check_ftp: globus_ftp_client_modification_time failed" msgstr "check_ftp: globus_ftp_client_modification_time fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:138 msgid "check_ftp: timeout waiting for modification_time" msgstr "check_ftp: Zeitüberschreitung bei Warten auf modification_time" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:143 msgid "check_ftp: failed to get file's modification time" msgstr "check_ftp: konnte Modification time von Datei nicht erhalten" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:148 #, fuzzy, c-format msgid "check_ftp: obtained modification date: %s" msgstr "Check: erhielt Erstelldatum: 
%s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:167 msgid "check_ftp: globus_ftp_client_get failed" msgstr "check_ftp: globus_ftp_client_get fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:174 msgid "check_ftp: globus_ftp_client_register_read" msgstr "check_ftp: globus_ftp_client_register_read" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:185 #, fuzzy msgid "check_ftp: timeout waiting for partial get" msgstr "check_ftp: Zeitüberschreitung beim Warten auf partial get" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:215 #, fuzzy, c-format msgid "File delete failed, attempting directory delete for %s" msgstr "Löschen von Datei schlug fehl, versuche als Verzeichnis zu löschen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:225 #, fuzzy msgid "delete_ftp: globus_ftp_client_delete failed" msgstr "delete_ftp: globus_ftp_client_delete fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:231 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:252 #, fuzzy msgid "delete_ftp: timeout waiting for delete" msgstr "list_files_ftp: Zeitüberschreitung bei Warten auf Dateigröße " #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:246 #, fuzzy msgid "delete_ftp: globus_ftp_client_rmdir failed" msgstr "delete_ftp: globus_ftp_client_delete fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:301 #, fuzzy, c-format msgid "mkdir_ftp: making %s" msgstr "mkdir_ftp: erstelle %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:309 #, fuzzy msgid "mkdir_ftp: timeout waiting for mkdir" msgstr "mkdir_ftp: Zeitüberschreitung bei mkdir" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:344 #, fuzzy msgid "Timeout waiting for mkdir" msgstr "mkdir_ftp: Zeitüberschreitung bei mkdir" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:370 #, fuzzy msgid "start_reading_ftp" msgstr "start_reading_ftp" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:374 msgid "start_reading_ftp: globus_ftp_client_get" msgstr "start_reading_ftp: globus_ftp_client_get fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:388 msgid "start_reading_ftp: globus_ftp_client_get failed" msgstr "start_reading_ftp: globus_ftp_client_get fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:399 msgid "start_reading_ftp: globus_thread_create failed" msgstr "start_reading_ftp: globus_thread_create fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:418 msgid "stop_reading_ftp: aborting connection" msgstr "stop_reading_ftp: Abbruch der Verbindung" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:425 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:647 #, fuzzy, c-format msgid "Failed to abort transfer of ftp file: %s" msgstr "Fehler bei Ablage von FTP Datei" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:426 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:648 msgid "Assuming transfer is already aborted or failed." 
msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:433 msgid "stop_reading_ftp: waiting for transfer to finish" msgstr "stop_reading_ftp: warte auf Beenden von Transfer" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:435 #, fuzzy, c-format msgid "stop_reading_ftp: exiting: %s" msgstr "stop-reading_ftp: verlasse: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:449 msgid "ftp_read_thread: get and register buffers" msgstr "ftp_read_thread: beziehe und registriere Puffer" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:455 #, c-format msgid "ftp_read_thread: for_read failed - aborting: %s" msgstr "ftp_read_thread: for_read fehlgeschlagen - Abbruch: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:465 #, fuzzy, c-format msgid "ftp_read_thread: data callback failed - aborting: %s" msgstr "ftp_read_thread: for_read fehlgeschlagen - Abbruch: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:477 #, c-format msgid "ftp_read_thread: Globus error: %s" msgstr "ftp_read_thread: Globus Fehler: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:490 #, c-format msgid "ftp_read_thread: too many registration failures - abort: %s" msgstr "ftp_read_thread: zu viele Registrierungsfehler - Abbruch: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:495 #, fuzzy, c-format msgid "ftp_read_thread: failed to register Globus buffer - will try later: %s" msgstr "" "ftp_read_thread: Fehler bei Registrieren von Globus Puffer - verschoben auf " "später: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:508 msgid "ftp_read_thread: waiting for eof" msgstr "ftp_read_thread: warte auf EOF" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:512 #, fuzzy msgid "ftp_read_thread: waiting for buffers released" msgstr "ftp_read_thread: warte auf EOF" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:516 #, fuzzy msgid "ftp_read_thread: failed to release buffers - leaking" msgstr "" "ftp_read_thread: Fehler bei Registrieren von Globus Puffer - verschoben auf " "später: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:521 #, fuzzy msgid "ftp_read_thread: exiting" msgstr "ftp_read_thread: Beenden" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:539 #, fuzzy, c-format msgid "ftp_read_callback: failure: %s" msgstr "ftp_read_callback: Fehler" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:542 msgid "ftp_read_callback: success" msgstr "ftp_read_callback: Erfolg" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:558 #, fuzzy msgid "Failed to get ftp file" msgstr "Fehler bei Bezug von FTP Datei" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:594 #, fuzzy msgid "start_writing_ftp: mkdir" msgstr "start_wrtiting_ftp: mkdir" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:597 msgid "start_writing_ftp: mkdir failed - still trying to write" msgstr "start_writing_ftp: mkdir fehlgeschlagen - versuche weiter zu schreiben" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:599 #, fuzzy msgid "start_writing_ftp: put" msgstr "start_writing_ftp: put" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:613 msgid "start_writing_ftp: put failed" msgstr "start_writing_ftp: put fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:623 msgid "start_writing_ftp: globus_thread_create failed" msgstr "start_writitng_ftp: globus_thread_create failed" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:640 #, fuzzy msgid "StopWriting: aborting connection" msgstr "StopWriting: Abbruch der Verbindung" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:664 #, fuzzy, c-format msgid "StopWriting: Calculated checksum %s" msgstr "Errechneted checksum: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:668 #, fuzzy, c-format msgid "StopWriting: 
looking for checksum of %s" msgstr "list_files_ftp: Suche nach Größe von %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:677 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:912 #, fuzzy msgid "list_files_ftp: globus_ftp_client_cksm failed" msgstr "list_files_ftp: globus_ftp_client_size fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:681 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:916 #, fuzzy msgid "list_files_ftp: timeout waiting for cksum" msgstr "list_files_ftp: Zeitüberschreitung bei Warten auf Dateigröße " #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:688 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:923 #, fuzzy msgid "list_files_ftp: no checksum information possible" msgstr "list_files_ftp: Bezug von Zeitpunkt der letzten Dateiänderung von %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:691 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:926 #, fuzzy, c-format msgid "list_files_ftp: checksum %s" msgstr "meta_get_data: checksum: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:694 msgid "" "Checksum type returned by server is different to requested type, cannot " "compare" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:696 #, c-format msgid "Calculated checksum %s matches checksum reported by server" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:699 #, c-format msgid "" "Checksum mismatch between calculated checksum %s and checksum reported by " "server %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:721 msgid "ftp_write_thread: get and register buffers" msgstr "ftp_write_thread: Beziehe und Registriere Puffer" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:725 msgid "ftp_write_thread: for_write failed - aborting" msgstr "ftp_write_thread: for_write fehlgeschlagen - Abbruch" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:743 #, fuzzy msgid "ftp_write_thread: data callback failed - aborting" msgstr "ftp_write_thread: for_write fehlgeschlagen - Abbruch" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:759 #, fuzzy msgid "ftp_write_thread: waiting for eof" msgstr "ftp_read_thread: warte auf EOF" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:763 #, fuzzy msgid "ftp_write_thread: waiting for buffers released" msgstr "ftp_read_thread: warte auf EOF" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:771 #, fuzzy msgid "ftp_write_thread: failed to release buffers - leaking" msgstr "ftp_write_thread: Beziehe und Registriere Puffer" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:776 #, fuzzy msgid "ftp_write_thread: exiting" msgstr "ftp_read_thread: Beenden" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:799 #, fuzzy, c-format msgid "ftp_write_callback: failure: %s" msgstr "ftp_write_callback: Fehler" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:802 #, fuzzy, c-format msgid "ftp_write_callback: success %s" msgstr "ftp_write_callback: Erfolg" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:817 msgid "Failed to store ftp file" msgstr "Fehler bei Ablage von FTP Datei" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:825 #, fuzzy msgid "ftp_put_complete_callback: success" msgstr "ftp_complete_callback: erfolgreich" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:841 #, c-format msgid "list_files_ftp: looking for size of %s" msgstr "list_files_ftp: Suche nach Größe von %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:845 msgid "list_files_ftp: globus_ftp_client_size failed" msgstr "list_files_ftp: globus_ftp_client_size fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:851 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:852 msgid "list_files_ftp: timeout waiting for size" msgstr "list_files_ftp: 
Zeitüberschreitung bei Warten auf Dateigröße " #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:858 msgid "list_files_ftp: failed to get file's size" msgstr "list_files_ftp: Fehler bei Bezug von Dateigröße" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:870 #, c-format msgid "list_files_ftp: looking for modification time of %s" msgstr "list_files_ftp: Bezug von Zeitpunkt der letzten Dateiänderung von %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:876 msgid "list_files_ftp: globus_ftp_client_modification_time failed" msgstr "list_files_ftp: globus_ftp_client_modification_time fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:883 msgid "list_files_ftp: timeout waiting for modification_time" msgstr "" "list_files_ftp: Zeitüberschreitung bei Warten auf Zeitpunkt der letzten " "Dateiänderung " #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:891 msgid "list_files_ftp: failed to get file's modification time" msgstr "" "list_files_ftp: Fehler bei Bezug von Zeitpunkt der letzten Dateiänderung" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:903 #, fuzzy, c-format msgid "list_files_ftp: looking for checksum of %s" msgstr "list_files_ftp: Suche nach Größe von %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:942 #, fuzzy, c-format msgid "Failed to obtain stat from FTP: %s" msgstr "Konnte Listing nicht via FTP beziehen: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:948 #, fuzzy msgid "No results returned from stat" msgstr "Keine Antwort von %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:954 #, c-format msgid "Wrong number of objects (%i) for stat from ftp: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:968 #, c-format msgid "Unexpected path %s returned from server" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1007 #, fuzzy, c-format msgid "Failed to obtain listing from FTP: %s" msgstr "Konnte Listing nicht via FTP beziehen: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1050 #, fuzzy msgid "Rename: globus_ftp_client_move failed" msgstr "check_ftp: globus_ftp_client_get fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1056 #, fuzzy msgid "Rename: timeout waiting for operation to complete" msgstr "check_ftp: Zeitüberschreitung bei Warten auf modification_time" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1103 msgid "init_handle: globus_ftp_client_handleattr_init failed" msgstr "init_handle: globus_ftp_client_handleattr_init fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1112 msgid "init_handle: globus_ftp_client_handleattr_set_gridftp2 failed" msgstr "init_handle: globus_ftp_client_handleattr_set_gridftp2 fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1121 #, fuzzy msgid "init_handle: globus_ftp_client_handle_init failed" msgstr "init_handle: globus_ftp_client_handle_init fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1128 msgid "init_handle: globus_ftp_client_operationattr_init failed" msgstr "init_handle: globus_ftp_client_operationattr_init fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1136 #, fuzzy msgid "init_handle: globus_ftp_client_operationattr_set_allow_ipv6 failed" msgstr "init_handle: globus_ftp_client_operationattr_init fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1142 #, fuzzy msgid "init_handle: globus_ftp_client_operationattr_set_delayed_pasv failed" msgstr "init_handle: globus_ftp_client_operationattr_init fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1190 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1218 #, c-format msgid "globus_ftp_client_operationattr_set_authorization: error: 
%s" msgstr "globus_ftp_client_operationattr_set_authorisation: Fehler: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1217 #, fuzzy msgid "Failed to set credentials for GridFTP transfer" msgstr "Fehler bei Setzen von Credentials für GridFTP transfer" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1223 msgid "Using secure data transfer" msgstr "Nutze sicheren Datentransfer" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1228 msgid "Using insecure data transfer" msgstr "Nutze unsicheren Datentransfer" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1255 #, fuzzy msgid "~DataPoint: destroy ftp_handle" msgstr "DataPoint::deinit_handle: destroly ftp_handle" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1258 #, fuzzy msgid "~DataPoint: destroy ftp_handle failed - retrying" msgstr "DataPoint::deinit_handle: destroly ftp_handle" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1276 #, fuzzy msgid "~DataPoint: failed to destroy ftp_handle - leaking" msgstr "DataPoint::deinit_handle: destroly ftp_handle" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1296 msgid "" "Missing reference to factory and/or module. It is unsafe to use Globus in " "non-persistent mode - (Grid)FTP code is disabled. Report to developers." msgstr "" "Fehlende Referenz zu factory und/doer Module. Es ist unsicher, Globus im " "nicht-persistenten Modus zu nutzen - (Grid)FTP code wurde disabled. Bitte " "die Entwickler informieren." #: src/hed/dmc/gridftp/Lister.cpp:224 src/hed/dmc/gridftp/Lister.cpp:292 #: src/hed/dmc/gridftp/Lister.cpp:387 src/hed/dmc/gridftp/Lister.cpp:737 #: src/hed/dmc/gridftp/Lister.cpp:775 #, c-format msgid "Failure: %s" msgstr "Fehler: %s" #: src/hed/dmc/gridftp/Lister.cpp:226 src/hed/dmc/gridftp/Lister.cpp:246 #: src/hed/dmc/gridftp/Lister.cpp:471 src/hed/dmc/gridftp/Lister.cpp:478 #: src/hed/dmc/gridftp/Lister.cpp:500 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:163 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:196 #, c-format msgid "Response: %s" msgstr "Antwort: %s" #: src/hed/dmc/gridftp/Lister.cpp:291 #, fuzzy msgid "Error getting list of files (in list)" msgstr "Fehler bei Bezug von Dateiliste (in Liste)" #: src/hed/dmc/gridftp/Lister.cpp:293 #, fuzzy msgid "Assuming - file not found" msgstr "Vermuting - Datei nicht gefunden" #: src/hed/dmc/gridftp/Lister.cpp:310 #, fuzzy, c-format msgid "list record: %s" msgstr "Listen-Eintrag: %s" #: src/hed/dmc/gridftp/Lister.cpp:365 msgid "Failed reading list of files" msgstr "Fehler bei Lesen von Dateiliste" #: src/hed/dmc/gridftp/Lister.cpp:401 msgid "Failed reading data" msgstr "Fehler bei Lesen von Daten" #: src/hed/dmc/gridftp/Lister.cpp:429 #, c-format msgid "Command: %s" msgstr "Kommando: %s" #: src/hed/dmc/gridftp/Lister.cpp:433 src/hed/dmc/gridftp/Lister.cpp:474 #: src/hed/mcc/http/PayloadHTTP.cpp:991 msgid "Memory allocation error" msgstr "Speicherallokationsfehler" #: src/hed/dmc/gridftp/Lister.cpp:441 #, c-format msgid "%s failed" msgstr "%s fehlgeschlagen" #: src/hed/dmc/gridftp/Lister.cpp:445 #, fuzzy msgid "Command is being sent" msgstr "Kommando wird gesendet" #: src/hed/dmc/gridftp/Lister.cpp:450 msgid "Waiting for response" msgstr "Warte vor Antwort" #: src/hed/dmc/gridftp/Lister.cpp:455 #, fuzzy msgid "Callback got failure" msgstr "Callback erhielt Fehler" #: src/hed/dmc/gridftp/Lister.cpp:541 #, fuzzy msgid "Failed in globus_cond_init" msgstr "Fehler bei Initialisierung der condition" #: src/hed/dmc/gridftp/Lister.cpp:545 #, fuzzy msgid "Failed in globus_mutex_init" msgstr "Fehler bei Initialisierung des Mutex" #: 
src/hed/dmc/gridftp/Lister.cpp:552 #, fuzzy msgid "Failed allocating memory for handle" msgstr "Fehler bei Reservieren des Speichers für handle" #: src/hed/dmc/gridftp/Lister.cpp:557 #, fuzzy msgid "Failed in globus_ftp_control_handle_init" msgstr "Memory leak (globus_ftp_control_handle_t)" #: src/hed/dmc/gridftp/Lister.cpp:565 #, fuzzy msgid "Failed to enable IPv6" msgstr "Fehler bei Lesen von Objekt %s" #: src/hed/dmc/gridftp/Lister.cpp:576 src/services/gridftpd/commands.cpp:983 msgid "Closing connection" msgstr "Schließe Verbindung" #: src/hed/dmc/gridftp/Lister.cpp:583 src/hed/dmc/gridftp/Lister.cpp:598 msgid "Timeout waiting for Globus callback - leaking connection" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:608 msgid "Closed successfully" msgstr "Verbindung erfolgreich geschlossen" #: src/hed/dmc/gridftp/Lister.cpp:610 #, fuzzy msgid "Closing may have failed" msgstr "Verbindung zu %s schlug fehl: %s" #: src/hed/dmc/gridftp/Lister.cpp:637 msgid "Waiting for globus handle to settle" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:642 #, c-format msgid "Handle is not in proper state %u/%u" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:648 msgid "Globus handle is stuck" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:664 #, c-format msgid "Failed destroying handle: %s. Can't handle such situation." msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:687 #, fuzzy, c-format msgid "EPSV failed: %s" msgstr "PASV fehlgeschlagen: %s" #: src/hed/dmc/gridftp/Lister.cpp:691 #, fuzzy msgid "EPSV failed" msgstr "PASV fehlgeschlagen" #: src/hed/dmc/gridftp/Lister.cpp:698 #, c-format msgid "PASV failed: %s" msgstr "PASV fehlgeschlagen: %s" #: src/hed/dmc/gridftp/Lister.cpp:702 msgid "PASV failed" msgstr "PASV fehlgeschlagen" #: src/hed/dmc/gridftp/Lister.cpp:735 #, fuzzy msgid "Failed to apply local address to data connection" msgstr "Fehler bei Schließen von Verbindung 1" #: src/hed/dmc/gridftp/Lister.cpp:749 #, fuzzy msgid "Can't parse host and/or port in response to EPSV/PASV" msgstr "Kann host and port nicht aus Antwort zu PASV herauslesen" #: src/hed/dmc/gridftp/Lister.cpp:754 #, fuzzy, c-format msgid "Data channel: %d.%d.%d.%d:%d" msgstr "Datenkanal: %d.%d.%d.%d %d" #: src/hed/dmc/gridftp/Lister.cpp:769 #, fuzzy, c-format msgid "Data channel: [%s]:%d" msgstr "Datenkanal: %d.%d.%d.%d %d" #: src/hed/dmc/gridftp/Lister.cpp:773 #, fuzzy msgid "Obtained host and address are not acceptable" msgstr "Erhaltener host und Adresse sind nicht akzeptabel" #: src/hed/dmc/gridftp/Lister.cpp:783 msgid "Failed to open data channel" msgstr "Fehler bei Öffnen von Datenkanal" #: src/hed/dmc/gridftp/Lister.cpp:801 #, c-format msgid "Unsupported protocol in url %s" msgstr "Nicht-unterstzütztes Protrokoll in URL %s" #: src/hed/dmc/gridftp/Lister.cpp:813 msgid "Reusing connection" msgstr "Wiederholte Nutzung von Verbindung" #: src/hed/dmc/gridftp/Lister.cpp:837 #, c-format msgid "Failed connecting to server %s:%d" msgstr "Fehler bei Verbinden zu server %s:%d" #: src/hed/dmc/gridftp/Lister.cpp:843 #, c-format msgid "Failed to connect to server %s:%d" msgstr "Fehler bei Verbinden zu server %s:%d" #: src/hed/dmc/gridftp/Lister.cpp:859 #, fuzzy msgid "Missing authentication information" msgstr "Ungültige Authentisierungs-Information" #: src/hed/dmc/gridftp/Lister.cpp:868 src/hed/dmc/gridftp/Lister.cpp:882 #, fuzzy, c-format msgid "Bad authentication information: %s" msgstr "Ungültige Authentisierungs-Information" #: src/hed/dmc/gridftp/Lister.cpp:891 src/hed/dmc/gridftp/Lister.cpp:906 #, fuzzy, c-format msgid "Failed authenticating: %s" 
msgstr "Fehler bei Authentisieren" #: src/hed/dmc/gridftp/Lister.cpp:898 msgid "Failed authenticating" msgstr "Fehler bei Authentisieren" #: src/hed/dmc/gridftp/Lister.cpp:933 src/hed/dmc/gridftp/Lister.cpp:1089 #, c-format msgid "DCAU failed: %s" msgstr "DCAU fehlgeschlagen: %s" #: src/hed/dmc/gridftp/Lister.cpp:937 src/hed/dmc/gridftp/Lister.cpp:1094 msgid "DCAU failed" msgstr "DCAU fehlgeschlagen" #: src/hed/dmc/gridftp/Lister.cpp:957 #, fuzzy msgid "MLST is not supported - trying LIST" msgstr "MLSD ist nicht unterstützt - versuche NLST" #: src/hed/dmc/gridftp/Lister.cpp:973 #, fuzzy, c-format msgid "Immediate completion expected: %s" msgstr "Sofortige Vervollständigung: %s" #: src/hed/dmc/gridftp/Lister.cpp:977 #, fuzzy msgid "Immediate completion expected" msgstr "Sofortige Vervollständigung: %s" #: src/hed/dmc/gridftp/Lister.cpp:990 #, fuzzy, c-format msgid "Missing information in reply: %s" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/hed/dmc/gridftp/Lister.cpp:1024 #, c-format msgid "Missing final reply: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1048 #, fuzzy, c-format msgid "Unexpected immediate completion: %s" msgstr "Sofortige Vervollständigung: %s" #: src/hed/dmc/gridftp/Lister.cpp:1060 #, fuzzy, c-format msgid "LIST/MLST failed: %s" msgstr "NLST/UMLSD fehlgeschlagen: %s" #: src/hed/dmc/gridftp/Lister.cpp:1065 #, fuzzy msgid "LIST/MLST failed" msgstr "NLST/UMLSD fehlgeschlagen" #: src/hed/dmc/gridftp/Lister.cpp:1115 msgid "MLSD is not supported - trying NLST" msgstr "MLSD ist nicht unterstützt - versuche NLST" #: src/hed/dmc/gridftp/Lister.cpp:1129 #, fuzzy, c-format msgid "Immediate completion: %s" msgstr "Sofortige Vervollständigung: %s" #: src/hed/dmc/gridftp/Lister.cpp:1137 #, c-format msgid "NLST/MLSD failed: %s" msgstr "NLST/UMLSD fehlgeschlagen: %s" #: src/hed/dmc/gridftp/Lister.cpp:1143 msgid "NLST/MLSD failed" msgstr "NLST/UMLSD fehlgeschlagen" #: src/hed/dmc/gridftp/Lister.cpp:1164 #, c-format msgid "Data transfer aborted: %s" msgstr "Datentransfer abgebrochen: %s" #: src/hed/dmc/gridftp/Lister.cpp:1169 msgid "Data transfer aborted" msgstr "Datentransfer abgebrochen" #: src/hed/dmc/gridftp/Lister.cpp:1181 msgid "Failed to transfer data" msgstr "Fehler bei Transfer von Daten" #: src/hed/dmc/http/DataPointHTTP.cpp:388 #: src/hed/dmc/http/DataPointHTTP.cpp:517 #: src/hed/dmc/http/DataPointHTTP.cpp:598 #: src/hed/dmc/http/DataPointHTTP.cpp:1000 #: src/hed/dmc/http/DataPointHTTP.cpp:1141 #: src/hed/dmc/http/DataPointHTTP.cpp:1286 #, fuzzy, c-format msgid "Redirecting to %s" msgstr "Weiterleitung zu neuer URL: %s" #: src/hed/dmc/http/DataPointHTTP.cpp:670 #, fuzzy, c-format msgid "Stat: obtained size %llu" msgstr "StartReading: erhielt Größe: %" #: src/hed/dmc/http/DataPointHTTP.cpp:674 #, fuzzy, c-format msgid "Stat: obtained modification time %s" msgstr "Check: erhielt Erstelldatum: %s" #: src/hed/dmc/http/DataPointHTTP.cpp:903 #, fuzzy, c-format msgid "Check: obtained size %llu" msgstr "Check: erhielt Größe: %lli" #: src/hed/dmc/http/DataPointHTTP.cpp:905 #, fuzzy, c-format msgid "Check: obtained modification time %s" msgstr "Check: erhielt Erstelldatum: %s" #: src/hed/dmc/http/DataPointHTTP.cpp:1017 #: src/hed/dmc/http/DataPointHTTP.cpp:1161 #, fuzzy, c-format msgid "HTTP failure %u - %s" msgstr "Proxy Pfad: %s" #: src/hed/dmc/ldap/DataPointLDAP.cpp:36 #, fuzzy msgid "" "Missing reference to factory and/or module. Currently safe unloading of LDAP " "DMC is not supported. Report to developers." 
msgstr "" "Fehlende Referenz zu factory und/doer Module. Es ist unsicher, Globus im " "nicht-persistenten Modus zu nutzen - (Grid)FTP code wurde disabled. Bitte " "die Entwickler informieren." #: src/hed/dmc/ldap/LDAPQuery.cpp:175 msgid "SASL Interaction" msgstr "SASL Interaktion" #: src/hed/dmc/ldap/LDAPQuery.cpp:223 #, fuzzy, c-format msgid "Challenge: %s" msgstr "Herausforderung: %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:227 #, c-format msgid "Default: %s" msgstr "Voreinstellung: %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:303 #, c-format msgid "LDAPQuery: Initializing connection to %s:%d" msgstr "LDAPQuery: Initialisiere Verbindung zu %s:%d" #: src/hed/dmc/ldap/LDAPQuery.cpp:307 #, c-format msgid "LDAP connection already open to %s" msgstr "LDAP Verbindung bereits offen zu %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:325 #, c-format msgid "Could not open LDAP connection to %s" msgstr "Konnte LDAP Verbindung nicht öffnen zu %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:346 #, c-format msgid "Failed to create ldap bind thread (%s)" msgstr "Fehler bei Anlegen von ldap bind thread (%s)" #: src/hed/dmc/ldap/LDAPQuery.cpp:353 #, c-format msgid "Ldap bind timeout (%s)" msgstr "Ldap bind timeout (%s)" #: src/hed/dmc/ldap/LDAPQuery.cpp:360 #, c-format msgid "Failed to bind to ldap server (%s)" msgstr "Fehler bei Verbinden zu ldap server (%s)" #: src/hed/dmc/ldap/LDAPQuery.cpp:381 #, c-format msgid "Could not set LDAP network timeout (%s)" msgstr "Konnte LDAP netowrk Zeitüberschreitung nicht setzen (%s)" #: src/hed/dmc/ldap/LDAPQuery.cpp:389 #, c-format msgid "Could not set LDAP timelimit (%s)" msgstr "Konnte LDAP Zeitlimit nicht setzen (%s)" #: src/hed/dmc/ldap/LDAPQuery.cpp:396 #, c-format msgid "Could not set LDAP protocol version (%s)" msgstr "Konnte LDAP Protokoll Version nicht setzen (%s)" #: src/hed/dmc/ldap/LDAPQuery.cpp:472 #, c-format msgid "LDAPQuery: Querying %s" msgstr "LDAPQuery: Frage an %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:474 #, c-format msgid " base dn: %s" msgstr " base dn: %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:476 #, c-format msgid " filter: %s" msgstr " Filter: %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:478 msgid " attributes:" msgstr " Attribute" #: src/hed/dmc/ldap/LDAPQuery.cpp:481 #: src/services/gridftpd/misc/ldapquery.cpp:399 #, c-format msgid " %s" msgstr " %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:526 src/hed/dmc/ldap/LDAPQuery.cpp:598 #, c-format msgid "%s (%s)" msgstr "%s (%s)" #: src/hed/dmc/ldap/LDAPQuery.cpp:550 #, c-format msgid "LDAPQuery: Getting results from %s" msgstr "LDAPQuery: Erhalte Ergebnisse von %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:553 #, c-format msgid "Error: no LDAP query started to %s" msgstr "Fehler: keine LDAP Anfrage gestartet bei %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:593 #, c-format msgid "LDAP query timed out: %s" msgstr "Zeitüberschreibung bei LDAP Anfrage: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:23 #, c-format msgid "Replacing existing token for %s in Rucio token cache" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:36 #, c-format msgid "Found existing token for %s in Rucio token cache with expiry time %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:39 #, c-format msgid "Rucio token for %s has expired or is about to expire" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:101 #, c-format msgid "Extracted nickname %s from credentials to use for RUCIO_ACCOUNT" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:104 msgid "Failed to extract VOMS nickname from proxy" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:106 #, fuzzy, c-format msgid "Using Rucio 
account %s" msgstr "Nutze space token %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:146 #, c-format msgid "" "Bad path for %s: Rucio supports read/write at /objectstores and read-only " "at /replicas" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:162 #, fuzzy, c-format msgid "Can't handle URL %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:303 #, c-format msgid "Acquired auth token for %s: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:357 #, fuzzy, c-format msgid "Rucio returned %s" msgstr "unbekannter return code %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:382 #, fuzzy, c-format msgid "Failed to parse Rucio response: %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:388 #, c-format msgid "Filename not returned in Rucio response: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:394 #, fuzzy, c-format msgid "Unexpected name returned in Rucio response: %s" msgstr "Sofortige Vervollständigung: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:400 #, c-format msgid "No RSE information returned in Rucio response: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:423 #, c-format msgid "No filesize information returned in Rucio response for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:426 #, c-format msgid "%s: size %llu" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:430 #, c-format msgid "No checksum information returned in Rucio response for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:433 #, fuzzy, c-format msgid "%s: checksum %s" msgstr "Errechneted checksum: %s" #: src/hed/dmc/s3/DataPointS3.cpp:648 #, fuzzy, c-format msgid "Failed to write object %s: %s" msgstr "Fehler bei Lesen von Objekt %s: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:67 #, c-format msgid "Check: looking for metadata: %s" msgstr "Check: looking für Metadata: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:79 #, c-format msgid "Check: obtained size: %lli" msgstr "Check: erhielt Größe: %lli" #: src/hed/dmc/srm/DataPointSRM.cpp:85 #, c-format msgid "Check: obtained checksum: %s" msgstr "Check: erhielt checksum: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:89 #, fuzzy, c-format msgid "Check: obtained modification date: %s" msgstr "Check: erhielt Erstelldatum: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:93 #, fuzzy msgid "Check: obtained access latency: low (ONLINE)" msgstr "Check: erhielt Erstelldatum: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:97 #, fuzzy msgid "Check: obtained access latency: high (NEARLINE)" msgstr "Check: erhielt Erstelldatum: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:119 #, fuzzy, c-format msgid "Remove: deleting: %s" msgstr "remove_srm: lösche: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:139 #, fuzzy, c-format msgid "Creating directory: %s" msgstr "Lege Verzeichnis %s an" #: src/hed/dmc/srm/DataPointSRM.cpp:190 src/hed/dmc/srm/DataPointSRM.cpp:243 msgid "Calling PrepareReading when request was already prepared!" 
msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:212 #, c-format msgid "File %s is NEARLINE, will make request to bring online" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:222 #, c-format msgid "Bring online request %s is still in queue, should wait" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:227 #, c-format msgid "Bring online request %s finished successfully, file is now ONLINE" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:234 #, c-format msgid "" "Bad logic for %s - bringOnline returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:263 src/hed/dmc/srm/DataPointSRM.cpp:424 msgid "None of the requested transfer protocols are supported" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:278 #, fuzzy, c-format msgid "Get request %s is still in queue, should wait %i seconds" msgstr "%s: Datei Anfrage %s in SRM queue. Schlage für %i Sekunden" #: src/hed/dmc/srm/DataPointSRM.cpp:286 src/hed/dmc/srm/DataPointSRM.cpp:488 #, c-format msgid "Checking URL returned by SRM: %s" msgstr "Überprüfen der URL zurückgegeben von SRM: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:301 src/hed/dmc/srm/DataPointSRM.cpp:503 #, c-format msgid "SRM returned no useful Transfer URLs: %s" msgstr "SRM gab keine nützliche Transfer URLs: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:308 #, c-format msgid "" "Bad logic for %s - getTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:318 #, fuzzy msgid "StartReading: File was not prepared properly" msgstr "AREXClient wurde nicht richtig angelegt." #: src/hed/dmc/srm/DataPointSRM.cpp:332 src/hed/dmc/srm/DataPointSRM.cpp:534 #, fuzzy, c-format msgid "TURL %s cannot be handled" msgstr "PDP: %s kann nicht geladen werden" #: src/hed/dmc/srm/DataPointSRM.cpp:340 src/hed/dmc/srm/DataPointSRM.cpp:542 #, fuzzy, c-format msgid "Redirecting to new URL: %s" msgstr "Weiterleitung zu neuer URL: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:404 msgid "Calling PrepareWriting when request was already prepared!" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:435 #, fuzzy msgid "No space token specified" msgstr "Kein space token angegeben" #: src/hed/dmc/srm/DataPointSRM.cpp:441 msgid "Warning: Using SRM protocol v1 which does not support space tokens" msgstr "Warnung: Nutze SRM Protokol v1 das keine space tokens unterstützt" #: src/hed/dmc/srm/DataPointSRM.cpp:444 #, fuzzy, c-format msgid "Using space token description %s" msgstr "Nutze space token Beschreibugn %s" #: src/hed/dmc/srm/DataPointSRM.cpp:450 #, fuzzy, c-format msgid "Error looking up space tokens matching description %s" msgstr "" "Warnung: Fehler beim Nachschlagen von space tokens, entsprechend der " "Beschreibung %s. Kopiere ohne Nutzung der Token" #: src/hed/dmc/srm/DataPointSRM.cpp:457 #, fuzzy, c-format msgid "No space tokens found matching description %s" msgstr "Nutze space token Beschreibugn %s" #: src/hed/dmc/srm/DataPointSRM.cpp:464 #, c-format msgid "Using space token %s" msgstr "Nutze space token %s" #: src/hed/dmc/srm/DataPointSRM.cpp:480 #, fuzzy, c-format msgid "Put request %s is still in queue, should wait %i seconds" msgstr "%s: Datei Anfrage %s in SRM queue. 
Schlage für %i Sekunden" #: src/hed/dmc/srm/DataPointSRM.cpp:510 #, c-format msgid "" "Bad logic for %s - putTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:520 #, fuzzy msgid "StartWriting: File was not prepared properly" msgstr "AREXClient wurde nicht richtig angelegt." #: src/hed/dmc/srm/DataPointSRM.cpp:593 #, fuzzy, c-format msgid "FinishWriting: looking for metadata: %s" msgstr "ListFiles: suche nach Metadata: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:610 #, fuzzy, c-format msgid "FinishWriting: obtained checksum: %s" msgstr "StartReading: erhielt checksum: %s:%s" #: src/hed/dmc/srm/DataPointSRM.cpp:613 #, c-format msgid "" "Calculated/supplied transfer checksum %s matches checksum reported by SRM " "destination %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:616 #, c-format msgid "" "Checksum mismatch between calculated/supplied checksum (%s) and checksum " "reported by SRM destination (%s)" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:619 #, c-format msgid "" "Checksum type of SRM (%s) and calculated/supplied checksum (%s) differ, " "cannot compare" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:620 src/hed/dmc/srm/DataPointSRM.cpp:621 msgid "No checksum information from server" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:622 src/hed/dmc/srm/DataPointSRM.cpp:623 msgid "No checksum verification possible" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:629 #, fuzzy msgid "Failed to release completed request" msgstr "Fehler bei Lesen von Objekt %s" #: src/hed/dmc/srm/DataPointSRM.cpp:673 src/hed/dmc/srm/DataPointSRM.cpp:740 #, fuzzy, c-format msgid "ListFiles: looking for metadata: %s" msgstr "ListFiles: suche nach Metadata: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:806 #, c-format msgid "plugin for transport protocol %s is not installed" msgstr "" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:55 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:94 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:146 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:185 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:225 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:263 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:307 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:369 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:442 msgid "SRM did not return any information" msgstr "SRM lieferte keine Information zurück" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:320 #, fuzzy, c-format msgid "File could not be moved to Running state: %s" msgstr "Datei konnte nicht zu Running Zustand bewegt werden: %s" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:376 msgid "SRM did not return any useful information" msgstr "SRM lieferte keinerlei gebrauchbare Information" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:454 #, fuzzy msgid "File could not be moved to Done state" msgstr "Datei konnte nicht zu Done Zustand bewegt werden" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:92 msgid "Could not determine version of server" msgstr "Konnte Version des Server nicht bestimmen" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:98 #, c-format msgid "Server SRM version: %s" msgstr "Server SRM version: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:103 #, c-format msgid "Server implementation: %s" msgstr "Server Implementation: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:140 #, fuzzy, c-format msgid "Adding space token %s" msgstr "Füge space token %s hinzu" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:167 #, fuzzy msgid "No request tokens found" msgstr "Keine Anfrage-Token gefunden" #: 
src/hed/dmc/srm/srmclient/SRM22Client.cpp:180 #, fuzzy, c-format msgid "Adding request token %s" msgstr "Füge Anfrage-Token %s hinzu" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:241 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:646 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:832 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1389 #, c-format msgid "%s: File request %s in SRM queue. Sleeping for %i seconds" msgstr "%s: Datei Anfrage %s in SRM queue. Schlage für %i Sekunden" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:279 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:331 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:702 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:768 #, fuzzy, c-format msgid "File is ready! TURL is %s" msgstr "Datei ist bereit! Die URL ist %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:363 #, fuzzy, c-format msgid "Setting userRequestDescription to %s" msgstr "Setzer userRequestDescription zu %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:418 #, fuzzy, c-format msgid "%s: Bring online request %s in SRM queue. Sleeping for %i seconds" msgstr "%s: Datei Anfrage %s in SRM queue. Schlage für %i Sekunden" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:461 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1164 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1198 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1232 #, fuzzy msgid "No request token specified!" msgstr "Keine Anfrage-Token spezifiziert!" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:528 msgid "Request is reported as ABORTED, but all files are done" msgstr "" "Anfrage wurde berichtet als ABORTED (abgebrochen), aber alle Dateien wurden " "bearbeitet" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:534 msgid "Request is reported as ABORTED, since it was cancelled" msgstr "" "Anfrage wurde berichtet als ABORTED (abgebrochen), denn sie wurde abgebrochen" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:540 #, c-format msgid "Request is reported as ABORTED. Reason: %s" msgstr "Anfrage wurde berichtet als ABORTED (abgebrochen). 
Grund: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:677 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:749 #, c-format msgid "Path %s is invalid, creating required directories" msgstr "Pfad %s ist ungültig, lege benötigte Verzeichnisse an" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:682 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:754 #, fuzzy, c-format msgid "Error creating required directories for %s" msgstr "Fehler bei Anlegen von benötigten Verzeichnissen für %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:855 msgid "Too many files in one request - please try again with fewer files" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:903 #, fuzzy msgid "" "Directory size is too large to list in one call, will have to call multiple " "times" msgstr "" "Verzeichnis enthält mehr als %i Dateien, werde Aufruf mehrfach ausführen" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:940 msgid "" "Failure in parsing response from server - some information may be inaccurate" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:946 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:635 #: src/services/a-rex/jura/ApelDestination.cpp:215 #: src/services/a-rex/jura/LutsDestination.cpp:192 #: src/services/gridftpd/misc/ldapquery.cpp:183 #: src/services/gridftpd/misc/ldapquery.cpp:186 #: src/services/gridftpd/misc/ldapquery.cpp:392 #: src/services/gridftpd/misc/ldapquery.cpp:623 #: src/services/gridftpd/misc/ldapquery.cpp:632 #, c-format msgid "%s: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:979 #, c-format msgid "" "Directory size is larger than %i files, will have to call multiple times" msgstr "" "Verzeichnis enthält mehr als %i Dateien, werde Aufruf mehrfach ausführen" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1189 #, c-format msgid "Files associated with request token %s released successfully" msgstr "Dateien assoziiert mit Anfrage Token %s erfolgreich freigegeben" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1223 #, fuzzy, c-format msgid "Files associated with request token %s put done successfully" msgstr "Dateien assoziiert mit Anfrage Token %s erfolgreich put done" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1258 #, c-format msgid "Files associated with request token %s aborted successfully" msgstr "Dateien assoziiert mit Anfrage Token %s erfolgreich abgebrochen" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1275 #, fuzzy, c-format msgid "" "Failed to find metadata info on %s for determining file or directory delete" msgstr "Konnte Metadaen für Datei %s nicht finden" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1281 msgid "Type is file, calling srmRm" msgstr "Typ ist Datei, rufe srmRm auf" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1285 #, fuzzy msgid "Type is dir, calling srmRmDir" msgstr "Typ ist Datei, rufe srmRmDir auf" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1289 msgid "File type is not available, attempting file delete" msgstr "Dateitype ist nicht verfügbar, versuche Datei zu löschen" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1292 msgid "File delete failed, attempting directory delete" msgstr "Löschen von Datei schlug fehl, versuche als Verzeichnis zu löschen" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1317 #, fuzzy, c-format msgid "File %s removed successfully" msgstr "Datei %s erfolgreich entfernt" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1344 #, fuzzy, c-format msgid "Directory %s removed successfully" msgstr "Verzeichnis %s erfolgreich entfernt" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1459 #, fuzzy, c-format msgid "Checking for 
existence of %s" msgstr "Suche nache Existenz von %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1462 #, fuzzy, c-format msgid "File already exists: %s" msgstr "LFN existiert bereits in LFC" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1499 #, fuzzy, c-format msgid "Error creating directory %s: %s" msgstr "Fehler bei Anlegen von Verzeichnis %s: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:82 #, c-format msgid "Attempting to contact %s on port %i" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:88 #, fuzzy, c-format msgid "Storing port %i for %s" msgstr "Nicht-unterstzütztes Protrokoll in URL %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:102 #, fuzzy, c-format msgid "No port succeeded for %s" msgstr "Keine locations gefunden für %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:112 #, c-format msgid "URL %s disagrees with stored SRM info, testing new info" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:118 #, c-format msgid "Replacing old SRM info with new for URL %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:140 #, fuzzy, c-format msgid "SOAP request: %s" msgstr "XACML Anfrage: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:147 #: src/hed/dmc/srm/srmclient/SRMClient.cpp:176 #, fuzzy, c-format msgid "SOAP fault: %s" msgstr "Voreinstellung: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:148 #, fuzzy msgid "Reconnecting" msgstr "Wiederholte Nutzung von Verbindung" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:158 #, fuzzy, c-format msgid "SRM Client status: %s" msgstr "*** Client Anfrage: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:171 #: src/hed/identitymap/ArgusPDPClient.cpp:250 #, fuzzy, c-format msgid "SOAP response: %s" msgstr "Keine SOAP Antwort" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:76 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:162 #, fuzzy, c-format msgid "Failed to acquire lock on file %s" msgstr "Fehler bei Unlock von Datei %s: %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:81 #, fuzzy, c-format msgid "Error reading info from file %s:%s" msgstr "Fehler bei Lesen von Meta-Datei %s: %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:95 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:187 #, fuzzy, c-format msgid "Bad or old format detected in file %s, in line %s" msgstr "Formatierungsfehler erkannt in Datei %s, in Zeile %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:100 #, fuzzy, c-format msgid "Cannot convert string %s to int in line %s" msgstr "Formatierungsfehler erkannt in Datei %s, in Zeile %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:203 #, fuzzy, c-format msgid "Error writing srm info file %s" msgstr "Fehler bei Lesen von Meta-Datei %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:91 #, c-format msgid "Reading %u bytes from byte %llu" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:93 #, c-format msgid "Read %i bytes" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:124 #, fuzzy, c-format msgid "Could not open file %s for reading: %s" msgstr "Fehler bei Öffnen von Datei %s zum Lesen: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:139 #, fuzzy, c-format msgid "Unable to find file size of %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:203 #, c-format msgid "DataPointXrootd::write_file got position %d and offset %d, has to seek" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:226 #, fuzzy, c-format msgid "xrootd write failed: %s" msgstr "SendData: Schreiben von Daten schlug fehl: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:235 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:309 #, fuzzy, c-format msgid "xrootd close failed: 
%s" msgstr "Verbindung zu %s schlug fehl: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:258 #, c-format msgid "Failed to open %s, trying to create parent directories" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:271 #, fuzzy, c-format msgid "xrootd open failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:285 #, fuzzy, c-format msgid "close failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:327 #, c-format msgid "Read access not allowed for %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:346 #, fuzzy, c-format msgid "Could not stat file %s: %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:381 #, fuzzy, c-format msgid "Failed to open directory %s: %s" msgstr "Fehler beim Entfernen von LFC Verzeichnis: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:396 #, fuzzy, c-format msgid "Error while reading dir %s: %s" msgstr "Fehler beim Listen von Verzeichnis: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:446 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:464 #, fuzzy, c-format msgid "Error creating required dirs: %s" msgstr "Fehler bei Anlegen benötigter Verzeichnisse: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:158 #, fuzzy msgid "PDPD location is missing" msgstr "Location fehlt" #: src/hed/identitymap/ArgusPDPClient.cpp:161 #, fuzzy, c-format msgid "PDPD location: %s" msgstr "Füge location hinzu: url: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:166 #: src/hed/identitymap/ArgusPEPClient.cpp:129 msgid "Conversion mode is set to SUBJECT" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:169 #: src/hed/identitymap/ArgusPEPClient.cpp:132 msgid "Conversion mode is set to CREAM" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:172 #: src/hed/identitymap/ArgusPEPClient.cpp:135 msgid "Conversion mode is set to EMI" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:175 #: src/hed/identitymap/ArgusPEPClient.cpp:138 #, c-format msgid "Unknown conversion mode %s, using default" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:242 #, fuzzy, c-format msgid "Failed to contact PDP server: %s" msgstr "Fehler beim Verbinden zu RLS server: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:245 #, fuzzy, c-format msgid "There was no SOAP response return from PDP server: %s" msgstr "Keine SOAP response erhalten" #: src/hed/identitymap/ArgusPDPClient.cpp:360 #: src/hed/identitymap/ArgusPEPClient.cpp:286 #, c-format msgid "Have %i requests to process" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:362 #, fuzzy msgid "Creating a client to Argus PDP service" msgstr "Erstelle Client Schnitstelle" #: src/hed/identitymap/ArgusPDPClient.cpp:375 #, fuzzy, c-format msgid "XACML authorisation request: %s" msgstr "GACL Auth. Anfrage. 
%s" #: src/hed/identitymap/ArgusPDPClient.cpp:386 #, fuzzy, c-format msgid "XACML authorisation response: %s" msgstr "Es wurde keine authorization response erwidert" #: src/hed/identitymap/ArgusPDPClient.cpp:426 #: src/hed/identitymap/ArgusPEPClient.cpp:333 #, c-format msgid "%s is not authorized to do action %s in resource %s " msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:429 #: src/hed/identitymap/ArgusPDPClient.cpp:434 #: src/hed/identitymap/ArgusPEPClient.cpp:336 #, fuzzy msgid "Not authorized" msgstr "echo: Unauthorisiert" #: src/hed/identitymap/ArgusPDPClient.cpp:439 #: src/hed/identitymap/ArgusPEPClient.cpp:341 #: src/hed/identitymap/IdentityMap.cpp:219 #: src/hed/shc/legacy/LegacyMap.cpp:215 #, c-format msgid "Grid identity is mapped to local identity '%s'" msgstr "Grid Identität wird zugewiesen zu lokaler Identität '%s'" #: src/hed/identitymap/ArgusPDPClient.cpp:566 #: src/hed/identitymap/ArgusPEPClient.cpp:655 #, fuzzy msgid "Doing CREAM request" msgstr "Verarbeite %s Anfrage" #: src/hed/identitymap/ArgusPDPClient.cpp:580 #: src/hed/identitymap/ArgusPDPClient.cpp:748 #: src/hed/identitymap/ArgusPEPClient.cpp:683 #, fuzzy, c-format msgid "Adding profile-id value: %s" msgstr "Verbindung zu %s schlug fehl: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:592 #: src/hed/identitymap/ArgusPDPClient.cpp:759 #: src/hed/identitymap/ArgusPEPClient.cpp:694 #, fuzzy, c-format msgid "Adding subject-id value: %s" msgstr "Füge Anfrage-Token %s hinzu" #: src/hed/identitymap/ArgusPDPClient.cpp:600 #: src/hed/identitymap/ArgusPDPClient.cpp:767 #: src/hed/identitymap/ArgusPEPClient.cpp:704 #, c-format msgid "Adding subject-issuer value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:607 #: src/hed/identitymap/ArgusPEPClient.cpp:713 #, c-format msgid "Adding virtual-organization value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:620 #: src/hed/identitymap/ArgusPEPClient.cpp:730 #, c-format msgid "Adding FQAN value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:629 #: src/hed/identitymap/ArgusPEPClient.cpp:739 #, c-format msgid "Adding FQAN/primary value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:639 #: src/hed/identitymap/ArgusPEPClient.cpp:750 #, fuzzy, c-format msgid "Adding cert chain value: %s" msgstr "Anlegen von Socket schlug fehl: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:648 #: src/hed/identitymap/ArgusPDPClient.cpp:840 #, fuzzy, c-format msgid "Adding resource-id value: %s" msgstr "Addressen-Auflösung schlug fehl: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:662 #: src/hed/identitymap/ArgusPDPClient.cpp:863 #: src/hed/identitymap/ArgusPEPClient.cpp:775 #, fuzzy, c-format msgid "Adding action-id value: %s" msgstr "Füge location hinzu: %s - %s" #: src/hed/identitymap/ArgusPDPClient.cpp:666 #: src/hed/identitymap/ArgusPEPClient.cpp:786 #, fuzzy, c-format msgid "CREAM request generation failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:732 #, fuzzy msgid "Doing EMI request" msgstr "Verarbeite %s Anfrage" #: src/hed/identitymap/ArgusPDPClient.cpp:774 #, c-format msgid "Adding Virtual Organization value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:797 #, c-format msgid "Adding VOMS group value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:803 #, c-format msgid "Adding VOMS primary group value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:822 #, c-format msgid "Adding VOMS role value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:829 #, c-format msgid "Adding 
VOMS primary role value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:846 #, fuzzy, c-format msgid "Adding resource-owner value: %s" msgstr "Addressen-Auflösung schlug fehl: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:867 #, fuzzy, c-format msgid "EMI request generation failed: %s" msgstr "Anlegen von Socket schlug fehl: %s" #: src/hed/identitymap/ArgusPEPClient.cpp:119 #, fuzzy msgid "PEPD location is missing" msgstr "Location fehlt" #: src/hed/identitymap/ArgusPEPClient.cpp:122 #, fuzzy, c-format msgid "PEPD location: %s" msgstr "Füge location hinzu: url: %s" #: src/hed/identitymap/ArgusPEPClient.cpp:126 msgid "Conversion mode is set to DIRECT" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:331 #, c-format msgid "" "Not authorized according to request:\n" "%s" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:361 msgid "Subject of request is null \n" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:366 #, fuzzy, c-format msgid "Can not create XACML SubjectAttribute: %s\n" msgstr "Kann Funktion %s nicht anlegen" #: src/hed/identitymap/ArgusPEPClient.cpp:375 #, fuzzy msgid "Can not create XACML Resource \n" msgstr "Kann Resource ID nicht erhalten" #: src/hed/identitymap/ArgusPEPClient.cpp:381 #, fuzzy, c-format msgid "Can not create XACML ResourceAttribute: %s\n" msgstr "Kann Funktion %s nicht anlegen" #: src/hed/identitymap/ArgusPEPClient.cpp:390 #, fuzzy msgid "Can not create XACML Action\n" msgstr "Kann Funktion %s nicht anlegen" #: src/hed/identitymap/ArgusPEPClient.cpp:397 #, fuzzy, c-format msgid "Can not create XACML ActionAttribute: %s\n" msgstr "Kann Funktion %s nicht anlegen" #: src/hed/identitymap/ArgusPEPClient.cpp:407 #, fuzzy msgid "Can not create XACML request\n" msgstr "Kann doc Argument nicht anlegen" #: src/hed/identitymap/ArgusPEPClient.cpp:539 #, c-format msgid "Converting to CREAM action - namespace: %s, operation: %s" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:760 #, fuzzy, c-format msgid "Adding resoure-id value: %s" msgstr "Addressen-Auflösung schlug fehl: %s" #: src/hed/identitymap/IdentityMap.cpp:196 #, c-format msgid "PDP: %s can not be loaded" msgstr "PDP: %s kann nicht geladen werden" #: src/hed/libs/common/ArcLocation.cpp:68 #, c-format msgid "" "Can not determine the install location. Using %s. Please set ARC_LOCATION if " "this is not correct." 
msgstr "" #: src/hed/libs/common/DateTime.cpp:86 src/hed/libs/common/DateTime.cpp:631 #: src/hed/libs/common/StringConv.h:25 msgid "Empty string" msgstr "" #: src/hed/libs/common/DateTime.cpp:107 #, c-format msgid "Can not parse date: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:130 #, c-format msgid "Can not parse time: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:160 #, c-format msgid "Can not parse time zone offset: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:180 src/hed/libs/common/DateTime.cpp:199 #: src/hed/libs/common/DateTime.cpp:252 src/hed/libs/common/DateTime.cpp:291 #, c-format msgid "Illegal time format: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:230 src/hed/libs/common/DateTime.cpp:283 #, c-format msgid "Can not parse month: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:647 src/hed/libs/common/DateTime.cpp:688 #, c-format msgid "Invalid ISO duration format: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:752 #, c-format msgid "Invalid period string: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:874 msgid "hour" msgid_plural "hours" msgstr[0] "Stunden" msgstr[1] "Stunde" msgstr[2] "Stunden" #: src/hed/libs/common/DateTime.cpp:880 msgid "minute" msgid_plural "minutes" msgstr[0] "Minuten" msgstr[1] "Minute" msgstr[2] "Minuten" #: src/hed/libs/common/DateTime.cpp:886 msgid "second" msgid_plural "seconds" msgstr[0] "Sekunden" msgstr[1] "Sekunde" msgstr[2] "Sekunden" #: src/hed/libs/common/FileLock.cpp:48 #, fuzzy msgid "Cannot determine hostname from gethostname()" msgstr "Kann hostname von uname nciht ermitteln" #: src/hed/libs/common/FileLock.cpp:97 #, fuzzy, c-format msgid "EACCES Error opening lock file %s: %s" msgstr "EACCESS Fehler bei Öffnen von Lock-Datei %s: %s" #: src/hed/libs/common/FileLock.cpp:102 #, c-format msgid "Error opening lock file %s in initial check: %s" msgstr "Fehler bei Öffnen von Lock-Datei %s in initialer Überprüfung: %s" #: src/hed/libs/common/FileLock.cpp:109 #, fuzzy, c-format msgid "Error creating temporary file %s: %s" msgstr "Fehler bei Lesen von Meta-Datei %s: %s" #: src/hed/libs/common/FileLock.cpp:118 #, c-format msgid "Could not create link to lock file %s as it already exists" msgstr "" #: src/hed/libs/common/FileLock.cpp:129 #, fuzzy, c-format msgid "Could not create lock file %s as it already exists" msgstr "Konnte temporäre Datei nicht anlegen: %s" #: src/hed/libs/common/FileLock.cpp:133 #, fuzzy, c-format msgid "Error creating lock file %s: %s" msgstr "Fehler bei Lesen von Lock-datei %s. 
%s" #: src/hed/libs/common/FileLock.cpp:138 #, fuzzy, c-format msgid "Error writing to lock file %s: %s" msgstr "Fehler beim Schreiben zu tmp lock Datei %s: %s" #: src/hed/libs/common/FileLock.cpp:146 #, fuzzy, c-format msgid "Error linking tmp file %s to lock file %s: %s" msgstr "Fehler bei Umbenennen von temporärer Datei %s zu Lock_datei %s: %s" #: src/hed/libs/common/FileLock.cpp:155 #, fuzzy, c-format msgid "Error in lock file %s, even though linking did not return an error" msgstr "" "Fehler bei Umbenennen von Lock-Datei, obwohl rename() keinen Fehler " "zurücklieferte" #: src/hed/libs/common/FileLock.cpp:164 #, fuzzy, c-format msgid "%li seconds since lock file %s was created" msgstr "%li Sekunden seit Lock-Datei engelegt wurde" #: src/hed/libs/common/FileLock.cpp:167 #, fuzzy, c-format msgid "Timeout has expired, will remove lock file %s" msgstr "Zeitüberschreitung, werde lock-Datei entfernen" #: src/hed/libs/common/FileLock.cpp:171 #, fuzzy, c-format msgid "Failed to remove stale lock file %s: %s" msgstr "Fehler bei Erstellen von Info-Datei %s: %s" #: src/hed/libs/common/FileLock.cpp:183 #, fuzzy, c-format msgid "This process already owns the lock on %s" msgstr "Warnung: Diesem Prozess gehört der Lock bereits" #: src/hed/libs/common/FileLock.cpp:189 #, fuzzy, c-format msgid "" "The process owning the lock on %s is no longer running, will remove lock" msgstr "" "Der Prozesse, dem der Lock gehört, läuft nicht mehr. Der Lock wird entfernt." #: src/hed/libs/common/FileLock.cpp:191 #, fuzzy, c-format msgid "Failed to remove file %s: %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/libs/common/FileLock.cpp:200 #, fuzzy, c-format msgid "The file %s is currently locked with a valid lock" msgstr "Die Datei ist derzeit gelockt mit einem gültigen Lock" #: src/hed/libs/common/FileLock.cpp:215 #, fuzzy, c-format msgid "Failed to unlock file with lock %s: %s" msgstr "Fehler bei Unlock von Datei mit Lock %s: %s" #: src/hed/libs/common/FileLock.cpp:227 #, fuzzy, c-format msgid "Lock file %s doesn't exist" msgstr "Lock-Datei %s existiert nicht" #: src/hed/libs/common/FileLock.cpp:229 #, fuzzy, c-format msgid "Error listing lock file %s: %s" msgstr "Fehler bei Listing von Lock-Datei %s: %s" #: src/hed/libs/common/FileLock.cpp:235 #, fuzzy, c-format msgid "Found unexpected empty lock file %s. Must go back to acquire()" msgstr "" "Ein anderer Prozess besitzt das Lock auf Datei %s. Muss zurück zu Start()" #: src/hed/libs/common/FileLock.cpp:241 #, fuzzy, c-format msgid "Error reading lock file %s: %s" msgstr "Fehler bei Lesen von Lock-datei %s. %s" #: src/hed/libs/common/FileLock.cpp:245 #, fuzzy, c-format msgid "Error with formatting in lock file %s" msgstr "Fehler bei Formatieren von Lock-Datei %s: %s" #: src/hed/libs/common/FileLock.cpp:255 #, fuzzy, c-format msgid "Lock %s is owned by a different host (%s)" msgstr "Lock gehört einem anderen host" #: src/hed/libs/common/FileLock.cpp:264 #, fuzzy, c-format msgid "Badly formatted pid %s in lock file %s" msgstr "Formatierungsfehler erkannt in Datei %s, in Zeile %s" #: src/hed/libs/common/FileLock.cpp:267 #, fuzzy, c-format msgid "Another process (%s) owns the lock on file %s" msgstr "" "Ein anderer Prozess besitzt das Lock auf Datei %s. 
Muss zurück zu Start()" #: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(empty)" msgstr "" #: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(null)" msgstr "" #: src/hed/libs/common/Logger.cpp:60 #, fuzzy msgid "Invalid log level. Using default " msgstr "Ungültiger Wert für Priority, nutze Voreinstellung von 10" #: src/hed/libs/common/Logger.cpp:125 msgid "Invalid old log level. Using default " msgstr "" #: src/hed/libs/common/OptionParser.cpp:107 #, c-format msgid "Cannot parse integer value '%s' for -%c" msgstr "" #: src/hed/libs/common/OptionParser.cpp:265 #, fuzzy msgid "Use -? to get usage description" msgstr "Nutze space token Beschreibugn %s" #: src/hed/libs/common/OptionParser.cpp:342 msgid "Usage:" msgstr "" #: src/hed/libs/common/OptionParser.cpp:345 msgid "OPTION..." msgstr "" #: src/hed/libs/common/OptionParser.cpp:351 msgid "Help Options:" msgstr "" #: src/hed/libs/common/OptionParser.cpp:352 msgid "Show help options" msgstr "" #: src/hed/libs/common/OptionParser.cpp:354 msgid "Application Options:" msgstr "" #: src/hed/libs/common/Profile.cpp:199 src/hed/libs/common/Profile.cpp:273 #: src/hed/libs/common/Profile.cpp:404 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"inisections\" " "attribute cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:205 src/hed/libs/common/Profile.cpp:279 #: src/hed/libs/common/Profile.cpp:411 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initag\" attribute " "cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:419 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initype\" " "attribute cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:422 #, c-format msgid "" "Element \"%s\" in the profile ignored: the \"inidefaultvalue\" attribute " "cannot be specified when the \"inisections\" and \"initag\" attributes have " "not been specified." msgstr "" #: src/hed/libs/common/Profile.cpp:497 #, c-format msgid "" "In the configuration profile the 'initype' attribute on the \"%s\" element " "has a invalid value \"%s\"." 
msgstr "" #: src/hed/libs/common/StringConv.h:31 #, c-format msgid "Conversion failed: %s" msgstr "" #: src/hed/libs/common/StringConv.h:35 #, c-format msgid "Full string not used: %s" msgstr "" #: src/hed/libs/common/Thread.cpp:212 msgid "Maximum number of threads running - puting new request into queue" msgstr "" #: src/hed/libs/common/Thread.cpp:253 #, fuzzy, c-format msgid "Thread exited with Glib error: %s" msgstr "ftp_read_thread: Globus Fehler: %s" #: src/hed/libs/common/Thread.cpp:255 #, c-format msgid "Thread exited with Glib exception: %s" msgstr "" #: src/hed/libs/common/Thread.cpp:257 #, c-format msgid "Thread exited with generic exception: %s" msgstr "" #: src/hed/libs/common/URL.cpp:121 #, c-format msgid "URL is not valid: %s" msgstr "" #: src/hed/libs/common/URL.cpp:192 #, c-format msgid "Illegal URL - path must be absolute: %s" msgstr "" #: src/hed/libs/common/URL.cpp:197 #, c-format msgid "Illegal URL - no hostname given: %s" msgstr "" #: src/hed/libs/common/URL.cpp:286 #, c-format msgid "Illegal URL - path must be absolute or empty: %s" msgstr "" #: src/hed/libs/common/URL.cpp:302 #, c-format msgid "Illegal URL - no closing ] for IPv6 address found: %s" msgstr "" #: src/hed/libs/common/URL.cpp:310 #, c-format msgid "" "Illegal URL - closing ] for IPv6 address is followed by illegal token: %s" msgstr "" #: src/hed/libs/common/URL.cpp:326 #, fuzzy, c-format msgid "Invalid port number in %s" msgstr "Ungültige url: %s" #: src/hed/libs/common/URL.cpp:425 #, c-format msgid "Unknown LDAP scope %s - using base" msgstr "" #: src/hed/libs/common/URL.cpp:587 msgid "Attempt to assign relative path to URL - making it absolute" msgstr "" #: src/hed/libs/common/URL.cpp:686 #, c-format msgid "URL option %s does not have format name=value" msgstr "" #: src/hed/libs/common/URL.cpp:1151 #, c-format msgid "urllist %s contains invalid URL: %s" msgstr "" #: src/hed/libs/common/URL.cpp:1156 #, c-format msgid "URL protocol is not urllist: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:30 #: src/hed/libs/common/UserConfig.cpp:681 #: src/hed/libs/common/UserConfig.cpp:690 #: src/hed/libs/common/UserConfig.cpp:696 #: src/hed/libs/common/UserConfig.cpp:718 #: src/hed/libs/common/UserConfig.cpp:728 #: src/hed/libs/common/UserConfig.cpp:740 #: src/hed/libs/common/UserConfig.cpp:760 #, c-format msgid "Multiple %s attributes in configuration file (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:82 #, fuzzy, c-format msgid "Wrong ownership of certificate file: %s" msgstr "Fehler bei Ändern des Besitzers der destination Datei %s: %s" #: src/hed/libs/common/UserConfig.cpp:84 #, fuzzy, c-format msgid "Wrong permissions of certificate file: %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/common/UserConfig.cpp:86 #, c-format msgid "Can not access certificate file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:93 #, fuzzy, c-format msgid "Wrong ownership of key file: %s" msgstr "Fehler bei Ändern des Besitzers der destination Datei %s: %s" #: src/hed/libs/common/UserConfig.cpp:95 #, fuzzy, c-format msgid "Wrong permissions of key file: %s" msgstr "Fehler bei Änderung von Zugriffsrechten auf Datei %s: %s" #: src/hed/libs/common/UserConfig.cpp:97 #, c-format msgid "Can not access key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:104 #, fuzzy, c-format msgid "Wrong ownership of proxy file: %s" msgstr "Fehler bei Ändern des Besitzers der destination Datei %s: %s" #: src/hed/libs/common/UserConfig.cpp:106 #, fuzzy, c-format msgid "Wrong permissions of proxy file: 
%s" msgstr "Fehler bei Änderung von Zugriffsrechten auf Datei %s: %s" #: src/hed/libs/common/UserConfig.cpp:108 #, c-format msgid "Can not access proxy file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:119 msgid "computing" msgstr "" #: src/hed/libs/common/UserConfig.cpp:121 msgid "index" msgstr "" #: src/hed/libs/common/UserConfig.cpp:165 #: src/hed/libs/common/UserConfig.cpp:171 #: src/hed/libs/common/UserConfig.cpp:223 #: src/hed/libs/common/UserConfig.cpp:229 #, c-format msgid "System configuration file (%s) contains errors." msgstr "" #: src/hed/libs/common/UserConfig.cpp:176 #: src/hed/libs/common/UserConfig.cpp:234 #, fuzzy, c-format msgid "System configuration file (%s or %s) does not exist." msgstr "Cache-Datei %s existiert nicht" #: src/hed/libs/common/UserConfig.cpp:178 #: src/hed/libs/common/UserConfig.cpp:180 #: src/hed/libs/common/UserConfig.cpp:236 #: src/hed/libs/common/UserConfig.cpp:238 #, c-format msgid "System configuration file (%s) does not exist." msgstr "" #: src/hed/libs/common/UserConfig.cpp:187 #: src/hed/libs/common/UserConfig.cpp:199 #: src/hed/libs/common/UserConfig.cpp:245 #: src/hed/libs/common/UserConfig.cpp:257 #, c-format msgid "User configuration file (%s) contains errors." msgstr "" #: src/hed/libs/common/UserConfig.cpp:192 #: src/hed/libs/common/UserConfig.cpp:250 msgid "No configuration file could be loaded." msgstr "" #: src/hed/libs/common/UserConfig.cpp:195 #: src/hed/libs/common/UserConfig.cpp:253 #, c-format msgid "User configuration file (%s) does not exist or cannot be loaded." msgstr "" #: src/hed/libs/common/UserConfig.cpp:310 #, c-format msgid "" "Unable to parse the specified verbosity (%s) to one of the allowed levels" msgstr "" #: src/hed/libs/common/UserConfig.cpp:322 #, c-format msgid "" "Unsupported job list type '%s', using 'BDB'. Supported types are: BDB, " "SQLITE, XML." msgstr "" #: src/hed/libs/common/UserConfig.cpp:503 #, c-format msgid "Certificate and key ('%s' and '%s') not found in any of the paths: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:505 #, c-format msgid "" "If the proxy or certificate/key does exist, you can manually specify the " "locations via environment variables '%s'/'%s' or '%s', or the '%s'/'%s' or '%" "s' attributes in the client configuration file (e.g. '%s')" msgstr "" #: src/hed/libs/common/UserConfig.cpp:522 #, c-format msgid "" "Can not access CA certificates directory: %s. The certificates will not be " "verified." msgstr "" #: src/hed/libs/common/UserConfig.cpp:532 #, c-format msgid "" "Can not access CA certificate directory: %s. The certificates will not be " "verified." 
msgstr "" #: src/hed/libs/common/UserConfig.cpp:558 #, c-format msgid "" "Can not find CA certificates directory in default locations:\n" "~/.arc/certificates, ~/.globus/certificates,\n" "%s/etc/certificates, %s/etc/grid-security/certificates,\n" "%s/share/certificates, /etc/grid-security/certificates.\n" "The certificate will not be verified.\n" "If the CA certificates directory does exist, please manually specify the " "locations via env\n" "X509_CERT_DIR, or the cacertificatesdirectory item in client.conf\n" msgstr "" #: src/hed/libs/common/UserConfig.cpp:579 #, c-format msgid "Using proxy file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:582 #, c-format msgid "Using certificate file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:583 #, c-format msgid "Using key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:587 #, c-format msgid "Using CA certificate directory: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:600 #: src/hed/libs/common/UserConfig.cpp:606 #, fuzzy, c-format msgid "Can not access VOMSES file/directory: %s." msgstr "Lege Verzeichnis %s an" #: src/hed/libs/common/UserConfig.cpp:612 #, fuzzy, c-format msgid "Can not access VOMS file/directory: %s." msgstr "Lege Verzeichnis %s an" #: src/hed/libs/common/UserConfig.cpp:631 msgid "" "Can not find voms service configuration file (vomses) in default locations: " "~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/" "grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses" msgstr "" #: src/hed/libs/common/UserConfig.cpp:644 #, c-format msgid "Loading configuration (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:678 #, c-format msgid "" "The value of the timeout attribute in the configuration file (%s) was only " "partially parsed" msgstr "" #: src/hed/libs/common/UserConfig.cpp:703 msgid "" "The brokerarguments attribute can only be used in conjunction with the " "brokername attribute" msgstr "" #: src/hed/libs/common/UserConfig.cpp:715 #, c-format msgid "" "The value of the keysize attribute in the configuration file (%s) was only " "partially parsed" msgstr "" #: src/hed/libs/common/UserConfig.cpp:735 #, c-format msgid "" "Could not convert the slcs attribute value (%s) to an URL instance in " "configuration file (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:781 #, c-format msgid "Specified overlay file (%s) does not exist." msgstr "" #: src/hed/libs/common/UserConfig.cpp:785 #, c-format msgid "" "Unknown attribute %s in common section of configuration file (%s), ignoring " "it" msgstr "" #: src/hed/libs/common/UserConfig.cpp:826 #, c-format msgid "Unknown section %s, ignoring it" msgstr "" #: src/hed/libs/common/UserConfig.cpp:830 #, c-format msgid "Configuration (%s) loaded" msgstr "" #: src/hed/libs/common/UserConfig.cpp:833 #, c-format msgid "Could not load configuration (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:928 #, c-format msgid "UserConfiguration saved to file (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:941 #, c-format msgid "Unable to create %s directory." 
msgstr "" #: src/hed/libs/common/UserConfig.cpp:950 #, c-format msgid "Configuration example file created (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:952 #, c-format msgid "Unable to copy example configuration from existing configuration (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:956 #, c-format msgid "Cannot copy example configuration (%s), it is not a regular file" msgstr "" #: src/hed/libs/common/UserConfig.cpp:961 #, c-format msgid "Example configuration (%s) not created." msgstr "" #: src/hed/libs/common/UserConfig.cpp:966 #, c-format msgid "The default configuration file (%s) is not a regular file." msgstr "" #: src/hed/libs/common/UserConfig.cpp:984 #, c-format msgid "%s directory created" msgstr "" #: src/hed/libs/common/UserConfig.cpp:986 #: src/hed/libs/common/UserConfig.cpp:1025 src/hed/libs/data/DataMover.cpp:684 #, fuzzy, c-format msgid "Failed to create directory %s" msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/hed/libs/common/test/LoggerTest.cpp:58 msgid "This VERBOSE message should not be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:62 msgid "This INFO message should be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:73 msgid "This VERBOSE message should now be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:79 msgid "This INFO message should also be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:93 msgid "This message goes to initial destination" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:108 msgid "This message goes to per-thread destination" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:80 msgid "Request failed: No response from SPService" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:84 #: src/hed/libs/communication/ClientSAML2SSO.cpp:137 msgid "Request failed: response from SPService is not as expected" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:92 #, c-format msgid "Authentication Request URL: %s" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:133 msgid "Request failed: No response from IdP" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:184 msgid "Request failed: No response from IdP when doing redirecting" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:188 msgid "" "Request failed: response from IdP is not as expected when doing redirecting" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:245 msgid "Request failed: No response from IdP when doing authentication" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:249 msgid "" "Request failed: response from IdP is not as expected when doing " "authentication" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:294 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:300 msgid "Succeeded to verify the signature under " msgstr "Erfolgreiche Verifikation der Signatur unter " #: src/hed/libs/communication/ClientSAML2SSO.cpp:296 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:303 msgid "Failed to verify the signature under " msgstr "Fehler bei der Überprüfung der Signatur unter " #: src/hed/libs/communication/ClientSAML2SSO.cpp:310 msgid "" "Request failed: No response from SP Service when sending SAML assertion to SP" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:314 msgid "" "Request failed: response from SP Service is not as expected when sending " "SAML assertion to SP" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:325 #, c-format msgid "IdP return some error message: %s" msgstr "" #: 
src/hed/libs/communication/ClientSAML2SSO.cpp:353 #: src/hed/libs/communication/ClientSAML2SSO.cpp:398 msgid "SAML2SSO process failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:54 msgid "Creating delegation credential to ARC delegation service" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:64 #: src/hed/libs/communication/ClientX509Delegation.cpp:267 msgid "DelegateCredentialsInit failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:68 #: src/hed/libs/communication/ClientX509Delegation.cpp:122 #: src/hed/libs/communication/ClientX509Delegation.cpp:157 #: src/hed/libs/communication/ClientX509Delegation.cpp:212 #: src/hed/libs/communication/ClientX509Delegation.cpp:271 msgid "There is no SOAP response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:73 msgid "There is no X509 request in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:78 msgid "There is no Format request in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:86 msgid "There is no Id or X509 request value in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:99 #: src/hed/libs/communication/ClientX509Delegation.cpp:187 msgid "DelegateProxy failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:118 msgid "UpdateCredentials failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:126 msgid "There is no UpdateCredentialsResponse in response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:134 #: src/hed/libs/communication/ClientX509Delegation.cpp:162 #: src/hed/libs/communication/ClientX509Delegation.cpp:217 #: src/hed/libs/communication/ClientX509Delegation.cpp:302 msgid "There is no SOAP connection chain configured" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:140 msgid "Creating delegation to CREAM delegation service" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:153 msgid "Delegation getProxyReq request failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:173 msgid "Creating delegation to CREAM delegation service failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:208 msgid "Delegation putProxy request failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:222 msgid "Creating delegation to CREAM delegation failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:237 msgid "Getting delegation credential from ARC delegation service" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:276 msgid "There is no Delegated X509 token in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:281 msgid "There is no Format delegated token in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:289 msgid "There is no Id or X509 token value in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:298 #, c-format msgid "" "Get delegated credential from delegation service: \n" " %s" msgstr "" #: src/hed/libs/compute/Broker.cpp:62 #, c-format msgid "Performing matchmaking against target (%s)." msgstr "" #: src/hed/libs/compute/Broker.cpp:72 #, c-format msgid "Matchmaking, ExecutionTarget: %s matches job description" msgstr "" #: src/hed/libs/compute/Broker.cpp:145 #, c-format msgid "" "The CA issuer (%s) of the credentials (%s) is not trusted by the target (%s)." 
msgstr "" #: src/hed/libs/compute/Broker.cpp:153 src/hed/libs/compute/Broker.cpp:171 #, c-format msgid "ComputingShareName of ExecutionTarget (%s) is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:157 src/hed/libs/compute/Broker.cpp:162 #, c-format msgid "ComputingShare (%s) explicitly rejected" msgstr "" #: src/hed/libs/compute/Broker.cpp:175 src/hed/libs/compute/Broker.cpp:180 #, c-format msgid "ComputingShare (%s) does not match selected queue (%s)" msgstr "" #: src/hed/libs/compute/Broker.cpp:189 #, c-format msgid "" "ProcessingStartTime (%s) specified in job description is inside the targets " "downtime period [ %s - %s ]." msgstr "" #: src/hed/libs/compute/Broker.cpp:194 #, c-format msgid "The downtime of the target (%s) is not published. Keeping target." msgstr "" #: src/hed/libs/compute/Broker.cpp:200 #, c-format msgid "HealthState of ExecutionTarget (%s) is not OK (%s)" msgstr "" #: src/hed/libs/compute/Broker.cpp:205 #, c-format msgid "Matchmaking, ExecutionTarget: %s, HealthState is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:212 #, c-format msgid "" "Matchmaking, Computing endpoint requirement not satisfied. ExecutionTarget: %" "s" msgstr "" #: src/hed/libs/compute/Broker.cpp:217 #, c-format msgid "Matchmaking, ExecutionTarget: %s, ImplementationName is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:243 #, c-format msgid "" "Matchmaking, %s (%d) is %s than %s (%d) published by the ExecutionTarget." msgstr "" #: src/hed/libs/compute/Broker.cpp:272 #, c-format msgid "" "Matchmaking, The %s scaled %s (%d) is %s than the %s (%d) published by the " "ExecutionTarget." msgstr "" #: src/hed/libs/compute/Broker.cpp:284 #, c-format msgid "Matchmaking, Benchmark %s is not published by the ExecutionTarget." msgstr "" #: src/hed/libs/compute/Broker.cpp:299 #, c-format msgid "" "Matchmaking, MaxTotalCPUTime problem, ExecutionTarget: %d (MaxTotalCPUTime), " "JobDescription: %d (TotalCPUTime)" msgstr "" #: src/hed/libs/compute/Broker.cpp:306 #, c-format msgid "" "Matchmaking, MaxCPUTime problem, ExecutionTarget: %d (MaxCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" msgstr "" #: src/hed/libs/compute/Broker.cpp:311 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxTotalCPUTime or MaxCPUTime not " "defined, assuming no CPU time limit" msgstr "" #: src/hed/libs/compute/Broker.cpp:317 #, c-format msgid "" "Matchmaking, MinCPUTime problem, ExecutionTarget: %d (MinCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" msgstr "" #: src/hed/libs/compute/Broker.cpp:322 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MinCPUTime not defined, assuming no CPU " "time limit" msgstr "" #: src/hed/libs/compute/Broker.cpp:330 #, c-format msgid "" "Matchmaking, MainMemorySize problem, ExecutionTarget: %d (MainMemorySize), " "JobDescription: %d (IndividualPhysicalMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:336 #, c-format msgid "" "Matchmaking, MaxMainMemory problem, ExecutionTarget: %d (MaxMainMemory), " "JobDescription: %d (IndividualPhysicalMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:341 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxMainMemory and MainMemorySize are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:349 #, c-format msgid "" "Matchmaking, MaxVirtualMemory problem, ExecutionTarget: %d " "(MaxVirtualMemory), JobDescription: %d (IndividualVirtualMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:354 #, c-format msgid "Matchmaking, ExecutionTarget: %s, MaxVirtualMemory is not 
defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:362 #, c-format msgid "" "Matchmaking, Platform problem, ExecutionTarget: %s (Platform) " "JobDescription: %s (Platform)" msgstr "" #: src/hed/libs/compute/Broker.cpp:367 #, c-format msgid "Matchmaking, ExecutionTarget: %s, Platform is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:375 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, OperatingSystem requirements not satisfied" msgstr "" #: src/hed/libs/compute/Broker.cpp:380 #, c-format msgid "Matchmaking, ExecutionTarget: %s, OperatingSystem is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:388 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, RunTimeEnvironment requirements not " "satisfied" msgstr "" #: src/hed/libs/compute/Broker.cpp:393 #, c-format msgid "Matchmaking, ExecutionTarget: %s, ApplicationEnvironments not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:402 #, c-format msgid "" "Matchmaking, NetworkInfo demand not fulfilled, ExecutionTarget do not " "support %s, specified in the JobDescription." msgstr "" #: src/hed/libs/compute/Broker.cpp:406 #, c-format msgid "Matchmaking, ExecutionTarget: %s, NetworkInfo is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:414 #, c-format msgid "" "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (SessionDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:421 #, c-format msgid "" "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (SessionDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:427 src/hed/libs/compute/Broker.cpp:448 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxDiskSpace and WorkingAreaFree are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:435 #, c-format msgid "" "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (DiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:442 #, c-format msgid "" "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (DiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:456 #, c-format msgid "" "Matchmaking, CacheTotal problem, ExecutionTarget: %d MB (CacheTotal); " "JobDescription: %d MB (CacheDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:461 #, c-format msgid "Matchmaking, ExecutionTarget: %s, CacheTotal is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:469 #, c-format msgid "" "Matchmaking, TotalSlots problem, ExecutionTarget: %d (TotalSlots) " "JobDescription: %d (NumberOfProcesses)" msgstr "" #: src/hed/libs/compute/Broker.cpp:475 #, c-format msgid "" "Matchmaking, MaxSlotsPerJob problem, ExecutionTarget: %d (MaxSlotsPerJob) " "JobDescription: %d (NumberOfProcesses)" msgstr "" #: src/hed/libs/compute/Broker.cpp:481 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, TotalSlots and MaxSlotsPerJob are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:489 #, c-format msgid "" "Matchmaking, WorkingAreaLifeTime problem, ExecutionTarget: %s " "(WorkingAreaLifeTime) JobDescription: %s (SessionLifeTime)" msgstr "" #: src/hed/libs/compute/Broker.cpp:494 #, c-format msgid "Matchmaking, ExecutionTarget: %s, WorkingAreaLifeTime is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:502 #, c-format msgid "" "Matchmaking, ConnectivityIn problem, ExecutionTarget: %s (ConnectivityIn) " "JobDescription: %s (InBound)" msgstr "" #: src/hed/libs/compute/Broker.cpp:509 #, 
c-format msgid "" "Matchmaking, ConnectivityOut problem, ExecutionTarget: %s (ConnectivityOut) " "JobDescription: %s (OutBound)" msgstr "" #: src/hed/libs/compute/Broker.cpp:532 msgid "Unable to sort added jobs. The BrokerPlugin plugin has not been loaded." msgstr "" #: src/hed/libs/compute/Broker.cpp:549 msgid "Unable to match target, marking it as not matching. Broker not valid." msgstr "" #: src/hed/libs/compute/Broker.cpp:585 #, fuzzy msgid "Unable to sort ExecutionTarget objects - Invalid Broker object." msgstr "Kann ExecutionTarget nicht zu Python Objekt konvertieren" #: src/hed/libs/compute/Broker.cpp:609 msgid "" "Unable to register job submission. Can't get JobDescription object from " "Broker, Broker is invalid." msgstr "" #: src/hed/libs/compute/BrokerPlugin.cpp:89 #, c-format msgid "Broker plugin \"%s\" not found." msgstr "" #: src/hed/libs/compute/BrokerPlugin.cpp:96 #, fuzzy, c-format msgid "Unable to load BrokerPlugin (%s)" msgstr "Konnter Broker %s nicht laden" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:26 #, c-format msgid "Uniq is replacing service coming from %s with service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:30 #, c-format msgid "Uniq is ignoring service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:37 #, c-format msgid "Uniq is adding service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:60 #, c-format msgid "Adding endpoint (%s) to TargetInformationRetriever" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:63 #, c-format msgid "Adding endpoint (%s) to ServiceEndpointRetriever" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:66 #, c-format msgid "" "Adding endpoint (%s) to both ServiceEndpointRetriever and " "TargetInformationRetriever" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:42 #, c-format msgid "The plugin %s does not support any interfaces, skipping it." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:47 #, c-format msgid "" "The first supported interface of the plugin %s is an empty string, skipping " "the plugin." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:95 #, c-format msgid "Interface on endpoint (%s) %s." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:101 #: src/hed/libs/compute/EntityRetriever.cpp:133 #: src/hed/libs/compute/EntityRetriever.cpp:425 #, c-format msgid "Ignoring endpoint (%s), it is already registered in retriever." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:110 #, c-format msgid "Service Loop: Endpoint %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:112 #, c-format msgid " This endpoint (%s) is STARTED or SUCCESSFUL" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:115 #, c-format msgid "" "Suspending querying of endpoint (%s) since the service at the endpoint is " "already being queried, or has been queried." 
msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:122 #: src/hed/libs/compute/EntityRetriever.cpp:237 #, c-format msgid " Status of endpoint (%s) is %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:126 #, c-format msgid "Setting status (STARTED) for endpoint: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:145 #, c-format msgid "Starting thread to query the endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:147 #: src/hed/libs/compute/EntityRetriever.cpp:289 #, fuzzy, c-format msgid "Failed to start querying the endpoint on %s" msgstr "Fehler beim Authentifizieren: %s" #: src/hed/libs/compute/EntityRetriever.cpp:174 #, c-format msgid "Found a registry, will query it recursively: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:211 #, c-format msgid "Setting status (%s) for endpoint: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:231 msgid "Checking for suspended endpoints which should be started." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:241 #, c-format msgid "Found started or successful endpoint (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:253 #, c-format msgid "Found suspended endpoint (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:264 #, fuzzy, c-format msgid "Trying to start suspended endpoint (%s)" msgstr "" "Ошибка при попытке открыть файл:\n" " %1" #: src/hed/libs/compute/EntityRetriever.cpp:284 #, c-format msgid "" "Starting querying of suspended endpoint (%s) - no other endpoints for this " "service is being queried or has been queried successfully." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:351 #, c-format msgid "Calling plugin %s to query endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:373 #, c-format msgid "" "The interface of this endpoint (%s) is unspecified, will try all possible " "plugins" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:389 #, c-format msgid "Problem loading plugin %s, skipping it." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:393 #, c-format msgid "The endpoint (%s) is not supported by this plugin (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:414 #, c-format msgid "" "New endpoint is created (%s) from the one with the unspecified interface (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:432 #, c-format msgid "Starting sub-thread to query the endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:434 #, c-format msgid "" "Failed to start querying the endpoint on %s (unable to create sub-thread)" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:32 #, c-format msgid "Found %s %s (it was loaded already)" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:41 #, c-format msgid "" "Unable to locate the \"%s\" plugin. Please refer to installation " "instructions and check if package providing support for %s plugin is " "installed" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:42 #, fuzzy, c-format msgid "%s plugin \"%s\" not found." msgstr "clientxrsl nicht gefunden" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:49 #: src/hed/libs/compute/JobControllerPlugin.cpp:100 #: src/hed/libs/compute/JobControllerPlugin.cpp:109 #: src/hed/libs/compute/SubmitterPlugin.cpp:158 #: src/hed/libs/compute/SubmitterPlugin.cpp:168 #, c-format msgid "" "Unable to locate the \"%s\" plugin. 
Please refer to installation " "instructions and check if package providing support for \"%s\" plugin is " "installed" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:50 #, fuzzy, c-format msgid "%s %s could not be created." msgstr "Der Job Status konnte nicht ermittelt werden" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:55 #, c-format msgid "Loaded %s %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:51 #, c-format msgid "" "Skipping ComputingEndpoint '%s', because it has '%s' interface instead of " "the requested '%s'." msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:237 #, fuzzy, c-format msgid "Address: %s" msgstr "Antwort: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:238 #, fuzzy, c-format msgid "Place: %s" msgstr "Name %s" #: src/hed/libs/compute/ExecutionTarget.cpp:239 #, fuzzy, c-format msgid "Country: %s" msgstr "Anfrage: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:240 #, fuzzy, c-format msgid "Postal code: %s" msgstr "Listen-Eintrag: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:241 #, fuzzy, c-format msgid "Latitude: %f" msgstr "Fehler: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:242 #, c-format msgid "Longitude: %f" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:248 #, fuzzy, c-format msgid "Owner: %s" msgstr "Anfrage: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:255 #, fuzzy, c-format msgid "ID: %s" msgstr "ID: " #: src/hed/libs/compute/ExecutionTarget.cpp:256 #, fuzzy, c-format msgid "Type: %s" msgstr "Proxy Typ: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:261 #, fuzzy, c-format msgid "URL: %s" msgstr "HER: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:262 #, fuzzy, c-format msgid "Interface: %s" msgstr "Quelle: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:264 #, fuzzy msgid "Interface versions:" msgstr "Benutzungsschnittstellenfehler" #: src/hed/libs/compute/ExecutionTarget.cpp:269 msgid "Interface extensions:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:274 msgid "Capabilities:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:278 #, c-format msgid "Technology: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:280 #, fuzzy msgid "Supported Profiles:" msgstr "Nicht-unterstützte URL angegeben" #: src/hed/libs/compute/ExecutionTarget.cpp:284 #, fuzzy, c-format msgid "Implementor: %s" msgstr "Server Implementation: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:285 #, fuzzy, c-format msgid "Implementation name: %s" msgstr "Server Implementation: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:286 #, fuzzy, c-format msgid "Quality level: %s" msgstr "Policy Zeile: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:287 #, fuzzy, c-format msgid "Health state: %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:288 #, fuzzy, c-format msgid "Health state info: %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:289 #, fuzzy, c-format msgid "Serving state: %s" msgstr "Start start" #: src/hed/libs/compute/ExecutionTarget.cpp:290 #, fuzzy, c-format msgid "Issuer CA: %s" msgstr "Anfrage: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:292 msgid "Trusted CAs:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:296 #, c-format msgid "Downtime starts: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:297 #, c-format msgid "Downtime ends: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:298 #, fuzzy, c-format msgid "Staging: %s" msgstr "Kontaktiere %s" #: 
src/hed/libs/compute/ExecutionTarget.cpp:300 #, fuzzy msgid "Job descriptions:" msgstr "Job Beschreibung: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:312 #, fuzzy, c-format msgid "Scheme: %s" msgstr "Quelle: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:315 #, fuzzy, c-format msgid "Rule: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:327 #, fuzzy, c-format msgid "Mapping queue: %s" msgstr "Mapping-Queue: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:328 #, c-format msgid "Max wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:329 #, c-format msgid "Max total wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:330 #, c-format msgid "Min wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:331 #, fuzzy, c-format msgid "Default wall-time: %s" msgstr "Voreinstellung: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:332 #, c-format msgid "Max CPU time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:333 #, c-format msgid "Min CPU time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:334 #, fuzzy, c-format msgid "Default CPU time: %s" msgstr "Voreinstellung: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:335 #, c-format msgid "Max total jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:336 #, fuzzy, c-format msgid "Max running jobs: %i" msgstr "Aufräumen von Job: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:337 #, fuzzy, c-format msgid "Max waiting jobs: %i" msgstr "Herunterladen des Job: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:338 #, c-format msgid "Max pre-LRMS waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:339 #, c-format msgid "Max user running jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:340 #, c-format msgid "Max slots per job: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:341 #, c-format msgid "Max stage in streams: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:342 #, c-format msgid "Max stage out streams: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:343 #, fuzzy, c-format msgid "Scheduling policy: %s" msgstr "ARC delegation policy: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:344 #, c-format msgid "Max memory: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:345 #, c-format msgid "Max virtual memory: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:346 #, c-format msgid "Max disk space: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:347 #, fuzzy, c-format msgid "Default Storage Service: %s" msgstr "Delegation service: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:348 msgid "Supports preemption" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:349 msgid "Doesn't support preemption" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:350 #, fuzzy, c-format msgid "Total jobs: %i" msgstr "alle Jobs" #: src/hed/libs/compute/ExecutionTarget.cpp:351 #, fuzzy, c-format msgid "Running jobs: %i" msgstr "Aufräumen von Job: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:352 #, fuzzy, c-format msgid "Local running jobs: %i" msgstr "Aufräumen von Job: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:353 #, fuzzy, c-format msgid "Waiting jobs: %i" msgstr "Aufräumen von Job: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:354 #, fuzzy, c-format msgid "Local waiting jobs: %i" msgstr "Herunterladen des Job: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:355 #, c-format msgid "Suspended jobs: %i" msgstr "" #:
src/hed/libs/compute/ExecutionTarget.cpp:356 #, c-format msgid "Local suspended jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:357 #, fuzzy, c-format msgid "Staging jobs: %i" msgstr "Aufräumen von Job: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:358 #, c-format msgid "Pre-LRMS waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:359 #, fuzzy, c-format msgid "Estimated average waiting time: %s" msgstr "start_reading_ftp: angegelegt um: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:360 #, fuzzy, c-format msgid "Estimated worst waiting time: %s" msgstr "start_reading_ftp: angegelegt um: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:361 #, c-format msgid "Free slots: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:363 msgid "Free slots grouped according to time limits (limit: free slots):" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:366 #, c-format msgid " %s: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:367 #, c-format msgid " unspecified: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:370 #, c-format msgid "Used slots: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:371 #, fuzzy, c-format msgid "Requested slots: %i" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:372 #, fuzzy, c-format msgid "Reservation policy: %s" msgstr "ARC delegation policy: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:379 #, fuzzy, c-format msgid "Resource manager: %s" msgstr "Modulname: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:380 #, fuzzy, c-format msgid " (%s)" msgstr "%s (%s)" #: src/hed/libs/compute/ExecutionTarget.cpp:383 #, c-format msgid "Total physical CPUs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:384 #, c-format msgid "Total logical CPUs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:385 #, c-format msgid "Total slots: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:386 msgid "Supports advance reservations" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:387 msgid "Doesn't support advance reservations" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:388 msgid "Supports bulk submission" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:389 msgid "Doesn't support bulk Submission" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:390 msgid "Homogeneous resource" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:391 msgid "Non-homogeneous resource" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:393 #, fuzzy msgid "Network information:" msgstr "Angabe des aktuellen Versionsbezeichners" #: src/hed/libs/compute/ExecutionTarget.cpp:398 msgid "Working area is shared among jobs" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:399 msgid "Working area is not shared among jobs" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:400 #, c-format msgid "Working area total size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:401 #, c-format msgid "Working area free size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:402 #, c-format msgid "Working area life time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:403 #, c-format msgid "Cache area total size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:404 #, c-format msgid "Cache area free size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:410 #, fuzzy, c-format msgid "Platform: %s" msgstr "ProxyStore: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:411 msgid "Execution environment supports 
inbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:412 msgid "Execution environment does not support inbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:413 msgid "Execution environment supports outbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:414 msgid "Execution environment does not support outbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:415 msgid "Execution environment is a virtual machine" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:416 msgid "Execution environment is a physical machine" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:417 #, fuzzy, c-format msgid "CPU vendor: %s" msgstr "DCAU fehlgeschlagen: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:418 #, fuzzy, c-format msgid "CPU model: %s" msgstr "Policy Zeile: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:419 #, fuzzy, c-format msgid "CPU version: %s" msgstr "%s version %s" #: src/hed/libs/compute/ExecutionTarget.cpp:420 #, c-format msgid "CPU clock speed: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:421 #, c-format msgid "Main memory size: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:422 #, fuzzy, c-format msgid "OS family: %s" msgstr "PASV fehlgeschlagen: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:423 #, fuzzy, c-format msgid "OS name: %s" msgstr "Name %s" #: src/hed/libs/compute/ExecutionTarget.cpp:424 #, fuzzy, c-format msgid "OS version: %s" msgstr "%s version %s" #: src/hed/libs/compute/ExecutionTarget.cpp:431 #, fuzzy msgid "Computing service:" msgstr "Delegation service: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:455 #, c-format msgid "%d Endpoints" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:460 #, fuzzy msgid "Endpoint Information:" msgstr "Angabe des aktuellen Versionsbezeichners" #: src/hed/libs/compute/ExecutionTarget.cpp:472 #, c-format msgid "%d Batch Systems" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:477 msgid "Batch System Information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:483 #, fuzzy msgid "Installed application environments:" msgstr "Initialisierte replication Umgebung" #: src/hed/libs/compute/ExecutionTarget.cpp:496 #, c-format msgid "%d Shares" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:501 #, fuzzy msgid "Share Information:" msgstr "Ungültige Authentisierungs-Information" #: src/hed/libs/compute/ExecutionTarget.cpp:507 #, c-format msgid "%d mapping policies" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:511 #, fuzzy msgid "Mapping policy:" msgstr "ARC delegation policy: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:527 #, c-format msgid "Execution Target on Computing Service: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:529 #, c-format msgid " Computing endpoint URL: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:531 #, fuzzy, c-format msgid " Computing endpoint interface name: %s" msgstr "Erstelle Client Schnitstelle" #: src/hed/libs/compute/ExecutionTarget.cpp:533 #: src/hed/libs/compute/Job.cpp:580 #, c-format msgid " Queue: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:536 #, fuzzy, c-format msgid " Mapping queue: %s" msgstr "" "\n" " Соответствие раздел-сегмент:\n" #: src/hed/libs/compute/ExecutionTarget.cpp:539 #, fuzzy, c-format msgid " Health state: %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:544 msgid "Service information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:549 
msgid " Installed application environments:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:556 #, fuzzy msgid "Batch system information:" msgstr "Ungültige Authentisierungs-Information" #: src/hed/libs/compute/ExecutionTarget.cpp:559 msgid "Queue information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:566 #, fuzzy msgid " Benchmark information:" msgstr "Ungültige Authentisierungs-Information" #: src/hed/libs/compute/GLUE2.cpp:58 #, fuzzy msgid "The ComputingService doesn't advertise its Quality Level." msgstr "Der Service gibt seinen Quality Level nicht an." #: src/hed/libs/compute/GLUE2.cpp:117 #, fuzzy msgid "The ComputingEndpoint doesn't advertise its Quality Level." msgstr "Der Service gibt seinen Quality Level nicht an." #: src/hed/libs/compute/GLUE2.cpp:128 #, fuzzy msgid "The ComputingService doesn't advertise its Interface." msgstr "Der Service gibt seine Interface nicht an." #: src/hed/libs/compute/GLUE2.cpp:160 #, fuzzy msgid "The ComputingEndpoint doesn't advertise its Serving State." msgstr "Der Servcice gibt seinen Serving State nicht an." #: src/hed/libs/compute/Job.cpp:329 #, fuzzy msgid "Unable to detect format of job record." msgstr "Konnte status Beschreibung des jobs (%s) nicht erhalten: " #: src/hed/libs/compute/Job.cpp:550 #, c-format msgid "Job: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:552 #, c-format msgid " Name: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:553 #, fuzzy, c-format msgid " State: %s" msgstr "Name %s" #: src/hed/libs/compute/Job.cpp:556 #, c-format msgid " Specific state: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:560 src/hed/libs/compute/Job.cpp:584 #, c-format msgid " Waiting Position: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:564 #, c-format msgid " Exit Code: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:568 #, fuzzy, c-format msgid " Job Error: %s" msgstr "Error: %s" #: src/hed/libs/compute/Job.cpp:573 #, c-format msgid " Owner: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:577 #, c-format msgid " Other Messages: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:582 #, fuzzy, c-format msgid " Requested Slots: %d" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/libs/compute/Job.cpp:587 #, c-format msgid " Stdin: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:589 #, c-format msgid " Stdout: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:591 #, c-format msgid " Stderr: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:593 #, fuzzy, c-format msgid " Computing Service Log Directory: %s" msgstr "Fehler beim listen von Datei oder Verzeichnis: %s" #: src/hed/libs/compute/Job.cpp:596 #, c-format msgid " Submitted: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:599 #, c-format msgid " End Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:602 #, c-format msgid " Submitted from: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:605 #, c-format msgid " Submitting client: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:608 #, c-format msgid " Requested CPU Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:612 #, c-format msgid " Used CPU Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:615 #, c-format msgid " Used Wall Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:618 #, c-format msgid " Used Memory: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:622 #, c-format msgid " Results were deleted: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:623 #, c-format msgid " Results must be retrieved before: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:627 #, c-format msgid " Proxy valid until: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:631 #, 
c-format msgid " Entry valid from: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:634 #, c-format msgid " Entry valid for: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:638 msgid " Old job IDs:" msgstr "" #: src/hed/libs/compute/Job.cpp:646 #, fuzzy, c-format msgid " ID on service: %s" msgstr "Delegation service: %s" #: src/hed/libs/compute/Job.cpp:647 #, c-format msgid " Service information URL: %s (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:648 #, c-format msgid " Job status URL: %s (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:649 #, c-format msgid " Job management URL: %s (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:650 #, fuzzy, c-format msgid " Stagein directory URL: %s" msgstr "Lege Verzeichnis %s an" #: src/hed/libs/compute/Job.cpp:651 #, fuzzy, c-format msgid " Stageout directory URL: %s" msgstr "Fehler beim Listen von Verzeichnis: %s" #: src/hed/libs/compute/Job.cpp:652 #, fuzzy, c-format msgid " Session directory URL: %s" msgstr "Lege Verzeichnis %s an" #: src/hed/libs/compute/Job.cpp:654 #, fuzzy msgid " Delegation IDs:" msgstr "Delegation ID: %s" #: src/hed/libs/compute/Job.cpp:670 #, c-format msgid "Unable to handle job (%s), no interface specified." msgstr "" #: src/hed/libs/compute/Job.cpp:675 #, c-format msgid "" "Unable to handle job (%s), no plugin associated with the specified interface " "(%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:697 #, c-format msgid "Invalid download destination path specified (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:702 #, c-format msgid "" "Unable to download job (%s), no JobControllerPlugin plugin was set to handle " "the job." msgstr "" #: src/hed/libs/compute/Job.cpp:706 #, fuzzy, c-format msgid "Downloading job: %s" msgstr "Herunterladen des Job: %s" #: src/hed/libs/compute/Job.cpp:710 #, c-format msgid "" "Cant retrieve job files for job (%s) - unable to determine URL of stage out " "directory" msgstr "" #: src/hed/libs/compute/Job.cpp:715 #, c-format msgid "Invalid stage out path specified (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:722 #, c-format msgid "%s directory exist! Skipping job." msgstr "" #: src/hed/libs/compute/Job.cpp:728 #, fuzzy, c-format msgid "Unable to retrieve list of job files to download for job %s" msgstr "Konnte status Beschreibung des jobs (%s) nicht erhalten: " #: src/hed/libs/compute/Job.cpp:733 #, fuzzy, c-format msgid "No files to retrieve for job %s" msgstr "Konnte status Beschreibung des jobs (%s) nicht erhalten: " #: src/hed/libs/compute/Job.cpp:739 #, fuzzy, c-format msgid "Failed to create directory %s! Skipping job." 
msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/hed/libs/compute/Job.cpp:752 #, fuzzy, c-format msgid "Failed downloading %s to %s, destination already exist" msgstr "Fahler bei Herunterladen %s zu %s" #: src/hed/libs/compute/Job.cpp:758 #, fuzzy, c-format msgid "Failed downloading %s to %s, unable to remove existing destination" msgstr "Fehler bei Schreiben zu Ziel" #: src/hed/libs/compute/Job.cpp:764 #, fuzzy, c-format msgid "Failed downloading %s to %s" msgstr "Fahler bei Herunterladen %s zu %s" #: src/hed/libs/compute/Job.cpp:777 src/hed/libs/compute/Job.cpp:782 #, c-format msgid "Unable to list files at %s" msgstr "" #: src/hed/libs/compute/Job.cpp:824 msgid "Now copying (from -> to)" msgstr "" #: src/hed/libs/compute/Job.cpp:825 #, c-format msgid " %s -> %s" msgstr "" #: src/hed/libs/compute/Job.cpp:841 #, c-format msgid "Unable to initialise connection to source: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:852 #, c-format msgid "Unable to initialise connection to destination: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:871 #, c-format msgid "File download failed: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:910 src/hed/libs/compute/Job.cpp:939 #: src/hed/libs/compute/Job.cpp:971 src/hed/libs/compute/Job.cpp:1004 #, fuzzy, c-format msgid "Waiting for lock on file %s" msgstr "Warte vor Antwort" #: src/hed/libs/compute/JobControllerPlugin.cpp:101 #, c-format msgid "JobControllerPlugin plugin \"%s\" not found." msgstr "" #: src/hed/libs/compute/JobControllerPlugin.cpp:110 #, fuzzy, c-format msgid "JobControllerPlugin %s could not be created" msgstr "Keine Job controller Plugins geladen" #: src/hed/libs/compute/JobControllerPlugin.cpp:115 #, fuzzy, c-format msgid "Loaded JobControllerPlugin %s" msgstr "Keine Job controller Plugins geladen" #: src/hed/libs/compute/JobDescription.cpp:22 #, c-format msgid ": %d" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:24 #, c-format msgid ": %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:138 msgid " --- DRY RUN --- " msgstr "" #: src/hed/libs/compute/JobDescription.cpp:148 #, fuzzy, c-format msgid " Annotation: %s" msgstr "Ziel: %s" #: src/hed/libs/compute/JobDescription.cpp:154 #, c-format msgid " Old activity ID: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:160 #, c-format msgid " Argument: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:171 #, c-format msgid " RemoteLogging (optional): %s (%s)" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:174 #, c-format msgid " RemoteLogging: %s (%s)" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:182 #, c-format msgid " Environment.name: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:183 #, c-format msgid " Environment: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:196 #, c-format msgid " PreExecutable.Argument: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:199 #: src/hed/libs/compute/JobDescription.cpp:217 #, c-format msgid " Exit code for successful execution: %d" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:202 #: src/hed/libs/compute/JobDescription.cpp:220 msgid " No exit code for successful execution specified." 
msgstr "" #: src/hed/libs/compute/JobDescription.cpp:214 #, c-format msgid " PostExecutable.Argument: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:230 #, fuzzy, c-format msgid " Access control: %s" msgstr "Zugriffslist location: %s" #: src/hed/libs/compute/JobDescription.cpp:234 #, fuzzy, c-format msgid " Processing start time: %s" msgstr "Verarbeitungstyp nicht unterstützt: %s" #: src/hed/libs/compute/JobDescription.cpp:237 msgid " Notify:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:251 #, fuzzy, c-format msgid " Credential service: %s" msgstr "Delegation service: %s" #: src/hed/libs/compute/JobDescription.cpp:261 msgid " Operating system requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:279 msgid " Computing endpoint requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:292 msgid " Node access: inbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:295 msgid " Node access: outbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:298 msgid " Node access: inbound and outbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:308 msgid " Job requires exclusive execution" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:311 msgid " Job does not require exclusive execution" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:316 msgid " Run time environment requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:328 msgid " Inputfile element:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:329 #: src/hed/libs/compute/JobDescription.cpp:351 #, c-format msgid " Name: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:331 msgid " Is executable: true" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:335 #, fuzzy, c-format msgid " Sources: %s" msgstr "Quelle: %s" #: src/hed/libs/compute/JobDescription.cpp:337 #, fuzzy, c-format msgid " Sources.DelegationID: %s" msgstr "Delegation ID: %s" #: src/hed/libs/compute/JobDescription.cpp:341 #, c-format msgid " Sources.Options: %s = %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:350 msgid " Outputfile element:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:354 #, c-format msgid " Targets: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:356 #, fuzzy, c-format msgid " Targets.DelegationID: %s" msgstr "Delegation ID: %s" #: src/hed/libs/compute/JobDescription.cpp:360 #, c-format msgid " Targets.Options: %s = %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:367 #, fuzzy, c-format msgid " DelegationID element: %s" msgstr "Delegation ID: %s" #: src/hed/libs/compute/JobDescription.cpp:374 #, fuzzy, c-format msgid " Other attributes: [%s], %s" msgstr "Attribut: %s - %s" #: src/hed/libs/compute/JobDescription.cpp:440 msgid "Empty job description source string" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:473 #, fuzzy msgid "No job description parsers available" msgstr "Keine Job Beschreibung als Eingabe benötigt" #: src/hed/libs/compute/JobDescription.cpp:475 #, c-format msgid "" "No job description parsers suitable for handling '%s' language are available" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:483 #, fuzzy, c-format msgid "%s parsing error" msgstr "Fataler Fehler: %s" #: src/hed/libs/compute/JobDescription.cpp:499 #, fuzzy msgid "No job description parser was able to interpret job description" msgstr "Zu sendende Job-Beschreibung : %s" #: src/hed/libs/compute/JobDescription.cpp:509 msgid "" "Job description language is not specified, unable to output description." 
msgstr "" #: src/hed/libs/compute/JobDescription.cpp:521 #, fuzzy, c-format msgid "Generating %s job description output" msgstr "Eine fehler geschah während des Generieres der Job Beschreibung." #: src/hed/libs/compute/JobDescription.cpp:537 #, c-format msgid "Language (%s) not recognized by any job description parsers." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:550 #, fuzzy, c-format msgid "Two input files have identical name '%s'." msgstr "Zwei Dateien haben identische Namen: '%s'." #: src/hed/libs/compute/JobDescription.cpp:569 #: src/hed/libs/compute/JobDescription.cpp:582 #, fuzzy, c-format msgid "Cannot stat local input file '%s'" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/hed/libs/compute/JobDescription.cpp:602 #, fuzzy, c-format msgid "Cannot find local input file '%s' (%s)" msgstr "Konnte lokale Inputdateien nicht hochladen" #: src/hed/libs/compute/JobDescription.cpp:644 #, fuzzy msgid "Unable to select runtime environment" msgstr "Kann run time environment nicht auswählen." #: src/hed/libs/compute/JobDescription.cpp:651 #, fuzzy msgid "Unable to select middleware" msgstr "Kann middleware nicht auswählen." #: src/hed/libs/compute/JobDescription.cpp:658 #, fuzzy msgid "Unable to select operating system." msgstr "Kann Operating System nciht auswählen." #: src/hed/libs/compute/JobDescription.cpp:677 #, c-format msgid "No test-job with ID %d found." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:689 #, c-format msgid "Test was defined with ID %d, but some error occurred during parsing it." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:693 #, fuzzy, c-format msgid "No jobdescription resulted at %d test" msgstr "Zu sendende Job-Beschreibung : %s" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:52 #, c-format msgid "JobDescriptionParserPlugin plugin \"%s\" not found." msgstr "" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:59 #, fuzzy, c-format msgid "JobDescriptionParserPlugin %s could not be created" msgstr "JobDescription Klasse ist kein Objekt" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:64 #, fuzzy, c-format msgid "Loaded JobDescriptionParserPlugin %s" msgstr "Gültige JobDescription gefunden" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:172 #, fuzzy msgid "Unable to create temporary directory" msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:180 #, fuzzy, c-format msgid "Unable to create data base environment (%s)" msgstr "Kann run time environment nicht auswählen." #: src/hed/libs/compute/JobInformationStorageBDB.cpp:190 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:194 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:198 #, c-format msgid "Unable to set duplicate flags for secondary key DB (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:204 #, fuzzy, c-format msgid "Unable to create job database (%s)" msgstr "Konnte Job Status-Informationen nicht erhalten." 
#: src/hed/libs/compute/JobInformationStorageBDB.cpp:208 #, c-format msgid "Unable to create DB for secondary name keys (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:212 #, fuzzy, c-format msgid "Unable to create DB for secondary endpoint keys (%s)" msgstr "Fehler bei Anlegen von Datei %s zum Schreiben: %s" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:216 #, fuzzy, c-format msgid "Unable to create DB for secondary service info keys (%s)" msgstr "Fehler bei Erstellen von Info-Datei %s: %s" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:221 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:225 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:229 #, c-format msgid "Unable to associate secondary DB with primary DB (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:232 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:92 #, fuzzy, c-format msgid "Job database created successfully (%s)" msgstr "erfolgreich angelegt, ID: %s" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:274 #, fuzzy, c-format msgid "Error from BDB: %s: %s" msgstr "Fehler bei Suche nach LFN anhand guid %s: %s" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:277 #, fuzzy, c-format msgid "Error from BDB: %s" msgstr "Error: %s" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:297 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:137 #: src/hed/libs/compute/JobInformationStorageXML.cpp:27 #, c-format msgid "" "Job list file cannot be created: The parent directory (%s) doesn't exist." msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:301 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:141 #: src/hed/libs/compute/JobInformationStorageXML.cpp:31 #, c-format msgid "Job list file cannot be created: %s is not a directory" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:308 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:148 #: src/hed/libs/compute/JobInformationStorageXML.cpp:38 #, c-format msgid "Job list file (%s) is not a regular file" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:346 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:405 #, c-format msgid "Unable to write key/value pair to job database (%s): Key \"%s\"" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:572 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:435 #: src/hed/libs/compute/JobInformationStorageXML.cpp:137 #, fuzzy, c-format msgid "Unable to truncate job database (%s)" msgstr "Konnte status Beschreibung des jobs (%s) nicht erhalten: " #: src/hed/libs/compute/JobInformationStorageBDB.cpp:603 msgid "" "ENOENT: The file or directory does not exist, Or a nonexistent re_source " "file was specified." msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:606 msgid "" "DB_OLD_VERSION: The database cannot be opened without being first upgraded." msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:609 msgid "EEXIST: DB_CREATE and DB_EXCL were specified and the database exists." msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:611 msgid "EINVAL" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:614 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:468 #, fuzzy, c-format msgid "Unable to determine error (%d)" msgstr "Konnter Broker %s nicht laden" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:57 #, fuzzy, c-format msgid "Unable to create data base (%s)" msgstr "Konnte Job Status-Informationen nicht erhalten." 
#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:72 #, fuzzy, c-format msgid "Unable to create jobs table in data base (%s)" msgstr "Konnte Job Status-Informationen nicht erhalten." #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:80 #, fuzzy, c-format msgid "Unable to create index for jobs table in data base (%s)" msgstr "Konnte Job Status-Informationen nicht erhalten." #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:88 #, fuzzy, c-format msgid "Failed checking database (%s)" msgstr "Fehler bei Lesen von Daten" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:113 #, fuzzy, c-format msgid "Error from SQLite: %s: %s" msgstr "Fehler bei Suche nach LFN anhand guid %s: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:116 #, fuzzy, c-format msgid "Error from SQLite: %s" msgstr "Error: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:246 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:253 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:260 #, fuzzy, c-format msgid "Unable to write records into job database (%s): Id \"%s\"" msgstr "Konnte status Beschreibung des jobs (%s) nicht erhalten: " #: src/hed/libs/compute/JobInformationStorageXML.cpp:51 #: src/hed/libs/compute/JobInformationStorageXML.cpp:223 #: src/hed/libs/compute/JobInformationStorageXML.cpp:264 #, fuzzy, c-format msgid "Waiting for lock on job list file %s" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/hed/libs/compute/JobInformationStorageXML.cpp:162 #, c-format msgid "Will remove %s on service %s." msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:36 msgid "Ignoring job, the job ID is empty" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:41 #, c-format msgid "Ignoring job (%s), the management interface name is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:46 #, c-format msgid "Ignoring job (%s), the job management URL is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:51 #, c-format msgid "Ignoring job (%s), the status interface name is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:56 #, c-format msgid "Ignoring job (%s), the job status URL is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:65 #, c-format msgid "Ignoring job (%s), unable to load JobControllerPlugin for %s" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:72 #, c-format msgid "" "Ignoring job (%s), already tried and were unable to load JobControllerPlugin" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:385 #, fuzzy, c-format msgid "Job resubmission failed: Unable to load broker (%s)" msgstr "Hochladen des Jobs schlug fehl, keine weiteren Ziele verfügbar" #: src/hed/libs/compute/JobSupervisor.cpp:400 #, fuzzy msgid "Job resubmission aborted because no resource returned any information" msgstr "" "Hochladen des Jobs abgebrochen, da keine Cluster entsprechende Informationen " "anboten" #: src/hed/libs/compute/JobSupervisor.cpp:421 #, c-format msgid "Unable to resubmit job (%s), unable to parse obtained job description" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:443 #, c-format msgid "" "Unable to resubmit job (%s), target information retrieval failed for target: " "%s" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:469 #, c-format msgid "Unable to resubmit job (%s), no targets applicable for submission" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:504 #, c-format msgid "" "Unable to migrate job (%s), job description could not be retrieved remotely" msgstr "" #: 
src/hed/libs/compute/JobSupervisor.cpp:524 #, fuzzy msgid "Job migration aborted, no resource returned any information" msgstr "Job Migration abgebrochen, da kein Cluster Informationen lieferte" #: src/hed/libs/compute/JobSupervisor.cpp:536 #, fuzzy, c-format msgid "Job migration aborted, unable to load broker (%s)" msgstr "Konnte Broker %s nicht laden" #: src/hed/libs/compute/JobSupervisor.cpp:552 #, c-format msgid "Unable to migrate job (%s), unable to parse obtained job description" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:573 #, c-format msgid "Unable to load submission plugin for %s interface" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:583 #, fuzzy, c-format msgid "Job migration failed for job (%s), no applicable targets" msgstr "Hochladen des Jobs schlug fehl, keine weiteren Ziele verfügbar" #: src/hed/libs/compute/Software.cpp:65 src/hed/libs/compute/Software.cpp:94 #: src/hed/libs/compute/Software.cpp:113 #, c-format msgid "%s > %s => false" msgstr "" #: src/hed/libs/compute/Software.cpp:70 src/hed/libs/compute/Software.cpp:83 #: src/hed/libs/compute/Software.cpp:107 #, c-format msgid "%s > %s => true" msgstr "" #: src/hed/libs/compute/Software.cpp:90 src/hed/libs/compute/Software.cpp:102 #, c-format msgid "%s > %s => false: %s contains non numbers in the version part." msgstr "" #: src/hed/libs/compute/Software.cpp:206 src/hed/libs/compute/Software.cpp:217 #, c-format msgid "Requirement \"%s %s\" NOT satisfied." msgstr "" #: src/hed/libs/compute/Software.cpp:212 #, c-format msgid "Requirement \"%s %s\" satisfied." msgstr "" #: src/hed/libs/compute/Software.cpp:221 #, c-format msgid "Requirement \"%s %s\" satisfied by \"%s\"." msgstr "" #: src/hed/libs/compute/Software.cpp:226 msgid "All requirements satisfied." msgstr "" #: src/hed/libs/compute/Submitter.cpp:83 #, fuzzy, c-format msgid "Trying to submit directly to endpoint (%s)" msgstr "Versuche den Job erneut hochzuladen zu %s" #: src/hed/libs/compute/Submitter.cpp:88 #, c-format msgid "Interface (%s) specified, submitting only to that interface" msgstr "" #: src/hed/libs/compute/Submitter.cpp:106 #, fuzzy msgid "Trying all available interfaces" msgstr "Erstelle Client Schnittstelle" #: src/hed/libs/compute/Submitter.cpp:112 #, c-format msgid "Trying to submit endpoint (%s) using interface (%s) with plugin (%s)." msgstr "" #: src/hed/libs/compute/Submitter.cpp:116 #, c-format msgid "" "Unable to load plugin (%s) for interface (%s) when trying to submit job " "description." msgstr "" #: src/hed/libs/compute/Submitter.cpp:130 #, c-format msgid "No more interfaces to try for endpoint %s." msgstr "" #: src/hed/libs/compute/Submitter.cpp:336 #, c-format msgid "Target %s does not match requested interface(s)." msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:54 msgid "No stagein URL is provided" msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:73 #, fuzzy, c-format msgid "Failed uploading file %s to %s: %s" msgstr "Fehler bei Herunterladen %s zu %s" #: src/hed/libs/compute/SubmitterPlugin.cpp:103 #, fuzzy, c-format msgid "Trying to migrate to %s: Migration to a %s interface is not supported." msgstr "" "Versuche zu %s zu migrieren: Migration zu einem BES cluster wird nicht " "unterstützt" #: src/hed/libs/compute/SubmitterPlugin.cpp:159 #, c-format msgid "SubmitterPlugin plugin \"%s\" not found." 
msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:169 #, fuzzy, c-format msgid "SubmitterPlugin %s could not be created" msgstr "Der Job Status konnte nicht ermittelt werden" #: src/hed/libs/compute/SubmitterPlugin.cpp:174 #, c-format msgid "Loaded SubmitterPlugin %s" msgstr "" #: src/hed/libs/compute/examples/basic_job_submission.cpp:28 #, fuzzy msgid "Invalid job description" msgstr "Ungültige JobDescription:" #: src/hed/libs/compute/examples/basic_job_submission.cpp:47 #, fuzzy msgid "Failed to submit job" msgstr "Konnte job nicht starten" #: src/hed/libs/compute/examples/basic_job_submission.cpp:54 #, fuzzy, c-format msgid "Failed to write to local job list %s" msgstr "Fehler bei Schreiben zu Datein %s: %s" #: src/hed/libs/compute/test_jobdescription.cpp:20 msgid "[job description ...]" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:21 msgid "" "This tiny tool can be used for testing the JobDescription's conversion " "abilities." msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:23 msgid "" "The job description also can be a file or a string in JDL, POSIX JSDL, JSDL, " "or XRSL format." msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:27 msgid "" "define the requested format (nordugrid:jsdl, egee:jdl, nordugrid:xrsl, emies:" "adl)" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:28 msgid "format" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:33 #, fuzzy msgid "show the original job description" msgstr "" " -o, -stdout вывести файл стандартого выхода задачи (по\n" " умолчанию)" #: src/hed/libs/compute/test_jobdescription.cpp:43 msgid "Use --help option for detailed usage information" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:50 #, fuzzy msgid " [ JobDescription tester ] " msgstr "Zu sendende Job-Beschreibung : %s" #: src/hed/libs/compute/test_jobdescription.cpp:74 msgid " [ Parsing the original text ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:80 #, fuzzy msgid "Unable to parse." 
msgstr "Konnter Broker %s nicht laden" #: src/hed/libs/compute/test_jobdescription.cpp:89 msgid " [ egee:jdl ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:91 msgid " [ emies:adl ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:93 msgid " [ nordugrid:jsdl ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:95 msgid " [ nordugrid:xrsl ] " msgstr "" #: src/hed/libs/credential/ARCProxyUtil.cpp:138 msgid "VOMS command is empty" msgstr "" #: src/hed/libs/credential/ARCProxyUtil.cpp:424 #: src/hed/libs/credential/ARCProxyUtil.cpp:1431 #, fuzzy msgid "Failed to sign proxy" msgstr "Fehler beim Senden von body" #: src/hed/libs/credential/ARCProxyUtil.cpp:1317 #, c-format msgid "Please choose the NSS database you would use (1-%d): " msgstr "" #: src/hed/libs/credential/ARCProxyUtil.cpp:1353 #: src/hed/libs/credential/ARCProxyUtil.cpp:1460 #, fuzzy msgid "Failed to generate X509 request with NSS" msgstr "Fehler bei Generieren von X509 Token für ausgehende SOAP" #: src/hed/libs/credential/ARCProxyUtil.cpp:1364 #: src/hed/libs/credential/ARCProxyUtil.cpp:1471 #: src/hed/libs/credential/ARCProxyUtil.cpp:1512 #, fuzzy msgid "Failed to create X509 certificate with NSS" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/ARCProxyUtil.cpp:1376 #: src/hed/libs/credential/ARCProxyUtil.cpp:1483 #: src/hed/libs/credential/ARCProxyUtil.cpp:1536 #, fuzzy msgid "Failed to export X509 certificate from NSS DB" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/ARCProxyUtil.cpp:1519 #, fuzzy msgid "Failed to import X509 certificate into NSS DB" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/ARCProxyUtil.cpp:1528 #, fuzzy msgid "Failed to initialize the credential configuration" msgstr "Konnte delegation credentials in Client Konfiguration nicht finden" #: src/hed/libs/credential/CertUtil.cpp:166 #, c-format msgid "Error number in store context: %i" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:167 msgid "Self-signed certificate" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:170 #, c-format msgid "The certificate with subject %s is not valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:173 #, c-format msgid "" "Can not find issuer certificate for the certificate with subject %s and " "hash: %lu" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:176 #, c-format msgid "Certificate with subject %s has expired" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:179 #, c-format msgid "" "Untrusted self-signed certificate in chain with subject %s and hash: %lu" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:181 #, c-format msgid "Certificate verification error: %s" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:193 msgid "Can not get the certificate type" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:233 msgid "Couldn't verify availability of CRL" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:246 msgid "In the available CRL the lastUpdate field is not valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:253 msgid "The available CRL is not yet valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:262 msgid "In the available CRL, the nextUpdate field is not valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:268 msgid "The available CRL has expired" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:291 #, c-format msgid "Certificate with serial number %s and subject \"%s\" is revoked" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:309 msgid "" "Directory of trusted 
CAs is not specified/found; Using current path as the " "CA direcroty" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:318 msgid "Can't allocate memory for CA policy path" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:364 #, c-format msgid "Certificate has unknown extension with numeric ID %u and SN %s" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:378 #: src/hed/libs/credential/Credential.cpp:1693 msgid "" "Can not convert DER encoded PROXY_CERT_INFO_EXTENSION extension to internal " "format" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:424 msgid "Trying to check X509 cert with check_cert_type" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:465 msgid "Can't convert DER encoded PROXYCERTINFO extension to internal form" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:469 msgid "Can't get policy from PROXYCERTINFO extension" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:473 msgid "Can't get policy language from PROXYCERTINFO extension" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:505 msgid "The subject does not match the issuer name + proxy CN entry" msgstr "" #: src/hed/libs/credential/Credential.cpp:73 #, fuzzy, c-format msgid "OpenSSL error string: %s" msgstr "Fehler bei Traversieren: %s" #: src/hed/libs/credential/Credential.cpp:196 msgid "Can't get the first byte of input to determine its format" msgstr "" #: src/hed/libs/credential/Credential.cpp:210 #, fuzzy msgid "Can't reset the input" msgstr "Kann Python Liste nicht anlegen" #: src/hed/libs/credential/Credential.cpp:236 #: src/hed/libs/credential/Credential.cpp:273 msgid "Can't get the first byte of input BIO to get its format" msgstr "" #: src/hed/libs/credential/Credential.cpp:248 #, fuzzy msgid "Can not read certificate/key string" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/Credential.cpp:456 #, c-format msgid "Can not find certificate file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:461 #, c-format msgid "Can not read certificate file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:499 msgid "Can not read certificate string" msgstr "" #: src/hed/libs/credential/Credential.cpp:519 msgid "Certificate format is PEM" msgstr "" #: src/hed/libs/credential/Credential.cpp:546 msgid "Certificate format is DER" msgstr "" #: src/hed/libs/credential/Credential.cpp:575 msgid "Certificate format is PKCS" msgstr "" #: src/hed/libs/credential/Credential.cpp:602 msgid "Certificate format is unknown" msgstr "" #: src/hed/libs/credential/Credential.cpp:610 #, c-format msgid "Can not find key file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:615 #, fuzzy, c-format msgid "Can not open key file %s" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/hed/libs/credential/Credential.cpp:634 msgid "Can not read key string" msgstr "" #: src/hed/libs/credential/Credential.cpp:697 #: src/hed/libs/credential/VOMSUtil.cpp:258 msgid "Failed to lock arccredential library in memory" msgstr "" #: src/hed/libs/credential/Credential.cpp:712 msgid "Certificate verification succeeded" msgstr "" #: src/hed/libs/credential/Credential.cpp:716 msgid "Certificate verification failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:731 #: src/hed/libs/credential/Credential.cpp:751 #: src/hed/libs/credential/Credential.cpp:771 #: src/hed/libs/credential/Credential.cpp:1003 #: src/hed/libs/credential/Credential.cpp:2314 #: src/hed/libs/credential/Credential.cpp:2345 #, fuzzy msgid "Failed to initialize extensions member for Credential" msgstr "Fehler bei der 
Initialisierung der delegation credentials" #: src/hed/libs/credential/Credential.cpp:814 #, fuzzy, c-format msgid "Unsupported proxy policy language is requested - %s" msgstr "Nicht-unterstzütztes Protrokoll in URL %s" #: src/hed/libs/credential/Credential.cpp:826 #, fuzzy, c-format msgid "Unsupported proxy version is requested - %s" msgstr "Nicht-unterstzütztes Protrokoll in URL %s" #: src/hed/libs/credential/Credential.cpp:837 msgid "If you specify a policy you also need to specify a policy language" msgstr "" #: src/hed/libs/credential/Credential.cpp:1008 msgid "Certificate/Proxy path is empty" msgstr "" #: src/hed/libs/credential/Credential.cpp:1067 #: src/hed/libs/credential/Credential.cpp:2856 msgid "Failed to duplicate extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1071 #, fuzzy msgid "Failed to add extension into credential extensions" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/hed/libs/credential/Credential.cpp:1082 #, fuzzy msgid "Certificate information collection failed" msgstr "Ungültige Authentisierungs-Information" #: src/hed/libs/credential/Credential.cpp:1124 #: src/hed/libs/credential/Credential.cpp:1129 msgid "Can not convert string into ASN1_OBJECT" msgstr "" #: src/hed/libs/credential/Credential.cpp:1141 msgid "Can not create extension for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:1180 #: src/hed/libs/credential/Credential.cpp:1348 msgid "BN_set_word failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1189 #: src/hed/libs/credential/Credential.cpp:1357 msgid "RSA_generate_key_ex failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1198 #: src/hed/libs/credential/Credential.cpp:1365 msgid "BN_new || RSA_new failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1209 msgid "Created RSA key, proceeding with request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1214 msgid "pkey and rsa_key exist!" msgstr "" #: src/hed/libs/credential/Credential.cpp:1217 msgid "Generate new X509 request!" msgstr "" #: src/hed/libs/credential/Credential.cpp:1222 msgid "Setting subject name!" 
msgstr "" #: src/hed/libs/credential/Credential.cpp:1230 #: src/hed/libs/credential/Credential.cpp:1444 msgid "PEM_write_bio_X509_REQ failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1260 #: src/hed/libs/credential/Credential.cpp:1301 #: src/hed/libs/credential/Credential.cpp:1476 #: src/hed/libs/credential/Credential.cpp:1496 msgid "Can not create BIO for request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1278 msgid "Failed to write request into string" msgstr "" #: src/hed/libs/credential/Credential.cpp:1305 #: src/hed/libs/credential/Credential.cpp:1310 #: src/hed/libs/credential/Credential.cpp:1500 msgid "Can not set writable file for request BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:1316 #: src/hed/libs/credential/Credential.cpp:1505 msgid "Wrote request into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1318 #: src/hed/libs/credential/Credential.cpp:1508 msgid "Failed to write request into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1338 msgid "The credential's private key has already been initialized" msgstr "" #: src/hed/libs/credential/Credential.cpp:1386 msgid "" "Can not duplicate the subject name for the self-signing proxy certificate " "request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1396 msgid "Can not create a new X509_NAME_ENTRY for the proxy certificate request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1414 #: src/hed/libs/credential/Credential.cpp:1421 #: src/hed/libs/credential/Credential.cpp:1943 #: src/hed/libs/credential/Credential.cpp:1951 msgid "" "Can not convert PROXY_CERT_INFO_EXTENSION struct from internal to DER " "encoded format" msgstr "" #: src/hed/libs/credential/Credential.cpp:1451 msgid "Can't convert X509 request from internal to DER encoded format" msgstr "" #: src/hed/libs/credential/Credential.cpp:1461 msgid "Can not generate X509 request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1463 msgid "Can not set private key" msgstr "" #: src/hed/libs/credential/Credential.cpp:1561 msgid "Failed to get private key" msgstr "" #: src/hed/libs/credential/Credential.cpp:1580 msgid "Failed to get public key from RSA object" msgstr "" #: src/hed/libs/credential/Credential.cpp:1588 msgid "Failed to get public key from X509 object" msgstr "" #: src/hed/libs/credential/Credential.cpp:1595 msgid "Failed to get public key" msgstr "" #: src/hed/libs/credential/Credential.cpp:1634 #, c-format msgid "Certiticate chain number %d" msgstr "" #: src/hed/libs/credential/Credential.cpp:1662 msgid "NULL BIO passed to InquireRequest" msgstr "" #: src/hed/libs/credential/Credential.cpp:1665 msgid "PEM_read_bio_X509_REQ failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1669 msgid "d2i_X509_REQ_bio failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1702 msgid "Can not get policy from PROXY_CERT_INFO_EXTENSION extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1706 msgid "Can not get policy language from PROXY_CERT_INFO_EXTENSION extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1722 #, c-format msgid "Cert Type: %d" msgstr "" #: src/hed/libs/credential/Credential.cpp:1735 #: src/hed/libs/credential/Credential.cpp:1754 msgid "Can not create BIO for parsing request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1740 msgid "Read request from a string" msgstr "" #: src/hed/libs/credential/Credential.cpp:1743 msgid "Failed to read request from a string" msgstr "" #: src/hed/libs/credential/Credential.cpp:1758 msgid "Can not set readable file 
for request BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:1763 msgid "Read request from a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1766 msgid "Failed to read request from a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1806 msgid "Can not convert private key to DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:1924 msgid "Credential is not initialized" msgstr "" #: src/hed/libs/credential/Credential.cpp:1930 #, fuzzy msgid "Failed to duplicate X509 structure" msgstr "Fehler bei Lesen von Objekt %s" #: src/hed/libs/credential/Credential.cpp:1935 msgid "Failed to initialize X509 structure" msgstr "" #: src/hed/libs/credential/Credential.cpp:1958 msgid "Can not create extension for PROXY_CERT_INFO" msgstr "" #: src/hed/libs/credential/Credential.cpp:1962 #: src/hed/libs/credential/Credential.cpp:2010 msgid "Can not add X509 extension to proxy cert" msgstr "" #: src/hed/libs/credential/Credential.cpp:1978 msgid "Can not convert keyUsage struct from DER encoded format" msgstr "" #: src/hed/libs/credential/Credential.cpp:1990 #: src/hed/libs/credential/Credential.cpp:1999 msgid "Can not convert keyUsage struct from internal to DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2006 #, fuzzy msgid "Can not create extension for keyUsage" msgstr "Kann Funktion %s nicht anlegen" #: src/hed/libs/credential/Credential.cpp:2019 msgid "Can not get extended KeyUsage extension from issuer certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2024 msgid "Can not copy extended KeyUsage extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:2029 msgid "Can not add X509 extended KeyUsage extension to new proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2039 msgid "Can not compute digest of public key" msgstr "" #: src/hed/libs/credential/Credential.cpp:2050 msgid "Can not copy the subject name from issuer for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2056 msgid "Can not create name entry CN for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2061 msgid "Can not set CN in proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2069 msgid "Can not set issuer's subject for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2074 msgid "Can not set version number for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2082 msgid "Can not set serial number for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2088 #, fuzzy msgid "Can not duplicate serial number for proxy certificate" msgstr "" "Der Transfer des signierten delegation certificate zu Service schlug fehl" #: src/hed/libs/credential/Credential.cpp:2094 msgid "Can not set the lifetime for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2098 msgid "Can not set pubkey for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2114 #: src/hed/libs/credential/Credential.cpp:2744 msgid "The credential to be signed is NULL" msgstr "" #: src/hed/libs/credential/Credential.cpp:2118 #: src/hed/libs/credential/Credential.cpp:2748 msgid "The credential to be signed contains no request" msgstr "" #: src/hed/libs/credential/Credential.cpp:2122 #: src/hed/libs/credential/Credential.cpp:2752 msgid "The BIO for output is NULL" msgstr "" #: src/hed/libs/credential/Credential.cpp:2136 #: src/hed/libs/credential/Credential.cpp:2759 msgid "Error when extracting public key from request" msgstr "" #: 
src/hed/libs/credential/Credential.cpp:2141 #: src/hed/libs/credential/Credential.cpp:2763 msgid "Failed to verify the request" msgstr "" #: src/hed/libs/credential/Credential.cpp:2145 msgid "Failed to add issuer's extension into proxy" msgstr "" #: src/hed/libs/credential/Credential.cpp:2169 msgid "Failed to find extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:2181 msgid "Can not get the issuer's private key" msgstr "" #: src/hed/libs/credential/Credential.cpp:2188 #: src/hed/libs/credential/Credential.cpp:2796 msgid "There is no digest in issuer's private key object" msgstr "" #: src/hed/libs/credential/Credential.cpp:2193 #: src/hed/libs/credential/Credential.cpp:2800 #, c-format msgid "%s is an unsupported digest type" msgstr "" #: src/hed/libs/credential/Credential.cpp:2204 #, c-format msgid "" "The signing algorithm %s is not allowed,it should be SHA1 or SHA2 to sign " "certificate requests" msgstr "" #: src/hed/libs/credential/Credential.cpp:2210 msgid "Failed to sign the proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2212 msgid "Succeeded to sign the proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2217 msgid "Failed to verify the signed certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2219 msgid "Succeeded to verify the signed certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2224 #: src/hed/libs/credential/Credential.cpp:2233 msgid "Output the proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2227 msgid "Can not convert signed proxy cert into PEM format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2236 msgid "Can not convert signed proxy cert into DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2252 #: src/hed/libs/credential/Credential.cpp:2275 msgid "Can not create BIO for signed proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2279 msgid "Can not set writable file for signed proxy certificate BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:2284 msgid "Wrote signed proxy certificate into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:2287 msgid "Failed to write signed proxy certificate into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:2323 #: src/hed/libs/credential/Credential.cpp:2363 #, c-format msgid "ERROR:%s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2371 #, c-format msgid "SSL error: %s, libs: %s, func: %s, reason: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2416 #, c-format msgid "unable to load number from: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2421 msgid "error converting number from bin to BIGNUM" msgstr "" #: src/hed/libs/credential/Credential.cpp:2448 msgid "file name too long" msgstr "" #: src/hed/libs/credential/Credential.cpp:2471 msgid "error converting serial to ASN.1 format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2504 #, c-format msgid "load serial from %s failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2509 msgid "add_word failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2514 #, c-format msgid "save serial to %s failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2534 msgid "Error initialising X509 store" msgstr "" #: src/hed/libs/credential/Credential.cpp:2541 msgid "Out of memory when generate random serial" msgstr "" #: src/hed/libs/credential/Credential.cpp:2553 msgid "CA certificate and CA private key do not match" msgstr "" #: src/hed/libs/credential/Credential.cpp:2577 #, 
fuzzy, c-format msgid "Failed to load extension section: %s" msgstr "Fehler bei Schließen von Verbindung 1" #: src/hed/libs/credential/Credential.cpp:2614 msgid "malloc error" msgstr "" #: src/hed/libs/credential/Credential.cpp:2618 msgid "Subject does not start with '/'" msgstr "" #: src/hed/libs/credential/Credential.cpp:2634 #: src/hed/libs/credential/Credential.cpp:2655 msgid "escape character at end of string" msgstr "" #: src/hed/libs/credential/Credential.cpp:2646 #, c-format msgid "" "end of string encountered while processing type of subject name element #%d" msgstr "" #: src/hed/libs/credential/Credential.cpp:2683 #, c-format msgid "Subject Attribute %s has no known NID, skipped" msgstr "" #: src/hed/libs/credential/Credential.cpp:2687 #, c-format msgid "No value provided for Subject Attribute %s skipped" msgstr "" #: src/hed/libs/credential/Credential.cpp:2729 msgid "Failed to set the pubkey for X509 object by using pubkey from X509_REQ" msgstr "" #: src/hed/libs/credential/Credential.cpp:2739 msgid "The private key for signing is not initialized" msgstr "" #: src/hed/libs/credential/Credential.cpp:2819 #, c-format msgid "Error when loading the extension config file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2823 #, c-format msgid "Error when loading the extension config file: %s on line: %d" msgstr "" #: src/hed/libs/credential/Credential.cpp:2872 msgid "Can not sign a EEC" msgstr "" #: src/hed/libs/credential/Credential.cpp:2876 msgid "Output EEC certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2879 msgid "Can not convert signed EEC cert into DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2893 #: src/hed/libs/credential/Credential.cpp:2912 msgid "Can not create BIO for signed EEC certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2916 msgid "Can not set writable file for signed EEC certificate BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:2921 msgid "Wrote signed EEC certificate into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:2924 msgid "Failed to write signed EEC certificate into a file" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:147 #, fuzzy msgid "Error writing raw certificate" msgstr "Fehler beim Listen der replicas: %s" #: src/hed/libs/credential/NSSUtil.cpp:224 #, fuzzy msgid "Failed to add RFC proxy OID" msgstr "Fehler bei Lesen von Proxy-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:227 #, c-format msgid "Succeeded to add RFC proxy OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:233 msgid "Failed to add anyLanguage OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:236 #: src/hed/libs/credential/NSSUtil.cpp:254 #, c-format msgid "Succeeded to add anyLanguage OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:242 #, fuzzy msgid "Failed to add inheritAll OID" msgstr "Fehler bei Lesen von Objekt %s" #: src/hed/libs/credential/NSSUtil.cpp:245 #, c-format msgid "Succeeded to add inheritAll OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:251 #, fuzzy msgid "Failed to add Independent OID" msgstr "Fehler bei Lesen von Objekt %s" #: src/hed/libs/credential/NSSUtil.cpp:260 msgid "Failed to add VOMS AC sequence OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:263 #, c-format msgid "Succeeded to add VOMS AC sequence OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:292 #, c-format msgid "NSS initialization failed on certificate database: %s" msgstr "" #: 
src/hed/libs/credential/NSSUtil.cpp:303 #, fuzzy msgid "Succeeded to initialize NSS" msgstr "Erfolgreiche Authentifikation von SAMLToken" #: src/hed/libs/credential/NSSUtil.cpp:325 #, fuzzy, c-format msgid "Failed to read attribute %x from private key." msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:377 #, fuzzy msgid "Succeeded to get credential" msgstr "Erfolgreiche Authentifikation von SAMLToken" #: src/hed/libs/credential/NSSUtil.cpp:378 #, fuzzy msgid "Failed to get credential" msgstr "Fehler bei Bezug von FTP Datei" #: src/hed/libs/credential/NSSUtil.cpp:440 #, fuzzy msgid "p12 file is empty" msgstr "Policy ist leer" #: src/hed/libs/credential/NSSUtil.cpp:450 #, fuzzy msgid "Unable to write to p12 file" msgstr "Fehler bei Ablage von FTP Datei" #: src/hed/libs/credential/NSSUtil.cpp:466 #, fuzzy msgid "Failed to open pk12 file" msgstr "Fehler bei Ablage von FTP Datei" #: src/hed/libs/credential/NSSUtil.cpp:501 #, fuzzy msgid "Failed to allocate p12 context" msgstr "Fehler bei Reservieren von Platz" #: src/hed/libs/credential/NSSUtil.cpp:1211 #, fuzzy msgid "Failed to find issuer certificate for proxy certificate" msgstr "" "Der Transfer des signierten delegation certificate zu Service schlug fehl" #: src/hed/libs/credential/NSSUtil.cpp:1362 #, fuzzy, c-format msgid "Failed to authenticate to PKCS11 slot %s" msgstr "Fehler beim Authentifizieren: %s" #: src/hed/libs/credential/NSSUtil.cpp:1368 #, fuzzy, c-format msgid "Failed to find certificates by nickname: %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1373 #, c-format msgid "No user certificate by nickname %s found" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1386 #: src/hed/libs/credential/NSSUtil.cpp:1422 #, fuzzy msgid "Certificate does not have a slot" msgstr "Cache-Datei %s existiert nicht" #: src/hed/libs/credential/NSSUtil.cpp:1392 #, fuzzy msgid "Failed to create export context" msgstr "Fehler bei Anlegen von GSI Context: %s" #: src/hed/libs/credential/NSSUtil.cpp:1407 msgid "PKCS12 output password not provided" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1414 msgid "PKCS12 add password integrity failed" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1435 #, fuzzy msgid "Failed to create key or certificate safe" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1451 #, fuzzy msgid "Failed to add certificate and key" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1460 #, fuzzy, c-format msgid "Failed to initialize PKCS12 file: %s" msgstr "Fehler bei Schreiben zu Datei %s: %s" #: src/hed/libs/credential/NSSUtil.cpp:1465 #, fuzzy msgid "Failed to encode PKCS12" msgstr "Fehler beim Senden von body" #: src/hed/libs/credential/NSSUtil.cpp:1468 msgid "Succeeded to export PKCS12" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1496 #, c-format msgid "" "There is no certificate named %s found, the certificate could be removed " "when generating CSR" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1502 #, fuzzy msgid "Failed to delete certificate" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1516 msgid "The name of the private key to delete is empty" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1521 #: src/hed/libs/credential/NSSUtil.cpp:1605 #, fuzzy, c-format msgid "Failed to authenticate to token %s." 
msgstr "Fehler beim Authentifizieren: %s" #: src/hed/libs/credential/NSSUtil.cpp:1528 #, c-format msgid "No private key with nickname %s exist in NSS database" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1561 #, fuzzy msgid "Failed to delete private key and certificate" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1571 #, fuzzy msgid "Failed to delete private key" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1582 #, fuzzy, c-format msgid "Can not find key with name: %s" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/hed/libs/credential/NSSUtil.cpp:1616 #, fuzzy, c-format msgid "Failed to delete private key that attaches to certificate: %s" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1637 msgid "Can not read PEM private key: probably bad password" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1639 #, fuzzy msgid "Can not read PEM private key: failed to decrypt" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1641 #: src/hed/libs/credential/NSSUtil.cpp:1643 #, fuzzy msgid "Can not read PEM private key: failed to obtain password" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1644 #, fuzzy msgid "Can not read PEM private key" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1651 msgid "Failed to convert EVP_PKEY to PKCS8" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1688 #, fuzzy msgid "Failed to load private key" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1689 msgid "Succeeded to load PrivateKeyInfo" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1692 #, fuzzy msgid "Failed to convert PrivateKeyInfo to EVP_PKEY" msgstr "Fehler bei Konvertieren von security information für ARC policy" #: src/hed/libs/credential/NSSUtil.cpp:1693 msgid "Succeeded to convert PrivateKeyInfo to EVP_PKEY" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1730 #, fuzzy msgid "Failed to import private key" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1733 #, fuzzy msgid "Succeeded to import private key" msgstr "Erfolreiche Anthentifikation von SAMLTOken" #: src/hed/libs/credential/NSSUtil.cpp:1746 #: src/hed/libs/credential/NSSUtil.cpp:1788 #: src/hed/libs/credential/NSSUtil.cpp:2920 #, fuzzy msgid "Failed to authenticate to key database" msgstr "Fehler beim Authentifizieren: %s" #: src/hed/libs/credential/NSSUtil.cpp:1755 #, fuzzy msgid "Succeeded to generate public/private key pair" msgstr "Erfolreiche Anthentifikation von SAMLTOken" #: src/hed/libs/credential/NSSUtil.cpp:1757 #, fuzzy msgid "Failed to generate public/private key pair" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1762 #, fuzzy msgid "Failed to export private key" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1829 #, fuzzy msgid "Failed to create subject name" msgstr "Fehler bei Lesen von Objekt %s" #: src/hed/libs/credential/NSSUtil.cpp:1845 #, fuzzy msgid "Failed to create certificate request" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1858 #, fuzzy msgid "Failed to call PORT_NewArena" msgstr "Fehler beim Reservieren von Speicher" #: 
src/hed/libs/credential/NSSUtil.cpp:1866 #, fuzzy msgid "Failed to encode the certificate request with DER format" msgstr "Fehler beim Signieren der Anfrage nach Ausstellen eines Zertifikats" #: src/hed/libs/credential/NSSUtil.cpp:1873 msgid "Unknown key or hash type" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1879 #, fuzzy msgid "Failed to sign the certificate request" msgstr "Fehler beim Signieren der Anfrage nach Ausstellen eines Zertifikats" #: src/hed/libs/credential/NSSUtil.cpp:1895 #, fuzzy msgid "Failed to output the certificate request as ASCII format" msgstr "Fehler beim Signieren der Anfrage nach Ausstellen eines Zertifikats" #: src/hed/libs/credential/NSSUtil.cpp:1904 #, fuzzy msgid "Failed to output the certificate request as DER format" msgstr "Fehler beim Signieren der Anfrage nach Ausstellen eines Zertifikats" #: src/hed/libs/credential/NSSUtil.cpp:1913 #, fuzzy, c-format msgid "Succeeded to output the certificate request into %s" msgstr "Erfolgreiche Authentifikation des UsernameToken" #: src/hed/libs/credential/NSSUtil.cpp:1952 #: src/hed/libs/credential/NSSUtil.cpp:1989 #, fuzzy msgid "Failed to read data from input file" msgstr "Konnte Metadaten für Datei %s nicht finden" #: src/hed/libs/credential/NSSUtil.cpp:1968 msgid "Input is without trailer\n" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1979 #, fuzzy msgid "Failed to convert ASCII to DER" msgstr "Fehler beim Verbinden zu RLS server: %s" #: src/hed/libs/credential/NSSUtil.cpp:2030 msgid "Certificate request is invalid" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2252 #, fuzzy, c-format msgid "The policy language: %s is not supported" msgstr "Der Erhalt von BES Jobs wird nicht unterstützt" #: src/hed/libs/credential/NSSUtil.cpp:2260 #: src/hed/libs/credential/NSSUtil.cpp:2285 #: src/hed/libs/credential/NSSUtil.cpp:2308 #: src/hed/libs/credential/NSSUtil.cpp:2330 #, fuzzy msgid "Failed to new arena" msgstr "Fehler bei Transfer von Daten" #: src/hed/libs/credential/NSSUtil.cpp:2269 #: src/hed/libs/credential/NSSUtil.cpp:2294 #, fuzzy msgid "Failed to create path length" msgstr "Fehler bei Anlegen von soft link: %s" #: src/hed/libs/credential/NSSUtil.cpp:2272 #: src/hed/libs/credential/NSSUtil.cpp:2297 #: src/hed/libs/credential/NSSUtil.cpp:2317 #: src/hed/libs/credential/NSSUtil.cpp:2339 #, fuzzy msgid "Failed to create policy language" msgstr "Fehler bei Anlegen von soft link: %s" #: src/hed/libs/credential/NSSUtil.cpp:2738 #, fuzzy, c-format msgid "Failed to parse certificate request from CSR file %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2745 #, fuzzy, c-format msgid "Can not find certificate with name %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2776 #, fuzzy, c-format msgid "Proxy subject: %s" msgstr "Subjekt: %s" #: src/hed/libs/credential/NSSUtil.cpp:2795 #, fuzzy msgid "Failed to start certificate extension" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2800 #, fuzzy msgid "Failed to add key usage extension" msgstr "Fehler beim Lesen von SSL Token während Authentifizierung" #: src/hed/libs/credential/NSSUtil.cpp:2805 #, fuzzy msgid "Failed to add proxy certificate information extension" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2809 #, fuzzy msgid "Failed to add voms AC extension" msgstr "Fehler bei Lesen von Objekt %s" #: src/hed/libs/credential/NSSUtil.cpp:2829 #, fuzzy msgid "Failed to retrieve private key 
for issuer" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2836 msgid "Unknown key or hash type of issuer" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2842 #, fuzzy msgid "Failed to set signature algorithm ID" msgstr "Fehler bei der Überprüfung der Signatur unter " #: src/hed/libs/credential/NSSUtil.cpp:2854 #, fuzzy msgid "Failed to encode certificate" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2860 #, fuzzy msgid "Failed to allocate item for certificate data" msgstr "Fehler beim Reservieren von Speicher" #: src/hed/libs/credential/NSSUtil.cpp:2866 #, fuzzy msgid "Failed to sign encoded certificate data" msgstr "Fehler beim Signieren der Anfrage nach Austellen eines Zertifikats" #: src/hed/libs/credential/NSSUtil.cpp:2875 #, fuzzy, c-format msgid "Failed to open file %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/libs/credential/NSSUtil.cpp:2886 #, fuzzy, c-format msgid "Succeeded to output certificate to %s" msgstr "Erfolreiche Anthentifikation von SAMLTOken" #: src/hed/libs/credential/NSSUtil.cpp:2927 #, fuzzy, c-format msgid "Failed to open input certificate file %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2945 #, fuzzy msgid "Failed to read input certificate file" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2950 #, fuzzy msgid "Failed to get certificate from certificate file" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2957 #, fuzzy msgid "Failed to allocate certificate trust" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2962 #, fuzzy msgid "Failed to decode trust string" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/libs/credential/NSSUtil.cpp:2971 #: src/hed/libs/credential/NSSUtil.cpp:2988 #, fuzzy, c-format msgid "Failed to authenticate to token %s" msgstr "Fehler beim Authentifizieren: %s" #: src/hed/libs/credential/NSSUtil.cpp:2976 #: src/hed/libs/credential/NSSUtil.cpp:2993 #, fuzzy msgid "Failed to add certificate to token or database" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2979 #: src/hed/libs/credential/NSSUtil.cpp:2982 #, fuzzy msgid "Succeeded to import certificate" msgstr "Erfolreiche Anthentifikation von SAMLTOken" #: src/hed/libs/credential/NSSUtil.cpp:2996 #: src/hed/libs/credential/NSSUtil.cpp:2999 #, c-format msgid "Succeeded to change trusts to: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:3026 #, fuzzy, c-format msgid "Failed to import private key from file: %s" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:3028 #, fuzzy, c-format msgid "Failed to import certificate from file: %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/VOMSConfig.cpp:142 #, c-format msgid "" "ERROR: VOMS configuration line contains too many tokens. Expecting 5 or 6. " "Line was: %s" msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:158 #, c-format msgid "" "ERROR: file tree is too deep while scanning VOMS configuration. Max allowed " "nesting is %i." msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:176 #, c-format msgid "ERROR: failed to read file %s while scanning VOMS configuration." msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:181 #, c-format msgid "" "ERROR: VOMS configuration file %s contains too many lines. 
Max supported " "number is %i." msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:188 #, c-format msgid "" "ERROR: VOMS configuration file %s contains too long line(s). Max supported " "length is %i characters." msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:185 #, fuzzy, c-format msgid "Failed to create OpenSSL object %s %s - %u %s" msgstr "Fehler bei Lesen von Objekt %s: %s" #: src/hed/libs/credential/VOMSUtil.cpp:193 #, fuzzy, c-format msgid "Failed to obtain OpenSSL identifier for %s" msgstr "Konnte Listing nicht via FTP beziehen: %s" #: src/hed/libs/credential/VOMSUtil.cpp:346 #, c-format msgid "VOMS: create FQAN: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:384 #, c-format msgid "VOMS: create attribute: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:670 msgid "VOMS: Can not allocate memory for parsing AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:678 msgid "VOMS: Can not allocate memory for storing the order of AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:704 msgid "VOMS: Can not parse AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:734 msgid "VOMS: CA directory or CA file must be provided" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:798 msgid "VOMS: failed to verify AC signature" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:867 #, c-format msgid "VOMS: trust chain to check: %s " msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:875 #, c-format msgid "" "VOMS: the DN in certificate: %s does not match that in trusted DN list: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:881 #, c-format msgid "" "VOMS: the Issuer identity in certificate: %s does not match that in trusted " "DN list: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:916 #, fuzzy, c-format msgid "VOMS: The lsc file %s does not exist" msgstr "Cache-Datei %s existiert nicht" #: src/hed/libs/credential/VOMSUtil.cpp:922 #, c-format msgid "VOMS: The lsc file %s can not be open" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:970 msgid "" "VOMS: there is no constraints of trusted voms DNs, the certificates stack in " "AC will not be checked." 
msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1003 msgid "VOMS: unable to match certificate chain against VOMS trusted DNs" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1023 #, fuzzy msgid "VOMS: AC signature verification failed" msgstr "SOAP Aufruf fehlgeschlagen" #: src/hed/libs/credential/VOMSUtil.cpp:1032 msgid "VOMS: unable to verify certificate chain" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1038 #, c-format msgid "VOMS: cannot validate AC issuer for VO %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1061 #, c-format msgid "VOMS: directory for trusted service certificates: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1087 #, c-format msgid "VOMS: Cannot find certificate of AC issuer for VO %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1109 msgid "VOMS: Can not find AC_ATTR with IETFATTR type" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1116 msgid "VOMS: case of multiple IETFATTR attributes not supported" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1126 msgid "VOMS: case of multiple policyAuthority not supported" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1142 msgid "VOMS: the format of policyAuthority is unsupported - expecting URI" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1151 msgid "" "VOMS: the format of IETFATTRVAL is not supported - expecting OCTET STRING" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1228 msgid "VOMS: the grantor attribute is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1246 msgid "VOMS: the attribute name is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1252 #, c-format msgid "VOMS: the attribute value for %s is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1257 msgid "VOMS: the attribute qualifier is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1289 msgid "" "VOMS: both idcenoRevAvail and authorityKeyIdentifier certificate extensions " "must be present" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1323 #, c-format msgid "VOMS: FQDN of this host %s does not match any target in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1328 msgid "VOMS: the only supported critical extension of the AC is idceTargets" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1343 msgid "VOMS: failed to parse attributes from AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1387 msgid "VOMS: authorityKey is wrong" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1415 msgid "VOMS: missing AC parts" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1432 msgid "VOMS: unsupported time format format in AC - expecting GENERALIZED TIME" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1438 msgid "VOMS: AC is not yet valid" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1445 msgid "VOMS: AC has expired" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1460 msgid "VOMS: AC is not complete - missing Serial or Issuer information" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1465 #, c-format msgid "VOMS: the holder serial number is: %lx" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1466 #, c-format msgid "VOMS: the serial number in AC is: %lx" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1469 #, c-format msgid "" "VOMS: the holder serial number %lx is not the same as the serial number in " "AC %lx, the holder certificate that is used to create a voms proxy could be " "a proxy certificate with a different serial number as the original EEC cert" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1478 msgid "VOMS: the holder information in AC is 
wrong" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1500 #, c-format msgid "VOMS: DN of holder in AC: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1501 #, c-format msgid "VOMS: DN of holder: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1502 #, c-format msgid "VOMS: DN of issuer: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1509 msgid "" "VOMS: the holder name in AC is not related to the distinguished name in " "holder certificate" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1521 #: src/hed/libs/credential/VOMSUtil.cpp:1528 msgid "VOMS: the holder issuerUID is not the same as that in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1541 msgid "VOMS: the holder issuer name is not the same as that in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1551 msgid "VOMS: the issuer information in AC is wrong" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1559 #, c-format msgid "VOMS: the issuer name %s is not the same as that in AC - %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1567 msgid "" "VOMS: the serial number of AC INFO is too long - expecting no more than 20 " "octets" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1597 #: src/hed/libs/credential/VOMSUtil.cpp:1605 #: src/hed/libs/credential/VOMSUtil.cpp:1613 #: src/hed/libs/credential/VOMSUtil.cpp:1621 #: src/hed/libs/credential/VOMSUtil.cpp:1644 msgid "VOMS: unable to extract VO name from AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1635 #, c-format msgid "VOMS: unable to determine hostname of AC from VO name: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1654 msgid "VOMS: can not verify the signature of the AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1660 msgid "VOMS: problems while parsing information in AC" msgstr "" #: src/hed/libs/credential/test/VOMSUtilTest.cpp:128 #, c-format msgid "Line %d.%d of the attributes returned: %s" msgstr "" #: src/hed/libs/credentialstore/ClientVOMS.cpp:149 msgid "voms" msgstr "" #: src/hed/libs/credentialstore/CredentialStore.cpp:194 #: src/hed/libs/credentialstore/CredentialStore.cpp:245 #: src/hed/libs/credentialstore/CredentialStore.cpp:273 #: src/hed/libs/credentialstore/CredentialStore.cpp:336 #: src/hed/libs/credentialstore/CredentialStore.cpp:376 #: src/hed/libs/credentialstore/CredentialStore.cpp:406 #, fuzzy, c-format msgid "MyProxy failure: %s" msgstr "Proxy Pfad: %s" #: src/hed/libs/crypto/OpenSSL.cpp:68 #, c-format msgid "SSL error: %d - %s:%s:%s" msgstr "SSL Fehler: %d - %s:%s:%s" #: src/hed/libs/crypto/OpenSSL.cpp:81 #, fuzzy msgid "SSL locks not initialized" msgstr "FATAL: SSL Locks nicht initialisiert" #: src/hed/libs/crypto/OpenSSL.cpp:85 #, fuzzy, c-format msgid "wrong SSL lock requested: %i of %i: %i - %s" msgstr "FATAL: falsches SSL lock angefragt: %i von %i: %i - %s" #: src/hed/libs/crypto/OpenSSL.cpp:112 #, fuzzy msgid "Failed to lock arccrypto library in memory" msgstr "Fehler bei Lock von arccrypto Bibliothek in Speicher" #: src/hed/libs/crypto/OpenSSL.cpp:117 src/hed/libs/crypto/OpenSSL.cpp:128 msgid "Failed to initialize OpenSSL library" msgstr "Fehler bei Initialisierung von OpenSSL Bibliothek" #: src/hed/libs/crypto/OpenSSL.cpp:150 msgid "Number of OpenSSL locks changed - reinitializing" msgstr "Anzahl von OpenSSL locks verändert - reinitialisierung" #: src/hed/libs/data/DataMover.cpp:111 msgid "No locations found - probably no more physical instances" msgstr "" #: src/hed/libs/data/DataMover.cpp:117 src/hed/libs/data/FileCache.cpp:673 #: src/libs/data-staging/Processor.cpp:458 #: 
src/libs/data-staging/Processor.cpp:472 #, c-format msgid "Removing %s" msgstr "Entferne %s" #: src/hed/libs/data/DataMover.cpp:130 msgid "This instance was already deleted" msgstr "" #: src/hed/libs/data/DataMover.cpp:136 msgid "Failed to delete physical file" msgstr "" #: src/hed/libs/data/DataMover.cpp:147 #, c-format msgid "Removing metadata in %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:151 msgid "Failed to delete meta-information" msgstr "" #: src/hed/libs/data/DataMover.cpp:165 msgid "Failed to remove all physical instances" msgstr "" #: src/hed/libs/data/DataMover.cpp:169 #, c-format msgid "Removing logical file from metadata %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:172 msgid "Failed to delete logical file" msgstr "" #: src/hed/libs/data/DataMover.cpp:179 msgid "Failed to remove instance" msgstr "" #: src/hed/libs/data/DataMover.cpp:228 msgid "DataMover::Transfer : starting new thread" msgstr "" #: src/hed/libs/data/DataMover.cpp:256 #, c-format msgid "Transfer from %s to %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:258 msgid "Not valid source" msgstr "" #: src/hed/libs/data/DataMover.cpp:263 msgid "Not valid destination" msgstr "" #: src/hed/libs/data/DataMover.cpp:283 #: src/services/cache_service/CacheService.cpp:294 #, c-format msgid "Couldn't handle certificate: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:293 src/hed/libs/data/DataMover.cpp:591 #: src/libs/data-staging/Processor.cpp:137 #, c-format msgid "File %s is cached (%s) - checking permissions" msgstr "" #: src/hed/libs/data/DataMover.cpp:297 src/hed/libs/data/DataMover.cpp:610 #: src/hed/libs/data/DataMover.cpp:672 src/libs/data-staging/Processor.cpp:156 msgid "Permission checking passed" msgstr "" #: src/hed/libs/data/DataMover.cpp:298 src/hed/libs/data/DataMover.cpp:630 #: src/hed/libs/data/DataMover.cpp:1136 msgid "Linking/copying cached file" msgstr "" #: src/hed/libs/data/DataMover.cpp:323 #, c-format msgid "No locations for source found: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:327 #, c-format msgid "Failed to resolve source: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:339 src/hed/libs/data/DataMover.cpp:407 #, c-format msgid "No locations for destination found: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:344 src/hed/libs/data/DataMover.cpp:411 #, c-format msgid "Failed to resolve destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:359 #, c-format msgid "No locations for destination different from source found: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:380 #, c-format msgid "DataMover::Transfer: trying to destroy/overwrite destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:391 #, c-format msgid "Failed to delete %s but will still try to copy" msgstr "" #: src/hed/libs/data/DataMover.cpp:394 #, c-format msgid "Failed to delete %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:421 #, c-format msgid "Deleted but still have locations at %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:433 msgid "DataMover: cycle" msgstr "" #: src/hed/libs/data/DataMover.cpp:435 msgid "DataMover: no retries requested - exit" msgstr "" #: src/hed/libs/data/DataMover.cpp:440 msgid "DataMover: source out of tries - exit" msgstr "" #: src/hed/libs/data/DataMover.cpp:442 msgid "DataMover: destination out of tries - exit" msgstr "" #: src/hed/libs/data/DataMover.cpp:450 #, c-format msgid "Real transfer from %s to %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:477 #, c-format msgid "Creating buffer: %lli x %i" msgstr "" #: src/hed/libs/data/DataMover.cpp:493 #, c-format 
msgid "DataMove::Transfer: no checksum calculation for %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:498 #, c-format msgid "DataMove::Transfer: using supplied checksum %s:%s" msgstr "" #: src/hed/libs/data/DataMover.cpp:522 #, c-format msgid "DataMove::Transfer: will calculate %s checksum" msgstr "" #: src/hed/libs/data/DataMover.cpp:527 msgid "Buffer creation failed !" msgstr "" #: src/hed/libs/data/DataMover.cpp:550 #, c-format msgid "URL is mapped to: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:580 src/hed/libs/data/DataMover.cpp:639 #: src/libs/data-staging/Processor.cpp:91 msgid "Cached file is locked - should retry" msgstr "" #: src/hed/libs/data/DataMover.cpp:585 src/libs/data-staging/Processor.cpp:110 msgid "Failed to initiate cache" msgstr "" #: src/hed/libs/data/DataMover.cpp:602 #: src/services/cache_service/CacheService.cpp:366 #, c-format msgid "Permission checking failed: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:604 src/hed/libs/data/DataMover.cpp:664 #: src/hed/libs/data/DataMover.cpp:686 src/hed/libs/data/DataMover.cpp:697 msgid "source.next_location" msgstr "" #: src/hed/libs/data/DataMover.cpp:618 src/libs/data-staging/Processor.cpp:161 #, fuzzy, c-format msgid "Source modification date: %s" msgstr "Anlegen von Socket schlug fehl: %s" #: src/hed/libs/data/DataMover.cpp:619 src/libs/data-staging/Processor.cpp:162 #, c-format msgid "Cache creation date: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:625 src/libs/data-staging/Processor.cpp:167 msgid "Cached file is outdated, will re-download" msgstr "" #: src/hed/libs/data/DataMover.cpp:629 src/libs/data-staging/Processor.cpp:173 msgid "Cached copy is still valid" msgstr "" #: src/hed/libs/data/DataMover.cpp:657 msgid "URL is mapped to local access - checking permissions on original URL" msgstr "" #: src/hed/libs/data/DataMover.cpp:661 #, c-format msgid "Permission checking on original URL failed: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:674 msgid "Linking local file" msgstr "" #: src/hed/libs/data/DataMover.cpp:694 #, c-format msgid "Failed to make symbolic link %s to %s : %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:703 #, fuzzy, c-format msgid "Failed to change owner of symbolic link %s to %i" msgstr "Fehler bei Ändernn des Owner von hard link zu %i: %s" #: src/hed/libs/data/DataMover.cpp:715 #, c-format msgid "cache file: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:741 #, fuzzy, c-format msgid "Failed to stat source %s" msgstr "Fehler bei Lesen von Objekt %s" #: src/hed/libs/data/DataMover.cpp:743 src/hed/libs/data/DataMover.cpp:758 #: src/hed/libs/data/DataMover.cpp:795 src/hed/libs/data/DataMover.cpp:814 #: src/hed/libs/data/DataMover.cpp:982 src/hed/libs/data/DataMover.cpp:1014 #: src/hed/libs/data/DataMover.cpp:1024 src/hed/libs/data/DataMover.cpp:1101 msgid "(Re)Trying next source" msgstr "" #: src/hed/libs/data/DataMover.cpp:756 #, c-format msgid "Meta info of source and location do not match for %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:770 #, c-format msgid "" "Replica %s has high latency, but no more sources exist so will use this one" msgstr "" #: src/hed/libs/data/DataMover.cpp:774 #, c-format msgid "Replica %s has high latency, trying next source" msgstr "" #: src/hed/libs/data/DataMover.cpp:789 #, fuzzy, c-format msgid "Failed to prepare source: %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/libs/data/DataMover.cpp:805 #, c-format msgid "Failed to start reading from source: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:826 #, fuzzy msgid "Metadata of 
source and destination are different" msgstr "" "Файл назначения совпадает с исходным.\n" "%1" #: src/hed/libs/data/DataMover.cpp:847 #, c-format msgid "Failed to preregister destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:852 src/hed/libs/data/DataMover.cpp:1125 msgid "destination.next_location" msgstr "" #: src/hed/libs/data/DataMover.cpp:866 #, fuzzy, c-format msgid "Failed to prepare destination: %s" msgstr "Fehler bei Anlegen von soft link: %s" #: src/hed/libs/data/DataMover.cpp:873 src/hed/libs/data/DataMover.cpp:897 #: src/hed/libs/data/DataMover.cpp:1122 #, c-format msgid "" "Failed to unregister preregistered lfn. You may need to unregister it " "manually: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:877 src/hed/libs/data/DataMover.cpp:900 #: src/hed/libs/data/DataMover.cpp:991 src/hed/libs/data/DataMover.cpp:1007 #: src/hed/libs/data/DataMover.cpp:1030 src/hed/libs/data/DataMover.cpp:1077 msgid "(Re)Trying next destination" msgstr "" #: src/hed/libs/data/DataMover.cpp:889 #, c-format msgid "Failed to start writing to destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:913 msgid "Failed to start writing to cache" msgstr "" #: src/hed/libs/data/DataMover.cpp:921 src/hed/libs/data/DataMover.cpp:969 #: src/hed/libs/data/DataMover.cpp:1148 msgid "" "Failed to unregister preregistered lfn. You may need to unregister it " "manually" msgstr "" #: src/hed/libs/data/DataMover.cpp:929 msgid "Waiting for buffer" msgstr "" #: src/hed/libs/data/DataMover.cpp:936 #, fuzzy, c-format msgid "Failed updating timestamp on cache lock file %s for file %s: %s" msgstr "Fehler bei Erstellen von Info-Datei %s: %s" #: src/hed/libs/data/DataMover.cpp:941 #, c-format msgid "buffer: read EOF : %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:942 #, fuzzy, c-format msgid "buffer: write EOF: %s" msgstr "Globus error (Schreiben): %s" #: src/hed/libs/data/DataMover.cpp:943 #, c-format msgid "buffer: error : %s, read: %s, write: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:944 msgid "Closing read channel" msgstr "" #: src/hed/libs/data/DataMover.cpp:950 msgid "Closing write channel" msgstr "" #: src/hed/libs/data/DataMover.cpp:958 #, fuzzy msgid "Failed to complete writing to destination" msgstr "Fehler bei Schreiben zu Ziel" #: src/hed/libs/data/DataMover.cpp:974 #, fuzzy msgid "Transfer cancelled successfully" msgstr "Job erfolgreich abgebrochen" #: src/hed/libs/data/DataMover.cpp:1019 msgid "Cause of failure unclear - choosing randomly" msgstr "" #: src/hed/libs/data/DataMover.cpp:1062 #, c-format msgid "" "Checksum mismatch between checksum given as meta option (%s:%s) and " "calculated checksum (%s)" msgstr "" #: src/hed/libs/data/DataMover.cpp:1070 msgid "" "Failed to unregister preregistered lfn, You may need to unregister it " "manually" msgstr "" #: src/hed/libs/data/DataMover.cpp:1074 #, fuzzy msgid "Failed to delete destination, retry may fail" msgstr "Fehler bei der Initialisierung der delegation credentials" #: src/hed/libs/data/DataMover.cpp:1084 #, fuzzy msgid "Cannot compare empty checksum" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/hed/libs/data/DataMover.cpp:1091 #: src/libs/data-staging/DataStagingDelivery.cpp:456 msgid "Checksum type of source and calculated checksum differ, cannot compare" msgstr "" #: src/hed/libs/data/DataMover.cpp:1093 #, c-format msgid "Checksum mismatch between calcuated checksum %s and source checksum %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:1106 #: src/libs/data-staging/DataStagingDelivery.cpp:472 #, 
c-format msgid "Calculated transfer checksum %s matches source checksum" msgstr "" #: src/hed/libs/data/DataMover.cpp:1112 #: src/libs/data-staging/DataStagingDelivery.cpp:475 msgid "Checksum not computed" msgstr "" #: src/hed/libs/data/DataMover.cpp:1118 #, c-format msgid "Failed to postregister destination %s" msgstr "" #: src/hed/libs/data/DataPoint.cpp:83 #, fuzzy, c-format msgid "Invalid URL option: %s" msgstr "Ungültige URL Option: %s" #: src/hed/libs/data/DataPoint.cpp:254 #, fuzzy, c-format msgid "Skipping invalid URL option %s" msgstr "Ungültige URL Option: %s" #: src/hed/libs/data/DataPoint.cpp:269 msgid "" "Third party transfer was requested but the corresponding plugin could\n" " not be loaded. Is the GFAL plugin installed? If not, please install " "the\n" " packages 'nordugrid-arc-plugins-gfal' and 'gfal2-all'. Depending on\n" " your type of installation the package names might differ." msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:91 #, fuzzy, c-format msgid "Can't handle location %s" msgstr "Kann Funktion %s nicht anlegen" #: src/hed/libs/data/DataPointIndex.cpp:183 msgid "Sorting replicas according to URL map" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:187 #, c-format msgid "Replica %s is mapped" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:195 #, c-format msgid "Sorting replicas according to preferred pattern %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:218 #: src/hed/libs/data/DataPointIndex.cpp:236 #, fuzzy, c-format msgid "Excluding replica %s matching pattern !%s" msgstr "Replica %s existiert bereits für LFN %s" #: src/hed/libs/data/DataPointIndex.cpp:229 #, fuzzy, c-format msgid "Replica %s matches host pattern %s" msgstr "Replica %s existiert bereits für LFN %s" #: src/hed/libs/data/DataPointIndex.cpp:247 #, c-format msgid "Replica %s matches pattern %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:263 #, c-format msgid "Replica %s doesn't match preferred pattern or URL map" msgstr "" #: src/hed/libs/data/DataStatus.cpp:12 msgid "Operation completed successfully" msgstr "Operation erfolgreich abgeschlossen" #: src/hed/libs/data/DataStatus.cpp:13 #, fuzzy msgid "Source is invalid URL" msgstr "Quelle muss LFN enthalten" #: src/hed/libs/data/DataStatus.cpp:14 #, fuzzy msgid "Destination is invalid URL" msgstr "Destination muss LFN enthalten" #: src/hed/libs/data/DataStatus.cpp:15 #, fuzzy msgid "Resolving of index service for source failed" msgstr "Auflösen von index service URL für Quelle schlug fehl" #: src/hed/libs/data/DataStatus.cpp:16 #, fuzzy msgid "Resolving of index service for destination failed" msgstr "Auflösen von index service URL für Ziel schlug fehl" #: src/hed/libs/data/DataStatus.cpp:17 msgid "Can't read from source" msgstr "Kann nicht von Quelle lesen" #: src/hed/libs/data/DataStatus.cpp:18 msgid "Can't write to destination" msgstr "Kann nicht zu Ziel schreiben" #: src/hed/libs/data/DataStatus.cpp:19 msgid "Failed while reading from source" msgstr "Fehler bei Lesen von Quelle" #: src/hed/libs/data/DataStatus.cpp:20 msgid "Failed while writing to destination" msgstr "Fehler bei Schreiben zu Ziel" #: src/hed/libs/data/DataStatus.cpp:21 #, fuzzy msgid "Failed while transferring data" msgstr "Fehler bei Transfer von Daten" #: src/hed/libs/data/DataStatus.cpp:22 msgid "Failed while finishing reading from source" msgstr "Fehler bei Abschluß des Lesens von Quelle" #: src/hed/libs/data/DataStatus.cpp:23 msgid "Failed while finishing writing to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:24 msgid "First stage of 
registration to index service failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:25 msgid "Last stage of registration to index service failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:26 #, fuzzy msgid "Unregistering from index service failed" msgstr "Keine Antwort von AA service %s schlug fehl" #: src/hed/libs/data/DataStatus.cpp:27 msgid "Error in caching procedure" msgstr "" #: src/hed/libs/data/DataStatus.cpp:28 msgid "Error due to expiration of provided credentials" msgstr "" #: src/hed/libs/data/DataStatus.cpp:29 #, fuzzy msgid "Delete error" msgstr "Löschte %s" #: src/hed/libs/data/DataStatus.cpp:30 msgid "No valid location available" msgstr "" #: src/hed/libs/data/DataStatus.cpp:31 msgid "Location already exists" msgstr "" #: src/hed/libs/data/DataStatus.cpp:32 msgid "Operation not supported for this kind of URL" msgstr "" #: src/hed/libs/data/DataStatus.cpp:33 msgid "Feature is not implemented" msgstr "" #: src/hed/libs/data/DataStatus.cpp:34 #, fuzzy msgid "Already reading from source" msgstr "Fehler bei Lesen von Quelle" #: src/hed/libs/data/DataStatus.cpp:35 #, fuzzy msgid "Already writing to destination" msgstr "Fehler bei Schreiben zu Ziel" #: src/hed/libs/data/DataStatus.cpp:36 #, fuzzy msgid "Read access check failed" msgstr "Lese Archiv Datei %s" #: src/hed/libs/data/DataStatus.cpp:37 #, fuzzy msgid "Directory listing failed" msgstr "Fehler beim Auflisten von Dateien" #: src/hed/libs/data/DataStatus.cpp:38 msgid "Object is not suitable for listing" msgstr "" #: src/hed/libs/data/DataStatus.cpp:39 #, fuzzy msgid "Failed to obtain information about file" msgstr "Konnte Listing nicht via FTP beziehen: %s" #: src/hed/libs/data/DataStatus.cpp:40 #, fuzzy msgid "No such file or directory" msgstr "Fehler beim listen von Datei oder Verzeichnis: %s" #: src/hed/libs/data/DataStatus.cpp:41 msgid "Object not initialized (internal error)" msgstr "" #: src/hed/libs/data/DataStatus.cpp:42 msgid "Operating System error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:43 msgid "Failed to stage file(s)" msgstr "" #: src/hed/libs/data/DataStatus.cpp:44 msgid "Inconsistent metadata" msgstr "" #: src/hed/libs/data/DataStatus.cpp:45 #, fuzzy msgid "Failed to prepare source" msgstr "Fehler bei Reservieren von Platz" #: src/hed/libs/data/DataStatus.cpp:46 msgid "Should wait for source to be prepared" msgstr "" #: src/hed/libs/data/DataStatus.cpp:47 #, fuzzy msgid "Failed to prepare destination" msgstr "Initiierung der Delegation fehlgeschlagen" #: src/hed/libs/data/DataStatus.cpp:48 msgid "Should wait for destination to be prepared" msgstr "" #: src/hed/libs/data/DataStatus.cpp:49 #, fuzzy msgid "Failed to finalize reading from source" msgstr "Fehler bei Lesen von Quelle" #: src/hed/libs/data/DataStatus.cpp:50 #, fuzzy msgid "Failed to finalize writing to destination" msgstr "Fehler bei Schreiben zu Ziel" #: src/hed/libs/data/DataStatus.cpp:51 #, fuzzy msgid "Failed to create directory" msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/hed/libs/data/DataStatus.cpp:52 #, fuzzy msgid "Failed to rename URL" msgstr "Fehler bei Erstellen von GUID in RLS: %s" #: src/hed/libs/data/DataStatus.cpp:53 msgid "Data was already cached" msgstr "" #: src/hed/libs/data/DataStatus.cpp:54 #, fuzzy msgid "Operation cancelled successfully" msgstr "Operation erfolgreich abgeschlossen" #: src/hed/libs/data/DataStatus.cpp:55 #, fuzzy msgid "Generic error" msgstr "Benutzungsschnittstellenfehler" #: src/hed/libs/data/DataStatus.cpp:56 src/hed/libs/data/DataStatus.cpp:69 msgid "Unknown error" msgstr 
"" #: src/hed/libs/data/DataStatus.cpp:60 msgid "No error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:61 #, fuzzy msgid "Transfer timed out" msgstr "Transfer vollständig" #: src/hed/libs/data/DataStatus.cpp:62 msgid "Checksum mismatch" msgstr "" #: src/hed/libs/data/DataStatus.cpp:63 msgid "Bad logic" msgstr "" #: src/hed/libs/data/DataStatus.cpp:64 msgid "All results obtained are invalid" msgstr "" #: src/hed/libs/data/DataStatus.cpp:65 #, fuzzy msgid "Temporary service error" msgstr "Benutzungsschnittstellenfehler" #: src/hed/libs/data/DataStatus.cpp:66 #, fuzzy msgid "Permanent service error" msgstr "Benutzungsschnittstellenfehler" #: src/hed/libs/data/DataStatus.cpp:67 #, fuzzy msgid "Error switching uid" msgstr "Fehler bei Importieren" #: src/hed/libs/data/DataStatus.cpp:68 #, fuzzy msgid "Request timed out" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/libs/data/FileCache.cpp:101 #, fuzzy msgid "No cache directory specified" msgstr "Kein Cache-Verzeichnis angegeben" #: src/hed/libs/data/FileCache.cpp:118 msgid "No usable caches" msgstr "" #: src/hed/libs/data/FileCache.cpp:127 #, fuzzy msgid "No remote cache directory specified" msgstr "Kein Cache-Verzeichnis angegeben" #: src/hed/libs/data/FileCache.cpp:149 #, fuzzy msgid "No draining cache directory specified" msgstr "Kein Cache-Verzeichnis angegeben" #: src/hed/libs/data/FileCache.cpp:177 #, fuzzy, c-format msgid "Failed to create cache directory for file %s: %s" msgstr "Fehler bei Erstellen von Info-Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:187 #, fuzzy, c-format msgid "Failed to create any cache directories for %s" msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/hed/libs/data/FileCache.cpp:194 #, fuzzy, c-format msgid "Failed to change permissions on %s: %s" msgstr "Konnte Zugriffsrechte von hard link nicht ändern zu 0644: %s" #: src/hed/libs/data/FileCache.cpp:206 #, fuzzy, c-format msgid "Failed to delete stale cache file %s: %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:209 #, fuzzy, c-format msgid "Failed to release lock on file %s" msgstr "Fehler bei Lesen von Proxy-Datei: %s" #: src/hed/libs/data/FileCache.cpp:248 #, c-format msgid "Found file %s in remote cache at %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:265 #, fuzzy, c-format msgid "Failed to delete stale remote cache file %s: %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:269 #, fuzzy, c-format msgid "Failed to release lock on remote cache file %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/data/FileCache.cpp:287 src/hed/libs/data/FileCache.cpp:339 #, fuzzy, c-format msgid "Failed to obtain lock on cache file %s" msgstr "Fehler bei Unlock von Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:294 src/hed/libs/data/FileCache.cpp:348 #: src/hed/libs/data/FileCache.cpp:408 #, fuzzy, c-format msgid "Error removing cache file %s: %s" msgstr "Fehler bei Entfernen von Cache-Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:296 src/hed/libs/data/FileCache.cpp:314 #: src/hed/libs/data/FileCache.cpp:318 src/hed/libs/data/FileCache.cpp:350 #: src/hed/libs/data/FileCache.cpp:361 #, c-format msgid "Failed to remove lock on %s. 
Some manual intervention may be required" msgstr "" #: src/hed/libs/data/FileCache.cpp:301 #, fuzzy, c-format msgid "Replicating file %s to local cache file %s" msgstr "Fehler bei Umbenennen von temporärer Datei %s zu Lock_datei %s: %s" #: src/hed/libs/data/FileCache.cpp:304 src/hed/libs/data/FileCache.cpp:611 #, fuzzy, c-format msgid "Failed to copy file %s to %s: %s" msgstr "Fehler bei Öffnen von Datei %s zum Lesen: %s" #: src/hed/libs/data/FileCache.cpp:309 #, c-format msgid "" "Replicating file %s from remote cache failed due to source being deleted or " "modified" msgstr "" #: src/hed/libs/data/FileCache.cpp:311 #, fuzzy, c-format msgid "Failed to delete bad copy of remote cache file %s at %s: %s" msgstr "Fehler bei Anlegen von hard link von %s zu %s: %s" #: src/hed/libs/data/FileCache.cpp:333 #, fuzzy, c-format msgid "Failed looking up attributes of cached file: %s" msgstr "" "Warnung: Fehler bei Nachschlagen von Attributen von gecachter Datei: %s" #: src/hed/libs/data/FileCache.cpp:380 src/hed/libs/data/FileCache.cpp:414 #, fuzzy, c-format msgid "Failed to unlock file %s: %s. Manual intervention may be required" msgstr "Fehler bei Unlock von Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:397 #, fuzzy, c-format msgid "Invalid lock on file %s" msgstr "Ungültige url: %s" #: src/hed/libs/data/FileCache.cpp:403 #, fuzzy, c-format msgid "Failed to remove .meta file %s: %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:468 #, fuzzy, c-format msgid "Cache not found for file %s" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/hed/libs/data/FileCache.cpp:478 #, c-format msgid "" "Cache file %s was modified in the last second, sleeping 1 second to avoid " "race condition" msgstr "" #: src/hed/libs/data/FileCache.cpp:484 src/hed/libs/data/FileCache.cpp:819 #, fuzzy, c-format msgid "Cache file %s does not exist" msgstr "Cache-Datei %s existiert nicht" #: src/hed/libs/data/FileCache.cpp:503 #, c-format msgid "Cache file for %s not found in any local or remote cache" msgstr "" #: src/hed/libs/data/FileCache.cpp:507 #, fuzzy, c-format msgid "Using remote cache file %s for url %s" msgstr "Fehler bei Entfernen von Cache-Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:510 src/hed/libs/data/FileCache.cpp:821 #, fuzzy, c-format msgid "Error accessing cache file %s: %s" msgstr "Fehler bei Zugriff auf Cache-Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:516 #, fuzzy, c-format msgid "Cannot create directory %s for per-job hard links" msgstr "Kann Verzeichnis \"%s\" nicht anlegen für Job-spezifische hard links" #: src/hed/libs/data/FileCache.cpp:521 #, fuzzy, c-format msgid "Cannot change permission of %s: %s " msgstr "Kann Zugriffsrecht von \"%s\" nicht zu 0700 ändern" #: src/hed/libs/data/FileCache.cpp:525 #, fuzzy, c-format msgid "Cannot change owner of %s: %s " msgstr "Kann Owner von %s nicht ändern" #: src/hed/libs/data/FileCache.cpp:539 #, fuzzy, c-format msgid "Failed to remove existing hard link at %s: %s" msgstr "Fehler bei Entfernen von hard link %s: %s" #: src/hed/libs/data/FileCache.cpp:543 src/hed/libs/data/FileCache.cpp:554 #, fuzzy, c-format msgid "Failed to create hard link from %s to %s: %s" msgstr "Fehler bei Anlegen von hard link von %s zu %s: %s" #: src/hed/libs/data/FileCache.cpp:549 #, fuzzy, c-format msgid "Cache file %s not found" msgstr "Cache-Datei %s existiert nicht" #: src/hed/libs/data/FileCache.cpp:564 #, fuzzy, c-format msgid "Failed to change permissions or set owner of hard link %s: %s" msgstr "Konnte Zugriffsrechte von hard link nicht ändern zu 0644: %s"
#: src/hed/libs/data/FileCache.cpp:572 #, fuzzy, c-format msgid "Failed to release lock on cache file %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/data/FileCache.cpp:583 #, c-format msgid "Cache file %s was locked during link/copy, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:588 #, c-format msgid "Cache file %s was deleted during link/copy, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:593 #, c-format msgid "Cache file %s was modified while linking, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:617 #, fuzzy, c-format msgid "Failed to set executable bit on file %s" msgstr "Konnte Metadaten für Datei %s nicht finden" #: src/hed/libs/data/FileCache.cpp:622 #, fuzzy, c-format msgid "Failed to set executable bit on file %s: %s" msgstr "Fehler bei Erstellen von Info-Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:636 #, fuzzy, c-format msgid "Failed to remove existing symbolic link at %s: %s" msgstr "Fehler bei Entfernen von hard link %s: %s" #: src/hed/libs/data/FileCache.cpp:640 src/hed/libs/data/FileCache.cpp:645 #, fuzzy, c-format msgid "Failed to create symbolic link from %s to %s: %s" msgstr "Fehler bei Anlegen von hard link von %s zu %s: %s" #: src/hed/libs/data/FileCache.cpp:675 #, c-format msgid "Failed to remove cache per-job dir %s: %s" msgstr "Fehler bei Entfernen von cache per-job Verzeichnis %s: %s" #: src/hed/libs/data/FileCache.cpp:694 src/hed/libs/data/FileCache.cpp:771 #, fuzzy, c-format msgid "Error reading meta file %s: %s" msgstr "Fehler bei Lesen von Meta-Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:699 src/hed/libs/data/FileCache.cpp:776 #, fuzzy, c-format msgid "Error opening meta file %s" msgstr "Fehler bei Öffnen von Meta-Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:704 src/hed/libs/data/FileCache.cpp:780 #, fuzzy, c-format msgid "meta file %s is empty" msgstr "Anfrage ist leer" #: src/hed/libs/data/FileCache.cpp:713 #, fuzzy, c-format msgid "" "File %s is already cached at %s under a different URL: %s - will not add DN " "to cached list" msgstr "" "Fehler: Datei %s wird bereits gecacht bei %s unter anderer URL: %s - werde " "DN nicht zu cached list hinzufügen" #: src/hed/libs/data/FileCache.cpp:733 #, fuzzy, c-format msgid "Bad format detected in file %s, in line %s" msgstr "Formatierungsfehler erkannt in Datei %s, in Zeile %s" #: src/hed/libs/data/FileCache.cpp:750 #, fuzzy, c-format msgid "Could not acquire lock on meta file %s" msgstr "Konnte temporäre Datei nicht anlegen: %s" #: src/hed/libs/data/FileCache.cpp:754 #, fuzzy, c-format msgid "Error opening meta file for writing %s" msgstr "Fehler bei Öffnen von Meta-Datei zum Schreiben %s: %s" #: src/hed/libs/data/FileCache.cpp:790 #, c-format msgid "DN %s is cached and is valid until %s for URL %s" msgstr "DN %s wird gecacht und ist gültig bis %s für URL %s" #: src/hed/libs/data/FileCache.cpp:794 #, c-format msgid "DN %s is cached but has expired for URL %s" msgstr "DN %s wird gecacht aber ist abgelaufen für URL %s" #: src/hed/libs/data/FileCache.cpp:845 #, fuzzy, c-format msgid "Failed to acquire lock on cache meta file %s" msgstr "Fehler bei Unlock von Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:850 #, fuzzy, c-format msgid "Failed to create cache meta file %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/data/FileCache.cpp:865 #, fuzzy, c-format msgid "Failed to read cache meta file %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s"
#: src/hed/libs/data/FileCache.cpp:870 #, c-format msgid "Cache meta file %s is empty, will recreate" msgstr "" #: src/hed/libs/data/FileCache.cpp:875 #, c-format msgid "Cache meta file %s possibly corrupted, will recreate" msgstr "" #: src/hed/libs/data/FileCache.cpp:883 #, fuzzy, c-format msgid "" "File %s is already cached at %s under a different URL: %s - this file will " "not be cached" msgstr "" "Fehler: Datei %s wird bereits gecacht bei %s unter einer anderen URL: %s - " "diese Datei wird nicht gecacht" #: src/hed/libs/data/FileCache.cpp:893 #, fuzzy, c-format msgid "Error looking up attributes of cache meta file %s: %s" msgstr "Fehler bei Nachschlagen von Attributen von Meta-Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:955 #, fuzzy, c-format msgid "Using cache %s" msgstr "Nutze space token %s" #: src/hed/libs/data/FileCache.cpp:969 #, fuzzy, c-format msgid "Error getting info from statvfs for the path %s: %s" msgstr "Fehler bei Öffnen von Meta-Datei zum Schreiben %s: %s" #: src/hed/libs/data/FileCache.cpp:975 #, c-format msgid "Cache %s: Free space %f GB" msgstr "" #: src/hed/libs/data/URLMap.cpp:33 #, fuzzy, c-format msgid "Can't use URL %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/hed/libs/data/URLMap.cpp:39 #, c-format msgid "file %s is not accessible" msgstr "" #: src/hed/libs/data/URLMap.cpp:49 #, fuzzy, c-format msgid "Mapping %s to %s" msgstr "Bilde %s auf %s ab" #: src/hed/libs/data/examples/simple_copy.cpp:17 #, fuzzy msgid "Usage: copy source destination" msgstr "Quelle Ziel" #: src/hed/libs/data/examples/simple_copy.cpp:42 #, fuzzy, c-format msgid "Copy failed: %s" msgstr "DCAU fehlgeschlagen: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:41 #, fuzzy, c-format msgid "Failed to read proxy file: %s" msgstr "Fehler bei Lesen von Proxy-Datei: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:49 #, fuzzy, c-format msgid "Failed to read certificate file: %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:56 #, fuzzy, c-format msgid "Failed to read private key file: %s" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:82 #, fuzzy, c-format msgid "" "Failed to convert GSI credential to GSS credential (major: %d, minor: %d)%s:%" "s" msgstr "" "Fehler bei Konvertieren von GSI Credential zu GSS Credential (major: %d, " "minor: %d)%s" #: src/hed/libs/globusutils/GSSCredential.cpp:94 #, fuzzy, c-format msgid "Failed to release GSS credential (major: %d, minor: %d):%s:%s" msgstr "Fehler bei Freigabe von GSS Credential (major: %d, minor: %d):%s" #: src/hed/libs/infosys/BootstrapISIS.cpp:26 msgid "Initialize ISIS handler" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:31 #, c-format msgid "Can't recognize URL: %s" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:48 msgid "Initialize ISIS handler succeeded" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:52 #, c-format msgid "Remove ISIS (%s) from list" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:72 #, c-format msgid "getISISList from %s" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:73 #, c-format msgid "Key %s, Cert: %s, CA: %s" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:105 #, c-format msgid "ISIS (%s) is not available or not valid response. (%d. reconnection)" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:107 #, c-format msgid "Connection to the ISIS (%s) is success and get the list of ISIS."
msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:133 #, c-format msgid "GetISISList add this (%s) ISIS into the list." msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:145 #, c-format msgid "Chosen ISIS for communication: %s" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:152 msgid "Get ISIS from list of ISIS handler" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:156 msgid "Here is the end of the infinite calling loop." msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:163 msgid "There is no more ISIS available. The list of ISIS's is already empty." msgstr "" #: src/hed/libs/infosys/InfoCache.cpp:47 #, c-format msgid "cannot create directory: %s" msgstr "" #: src/hed/libs/infosys/InfoCache.cpp:60 #, c-format msgid "Cache configuration: %s" msgstr "" #: src/hed/libs/infosys/InfoCache.cpp:63 msgid "Missing cache root in configuration" msgstr "" #: src/hed/libs/infosys/InfoCache.cpp:67 msgid "Missing service ID" msgstr "" #: src/hed/libs/infosys/InfoCache.cpp:70 #, c-format msgid "Cache root: %s" msgstr "" #: src/hed/libs/infosys/InfoCache.cpp:76 #, c-format msgid "Cache directory: %s" msgstr "" #: src/hed/libs/infosys/InfoCache.cpp:143 #: src/hed/libs/infosys/InfoCache.cpp:162 #: src/hed/libs/infosys/InfoCache.cpp:181 #: src/hed/libs/infosys/InfoCache.cpp:206 msgid "InfoCache object is not set up" msgstr "" #: src/hed/libs/infosys/InfoCache.cpp:147 #: src/hed/libs/infosys/InfoCache.cpp:166 #, c-format msgid "Invalid path in Set(): %s" msgstr "" #: src/hed/libs/infosys/InfoCache.cpp:185 #, c-format msgid "Invalid path in Get(): %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:23 #, c-format msgid "" "InfoRegistrar thread waiting %d seconds for the all Registers elements " "creation." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:69 #, c-format msgid "" "InfoRegister created with config:\n" "%s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:79 #, c-format msgid "InfoRegister to be registered in Registrar %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:81 msgid "Discarding Registrar because the \"URL\" element is missing or empty." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:133 #, c-format msgid "InfoRegistrar id \"%s\" has been found." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:140 #, c-format msgid "InfoRegistrar id \"%s\" was not found. New registrar created" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:182 #, c-format msgid "" "Configuration error. Retry: \"%s\" is not a valid value. Default value will " "be used." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:188 #, c-format msgid "Retry: %d" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:197 #, c-format msgid "Key: %s, cert: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:226 msgid "The service won't be registered." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:231 msgid "Configuration error. Missing mandatory \"Period\" element." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:236 msgid "Configuration error. Missing mandatory \"Endpoint\" element." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:241 msgid "Configuration error. Missing mandatory \"Expiration\" element." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:248 #, c-format msgid "" "Service was already registered to the InfoRegistrar connecting to infosys %s." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:284 #, c-format msgid "" "Service is successfully added to the InfoRegistrar connecting to infosys %s." 
msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:301 msgid "Unregistred Service can not be removed." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:348 #: src/hed/libs/infosys/InfoRegister.cpp:411 #, c-format msgid "Key: %s, Cert: %s, Proxy: %s, CADir: %s CAPath" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:359 #: src/hed/libs/infosys/InfoRegister.cpp:646 #, c-format msgid "Response from the ISIS: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:363 #, fuzzy, c-format msgid "Failed to remove registration from %s ISIS" msgstr "Fehler beim Entfernen der location vom LFC: %s" #: src/hed/libs/infosys/InfoRegister.cpp:366 #, c-format msgid "Successfuly removed registration from ISIS (%s)" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:372 #, c-format msgid "Failed to remove registration from ISIS (%s) - %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:379 #: src/hed/libs/infosys/InfoRegister.cpp:656 #, c-format msgid "Retry connecting to the ISIS (%s) %d time(s)." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:385 #, c-format msgid "ISIS (%s) is not available." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:389 #: src/hed/libs/infosys/InfoRegister.cpp:439 #, c-format msgid "Service removed from InfoRegistrar connecting to infosys %s." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:420 #, fuzzy, c-format msgid "Failed to remove registration from %s EMIRegistry" msgstr "Fehler beim Entfernen der location vom LFC: %s" #: src/hed/libs/infosys/InfoRegister.cpp:423 #, c-format msgid "Successfuly removed registration from EMIRegistry (%s)" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:429 #: src/hed/libs/infosys/InfoRegister.cpp:957 #, c-format msgid "Retry connecting to the EMIRegistry (%s) %d time(s)." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:435 #, c-format msgid "EMIRegistry (%s) is not available." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:476 #: src/hed/libs/infosys/InfoRegister.cpp:684 #, c-format msgid "Registration starts: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:477 #: src/hed/libs/infosys/InfoRegister.cpp:685 #, c-format msgid "reg_.size(): %d" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:480 #: src/hed/libs/infosys/InfoRegister.cpp:688 msgid "Registrant has no proper URL specified. Registration end." 
msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:510 #: src/hed/libs/infosys/InfoRegister.cpp:713 msgid "Create RegEntry XML element" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:542 #: src/hed/libs/infosys/InfoRegister.cpp:745 msgid "ServiceID attribute calculated from Endpoint Reference" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:546 #: src/hed/libs/infosys/InfoRegister.cpp:749 msgid "Generation Time attribute calculated from current time" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:553 #: src/hed/libs/infosys/InfoRegister.cpp:756 #, c-format msgid "ServiceID stored: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:559 #: src/hed/libs/infosys/InfoRegister.cpp:762 #, c-format msgid "Missing service document provided by the service %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:565 #: src/hed/libs/infosys/InfoRegister.cpp:768 #, c-format msgid "" "Missing MetaServiceAdvertisment or Expiration values provided by the service " "%s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:572 #: src/hed/libs/infosys/InfoRegister.cpp:775 #, c-format msgid "Missing Type value provided by the service %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:580 #: src/hed/libs/infosys/InfoRegister.cpp:783 #, c-format msgid "Missing Endpoint Reference value provided by the service %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:598 #, c-format msgid "Registering to %s ISIS" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:621 #: src/hed/libs/infosys/InfoRegister.cpp:822 #, c-format msgid "Key: %s, Cert: %s, Proxy: %s, CADir: %s, CAFile" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:625 #, c-format msgid "Sent RegEntries: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:639 #, c-format msgid "Error during registration to %s ISIS" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:648 #, c-format msgid "Successful registration to ISIS (%s)" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:652 #, c-format msgid "Failed to register to ISIS (%s) - %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:668 #: src/hed/libs/infosys/InfoRegister.cpp:967 #, c-format msgid "Registration ends: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:669 #: src/hed/libs/infosys/InfoRegister.cpp:968 #, c-format msgid "Waiting period is %d second(s)." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:679 #: src/hed/libs/infosys/InfoRegister.cpp:978 #, c-format msgid "Registration exit: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:801 #, c-format msgid "Registering to %s EMIRegistry" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:927 #, fuzzy, c-format msgid "Sent entry: %s" msgstr "Identität: %s" #: src/hed/libs/infosys/InfoRegister.cpp:940 #, c-format msgid "Error during %s to %s EMIRegistry" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:943 #, c-format msgid "Successful %s to EMIRegistry (%s)" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:949 #, fuzzy, c-format msgid "Failed to %s to EMIRegistry (%s) - %d" msgstr "Fehler bei Verbinden zu server %s:%d" #: src/hed/libs/loader/ModuleManager.cpp:25 msgid "Module Manager Init" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:68 msgid "" "Busy plugins found while unloading Module Manager. Waiting for them to be " "released." 
msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:202 #, c-format msgid "Found %s in cache" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:209 #, c-format msgid "Could not locate module %s in following paths:" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:213 #, fuzzy, c-format msgid "\t%s" msgstr "%s" #: src/hed/libs/loader/ModuleManager.cpp:227 #, c-format msgid "Loaded %s" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:271 msgid "Module Manager Init by ModuleManager::setCfg" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:307 #: src/hed/libs/loader/ModuleManager.cpp:320 #, c-format msgid "%s made persistent" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:311 #, c-format msgid "Not found %s in cache" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:325 msgid "Specified module not found in cache" msgstr "" #: src/hed/libs/loader/Plugin.cpp:369 src/hed/libs/loader/Plugin.cpp:574 #, c-format msgid "Could not find loadable module descriptor by name %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:375 src/hed/libs/loader/Plugin.cpp:581 #, c-format msgid "Could not find loadable module by name %s (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:381 src/hed/libs/loader/Plugin.cpp:492 #: src/hed/libs/loader/Plugin.cpp:586 #, c-format msgid "Module %s is not an ARC plugin (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:398 src/hed/libs/loader/Plugin.cpp:502 #: src/hed/libs/loader/Plugin.cpp:608 #, c-format msgid "Module %s failed to reload (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:462 src/hed/libs/loader/Plugin.cpp:475 #, c-format msgid "Loadable module %s contains no requested plugin %s of kind %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:480 #, c-format msgid "Could not find loadable module descriptor by names %s and %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:486 #, c-format msgid "Could not find loadable module by names %s and %s (%s)" msgstr "" #: src/hed/libs/message/MCC.cpp:77 src/hed/libs/message/Service.cpp:25 #, c-format msgid "No security processing/check requested for '%s'" msgstr "" #: src/hed/libs/message/MCC.cpp:85 msgid "Security processing/check failed" msgstr "" #: src/hed/libs/message/MCC.cpp:89 msgid "Security processing/check passed" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:16 #, fuzzy msgid "Chain(s) configuration failed" msgstr "Delegation Authorisierung fehlgeschlagen" #: src/hed/libs/message/MCCLoader.cpp:133 msgid "SecHandler configuration is not defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:156 msgid "SecHandler has no configuration" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:162 msgid "SecHandler has no name attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:172 #, c-format msgid "Security Handler %s(%s) could not be created" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:176 #, c-format msgid "SecHandler: %s(%s)" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:188 msgid "Component has no name attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:193 msgid "Component has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:202 #, c-format msgid "Component %s(%s) could not be created" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:232 #, c-format msgid "Component's %s(%s) next has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:287 #, c-format msgid "Loaded MCC %s(%s)" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:305 #, c-format msgid "Plexer's (%s) next has no ID attribute defined" msgstr "" #: 
src/hed/libs/message/MCCLoader.cpp:315 #, c-format msgid "Loaded Plexer %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:323 msgid "Service has no Name attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:329 msgid "Service has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:338 #, c-format msgid "Service %s(%s) could not be created" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:345 #, c-format msgid "Loaded Service %s(%s)" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:387 #, c-format msgid "Linking MCC %s(%s) to MCC (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:398 #, c-format msgid "Linking MCC %s(%s) to Service (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:407 #, c-format msgid "Linking MCC %s(%s) to Plexer (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:412 #, c-format msgid "MCC %s(%s) - next %s(%s) has no target" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:431 #, c-format msgid "Linking Plexer %s to MCC (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:442 #, c-format msgid "Linking Plexer %s to Service (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:451 #, c-format msgid "Linking Plexer %s to Plexer (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:457 #, c-format msgid "Plexer (%s) - next %s(%s) has no target" msgstr "" #: src/hed/libs/message/Plexer.cpp:31 #, c-format msgid "Bad label: \"%s\"" msgstr "" #: src/hed/libs/message/Plexer.cpp:47 #, c-format msgid "Operation on path \"%s\"" msgstr "" #: src/hed/libs/message/Plexer.cpp:60 #, c-format msgid "No next MCC or Service at path \"%s\"" msgstr "" #: src/hed/libs/message/Service.cpp:33 #, c-format msgid "Security processing/check for '%s' failed" msgstr "" #: src/hed/libs/message/Service.cpp:37 #, c-format msgid "Security processing/check for '%s' passed" msgstr "" #: src/hed/libs/message/Service.cpp:43 msgid "Empty registration collector" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:136 #, fuzzy, c-format msgid "Can not load ARC evaluator object: %s" msgstr "Kann PolicyStore Objekt nicht anlegen" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:187 #, c-format msgid "Can not load ARC request object: %s" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:228 #, c-format msgid "Can not load policy object: %s" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:276 msgid "Can not load policy object" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:324 msgid "Can not load request object" msgstr "" #: src/hed/libs/security/ArcPDP/PolicyParser.cpp:119 msgid "Can not generate policy object" msgstr "" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:37 #, c-format msgid "Id= %s,Type= %s,Issuer= %s,Value= %s" msgstr "" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:40 #, c-format msgid "No Attribute exists, which can deal with type: %s" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:157 #, c-format msgid "HTTP Error: %d %s" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:222 msgid "Cannot create http payload" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:288 msgid "No next element in the chain" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:297 #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:253 msgid "next element of the chain returned error status" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:306 msgid "next element of the chain returned no payload" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:318 msgid "next element of the chain returned 
invalid/unsupported payload" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:393 msgid "Error to flush output payload" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:306 #, c-format msgid "<< %s" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:355 src/hed/mcc/http/PayloadHTTP.cpp:457 #, fuzzy, c-format msgid "< %s" msgstr "%s" #: src/hed/mcc/http/PayloadHTTP.cpp:576 #, fuzzy msgid "Failed to parse HTTP header" msgstr "Fehler beim Senden von header" #: src/hed/mcc/http/PayloadHTTP.cpp:837 msgid "Invalid HTTP object can't produce result" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:950 #, fuzzy, c-format msgid "> %s" msgstr "%s" #: src/hed/mcc/http/PayloadHTTP.cpp:975 #, fuzzy msgid "Failed to write header to output stream" msgstr "Fehler bei Schreiben zu Datein %s: %s" #: src/hed/mcc/http/PayloadHTTP.cpp:1000 src/hed/mcc/http/PayloadHTTP.cpp:1006 #: src/hed/mcc/http/PayloadHTTP.cpp:1012 src/hed/mcc/http/PayloadHTTP.cpp:1022 #: src/hed/mcc/http/PayloadHTTP.cpp:1034 src/hed/mcc/http/PayloadHTTP.cpp:1039 #: src/hed/mcc/http/PayloadHTTP.cpp:1044 src/hed/mcc/http/PayloadHTTP.cpp:1052 #: src/hed/mcc/http/PayloadHTTP.cpp:1059 #, fuzzy msgid "Failed to write body to output stream" msgstr "Fehler bei Lesen von Dateiliste" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:31 msgid "Skipping service: no ServicePath found!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:37 msgid "Skipping service: no SchemaPath found!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:89 msgid "Parser Context creation failed!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:98 msgid "Cannot parse schema!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:110 msgid "Empty payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:119 msgid "Could not convert payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:125 #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:212 msgid "Could not create PayloadSOAP!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:196 msgid "Empty input payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:205 msgid "Could not convert incoming payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:232 msgid "Missing schema! Skipping validation..." msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:237 msgid "Could not validate message!" 
msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:245 #: src/hed/mcc/soap/MCCSOAP.cpp:222 msgid "empty next chain element" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:257 #: src/hed/mcc/soap/MCCSOAP.cpp:238 msgid "next element of the chain returned empty payload" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:265 msgid "next element of the chain returned invalid payload" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:192 msgid "empty input payload" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:198 msgid "incoming message is not SOAP" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:215 src/hed/mcc/soap/MCCSOAP.cpp:372 msgid "Security check failed in SOAP MCC for incoming message" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:230 #, c-format msgid "next element of the chain returned error status: %s" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:249 msgid "next element of the chain returned unknown payload - passing through" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:252 src/hed/mcc/soap/MCCSOAP.cpp:265 #: src/hed/mcc/soap/MCCSOAP.cpp:317 msgid "Security check failed in SOAP MCC for outgoing message" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:104 src/hed/mcc/tcp/MCCTCP.cpp:636 msgid "Cannot initialize winsock library" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:119 msgid "Missing Port in Listen element" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:128 msgid "Version in Listen element can't be recognized" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:137 #, c-format msgid "Failed to obtain local address for port %s - %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:139 #, fuzzy, c-format msgid "Failed to obtain local address for %s:%s - %s" msgstr "Warnung: Fehler bei Bezug von Attributen von %s: %s" #: src/hed/mcc/tcp/MCCTCP.cpp:146 #, c-format msgid "Trying to listen on TCP port %s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:148 #, fuzzy, c-format msgid "Trying to listen on %s:%s(%s)" msgstr "" "Ошибка при попытке открыть файл:\n" " %1" #: src/hed/mcc/tcp/MCCTCP.cpp:154 #, c-format msgid "Failed to create socket for for listening at TCP port %s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:156 #, fuzzy, c-format msgid "Failed to create socket for for listening at %s:%s(%s): %s" msgstr "Fehler bei Anlegen von Datei %s zum Schreiben: %s" #: src/hed/mcc/tcp/MCCTCP.cpp:171 #, c-format msgid "" "Failed to limit socket to IPv6 at TCP port %s - may cause errors for IPv4 at " "same port" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:173 #, c-format msgid "" "Failed to limit socket to IPv6 at %s:%s - may cause errors for IPv4 at same " "port" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:181 #, c-format msgid "Failed to bind socket for TCP port %s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:183 #, fuzzy, c-format msgid "Failed to bind socket for %s:%s(%s): %s" msgstr "Fehler bei Unlock von Datei %s: %s" #: src/hed/mcc/tcp/MCCTCP.cpp:198 #, c-format msgid "Failed to listen at TCP port %s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:200 #, fuzzy, c-format msgid "Failed to listen at %s:%s(%s): %s" msgstr "Fehler bei Schreiben zu Datein %s: %s" #: src/hed/mcc/tcp/MCCTCP.cpp:217 #, c-format msgid "Listening on TCP port %s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:219 #, c-format msgid "Listening on %s:%s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:226 #, fuzzy, c-format msgid "Failed to start listening on any address for %s:%s" msgstr "Warnung: Fehler bei Bezug von Attributen von %s: %s" #: src/hed/mcc/tcp/MCCTCP.cpp:228 #, fuzzy, c-format msgid "Failed to start listening on any address for %s:%s(IPv%s)" msgstr 
"Warnung: Fehler bei Bezug von Attributen von %s: %s" #: src/hed/mcc/tcp/MCCTCP.cpp:234 msgid "No listening ports initiated" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:245 #, c-format msgid "Setting connections limit to %i, connections over limit will be %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:249 msgid "Failed to start thread for listening" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:285 msgid "Failed to start thread for communication" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:315 msgid "Failed while waiting for connection request" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:337 msgid "Failed to accept connection request" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:346 msgid "Too many connections - dropping new one" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:353 msgid "Too many connections - waiting for old to close" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:577 msgid "next chain element called" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:592 msgid "Only Raw Buffer payload is supported for output" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:600 src/hed/mcc/tcp/MCCTCP.cpp:709 #: src/hed/mcc/tls/MCCTLS.cpp:545 msgid "Failed to send content of buffer" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:612 msgid "TCP executor is removed" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:614 #, c-format msgid "Sockets do not match on exit %i != %i" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:642 msgid "No Connect element specified" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:648 msgid "Missing Port in Connect element" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:654 msgid "Missing Host in Connect element" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:685 #, fuzzy msgid "TCP client process called" msgstr "konnte Nachricht nicht verarbeiten" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:71 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:87 #, c-format msgid "Failed to resolve %s (%s)" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:97 #, c-format msgid "Trying to connect %s(%s):%d" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:101 #, fuzzy, c-format msgid "Failed to create socket for connecting to %s(%s):%d - %s" msgstr "Fehler bei Anlegen von Datei %s zum Schreiben: %s" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:114 #, c-format msgid "" "Failed to get TCP socket options for connection to %s(%s):%d - timeout won't " "work - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:121 #, c-format msgid "Failed to connect to %s(%s):%i - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:131 #, c-format msgid "Timeout connecting to %s(%s):%i - %i s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:139 #, fuzzy, c-format msgid "Failed while waiting for connection to %s(%s):%i - %s" msgstr "Fehler bei Schreiben zu Ziel" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:149 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:158 #, c-format msgid "Failed to connect to %s(%s):%i" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:215 msgid "" "Received message out-of-band (not critical, ERROR level is just for " "debugging purposes)" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:39 msgid "Independent proxy - no rights granted" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:43 msgid "Proxy with all rights inherited" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:51 msgid "Proxy with empty policy - fail on unrecognized policy" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:56 #, c-format msgid "Proxy with specific policy: %s" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:60 msgid "Proxy with ARC Policy" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:62 msgid 
"Proxy with unknown policy - fail on unrecognized policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:77 #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:128 #, c-format msgid "Was expecting %s at the beginning of \"%s\"" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:82 #, c-format msgid "We only support CAs in Globus signing policy - %s is not supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:87 #, c-format msgid "We only support X509 CAs in Globus signing policy - %s is not supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:92 msgid "Missing CA subject in Globus signing policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:102 msgid "Negative rights are not supported in Globus signing policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:106 #, c-format msgid "Unknown rights in Globus signing policy - %s" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:111 #, c-format msgid "" "Only globus rights are supported in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:116 #, c-format msgid "" "Only signing rights are supported in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:133 #, c-format msgid "" "We only support subjects conditions in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:138 #, c-format msgid "" "We only support globus conditions in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:144 msgid "Missing condition subjects in Globus signing policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:220 msgid "Unknown element in Globus signing policy" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:222 #, fuzzy msgid "Critical VOMS attribute processing failed" msgstr "Konnte VOMS Attribut nicht herauslesen" #: src/hed/mcc/tls/MCCTLS.cpp:230 #, fuzzy msgid "VOMS attribute validation failed" msgstr "Konnte VOMS Attribut nicht herauslesen" #: src/hed/mcc/tls/MCCTLS.cpp:232 msgid "VOMS attribute is ignored due to processing/validation error" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:424 #, fuzzy, c-format msgid "Failed to establish connection: %s" msgstr "Fehler bei Schließen von Verbindung 2" #: src/hed/mcc/tls/MCCTLS.cpp:442 src/hed/mcc/tls/MCCTLS.cpp:524 #, c-format msgid "Peer name: %s" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:444 src/hed/mcc/tls/MCCTLS.cpp:526 #, c-format msgid "Identity name: %s" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:446 src/hed/mcc/tls/MCCTLS.cpp:528 #, c-format msgid "CA name: %s" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:452 msgid "Failed to process security attributes in TLS MCC for incoming message" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:461 msgid "Security check failed in TLS MCC for incoming message" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:534 msgid "Security check failed for outgoing TLS message" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:563 msgid "Security check failed for incoming TLS message" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:93 #, fuzzy msgid "" "Failed to allocate memory for certificate subject while matching policy." msgstr "Fehler beim Reservieren von Speicher" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:98 msgid "" "Failed to retrieve link to TLS stream. Additional policy matching is skipped." 
msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:128 #, c-format msgid "Certificate %s already expired" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:136 #, c-format msgid "Certificate %s will expire in %s" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:157 msgid "Failed to store application data" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:185 msgid "Failed to retrieve application data from OpenSSL" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:257 src/hed/mcc/tls/PayloadTLSMCC.cpp:351 msgid "Can not create the SSL Context object" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:270 src/hed/mcc/tls/PayloadTLSMCC.cpp:369 msgid "Can't set OpenSSL verify flags" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:289 src/hed/mcc/tls/PayloadTLSMCC.cpp:383 msgid "Can not create the SSL object" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:308 msgid "Failed to establish SSL connection" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:311 src/hed/mcc/tls/PayloadTLSMCC.cpp:398 #, fuzzy, c-format msgid "Using cipher: %s" msgstr "Nutze space token %s" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:395 msgid "Failed to accept SSL connection" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:448 #, fuzzy, c-format msgid "Failed to shut down SSL: %s" msgstr "Fehler bei Erstellen von GUID in RLS: %s" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:47 msgid "" "ArcAuthZ: failed to initiate all PDPs - this instance will be non-functional" msgstr "" "ArcAuthZ: Fehler bei Initiierung wenigstens einer PDP - diese Instanz wird " "nicht funktional sein" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:70 #, fuzzy msgid "PDP: missing name attribute" msgstr "PDP: %s Name ist Doublette" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:74 #, fuzzy, c-format msgid "PDP: %s (%s)" msgstr "PDP: %s (%d)" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:79 #, fuzzy, c-format msgid "PDP: %s (%s) can not be loaded" msgstr "PDP: %s kann nicht geladen werden" #: src/hed/shc/arcpdp/ArcEvaluationCtx.cpp:251 #, fuzzy, c-format msgid "There are %d RequestItems" msgstr "Es gibt %d RequestItems" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:60 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:55 msgid "Can not parse classname for FunctionFactory from configuration" msgstr "Konnte classname für FunctionFactory nicht von Konfiguration parsen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:68 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:63 msgid "Can not parse classname for AttributeFactory from configuration" msgstr "Konnte classname für AttributeFactory nicht von Konfiguration parsen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:76 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:71 msgid "" "Can not parse classname for CombiningAlgorithmFactory from configuration" msgstr "" "Konnte classname für CombiningAlgorithmFactory nicht von Konfiguration parsen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:84 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:79 msgid "Can not parse classname for Request from configuration" msgstr "Konnte classname für Request nicht von Konfiguration parsen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:93 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:88 msgid "Can not parse classname for Policy from configuration" msgstr "Konnte classname für Policy nicht von Konfiguration parsen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:105 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:100 msgid "Can not dynamically produce AttributeFactory" msgstr "Kann AttributeFactory nicht dynamisch anlegen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:110 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:105 msgid "Can not dynamically produce FnFactory" msgstr 
"Kann FnFactory nicht dynamisch anlegen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:115 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:110 msgid "Can not dynamically produce AlgFacroty" msgstr "Kann AlgFactory nicht dynamisch anlegen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:126 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:31 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:37 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:121 #, fuzzy msgid "Can not create PolicyStore object" msgstr "Kann PolicyStore Objekt nicht anlegen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:177 src/hed/shc/test.cpp:183 #: src/hed/shc/testinterface_arc.cpp:102 #: src/hed/shc/testinterface_xacml.cpp:54 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:172 #, fuzzy msgid "Can not dynamically produce Request" msgstr "Kann Anfrage nicht dynamisch produzieren" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:261 #, c-format msgid "Result value (0=Permit, 1=Deny, 2=Indeterminate, 3=Not_Applicable): %d" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:110 #, fuzzy msgid "Can not find ArcPDPContext" msgstr "Kann ArcPDPContext nicht finden" #: src/hed/shc/arcpdp/ArcPDP.cpp:139 src/hed/shc/xacmlpdp/XACMLPDP.cpp:117 msgid "Evaluator does not support loadable Combining Algorithms" msgstr "Evaluator unterstützt ladare Combining Algorithms nicht" #: src/hed/shc/arcpdp/ArcPDP.cpp:143 src/hed/shc/xacmlpdp/XACMLPDP.cpp:121 #, c-format msgid "Evaluator does not support specified Combining Algorithm - %s" msgstr "Evaluator unterstützt die angegebenen Combining Algorithms nicht - %s" #: src/hed/shc/arcpdp/ArcPDP.cpp:155 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:84 #: src/hed/shc/gaclpdp/GACLPDP.cpp:118 src/hed/shc/test.cpp:94 #: src/hed/shc/testinterface_arc.cpp:37 src/hed/shc/testinterface_xacml.cpp:37 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:133 msgid "Can not dynamically produce Evaluator" msgstr "Kann Evaluator nicht dynamisch produzieren" #: src/hed/shc/arcpdp/ArcPDP.cpp:158 msgid "Evaluator for ArcPDP was not loaded" msgstr "Evaluator für ArcPDP wurde nicht geladen" #: src/hed/shc/arcpdp/ArcPDP.cpp:165 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:57 #: src/hed/shc/gaclpdp/GACLPDP.cpp:128 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:87 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:143 src/tests/echo/echo.cpp:108 msgid "Missing security object in message" msgstr "Fehlendes security Objekt in Nachricht" #: src/hed/shc/arcpdp/ArcPDP.cpp:173 src/hed/shc/arcpdp/ArcPDP.cpp:181 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:137 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:143 #: src/hed/shc/gaclpdp/GACLPDP.cpp:136 src/hed/shc/gaclpdp/GACLPDP.cpp:144 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:95 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:103 #: src/tests/echo/echo.cpp:116 src/tests/echo/echo.cpp:123 msgid "Failed to convert security information to ARC request" msgstr "Fehler bei Konvertierung von security information für ARC Anfrage" #: src/hed/shc/arcpdp/ArcPDP.cpp:189 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:150 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:111 #, c-format msgid "ARC Auth. request: %s" msgstr "ARC Auth. 
Anfrage: %s" #: src/hed/shc/arcpdp/ArcPDP.cpp:192 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:153 #: src/hed/shc/gaclpdp/GACLPDP.cpp:155 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:114 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:170 #, fuzzy msgid "No requested security information was collected" msgstr "Keine security information erhalten" #: src/hed/shc/arcpdp/ArcPDP.cpp:199 msgid "Not authorized from arc.pdp - failed to get reponse from Evaluator" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:245 #, fuzzy msgid "Authorized by arc.pdp" msgstr "Authorisiert von arc.pdp" #: src/hed/shc/arcpdp/ArcPDP.cpp:246 #, fuzzy msgid "" "Not authorized by arc.pdp - some of the RequestItem elements do not satisfy " "Policy" msgstr "" "UnAuthorisiert von arc.pdp; einige der ReqestItems genügen nicht der Policy" #: src/hed/shc/arcpdp/ArcPolicy.cpp:56 src/hed/shc/arcpdp/ArcPolicy.cpp:70 #: src/hed/shc/gaclpdp/GACLPolicy.cpp:46 src/hed/shc/gaclpdp/GACLPolicy.cpp:59 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:48 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:64 msgid "Policy is empty" msgstr "Policy is leer" #: src/hed/shc/arcpdp/ArcPolicy.cpp:114 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:115 #, fuzzy, c-format msgid "PolicyId: %s Alg inside this policy is:-- %s" msgstr "PolicyId: %s Alg in dieser Policy ist:-- %s" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:75 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:129 #, fuzzy msgid "No delegation policies in this context and message - passing through" msgstr "" "Keine delegation policies in diesem context und message - durchgelassen" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:95 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:109 msgid "Failed to convert security information to ARC policy" msgstr "Fehler bei Konvertieren von security information für ARC policy" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:116 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:123 #, c-format msgid "ARC delegation policy: %s" msgstr "ARC delegation policy: %s" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:161 msgid "No authorization response was returned" msgstr "Es wurde keine authorization response erwidert" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:164 #, c-format msgid "There are %d requests, which satisfy at least one policy" msgstr "Es gibt %d Anfragen, die wenigstens einer Policy Anfrage genügt" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:183 #, fuzzy msgid "Delegation authorization passed" msgstr "Delegations Authorisation zugelassen" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:185 msgid "Delegation authorization failed" msgstr "Delegation Authorisierung fehlgeschlagen" #: src/hed/shc/delegationsh/DelegationSH.cpp:63 msgid "" "Missing CertificatePath element or ProxyPath element, or " " is missing" msgstr "" "Fehlendes CertificatePath Element oder ProxyPath Element, oder " " fehlt" #: src/hed/shc/delegationsh/DelegationSH.cpp:68 msgid "" "Missing or empty KeyPath element, or is missing" msgstr "" "Fehlendes oder leeres KeyPath Element, oder fehlt" #: src/hed/shc/delegationsh/DelegationSH.cpp:74 msgid "Missing or empty CertificatePath or CACertificatesDir element" msgstr "Fehlendes oder leeres CertificatePath oder CACertificatesDir Element" #: src/hed/shc/delegationsh/DelegationSH.cpp:81 #, c-format msgid "Delegation role not supported: %s" msgstr "Delegation role nicht unterstützt: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:90 #, c-format msgid "Delegation type not supported: %s" msgstr "Delegation Typ nicht unterstützt: %s" #: 
src/hed/shc/delegationsh/DelegationSH.cpp:115 #, fuzzy msgid "Failed to acquire delegation context" msgstr "Konnte delegation context nicht erhalten" #: src/hed/shc/delegationsh/DelegationSH.cpp:143 #: src/hed/shc/delegationsh/DelegationSH.cpp:254 msgid "Can't create delegation context" msgstr "Kann delegation context nicht anlegen" #: src/hed/shc/delegationsh/DelegationSH.cpp:149 #, fuzzy msgid "Delegation handler with delegatee role starts to process" msgstr "Delegation handler mit delegatee role gestartet." #: src/hed/shc/delegationsh/DelegationSH.cpp:152 #: src/services/a-rex/arex.cpp:554 #: src/services/cache_service/CacheService.cpp:529 #: src/services/data-staging/DataDeliveryService.cpp:630 #, fuzzy msgid "process: POST" msgstr "Prozess: POST" #: src/hed/shc/delegationsh/DelegationSH.cpp:159 #: src/services/a-rex/arex.cpp:561 #: src/services/cache_service/CacheService.cpp:538 #: src/services/data-staging/DataDeliveryService.cpp:639 #: src/services/wrappers/java/javawrapper.cpp:140 #: src/services/wrappers/python/pythonwrapper.cpp:413 msgid "input is not SOAP" msgstr "Eingabe ist kein SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:166 #, c-format msgid "Delegation service: %s" msgstr "Delegation service: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:181 #: src/hed/shc/delegationsh/DelegationSH.cpp:188 #, fuzzy, c-format msgid "Can not get the delegation credential: %s from delegation service: %s" msgstr "" "Kann delegation credential nicht erhalten: %s von delegation service: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:204 #: src/hed/shc/delegationsh/DelegationSH.cpp:268 #, fuzzy, c-format msgid "Delegated credential identity: %s" msgstr "Delegated credential Identität: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:205 #, c-format msgid "" "The delegated credential got from delegation service is stored into path: %s" msgstr "" "Das delegierte credential wie erhalten von delegation service ist abgelegt " "unter Pfad: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:218 msgid "The endpoint of delgation service should be configured" msgstr "Der Endpunkt des delegation service sollte konfiguriert werden" #: src/hed/shc/delegationsh/DelegationSH.cpp:228 #: src/hed/shc/delegationsh/DelegationSH.cpp:340 #, fuzzy msgid "Delegation handler with delegatee role ends" msgstr "Delegation handler mit delegatee Rolle endet" #: src/hed/shc/delegationsh/DelegationSH.cpp:260 #, fuzzy msgid "Delegation handler with delegator role starts to process" msgstr "Delegation handler mit delegator Rolle startet" #: src/hed/shc/delegationsh/DelegationSH.cpp:269 #, fuzzy, c-format msgid "The delegated credential got from path: %s" msgstr "Das delegierte credential erhalten von Pfad: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:290 #, fuzzy, c-format msgid "Can not create delegation crendential to delegation service: %s" msgstr "Kann delegation credential nicht anlegen für delegation service: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:328 #: src/services/wrappers/java/javawrapper.cpp:144 msgid "output is not SOAP" msgstr "Ausgabe ist nicht SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:339 #, fuzzy, c-format msgid "" "Succeeded to send DelegationService: %s and DelegationID: %s info to peer " "service" msgstr "" "Senden von DelegationService erfolgreich: %s und DelegationID: %s Info an " "peer service" #: src/hed/shc/delegationsh/DelegationSH.cpp:345 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:220 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:101 #: 
src/hed/shc/x509tokensh/X509TokenSH.cpp:94 #, fuzzy msgid "Incoming Message is not SOAP" msgstr "Einkommende Nachricht ist nicht SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:352 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:341 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:123 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:108 msgid "Outgoing Message is not SOAP" msgstr "Ausgehende Nachricht ist kein SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:356 #, fuzzy msgid "Delegation handler is not configured" msgstr "Delegation handler wurde nicht konfiguriert" #: src/hed/shc/gaclpdp/GACLPDP.cpp:121 msgid "Evaluator for GACLPDP was not loaded" msgstr "Evaluator für GACLPDP wurde nicht geladen" #: src/hed/shc/gaclpdp/GACLPDP.cpp:152 #, fuzzy, c-format msgid "GACL Auth. request: %s" msgstr "GACL Auth. Anfrage. %s" #: src/hed/shc/gaclpdp/GACLPolicy.cpp:50 src/hed/shc/gaclpdp/GACLPolicy.cpp:63 #, fuzzy msgid "Policy is not gacl" msgstr "Policy ist nicht gacl" #: src/hed/shc/legacy/ConfigParser.cpp:13 #, fuzzy msgid "Configuration file not specified" msgstr "Delegation Authorisierung fehlgeschlagen" #: src/hed/shc/legacy/ConfigParser.cpp:18 #: src/hed/shc/legacy/ConfigParser.cpp:53 #: src/hed/shc/legacy/ConfigParser.cpp:58 #, fuzzy msgid "Configuration file can not be read" msgstr "Delegation Authorisierung fehlgeschlagen" #: src/hed/shc/legacy/ConfigParser.cpp:68 #, c-format msgid "Configuration file is broken - block name is too short: %s" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:31 src/hed/shc/legacy/LegacyPDP.cpp:96 msgid "Configuration file not specified in ConfigBlock" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:40 src/hed/shc/legacy/LegacyPDP.cpp:105 #, fuzzy msgid "BlockName is empty" msgstr "Policy is leer" #: src/hed/shc/legacy/LegacyMap.cpp:99 #, fuzzy, c-format msgid "Failed processing user mapping command: unixmap %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/hed/shc/legacy/LegacyMap.cpp:106 #, fuzzy, c-format msgid "Failed processing user mapping command: unixgroup %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/hed/shc/legacy/LegacyMap.cpp:113 #, fuzzy, c-format msgid "Failed processing user mapping command: unixvo %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/hed/shc/legacy/LegacyMap.cpp:173 msgid "LegacyMap: no configurations blocks defined" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:195 src/hed/shc/legacy/LegacyPDP.cpp:210 msgid "" "LegacyPDP: there is no ARCLEGACY Sec Attribute defined. Probably ARC Legacy " "Sec Handler is not configured or failed." msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:200 src/hed/shc/legacy/LegacyPDP.cpp:215 msgid "LegacyPDP: ARC Legacy Sec Attribute not recognized." 
msgstr "" #: src/hed/shc/legacy/LegacyPDP.cpp:115 #, fuzzy, c-format msgid "Failed to parse configuration file %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/hed/shc/legacy/LegacyPDP.cpp:121 #, fuzzy, c-format msgid "Block %s not found in configuration file %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/hed/shc/legacy/LegacySecHandler.cpp:36 #: src/hed/shc/legacy/LegacySecHandler.cpp:110 msgid "LegacySecHandler: configuration file not specified" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:146 src/hed/shc/legacy/arc_lcmaps.cpp:163 #, fuzzy, c-format msgid "" "Failed to convert GSI credential to GSS credential (major: %d, minor: %d)" msgstr "" "Fehler bei Konvertieren von GSI Credential zu GCC Credential (major: %d, " "minor: %d)%s" #: src/hed/shc/legacy/arc_lcas.cpp:172 src/hed/shc/legacy/arc_lcmaps.cpp:189 #, fuzzy msgid "Missing subject name" msgstr "Fehlendes security Objekt in Nachricht" #: src/hed/shc/legacy/arc_lcas.cpp:177 src/hed/shc/legacy/arc_lcmaps.cpp:194 #, fuzzy msgid "Missing path of credentials file" msgstr "Pfad zu verlangter Datei" #: src/hed/shc/legacy/arc_lcas.cpp:182 msgid "Missing name of LCAS library" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:199 #, c-format msgid "Can't load LCAS library %s: %s" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:209 #, c-format msgid "Can't find LCAS functions in a library %s" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:219 #, fuzzy msgid "Failed to initialize LCAS" msgstr "Fehler bei Initialisierung von OpenSSL Bibliothek" #: src/hed/shc/legacy/arc_lcas.cpp:234 #, fuzzy msgid "Failed to terminate LCAS" msgstr "Fehler beim Authentifizieren: %s" #: src/hed/shc/legacy/arc_lcmaps.cpp:199 msgid "Missing name of LCMAPS library" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:213 #, fuzzy msgid "Can't read policy names" msgstr "Kann nicht von Quelle lesen" #: src/hed/shc/legacy/arc_lcmaps.cpp:224 #, c-format msgid "Can't load LCMAPS library %s: %s" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:236 #, c-format msgid "Can't find LCMAPS functions in a library %s" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:248 msgid "LCMAPS has lcmaps_run" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:249 msgid "LCMAPS has getCredentialData" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:253 #, fuzzy msgid "Failed to initialize LCMAPS" msgstr "Fehler bei Initialisierung von OpenSSL Bibliothek" #: src/hed/shc/legacy/arc_lcmaps.cpp:296 #, c-format msgid "LCMAPS returned invalid GID: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:299 #, fuzzy msgid "LCMAPS did not return any GID" msgstr "SRM lieferte keine Information zurück" #: src/hed/shc/legacy/arc_lcmaps.cpp:302 #, c-format msgid "LCMAPS returned UID which has no username: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:305 #, c-format msgid "LCMAPS returned invalid UID: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:308 #, fuzzy msgid "LCMAPS did not return any UID" msgstr "SRM lieferte keine Information zurück" #: src/hed/shc/legacy/arc_lcmaps.cpp:318 #, fuzzy msgid "Failed to terminate LCMAPS" msgstr "Fehler beim Authentifizieren: %s" #: src/hed/shc/legacy/auth.cpp:293 #, fuzzy, c-format msgid "Credentials stored in temporary file %s" msgstr "Konnte nicht in temporäre Datei schreiben: %s" #: src/hed/shc/legacy/auth.cpp:302 #, fuzzy, c-format msgid "Assigned to authorization group %s" msgstr "Delegations Authorisation zugelassen" #: src/hed/shc/legacy/auth.cpp:307 #, c-format msgid "Assigned to VO %s" msgstr "" #: 
src/hed/shc/legacy/auth_file.cpp:24 #: src/services/gridftpd/auth/auth_file.cpp:24 #, fuzzy, c-format msgid "Failed to read file %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/shc/legacy/auth_ldap.cpp:22 #, fuzzy msgid "LDAP authorization is not supported anymore" msgstr "Es wurde keine authorization response erwidert" #: src/hed/shc/legacy/auth_plugin.cpp:44 src/hed/shc/legacy/unixmap.cpp:260 #: src/services/gridftpd/auth/auth_plugin.cpp:70 #: src/services/gridftpd/auth/unixmap.cpp:251 #, c-format msgid "Plugin %s returned: %u" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:48 src/hed/shc/legacy/unixmap.cpp:264 #, fuzzy, c-format msgid "Plugin %s timeout after %u seconds" msgstr "Verbindung zu %s fehlgeschlagen nach %i Sekunden" #: src/hed/shc/legacy/auth_plugin.cpp:51 src/hed/shc/legacy/unixmap.cpp:267 #, c-format msgid "Plugin %s failed to start" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:53 src/hed/shc/legacy/unixmap.cpp:269 #, fuzzy, c-format msgid "Plugin %s printed: %s" msgstr "lfn: %s - pfn: %s" #: src/hed/shc/legacy/auth_plugin.cpp:54 src/hed/shc/legacy/unixmap.cpp:270 #, fuzzy, c-format msgid "Plugin %s error: %s" msgstr "Globus Fehler: %s" #: src/hed/shc/legacy/auth_voms.cpp:39 #: src/services/gridftpd/auth/auth_voms.cpp:45 #, fuzzy msgid "Missing VO in configuration" msgstr "Fehlendes oder leeres KeyPath Element" #: src/hed/shc/legacy/auth_voms.cpp:44 #: src/services/gridftpd/auth/auth_voms.cpp:51 #, fuzzy msgid "Missing group in configuration" msgstr "Fehlendes oder leeres KeyPath Element" #: src/hed/shc/legacy/auth_voms.cpp:49 #: src/services/gridftpd/auth/auth_voms.cpp:57 #, fuzzy msgid "Missing role in configuration" msgstr "Fehlendes oder leeres KeyPath Element" #: src/hed/shc/legacy/auth_voms.cpp:54 #: src/services/gridftpd/auth/auth_voms.cpp:63 #, fuzzy msgid "Missing capabilities in configuration" msgstr "Fehlendes oder leeres KeyPath Element" #: src/hed/shc/legacy/auth_voms.cpp:58 #: src/services/gridftpd/auth/auth_voms.cpp:67 #, c-format msgid "Rule: vo: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:59 #: src/services/gridftpd/auth/auth_voms.cpp:68 #, c-format msgid "Rule: group: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:60 #: src/services/gridftpd/auth/auth_voms.cpp:69 #, fuzzy, c-format msgid "Rule: role: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/shc/legacy/auth_voms.cpp:61 #: src/services/gridftpd/auth/auth_voms.cpp:70 #, fuzzy, c-format msgid "Rule: capabilities: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/shc/legacy/auth_voms.cpp:64 #: src/services/gridftpd/auth/auth_voms.cpp:77 #, c-format msgid "Match vo: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:71 #, fuzzy, c-format msgid "Matched: %s %s %s %s" msgstr "" "cnd:\n" "%s ist ein %s" #: src/hed/shc/legacy/auth_voms.cpp:86 #: src/services/gridftpd/auth/auth_voms.cpp:98 msgid "Matched nothing" msgstr "" #: src/hed/shc/legacy/simplemap.cpp:71 src/hed/shc/legacy/simplemap.cpp:76 #: src/services/gridftpd/auth/simplemap.cpp:63 #: src/services/gridftpd/auth/simplemap.cpp:68 #, c-format msgid "SimpleMap: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:49 src/hed/shc/legacy/unixmap.cpp:54 #: src/hed/shc/legacy/unixmap.cpp:98 src/hed/shc/legacy/unixmap.cpp:103 #: src/hed/shc/legacy/unixmap.cpp:147 src/hed/shc/legacy/unixmap.cpp:152 #: src/services/gridftpd/auth/unixmap.cpp:47 #: src/services/gridftpd/auth/unixmap.cpp:52 #: src/services/gridftpd/auth/unixmap.cpp:96 #: src/services/gridftpd/auth/unixmap.cpp:101 #: src/services/gridftpd/auth/unixmap.cpp:145 #: 
src/services/gridftpd/auth/unixmap.cpp:150 msgid "User name mapping command is empty" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:61 src/services/gridftpd/auth/unixmap.cpp:59 #, c-format msgid "User name mapping has empty group: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:72 src/hed/shc/legacy/unixmap.cpp:121 #: src/hed/shc/legacy/unixmap.cpp:169 #: src/services/gridftpd/auth/unixmap.cpp:70 #: src/services/gridftpd/auth/unixmap.cpp:119 #: src/services/gridftpd/auth/unixmap.cpp:167 #, c-format msgid "User name mapping has empty command: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:110 #: src/services/gridftpd/auth/unixmap.cpp:108 #, c-format msgid "User name mapping has empty VO: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:159 #: src/services/gridftpd/auth/unixmap.cpp:157 #, c-format msgid "User name mapping has empty name: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:208 src/hed/shc/legacy/unixmap.cpp:213 #: src/hed/shc/legacy/unixmap.cpp:229 src/hed/shc/legacy/unixmap.cpp:235 #: src/services/gridftpd/auth/unixmap.cpp:212 #: src/services/gridftpd/auth/unixmap.cpp:217 #: src/services/gridftpd/auth/unixmap.cpp:233 msgid "Plugin (user mapping) command is empty" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:219 #: src/services/gridftpd/auth/unixmap.cpp:223 #, c-format msgid "Plugin (user mapping) timeout is not a number: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:223 #: src/services/gridftpd/auth/unixmap.cpp:227 #, c-format msgid "Plugin (user mapping) timeout is wrong number: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:257 #: src/services/gridftpd/auth/unixmap.cpp:248 #, c-format msgid "Plugin %s returned too much: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:278 msgid "User subject match is missing user subject." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:282 #: src/services/gridftpd/auth/unixmap.cpp:266 #, c-format msgid "Mapfile at %s can't be opened." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:305 #: src/services/gridftpd/auth/unixmap.cpp:290 msgid "User pool call is missing user subject." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:310 #: src/services/gridftpd/auth/unixmap.cpp:295 #, c-format msgid "User pool at %s can't be opened." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:315 #: src/services/gridftpd/auth/unixmap.cpp:300 #, c-format msgid "User pool at %s failed to perform user mapping." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:332 #: src/services/gridftpd/auth/unixmap.cpp:317 #, c-format msgid "User name direct mapping is missing user name: %s." 
msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:48 msgid "Creating a pdpservice client" msgstr "Lege pdpservice client an" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:80 #, fuzzy msgid "Arc policy can not been carried by SAML2.0 profile of XACML" msgstr "Arc policy can nicht mit SAML2.0 Profil von XACML geprüft werden" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:152 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:185 msgid "Policy Decision Service invocation failed" msgstr "Ausführen des Policy Decision Service schlug fehl" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:170 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:204 msgid "Authorized from remote pdp service" msgstr "Authorisiert durch remote pdp service" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:171 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:205 msgid "Unauthorized from remote pdp service" msgstr "Nicht authorisiert von entferntem PDP service" #: src/hed/shc/saml2sso_assertionconsumersh/SAML2SSO_AssertionConsumerSH.cpp:69 #, fuzzy msgid "Can not get SAMLAssertion SecAttr from message context" msgstr "Kann SAMLAssertion SecAttr nicht erhalten von message context" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:152 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:44 msgid "Missing or empty CertificatePath element" msgstr "Fehlendes oder leeres CertificatePath Element" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:157 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:49 msgid "Missing or empty KeyPath element" msgstr "Fehlendes oder leeres KeyPath Element" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:163 msgid "" "Both of CACertificatePath and CACertificatesDir elements missing or empty" msgstr "" "Sowohl CACertificatePath als auch CACertificatesDir Elemente sind fehlend " "oder leer" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:175 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:61 msgid "" "Missing or empty CertificatePath or CACertificatesDir element; will only " "check the signature, will not do message authentication" msgstr "" "Fehlendes oder leeres CertificatePath oder CACertificatesDir Element; werde " "nur die Signature überprüfen, die Nachricht jedoch nicht authentifizieren" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:179 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:65 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:65 #, fuzzy, c-format msgid "Processing type not supported: %s" msgstr "Verarbeitungstyp nicht unterstützt: %s" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:199 msgid "Failed to parse SAML Token from incoming SOAP" msgstr "Konnte SAML Token nicht aus eingehender SOAP herausparsen" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:209 msgid "Failed to authenticate SAML Token inside the incoming SOAP" msgstr "Konnte SAML Token aus eingehender SOAP nicht authentifizieren" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:212 msgid "Succeeded to authenticate SAMLToken" msgstr "Erfolreiche Anthentifikation von SAMLTOken" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:275 #, fuzzy, c-format msgid "No response from AA service %s" msgstr "Keine Antwort von AA service %s schlug fehl" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:279 #, fuzzy, c-format msgid "SOAP Request to AA service %s failed" msgstr "SOAP Request zu AA service %s schlug fehl" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:287 msgid "Cannot find content under response soap message" msgstr "Kann Inhalt in SOAP-Antwort nicht finden" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:291 msgid "Cannot find under response soap 
message:" msgstr "Kann in SOAP-Antwort nicht finden" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:308 msgid "The Response is not going to this end" msgstr "Die Antwort geht nicht bis zu diesem Ende" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:315 msgid "The StatusCode is Success" msgstr "Der StatusCode ist Success" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:335 msgid "Failed to generate SAML Token for outgoing SOAP" msgstr "Konnte SAML Token für ausgehendes SOAP nicht generieren" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:345 #, fuzzy msgid "SAML Token handler is not configured" msgstr "SAML Token handler ist nicht konfiguriert" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:29 #, fuzzy, c-format msgid "Access list location: %s" msgstr "Zugriffslist location: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:39 msgid "" "No policy file or DNs specified for simplelist.pdp, please set location " "attribute or at least one DN element for simplelist PDP node in " "configuration." msgstr "" "Keine Policy Datei oder DNs angegeben für simplelist.pdp, bitte setzen Sie " "ein location Attribut oder zumindest ein DN Element für den PDP Knoten in " "der Konfiguration" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:42 #, fuzzy, c-format msgid "Subject to match: %s" msgstr "Subjekt: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:45 #, fuzzy, c-format msgid "Policy subject: %s" msgstr "Subjekt: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:47 #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:73 #, fuzzy, c-format msgid "Authorized from simplelist.pdp: %s" msgstr "Authorisiert durch simplelist.pdp" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:54 #, fuzzy msgid "" "The policy file setup for simplelist.pdp does not exist, please check " "location attribute for simplelist PDP node in service configuration" msgstr "" "Die Policy Datei Konfiguration für simplelist.pdb existiert nicht, bitte " "überprüfen Sie das location Attribut für simplelist PDP node in der Serivice " "Konfiguration" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:61 #, fuzzy, c-format msgid "Policy line: %s" msgstr "Policy Zeile: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:79 #, fuzzy, c-format msgid "Not authorized from simplelist.pdp: %s" msgstr "Nicht authorisiert von simplelist.pdp" #: src/hed/shc/test.cpp:27 src/hed/shc/testinterface_arc.cpp:26 #: src/hed/shc/testinterface_xacml.cpp:26 msgid "Start test" msgstr "Starte Test" #: src/hed/shc/test.cpp:101 #, fuzzy msgid "Input request from a file: Request.xml" msgstr "Input request von einer Datei: Request.xml" #: src/hed/shc/test.cpp:107 src/hed/shc/test.cpp:197 #: src/hed/shc/testinterface_arc.cpp:124 #, c-format msgid "There is %d subjects, which satisfy at least one policy" msgstr "Es gibt %d Subjekte, die wenigstens eine Policy erfüllen" #: src/hed/shc/test.cpp:121 #, fuzzy, c-format msgid "Attribute Value (1): %s" msgstr "Attribut Wert (1): %s" #: src/hed/shc/test.cpp:132 #, fuzzy msgid "Input request from code" msgstr "Eingabe-Aufforderung von code" #: src/hed/shc/test.cpp:211 #, fuzzy, c-format msgid "Attribute Value (2): %s" msgstr "Attributewert (2): %s" #: src/hed/shc/testinterface_arc.cpp:75 src/hed/shc/testinterface_xacml.cpp:46 #, fuzzy msgid "Can not dynamically produce Policy" msgstr "Kann Policy nicht dynamisch produzieren" #: src/hed/shc/testinterface_arc.cpp:138 #, fuzzy, c-format msgid "Attribute Value inside Subject: %s" msgstr "Attributwert in Subjekt: %s" #: src/hed/shc/testinterface_arc.cpp:148 msgid "The request has passed the policy evaluation" 
msgstr "Die Anfrage hat die Policy Evaluierung bestanden" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:43 msgid "Missing or empty PasswordSource element" msgstr "Fehlendes oder leeres PasswordSource Element" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:54 #, c-format msgid "Password encoding type not supported: %s" msgstr "Passwort Kodierung nicht unterstützt: %s" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:59 msgid "Missing or empty Username element" msgstr "Fehlendes oder leeres Username Element" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:79 msgid "The payload of incoming message is empty" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:84 #, fuzzy msgid "Failed to cast PayloadSOAP from incoming payload" msgstr "Konnte SAML Token nicht aus eingehender SOAP herausparsen" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:89 msgid "Failed to parse Username Token from incoming SOAP" msgstr "Konnte Username Token nicht von eingehender SOAP Nachricht herauslesen" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:95 msgid "Failed to authenticate Username Token inside the incoming SOAP" msgstr "" "Fehler bei der Authentifikation des Username Token in der einngehenden SOAP" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:98 msgid "Succeeded to authenticate UsernameToken" msgstr "Erfolgreiche Authentifikation des UsernameToken" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:108 msgid "The payload of outgoing message is empty" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:113 msgid "Failed to cast PayloadSOAP from outgoing payload" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:119 msgid "Failed to generate Username Token for outgoing SOAP" msgstr "Fehler bei Erstellen von Nutzernamen Token für ausgehende SOAP" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:127 msgid "Username Token handler is not configured" msgstr "Nutzernamen Token handler ist nicht konfiguriert" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:81 msgid "Failed to parse X509 Token from incoming SOAP" msgstr "Fehler bei Parsen von X509 Token in eigehendem SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:85 msgid "Failed to verify X509 Token inside the incoming SOAP" msgstr "Fehler bei Verifizieren von X509 Token in eigehendem SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:89 msgid "Failed to authenticate X509 Token inside the incoming SOAP" msgstr "Fehler bei Authentifizieren von X509 Token in eigehendem SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:92 msgid "Succeeded to authenticate X509Token" msgstr "X509Token erfolgreich authentifiziert" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:102 msgid "Failed to generate X509 Token for outgoing SOAP" msgstr "Fehler bei Generieren von X509 Token für ausgehende SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:112 msgid "X509 Token handler is not configured" msgstr "X509 Token handler ist nicht konfiguriert" #: src/hed/shc/xacmlpdp/XACMLApply.cpp:29 msgid "Can not create function: FunctionId does not exist" msgstr "Kann Funktion nicht anlegen: FunctionId existiert nicht" #: src/hed/shc/xacmlpdp/XACMLApply.cpp:33 #: src/hed/shc/xacmlpdp/XACMLTarget.cpp:40 #, fuzzy, c-format msgid "Can not create function %s" msgstr "Kann Funktion %s nicht anlegen" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:88 #, fuzzy msgid "Can not find XACMLPDPContext" msgstr "Kann XACMLPDPContext nciht finden" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:136 msgid "Evaluator for XACMLPDP was not loaded" msgstr "Evaluator für XACMLPDP wurde nicht geladen" 
#: src/hed/shc/xacmlpdp/XACMLPDP.cpp:151 src/hed/shc/xacmlpdp/XACMLPDP.cpp:159 #, fuzzy msgid "Failed to convert security information to XACML request" msgstr "" "Fehler bei Konvertierung der security information zu einer XACML Anfrage" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:167 #, c-format msgid "XACML request: %s" msgstr "XACML Anfrage: %s" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:179 msgid "Authorized from xacml.pdp" msgstr "Authorisiert durch xacml.pdp" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:180 #, fuzzy msgid "UnAuthorized from xacml.pdp" msgstr "UnAuthorisiert durch xacml.pdp" #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:55 msgid "Can not find element with proper namespace" msgstr "Kann element mit passendem namespace nicht finden" #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:132 msgid "No target available inside the policy" msgstr "Kein Ziel innerhalb der Policy vorhanden" #: src/hed/shc/xacmlpdp/XACMLRequest.cpp:34 msgid "Request is empty" msgstr "Anfrage ist leer" #: src/hed/shc/xacmlpdp/XACMLRequest.cpp:39 msgid "Can not find element with proper namespace" msgstr "Kann element mit passendem namespace nicht finden" #: src/hed/shc/xacmlpdp/XACMLRule.cpp:35 msgid "Invalid Effect" msgstr "Ungültiger Effekt" #: src/hed/shc/xacmlpdp/XACMLRule.cpp:48 msgid "No target available inside the rule" msgstr "Kein Ziel verfügbar in dieser Regel" #: src/libs/data-staging/DTR.cpp:86 src/libs/data-staging/DTR.cpp:90 #, fuzzy, c-format msgid "Could not handle endpoint %s" msgstr "konnte Ende von clientxrsl nicht finden" #: src/libs/data-staging/DTR.cpp:100 #, fuzzy msgid "Source is the same as destination" msgstr "Quelle Ziel" #: src/libs/data-staging/DTR.cpp:174 #, fuzzy, c-format msgid "Invalid ID: %s" msgstr "Ungültige URL: %s" #: src/libs/data-staging/DTR.cpp:211 #, fuzzy, c-format msgid "%s->%s" msgstr "%s (%s)" #: src/libs/data-staging/DTR.cpp:330 #, c-format msgid "No callback for %s defined" msgstr "" #: src/libs/data-staging/DTR.cpp:345 #, c-format msgid "NULL callback for %s" msgstr "" #: src/libs/data-staging/DTR.cpp:348 #, c-format msgid "Request to push to unknown owner - %u" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:48 #: src/libs/data-staging/DataDelivery.cpp:72 msgid "Received invalid DTR" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:54 #, c-format msgid "Delivery received new DTR %s with source: %s, destination: %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:68 msgid "Received no DTR" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:80 #, c-format msgid "Cancelling DTR %s with source: %s, destination: %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:91 #, c-format msgid "DTR %s requested cancel but no active transfer" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:147 #, fuzzy, c-format msgid "Cleaning up after failure: deleting %s" msgstr "Lege Verzeichnis %s an" #: src/libs/data-staging/DataDelivery.cpp:188 #: src/libs/data-staging/DataDelivery.cpp:263 #: src/libs/data-staging/DataDelivery.cpp:303 #: src/libs/data-staging/DataDelivery.cpp:323 msgid "Failed to delete delivery object or deletion timed out" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:254 #, c-format msgid "Transfer finished: %llu bytes transferred %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:329 msgid "Data delivery loop exited" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:137 #, fuzzy, c-format msgid "Bad checksum format %s" msgstr "Errechnete checksum: %s" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:210 #, fuzzy, c-format msgid "DataDelivery: %s" 
msgstr "Fataler Fehler: %s" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:222 #, c-format msgid "DataStagingDelivery exited with code %i" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:241 #, c-format msgid "Transfer killed after %i seconds without communication" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:67 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:315 #, fuzzy, c-format msgid "Connecting to Delivery service at %s" msgstr "Kein Verbindungsaufbau zu Server: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:94 #, fuzzy, c-format msgid "Failed to set up credential delegation with %s" msgstr "Initiierung der Delegation fehlgeschlagen" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:106 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:332 #, fuzzy, c-format msgid "Could not connect to service %s: %s" msgstr "Fehler bei Verbinden zu server %s:%d" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:114 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:340 #, fuzzy, c-format msgid "No SOAP response from Delivery service %s" msgstr "Keine Antwort von AA service %s schlug fehl" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:128 #, fuzzy, c-format msgid "Failed to start transfer request: %s" msgstr "Fehler bei Transfer von Daten" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:135 #, fuzzy, c-format msgid "Bad format in XML response from service at %s: %s" msgstr "Formatierungfehler in Dati %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:143 #, fuzzy, c-format msgid "Could not make new transfer request: %s: %s" msgstr "Konnte temporäre Datei nicht anlegen: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:148 #, c-format msgid "Started remote Delivery at %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:181 #, fuzzy, c-format msgid "Failed to send cancel request: %s" msgstr "Kann Kanal stdout nicht nutzen" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:188 #, fuzzy msgid "Failed to cancel: No SOAP response" msgstr "Keine SOAP Antwort" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:202 #, fuzzy, c-format msgid "Failed to cancel transfer request: %s" msgstr "Fehler bei Transfer von Daten" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:209 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:290 #, fuzzy, c-format msgid "Bad format in XML response: %s" msgstr "Formatierungfehler in Dati %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:216 #, fuzzy, c-format msgid "Failed to cancel: %s" msgstr "Fehler beim Authentifizieren: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:260 #, fuzzy msgid "No SOAP response from delivery service" msgstr "Keine Antwort von Server erhalten" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:281 #, fuzzy, c-format msgid "Failed to query state: %s" msgstr "Fehler beim Authentifizieren: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:355 #, c-format msgid "SOAP fault from delivery service at %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:363 #, c-format msgid "Bad format in XML response from delivery service at %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:371 #, fuzzy, c-format msgid "Error pinging delivery service at %s: %s: %s" msgstr "Konnte replica nicht finden: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:379 #, fuzzy, c-format msgid "Dir %s allowed at service %s" msgstr "Delegation service: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:473 #, c-format 
msgid "" "DataDelivery log tail:\n" "%s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:486 #, fuzzy msgid "Failed locating credentials" msgstr "Fehler beim Auflisten von Meta-Dateien" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:491 #, fuzzy msgid "Failed to initiate client connection" msgstr "Initiierung der Delegation fehlgeschlagen" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:497 msgid "Client connection has no entry point" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:134 #, fuzzy msgid "Unexpected arguments" msgstr "Kann doc Argument nicht anlegen" #: src/libs/data-staging/DataStagingDelivery.cpp:137 #, fuzzy msgid "Source URL missing" msgstr "ServiceURL fehlt" #: src/libs/data-staging/DataStagingDelivery.cpp:140 #, fuzzy msgid "Destination URL missing" msgstr "Ziel: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:144 #, c-format msgid "Source URL not valid: %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:148 #, fuzzy, c-format msgid "Destination URL not valid: %s" msgstr "Ziel: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:205 #, fuzzy, c-format msgid "Unknown transfer option: %s" msgstr "Datentransfer abgebrochen: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:230 #, fuzzy, c-format msgid "Source URL not supported: %s" msgstr "Verarbeitungstyp nicht unterstützt: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:235 #: src/libs/data-staging/DataStagingDelivery.cpp:254 msgid "No credentials supplied" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:249 #, fuzzy, c-format msgid "Destination URL not supported: %s" msgstr "Delegation role nicht unterstützt: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:298 #, fuzzy, c-format msgid "Will calculate %s checksum" msgstr "Errechneted checksum: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:309 msgid "Cannot use supplied --size option" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:458 #, c-format msgid "Checksum mismatch between calculated checksum %s and source checksum %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:468 #, fuzzy, c-format msgid "Failed cleaning up destination %s" msgstr "Fehler bei Schreiben zu Ziel" #: src/libs/data-staging/Processor.cpp:60 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:418 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:435 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:331 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:348 #: src/services/cache_service/CacheService.cpp:114 msgid "Error creating cache" msgstr "" #: src/libs/data-staging/Processor.cpp:86 #, c-format msgid "Forcing re-download of file %s" msgstr "" #: src/libs/data-staging/Processor.cpp:103 #, c-format msgid "Will wait around %is" msgstr "" #: src/libs/data-staging/Processor.cpp:123 #, fuzzy, c-format msgid "Force-checking source of cache file %s" msgstr "Fehler bei Zugriff auf Cache-Datei %s: %s" #: src/libs/data-staging/Processor.cpp:126 #, fuzzy, c-format msgid "Source check requested but failed: %s" msgstr "Anlegen von Socket schlug fehl: %s" #: src/libs/data-staging/Processor.cpp:146 msgid "Permission checking failed, will try downloading without using cache" msgstr "" #: src/libs/data-staging/Processor.cpp:177 #, fuzzy, c-format msgid "Will download to cache file %s" msgstr "Lese Archiv Datei %s" #: src/libs/data-staging/Processor.cpp:199 msgid "Looking up source replicas" msgstr "" #: src/libs/data-staging/Processor.cpp:217 #: src/libs/data-staging/Processor.cpp:329 
#, fuzzy, c-format msgid "Skipping replica on local host %s" msgstr "Doppelte replica location: %s" #: src/libs/data-staging/Processor.cpp:225 #: src/libs/data-staging/Processor.cpp:337 #, fuzzy, c-format msgid "No locations left for %s" msgstr "Keine locations gefunden für %s" #: src/libs/data-staging/Processor.cpp:248 #: src/libs/data-staging/Processor.cpp:496 #, fuzzy msgid "Resolving destination replicas" msgstr "Probleme bei Auflösen von Zieladresse" #: src/libs/data-staging/Processor.cpp:266 #, fuzzy msgid "No locations for destination different from source found" msgstr "Keine locations gefunden für Ziel" #: src/libs/data-staging/Processor.cpp:278 #, fuzzy msgid "Pre-registering destination in index service" msgstr "Erstellen und senden einer Index Service Anfrage" #: src/libs/data-staging/Processor.cpp:305 msgid "Resolving source replicas in bulk" msgstr "" #: src/libs/data-staging/Processor.cpp:319 #, fuzzy, c-format msgid "No replicas found for %s" msgstr "Keine locations gefunden für %s" #: src/libs/data-staging/Processor.cpp:361 #, fuzzy, c-format msgid "Checking %s" msgstr "Herausforderung: %s" #: src/libs/data-staging/Processor.cpp:370 #: src/libs/data-staging/Processor.cpp:429 #, fuzzy msgid "Metadata of replica and index service differ" msgstr "" "Файл назначения совпадает с исходным.\n" "%1" #: src/libs/data-staging/Processor.cpp:378 #, fuzzy, c-format msgid "Failed checking source replica %s: %s" msgstr "Fehler bei Verbinden zu server %s:%d" #: src/libs/data-staging/Processor.cpp:405 msgid "Querying source replicas in bulk" msgstr "" #: src/libs/data-staging/Processor.cpp:417 #, fuzzy, c-format msgid "Failed checking source replica: %s" msgstr "Löschen fehlgeschlagen von job: %s" #: src/libs/data-staging/Processor.cpp:423 #, fuzzy msgid "Failed checking source replica" msgstr "Fehler bei Lesen von Quelle" #: src/libs/data-staging/Processor.cpp:464 #, fuzzy msgid "Finding existing destination replicas" msgstr "Fehler bei Schreiben zu Ziel" #: src/libs/data-staging/Processor.cpp:476 #, fuzzy, c-format msgid "Failed to delete replica %s: %s" msgstr "Fehler bei Schreiben zu Datein %s: %s" #: src/libs/data-staging/Processor.cpp:490 #, fuzzy, c-format msgid "Unregistering %s" msgstr "Außer Acht lassend %s" #: src/libs/data-staging/Processor.cpp:501 #, fuzzy msgid "Pre-registering destination" msgstr "Probleme bei Auflösen von Zieladresse" #: src/libs/data-staging/Processor.cpp:507 #, fuzzy, c-format msgid "Failed to pre-clean destination: %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/libs/data-staging/Processor.cpp:531 msgid "Preparing to stage source" msgstr "" #: src/libs/data-staging/Processor.cpp:544 #, c-format msgid "Source is not ready, will wait %u seconds" msgstr "" #: src/libs/data-staging/Processor.cpp:550 #, fuzzy msgid "No physical files found for source" msgstr "Keine locations gefunden für %s" #: src/libs/data-staging/Processor.cpp:569 #, fuzzy msgid "Preparing to stage destination" msgstr "Kann nicht zu Ziel schreiben" #: src/libs/data-staging/Processor.cpp:582 #, c-format msgid "Destination is not ready, will wait %u seconds" msgstr "" #: src/libs/data-staging/Processor.cpp:588 #, fuzzy msgid "No physical files found for destination" msgstr "Keine locations gefunden für Ziel" #: src/libs/data-staging/Processor.cpp:615 msgid "Releasing source" msgstr "" #: src/libs/data-staging/Processor.cpp:619 #, c-format msgid "There was a problem during post-transfer source handling: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:624 #, fuzzy msgid 
"Releasing destination" msgstr "Probleme bei Auflösen von Zieladresse" #: src/libs/data-staging/Processor.cpp:628 #, c-format msgid "" "There was a problem during post-transfer destination handling after error: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:632 #, c-format msgid "Error with post-transfer destination handling: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:659 #, fuzzy msgid "Removing pre-registered destination in index service" msgstr "Keine execution services in index service registriert" #: src/libs/data-staging/Processor.cpp:662 #, c-format msgid "" "Failed to unregister pre-registered destination %s: %s. You may need to " "unregister it manually" msgstr "" #: src/libs/data-staging/Processor.cpp:668 msgid "Registering destination replica" msgstr "" #: src/libs/data-staging/Processor.cpp:671 #, fuzzy, c-format msgid "Failed to register destination replica: %s" msgstr "Fehler bei Ändern von owner des Zielverzeichnisses zu %i: %s" #: src/libs/data-staging/Processor.cpp:674 #, c-format msgid "" "Failed to unregister pre-registered destination %s. You may need to " "unregister it manually" msgstr "" #: src/libs/data-staging/Processor.cpp:705 msgid "Error creating cache. Stale locks may remain." msgstr "" #: src/libs/data-staging/Processor.cpp:740 #, c-format msgid "Linking/copying cached file to %s" msgstr "" #: src/libs/data-staging/Processor.cpp:761 #, fuzzy, c-format msgid "Failed linking cache file to %s" msgstr "Fehler beim Auflisten von Dateien" #: src/libs/data-staging/Processor.cpp:765 #, fuzzy, c-format msgid "Error linking cache file to %s." msgstr "Fehler bei Entfernen von Cache-Datei %s: %s" #: src/libs/data-staging/Processor.cpp:787 #: src/libs/data-staging/Processor.cpp:794 #, fuzzy msgid "Adding to bulk request" msgstr "Füge Anfrage-Token %s hinzu" #: src/libs/data-staging/Scheduler.cpp:174 #, c-format msgid "Using next %s replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:181 #, c-format msgid "No more %s replicas" msgstr "" #: src/libs/data-staging/Scheduler.cpp:183 msgid "Will clean up pre-registered destination" msgstr "" #: src/libs/data-staging/Scheduler.cpp:187 msgid "Will release cache locks" msgstr "" #: src/libs/data-staging/Scheduler.cpp:190 msgid "Moving to end of data staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:199 #, c-format msgid "Source is mapped to %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:203 msgid "Cannot link to source which can be modified, will copy instead" msgstr "" #: src/libs/data-staging/Scheduler.cpp:213 msgid "Cannot link to a remote destination. Will not use mapped URL" msgstr "" #: src/libs/data-staging/Scheduler.cpp:216 msgid "Linking mapped file" msgstr "" #: src/libs/data-staging/Scheduler.cpp:223 #, fuzzy, c-format msgid "Failed to create link: %s. 
Will not use mapped URL" msgstr "Fehler bei Anlegen von soft link: %s" #: src/libs/data-staging/Scheduler.cpp:235 msgid "Linking mapped file - can't link on Windows" msgstr "" #: src/libs/data-staging/Scheduler.cpp:251 #, c-format msgid "" "Scheduler received new DTR %s with source: %s, destination: %s, assigned to " "transfer share %s with priority %d" msgstr "" #: src/libs/data-staging/Scheduler.cpp:258 msgid "" "File is not cacheable, was requested not to be cached or no cache available, " "skipping cache check" msgstr "" #: src/libs/data-staging/Scheduler.cpp:264 msgid "File is cacheable, will check cache" msgstr "" #: src/libs/data-staging/Scheduler.cpp:267 #: src/libs/data-staging/Scheduler.cpp:293 #, c-format msgid "File is currently being cached, will wait %is" msgstr "" #: src/libs/data-staging/Scheduler.cpp:286 #, fuzzy msgid "Timed out while waiting for cache lock" msgstr "Timeout beim Lesen des response header" #: src/libs/data-staging/Scheduler.cpp:297 msgid "Checking cache again" msgstr "" #: src/libs/data-staging/Scheduler.cpp:317 #, fuzzy msgid "Destination file is in cache" msgstr "Destination muss LFN enthalten" #: src/libs/data-staging/Scheduler.cpp:321 msgid "Source and/or destination is index service, will resolve replicas" msgstr "" #: src/libs/data-staging/Scheduler.cpp:324 msgid "" "Neither source nor destination are index services, will skip resolving " "replicas" msgstr "" #: src/libs/data-staging/Scheduler.cpp:334 msgid "Problem with index service, will release cache lock" msgstr "" #: src/libs/data-staging/Scheduler.cpp:338 msgid "Problem with index service, will proceed to end of data staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:348 msgid "Checking source file is present" msgstr "" #: src/libs/data-staging/Scheduler.cpp:356 msgid "Error with source file, moving to next replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:378 #, c-format msgid "Replica %s has long latency, trying next replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:380 #, c-format msgid "No more replicas, will use %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:383 #, fuzzy, c-format msgid "Checking replica %s" msgstr "Suche nache Existenz von %s" #: src/libs/data-staging/Scheduler.cpp:393 msgid "Overwrite requested - will pre-clean destination" msgstr "" #: src/libs/data-staging/Scheduler.cpp:396 msgid "No overwrite requested or allowed, skipping pre-cleaning" msgstr "" #: src/libs/data-staging/Scheduler.cpp:404 msgid "Pre-clean failed, will still try to copy" msgstr "" #: src/libs/data-staging/Scheduler.cpp:411 #, fuzzy msgid "Source or destination requires staging" msgstr "Quelle Ziel" #: src/libs/data-staging/Scheduler.cpp:415 msgid "No need to stage source or destination, skipping staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:445 msgid "Staging request timed out, will release request" msgstr "" #: src/libs/data-staging/Scheduler.cpp:449 #, fuzzy msgid "Querying status of staging request" msgstr "Erstellen und senden von Anfrage" #: src/libs/data-staging/Scheduler.cpp:458 #, fuzzy msgid "Releasing requests" msgstr "Verarbeite %s Anfrage" #: src/libs/data-staging/Scheduler.cpp:475 msgid "DTR is ready for transfer, moving to delivery queue" msgstr "" #: src/libs/data-staging/Scheduler.cpp:490 #, fuzzy, c-format msgid "Transfer failed: %s" msgstr "Einige Transfers schlugen fehl" #: src/libs/data-staging/Scheduler.cpp:500 msgid "Releasing request(s) made during staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:503 msgid "Neither source nor 
destination were staged, skipping releasing requests" msgstr "" #: src/libs/data-staging/Scheduler.cpp:515 msgid "Trying next replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:519 #, c-format msgid "Will %s in destination index service" msgstr "" #: src/libs/data-staging/Scheduler.cpp:523 msgid "Destination is not index service, skipping replica registration" msgstr "" #: src/libs/data-staging/Scheduler.cpp:536 msgid "Error registering replica, moving to end of data staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:544 msgid "Will process cache" msgstr "" #: src/libs/data-staging/Scheduler.cpp:548 msgid "File is not cacheable, skipping cache processing" msgstr "" #: src/libs/data-staging/Scheduler.cpp:562 #, fuzzy msgid "Cancellation complete" msgstr "Transfer vollständig" #: src/libs/data-staging/Scheduler.cpp:576 msgid "Will wait 10s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:582 msgid "Error in cache processing, will retry without caching" msgstr "" #: src/libs/data-staging/Scheduler.cpp:591 msgid "Will retry without caching" msgstr "" #: src/libs/data-staging/Scheduler.cpp:609 #, fuzzy msgid "Proxy has expired" msgstr "Proxy store:" #: src/libs/data-staging/Scheduler.cpp:620 #, c-format msgid "%i retries left, will wait until %s before next attempt" msgstr "" #: src/libs/data-staging/Scheduler.cpp:636 msgid "Out of retries" msgstr "" #: src/libs/data-staging/Scheduler.cpp:638 msgid "Permanent failure" msgstr "" #: src/libs/data-staging/Scheduler.cpp:644 #, fuzzy msgid "Finished successfully" msgstr "Verbindung erfolgreich geschlossen" #: src/libs/data-staging/Scheduler.cpp:654 #, fuzzy msgid "Returning to generator" msgstr "Wiederholte Nutzung von Verbindung" #: src/libs/data-staging/Scheduler.cpp:820 #, c-format msgid "File is smaller than %llu bytes, will use local delivery" msgstr "" #: src/libs/data-staging/Scheduler.cpp:874 #, c-format msgid "Delivery service at %s can copy to %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:882 #, c-format msgid "Delivery service at %s can copy from %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:895 msgid "Could not find any useable delivery service, forcing local transfer" msgstr "" #: src/libs/data-staging/Scheduler.cpp:911 #, c-format msgid "Not using delivery service at %s because it is full" msgstr "" #: src/libs/data-staging/Scheduler.cpp:938 #, c-format msgid "Not using delivery service %s due to previous failure" msgstr "" #: src/libs/data-staging/Scheduler.cpp:948 msgid "No remote delivery services are useable, forcing local delivery" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1149 msgid "Cancelling active transfer" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1165 msgid "Processing thread timed out. 
Restarting DTR" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1233 msgid "Will use bulk request" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1255 msgid "No delivery endpoints available, will try later" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1274 msgid "Scheduler received NULL DTR" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1284 msgid "Scheduler received invalid DTR" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1373 #, fuzzy msgid "Scheduler starting up" msgstr "Konnte job nicht starten" #: src/libs/data-staging/Scheduler.cpp:1374 msgid "Scheduler configuration:" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1375 #, c-format msgid " Pre-processor slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1376 #, c-format msgid " Delivery slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1377 #, c-format msgid " Post-processor slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1378 #, c-format msgid " Emergency slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1379 #, c-format msgid " Prepared slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1380 #, fuzzy, c-format msgid "" " Shares configuration:\n" "%s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/libs/data-staging/Scheduler.cpp:1383 msgid " Delivery service: LOCAL" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1384 #, fuzzy, c-format msgid " Delivery service: %s" msgstr "Delegation service: %s" #: src/libs/data-staging/Scheduler.cpp:1389 #, fuzzy msgid "Failed to create DTR dump thread" msgstr "Fehler bei Anlegen von ldap bind thread (%s)" #: src/libs/data-staging/Scheduler.cpp:1406 #: src/services/data-staging/DataDeliveryService.cpp:513 #, c-format msgid "DTR %s cancelled" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:15 msgid "Shutting down scheduler" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:17 msgid "Scheduler stopped, exiting" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:23 #, c-format msgid "Received DTR %s back from scheduler in state %s" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:32 #, fuzzy msgid "Generator started" msgstr "Start start" #: src/libs/data-staging/examples/Generator.cpp:33 #, fuzzy msgid "Starting DTR threads" msgstr "Starte Test" #: src/libs/data-staging/examples/Generator.cpp:46 msgid "No valid credentials found, exiting" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:58 #, fuzzy, c-format msgid "Problem creating dtr (source %s, destination %s)" msgstr "Probleme bei Auflösen von Zieladresse" #: src/services/a-rex/arex.cpp:446 #, fuzzy, c-format msgid "Using cached local account '%s'" msgstr "Nutze space token %s" #: src/services/a-rex/arex.cpp:457 msgid "Will not map to 'root' account by default" msgstr "" #: src/services/a-rex/arex.cpp:470 msgid "No local account name specified" msgstr "" #: src/services/a-rex/arex.cpp:473 #, c-format msgid "Using local account '%s'" msgstr "" #: src/services/a-rex/arex.cpp:494 msgid "Failed to acquire grid-manager's configuration" msgstr "" #: src/services/a-rex/arex.cpp:519 #: src/services/cache_service/CacheService.cpp:572 #: src/services/data-staging/DataDeliveryService.cpp:687 #, c-format msgid "SOAP operation is not supported: %s" msgstr "" #: src/services/a-rex/arex.cpp:532 #, fuzzy, c-format msgid "Connection from %s: %s" msgstr "Verbindung zu %s schlug fehl: %s" #: src/services/a-rex/arex.cpp:534 #, c-format msgid "process: method: %s" msgstr "" #: src/services/a-rex/arex.cpp:535 #, c-format msgid "process: 
endpoint: %s" msgstr "" #: src/services/a-rex/arex.cpp:546 #, c-format msgid "process: id: %s" msgstr "" #: src/services/a-rex/arex.cpp:547 #, c-format msgid "process: subpath: %s" msgstr "" #: src/services/a-rex/arex.cpp:567 #: src/services/cache_service/CacheService.cpp:546 #: src/services/data-staging/DataDeliveryService.cpp:647 #: src/tests/echo/echo.cpp:98 #, c-format msgid "process: request=%s" msgstr "" #: src/services/a-rex/arex.cpp:572 #: src/services/cache_service/CacheService.cpp:551 #: src/services/data-staging/DataDeliveryService.cpp:652 #: src/tests/count/count.cpp:69 msgid "input does not define operation" msgstr "" #: src/services/a-rex/arex.cpp:575 #: src/services/cache_service/CacheService.cpp:554 #: src/services/data-staging/DataDeliveryService.cpp:655 #: src/tests/count/count.cpp:72 #, c-format msgid "process: operation: %s" msgstr "" #: src/services/a-rex/arex.cpp:591 src/services/a-rex/arex.cpp:804 #: src/services/a-rex/arex.cpp:823 src/services/a-rex/arex.cpp:837 #: src/services/a-rex/arex.cpp:847 src/services/a-rex/arex.cpp:862 #: src/services/cache_service/CacheService.cpp:588 #: src/services/data-staging/DataDeliveryService.cpp:703 msgid "Security Handlers processing failed" msgstr "" #: src/services/a-rex/arex.cpp:598 msgid "Can't obtain configuration" msgstr "" #: src/services/a-rex/arex.cpp:614 msgid "process: factory endpoint" msgstr "" #: src/services/a-rex/arex.cpp:798 src/services/a-rex/arex.cpp:815 #: src/services/cache_service/CacheService.cpp:583 #: src/services/data-staging/DataDeliveryService.cpp:698 #: src/tests/echo/echo.cpp:166 #, c-format msgid "process: response=%s" msgstr "" #: src/services/a-rex/arex.cpp:800 msgid "process: response is not SOAP" msgstr "" #: src/services/a-rex/arex.cpp:830 msgid "process: GET" msgstr "" #: src/services/a-rex/arex.cpp:831 #, c-format msgid "GET: id %s path %s" msgstr "" #: src/services/a-rex/arex.cpp:854 msgid "process: PUT" msgstr "" #: src/services/a-rex/arex.cpp:869 #, c-format msgid "process: method %s is not supported" msgstr "" #: src/services/a-rex/arex.cpp:872 msgid "process: method is not defined" msgstr "" #: src/services/a-rex/arex.cpp:908 #, fuzzy msgid "Failed to run Grid Manager thread" msgstr "Fehler bei Transfer von Daten" #: src/services/a-rex/arex.cpp:972 #, fuzzy, c-format msgid "Storing configuration in temporary file %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/services/a-rex/arex.cpp:977 #, fuzzy msgid "Failed to process service configuration" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/services/a-rex/arex.cpp:985 #, fuzzy, c-format msgid "Failed to process configuration in %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/services/a-rex/arex.cpp:991 #, fuzzy msgid "No control directory set in configuration" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/services/a-rex/arex.cpp:995 #, fuzzy msgid "No session directory set in configuration" msgstr "Fehlendes oder leeres KeyPath Element" #: src/services/a-rex/arex.cpp:999 msgid "No LRMS set in configuration" msgstr "" #: src/services/a-rex/arex.cpp:1004 #, fuzzy, c-format msgid "Failed to create control directory %s" msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/services/a-rex/arex.cpp:1033 #, c-format msgid "Provided LRMSName is not a valid URL: %s" msgstr "" #: src/services/a-rex/arex.cpp:1035 msgid "" "No LRMSName is provided. This is needed if you wish to completely comply " "with the BES specifications." 
msgstr "" #: src/services/a-rex/cachecheck.cpp:34 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:539 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:843 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:424 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:337 #, c-format msgid "Error with cache configuration: %s" msgstr "" #: src/services/a-rex/cachecheck.cpp:50 #: src/services/cache_service/CacheService.cpp:305 msgid "Error with cache configuration" msgstr "" #: src/services/a-rex/cachecheck.cpp:75 #: src/services/cache_service/CacheService.cpp:135 #: src/services/cache_service/CacheService.cpp:330 #, c-format msgid "Looking up URL %s" msgstr "" #: src/services/a-rex/cachecheck.cpp:77 #: src/services/cache_service/CacheService.cpp:144 #, fuzzy, c-format msgid "Cache file is %s" msgstr "Lese Archiv Datei %s" #: src/services/a-rex/change_activity_status.cpp:33 #, c-format msgid "" "ChangeActivityStatus: request = \n" "%s" msgstr "" "ChangeActivityStatus: запрос = \n" "%s" #: src/services/a-rex/change_activity_status.cpp:38 msgid "ChangeActivityStatus: no ActivityIdentifier found" msgstr "" #: src/services/a-rex/change_activity_status.cpp:47 msgid "ChangeActivityStatus: EPR contains no JobID" msgstr "" #: src/services/a-rex/change_activity_status.cpp:57 #, c-format msgid "ChangeActivityStatus: no job found: %s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:73 msgid "ChangeActivityStatus: missing NewStatus element" msgstr "" #: src/services/a-rex/change_activity_status.cpp:87 msgid "ChangeActivityStatus: Failed to accept delegation" msgstr "" #: src/services/a-rex/change_activity_status.cpp:103 msgid "ChangeActivityStatus: old BES state does not match" msgstr "" #: src/services/a-rex/change_activity_status.cpp:110 #, fuzzy msgid "ChangeActivityStatus: old A-REX state does not match" msgstr "" "ChangeActivityStatus: запрос = \n" "%s" #: src/services/a-rex/change_activity_status.cpp:137 #, fuzzy msgid "ChangeActivityStatus: Failed to update credentials" msgstr "" "ChangeActivityStatus: запрос = \n" "%s" #: src/services/a-rex/change_activity_status.cpp:143 #, fuzzy msgid "ChangeActivityStatus: Failed to resume job" msgstr "" "ChangeActivityStatus: запрос = \n" "%s" #: src/services/a-rex/change_activity_status.cpp:149 #, c-format msgid "ChangeActivityStatus: State change not allowed: from %s/%s to %s/%s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:168 #, c-format msgid "" "ChangeActivityStatus: response = \n" "%s" msgstr "" "ChangeActivityStatus: ответ = \n" "%s" #: src/services/a-rex/change_activity_status.cpp:213 #: src/services/a-rex/change_activity_status.cpp:217 #, fuzzy, c-format msgid "EMIES:PauseActivity: job %s - %s" msgstr "" "MigrateActivity: отзыв = \n" "%s" #: src/services/a-rex/change_activity_status.cpp:262 #: src/services/a-rex/change_activity_status.cpp:266 #, c-format msgid "EMIES:ResumeActivity: job %s - %s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:311 #: src/services/a-rex/change_activity_status.cpp:316 #, fuzzy, c-format msgid "EMIES:CancelActivity: job %s - %s" msgstr "" "CreateActivity: ответ = \n" "%s" #: src/services/a-rex/change_activity_status.cpp:324 #, fuzzy, c-format msgid "job %s cancelled successfully" msgstr "Job erfolgreich abgebrochen" #: src/services/a-rex/change_activity_status.cpp:370 #: src/services/a-rex/change_activity_status.cpp:385 #, fuzzy, c-format msgid "EMIES:WipeActivity: job %s - %s" msgstr "" "MigrateActivity: отзыв = \n" "%s" #: 
src/services/a-rex/change_activity_status.cpp:389 #, fuzzy, c-format msgid "job %s (will be) cleaned successfully" msgstr "Job erfolgreich aufgeräumt." #: src/services/a-rex/change_activity_status.cpp:435 #: src/services/a-rex/change_activity_status.cpp:440 #, c-format msgid "EMIES:RestartActivity: job %s - %s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:444 #, fuzzy, c-format msgid "job %s restarted successfully" msgstr "Datei %s erfolgreich entfernt" #: src/services/a-rex/create_activity.cpp:35 #, c-format msgid "" "CreateActivity: request = \n" "%s" msgstr "" "CreateActivity: запрос = \n" "%s" #: src/services/a-rex/create_activity.cpp:40 msgid "CreateActivity: no job description found" msgstr "" #: src/services/a-rex/create_activity.cpp:48 msgid "CreateActivity: max jobs total limit reached" msgstr "" #: src/services/a-rex/create_activity.cpp:67 msgid "CreateActivity: Failed to accept delegation" msgstr "" #: src/services/a-rex/create_activity.cpp:100 #, c-format msgid "CreateActivity: Failed to create new job: %s" msgstr "" #: src/services/a-rex/create_activity.cpp:102 msgid "CreateActivity: Failed to create new job" msgstr "" #: src/services/a-rex/create_activity.cpp:117 msgid "CreateActivity finished successfully" msgstr "" #: src/services/a-rex/create_activity.cpp:121 #, c-format msgid "" "CreateActivity: response = \n" "%s" msgstr "" "CreateActivity: ответ = \n" "%s" #: src/services/a-rex/create_activity.cpp:159 #, fuzzy, c-format msgid "" "EMIES:CreateActivity: request = \n" "%s" msgstr "" "CreateActivity: запрос = \n" "%s" #: src/services/a-rex/create_activity.cpp:165 msgid "EMIES:CreateActivity: too many activity descriptions" msgstr "" #: src/services/a-rex/create_activity.cpp:175 msgid "EMIES:CreateActivity: no job description found" msgstr "" #: src/services/a-rex/create_activity.cpp:182 msgid "EMIES:CreateActivity: max jobs total limit reached" msgstr "" #: src/services/a-rex/create_activity.cpp:208 #, fuzzy, c-format msgid "ES:CreateActivity: Failed to create new job: %s" msgstr "" "CreateActivity: ответ = \n" "%s" #: src/services/a-rex/create_activity.cpp:224 msgid "EMIES:CreateActivity finished successfully" msgstr "" #: src/services/a-rex/create_activity.cpp:225 #, fuzzy, c-format msgid "New job accepted with id %s" msgstr "Job migrierte mit Job ID: %s" #: src/services/a-rex/create_activity.cpp:229 #, fuzzy, c-format msgid "" "EMIES:CreateActivity: response = \n" "%s" msgstr "" "CreateActivity: ответ = \n" "%s" #: src/services/a-rex/delegation/DelegationStore.cpp:55 msgid "Wiping and re-creating whole storage" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:210 #: src/services/a-rex/delegation/DelegationStore.cpp:311 #, c-format msgid "DelegationStore: TouchConsumer failed to create file %s" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:271 msgid "DelegationStore: PeriodicCheckConsumers failed to resume iterator" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:291 #, c-format msgid "" "DelegationStore: PeriodicCheckConsumers failed to remove old delegation %s - " "%s" msgstr "" #: src/services/a-rex/get.cpp:112 #, c-format msgid "Get: there is no job %s - %s" msgstr "" #: src/services/a-rex/get.cpp:123 #, c-format msgid "Get: can't process file %s" msgstr "" #: src/services/a-rex/get.cpp:167 #, c-format msgid "Head: there is no job %s - %s" msgstr "" #: src/services/a-rex/get.cpp:178 #, fuzzy, c-format msgid "Head: can't process file %s" msgstr "Lese Archiv Datei %s" #: src/services/a-rex/get.cpp:190 #, 
c-format msgid "http_get: start=%llu, end=%llu, burl=%s, hpath=%s" msgstr "" #: src/services/a-rex/get.cpp:357 #, fuzzy msgid "Failed to extract credential information" msgstr "Fehler beim Verbindungen für Erneuerung von credentials" #: src/services/a-rex/get.cpp:360 #, fuzzy, c-format msgid "Checking cache permissions: DN: %s" msgstr "Check: looking für Metadata: %s" #: src/services/a-rex/get.cpp:361 #, fuzzy, c-format msgid "Checking cache permissions: VO: %s" msgstr "Check: looking für Metadata: %s" #: src/services/a-rex/get.cpp:363 #, c-format msgid "Checking cache permissions: VOMS attr: %s" msgstr "" #: src/services/a-rex/get.cpp:373 #, c-format msgid "Cache access allowed to %s by DN %s" msgstr "" #: src/services/a-rex/get.cpp:376 #, c-format msgid "DN %s doesn't match %s" msgstr "" #: src/services/a-rex/get.cpp:379 #, c-format msgid "Cache access allowed to %s by VO %s" msgstr "" #: src/services/a-rex/get.cpp:382 #, c-format msgid "VO %s doesn't match %s" msgstr "" #: src/services/a-rex/get.cpp:388 src/services/a-rex/get.cpp:407 #, c-format msgid "Bad credential value %s in cache access rules" msgstr "" #: src/services/a-rex/get.cpp:396 src/services/a-rex/get.cpp:415 #, c-format msgid "VOMS attr %s matches %s" msgstr "" #: src/services/a-rex/get.cpp:397 #, c-format msgid "Cache access allowed to %s by VO %s and role %s" msgstr "" #: src/services/a-rex/get.cpp:400 src/services/a-rex/get.cpp:419 #, c-format msgid "VOMS attr %s doesn't match %s" msgstr "" #: src/services/a-rex/get.cpp:416 #, c-format msgid "Cache access allowed to %s by VO %s and group %s" msgstr "" #: src/services/a-rex/get.cpp:422 #, c-format msgid "Unknown credential type %s for URL pattern %s" msgstr "" #: src/services/a-rex/get.cpp:428 #, c-format msgid "No match found in cache access rules for %s" msgstr "" #: src/services/a-rex/get.cpp:438 #, c-format msgid "Get from cache: Looking in cache for %s" msgstr "" #: src/services/a-rex/get.cpp:441 #, fuzzy, c-format msgid "Get from cache: Invalid URL %s" msgstr "Ungültige URL: %s" #: src/services/a-rex/get.cpp:458 #, fuzzy msgid "Get from cache: Error in cache configuration" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/services/a-rex/get.cpp:467 msgid "Get from cache: File not in cache" msgstr "" #: src/services/a-rex/get.cpp:470 #, c-format msgid "Get from cache: could not access cached file: %s" msgstr "" #: src/services/a-rex/get.cpp:480 msgid "Get from cache: Cached file is locked" msgstr "" #: src/services/a-rex/get_activity_documents.cpp:29 #, c-format msgid "" "GetActivityDocuments: request = \n" "%s" msgstr "" "GetActivityDocuments: запрос = \n" "%s" #: src/services/a-rex/get_activity_documents.cpp:40 #, fuzzy msgid "GetActivityDocuments: non-AREX job requested" msgstr "" "GetActivityDocuments: запрос = \n" "%s" #: src/services/a-rex/get_activity_documents.cpp:49 #: src/services/a-rex/get_activity_documents.cpp:60 #, c-format msgid "GetActivityDocuments: job %s - %s" msgstr "" #: src/services/a-rex/get_activity_documents.cpp:72 #, c-format msgid "" "GetActivityDocuments: response = \n" "%s" msgstr "" "GetActivityDocuments: ответ = \n" "%s" #: src/services/a-rex/get_activity_statuses.cpp:35 #, c-format msgid "" "GetActivityStatuses: request = \n" "%s" msgstr "" "GetActivityStatuses: запрос = \n" "%s" #: src/services/a-rex/get_activity_statuses.cpp:50 #, fuzzy, c-format msgid "GetActivityStatuses: unknown verbosity level requested: %s" msgstr "" "GetActivityStatuses: запрос = \n" "%s" #: src/services/a-rex/get_activity_statuses.cpp:62 #, c-format 
msgid "GetActivityStatuses: job %s - can't understand EPR" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:71 #, c-format msgid "GetActivityStatuses: job %s - %s" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:105 #, c-format msgid "" "GetActivityStatuses: response = \n" "%s" msgstr "" "GetActivityStatuses: ответ = \n" "%s" #: src/services/a-rex/get_activity_statuses.cpp:306 #: src/services/a-rex/get_activity_statuses.cpp:400 #, fuzzy, c-format msgid "EMIES:GetActivityStatus: job %s - %s" msgstr "" "GetActivityStatuses: ответ = \n" "%s" #: src/services/a-rex/get_activity_statuses.cpp:526 #, c-format msgid "EMIES:GetActivityInfo: job %s - failed to retrieve GLUE2 information" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:578 #: src/services/a-rex/get_activity_statuses.cpp:585 #, c-format msgid "EMIES:NotifyService: job %s - %s" msgstr "" #: src/services/a-rex/get_factory_attributes_document.cpp:37 #, c-format msgid "" "GetFactoryAttributesDocument: request = \n" "%s" msgstr "" "GetFactoryAttributesDocument: запрос = \n" "%s" #: src/services/a-rex/get_factory_attributes_document.cpp:62 #, c-format msgid "" "GetFactoryAttributesDocument: response = \n" "%s" msgstr "" "GetFactoryAttributesDocument: ответ = \n" "%s" #: src/services/a-rex/grid-manager/GridManager.cpp:98 #, c-format msgid "" "Cannot create directories for log file %s. Messages will be logged to this " "log" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:104 #, c-format msgid "" "Cannot open cache log file %s: %s. Cache cleaning messages will be logged to " "this log" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:110 #: src/services/a-rex/grid-manager/log/JobLog.cpp:139 #, fuzzy, c-format msgid "Running command %s" msgstr "Kommando: %s" #: src/services/a-rex/grid-manager/GridManager.cpp:114 #, fuzzy msgid "Failed to start cache clean script" msgstr "Fehler bei der Initialisierung der delegation credentials" #: src/services/a-rex/grid-manager/GridManager.cpp:115 #, fuzzy msgid "Cache cleaning script failed" msgstr "Die Job Terminierungs-Anfrage schlug fehl" #: src/services/a-rex/grid-manager/GridManager.cpp:177 msgid "Starting jobs processing thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:178 #, c-format msgid "Used configuration file %s" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:187 #, c-format msgid "" "Error adding communication interface in %s. Maybe another instance of A-REX " "is already running." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:190 #, c-format msgid "" "Error adding communication interface in %s. Maybe permissions are not " "suitable." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:199 #, c-format msgid "" "Error initiating delegation database in %s. Maybe permissions are not " "suitable. Returned error is: %s." 
msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:211 msgid "Failed to start new thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:268 msgid "Failed to start new thread: cache won't be cleaned" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:273 msgid "Picking up left jobs" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:277 msgid "Starting data staging threads" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:281 msgid "Failed to start data staging threads, exiting Grid Manager thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:290 msgid "Starting jobs' monitoring" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:303 #, fuzzy, c-format msgid "Failed to open heartbeat file %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/services/a-rex/grid-manager/GridManager.cpp:335 #, c-format msgid "Orphan delegation lock detected (%s) - cleaning" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:340 #, fuzzy msgid "Failed to obtain delegation locks for cleaning orphaned locks" msgstr "Fehler bei der Initialisierung der delegation credentials" #: src/services/a-rex/grid-manager/GridManager.cpp:346 msgid "Waking up" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:349 msgid "Stopping jobs processing thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:352 msgid "Exiting jobs processing thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:365 msgid "Shutting down job processing" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:370 msgid "Shutting down data staging threads" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:24 #, c-format msgid "" "Usage: %s -I -U -P -L [-c " "] [-p ] [-d ]" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:74 msgid "Job ID argument is required." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:80 msgid "Path to user's proxy file should be specified." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:86 msgid "User name should be specified." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:92 msgid "Path to .local job status file is required." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:100 msgid "Generating ceID prefix from hostname automatically" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:103 #, fuzzy msgid "" "Cannot determine hostname from gethostname() to generate ceID automatically." msgstr "Kann hostname von uname nciht ermitteln" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:112 #, c-format msgid "ceID prefix is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:120 #, c-format msgid "Getting currect timestamp for BLAH parser log: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:129 msgid "Parsing .local file to obtain job-specific identifiers and info" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:139 #, c-format msgid "globalid is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:142 #, c-format msgid "headnode is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:145 #, c-format msgid "interface is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:149 msgid "There is no local LRMS ID. Message will not be written to BLAH log." 
msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:152 #, c-format msgid "localid is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:155 #, c-format msgid "queue name is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:158 #, c-format msgid "owner subject is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:160 msgid "" "Job did not finished successfully. Message will not be written to BLAH log." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:165 #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:119 msgid "Can not read information from the local job status file" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:181 #, c-format msgid "" "Unsupported submission interface %s. Seems arc-blahp-logger need to be " "updated accordingly :-) Please submit the bug to bugzilla." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:191 msgid "Parsing VOMS AC to get FQANs information" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:210 msgid "No FQAN found. Using NULL as userFQAN value" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:224 #, c-format msgid "Assembling BLAH parser log entry: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:229 #, c-format msgid "Writing the info the the BLAH parser log: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:237 #, fuzzy, c-format msgid "Cannot open BLAH log file '%s'" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:24 #, c-format msgid "" "Usage: %s [-N] -P -L [-c ] [-d " "]" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:90 msgid "User proxy file is required but is not specified" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:95 msgid "Local job status file is required" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:114 #, c-format msgid "Making the decision for the queue %s" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:127 #, fuzzy, c-format msgid "Can not parse the configuration file %s" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:144 #, fuzzy, c-format msgid "Can not find queue '%s' in the configuration file" msgstr "Delegation Authorisierung fehlgeschlagen" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:168 msgid "No access policy to check, returning success" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:182 #, fuzzy, c-format msgid "CA certificates directory %s does not exist" msgstr "Cache-Datei %s existiert nicht" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:202 msgid "User proxy certificate is not valid" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:207 #, c-format msgid "Getting VOMS AC for: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:221 #, fuzzy, c-format msgid "Checking a match for '%s'" msgstr "Suche nache Existenz von %s" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:228 #, c-format msgid "FQAN '%s' IS a match to '%s'" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:232 #, c-format msgid "Queue '%s' usage is prohibited to FQAN '%s' by the site access policy" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:236 #, c-format msgid "FQAN '%s' IS NOT a match to '%s'" msgstr "" #: 
src/services/a-rex/grid-manager/arc_vomsac_check.cpp:242 #, c-format msgid "" "Queue '%s' usage with provided FQANs is prohibited by the site access policy" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:35 #, c-format msgid "Missing cancel-%s-job - job cancellation may not work" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:39 #, c-format msgid "Missing submit-%s-job - job submission to LRMS may not work" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:43 #, c-format msgid "Missing scan-%s-job - may miss when job finished executing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:57 #, c-format msgid "Wrong option in %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:71 #, fuzzy, c-format msgid "Can't read configuration file at %s" msgstr "Delegation Authorisierung fehlgeschlagen" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:80 #, fuzzy, c-format msgid "Can't interpret configuration file %s as XML" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:118 #, fuzzy, c-format msgid "Can't recognize type of configuration file at %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:121 msgid "Could not determine configuration type or configuration is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:169 msgid "No queue name given in queue block name" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:176 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:499 msgid "forcedefaultvoms parameter is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:185 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:198 msgid "authorizedvo parameter is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:276 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:604 #, c-format msgid "Wrong number in jobreport_period: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:280 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:608 #, c-format msgid "Wrong number in jobreport_period: %d, minimal value: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:299 msgid "Missing file name in jobreport_logfile" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:316 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:323 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:330 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:337 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:344 #, c-format msgid "Wrong number in maxjobs: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:351 #, c-format msgid "Wrong number in wakeupperiod: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:360 msgid "mail parameter is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:366 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:370 msgid "Wrong number in defaultttl command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:376 #, fuzzy msgid "Wrong number in maxrerun command" msgstr "Falsche Anzahl an Parametern übertragen" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:382 msgid "defaultlrms is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:398 msgid "State name for plugin is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:402 msgid "Options for 
plugin are missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:405 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:726 #, c-format msgid "Failed to register plugin for state %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:413 #, fuzzy msgid "Wrong number for timeout in plugin command" msgstr "Falsche Anzahl an Parametern übertragen" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:429 #, fuzzy msgid "Wrong option in fixdirectories" msgstr "Fehler beim Öffnen von Verzeichs: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:441 #, fuzzy msgid "Wrong option in delegationdb" msgstr "Fehler beim Öffnen von Verzeichs: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:456 msgid "Session root directory is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:459 msgid "Junk in sessiondir command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:471 msgid "Missing directory in control command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:476 msgid "" "'control' configuration option is no longer supported, please use " "'controldir' instead" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:481 msgid "User for helper program is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:484 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:875 msgid "Only user '.' for helper program is supported" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:487 msgid "Helper program is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:632 msgid "Value for maxJobsTracked is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:636 msgid "Value for maxJobsRun is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:640 msgid "Value for maxJobsTotal is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:644 msgid "Value for maxJobsPerDN is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:648 msgid "Value for wakeupPeriod is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:652 msgid "Value for maxScripts is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:664 msgid "serviceMail is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:678 #, fuzzy msgid "Type in LRMS is missing" msgstr "Location fehlt" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:691 msgid "LRMS is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:704 msgid "State name for authPlugin is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:709 #, fuzzy msgid "Command for authPlugin is missing" msgstr "Location fehlt" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:723 #, c-format msgid "Registering plugin for state %s; options: %s; command: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:739 #, fuzzy msgid "Command for localCred is missing" msgstr "Location fehlt" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:745 msgid "Timeout for localCred is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:750 msgid "Timeout for localCred is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:778 msgid "Control element must be present" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:783 msgid 
"controlDir is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:790 msgid "sessionRootDir is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:800 msgid "Attribute drain for sessionRootDir is incorrect boolean" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:809 msgid "The fixDirectories element is incorrect value" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:816 #, fuzzy msgid "The delegationDB element is incorrect value" msgstr "Delegation ID: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:821 msgid "The maxReruns element is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:825 msgid "The noRootPower element is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:829 msgid "The defaultTTL element is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:833 msgid "The defaultTTR element is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:866 msgid "Command in helperUtility is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:871 msgid "Username in helperUtility is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:106 #, c-format msgid "\tSession root dir : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:107 #, c-format msgid "\tControl dir : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:108 #, c-format msgid "\tdefault LRMS : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:109 #, c-format msgid "\tdefault queue : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:110 #, c-format msgid "\tdefault ttl : %u" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:115 msgid "No valid caches found in configuration, caching is disabled" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:120 #, c-format msgid "\tCache : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:122 #, c-format msgid "\tCache link dir : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:126 #, c-format msgid "\tRemote cache : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:128 #, c-format msgid "\tRemote cache link: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:130 msgid "\tCache cleaning enabled" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:131 msgid "\tCache cleaning disabled" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:308 msgid "" "Globus location variable substitution is not supported anymore. Please " "specify path directly." 
msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:412 #, c-format msgid "Starting helper process: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:422 #, fuzzy, c-format msgid "Helper process start failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:429 #, c-format msgid "Stopping helper process %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:14 #, c-format msgid "wrong boolean in %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:20 #, fuzzy, c-format msgid "wrong number in %s" msgstr "Schließe Verbindung" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:49 msgid "Can't read configuration file" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:58 #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:32 msgid "Can't interpret configuration file as XML" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:62 #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:69 #, fuzzy msgid "Configuration error" msgstr "Delegation Authorisierung fehlgeschlagen" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:75 #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:103 msgid "Can't recognize type of configuration file" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:95 msgid "Bad number in maxdelivery" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:101 msgid "Bad number in maxemergency" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:107 msgid "Bad number in maxprocessor" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:113 msgid "Bad number in maxprepared" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:119 msgid "Bad number in maxtransfertries" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:128 msgid "Bad number in speedcontrol" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:139 #, c-format msgid "Bad number in definedshare %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:148 #, c-format msgid "Bad URL in deliveryservice: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:159 msgid "Bad number in remotesizelimit" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:189 msgid "Bad value for debug" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:203 #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:315 msgid "Bad URL in acix_endpoint" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:258 #, c-format msgid "Bad URL in deliveryService: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:272 msgid "Bad value for logLevel" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:25 msgid "Can't open configuration file" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:47 msgid "Value for 'link' element in mapURL is incorrect" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:53 msgid "Missing 'from' element in mapURL" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:57 msgid "Missing 'to' element in mapURL" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:84 msgid "Not enough parameters in copyurl" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:93 msgid "Not enough parameters in linkurl" msgstr "" #: src/services/a-rex/grid-manager/files/ControlFileContent.cpp:179 
#, c-format msgid "Wrong directory in %s" msgstr "" #: src/services/a-rex/grid-manager/files/ControlFileHandling.cpp:100 #, c-format msgid "Failed setting file owner: %s" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:33 msgid "gm-delegations-converter changes format of delegation database." msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:38 #: src/services/a-rex/grid-manager/gm_jobs.cpp:106 #: src/services/a-rex/grid-manager/gm_kick.cpp:24 msgid "use specified configuration file" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:39 #: src/services/a-rex/grid-manager/gm_jobs.cpp:107 #: src/services/a-rex/grid-manager/gm_kick.cpp:25 msgid "file" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:43 #: src/services/a-rex/grid-manager/gm_jobs.cpp:111 msgid "read information from specified control directory" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:44 #: src/services/a-rex/grid-manager/gm_jobs.cpp:112 msgid "dir" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:48 msgid "convert from specified input database format [bdb|sqlite]" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:49 #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:54 msgid "database format" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:53 msgid "convert into specified output database format [bdb|sqlite]" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:96 msgid "gm-jobs displays information on current jobs in the system." msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:101 msgid "display more information on each job" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:116 msgid "print summary of jobs in each transfer share" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:121 msgid "do not print list of jobs" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:126 msgid "do not print number of jobs in each state" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:131 msgid "print state of the service" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:136 msgid "show only jobs of user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:137 #: src/services/a-rex/grid-manager/gm_jobs.cpp:147 #: src/services/a-rex/grid-manager/gm_jobs.cpp:157 #, fuzzy msgid "dn" msgstr "n" #: src/services/a-rex/grid-manager/gm_jobs.cpp:141 msgid "request to cancel job(s) with specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:142 #: src/services/a-rex/grid-manager/gm_jobs.cpp:152 #: src/services/a-rex/grid-manager/gm_jobs.cpp:162 #: src/services/a-rex/grid-manager/gm_jobs.cpp:172 msgid "id" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:146 msgid "" "request to cancel jobs belonging to user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:151 #, fuzzy msgid "request to clean job(s) with specified ID(s)" msgstr "Keine Anfrage-Token spezifiziert!" 
#: src/services/a-rex/grid-manager/gm_jobs.cpp:156 msgid "" "request to clean jobs belonging to user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:161 msgid "show only jobs with specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:166 msgid "print list of available delegation IDs" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:171 msgid "print delegation token of specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:176 msgid "print main delegation token of specified Job ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:177 #, fuzzy msgid "job id" msgstr "ungültige Job ID" #: src/services/a-rex/grid-manager/gm_jobs.cpp:181 msgid "" "output requested elements (jobs list, delegation ids and tokens) to file" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:182 #, fuzzy msgid "file name" msgstr "Dateiname" #: src/services/a-rex/grid-manager/gm_kick.cpp:18 msgid "" "gm-kick wakes up the A-REX corresponding to the given control file. If no " "file is given it uses the control directory found in the configuration file." msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:39 #, fuzzy, c-format msgid "Failed to acquire source: %s" msgstr "Fehler beim Authentifizieren: %s" #: src/services/a-rex/grid-manager/inputcheck.cpp:44 #, fuzzy, c-format msgid "Failed to resolve %s" msgstr "Fehler bei Lesen von Objekt %s" #: src/services/a-rex/grid-manager/inputcheck.cpp:61 #, fuzzy, c-format msgid "Failed to check %s" msgstr "Fehler beim Authentifizieren: %s" #: src/services/a-rex/grid-manager/inputcheck.cpp:75 msgid "job_description_file [proxy_file]" msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:76 msgid "" "inputcheck checks that input files specified in the job description are " "available and accessible using the credentials in the given proxy file." 
msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:88 #, fuzzy msgid "Wrong number of arguments given" msgstr "Falsche Anzahl an Parametern übertragen" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:105 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1009 #, fuzzy msgid "Failed to run plugin" msgstr "Initiierung der Delegation fehlgeschlagen" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:109 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1015 #, fuzzy, c-format msgid "Plugin failed: %s" msgstr "PASV fehlgeschlagen: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:158 msgid "empty argument to remotegmdirs" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:165 msgid "bad arguments to remotegmdirs" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:177 msgid "Wrong number in maxjobdesc" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:185 #: src/services/gridftpd/fileplugin/fileplugin.cpp:186 #, fuzzy, c-format msgid "Unsupported configuration command: %s" msgstr "Nicht unterstützte URL für Ziel: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:199 #, c-format msgid "Mapped user:group (%s:%s) not found" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:202 #, fuzzy msgid "Failed processing grid-manager configuration" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:205 msgid "" "Cannot use multiple session directories and remotegmdirs at the same time" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:228 msgid "This user is denied to submit new jobs." msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:239 msgid "No control or remote control directories defined in configuration" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:290 #, fuzzy, c-format msgid "Job submission user: %s (%i:%i)" msgstr "Job Hochladen Zusammenfassung:" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:294 #, fuzzy msgid "Job plugin was not initialised" msgstr "Main python thread wurde nicht initialisiert" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:312 #, fuzzy msgid "No delegated credentials were passed" msgstr "" "Делегированные параметры доступа:\n" " %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:385 #, fuzzy, c-format msgid "Cancelling job %s" msgstr "Aufräumen von Job: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:436 #, fuzzy, c-format msgid "Cleaning job %s" msgstr "Aufräumen von Job: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:472 #, fuzzy msgid "Request to open file with storing in progress" msgstr "%s Anfrage an %s schlug fehl mit Antwort %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:505 #: src/services/gridftpd/fileplugin/fileplugin.cpp:343 #, fuzzy, c-format msgid "Retrieving file %s" msgstr "Lese Archiv Datei %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:556 #, c-format msgid "Accepting submission of new job or modification request: %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:577 #: src/services/gridftpd/fileplugin/fileplugin.cpp:383 #: src/services/gridftpd/fileplugin/fileplugin.cpp:420 #, fuzzy, c-format msgid "Storing file %s" msgstr "Anlegen von Socket schlug fehl: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:599 #, fuzzy, c-format msgid "Unknown open mode %i" msgstr 
"unbekannter return code %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:716 #, c-format msgid "action(%s) != request" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:767 #, fuzzy msgid "Failed writing job description" msgstr "Fehler bei Schreiben zu Ziel" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:954 #, fuzzy msgid "Failed writing local description" msgstr "Fehler bei Schreiben zu Ziel" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:964 #, fuzzy msgid "Failed writing ACL" msgstr "Konnte job nicht starten" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:980 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:987 #: src/services/a-rex/job.cpp:587 #, c-format msgid "Failed to run external plugin: %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:991 #: src/services/a-rex/job.cpp:591 #, c-format msgid "Plugin response: %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:993 #, fuzzy msgid "Failed to run external plugin" msgstr "Initiierung der Delegation fehlgeschlagen" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1025 #, fuzzy, c-format msgid "Failed to create session directory %s" msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1035 #, fuzzy msgid "Failed writing status" msgstr "Fehler beim Auflisten von Meta-Dateien" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1049 #, fuzzy, c-format msgid "Failed to lock delegated credentials: %s" msgstr "Fehler bei der Initialisierung der delegation credentials" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1300 #, fuzzy, c-format msgid "Renewing proxy for job %s" msgstr "Erneuern der credentials für Job %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1312 #, c-format msgid "New proxy expires at %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1315 #, fuzzy msgid "Failed to write 'local' information" msgstr "Konnte Job Status Information nicht beziehen." 
#: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1321 #, fuzzy msgid "Failed to renew proxy" msgstr "Fehler beim Senden von body" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1324 msgid "New proxy expiry time is not later than old proxy, not renewing proxy" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1367 #, fuzzy, c-format msgid "Checking file %s" msgstr "Suche nache Existenz von %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1415 msgid "ID contains forbidden characters" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1462 #: src/services/a-rex/job.cpp:781 #, c-format msgid "Failed to create file in %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1487 msgid "Out of tries while allocating new job ID" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1568 #, fuzzy, c-format msgid "Failed to read job's local description for job %s from %s" msgstr "Fehler beim Bezug der Job Beschreibung von Job: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1650 #, fuzzy msgid "No non-draining control or session directories available" msgstr "Konnte ownen des session dir nicht ändern zu %i: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1666 #, fuzzy, c-format msgid "Using control directory %s" msgstr "Lege Verzeichnis %s an" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1667 #, fuzzy, c-format msgid "Using session directory %s" msgstr "Lege Verzeichnis %s an" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:26 #, fuzzy, c-format msgid "Failed to read job's ACL for job %s from %s" msgstr "Fehler bei Lesen von Objekt %s: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:69 #, fuzzy, c-format msgid "Failed to parse user policy for job %s" msgstr "Fahler bei Herunterladen %s zu %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:74 #, fuzzy, c-format msgid "Failed to load policy evaluator for policy of job %s" msgstr "Fahler bei Herunterladen %s zu %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:126 #, c-format msgid "Unknown ACL policy %s for job %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:121 msgid "Exiting Generator thread" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:211 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:225 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:238 msgid "DTRGenerator is not running!" 
msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:214 #, c-format msgid "Received DTR %s during Generator shutdown - may not be processed" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:314 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:324 #, c-format msgid "%s: Trying to remove job from data staging which is still active" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:333 #, c-format msgid "%s: Trying remove job from data staging which does not exist" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:344 #, fuzzy, c-format msgid "%s: Invalid DTR" msgstr "Ungültige URL: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:354 #, c-format msgid "%s: Received DTR %s to copy file %s in state %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:366 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:982 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:281 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:304 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:634 #, c-format msgid "%s: Failed reading local information" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:377 #, fuzzy, c-format msgid "%s: DTR %s to copy file %s failed" msgstr "Warnung: Schließen von tmp Lock Datei %s fehlgeschlagen" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:383 #, c-format msgid "%s: Cancelling other DTRs" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:394 #, fuzzy, c-format msgid "%s: DTR %s to copy to %s failed but is not mandatory" msgstr "Warnung: Schließen von tmp Lock Datei %s fehlgeschlagen" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:404 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:645 #, c-format msgid "%s: Failed to read list of output files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:418 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:555 #, fuzzy, c-format msgid "%s: Failed to read dynamic output files in %s" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:420 #, c-format msgid "%s: Going through files in list %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:424 #, fuzzy, c-format msgid "%s: Removing %s from dynamic output file %s" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:428 #, fuzzy, c-format msgid "%s: Failed to write back dynamic output files in %s" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:444 #, fuzzy, c-format msgid "%s: Failed to write list of output files" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:448 #, fuzzy, c-format msgid "%s: Failed to write list of output status files" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:460 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:656 #, c-format msgid "%s: Failed to read list of input files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:479 #, fuzzy, c-format msgid "%s: Failed to write list of input files" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:491 #, c-format msgid "%s: Received DTR with two remote endpoints!" 
msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:502 #: src/services/cache_service/CacheServiceGenerator.cpp:108 #, fuzzy, c-format msgid "No active job id %s" msgstr "Kann Job ID nicht finden: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:543 #, fuzzy, c-format msgid "%s: Failed to read list of output files, can't clean up session dir" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:567 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:578 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:692 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:817 #, fuzzy, c-format msgid "%s: Failed to clean up session dir" msgstr "Fehler bei Reservieren von Platz" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:576 #, fuzzy, c-format msgid "%s: Failed to read list of input files, can't clean up session dir" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:588 #, fuzzy, c-format msgid "%s: All %s %s successfully" msgstr "Verbindung erfolgreich geschlossen" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:592 #, fuzzy, c-format msgid "%s: Some %s failed" msgstr "Anfrage %s schlug fehl" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:607 #, c-format msgid "%s: Received data staging request to %s files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:667 #, fuzzy, c-format msgid "%s: Duplicate file in list of input files: %s" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:714 #, fuzzy, c-format msgid "%s: Reading output files from user generated list in %s" msgstr "" "\n" "%s: ошибка чтения входного файла '%s': %s\n" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:716 #, fuzzy, c-format msgid "%s: Error reading user generated output file list in %s" msgstr "" "\n" "%s: ошибка чтения входного файла '%s': %s\n" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:744 #, fuzzy, c-format msgid "%s: Failed to list output directory %s: %s" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:763 #, fuzzy, c-format msgid "%s: Adding new output file %s: %s" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:785 #, c-format msgid "%s: Two identical output destinations: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:798 #, c-format msgid "%s: Cannot upload two different files %s and %s to same LFN: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:828 #, fuzzy, c-format msgid "%s: Received job in a bad state: %s" msgstr "Resuming Job: %s in Zustand: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:901 #, c-format msgid "" "%s: Destination file %s was possibly left unfinished from previous A-REX " "run, will overwrite" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:988 #, c-format msgid "%s: Failed writing local information" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1005 #, c-format msgid "%s: Cancelling active DTRs" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1028 #, fuzzy, c-format msgid "%s: Can't read list of input files" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1043 #, c-format msgid "%s: Checking user uploadable file: %s" msgstr "" #: 
src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1048 #, c-format msgid "%s: User has uploaded file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1055 #, fuzzy, c-format msgid "%s: Failed writing changed input file." msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1059 #, c-format msgid "%s: Critical error for uploadable file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1076 #, c-format msgid "%s: Uploadable files timed out" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1132 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1158 #, fuzzy, c-format msgid "%s: Can't convert checksum %s to int for %s" msgstr "Formatierungsfehler erkannt in Datei %s, in Zeile %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1139 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1153 #, fuzzy, c-format msgid "%s: Can't convert filesize %s to int for %s" msgstr "Formatierungsfehler erkannt in Datei %s, in Zeile %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1148 #, c-format msgid "%s: Invalid size/checksum information (%s) for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1170 #, c-format msgid "%s: Invalid file: %s is too big." msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1186 #, c-format msgid "%s: Failed to switch user ID to %d/%d to read file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1192 #, fuzzy, c-format msgid "%s: Failed to open file %s for reading" msgstr "Fehler bei Öffnen von Datei %s zum Lesen: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1200 #, fuzzy, c-format msgid "%s: Error accessing file %s" msgstr "Fehler bei Zugriff auf Cache-Datei %s: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1212 #, fuzzy, c-format msgid "%s: Error reading file %s" msgstr "Fehler bei Lesen von Meta-Datei %s: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1227 #, c-format msgid "%s: File %s has wrong checksum: %llu. Expected %lli" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1233 #, c-format msgid "%s: Checksum %llu verified for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1245 msgid "" "Found unfinished DTR transfers. It is possible the previous A-REX process " "did not shut down normally" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1252 #, c-format msgid "Found DTR %s for file %s left in transferring state from previous run" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:131 #, c-format msgid "Replacing queue '%s' with '%s'" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:225 #, c-format msgid "Bad name for stdout: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:233 #, c-format msgid "Bad name for stderr: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:291 #, c-format msgid "Bad name for runtime environment: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:336 msgid "Job description file could not be read." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:387 msgid "Bad name for executable: " msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:401 #, c-format msgid "Bad name for executable: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:107 #, c-format msgid "" "%s: Failed reading .local and changing state, job and A-REX may be left in " "an inconsistent state" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:169 #, c-format msgid "Current jobs in system (PREPARING to FINISHING) per-DN (%i entries)" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:171 #, fuzzy, c-format msgid "%s: %i" msgstr "%s (%s)" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:177 #, c-format msgid "%s: Destroying" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:182 #, c-format msgid "%s: Can't read state - no comments, just cleaning" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:194 #, c-format msgid "%s: Cleaning control and session directories" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:200 #, c-format msgid "%s: This job may be still running - canceling" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:203 #, c-format msgid "%s: Cancellation failed (probably job finished) - cleaning anyway" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:210 #, c-format msgid "%s: Cancellation probably succeeded - cleaning" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:273 #, fuzzy, c-format msgid "%s: Failed writing list of output files: %s" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:315 #, c-format msgid "%s: Failed creating grami file" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:319 #, c-format msgid "%s: Failed setting executable permissions" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:331 #, c-format msgid "%s: state SUBMIT: starting child: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:334 #, c-format msgid "%s: state CANCELING: starting child: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:336 #, c-format msgid "%s: Job has completed already. No action taken to cancel" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:347 #, c-format msgid "%s: Failed running submission process" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:349 #, fuzzy, c-format msgid "%s: Failed running cancellation process" msgstr "Fehler bei Reservieren von Platz" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:355 #, c-format msgid "%s: LRMS scripts limit of %u is reached - suspending submit/cancel" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:374 #, c-format msgid "" "%s: Job submission to LRMS takes too long, but ID is already obtained. " "Pretending submission is done." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:380 #, c-format msgid "" "%s: Job cancellation takes too long, but diagnostic collection seems to be " "done. Pretending cancellation succeeded." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:388 #, c-format msgid "%s: Job submission to LRMS takes too long. Failing." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:394 #, c-format msgid "%s: Job cancellation takes too long. Failing." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:404 #, c-format msgid "%s: state SUBMIT: child exited with code %i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:409 #, c-format msgid "%s: state CANCELING: child exited with code %i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:417 #, c-format msgid "%s: Job submission to LRMS failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:420 #, c-format msgid "%s: Failed to cancel running job" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:434 #, c-format msgid "%s: Failed obtaining lrms id" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:447 #, fuzzy, c-format msgid "%s: Failed writing local information: %s" msgstr "Konnte job information nicht beziehen für job: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:457 #, fuzzy, c-format msgid "%s: state CANCELING: timeout waiting for cancellation" msgstr "" "start_reading_ftp: Zeitüberschreitung bei Warten auf Zeitpunkt letzter " "Änderung" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:463 #, c-format msgid "%s: state CANCELING: job diagnostics collected" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:531 #, c-format msgid "%s: State: %s: still in data staging" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:550 #, c-format msgid "%s: Job is not allowed to be rerun anymore" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:560 #, c-format msgid "%s: Job failed in unknown state. Won't rerun." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:581 #, fuzzy, c-format msgid "%s: Reprocessing job description failed" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:588 #, c-format msgid "%s: Failed to read reprocessed list of output files" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:592 #, c-format msgid "%s: Failed to read reprocessed list of input files" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:676 #, c-format msgid "%s: Reading status of new job failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:689 #, c-format msgid "%s: State: ACCEPTED: parsing job description" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:691 #, c-format msgid "%s: Processing job description failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:708 #, c-format msgid "%s: %s: New job belongs to %i/%i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:718 #, fuzzy, c-format msgid "Failed to get DN information from .local file for job %s" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:732 #, c-format msgid "%s: State: ACCEPTED" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:739 #, c-format msgid "%s: State: ACCEPTED: dryrun" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:751 #, c-format msgid "%s: State: ACCEPTED: has process time %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:757 #, c-format msgid "%s: State: ACCEPTED: moving to PREPARING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:774 #, c-format msgid "%s: State: PREPARING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:778 #, fuzzy, c-format msgid "%s: Failed obtaining local job information." msgstr "Konnte Job Status Information nicht beziehen." 
#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:829 #, c-format msgid "%s: State: SUBMIT" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:844 #, c-format msgid "%s: State: CANCELING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:860 #, c-format msgid "%s: State: INLRMS" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:868 #, c-format msgid "%s: Job finished" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:872 #, c-format msgid "%s: State: INLRMS: exit message is %i %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:893 #, c-format msgid "%s: State: FINISHING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:922 #, c-format msgid "%s: Job is requested to clean - deleting" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:961 #, c-format msgid "%s: Can't rerun on request" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:963 #, c-format msgid "%s: Can't rerun on request - not a suitable state" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:973 #, c-format msgid "%s: Job is too old - deleting" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1012 #, c-format msgid "%s: Job is ancient - delete rest of information" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1041 #, c-format msgid "%s: Canceling job because of user request" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1107 #, c-format msgid "%s: Job failure detected" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1136 #, c-format msgid "%s: State: %s from %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1163 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1171 #, c-format msgid "%s: Plugin at state %s : %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1177 #, c-format msgid "%s: Plugin execution failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1202 #, c-format msgid "%s: Delete request due to internal problems" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1284 #, fuzzy, c-format msgid "Failed to move file %s to %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1292 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1375 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1464 #, c-format msgid "Failed reading control directory: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1334 #, fuzzy, c-format msgid "Failed reading control directory: %s: %s" msgstr "Fehler bei Lesen von Objekt %s: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:153 #, fuzzy, c-format msgid "Invalid checksum in %s for %s" msgstr "Errechneted checksum: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:162 #, fuzzy, c-format msgid "Invalid file size in %s for %s " msgstr "Fehler bei Lesen von Datei %s: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:182 #, fuzzy, c-format msgid "Invalid file: %s is too big." msgstr "Ungültige url: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:196 #, fuzzy, c-format msgid "Error accessing file %s" msgstr "Fehler bei Zugriff auf Cache-Datei %s: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:205 #, fuzzy, c-format msgid "Error reading file %s" msgstr "Fehler bei Lesen von Meta-Datei %s: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:216 #, c-format msgid "File %s has wrong CRC." 
msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:240 #, fuzzy, c-format msgid "Failed downloading file %s - %s" msgstr "Fahler bei Herunterladen %s zu %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:246 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:132 #, fuzzy msgid "Retrying" msgstr "Zeichenkette" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:249 #, fuzzy, c-format msgid "Downloaded file %s" msgstr "Lese Archiv Datei %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:330 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:243 #, fuzzy, c-format msgid "Wrong number of threads: %s" msgstr "Falsche Anzahl an Parametern übertragen" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:336 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:249 #, fuzzy, c-format msgid "Wrong number of files: %s" msgstr "Fehler bei Ändern des Besitzers der destination Datei %s: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:342 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:358 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:365 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:372 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:379 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:255 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:271 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:278 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:285 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:292 #, fuzzy, c-format msgid "Bad number: %s" msgstr "Klassenname: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:346 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:352 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:259 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:265 #, fuzzy msgid "Specified user can't be handled" msgstr "PDP: %s kann nicht geladen werden" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:384 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:297 #, fuzzy, c-format msgid "Unsupported option: %c" msgstr "Nicht unterstützte URL für Quelle: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:388 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:301 #, fuzzy, c-format msgid "Missing parameter for option %c" msgstr "Nutze space token Beschreibugn %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:392 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:305 #, fuzzy msgid "Undefined processing error" msgstr "Fataler Fehler: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:399 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:312 #, fuzzy msgid "Missing job id" msgstr "Aufräumen von Job: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:401 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:314 #, fuzzy msgid "Missing control directory" msgstr "Lege Verzeichnis %s an" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:403 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:316 #, fuzzy msgid "Missing session directory" msgstr "Lege Verzeichnis %s an" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:446 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:359 #, c-format msgid "Minimal speed: %llu B/s during %i s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:448 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:361 #, c-format msgid "Minimal average speed: %llu B/s" msgstr "" 
#: src/services/a-rex/grid-manager/loaders/downloader.cpp:450 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:363 #, c-format msgid "Maximal inactivity time: %i s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:455 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:368 msgid "Won't use more than 10 threads" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:462 #, fuzzy msgid "Downloader started" msgstr "Start start" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:488 #, fuzzy msgid "Can't read list of input files" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:495 #, fuzzy, c-format msgid "Error: duplicate file in list of input files: %s" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:518 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:402 #, fuzzy msgid "Can't read list of output files" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:523 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:449 #, fuzzy msgid "Can't remove junk files" msgstr "Kann Datei nicht löschen: %s - %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:531 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:463 #, fuzzy msgid "Can't read job local description" msgstr "Ungültige JobDescription:" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:553 #, c-format msgid "Local source for download: %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:567 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:571 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:512 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:518 #, fuzzy, c-format msgid "Can't accept URL: %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:586 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:535 #, fuzzy, c-format msgid "Failed to initiate file transfer: %s - %s" msgstr "Fehler bei Schreiben zu Datein %s: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:603 #, fuzzy, c-format msgid "Downloaded %s" msgstr "Herunterladen des Job: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:628 #, fuzzy, c-format msgid "Failed to download (but may be retried) %s" msgstr "Fehler bei Verbinden zu ldap server (%s)" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:633 #, fuzzy, c-format msgid "Failed to download %s" msgstr "Fahler bei Herunterladen %s zu %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:642 #, fuzzy msgid "Some downloads failed" msgstr "Einige Transfers schlugen fehl" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:646 #, fuzzy msgid "Some downloads failed, but may be retried" msgstr "Fehler bei Verbinden zu ldap server (%s)" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:650 #, fuzzy msgid "Failed writing changed input file" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:662 #, fuzzy, c-format msgid "Checking user uploadable file: %s" msgstr "Suche nache Existenz von %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:668 #, c-format msgid "User has uploaded file %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:673 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:706 #, fuzzy msgid "Failed writing changed input file." 
msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:677 #, c-format msgid "Critical error for uploadable file %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:694 #, c-format msgid "No changes in uploadable files for %u seconds" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:695 #, fuzzy msgid "Uploadable files timed out" msgstr "Anfrage fehlgeschlagen: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:761 #, fuzzy, c-format msgid "Leaving downloader (%i)" msgstr "Lade python broker (%i)" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:126 #, fuzzy, c-format msgid "Failed uploading file %s - %s" msgstr "Fahler bei Herunterladen %s zu %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:135 #, fuzzy, c-format msgid "Uploaded file %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:373 #, fuzzy msgid "Uploader started" msgstr "Start start" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:409 #, fuzzy, c-format msgid "Reading output files from user generated list in %s" msgstr "" "\n" "%s: ошибка чтения входного файла '%s': %s\n" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:411 #, fuzzy, c-format msgid "Error reading user generated output file list in %s" msgstr "" "\n" "%s: ошибка чтения входного файла '%s': %s\n" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:424 #, fuzzy, c-format msgid "Two identical output destinations: %s" msgstr "Fehler bei Schreiben zu Ziel" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:433 #, c-format msgid "Cannot upload two different files %s and %s to same LFN: %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:496 #, fuzzy, c-format msgid "Local destination for uploader %s" msgstr "Probleme bei Auflösen von Zieladresse" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:556 #, fuzzy, c-format msgid "Uploaded %s" msgstr "Name %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:580 #, fuzzy msgid "Failed writing output status file" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:587 #, fuzzy, c-format msgid "Failed to upload (but may be retried) %s" msgstr "Fehler bei Verbinden zu ldap server (%s)" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:591 #, fuzzy, c-format msgid "Failed to upload %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:600 #, fuzzy msgid "Some uploads failed" msgstr "Anfrage %s schlug fehl" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:616 #, fuzzy, c-format msgid "Writing back dynamic output file %s" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:618 #, c-format msgid "Failed to rewrite output file list %s. 
Job resuming may not work" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:630 #, fuzzy msgid "Some uploads failed, but (some) may be retried" msgstr "Fehler bei Verbinden zu ldap server (%s)" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:637 #, fuzzy msgid "Failed writing changed output file" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:649 #, c-format msgid "Leaving uploader (%i)" msgstr "" #: src/services/a-rex/grid-manager/log/JobLog.cpp:118 #, fuzzy msgid ": Logger name is not specified" msgstr "Delegation Authorisierung fehlgeschlagen" #: src/services/a-rex/grid-manager/log/JobLog.cpp:130 #, fuzzy msgid ": Failure creating slot for reporter child process" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/log/JobLog.cpp:143 #, fuzzy msgid ": Failure starting reporter child process" msgstr "Fehler bei Reservieren von Platz" #: src/services/a-rex/grid-manager/log/JobsMetrics.cpp:130 #, c-format msgid ": Metrics tool returned error code %i: %s" msgstr "" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:29 msgid "Failed reading local information" msgstr "" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:79 #, c-format msgid "Running mailer command (%s)" msgstr "" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:81 msgid "Failed running mailer" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:71 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:24 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:55 #, c-format msgid "%s: Failure creating slot for child process" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:79 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:30 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:61 #, c-format msgid "%s: Failure creating data storage for child process" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:123 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:41 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:72 #, c-format msgid "%s: Failure starting child process" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:140 #, c-format msgid "%s: Failed to run plugin" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:143 #, c-format msgid "%s: Plugin failed" msgstr "" #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:46 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:77 #, c-format msgid "%s: Failure waiting for child process to finish" msgstr "" #: src/services/a-rex/information_collector.cpp:45 #, fuzzy, c-format msgid "Resource information provider: %s" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/services/a-rex/information_collector.cpp:51 #, fuzzy msgid "Resource information provider failed" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/services/a-rex/information_collector.cpp:55 #, c-format msgid "" "Resource information provider failed with exit status: %i\n" "%s" msgstr "" #: src/services/a-rex/information_collector.cpp:57 #, c-format msgid "" "Resource information provider log:\n" "%s" msgstr "" #: src/services/a-rex/information_collector.cpp:61 msgid "No new informational document assigned" msgstr "" #: src/services/a-rex/information_collector.cpp:63 #, c-format msgid "Obtained XML: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:77 msgid "Informational document is empty" msgstr "" #: src/services/a-rex/information_collector.cpp:88 msgid "Passing service's 
information from collector to registrator" msgstr "" #: src/services/a-rex/information_collector.cpp:144 #, fuzzy, c-format msgid "" "Registered static information: \n" " doc: %s" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/services/a-rex/information_collector.cpp:147 #, c-format msgid "" "Information Registered without static attributes: \n" " doc: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:324 msgid "OptimizedInformationContainer failed to create temporary file" msgstr "" #: src/services/a-rex/information_collector.cpp:327 #, c-format msgid "OptimizedInformationContainer created temporary file: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:333 msgid "" "OptimizedInformationContainer failed to store XML document to temporary file" msgstr "" #: src/services/a-rex/information_collector.cpp:342 msgid "OptimizedInformationContainer failed to parse XML" msgstr "" #: src/services/a-rex/information_collector.cpp:353 #, fuzzy msgid "OptimizedInformationContainer failed to rename temprary file" msgstr "Fehler bei Erstellen von Info-Datei %s: %s" #: src/services/a-rex/job.cpp:53 #, fuzzy, c-format msgid "Cannot handle local user %s" msgstr "" "Kann Owner von %s nicht ändernНевозможно изменить владельца папки %1.\n" "Ошибка: %2" #: src/services/a-rex/job.cpp:101 #, c-format msgid "%s: Failed to parse user policy" msgstr "" #: src/services/a-rex/job.cpp:106 #, c-format msgid "%s: Failed to load evaluator for user policy " msgstr "" #: src/services/a-rex/job.cpp:211 #, c-format msgid "%s: Unknown user policy '%s'" msgstr "" #: src/services/a-rex/job.cpp:473 src/services/a-rex/job.cpp:497 #, fuzzy, c-format msgid "Credential expires at %s" msgstr "Delegation service: %s" #: src/services/a-rex/job.cpp:475 src/services/a-rex/job.cpp:499 #, fuzzy, c-format msgid "Credential handling exception: %s" msgstr "Delegation service: %s" #: src/services/a-rex/job.cpp:789 #, c-format msgid "Out of tries while allocating new job ID in %s" msgstr "" #: src/services/a-rex/job.cpp:1006 msgid "No non-draining session dirs available" msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:89 #: src/services/a-rex/jura/CARDestination.cpp:49 #: src/services/a-rex/jura/LutsDestination.cpp:71 msgid "ServiceURL missing" msgstr "ServiceURL fehlt" #: src/services/a-rex/jura/ApelDestination.cpp:97 #: src/services/a-rex/jura/CARDestination.cpp:56 #: src/services/a-rex/jura/LutsDestination.cpp:89 #, c-format msgid "Protocol is %s, should be https" msgstr "Protokoll ist %s, sollte https sein" #: src/services/a-rex/jura/ApelDestination.cpp:133 #: src/services/a-rex/jura/ApelDestination.cpp:158 #: src/services/a-rex/jura/CARDestination.cpp:95 #: src/services/a-rex/jura/LutsDestination.cpp:120 #: src/services/a-rex/jura/LutsDestination.cpp:144 #, c-format msgid "Ignoring incomplete log file \"%s\"" msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:182 #: src/services/a-rex/jura/CARDestination.cpp:119 #: src/services/a-rex/jura/LutsDestination.cpp:166 #, c-format msgid "Logging UR set of %d URs." msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:185 #: src/services/a-rex/jura/CARDestination.cpp:122 #: src/services/a-rex/jura/Destination.cpp:61 #: src/services/a-rex/jura/LutsDestination.cpp:169 #, c-format msgid "UR set dump: %s" msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:271 #: src/services/a-rex/jura/Destination.cpp:96 #, c-format msgid "Backup file (%s) created."
msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:316 #, c-format msgid "APEL message file (%s) created." msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:370 #: src/services/a-rex/jura/CARAggregation.cpp:208 #, c-format msgid "system retval: %d" msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:73 #, c-format msgid "Aggregation record (%s) not exist, initialize it..." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:77 #, c-format msgid "Aggregation record (%s) initialization successful." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:82 #, c-format msgid "Some error happens during the Aggregation record (%s) initialization." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:88 #, fuzzy, c-format msgid "Aggregation record (%s) read from file successful." msgstr "Verzeichnis %s erfolgreich entfernt" #: src/services/a-rex/jura/CARAggregation.cpp:100 #, fuzzy, c-format msgid "Aggregation record (%s) stored successful." msgstr "Verzeichnis %s erfolgreich entfernt" #: src/services/a-rex/jura/CARAggregation.cpp:103 #, c-format msgid "Some error happens during the Aggregation record (%s) storing." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:156 #, c-format msgid "APEL aggregation message file (%s) created." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:228 #, fuzzy, c-format msgid "year: %s" msgstr "header: %s" #: src/services/a-rex/jura/CARAggregation.cpp:229 #, fuzzy, c-format msgid "moth: %s" msgstr "Proxy Pfad: %s" #: src/services/a-rex/jura/CARAggregation.cpp:230 #, fuzzy, c-format msgid "queue: %s" msgstr "Anfrage: %s" #: src/services/a-rex/jura/CARAggregation.cpp:238 #: src/services/a-rex/jura/CARAggregation.cpp:404 #, fuzzy, c-format msgid "query: %s" msgstr "Anfrage: %s" #: src/services/a-rex/jura/CARAggregation.cpp:241 #, fuzzy, c-format msgid "list size: %d" msgstr "Zeige Antwort: %s" #: src/services/a-rex/jura/CARAggregation.cpp:359 #, fuzzy, c-format msgid "XML: %s" msgstr "XML Antwort: %s" #: src/services/a-rex/jura/CARAggregation.cpp:361 msgid "UPDATE Aggregation Record called." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:415 #: src/services/a-rex/jura/CARAggregation.cpp:465 msgid "Does not sending empty aggregation/synch message." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:548 #, fuzzy, c-format msgid "synch message: %s" msgstr "Service Nachricht" #: src/services/a-rex/jura/Destination.cpp:123 #, c-format msgid "Sent jobIDs: (nr. of job(s) %d)" msgstr "" #: src/services/a-rex/jura/Destinations.cpp:27 msgid "Unable to create adapter for the specific reporting destination type" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:92 #, c-format msgid "Insert filter element: <%s,%s>" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:105 #, c-format msgid "Not set filter for this URL (%s)." 
msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:111 #, c-format msgid "Current job's VO name: %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:114 #, fuzzy, c-format msgid "VO filter for host: %s" msgstr " Filter: %s" #: src/services/a-rex/jura/JobLogFile.cpp:204 #: src/services/a-rex/jura/JobLogFile.cpp:698 #, c-format msgid "Read archive file %s" msgstr "Lese Archiv Datei %s" #: src/services/a-rex/jura/JobLogFile.cpp:209 #: src/services/a-rex/jura/JobLogFile.cpp:703 #, c-format msgid "" "Could not read archive file %s for job log file %s (%s), generating new " "Usage Record" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:298 #: src/services/a-rex/jura/JobLogFile.cpp:827 #, c-format msgid "" "Missing required Usage Record element \"RecordIdentity\", in job log file %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:345 #, c-format msgid "VO (%s) not set for this (%s) SGAS server by VO filter." msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:378 #, c-format msgid "[VO filter] Job log will be not send. %s." msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:454 #: src/services/a-rex/jura/JobLogFile.cpp:970 #, c-format msgid "Missing required element \"Status\" in job log file %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:663 #: src/services/a-rex/jura/JobLogFile.cpp:1280 #, c-format msgid "Failed to create archive directory %s: %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:670 #: src/services/a-rex/jura/JobLogFile.cpp:1287 #, c-format msgid "Archiving Usage Record to file %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:676 #: src/services/a-rex/jura/JobLogFile.cpp:1293 #, c-format msgid "Failed to write file %s: %s" msgstr "Fehler bei Schreiben zu Datein %s: %s" #: src/services/a-rex/jura/JobLogFile.cpp:1062 #, c-format msgid "Missing required element \"CpuDuration\" in job log file %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:1082 #, c-format msgid "Set non standard bechmark type: %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:1095 #, c-format msgid "Ignored incoming benchmark value: %s, Use float value!" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:1324 #, c-format msgid "Failed to delete file %s:%s" msgstr "" #: src/services/a-rex/jura/LutsDestination.cpp:223 #, c-format msgid "UsageRecords registration response: %s" msgstr "" #: src/services/a-rex/jura/ReReporter.cpp:53 #, c-format msgid "Initialised, archived job log dir: %s" msgstr "" #: src/services/a-rex/jura/ReReporter.cpp:73 #, fuzzy, c-format msgid "Incoming time range: %s" msgstr "Verbindung zu %s schlug fehl: %s" #: src/services/a-rex/jura/ReReporter.cpp:92 #, c-format msgid "Requested time range: %d.%d.%d. 0:00 - %d.%d.%d. %d:%d " msgstr "" #: src/services/a-rex/jura/ReReporter.cpp:98 #: src/services/a-rex/jura/UsageReporter.cpp:45 msgid "Interactive mode." msgstr "Interaktiver Modus." 
#: src/services/a-rex/jura/ReReporter.cpp:127 #: src/services/a-rex/jura/UsageReporter.cpp:68 #, fuzzy, c-format msgid "Could not open log directory \"%s\": %s" msgstr "" "Невозможно открыть каталог со справкой:\n" "%s" #: src/services/a-rex/jura/ReReporter.cpp:167 #: src/services/a-rex/jura/UsageReporter.cpp:193 #, c-format msgid "Error reading log directory \"%s\": %s" msgstr "" #: src/services/a-rex/jura/ReReporter.cpp:179 #: src/services/a-rex/jura/UsageReporter.cpp:205 #, c-format msgid "Finished, job log dir: %s" msgstr "" #: src/services/a-rex/jura/UsageReporter.cpp:39 #, c-format msgid "Initialised, job log dir: %s" msgstr "" #: src/services/a-rex/jura/UsageReporter.cpp:41 #, c-format msgid "Expiration time: %d seconds" msgstr "" #: src/services/a-rex/jura/UsageReporter.cpp:80 #, fuzzy, c-format msgid "Could not open output directory \"%s\": %s" msgstr "" "Невозможно открыть каталог со справкой:\n" "%s" #: src/services/a-rex/jura/UsageReporter.cpp:125 #, c-format msgid "Removing outdated job log file %s" msgstr "" #: src/services/a-rex/migrate_activity.cpp:37 #, c-format msgid "" "MigrateActivity: request = \n" "%s" msgstr "" "MigrateActivity: Anfrage = \n" "%s" #: src/services/a-rex/migrate_activity.cpp:42 msgid "MigrateActivitys: no ActivityIdentifier found" msgstr "" #: src/services/a-rex/migrate_activity.cpp:51 msgid "MigrateActivity: EPR contains no JobID" msgstr "" #: src/services/a-rex/migrate_activity.cpp:69 msgid "MigrateActivity: Failed to accept delegation" msgstr "" #: src/services/a-rex/migrate_activity.cpp:130 msgid "MigrateActivity: no job description found" msgstr "" #: src/services/a-rex/migrate_activity.cpp:153 #, c-format msgid "Migration XML sent to AREXJob: %s" msgstr "" #: src/services/a-rex/migrate_activity.cpp:180 #, c-format msgid "MigrateActivity: Failed to migrate new job: %s" msgstr "" #: src/services/a-rex/migrate_activity.cpp:182 msgid "MigrateActivity: Failed to migrate new job" msgstr "" #: src/services/a-rex/migrate_activity.cpp:198 msgid "MigrateActivity finished successfully" msgstr "" #: src/services/a-rex/migrate_activity.cpp:202 #, c-format msgid "" "MigrateActivity: response = \n" "%s" msgstr "" "MigrateActivity: Antwort = \n" "%s" #: src/services/a-rex/put.cpp:37 #, c-format msgid "Put: there is no job: %s - %s" msgstr "" #: src/services/a-rex/put.cpp:43 #, c-format msgid "Put: there is no payload for file %s in job: %s" msgstr "" #: src/services/a-rex/put.cpp:56 #, c-format msgid "Put: unrecognized payload for file %s in job: %s" msgstr "" #: src/services/a-rex/put.cpp:76 src/services/a-rex/put.cpp:130 #, c-format msgid "Put: failed to create file %s for job %s - %s" msgstr "" #: src/services/a-rex/put.cpp:85 #, c-format msgid "Put: failed to set position of file %s for job %s to %Lu - %s" msgstr "" #: src/services/a-rex/put.cpp:91 #, c-format msgid "Put: failed to allocate memory for file %s in job %s" msgstr "" #: src/services/a-rex/put.cpp:103 #, c-format msgid "Put: failed to write to file %s for job %s - %s" msgstr "" #: src/services/a-rex/terminate_activities.cpp:29 #, c-format msgid "" "TerminateActivities: request = \n" "%s" msgstr "" "TerminateActivities: Anfrage = \n" "%s" #: src/services/a-rex/terminate_activities.cpp:40 #, fuzzy msgid "TerminateActivities: non-AREX job requested" msgstr "" "TerminateActivities: запрос = \n" "%s" #: src/services/a-rex/terminate_activities.cpp:49 #, c-format msgid "TerminateActivities: job %s - %s" msgstr "" #: src/services/a-rex/terminate_activities.cpp:69 #, c-format msgid "" "TerminateActivities: 
response = \n" "%s" msgstr "" "TerminateActivities: ответ = \n" "%s" #: src/services/a-rex/test.cpp:34 src/tests/count/test_service.cpp:25 #: src/tests/echo/test.cpp:23 src/tests/echo/test_service.cpp:22 msgid "Creating service side chain" msgstr "" #: src/services/a-rex/test.cpp:37 src/tests/count/test_service.cpp:28 #: src/tests/echo/test.cpp:26 src/tests/echo/test_service.cpp:25 msgid "Failed to load service configuration" msgstr "" #: src/services/a-rex/test.cpp:43 src/services/a-rex/test_cache_check.cpp:24 #: src/tests/count/test_client.cpp:23 #: src/tests/echo/echo_test4axis2c/test_client.cpp:20 #: src/tests/echo/test_client.cpp:21 msgid "Creating client side chain" msgstr "" #: src/services/a-rex/test.cpp:49 src/tests/count/test_client.cpp:53 #: src/tests/echo/echo_test4axis2c/test_client.cpp:43 #: src/tests/echo/test_client.cpp:59 msgid "Failed to load client configuration" msgstr "" #: src/services/a-rex/test.cpp:53 src/tests/count/test_client.cpp:57 #: src/tests/echo/echo_test4axis2c/test_client.cpp:47 #: src/tests/echo/test.cpp:58 src/tests/echo/test_client.cpp:63 msgid "Client side MCCs are loaded" msgstr "" #: src/services/a-rex/test.cpp:56 src/tests/count/test_client.cpp:60 #: src/tests/echo/echo_test4axis2c/test_client.cpp:50 #: src/tests/echo/test_client.cpp:66 msgid "Client chain does not have entry point" msgstr "" #: src/services/a-rex/test.cpp:112 src/services/a-rex/test.cpp:191 #: src/services/a-rex/test.cpp:248 src/services/a-rex/test.cpp:296 #: src/services/a-rex/test.cpp:344 src/services/a-rex/test.cpp:392 #: src/tests/count/test_client.cpp:87 #: src/tests/echo/echo_test4axis2c/test_client.cpp:74 #: src/tests/echo/test.cpp:74 src/tests/echo/test_client.cpp:90 msgid "Request failed" msgstr "" #: src/services/a-rex/test.cpp:115 src/services/a-rex/test.cpp:194 #: src/services/a-rex/test.cpp:251 src/services/a-rex/test.cpp:299 #: src/services/a-rex/test.cpp:347 src/services/a-rex/test.cpp:395 #: src/tests/echo/test.cpp:82 msgid "Request succeed!!!" 
msgstr "" #: src/services/a-rex/test.cpp:117 src/services/a-rex/test.cpp:196 #: src/services/a-rex/test.cpp:253 src/services/a-rex/test.cpp:301 #: src/services/a-rex/test.cpp:349 src/services/a-rex/test.cpp:397 #: src/tests/count/test_client.cpp:93 #: src/tests/echo/echo_test4axis2c/test_client.cpp:80 #: src/tests/echo/test.cpp:79 src/tests/echo/test_client.cpp:96 msgid "There is no response" msgstr "" #: src/services/a-rex/test.cpp:125 src/services/a-rex/test.cpp:204 #: src/services/a-rex/test.cpp:261 src/services/a-rex/test.cpp:309 #: src/services/a-rex/test.cpp:357 src/services/a-rex/test.cpp:405 #: src/tests/count/test_client.cpp:100 #: src/tests/echo/echo_test4axis2c/test_client.cpp:87 #: src/tests/echo/test_client.cpp:103 msgid "Response is not SOAP" msgstr "" #: src/services/a-rex/test.cpp:136 msgid "Response is not expected WS-RP" msgstr "" #: src/services/a-rex/update_credentials.cpp:29 #, c-format msgid "" "UpdateCredentials: request = \n" "%s" msgstr "" "UpdateCredentials: запрос = \n" "%s" #: src/services/a-rex/update_credentials.cpp:35 msgid "UpdateCredentials: missing Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:43 msgid "UpdateCredentials: wrong number of Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:51 msgid "UpdateCredentials: wrong number of elements inside Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:60 msgid "UpdateCredentials: EPR contains no JobID" msgstr "" #: src/services/a-rex/update_credentials.cpp:70 #, c-format msgid "UpdateCredentials: no job found: %s" msgstr "" #: src/services/a-rex/update_credentials.cpp:77 msgid "UpdateCredentials: failed to update credentials" msgstr "" #: src/services/a-rex/update_credentials.cpp:85 #, c-format msgid "" "UpdateCredentials: response = \n" "%s" msgstr "" "UpdateCredentials: отзыв = \n" "%s" #: src/services/cache_service/CacheService.cpp:52 #, fuzzy msgid "No A-REX config file found in cache service configuration" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/services/cache_service/CacheService.cpp:56 #, c-format msgid "Using A-REX config file %s" msgstr "" #: src/services/cache_service/CacheService.cpp:60 #, fuzzy, c-format msgid "Failed to process A-REX configuration in %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/services/cache_service/CacheService.cpp:65 #, fuzzy msgid "No caches defined in configuration" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/services/cache_service/CacheService.cpp:139 msgid "Empty filename returned from FileCache" msgstr "" #: src/services/cache_service/CacheService.cpp:151 #, fuzzy, c-format msgid "Problem accessing cache file %s: %s" msgstr "Fehler bei Zugriff auf Cache-Datei %s: %s" #: src/services/cache_service/CacheService.cpp:200 #: src/services/cache_service/CacheService.cpp:472 #, fuzzy msgid "No job ID supplied" msgstr "Keine Job ID in Antwort" #: src/services/cache_service/CacheService.cpp:209 #, c-format msgid "Bad number in priority element: %s" msgstr "" #: src/services/cache_service/CacheService.cpp:218 msgid "No username supplied" msgstr "" #: src/services/cache_service/CacheService.cpp:225 #, c-format msgid "Supplied username %s does not match mapped username %s" msgstr "" #: src/services/cache_service/CacheService.cpp:239 #, fuzzy msgid "No session directory found" msgstr "Kein Cache-Verzeichnis angegeben" #: src/services/cache_service/CacheService.cpp:243 #, fuzzy, c-format msgid "Using session dir %s" msgstr "Fehler beim start von session: %s" #: 
src/services/cache_service/CacheService.cpp:247 #, fuzzy, c-format msgid "Failed to stat session dir %s" msgstr "Konnte ownen des session dir nicht ändern zu %i: %s" #: src/services/cache_service/CacheService.cpp:252 #, c-format msgid "Session dir %s is owned by %i, but current mapped user is %i" msgstr "" #: src/services/cache_service/CacheService.cpp:279 #, fuzzy, c-format msgid "Failed to access proxy of given job id %s at %s" msgstr "Fehler bei Lesen von Proxy-Datei: %s" #: src/services/cache_service/CacheService.cpp:297 #, fuzzy, c-format msgid "DN is %s" msgstr "Größe ist %s" #: src/services/cache_service/CacheService.cpp:373 #, c-format msgid "Permission checking passed for url %s" msgstr "" #: src/services/cache_service/CacheService.cpp:398 #: src/services/cache_service/CacheServiceGenerator.cpp:138 #, fuzzy, c-format msgid "Failed to move %s to %s: %s" msgstr "Fehler bei Entfernen von hard link %s: %s" #: src/services/cache_service/CacheService.cpp:437 #, c-format msgid "Starting new DTR for %s" msgstr "" #: src/services/cache_service/CacheService.cpp:439 #, fuzzy, c-format msgid "Failed to start new DTR for %s" msgstr "Fehler bei Schreiben zu Datein %s: %s" #: src/services/cache_service/CacheService.cpp:486 #, fuzzy, c-format msgid "Job %s: all files downloaded successfully" msgstr "Datei %s erfolgreich entfernt" #: src/services/cache_service/CacheService.cpp:495 #, c-format msgid "Job %s: Some downloads failed" msgstr "" #: src/services/cache_service/CacheService.cpp:501 #, c-format msgid "Job %s: files still downloading" msgstr "" #: src/services/cache_service/CacheService.cpp:514 #, fuzzy msgid "CacheService: Unauthorized" msgstr "echo: Unauthorisiert" #: src/services/cache_service/CacheService.cpp:523 msgid "No local user mapping found" msgstr "" #: src/services/cache_service/CacheService.cpp:530 #: src/services/data-staging/DataDeliveryService.cpp:631 #, fuzzy, c-format msgid "Identity is %s" msgstr "Identität: %s" #: src/services/cache_service/CacheService.cpp:595 msgid "Only POST is supported in CacheService" msgstr "" #: src/services/cache_service/CacheServiceGenerator.cpp:88 #, c-format msgid "DTR %s finished with state %s" msgstr "" #: src/services/cache_service/CacheServiceGenerator.cpp:127 #, fuzzy, c-format msgid "Could not determine session directory from filename %s" msgstr "Konnte Version des Server nicht bestimmen" #: src/services/cache_service/CacheServiceGenerator.cpp:168 #, fuzzy, c-format msgid "Invalid DTR for source %s, destination %s" msgstr "Quelle Ziel" #: src/services/cache_service/CacheServiceGenerator.cpp:210 #, c-format msgid "DTRs still running for job %s" msgstr "" #: src/services/cache_service/CacheServiceGenerator.cpp:219 #, c-format msgid "All DTRs finished for job %s" msgstr "" #: src/services/cache_service/CacheServiceGenerator.cpp:226 #, fuzzy, c-format msgid "Job %s not found" msgstr "Erhielt dbnotfound" #: src/services/data-staging/DataDeliveryService.cpp:58 #, c-format msgid "Archiving DTR %s, state ERROR" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:62 #, c-format msgid "Archiving DTR %s, state %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:166 #, fuzzy msgid "No delegation token in request" msgstr "Erstellen und senden von Anfrage" #: src/services/data-staging/DataDeliveryService.cpp:174 msgid "Failed to accept delegation" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:203 #: src/services/data-staging/DataDeliveryService.cpp:210 #, fuzzy msgid "ErrorDescription" msgstr "Fehler bei 
Importieren" #: src/services/data-staging/DataDeliveryService.cpp:215 #, c-format msgid "All %u process slots used" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:230 #, c-format msgid "Received retry for DTR %s still in transfer" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:237 #, c-format msgid "Replacing DTR %s in state %s with new request" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:248 #, c-format msgid "Storing temp proxy at %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:256 #, fuzzy, c-format msgid "Failed to create temp proxy at %s: %s" msgstr "Fehler bei Erstellen von Info-Datei %s: %s" #: src/services/data-staging/DataDeliveryService.cpp:263 #, fuzzy, c-format msgid "Failed to change owner of temp proxy at %s to %i:%i: %s" msgstr "Konnte ownen des session dir nicht ändern zu %i: %s" #: src/services/data-staging/DataDeliveryService.cpp:289 #, fuzzy msgid "Invalid DTR" msgstr "Ungültige URL: %s" #: src/services/data-staging/DataDeliveryService.cpp:294 #, fuzzy, c-format msgid "Failed to remove temporary proxy %s: %s" msgstr "Fehler bei Entfernen von hard link %s: %s" #: src/services/data-staging/DataDeliveryService.cpp:395 #, c-format msgid "No such DTR %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:406 #, fuzzy, c-format msgid "DTR %s failed: %s" msgstr "DCAU fehlgeschlagen: %s" #: src/services/data-staging/DataDeliveryService.cpp:417 #, fuzzy, c-format msgid "DTR %s finished successfully" msgstr "Verbindung erfolgreich geschlossen" #: src/services/data-staging/DataDeliveryService.cpp:427 #, c-format msgid "DTR %s still in progress (%lluB transferred)" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:488 #, c-format msgid "No active DTR %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:498 #, c-format msgid "DTR %s was already cancelled" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:507 #, fuzzy, c-format msgid "DTR %s could not be cancelled" msgstr "PDP: %s kann nicht geladen werden" #: src/services/data-staging/DataDeliveryService.cpp:551 #, fuzzy, c-format msgid "Failed to get load average: %s" msgstr "Warnung: Fehler bei Bezug von Attributen von %s: %s" #: src/services/data-staging/DataDeliveryService.cpp:575 msgid "Invalid configuration - no allowed IP address specified" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:579 msgid "Invalid configuration - no allowed dirs specified" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:590 #, fuzzy msgid "Failed to start archival thread" msgstr "Fehler bei Ablage von FTP Datei" #: src/services/data-staging/DataDeliveryService.cpp:615 msgid "Shutting down data delivery service" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:624 #, fuzzy msgid "Unauthorized" msgstr "echo: Unauthorisiert" #: src/services/data-staging/DataDeliveryService.cpp:710 msgid "Only POST is supported in DataDeliveryService" msgstr "" #: src/services/examples/echo_python/EchoService.py:12 msgid "EchoService (python) constructor called" msgstr "" #: src/services/examples/echo_python/EchoService.py:17 #, python-format msgid "EchoService (python) has prefix %(prefix)s and suffix %(suffix)s" msgstr "" #: src/services/examples/echo_python/EchoService.py:24 msgid "EchoService (python) destructor called" msgstr "" #: src/services/examples/echo_python/EchoService.py:54 msgid "EchoService (python) thread test starting" msgstr "" #: src/services/examples/echo_python/EchoService.py:65 #, 
python-format msgid "EchoService (python) thread test, iteration %(iteration)s %(status)s" msgstr "" #: src/services/examples/echo_python/EchoService.py:91 msgid "EchoService (python) 'Process' called" msgstr "" #: src/services/examples/echo_python/EchoService.py:95 #, python-format msgid "inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:96 #, python-format msgid "inmsg.Attributes().getAll() = %s " msgstr "" #: src/services/examples/echo_python/EchoService.py:97 #, python-format msgid "EchoService (python) got: %s " msgstr "" #: src/services/examples/echo_python/EchoService.py:102 #, python-format msgid "EchoService (python) request_namespace: %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:108 #: src/services/examples/echo_python/EchoService.py:177 #, python-format msgid "outpayload %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:137 msgid "Calling https://localhost:60000/Echo using ClientSOAP" msgstr "" #: src/services/examples/echo_python/EchoService.py:140 msgid "Calling http://localhost:60000/Echo using ClientSOAP" msgstr "" #: src/services/examples/echo_python/EchoService.py:146 #: src/services/examples/echo_python/EchoService.py:161 #, python-format msgid "new_payload %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:155 msgid "Calling http://localhost:60000/Echo using httplib" msgstr "" #: src/services/examples/echo_python/EchoService.py:171 msgid "Start waiting 10 sec..." msgstr "" #: src/services/examples/echo_python/EchoService.py:173 #, fuzzy msgid "Waiting ends." msgstr "Warte vor Antwort" #: src/services/gridftpd/auth/auth.cpp:312 #, fuzzy, c-format msgid "Unknown authorization command %s" msgstr "unbekannter return code %s" #: src/services/gridftpd/auth/auth.cpp:330 #, c-format msgid "" "The [vo] section labeled '%s' has no file associated and can't be used for " "matching" msgstr "" #: src/services/gridftpd/auth/auth_ldap.cpp:56 #, fuzzy, c-format msgid "Connecting to %s:%i" msgstr "Verbindung zu %s schlug fehl: %s" #: src/services/gridftpd/auth/auth_ldap.cpp:57 #, fuzzy, c-format msgid "Querying at %s" msgstr "Anfrage: %s" #: src/services/gridftpd/auth/auth_ldap.cpp:62 #, fuzzy, c-format msgid "Failed to query LDAP server %s" msgstr "Fehler beim Verbinden zu RLS server: %s" #: src/services/gridftpd/auth/auth_ldap.cpp:69 #, fuzzy, c-format msgid "Failed to get results from LDAP server %s" msgstr "Fehler beim Verbinden zu RLS server: %s" #: src/services/gridftpd/auth/auth_ldap.cpp:83 #, fuzzy msgid "LDAP authorization is not supported" msgstr "Es wurde keine authorization response erwidert" #: src/services/gridftpd/auth/auth_plugin.cpp:73 #: src/services/gridftpd/auth/unixmap.cpp:254 #, c-format msgid "Plugin %s failed to run" msgstr "" #: src/services/gridftpd/auth/auth_plugin.cpp:75 #: src/services/gridftpd/auth/unixmap.cpp:256 #, c-format msgid "Plugin %s printed: %u" msgstr "" #: src/services/gridftpd/auth/auth_plugin.cpp:76 #: src/services/gridftpd/auth/unixmap.cpp:257 #, fuzzy, c-format msgid "Plugin %s error: %u" msgstr "Globus Fehler: %s" #: src/services/gridftpd/auth/auth_voms.cpp:28 #, c-format msgid "VOMS proxy processing returns: %i - %s" msgstr "" #: src/services/gridftpd/auth/auth_voms.cpp:120 #, c-format msgid "VOMS trust chains: %s" msgstr "" #: src/services/gridftpd/commands.cpp:46 #, fuzzy, c-format msgid "response: %s" msgstr "Antwort: %s" #: src/services/gridftpd/commands.cpp:50 #, fuzzy, c-format msgid "Send response failed: %s" msgstr "Anlegen 
von Socket schlug fehl: %s" #: src/services/gridftpd/commands.cpp:80 msgid "Response sending error" msgstr "" #: src/services/gridftpd/commands.cpp:93 #, fuzzy msgid "Closed connection" msgstr "Schließe Verbindung" #: src/services/gridftpd/commands.cpp:131 #, fuzzy, c-format msgid "Socket conversion failed: %s" msgstr "Anlegen von Socket schlug fehl: %s" #: src/services/gridftpd/commands.cpp:141 #, fuzzy, c-format msgid "Failed to obtain own address: %s" msgstr "Warnung: Fehler bei Bezug von Attributen von %s: %s" #: src/services/gridftpd/commands.cpp:149 #, c-format msgid "Failed to recognize own address type (IPv4 ir IPv6) - %u" msgstr "" #: src/services/gridftpd/commands.cpp:159 #, fuzzy, c-format msgid "Accepted connection on [%s]:%u" msgstr "Fehler bei Verbinden zu server %s:%d" #: src/services/gridftpd/commands.cpp:161 #, c-format msgid "Accepted connection on %u.%u.%u.%u:%u" msgstr "" #: src/services/gridftpd/commands.cpp:196 #, fuzzy msgid "Accept failed" msgstr "PASV fehlgeschlagen" #: src/services/gridftpd/commands.cpp:204 #: src/services/gridftpd/listener.cpp:415 #, fuzzy, c-format msgid "Accept failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/services/gridftpd/commands.cpp:219 #, c-format msgid "Accepted connection from [%s]:%u" msgstr "" #: src/services/gridftpd/commands.cpp:221 #, c-format msgid "Accepted connection from %u.%u.%u.%u:%u" msgstr "" #: src/services/gridftpd/commands.cpp:230 #, fuzzy msgid "Authenticate in commands failed" msgstr "Authentifiziere: %s" #: src/services/gridftpd/commands.cpp:239 #, fuzzy msgid "Authentication failure" msgstr "Authentifiziere: %s" #: src/services/gridftpd/commands.cpp:247 #, fuzzy, c-format msgid "User subject: %s" msgstr "Subjekt: %s" #: src/services/gridftpd/commands.cpp:248 #, fuzzy, c-format msgid "Encrypted: %s" msgstr "Verschlüsselter Name ID: %s" #: src/services/gridftpd/commands.cpp:254 #, fuzzy msgid "User has no proper configuration associated" msgstr "Delegation Authorisierung fehlgeschlagen" #: src/services/gridftpd/commands.cpp:262 msgid "" "User has empty virtual directory tree.\n" "Either user has no authorised plugins or there are no plugins configured at " "all." 
msgstr "" #: src/services/gridftpd/commands.cpp:279 msgid "Read commands in authenticate failed" msgstr "" #: src/services/gridftpd/commands.cpp:410 #, fuzzy msgid "Control connection (probably) closed" msgstr "GET: Verbindung wird geschlossen" #: src/services/gridftpd/commands.cpp:444 #: src/services/gridftpd/commands.cpp:723 #, fuzzy msgid "Command EPRT" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:445 #, fuzzy, c-format msgid "Failed to parse remote addres %s" msgstr "Fehler bei Entfernen von hard link %s: %s" #: src/services/gridftpd/commands.cpp:467 #, fuzzy, c-format msgid "Command USER %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:474 #, fuzzy msgid "Command CDUP" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:480 #, fuzzy, c-format msgid "Command CWD %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:496 #, fuzzy, c-format msgid "Command MKD %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:516 #, fuzzy, c-format msgid "Command SIZE %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:531 #, fuzzy, c-format msgid "Command SBUF: %i" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:552 #, fuzzy, c-format msgid "Command MLST %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:575 #, fuzzy, c-format msgid "Command DELE %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:590 #, fuzzy, c-format msgid "Command RMD %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:604 #, fuzzy, c-format msgid "Command TYPE %c" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:615 #, fuzzy, c-format msgid "Command MODE %c" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:627 #, fuzzy msgid "Command ABOR" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:640 #, fuzzy, c-format msgid "Command REST %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:653 #, fuzzy, c-format msgid "Command EPSV %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:655 #, fuzzy msgid "Command SPAS" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:657 #, fuzzy msgid "Command PASV" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:678 #, fuzzy msgid "local_pasv failed" msgstr "%s fehlgeschlagen" #: src/services/gridftpd/commands.cpp:702 #, fuzzy msgid "local_spas failed" msgstr "%s fehlgeschlagen" #: src/services/gridftpd/commands.cpp:725 #, fuzzy msgid "Command PORT" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:728 #, fuzzy msgid "active_data is disabled" msgstr "SOAP Aufruf fehlgeschlagen" #: src/services/gridftpd/commands.cpp:737 #, fuzzy msgid "local_port failed" msgstr "SendData: Lokaler port schlug fehl: %s" #: src/services/gridftpd/commands.cpp:750 #, fuzzy, c-format msgid "Command MLSD %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:752 #, fuzzy, c-format msgid "Command NLST %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:754 #, fuzzy, c-format msgid "Command LIST %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:805 #, fuzzy, c-format msgid "Command ERET %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:835 #, fuzzy, c-format msgid "Command RETR %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:864 #, fuzzy, c-format msgid "Command STOR %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:892 #, fuzzy, c-format msgid "Command ALLO %i" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:915 #, fuzzy msgid "Command 
OPTS" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:918 msgid "Command OPTS RETR" msgstr "" #: src/services/gridftpd/commands.cpp:928 #, fuzzy, c-format msgid "Option: %s" msgstr "Funktion : %s" #: src/services/gridftpd/commands.cpp:972 #, fuzzy msgid "Command NOOP" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:976 #, fuzzy msgid "Command QUIT" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:986 #, fuzzy msgid "Failed to close, deleting client" msgstr "Konnte delegation context nicht erhalten" #: src/services/gridftpd/commands.cpp:1000 #, fuzzy, c-format msgid "Command DCAU: %i '%s'" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:1028 #, fuzzy, c-format msgid "Command PBZS: %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:1036 #, fuzzy, c-format msgid "Setting pbsz to %lu" msgstr "Setze Datei %s zu Größe %llu" #: src/services/gridftpd/commands.cpp:1052 #, fuzzy, c-format msgid "Command PROT: %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:1077 #, fuzzy, c-format msgid "Command MDTM %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:1099 #, fuzzy, c-format msgid "Raw command: %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:1147 #, fuzzy msgid "Failed to allocate memory for buffer" msgstr "Fehler beim Reservieren von Speicher" #: src/services/gridftpd/commands.cpp:1154 #, c-format msgid "Allocated %u buffers %llu bytes each." msgstr "" #: src/services/gridftpd/commands.cpp:1161 #, fuzzy msgid "abort_callback: start" msgstr "ftp_write_callback: Fehler" #: src/services/gridftpd/commands.cpp:1164 #, fuzzy, c-format msgid "abort_callback: Globus error: %s" msgstr "ftp_complete_callback: Fehler: %s" #: src/services/gridftpd/commands.cpp:1178 msgid "make_abort: start" msgstr "" #: src/services/gridftpd/commands.cpp:1190 msgid "Failed to abort data connection - ignoring and recovering" msgstr "" #: src/services/gridftpd/commands.cpp:1198 msgid "make_abort: wait for abort flag to be reset" msgstr "" #: src/services/gridftpd/commands.cpp:1208 msgid "make_abort: leaving" msgstr "" #: src/services/gridftpd/commands.cpp:1223 msgid "check_abort: have Globus error" msgstr "" #: src/services/gridftpd/commands.cpp:1224 msgid "Abort request caused by transfer error" msgstr "" #: src/services/gridftpd/commands.cpp:1227 msgid "check_abort: sending 426" msgstr "" #: src/services/gridftpd/commands.cpp:1248 msgid "Abort request caused by error in transfer function" msgstr "" #: src/services/gridftpd/commands.cpp:1330 msgid "Failed to start timer thread - timeout won't work" msgstr "" #: src/services/gridftpd/commands.cpp:1382 msgid "Killing connection due to timeout" msgstr "" #: src/services/gridftpd/conf/conf_vo.cpp:25 #: src/services/gridftpd/conf/conf_vo.cpp:51 #: src/services/gridftpd/conf/conf_vo.cpp:69 #: src/services/gridftpd/conf/conf_vo.cpp:81 msgid "" "Configuration section [vo] is missing name. Check for presence of name= or " "vo= option." 
msgstr "" #: src/services/gridftpd/conf/daemon.cpp:60 #: src/services/gridftpd/conf/daemon.cpp:183 #, c-format msgid "No such user: %s" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:72 #: src/services/gridftpd/conf/daemon.cpp:195 #, c-format msgid "No such group: %s" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:85 #: src/services/gridftpd/conf/daemon.cpp:208 #, c-format msgid "Improper debug level '%s'" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:127 msgid "Missing option for command daemon" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:132 msgid "Wrong option in daemon" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:142 #, c-format msgid "Improper size of log '%s'" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:151 #, c-format msgid "Improper number of logs '%s'" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:157 #, c-format msgid "Improper argument for logsize '%s'" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:164 msgid "Missing option for command logreopen" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:169 msgid "Wrong option in logreopen" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:253 #, fuzzy, c-format msgid "Failed to open log file %s" msgstr "Fehler bei Unlock von Datei %s: %s" #: src/services/gridftpd/conf/environment.cpp:175 msgid "" "Central configuration file is missing at guessed location:\n" " /etc/arc.conf\n" "Use ARC_CONFIG variable for non-standard location" msgstr "" #: src/services/gridftpd/datalist.cpp:101 #, fuzzy msgid "Closing channel (list)" msgstr "Schließe Verbindung" #: src/services/gridftpd/datalist.cpp:157 msgid "Data channel connected (list)" msgstr "" #: src/services/gridftpd/dataread.cpp:24 msgid "data_connect_retrieve_callback" msgstr "" #: src/services/gridftpd/dataread.cpp:30 msgid "Data channel connected (retrieve)" msgstr "" #: src/services/gridftpd/dataread.cpp:37 msgid "data_connect_retrieve_callback: allocate_data_buffer" msgstr "" #: src/services/gridftpd/dataread.cpp:40 msgid "data_connect_retrieve_callback: allocate_data_buffer failed" msgstr "" #: src/services/gridftpd/dataread.cpp:48 #, c-format msgid "data_connect_retrieve_callback: check for buffer %u" msgstr "" #: src/services/gridftpd/dataread.cpp:61 #, c-format msgid "Closing channel (retrieve) due to local read error :%s" msgstr "" #: src/services/gridftpd/dataread.cpp:75 #: src/services/gridftpd/dataread.cpp:172 #, fuzzy msgid "Buffer registration failed" msgstr "Delegation nicht erfolgreich: " #: src/services/gridftpd/dataread.cpp:88 #, fuzzy msgid "data_retrieve_callback" msgstr "ftp_check_callback" #: src/services/gridftpd/dataread.cpp:96 #, c-format msgid "Data channel (retrieve) %i %i %i" msgstr "" #: src/services/gridftpd/dataread.cpp:104 #, fuzzy msgid "Closing channel (retrieve)" msgstr "Schließe Verbindung" #: src/services/gridftpd/dataread.cpp:110 #: src/services/gridftpd/datawrite.cpp:128 #, c-format msgid "Time spent waiting for network: %.3f ms" msgstr "" #: src/services/gridftpd/dataread.cpp:111 #: src/services/gridftpd/datawrite.cpp:129 #, c-format msgid "Time spent waiting for disc: %.3f ms" msgstr "" #: src/services/gridftpd/dataread.cpp:122 #, fuzzy msgid "data_retrieve_callback: lost buffer" msgstr "ftp_write_callback: Fehler" #: src/services/gridftpd/dataread.cpp:158 #, c-format msgid "Closing channel (retrieve) due to local read error: %s" msgstr "" #: src/services/gridftpd/datawrite.cpp:24 #, fuzzy msgid "data_connect_store_callback" msgstr "ftp_check_callback" #: src/services/gridftpd/datawrite.cpp:30 msgid "Data 
channel connected (store)" msgstr "" #: src/services/gridftpd/datawrite.cpp:57 #, fuzzy msgid "Failed to register any buffer" msgstr "Konnte job nicht registrieren" #: src/services/gridftpd/datawrite.cpp:76 #, c-format msgid "Data channel (store) %i %i %i" msgstr "" #: src/services/gridftpd/datawrite.cpp:89 #, fuzzy msgid "data_store_callback: lost buffer" msgstr "ftp_read_callback: Fehler" #: src/services/gridftpd/datawrite.cpp:105 #, c-format msgid "Closing channel (store) due to error: %s" msgstr "" #: src/services/gridftpd/datawrite.cpp:115 #, fuzzy msgid "Closing channel (store)" msgstr "Schließe Verbindung" #: src/services/gridftpd/fileplugin/fileplugin.cpp:55 #, fuzzy msgid "Can't parse access rights in configuration line" msgstr "Konnte classname für Policy nicht von Konfiguration parsen" #: src/services/gridftpd/fileplugin/fileplugin.cpp:61 #, fuzzy msgid "Can't parse user:group in configuration line" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/services/gridftpd/fileplugin/fileplugin.cpp:68 #, fuzzy msgid "Can't recognize user in configuration line" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/services/gridftpd/fileplugin/fileplugin.cpp:77 msgid "Can't recognize group in configuration line" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:84 #: src/services/gridftpd/fileplugin/fileplugin.cpp:89 #, fuzzy msgid "Can't parse or:and in configuration line" msgstr "Konnte classname für Policy nicht von Konfiguration parsen" #: src/services/gridftpd/fileplugin/fileplugin.cpp:116 #, fuzzy msgid "Can't parse configuration line" msgstr "Delegation Authorisierung fehlgeschlagen" #: src/services/gridftpd/fileplugin/fileplugin.cpp:120 #, fuzzy, c-format msgid "Bad directory name: %s" msgstr "Verzeichnis: %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:137 #, fuzzy msgid "Can't parse create arguments in configuration line" msgstr "Konnte classname für Request nicht von Konfiguration parsen" #: src/services/gridftpd/fileplugin/fileplugin.cpp:146 #, fuzzy msgid "Can't parse mkdir arguments in configuration line" msgstr "Konnte classname für Request nicht von Konfiguration parsen" #: src/services/gridftpd/fileplugin/fileplugin.cpp:163 #, fuzzy, c-format msgid "Bad subcommand in configuration line: %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:175 #, fuzzy msgid "Bad mount directory specified" msgstr "Kein Cache-Verzeichnis angegeben" #: src/services/gridftpd/fileplugin/fileplugin.cpp:177 #, c-format msgid "Mount point %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:215 #: src/services/gridftpd/fileplugin/fileplugin.cpp:273 #, fuzzy, c-format msgid "mkdir failed: %s" msgstr "PASV fehlgeschlagen: %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:226 #, fuzzy, c-format msgid "Warning: mount point %s creation failed." 
msgstr "Warnung: Schließen von tmp Lock Datei %s fehlgeschlagen" #: src/services/gridftpd/fileplugin/fileplugin.cpp:329 #, fuzzy, c-format msgid "plugin: open: %s" msgstr "lfn: %s - pfn: %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:377 #: src/services/gridftpd/fileplugin/fileplugin.cpp:414 #, fuzzy msgid "Not enough space to store file" msgstr "Kein space token angegeben" #: src/services/gridftpd/fileplugin/fileplugin.cpp:428 #, fuzzy, c-format msgid "open: changing owner for %s, %i, %i" msgstr "" "Kann Owner von %s nicht ändernНевозможно изменить владельца папки %1.\n" "Ошибка: %2" #: src/services/gridftpd/fileplugin/fileplugin.cpp:435 #, c-format msgid "open: owner: %i %i" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:444 #: src/services/gridftpd/fileplugin/fileplugin.cpp:484 #, fuzzy, c-format msgid "Unknown open mode %s" msgstr "unbekannter return code %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:449 msgid "plugin: close" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:490 msgid "plugin: read" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:496 #, fuzzy msgid "Error while reading file" msgstr "Fehler beim Lesen des response header" #: src/services/gridftpd/fileplugin/fileplugin.cpp:506 msgid "plugin: write" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:517 msgid "Zero bytes written to file" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:725 #, c-format msgid "plugin: checkdir: %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:728 #, c-format msgid "plugin: checkdir: access: %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:737 #, c-format msgid "plugin: checkdir: access: allowed: %s" msgstr "" #: src/services/gridftpd/fileroot.cpp:14 #, c-format msgid "No plugin is configured or authorised for requested path %s" msgstr "" #: src/services/gridftpd/fileroot.cpp:19 msgid "FilePlugin: more unload than load" msgstr "" #: src/services/gridftpd/fileroot.cpp:34 #, c-format msgid "Can't load plugin %s for access point %s" msgstr "" #: src/services/gridftpd/fileroot.cpp:39 src/services/gridftpd/fileroot.cpp:43 #, c-format msgid "Plugin %s for access point %s is broken." msgstr "" #: src/services/gridftpd/fileroot.cpp:47 #, c-format msgid "Plugin %s for access point %s acquire failed (should never happen)." 
msgstr "" #: src/services/gridftpd/fileroot.cpp:54 #, c-format msgid "Destructor with dlclose (%s)" msgstr "" #: src/services/gridftpd/fileroot.cpp:77 #, c-format msgid "FileNode: operator= (%s <- %s) %lu <- %lu" msgstr "" #: src/services/gridftpd/fileroot.cpp:79 msgid "Copying with dlclose" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:32 #: src/services/gridftpd/fileroot_config.cpp:596 #, fuzzy msgid "configuration file not found" msgstr "Vermuting - Datei nicht gefunden" #: src/services/gridftpd/fileroot_config.cpp:51 #, fuzzy msgid "Wrong port number in configuration" msgstr "ausführliche Ausgabe" #: src/services/gridftpd/fileroot_config.cpp:60 #, fuzzy msgid "Wrong maxconnections number in configuration" msgstr "Es ist keine connetion chain konfiguriert" #: src/services/gridftpd/fileroot_config.cpp:69 msgid "Wrong defaultbuffer number in configuration" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:78 msgid "Wrong maxbuffer number in configuration" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:110 #: src/services/gridftpd/fileroot_config.cpp:118 #, fuzzy, c-format msgid "Can't resolve host %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/services/gridftpd/fileroot_config.cpp:152 #: src/services/gridftpd/fileroot_config.cpp:455 #, fuzzy, c-format msgid "couldn't open file %s" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/services/gridftpd/fileroot_config.cpp:167 #: src/services/gridftpd/fileroot_config.cpp:183 #: src/services/gridftpd/fileroot_config.cpp:469 #, c-format msgid "improper attribute for encryption command: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:177 #: src/services/gridftpd/fileroot_config.cpp:479 #: src/services/gridftpd/fileroot_config.cpp:622 msgid "unknown (non-gridmap) user is not allowed" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:207 #: src/services/gridftpd/fileroot_config.cpp:547 #, fuzzy, c-format msgid "Failed processing authorization group %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/services/gridftpd/fileroot_config.cpp:216 #, fuzzy msgid "couldn't process VO configuration" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/services/gridftpd/fileroot_config.cpp:223 #: src/services/gridftpd/fileroot_config.cpp:231 #: src/services/gridftpd/fileroot_config.cpp:239 #: src/services/gridftpd/fileroot_config.cpp:500 #: src/services/gridftpd/fileroot_config.cpp:508 #: src/services/gridftpd/fileroot_config.cpp:516 #, fuzzy, c-format msgid "failed while processing configuration command: %s %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/services/gridftpd/fileroot_config.cpp:281 #, fuzzy, c-format msgid "can't parse configuration line: %s %s %s %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/services/gridftpd/fileroot_config.cpp:286 #, c-format msgid "bad directory in plugin command: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:298 #: src/services/gridftpd/fileroot_config.cpp:405 #, fuzzy, c-format msgid "Already have directory: %s" msgstr "Lege Verzeichnis %s an" #: src/services/gridftpd/fileroot_config.cpp:307 #: src/services/gridftpd/fileroot_config.cpp:408 #, fuzzy, c-format msgid "Registering directory: %s with plugin: %s" msgstr "Fehler bei Anlegen von Verzeichnis %s: %s" #: src/services/gridftpd/fileroot_config.cpp:312 #: src/services/gridftpd/fileroot_config.cpp:421 #, fuzzy, c-format msgid "file node creation failed: %s" msgstr "Anlegen von Socket schlug fehl: %s" #: 
src/services/gridftpd/fileroot_config.cpp:330 #, c-format msgid "improper attribute for allowactvedata command: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:335 #, fuzzy, c-format msgid "unsupported configuration command: %s" msgstr "Nicht unterstützte URL für Ziel: %s" #: src/services/gridftpd/fileroot_config.cpp:359 #, fuzzy msgid "Could not determine hostname from gethostname()" msgstr "Kann hostname von uname nciht ermitteln" #: src/services/gridftpd/fileroot_config.cpp:375 msgid "unnamed group" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:384 msgid "undefined plugin" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:388 msgid "undefined virtual plugin path" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:393 #, c-format msgid "bad directory for plugin: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:485 #, c-format msgid "improper attribute for allowunknown command: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:607 #, fuzzy msgid "failed to process client identification" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/services/gridftpd/fileroot_config.cpp:641 #, fuzzy, c-format msgid "Registering dummy directory: %s" msgstr "Lege Verzeichnis %s an" #: src/services/gridftpd/listener.cpp:57 #: src/services/gridftpd/listener.cpp:466 #, fuzzy msgid "Activation failed" msgstr "SOAP Aufruf fehlgeschlagen" #: src/services/gridftpd/listener.cpp:66 #: src/services/gridftpd/listener.cpp:172 msgid "Child exited" msgstr "" #: src/services/gridftpd/listener.cpp:78 #, fuzzy msgid "Globus connection error" msgstr "Schließe Verbindung" #: src/services/gridftpd/listener.cpp:80 #: src/services/gridftpd/listener.cpp:424 #, fuzzy msgid "New connection" msgstr "Wiederholte Nutzung von Verbindung" #: src/services/gridftpd/listener.cpp:87 msgid "Server stopped" msgstr "" #: src/services/gridftpd/listener.cpp:157 msgid "Error: failed to set handler for SIGTERM" msgstr "" #: src/services/gridftpd/listener.cpp:161 msgid "Starting controlled process" msgstr "" #: src/services/gridftpd/listener.cpp:164 #, fuzzy msgid "fork failed" msgstr "%s fehlgeschlagen" #: src/services/gridftpd/listener.cpp:169 msgid "wait failed - killing child" msgstr "" #: src/services/gridftpd/listener.cpp:174 msgid "Killed with signal: " msgstr "" #: src/services/gridftpd/listener.cpp:176 msgid "Restarting after segmentation violation." 
msgstr "" #: src/services/gridftpd/listener.cpp:177 #, fuzzy msgid "Waiting 1 minute" msgstr "Warte vor Antwort" #: src/services/gridftpd/listener.cpp:239 msgid "Error: failed to set handler for SIGCHLD" msgstr "" #: src/services/gridftpd/listener.cpp:256 msgid "Missing argument" msgstr "" #: src/services/gridftpd/listener.cpp:257 msgid "Unknown option" msgstr "" #: src/services/gridftpd/listener.cpp:264 msgid "Wrong port number" msgstr "" #: src/services/gridftpd/listener.cpp:274 #, fuzzy msgid "Wrong number of connections" msgstr "Schließe Verbindung" #: src/services/gridftpd/listener.cpp:281 msgid "Wrong buffer size" msgstr "" #: src/services/gridftpd/listener.cpp:288 msgid "Wrong maximal buffer size" msgstr "" #: src/services/gridftpd/listener.cpp:300 #, fuzzy msgid "Failed reading configuration" msgstr "Fehler bei Initialisierung der condition" #: src/services/gridftpd/listener.cpp:331 #, fuzzy, c-format msgid "Failed to obtain local address: %s" msgstr "Warnung: Fehler bei Bezug von Attributen von %s: %s" #: src/services/gridftpd/listener.cpp:338 #, fuzzy, c-format msgid "Failed to create socket(%s): %s" msgstr "Fehler bei Anlegen von soft link: %s" #: src/services/gridftpd/listener.cpp:352 #, fuzzy, c-format msgid "Failed to limit socket to IPv6: %s" msgstr "Fehler bei Unlock von Datei %s: %s" #: src/services/gridftpd/listener.cpp:359 #, fuzzy, c-format msgid "Failed to bind socket(%s): %s" msgstr "Fehler bei Unlock von Datei %s: %s" #: src/services/gridftpd/listener.cpp:364 #, fuzzy, c-format msgid "Failed to listen on socket(%s): %s" msgstr "Fehler bei Schreiben zu Datein %s: %s" #: src/services/gridftpd/listener.cpp:371 msgid "Not listening to anything" msgstr "" #: src/services/gridftpd/listener.cpp:374 #, c-format msgid "Some addresses failed. Listening on %u of %u." 
msgstr "" #: src/services/gridftpd/listener.cpp:382 #: src/services/gridftpd/listener.cpp:477 #, fuzzy msgid "Listen started" msgstr "Start start" #: src/services/gridftpd/listener.cpp:395 msgid "No valid handles left for listening" msgstr "" #: src/services/gridftpd/listener.cpp:401 #, fuzzy, c-format msgid "Select failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/services/gridftpd/listener.cpp:422 #, c-format msgid "Have connections: %i, max: %i" msgstr "" #: src/services/gridftpd/listener.cpp:427 #, fuzzy, c-format msgid "Fork failed: %s" msgstr "PASV fehlgeschlagen: %s" #: src/services/gridftpd/listener.cpp:445 msgid "Refusing connection: Connection limit exceeded" msgstr "" #: src/services/gridftpd/listener.cpp:471 #, fuzzy msgid "Init failed" msgstr "Schreibfehler" #: src/services/gridftpd/listener.cpp:474 #, fuzzy msgid "Listen failed" msgstr "Schreibfehler" #: src/services/gridftpd/listener.cpp:488 msgid "Listen finished" msgstr "" #: src/services/gridftpd/listener.cpp:493 msgid "Stopping server" msgstr "" #: src/services/gridftpd/listener.cpp:497 #, fuzzy msgid "Destroying handle" msgstr "Fehler bei Initialisierung von handle" #: src/services/gridftpd/listener.cpp:500 msgid "Deactivating modules" msgstr "" #: src/services/gridftpd/listener.cpp:508 #, fuzzy msgid "Exiting" msgstr "Zeichenkette" #: src/services/gridftpd/misc/ldapquery.cpp:253 #, fuzzy, c-format msgid "%s: %s:%i" msgstr "%s (%s)" #: src/services/gridftpd/misc/ldapquery.cpp:390 #: src/services/gridftpd/misc/ldapquery.cpp:468 #, fuzzy, c-format msgid "%s %s" msgstr "%s (%s)" #: src/services/gridftpd/misc/ldapquery.cpp:394 #, fuzzy, c-format msgid " %s: %s" msgstr " %s" #: src/services/gridftpd/misc/ldapquery.cpp:396 #, fuzzy, c-format msgid " %s:" msgstr " %s" #: src/services/gridftpd/userspec.cpp:48 #, fuzzy, c-format msgid "Mapfile is missing at %s" msgstr "Location fehlt" #: src/services/gridftpd/userspec.cpp:89 #: src/services/gridftpd/userspec.cpp:215 #, fuzzy msgid "There is no local mapping for user" msgstr "Es ist keine connetion chain konfiguriert" #: src/services/gridftpd/userspec.cpp:92 #: src/services/gridftpd/userspec.cpp:219 msgid "There is no local name for user" msgstr "" #: src/services/gridftpd/userspec.cpp:142 #: src/services/gridftpd/userspec.cpp:233 msgid "No proxy provided" msgstr "" #: src/services/gridftpd/userspec.cpp:144 #, c-format msgid "Proxy/credentials stored at %s" msgstr "" #: src/services/gridftpd/userspec.cpp:147 #: src/services/gridftpd/userspec.cpp:238 #, fuzzy, c-format msgid "Initially mapped to local user: %s" msgstr "Grid Identität wird zugewiesen zu lokaler Identität '%s'" #: src/services/gridftpd/userspec.cpp:150 #: src/services/gridftpd/userspec.cpp:340 #, fuzzy, c-format msgid "Local user %s does not exist" msgstr "Lock-Datei %s existiert nicht" #: src/services/gridftpd/userspec.cpp:155 #: src/services/gridftpd/userspec.cpp:246 #, c-format msgid "Initially mapped to local group: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:158 #: src/services/gridftpd/userspec.cpp:249 #: src/services/gridftpd/userspec.cpp:349 #, fuzzy, c-format msgid "Local group %s does not exist" msgstr "Lock-Datei %s existiert nicht" #: src/services/gridftpd/userspec.cpp:167 #: src/services/gridftpd/userspec.cpp:258 msgid "Running user has no name" msgstr "" #: src/services/gridftpd/userspec.cpp:170 #: src/services/gridftpd/userspec.cpp:261 #, c-format msgid "Mapped to running user: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:180 #: src/services/gridftpd/userspec.cpp:271 #, fuzzy, 
c-format msgid "Mapped to local id: %i" msgstr "Fehler bei Unlock von Datei %s: %s" #: src/services/gridftpd/userspec.cpp:185 #: src/services/gridftpd/userspec.cpp:276 #, c-format msgid "No group %i for mapped user" msgstr "" #: src/services/gridftpd/userspec.cpp:194 #: src/services/gridftpd/userspec.cpp:285 #, c-format msgid "Mapped to local group id: %i" msgstr "" #: src/services/gridftpd/userspec.cpp:195 #: src/services/gridftpd/userspec.cpp:286 #, c-format msgid "Mapped to local group name: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:196 #: src/services/gridftpd/userspec.cpp:287 #, c-format msgid "Mapped user's home: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:235 #, fuzzy, c-format msgid "Proxy stored at %s" msgstr "ProxyStore: %s" #: src/services/gridftpd/userspec.cpp:241 #, fuzzy msgid "Local user does not exist" msgstr "Lock-Datei %s existiert nicht" #: src/services/gridftpd/userspec.cpp:317 #, c-format msgid "Undefined control sequence: %%%s" msgstr "" #: src/services/gridftpd/userspec.cpp:354 #, fuzzy, c-format msgid "Remapped to local user: %s" msgstr "Fehler bei Unlock von Datei %s: %s" #: src/services/gridftpd/userspec.cpp:355 #, fuzzy, c-format msgid "Remapped to local id: %i" msgstr "Grid Identität wird zugewiesen zu lokaler Identität '%s'" #: src/services/gridftpd/userspec.cpp:356 #, c-format msgid "Remapped to local group id: %i" msgstr "" #: src/services/gridftpd/userspec.cpp:357 #, c-format msgid "Remapped to local group name: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:358 #, c-format msgid "Remapped user's home: %s" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:33 #, c-format msgid "config: %s, class name: %s" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:42 msgid "libjvm.so not loadable - check your LD_LIBRARY_PATH" msgstr "libjvm.so nicht ladbar - überprüfe LD_LIBRARY_PATH" #: src/services/wrappers/java/javawrapper.cpp:52 msgid "libjvm.so does not contain the expected symbols" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:66 msgid "JVM started" msgstr "JVM gestartet" #: src/services/wrappers/java/javawrapper.cpp:71 #, c-format msgid "There is no service: %s in your Java class search path" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:79 msgid "There is no constructor function" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:86 #, c-format msgid "%s constructed" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:90 msgid "Destroy JVM" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:183 msgid "Cannot find MCC_Status object" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:197 msgid "Java object returned NULL status" msgstr "Java Objekt gab NULL status an" #: src/services/wrappers/python/pythonwrapper.cpp:107 #, c-format msgid "Loading %u-th Python service" msgstr "Lade %u-th Python Service" #: src/services/wrappers/python/pythonwrapper.cpp:111 #, c-format msgid "Initialized %u-th Python service" msgstr "Initialisierte %u-th Python servce" #: src/services/wrappers/python/pythonwrapper.cpp:147 msgid "Invalid class name" msgstr "Ungültiger Klassenname" #: src/services/wrappers/python/pythonwrapper.cpp:152 #, c-format msgid "class name: %s" msgstr "Klassenname: %s" #: src/services/wrappers/python/pythonwrapper.cpp:153 #, c-format msgid "module name: %s" msgstr "Modulname: %s" #: src/services/wrappers/python/pythonwrapper.cpp:210 #, fuzzy msgid "Cannot find ARC Config class" msgstr "Kann UserConfig Klasse nicht finden" #: src/services/wrappers/python/pythonwrapper.cpp:217 #, fuzzy 
msgid "Config class is not an object" msgstr "UserConfig Klasse ist kein Objekt" #: src/services/wrappers/python/pythonwrapper.cpp:225 msgid "Cannot get dictionary of module" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:234 msgid "Cannot find service class" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:243 msgid "Cannot create config argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:250 #, fuzzy msgid "Cannot convert config to Python object" msgstr "Kann UserConfig nicht zu python Objekt konvertieren" #: src/services/wrappers/python/pythonwrapper.cpp:273 #, c-format msgid "%s is not an object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:279 #, fuzzy msgid "Message class is not an object" msgstr "Klasse %s ist kein Objekt" #: src/services/wrappers/python/pythonwrapper.cpp:287 msgid "Python Wrapper constructor succeeded" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:303 #, c-format msgid "Python Wrapper destructor (%d)" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:336 msgid "Python interpreter locked" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:340 msgid "Python interpreter released" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:400 msgid "Python wrapper process called" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:409 msgid "Failed to create input SOAP container" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:419 msgid "Cannot create inmsg argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:433 #, fuzzy msgid "Cannot find ARC Message class" msgstr "Kann arc ExecutionTarget Klasse nicht finden" #: src/services/wrappers/python/pythonwrapper.cpp:439 #, fuzzy msgid "Cannot convert inmsg to Python object" msgstr "Kann doc nicht zu Python Objekt konvertieren" #: src/services/wrappers/python/pythonwrapper.cpp:448 msgid "Failed to create SOAP containers" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:454 msgid "Cannot create outmsg argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:460 #, fuzzy msgid "Cannot convert outmsg to Python object" msgstr "Kann doc nicht zu Python Objekt konvertieren" #: src/services/wrappers/python/pythonwrapper.cpp:516 msgid "Failed to create XMLNode container" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:533 #, fuzzy msgid "Cannot find ARC XMLNode class" msgstr "Kann custom broker Klasse nicht finden" #: src/services/wrappers/python/pythonwrapper.cpp:539 msgid "Cannot create doc argument" msgstr "Kann doc Argument nicht anlegen" #: src/services/wrappers/python/pythonwrapper.cpp:545 #, fuzzy msgid "Cannot convert doc to Python object" msgstr "Kann doc nicht zu Python Objekt konvertieren" #: src/tests/client/test_ClientInterface.cpp:36 #: src/tests/client/test_ClientSAML2SSO.cpp:68 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:78 #: src/tests/echo/test_clientinterface.cpp:41 #: src/tests/echo/test_clientinterface.cpp:132 #: src/tests/echo/test_clientinterface.py:9 msgid "Creating a soap client" msgstr "Lege SOAP Clietn an" #: src/tests/client/test_ClientInterface.cpp:84 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:97 #: src/tests/echo/test_clientinterface.cpp:78 msgid "SOAP invokation failed" msgstr "" #: src/tests/client/test_ClientSAML2SSO.cpp:44 #: src/tests/echo/test_clientinterface.cpp:100 msgid "Creating a http client" msgstr "Lege HTTP Client an" #: src/tests/client/test_ClientSAML2SSO.cpp:55 #: 
src/tests/echo/test_clientinterface.cpp:117 msgid "HTTP with SAML2SSO invokation failed" msgstr "" #: src/tests/client/test_ClientSAML2SSO.cpp:59 #: src/tests/echo/test_clientinterface.cpp:121 msgid "There was no HTTP response" msgstr "Keine HTTP Antwort erhalten" #: src/tests/client/test_ClientSAML2SSO.cpp:77 #: src/tests/echo/test_clientinterface.cpp:145 msgid "SOAP with SAML2SSO invokation failed" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:37 #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:38 #: src/tests/delegation/test_delegation_client.cpp:45 #: src/tests/delegation/test_delegation_client.cpp:76 #: src/tests/echo/test_clientinterface.cpp:172 #: src/tests/echo/test_clientinterface.cpp:194 msgid "Creating a delegation soap client" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:46 #: src/tests/delegation/test_delegation_client.cpp:51 #: src/tests/echo/test_clientinterface.cpp:178 msgid "Delegation to ARC delegation service failed" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:50 #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:49 #: src/tests/delegation/test_delegation_client.cpp:56 #: src/tests/delegation/test_delegation_client.cpp:88 #: src/tests/echo/test_clientinterface.cpp:182 #: src/tests/echo/test_clientinterface.cpp:205 #, c-format msgid "Delegation ID: %s" msgstr "Delegation ID: %s" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:55 #, c-format msgid "Can not get the delegation credential: %s from delegation service:%s" msgstr "" "Kann delegation credential nicht erhalten: %s von delegation service: %s" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:58 #, c-format msgid "Delegated credential from delegation service: %s" msgstr "" #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:45 #: src/tests/delegation/test_delegation_client.cpp:83 #: src/tests/echo/test_clientinterface.cpp:201 msgid "Delegation to gridsite delegation service failed" msgstr "" #: src/tests/count/count.cpp:58 msgid "Input is not SOAP" msgstr "" #: src/tests/count/count.cpp:89 src/tests/echo/echo.cpp:83 msgid "echo: Unauthorized" msgstr "echo: Unauthorisiert" #: src/tests/count/count.cpp:98 src/tests/count/count.cpp:104 #, c-format msgid "Request is not supported - %s" msgstr "" #: src/tests/count/test_service.cpp:33 src/tests/echo/test_service.cpp:30 msgid "Service is waiting for requests" msgstr "Service wartet auf Anfragen" #: src/tests/echo/test.cpp:32 #, fuzzy msgid "Creating client interface" msgstr "Erstelle Client Schnitstelle" #: src/tests/echo/test_clientinterface.py:27 msgid "SOAP invocation failed" msgstr "SOAP Aufruf fehlgeschlagen" #~ msgid "use GSI proxy (RFC 3820 compliant proxy is default)" #~ msgstr "" #~ "использовать доверенность GSI (по умолчанию используется\n" #~ " RFC 3820-совместимая доверенность)" #, fuzzy #~ msgid "Unable to create directory %s: No valid credentials found" #~ msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #, fuzzy #~ msgid "Unable to rename %s: No valid credentials found" #~ msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #~ msgid "Submit: Failed to disconnect after submission" #~ msgstr "Submit: Fehler bei Disconnect nach Submission" #~ msgid "year" #~ msgid_plural "years" #~ msgstr[0] "Jahre" #~ msgstr[1] "Jahr" #~ msgstr[2] "Jahre" #~ msgid "month" #~ msgid_plural "months" #~ msgstr[0] "Monate" #~ msgstr[1] "Monat" #~ msgstr[2] "Monate" #~ msgid "day" #~ msgid_plural "days" #~ msgstr[0] "Tage" #~ msgstr[1] "Tag" 
#~ msgstr[2] "Tage" #, fuzzy #~ msgid "arc_to_voms - %u attributes" #~ msgstr " Attribute" #, fuzzy #~ msgid "Failed to report renewed proxy to job" #~ msgstr "Fehler bei Lesen von Proxy-Datei: %s" #, fuzzy #~ msgid "Match group: %s" #~ msgstr "Fataler Fehler: %s" #, fuzzy #~ msgid "Match role: %s" #~ msgstr "Fataler Fehler: %s" #, fuzzy #~ msgid "Failed writing RSL" #~ msgstr "Konnte job nicht starten" #, fuzzy #~ msgid "RSL could not be evaluated: %s" #~ msgstr "Datei konnte nicht zu Running Zustand bewegt werden: %s" #, fuzzy #~ msgid "Can't evaluate RSL fragment: %s" #~ msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #, fuzzy #~ msgid "Failed to set GFAL2 user data object: %s" #~ msgstr "Fehler bei Lesen von Objekt %s" #~ msgid "" #~ "Argument to -c has the format Flavour:URL e.g.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgstr "" #~ "Аргумент опции -c задаётся по форме Flavour:URL, например:\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgid "explicity select or reject a specific cluster" #~ msgstr "explizit einen Cluster auswählen oder ablehnen" #~ msgid "" #~ "Argument to -i has the format Flavour:URL e.g.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/mds-vo-name=sweden,O=grid\n" #~ "CREAM:ldap://cream.grid.upjs.sk:2170/o=grid\n" #~ "\n" #~ "Argument to -c has the format Flavour:URL e.g.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgstr "" #~ "Аргумент опции -i задаётся по форме Flavour:URL, например:\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/mds-vo-name=sweden,O=grid\n" #~ "CREAM:ldap://cream.grid.upjs.sk:2170/o=grid\n" #~ "\n" #~ "Аргумент опции -c задаётся по форме Flavour:URL, например:\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgid "explicity select or reject an index server" #~ msgstr "Ausdrücklich einen Index Server bestimmen oder ablehnen" #~ msgid "" #~ "The arcmigrate command is used for migrating queud jobs to another " #~ "cluster.\n" #~ "Note that migration is only supported between ARC1 clusters." #~ msgstr "" #~ "Das arcmigrate Kommando dient zur Verteilung von bereits hochgeladenen " #~ "Jobs zwischen Clustern. Diese Migration wird nur zwischen ARC1 Clustern " #~ "unterstützt." #, fuzzy #~ msgid "select broker method (Random (default), FastestQueue, or custom)" #~ msgstr "" #~ "wählt Methode für eine Verteilung von Jobs zwischen Clustern (Random " #~ "(engl. 
für zufällig, die Voreinstellung, FastestQueue (die Queue mit den " #~ "schnellsten Rechnern), oder custom (für eigenes))" #~ msgid "[job ...]\n" #~ msgstr "[Job ...]\n" #, fuzzy #~ msgid "file where the jobs will be stored" #~ msgstr "Datei, in der Jobs abgelegt werden" #~ msgid "explicity select or reject a specific cluster for the new job" #~ msgstr "positive oder negative Selektion eines Clusters für einen Job" #~ msgid "No jobs to resubmit" #~ msgstr "Keine erneut hochzuladenen Jobs" #~ msgid "Submission to %s failed, trying next target" #~ msgstr "Hochladen zu %s schlug fehl, versuche nächstes Ziel" #~ msgid "Job resubmitted with new jobid: %s" #~ msgstr "Job erneut hochgeladen mit neuer Job ID: %s" #~ msgid "Job could not be killed or cleaned" #~ msgstr "Job konnte nicht abgebrochen oder gelöscht werden" #~ msgid "url of the policy decision service" #~ msgstr "URL des Policy Decision Service" #~ msgid "use SAML 2.0 profile of XACML v2.0 to contact the service" #~ msgstr "nutze SAML 2.0 Profil von XACML v2.0 um Server zu kontaktieren" #~ msgid "path to private key file" #~ msgstr "Pfad zu Datei mit privatem Schlüssel" #~ msgid "URL of SLCS service" #~ msgstr "URL des SLCS Service" #~ msgid "IdP name" #~ msgstr "IdP Name" #~ msgid "Password for user account to IdP" #~ msgstr "Passwort des user accounts für IdP" #~ msgid "Private key passphrase" #~ msgstr "Passphrase für privaten Schlüssel" #~ msgid "passphrase" #~ msgstr "Passphrase" #~ msgid "directory" #~ msgstr "Verzeichnis" #~ msgid "Source probably does not exist" #~ msgstr "Quelle existiert vermutlich nicht" #~ msgid "Current transfer FAILED: %s - %s" #~ msgstr "Aktueller Transfer SCHLUG FEHL: %s - %s" #~ msgid "Transfer FAILED: %s - %s" #~ msgstr "Transfer FEHLER: %s - %s" #~ msgid "isis" #~ msgstr "isis" #~ msgid "method" #~ msgstr "Methode" #~ msgid " ISIS tester start!" #~ msgstr "ISIS tester Start" #~ msgid "Disconnect: Failed quitting: %s" #~ msgstr "Disconnect: Verlassen der Verbindung fehlgeschlagen: %s" #~ msgid "Submit: Failed to modify job description to be sent to target." #~ msgstr "" #~ "Submit: Konnte Job Beschreibung für die Sendung zum Ziel nicht " #~ "modifizieren" #, fuzzy #~ msgid "" #~ "Trying to migrate to %s: Migration to a ARC0 cluster is not supported." #~ msgstr "" #~ "Versuch zu migrieren zu %s: Migration zu ARC0 cluster wird nicht " #~ "unterstützt" #~ msgid "TargetRetriverARC0 initialized with %s service url: %s" #~ msgstr "TargetRetrieverARC0 initialisiert mit %s service URL: %s" #, fuzzy #~ msgid "Failed locating delegation credentials in chain configuration" #~ msgstr "" #~ "Fehler bei Lokalisation der delegation credentials in chain Konfiguration" #, fuzzy #~ msgid "Found malformed job state string: %s" #~ msgstr "Fand ungültig formulierte Job Zustandsbeschreibung: %s\n" #~ msgid "TargetRetriverARC1 initialized with %s service url: %s" #~ msgstr "TargetRetriverARC1 initialisiert mit %s service url: %s" #~ msgid "TargetRetriverBES initialized with %s service url: %s" #~ msgstr "TargetRetriverBES initialisiert mit %s Service URL: %s" #~ msgid "" #~ "Matching against job description,following targets possible for " #~ "BenchmarkBroker: %d" #~ msgstr "" #~ "Abgleich mit Job Beschreibung, die folgenden Ziele sind möglich für " #~ "BenchmarkBroker: %d" #, fuzzy #~ msgid "%d. Cluster: %s; Queue: %s" #~ msgstr "%d. 
Cluster: %s; Queue: %s" #~ msgid "Cluster will be ranked according to the %s benchmark scenario" #~ msgstr "Cluster wird bewertet anhand des %s benchmark" #~ msgid "Best targets are: %d" #~ msgstr "Die besten Ziel sind: %d" #~ msgid "" #~ "Matching against job description, following targets possible for " #~ "DataBroker: %d" #~ msgstr "" #~ "Abgleich mit Job Beschreibung, die folgenden Ziele sind möglich für " #~ "DataBroker: %d" #~ msgid "FastestQueueBroker is filtering %d targets" #~ msgstr "FastestQueueBroker filter %d Ziele" #~ msgid "FastestQueueBroker will rank the following %d targets" #~ msgstr "FastestQueueBroker bewertet die folgenden %d Ziele:" #~ msgid "" #~ "Matching against job description, following targets possible for " #~ "RandomBroker: %d" #~ msgstr "" #~ "Abgleich gegen Job Beschreibung, folgende Ziele sind möglich für " #~ "RandomBroker: %d" #, fuzzy #~ msgid "" #~ "Trying to migrate to %s: Migration to a CREAM cluster is not supported." #~ msgstr "" #~ "Versuch zu migrieren zu %s: MIgration zu einem CREAM cluster wird nicht " #~ "unterstützt" #~ msgid "TargetRetriverCREAM initialized with %s service url: %s" #~ msgstr "TargetRetrieverCREAM initialisiert mit %s service URL: %s" #~ msgid "Cannot convert arc module name to Python string" #~ msgstr "Kann arc Modulename nicht zu Python Zeichenkette konvertierten" #~ msgid "Cannot import arc module" #~ msgstr "Kann arc Modul nicht importieren" #, fuzzy #~ msgid "" #~ "Trying to migrate to %s: Migration to a UNICORE cluster is not supported." #~ msgstr "" #~ "Versuch zu migrieren zu %s: Migration zu einem UNICORE cluster wird nicht " #~ "unterstützt" #~ msgid "TargetRetriverUNICORE initialized with %s service url: %s" #~ msgstr "TargetRetriverUNICORE initialisiert mit %s service URL: %s" #~ msgid "File is not accessible: %s - %s" #~ msgstr "Dati ist nicht zugreifbar: %s - %s" #~ msgid "delete_ftp: globus_ftp_client_delete timeout" #~ msgstr "delete_ftp: globus_ftp_client_delete Zeitüberschreitung" #, fuzzy #~ msgid "Response(%i): %s" #~ msgstr "Antwort: %s" #, fuzzy #~ msgid "Failed to close connection 3" #~ msgstr "Fehler bei Schließen von Verbindung 3" #~ msgid "Cthread_init() error: %s" #~ msgstr "Cthread_init() Fehler: %s" #, fuzzy #~ msgid "Using supplied guid %s" #~ msgstr "Nutze angegebene guid %s" #~ msgid "meta_get_data: size: %llu" #~ msgstr "meta_get_data: Größe: %llu" #~ msgid "meta_get_data: created: %s" #~ msgstr "meta_get_data: angelegt: %s" #~ msgid "LFN is missing in LFC (needed for replication)" #~ msgstr "LFN fehlt in LFC (benötigt für Replikation)" #~ msgid "LFN already exists in LFC" #~ msgstr "LFN existiert bereits in LFC" #~ msgid "Creating LFC directory %s" #~ msgstr "Anlegen von LFC Verzeichnis %s" #~ msgid "Error creating required LFC dirs: %s" #~ msgstr "Fehler bei Anlegen benötigter LFC Verzeichnisse: %s" #~ msgid "Error creating LFC entry: %s" #~ msgstr "Fehler bei Anlegen von LFC Eintrag: %s" #, fuzzy #~ msgid "Error finding info on LFC entry %s which should exist: %s" #~ msgstr "Fehler beim Anlegen von LFC Eintrag %s, guid %s: %s" #~ msgid "Error creating LFC entry %s, guid %s: %s" #~ msgstr "Fehler beim Anlegen von LFC Eintrag %s, guid %s: %s" #, fuzzy #~ msgid "Error entering metadata: %s" #~ msgstr "Fehler bei Eingabe von Metadaten: %s" #~ msgid "No GUID defined for LFN - probably not preregistered" #~ msgstr "Keine GUID definiert für LFN - vielleicht nicht preregistriert" #~ msgid "Error adding replica: %s" #~ msgstr "Fehler beim Hinzufügen von replica: %s" #, fuzzy #~ msgid "Entering 
checksum type %s, value %s, file size %llu" #~ msgstr "Eingabe von checksum Typ %s, Wert %s, Dateigröße %llu" #~ msgid "Failed to remove LFN in LFC - You may need to do it by hand" #~ msgstr "" #~ "Fehler beim Entfernen von LFN in LFC - Sie müssen dies wohl von Hand " #~ "erledigen" #, fuzzy #~ msgid "Error getting replicas: %s" #~ msgstr "Fehler bei Erhalt der replicas: %s" #~ msgid "Failed to remove location from LFC" #~ msgstr "Fehler beim Entfernen von location von LFC" #~ msgid "Failed to remove LFC directory: directory is not empty" #~ msgstr "" #~ "Fehler beim Entfernen von LFC Verzeichnis: Verzeichnis ist nicht leer" #~ msgid "Failed to remove LFN in LFC: %s" #~ msgstr "Fehler beim Entfernen von LFN in LFC: %s" #~ msgid "guid %s resolved to LFN %s" #~ msgstr "guid %s aufgelöst zu LFN %s" #~ msgid "Failed to find GUID for specified LFN in %s: %s" #~ msgstr "Konnte GUID für angegebenen LFN nicht finden in %s: %s" #~ msgid "There is no GUID for specified LFN in %s" #~ msgstr "Es gibt keine GUID für angegebenen LFN in %s" #~ msgid "Warning: can't get PFNs from server %s: %s" #~ msgstr "Warnung: Kann keine PFNs erhalten von server %s: %s" #, fuzzy #~ msgid "RLS URL must contain host" #~ msgstr "RLS URL muss Angabe von host enthalten" #, fuzzy #~ msgid "" #~ "Locations are missing in destination RLS url - will use those registered " #~ "with special name" #~ msgstr "" #~ "Lokalisation fehlen in Ziel RLS URL - werde die mit special name " #~ "registrierten nutzen" #~ msgid "LFN is missing in RLS (needed for replication)" #~ msgstr "LFN fehlt in RLS (benötigt für Replikation)" #~ msgid "LFN already exists in replica" #~ msgstr "LFN existiert bereits in replica" #~ msgid "Failed to check for existing LFN in %s: %s" #~ msgstr "Fehler bei Überprüfung für existierenden LFN in %s: %s" #, fuzzy #~ msgid "There is same LFN in %s" #~ msgstr "Es existiert dieselbe LFN in %s" #~ msgid "Failed to add LFN-GUID to RLS: %s" #~ msgstr "Fehler bei Hinzufüden von LFN-GUID zu RLS: %s" #~ msgid "Failed to create/add LFN-PFN mapping: %s" #~ msgstr "Fehler bei Anlegen/Hinzufügen von LFN-PFN Zuweisungen: %s" #~ msgid "Warning: failed to add attribute to RLS: %s" #~ msgstr "Warnung: Fehler bei Hinzufügen eines Attributs zu RLS: %s" #~ msgid "Warning: Failed to retrieve LFN/PFNs from %s: %s" #~ msgstr "Warnung: Fehler bei Bezug von LFN/PFNs von %s: %s" #~ msgid "SE location will be unregistered automatically" #~ msgstr "SE location wird automatisch deregistriert" #~ msgid "Warning: Failed to delete LFN/PFN from %s: %s" #~ msgstr "Warnung: Fehler beim Löschen von LFN/PFN von %s: %s" #~ msgid "LFN must be already deleted, try LRC anyway" #~ msgstr "LFN muss bereits gelöscht sein, versuche dennoch LRC" #, fuzzy #~ msgid "Failed to retrieve LFN/LRC: %s" #~ msgstr "Fehler bei Bezug von LFN/LRC: %s" #~ msgid "Warning: Failed to connect to LRC at %s: %s" #~ msgstr "Warnung. Fehler bei Verbindung zu LRC bei %s: %s" #~ msgid "No LFNs found in %s" #~ msgstr "Keine LFNs gefunden in %s" #~ msgid "Failed to retrieve list of LFNs/PFNs from %s" #~ msgstr "Fehler bei Bezug von List von LFNs/PFNs von %s" #~ msgid "lfn: %s(%s) - %s" #~ msgstr "lfn: %s(%s) - %s" #~ msgid "Warning: can't get list of RLIs from server %s: %s" #~ msgstr "Warnung. Erhalte keine Liste von RLIs von server %s: %s" #, fuzzy #~ msgid "Warning: can't get list of senders from server %s: %s" #~ msgstr "Warnung: Erhalte keine Liste von senders von Server %s: %s" #, fuzzy #~ msgid "" #~ "Warning: No space tokens found matching description! 
Will copy without " #~ "using token" #~ msgstr "" #~ "Warnung: Keine space tokens gefunden, die der Beschreibung entsprechen! " #~ "Kopiere ohne Nutzung der Token" #, fuzzy #~ msgid "start_reading_srm: looking for metadata: %s" #~ msgstr "StartReading: suche nach Metadaten: %s" #~ msgid "globus_io_register_read failed: %s" #~ msgstr "globus_io_register_read ist fehlgeschlagen: %s" #~ msgid "globus_io_register_write failed: %s" #~ msgstr "globus_io_register_write ist fehlgeschlagen: %s" #, fuzzy #~ msgid "clear_input: %s" #~ msgstr "clear_input: %s" #~ msgid "Connection closed" #~ msgstr "Verbindung geschlossen" #, fuzzy #~ msgid "Globus error (read): %s" #~ msgstr "Globus Fehler (Lesen): %s" #, fuzzy #~ msgid "*** Server response: %s" #~ msgstr "*** Server Antwort: %s" #, fuzzy #~ msgid "Failed wrapping GSI token: %s" #~ msgstr "Konnte GI token nicht wrappen: %s" #, fuzzy #~ msgid "Failed unwrapping GSI token: %s" #~ msgstr "Fehler bei unwrap des GSI token: %s" #, fuzzy #~ msgid "Unwrapped data does not fit into buffer" #~ msgstr "Unwrapped data passt nicht in Puffer" #, fuzzy #~ msgid "Urecognized SSL token received" #~ msgstr "Nicht erkannter SSL token erkannt" #, fuzzy #~ msgid "read_response_header: line: %s" #~ msgstr "read_response_header: Zeile: %s" #~ msgid "read_response_header: header finished" #~ msgstr "read_response_header: header beendet" #~ msgid "skip_response_entity" #~ msgstr "skip_response_entity" #~ msgid "skip_response_entity: size: %llu" #~ msgstr "skip_response_entity: Größe: %llu" #~ msgid "skip_response_entity: already have all" #~ msgstr "skip_response_entity: Sie haben bereits alle" #~ msgid "skip_response_entity: size left: %llu" #~ msgstr "skip_response_entity: Größe verbleibend: %llu" #~ msgid "skip_response_entity: to read: %llu" #~ msgstr "skip_response_entity: zu lesen: %llu" #~ msgid "skip_response_entity: timeout %llu" #~ msgstr "skip_response_entity: Zeitüberschreitung %llu" #~ msgid "skip_response_entity: read: %u (%llu)" #~ msgstr "skip_response_entity: gelesen: %u (%llu)" #~ msgid "skip_response_entity: read all" #~ msgstr "skip_response_entity: alles gelesen" #, fuzzy #~ msgid "skip_response_entity: no entity" #~ msgstr "skip_response_entity: no entity" #~ msgid "skip_response_entity: unknown size" #~ msgstr "skip_response_entity: unbekannte Größe" #, fuzzy #~ msgid "Timeout sending header" #~ msgstr "Zeitüberschreitung beim Senden des Header" #, fuzzy #~ msgid "Failure while receiving entity" #~ msgstr "Fehler beim Emfpangen von entity" #, fuzzy #~ msgid "Timeout while sending header" #~ msgstr "Zeitüberschreitung beim Senden von header" #~ msgid "GET: header is read - rest: %u" #~ msgstr "GET: header wird gelesen - verbleibend: %u" #~ msgid "GET: calling callback(rest): content: %s" #~ msgstr "GET: rufe callback(rest): Inhalt: %s" #~ msgid "GET: calling callback(rest): size: %u" #~ msgstr "GET: rufe callbeck(rest): Größe: %u" #~ msgid "GET: calling callback(rest): offset: %llu" #~ msgstr "GET: rufe callbeck(rest): offset: %llu" #, fuzzy #~ msgid "GET callback returned error" #~ msgstr "GET callback lieferte Fehlermeldung" #, fuzzy #~ msgid "Failed while reading response content" #~ msgstr "Fehler beim Lesen von Antwort Inhalt" #, fuzzy #~ msgid "Timeout while reading response content" #~ msgstr "Timeout beim Lesen von Antwort Inhalt" #, fuzzy #~ msgid "Error while reading response content" #~ msgstr "Fehler beim Lesen von Antwort Inhalt" #~ msgid "GET: calling callback: content: %s" #~ msgstr "GET: rufe callback: Inhalt: %s" #, fuzzy #~ msgid "GET: 
calling callback: size: %u" #~ msgstr "GET: rufe callback: Größe: %u" #~ msgid "GET: calling callback: offset: %llu" #~ msgstr "GET: rufe callback: offset: %llu" #, fuzzy #~ msgid "Timeout while sending SOAP request" #~ msgstr "Timeout beim Senden von SOAP request" #~ msgid "Error sending data to server" #~ msgstr "Fehler beim Senden von Daten zum Server" #~ msgid "SOAP request failed (get)" #~ msgstr "SOAP Anfrage fehlgeschlagen (get)" #~ msgid "SOAP request failed (getRequestStatus)" #~ msgstr "SOAP Anfrage fehlgeschlagen (getRequestStatus)" #~ msgid "SOAP request failed (put)" #~ msgstr "SOAP Anfrage fehlgeschlagen (put)" #~ msgid "SOAP request failed (copy)" #~ msgstr "SOAP Anfrage fehlgeschlagen (copy)" #~ msgid "SOAP request failed (setFileStatus)" #~ msgstr "SOAP Anfrage fehlgeschlagen (setFileStatus)" #~ msgid "SOAP request failed (SRMv1Meth__advisoryDelete)" #~ msgstr "SOAP Anfragen fehlgeschlagen (SRMv1Meth__advisoryDelete)" #~ msgid "SOAP request failed (getFileMetaData)" #~ msgstr "SOAP Anfragen fehlgeschlagen (getFileMetaData)" #~ msgid "SOAP request failed (%s)" #~ msgstr "SOAP Anfrage fehlgeschlagen (%s)" #~ msgid "Error: PrepareToGet request timed out after %i seconds" #~ msgstr "" #~ "Fehler: Zeitüberschreitung bei PrepareToGet Anfrage nach %i Sekunden" #~ msgid "Request is reported as ABORTED" #~ msgstr "Anfrage wurde berichtet als ABORTED (abgebrochen)" #~ msgid "Error: PrepareToPut request timed out after %i seconds" #~ msgstr "" #~ "Fehler: Zeitüberschreitung bei PrepareToPut Anfrage nach %i Sekunden" #~ msgid "Error: Ls request timed out after %i seconds" #~ msgstr "Fehler: Zeitüberschreitung bei Ls Anfrage nach %i Sekunden" #~ msgid "Error: copy request timed out after %i seconds" #~ msgstr "Fehler: Zeitüberschreitung bei Kopieranfrage nach %i Sekunden " #, fuzzy #~ msgid "SOAP request failed (srmMkdir)" #~ msgstr "SOAP Anfrage schlug fehl (srmMkdir)" #, fuzzy #~ msgid "Error opening srm info file %s:%s" #~ msgstr "Fehler bei Öffnen von Meta-Datei %s: %s" #, fuzzy #~ msgid "Trying to open confirm site %s" #~ msgstr "" #~ "Ошибка при попытке открыть файл:\n" #~ " %1" #~ msgid "Source is bad URL or can't be used due to some reason" #~ msgstr "" #~ "Quelle ist eine schlechte URL oder kann aus irgendeinem Grund nicht " #~ "genutzt werden." #~ msgid "Destination is bad URL or can't be used due to some reason" #~ msgstr "" #~ "Ziel ist eine schlechte URL oder kann aus irgendeinem Grund nicht genutzt " #~ "werden." 
#, fuzzy #~ msgid "Failed while transfering data (mostly timeout)" #~ msgstr "Fehler bei Datentransfer (überwiegend Zeitüberschreitung)" #, fuzzy #~ msgid "Error creating file %s with mkstemp(): %s" #~ msgstr "Fehler bei Anlegen von %s mit mkstemp(): %s" #~ msgid "Error opening lock file we just renamed successfully %s: %s" #~ msgstr "" #~ "Fehler bei Öfnnen von Lock-Datei die gerade erfolgreich umgenannt wurde %" #~ "s: %s" #~ msgid "" #~ "Lock that recently existed has been deleted by another process, calling " #~ "Start() again" #~ msgstr "" #~ "Lock das zuvor existierte wurde gelöscht von anderem Prozess, rufe Start" #~ "() nochmals" #~ msgid "Error opening valid and existing lock file %s: %s" #~ msgstr "Fehler bei Öffnen von gültiger und existierener Lock-Datei %s: %s" #~ msgid "Error reading valid and existing lock file %s: %s" #~ msgstr "Fehler bei Lesen von gültiger und existierender Lock-Datei %s: %s" #, fuzzy #~ msgid "Error creating tmp file %s for remote lock with mkstemp(): %s" #~ msgstr "Fehler bei Anlegen von %s mit mkstemp(): %s" #, fuzzy #~ msgid "Error writing to tmp lock file for remote lock %s: %s" #~ msgstr "Fehler beim Schreiben zu tmp lock Datei %s: %s" #, fuzzy #~ msgid "Warning: closing tmp lock file for remote lock %s failed" #~ msgstr "Warnung: Schließen von tmp Lock Datei %s fehlgeschlagen" #, fuzzy #~ msgid "Error renaming tmp file %s to lock file %s for remote lock: %s" #~ msgstr "Fehler bei Umbenennen von temporärer Datei %s zu Lock_datei %s: %s" #, fuzzy #~ msgid "" #~ "Error renaming lock file for remote lock, even though rename() did not " #~ "return an error: %s" #~ msgstr "" #~ "Fehler bei Umbenennen von Lock-Datei, obwohl rename() keinen Fehler " #~ "zurücklieferte" #, fuzzy #~ msgid "" #~ "Error opening lock file for remote lock we just renamed successfully %s: %" #~ "s" #~ msgstr "" #~ "Fehler bei Öfnnen von Lock-Datei die gerade erfolgreich umgenannt wurde %" #~ "s: %s" #, fuzzy #~ msgid "" #~ "The remote cache file is currently locked with a valid lock, will " #~ "download from source" #~ msgstr "Die Datei ist derzeit gelockt mit einem gültigen Lock" #, fuzzy #~ msgid "Failed to create file %s for writing: %s" #~ msgstr "Fehler bei Anlegen von Datei %s zum Schreiben: %s" #, fuzzy #~ msgid "Error: Cache file %s does not exist" #~ msgstr "Cache-Datei %s existiert nicht" #~ msgid "Failed to change permissions of session dir to 0700: %s" #~ msgstr "Konnte Zugriffsrechte auf session dir nicht ändern zu 0700: %s" #~ msgid "Error opening per-job dir %s: %s" #~ msgstr "Fehler bei Öffnen von per-job Verzeichnis %s. 
%s" #, fuzzy #~ msgid "Mismatching url in file %s: %s Expected %s" #~ msgstr "Nicht-Übereinstimmung von URL in Datei %s: %s erwartete %s" #, fuzzy #~ msgid "Bad separator in file %s: %s" #~ msgstr "Ungültges Trennzeichen in Datei %s: %s" #, fuzzy #~ msgid "Bad value of expiry time in %s: %s" #~ msgstr "Ungültiger Wert für expiry time in %s: %s" #, fuzzy #~ msgid "Error opening lock file %s: %s" #~ msgstr "Fehler bei Öffnen von Lock Datei %s: %s" #, fuzzy #~ msgid "Can't read user list in specified file %s" #~ msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #, fuzzy #~ msgid "%s: State: %s: failed to create temporary proxy for renew: %s" #~ msgstr "Fehler bei Erstellen von Info-Datei %s: %s" #, fuzzy #~ msgid "%s: adding to transfer share %s" #~ msgstr "Datentransfer abgebrochen: %s" #, fuzzy #~ msgid "NULL response" #~ msgstr "Keine SOAP Antwort" #, fuzzy #~ msgid "" #~ "Not authorized from Charon service; Some of the RequestItem does not " #~ "satisfy Policy" #~ msgstr "" #~ "UnAuthorisiert von arc.pdp; einige der ReqestItems genügen nicht der " #~ "Policy" #, fuzzy #~ msgid "Loading policy from %s" #~ msgstr "Lade python broker (%i)" #, fuzzy #~ msgid "Old policy times: %u/%u" #~ msgstr "Policy Zeile: %s" #, fuzzy #~ msgid "New policy times: %u/%u" #~ msgstr "Policy Zeile: %s" #, fuzzy #~ msgid "Misisng or empty CertificatePath element in the configuration!" #~ msgstr "Fehlendes oder leeres CertificatePath Element" #, fuzzy #~ msgid "Missing or empty CACertificatesDir element in the configuration!" #~ msgstr "Fehlendes oder leeres CertificatePath Element" #, fuzzy #~ msgid "Missing or empty CACertificatePath element in the configuration!" #~ msgstr "Fehlendes oder leeres CertificatePath Element" #~ msgid "" #~ "Thread %d: Task %d Result:\n" #~ "%s\n" #~ msgstr "" #~ "Поток %d: Задание %d Результат:\n" #~ "%s\n" #~ msgid "Is connected to database? %s" #~ msgstr "Ist verbunden mit Datenban? %s" #~ msgid "Can not decrypt the EncryptedID from saml assertion" #~ msgstr "Konnte die Encrypted ID von SAML Assertion nicht entschlüsseln" #~ msgid "Decrypted SAML NameID: %s" #~ msgstr "Entschlüsselter SAML NameID: %s" #~ msgid "Request succeeded!!!" #~ msgstr "Anfragen Erfolgreich!!!" 
#~ msgid "%d <> %d" #~ msgstr "%d <> %d" #~ msgid "Invalid status report" #~ msgstr "Ungültiger Status Report:" #~ msgid "%s reports job status of %s but it is running on %s" #~ msgstr "%s berichtet Job Status von %s aber läuft auf %s" #~ msgid "%s try to status change: %s->%s" #~ msgstr "%s versuch Status zu ändern: %s -> %s" #~ msgid "refresh: Cannot abort transaction: %s" #~ msgstr "refresh: Kann Transaktion nicht abbrechen: %s" #~ msgid "refresh: Error during transaction: %s" #~ msgstr "refresh: Fehler bei Transaktion: %s" #~ msgid "operator[]: Cannot abort transaction: %s" #~ msgstr "operator[]: Kann Transaktion nicht abbrechen: %s" #~ msgid "remove: Cannot abort transaction: %s" #~ msgstr "remove: Kann Transaktion nicht abbrechen: %s" #, fuzzy #~ msgid "There is no X509Request node in the request message" #~ msgstr "Es ist kein X509Request Knoten in der request Nachricht" #~ msgid "Composed DN: %s" #~ msgstr "Zusammengestellte DN: %s" #~ msgid "CentralAHash constructor called" #~ msgstr "CentralAHash Konstruktor aufgerufen" #~ msgid "Error importing class" #~ msgstr "Fehler bei Importieren von Klasse" #~ msgid "ReplicatedAHash constructor called" #~ msgstr "ReplicatedAHash aconstructor aufgrufen" #, fuzzy #~ msgid "sending message of length" #~ msgstr "sende Nachricht der Läng %d an %s" #~ msgid "sendt message, success=%s" #~ msgstr "Nachricht gesende, Erfolg=%s" #~ msgid "processing message..." #~ msgstr "verarbeite Nachricht" #~ msgid "processing message... Finished" #~ msgstr "Verarbeiten der Nachricht beendet" #~ msgid "Couldn't start replication manager." #~ msgstr "Konnte replication manager nicht starten" #~ msgid "Could not find checking period, using default 10s" #~ msgstr "Konnte checking period nicht finden, nutze Voreinstellung von 10s" #~ msgid "Bad cache size or no cache size configured, using 10MB" #~ msgstr "" #~ "Ungültige cache Größe oder keine cache Größe konfiguriert, nutze 10MB" #~ msgid "master locking" #~ msgstr "master setzt lock" #~ msgid "unlocking" #~ msgstr "entferne lock" #~ msgid "unlocked" #~ msgstr "lock entfernt" #~ msgid "couldn't unlock" #~ msgstr "konnte lock nicht entfernen" #~ msgid "checkingThread slept %d s" #~ msgstr "checkingThread schlief %d s" #, fuzzy #~ msgid "wrote ahash list %s" #~ msgstr "schrieb ahash Liste %s" #~ msgid "but dbenv wasn't ready." 
#~ msgstr "aber dbenv war nicht bereit" #~ msgid "Couldn't start replication framework" #~ msgstr "Konnte replication framework nicht starten" #, fuzzy #~ msgid "entered election thread" #~ msgstr "Starte Auswahl-Thread" #, fuzzy #~ msgid "%s: my role is" #~ msgstr "%s: meine Rolle ist %d" #, fuzzy #~ msgid "%s: my role is now" #~ msgstr "%s: meine Rolle ist nun %d" #, fuzzy #~ msgid "Couldn't run election" #~ msgstr "Konnte Auswahl nicht vornehmen" #, fuzzy #~ msgid "num_reps is %(nr)d, votes is %(v)d, hostMap is %(hm)s" #~ msgstr "num_reps ist %d, Stimmen sind %d, hostMap ist %s" #~ msgid "entering startElection" #~ msgstr "Start von startElection" #~ msgid "new role" #~ msgstr "neue Rolle" #~ msgid "Couldn't begin role" #~ msgstr "Konnte Rolle nicht beginnen" #~ msgid "entering send" #~ msgstr "Start von send" #, fuzzy #~ msgid "failed to send to" #~ msgstr "Fehler beim Senden von body" #~ msgid "Master is offline, starting re-election" #~ msgstr "Master ist offline, starte Neuwahl" #~ msgid "entering repSend" #~ msgstr "Starte repSend" #~ msgid "entering sendNewSiteMsg" #~ msgstr "Start von sendNewSiteMsg" #~ msgid "entering sendHeartbeatMsg" #~ msgstr "Start von sendHeartbeatMsg" #~ msgid "entering sendNewMasterMsg" #~ msgstr "Start von sendNewMasterMsg" #~ msgid "entering processMessage from " #~ msgstr "verarbeite processMessage von " #~ msgid "received message from myself!" #~ msgstr "erhielt Nachricht von mir selbst!" #~ msgid "received from new sender or sender back online" #~ msgstr "erhalten von neuem Sender der Sender ist wieder online" #~ msgid "received master id" #~ msgstr "erhielt master id" #~ msgid "received HEARTBEAT_MESSAGE" #~ msgstr "erhielt HEARTBEAT_MESSAGE" #~ msgid "received ELECTION_MESSAGE" #~ msgstr "erhielt ELECTION_MESSAGE" #~ msgid "received NEWSITE_MESSAGE" #~ msgstr "erhielt NEWSITE_MESSAGE" #~ msgid "processing message from %d" #~ msgstr "Verarbeite Nachricht von %d" #~ msgid "received DB_REP_NEWSITE from %s" #~ msgstr "erhielt DB_REP_NEWSITE von %s" #~ msgid "received DB_REP_HOLDELECTION" #~ msgstr "erhielt DB_REP_HODLELECTION" #~ msgid "REP_ISPERM returned for LSN %s" #~ msgstr "REP_ISPERM erhalten für LSN %s" #~ msgid "REP_NOTPERM returned for LSN %s" #~ msgstr "REP_NOTPERM erhalten für LSN %s" #~ msgid "REP_DUPMASTER received, starting new election" #~ msgstr "REP_DUPMASTER erhalten, stare neue Verbindung" #~ msgid "REP_IGNORE received" #~ msgstr "REP_IGNORE erhalten" #~ msgid "JOIN_FAILURE received" #~ msgstr "JOIN_FAILURE erhalten" #~ msgid "I am now a master" #~ msgstr "Ich bin nun ein master" #~ msgid "received DB_EVENT_REP_MASTER" #~ msgstr "erhielt DB_EVEN_REP_MASTER" #~ msgid "I am now a client" #~ msgstr "Ich bin nun ein Client" #~ msgid "Getting permission failed" #~ msgstr "Erlaubnis nicht erhalten" #~ msgid "New master elected" #~ msgstr "Neuer Master ausgewählt" #~ msgid "I won the election: I am the MASTER" #~ msgstr "Ich gewann die Auswahl: Ich bin der MASTER" #, fuzzy #~ msgid "Oops! Internal DB panic!" #~ msgstr "Ooops! Interne DB Panik!" #~ msgid "accessing gateway: %s" #~ msgstr "greife zu auf gateway: %s" #~ msgid "This bartender does not support gateway" #~ msgstr "Dieser Bartender benötigt keinen support gateway" #~ msgid "" #~ "cannot connect to gateway. Access of third party store required gateway." #~ msgstr "" #~ "kann nicht verbinden zu Gateway. Zugang zu store Dritter benötigt einen " #~ "Gatway." 
#~ msgid "Got Librarian URLs from the config:" #~ msgstr "Erhielt Librarian URLs von Konfiguration:" #, fuzzy #~ msgid "Librarian URL or ISIS URL not found in the configuration." #~ msgstr "Librarian URL oder ISIS URL nicht gefunden in der Konfiguration." #~ msgid "Got ISIS URL, starting initThread" #~ msgstr "Erhielt ISIS URL, startete initThread" #~ msgid "Trying to get Librarian from" #~ msgstr "Versuche Librarian zu erhalten von" #~ msgid "Got Librarian from ISIS:" #~ msgstr "Erhielt Librarian von ISIS:" #, fuzzy #~ msgid "Error connecting to ISIS %{iu}s, reason: %{r}s" #~ msgstr "Fehler beim Verbinden zu ISIS %s, Grund: %s" #~ msgid "Error in initThread: %s" #~ msgstr "Fehler in initThread: %s" #~ msgid "initThread finished, starting isisThread" #~ msgstr "initThread beended, starte isisThread" #, fuzzy #~ msgid "Error in isisThread: %s" #~ msgstr "Fehler in isisThread: %s" #~ msgid "//// _traverse request trailing slash removed:" #~ msgstr "//// bei _traverse Anfrage wurde terminaler Schrägstrich entfernt" #~ msgid "adding" #~ msgstr "beim Hinzufügen" #~ msgid "modifyMetadata response" #~ msgstr "modifyMetadata Antwort" #~ msgid "modifyMetadata failed, removing the new librarian entry" #~ msgstr "modifyMetadata failed, entferne den enuen librarian Eintrag" #~ msgid "Error creating new entry in Librarian: %s" #~ msgstr "Fehler beim Anlegen eines neuen Eintrags in Librarian: %s" #~ msgid "//// response from the external store:" #~ msgstr "//// Antwort von ausgewähltem store:" #~ msgid "location chosen:" #~ msgstr "ausgewählte Lokalisation:" #, fuzzy #~ msgid "ERROR from the chosen Shepherd" #~ msgstr "FEHLER bei ausgewähltem Shepherd" #~ msgid "addReplica" #~ msgstr "addReplica" #~ msgid "Registered Shepherds in Librarian" #~ msgstr "Registrierte Shepherds bei Librarian" #~ msgid "Alive Shepherds:" #~ msgstr "Aktive Shepherds:" #~ msgid "LN" #~ msgstr "LN" #~ msgid "metadata" #~ msgstr "Metadaten" #~ msgid "\\/\\/" #~ msgstr "\\/\\/" #~ msgid "removing" #~ msgstr "am Entfernen" #~ msgid "" #~ "The directory for storing proxies is not available. Proxy delegation " #~ "disabled." #~ msgstr "" #~ "Das Verzeichnis für die Ablage von Proxies ist nicht verfügbar. Proxy " #~ "delegation ausgesetzt." #~ msgid "Delegation status: " #~ msgstr "Delegation status: " #~ msgid "creating proxy file : " #~ msgstr "erstelle Proxy Datei : " #~ msgid "" #~ "cannot access proxy_store, Check the configuration file (service.xml)\n" #~ " Need to have a " #~ msgstr "" #~ "Kann auf Proxy Store nicht zugreifen. Überprüfe die Konfigurationsdatei " #~ "(service.xml)\n" #~ " Es wird ein benötigt" #~ msgid "removeCredentials: %s" #~ msgstr "removeCredentials: %s" #~ msgid "proxy store is not accessable." 
#~ msgstr "Proxy store nicht zugereifbar" #~ msgid "get response: %s" #~ msgstr "Erhalte Antwort: %s" #~ msgid "Error processing report message" #~ msgstr "Fehler bei Verarbeiten von report message" #~ msgid "Error traversing: %s" #~ msgstr "Fehler bei Traversieren: %s" #~ msgid "Error in traverseLN method: %s" #~ msgstr "Fehler in taverseLN Methode: %s" #~ msgid "Trying to get Bartender from" #~ msgstr "Zerstöre JVM" #~ msgid "Got Bartender from ISIS:" #~ msgstr "Erhielt Bartender von ISIS:" #~ msgid "" #~ "\n" #~ "CHECKSUM OK" #~ msgstr "" #~ "\n" #~ "CHECKSUM в порядке" #~ msgid "" #~ "\n" #~ "CHECKSUM MISMATCH" #~ msgstr "" #~ "\n" #~ "CHECKSUM не совпадает" #~ msgid "\n" #~ msgstr "\n" #~ msgid "" #~ "\n" #~ "\n" #~ "File" #~ msgstr "" #~ "\n" #~ "\n" #~ "Файл" #~ msgid "" #~ "\n" #~ "\n" #~ "I have an invalid replica of file" #~ msgstr "" #~ "\n" #~ "\n" #~ "Обнаружена неверная копия файла" #~ msgid "" #~ "\n" #~ "\n" #~ msgstr "" #~ "\n" #~ "\n" #~ msgid "url of myproxy server" #~ msgstr "URL von myproxy Server" #~ msgid "Myproxy server return failure msg" #~ msgstr "Myproxy Server lieferte eine Fehlermeldung zurück." #, fuzzy #~ msgid "Malformated response" #~ msgstr "Antwort hält sich nicht an Format" #~ msgid "srmPing gives v2.2, instantiating v2.2 client" #~ msgstr "srmPing gibt v2.2, instanziierend v2.2 client" #~ msgid "SOAP error with srmPing, instantiating v1 client" #~ msgstr "SOAP Fehler mit srmPing, instanziierende v1 client" #~ msgid "Service error, cannot instantiate SRM client" #~ msgstr "Service Fehler, kann nicht SRM client instanziieren" #~ msgid "start_reading_ftp: size: url: %s" #~ msgstr "start_reading_ftp: url: %s" #, fuzzy #~ msgid "start_reading_ftp: failure" #~ msgstr "start_reading_ftp: Fehler" #~ msgid "start_reading_ftp: timeout waiting for file size" #~ msgstr "start_reading_ftp: Zeitüberschreitung bei Warten auf Dateigröße" #~ msgid "Timeout waiting for FTP file size - cancel transfer" #~ msgstr "" #~ "Zeitüberschreitung bei Warten auf FTP Datei Größe - breche Transfer ab" #~ msgid "start_reading_ftp: failed to get file's size" #~ msgstr "start_reading_ftp: Fehler bei Bezug von Dateigröße" #~ msgid "start_reading_ftp: obtained size: %llu" #~ msgstr "start_reading_ftp: erzielte Größe: %llu" #~ msgid "start_reading_ftp: globus_ftp_client_modification_time failed" #~ msgstr "" #~ "start_reading_ftp: globus_ftp_client_modification_time fehlgeschlagen" #~ msgid "start_reading_ftp: failed to get file's modification time" #~ msgstr "start_reading_ftp: Fehler bei Bezug von Zeitpunkt letzter Änderung" #, fuzzy #~ msgid "start_reading_ftp: range is out of size" #~ msgstr "start_reading_ftp: Größe von Wert verlässt erwarteten Bereich" #~ msgid "%s tried election with %d replicas" #~ msgstr "%s initiierte Auswahl mit %d Replicas" #, fuzzy #~ msgid "store job descriptions in local sandbox." #~ msgstr "Lege Job Beschreibung in lokaler Sandbox ab." 
nordugrid-arc-5.4.2/po/PaxHeaders.7502/POTFILES.in0000644000000000000000000000013113214316031017360 xustar000000000000000030 mtime=1513200665.379813283 29 atime=1513200665.56681557 30 ctime=1513200668.595852616 nordugrid-arc-5.4.2/po/POTFILES.in0000644000175000002070000012336713214316031017443 0ustar00mockbuildmock00000000000000src/Test.cpp src/clients/cache/__init__.py src/clients/cache/cache.py src/clients/compute/arccat.cpp src/clients/compute/arcclean.cpp src/clients/compute/arcget.cpp src/clients/compute/arcinfo.cpp src/clients/compute/arckill.cpp src/clients/compute/arcrenew.cpp src/clients/compute/arcresub.cpp src/clients/compute/arcresume.cpp src/clients/compute/arcstat.cpp src/clients/compute/arcsub.cpp src/clients/compute/arcsync.cpp src/clients/compute/arctest.cpp src/clients/compute/utils.cpp src/clients/compute/utils.h src/clients/credentials/arcproxy.cpp src/clients/credentials/arcproxy.h src/clients/credentials/arcproxy_myproxy.cpp src/clients/credentials/arcproxy_proxy.cpp src/clients/credentials/arcproxy_voms.cpp src/clients/credentials/arcproxyalt.cpp src/clients/credentials/test2myproxyserver_get.cpp src/clients/credentials/test2myproxyserver_put.cpp src/clients/credentials/test2vomsserver.cpp src/clients/data/arccp.cpp src/clients/data/arcls.cpp src/clients/data/arcmkdir.cpp src/clients/data/arcrename.cpp src/clients/data/arcrm.cpp src/clients/echo/arcecho.cpp src/clients/saml/saml_assertion_init.cpp src/clients/wsrf/arcwsrf.cpp src/doxygen/add-bindings-deviations-to-dox.py src/doxygen/add-java-getter-setter-method-notice.py src/doxygen/create-mapping-documentation.py src/external/cJSON/cJSON.h src/hed/acc/ARC0/DescriptorsARC0.cpp src/hed/acc/ARC0/FTPControl.cpp src/hed/acc/ARC0/FTPControl.h src/hed/acc/ARC0/JobControllerPluginARC0.cpp src/hed/acc/ARC0/JobControllerPluginARC0.h src/hed/acc/ARC0/JobStateARC0.cpp src/hed/acc/ARC0/JobStateARC0.h src/hed/acc/ARC0/SubmitterPluginARC0.cpp src/hed/acc/ARC0/SubmitterPluginARC0.h src/hed/acc/ARC1/AREXClient.cpp src/hed/acc/ARC1/AREXClient.h src/hed/acc/ARC1/DescriptorsARC1.cpp src/hed/acc/ARC1/JobControllerPluginARC1.cpp src/hed/acc/ARC1/JobControllerPluginARC1.h src/hed/acc/ARC1/JobControllerPluginBES.cpp src/hed/acc/ARC1/JobControllerPluginBES.h src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp src/hed/acc/ARC1/JobListRetrieverPluginARC1.h src/hed/acc/ARC1/JobListRetrieverPluginWSRFBES.cpp src/hed/acc/ARC1/JobListRetrieverPluginWSRFBES.h src/hed/acc/ARC1/JobStateARC1.cpp src/hed/acc/ARC1/JobStateARC1.h src/hed/acc/ARC1/JobStateBES.cpp src/hed/acc/ARC1/JobStateBES.h src/hed/acc/ARC1/SubmitterPluginARC1.cpp src/hed/acc/ARC1/SubmitterPluginARC1.h src/hed/acc/ARC1/TargetInformationRetrieverPluginBES.cpp src/hed/acc/ARC1/TargetInformationRetrieverPluginBES.h src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.h src/hed/acc/ARC1/test/AREXClientTest.cpp src/hed/acc/Broker/BenchmarkBrokerPlugin.cpp src/hed/acc/Broker/BenchmarkBrokerPlugin.h src/hed/acc/Broker/DataBrokerPlugin.cpp src/hed/acc/Broker/DataBrokerPlugin.h src/hed/acc/Broker/DescriptorsBroker.cpp src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp src/hed/acc/Broker/FastestQueueBrokerPlugin.h src/hed/acc/Broker/NullBrokerPlugin.h src/hed/acc/Broker/RandomBrokerPlugin.h src/hed/acc/Broker/test/BenchmarkBrokerTest.cpp src/hed/acc/CREAM/CREAMClient.cpp src/hed/acc/CREAM/CREAMClient.h src/hed/acc/CREAM/DescriptorsCREAM.cpp src/hed/acc/CREAM/JobControllerPluginCREAM.cpp src/hed/acc/CREAM/JobControllerPluginCREAM.h 
src/hed/acc/CREAM/JobListRetrieverPluginWSRFCREAM.cpp src/hed/acc/CREAM/JobListRetrieverPluginWSRFCREAM.h src/hed/acc/CREAM/JobStateCREAM.cpp src/hed/acc/CREAM/JobStateCREAM.h src/hed/acc/CREAM/SubmitterPluginCREAM.cpp src/hed/acc/CREAM/SubmitterPluginCREAM.h src/hed/acc/EMIES/DescriptorsEMIES.cpp src/hed/acc/EMIES/EMIESClient.cpp src/hed/acc/EMIES/EMIESClient.h src/hed/acc/EMIES/JobControllerPluginEMIES.cpp src/hed/acc/EMIES/JobControllerPluginEMIES.h src/hed/acc/EMIES/JobListRetrieverPluginEMIES.cpp src/hed/acc/EMIES/JobListRetrieverPluginEMIES.h src/hed/acc/EMIES/JobStateEMIES.cpp src/hed/acc/EMIES/JobStateEMIES.h src/hed/acc/EMIES/SubmitterPluginEMIES.cpp src/hed/acc/EMIES/SubmitterPluginEMIES.h src/hed/acc/EMIES/TargetInformationRetrieverPluginEMIES.cpp src/hed/acc/EMIES/TargetInformationRetrieverPluginEMIES.h src/hed/acc/EMIES/TestEMIESClient.cpp src/hed/acc/JobDescriptionParser/ADLParser.cpp src/hed/acc/JobDescriptionParser/ADLParser.h src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp src/hed/acc/JobDescriptionParser/ARCJSDLParser.h src/hed/acc/JobDescriptionParser/DescriptorsJobDescriptionParser.cpp src/hed/acc/JobDescriptionParser/JDLParser.cpp src/hed/acc/JobDescriptionParser/JDLParser.h src/hed/acc/JobDescriptionParser/RSLParser.cpp src/hed/acc/JobDescriptionParser/RSLParser.h src/hed/acc/JobDescriptionParser/XMLNodeRecover.cpp src/hed/acc/JobDescriptionParser/XMLNodeRecover.h src/hed/acc/JobDescriptionParser/XRSLParser.cpp src/hed/acc/JobDescriptionParser/XRSLParser.h src/hed/acc/JobDescriptionParser/test/ADLParserTest.cpp src/hed/acc/JobDescriptionParser/test/ARCJSDLParserTest.cpp src/hed/acc/JobDescriptionParser/test/JDLParserTest.cpp src/hed/acc/JobDescriptionParser/test/XRSLParserTest.cpp src/hed/acc/PythonBroker/ACIXBroker.py src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp src/hed/acc/PythonBroker/PythonBrokerPlugin.h src/hed/acc/PythonBroker/SampleBroker.py src/hed/acc/SER/DescriptorsSER.cpp src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.h src/hed/acc/SER/ServiceEndpointRetrieverPluginEMIR.cpp src/hed/acc/SER/ServiceEndpointRetrieverPluginEMIR.h src/hed/acc/TEST/BrokerPluginTestACC.h src/hed/acc/TEST/JobControllerPluginTestACC.cpp src/hed/acc/TEST/JobControllerPluginTestACC.h src/hed/acc/TEST/JobDescriptionParserPluginTestACC.cpp src/hed/acc/TEST/JobDescriptionParserPluginTestACC.h src/hed/acc/TEST/JobListRetrieverPluginTEST.cpp src/hed/acc/TEST/JobListRetrieverPluginTEST.h src/hed/acc/TEST/ServiceEndpointRetrieverPluginTEST.cpp src/hed/acc/TEST/ServiceEndpointRetrieverPluginTEST.h src/hed/acc/TEST/SubmitterPluginTestACC.cpp src/hed/acc/TEST/SubmitterPluginTestACC.h src/hed/acc/TEST/TargetInformationRetrieverPluginTEST.cpp src/hed/acc/TEST/TargetInformationRetrieverPluginTEST.h src/hed/acc/TEST/TestACCPluginDescriptors.cpp src/hed/acc/UNICORE/DescriptorsUNICORE.cpp src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp src/hed/acc/UNICORE/JobControllerPluginUNICORE.h src/hed/acc/UNICORE/JobStateUNICORE.cpp src/hed/acc/UNICORE/JobStateUNICORE.h src/hed/acc/UNICORE/SubmitterPluginUNICORE.cpp src/hed/acc/UNICORE/SubmitterPluginUNICORE.h src/hed/acc/UNICORE/UNICOREClient.cpp src/hed/acc/UNICORE/UNICOREClient.h src/hed/acc/ldap/Descriptors.cpp src/hed/acc/ldap/Extractor.h src/hed/acc/ldap/JobListRetrieverPluginLDAPGLUE2.cpp src/hed/acc/ldap/JobListRetrieverPluginLDAPGLUE2.h src/hed/acc/ldap/JobListRetrieverPluginLDAPNG.cpp src/hed/acc/ldap/JobListRetrieverPluginLDAPNG.h 
src/hed/acc/ldap/ServiceEndpointRetrieverPluginBDII.cpp src/hed/acc/ldap/ServiceEndpointRetrieverPluginBDII.h src/hed/acc/ldap/ServiceEndpointRetrieverPluginEGIIS.cpp src/hed/acc/ldap/ServiceEndpointRetrieverPluginEGIIS.h src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE1.cpp src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE1.h src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE2.cpp src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE2.h src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPNG.cpp src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPNG.h src/hed/client.cpp src/hed/daemon/options.cpp src/hed/daemon/options.h src/hed/daemon/unix/daemon.cpp src/hed/daemon/unix/daemon.h src/hed/daemon/unix/main_unix.cpp src/hed/daemon/win32/main_win32.cpp src/hed/daemon/win32/paul_gui.cpp src/hed/daemon/win32/paul_gui.h src/hed/dmc/acix/DataPointACIX.cpp src/hed/dmc/acix/DataPointACIX.h src/hed/dmc/arc/DataPointARC.cpp src/hed/dmc/arc/DataPointARC.h src/hed/dmc/file/DataPointFile.cpp src/hed/dmc/file/DataPointFile.h src/hed/dmc/gfal/DataPointGFAL.cpp src/hed/dmc/gfal/DataPointGFAL.h src/hed/dmc/gfal/GFALTransfer3rdParty.cpp src/hed/dmc/gfal/GFALTransfer3rdParty.h src/hed/dmc/gfal/GFALUtils.cpp src/hed/dmc/gfal/GFALUtils.h src/hed/dmc/gridftp/DataPointGridFTP.cpp src/hed/dmc/gridftp/DataPointGridFTP.h src/hed/dmc/gridftp/Lister.cpp src/hed/dmc/gridftp/Lister.h src/hed/dmc/http/DataPointHTTP.cpp src/hed/dmc/http/DataPointHTTP.h src/hed/dmc/http/StreamBuffer.cpp src/hed/dmc/http/StreamBuffer.h src/hed/dmc/ldap/DataPointLDAP.cpp src/hed/dmc/ldap/DataPointLDAP.h src/hed/dmc/ldap/LDAPQuery.cpp src/hed/dmc/ldap/LDAPQuery.h src/hed/dmc/mock/DataPointMock.cpp src/hed/dmc/mock/DataPointMock.h src/hed/dmc/rucio/DataPointRucio.cpp src/hed/dmc/rucio/DataPointRucio.h src/hed/dmc/s3/DataPointS3.cpp src/hed/dmc/s3/DataPointS3.h src/hed/dmc/srm/DataPointSRM.cpp src/hed/dmc/srm/DataPointSRM.h src/hed/dmc/srm/srmclient/SRM1Client.cpp src/hed/dmc/srm/srmclient/SRM1Client.h src/hed/dmc/srm/srmclient/SRM22Client.cpp src/hed/dmc/srm/srmclient/SRM22Client.h src/hed/dmc/srm/srmclient/SRMClient.cpp src/hed/dmc/srm/srmclient/SRMClient.h src/hed/dmc/srm/srmclient/SRMClientRequest.h src/hed/dmc/srm/srmclient/SRMInfo.cpp src/hed/dmc/srm/srmclient/SRMInfo.h src/hed/dmc/srm/srmclient/SRMURL.cpp src/hed/dmc/srm/srmclient/SRMURL.h src/hed/dmc/xrootd/DataPointXrootd.cpp src/hed/dmc/xrootd/DataPointXrootd.h src/hed/identitymap/ArgusPDPClient.cpp src/hed/identitymap/ArgusPDPClient.h src/hed/identitymap/ArgusPEPClient.cpp src/hed/identitymap/ArgusPEPClient.h src/hed/identitymap/ArgusXACMLConstant.h src/hed/identitymap/IdentityMap.cpp src/hed/identitymap/IdentityMap.h src/hed/identitymap/SimpleMap.cpp src/hed/identitymap/SimpleMap.h src/hed/libs/common/ArcConfig.cpp src/hed/libs/common/ArcConfig.h src/hed/libs/common/ArcConfigFile.cpp src/hed/libs/common/ArcConfigFile.h src/hed/libs/common/ArcConfigIni.cpp src/hed/libs/common/ArcConfigIni.h src/hed/libs/common/ArcLocation.cpp src/hed/libs/common/ArcLocation.h src/hed/libs/common/ArcRegex.cpp src/hed/libs/common/ArcRegex.h src/hed/libs/common/ArcVersion.cpp src/hed/libs/common/ArcVersion.h src/hed/libs/common/Base64.cpp src/hed/libs/common/Base64.h src/hed/libs/common/CheckSum.cpp src/hed/libs/common/CheckSum.h src/hed/libs/common/Counter.cpp src/hed/libs/common/Counter.h src/hed/libs/common/DBInterface.h src/hed/libs/common/DateTime.cpp src/hed/libs/common/DateTime.h src/hed/libs/common/FileAccess.cpp src/hed/libs/common/FileAccess.h 
src/hed/libs/common/FileLock.cpp src/hed/libs/common/FileLock.h src/hed/libs/common/FileUtils.cpp src/hed/libs/common/FileUtils.h src/hed/libs/common/GUID.cpp src/hed/libs/common/GUID.h src/hed/libs/common/HostnameResolver.cpp src/hed/libs/common/HostnameResolver.h src/hed/libs/common/IString.cpp src/hed/libs/common/IString.h src/hed/libs/common/IniConfig.cpp src/hed/libs/common/IniConfig.h src/hed/libs/common/IntraProcessCounter.cpp src/hed/libs/common/IntraProcessCounter.h src/hed/libs/common/JobPerfLog.cpp src/hed/libs/common/JobPerfLog.h src/hed/libs/common/Logger.cpp src/hed/libs/common/Logger.h src/hed/libs/common/MysqlWrapper.cpp src/hed/libs/common/MysqlWrapper.h src/hed/libs/common/OptionParser.cpp src/hed/libs/common/OptionParser.h src/hed/libs/common/Profile.cpp src/hed/libs/common/Profile.h src/hed/libs/common/Run.h src/hed/libs/common/Run_unix.cpp src/hed/libs/common/Run_win32.cpp src/hed/libs/common/StringConv.cpp src/hed/libs/common/StringConv.h src/hed/libs/common/Thread.cpp src/hed/libs/common/Thread.h src/hed/libs/common/URL.cpp src/hed/libs/common/URL.h src/hed/libs/common/User.cpp src/hed/libs/common/User.h src/hed/libs/common/UserConfig.cpp src/hed/libs/common/UserConfig.h src/hed/libs/common/Utils.cpp src/hed/libs/common/Utils.h src/hed/libs/common/Watchdog.h src/hed/libs/common/XMLNode.cpp src/hed/libs/common/XMLNode.h src/hed/libs/common/file_access.cpp src/hed/libs/common/file_access.h src/hed/libs/common/hostname_resolver.cpp src/hed/libs/common/hostname_resolver.h src/hed/libs/common/test/ArcRegexTest.cpp src/hed/libs/common/test/Base64Test.cpp src/hed/libs/common/test/CheckSumTest.cpp src/hed/libs/common/test/EnvTest.cpp src/hed/libs/common/test/FileAccessTest.cpp src/hed/libs/common/test/FileLockTest.cpp src/hed/libs/common/test/FileUtilsTest.cpp src/hed/libs/common/test/LoggerTest.cpp src/hed/libs/common/test/MysqlWrapperTest.cpp src/hed/libs/common/test/ProfileTest.cpp src/hed/libs/common/test/RunTest.cpp src/hed/libs/common/test/StringConvTest.cpp src/hed/libs/common/test/ThreadTest.cpp src/hed/libs/common/test/URLTest.cpp src/hed/libs/common/test/UserConfigTest.cpp src/hed/libs/common/test/UserTest.cpp src/hed/libs/common/test/WatchdogTest.cpp src/hed/libs/common/test/XMLNodeTest.cpp src/hed/libs/common/win32.cpp src/hed/libs/common/win32.h src/hed/libs/communication/ClientInterface.cpp src/hed/libs/communication/ClientInterface.h src/hed/libs/communication/ClientSAML2SSO.cpp src/hed/libs/communication/ClientSAML2SSO.h src/hed/libs/communication/ClientX509Delegation.cpp src/hed/libs/communication/ClientX509Delegation.h src/hed/libs/communication/test/SimulatorClasses.cpp src/hed/libs/communication/test/SimulatorClasses.h src/hed/libs/compute/Broker.cpp src/hed/libs/compute/Broker.h src/hed/libs/compute/BrokerPlugin.cpp src/hed/libs/compute/BrokerPlugin.h src/hed/libs/compute/ComputingServiceRetriever.cpp src/hed/libs/compute/ComputingServiceRetriever.h src/hed/libs/compute/Endpoint.cpp src/hed/libs/compute/Endpoint.h src/hed/libs/compute/EndpointQueryingStatus.cpp src/hed/libs/compute/EndpointQueryingStatus.h src/hed/libs/compute/EntityRetriever.cpp src/hed/libs/compute/EntityRetriever.h src/hed/libs/compute/EntityRetrieverPlugin.cpp src/hed/libs/compute/EntityRetrieverPlugin.h src/hed/libs/compute/ExecutionTarget.cpp src/hed/libs/compute/ExecutionTarget.h src/hed/libs/compute/GLUE2.cpp src/hed/libs/compute/GLUE2.h src/hed/libs/compute/GLUE2Entity.h src/hed/libs/compute/Job.cpp src/hed/libs/compute/Job.h src/hed/libs/compute/JobControllerPlugin.cpp 
src/hed/libs/compute/JobControllerPlugin.h src/hed/libs/compute/JobDescription.cpp src/hed/libs/compute/JobDescription.h src/hed/libs/compute/JobDescriptionParserPlugin.cpp src/hed/libs/compute/JobDescriptionParserPlugin.h src/hed/libs/compute/JobInformationStorage.h src/hed/libs/compute/JobInformationStorageBDB.cpp src/hed/libs/compute/JobInformationStorageBDB.h src/hed/libs/compute/JobInformationStorageDescriptor.cpp src/hed/libs/compute/JobInformationStorageSQLite.cpp src/hed/libs/compute/JobInformationStorageSQLite.h src/hed/libs/compute/JobInformationStorageXML.cpp src/hed/libs/compute/JobInformationStorageXML.h src/hed/libs/compute/JobState.cpp src/hed/libs/compute/JobState.h src/hed/libs/compute/JobSupervisor.cpp src/hed/libs/compute/JobSupervisor.h src/hed/libs/compute/Software.cpp src/hed/libs/compute/Software.h src/hed/libs/compute/SubmissionStatus.h src/hed/libs/compute/Submitter.cpp src/hed/libs/compute/Submitter.h src/hed/libs/compute/SubmitterPlugin.cpp src/hed/libs/compute/SubmitterPlugin.h src/hed/libs/compute/TestACCControl.cpp src/hed/libs/compute/TestACCControl.h src/hed/libs/compute/examples/basic_job_submission.cpp src/hed/libs/compute/examples/job_selector.cpp src/hed/libs/compute/test/BrokerTest.cpp src/hed/libs/compute/test/ComputingServiceUniqTest.cpp src/hed/libs/compute/test/ExecutionTargetTest.cpp src/hed/libs/compute/test/JobControllerPluginTest.cpp src/hed/libs/compute/test/JobDescriptionParserPluginTest.cpp src/hed/libs/compute/test/JobDescriptionTest.cpp src/hed/libs/compute/test/JobInformationStorageTest.cpp src/hed/libs/compute/test/JobListRetrieverTest.cpp src/hed/libs/compute/test/JobStateTest.cpp src/hed/libs/compute/test/JobSupervisorTest.cpp src/hed/libs/compute/test/JobTest.cpp src/hed/libs/compute/test/ServiceEndpointRetrieverTest.cpp src/hed/libs/compute/test/SoftwareTest.cpp src/hed/libs/compute/test/SubmissionStatusTest.cpp src/hed/libs/compute/test/SubmitterPluginTest.cpp src/hed/libs/compute/test/SubmitterTest.cpp src/hed/libs/compute/test/TargetInformationRetrieverTest.cpp src/hed/libs/compute/test_JobInformationStorage.cpp src/hed/libs/compute/test_jobdescription.cpp src/hed/libs/credential/ARCProxyUtil.cpp src/hed/libs/credential/ARCProxyUtil.h src/hed/libs/credential/CertUtil.cpp src/hed/libs/credential/CertUtil.h src/hed/libs/credential/Credential.cpp src/hed/libs/credential/Credential.h src/hed/libs/credential/NSSUtil.cpp src/hed/libs/credential/NSSUtil.h src/hed/libs/credential/PasswordSource.cpp src/hed/libs/credential/PasswordSource.h src/hed/libs/credential/Proxycertinfo.cpp src/hed/libs/credential/Proxycertinfo.h src/hed/libs/credential/VOMSAttribute.cpp src/hed/libs/credential/VOMSAttribute.h src/hed/libs/credential/VOMSConfig.cpp src/hed/libs/credential/VOMSConfig.h src/hed/libs/credential/VOMSUtil.cpp src/hed/libs/credential/VOMSUtil.h src/hed/libs/credential/listfunc.cpp src/hed/libs/credential/listfunc.h src/hed/libs/credential/nssprivkeyinfocodec.cpp src/hed/libs/credential/nssprivkeyinfocodec.h src/hed/libs/credential/test/CredentialTest.cpp src/hed/libs/credential/test/VOMSUtilTest.cpp src/hed/libs/credential/test/listfuncTest.cpp src/hed/libs/credential/testcertinfo.cpp src/hed/libs/credential/testeec.cpp src/hed/libs/credential/testproxy.cpp src/hed/libs/credential/testproxy2proxy.cpp src/hed/libs/credential/testvoms.cpp src/hed/libs/credentialmod/cred.cpp src/hed/libs/credentialstore/ClientVOMS.cpp src/hed/libs/credentialstore/ClientVOMS.h src/hed/libs/credentialstore/ClientVOMSRESTful.cpp 
src/hed/libs/credentialstore/ClientVOMSRESTful.h src/hed/libs/credentialstore/CredentialStore.cpp src/hed/libs/credentialstore/CredentialStore.h src/hed/libs/crypto/OpenSSL.cpp src/hed/libs/crypto/OpenSSL.h src/hed/libs/cryptomod/crypto.cpp src/hed/libs/data/DataBuffer.cpp src/hed/libs/data/DataBuffer.h src/hed/libs/data/DataCallback.h src/hed/libs/data/DataHandle.h src/hed/libs/data/DataMover.cpp src/hed/libs/data/DataMover.h src/hed/libs/data/DataPoint.cpp src/hed/libs/data/DataPoint.h src/hed/libs/data/DataPointDirect.cpp src/hed/libs/data/DataPointDirect.h src/hed/libs/data/DataPointIndex.cpp src/hed/libs/data/DataPointIndex.h src/hed/libs/data/DataSpeed.cpp src/hed/libs/data/DataSpeed.h src/hed/libs/data/DataStatus.cpp src/hed/libs/data/DataStatus.h src/hed/libs/data/FileCache.cpp src/hed/libs/data/FileCache.h src/hed/libs/data/FileCacheHash.cpp src/hed/libs/data/FileCacheHash.h src/hed/libs/data/FileInfo.h src/hed/libs/data/URLMap.cpp src/hed/libs/data/URLMap.h src/hed/libs/data/examples/DataPointMyProtocol.cpp src/hed/libs/data/examples/partial_copy.cpp src/hed/libs/data/examples/simple_copy.cpp src/hed/libs/data/test/FileCacheTest.cpp src/hed/libs/delegation/DelegationInterface.cpp src/hed/libs/delegation/DelegationInterface.h src/hed/libs/delegation/test/DelegationInterfaceTest.cpp src/hed/libs/deprecated.h src/hed/libs/globusutils/GSSCredential.cpp src/hed/libs/globusutils/GSSCredential.h src/hed/libs/globusutils/GlobusErrorUtils.cpp src/hed/libs/globusutils/GlobusErrorUtils.h src/hed/libs/globusutils/GlobusWorkarounds.cpp src/hed/libs/globusutils/GlobusWorkarounds.h src/hed/libs/infosys/BootstrapISIS.cpp src/hed/libs/infosys/InfoCache.cpp src/hed/libs/infosys/InfoCache.h src/hed/libs/infosys/InfoFilter.cpp src/hed/libs/infosys/InfoFilter.h src/hed/libs/infosys/InfoRegister.cpp src/hed/libs/infosys/InfoRegister.h src/hed/libs/infosys/InformationInterface.cpp src/hed/libs/infosys/InformationInterface.h src/hed/libs/infosys/RegisteredService.cpp src/hed/libs/infosys/RegisteredService.h src/hed/libs/infosys/test/InfoFilterTest.cpp src/hed/libs/infosys/test/InformationInterfaceTest.cpp src/hed/libs/infosys/test/RegisteredServiceTest.cpp src/hed/libs/loader/FinderLoader.cpp src/hed/libs/loader/FinderLoader.h src/hed/libs/loader/Loader.cpp src/hed/libs/loader/Loader.h src/hed/libs/loader/ModuleManager.cpp src/hed/libs/loader/ModuleManager.h src/hed/libs/loader/Plugin.cpp src/hed/libs/loader/Plugin.h src/hed/libs/loader/test/PluginTest.cpp src/hed/libs/loader/test/TestPlugin.cpp src/hed/libs/message/MCC.cpp src/hed/libs/message/MCC.h src/hed/libs/message/MCCLoader.cpp src/hed/libs/message/MCCLoader.h src/hed/libs/message/MCC_Status.cpp src/hed/libs/message/MCC_Status.h src/hed/libs/message/Message.cpp src/hed/libs/message/Message.h src/hed/libs/message/MessageAttributes.cpp src/hed/libs/message/MessageAttributes.h src/hed/libs/message/MessageAuth.cpp src/hed/libs/message/MessageAuth.h src/hed/libs/message/PayloadRaw.cpp src/hed/libs/message/PayloadRaw.h src/hed/libs/message/PayloadSOAP.cpp src/hed/libs/message/PayloadSOAP.h src/hed/libs/message/PayloadStream.cpp src/hed/libs/message/PayloadStream.h src/hed/libs/message/Plexer.cpp src/hed/libs/message/Plexer.h src/hed/libs/message/SOAPEnvelope.cpp src/hed/libs/message/SOAPEnvelope.h src/hed/libs/message/SOAPMessage.cpp src/hed/libs/message/SOAPMessage.h src/hed/libs/message/SecAttr.cpp src/hed/libs/message/SecAttr.h src/hed/libs/message/SecHandler.cpp src/hed/libs/message/SecHandler.h src/hed/libs/message/Service.cpp 
src/hed/libs/message/Service.h src/hed/libs/message/secattr/CIStringValue.cpp src/hed/libs/message/secattr/CIStringValue.h src/hed/libs/message/secattr/SecAttrValue.cpp src/hed/libs/message/secattr/SecAttrValue.h src/hed/libs/message/test/ChainTest.cpp src/hed/libs/message/test/TestMCC.cpp src/hed/libs/message/test/TestService.cpp src/hed/libs/security/ArcPDP/EvaluationCtx.cpp src/hed/libs/security/ArcPDP/EvaluationCtx.h src/hed/libs/security/ArcPDP/Evaluator.cpp src/hed/libs/security/ArcPDP/Evaluator.h src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp src/hed/libs/security/ArcPDP/EvaluatorLoader.h src/hed/libs/security/ArcPDP/PolicyParser.cpp src/hed/libs/security/ArcPDP/PolicyParser.h src/hed/libs/security/ArcPDP/PolicyStore.cpp src/hed/libs/security/ArcPDP/PolicyStore.h src/hed/libs/security/ArcPDP/Request.h src/hed/libs/security/ArcPDP/RequestItem.h src/hed/libs/security/ArcPDP/Response.h src/hed/libs/security/ArcPDP/Result.h src/hed/libs/security/ArcPDP/Source.cpp src/hed/libs/security/ArcPDP/Source.h src/hed/libs/security/ArcPDP/alg/AlgFactory.h src/hed/libs/security/ArcPDP/alg/CombiningAlg.h src/hed/libs/security/ArcPDP/alg/DenyOverridesAlg.cpp src/hed/libs/security/ArcPDP/alg/DenyOverridesAlg.h src/hed/libs/security/ArcPDP/alg/OrderedAlg.cpp src/hed/libs/security/ArcPDP/alg/OrderedAlg.h src/hed/libs/security/ArcPDP/alg/PermitOverridesAlg.cpp src/hed/libs/security/ArcPDP/alg/PermitOverridesAlg.h src/hed/libs/security/ArcPDP/attr/AnyURIAttribute.cpp src/hed/libs/security/ArcPDP/attr/AnyURIAttribute.h src/hed/libs/security/ArcPDP/attr/AttributeFactory.h src/hed/libs/security/ArcPDP/attr/AttributeProxy.h src/hed/libs/security/ArcPDP/attr/AttributeValue.h src/hed/libs/security/ArcPDP/attr/BooleanAttribute.cpp src/hed/libs/security/ArcPDP/attr/BooleanAttribute.h src/hed/libs/security/ArcPDP/attr/DateTimeAttribute.cpp src/hed/libs/security/ArcPDP/attr/DateTimeAttribute.h src/hed/libs/security/ArcPDP/attr/GenericAttribute.cpp src/hed/libs/security/ArcPDP/attr/GenericAttribute.h src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp src/hed/libs/security/ArcPDP/attr/RequestAttribute.h src/hed/libs/security/ArcPDP/attr/StringAttribute.cpp src/hed/libs/security/ArcPDP/attr/StringAttribute.h src/hed/libs/security/ArcPDP/attr/X500NameAttribute.cpp src/hed/libs/security/ArcPDP/attr/X500NameAttribute.h src/hed/libs/security/ArcPDP/fn/EqualFunction.cpp src/hed/libs/security/ArcPDP/fn/EqualFunction.h src/hed/libs/security/ArcPDP/fn/FnFactory.h src/hed/libs/security/ArcPDP/fn/Function.h src/hed/libs/security/ArcPDP/fn/InRangeFunction.cpp src/hed/libs/security/ArcPDP/fn/InRangeFunction.h src/hed/libs/security/ArcPDP/fn/MatchFunction.cpp src/hed/libs/security/ArcPDP/fn/MatchFunction.h src/hed/libs/security/ArcPDP/policy/Policy.cpp src/hed/libs/security/ArcPDP/policy/Policy.h src/hed/libs/security/ClassLoader.cpp src/hed/libs/security/ClassLoader.h src/hed/libs/security/PDP.cpp src/hed/libs/security/PDP.h src/hed/libs/security/Security.cpp src/hed/libs/security/Security.h src/hed/libs/ws-addressing/WSA.cpp src/hed/libs/ws-addressing/WSA.h src/hed/libs/ws-addressing/test.cpp src/hed/libs/ws-security/SAMLToken.cpp src/hed/libs/ws-security/SAMLToken.h src/hed/libs/ws-security/UsernameToken.cpp src/hed/libs/ws-security/UsernameToken.h src/hed/libs/ws-security/X509Token.cpp src/hed/libs/ws-security/X509Token.h src/hed/libs/ws-security/test/SAMLTokenTest.cpp src/hed/libs/ws-security/test/UsernameTokenTest.cpp src/hed/libs/ws-security/test/X509TokenTest.cpp src/hed/libs/ws-security/test_samltoken.cpp 
src/hed/libs/ws-security/test_usernametoken.cpp src/hed/libs/ws-security/test_x509token.cpp src/hed/libs/wsrf/PayloadWSRF.cpp src/hed/libs/wsrf/PayloadWSRF.h src/hed/libs/wsrf/WSRF.cpp src/hed/libs/wsrf/WSRF.h src/hed/libs/wsrf/WSRFBaseFault.cpp src/hed/libs/wsrf/WSRFBaseFault.h src/hed/libs/wsrf/WSResourceProperties.cpp src/hed/libs/wsrf/WSResourceProperties.h src/hed/libs/wsrf/test.cpp src/hed/libs/xmlsec/XMLSecNode.cpp src/hed/libs/xmlsec/XMLSecNode.h src/hed/libs/xmlsec/XmlSecUtils.cpp src/hed/libs/xmlsec/XmlSecUtils.h src/hed/libs/xmlsec/saml_util.cpp src/hed/libs/xmlsec/saml_util.h src/hed/libs/xmlsec/test_xmlsecnode.cpp src/hed/mcc/http/MCCHTTP.cpp src/hed/mcc/http/MCCHTTP.h src/hed/mcc/http/PayloadHTTP.cpp src/hed/mcc/http/PayloadHTTP.h src/hed/mcc/http/http_test.cpp src/hed/mcc/http/http_test_withtls.cpp src/hed/mcc/msgvalidator/MCCMsgValidator.cpp src/hed/mcc/msgvalidator/MCCMsgValidator.h src/hed/mcc/soap/MCCSOAP.cpp src/hed/mcc/soap/MCCSOAP.h src/hed/mcc/tcp/MCCTCP.cpp src/hed/mcc/tcp/MCCTCP.h src/hed/mcc/tcp/PayloadTCPSocket.cpp src/hed/mcc/tcp/PayloadTCPSocket.h src/hed/mcc/tls/BIOGSIMCC.cpp src/hed/mcc/tls/BIOGSIMCC.h src/hed/mcc/tls/BIOMCC.cpp src/hed/mcc/tls/BIOMCC.h src/hed/mcc/tls/ConfigTLSMCC.cpp src/hed/mcc/tls/ConfigTLSMCC.h src/hed/mcc/tls/DelegationCollector.cpp src/hed/mcc/tls/DelegationCollector.h src/hed/mcc/tls/DelegationSecAttr.cpp src/hed/mcc/tls/DelegationSecAttr.h src/hed/mcc/tls/GlobusSigningPolicy.cpp src/hed/mcc/tls/GlobusSigningPolicy.h src/hed/mcc/tls/MCCTLS.cpp src/hed/mcc/tls/MCCTLS.h src/hed/mcc/tls/PayloadTLSMCC.cpp src/hed/mcc/tls/PayloadTLSMCC.h src/hed/mcc/tls/PayloadTLSStream.cpp src/hed/mcc/tls/PayloadTLSStream.h src/hed/shc/SecHandlerPlugin.cpp src/hed/shc/allowpdp/AllowPDP.cpp src/hed/shc/allowpdp/AllowPDP.h src/hed/shc/arcauthzsh/ArcAuthZ.cpp src/hed/shc/arcauthzsh/ArcAuthZ.h src/hed/shc/arcpdp/ArcAlgFactory.cpp src/hed/shc/arcpdp/ArcAlgFactory.h src/hed/shc/arcpdp/ArcAttributeFactory.cpp src/hed/shc/arcpdp/ArcAttributeFactory.h src/hed/shc/arcpdp/ArcAttributeProxy.h src/hed/shc/arcpdp/ArcEvaluationCtx.cpp src/hed/shc/arcpdp/ArcEvaluationCtx.h src/hed/shc/arcpdp/ArcEvaluator.cpp src/hed/shc/arcpdp/ArcEvaluator.h src/hed/shc/arcpdp/ArcFnFactory.cpp src/hed/shc/arcpdp/ArcFnFactory.h src/hed/shc/arcpdp/ArcPDP.cpp src/hed/shc/arcpdp/ArcPDP.h src/hed/shc/arcpdp/ArcPolicy.cpp src/hed/shc/arcpdp/ArcPolicy.h src/hed/shc/arcpdp/ArcRequest.cpp src/hed/shc/arcpdp/ArcRequest.h src/hed/shc/arcpdp/ArcRequestItem.cpp src/hed/shc/arcpdp/ArcRequestItem.h src/hed/shc/arcpdp/ArcRule.cpp src/hed/shc/arcpdp/ArcRule.h src/hed/shc/classload_test.cpp src/hed/shc/delegationpdp/DelegationPDP.cpp src/hed/shc/delegationpdp/DelegationPDP.h src/hed/shc/delegationsh/DelegationSH.cpp src/hed/shc/delegationsh/DelegationSH.h src/hed/shc/denypdp/DenyPDP.cpp src/hed/shc/denypdp/DenyPDP.h src/hed/shc/gaclpdp/GACLEvaluator.cpp src/hed/shc/gaclpdp/GACLEvaluator.h src/hed/shc/gaclpdp/GACLPDP.cpp src/hed/shc/gaclpdp/GACLPDP.h src/hed/shc/gaclpdp/GACLPolicy.cpp src/hed/shc/gaclpdp/GACLPolicy.h src/hed/shc/gaclpdp/GACLRequest.cpp src/hed/shc/gaclpdp/GACLRequest.h src/hed/shc/legacy/ConfigParser.cpp src/hed/shc/legacy/ConfigParser.h src/hed/shc/legacy/LegacyMap.cpp src/hed/shc/legacy/LegacyMap.h src/hed/shc/legacy/LegacyPDP.cpp src/hed/shc/legacy/LegacyPDP.h src/hed/shc/legacy/LegacySecAttr.cpp src/hed/shc/legacy/LegacySecAttr.h src/hed/shc/legacy/LegacySecHandler.cpp src/hed/shc/legacy/LegacySecHandler.h src/hed/shc/legacy/arc_lcas.cpp src/hed/shc/legacy/arc_lcmaps.cpp 
src/hed/shc/legacy/auth.cpp src/hed/shc/legacy/auth.h src/hed/shc/legacy/auth_file.cpp src/hed/shc/legacy/auth_lcas.cpp src/hed/shc/legacy/auth_ldap.cpp src/hed/shc/legacy/auth_plugin.cpp src/hed/shc/legacy/auth_subject.cpp src/hed/shc/legacy/auth_voms.cpp src/hed/shc/legacy/cert_util.cpp src/hed/shc/legacy/cert_util.h src/hed/shc/legacy/plugin.cpp src/hed/shc/legacy/simplemap.cpp src/hed/shc/legacy/simplemap.h src/hed/shc/legacy/unixmap.cpp src/hed/shc/legacy/unixmap.h src/hed/shc/legacy/unixmap_lcmaps.cpp src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.h src/hed/shc/saml2sso_assertionconsumersh/SAML2SSO_AssertionConsumerSH.cpp src/hed/shc/saml2sso_assertionconsumersh/SAML2SSO_AssertionConsumerSH.h src/hed/shc/samltokensh/SAMLTokenSH.cpp src/hed/shc/samltokensh/SAMLTokenSH.h src/hed/shc/simplelistpdp/SimpleListPDP.cpp src/hed/shc/simplelistpdp/SimpleListPDP.h src/hed/shc/test.cpp src/hed/shc/testinterface_arc.cpp src/hed/shc/testinterface_xacml.cpp src/hed/shc/usernametokensh/UsernameTokenSH.cpp src/hed/shc/usernametokensh/UsernameTokenSH.h src/hed/shc/x509tokensh/X509TokenSH.cpp src/hed/shc/x509tokensh/X509TokenSH.h src/hed/shc/xacmlpdp/AttributeDesignator.cpp src/hed/shc/xacmlpdp/AttributeDesignator.h src/hed/shc/xacmlpdp/AttributeSelector.cpp src/hed/shc/xacmlpdp/AttributeSelector.h src/hed/shc/xacmlpdp/XACMLAlgFactory.cpp src/hed/shc/xacmlpdp/XACMLAlgFactory.h src/hed/shc/xacmlpdp/XACMLApply.cpp src/hed/shc/xacmlpdp/XACMLApply.h src/hed/shc/xacmlpdp/XACMLAttributeFactory.cpp src/hed/shc/xacmlpdp/XACMLAttributeFactory.h src/hed/shc/xacmlpdp/XACMLAttributeProxy.h src/hed/shc/xacmlpdp/XACMLCondition.cpp src/hed/shc/xacmlpdp/XACMLCondition.h src/hed/shc/xacmlpdp/XACMLEvaluationCtx.cpp src/hed/shc/xacmlpdp/XACMLEvaluationCtx.h src/hed/shc/xacmlpdp/XACMLEvaluator.cpp src/hed/shc/xacmlpdp/XACMLEvaluator.h src/hed/shc/xacmlpdp/XACMLFnFactory.cpp src/hed/shc/xacmlpdp/XACMLFnFactory.h src/hed/shc/xacmlpdp/XACMLPDP.cpp src/hed/shc/xacmlpdp/XACMLPDP.h src/hed/shc/xacmlpdp/XACMLPolicy.cpp src/hed/shc/xacmlpdp/XACMLPolicy.h src/hed/shc/xacmlpdp/XACMLRequest.cpp src/hed/shc/xacmlpdp/XACMLRequest.h src/hed/shc/xacmlpdp/XACMLRule.cpp src/hed/shc/xacmlpdp/XACMLRule.h src/hed/shc/xacmlpdp/XACMLTarget.cpp src/hed/shc/xacmlpdp/XACMLTarget.h src/hed/test.cpp src/libs/data-staging/DTR.cpp src/libs/data-staging/DTR.h src/libs/data-staging/DTRList.cpp src/libs/data-staging/DTRList.h src/libs/data-staging/DTRStatus.cpp src/libs/data-staging/DTRStatus.h src/libs/data-staging/DataDelivery.cpp src/libs/data-staging/DataDelivery.h src/libs/data-staging/DataDeliveryComm.cpp src/libs/data-staging/DataDeliveryComm.h src/libs/data-staging/DataDeliveryLocalComm.cpp src/libs/data-staging/DataDeliveryLocalComm.h src/libs/data-staging/DataDeliveryRemoteComm.cpp src/libs/data-staging/DataDeliveryRemoteComm.h src/libs/data-staging/DataStagingDelivery.cpp src/libs/data-staging/Processor.cpp src/libs/data-staging/Processor.h src/libs/data-staging/Scheduler.cpp src/libs/data-staging/Scheduler.h src/libs/data-staging/TransferShares.cpp src/libs/data-staging/TransferShares.h src/libs/data-staging/examples/Generator.cpp src/libs/data-staging/examples/Generator.h src/libs/data-staging/examples/generator-main.cpp src/libs/data-staging/test/DTRMemTest.cpp src/libs/data-staging/test/DTRTest.cpp src/libs/data-staging/test/DeliveryTest.cpp src/libs/data-staging/test/ProcessorTest.cpp src/services/a-rex/FileChunks.cpp src/services/a-rex/FileChunks.h src/services/a-rex/PayloadFile.cpp 
src/services/a-rex/PayloadFile.h src/services/a-rex/arex.cpp src/services/a-rex/arex.h src/services/a-rex/cachecheck.cpp src/services/a-rex/change_activity_status.cpp src/services/a-rex/create_activity.cpp src/services/a-rex/delegation/DelegationStore.cpp src/services/a-rex/delegation/DelegationStore.h src/services/a-rex/delegation/DelegationStores.cpp src/services/a-rex/delegation/DelegationStores.h src/services/a-rex/delegation/FileRecord.cpp src/services/a-rex/delegation/FileRecord.h src/services/a-rex/delegation/FileRecordBDB.cpp src/services/a-rex/delegation/FileRecordBDB.h src/services/a-rex/delegation/FileRecordSQLite.cpp src/services/a-rex/delegation/FileRecordSQLite.h src/services/a-rex/delegation/uid.cpp src/services/a-rex/delegation/uid.h src/services/a-rex/faults.cpp src/services/a-rex/get.cpp src/services/a-rex/get_activity_documents.cpp src/services/a-rex/get_activity_statuses.cpp src/services/a-rex/get_factory_attributes_document.cpp src/services/a-rex/grid-manager/GridManager.cpp src/services/a-rex/grid-manager/GridManager.h src/services/a-rex/grid-manager/arc_blahp_logger.cpp src/services/a-rex/grid-manager/arc_vomsac_check.cpp src/services/a-rex/grid-manager/conf/CacheConfig.cpp src/services/a-rex/grid-manager/conf/CacheConfig.h src/services/a-rex/grid-manager/conf/CoreConfig.cpp src/services/a-rex/grid-manager/conf/CoreConfig.h src/services/a-rex/grid-manager/conf/GMConfig.cpp src/services/a-rex/grid-manager/conf/GMConfig.h src/services/a-rex/grid-manager/conf/StagingConfig.cpp src/services/a-rex/grid-manager/conf/StagingConfig.h src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp src/services/a-rex/grid-manager/conf/UrlMapConfig.h src/services/a-rex/grid-manager/files/ControlFileContent.cpp src/services/a-rex/grid-manager/files/ControlFileContent.h src/services/a-rex/grid-manager/files/ControlFileHandling.cpp src/services/a-rex/grid-manager/files/ControlFileHandling.h src/services/a-rex/grid-manager/files/Delete.cpp src/services/a-rex/grid-manager/files/Delete.h src/services/a-rex/grid-manager/files/JobLogFile.cpp src/services/a-rex/grid-manager/files/JobLogFile.h src/services/a-rex/grid-manager/gm_delegations_converter.cpp src/services/a-rex/grid-manager/gm_jobs.cpp src/services/a-rex/grid-manager/gm_kick.cpp src/services/a-rex/grid-manager/inputcheck.cpp src/services/a-rex/grid-manager/jobplugin/init.cpp src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp src/services/a-rex/grid-manager/jobplugin/jobplugin.h src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp src/services/a-rex/grid-manager/jobs/CommFIFO.cpp src/services/a-rex/grid-manager/jobs/CommFIFO.h src/services/a-rex/grid-manager/jobs/ContinuationPlugins.cpp src/services/a-rex/grid-manager/jobs/ContinuationPlugins.h src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp src/services/a-rex/grid-manager/jobs/DTRGenerator.h src/services/a-rex/grid-manager/jobs/GMJob.cpp src/services/a-rex/grid-manager/jobs/GMJob.h src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.h src/services/a-rex/grid-manager/jobs/JobsList.cpp src/services/a-rex/grid-manager/jobs/JobsList.h src/services/a-rex/grid-manager/loaders/downloader.cpp src/services/a-rex/grid-manager/loaders/uploader.cpp src/services/a-rex/grid-manager/log/JobLog.cpp src/services/a-rex/grid-manager/log/JobLog.h src/services/a-rex/grid-manager/log/JobsMetrics.cpp src/services/a-rex/grid-manager/log/JobsMetrics.h src/services/a-rex/grid-manager/mail/send_mail.cpp 
src/services/a-rex/grid-manager/mail/send_mail.h src/services/a-rex/grid-manager/misc/proxy.cpp src/services/a-rex/grid-manager/misc/proxy.h src/services/a-rex/grid-manager/run/RunParallel.cpp src/services/a-rex/grid-manager/run/RunParallel.h src/services/a-rex/grid-manager/run/RunPlugin.cpp src/services/a-rex/grid-manager/run/RunPlugin.h src/services/a-rex/grid-manager/run/RunRedirected.cpp src/services/a-rex/grid-manager/run/RunRedirected.h src/services/a-rex/information_collector.cpp src/services/a-rex/job.cpp src/services/a-rex/job.h src/services/a-rex/jura/ApelDestination.cpp src/services/a-rex/jura/ApelDestination.h src/services/a-rex/jura/CARAggregation.cpp src/services/a-rex/jura/CARAggregation.h src/services/a-rex/jura/CARDestination.cpp src/services/a-rex/jura/CARDestination.h src/services/a-rex/jura/Destination.cpp src/services/a-rex/jura/Destination.h src/services/a-rex/jura/Destinations.cpp src/services/a-rex/jura/Destinations.h src/services/a-rex/jura/JobLogFile.cpp src/services/a-rex/jura/JobLogFile.h src/services/a-rex/jura/LutsDestination.cpp src/services/a-rex/jura/LutsDestination.h src/services/a-rex/jura/ReReporter.cpp src/services/a-rex/jura/ReReporter.h src/services/a-rex/jura/Reporter.h src/services/a-rex/jura/UsageReporter.cpp src/services/a-rex/jura/UsageReporter.h src/services/a-rex/jura/jura.cpp src/services/a-rex/jura/jura.h src/services/a-rex/jura/ssm/__init__.py src/services/a-rex/jura/ssm/brokers.py src/services/a-rex/jura/ssm/crypto.py src/services/a-rex/jura/ssm/ssm2.py src/services/a-rex/ldif/LDIFtoXML.cpp src/services/a-rex/ldif/LDIFtoXML.h src/services/a-rex/ldif/main.cpp src/services/a-rex/ldif/test.cpp src/services/a-rex/lrms/dgbridge/DGAuthplug.py src/services/a-rex/lrms/dgbridge/DGBridgeDataPlugin.py src/services/a-rex/lrms/dgbridge/DGLog2XML.py src/services/a-rex/migrate_activity.cpp src/services/a-rex/put.cpp src/services/a-rex/terminate_activities.cpp src/services/a-rex/test.cpp src/services/a-rex/test_cache_check.cpp src/services/a-rex/tools.cpp src/services/a-rex/tools.h src/services/a-rex/update_credentials.cpp src/services/acix/__init__.py src/services/acix/cacheserver/__init__.py src/services/acix/cacheserver/cache.py src/services/acix/cacheserver/cacheresource.py src/services/acix/cacheserver/cachesetup.py src/services/acix/cacheserver/pscan.py src/services/acix/cacheserver/test/test_cacheresource.py src/services/acix/cacheserver/test/test_scan.py src/services/acix/core/__init__.py src/services/acix/core/bitvector.py src/services/acix/core/bloomfilter.py src/services/acix/core/cacheclient.py src/services/acix/core/hashes.py src/services/acix/core/indexclient.py src/services/acix/core/ssl.py src/services/acix/core/test/test_bloomfilter.py src/services/acix/indexserver/__init__.py src/services/acix/indexserver/index.py src/services/acix/indexserver/indexresource.py src/services/acix/indexserver/indexsetup.py src/services/acix/indexserver/test/test_system.py src/services/cache_service/CacheService.cpp src/services/cache_service/CacheService.h src/services/cache_service/CacheServiceGenerator.cpp src/services/cache_service/CacheServiceGenerator.h src/services/data-staging/DataDeliveryService.cpp src/services/data-staging/DataDeliveryService.h src/services/examples/echo_python/EchoService.py src/services/examples/echo_python/__init__.py src/services/examples/echo_python/test.cpp src/services/gridftpd/auth/auth.cpp src/services/gridftpd/auth/auth.h src/services/gridftpd/auth/auth_file.cpp src/services/gridftpd/auth/auth_lcas.cpp 
src/services/gridftpd/auth/auth_ldap.cpp src/services/gridftpd/auth/auth_plugin.cpp src/services/gridftpd/auth/auth_subject.cpp src/services/gridftpd/auth/auth_voms.cpp src/services/gridftpd/auth/identity.cpp src/services/gridftpd/auth/identity.h src/services/gridftpd/auth/identity_dn.cpp src/services/gridftpd/auth/identity_dn.h src/services/gridftpd/auth/identity_voms.cpp src/services/gridftpd/auth/identity_voms.h src/services/gridftpd/auth/object_access.cpp src/services/gridftpd/auth/object_access.h src/services/gridftpd/auth/permission.cpp src/services/gridftpd/auth/permission.h src/services/gridftpd/auth/simplemap.cpp src/services/gridftpd/auth/simplemap.h src/services/gridftpd/auth/unixmap.cpp src/services/gridftpd/auth/unixmap.h src/services/gridftpd/auth/unixmap_lcmaps.cpp src/services/gridftpd/commands.cpp src/services/gridftpd/commands.h src/services/gridftpd/conf.h src/services/gridftpd/conf/conf_vo.cpp src/services/gridftpd/conf/conf_vo.h src/services/gridftpd/conf/daemon.cpp src/services/gridftpd/conf/daemon.h src/services/gridftpd/conf/environment.cpp src/services/gridftpd/conf/environment.h src/services/gridftpd/conf/gridmap.cpp src/services/gridftpd/conf/gridmap.h src/services/gridftpd/config.cpp src/services/gridftpd/datalist.cpp src/services/gridftpd/dataread.cpp src/services/gridftpd/datawrite.cpp src/services/gridftpd/fileplugin/fileplugin.cpp src/services/gridftpd/fileplugin/fileplugin.h src/services/gridftpd/fileplugin/init.cpp src/services/gridftpd/fileroot.cpp src/services/gridftpd/fileroot.h src/services/gridftpd/fileroot_config.cpp src/services/gridftpd/listener.cpp src/services/gridftpd/misc.cpp src/services/gridftpd/misc.h src/services/gridftpd/misc/ldapquery.cpp src/services/gridftpd/misc/ldapquery.h src/services/gridftpd/misc/proxy.cpp src/services/gridftpd/misc/proxy.h src/services/gridftpd/names.cpp src/services/gridftpd/names.h src/services/gridftpd/run/run_plugin.cpp src/services/gridftpd/run/run_plugin.h src/services/gridftpd/userspec.cpp src/services/gridftpd/userspec.h src/services/ldap-infosys/giis/Entry.cpp src/services/ldap-infosys/giis/Entry.h src/services/ldap-infosys/giis/Index.cpp src/services/ldap-infosys/giis/Index.h src/services/ldap-infosys/giis/Policy.cpp src/services/ldap-infosys/giis/Policy.h src/services/ldap-infosys/giis/Server.cpp src/services/ldap-infosys/giis/Server.h src/services/ldap-infosys/giis/SlapdWrapper.cpp src/services/ldap-infosys/giis/main.cpp src/services/wrappers/java/javawrapper.cpp src/services/wrappers/java/javawrapper.h src/services/wrappers/python/pythonwrapper.cpp src/services/wrappers/python/pythonwrapper.h src/tests/arcpolicy/arcpolicy.cpp src/tests/client/test_ClientInterface.cpp src/tests/client/test_ClientSAML2SSO.cpp src/tests/client/test_ClientX509Delegation_ARC.cpp src/tests/client/test_ClientX509Delegation_GridSite.cpp src/tests/count/count.cpp src/tests/count/count.h src/tests/count/test_client.cpp src/tests/count/test_service.cpp src/tests/delegation/test_client_with_delegation_sechandler.cpp src/tests/delegation/test_delegation_client.cpp src/tests/echo/echo.cpp src/tests/echo/echo.h src/tests/echo/echo_client.py src/tests/echo/echo_test4axis2c/test_client.cpp src/tests/echo/perfengine.py src/tests/echo/perftest.cpp src/tests/echo/test.cpp src/tests/echo/test_client.cpp src/tests/echo/test_clientinterface.cpp src/tests/echo/test_clientinterface.py src/tests/echo/test_service.cpp src/tests/perf/perftest.cpp src/tests/perf/perftest_cmd_duration.cpp src/tests/perf/perftest_cmd_times.cpp 
src/tests/perf/perftest_deleg_bydelegclient.cpp src/tests/perf/perftest_deleg_bysechandler.cpp src/tests/perf/perftest_msgsize.cpp src/tests/perf/perftest_saml2sso.cpp src/tests/perf/perftest_samlaa.cpp src/tests/perf/perftest_slcs.cpp src/tests/policy-delegation/test.cpp src/tests/translator/translator.cpp src/tests/unit/ClientsTest.cpp src/tests/unit/ClientsTest.h src/tests/unit/Test.cpp src/tests/xpath/prepare.py src/tests/xpath/query.cpp src/utils/hed/arcplugin.cpp src/utils/hed/common.cpp src/utils/hed/complextype.cpp src/utils/hed/schemaconv.cpp src/utils/hed/schemaconv.h src/utils/hed/simpletype.cpp src/utils/hed/wsdl2hed.cpp nordugrid-arc-5.4.2/po/PaxHeaders.7502/Rules-quot0000644000000000000000000000013213214315702017613 xustar000000000000000030 mtime=1513200578.352748908 30 atime=1513200658.495729088 30 ctime=1513200668.593852592 nordugrid-arc-5.4.2/po/Rules-quot0000644000175000002070000000323113214315702017660 0ustar00mockbuildmock00000000000000# Special Makefile rules for English message catalogs with quotation marks. DISTFILES.common.extra1 = quot.sed boldquot.sed en@quot.header en@boldquot.header insert-header.sin Rules-quot .SUFFIXES: .insert-header .po-update-en en@quot.po-update: en@quot.po-update-en en@boldquot.po-update: en@boldquot.po-update-en .insert-header.po-update-en: @lang=`echo $@ | sed -e 's/\.po-update-en$$//'`; \ if test "$(PACKAGE)" = "gettext"; then PATH=`pwd`/../src:$$PATH; GETTEXTLIBDIR=`cd $(top_srcdir)/src && pwd`; export GETTEXTLIBDIR; fi; \ tmpdir=`pwd`; \ echo "$$lang:"; \ ll=`echo $$lang | sed -e 's/@.*//'`; \ LC_ALL=C; export LC_ALL; \ cd $(srcdir); \ if $(MSGINIT) -i $(DOMAIN).pot --no-translator -l $$ll -o - 2>/dev/null | sed -f $$tmpdir/$$lang.insert-header | $(MSGCONV) -t UTF-8 | $(MSGFILTER) sed -f `echo $$lang | sed -e 's/.*@//'`.sed 2>/dev/null > $$tmpdir/$$lang.new.po; then \ if cmp $$lang.po $$tmpdir/$$lang.new.po >/dev/null 2>&1; then \ rm -f $$tmpdir/$$lang.new.po; \ else \ if mv -f $$tmpdir/$$lang.new.po $$lang.po; then \ :; \ else \ echo "creation of $$lang.po failed: cannot move $$tmpdir/$$lang.new.po to $$lang.po" 1>&2; \ exit 1; \ fi; \ fi; \ else \ echo "creation of $$lang.po failed!" 1>&2; \ rm -f $$tmpdir/$$lang.new.po; \ fi en@quot.insert-header: insert-header.sin sed -e '/^#/d' -e 's/HEADER/en@quot.header/g' $(srcdir)/insert-header.sin > en@quot.insert-header en@boldquot.insert-header: insert-header.sin sed -e '/^#/d' -e 's/HEADER/en@boldquot.header/g' $(srcdir)/insert-header.sin > en@boldquot.insert-header mostlyclean: mostlyclean-quot mostlyclean-quot: rm -f *.insert-header nordugrid-arc-5.4.2/po/PaxHeaders.7502/Makefile.in.in0000644000000000000000000000013213214315702020262 xustar000000000000000030 mtime=1513200578.333748676 30 atime=1513200652.884660462 30 ctime=1513200668.587852519 nordugrid-arc-5.4.2/po/Makefile.in.in0000644000175000002070000002744113214315702020340 0ustar00mockbuildmock00000000000000# Makefile for PO directory in any package using GNU gettext. # Copyright (C) 1995-1997, 2000-2003 by Ulrich Drepper # # This file can be copied and used freely without restrictions. It can # be used in projects which are not available under the GNU General Public # License but which still want to provide support for the GNU gettext # functionality. # Please note that the actual code of GNU gettext is covered by the GNU # General Public License and is *not* in the public domain. 
PACKAGE = @PACKAGE@ VERSION = @VERSION@ SHELL = /bin/sh @SET_MAKE@ srcdir = @srcdir@ top_srcdir = @top_srcdir@ VPATH = @srcdir@ prefix = @prefix@ exec_prefix = @exec_prefix@ datadir = @datadir@ localedir = $(datadir)/locale gettextsrcdir = $(datadir)/gettext/po INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ MKINSTALLDIRS = @MKINSTALLDIRS@ mkinstalldirs = $(SHELL) $(MKINSTALLDIRS) GMSGFMT = @GMSGFMT@ MSGFMT = @MSGFMT@ XGETTEXT = @XGETTEXT@ MSGMERGE = msgmerge MSGMERGE_UPDATE = @MSGMERGE@ --update MSGINIT = msginit MSGCONV = msgconv MSGFILTER = msgfilter POFILES = @POFILES@ GMOFILES = @GMOFILES@ UPDATEPOFILES = @UPDATEPOFILES@ DUMMYPOFILES = @DUMMYPOFILES@ DISTFILES.common = Makefile.in.in remove-potcdate.sin \ $(DISTFILES.common.extra1) $(DISTFILES.common.extra2) $(DISTFILES.common.extra3) DISTFILES = $(DISTFILES.common) Makevars POTFILES.in $(DOMAIN).pot stamp-po \ $(POFILES) $(GMOFILES) \ $(DISTFILES.extra1) $(DISTFILES.extra2) $(DISTFILES.extra3) POTFILES = \ CATALOGS = @CATALOGS@ # Makevars gets inserted here. (Don't remove this line!) .SUFFIXES: .SUFFIXES: .po .gmo .mo .sed .sin .nop .po-update .po.mo: @echo "$(MSGFMT) -c -o $@ $<"; \ $(MSGFMT) -c -o t-$@ $< && mv t-$@ $@ .po.gmo: @lang=`echo $* | sed -e 's,.*/,,'`; \ test "$(srcdir)" = . && cdcmd="" || cdcmd="cd $(srcdir) && "; \ echo "$${cdcmd}rm -f $${lang}.gmo && $(GMSGFMT) -c --statistics -o $${lang}.gmo $${lang}.po"; \ cd $(srcdir) && rm -f $${lang}.gmo && $(GMSGFMT) -c --statistics -o t-$${lang}.gmo $${lang}.po && mv t-$${lang}.gmo $${lang}.gmo .sin.sed: sed -e '/^#/d' $< > t-$@ mv t-$@ $@ all: all-@USE_NLS@ all-yes: stamp-po all-no: # stamp-po is a timestamp denoting the last time at which the CATALOGS have # been loosely updated. Its purpose is that when a developer or translator # checks out the package via CVS, and the $(DOMAIN).pot file is not in CVS, # "make" will update the $(DOMAIN).pot and the $(CATALOGS), but subsequent # invocations of "make" will do nothing. This timestamp would not be necessary # if updating the $(CATALOGS) would always touch them; however, the rule for # $(POFILES) has been designed to not touch files that don't need to be # changed. stamp-po: $(srcdir)/$(DOMAIN).pot test -z "$(CATALOGS)" || $(MAKE) $(CATALOGS) @echo "touch stamp-po" @echo timestamp > stamp-poT @mv stamp-poT stamp-po # Note: Target 'all' must not depend on target '$(DOMAIN).pot-update', # otherwise packages like GCC can not be built if only parts of the source # have been downloaded. # This target rebuilds $(DOMAIN).pot; it is an expensive operation. # Note that $(DOMAIN).pot is not touched if it doesn't need to be changed. $(DOMAIN).pot-update: $(POTFILES) $(srcdir)/POTFILES.in remove-potcdate.sed $(XGETTEXT) --default-domain=$(DOMAIN) --directory=$(top_srcdir) \ --add-comments=TRANSLATORS: $(XGETTEXT_OPTIONS) \ --files-from=$(srcdir)/POTFILES.in \ --copyright-holder='$(COPYRIGHT_HOLDER)' \ --msgid-bugs-address='$(MSGID_BUGS_ADDRESS)' test ! 
-f $(DOMAIN).po || { \ if test -f $(srcdir)/$(DOMAIN).pot; then \ sed -f remove-potcdate.sed < $(srcdir)/$(DOMAIN).pot > $(DOMAIN).1po && \ sed -f remove-potcdate.sed < $(DOMAIN).po > $(DOMAIN).2po && \ if cmp $(DOMAIN).1po $(DOMAIN).2po >/dev/null 2>&1; then \ rm -f $(DOMAIN).1po $(DOMAIN).2po $(DOMAIN).po; \ else \ rm -f $(DOMAIN).1po $(DOMAIN).2po $(srcdir)/$(DOMAIN).pot && \ mv $(DOMAIN).po $(srcdir)/$(DOMAIN).pot; \ fi; \ else \ mv $(DOMAIN).po $(srcdir)/$(DOMAIN).pot; \ fi; \ } # This rule has no dependencies: we don't need to update $(DOMAIN).pot at # every "make" invocation, only create it when it is missing. # Only "make $(DOMAIN).pot-update" or "make dist" will force an update. $(srcdir)/$(DOMAIN).pot: $(MAKE) $(DOMAIN).pot-update # This target rebuilds a PO file if $(DOMAIN).pot has changed. # Note that a PO file is not touched if it doesn't need to be changed. $(POFILES): $(srcdir)/$(DOMAIN).pot @lang=`echo $@ | sed -e 's,.*/,,' -e 's/\.po$$//'`; \ test "$(srcdir)" = . && cdcmd="" || cdcmd="cd $(srcdir) && "; \ echo "$${cdcmd}$(MSGMERGE_UPDATE) $${lang}.po $(DOMAIN).pot"; \ cd $(srcdir) && $(MSGMERGE_UPDATE) $${lang}.po $(DOMAIN).pot install: install-exec install-data install-exec: install-data: install-data-@USE_NLS@ if test "$(PACKAGE)" = "gettext-tools"; then \ $(mkinstalldirs) $(DESTDIR)$(gettextsrcdir); \ for file in $(DISTFILES.common) Makevars.template; do \ $(INSTALL_DATA) $(srcdir)/$$file \ $(DESTDIR)$(gettextsrcdir)/$$file; \ done; \ for file in Makevars; do \ rm -f $(DESTDIR)$(gettextsrcdir)/$$file; \ done; \ else \ : ; \ fi install-data-no: all install-data-yes: all $(mkinstalldirs) $(DESTDIR)$(datadir) @catalogs='$(CATALOGS)'; \ for cat in $$catalogs; do \ cat=`basename $$cat`; \ lang=`echo $$cat | sed -e 's/\.gmo$$//'`; \ dir=$(localedir)/$$lang/LC_MESSAGES; \ $(mkinstalldirs) $(DESTDIR)$$dir; \ if test -r $$cat; then realcat=$$cat; else realcat=$(srcdir)/$$cat; fi; \ $(INSTALL_DATA) $$realcat $(DESTDIR)$$dir/$(DOMAIN).mo; \ echo "installing $$realcat as $(DESTDIR)$$dir/$(DOMAIN).mo"; \ for lc in '' $(EXTRA_LOCALE_CATEGORIES); do \ if test -n "$$lc"; then \ if (cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc 2>/dev/null) | grep ' -> ' >/dev/null; then \ link=`cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc | sed -e 's/^.* -> //'`; \ mv $(DESTDIR)$(localedir)/$$lang/$$lc $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ (cd $(DESTDIR)$(localedir)/$$lang/$$lc.old && \ for file in *; do \ if test -f $$file; then \ ln -s ../$$link/$$file $(DESTDIR)$(localedir)/$$lang/$$lc/$$file; \ fi; \ done); \ rm -f $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ else \ if test -d $(DESTDIR)$(localedir)/$$lang/$$lc; then \ :; \ else \ rm -f $(DESTDIR)$(localedir)/$$lang/$$lc; \ mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ fi; \ fi; \ rm -f $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo; \ ln -s ../LC_MESSAGES/$(DOMAIN).mo $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo 2>/dev/null || \ ln $(DESTDIR)$(localedir)/$$lang/LC_MESSAGES/$(DOMAIN).mo $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo 2>/dev/null || \ cp -p $(DESTDIR)$(localedir)/$$lang/LC_MESSAGES/$(DOMAIN).mo $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo; \ echo "installing $$realcat link as $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo"; \ fi; \ done; \ done install-strip: install installdirs: installdirs-exec installdirs-data installdirs-exec: installdirs-data: installdirs-data-@USE_NLS@ if test "$(PACKAGE)" = "gettext-tools"; then \ $(mkinstalldirs) 
$(DESTDIR)$(gettextsrcdir); \ else \ : ; \ fi installdirs-data-no: installdirs-data-yes: $(mkinstalldirs) $(DESTDIR)$(datadir) @catalogs='$(CATALOGS)'; \ for cat in $$catalogs; do \ cat=`basename $$cat`; \ lang=`echo $$cat | sed -e 's/\.gmo$$//'`; \ dir=$(localedir)/$$lang/LC_MESSAGES; \ $(mkinstalldirs) $(DESTDIR)$$dir; \ for lc in '' $(EXTRA_LOCALE_CATEGORIES); do \ if test -n "$$lc"; then \ if (cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc 2>/dev/null) | grep ' -> ' >/dev/null; then \ link=`cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc | sed -e 's/^.* -> //'`; \ mv $(DESTDIR)$(localedir)/$$lang/$$lc $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ (cd $(DESTDIR)$(localedir)/$$lang/$$lc.old && \ for file in *; do \ if test -f $$file; then \ ln -s ../$$link/$$file $(DESTDIR)$(localedir)/$$lang/$$lc/$$file; \ fi; \ done); \ rm -f $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ else \ if test -d $(DESTDIR)$(localedir)/$$lang/$$lc; then \ :; \ else \ rm -f $(DESTDIR)$(localedir)/$$lang/$$lc; \ mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ fi; \ fi; \ fi; \ done; \ done # Define this as empty until I found a useful application. installcheck: uninstall: uninstall-exec uninstall-data uninstall-exec: uninstall-data: uninstall-data-@USE_NLS@ if test "$(PACKAGE)" = "gettext-tools"; then \ for file in $(DISTFILES.common) Makevars.template; do \ rm -f $(DESTDIR)$(gettextsrcdir)/$$file; \ done; \ else \ : ; \ fi uninstall-data-no: uninstall-data-yes: catalogs='$(CATALOGS)'; \ for cat in $$catalogs; do \ cat=`basename $$cat`; \ lang=`echo $$cat | sed -e 's/\.gmo$$//'`; \ for lc in LC_MESSAGES $(EXTRA_LOCALE_CATEGORIES); do \ rm -f $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo; \ done; \ done check: all info dvi ps pdf html tags TAGS ctags CTAGS ID: mostlyclean: rm -f remove-potcdate.sed rm -f stamp-poT rm -f core core.* $(DOMAIN).po $(DOMAIN).1po $(DOMAIN).2po *.new.po rm -fr *.o clean: mostlyclean distclean: clean rm -f Makefile Makefile.in POTFILES *.mo maintainer-clean: distclean @echo "This command is intended for maintainers to use;" @echo "it deletes files that may require special tools to rebuild." rm -f stamp-po $(GMOFILES) distdir = $(top_builddir)/$(PACKAGE)-$(VERSION)/$(subdir) dist distdir: $(MAKE) update-po @$(MAKE) dist2 # This is a separate target because 'update-po' must be executed before. dist2: $(DISTFILES) dists="$(DISTFILES)"; \ if test "$(PACKAGE)" = "gettext-tools"; then \ dists="$$dists Makevars.template"; \ fi; \ if test -f $(srcdir)/ChangeLog; then \ dists="$$dists ChangeLog"; \ fi; \ for i in 0 1 2 3 4 5 6 7 8 9; do \ if test -f $(srcdir)/ChangeLog.$$i; then \ dists="$$dists ChangeLog.$$i"; \ fi; \ done; \ if test -f $(srcdir)/LINGUAS; then dists="$$dists LINGUAS"; fi; \ for file in $$dists; do \ if test -f $$file; then \ cp -p $$file $(distdir); \ else \ cp -p $(srcdir)/$$file $(distdir); \ fi; \ done update-po: Makefile $(MAKE) $(DOMAIN).pot-update test -z "$(UPDATEPOFILES)" || $(MAKE) $(UPDATEPOFILES) $(MAKE) update-gmo # General rule for updating PO files. .nop.po-update: @lang=`echo $@ | sed -e 's/\.po-update$$//'`; \ if test "$(PACKAGE)" = "gettext-tools"; then PATH=`pwd`/../src:$$PATH; fi; \ tmpdir=`pwd`; \ echo "$$lang:"; \ test "$(srcdir)" = . 
&& cdcmd="" || cdcmd="cd $(srcdir) && "; \ echo "$${cdcmd}$(MSGMERGE) $$lang.po $(DOMAIN).pot -o $$lang.new.po"; \ cd $(srcdir); \ if $(MSGMERGE) $$lang.po $(DOMAIN).pot -o $$tmpdir/$$lang.new.po; then \ if cmp $$lang.po $$tmpdir/$$lang.new.po >/dev/null 2>&1; then \ rm -f $$tmpdir/$$lang.new.po; \ else \ if mv -f $$tmpdir/$$lang.new.po $$lang.po; then \ :; \ else \ echo "msgmerge for $$lang.po failed: cannot move $$tmpdir/$$lang.new.po to $$lang.po" 1>&2; \ exit 1; \ fi; \ fi; \ else \ echo "msgmerge for $$lang.po failed!" 1>&2; \ rm -f $$tmpdir/$$lang.new.po; \ fi $(DUMMYPOFILES): update-gmo: Makefile $(GMOFILES) @: Makefile: Makefile.in.in $(top_builddir)/config.status @POMAKEFILEDEPS@ cd $(top_builddir) \ && CONFIG_FILES=$(subdir)/$@.in CONFIG_HEADERS= \ $(SHELL) ./config.status force: # Tell versions [3.59,3.63) of GNU make not to export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/po/PaxHeaders.7502/sv.po0000644000000000000000000000013113214316033016575 xustar000000000000000030 mtime=1513200667.234835971 30 atime=1513200668.397850195 29 ctime=1513200668.60185269 nordugrid-arc-5.4.2/po/sv.po0000644000175000002070000272613313214316033016662 0ustar00mockbuildmock00000000000000# Translation file for the Advanced Resource Connector (Arc) msgid "" msgstr "" "Project-Id-Version: Arc\n" "Report-Msgid-Bugs-To: support@nordugrid.org\n" "POT-Creation-Date: 2017-12-13 22:31+0100\n" "PO-Revision-Date: 2009-06-23 15:28+0200\n" "Last-Translator: name \n" "Language-Team: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Language: \n" "Plural-Forms: nplurals=2; plural=n != 1;\n" #: src/clients/compute/arccat.cpp:35 src/clients/compute/arcclean.cpp:34 #: src/clients/compute/arcget.cpp:35 src/clients/compute/arckill.cpp:33 #: src/clients/compute/arcrenew.cpp:32 src/clients/compute/arcresub.cpp:35 #: src/clients/compute/arcresume.cpp:32 src/clients/compute/arcstat.cpp:34 msgid "[job ...]" msgstr "[jobb ...]" #: src/clients/compute/arccat.cpp:36 msgid "" "The arccat command performs the cat command on the stdout, stderr or grid\n" "manager's error log of the job." msgstr "" "arccat-kommandot utför cat-kommandot på jobbets stdout, stderr eller " "gridmanager-fellogg." 
#: src/clients/compute/arccat.cpp:43 src/clients/compute/arcclean.cpp:41 #: src/clients/compute/arcget.cpp:42 src/clients/compute/arcinfo.cpp:45 #: src/clients/compute/arckill.cpp:40 src/clients/compute/arcrenew.cpp:37 #: src/clients/compute/arcresub.cpp:40 src/clients/compute/arcresume.cpp:37 #: src/clients/compute/arcstat.cpp:42 src/clients/compute/arcsub.cpp:54 #: src/clients/compute/arcsync.cpp:147 src/clients/compute/arctest.cpp:62 #: src/clients/credentials/arcproxy.cpp:475 #: src/clients/credentials/arcproxyalt.cpp:461 src/clients/data/arccp.cpp:641 #: src/clients/data/arcls.cpp:346 src/clients/data/arcmkdir.cpp:125 #: src/clients/data/arcrename.cpp:136 src/clients/data/arcrm.cpp:151 #: src/clients/echo/arcecho.cpp:61 src/clients/saml/saml_assertion_init.cpp:62 #: src/clients/wsrf/arcwsrf.cpp:74 src/hed/daemon/unix/main_unix.cpp:346 #: src/hed/daemon/win32/main_win32.cpp:148 #: src/services/a-rex/jura/jura.cpp:109 #, c-format msgid "%s version %s" msgstr "%s version %s" #: src/clients/compute/arccat.cpp:52 src/clients/compute/arcclean.cpp:50 #: src/clients/compute/arcget.cpp:51 src/clients/compute/arcinfo.cpp:53 #: src/clients/compute/arckill.cpp:49 src/clients/compute/arcrenew.cpp:46 #: src/clients/compute/arcresub.cpp:49 src/clients/compute/arcresume.cpp:46 #: src/clients/compute/arcstat.cpp:51 src/clients/compute/arcsub.cpp:63 #: src/clients/compute/arcsync.cpp:156 src/clients/compute/arctest.cpp:84 #: src/clients/credentials/arcproxy.cpp:483 #: src/clients/credentials/arcproxyalt.cpp:469 src/clients/data/arccp.cpp:648 #: src/clients/data/arcls.cpp:354 src/clients/data/arcmkdir.cpp:133 #: src/clients/data/arcrename.cpp:144 src/clients/data/arcrm.cpp:160 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:172 #, fuzzy, c-format msgid "Running command: %s" msgstr "Kommando: %s" #: src/clients/compute/arccat.cpp:63 src/clients/compute/arcclean.cpp:61 #: src/clients/compute/arcget.cpp:62 src/clients/compute/arcinfo.cpp:65 #: src/clients/compute/arckill.cpp:60 src/clients/compute/arcrenew.cpp:57 #: src/clients/compute/arcresub.cpp:53 src/clients/compute/arcresume.cpp:50 #: src/clients/compute/arcstat.cpp:62 src/clients/compute/arcsub.cpp:67 #: src/clients/compute/arcsync.cpp:167 src/clients/compute/arctest.cpp:88 #: src/clients/data/arccp.cpp:671 src/clients/data/arcls.cpp:376 #: src/clients/data/arcmkdir.cpp:155 src/clients/data/arcrename.cpp:166 #: src/clients/data/arcrm.cpp:182 src/clients/echo/arcecho.cpp:72 #: src/clients/wsrf/arcwsrf.cpp:101 msgid "Failed configuration initialization" msgstr "Misslyckades med att initiera inställningar" #: src/clients/compute/arccat.cpp:76 src/clients/compute/arcclean.cpp:74 #: src/clients/compute/arcget.cpp:88 src/clients/compute/arckill.cpp:73 #: src/clients/compute/arcrenew.cpp:70 src/clients/compute/arcresub.cpp:81 #: src/clients/compute/arcresume.cpp:70 src/clients/compute/arcstat.cpp:71 #, fuzzy, c-format msgid "Cannot read specified jobid file: %s" msgstr "Kan inte läsa certifikatfil: %s" #: src/clients/compute/arccat.cpp:87 src/clients/compute/arcclean.cpp:85 #: src/clients/compute/arcget.cpp:99 src/clients/compute/arckill.cpp:84 #: src/clients/compute/arcrenew.cpp:81 src/clients/compute/arcresub.cpp:95 #: src/clients/compute/arcresume.cpp:81 src/clients/compute/arcstat.cpp:105 msgid "No jobs given" msgstr "Inga jobb angivna" #: src/clients/compute/arccat.cpp:100 src/clients/compute/arcclean.cpp:98 #: src/clients/compute/arcget.cpp:112 src/clients/compute/arckill.cpp:97 #: src/clients/compute/arcrenew.cpp:94 
src/clients/compute/arcresub.cpp:105 #: src/clients/compute/arcresume.cpp:94 src/clients/compute/arcstat.cpp:117 #, fuzzy, c-format msgid "Job list file (%s) doesn't exist" msgstr "Låsfil %s existerar inte" #: src/clients/compute/arccat.cpp:107 src/clients/compute/arcclean.cpp:105 #: src/clients/compute/arcget.cpp:119 src/clients/compute/arckill.cpp:104 #: src/clients/compute/arcrenew.cpp:101 src/clients/compute/arcresub.cpp:112 #: src/clients/compute/arcresume.cpp:101 src/clients/compute/arcstat.cpp:124 #: src/clients/compute/arctest.cpp:298 #, fuzzy, c-format msgid "Unable to read job information from file (%s)" msgstr "Misslyckades med att ladda serviceinställningar" #: src/clients/compute/arccat.cpp:116 src/clients/compute/arcclean.cpp:113 #: src/clients/compute/arcget.cpp:127 src/clients/compute/arckill.cpp:112 #: src/clients/compute/arcrenew.cpp:110 src/clients/compute/arcresub.cpp:120 #: src/clients/compute/arcresume.cpp:110 src/clients/compute/arcstat.cpp:133 #, fuzzy, c-format msgid "Warning: Job not found in job list: %s" msgstr "Jobb ej funnet i jobblista: %s" #: src/clients/compute/arccat.cpp:129 src/clients/compute/arcclean.cpp:168 #: src/clients/compute/arcget.cpp:140 src/clients/compute/arckill.cpp:124 #: src/clients/compute/arcrenew.cpp:122 src/clients/compute/arcresub.cpp:132 #: src/clients/compute/arcresume.cpp:122 #, fuzzy msgid "No jobs" msgstr "INGET jobb" #: src/clients/compute/arccat.cpp:144 #, c-format msgid "Could not create temporary file \"%s\"" msgstr "Kunde inte skapa temporär fil \"%s\"" #: src/clients/compute/arccat.cpp:145 src/clients/compute/arccat.cpp:151 #, fuzzy, c-format msgid "Cannot create output of %s for any jobs" msgstr "Kan inte skapa katalog \"%s\" för cache" #: src/clients/compute/arccat.cpp:152 #, fuzzy, c-format msgid "Invalid destination URL %s" msgstr "Ogiltig destination" #: src/clients/compute/arccat.cpp:170 #, fuzzy, c-format msgid "Job deleted: %s" msgstr "Förval: %s" #: src/clients/compute/arccat.cpp:180 #, fuzzy, c-format msgid "Job has not started yet: %s" msgstr "JobID: %s tillstånd: %s" #: src/clients/compute/arccat.cpp:188 #, fuzzy, c-format msgid "Cannot determine the %s location: %s" msgstr "Kan inte bestämma plats för stdout: %s" #: src/clients/compute/arccat.cpp:196 #, c-format msgid "Cannot create output of %s for job (%s): Invalid source %s" msgstr "" #: src/clients/compute/arccat.cpp:206 #, fuzzy, c-format msgid "Catting %s for job %s" msgstr "Hämtar %s-jobb" #: src/clients/compute/arcclean.cpp:35 #, fuzzy msgid "The arcclean command removes a job from the computing resource." msgstr "arcclean-kommandot tar bort ett job från ett kluster." #: src/clients/compute/arcclean.cpp:137 msgid "" "You are about to remove jobs from the job list for which no information " "could be\n" "found. NOTE: Recently submitted jobs might not have appeared in the " "information\n" "system, and this action will also remove such jobs." msgstr "" #: src/clients/compute/arcclean.cpp:140 msgid "Are you sure you want to clean jobs missing information?" msgstr "" #: src/clients/compute/arcclean.cpp:141 src/clients/compute/arcsync.cpp:189 msgid "y" msgstr "" #: src/clients/compute/arcclean.cpp:141 src/clients/compute/arcsync.cpp:189 msgid "n" msgstr "" #: src/clients/compute/arcclean.cpp:146 #, fuzzy msgid "Jobs missing information will not be cleaned!" 
msgstr "Klusterinformationstillhandahållare: %s" #: src/clients/compute/arcclean.cpp:162 src/clients/compute/arcresub.cpp:155 #: src/clients/compute/arcsub.cpp:208 src/clients/compute/arctest.cpp:302 #, fuzzy, c-format msgid "Warning: Failed to write job information to file (%s)" msgstr "Misslyckades med att inhämta statusinformation" #: src/clients/compute/arcclean.cpp:163 msgid "" " Run 'arcclean -s Undefined' to remove cleaned jobs from job list" msgstr "" #: src/clients/compute/arcclean.cpp:172 #, fuzzy, c-format msgid "Jobs processed: %d, deleted: %d" msgstr " Resultaten har raderats: %s" #: src/clients/compute/arcget.cpp:36 msgid "The arcget command is used for retrieving the results from a job." msgstr "arcget-kommandot används för att hämta resultatet av ett jobb." #: src/clients/compute/arcget.cpp:76 #, fuzzy, c-format msgid "Job download directory from user configuration file: %s " msgstr "Kan inte öppna inställningsfil." #: src/clients/compute/arcget.cpp:79 #, fuzzy msgid "Job download directory will be created in present working directory. " msgstr "nedladdningskatalog (jobbkatalogen kommer att skapas i denna katalog)" #: src/clients/compute/arcget.cpp:83 #, fuzzy, c-format msgid "Job download directory: %s " msgstr "Fel vid öppnande av katalog: %s" #: src/clients/compute/arcget.cpp:150 #, fuzzy, c-format msgid "Unable to create directory for storing results (%s) - %s" msgstr "Misslyckades med att skapa socket för port %s" #: src/clients/compute/arcget.cpp:160 #, fuzzy, c-format msgid "Results stored at: %s" msgstr " Resultaten har raderats: %s" #: src/clients/compute/arcget.cpp:172 src/clients/compute/arckill.cpp:140 msgid "Warning: Some jobs were not removed from server" msgstr "" #: src/clients/compute/arcget.cpp:173 src/clients/compute/arcget.cpp:180 msgid " Use arclean to remove retrieved jobs from job list" msgstr "" #: src/clients/compute/arcget.cpp:179 src/clients/compute/arckill.cpp:147 #: src/clients/compute/arcresub.cpp:185 #, fuzzy, c-format msgid "Warning: Failed removing jobs from file (%s)" msgstr "Varning: Misslyckades med att erhålla attribut från %s: %s" #: src/clients/compute/arcget.cpp:184 #, c-format msgid "" "Jobs processed: %d, successfully retrieved: %d, successfully cleaned: %d" msgstr "" #: src/clients/compute/arcget.cpp:188 #, c-format msgid "Jobs processed: %d, successfully retrieved: %d" msgstr "" #: src/clients/compute/arcinfo.cpp:34 #, fuzzy msgid "[resource ...]" msgstr "[jobb ...]" #: src/clients/compute/arcinfo.cpp:35 #, fuzzy msgid "" "The arcinfo command is used for obtaining the status of computing resources " "on the Grid." msgstr "arcget-kommandot används för att hämta resultatet av ett jobb." 
#: src/clients/compute/arcinfo.cpp:142 #, fuzzy msgid "Information endpoint" msgstr "Tilldelat nytt informationsdokument" #: src/clients/compute/arcinfo.cpp:149 #, fuzzy msgid "Submission endpoint" msgstr "Insändning returnerade fel: %s" #: src/clients/compute/arcinfo.cpp:151 #, fuzzy msgid "status" msgstr "statusstr" #: src/clients/compute/arcinfo.cpp:153 #, fuzzy msgid "interface" msgstr " Gränssnittstillägg: %s" #: src/clients/compute/arcinfo.cpp:172 #, fuzzy msgid "ERROR: Failed to retrieve information from the following endpoints:" msgstr "Misslyckades med att inhämta statusinformation" #: src/clients/compute/arcinfo.cpp:185 #, fuzzy msgid "ERROR: Failed to retrieve information" msgstr "Misslyckades med att ta bort metainformation" #: src/clients/compute/arcinfo.cpp:187 msgid "from the following endpoints:" msgstr "" #: src/clients/compute/arckill.cpp:34 msgid "The arckill command is used to kill running jobs." msgstr "arckill-kommandot används för att avbryta exekverande jobb." #: src/clients/compute/arckill.cpp:141 msgid " Use arcclean to remove retrieved jobs from job list" msgstr "" #: src/clients/compute/arckill.cpp:148 msgid "" " Run 'arcclean -s Undefined' to remove killed jobs from job list" msgstr "" #: src/clients/compute/arckill.cpp:151 #, c-format msgid "Jobs processed: %d, successfully killed: %d, successfully cleaned: %d" msgstr "" #: src/clients/compute/arckill.cpp:153 #, fuzzy, c-format msgid "Jobs processed: %d, successfully killed: %d" msgstr "Stängdes OK" #: src/clients/compute/arcrenew.cpp:128 #, c-format msgid "Jobs processed: %d, renewed: %d" msgstr "" #: src/clients/compute/arcresub.cpp:75 msgid "--same and --not-same cannot be specified together." msgstr "" #: src/clients/compute/arcresub.cpp:144 #, fuzzy msgid "No jobs to resubmit with the specified status" msgstr "Ingen jobbeskrivning angiven" #: src/clients/compute/arcresub.cpp:151 src/clients/compute/arcsub.cpp:194 #, c-format msgid "Job submitted with jobid: %s" msgstr "Jobb insänt med jobb-id: %s" #: src/clients/compute/arcresub.cpp:156 msgid " To recover missing jobs, run arcsync" msgstr "" #: src/clients/compute/arcresub.cpp:161 #, fuzzy, c-format msgid "Cannot write jobids to file (%s)" msgstr "Kan inte öppna jobbeskrivningsfil: %s" #: src/clients/compute/arcresub.cpp:172 #, c-format msgid "" "Resubmission of job (%s) succeeded, but killing the job failed - it will " "still appear in the job list" msgstr "" #: src/clients/compute/arcresub.cpp:181 #, c-format msgid "" "Resubmission of job (%s) succeeded, but cleaning the job failed - it will " "still appear in the job list" msgstr "" #: src/clients/compute/arcresub.cpp:186 msgid " Use arcclean to remove non-existing jobs" msgstr "" #: src/clients/compute/arcresub.cpp:193 #, fuzzy msgid "Job resubmission summary:" msgstr "Jobbinsändningssammanfattning:" #: src/clients/compute/arcresub.cpp:195 #, fuzzy, c-format msgid "%d of %d jobs were resubmitted" msgstr "%d av %d jobb sändes in" #: src/clients/compute/arcresub.cpp:197 #, fuzzy, c-format msgid "The following %d were not resubmitted" msgstr "Följande %d sändes inte in" #: src/clients/compute/arcresume.cpp:128 #, fuzzy, c-format msgid "Jobs processed: %d, resumed: %d" msgstr "process: metod: %s" #: src/clients/compute/arcstat.cpp:35 #, fuzzy msgid "" "The arcstat command is used for obtaining the status of jobs that have\n" "been submitted to Grid enabled resources." msgstr "" "arcstat-kommandot används fär att erhålla status hos jobb som sänts in till\n" "gridresurser och status hos gridkluster." 
#: src/clients/compute/arcstat.cpp:79 msgid "The 'sort' and 'rsort' flags cannot be specified at the same time." msgstr "" #: src/clients/compute/arcstat.cpp:149 #, fuzzy msgid "No jobs found, try later" msgstr "Inga jobbkontrollerare laddade" #: src/clients/compute/arcstat.cpp:176 #, c-format msgid "Status of %d jobs was queried, %d jobs returned information" msgstr "" #: src/clients/compute/arcsub.cpp:46 msgid "[filename ...]" msgstr "[filnamn ...]" #: src/clients/compute/arcsub.cpp:47 #, fuzzy msgid "" "The arcsub command is used for submitting jobs to Grid enabled computing\n" "resources." msgstr "arcsub-kommandot används för att sända in jobb till gridresurser." #: src/clients/compute/arcsub.cpp:99 msgid "No job description input specified" msgstr "Ingen jobbeskrivning angiven" #: src/clients/compute/arcsub.cpp:112 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:499 #, c-format msgid "Can not open job description file: %s" msgstr "Kan inte öppna jobbeskrivningsfil: %s" #: src/clients/compute/arcsub.cpp:140 src/clients/compute/arcsub.cpp:168 msgid "Invalid JobDescription:" msgstr "Ogiltig jobbeskrivning:" #: src/clients/compute/arcsub.cpp:200 #, fuzzy, c-format msgid "Cannot write job IDs to file (%s)" msgstr "Kan inte öppna jobbeskrivningsfil: %s" #: src/clients/compute/arcsub.cpp:205 src/clients/compute/arcsync.cpp:66 #, c-format msgid "Warning: Unable to open job list file (%s), unknown format" msgstr "" #: src/clients/compute/arcsub.cpp:210 src/clients/compute/arctest.cpp:304 msgid "To recover missing jobs, run arcsync" msgstr "" #: src/clients/compute/arcsub.cpp:217 msgid "Job submission summary:" msgstr "Jobbinsändningssammanfattning:" #: src/clients/compute/arcsub.cpp:219 #, c-format msgid "%d of %d jobs were submitted" msgstr "%d av %d jobb sändes in" #: src/clients/compute/arcsub.cpp:221 #, c-format msgid "The following %d were not submitted" msgstr "Följande %d sändes inte in" #: src/clients/compute/arcsub.cpp:228 msgid "Job nr." msgstr "" #: src/clients/compute/arcsub.cpp:268 #, c-format msgid "Removing endpoint %s: It has an unrequested interface (%s)." msgstr "" #: src/clients/compute/arcsub.cpp:282 #, fuzzy, c-format msgid "ERROR: Unable to load broker %s" msgstr "Misslyckades med att läsa object: %s" #: src/clients/compute/arcsub.cpp:286 #, fuzzy msgid "" "ERROR: Job submission aborted because no resource returned any information" msgstr "" "Jobbinsändning misslyckades p.g.a. att inget av de angivna klustren " "returnerade någon information" #: src/clients/compute/arcsub.cpp:290 msgid "ERROR: One or multiple job descriptions was not submitted." msgstr "" #: src/clients/compute/arcsub.cpp:304 #, c-format msgid "" "A computing resource using the GridFTP interface was requested, but\n" "%sthe corresponding plugin could not be loaded. Is the plugin installed?\n" "%sIf not, please install the package 'nordugrid-arc-plugins-globus'.\n" "%sDepending on your type of installation the package name might differ." msgstr "" #: src/clients/compute/arcsub.cpp:338 src/clients/compute/arctest.cpp:236 msgid "" "Unable to adapt job description to any resource, no resource information " "could be obtained." 
msgstr "" #: src/clients/compute/arcsub.cpp:339 src/clients/compute/arctest.cpp:237 #, fuzzy msgid "Original job description is listed below:" msgstr "Kan inte öppna jobbeskrivningsfil: %s" #: src/clients/compute/arcsub.cpp:351 #, c-format msgid "Dumping job description aborted: Unable to load broker %s" msgstr "" #: src/clients/compute/arcsub.cpp:368 src/clients/compute/arctest.cpp:317 #, fuzzy, c-format msgid "" "Unable to prepare job description according to needs of the target resource " "(%s)." msgstr "Submit: Misslyckades med att sända jobbeskrivning" #: src/clients/compute/arcsub.cpp:384 src/clients/compute/arctest.cpp:330 #, c-format msgid "" "An error occurred during the generation of job description to be sent to %s" msgstr "" #: src/clients/compute/arcsub.cpp:388 src/clients/compute/arctest.cpp:334 #, fuzzy, c-format msgid "Job description to be sent to %s:" msgstr "Jobbeskrivning som skall sändas: %s" #: src/clients/compute/arcsub.cpp:406 #, fuzzy msgid "" "Unable to prepare job description according to needs of the target resource." msgstr "Submit: Misslyckades med att sända jobbeskrivning" #: src/clients/compute/arcsync.cpp:76 msgid "Found the following jobs:" msgstr "" #: src/clients/compute/arcsync.cpp:86 msgid "Total number of jobs found: " msgstr "" #: src/clients/compute/arcsync.cpp:98 msgid "Found the following new jobs:" msgstr "" #: src/clients/compute/arcsync.cpp:108 msgid "Total number of new jobs found: " msgstr "" #: src/clients/compute/arcsync.cpp:113 #, fuzzy, c-format msgid "ERROR: Failed to write job information to file (%s)" msgstr "Misslyckades med att inhämta statusinformation" #: src/clients/compute/arcsync.cpp:140 msgid "" "The arcsync command synchronizes your local job list with the information " "at\n" "the given resources or index servers." msgstr "" #: src/clients/compute/arcsync.cpp:183 msgid "" "Synchronizing the local list of active jobs with the information in the\n" "information system can result in some inconsistencies. Very recently " "submitted\n" "jobs might not yet be present, whereas jobs very recently scheduled for\n" "deletion can still be present." msgstr "" #: src/clients/compute/arcsync.cpp:188 msgid "Are you sure you want to synchronize your local job list?" msgstr "" #: src/clients/compute/arcsync.cpp:193 #, fuzzy msgid "Cancelling synchronization request" msgstr "Skapar och sänder en förfrågan om att starta ett jobb" #: src/clients/compute/arcsync.cpp:203 msgid "" "No services specified. Please configure default services in the client " "configuration,or specify a cluster or index (-c or -g options, see arcsync -" "h)." msgstr "" #: src/clients/compute/arctest.cpp:55 msgid " " msgstr "" #: src/clients/compute/arctest.cpp:56 #, fuzzy msgid "The arctest command is used for testing clusters as resources." msgstr "arcget-kommandot används för att hämta resultatet av ett jobb." #: src/clients/compute/arctest.cpp:68 msgid "" "Nothing to do:\n" "you have to either specify a test job id with -J (--job)\n" "or query information about the certificates with -E (--certificate)\n" msgstr "" #: src/clients/compute/arctest.cpp:75 msgid "" "For the 1st test job you also have to specify a runtime value with -r (--" "runtime) option." 
msgstr "" #: src/clients/compute/arctest.cpp:109 #, fuzzy msgid "Certificate information:" msgstr "Köinformation" #: src/clients/compute/arctest.cpp:113 #, fuzzy msgid "No user-certificate found" msgstr "sökväg till certifikatfil" #: src/clients/compute/arctest.cpp:116 #, fuzzy, c-format msgid "Certificate: %s" msgstr "Certifikatformat för BIO är: %d" #: src/clients/compute/arctest.cpp:118 #, fuzzy, c-format msgid "Subject name: %s" msgstr "subjekt: %s" #: src/clients/compute/arctest.cpp:119 #, fuzzy, c-format msgid "Valid until: %s" msgstr "Proxy är ingen fil: %s" #: src/clients/compute/arctest.cpp:123 #, fuzzy msgid "Unable to determine certificate information" msgstr "Misslyckades med att ta bort metainformation" #: src/clients/compute/arctest.cpp:127 #, fuzzy msgid "Proxy certificate information:" msgstr "Köinformation" #: src/clients/compute/arctest.cpp:129 msgid "No proxy found" msgstr "" #: src/clients/compute/arctest.cpp:132 #, fuzzy, c-format msgid "Proxy: %s" msgstr "Källa: %s" #: src/clients/compute/arctest.cpp:133 #, fuzzy, c-format msgid "Proxy-subject: %s" msgstr "subjekt: %s" #: src/clients/compute/arctest.cpp:135 #, fuzzy msgid "Valid for: Proxy expired" msgstr "Misslyckades med att läsa proxy fil: %s" #: src/clients/compute/arctest.cpp:137 #, fuzzy msgid "Valid for: Proxy not valid" msgstr "Misslyckades med att läsa proxy fil: %s" #: src/clients/compute/arctest.cpp:139 #, fuzzy, c-format msgid "Valid for: %s" msgstr "Ogiltig URL: %s" #: src/clients/compute/arctest.cpp:144 #, fuzzy, c-format msgid "Certificate issuer: %s" msgstr "Certifikat är ingen fil: %s" #: src/clients/compute/arctest.cpp:148 #, fuzzy msgid "CA-certificates installed:" msgstr "Certifikat är ingen fil: %s" #: src/clients/compute/arctest.cpp:170 #, fuzzy msgid "Unable to detect if issuer certificate is installed." msgstr "" "Misslyckades med att skriva det signerade proxycertifikatet till en fil" #: src/clients/compute/arctest.cpp:173 msgid "Your issuer's certificate is not installed" msgstr "" #: src/clients/compute/arctest.cpp:191 #, c-format msgid "No test-job, with ID \"%d\"" msgstr "" #: src/clients/compute/arctest.cpp:209 #, fuzzy, c-format msgid "Unable to load broker %s" msgstr "Misslyckades med att läsa object: %s" #: src/clients/compute/arctest.cpp:212 #: src/hed/libs/compute/BrokerPlugin.cpp:106 #, c-format msgid "Broker %s loaded" msgstr "" #: src/clients/compute/arctest.cpp:234 #, fuzzy msgid "Test aborted because no resource returned any information" msgstr "" "Jobbinsändning misslyckades p.g.a. 
att inget av de angivna klustren " "returnerade någon information" #: src/clients/compute/arctest.cpp:247 msgid "" "ERROR: Test aborted because no suitable resources were found for the test-job" msgstr "" #: src/clients/compute/arctest.cpp:249 msgid "" "ERROR: Dumping job description aborted because no suitable resources were " "found for the test-job" msgstr "" #: src/clients/compute/arctest.cpp:258 #, c-format msgid "Submitting test-job %d:" msgstr "" #: src/clients/compute/arctest.cpp:262 #, c-format msgid "Client version: nordugrid-arc-%s" msgstr "" #: src/clients/compute/arctest.cpp:269 #, fuzzy, c-format msgid "Cannot write jobid (%s) to file (%s)" msgstr "Kan inte öppna jobbeskrivningsfil: %s" #: src/clients/compute/arctest.cpp:270 #, fuzzy, c-format msgid "Test submitted with jobid: %s" msgstr "Jobb insänt med jobb-id: %s" #: src/clients/compute/arctest.cpp:285 #, fuzzy, c-format msgid "Computing service: %s" msgstr "Avvisar service: %s" #: src/clients/compute/arctest.cpp:291 #, fuzzy msgid "Test failed, no more possible targets" msgstr "Jobbinsändning misslyckades, inga fler möjliga destinationer" #: src/clients/compute/utils.cpp:118 #, c-format msgid "Types of execution services %s is able to submit jobs to:" msgstr "" #: src/clients/compute/utils.cpp:121 #, c-format msgid "Types of registry services which %s is able collect information from:" msgstr "" #: src/clients/compute/utils.cpp:124 #, c-format msgid "" "Types of local information services which %s is able collect information " "from:" msgstr "" #: src/clients/compute/utils.cpp:127 #, c-format msgid "" "Types of local information services which %s is able collect job information " "from:" msgstr "" #: src/clients/compute/utils.cpp:130 #, c-format msgid "Types of services %s is able to manage jobs at:" msgstr "" #: src/clients/compute/utils.cpp:133 #, fuzzy, c-format msgid "Job description languages supported by %s:" msgstr "[jobb ...]" #: src/clients/compute/utils.cpp:136 #, c-format msgid "Brokers available to %s:" msgstr "" #: src/clients/compute/utils.cpp:159 #, c-format msgid "" "Default broker (%s) is not available. When using %s a broker should be " "specified explicitly (-b option)." msgstr "" #: src/clients/compute/utils.cpp:169 msgid "Proxy expired. Job submission aborted. Please run 'arcproxy'!" msgstr "" #: src/clients/compute/utils.cpp:174 msgid "" "Cannot find any proxy. This application currently cannot run without a " "proxy.\n" " If you have the proxy file in a non-default location,\n" " please make sure the path is specified in the client configuration file.\n" " If you don't have a proxy yet, please run 'arcproxy'!" msgstr "" #: src/clients/compute/utils.cpp:278 msgid "" "select one or more computing elements: name can be an alias for a single CE, " "a group of CEs or a URL" msgstr "" #: src/clients/compute/utils.cpp:280 src/clients/compute/utils.cpp:297 #: src/clients/compute/utils.cpp:404 #, fuzzy msgid "name" msgstr "[-]namn" #: src/clients/compute/utils.cpp:285 msgid "" "the computing element specified by URL at the command line should be queried " "using this information interface (possible options: org.nordugrid.ldapng, " "org.nordugrid.ldapglue2, org.nordugrid.wsrfglue2, org.ogf.glue.emies." 
"resourceinfo)" msgstr "" #: src/clients/compute/utils.cpp:289 #, fuzzy msgid "interfacename" msgstr " Gränssnittsnamn: %s" #: src/clients/compute/utils.cpp:295 msgid "" "selecting a computing element for the new jobs with a URL or an alias, or " "selecting a group of computing elements with the name of the group" msgstr "" #: src/clients/compute/utils.cpp:303 msgid "force migration, ignore kill failure" msgstr "" #: src/clients/compute/utils.cpp:309 msgid "keep the files on the server (do not clean)" msgstr "behåll filerna på servern (städa inte upp)" #: src/clients/compute/utils.cpp:315 msgid "do not ask for verification" msgstr "" #: src/clients/compute/utils.cpp:319 msgid "truncate the joblist before synchronizing" msgstr "" #: src/clients/compute/utils.cpp:325 src/clients/data/arcls.cpp:287 msgid "long format (more information)" msgstr "långt format (mer information)" #: src/clients/compute/utils.cpp:331 msgid "print a list of services configured in the client.conf" msgstr "" #: src/clients/compute/utils.cpp:337 msgid "show the stdout of the job (default)" msgstr "visa jobbets stdout (förval)" #: src/clients/compute/utils.cpp:341 msgid "show the stderr of the job" msgstr "visa jobbets stderr" #: src/clients/compute/utils.cpp:345 #, fuzzy msgid "show the CE's error log of the job" msgstr "visa jobbets gridmanager-fellogg" #: src/clients/compute/utils.cpp:351 msgid "" "download directory (the job directory will be created in this directory)" msgstr "nedladdningskatalog (jobbkatalogen kommer att skapas i denna katalog)" #: src/clients/compute/utils.cpp:353 msgid "dirname" msgstr "katalognamn" #: src/clients/compute/utils.cpp:357 msgid "use the jobname instead of the short ID as the job directory name" msgstr "" #: src/clients/compute/utils.cpp:362 msgid "force download (overwrite existing job directory)" msgstr "" #: src/clients/compute/utils.cpp:368 msgid "instead of the status only the IDs of the selected jobs will be printed" msgstr "" #: src/clients/compute/utils.cpp:372 msgid "sort jobs according to jobid, submissiontime or jobname" msgstr "" #: src/clients/compute/utils.cpp:373 src/clients/compute/utils.cpp:376 msgid "order" msgstr "" #: src/clients/compute/utils.cpp:375 msgid "reverse sorting of jobs according to jobid, submissiontime or jobname" msgstr "" #: src/clients/compute/utils.cpp:379 msgid "show jobs where status information is unavailable" msgstr "" #: src/clients/compute/utils.cpp:385 msgid "resubmit to the same resource" msgstr "" #: src/clients/compute/utils.cpp:389 msgid "do not resubmit to the same resource" msgstr "" #: src/clients/compute/utils.cpp:395 msgid "" "remove the job from the local list of jobs even if the job is not found in " "the infosys" msgstr "" "ta bort jobbet från den lokala jobblistan även om jobbet inte hittas i " "informationssystemet" #: src/clients/compute/utils.cpp:402 msgid "" "select one or more registries: name can be an alias for a single registry, a " "group of registries or a URL" msgstr "" #: src/clients/compute/utils.cpp:410 msgid "submit test job given by the number" msgstr "" #: src/clients/compute/utils.cpp:411 src/clients/compute/utils.cpp:415 #, fuzzy msgid "int" msgstr "minut" #: src/clients/compute/utils.cpp:414 msgid "test job runtime specified by the number" msgstr "" #: src/clients/compute/utils.cpp:421 msgid "only select jobs whose status is statusstr" msgstr "välj endast jobb vars status är statusstr" #: src/clients/compute/utils.cpp:422 msgid "statusstr" msgstr "statusstr" #: src/clients/compute/utils.cpp:428 msgid "all 
jobs" msgstr "alla jobb" #: src/clients/compute/utils.cpp:434 msgid "jobdescription string describing the job to be submitted" msgstr "jobbeskrivningssträng som beskriver jobbet som ska sändas in" #: src/clients/compute/utils.cpp:436 src/clients/compute/utils.cpp:442 #: src/clients/credentials/arcproxy.cpp:369 #: src/clients/credentials/arcproxy.cpp:376 #: src/clients/credentials/arcproxy.cpp:394 #: src/clients/credentials/arcproxy.cpp:401 #: src/clients/credentials/arcproxy.cpp:419 #: src/clients/credentials/arcproxy.cpp:423 #: src/clients/credentials/arcproxy.cpp:438 #: src/clients/credentials/arcproxy.cpp:448 #: src/clients/credentials/arcproxy.cpp:452 #: src/clients/credentials/arcproxyalt.cpp:369 #: src/clients/credentials/arcproxyalt.cpp:376 #: src/clients/credentials/arcproxyalt.cpp:399 #: src/clients/credentials/arcproxyalt.cpp:415 #: src/clients/credentials/arcproxyalt.cpp:419 #: src/clients/credentials/arcproxyalt.cpp:429 #: src/clients/credentials/arcproxyalt.cpp:433 msgid "string" msgstr "sträng" #: src/clients/compute/utils.cpp:440 msgid "jobdescription file describing the job to be submitted" msgstr "jobbeskrivningsfil som beskriver jobbet som ska sändas in" #: src/clients/compute/utils.cpp:448 msgid "select broker method (list available brokers with --listplugins flag)" msgstr "" #: src/clients/compute/utils.cpp:449 msgid "broker" msgstr "mäklare" #: src/clients/compute/utils.cpp:452 msgid "the IDs of the submitted jobs will be appended to this file" msgstr "" #: src/clients/compute/utils.cpp:453 src/clients/compute/utils.cpp:475 #: src/clients/compute/utils.cpp:512 src/clients/compute/utils.cpp:520 #: src/clients/credentials/arcproxy.cpp:461 #: src/clients/credentials/arcproxyalt.cpp:447 src/clients/data/arccp.cpp:627 #: src/clients/data/arcls.cpp:332 src/clients/data/arcmkdir.cpp:111 #: src/clients/data/arcrename.cpp:122 src/clients/data/arcrm.cpp:137 #: src/clients/echo/arcecho.cpp:47 src/clients/wsrf/arcwsrf.cpp:57 msgid "filename" msgstr "filnamn" #: src/clients/compute/utils.cpp:457 msgid "" "only use this interface for submitting (e.g. org.nordugrid.gridftpjob, org." "ogf.glue.emies.activitycreation, org.ogf.bes)" msgstr "" #: src/clients/compute/utils.cpp:459 src/clients/compute/utils.cpp:501 #, fuzzy msgid "InterfaceName" msgstr " Gränssnittsnamn: %s" #: src/clients/compute/utils.cpp:466 msgid "skip the service with the given URL during service discovery" msgstr "" #: src/clients/compute/utils.cpp:467 src/clients/compute/utils.cpp:480 #: src/clients/data/arccp.cpp:607 msgid "URL" msgstr "" #: src/clients/compute/utils.cpp:474 #, fuzzy msgid "a file containing a list of jobIDs" msgstr "fil som innehåller en jobblista" #: src/clients/compute/utils.cpp:479 msgid "skip jobs which are on a computing element with a given URL" msgstr "" #: src/clients/compute/utils.cpp:485 msgid "submit jobs as dry run (no submission to batch system)" msgstr "" #: src/clients/compute/utils.cpp:488 msgid "submit directly - no resource discovery or matchmaking" msgstr "" #: src/clients/compute/utils.cpp:492 msgid "" "do not submit - dump job description in the language accepted by the target" msgstr "" #: src/clients/compute/utils.cpp:499 msgid "" "only get information about executon targets which support this job " "submission interface (e.g. org.nordugrid.gridftpjob, org.ogf.glue.emies." 
"activitycreation, org.ogf.bes)" msgstr "" #: src/clients/compute/utils.cpp:506 msgid "prints info about installed user- and CA-certificates" msgstr "" #: src/clients/compute/utils.cpp:511 #, fuzzy, c-format msgid "the file storing information about active jobs (default %s)" msgstr "Misslyckades med att inhämta statusinformation" #: src/clients/compute/utils.cpp:519 src/clients/credentials/arcproxy.cpp:460 #: src/clients/credentials/arcproxyalt.cpp:446 src/clients/data/arccp.cpp:626 #: src/clients/data/arcls.cpp:331 src/clients/data/arcmkdir.cpp:110 #: src/clients/data/arcrename.cpp:121 src/clients/data/arcrm.cpp:136 #: src/clients/echo/arcecho.cpp:46 src/clients/wsrf/arcwsrf.cpp:56 #, fuzzy msgid "configuration file (default ~/.arc/client.conf)" msgstr "inställningsfil (förval ~/.arc/client.xml)" #: src/clients/compute/utils.cpp:522 src/clients/credentials/arcproxy.cpp:455 #: src/clients/credentials/arcproxyalt.cpp:441 src/clients/data/arccp.cpp:621 #: src/clients/data/arcls.cpp:326 src/clients/data/arcmkdir.cpp:105 #: src/clients/data/arcrename.cpp:116 src/clients/data/arcrm.cpp:131 #: src/clients/echo/arcecho.cpp:41 src/clients/wsrf/arcwsrf.cpp:51 msgid "timeout in seconds (default 20)" msgstr "timeout i sekunder (förval 20)" #: src/clients/compute/utils.cpp:523 src/clients/credentials/arcproxy.cpp:456 #: src/clients/credentials/arcproxyalt.cpp:442 src/clients/data/arccp.cpp:622 #: src/clients/data/arcls.cpp:327 src/clients/data/arcmkdir.cpp:106 #: src/clients/data/arcrename.cpp:117 src/clients/data/arcrm.cpp:132 #: src/clients/echo/arcecho.cpp:42 src/clients/wsrf/arcwsrf.cpp:52 msgid "seconds" msgstr "sekunder" #: src/clients/compute/utils.cpp:526 msgid "list the available plugins" msgstr "" #: src/clients/compute/utils.cpp:530 src/clients/credentials/arcproxy.cpp:465 #: src/clients/credentials/arcproxyalt.cpp:451 src/clients/data/arccp.cpp:631 #: src/clients/data/arcls.cpp:336 src/clients/data/arcmkdir.cpp:115 #: src/clients/data/arcrename.cpp:126 src/clients/data/arcrm.cpp:141 #: src/clients/echo/arcecho.cpp:51 src/clients/saml/saml_assertion_init.cpp:52 #: src/clients/wsrf/arcwsrf.cpp:61 #: src/hed/libs/compute/test_jobdescription.cpp:38 #: src/services/a-rex/grid-manager/inputcheck.cpp:81 #, fuzzy msgid "FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG" msgstr "FATAL, ERROR, WARNING, INFO, DEBUG eller VERBOSE" #: src/clients/compute/utils.cpp:531 src/clients/credentials/arcproxy.cpp:466 #: src/clients/credentials/arcproxyalt.cpp:452 src/clients/data/arccp.cpp:632 #: src/clients/data/arcls.cpp:337 src/clients/data/arcmkdir.cpp:116 #: src/clients/data/arcrename.cpp:127 src/clients/data/arcrm.cpp:142 #: src/clients/echo/arcecho.cpp:52 src/clients/saml/saml_assertion_init.cpp:53 #: src/clients/wsrf/arcwsrf.cpp:62 #: src/hed/libs/compute/test_jobdescription.cpp:39 #: src/services/a-rex/grid-manager/inputcheck.cpp:82 msgid "debuglevel" msgstr "debugnivå" #: src/clients/compute/utils.cpp:533 src/clients/credentials/arcproxy.cpp:469 #: src/clients/credentials/arcproxyalt.cpp:455 src/clients/data/arccp.cpp:635 #: src/clients/data/arcls.cpp:340 src/clients/data/arcmkdir.cpp:119 #: src/clients/data/arcrename.cpp:130 src/clients/data/arcrm.cpp:145 #: src/clients/echo/arcecho.cpp:55 src/clients/saml/saml_assertion_init.cpp:56 #: src/clients/wsrf/arcwsrf.cpp:65 msgid "print version information" msgstr "skriv ut versionsinformation" #: src/clients/credentials/arcproxy.cpp:172 #: src/hed/libs/credential/ARCProxyUtil.cpp:1248 #, fuzzy, c-format msgid "There are %d user certificates existing in the NSS 
database" msgstr "Peer-certifikat kan inte extraheras" #: src/clients/credentials/arcproxy.cpp:188 #: src/hed/libs/credential/ARCProxyUtil.cpp:1264 #, c-format msgid "Number %d is with nickname: %s%s" msgstr "" #: src/clients/credentials/arcproxy.cpp:197 #: src/hed/libs/credential/ARCProxyUtil.cpp:1273 #, fuzzy, c-format msgid " expiration time: %s " msgstr "Destinaltion: %s" #: src/clients/credentials/arcproxy.cpp:201 #: src/hed/libs/credential/ARCProxyUtil.cpp:1277 #, fuzzy, c-format msgid " certificate dn: %s" msgstr "Använder certifikatfil: %s" #: src/clients/credentials/arcproxy.cpp:202 #: src/hed/libs/credential/ARCProxyUtil.cpp:1278 #, fuzzy, c-format msgid " issuer dn: %s" msgstr "\tCachejobbkatalog : %s" #: src/clients/credentials/arcproxy.cpp:203 #: src/hed/libs/credential/ARCProxyUtil.cpp:1279 #, c-format msgid " serial number: %d" msgstr "" #: src/clients/credentials/arcproxy.cpp:207 #: src/hed/libs/credential/ARCProxyUtil.cpp:1283 #, c-format msgid "Please choose the one you would use (1-%d): " msgstr "" #: src/clients/credentials/arcproxy.cpp:272 #: src/clients/credentials/arcproxyalt.cpp:317 msgid "" "The arcproxy command creates a proxy from a key/certificate pair which can\n" "then be used to access grid resources." msgstr "" #: src/clients/credentials/arcproxy.cpp:274 msgid "" "Supported constraints are:\n" " validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start " "from now)\n" " validityEnd=time\n" " validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod and " "validityEnd\n" " not specified, the default is 12 hours for local proxy, and 168 hours " "for delegated\n" " proxy on myproxy server)\n" " vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, the " "default\n" " is the minimum value of 12 hours and validityPeriod)\n" " myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy " "server,\n" " e.g. 43200 or 12h or 12H; if not specified, the default is the minimum " "value of\n" " 12 hours and validityPeriod (which is lifetime of the delegated proxy on " "myproxy server))\n" " proxyPolicy=policy content\n" " proxyPolicyFile=policy file\n" " keybits=number - length of the key to generate. Default is 1024 bits.\n" " Special value 'inherit' is to use key length of signing certificate.\n" " signingAlgorithm=name - signing algorithm to use for signing public key of " "proxy.\n" " Possible values are sha1, sha2 (alias for sha256), sha224, sha256, " "sha384, sha512\n" " and inherit (use algorithm of signing certificate). 
Default is inherit.\n" " With old systems, only sha1 is acceptable.\n" "\n" "Supported information item names are:\n" " subject - subject name of proxy certificate.\n" " identity - identity subject name of proxy certificate.\n" " issuer - issuer subject name of proxy certificate.\n" " ca - subject name of CA ehich issued initial certificate\n" " path - file system path to file containing proxy.\n" " type - type of proxy certificate.\n" " validityStart - timestamp when proxy validity starts.\n" " validityEnd - timestamp when proxy validity ends.\n" " validityPeriod - duration of proxy validity in seconds.\n" " validityLeft - duration of proxy validity left in seconds.\n" " vomsVO - VO name represented by VOMS attribute\n" " vomsSubject - subject of certificate for which VOMS attribute is issued\n" " vomsIssuer - subject of service which issued VOMS certificate\n" " vomsACvalidityStart - timestamp when VOMS attribute validity starts.\n" " vomsACvalidityEnd - timestamp when VOMS attribute validity ends.\n" " vomsACvalidityPeriod - duration of VOMS attribute validity in seconds.\n" " vomsACvalidityLeft - duration of VOMS attribute validity left in seconds.\n" " proxyPolicy\n" " keybits - size of proxy certificate key in bits.\n" " signingAlgorithm - algorithm used to sign proxy certificate.\n" "Items are printed in requested order and are separated by newline.\n" "If item has multiple values they are printed in same line separated by |.\n" "\n" "Supported password destinations are:\n" " key - for reading private key\n" " myproxy - for accessing credentials at MyProxy service\n" " myproxynew - for creating credentials at MyProxy service\n" " all - for any purspose.\n" "\n" "Supported password sources are:\n" " quoted string (\"password\") - explicitly specified password\n" " int - interactively request password from console\n" " stdin - read password from standard input delimited by newline\n" " file:filename - read password from file named filename\n" " stream:# - read password from input stream number #.\n" " Currently only 0 (standard input) is supported.\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:334 #: src/clients/credentials/arcproxyalt.cpp:334 #, fuzzy msgid "path to the proxy file" msgstr "sökväg till proxyfil" #: src/clients/credentials/arcproxy.cpp:335 #: src/clients/credentials/arcproxy.cpp:339 #: src/clients/credentials/arcproxy.cpp:343 #: src/clients/credentials/arcproxy.cpp:347 #: src/clients/credentials/arcproxy.cpp:351 #: src/clients/credentials/arcproxy.cpp:355 #: src/clients/credentials/arcproxyalt.cpp:335 #: src/clients/credentials/arcproxyalt.cpp:339 #: src/clients/credentials/arcproxyalt.cpp:343 #: src/clients/credentials/arcproxyalt.cpp:347 #: src/clients/credentials/arcproxyalt.cpp:351 #: src/clients/credentials/arcproxyalt.cpp:355 src/clients/data/arccp.cpp:584 #: src/clients/saml/saml_assertion_init.cpp:48 msgid "path" msgstr "sökväg" #: src/clients/credentials/arcproxy.cpp:338 #: src/clients/credentials/arcproxyalt.cpp:338 msgid "" "path to the certificate file, it can be either PEM, DER, or PKCS12 formated" msgstr "" #: src/clients/credentials/arcproxy.cpp:342 #: src/clients/credentials/arcproxyalt.cpp:342 msgid "" "path to the private key file, if the certificate is in PKCS12 format, then " "no need to give private key" msgstr "" #: src/clients/credentials/arcproxy.cpp:346 #: src/clients/credentials/arcproxyalt.cpp:346 #, fuzzy msgid "" "path to the trusted certificate directory, only needed for the VOMS client " "functionality" msgstr "sökväg till 
certifikatfil" #: src/clients/credentials/arcproxy.cpp:350 #: src/clients/credentials/arcproxyalt.cpp:350 #, fuzzy msgid "" "path to the top directory of VOMS *.lsc files, only needed for the VOMS " "client functionality" msgstr "sökväg till certifikatfil" #: src/clients/credentials/arcproxy.cpp:354 #: src/clients/credentials/arcproxyalt.cpp:354 #, fuzzy msgid "path to the VOMS server configuration file" msgstr "Kan inte öppna inställningsfil." #: src/clients/credentials/arcproxy.cpp:358 #: src/clients/credentials/arcproxyalt.cpp:358 msgid "" "voms<:command>. Specify VOMS server (More than one VOMS server \n" " can be specified like this: --voms VOa:command1 --voms VOb:" "command2). \n" " :command is optional, and is used to ask for specific " "attributes(e.g: roles)\n" " command options are:\n" " all --- put all of this DN's attributes into AC; \n" " list ---list all of the DN's attribute, will not create AC " "extension; \n" " /Role=yourRole --- specify the role, if this DN \n" " has such a role, the role will be put into " "AC \n" " /voname/groupname/Role=yourRole --- specify the VO, group and " "role; if this DN \n" " has such a role, the role will be put into " "AC \n" msgstr "" #: src/clients/credentials/arcproxy.cpp:372 #: src/clients/credentials/arcproxyalt.cpp:372 msgid "" "group<:role>. Specify ordering of attributes \n" " Example: --order /knowarc.eu/coredev:Developer,/knowarc.eu/" "testers:Tester \n" " or: --order /knowarc.eu/coredev:Developer --order /knowarc.eu/" "testers:Tester \n" " Note that it does not make sense to specify the order if you have two or " "more different VOMS servers specified" msgstr "" #: src/clients/credentials/arcproxy.cpp:379 #: src/clients/credentials/arcproxyalt.cpp:379 msgid "use GSI communication protocol for contacting VOMS services" msgstr "" #: src/clients/credentials/arcproxy.cpp:382 #: src/clients/credentials/arcproxyalt.cpp:382 msgid "" "use HTTP communication protocol for contacting VOMS services that provide " "RESTful access \n" " Note for RESTful access, 'list' command and multiple VOMS " "server are not supported\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:388 msgid "" "this option is not functional (old GSI proxies are not supported anymore)" msgstr "" #: src/clients/credentials/arcproxy.cpp:391 msgid "print all information about this proxy." msgstr "" #: src/clients/credentials/arcproxy.cpp:394 msgid "print selected information about this proxy." msgstr "" #: src/clients/credentials/arcproxy.cpp:397 #: src/clients/credentials/arcproxyalt.cpp:395 msgid "remove proxy" msgstr "" #: src/clients/credentials/arcproxy.cpp:400 msgid "" "username to MyProxy server (if missing subject of user certificate is used)" msgstr "" #: src/clients/credentials/arcproxy.cpp:405 #: src/clients/credentials/arcproxyalt.cpp:402 msgid "" "don't prompt for a credential passphrase, when retrieve a \n" " credential from on MyProxy server. \n" " The precondition of this choice is the credential is PUT onto\n" " the MyProxy server without a passphrase by using -R (--" "retrievable_by_cert) \n" " option when being PUTing onto Myproxy server. \n" " This option is specific for the GET command when contacting " "Myproxy server." msgstr "" #: src/clients/credentials/arcproxy.cpp:416 #: src/clients/credentials/arcproxyalt.cpp:412 msgid "" "Allow specified entity to retrieve credential without passphrase.\n" " This option is specific for the PUT command when contacting " "Myproxy server." 
msgstr "" #: src/clients/credentials/arcproxy.cpp:422 #: src/clients/credentials/arcproxyalt.cpp:418 #, fuzzy msgid "hostname[:port] of MyProxy server" msgstr "sökväg till proxyfil" #: src/clients/credentials/arcproxy.cpp:427 msgid "" "command to MyProxy server. The command can be PUT, GET, INFO, NEWPASS or " "DESTROY.\n" " PUT -- put a delegated credentials to the MyProxy server; \n" " GET -- get a delegated credentials from the MyProxy server; \n" " INFO -- get and present information about credentials stored " "at the MyProxy server; \n" " NEWPASS -- change password protecting credentials stored at " "the MyProxy server; \n" " DESTROY -- wipe off credentials stored at the MyProxy " "server; \n" " Local credentials (certificate and key) are not necessary " "except in case of PUT. \n" " MyProxy functionality can be used together with VOMS " "functionality.\n" " --voms and --vomses can be used for Get command if VOMS " "attributes\n" " is required to be included in the proxy.\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:442 msgid "" "use NSS credential database in default Mozilla profiles, \n" " including Firefox, Seamonkey and Thunderbird.\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:447 #: src/clients/credentials/arcproxyalt.cpp:432 msgid "proxy constraints" msgstr "proxyvillkor" #: src/clients/credentials/arcproxy.cpp:451 msgid "password destination=password source" msgstr "" #: src/clients/credentials/arcproxy.cpp:500 #: src/clients/credentials/arcproxy.cpp:1161 #: src/clients/credentials/arcproxyalt.cpp:513 #: src/clients/credentials/arcproxyalt.cpp:556 #, fuzzy msgid "Failed configuration initialization." msgstr "Misslyckades med att initiera inställningar" #: src/clients/credentials/arcproxy.cpp:518 #: src/clients/credentials/arcproxyalt.cpp:563 msgid "" "Failed to find certificate and/or private key or files have improper " "permissions or ownership." msgstr "" #: src/clients/credentials/arcproxy.cpp:519 #: src/clients/credentials/arcproxy.cpp:531 #: src/clients/credentials/arcproxyalt.cpp:564 #: src/clients/credentials/arcproxyalt.cpp:574 msgid "You may try to increase verbosity to get more information." msgstr "" #: src/clients/credentials/arcproxy.cpp:527 #: src/clients/credentials/arcproxyalt.cpp:570 #, fuzzy msgid "Failed to find CA certificates" msgstr "Misslyckades med att signera proxycertifikatet" #: src/clients/credentials/arcproxy.cpp:528 #: src/clients/credentials/arcproxyalt.cpp:571 msgid "" "Cannot find the CA certificates directory path, please set environment " "variable X509_CERT_DIR, or cacertificatesdirectory in a configuration file." msgstr "" #: src/clients/credentials/arcproxy.cpp:532 #: src/clients/credentials/arcproxyalt.cpp:575 msgid "" "The CA certificates directory is required for contacting VOMS and MyProxy " "servers." msgstr "" #: src/clients/credentials/arcproxy.cpp:544 msgid "" "$X509_VOMS_FILE, and $X509_VOMSES are not set;\n" "User has not specified the location for vomses information;\n" "There is also not vomses location information in user's configuration file;\n" "Can not find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses,\n" "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/" "vomses,\n" "/etc/vomses, /etc/grid-security/vomses, and the location at the " "corresponding sub-directory" msgstr "" #: src/clients/credentials/arcproxy.cpp:589 #: src/clients/credentials/arcproxyalt.cpp:604 src/clients/echo/arcecho.cpp:84 #, fuzzy msgid "Wrong number of arguments!" 
msgstr "Fel antal parametrar angivna" #: src/clients/credentials/arcproxy.cpp:597 #: src/clients/credentials/arcproxy.cpp:618 #: src/clients/credentials/arcproxyalt.cpp:612 #: src/clients/credentials/arcproxyalt.cpp:632 msgid "" "Cannot find the path of the proxy file, please setup environment " "X509_USER_PROXY, or proxypath in a configuration file" msgstr "" #: src/clients/credentials/arcproxy.cpp:604 #: src/clients/credentials/arcproxyalt.cpp:621 #, fuzzy, c-format msgid "Cannot remove proxy file at %s" msgstr "Kan inte läsa proxyfil: %s (%s)" #: src/clients/credentials/arcproxy.cpp:606 #: src/clients/credentials/arcproxyalt.cpp:617 #, c-format msgid "Cannot remove proxy file at %s, because it's not there" msgstr "" #: src/clients/credentials/arcproxy.cpp:624 #: src/clients/credentials/arcproxyalt.cpp:638 #, c-format msgid "" "Cannot find file at %s for getting the proxy. Please make sure this file " "exists." msgstr "" #: src/clients/credentials/arcproxy.cpp:630 #: src/clients/credentials/arcproxyalt.cpp:651 #, fuzzy, c-format msgid "Subject: %s" msgstr "subjekt: %s" #: src/clients/credentials/arcproxy.cpp:631 #: src/clients/credentials/arcproxyalt.cpp:652 #, fuzzy, c-format msgid "Issuer: %s" msgstr " Utfärdar-CA: %s" #: src/clients/credentials/arcproxy.cpp:632 #: src/clients/credentials/arcproxyalt.cpp:653 #, fuzzy, c-format msgid "Identity: %s" msgstr "Identitetsnamn: %s" #: src/clients/credentials/arcproxy.cpp:634 #: src/clients/credentials/arcproxyalt.cpp:657 #, fuzzy msgid "Time left for proxy: Proxy expired" msgstr "Misslyckades med att läsa proxy fil: %s" #: src/clients/credentials/arcproxy.cpp:636 #: src/clients/credentials/arcproxyalt.cpp:659 #, fuzzy msgid "Time left for proxy: Proxy not valid yet" msgstr "Misslyckades med att läsa proxy fil: %s" #: src/clients/credentials/arcproxy.cpp:638 #: src/clients/credentials/arcproxyalt.cpp:661 #, fuzzy, c-format msgid "Time left for proxy: %s" msgstr "Misslyckades med att läsa proxy fil: %s" #: src/clients/credentials/arcproxy.cpp:639 #: src/clients/credentials/arcproxyalt.cpp:663 #, fuzzy, c-format msgid "Proxy path: %s" msgstr "Källa: %s" #: src/clients/credentials/arcproxy.cpp:640 #, fuzzy, c-format msgid "Proxy type: %s" msgstr "Källa: %s" #: src/clients/credentials/arcproxy.cpp:641 #, fuzzy, c-format msgid "Proxy key length: %i" msgstr "Källa: %s" #: src/clients/credentials/arcproxy.cpp:642 #, fuzzy, c-format msgid "Proxy signature: %s" msgstr "Peer-namn: %s" #: src/clients/credentials/arcproxy.cpp:651 #: src/clients/credentials/arcproxyalt.cpp:675 #, fuzzy msgid "AC extension information for VO " msgstr "Misslyckades med att inhämta statusinformation" #: src/clients/credentials/arcproxy.cpp:654 #: src/clients/credentials/arcproxyalt.cpp:678 msgid "Error detected while parsing this AC" msgstr "" #: src/clients/credentials/arcproxy.cpp:667 #: src/clients/credentials/arcproxyalt.cpp:691 #, fuzzy msgid "AC is invalid: " msgstr "URL:en är inte giltig: %s" #: src/clients/credentials/arcproxy.cpp:720 #: src/clients/credentials/arcproxyalt.cpp:732 #, fuzzy msgid "Time left for AC: AC is not valid yet" msgstr "Misslyckades med att läsa proxy fil: %s" #: src/clients/credentials/arcproxy.cpp:722 #: src/clients/credentials/arcproxyalt.cpp:734 #, fuzzy msgid "Time left for AC: AC has expired" msgstr "Misslyckades med att läsa proxy fil: %s" #: src/clients/credentials/arcproxy.cpp:724 #: src/clients/credentials/arcproxyalt.cpp:736 #, fuzzy, c-format msgid "Time left for AC: %s" msgstr "Misslyckades med att läsa proxy fil: %s" #: 
src/clients/credentials/arcproxy.cpp:815 #, c-format msgid "Information item '%s' is not known" msgstr "" #: src/clients/credentials/arcproxy.cpp:824 #: src/clients/credentials/arcproxyalt.cpp:746 msgid "" "Cannot find the user certificate path, please setup environment " "X509_USER_CERT, or certificatepath in a configuration file" msgstr "" #: src/clients/credentials/arcproxy.cpp:828 #: src/clients/credentials/arcproxyalt.cpp:750 msgid "" "Cannot find the user private key path, please setup environment " "X509_USER_KEY, or keypath in a configuration file" msgstr "" #: src/clients/credentials/arcproxy.cpp:852 #, c-format msgid "" "Cannot parse password source expression %s it must be of type=source format" msgstr "" #: src/clients/credentials/arcproxy.cpp:869 #, c-format msgid "" "Cannot parse password type %s. Currently supported values are " "'key','myproxy','myproxynew' and 'all'." msgstr "" #: src/clients/credentials/arcproxy.cpp:884 #, c-format msgid "" "Cannot parse password source %s it must be of source_type or source_type:" "data format. Supported source types are int,stdin,stream,file." msgstr "" #: src/clients/credentials/arcproxy.cpp:898 msgid "Only standard input is currently supported for password source." msgstr "" #: src/clients/credentials/arcproxy.cpp:903 #, c-format msgid "" "Cannot parse password source type %s. Supported source types are int,stdin," "stream,file." msgstr "" #: src/clients/credentials/arcproxy.cpp:942 #: src/clients/credentials/arcproxyalt.cpp:782 #, fuzzy msgid "The start, end and period can't be set simultaneously" msgstr "Flaggorna 'p' och 'n' kan inte användas samtidigt" #: src/clients/credentials/arcproxy.cpp:948 #: src/clients/credentials/arcproxyalt.cpp:788 #, c-format msgid "The start time that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:955 #: src/clients/credentials/arcproxyalt.cpp:795 #, fuzzy, c-format msgid "The period that you set: %s can't be recognized." msgstr "Version i Listen-element kan ej kännas igen" #: src/clients/credentials/arcproxy.cpp:962 #: src/clients/credentials/arcproxyalt.cpp:802 #, fuzzy, c-format msgid "The end time that you set: %s can't be recognized." msgstr "Version i Listen-element kan ej kännas igen" #: src/clients/credentials/arcproxy.cpp:971 #: src/clients/credentials/arcproxyalt.cpp:811 #, c-format msgid "The end time that you set: %s is before start time:%s." msgstr "" #: src/clients/credentials/arcproxy.cpp:982 #: src/clients/credentials/arcproxyalt.cpp:822 #, c-format msgid "WARNING: The start time that you set: %s is before current time: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:985 #: src/clients/credentials/arcproxyalt.cpp:825 #, c-format msgid "WARNING: The end time that you set: %s is before current time: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:995 #: src/clients/credentials/arcproxyalt.cpp:835 #, c-format msgid "The VOMS AC period that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1013 #: src/clients/credentials/arcproxyalt.cpp:853 #, c-format msgid "The MyProxy period that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1028 #, c-format msgid "The keybits constraint is wrong: %s." 
msgstr "" #: src/clients/credentials/arcproxy.cpp:1042 #: src/clients/credentials/arcproxyalt.cpp:476 #: src/hed/libs/credential/ARCProxyUtil.cpp:1303 msgid "The NSS database can not be detected in the Firefox profile" msgstr "" #: src/clients/credentials/arcproxy.cpp:1051 #: src/hed/libs/credential/ARCProxyUtil.cpp:1311 #, c-format msgid "" "There are %d NSS base directories where the certificate, key, and module " "databases live" msgstr "" #: src/clients/credentials/arcproxy.cpp:1053 #: src/hed/libs/credential/ARCProxyUtil.cpp:1315 #, fuzzy, c-format msgid "Number %d is: %s" msgstr "Fel antal parametrar angivna" #: src/clients/credentials/arcproxy.cpp:1055 #, c-format msgid "Please choose the NSS database you would like to use (1-%d): " msgstr "" #: src/clients/credentials/arcproxy.cpp:1071 #: src/clients/credentials/arcproxyalt.cpp:482 #: src/hed/libs/credential/ARCProxyUtil.cpp:1329 #, c-format msgid "NSS database to be accessed: %s\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:1142 #: src/hed/libs/credential/ARCProxyUtil.cpp:1503 #, fuzzy, c-format msgid "Certificate to use is: %s" msgstr "Certifikat är ingen fil: %s" #: src/clients/credentials/arcproxy.cpp:1190 #: src/clients/credentials/arcproxy.cpp:1306 #: src/clients/credentials/arcproxyalt.cpp:539 #: src/clients/credentials/arcproxyalt.cpp:955 #: src/hed/libs/credential/ARCProxyUtil.cpp:1560 #, fuzzy msgid "Proxy generation succeeded" msgstr "Certifikatverifiering lyckades" #: src/clients/credentials/arcproxy.cpp:1191 #: src/clients/credentials/arcproxy.cpp:1307 #: src/clients/credentials/arcproxyalt.cpp:540 #: src/clients/credentials/arcproxyalt.cpp:956 #: src/hed/libs/credential/ARCProxyUtil.cpp:1561 #, fuzzy, c-format msgid "Your proxy is valid until: %s" msgstr "Cachefil giltig till: %s" #: src/clients/credentials/arcproxy.cpp:1210 msgid "" "The old GSI proxies are not supported anymore. Please do not use -O/--old " "option." msgstr "" #: src/clients/credentials/arcproxy.cpp:1229 src/hed/mcc/tls/MCCTLS.cpp:167 #: src/hed/mcc/tls/MCCTLS.cpp:200 src/hed/mcc/tls/MCCTLS.cpp:226 msgid "VOMS attribute parsing failed" msgstr "" #: src/clients/credentials/arcproxy.cpp:1231 msgid "Myproxy server did not return proxy with VOMS AC included" msgstr "" #: src/clients/credentials/arcproxy.cpp:1252 #: src/clients/credentials/arcproxyalt.cpp:892 #: src/hed/libs/credential/ARCProxyUtil.cpp:341 #, fuzzy msgid "Proxy generation failed: No valid certificate found." msgstr "Misslyckades med att verifiera det signerade certifikatet" #: src/clients/credentials/arcproxy.cpp:1258 #: src/clients/credentials/arcproxyalt.cpp:899 #: src/hed/libs/credential/ARCProxyUtil.cpp:348 #, fuzzy msgid "Proxy generation failed: No valid private key found." msgstr "Misslyckades med att verifiera det signerade certifikatet" #: src/clients/credentials/arcproxy.cpp:1263 #: src/clients/credentials/arcproxyalt.cpp:902 #: src/hed/libs/credential/ARCProxyUtil.cpp:173 #, fuzzy, c-format msgid "Your identity: %s" msgstr "Identitetsnamn: %s" #: src/clients/credentials/arcproxy.cpp:1265 #: src/clients/credentials/arcproxyalt.cpp:907 #: src/hed/libs/credential/ARCProxyUtil.cpp:356 msgid "Proxy generation failed: Certificate has expired." msgstr "" #: src/clients/credentials/arcproxy.cpp:1269 #: src/clients/credentials/arcproxyalt.cpp:911 #: src/hed/libs/credential/ARCProxyUtil.cpp:361 msgid "Proxy generation failed: Certificate is not valid yet." 
msgstr "" #: src/clients/credentials/arcproxy.cpp:1280 #, fuzzy msgid "Proxy generation failed: Failed to create temporary file." msgstr "Misslyckades med att verifiera det signerade certifikatet" #: src/clients/credentials/arcproxy.cpp:1288 #, fuzzy msgid "Proxy generation failed: Failed to retrieve VOMS information." msgstr "Misslyckades med att verifiera det signerade certifikatet" #: src/clients/credentials/arcproxy_myproxy.cpp:100 #: src/clients/credentials/arcproxyalt.cpp:1312 #: src/hed/libs/credential/ARCProxyUtil.cpp:844 #, fuzzy msgid "Succeeded to get info from MyProxy server" msgstr "Lyckades signera proxycertifikatet" #: src/clients/credentials/arcproxy_myproxy.cpp:144 #: src/clients/credentials/arcproxyalt.cpp:1368 #: src/hed/libs/credential/ARCProxyUtil.cpp:900 msgid "Succeeded to change password on MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:185 #: src/clients/credentials/arcproxyalt.cpp:1417 #: src/hed/libs/credential/ARCProxyUtil.cpp:949 msgid "Succeeded to destroy credential on MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:265 #: src/clients/credentials/arcproxyalt.cpp:1506 #: src/hed/libs/credential/ARCProxyUtil.cpp:1038 #, c-format msgid "Succeeded to get a proxy in %s from MyProxy server %s" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:318 #: src/clients/credentials/arcproxyalt.cpp:1565 #: src/hed/libs/credential/ARCProxyUtil.cpp:1097 #, fuzzy msgid "Succeeded to put a proxy onto MyProxy server" msgstr "Delegeringsauktorisering misslyckades" #: src/clients/credentials/arcproxy_proxy.cpp:93 #: src/clients/credentials/arcproxyalt.cpp:1222 #: src/hed/libs/credential/ARCProxyUtil.cpp:403 #: src/hed/libs/credential/ARCProxyUtil.cpp:1410 #, fuzzy msgid "Failed to add VOMS AC extension. Your proxy may be incomplete." msgstr "Misslyckades med att lägga till tillägg till proxyn" #: src/clients/credentials/arcproxy_voms.cpp:63 #, fuzzy msgid "" "Failed to process VOMS configuration or no suitable configuration lines " "found." msgstr "Misslyckades med att läsa GM-inställningsfil %s" #: src/clients/credentials/arcproxy_voms.cpp:75 #, fuzzy, c-format msgid "Failed to parse requested VOMS lifetime: %s" msgstr "Misslyckades med att förregistrera destination: %s" #: src/clients/credentials/arcproxy_voms.cpp:93 #: src/clients/credentials/arcproxyalt.cpp:1051 #: src/hed/libs/credential/ARCProxyUtil.cpp:640 #, fuzzy, c-format msgid "Cannot get VOMS server address information from vomses line: \"%s\"" msgstr "Kan inte läsa certifikatinformation från BIO" #: src/clients/credentials/arcproxy_voms.cpp:97 #: src/clients/credentials/arcproxy_voms.cpp:99 #: src/clients/credentials/arcproxyalt.cpp:1061 #: src/clients/credentials/arcproxyalt.cpp:1063 #: src/hed/libs/credential/ARCProxyUtil.cpp:650 #: src/hed/libs/credential/ARCProxyUtil.cpp:652 #, fuzzy, c-format msgid "Contacting VOMS server (named %s): %s on port: %s" msgstr "Connect: Misslyckades med att läsa proxyfil: %s" #: src/clients/credentials/arcproxy_voms.cpp:105 #, c-format msgid "Failed to parse requested VOMS server port number: %s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:122 msgid "List functionality is not supported for RESTful VOMS interface" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:132 #: src/clients/credentials/arcproxy_voms.cpp:188 #, c-format msgid "" "The VOMS server with the information:\n" "\t%s\n" "can not be reached, please make sure it is available." 
msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:133 #: src/clients/credentials/arcproxy_voms.cpp:138 #: src/clients/credentials/arcproxy_voms.cpp:189 #: src/clients/credentials/arcproxy_voms.cpp:194 #, c-format msgid "" "Collected error is:\n" "\t%s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:137 #: src/clients/credentials/arcproxy_voms.cpp:193 #, fuzzy, c-format msgid "No valid response from VOMS server: %s" msgstr "Inget jobb-id har mottagits" #: src/clients/credentials/arcproxy_voms.cpp:155 msgid "List functionality is not supported for legacy VOMS interface" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:167 #, fuzzy, c-format msgid "Failed to parse VOMS command: %s" msgstr "Misslyckades med att skapa GUID i RLS: %s" #: src/clients/credentials/arcproxy_voms.cpp:204 #, c-format msgid "" "There are %d servers with the same name: %s in your vomses file, but all of " "them can not be reached, or can not return valid message." msgstr "" #: src/clients/credentials/arcproxyalt.cpp:108 #: src/hed/libs/credential/ARCProxyUtil.cpp:258 #, fuzzy, c-format msgid "OpenSSL error -- %s" msgstr "OpenSSL-fel -- %s" #: src/clients/credentials/arcproxyalt.cpp:109 #: src/hed/libs/credential/ARCProxyUtil.cpp:259 #, c-format msgid "Library : %s" msgstr "Bibliotek: %s" #: src/clients/credentials/arcproxyalt.cpp:110 #: src/hed/libs/credential/ARCProxyUtil.cpp:260 #, c-format msgid "Function : %s" msgstr "Funktion : %s" #: src/clients/credentials/arcproxyalt.cpp:111 #: src/hed/libs/credential/ARCProxyUtil.cpp:261 #, c-format msgid "Reason : %s" msgstr "Anledning: %s" #: src/clients/credentials/arcproxyalt.cpp:167 #: src/hed/libs/credential/ARCProxyUtil.cpp:317 #, fuzzy msgid "User interface error" msgstr " Gränssnittstillägg: %s" #: src/clients/credentials/arcproxyalt.cpp:173 #: src/hed/libs/credential/ARCProxyUtil.cpp:323 msgid "Aborted!" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:319 #, fuzzy msgid "" "Supported constraints are:\n" " validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start " "from now)\n" " validityEnd=time\n" " validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod and " "validityEnd\n" " not specified, the default is 12 hours for local proxy, and 168 hours for " "delegated\n" " proxy on myproxy server)\n" " vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, the " "default\n" " is the minimum value of 12 hours and validityPeriod)\n" " myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy " "server,\n" " e.g. 43200 or 12h or 12H; if not specified, the default is the minimum " "value of\n" " 12 hours and validityPeriod (which is lifetime of the delegated proxy on " "myproxy server))\n" " proxyPolicy=policy content\n" " proxyPolicyFile=policy file" msgstr "" "Understödda villkor är:\n" " validityStart=tid\n" " validityEnd=tid\n" " validityPeriod=tid\n" " proxyPolicy=policy-innehåll\n" " proxyPolicyFile=policy-fil" #: src/clients/credentials/arcproxyalt.cpp:388 msgid "" "print all information about this proxy. \n" " In order to show the Identity (DN without CN as suffix for " "proxy) \n" " of the certificate, the 'trusted certdir' is needed." msgstr "" #: src/clients/credentials/arcproxyalt.cpp:398 #, fuzzy msgid "username to MyProxy server" msgstr "sökväg till proxyfil" #: src/clients/credentials/arcproxyalt.cpp:422 msgid "" "command to MyProxy server. 
The command can be PUT or GET.\n" " PUT/put/Put -- put a delegated credential to the MyProxy " "server; \n" " GET/get/Get -- get a delegated credential from the MyProxy " "server, \n" " credential (certificate and key) is not needed in this case. \n" " MyProxy functionality can be used together with VOMS\n" " functionality.\n" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:437 msgid "use NSS credential database in the Firefox profile" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1040 #: src/hed/libs/credential/ARCProxyUtil.cpp:629 #, fuzzy, c-format msgid "There are %d commands to the same VOMS server %s" msgstr "arcclean-kommandot tar bort ett job från ett kluster." #: src/clients/credentials/arcproxyalt.cpp:1094 #: src/hed/libs/credential/ARCProxyUtil.cpp:683 #, c-format msgid "Try to get attribute from VOMS server with order: %s" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1097 #: src/hed/libs/credential/ARCProxyUtil.cpp:686 #, fuzzy, c-format msgid "Message sent to VOMS server %s is: %s" msgstr "Varning: kan inte koppla upp mot RLS-servern %s: %s" #: src/clients/credentials/arcproxyalt.cpp:1116 #: src/hed/libs/credential/ARCProxyUtil.cpp:705 #, c-format msgid "" "The VOMS server with the information:\n" "\t%s\n" "can not be reached, please make sure it is available" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1120 #: src/hed/libs/credential/ARCProxyUtil.cpp:709 #, fuzzy msgid "No HTTP response from VOMS server" msgstr "Inget jobb-id har mottagits" #: src/clients/credentials/arcproxyalt.cpp:1125 #: src/clients/credentials/arcproxyalt.cpp:1151 #: src/hed/libs/credential/ARCProxyUtil.cpp:714 #: src/hed/libs/credential/ARCProxyUtil.cpp:740 #, c-format msgid "Returned message from VOMS server: %s" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1137 #: src/hed/libs/credential/ARCProxyUtil.cpp:726 #, c-format msgid "" "The VOMS server with the information:\n" "\t%s\"\n" "can not be reached, please make sure it is available" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1141 #: src/hed/libs/credential/ARCProxyUtil.cpp:730 #, fuzzy msgid "No stream response from VOMS server" msgstr "Inget jobb-id har mottagits" #: src/clients/credentials/arcproxyalt.cpp:1163 #: src/hed/libs/credential/ARCProxyUtil.cpp:752 #, c-format msgid "" "The validity duration of VOMS AC is shortened from %s to %s, due to the " "validity constraint on voms server side.\n" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1166 #: src/hed/libs/credential/ARCProxyUtil.cpp:755 #, c-format msgid "" "Cannot get any AC or attributes info from VOMS server: %s;\n" " Returned message from VOMS server: %s\n" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1171 #: src/hed/libs/credential/ARCProxyUtil.cpp:760 #, c-format msgid "Returned message from VOMS server %s is: %s\n" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1193 #: src/hed/libs/credential/ARCProxyUtil.cpp:782 #, c-format msgid "The attribute information from VOMS server: %s is list as following:" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1205 #: src/hed/libs/credential/ARCProxyUtil.cpp:794 #, c-format msgid "" "There are %d servers with the same name: %s in your vomses file, but all of " "them can not be reached, or can not return valid message. But proxy without " "VOMS AC extension will still be generated." 
msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1220 #, fuzzy, c-format msgid "Failed to add extension: %s" msgstr "Misslyckades med att duplicera tillägg" #: src/clients/credentials/arcproxyalt.cpp:1238 #: src/hed/libs/credential/ARCProxyUtil.cpp:443 #: src/hed/libs/credential/Credential.cpp:884 #, fuzzy, c-format msgid "Error: can't open policy file: %s" msgstr "Fel vid öppning av låsfil %s: %s" #: src/clients/credentials/arcproxyalt.cpp:1248 #: src/hed/libs/credential/ARCProxyUtil.cpp:453 #: src/hed/libs/credential/Credential.cpp:897 #, fuzzy, c-format msgid "Error: policy location: %s is not a regular file" msgstr "ARC-jobblistfil är inte en vanlig fil: %s" #: src/clients/credentials/arcproxyalt.cpp:1600 msgid "" "$X509_VOMS_FILE, and $X509_VOMSES are not set;\n" "User has not specify the location for vomses information;\n" "There is also not vomses location information in user's configuration file;\n" "Cannot find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses, " "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/" "vomses, /etc/vomses, /etc/grid-security/vomses, and the location at the " "corresponding sub-directory" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1640 #: src/hed/libs/credential/ARCProxyUtil.cpp:552 #, c-format msgid "VOMS line contains wrong number of tokens (%u expected): \"%s\"" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1684 #: src/hed/libs/credential/ARCProxyUtil.cpp:596 #, fuzzy, c-format msgid "Cannot get VOMS server %s information from the vomses files" msgstr "Kan inte läsa certifikatinformation från BIO" #: src/clients/credentials/test2myproxyserver_get.cpp:89 #: src/clients/credentials/test2myproxyserver_get.cpp:131 #: src/clients/credentials/test2myproxyserver_put.cpp:88 #: src/clients/credentials/test2myproxyserver_put.cpp:182 #: src/clients/credentials/test2vomsserver.cpp:101 #, fuzzy msgid "No stream response" msgstr "Inget svar" #: src/clients/credentials/test2myproxyserver_get.cpp:104 #: src/clients/credentials/test2myproxyserver_get.cpp:143 #: src/clients/credentials/test2myproxyserver_get.cpp:190 #: src/clients/credentials/test2myproxyserver_put.cpp:103 #: src/clients/credentials/test2myproxyserver_put.cpp:116 #: src/clients/credentials/test2myproxyserver_put.cpp:194 #, c-format msgid "Returned msg from myproxy server: %s %d" msgstr "" #: src/clients/credentials/test2myproxyserver_get.cpp:149 #, fuzzy, c-format msgid "There are %d certificates in the returned msg" msgstr "Peer-certifikat kan inte extraheras" #: src/clients/credentials/test2myproxyserver_put.cpp:135 #, fuzzy msgid "Delegate proxy failed" msgstr "DelegateProxy misslyckades" #: src/clients/credentials/test2vomsserver.cpp:116 #, c-format msgid "Returned msg from voms server: %s " msgstr "" #: src/clients/data/arccp.cpp:77 src/clients/data/arccp.cpp:330 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:426 #, c-format msgid "Current transfer FAILED: %s" msgstr "Nuvarande överföring MISSLYCKADES: %s" #: src/clients/data/arccp.cpp:79 src/clients/data/arccp.cpp:135 #: src/clients/data/arccp.cpp:332 src/clients/data/arcls.cpp:224 #: src/clients/data/arcmkdir.cpp:73 src/clients/data/arcrename.cpp:89 #: src/clients/data/arcrm.cpp:95 msgid "This seems like a temporary error, please try again later" msgstr "" #: src/clients/data/arccp.cpp:87 src/clients/data/arccp.cpp:96 #, fuzzy, c-format msgid "Unable to copy %s" msgstr "Misslyckades med att läsa object: %s" #: src/clients/data/arccp.cpp:88 src/clients/data/arccp.cpp:97 #: 
src/clients/data/arcls.cpp:150 src/clients/data/arcls.cpp:159 #: src/clients/data/arcmkdir.cpp:55 src/clients/data/arcmkdir.cpp:64 #: src/clients/data/arcrename.cpp:67 src/clients/data/arcrename.cpp:76 #: src/clients/data/arcrm.cpp:68 src/clients/data/arcrm.cpp:80 msgid "Invalid credentials, please check proxy and/or CA certificates" msgstr "" #: src/clients/data/arccp.cpp:94 src/clients/data/arcls.cpp:156 #: src/clients/data/arcmkdir.cpp:61 src/clients/data/arcrename.cpp:73 #: src/clients/data/arcrm.cpp:77 msgid "Proxy expired" msgstr "" #: src/clients/data/arccp.cpp:112 src/clients/data/arccp.cpp:116 #: src/clients/data/arccp.cpp:149 src/clients/data/arccp.cpp:153 #: src/clients/data/arccp.cpp:358 src/clients/data/arccp.cpp:363 #: src/clients/data/arcls.cpp:123 src/clients/data/arcmkdir.cpp:28 #: src/clients/data/arcrename.cpp:29 src/clients/data/arcrename.cpp:33 #: src/clients/data/arcrm.cpp:36 #, fuzzy, c-format msgid "Invalid URL: %s" msgstr "Ogiltig URL: %s" #: src/clients/data/arccp.cpp:128 msgid "Third party transfer is not supported for these endpoints" msgstr "" #: src/clients/data/arccp.cpp:130 msgid "" "Protocol(s) not supported - please check that the relevant gfal2\n" " plugins are installed (gfal2-plugin-* packages)" msgstr "" #: src/clients/data/arccp.cpp:133 #, c-format msgid "Transfer FAILED: %s" msgstr "Överföring MISSLYCKADES: %s" #: src/clients/data/arccp.cpp:161 src/clients/data/arccp.cpp:187 #: src/clients/data/arccp.cpp:374 src/clients/data/arccp.cpp:402 #, c-format msgid "Can't read list of sources from file %s" msgstr "Kan inte läsa lista med källor från filen %s" #: src/clients/data/arccp.cpp:166 src/clients/data/arccp.cpp:202 #: src/clients/data/arccp.cpp:379 src/clients/data/arccp.cpp:418 #, c-format msgid "Can't read list of destinations from file %s" msgstr "Kan inte läsa lista med destinationer från filen %s" #: src/clients/data/arccp.cpp:171 src/clients/data/arccp.cpp:385 msgid "Numbers of sources and destinations do not match" msgstr "Antalet källor och destinationer stämmer inte överens" #: src/clients/data/arccp.cpp:216 msgid "Fileset registration is not supported yet" msgstr "Filuppsättningsregistrering understöds inte ännu" #: src/clients/data/arccp.cpp:222 src/clients/data/arccp.cpp:295 #: src/clients/data/arccp.cpp:456 #, c-format msgid "Unsupported source url: %s" msgstr "Icke understödd käll-URL: %s" #: src/clients/data/arccp.cpp:226 src/clients/data/arccp.cpp:299 #, c-format msgid "Unsupported destination url: %s" msgstr "Icke understödd destinations-URL: %s" #: src/clients/data/arccp.cpp:233 msgid "" "For registration source must be ordinary URL and destination must be " "indexing service" msgstr "" "För registrering måste källan vara en vanlig URL och destinationen en " "indexeringsservice" #: src/clients/data/arccp.cpp:243 #, fuzzy, c-format msgid "Could not obtain information about source: %s" msgstr "Jobbtillståndsinformation ej funnen: %s" #: src/clients/data/arccp.cpp:250 msgid "" "Metadata of source does not match existing destination. Use the --force " "option to override this." 
msgstr "" #: src/clients/data/arccp.cpp:262 msgid "Failed to accept new file/destination" msgstr "Misslyckades med att acceptera ny fil/destination" #: src/clients/data/arccp.cpp:268 src/clients/data/arccp.cpp:274 #, fuzzy, c-format msgid "Failed to register new file/destination: %s" msgstr "Misslyckades med att registrera ny fil/destination" #: src/clients/data/arccp.cpp:436 msgid "Fileset copy to single object is not supported yet" msgstr "Kopiering av filuppsättning till ett enstaka objekt stöds ej ännu" #: src/clients/data/arccp.cpp:446 msgid "Can't extract object's name from source url" msgstr "Kan ej extrahera objektets namn från käll-URL" #: src/clients/data/arccp.cpp:465 #, fuzzy, c-format msgid "%s. Cannot copy fileset" msgstr "Kan inte läsa nyckelfil: %s" #: src/clients/data/arccp.cpp:475 src/hed/libs/compute/ExecutionTarget.cpp:254 #: src/hed/libs/compute/ExecutionTarget.cpp:326 #, c-format msgid "Name: %s" msgstr "Namn: %s" #: src/clients/data/arccp.cpp:478 #, c-format msgid "Source: %s" msgstr "Källa: %s" #: src/clients/data/arccp.cpp:479 #, c-format msgid "Destination: %s" msgstr "Destinaltion: %s" #: src/clients/data/arccp.cpp:485 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:433 #, fuzzy msgid "Current transfer complete" msgstr "Nuvarande överföring slutförd." #: src/clients/data/arccp.cpp:488 msgid "Some transfers failed" msgstr "Några överföringar misslyckades" #: src/clients/data/arccp.cpp:498 #, c-format msgid "Directory: %s" msgstr "Katalog: %s" #: src/clients/data/arccp.cpp:518 #, fuzzy msgid "Transfer complete" msgstr "Överföring slutförd" #: src/clients/data/arccp.cpp:537 msgid "source destination" msgstr "källa destination" #: src/clients/data/arccp.cpp:538 msgid "" "The arccp command copies files to, from and between grid storage elements." msgstr "" "arccp-kommandot används för att kopiera filer till, från och mellan\n" "gridlagringsresurser." #: src/clients/data/arccp.cpp:543 #, fuzzy msgid "" "use passive transfer (off by default if secure is on, on by default if " "secure is not requested)" msgstr "" "använd passiv överföring (fungerar inte om säker överföring är aktiv, " "förvalt om säker överföring inte begärts" #: src/clients/data/arccp.cpp:549 msgid "do not try to force passive transfer" msgstr "försök inte forcera passiv överföring" #: src/clients/data/arccp.cpp:554 #, fuzzy msgid "" "if the destination is an indexing service and not the same as the source and " "the destination is already registered, then the copy is normally not done. " "However, if this option is specified the source is assumed to be a replica " "of the destination created in an uncontrolled way and the copy is done like " "in case of replication. Using this option also skips validation of completed " "transfers." msgstr "" "om destinationen är en indexservice och inte densamma som källan och " "destinationen redan är registrerad, så skapas kopian normalt inte. Men, om " "denna flagga anges antas källan varar en kopia av destinationen som skapats " "på ett okontrollerat sätt och kopian skapas på samma sätt som vid replikering" #: src/clients/data/arccp.cpp:567 msgid "show progress indicator" msgstr "visa fortskridandeindikator" #: src/clients/data/arccp.cpp:572 #, fuzzy msgid "" "do not transfer, but register source into destination. destination must be a " "meta-url." 
msgstr "" "kopiera inte filen, registrera den bara - destinationen måste vara icke-" "existerande meta-url" #: src/clients/data/arccp.cpp:578 msgid "use secure transfer (insecure by default)" msgstr "använd säker överföring (osäker som förval)" #: src/clients/data/arccp.cpp:583 msgid "path to local cache (use to put file into cache)" msgstr "sökväg till lokalt cache (använd för att lägga in fil i cache)" #: src/clients/data/arccp.cpp:588 src/clients/data/arcls.cpp:300 #, fuzzy msgid "operate recursively" msgstr "arbeta rekursivt upp till den angivna nivån" #: src/clients/data/arccp.cpp:593 src/clients/data/arcls.cpp:305 msgid "operate recursively up to specified level" msgstr "arbeta rekursivt upp till den angivna nivån" #: src/clients/data/arccp.cpp:594 src/clients/data/arcls.cpp:306 msgid "level" msgstr "nivå" #: src/clients/data/arccp.cpp:598 msgid "number of retries before failing file transfer" msgstr "antal försök innan överföring misslyckas" #: src/clients/data/arccp.cpp:599 msgid "number" msgstr "nummer" #: src/clients/data/arccp.cpp:603 msgid "" "physical location to write to when destination is an indexing service. Must " "be specified for indexing services which do not automatically generate " "physical locations. Can be specified multiple times - locations will be " "tried in order until one succeeds." msgstr "" #: src/clients/data/arccp.cpp:611 msgid "" "perform third party transfer, where the destination pulls from the source " "(only available with GFAL plugin)" msgstr "" #: src/clients/data/arccp.cpp:617 src/clients/data/arcls.cpp:322 #: src/clients/data/arcmkdir.cpp:101 src/clients/data/arcrename.cpp:112 #: src/clients/data/arcrm.cpp:127 msgid "list the available plugins (protocols supported)" msgstr "" #: src/clients/data/arccp.cpp:656 src/clients/data/arcls.cpp:362 #: src/clients/data/arcmkdir.cpp:141 src/clients/data/arcrename.cpp:152 #: src/clients/data/arcrm.cpp:168 msgid "Protocol plugins available:" msgstr "" #: src/clients/data/arccp.cpp:681 src/clients/data/arcls.cpp:387 #: src/clients/data/arcmkdir.cpp:165 src/clients/data/arcrename.cpp:175 #: src/clients/data/arcrm.cpp:193 msgid "Wrong number of parameters specified" msgstr "Fel antal parametrar angivna" #: src/clients/data/arccp.cpp:686 msgid "Options 'p' and 'n' can't be used simultaneously" msgstr "Flaggorna 'p' och 'n' kan inte användas samtidigt" #: src/clients/data/arcls.cpp:129 src/clients/data/arcmkdir.cpp:34 #: src/clients/data/arcrm.cpp:43 #, c-format msgid "Can't read list of locations from file %s" msgstr "Kan inte läsa platslista från fil %s" #: src/clients/data/arcls.cpp:144 src/clients/data/arcmkdir.cpp:49 #: src/clients/data/arcrename.cpp:61 #, fuzzy msgid "Unsupported URL given" msgstr "Icke understödd url angiven" #: src/clients/data/arcls.cpp:149 src/clients/data/arcls.cpp:158 #, fuzzy, c-format msgid "Unable to list content of %s" msgstr "Använder jobblistfil: %s" #: src/clients/data/arcls.cpp:227 #, fuzzy msgid "Warning: Failed listing files but some information is obtained" msgstr "" "Varning: Misslyckades med att lista metafiler men viss information erhölls" #: src/clients/data/arcls.cpp:281 src/clients/data/arcmkdir.cpp:90 msgid "url" msgstr "url" #: src/clients/data/arcls.cpp:282 msgid "" "The arcls command is used for listing files in grid storage elements and " "file\n" "index catalogues." msgstr "" "arcls-kommandot används för att lista filer på gridlagringsresurser och i\n" "filindexkataloger." 
#: src/clients/data/arcls.cpp:291 msgid "show URLs of file locations" msgstr "visa URL:er till filens registrerade kopior" #: src/clients/data/arcls.cpp:295 msgid "display all available metadata" msgstr "" #: src/clients/data/arcls.cpp:309 msgid "" "show only description of requested object, do not list content of directories" msgstr "" #: src/clients/data/arcls.cpp:313 msgid "treat requested object as directory and always try to list content" msgstr "" #: src/clients/data/arcls.cpp:317 msgid "check readability of object, does not show any information about object" msgstr "" #: src/clients/data/arcls.cpp:392 msgid "Incompatible options --nolist and --forcelist requested" msgstr "" #: src/clients/data/arcls.cpp:397 msgid "Requesting recursion and --nolist has no sense" msgstr "" #: src/clients/data/arcmkdir.cpp:54 src/clients/data/arcmkdir.cpp:63 #, fuzzy, c-format msgid "Unable to create directory %s" msgstr "Misslyckades med att skapa/hitta katalog %s : %s\"" #: src/clients/data/arcmkdir.cpp:91 #, fuzzy msgid "" "The arcmkdir command creates directories on grid storage elements and " "catalogs." msgstr "arcrm-kommandot används för att ta bort filer på gridlagringsresurser." #: src/clients/data/arcmkdir.cpp:96 msgid "make parent directories as needed" msgstr "" #: src/clients/data/arcrename.cpp:41 msgid "Both URLs must have the same protocol, host and port" msgstr "" #: src/clients/data/arcrename.cpp:51 #, fuzzy msgid "Cannot rename to or from root directory" msgstr "Kan inte skapa tillägg för proxycertifikat" #: src/clients/data/arcrename.cpp:55 #, fuzzy msgid "Cannot rename to the same URL" msgstr "Kan inte acceptera källa som URL" #: src/clients/data/arcrename.cpp:66 src/clients/data/arcrename.cpp:75 #, fuzzy, c-format msgid "Unable to rename %s" msgstr "Misslyckades med att skapa GUID i RLS: %s" #: src/clients/data/arcrename.cpp:106 msgid "old_url new_url" msgstr "" #: src/clients/data/arcrename.cpp:107 #, fuzzy msgid "The arcrename command renames files on grid storage elements." msgstr "arcrm-kommandot används för att ta bort filer på gridlagringsresurser." #: src/clients/data/arcrm.cpp:58 #, fuzzy, c-format msgid "Unsupported URL given: %s" msgstr "Icke understödd url angiven" #: src/clients/data/arcrm.cpp:67 src/clients/data/arcrm.cpp:79 #, fuzzy, c-format msgid "Unable to remove file %s" msgstr "Misslyckades med att läsa fil %s: %s" #: src/clients/data/arcrm.cpp:115 msgid "url [url ...]" msgstr "" #: src/clients/data/arcrm.cpp:116 msgid "The arcrm command deletes files and on grid storage elements." msgstr "arcrm-kommandot används för att ta bort filer på gridlagringsresurser." #: src/clients/data/arcrm.cpp:121 msgid "" "remove logical file name registration even if not all physical instances " "were removed" msgstr "" "ta bort logiska filnamnsregistreringen även om inte alla fysiska kopior " "tagits bort" #: src/clients/echo/arcecho.cpp:32 msgid "service message" msgstr "" #: src/clients/echo/arcecho.cpp:33 #, fuzzy msgid "The arcecho command is a client for the ARC echo service." msgstr "arcclean-kommandot tar bort ett job från ett kluster." #: src/clients/echo/arcecho.cpp:35 msgid "" "The service argument is a URL to an ARC echo service.\n" "The message argument is the message the service should return." 
msgstr "" #: src/clients/echo/arcecho.cpp:105 src/hed/dmc/arc/DataPointARC.cpp:169 #: src/hed/dmc/arc/DataPointARC.cpp:222 src/hed/dmc/arc/DataPointARC.cpp:304 #: src/hed/dmc/arc/DataPointARC.cpp:415 src/hed/dmc/arc/DataPointARC.cpp:510 #: src/hed/dmc/arc/DataPointARC.cpp:574 src/hed/dmc/arc/DataPointARC.cpp:624 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:100 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:174 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:240 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:326 #, fuzzy, c-format msgid "" "Request:\n" "%s" msgstr "Förfrågan: %s" #: src/clients/echo/arcecho.cpp:119 src/hed/dmc/arc/DataPointARC.cpp:182 #: src/hed/dmc/arc/DataPointARC.cpp:235 src/hed/dmc/arc/DataPointARC.cpp:320 #: src/hed/dmc/arc/DataPointARC.cpp:431 src/hed/dmc/arc/DataPointARC.cpp:524 #: src/hed/dmc/arc/DataPointARC.cpp:587 src/hed/dmc/arc/DataPointARC.cpp:638 #: src/hed/dmc/srm/srmclient/SRMClient.cpp:164 #, fuzzy msgid "No SOAP response" msgstr "Inget svar" #: src/clients/echo/arcecho.cpp:124 src/hed/acc/UNICORE/UNICOREClient.cpp:531 #: src/hed/dmc/arc/DataPointARC.cpp:187 src/hed/dmc/arc/DataPointARC.cpp:240 #: src/hed/dmc/arc/DataPointARC.cpp:325 src/hed/dmc/arc/DataPointARC.cpp:436 #: src/hed/dmc/arc/DataPointARC.cpp:529 src/hed/dmc/arc/DataPointARC.cpp:592 #: src/hed/dmc/arc/DataPointARC.cpp:643 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:119 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:193 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:267 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:346 #, fuzzy, c-format msgid "" "Response:\n" "%s" msgstr "Svar: %s" #: src/clients/saml/saml_assertion_init.cpp:43 msgid "service_url" msgstr "serviceurl" #: src/clients/saml/saml_assertion_init.cpp:47 msgid "path to config file" msgstr "sökväg till inställningsfil" #: src/clients/saml/saml_assertion_init.cpp:140 #, fuzzy msgid "SOAP Request failed: No response" msgstr "Förfrågan misslyckades: Inget svar" #: src/clients/saml/saml_assertion_init.cpp:144 #, fuzzy msgid "SOAP Request failed: Error" msgstr "Förfrågan misslyckades: Fel" #: src/clients/saml/saml_assertion_init.cpp:150 #, fuzzy msgid "No in SOAP response" msgstr "Det fanns inget SOAP-svar" #: src/clients/saml/saml_assertion_init.cpp:156 msgid "No in SAML response" msgstr "" #: src/clients/saml/saml_assertion_init.cpp:168 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:321 msgid "Succeeded to verify the signature under " msgstr "Lyckades verifiera signaturen under " #: src/clients/saml/saml_assertion_init.cpp:171 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:324 msgid "Failed to verify the signature under " msgstr "Misslyckades med att verifiera signaturen under " #: src/clients/wsrf/arcwsrf.cpp:39 msgid "URL [query]" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:40 #, fuzzy msgid "" "The arcwsrf command is used for obtaining the WS-ResourceProperties of\n" "services." msgstr "arcget-kommandot används för att hämta resultatet av ett jobb." 
#: src/clients/wsrf/arcwsrf.cpp:46 msgid "Request for specific Resource Property" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:47 msgid "[-]name" msgstr "[-]namn" #: src/clients/wsrf/arcwsrf.cpp:80 msgid "Missing URL" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:85 msgid "Too many parameters" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:123 #, fuzzy msgid "Query is not a valid XML" msgstr "%s är inte en giltig URL" #: src/clients/wsrf/arcwsrf.cpp:138 #, fuzzy msgid "Failed to create WSRP request" msgstr "Misslyckades med att verifiera begäran" #: src/clients/wsrf/arcwsrf.cpp:145 #, fuzzy msgid "Specified URL is not valid" msgstr "URL:en är inte giltig: %s" #: src/clients/wsrf/arcwsrf.cpp:157 #, fuzzy msgid "Failed to send request" msgstr "Misslyckades med att starta ny tråd" #: src/clients/wsrf/arcwsrf.cpp:161 #, fuzzy msgid "Failed to obtain SOAP response" msgstr "Misslyckades med att skapa indata-SOAP-behållare" #: src/clients/wsrf/arcwsrf.cpp:167 msgid "SOAP fault received" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:142 #, c-format msgid "Connect: Failed to init handle: %s" msgstr "Connect: Misslyckades med att initiera handtag: %s" #: src/hed/acc/ARC0/FTPControl.cpp:148 #, fuzzy, c-format msgid "Failed to enable IPv6: %s" msgstr "Misslyckades med att läsa fil %s: %s" #: src/hed/acc/ARC0/FTPControl.cpp:158 src/hed/acc/ARC0/FTPControl.cpp:172 #, c-format msgid "Connect: Failed to connect: %s" msgstr "Connect: Misslyckades med att koppla upp: %s" #: src/hed/acc/ARC0/FTPControl.cpp:165 #, c-format msgid "Connect: Connecting timed out after %d ms" msgstr "Connect: Uppkoppling avbröts efter %d ms" #: src/hed/acc/ARC0/FTPControl.cpp:185 #, c-format msgid "Connect: Failed to init auth info handle: %s" msgstr "Connect: Misslyckades med att initiera autentiseringsinfohandtag: %s" #: src/hed/acc/ARC0/FTPControl.cpp:196 src/hed/acc/ARC0/FTPControl.cpp:210 #, c-format msgid "Connect: Failed authentication: %s" msgstr "Connect: Misslyckades med autentisering: %s" #: src/hed/acc/ARC0/FTPControl.cpp:203 #, c-format msgid "Connect: Authentication timed out after %d ms" msgstr "Connect: Autentisering avbröts efter %d ms" #: src/hed/acc/ARC0/FTPControl.cpp:224 src/hed/acc/ARC0/FTPControl.cpp:256 #, fuzzy, c-format msgid "SendCommand: Command: %s" msgstr "SendCommand: Misslyckades: %s" #: src/hed/acc/ARC0/FTPControl.cpp:229 src/hed/acc/ARC0/FTPControl.cpp:240 #: src/hed/acc/ARC0/FTPControl.cpp:260 src/hed/acc/ARC0/FTPControl.cpp:271 #, c-format msgid "SendCommand: Failed: %s" msgstr "SendCommand: Misslyckades: %s" #: src/hed/acc/ARC0/FTPControl.cpp:235 src/hed/acc/ARC0/FTPControl.cpp:266 #, c-format msgid "SendCommand: Timed out after %d ms" msgstr "SendCommand: Avbröts efter %d ms" #: src/hed/acc/ARC0/FTPControl.cpp:243 src/hed/acc/ARC0/FTPControl.cpp:276 #, fuzzy, c-format msgid "SendCommand: Response: %s" msgstr "SendCommand: Misslyckades: %s" #: src/hed/acc/ARC0/FTPControl.cpp:293 #, fuzzy msgid "SendData: Failed sending EPSV and PASV commands" msgstr "SendData: Misslyckades med att sända PASV-kommando" #: src/hed/acc/ARC0/FTPControl.cpp:298 src/hed/acc/ARC0/FTPControl.cpp:304 #: src/hed/acc/ARC0/FTPControl.cpp:320 #, fuzzy, c-format msgid "SendData: Server PASV response parsing failed: %s" msgstr "SendData: Datauppkoppling för skrivning misslyckades: %s" #: src/hed/acc/ARC0/FTPControl.cpp:330 src/hed/acc/ARC0/FTPControl.cpp:336 #: src/hed/acc/ARC0/FTPControl.cpp:343 src/hed/acc/ARC0/FTPControl.cpp:350 #, fuzzy, c-format msgid "SendData: Server EPSV response parsing failed: %s" msgstr "SendData: 
Datauppkoppling för skrivning misslyckades: %s" #: src/hed/acc/ARC0/FTPControl.cpp:357 #, fuzzy, c-format msgid "SendData: Server EPSV response port parsing failed: %s" msgstr "SendData: Datauppkoppling för skrivning misslyckades: %s" #: src/hed/acc/ARC0/FTPControl.cpp:366 #, fuzzy, c-format msgid "SendData: Failed to apply local address to data connection: %s" msgstr "Misslyckades med att erhålla lokal adress för port %s - %s" #: src/hed/acc/ARC0/FTPControl.cpp:372 #, fuzzy, c-format msgid "SendData: Can't parse host and/or port in response to EPSV/PASV: %s" msgstr "Kan inte tolka värd och port i PASV-svar" #: src/hed/acc/ARC0/FTPControl.cpp:377 #, fuzzy, c-format msgid "SendData: Data channel: %d.%d.%d.%d:%d" msgstr "Datakanal: %d.%d.%d.%d %d" #: src/hed/acc/ARC0/FTPControl.cpp:393 #, fuzzy, c-format msgid "SendData: Data channel: [%s]:%d" msgstr "SendData: Datauppkoppling för skrivning misslyckades: %s" #: src/hed/acc/ARC0/FTPControl.cpp:398 #, c-format msgid "SendData: Local port failed: %s" msgstr "SendData: Lokal port misslyckades: %s" #: src/hed/acc/ARC0/FTPControl.cpp:422 msgid "SendData: Failed sending DCAU command" msgstr "SendData: Misslyckades med att sända DCAU-kommando" #: src/hed/acc/ARC0/FTPControl.cpp:427 msgid "SendData: Failed sending TYPE command" msgstr "SendData: Misslyckades med att sända TYPE-kommando" #: src/hed/acc/ARC0/FTPControl.cpp:436 #, c-format msgid "SendData: Local type failed: %s" msgstr "SendData: Lokal typ misslyckades: %s" #: src/hed/acc/ARC0/FTPControl.cpp:446 #, c-format msgid "SendData: Failed sending STOR command: %s" msgstr "SendData: Misslyckades med att sända STOR-kommando: %s" #: src/hed/acc/ARC0/FTPControl.cpp:454 src/hed/acc/ARC0/FTPControl.cpp:475 #, c-format msgid "SendData: Data connect write failed: %s" msgstr "SendData: Datauppkoppling för skrivning misslyckades: %s" #: src/hed/acc/ARC0/FTPControl.cpp:461 src/hed/acc/ARC0/FTPControl.cpp:469 #, c-format msgid "SendData: Data connect write timed out after %d ms" msgstr "SendData: Datauppkoppling för skrivning avbröts efter %d ms" #: src/hed/acc/ARC0/FTPControl.cpp:487 src/hed/acc/ARC0/FTPControl.cpp:507 #, c-format msgid "SendData: Data write failed: %s" msgstr "SendData: Dataskrivning misslyckades: %s" #: src/hed/acc/ARC0/FTPControl.cpp:493 src/hed/acc/ARC0/FTPControl.cpp:501 #, c-format msgid "SendData: Data write timed out after %d ms" msgstr "SendData: Dataskrivning avbröts efter %d ms" #: src/hed/acc/ARC0/FTPControl.cpp:527 src/hed/acc/ARC0/FTPControl.cpp:538 #, fuzzy, c-format msgid "Disconnect: Failed aborting - ignoring: %s" msgstr "Disconnect: Misslyckades med att avsluta: %s" #: src/hed/acc/ARC0/FTPControl.cpp:530 #, fuzzy, c-format msgid "Disconnect: Data close timed out after %d ms" msgstr "Disconnect: Avslutande avbröts efter %d ms" #: src/hed/acc/ARC0/FTPControl.cpp:541 #, fuzzy, c-format msgid "Disconnect: Abort timed out after %d ms" msgstr "Disconnect: Avslutande avbröts efter %d ms" #: src/hed/acc/ARC0/FTPControl.cpp:549 #, fuzzy, c-format msgid "Disconnect: Failed quitting - ignoring: %s" msgstr "Disconnect: Misslyckades med att avsluta: %s" #: src/hed/acc/ARC0/FTPControl.cpp:552 #, c-format msgid "Disconnect: Quitting timed out after %d ms" msgstr "Disconnect: Avslutande avbröts efter %d ms" #: src/hed/acc/ARC0/FTPControl.cpp:561 #, fuzzy, c-format msgid "Disconnect: Failed closing - ignoring: %s" msgstr "Disconnect: Misslyckades med att avsluta: %s" #: src/hed/acc/ARC0/FTPControl.cpp:567 #, fuzzy, c-format msgid "Disconnect: Closing timed out after %d ms" msgstr 
"Disconnect: Avslutande avbröts efter %d ms" #: src/hed/acc/ARC0/FTPControl.cpp:582 #, fuzzy msgid "Disconnect: waiting for globus handle to settle" msgstr "Disconnect: Misslyckades med att förstöra handtag: %s" #: src/hed/acc/ARC0/FTPControl.cpp:596 #, fuzzy msgid "Disconnect: globus handle is stuck." msgstr "Disconnect: Misslyckades med att förstöra handtag: %s" #: src/hed/acc/ARC0/FTPControl.cpp:604 #, fuzzy, c-format msgid "Disconnect: Failed destroying handle: %s. Can't handle such situation." msgstr "Disconnect: Misslyckades med att förstöra handtag: %s" #: src/hed/acc/ARC0/FTPControl.cpp:607 #, fuzzy msgid "Disconnect: handle destroyed." msgstr "Disconnect: Misslyckades med att förstöra handtag: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:47 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:44 msgid "" "Missing reference to factory and/or module. It is unsafe to use Globus in " "non-persistent mode - SubmitterPlugin for ARC0 is disabled. Report to " "developers." msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:62 #, c-format msgid "Unable to query job information (%s), invalid URL provided (%s)" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:74 #, c-format msgid "Jobs left to query: %d" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:83 #, c-format msgid "Querying batch with %d jobs" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:99 msgid "Can't create information handle - is the ARC LDAP DMC plugin available?" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:132 #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:47 #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:36 #, fuzzy, c-format msgid "Job information not found in the information system: %s" msgstr "Jobbinformation ej funnen: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:134 #, fuzzy msgid "" "This job was very recently submitted and might not yet have reached the " "information system" msgstr "" "Detta jobb sickades nyligen in och har kanske inte nått informationssystemet " "ännu" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:236 #, c-format msgid "Cleaning job: %s" msgstr "Städar upp jobb: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:240 msgid "Failed to connect for job cleaning" msgstr "Misslyckades med att koppla upp för att städa upp jobb" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:252 msgid "Failed sending CWD command for job cleaning" msgstr "Misslyckades med att sända CWD-kommando för att städa upp jobb" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:259 msgid "Failed sending RMD command for job cleaning" msgstr "Misslyckades med att sända RMD-kommando för att städa upp jobb" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:266 msgid "Failed to disconnect after job cleaning" msgstr "Misslyckades med koppla ner efter att ha städat upp jobb" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:273 msgid "Job cleaning successful" msgstr "Uppstädning av jobb lyckades" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:284 #, fuzzy, c-format msgid "Cancelling job: %s" msgstr "Städar upp jobb: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:288 #, fuzzy msgid "Failed to connect for job cancelling" msgstr "Misslyckades med att koppla upp för att städa upp jobb" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:300 msgid "Failed sending CWD command for job cancelling" msgstr "Misslyckades med att skicka CWD-kommando för att avbryta jobb" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:307 msgid "Failed sending DELE command for job cancelling" msgstr 
"Misslyckades med att skicka DELE-kommando för att avbryta jobb" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:314 msgid "Failed to disconnect after job cancelling" msgstr "Misslyckades med att koppla ner efter att ha avbrutit jobb" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:322 msgid "Job cancelling successful" msgstr "Avbrytande av jobb lyckades" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:333 #, c-format msgid "Renewing credentials for job: %s" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:337 #, fuzzy msgid "Failed to connect for credential renewal" msgstr "Misslyckades med att koppla upp för att städa upp jobb" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:349 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:356 #, fuzzy msgid "Failed sending CWD command for credentials renewal" msgstr "Misslyckades med att sända CWD-kommando för att städa upp jobb" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:362 #, fuzzy msgid "Failed to disconnect after credentials renewal" msgstr "Misslyckades med koppla ner efter att ha städat upp jobb" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:369 msgid "Renewal of credentials was successful" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:381 #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:111 #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:192 #, c-format msgid "Job %s does not report a resumable state" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:391 #, c-format msgid "Illegal jobID specified (%s)" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:398 #, fuzzy, c-format msgid "HER: %s" msgstr " URL: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:404 #, fuzzy, c-format msgid "Could not create temporary file: %s" msgstr "Kunde inte skapa temporär fil \"%s\"" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:437 #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:131 #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:210 #, fuzzy msgid "Job resuming successful" msgstr "Uppstädning av jobb lyckades" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:473 #, c-format msgid "Trying to retrieve job description of %s from computing resource" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:478 #, fuzzy, c-format msgid "invalid jobID: %s" msgstr "ogiltigt jobb-id" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:520 msgid "clientxrsl found" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:523 #, fuzzy msgid "could not find start of clientxrsl" msgstr "Kunde inte hitta systemets klientinställningar" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:528 #, fuzzy msgid "could not find end of clientxrsl" msgstr "Kunde inte hitta systemets klientinställningar" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:541 #, fuzzy, c-format msgid "Job description: %s" msgstr "Destinaltion: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:544 msgid "clientxrsl not found" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:550 #, fuzzy, c-format msgid "Invalid JobDescription: %s" msgstr "Ogiltig jobbeskrivning:" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:553 #, fuzzy msgid "Valid JobDescription found" msgstr "Ogiltig jobbeskrivning:" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:60 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:204 msgid "Submit: Failed to connect" msgstr "Submit: Misslyckades med att koppla upp" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:68 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:212 msgid "Submit: Failed sending CWD command" msgstr "Submit: Misslyckades med att sända 
CWD-kommando" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:79 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:223 msgid "Submit: Failed sending CWD new command" msgstr "Submit: Misslyckades med att sända CWD new-kommando" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:106 #, fuzzy msgid "Failed to prepare job description." msgstr "Submit: Misslyckades med att sända jobbeskrivning" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:116 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:260 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:63 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:158 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:87 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:401 #, fuzzy, c-format msgid "Unable to submit job. Job description is not valid in the %s format: %s" msgstr "Submit: Misslyckades med att sända jobbeskrivning" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:123 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:267 msgid "Submit: Failed sending job description" msgstr "Submit: Misslyckades med att sända jobbeskrivning" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:138 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:282 msgid "Submit: Failed uploading local input files" msgstr "Submit: Misslyckades med att ladda upp lokala indatafiler" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:193 msgid "" "Submit: service has no suitable information interface - need org.nordugrid." "ldapng" msgstr "" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:250 #, fuzzy msgid "Failed to prepare job description to target resources." msgstr "Submit: Misslyckades med att sända jobbeskrivning" #: src/hed/acc/ARC1/AREXClient.cpp:58 msgid "Creating an A-REX client" msgstr "Skapar en A-REX-klient" #: src/hed/acc/ARC1/AREXClient.cpp:61 #, fuzzy msgid "Unable to create SOAP client used by AREXClient." msgstr "Misslyckades med att skapa SOAP-behållare" #: src/hed/acc/ARC1/AREXClient.cpp:85 #, fuzzy msgid "Failed locating credentials." msgstr "Misslyckades med att initiera delegering" #: src/hed/acc/ARC1/AREXClient.cpp:94 #, fuzzy msgid "Failed initiate client connection." msgstr "Misslyckades med att stänga förbindelse 1" #: src/hed/acc/ARC1/AREXClient.cpp:102 #, fuzzy msgid "Client connection has no entry point." msgstr "Klientkedjan har ingen ingångspunkt" #: src/hed/acc/ARC1/AREXClient.cpp:113 src/hed/acc/EMIES/EMIESClient.cpp:130 #: src/hed/acc/UNICORE/UNICOREClient.cpp:191 #: src/hed/acc/UNICORE/UNICOREClient.cpp:222 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:505 #: src/services/a-rex/test.cpp:86 msgid "Initiating delegation procedure" msgstr "Initialiserar delegeringsprocess" #: src/hed/acc/ARC1/AREXClient.cpp:115 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:507 #, fuzzy msgid "Failed to initiate delegation credentials" msgstr "Misslyckades med att initiera delegering" #: src/hed/acc/ARC1/AREXClient.cpp:128 #, fuzzy msgid "Re-creating an A-REX client" msgstr "Skapar en A-REX-klient" #: src/hed/acc/ARC1/AREXClient.cpp:146 msgid "AREXClient was not created properly." 
msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:151 src/hed/acc/EMIES/EMIESClient.cpp:174 #, fuzzy, c-format msgid "Processing a %s request" msgstr "Skapar och skickar förfrågan" #: src/hed/acc/ARC1/AREXClient.cpp:173 src/hed/acc/CREAM/CREAMClient.cpp:134 #: src/hed/acc/EMIES/EMIESClient.cpp:180 #, fuzzy, c-format msgid "%s request failed" msgstr "Förfrågan misslyckades" #: src/hed/acc/ARC1/AREXClient.cpp:181 src/hed/acc/EMIES/EMIESClient.cpp:189 #, fuzzy, c-format msgid "No response from %s" msgstr "Inget svar" #: src/hed/acc/ARC1/AREXClient.cpp:190 src/hed/acc/EMIES/EMIESClient.cpp:198 #, fuzzy, c-format msgid "%s request to %s failed with response: %s" msgstr "Förfrågan misslyckades: Inget svar" #: src/hed/acc/ARC1/AREXClient.cpp:195 src/hed/acc/EMIES/EMIESClient.cpp:213 #, fuzzy, c-format msgid "XML response: %s" msgstr "Svar: %s" #: src/hed/acc/ARC1/AREXClient.cpp:204 #, fuzzy, c-format msgid "%s request to %s failed. No expected response." msgstr "Förfrågan misslyckades: Inget svar" #: src/hed/acc/ARC1/AREXClient.cpp:218 #, fuzzy, c-format msgid "Creating and sending submit request to %s" msgstr "Skapar och skickar förfrågan" #: src/hed/acc/ARC1/AREXClient.cpp:234 src/hed/acc/ARC1/AREXClient.cpp:482 #: src/hed/acc/EMIES/EMIESClient.cpp:302 src/hed/acc/EMIES/EMIESClient.cpp:405 #: src/hed/acc/UNICORE/UNICOREClient.cpp:160 #, c-format msgid "Job description to be sent: %s" msgstr "Jobbeskrivning som skall sändas: %s" #: src/hed/acc/ARC1/AREXClient.cpp:248 src/hed/acc/EMIES/EMIESClient.cpp:491 #: src/hed/acc/EMIES/EMIESClient.cpp:525 src/hed/acc/EMIES/EMIESClient.cpp:581 #, fuzzy, c-format msgid "Creating and sending job information query request to %s" msgstr "Skapar och sänder en registreringsförfrågan" #: src/hed/acc/ARC1/AREXClient.cpp:293 src/hed/acc/ARC1/AREXClient.cpp:336 #, fuzzy, c-format msgid "Unable to retrieve status of job (%s)" msgstr "Misslyckades med att inhämta LFN/PFN-lista från %s" #: src/hed/acc/ARC1/AREXClient.cpp:346 src/hed/acc/EMIES/EMIESClient.cpp:821 #, fuzzy, c-format msgid "Creating and sending service information query request to %s" msgstr "Skapar och sänder en servicestatusförfrågan" #: src/hed/acc/ARC1/AREXClient.cpp:366 #, fuzzy, c-format msgid "Creating and sending ISIS information query request to %s" msgstr "Skapar och skickar förfrågan" #: src/hed/acc/ARC1/AREXClient.cpp:383 #, c-format msgid "Service %s of type %s ignored" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:386 msgid "No execution services registered in the index service" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:392 #, fuzzy, c-format msgid "Creating and sending terminate request to %s" msgstr "Skapar och skickar förfrågan" #: src/hed/acc/ARC1/AREXClient.cpp:403 #: src/hed/acc/UNICORE/UNICOREClient.cpp:619 #: src/hed/acc/UNICORE/UNICOREClient.cpp:692 msgid "Job termination failed" msgstr "Avbrytande av jobb misslyckades" #: src/hed/acc/ARC1/AREXClient.cpp:414 #, fuzzy, c-format msgid "Creating and sending clean request to %s" msgstr "Skapar och skickar förfrågan" #: src/hed/acc/ARC1/AREXClient.cpp:444 #, fuzzy, c-format msgid "Creating and sending job description retrieval request to %s" msgstr "Skapar och sänder en förfrågan om att starta ett jobb" #: src/hed/acc/ARC1/AREXClient.cpp:464 #, fuzzy, c-format msgid "Creating and sending job migrate request to %s" msgstr "Skapar och sänder en förfrågan om att starta ett jobb" #: src/hed/acc/ARC1/AREXClient.cpp:498 src/hed/acc/EMIES/EMIESClient.cpp:932 #, fuzzy, c-format msgid "Creating and sending job resume request to %s" msgstr "Skapar och 
sänder en registreringsförfrågan" #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:100 #, fuzzy msgid "Renewal of ARC1 jobs is not supported" msgstr "Förfrågan understöds inte - %s" #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:117 #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:197 #, fuzzy, c-format msgid "Resuming job: %s at state: %s (%s)" msgstr "JobID: %s tillstånd: %s" #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:183 #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:103 #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:139 #, fuzzy, c-format msgid "Failed retrieving job description for job: %s" msgstr "Submit: Misslyckades med att sända jobbeskrivning" #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:42 #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:69 msgid "Failed retrieving job status information" msgstr "Misslyckades med att inhämta statusinformation" #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:52 #, fuzzy msgid "Cleaning of BES jobs is not supported" msgstr "process: %s: understöds inte" #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:78 #, fuzzy msgid "Renewal of BES jobs is not supported" msgstr "Förfrågan understöds inte - %s" #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:86 #, fuzzy msgid "Resuming BES jobs is not supported" msgstr "process: %s: understöds inte" #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:49 msgid "Collecting Job (A-REX jobs) information." msgstr "" #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:53 #, fuzzy, c-format msgid "Failed retrieving job IDs: Unsupported url (%s) given" msgstr "Misslyckades med att inhämta statusinformation" #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:61 #, fuzzy msgid "Failed retrieving job IDs" msgstr "Misslyckades med att städa upp efter jobb %s" #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:64 msgid "" "Error encoutered during job ID retrieval. All job IDs might not have been " "retrieved" msgstr "" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:53 #, fuzzy msgid "Failed to prepare job description" msgstr "Submit: Misslyckades med att sända jobbeskrivning" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:78 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:173 msgid "No job identifier returned by BES service" msgstr "" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:99 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:194 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:310 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:77 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:169 msgid "Failed uploading local input files" msgstr "Misslyckades med att ladda upp lokala indatafiler" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:148 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:53 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:136 #, fuzzy msgid "Failed to prepare job description to target resources" msgstr "Submit: Misslyckades med att sända jobbeskrivning" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:271 #, fuzzy msgid "Failed adapting job description to target resources" msgstr "Submit: Misslyckades med att sända jobbeskrivning" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:282 #, c-format msgid "" "Unable to migrate job. Job description is not valid in the %s format: %s" msgstr "" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:295 msgid "No job identifier returned by A-REX" msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:50 msgid "Querying WSRF GLUE2 computing info endpoint." 
msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:95 #: src/hed/libs/compute/GLUE2.cpp:53 msgid "The Service doesn't advertise its Type." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:100 msgid "The Service doesn't advertise its Quality Level." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:120 #, fuzzy, c-format msgid "Generating A-REX target: %s" msgstr "Registrering start" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:145 #: src/hed/libs/compute/GLUE2.cpp:99 msgid "The ComputingEndpoint has no URL." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:151 #: src/hed/libs/compute/GLUE2.cpp:104 msgid "The Service advertises no Health State." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:178 msgid "The Service doesn't advertise its Interface." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:210 msgid "The Service doesn't advertise its Serving State." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:278 #: src/hed/libs/compute/GLUE2.cpp:247 #, c-format msgid "" "The \"FreeSlotsWithDuration\" attribute published by \"%s\" is wrongly " "formatted. Ignoring it." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:279 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE2.cpp:220 #: src/hed/libs/compute/GLUE2.cpp:248 #, c-format msgid "Wrong format of the \"FreeSlotsWithDuration\" = \"%s\" (\"%s\")" msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:398 #: src/hed/libs/compute/GLUE2.cpp:417 #, c-format msgid "" "Couldn't parse benchmark XML:\n" "%s" msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:467 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPNG.cpp:426 #, c-format msgid "Unable to parse the %s.%s value from execution service (%s)." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:468 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPNG.cpp:427 #, c-format msgid "Value of %s.%s is \"%s\"" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:14 msgid "Sorting according to free slots in queue" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:15 msgid "Random sorting" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:16 msgid "Sorting according to specified benchmark (default \"specint2000\")" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:17 msgid "Sorting according to input data availability at target" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:18 msgid "Performs neither sorting nor matching" msgstr "" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:24 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of waiting " "jobs" msgstr "" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:27 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of total slots" msgstr "" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:30 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of free slots" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:114 msgid "Creating a CREAM client" msgstr "Skapar en CREAM-klient" #: src/hed/acc/CREAM/CREAMClient.cpp:117 msgid "Unable to create SOAP client used by CREAMClient." 
msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:128 msgid "CREAMClient not created properly" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:139 #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:52 #: src/hed/acc/UNICORE/UNICOREClient.cpp:207 #: src/hed/acc/UNICORE/UNICOREClient.cpp:299 #: src/hed/acc/UNICORE/UNICOREClient.cpp:376 #: src/hed/acc/UNICORE/UNICOREClient.cpp:455 #: src/hed/acc/UNICORE/UNICOREClient.cpp:488 #: src/hed/acc/UNICORE/UNICOREClient.cpp:565 #: src/hed/acc/UNICORE/UNICOREClient.cpp:641 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:155 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:188 #: src/tests/client/test_ClientInterface.cpp:88 #: src/tests/client/test_ClientSAML2SSO.cpp:81 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:100 #: src/tests/echo/test_clientinterface.cpp:82 #: src/tests/echo/test_clientinterface.cpp:149 #: src/tests/echo/test_clientinterface.py:29 msgid "There was no SOAP response" msgstr "Det fanns inget SOAP-svar" #: src/hed/acc/CREAM/CREAMClient.cpp:148 src/hed/acc/CREAM/CREAMClient.cpp:353 #: src/hed/acc/CREAM/CREAMClient.cpp:374 src/hed/acc/CREAM/CREAMClient.cpp:395 #: src/hed/acc/CREAM/CREAMClient.cpp:414 src/hed/acc/CREAM/CREAMClient.cpp:465 #: src/hed/acc/CREAM/CREAMClient.cpp:494 #, fuzzy msgid "Empty response" msgstr "Inget svar" #: src/hed/acc/CREAM/CREAMClient.cpp:167 #, fuzzy, c-format msgid "Request failed: %s" msgstr "Förfrågan misslyckades" #: src/hed/acc/CREAM/CREAMClient.cpp:175 src/hed/acc/CREAM/CREAMClient.cpp:428 #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:35 #: src/hed/acc/UNICORE/UNICOREClient.cpp:359 msgid "Creating and sending a status request" msgstr "Skapar och sänder en statusförfrågan" #: src/hed/acc/CREAM/CREAMClient.cpp:200 #, fuzzy msgid "Unable to retrieve job status." 
msgstr "Misslyckades med att inhämta statusinformation" #: src/hed/acc/CREAM/CREAMClient.cpp:340 #: src/hed/acc/UNICORE/UNICOREClient.cpp:549 #: src/hed/acc/UNICORE/UNICOREClient.cpp:628 msgid "Creating and sending request to terminate a job" msgstr "Skapar och sänder förfrågan om att avbryta ett jobb" #: src/hed/acc/CREAM/CREAMClient.cpp:361 msgid "Creating and sending request to clean a job" msgstr "Skapar och sänder förfrågan om uppstädning efter ett jobb" #: src/hed/acc/CREAM/CREAMClient.cpp:382 #, fuzzy msgid "Creating and sending request to resume a job" msgstr "Skapar och sänder förfrågan om att avbryta ett jobb" #: src/hed/acc/CREAM/CREAMClient.cpp:403 #, fuzzy msgid "Creating and sending request to list jobs" msgstr "Skapar och sänder förfrågan om uppstädning efter ett jobb" #: src/hed/acc/CREAM/CREAMClient.cpp:450 msgid "Creating and sending job register request" msgstr "Skapar och sänder en registreringsförfrågan" #: src/hed/acc/CREAM/CREAMClient.cpp:470 src/hed/acc/CREAM/CREAMClient.cpp:499 #, fuzzy msgid "No job ID in response" msgstr "Inget svar" #: src/hed/acc/CREAM/CREAMClient.cpp:480 msgid "Creating and sending job start request" msgstr "Skapar och sänder en förfrågan om att starta ett jobb" #: src/hed/acc/CREAM/CREAMClient.cpp:508 msgid "Creating delegation" msgstr "Skapar delegering" #: src/hed/acc/CREAM/CREAMClient.cpp:520 msgid "Malformed response: missing getProxyReqReturn" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:531 #, fuzzy, c-format msgid "Delegatable credentials expired: %s" msgstr "Misslyckades med att förstöra kreditivhandtag: %s" #: src/hed/acc/CREAM/CREAMClient.cpp:541 #, fuzzy msgid "Failed signing certificate request" msgstr "Misslyckades med att verifiera begäran" #: src/hed/acc/CREAM/CREAMClient.cpp:561 #, fuzzy msgid "Failed putting signed delegation certificate to service" msgstr "" "Misslyckades med att skriva det signerade proxycertifikatet till en fil" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:52 #, fuzzy, c-format msgid "Failed cleaning job: %s" msgstr "Misslyckades med att städa upp efter jobb %s" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:70 #, fuzzy, c-format msgid "Failed canceling job: %s" msgstr "Misslyckades med att avbryta jobb %s" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:84 #, fuzzy msgid "Renewal of CREAM jobs is not supported" msgstr "Förfrågan understöds inte - %s" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:98 #, fuzzy, c-format msgid "Failed resuming job: %s" msgstr "Misslyckades med att städa upp efter jobb %s" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:40 #, fuzzy msgid "Failed creating signed delegation certificate" msgstr "Misslyckades med att signera proxycertifikatet" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:61 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:153 #: src/hed/acc/UNICORE/UNICOREClient.cpp:115 #, fuzzy, c-format msgid "Unable to submit job. 
Job description is not valid in the %s format" msgstr "Submit: Misslyckades med att sända jobbeskrivning" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:69 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:161 #, fuzzy msgid "Failed registering job" msgstr "Misslyckades med att starta jobb" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:85 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:177 msgid "Failed starting job" msgstr "Misslyckades med att starta jobb" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:123 #, fuzzy msgid "Failed creating singed delegation certificate" msgstr "Misslyckades med att signera proxycertifikatet" #: src/hed/acc/EMIES/EMIESClient.cpp:79 #, fuzzy msgid "Creating an EMI ES client" msgstr "Skapar en CREAM-klient" #: src/hed/acc/EMIES/EMIESClient.cpp:82 #, fuzzy msgid "Unable to create SOAP client used by EMIESClient." msgstr "Misslyckades med att skapa SOAP-behållare" #: src/hed/acc/EMIES/EMIESClient.cpp:157 #, fuzzy msgid "Re-creating an EMI ES client" msgstr "Skapar en CREAM-klient" #: src/hed/acc/EMIES/EMIESClient.cpp:223 #, fuzzy, c-format msgid "%s request to %s failed. Unexpected response: %s." msgstr "Förfrågan misslyckades: Inget svar" #: src/hed/acc/EMIES/EMIESClient.cpp:237 src/hed/acc/EMIES/EMIESClient.cpp:344 #, fuzzy, c-format msgid "Creating and sending job submit request to %s" msgstr "Skapar och skickar förfrågan" #: src/hed/acc/EMIES/EMIESClient.cpp:415 src/hed/acc/EMIES/EMIESClient.cpp:598 #: src/hed/acc/EMIES/EMIESClient.cpp:1087 #, c-format msgid "New limit for vector queries returned by EMI ES service: %d" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:423 src/hed/acc/EMIES/EMIESClient.cpp:606 #: src/hed/acc/EMIES/EMIESClient.cpp:1095 #, c-format msgid "" "Error: Service returned a limit higher or equal to current limit (current: %" "d; returned: %d)" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:764 #, fuzzy, c-format msgid "Creating and sending service information request to %s" msgstr "Skapar och sänder en servicestatusförfrågan" #: src/hed/acc/EMIES/EMIESClient.cpp:869 src/hed/acc/EMIES/EMIESClient.cpp:890 #, fuzzy, c-format msgid "Creating and sending job clean request to %s" msgstr "Skapar och skickar förfrågan" #: src/hed/acc/EMIES/EMIESClient.cpp:911 #, fuzzy, c-format msgid "Creating and sending job suspend request to %s" msgstr "Skapar och sänder en registreringsförfrågan" #: src/hed/acc/EMIES/EMIESClient.cpp:953 #, fuzzy, c-format msgid "Creating and sending job restart request to %s" msgstr "Skapar och sänder en registreringsförfrågan" #: src/hed/acc/EMIES/EMIESClient.cpp:1010 #, fuzzy, c-format msgid "Creating and sending job notify request to %s" msgstr "Skapar och sänder en förfrågan om att starta ett jobb" #: src/hed/acc/EMIES/EMIESClient.cpp:1065 #, fuzzy, c-format msgid "Creating and sending notify request to %s" msgstr "Skapar och skickar förfrågan" #: src/hed/acc/EMIES/EMIESClient.cpp:1155 #, fuzzy, c-format msgid "Creating and sending job list request to %s" msgstr "Skapar och sänder en förfrågan om att starta ett jobb" #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:156 #, c-format msgid "Job %s has no delegation associated. Can't renew such job." msgstr "" #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:170 #, fuzzy, c-format msgid "Job %s failed to renew delegation %s - %s." 
msgstr "Misslyckades med att initiera delegering" #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:247 #, fuzzy, c-format msgid "Failed retrieving information for job: %s" msgstr "Misslyckades med att inhämta statusinformation" #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:319 #, fuzzy msgid "Retrieving job description of EMI ES jobs is not supported" msgstr "process: %s: understöds inte" #: src/hed/acc/EMIES/JobListRetrieverPluginEMIES.cpp:61 #, c-format msgid "Listing jobs succeeded, %d jobs found" msgstr "" #: src/hed/acc/EMIES/JobListRetrieverPluginEMIES.cpp:77 #: src/hed/acc/ldap/JobListRetrieverPluginLDAPNG.cpp:102 #, c-format msgid "" "Skipping retrieved job (%s) because it was submitted via another interface (%" "s)." msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:47 #, fuzzy msgid "" "Failed to delegate credentials to server - no delegation interface found" msgstr "Misslyckades med att hitta delegeringskreditiv i klientinställningar" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:54 #, fuzzy, c-format msgid "Failed to delegate credentials to server - %s" msgstr "Misslyckades med att hitta delegeringskreditiv i klientinställningar" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:77 #, fuzzy msgid "Failed preparing job description" msgstr "Submit: Misslyckades med att sända jobbeskrivning" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:95 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:406 #, fuzzy msgid "Unable to submit job. Job description is not valid XML" msgstr "Submit: Misslyckades med att sända jobbeskrivning" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:154 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:482 msgid "No valid job identifier returned by EMI ES" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:180 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:499 #, fuzzy msgid "Job failed on service side" msgstr "Misslyckades med att ladda serviceinställningar" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:190 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:509 #, fuzzy msgid "Failed to obtain state of job" msgstr "Misslyckades med att erhålla lista från ftp: %s" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:205 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:521 #, fuzzy msgid "Failed to wait for job to allow stage in" msgstr "Misslyckades med att koppla upp för att städa upp jobb" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:228 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:541 #, fuzzy msgid "Failed to obtain valid stagein URL for input files" msgstr "Misslyckades med att erhålla lista från ftp: %s" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:248 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:558 #, fuzzy, c-format msgid "Failed uploading local input files to %s" msgstr "Misslyckades med att ladda upp lokala indatafiler" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:269 #, fuzzy, c-format msgid "Failed to submit job description: EMIESFault(%s , %s)" msgstr "Submit: Misslyckades med att sända jobbeskrivning" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:278 #, fuzzy, c-format msgid "Failed to submit job description: UnexpectedError(%s)" msgstr "Submit: Misslyckades med att sända jobbeskrivning" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:315 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:574 #, fuzzy msgid "Failed to notify service" msgstr "Misslyckades med att ladda serviceinställningar" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:367 #, fuzzy msgid "Failed preparing job description to target resources" msgstr "Submit: Misslyckades med att sända 
jobbeskrivning" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:475 #, fuzzy, c-format msgid "Failed to submit job description: %s" msgstr "Submit: Misslyckades med att sända jobbeskrivning" #: src/hed/acc/EMIES/TargetInformationRetrieverPluginEMIES.cpp:54 msgid "Collecting EMI-ES GLUE2 computing info endpoint information." msgstr "" #: src/hed/acc/EMIES/TargetInformationRetrieverPluginEMIES.cpp:74 #, fuzzy msgid "Generating EMIES targets" msgstr "Registrering start" #: src/hed/acc/EMIES/TargetInformationRetrieverPluginEMIES.cpp:83 #, fuzzy, c-format msgid "Generated EMIES target: %s" msgstr "Registrering start" #: src/hed/acc/EMIES/TestEMIESClient.cpp:75 #: src/hed/acc/EMIES/TestEMIESClient.cpp:79 #, c-format msgid "Query returned unexpected element: %s:%s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:85 #, c-format msgid "Element validation according to GLUE2 schema failed: %s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:114 #, fuzzy msgid "Resource query failed" msgstr "En servicestatusförfrågan misslyckades" #: src/hed/acc/EMIES/TestEMIESClient.cpp:132 #, fuzzy msgid "Submission failed" msgstr "Insändningsförfrågan misslyckades" #: src/hed/acc/EMIES/TestEMIESClient.cpp:143 #, fuzzy msgid "Obtaining status failed" msgstr "En förfrågan om uppstädning efter ett jobb misslyckades" #: src/hed/acc/EMIES/TestEMIESClient.cpp:153 #, fuzzy msgid "Obtaining information failed" msgstr "SOAP-anrop misslyckades" #: src/hed/acc/EMIES/TestEMIESClient.cpp:170 #, fuzzy msgid "Cleaning failed" msgstr "Uppstädning efter ett jobb misslyckades" #: src/hed/acc/EMIES/TestEMIESClient.cpp:177 #, fuzzy msgid "Notify failed" msgstr "%s misslyckades" #: src/hed/acc/EMIES/TestEMIESClient.cpp:184 #, fuzzy msgid "Kill failed" msgstr "%s misslyckades" #: src/hed/acc/EMIES/TestEMIESClient.cpp:190 #, fuzzy msgid "List failed" msgstr "%s misslyckades" #: src/hed/acc/EMIES/TestEMIESClient.cpp:201 #, fuzzy, c-format msgid "Fetching resource description from %s" msgstr "Submit: Misslyckades med att sända jobbeskrivning" #: src/hed/acc/EMIES/TestEMIESClient.cpp:204 #: src/hed/acc/EMIES/TestEMIESClient.cpp:273 #: src/hed/acc/EMIES/TestEMIESClient.cpp:283 #: src/hed/acc/EMIES/TestEMIESClient.cpp:294 #, fuzzy, c-format msgid "Failed to obtain resource description: %s" msgstr "Misslyckades med att slå upp destination: %s" #: src/hed/acc/EMIES/TestEMIESClient.cpp:213 #: src/hed/acc/EMIES/TestEMIESClient.cpp:217 #, c-format msgid "Resource description contains unexpected element: %s:%s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:223 msgid "Resource description validation according to GLUE2 schema failed: " msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:228 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:560 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:819 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:133 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:173 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1218 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1252 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1434 #: src/hed/identitymap/ArgusPDPClient.cpp:444 #: src/hed/identitymap/ArgusPEPClient.cpp:98 #: src/hed/identitymap/ArgusPEPClient.cpp:345 #: src/hed/libs/common/Thread.cpp:193 src/hed/libs/common/Thread.cpp:196 #: src/hed/libs/common/Thread.cpp:199 #: src/hed/libs/credential/Credential.cpp:1055 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:72 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:88 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:104 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:124 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:134 #: 
src/hed/mcc/tcp/PayloadTCPSocket.cpp:142 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:151 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:160 #: src/hed/mcc/tls/PayloadTLSMCC.cpp:82 src/hed/shc/arcpdp/ArcPDP.cpp:235 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:293 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:247 #: src/services/a-rex/delegation/DelegationStore.cpp:44 #: src/services/a-rex/delegation/DelegationStore.cpp:49 #: src/services/a-rex/delegation/DelegationStore.cpp:54 #: src/services/a-rex/delegation/DelegationStore.cpp:88 #: src/services/a-rex/delegation/DelegationStore.cpp:94 #: src/services/a-rex/grid-manager/inputcheck.cpp:33 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:552 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:620 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:645 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:656 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:667 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:678 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:686 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:692 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:697 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:702 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:707 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:715 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:723 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:734 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:741 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:780 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:790 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:798 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:824 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:893 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:906 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:923 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:935 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1239 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1244 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1273 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1286 #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:373 #, c-format msgid "%s" msgstr "%s" #: src/hed/acc/EMIES/TestEMIESClient.cpp:248 #, fuzzy msgid "Resource description is empty" msgstr "Förfrågan-nod är tom" #: src/hed/acc/EMIES/TestEMIESClient.cpp:255 #, c-format msgid "Resource description provides URL for interface %s: %s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:260 msgid "Resource description provides no URLs for interfaces" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:263 #, fuzzy msgid "Resource description validation passed" msgstr "källa destination" #: src/hed/acc/EMIES/TestEMIESClient.cpp:266 #, c-format msgid "Requesting ComputingService elements of resource description at %s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:271 msgid "Performing /Services/ComputingService query" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:275 #: src/hed/acc/EMIES/TestEMIESClient.cpp:285 #: src/hed/acc/EMIES/TestEMIESClient.cpp:296 msgid "Query returned no elements." 
msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:281 msgid "Performing /ComputingService query" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:292 msgid "Performing /* query" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:302 #, fuzzy msgid "All queries failed" msgstr "WSRF-förfrågan misslyckades" #: src/hed/acc/EMIES/TestEMIESClient.cpp:332 #, c-format msgid "" "Number of ComputingService elements obtained from full document and XPath " "qury do not match: %d != %d" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:335 msgid "Resource description query validation passed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:337 #, fuzzy, c-format msgid "Unsupported command: %s" msgstr "Icke understödd destinations-URL: %s" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:75 #, c-format msgid "[ADLParser] Unsupported EMI ES state %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:95 #, c-format msgid "[ADLParser] Unsupported internal state %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:105 #, fuzzy, c-format msgid "[ADLParser] Optional for %s elements are not supported yet." msgstr "Kopiering av filuppsättning för denna källtyp stöds inte" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:114 #, c-format msgid "[ADLParser] %s element must be boolean." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:126 #, c-format msgid "[ADLParser] Code in FailIfExitCodeNotEqualTo in %s is not valid number." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:364 msgid "[ADLParser] Root element is not ActivityDescription " msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:411 msgid "[ADLParser] priority is too large - using max value 100" msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:454 #, c-format msgid "[ADLParser] Unsupported URL %s for RemoteLogging." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:473 #, c-format msgid "[ADLParser] Wrong time %s in ExpirationTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:502 #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:505 msgid "[ADLParser] Only email Prorocol for Notification is supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:563 msgid "[ADLParser] Missing or wrong value in ProcessesPerSlot." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:568 msgid "[ADLParser] Missing or wrong value in ThreadsPerProcess." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:574 msgid "" "[ADLParser] Missing Name element or value in ParallelEnvironment/Option " "element." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:591 #, fuzzy msgid "[ADLParser] NetworkInfo is not supported yet." msgstr "Kopiering av filuppsättning för denna källtyp stöds inte" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:605 #, fuzzy, c-format msgid "[ADLParser] NodeAccess value %s is not supported yet." msgstr "process: %s: understöds inte" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:613 msgid "[ADLParser] Missing or wrong value in NumberOfSlots." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:620 msgid "" "[ADLParser] The NumberOfSlots element should be specified, when the value of " "useNumberOfSlots attribute of SlotsPerHost element is \"true\"." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:627 msgid "[ADLParser] Missing or wrong value in SlotsPerHost." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:656 msgid "[ADLParser] Missing or wrong value in IndividualPhysicalMemory." 
msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:666 msgid "[ADLParser] Missing or wrong value in IndividualVirtualMemory." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:676 msgid "[ADLParser] Missing or wrong value in DiskSpaceRequirement." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:690 #, fuzzy msgid "[ADLParser] Benchmark is not supported yet." msgstr "Kopiering av filuppsättning för denna källtyp stöds inte" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:698 msgid "[ADLParser] Missing or wrong value in IndividualCPUTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:706 msgid "[ADLParser] Missing or wrong value in TotalCPUTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:715 msgid "[ADLParser] Missing or wrong value in WallTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:735 msgid "[ADLParser] Missing or empty Name in InputFile." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:746 #, c-format msgid "[ADLParser] Wrong URI specified in Source - %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:768 msgid "[ADLParser] Missing or empty Name in OutputFile." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:774 #, c-format msgid "[ADLParser] Wrong URI specified in Target - %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:787 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:792 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:846 #, c-format msgid "Location URI for file %s is invalid" msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:812 #, fuzzy, c-format msgid "[ADLParser] CreationFlag value %s is not supported." msgstr "SOAP-process understöds inte: %s" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:75 #, c-format msgid "Unknown operator '%s' in attribute require in Version element" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:121 #, fuzzy, c-format msgid "Multiple '%s' elements are not supported." msgstr "process: %s: understöds inte" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:136 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:143 #, c-format msgid "The 'exclusiveBound' attribute to the '%s' element is not supported." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:150 msgid "The 'epsilon' attribute to the 'Exact' element is not supported." 
msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:178 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:195 #, c-format msgid "Parsing error: Value of %s element can't be parsed as number" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:183 #, c-format msgid "" "Parsing error: Elements (%s) representing upper range have different values" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:200 #, c-format msgid "" "Parsing error: Elements (%s) representing lower range have different values" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:209 #, c-format msgid "" "Parsing error: Value of lower range (%s) is greater than value of upper " "range (%s)" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:296 msgid "[ARCJSDLParser] Not a JSDL - missing JobDescription element" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:377 #, c-format msgid "" "[ARCJSDLParser] Error during the parsing: missed the name attributes of the " "\"%s\" Environment" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:424 msgid "[ARCJSDLParser] RemoteLogging URL is wrongly formatted." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:440 msgid "[ARCJSDLParser] priority is too large - using max value 100" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:718 msgid "Lower bounded range is not supported for the 'TotalCPUCount' element." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:736 msgid "" "Parsing the \"require\" attribute of the \"QueueName\" nordugrid-JSDL " "element failed. An invalid comparison operator was used, only \"ne\" or \"eq" "\" are allowed." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:787 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:841 #, fuzzy, c-format msgid "No URI element found in Location for file %s" msgstr "Demonisering av fork misslyckades: %s" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:873 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:323 #, c-format msgid "String successfully parsed as %s." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:53 #, c-format msgid "[JDLParser] Semicolon (;) is not allowed inside brackets, at '%s;'." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:137 #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:141 #, fuzzy, c-format msgid "[JDLParser] This kind of JDL descriptor is not supported yet: %s" msgstr "Kopiering av filuppsättning för denna källtyp stöds inte" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:144 #, c-format msgid "[JDLParser] Attribute named %s has unknown value: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:224 msgid "Not enough outputsandboxdesturi elements!" msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:306 msgid "" "[JDLParser] Environment variable has been defined without any equals sign." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:503 #, c-format msgid "[JDLParser]: Unknown attribute name: '%s', with value: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:539 msgid "The inputsandboxbaseuri JDL attribute specifies an invalid URL." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:616 msgid "[JDLParser] Syntax error found during the split function." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:620 msgid "[JDLParser] Lines count is zero or other funny error has occurred." 
msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:628 msgid "" "[JDLParser] JDL syntax error. There is at least one equals sign missing " "where it would be expected." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:641 #, fuzzy, c-format msgid "String successfully parsed as %s" msgstr "Stängdes OK" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:42 msgid "Left operand for RSL concatenation does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:50 msgid "Right operand for RSL concatenation does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:161 msgid "Multi-request operator only allowed at top level" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:186 msgid "RSL substitution is not a sequence" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:192 msgid "RSL substitution sequence is not of length 2" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:211 msgid "RSL substitution variable name does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:220 msgid "RSL substitution variable value does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:313 #, fuzzy msgid "End of comment not found" msgstr "Jobb ej funnet i jobblista: %s" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:324 msgid "Junk at end of RSL" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:424 msgid "End of single quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:441 msgid "End of double quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:460 #, c-format msgid "End of user delimiter (%s) quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:518 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:546 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:625 msgid "')' expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:528 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:609 msgid "'(' expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:536 msgid "Variable name expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:541 #, fuzzy, c-format msgid "Variable name (%s) contains invalid character (%s)" msgstr "urllistan %s innehåller ogiltig URL: %s" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:557 #, fuzzy msgid "Broken string" msgstr "sträng" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:570 msgid "No left operand for concatenation operator" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:574 msgid "No right operand for concatenation operator" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:638 msgid "Attribute name expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:643 #, fuzzy, c-format msgid "Attribute name (%s) contains invalid character (%s)" msgstr "urllistan %s innehåller ogiltig URL: %s" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:649 #, fuzzy msgid "Relation operator expected" msgstr "Omedelbart färdigställande: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:86 msgid "Error parsing the internally set executables attribute." 
msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:102 #, c-format msgid "" "File '%s' in the 'executables' attribute is not present in the 'inputfiles' " "attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:120 msgid "The value of the ftpthreads attribute must be a number from 1 to 10" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:177 msgid "'stdout' attribute must specified when 'join' attribute is specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:181 msgid "" "Attribute 'join' cannot be specified when both 'stdout' and 'stderr' " "attributes is specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:200 msgid "Attributes 'gridtime' and 'cputime' cannot be specified together" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:204 msgid "Attributes 'gridtime' and 'walltime' cannot be specified together" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:226 msgid "" "When specifying 'countpernode' attribute, 'count' attribute must also be " "specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:229 msgid "Value of 'countpernode' attribute must be an integer" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:287 #, fuzzy msgid "No RSL content in job description found" msgstr "CreateActivity: ingen jobbeskrivning funnen" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:293 msgid "Multi-request job description not allowed in GRIDMANAGER dialect" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:304 msgid "No execuable path specified in GRIDMANAGER dialect" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:311 msgid "'action' attribute not allowed in user-side job description" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:314 msgid "Executable path not specified ('executable' attribute)" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:332 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:350 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:368 #, c-format msgid "Attribute '%s' multiply defined" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:336 #, c-format msgid "Value of attribute '%s' expected to be single value" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:341 #, c-format msgid "Value of attribute '%s' expected to be a string" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:357 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:387 #, fuzzy, c-format msgid "Value of attribute '%s' is not a string" msgstr "Misslyckades med att skriva begäran till en fil" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:375 #, c-format msgid "Value of attribute '%s' is not sequence" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:379 #, c-format msgid "" "Value of attribute '%s' has wrong sequence length: Expected %d, found %d" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:511 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1368 msgid "Unexpected RSL type" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:576 msgid "At least two values are needed for the 'inputfiles' attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:581 msgid "First value of 'inputfiles' attribute (filename) cannot be empty" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:606 #, fuzzy, c-format msgid "Invalid URL '%s' for input file '%s'" msgstr "Ogiltig periodsträng: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:615 #, fuzzy, 
c-format msgid "Invalid URL option syntax in option '%s' for input file '%s'" msgstr "Ogiltig periodsträng: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:625 #, fuzzy, c-format msgid "Invalid URL: '%s' in input file '%s'" msgstr "Ogiltig periodsträng: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:694 msgid "At least two values are needed for the 'outputfiles' attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:699 msgid "First value of 'outputfiles' attribute (filename) cannot be empty" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:711 #, fuzzy, c-format msgid "Invalid URL '%s' for output file '%s'" msgstr "Ogiltig periodsträng: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:720 #, fuzzy, c-format msgid "Invalid URL option syntax in option '%s' for output file '%s'" msgstr "Ogiltig periodsträng: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:730 #, fuzzy, c-format msgid "Invalid URL: '%s' in output file '%s'" msgstr "Ogiltig periodsträng: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:761 #, c-format msgid "" "Invalid comparison operator '%s' used at 'queue' attribute in 'GRIDMANAGER' " "dialect, only \"=\" is allowed" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:767 #, c-format msgid "" "Invalid comparison operator '%s' used at 'queue' attribute, only \"!=\" or " "\"=\" are allowed." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1029 msgid "The value of the acl XRSL attribute isn't valid XML." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1043 msgid "The cluster XRSL attribute is currently unsupported." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1059 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it must contain an email " "address" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1067 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it must only contain email " "addresses after state flag(s)" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1070 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it contains unknown state " "flags" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1118 msgid "priority is too large - using max value 100" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1151 #, fuzzy, c-format msgid "Invalid nodeaccess value: %s" msgstr "Filen är inte tillgänglig: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1194 msgid "Value of 'count' attribute must be an integer" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1224 msgid "Value of 'exclusiveexecution' attribute must either be 'yes' or 'no'" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1270 #, fuzzy, c-format msgid "Invalid action value %s" msgstr "Filen är inte tillgänglig: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1360 #, c-format msgid "The specified Globus attribute (%s) is not supported. %s ignored." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1364 #, fuzzy, c-format msgid "Unknown XRSL attribute: %s - Ignoring it." 
msgstr " attribut:" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1378 #, fuzzy, c-format msgid "Wrong language requested: %s" msgstr "Fel antal parametrar angivna" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1384 #, fuzzy msgid "Missing executable" msgstr "Tom executable" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1702 msgid "" "Cannot output XRSL representation: The Resources.SlotRequirement." "NumberOfSlots attribute must be specified when the Resources.SlotRequirement." "SlotsPerHost attribute is specified." msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:65 #: src/services/wrappers/python/pythonwrapper.cpp:96 #, fuzzy msgid "Failed to initialize main Python thread" msgstr "Misslyckades med att initiera X509-struktur" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:71 #: src/services/wrappers/python/pythonwrapper.cpp:101 msgid "Main Python thread was not initialized" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:81 #, fuzzy, c-format msgid "Loading Python broker (%i)" msgstr "Läser inställningsfil: %s" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:104 #: src/services/wrappers/python/pythonwrapper.cpp:139 #, fuzzy msgid "Main Python thread is not initialized" msgstr "DelegateProxy misslyckades" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:108 msgid "PythonBroker init" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:116 msgid "" "Invalid class name. The broker argument for the PythonBroker should be\n" " Filename.Class.args (args is optional), for example SampleBroker." "MyBroker" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:122 #, fuzzy, c-format msgid "Class name: %s" msgstr "klassnamn: %s" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:123 #, fuzzy, c-format msgid "Module name: %s" msgstr "modulnamn: %s" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:132 #: src/services/wrappers/python/pythonwrapper.cpp:183 #, fuzzy msgid "Cannot convert ARC module name to Python string" msgstr "Kan inte omvandla modulnamn till pythonsträng" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:140 #: src/services/wrappers/python/pythonwrapper.cpp:191 #, fuzzy msgid "Cannot import ARC module" msgstr "Kan inte importera modul" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:149 #: src/services/wrappers/python/pythonwrapper.cpp:201 #: src/services/wrappers/python/pythonwrapper.cpp:426 #: src/services/wrappers/python/pythonwrapper.cpp:526 #, fuzzy msgid "Cannot get dictionary of ARC module" msgstr "Kan inte erhålla ordlista för modulen" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:158 #, fuzzy msgid "Cannot find ARC UserConfig class" msgstr "Kan inte hitta arcinställningsklass" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:166 #, fuzzy msgid "UserConfig class is not an object" msgstr "Inställningsklass är inget objekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:173 #, fuzzy msgid "Cannot find ARC JobDescription class" msgstr "Kan inte hitta arcinställningsklass" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:181 #, fuzzy msgid "JobDescription class is not an object" msgstr "Inställningsklass är inget objekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:188 #, fuzzy msgid "Cannot find ARC ExecutionTarget class" msgstr "Kan inte hitta arcmeddelandeklass" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:196 #, fuzzy msgid "ExecutionTarget class is not an object" msgstr "Meddelandeklass är inget objekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:207 #: 
src/services/wrappers/python/pythonwrapper.cpp:162 msgid "Cannot convert module name to Python string" msgstr "Kan inte omvandla modulnamn till pythonsträng" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:215 #: src/services/wrappers/python/pythonwrapper.cpp:169 msgid "Cannot import module" msgstr "Kan inte importera modul" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:224 #, fuzzy msgid "Cannot get dictionary of custom broker module" msgstr "Kan inte erhålla ordlista för arcmodulen" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:233 #, fuzzy msgid "Cannot find custom broker class" msgstr "Hittar inte serviceklass" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:241 #, fuzzy, c-format msgid "%s class is not an object" msgstr "%s är inget objekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:247 #, fuzzy msgid "Cannot create UserConfig argument" msgstr "Kan inte skapa inställningsargument" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:255 #, fuzzy msgid "Cannot convert UserConfig to Python object" msgstr "Kan inte omvandla inställningar till pythonobjekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:263 #: src/services/wrappers/python/pythonwrapper.cpp:258 msgid "Cannot create argument of the constructor" msgstr "Kan inte skapa argument till konstruktorn" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:272 #: src/services/wrappers/python/pythonwrapper.cpp:266 #, fuzzy msgid "Cannot create instance of Python class" msgstr "Kan inte skapa instans av pythonklass" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:278 #, fuzzy, c-format msgid "Python broker constructor called (%d)" msgstr "Python-wrapper-destruktor anropad (%d)" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:302 #, fuzzy, c-format msgid "Python broker destructor called (%d)" msgstr "Python-wrapper-destruktor anropad (%d)" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:311 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:328 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:361 #, fuzzy msgid "Cannot create ExecutionTarget argument" msgstr "Kan inte skapa inställningsargument" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:319 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:336 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:369 #, fuzzy, c-format msgid "Cannot convert ExecutionTarget (%s) to python object" msgstr "Kan inte omvandla inställningar till pythonobjekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:393 #, fuzzy msgid "Cannot create JobDescription argument" msgstr "Kan inte skapa inställningsargument" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:401 #, fuzzy msgid "Cannot convert JobDescription to python object" msgstr "Kan inte omvandla inställningar till pythonobjekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:422 msgid "Do sorting using user created python broker" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:53 #, fuzzy msgid "Cannot initialize ARCHERY domain name for query" msgstr "Kan inte initiera winsockbibliotek" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:60 #, fuzzy msgid "Cannot create resolver from /etc/resolv.conf" msgstr "Kan inte skapa tillägg för proxycertifikat" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:68 msgid "Cannot query service endpoint TXT records from DNS" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:79 msgid "Cannot parse service endpoint TXT records." 
msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:121 #, c-format msgid "Wrong service record field \"%s\" found in the \"%s\"" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:126 #, c-format msgid "Malformed ARCHERY record found (endpoint url is not defined): %s" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:131 #, c-format msgid "Malformed ARCHERY record found (endpoint type is not defined): %s" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:135 #, fuzzy, c-format msgid "Found service endpoint %s (type %s)" msgstr "process: ändpunkt: %s" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:150 #, c-format msgid "" "Status for service endpoint \"%s\" is set to inactive in ARCHERY. Skipping." msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginEMIR.cpp:101 #, c-format msgid "Found %u service endpoints from the index service at %s" msgstr "" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:102 #, fuzzy msgid "Cleaning of UNICORE jobs is not supported" msgstr "process: %s: understöds inte" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:131 #, fuzzy msgid "Canceling of UNICORE jobs is not supported" msgstr "Förfrågan understöds inte - %s" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:139 #, fuzzy msgid "Renewal of UNICORE jobs is not supported" msgstr "Förfrågan understöds inte - %s" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:147 #, fuzzy msgid "Resumation of UNICORE jobs is not supported" msgstr "process: %s: understöds inte" #: src/hed/acc/UNICORE/UNICOREClient.cpp:67 #, fuzzy msgid "Creating a UNICORE client" msgstr "Skapar en CREAM-klient" #: src/hed/acc/UNICORE/UNICOREClient.cpp:90 src/services/a-rex/test.cpp:154 #: src/services/a-rex/test.cpp:227 src/services/a-rex/test.cpp:275 #: src/services/a-rex/test.cpp:323 src/services/a-rex/test.cpp:371 #: src/tests/client/test_ClientInterface.cpp:73 #: src/tests/client/test_ClientSAML2SSO.cpp:47 #: src/tests/client/test_ClientSAML2SSO.cpp:71 #: src/tests/count/test_client.cpp:64 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:85 #: src/tests/echo/echo_test4axis2c/test_client.cpp:56 #: src/tests/echo/test.cpp:62 src/tests/echo/test_client.cpp:72 #: src/tests/echo/test_clientinterface.cpp:67 #: src/tests/echo/test_clientinterface.cpp:107 #: src/tests/echo/test_clientinterface.cpp:136 #: src/tests/echo/test_clientinterface.py:19 msgid "Creating and sending request" msgstr "Skapar och skickar förfrågan" #: src/hed/acc/UNICORE/UNICOREClient.cpp:182 msgid "Failed to find delegation credentials in client configuration" msgstr "Misslyckades med att hitta delegeringskreditiv i klientinställningar" #: src/hed/acc/UNICORE/UNICOREClient.cpp:194 #: src/hed/acc/UNICORE/UNICOREClient.cpp:224 src/services/a-rex/test.cpp:88 msgid "Failed to initiate delegation" msgstr "Misslyckades med att initiera delegering" #: src/hed/acc/UNICORE/UNICOREClient.cpp:203 #: src/hed/acc/UNICORE/UNICOREClient.cpp:236 msgid "Submission request failed" msgstr "Insändningsförfrågan misslyckades" #: src/hed/acc/UNICORE/UNICOREClient.cpp:239 msgid "Submission request succeed" msgstr "Insändningsförfrågan lyckades" #: src/hed/acc/UNICORE/UNICOREClient.cpp:241 msgid "There was no response to a submission request" msgstr "Det kom inget svar på en insändningsförfrågan" #: src/hed/acc/UNICORE/UNICOREClient.cpp:248 msgid "A response to a submission request was not a SOAP message" msgstr "Ett svar på en insändningsförfrågan var inget 
SOAP-meddelande" #: src/hed/acc/UNICORE/UNICOREClient.cpp:255 #: src/hed/acc/UNICORE/UNICOREClient.cpp:336 #: src/hed/acc/UNICORE/UNICOREClient.cpp:414 #: src/hed/acc/UNICORE/UNICOREClient.cpp:527 #: src/hed/acc/UNICORE/UNICOREClient.cpp:603 #: src/hed/acc/UNICORE/UNICOREClient.cpp:677 msgid "There is no connection chain configured" msgstr "Ingen uppkopplingskedja har ställts in" #: src/hed/acc/UNICORE/UNICOREClient.cpp:276 #: src/hed/acc/UNICORE/UNICOREClient.cpp:348 #, c-format msgid "Submission returned failure: %s" msgstr "Insändning returnerade fel: %s" #: src/hed/acc/UNICORE/UNICOREClient.cpp:277 #: src/hed/acc/UNICORE/UNICOREClient.cpp:349 #, c-format msgid "Submission failed, service returned: %s" msgstr "Insändning misslyckades, service returnerade: %s" #: src/hed/acc/UNICORE/UNICOREClient.cpp:284 #, fuzzy msgid "Creating and sending a start job request" msgstr "Skapar och sänder en statusförfrågan" #: src/hed/acc/UNICORE/UNICOREClient.cpp:317 #, fuzzy msgid "A start job request failed" msgstr "En statusförfrågan misslyckades" #: src/hed/acc/UNICORE/UNICOREClient.cpp:320 #, fuzzy msgid "A start job request succeeded" msgstr "En statusförfrågan lyckades" #: src/hed/acc/UNICORE/UNICOREClient.cpp:322 #, fuzzy msgid "There was no response to a start job request" msgstr "Det kom inget svar på en statusförfrågan" #: src/hed/acc/UNICORE/UNICOREClient.cpp:329 #, fuzzy msgid "The response of a start job request was not a SOAP message" msgstr "Ett svar på en statusförfrågan var inget SOAP-meddelande" #: src/hed/acc/UNICORE/UNICOREClient.cpp:395 msgid "A status request failed" msgstr "En statusförfrågan misslyckades" #: src/hed/acc/UNICORE/UNICOREClient.cpp:398 msgid "A status request succeed" msgstr "En statusförfrågan lyckades" #: src/hed/acc/UNICORE/UNICOREClient.cpp:400 msgid "There was no response to a status request" msgstr "Det kom inget svar på en statusförfrågan" #: src/hed/acc/UNICORE/UNICOREClient.cpp:408 msgid "The response of a status request was not a SOAP message" msgstr "Ett svar på en statusförfrågan var inget SOAP-meddelande" #: src/hed/acc/UNICORE/UNICOREClient.cpp:433 msgid "The job status could not be retrieved" msgstr "Jobstatus kunde inte inhämtas" #: src/hed/acc/UNICORE/UNICOREClient.cpp:444 #, fuzzy msgid "Creating and sending an index service query" msgstr "Skapar och sänder en servicestatusförfrågan" #: src/hed/acc/UNICORE/UNICOREClient.cpp:472 msgid "Creating and sending a service status request" msgstr "Skapar och sänder en servicestatusförfrågan" #: src/hed/acc/UNICORE/UNICOREClient.cpp:508 msgid "A service status request failed" msgstr "En servicestatusförfrågan misslyckades" #: src/hed/acc/UNICORE/UNICOREClient.cpp:511 #, fuzzy msgid "A service status request succeeded" msgstr "En servicestatusförfrågan lyckades" #: src/hed/acc/UNICORE/UNICOREClient.cpp:513 msgid "There was no response to a service status request" msgstr "Det kom inget svar på en servicestatusförfrågan" #: src/hed/acc/UNICORE/UNICOREClient.cpp:520 msgid "The response of a service status request was not a SOAP message" msgstr "Ett svar på en servicestatusförfrågan var inget SOAP-meddelande" #: src/hed/acc/UNICORE/UNICOREClient.cpp:537 msgid "The service status could not be retrieved" msgstr "Servicestatus kunde inte inhämtas" #: src/hed/acc/UNICORE/UNICOREClient.cpp:584 msgid "A job termination request failed" msgstr "En förfrågan om att avbryta ett jobb misslyckades" #: src/hed/acc/UNICORE/UNICOREClient.cpp:587 msgid "A job termination request succeed" msgstr "En förfrågan om att avbryta ett 
jobb lyckades" #: src/hed/acc/UNICORE/UNICOREClient.cpp:589 msgid "There was no response to a job termination request" msgstr "Det kom inget svar på en förfrågan om att avbryta ett jobb" #: src/hed/acc/UNICORE/UNICOREClient.cpp:596 msgid "The response of a job termination request was not a SOAP message" msgstr "" "Ett svar på en förfrågan om att avbryta ett jobb var inget SOAP-meddelande" #: src/hed/acc/UNICORE/UNICOREClient.cpp:658 msgid "A job cleaning request failed" msgstr "En förfrågan om uppstädning efter ett jobb misslyckades" #: src/hed/acc/UNICORE/UNICOREClient.cpp:661 msgid "A job cleaning request succeed" msgstr "En förfrågan om uppstädning efter ett jobb lyckades" #: src/hed/acc/UNICORE/UNICOREClient.cpp:663 msgid "There was no response to a job cleaning request" msgstr "Det kom inget svar på en förfrågan om uppstädning efter ett jobb" #: src/hed/acc/UNICORE/UNICOREClient.cpp:670 msgid "The response of a job cleaning request was not a SOAP message" msgstr "" "Ett svar på en förfrågan om uppstädning efter ett jobb var inget SOAP-" "meddelande" #: src/hed/acc/ldap/Extractor.h:22 #, c-format msgid "Extractor[%s] (%s): %s = %s" msgstr "" #: src/hed/acc/ldap/Extractor.h:113 src/hed/acc/ldap/Extractor.h:130 #, c-format msgid "Extractor[%s] (%s): %s contains %s" msgstr "" #: src/hed/acc/ldap/JobListRetrieverPluginLDAPGLUE2.cpp:54 #, c-format msgid "Adding endpoint '%s' with interface name %s" msgstr "" #: src/hed/acc/ldap/JobListRetrieverPluginLDAPNG.cpp:63 #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginBDII.cpp:43 #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginEGIIS.cpp:46 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE1.cpp:49 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE2.cpp:47 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPNG.cpp:59 msgid "Can't create information handle - is the ARC ldap DMC plugin available?" msgstr "" #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginBDII.cpp:85 msgid "Adding CREAM computing service" msgstr "" #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginEGIIS.cpp:79 #, c-format msgid "Unknown entry in EGIIS (%s)" msgstr "" #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginEGIIS.cpp:87 msgid "" "Entry in EGIIS is missing one or more of the attributes 'Mds-Service-type', " "'Mds-Service-hn', 'Mds-Service-port' and/or 'Mds-Service-Ldap-suffix'" msgstr "" #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE2.cpp:219 msgid "" "The \"FreeSlotsWithDuration\" attribute is wrongly formatted. Ignoring it." 
msgstr "" #: src/hed/daemon/unix/daemon.cpp:74 #, c-format msgid "Daemonization fork failed: %s" msgstr "Demonisering av fork misslyckades: %s" #: src/hed/daemon/unix/daemon.cpp:82 #, fuzzy msgid "Watchdog (re)starting application" msgstr "Kan inte starta program" #: src/hed/daemon/unix/daemon.cpp:87 #, fuzzy, c-format msgid "Watchdog fork failed: %s" msgstr "Demonisering av fork misslyckades: %s" #: src/hed/daemon/unix/daemon.cpp:94 #, fuzzy msgid "Watchdog starting monitoring" msgstr "Startar jobbmonitorering" #: src/hed/daemon/unix/daemon.cpp:120 #, c-format msgid "Watchdog detected application exit due to signal %u" msgstr "" #: src/hed/daemon/unix/daemon.cpp:122 #, c-format msgid "Watchdog detected application exited with code %u" msgstr "" #: src/hed/daemon/unix/daemon.cpp:124 #, fuzzy msgid "Watchdog detected application exit" msgstr "Misslyckades med att lagra tillämpningsdata i OpenSSL" #: src/hed/daemon/unix/daemon.cpp:133 msgid "" "Watchdog exiting because application was purposely killed or exited itself" msgstr "" #: src/hed/daemon/unix/daemon.cpp:140 #, fuzzy msgid "Watchdog detected application timeout or error - killing process" msgstr "Misslyckades med att lagra tillämpningsdata i OpenSSL" #: src/hed/daemon/unix/daemon.cpp:151 #, fuzzy msgid "Watchdog failed to wait till application exited - sending KILL" msgstr "Misslyckades med att lagra tillämpningsdata i OpenSSL" #: src/hed/daemon/unix/daemon.cpp:163 #, fuzzy msgid "Watchdog failed to kill application - giving up and exiting" msgstr "Misslyckades med att lagra tillämpningsdata i OpenSSL" #: src/hed/daemon/unix/daemon.cpp:184 msgid "Shutdown daemon" msgstr "Stänger av demon" #: src/hed/daemon/unix/main_unix.cpp:43 src/hed/daemon/win32/main_win32.cpp:27 msgid "shutdown" msgstr "avstängning" #: src/hed/daemon/unix/main_unix.cpp:46 msgid "exit" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:92 src/hed/daemon/win32/main_win32.cpp:53 msgid "No server config part of config file" msgstr "Ingen serverinställningsdel i inställningsfilen" #: src/hed/daemon/unix/main_unix.cpp:163 #: src/hed/daemon/win32/main_win32.cpp:91 #, fuzzy, c-format msgid "Unknown log level %s" msgstr " attribut:" #: src/hed/daemon/unix/main_unix.cpp:173 #: src/hed/daemon/win32/main_win32.cpp:100 #, fuzzy, c-format msgid "Failed to open log file: %s" msgstr "Misslyckades med att låsa upp fil %s: %s" #: src/hed/daemon/unix/main_unix.cpp:206 #: src/hed/daemon/win32/main_win32.cpp:126 msgid "Start foreground" msgstr "Startar i förgrunden" #: src/hed/daemon/unix/main_unix.cpp:255 #, fuzzy, c-format msgid "XML config file %s does not exist" msgstr "Cachefil %s existerar inte" #: src/hed/daemon/unix/main_unix.cpp:259 src/hed/daemon/unix/main_unix.cpp:274 #: src/hed/daemon/win32/main_win32.cpp:154 #, fuzzy, c-format msgid "Failed to load service configuration from file %s" msgstr "Misslyckades med att ladda serviceinställningar" #: src/hed/daemon/unix/main_unix.cpp:265 #, fuzzy, c-format msgid "INI config file %s does not exist" msgstr "Cachefil %s existerar inte" #: src/hed/daemon/unix/main_unix.cpp:270 #, fuzzy msgid "Error evaluating profile" msgstr "Använder proxyfil: %s" #: src/hed/daemon/unix/main_unix.cpp:286 #, fuzzy msgid "Error loading generated configuration" msgstr "Fel med cacheinställningar: %s" #: src/hed/daemon/unix/main_unix.cpp:292 #, fuzzy msgid "Error evaulating profile" msgstr "Använder proxyfil: %s" #: src/hed/daemon/unix/main_unix.cpp:297 #, fuzzy msgid "Failed to load service configuration from any default config file" msgstr "Misslyckades 
med att ladda serviceinställningar" #: src/hed/daemon/unix/main_unix.cpp:358 #, fuzzy msgid "Schema validation error" msgstr "Minnesallokeringsfel" #: src/hed/daemon/unix/main_unix.cpp:373 #: src/hed/daemon/win32/main_win32.cpp:159 #, fuzzy msgid "Configuration root element is not " msgstr "Inställningarnas rotobjekt är inte ArcConfig" #: src/hed/daemon/unix/main_unix.cpp:389 #, fuzzy, c-format msgid "Cannot switch to group (%s)" msgstr "Ingen sådan grupp: %s" #: src/hed/daemon/unix/main_unix.cpp:399 #, fuzzy, c-format msgid "Cannot switch to primary group for user (%s)" msgstr "Ingen sådan grupp: %s" #: src/hed/daemon/unix/main_unix.cpp:404 #, fuzzy, c-format msgid "Cannot switch to user (%s)" msgstr "Kan inte ändra ägare för %s" #: src/hed/daemon/unix/main_unix.cpp:422 #: src/hed/daemon/win32/main_win32.cpp:176 #, fuzzy msgid "Failed to load service side MCCs" msgstr "Misslyckades med att ladda serviceinställningar" #: src/hed/daemon/unix/main_unix.cpp:424 #: src/hed/daemon/win32/main_win32.cpp:178 src/services/a-rex/test.cpp:41 #: src/tests/count/test_service.cpp:32 src/tests/echo/test.cpp:30 #: src/tests/echo/test_service.cpp:29 msgid "Service side MCCs are loaded" msgstr "Servicesidans MCC:er har laddats" #: src/hed/daemon/unix/main_unix.cpp:431 #: src/hed/daemon/win32/main_win32.cpp:185 msgid "Unexpected arguments supplied" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:93 #: src/hed/dmc/acix/DataPointACIX.cpp:342 #: src/hed/dmc/rucio/DataPointRucio.cpp:220 #: src/hed/dmc/rucio/DataPointRucio.cpp:438 #, fuzzy, c-format msgid "No locations found for %s" msgstr "Inga platser funna för destination" #: src/hed/dmc/acix/DataPointACIX.cpp:121 #, c-format msgid "Found none or multiple URLs (%s) in ACIX URL: %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:131 #, fuzzy, c-format msgid "Cannot handle URL %s" msgstr "Kan inte ändra ägare för %s" #: src/hed/dmc/acix/DataPointACIX.cpp:138 #, fuzzy, c-format msgid "Could not resolve original source of %s: out of time" msgstr "Kunde inte skapa temporär fil \"%s\"" #: src/hed/dmc/acix/DataPointACIX.cpp:144 #, fuzzy, c-format msgid "Could not resolve original source of %s: %s" msgstr "Kunde inte skapa temporär fil \"%s\"" #: src/hed/dmc/acix/DataPointACIX.cpp:160 #, c-format msgid "Querying ACIX server at %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:161 #, c-format msgid "Calling acix with query %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:167 #, fuzzy, c-format msgid "Failed to query ACIX: %s" msgstr "Misslyckades med att läsa proxy fil: %s" #: src/hed/dmc/acix/DataPointACIX.cpp:171 #: src/hed/dmc/acix/DataPointACIX.cpp:308 #, fuzzy, c-format msgid "Failed to parse ACIX response: %s" msgstr "Misslyckades med att skapa indata-SOAP-behållare" #: src/hed/dmc/acix/DataPointACIX.cpp:298 #, fuzzy, c-format msgid "ACIX returned %s" msgstr "GACL-auktoriseringsförfrågan: %s" #: src/hed/dmc/acix/DataPointACIX.cpp:319 #, fuzzy, c-format msgid "No locations for %s" msgstr "Inga platser funna för destination" #: src/hed/dmc/acix/DataPointACIX.cpp:325 #, fuzzy, c-format msgid "%s: ACIX Location: %s" msgstr "Funktion : %s" #: src/hed/dmc/acix/DataPointACIX.cpp:327 #, c-format msgid "%s: Location %s not accessible remotely, skipping" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:68 #, c-format msgid "" "checingBartenderURL: Response:\n" "%s" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:154 src/hed/dmc/arc/DataPointARC.cpp:206 #: src/hed/dmc/arc/DataPointARC.cpp:278 src/hed/dmc/arc/DataPointARC.cpp:375 #: src/hed/dmc/arc/DataPointARC.cpp:548 
src/hed/dmc/arc/DataPointARC.cpp:609 #, fuzzy msgid "Hostname is not implemented for arc protocol" msgstr "Feature ej implementerad" #: src/hed/dmc/arc/DataPointARC.cpp:245 src/hed/dmc/arc/DataPointARC.cpp:330 #: src/hed/dmc/arc/DataPointARC.cpp:441 src/hed/dmc/arc/DataPointARC.cpp:534 #, fuzzy, c-format msgid "" "nd:\n" "%s" msgstr "del: %s" #: src/hed/dmc/arc/DataPointARC.cpp:263 msgid "Not a collection" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:282 src/hed/dmc/srm/DataPointSRM.cpp:316 #, fuzzy msgid "StartReading" msgstr "start_reading_ftp" #: src/hed/dmc/arc/DataPointARC.cpp:338 src/hed/dmc/arc/DataPointARC.cpp:449 #: src/hed/dmc/arc/DataPointARC.cpp:601 #, fuzzy, c-format msgid "Recieved transfer URL: %s" msgstr "Nuvarande överföring MISSLYCKADES: %s" #: src/hed/dmc/arc/DataPointARC.cpp:378 src/hed/dmc/srm/DataPointSRM.cpp:518 msgid "StartWriting" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:490 #, fuzzy, c-format msgid "Calculated checksum: %s" msgstr "meta_get_data: checksum: %s" #: src/hed/dmc/arc/DataPointARC.cpp:554 #, fuzzy msgid "Check" msgstr "checkpoint: %s" #: src/hed/dmc/arc/DataPointARC.cpp:648 #, fuzzy, c-format msgid "Deleted %s" msgstr "Förval: %s" #: src/hed/dmc/file/DataPointFile.cpp:86 #, c-format msgid "Unknown channel %s for stdio protocol" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:93 #, fuzzy, c-format msgid "Failed to open stdio channel %s" msgstr "Misslyckades med att öppna datakanal" #: src/hed/dmc/file/DataPointFile.cpp:94 #, fuzzy, c-format msgid "Failed to open stdio channel %d" msgstr "Misslyckades med att öppna datakanal" #: src/hed/dmc/file/DataPointFile.cpp:335 #, fuzzy, c-format msgid "fsync of file %s failed: %s" msgstr "Konvertering misslyckades: %s" #: src/hed/dmc/file/DataPointFile.cpp:340 #: src/hed/dmc/file/DataPointFile.cpp:347 #, fuzzy, c-format msgid "closing file %s failed: %s" msgstr "Använder nyckelfil: %s" #: src/hed/dmc/file/DataPointFile.cpp:366 #, c-format msgid "File is not accessible: %s" msgstr "Filen är inte tillgänglig: %s" #: src/hed/dmc/file/DataPointFile.cpp:372 #: src/hed/dmc/file/DataPointFile.cpp:459 #, fuzzy, c-format msgid "Can't stat file: %s: %s" msgstr "Kan inte göra stat på filen: %s" #: src/hed/dmc/file/DataPointFile.cpp:420 #: src/hed/dmc/file/DataPointFile.cpp:426 #, fuzzy, c-format msgid "Can't stat stdio channel %s" msgstr "Kan inte göra stat på filen: %s" #: src/hed/dmc/file/DataPointFile.cpp:474 #, fuzzy, c-format msgid "%s is not a directory" msgstr "%s är inget objekt" #: src/hed/dmc/file/DataPointFile.cpp:489 src/hed/dmc/s3/DataPointS3.cpp:461 #: src/hed/dmc/s3/DataPointS3.cpp:571 #, fuzzy, c-format msgid "Failed to read object %s: %s" msgstr "Misslyckades med att läsa object: %s" #: src/hed/dmc/file/DataPointFile.cpp:502 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:412 #, fuzzy, c-format msgid "File is not accessible %s: %s" msgstr "Filen är inte tillgänglig: %s" #: src/hed/dmc/file/DataPointFile.cpp:508 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:418 #, fuzzy, c-format msgid "Can't delete directory %s: %s" msgstr "kan inte skapa katalog: %s" #: src/hed/dmc/file/DataPointFile.cpp:515 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:425 #, fuzzy, c-format msgid "Can't delete file %s: %s" msgstr "Misslyckades med att skriva fil %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:525 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:335 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1470 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:440 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:460 #: src/services/a-rex/jura/JobLogFile.cpp:657 #: 
src/services/a-rex/jura/JobLogFile.cpp:1274 #, c-format msgid "Creating directory %s" msgstr "Skapar katalog %s" #: src/hed/dmc/file/DataPointFile.cpp:533 src/hed/dmc/srm/DataPointSRM.cpp:160 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:474 #, fuzzy, c-format msgid "Renaming %s to %s" msgstr "Mappar %s till %s" #: src/hed/dmc/file/DataPointFile.cpp:535 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:483 #, fuzzy, c-format msgid "Can't rename file %s: %s" msgstr "Misslyckades med att skriva fil %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:566 #, fuzzy, c-format msgid "Failed to open %s for reading: %s" msgstr "Misslyckades med att öppna fil %s för läsning: %s" #: src/hed/dmc/file/DataPointFile.cpp:581 #: src/hed/dmc/file/DataPointFile.cpp:719 #, fuzzy, c-format msgid "Failed to switch user id to %d/%d" msgstr "Misslyckades med att städa upp efter jobb" #: src/hed/dmc/file/DataPointFile.cpp:587 #, fuzzy, c-format msgid "Failed to create/open file %s: %s" msgstr "Misslyckades med att skapa/öppna fil %s (%d)" #: src/hed/dmc/file/DataPointFile.cpp:603 #, fuzzy msgid "Failed to create thread" msgstr "Misslyckades med att starta ny tråd" #: src/hed/dmc/file/DataPointFile.cpp:683 #, c-format msgid "Invalid url: %s" msgstr "Ogiltig URL: %s" #: src/hed/dmc/file/DataPointFile.cpp:692 src/hed/libs/data/FileCache.cpp:603 #, fuzzy, c-format msgid "Failed to create directory %s: %s" msgstr "Misslyckades med att skapa/hitta katalog %s : %s\"" #: src/hed/dmc/file/DataPointFile.cpp:708 #: src/hed/dmc/file/DataPointFile.cpp:727 #, fuzzy, c-format msgid "Failed to create file %s: %s" msgstr "Misslyckades med att läsa fil %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:739 #, c-format msgid "setting file %s to size %llu" msgstr "Sätter fil %s till storlek %llu" #: src/hed/dmc/file/DataPointFile.cpp:759 #, fuzzy, c-format msgid "Failed to preallocate space for %s" msgstr "Misslyckades med förallokera utrymme" #: src/hed/dmc/file/DataPointFile.cpp:800 src/hed/libs/data/FileCache.cpp:981 #, fuzzy, c-format msgid "Failed to clean up file %s: %s" msgstr "Misslyckades med att läsa fil %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:809 #, fuzzy, c-format msgid "Error during file validation. 
Can't stat file %s: %s" msgstr "Fel under transaktionen: %s" #: src/hed/dmc/file/DataPointFile.cpp:813 #, c-format msgid "" "Error during file validation: Local file size %llu does not match source " "file size %llu for file %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:53 #, fuzzy, c-format msgid "Using proxy %s" msgstr "Använder proxyfil: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:54 #, fuzzy, c-format msgid "Using key %s" msgstr "Använder nyckelfil: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:55 #, fuzzy, c-format msgid "Using cert %s" msgstr "Använder cachade inställningar: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:113 msgid "Locations are missing in destination LFC URL" msgstr "Platser saknas i destinations-LFC-URL" #: src/hed/dmc/gfal/DataPointGFAL.cpp:119 #, c-format msgid "Duplicate replica found in LFC: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:121 #, c-format msgid "Adding location: %s - %s" msgstr "Lägger till plats: %s - %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:129 #: src/hed/libs/data/DataPointIndex.cpp:161 #, c-format msgid "Add location: url: %s" msgstr "Lägg till plats: url: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:130 #: src/hed/libs/data/DataPointIndex.cpp:162 #, c-format msgid "Add location: metadata: %s" msgstr "Lägg till plats: metadata: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:150 #: src/hed/dmc/gfal/DataPointGFAL.cpp:310 #, fuzzy, c-format msgid "gfal_open failed: %s" msgstr "Förfrågan misslyckades" #: src/hed/dmc/gfal/DataPointGFAL.cpp:163 #: src/hed/dmc/gfal/DataPointGFAL.cpp:223 #: src/hed/dmc/gfal/DataPointGFAL.cpp:249 #: src/hed/dmc/gfal/DataPointGFAL.cpp:324 #: src/hed/dmc/gfal/DataPointGFAL.cpp:403 #: src/hed/dmc/gfal/DataPointGFAL.cpp:430 #, fuzzy, c-format msgid "gfal_close failed: %s" msgstr "cachefil: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:195 #, fuzzy, c-format msgid "gfal_read failed: %s" msgstr "Förfrågan misslyckades" #: src/hed/dmc/gfal/DataPointGFAL.cpp:237 #, fuzzy msgid "StopReading starts waiting for transfer_condition." msgstr "stop_reading_ftp: väntar på att överföring ska avslutas" #: src/hed/dmc/gfal/DataPointGFAL.cpp:239 #, fuzzy msgid "StopReading finished waiting for transfer_condition." msgstr "stop_reading_ftp: väntar på att överföring ska avslutas" #: src/hed/dmc/gfal/DataPointGFAL.cpp:271 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:66 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:71 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:42 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:47 #, fuzzy, c-format msgid "No locations defined for %s" msgstr "Inga platser funna för destination" #: src/hed/dmc/gfal/DataPointGFAL.cpp:278 #, fuzzy, c-format msgid "Failed to set LFC replicas: %s" msgstr "Misslyckades med att ta bort LFC-katalog: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:304 #, fuzzy, c-format msgid "gfal_mkdir failed (%s), trying to write anyway" msgstr "start_writing_ftp: mkdir misslyckades - försöker fortfarande skriva" #: src/hed/dmc/gfal/DataPointGFAL.cpp:359 #, c-format msgid "DataPointGFAL::write_file got position %d and offset %d, has to seek" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:388 #, fuzzy, c-format msgid "gfal_write failed: %s" msgstr "Förfrågan misslyckades" #: src/hed/dmc/gfal/DataPointGFAL.cpp:418 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:302 #, fuzzy msgid "StopWriting starts waiting for transfer_condition." 
msgstr "StopWriting: avbryter förbindelse" #: src/hed/dmc/gfal/DataPointGFAL.cpp:420 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:304 #, fuzzy msgid "StopWriting finished waiting for transfer_condition." msgstr "stop_reading_ftp: väntar på att överföring ska avslutas" #: src/hed/dmc/gfal/DataPointGFAL.cpp:451 #, fuzzy, c-format msgid "gfal_stat failed: %s" msgstr "Kan inte göra stat på filen: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:496 #, c-format msgid "gfal_listxattr failed, no replica information can be obtained: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:537 #, fuzzy, c-format msgid "gfal_opendir failed: %s" msgstr "Förfrågan misslyckades" #: src/hed/dmc/gfal/DataPointGFAL.cpp:549 #, c-format msgid "List will stat the URL %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:556 #, fuzzy, c-format msgid "gfal_closedir failed: %s" msgstr "Använder nyckelfil: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:584 #, fuzzy, c-format msgid "gfal_rmdir failed: %s" msgstr "Förfrågan misslyckades" #: src/hed/dmc/gfal/DataPointGFAL.cpp:587 #, fuzzy, c-format msgid "gfal_unlink failed: %s" msgstr "Konvertering misslyckades: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:604 #, fuzzy, c-format msgid "gfal_mkdir failed: %s" msgstr "Förfrågan misslyckades" #: src/hed/dmc/gfal/DataPointGFAL.cpp:619 #, fuzzy, c-format msgid "gfal_rename failed: %s" msgstr "Förfrågan misslyckades" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:19 #, fuzzy, c-format msgid "Failed to obtain bytes transferred: %s" msgstr "Misslyckades med att erhålla lista från ftp: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:42 #, fuzzy, c-format msgid "Failed to get initiate GFAL2 parameter handle: %s" msgstr "Misslyckades med att skriva fil %s: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:49 #, fuzzy, c-format msgid "Failed to get initiate new GFAL2 context: %s" msgstr "Misslyckades med att skapa GUID i RLS: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:56 #, fuzzy, c-format msgid "Failed to set GFAL2 monitor callback: %s" msgstr "Misslyckades med att skapa GUID i RLS: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:64 #, fuzzy, c-format msgid "Failed to set overwrite option in GFAL2: %s" msgstr "Misslyckades med att ta bort plats från LFC: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:72 #, c-format msgid "Failed to set GFAL2 transfer timeout, will use default: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:84 #, fuzzy msgid "Transfer failed" msgstr "Några överföringar misslyckades" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:92 #, fuzzy msgid "Transfer succeeded" msgstr "Överföring slutförd" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:38 msgid "ftp_complete_callback: success" msgstr "ftp_complete_callback: OK" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:44 #, c-format msgid "ftp_complete_callback: error: %s" msgstr "ftp_complete_callback: fel: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:60 msgid "ftp_check_callback" msgstr "ftp_check_callback" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:62 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:90 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:116 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:135 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:305 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:340 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:678 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:847 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:879 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:913 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1052 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1104 
#: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1114 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1122 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1130 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1138 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1144 #: src/services/gridftpd/commands.cpp:1225 #: src/services/gridftpd/dataread.cpp:76 #: src/services/gridftpd/dataread.cpp:173 #: src/services/gridftpd/datawrite.cpp:59 #: src/services/gridftpd/datawrite.cpp:146 #, c-format msgid "Globus error: %s" msgstr "Globusfel: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:73 msgid "Excessive data received while checking file access" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:89 msgid "Registration of Globus FTP buffer failed - cancel check" msgstr "Registrering av Globus-FTP-buffer misslyckades - avbryter kontroll" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:115 msgid "check_ftp: globus_ftp_client_size failed" msgstr "check_ftp: globus_ftp_client_size misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:119 msgid "check_ftp: timeout waiting for size" msgstr "check_ftp: timeout vid väntan på storlek" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:124 msgid "check_ftp: failed to get file's size" msgstr "check_ftp: misslyckades med att erhålla filens storlek" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:127 #, fuzzy, c-format msgid "check_ftp: obtained size: %lli" msgstr "start_reading_ftp: erhöll storlek: %llu" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:134 msgid "check_ftp: globus_ftp_client_modification_time failed" msgstr "check_ftp: globus_ftp_client_modification_time misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:138 msgid "check_ftp: timeout waiting for modification_time" msgstr "check_ftp: timeout vid väntan på ändringstid" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:143 msgid "check_ftp: failed to get file's modification time" msgstr "check_ftp: misslyckades med att erhålla filens ändringstid" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:148 #, fuzzy, c-format msgid "check_ftp: obtained modification date: %s" msgstr "Cache skapades: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:167 msgid "check_ftp: globus_ftp_client_get failed" msgstr "check_ftp: globus_ftp_client_get misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:174 msgid "check_ftp: globus_ftp_client_register_read" msgstr "check_ftp: globus_ftp_client_register_read" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:185 msgid "check_ftp: timeout waiting for partial get" msgstr "check_ftp: timeout vid väntan på partiell get" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:215 #, fuzzy, c-format msgid "File delete failed, attempting directory delete for %s" msgstr "Misslyckades med att skapa/hitta katalog %s : %s\"" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:225 msgid "delete_ftp: globus_ftp_client_delete failed" msgstr "delete_ftp: globus_ftp_client_delete misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:231 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:252 #, fuzzy msgid "delete_ftp: timeout waiting for delete" msgstr "list_files_ftp: timeout vid väntan på storlek" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:246 #, fuzzy msgid "delete_ftp: globus_ftp_client_rmdir failed" msgstr "delete_ftp: globus_ftp_client_delete misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:301 #, c-format msgid "mkdir_ftp: making %s" msgstr "mkdir_ftp: skapar %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:309 msgid "mkdir_ftp: timeout waiting for mkdir" msgstr "mkdir_ftp: timeout vid väntan på mkdir" #: 
src/hed/dmc/gridftp/DataPointGridFTP.cpp:344 #, fuzzy msgid "Timeout waiting for mkdir" msgstr "mkdir_ftp: timeout vid väntan på mkdir" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:370 msgid "start_reading_ftp" msgstr "start_reading_ftp" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:374 msgid "start_reading_ftp: globus_ftp_client_get" msgstr "start_reading_ftp: globus_ftp_client_get" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:388 msgid "start_reading_ftp: globus_ftp_client_get failed" msgstr "start_reading_ftp: globus_ftp_client_get misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:399 msgid "start_reading_ftp: globus_thread_create failed" msgstr "start_reading_ftp: globus_thread_create misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:418 msgid "stop_reading_ftp: aborting connection" msgstr "stop_reading_ftp: avbryter förbindelse" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:425 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:647 #, fuzzy, c-format msgid "Failed to abort transfer of ftp file: %s" msgstr "Misslyckades med att spara ftp-fil" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:426 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:648 msgid "Assuming transfer is already aborted or failed." msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:433 msgid "stop_reading_ftp: waiting for transfer to finish" msgstr "stop_reading_ftp: väntar på att överföring ska avslutas" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:435 #, c-format msgid "stop_reading_ftp: exiting: %s" msgstr "stop_reading_ftp: avslutar: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:449 msgid "ftp_read_thread: get and register buffers" msgstr "ftp_read_thread: erhåll och registrerar buffrar" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:455 #, c-format msgid "ftp_read_thread: for_read failed - aborting: %s" msgstr "ftp_read_thread: for_read misslyckades - avbryter: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:465 #, fuzzy, c-format msgid "ftp_read_thread: data callback failed - aborting: %s" msgstr "ftp_read_thread: for_read misslyckades - avbryter: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:477 #, c-format msgid "ftp_read_thread: Globus error: %s" msgstr "ftp_read_thread: Globusfel: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:490 #, c-format msgid "ftp_read_thread: too many registration failures - abort: %s" msgstr "ftp_read_thread: för många registreringsfel - avbryter: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:495 #, fuzzy, c-format msgid "ftp_read_thread: failed to register Globus buffer - will try later: %s" msgstr "" "ftp_read_thread: misslyckades med att registrera globusbuffer - kommer att " "prova senare: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:508 msgid "ftp_read_thread: waiting for eof" msgstr "ftp_read_thread: väntar på filslut" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:512 #, fuzzy msgid "ftp_read_thread: waiting for buffers released" msgstr "ftp_read_thread: väntar på filslut" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:516 #, fuzzy msgid "ftp_read_thread: failed to release buffers - leaking" msgstr "" "ftp_read_thread: misslyckades med att registrera globusbuffer - kommer att " "prova senare: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:521 msgid "ftp_read_thread: exiting" msgstr "ftp_read_thread: avslutar" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:539 #, fuzzy, c-format msgid "ftp_read_callback: failure: %s" msgstr "ftp_read_callback: misslyckande" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:542 msgid "ftp_read_callback: success" msgstr "ftp_read_callback: OK" #: 
src/hed/dmc/gridftp/DataPointGridFTP.cpp:558 msgid "Failed to get ftp file" msgstr "Misslyckades med att hämta ftp-fil" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:594 msgid "start_writing_ftp: mkdir" msgstr "start_writing_ftp: mkdir" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:597 msgid "start_writing_ftp: mkdir failed - still trying to write" msgstr "start_writing_ftp: mkdir misslyckades - försöker fortfarande skriva" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:599 msgid "start_writing_ftp: put" msgstr "start_writing_ftp: put" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:613 msgid "start_writing_ftp: put failed" msgstr "start_writing_ftp: put misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:623 msgid "start_writing_ftp: globus_thread_create failed" msgstr "start_writing_ftp: globus_thread_create misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:640 msgid "StopWriting: aborting connection" msgstr "StopWriting: avbryter förbindelse" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:664 #, fuzzy, c-format msgid "StopWriting: Calculated checksum %s" msgstr "meta_get_data: checksum: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:668 #, fuzzy, c-format msgid "StopWriting: looking for checksum of %s" msgstr "list_files_ftp: söker efter storlek på %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:677 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:912 #, fuzzy msgid "list_files_ftp: globus_ftp_client_cksm failed" msgstr "list_files_ftp: globus_ftp_client_size misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:681 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:916 #, fuzzy msgid "list_files_ftp: timeout waiting for cksum" msgstr "list_files_ftp: timeout vid väntan på storlek" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:688 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:923 #, fuzzy msgid "list_files_ftp: no checksum information possible" msgstr "list_files_ftp: söker efter ändringstid för %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:691 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:926 #, fuzzy, c-format msgid "list_files_ftp: checksum %s" msgstr "meta_get_data: checksum: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:694 msgid "" "Checksum type returned by server is different to requested type, cannot " "compare" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:696 #, c-format msgid "Calculated checksum %s matches checksum reported by server" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:699 #, c-format msgid "" "Checksum mismatch between calculated checksum %s and checksum reported by " "server %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:721 msgid "ftp_write_thread: get and register buffers" msgstr "ftp_write_thread: erhåll och registrera buffrar" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:725 msgid "ftp_write_thread: for_write failed - aborting" msgstr "ftp_write_thread: for_write misslyckades - avbryter" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:743 #, fuzzy msgid "ftp_write_thread: data callback failed - aborting" msgstr "ftp_write_thread: for_write misslyckades - avbryter" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:759 #, fuzzy msgid "ftp_write_thread: waiting for eof" msgstr "ftp_read_thread: väntar på filslut" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:763 #, fuzzy msgid "ftp_write_thread: waiting for buffers released" msgstr "ftp_read_thread: väntar på filslut" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:771 #, fuzzy msgid "ftp_write_thread: failed to release buffers - leaking" msgstr "ftp_write_thread: erhåll och registrera buffrar" #: 
src/hed/dmc/gridftp/DataPointGridFTP.cpp:776 #, fuzzy msgid "ftp_write_thread: exiting" msgstr "ftp_read_thread: avslutar" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:799 #, fuzzy, c-format msgid "ftp_write_callback: failure: %s" msgstr "ftp_write_callback: misslyckande" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:802 #, fuzzy, c-format msgid "ftp_write_callback: success %s" msgstr "ftp_write_callback: OK" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:817 msgid "Failed to store ftp file" msgstr "Misslyckades med att spara ftp-fil" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:825 #, fuzzy msgid "ftp_put_complete_callback: success" msgstr "ftp_complete_callback: OK" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:841 #, c-format msgid "list_files_ftp: looking for size of %s" msgstr "list_files_ftp: söker efter storlek på %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:845 msgid "list_files_ftp: globus_ftp_client_size failed" msgstr "list_files_ftp: globus_ftp_client_size misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:851 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:852 msgid "list_files_ftp: timeout waiting for size" msgstr "list_files_ftp: timeout vid väntan på storlek" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:858 msgid "list_files_ftp: failed to get file's size" msgstr "list_files_ftp: misslyckades med att erhålla filens storlek" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:870 #, c-format msgid "list_files_ftp: looking for modification time of %s" msgstr "list_files_ftp: söker efter ändringstid för %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:876 msgid "list_files_ftp: globus_ftp_client_modification_time failed" msgstr "list_files_ftp: globus_ftp_client_modification_time misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:883 msgid "list_files_ftp: timeout waiting for modification_time" msgstr "list_files_ftp: timeout vid väntan på ändringstid" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:891 msgid "list_files_ftp: failed to get file's modification time" msgstr "list_files_ftp: misslyckades med att erhålla filens ändringstid" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:903 #, fuzzy, c-format msgid "list_files_ftp: looking for checksum of %s" msgstr "list_files_ftp: söker efter storlek på %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:942 #, fuzzy, c-format msgid "Failed to obtain stat from FTP: %s" msgstr "Misslyckades med att erhålla lista från ftp: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:948 #, fuzzy msgid "No results returned from stat" msgstr "Inget svar" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:954 #, c-format msgid "Wrong number of objects (%i) for stat from ftp: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:968 #, c-format msgid "Unexpected path %s returned from server" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1007 #, fuzzy, c-format msgid "Failed to obtain listing from FTP: %s" msgstr "Misslyckades med att erhålla lista från ftp: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1050 #, fuzzy msgid "Rename: globus_ftp_client_move failed" msgstr "check_ftp: globus_ftp_client_get misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1056 #, fuzzy msgid "Rename: timeout waiting for operation to complete" msgstr "check_ftp: timeout vid väntan på ändringstid" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1103 msgid "init_handle: globus_ftp_client_handleattr_init failed" msgstr "init_handle: globus_ftp_client_handleattr_init misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1112 msgid "init_handle: 
globus_ftp_client_handleattr_set_gridftp2 failed" msgstr "init_handle: globus_ftp_client_handleattr_set_gridftp2 misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1121 msgid "init_handle: globus_ftp_client_handle_init failed" msgstr "init_handle: globus_ftp_client_handle_init misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1128 msgid "init_handle: globus_ftp_client_operationattr_init failed" msgstr "init_handle: globus_ftp_client_operationattr_init misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1136 #, fuzzy msgid "init_handle: globus_ftp_client_operationattr_set_allow_ipv6 failed" msgstr "init_handle: globus_ftp_client_operationattr_init misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1142 #, fuzzy msgid "init_handle: globus_ftp_client_operationattr_set_delayed_pasv failed" msgstr "init_handle: globus_ftp_client_operationattr_init misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1190 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1218 #, fuzzy, c-format msgid "globus_ftp_client_operationattr_set_authorization: error: %s" msgstr "init_handle: globus_ftp_client_operationattr_init misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1217 #, fuzzy msgid "Failed to set credentials for GridFTP transfer" msgstr "Misslyckades med att skapa fil %s för skrivning: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1223 msgid "Using secure data transfer" msgstr "Använder säker dataöverföring" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1228 msgid "Using insecure data transfer" msgstr "Använder osäker dataöverföring" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1255 #, fuzzy msgid "~DataPoint: destroy ftp_handle" msgstr "DataPoint::deinit_handle: förstör ftp_handle" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1258 #, fuzzy msgid "~DataPoint: destroy ftp_handle failed - retrying" msgstr "DataPoint::deinit_handle: förstör ftp_handle" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1276 #, fuzzy msgid "~DataPoint: failed to destroy ftp_handle - leaking" msgstr "DataPoint::deinit_handle: förstör ftp_handle" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1296 msgid "" "Missing reference to factory and/or module. It is unsafe to use Globus in " "non-persistent mode - (Grid)FTP code is disabled. Report to developers." 
msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:224 src/hed/dmc/gridftp/Lister.cpp:292 #: src/hed/dmc/gridftp/Lister.cpp:387 src/hed/dmc/gridftp/Lister.cpp:737 #: src/hed/dmc/gridftp/Lister.cpp:775 #, c-format msgid "Failure: %s" msgstr "Misslyckande: %s" #: src/hed/dmc/gridftp/Lister.cpp:226 src/hed/dmc/gridftp/Lister.cpp:246 #: src/hed/dmc/gridftp/Lister.cpp:471 src/hed/dmc/gridftp/Lister.cpp:478 #: src/hed/dmc/gridftp/Lister.cpp:500 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:163 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:196 #, c-format msgid "Response: %s" msgstr "Svar: %s" #: src/hed/dmc/gridftp/Lister.cpp:291 msgid "Error getting list of files (in list)" msgstr "Fel vid erhållande av fillista (i list)" #: src/hed/dmc/gridftp/Lister.cpp:293 msgid "Assuming - file not found" msgstr "Antar - filen ej funnen" #: src/hed/dmc/gridftp/Lister.cpp:310 #, c-format msgid "list record: %s" msgstr "listpost: %s" #: src/hed/dmc/gridftp/Lister.cpp:365 msgid "Failed reading list of files" msgstr "Misslyckades med att läsa fillista" #: src/hed/dmc/gridftp/Lister.cpp:401 msgid "Failed reading data" msgstr "Misslyckades med att läsa data" #: src/hed/dmc/gridftp/Lister.cpp:429 #, c-format msgid "Command: %s" msgstr "Kommando: %s" #: src/hed/dmc/gridftp/Lister.cpp:433 src/hed/dmc/gridftp/Lister.cpp:474 #: src/hed/mcc/http/PayloadHTTP.cpp:991 msgid "Memory allocation error" msgstr "Minnesallokeringsfel" #: src/hed/dmc/gridftp/Lister.cpp:441 #, c-format msgid "%s failed" msgstr "%s misslyckades" #: src/hed/dmc/gridftp/Lister.cpp:445 msgid "Command is being sent" msgstr "Kommande sänds" #: src/hed/dmc/gridftp/Lister.cpp:450 msgid "Waiting for response" msgstr "Väntar på svar" #: src/hed/dmc/gridftp/Lister.cpp:455 msgid "Callback got failure" msgstr "Callback erhöll misslyckande" #: src/hed/dmc/gridftp/Lister.cpp:541 #, fuzzy msgid "Failed in globus_cond_init" msgstr "Misslyckades med att initiera villkor" #: src/hed/dmc/gridftp/Lister.cpp:545 #, fuzzy msgid "Failed in globus_mutex_init" msgstr "Misslyckades med att initiera mutex" #: src/hed/dmc/gridftp/Lister.cpp:552 msgid "Failed allocating memory for handle" msgstr "Misslyckades med att allokera minne för handtag" #: src/hed/dmc/gridftp/Lister.cpp:557 #, fuzzy msgid "Failed in globus_ftp_control_handle_init" msgstr "Minnesläcka (globus_ftp_control_handle_t)" #: src/hed/dmc/gridftp/Lister.cpp:565 #, fuzzy msgid "Failed to enable IPv6" msgstr "Misslyckades med att ta bort %s" #: src/hed/dmc/gridftp/Lister.cpp:576 src/services/gridftpd/commands.cpp:983 msgid "Closing connection" msgstr "Stänger förbindelse" #: src/hed/dmc/gridftp/Lister.cpp:583 src/hed/dmc/gridftp/Lister.cpp:598 msgid "Timeout waiting for Globus callback - leaking connection" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:608 msgid "Closed successfully" msgstr "Stängdes OK" #: src/hed/dmc/gridftp/Lister.cpp:610 #, fuzzy msgid "Closing may have failed" msgstr "Kopierar cachad fil" #: src/hed/dmc/gridftp/Lister.cpp:637 msgid "Waiting for globus handle to settle" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:642 #, c-format msgid "Handle is not in proper state %u/%u" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:648 msgid "Globus handle is stuck" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:664 #, c-format msgid "Failed destroying handle: %s. Can't handle such situation." 
msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:687 #, fuzzy, c-format msgid "EPSV failed: %s" msgstr "PASV misslyckades: %s" #: src/hed/dmc/gridftp/Lister.cpp:691 #, fuzzy msgid "EPSV failed" msgstr "PASV misslyckades" #: src/hed/dmc/gridftp/Lister.cpp:698 #, c-format msgid "PASV failed: %s" msgstr "PASV misslyckades: %s" #: src/hed/dmc/gridftp/Lister.cpp:702 msgid "PASV failed" msgstr "PASV misslyckades" #: src/hed/dmc/gridftp/Lister.cpp:735 #, fuzzy msgid "Failed to apply local address to data connection" msgstr "Misslyckades med att acceptera SSL-förbindelse" #: src/hed/dmc/gridftp/Lister.cpp:749 #, fuzzy msgid "Can't parse host and/or port in response to EPSV/PASV" msgstr "Kan inte tolka värd och port i PASV-svar" #: src/hed/dmc/gridftp/Lister.cpp:754 #, fuzzy, c-format msgid "Data channel: %d.%d.%d.%d:%d" msgstr "Datakanal: %d.%d.%d.%d %d" #: src/hed/dmc/gridftp/Lister.cpp:769 #, fuzzy, c-format msgid "Data channel: [%s]:%d" msgstr "Datakanal: %d.%d.%d.%d %d" #: src/hed/dmc/gridftp/Lister.cpp:773 msgid "Obtained host and address are not acceptable" msgstr "Erhållen värd och adress kan inte accepteras" #: src/hed/dmc/gridftp/Lister.cpp:783 msgid "Failed to open data channel" msgstr "Misslyckades med att öppna datakanal" #: src/hed/dmc/gridftp/Lister.cpp:801 #, c-format msgid "Unsupported protocol in url %s" msgstr "Icke understött protokoll i url %s" #: src/hed/dmc/gridftp/Lister.cpp:813 msgid "Reusing connection" msgstr "Återanvänder förbindelse" #: src/hed/dmc/gridftp/Lister.cpp:837 #, c-format msgid "Failed connecting to server %s:%d" msgstr "Misslyckades med att koppla upp mot %s:%d" #: src/hed/dmc/gridftp/Lister.cpp:843 #, c-format msgid "Failed to connect to server %s:%d" msgstr "Misslyckades med att koppla upp mot %s:%d" #: src/hed/dmc/gridftp/Lister.cpp:859 #, fuzzy msgid "Missing authentication information" msgstr "Felaktig autentiseringsinformation" #: src/hed/dmc/gridftp/Lister.cpp:868 src/hed/dmc/gridftp/Lister.cpp:882 #, fuzzy, c-format msgid "Bad authentication information: %s" msgstr "Felaktig autentiseringsinformation" #: src/hed/dmc/gridftp/Lister.cpp:891 src/hed/dmc/gridftp/Lister.cpp:906 #, fuzzy, c-format msgid "Failed authenticating: %s" msgstr "Misslyckades med autentisering" #: src/hed/dmc/gridftp/Lister.cpp:898 msgid "Failed authenticating" msgstr "Misslyckades med autentisering" #: src/hed/dmc/gridftp/Lister.cpp:933 src/hed/dmc/gridftp/Lister.cpp:1089 #, c-format msgid "DCAU failed: %s" msgstr "DCAU misslyckades: %s" #: src/hed/dmc/gridftp/Lister.cpp:937 src/hed/dmc/gridftp/Lister.cpp:1094 msgid "DCAU failed" msgstr "DCAU misslyckades" #: src/hed/dmc/gridftp/Lister.cpp:957 #, fuzzy msgid "MLST is not supported - trying LIST" msgstr "MSLD understöds inte - försöker med NLST" #: src/hed/dmc/gridftp/Lister.cpp:973 #, fuzzy, c-format msgid "Immediate completion expected: %s" msgstr "Omedelbart färdigställande: %s" #: src/hed/dmc/gridftp/Lister.cpp:977 #, fuzzy msgid "Immediate completion expected" msgstr "Omedelbart färdigställande: %s" #: src/hed/dmc/gridftp/Lister.cpp:990 #, fuzzy, c-format msgid "Missing information in reply: %s" msgstr "Klusterinformationstillhandahållare: %s" #: src/hed/dmc/gridftp/Lister.cpp:1024 #, fuzzy, c-format msgid "Missing final reply: %s" msgstr "Använder nyckelfil: %s" #: src/hed/dmc/gridftp/Lister.cpp:1048 #, fuzzy, c-format msgid "Unexpected immediate completion: %s" msgstr "Omedelbart färdigställande: %s" #: src/hed/dmc/gridftp/Lister.cpp:1060 #, fuzzy, c-format msgid "LIST/MLST failed: %s" msgstr "NLST/MLSD misslyckades: %s" #: 
src/hed/dmc/gridftp/Lister.cpp:1065 #, fuzzy msgid "LIST/MLST failed" msgstr "NLST/MLSD misslyckades" #: src/hed/dmc/gridftp/Lister.cpp:1115 msgid "MLSD is not supported - trying NLST" msgstr "MLSD understöds inte - försöker med NLST" #: src/hed/dmc/gridftp/Lister.cpp:1129 #, c-format msgid "Immediate completion: %s" msgstr "Omedelbart färdigställande: %s" #: src/hed/dmc/gridftp/Lister.cpp:1137 #, c-format msgid "NLST/MLSD failed: %s" msgstr "NLST/MLSD misslyckades: %s" #: src/hed/dmc/gridftp/Lister.cpp:1143 msgid "NLST/MLSD failed" msgstr "NLST/MLSD misslyckades" #: src/hed/dmc/gridftp/Lister.cpp:1164 #, c-format msgid "Data transfer aborted: %s" msgstr "Dataöverföring avbruten: %s" #: src/hed/dmc/gridftp/Lister.cpp:1169 msgid "Data transfer aborted" msgstr "Dataöverföring avbruten" #: src/hed/dmc/gridftp/Lister.cpp:1181 msgid "Failed to transfer data" msgstr "Misslyckades med att överföra data" #: src/hed/dmc/http/DataPointHTTP.cpp:388 #: src/hed/dmc/http/DataPointHTTP.cpp:517 #: src/hed/dmc/http/DataPointHTTP.cpp:598 #: src/hed/dmc/http/DataPointHTTP.cpp:1000 #: src/hed/dmc/http/DataPointHTTP.cpp:1141 #: src/hed/dmc/http/DataPointHTTP.cpp:1286 #, fuzzy, c-format msgid "Redirecting to %s" msgstr "Registrerar till %s ISIS" #: src/hed/dmc/http/DataPointHTTP.cpp:670 #, fuzzy, c-format msgid "Stat: obtained size %llu" msgstr "start_reading_ftp: erhöll storlek: %llu" #: src/hed/dmc/http/DataPointHTTP.cpp:674 #, fuzzy, c-format msgid "Stat: obtained modification time %s" msgstr "Cache skapades: %s" #: src/hed/dmc/http/DataPointHTTP.cpp:903 #, fuzzy, c-format msgid "Check: obtained size %llu" msgstr "start_reading_ftp: erhöll storlek: %llu" #: src/hed/dmc/http/DataPointHTTP.cpp:905 #, fuzzy, c-format msgid "Check: obtained modification time %s" msgstr "Cache skapades: %s" #: src/hed/dmc/http/DataPointHTTP.cpp:1017 #: src/hed/dmc/http/DataPointHTTP.cpp:1161 #, fuzzy, c-format msgid "HTTP failure %u - %s" msgstr "Källa: %s" #: src/hed/dmc/ldap/DataPointLDAP.cpp:36 msgid "" "Missing reference to factory and/or module. Currently safe unloading of LDAP " "DMC is not supported. Report to developers." 
msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:175 msgid "SASL Interaction" msgstr "SASL växelverkan" #: src/hed/dmc/ldap/LDAPQuery.cpp:223 #, c-format msgid "Challenge: %s" msgstr "Utmaning: %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:227 #, c-format msgid "Default: %s" msgstr "Förval: %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:303 #, c-format msgid "LDAPQuery: Initializing connection to %s:%d" msgstr "LDAPQuery: Initierar förbindelse till %s:%d" #: src/hed/dmc/ldap/LDAPQuery.cpp:307 #, c-format msgid "LDAP connection already open to %s" msgstr "LDAP-förbindelse är redan öppen till %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:325 #, c-format msgid "Could not open LDAP connection to %s" msgstr "Kunde inte öppna LDAP-förbindelse till %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:346 #, c-format msgid "Failed to create ldap bind thread (%s)" msgstr "Misslyckades med att skapa tråd för ldap bind (%s)" #: src/hed/dmc/ldap/LDAPQuery.cpp:353 #, c-format msgid "Ldap bind timeout (%s)" msgstr "Ldap bind timeout (%s)" #: src/hed/dmc/ldap/LDAPQuery.cpp:360 #, c-format msgid "Failed to bind to ldap server (%s)" msgstr "Misslyckades med att binda till ldap-server: %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:381 #, c-format msgid "Could not set LDAP network timeout (%s)" msgstr "Kunde inte ange LDAP-nätverkstimeout (%s)" #: src/hed/dmc/ldap/LDAPQuery.cpp:389 #, c-format msgid "Could not set LDAP timelimit (%s)" msgstr "Kunde inte ange LDAP-tidsgräns (%s)" #: src/hed/dmc/ldap/LDAPQuery.cpp:396 #, c-format msgid "Could not set LDAP protocol version (%s)" msgstr "Kunde inte ange LDAP-protokollversion (%s)" #: src/hed/dmc/ldap/LDAPQuery.cpp:472 #, c-format msgid "LDAPQuery: Querying %s" msgstr "LDAPQuery: Frågar %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:474 #, c-format msgid " base dn: %s" msgstr " bas-dn: %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:476 #, c-format msgid " filter: %s" msgstr " filter: %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:478 msgid " attributes:" msgstr " attribut:" #: src/hed/dmc/ldap/LDAPQuery.cpp:481 #: src/services/gridftpd/misc/ldapquery.cpp:399 #, c-format msgid " %s" msgstr " %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:526 src/hed/dmc/ldap/LDAPQuery.cpp:598 #, c-format msgid "%s (%s)" msgstr "%s (%s)" #: src/hed/dmc/ldap/LDAPQuery.cpp:550 #, c-format msgid "LDAPQuery: Getting results from %s" msgstr "LDAPQuery: Erhåller resultat från %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:553 #, c-format msgid "Error: no LDAP query started to %s" msgstr "Fel: ingen LDAP-förfrågan påbörjad till %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:593 #, c-format msgid "LDAP query timed out: %s" msgstr "LDAP-förfrågan timeout: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:23 #, c-format msgid "Replacing existing token for %s in Rucio token cache" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:36 #, c-format msgid "Found existing token for %s in Rucio token cache with expiry time %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:39 #, c-format msgid "Rucio token for %s has expired or is about to expire" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:101 #, c-format msgid "Extracted nickname %s from credentials to use for RUCIO_ACCOUNT" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:104 #, fuzzy msgid "Failed to extract VOMS nickname from proxy" msgstr "Misslyckades med att lägga till tillägg till proxyn" #: src/hed/dmc/rucio/DataPointRucio.cpp:106 #, fuzzy, c-format msgid "Using Rucio account %s" msgstr "Använder lokalt konto '%s'" #: src/hed/dmc/rucio/DataPointRucio.cpp:146 #, c-format msgid "" "Bad path for %s: Rucio supports read/write at /objectstores 
and read-only " "at /replicas" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:162 #, fuzzy, c-format msgid "Can't handle URL %s" msgstr "Kan inte göra stat på filen: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:303 #, c-format msgid "Acquired auth token for %s: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:357 #, fuzzy, c-format msgid "Rucio returned %s" msgstr "Insändning returnerade fel: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:382 #, fuzzy, c-format msgid "Failed to parse Rucio response: %s" msgstr "Misslyckades med att slå upp källa: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:388 #, c-format msgid "Filename not returned in Rucio response: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:394 #, fuzzy, c-format msgid "Unexpected name returned in Rucio response: %s" msgstr "Omedelbart färdigställande: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:400 #, fuzzy, c-format msgid "No RSE information returned in Rucio response: %s" msgstr "Insändning returnerade fel: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:423 #, c-format msgid "No filesize information returned in Rucio response for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:426 #, fuzzy, c-format msgid "%s: size %llu" msgstr "Misslyckades med att läsa fil %s: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:430 #, c-format msgid "No checksum information returned in Rucio response for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:433 #, fuzzy, c-format msgid "%s: checksum %s" msgstr "meta_get_data: checksum: %s" #: src/hed/dmc/s3/DataPointS3.cpp:648 #, fuzzy, c-format msgid "Failed to write object %s: %s" msgstr "Misslyckades med att läsa object: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:67 #, fuzzy, c-format msgid "Check: looking for metadata: %s" msgstr "Lägg till plats: metadata: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:79 #, fuzzy, c-format msgid "Check: obtained size: %lli" msgstr "start_reading_ftp: erhöll storlek: %llu" #: src/hed/dmc/srm/DataPointSRM.cpp:85 #, fuzzy, c-format msgid "Check: obtained checksum: %s" msgstr "meta_get_data: checksum: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:89 #, fuzzy, c-format msgid "Check: obtained modification date: %s" msgstr "Cache skapades: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:93 #, fuzzy msgid "Check: obtained access latency: low (ONLINE)" msgstr "Cache skapades: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:97 #, fuzzy msgid "Check: obtained access latency: high (NEARLINE)" msgstr "Cache skapades: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:119 #, fuzzy, c-format msgid "Remove: deleting: %s" msgstr "process: operation: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:139 #, fuzzy, c-format msgid "Creating directory: %s" msgstr "Skapar katalog %s" #: src/hed/dmc/srm/DataPointSRM.cpp:190 src/hed/dmc/srm/DataPointSRM.cpp:243 msgid "Calling PrepareReading when request was already prepared!" 
msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:212 #, c-format msgid "File %s is NEARLINE, will make request to bring online" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:222 #, c-format msgid "Bring online request %s is still in queue, should wait" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:227 #, c-format msgid "Bring online request %s finished successfully, file is now ONLINE" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:234 #, c-format msgid "" "Bad logic for %s - bringOnline returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:263 src/hed/dmc/srm/DataPointSRM.cpp:424 msgid "None of the requested transfer protocols are supported" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:278 #, c-format msgid "Get request %s is still in queue, should wait %i seconds" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:286 src/hed/dmc/srm/DataPointSRM.cpp:488 #, c-format msgid "Checking URL returned by SRM: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:301 src/hed/dmc/srm/DataPointSRM.cpp:503 #, c-format msgid "SRM returned no useful Transfer URLs: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:308 #, c-format msgid "" "Bad logic for %s - getTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:318 msgid "StartReading: File was not prepared properly" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:332 src/hed/dmc/srm/DataPointSRM.cpp:534 #, fuzzy, c-format msgid "TURL %s cannot be handled" msgstr "PDP: %s kan inte laddas" #: src/hed/dmc/srm/DataPointSRM.cpp:340 src/hed/dmc/srm/DataPointSRM.cpp:542 #, c-format msgid "Redirecting to new URL: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:404 msgid "Calling PrepareWriting when request was already prepared!" 
msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:435 #, fuzzy msgid "No space token specified" msgstr "Ingen cachecatalog angivan" #: src/hed/dmc/srm/DataPointSRM.cpp:441 msgid "Warning: Using SRM protocol v1 which does not support space tokens" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:444 #, fuzzy, c-format msgid "Using space token description %s" msgstr "Använder cachade inställningar: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:450 #, fuzzy, c-format msgid "Error looking up space tokens matching description %s" msgstr "Använder cachade inställningar: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:457 #, fuzzy, c-format msgid "No space tokens found matching description %s" msgstr "Använder cachade inställningar: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:464 #, fuzzy, c-format msgid "Using space token %s" msgstr "Använder cachade inställningar: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:480 #, c-format msgid "Put request %s is still in queue, should wait %i seconds" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:510 #, c-format msgid "" "Bad logic for %s - putTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:520 msgid "StartWriting: File was not prepared properly" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:593 #, fuzzy, c-format msgid "FinishWriting: looking for metadata: %s" msgstr "list_files_ftp: söker efter storlek på %s" #: src/hed/dmc/srm/DataPointSRM.cpp:610 #, fuzzy, c-format msgid "FinishWriting: obtained checksum: %s" msgstr "start_reading_ftp: erhöll storlek: %llu" #: src/hed/dmc/srm/DataPointSRM.cpp:613 #, c-format msgid "" "Calculated/supplied transfer checksum %s matches checksum reported by SRM " "destination %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:616 #, c-format msgid "" "Checksum mismatch between calculated/supplied checksum (%s) and checksum " "reported by SRM destination (%s)" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:619 #, c-format msgid "" "Checksum type of SRM (%s) and calculated/supplied checksum (%s) differ, " "cannot compare" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:620 src/hed/dmc/srm/DataPointSRM.cpp:621 #, fuzzy msgid "No checksum information from server" msgstr "Tilldelat nytt informationsdokument" #: src/hed/dmc/srm/DataPointSRM.cpp:622 src/hed/dmc/srm/DataPointSRM.cpp:623 #, fuzzy msgid "No checksum verification possible" msgstr "Peer-certifikatvalidering misslyckades" #: src/hed/dmc/srm/DataPointSRM.cpp:629 #, fuzzy msgid "Failed to release completed request" msgstr "Misslyckades med att starta ny tråd" #: src/hed/dmc/srm/DataPointSRM.cpp:673 src/hed/dmc/srm/DataPointSRM.cpp:740 #, fuzzy, c-format msgid "ListFiles: looking for metadata: %s" msgstr "list_files_ftp: söker efter storlek på %s" #: src/hed/dmc/srm/DataPointSRM.cpp:806 #, c-format msgid "plugin for transport protocol %s is not installed" msgstr "" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:55 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:94 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:146 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:185 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:225 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:263 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:307 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:369 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:442 #, fuzzy msgid "SRM did not return any information" msgstr "Kunde inte inhämta jobbinformation" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:320 #, c-format msgid "File could not be moved to Running state: %s" msgstr "" #: 
src/hed/dmc/srm/srmclient/SRM1Client.cpp:376 #, fuzzy msgid "SRM did not return any useful information" msgstr "Kunde inte inhämta jobbinformation" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:454 msgid "File could not be moved to Done state" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:92 msgid "Could not determine version of server" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:98 #, fuzzy, c-format msgid "Server SRM version: %s" msgstr "%s version %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:103 #, fuzzy, c-format msgid "Server implementation: %s" msgstr " Implementeringsnamn: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:140 #, fuzzy, c-format msgid "Adding space token %s" msgstr "Lägger till plats: %s - %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:167 msgid "No request tokens found" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:180 #, c-format msgid "Adding request token %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:241 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:646 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:832 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1389 #, c-format msgid "%s: File request %s in SRM queue. Sleeping for %i seconds" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:279 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:331 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:702 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:768 #, fuzzy, c-format msgid "File is ready! TURL is %s" msgstr "Misslyckades med att läsa fil %s: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:363 #, c-format msgid "Setting userRequestDescription to %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:418 #, c-format msgid "%s: Bring online request %s in SRM queue. Sleeping for %i seconds" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:461 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1164 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1198 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1232 #, fuzzy msgid "No request token specified!" msgstr "Inget Connect-element specificerat" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:528 msgid "Request is reported as ABORTED, but all files are done" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:534 msgid "Request is reported as ABORTED, since it was cancelled" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:540 #, c-format msgid "Request is reported as ABORTED. 
Reason: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:677 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:749 #, c-format msgid "Path %s is invalid, creating required directories" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:682 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:754 #, fuzzy, c-format msgid "Error creating required directories for %s" msgstr "Fel vid skapande av nödvändiga kataloger: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:855 msgid "Too many files in one request - please try again with fewer files" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:903 msgid "" "Directory size is too large to list in one call, will have to call multiple " "times" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:940 msgid "" "Failure in parsing response from server - some information may be inaccurate" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:946 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:635 #: src/services/a-rex/jura/ApelDestination.cpp:215 #: src/services/a-rex/jura/LutsDestination.cpp:192 #: src/services/gridftpd/misc/ldapquery.cpp:183 #: src/services/gridftpd/misc/ldapquery.cpp:186 #: src/services/gridftpd/misc/ldapquery.cpp:392 #: src/services/gridftpd/misc/ldapquery.cpp:623 #: src/services/gridftpd/misc/ldapquery.cpp:632 #, c-format msgid "%s: %s" msgstr "%s: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:979 #, c-format msgid "" "Directory size is larger than %i files, will have to call multiple times" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1189 #, c-format msgid "Files associated with request token %s released successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1223 #, c-format msgid "Files associated with request token %s put done successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1258 #, c-format msgid "Files associated with request token %s aborted successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1275 #, fuzzy, c-format msgid "" "Failed to find metadata info on %s for determining file or directory delete" msgstr "Misslyckades med att skapa infofil %s: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1281 msgid "Type is file, calling srmRm" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1285 msgid "Type is dir, calling srmRmDir" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1289 msgid "File type is not available, attempting file delete" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1292 msgid "File delete failed, attempting directory delete" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1317 #, fuzzy, c-format msgid "File %s removed successfully" msgstr "Stängdes OK" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1344 #, fuzzy, c-format msgid "Directory %s removed successfully" msgstr "Stängdes OK" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1459 #, fuzzy, c-format msgid "Checking for existence of %s" msgstr "Lägg till plats: metadata: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1462 #, fuzzy, c-format msgid "File already exists: %s" msgstr "LFN finns redan i LFC" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1499 #, fuzzy, c-format msgid "Error creating directory %s: %s" msgstr "Fel vid läsning av låsfil %s: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:82 #, c-format msgid "Attempting to contact %s on port %i" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:88 #, fuzzy, c-format msgid "Storing port %i for %s" msgstr "Icke understött protokoll i url %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:102 
#, fuzzy, c-format msgid "No port succeeded for %s" msgstr "Inga platser funna för destination" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:112 #, c-format msgid "URL %s disagrees with stored SRM info, testing new info" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:118 #, c-format msgid "Replacing old SRM info with new for URL %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:140 #, fuzzy, c-format msgid "SOAP request: %s" msgstr "GACL-auktoriseringsförfrågan: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:147 #: src/hed/dmc/srm/srmclient/SRMClient.cpp:176 #, fuzzy, c-format msgid "SOAP fault: %s" msgstr "Förval: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:148 #, fuzzy msgid "Reconnecting" msgstr "Återanvänder förbindelse" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:158 #, fuzzy, c-format msgid "SRM Client status: %s" msgstr "%s ny status: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:171 #: src/hed/identitymap/ArgusPDPClient.cpp:250 #, fuzzy, c-format msgid "SOAP response: %s" msgstr "Inget svar" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:76 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:162 #, fuzzy, c-format msgid "Failed to acquire lock on file %s" msgstr "Misslyckades med att låsa upp fil %s: %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:81 #, fuzzy, c-format msgid "Error reading info from file %s:%s" msgstr "Fel vid läsning av metafil %s: %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:95 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:187 #, fuzzy, c-format msgid "Bad or old format detected in file %s, in line %s" msgstr "Felaktigt format i fil %s: %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:100 #, fuzzy, c-format msgid "Cannot convert string %s to int in line %s" msgstr "Felaktigt format i fil %s: %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:203 #, fuzzy, c-format msgid "Error writing srm info file %s" msgstr "Fel vid läsning av metafil %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:91 #, c-format msgid "Reading %u bytes from byte %llu" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:93 #, c-format msgid "Read %i bytes" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:124 #, fuzzy, c-format msgid "Could not open file %s for reading: %s" msgstr "Misslyckades med att öppna fil %s för läsning: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:139 #, fuzzy, c-format msgid "Unable to find file size of %s" msgstr "Använder jobblistfil: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:203 #, c-format msgid "DataPointXrootd::write_file got position %d and offset %d, has to seek" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:226 #, fuzzy, c-format msgid "xrootd write failed: %s" msgstr "SendData: Dataskrivning misslyckades: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:235 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:309 #, fuzzy, c-format msgid "xrootd close failed: %s" msgstr "Konvertering misslyckades: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:258 #, fuzzy, c-format msgid "Failed to open %s, trying to create parent directories" msgstr "Misslyckades med att skapa/hitta katalog %s : %s\"" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:271 #, fuzzy, c-format msgid "xrootd open failed: %s" msgstr "Förfrågan misslyckades" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:285 #, fuzzy, c-format msgid "close failed: %s" msgstr "Förfrågan misslyckades" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:327 #, c-format msgid "Read access not allowed for %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:346 #, fuzzy, c-format msgid "Could not stat file %s: %s" msgstr "Kan inte göra stat på filen: %s" 
#: src/hed/dmc/xrootd/DataPointXrootd.cpp:381 #, fuzzy, c-format msgid "Failed to open directory %s: %s" msgstr "Misslyckades med att skapa/hitta katalog %s : %s\"" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:396 #, fuzzy, c-format msgid "Error while reading dir %s: %s" msgstr "Fel vid listande av katalog: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:446 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:464 #, c-format msgid "Error creating required dirs: %s" msgstr "Fel vid skapande av nödvändiga kataloger: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:158 #, fuzzy msgid "PDPD location is missing" msgstr "Plats saknas" #: src/hed/identitymap/ArgusPDPClient.cpp:161 #, fuzzy, c-format msgid "PDPD location: %s" msgstr "policyplats: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:166 #: src/hed/identitymap/ArgusPEPClient.cpp:129 msgid "Conversion mode is set to SUBJECT" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:169 #: src/hed/identitymap/ArgusPEPClient.cpp:132 msgid "Conversion mode is set to CREAM" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:172 #: src/hed/identitymap/ArgusPEPClient.cpp:135 msgid "Conversion mode is set to EMI" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:175 #: src/hed/identitymap/ArgusPEPClient.cpp:138 #, fuzzy, c-format msgid "Unknown conversion mode %s, using default" msgstr "Okänt element i Globus sugneringspolicy" #: src/hed/identitymap/ArgusPDPClient.cpp:242 #, fuzzy, c-format msgid "Failed to contact PDP server: %s" msgstr "Misslyckades med att koppla upp mot RLS-servern: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:245 #, fuzzy, c-format msgid "There was no SOAP response return from PDP server: %s" msgstr "Det fanns inget SOAP-svar" #: src/hed/identitymap/ArgusPDPClient.cpp:360 #: src/hed/identitymap/ArgusPEPClient.cpp:286 #, c-format msgid "Have %i requests to process" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:362 #, fuzzy msgid "Creating a client to Argus PDP service" msgstr "Skapar klientgränssnitt" #: src/hed/identitymap/ArgusPDPClient.cpp:375 #, fuzzy, c-format msgid "XACML authorisation request: %s" msgstr "GACL-auktoriseringsförfrågan: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:386 #, fuzzy, c-format msgid "XACML authorisation response: %s" msgstr "Inget auktoriseringssvar returnerades" #: src/hed/identitymap/ArgusPDPClient.cpp:426 #: src/hed/identitymap/ArgusPEPClient.cpp:333 #, c-format msgid "%s is not authorized to do action %s in resource %s " msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:429 #: src/hed/identitymap/ArgusPDPClient.cpp:434 #: src/hed/identitymap/ArgusPEPClient.cpp:336 #, fuzzy msgid "Not authorized" msgstr "echo: Oautoriserad" #: src/hed/identitymap/ArgusPDPClient.cpp:439 #: src/hed/identitymap/ArgusPEPClient.cpp:341 #: src/hed/identitymap/IdentityMap.cpp:219 #: src/hed/shc/legacy/LegacyMap.cpp:215 #, c-format msgid "Grid identity is mapped to local identity '%s'" msgstr "Grididentitet mappas till lokal identitet '%s'" #: src/hed/identitymap/ArgusPDPClient.cpp:566 #: src/hed/identitymap/ArgusPEPClient.cpp:655 #, fuzzy msgid "Doing CREAM request" msgstr "Skapar och skickar förfrågan" #: src/hed/identitymap/ArgusPDPClient.cpp:580 #: src/hed/identitymap/ArgusPDPClient.cpp:748 #: src/hed/identitymap/ArgusPEPClient.cpp:683 #, fuzzy, c-format msgid "Adding profile-id value: %s" msgstr "Använder proxyfil: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:592 #: src/hed/identitymap/ArgusPDPClient.cpp:759 #: src/hed/identitymap/ArgusPEPClient.cpp:694 #, fuzzy, c-format msgid "Adding subject-id value: %s" msgstr "Söker 
förvalda services" #: src/hed/identitymap/ArgusPDPClient.cpp:600 #: src/hed/identitymap/ArgusPDPClient.cpp:767 #: src/hed/identitymap/ArgusPEPClient.cpp:704 #, fuzzy, c-format msgid "Adding subject-issuer value: %s" msgstr "Söker förvalda services" #: src/hed/identitymap/ArgusPDPClient.cpp:607 #: src/hed/identitymap/ArgusPEPClient.cpp:713 #, fuzzy, c-format msgid "Adding virtual-organization value: %s" msgstr "Filen är inte tillgänglig: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:620 #: src/hed/identitymap/ArgusPEPClient.cpp:730 #, fuzzy, c-format msgid "Adding FQAN value: %s" msgstr "Misslyckande: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:629 #: src/hed/identitymap/ArgusPEPClient.cpp:739 #, c-format msgid "Adding FQAN/primary value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:639 #: src/hed/identitymap/ArgusPEPClient.cpp:750 #, fuzzy, c-format msgid "Adding cert chain value: %s" msgstr "Använder certifikatfil: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:648 #: src/hed/identitymap/ArgusPDPClient.cpp:840 #, fuzzy, c-format msgid "Adding resource-id value: %s" msgstr "Källan skapades: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:662 #: src/hed/identitymap/ArgusPDPClient.cpp:863 #: src/hed/identitymap/ArgusPEPClient.cpp:775 #, fuzzy, c-format msgid "Adding action-id value: %s" msgstr "Filen är inte tillgänglig: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:666 #: src/hed/identitymap/ArgusPEPClient.cpp:786 #, fuzzy, c-format msgid "CREAM request generation failed: %s" msgstr "Förfrågan misslyckades" #: src/hed/identitymap/ArgusPDPClient.cpp:732 #, fuzzy msgid "Doing EMI request" msgstr "Skapar och skickar förfrågan" #: src/hed/identitymap/ArgusPDPClient.cpp:774 #, fuzzy, c-format msgid "Adding Virtual Organization value: %s" msgstr "Filen är inte tillgänglig: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:797 #, c-format msgid "Adding VOMS group value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:803 #, c-format msgid "Adding VOMS primary group value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:822 #, c-format msgid "Adding VOMS role value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:829 #, c-format msgid "Adding VOMS primary role value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:846 #, fuzzy, c-format msgid "Adding resource-owner value: %s" msgstr "Källan skapades: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:867 #, fuzzy, c-format msgid "EMI request generation failed: %s" msgstr "Källan skapades: %s" #: src/hed/identitymap/ArgusPEPClient.cpp:119 #, fuzzy msgid "PEPD location is missing" msgstr "Plats saknas" #: src/hed/identitymap/ArgusPEPClient.cpp:122 #, fuzzy, c-format msgid "PEPD location: %s" msgstr "policyplats: %s" #: src/hed/identitymap/ArgusPEPClient.cpp:126 msgid "Conversion mode is set to DIRECT" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:331 #, c-format msgid "" "Not authorized according to request:\n" "%s" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:361 msgid "Subject of request is null \n" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:366 #, fuzzy, c-format msgid "Can not create XACML SubjectAttribute: %s\n" msgstr "Kan inte skapa SOAP-klient" #: src/hed/identitymap/ArgusPEPClient.cpp:375 #, fuzzy msgid "Can not create XACML Resource \n" msgstr "Kan inte erhålla resurs-ID" #: src/hed/identitymap/ArgusPEPClient.cpp:381 #, fuzzy, c-format msgid "Can not create XACML ResourceAttribute: %s\n" msgstr "Kan inte skapa SOAP-klient" #: src/hed/identitymap/ArgusPEPClient.cpp:390 #, fuzzy msgid 
"Can not create XACML Action\n" msgstr "Kan inte skapa SOAP-klient" #: src/hed/identitymap/ArgusPEPClient.cpp:397 #, fuzzy, c-format msgid "Can not create XACML ActionAttribute: %s\n" msgstr "Kan inte skapa SOAP-klient" #: src/hed/identitymap/ArgusPEPClient.cpp:407 #, fuzzy msgid "Can not create XACML request\n" msgstr "Kan inte skapa BIO för begäran" #: src/hed/identitymap/ArgusPEPClient.cpp:539 #, c-format msgid "Converting to CREAM action - namespace: %s, operation: %s" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:760 #, fuzzy, c-format msgid "Adding resoure-id value: %s" msgstr "Källan skapades: %s" #: src/hed/identitymap/IdentityMap.cpp:196 #, c-format msgid "PDP: %s can not be loaded" msgstr "PDP: %s kan inte laddas" #: src/hed/libs/common/ArcLocation.cpp:68 #, c-format msgid "" "Can not determine the install location. Using %s. Please set ARC_LOCATION if " "this is not correct." msgstr "" "Kan inte bestämma installationsplats. Använder %s. Ange ARC_LOCATION om " "detta inte är korrekt." #: src/hed/libs/common/DateTime.cpp:86 src/hed/libs/common/DateTime.cpp:631 #: src/hed/libs/common/StringConv.h:25 msgid "Empty string" msgstr "Tom sträng" #: src/hed/libs/common/DateTime.cpp:107 #, c-format msgid "Can not parse date: %s" msgstr "Kan inte tolka datum: %s" #: src/hed/libs/common/DateTime.cpp:130 #, c-format msgid "Can not parse time: %s" msgstr "Kan inte tolka tid: %s" #: src/hed/libs/common/DateTime.cpp:160 #, c-format msgid "Can not parse time zone offset: %s" msgstr "Kan inte tolka tidszon: %s" #: src/hed/libs/common/DateTime.cpp:180 src/hed/libs/common/DateTime.cpp:199 #: src/hed/libs/common/DateTime.cpp:252 src/hed/libs/common/DateTime.cpp:291 #, c-format msgid "Illegal time format: %s" msgstr "Ogiltigt tidsformat: %s" #: src/hed/libs/common/DateTime.cpp:230 src/hed/libs/common/DateTime.cpp:283 #, c-format msgid "Can not parse month: %s" msgstr "Kan inte tolka månad: %s" #: src/hed/libs/common/DateTime.cpp:647 src/hed/libs/common/DateTime.cpp:688 #, c-format msgid "Invalid ISO duration format: %s" msgstr "Ogiltigt ISO-tidsperiodsformat: %s" #: src/hed/libs/common/DateTime.cpp:752 #, c-format msgid "Invalid period string: %s" msgstr "Ogiltig periodsträng: %s" #: src/hed/libs/common/DateTime.cpp:874 msgid "hour" msgid_plural "hours" msgstr[0] "timme" msgstr[1] "timmar" #: src/hed/libs/common/DateTime.cpp:880 msgid "minute" msgid_plural "minutes" msgstr[0] "minut" msgstr[1] "minuter" #: src/hed/libs/common/DateTime.cpp:886 msgid "second" msgid_plural "seconds" msgstr[0] "sekund" msgstr[1] "sekunder" #: src/hed/libs/common/FileLock.cpp:48 #, fuzzy msgid "Cannot determine hostname from gethostname()" msgstr "Kan inte bestämma värdnamn från uname()" #: src/hed/libs/common/FileLock.cpp:97 #, c-format msgid "EACCES Error opening lock file %s: %s" msgstr "EACCES-fel vid öppnanade av låsfil %s: %s" #: src/hed/libs/common/FileLock.cpp:102 #, c-format msgid "Error opening lock file %s in initial check: %s" msgstr "Fel vid öppnande av låsfil %s i initial check: %s" #: src/hed/libs/common/FileLock.cpp:109 #, fuzzy, c-format msgid "Error creating temporary file %s: %s" msgstr "Fel vid läsning av metafil %s: %s" #: src/hed/libs/common/FileLock.cpp:118 #, c-format msgid "Could not create link to lock file %s as it already exists" msgstr "" #: src/hed/libs/common/FileLock.cpp:129 #, fuzzy, c-format msgid "Could not create lock file %s as it already exists" msgstr "Kunde inte skapa temporär fil \"%s\"" #: src/hed/libs/common/FileLock.cpp:133 #, fuzzy, c-format msgid "Error creating lock file 
%s: %s" msgstr "Fel vid läsning av låsfil %s: %s" #: src/hed/libs/common/FileLock.cpp:138 #, fuzzy, c-format msgid "Error writing to lock file %s: %s" msgstr "Fel vid skrivning till temporär låsfil %s: %s" #: src/hed/libs/common/FileLock.cpp:146 #, fuzzy, c-format msgid "Error linking tmp file %s to lock file %s: %s" msgstr "Fel vid omdöpning av temporär låsfil %s till låsfil %s: %s" #: src/hed/libs/common/FileLock.cpp:155 #, fuzzy, c-format msgid "Error in lock file %s, even though linking did not return an error" msgstr "" "Fel vid omdöpning av låsfil, trots at rename() inte returnerade ett fel" #: src/hed/libs/common/FileLock.cpp:164 #, fuzzy, c-format msgid "%li seconds since lock file %s was created" msgstr "%li sekunder sedan låsfilen skapades" #: src/hed/libs/common/FileLock.cpp:167 #, fuzzy, c-format msgid "Timeout has expired, will remove lock file %s" msgstr "Timeout har passerat, kommer att ta bort låsfil" #: src/hed/libs/common/FileLock.cpp:171 #, fuzzy, c-format msgid "Failed to remove stale lock file %s: %s" msgstr "Misslyckades med att skapa infofil %s: %s" #: src/hed/libs/common/FileLock.cpp:183 #, fuzzy, c-format msgid "This process already owns the lock on %s" msgstr "Varning: Denna process äger redan låset" #: src/hed/libs/common/FileLock.cpp:189 #, fuzzy, c-format msgid "" "The process owning the lock on %s is no longer running, will remove lock" msgstr "Processen som äger låset exekverar inte längre, kommer att ta bort lås" #: src/hed/libs/common/FileLock.cpp:191 #, fuzzy, c-format msgid "Failed to remove file %s: %s" msgstr "Misslyckades med att läsa fil %s: %s" #: src/hed/libs/common/FileLock.cpp:200 #, fuzzy, c-format msgid "The file %s is currently locked with a valid lock" msgstr "Filen är för tillfället låst med ett giltigt lås" #: src/hed/libs/common/FileLock.cpp:215 #, c-format msgid "Failed to unlock file with lock %s: %s" msgstr "Misslyckades med att låsa upp fil med lås %s: %s" #: src/hed/libs/common/FileLock.cpp:227 #, c-format msgid "Lock file %s doesn't exist" msgstr "Låsfil %s existerar inte" #: src/hed/libs/common/FileLock.cpp:229 #, c-format msgid "Error listing lock file %s: %s" msgstr "Fel vid listning av låsfil %s: %s" #: src/hed/libs/common/FileLock.cpp:235 #, fuzzy, c-format msgid "Found unexpected empty lock file %s. Must go back to acquire()" msgstr "En annan process äger låset på fil %s. Måste gå tillbaka till Start()" #: src/hed/libs/common/FileLock.cpp:241 #, c-format msgid "Error reading lock file %s: %s" msgstr "Fel vid läsning av låsfil %s: %s" #: src/hed/libs/common/FileLock.cpp:245 #, fuzzy, c-format msgid "Error with formatting in lock file %s" msgstr "Formatteringsfel is låsfil %s: %s" #: src/hed/libs/common/FileLock.cpp:255 #, fuzzy, c-format msgid "Lock %s is owned by a different host (%s)" msgstr "Låsfil ägs av en annan värd" #: src/hed/libs/common/FileLock.cpp:264 #, fuzzy, c-format msgid "Badly formatted pid %s in lock file %s" msgstr "Felaktigt format i fil %s: %s" #: src/hed/libs/common/FileLock.cpp:267 #, fuzzy, c-format msgid "Another process (%s) owns the lock on file %s" msgstr "En annan process äger låset på fil %s. Måste gå tillbaka till Start()" #: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(empty)" msgstr "(tom)" #: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(null)" msgstr "(null)" #: src/hed/libs/common/Logger.cpp:60 msgid "Invalid log level. 
Using default " msgstr "" #: src/hed/libs/common/Logger.cpp:125 msgid "Invalid old log level. Using default " msgstr "" #: src/hed/libs/common/OptionParser.cpp:107 #, c-format msgid "Cannot parse integer value '%s' for -%c" msgstr "Kan inte tolka heltalsvärdet '%s' för -%c" #: src/hed/libs/common/OptionParser.cpp:265 #, fuzzy msgid "Use -? to get usage description" msgstr "Använder cachade inställningar: %s" #: src/hed/libs/common/OptionParser.cpp:342 msgid "Usage:" msgstr "Användning:" #: src/hed/libs/common/OptionParser.cpp:345 msgid "OPTION..." msgstr "FLAGGA..." #: src/hed/libs/common/OptionParser.cpp:351 msgid "Help Options:" msgstr "Hjälpflaggor:" #: src/hed/libs/common/OptionParser.cpp:352 msgid "Show help options" msgstr "Visa hjälpflaggor" #: src/hed/libs/common/OptionParser.cpp:354 msgid "Application Options:" msgstr "Programflaggor:" #: src/hed/libs/common/Profile.cpp:199 src/hed/libs/common/Profile.cpp:273 #: src/hed/libs/common/Profile.cpp:404 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"inisections\" " "attribute cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:205 src/hed/libs/common/Profile.cpp:279 #: src/hed/libs/common/Profile.cpp:411 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initag\" attribute " "cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:419 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initype\" " "attribute cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:422 #, c-format msgid "" "Element \"%s\" in the profile ignored: the \"inidefaultvalue\" attribute " "cannot be specified when the \"inisections\" and \"initag\" attributes have " "not been specified." msgstr "" #: src/hed/libs/common/Profile.cpp:497 #, c-format msgid "" "In the configuration profile the 'initype' attribute on the \"%s\" element " "has a invalid value \"%s\"." 
msgstr "" #: src/hed/libs/common/StringConv.h:31 #, c-format msgid "Conversion failed: %s" msgstr "Konvertering misslyckades: %s" #: src/hed/libs/common/StringConv.h:35 #, c-format msgid "Full string not used: %s" msgstr "Hela strängen användes inte: %s" #: src/hed/libs/common/Thread.cpp:212 msgid "Maximum number of threads running - puting new request into queue" msgstr "" #: src/hed/libs/common/Thread.cpp:253 #, fuzzy, c-format msgid "Thread exited with Glib error: %s" msgstr "ftp_read_thread: Globusfel: %s" #: src/hed/libs/common/Thread.cpp:255 #, c-format msgid "Thread exited with Glib exception: %s" msgstr "" #: src/hed/libs/common/Thread.cpp:257 #, c-format msgid "Thread exited with generic exception: %s" msgstr "" #: src/hed/libs/common/URL.cpp:121 #, c-format msgid "URL is not valid: %s" msgstr "URL:en är inte giltig: %s" #: src/hed/libs/common/URL.cpp:192 #, fuzzy, c-format msgid "Illegal URL - path must be absolute: %s" msgstr "Ogiltig URL - inget värdnamn givet" #: src/hed/libs/common/URL.cpp:197 #, fuzzy, c-format msgid "Illegal URL - no hostname given: %s" msgstr "Ogiltig URL - inget värdnamn givet" #: src/hed/libs/common/URL.cpp:286 #, fuzzy, c-format msgid "Illegal URL - path must be absolute or empty: %s" msgstr "Ogiltig URL - inget värdnamn givet" #: src/hed/libs/common/URL.cpp:302 #, fuzzy, c-format msgid "Illegal URL - no closing ] for IPv6 address found: %s" msgstr "Ogiltig URL - inget värdnamn givet" #: src/hed/libs/common/URL.cpp:310 #, fuzzy, c-format msgid "" "Illegal URL - closing ] for IPv6 address is followed by illegal token: %s" msgstr "Ogiltig URL - inget värdnamn givet" #: src/hed/libs/common/URL.cpp:326 #, fuzzy, c-format msgid "Invalid port number in %s" msgstr "Ogiltig periodsträng: %s" #: src/hed/libs/common/URL.cpp:425 #, c-format msgid "Unknown LDAP scope %s - using base" msgstr "Okänt LDAP-scope %s - använder base" #: src/hed/libs/common/URL.cpp:587 msgid "Attempt to assign relative path to URL - making it absolute" msgstr "" #: src/hed/libs/common/URL.cpp:686 #, c-format msgid "URL option %s does not have format name=value" msgstr "" #: src/hed/libs/common/URL.cpp:1151 #, c-format msgid "urllist %s contains invalid URL: %s" msgstr "urllistan %s innehåller ogiltig URL: %s" #: src/hed/libs/common/URL.cpp:1156 #, c-format msgid "URL protocol is not urllist: %s" msgstr "URL-protokollet är inte urllist: %s" #: src/hed/libs/common/UserConfig.cpp:30 #: src/hed/libs/common/UserConfig.cpp:681 #: src/hed/libs/common/UserConfig.cpp:690 #: src/hed/libs/common/UserConfig.cpp:696 #: src/hed/libs/common/UserConfig.cpp:718 #: src/hed/libs/common/UserConfig.cpp:728 #: src/hed/libs/common/UserConfig.cpp:740 #: src/hed/libs/common/UserConfig.cpp:760 #, fuzzy, c-format msgid "Multiple %s attributes in configuration file (%s)" msgstr "Läser inställningsfil: %s" #: src/hed/libs/common/UserConfig.cpp:82 #, fuzzy, c-format msgid "Wrong ownership of certificate file: %s" msgstr "Kan inte läsa certifikatfil: %s" #: src/hed/libs/common/UserConfig.cpp:84 #, fuzzy, c-format msgid "Wrong permissions of certificate file: %s" msgstr "Använder certifikatfil: %s" #: src/hed/libs/common/UserConfig.cpp:86 #, fuzzy, c-format msgid "Can not access certificate file: %s" msgstr "Kan inte läsa certifikatfil: %s (%s)" #: src/hed/libs/common/UserConfig.cpp:93 #, fuzzy, c-format msgid "Wrong ownership of key file: %s" msgstr "Fel antal parametrar angivna" #: src/hed/libs/common/UserConfig.cpp:95 #, fuzzy, c-format msgid "Wrong permissions of key file: %s" msgstr "Fel antal parametrar angivna" 
#: src/hed/libs/common/UserConfig.cpp:97 #, fuzzy, c-format msgid "Can not access key file: %s" msgstr "Kan inte läsa nyckelfil: %s (%s)" #: src/hed/libs/common/UserConfig.cpp:104 #, fuzzy, c-format msgid "Wrong ownership of proxy file: %s" msgstr "Fel antal parametrar angivna" #: src/hed/libs/common/UserConfig.cpp:106 #, fuzzy, c-format msgid "Wrong permissions of proxy file: %s" msgstr "Fel vid ändring av tillträde till katalog %s: %s" #: src/hed/libs/common/UserConfig.cpp:108 #, fuzzy, c-format msgid "Can not access proxy file: %s" msgstr "Kan inte läsa proxyfil: %s (%s)" #: src/hed/libs/common/UserConfig.cpp:119 msgid "computing" msgstr "" #: src/hed/libs/common/UserConfig.cpp:121 msgid "index" msgstr "" #: src/hed/libs/common/UserConfig.cpp:165 #: src/hed/libs/common/UserConfig.cpp:171 #: src/hed/libs/common/UserConfig.cpp:223 #: src/hed/libs/common/UserConfig.cpp:229 #, fuzzy, c-format msgid "System configuration file (%s) contains errors." msgstr "Använd inställningsfil %s" #: src/hed/libs/common/UserConfig.cpp:176 #: src/hed/libs/common/UserConfig.cpp:234 #, fuzzy, c-format msgid "System configuration file (%s or %s) does not exist." msgstr "Använd inställningsfil %s" #: src/hed/libs/common/UserConfig.cpp:178 #: src/hed/libs/common/UserConfig.cpp:180 #: src/hed/libs/common/UserConfig.cpp:236 #: src/hed/libs/common/UserConfig.cpp:238 #, fuzzy, c-format msgid "System configuration file (%s) does not exist." msgstr "Använd inställningsfil %s" #: src/hed/libs/common/UserConfig.cpp:187 #: src/hed/libs/common/UserConfig.cpp:199 #: src/hed/libs/common/UserConfig.cpp:245 #: src/hed/libs/common/UserConfig.cpp:257 #, fuzzy, c-format msgid "User configuration file (%s) contains errors." msgstr "Använd inställningsfil %s" #: src/hed/libs/common/UserConfig.cpp:192 #: src/hed/libs/common/UserConfig.cpp:250 #, fuzzy msgid "No configuration file could be loaded." msgstr "Jobstatus kunde inte inhämtas" #: src/hed/libs/common/UserConfig.cpp:195 #: src/hed/libs/common/UserConfig.cpp:253 #, c-format msgid "User configuration file (%s) does not exist or cannot be loaded." msgstr "" #: src/hed/libs/common/UserConfig.cpp:310 #, c-format msgid "" "Unable to parse the specified verbosity (%s) to one of the allowed levels" msgstr "" #: src/hed/libs/common/UserConfig.cpp:322 #, c-format msgid "" "Unsupported job list type '%s', using 'BDB'. Supported types are: BDB, " "SQLITE, XML." msgstr "" #: src/hed/libs/common/UserConfig.cpp:503 #, c-format msgid "Certificate and key ('%s' and '%s') not found in any of the paths: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:505 #, c-format msgid "" "If the proxy or certificate/key does exist, you can manually specify the " "locations via environment variables '%s'/'%s' or '%s', or the '%s'/'%s' or '%" "s' attributes in the client configuration file (e.g. '%s')" msgstr "" #: src/hed/libs/common/UserConfig.cpp:522 #, fuzzy, c-format msgid "" "Can not access CA certificates directory: %s. The certificates will not be " "verified." msgstr "Kan inte läsa CA-certifikatkatalog: %s (%s)" #: src/hed/libs/common/UserConfig.cpp:532 #, fuzzy, c-format msgid "" "Can not access CA certificate directory: %s. The certificates will not be " "verified." 
msgstr "Kan inte läsa CA-certifikatkatalog: %s (%s)" #: src/hed/libs/common/UserConfig.cpp:558 #, c-format msgid "" "Can not find CA certificates directory in default locations:\n" "~/.arc/certificates, ~/.globus/certificates,\n" "%s/etc/certificates, %s/etc/grid-security/certificates,\n" "%s/share/certificates, /etc/grid-security/certificates.\n" "The certificate will not be verified.\n" "If the CA certificates directory does exist, please manually specify the " "locations via env\n" "X509_CERT_DIR, or the cacertificatesdirectory item in client.conf\n" msgstr "" #: src/hed/libs/common/UserConfig.cpp:579 #, c-format msgid "Using proxy file: %s" msgstr "Använder proxyfil: %s" #: src/hed/libs/common/UserConfig.cpp:582 #, c-format msgid "Using certificate file: %s" msgstr "Använder certifikatfil: %s" #: src/hed/libs/common/UserConfig.cpp:583 #, c-format msgid "Using key file: %s" msgstr "Använder nyckelfil: %s" #: src/hed/libs/common/UserConfig.cpp:587 #, c-format msgid "Using CA certificate directory: %s" msgstr "Använder CA-certifikatkatalog: %s" #: src/hed/libs/common/UserConfig.cpp:600 #: src/hed/libs/common/UserConfig.cpp:606 #, fuzzy, c-format msgid "Can not access VOMSES file/directory: %s." msgstr "Kan inte läsa CA-certifikatkatalog: %s (%s)" #: src/hed/libs/common/UserConfig.cpp:612 #, fuzzy, c-format msgid "Can not access VOMS file/directory: %s." msgstr "Kan inte läsa CA-certifikatkatalog: %s (%s)" #: src/hed/libs/common/UserConfig.cpp:631 msgid "" "Can not find voms service configuration file (vomses) in default locations: " "~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/" "grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses" msgstr "" #: src/hed/libs/common/UserConfig.cpp:644 #, fuzzy, c-format msgid "Loading configuration (%s)" msgstr "Läser inställningsfil: %s" #: src/hed/libs/common/UserConfig.cpp:678 #, c-format msgid "" "The value of the timeout attribute in the configuration file (%s) was only " "partially parsed" msgstr "" #: src/hed/libs/common/UserConfig.cpp:703 msgid "" "The brokerarguments attribute can only be used in conjunction with the " "brokername attribute" msgstr "" #: src/hed/libs/common/UserConfig.cpp:715 #, c-format msgid "" "The value of the keysize attribute in the configuration file (%s) was only " "partially parsed" msgstr "" #: src/hed/libs/common/UserConfig.cpp:735 #, c-format msgid "" "Could not convert the slcs attribute value (%s) to an URL instance in " "configuration file (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:781 #, fuzzy, c-format msgid "Specified overlay file (%s) does not exist." msgstr "Cachefil %s existerar inte" #: src/hed/libs/common/UserConfig.cpp:785 #, c-format msgid "" "Unknown attribute %s in common section of configuration file (%s), ignoring " "it" msgstr "" #: src/hed/libs/common/UserConfig.cpp:826 #, fuzzy, c-format msgid "Unknown section %s, ignoring it" msgstr "Okänt element i Globus sugneringspolicy" #: src/hed/libs/common/UserConfig.cpp:830 #, fuzzy, c-format msgid "Configuration (%s) loaded" msgstr "Cacheinställningar: %s" #: src/hed/libs/common/UserConfig.cpp:833 #, fuzzy, c-format msgid "Could not load configuration (%s)" msgstr "Kunde inte hitta systemets klientinställningar" #: src/hed/libs/common/UserConfig.cpp:928 #, fuzzy, c-format msgid "UserConfiguration saved to file (%s)" msgstr "Använd inställningsfil %s" #: src/hed/libs/common/UserConfig.cpp:941 #, fuzzy, c-format msgid "Unable to create %s directory." 
msgstr "Misslyckades med att skapa/hitta katalog %s" #: src/hed/libs/common/UserConfig.cpp:950 #, fuzzy, c-format msgid "Configuration example file created (%s)" msgstr "Insändning misslyckades, service returnerade: %s" #: src/hed/libs/common/UserConfig.cpp:952 #, fuzzy, c-format msgid "Unable to copy example configuration from existing configuration (%s)" msgstr "Misslyckades med att ladda serviceinställningar" #: src/hed/libs/common/UserConfig.cpp:956 #, fuzzy, c-format msgid "Cannot copy example configuration (%s), it is not a regular file" msgstr "ARC-jobblistfil är inte en vanlig fil: %s" #: src/hed/libs/common/UserConfig.cpp:961 #, fuzzy, c-format msgid "Example configuration (%s) not created." msgstr "Cacheinställningar: %s" #: src/hed/libs/common/UserConfig.cpp:966 #, fuzzy, c-format msgid "The default configuration file (%s) is not a regular file." msgstr "ARC-användarinställningsfil är inte en vanlig fil: %s" #: src/hed/libs/common/UserConfig.cpp:984 #, fuzzy, c-format msgid "%s directory created" msgstr "%s rapporterade" #: src/hed/libs/common/UserConfig.cpp:986 #: src/hed/libs/common/UserConfig.cpp:1025 src/hed/libs/data/DataMover.cpp:684 #, fuzzy, c-format msgid "Failed to create directory %s" msgstr "Misslyckades med att skapa/hitta katalog %s : %s\"" #: src/hed/libs/common/test/LoggerTest.cpp:58 #, fuzzy msgid "This VERBOSE message should not be seen" msgstr "Detta DEBUG-meddelande borde inte ses" #: src/hed/libs/common/test/LoggerTest.cpp:62 msgid "This INFO message should be seen" msgstr "Detta INFO-meddelande borde ses" #: src/hed/libs/common/test/LoggerTest.cpp:73 #, fuzzy msgid "This VERBOSE message should now be seen" msgstr "Detta DEBUG-meddelande borde nu ses" #: src/hed/libs/common/test/LoggerTest.cpp:79 msgid "This INFO message should also be seen" msgstr "Detta INFO-meddelande borde också ses" #: src/hed/libs/common/test/LoggerTest.cpp:93 msgid "This message goes to initial destination" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:108 msgid "This message goes to per-thread destination" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:80 #, fuzzy msgid "Request failed: No response from SPService" msgstr "Förfrågan misslyckades: Inget svar" #: src/hed/libs/communication/ClientSAML2SSO.cpp:84 #: src/hed/libs/communication/ClientSAML2SSO.cpp:137 #, fuzzy msgid "Request failed: response from SPService is not as expected" msgstr "Förfrågan misslyckades: Inget svar" #: src/hed/libs/communication/ClientSAML2SSO.cpp:92 #, c-format msgid "Authentication Request URL: %s" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:133 #, fuzzy msgid "Request failed: No response from IdP" msgstr "Förfrågan misslyckades: Inget svar" #: src/hed/libs/communication/ClientSAML2SSO.cpp:184 #, fuzzy msgid "Request failed: No response from IdP when doing redirecting" msgstr "Förfrågan misslyckades: Inget svar" #: src/hed/libs/communication/ClientSAML2SSO.cpp:188 #, fuzzy msgid "" "Request failed: response from IdP is not as expected when doing redirecting" msgstr "Förfrågan misslyckades: Inget svar" #: src/hed/libs/communication/ClientSAML2SSO.cpp:245 #, fuzzy msgid "Request failed: No response from IdP when doing authentication" msgstr "Förfrågan misslyckades: Inget svar" #: src/hed/libs/communication/ClientSAML2SSO.cpp:249 #, fuzzy msgid "" "Request failed: response from IdP is not as expected when doing " "authentication" msgstr "Förfrågan misslyckades: Inget svar" #: src/hed/libs/communication/ClientSAML2SSO.cpp:294 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:300 
msgid "Succeeded to verify the signature under " msgstr "Lyckades verifiera signaturen under " #: src/hed/libs/communication/ClientSAML2SSO.cpp:296 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:303 msgid "Failed to verify the signature under " msgstr "Misslyckades med att verifiera signaturen under " #: src/hed/libs/communication/ClientSAML2SSO.cpp:310 #, fuzzy msgid "" "Request failed: No response from SP Service when sending SAML assertion to SP" msgstr "Förfrågan misslyckades: Inget svar" #: src/hed/libs/communication/ClientSAML2SSO.cpp:314 #, fuzzy msgid "" "Request failed: response from SP Service is not as expected when sending " "SAML assertion to SP" msgstr "Förfrågan misslyckades: Inget svar" #: src/hed/libs/communication/ClientSAML2SSO.cpp:325 #, c-format msgid "IdP return some error message: %s" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:353 #: src/hed/libs/communication/ClientSAML2SSO.cpp:398 msgid "SAML2SSO process failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:54 #, fuzzy msgid "Creating delegation credential to ARC delegation service" msgstr "Misslyckades med att hitta delegeringskreditiv i klientinställningar" #: src/hed/libs/communication/ClientX509Delegation.cpp:64 #: src/hed/libs/communication/ClientX509Delegation.cpp:267 #, fuzzy msgid "DelegateCredentialsInit failed" msgstr "DelegateProxy misslyckades" #: src/hed/libs/communication/ClientX509Delegation.cpp:68 #: src/hed/libs/communication/ClientX509Delegation.cpp:122 #: src/hed/libs/communication/ClientX509Delegation.cpp:157 #: src/hed/libs/communication/ClientX509Delegation.cpp:212 #: src/hed/libs/communication/ClientX509Delegation.cpp:271 #, fuzzy msgid "There is no SOAP response" msgstr "Det fanns inget SOAP-svar" #: src/hed/libs/communication/ClientX509Delegation.cpp:73 #, fuzzy msgid "There is no X509 request in the response" msgstr "Det finns inget svar" #: src/hed/libs/communication/ClientX509Delegation.cpp:78 #, fuzzy msgid "There is no Format request in the response" msgstr "Det finns inget svar" #: src/hed/libs/communication/ClientX509Delegation.cpp:86 msgid "There is no Id or X509 request value in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:99 #: src/hed/libs/communication/ClientX509Delegation.cpp:187 msgid "DelegateProxy failed" msgstr "DelegateProxy misslyckades" #: src/hed/libs/communication/ClientX509Delegation.cpp:118 msgid "UpdateCredentials failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:126 msgid "There is no UpdateCredentialsResponse in response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:134 #: src/hed/libs/communication/ClientX509Delegation.cpp:162 #: src/hed/libs/communication/ClientX509Delegation.cpp:217 #: src/hed/libs/communication/ClientX509Delegation.cpp:302 #, fuzzy msgid "There is no SOAP connection chain configured" msgstr "Ingen uppkopplingskedja har ställts in" #: src/hed/libs/communication/ClientX509Delegation.cpp:140 #, fuzzy msgid "Creating delegation to CREAM delegation service" msgstr "Misslyckades med att skapa delegering" #: src/hed/libs/communication/ClientX509Delegation.cpp:153 #, fuzzy msgid "Delegation getProxyReq request failed" msgstr "DelegateProxy misslyckades" #: src/hed/libs/communication/ClientX509Delegation.cpp:173 #, fuzzy msgid "Creating delegation to CREAM delegation service failed" msgstr "Misslyckades med att skapa delegering" #: src/hed/libs/communication/ClientX509Delegation.cpp:208 #, fuzzy msgid "Delegation putProxy request failed" msgstr 
"Delegeringsauktorisering misslyckades" #: src/hed/libs/communication/ClientX509Delegation.cpp:222 #, fuzzy msgid "Creating delegation to CREAM delegation failed" msgstr "Misslyckades med att skapa delegering" #: src/hed/libs/communication/ClientX509Delegation.cpp:237 #, fuzzy msgid "Getting delegation credential from ARC delegation service" msgstr "Misslyckades med att hitta delegeringskreditiv i klientinställningar" #: src/hed/libs/communication/ClientX509Delegation.cpp:276 msgid "There is no Delegated X509 token in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:281 msgid "There is no Format delegated token in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:289 msgid "There is no Id or X509 token value in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:298 #, fuzzy, c-format msgid "" "Get delegated credential from delegation service: \n" " %s" msgstr "Misslyckades med att hitta delegeringskreditiv i klientinställningar" #: src/hed/libs/compute/Broker.cpp:62 #, c-format msgid "Performing matchmaking against target (%s)." msgstr "" #: src/hed/libs/compute/Broker.cpp:72 #, c-format msgid "Matchmaking, ExecutionTarget: %s matches job description" msgstr "" #: src/hed/libs/compute/Broker.cpp:145 #, c-format msgid "" "The CA issuer (%s) of the credentials (%s) is not trusted by the target (%s)." msgstr "" #: src/hed/libs/compute/Broker.cpp:153 src/hed/libs/compute/Broker.cpp:171 #, fuzzy, c-format msgid "ComputingShareName of ExecutionTarget (%s) is not defined" msgstr "Meddelandeklass är inget objekt" #: src/hed/libs/compute/Broker.cpp:157 src/hed/libs/compute/Broker.cpp:162 #, c-format msgid "ComputingShare (%s) explicitly rejected" msgstr "" #: src/hed/libs/compute/Broker.cpp:175 src/hed/libs/compute/Broker.cpp:180 #, c-format msgid "ComputingShare (%s) does not match selected queue (%s)" msgstr "" #: src/hed/libs/compute/Broker.cpp:189 #, c-format msgid "" "ProcessingStartTime (%s) specified in job description is inside the targets " "downtime period [ %s - %s ]." msgstr "" #: src/hed/libs/compute/Broker.cpp:194 #, c-format msgid "The downtime of the target (%s) is not published. Keeping target." msgstr "" #: src/hed/libs/compute/Broker.cpp:200 #, c-format msgid "HealthState of ExecutionTarget (%s) is not OK (%s)" msgstr "" #: src/hed/libs/compute/Broker.cpp:205 #, c-format msgid "Matchmaking, ExecutionTarget: %s, HealthState is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:212 #, c-format msgid "" "Matchmaking, Computing endpoint requirement not satisfied. ExecutionTarget: %" "s" msgstr "" #: src/hed/libs/compute/Broker.cpp:217 #, c-format msgid "Matchmaking, ExecutionTarget: %s, ImplementationName is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:243 #, c-format msgid "" "Matchmaking, %s (%d) is %s than %s (%d) published by the ExecutionTarget." msgstr "" #: src/hed/libs/compute/Broker.cpp:272 #, c-format msgid "" "Matchmaking, The %s scaled %s (%d) is %s than the %s (%d) published by the " "ExecutionTarget." msgstr "" #: src/hed/libs/compute/Broker.cpp:284 #, c-format msgid "Matchmaking, Benchmark %s is not published by the ExecutionTarget." 
msgstr "" #: src/hed/libs/compute/Broker.cpp:299 #, c-format msgid "" "Matchmaking, MaxTotalCPUTime problem, ExecutionTarget: %d (MaxTotalCPUTime), " "JobDescription: %d (TotalCPUTime)" msgstr "" #: src/hed/libs/compute/Broker.cpp:306 #, c-format msgid "" "Matchmaking, MaxCPUTime problem, ExecutionTarget: %d (MaxCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" msgstr "" #: src/hed/libs/compute/Broker.cpp:311 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxTotalCPUTime or MaxCPUTime not " "defined, assuming no CPU time limit" msgstr "" #: src/hed/libs/compute/Broker.cpp:317 #, c-format msgid "" "Matchmaking, MinCPUTime problem, ExecutionTarget: %d (MinCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" msgstr "" #: src/hed/libs/compute/Broker.cpp:322 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MinCPUTime not defined, assuming no CPU " "time limit" msgstr "" #: src/hed/libs/compute/Broker.cpp:330 #, c-format msgid "" "Matchmaking, MainMemorySize problem, ExecutionTarget: %d (MainMemorySize), " "JobDescription: %d (IndividualPhysicalMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:336 #, c-format msgid "" "Matchmaking, MaxMainMemory problem, ExecutionTarget: %d (MaxMainMemory), " "JobDescription: %d (IndividualPhysicalMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:341 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxMainMemory and MainMemorySize are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:349 #, c-format msgid "" "Matchmaking, MaxVirtualMemory problem, ExecutionTarget: %d " "(MaxVirtualMemory), JobDescription: %d (IndividualVirtualMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:354 #, c-format msgid "Matchmaking, ExecutionTarget: %s, MaxVirtualMemory is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:362 #, c-format msgid "" "Matchmaking, Platform problem, ExecutionTarget: %s (Platform) " "JobDescription: %s (Platform)" msgstr "" #: src/hed/libs/compute/Broker.cpp:367 #, c-format msgid "Matchmaking, ExecutionTarget: %s, Platform is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:375 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, OperatingSystem requirements not satisfied" msgstr "" #: src/hed/libs/compute/Broker.cpp:380 #, c-format msgid "Matchmaking, ExecutionTarget: %s, OperatingSystem is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:388 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, RunTimeEnvironment requirements not " "satisfied" msgstr "" #: src/hed/libs/compute/Broker.cpp:393 #, c-format msgid "Matchmaking, ExecutionTarget: %s, ApplicationEnvironments not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:402 #, c-format msgid "" "Matchmaking, NetworkInfo demand not fulfilled, ExecutionTarget do not " "support %s, specified in the JobDescription." 
msgstr "" #: src/hed/libs/compute/Broker.cpp:406 #, c-format msgid "Matchmaking, ExecutionTarget: %s, NetworkInfo is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:414 #, c-format msgid "" "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (SessionDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:421 #, c-format msgid "" "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (SessionDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:427 src/hed/libs/compute/Broker.cpp:448 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxDiskSpace and WorkingAreaFree are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:435 #, c-format msgid "" "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (DiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:442 #, c-format msgid "" "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (DiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:456 #, c-format msgid "" "Matchmaking, CacheTotal problem, ExecutionTarget: %d MB (CacheTotal); " "JobDescription: %d MB (CacheDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:461 #, c-format msgid "Matchmaking, ExecutionTarget: %s, CacheTotal is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:469 #, c-format msgid "" "Matchmaking, TotalSlots problem, ExecutionTarget: %d (TotalSlots) " "JobDescription: %d (NumberOfProcesses)" msgstr "" #: src/hed/libs/compute/Broker.cpp:475 #, c-format msgid "" "Matchmaking, MaxSlotsPerJob problem, ExecutionTarget: %d (MaxSlotsPerJob) " "JobDescription: %d (NumberOfProcesses)" msgstr "" #: src/hed/libs/compute/Broker.cpp:481 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, TotalSlots and MaxSlotsPerJob are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:489 #, c-format msgid "" "Matchmaking, WorkingAreaLifeTime problem, ExecutionTarget: %s " "(WorkingAreaLifeTime) JobDescription: %s (SessionLifeTime)" msgstr "" #: src/hed/libs/compute/Broker.cpp:494 #, c-format msgid "Matchmaking, ExecutionTarget: %s, WorkingAreaLifeTime is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:502 #, c-format msgid "" "Matchmaking, ConnectivityIn problem, ExecutionTarget: %s (ConnectivityIn) " "JobDescription: %s (InBound)" msgstr "" #: src/hed/libs/compute/Broker.cpp:509 #, c-format msgid "" "Matchmaking, ConnectivityOut problem, ExecutionTarget: %s (ConnectivityOut) " "JobDescription: %s (OutBound)" msgstr "" #: src/hed/libs/compute/Broker.cpp:532 msgid "Unable to sort added jobs. The BrokerPlugin plugin has not been loaded." msgstr "" #: src/hed/libs/compute/Broker.cpp:549 msgid "Unable to match target, marking it as not matching. Broker not valid." msgstr "" #: src/hed/libs/compute/Broker.cpp:585 #, fuzzy msgid "Unable to sort ExecutionTarget objects - Invalid Broker object." msgstr "Kan inte omvandla inställningar till pythonobjekt" #: src/hed/libs/compute/Broker.cpp:609 msgid "" "Unable to register job submission. Can't get JobDescription object from " "Broker, Broker is invalid." msgstr "" #: src/hed/libs/compute/BrokerPlugin.cpp:89 #, c-format msgid "Broker plugin \"%s\" not found." 
msgstr "" #: src/hed/libs/compute/BrokerPlugin.cpp:96 #, fuzzy, c-format msgid "Unable to load BrokerPlugin (%s)" msgstr "Misslyckades med att läsa object: %s" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:26 #, c-format msgid "Uniq is replacing service coming from %s with service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:30 #, c-format msgid "Uniq is ignoring service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:37 #, fuzzy, c-format msgid "Uniq is adding service coming from %s" msgstr "Misslyckades med att ladda serviceinställningar" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:60 #, c-format msgid "Adding endpoint (%s) to TargetInformationRetriever" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:63 #, c-format msgid "Adding endpoint (%s) to ServiceEndpointRetriever" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:66 #, c-format msgid "" "Adding endpoint (%s) to both ServiceEndpointRetriever and " "TargetInformationRetriever" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:42 #, c-format msgid "The plugin %s does not support any interfaces, skipping it." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:47 #, c-format msgid "" "The first supported interface of the plugin %s is an empty string, skipping " "the plugin." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:95 #, c-format msgid "Interface on endpoint (%s) %s." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:101 #: src/hed/libs/compute/EntityRetriever.cpp:133 #: src/hed/libs/compute/EntityRetriever.cpp:425 #, c-format msgid "Ignoring endpoint (%s), it is already registered in retriever." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:110 #, fuzzy, c-format msgid "Service Loop: Endpoint %s" msgstr "process: ändpunkt: %s" #: src/hed/libs/compute/EntityRetriever.cpp:112 #, c-format msgid " This endpoint (%s) is STARTED or SUCCESSFUL" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:115 #, c-format msgid "" "Suspending querying of endpoint (%s) since the service at the endpoint is " "already being queried, or has been queried." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:122 #: src/hed/libs/compute/EntityRetriever.cpp:237 #, c-format msgid " Status of endpoint (%s) is %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:126 #, c-format msgid "Setting status (STARTED) for endpoint: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:145 #, c-format msgid "Starting thread to query the endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:147 #: src/hed/libs/compute/EntityRetriever.cpp:289 #, fuzzy, c-format msgid "Failed to start querying the endpoint on %s" msgstr "Misslyckades med att påbörja skrivning till destination: %s" #: src/hed/libs/compute/EntityRetriever.cpp:174 #, c-format msgid "Found a registry, will query it recursively: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:211 #, c-format msgid "Setting status (%s) for endpoint: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:231 msgid "Checking for suspended endpoints which should be started." 
msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:241 #, c-format msgid "Found started or successful endpoint (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:253 #, fuzzy, c-format msgid "Found suspended endpoint (%s)" msgstr "process: ändpunkt: %s" #: src/hed/libs/compute/EntityRetriever.cpp:264 #, fuzzy, c-format msgid "Trying to start suspended endpoint (%s)" msgstr "Försöker lyssna på port %s" #: src/hed/libs/compute/EntityRetriever.cpp:284 #, c-format msgid "" "Starting querying of suspended endpoint (%s) - no other endpoints for this " "service is being queried or has been queried successfully." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:351 #, c-format msgid "Calling plugin %s to query endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:373 #, c-format msgid "" "The interface of this endpoint (%s) is unspecified, will try all possible " "plugins" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:389 #, c-format msgid "Problem loading plugin %s, skipping it." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:393 #, c-format msgid "The endpoint (%s) is not supported by this plugin (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:414 #, c-format msgid "" "New endpoint is created (%s) from the one with the unspecified interface (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:432 #, c-format msgid "Starting sub-thread to query the endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:434 #, c-format msgid "" "Failed to start querying the endpoint on %s (unable to create sub-thread)" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:32 #, c-format msgid "Found %s %s (it was loaded already)" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:41 #, c-format msgid "" "Unable to locate the \"%s\" plugin. Please refer to installation " "instructions and check if package providing support for %s plugin is " "installed" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:42 #, fuzzy, c-format msgid "%s plugin \"%s\" not found." msgstr "Antar - filen ej funnen" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:49 #: src/hed/libs/compute/JobControllerPlugin.cpp:100 #: src/hed/libs/compute/JobControllerPlugin.cpp:109 #: src/hed/libs/compute/SubmitterPlugin.cpp:158 #: src/hed/libs/compute/SubmitterPlugin.cpp:168 #, c-format msgid "" "Unable to locate the \"%s\" plugin. Please refer to installation " "instructions and check if package providing support for \"%s\" plugin is " "installed" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:50 #, fuzzy, c-format msgid "%s %s could not be created." msgstr "Service %s(%s) kunde inte skapas" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:55 #, fuzzy, c-format msgid "Loaded %s %s" msgstr "Laddade %s" #: src/hed/libs/compute/ExecutionTarget.cpp:51 #, c-format msgid "" "Skipping ComputingEndpoint '%s', because it has '%s' interface instead of " "the requested '%s'." 
msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:237 #, fuzzy, c-format msgid "Address: %s" msgstr "Lagt till användare : %s" #: src/hed/libs/compute/ExecutionTarget.cpp:238 #, fuzzy, c-format msgid "Place: %s" msgstr " Ort: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:239 #, fuzzy, c-format msgid "Country: %s" msgstr "Kluster: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:240 #, fuzzy, c-format msgid "Postal code: %s" msgstr " Postnummer: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:241 #, fuzzy, c-format msgid "Latitude: %f" msgstr " Latitud: %f" #: src/hed/libs/compute/ExecutionTarget.cpp:242 #, fuzzy, c-format msgid "Longitude: %f" msgstr " Longitud: %f" #: src/hed/libs/compute/ExecutionTarget.cpp:248 #, fuzzy, c-format msgid "Owner: %s" msgstr " Ägare: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:255 #, fuzzy, c-format msgid "ID: %s" msgstr "%s: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:256 #, fuzzy, c-format msgid "Type: %s" msgstr " Avslutningstid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:261 #, fuzzy, c-format msgid "URL: %s" msgstr " URL: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:262 #, fuzzy, c-format msgid "Interface: %s" msgstr " Gränssnittsnamn: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:264 #, fuzzy msgid "Interface versions:" msgstr " Gränssnittstillägg: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:269 #, fuzzy msgid "Interface extensions:" msgstr " Gränssnittstillägg: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:274 msgid "Capabilities:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:278 #, fuzzy, c-format msgid "Technology: %s" msgstr " Ort: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:280 #, fuzzy msgid "Supported Profiles:" msgstr " Stöder preemption" #: src/hed/libs/compute/ExecutionTarget.cpp:284 #, fuzzy, c-format msgid "Implementor: %s" msgstr " Implementerare: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:285 #, fuzzy, c-format msgid "Implementation name: %s" msgstr " Implementeringsnamn: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:286 #, fuzzy, c-format msgid "Quality level: %s" msgstr " Kö: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:287 #, fuzzy, c-format msgid "Health state: %s" msgstr " Hälsotillstånd: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:288 #, fuzzy, c-format msgid "Health state info: %s" msgstr " Hälsotillstånd: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:289 #, fuzzy, c-format msgid "Serving state: %s" msgstr " Betjäningstillstånd: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:290 #, fuzzy, c-format msgid "Issuer CA: %s" msgstr " Utfärdar-CA: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:292 msgid "Trusted CAs:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:296 #, fuzzy, c-format msgid "Downtime starts: %s" msgstr "%s ny status: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:297 #, fuzzy, c-format msgid "Downtime ends: %s" msgstr "%s ny status: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:298 #, fuzzy, c-format msgid "Staging: %s" msgstr " Laddar ned/upp: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:300 #, fuzzy msgid "Job descriptions:" msgstr "Ogiltig jobbeskrivning:" #: src/hed/libs/compute/ExecutionTarget.cpp:312 #, fuzzy, c-format msgid "Scheme: %s" msgstr "Källa: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:315 #, fuzzy, c-format msgid "Rule: %s" msgstr "Omschemalagt jobb %s" #: src/hed/libs/compute/ExecutionTarget.cpp:327 #, fuzzy, c-format msgid "Mapping queue: %s" msgstr " Mappar till kö: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:328 #, 
fuzzy, c-format msgid "Max wall-time: %s" msgstr " Största klocktid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:329 #, fuzzy, c-format msgid "Max total wall-time: %s" msgstr " Största klocktid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:330 #, fuzzy, c-format msgid "Min wall-time: %s" msgstr " Minsta klocktid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:331 #, fuzzy, c-format msgid "Default wall-time: %s" msgstr " Förvald klocktid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:332 #, fuzzy, c-format msgid "Max CPU time: %s" msgstr " Största CPU-tid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:333 #, fuzzy, c-format msgid "Min CPU time: %s" msgstr " Minsta CPU-tid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:334 #, fuzzy, c-format msgid "Default CPU time: %s" msgstr " Förvald CPU-tid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:335 #, fuzzy, c-format msgid "Max total jobs: %i" msgstr " Största totalt antal jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:336 #, fuzzy, c-format msgid "Max running jobs: %i" msgstr " Största antal exekverande jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:337 #, fuzzy, c-format msgid "Max waiting jobs: %i" msgstr " Största antal väntade jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:338 #, fuzzy, c-format msgid "Max pre-LRMS waiting jobs: %i" msgstr " Största antal pre-LRMS-väntade jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:339 #, fuzzy, c-format msgid "Max user running jobs: %i" msgstr " Största antal exekverande jobb för användaren: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:340 #, fuzzy, c-format msgid "Max slots per job: %i" msgstr " Största antal slotar per jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:341 #, fuzzy, c-format msgid "Max stage in streams: %i" msgstr " Största antal nedladdningsströmmar: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:342 #, fuzzy, c-format msgid "Max stage out streams: %i" msgstr " Största antal uppladdningsströmmar: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:343 #, fuzzy, c-format msgid "Scheduling policy: %s" msgstr "ARC delegeringspolicy: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:344 #, fuzzy, c-format msgid "Max memory: %i" msgstr " Största minne: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:345 #, fuzzy, c-format msgid "Max virtual memory: %i" msgstr " Största minne: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:346 #, fuzzy, c-format msgid "Max disk space: %i" msgstr " Största diskutrymme: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:347 #, fuzzy, c-format msgid "Default Storage Service: %s" msgstr " Förvald lagringsservice: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:348 #, fuzzy msgid "Supports preemption" msgstr " Stöder preemption" #: src/hed/libs/compute/ExecutionTarget.cpp:349 #, fuzzy msgid "Doesn't support preemption" msgstr " Stöder ej preemption" #: src/hed/libs/compute/ExecutionTarget.cpp:350 #, fuzzy, c-format msgid "Total jobs: %i" msgstr " Största totalt antal jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:351 #, fuzzy, c-format msgid "Running jobs: %i" msgstr " Största antal exekverande jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:352 #, fuzzy, c-format msgid "Local running jobs: %i" msgstr " Största antal exekverande jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:353 #, fuzzy, c-format msgid "Waiting jobs: %i" msgstr " Största antal väntade jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:354 #, fuzzy, c-format msgid "Local waiting jobs: %i" msgstr " Största antal väntade jobb: %i" 
#: src/hed/libs/compute/ExecutionTarget.cpp:355 #, fuzzy, c-format msgid "Suspended jobs: %i" msgstr " Största antal exekverande jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:356 #, fuzzy, c-format msgid "Local suspended jobs: %i" msgstr " Största antal exekverande jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:357 #, fuzzy, c-format msgid "Staging jobs: %i" msgstr " Laddar ned/upp: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:358 #, fuzzy, c-format msgid "Pre-LRMS waiting jobs: %i" msgstr " Största antal pre-LRMS-väntade jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:359 #, fuzzy, c-format msgid "Estimated average waiting time: %s" msgstr " Använd klocktid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:360 #, fuzzy, c-format msgid "Estimated worst waiting time: %s" msgstr " Använd klocktid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:361 #, fuzzy, c-format msgid "Free slots: %i" msgstr " Använda slots: %d" #: src/hed/libs/compute/ExecutionTarget.cpp:363 msgid "Free slots grouped according to time limits (limit: free slots):" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:366 #, fuzzy, c-format msgid " %s: %i" msgstr "%s: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:367 #, fuzzy, c-format msgid " unspecified: %i" msgstr " Största antal exekverande jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:370 #, fuzzy, c-format msgid "Used slots: %i" msgstr " Använda slots: %d" #: src/hed/libs/compute/ExecutionTarget.cpp:371 #, fuzzy, c-format msgid "Requested slots: %i" msgstr " Använda slots: %d" #: src/hed/libs/compute/ExecutionTarget.cpp:372 #, fuzzy, c-format msgid "Reservation policy: %s" msgstr "ARC delegeringspolicy: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:379 #, fuzzy, c-format msgid "Resource manager: %s" msgstr "Källan skapades: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:380 #, fuzzy, c-format msgid " (%s)" msgstr "%s (%s)" #: src/hed/libs/compute/ExecutionTarget.cpp:383 #, fuzzy, c-format msgid "Total physical CPUs: %i" msgstr " Största CPU-tid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:384 #, fuzzy, c-format msgid "Total logical CPUs: %i" msgstr " Största totalt antal jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:385 #, fuzzy, c-format msgid "Total slots: %i" msgstr " Största totalt antal jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:386 #, fuzzy msgid "Supports advance reservations" msgstr " Stöder preemption" #: src/hed/libs/compute/ExecutionTarget.cpp:387 #, fuzzy msgid "Doesn't support advance reservations" msgstr " Stöder ej preemption" #: src/hed/libs/compute/ExecutionTarget.cpp:388 #, fuzzy msgid "Supports bulk submission" msgstr " Stöder preemption" #: src/hed/libs/compute/ExecutionTarget.cpp:389 #, fuzzy msgid "Doesn't support bulk Submission" msgstr " Stöder ej preemption" #: src/hed/libs/compute/ExecutionTarget.cpp:390 msgid "Homogeneous resource" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:391 msgid "Non-homogeneous resource" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:393 #, fuzzy msgid "Network information:" msgstr "Köinformation" #: src/hed/libs/compute/ExecutionTarget.cpp:398 #, fuzzy msgid "Working area is shared among jobs" msgstr " Avslutningstid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:399 msgid "Working area is not shared among jobs" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:400 #, fuzzy, c-format msgid "Working area total size: %i GB" msgstr "Cache skapades: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:401 #, fuzzy, c-format msgid "Working 
area free size: %i GB" msgstr " Avslutningstid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:402 #, fuzzy, c-format msgid "Working area life time: %s" msgstr " Avslutningstid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:403 #, fuzzy, c-format msgid "Cache area total size: %i GB" msgstr "Cache skapades: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:404 #, fuzzy, c-format msgid "Cache area free size: %i GB" msgstr "Cache skapades: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:410 #, fuzzy, c-format msgid "Platform: %s" msgstr " Ort: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:411 #, fuzzy msgid "Execution environment supports inbound connections" msgstr "skriv ut versionsinformation" #: src/hed/libs/compute/ExecutionTarget.cpp:412 #, fuzzy msgid "Execution environment does not support inbound connections" msgstr "skriv ut versionsinformation" #: src/hed/libs/compute/ExecutionTarget.cpp:413 #, fuzzy msgid "Execution environment supports outbound connections" msgstr "skriv ut versionsinformation" #: src/hed/libs/compute/ExecutionTarget.cpp:414 #, fuzzy msgid "Execution environment does not support outbound connections" msgstr "skriv ut versionsinformation" #: src/hed/libs/compute/ExecutionTarget.cpp:415 #, fuzzy msgid "Execution environment is a virtual machine" msgstr "skriv ut versionsinformation" #: src/hed/libs/compute/ExecutionTarget.cpp:416 #, fuzzy msgid "Execution environment is a physical machine" msgstr "skriv ut versionsinformation" #: src/hed/libs/compute/ExecutionTarget.cpp:417 #, fuzzy, c-format msgid "CPU vendor: %s" msgstr " Största CPU-tid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:418 #, fuzzy, c-format msgid "CPU model: %s" msgstr " Postnummer: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:419 #, fuzzy, c-format msgid "CPU version: %s" msgstr "%s version %s" #: src/hed/libs/compute/ExecutionTarget.cpp:420 #, c-format msgid "CPU clock speed: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:421 #, fuzzy, c-format msgid "Main memory size: %i" msgstr " Största minne: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:422 #, fuzzy, c-format msgid "OS family: %s" msgstr "Misslyckande: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:423 #, fuzzy, c-format msgid "OS name: %s" msgstr " Namn: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:424 #, fuzzy, c-format msgid "OS version: %s" msgstr "%s version %s" #: src/hed/libs/compute/ExecutionTarget.cpp:431 #, fuzzy msgid "Computing service:" msgstr "Avvisar service: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:455 #, fuzzy, c-format msgid "%d Endpoints" msgstr "checkpoint: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:460 #, fuzzy msgid "Endpoint Information:" msgstr "Ändpunktsinformation" #: src/hed/libs/compute/ExecutionTarget.cpp:472 #, fuzzy, c-format msgid "%d Batch Systems" msgstr " Hälsotillstånd: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:477 #, fuzzy msgid "Batch System Information:" msgstr "Felaktig autentiseringsinformation" #: src/hed/libs/compute/ExecutionTarget.cpp:483 #, fuzzy msgid "Installed application environments:" msgstr "Misslyckades med att initiera GM-miljö" #: src/hed/libs/compute/ExecutionTarget.cpp:496 #, c-format msgid "%d Shares" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:501 #, fuzzy msgid "Share Information:" msgstr "Köinformation" #: src/hed/libs/compute/ExecutionTarget.cpp:507 #, c-format msgid "%d mapping policies" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:511 #, fuzzy msgid "Mapping policy:" msgstr "ARC delegeringspolicy: %s" #: 
src/hed/libs/compute/ExecutionTarget.cpp:527 #, c-format msgid "Execution Target on Computing Service: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:529 #, fuzzy, c-format msgid " Computing endpoint URL: %s" msgstr " URL: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:531 #, fuzzy, c-format msgid " Computing endpoint interface name: %s" msgstr "Skapar klientgränssnitt" #: src/hed/libs/compute/ExecutionTarget.cpp:533 #: src/hed/libs/compute/Job.cpp:580 #, fuzzy, c-format msgid " Queue: %s" msgstr " Kö: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:536 #, fuzzy, c-format msgid " Mapping queue: %s" msgstr " Mappar till kö: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:539 #, fuzzy, c-format msgid " Health state: %s" msgstr " Hälsotillstånd: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:544 #, fuzzy msgid "Service information:" msgstr "Köinformation" #: src/hed/libs/compute/ExecutionTarget.cpp:549 #, fuzzy msgid " Installed application environments:" msgstr "Misslyckades med att initiera GM-miljö" #: src/hed/libs/compute/ExecutionTarget.cpp:556 #, fuzzy msgid "Batch system information:" msgstr "Köinformation" #: src/hed/libs/compute/ExecutionTarget.cpp:559 #, fuzzy msgid "Queue information:" msgstr "Köinformation" #: src/hed/libs/compute/ExecutionTarget.cpp:566 #, fuzzy msgid " Benchmark information:" msgstr "Felaktig autentiseringsinformation" #: src/hed/libs/compute/GLUE2.cpp:58 msgid "The ComputingService doesn't advertise its Quality Level." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:117 msgid "The ComputingEndpoint doesn't advertise its Quality Level." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:128 #, fuzzy msgid "The ComputingService doesn't advertise its Interface." msgstr "Servicestatus kunde inte inhämtas" #: src/hed/libs/compute/GLUE2.cpp:160 msgid "The ComputingEndpoint doesn't advertise its Serving State." msgstr "" #: src/hed/libs/compute/Job.cpp:329 #, fuzzy msgid "Unable to detect format of job record." 
msgstr "Misslyckades med att skapa/hitta katalog %s" #: src/hed/libs/compute/Job.cpp:550 #, c-format msgid "Job: %s" msgstr "Jobb: %s" #: src/hed/libs/compute/Job.cpp:552 #, c-format msgid " Name: %s" msgstr " Namn: %s" #: src/hed/libs/compute/Job.cpp:553 #, fuzzy, c-format msgid " State: %s" msgstr " Tillstånd: %s" #: src/hed/libs/compute/Job.cpp:556 #, fuzzy, c-format msgid " Specific state: %s" msgstr " Betjäningstillstånd: %s" #: src/hed/libs/compute/Job.cpp:560 src/hed/libs/compute/Job.cpp:584 #, fuzzy, c-format msgid " Waiting Position: %d" msgstr " Största antal väntade jobb: %i" #: src/hed/libs/compute/Job.cpp:564 #, c-format msgid " Exit Code: %d" msgstr " Avslutningskod: %d" #: src/hed/libs/compute/Job.cpp:568 #, fuzzy, c-format msgid " Job Error: %s" msgstr " Fel: %s" #: src/hed/libs/compute/Job.cpp:573 #, c-format msgid " Owner: %s" msgstr " Ägare: %s" #: src/hed/libs/compute/Job.cpp:577 #, fuzzy, c-format msgid " Other Messages: %s" msgstr "Källan skapades: %s" #: src/hed/libs/compute/Job.cpp:582 #, fuzzy, c-format msgid " Requested Slots: %d" msgstr " Använda slots: %d" #: src/hed/libs/compute/Job.cpp:587 #, fuzzy, c-format msgid " Stdin: %s" msgstr " Laddar ned/upp: %s" #: src/hed/libs/compute/Job.cpp:589 #, fuzzy, c-format msgid " Stdout: %s" msgstr "StdOut: %s" #: src/hed/libs/compute/Job.cpp:591 #, fuzzy, c-format msgid " Stderr: %s" msgstr "StdErr: %s" #: src/hed/libs/compute/Job.cpp:593 #, fuzzy, c-format msgid " Computing Service Log Directory: %s" msgstr " Gridhanterarens loggkatalog: %s" #: src/hed/libs/compute/Job.cpp:596 #, fuzzy, c-format msgid " Submitted: %s" msgstr " Tillstånd: %s" #: src/hed/libs/compute/Job.cpp:599 #, fuzzy, c-format msgid " End Time: %s" msgstr " Avslutningstid: %s" #: src/hed/libs/compute/Job.cpp:602 #, fuzzy, c-format msgid " Submitted from: %s" msgstr "%s: Tillstånd: %s från %s" #: src/hed/libs/compute/Job.cpp:605 #, c-format msgid " Submitting client: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:608 #, fuzzy, c-format msgid " Requested CPU Time: %s" msgstr " Förvald CPU-tid: %s" #: src/hed/libs/compute/Job.cpp:612 #, fuzzy, c-format msgid " Used CPU Time: %s" msgstr " Största CPU-tid: %s" #: src/hed/libs/compute/Job.cpp:615 #, fuzzy, c-format msgid " Used Wall Time: %s" msgstr " Största klocktid: %s" #: src/hed/libs/compute/Job.cpp:618 #, fuzzy, c-format msgid " Used Memory: %d" msgstr " Största minne: %i" #: src/hed/libs/compute/Job.cpp:622 #, fuzzy, c-format msgid " Results were deleted: %s" msgstr " Resultaten har raderats: %s" #: src/hed/libs/compute/Job.cpp:623 #, fuzzy, c-format msgid " Results must be retrieved before: %s" msgstr "Insändning returnerade fel: %s" #: src/hed/libs/compute/Job.cpp:627 #, fuzzy, c-format msgid " Proxy valid until: %s" msgstr "Proxy är ingen fil: %s" #: src/hed/libs/compute/Job.cpp:631 #, fuzzy, c-format msgid " Entry valid from: %s" msgstr "Ogiltig URL: %s" #: src/hed/libs/compute/Job.cpp:634 #, fuzzy, c-format msgid " Entry valid for: %s" msgstr "Ogiltig URL: %s" #: src/hed/libs/compute/Job.cpp:638 msgid " Old job IDs:" msgstr "" #: src/hed/libs/compute/Job.cpp:646 #, fuzzy, c-format msgid " ID on service: %s" msgstr "Avvisar service: %s" #: src/hed/libs/compute/Job.cpp:647 #, fuzzy, c-format msgid " Service information URL: %s (%s)" msgstr "Köinformation" #: src/hed/libs/compute/Job.cpp:648 #, fuzzy, c-format msgid " Job status URL: %s (%s)" msgstr " Tillstånd: %s" #: src/hed/libs/compute/Job.cpp:649 #, c-format msgid " Job management URL: %s (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:650 #, 
fuzzy, c-format msgid " Stagein directory URL: %s" msgstr "Cachekatalog: %s" #: src/hed/libs/compute/Job.cpp:651 #, fuzzy, c-format msgid " Stageout directory URL: %s" msgstr "Cachekatalog: %s" #: src/hed/libs/compute/Job.cpp:652 #, fuzzy, c-format msgid " Session directory URL: %s" msgstr "Skapar katalog %s" #: src/hed/libs/compute/Job.cpp:654 #, fuzzy msgid " Delegation IDs:" msgstr "Destinaltion: %s" #: src/hed/libs/compute/Job.cpp:670 #, c-format msgid "Unable to handle job (%s), no interface specified." msgstr "" #: src/hed/libs/compute/Job.cpp:675 #, c-format msgid "" "Unable to handle job (%s), no plugin associated with the specified interface " "(%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:697 #, c-format msgid "Invalid download destination path specified (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:702 #, c-format msgid "" "Unable to download job (%s), no JobControllerPlugin plugin was set to handle " "the job." msgstr "" #: src/hed/libs/compute/Job.cpp:706 #, c-format msgid "Downloading job: %s" msgstr "Laddar ner jobb: %s" #: src/hed/libs/compute/Job.cpp:710 #, c-format msgid "" "Cant retrieve job files for job (%s) - unable to determine URL of stage out " "directory" msgstr "" #: src/hed/libs/compute/Job.cpp:715 #, c-format msgid "Invalid stage out path specified (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:722 #, c-format msgid "%s directory exist! Skipping job." msgstr "" #: src/hed/libs/compute/Job.cpp:728 #, fuzzy, c-format msgid "Unable to retrieve list of job files to download for job %s" msgstr "Misslyckades med att inhämta LFN/PFN-lista från %s" #: src/hed/libs/compute/Job.cpp:733 #, fuzzy, c-format msgid "No files to retrieve for job %s" msgstr "Misslyckades med att inhämta LFN/PFN-lista från %s" #: src/hed/libs/compute/Job.cpp:739 #, fuzzy, c-format msgid "Failed to create directory %s! Skipping job." 
msgstr "Misslyckades med att skapa/hitta katalog %s : %s\"" #: src/hed/libs/compute/Job.cpp:752 #, fuzzy, c-format msgid "Failed downloading %s to %s, destination already exist" msgstr "Misslyckades med att ladda ner %s till %s" #: src/hed/libs/compute/Job.cpp:758 #, fuzzy, c-format msgid "Failed downloading %s to %s, unable to remove existing destination" msgstr "Misslyckades under skrivning till destination" #: src/hed/libs/compute/Job.cpp:764 #, fuzzy, c-format msgid "Failed downloading %s to %s" msgstr "Misslyckades med att ladda ner %s till %s" #: src/hed/libs/compute/Job.cpp:777 src/hed/libs/compute/Job.cpp:782 #, fuzzy, c-format msgid "Unable to list files at %s" msgstr "Använder jobblistfil: %s" #: src/hed/libs/compute/Job.cpp:824 msgid "Now copying (from -> to)" msgstr "Kopierar nu (from -> to)" #: src/hed/libs/compute/Job.cpp:825 #, c-format msgid " %s -> %s" msgstr " %s -> %s" #: src/hed/libs/compute/Job.cpp:841 #, fuzzy, c-format msgid "Unable to initialise connection to source: %s" msgstr "Misslyckades med att etablera förbindelse till %s:%i" #: src/hed/libs/compute/Job.cpp:852 #, fuzzy, c-format msgid "Unable to initialise connection to destination: %s" msgstr "Misslyckades med att påbörja skrivning till destination: %s" #: src/hed/libs/compute/Job.cpp:871 #, c-format msgid "File download failed: %s" msgstr "Filnedladdning misslyckades: %s" #: src/hed/libs/compute/Job.cpp:910 src/hed/libs/compute/Job.cpp:939 #: src/hed/libs/compute/Job.cpp:971 src/hed/libs/compute/Job.cpp:1004 #, fuzzy, c-format msgid "Waiting for lock on file %s" msgstr "Demonisering av fork misslyckades: %s" #: src/hed/libs/compute/JobControllerPlugin.cpp:101 #, c-format msgid "JobControllerPlugin plugin \"%s\" not found." msgstr "" #: src/hed/libs/compute/JobControllerPlugin.cpp:110 #, fuzzy, c-format msgid "JobControllerPlugin %s could not be created" msgstr "Komponent %s(%s) kunde inte skapas" #: src/hed/libs/compute/JobControllerPlugin.cpp:115 #, fuzzy, c-format msgid "Loaded JobControllerPlugin %s" msgstr "Laddade Plexer %s(%s)" #: src/hed/libs/compute/JobDescription.cpp:22 #, fuzzy, c-format msgid ": %d" msgstr "Period: %d" #: src/hed/libs/compute/JobDescription.cpp:24 #, fuzzy, c-format msgid ": %s" msgstr "%s: %s" #: src/hed/libs/compute/JobDescription.cpp:138 msgid " --- DRY RUN --- " msgstr "" #: src/hed/libs/compute/JobDescription.cpp:148 #, fuzzy, c-format msgid " Annotation: %s" msgstr "Funktion : %s" #: src/hed/libs/compute/JobDescription.cpp:154 #, c-format msgid " Old activity ID: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:160 #, fuzzy, c-format msgid " Argument: %s" msgstr "get: %s" #: src/hed/libs/compute/JobDescription.cpp:171 #, fuzzy, c-format msgid " RemoteLogging (optional): %s (%s)" msgstr "Tar bort %s" #: src/hed/libs/compute/JobDescription.cpp:174 #, fuzzy, c-format msgid " RemoteLogging: %s (%s)" msgstr "Tar bort %s" #: src/hed/libs/compute/JobDescription.cpp:182 #, c-format msgid " Environment.name: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:183 #, fuzzy, c-format msgid " Environment: %s" msgstr " Avslutningstid: %s" #: src/hed/libs/compute/JobDescription.cpp:196 #, fuzzy, c-format msgid " PreExecutable.Argument: %s" msgstr " Exekverings-CE: %s" #: src/hed/libs/compute/JobDescription.cpp:199 #: src/hed/libs/compute/JobDescription.cpp:217 #, c-format msgid " Exit code for successful execution: %d" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:202 #: src/hed/libs/compute/JobDescription.cpp:220 msgid " No exit code for successful execution 
specified." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:214 #, fuzzy, c-format msgid " PostExecutable.Argument: %s" msgstr " Exekverings-CE: %s" #: src/hed/libs/compute/JobDescription.cpp:230 #, fuzzy, c-format msgid " Access control: %s" msgstr "Tillträdeslista: %s" #: src/hed/libs/compute/JobDescription.cpp:234 #, fuzzy, c-format msgid " Processing start time: %s" msgstr " Betjäningstillstånd: %s" #: src/hed/libs/compute/JobDescription.cpp:237 msgid " Notify:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:251 #, fuzzy, c-format msgid " Credential service: %s" msgstr "Avvisar service: %s" #: src/hed/libs/compute/JobDescription.cpp:261 msgid " Operating system requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:279 msgid " Computing endpoint requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:292 msgid " Node access: inbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:295 msgid " Node access: outbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:298 msgid " Node access: inbound and outbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:308 msgid " Job requires exclusive execution" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:311 msgid " Job does not require exclusive execution" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:316 #, fuzzy msgid " Run time environment requirements:" msgstr "Felaktigt namn för runtime-miljö: %s" #: src/hed/libs/compute/JobDescription.cpp:328 #, fuzzy msgid " Inputfile element:" msgstr "Katalog: %s" #: src/hed/libs/compute/JobDescription.cpp:329 #: src/hed/libs/compute/JobDescription.cpp:351 #, fuzzy, c-format msgid " Name: %s" msgstr " Namn: %s" #: src/hed/libs/compute/JobDescription.cpp:331 #, fuzzy msgid " Is executable: true" msgstr " Exekverings-CE: %s" #: src/hed/libs/compute/JobDescription.cpp:335 #, fuzzy, c-format msgid " Sources: %s" msgstr "Källa: %s" #: src/hed/libs/compute/JobDescription.cpp:337 #, fuzzy, c-format msgid " Sources.DelegationID: %s" msgstr "Källa: %s" #: src/hed/libs/compute/JobDescription.cpp:341 #, fuzzy, c-format msgid " Sources.Options: %s = %s" msgstr "Källa: %s" #: src/hed/libs/compute/JobDescription.cpp:350 #, fuzzy msgid " Outputfile element:" msgstr "Katalog: %s" #: src/hed/libs/compute/JobDescription.cpp:354 #, fuzzy, c-format msgid " Targets: %s" msgstr " Namn: %s" #: src/hed/libs/compute/JobDescription.cpp:356 #, fuzzy, c-format msgid " Targets.DelegationID: %s" msgstr "Destinaltion: %s" #: src/hed/libs/compute/JobDescription.cpp:360 #, c-format msgid " Targets.Options: %s = %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:367 #, fuzzy, c-format msgid " DelegationID element: %s" msgstr "Destinaltion: %s" #: src/hed/libs/compute/JobDescription.cpp:374 #, fuzzy, c-format msgid " Other attributes: [%s], %s" msgstr "Attribut: %s - %s" #: src/hed/libs/compute/JobDescription.cpp:440 #, fuzzy msgid "Empty job description source string" msgstr "Jobbeskrivning som skall sändas: %s" #: src/hed/libs/compute/JobDescription.cpp:473 #, fuzzy msgid "No job description parsers available" msgstr "Ingen jobbeskrivning angiven" #: src/hed/libs/compute/JobDescription.cpp:475 #, c-format msgid "" "No job description parsers suitable for handling '%s' language are available" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:483 #, fuzzy, c-format msgid "%s parsing error" msgstr "Fatalt fel: %s" #: src/hed/libs/compute/JobDescription.cpp:499 #, fuzzy msgid "No job description parser was able to interpret job description" msgstr "Kan inte öppna 
jobbeskrivningsfil: %s" #: src/hed/libs/compute/JobDescription.cpp:509 msgid "" "Job description language is not specified, unable to output description." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:521 #, fuzzy, c-format msgid "Generating %s job description output" msgstr "Submit: Misslyckades med att sända jobbeskrivning" #: src/hed/libs/compute/JobDescription.cpp:537 #, c-format msgid "Language (%s) not recognized by any job description parsers." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:550 #, c-format msgid "Two input files have identical name '%s'." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:569 #: src/hed/libs/compute/JobDescription.cpp:582 #, fuzzy, c-format msgid "Cannot stat local input file '%s'" msgstr "Misslyckades med att ladda upp lokala indatafiler" #: src/hed/libs/compute/JobDescription.cpp:602 #, fuzzy, c-format msgid "Cannot find local input file '%s' (%s)" msgstr "Misslyckades med att ladda upp lokala indatafiler" #: src/hed/libs/compute/JobDescription.cpp:644 #, fuzzy msgid "Unable to select runtime environment" msgstr "Felaktigt namn för runtime-miljö: %s" #: src/hed/libs/compute/JobDescription.cpp:651 msgid "Unable to select middleware" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:658 msgid "Unable to select operating system." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:677 #, c-format msgid "No test-job with ID %d found." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:689 #, c-format msgid "Test was defined with ID %d, but some error occurred during parsing it." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:693 #, fuzzy, c-format msgid "No jobdescription resulted at %d test" msgstr "Kan inte öppna jobbeskrivningsfil: %s" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:52 #, c-format msgid "JobDescriptionParserPlugin plugin \"%s\" not found." 
msgstr "" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:59 #, fuzzy, c-format msgid "JobDescriptionParserPlugin %s could not be created" msgstr "Komponent %s(%s) kunde inte skapas" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:64 #, fuzzy, c-format msgid "Loaded JobDescriptionParserPlugin %s" msgstr "Ogiltig jobbeskrivning:" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:172 #, fuzzy msgid "Unable to create temporary directory" msgstr "Misslyckades med att skapa/hitta katalog %s" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:180 #, fuzzy, c-format msgid "Unable to create data base environment (%s)" msgstr "Felaktigt namn för runtime-miljö: %s" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:190 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:194 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:198 #, c-format msgid "Unable to set duplicate flags for secondary key DB (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:204 #, fuzzy, c-format msgid "Unable to create job database (%s)" msgstr "Misslyckades med att inhämta statusinformation" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:208 #, fuzzy, c-format msgid "Unable to create DB for secondary name keys (%s)" msgstr "Misslyckades med att skapa/hitta katalog %s" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:212 #, fuzzy, c-format msgid "Unable to create DB for secondary endpoint keys (%s)" msgstr "Misslyckades med att skapa socket för port %s" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:216 #, fuzzy, c-format msgid "Unable to create DB for secondary service info keys (%s)" msgstr "Misslyckades med att skapa fil i %s" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:221 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:225 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:229 #, c-format msgid "Unable to associate secondary DB with primary DB (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:232 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:92 #, fuzzy, c-format msgid "Job database created successfully (%s)" msgstr "Stängdes OK" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:274 #, fuzzy, c-format msgid "Error from BDB: %s: %s" msgstr "Fel vid sökning av replikor: %s" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:277 #, fuzzy, c-format msgid "Error from BDB: %s" msgstr " Fel: %s" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:297 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:137 #: src/hed/libs/compute/JobInformationStorageXML.cpp:27 #, c-format msgid "" "Job list file cannot be created: The parent directory (%s) doesn't exist." 
msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:301 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:141 #: src/hed/libs/compute/JobInformationStorageXML.cpp:31 #, fuzzy, c-format msgid "Job list file cannot be created: %s is not a directory" msgstr "CA-certifikatkatlog är inte en katalog: %s" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:308 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:148 #: src/hed/libs/compute/JobInformationStorageXML.cpp:38 #, fuzzy, c-format msgid "Job list file (%s) is not a regular file" msgstr "ARC-jobblistfil är inte en vanlig fil: %s" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:346 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:405 #, c-format msgid "Unable to write key/value pair to job database (%s): Key \"%s\"" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:572 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:435 #: src/hed/libs/compute/JobInformationStorageXML.cpp:137 #, fuzzy, c-format msgid "Unable to truncate job database (%s)" msgstr "Misslyckades med att inhämta LFN/PFN-lista från %s" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:603 msgid "" "ENOENT: The file or directory does not exist, Or a nonexistent re_source " "file was specified." msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:606 msgid "" "DB_OLD_VERSION: The database cannot be opened without being first upgraded." msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:609 msgid "EEXIST: DB_CREATE and DB_EXCL were specified and the database exists." msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:611 msgid "EINVAL" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:614 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:468 #, fuzzy, c-format msgid "Unable to determine error (%d)" msgstr "Misslyckades med att läsa object: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:57 #, fuzzy, c-format msgid "Unable to create data base (%s)" msgstr "Misslyckades med att inhämta statusinformation" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:72 #, fuzzy, c-format msgid "Unable to create jobs table in data base (%s)" msgstr "Misslyckades med att inhämta statusinformation" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:80 #, fuzzy, c-format msgid "Unable to create index for jobs table in data base (%s)" msgstr "Misslyckades med att inhämta statusinformation" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:88 #, fuzzy, c-format msgid "Failed checking database (%s)" msgstr "Misslyckades med att läsa data" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:113 #, fuzzy, c-format msgid "Error from SQLite: %s: %s" msgstr "Fel vid sökning av replikor: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:116 #, fuzzy, c-format msgid "Error from SQLite: %s" msgstr " Fel: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:246 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:253 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:260 #, fuzzy, c-format msgid "Unable to write records into job database (%s): Id \"%s\"" msgstr "Misslyckades med att inhämta LFN/PFN-lista från %s" #: src/hed/libs/compute/JobInformationStorageXML.cpp:51 #: src/hed/libs/compute/JobInformationStorageXML.cpp:223 #: src/hed/libs/compute/JobInformationStorageXML.cpp:264 #, fuzzy, c-format msgid "Waiting for lock on job list file %s" msgstr "Använder jobblistfil: %s" #: src/hed/libs/compute/JobInformationStorageXML.cpp:162 #, c-format 
msgid "Will remove %s on service %s." msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:36 msgid "Ignoring job, the job ID is empty" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:41 #, c-format msgid "Ignoring job (%s), the management interface name is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:46 #, c-format msgid "Ignoring job (%s), the job management URL is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:51 #, c-format msgid "Ignoring job (%s), the status interface name is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:56 #, c-format msgid "Ignoring job (%s), the job status URL is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:65 #, c-format msgid "Ignoring job (%s), unable to load JobControllerPlugin for %s" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:72 #, c-format msgid "" "Ignoring job (%s), already tried and were unable to load JobControllerPlugin" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:385 #, fuzzy, c-format msgid "Job resubmission failed: Unable to load broker (%s)" msgstr "Jobbinsändning misslyckades, inga fler möjliga destinationer" #: src/hed/libs/compute/JobSupervisor.cpp:400 #, fuzzy msgid "Job resubmission aborted because no resource returned any information" msgstr "" "Jobbinsändning misslyckades p.g.a. att inget av de angivna klustren " "returnerade någon information" #: src/hed/libs/compute/JobSupervisor.cpp:421 #, c-format msgid "Unable to resubmit job (%s), unable to parse obtained job description" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:443 #, c-format msgid "" "Unable to resubmit job (%s), target information retrieval failed for target: " "%s" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:469 #, c-format msgid "Unable to resubmit job (%s), no targets applicable for submission" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:504 #, fuzzy, c-format msgid "" "Unable to migrate job (%s), job description could not be retrieved remotely" msgstr "Jobstatus kunde inte inhämtas" #: src/hed/libs/compute/JobSupervisor.cpp:524 #, fuzzy msgid "Job migration aborted, no resource returned any information" msgstr "" "Jobbinsändning misslyckades p.g.a. att inget av de angivna klustren " "returnerade någon information" #: src/hed/libs/compute/JobSupervisor.cpp:536 #, fuzzy, c-format msgid "Job migration aborted, unable to load broker (%s)" msgstr "Misslyckades med att läsa object: %s" #: src/hed/libs/compute/JobSupervisor.cpp:552 #, c-format msgid "Unable to migrate job (%s), unable to parse obtained job description" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:573 #, c-format msgid "Unable to load submission plugin for %s interface" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:583 #, fuzzy, c-format msgid "Job migration failed for job (%s), no applicable targets" msgstr "Jobbinsändning misslyckades, inga fler möjliga destinationer" #: src/hed/libs/compute/Software.cpp:65 src/hed/libs/compute/Software.cpp:94 #: src/hed/libs/compute/Software.cpp:113 #, c-format msgid "%s > %s => false" msgstr "" #: src/hed/libs/compute/Software.cpp:70 src/hed/libs/compute/Software.cpp:83 #: src/hed/libs/compute/Software.cpp:107 #, fuzzy, c-format msgid "%s > %s => true" msgstr "%s -> %s" #: src/hed/libs/compute/Software.cpp:90 src/hed/libs/compute/Software.cpp:102 #, c-format msgid "%s > %s => false: %s contains non numbers in the version part." 
msgstr "" #: src/hed/libs/compute/Software.cpp:206 src/hed/libs/compute/Software.cpp:217 #, c-format msgid "Requirement \"%s %s\" NOT satisfied." msgstr "" #: src/hed/libs/compute/Software.cpp:212 #, c-format msgid "Requirement \"%s %s\" satisfied." msgstr "" #: src/hed/libs/compute/Software.cpp:221 #, c-format msgid "Requirement \"%s %s\" satisfied by \"%s\"." msgstr "" #: src/hed/libs/compute/Software.cpp:226 msgid "All requirements satisfied." msgstr "" #: src/hed/libs/compute/Submitter.cpp:83 #, fuzzy, c-format msgid "Trying to submit directly to endpoint (%s)" msgstr "Försöker lyssna på port %s" #: src/hed/libs/compute/Submitter.cpp:88 #, c-format msgid "Interface (%s) specified, submitting only to that interface" msgstr "" #: src/hed/libs/compute/Submitter.cpp:106 #, fuzzy msgid "Trying all available interfaces" msgstr "Skapar klientgränssnitt" #: src/hed/libs/compute/Submitter.cpp:112 #, c-format msgid "Trying to submit endpoint (%s) using interface (%s) with plugin (%s)." msgstr "" #: src/hed/libs/compute/Submitter.cpp:116 #, c-format msgid "" "Unable to load plugin (%s) for interface (%s) when trying to submit job " "description." msgstr "" #: src/hed/libs/compute/Submitter.cpp:130 #, c-format msgid "No more interfaces to try for endpoint %s." msgstr "" #: src/hed/libs/compute/Submitter.cpp:336 #, fuzzy, c-format msgid "Target %s does not match requested interface(s)." msgstr "Det finns inget svar" #: src/hed/libs/compute/SubmitterPlugin.cpp:54 msgid "No stagein URL is provided" msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:73 #, fuzzy, c-format msgid "Failed uploading file %s to %s: %s" msgstr "Misslyckades med att ladda upp fil: %s" #: src/hed/libs/compute/SubmitterPlugin.cpp:103 #, fuzzy, c-format msgid "Trying to migrate to %s: Migration to a %s interface is not supported." msgstr "SOAP-process understöds inte: %s" #: src/hed/libs/compute/SubmitterPlugin.cpp:159 #, c-format msgid "SubmitterPlugin plugin \"%s\" not found." msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:169 #, fuzzy, c-format msgid "SubmitterPlugin %s could not be created" msgstr "Service %s(%s) kunde inte skapas" #: src/hed/libs/compute/SubmitterPlugin.cpp:174 #, fuzzy, c-format msgid "Loaded SubmitterPlugin %s" msgstr "Laddade Service %s(%s)" #: src/hed/libs/compute/examples/basic_job_submission.cpp:28 #, fuzzy msgid "Invalid job description" msgstr "Ogiltig jobbeskrivning:" #: src/hed/libs/compute/examples/basic_job_submission.cpp:47 #, fuzzy msgid "Failed to submit job" msgstr "Misslyckades med att sända in jobb" #: src/hed/libs/compute/examples/basic_job_submission.cpp:54 #, fuzzy, c-format msgid "Failed to write to local job list %s" msgstr "Misslyckades med att skriva fil %s: %s" #: src/hed/libs/compute/test_jobdescription.cpp:20 #, fuzzy msgid "[job description ...]" msgstr "[jobb ...]" #: src/hed/libs/compute/test_jobdescription.cpp:21 msgid "" "This tiny tool can be used for testing the JobDescription's conversion " "abilities." msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:23 msgid "" "The job description also can be a file or a string in JDL, POSIX JSDL, JSDL, " "or XRSL format." 
msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:27 msgid "" "define the requested format (nordugrid:jsdl, egee:jdl, nordugrid:xrsl, emies:" "adl)" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:28 msgid "format" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:33 #, fuzzy msgid "show the original job description" msgstr "Kan inte öppna jobbeskrivningsfil: %s" #: src/hed/libs/compute/test_jobdescription.cpp:43 msgid "Use --help option for detailed usage information" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:50 #, fuzzy msgid " [ JobDescription tester ] " msgstr "Ogiltig jobbeskrivning:" #: src/hed/libs/compute/test_jobdescription.cpp:74 msgid " [ Parsing the original text ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:80 #, fuzzy msgid "Unable to parse." msgstr "Misslyckades med att läsa object: %s" #: src/hed/libs/compute/test_jobdescription.cpp:89 msgid " [ egee:jdl ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:91 msgid " [ emies:adl ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:93 msgid " [ nordugrid:jsdl ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:95 msgid " [ nordugrid:xrsl ] " msgstr "" #: src/hed/libs/credential/ARCProxyUtil.cpp:138 #, fuzzy msgid "VOMS command is empty" msgstr "mail är tom" #: src/hed/libs/credential/ARCProxyUtil.cpp:424 #: src/hed/libs/credential/ARCProxyUtil.cpp:1431 #, fuzzy msgid "Failed to sign proxy" msgstr "Misslyckades med att städa upp efter jobb" #: src/hed/libs/credential/ARCProxyUtil.cpp:1317 #, c-format msgid "Please choose the NSS database you would use (1-%d): " msgstr "" #: src/hed/libs/credential/ARCProxyUtil.cpp:1353 #: src/hed/libs/credential/ARCProxyUtil.cpp:1460 #, fuzzy msgid "Failed to generate X509 request with NSS" msgstr "Kan inte generera X509-begäran" #: src/hed/libs/credential/ARCProxyUtil.cpp:1364 #: src/hed/libs/credential/ARCProxyUtil.cpp:1471 #: src/hed/libs/credential/ARCProxyUtil.cpp:1512 #, fuzzy msgid "Failed to create X509 certificate with NSS" msgstr "Misslyckades med att läsa certifikatfil: %s" #: src/hed/libs/credential/ARCProxyUtil.cpp:1376 #: src/hed/libs/credential/ARCProxyUtil.cpp:1483 #: src/hed/libs/credential/ARCProxyUtil.cpp:1536 #, fuzzy msgid "Failed to export X509 certificate from NSS DB" msgstr "Misslyckades med att läsa certifikatfil: %s" #: src/hed/libs/credential/ARCProxyUtil.cpp:1519 #, fuzzy msgid "Failed to import X509 certificate into NSS DB" msgstr "" "Misslyckades med att skriva det signerade proxycertifikatet till en fil" #: src/hed/libs/credential/ARCProxyUtil.cpp:1528 #, fuzzy msgid "Failed to initialize the credential configuration" msgstr "Misslyckades med att hitta delegeringskreditiv i klientinställningar" #: src/hed/libs/credential/CertUtil.cpp:166 #, fuzzy, c-format msgid "Error number in store context: %i" msgstr "felaktigt nummer i speedcontrol" #: src/hed/libs/credential/CertUtil.cpp:167 #, fuzzy msgid "Self-signed certificate" msgstr "Lyckades verifiera det signerade certifikatet" #: src/hed/libs/credential/CertUtil.cpp:170 #, c-format msgid "The certificate with subject %s is not valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:173 #, fuzzy, c-format msgid "" "Can not find issuer certificate for the certificate with subject %s and " "hash: %lu" msgstr "Kan inte sätta in certifikat %s i certifikatets utfärdarkedja" #: src/hed/libs/credential/CertUtil.cpp:176 #, fuzzy, c-format msgid "Certificate with subject %s has expired" msgstr "Certifikat kan inte extraheras" #: 
src/hed/libs/credential/CertUtil.cpp:179 #, c-format msgid "" "Untrusted self-signed certificate in chain with subject %s and hash: %lu" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:181 #, fuzzy, c-format msgid "Certificate verification error: %s" msgstr "Certifikatverifiering misslyckades" #: src/hed/libs/credential/CertUtil.cpp:193 #, fuzzy msgid "Can not get the certificate type" msgstr "Kan inte läsa certifikatfil: %s" #: src/hed/libs/credential/CertUtil.cpp:233 msgid "Couldn't verify availability of CRL" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:246 msgid "In the available CRL the lastUpdate field is not valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:253 #, fuzzy msgid "The available CRL is not yet valid" msgstr "%s är inte en giltig URL" #: src/hed/libs/credential/CertUtil.cpp:262 msgid "In the available CRL, the nextUpdate field is not valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:268 msgid "The available CRL has expired" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:291 #, c-format msgid "Certificate with serial number %s and subject \"%s\" is revoked" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:309 msgid "" "Directory of trusted CAs is not specified/found; Using current path as the " "CA direcroty" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:318 #, fuzzy msgid "Can't allocate memory for CA policy path" msgstr "Kan inte skapa BIO för att tolka begäran" #: src/hed/libs/credential/CertUtil.cpp:364 #, c-format msgid "Certificate has unknown extension with numeric ID %u and SN %s" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:378 #: src/hed/libs/credential/Credential.cpp:1693 #, fuzzy msgid "" "Can not convert DER encoded PROXY_CERT_INFO_EXTENSION extension to internal " "format" msgstr "Kan inte omvandla DER-kodat PROXYCERTINFO-tillägg till internt format" #: src/hed/libs/credential/CertUtil.cpp:424 msgid "Trying to check X509 cert with check_cert_type" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:465 #, fuzzy msgid "Can't convert DER encoded PROXYCERTINFO extension to internal form" msgstr "Kan inte omvandla DER-kodat PROXYCERTINFO-tillägg till internt format" #: src/hed/libs/credential/CertUtil.cpp:469 #, fuzzy msgid "Can't get policy from PROXYCERTINFO extension" msgstr "Kan inte erhålla policy från PROXYCERTINFO-tillägg" #: src/hed/libs/credential/CertUtil.cpp:473 #, fuzzy msgid "Can't get policy language from PROXYCERTINFO extension" msgstr "Kan inte erhålla policy-språk från PROXYCERTINFO-tillägg" #: src/hed/libs/credential/CertUtil.cpp:505 msgid "The subject does not match the issuer name + proxy CN entry" msgstr "" #: src/hed/libs/credential/Credential.cpp:73 #, fuzzy, c-format msgid "OpenSSL error string: %s" msgstr "OpenSSL-fel: %s" #: src/hed/libs/credential/Credential.cpp:196 msgid "Can't get the first byte of input to determine its format" msgstr "" #: src/hed/libs/credential/Credential.cpp:210 #, fuzzy msgid "Can't reset the input" msgstr "%s: Misslyckades med att läsa lista med indatafiler." 
#: src/hed/libs/credential/Credential.cpp:236 #: src/hed/libs/credential/Credential.cpp:273 msgid "Can't get the first byte of input BIO to get its format" msgstr "" #: src/hed/libs/credential/Credential.cpp:248 #, fuzzy msgid "Can not read certificate/key string" msgstr "Kan inte läsa certifikatfil: %s" #: src/hed/libs/credential/Credential.cpp:456 #, fuzzy, c-format msgid "Can not find certificate file: %s" msgstr "Kan inte läsa certifikatfil: %s" #: src/hed/libs/credential/Credential.cpp:461 #, c-format msgid "Can not read certificate file: %s" msgstr "Kan inte läsa certifikatfil: %s" #: src/hed/libs/credential/Credential.cpp:499 #, fuzzy msgid "Can not read certificate string" msgstr "Kan inte läsa certifikatfil: %s" #: src/hed/libs/credential/Credential.cpp:519 #, fuzzy msgid "Certificate format is PEM" msgstr "Certifikatformat för BIO är: %d" #: src/hed/libs/credential/Credential.cpp:546 #, fuzzy msgid "Certificate format is DER" msgstr "Certifikatformat för BIO är: %d" #: src/hed/libs/credential/Credential.cpp:575 #, fuzzy msgid "Certificate format is PKCS" msgstr "Certifikatformat för BIO är: %d" #: src/hed/libs/credential/Credential.cpp:602 #, fuzzy msgid "Certificate format is unknown" msgstr "Certifikatformat för BIO är: %d" #: src/hed/libs/credential/Credential.cpp:610 #, fuzzy, c-format msgid "Can not find key file: %s" msgstr "Kan inte läsa nyckelfil: %s" #: src/hed/libs/credential/Credential.cpp:615 #, fuzzy, c-format msgid "Can not open key file %s" msgstr "Kan inte läsa nyckelfil: %s" #: src/hed/libs/credential/Credential.cpp:634 #, fuzzy msgid "Can not read key string" msgstr "Kan inte läsa nyckelfil: %s" #: src/hed/libs/credential/Credential.cpp:697 #: src/hed/libs/credential/VOMSUtil.cpp:258 #, fuzzy msgid "Failed to lock arccredential library in memory" msgstr "Misslyckades med att koppla upp för att städa upp jobb" #: src/hed/libs/credential/Credential.cpp:712 msgid "Certificate verification succeeded" msgstr "Certifikatverifiering lyckades" #: src/hed/libs/credential/Credential.cpp:716 msgid "Certificate verification failed" msgstr "Certifikatverifiering misslyckades" #: src/hed/libs/credential/Credential.cpp:731 #: src/hed/libs/credential/Credential.cpp:751 #: src/hed/libs/credential/Credential.cpp:771 #: src/hed/libs/credential/Credential.cpp:1003 #: src/hed/libs/credential/Credential.cpp:2314 #: src/hed/libs/credential/Credential.cpp:2345 #, fuzzy msgid "Failed to initialize extensions member for Credential" msgstr "Misslyckades med att initiera delegering" #: src/hed/libs/credential/Credential.cpp:814 #, fuzzy, c-format msgid "Unsupported proxy policy language is requested - %s" msgstr "Icke understött protokoll i url %s" #: src/hed/libs/credential/Credential.cpp:826 #, fuzzy, c-format msgid "Unsupported proxy version is requested - %s" msgstr "Icke understött protokoll i url %s" #: src/hed/libs/credential/Credential.cpp:837 msgid "If you specify a policy you also need to specify a policy language" msgstr "" #: src/hed/libs/credential/Credential.cpp:1008 #, fuzzy msgid "Certificate/Proxy path is empty" msgstr "Certifikatformat för BIO är: %d" #: src/hed/libs/credential/Credential.cpp:1067 #: src/hed/libs/credential/Credential.cpp:2856 msgid "Failed to duplicate extension" msgstr "Misslyckades med att duplicera tillägg" #: src/hed/libs/credential/Credential.cpp:1071 #, fuzzy msgid "Failed to add extension into credential extensions" msgstr "Misslyckades med att lägga till tillägg till proxyn" #: src/hed/libs/credential/Credential.cpp:1082 #, fuzzy msgid 
"Certificate information collection failed" msgstr "Certifikatverifiering misslyckades" #: src/hed/libs/credential/Credential.cpp:1124 #: src/hed/libs/credential/Credential.cpp:1129 msgid "Can not convert string into ASN1_OBJECT" msgstr "Kan inte omvandla sträng till ASN1_OBJECT" #: src/hed/libs/credential/Credential.cpp:1141 msgid "Can not create extension for proxy certificate" msgstr "Kan inte skapa tillägg för proxycertifikat" #: src/hed/libs/credential/Credential.cpp:1180 #: src/hed/libs/credential/Credential.cpp:1348 msgid "BN_set_word failed" msgstr "BN_set_word misslyckades" #: src/hed/libs/credential/Credential.cpp:1189 #: src/hed/libs/credential/Credential.cpp:1357 msgid "RSA_generate_key_ex failed" msgstr "RSA_generate_key_ex misslyckades" #: src/hed/libs/credential/Credential.cpp:1198 #: src/hed/libs/credential/Credential.cpp:1365 msgid "BN_new || RSA_new failed" msgstr "BN_new || RSA_new misslyckades" #: src/hed/libs/credential/Credential.cpp:1209 msgid "Created RSA key, proceeding with request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1214 msgid "pkey and rsa_key exist!" msgstr "" #: src/hed/libs/credential/Credential.cpp:1217 #, fuzzy msgid "Generate new X509 request!" msgstr "Kan inte generera X509-begäran" #: src/hed/libs/credential/Credential.cpp:1222 msgid "Setting subject name!" msgstr "" #: src/hed/libs/credential/Credential.cpp:1230 #: src/hed/libs/credential/Credential.cpp:1444 msgid "PEM_write_bio_X509_REQ failed" msgstr "PEM_write_bio_X509_REQ misslyckades" #: src/hed/libs/credential/Credential.cpp:1260 #: src/hed/libs/credential/Credential.cpp:1301 #: src/hed/libs/credential/Credential.cpp:1476 #: src/hed/libs/credential/Credential.cpp:1496 msgid "Can not create BIO for request" msgstr "Kan inte skapa BIO för begäran" #: src/hed/libs/credential/Credential.cpp:1278 #, fuzzy msgid "Failed to write request into string" msgstr "Misslyckades med att skriva begäran till en fil" #: src/hed/libs/credential/Credential.cpp:1305 #: src/hed/libs/credential/Credential.cpp:1310 #: src/hed/libs/credential/Credential.cpp:1500 msgid "Can not set writable file for request BIO" msgstr "Kan inte ange skrivbar fil för begärans BIO" #: src/hed/libs/credential/Credential.cpp:1316 #: src/hed/libs/credential/Credential.cpp:1505 msgid "Wrote request into a file" msgstr "Skrev begäran till en fil" #: src/hed/libs/credential/Credential.cpp:1318 #: src/hed/libs/credential/Credential.cpp:1508 msgid "Failed to write request into a file" msgstr "Misslyckades med att skriva begäran till en fil" #: src/hed/libs/credential/Credential.cpp:1338 msgid "The credential's private key has already been initialized" msgstr "Kreditivets privata nyckel har redan initialiserats" #: src/hed/libs/credential/Credential.cpp:1386 msgid "" "Can not duplicate the subject name for the self-signing proxy certificate " "request" msgstr "" "Kan inte duplicera subjektnamnet för den självsignerande " "proxycertifikatbegäran" #: src/hed/libs/credential/Credential.cpp:1396 msgid "Can not create a new X509_NAME_ENTRY for the proxy certificate request" msgstr "Kan inte skapa en ny X509_NAME_ENTRY för proxycertifikatbegäran" #: src/hed/libs/credential/Credential.cpp:1414 #: src/hed/libs/credential/Credential.cpp:1421 #: src/hed/libs/credential/Credential.cpp:1943 #: src/hed/libs/credential/Credential.cpp:1951 #, fuzzy msgid "" "Can not convert PROXY_CERT_INFO_EXTENSION struct from internal to DER " "encoded format" msgstr "" "Kan inte omvandla PROXYCERTINFO struct från internt till DER-kodat format" #: 
src/hed/libs/credential/Credential.cpp:1451 #, fuzzy msgid "Can't convert X509 request from internal to DER encoded format" msgstr "" "Kan inte omvandla PROXYCERTINFO struct från internt till DER-kodat format" #: src/hed/libs/credential/Credential.cpp:1461 msgid "Can not generate X509 request" msgstr "Kan inte generera X509-begäran" #: src/hed/libs/credential/Credential.cpp:1463 msgid "Can not set private key" msgstr "Kan inte ange privat nyckel" #: src/hed/libs/credential/Credential.cpp:1561 #, fuzzy msgid "Failed to get private key" msgstr "Misslyckades med att läsa nyckelfil: %s" #: src/hed/libs/credential/Credential.cpp:1580 msgid "Failed to get public key from RSA object" msgstr "" #: src/hed/libs/credential/Credential.cpp:1588 msgid "Failed to get public key from X509 object" msgstr "" #: src/hed/libs/credential/Credential.cpp:1595 #, fuzzy msgid "Failed to get public key" msgstr "Misslyckades med att hämta ftp-fil" #: src/hed/libs/credential/Credential.cpp:1634 #, fuzzy, c-format msgid "Certiticate chain number %d" msgstr "Certifikat kan inte extraheras" #: src/hed/libs/credential/Credential.cpp:1662 msgid "NULL BIO passed to InquireRequest" msgstr "NULL BIO skickad till InquireRequest" #: src/hed/libs/credential/Credential.cpp:1665 msgid "PEM_read_bio_X509_REQ failed" msgstr "PEM_read_bio_X509_REQ misslyckades" #: src/hed/libs/credential/Credential.cpp:1669 #, fuzzy msgid "d2i_X509_REQ_bio failed" msgstr "PEM_read_bio_X509_REQ misslyckades" #: src/hed/libs/credential/Credential.cpp:1702 #, fuzzy msgid "Can not get policy from PROXY_CERT_INFO_EXTENSION extension" msgstr "Kan inte erhålla policy från PROXYCERTINFO-tillägg" #: src/hed/libs/credential/Credential.cpp:1706 #, fuzzy msgid "Can not get policy language from PROXY_CERT_INFO_EXTENSION extension" msgstr "Kan inte erhålla policy-språk från PROXYCERTINFO-tillägg" #: src/hed/libs/credential/Credential.cpp:1722 #, fuzzy, c-format msgid "Cert Type: %d" msgstr " Avslutningstid: %s" #: src/hed/libs/credential/Credential.cpp:1735 #: src/hed/libs/credential/Credential.cpp:1754 msgid "Can not create BIO for parsing request" msgstr "Kan inte skapa BIO för att tolka begäran" #: src/hed/libs/credential/Credential.cpp:1740 msgid "Read request from a string" msgstr "Läste begäran från en sträng" #: src/hed/libs/credential/Credential.cpp:1743 msgid "Failed to read request from a string" msgstr "Misslyckades med att läsa begäran från en sträng" #: src/hed/libs/credential/Credential.cpp:1758 msgid "Can not set readable file for request BIO" msgstr "Kunde inte ange läsbar fil för begärans BIO" #: src/hed/libs/credential/Credential.cpp:1763 msgid "Read request from a file" msgstr "Läste begäran från en fil" #: src/hed/libs/credential/Credential.cpp:1766 msgid "Failed to read request from a file" msgstr "Misslyckades med att läsa begäran från en fil" #: src/hed/libs/credential/Credential.cpp:1806 msgid "Can not convert private key to DER format" msgstr "Kan inte omvandla privat nyckel till DER-format" #: src/hed/libs/credential/Credential.cpp:1924 #, fuzzy msgid "Credential is not initialized" msgstr "DelegateProxy misslyckades" #: src/hed/libs/credential/Credential.cpp:1930 #, fuzzy msgid "Failed to duplicate X509 structure" msgstr "Misslyckades med att initiera X509-struktur" #: src/hed/libs/credential/Credential.cpp:1935 msgid "Failed to initialize X509 structure" msgstr "Misslyckades med att initiera X509-struktur" #: src/hed/libs/credential/Credential.cpp:1958 #, fuzzy msgid "Can not create extension for PROXY_CERT_INFO" msgstr "Kan inte 
skapa tillägg för proxycertifikat" #: src/hed/libs/credential/Credential.cpp:1962 #: src/hed/libs/credential/Credential.cpp:2010 msgid "Can not add X509 extension to proxy cert" msgstr "Kan inte lägga till X509-tillägg till proxycertifikat" #: src/hed/libs/credential/Credential.cpp:1978 msgid "Can not convert keyUsage struct from DER encoded format" msgstr "Kan inte omvandla keyUsage struct från DER-kodat format" #: src/hed/libs/credential/Credential.cpp:1990 #: src/hed/libs/credential/Credential.cpp:1999 msgid "Can not convert keyUsage struct from internal to DER format" msgstr "Kan inte omvandla keyUsage struct från internt till DER-format" #: src/hed/libs/credential/Credential.cpp:2006 #, fuzzy msgid "Can not create extension for keyUsage" msgstr "Kan inte skapa tillägg för proxycertifikat" #: src/hed/libs/credential/Credential.cpp:2019 msgid "Can not get extended KeyUsage extension from issuer certificate" msgstr "Kan inte erhålla utökad KeyUsage-tillägg från utfärdarcertifikatet" #: src/hed/libs/credential/Credential.cpp:2024 msgid "Can not copy extended KeyUsage extension" msgstr "Kan inte kopiera det utökade KeyUsage-tillägget" #: src/hed/libs/credential/Credential.cpp:2029 msgid "Can not add X509 extended KeyUsage extension to new proxy certificate" msgstr "" "Kan inte lägga till X509-utökat KeyUsage-tillägg till det nya " "proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2039 #, fuzzy msgid "Can not compute digest of public key" msgstr "Misslyckades med att hämta ftp-fil" #: src/hed/libs/credential/Credential.cpp:2050 msgid "Can not copy the subject name from issuer for proxy certificate" msgstr "Kan inte kopiera subjektnamnet från utfärdaren för proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2056 msgid "Can not create name entry CN for proxy certificate" msgstr "Kan inte skapa namnpost CN för proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2061 msgid "Can not set CN in proxy certificate" msgstr "Kan inte ange CN i proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2069 msgid "Can not set issuer's subject for proxy certificate" msgstr "Kan inte ange utfärdarens subjekt för proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2074 msgid "Can not set version number for proxy certificate" msgstr "Kan inte ange versionsnummer för proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2082 msgid "Can not set serial number for proxy certificate" msgstr "Kan inte ange serienummer för proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2088 #, fuzzy msgid "Can not duplicate serial number for proxy certificate" msgstr "Kan inte ange serienummer för proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2094 msgid "Can not set the lifetime for proxy certificate" msgstr "Kan inte ange livstid för proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2098 msgid "Can not set pubkey for proxy certificate" msgstr "Kan inte ange publik nyckel för proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2114 #: src/hed/libs/credential/Credential.cpp:2744 msgid "The credential to be signed is NULL" msgstr "Kreditivet som skall signeras är NULL" #: src/hed/libs/credential/Credential.cpp:2118 #: src/hed/libs/credential/Credential.cpp:2748 #, fuzzy msgid "The credential to be signed contains no request" msgstr "Kreditivet som skall signeras är NULL" #: src/hed/libs/credential/Credential.cpp:2122 #: src/hed/libs/credential/Credential.cpp:2752 msgid "The BIO for output is NULL" msgstr "Utdata-BIO är NULL" #: 
src/hed/libs/credential/Credential.cpp:2136 #: src/hed/libs/credential/Credential.cpp:2759 msgid "Error when extracting public key from request" msgstr "Fel när publik nyckel extraheras från begäran" #: src/hed/libs/credential/Credential.cpp:2141 #: src/hed/libs/credential/Credential.cpp:2763 msgid "Failed to verify the request" msgstr "Misslyckades med att verifiera begäran" #: src/hed/libs/credential/Credential.cpp:2145 msgid "Failed to add issuer's extension into proxy" msgstr "Misslyckades med att lägga till utfärdarens tillägg till proxyn" #: src/hed/libs/credential/Credential.cpp:2169 #, fuzzy msgid "Failed to find extension" msgstr "Misslyckades med att duplicera tillägg" #: src/hed/libs/credential/Credential.cpp:2181 msgid "Can not get the issuer's private key" msgstr "Kan inte erhålla utfärdarens privata nyckel" #: src/hed/libs/credential/Credential.cpp:2188 #: src/hed/libs/credential/Credential.cpp:2796 #, fuzzy msgid "There is no digest in issuer's private key object" msgstr "Kan inte erhålla utfärdarens privata nyckel" #: src/hed/libs/credential/Credential.cpp:2193 #: src/hed/libs/credential/Credential.cpp:2800 #, c-format msgid "%s is an unsupported digest type" msgstr "" #: src/hed/libs/credential/Credential.cpp:2204 #, fuzzy, c-format msgid "" "The signing algorithm %s is not allowed,it should be SHA1 or SHA2 to sign " "certificate requests" msgstr "" "Signeringsalgoritmen %s är ej tillåten, den skall vara MD5 för att signera " "certifikatbegäran" #: src/hed/libs/credential/Credential.cpp:2210 msgid "Failed to sign the proxy certificate" msgstr "Misslyckades med att signera proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2212 msgid "Succeeded to sign the proxy certificate" msgstr "Lyckades signera proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2217 msgid "Failed to verify the signed certificate" msgstr "Misslyckades med att verifiera det signerade certifikatet" #: src/hed/libs/credential/Credential.cpp:2219 msgid "Succeeded to verify the signed certificate" msgstr "Lyckades verifiera det signerade certifikatet" #: src/hed/libs/credential/Credential.cpp:2224 #: src/hed/libs/credential/Credential.cpp:2233 msgid "Output the proxy certificate" msgstr "Skriv ut proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2227 #, fuzzy msgid "Can not convert signed proxy cert into PEM format" msgstr "Kan inte omvandla det signerade proxycertifikatet till DER-format" #: src/hed/libs/credential/Credential.cpp:2236 msgid "Can not convert signed proxy cert into DER format" msgstr "Kan inte omvandla det signerade proxycertifikatet till DER-format" #: src/hed/libs/credential/Credential.cpp:2252 #: src/hed/libs/credential/Credential.cpp:2275 msgid "Can not create BIO for signed proxy certificate" msgstr "Kan inte skapa BIO för det signerade proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2279 msgid "Can not set writable file for signed proxy certificate BIO" msgstr "Kan inte ange skrivbar fil för det signerade proxycertifikatets BIO" #: src/hed/libs/credential/Credential.cpp:2284 msgid "Wrote signed proxy certificate into a file" msgstr "Skrev det signerade proxycertifikatet till en fil" #: src/hed/libs/credential/Credential.cpp:2287 msgid "Failed to write signed proxy certificate into a file" msgstr "" "Misslyckades med att skriva det signerade proxycertifikatet till en fil" #: src/hed/libs/credential/Credential.cpp:2323 #: src/hed/libs/credential/Credential.cpp:2363 #, c-format msgid "ERROR:%s" msgstr "" #: 
src/hed/libs/credential/Credential.cpp:2371 #, fuzzy, c-format msgid "SSL error: %s, libs: %s, func: %s, reason: %s" msgstr "SSL-fel: %d - %s:%s:%s" #: src/hed/libs/credential/Credential.cpp:2416 #, c-format msgid "unable to load number from: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2421 msgid "error converting number from bin to BIGNUM" msgstr "" #: src/hed/libs/credential/Credential.cpp:2448 #, fuzzy msgid "file name too long" msgstr "filnamn" #: src/hed/libs/credential/Credential.cpp:2471 #, fuzzy msgid "error converting serial to ASN.1 format" msgstr "Kan inte omvandla privat nyckel till DER-format" #: src/hed/libs/credential/Credential.cpp:2504 #, c-format msgid "load serial from %s failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2509 #, fuzzy msgid "add_word failure" msgstr "BN_set_word misslyckades" #: src/hed/libs/credential/Credential.cpp:2514 #, c-format msgid "save serial to %s failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2534 #, fuzzy msgid "Error initialising X509 store" msgstr "Misslyckades med att initiera X509-struktur" #: src/hed/libs/credential/Credential.cpp:2541 msgid "Out of memory when generate random serial" msgstr "" #: src/hed/libs/credential/Credential.cpp:2553 msgid "CA certificate and CA private key do not match" msgstr "" #: src/hed/libs/credential/Credential.cpp:2577 #, fuzzy, c-format msgid "Failed to load extension section: %s" msgstr "Misslyckades med att lägga till tillägg till proxyn" #: src/hed/libs/credential/Credential.cpp:2614 #, fuzzy msgid "malloc error" msgstr "Minnesallokeringsfel" #: src/hed/libs/credential/Credential.cpp:2618 msgid "Subject does not start with '/'" msgstr "" #: src/hed/libs/credential/Credential.cpp:2634 #: src/hed/libs/credential/Credential.cpp:2655 msgid "escape character at end of string" msgstr "" #: src/hed/libs/credential/Credential.cpp:2646 #, c-format msgid "" "end of string encountered while processing type of subject name element #%d" msgstr "" #: src/hed/libs/credential/Credential.cpp:2683 #, c-format msgid "Subject Attribute %s has no known NID, skipped" msgstr "" #: src/hed/libs/credential/Credential.cpp:2687 #, c-format msgid "No value provided for Subject Attribute %s skipped" msgstr "" #: src/hed/libs/credential/Credential.cpp:2729 msgid "Failed to set the pubkey for X509 object by using pubkey from X509_REQ" msgstr "" #: src/hed/libs/credential/Credential.cpp:2739 #, fuzzy msgid "The private key for signing is not initialized" msgstr "Kreditivets privata nyckel har redan initialiserats" #: src/hed/libs/credential/Credential.cpp:2819 #, fuzzy, c-format msgid "Error when loading the extension config file: %s" msgstr "Formatteringsfel i låsfil %s: %s" #: src/hed/libs/credential/Credential.cpp:2823 #, c-format msgid "Error when loading the extension config file: %s on line: %d" msgstr "" #: src/hed/libs/credential/Credential.cpp:2872 #, fuzzy msgid "Can not sign a EEC" msgstr "Kan inte hitta jobb-id" #: src/hed/libs/credential/Credential.cpp:2876 #, fuzzy msgid "Output EEC certificate" msgstr "Skriv ut proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2879 #, fuzzy msgid "Can not convert signed EEC cert into DER format" msgstr "Kan inte omvandla det signerade proxycertifikatet till DER-format" #: src/hed/libs/credential/Credential.cpp:2893 #: src/hed/libs/credential/Credential.cpp:2912 #, fuzzy msgid "Can not create BIO for signed EEC certificate" msgstr "Kan inte skapa BIO för det signerade proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2916 #,
fuzzy msgid "Can not set writable file for signed EEC certificate BIO" msgstr "Kan inte ange skrivbar fil för det signerade proxycertifikatets BIO" #: src/hed/libs/credential/Credential.cpp:2921 #, fuzzy msgid "Wrote signed EEC certificate into a file" msgstr "Skrev det signerade proxycertifikatet till en fil" #: src/hed/libs/credential/Credential.cpp:2924 #, fuzzy msgid "Failed to write signed EEC certificate into a file" msgstr "" "Misslyckades med att skriva det signerade proxycertifikatet till en fil" #: src/hed/libs/credential/NSSUtil.cpp:147 #, fuzzy msgid "Error writing raw certificate" msgstr "Fel vid listning av replikor: %s" #: src/hed/libs/credential/NSSUtil.cpp:224 #, fuzzy msgid "Failed to add RFC proxy OID" msgstr "Misslyckades med att läsa proxy fil: %s" #: src/hed/libs/credential/NSSUtil.cpp:227 #, c-format msgid "Succeeded to add RFC proxy OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:233 msgid "Failed to add anyLanguage OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:236 #: src/hed/libs/credential/NSSUtil.cpp:254 #, c-format msgid "Succeeded to add anyLanguage OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:242 #, fuzzy msgid "Failed to add inheritAll OID" msgstr "Misslyckades med att avbryta jobb" #: src/hed/libs/credential/NSSUtil.cpp:245 #, c-format msgid "Succeeded to add inheritAll OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:251 #, fuzzy msgid "Failed to add Independent OID" msgstr "Misslyckades med att lägga till tillägg till proxyn" #: src/hed/libs/credential/NSSUtil.cpp:260 msgid "Failed to add VOMS AC sequence OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:263 #, c-format msgid "Succeeded to add VOMS AC sequence OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:292 #, c-format msgid "NSS initialization failed on certificate database: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:303 #, fuzzy msgid "Succeeded to initialize NSS" msgstr "Lyckades med att autenticera X509-token" #: src/hed/libs/credential/NSSUtil.cpp:325 #, fuzzy, c-format msgid "Failed to read attribute %x from private key." 
msgstr "Misslyckades med att läsa nyckelfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:377 #, fuzzy msgid "Succeeded to get credential" msgstr "Lyckades verifiera det signerade certifikatet" #: src/hed/libs/credential/NSSUtil.cpp:378 #, fuzzy msgid "Failed to get credential" msgstr "Misslyckades med att hämta ftp-fil" #: src/hed/libs/credential/NSSUtil.cpp:440 #, fuzzy msgid "p12 file is empty" msgstr "mail är tom" #: src/hed/libs/credential/NSSUtil.cpp:450 #, fuzzy msgid "Unable to write to p12 file" msgstr "Misslyckades med att skriva begäran till en fil" #: src/hed/libs/credential/NSSUtil.cpp:466 #, fuzzy msgid "Failed to open pk12 file" msgstr "Misslyckades med att spara ftp-fil" #: src/hed/libs/credential/NSSUtil.cpp:501 #, fuzzy msgid "Failed to allocate p12 context" msgstr "Misslyckades med förallokera utrymme" #: src/hed/libs/credential/NSSUtil.cpp:1211 #, fuzzy msgid "Failed to find issuer certificate for proxy certificate" msgstr "Misslyckades med att signera proxycertifikatet" #: src/hed/libs/credential/NSSUtil.cpp:1362 #, fuzzy, c-format msgid "Failed to authenticate to PKCS11 slot %s" msgstr "Misslyckades med autentisering" #: src/hed/libs/credential/NSSUtil.cpp:1368 #, fuzzy, c-format msgid "Failed to find certificates by nickname: %s" msgstr "Misslyckades med att läsa certifikatfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:1373 #, c-format msgid "No user certificate by nickname %s found" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1386 #: src/hed/libs/credential/NSSUtil.cpp:1422 #, fuzzy msgid "Certificate does not have a slot" msgstr "Certifikat är ingen fil: %s" #: src/hed/libs/credential/NSSUtil.cpp:1392 #, fuzzy msgid "Failed to create export context" msgstr "Misslyckades med att skapa GUID i RLS: %s" #: src/hed/libs/credential/NSSUtil.cpp:1407 msgid "PKCS12 output password not provided" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1414 msgid "PKCS12 add password integrity failed" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1435 #, fuzzy msgid "Failed to create key or certificate safe" msgstr "Misslyckades med att läsa certifikatfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:1451 #, fuzzy msgid "Failed to add certificate and key" msgstr "Misslyckades med att läsa certifikatfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:1460 #, fuzzy, c-format msgid "Failed to initialize PKCS12 file: %s" msgstr "Misslyckades med att skriva fil %s: %s" #: src/hed/libs/credential/NSSUtil.cpp:1465 #, fuzzy msgid "Failed to encode PKCS12" msgstr "Misslyckades med att städa upp efter jobb" #: src/hed/libs/credential/NSSUtil.cpp:1468 msgid "Succeeded to export PKCS12" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1496 #, c-format msgid "" "There is no certificate named %s found, the certificate could be removed " "when generating CSR" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1502 #, fuzzy msgid "Failed to delete certificate" msgstr "Misslyckades med att ta bort metainformation" #: src/hed/libs/credential/NSSUtil.cpp:1516 msgid "The name of the private key to delete is empty" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1521 #: src/hed/libs/credential/NSSUtil.cpp:1605 #, fuzzy, c-format msgid "Failed to authenticate to token %s." 
msgstr "Misslyckades med autentisering" #: src/hed/libs/credential/NSSUtil.cpp:1528 #, c-format msgid "No private key with nickname %s exist in NSS database" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1561 #, fuzzy msgid "Failed to delete private key and certificate" msgstr "Misslyckades med att verifiera det signerade certifikatet" #: src/hed/libs/credential/NSSUtil.cpp:1571 #, fuzzy msgid "Failed to delete private key" msgstr "Misslyckades med att läsa nyckelfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:1582 #, fuzzy, c-format msgid "Can not find key with name: %s" msgstr "Kan inte läsa nyckelfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:1616 #, fuzzy, c-format msgid "Failed to delete private key that attaches to certificate: %s" msgstr "Misslyckades med att läsa nyckelfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:1637 msgid "Can not read PEM private key: probably bad password" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1639 #, fuzzy msgid "Can not read PEM private key: failed to decrypt" msgstr "Misslyckades med att läsa nyckelfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:1641 #: src/hed/libs/credential/NSSUtil.cpp:1643 #, fuzzy msgid "Can not read PEM private key: failed to obtain password" msgstr "Misslyckades med att läsa nyckelfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:1644 #, fuzzy msgid "Can not read PEM private key" msgstr "Kan inte ange privat nyckel" #: src/hed/libs/credential/NSSUtil.cpp:1651 msgid "Failed to convert EVP_PKEY to PKCS8" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1688 #, fuzzy msgid "Failed to load private key" msgstr "Misslyckades med att läsa nyckelfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:1689 #, fuzzy msgid "Succeeded to load PrivateKeyInfo" msgstr "Lyckades med att skapa utvärderare" #: src/hed/libs/credential/NSSUtil.cpp:1692 #, fuzzy msgid "Failed to convert PrivateKeyInfo to EVP_PKEY" msgstr "Misslyckades med att läsa nyckelfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:1693 msgid "Succeeded to convert PrivateKeyInfo to EVP_PKEY" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1730 #, fuzzy msgid "Failed to import private key" msgstr "Misslyckades med att läsa nyckelfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:1733 #, fuzzy msgid "Succeeded to import private key" msgstr "Misslyckades med att läsa nyckelfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:1746 #: src/hed/libs/credential/NSSUtil.cpp:1788 #: src/hed/libs/credential/NSSUtil.cpp:2920 #, fuzzy msgid "Failed to authenticate to key database" msgstr "Misslyckades med autentisering" #: src/hed/libs/credential/NSSUtil.cpp:1755 #, fuzzy msgid "Succeeded to generate public/private key pair" msgstr "Misslyckades med att läsa nyckelfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:1757 #, fuzzy msgid "Failed to generate public/private key pair" msgstr "Misslyckades med att läsa nyckelfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:1762 #, fuzzy msgid "Failed to export private key" msgstr "Misslyckades med att läsa nyckelfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:1829 #, fuzzy msgid "Failed to create subject name" msgstr "Misslyckades med att läsa object: %s" #: src/hed/libs/credential/NSSUtil.cpp:1845 #, fuzzy msgid "Failed to create certificate request" msgstr "Misslyckades med att läsa certifikatfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:1858 #, fuzzy msgid "Failed to call PORT_NewArena" msgstr "Misslyckades med att starta ny tråd" #: src/hed/libs/credential/NSSUtil.cpp:1866 #, fuzzy msgid "Failed to encode the certificate request with DER format" msgstr 
"Kan inte ladda ARC-begäranobjekt: %s" #: src/hed/libs/credential/NSSUtil.cpp:1873 msgid "Unknown key or hash type" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1879 #, fuzzy msgid "Failed to sign the certificate request" msgstr "Misslyckades med att verifiera begäran" #: src/hed/libs/credential/NSSUtil.cpp:1895 #, fuzzy msgid "Failed to output the certificate request as ASCII format" msgstr "Kan inte ladda ARC-begäranobjekt: %s" #: src/hed/libs/credential/NSSUtil.cpp:1904 #, fuzzy msgid "Failed to output the certificate request as DER format" msgstr "Kan inte ladda ARC-begäranobjekt: %s" #: src/hed/libs/credential/NSSUtil.cpp:1913 #, fuzzy, c-format msgid "Succeeded to output the certificate request into %s" msgstr "Lyckades signera proxycertifikatet" #: src/hed/libs/credential/NSSUtil.cpp:1952 #: src/hed/libs/credential/NSSUtil.cpp:1989 #, fuzzy msgid "Failed to read data from input file" msgstr "%s: Misslyckades med att läsa lista med indatafiler." #: src/hed/libs/credential/NSSUtil.cpp:1968 msgid "Input is without trailer\n" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1979 #, fuzzy msgid "Failed to convert ASCII to DER" msgstr "Misslyckades med att koppla upp mot %s(%s):%i" #: src/hed/libs/credential/NSSUtil.cpp:2030 #, fuzzy msgid "Certificate request is invalid" msgstr "Certifikatformat för BIO är: %d" #: src/hed/libs/credential/NSSUtil.cpp:2252 #, fuzzy, c-format msgid "The policy language: %s is not supported" msgstr "process: %s: understöds inte" #: src/hed/libs/credential/NSSUtil.cpp:2260 #: src/hed/libs/credential/NSSUtil.cpp:2285 #: src/hed/libs/credential/NSSUtil.cpp:2308 #: src/hed/libs/credential/NSSUtil.cpp:2330 #, fuzzy msgid "Failed to new arena" msgstr "Misslyckades med att starta ny tråd" #: src/hed/libs/credential/NSSUtil.cpp:2269 #: src/hed/libs/credential/NSSUtil.cpp:2294 #, fuzzy msgid "Failed to create path length" msgstr "Misslyckades med att skapa GUID i RLS: %s" #: src/hed/libs/credential/NSSUtil.cpp:2272 #: src/hed/libs/credential/NSSUtil.cpp:2297 #: src/hed/libs/credential/NSSUtil.cpp:2317 #: src/hed/libs/credential/NSSUtil.cpp:2339 #, fuzzy msgid "Failed to create policy language" msgstr "Misslyckades med att hämta ftp-fil" #: src/hed/libs/credential/NSSUtil.cpp:2738 #, fuzzy, c-format msgid "Failed to parse certificate request from CSR file %s" msgstr "Misslyckades med att läsa begäran från en fil" #: src/hed/libs/credential/NSSUtil.cpp:2745 #, fuzzy, c-format msgid "Can not find certificate with name %s" msgstr "Kan inte läsa certifikatfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:2776 #, fuzzy, c-format msgid "Proxy subject: %s" msgstr "subjekt: %s" #: src/hed/libs/credential/NSSUtil.cpp:2795 #, fuzzy msgid "Failed to start certificate extension" msgstr "Misslyckades med att duplicera tillägg" #: src/hed/libs/credential/NSSUtil.cpp:2800 #, fuzzy msgid "Failed to add key usage extension" msgstr "Misslyckades med att duplicera tillägg" #: src/hed/libs/credential/NSSUtil.cpp:2805 #, fuzzy msgid "Failed to add proxy certificate information extension" msgstr "" "Misslyckades med att skriva det signerade proxycertifikatet till en fil" #: src/hed/libs/credential/NSSUtil.cpp:2809 #, fuzzy msgid "Failed to add voms AC extension" msgstr "Misslyckades med att duplicera tillägg" #: src/hed/libs/credential/NSSUtil.cpp:2829 #, fuzzy msgid "Failed to retrieve private key for issuer" msgstr "Misslyckades med att läsa nyckelfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:2836 msgid "Unknown key or hash type of issuer" msgstr "" #: 
src/hed/libs/credential/NSSUtil.cpp:2842 #, fuzzy msgid "Failed to set signature algorithm ID" msgstr "Misslyckades med att verifiera signaturen under " #: src/hed/libs/credential/NSSUtil.cpp:2854 #, fuzzy msgid "Failed to encode certificate" msgstr "Misslyckades med att läsa certifikatfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:2860 #, fuzzy msgid "Failed to allocate item for certificate data" msgstr "Misslyckades med att signera proxycertifikatet" #: src/hed/libs/credential/NSSUtil.cpp:2866 #, fuzzy msgid "Failed to sign encoded certificate data" msgstr "Misslyckades med att signera proxycertifikatet" #: src/hed/libs/credential/NSSUtil.cpp:2875 #, fuzzy, c-format msgid "Failed to open file %s" msgstr "Misslyckades med att låsa upp fil %s: %s" #: src/hed/libs/credential/NSSUtil.cpp:2886 #, fuzzy, c-format msgid "Succeeded to output certificate to %s" msgstr "Lyckades signera proxycertifikatet" #: src/hed/libs/credential/NSSUtil.cpp:2927 #, fuzzy, c-format msgid "Failed to open input certificate file %s" msgstr "Misslyckades med att läsa certifikatfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:2945 #, fuzzy msgid "Failed to read input certificate file" msgstr "Misslyckades med att läsa certifikatfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:2950 #, fuzzy msgid "Failed to get certificate from certificate file" msgstr "Misslyckades med att läsa certifikatfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:2957 #, fuzzy msgid "Failed to allocate certificate trust" msgstr "Misslyckades med att läsa certifikatfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:2962 #, fuzzy msgid "Failed to decode trust string" msgstr "Misslyckades med att läsa begäran från en sträng" #: src/hed/libs/credential/NSSUtil.cpp:2971 #: src/hed/libs/credential/NSSUtil.cpp:2988 #, fuzzy, c-format msgid "Failed to authenticate to token %s" msgstr "Misslyckades med autentisering" #: src/hed/libs/credential/NSSUtil.cpp:2976 #: src/hed/libs/credential/NSSUtil.cpp:2993 #, fuzzy msgid "Failed to add certificate to token or database" msgstr "Misslyckades med att läsa certifikatfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:2979 #: src/hed/libs/credential/NSSUtil.cpp:2982 #, fuzzy msgid "Succeeded to import certificate" msgstr "Lyckades signera proxycertifikatet" #: src/hed/libs/credential/NSSUtil.cpp:2996 #: src/hed/libs/credential/NSSUtil.cpp:2999 #, fuzzy, c-format msgid "Succeeded to change trusts to: %s" msgstr "Lyckades med att skapa utvärderare" #: src/hed/libs/credential/NSSUtil.cpp:3026 #, fuzzy, c-format msgid "Failed to import private key from file: %s" msgstr "Misslyckades med att läsa nyckelfil: %s" #: src/hed/libs/credential/NSSUtil.cpp:3028 #, fuzzy, c-format msgid "Failed to import certificate from file: %s" msgstr "Misslyckades med att läsa certifikatfil: %s" #: src/hed/libs/credential/VOMSConfig.cpp:142 #, c-format msgid "" "ERROR: VOMS configuration line contains too many tokens. Expecting 5 or 6. " "Line was: %s" msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:158 #, c-format msgid "" "ERROR: file tree is too deep while scanning VOMS configuration. Max allowed " "nesting is %i." msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:176 #, fuzzy, c-format msgid "ERROR: failed to read file %s while scanning VOMS configuration." msgstr "Misslyckades med att ladda klientinställningar" #: src/hed/libs/credential/VOMSConfig.cpp:181 #, c-format msgid "" "ERROR: VOMS configuration file %s contains too many lines. Max supported " "number is %i." 
msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:188 #, c-format msgid "" "ERROR: VOMS configuration file %s contains too long line(s). Max supported " "length is %i characters." msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:185 #, fuzzy, c-format msgid "Failed to create OpenSSL object %s %s - %u %s" msgstr "Misslyckades med att läsa object: %s" #: src/hed/libs/credential/VOMSUtil.cpp:193 #, fuzzy, c-format msgid "Failed to obtain OpenSSL identifier for %s" msgstr "Misslyckades med att erhålla lista från ftp: %s" #: src/hed/libs/credential/VOMSUtil.cpp:346 #, c-format msgid "VOMS: create FQAN: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:384 #, fuzzy, c-format msgid "VOMS: create attribute: %s" msgstr " attribut:" #: src/hed/libs/credential/VOMSUtil.cpp:670 #, fuzzy msgid "VOMS: Can not allocate memory for parsing AC" msgstr "Kan inte skapa BIO för att tolka begäran" #: src/hed/libs/credential/VOMSUtil.cpp:678 #, fuzzy msgid "VOMS: Can not allocate memory for storing the order of AC" msgstr "Kan inte skapa BIO för att tolka begäran" #: src/hed/libs/credential/VOMSUtil.cpp:704 #, fuzzy msgid "VOMS: Can not parse AC" msgstr "Kan inte tolka PKCS12-fil" #: src/hed/libs/credential/VOMSUtil.cpp:734 msgid "VOMS: CA directory or CA file must be provided" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:798 #, fuzzy msgid "VOMS: failed to verify AC signature" msgstr "Misslyckades med att verifiera det signerade certifikatet" #: src/hed/libs/credential/VOMSUtil.cpp:867 #, c-format msgid "VOMS: trust chain to check: %s " msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:875 #, c-format msgid "" "VOMS: the DN in certificate: %s does not match that in trusted DN list: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:881 #, c-format msgid "" "VOMS: the Issuer identity in certificate: %s does not match that in trusted " "DN list: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:916 #, fuzzy, c-format msgid "VOMS: The lsc file %s does not exist" msgstr "Cachefil %s existerar inte" #: src/hed/libs/credential/VOMSUtil.cpp:922 #, c-format msgid "VOMS: The lsc file %s can not be open" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:970 msgid "" "VOMS: there is no constraints of trusted voms DNs, the certificates stack in " "AC will not be checked." 
msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1003 msgid "VOMS: unable to match certificate chain against VOMS trusted DNs" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1023 #, fuzzy msgid "VOMS: AC signature verification failed" msgstr "Certifikatverifiering misslyckades" #: src/hed/libs/credential/VOMSUtil.cpp:1032 #, fuzzy msgid "VOMS: unable to verify certificate chain" msgstr "Misslyckades med att verifiera det signerade certifikatet" #: src/hed/libs/credential/VOMSUtil.cpp:1038 #, fuzzy, c-format msgid "VOMS: cannot validate AC issuer for VO %s" msgstr "Kunde inte ladda certifikatfil - %s" #: src/hed/libs/credential/VOMSUtil.cpp:1061 #, c-format msgid "VOMS: directory for trusted service certificates: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1087 #, fuzzy, c-format msgid "VOMS: Cannot find certificate of AC issuer for VO %s" msgstr "Kunde inte ladda certifikatfil - %s" #: src/hed/libs/credential/VOMSUtil.cpp:1109 msgid "VOMS: Can not find AC_ATTR with IETFATTR type" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1116 msgid "VOMS: case of multiple IETFATTR attributes not supported" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1126 msgid "VOMS: case of multiple policyAuthority not supported" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1142 msgid "VOMS: the format of policyAuthority is unsupported - expecting URI" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1151 msgid "" "VOMS: the format of IETFATTRVAL is not supported - expecting OCTET STRING" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1228 msgid "VOMS: the grantor attribute is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1246 msgid "VOMS: the attribute name is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1252 #, fuzzy, c-format msgid "VOMS: the attribute value for %s is empty" msgstr " attribut:" #: src/hed/libs/credential/VOMSUtil.cpp:1257 msgid "VOMS: the attribute qualifier is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1289 msgid "" "VOMS: both idcenoRevAvail and authorityKeyIdentifier certificate extensions " "must be present" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1323 #, c-format msgid "VOMS: FQDN of this host %s does not match any target in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1328 msgid "VOMS: the only supported critical extension of the AC is idceTargets" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1343 #, fuzzy msgid "VOMS: failed to parse attributes from AC" msgstr "Varning: Misslyckades med att erhålla attribut från %s: %s" #: src/hed/libs/credential/VOMSUtil.cpp:1387 msgid "VOMS: authorityKey is wrong" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1415 msgid "VOMS: missing AC parts" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1432 msgid "VOMS: unsupported time format format in AC - expecting GENERALIZED TIME" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1438 #, fuzzy msgid "VOMS: AC is not yet valid" msgstr "%s är inte en giltig URL" #: src/hed/libs/credential/VOMSUtil.cpp:1445 msgid "VOMS: AC has expired" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1460 msgid "VOMS: AC is not complete - missing Serial or Issuer information" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1465 #, c-format msgid "VOMS: the holder serial number is: %lx" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1466 #, c-format msgid "VOMS: the serial number in AC is: %lx" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1469 #, c-format msgid "" "VOMS: the holder serial number %lx is not the same as the 
serial number in " "AC %lx, the holder certificate that is used to create a voms proxy could be " "a proxy certificate with a different serial number as the original EEC cert" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1478 msgid "VOMS: the holder information in AC is wrong" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1500 #, c-format msgid "VOMS: DN of holder in AC: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1501 #, c-format msgid "VOMS: DN of holder: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1502 #, c-format msgid "VOMS: DN of issuer: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1509 msgid "" "VOMS: the holder name in AC is not related to the distinguished name in " "holder certificate" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1521 #: src/hed/libs/credential/VOMSUtil.cpp:1528 msgid "VOMS: the holder issuerUID is not the same as that in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1541 msgid "VOMS: the holder issuer name is not the same as that in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1551 msgid "VOMS: the issuer information in AC is wrong" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1559 #, c-format msgid "VOMS: the issuer name %s is not the same as that in AC - %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1567 msgid "" "VOMS: the serial number of AC INFO is too long - expecting no more than 20 " "octets" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1597 #: src/hed/libs/credential/VOMSUtil.cpp:1605 #: src/hed/libs/credential/VOMSUtil.cpp:1613 #: src/hed/libs/credential/VOMSUtil.cpp:1621 #: src/hed/libs/credential/VOMSUtil.cpp:1644 msgid "VOMS: unable to extract VO name from AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1635 #, fuzzy, c-format msgid "VOMS: unable to determine hostname of AC from VO name: %s" msgstr "Kan inte bestämma värdnamn från uname()" #: src/hed/libs/credential/VOMSUtil.cpp:1654 msgid "VOMS: can not verify the signature of the AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1660 msgid "VOMS: problems while parsing information in AC" msgstr "" #: src/hed/libs/credential/test/VOMSUtilTest.cpp:128 #, c-format msgid "Line %d.%d of the attributes returned: %s" msgstr "" #: src/hed/libs/credentialstore/ClientVOMS.cpp:149 msgid "voms" msgstr "" #: src/hed/libs/credentialstore/CredentialStore.cpp:194 #: src/hed/libs/credentialstore/CredentialStore.cpp:245 #: src/hed/libs/credentialstore/CredentialStore.cpp:273 #: src/hed/libs/credentialstore/CredentialStore.cpp:336 #: src/hed/libs/credentialstore/CredentialStore.cpp:376 #: src/hed/libs/credentialstore/CredentialStore.cpp:406 #, fuzzy, c-format msgid "MyProxy failure: %s" msgstr "Källa: %s" #: src/hed/libs/crypto/OpenSSL.cpp:68 #, c-format msgid "SSL error: %d - %s:%s:%s" msgstr "SSL-fel: %d - %s:%s:%s" #: src/hed/libs/crypto/OpenSSL.cpp:81 #, fuzzy msgid "SSL locks not initialized" msgstr "DelegateProxy misslyckades" #: src/hed/libs/crypto/OpenSSL.cpp:85 #, c-format msgid "wrong SSL lock requested: %i of %i: %i - %s" msgstr "" #: src/hed/libs/crypto/OpenSSL.cpp:112 #, fuzzy msgid "Failed to lock arccrypto library in memory" msgstr "Misslyckades med förallokera utrymme" #: src/hed/libs/crypto/OpenSSL.cpp:117 src/hed/libs/crypto/OpenSSL.cpp:128 #, fuzzy msgid "Failed to initialize OpenSSL library" msgstr "Misslyckades med att initiera GM-miljö" #: src/hed/libs/crypto/OpenSSL.cpp:150 msgid "Number of OpenSSL locks changed - reinitializing" msgstr "" #: src/hed/libs/data/DataMover.cpp:111 msgid "No locations found - 
probably no more physical instances" msgstr "Inga platser funna - troligen inga fler fysiska instanser" #: src/hed/libs/data/DataMover.cpp:117 src/hed/libs/data/FileCache.cpp:673 #: src/libs/data-staging/Processor.cpp:458 #: src/libs/data-staging/Processor.cpp:472 #, c-format msgid "Removing %s" msgstr "Tar bort %s" #: src/hed/libs/data/DataMover.cpp:130 msgid "This instance was already deleted" msgstr "Denna instans har redan tagits bort" #: src/hed/libs/data/DataMover.cpp:136 msgid "Failed to delete physical file" msgstr "Misslyckades med att ta bort fysisk fil" #: src/hed/libs/data/DataMover.cpp:147 #, c-format msgid "Removing metadata in %s" msgstr "Tar bort metadata i %s" #: src/hed/libs/data/DataMover.cpp:151 msgid "Failed to delete meta-information" msgstr "Misslyckades med att ta bort metainformation" #: src/hed/libs/data/DataMover.cpp:165 msgid "Failed to remove all physical instances" msgstr "Misslyckades med att ta bort alla instanser" #: src/hed/libs/data/DataMover.cpp:169 #, c-format msgid "Removing logical file from metadata %s" msgstr "Tar bort logisk fil från metadata %s" #: src/hed/libs/data/DataMover.cpp:172 msgid "Failed to delete logical file" msgstr "Misslyckades med att ta bort logisk fil" #: src/hed/libs/data/DataMover.cpp:179 #, fuzzy msgid "Failed to remove instance" msgstr "Misslyckades med att ta bort alla instanser" #: src/hed/libs/data/DataMover.cpp:228 msgid "DataMover::Transfer : starting new thread" msgstr "DataMover::Transfer : startar ny tråd" #: src/hed/libs/data/DataMover.cpp:256 #, c-format msgid "Transfer from %s to %s" msgstr "Överföring från %s till %s" #: src/hed/libs/data/DataMover.cpp:258 msgid "Not valid source" msgstr "Ogiltig källa" #: src/hed/libs/data/DataMover.cpp:263 msgid "Not valid destination" msgstr "Ogiltig destination" #: src/hed/libs/data/DataMover.cpp:283 #: src/services/cache_service/CacheService.cpp:294 #, fuzzy, c-format msgid "Couldn't handle certificate: %s" msgstr "Kan inte läsa certifikatfil: %s" #: src/hed/libs/data/DataMover.cpp:293 src/hed/libs/data/DataMover.cpp:591 #: src/libs/data-staging/Processor.cpp:137 #, fuzzy, c-format msgid "File %s is cached (%s) - checking permissions" msgstr "Fil är cachad (%s) - kontrollerar tillträde" #: src/hed/libs/data/DataMover.cpp:297 src/hed/libs/data/DataMover.cpp:610 #: src/hed/libs/data/DataMover.cpp:672 src/libs/data-staging/Processor.cpp:156 msgid "Permission checking passed" msgstr "Tillträdeskontroll OK" #: src/hed/libs/data/DataMover.cpp:298 src/hed/libs/data/DataMover.cpp:630 #: src/hed/libs/data/DataMover.cpp:1136 msgid "Linking/copying cached file" msgstr "Länkar/kopierar cachad fil" #: src/hed/libs/data/DataMover.cpp:323 #, c-format msgid "No locations for source found: %s" msgstr "Inga platser funna för källa: %s" #: src/hed/libs/data/DataMover.cpp:327 #, c-format msgid "Failed to resolve source: %s" msgstr "Misslyckades med att slå upp källa: %s" #: src/hed/libs/data/DataMover.cpp:339 src/hed/libs/data/DataMover.cpp:407 #, c-format msgid "No locations for destination found: %s" msgstr "Inga platser funna för destination: %s" #: src/hed/libs/data/DataMover.cpp:344 src/hed/libs/data/DataMover.cpp:411 #, c-format msgid "Failed to resolve destination: %s" msgstr "Misslyckades med att slå upp destination: %s" #: src/hed/libs/data/DataMover.cpp:359 #, c-format msgid "No locations for destination different from source found: %s" msgstr "Inga platser för destinationen som skiljer sig från källan funna: %s" #: src/hed/libs/data/DataMover.cpp:380 #, c-format msgid 
"DataMover::Transfer: trying to destroy/overwrite destination: %s" msgstr "DataMover::Transfer: försöker förstöra/skriva över destination: %s" #: src/hed/libs/data/DataMover.cpp:391 #, fuzzy, c-format msgid "Failed to delete %s but will still try to copy" msgstr "Misslyckades med att ta bort fysisk fil" #: src/hed/libs/data/DataMover.cpp:394 #, c-format msgid "Failed to delete %s" msgstr "Misslyckades med att ta bort %s" #: src/hed/libs/data/DataMover.cpp:421 #, c-format msgid "Deleted but still have locations at %s" msgstr "Borttaget men har fortfarande platser %s" #: src/hed/libs/data/DataMover.cpp:433 msgid "DataMover: cycle" msgstr "DataMover: nästa cykel" #: src/hed/libs/data/DataMover.cpp:435 msgid "DataMover: no retries requested - exit" msgstr "DataMover: inga återförsök begärda - avsluta" #: src/hed/libs/data/DataMover.cpp:440 msgid "DataMover: source out of tries - exit" msgstr "DataMover: källan har slut på försök - avsluta" #: src/hed/libs/data/DataMover.cpp:442 msgid "DataMover: destination out of tries - exit" msgstr "DataMover: destinationen har slut på försök - avsluta" #: src/hed/libs/data/DataMover.cpp:450 #, c-format msgid "Real transfer from %s to %s" msgstr "Reell överföring från %s till %s" #: src/hed/libs/data/DataMover.cpp:477 #, fuzzy, c-format msgid "Creating buffer: %lli x %i" msgstr "Skapar buffer: %i x %i" #: src/hed/libs/data/DataMover.cpp:493 #, fuzzy, c-format msgid "DataMove::Transfer: no checksum calculation for %s" msgstr "DataMover::Transfer: checksumtyp är %s" #: src/hed/libs/data/DataMover.cpp:498 #, fuzzy, c-format msgid "DataMove::Transfer: using supplied checksum %s:%s" msgstr "DataMover::Transfer: har giltig checksum" #: src/hed/libs/data/DataMover.cpp:522 #, fuzzy, c-format msgid "DataMove::Transfer: will calculate %s checksum" msgstr "DataMover::Transfer: har giltig checksum" #: src/hed/libs/data/DataMover.cpp:527 msgid "Buffer creation failed !" 
msgstr "Skapande av buffer misslyckades" #: src/hed/libs/data/DataMover.cpp:550 #, fuzzy, c-format msgid "URL is mapped to: %s" msgstr "Url mappas till: %s" #: src/hed/libs/data/DataMover.cpp:580 src/hed/libs/data/DataMover.cpp:639 #: src/libs/data-staging/Processor.cpp:91 msgid "Cached file is locked - should retry" msgstr "Cachad fil är låst - bör försöka igen" #: src/hed/libs/data/DataMover.cpp:585 src/libs/data-staging/Processor.cpp:110 msgid "Failed to initiate cache" msgstr "Misslyckades med att initiera cache" #: src/hed/libs/data/DataMover.cpp:602 #: src/services/cache_service/CacheService.cpp:366 #, c-format msgid "Permission checking failed: %s" msgstr "Tillträdeskontroll misslyckades: %s" #: src/hed/libs/data/DataMover.cpp:604 src/hed/libs/data/DataMover.cpp:664 #: src/hed/libs/data/DataMover.cpp:686 src/hed/libs/data/DataMover.cpp:697 msgid "source.next_location" msgstr "source.next_location" #: src/hed/libs/data/DataMover.cpp:618 src/libs/data-staging/Processor.cpp:161 #, fuzzy, c-format msgid "Source modification date: %s" msgstr "Källan skapades: %s" #: src/hed/libs/data/DataMover.cpp:619 src/libs/data-staging/Processor.cpp:162 #, c-format msgid "Cache creation date: %s" msgstr "Cache skapades: %s" #: src/hed/libs/data/DataMover.cpp:625 src/libs/data-staging/Processor.cpp:167 msgid "Cached file is outdated, will re-download" msgstr "Cachad fil är gammal, åternedladdar" #: src/hed/libs/data/DataMover.cpp:629 src/libs/data-staging/Processor.cpp:173 msgid "Cached copy is still valid" msgstr "Cachad kopia är fortfarande giltig" #: src/hed/libs/data/DataMover.cpp:657 msgid "URL is mapped to local access - checking permissions on original URL" msgstr "" "URL är mappad till lokalt tillträde - kontrollerar tillträde på ursprunglig " "URL" #: src/hed/libs/data/DataMover.cpp:661 #, c-format msgid "Permission checking on original URL failed: %s" msgstr "Tillträdeskontroll på ursprunglig URL misslyckades: %s" #: src/hed/libs/data/DataMover.cpp:674 msgid "Linking local file" msgstr "Länkar lokal fil" #: src/hed/libs/data/DataMover.cpp:694 #, c-format msgid "Failed to make symbolic link %s to %s : %s" msgstr "Misslyckades med att skapa symbolisk länk %s till %s : %s" #: src/hed/libs/data/DataMover.cpp:703 #, fuzzy, c-format msgid "Failed to change owner of symbolic link %s to %i" msgstr "Misslyckades med att ändra ägare av hård till %i: %s" #: src/hed/libs/data/DataMover.cpp:715 #, c-format msgid "cache file: %s" msgstr "cachefil: %s" #: src/hed/libs/data/DataMover.cpp:741 #, fuzzy, c-format msgid "Failed to stat source %s" msgstr "Misslyckades med att slå upp källa: %s" #: src/hed/libs/data/DataMover.cpp:743 src/hed/libs/data/DataMover.cpp:758 #: src/hed/libs/data/DataMover.cpp:795 src/hed/libs/data/DataMover.cpp:814 #: src/hed/libs/data/DataMover.cpp:982 src/hed/libs/data/DataMover.cpp:1014 #: src/hed/libs/data/DataMover.cpp:1024 src/hed/libs/data/DataMover.cpp:1101 msgid "(Re)Trying next source" msgstr "Försöker med nästa källa (igen)" #: src/hed/libs/data/DataMover.cpp:756 #, fuzzy, c-format msgid "Meta info of source and location do not match for %s" msgstr "Antalet källor och destinationer stämmer inte överens" #: src/hed/libs/data/DataMover.cpp:770 #, c-format msgid "" "Replica %s has high latency, but no more sources exist so will use this one" msgstr "" #: src/hed/libs/data/DataMover.cpp:774 #, c-format msgid "Replica %s has high latency, trying next source" msgstr "" #: src/hed/libs/data/DataMover.cpp:789 #, fuzzy, c-format msgid "Failed to prepare source: %s" msgstr 
"Misslyckades med att slå upp källa: %s" #: src/hed/libs/data/DataMover.cpp:805 #, c-format msgid "Failed to start reading from source: %s" msgstr "Misslyckades med att påbörja läsning från källa: %s" #: src/hed/libs/data/DataMover.cpp:826 msgid "Metadata of source and destination are different" msgstr "Källans och destinationens metadata är olika" #: src/hed/libs/data/DataMover.cpp:847 #, c-format msgid "Failed to preregister destination: %s" msgstr "Misslyckades med att förregistrera destination: %s" #: src/hed/libs/data/DataMover.cpp:852 src/hed/libs/data/DataMover.cpp:1125 msgid "destination.next_location" msgstr "destination.next_location" #: src/hed/libs/data/DataMover.cpp:866 #, fuzzy, c-format msgid "Failed to prepare destination: %s" msgstr "Misslyckades med att förregistrera destination: %s" #: src/hed/libs/data/DataMover.cpp:873 src/hed/libs/data/DataMover.cpp:897 #: src/hed/libs/data/DataMover.cpp:1122 #, c-format msgid "" "Failed to unregister preregistered lfn. You may need to unregister it " "manually: %s" msgstr "" "Misslyckades med att avregistrera förregistrerad lfn. Du kan behöva " "avregistrera det manuellt: %s" #: src/hed/libs/data/DataMover.cpp:877 src/hed/libs/data/DataMover.cpp:900 #: src/hed/libs/data/DataMover.cpp:991 src/hed/libs/data/DataMover.cpp:1007 #: src/hed/libs/data/DataMover.cpp:1030 src/hed/libs/data/DataMover.cpp:1077 msgid "(Re)Trying next destination" msgstr "Försöker med nästa destination (igen)" #: src/hed/libs/data/DataMover.cpp:889 #, c-format msgid "Failed to start writing to destination: %s" msgstr "Misslyckades med att påbörja skrivning till destination: %s" #: src/hed/libs/data/DataMover.cpp:913 msgid "Failed to start writing to cache" msgstr "Misslyckades med att påbörja skrivning till cache" #: src/hed/libs/data/DataMover.cpp:921 src/hed/libs/data/DataMover.cpp:969 #: src/hed/libs/data/DataMover.cpp:1148 msgid "" "Failed to unregister preregistered lfn. You may need to unregister it " "manually" msgstr "" "Misslyckades med att avregistrera förregistrerad lfn. 
Du kan behöva " "avregistrera det manuellt" #: src/hed/libs/data/DataMover.cpp:929 msgid "Waiting for buffer" msgstr "Väntar på buffer" #: src/hed/libs/data/DataMover.cpp:936 #, fuzzy, c-format msgid "Failed updating timestamp on cache lock file %s for file %s: %s" msgstr "Misslyckades med att skapa infofil %s: %s" #: src/hed/libs/data/DataMover.cpp:941 #, fuzzy, c-format msgid "buffer: read EOF : %s" msgstr "buffer: läs filslut : %i" #: src/hed/libs/data/DataMover.cpp:942 #, fuzzy, c-format msgid "buffer: write EOF: %s" msgstr "buffer: skriv filslut: %i" #: src/hed/libs/data/DataMover.cpp:943 #, fuzzy, c-format msgid "buffer: error : %s, read: %s, write: %s" msgstr "buffer: fel : %i" #: src/hed/libs/data/DataMover.cpp:944 msgid "Closing read channel" msgstr "Stänger läskanal" #: src/hed/libs/data/DataMover.cpp:950 msgid "Closing write channel" msgstr "Stänger skrivkanal" #: src/hed/libs/data/DataMover.cpp:958 #, fuzzy msgid "Failed to complete writing to destination" msgstr "Misslyckades under skrivning till destination" #: src/hed/libs/data/DataMover.cpp:974 #, fuzzy msgid "Transfer cancelled successfully" msgstr "Avbrytande av jobb lyckades" #: src/hed/libs/data/DataMover.cpp:1019 msgid "Cause of failure unclear - choosing randomly" msgstr "Anledning till misslyckande oklar - väljer slumpvis" #: src/hed/libs/data/DataMover.cpp:1062 #, c-format msgid "" "Checksum mismatch between checksum given as meta option (%s:%s) and " "calculated checksum (%s)" msgstr "" #: src/hed/libs/data/DataMover.cpp:1070 #, fuzzy msgid "" "Failed to unregister preregistered lfn, You may need to unregister it " "manually" msgstr "" "Misslyckades med att avregistrera förregistrerad lfn. Du kan behöva " "avregistrera det manuellt" #: src/hed/libs/data/DataMover.cpp:1074 #, fuzzy msgid "Failed to delete destination, retry may fail" msgstr "Misslyckades med att ta bort metainformation" #: src/hed/libs/data/DataMover.cpp:1084 #, fuzzy msgid "Cannot compare empty checksum" msgstr "Kan inte tolka schema!" #: src/hed/libs/data/DataMover.cpp:1091 #: src/libs/data-staging/DataStagingDelivery.cpp:456 msgid "Checksum type of source and calculated checksum differ, cannot compare" msgstr "" #: src/hed/libs/data/DataMover.cpp:1093 #, c-format msgid "Checksum mismatch between calcuated checksum %s and source checksum %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:1106 #: src/libs/data-staging/DataStagingDelivery.cpp:472 #, c-format msgid "Calculated transfer checksum %s matches source checksum" msgstr "" #: src/hed/libs/data/DataMover.cpp:1112 #: src/libs/data-staging/DataStagingDelivery.cpp:475 #, fuzzy msgid "Checksum not computed" msgstr "DataMover::Transfer: checksumtyp är %s" #: src/hed/libs/data/DataMover.cpp:1118 #, c-format msgid "Failed to postregister destination %s" msgstr "Misslyckades med att efterregistrera destination: %s" #: src/hed/libs/data/DataPoint.cpp:83 #, fuzzy, c-format msgid "Invalid URL option: %s" msgstr "Ogiltig periodsträng: %s" #: src/hed/libs/data/DataPoint.cpp:254 #, fuzzy, c-format msgid "Skipping invalid URL option %s" msgstr "Ogiltig periodsträng: %s" #: src/hed/libs/data/DataPoint.cpp:269 msgid "" "Third party transfer was requested but the corresponding plugin could\n" " not be loaded. Is the GFAL plugin installed? If not, please install " "the\n" " packages 'nordugrid-arc-plugins-gfal' and 'gfal2-all'. Depending on\n" " your type of installation the package names might differ." 
msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:91 #, fuzzy, c-format msgid "Can't handle location %s" msgstr "Kan inte bestämma plats för stderr: %s" #: src/hed/libs/data/DataPointIndex.cpp:183 msgid "Sorting replicas according to URL map" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:187 #, c-format msgid "Replica %s is mapped" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:195 #, c-format msgid "Sorting replicas according to preferred pattern %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:218 #: src/hed/libs/data/DataPointIndex.cpp:236 #, fuzzy, c-format msgid "Excluding replica %s matching pattern !%s" msgstr "LFN finns redan i LFC" #: src/hed/libs/data/DataPointIndex.cpp:229 #, fuzzy, c-format msgid "Replica %s matches host pattern %s" msgstr "LFN finns redan i LFC" #: src/hed/libs/data/DataPointIndex.cpp:247 #, c-format msgid "Replica %s matches pattern %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:263 #, c-format msgid "Replica %s doesn't match preferred pattern or URL map" msgstr "" #: src/hed/libs/data/DataStatus.cpp:12 msgid "Operation completed successfully" msgstr "Operation avslutades OK" #: src/hed/libs/data/DataStatus.cpp:13 #, fuzzy msgid "Source is invalid URL" msgstr "%s är inte en giltig URL" #: src/hed/libs/data/DataStatus.cpp:14 #, fuzzy msgid "Destination is invalid URL" msgstr "Destination måste innehålla LFN" #: src/hed/libs/data/DataStatus.cpp:15 #, fuzzy msgid "Resolving of index service for source failed" msgstr "Uppslagning av indexserver-URL för källa misslyckades" #: src/hed/libs/data/DataStatus.cpp:16 #, fuzzy msgid "Resolving of index service for destination failed" msgstr "Uppslagning av indexserver-URL för destination misslyckades" #: src/hed/libs/data/DataStatus.cpp:17 msgid "Can't read from source" msgstr "Kan ej läsa från källa" #: src/hed/libs/data/DataStatus.cpp:18 msgid "Can't write to destination" msgstr "Kan ej skriva till destination" #: src/hed/libs/data/DataStatus.cpp:19 msgid "Failed while reading from source" msgstr "Misslyckades under läsning från källa" #: src/hed/libs/data/DataStatus.cpp:20 msgid "Failed while writing to destination" msgstr "Misslyckades under skrivning till destination" #: src/hed/libs/data/DataStatus.cpp:21 #, fuzzy msgid "Failed while transferring data" msgstr "Misslyckades med att överföra data" #: src/hed/libs/data/DataStatus.cpp:22 msgid "Failed while finishing reading from source" msgstr "Misslyckades med att avsluta läsning från källa" #: src/hed/libs/data/DataStatus.cpp:23 msgid "Failed while finishing writing to destination" msgstr "Misslyckades med att avsluta skrivning till destination" #: src/hed/libs/data/DataStatus.cpp:24 #, fuzzy msgid "First stage of registration to index service failed" msgstr "Första steget av registrering av indexservice-URL misslyckades" #: src/hed/libs/data/DataStatus.cpp:25 #, fuzzy msgid "Last stage of registration to index service failed" msgstr "Sista steget av registrering av indexservice-URL misslyckades" #: src/hed/libs/data/DataStatus.cpp:26 #, fuzzy msgid "Unregistering from index service failed" msgstr "Avregistrering av indexservice-URL misslyckades" #: src/hed/libs/data/DataStatus.cpp:27 msgid "Error in caching procedure" msgstr "Fel i cachningsprocedur" #: src/hed/libs/data/DataStatus.cpp:28 #, fuzzy msgid "Error due to expiration of provided credentials" msgstr "Fel p.g.a. 
att givna kreditiv har gått ut" #: src/hed/libs/data/DataStatus.cpp:29 #, fuzzy msgid "Delete error" msgstr "Förval: %s" #: src/hed/libs/data/DataStatus.cpp:30 msgid "No valid location available" msgstr "Ingen giltig plats tillgänglig" #: src/hed/libs/data/DataStatus.cpp:31 msgid "Location already exists" msgstr "Plats existerar redan" #: src/hed/libs/data/DataStatus.cpp:32 #, fuzzy msgid "Operation not supported for this kind of URL" msgstr "Operation kan ej utföras på denna typ av URL" #: src/hed/libs/data/DataStatus.cpp:33 msgid "Feature is not implemented" msgstr "Feature ej implementerad" #: src/hed/libs/data/DataStatus.cpp:34 #, fuzzy msgid "Already reading from source" msgstr "Misslyckades under läsning från källa" #: src/hed/libs/data/DataStatus.cpp:35 #, fuzzy msgid "Already writing to destination" msgstr "Misslyckades under skrivning till destination" #: src/hed/libs/data/DataStatus.cpp:36 #, fuzzy msgid "Read access check failed" msgstr "Tillträdeskontroll misslyckades" #: src/hed/libs/data/DataStatus.cpp:37 #, fuzzy msgid "Directory listing failed" msgstr "Misslyckades med att lista filer" #: src/hed/libs/data/DataStatus.cpp:38 msgid "Object is not suitable for listing" msgstr "" #: src/hed/libs/data/DataStatus.cpp:39 #, fuzzy msgid "Failed to obtain information about file" msgstr "Misslyckades med att skapa informationsdokument" #: src/hed/libs/data/DataStatus.cpp:40 #, fuzzy msgid "No such file or directory" msgstr "Fel vid listning av fil eller katalog: %s" #: src/hed/libs/data/DataStatus.cpp:41 msgid "Object not initialized (internal error)" msgstr "Objekt ej initierat (internt fel)" #: src/hed/libs/data/DataStatus.cpp:42 msgid "Operating System error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:43 #, fuzzy msgid "Failed to stage file(s)" msgstr "Misslyckades med att spara ftp-fil" #: src/hed/libs/data/DataStatus.cpp:44 msgid "Inconsistent metadata" msgstr "" #: src/hed/libs/data/DataStatus.cpp:45 #, fuzzy msgid "Failed to prepare source" msgstr "Misslyckades med att slå upp källa: %s" #: src/hed/libs/data/DataStatus.cpp:46 msgid "Should wait for source to be prepared" msgstr "" #: src/hed/libs/data/DataStatus.cpp:47 #, fuzzy msgid "Failed to prepare destination" msgstr "Misslyckades med att förregistrera destination: %s" #: src/hed/libs/data/DataStatus.cpp:48 msgid "Should wait for destination to be prepared" msgstr "" #: src/hed/libs/data/DataStatus.cpp:49 #, fuzzy msgid "Failed to finalize reading from source" msgstr "Misslyckades under läsning från källa" #: src/hed/libs/data/DataStatus.cpp:50 #, fuzzy msgid "Failed to finalize writing to destination" msgstr "Misslyckades under skrivning till destination" #: src/hed/libs/data/DataStatus.cpp:51 #, fuzzy msgid "Failed to create directory" msgstr "Misslyckades med att skapa/hitta katalog %s" #: src/hed/libs/data/DataStatus.cpp:52 #, fuzzy msgid "Failed to rename URL" msgstr "Misslyckades med att skapa GUID i RLS: %s" #: src/hed/libs/data/DataStatus.cpp:53 #, fuzzy msgid "Data was already cached" msgstr "DataPoint läser redan" #: src/hed/libs/data/DataStatus.cpp:54 #, fuzzy msgid "Operation cancelled successfully" msgstr "Operation avslutades OK" #: src/hed/libs/data/DataStatus.cpp:55 #, fuzzy msgid "Generic error" msgstr " Gränssnittstillägg: %s" #: src/hed/libs/data/DataStatus.cpp:56 src/hed/libs/data/DataStatus.cpp:69 msgid "Unknown error" msgstr "Okänt fel" #: src/hed/libs/data/DataStatus.cpp:60 #, fuzzy msgid "No error" msgstr "Minnesallokeringsfel" #: src/hed/libs/data/DataStatus.cpp:61 #, fuzzy msgid "Transfer 
timed out" msgstr "Överföring slutförd" #: src/hed/libs/data/DataStatus.cpp:62 #, fuzzy msgid "Checksum mismatch" msgstr "DataMover::Transfer: checksumtyp är %s" #: src/hed/libs/data/DataStatus.cpp:63 msgid "Bad logic" msgstr "" #: src/hed/libs/data/DataStatus.cpp:64 msgid "All results obtained are invalid" msgstr "" #: src/hed/libs/data/DataStatus.cpp:65 #, fuzzy msgid "Temporary service error" msgstr " Gränssnittstillägg: %s" #: src/hed/libs/data/DataStatus.cpp:66 #, fuzzy msgid "Permanent service error" msgstr " Gränssnittstillägg: %s" #: src/hed/libs/data/DataStatus.cpp:67 #, fuzzy msgid "Error switching uid" msgstr "Fel vid listande av katalog: %s" #: src/hed/libs/data/DataStatus.cpp:68 #, fuzzy msgid "Request timed out" msgstr "Förfrågan misslyckades" #: src/hed/libs/data/FileCache.cpp:101 msgid "No cache directory specified" msgstr "Ingen cachekatalog angiven" #: src/hed/libs/data/FileCache.cpp:118 msgid "No usable caches" msgstr "" #: src/hed/libs/data/FileCache.cpp:127 #, fuzzy msgid "No remote cache directory specified" msgstr "Ingen cachekatalog angiven" #: src/hed/libs/data/FileCache.cpp:149 #, fuzzy msgid "No draining cache directory specified" msgstr "Ingen cachekatalog angiven" #: src/hed/libs/data/FileCache.cpp:177 #, fuzzy, c-format msgid "Failed to create cache directory for file %s: %s" msgstr "Misslyckades med att skapa/hitta katalog %s : %s\"" #: src/hed/libs/data/FileCache.cpp:187 #, fuzzy, c-format msgid "Failed to create any cache directories for %s" msgstr "Misslyckades med att skapa/hitta katalog %s : %s\"" #: src/hed/libs/data/FileCache.cpp:194 #, fuzzy, c-format msgid "Failed to change permissions on %s: %s" msgstr "Misslyckades med att ändra tillträde för hård länk till 0644: %s" #: src/hed/libs/data/FileCache.cpp:206 #, fuzzy, c-format msgid "Failed to delete stale cache file %s: %s" msgstr "Misslyckades med att skriva fil %s: %s" #: src/hed/libs/data/FileCache.cpp:209 #, fuzzy, c-format msgid "Failed to release lock on file %s" msgstr "Misslyckades med att ta bort logisk fil" #: src/hed/libs/data/FileCache.cpp:248 #, fuzzy, c-format msgid "Found file %s in remote cache at %s" msgstr "Hittade %s i cache" #: src/hed/libs/data/FileCache.cpp:265 #, fuzzy, c-format msgid "Failed to delete stale remote cache file %s: %s" msgstr "Misslyckades med att skriva fil %s: %s" #: src/hed/libs/data/FileCache.cpp:269 #, fuzzy, c-format msgid "Failed to release lock on remote cache file %s" msgstr "Misslyckades med att läsa certifikatfil: %s" #: src/hed/libs/data/FileCache.cpp:287 src/hed/libs/data/FileCache.cpp:339 #, fuzzy, c-format msgid "Failed to obtain lock on cache file %s" msgstr "Misslyckades med att låsa upp fil %s: %s" #: src/hed/libs/data/FileCache.cpp:294 src/hed/libs/data/FileCache.cpp:348 #: src/hed/libs/data/FileCache.cpp:408 #, c-format msgid "Error removing cache file %s: %s" msgstr "Fel vid borttagande av cachefil %s: %s" #: src/hed/libs/data/FileCache.cpp:296 src/hed/libs/data/FileCache.cpp:314 #: src/hed/libs/data/FileCache.cpp:318 src/hed/libs/data/FileCache.cpp:350 #: src/hed/libs/data/FileCache.cpp:361 #, c-format msgid "Failed to remove lock on %s. 
Some manual intervention may be required" msgstr "" #: src/hed/libs/data/FileCache.cpp:301 #, fuzzy, c-format msgid "Replicating file %s to local cache file %s" msgstr "Fel vid omdöpning av temporär låsfil %s till låsfil %s: %s" #: src/hed/libs/data/FileCache.cpp:304 src/hed/libs/data/FileCache.cpp:611 #, fuzzy, c-format msgid "Failed to copy file %s to %s: %s" msgstr "Misslyckades med att öppna fil %s för läsning: %s" #: src/hed/libs/data/FileCache.cpp:309 #, c-format msgid "" "Replicating file %s from remote cache failed due to source being deleted or " "modified" msgstr "" #: src/hed/libs/data/FileCache.cpp:311 #, fuzzy, c-format msgid "Failed to delete bad copy of remote cache file %s at %s: %s" msgstr "Misslyckades med att skapa hård länk från %s till %s: %s" #: src/hed/libs/data/FileCache.cpp:333 #, fuzzy, c-format msgid "Failed looking up attributes of cached file: %s" msgstr "Varning: fel vid uppslagning av attribut för cachad fil: %s" #: src/hed/libs/data/FileCache.cpp:380 src/hed/libs/data/FileCache.cpp:414 #, fuzzy, c-format msgid "Failed to unlock file %s: %s. Manual intervention may be required" msgstr "Misslyckades med att låsa upp fil %s: %s" #: src/hed/libs/data/FileCache.cpp:397 #, fuzzy, c-format msgid "Invalid lock on file %s" msgstr "Filen är inte tillgänglig: %s" #: src/hed/libs/data/FileCache.cpp:403 #, fuzzy, c-format msgid "Failed to remove .meta file %s: %s" msgstr "Misslyckades med att läsa fil %s: %s" #: src/hed/libs/data/FileCache.cpp:468 #, fuzzy, c-format msgid "Cache not found for file %s" msgstr "Fel vid öppning av låsfil %s: %s" #: src/hed/libs/data/FileCache.cpp:478 #, c-format msgid "" "Cache file %s was modified in the last second, sleeping 1 second to avoid " "race condition" msgstr "" #: src/hed/libs/data/FileCache.cpp:484 src/hed/libs/data/FileCache.cpp:819 #, c-format msgid "Cache file %s does not exist" msgstr "Cachefil %s existerar inte" #: src/hed/libs/data/FileCache.cpp:503 #, c-format msgid "Cache file for %s not found in any local or remote cache" msgstr "" #: src/hed/libs/data/FileCache.cpp:507 #, fuzzy, c-format msgid "Using remote cache file %s for url %s" msgstr "Fel vid borttagande av cachefil %s: %s" #: src/hed/libs/data/FileCache.cpp:510 src/hed/libs/data/FileCache.cpp:821 #, c-format msgid "Error accessing cache file %s: %s" msgstr "Tillträdesfel för cachefil %s: %s" #: src/hed/libs/data/FileCache.cpp:516 #, fuzzy, c-format msgid "Cannot create directory %s for per-job hard links" msgstr "Kan inte skapa katalog \"%s\" för per-jobb hårda länkar" #: src/hed/libs/data/FileCache.cpp:521 #, fuzzy, c-format msgid "Cannot change permission of %s: %s " msgstr "Kan inte ändra tillträde för \"%s\" till 0700" #: src/hed/libs/data/FileCache.cpp:525 #, fuzzy, c-format msgid "Cannot change owner of %s: %s " msgstr "Kan inte ändra ägare för %s" #: src/hed/libs/data/FileCache.cpp:539 #, fuzzy, c-format msgid "Failed to remove existing hard link at %s: %s" msgstr "Misslyckades med att ta bort hård länk %s: %s" #: src/hed/libs/data/FileCache.cpp:543 src/hed/libs/data/FileCache.cpp:554 #, c-format msgid "Failed to create hard link from %s to %s: %s" msgstr "Misslyckades med att skapa hård länk från %s till %s: %s" #: src/hed/libs/data/FileCache.cpp:549 #, fuzzy, c-format msgid "Cache file %s not found" msgstr "Cachefil %s existerar inte" #: src/hed/libs/data/FileCache.cpp:564 #, fuzzy, c-format msgid "Failed to change permissions or set owner of hard link %s: %s" msgstr "Misslyckades med att ändra tillträde för hård länk till 0644: %s" #: 
src/hed/libs/data/FileCache.cpp:572 #, fuzzy, c-format msgid "Failed to release lock on cache file %s" msgstr "Misslyckades med att ta bort logisk fil" #: src/hed/libs/data/FileCache.cpp:583 #, c-format msgid "Cache file %s was locked during link/copy, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:588 #, c-format msgid "Cache file %s was deleted during link/copy, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:593 #, c-format msgid "Cache file %s was modified while linking, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:617 #, fuzzy, c-format msgid "Failed to set executable bit on file %s" msgstr "Misslyckades med att skapa infofil %s: %s" #: src/hed/libs/data/FileCache.cpp:622 #, fuzzy, c-format msgid "Failed to set executable bit on file %s: %s" msgstr "Misslyckades med att skapa infofil %s: %s" #: src/hed/libs/data/FileCache.cpp:636 #, fuzzy, c-format msgid "Failed to remove existing symbolic link at %s: %s" msgstr "Misslyckades med att skapa symbolisk länk %s till %s : %s" #: src/hed/libs/data/FileCache.cpp:640 src/hed/libs/data/FileCache.cpp:645 #, fuzzy, c-format msgid "Failed to create symbolic link from %s to %s: %s" msgstr "Misslyckades med att skapa hård länk från %s till %s: %s" #: src/hed/libs/data/FileCache.cpp:675 #, c-format msgid "Failed to remove cache per-job dir %s: %s" msgstr "Misslyckades med att ta bort per-jobb-katalog %s: %s" #: src/hed/libs/data/FileCache.cpp:694 src/hed/libs/data/FileCache.cpp:771 #, c-format msgid "Error reading meta file %s: %s" msgstr "Fel vid läsning av metafil %s: %s" #: src/hed/libs/data/FileCache.cpp:699 src/hed/libs/data/FileCache.cpp:776 #, fuzzy, c-format msgid "Error opening meta file %s" msgstr "Fel vid öppnande av metafil %s: %s" #: src/hed/libs/data/FileCache.cpp:704 src/hed/libs/data/FileCache.cpp:780 #, fuzzy, c-format msgid "meta file %s is empty" msgstr "mail är tom" #: src/hed/libs/data/FileCache.cpp:713 #, fuzzy, c-format msgid "" "File %s is already cached at %s under a different URL: %s - will not add DN " "to cached list" msgstr "" "Fel: Fil %s är redan cachad på %s under en annan URL: %s - denna fil kommer " "ej att cachas" #: src/hed/libs/data/FileCache.cpp:733 #, fuzzy, c-format msgid "Bad format detected in file %s, in line %s" msgstr "Felaktigt format i fil %s: %s" #: src/hed/libs/data/FileCache.cpp:750 #, fuzzy, c-format msgid "Could not acquire lock on meta file %s" msgstr "Kunde inte skapa temporär fil \"%s\"" #: src/hed/libs/data/FileCache.cpp:754 #, fuzzy, c-format msgid "Error opening meta file for writing %s" msgstr "Fel vid öppnande av metafil %s: %s" #: src/hed/libs/data/FileCache.cpp:790 #, c-format msgid "DN %s is cached and is valid until %s for URL %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:794 #, c-format msgid "DN %s is cached but has expired for URL %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:845 #, fuzzy, c-format msgid "Failed to acquire lock on cache meta file %s" msgstr "Misslyckades med att skapa fil i %s" #: src/hed/libs/data/FileCache.cpp:850 #, fuzzy, c-format msgid "Failed to create cache meta file %s" msgstr "Misslyckades med att skapa fil i %s" #: src/hed/libs/data/FileCache.cpp:865 #, fuzzy, c-format msgid "Failed to read cache meta file %s" msgstr "Misslyckades med att läsa certifikatfil: %s" #: src/hed/libs/data/FileCache.cpp:870 #, fuzzy, c-format msgid "Cache meta file %s is empty, will recreate" msgstr "Cachad fil är gammal, åternedladdar" #: src/hed/libs/data/FileCache.cpp:875 #, fuzzy, c-format msgid "Cache meta file %s 
possibly corrupted, will recreate" msgstr "Cachad fil är gammal, åternedladdar" #: src/hed/libs/data/FileCache.cpp:883 #, fuzzy, c-format msgid "" "File %s is already cached at %s under a different URL: %s - this file will " "not be cached" msgstr "" "Fel: Fil %s är redan cachad på %s under en annan URL: %s - denna fil kommer " "ej att cachas" #: src/hed/libs/data/FileCache.cpp:893 #, fuzzy, c-format msgid "Error looking up attributes of cache meta file %s: %s" msgstr "Fel vid uppslagning av attribut för metafil %s: %s" #: src/hed/libs/data/FileCache.cpp:955 #, fuzzy, c-format msgid "Using cache %s" msgstr "Använder cachade inställningar: %s" #: src/hed/libs/data/FileCache.cpp:969 #, fuzzy, c-format msgid "Error getting info from statvfs for the path %s: %s" msgstr "Fel vid öppnande av metafil %s: %s" #: src/hed/libs/data/FileCache.cpp:975 #, c-format msgid "Cache %s: Free space %f GB" msgstr "" #: src/hed/libs/data/URLMap.cpp:33 #, fuzzy, c-format msgid "Can't use URL %s" msgstr "Kan inte göra stat på filen: %s" #: src/hed/libs/data/URLMap.cpp:39 #, c-format msgid "file %s is not accessible" msgstr "fil %s är inte tillgänglig" #: src/hed/libs/data/URLMap.cpp:49 #, c-format msgid "Mapping %s to %s" msgstr "Mappar %s till %s" #: src/hed/libs/data/examples/simple_copy.cpp:17 #, fuzzy msgid "Usage: copy source destination" msgstr "källa destination" #: src/hed/libs/data/examples/simple_copy.cpp:42 #, fuzzy, c-format msgid "Copy failed: %s" msgstr "DCAU misslyckades: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:41 #, c-format msgid "Failed to read proxy file: %s" msgstr "Misslyckades med att läsa proxyfil: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:49 #, c-format msgid "Failed to read certificate file: %s" msgstr "Misslyckades med att läsa certifikatfil: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:56 #, fuzzy, c-format msgid "Failed to read private key file: %s" msgstr "Misslyckades med att läsa nyckelfil: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:82 #, fuzzy, c-format msgid "" "Failed to convert GSI credential to GSS credential (major: %d, minor: %d)%s:%" "s" msgstr "" "Misslyckades med att konvertera GSI-kreditiv till GSS-kreditiv (major: %d, " "minor: %d)" #: src/hed/libs/globusutils/GSSCredential.cpp:94 #, fuzzy, c-format msgid "Failed to release GSS credential (major: %d, minor: %d):%s:%s" msgstr "Misslyckades med att befria GSS-kreditiv (major: %d, minor: %d)" #: src/hed/libs/infosys/BootstrapISIS.cpp:26 #, fuzzy msgid "Initialize ISIS handler" msgstr "Initialisera trådsystemet" #: src/hed/libs/infosys/BootstrapISIS.cpp:31 #, c-format msgid "Can't recognize URL: %s" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:48 #, fuzzy msgid "Initialize ISIS handler succeeded" msgstr "Initialisera trådsystemet" #: src/hed/libs/infosys/BootstrapISIS.cpp:52 #, fuzzy, c-format msgid "Remove ISIS (%s) from list" msgstr "Tar bort jobb %s från jobblistfilen" #: src/hed/libs/infosys/BootstrapISIS.cpp:72 #, c-format msgid "getISISList from %s" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:73 #, c-format msgid "Key %s, Cert: %s, CA: %s" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:105 #, c-format msgid "ISIS (%s) is not available or not valid response. (%d. reconnection)" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:107 #, c-format msgid "Connection to the ISIS (%s) is success and get the list of ISIS." msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:133 #, c-format msgid "GetISISList add this (%s) ISIS into the list." 
msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:145 #, fuzzy, c-format msgid "Chosen ISIS for communication: %s" msgstr "Cacheinställningar: %s" #: src/hed/libs/infosys/BootstrapISIS.cpp:152 msgid "Get ISIS from list of ISIS handler" msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:156 msgid "Here is the end of the infinite calling loop." msgstr "" #: src/hed/libs/infosys/BootstrapISIS.cpp:163 msgid "There is no more ISIS available. The list of ISIS's is already empty." msgstr "" #: src/hed/libs/infosys/InfoCache.cpp:47 #, c-format msgid "cannot create directory: %s" msgstr "kan inte skapa katalog: %s" #: src/hed/libs/infosys/InfoCache.cpp:60 #, c-format msgid "Cache configuration: %s" msgstr "Cacheinställningar: %s" #: src/hed/libs/infosys/InfoCache.cpp:63 msgid "Missing cache root in configuration" msgstr "Cacherot saknas i inställningar" #: src/hed/libs/infosys/InfoCache.cpp:67 #, fuzzy msgid "Missing service ID" msgstr "Service-ID saknas" #: src/hed/libs/infosys/InfoCache.cpp:70 #, c-format msgid "Cache root: %s" msgstr "cacherot: %s" #: src/hed/libs/infosys/InfoCache.cpp:76 #, c-format msgid "Cache directory: %s" msgstr "Cachekatalog: %s" #: src/hed/libs/infosys/InfoCache.cpp:143 #: src/hed/libs/infosys/InfoCache.cpp:162 #: src/hed/libs/infosys/InfoCache.cpp:181 #: src/hed/libs/infosys/InfoCache.cpp:206 msgid "InfoCache object is not set up" msgstr "InfoCache-objekt är inte inställt" #: src/hed/libs/infosys/InfoCache.cpp:147 #: src/hed/libs/infosys/InfoCache.cpp:166 #, c-format msgid "Invalid path in Set(): %s" msgstr "Ogiltig sökväg i Set(): %s" #: src/hed/libs/infosys/InfoCache.cpp:185 #, c-format msgid "Invalid path in Get(): %s" msgstr "Ogiltig sökväg i Get(): %s" #: src/hed/libs/infosys/InfoRegister.cpp:23 #, c-format msgid "" "InfoRegistrar thread waiting %d seconds for the all Registers elements " "creation." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:69 #, c-format msgid "" "InfoRegister created with config:\n" "%s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:79 #, c-format msgid "InfoRegister to be registered in Registrar %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:81 msgid "Discarding Registrar because the \"URL\" element is missing or empty." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:133 #, c-format msgid "InfoRegistrar id \"%s\" has been found." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:140 #, c-format msgid "InfoRegistrar id \"%s\" was not found. New registrar created" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:182 #, c-format msgid "" "Configuration error. Retry: \"%s\" is not a valid value. Default value will " "be used." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:188 #, fuzzy, c-format msgid "Retry: %d" msgstr "Period: %d" #: src/hed/libs/infosys/InfoRegister.cpp:197 #, c-format msgid "Key: %s, cert: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:226 #, fuzzy msgid "The service won't be registered." msgstr "Servicestatus kunde inte inhämtas" #: src/hed/libs/infosys/InfoRegister.cpp:231 msgid "Configuration error. Missing mandatory \"Period\" element." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:236 msgid "Configuration error. Missing mandatory \"Endpoint\" element." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:241 msgid "Configuration error. Missing mandatory \"Expiration\" element." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:248 #, c-format msgid "" "Service was already registered to the InfoRegistrar connecting to infosys %s." 
msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:284 #, c-format msgid "" "Service is successfully added to the InfoRegistrar connecting to infosys %s." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:301 msgid "Unregistred Service can not be removed." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:348 #: src/hed/libs/infosys/InfoRegister.cpp:411 #, c-format msgid "Key: %s, Cert: %s, Proxy: %s, CADir: %s CAPath" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:359 #: src/hed/libs/infosys/InfoRegister.cpp:646 #, fuzzy, c-format msgid "Response from the ISIS: %s" msgstr "Svar: %s" #: src/hed/libs/infosys/InfoRegister.cpp:363 #, fuzzy, c-format msgid "Failed to remove registration from %s ISIS" msgstr "Misslyckades med att ta bort plats från LFC: %s" #: src/hed/libs/infosys/InfoRegister.cpp:366 #, fuzzy, c-format msgid "Successfuly removed registration from ISIS (%s)" msgstr "Lyckosam registrering till ISIS (%s)" #: src/hed/libs/infosys/InfoRegister.cpp:372 #, fuzzy, c-format msgid "Failed to remove registration from ISIS (%s) - %s" msgstr "Misslyckades med att registrera till ISIS (%s) - %s" #: src/hed/libs/infosys/InfoRegister.cpp:379 #: src/hed/libs/infosys/InfoRegister.cpp:656 #, fuzzy, c-format msgid "Retry connecting to the ISIS (%s) %d time(s)." msgstr "Misslyckades med att koppla upp mot %s(%s):%i" #: src/hed/libs/infosys/InfoRegister.cpp:385 #, fuzzy, c-format msgid "ISIS (%s) is not available." msgstr "fil %s är inte tillgänglig" #: src/hed/libs/infosys/InfoRegister.cpp:389 #: src/hed/libs/infosys/InfoRegister.cpp:439 #, fuzzy, c-format msgid "Service removed from InfoRegistrar connecting to infosys %s." msgstr "Service tillhandahöll inga registreringsposter" #: src/hed/libs/infosys/InfoRegister.cpp:420 #, fuzzy, c-format msgid "Failed to remove registration from %s EMIRegistry" msgstr "Misslyckades med att ta bort plats från LFC: %s" #: src/hed/libs/infosys/InfoRegister.cpp:423 #, fuzzy, c-format msgid "Successfuly removed registration from EMIRegistry (%s)" msgstr "Lyckosam registrering till ISIS (%s)" #: src/hed/libs/infosys/InfoRegister.cpp:429 #: src/hed/libs/infosys/InfoRegister.cpp:957 #, c-format msgid "Retry connecting to the EMIRegistry (%s) %d time(s)." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:435 #, fuzzy, c-format msgid "EMIRegistry (%s) is not available." msgstr "fil %s är inte tillgänglig" #: src/hed/libs/infosys/InfoRegister.cpp:476 #: src/hed/libs/infosys/InfoRegister.cpp:684 #, fuzzy, c-format msgid "Registration starts: %s" msgstr "Registrering start" #: src/hed/libs/infosys/InfoRegister.cpp:477 #: src/hed/libs/infosys/InfoRegister.cpp:685 #, c-format msgid "reg_.size(): %d" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:480 #: src/hed/libs/infosys/InfoRegister.cpp:688 msgid "Registrant has no proper URL specified. Registration end." 
msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:510 #: src/hed/libs/infosys/InfoRegister.cpp:713 msgid "Create RegEntry XML element" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:542 #: src/hed/libs/infosys/InfoRegister.cpp:745 msgid "ServiceID attribute calculated from Endpoint Reference" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:546 #: src/hed/libs/infosys/InfoRegister.cpp:749 msgid "Generation Time attribute calculated from current time" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:553 #: src/hed/libs/infosys/InfoRegister.cpp:756 #, fuzzy, c-format msgid "ServiceID stored: %s" msgstr " Betjäningstillstånd: %s" #: src/hed/libs/infosys/InfoRegister.cpp:559 #: src/hed/libs/infosys/InfoRegister.cpp:762 #, c-format msgid "Missing service document provided by the service %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:565 #: src/hed/libs/infosys/InfoRegister.cpp:768 #, c-format msgid "" "Missing MetaServiceAdvertisment or Expiration values provided by the service " "%s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:572 #: src/hed/libs/infosys/InfoRegister.cpp:775 #, c-format msgid "Missing Type value provided by the service %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:580 #: src/hed/libs/infosys/InfoRegister.cpp:783 #, c-format msgid "Missing Endpoint Reference value provided by the service %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:598 #, c-format msgid "Registering to %s ISIS" msgstr "Registrerar till %s ISIS" #: src/hed/libs/infosys/InfoRegister.cpp:621 #: src/hed/libs/infosys/InfoRegister.cpp:822 #, c-format msgid "Key: %s, Cert: %s, Proxy: %s, CADir: %s, CAFile" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:625 #, fuzzy, c-format msgid "Sent RegEntries: %s" msgstr "Registrering för service: %s" #: src/hed/libs/infosys/InfoRegister.cpp:639 #, c-format msgid "Error during registration to %s ISIS" msgstr "Fel vid registrering till %s ISIS" #: src/hed/libs/infosys/InfoRegister.cpp:648 #, c-format msgid "Successful registration to ISIS (%s)" msgstr "Lyckosam registrering till ISIS (%s)" #: src/hed/libs/infosys/InfoRegister.cpp:652 #, c-format msgid "Failed to register to ISIS (%s) - %s" msgstr "Misslyckades med att registrera till ISIS (%s) - %s" #: src/hed/libs/infosys/InfoRegister.cpp:668 #: src/hed/libs/infosys/InfoRegister.cpp:967 #, fuzzy, c-format msgid "Registration ends: %s" msgstr "Registrering för service: %s" #: src/hed/libs/infosys/InfoRegister.cpp:669 #: src/hed/libs/infosys/InfoRegister.cpp:968 #, c-format msgid "Waiting period is %d second(s)." 
msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:679 #: src/hed/libs/infosys/InfoRegister.cpp:978 #, fuzzy, c-format msgid "Registration exit: %s" msgstr "Registrering för service: %s" #: src/hed/libs/infosys/InfoRegister.cpp:801 #, fuzzy, c-format msgid "Registering to %s EMIRegistry" msgstr "Registrerar till %s ISIS" #: src/hed/libs/infosys/InfoRegister.cpp:927 #, fuzzy, c-format msgid "Sent entry: %s" msgstr "Registrering för service: %s" #: src/hed/libs/infosys/InfoRegister.cpp:940 #, fuzzy, c-format msgid "Error during %s to %s EMIRegistry" msgstr "Fel vid registrering till %s ISIS" #: src/hed/libs/infosys/InfoRegister.cpp:943 #, fuzzy, c-format msgid "Successful %s to EMIRegistry (%s)" msgstr "Lyckosam registrering till ISIS (%s)" #: src/hed/libs/infosys/InfoRegister.cpp:949 #, fuzzy, c-format msgid "Failed to %s to EMIRegistry (%s) - %d" msgstr "Misslyckades med att registrera till ISIS (%s) - %s" #: src/hed/libs/loader/ModuleManager.cpp:25 msgid "Module Manager Init" msgstr "Modulhanterare init" #: src/hed/libs/loader/ModuleManager.cpp:68 msgid "" "Busy plugins found while unloading Module Manager. Waiting for them to be " "released." msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:202 #, c-format msgid "Found %s in cache" msgstr "Hittade %s i cache" #: src/hed/libs/loader/ModuleManager.cpp:209 #, fuzzy, c-format msgid "Could not locate module %s in following paths:" msgstr "Kunde inte lokalisera modulen %s" #: src/hed/libs/loader/ModuleManager.cpp:213 #, fuzzy, c-format msgid "\t%s" msgstr "%s" #: src/hed/libs/loader/ModuleManager.cpp:227 #, c-format msgid "Loaded %s" msgstr "Laddade %s" #: src/hed/libs/loader/ModuleManager.cpp:271 msgid "Module Manager Init by ModuleManager::setCfg" msgstr "Modulhanterare init av ModuleManager::setCfg" #: src/hed/libs/loader/ModuleManager.cpp:307 #: src/hed/libs/loader/ModuleManager.cpp:320 #, c-format msgid "%s made persistent" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:311 #, fuzzy, c-format msgid "Not found %s in cache" msgstr "Hittade %s i cache" #: src/hed/libs/loader/ModuleManager.cpp:325 msgid "Specified module not found in cache" msgstr "" #: src/hed/libs/loader/Plugin.cpp:369 src/hed/libs/loader/Plugin.cpp:574 #, fuzzy, c-format msgid "Could not find loadable module descriptor by name %s" msgstr "Kunde inte lokalisera modulen med namn %s (%s)" #: src/hed/libs/loader/Plugin.cpp:375 src/hed/libs/loader/Plugin.cpp:581 #, c-format msgid "Could not find loadable module by name %s (%s)" msgstr "Kunde inte lokalisera modulen med namn %s (%s)" #: src/hed/libs/loader/Plugin.cpp:381 src/hed/libs/loader/Plugin.cpp:492 #: src/hed/libs/loader/Plugin.cpp:586 #, c-format msgid "Module %s is not an ARC plugin (%s)" msgstr "Modulen %s är inte en ARC-plugin (%s)" #: src/hed/libs/loader/Plugin.cpp:398 src/hed/libs/loader/Plugin.cpp:502 #: src/hed/libs/loader/Plugin.cpp:608 #, fuzzy, c-format msgid "Module %s failed to reload (%s)" msgstr "Modul %s kunde inte laddas" #: src/hed/libs/loader/Plugin.cpp:462 src/hed/libs/loader/Plugin.cpp:475 #, c-format msgid "Loadable module %s contains no requested plugin %s of kind %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:480 #, fuzzy, c-format msgid "Could not find loadable module descriptor by names %s and %s" msgstr "Kunde inte lokalisera modulen med namn %s och %s (%s)" #: src/hed/libs/loader/Plugin.cpp:486 #, fuzzy, c-format msgid "Could not find loadable module by names %s and %s (%s)" msgstr "Kunde inte lokalisera modulen med namn %s och %s (%s)" #: src/hed/libs/message/MCC.cpp:77 
src/hed/libs/message/Service.cpp:25 #, c-format msgid "No security processing/check requested for '%s'" msgstr "Ingen säkerhetsprocessering/kontroll begärd för '%s'" #: src/hed/libs/message/MCC.cpp:85 msgid "Security processing/check failed" msgstr "Säkerhetsprocessering/kontroll misslyckades" #: src/hed/libs/message/MCC.cpp:89 msgid "Security processing/check passed" msgstr "Säkerhetsprocessering/kontroll OK" #: src/hed/libs/message/MCCLoader.cpp:16 #, fuzzy msgid "Chain(s) configuration failed" msgstr "Misslyckades med att läsa inställningsfil." #: src/hed/libs/message/MCCLoader.cpp:133 #, fuzzy msgid "SecHandler configuration is not defined" msgstr "Cacheinställningar: %s" #: src/hed/libs/message/MCCLoader.cpp:156 msgid "SecHandler has no configuration" msgstr "Säkerhetshanterare saknar inställningar" #: src/hed/libs/message/MCCLoader.cpp:162 msgid "SecHandler has no name attribute defined" msgstr "Säkerhetshanterare har inget namnattribut definierat" #: src/hed/libs/message/MCCLoader.cpp:172 #, fuzzy, c-format msgid "Security Handler %s(%s) could not be created" msgstr "Service %s(%s) kunde inte skapas" #: src/hed/libs/message/MCCLoader.cpp:176 #, fuzzy, c-format msgid "SecHandler: %s(%s)" msgstr "sechandler namn: %s" #: src/hed/libs/message/MCCLoader.cpp:188 msgid "Component has no name attribute defined" msgstr "Komponent har inget namnattribut definierat" #: src/hed/libs/message/MCCLoader.cpp:193 #, fuzzy msgid "Component has no ID attribute defined" msgstr "Komponent har inget id-attribut definierat" #: src/hed/libs/message/MCCLoader.cpp:202 #, c-format msgid "Component %s(%s) could not be created" msgstr "Komponent %s(%s) kunde inte skapas" #: src/hed/libs/message/MCCLoader.cpp:232 #, fuzzy, c-format msgid "Component's %s(%s) next has no ID attribute defined" msgstr "Komponentens %s(%s) nästa har inget id-attribut definierat" #: src/hed/libs/message/MCCLoader.cpp:287 #, c-format msgid "Loaded MCC %s(%s)" msgstr "Laddade MCC %s(%s)" #: src/hed/libs/message/MCCLoader.cpp:305 #, fuzzy, c-format msgid "Plexer's (%s) next has no ID attribute defined" msgstr "Plexerns %s nästa har inget id-attribut definierat" #: src/hed/libs/message/MCCLoader.cpp:315 #, fuzzy, c-format msgid "Loaded Plexer %s" msgstr "Laddade Plexer %s(%s)" #: src/hed/libs/message/MCCLoader.cpp:323 #, fuzzy msgid "Service has no Name attribute defined" msgstr "Service har inget namnattribut definierat" #: src/hed/libs/message/MCCLoader.cpp:329 #, fuzzy msgid "Service has no ID attribute defined" msgstr "Service har inget id-attribut definierat" #: src/hed/libs/message/MCCLoader.cpp:338 #, c-format msgid "Service %s(%s) could not be created" msgstr "Service %s(%s) kunde inte skapas" #: src/hed/libs/message/MCCLoader.cpp:345 #, c-format msgid "Loaded Service %s(%s)" msgstr "Laddade Service %s(%s)" #: src/hed/libs/message/MCCLoader.cpp:387 #, c-format msgid "Linking MCC %s(%s) to MCC (%s) under %s" msgstr "Länkar MCC %s(%s) till MCC (%s) under %s" #: src/hed/libs/message/MCCLoader.cpp:398 #, c-format msgid "Linking MCC %s(%s) to Service (%s) under %s" msgstr "Länkar MCC %s(%s) till Service (%s) under %s" #: src/hed/libs/message/MCCLoader.cpp:407 #, c-format msgid "Linking MCC %s(%s) to Plexer (%s) under %s" msgstr "Länkar MCC %s(%s) till Plexer (%s) under %s" #: src/hed/libs/message/MCCLoader.cpp:412 #, c-format msgid "MCC %s(%s) - next %s(%s) has no target" msgstr "MCC %s(%s) - nästa %s(%s) saknar mål" #: src/hed/libs/message/MCCLoader.cpp:431 #, c-format msgid "Linking Plexer %s to MCC (%s) under %s" msgstr "Länkar 
Plexer %s till MCC (%s) under %s" #: src/hed/libs/message/MCCLoader.cpp:442 #, c-format msgid "Linking Plexer %s to Service (%s) under %s" msgstr "Länkar Plexer %s till Service (%s) under %s" #: src/hed/libs/message/MCCLoader.cpp:451 #, c-format msgid "Linking Plexer %s to Plexer (%s) under %s" msgstr "Länkar Plexer %s till Plexer (%s) under %s" #: src/hed/libs/message/MCCLoader.cpp:457 #, c-format msgid "Plexer (%s) - next %s(%s) has no target" msgstr "Plexer (%s) - nästa %s(%s) saknar mål" #: src/hed/libs/message/Plexer.cpp:31 #, c-format msgid "Bad label: \"%s\"" msgstr "Dålig etikett: \"%s\"" #: src/hed/libs/message/Plexer.cpp:47 #, c-format msgid "Operation on path \"%s\"" msgstr "Operation på sökväg \"%s\"" #: src/hed/libs/message/Plexer.cpp:60 #, fuzzy, c-format msgid "No next MCC or Service at path \"%s\"" msgstr "Ingen service på sökväg \"%s\"" #: src/hed/libs/message/Service.cpp:33 #, fuzzy, c-format msgid "Security processing/check for '%s' failed" msgstr "Säkerhetsprocessering/kontroll misslyckades" #: src/hed/libs/message/Service.cpp:37 #, fuzzy, c-format msgid "Security processing/check for '%s' passed" msgstr "Säkerhetsprocessering/kontroll OK" #: src/hed/libs/message/Service.cpp:43 msgid "Empty registration collector" msgstr "Tom registreringsinsamlare" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:136 #, fuzzy, c-format msgid "Can not load ARC evaluator object: %s" msgstr "Kan inte ladda ARC-utvärderingsobjekt: %s" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:187 #, fuzzy, c-format msgid "Can not load ARC request object: %s" msgstr "Kan inte ladda ARC-begäranobjekt: %s" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:228 #, c-format msgid "Can not load policy object: %s" msgstr "Kan inte ladda policyobjekt: %s" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:276 msgid "Can not load policy object" msgstr "Kan inte ladda policyobjekt" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:324 msgid "Can not load request object" msgstr "Kan inte ladda begäranobjekt" #: src/hed/libs/security/ArcPDP/PolicyParser.cpp:119 msgid "Can not generate policy object" msgstr "Kan inte generera policyobjekt" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:37 #, c-format msgid "Id= %s,Type= %s,Issuer= %s,Value= %s" msgstr "Id= %s,Typ= %s,Utfärdare= %s,Värde= %s" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:40 #, c-format msgid "No Attribute exists, which can deal with type: %s" msgstr "Inget attribut existerar som kan hantera typen: %s" #: src/hed/mcc/http/MCCHTTP.cpp:157 #, c-format msgid "HTTP Error: %d %s" msgstr "HTTP-fel: %d %s" #: src/hed/mcc/http/MCCHTTP.cpp:222 msgid "Cannot create http payload" msgstr "Kan inte skapa http-nyttolast" #: src/hed/mcc/http/MCCHTTP.cpp:288 msgid "No next element in the chain" msgstr "Inget nästa element i kedjan" #: src/hed/mcc/http/MCCHTTP.cpp:297 #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:253 msgid "next element of the chain returned error status" msgstr "nästa element i kedjan returnerade felstatus" #: src/hed/mcc/http/MCCHTTP.cpp:306 #, fuzzy msgid "next element of the chain returned no payload" msgstr "nästa element i kedjan returnerade ogiltig nyttolast" #: src/hed/mcc/http/MCCHTTP.cpp:318 #, fuzzy msgid "next element of the chain returned invalid/unsupported payload" msgstr "nästa element i kedjan returnerade ogiltig nyttolast" #: src/hed/mcc/http/MCCHTTP.cpp:393 msgid "Error to flush output payload" msgstr "Fel vid utmatning av utdatanyttolast" #: src/hed/mcc/http/PayloadHTTP.cpp:306 #, fuzzy, 
c-format msgid "<< %s" msgstr "%s: %s" #: src/hed/mcc/http/PayloadHTTP.cpp:355 src/hed/mcc/http/PayloadHTTP.cpp:457 #, fuzzy, c-format msgid "< %s" msgstr "%s: %s" #: src/hed/mcc/http/PayloadHTTP.cpp:576 #, fuzzy msgid "Failed to parse HTTP header" msgstr "Misslyckades med att starta ny tråd" #: src/hed/mcc/http/PayloadHTTP.cpp:837 msgid "Invalid HTTP object can't produce result" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:950 #, fuzzy, c-format msgid "> %s" msgstr "%s: %s" #: src/hed/mcc/http/PayloadHTTP.cpp:975 #, fuzzy msgid "Failed to write header to output stream" msgstr "Misslyckades med att skriva begäran till en fil" #: src/hed/mcc/http/PayloadHTTP.cpp:1000 src/hed/mcc/http/PayloadHTTP.cpp:1006 #: src/hed/mcc/http/PayloadHTTP.cpp:1012 src/hed/mcc/http/PayloadHTTP.cpp:1022 #: src/hed/mcc/http/PayloadHTTP.cpp:1034 src/hed/mcc/http/PayloadHTTP.cpp:1039 #: src/hed/mcc/http/PayloadHTTP.cpp:1044 src/hed/mcc/http/PayloadHTTP.cpp:1052 #: src/hed/mcc/http/PayloadHTTP.cpp:1059 #, fuzzy msgid "Failed to write body to output stream" msgstr "%s: Misslyckades med att läsa lista med utdatafiler." #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:31 msgid "Skipping service: no ServicePath found!" msgstr "Hoppar över service: ingen ServicePath funnen!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:37 msgid "Skipping service: no SchemaPath found!" msgstr "Hoppar över service: ingen SchemaPath funnen!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:89 msgid "Parser Context creation failed!" msgstr "Skapande av tolkningskontext misslyckades!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:98 msgid "Cannot parse schema!" msgstr "Kan inte tolka schema!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:110 msgid "Empty payload!" msgstr "Tom nyttolast!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:119 msgid "Could not convert payload!" msgstr "Kunde inte konvertera nyttolast!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:125 #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:212 msgid "Could not create PayloadSOAP!" msgstr "Kunde inte skapa SOAP nyttolast!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:196 msgid "Empty input payload!" msgstr "Tom indatanyttolast" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:205 msgid "Could not convert incoming payload!" msgstr "Kunde inte konvertera inkommande nyttolast!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:232 msgid "Missing schema! Skipping validation..." msgstr "Schema saknas! Hoppar över validering..." #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:237 msgid "Could not validate message!" msgstr "Kunde inte validera meddelande!" 
#: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:245 #: src/hed/mcc/soap/MCCSOAP.cpp:222 msgid "empty next chain element" msgstr "tomt nästa kedjeelement" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:257 #: src/hed/mcc/soap/MCCSOAP.cpp:238 msgid "next element of the chain returned empty payload" msgstr "nästa element i kedjan returnerade tom nyttolast" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:265 msgid "next element of the chain returned invalid payload" msgstr "nästa element i kedjan returnerade ogiltig nyttolast" #: src/hed/mcc/soap/MCCSOAP.cpp:192 msgid "empty input payload" msgstr "tom indatanyttolast" #: src/hed/mcc/soap/MCCSOAP.cpp:198 msgid "incoming message is not SOAP" msgstr "inkommande meddelande är inte SOAP" #: src/hed/mcc/soap/MCCSOAP.cpp:215 src/hed/mcc/soap/MCCSOAP.cpp:372 msgid "Security check failed in SOAP MCC for incoming message" msgstr "Säkerhetskontroll misslyckades i SOAP MCC för inkommande meddelande" #: src/hed/mcc/soap/MCCSOAP.cpp:230 #, fuzzy, c-format msgid "next element of the chain returned error status: %s" msgstr "nästa element i kedjan returnerade felstatus" #: src/hed/mcc/soap/MCCSOAP.cpp:249 #, fuzzy msgid "next element of the chain returned unknown payload - passing through" msgstr "nästa element i kedjan returnerade ogiltig nyttolast" #: src/hed/mcc/soap/MCCSOAP.cpp:252 src/hed/mcc/soap/MCCSOAP.cpp:265 #: src/hed/mcc/soap/MCCSOAP.cpp:317 msgid "Security check failed in SOAP MCC for outgoing message" msgstr "Säkerhetskontroll misslyckades i SOAP MCC för utgående meddelande" #: src/hed/mcc/tcp/MCCTCP.cpp:104 src/hed/mcc/tcp/MCCTCP.cpp:636 msgid "Cannot initialize winsock library" msgstr "Kan inte initiera winsockbibliotek" #: src/hed/mcc/tcp/MCCTCP.cpp:119 msgid "Missing Port in Listen element" msgstr "Port saknas i Listen-element" #: src/hed/mcc/tcp/MCCTCP.cpp:128 msgid "Version in Listen element can't be recognized" msgstr "Version i Listen-element kan ej kännas igen" #: src/hed/mcc/tcp/MCCTCP.cpp:137 #, c-format msgid "Failed to obtain local address for port %s - %s" msgstr "Misslyckades med att erhålla lokal adress för port %s - %s" #: src/hed/mcc/tcp/MCCTCP.cpp:139 #, fuzzy, c-format msgid "Failed to obtain local address for %s:%s - %s" msgstr "Misslyckades med att erhålla lokal adress för port %s - %s" #: src/hed/mcc/tcp/MCCTCP.cpp:146 #, fuzzy, c-format msgid "Trying to listen on TCP port %s(%s)" msgstr "Försöker lyssna på port %s" #: src/hed/mcc/tcp/MCCTCP.cpp:148 #, fuzzy, c-format msgid "Trying to listen on %s:%s(%s)" msgstr "Försöker lyssna på port %s" #: src/hed/mcc/tcp/MCCTCP.cpp:154 #, fuzzy, c-format msgid "Failed to create socket for for listening at TCP port %s(%s): %s" msgstr "Misslyckades med att skapa socket för port %s" #: src/hed/mcc/tcp/MCCTCP.cpp:156 #, fuzzy, c-format msgid "Failed to create socket for for listening at %s:%s(%s): %s" msgstr "Misslyckades med att skapa socket för port %s" #: src/hed/mcc/tcp/MCCTCP.cpp:171 #, c-format msgid "" "Failed to limit socket to IPv6 at TCP port %s - may cause errors for IPv4 at " "same port" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:173 #, c-format msgid "" "Failed to limit socket to IPv6 at %s:%s - may cause errors for IPv4 at same " "port" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:181 #, fuzzy, c-format msgid "Failed to bind socket for TCP port %s(%s): %s" msgstr "Misslyckades med att binda socket för port %s" #: src/hed/mcc/tcp/MCCTCP.cpp:183 #, fuzzy, c-format msgid "Failed to bind socket for %s:%s(%s): %s" msgstr "Misslyckades med att binda socket för port %s" #: 
src/hed/mcc/tcp/MCCTCP.cpp:198 #, fuzzy, c-format msgid "Failed to listen at TCP port %s(%s): %s" msgstr "Misslyckades med att lyssna på port %s" #: src/hed/mcc/tcp/MCCTCP.cpp:200 #, fuzzy, c-format msgid "Failed to listen at %s:%s(%s): %s" msgstr "Misslyckades med att lyssna på port %s" #: src/hed/mcc/tcp/MCCTCP.cpp:217 #, c-format msgid "Listening on TCP port %s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:219 #, c-format msgid "Listening on %s:%s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:226 #, fuzzy, c-format msgid "Failed to start listening on any address for %s:%s" msgstr "Misslyckades med att erhålla lokal adress för port %s - %s" #: src/hed/mcc/tcp/MCCTCP.cpp:228 #, fuzzy, c-format msgid "Failed to start listening on any address for %s:%s(IPv%s)" msgstr "Misslyckades med att erhålla lokal adress för port %s - %s" #: src/hed/mcc/tcp/MCCTCP.cpp:234 msgid "No listening ports initiated" msgstr "Inga lyssnande portar initierade" #: src/hed/mcc/tcp/MCCTCP.cpp:245 #, c-format msgid "Setting connections limit to %i, connections over limit will be %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:249 msgid "Failed to start thread for listening" msgstr "Misslyckades med att starta tråd för att lyssna" #: src/hed/mcc/tcp/MCCTCP.cpp:285 msgid "Failed to start thread for communication" msgstr "Misslyckades med att starta tråd för kommunikation" #: src/hed/mcc/tcp/MCCTCP.cpp:315 msgid "Failed while waiting for connection request" msgstr "Misslyckades under väntan på förbindelseförfrågan" #: src/hed/mcc/tcp/MCCTCP.cpp:337 msgid "Failed to accept connection request" msgstr "Misslyckades med att acceptera förbindelseförfrågan" #: src/hed/mcc/tcp/MCCTCP.cpp:346 msgid "Too many connections - dropping new one" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:353 msgid "Too many connections - waiting for old to close" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:577 msgid "next chain element called" msgstr "nästa kedjeelement anropat" #: src/hed/mcc/tcp/MCCTCP.cpp:592 msgid "Only Raw Buffer payload is supported for output" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:600 src/hed/mcc/tcp/MCCTCP.cpp:709 #: src/hed/mcc/tls/MCCTLS.cpp:545 msgid "Failed to send content of buffer" msgstr "Misslyckades med att skicka innehåll till buffer" #: src/hed/mcc/tcp/MCCTCP.cpp:612 msgid "TCP executor is removed" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:614 #, c-format msgid "Sockets do not match on exit %i != %i" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:642 msgid "No Connect element specified" msgstr "Inget Connect-element specificerat" #: src/hed/mcc/tcp/MCCTCP.cpp:648 msgid "Missing Port in Connect element" msgstr "Port saknas i Connect-element" #: src/hed/mcc/tcp/MCCTCP.cpp:654 msgid "Missing Host in Connect element" msgstr "Värd saknas i Connect-element" #: src/hed/mcc/tcp/MCCTCP.cpp:685 #, fuzzy msgid "TCP client process called" msgstr "klientprocess anropad" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:71 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:87 #, c-format msgid "Failed to resolve %s (%s)" msgstr "Misslyckades med att slå upp %s (%s)" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:97 #, c-format msgid "Trying to connect %s(%s):%d" msgstr "Försöker koppla upp %s(%s):%d" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:101 #, fuzzy, c-format msgid "Failed to create socket for connecting to %s(%s):%d - %s" msgstr "Misslyckades med att skapa socket till %s(%s):%d" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:114 #, c-format msgid "" "Failed to get TCP socket options for connection to %s(%s):%d - timeout won't " "work - %s" msgstr "" #: 
src/hed/mcc/tcp/PayloadTCPSocket.cpp:121 #, fuzzy, c-format msgid "Failed to connect to %s(%s):%i - %s" msgstr "Misslyckades med att koppla upp mot %s(%s):%i" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:131 #, fuzzy, c-format msgid "Timeout connecting to %s(%s):%i - %i s" msgstr "Misslyckades med att koppla upp mot %s(%s):%i" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:139 #, fuzzy, c-format msgid "Failed while waiting for connection to %s(%s):%i - %s" msgstr "Misslyckades med att koppla upp mot %s(%s):%i" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:149 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:158 #, c-format msgid "Failed to connect to %s(%s):%i" msgstr "Misslyckades med att koppla upp mot %s(%s):%i" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:215 msgid "" "Received message out-of-band (not critical, ERROR level is just for " "debugging purposes)" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:39 msgid "Independent proxy - no rights granted" msgstr "Oberoende proxy - inga rättigheter beviljade" #: src/hed/mcc/tls/DelegationCollector.cpp:43 msgid "Proxy with all rights inherited" msgstr "Proxy med alla rättigheter ärvda" #: src/hed/mcc/tls/DelegationCollector.cpp:51 msgid "Proxy with empty policy - fail on unrecognized policy" msgstr "Proxy med tom policy - misslyckades p.g.a. oigenkänd policy" #: src/hed/mcc/tls/DelegationCollector.cpp:56 #, c-format msgid "Proxy with specific policy: %s" msgstr "Proxy med specifik policy: %s" #: src/hed/mcc/tls/DelegationCollector.cpp:60 msgid "Proxy with ARC Policy" msgstr "Proxy med ARC-policy" #: src/hed/mcc/tls/DelegationCollector.cpp:62 msgid "Proxy with unknown policy - fail on unrecognized policy" msgstr "Proxy med okänd policy - misslyckades p.g.a. oigenkänd policy" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:77 #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:128 #, c-format msgid "Was expecting %s at the beginning of \"%s\"" msgstr "Förväntade %s i början av \"%s\"" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:82 #, c-format msgid "We only support CAs in Globus signing policy - %s is not supported" msgstr "Vi stöder endast CA i Globus signeringspolicy - %s stöds inte" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:87 #, c-format msgid "We only support X509 CAs in Globus signing policy - %s is not supported" msgstr "Vi stöder endast X509-CA i Globus signeringspolicy - %s stöds inte" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:92 msgid "Missing CA subject in Globus signing policy" msgstr "Saknat CA-subjekt i Globus signeringspolicy" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:102 msgid "Negative rights are not supported in Globus signing policy" msgstr "Negativa rättigheter stöds inte i Globus signeringspolicy" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:106 #, c-format msgid "Unknown rights in Globus signing policy - %s" msgstr "Okända rättigheter i Globus signeringspolicy - %s" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:111 #, c-format msgid "" "Only globus rights are supported in Globus signing policy - %s is not " "supported" msgstr "" "Endast globusrättigheter stöds i Globus signeringspolicy - %s stöds inte" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:116 #, c-format msgid "" "Only signing rights are supported in Globus signing policy - %s is not " "supported" msgstr "" "Endast signeringsrättigheter stöds i Globus signeringspolicy - %s stöds inte" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:133 #, c-format msgid "" "We only support subjects conditions in Globus signing policy - %s is not " "supported" msgstr "" "Vi stöder endast subjektvillkor i Globus signeringspolicy 
- %s stöds inte" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:138 #, c-format msgid "" "We only support globus conditions in Globus signing policy - %s is not " "supported" msgstr "" "Vi stöder endast globusvillkor i Globus signeringspolicy - %s stöds inte" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:144 msgid "Missing condition subjects in Globus signing policy" msgstr "Saknade villkorssubjekt i Globus signeringspolicy" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:220 msgid "Unknown element in Globus signing policy" msgstr "Okänt element i Globus sugneringspolicy" #: src/hed/mcc/tls/MCCTLS.cpp:222 #, fuzzy msgid "Critical VOMS attribute processing failed" msgstr "Säkerhetshanterarprocessering misslyckades" #: src/hed/mcc/tls/MCCTLS.cpp:230 #, fuzzy msgid "VOMS attribute validation failed" msgstr "Certifikatverifiering misslyckades" #: src/hed/mcc/tls/MCCTLS.cpp:232 msgid "VOMS attribute is ignored due to processing/validation error" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:424 #, fuzzy, c-format msgid "Failed to establish connection: %s" msgstr "Misslyckades med att etablera SSL-förbindelse" #: src/hed/mcc/tls/MCCTLS.cpp:442 src/hed/mcc/tls/MCCTLS.cpp:524 #, c-format msgid "Peer name: %s" msgstr "Peer-namn: %s" #: src/hed/mcc/tls/MCCTLS.cpp:444 src/hed/mcc/tls/MCCTLS.cpp:526 #, c-format msgid "Identity name: %s" msgstr "Identitetsnamn: %s" #: src/hed/mcc/tls/MCCTLS.cpp:446 src/hed/mcc/tls/MCCTLS.cpp:528 #, fuzzy, c-format msgid "CA name: %s" msgstr " Namn: %s" #: src/hed/mcc/tls/MCCTLS.cpp:452 #, fuzzy msgid "Failed to process security attributes in TLS MCC for incoming message" msgstr "Säkerhetskontroll misslyckades i TLS MCC för inkommande meddelande" #: src/hed/mcc/tls/MCCTLS.cpp:461 msgid "Security check failed in TLS MCC for incoming message" msgstr "Säkerhetskontroll misslyckades i TLS MCC för inkommande meddelande" #: src/hed/mcc/tls/MCCTLS.cpp:534 #, fuzzy msgid "Security check failed for outgoing TLS message" msgstr "Säkerhetskontroll misslyckades i TLS MCC för utgående meddelande" #: src/hed/mcc/tls/MCCTLS.cpp:563 #, fuzzy msgid "Security check failed for incoming TLS message" msgstr "Säkerhetskontroll misslyckades i TLS MCC för inkommande meddelande" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:93 #, fuzzy msgid "" "Failed to allocate memory for certificate subject while matching policy." msgstr "Misslyckades med att signera proxycertifikatet" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:98 msgid "" "Failed to retrieve link to TLS stream. Additional policy matching is skipped." 
msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:128 #, fuzzy, c-format msgid "Certificate %s already expired" msgstr "Certifikat kan inte extraheras" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:136 #, fuzzy, c-format msgid "Certificate %s will expire in %s" msgstr "Certifikat är ingen fil: %s" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:157 #, fuzzy msgid "Failed to store application data" msgstr "Misslyckades med att lagra tillämpningsdata i OpenSSL" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:185 msgid "Failed to retrieve application data from OpenSSL" msgstr "Misslyckades med att inhämta tillämpningsdata från OpenSSL" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:257 src/hed/mcc/tls/PayloadTLSMCC.cpp:351 msgid "Can not create the SSL Context object" msgstr "Kan inte skapa SSL-kontextobjekt" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:270 src/hed/mcc/tls/PayloadTLSMCC.cpp:369 msgid "Can't set OpenSSL verify flags" msgstr "Kan inte ange OpenSSL verifikationsflaggor" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:289 src/hed/mcc/tls/PayloadTLSMCC.cpp:383 #, fuzzy msgid "Can not create the SSL object" msgstr "Kan inte skapa SSL-kontextobjekt" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:308 msgid "Failed to establish SSL connection" msgstr "Misslyckades med att etablera SSL-förbindelse" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:311 src/hed/mcc/tls/PayloadTLSMCC.cpp:398 #, fuzzy, c-format msgid "Using cipher: %s" msgstr "Använder nyckelfil: %s" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:395 msgid "Failed to accept SSL connection" msgstr "Misslyckades med att acceptera SSL-förbindelse" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:448 #, fuzzy, c-format msgid "Failed to shut down SSL: %s" msgstr "Misslyckades med att stänga av SSL" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:47 msgid "" "ArcAuthZ: failed to initiate all PDPs - this instance will be non-functional" msgstr "" "ArcAuthZ: misslyckades med att initiera alla PDP:er - denna instans kommer " "inte att fungera" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:70 #, fuzzy msgid "PDP: missing name attribute" msgstr "PDP: %s namn är duplicerat" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:74 #, fuzzy, c-format msgid "PDP: %s (%s)" msgstr "PDP: %s (%d)" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:79 #, fuzzy, c-format msgid "PDP: %s (%s) can not be loaded" msgstr "PDP: %s kan inte laddas" #: src/hed/shc/arcpdp/ArcEvaluationCtx.cpp:251 #, fuzzy, c-format msgid "There are %d RequestItems" msgstr "Det finns %d RequestItem" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:60 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:55 msgid "Can not parse classname for FunctionFactory from configuration" msgstr "Kan inte tolka klassnamn för FunctionFactory från konfigurationen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:68 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:63 msgid "Can not parse classname for AttributeFactory from configuration" msgstr "Kan inte tolka klassnamn för AttributeFactory från konfigurationen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:76 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:71 msgid "" "Can not parse classname for CombiningAlgorithmFactory from configuration" msgstr "" "Kan inte tolka klassnamn för CombiningAlgorithmFactory från konfigurationen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:84 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:79 msgid "Can not parse classname for Request from configuration" msgstr "Kan inte tolka klassnamn för Request från konfigurationen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:93 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:88 msgid "Can not parse classname for Policy from configuration" msgstr "Kan inte tolka klassnamn för 
Policy från konfigurationen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:105 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:100 msgid "Can not dynamically produce AttributeFactory" msgstr "Kan inte skapa AttributeFactory dynamiskt" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:110 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:105 msgid "Can not dynamically produce FnFactory" msgstr "Kan inte skapa FnFactory dynamiskt" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:115 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:110 msgid "Can not dynamically produce AlgFacroty" msgstr "Kan inte skapa AlgFactory dynamiskt" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:126 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:31 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:37 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:121 msgid "Can not create PolicyStore object" msgstr "Kan inte skapa PolicyStore-objekt" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:177 src/hed/shc/test.cpp:183 #: src/hed/shc/testinterface_arc.cpp:102 #: src/hed/shc/testinterface_xacml.cpp:54 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:172 msgid "Can not dynamically produce Request" msgstr "Kan inte skapa Request dynamiskt" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:261 #, c-format msgid "Result value (0=Permit, 1=Deny, 2=Indeterminate, 3=Not_Applicable): %d" msgstr "Resultatvärde (0=Tillåt, 1=Vägra, 2=Obestämd, 3=Ej applicerbar): %d" #: src/hed/shc/arcpdp/ArcPDP.cpp:110 #, fuzzy msgid "Can not find ArcPDPContext" msgstr "Kan inte hitta MCC-status-objekt" #: src/hed/shc/arcpdp/ArcPDP.cpp:139 src/hed/shc/xacmlpdp/XACMLPDP.cpp:117 msgid "Evaluator does not support loadable Combining Algorithms" msgstr "Utvärderare stöder inte laddningsbara kombinerande algoritmer" #: src/hed/shc/arcpdp/ArcPDP.cpp:143 src/hed/shc/xacmlpdp/XACMLPDP.cpp:121 #, c-format msgid "Evaluator does not support specified Combining Algorithm - %s" msgstr "Utvärderare stöder inte den specificerade kombinerande algoritmen - %s" #: src/hed/shc/arcpdp/ArcPDP.cpp:155 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:84 #: src/hed/shc/gaclpdp/GACLPDP.cpp:118 src/hed/shc/test.cpp:94 #: src/hed/shc/testinterface_arc.cpp:37 src/hed/shc/testinterface_xacml.cpp:37 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:133 msgid "Can not dynamically produce Evaluator" msgstr "Kan inte skapa Evaluator dynamiskt" #: src/hed/shc/arcpdp/ArcPDP.cpp:158 msgid "Evaluator for ArcPDP was not loaded" msgstr "Utvärderare för ArcPDP laddedes inte" #: src/hed/shc/arcpdp/ArcPDP.cpp:165 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:57 #: src/hed/shc/gaclpdp/GACLPDP.cpp:128 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:87 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:143 src/tests/echo/echo.cpp:108 msgid "Missing security object in message" msgstr "Säkerhetsobjekt saknas i meddelande" #: src/hed/shc/arcpdp/ArcPDP.cpp:173 src/hed/shc/arcpdp/ArcPDP.cpp:181 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:137 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:143 #: src/hed/shc/gaclpdp/GACLPDP.cpp:136 src/hed/shc/gaclpdp/GACLPDP.cpp:144 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:95 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:103 #: src/tests/echo/echo.cpp:116 src/tests/echo/echo.cpp:123 msgid "Failed to convert security information to ARC request" msgstr "Misslyckades med att konvertera säkerhetsinformation till ARC-begäran" #: src/hed/shc/arcpdp/ArcPDP.cpp:189 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:150 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:111 #, c-format msgid "ARC Auth. 
request: %s" msgstr "ARC-auktoriseringsförfrågan: %s" #: src/hed/shc/arcpdp/ArcPDP.cpp:192 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:153 #: src/hed/shc/gaclpdp/GACLPDP.cpp:155 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:114 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:170 msgid "No requested security information was collected" msgstr "Ingen begärd säkerhetsinformation samlades in" #: src/hed/shc/arcpdp/ArcPDP.cpp:199 msgid "Not authorized from arc.pdp - failed to get reponse from Evaluator" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:245 #, fuzzy msgid "Authorized by arc.pdp" msgstr "Auktoriserad från arc.pdp" #: src/hed/shc/arcpdp/ArcPDP.cpp:246 #, fuzzy msgid "" "Not authorized by arc.pdp - some of the RequestItem elements do not satisfy " "Policy" msgstr "Ej auktoriserad från arc.pdp; Några RequestItem uppfyller inte Policy" #: src/hed/shc/arcpdp/ArcPolicy.cpp:56 src/hed/shc/arcpdp/ArcPolicy.cpp:70 #: src/hed/shc/gaclpdp/GACLPolicy.cpp:46 src/hed/shc/gaclpdp/GACLPolicy.cpp:59 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:48 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:64 msgid "Policy is empty" msgstr "Policy är tom" #: src/hed/shc/arcpdp/ArcPolicy.cpp:114 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:115 #, c-format msgid "PolicyId: %s Alg inside this policy is:-- %s" msgstr "PolicyId: %s Alg inuti denna policy är:-- %s" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:75 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:129 msgid "No delegation policies in this context and message - passing through" msgstr "" "Inga delegeringspolicyer i denna kontext och meddelande - passerar igenom" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:95 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:109 msgid "Failed to convert security information to ARC policy" msgstr "Misslyckades med att konvertera säkerhetsinformation till ARC-policy" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:116 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:123 #, c-format msgid "ARC delegation policy: %s" msgstr "ARC delegeringspolicy: %s" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:161 msgid "No authorization response was returned" msgstr "Inget auktoriseringssvar returnerades" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:164 #, c-format msgid "There are %d requests, which satisfy at least one policy" msgstr "Det finns %d begärningar som uppfyller åtminstone en policy" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:183 msgid "Delegation authorization passed" msgstr "Delegeringsauktorisering lyckades" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:185 msgid "Delegation authorization failed" msgstr "Delegeringsauktorisering misslyckades" #: src/hed/shc/delegationsh/DelegationSH.cpp:63 msgid "" "Missing CertificatePath element or ProxyPath element, or " " is missing" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:68 #, fuzzy msgid "" "Missing or empty KeyPath element, or is missing" msgstr "KeyPath-element saknas eller är tomt" #: src/hed/shc/delegationsh/DelegationSH.cpp:74 #, fuzzy msgid "Missing or empty CertificatePath or CACertificatesDir element" msgstr "CertificatePath-element saknas eller är tomt" #: src/hed/shc/delegationsh/DelegationSH.cpp:81 #, fuzzy, c-format msgid "Delegation role not supported: %s" msgstr "SOAP-process understöds inte: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:90 #, fuzzy, c-format msgid "Delegation type not supported: %s" msgstr "Processeringstyp understöds inte: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:115 #, fuzzy msgid "Failed to acquire delegation context" msgstr "Misslyckades 
med att acceptera delegering" #: src/hed/shc/delegationsh/DelegationSH.cpp:143 #: src/hed/shc/delegationsh/DelegationSH.cpp:254 #, fuzzy msgid "Can't create delegation context" msgstr "Skapar delegering" #: src/hed/shc/delegationsh/DelegationSH.cpp:149 msgid "Delegation handler with delegatee role starts to process" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:152 #: src/services/a-rex/arex.cpp:554 #: src/services/cache_service/CacheService.cpp:529 #: src/services/data-staging/DataDeliveryService.cpp:630 msgid "process: POST" msgstr "process: POST" #: src/hed/shc/delegationsh/DelegationSH.cpp:159 #: src/services/a-rex/arex.cpp:561 #: src/services/cache_service/CacheService.cpp:538 #: src/services/data-staging/DataDeliveryService.cpp:639 #: src/services/wrappers/java/javawrapper.cpp:140 #: src/services/wrappers/python/pythonwrapper.cpp:413 msgid "input is not SOAP" msgstr "indata är inte SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:166 #, fuzzy, c-format msgid "Delegation service: %s" msgstr "Avvisar service: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:181 #: src/hed/shc/delegationsh/DelegationSH.cpp:188 #, fuzzy, c-format msgid "Can not get the delegation credential: %s from delegation service: %s" msgstr "Misslyckades med att hitta delegeringskreditiv i klientinställningar" #: src/hed/shc/delegationsh/DelegationSH.cpp:204 #: src/hed/shc/delegationsh/DelegationSH.cpp:268 #, fuzzy, c-format msgid "Delegated credential identity: %s" msgstr "Misslyckades med att förstöra kreditivhandtag: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:205 #, c-format msgid "" "The delegated credential got from delegation service is stored into path: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:218 msgid "The endpoint of delgation service should be configured" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:228 #: src/hed/shc/delegationsh/DelegationSH.cpp:340 #, fuzzy msgid "Delegation handler with delegatee role ends" msgstr "Användarnamnstokenhanteraren har ej ställts in" #: src/hed/shc/delegationsh/DelegationSH.cpp:260 msgid "Delegation handler with delegator role starts to process" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:269 #, c-format msgid "The delegated credential got from path: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:290 #, fuzzy, c-format msgid "Can not create delegation crendential to delegation service: %s" msgstr "Misslyckades med att hitta delegeringskreditiv i klientinställningar" #: src/hed/shc/delegationsh/DelegationSH.cpp:328 #: src/services/wrappers/java/javawrapper.cpp:144 msgid "output is not SOAP" msgstr "utdata är inte SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:339 #, c-format msgid "" "Succeeded to send DelegationService: %s and DelegationID: %s info to peer " "service" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:345 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:220 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:101 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:94 msgid "Incoming Message is not SOAP" msgstr "Inkommande meddelande är inte SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:352 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:341 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:123 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:108 msgid "Outgoing Message is not SOAP" msgstr "Utgående meddelande är inte SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:356 #, fuzzy msgid "Delegation handler is not configured" msgstr "Användarnamnstokenhanteraren har ej ställts in" #: 
src/hed/shc/gaclpdp/GACLPDP.cpp:121 msgid "Evaluator for GACLPDP was not loaded" msgstr "Utvärderare för GACLPDP laddades inte" #: src/hed/shc/gaclpdp/GACLPDP.cpp:152 #, c-format msgid "GACL Auth. request: %s" msgstr "GACL-auktoriseringsförfrågan: %s" #: src/hed/shc/gaclpdp/GACLPolicy.cpp:50 src/hed/shc/gaclpdp/GACLPolicy.cpp:63 msgid "Policy is not gacl" msgstr "Policy är inte gacl" #: src/hed/shc/legacy/ConfigParser.cpp:13 #, fuzzy msgid "Configuration file not specified" msgstr "Cacheinställningar: %s" #: src/hed/shc/legacy/ConfigParser.cpp:18 #: src/hed/shc/legacy/ConfigParser.cpp:53 #: src/hed/shc/legacy/ConfigParser.cpp:58 #, fuzzy msgid "Configuration file can not be read" msgstr "Jobstatus kunde inte inhämtas" #: src/hed/shc/legacy/ConfigParser.cpp:68 #, c-format msgid "Configuration file is broken - block name is too short: %s" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:31 src/hed/shc/legacy/LegacyPDP.cpp:96 #, fuzzy msgid "Configuration file not specified in ConfigBlock" msgstr "Inställningarnas rotobjekt är inte ArcConfig" #: src/hed/shc/legacy/LegacyMap.cpp:40 src/hed/shc/legacy/LegacyPDP.cpp:105 #, fuzzy msgid "BlockName is empty" msgstr "Policy är tom" #: src/hed/shc/legacy/LegacyMap.cpp:99 #, fuzzy, c-format msgid "Failed processing user mapping command: unixmap %s" msgstr "Misslyckades med att läsa GM-inställningsfil %s" #: src/hed/shc/legacy/LegacyMap.cpp:106 #, fuzzy, c-format msgid "Failed processing user mapping command: unixgroup %s" msgstr "Misslyckades med att läsa GM-inställningsfil %s" #: src/hed/shc/legacy/LegacyMap.cpp:113 #, fuzzy, c-format msgid "Failed processing user mapping command: unixvo %s" msgstr "Misslyckades med att läsa GM-inställningsfil %s" #: src/hed/shc/legacy/LegacyMap.cpp:173 #, fuzzy msgid "LegacyMap: no configurations blocks defined" msgstr "Cacheinställningar: %s" #: src/hed/shc/legacy/LegacyMap.cpp:195 src/hed/shc/legacy/LegacyPDP.cpp:210 msgid "" "LegacyPDP: there is no ARCLEGACY Sec Attribute defined. Probably ARC Legacy " "Sec Handler is not configured or failed." msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:200 src/hed/shc/legacy/LegacyPDP.cpp:215 msgid "LegacyPDP: ARC Legacy Sec Attribute not recognized." 
msgstr "" #: src/hed/shc/legacy/LegacyPDP.cpp:115 #, fuzzy, c-format msgid "Failed to parse configuration file %s" msgstr "Misslyckades med att läsa GM-inställningsfil %s" #: src/hed/shc/legacy/LegacyPDP.cpp:121 #, fuzzy, c-format msgid "Block %s not found in configuration file %s" msgstr "Läser inställningsfil: %s" #: src/hed/shc/legacy/LegacySecHandler.cpp:36 #: src/hed/shc/legacy/LegacySecHandler.cpp:110 #, fuzzy msgid "LegacySecHandler: configuration file not specified" msgstr "Cacheinställningar: %s" #: src/hed/shc/legacy/arc_lcas.cpp:146 src/hed/shc/legacy/arc_lcmaps.cpp:163 #, fuzzy, c-format msgid "" "Failed to convert GSI credential to GSS credential (major: %d, minor: %d)" msgstr "" "Misslyckades med att konvertera GSI-kreditiv till GSS-kreditiv (major: %d, " "minor: %d)" #: src/hed/shc/legacy/arc_lcas.cpp:172 src/hed/shc/legacy/arc_lcmaps.cpp:189 #, fuzzy msgid "Missing subject name" msgstr "Säkerhetsobjekt saknas i meddelande" #: src/hed/shc/legacy/arc_lcas.cpp:177 src/hed/shc/legacy/arc_lcmaps.cpp:194 #, fuzzy msgid "Missing path of credentials file" msgstr "Jobbidentifierare saknas" #: src/hed/shc/legacy/arc_lcas.cpp:182 msgid "Missing name of LCAS library" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:199 #, fuzzy, c-format msgid "Can't load LCAS library %s: %s" msgstr "Kan inte ladda policyobjekt: %s" #: src/hed/shc/legacy/arc_lcas.cpp:209 #, c-format msgid "Can't find LCAS functions in a library %s" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:219 #, fuzzy msgid "Failed to initialize LCAS" msgstr "Misslyckades med att initiera cache" #: src/hed/shc/legacy/arc_lcas.cpp:234 #, fuzzy msgid "Failed to terminate LCAS" msgstr "Misslyckades med att läsa nyckelfil: %s" #: src/hed/shc/legacy/arc_lcmaps.cpp:199 msgid "Missing name of LCMAPS library" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:213 #, fuzzy msgid "Can't read policy names" msgstr "Kan inte ladda policyobjekt" #: src/hed/shc/legacy/arc_lcmaps.cpp:224 #, c-format msgid "Can't load LCMAPS library %s: %s" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:236 #, c-format msgid "Can't find LCMAPS functions in a library %s" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:248 msgid "LCMAPS has lcmaps_run" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:249 msgid "LCMAPS has getCredentialData" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:253 #, fuzzy msgid "Failed to initialize LCMAPS" msgstr "Misslyckades med att initiera cache" #: src/hed/shc/legacy/arc_lcmaps.cpp:296 #, c-format msgid "LCMAPS returned invalid GID: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:299 #, fuzzy msgid "LCMAPS did not return any GID" msgstr "Kunde inte inhämta jobbinformation" #: src/hed/shc/legacy/arc_lcmaps.cpp:302 #, c-format msgid "LCMAPS returned UID which has no username: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:305 #, c-format msgid "LCMAPS returned invalid UID: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:308 #, fuzzy msgid "LCMAPS did not return any UID" msgstr "Kunde inte inhämta jobbinformation" #: src/hed/shc/legacy/arc_lcmaps.cpp:318 #, fuzzy msgid "Failed to terminate LCMAPS" msgstr "Misslyckades med att läsa nyckelfil: %s" #: src/hed/shc/legacy/auth.cpp:293 #, fuzzy, c-format msgid "Credentials stored in temporary file %s" msgstr "Kunde inte skapa temporär fil \"%s\"" #: src/hed/shc/legacy/auth.cpp:302 #, fuzzy, c-format msgid "Assigned to authorization group %s" msgstr "Delegeringsauktorisering lyckades" #: src/hed/shc/legacy/auth.cpp:307 #, c-format msgid "Assigned to VO %s" msgstr "" #: 
src/hed/shc/legacy/auth_file.cpp:24 #: src/services/gridftpd/auth/auth_file.cpp:24 #, fuzzy, c-format msgid "Failed to read file %s" msgstr "Misslyckades med att läsa fil %s: %s" #: src/hed/shc/legacy/auth_ldap.cpp:22 #, fuzzy msgid "LDAP authorization is not supported anymore" msgstr "SOAP-process understöds inte: %s" #: src/hed/shc/legacy/auth_plugin.cpp:44 src/hed/shc/legacy/unixmap.cpp:260 #: src/services/gridftpd/auth/auth_plugin.cpp:70 #: src/services/gridftpd/auth/unixmap.cpp:251 #, fuzzy, c-format msgid "Plugin %s returned: %u" msgstr "Svar: %s" #: src/hed/shc/legacy/auth_plugin.cpp:48 src/hed/shc/legacy/unixmap.cpp:264 #, fuzzy, c-format msgid "Plugin %s timeout after %u seconds" msgstr "Connect: Uppkoppling avbröts after %d ms" #: src/hed/shc/legacy/auth_plugin.cpp:51 src/hed/shc/legacy/unixmap.cpp:267 #, fuzzy, c-format msgid "Plugin %s failed to start" msgstr "%s: Plugin misslyckades" #: src/hed/shc/legacy/auth_plugin.cpp:53 src/hed/shc/legacy/unixmap.cpp:269 #, fuzzy, c-format msgid "Plugin %s printed: %s" msgstr "Svar: %s" #: src/hed/shc/legacy/auth_plugin.cpp:54 src/hed/shc/legacy/unixmap.cpp:270 #, fuzzy, c-format msgid "Plugin %s error: %s" msgstr "Globusfel: %s" #: src/hed/shc/legacy/auth_voms.cpp:39 #: src/services/gridftpd/auth/auth_voms.cpp:45 #, fuzzy msgid "Missing VO in configuration" msgstr "Cacherot saknas i inställningar" #: src/hed/shc/legacy/auth_voms.cpp:44 #: src/services/gridftpd/auth/auth_voms.cpp:51 #, fuzzy msgid "Missing group in configuration" msgstr "Cacherot saknas i inställningar" #: src/hed/shc/legacy/auth_voms.cpp:49 #: src/services/gridftpd/auth/auth_voms.cpp:57 #, fuzzy msgid "Missing role in configuration" msgstr "Cacherot saknas i inställningar" #: src/hed/shc/legacy/auth_voms.cpp:54 #: src/services/gridftpd/auth/auth_voms.cpp:63 #, fuzzy msgid "Missing capabilities in configuration" msgstr "Cacherot saknas i inställningar" #: src/hed/shc/legacy/auth_voms.cpp:58 #: src/services/gridftpd/auth/auth_voms.cpp:67 #, fuzzy, c-format msgid "Rule: vo: %s" msgstr "Omschemalagt jobb %s" #: src/hed/shc/legacy/auth_voms.cpp:59 #: src/services/gridftpd/auth/auth_voms.cpp:68 #, fuzzy, c-format msgid "Rule: group: %s" msgstr "Ingen sådan grupp: %s" #: src/hed/shc/legacy/auth_voms.cpp:60 #: src/services/gridftpd/auth/auth_voms.cpp:69 #, fuzzy, c-format msgid "Rule: role: %s" msgstr " Ort: %s" #: src/hed/shc/legacy/auth_voms.cpp:61 #: src/services/gridftpd/auth/auth_voms.cpp:70 #, fuzzy, c-format msgid "Rule: capabilities: %s" msgstr "Förfrågan misslyckades" #: src/hed/shc/legacy/auth_voms.cpp:64 #: src/services/gridftpd/auth/auth_voms.cpp:77 #, fuzzy, c-format msgid "Match vo: %s" msgstr "cacherot: %s" #: src/hed/shc/legacy/auth_voms.cpp:71 #, fuzzy, c-format msgid "Matched: %s %s %s %s" msgstr "check: %s (%s - %s > %s (%s))" #: src/hed/shc/legacy/auth_voms.cpp:86 #: src/services/gridftpd/auth/auth_voms.cpp:98 msgid "Matched nothing" msgstr "" #: src/hed/shc/legacy/simplemap.cpp:71 src/hed/shc/legacy/simplemap.cpp:76 #: src/services/gridftpd/auth/simplemap.cpp:63 #: src/services/gridftpd/auth/simplemap.cpp:68 #, fuzzy, c-format msgid "SimpleMap: %s" msgstr "uppstädning %s" #: src/hed/shc/legacy/unixmap.cpp:49 src/hed/shc/legacy/unixmap.cpp:54 #: src/hed/shc/legacy/unixmap.cpp:98 src/hed/shc/legacy/unixmap.cpp:103 #: src/hed/shc/legacy/unixmap.cpp:147 src/hed/shc/legacy/unixmap.cpp:152 #: src/services/gridftpd/auth/unixmap.cpp:47 #: src/services/gridftpd/auth/unixmap.cpp:52 #: src/services/gridftpd/auth/unixmap.cpp:96 #: 
src/services/gridftpd/auth/unixmap.cpp:101 #: src/services/gridftpd/auth/unixmap.cpp:145 #: src/services/gridftpd/auth/unixmap.cpp:150 #, fuzzy msgid "User name mapping command is empty" msgstr "mail är tom" #: src/hed/shc/legacy/unixmap.cpp:61 src/services/gridftpd/auth/unixmap.cpp:59 #, c-format msgid "User name mapping has empty group: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:72 src/hed/shc/legacy/unixmap.cpp:121 #: src/hed/shc/legacy/unixmap.cpp:169 #: src/services/gridftpd/auth/unixmap.cpp:70 #: src/services/gridftpd/auth/unixmap.cpp:119 #: src/services/gridftpd/auth/unixmap.cpp:167 #, c-format msgid "User name mapping has empty command: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:110 #: src/services/gridftpd/auth/unixmap.cpp:108 #, c-format msgid "User name mapping has empty VO: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:159 #: src/services/gridftpd/auth/unixmap.cpp:157 #, c-format msgid "User name mapping has empty name: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:208 src/hed/shc/legacy/unixmap.cpp:213 #: src/hed/shc/legacy/unixmap.cpp:229 src/hed/shc/legacy/unixmap.cpp:235 #: src/services/gridftpd/auth/unixmap.cpp:212 #: src/services/gridftpd/auth/unixmap.cpp:217 #: src/services/gridftpd/auth/unixmap.cpp:233 msgid "Plugin (user mapping) command is empty" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:219 #: src/services/gridftpd/auth/unixmap.cpp:223 #, c-format msgid "Plugin (user mapping) timeout is not a number: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:223 #: src/services/gridftpd/auth/unixmap.cpp:227 #, c-format msgid "Plugin (user mapping) timeout is wrong number: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:257 #: src/services/gridftpd/auth/unixmap.cpp:248 #, fuzzy, c-format msgid "Plugin %s returned too much: %s" msgstr "Svar: %s" #: src/hed/shc/legacy/unixmap.cpp:278 msgid "User subject match is missing user subject." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:282 #: src/services/gridftpd/auth/unixmap.cpp:266 #, fuzzy, c-format msgid "Mapfile at %s can't be opened." msgstr "URL:en är inte giltig: %s" #: src/hed/shc/legacy/unixmap.cpp:305 #: src/services/gridftpd/auth/unixmap.cpp:290 msgid "User pool call is missing user subject." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:310 #: src/services/gridftpd/auth/unixmap.cpp:295 #, c-format msgid "User pool at %s can't be opened." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:315 #: src/services/gridftpd/auth/unixmap.cpp:300 #, c-format msgid "User pool at %s failed to perform user mapping." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:332 #: src/services/gridftpd/auth/unixmap.cpp:317 #, c-format msgid "User name direct mapping is missing user name: %s." 
msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:48 msgid "Creating a pdpservice client" msgstr "Skapar en pdpservice-klient" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:80 msgid "Arc policy can not been carried by SAML2.0 profile of XACML" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:152 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:185 msgid "Policy Decision Service invocation failed" msgstr "Policy Decision Service-anrop misslyckades" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:170 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:204 msgid "Authorized from remote pdp service" msgstr "Auktoriserad från fjärr-pdp-service" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:171 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:205 msgid "Unauthorized from remote pdp service" msgstr "Oauktoriserad från fjärr-pdp-service" #: src/hed/shc/saml2sso_assertionconsumersh/SAML2SSO_AssertionConsumerSH.cpp:69 #, fuzzy msgid "Can not get SAMLAssertion SecAttr from message context" msgstr "Kan inte erhålla tillägg från utfärdarcertifikat" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:152 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:44 msgid "Missing or empty CertificatePath element" msgstr "CertificatePath-element saknas eller är tomt" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:157 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:49 msgid "Missing or empty KeyPath element" msgstr "KeyPath-element saknas eller är tomt" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:163 #, fuzzy msgid "" "Both of CACertificatePath and CACertificatesDir elements missing or empty" msgstr "CertificatePath-element saknas eller är tomt" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:175 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:61 msgid "" "Missing or empty CertificatePath or CACertificatesDir element; will only " "check the signature, will not do message authentication" msgstr "" "CertificatePath- eller CACertificatesDir-element saknas eller är tomt; " "kommer endast att kontrollera signatrur, kommer ej att göra " "meddelandeautenticering" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:179 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:65 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:65 #, c-format msgid "Processing type not supported: %s" msgstr "Processeringstyp understöds inte: %s" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:199 #, fuzzy msgid "Failed to parse SAML Token from incoming SOAP" msgstr "Misslyckades med att tolka X509-token från inkommande SOAP" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:209 #, fuzzy msgid "Failed to authenticate SAML Token inside the incoming SOAP" msgstr "Misslyckades med att autenticera X509-token inuti inkommande SOAP" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:212 #, fuzzy msgid "Succeeded to authenticate SAMLToken" msgstr "Lyckades med att autenticera X509-token" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:275 #, fuzzy, c-format msgid "No response from AA service %s" msgstr "Inget jobb-id har mottagits" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:279 #, fuzzy, c-format msgid "SOAP Request to AA service %s failed" msgstr "WSRF-förfrågan misslyckades" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:287 msgid "Cannot find content under response soap message" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:291 msgid "Cannot find under response soap message:" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:308 msgid "The Response is not going to this end" msgstr "Svaret kommer inte till denna ände" #: 
src/hed/shc/samltokensh/SAMLTokenSH.cpp:315 msgid "The StatusCode is Success" msgstr "Statuskoden är Success" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:335 #, fuzzy msgid "Failed to generate SAML Token for outgoing SOAP" msgstr "Misslyckades med att skapa X509-token för utgående SOAP" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:345 #, fuzzy msgid "SAML Token handler is not configured" msgstr "X509-tokenhanteraren har ej ställts in" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:29 #, c-format msgid "Access list location: %s" msgstr "Tillträdeslista: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:39 #, fuzzy msgid "" "No policy file or DNs specified for simplelist.pdp, please set location " "attribute or at least one DN element for simplelist PDP node in " "configuration." msgstr "" "Ingen policyfil angiven för simplelist.pdp, vänligen ange platsattribut för " "simplelist PDP nod i serviceinställningarna" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:42 #, fuzzy, c-format msgid "Subject to match: %s" msgstr "subjekt: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:45 #, fuzzy, c-format msgid "Policy subject: %s" msgstr "subjekt: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:47 #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:73 #, fuzzy, c-format msgid "Authorized from simplelist.pdp: %s" msgstr "Auktoriserad från simplelist.pdp" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:54 msgid "" "The policy file setup for simplelist.pdp does not exist, please check " "location attribute for simplelist PDP node in service configuration" msgstr "" "Policyfilen angiven för simplelist.pdp existerar inte, vänligen kontrollera " "platsattribut för simplelist PDP nod i serviceinställningarna" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:61 #, fuzzy, c-format msgid "Policy line: %s" msgstr "policyrad: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:79 #, fuzzy, c-format msgid "Not authorized from simplelist.pdp: %s" msgstr "Ej auktoriserad från simplelist.pdp" #: src/hed/shc/test.cpp:27 src/hed/shc/testinterface_arc.cpp:26 #: src/hed/shc/testinterface_xacml.cpp:26 msgid "Start test" msgstr "Påbörja test" #: src/hed/shc/test.cpp:101 msgid "Input request from a file: Request.xml" msgstr "Mata in begäran från en fil: Request.xml" #: src/hed/shc/test.cpp:107 src/hed/shc/test.cpp:197 #: src/hed/shc/testinterface_arc.cpp:124 #, c-format msgid "There is %d subjects, which satisfy at least one policy" msgstr "Det finns %d subjekt som uppfyller åtminstone en policy" #: src/hed/shc/test.cpp:121 #, c-format msgid "Attribute Value (1): %s" msgstr "Attributvärde (1): %s" #: src/hed/shc/test.cpp:132 msgid "Input request from code" msgstr "Mata in begäran från kod" #: src/hed/shc/test.cpp:211 #, c-format msgid "Attribute Value (2): %s" msgstr "Attributvärde (2): %s" #: src/hed/shc/testinterface_arc.cpp:75 src/hed/shc/testinterface_xacml.cpp:46 #, fuzzy msgid "Can not dynamically produce Policy" msgstr "Kan inte skapa AlgFactory dynamiskt" #: src/hed/shc/testinterface_arc.cpp:138 #, c-format msgid "Attribute Value inside Subject: %s" msgstr "Attributvärde inuti Subject: %s" #: src/hed/shc/testinterface_arc.cpp:148 msgid "The request has passed the policy evaluation" msgstr "Begäran har passerat policyutvärderingen" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:43 msgid "Missing or empty PasswordSource element" msgstr "PasswordSource-element saknas eller är tomt" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:54 #, c-format msgid "Password encoding type not supported: %s" msgstr 
"Lösenordskodningstyp ej understödd: %s" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:59 msgid "Missing or empty Username element" msgstr "Username-element saknas eller är tomt" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:79 #, fuzzy msgid "The payload of incoming message is empty" msgstr "inkommande meddelande är inte SOAP" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:84 #, fuzzy msgid "Failed to cast PayloadSOAP from incoming payload" msgstr "Misslyckades med att tolka X509-token från inkommande SOAP" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:89 msgid "Failed to parse Username Token from incoming SOAP" msgstr "Misslyckades med att tolka användarnamnstoken från inkommande SOAP" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:95 msgid "Failed to authenticate Username Token inside the incoming SOAP" msgstr "" "Misslyckades med att autentisera användarnamnstoken inuti inkommande SOAP" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:98 msgid "Succeeded to authenticate UsernameToken" msgstr "Lyckades med att autentisera användarnamnstoken" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:108 msgid "The payload of outgoing message is empty" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:113 msgid "Failed to cast PayloadSOAP from outgoing payload" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:119 msgid "Failed to generate Username Token for outgoing SOAP" msgstr "Misslyckades med att skapa användarnamnstoken för utgående SOAP" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:127 msgid "Username Token handler is not configured" msgstr "Användarnamnstokenhanteraren har ej ställts in" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:81 msgid "Failed to parse X509 Token from incoming SOAP" msgstr "Misslyckades med att tolka X509-token från inkommande SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:85 msgid "Failed to verify X509 Token inside the incoming SOAP" msgstr "Misslyckades med att verifiera X509-token inuti inkommande SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:89 msgid "Failed to authenticate X509 Token inside the incoming SOAP" msgstr "Misslyckades med att autenticera X509-token inuti inkommande SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:92 msgid "Succeeded to authenticate X509Token" msgstr "Lyckades med att autenticera X509-token" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:102 msgid "Failed to generate X509 Token for outgoing SOAP" msgstr "Misslyckades med att skapa X509-token för utgående SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:112 msgid "X509 Token handler is not configured" msgstr "X509-tokenhanteraren har ej ställts in" #: src/hed/shc/xacmlpdp/XACMLApply.cpp:29 msgid "Can not create function: FunctionId does not exist" msgstr "" #: src/hed/shc/xacmlpdp/XACMLApply.cpp:33 #: src/hed/shc/xacmlpdp/XACMLTarget.cpp:40 #, fuzzy, c-format msgid "Can not create function %s" msgstr "Kan inte skapa SOAP-klient" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:88 #, fuzzy msgid "Can not find XACMLPDPContext" msgstr "Kan inte hitta MCC-status-objekt" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:136 #, fuzzy msgid "Evaluator for XACMLPDP was not loaded" msgstr "Utvärderare för GACLPDP laddedes inte" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:151 src/hed/shc/xacmlpdp/XACMLPDP.cpp:159 #, fuzzy msgid "Failed to convert security information to XACML request" msgstr "Misslyckades med att konvertera säkerhetsinformation till ARC-begäran" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:167 #, fuzzy, c-format msgid "XACML request: %s" msgstr 
"GACL-auktoriseringsförfrågan: %s" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:179 #, fuzzy msgid "Authorized from xacml.pdp" msgstr "Auktoriserad från arc.pdp" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:180 #, fuzzy msgid "UnAuthorized from xacml.pdp" msgstr "Auktoriserad från arc.pdp" #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:55 msgid "Can not find element with proper namespace" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:132 msgid "No target available inside the policy" msgstr "Inget mål tillgängligt inuti policyn" #: src/hed/shc/xacmlpdp/XACMLRequest.cpp:34 #, fuzzy msgid "Request is empty" msgstr "Förfrågan-nod är tom" #: src/hed/shc/xacmlpdp/XACMLRequest.cpp:39 msgid "Can not find element with proper namespace" msgstr "" #: src/hed/shc/xacmlpdp/XACMLRule.cpp:35 msgid "Invalid Effect" msgstr "Ogiltig effekt" #: src/hed/shc/xacmlpdp/XACMLRule.cpp:48 msgid "No target available inside the rule" msgstr "Inget mål tillgängligt inuti regeln" #: src/libs/data-staging/DTR.cpp:86 src/libs/data-staging/DTR.cpp:90 #, fuzzy, c-format msgid "Could not handle endpoint %s" msgstr "Kan inte läsa certifikatfil: %s" #: src/libs/data-staging/DTR.cpp:100 #, fuzzy msgid "Source is the same as destination" msgstr "källa destination" #: src/libs/data-staging/DTR.cpp:174 #, fuzzy, c-format msgid "Invalid ID: %s" msgstr "Ogiltig URL: %s" #: src/libs/data-staging/DTR.cpp:211 #, fuzzy, c-format msgid "%s->%s" msgstr "%s -> %s" #: src/libs/data-staging/DTR.cpp:330 #, c-format msgid "No callback for %s defined" msgstr "" #: src/libs/data-staging/DTR.cpp:345 #, c-format msgid "NULL callback for %s" msgstr "" #: src/libs/data-staging/DTR.cpp:348 #, c-format msgid "Request to push to unknown owner - %u" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:48 #: src/libs/data-staging/DataDelivery.cpp:72 #, fuzzy msgid "Received invalid DTR" msgstr "Misslyckades med att starta jobb" #: src/libs/data-staging/DataDelivery.cpp:54 #, c-format msgid "Delivery received new DTR %s with source: %s, destination: %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:68 #, fuzzy msgid "Received no DTR" msgstr "Misslyckades med att starta jobb" #: src/libs/data-staging/DataDelivery.cpp:80 #, c-format msgid "Cancelling DTR %s with source: %s, destination: %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:91 #, c-format msgid "DTR %s requested cancel but no active transfer" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:147 #, fuzzy, c-format msgid "Cleaning up after failure: deleting %s" msgstr "Rensar alla filer i katalogerna %s och %s" #: src/libs/data-staging/DataDelivery.cpp:188 #: src/libs/data-staging/DataDelivery.cpp:263 #: src/libs/data-staging/DataDelivery.cpp:303 #: src/libs/data-staging/DataDelivery.cpp:323 msgid "Failed to delete delivery object or deletion timed out" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:254 #, c-format msgid "Transfer finished: %llu bytes transferred %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:329 msgid "Data delivery loop exited" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:137 #, fuzzy, c-format msgid "Bad checksum format %s" msgstr "Ogiltigt ISO-tidsperiodsformat: %s" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:210 #, fuzzy, c-format msgid "DataDelivery: %s" msgstr "Fatalt fel: %s" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:222 #, c-format msgid "DataStagingDelivery exited with code %i" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:241 #, c-format msgid "Transfer killed after %i seconds without communication" msgstr "" #: 
src/libs/data-staging/DataDeliveryRemoteComm.cpp:67 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:315 #, fuzzy, c-format msgid "Connecting to Delivery service at %s" msgstr "Connect: Misslyckades med att läsa proxyfil: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:94 #, fuzzy, c-format msgid "Failed to set up credential delegation with %s" msgstr "Misslyckades med att initiera delegering" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:106 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:332 #, fuzzy, c-format msgid "Could not connect to service %s: %s" msgstr "Misslyckades med att koppla upp mot %s:%d" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:114 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:340 #, fuzzy, c-format msgid "No SOAP response from Delivery service %s" msgstr "Inget jobb-id har mottagits" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:128 #, fuzzy, c-format msgid "Failed to start transfer request: %s" msgstr "Misslyckades med att påbörja läsning från källa: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:135 #, fuzzy, c-format msgid "Bad format in XML response from service at %s: %s" msgstr "Felaktigt format i fil %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:143 #, fuzzy, c-format msgid "Could not make new transfer request: %s: %s" msgstr "Kan inte ladda ARC-begäranobjekt: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:148 #, c-format msgid "Started remote Delivery at %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:181 #, fuzzy, c-format msgid "Failed to send cancel request: %s" msgstr "Misslyckades med att starta ny tråd" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:188 #, fuzzy msgid "Failed to cancel: No SOAP response" msgstr "Misslyckades med att skapa indata-SOAP-behållare" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:202 #, fuzzy, c-format msgid "Failed to cancel transfer request: %s" msgstr "Misslyckades med att skriva fil %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:209 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:290 #, fuzzy, c-format msgid "Bad format in XML response: %s" msgstr "Felaktigt format i fil %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:216 #, fuzzy, c-format msgid "Failed to cancel: %s" msgstr "Misslyckades med att avbryta jobb" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:260 #, fuzzy msgid "No SOAP response from delivery service" msgstr "Inget jobb-id har mottagits" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:281 #, fuzzy, c-format msgid "Failed to query state: %s" msgstr "Misslyckades med autentisering" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:355 #, c-format msgid "SOAP fault from delivery service at %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:363 #, c-format msgid "Bad format in XML response from delivery service at %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:371 #, fuzzy, c-format msgid "Error pinging delivery service at %s: %s: %s" msgstr "Söker förvalda services" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:379 #, fuzzy, c-format msgid "Dir %s allowed at service %s" msgstr "Avvisar service: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:473 #, c-format msgid "" "DataDelivery log tail:\n" "%s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:486 #, fuzzy msgid "Failed locating credentials" msgstr "Misslyckades med att lista metafiler" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:491 #, fuzzy 
msgid "Failed to initiate client connection" msgstr "Misslyckades med att initiera delegering" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:497 #, fuzzy msgid "Client connection has no entry point" msgstr "Klientkedjan har ingen ingångspunkt" #: src/libs/data-staging/DataStagingDelivery.cpp:134 #, fuzzy msgid "Unexpected arguments" msgstr "Kan inte skapa inställningsargument" #: src/libs/data-staging/DataStagingDelivery.cpp:137 #, fuzzy msgid "Source URL missing" msgstr "Peer-namn: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:140 #, fuzzy msgid "Destination URL missing" msgstr "Destinaltion: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:144 #, fuzzy, c-format msgid "Source URL not valid: %s" msgstr "URL:en är inte giltig: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:148 #, fuzzy, c-format msgid "Destination URL not valid: %s" msgstr "URL:en är inte giltig: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:205 #, fuzzy, c-format msgid "Unknown transfer option: %s" msgstr " attribut:" #: src/libs/data-staging/DataStagingDelivery.cpp:230 #, fuzzy, c-format msgid "Source URL not supported: %s" msgstr "Processeringstyp understöds inte: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:235 #: src/libs/data-staging/DataStagingDelivery.cpp:254 msgid "No credentials supplied" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:249 #, fuzzy, c-format msgid "Destination URL not supported: %s" msgstr "SOAP-process understöds inte: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:298 #, fuzzy, c-format msgid "Will calculate %s checksum" msgstr "meta_get_data: checksum: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:309 msgid "Cannot use supplied --size option" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:458 #, c-format msgid "Checksum mismatch between calculated checksum %s and source checksum %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:468 #, fuzzy, c-format msgid "Failed cleaning up destination %s" msgstr "Misslyckades under skrivning till destination" #: src/libs/data-staging/Processor.cpp:60 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:418 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:435 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:331 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:348 #: src/services/cache_service/CacheService.cpp:114 #, fuzzy msgid "Error creating cache" msgstr "Fel vid listning av replikor: %s" #: src/libs/data-staging/Processor.cpp:86 #, c-format msgid "Forcing re-download of file %s" msgstr "Framtvingar åternedläsning av fil %s" #: src/libs/data-staging/Processor.cpp:103 #, c-format msgid "Will wait around %is" msgstr "" #: src/libs/data-staging/Processor.cpp:123 #, fuzzy, c-format msgid "Force-checking source of cache file %s" msgstr "Tillträdesfel för cachefil %s: %s" #: src/libs/data-staging/Processor.cpp:126 #, fuzzy, c-format msgid "Source check requested but failed: %s" msgstr "Källan skapades: %s" #: src/libs/data-staging/Processor.cpp:146 msgid "Permission checking failed, will try downloading without using cache" msgstr "" #: src/libs/data-staging/Processor.cpp:177 #, fuzzy, c-format msgid "Will download to cache file %s" msgstr "Filnedladdning misslyckades: %s" #: src/libs/data-staging/Processor.cpp:199 msgid "Looking up source replicas" msgstr "" #: src/libs/data-staging/Processor.cpp:217 #: src/libs/data-staging/Processor.cpp:329 #, fuzzy, c-format msgid "Skipping replica on local host %s" msgstr "Tillträdeslista: %s" #: 
src/libs/data-staging/Processor.cpp:225 #: src/libs/data-staging/Processor.cpp:337 #, fuzzy, c-format msgid "No locations left for %s" msgstr "Inga platser funna för destination" #: src/libs/data-staging/Processor.cpp:248 #: src/libs/data-staging/Processor.cpp:496 #, fuzzy msgid "Resolving destination replicas" msgstr "Problem med att slå upp destination" #: src/libs/data-staging/Processor.cpp:266 #, fuzzy msgid "No locations for destination different from source found" msgstr "Inga platser för destinationen som skiljer sig från källan funna: %s" #: src/libs/data-staging/Processor.cpp:278 #, fuzzy msgid "Pre-registering destination in index service" msgstr "Skapar och sänder en servicestatusförfrågan" #: src/libs/data-staging/Processor.cpp:305 msgid "Resolving source replicas in bulk" msgstr "" #: src/libs/data-staging/Processor.cpp:319 #, fuzzy, c-format msgid "No replicas found for %s" msgstr "Inga platser funna för destination" #: src/libs/data-staging/Processor.cpp:361 #, fuzzy, c-format msgid "Checking %s" msgstr "checkpoint: %s" #: src/libs/data-staging/Processor.cpp:370 #: src/libs/data-staging/Processor.cpp:429 #, fuzzy msgid "Metadata of replica and index service differ" msgstr "Källans och destinationens metadata är olika" #: src/libs/data-staging/Processor.cpp:378 #, fuzzy, c-format msgid "Failed checking source replica %s: %s" msgstr "Misslyckades med att koppla upp mot %s:%d" #: src/libs/data-staging/Processor.cpp:405 msgid "Querying source replicas in bulk" msgstr "" #: src/libs/data-staging/Processor.cpp:417 #, fuzzy, c-format msgid "Failed checking source replica: %s" msgstr "Misslyckades med att städa upp efter jobb %s" #: src/libs/data-staging/Processor.cpp:423 #, fuzzy msgid "Failed checking source replica" msgstr "Misslyckades under läsning från källa" #: src/libs/data-staging/Processor.cpp:464 #, fuzzy msgid "Finding existing destination replicas" msgstr "Misslyckades under skrivning till destination" #: src/libs/data-staging/Processor.cpp:476 #, fuzzy, c-format msgid "Failed to delete replica %s: %s" msgstr "Misslyckades med att skriva fil %s: %s" #: src/libs/data-staging/Processor.cpp:490 #, fuzzy, c-format msgid "Unregistering %s" msgstr "Registrerar till %s ISIS" #: src/libs/data-staging/Processor.cpp:501 #, fuzzy msgid "Pre-registering destination" msgstr "Problem med att slå upp destination" #: src/libs/data-staging/Processor.cpp:507 #, fuzzy, c-format msgid "Failed to pre-clean destination: %s" msgstr "Misslyckades med att slå upp destination: %s" #: src/libs/data-staging/Processor.cpp:531 msgid "Preparing to stage source" msgstr "" #: src/libs/data-staging/Processor.cpp:544 #, c-format msgid "Source is not ready, will wait %u seconds" msgstr "" #: src/libs/data-staging/Processor.cpp:550 #, fuzzy msgid "No physical files found for source" msgstr "Inga platser funna för destination" #: src/libs/data-staging/Processor.cpp:569 #, fuzzy msgid "Preparing to stage destination" msgstr "Försöker med nästa destination (igen)" #: src/libs/data-staging/Processor.cpp:582 #, c-format msgid "Destination is not ready, will wait %u seconds" msgstr "" #: src/libs/data-staging/Processor.cpp:588 #, fuzzy msgid "No physical files found for destination" msgstr "Inga platser funna för destination" #: src/libs/data-staging/Processor.cpp:615 #, fuzzy msgid "Releasing source" msgstr "Försöker med nästa källa (igen)" #: src/libs/data-staging/Processor.cpp:619 #, c-format msgid "There was a problem during post-transfer source handling: %s" msgstr "" #: 
src/libs/data-staging/Processor.cpp:624 #, fuzzy msgid "Releasing destination" msgstr "Försöker med nästa destination (igen)" #: src/libs/data-staging/Processor.cpp:628 #, c-format msgid "" "There was a problem during post-transfer destination handling after error: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:632 #, fuzzy, c-format msgid "Error with post-transfer destination handling: %s" msgstr "Inga platser funna för destination: %s" #: src/libs/data-staging/Processor.cpp:659 #, fuzzy msgid "Removing pre-registered destination in index service" msgstr "Misslyckades med att förregistrera destination: %s" #: src/libs/data-staging/Processor.cpp:662 #, fuzzy, c-format msgid "" "Failed to unregister pre-registered destination %s: %s. You may need to " "unregister it manually" msgstr "" "Misslyckades med att avregistrera förregistrerad lfn. Du kan behöva " "avregistrera det manuellt" #: src/libs/data-staging/Processor.cpp:668 #, fuzzy msgid "Registering destination replica" msgstr "Försöker med nästa destination (igen)" #: src/libs/data-staging/Processor.cpp:671 #, fuzzy, c-format msgid "Failed to register destination replica: %s" msgstr "Misslyckades med att förregistrera destination: %s" #: src/libs/data-staging/Processor.cpp:674 #, fuzzy, c-format msgid "" "Failed to unregister pre-registered destination %s. You may need to " "unregister it manually" msgstr "" "Misslyckades med att avregistrera förregistrerad lfn. Du kan behöva " "avregistrera det manuellt" #: src/libs/data-staging/Processor.cpp:705 #, fuzzy msgid "Error creating cache. Stale locks may remain." msgstr "Fel vid listning av replikor: %s" #: src/libs/data-staging/Processor.cpp:740 #, fuzzy, c-format msgid "Linking/copying cached file to %s" msgstr "Länkar/kopierar cachad fil" #: src/libs/data-staging/Processor.cpp:761 #, fuzzy, c-format msgid "Failed linking cache file to %s" msgstr "Misslyckades med att lista filer" #: src/libs/data-staging/Processor.cpp:765 #, fuzzy, c-format msgid "Error linking cache file to %s." msgstr "Fel vid borttagande av cachefil %s: %s" #: src/libs/data-staging/Processor.cpp:787 #: src/libs/data-staging/Processor.cpp:794 msgid "Adding to bulk request" msgstr "" #: src/libs/data-staging/Scheduler.cpp:174 #, c-format msgid "Using next %s replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:181 #, c-format msgid "No more %s replicas" msgstr "" #: src/libs/data-staging/Scheduler.cpp:183 #, fuzzy msgid "Will clean up pre-registered destination" msgstr "Misslyckades med att förregistrera destination: %s" #: src/libs/data-staging/Scheduler.cpp:187 msgid "Will release cache locks" msgstr "" #: src/libs/data-staging/Scheduler.cpp:190 msgid "Moving to end of data staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:199 #, fuzzy, c-format msgid "Source is mapped to %s" msgstr "Url mappas till: %s" #: src/libs/data-staging/Scheduler.cpp:203 msgid "Cannot link to source which can be modified, will copy instead" msgstr "" #: src/libs/data-staging/Scheduler.cpp:213 #, fuzzy msgid "Cannot link to a remote destination. Will not use mapped URL" msgstr "Kan inte acceptera destination som URL" #: src/libs/data-staging/Scheduler.cpp:216 #, fuzzy msgid "Linking mapped file" msgstr "Länkar lokal fil" #: src/libs/data-staging/Scheduler.cpp:223 #, fuzzy, c-format msgid "Failed to create link: %s. 
Will not use mapped URL" msgstr "Misslyckades med att skapa mjuk länk: %s" #: src/libs/data-staging/Scheduler.cpp:235 msgid "Linking mapped file - can't link on Windows" msgstr "" #: src/libs/data-staging/Scheduler.cpp:251 #, c-format msgid "" "Scheduler received new DTR %s with source: %s, destination: %s, assigned to " "transfer share %s with priority %d" msgstr "" #: src/libs/data-staging/Scheduler.cpp:258 msgid "" "File is not cacheable, was requested not to be cached or no cache available, " "skipping cache check" msgstr "" #: src/libs/data-staging/Scheduler.cpp:264 msgid "File is cacheable, will check cache" msgstr "" #: src/libs/data-staging/Scheduler.cpp:267 #: src/libs/data-staging/Scheduler.cpp:293 #, c-format msgid "File is currently being cached, will wait %is" msgstr "" #: src/libs/data-staging/Scheduler.cpp:286 #, fuzzy msgid "Timed out while waiting for cache lock" msgstr "Misslyckades under väntan på förbindelseförfrågan" #: src/libs/data-staging/Scheduler.cpp:297 #, fuzzy msgid "Checking cache again" msgstr "Kopierar cachad fil" #: src/libs/data-staging/Scheduler.cpp:317 #, fuzzy msgid "Destination file is in cache" msgstr "Destination måste innehålla LFN" #: src/libs/data-staging/Scheduler.cpp:321 msgid "Source and/or destination is index service, will resolve replicas" msgstr "" #: src/libs/data-staging/Scheduler.cpp:324 msgid "" "Neither source nor destination are index services, will skip resolving " "replicas" msgstr "" #: src/libs/data-staging/Scheduler.cpp:334 msgid "Problem with index service, will release cache lock" msgstr "" #: src/libs/data-staging/Scheduler.cpp:338 msgid "Problem with index service, will proceed to end of data staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:348 msgid "Checking source file is present" msgstr "" #: src/libs/data-staging/Scheduler.cpp:356 msgid "Error with source file, moving to next replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:378 #, c-format msgid "Replica %s has long latency, trying next replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:380 #, c-format msgid "No more replicas, will use %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:383 #, fuzzy, c-format msgid "Checking replica %s" msgstr "Lägg till plats: metadata: %s" #: src/libs/data-staging/Scheduler.cpp:393 msgid "Overwrite requested - will pre-clean destination" msgstr "" #: src/libs/data-staging/Scheduler.cpp:396 msgid "No overwrite requested or allowed, skipping pre-cleaning" msgstr "" #: src/libs/data-staging/Scheduler.cpp:404 msgid "Pre-clean failed, will still try to copy" msgstr "" #: src/libs/data-staging/Scheduler.cpp:411 #, fuzzy msgid "Source or destination requires staging" msgstr "källa destination" #: src/libs/data-staging/Scheduler.cpp:415 msgid "No need to stage source or destination, skipping staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:445 msgid "Staging request timed out, will release request" msgstr "" #: src/libs/data-staging/Scheduler.cpp:449 #, fuzzy msgid "Querying status of staging request" msgstr "Skapar och skickar förfrågan" #: src/libs/data-staging/Scheduler.cpp:458 #, fuzzy msgid "Releasing requests" msgstr "En förfrågan om uppstädning efter ett jobb lyckades" #: src/libs/data-staging/Scheduler.cpp:475 msgid "DTR is ready for transfer, moving to delivery queue" msgstr "" #: src/libs/data-staging/Scheduler.cpp:490 #, fuzzy, c-format msgid "Transfer failed: %s" msgstr "Några överföringar misslyckades" #: src/libs/data-staging/Scheduler.cpp:500 #, fuzzy msgid "Releasing request(s) made during 
staging" msgstr "Läste begäran från en sträng" #: src/libs/data-staging/Scheduler.cpp:503 msgid "Neither source nor destination were staged, skipping releasing requests" msgstr "" #: src/libs/data-staging/Scheduler.cpp:515 #, fuzzy msgid "Trying next replica" msgstr "Försöker med nästa källa (igen)" #: src/libs/data-staging/Scheduler.cpp:519 #, c-format msgid "Will %s in destination index service" msgstr "" #: src/libs/data-staging/Scheduler.cpp:523 msgid "Destination is not index service, skipping replica registration" msgstr "" #: src/libs/data-staging/Scheduler.cpp:536 msgid "Error registering replica, moving to end of data staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:544 #, fuzzy msgid "Will process cache" msgstr "klientprocess anropad" #: src/libs/data-staging/Scheduler.cpp:548 msgid "File is not cacheable, skipping cache processing" msgstr "" #: src/libs/data-staging/Scheduler.cpp:562 #, fuzzy msgid "Cancellation complete" msgstr "Överföring slutförd" #: src/libs/data-staging/Scheduler.cpp:576 msgid "Will wait 10s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:582 msgid "Error in cache processing, will retry without caching" msgstr "" #: src/libs/data-staging/Scheduler.cpp:591 msgid "Will retry without caching" msgstr "" #: src/libs/data-staging/Scheduler.cpp:609 msgid "Proxy has expired" msgstr "" #: src/libs/data-staging/Scheduler.cpp:620 #, c-format msgid "%i retries left, will wait until %s before next attempt" msgstr "" #: src/libs/data-staging/Scheduler.cpp:636 msgid "Out of retries" msgstr "" #: src/libs/data-staging/Scheduler.cpp:638 msgid "Permanent failure" msgstr "" #: src/libs/data-staging/Scheduler.cpp:644 #, fuzzy msgid "Finished successfully" msgstr "Stängdes OK" #: src/libs/data-staging/Scheduler.cpp:654 #, fuzzy msgid "Returning to generator" msgstr "Återanvänder förbindelse" #: src/libs/data-staging/Scheduler.cpp:820 #, c-format msgid "File is smaller than %llu bytes, will use local delivery" msgstr "" #: src/libs/data-staging/Scheduler.cpp:874 #, c-format msgid "Delivery service at %s can copy to %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:882 #, c-format msgid "Delivery service at %s can copy from %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:895 msgid "Could not find any useable delivery service, forcing local transfer" msgstr "" #: src/libs/data-staging/Scheduler.cpp:911 #, c-format msgid "Not using delivery service at %s because it is full" msgstr "" #: src/libs/data-staging/Scheduler.cpp:938 #, c-format msgid "Not using delivery service %s due to previous failure" msgstr "" #: src/libs/data-staging/Scheduler.cpp:948 msgid "No remote delivery services are useable, forcing local delivery" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1149 msgid "Cancelling active transfer" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1165 msgid "Processing thread timed out. 
Restarting DTR" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1233 #, fuzzy msgid "Will use bulk request" msgstr "Misslyckades med att starta ny tråd" #: src/libs/data-staging/Scheduler.cpp:1255 msgid "No delivery endpoints available, will try later" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1274 msgid "Scheduler received NULL DTR" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1284 msgid "Scheduler received invalid DTR" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1373 #, fuzzy msgid "Scheduler starting up" msgstr "Misslyckades med att starta jobb" #: src/libs/data-staging/Scheduler.cpp:1374 #, fuzzy msgid "Scheduler configuration:" msgstr "Cacheinställningar: %s" #: src/libs/data-staging/Scheduler.cpp:1375 #, fuzzy, c-format msgid " Pre-processor slots: %u" msgstr "Processera jobb: %s" #: src/libs/data-staging/Scheduler.cpp:1376 #, fuzzy, c-format msgid " Delivery slots: %u" msgstr " Använda slots: %d" #: src/libs/data-staging/Scheduler.cpp:1377 #, fuzzy, c-format msgid " Post-processor slots: %u" msgstr "Processera jobb: %s" #: src/libs/data-staging/Scheduler.cpp:1378 #, fuzzy, c-format msgid " Emergency slots: %u" msgstr " Använda slots: %d" #: src/libs/data-staging/Scheduler.cpp:1379 #, fuzzy, c-format msgid " Prepared slots: %u" msgstr " Använda slots: %d" #: src/libs/data-staging/Scheduler.cpp:1380 #, fuzzy, c-format msgid "" " Shares configuration:\n" "%s" msgstr "Cacheinställningar: %s" #: src/libs/data-staging/Scheduler.cpp:1383 msgid " Delivery service: LOCAL" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1384 #, fuzzy, c-format msgid " Delivery service: %s" msgstr "Avvisar service: %s" #: src/libs/data-staging/Scheduler.cpp:1389 #, fuzzy msgid "Failed to create DTR dump thread" msgstr "Misslyckades med att skapa tråd för ldap bind (%s)" #: src/libs/data-staging/Scheduler.cpp:1406 #: src/services/data-staging/DataDeliveryService.cpp:513 #, fuzzy, c-format msgid "DTR %s cancelled" msgstr "PUT anropad" #: src/libs/data-staging/examples/Generator.cpp:15 msgid "Shutting down scheduler" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:17 msgid "Scheduler stopped, exiting" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:23 #, c-format msgid "Received DTR %s back from scheduler in state %s" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:32 #, fuzzy msgid "Generator started" msgstr "Laddar ner jobb: %s" #: src/libs/data-staging/examples/Generator.cpp:33 #, fuzzy msgid "Starting DTR threads" msgstr "Startar grid-manager-tråd" #: src/libs/data-staging/examples/Generator.cpp:46 #, fuzzy msgid "No valid credentials found, exiting" msgstr "Ogiltig destination" #: src/libs/data-staging/examples/Generator.cpp:58 #, fuzzy, c-format msgid "Problem creating dtr (source %s, destination %s)" msgstr "Problem med att slå upp destination" #: src/services/a-rex/arex.cpp:446 #, fuzzy, c-format msgid "Using cached local account '%s'" msgstr "Använder lokalt konto '%s'" #: src/services/a-rex/arex.cpp:457 msgid "Will not map to 'root' account by default" msgstr "Kommer ej att mappa 'root'-konto som förval" #: src/services/a-rex/arex.cpp:470 msgid "No local account name specified" msgstr "Inget lokalt kontonamn specificerat" #: src/services/a-rex/arex.cpp:473 #, c-format msgid "Using local account '%s'" msgstr "Använder lokalt konto '%s'" #: src/services/a-rex/arex.cpp:494 msgid "Failed to acquire grid-manager's configuration" msgstr "Misslyckades med att ladda grid-managerns inställningar" #: src/services/a-rex/arex.cpp:519 #: 
src/services/cache_service/CacheService.cpp:572 #: src/services/data-staging/DataDeliveryService.cpp:687 #, c-format msgid "SOAP operation is not supported: %s" msgstr "SOAP-process understöds inte: %s" #: src/services/a-rex/arex.cpp:532 #, fuzzy, c-format msgid "Connection from %s: %s" msgstr "Konvertering misslyckades: %s" #: src/services/a-rex/arex.cpp:534 #, c-format msgid "process: method: %s" msgstr "process: metod: %s" #: src/services/a-rex/arex.cpp:535 #, c-format msgid "process: endpoint: %s" msgstr "process: ändpunkt: %s" #: src/services/a-rex/arex.cpp:546 #, c-format msgid "process: id: %s" msgstr "process: id: %s" #: src/services/a-rex/arex.cpp:547 #, c-format msgid "process: subpath: %s" msgstr "process: subsökväg: %s" #: src/services/a-rex/arex.cpp:567 #: src/services/cache_service/CacheService.cpp:546 #: src/services/data-staging/DataDeliveryService.cpp:647 #: src/tests/echo/echo.cpp:98 #, c-format msgid "process: request=%s" msgstr "process: begäran=%s" #: src/services/a-rex/arex.cpp:572 #: src/services/cache_service/CacheService.cpp:551 #: src/services/data-staging/DataDeliveryService.cpp:652 #: src/tests/count/count.cpp:69 msgid "input does not define operation" msgstr "indata definierar ej operation" #: src/services/a-rex/arex.cpp:575 #: src/services/cache_service/CacheService.cpp:554 #: src/services/data-staging/DataDeliveryService.cpp:655 #: src/tests/count/count.cpp:72 #, c-format msgid "process: operation: %s" msgstr "process: operation: %s" #: src/services/a-rex/arex.cpp:591 src/services/a-rex/arex.cpp:804 #: src/services/a-rex/arex.cpp:823 src/services/a-rex/arex.cpp:837 #: src/services/a-rex/arex.cpp:847 src/services/a-rex/arex.cpp:862 #: src/services/cache_service/CacheService.cpp:588 #: src/services/data-staging/DataDeliveryService.cpp:703 msgid "Security Handlers processing failed" msgstr "Säkerhetshanterarprocessering misslyckades" #: src/services/a-rex/arex.cpp:598 msgid "Can't obtain configuration" msgstr "Misslyckades med att ladda inställningar" #: src/services/a-rex/arex.cpp:614 msgid "process: factory endpoint" msgstr "process: factoryändpunkt" #: src/services/a-rex/arex.cpp:798 src/services/a-rex/arex.cpp:815 #: src/services/cache_service/CacheService.cpp:583 #: src/services/data-staging/DataDeliveryService.cpp:698 #: src/tests/echo/echo.cpp:166 #, c-format msgid "process: response=%s" msgstr "process: svar=%s" #: src/services/a-rex/arex.cpp:800 #, fuzzy msgid "process: response is not SOAP" msgstr "Svaret är inte SOAP" #: src/services/a-rex/arex.cpp:830 msgid "process: GET" msgstr "process: GET" #: src/services/a-rex/arex.cpp:831 #, c-format msgid "GET: id %s path %s" msgstr "" #: src/services/a-rex/arex.cpp:854 msgid "process: PUT" msgstr "process: PUT" #: src/services/a-rex/arex.cpp:869 #, fuzzy, c-format msgid "process: method %s is not supported" msgstr "process: %s: understöds inte" #: src/services/a-rex/arex.cpp:872 #, fuzzy msgid "process: method is not defined" msgstr "process: %s: understöds inte" #: src/services/a-rex/arex.cpp:908 #, fuzzy msgid "Failed to run Grid Manager thread" msgstr "Misslyckades med att starta ny tråd" #: src/services/a-rex/arex.cpp:972 #, fuzzy, c-format msgid "Storing configuration in temporary file %s" msgstr "Läser inställningsfil: %s" #: src/services/a-rex/arex.cpp:977 #, fuzzy msgid "Failed to process service configuration" msgstr "Misslyckades med att ladda serviceinställningar" #: src/services/a-rex/arex.cpp:985 #, fuzzy, c-format msgid "Failed to process configuration in %s" msgstr "Misslyckades med att läsa 
GM-inställningsfil %s" #: src/services/a-rex/arex.cpp:991 #, fuzzy msgid "No control directory set in configuration" msgstr "Jobbkontrollerare saknar jobblistinställning" #: src/services/a-rex/arex.cpp:995 #, fuzzy msgid "No session directory set in configuration" msgstr "Cacherot saknas i inställningar" #: src/services/a-rex/arex.cpp:999 #, fuzzy msgid "No LRMS set in configuration" msgstr "Cacherot saknas i inställningar" #: src/services/a-rex/arex.cpp:1004 #, fuzzy, c-format msgid "Failed to create control directory %s" msgstr "Misslyckades med att läsa kontrollkatalog: %s" #: src/services/a-rex/arex.cpp:1033 #, fuzzy, c-format msgid "Provided LRMSName is not a valid URL: %s" msgstr "URL-post i förvalda services är inte en giltig URL: %s" #: src/services/a-rex/arex.cpp:1035 msgid "" "No LRMSName is provided. This is needed if you wish to completely comply " "with the BES specifications." msgstr "" #: src/services/a-rex/cachecheck.cpp:34 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:539 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:843 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:424 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:337 #, c-format msgid "Error with cache configuration: %s" msgstr "Fel med cacheinställningar: %s" #: src/services/a-rex/cachecheck.cpp:50 #: src/services/cache_service/CacheService.cpp:305 #, fuzzy msgid "Error with cache configuration" msgstr "Fel med cacheinställningar: %s" #: src/services/a-rex/cachecheck.cpp:75 #: src/services/cache_service/CacheService.cpp:135 #: src/services/cache_service/CacheService.cpp:330 #, c-format msgid "Looking up URL %s" msgstr "" #: src/services/a-rex/cachecheck.cpp:77 #: src/services/cache_service/CacheService.cpp:144 #, fuzzy, c-format msgid "Cache file is %s" msgstr "cachefil: %s" #: src/services/a-rex/change_activity_status.cpp:33 #, c-format msgid "" "ChangeActivityStatus: request = \n" "%s" msgstr "" "ChangeActivityStatus: begäran = \n" "%s" #: src/services/a-rex/change_activity_status.cpp:38 msgid "ChangeActivityStatus: no ActivityIdentifier found" msgstr "ChangeActivityStatus: ingen ActivityIdentifier funnen" #: src/services/a-rex/change_activity_status.cpp:47 msgid "ChangeActivityStatus: EPR contains no JobID" msgstr "ChangeActivityStatus: EPR innehåller ingen JobID" #: src/services/a-rex/change_activity_status.cpp:57 #, c-format msgid "ChangeActivityStatus: no job found: %s" msgstr "ChangeActivityStatus: inget jobb funnet: %s" #: src/services/a-rex/change_activity_status.cpp:73 msgid "ChangeActivityStatus: missing NewStatus element" msgstr "ChangeActivityStatus: saknat NewStatus-element" #: src/services/a-rex/change_activity_status.cpp:87 #, fuzzy msgid "ChangeActivityStatus: Failed to accept delegation" msgstr "CreateActivity: Misslyckades med att acceptera delegering" #: src/services/a-rex/change_activity_status.cpp:103 msgid "ChangeActivityStatus: old BES state does not match" msgstr "ChangeActivityStatus: gammalt BES-tillstånd matchar inte" #: src/services/a-rex/change_activity_status.cpp:110 #, fuzzy msgid "ChangeActivityStatus: old A-REX state does not match" msgstr "ChangeActivityStatus: gammalt A-Rex-tillstånd matchar inte" #: src/services/a-rex/change_activity_status.cpp:137 #, fuzzy msgid "ChangeActivityStatus: Failed to update credentials" msgstr "ChangeActivityStatus: gammalt BES-tillstånd matchar inte" #: src/services/a-rex/change_activity_status.cpp:143 #, fuzzy msgid "ChangeActivityStatus: Failed to resume job" msgstr "CreateActivity: Misslyckades med att skapa 
nytt jobb" #: src/services/a-rex/change_activity_status.cpp:149 #, fuzzy, c-format msgid "ChangeActivityStatus: State change not allowed: from %s/%s to %s/%s" msgstr "" "ChangeActivityStatus: tillståndsförändring ej tillåten: från %s/%s till %s/%s" #: src/services/a-rex/change_activity_status.cpp:168 #, c-format msgid "" "ChangeActivityStatus: response = \n" "%s" msgstr "" "ChangeActivityStatus: svar = \n" "%s" #: src/services/a-rex/change_activity_status.cpp:213 #: src/services/a-rex/change_activity_status.cpp:217 #, fuzzy, c-format msgid "EMIES:PauseActivity: job %s - %s" msgstr "GetActivityStatuses: jobb %s - %s" #: src/services/a-rex/change_activity_status.cpp:262 #: src/services/a-rex/change_activity_status.cpp:266 #, fuzzy, c-format msgid "EMIES:ResumeActivity: job %s - %s" msgstr "TerminateActivities: jobb %s - %s" #: src/services/a-rex/change_activity_status.cpp:311 #: src/services/a-rex/change_activity_status.cpp:316 #, fuzzy, c-format msgid "EMIES:CancelActivity: job %s - %s" msgstr "GetActivityStatuses: jobb %s - %s" #: src/services/a-rex/change_activity_status.cpp:324 #, fuzzy, c-format msgid "job %s cancelled successfully" msgstr "Avbrytande av jobb lyckades" #: src/services/a-rex/change_activity_status.cpp:370 #: src/services/a-rex/change_activity_status.cpp:385 #, fuzzy, c-format msgid "EMIES:WipeActivity: job %s - %s" msgstr "GetActivityStatuses: jobb %s - %s" #: src/services/a-rex/change_activity_status.cpp:389 #, fuzzy, c-format msgid "job %s (will be) cleaned successfully" msgstr "Uppstädning av jobb lyckades" #: src/services/a-rex/change_activity_status.cpp:435 #: src/services/a-rex/change_activity_status.cpp:440 #, fuzzy, c-format msgid "EMIES:RestartActivity: job %s - %s" msgstr "GetActivityStatuses: jobb %s - %s" #: src/services/a-rex/change_activity_status.cpp:444 #, fuzzy, c-format msgid "job %s restarted successfully" msgstr "Stängdes OK" #: src/services/a-rex/create_activity.cpp:35 #, c-format msgid "" "CreateActivity: request = \n" "%s" msgstr "" "CreateActivity: begäran = \n" "%s" #: src/services/a-rex/create_activity.cpp:40 msgid "CreateActivity: no job description found" msgstr "CreateActivity: ingen jobbeskrivning funnen" #: src/services/a-rex/create_activity.cpp:48 msgid "CreateActivity: max jobs total limit reached" msgstr "" #: src/services/a-rex/create_activity.cpp:67 msgid "CreateActivity: Failed to accept delegation" msgstr "CreateActivity: Misslyckades med att acceptera delegering" #: src/services/a-rex/create_activity.cpp:100 #, c-format msgid "CreateActivity: Failed to create new job: %s" msgstr "CreateActivity: Misslyckades med att skapa nytt jobb: %s" #: src/services/a-rex/create_activity.cpp:102 msgid "CreateActivity: Failed to create new job" msgstr "CreateActivity: Misslyckades med att skapa nytt jobb" #: src/services/a-rex/create_activity.cpp:117 msgid "CreateActivity finished successfully" msgstr "CreateActivity avslutades OK" #: src/services/a-rex/create_activity.cpp:121 #, c-format msgid "" "CreateActivity: response = \n" "%s" msgstr "" "CreateActivity: svar = \n" "%s" #: src/services/a-rex/create_activity.cpp:159 #, fuzzy, c-format msgid "" "EMIES:CreateActivity: request = \n" "%s" msgstr "" "CreateActivity: begäran = \n" "%s" #: src/services/a-rex/create_activity.cpp:165 #, fuzzy msgid "EMIES:CreateActivity: too many activity descriptions" msgstr "CreateActivity: ingen jobbeskrivning funnen" #: src/services/a-rex/create_activity.cpp:175 #, fuzzy msgid "EMIES:CreateActivity: no job description found" msgstr "CreateActivity: ingen 
jobbeskrivning funnen" #: src/services/a-rex/create_activity.cpp:182 msgid "EMIES:CreateActivity: max jobs total limit reached" msgstr "" #: src/services/a-rex/create_activity.cpp:208 #, fuzzy, c-format msgid "ES:CreateActivity: Failed to create new job: %s" msgstr "CreateActivity: Misslyckades med att skapa nytt jobb: %s" #: src/services/a-rex/create_activity.cpp:224 #, fuzzy msgid "EMIES:CreateActivity finished successfully" msgstr "CreateActivity avslutades OK" #: src/services/a-rex/create_activity.cpp:225 #, fuzzy, c-format msgid "New job accepted with id %s" msgstr "Jobb insänt med jobb-id: %s" #: src/services/a-rex/create_activity.cpp:229 #, fuzzy, c-format msgid "" "EMIES:CreateActivity: response = \n" "%s" msgstr "" "CreateActivity: svar = \n" "%s" #: src/services/a-rex/delegation/DelegationStore.cpp:55 msgid "Wiping and re-creating whole storage" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:210 #: src/services/a-rex/delegation/DelegationStore.cpp:311 #, c-format msgid "DelegationStore: TouchConsumer failed to create file %s" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:271 msgid "DelegationStore: PeriodicCheckConsumers failed to resume iterator" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:291 #, c-format msgid "" "DelegationStore: PeriodicCheckConsumers failed to remove old delegation %s - " "%s" msgstr "" #: src/services/a-rex/get.cpp:112 #, c-format msgid "Get: there is no job %s - %s" msgstr "Get: det finns inget jobb %s - %s" #: src/services/a-rex/get.cpp:123 #, fuzzy, c-format msgid "Get: can't process file %s" msgstr "Get: kan inte processera fil %s/%s" #: src/services/a-rex/get.cpp:167 #, fuzzy, c-format msgid "Head: there is no job %s - %s" msgstr "Get: det finns inget jobb %s - %s" #: src/services/a-rex/get.cpp:178 #, fuzzy, c-format msgid "Head: can't process file %s" msgstr "Get: kan inte processera fil %s/%s" #: src/services/a-rex/get.cpp:190 #, fuzzy, c-format msgid "http_get: start=%llu, end=%llu, burl=%s, hpath=%s" msgstr "http_get: början=%u, slut=%u, burl=%s, bsökväg=%s, hsökväg=%s" #: src/services/a-rex/get.cpp:357 #, fuzzy msgid "Failed to extract credential information" msgstr "Misslyckades med att ta bort metainformation" #: src/services/a-rex/get.cpp:360 #, fuzzy, c-format msgid "Checking cache permissions: DN: %s" msgstr "policyplats: %s" #: src/services/a-rex/get.cpp:361 #, fuzzy, c-format msgid "Checking cache permissions: VO: %s" msgstr "policyplats: %s" #: src/services/a-rex/get.cpp:363 #, c-format msgid "Checking cache permissions: VOMS attr: %s" msgstr "" #: src/services/a-rex/get.cpp:373 #, c-format msgid "Cache access allowed to %s by DN %s" msgstr "" #: src/services/a-rex/get.cpp:376 #, c-format msgid "DN %s doesn't match %s" msgstr "" #: src/services/a-rex/get.cpp:379 #, c-format msgid "Cache access allowed to %s by VO %s" msgstr "" #: src/services/a-rex/get.cpp:382 #, c-format msgid "VO %s doesn't match %s" msgstr "" #: src/services/a-rex/get.cpp:388 src/services/a-rex/get.cpp:407 #, c-format msgid "Bad credential value %s in cache access rules" msgstr "" #: src/services/a-rex/get.cpp:396 src/services/a-rex/get.cpp:415 #, c-format msgid "VOMS attr %s matches %s" msgstr "" #: src/services/a-rex/get.cpp:397 #, c-format msgid "Cache access allowed to %s by VO %s and role %s" msgstr "" #: src/services/a-rex/get.cpp:400 src/services/a-rex/get.cpp:419 #, c-format msgid "VOMS attr %s doesn't match %s" msgstr "" #: src/services/a-rex/get.cpp:416 #, c-format msgid "Cache access allowed to %s by VO %s 
and group %s" msgstr "" #: src/services/a-rex/get.cpp:422 #, c-format msgid "Unknown credential type %s for URL pattern %s" msgstr "" #: src/services/a-rex/get.cpp:428 #, c-format msgid "No match found in cache access rules for %s" msgstr "" #: src/services/a-rex/get.cpp:438 #, c-format msgid "Get from cache: Looking in cache for %s" msgstr "" #: src/services/a-rex/get.cpp:441 #, fuzzy, c-format msgid "Get from cache: Invalid URL %s" msgstr "Ogiltig URL: %s" #: src/services/a-rex/get.cpp:458 #, fuzzy msgid "Get from cache: Error in cache configuration" msgstr "Fel med cacheinställningar: %s" #: src/services/a-rex/get.cpp:467 msgid "Get from cache: File not in cache" msgstr "" #: src/services/a-rex/get.cpp:470 #, fuzzy, c-format msgid "Get from cache: could not access cached file: %s" msgstr "Kan inte läsa nyckelfil: %s (%s)" #: src/services/a-rex/get.cpp:480 msgid "Get from cache: Cached file is locked" msgstr "" #: src/services/a-rex/get_activity_documents.cpp:29 #, c-format msgid "" "GetActivityDocuments: request = \n" "%s" msgstr "" "GetActivityDocuments: begäran = \n" "%s" #: src/services/a-rex/get_activity_documents.cpp:40 #, fuzzy msgid "GetActivityDocuments: non-AREX job requested" msgstr "GetActivityDocuments: icke-ARex-jobb begärt" #: src/services/a-rex/get_activity_documents.cpp:49 #: src/services/a-rex/get_activity_documents.cpp:60 #, c-format msgid "GetActivityDocuments: job %s - %s" msgstr "GetActivityDocuments: jobb %s - %s" #: src/services/a-rex/get_activity_documents.cpp:72 #, c-format msgid "" "GetActivityDocuments: response = \n" "%s" msgstr "" "GetActivityDocuments: svar = \n" "%s" #: src/services/a-rex/get_activity_statuses.cpp:35 #, c-format msgid "" "GetActivityStatuses: request = \n" "%s" msgstr "" "GetActivityStatuses: begäran = \n" "%s" #: src/services/a-rex/get_activity_statuses.cpp:50 #, fuzzy, c-format msgid "GetActivityStatuses: unknown verbosity level requested: %s" msgstr "" "GetActivityStatuses: begäran = \n" "%s" #: src/services/a-rex/get_activity_statuses.cpp:62 #, c-format msgid "GetActivityStatuses: job %s - can't understand EPR" msgstr "GetActivityStatuses: jobb %s - kan inte förstå EPR" #: src/services/a-rex/get_activity_statuses.cpp:71 #, c-format msgid "GetActivityStatuses: job %s - %s" msgstr "GetActivityStatuses: jobb %s - %s" #: src/services/a-rex/get_activity_statuses.cpp:105 #, c-format msgid "" "GetActivityStatuses: response = \n" "%s" msgstr "" "GetActivityStatuses: begäran = \n" "%s" #: src/services/a-rex/get_activity_statuses.cpp:306 #: src/services/a-rex/get_activity_statuses.cpp:400 #, fuzzy, c-format msgid "EMIES:GetActivityStatus: job %s - %s" msgstr "GetActivityStatuses: jobb %s - %s" #: src/services/a-rex/get_activity_statuses.cpp:526 #, c-format msgid "EMIES:GetActivityInfo: job %s - failed to retrieve GLUE2 information" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:578 #: src/services/a-rex/get_activity_statuses.cpp:585 #, c-format msgid "EMIES:NotifyService: job %s - %s" msgstr "" #: src/services/a-rex/get_factory_attributes_document.cpp:37 #, c-format msgid "" "GetFactoryAttributesDocument: request = \n" "%s" msgstr "" "GetFactoryAttributesDocument: begäran = \n" "%s" #: src/services/a-rex/get_factory_attributes_document.cpp:62 #, c-format msgid "" "GetFactoryAttributesDocument: response = \n" "%s" msgstr "" "GetFactoryAttributesDocument: svar = \n" "%s" #: src/services/a-rex/grid-manager/GridManager.cpp:98 #, c-format msgid "" "Cannot create directories for log file %s. 
Messages will be logged to this " "log" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:104 #, c-format msgid "" "Cannot open cache log file %s: %s. Cache cleaning messages will be logged to " "this log" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:110 #: src/services/a-rex/grid-manager/log/JobLog.cpp:139 #, fuzzy, c-format msgid "Running command %s" msgstr " Största antal exekverande jobb: %i" #: src/services/a-rex/grid-manager/GridManager.cpp:114 #, fuzzy msgid "Failed to start cache clean script" msgstr "Misslyckades med att exekvera cachestädningsskript: %s" #: src/services/a-rex/grid-manager/GridManager.cpp:115 #, fuzzy msgid "Cache cleaning script failed" msgstr "\tCachestädning avstängd" #: src/services/a-rex/grid-manager/GridManager.cpp:177 #, fuzzy msgid "Starting jobs processing thread" msgstr "Startar grid-manager-tråd" #: src/services/a-rex/grid-manager/GridManager.cpp:178 #, c-format msgid "Used configuration file %s" msgstr "Använd inställningsfil %s" #: src/services/a-rex/grid-manager/GridManager.cpp:187 #, c-format msgid "" "Error adding communication interface in %s. Maybe another instance of A-REX " "is already running." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:190 #, c-format msgid "" "Error adding communication interface in %s. Maybe permissions are not " "suitable." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:199 #, c-format msgid "" "Error initiating delegation database in %s. Maybe permissions are not " "suitable. Returned error is: %s." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:211 msgid "Failed to start new thread" msgstr "Misslyckades med att starta ny tråd" #: src/services/a-rex/grid-manager/GridManager.cpp:268 msgid "Failed to start new thread: cache won't be cleaned" msgstr "Misslyckades med att starta tråd: cache kommer ej att rensas" #: src/services/a-rex/grid-manager/GridManager.cpp:273 msgid "Picking up left jobs" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:277 #, fuzzy msgid "Starting data staging threads" msgstr "Startar grid-manager-tråd" #: src/services/a-rex/grid-manager/GridManager.cpp:281 msgid "Failed to start data staging threads, exiting Grid Manager thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:290 #, fuzzy msgid "Starting jobs' monitoring" msgstr "Startar jobbmonitorering" #: src/services/a-rex/grid-manager/GridManager.cpp:303 #, fuzzy, c-format msgid "Failed to open heartbeat file %s" msgstr "Misslyckades med att låsa upp fil %s: %s" #: src/services/a-rex/grid-manager/GridManager.cpp:335 #, c-format msgid "Orphan delegation lock detected (%s) - cleaning" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:340 #, fuzzy msgid "Failed to obtain delegation locks for cleaning orphaned locks" msgstr "Misslyckades med att initiera delegering" #: src/services/a-rex/grid-manager/GridManager.cpp:346 msgid "Waking up" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:349 #, fuzzy msgid "Stopping jobs processing thread" msgstr "Startar grid-manager-tråd" #: src/services/a-rex/grid-manager/GridManager.cpp:352 #, fuzzy msgid "Exiting jobs processing thread" msgstr "Startar grid-manager-tråd" #: src/services/a-rex/grid-manager/GridManager.cpp:365 msgid "Shutting down job processing" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:370 msgid "Shutting down data staging threads" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:24 #, c-format msgid "" "Usage: %s -I -U -P -L [-c " "] [-p ] 
[-d ]" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:74 msgid "Job ID argument is required." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:80 msgid "Path to user's proxy file should be specified." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:86 msgid "User name should be specified." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:92 msgid "Path to .local job status file is required." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:100 msgid "Generating ceID prefix from hostname automatically" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:103 #, fuzzy msgid "" "Cannot determine hostname from gethostname() to generate ceID automatically." msgstr "Kan inte bestämma värdnamn från uname()" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:112 #, c-format msgid "ceID prefix is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:120 #, c-format msgid "Getting currect timestamp for BLAH parser log: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:129 msgid "Parsing .local file to obtain job-specific identifiers and info" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:139 #, c-format msgid "globalid is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:142 #, c-format msgid "headnode is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:145 #, fuzzy, c-format msgid "interface is set to %s" msgstr " Gränssnittsnamn: %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:149 msgid "There is no local LRMS ID. Message will not be written to BLAH log." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:152 #, fuzzy, c-format msgid "localid is set to %s" msgstr "Url mappas till: %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:155 #, c-format msgid "queue name is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:158 #, c-format msgid "owner subject is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:160 msgid "" "Job did not finished successfully. Message will not be written to BLAH log." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:165 #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:119 #, fuzzy msgid "Can not read information from the local job status file" msgstr "Tar bort jobb %s från jobblistfilen" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:181 #, c-format msgid "" "Unsupported submission interface %s. Seems arc-blahp-logger need to be " "updated accordingly :-) Please submit the bug to bugzilla." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:191 msgid "Parsing VOMS AC to get FQANs information" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:210 msgid "No FQAN found. 
Using NULL as userFQAN value" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:224 #, c-format msgid "Assembling BLAH parser log entry: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:229 #, c-format msgid "Writing the info the the BLAH parser log: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:237 #, fuzzy, c-format msgid "Cannot open BLAH log file '%s'" msgstr "Kan inte läsa nyckelfil: %s" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:24 #, c-format msgid "" "Usage: %s [-N] -P -L [-c ] [-d " "]" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:90 msgid "User proxy file is required but is not specified" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:95 msgid "Local job status file is required" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:114 #, c-format msgid "Making the decision for the queue %s" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:127 #, fuzzy, c-format msgid "Can not parse the configuration file %s" msgstr "Misslyckades med att läsa inställningsfil." #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:144 #, fuzzy, c-format msgid "Can not find queue '%s' in the configuration file" msgstr "Kan inte öppna inställningsfil." #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:168 msgid "No access policy to check, returning success" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:182 #, fuzzy, c-format msgid "CA certificates directory %s does not exist" msgstr "CA-certifikatkatlog är inte en katalog: %s" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:202 #, fuzzy msgid "User proxy certificate is not valid" msgstr "Skrev det signerade proxycertifikatet till en fil" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:207 #, c-format msgid "Getting VOMS AC for: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:221 #, fuzzy, c-format msgid "Checking a match for '%s'" msgstr "Lägg till plats: metadata: %s" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:228 #, c-format msgid "FQAN '%s' IS a match to '%s'" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:232 #, c-format msgid "Queue '%s' usage is prohibited to FQAN '%s' by the site access policy" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:236 #, c-format msgid "FQAN '%s' IS NOT a match to '%s'" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:242 #, c-format msgid "" "Queue '%s' usage with provided FQANs is prohibited by the site access policy" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:35 #, c-format msgid "Missing cancel-%s-job - job cancellation may not work" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:39 #, c-format msgid "Missing submit-%s-job - job submission to LRMS may not work" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:43 #, c-format msgid "Missing scan-%s-job - may miss when job finished executing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:57 #, fuzzy, c-format msgid "Wrong option in %s" msgstr "Felaktig inställning i daemon" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:71 #, fuzzy, c-format msgid "Can't read configuration file at %s" msgstr "Misslyckades med att läsa inställningsfil." 
#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:80 #, fuzzy, c-format msgid "Can't interpret configuration file %s as XML" msgstr "Misslyckades med att läsa inställningsfil." #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:118 #, fuzzy, c-format msgid "Can't recognize type of configuration file at %s" msgstr "Kan inte öppna inställningsfil." #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:121 #, fuzzy msgid "Could not determine configuration type or configuration is empty" msgstr "Kan inte öppna inställningsfil." #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:169 msgid "No queue name given in queue block name" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:176 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:499 #, fuzzy msgid "forcedefaultvoms parameter is empty" msgstr "mail är tom" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:185 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:198 #, fuzzy msgid "authorizedvo parameter is empty" msgstr "mail är tom" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:276 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:604 #, fuzzy, c-format msgid "Wrong number in jobreport_period: %s" msgstr "felaktigt nummer i wakeupperiod" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:280 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:608 #, fuzzy, c-format msgid "Wrong number in jobreport_period: %d, minimal value: %s" msgstr "felaktigt nummer i wakeupperiod" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:299 msgid "Missing file name in jobreport_logfile" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:316 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:323 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:330 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:337 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:344 #, fuzzy, c-format msgid "Wrong number in maxjobs: %s" msgstr "felaktigt nummer i maxjobs" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:351 #, fuzzy, c-format msgid "Wrong number in wakeupperiod: %s" msgstr "felaktigt nummer i wakeupperiod" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:360 #, fuzzy msgid "mail parameter is empty" msgstr "mail är tom" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:366 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:370 #, fuzzy msgid "Wrong number in defaultttl command" msgstr "felaktigt nummer i defaultttl-kommando" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:376 #, fuzzy msgid "Wrong number in maxrerun command" msgstr "felaktigt nummer i maxrerun-kommando" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:382 msgid "defaultlrms is empty" msgstr "defaultlrms är tom" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:398 #, fuzzy msgid "State name for plugin is missing" msgstr "tillståndsnamn för plugin saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:402 msgid "Options for plugin are missing" msgstr "Inställningar för plugin saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:405 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:726 #, c-format msgid "Failed to register plugin for state %s" msgstr "Misslyckades med att registrera plugin för tillstånd %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:413 #, fuzzy msgid "Wrong number for timeout in plugin command" msgstr "felaktigt nummer för timeout i plugin-kommando" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:429 #, fuzzy msgid 
"Wrong option in fixdirectories" msgstr "Felaktig inställning i daemon" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:441 #, fuzzy msgid "Wrong option in delegationdb" msgstr "Felaktig inställning i daemon" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:456 #, fuzzy msgid "Session root directory is missing" msgstr "sessionskatalog saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:459 #, fuzzy msgid "Junk in sessiondir command" msgstr "skräp i session-kommando" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:471 #, fuzzy msgid "Missing directory in control command" msgstr "saknad katalog i control-kommando" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:476 msgid "" "'control' configuration option is no longer supported, please use " "'controldir' instead" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:481 #, fuzzy msgid "User for helper program is missing" msgstr "tillståndsnamn för hjälpprogram saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:484 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:875 #, fuzzy msgid "Only user '.' for helper program is supported" msgstr "%s användare för hjälpprogram har ej ställts in" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:487 #, fuzzy msgid "Helper program is missing" msgstr "hjälpprogram saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:632 #, fuzzy msgid "Value for maxJobsTracked is incorrect number" msgstr "timeout för plugin saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:636 #, fuzzy msgid "Value for maxJobsRun is incorrect number" msgstr "timeout för plugin saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:640 #, fuzzy msgid "Value for maxJobsTotal is incorrect number" msgstr "timeout för plugin saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:644 #, fuzzy msgid "Value for maxJobsPerDN is incorrect number" msgstr "timeout för plugin saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:648 #, fuzzy msgid "Value for wakeupPeriod is incorrect number" msgstr "timeout för plugin saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:652 #, fuzzy msgid "Value for maxScripts is incorrect number" msgstr "timeout för plugin saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:664 #, fuzzy msgid "serviceMail is empty" msgstr "mail är tom" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:678 #, fuzzy msgid "Type in LRMS is missing" msgstr "Plats saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:691 #, fuzzy msgid "LRMS is missing" msgstr "Plats saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:704 #, fuzzy msgid "State name for authPlugin is missing" msgstr "tillståndsnamn för plugin saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:709 #, fuzzy msgid "Command for authPlugin is missing" msgstr "timeout för plugin saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:723 #, c-format msgid "Registering plugin for state %s; options: %s; command: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:739 #, fuzzy msgid "Command for localCred is missing" msgstr "Inställningar för plugin saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:745 #, fuzzy msgid "Timeout for localCred is missing" msgstr "timeout för plugin saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:750 #, fuzzy msgid "Timeout for localCred is incorrect number" msgstr "timeout för plugin saknas" #: 
src/services/a-rex/grid-manager/conf/CoreConfig.cpp:778 msgid "Control element must be present" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:783 #, fuzzy msgid "controlDir is missing" msgstr "sessionskatalog saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:790 #, fuzzy msgid "sessionRootDir is missing" msgstr "sessionskatalog saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:800 msgid "Attribute drain for sessionRootDir is incorrect boolean" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:809 msgid "The fixDirectories element is incorrect value" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:816 #, fuzzy msgid "The delegationDB element is incorrect value" msgstr "Destinaltion: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:821 #, fuzzy msgid "The maxReruns element is incorrect number" msgstr "timeout för plugin saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:825 #, fuzzy msgid "The noRootPower element is incorrect number" msgstr "timeout för plugin saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:829 #, fuzzy msgid "The defaultTTL element is incorrect number" msgstr "timeout för plugin saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:833 #, fuzzy msgid "The defaultTTR element is incorrect number" msgstr "timeout för plugin saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:866 #, fuzzy msgid "Command in helperUtility is missing" msgstr "timeout för plugin saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:871 #, fuzzy msgid "Username in helperUtility is empty" msgstr "%s användare för hjälpprogram har ej ställts in" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:106 #, c-format msgid "\tSession root dir : %s" msgstr "\tSessionkatalog : %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:107 #, c-format msgid "\tControl dir : %s" msgstr "\tKontrollkatalog : %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:108 #, c-format msgid "\tdefault LRMS : %s" msgstr "\tförvalt LRMS : %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:109 #, c-format msgid "\tdefault queue : %s" msgstr "\tförvald kö : %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:110 #, c-format msgid "\tdefault ttl : %u" msgstr "\tförvald ttl : %u" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:115 #, fuzzy msgid "No valid caches found in configuration, caching is disabled" msgstr "Ingen cachekatalog i inställningar, cachning är avstängd" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:120 #, fuzzy, c-format msgid "\tCache : %s" msgstr "\tCachejobbkatalog : %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:122 #, c-format msgid "\tCache link dir : %s" msgstr "\tCachelänkkatalog : %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:126 #, fuzzy, c-format msgid "\tRemote cache : %s" msgstr "\tCachejobbkatalog : %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:128 #, fuzzy, c-format msgid "\tRemote cache link: %s" msgstr "\tCachelänkkatalog : %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:130 msgid "\tCache cleaning enabled" msgstr "\tCachestädning påslagen" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:131 msgid "\tCache cleaning disabled" msgstr "\tCachestädning avstängd" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:308 msgid "" "Globus location variable substitution is not supported anymore. Please " "specify path directly." 
msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:412 #, fuzzy, c-format msgid "Starting helper process: %s" msgstr "Startar hjälpprocess (%s): %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:422 #, fuzzy, c-format msgid "Helper process start failed: %s" msgstr "Hjälpprocessstart misslyckades (%s): %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:429 #, fuzzy, c-format msgid "Stopping helper process %s" msgstr "Startar hjälpprocess (%s): %s" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:14 #, fuzzy, c-format msgid "wrong boolean in %s" msgstr "Fel vid öppnande av metafil %s: %s" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:20 #, fuzzy, c-format msgid "wrong number in %s" msgstr "felaktigt nummer i maxjobs" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:49 #, fuzzy msgid "Can't read configuration file" msgstr "Misslyckades med att läsa inställningsfil." #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:58 #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:32 #, fuzzy msgid "Can't interpret configuration file as XML" msgstr "Misslyckades med att läsa inställningsfil." #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:62 #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:69 #, fuzzy msgid "Configuration error" msgstr "Inställningar: Kö: %s" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:75 #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:103 #, fuzzy msgid "Can't recognize type of configuration file" msgstr "Kan inte öppna inställningsfil." #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:95 msgid "Bad number in maxdelivery" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:101 #, fuzzy msgid "Bad number in maxemergency" msgstr "felaktigt nummer i maxrerun-kommando" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:107 #, fuzzy msgid "Bad number in maxprocessor" msgstr "felaktigt nummer i maxjobs" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:113 #, fuzzy msgid "Bad number in maxprepared" msgstr "felaktigt nummer i maxjobs" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:119 #, fuzzy msgid "Bad number in maxtransfertries" msgstr "felaktigt nummer i maxjobs" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:128 #, fuzzy msgid "Bad number in speedcontrol" msgstr "felaktigt nummer i speedcontrol" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:139 #, fuzzy, c-format msgid "Bad number in definedshare %s" msgstr "felaktigt nummer i maxload" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:148 #, c-format msgid "Bad URL in deliveryservice: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:159 #, fuzzy msgid "Bad number in remotesizelimit" msgstr "felaktigt nummer i maxjobs" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:189 #, fuzzy msgid "Bad value for debug" msgstr "Felaktigt namn för stdout: %s" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:203 #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:315 msgid "Bad URL in acix_endpoint" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:258 #, c-format msgid "Bad URL in deliveryService: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:272 msgid "Bad value for logLevel" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:25 #, fuzzy msgid "Can't open configuration file" msgstr "Kan inte öppna inställningsfil." 
#: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:47 msgid "Value for 'link' element in mapURL is incorrect" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:53 msgid "Missing 'from' element in mapURL" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:57 msgid "Missing 'to' element in mapURL" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:84 msgid "Not enough parameters in copyurl" msgstr "Ej tillräckligt antal parametrar i copyurl" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:93 msgid "Not enough parameters in linkurl" msgstr "Ej tillräckligt antal parametrar i linkurl" #: src/services/a-rex/grid-manager/files/ControlFileContent.cpp:179 #, c-format msgid "Wrong directory in %s" msgstr "Fel katalog i %s" #: src/services/a-rex/grid-manager/files/ControlFileHandling.cpp:100 #, c-format msgid "Failed setting file owner: %s" msgstr "Misslyckades med att ange filägare: %s" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:33 msgid "gm-delegations-converter changes format of delegation database." msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:38 #: src/services/a-rex/grid-manager/gm_jobs.cpp:106 #: src/services/a-rex/grid-manager/gm_kick.cpp:24 #, fuzzy msgid "use specified configuration file" msgstr "Använd inställningsfil %s" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:39 #: src/services/a-rex/grid-manager/gm_jobs.cpp:107 #: src/services/a-rex/grid-manager/gm_kick.cpp:25 #, fuzzy msgid "file" msgstr "filnamn" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:43 #: src/services/a-rex/grid-manager/gm_jobs.cpp:111 msgid "read information from specified control directory" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:44 #: src/services/a-rex/grid-manager/gm_jobs.cpp:112 #, fuzzy msgid "dir" msgstr "katalognamn" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:48 msgid "convert from specified input database format [bdb|sqlite]" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:49 #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:54 msgid "database format" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:53 msgid "convert into specified output database format [bdb|sqlite]" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:96 msgid "gm-jobs displays information on current jobs in the system." 
msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:101 msgid "display more information on each job" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:116 msgid "print summary of jobs in each transfer share" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:121 #, fuzzy msgid "do not print list of jobs" msgstr "fil som innehåller en jobblista" #: src/services/a-rex/grid-manager/gm_jobs.cpp:126 msgid "do not print number of jobs in each state" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:131 msgid "print state of the service" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:136 msgid "show only jobs of user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:137 #: src/services/a-rex/grid-manager/gm_jobs.cpp:147 #: src/services/a-rex/grid-manager/gm_jobs.cpp:157 msgid "dn" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:141 msgid "request to cancel job(s) with specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:142 #: src/services/a-rex/grid-manager/gm_jobs.cpp:152 #: src/services/a-rex/grid-manager/gm_jobs.cpp:162 #: src/services/a-rex/grid-manager/gm_jobs.cpp:172 msgid "id" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:146 msgid "" "request to cancel jobs belonging to user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:151 #, fuzzy msgid "request to clean job(s) with specified ID(s)" msgstr "Inget Connect-element specificerat" #: src/services/a-rex/grid-manager/gm_jobs.cpp:156 msgid "" "request to clean jobs belonging to user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:161 msgid "show only jobs with specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:166 msgid "print list of available delegation IDs" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:171 msgid "print delegation token of specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:176 msgid "print main delegation token of specified Job ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:177 #, fuzzy msgid "job id" msgstr "Jobbidentifierare saknas" #: src/services/a-rex/grid-manager/gm_jobs.cpp:181 msgid "" "output requested elements (jobs list, delegation ids and tokens) to file" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:182 #, fuzzy msgid "file name" msgstr "filnamn" #: src/services/a-rex/grid-manager/gm_kick.cpp:18 msgid "" "gm-kick wakes up the A-REX corresponding to the given control file. If no " "file is given it uses the control directory found in the configuration file." msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:39 #, fuzzy, c-format msgid "Failed to acquire source: %s" msgstr "Misslyckades med att slå upp källa: %s" #: src/services/a-rex/grid-manager/inputcheck.cpp:44 #, fuzzy, c-format msgid "Failed to resolve %s" msgstr "Misslyckades med att slå upp %s (%s)" #: src/services/a-rex/grid-manager/inputcheck.cpp:61 #, fuzzy, c-format msgid "Failed to check %s" msgstr "Misslyckades med att ta bort %s" #: src/services/a-rex/grid-manager/inputcheck.cpp:75 msgid "job_description_file [proxy_file]" msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:76 msgid "" "inputcheck checks that input files specified in the job description are " "available and accessible using the credentials in the given proxy file." 
msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:88 #, fuzzy msgid "Wrong number of arguments given" msgstr "Fel antal parametrar angivna" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:105 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1009 #, fuzzy msgid "Failed to run plugin" msgstr "%s: Misslyckades med att exekvera plugin" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:109 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1015 #, fuzzy, c-format msgid "Plugin failed: %s" msgstr "%s: Plugin misslyckades" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:158 msgid "empty argument to remotegmdirs" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:165 msgid "bad arguments to remotegmdirs" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:177 #, fuzzy msgid "Wrong number in maxjobdesc" msgstr "felaktigt nummer i maxjobs" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:185 #: src/services/gridftpd/fileplugin/fileplugin.cpp:186 #, fuzzy, c-format msgid "Unsupported configuration command: %s" msgstr "Använd inställningsfil %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:199 #, c-format msgid "Mapped user:group (%s:%s) not found" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:202 #, fuzzy msgid "Failed processing grid-manager configuration" msgstr "Misslyckades med att ladda grid-managerns inställningar" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:205 msgid "" "Cannot use multiple session directories and remotegmdirs at the same time" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:228 msgid "This user is denied to submit new jobs." msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:239 msgid "No control or remote control directories defined in configuration" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:290 #, fuzzy, c-format msgid "Job submission user: %s (%i:%i)" msgstr "Jobbinsändningssammanfattning:" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:294 #, fuzzy msgid "Job plugin was not initialised" msgstr "DelegateProxy misslyckades" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:312 #, fuzzy msgid "No delegated credentials were passed" msgstr "Misslyckades med att förstöra kreditivhandtag: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:385 #, fuzzy, c-format msgid "Cancelling job %s" msgstr "Städar upp jobb: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:436 #, fuzzy, c-format msgid "Cleaning job %s" msgstr "Städar upp jobb: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:472 #, fuzzy msgid "Request to open file with storing in progress" msgstr "Förfrågan misslyckades: Inget svar" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:505 #: src/services/gridftpd/fileplugin/fileplugin.cpp:343 #, fuzzy, c-format msgid "Retrieving file %s" msgstr "cachefil: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:556 #, c-format msgid "Accepting submission of new job or modification request: %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:577 #: src/services/gridftpd/fileplugin/fileplugin.cpp:383 #: src/services/gridftpd/fileplugin/fileplugin.cpp:420 #, fuzzy, c-format msgid "Storing file %s" msgstr "Fel vid läsning av metafil %s: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:599 #, fuzzy, c-format msgid "Unknown open mode %i" msgstr " attribut:" 
#: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:716 #, c-format msgid "action(%s) != request" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:767 #, fuzzy msgid "Failed writing job description" msgstr "%s: Misslyckades med att skriva lokal information." #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:954 #, fuzzy msgid "Failed writing local description" msgstr "%s: Misslyckades med att skriva lokal information." #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:964 #, fuzzy msgid "Failed writing ACL" msgstr "Misslyckades med att starta jobb" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:980 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:987 #: src/services/a-rex/job.cpp:587 #, fuzzy, c-format msgid "Failed to run external plugin: %s" msgstr "Misslyckades med att registrera plugin för tillstånd %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:991 #: src/services/a-rex/job.cpp:591 #, fuzzy, c-format msgid "Plugin response: %s" msgstr "Svar: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:993 #, fuzzy msgid "Failed to run external plugin" msgstr "Misslyckades med att registrera plugin för tillstånd %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1025 #, fuzzy, c-format msgid "Failed to create session directory %s" msgstr "Misslyckades med att skapa/hitta katalog %s : %s\"" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1035 #, fuzzy msgid "Failed writing status" msgstr "Misslyckades med att lista metafiler" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1049 #, fuzzy, c-format msgid "Failed to lock delegated credentials: %s" msgstr "Misslyckades med att initiera delegering" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1300 #, fuzzy, c-format msgid "Renewing proxy for job %s" msgstr "Hämtar %s-jobb" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1312 #, c-format msgid "New proxy expires at %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1315 #, fuzzy msgid "Failed to write 'local' information" msgstr "%s: Misslyckades med att skriva lokal information." 
#: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1321 #, fuzzy msgid "Failed to renew proxy" msgstr "Misslyckades med att städa upp efter jobb" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1324 msgid "New proxy expiry time is not later than old proxy, not renewing proxy" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1367 #, fuzzy, c-format msgid "Checking file %s" msgstr "Lägg till plats: metadata: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1415 msgid "ID contains forbidden characters" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1462 #: src/services/a-rex/job.cpp:781 #, c-format msgid "Failed to create file in %s" msgstr "Misslyckades med att skapa fil i %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1487 #, fuzzy msgid "Out of tries while allocating new job ID" msgstr "Slut på försök vid allokering av nytt jobb-id i %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1568 #, fuzzy, c-format msgid "Failed to read job's local description for job %s from %s" msgstr "Submit: Misslyckades med att sända jobbeskrivning" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1650 #, fuzzy msgid "No non-draining control or session directories available" msgstr "Ingen giltig plats tillgänglig" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1666 #, fuzzy, c-format msgid "Using control directory %s" msgstr "Misslyckades med att läsa kontrollkatalog: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1667 #, fuzzy, c-format msgid "Using session directory %s" msgstr "%s: Rensar kontroll- och sessionskataloger" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:26 #, fuzzy, c-format msgid "Failed to read job's ACL for job %s from %s" msgstr "Misslyckades med att läsa object: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:69 #, fuzzy, c-format msgid "Failed to parse user policy for job %s" msgstr "Misslyckades med att tolka användarpolicy för jobb %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:74 #, fuzzy, c-format msgid "Failed to load policy evaluator for policy of job %s" msgstr "Misslyckades med ladda utvärderare för användarpolicy för jobb %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:126 #, fuzzy, c-format msgid "Unknown ACL policy %s for job %s" msgstr "Hämtar %s-jobb" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:121 msgid "Exiting Generator thread" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:211 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:225 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:238 msgid "DTRGenerator is not running!" 
msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:214 #, c-format msgid "Received DTR %s during Generator shutdown - may not be processed" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:314 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:324 #, c-format msgid "%s: Trying to remove job from data staging which is still active" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:333 #, c-format msgid "%s: Trying remove job from data staging which does not exist" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:344 #, fuzzy, c-format msgid "%s: Invalid DTR" msgstr "%s är inte en giltig URL" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:354 #, c-format msgid "%s: Received DTR %s to copy file %s in state %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:366 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:982 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:281 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:304 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:634 #, fuzzy, c-format msgid "%s: Failed reading local information" msgstr "%s: Misslyckades med att läsa lokal information." #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:377 #, fuzzy, c-format msgid "%s: DTR %s to copy file %s failed" msgstr "Varning: stängning av temporär låsfil %s misslyckades" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:383 #, c-format msgid "%s: Cancelling other DTRs" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:394 #, fuzzy, c-format msgid "%s: DTR %s to copy to %s failed but is not mandatory" msgstr "Varning: stängning av temporär låsfil %s misslyckades" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:404 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:645 #, fuzzy, c-format msgid "%s: Failed to read list of output files" msgstr "%s: Misslyckades med att läsa lista med utdatafiler." #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:418 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:555 #, fuzzy, c-format msgid "%s: Failed to read dynamic output files in %s" msgstr "%s: Misslyckades med att läsa lista med utdatafiler." #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:420 #, c-format msgid "%s: Going through files in list %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:424 #, fuzzy, c-format msgid "%s: Removing %s from dynamic output file %s" msgstr "Tar bort jobb %s från jobblistfilen" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:428 #, fuzzy, c-format msgid "%s: Failed to write back dynamic output files in %s" msgstr "%s: Misslyckades med att läsa lista med utdatafiler." #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:444 #, fuzzy, c-format msgid "%s: Failed to write list of output files" msgstr "%s: Misslyckades med att läsa lista med utdatafiler." #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:448 #, fuzzy, c-format msgid "%s: Failed to write list of output status files" msgstr "%s: Misslyckades med att läsa lista med utdatafiler." #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:460 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:656 #, fuzzy, c-format msgid "%s: Failed to read list of input files" msgstr "%s: Misslyckades med att läsa lista med indatafiler." 
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:479 #, fuzzy, c-format msgid "%s: Failed to write list of input files" msgstr "%s: Misslyckades med att läsa lista med indatafiler." #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:491 #, c-format msgid "%s: Received DTR with two remote endpoints!" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:502 #: src/services/cache_service/CacheServiceGenerator.cpp:108 #, fuzzy, c-format msgid "No active job id %s" msgstr "Kan inte hitta jobb-id: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:543 #, fuzzy, c-format msgid "%s: Failed to read list of output files, can't clean up session dir" msgstr "%s: Misslyckades med att läsa lista med utdatafiler." #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:567 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:578 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:692 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:817 #, fuzzy, c-format msgid "%s: Failed to clean up session dir" msgstr "%s: Misslyckades med att avbryta exekverande jobb." #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:576 #, fuzzy, c-format msgid "%s: Failed to read list of input files, can't clean up session dir" msgstr "%s: Misslyckades med att läsa lista med indatafiler." #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:588 #, fuzzy, c-format msgid "%s: All %s %s successfully" msgstr "Stängdes OK" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:592 #, fuzzy, c-format msgid "%s: Some %s failed" msgstr "Några överföringar misslyckades" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:607 #, fuzzy, c-format msgid "%s: Received data staging request to %s files" msgstr "Misslyckades med att skriva begäran till en fil" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:667 #, fuzzy, c-format msgid "%s: Duplicate file in list of input files: %s" msgstr "%s: Misslyckades med att läsa lista med indatafiler." #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:714 #, fuzzy, c-format msgid "%s: Reading output files from user generated list in %s" msgstr "Fel vid läsning av metafil %s: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:716 #, fuzzy, c-format msgid "%s: Error reading user generated output file list in %s" msgstr "Fel vid läsning av metafil %s: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:744 #, fuzzy, c-format msgid "%s: Failed to list output directory %s: %s" msgstr "%s: Misslyckades med att läsa lista med utdatafiler." #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:763 #, fuzzy, c-format msgid "%s: Adding new output file %s: %s" msgstr "%s: Misslyckades med att läsa lista med utdatafiler." 
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:785 #, fuzzy, c-format msgid "%s: Two identical output destinations: %s" msgstr "Misslyckades med att påbörja skrivning till destination: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:798 #, c-format msgid "%s: Cannot upload two different files %s and %s to same LFN: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:828 #, fuzzy, c-format msgid "%s: Received job in a bad state: %s" msgstr "JobID: %s tillstånd: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:901 #, c-format msgid "" "%s: Destination file %s was possibly left unfinished from previous A-REX " "run, will overwrite" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:988 #, fuzzy, c-format msgid "%s: Failed writing local information" msgstr "%s: Misslyckades med att skriva lokal information." #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1005 #, c-format msgid "%s: Cancelling active DTRs" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1028 #, fuzzy, c-format msgid "%s: Can't read list of input files" msgstr "%s: Misslyckades med att läsa lista med indatafiler." #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1043 #, fuzzy, c-format msgid "%s: Checking user uploadable file: %s" msgstr "Misslyckades med att ladda upp fil: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1048 #, fuzzy, c-format msgid "%s: User has uploaded file %s" msgstr "Misslyckades med att ladda upp fil: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1055 #, fuzzy, c-format msgid "%s: Failed writing changed input file." msgstr "Misslyckades med att ladda upp lokala indatafiler" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1059 #, fuzzy, c-format msgid "%s: Critical error for uploadable file %s" msgstr "Misslyckades med att ladda upp fil: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1076 #, c-format msgid "%s: Uploadable files timed out" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1132 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1158 #, fuzzy, c-format msgid "%s: Can't convert checksum %s to int for %s" msgstr "Felaktigt format i fil %s: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1139 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1153 #, fuzzy, c-format msgid "%s: Can't convert filesize %s to int for %s" msgstr "Felaktigt format i fil %s: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1148 #, fuzzy, c-format msgid "%s: Invalid size/checksum information (%s) for %s" msgstr "Ogiltigt ISO-tidsperiodsformat: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1170 #, fuzzy, c-format msgid "%s: Invalid file: %s is too big." 
msgstr "Ogiltig URL: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1186 #, fuzzy, c-format msgid "%s: Failed to switch user ID to %d/%d to read file %s" msgstr "Misslyckades med att skriva begäran till en fil" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1192 #, fuzzy, c-format msgid "%s: Failed to open file %s for reading" msgstr "Misslyckades med att öppna fil %s för läsning: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1200 #, fuzzy, c-format msgid "%s: Error accessing file %s" msgstr "Tillträdesfel för cachefil %s: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1212 #, fuzzy, c-format msgid "%s: Error reading file %s" msgstr "Fel vid läsning av metafil %s: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1227 #, c-format msgid "%s: File %s has wrong checksum: %llu. Expected %lli" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1233 #, c-format msgid "%s: Checksum %llu verified for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1245 msgid "" "Found unfinished DTR transfers. It is possible the previous A-REX process " "did not shut down normally" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1252 #, c-format msgid "Found DTR %s for file %s left in transferring state from previous run" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:131 #, c-format msgid "Replacing queue '%s' with '%s'" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:225 #, c-format msgid "Bad name for stdout: %s" msgstr "Felaktigt namn för stdout: %s" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:233 #, c-format msgid "Bad name for stderr: %s" msgstr "Felaktigt namn för stderr: %s" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:291 #, c-format msgid "Bad name for runtime environment: %s" msgstr "Felaktigt namn för runtime-miljö: %s" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:336 #, fuzzy msgid "Job description file could not be read." 
msgstr "Jobstatus kunde inte inhämtas" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:387 #, fuzzy msgid "Bad name for executable: " msgstr "Felaktigt namn för stdout: %s" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:401 #, fuzzy, c-format msgid "Bad name for executable: %s" msgstr "Felaktigt namn för stdout: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:107 #, c-format msgid "" "%s: Failed reading .local and changing state, job and A-REX may be left in " "an inconsistent state" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:169 #, c-format msgid "Current jobs in system (PREPARING to FINISHING) per-DN (%i entries)" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:171 #, fuzzy, c-format msgid "%s: %i" msgstr "%s: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:177 #, c-format msgid "%s: Destroying" msgstr "%s: Förstör" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:182 #, c-format msgid "%s: Can't read state - no comments, just cleaning" msgstr "%s: Kan inte läsa tillstånd - inga kommentarer, endast rensning" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:194 #, c-format msgid "%s: Cleaning control and session directories" msgstr "%s: Rensar kontroll- och sessionskataloger" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:200 #, c-format msgid "%s: This job may be still running - canceling" msgstr "%s: Detta job kan fortfarande vara exekverande - avbryter" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:203 #, c-format msgid "%s: Cancellation failed (probably job finished) - cleaning anyway" msgstr "" "%s: Avbrytande misslyckades (troligen är jobbet avslutat) - rensar ändå" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:210 #, c-format msgid "%s: Cancellation probably succeeded - cleaning" msgstr "%s: Avbrytande troligen lyckosamt - rensar" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:273 #, fuzzy, c-format msgid "%s: Failed writing list of output files: %s" msgstr "%s: Misslyckades med att läsa lista med utdatafiler." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:315 #, fuzzy, c-format msgid "%s: Failed creating grami file" msgstr "%s: Misslyckades med att skapa grami-fil." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:319 #, fuzzy, c-format msgid "%s: Failed setting executable permissions" msgstr "%s: Misslyckades med att ange körbart tillträde." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:331 #, fuzzy, c-format msgid "%s: state SUBMIT: starting child: %s" msgstr "%s: tillstånd SUBMITTING: startar barnprocess: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:334 #, c-format msgid "%s: state CANCELING: starting child: %s" msgstr "%s: tillstånd CANCELING: startar barnprocess: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:336 #, c-format msgid "%s: Job has completed already. No action taken to cancel" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:347 #, fuzzy, c-format msgid "%s: Failed running submission process" msgstr "%s: Misslyckades med att exekvera insändningsprocess." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:349 #, fuzzy, c-format msgid "%s: Failed running cancellation process" msgstr "%s: Misslyckades med att exekvera avbrytandeprocess." 
#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:355 #, c-format msgid "%s: LRMS scripts limit of %u is reached - suspending submit/cancel" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:374 #, c-format msgid "" "%s: Job submission to LRMS takes too long, but ID is already obtained. " "Pretending submission is done." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:380 #, c-format msgid "" "%s: Job cancellation takes too long, but diagnostic collection seems to be " "done. Pretending cancellation succeeded." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:388 #, fuzzy, c-format msgid "%s: Job submission to LRMS takes too long. Failing." msgstr "%s: Jobbinsändning till LRMS misslyckades." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:394 #, c-format msgid "%s: Job cancellation takes too long. Failing." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:404 #, fuzzy, c-format msgid "%s: state SUBMIT: child exited with code %i" msgstr "%s: tillstånd SUBMITTING: barnprocess avslutades med kod %i" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:409 #, c-format msgid "%s: state CANCELING: child exited with code %i" msgstr "%s: tillstånd CANCELING: barnprocess avslutades med kod %i" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:417 #, fuzzy, c-format msgid "%s: Job submission to LRMS failed" msgstr "%s: Jobbinsändning till LRMS misslyckades." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:420 #, fuzzy, c-format msgid "%s: Failed to cancel running job" msgstr "%s: Misslyckades med att avbryta exekverande jobb." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:434 #, fuzzy, c-format msgid "%s: Failed obtaining lrms id" msgstr "%s: Misslyckades med att erhålla LRMS-id." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:447 #, fuzzy, c-format msgid "%s: Failed writing local information: %s" msgstr "%s: Misslyckades med att skriva lokal information." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:457 #, fuzzy, c-format msgid "%s: state CANCELING: timeout waiting for cancellation" msgstr "%s: tillstånd CANCELING: startar barnprocess: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:463 #, fuzzy, c-format msgid "%s: state CANCELING: job diagnostics collected" msgstr "%s: tillstånd CANCELING: startar barnprocess: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:531 #, fuzzy, c-format msgid "%s: State: %s: still in data staging" msgstr "%s: tillstånd: PREPARING: startar barnprocess: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:550 #, fuzzy, c-format msgid "%s: Job is not allowed to be rerun anymore" msgstr "%s: Jobb har ej tillåtelse att omstartas längre." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:560 #, c-format msgid "%s: Job failed in unknown state. Won't rerun." msgstr "%s: Jobbet misslyckades i okänt tillstånd. Kommer ej att omstartas." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:581 #, fuzzy, c-format msgid "%s: Reprocessing job description failed" msgstr "%s: Processering av RSL misslyckades." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:588 #, fuzzy, c-format msgid "%s: Failed to read reprocessed list of output files" msgstr "%s: Misslyckades med att läsa omprocesserad lista med utdatafiler." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:592 #, fuzzy, c-format msgid "%s: Failed to read reprocessed list of input files" msgstr "%s: Misslyckades med att läsa omprocesserad lista med indatafiler." 
#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:676 #, fuzzy, c-format msgid "%s: Reading status of new job failed" msgstr "%s: Läsandet av det nya jobbets status misslyckades." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:689 #, fuzzy, c-format msgid "%s: State: ACCEPTED: parsing job description" msgstr "%s: State: ACCEPTED: tolkar RSL" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:691 #, fuzzy, c-format msgid "%s: Processing job description failed" msgstr "%s: Processering av RSL misslyckades." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:708 #, c-format msgid "%s: %s: New job belongs to %i/%i" msgstr "%s: %s: Nytt jobb tillhör %i/%i" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:718 #, fuzzy, c-format msgid "Failed to get DN information from .local file for job %s" msgstr "Misslyckades med att inhämta statusinformation" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:732 #, c-format msgid "%s: State: ACCEPTED" msgstr "%s: Tillstånd: ACCEPTED" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:739 #, c-format msgid "%s: State: ACCEPTED: dryrun" msgstr "%s: Tillstånd: ACCEPTED: dryrun" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:751 #, fuzzy, c-format msgid "%s: State: ACCEPTED: has process time %s" msgstr "%s: Tillstånd: ACCEPTED: har process-tid %i" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:757 #, c-format msgid "%s: State: ACCEPTED: moving to PREPARING" msgstr "%s: Tillstånd: ACCEPTED: flyttar till PREPARING" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:774 #, c-format msgid "%s: State: PREPARING" msgstr "%s: Tillstånd: PREPARING" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:778 #, fuzzy, c-format msgid "%s: Failed obtaining local job information." msgstr "%s: Misslyckades med att läsa lokal information." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:829 #, fuzzy, c-format msgid "%s: State: SUBMIT" msgstr "%s: Tillstånd: SUBMITTING" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:844 #, c-format msgid "%s: State: CANCELING" msgstr "%s: Tillstånd: CANCELING" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:860 #, c-format msgid "%s: State: INLRMS" msgstr "%s: Tillstånd: INLRMS" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:868 #, fuzzy, c-format msgid "%s: Job finished" msgstr "%s: Jobbet avslutat." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:872 #, c-format msgid "%s: State: INLRMS: exit message is %i %s" msgstr "%s: Tillstånd: INLRMS: avslutningsmeddelande är %i %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:893 #, c-format msgid "%s: State: FINISHING" msgstr "%s: Tillstånd: FINISHING" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:922 #, fuzzy, c-format msgid "%s: Job is requested to clean - deleting" msgstr "%s: Jobbet har begärts rensat - raderar." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:961 #, fuzzy, c-format msgid "%s: Can't rerun on request" msgstr "%s: Kan inte omstarta på begäran - inget lämpligt tillstånd." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:963 #, fuzzy, c-format msgid "%s: Can't rerun on request - not a suitable state" msgstr "%s: Kan inte omstarta på begäran - inget lämpligt tillstånd." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:973 #, fuzzy, c-format msgid "%s: Job is too old - deleting" msgstr "%s: Jobbet är för gammalt - raderar." 
#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1012 #, fuzzy, c-format msgid "%s: Job is ancient - delete rest of information" msgstr "%s: Jobbet är antikt - raderar resterande information." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1041 #, fuzzy, c-format msgid "%s: Canceling job because of user request" msgstr "%s: Avbryter jobb (%s) p.g.a. användarbegäran" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1107 #, fuzzy, c-format msgid "%s: Job failure detected" msgstr "%s: Jobbfel detekterat." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1136 #, c-format msgid "%s: State: %s from %s" msgstr "%s: Tillstånd: %s från %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1163 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1171 #, c-format msgid "%s: Plugin at state %s : %s" msgstr "%s: Plugin vid tillstånd %s : %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1177 #, c-format msgid "%s: Plugin execution failed" msgstr "%s: Pluginexekvering misslyckades" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1202 #, c-format msgid "%s: Delete request due to internal problems" msgstr "%s: Radera begäran p.g.a. interna problem" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1284 #, fuzzy, c-format msgid "Failed to move file %s to %s" msgstr "Misslyckades med att läsa fil %s: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1292 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1375 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1464 #, c-format msgid "Failed reading control directory: %s" msgstr "Misslyckades med att läsa kontrollkatalog: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1334 #, fuzzy, c-format msgid "Failed reading control directory: %s: %s" msgstr "Misslyckades med att läsa kontrollkatalog: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:153 #, fuzzy, c-format msgid "Invalid checksum in %s for %s" msgstr "Ogiltigt ISO-tidsperiodsformat: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:162 #, fuzzy, c-format msgid "Invalid file size in %s for %s " msgstr "Ogiltigt ISO-tidsperiodsformat: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:182 #, fuzzy, c-format msgid "Invalid file: %s is too big." msgstr "Ogiltig URL: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:196 #, fuzzy, c-format msgid "Error accessing file %s" msgstr "Tillträdesfel för cachefil %s: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:205 #, fuzzy, c-format msgid "Error reading file %s" msgstr "Fel vid läsning av metafil %s: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:216 #, c-format msgid "File %s has wrong CRC." 
msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:240 #, fuzzy, c-format msgid "Failed downloading file %s - %s" msgstr "Misslyckades med att ladda ner %s till %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:246 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:132 #, fuzzy msgid "Retrying" msgstr "sträng" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:249 #, fuzzy, c-format msgid "Downloaded file %s" msgstr "Filnedladdning misslyckades: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:330 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:243 #, fuzzy, c-format msgid "Wrong number of threads: %s" msgstr "Fel antal parametrar angivna" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:336 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:249 #, fuzzy, c-format msgid "Wrong number of files: %s" msgstr "Fel antal parametrar angivna" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:342 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:358 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:365 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:372 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:379 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:255 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:271 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:278 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:285 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:292 #, fuzzy, c-format msgid "Bad number: %s" msgstr "Dålig etikett: \"%s\"" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:346 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:352 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:259 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:265 #, fuzzy msgid "Specified user can't be handled" msgstr "URL:en är inte giltig: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:384 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:297 #, fuzzy, c-format msgid "Unsupported option: %c" msgstr "Icke understödd destinations-URL: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:388 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:301 #, fuzzy, c-format msgid "Missing parameter for option %c" msgstr "Använder cachade inställningar: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:392 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:305 #, fuzzy msgid "Undefined processing error" msgstr "Fatalt fel: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:399 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:312 #, fuzzy msgid "Missing job id" msgstr " Största antal väntade jobb: %i" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:401 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:314 #, fuzzy msgid "Missing control directory" msgstr "Misslyckades med att läsa kontrollkatalog: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:403 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:316 #, fuzzy msgid "Missing session directory" msgstr "%s: Rensar kontroll- och sessionskataloger" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:446 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:359 #, c-format msgid "Minimal speed: %llu B/s during %i s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:448 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:361 #, c-format msgid "Minimal average speed: 
%llu B/s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:450 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:363 #, c-format msgid "Maximal inactivity time: %i s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:455 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:368 msgid "Won't use more than 10 threads" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:462 #, fuzzy msgid "Downloader started" msgstr "Laddar ner jobb: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:488 #, fuzzy msgid "Can't read list of input files" msgstr "%s: Misslyckades med att läsa lista med indatafiler." #: src/services/a-rex/grid-manager/loaders/downloader.cpp:495 #, fuzzy, c-format msgid "Error: duplicate file in list of input files: %s" msgstr "%s: Misslyckades med att läsa lista med indatafiler." #: src/services/a-rex/grid-manager/loaders/downloader.cpp:518 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:402 #, fuzzy msgid "Can't read list of output files" msgstr "%s: Misslyckades med att läsa lista med utdatafiler." #: src/services/a-rex/grid-manager/loaders/downloader.cpp:523 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:449 #, fuzzy msgid "Can't remove junk files" msgstr "Kan inte läsa nyckelfil: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:531 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:463 #, fuzzy msgid "Can't read job local description" msgstr "Ogiltig jobbeskrivning:" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:553 #, fuzzy, c-format msgid "Local source for download: %s" msgstr "Inga platser funna för källa: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:567 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:571 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:512 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:518 #, fuzzy, c-format msgid "Can't accept URL: %s" msgstr "Kan inte göra stat på filen: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:586 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:535 #, fuzzy, c-format msgid "Failed to initiate file transfer: %s - %s" msgstr "Misslyckades med att skriva fil %s: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:603 #, fuzzy, c-format msgid "Downloaded %s" msgstr "Laddade %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:628 #, fuzzy, c-format msgid "Failed to download (but may be retried) %s" msgstr "Filnedladdning misslyckades: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:633 #, fuzzy, c-format msgid "Failed to download %s" msgstr "Misslyckades med att ladda ner jobb %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:642 #, fuzzy msgid "Some downloads failed" msgstr "Filnedladdning misslyckades" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:646 #, fuzzy msgid "Some downloads failed, but may be retried" msgstr "Några överföringar misslyckades" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:650 #, fuzzy msgid "Failed writing changed input file" msgstr "Misslyckades med att ladda upp lokala indatafiler" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:662 #, fuzzy, c-format msgid "Checking user uploadable file: %s" msgstr "Misslyckades med att ladda upp fil: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:668 #, fuzzy, c-format msgid "User has uploaded file %s" msgstr "Misslyckades med att ladda upp fil: %s" #: 
src/services/a-rex/grid-manager/loaders/downloader.cpp:673 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:706 #, fuzzy msgid "Failed writing changed input file." msgstr "Misslyckades med att ladda upp lokala indatafiler" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:677 #, fuzzy, c-format msgid "Critical error for uploadable file %s" msgstr "Misslyckades med att ladda upp fil: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:694 #, c-format msgid "No changes in uploadable files for %u seconds" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:695 #, fuzzy msgid "Uploadable files timed out" msgstr "cachefil: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:761 #, fuzzy, c-format msgid "Leaving downloader (%i)" msgstr "Läser inställningsfil: %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:126 #, fuzzy, c-format msgid "Failed uploading file %s - %s" msgstr "Misslyckades med att ladda upp fil: %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:135 #, fuzzy, c-format msgid "Uploaded file %s" msgstr "cachefil: %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:373 #, fuzzy msgid "Uploader started" msgstr "Laddar ner jobb: %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:409 #, fuzzy, c-format msgid "Reading output files from user generated list in %s" msgstr "Fel vid läsning av metafil %s: %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:411 #, fuzzy, c-format msgid "Error reading user generated output file list in %s" msgstr "Fel vid läsning av metafil %s: %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:424 #, fuzzy, c-format msgid "Two identical output destinations: %s" msgstr "Misslyckades med att påbörja skrivning till destination: %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:433 #, c-format msgid "Cannot upload two different files %s and %s to same LFN: %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:496 #, fuzzy, c-format msgid "Local destination for uploader %s" msgstr "Inga platser funna för destination: %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:556 #, fuzzy, c-format msgid "Uploaded %s" msgstr "Laddade %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:580 #, fuzzy msgid "Failed writing output status file" msgstr "Misslyckades med att initiera handtag" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:587 #, fuzzy, c-format msgid "Failed to upload (but may be retried) %s" msgstr "Filnedladdning misslyckades: %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:591 #, fuzzy, c-format msgid "Failed to upload %s" msgstr "Misslyckades med att ta bort %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:600 #, fuzzy msgid "Some uploads failed" msgstr "Några överföringar misslyckades" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:616 #, fuzzy, c-format msgid "Writing back dynamic output file %s" msgstr "Misslyckades med att initiera handtag" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:618 #, c-format msgid "Failed to rewrite output file list %s. 
Job resuming may not work" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:630 #, fuzzy msgid "Some uploads failed, but (some) may be retried" msgstr "Några överföringar misslyckades" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:637 #, fuzzy msgid "Failed writing changed output file" msgstr "Misslyckades med att initiera handtag" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:649 #, c-format msgid "Leaving uploader (%i)" msgstr "" #: src/services/a-rex/grid-manager/log/JobLog.cpp:118 #, fuzzy msgid ": Logger name is not specified" msgstr "Cacheinställningar: %s" #: src/services/a-rex/grid-manager/log/JobLog.cpp:130 #, fuzzy msgid ": Failure creating slot for reporter child process" msgstr "%s: Misslyckades med skapa slot för barnprocess." #: src/services/a-rex/grid-manager/log/JobLog.cpp:143 #, fuzzy msgid ": Failure starting reporter child process" msgstr "%s: Misslyckande vid start av barnprocess." #: src/services/a-rex/grid-manager/log/JobsMetrics.cpp:130 #, c-format msgid ": Metrics tool returned error code %i: %s" msgstr "" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:29 #, fuzzy msgid "Failed reading local information" msgstr "Misslyckades med att läsa lokal information." #: src/services/a-rex/grid-manager/mail/send_mail.cpp:79 #, fuzzy, c-format msgid "Running mailer command (%s)" msgstr "skräp i maxrerun-kommando" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:81 #, fuzzy msgid "Failed running mailer" msgstr "Misslyckades med att exekvera e-postsändare." #: src/services/a-rex/grid-manager/run/RunParallel.cpp:71 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:24 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:55 #, fuzzy, c-format msgid "%s: Failure creating slot for child process" msgstr "%s: Misslyckades med skapa slot för barnprocess." #: src/services/a-rex/grid-manager/run/RunParallel.cpp:79 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:30 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:61 #, fuzzy, c-format msgid "%s: Failure creating data storage for child process" msgstr "%s: Misslyckades med att skapa datalagring för barnprocess." #: src/services/a-rex/grid-manager/run/RunParallel.cpp:123 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:41 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:72 #, fuzzy, c-format msgid "%s: Failure starting child process" msgstr "%s: Misslyckande vid start av barnprocess." #: src/services/a-rex/grid-manager/run/RunParallel.cpp:140 #, c-format msgid "%s: Failed to run plugin" msgstr "%s: Misslyckades med att exekvera plugin" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:143 #, c-format msgid "%s: Plugin failed" msgstr "%s: Plugin misslyckades" #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:46 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:77 #, fuzzy, c-format msgid "%s: Failure waiting for child process to finish" msgstr "%s: Misslyckande vid väntan på att barnprocess skall avslutas." 
#: src/services/a-rex/information_collector.cpp:45 #, fuzzy, c-format msgid "Resource information provider: %s" msgstr "Klusterinformationstillhandahållare: %s" #: src/services/a-rex/information_collector.cpp:51 #, fuzzy msgid "Resource information provider failed" msgstr "Klusterinformationstillhandahållare: %s" #: src/services/a-rex/information_collector.cpp:55 #, fuzzy, c-format msgid "" "Resource information provider failed with exit status: %i\n" "%s" msgstr "Klusterinformationstillhandahållarresultat: %i" #: src/services/a-rex/information_collector.cpp:57 #, fuzzy, c-format msgid "" "Resource information provider log:\n" "%s" msgstr "Klusterinformationstillhandahållare: %s" #: src/services/a-rex/information_collector.cpp:61 #, fuzzy msgid "No new informational document assigned" msgstr "Tilldelat nytt informationsdokument" #: src/services/a-rex/information_collector.cpp:63 #, c-format msgid "Obtained XML: %s" msgstr "Erhållen XML: %s" #: src/services/a-rex/information_collector.cpp:77 #, fuzzy msgid "Informational document is empty" msgstr "Tilldelat nytt informationsdokument" #: src/services/a-rex/information_collector.cpp:88 msgid "Passing service's information from collector to registrator" msgstr "Skickar vidare servicens information från insamlare till registrator" #: src/services/a-rex/information_collector.cpp:144 #, fuzzy, c-format msgid "" "Registered static information: \n" " doc: %s" msgstr "Jobbtillståndsinformation ej funnen: %s" #: src/services/a-rex/information_collector.cpp:147 #, c-format msgid "" "Information Registered without static attributes: \n" " doc: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:324 msgid "OptimizedInformationContainer failed to create temporary file" msgstr "" #: src/services/a-rex/information_collector.cpp:327 #, fuzzy, c-format msgid "OptimizedInformationContainer created temporary file: %s" msgstr "Kunde inte skapa temporär fil \"%s\"" #: src/services/a-rex/information_collector.cpp:333 msgid "" "OptimizedInformationContainer failed to store XML document to temporary file" msgstr "" #: src/services/a-rex/information_collector.cpp:342 msgid "OptimizedInformationContainer failed to parse XML" msgstr "" #: src/services/a-rex/information_collector.cpp:353 #, fuzzy msgid "OptimizedInformationContainer failed to rename temprary file" msgstr "Kunde inte skapa temporär fil \"%s\"" #: src/services/a-rex/job.cpp:53 #, fuzzy, c-format msgid "Cannot handle local user %s" msgstr "Kan inte ändra ägare för %s" #: src/services/a-rex/job.cpp:101 #, fuzzy, c-format msgid "%s: Failed to parse user policy" msgstr "Misslyckades med att tolka användarpolicy för jobb %s" #: src/services/a-rex/job.cpp:106 #, fuzzy, c-format msgid "%s: Failed to load evaluator for user policy " msgstr "Misslyckades med ladda utvärderare för användarpolicy för jobb %s" #: src/services/a-rex/job.cpp:211 #, fuzzy, c-format msgid "%s: Unknown user policy '%s'" msgstr "Okänd användarpolicy '%s' för jobb %s" #: src/services/a-rex/job.cpp:473 src/services/a-rex/job.cpp:497 #, fuzzy, c-format msgid "Credential expires at %s" msgstr "Avvisar service: %s" #: src/services/a-rex/job.cpp:475 src/services/a-rex/job.cpp:499 #, fuzzy, c-format msgid "Credential handling exception: %s" msgstr "Avvisar service: %s" #: src/services/a-rex/job.cpp:789 #, fuzzy, c-format msgid "Out of tries while allocating new job ID in %s" msgstr "Slut på försök vid allokering av nytt jobb-id i %s" #: src/services/a-rex/job.cpp:1006 #, fuzzy msgid "No non-draining session dirs available" msgstr 
"Ingen giltig plats tillgänglig" #: src/services/a-rex/jura/ApelDestination.cpp:89 #: src/services/a-rex/jura/CARDestination.cpp:49 #: src/services/a-rex/jura/LutsDestination.cpp:71 #, fuzzy msgid "ServiceURL missing" msgstr "Peer-namn: %s" #: src/services/a-rex/jura/ApelDestination.cpp:97 #: src/services/a-rex/jura/CARDestination.cpp:56 #: src/services/a-rex/jura/LutsDestination.cpp:89 #, c-format msgid "Protocol is %s, should be https" msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:133 #: src/services/a-rex/jura/ApelDestination.cpp:158 #: src/services/a-rex/jura/CARDestination.cpp:95 #: src/services/a-rex/jura/LutsDestination.cpp:120 #: src/services/a-rex/jura/LutsDestination.cpp:144 #, c-format msgid "Ignoring incomplete log file \"%s\"" msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:182 #: src/services/a-rex/jura/CARDestination.cpp:119 #: src/services/a-rex/jura/LutsDestination.cpp:166 #, c-format msgid "Logging UR set of %d URs." msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:185 #: src/services/a-rex/jura/CARDestination.cpp:122 #: src/services/a-rex/jura/Destination.cpp:61 #: src/services/a-rex/jura/LutsDestination.cpp:169 #, fuzzy, c-format msgid "UR set dump: %s" msgstr " bas-dn: %s" #: src/services/a-rex/jura/ApelDestination.cpp:271 #: src/services/a-rex/jura/Destination.cpp:96 #, c-format msgid "Backup file (%s) created." msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:316 #, c-format msgid "APEL message file (%s) created." msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:370 #: src/services/a-rex/jura/CARAggregation.cpp:208 #, c-format msgid "system retval: %d" msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:73 #, c-format msgid "Aggregation record (%s) not exist, initialize it..." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:77 #, c-format msgid "Aggregation record (%s) initialization successful." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:82 #, c-format msgid "Some error happens during the Aggregation record (%s) initialization." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:88 #, fuzzy, c-format msgid "Aggregation record (%s) read from file successful." msgstr "Stängdes OK" #: src/services/a-rex/jura/CARAggregation.cpp:100 #, fuzzy, c-format msgid "Aggregation record (%s) stored successful." msgstr "Stängdes OK" #: src/services/a-rex/jura/CARAggregation.cpp:103 #, c-format msgid "Some error happens during the Aggregation record (%s) storing." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:156 #, c-format msgid "APEL aggregation message file (%s) created." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:228 #, fuzzy, c-format msgid "year: %s" msgstr "del: %s" #: src/services/a-rex/jura/CARAggregation.cpp:229 #, fuzzy, c-format msgid "moth: %s" msgstr "Jobb: %s" #: src/services/a-rex/jura/CARAggregation.cpp:230 #, fuzzy, c-format msgid "queue: %s" msgstr " Kö: %s" #: src/services/a-rex/jura/CARAggregation.cpp:238 #: src/services/a-rex/jura/CARAggregation.cpp:404 #, fuzzy, c-format msgid "query: %s" msgstr "Förfrågan: %s" #: src/services/a-rex/jura/CARAggregation.cpp:241 #, fuzzy, c-format msgid "list size: %d" msgstr "Svar: %s" #: src/services/a-rex/jura/CARAggregation.cpp:359 #, fuzzy, c-format msgid "XML: %s" msgstr "%s: %s" #: src/services/a-rex/jura/CARAggregation.cpp:361 msgid "UPDATE Aggregation Record called." 
msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:415 #: src/services/a-rex/jura/CARAggregation.cpp:465 msgid "Does not sending empty aggregation/synch message." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:548 #, fuzzy, c-format msgid "synch message: %s" msgstr "Källan skapades: %s" #: src/services/a-rex/jura/Destination.cpp:123 #, c-format msgid "Sent jobIDs: (nr. of job(s) %d)" msgstr "" #: src/services/a-rex/jura/Destinations.cpp:27 msgid "Unable to create adapter for the specific reporting destination type" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:92 #, c-format msgid "Insert filter element: <%s,%s>" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:105 #, c-format msgid "Not set filter for this URL (%s)." msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:111 #, c-format msgid "Current job's VO name: %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:114 #, fuzzy, c-format msgid "VO filter for host: %s" msgstr " filter: %s" #: src/services/a-rex/jura/JobLogFile.cpp:204 #: src/services/a-rex/jura/JobLogFile.cpp:698 #, fuzzy, c-format msgid "Read archive file %s" msgstr "cachefil: %s" #: src/services/a-rex/jura/JobLogFile.cpp:209 #: src/services/a-rex/jura/JobLogFile.cpp:703 #, c-format msgid "" "Could not read archive file %s for job log file %s (%s), generating new " "Usage Record" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:298 #: src/services/a-rex/jura/JobLogFile.cpp:827 #, c-format msgid "" "Missing required Usage Record element \"RecordIdentity\", in job log file %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:345 #, c-format msgid "VO (%s) not set for this (%s) SGAS server by VO filter." msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:378 #, c-format msgid "[VO filter] Job log will be not send. %s." msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:454 #: src/services/a-rex/jura/JobLogFile.cpp:970 #, c-format msgid "Missing required element \"Status\" in job log file %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:663 #: src/services/a-rex/jura/JobLogFile.cpp:1280 #, fuzzy, c-format msgid "Failed to create archive directory %s: %s" msgstr "Misslyckades med att skapa/hitta katalog %s : %s\"" #: src/services/a-rex/jura/JobLogFile.cpp:670 #: src/services/a-rex/jura/JobLogFile.cpp:1287 #, fuzzy, c-format msgid "Archiving Usage Record to file %s" msgstr "Använder jobblistfil: %s" #: src/services/a-rex/jura/JobLogFile.cpp:676 #: src/services/a-rex/jura/JobLogFile.cpp:1293 #, c-format msgid "Failed to write file %s: %s" msgstr "Misslyckades med att skriva fil %s: %s" #: src/services/a-rex/jura/JobLogFile.cpp:1062 #, c-format msgid "Missing required element \"CpuDuration\" in job log file %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:1082 #, c-format msgid "Set non standard bechmark type: %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:1095 #, c-format msgid "Ignored incoming benchmark value: %s, Use float value!" 
msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:1324 #, fuzzy, c-format msgid "Failed to delete file %s:%s" msgstr "Misslyckades med att skriva fil %s: %s" #: src/services/a-rex/jura/LutsDestination.cpp:223 #, fuzzy, c-format msgid "UsageRecords registration response: %s" msgstr "Registrering för service: %s" #: src/services/a-rex/jura/ReReporter.cpp:53 #, fuzzy, c-format msgid "Initialised, archived job log dir: %s" msgstr "Avslutat jobb %s" #: src/services/a-rex/jura/ReReporter.cpp:73 #, fuzzy, c-format msgid "Incoming time range: %s" msgstr "Använder nyckelfil: %s" #: src/services/a-rex/jura/ReReporter.cpp:92 #, c-format msgid "Requested time range: %d.%d.%d. 0:00 - %d.%d.%d. %d:%d " msgstr "" #: src/services/a-rex/jura/ReReporter.cpp:98 #: src/services/a-rex/jura/UsageReporter.cpp:45 #, fuzzy msgid "Interactive mode." msgstr " Gränssnittsnamn: %s" #: src/services/a-rex/jura/ReReporter.cpp:127 #: src/services/a-rex/jura/UsageReporter.cpp:68 #, fuzzy, c-format msgid "Could not open log directory \"%s\": %s" msgstr "Kunde inte öppna LDAP-förbindelse till %s" #: src/services/a-rex/jura/ReReporter.cpp:167 #: src/services/a-rex/jura/UsageReporter.cpp:193 #, fuzzy, c-format msgid "Error reading log directory \"%s\": %s" msgstr "Fel vid läsning av låsfil %s: %s" #: src/services/a-rex/jura/ReReporter.cpp:179 #: src/services/a-rex/jura/UsageReporter.cpp:205 #, fuzzy, c-format msgid "Finished, job log dir: %s" msgstr "Avslutat jobb %s" #: src/services/a-rex/jura/UsageReporter.cpp:39 #, c-format msgid "Initialised, job log dir: %s" msgstr "" #: src/services/a-rex/jura/UsageReporter.cpp:41 #, c-format msgid "Expiration time: %d seconds" msgstr "" #: src/services/a-rex/jura/UsageReporter.cpp:80 #, fuzzy, c-format msgid "Could not open output directory \"%s\": %s" msgstr "Kunde inte öppna LDAP-förbindelse till %s" #: src/services/a-rex/jura/UsageReporter.cpp:125 #, fuzzy, c-format msgid "Removing outdated job log file %s" msgstr "Tar bort metadata i %s" #: src/services/a-rex/migrate_activity.cpp:37 #, fuzzy, c-format msgid "" "MigrateActivity: request = \n" "%s" msgstr "" "CreateActivity: begäran = \n" "%s" #: src/services/a-rex/migrate_activity.cpp:42 #, fuzzy msgid "MigrateActivitys: no ActivityIdentifier found" msgstr "ChangeActivityStatus: ingen ActivityIdentifier funnen" #: src/services/a-rex/migrate_activity.cpp:51 #, fuzzy msgid "MigrateActivity: EPR contains no JobID" msgstr "ChangeActivityStatus: EPR innehåller ingen JobID" #: src/services/a-rex/migrate_activity.cpp:69 #, fuzzy msgid "MigrateActivity: Failed to accept delegation" msgstr "CreateActivity: Misslyckades med att acceptera delegering" #: src/services/a-rex/migrate_activity.cpp:130 #, fuzzy msgid "MigrateActivity: no job description found" msgstr "CreateActivity: ingen jobbeskrivning funnen" #: src/services/a-rex/migrate_activity.cpp:153 #, fuzzy, c-format msgid "Migration XML sent to AREXJob: %s" msgstr "Inställningar: LRMS: %s" #: src/services/a-rex/migrate_activity.cpp:180 #, fuzzy, c-format msgid "MigrateActivity: Failed to migrate new job: %s" msgstr "CreateActivity: Misslyckades med att skapa nytt jobb: %s" #: src/services/a-rex/migrate_activity.cpp:182 #, fuzzy msgid "MigrateActivity: Failed to migrate new job" msgstr "CreateActivity: Misslyckades med att skapa nytt jobb" #: src/services/a-rex/migrate_activity.cpp:198 #, fuzzy msgid "MigrateActivity finished successfully" msgstr "CreateActivity avslutades OK" #: src/services/a-rex/migrate_activity.cpp:202 #, fuzzy, c-format msgid "" "MigrateActivity: response = \n" "%s" 
msgstr "" "CreateActivity: svar = \n" "%s" #: src/services/a-rex/put.cpp:37 #, c-format msgid "Put: there is no job: %s - %s" msgstr "Put: det finns inget jobb: %s - %s" #: src/services/a-rex/put.cpp:43 #, fuzzy, c-format msgid "Put: there is no payload for file %s in job: %s" msgstr "Put: det finns inget jobb: %s - %s" #: src/services/a-rex/put.cpp:56 #, c-format msgid "Put: unrecognized payload for file %s in job: %s" msgstr "" #: src/services/a-rex/put.cpp:76 src/services/a-rex/put.cpp:130 #, fuzzy, c-format msgid "Put: failed to create file %s for job %s - %s" msgstr "Misslyckades med att skapa fil %s för skrivning: %s" #: src/services/a-rex/put.cpp:85 #, fuzzy, c-format msgid "Put: failed to set position of file %s for job %s to %Lu - %s" msgstr "Misslyckades med att skriva fil %s: %s" #: src/services/a-rex/put.cpp:91 #, fuzzy, c-format msgid "Put: failed to allocate memory for file %s in job %s" msgstr "Misslyckades med att skapa infofil %s: %s" #: src/services/a-rex/put.cpp:103 #, fuzzy, c-format msgid "Put: failed to write to file %s for job %s - %s" msgstr "Misslyckades med att skriva fil %s: %s" #: src/services/a-rex/terminate_activities.cpp:29 #, c-format msgid "" "TerminateActivities: request = \n" "%s" msgstr "" "TerminateActivities: begäran = \n" "%s" #: src/services/a-rex/terminate_activities.cpp:40 #, fuzzy msgid "TerminateActivities: non-AREX job requested" msgstr "TerminateActivities: icke-ARex-job begärt" #: src/services/a-rex/terminate_activities.cpp:49 #, c-format msgid "TerminateActivities: job %s - %s" msgstr "TerminateActivities: jobb %s - %s" #: src/services/a-rex/terminate_activities.cpp:69 #, c-format msgid "" "TerminateActivities: response = \n" "%s" msgstr "" "TerminateActivities: svar = \n" "%s" #: src/services/a-rex/test.cpp:34 src/tests/count/test_service.cpp:25 #: src/tests/echo/test.cpp:23 src/tests/echo/test_service.cpp:22 msgid "Creating service side chain" msgstr "Skapar servicesidokedjan" #: src/services/a-rex/test.cpp:37 src/tests/count/test_service.cpp:28 #: src/tests/echo/test.cpp:26 src/tests/echo/test_service.cpp:25 msgid "Failed to load service configuration" msgstr "Misslyckades med att ladda serviceinställningar" #: src/services/a-rex/test.cpp:43 src/services/a-rex/test_cache_check.cpp:24 #: src/tests/count/test_client.cpp:23 #: src/tests/echo/echo_test4axis2c/test_client.cpp:20 #: src/tests/echo/test_client.cpp:21 msgid "Creating client side chain" msgstr "Skapar klientsidokedjan" #: src/services/a-rex/test.cpp:49 src/tests/count/test_client.cpp:53 #: src/tests/echo/echo_test4axis2c/test_client.cpp:43 #: src/tests/echo/test_client.cpp:59 msgid "Failed to load client configuration" msgstr "Misslyckades med att ladda klientinställningar" #: src/services/a-rex/test.cpp:53 src/tests/count/test_client.cpp:57 #: src/tests/echo/echo_test4axis2c/test_client.cpp:47 #: src/tests/echo/test.cpp:58 src/tests/echo/test_client.cpp:63 msgid "Client side MCCs are loaded" msgstr "Klientsidans MCC:er har laddats" #: src/services/a-rex/test.cpp:56 src/tests/count/test_client.cpp:60 #: src/tests/echo/echo_test4axis2c/test_client.cpp:50 #: src/tests/echo/test_client.cpp:66 msgid "Client chain does not have entry point" msgstr "Klientkedjan har ingen ingångspunkt" #: src/services/a-rex/test.cpp:112 src/services/a-rex/test.cpp:191 #: src/services/a-rex/test.cpp:248 src/services/a-rex/test.cpp:296 #: src/services/a-rex/test.cpp:344 src/services/a-rex/test.cpp:392 #: src/tests/count/test_client.cpp:87 #: src/tests/echo/echo_test4axis2c/test_client.cpp:74 #: 
src/tests/echo/test.cpp:74 src/tests/echo/test_client.cpp:90 msgid "Request failed" msgstr "Förfrågan misslyckades" #: src/services/a-rex/test.cpp:115 src/services/a-rex/test.cpp:194 #: src/services/a-rex/test.cpp:251 src/services/a-rex/test.cpp:299 #: src/services/a-rex/test.cpp:347 src/services/a-rex/test.cpp:395 #: src/tests/echo/test.cpp:82 msgid "Request succeed!!!" msgstr "Förfrågan lyckades!!!" #: src/services/a-rex/test.cpp:117 src/services/a-rex/test.cpp:196 #: src/services/a-rex/test.cpp:253 src/services/a-rex/test.cpp:301 #: src/services/a-rex/test.cpp:349 src/services/a-rex/test.cpp:397 #: src/tests/count/test_client.cpp:93 #: src/tests/echo/echo_test4axis2c/test_client.cpp:80 #: src/tests/echo/test.cpp:79 src/tests/echo/test_client.cpp:96 msgid "There is no response" msgstr "Det finns inget svar" #: src/services/a-rex/test.cpp:125 src/services/a-rex/test.cpp:204 #: src/services/a-rex/test.cpp:261 src/services/a-rex/test.cpp:309 #: src/services/a-rex/test.cpp:357 src/services/a-rex/test.cpp:405 #: src/tests/count/test_client.cpp:100 #: src/tests/echo/echo_test4axis2c/test_client.cpp:87 #: src/tests/echo/test_client.cpp:103 msgid "Response is not SOAP" msgstr "Svaret är inte SOAP" #: src/services/a-rex/test.cpp:136 msgid "Response is not expected WS-RP" msgstr "Svaret är inte förväntad WS-RP" #: src/services/a-rex/update_credentials.cpp:29 #, fuzzy, c-format msgid "" "UpdateCredentials: request = \n" "%s" msgstr "" "CreateActivity: begäran = \n" "%s" #: src/services/a-rex/update_credentials.cpp:35 msgid "UpdateCredentials: missing Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:43 msgid "UpdateCredentials: wrong number of Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:51 msgid "UpdateCredentials: wrong number of elements inside Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:60 #, fuzzy msgid "UpdateCredentials: EPR contains no JobID" msgstr "ChangeActivityStatus: EPR innehåller ingen JobID" #: src/services/a-rex/update_credentials.cpp:70 #, fuzzy, c-format msgid "UpdateCredentials: no job found: %s" msgstr "ChangeActivityStatus: inget jobb funnet: %s" #: src/services/a-rex/update_credentials.cpp:77 #, fuzzy msgid "UpdateCredentials: failed to update credentials" msgstr "ChangeActivityStatus: gammalt BES-tillstånd matchar inte" #: src/services/a-rex/update_credentials.cpp:85 #, fuzzy, c-format msgid "" "UpdateCredentials: response = \n" "%s" msgstr "" "CreateActivity: svar = \n" "%s" #: src/services/cache_service/CacheService.cpp:52 #, fuzzy msgid "No A-REX config file found in cache service configuration" msgstr "Misslyckades med att ladda serviceinställningar" #: src/services/cache_service/CacheService.cpp:56 #, fuzzy, c-format msgid "Using A-REX config file %s" msgstr "Använder jobblistfil: %s" #: src/services/cache_service/CacheService.cpp:60 #, fuzzy, c-format msgid "Failed to process A-REX configuration in %s" msgstr "Misslyckades med att läsa GM-inställningsfil %s" #: src/services/cache_service/CacheService.cpp:65 #, fuzzy msgid "No caches defined in configuration" msgstr "Cacherot saknas i inställningar" #: src/services/cache_service/CacheService.cpp:139 msgid "Empty filename returned from FileCache" msgstr "" #: src/services/cache_service/CacheService.cpp:151 #, fuzzy, c-format msgid "Problem accessing cache file %s: %s" msgstr "Tillträdesfel för cachefil %s: %s" #: src/services/cache_service/CacheService.cpp:200 #: src/services/cache_service/CacheService.cpp:472 #, fuzzy msgid "No job ID supplied" msgstr "Inget 
svar" #: src/services/cache_service/CacheService.cpp:209 #, fuzzy, c-format msgid "Bad number in priority element: %s" msgstr "felaktigt nummer i maxjobs" #: src/services/cache_service/CacheService.cpp:218 msgid "No username supplied" msgstr "" #: src/services/cache_service/CacheService.cpp:225 #, fuzzy, c-format msgid "Supplied username %s does not match mapped username %s" msgstr "Den privata nyckeln passar inte certifikatet" #: src/services/cache_service/CacheService.cpp:239 #, fuzzy msgid "No session directory found" msgstr "%s: Rensar kontroll- och sessionskataloger" #: src/services/cache_service/CacheService.cpp:243 #, fuzzy, c-format msgid "Using session dir %s" msgstr "%s: Rensar kontroll- och sessionskataloger" #: src/services/cache_service/CacheService.cpp:247 #, fuzzy, c-format msgid "Failed to stat session dir %s" msgstr "Misslyckades med att ändra ägare av sessionskatalog till %i: %s" #: src/services/cache_service/CacheService.cpp:252 #, c-format msgid "Session dir %s is owned by %i, but current mapped user is %i" msgstr "" #: src/services/cache_service/CacheService.cpp:279 #, fuzzy, c-format msgid "Failed to access proxy of given job id %s at %s" msgstr "Misslyckades med att läsa proxy fil: %s" #: src/services/cache_service/CacheService.cpp:297 #, fuzzy, c-format msgid "DN is %s" msgstr "Misslyckades med att läsa fil %s: %s" #: src/services/cache_service/CacheService.cpp:373 #, fuzzy, c-format msgid "Permission checking passed for url %s" msgstr "Tillträdeskontroll OK" #: src/services/cache_service/CacheService.cpp:398 #: src/services/cache_service/CacheServiceGenerator.cpp:138 #, fuzzy, c-format msgid "Failed to move %s to %s: %s" msgstr "Misslyckades med att ta bort hård länk %s: %s" #: src/services/cache_service/CacheService.cpp:437 #, c-format msgid "Starting new DTR for %s" msgstr "" #: src/services/cache_service/CacheService.cpp:439 #, fuzzy, c-format msgid "Failed to start new DTR for %s" msgstr "Misslyckades med att starta ny tråd" #: src/services/cache_service/CacheService.cpp:486 #, fuzzy, c-format msgid "Job %s: all files downloaded successfully" msgstr "Stängdes OK" #: src/services/cache_service/CacheService.cpp:495 #, fuzzy, c-format msgid "Job %s: Some downloads failed" msgstr "Filnedladdning misslyckades" #: src/services/cache_service/CacheService.cpp:501 #, c-format msgid "Job %s: files still downloading" msgstr "" #: src/services/cache_service/CacheService.cpp:514 #, fuzzy msgid "CacheService: Unauthorized" msgstr "echo: Oautoriserad" #: src/services/cache_service/CacheService.cpp:523 msgid "No local user mapping found" msgstr "" #: src/services/cache_service/CacheService.cpp:530 #: src/services/data-staging/DataDeliveryService.cpp:631 #, fuzzy, c-format msgid "Identity is %s" msgstr "Identitetsnamn: %s" #: src/services/cache_service/CacheService.cpp:595 msgid "Only POST is supported in CacheService" msgstr "" #: src/services/cache_service/CacheServiceGenerator.cpp:88 #, fuzzy, c-format msgid "DTR %s finished with state %s" msgstr "%s ny status: %s" #: src/services/cache_service/CacheServiceGenerator.cpp:127 #, fuzzy, c-format msgid "Could not determine session directory from filename %s" msgstr "Kunde inte öppna LDAP-förbindelse till %s" #: src/services/cache_service/CacheServiceGenerator.cpp:168 #, fuzzy, c-format msgid "Invalid DTR for source %s, destination %s" msgstr "Misslyckades med att slå upp destination: %s" #: src/services/cache_service/CacheServiceGenerator.cpp:210 #, fuzzy, c-format msgid "DTRs still running for job %s" msgstr "Hämtar %s-jobb" #: 
src/services/cache_service/CacheServiceGenerator.cpp:219 #, fuzzy, c-format msgid "All DTRs finished for job %s" msgstr "Avslutat jobb %s" #: src/services/cache_service/CacheServiceGenerator.cpp:226 #, fuzzy, c-format msgid "Job %s not found" msgstr "Jobb ej funnet i jobblista: %s" #: src/services/data-staging/DataDeliveryService.cpp:58 #, c-format msgid "Archiving DTR %s, state ERROR" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:62 #, fuzzy, c-format msgid "Archiving DTR %s, state %s" msgstr "Använder jobblistfil: %s" #: src/services/data-staging/DataDeliveryService.cpp:166 #, fuzzy msgid "No delegation token in request" msgstr "DelegateProxy misslyckades" #: src/services/data-staging/DataDeliveryService.cpp:174 msgid "Failed to accept delegation" msgstr "Misslyckades med att acceptera delegering" #: src/services/data-staging/DataDeliveryService.cpp:203 #: src/services/data-staging/DataDeliveryService.cpp:210 #, fuzzy msgid "ErrorDescription" msgstr "Ogiltig jobbeskrivning:" #: src/services/data-staging/DataDeliveryService.cpp:215 #, c-format msgid "All %u process slots used" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:230 #, c-format msgid "Received retry for DTR %s still in transfer" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:237 #, c-format msgid "Replacing DTR %s in state %s with new request" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:248 #, fuzzy, c-format msgid "Storing temp proxy at %s" msgstr "Startar hjälpprocess (%s): %s" #: src/services/data-staging/DataDeliveryService.cpp:256 #, fuzzy, c-format msgid "Failed to create temp proxy at %s: %s" msgstr "Misslyckades med att skapa fil i %s" #: src/services/data-staging/DataDeliveryService.cpp:263 #, fuzzy, c-format msgid "Failed to change owner of temp proxy at %s to %i:%i: %s" msgstr "Misslyckades med att ändra ägare av sessionskatalog till %i: %s" #: src/services/data-staging/DataDeliveryService.cpp:289 #, fuzzy msgid "Invalid DTR" msgstr "Ogiltig URL: %s" #: src/services/data-staging/DataDeliveryService.cpp:294 #, fuzzy, c-format msgid "Failed to remove temporary proxy %s: %s" msgstr "Misslyckades med att ta bort hård länk %s: %s" #: src/services/data-staging/DataDeliveryService.cpp:395 #, fuzzy, c-format msgid "No such DTR %s" msgstr "Ingen sådan användare: %s" #: src/services/data-staging/DataDeliveryService.cpp:406 #, fuzzy, c-format msgid "DTR %s failed: %s" msgstr "DCAU misslyckades: %s" #: src/services/data-staging/DataDeliveryService.cpp:417 #, fuzzy, c-format msgid "DTR %s finished successfully" msgstr "FillJobStore har avslutats OK" #: src/services/data-staging/DataDeliveryService.cpp:427 #, c-format msgid "DTR %s still in progress (%lluB transferred)" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:488 #, c-format msgid "No active DTR %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:498 #, fuzzy, c-format msgid "DTR %s was already cancelled" msgstr "Denna instans har redan tagits bort" #: src/services/data-staging/DataDeliveryService.cpp:507 #, fuzzy, c-format msgid "DTR %s could not be cancelled" msgstr "Service %s(%s) kunde inte skapas" #: src/services/data-staging/DataDeliveryService.cpp:551 #, fuzzy, c-format msgid "Failed to get load average: %s" msgstr "Misslyckades med att erhålla lokal adress för port %s - %s" #: src/services/data-staging/DataDeliveryService.cpp:575 msgid "Invalid configuration - no allowed IP address specified" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:579 #, fuzzy 
msgid "Invalid configuration - no allowed dirs specified" msgstr "Cacheinställningar: %s" #: src/services/data-staging/DataDeliveryService.cpp:590 #, fuzzy msgid "Failed to start archival thread" msgstr "Misslyckades med att starta ny tråd" #: src/services/data-staging/DataDeliveryService.cpp:615 msgid "Shutting down data delivery service" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:624 #, fuzzy msgid "Unauthorized" msgstr "echo: Oautoriserad" #: src/services/data-staging/DataDeliveryService.cpp:710 msgid "Only POST is supported in DataDeliveryService" msgstr "" #: src/services/examples/echo_python/EchoService.py:12 #, fuzzy msgid "EchoService (python) constructor called" msgstr "Python-wrapper-konstruktor anropad" #: src/services/examples/echo_python/EchoService.py:17 #, python-format msgid "EchoService (python) has prefix %(prefix)s and suffix %(suffix)s" msgstr "" #: src/services/examples/echo_python/EchoService.py:24 #, fuzzy msgid "EchoService (python) destructor called" msgstr "Python-wrapper-konstruktor anropad" #: src/services/examples/echo_python/EchoService.py:54 msgid "EchoService (python) thread test starting" msgstr "" #: src/services/examples/echo_python/EchoService.py:65 #, python-format msgid "EchoService (python) thread test, iteration %(iteration)s %(status)s" msgstr "" #: src/services/examples/echo_python/EchoService.py:91 #, fuzzy msgid "EchoService (python) 'Process' called" msgstr "Python-wrapper-konstruktor anropad" #: src/services/examples/echo_python/EchoService.py:95 #, python-format msgid "inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:96 #, python-format msgid "inmsg.Attributes().getAll() = %s " msgstr "" #: src/services/examples/echo_python/EchoService.py:97 #, fuzzy, python-format msgid "EchoService (python) got: %s " msgstr "Python-wrapper-konstruktor anropad" #: src/services/examples/echo_python/EchoService.py:102 #, fuzzy, python-format msgid "EchoService (python) request_namespace: %s" msgstr "Python-wrapper-konstruktor anropad" #: src/services/examples/echo_python/EchoService.py:108 #: src/services/examples/echo_python/EchoService.py:177 #, fuzzy, python-format msgid "outpayload %s" msgstr "Laddade %s" #: src/services/examples/echo_python/EchoService.py:137 msgid "Calling https://localhost:60000/Echo using ClientSOAP" msgstr "" #: src/services/examples/echo_python/EchoService.py:140 msgid "Calling http://localhost:60000/Echo using ClientSOAP" msgstr "" #: src/services/examples/echo_python/EchoService.py:146 #: src/services/examples/echo_python/EchoService.py:161 #, fuzzy, python-format msgid "new_payload %s" msgstr "Laddade %s" #: src/services/examples/echo_python/EchoService.py:155 msgid "Calling http://localhost:60000/Echo using httplib" msgstr "" #: src/services/examples/echo_python/EchoService.py:171 msgid "Start waiting 10 sec..." msgstr "" #: src/services/examples/echo_python/EchoService.py:173 #, fuzzy msgid "Waiting ends." 
msgstr "Väntar på svar" #: src/services/gridftpd/auth/auth.cpp:312 #, fuzzy, c-format msgid "Unknown authorization command %s" msgstr "Delegeringsauktorisering lyckades" #: src/services/gridftpd/auth/auth.cpp:330 #, c-format msgid "" "The [vo] section labeled '%s' has no file associated and can't be used for " "matching" msgstr "" #: src/services/gridftpd/auth/auth_ldap.cpp:56 #, fuzzy, c-format msgid "Connecting to %s:%i" msgstr "Konvertering misslyckades: %s" #: src/services/gridftpd/auth/auth_ldap.cpp:57 #, fuzzy, c-format msgid "Querying at %s" msgstr "Förfrågan: %s" #: src/services/gridftpd/auth/auth_ldap.cpp:62 #, fuzzy, c-format msgid "Failed to query LDAP server %s" msgstr "Misslyckades med att koppla upp mot RLS-servern: %s" #: src/services/gridftpd/auth/auth_ldap.cpp:69 #, fuzzy, c-format msgid "Failed to get results from LDAP server %s" msgstr "Misslyckades med att koppla upp mot RLS-servern: %s" #: src/services/gridftpd/auth/auth_ldap.cpp:83 #, fuzzy msgid "LDAP authorization is not supported" msgstr "SOAP-process understöds inte: %s" #: src/services/gridftpd/auth/auth_plugin.cpp:73 #: src/services/gridftpd/auth/unixmap.cpp:254 #, fuzzy, c-format msgid "Plugin %s failed to run" msgstr "%s: Plugin misslyckades" #: src/services/gridftpd/auth/auth_plugin.cpp:75 #: src/services/gridftpd/auth/unixmap.cpp:256 #, fuzzy, c-format msgid "Plugin %s printed: %u" msgstr "Svar: %s" #: src/services/gridftpd/auth/auth_plugin.cpp:76 #: src/services/gridftpd/auth/unixmap.cpp:257 #, fuzzy, c-format msgid "Plugin %s error: %u" msgstr "Globusfel: %s" #: src/services/gridftpd/auth/auth_voms.cpp:28 #, c-format msgid "VOMS proxy processing returns: %i - %s" msgstr "" #: src/services/gridftpd/auth/auth_voms.cpp:120 #, fuzzy, c-format msgid "VOMS trust chains: %s" msgstr "%s version %s" #: src/services/gridftpd/commands.cpp:46 #, fuzzy, c-format msgid "response: %s" msgstr "Svar: %s" #: src/services/gridftpd/commands.cpp:50 #, fuzzy, c-format msgid "Send response failed: %s" msgstr "Källan skapades: %s" #: src/services/gridftpd/commands.cpp:80 msgid "Response sending error" msgstr "" #: src/services/gridftpd/commands.cpp:93 #, fuzzy msgid "Closed connection" msgstr "Stänger förbindelse" #: src/services/gridftpd/commands.cpp:131 #, fuzzy, c-format msgid "Socket conversion failed: %s" msgstr "Källan skapades: %s" #: src/services/gridftpd/commands.cpp:141 #, fuzzy, c-format msgid "Failed to obtain own address: %s" msgstr "Misslyckades med att erhålla lokal adress för port %s - %s" #: src/services/gridftpd/commands.cpp:149 #, c-format msgid "Failed to recognize own address type (IPv4 ir IPv6) - %u" msgstr "" #: src/services/gridftpd/commands.cpp:159 #, fuzzy, c-format msgid "Accepted connection on [%s]:%u" msgstr "Misslyckades med att etablera förbindelse till %s:%i" #: src/services/gridftpd/commands.cpp:161 #, c-format msgid "Accepted connection on %u.%u.%u.%u:%u" msgstr "" #: src/services/gridftpd/commands.cpp:196 #, fuzzy msgid "Accept failed" msgstr "Tillträdeskontroll misslyckades" #: src/services/gridftpd/commands.cpp:204 #: src/services/gridftpd/listener.cpp:415 #, fuzzy, c-format msgid "Accept failed: %s" msgstr "cachefil: %s" #: src/services/gridftpd/commands.cpp:219 #, c-format msgid "Accepted connection from [%s]:%u" msgstr "" #: src/services/gridftpd/commands.cpp:221 #, c-format msgid "Accepted connection from %u.%u.%u.%u:%u" msgstr "" #: src/services/gridftpd/commands.cpp:230 #, fuzzy msgid "Authenticate in commands failed" msgstr "Certifikatverifiering misslyckades" #: 
src/services/gridftpd/commands.cpp:239 #, fuzzy msgid "Authentication failure" msgstr "Misslyckades med autentisering" #: src/services/gridftpd/commands.cpp:247 #, fuzzy, c-format msgid "User subject: %s" msgstr "subjekt: %s" #: src/services/gridftpd/commands.cpp:248 #, fuzzy, c-format msgid "Encrypted: %s" msgstr " Avslutningstid: %s" #: src/services/gridftpd/commands.cpp:254 #, fuzzy msgid "User has no proper configuration associated" msgstr "Kan inte öppna inställningsfil." #: src/services/gridftpd/commands.cpp:262 msgid "" "User has empty virtual directory tree.\n" "Either user has no authorised plugins or there are no plugins configured at " "all." msgstr "" #: src/services/gridftpd/commands.cpp:279 msgid "Read commands in authenticate failed" msgstr "" #: src/services/gridftpd/commands.cpp:410 #, fuzzy msgid "Control connection (probably) closed" msgstr "Kontaktar: %s" #: src/services/gridftpd/commands.cpp:444 #: src/services/gridftpd/commands.cpp:723 #, fuzzy msgid "Command EPRT" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:445 #, fuzzy, c-format msgid "Failed to parse remote addres %s" msgstr "Misslyckades med att förregistrera destination: %s" #: src/services/gridftpd/commands.cpp:467 #, fuzzy, c-format msgid "Command USER %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:474 #, fuzzy msgid "Command CDUP" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:480 #, fuzzy, c-format msgid "Command CWD %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:496 #, fuzzy, c-format msgid "Command MKD %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:516 #, fuzzy, c-format msgid "Command SIZE %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:531 #, fuzzy, c-format msgid "Command SBUF: %i" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:552 #, fuzzy, c-format msgid "Command MLST %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:575 #, fuzzy, c-format msgid "Command DELE %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:590 #, fuzzy, c-format msgid "Command RMD %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:604 #, fuzzy, c-format msgid "Command TYPE %c" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:615 #, fuzzy, c-format msgid "Command MODE %c" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:627 #, fuzzy msgid "Command ABOR" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:640 #, fuzzy, c-format msgid "Command REST %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:653 #, fuzzy, c-format msgid "Command EPSV %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:655 #, fuzzy msgid "Command SPAS" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:657 #, fuzzy msgid "Command PASV" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:678 #, fuzzy msgid "local_pasv failed" msgstr "%s misslyckades" #: src/services/gridftpd/commands.cpp:702 #, fuzzy msgid "local_spas failed" msgstr "%s misslyckades" #: src/services/gridftpd/commands.cpp:725 #, fuzzy msgid "Command PORT" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:728 #, fuzzy msgid "active_data is disabled" msgstr "Cachning är avstängd" #: src/services/gridftpd/commands.cpp:737 #, fuzzy msgid "local_port failed" msgstr "SendData: Lokal port misslyckades: %s" #: src/services/gridftpd/commands.cpp:750 #, fuzzy, c-format msgid "Command MLSD %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:752 #, fuzzy, c-format msgid 
"Command NLST %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:754 #, fuzzy, c-format msgid "Command LIST %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:805 #, fuzzy, c-format msgid "Command ERET %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:835 #, fuzzy, c-format msgid "Command RETR %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:864 #, fuzzy, c-format msgid "Command STOR %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:892 #, fuzzy, c-format msgid "Command ALLO %i" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:915 #, fuzzy msgid "Command OPTS" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:918 msgid "Command OPTS RETR" msgstr "" #: src/services/gridftpd/commands.cpp:928 #, fuzzy, c-format msgid "Option: %s" msgstr "%s version %s" #: src/services/gridftpd/commands.cpp:972 #, fuzzy msgid "Command NOOP" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:976 #, fuzzy msgid "Command QUIT" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:986 #, fuzzy msgid "Failed to close, deleting client" msgstr "Misslyckades med att acceptera delegering" #: src/services/gridftpd/commands.cpp:1000 #, fuzzy, c-format msgid "Command DCAU: %i '%s'" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:1028 #, fuzzy, c-format msgid "Command PBZS: %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:1036 #, fuzzy, c-format msgid "Setting pbsz to %lu" msgstr "Sätter fil %s till storlek %llu" #: src/services/gridftpd/commands.cpp:1052 #, fuzzy, c-format msgid "Command PROT: %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:1077 #, fuzzy, c-format msgid "Command MDTM %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:1099 #, fuzzy, c-format msgid "Raw command: %s" msgstr "Kommando: %s" #: src/services/gridftpd/commands.cpp:1147 #, fuzzy msgid "Failed to allocate memory for buffer" msgstr "Misslyckades med förallokera utrymme" #: src/services/gridftpd/commands.cpp:1154 #, c-format msgid "Allocated %u buffers %llu bytes each." 
msgstr "" #: src/services/gridftpd/commands.cpp:1161 #, fuzzy msgid "abort_callback: start" msgstr "ftp_write_callback: misslyckande" #: src/services/gridftpd/commands.cpp:1164 #, fuzzy, c-format msgid "abort_callback: Globus error: %s" msgstr "ftp_complete_callback: fel: %s" #: src/services/gridftpd/commands.cpp:1178 #, fuzzy msgid "make_abort: start" msgstr "Rapportera status" #: src/services/gridftpd/commands.cpp:1190 msgid "Failed to abort data connection - ignoring and recovering" msgstr "" #: src/services/gridftpd/commands.cpp:1198 msgid "make_abort: wait for abort flag to be reset" msgstr "" #: src/services/gridftpd/commands.cpp:1208 msgid "make_abort: leaving" msgstr "" #: src/services/gridftpd/commands.cpp:1223 msgid "check_abort: have Globus error" msgstr "" #: src/services/gridftpd/commands.cpp:1224 msgid "Abort request caused by transfer error" msgstr "" #: src/services/gridftpd/commands.cpp:1227 msgid "check_abort: sending 426" msgstr "" #: src/services/gridftpd/commands.cpp:1248 msgid "Abort request caused by error in transfer function" msgstr "" #: src/services/gridftpd/commands.cpp:1330 #, fuzzy msgid "Failed to start timer thread - timeout won't work" msgstr "Misslyckades med att starta tråd: cache kommer ej att rensas" #: src/services/gridftpd/commands.cpp:1382 msgid "Killing connection due to timeout" msgstr "" #: src/services/gridftpd/conf/conf_vo.cpp:25 #: src/services/gridftpd/conf/conf_vo.cpp:51 #: src/services/gridftpd/conf/conf_vo.cpp:69 #: src/services/gridftpd/conf/conf_vo.cpp:81 msgid "" "Configuration section [vo] is missing name. Check for presence of name= or " "vo= option." msgstr "" #: src/services/gridftpd/conf/daemon.cpp:60 #: src/services/gridftpd/conf/daemon.cpp:183 #, c-format msgid "No such user: %s" msgstr "Ingen sådan användare: %s" #: src/services/gridftpd/conf/daemon.cpp:72 #: src/services/gridftpd/conf/daemon.cpp:195 #, c-format msgid "No such group: %s" msgstr "Ingen sådan grupp: %s" #: src/services/gridftpd/conf/daemon.cpp:85 #: src/services/gridftpd/conf/daemon.cpp:208 #, c-format msgid "Improper debug level '%s'" msgstr "Ogiltig debugnivå '%s'" #: src/services/gridftpd/conf/daemon.cpp:127 msgid "Missing option for command daemon" msgstr "Saknad inställning för kommandot daemon" #: src/services/gridftpd/conf/daemon.cpp:132 msgid "Wrong option in daemon" msgstr "Felaktig inställning i daemon" #: src/services/gridftpd/conf/daemon.cpp:142 #, c-format msgid "Improper size of log '%s'" msgstr "Ogiltig loggstorlek '%s'" #: src/services/gridftpd/conf/daemon.cpp:151 #, c-format msgid "Improper number of logs '%s'" msgstr "Ogiltigt antal loggar '%s'" #: src/services/gridftpd/conf/daemon.cpp:157 #, c-format msgid "Improper argument for logsize '%s'" msgstr "Ogiltigt argument för loggstorlek '%s'" #: src/services/gridftpd/conf/daemon.cpp:164 #, fuzzy msgid "Missing option for command logreopen" msgstr "Saknad inställning för kommandot daemon" #: src/services/gridftpd/conf/daemon.cpp:169 #, fuzzy msgid "Wrong option in logreopen" msgstr "Felaktig inställning i daemon" #: src/services/gridftpd/conf/daemon.cpp:253 #, fuzzy, c-format msgid "Failed to open log file %s" msgstr "Misslyckades med att låsa upp fil %s: %s" #: src/services/gridftpd/conf/environment.cpp:175 msgid "" "Central configuration file is missing at guessed location:\n" " /etc/arc.conf\n" "Use ARC_CONFIG variable for non-standard location" msgstr "" #: src/services/gridftpd/datalist.cpp:101 #, fuzzy msgid "Closing channel (list)" msgstr "Stänger läskanal" #: 
src/services/gridftpd/datalist.cpp:157 msgid "Data channel connected (list)" msgstr "" #: src/services/gridftpd/dataread.cpp:24 msgid "data_connect_retrieve_callback" msgstr "" #: src/services/gridftpd/dataread.cpp:30 msgid "Data channel connected (retrieve)" msgstr "" #: src/services/gridftpd/dataread.cpp:37 msgid "data_connect_retrieve_callback: allocate_data_buffer" msgstr "" #: src/services/gridftpd/dataread.cpp:40 msgid "data_connect_retrieve_callback: allocate_data_buffer failed" msgstr "" #: src/services/gridftpd/dataread.cpp:48 #, c-format msgid "data_connect_retrieve_callback: check for buffer %u" msgstr "" #: src/services/gridftpd/dataread.cpp:61 #, c-format msgid "Closing channel (retrieve) due to local read error :%s" msgstr "" #: src/services/gridftpd/dataread.cpp:75 #: src/services/gridftpd/dataread.cpp:172 #, fuzzy msgid "Buffer registration failed" msgstr "Skapande av buffer misslyckades" #: src/services/gridftpd/dataread.cpp:88 #, fuzzy msgid "data_retrieve_callback" msgstr "ftp_check_callback" #: src/services/gridftpd/dataread.cpp:96 #, c-format msgid "Data channel (retrieve) %i %i %i" msgstr "" #: src/services/gridftpd/dataread.cpp:104 #, fuzzy msgid "Closing channel (retrieve)" msgstr "Stänger läskanal" #: src/services/gridftpd/dataread.cpp:110 #: src/services/gridftpd/datawrite.cpp:128 #, c-format msgid "Time spent waiting for network: %.3f ms" msgstr "" #: src/services/gridftpd/dataread.cpp:111 #: src/services/gridftpd/datawrite.cpp:129 #, c-format msgid "Time spent waiting for disc: %.3f ms" msgstr "" #: src/services/gridftpd/dataread.cpp:122 #, fuzzy msgid "data_retrieve_callback: lost buffer" msgstr "ftp_write_callback: misslyckande" #: src/services/gridftpd/dataread.cpp:158 #, c-format msgid "Closing channel (retrieve) due to local read error: %s" msgstr "" #: src/services/gridftpd/datawrite.cpp:24 #, fuzzy msgid "data_connect_store_callback" msgstr "ftp_check_callback" #: src/services/gridftpd/datawrite.cpp:30 msgid "Data channel connected (store)" msgstr "" #: src/services/gridftpd/datawrite.cpp:57 #, fuzzy msgid "Failed to register any buffer" msgstr "Misslyckades med att starta jobb" #: src/services/gridftpd/datawrite.cpp:76 #, c-format msgid "Data channel (store) %i %i %i" msgstr "" #: src/services/gridftpd/datawrite.cpp:89 #, fuzzy msgid "data_store_callback: lost buffer" msgstr "ftp_read_callback: misslyckande" #: src/services/gridftpd/datawrite.cpp:105 #, c-format msgid "Closing channel (store) due to error: %s" msgstr "" #: src/services/gridftpd/datawrite.cpp:115 #, fuzzy msgid "Closing channel (store)" msgstr "Stänger läskanal" #: src/services/gridftpd/fileplugin/fileplugin.cpp:55 #, fuzzy msgid "Can't parse access rights in configuration line" msgstr "Misslyckades med att läsa inställningsfil." #: src/services/gridftpd/fileplugin/fileplugin.cpp:61 #, fuzzy msgid "Can't parse user:group in configuration line" msgstr "Kan inte öppna inställningsfil." #: src/services/gridftpd/fileplugin/fileplugin.cpp:68 #, fuzzy msgid "Can't recognize user in configuration line" msgstr "Kan inte öppna inställningsfil." #: src/services/gridftpd/fileplugin/fileplugin.cpp:77 #, fuzzy msgid "Can't recognize group in configuration line" msgstr "Kan inte öppna inställningsfil." #: src/services/gridftpd/fileplugin/fileplugin.cpp:84 #: src/services/gridftpd/fileplugin/fileplugin.cpp:89 #, fuzzy msgid "Can't parse or:and in configuration line" msgstr "Misslyckades med att läsa inställningsfil." 
#: src/services/gridftpd/fileplugin/fileplugin.cpp:116 #, fuzzy msgid "Can't parse configuration line" msgstr "Misslyckades med att läsa inställningsfil." #: src/services/gridftpd/fileplugin/fileplugin.cpp:120 #, fuzzy, c-format msgid "Bad directory name: %s" msgstr "Cachekatalog: %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:137 #, fuzzy msgid "Can't parse create arguments in configuration line" msgstr "Misslyckades med att läsa inställningsfil." #: src/services/gridftpd/fileplugin/fileplugin.cpp:146 #, fuzzy msgid "Can't parse mkdir arguments in configuration line" msgstr "Misslyckades med att läsa inställningsfil." #: src/services/gridftpd/fileplugin/fileplugin.cpp:163 #, fuzzy, c-format msgid "Bad subcommand in configuration line: %s" msgstr "Läser inställningsfil: %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:175 #, fuzzy msgid "Bad mount directory specified" msgstr "Ingen cachecatalog angivan" #: src/services/gridftpd/fileplugin/fileplugin.cpp:177 #, fuzzy, c-format msgid "Mount point %s" msgstr "checkpoint: %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:215 #: src/services/gridftpd/fileplugin/fileplugin.cpp:273 #, fuzzy, c-format msgid "mkdir failed: %s" msgstr "PASV misslyckades: %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:226 #, fuzzy, c-format msgid "Warning: mount point %s creation failed." msgstr "Skapande av tolkningskontext misslyckades!" #: src/services/gridftpd/fileplugin/fileplugin.cpp:329 #, fuzzy, c-format msgid "plugin: open: %s" msgstr "Svar: %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:377 #: src/services/gridftpd/fileplugin/fileplugin.cpp:414 #, fuzzy msgid "Not enough space to store file" msgstr "Ingen cachecatalog angivan" #: src/services/gridftpd/fileplugin/fileplugin.cpp:428 #, fuzzy, c-format msgid "open: changing owner for %s, %i, %i" msgstr "Kan inte ändra ägare för %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:435 #, c-format msgid "open: owner: %i %i" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:444 #: src/services/gridftpd/fileplugin/fileplugin.cpp:484 #, fuzzy, c-format msgid "Unknown open mode %s" msgstr " attribut:" #: src/services/gridftpd/fileplugin/fileplugin.cpp:449 msgid "plugin: close" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:490 msgid "plugin: read" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:496 #, fuzzy msgid "Error while reading file" msgstr "Fel vid läsning av metafil %s: %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:506 msgid "plugin: write" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:517 msgid "Zero bytes written to file" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:725 #, c-format msgid "plugin: checkdir: %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:728 #, c-format msgid "plugin: checkdir: access: %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:737 #, c-format msgid "plugin: checkdir: access: allowed: %s" msgstr "" #: src/services/gridftpd/fileroot.cpp:14 #, c-format msgid "No plugin is configured or authorised for requested path %s" msgstr "" #: src/services/gridftpd/fileroot.cpp:19 msgid "FilePlugin: more unload than load" msgstr "" #: src/services/gridftpd/fileroot.cpp:34 #, fuzzy, c-format msgid "Can't load plugin %s for access point %s" msgstr "Kan inte läsa lista med källor från filen %s" #: src/services/gridftpd/fileroot.cpp:39 src/services/gridftpd/fileroot.cpp:43 #, c-format msgid "Plugin %s for access point %s is broken." 
msgstr "" #: src/services/gridftpd/fileroot.cpp:47 #, c-format msgid "Plugin %s for access point %s acquire failed (should never happen)." msgstr "" #: src/services/gridftpd/fileroot.cpp:54 #, c-format msgid "Destructor with dlclose (%s)" msgstr "" #: src/services/gridftpd/fileroot.cpp:77 #, c-format msgid "FileNode: operator= (%s <- %s) %lu <- %lu" msgstr "" #: src/services/gridftpd/fileroot.cpp:79 #, fuzzy msgid "Copying with dlclose" msgstr "Kopierar cachad fil" #: src/services/gridftpd/fileroot_config.cpp:32 #: src/services/gridftpd/fileroot_config.cpp:596 #, fuzzy msgid "configuration file not found" msgstr "Använd inställningsfil %s" #: src/services/gridftpd/fileroot_config.cpp:51 #, fuzzy msgid "Wrong port number in configuration" msgstr "Misslyckades med att ladda inställningar" #: src/services/gridftpd/fileroot_config.cpp:60 #, fuzzy msgid "Wrong maxconnections number in configuration" msgstr "Cacherot saknas i inställningar" #: src/services/gridftpd/fileroot_config.cpp:69 #, fuzzy msgid "Wrong defaultbuffer number in configuration" msgstr "Cacherot saknas i inställningar" #: src/services/gridftpd/fileroot_config.cpp:78 #, fuzzy msgid "Wrong maxbuffer number in configuration" msgstr "Cacherot saknas i inställningar" #: src/services/gridftpd/fileroot_config.cpp:110 #: src/services/gridftpd/fileroot_config.cpp:118 #, fuzzy, c-format msgid "Can't resolve host %s" msgstr "Misslyckades med att slå upp %s (%s)" #: src/services/gridftpd/fileroot_config.cpp:152 #: src/services/gridftpd/fileroot_config.cpp:455 #, fuzzy, c-format msgid "couldn't open file %s" msgstr "Misslyckades med att låsa upp fil %s: %s" #: src/services/gridftpd/fileroot_config.cpp:167 #: src/services/gridftpd/fileroot_config.cpp:183 #: src/services/gridftpd/fileroot_config.cpp:469 #, c-format msgid "improper attribute for encryption command: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:177 #: src/services/gridftpd/fileroot_config.cpp:479 #: src/services/gridftpd/fileroot_config.cpp:622 msgid "unknown (non-gridmap) user is not allowed" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:207 #: src/services/gridftpd/fileroot_config.cpp:547 #, fuzzy, c-format msgid "Failed processing authorization group %s" msgstr "Misslyckades med att läsa GM-inställningsfil %s" #: src/services/gridftpd/fileroot_config.cpp:216 #, fuzzy msgid "couldn't process VO configuration" msgstr "Misslyckades med att ladda serviceinställningar" #: src/services/gridftpd/fileroot_config.cpp:223 #: src/services/gridftpd/fileroot_config.cpp:231 #: src/services/gridftpd/fileroot_config.cpp:239 #: src/services/gridftpd/fileroot_config.cpp:500 #: src/services/gridftpd/fileroot_config.cpp:508 #: src/services/gridftpd/fileroot_config.cpp:516 #, fuzzy, c-format msgid "failed while processing configuration command: %s %s" msgstr "Misslyckades med att läsa GM-inställningsfil %s" #: src/services/gridftpd/fileroot_config.cpp:281 #, fuzzy, c-format msgid "can't parse configuration line: %s %s %s %s" msgstr "Misslyckades med att läsa inställningsfil." 
#: src/services/gridftpd/fileroot_config.cpp:286 #, fuzzy, c-format msgid "bad directory in plugin command: %s" msgstr "saknad katalog i control-kommando" #: src/services/gridftpd/fileroot_config.cpp:298 #: src/services/gridftpd/fileroot_config.cpp:405 #, fuzzy, c-format msgid "Already have directory: %s" msgstr "Cachekatalog: %s" #: src/services/gridftpd/fileroot_config.cpp:307 #: src/services/gridftpd/fileroot_config.cpp:408 #, fuzzy, c-format msgid "Registering directory: %s with plugin: %s" msgstr "Fel katalog i %s" #: src/services/gridftpd/fileroot_config.cpp:312 #: src/services/gridftpd/fileroot_config.cpp:421 #, fuzzy, c-format msgid "file node creation failed: %s" msgstr "Källan skapades: %s" #: src/services/gridftpd/fileroot_config.cpp:330 #, c-format msgid "improper attribute for allowactvedata command: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:335 #, fuzzy, c-format msgid "unsupported configuration command: %s" msgstr "Använd inställningsfil %s" #: src/services/gridftpd/fileroot_config.cpp:359 #, fuzzy msgid "Could not determine hostname from gethostname()" msgstr "Kan inte bestämma värdnamn från uname()" #: src/services/gridftpd/fileroot_config.cpp:375 msgid "unnamed group" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:384 msgid "undefined plugin" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:388 msgid "undefined virtual plugin path" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:393 #, fuzzy, c-format msgid "bad directory for plugin: %s" msgstr "Fel katalog i %s" #: src/services/gridftpd/fileroot_config.cpp:485 #, c-format msgid "improper attribute for allowunknown command: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:607 #, fuzzy msgid "failed to process client identification" msgstr "Misslyckades med att ladda serviceinställningar" #: src/services/gridftpd/fileroot_config.cpp:641 #, fuzzy, c-format msgid "Registering dummy directory: %s" msgstr "Skapar katalog %s" #: src/services/gridftpd/listener.cpp:57 #: src/services/gridftpd/listener.cpp:466 #, fuzzy msgid "Activation failed" msgstr "SOAP-anrop misslyckades" #: src/services/gridftpd/listener.cpp:66 #: src/services/gridftpd/listener.cpp:172 msgid "Child exited" msgstr "" #: src/services/gridftpd/listener.cpp:78 #, fuzzy msgid "Globus connection error" msgstr "Stänger förbindelse" #: src/services/gridftpd/listener.cpp:80 #: src/services/gridftpd/listener.cpp:424 #, fuzzy msgid "New connection" msgstr "Återanvänder förbindelse" #: src/services/gridftpd/listener.cpp:87 #, fuzzy msgid "Server stopped" msgstr " Betjäningstillstånd: %s" #: src/services/gridftpd/listener.cpp:157 msgid "Error: failed to set handler for SIGTERM" msgstr "" #: src/services/gridftpd/listener.cpp:161 #, fuzzy msgid "Starting controlled process" msgstr "Starta process" #: src/services/gridftpd/listener.cpp:164 #, fuzzy msgid "fork failed" msgstr "%s misslyckades" #: src/services/gridftpd/listener.cpp:169 msgid "wait failed - killing child" msgstr "" #: src/services/gridftpd/listener.cpp:174 msgid "Killed with signal: " msgstr "" #: src/services/gridftpd/listener.cpp:176 msgid "Restarting after segmentation violation." 
msgstr "" #: src/services/gridftpd/listener.cpp:177 #, fuzzy msgid "Waiting 1 minute" msgstr "Väntar på buffer" #: src/services/gridftpd/listener.cpp:239 msgid "Error: failed to set handler for SIGCHLD" msgstr "" #: src/services/gridftpd/listener.cpp:256 msgid "Missing argument" msgstr "" #: src/services/gridftpd/listener.cpp:257 #, fuzzy msgid "Unknown option" msgstr " attribut:" #: src/services/gridftpd/listener.cpp:264 #, fuzzy msgid "Wrong port number" msgstr "Fel antal parametrar angivna" #: src/services/gridftpd/listener.cpp:274 #, fuzzy msgid "Wrong number of connections" msgstr "Fel antal parametrar angivna" #: src/services/gridftpd/listener.cpp:281 #, fuzzy msgid "Wrong buffer size" msgstr "Fel antal parametrar angivna" #: src/services/gridftpd/listener.cpp:288 msgid "Wrong maximal buffer size" msgstr "" #: src/services/gridftpd/listener.cpp:300 #, fuzzy msgid "Failed reading configuration" msgstr "Misslyckades med att läsa lokal information." #: src/services/gridftpd/listener.cpp:331 #, fuzzy, c-format msgid "Failed to obtain local address: %s" msgstr "Misslyckades med att erhålla lokal adress för port %s - %s" #: src/services/gridftpd/listener.cpp:338 #, fuzzy, c-format msgid "Failed to create socket(%s): %s" msgstr "Misslyckades med att skapa socket till %s(%s):%d" #: src/services/gridftpd/listener.cpp:352 #, fuzzy, c-format msgid "Failed to limit socket to IPv6: %s" msgstr "Misslyckades med att skapa socket till %s(%s):%d" #: src/services/gridftpd/listener.cpp:359 #, fuzzy, c-format msgid "Failed to bind socket(%s): %s" msgstr "Misslyckades med att binda socket för port %s" #: src/services/gridftpd/listener.cpp:364 #, fuzzy, c-format msgid "Failed to listen on socket(%s): %s" msgstr "Misslyckades med att lyssna på port %s" #: src/services/gridftpd/listener.cpp:371 #, fuzzy msgid "Not listening to anything" msgstr "Inga lyssnande portar initierade" #: src/services/gridftpd/listener.cpp:374 #, c-format msgid "Some addresses failed. Listening on %u of %u." 
msgstr "" #: src/services/gridftpd/listener.cpp:382 #: src/services/gridftpd/listener.cpp:477 #, fuzzy msgid "Listen started" msgstr "JVM startad" #: src/services/gridftpd/listener.cpp:395 #, fuzzy msgid "No valid handles left for listening" msgstr "Ogiltig destination" #: src/services/gridftpd/listener.cpp:401 #, fuzzy, c-format msgid "Select failed: %s" msgstr "Förfrågan misslyckades" #: src/services/gridftpd/listener.cpp:422 #, c-format msgid "Have connections: %i, max: %i" msgstr "" #: src/services/gridftpd/listener.cpp:427 #, fuzzy, c-format msgid "Fork failed: %s" msgstr "PASV misslyckades: %s" #: src/services/gridftpd/listener.cpp:445 msgid "Refusing connection: Connection limit exceeded" msgstr "" #: src/services/gridftpd/listener.cpp:471 #, fuzzy msgid "Init failed" msgstr "%s misslyckades" #: src/services/gridftpd/listener.cpp:474 #, fuzzy msgid "Listen failed" msgstr "%s misslyckades" #: src/services/gridftpd/listener.cpp:488 #, fuzzy msgid "Listen finished" msgstr "%s uppsättning avslutad" #: src/services/gridftpd/listener.cpp:493 msgid "Stopping server" msgstr "" #: src/services/gridftpd/listener.cpp:497 #, fuzzy msgid "Destroying handle" msgstr "Misslyckades med att förstöra delegering" #: src/services/gridftpd/listener.cpp:500 msgid "Deactivating modules" msgstr "" #: src/services/gridftpd/listener.cpp:508 #, fuzzy msgid "Exiting" msgstr "sträng" #: src/services/gridftpd/misc/ldapquery.cpp:253 #, fuzzy, c-format msgid "%s: %s:%i" msgstr "%s: %s" #: src/services/gridftpd/misc/ldapquery.cpp:390 #: src/services/gridftpd/misc/ldapquery.cpp:468 #, fuzzy, c-format msgid "%s %s" msgstr "%s: %s" #: src/services/gridftpd/misc/ldapquery.cpp:394 #, fuzzy, c-format msgid " %s: %s" msgstr "%s: %s" #: src/services/gridftpd/misc/ldapquery.cpp:396 #, fuzzy, c-format msgid " %s:" msgstr "%s: %s" #: src/services/gridftpd/userspec.cpp:48 #, fuzzy, c-format msgid "Mapfile is missing at %s" msgstr "Plats saknas" #: src/services/gridftpd/userspec.cpp:89 #: src/services/gridftpd/userspec.cpp:215 #, fuzzy msgid "There is no local mapping for user" msgstr "Ingen uppkopplingskedja har ställts in" #: src/services/gridftpd/userspec.cpp:92 #: src/services/gridftpd/userspec.cpp:219 #, fuzzy msgid "There is no local name for user" msgstr "Det finns inget svar" #: src/services/gridftpd/userspec.cpp:142 #: src/services/gridftpd/userspec.cpp:233 msgid "No proxy provided" msgstr "" #: src/services/gridftpd/userspec.cpp:144 #, c-format msgid "Proxy/credentials stored at %s" msgstr "" #: src/services/gridftpd/userspec.cpp:147 #: src/services/gridftpd/userspec.cpp:238 #, fuzzy, c-format msgid "Initially mapped to local user: %s" msgstr "Grididentitet mappas till lokal identitet '%s'" #: src/services/gridftpd/userspec.cpp:150 #: src/services/gridftpd/userspec.cpp:340 #, fuzzy, c-format msgid "Local user %s does not exist" msgstr "Låsfil %s existerar inte" #: src/services/gridftpd/userspec.cpp:155 #: src/services/gridftpd/userspec.cpp:246 #, c-format msgid "Initially mapped to local group: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:158 #: src/services/gridftpd/userspec.cpp:249 #: src/services/gridftpd/userspec.cpp:349 #, fuzzy, c-format msgid "Local group %s does not exist" msgstr "Låsfil %s existerar inte" #: src/services/gridftpd/userspec.cpp:167 #: src/services/gridftpd/userspec.cpp:258 #, fuzzy msgid "Running user has no name" msgstr "Fel antal parametrar angivna" #: src/services/gridftpd/userspec.cpp:170 #: src/services/gridftpd/userspec.cpp:261 #, fuzzy, c-format msgid "Mapped to running user: %s" 
msgstr " Mappar till kö: %s" #: src/services/gridftpd/userspec.cpp:180 #: src/services/gridftpd/userspec.cpp:271 #, fuzzy, c-format msgid "Mapped to local id: %i" msgstr "Misslyckades med att ta bort %s" #: src/services/gridftpd/userspec.cpp:185 #: src/services/gridftpd/userspec.cpp:276 #, c-format msgid "No group %i for mapped user" msgstr "" #: src/services/gridftpd/userspec.cpp:194 #: src/services/gridftpd/userspec.cpp:285 #, c-format msgid "Mapped to local group id: %i" msgstr "" #: src/services/gridftpd/userspec.cpp:195 #: src/services/gridftpd/userspec.cpp:286 #, c-format msgid "Mapped to local group name: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:196 #: src/services/gridftpd/userspec.cpp:287 #, fuzzy, c-format msgid "Mapped user's home: %s" msgstr "Lagt till användare : %s" #: src/services/gridftpd/userspec.cpp:235 #, fuzzy, c-format msgid "Proxy stored at %s" msgstr "Källa: %s" #: src/services/gridftpd/userspec.cpp:241 #, fuzzy msgid "Local user does not exist" msgstr "Låsfil %s existerar inte" #: src/services/gridftpd/userspec.cpp:317 #, c-format msgid "Undefined control sequence: %%%s" msgstr "" #: src/services/gridftpd/userspec.cpp:354 #, fuzzy, c-format msgid "Remapped to local user: %s" msgstr "Misslyckades med att låsa upp fil %s: %s" #: src/services/gridftpd/userspec.cpp:355 #, fuzzy, c-format msgid "Remapped to local id: %i" msgstr "Misslyckades med att ta bort %s" #: src/services/gridftpd/userspec.cpp:356 #, c-format msgid "Remapped to local group id: %i" msgstr "" #: src/services/gridftpd/userspec.cpp:357 #, c-format msgid "Remapped to local group name: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:358 #, fuzzy, c-format msgid "Remapped user's home: %s" msgstr "Lagt till användare : %s" #: src/services/wrappers/java/javawrapper.cpp:33 #, c-format msgid "config: %s, class name: %s" msgstr "inställningar: %s, klassnamn: %s" #: src/services/wrappers/java/javawrapper.cpp:42 msgid "libjvm.so not loadable - check your LD_LIBRARY_PATH" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:52 msgid "libjvm.so does not contain the expected symbols" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:66 msgid "JVM started" msgstr "JVM startad" #: src/services/wrappers/java/javawrapper.cpp:71 #, fuzzy, c-format msgid "There is no service: %s in your Java class search path" msgstr "Det finns ingen service: %s i din javaklassökväg" #: src/services/wrappers/java/javawrapper.cpp:79 msgid "There is no constructor function" msgstr "Det finns ingen konstruktorfunktion" #: src/services/wrappers/java/javawrapper.cpp:86 #, c-format msgid "%s constructed" msgstr "%s skapad" #: src/services/wrappers/java/javawrapper.cpp:90 #, fuzzy msgid "Destroy JVM" msgstr "Förstör jvm" #: src/services/wrappers/java/javawrapper.cpp:183 msgid "Cannot find MCC_Status object" msgstr "Kan inte hitta MCC-status-objekt" #: src/services/wrappers/java/javawrapper.cpp:197 msgid "Java object returned NULL status" msgstr "Javaobjekt returnerade NULL-status" #: src/services/wrappers/python/pythonwrapper.cpp:107 #, c-format msgid "Loading %u-th Python service" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:111 #, fuzzy, c-format msgid "Initialized %u-th Python service" msgstr "Initialisera trådsystemet" #: src/services/wrappers/python/pythonwrapper.cpp:147 msgid "Invalid class name" msgstr "Ogiltigt klassnamn" #: src/services/wrappers/python/pythonwrapper.cpp:152 #, c-format msgid "class name: %s" msgstr "klassnamn: %s" #: src/services/wrappers/python/pythonwrapper.cpp:153 #, c-format 
msgid "module name: %s" msgstr "modulnamn: %s" #: src/services/wrappers/python/pythonwrapper.cpp:210 #, fuzzy msgid "Cannot find ARC Config class" msgstr "Kan inte hitta arcinställningsklass" #: src/services/wrappers/python/pythonwrapper.cpp:217 #, fuzzy msgid "Config class is not an object" msgstr "Inställningsklass är inget objekt" #: src/services/wrappers/python/pythonwrapper.cpp:225 msgid "Cannot get dictionary of module" msgstr "Kan inte erhålla ordlista för modulen" #: src/services/wrappers/python/pythonwrapper.cpp:234 msgid "Cannot find service class" msgstr "Hittar inte serviceklass" #: src/services/wrappers/python/pythonwrapper.cpp:243 msgid "Cannot create config argument" msgstr "Kan inte skapa inställningsargument" #: src/services/wrappers/python/pythonwrapper.cpp:250 #, fuzzy msgid "Cannot convert config to Python object" msgstr "Kan inte omvandla inställningar till pythonobjekt" #: src/services/wrappers/python/pythonwrapper.cpp:273 #, c-format msgid "%s is not an object" msgstr "%s är inget objekt" #: src/services/wrappers/python/pythonwrapper.cpp:279 #, fuzzy msgid "Message class is not an object" msgstr "Meddelandeklass är inget objekt" #: src/services/wrappers/python/pythonwrapper.cpp:287 #, fuzzy msgid "Python Wrapper constructor succeeded" msgstr "Python-wrapper-konstruktor anropad" #: src/services/wrappers/python/pythonwrapper.cpp:303 #, fuzzy, c-format msgid "Python Wrapper destructor (%d)" msgstr "Python-wrapper-destruktor anropad (%d)" #: src/services/wrappers/python/pythonwrapper.cpp:336 msgid "Python interpreter locked" msgstr "Pythontolkare låst" #: src/services/wrappers/python/pythonwrapper.cpp:340 msgid "Python interpreter released" msgstr "Pythontolkare befriad" #: src/services/wrappers/python/pythonwrapper.cpp:400 msgid "Python wrapper process called" msgstr "Python-wrapper-process anropad" #: src/services/wrappers/python/pythonwrapper.cpp:409 msgid "Failed to create input SOAP container" msgstr "Misslyckades med att skapa indata-SOAP-behållare" #: src/services/wrappers/python/pythonwrapper.cpp:419 msgid "Cannot create inmsg argument" msgstr "Kan inte skapa inmsg-argument" #: src/services/wrappers/python/pythonwrapper.cpp:433 #, fuzzy msgid "Cannot find ARC Message class" msgstr "Kan inte hitta arcmeddelandeklass" #: src/services/wrappers/python/pythonwrapper.cpp:439 #, fuzzy msgid "Cannot convert inmsg to Python object" msgstr "Kan inte omvandla inmsg till pythonobjekt" #: src/services/wrappers/python/pythonwrapper.cpp:448 msgid "Failed to create SOAP containers" msgstr "Misslyckades med att skapa SOAP-behållare" #: src/services/wrappers/python/pythonwrapper.cpp:454 msgid "Cannot create outmsg argument" msgstr "Kan inte skapa outmsg-argument" #: src/services/wrappers/python/pythonwrapper.cpp:460 #, fuzzy msgid "Cannot convert outmsg to Python object" msgstr "Kan inte omvandla outmsg till pythonobjekt" #: src/services/wrappers/python/pythonwrapper.cpp:516 #, fuzzy msgid "Failed to create XMLNode container" msgstr "Misslyckades med att skapa SOAP-behållare" #: src/services/wrappers/python/pythonwrapper.cpp:533 #, fuzzy msgid "Cannot find ARC XMLNode class" msgstr "Kan inte hitta arcmeddelandeklass" #: src/services/wrappers/python/pythonwrapper.cpp:539 #, fuzzy msgid "Cannot create doc argument" msgstr "Kan inte skapa inställningsargument" #: src/services/wrappers/python/pythonwrapper.cpp:545 #, fuzzy msgid "Cannot convert doc to Python object" msgstr "Kan inte omvandla inställningar till pythonobjekt" #: src/tests/client/test_ClientInterface.cpp:36 #: 
src/tests/client/test_ClientSAML2SSO.cpp:68 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:78 #: src/tests/echo/test_clientinterface.cpp:41 #: src/tests/echo/test_clientinterface.cpp:132 #: src/tests/echo/test_clientinterface.py:9 msgid "Creating a soap client" msgstr "Skapar en soap-klient" #: src/tests/client/test_ClientInterface.cpp:84 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:97 #: src/tests/echo/test_clientinterface.cpp:78 msgid "SOAP invokation failed" msgstr "SOAP-anrop misslyckades" #: src/tests/client/test_ClientSAML2SSO.cpp:44 #: src/tests/echo/test_clientinterface.cpp:100 #, fuzzy msgid "Creating a http client" msgstr "Skapar en echo-klient" #: src/tests/client/test_ClientSAML2SSO.cpp:55 #: src/tests/echo/test_clientinterface.cpp:117 #, fuzzy msgid "HTTP with SAML2SSO invokation failed" msgstr "SOAP-anrop misslyckades" #: src/tests/client/test_ClientSAML2SSO.cpp:59 #: src/tests/echo/test_clientinterface.cpp:121 #, fuzzy msgid "There was no HTTP response" msgstr "Det fanns inget SOAP-svar" #: src/tests/client/test_ClientSAML2SSO.cpp:77 #: src/tests/echo/test_clientinterface.cpp:145 #, fuzzy msgid "SOAP with SAML2SSO invokation failed" msgstr "SOAP-anrop misslyckades" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:37 #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:38 #: src/tests/delegation/test_delegation_client.cpp:45 #: src/tests/delegation/test_delegation_client.cpp:76 #: src/tests/echo/test_clientinterface.cpp:172 #: src/tests/echo/test_clientinterface.cpp:194 #, fuzzy msgid "Creating a delegation soap client" msgstr "Skapar en echo-klient" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:46 #: src/tests/delegation/test_delegation_client.cpp:51 #: src/tests/echo/test_clientinterface.cpp:178 #, fuzzy msgid "Delegation to ARC delegation service failed" msgstr "Delegeringsauktorisering misslyckades" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:50 #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:49 #: src/tests/delegation/test_delegation_client.cpp:56 #: src/tests/delegation/test_delegation_client.cpp:88 #: src/tests/echo/test_clientinterface.cpp:182 #: src/tests/echo/test_clientinterface.cpp:205 #, fuzzy, c-format msgid "Delegation ID: %s" msgstr "Destination: %s" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:55 #, c-format msgid "Can not get the delegation credential: %s from delegation service:%s" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:58 #, fuzzy, c-format msgid "Delegated credential from delegation service: %s" msgstr "Misslyckades med att hitta delegeringskreditiv i klientinställningar" #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:45 #: src/tests/delegation/test_delegation_client.cpp:83 #: src/tests/echo/test_clientinterface.cpp:201 #, fuzzy msgid "Delegation to gridsite delegation service failed" msgstr "Delegeringsauktorisering misslyckades" #: src/tests/count/count.cpp:58 msgid "Input is not SOAP" msgstr "Indata är inte SOAP" #: src/tests/count/count.cpp:89 src/tests/echo/echo.cpp:83 msgid "echo: Unauthorized" msgstr "echo: Oautoriserad" #: src/tests/count/count.cpp:98 src/tests/count/count.cpp:104 #, c-format msgid "Request is not supported - %s" msgstr "Förfrågan understöds inte - %s" #: src/tests/count/test_service.cpp:33 src/tests/echo/test_service.cpp:30 msgid "Service is waiting for requests" msgstr "Servicen väntar på förfrågningar" #: src/tests/echo/test.cpp:32 msgid "Creating client interface" msgstr "Skapar klientgränssnitt" #:
src/tests/echo/test_clientinterface.py:27 msgid "SOAP invocation failed" msgstr "SOAP-anrop misslyckades" #, fuzzy #~ msgid "Unable to create directory %s: No valid credentials found" #~ msgstr "Submit: Misslyckades med att sända jobbeskrivning" #, fuzzy #~ msgid "Unable to rename %s: No valid credentials found" #~ msgstr "Misslyckades med att skapa/hitta katalog %s" #~ msgid "Submit: Failed to disconnect after submission" #~ msgstr "Submit: Misslyckades med att koppla ner efter insändning av jobb" #~ msgid "year" #~ msgid_plural "years" #~ msgstr[0] "år" #~ msgstr[1] "år" #~ msgid "month" #~ msgid_plural "months" #~ msgstr[0] "månad" #~ msgstr[1] "månader" #~ msgid "day" #~ msgid_plural "days" #~ msgstr[0] "dag" #~ msgstr[1] "dagar" #, fuzzy #~ msgid "Can not set the STORE_CTX for chain verification" #~ msgstr "Kan inte ange livstid för proxycertifikatet" #~ msgid "" #~ "Can not convert DER encoded PROXYCERTINFO extension to internal format" #~ msgstr "" #~ "Kan inte omvandla DER-kodat PROXYCERTINFO-tillägg till internt format" #~ msgid "RSA_generate_key failed" #~ msgstr "RSA_generate_key misslyckades" #~ msgid "Can not get extension from issuer certificate" #~ msgstr "Kan inte erhålla tillägg från utfärdarcertifikat" #~ msgid "Failed to add extension into proxy" #~ msgstr "Misslyckades med att lägga till tillägg till proxyn" #, fuzzy #~ msgid "" #~ "The signing algorithm %s is not allowed,it should be SHA1/SHA2 to sign " #~ "certificate requests" #~ msgstr "" #~ "Signeringsalgoritmen %s är ej tillåten, den skall vara MD5 för att " #~ "signera certifikatbegäran" #, fuzzy #~ msgid "Failed to add extension into EEC certificate" #~ msgstr "Misslyckades med att lägga till tillägg till proxyn" #, fuzzy #~ msgid "" #~ "Resource information provider timed out: %u seconds. Checking heartbeat " #~ "file..." #~ msgstr "Klusterinformationstillhandahållarfel: %s" #, fuzzy #~ msgid "arc_to_voms - %u attributes" #~ msgstr " attribut:" #, fuzzy #~ msgid "arc_to_voms: attribute: %s" #~ msgstr " attribut:" #, fuzzy #~ msgid "LDAP authorization is not implemented yet" #~ msgstr "Feature ej implementerad" #, fuzzy #~ msgid "Failed to report renewed proxy to job" #~ msgstr "Misslyckades med att läsa proxy fil: %s" #~ msgid "%s: Failed switching user" #~ msgstr "%s: Misslyckades med att byta användare" #, fuzzy #~ msgid "Match group: %s" #~ msgstr "Ingen sådan grupp: %s" #, fuzzy #~ msgid "Match role: %s" #~ msgstr "cachefil: %s" #, fuzzy #~ msgid "Match capabilities: %s" #~ msgstr "cachefil: %s" #, fuzzy #~ msgid "Failed writing RSL" #~ msgstr "Misslyckades med att starta jobb" #, fuzzy #~ msgid "RSL (inside multi) could not be evaluated: %s" #~ msgstr "Service %s(%s) kunde inte skapas" #, fuzzy #~ msgid "RSL could not be evaluated: %s" #~ msgstr "URL:en är inte giltig: %s" #, fuzzy #~ msgid "Can't evaluate RSL fragment: %s" #~ msgstr "Kan inte göra stat på filen: %s" #, fuzzy #~ msgid "Can't evaluate RSL substitution variable name: %s" #~ msgstr "Kan inte göra stat på filen: %s" #, fuzzy #~ msgid "Can't evaluate RSL substitution variable value: %s" #~ msgstr "Kan inte göra stat på filen: %s" #, fuzzy #~ msgid "Broken string at position %ld" #~ msgstr " Väntposition: %d" #, fuzzy #~ msgid "filename cannot be empty." 
#~ msgstr "Certifikat kan inte extraheras" #, fuzzy #~ msgid "Failed to set GFAL2 user data object: %s" #~ msgstr "Misslyckades med att läsa object: %s" #, fuzzy #~ msgid "%d Queues" #~ msgstr " Kö: %s" #, fuzzy #~ msgid "Queue Information:" #~ msgstr "Köinformation" #~ msgid "" #~ "Argument to -c has the format Flavour:URL e.g.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgstr "" #~ "Argument till -c har formatet Gridtyp:URL t.ex.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgid "explicity select or reject a specific cluster" #~ msgstr "välj eller välj bort ett specifikt kluster" #, fuzzy #~ msgid "No job controller plugins loaded" #~ msgstr "Inga jobbkontrollerare laddade" #~ msgid "" #~ "Argument to -i has the format Flavour:URL e.g.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/mds-vo-name=sweden,O=grid\n" #~ "CREAM:ldap://cream.grid.upjs.sk:2170/o=grid\n" #~ "\n" #~ "Argument to -c has the format Flavour:URL e.g.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgstr "" #~ "Argument till -i har formatet Gridtyp:URL t.ex.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/mds-vo-name=sweden,O=grid\n" #~ "CREAM:ldap://cream.grid.upjs.sk:2170/o=grid\n" #~ "\n" #~ "Argument till -c har formatet Gridtyp:URL t.ex.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgid "explicity select or reject an index server" #~ msgstr "välj eller välj bort en specifikt indexserver" #, fuzzy #~ msgid "explicitly select or reject a cluster holding queued jobs" #~ msgstr "välj eller välj bort ett specifikt kluster" #, fuzzy #~ msgid "explicitly select or reject a cluster to migrate to" #~ msgstr "välj eller välj bort ett specifikt kluster" #, fuzzy #~ msgid "explicitly select or reject an index server" #~ msgstr "välj eller välj bort en specifikt indexserver" #, fuzzy #~ msgid "select broker method (Random (default), FastestQueue, or custom)" #~ msgstr "" #~ "välj mäklarmetod (Random (förval), QueueBalance, eller användardefinerad)" #, fuzzy #~ msgid "[job ...]\n" #~ msgstr "[jobb ...]" #~ msgid "file where the jobs will be stored" #~ msgstr "fil där jobben kommer att sparas" #, fuzzy #~ msgid "explicity select or reject a specific cluster for the new job" #~ msgstr "välj eller välj bort ett specifikt kluster" #, fuzzy #~ msgid "No jobs to resubmit" #~ msgstr "%d av %d jobb sändes in" #, fuzzy #~ msgid "Disregarding %s" #~ msgstr "listpost: %s" #~ msgid "Submission to %s failed, trying next target" #~ msgstr "Insändnings till %s misslyckades, provar med nästa destination" #, fuzzy #~ msgid "Job resubmitted with new jobid: %s" #~ msgstr "Jobb insänt med jobb-id: %s" #~ msgid "service_url request_file" #~ msgstr "serviceurl förfrågan-fil" #, fuzzy #~ msgid "path to request file" #~ msgstr "sökväg till proxyfil" #~ msgid "path to private key file" #~ msgstr "sökväg till privat nyckelfil" #, fuzzy #~ msgid "Creating and sending soap request" #~ msgstr "Skapar och skickar förfrågan" #, fuzzy #~ msgid "IdP name" #~ msgstr "katalognamn" #, fuzzy #~ msgid "Store directory for key and signed certificate" #~ msgstr "Lyckades verifiera det signerade certifikatet" #~ msgid "directory" #~ msgstr "katalog" #~ msgid "Source probably does not exist" #~ msgstr "Källan existerar troligen inte" #~ msgid "Fileset copy for this kind of source is not supported" #~ msgstr "Kopiering av 
filuppsättning för denna källtyp stöds inte" #, fuzzy #~ msgid "Current transfer FAILED: %s - %s" #~ msgstr "Nuvarande överföring MISSLYCKADES: %s" #, fuzzy #~ msgid "Transfer FAILED: %s - %s" #~ msgstr "Överföring MISSLYCKADES: %s" #~ msgid "Request: %s" #~ msgstr "Förfrågan: %s" #, fuzzy #~ msgid "[ISIS testing ...]" #~ msgstr "[jobb ...]" #, fuzzy #~ msgid " Not enough or too much parameters! %s" #~ msgstr "Ej tillräckligt antal parametrar i copyurl" #, fuzzy #~ msgid "Client chain configuration: %s" #~ msgstr "Cacheinställningar: %s" #, fuzzy #~ msgid "service" #~ msgstr "serviceurl" #~ msgid "Disconnect: Failed quitting: %s" #~ msgstr "Disconnect: Misslyckades med att avsluta: %s" #, fuzzy #~ msgid "Submit: Failed to modify job description to be sent to target." #~ msgstr "Submit: Misslyckades med att sända jobbeskrivning" #, fuzzy #~ msgid "" #~ "Trying to migrate to %s: Migration to a ARC0 cluster is not supported." #~ msgstr "SOAP-process understöds inte: %s" #~ msgid "TargetRetriverARC0 initialized with %s service url: %s" #~ msgstr "TargetRetriverARC0 initialiserad med %s-serviceurl: %s" #, fuzzy #~ msgid "Failed locating delegation credentials in chain configuration" #~ msgstr "" #~ "Misslyckades med att hitta delegeringskreditiv i klientinställningar" #, fuzzy #~ msgid "Found malformed job state string: %s" #~ msgstr "Misslyckades med att skapa mjuk länk: %s" #, fuzzy #~ msgid "Getting BES jobs is not supported" #~ msgstr "process: %s: understöds inte" #~ msgid "TargetRetriverARC1 initialized with %s service url: %s" #~ msgstr "TargetRetriverARC1 initialiserad med %s-serviceurl: %s" #, fuzzy #~ msgid "TargetRetriverBES initialized with %s service url: %s" #~ msgstr "TargetRetriverCREAM initialiserad med %s-serviceurl: %s" #, fuzzy #~ msgid "%d. Cluster: %s; Queue: %s" #~ msgstr "Kluster: %s" #, fuzzy #~ msgid "" #~ "Trying to migrate to %s: Migration to a CREAM cluster is not supported." #~ msgstr "SOAP-process understöds inte: %s" #~ msgid "TargetRetriverCREAM initialized with %s service url: %s" #~ msgstr "TargetRetriverCREAM initialiserad med %s-serviceurl: %s" #~ msgid "Cannot convert arc module name to Python string" #~ msgstr "Kan inte omvandla arcmodulnamn till pythonsträng" #~ msgid "Cannot import arc module" #~ msgstr "Kan inte importera arcmodul" #~ msgid "Cannot get dictionary of arc module" #~ msgstr "Kan inte erhålla ordlista för arcmodulen" #, fuzzy #~ msgid "Cannot create python list" #~ msgstr "Kan inte skapa SOAP-klient" #, fuzzy #~ msgid "" #~ "Trying to migrate to %s: Migration to a UNICORE cluster is not supported." 
#~ msgstr "SOAP-process understöds inte: %s" #, fuzzy #~ msgid "TargetRetriverUNICORE initialized with %s service url: %s" #~ msgstr "TargetRetriverCREAM initialiserad med %s-serviceurl: %s" #, fuzzy #~ msgid "" #~ "cnd:\n" #~ "%s is a %s" #~ msgstr "%s: %s - %s" #, fuzzy #~ msgid "File is not accessible: %s - %s" #~ msgstr "Filen är inte tillgänglig: %s" #~ msgid "Failed to use channel stdout" #~ msgstr "Misslyckades med att använda stdout-kanalen" #, fuzzy #~ msgid "Failed to create/find directory %s, (%d)" #~ msgstr "Misslyckades med att skapa/hitta katalog %s" #~ msgid "delete_ftp: globus_ftp_client_delete timeout" #~ msgstr "delete_ftp: globus_ftp_client_delete timeout" #, fuzzy #~ msgid "Response(%i): %s" #~ msgstr "Svar: %s" #~ msgid "Failed initing handle" #~ msgstr "Misslyckades med att initiera handtag" #~ msgid "Failed to close connection 2" #~ msgstr "Misslyckades med att stänga förbindelse 2" #~ msgid "Failed to close connection 3" #~ msgstr "Misslyckades med att stänga förbindelse 3" #~ msgid "Cthread_init() error: %s" #~ msgstr "Cthread_init() fel: %s" #~ msgid "Error starting session: %s" #~ msgstr "Fel vid start av session: %s" #, fuzzy #~ msgid "Using supplied guid %s" #~ msgstr "Använder cachade inställningar: %s" #~ msgid "Source must contain LFN" #~ msgstr "Källa måste innehålla LFN" #~ msgid "Error finding replicas: %s" #~ msgstr "Fel vid sökning av replikor: %s" #~ msgid "meta_get_data: size: %llu" #~ msgstr "meta_get_data: storlek: %llu" #~ msgid "meta_get_data: created: %s" #~ msgstr "meta_get_data: skapat: %s" #~ msgid "LFN is missing in LFC (needed for replication)" #~ msgstr "LFN saknas i LFC (behövs för replikering)" #~ msgid "LFN already exists in LFC" #~ msgstr "LFN finns redan i LFC" #~ msgid "Creating LFC directory %s" #~ msgstr "Skapar LFC-katalog %s" #~ msgid "Error creating required LFC dirs: %s" #~ msgstr "Fel vid skapandet av nödvändiga LFC-kataloger: %s" #~ msgid "Error creating LFC entry: %s" #~ msgstr "Fel vid skapandet av LFC-post: %s" #, fuzzy #~ msgid "Error finding info on LFC entry %s which should exist: %s" #~ msgstr "Fel vid skapandet av LFC-post: %s" #, fuzzy #~ msgid "Error creating LFC entry %s, guid %s: %s" #~ msgstr "Fel vid skapandet av LFC-post: %s" #, fuzzy #~ msgid "Error entering metadata: %s" #~ msgstr "Fel vid öppnande av metafil %s: %s" #~ msgid "No GUID defined for LFN - probably not preregistered" #~ msgstr "Ingen GUID definierad för LFN - troligen inte förregistrerad" #~ msgid "Error adding replica: %s" #~ msgstr "Fel vid tillägg av replika: %s" #~ msgid "Entering checksum type %s, value %s, file size %llu" #~ msgstr "Inkommande checksumtyp %s, värde %s, filstorlek %llu" #~ msgid "Failed to remove LFN in LFC - You may need to do it by hand" #~ msgstr "" #~ "Misslyckades med att ta bort LFN i LFC - Du kan behöva göra det för hand" #~ msgid "Error getting replicas: %s" #~ msgstr "Fel vid hämtning av replikor: %s" #~ msgid "Failed to remove location from LFC" #~ msgstr "Misslyckades med att ta bort plats från LFC" #~ msgid "Failed to remove LFC directory: directory is not empty" #~ msgstr "Misslyckades med att ta bort LFC-katalog: katalogen är ej tom" #~ msgid "Failed to remove LFN in LFC: %s" #~ msgstr "Misslyckades med att ta bort LFN i LFC: %s" #~ msgid "Error listing directory: %s" #~ msgstr "Fel vid listande av katalog: %s" #~ msgid "Failed to find GUID for specified LFN in %s: %s" #~ msgstr "Misslyckades med att hitta GUID för det angivna LFN i %s: %s" #~ msgid "There is no GUID for specified LFN in %s" #~ msgstr "Det finns 
ingen GUID för det angivna LFN i %s" #~ msgid "Warning: can't get PFNs from server %s: %s" #~ msgstr "Varning: kan inte erhålla PFN från server %s: %s" #, fuzzy #~ msgid "RLS URL must contain host" #~ msgstr "Källa måste innehålla LFN" #~ msgid "" #~ "Locations are missing in destination RLS url - will use those registered " #~ "with special name" #~ msgstr "" #~ "Platser saknas i destination-RLS-url - använder de som registrerats med " #~ "speciellt namn" #~ msgid "LFN is missing in RLS (needed for replication)" #~ msgstr "LFN saknas i RLS (behövs för replikering)" #~ msgid "LFN already exists in replica" #~ msgstr "LFN finns redan i replika" #~ msgid "Failed to check for existing LFN in %s: %s" #~ msgstr "Misslyckades med att kontrollera existerande LFN i %s: %s" #~ msgid "There is same LFN in %s" #~ msgstr "Samma LFN finns i %s" #~ msgid "Failed to add LFN-GUID to RLS: %s" #~ msgstr "Misslyckades med att lägga till LFN-GUID till RLS: %s" #~ msgid "Failed to create/add LFN-PFN mapping: %s" #~ msgstr "Misslyckades med att skapa/lägga till LFN-PFN-mappning: %s" #~ msgid "Warning: failed to add attribute to RLS: %s" #~ msgstr "Varning: misslyckades med att lägga till attribut till RLS: %s" #~ msgid "Warning: Failed to retrieve LFN/PFNs from %s: %s" #~ msgstr "Varning: misslyckades med att inhämta LFN/PFN från %s: %s" #~ msgid "SE location will be unregistered automatically" #~ msgstr "SE-plats kommer att avregistreras automatiskt" #~ msgid "Warning: Failed to delete LFN/PFN from %s: %s" #~ msgstr "Varning: Misslyckades med att ta bort LFN/PFN från %s: %s" #~ msgid "LFN must be already deleted, try LRC anyway" #~ msgstr "LFN måste redan vara borttagen, försöker med LRC ändå" #~ msgid "Failed to retrieve LFN/LRC: %s" #~ msgstr "Misslyckades med att inhämta LFN/LRC: %s" #~ msgid "Warning: Failed to connect to LRC at %s: %s" #~ msgstr "Varning: Misslyckades med att koppla upp mot LRC på %s: %s" #~ msgid "No LFNs found in %s" #~ msgstr "Inga LFN funna i %s" #~ msgid "Failed to retrieve list of LFNs/PFNs from %s" #~ msgstr "Misslyckades med att inhämta LFN/PFN-lista från %s" #~ msgid "lfn: %s(%s) - %s" #~ msgstr "lfn: %s(%s) - %s" #~ msgid "lfn: %s - pfn: %s" #~ msgstr "lfn: %s - pfn: %s" #~ msgid "Contacting %s" #~ msgstr "Kontaktar: %s" #~ msgid "Warning: can't get list of RLIs from server %s: %s" #~ msgstr "Varning: kan inte erhålla RLI-lista från server %s: %s" #~ msgid "Warning: can't get list of senders from server %s: %s" #~ msgstr "Varning: kan inte erhålla sändarlista från server %s: %s" #, fuzzy #~ msgid "start_reading_srm: looking for metadata: %s" #~ msgstr "Tar bort logist fil från metadata %s" #, fuzzy #~ msgid "globus_io_register_read failed: %s" #~ msgstr "Insändning returnerade fel: %s" #, fuzzy #~ msgid "clear_input: %s" #~ msgstr "uppstädning %s" #, fuzzy #~ msgid "Globus error (read): %s" #~ msgstr "Globusfel: %s" #, fuzzy #~ msgid "*** Server response: %s" #~ msgstr "Svar: %s" #, fuzzy #~ msgid "Globus error (write): %s" #~ msgstr "Globusfel: %s" #, fuzzy #~ msgid "*** Client request: %s" #~ msgstr "ARC-auktoriseringsförfrågan: %s" #, fuzzy #~ msgid "Failed to read SSL token during authentication" #~ msgstr "Misslyckades med att etablera SSL-förbindelse" #, fuzzy #~ msgid "Failed wrapping GSI token: %s" #~ msgstr "Misslyckades med att ladda ner %s till %s" #, fuzzy #~ msgid "Failed unwrapping GSI token: %s" #~ msgstr "Misslyckades med att ladda ner %s till %s" #, fuzzy #~ msgid "Timeout while reading response header" #~ msgstr "Misslyckades under läsning från källa" #, fuzzy 
#~ msgid "Failure while receiving entity" #~ msgstr "Misslyckades under skrivning till destination" #, fuzzy #~ msgid "Failed while reading response content" #~ msgstr "Misslyckades under läsning från källa" #, fuzzy #~ msgid "Timeout while reading response content" #~ msgstr "Misslyckades under läsning från källa" #, fuzzy #~ msgid "Timeout while sending SOAP request" #~ msgstr "Skapar och skickar förfrågan" #, fuzzy #~ msgid "Error sending data to server" #~ msgstr "Fel under öppnande av databas: %s" #, fuzzy #~ msgid "SOAP request failed (get)" #~ msgstr "WSRF-förfrågan misslyckades" #, fuzzy #~ msgid "SOAP request failed (put)" #~ msgstr "WSRF-förfrågan misslyckades" #, fuzzy #~ msgid "SOAP request failed (copy)" #~ msgstr "WSRF-förfrågan misslyckades" #, fuzzy #~ msgid "SOAP request failed (%s)" #~ msgstr "WSRF-förfrågan misslyckades" #, fuzzy #~ msgid "Request is reported as ABORTED" #~ msgstr "Förfrågan understöds inte - %s" #, fuzzy #~ msgid "SOAP request failed (srmMkdir)" #~ msgstr "WSRF-förfrågan misslyckades" #, fuzzy #~ msgid "Error opening srm info file %s:%s" #~ msgstr "Fel vid öppnande av metafil %s: %s" #, fuzzy #~ msgid "[ARCJSDLParser] Failed to create parser context" #~ msgstr "Misslyckades med att skapa GUID i RLS: %s" #~ msgid "Target sorting not done, sorting them now" #~ msgstr "Destinationssortering ej gjord, sorterar nu" #, fuzzy #~ msgid "Loaded Broker %s" #~ msgstr "Laddade %s" #, fuzzy #~ msgid "The request is NULL!" #~ msgstr "Utdata-BIO är NULL" #, fuzzy #~ msgid "Could not find any digest for the given name" #~ msgstr "Kunde inte hitta systemets klientinställningar" #, fuzzy #~ msgid "Failed to parse XML file!" #~ msgstr "Misslyckades med att spara ftp-fil" #, fuzzy #~ msgid "Getting SAML response" #~ msgstr "Väntar på svar" #, fuzzy #~ msgid "The consent_page is %s" #~ msgstr "DataMover::Transfer: checksumtyp är %s" #, fuzzy #~ msgid "Trying to open confirm site %s" #~ msgstr "Försöker lyssna på port %s" #, fuzzy #~ msgid "The found action is %s" #~ msgstr "Funktion : %s" #, fuzzy #~ msgid "The post IdP-authentication action is %s" #~ msgstr "Felaktig autentiseringsinformation" #, fuzzy #~ msgid "The retrieved dn is %s" #~ msgstr "Peer-namn: %s" #, fuzzy #~ msgid "The OAuth request url is %s" #~ msgstr "ARC-auktoriseringsförfrågan: %s" #, fuzzy #~ msgid "The server location is %s " #~ msgstr "Tar bort plats: %s - %s" #, fuzzy #~ msgid "The request url is %s" #~ msgstr "Request xml structure är: %s" #, fuzzy #~ msgid "Relaystate %s" #~ msgstr " Hälsotillstånd: %s" #, fuzzy #~ msgid "Successfully logged in to the IdP!" 
#~ msgstr "Lyckosam dirigering till %s" #~ msgid "Cluster: %s" #~ msgstr "Kluster: %s" #, fuzzy #~ msgid "Location information:" #~ msgstr "Ändpunktsinformation" #, fuzzy #~ msgid "Domain information:" #~ msgstr "Ändpunktsinformation" #, fuzzy #~ msgid " Service Name: %s" #~ msgstr "Peer-namn: %s" #, fuzzy #~ msgid " Service Type: %s" #~ msgstr " Betjäningstillstånd: %s" #, fuzzy #~ msgid "Manager information:" #~ msgstr "Köinformation" #, fuzzy #~ msgid " Resource Manager Version: %s" #~ msgstr "%s version %s" #~ msgid "Filling job store with jobs according to specified jobids" #~ msgstr "Fyller jobblager med jobb i enlighet med specifierade jobb-id" #~ msgid "Filling job store with jobs according to list of selected clusters" #~ msgstr "Fyller jobblager med jobb i enlighet med listan med valda kluster" #~ msgid "Removing jobs from job store according to list of rejected clusters" #~ msgstr "" #~ "Tar bort jobb från jobblager i enlighet med listan med förkastade kluster" #~ msgid "Removing job %s from job store since it runs on a rejected cluster" #~ msgstr "" #~ "Tar bort jobb %s från jobblager eftersom det exekverar på ett körkastat " #~ "kluster" #~ msgid "" #~ "Filling job store with all jobs, except those running on rejected clusters" #~ msgstr "" #~ "Fyller jobblager med alla jobb, utom de som exekverar på körkastade " #~ "kluster" #~ msgid "Job store for %s contains %ld jobs" #~ msgstr "Jobblager för %s innehåller %ld jobb" #, fuzzy #~ msgid "The job ID (%s) is not a valid URL" #~ msgstr "%s är inte en giltig URL" #, fuzzy #~ msgid "The cluster URL is not a valid URL" #~ msgstr "%s är inte en giltig URL" #, fuzzy #~ msgid "Job has not finished yet: %s" #~ msgstr "Jobb ej funnet i jobblista: %s" #~ msgid "Failed downloading job %s" #~ msgstr "Misslyckades med att ladda ner jobb %s" #~ msgid "Failed cleaning job %s" #~ msgstr "Misslyckades med att städa upp efter jobb %s" #~ msgid "Failed cancelling job %s" #~ msgstr "Misslyckades med att avbryta jobb %s" #, fuzzy #~ msgid "" #~ "Job information not found, job %s will only be deleted from local joblist" #~ msgstr "Jobb %s kommer endast att tas bort från den lokala jobblistan" #~ msgid "%s from job %s" #~ msgstr "%s från jobb %s" #, fuzzy #~ msgid "Cannot migrate job %s, it is not queuing." 
#~ msgstr "Kan inte skapa SOAP-klient" #, fuzzy #~ msgid "Failed renewing job %s" #~ msgstr "Misslyckades med att städa upp efter jobb %s" #, fuzzy #~ msgid "File download failed: %s - %s" #~ msgstr "Filnedladdning misslyckades: %s" #~ msgid "Removing jobs from job list and job store" #~ msgstr "Tar bort jobb från jobblista och jobblager" #~ msgid "Job store for %s now contains %d jobs" #~ msgstr "Jobblager för %s innehåller nu %d jobb" #~ msgid "Finished removing jobs from job list and job store" #~ msgstr "Avslutade borttagande av jobb från jobblista och jobblager" #, fuzzy #~ msgid "Valid jobdescription found for: %s" #~ msgstr "Ogiltig jobbeskrivning:" #, fuzzy #~ msgid "Invalid jobdescription found for: %s" #~ msgstr "Ogiltig jobbeskrivning:" #, fuzzy #~ msgid "Job description for %s retrieved locally" #~ msgstr "Jobbeskrivning som skall sändas: %s" #, fuzzy #~ msgid "Job %s can not be resubmitted" #~ msgstr "%d av %d jobb sändes in" #, fuzzy #~ msgid " ExecutionCE: %s" #~ msgstr "Undantag: %s" #, fuzzy #~ msgid " Used Slots: %d" #~ msgstr " Använda slots: %d" #, fuzzy #~ msgid "Job type: single" #~ msgstr "Jobb: %s" #, fuzzy #~ msgid " UserTag: %s" #~ msgstr " Ägare: %s" #, fuzzy #~ msgid " Epilogue.Arguments: %s" #~ msgstr " Implementerare: %s" #, fuzzy #~ msgid " QueueName: %s" #~ msgstr " Kö: %s" #, fuzzy #~ msgid " DataIndexingService: %s" #~ msgstr " Förvald lagringsservice: %s" #, fuzzy #~ msgid "Generating %s output was unsuccessful" #~ msgstr "Operation avslutades OK" #~ msgid "Identifying needed job controllers according to specified jobs" #~ msgstr "" #~ "Identifierar behövda jobbkontrollerare i enlighet med specifierade jobb" #~ msgid "Need job controller for grid flavour %s" #~ msgstr "Behöver jobbkontrollerare för gridtyp %s" #~ msgid "Identifying needed job controllers according to specified clusters" #~ msgstr "" #~ "Identifierar behövda jobbkontrollerare i enlighet med specifierade kluster" #~ msgid "" #~ "Identifying needed job controllers according to all jobs present in job " #~ "list" #~ msgstr "" #~ "Identifierar behövda jobbkontrollerare i enlighet med alla jobb i " #~ "jobblistan" #, fuzzy #~ msgid "Failed uploading file: %s - %s" #~ msgstr "Misslyckades med att ladda upp fil: %s" #~ msgid "Failed uploading file: %s" #~ msgstr "Misslyckades med att ladda upp fil: %s" #, fuzzy #~ msgid "TargetRetriever %s could not be created" #~ msgstr "Service %s(%s) kunde inte skapas" #, fuzzy #~ msgid "Loaded TargetRetriever %s" #~ msgstr "Laddade Service %s(%s)" #, fuzzy #~ msgid "Incompatible RSL attributes" #~ msgstr " attribut:" #, fuzzy #~ msgid "" #~ "%s is not a directory, it is needed for the client to function correctly" #~ msgstr "sökväg till certifikatfil" #~ msgid "Created empty ARC job list file: %s" #~ msgstr "Skapade tom ARC-jobblistfil: %s" #~ msgid "" #~ "Default proxy file does not exist: %s trying default certificate and key" #~ msgstr "" #~ "Förvald proxyfil existerar inte: %s försöker med förvalt certifikat ock " #~ "nyckel" #, fuzzy #~ msgid "Can not locate CA certificate directory." 
#~ msgstr "Kan inte läsa CA-certifikatkatalog: %s (%s)" #, fuzzy #~ msgid "Adding rejected service %s:%s:%s" #~ msgstr "Avvisar service: %s" #, fuzzy #~ msgid "Cannot parse the specified %s service (%s)" #~ msgstr "Kan inte läsa nyckelfil: %s (%s)" #, fuzzy #~ msgid "The specified %s service (%s) is not a valid URL" #~ msgstr "%s är inte en giltig URL" #, fuzzy #~ msgid "Adding %s service %s:%s " #~ msgstr "Avvisar service: %s" #, fuzzy #~ msgid "Alias name (%s) contains a unknown servicetype %s at %s" #~ msgstr "Alias %s innehåller en beräkningsservice av gridtyp %s: %s" #, fuzzy #~ msgid "Failed to read input passphrase" #~ msgstr "Misslyckades med att läsa fil %s: %s" #~ msgid "Private key of the credential object is NULL" #~ msgstr "Kreditivobjektets privata nyckel är NULL" #, fuzzy #~ msgid "Error when loading the extension section: %s" #~ msgstr "Fel under transaktionen: %s" #~ msgid "DataMover::Transfer: will try to compute crc" #~ msgstr "DataMover::Transfer: kommer att försöka beräkna crc" #~ msgid "Cache file valid until: %s" #~ msgstr "Cachefil giltig till: %s" #~ msgid "Source is bad URL or can't be used due to some reason" #~ msgstr "Källan är ogiltig URL eller kan inte användas av någon anledning" #~ msgid "Destination is bad URL or can't be used due to some reason" #~ msgstr "" #~ "Destinationen är ogiltig URL eller kan inte användas av någon anledning" #~ msgid "Failed while transfering data (mostly timeout)" #~ msgstr "Misslyckades med att överföra data (mestadels timeout)" #~ msgid "Error deleting location or URL" #~ msgstr "Fel vid borttagande av plats eller URL" #~ msgid "DataPoint is already writing" #~ msgstr "DataPoint skriver redan" #~ msgid "Error creating file %s with mkstemp(): %s" #~ msgstr "Fel vid skapande av fil %s med mkstemp(): %s" #~ msgid "Error opening lock file we just renamed successfully %s: %s" #~ msgstr "Fel vid öppnande av låsfil vi just lyckats döpa om %s: %s" #~ msgid "" #~ "Lock that recently existed has been deleted by another process, calling " #~ "Start() again" #~ msgstr "" #~ "Lås som nyligen existerat har förstörts av en annan process, anropar Start" #~ "() igen" #~ msgid "Error opening valid and existing lock file %s: %s" #~ msgstr "Fel vid öppnandet av en giltig och existerande låsfil %s: %s" #~ msgid "Error reading valid and existing lock file %s: %s" #~ msgstr "Fel vid läsandet av en giltig och existerande låsfil %s: %s" #, fuzzy #~ msgid "Error creating tmp file %s for remote lock with mkstemp(): %s" #~ msgstr "Fel vid skapande av fil %s med mkstemp(): %s" #, fuzzy #~ msgid "Error writing to tmp lock file for remote lock %s: %s" #~ msgstr "Fel vid skrivning till temporär låsfil %s: %s" #, fuzzy #~ msgid "Warning: closing tmp lock file for remote lock %s failed" #~ msgstr "Varning: stängning av temporär låsfil %s misslyckades" #, fuzzy #~ msgid "Error renaming tmp file %s to lock file %s for remote lock: %s" #~ msgstr "Fel vid omdöpning av temporär låsfil %s till låsfil %s: %s" #, fuzzy #~ msgid "" #~ "Error renaming lock file for remote lock, even though rename() did not " #~ "return an error: %s" #~ msgstr "" #~ "Fel vid omdöpning av låsfil, trots at rename() inte returnerade ett fel" #, fuzzy #~ msgid "" #~ "Error opening lock file for remote lock we just renamed successfully %s: %" #~ "s" #~ msgstr "Fel vid öppnande av låsfil vi just lyckats döpa om %s: %s" #, fuzzy #~ msgid "" #~ "The remote cache file is currently locked with a valid lock, will " #~ "download from source" #~ msgstr "Filen är för tillfället låst med ett giltigt 
lås" #~ msgid "Failed to create file %s for writing: %s" #~ msgstr "Misslyckades med att skapa fil %s för skrivning: %s" #, fuzzy #~ msgid "Error: Cache file %s does not exist" #~ msgstr "Cachefil %s existerar inte" #~ msgid "Failed to change permissions of session dir to 0700: %s" #~ msgstr "" #~ "Misslyckades med att ändra tillträde för sessionskatalog till 0700: %s" #~ msgid "Failed to change owner of destination dir to %i: %s" #~ msgstr "Misslyckades med att ändra ägare av destinationskatalog till %i: %s" #~ msgid "Failed change ownership of destination file %s: %s" #~ msgstr "Misslyckades med att ändra ägare av destinationsfil %s: %s" #~ msgid "Error opening per-job dir %s: %s" #~ msgstr "Fel vid öppnande av per-jobb-katalog %s: %s" #~ msgid "Mismatching url in file %s: %s Expected %s" #~ msgstr "Ej matchande URL i fil %s: %s Förväntade %s" #~ msgid "Bad separator in file %s: %s" #~ msgstr "Felaktig separator i fil %s: %s" #~ msgid "Bad value of expiry time in %s: %s" #~ msgstr "Felaktigt värde för tid i %s: %s" #~ msgid "Error opening lock file %s: %s" #~ msgstr "Fel vid öppning av låsfil %s: %s" #~ msgid "Cannot open database" #~ msgstr "Kan öppna databas" #~ msgid "Cannot abort transaction %s" #~ msgstr "Kan inte avbryta transaktion %s" #~ msgid "put: deadlock handling: try again" #~ msgstr "put: deadlock-hantering: försök igen" #~ msgid "put: cannot abort transaction: %s" #~ msgstr "put: kan inte avbryta transaktion: %s" #~ msgid "put: %s" #~ msgstr "put: %s" #~ msgid "get: deadlock handling, try again" #~ msgstr "get: deadlock-hantering, försök igen" #~ msgid "get: cannot abort transaction: %s" #~ msgstr "get: kan inte avbryta transaktion: %s" #~ msgid "get: %s" #~ msgstr "get: %s" #~ msgid "del: deadlock handling, try again" #~ msgstr "del: deadlock-hantering, försök igen" #~ msgid "del: cannot abort transaction: %s" #~ msgstr "del: kan inte avbryta transaktion: %s" #~ msgid "del: %s" #~ msgstr "del: %s" #~ msgid "get_doc_name: deadlock handling, try again" #~ msgstr "get_doc_name: deadlock-hantering, försök igen" #~ msgid "get_doc_names: cannot abort transaction: %s" #~ msgstr "get_doc_names: kan inte avbryta transaktion: %s" #~ msgid "Error during the transaction: %s" #~ msgstr "Fel under transaktionen: %s" #~ msgid "checkpoint: %s" #~ msgstr "checkpoint: %s" #, fuzzy #~ msgid "" #~ "The config: \n" #~ "%s \n" #~ msgstr "Cacheinställningar: %s" #, fuzzy #~ msgid "No URLs to connect to (in %s)" #~ msgstr "Misslyckades med att koppla upp mot %s(%s):%i" #, fuzzy #~ msgid "Starting:" #~ msgstr "start_reading_ftp" #, fuzzy #~ msgid "%(sn)s.%(rn)s called" #~ msgstr "%s misslyckades" #, fuzzy #~ msgid "CachedStringStore constructor called" #~ msgstr "Python-wrapper-konstruktor anropad" #, fuzzy #~ msgid "PickleStore constructor called" #~ msgstr "Python-wrapper-konstruktor anropad" #, fuzzy #~ msgid "filename:" #~ msgstr "filnamn" #, fuzzy #~ msgid "StringStore constructor called" #~ msgstr "Python-wrapper-konstruktor anropad" #, fuzzy #~ msgid "TransDBStore constructor called" #~ msgstr "Python-wrapper-konstruktor anropad" #, fuzzy #~ msgid "db environment opened" #~ msgstr " Avslutningstid: %s" #, fuzzy #~ msgid "couldn't find SleepTime, using %d as default" #~ msgstr "Kunde inte hitta systemets klientinställningar" #, fuzzy #~ msgid "got deadlock - retrying" #~ msgstr "get: deadlock-hantering, försök igen" #, fuzzy #~ msgid "Error getting %s" #~ msgstr "Fel vid hämtning av replikor: %s" #, fuzzy #~ msgid "retrying transaction" #~ msgstr "Fel under transaktionen: %s" #, fuzzy #~ 
msgid "Error setting %s" #~ msgstr "Fel vid start av session: %s" #, fuzzy #~ msgid "db environment closed" #~ msgstr " Avslutningstid: %s" #, fuzzy #~ msgid "ZODBStore constructor called" #~ msgstr "Python-wrapper-konstruktor anropad" #, fuzzy #~ msgid "GSS accept security context failed: %i/%i%s" #~ msgstr "Källan skapades: %s" #, fuzzy #~ msgid "Security check failed in GSI MCC for incoming message" #~ msgstr "Säkerhetskontroll misslyckades i TLS MCC för inkommande meddelande" #, fuzzy #~ msgid "GSS wrap failed: %i/%i%s" #~ msgstr "PASV misslyckades: %s" #, fuzzy #~ msgid "Transfer protocol is SSL2" #~ msgstr "Överföring från %s till %s" #, fuzzy #~ msgid "Transfer protocol is GSI" #~ msgstr "Överföring från %s till %s" #, fuzzy #~ msgid "Can not read file %s with list of trusted VOMS DNs" #~ msgstr "Kan inte hitta filen %s som inkluderar betrodda voms-certifikat-dn" #~ msgid "Can not load certificate file - %s" #~ msgstr "Kunde inte ladda certifikatfil - %s" #~ msgid "Can not load key file - %s" #~ msgstr "Kunde inte ladda nyckelfil - %s" #~ msgid "Security check failed in TLS MCC for outgoing message" #~ msgstr "Säkerhetskontroll misslyckades i TLS MCC för utgående meddelande" #~ msgid "Certificate %s failed Globus signing policy" #~ msgstr "Certifikat %s underkändes av Globus signeringspolicy" #~ msgid "Peer certificate cannot be extracted" #~ msgstr "Peer-certifikat kan inte extraheras" #, fuzzy #~ msgid "Peer certificate chain cannot be extracted" #~ msgstr "Peer-certifikat kan inte extraheras" #, fuzzy #~ msgid "Failed to create control (%s) or session (%s) directories" #~ msgstr "Misslyckades med att ändra ägare av sessionskatalog till %i: %s" #, fuzzy #~ msgid "Failed to store configuration into temporary file: %s" #~ msgstr "Misslyckades med att ladda serviceinställningar" #~ msgid "Using cached configuration: %s" #~ msgstr "Använder cachade inställningar: %s" #, fuzzy #~ msgid "wrong number in maxload: %s" #~ msgstr "felaktigt nummer i maxload" #, fuzzy #~ msgid "wrong number in speedcontrol: %s" #~ msgstr "felaktigt nummer i speedcontrol" #~ msgid "wrong option in securetransfer" #~ msgstr "felaktig inställning i securetransfer" #~ msgid "wrong option in passivetransfer" #~ msgstr "felaktig inställning i passivetransfer" #~ msgid "wrong option in norootpower" #~ msgstr "felaktig inställning i norootpower" #~ msgid "wrong option in localtransfer" #~ msgstr "felaktig inställning i localtransfer" #~ msgid "defaultttl is empty" #~ msgstr "defaultttl är tom" #~ msgid "junk in defaultttl command" #~ msgstr "skräp i defaultttl-kommando" #~ msgid "maxrerun is empty" #~ msgstr "maxrerun är tom" #~ msgid "diskspace is empty" #~ msgstr "diskspace är tom" #~ msgid "junk in diskspace command" #~ msgstr "skräp i diskspace-kommando" #~ msgid "wrong number in diskspace command" #~ msgstr "felaktigt nummer i diskspace-kommando" #~ msgid "junk in defaultlrms command" #~ msgstr "skräp i defaultlrms-kommando" #~ msgid "timeout for plugin is missing" #~ msgstr "timeout för plugin saknas" #, fuzzy #~ msgid "Can't read user list in specified file %s" #~ msgstr "Kan inte läsa användare i gridmap-fil %s" #~ msgid "Warning: creation of user \"%s\" failed" #~ msgstr "Varning: skapande av användare \"%s\" misslyckades" #, fuzzy #~ msgid "Can't read users in specified file %s" #~ msgstr "Kan inte läsa användare i gridmap-fil %s" #, fuzzy #~ msgid "No username entries in control" #~ msgstr "Ej tillräckligt antal parametrar i copyurl" #~ msgid "Starting grid-manager thread" #~ msgstr "Startar 
grid-manager-tråd" #, fuzzy #~ msgid "Can't initialize runtime environment - EXITING" #~ msgstr "Kan inte initiera runtime-miljö - AVSLUTAR." #, fuzzy #~ msgid "Can't recognize own username - EXITING" #~ msgstr "Kan inte känna igen eget användarnamn - AVSLUTAR." #, fuzzy #~ msgid "Error processing configuration - EXITING" #~ msgstr "Fel vid processering av inställningar -AVSLUTAR." #, fuzzy #~ msgid "No suitable users found in configuration - EXITING" #~ msgstr "Inga lämpliga användare funna i inställningar - AVSLUTAR." #~ msgid "Not all jobs are cleaned yet" #~ msgstr "Alla jobb har inte rensats ännu" #~ msgid "Trying again" #~ msgstr "Försöker igen" #, fuzzy #~ msgid "Jobs cleaned" #~ msgstr "Jobb rensade." #~ msgid "%s: Added" #~ msgstr "%s: Tillagt" #, fuzzy #~ msgid "%s: state: %s: starting new child" #~ msgstr "%s: tillstånd: PREPARING/FINISHING: startar ny barnprocess" #, fuzzy #~ msgid "%s: Failed to run uploader process" #~ msgstr "%s: Misslyckades med att exekvera down/uploader-process." #, fuzzy #~ msgid "%s: Failed to run downloader process" #~ msgstr "%s: Misslyckades med att exekvera down/uploader-process." #~ msgid "%s: State: PREPARING/FINISHING: child is running" #~ msgstr "%s: Tillstånd: PREPARING/FINISHING: barnprocess exekveras" #~ msgid "%s: State: PREPARING: child exited with code: %i" #~ msgstr "%s: tillstånd PREPARING: barnprocess avslutades med kod %i" #~ msgid "%s: State: FINISHING: child exited with code: %i" #~ msgstr "%s: tillstånd FINISHING: barnprocess avslutades med kod %i" #~ msgid "%s: State: FINISHING: unrecoverable error detected (exit code 1)" #~ msgstr "" #~ "%s: Tillstånd: FINISHING: ohjälpligt fel detekterat (avslutningskod 1)" #~ msgid "%s: State: PREPARING: unrecoverable error detected (exit code 1)" #~ msgstr "" #~ "%s: Tillstånd: PREPARING: ohjälpligt fel detekterat (avslutningskod 1)" #~ msgid "%s: State: PREPARING/FINISHING: retryable error" #~ msgstr "%s: Tillstånd: PREPARING/FINISHING: återförsökbart fel" #, fuzzy #~ msgid "%s: State: %s: credentials probably expired (exit code %i)" #~ msgstr "" #~ "%s: Tillstånd: FINISHING: kreditiv troligen utgångna (avslutningskod 3)" #, fuzzy #~ msgid "%s: State: %s: failed to renew credentials" #~ msgstr "ChangeActivityStatus: gammalt BES-tillstånd matchar inte" #, fuzzy #~ msgid "" #~ "%s: State: %s: some error detected (exit code %i). Recover from such type " #~ "of errors is not supported yet." #~ msgstr "" #~ "%s: Tillstånd: FINISHING: fel detekterade (avslutningskod %i). " #~ "Återhämtning från denna feltyp stöds ännu inte." #, fuzzy #~ msgid "%s: Reprocessing RSL failed" #~ msgstr "%s: Omprocessering av RSL misslyckades." 
#, fuzzy #~ msgid "%s: adding to transfer share %s" #~ msgstr "Dataöverföring avbruten: %s" #, fuzzy #~ msgid "Failed to deploy Janitor" #~ msgstr "Misslyckades med förallokera utrymme" #, fuzzy #~ msgid "Can't start %s" #~ msgstr "Kan inte göra stat på filen: %s" #~ msgid "Assigned new informational document" #~ msgstr "Tilldelat nytt informationsdokument" #~ msgid "soap body does not include any request node" #~ msgstr "SOAP-meddelande innehåller ingen förfrågan-nod" #~ msgid "request node is empty" #~ msgstr "Förfrågan-nod är tom" #, fuzzy #~ msgid "NULL response" #~ msgstr "Inget svar" #~ msgid "Authorized from Charon service" #~ msgstr "Auktoriserad från Charon-service" #, fuzzy #~ msgid "" #~ "Not authorized from Charon service; Some of the RequestItem does not " #~ "satisfy Policy" #~ msgstr "" #~ "Ej auktoriserad från Charon-service; Några RequestItem uppfyller inte " #~ "Policy" #~ msgid "process: %s: not supported" #~ msgstr "process: %s: understöds inte" #~ msgid "Evaluator: %s" #~ msgstr "Utvärderare: %s" #, fuzzy #~ msgid "Loading policy from %s" #~ msgstr "Läser inställningsfil: %s" #, fuzzy #~ msgid "Failed loading policy from %s" #~ msgstr "Misslyckades med att ladda ner %s till %s" #, fuzzy #~ msgid "Policy removed: %s" #~ msgstr "listpost: %s" #, fuzzy #~ msgid "Old policy times: %u/%u" #~ msgstr "policyrad: %s" #, fuzzy #~ msgid "New policy times: %u/%u" #~ msgstr "policyrad: %s" #, fuzzy #~ msgid "Policy modified: %s" #~ msgstr "policyrad: %s" #~ msgid "Policy Decision Request failed" #~ msgstr "Policybeslutsförfrågan misslyckades" #~ msgid "Policy Decision Request succeed!!!" #~ msgstr "Policybeslutsförfrågan lyckades!!!" #, fuzzy #~ msgid "Status request failed" #~ msgstr "En statusförfrågan misslyckades" #, fuzzy #~ msgid "Status request succeed" #~ msgstr "En statusförfrågan lyckades" #, fuzzy #~ msgid "The response to a status request was not a SOAP message" #~ msgstr "Ett svar på en statusförfrågan var inget SOAP-meddelande" #, fuzzy #~ msgid "Service status request failed" #~ msgstr "En servicestatusförfrågan misslyckades" #, fuzzy #~ msgid "Service status request succeed" #~ msgstr "En servicestatusförfrågan lyckades" #, fuzzy #~ msgid "Job termination request failed" #~ msgstr "En förfrågan om att avbryta ett jobb misslyckades" #, fuzzy #~ msgid "Job termination request succeed" #~ msgstr "En förfrågan om att avbryta ett jobb lyckades" #, fuzzy #~ msgid "file_name: " #~ msgstr "filnamn" #, fuzzy #~ msgid "Result(s) download" #~ msgstr "nedladdning" #, fuzzy #~ msgid "Download Place: " #~ msgstr "Laddar ner jobb: %s" #, fuzzy #~ msgid "Download cycle: start" #~ msgstr "Laddar ner jobb: %s" #, fuzzy #~ msgid "Download url: " #~ msgstr "Laddar ner jobb: %s" #, fuzzy #~ msgid "Download path: " #~ msgstr "Laddar ner jobb: %s" #, fuzzy #~ msgid "Can not create output SOAP payload for delegation service" #~ msgstr "" #~ "Misslyckades med att hitta delegeringskreditiv i klientinställningar" #, fuzzy #~ msgid "Can not store proxy certificate" #~ msgstr "Kan inte ange CN i proxycertifikatet" #, fuzzy #~ msgid "Can not find the corresponding credential from credential cache" #~ msgstr "Kan inte hitta filen %s som inkluderar betrodda voms-certifikat-dn" #, fuzzy #~ msgid "Signing proxy on delegation service failed" #~ msgstr "Delegeringsauktorisering misslyckades" #, fuzzy #~ msgid "Hopi SlaveMode is active, PUT is only allowed to existing files" #~ msgstr "" #~ "Hopi SlaveMode är aktiv, PUT är endast tillåtet till existerande filer." 
#~ msgid "Hopi Initialized" #~ msgstr "Hopi initierad" #~ msgid "Hopi DocumentRoot is " #~ msgstr "Hopi DocumentRoot är " #~ msgid "Hopi SlaveMode is on!" #~ msgstr "Hopi SlaveMode är på!" #~ msgid "Hopi shutdown" #~ msgstr "Hopi avstängning" #, fuzzy #~ msgid "File size is %u" #~ msgstr "Misslyckades med att läsa fil %s: %s" #~ msgid "error on write" #~ msgstr "fel vid skrivning" #, fuzzy #~ msgid "Input for PUT operation is neither stream nor buffer" #~ msgstr "PUT: indata är varken ström eller buffer" #, fuzzy #~ msgid "method=%s, path=%s, url=%s, base=%s" #~ msgstr "metod=%s, sökväg=%s, url=%s, basurl=%s" #, fuzzy #~ msgid "No content provided for PUT operation" #~ msgstr "Icke understödd operation" #~ msgid "Not supported operation" #~ msgstr "Icke understödd operation" #, fuzzy #~ msgid "Status (%s): Failed" #~ msgstr "Status: %s %d" #, fuzzy #~ msgid "Status (%s): OK" #~ msgstr "Status: %s %d" #, fuzzy #~ msgid "Parsing configuration parameters" #~ msgstr "Läser inställningsfil: %s" #, fuzzy #~ msgid "Empty endpoint element in the configuration!" #~ msgstr "Inget nästa element i kedjan" #, fuzzy #~ msgid "KeyPath: %s" #~ msgstr "get: %s" #, fuzzy #~ msgid "CACertficatePath: %s" #~ msgstr "Certifikatformat för BIO är: %d" #, fuzzy #~ msgid "Missing or empty KeyPath element in the configuration!" #~ msgstr "Inget nästa element i kedjan" #, fuzzy #~ msgid "Misisng or empty CertificatePath element in the configuration!" #~ msgstr "Inget nästa element i kedjan" #, fuzzy #~ msgid "Missing or empty ProxyPath element in the configuration!" #~ msgstr "Inget nästa element i kedjan" #, fuzzy #~ msgid "Missing or empty CACertificatesDir element in the configuration!" #~ msgstr "Inget nästa element i kedjan" #, fuzzy #~ msgid "Missing or empty CACertificatePath element in the configuration!" #~ msgstr "Inget nästa element i kedjan" #~ msgid "Invalid database path definition" #~ msgstr "Ogiltig databassökvägdefinition" #, fuzzy #~ msgid "The InfoProvider URL is empty." #~ msgstr "Ingen giltig plats tillgänglig" #, fuzzy #~ msgid "RemoveRegistrations message sent to neighbors." #~ msgstr "Registrering för service: %s" #, fuzzy #~ msgid "Query received: %s" #~ msgstr "Förfrågan: %s" #, fuzzy #~ msgid "RemoveRegistrations received: ID=%s" #~ msgstr "Registrering för service: %s" #, fuzzy #~ msgid "Connect received" #~ msgstr "Kontaktar: %s" #, fuzzy #~ msgid "Communication error: input is not SOAP" #~ msgstr "utdata är inte SOAP" #, fuzzy #~ msgid "Query failed at %s, choosing new InfoProvider." #~ msgstr "Förfrågan misslyckades: Inget svar" #, fuzzy #~ msgid "Remove ISIS (%s) from the list of InfoProviders." #~ msgstr "Tar bort jobb %s från jobblistfilen" #, fuzzy #~ msgid "No InfoProvider is available." #~ msgstr "Ingen giltig plats tillgänglig" #, fuzzy #~ msgid "Connect status (%s): Failed" #~ msgstr "%s ny status: %s" #, fuzzy #~ msgid "Connect status (%s): OK" #~ msgstr "%s ny status: %s" #, fuzzy #~ msgid "No more available ISIS in the neighbors list." #~ msgstr "Inget mål tillgängligt inuti policyn" #, fuzzy #~ msgid "Database mass updated." #~ msgstr "Dataöverföring avbruten" #, fuzzy #~ msgid " Deconstructing is waiting for PerlProcessor" #~ msgstr "Servicen väntar på förfrågningar" #, fuzzy #~ msgid " Deconstructing is waiting for TaskQueue" #~ msgstr "Servicen väntar på förfrågningar" #, fuzzy #~ msgid " Deconstructing is waiting for TaskSet" #~ msgstr "Servicen väntar på förfrågningar" #, fuzzy #~ msgid "Creating fault! 
Reason: \"%s\"" #~ msgstr "Cache skapades: %s" #, fuzzy #~ msgid "Thread %d, Input is not SOAP" #~ msgstr "Indata är inte SOAP" #~ msgid "** %s" #~ msgstr "** %s" #, fuzzy #~ msgid "Cannot allocate output raw buffer" #~ msgstr "Kan inte allokera utdata-råbuffer" #~ msgid "Permission denied from %s host" #~ msgstr "Tillstånd nekad från %s värd" #~ msgid "Cannot collect resource information" #~ msgstr "Kan inte samla in resursinformation" #~ msgid "No response" #~ msgstr "Inget svar" #~ msgid "Cannot find job id" #~ msgstr "Kan inte hitta jobb-id" #~ msgid "Cannot find scheduler endpoint" #~ msgstr "Kan ej hitta schemaläggarändpunkt" #~ msgid "Status: %s %d" #~ msgstr "Status: %s %d" #~ msgid "Process job: %s" #~ msgstr "Processera jobb: %s" #~ msgid "No scheduler configured" #~ msgstr "Ingen schemaläggare inställd" #~ msgid "Do Request: %s" #~ msgstr "Utför förfrågan: %s" #~ msgid "No free CPU slot" #~ msgstr "Ingen ledig CPU-slot" #~ msgid "Per: %d" #~ msgstr "Period: %d" #~ msgid "%s reported %s" #~ msgstr "%s rapporterade %s" #~ msgid "%s reported" #~ msgstr "%s rapporterade" #~ msgid "%s job reported finished" #~ msgstr "%s job rapporterat avslutat" #~ msgid "Get activity status changes" #~ msgstr "Erhåll aktivitetstatusändringar" #~ msgid "Killing %s" #~ msgstr "Dödar: %s" #~ msgid "pre cleanup %s %d" #~ msgstr "föruppstädning %s %d" #~ msgid "cleanup 2 %s" #~ msgstr "uppstädning 2 %s" #~ msgid "PaulService shutdown" #~ msgstr "Paul service nedstängning" #~ msgid "Terminate job %s" #~ msgstr "Avsluta jobb %s" #~ msgid "Invalid JSDL! Missing application section" #~ msgstr "Ogiltig JSDL! Saknar application-stycke" #~ msgid "%s set exception" #~ msgstr "%s sätt undantag" #, fuzzy #~ msgid "Cmd: %s" #~ msgstr "Kommando: %s" #~ msgid "StdOut: %s" #~ msgstr "StdOut: %s" #~ msgid "StdErr: %s" #~ msgstr "StdErr: %s" #~ msgid "return from run" #~ msgstr "återkomst från körning" #~ msgid "Error during the application run" #~ msgstr "Fel under programexekveringen" #~ msgid "Exception: %s" #~ msgstr "Undantag: %s" #~ msgid "SpawnError" #~ msgstr "Spawn-fel" #~ msgid "Filetransfer created" #~ msgstr "Filöverföring skapad" #~ msgid "download" #~ msgstr "nedladdning" #~ msgid "Stage in" #~ msgstr "Nedladdning" #~ msgid "Stage out" #~ msgstr "Uppladdning" #~ msgid "Succeeded to verify the signature under " #~ msgstr "Lyckades verifiera signaturen under " #~ msgid "Failed to verify the signature under " #~ msgstr "Misslyckades med att verifiera signaturen under " #~ msgid "" #~ "The NameID inside request is the same as the NameID from the tls " #~ "authentication: %s" #~ msgstr "" #~ "Namn-id i begäran är detsamma som namn-id från TLS-autentiseringen: %s" #, fuzzy #~ msgid "Can't establish connection to mysql database" #~ msgstr "Misslyckades med att etablera förbindelse till %s:%i" #, fuzzy #~ msgid "Is connected to database? 
%s" #~ msgstr "Disconnect: Misslyckades med att förstöra handtag: %s" #, fuzzy #~ msgid "SP Service name is %s" #~ msgstr "Peer-namn: %s" #, fuzzy #~ msgid "SAML Metadata is from %s" #~ msgstr "%s: Tillstånd: %s från %s" #, fuzzy #~ msgid "saml2SP: Unauthorized" #~ msgstr "echo: Oautoriserad" #, fuzzy #~ msgid "AuthnRequest after deflation: %s" #~ msgstr "Insändning returnerade fel: %s" #, fuzzy #~ msgid "Using private key file to sign: %s" #~ msgstr "Använder nyckelfil: %s" #, fuzzy #~ msgid "Decrypted SAML Assertion: %s" #~ msgstr "%s version %s" #~ msgid "Request failed: No response" #~ msgstr "Förfrågan misslyckades: Inget svar" #~ msgid "Request failed: Error" #~ msgstr "Förfrågan misslyckades: Fel" #, fuzzy #~ msgid "Can not find StatusCode" #~ msgstr "Kan inte hitta MCC-status-objekt" #~ msgid "Request succeeded!!!" #~ msgstr "Förfrågan lyckades!!!" #~ msgid "Cannot create SOAP fault" #~ msgstr "Kan inte skapa SOAP-fel" #~ msgid "GetActivityStatuses: job %s not found" #~ msgstr "GetActivityStatuses: jobb %s ej funnet" #~ msgid "ChangeActivityStatuses: job %s not found" #~ msgstr "ChangeActivityStatuses: jobb %s ej funnet" #~ msgid "GetActivityDocuments: job %s not found" #~ msgstr "GetActivityDocuments: jobb %s ej funnet" #~ msgid "GetActivityStatuses: job %s" #~ msgstr "GetActivityStatuses: jobb %s" #~ msgid "doSched" #~ msgstr "doSched" #~ msgid "jobq checkpoint done" #~ msgstr "jobq checkpoint färdig" #~ msgid "" #~ "Count of jobs: %i Count of resources: %i Scheduler period: %i Endpoint: %" #~ "s DBPath: %s" #~ msgstr "" #~ "Antal jobb: %i Antal resurser: %i Schemaläggningsperiod: %i Ändpunkt: %s " #~ "Databassökväg: %s" #~ msgid "NEW job: %s" #~ msgstr "NYTT jobb: %s" #~ msgid "A-REX ID: %s" #~ msgstr "A-REX ID: %s" #~ msgid "Sched job ID: %s NOT SUBMITTED" #~ msgstr "Sched jobb-ID: %s EJ INSÄNT" #~ msgid "%s set killed" #~ msgstr "%s uppsättning avbruten" #~ msgid "%s remove from queue" #~ msgstr "%s ta bort från kö" #~ msgid "Sched job ID: %s (A-REX job ID is empty)" #~ msgstr "Sched jobb-ID: %s (A-REX jobb-ID är tomt)" #~ msgid "Job RESCHEDULE: %s" #~ msgstr "Jobb OMSCHEMALAGT: %s" #~ msgid "JobID: %s state: %s" #~ msgstr "JobID: %s tillstånd: %s" #~ msgid "doReschedule" #~ msgstr "doReschedule" #~ msgid "Error during database open: %s" #~ msgstr "Fel under öppnande av databas: %s" #~ msgid "%d <> %d" #~ msgstr "%d <> %d" #~ msgid "invalid job id" #~ msgstr "ogiltigt jobb-id" #~ msgid "Invalid status report" #~ msgstr "Ogiltig statusrapport" #~ msgid "%s reports job status of %s but it is running on %s" #~ msgstr "%s rapporterar jobbstatus för %s men det exekverar på %s" #~ msgid "%s try to status change: %s->%s" #~ msgstr "%s försök att ändra status: %s->%s" #~ msgid "refresh: Cannot abort transaction: %s" #~ msgstr "refresh: Kan inte avbryta transaktion: %s" #~ msgid "refresh: Error during transaction: %s" #~ msgstr "refresh: Fel under transaktion: %s" #~ msgid "operator[]: Cannot abort transaction: %s" #~ msgstr "operator[]: Kan inte avbryta transaktion: %s" #~ msgid "remove: Cannot abort transaction: %s" #~ msgstr "remove: Kan inte avbryta transaktion: %s" #, fuzzy #~ msgid "Composed DN: %s" #~ msgstr "Kommando: %s" #, fuzzy #~ msgid "CentralAHash constructor called" #~ msgstr "Python-wrapper-konstruktor anropad" #, fuzzy #~ msgid "Error importing class" #~ msgstr "Fel vid listning av replikor: %s" #, fuzzy #~ msgid "ReplicatedAHash constructor called" #~ msgstr "Python-wrapper-konstruktor anropad" #, fuzzy #~ msgid "sending message of length" #~ msgstr "inkommande 
meddelande är inte SOAP" #, fuzzy #~ msgid "Initialized replication environment" #~ msgstr "Misslyckades med att initiera GM-miljö" #, fuzzy #~ msgid "Couldn't start replication manager." #~ msgstr "Kan inte starta program" #, fuzzy #~ msgid "Could not find checking period, using default 10s" #~ msgstr "Kunde inte hitta systemets klientinställningar" #, fuzzy #~ msgid "master locking" #~ msgstr "Timer avfyrad" #, fuzzy #~ msgid "couldn't unlock" #~ msgstr "Kan inte starta program" #, fuzzy #~ msgid "Couldn't start replication framework" #~ msgstr "Kan inte starta program" #, fuzzy #~ msgid "Couldn't run election" #~ msgstr "Kan inte starta program" #, fuzzy #~ msgid "entering startElection" #~ msgstr "Fel under transaktionen: %s" #, fuzzy #~ msgid "failed to send to" #~ msgstr "Misslyckades med att städa upp efter jobb" #, fuzzy #~ msgid "entering processMessage from " #~ msgstr "klientprocess anropad" #, fuzzy #~ msgid "Got dbnotfound" #~ msgstr "Jobbinformation ej funnen: %s" #, fuzzy #~ msgid "couldn't process message" #~ msgstr "klientprocess anropad" #, fuzzy #~ msgid "I am now a master" #~ msgstr "Skapar en echo-klient" #, fuzzy #~ msgid "I am now a client" #~ msgstr "Skapar en echo-klient" #, fuzzy #~ msgid "Getting permission failed" #~ msgstr "Misslyckades med att skapa delegering" #, fuzzy #~ msgid "accessing gateway: %s" #~ msgstr "Cache skapades: %s" #, fuzzy #~ msgid "Error connecting to ISIS %{iu}s, reason: %{r}s" #~ msgstr "Fel vid start av session: %s" #, fuzzy #~ msgid "Error in initThread: %s" #~ msgstr "Fel vid sökning av replikor: %s" #, fuzzy #~ msgid "Error in isisThread: %s" #~ msgstr "Fel vid tillägg av replika: %s" #, fuzzy #~ msgid "Error creating new entry in Librarian: %s" #~ msgstr "Fel vid skapandet av LFC-post: %s" #, fuzzy #~ msgid "//// response from the external store:" #~ msgstr "Inget jobb-id har mottagits" #, fuzzy #~ msgid "location chosen:" #~ msgstr "Programflaggor:" #, fuzzy #~ msgid "removing" #~ msgstr "Tar bort %s" #, fuzzy #~ msgid "Delegation status: " #~ msgstr "Destinaltion: %s" #, fuzzy #~ msgid "creating proxy file : " #~ msgstr "Använder proxyfil: %s" #, fuzzy #~ msgid "Delegation failed: " #~ msgstr "DelegateProxy misslyckades" #, fuzzy #~ msgid "removeCredentials: %s" #~ msgstr "process: operation: %s" #, fuzzy #~ msgid "proxy store is not accessable." #~ msgstr "fil %s är inte tillgänglig" #, fuzzy #~ msgid "get response: %s" #~ msgstr "Svar: %s" #, fuzzy #~ msgid "AHash URL found in the configuration." #~ msgstr "Misslyckades med att ladda klientinställningar" #, fuzzy #~ msgid "Setting running state to True" #~ msgstr "Skapar och sänder en statusförfrågan" #, fuzzy #~ msgid "Error processing report message" #~ msgstr "Fel under processering av indata: %s" #, fuzzy #~ msgid "Error traversing: %s" #~ msgstr "Fel vid hämtning av replikor: %s" #, fuzzy #~ msgid "Error in traverseLN method: %s" #~ msgstr "Fel under öppnande av databas: %s" #, fuzzy #~ msgid "Subject:" #~ msgstr "subjekt: %s" #, fuzzy #~ msgid "Cannot import backend class %(c)s (reason: %(r)s)" #~ msgstr "Kan inte importera arcmodul" #, fuzzy #~ msgid "Cannot import store class" #~ msgstr "Hittar inte serviceklass" #, fuzzy #~ msgid "Cannot get serviceID" #~ msgstr "Kan inte erhålla resurs-ID" #, fuzzy #~ msgid "Error in isisLibrarianThread: %s" #~ msgstr "Fel vid skapandet av LFC-post: %s" #, fuzzy #~ msgid "Error in isisBartenderThread: %s" #~ msgstr "Fel under öppnande av databas: %s" #, fuzzy #~ msgid "store job descriptions in local sandbox." 
#~ msgstr "Kan inte öppna jobbeskrivningsfil: %s" #, fuzzy #~ msgid "Malformated response" #~ msgstr "Väntar på svar" #~ msgid "ARC_PLUGIN_PATH=%s" #~ msgstr "ARC_PLUGIN_PATH=%s" #~ msgid "start_reading_ftp: size: url: %s" #~ msgstr "start_reading_ftp: size: url: %s" #~ msgid "start_reading_ftp: failure" #~ msgstr "start_reading_ftp: misslyckande" #~ msgid "start_reading_ftp: timeout waiting for file size" #~ msgstr "start_reading_ftp: timeout vid väntan på filstorlek" #~ msgid "Timeout waiting for FTP file size - cancel transfer" #~ msgstr "Timeout vid väntan på FTP-filstorlek - avbryt överföring" #~ msgid "start_reading_ftp: failed to get file's size" #~ msgstr "start_reading_ftp: misslyckades med att erhålla filens storlek" #~ msgid "start_reading_ftp: obtained size: %llu" #~ msgstr "start_reading_ftp: erhöll storlek: %llu" #~ msgid "start_reading_ftp: globus_ftp_client_modification_time failed" #~ msgstr "start_reading_ftp: globus_ftp_client_modification_time misslyckades" #~ msgid "start_reading_ftp: timeout waiting for modification_time" #~ msgstr "start_reading_ftp: timeout vid väntan på ändringstid" #~ msgid "start_reading_ftp: failed to get file's modification time" #~ msgstr "start_reading_ftp: misslyckades med att erhålla filens ändringstid" #~ msgid "start_reading_ftp: creation time: %s" #~ msgstr "start_reading_ftp: skapelsetid: %s" #~ msgid "start_reading_ftp: range is out of size" #~ msgstr "start_reading_ftp: intervall är utanför storlek" #~ msgid "Can not read key file: %s" #~ msgstr "Kan inte läsa nyckelfil: %s" #~ msgid "%s: State FINISHING: starting child: %s" #~ msgstr "%s: tillstånd: FINISHING: startar barnprocess: %s" #~ msgid "%s: State: PREPARING: credentials probably expired (exit code 3)" #~ msgstr "" #~ "%s: Tillstånd: PREPARING: kreditiv troligen utgångna (avslutningskod 3)" #~ msgid "" #~ "%s: State: PREPARING: some error detected (exit code %i). Recover from " #~ "such type of errors is not supported yet." #~ msgstr "" #~ "%s: Tillstånd: PREPARING: fel detekterade (avslutningskod %i). " #~ "Återhämtning från denna feltyp stöds ännu inte." #~ msgid "%s: Plugin in state %s : %s" #~ msgstr "%s: Plugin i tillstånd %s : %s" #, fuzzy #~ msgid "Cannot clean up any cache files" #~ msgstr "Kan inte skapa SOAP-klient" #, fuzzy #~ msgid "timeout in seconds (default " #~ msgstr "timeout i sekunder (förval 20)" #, fuzzy #~ msgid "select broker method (Random (default), QueueBalance, or custom)" #~ msgstr "" #~ "välj mäklarmetod (Random (förval), QueueBalance, eller användardefinerad)" #~ msgid "Failed resolving aliases" #~ msgstr "Misslyckades med att slå upp alias" #, fuzzy #~ msgid "Received status " #~ msgstr "Rapportera status" #, fuzzy #~ msgid "Current transfer FAILED" #~ msgstr "Nuvarande överföring MISSLYCKADES." #, fuzzy #~ msgid "Transfer FAILED" #~ msgstr "Överföring MISSLYCKADES." 
#~ msgid "path for cache data (if different from -y)" #~ msgstr "sökväg för cachedata (om skiljt från -y)" #~ msgid "TargetRetrieverARC0 initialized with unknown url type" #~ msgstr "TargetRetrieverARC0 initialiserad med okänd url-typ" #, fuzzy #~ msgid "The response was not a SOAP message" #~ msgstr "Ett svar på en statusförfrågan var inget SOAP-meddelande" #, fuzzy #~ msgid "Fetching job state" #~ msgstr "Hämtar %s-jobb" #, fuzzy #~ msgid "The response to a service status request is Fault message: " #~ msgstr "Ett svar på en servicestatusförfrågan var inget SOAP-meddelande" #, fuzzy #~ msgid "There was an empty response to an index service query" #~ msgstr "Det kom inget svar på en servicestatusförfrågan" #, fuzzy #~ msgid "The response of a index service query was not a SOAP message" #~ msgstr "Ett svar på en servicestatusförfrågan var inget SOAP-meddelande" #, fuzzy #~ msgid "Request failed, service returned: %s" #~ msgstr "Insändning misslyckades, service returnerade: %s" #, fuzzy #~ msgid "Job resuming failed" #~ msgstr "Uppstädning efter ett jobb misslyckades" #, fuzzy #~ msgid "Job resumed at state: %s" #~ msgstr "JobID: %s tillstånd: %s" #, fuzzy #~ msgid "No job description" #~ msgstr "[jobb ...]" #~ msgid "Service returned no job identifier" #~ msgstr "Service returnerade ingen jobbidentifierare" #, fuzzy #~ msgid "Failed migrating job" #~ msgstr "Misslyckades med att sända in jobb" #~ msgid "TargetRetrieverARC1 initialized with unknown url type" #~ msgstr "TargetRetrieverARC1 initialiserad med okänd url-typ" #~ msgid "No job ID has been received" #~ msgstr "Inget jobb-id har mottagits" #~ msgid "Job starting failed" #~ msgstr "Misslyckades med att starta ett jobb" #~ msgid "Creating delegation failed" #~ msgstr "Misslyckades med att skapa delegering" #~ msgid "Could not retrieve job information" #~ msgstr "Kunde inte inhämta jobbinformation" #~ msgid "Failed to clean job" #~ msgstr "Misslyckades med att städa upp efter jobb" #~ msgid "Job registration failed" #~ msgstr "Misslyckades med att registrera jobb" #~ msgid "TargetRetrieverCREAM initialized with unknown url type" #~ msgstr "TargetRetrieverCREAM initialiserad med okänd url-typ" #, fuzzy #~ msgid "Creating client chain for UNICORE BES service" #~ msgstr "Misslyckades med att skapa delegering" #, fuzzy #~ msgid "TargetRetrieverUNICORE initialized with unknown url type" #~ msgstr "TargetRetrieverCREAM initialiserad med okänd url-typ" #, fuzzy #~ msgid "Creating and sending a service an index service query" #~ msgstr "Skapar och sänder en servicestatusförfrågan" #, fuzzy #~ msgid "Failed to load service configuration form file %s" #~ msgstr "Misslyckades med att ladda serviceinställningar" #~ msgid "SSL_library_init failed" #~ msgstr "SSL_library_init misslyckades" #, fuzzy #~ msgid "File type is neither file or directory" #~ msgstr "Misslyckades med att skapa/hitta katalog %s" #~ msgid "ArcClientComponent has no name attribute defined" #~ msgstr "Arcklientkomponent har inget namnattribut definierat" #~ msgid "ArcClientComponent has no id attribute defined" #~ msgstr "Arcklientkomponent har inget id-attribut definierat" #~ msgid "ArcClientComponent %s(%s) could not be created" #~ msgstr "Arcklientkomponent %s(%s) kunde inte skapas" #~ msgid "Loaded ArcClientComponent %s(%s)" #~ msgstr "Laddade Arcklientkomponent %s(%s)" #, fuzzy #~ msgid "Request failed: Error1" #~ msgstr "Förfrågan misslyckades: Fel" #, fuzzy #~ msgid "Request failed: Error2" #~ msgstr "Förfrågan misslyckades: Fel" #, fuzzy #~ msgid "Request failed: 
Error3" #~ msgstr "Förfrågan misslyckades: Fel" #, fuzzy #~ msgid "Request failed: Error4" #~ msgstr "Förfrågan misslyckades: Fel" #, fuzzy #~ msgid "Request failed: Error5" #~ msgstr "Förfrågan misslyckades: Fel" #~ msgid " Implementation Version: %s" #~ msgstr " Implementeringsversion: %s" #~ msgid "Killing %s jobs" #~ msgstr "Dödar %s-jobb" #~ msgid "Cleaning %s jobs" #~ msgstr "Städar upp efter %s-jobb" #~ msgid "Performing the 'cat' command on %s jobs" #~ msgstr "Utför cat-kommandot på %s-jobb" #~ msgid "Can not determine the stderr location: %s" #~ msgstr "Kan inte bestämma plats för stderr: %s" #, fuzzy #~ msgid "Migration to %s failed, trying next target" #~ msgstr "Insändnings till %s misslyckades, provar med nästa destination" #~ msgid "Failed to get DataHandle on source: %s" #~ msgstr "Misslyckades med få DataHandle till källa: %s" #~ msgid "Failed to get DataHandle on destination: %s" #~ msgstr "Misslyckades med få DataHandle till destination: %s" #~ msgid "" #~ "Job %s has been deleted (i.e. was in job store), but is not listed in job " #~ "list" #~ msgstr "" #~ "Job %s har tagits bort (d.v.s. fanns i joblager), men är inte listad i " #~ "jobblistan" #, fuzzy #~ msgid "No valid jobdescription found for: %s" #~ msgstr "Inga platser funna för destination: %s" #, fuzzy #~ msgid " JobName: %s" #~ msgstr " Namn: %s" #, fuzzy #~ msgid " Description: %s" #~ msgstr "Destinaltion: %s" #, fuzzy #~ msgid " JobProject: %s" #~ msgstr "subjekt: %s" #, fuzzy #~ msgid " JobCategory: %s" #~ msgstr " Tillstånd: %s" #, fuzzy #~ msgid " OptionalElement: %s, path: %s" #~ msgstr " Implementerare: %s" #, fuzzy #~ msgid " Author: %s" #~ msgstr "Utvärderare: %s" #, fuzzy #~ msgid " Input: %s" #~ msgstr "put: %s" #, fuzzy #~ msgid " Output: %s" #~ msgstr "put: %s" #, fuzzy #~ msgid " Epilogue: %s" #~ msgstr "Misslyckande: %s" #, fuzzy #~ msgid " SessionLifeTime: %s" #~ msgstr " Avslutningstid: %s" #, fuzzy #~ msgid " Address: %s" #~ msgstr "Lagt till användare : %s" #, fuzzy #~ msgid " Total CPU Time: %s" #~ msgstr " Största CPU-tid: %s" #, fuzzy #~ msgid " Individual CPU Time: %s" #~ msgstr " Minsta CPU-tid: %s" #, fuzzy #~ msgid " Total Wall Time: %s" #~ msgstr " Största klocktid: %s" #, fuzzy #~ msgid " Individual Wall Time: %s" #~ msgstr " Minsta klocktid: %s" #, fuzzy #~ msgid " Benchmark: %s" #~ msgstr " URL: %s" #, fuzzy #~ msgid " value: %d" #~ msgstr "Misslyckande: %s" #, fuzzy #~ msgid " OSFamily: %s" #~ msgstr "Misslyckande: %s" #, fuzzy #~ msgid " OSName: %s" #~ msgstr " Namn: %s" #, fuzzy #~ msgid " OSVersion: %s" #~ msgstr "%s version %s" #, fuzzy #~ msgid " DiskSpace: %d" #~ msgstr " Största diskutrymme: %i" #, fuzzy #~ msgid " CacheDiskSpace: %d" #~ msgstr " Största diskutrymme: %i" #, fuzzy #~ msgid " SessionDiskSpace: %d" #~ msgstr " Största diskutrymme: %i" #, fuzzy #~ msgid " IndividualDiskSpace: %d" #~ msgstr " Största diskutrymme: %i" #, fuzzy #~ msgid " Alias: %s" #~ msgstr " Ort: %s" #~ msgid " PostCode: %s" #~ msgstr " Postnummer: %s" #, fuzzy #~ msgid " Latitude: %s" #~ msgstr " Latitud: %f" #, fuzzy #~ msgid " Longitude: %s" #~ msgstr " Longitud: %f" #, fuzzy #~ msgid " Slots: %d" #~ msgstr " Använda slots: %d" #, fuzzy #~ msgid " NumberOfProcesses: %d" #~ msgstr "Antal möjliga destinationer: %d" #, fuzzy #~ msgid " SPMDVariation: %s" #~ msgstr "Destinaltion: %s" #, fuzzy #~ msgid " RunTimeEnvironment.Version: %s" #~ msgstr "Felaktigt namn för runtime-miljö: %s" #, fuzzy #~ msgid "[PosixJSDLParser] Failed to create parser context" #~ msgstr "Misslyckades med att skapa GUID i 
RLS: %s" #, fuzzy #~ msgid "Extracting local file list from job description failed" #~ msgstr "Kan inte öppna jobbeskrivningsfil: %s" #~ msgid "Failed uploading file" #~ msgstr "Misslyckades med att ladda upp fil" #~ msgid "Can not access ARC job list file: %s (%s)" #~ msgstr "Saknar tillträde till ARC-jobblistfil: %s (%s)" #~ msgid "URL entry in default services has no \"Flavour\" attribute" #~ msgstr "URL-post i förvalda services har inget Flavour-attribute" #~ msgid "URL entry in default services has no \"ServiceType\" attribute" #~ msgstr "URL-post i förvalda services har inget ServiceType-attribute<" #~ msgid "URL entry in default services is empty" #~ msgstr "URL-post i förvalda services är tom" #~ msgid "Default services contain a computing service of flavour %s: %s" #~ msgstr "Förvalda services innehåller en beräkningsservice av gridtyp %s: %s" #~ msgid "Default services contain an index service of flavour %s: %s" #~ msgstr "Förvalda services innehåller en indexservice av gridtyp %s: %s" #~ msgid "URL entry in default services contains unknown ServiceType: %s" #~ msgstr "URL-post i förvalda services innehåller en okänd ServiceType: %s" #~ msgid "Alias entry in default services is empty" #~ msgstr "Aliaspost i förvalda services är tom" #~ msgid "Done finding default services" #~ msgstr "Färdig med sökandet efter default services" #~ msgid "Resolving alias: %s" #~ msgstr "Slår upp alias: %s" #~ msgid "Alias \"%s\" requested but not defined" #~ msgstr "Alias \"%s\" begärt men inte definierat" #~ msgid "URL entry in alias definition \"%s\" has no \"Flavour\" attribute" #~ msgstr "URL-post i aliasdefinition \"%s\" har inget Flavour-attribut" #~ msgid "" #~ "URL entry in alias definition \"%s\" has no \"ServiceType\" attribute" #~ msgstr "URL-post i aliasdefinition \"%s\" har inget ServiceType-attribut" #~ msgid "URL entry in alias definition \"%s\" is empty" #~ msgstr "URL-poat i aliasdefinition \"%s\" är tom" #~ msgid "URL entry in alias definition \"%s\" is not a valid URL: %s" #~ msgstr "URL-post i aliasdefinition \"%s\" är inte en giltig URL: %s" #~ msgid "Alias %s contains an index service of flavour %s: %s" #~ msgstr "Alias %s innehåller en idexservice av gridtyp %s: %s" #~ msgid "" #~ "URL entry in alias definition \"%s\" contains unknown ServiceType: %s" #~ msgstr "" #~ "URL-post i aliasdefinition \"%s\" innehåller en okänd ServiceType: %s" #~ msgid "Alias entry in alias definition \"%s\" is empty" #~ msgstr "Alias-post in alias definition \"%s\" är tom" #~ msgid "Done resolving alias: %s" #~ msgstr "Uppslagning av alias färdigt: %s" #~ msgid "Key is not a file: %s" #~ msgstr "Nyckel är ingen fil: %s" #, fuzzy #~ msgid "Cannot access ARC user config file: %s (%s)" #~ msgstr "Har ej tillträde till ARC-användarinställningsfil: %s (%s)" #, fuzzy #~ msgid "Could not load system client configuration" #~ msgstr "Kunde inte hitta systemets klientinställningar" #, fuzzy #~ msgid "Invalid notify attribute: %c" #~ msgstr "Ogiltig periodsträng: %s" #~ msgid "Error in caching procedure (retryable)" #~ msgstr "Fel i cachningsprocedur (återförsökbart)" #~ msgid "DataManager has no name attribute defined" #~ msgstr "Datahanterare har inget namnattribut definierat" #~ msgid "DataManager has no id attribute defined" #~ msgstr "Datahanterare har inget id-attribut definierat" #~ msgid "DataManager %s(%s) could not be created" #~ msgstr "Datahanterare %s(%s) kunde inte skapas" #~ msgid "Loaded DataManager %s(%s)" #~ msgstr "Laddade datahanterare %s(%s)" #~ msgid "Plugins element has no Name 
defined" #~ msgstr "Pluginelement har inget namn definierat" #~ msgid "MCC_TLS::do_ssl_init" #~ msgstr "MCC_TLS::do_ssl_init" #~ msgid "Failed to allocate SSL locks" #~ msgstr "Misslyckades med att allokera SSL-lås" #~ msgid "SSL initialization counter lost sync" #~ msgstr "SSL-initialiseringsräknare förlorade synk" #~ msgid "MCC_TLS::do_ssl_deinit" #~ msgstr "MCC_TLS::do_ssl_deinit" #~ msgid "Request xml structure is: %s" #~ msgstr "Request xml structure är: %s" #~ msgid "Authorized from count.pdp!!!" #~ msgstr "Auktoriserad från count.pdp!!!" #~ msgid "UnAuthorized from count.pdp!!!" #~ msgstr "Oauktoriserad från count.pdp!!!" #, fuzzy #~ msgid "Creating http service side chain" #~ msgstr "Skapar servicesidokedjan" #~ msgid "process: CreateActivity" #~ msgstr "process: CreateActivity" #~ msgid "wrong option in cacheregistration" #~ msgstr "felaktig inställning i cacheregistration" #~ msgid "Configuration: Section: %s, Subsection: %s" #~ msgstr "Inställningar: Avdelning: %s, Underavdelning: %s" #~ msgid "Configuration: LRMS: %s" #~ msgstr "Inställningar: LRMS: %s" #, fuzzy #~ msgid "My hash is: %s" #~ msgstr "Ingen sådan användare: %s" #, fuzzy #~ msgid "My ServiceID: %s" #~ msgstr " Betjäningstillstånd: %s" #, fuzzy #~ msgid "The ServiceID (%s) is found in the database." #~ msgstr "Service %s(%s) kunde inte skapas" #, fuzzy #~ msgid "[Proxy] calculated value: %s" #~ msgstr " Proxy giltig till: %s" #, fuzzy #~ msgid "Connect request failed, try again." #~ msgstr "Echo-förfrågan misslyckades" #~ msgid "delete run" #~ msgstr "radera körning" #~ msgid "lasso_assertion_query_new() failed" #~ msgstr "lasso_assertion_query_new() misslyckades" #~ msgid "lasso_assertion_query_init_request failed" #~ msgstr "lasso_assertion_query_init_request misslyckades" #~ msgid "lasso_assertion_query_build_request_msg failed" #~ msgstr "lasso_assertion_query_build_request_msg misslyckades" #~ msgid "assertionRequestBody shouldn't be NULL" #~ msgstr "assertionRequestBody skall inte vara NULL" #~ msgid "lasso_assertion_query_process_response_msg failed" #~ msgstr "lasso_assertion_query_process_response_msg failed" #~ msgid "DMCs are loaded" #~ msgstr "DMC:erna har laddats" nordugrid-arc-5.4.2/po/PaxHeaders.7502/hu.gmo0000644000000000000000000000013213214316034016727 xustar000000000000000030 mtime=1513200668.550852066 30 atime=1513200668.550852066 30 ctime=1513200668.607852763 nordugrid-arc-5.4.2/po/hu.gmo0000644000175000002070000002225613214316034017004 0ustar00mockbuildmock00000000000000l|0 1 N \ !}     % , '= 1e     - #! E )\ !   !  ! 9 V d p "y &  0 1 6G ~    ) "'Jg w BA1#E-i": Uar!&/5 epxKH).$76\8+')06;OViE7OVv;z} #    +90N5/7$B`}/*#- <-Iw"+>O`1i- 3G#=k   $ 04<,q+  DT;89(/2X,  4+N-z; D T c c| U 6! =!IG!D!F!0")N"x"""""+""]"cS# # #P#"$/*$Z$M^$$%+*Fc"T?e 7G<'B :g];QaJ26S4X9ER\O0WMi)d-[ZI> f(l@L8A1^&. 
[binary content of po/hu.gmo, the compiled Hungarian message catalog]
nordugrid-arc-5.4.2/po/PaxHeaders.7502/nordugrid-arc.pot0000644000000000000000000000013213214316031021070 xustar000000000000000030 mtime=1513200665.944820193 30 atime=1513200666.037821331 30 ctime=1513200668.596852629 nordugrid-arc-5.4.2/po/nordugrid-arc.pot0000644000175000002070000213204713214316031021147 0ustar00mockbuildmock00000000000000# SOME DESCRIPTIVE TITLE. # Copyright (C) YEAR NorduGrid collaboration # This file is distributed under the same license as the PACKAGE package. # FIRST AUTHOR , YEAR.
# #, fuzzy msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: support@nordugrid.org\n" "POT-Creation-Date: 2017-12-13 22:31+0100\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=CHARSET\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\n" #: src/clients/compute/arccat.cpp:35 src/clients/compute/arcclean.cpp:34 #: src/clients/compute/arcget.cpp:35 src/clients/compute/arckill.cpp:33 #: src/clients/compute/arcrenew.cpp:32 src/clients/compute/arcresub.cpp:35 #: src/clients/compute/arcresume.cpp:32 src/clients/compute/arcstat.cpp:34 msgid "[job ...]" msgstr "" #: src/clients/compute/arccat.cpp:36 msgid "" "The arccat command performs the cat command on the stdout, stderr or grid\n" "manager's error log of the job." msgstr "" #: src/clients/compute/arccat.cpp:43 src/clients/compute/arcclean.cpp:41 #: src/clients/compute/arcget.cpp:42 src/clients/compute/arcinfo.cpp:45 #: src/clients/compute/arckill.cpp:40 src/clients/compute/arcrenew.cpp:37 #: src/clients/compute/arcresub.cpp:40 src/clients/compute/arcresume.cpp:37 #: src/clients/compute/arcstat.cpp:42 src/clients/compute/arcsub.cpp:54 #: src/clients/compute/arcsync.cpp:147 src/clients/compute/arctest.cpp:62 #: src/clients/credentials/arcproxy.cpp:475 #: src/clients/credentials/arcproxyalt.cpp:461 src/clients/data/arccp.cpp:641 #: src/clients/data/arcls.cpp:346 src/clients/data/arcmkdir.cpp:125 #: src/clients/data/arcrename.cpp:136 src/clients/data/arcrm.cpp:151 #: src/clients/echo/arcecho.cpp:61 src/clients/saml/saml_assertion_init.cpp:62 #: src/clients/wsrf/arcwsrf.cpp:74 src/hed/daemon/unix/main_unix.cpp:346 #: src/hed/daemon/win32/main_win32.cpp:148 #: src/services/a-rex/jura/jura.cpp:109 #, c-format msgid "%s version %s" msgstr "" #: src/clients/compute/arccat.cpp:52 src/clients/compute/arcclean.cpp:50 #: src/clients/compute/arcget.cpp:51 src/clients/compute/arcinfo.cpp:53 #: src/clients/compute/arckill.cpp:49 src/clients/compute/arcrenew.cpp:46 #: src/clients/compute/arcresub.cpp:49 src/clients/compute/arcresume.cpp:46 #: src/clients/compute/arcstat.cpp:51 src/clients/compute/arcsub.cpp:63 #: src/clients/compute/arcsync.cpp:156 src/clients/compute/arctest.cpp:84 #: src/clients/credentials/arcproxy.cpp:483 #: src/clients/credentials/arcproxyalt.cpp:469 src/clients/data/arccp.cpp:648 #: src/clients/data/arcls.cpp:354 src/clients/data/arcmkdir.cpp:133 #: src/clients/data/arcrename.cpp:144 src/clients/data/arcrm.cpp:160 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:172 #, c-format msgid "Running command: %s" msgstr "" #: src/clients/compute/arccat.cpp:63 src/clients/compute/arcclean.cpp:61 #: src/clients/compute/arcget.cpp:62 src/clients/compute/arcinfo.cpp:65 #: src/clients/compute/arckill.cpp:60 src/clients/compute/arcrenew.cpp:57 #: src/clients/compute/arcresub.cpp:53 src/clients/compute/arcresume.cpp:50 #: src/clients/compute/arcstat.cpp:62 src/clients/compute/arcsub.cpp:67 #: src/clients/compute/arcsync.cpp:167 src/clients/compute/arctest.cpp:88 #: src/clients/data/arccp.cpp:671 src/clients/data/arcls.cpp:376 #: src/clients/data/arcmkdir.cpp:155 src/clients/data/arcrename.cpp:166 #: src/clients/data/arcrm.cpp:182 src/clients/echo/arcecho.cpp:72 #: src/clients/wsrf/arcwsrf.cpp:101 msgid "Failed configuration initialization" msgstr "" #: src/clients/compute/arccat.cpp:76 src/clients/compute/arcclean.cpp:74 #: src/clients/compute/arcget.cpp:88 
src/clients/compute/arckill.cpp:73 #: src/clients/compute/arcrenew.cpp:70 src/clients/compute/arcresub.cpp:81 #: src/clients/compute/arcresume.cpp:70 src/clients/compute/arcstat.cpp:71 #, c-format msgid "Cannot read specified jobid file: %s" msgstr "" #: src/clients/compute/arccat.cpp:87 src/clients/compute/arcclean.cpp:85 #: src/clients/compute/arcget.cpp:99 src/clients/compute/arckill.cpp:84 #: src/clients/compute/arcrenew.cpp:81 src/clients/compute/arcresub.cpp:95 #: src/clients/compute/arcresume.cpp:81 src/clients/compute/arcstat.cpp:105 msgid "No jobs given" msgstr "" #: src/clients/compute/arccat.cpp:100 src/clients/compute/arcclean.cpp:98 #: src/clients/compute/arcget.cpp:112 src/clients/compute/arckill.cpp:97 #: src/clients/compute/arcrenew.cpp:94 src/clients/compute/arcresub.cpp:105 #: src/clients/compute/arcresume.cpp:94 src/clients/compute/arcstat.cpp:117 #, c-format msgid "Job list file (%s) doesn't exist" msgstr "" #: src/clients/compute/arccat.cpp:107 src/clients/compute/arcclean.cpp:105 #: src/clients/compute/arcget.cpp:119 src/clients/compute/arckill.cpp:104 #: src/clients/compute/arcrenew.cpp:101 src/clients/compute/arcresub.cpp:112 #: src/clients/compute/arcresume.cpp:101 src/clients/compute/arcstat.cpp:124 #: src/clients/compute/arctest.cpp:298 #, c-format msgid "Unable to read job information from file (%s)" msgstr "" #: src/clients/compute/arccat.cpp:116 src/clients/compute/arcclean.cpp:113 #: src/clients/compute/arcget.cpp:127 src/clients/compute/arckill.cpp:112 #: src/clients/compute/arcrenew.cpp:110 src/clients/compute/arcresub.cpp:120 #: src/clients/compute/arcresume.cpp:110 src/clients/compute/arcstat.cpp:133 #, c-format msgid "Warning: Job not found in job list: %s" msgstr "" #: src/clients/compute/arccat.cpp:129 src/clients/compute/arcclean.cpp:168 #: src/clients/compute/arcget.cpp:140 src/clients/compute/arckill.cpp:124 #: src/clients/compute/arcrenew.cpp:122 src/clients/compute/arcresub.cpp:132 #: src/clients/compute/arcresume.cpp:122 msgid "No jobs" msgstr "" #: src/clients/compute/arccat.cpp:144 #, c-format msgid "Could not create temporary file \"%s\"" msgstr "" #: src/clients/compute/arccat.cpp:145 src/clients/compute/arccat.cpp:151 #, c-format msgid "Cannot create output of %s for any jobs" msgstr "" #: src/clients/compute/arccat.cpp:152 #, c-format msgid "Invalid destination URL %s" msgstr "" #: src/clients/compute/arccat.cpp:170 #, c-format msgid "Job deleted: %s" msgstr "" #: src/clients/compute/arccat.cpp:180 #, c-format msgid "Job has not started yet: %s" msgstr "" #: src/clients/compute/arccat.cpp:188 #, c-format msgid "Cannot determine the %s location: %s" msgstr "" #: src/clients/compute/arccat.cpp:196 #, c-format msgid "Cannot create output of %s for job (%s): Invalid source %s" msgstr "" #: src/clients/compute/arccat.cpp:206 #, c-format msgid "Catting %s for job %s" msgstr "" #: src/clients/compute/arcclean.cpp:35 msgid "The arcclean command removes a job from the computing resource." msgstr "" #: src/clients/compute/arcclean.cpp:137 msgid "" "You are about to remove jobs from the job list for which no information " "could be\n" "found. NOTE: Recently submitted jobs might not have appeared in the " "information\n" "system, and this action will also remove such jobs." msgstr "" #: src/clients/compute/arcclean.cpp:140 msgid "Are you sure you want to clean jobs missing information?" 
msgstr "" #: src/clients/compute/arcclean.cpp:141 src/clients/compute/arcsync.cpp:189 msgid "y" msgstr "" #: src/clients/compute/arcclean.cpp:141 src/clients/compute/arcsync.cpp:189 msgid "n" msgstr "" #: src/clients/compute/arcclean.cpp:146 msgid "Jobs missing information will not be cleaned!" msgstr "" #: src/clients/compute/arcclean.cpp:162 src/clients/compute/arcresub.cpp:155 #: src/clients/compute/arcsub.cpp:208 src/clients/compute/arctest.cpp:302 #, c-format msgid "Warning: Failed to write job information to file (%s)" msgstr "" #: src/clients/compute/arcclean.cpp:163 msgid "" " Run 'arcclean -s Undefined' to remove cleaned jobs from job list" msgstr "" #: src/clients/compute/arcclean.cpp:172 #, c-format msgid "Jobs processed: %d, deleted: %d" msgstr "" #: src/clients/compute/arcget.cpp:36 msgid "The arcget command is used for retrieving the results from a job." msgstr "" #: src/clients/compute/arcget.cpp:76 #, c-format msgid "Job download directory from user configuration file: %s " msgstr "" #: src/clients/compute/arcget.cpp:79 msgid "Job download directory will be created in present working directory. " msgstr "" #: src/clients/compute/arcget.cpp:83 #, c-format msgid "Job download directory: %s " msgstr "" #: src/clients/compute/arcget.cpp:150 #, c-format msgid "Unable to create directory for storing results (%s) - %s" msgstr "" #: src/clients/compute/arcget.cpp:160 #, c-format msgid "Results stored at: %s" msgstr "" #: src/clients/compute/arcget.cpp:172 src/clients/compute/arckill.cpp:140 msgid "Warning: Some jobs were not removed from server" msgstr "" #: src/clients/compute/arcget.cpp:173 src/clients/compute/arcget.cpp:180 msgid " Use arclean to remove retrieved jobs from job list" msgstr "" #: src/clients/compute/arcget.cpp:179 src/clients/compute/arckill.cpp:147 #: src/clients/compute/arcresub.cpp:185 #, c-format msgid "Warning: Failed removing jobs from file (%s)" msgstr "" #: src/clients/compute/arcget.cpp:184 #, c-format msgid "" "Jobs processed: %d, successfully retrieved: %d, successfully cleaned: %d" msgstr "" #: src/clients/compute/arcget.cpp:188 #, c-format msgid "Jobs processed: %d, successfully retrieved: %d" msgstr "" #: src/clients/compute/arcinfo.cpp:34 msgid "[resource ...]" msgstr "" #: src/clients/compute/arcinfo.cpp:35 msgid "" "The arcinfo command is used for obtaining the status of computing resources " "on the Grid." msgstr "" #: src/clients/compute/arcinfo.cpp:142 msgid "Information endpoint" msgstr "" #: src/clients/compute/arcinfo.cpp:149 msgid "Submission endpoint" msgstr "" #: src/clients/compute/arcinfo.cpp:151 msgid "status" msgstr "" #: src/clients/compute/arcinfo.cpp:153 msgid "interface" msgstr "" #: src/clients/compute/arcinfo.cpp:172 msgid "ERROR: Failed to retrieve information from the following endpoints:" msgstr "" #: src/clients/compute/arcinfo.cpp:185 msgid "ERROR: Failed to retrieve information" msgstr "" #: src/clients/compute/arcinfo.cpp:187 msgid "from the following endpoints:" msgstr "" #: src/clients/compute/arckill.cpp:34 msgid "The arckill command is used to kill running jobs." 
msgstr "" #: src/clients/compute/arckill.cpp:141 msgid " Use arcclean to remove retrieved jobs from job list" msgstr "" #: src/clients/compute/arckill.cpp:148 msgid "" " Run 'arcclean -s Undefined' to remove killed jobs from job list" msgstr "" #: src/clients/compute/arckill.cpp:151 #, c-format msgid "Jobs processed: %d, successfully killed: %d, successfully cleaned: %d" msgstr "" #: src/clients/compute/arckill.cpp:153 #, c-format msgid "Jobs processed: %d, successfully killed: %d" msgstr "" #: src/clients/compute/arcrenew.cpp:128 #, c-format msgid "Jobs processed: %d, renewed: %d" msgstr "" #: src/clients/compute/arcresub.cpp:75 msgid "--same and --not-same cannot be specified together." msgstr "" #: src/clients/compute/arcresub.cpp:144 msgid "No jobs to resubmit with the specified status" msgstr "" #: src/clients/compute/arcresub.cpp:151 src/clients/compute/arcsub.cpp:194 #, c-format msgid "Job submitted with jobid: %s" msgstr "" #: src/clients/compute/arcresub.cpp:156 msgid " To recover missing jobs, run arcsync" msgstr "" #: src/clients/compute/arcresub.cpp:161 #, c-format msgid "Cannot write jobids to file (%s)" msgstr "" #: src/clients/compute/arcresub.cpp:172 #, c-format msgid "" "Resubmission of job (%s) succeeded, but killing the job failed - it will " "still appear in the job list" msgstr "" #: src/clients/compute/arcresub.cpp:181 #, c-format msgid "" "Resubmission of job (%s) succeeded, but cleaning the job failed - it will " "still appear in the job list" msgstr "" #: src/clients/compute/arcresub.cpp:186 msgid " Use arcclean to remove non-existing jobs" msgstr "" #: src/clients/compute/arcresub.cpp:193 msgid "Job resubmission summary:" msgstr "" #: src/clients/compute/arcresub.cpp:195 #, c-format msgid "%d of %d jobs were resubmitted" msgstr "" #: src/clients/compute/arcresub.cpp:197 #, c-format msgid "The following %d were not resubmitted" msgstr "" #: src/clients/compute/arcresume.cpp:128 #, c-format msgid "Jobs processed: %d, resumed: %d" msgstr "" #: src/clients/compute/arcstat.cpp:35 msgid "" "The arcstat command is used for obtaining the status of jobs that have\n" "been submitted to Grid enabled resources." msgstr "" #: src/clients/compute/arcstat.cpp:79 msgid "The 'sort' and 'rsort' flags cannot be specified at the same time." msgstr "" #: src/clients/compute/arcstat.cpp:149 msgid "No jobs found, try later" msgstr "" #: src/clients/compute/arcstat.cpp:176 #, c-format msgid "Status of %d jobs was queried, %d jobs returned information" msgstr "" #: src/clients/compute/arcsub.cpp:46 msgid "[filename ...]" msgstr "" #: src/clients/compute/arcsub.cpp:47 msgid "" "The arcsub command is used for submitting jobs to Grid enabled computing\n" "resources." 
msgstr "" #: src/clients/compute/arcsub.cpp:99 msgid "No job description input specified" msgstr "" #: src/clients/compute/arcsub.cpp:112 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:499 #, c-format msgid "Can not open job description file: %s" msgstr "" #: src/clients/compute/arcsub.cpp:140 src/clients/compute/arcsub.cpp:168 msgid "Invalid JobDescription:" msgstr "" #: src/clients/compute/arcsub.cpp:200 #, c-format msgid "Cannot write job IDs to file (%s)" msgstr "" #: src/clients/compute/arcsub.cpp:205 src/clients/compute/arcsync.cpp:66 #, c-format msgid "Warning: Unable to open job list file (%s), unknown format" msgstr "" #: src/clients/compute/arcsub.cpp:210 src/clients/compute/arctest.cpp:304 msgid "To recover missing jobs, run arcsync" msgstr "" #: src/clients/compute/arcsub.cpp:217 msgid "Job submission summary:" msgstr "" #: src/clients/compute/arcsub.cpp:219 #, c-format msgid "%d of %d jobs were submitted" msgstr "" #: src/clients/compute/arcsub.cpp:221 #, c-format msgid "The following %d were not submitted" msgstr "" #: src/clients/compute/arcsub.cpp:228 msgid "Job nr." msgstr "" #: src/clients/compute/arcsub.cpp:268 #, c-format msgid "Removing endpoint %s: It has an unrequested interface (%s)." msgstr "" #: src/clients/compute/arcsub.cpp:282 #, c-format msgid "ERROR: Unable to load broker %s" msgstr "" #: src/clients/compute/arcsub.cpp:286 msgid "" "ERROR: Job submission aborted because no resource returned any information" msgstr "" #: src/clients/compute/arcsub.cpp:290 msgid "ERROR: One or multiple job descriptions was not submitted." msgstr "" #: src/clients/compute/arcsub.cpp:304 #, c-format msgid "" "A computing resource using the GridFTP interface was requested, but\n" "%sthe corresponding plugin could not be loaded. Is the plugin installed?\n" "%sIf not, please install the package 'nordugrid-arc-plugins-globus'.\n" "%sDepending on your type of installation the package name might differ." msgstr "" #: src/clients/compute/arcsub.cpp:338 src/clients/compute/arctest.cpp:236 msgid "" "Unable to adapt job description to any resource, no resource information " "could be obtained." msgstr "" #: src/clients/compute/arcsub.cpp:339 src/clients/compute/arctest.cpp:237 msgid "Original job description is listed below:" msgstr "" #: src/clients/compute/arcsub.cpp:351 #, c-format msgid "Dumping job description aborted: Unable to load broker %s" msgstr "" #: src/clients/compute/arcsub.cpp:368 src/clients/compute/arctest.cpp:317 #, c-format msgid "" "Unable to prepare job description according to needs of the target resource " "(%s)." msgstr "" #: src/clients/compute/arcsub.cpp:384 src/clients/compute/arctest.cpp:330 #, c-format msgid "" "An error occurred during the generation of job description to be sent to %s" msgstr "" #: src/clients/compute/arcsub.cpp:388 src/clients/compute/arctest.cpp:334 #, c-format msgid "Job description to be sent to %s:" msgstr "" #: src/clients/compute/arcsub.cpp:406 msgid "" "Unable to prepare job description according to needs of the target resource." 
msgstr "" #: src/clients/compute/arcsync.cpp:76 msgid "Found the following jobs:" msgstr "" #: src/clients/compute/arcsync.cpp:86 msgid "Total number of jobs found: " msgstr "" #: src/clients/compute/arcsync.cpp:98 msgid "Found the following new jobs:" msgstr "" #: src/clients/compute/arcsync.cpp:108 msgid "Total number of new jobs found: " msgstr "" #: src/clients/compute/arcsync.cpp:113 #, c-format msgid "ERROR: Failed to write job information to file (%s)" msgstr "" #: src/clients/compute/arcsync.cpp:140 msgid "" "The arcsync command synchronizes your local job list with the information " "at\n" "the given resources or index servers." msgstr "" #: src/clients/compute/arcsync.cpp:183 msgid "" "Synchronizing the local list of active jobs with the information in the\n" "information system can result in some inconsistencies. Very recently " "submitted\n" "jobs might not yet be present, whereas jobs very recently scheduled for\n" "deletion can still be present." msgstr "" #: src/clients/compute/arcsync.cpp:188 msgid "Are you sure you want to synchronize your local job list?" msgstr "" #: src/clients/compute/arcsync.cpp:193 msgid "Cancelling synchronization request" msgstr "" #: src/clients/compute/arcsync.cpp:203 msgid "" "No services specified. Please configure default services in the client " "configuration,or specify a cluster or index (-c or -g options, see arcsync -" "h)." msgstr "" #: src/clients/compute/arctest.cpp:55 msgid " " msgstr "" #: src/clients/compute/arctest.cpp:56 msgid "The arctest command is used for testing clusters as resources." msgstr "" #: src/clients/compute/arctest.cpp:68 msgid "" "Nothing to do:\n" "you have to either specify a test job id with -J (--job)\n" "or query information about the certificates with -E (--certificate)\n" msgstr "" #: src/clients/compute/arctest.cpp:75 msgid "" "For the 1st test job you also have to specify a runtime value with -r (--" "runtime) option." msgstr "" #: src/clients/compute/arctest.cpp:109 msgid "Certificate information:" msgstr "" #: src/clients/compute/arctest.cpp:113 msgid "No user-certificate found" msgstr "" #: src/clients/compute/arctest.cpp:116 #, c-format msgid "Certificate: %s" msgstr "" #: src/clients/compute/arctest.cpp:118 #, c-format msgid "Subject name: %s" msgstr "" #: src/clients/compute/arctest.cpp:119 #, c-format msgid "Valid until: %s" msgstr "" #: src/clients/compute/arctest.cpp:123 msgid "Unable to determine certificate information" msgstr "" #: src/clients/compute/arctest.cpp:127 msgid "Proxy certificate information:" msgstr "" #: src/clients/compute/arctest.cpp:129 msgid "No proxy found" msgstr "" #: src/clients/compute/arctest.cpp:132 #, c-format msgid "Proxy: %s" msgstr "" #: src/clients/compute/arctest.cpp:133 #, c-format msgid "Proxy-subject: %s" msgstr "" #: src/clients/compute/arctest.cpp:135 msgid "Valid for: Proxy expired" msgstr "" #: src/clients/compute/arctest.cpp:137 msgid "Valid for: Proxy not valid" msgstr "" #: src/clients/compute/arctest.cpp:139 #, c-format msgid "Valid for: %s" msgstr "" #: src/clients/compute/arctest.cpp:144 #, c-format msgid "Certificate issuer: %s" msgstr "" #: src/clients/compute/arctest.cpp:148 msgid "CA-certificates installed:" msgstr "" #: src/clients/compute/arctest.cpp:170 msgid "Unable to detect if issuer certificate is installed." 
msgstr "" #: src/clients/compute/arctest.cpp:173 msgid "Your issuer's certificate is not installed" msgstr "" #: src/clients/compute/arctest.cpp:191 #, c-format msgid "No test-job, with ID \"%d\"" msgstr "" #: src/clients/compute/arctest.cpp:209 #, c-format msgid "Unable to load broker %s" msgstr "" #: src/clients/compute/arctest.cpp:212 #: src/hed/libs/compute/BrokerPlugin.cpp:106 #, c-format msgid "Broker %s loaded" msgstr "" #: src/clients/compute/arctest.cpp:234 msgid "Test aborted because no resource returned any information" msgstr "" #: src/clients/compute/arctest.cpp:247 msgid "" "ERROR: Test aborted because no suitable resources were found for the test-job" msgstr "" #: src/clients/compute/arctest.cpp:249 msgid "" "ERROR: Dumping job description aborted because no suitable resources were " "found for the test-job" msgstr "" #: src/clients/compute/arctest.cpp:258 #, c-format msgid "Submitting test-job %d:" msgstr "" #: src/clients/compute/arctest.cpp:262 #, c-format msgid "Client version: nordugrid-arc-%s" msgstr "" #: src/clients/compute/arctest.cpp:269 #, c-format msgid "Cannot write jobid (%s) to file (%s)" msgstr "" #: src/clients/compute/arctest.cpp:270 #, c-format msgid "Test submitted with jobid: %s" msgstr "" #: src/clients/compute/arctest.cpp:285 #, c-format msgid "Computing service: %s" msgstr "" #: src/clients/compute/arctest.cpp:291 msgid "Test failed, no more possible targets" msgstr "" #: src/clients/compute/utils.cpp:118 #, c-format msgid "Types of execution services %s is able to submit jobs to:" msgstr "" #: src/clients/compute/utils.cpp:121 #, c-format msgid "Types of registry services which %s is able collect information from:" msgstr "" #: src/clients/compute/utils.cpp:124 #, c-format msgid "" "Types of local information services which %s is able collect information " "from:" msgstr "" #: src/clients/compute/utils.cpp:127 #, c-format msgid "" "Types of local information services which %s is able collect job information " "from:" msgstr "" #: src/clients/compute/utils.cpp:130 #, c-format msgid "Types of services %s is able to manage jobs at:" msgstr "" #: src/clients/compute/utils.cpp:133 #, c-format msgid "Job description languages supported by %s:" msgstr "" #: src/clients/compute/utils.cpp:136 #, c-format msgid "Brokers available to %s:" msgstr "" #: src/clients/compute/utils.cpp:159 #, c-format msgid "" "Default broker (%s) is not available. When using %s a broker should be " "specified explicitly (-b option)." msgstr "" #: src/clients/compute/utils.cpp:169 msgid "Proxy expired. Job submission aborted. Please run 'arcproxy'!" msgstr "" #: src/clients/compute/utils.cpp:174 msgid "" "Cannot find any proxy. This application currently cannot run without a " "proxy.\n" " If you have the proxy file in a non-default location,\n" " please make sure the path is specified in the client configuration file.\n" " If you don't have a proxy yet, please run 'arcproxy'!" msgstr "" #: src/clients/compute/utils.cpp:278 msgid "" "select one or more computing elements: name can be an alias for a single CE, " "a group of CEs or a URL" msgstr "" #: src/clients/compute/utils.cpp:280 src/clients/compute/utils.cpp:297 #: src/clients/compute/utils.cpp:404 msgid "name" msgstr "" #: src/clients/compute/utils.cpp:285 msgid "" "the computing element specified by URL at the command line should be queried " "using this information interface (possible options: org.nordugrid.ldapng, " "org.nordugrid.ldapglue2, org.nordugrid.wsrfglue2, org.ogf.glue.emies." 
"resourceinfo)" msgstr "" #: src/clients/compute/utils.cpp:289 msgid "interfacename" msgstr "" #: src/clients/compute/utils.cpp:295 msgid "" "selecting a computing element for the new jobs with a URL or an alias, or " "selecting a group of computing elements with the name of the group" msgstr "" #: src/clients/compute/utils.cpp:303 msgid "force migration, ignore kill failure" msgstr "" #: src/clients/compute/utils.cpp:309 msgid "keep the files on the server (do not clean)" msgstr "" #: src/clients/compute/utils.cpp:315 msgid "do not ask for verification" msgstr "" #: src/clients/compute/utils.cpp:319 msgid "truncate the joblist before synchronizing" msgstr "" #: src/clients/compute/utils.cpp:325 src/clients/data/arcls.cpp:287 msgid "long format (more information)" msgstr "" #: src/clients/compute/utils.cpp:331 msgid "print a list of services configured in the client.conf" msgstr "" #: src/clients/compute/utils.cpp:337 msgid "show the stdout of the job (default)" msgstr "" #: src/clients/compute/utils.cpp:341 msgid "show the stderr of the job" msgstr "" #: src/clients/compute/utils.cpp:345 msgid "show the CE's error log of the job" msgstr "" #: src/clients/compute/utils.cpp:351 msgid "" "download directory (the job directory will be created in this directory)" msgstr "" #: src/clients/compute/utils.cpp:353 msgid "dirname" msgstr "" #: src/clients/compute/utils.cpp:357 msgid "use the jobname instead of the short ID as the job directory name" msgstr "" #: src/clients/compute/utils.cpp:362 msgid "force download (overwrite existing job directory)" msgstr "" #: src/clients/compute/utils.cpp:368 msgid "instead of the status only the IDs of the selected jobs will be printed" msgstr "" #: src/clients/compute/utils.cpp:372 msgid "sort jobs according to jobid, submissiontime or jobname" msgstr "" #: src/clients/compute/utils.cpp:373 src/clients/compute/utils.cpp:376 msgid "order" msgstr "" #: src/clients/compute/utils.cpp:375 msgid "reverse sorting of jobs according to jobid, submissiontime or jobname" msgstr "" #: src/clients/compute/utils.cpp:379 msgid "show jobs where status information is unavailable" msgstr "" #: src/clients/compute/utils.cpp:385 msgid "resubmit to the same resource" msgstr "" #: src/clients/compute/utils.cpp:389 msgid "do not resubmit to the same resource" msgstr "" #: src/clients/compute/utils.cpp:395 msgid "" "remove the job from the local list of jobs even if the job is not found in " "the infosys" msgstr "" #: src/clients/compute/utils.cpp:402 msgid "" "select one or more registries: name can be an alias for a single registry, a " "group of registries or a URL" msgstr "" #: src/clients/compute/utils.cpp:410 msgid "submit test job given by the number" msgstr "" #: src/clients/compute/utils.cpp:411 src/clients/compute/utils.cpp:415 msgid "int" msgstr "" #: src/clients/compute/utils.cpp:414 msgid "test job runtime specified by the number" msgstr "" #: src/clients/compute/utils.cpp:421 msgid "only select jobs whose status is statusstr" msgstr "" #: src/clients/compute/utils.cpp:422 msgid "statusstr" msgstr "" #: src/clients/compute/utils.cpp:428 msgid "all jobs" msgstr "" #: src/clients/compute/utils.cpp:434 msgid "jobdescription string describing the job to be submitted" msgstr "" #: src/clients/compute/utils.cpp:436 src/clients/compute/utils.cpp:442 #: src/clients/credentials/arcproxy.cpp:369 #: src/clients/credentials/arcproxy.cpp:376 #: src/clients/credentials/arcproxy.cpp:394 #: src/clients/credentials/arcproxy.cpp:401 #: src/clients/credentials/arcproxy.cpp:419 #: 
src/clients/credentials/arcproxy.cpp:423 #: src/clients/credentials/arcproxy.cpp:438 #: src/clients/credentials/arcproxy.cpp:448 #: src/clients/credentials/arcproxy.cpp:452 #: src/clients/credentials/arcproxyalt.cpp:369 #: src/clients/credentials/arcproxyalt.cpp:376 #: src/clients/credentials/arcproxyalt.cpp:399 #: src/clients/credentials/arcproxyalt.cpp:415 #: src/clients/credentials/arcproxyalt.cpp:419 #: src/clients/credentials/arcproxyalt.cpp:429 #: src/clients/credentials/arcproxyalt.cpp:433 msgid "string" msgstr "" #: src/clients/compute/utils.cpp:440 msgid "jobdescription file describing the job to be submitted" msgstr "" #: src/clients/compute/utils.cpp:448 msgid "select broker method (list available brokers with --listplugins flag)" msgstr "" #: src/clients/compute/utils.cpp:449 msgid "broker" msgstr "" #: src/clients/compute/utils.cpp:452 msgid "the IDs of the submitted jobs will be appended to this file" msgstr "" #: src/clients/compute/utils.cpp:453 src/clients/compute/utils.cpp:475 #: src/clients/compute/utils.cpp:512 src/clients/compute/utils.cpp:520 #: src/clients/credentials/arcproxy.cpp:461 #: src/clients/credentials/arcproxyalt.cpp:447 src/clients/data/arccp.cpp:627 #: src/clients/data/arcls.cpp:332 src/clients/data/arcmkdir.cpp:111 #: src/clients/data/arcrename.cpp:122 src/clients/data/arcrm.cpp:137 #: src/clients/echo/arcecho.cpp:47 src/clients/wsrf/arcwsrf.cpp:57 msgid "filename" msgstr "" #: src/clients/compute/utils.cpp:457 msgid "" "only use this interface for submitting (e.g. org.nordugrid.gridftpjob, org." "ogf.glue.emies.activitycreation, org.ogf.bes)" msgstr "" #: src/clients/compute/utils.cpp:459 src/clients/compute/utils.cpp:501 msgid "InterfaceName" msgstr "" #: src/clients/compute/utils.cpp:466 msgid "skip the service with the given URL during service discovery" msgstr "" #: src/clients/compute/utils.cpp:467 src/clients/compute/utils.cpp:480 #: src/clients/data/arccp.cpp:607 msgid "URL" msgstr "" #: src/clients/compute/utils.cpp:474 msgid "a file containing a list of jobIDs" msgstr "" #: src/clients/compute/utils.cpp:479 msgid "skip jobs which are on a computing element with a given URL" msgstr "" #: src/clients/compute/utils.cpp:485 msgid "submit jobs as dry run (no submission to batch system)" msgstr "" #: src/clients/compute/utils.cpp:488 msgid "submit directly - no resource discovery or matchmaking" msgstr "" #: src/clients/compute/utils.cpp:492 msgid "" "do not submit - dump job description in the language accepted by the target" msgstr "" #: src/clients/compute/utils.cpp:499 msgid "" "only get information about executon targets which support this job " "submission interface (e.g. org.nordugrid.gridftpjob, org.ogf.glue.emies." 
"activitycreation, org.ogf.bes)" msgstr "" #: src/clients/compute/utils.cpp:506 msgid "prints info about installed user- and CA-certificates" msgstr "" #: src/clients/compute/utils.cpp:511 #, c-format msgid "the file storing information about active jobs (default %s)" msgstr "" #: src/clients/compute/utils.cpp:519 src/clients/credentials/arcproxy.cpp:460 #: src/clients/credentials/arcproxyalt.cpp:446 src/clients/data/arccp.cpp:626 #: src/clients/data/arcls.cpp:331 src/clients/data/arcmkdir.cpp:110 #: src/clients/data/arcrename.cpp:121 src/clients/data/arcrm.cpp:136 #: src/clients/echo/arcecho.cpp:46 src/clients/wsrf/arcwsrf.cpp:56 msgid "configuration file (default ~/.arc/client.conf)" msgstr "" #: src/clients/compute/utils.cpp:522 src/clients/credentials/arcproxy.cpp:455 #: src/clients/credentials/arcproxyalt.cpp:441 src/clients/data/arccp.cpp:621 #: src/clients/data/arcls.cpp:326 src/clients/data/arcmkdir.cpp:105 #: src/clients/data/arcrename.cpp:116 src/clients/data/arcrm.cpp:131 #: src/clients/echo/arcecho.cpp:41 src/clients/wsrf/arcwsrf.cpp:51 msgid "timeout in seconds (default 20)" msgstr "" #: src/clients/compute/utils.cpp:523 src/clients/credentials/arcproxy.cpp:456 #: src/clients/credentials/arcproxyalt.cpp:442 src/clients/data/arccp.cpp:622 #: src/clients/data/arcls.cpp:327 src/clients/data/arcmkdir.cpp:106 #: src/clients/data/arcrename.cpp:117 src/clients/data/arcrm.cpp:132 #: src/clients/echo/arcecho.cpp:42 src/clients/wsrf/arcwsrf.cpp:52 msgid "seconds" msgstr "" #: src/clients/compute/utils.cpp:526 msgid "list the available plugins" msgstr "" #: src/clients/compute/utils.cpp:530 src/clients/credentials/arcproxy.cpp:465 #: src/clients/credentials/arcproxyalt.cpp:451 src/clients/data/arccp.cpp:631 #: src/clients/data/arcls.cpp:336 src/clients/data/arcmkdir.cpp:115 #: src/clients/data/arcrename.cpp:126 src/clients/data/arcrm.cpp:141 #: src/clients/echo/arcecho.cpp:51 src/clients/saml/saml_assertion_init.cpp:52 #: src/clients/wsrf/arcwsrf.cpp:61 #: src/hed/libs/compute/test_jobdescription.cpp:38 #: src/services/a-rex/grid-manager/inputcheck.cpp:81 msgid "FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG" msgstr "" #: src/clients/compute/utils.cpp:531 src/clients/credentials/arcproxy.cpp:466 #: src/clients/credentials/arcproxyalt.cpp:452 src/clients/data/arccp.cpp:632 #: src/clients/data/arcls.cpp:337 src/clients/data/arcmkdir.cpp:116 #: src/clients/data/arcrename.cpp:127 src/clients/data/arcrm.cpp:142 #: src/clients/echo/arcecho.cpp:52 src/clients/saml/saml_assertion_init.cpp:53 #: src/clients/wsrf/arcwsrf.cpp:62 #: src/hed/libs/compute/test_jobdescription.cpp:39 #: src/services/a-rex/grid-manager/inputcheck.cpp:82 msgid "debuglevel" msgstr "" #: src/clients/compute/utils.cpp:533 src/clients/credentials/arcproxy.cpp:469 #: src/clients/credentials/arcproxyalt.cpp:455 src/clients/data/arccp.cpp:635 #: src/clients/data/arcls.cpp:340 src/clients/data/arcmkdir.cpp:119 #: src/clients/data/arcrename.cpp:130 src/clients/data/arcrm.cpp:145 #: src/clients/echo/arcecho.cpp:55 src/clients/saml/saml_assertion_init.cpp:56 #: src/clients/wsrf/arcwsrf.cpp:65 msgid "print version information" msgstr "" #: src/clients/credentials/arcproxy.cpp:172 #: src/hed/libs/credential/ARCProxyUtil.cpp:1248 #, c-format msgid "There are %d user certificates existing in the NSS database" msgstr "" #: src/clients/credentials/arcproxy.cpp:188 #: src/hed/libs/credential/ARCProxyUtil.cpp:1264 #, c-format msgid "Number %d is with nickname: %s%s" msgstr "" #: src/clients/credentials/arcproxy.cpp:197 #: 
src/hed/libs/credential/ARCProxyUtil.cpp:1273 #, c-format msgid " expiration time: %s " msgstr "" #: src/clients/credentials/arcproxy.cpp:201 #: src/hed/libs/credential/ARCProxyUtil.cpp:1277 #, c-format msgid " certificate dn: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:202 #: src/hed/libs/credential/ARCProxyUtil.cpp:1278 #, c-format msgid " issuer dn: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:203 #: src/hed/libs/credential/ARCProxyUtil.cpp:1279 #, c-format msgid " serial number: %d" msgstr "" #: src/clients/credentials/arcproxy.cpp:207 #: src/hed/libs/credential/ARCProxyUtil.cpp:1283 #, c-format msgid "Please choose the one you would use (1-%d): " msgstr "" #: src/clients/credentials/arcproxy.cpp:272 #: src/clients/credentials/arcproxyalt.cpp:317 msgid "" "The arcproxy command creates a proxy from a key/certificate pair which can\n" "then be used to access grid resources." msgstr "" #: src/clients/credentials/arcproxy.cpp:274 msgid "" "Supported constraints are:\n" " validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start " "from now)\n" " validityEnd=time\n" " validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod and " "validityEnd\n" " not specified, the default is 12 hours for local proxy, and 168 hours " "for delegated\n" " proxy on myproxy server)\n" " vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, the " "default\n" " is the minimum value of 12 hours and validityPeriod)\n" " myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy " "server,\n" " e.g. 43200 or 12h or 12H; if not specified, the default is the minimum " "value of\n" " 12 hours and validityPeriod (which is lifetime of the delegated proxy on " "myproxy server))\n" " proxyPolicy=policy content\n" " proxyPolicyFile=policy file\n" " keybits=number - length of the key to generate. Default is 1024 bits.\n" " Special value 'inherit' is to use key length of signing certificate.\n" " signingAlgorithm=name - signing algorithm to use for signing public key of " "proxy.\n" " Possible values are sha1, sha2 (alias for sha256), sha224, sha256, " "sha384, sha512\n" " and inherit (use algorithm of signing certificate). 
Default is inherit.\n" " With old systems, only sha1 is acceptable.\n" "\n" "Supported information item names are:\n" " subject - subject name of proxy certificate.\n" " identity - identity subject name of proxy certificate.\n" " issuer - issuer subject name of proxy certificate.\n" " ca - subject name of CA ehich issued initial certificate\n" " path - file system path to file containing proxy.\n" " type - type of proxy certificate.\n" " validityStart - timestamp when proxy validity starts.\n" " validityEnd - timestamp when proxy validity ends.\n" " validityPeriod - duration of proxy validity in seconds.\n" " validityLeft - duration of proxy validity left in seconds.\n" " vomsVO - VO name represented by VOMS attribute\n" " vomsSubject - subject of certificate for which VOMS attribute is issued\n" " vomsIssuer - subject of service which issued VOMS certificate\n" " vomsACvalidityStart - timestamp when VOMS attribute validity starts.\n" " vomsACvalidityEnd - timestamp when VOMS attribute validity ends.\n" " vomsACvalidityPeriod - duration of VOMS attribute validity in seconds.\n" " vomsACvalidityLeft - duration of VOMS attribute validity left in seconds.\n" " proxyPolicy\n" " keybits - size of proxy certificate key in bits.\n" " signingAlgorithm - algorithm used to sign proxy certificate.\n" "Items are printed in requested order and are separated by newline.\n" "If item has multiple values they are printed in same line separated by |.\n" "\n" "Supported password destinations are:\n" " key - for reading private key\n" " myproxy - for accessing credentials at MyProxy service\n" " myproxynew - for creating credentials at MyProxy service\n" " all - for any purspose.\n" "\n" "Supported password sources are:\n" " quoted string (\"password\") - explicitly specified password\n" " int - interactively request password from console\n" " stdin - read password from standard input delimited by newline\n" " file:filename - read password from file named filename\n" " stream:# - read password from input stream number #.\n" " Currently only 0 (standard input) is supported.\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:334 #: src/clients/credentials/arcproxyalt.cpp:334 msgid "path to the proxy file" msgstr "" #: src/clients/credentials/arcproxy.cpp:335 #: src/clients/credentials/arcproxy.cpp:339 #: src/clients/credentials/arcproxy.cpp:343 #: src/clients/credentials/arcproxy.cpp:347 #: src/clients/credentials/arcproxy.cpp:351 #: src/clients/credentials/arcproxy.cpp:355 #: src/clients/credentials/arcproxyalt.cpp:335 #: src/clients/credentials/arcproxyalt.cpp:339 #: src/clients/credentials/arcproxyalt.cpp:343 #: src/clients/credentials/arcproxyalt.cpp:347 #: src/clients/credentials/arcproxyalt.cpp:351 #: src/clients/credentials/arcproxyalt.cpp:355 src/clients/data/arccp.cpp:584 #: src/clients/saml/saml_assertion_init.cpp:48 msgid "path" msgstr "" #: src/clients/credentials/arcproxy.cpp:338 #: src/clients/credentials/arcproxyalt.cpp:338 msgid "" "path to the certificate file, it can be either PEM, DER, or PKCS12 formated" msgstr "" #: src/clients/credentials/arcproxy.cpp:342 #: src/clients/credentials/arcproxyalt.cpp:342 msgid "" "path to the private key file, if the certificate is in PKCS12 format, then " "no need to give private key" msgstr "" #: src/clients/credentials/arcproxy.cpp:346 #: src/clients/credentials/arcproxyalt.cpp:346 msgid "" "path to the trusted certificate directory, only needed for the VOMS client " "functionality" msgstr "" #: src/clients/credentials/arcproxy.cpp:350 #: 
src/clients/credentials/arcproxyalt.cpp:350 msgid "" "path to the top directory of VOMS *.lsc files, only needed for the VOMS " "client functionality" msgstr "" #: src/clients/credentials/arcproxy.cpp:354 #: src/clients/credentials/arcproxyalt.cpp:354 msgid "path to the VOMS server configuration file" msgstr "" #: src/clients/credentials/arcproxy.cpp:358 #: src/clients/credentials/arcproxyalt.cpp:358 msgid "" "voms<:command>. Specify VOMS server (More than one VOMS server \n" " can be specified like this: --voms VOa:command1 --voms VOb:" "command2). \n" " :command is optional, and is used to ask for specific " "attributes(e.g: roles)\n" " command options are:\n" " all --- put all of this DN's attributes into AC; \n" " list ---list all of the DN's attribute, will not create AC " "extension; \n" " /Role=yourRole --- specify the role, if this DN \n" " has such a role, the role will be put into " "AC \n" " /voname/groupname/Role=yourRole --- specify the VO, group and " "role; if this DN \n" " has such a role, the role will be put into " "AC \n" msgstr "" #: src/clients/credentials/arcproxy.cpp:372 #: src/clients/credentials/arcproxyalt.cpp:372 msgid "" "group<:role>. Specify ordering of attributes \n" " Example: --order /knowarc.eu/coredev:Developer,/knowarc.eu/" "testers:Tester \n" " or: --order /knowarc.eu/coredev:Developer --order /knowarc.eu/" "testers:Tester \n" " Note that it does not make sense to specify the order if you have two or " "more different VOMS servers specified" msgstr "" #: src/clients/credentials/arcproxy.cpp:379 #: src/clients/credentials/arcproxyalt.cpp:379 msgid "use GSI communication protocol for contacting VOMS services" msgstr "" #: src/clients/credentials/arcproxy.cpp:382 #: src/clients/credentials/arcproxyalt.cpp:382 msgid "" "use HTTP communication protocol for contacting VOMS services that provide " "RESTful access \n" " Note for RESTful access, 'list' command and multiple VOMS " "server are not supported\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:388 msgid "" "this option is not functional (old GSI proxies are not supported anymore)" msgstr "" #: src/clients/credentials/arcproxy.cpp:391 msgid "print all information about this proxy." msgstr "" #: src/clients/credentials/arcproxy.cpp:394 msgid "print selected information about this proxy." msgstr "" #: src/clients/credentials/arcproxy.cpp:397 #: src/clients/credentials/arcproxyalt.cpp:395 msgid "remove proxy" msgstr "" #: src/clients/credentials/arcproxy.cpp:400 msgid "" "username to MyProxy server (if missing subject of user certificate is used)" msgstr "" #: src/clients/credentials/arcproxy.cpp:405 #: src/clients/credentials/arcproxyalt.cpp:402 msgid "" "don't prompt for a credential passphrase, when retrieve a \n" " credential from on MyProxy server. \n" " The precondition of this choice is the credential is PUT onto\n" " the MyProxy server without a passphrase by using -R (--" "retrievable_by_cert) \n" " option when being PUTing onto Myproxy server. \n" " This option is specific for the GET command when contacting " "Myproxy server." msgstr "" #: src/clients/credentials/arcproxy.cpp:416 #: src/clients/credentials/arcproxyalt.cpp:412 msgid "" "Allow specified entity to retrieve credential without passphrase.\n" " This option is specific for the PUT command when contacting " "Myproxy server." 
msgstr "" #: src/clients/credentials/arcproxy.cpp:422 #: src/clients/credentials/arcproxyalt.cpp:418 msgid "hostname[:port] of MyProxy server" msgstr "" #: src/clients/credentials/arcproxy.cpp:427 msgid "" "command to MyProxy server. The command can be PUT, GET, INFO, NEWPASS or " "DESTROY.\n" " PUT -- put a delegated credentials to the MyProxy server; \n" " GET -- get a delegated credentials from the MyProxy server; \n" " INFO -- get and present information about credentials stored " "at the MyProxy server; \n" " NEWPASS -- change password protecting credentials stored at " "the MyProxy server; \n" " DESTROY -- wipe off credentials stored at the MyProxy " "server; \n" " Local credentials (certificate and key) are not necessary " "except in case of PUT. \n" " MyProxy functionality can be used together with VOMS " "functionality.\n" " --voms and --vomses can be used for Get command if VOMS " "attributes\n" " is required to be included in the proxy.\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:442 msgid "" "use NSS credential database in default Mozilla profiles, \n" " including Firefox, Seamonkey and Thunderbird.\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:447 #: src/clients/credentials/arcproxyalt.cpp:432 msgid "proxy constraints" msgstr "" #: src/clients/credentials/arcproxy.cpp:451 msgid "password destination=password source" msgstr "" #: src/clients/credentials/arcproxy.cpp:500 #: src/clients/credentials/arcproxy.cpp:1161 #: src/clients/credentials/arcproxyalt.cpp:513 #: src/clients/credentials/arcproxyalt.cpp:556 msgid "Failed configuration initialization." msgstr "" #: src/clients/credentials/arcproxy.cpp:518 #: src/clients/credentials/arcproxyalt.cpp:563 msgid "" "Failed to find certificate and/or private key or files have improper " "permissions or ownership." msgstr "" #: src/clients/credentials/arcproxy.cpp:519 #: src/clients/credentials/arcproxy.cpp:531 #: src/clients/credentials/arcproxyalt.cpp:564 #: src/clients/credentials/arcproxyalt.cpp:574 msgid "You may try to increase verbosity to get more information." msgstr "" #: src/clients/credentials/arcproxy.cpp:527 #: src/clients/credentials/arcproxyalt.cpp:570 msgid "Failed to find CA certificates" msgstr "" #: src/clients/credentials/arcproxy.cpp:528 #: src/clients/credentials/arcproxyalt.cpp:571 msgid "" "Cannot find the CA certificates directory path, please set environment " "variable X509_CERT_DIR, or cacertificatesdirectory in a configuration file." msgstr "" #: src/clients/credentials/arcproxy.cpp:532 #: src/clients/credentials/arcproxyalt.cpp:575 msgid "" "The CA certificates directory is required for contacting VOMS and MyProxy " "servers." msgstr "" #: src/clients/credentials/arcproxy.cpp:544 msgid "" "$X509_VOMS_FILE, and $X509_VOMSES are not set;\n" "User has not specified the location for vomses information;\n" "There is also not vomses location information in user's configuration file;\n" "Can not find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses,\n" "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/" "vomses,\n" "/etc/vomses, /etc/grid-security/vomses, and the location at the " "corresponding sub-directory" msgstr "" #: src/clients/credentials/arcproxy.cpp:589 #: src/clients/credentials/arcproxyalt.cpp:604 src/clients/echo/arcecho.cpp:84 msgid "Wrong number of arguments!" 
msgstr "" #: src/clients/credentials/arcproxy.cpp:597 #: src/clients/credentials/arcproxy.cpp:618 #: src/clients/credentials/arcproxyalt.cpp:612 #: src/clients/credentials/arcproxyalt.cpp:632 msgid "" "Cannot find the path of the proxy file, please setup environment " "X509_USER_PROXY, or proxypath in a configuration file" msgstr "" #: src/clients/credentials/arcproxy.cpp:604 #: src/clients/credentials/arcproxyalt.cpp:621 #, c-format msgid "Cannot remove proxy file at %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:606 #: src/clients/credentials/arcproxyalt.cpp:617 #, c-format msgid "Cannot remove proxy file at %s, because it's not there" msgstr "" #: src/clients/credentials/arcproxy.cpp:624 #: src/clients/credentials/arcproxyalt.cpp:638 #, c-format msgid "" "Cannot find file at %s for getting the proxy. Please make sure this file " "exists." msgstr "" #: src/clients/credentials/arcproxy.cpp:630 #: src/clients/credentials/arcproxyalt.cpp:651 #, c-format msgid "Subject: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:631 #: src/clients/credentials/arcproxyalt.cpp:652 #, c-format msgid "Issuer: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:632 #: src/clients/credentials/arcproxyalt.cpp:653 #, c-format msgid "Identity: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:634 #: src/clients/credentials/arcproxyalt.cpp:657 msgid "Time left for proxy: Proxy expired" msgstr "" #: src/clients/credentials/arcproxy.cpp:636 #: src/clients/credentials/arcproxyalt.cpp:659 msgid "Time left for proxy: Proxy not valid yet" msgstr "" #: src/clients/credentials/arcproxy.cpp:638 #: src/clients/credentials/arcproxyalt.cpp:661 #, c-format msgid "Time left for proxy: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:639 #: src/clients/credentials/arcproxyalt.cpp:663 #, c-format msgid "Proxy path: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:640 #, c-format msgid "Proxy type: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:641 #, c-format msgid "Proxy key length: %i" msgstr "" #: src/clients/credentials/arcproxy.cpp:642 #, c-format msgid "Proxy signature: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:651 #: src/clients/credentials/arcproxyalt.cpp:675 msgid "AC extension information for VO " msgstr "" #: src/clients/credentials/arcproxy.cpp:654 #: src/clients/credentials/arcproxyalt.cpp:678 msgid "Error detected while parsing this AC" msgstr "" #: src/clients/credentials/arcproxy.cpp:667 #: src/clients/credentials/arcproxyalt.cpp:691 msgid "AC is invalid: " msgstr "" #: src/clients/credentials/arcproxy.cpp:720 #: src/clients/credentials/arcproxyalt.cpp:732 msgid "Time left for AC: AC is not valid yet" msgstr "" #: src/clients/credentials/arcproxy.cpp:722 #: src/clients/credentials/arcproxyalt.cpp:734 msgid "Time left for AC: AC has expired" msgstr "" #: src/clients/credentials/arcproxy.cpp:724 #: src/clients/credentials/arcproxyalt.cpp:736 #, c-format msgid "Time left for AC: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:815 #, c-format msgid "Information item '%s' is not known" msgstr "" #: src/clients/credentials/arcproxy.cpp:824 #: src/clients/credentials/arcproxyalt.cpp:746 msgid "" "Cannot find the user certificate path, please setup environment " "X509_USER_CERT, or certificatepath in a configuration file" msgstr "" #: src/clients/credentials/arcproxy.cpp:828 #: src/clients/credentials/arcproxyalt.cpp:750 msgid "" "Cannot find the user private key path, please setup environment " "X509_USER_KEY, or keypath in a configuration file" msgstr "" #: 
src/clients/credentials/arcproxy.cpp:852 #, c-format msgid "" "Cannot parse password source expression %s it must be of type=source format" msgstr "" #: src/clients/credentials/arcproxy.cpp:869 #, c-format msgid "" "Cannot parse password type %s. Currently supported values are " "'key','myproxy','myproxynew' and 'all'." msgstr "" #: src/clients/credentials/arcproxy.cpp:884 #, c-format msgid "" "Cannot parse password source %s it must be of source_type or source_type:" "data format. Supported source types are int,stdin,stream,file." msgstr "" #: src/clients/credentials/arcproxy.cpp:898 msgid "Only standard input is currently supported for password source." msgstr "" #: src/clients/credentials/arcproxy.cpp:903 #, c-format msgid "" "Cannot parse password source type %s. Supported source types are int,stdin," "stream,file." msgstr "" #: src/clients/credentials/arcproxy.cpp:942 #: src/clients/credentials/arcproxyalt.cpp:782 msgid "The start, end and period can't be set simultaneously" msgstr "" #: src/clients/credentials/arcproxy.cpp:948 #: src/clients/credentials/arcproxyalt.cpp:788 #, c-format msgid "The start time that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:955 #: src/clients/credentials/arcproxyalt.cpp:795 #, c-format msgid "The period that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:962 #: src/clients/credentials/arcproxyalt.cpp:802 #, c-format msgid "The end time that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:971 #: src/clients/credentials/arcproxyalt.cpp:811 #, c-format msgid "The end time that you set: %s is before start time:%s." msgstr "" #: src/clients/credentials/arcproxy.cpp:982 #: src/clients/credentials/arcproxyalt.cpp:822 #, c-format msgid "WARNING: The start time that you set: %s is before current time: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:985 #: src/clients/credentials/arcproxyalt.cpp:825 #, c-format msgid "WARNING: The end time that you set: %s is before current time: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:995 #: src/clients/credentials/arcproxyalt.cpp:835 #, c-format msgid "The VOMS AC period that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1013 #: src/clients/credentials/arcproxyalt.cpp:853 #, c-format msgid "The MyProxy period that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1028 #, c-format msgid "The keybits constraint is wrong: %s." 
msgstr "" #: src/clients/credentials/arcproxy.cpp:1042 #: src/clients/credentials/arcproxyalt.cpp:476 #: src/hed/libs/credential/ARCProxyUtil.cpp:1303 msgid "The NSS database can not be detected in the Firefox profile" msgstr "" #: src/clients/credentials/arcproxy.cpp:1051 #: src/hed/libs/credential/ARCProxyUtil.cpp:1311 #, c-format msgid "" "There are %d NSS base directories where the certificate, key, and module " "databases live" msgstr "" #: src/clients/credentials/arcproxy.cpp:1053 #: src/hed/libs/credential/ARCProxyUtil.cpp:1315 #, c-format msgid "Number %d is: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1055 #, c-format msgid "Please choose the NSS database you would like to use (1-%d): " msgstr "" #: src/clients/credentials/arcproxy.cpp:1071 #: src/clients/credentials/arcproxyalt.cpp:482 #: src/hed/libs/credential/ARCProxyUtil.cpp:1329 #, c-format msgid "NSS database to be accessed: %s\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:1142 #: src/hed/libs/credential/ARCProxyUtil.cpp:1503 #, c-format msgid "Certificate to use is: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1190 #: src/clients/credentials/arcproxy.cpp:1306 #: src/clients/credentials/arcproxyalt.cpp:539 #: src/clients/credentials/arcproxyalt.cpp:955 #: src/hed/libs/credential/ARCProxyUtil.cpp:1560 msgid "Proxy generation succeeded" msgstr "" #: src/clients/credentials/arcproxy.cpp:1191 #: src/clients/credentials/arcproxy.cpp:1307 #: src/clients/credentials/arcproxyalt.cpp:540 #: src/clients/credentials/arcproxyalt.cpp:956 #: src/hed/libs/credential/ARCProxyUtil.cpp:1561 #, c-format msgid "Your proxy is valid until: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1210 msgid "" "The old GSI proxies are not supported anymore. Please do not use -O/--old " "option." msgstr "" #: src/clients/credentials/arcproxy.cpp:1229 src/hed/mcc/tls/MCCTLS.cpp:167 #: src/hed/mcc/tls/MCCTLS.cpp:200 src/hed/mcc/tls/MCCTLS.cpp:226 msgid "VOMS attribute parsing failed" msgstr "" #: src/clients/credentials/arcproxy.cpp:1231 msgid "Myproxy server did not return proxy with VOMS AC included" msgstr "" #: src/clients/credentials/arcproxy.cpp:1252 #: src/clients/credentials/arcproxyalt.cpp:892 #: src/hed/libs/credential/ARCProxyUtil.cpp:341 msgid "Proxy generation failed: No valid certificate found." msgstr "" #: src/clients/credentials/arcproxy.cpp:1258 #: src/clients/credentials/arcproxyalt.cpp:899 #: src/hed/libs/credential/ARCProxyUtil.cpp:348 msgid "Proxy generation failed: No valid private key found." msgstr "" #: src/clients/credentials/arcproxy.cpp:1263 #: src/clients/credentials/arcproxyalt.cpp:902 #: src/hed/libs/credential/ARCProxyUtil.cpp:173 #, c-format msgid "Your identity: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1265 #: src/clients/credentials/arcproxyalt.cpp:907 #: src/hed/libs/credential/ARCProxyUtil.cpp:356 msgid "Proxy generation failed: Certificate has expired." msgstr "" #: src/clients/credentials/arcproxy.cpp:1269 #: src/clients/credentials/arcproxyalt.cpp:911 #: src/hed/libs/credential/ARCProxyUtil.cpp:361 msgid "Proxy generation failed: Certificate is not valid yet." msgstr "" #: src/clients/credentials/arcproxy.cpp:1280 msgid "Proxy generation failed: Failed to create temporary file." msgstr "" #: src/clients/credentials/arcproxy.cpp:1288 msgid "Proxy generation failed: Failed to retrieve VOMS information." 
msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:100 #: src/clients/credentials/arcproxyalt.cpp:1312 #: src/hed/libs/credential/ARCProxyUtil.cpp:844 msgid "Succeeded to get info from MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:144 #: src/clients/credentials/arcproxyalt.cpp:1368 #: src/hed/libs/credential/ARCProxyUtil.cpp:900 msgid "Succeeded to change password on MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:185 #: src/clients/credentials/arcproxyalt.cpp:1417 #: src/hed/libs/credential/ARCProxyUtil.cpp:949 msgid "Succeeded to destroy credential on MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:265 #: src/clients/credentials/arcproxyalt.cpp:1506 #: src/hed/libs/credential/ARCProxyUtil.cpp:1038 #, c-format msgid "Succeeded to get a proxy in %s from MyProxy server %s" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:318 #: src/clients/credentials/arcproxyalt.cpp:1565 #: src/hed/libs/credential/ARCProxyUtil.cpp:1097 msgid "Succeeded to put a proxy onto MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_proxy.cpp:93 #: src/clients/credentials/arcproxyalt.cpp:1222 #: src/hed/libs/credential/ARCProxyUtil.cpp:403 #: src/hed/libs/credential/ARCProxyUtil.cpp:1410 msgid "Failed to add VOMS AC extension. Your proxy may be incomplete." msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:63 msgid "" "Failed to process VOMS configuration or no suitable configuration lines " "found." msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:75 #, c-format msgid "Failed to parse requested VOMS lifetime: %s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:93 #: src/clients/credentials/arcproxyalt.cpp:1051 #: src/hed/libs/credential/ARCProxyUtil.cpp:640 #, c-format msgid "Cannot get VOMS server address information from vomses line: \"%s\"" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:97 #: src/clients/credentials/arcproxy_voms.cpp:99 #: src/clients/credentials/arcproxyalt.cpp:1061 #: src/clients/credentials/arcproxyalt.cpp:1063 #: src/hed/libs/credential/ARCProxyUtil.cpp:650 #: src/hed/libs/credential/ARCProxyUtil.cpp:652 #, c-format msgid "Contacting VOMS server (named %s): %s on port: %s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:105 #, c-format msgid "Failed to parse requested VOMS server port number: %s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:122 msgid "List functionality is not supported for RESTful VOMS interface" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:132 #: src/clients/credentials/arcproxy_voms.cpp:188 #, c-format msgid "" "The VOMS server with the information:\n" "\t%s\n" "can not be reached, please make sure it is available." 
msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:133 #: src/clients/credentials/arcproxy_voms.cpp:138 #: src/clients/credentials/arcproxy_voms.cpp:189 #: src/clients/credentials/arcproxy_voms.cpp:194 #, c-format msgid "" "Collected error is:\n" "\t%s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:137 #: src/clients/credentials/arcproxy_voms.cpp:193 #, c-format msgid "No valid response from VOMS server: %s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:155 msgid "List functionality is not supported for legacy VOMS interface" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:167 #, c-format msgid "Failed to parse VOMS command: %s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:204 #, c-format msgid "" "There are %d servers with the same name: %s in your vomses file, but all of " "them can not be reached, or can not return valid message." msgstr "" #: src/clients/credentials/arcproxyalt.cpp:108 #: src/hed/libs/credential/ARCProxyUtil.cpp:258 #, c-format msgid "OpenSSL error -- %s" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:109 #: src/hed/libs/credential/ARCProxyUtil.cpp:259 #, c-format msgid "Library : %s" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:110 #: src/hed/libs/credential/ARCProxyUtil.cpp:260 #, c-format msgid "Function : %s" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:111 #: src/hed/libs/credential/ARCProxyUtil.cpp:261 #, c-format msgid "Reason : %s" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:167 #: src/hed/libs/credential/ARCProxyUtil.cpp:317 msgid "User interface error" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:173 #: src/hed/libs/credential/ARCProxyUtil.cpp:323 msgid "Aborted!" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:319 msgid "" "Supported constraints are:\n" " validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start " "from now)\n" " validityEnd=time\n" " validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod and " "validityEnd\n" " not specified, the default is 12 hours for local proxy, and 168 hours for " "delegated\n" " proxy on myproxy server)\n" " vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, the " "default\n" " is the minimum value of 12 hours and validityPeriod)\n" " myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy " "server,\n" " e.g. 43200 or 12h or 12H; if not specified, the default is the minimum " "value of\n" " 12 hours and validityPeriod (which is lifetime of the delegated proxy on " "myproxy server))\n" " proxyPolicy=policy content\n" " proxyPolicyFile=policy file" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:388 msgid "" "print all information about this proxy. \n" " In order to show the Identity (DN without CN as suffix for " "proxy) \n" " of the certificate, the 'trusted certdir' is needed." msgstr "" #: src/clients/credentials/arcproxyalt.cpp:398 msgid "username to MyProxy server" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:422 msgid "" "command to MyProxy server. The command can be PUT or GET.\n" " PUT/put/Put -- put a delegated credential to the MyProxy " "server; \n" " GET/get/Get -- get a delegated credential from the MyProxy " "server, \n" " credential (certificate and key) is not needed in this case. 
\n" " MyProxy functionality can be used together with VOMS\n" " functionality.\n" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:437 msgid "use NSS credential database in the Firefox profile" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1040 #: src/hed/libs/credential/ARCProxyUtil.cpp:629 #, c-format msgid "There are %d commands to the same VOMS server %s" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1094 #: src/hed/libs/credential/ARCProxyUtil.cpp:683 #, c-format msgid "Try to get attribute from VOMS server with order: %s" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1097 #: src/hed/libs/credential/ARCProxyUtil.cpp:686 #, c-format msgid "Message sent to VOMS server %s is: %s" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1116 #: src/hed/libs/credential/ARCProxyUtil.cpp:705 #, c-format msgid "" "The VOMS server with the information:\n" "\t%s\n" "can not be reached, please make sure it is available" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1120 #: src/hed/libs/credential/ARCProxyUtil.cpp:709 msgid "No HTTP response from VOMS server" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1125 #: src/clients/credentials/arcproxyalt.cpp:1151 #: src/hed/libs/credential/ARCProxyUtil.cpp:714 #: src/hed/libs/credential/ARCProxyUtil.cpp:740 #, c-format msgid "Returned message from VOMS server: %s" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1137 #: src/hed/libs/credential/ARCProxyUtil.cpp:726 #, c-format msgid "" "The VOMS server with the information:\n" "\t%s\"\n" "can not be reached, please make sure it is available" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1141 #: src/hed/libs/credential/ARCProxyUtil.cpp:730 msgid "No stream response from VOMS server" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1163 #: src/hed/libs/credential/ARCProxyUtil.cpp:752 #, c-format msgid "" "The validity duration of VOMS AC is shortened from %s to %s, due to the " "validity constraint on voms server side.\n" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1166 #: src/hed/libs/credential/ARCProxyUtil.cpp:755 #, c-format msgid "" "Cannot get any AC or attributes info from VOMS server: %s;\n" " Returned message from VOMS server: %s\n" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1171 #: src/hed/libs/credential/ARCProxyUtil.cpp:760 #, c-format msgid "Returned message from VOMS server %s is: %s\n" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1193 #: src/hed/libs/credential/ARCProxyUtil.cpp:782 #, c-format msgid "The attribute information from VOMS server: %s is list as following:" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1205 #: src/hed/libs/credential/ARCProxyUtil.cpp:794 #, c-format msgid "" "There are %d servers with the same name: %s in your vomses file, but all of " "them can not be reached, or can not return valid message. But proxy without " "VOMS AC extension will still be generated." 
msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1220 #, c-format msgid "Failed to add extension: %s" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1238 #: src/hed/libs/credential/ARCProxyUtil.cpp:443 #: src/hed/libs/credential/Credential.cpp:884 #, c-format msgid "Error: can't open policy file: %s" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1248 #: src/hed/libs/credential/ARCProxyUtil.cpp:453 #: src/hed/libs/credential/Credential.cpp:897 #, c-format msgid "Error: policy location: %s is not a regular file" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1600 msgid "" "$X509_VOMS_FILE, and $X509_VOMSES are not set;\n" "User has not specify the location for vomses information;\n" "There is also not vomses location information in user's configuration file;\n" "Cannot find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses, " "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/" "vomses, /etc/vomses, /etc/grid-security/vomses, and the location at the " "corresponding sub-directory" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1640 #: src/hed/libs/credential/ARCProxyUtil.cpp:552 #, c-format msgid "VOMS line contains wrong number of tokens (%u expected): \"%s\"" msgstr "" #: src/clients/credentials/arcproxyalt.cpp:1684 #: src/hed/libs/credential/ARCProxyUtil.cpp:596 #, c-format msgid "Cannot get VOMS server %s information from the vomses files" msgstr "" #: src/clients/credentials/test2myproxyserver_get.cpp:89 #: src/clients/credentials/test2myproxyserver_get.cpp:131 #: src/clients/credentials/test2myproxyserver_put.cpp:88 #: src/clients/credentials/test2myproxyserver_put.cpp:182 #: src/clients/credentials/test2vomsserver.cpp:101 msgid "No stream response" msgstr "" #: src/clients/credentials/test2myproxyserver_get.cpp:104 #: src/clients/credentials/test2myproxyserver_get.cpp:143 #: src/clients/credentials/test2myproxyserver_get.cpp:190 #: src/clients/credentials/test2myproxyserver_put.cpp:103 #: src/clients/credentials/test2myproxyserver_put.cpp:116 #: src/clients/credentials/test2myproxyserver_put.cpp:194 #, c-format msgid "Returned msg from myproxy server: %s %d" msgstr "" #: src/clients/credentials/test2myproxyserver_get.cpp:149 #, c-format msgid "There are %d certificates in the returned msg" msgstr "" #: src/clients/credentials/test2myproxyserver_put.cpp:135 msgid "Delegate proxy failed" msgstr "" #: src/clients/credentials/test2vomsserver.cpp:116 #, c-format msgid "Returned msg from voms server: %s " msgstr "" #: src/clients/data/arccp.cpp:77 src/clients/data/arccp.cpp:330 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:426 #, c-format msgid "Current transfer FAILED: %s" msgstr "" #: src/clients/data/arccp.cpp:79 src/clients/data/arccp.cpp:135 #: src/clients/data/arccp.cpp:332 src/clients/data/arcls.cpp:224 #: src/clients/data/arcmkdir.cpp:73 src/clients/data/arcrename.cpp:89 #: src/clients/data/arcrm.cpp:95 msgid "This seems like a temporary error, please try again later" msgstr "" #: src/clients/data/arccp.cpp:87 src/clients/data/arccp.cpp:96 #, c-format msgid "Unable to copy %s" msgstr "" #: src/clients/data/arccp.cpp:88 src/clients/data/arccp.cpp:97 #: src/clients/data/arcls.cpp:150 src/clients/data/arcls.cpp:159 #: src/clients/data/arcmkdir.cpp:55 src/clients/data/arcmkdir.cpp:64 #: src/clients/data/arcrename.cpp:67 src/clients/data/arcrename.cpp:76 #: src/clients/data/arcrm.cpp:68 src/clients/data/arcrm.cpp:80 msgid "Invalid credentials, please check proxy and/or CA certificates" msgstr "" #: src/clients/data/arccp.cpp:94 
src/clients/data/arcls.cpp:156 #: src/clients/data/arcmkdir.cpp:61 src/clients/data/arcrename.cpp:73 #: src/clients/data/arcrm.cpp:77 msgid "Proxy expired" msgstr "" #: src/clients/data/arccp.cpp:112 src/clients/data/arccp.cpp:116 #: src/clients/data/arccp.cpp:149 src/clients/data/arccp.cpp:153 #: src/clients/data/arccp.cpp:358 src/clients/data/arccp.cpp:363 #: src/clients/data/arcls.cpp:123 src/clients/data/arcmkdir.cpp:28 #: src/clients/data/arcrename.cpp:29 src/clients/data/arcrename.cpp:33 #: src/clients/data/arcrm.cpp:36 #, c-format msgid "Invalid URL: %s" msgstr "" #: src/clients/data/arccp.cpp:128 msgid "Third party transfer is not supported for these endpoints" msgstr "" #: src/clients/data/arccp.cpp:130 msgid "" "Protocol(s) not supported - please check that the relevant gfal2\n" " plugins are installed (gfal2-plugin-* packages)" msgstr "" #: src/clients/data/arccp.cpp:133 #, c-format msgid "Transfer FAILED: %s" msgstr "" #: src/clients/data/arccp.cpp:161 src/clients/data/arccp.cpp:187 #: src/clients/data/arccp.cpp:374 src/clients/data/arccp.cpp:402 #, c-format msgid "Can't read list of sources from file %s" msgstr "" #: src/clients/data/arccp.cpp:166 src/clients/data/arccp.cpp:202 #: src/clients/data/arccp.cpp:379 src/clients/data/arccp.cpp:418 #, c-format msgid "Can't read list of destinations from file %s" msgstr "" #: src/clients/data/arccp.cpp:171 src/clients/data/arccp.cpp:385 msgid "Numbers of sources and destinations do not match" msgstr "" #: src/clients/data/arccp.cpp:216 msgid "Fileset registration is not supported yet" msgstr "" #: src/clients/data/arccp.cpp:222 src/clients/data/arccp.cpp:295 #: src/clients/data/arccp.cpp:456 #, c-format msgid "Unsupported source url: %s" msgstr "" #: src/clients/data/arccp.cpp:226 src/clients/data/arccp.cpp:299 #, c-format msgid "Unsupported destination url: %s" msgstr "" #: src/clients/data/arccp.cpp:233 msgid "" "For registration source must be ordinary URL and destination must be " "indexing service" msgstr "" #: src/clients/data/arccp.cpp:243 #, c-format msgid "Could not obtain information about source: %s" msgstr "" #: src/clients/data/arccp.cpp:250 msgid "" "Metadata of source does not match existing destination. Use the --force " "option to override this." msgstr "" #: src/clients/data/arccp.cpp:262 msgid "Failed to accept new file/destination" msgstr "" #: src/clients/data/arccp.cpp:268 src/clients/data/arccp.cpp:274 #, c-format msgid "Failed to register new file/destination: %s" msgstr "" #: src/clients/data/arccp.cpp:436 msgid "Fileset copy to single object is not supported yet" msgstr "" #: src/clients/data/arccp.cpp:446 msgid "Can't extract object's name from source url" msgstr "" #: src/clients/data/arccp.cpp:465 #, c-format msgid "%s. 
Cannot copy fileset" msgstr "" #: src/clients/data/arccp.cpp:475 src/hed/libs/compute/ExecutionTarget.cpp:254 #: src/hed/libs/compute/ExecutionTarget.cpp:326 #, c-format msgid "Name: %s" msgstr "" #: src/clients/data/arccp.cpp:478 #, c-format msgid "Source: %s" msgstr "" #: src/clients/data/arccp.cpp:479 #, c-format msgid "Destination: %s" msgstr "" #: src/clients/data/arccp.cpp:485 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:433 msgid "Current transfer complete" msgstr "" #: src/clients/data/arccp.cpp:488 msgid "Some transfers failed" msgstr "" #: src/clients/data/arccp.cpp:498 #, c-format msgid "Directory: %s" msgstr "" #: src/clients/data/arccp.cpp:518 msgid "Transfer complete" msgstr "" #: src/clients/data/arccp.cpp:537 msgid "source destination" msgstr "" #: src/clients/data/arccp.cpp:538 msgid "" "The arccp command copies files to, from and between grid storage elements." msgstr "" #: src/clients/data/arccp.cpp:543 msgid "" "use passive transfer (off by default if secure is on, on by default if " "secure is not requested)" msgstr "" #: src/clients/data/arccp.cpp:549 msgid "do not try to force passive transfer" msgstr "" #: src/clients/data/arccp.cpp:554 msgid "" "if the destination is an indexing service and not the same as the source and " "the destination is already registered, then the copy is normally not done. " "However, if this option is specified the source is assumed to be a replica " "of the destination created in an uncontrolled way and the copy is done like " "in case of replication. Using this option also skips validation of completed " "transfers." msgstr "" #: src/clients/data/arccp.cpp:567 msgid "show progress indicator" msgstr "" #: src/clients/data/arccp.cpp:572 msgid "" "do not transfer, but register source into destination. destination must be a " "meta-url." msgstr "" #: src/clients/data/arccp.cpp:578 msgid "use secure transfer (insecure by default)" msgstr "" #: src/clients/data/arccp.cpp:583 msgid "path to local cache (use to put file into cache)" msgstr "" #: src/clients/data/arccp.cpp:588 src/clients/data/arcls.cpp:300 msgid "operate recursively" msgstr "" #: src/clients/data/arccp.cpp:593 src/clients/data/arcls.cpp:305 msgid "operate recursively up to specified level" msgstr "" #: src/clients/data/arccp.cpp:594 src/clients/data/arcls.cpp:306 msgid "level" msgstr "" #: src/clients/data/arccp.cpp:598 msgid "number of retries before failing file transfer" msgstr "" #: src/clients/data/arccp.cpp:599 msgid "number" msgstr "" #: src/clients/data/arccp.cpp:603 msgid "" "physical location to write to when destination is an indexing service. Must " "be specified for indexing services which do not automatically generate " "physical locations. Can be specified multiple times - locations will be " "tried in order until one succeeds." 
msgstr "" #: src/clients/data/arccp.cpp:611 msgid "" "perform third party transfer, where the destination pulls from the source " "(only available with GFAL plugin)" msgstr "" #: src/clients/data/arccp.cpp:617 src/clients/data/arcls.cpp:322 #: src/clients/data/arcmkdir.cpp:101 src/clients/data/arcrename.cpp:112 #: src/clients/data/arcrm.cpp:127 msgid "list the available plugins (protocols supported)" msgstr "" #: src/clients/data/arccp.cpp:656 src/clients/data/arcls.cpp:362 #: src/clients/data/arcmkdir.cpp:141 src/clients/data/arcrename.cpp:152 #: src/clients/data/arcrm.cpp:168 msgid "Protocol plugins available:" msgstr "" #: src/clients/data/arccp.cpp:681 src/clients/data/arcls.cpp:387 #: src/clients/data/arcmkdir.cpp:165 src/clients/data/arcrename.cpp:175 #: src/clients/data/arcrm.cpp:193 msgid "Wrong number of parameters specified" msgstr "" #: src/clients/data/arccp.cpp:686 msgid "Options 'p' and 'n' can't be used simultaneously" msgstr "" #: src/clients/data/arcls.cpp:129 src/clients/data/arcmkdir.cpp:34 #: src/clients/data/arcrm.cpp:43 #, c-format msgid "Can't read list of locations from file %s" msgstr "" #: src/clients/data/arcls.cpp:144 src/clients/data/arcmkdir.cpp:49 #: src/clients/data/arcrename.cpp:61 msgid "Unsupported URL given" msgstr "" #: src/clients/data/arcls.cpp:149 src/clients/data/arcls.cpp:158 #, c-format msgid "Unable to list content of %s" msgstr "" #: src/clients/data/arcls.cpp:227 msgid "Warning: Failed listing files but some information is obtained" msgstr "" #: src/clients/data/arcls.cpp:281 src/clients/data/arcmkdir.cpp:90 msgid "url" msgstr "" #: src/clients/data/arcls.cpp:282 msgid "" "The arcls command is used for listing files in grid storage elements and " "file\n" "index catalogues." msgstr "" #: src/clients/data/arcls.cpp:291 msgid "show URLs of file locations" msgstr "" #: src/clients/data/arcls.cpp:295 msgid "display all available metadata" msgstr "" #: src/clients/data/arcls.cpp:309 msgid "" "show only description of requested object, do not list content of directories" msgstr "" #: src/clients/data/arcls.cpp:313 msgid "treat requested object as directory and always try to list content" msgstr "" #: src/clients/data/arcls.cpp:317 msgid "check readability of object, does not show any information about object" msgstr "" #: src/clients/data/arcls.cpp:392 msgid "Incompatible options --nolist and --forcelist requested" msgstr "" #: src/clients/data/arcls.cpp:397 msgid "Requesting recursion and --nolist has no sense" msgstr "" #: src/clients/data/arcmkdir.cpp:54 src/clients/data/arcmkdir.cpp:63 #, c-format msgid "Unable to create directory %s" msgstr "" #: src/clients/data/arcmkdir.cpp:91 msgid "" "The arcmkdir command creates directories on grid storage elements and " "catalogs." msgstr "" #: src/clients/data/arcmkdir.cpp:96 msgid "make parent directories as needed" msgstr "" #: src/clients/data/arcrename.cpp:41 msgid "Both URLs must have the same protocol, host and port" msgstr "" #: src/clients/data/arcrename.cpp:51 msgid "Cannot rename to or from root directory" msgstr "" #: src/clients/data/arcrename.cpp:55 msgid "Cannot rename to the same URL" msgstr "" #: src/clients/data/arcrename.cpp:66 src/clients/data/arcrename.cpp:75 #, c-format msgid "Unable to rename %s" msgstr "" #: src/clients/data/arcrename.cpp:106 msgid "old_url new_url" msgstr "" #: src/clients/data/arcrename.cpp:107 msgid "The arcrename command renames files on grid storage elements." 
msgstr "" #: src/clients/data/arcrm.cpp:58 #, c-format msgid "Unsupported URL given: %s" msgstr "" #: src/clients/data/arcrm.cpp:67 src/clients/data/arcrm.cpp:79 #, c-format msgid "Unable to remove file %s" msgstr "" #: src/clients/data/arcrm.cpp:115 msgid "url [url ...]" msgstr "" #: src/clients/data/arcrm.cpp:116 msgid "The arcrm command deletes files and on grid storage elements." msgstr "" #: src/clients/data/arcrm.cpp:121 msgid "" "remove logical file name registration even if not all physical instances " "were removed" msgstr "" #: src/clients/echo/arcecho.cpp:32 msgid "service message" msgstr "" #: src/clients/echo/arcecho.cpp:33 msgid "The arcecho command is a client for the ARC echo service." msgstr "" #: src/clients/echo/arcecho.cpp:35 msgid "" "The service argument is a URL to an ARC echo service.\n" "The message argument is the message the service should return." msgstr "" #: src/clients/echo/arcecho.cpp:105 src/hed/dmc/arc/DataPointARC.cpp:169 #: src/hed/dmc/arc/DataPointARC.cpp:222 src/hed/dmc/arc/DataPointARC.cpp:304 #: src/hed/dmc/arc/DataPointARC.cpp:415 src/hed/dmc/arc/DataPointARC.cpp:510 #: src/hed/dmc/arc/DataPointARC.cpp:574 src/hed/dmc/arc/DataPointARC.cpp:624 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:100 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:174 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:240 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:326 #, c-format msgid "" "Request:\n" "%s" msgstr "" #: src/clients/echo/arcecho.cpp:119 src/hed/dmc/arc/DataPointARC.cpp:182 #: src/hed/dmc/arc/DataPointARC.cpp:235 src/hed/dmc/arc/DataPointARC.cpp:320 #: src/hed/dmc/arc/DataPointARC.cpp:431 src/hed/dmc/arc/DataPointARC.cpp:524 #: src/hed/dmc/arc/DataPointARC.cpp:587 src/hed/dmc/arc/DataPointARC.cpp:638 #: src/hed/dmc/srm/srmclient/SRMClient.cpp:164 msgid "No SOAP response" msgstr "" #: src/clients/echo/arcecho.cpp:124 src/hed/acc/UNICORE/UNICOREClient.cpp:531 #: src/hed/dmc/arc/DataPointARC.cpp:187 src/hed/dmc/arc/DataPointARC.cpp:240 #: src/hed/dmc/arc/DataPointARC.cpp:325 src/hed/dmc/arc/DataPointARC.cpp:436 #: src/hed/dmc/arc/DataPointARC.cpp:529 src/hed/dmc/arc/DataPointARC.cpp:592 #: src/hed/dmc/arc/DataPointARC.cpp:643 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:119 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:193 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:267 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:346 #, c-format msgid "" "Response:\n" "%s" msgstr "" #: src/clients/saml/saml_assertion_init.cpp:43 msgid "service_url" msgstr "" #: src/clients/saml/saml_assertion_init.cpp:47 msgid "path to config file" msgstr "" #: src/clients/saml/saml_assertion_init.cpp:140 msgid "SOAP Request failed: No response" msgstr "" #: src/clients/saml/saml_assertion_init.cpp:144 msgid "SOAP Request failed: Error" msgstr "" #: src/clients/saml/saml_assertion_init.cpp:150 msgid "No in SOAP response" msgstr "" #: src/clients/saml/saml_assertion_init.cpp:156 msgid "No in SAML response" msgstr "" #: src/clients/saml/saml_assertion_init.cpp:168 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:321 msgid "Succeeded to verify the signature under " msgstr "" #: src/clients/saml/saml_assertion_init.cpp:171 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:324 msgid "Failed to verify the signature under " msgstr "" #: src/clients/wsrf/arcwsrf.cpp:39 msgid "URL [query]" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:40 msgid "" "The arcwsrf command is used for obtaining the WS-ResourceProperties of\n" "services." 
msgstr "" #: src/clients/wsrf/arcwsrf.cpp:46 msgid "Request for specific Resource Property" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:47 msgid "[-]name" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:80 msgid "Missing URL" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:85 msgid "Too many parameters" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:123 msgid "Query is not a valid XML" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:138 msgid "Failed to create WSRP request" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:145 msgid "Specified URL is not valid" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:157 msgid "Failed to send request" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:161 msgid "Failed to obtain SOAP response" msgstr "" #: src/clients/wsrf/arcwsrf.cpp:167 msgid "SOAP fault received" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:142 #, c-format msgid "Connect: Failed to init handle: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:148 #, c-format msgid "Failed to enable IPv6: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:158 src/hed/acc/ARC0/FTPControl.cpp:172 #, c-format msgid "Connect: Failed to connect: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:165 #, c-format msgid "Connect: Connecting timed out after %d ms" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:185 #, c-format msgid "Connect: Failed to init auth info handle: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:196 src/hed/acc/ARC0/FTPControl.cpp:210 #, c-format msgid "Connect: Failed authentication: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:203 #, c-format msgid "Connect: Authentication timed out after %d ms" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:224 src/hed/acc/ARC0/FTPControl.cpp:256 #, c-format msgid "SendCommand: Command: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:229 src/hed/acc/ARC0/FTPControl.cpp:240 #: src/hed/acc/ARC0/FTPControl.cpp:260 src/hed/acc/ARC0/FTPControl.cpp:271 #, c-format msgid "SendCommand: Failed: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:235 src/hed/acc/ARC0/FTPControl.cpp:266 #, c-format msgid "SendCommand: Timed out after %d ms" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:243 src/hed/acc/ARC0/FTPControl.cpp:276 #, c-format msgid "SendCommand: Response: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:293 msgid "SendData: Failed sending EPSV and PASV commands" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:298 src/hed/acc/ARC0/FTPControl.cpp:304 #: src/hed/acc/ARC0/FTPControl.cpp:320 #, c-format msgid "SendData: Server PASV response parsing failed: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:330 src/hed/acc/ARC0/FTPControl.cpp:336 #: src/hed/acc/ARC0/FTPControl.cpp:343 src/hed/acc/ARC0/FTPControl.cpp:350 #, c-format msgid "SendData: Server EPSV response parsing failed: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:357 #, c-format msgid "SendData: Server EPSV response port parsing failed: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:366 #, c-format msgid "SendData: Failed to apply local address to data connection: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:372 #, c-format msgid "SendData: Can't parse host and/or port in response to EPSV/PASV: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:377 #, c-format msgid "SendData: Data channel: %d.%d.%d.%d:%d" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:393 #, c-format msgid "SendData: Data channel: [%s]:%d" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:398 #, c-format msgid "SendData: Local port failed: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:422 msgid "SendData: Failed sending DCAU command" msgstr "" #: 
src/hed/acc/ARC0/FTPControl.cpp:427 msgid "SendData: Failed sending TYPE command" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:436 #, c-format msgid "SendData: Local type failed: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:446 #, c-format msgid "SendData: Failed sending STOR command: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:454 src/hed/acc/ARC0/FTPControl.cpp:475 #, c-format msgid "SendData: Data connect write failed: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:461 src/hed/acc/ARC0/FTPControl.cpp:469 #, c-format msgid "SendData: Data connect write timed out after %d ms" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:487 src/hed/acc/ARC0/FTPControl.cpp:507 #, c-format msgid "SendData: Data write failed: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:493 src/hed/acc/ARC0/FTPControl.cpp:501 #, c-format msgid "SendData: Data write timed out after %d ms" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:527 src/hed/acc/ARC0/FTPControl.cpp:538 #, c-format msgid "Disconnect: Failed aborting - ignoring: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:530 #, c-format msgid "Disconnect: Data close timed out after %d ms" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:541 #, c-format msgid "Disconnect: Abort timed out after %d ms" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:549 #, c-format msgid "Disconnect: Failed quitting - ignoring: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:552 #, c-format msgid "Disconnect: Quitting timed out after %d ms" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:561 #, c-format msgid "Disconnect: Failed closing - ignoring: %s" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:567 #, c-format msgid "Disconnect: Closing timed out after %d ms" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:582 msgid "Disconnect: waiting for globus handle to settle" msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:596 msgid "Disconnect: globus handle is stuck." msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:604 #, c-format msgid "Disconnect: Failed destroying handle: %s. Can't handle such situation." msgstr "" #: src/hed/acc/ARC0/FTPControl.cpp:607 msgid "Disconnect: handle destroyed." msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:47 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:44 msgid "" "Missing reference to factory and/or module. It is unsafe to use Globus in " "non-persistent mode - SubmitterPlugin for ARC0 is disabled. Report to " "developers." msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:62 #, c-format msgid "Unable to query job information (%s), invalid URL provided (%s)" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:74 #, c-format msgid "Jobs left to query: %d" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:83 #, c-format msgid "Querying batch with %d jobs" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:99 msgid "Can't create information handle - is the ARC LDAP DMC plugin available?" 
msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:132 #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:47 #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:36 #, c-format msgid "Job information not found in the information system: %s" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:134 msgid "" "This job was very recently submitted and might not yet have reached the " "information system" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:236 #, c-format msgid "Cleaning job: %s" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:240 msgid "Failed to connect for job cleaning" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:252 msgid "Failed sending CWD command for job cleaning" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:259 msgid "Failed sending RMD command for job cleaning" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:266 msgid "Failed to disconnect after job cleaning" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:273 msgid "Job cleaning successful" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:284 #, c-format msgid "Cancelling job: %s" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:288 msgid "Failed to connect for job cancelling" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:300 msgid "Failed sending CWD command for job cancelling" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:307 msgid "Failed sending DELE command for job cancelling" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:314 msgid "Failed to disconnect after job cancelling" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:322 msgid "Job cancelling successful" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:333 #, c-format msgid "Renewing credentials for job: %s" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:337 msgid "Failed to connect for credential renewal" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:349 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:356 msgid "Failed sending CWD command for credentials renewal" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:362 msgid "Failed to disconnect after credentials renewal" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:369 msgid "Renewal of credentials was successful" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:381 #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:111 #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:192 #, c-format msgid "Job %s does not report a resumable state" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:391 #, c-format msgid "Illegal jobID specified (%s)" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:398 #, c-format msgid "HER: %s" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:404 #, c-format msgid "Could not create temporary file: %s" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:437 #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:131 #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:210 msgid "Job resuming successful" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:473 #, c-format msgid "Trying to retrieve job description of %s from computing resource" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:478 #, c-format msgid "invalid jobID: %s" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:520 msgid "clientxrsl found" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:523 msgid "could not find start of clientxrsl" msgstr "" #: 
src/hed/acc/ARC0/JobControllerPluginARC0.cpp:528 msgid "could not find end of clientxrsl" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:541 #, c-format msgid "Job description: %s" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:544 msgid "clientxrsl not found" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:550 #, c-format msgid "Invalid JobDescription: %s" msgstr "" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:553 msgid "Valid JobDescription found" msgstr "" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:60 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:204 msgid "Submit: Failed to connect" msgstr "" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:68 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:212 msgid "Submit: Failed sending CWD command" msgstr "" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:79 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:223 msgid "Submit: Failed sending CWD new command" msgstr "" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:106 msgid "Failed to prepare job description." msgstr "" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:116 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:260 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:63 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:158 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:87 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:401 #, c-format msgid "Unable to submit job. Job description is not valid in the %s format: %s" msgstr "" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:123 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:267 msgid "Submit: Failed sending job description" msgstr "" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:138 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:282 msgid "Submit: Failed uploading local input files" msgstr "" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:193 msgid "" "Submit: service has no suitable information interface - need org.nordugrid." "ldapng" msgstr "" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:250 msgid "Failed to prepare job description to target resources." msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:58 msgid "Creating an A-REX client" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:61 msgid "Unable to create SOAP client used by AREXClient." msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:85 msgid "Failed locating credentials." msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:94 msgid "Failed to initiate client connection." msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:102 msgid "Client connection has no entry point." msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:113 src/hed/acc/EMIES/EMIESClient.cpp:130 #: src/hed/acc/UNICORE/UNICOREClient.cpp:191 #: src/hed/acc/UNICORE/UNICOREClient.cpp:222 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:505 #: src/services/a-rex/test.cpp:86 msgid "Initiating delegation procedure" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:115 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:507 msgid "Failed to initiate delegation credentials" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:128 msgid "Re-creating an A-REX client" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:146 msgid "AREXClient was not created properly."
msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:151 src/hed/acc/EMIES/EMIESClient.cpp:174 #, c-format msgid "Processing a %s request" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:173 src/hed/acc/CREAM/CREAMClient.cpp:134 #: src/hed/acc/EMIES/EMIESClient.cpp:180 #, c-format msgid "%s request failed" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:181 src/hed/acc/EMIES/EMIESClient.cpp:189 #, c-format msgid "No response from %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:190 src/hed/acc/EMIES/EMIESClient.cpp:198 #, c-format msgid "%s request to %s failed with response: %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:195 src/hed/acc/EMIES/EMIESClient.cpp:213 #, c-format msgid "XML response: %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:204 #, c-format msgid "%s request to %s failed. No expected response." msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:218 #, c-format msgid "Creating and sending submit request to %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:234 src/hed/acc/ARC1/AREXClient.cpp:482 #: src/hed/acc/EMIES/EMIESClient.cpp:302 src/hed/acc/EMIES/EMIESClient.cpp:405 #: src/hed/acc/UNICORE/UNICOREClient.cpp:160 #, c-format msgid "Job description to be sent: %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:248 src/hed/acc/EMIES/EMIESClient.cpp:491 #: src/hed/acc/EMIES/EMIESClient.cpp:525 src/hed/acc/EMIES/EMIESClient.cpp:581 #, c-format msgid "Creating and sending job information query request to %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:293 src/hed/acc/ARC1/AREXClient.cpp:336 #, c-format msgid "Unable to retrieve status of job (%s)" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:346 src/hed/acc/EMIES/EMIESClient.cpp:821 #, c-format msgid "Creating and sending service information query request to %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:366 #, c-format msgid "Creating and sending ISIS information query request to %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:383 #, c-format msgid "Service %s of type %s ignored" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:386 msgid "No execution services registered in the index service" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:392 #, c-format msgid "Creating and sending terminate request to %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:403 #: src/hed/acc/UNICORE/UNICOREClient.cpp:619 #: src/hed/acc/UNICORE/UNICOREClient.cpp:692 msgid "Job termination failed" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:414 #, c-format msgid "Creating and sending clean request to %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:444 #, c-format msgid "Creating and sending job description retrieval request to %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:464 #, c-format msgid "Creating and sending job migrate request to %s" msgstr "" #: src/hed/acc/ARC1/AREXClient.cpp:498 src/hed/acc/EMIES/EMIESClient.cpp:932 #, c-format msgid "Creating and sending job resume request to %s" msgstr "" #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:100 msgid "Renewal of ARC1 jobs is not supported" msgstr "" #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:117 #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:197 #, c-format msgid "Resuming job: %s at state: %s (%s)" msgstr "" #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:183 #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:103 #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:139 #, c-format msgid "Failed retrieving job description for job: %s" msgstr "" #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:42 #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:69 msgid "Failed retrieving job status information" msgstr 
"" #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:52 msgid "Cleaning of BES jobs is not supported" msgstr "" #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:78 msgid "Renewal of BES jobs is not supported" msgstr "" #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:86 msgid "Resuming BES jobs is not supported" msgstr "" #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:49 msgid "Collecting Job (A-REX jobs) information." msgstr "" #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:53 #, c-format msgid "Failed retrieving job IDs: Unsupported url (%s) given" msgstr "" #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:61 msgid "Failed retrieving job IDs" msgstr "" #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:64 msgid "" "Error encoutered during job ID retrieval. All job IDs might not have been " "retrieved" msgstr "" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:53 msgid "Failed to prepare job description" msgstr "" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:78 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:173 msgid "No job identifier returned by BES service" msgstr "" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:99 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:194 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:310 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:77 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:169 msgid "Failed uploading local input files" msgstr "" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:148 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:53 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:136 msgid "Failed to prepare job description to target resources" msgstr "" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:271 msgid "Failed adapting job description to target resources" msgstr "" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:282 #, c-format msgid "" "Unable to migrate job. Job description is not valid in the %s format: %s" msgstr "" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:295 msgid "No job identifier returned by A-REX" msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:50 msgid "Querying WSRF GLUE2 computing info endpoint." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:95 #: src/hed/libs/compute/GLUE2.cpp:53 msgid "The Service doesn't advertise its Type." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:100 msgid "The Service doesn't advertise its Quality Level." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:120 #, c-format msgid "Generating A-REX target: %s" msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:145 #: src/hed/libs/compute/GLUE2.cpp:99 msgid "The ComputingEndpoint has no URL." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:151 #: src/hed/libs/compute/GLUE2.cpp:104 msgid "The Service advertises no Health State." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:178 msgid "The Service doesn't advertise its Interface." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:210 msgid "The Service doesn't advertise its Serving State." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:278 #: src/hed/libs/compute/GLUE2.cpp:247 #, c-format msgid "" "The \"FreeSlotsWithDuration\" attribute published by \"%s\" is wrongly " "formatted. Ignoring it." 
msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:279 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE2.cpp:220 #: src/hed/libs/compute/GLUE2.cpp:248 #, c-format msgid "Wrong format of the \"FreeSlotsWithDuration\" = \"%s\" (\"%s\")" msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:398 #: src/hed/libs/compute/GLUE2.cpp:417 #, c-format msgid "" "Couldn't parse benchmark XML:\n" "%s" msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:467 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPNG.cpp:426 #, c-format msgid "Unable to parse the %s.%s value from execution service (%s)." msgstr "" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:468 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPNG.cpp:427 #, c-format msgid "Value of %s.%s is \"%s\"" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:14 msgid "Sorting according to free slots in queue" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:15 msgid "Random sorting" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:16 msgid "Sorting according to specified benchmark (default \"specint2000\")" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:17 msgid "Sorting according to input data availability at target" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:18 msgid "Performs neither sorting nor matching" msgstr "" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:24 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of waiting " "jobs" msgstr "" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:27 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of total slots" msgstr "" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:30 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of free slots" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:114 msgid "Creating a CREAM client" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:117 msgid "Unable to create SOAP client used by CREAMClient." 
msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:128 msgid "CREAMClient not created properly" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:139 #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:52 #: src/hed/acc/UNICORE/UNICOREClient.cpp:207 #: src/hed/acc/UNICORE/UNICOREClient.cpp:299 #: src/hed/acc/UNICORE/UNICOREClient.cpp:376 #: src/hed/acc/UNICORE/UNICOREClient.cpp:455 #: src/hed/acc/UNICORE/UNICOREClient.cpp:488 #: src/hed/acc/UNICORE/UNICOREClient.cpp:565 #: src/hed/acc/UNICORE/UNICOREClient.cpp:641 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:155 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:188 #: src/tests/client/test_ClientInterface.cpp:88 #: src/tests/client/test_ClientSAML2SSO.cpp:81 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:100 #: src/tests/echo/test_clientinterface.cpp:82 #: src/tests/echo/test_clientinterface.cpp:149 #: src/tests/echo/test_clientinterface.py:29 msgid "There was no SOAP response" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:148 src/hed/acc/CREAM/CREAMClient.cpp:353 #: src/hed/acc/CREAM/CREAMClient.cpp:374 src/hed/acc/CREAM/CREAMClient.cpp:395 #: src/hed/acc/CREAM/CREAMClient.cpp:414 src/hed/acc/CREAM/CREAMClient.cpp:465 #: src/hed/acc/CREAM/CREAMClient.cpp:494 msgid "Empty response" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:167 #, c-format msgid "Request failed: %s" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:175 src/hed/acc/CREAM/CREAMClient.cpp:428 #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:35 #: src/hed/acc/UNICORE/UNICOREClient.cpp:359 msgid "Creating and sending a status request" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:200 msgid "Unable to retrieve job status." msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:340 #: src/hed/acc/UNICORE/UNICOREClient.cpp:549 #: src/hed/acc/UNICORE/UNICOREClient.cpp:628 msgid "Creating and sending request to terminate a job" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:361 msgid "Creating and sending request to clean a job" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:382 msgid "Creating and sending request to resume a job" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:403 msgid "Creating and sending request to list jobs" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:450 msgid "Creating and sending job register request" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:470 src/hed/acc/CREAM/CREAMClient.cpp:499 msgid "No job ID in response" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:480 msgid "Creating and sending job start request" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:508 msgid "Creating delegation" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:520 msgid "Malformed response: missing getProxyReqReturn" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:531 #, c-format msgid "Delegatable credentials expired: %s" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:541 msgid "Failed signing certificate request" msgstr "" #: src/hed/acc/CREAM/CREAMClient.cpp:561 msgid "Failed putting signed delegation certificate to service" msgstr "" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:52 #, c-format msgid "Failed cleaning job: %s" msgstr "" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:70 #, c-format msgid "Failed canceling job: %s" msgstr "" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:84 msgid "Renewal of CREAM jobs is not supported" msgstr "" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:98 #, c-format msgid "Failed resuming job: %s" msgstr "" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:40 msgid "Failed creating signed delegation 
certificate" msgstr "" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:61 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:153 #: src/hed/acc/UNICORE/UNICOREClient.cpp:115 #, c-format msgid "Unable to submit job. Job description is not valid in the %s format" msgstr "" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:69 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:161 msgid "Failed registering job" msgstr "" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:85 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:177 msgid "Failed starting job" msgstr "" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:123 msgid "Failed creating singed delegation certificate" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:79 msgid "Creating an EMI ES client" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:82 msgid "Unable to create SOAP client used by EMIESClient." msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:157 msgid "Re-creating an EMI ES client" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:223 #, c-format msgid "%s request to %s failed. Unexpected response: %s." msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:237 src/hed/acc/EMIES/EMIESClient.cpp:344 #, c-format msgid "Creating and sending job submit request to %s" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:415 src/hed/acc/EMIES/EMIESClient.cpp:598 #: src/hed/acc/EMIES/EMIESClient.cpp:1087 #, c-format msgid "New limit for vector queries returned by EMI ES service: %d" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:423 src/hed/acc/EMIES/EMIESClient.cpp:606 #: src/hed/acc/EMIES/EMIESClient.cpp:1095 #, c-format msgid "" "Error: Service returned a limit higher or equal to current limit (current: %" "d; returned: %d)" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:764 #, c-format msgid "Creating and sending service information request to %s" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:869 src/hed/acc/EMIES/EMIESClient.cpp:890 #, c-format msgid "Creating and sending job clean request to %s" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:911 #, c-format msgid "Creating and sending job suspend request to %s" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:953 #, c-format msgid "Creating and sending job restart request to %s" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:1010 #, c-format msgid "Creating and sending job notify request to %s" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:1065 #, c-format msgid "Creating and sending notify request to %s" msgstr "" #: src/hed/acc/EMIES/EMIESClient.cpp:1155 #, c-format msgid "Creating and sending job list request to %s" msgstr "" #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:156 #, c-format msgid "Job %s has no delegation associated. Can't renew such job." msgstr "" #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:170 #, c-format msgid "Job %s failed to renew delegation %s - %s." msgstr "" #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:247 #, c-format msgid "Failed retrieving information for job: %s" msgstr "" #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:319 msgid "Retrieving job description of EMI ES jobs is not supported" msgstr "" #: src/hed/acc/EMIES/JobListRetrieverPluginEMIES.cpp:61 #, c-format msgid "Listing jobs succeeded, %d jobs found" msgstr "" #: src/hed/acc/EMIES/JobListRetrieverPluginEMIES.cpp:77 #: src/hed/acc/ldap/JobListRetrieverPluginLDAPNG.cpp:102 #, c-format msgid "" "Skipping retrieved job (%s) because it was submitted via another interface (%" "s)." 
msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:47 msgid "" "Failed to delegate credentials to server - no delegation interface found" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:54 #, c-format msgid "Failed to delegate credentials to server - %s" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:77 msgid "Failed preparing job description" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:95 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:406 msgid "Unable to submit job. Job description is not valid XML" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:154 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:482 msgid "No valid job identifier returned by EMI ES" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:180 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:499 msgid "Job failed on service side" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:190 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:509 msgid "Failed to obtain state of job" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:205 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:521 msgid "Failed to wait for job to allow stage in" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:228 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:541 msgid "Failed to obtain valid stagein URL for input files" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:248 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:558 #, c-format msgid "Failed uploading local input files to %s" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:269 #, c-format msgid "Failed to submit job description: EMIESFault(%s , %s)" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:278 #, c-format msgid "Failed to submit job description: UnexpectedError(%s)" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:315 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:574 msgid "Failed to notify service" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:367 msgid "Failed preparing job description to target resources" msgstr "" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:475 #, c-format msgid "Failed to submit job description: %s" msgstr "" #: src/hed/acc/EMIES/TargetInformationRetrieverPluginEMIES.cpp:54 msgid "Collecting EMI-ES GLUE2 computing info endpoint information." 
msgstr "" #: src/hed/acc/EMIES/TargetInformationRetrieverPluginEMIES.cpp:74 msgid "Generating EMIES targets" msgstr "" #: src/hed/acc/EMIES/TargetInformationRetrieverPluginEMIES.cpp:83 #, c-format msgid "Generated EMIES target: %s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:75 #: src/hed/acc/EMIES/TestEMIESClient.cpp:79 #, c-format msgid "Query returned unexpected element: %s:%s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:85 #, c-format msgid "Element validation according to GLUE2 schema failed: %s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:114 msgid "Resource query failed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:132 msgid "Submission failed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:143 msgid "Obtaining status failed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:153 msgid "Obtaining information failed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:170 msgid "Cleaning failed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:177 msgid "Notify failed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:184 msgid "Kill failed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:190 msgid "List failed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:201 #, c-format msgid "Fetching resource description from %s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:204 #: src/hed/acc/EMIES/TestEMIESClient.cpp:273 #: src/hed/acc/EMIES/TestEMIESClient.cpp:283 #: src/hed/acc/EMIES/TestEMIESClient.cpp:294 #, c-format msgid "Failed to obtain resource description: %s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:213 #: src/hed/acc/EMIES/TestEMIESClient.cpp:217 #, c-format msgid "Resource description contains unexpected element: %s:%s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:223 msgid "Resource description validation according to GLUE2 schema failed: " msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:228 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:560 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:819 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:133 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:173 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1218 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1252 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1434 #: src/hed/identitymap/ArgusPDPClient.cpp:444 #: src/hed/identitymap/ArgusPEPClient.cpp:98 #: src/hed/identitymap/ArgusPEPClient.cpp:345 #: src/hed/libs/common/Thread.cpp:193 src/hed/libs/common/Thread.cpp:196 #: src/hed/libs/common/Thread.cpp:199 #: src/hed/libs/credential/Credential.cpp:1055 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:72 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:88 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:104 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:124 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:134 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:142 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:151 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:160 #: src/hed/mcc/tls/PayloadTLSMCC.cpp:82 src/hed/shc/arcpdp/ArcPDP.cpp:235 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:293 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:247 #: src/services/a-rex/delegation/DelegationStore.cpp:44 #: src/services/a-rex/delegation/DelegationStore.cpp:49 #: src/services/a-rex/delegation/DelegationStore.cpp:54 #: src/services/a-rex/delegation/DelegationStore.cpp:88 #: src/services/a-rex/delegation/DelegationStore.cpp:94 #: src/services/a-rex/grid-manager/inputcheck.cpp:33 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:552 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:620 #: 
src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:645 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:656 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:667 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:678 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:686 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:692 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:697 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:702 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:707 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:715 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:723 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:734 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:741 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:780 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:790 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:798 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:824 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:893 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:906 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:923 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:935 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1239 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1244 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1273 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1286 #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:373 #, c-format msgid "%s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:248 msgid "Resource description is empty" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:255 #, c-format msgid "Resource description provides URL for interface %s: %s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:260 msgid "Resource description provides no URLs for interfaces" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:263 msgid "Resource description validation passed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:266 #, c-format msgid "Requesting ComputingService elements of resource description at %s" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:271 msgid "Performing /Services/ComputingService query" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:275 #: src/hed/acc/EMIES/TestEMIESClient.cpp:285 #: src/hed/acc/EMIES/TestEMIESClient.cpp:296 msgid "Query returned no elements." msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:281 msgid "Performing /ComputingService query" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:292 msgid "Performing /* query" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:302 msgid "All queries failed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:332 #, c-format msgid "" "Number of ComputingService elements obtained from full document and XPath " "query do not match: %d != %d" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:335 msgid "Resource description query validation passed" msgstr "" #: src/hed/acc/EMIES/TestEMIESClient.cpp:337 #, c-format msgid "Unsupported command: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:75 #, c-format msgid "[ADLParser] Unsupported EMI ES state %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:95 #, c-format msgid "[ADLParser] Unsupported internal state %s."
msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:105 #, c-format msgid "[ADLParser] Optional for %s elements are not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:114 #, c-format msgid "[ADLParser] %s element must be boolean." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:126 #, c-format msgid "[ADLParser] Code in FailIfExitCodeNotEqualTo in %s is not valid number." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:364 msgid "[ADLParser] Root element is not ActivityDescription " msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:411 msgid "[ADLParser] priority is too large - using max value 100" msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:454 #, c-format msgid "[ADLParser] Unsupported URL %s for RemoteLogging." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:473 #, c-format msgid "[ADLParser] Wrong time %s in ExpirationTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:502 #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:505 msgid "[ADLParser] Only email Prorocol for Notification is supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:563 msgid "[ADLParser] Missing or wrong value in ProcessesPerSlot." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:568 msgid "[ADLParser] Missing or wrong value in ThreadsPerProcess." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:574 msgid "" "[ADLParser] Missing Name element or value in ParallelEnvironment/Option " "element." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:591 msgid "[ADLParser] NetworkInfo is not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:605 #, c-format msgid "[ADLParser] NodeAccess value %s is not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:613 msgid "[ADLParser] Missing or wrong value in NumberOfSlots." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:620 msgid "" "[ADLParser] The NumberOfSlots element should be specified, when the value of " "useNumberOfSlots attribute of SlotsPerHost element is \"true\"." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:627 msgid "[ADLParser] Missing or wrong value in SlotsPerHost." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:656 msgid "[ADLParser] Missing or wrong value in IndividualPhysicalMemory." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:666 msgid "[ADLParser] Missing or wrong value in IndividualVirtualMemory." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:676 msgid "[ADLParser] Missing or wrong value in DiskSpaceRequirement." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:690 msgid "[ADLParser] Benchmark is not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:698 msgid "[ADLParser] Missing or wrong value in IndividualCPUTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:706 msgid "[ADLParser] Missing or wrong value in TotalCPUTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:715 msgid "[ADLParser] Missing or wrong value in WallTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:735 msgid "[ADLParser] Missing or empty Name in InputFile." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:746 #, c-format msgid "[ADLParser] Wrong URI specified in Source - %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:768 msgid "[ADLParser] Missing or empty Name in OutputFile." 
msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:774 #, c-format msgid "[ADLParser] Wrong URI specified in Target - %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:787 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:792 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:846 #, c-format msgid "Location URI for file %s is invalid" msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:812 #, c-format msgid "[ADLParser] CreationFlag value %s is not supported." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:75 #, c-format msgid "Unknown operator '%s' in attribute require in Version element" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:121 #, c-format msgid "Multiple '%s' elements are not supported." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:136 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:143 #, c-format msgid "The 'exclusiveBound' attribute to the '%s' element is not supported." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:150 msgid "The 'epsilon' attribute to the 'Exact' element is not supported." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:178 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:195 #, c-format msgid "Parsing error: Value of %s element can't be parsed as number" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:183 #, c-format msgid "" "Parsing error: Elements (%s) representing upper range have different values" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:200 #, c-format msgid "" "Parsing error: Elements (%s) representing lower range have different values" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:209 #, c-format msgid "" "Parsing error: Value of lower range (%s) is greater than value of upper " "range (%s)" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:296 msgid "[ARCJSDLParser] Not a JSDL - missing JobDescription element" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:377 #, c-format msgid "" "[ARCJSDLParser] Error during the parsing: missed the name attributes of the " "\"%s\" Environment" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:424 msgid "[ARCJSDLParser] RemoteLogging URL is wrongly formatted." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:440 msgid "[ARCJSDLParser] priority is too large - using max value 100" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:718 msgid "Lower bounded range is not supported for the 'TotalCPUCount' element." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:736 msgid "" "Parsing the \"require\" attribute of the \"QueueName\" nordugrid-JSDL " "element failed. An invalid comparison operator was used, only \"ne\" or \"eq" "\" are allowed." msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:787 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:841 #, c-format msgid "No URI element found in Location for file %s" msgstr "" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:873 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:323 #, c-format msgid "String successfully parsed as %s." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:53 #, c-format msgid "[JDLParser] Semicolon (;) is not allowed inside brackets, at '%s;'." 
msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:137 #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:141 #, c-format msgid "[JDLParser] This kind of JDL descriptor is not supported yet: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:144 #, c-format msgid "[JDLParser] Attribute named %s has unknown value: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:224 msgid "Not enough outputsandboxdesturi elements!" msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:306 msgid "" "[JDLParser] Environment variable has been defined without any equals sign." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:503 #, c-format msgid "[JDLParser]: Unknown attribute name: '%s', with value: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:539 msgid "The inputsandboxbaseuri JDL attribute specifies an invalid URL." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:616 msgid "[JDLParser] Syntax error found during the split function." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:620 msgid "[JDLParser] Lines count is zero or other funny error has occurred." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:628 msgid "" "[JDLParser] JDL syntax error. There is at least one equals sign missing " "where it would be expected." msgstr "" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:641 #, c-format msgid "String successfully parsed as %s" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:42 msgid "Left operand for RSL concatenation does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:50 msgid "Right operand for RSL concatenation does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:161 msgid "Multi-request operator only allowed at top level" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:186 msgid "RSL substitution is not a sequence" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:192 msgid "RSL substitution sequence is not of length 2" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:211 msgid "RSL substitution variable name does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:220 msgid "RSL substitution variable value does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:313 msgid "End of comment not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:324 msgid "Junk at end of RSL" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:424 msgid "End of single quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:441 msgid "End of double quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:460 #, c-format msgid "End of user delimiter (%s) quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:518 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:546 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:625 msgid "')' expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:528 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:609 msgid "'(' expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:536 msgid "Variable name expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:541 #, c-format msgid "Variable name (%s) contains invalid character (%s)" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:557 msgid "Broken string" msgstr "" #: 
src/hed/acc/JobDescriptionParser/RSLParser.cpp:570 msgid "No left operand for concatenation operator" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:574 msgid "No right operand for concatenation operator" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:638 msgid "Attribute name expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:643 #, c-format msgid "Attribute name (%s) contains invalid character (%s)" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:649 msgid "Relation operator expected" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:86 msgid "Error parsing the internally set executables attribute." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:102 #, c-format msgid "" "File '%s' in the 'executables' attribute is not present in the 'inputfiles' " "attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:120 msgid "The value of the ftpthreads attribute must be a number from 1 to 10" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:177 msgid "'stdout' attribute must be specified when 'join' attribute is specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:181 msgid "" "Attribute 'join' cannot be specified when both 'stdout' and 'stderr' " "attributes are specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:200 msgid "Attributes 'gridtime' and 'cputime' cannot be specified together" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:204 msgid "Attributes 'gridtime' and 'walltime' cannot be specified together" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:226 msgid "" "When specifying 'countpernode' attribute, 'count' attribute must also be " "specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:229 msgid "Value of 'countpernode' attribute must be an integer" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:287 msgid "No RSL content in job description found" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:293 msgid "Multi-request job description not allowed in GRIDMANAGER dialect" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:304 msgid "No executable path specified in GRIDMANAGER dialect" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:311 msgid "'action' attribute not allowed in user-side job description" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:314 msgid "Executable path not specified ('executable' attribute)" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:332 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:350 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:368 #, c-format msgid "Attribute '%s' multiply defined" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:336 #, c-format msgid "Value of attribute '%s' expected to be single value" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:341 #, c-format msgid "Value of attribute '%s' expected to be a string" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:357 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:387 #, c-format msgid "Value of attribute '%s' is not a string" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:375 #, c-format msgid "Value of attribute '%s' is not a sequence" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:379 #, c-format msgid "" "Value of attribute '%s' has wrong sequence length: Expected %d, found %d" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:511 #:
src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1368 msgid "Unexpected RSL type" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:576 msgid "At least two values are needed for the 'inputfiles' attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:581 msgid "First value of 'inputfiles' attribute (filename) cannot be empty" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:606 #, c-format msgid "Invalid URL '%s' for input file '%s'" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:615 #, c-format msgid "Invalid URL option syntax in option '%s' for input file '%s'" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:625 #, c-format msgid "Invalid URL: '%s' in input file '%s'" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:694 msgid "At least two values are needed for the 'outputfiles' attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:699 msgid "First value of 'outputfiles' attribute (filename) cannot be empty" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:711 #, c-format msgid "Invalid URL '%s' for output file '%s'" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:720 #, c-format msgid "Invalid URL option syntax in option '%s' for output file '%s'" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:730 #, c-format msgid "Invalid URL: '%s' in output file '%s'" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:761 #, c-format msgid "" "Invalid comparison operator '%s' used at 'queue' attribute in 'GRIDMANAGER' " "dialect, only \"=\" is allowed" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:767 #, c-format msgid "" "Invalid comparison operator '%s' used at 'queue' attribute, only \"!=\" or " "\"=\" are allowed." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1029 msgid "The value of the acl XRSL attribute isn't valid XML." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1043 msgid "The cluster XRSL attribute is currently unsupported." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1059 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it must contain an email " "address" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1067 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it must only contain email " "addresses after state flag(s)" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1070 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it contains unknown state " "flags" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1118 msgid "priority is too large - using max value 100" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1151 #, c-format msgid "Invalid nodeaccess value: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1194 msgid "Value of 'count' attribute must be an integer" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1224 msgid "Value of 'exclusiveexecution' attribute must either be 'yes' or 'no'" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1270 #, c-format msgid "Invalid action value %s" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1360 #, c-format msgid "The specified Globus attribute (%s) is not supported. %s ignored." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1364 #, c-format msgid "Unknown XRSL attribute: %s - Ignoring it." 
msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1378 #, c-format msgid "Wrong language requested: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1384 msgid "Missing executable" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1702 msgid "" "Cannot output XRSL representation: The Resources.SlotRequirement." "NumberOfSlots attribute must be specified when the Resources.SlotRequirement." "SlotsPerHost attribute is specified." msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:65 #: src/services/wrappers/python/pythonwrapper.cpp:96 msgid "Failed to initialize main Python thread" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:71 #: src/services/wrappers/python/pythonwrapper.cpp:101 msgid "Main Python thread was not initialized" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:81 #, c-format msgid "Loading Python broker (%i)" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:104 #: src/services/wrappers/python/pythonwrapper.cpp:139 msgid "Main Python thread is not initialized" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:108 msgid "PythonBroker init" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:116 msgid "" "Invalid class name. The broker argument for the PythonBroker should be\n" " Filename.Class.args (args is optional), for example SampleBroker." "MyBroker" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:122 #, c-format msgid "Class name: %s" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:123 #, c-format msgid "Module name: %s" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:132 #: src/services/wrappers/python/pythonwrapper.cpp:183 msgid "Cannot convert ARC module name to Python string" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:140 #: src/services/wrappers/python/pythonwrapper.cpp:191 msgid "Cannot import ARC module" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:149 #: src/services/wrappers/python/pythonwrapper.cpp:201 #: src/services/wrappers/python/pythonwrapper.cpp:426 #: src/services/wrappers/python/pythonwrapper.cpp:526 msgid "Cannot get dictionary of ARC module" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:158 msgid "Cannot find ARC UserConfig class" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:166 msgid "UserConfig class is not an object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:173 msgid "Cannot find ARC JobDescription class" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:181 msgid "JobDescription class is not an object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:188 msgid "Cannot find ARC ExecutionTarget class" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:196 msgid "ExecutionTarget class is not an object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:207 #: src/services/wrappers/python/pythonwrapper.cpp:162 msgid "Cannot convert module name to Python string" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:215 #: src/services/wrappers/python/pythonwrapper.cpp:169 msgid "Cannot import module" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:224 msgid "Cannot get dictionary of custom broker module" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:233 msgid "Cannot find custom broker class" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:241 #, c-format msgid "%s class is not an object" msgstr "" #: 
src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:247 msgid "Cannot create UserConfig argument" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:255 msgid "Cannot convert UserConfig to Python object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:263 #: src/services/wrappers/python/pythonwrapper.cpp:258 msgid "Cannot create argument of the constructor" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:272 #: src/services/wrappers/python/pythonwrapper.cpp:266 msgid "Cannot create instance of Python class" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:278 #, c-format msgid "Python broker constructor called (%d)" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:302 #, c-format msgid "Python broker destructor called (%d)" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:311 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:328 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:361 msgid "Cannot create ExecutionTarget argument" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:319 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:336 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:369 #, c-format msgid "Cannot convert ExecutionTarget (%s) to python object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:393 msgid "Cannot create JobDescription argument" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:401 msgid "Cannot convert JobDescription to python object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:422 msgid "Do sorting using user created python broker" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:53 msgid "Cannot initialize ARCHERY domain name for query" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:60 msgid "Cannot create resolver from /etc/resolv.conf" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:68 msgid "Cannot query service endpoint TXT records from DNS" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:79 msgid "Cannot parse service endpoint TXT records." msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:121 #, c-format msgid "Wrong service record field \"%s\" found in the \"%s\"" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:126 #, c-format msgid "Malformed ARCHERY record found (endpoint url is not defined): %s" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:131 #, c-format msgid "Malformed ARCHERY record found (endpoint type is not defined): %s" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:135 #, c-format msgid "Found service endpoint %s (type %s)" msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:150 #, c-format msgid "" "Status for service endpoint \"%s\" is set to inactive in ARCHERY. Skipping." 
msgstr "" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginEMIR.cpp:101 #, c-format msgid "Found %u service endpoints from the index service at %s" msgstr "" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:102 msgid "Cleaning of UNICORE jobs is not supported" msgstr "" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:131 msgid "Canceling of UNICORE jobs is not supported" msgstr "" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:139 msgid "Renewal of UNICORE jobs is not supported" msgstr "" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:147 msgid "Resumation of UNICORE jobs is not supported" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:67 msgid "Creating a UNICORE client" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:90 src/services/a-rex/test.cpp:154 #: src/services/a-rex/test.cpp:227 src/services/a-rex/test.cpp:275 #: src/services/a-rex/test.cpp:323 src/services/a-rex/test.cpp:371 #: src/tests/client/test_ClientInterface.cpp:73 #: src/tests/client/test_ClientSAML2SSO.cpp:47 #: src/tests/client/test_ClientSAML2SSO.cpp:71 #: src/tests/count/test_client.cpp:64 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:85 #: src/tests/echo/echo_test4axis2c/test_client.cpp:56 #: src/tests/echo/test.cpp:62 src/tests/echo/test_client.cpp:72 #: src/tests/echo/test_clientinterface.cpp:67 #: src/tests/echo/test_clientinterface.cpp:107 #: src/tests/echo/test_clientinterface.cpp:136 #: src/tests/echo/test_clientinterface.py:19 msgid "Creating and sending request" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:182 msgid "Failed to find delegation credentials in client configuration" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:194 #: src/hed/acc/UNICORE/UNICOREClient.cpp:224 src/services/a-rex/test.cpp:88 msgid "Failed to initiate delegation" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:203 #: src/hed/acc/UNICORE/UNICOREClient.cpp:236 msgid "Submission request failed" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:239 msgid "Submission request succeed" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:241 msgid "There was no response to a submission request" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:248 msgid "A response to a submission request was not a SOAP message" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:255 #: src/hed/acc/UNICORE/UNICOREClient.cpp:336 #: src/hed/acc/UNICORE/UNICOREClient.cpp:414 #: src/hed/acc/UNICORE/UNICOREClient.cpp:527 #: src/hed/acc/UNICORE/UNICOREClient.cpp:603 #: src/hed/acc/UNICORE/UNICOREClient.cpp:677 msgid "There is no connection chain configured" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:276 #: src/hed/acc/UNICORE/UNICOREClient.cpp:348 #, c-format msgid "Submission returned failure: %s" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:277 #: src/hed/acc/UNICORE/UNICOREClient.cpp:349 #, c-format msgid "Submission failed, service returned: %s" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:284 msgid "Creating and sending a start job request" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:317 msgid "A start job request failed" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:320 msgid "A start job request succeeded" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:322 msgid "There was no response to a start job request" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:329 msgid "The response of a start job request was not a SOAP message" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:395 msgid "A status request failed" msgstr "" #: 
src/hed/acc/UNICORE/UNICOREClient.cpp:398 msgid "A status request succeeded" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:400 msgid "There was no response to a status request" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:408 msgid "The response of a status request was not a SOAP message" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:433 msgid "The job status could not be retrieved" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:444 msgid "Creating and sending an index service query" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:472 msgid "Creating and sending a service status request" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:508 msgid "A service status request failed" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:511 msgid "A service status request succeeded" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:513 msgid "There was no response to a service status request" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:520 msgid "The response of a service status request was not a SOAP message" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:537 msgid "The service status could not be retrieved" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:584 msgid "A job termination request failed" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:587 msgid "A job termination request succeeded" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:589 msgid "There was no response to a job termination request" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:596 msgid "The response of a job termination request was not a SOAP message" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:658 msgid "A job cleaning request failed" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:661 msgid "A job cleaning request succeeded" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:663 msgid "There was no response to a job cleaning request" msgstr "" #: src/hed/acc/UNICORE/UNICOREClient.cpp:670 msgid "The response of a job cleaning request was not a SOAP message" msgstr "" #: src/hed/acc/ldap/Extractor.h:22 #, c-format msgid "Extractor[%s] (%s): %s = %s" msgstr "" #: src/hed/acc/ldap/Extractor.h:113 src/hed/acc/ldap/Extractor.h:130 #, c-format msgid "Extractor[%s] (%s): %s contains %s" msgstr "" #: src/hed/acc/ldap/JobListRetrieverPluginLDAPGLUE2.cpp:54 #, c-format msgid "Adding endpoint '%s' with interface name %s" msgstr "" #: src/hed/acc/ldap/JobListRetrieverPluginLDAPNG.cpp:63 #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginBDII.cpp:43 #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginEGIIS.cpp:46 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE1.cpp:49 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE2.cpp:47 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPNG.cpp:59 msgid "Can't create information handle - is the ARC ldap DMC plugin available?" msgstr "" #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginBDII.cpp:85 msgid "Adding CREAM computing service" msgstr "" #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginEGIIS.cpp:79 #, c-format msgid "Unknown entry in EGIIS (%s)" msgstr "" #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginEGIIS.cpp:87 msgid "" "Entry in EGIIS is missing one or more of the attributes 'Mds-Service-type', " "'Mds-Service-hn', 'Mds-Service-port' and/or 'Mds-Service-Ldap-suffix'" msgstr "" #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE2.cpp:219 msgid "" "The \"FreeSlotsWithDuration\" attribute is wrongly formatted. Ignoring it."
msgstr "" #: src/hed/daemon/unix/daemon.cpp:74 #, c-format msgid "Daemonization fork failed: %s" msgstr "" #: src/hed/daemon/unix/daemon.cpp:82 msgid "Watchdog (re)starting application" msgstr "" #: src/hed/daemon/unix/daemon.cpp:87 #, c-format msgid "Watchdog fork failed: %s" msgstr "" #: src/hed/daemon/unix/daemon.cpp:94 msgid "Watchdog starting monitoring" msgstr "" #: src/hed/daemon/unix/daemon.cpp:120 #, c-format msgid "Watchdog detected application exit due to signal %u" msgstr "" #: src/hed/daemon/unix/daemon.cpp:122 #, c-format msgid "Watchdog detected application exited with code %u" msgstr "" #: src/hed/daemon/unix/daemon.cpp:124 msgid "Watchdog detected application exit" msgstr "" #: src/hed/daemon/unix/daemon.cpp:133 msgid "" "Watchdog exiting because application was purposely killed or exited itself" msgstr "" #: src/hed/daemon/unix/daemon.cpp:140 msgid "Watchdog detected application timeout or error - killing process" msgstr "" #: src/hed/daemon/unix/daemon.cpp:151 msgid "Watchdog failed to wait till application exited - sending KILL" msgstr "" #: src/hed/daemon/unix/daemon.cpp:163 msgid "Watchdog failed to kill application - giving up and exiting" msgstr "" #: src/hed/daemon/unix/daemon.cpp:184 msgid "Shutdown daemon" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:43 src/hed/daemon/win32/main_win32.cpp:27 msgid "shutdown" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:46 msgid "exit" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:92 src/hed/daemon/win32/main_win32.cpp:53 msgid "No server config part of config file" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:163 #: src/hed/daemon/win32/main_win32.cpp:91 #, c-format msgid "Unknown log level %s" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:173 #: src/hed/daemon/win32/main_win32.cpp:100 #, c-format msgid "Failed to open log file: %s" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:206 #: src/hed/daemon/win32/main_win32.cpp:126 msgid "Start foreground" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:255 #, c-format msgid "XML config file %s does not exist" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:259 src/hed/daemon/unix/main_unix.cpp:274 #: src/hed/daemon/win32/main_win32.cpp:154 #, c-format msgid "Failed to load service configuration from file %s" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:265 #, c-format msgid "INI config file %s does not exist" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:270 msgid "Error evaluating profile" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:286 msgid "Error loading generated configuration" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:292 msgid "Error evaulating profile" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:297 msgid "Failed to load service configuration from any default config file" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:358 msgid "Schema validation error" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:373 #: src/hed/daemon/win32/main_win32.cpp:159 msgid "Configuration root element is not " msgstr "" #: src/hed/daemon/unix/main_unix.cpp:389 #, c-format msgid "Cannot switch to group (%s)" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:399 #, c-format msgid "Cannot switch to primary group for user (%s)" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:404 #, c-format msgid "Cannot switch to user (%s)" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:422 #: src/hed/daemon/win32/main_win32.cpp:176 msgid "Failed to load service side MCCs" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:424 #: src/hed/daemon/win32/main_win32.cpp:178 src/services/a-rex/test.cpp:41 #: 
src/tests/count/test_service.cpp:32 src/tests/echo/test.cpp:30 #: src/tests/echo/test_service.cpp:29 msgid "Service side MCCs are loaded" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:431 #: src/hed/daemon/win32/main_win32.cpp:185 msgid "Unexpected arguments supplied" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:93 #: src/hed/dmc/acix/DataPointACIX.cpp:342 #: src/hed/dmc/rucio/DataPointRucio.cpp:220 #: src/hed/dmc/rucio/DataPointRucio.cpp:438 #, c-format msgid "No locations found for %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:121 #, c-format msgid "Found none or multiple URLs (%s) in ACIX URL: %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:131 #, c-format msgid "Cannot handle URL %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:138 #, c-format msgid "Could not resolve original source of %s: out of time" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:144 #, c-format msgid "Could not resolve original source of %s: %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:160 #, c-format msgid "Querying ACIX server at %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:161 #, c-format msgid "Calling acix with query %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:167 #, c-format msgid "Failed to query ACIX: %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:171 #: src/hed/dmc/acix/DataPointACIX.cpp:308 #, c-format msgid "Failed to parse ACIX response: %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:298 #, c-format msgid "ACIX returned %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:319 #, c-format msgid "No locations for %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:325 #, c-format msgid "%s: ACIX Location: %s" msgstr "" #: src/hed/dmc/acix/DataPointACIX.cpp:327 #, c-format msgid "%s: Location %s not accessible remotely, skipping" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:68 #, c-format msgid "" "checkingBartenderURL: Response:\n" "%s" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:154 src/hed/dmc/arc/DataPointARC.cpp:206 #: src/hed/dmc/arc/DataPointARC.cpp:278 src/hed/dmc/arc/DataPointARC.cpp:375 #: src/hed/dmc/arc/DataPointARC.cpp:548 src/hed/dmc/arc/DataPointARC.cpp:609 msgid "Hostname is not implemented for arc protocol" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:245 src/hed/dmc/arc/DataPointARC.cpp:330 #: src/hed/dmc/arc/DataPointARC.cpp:441 src/hed/dmc/arc/DataPointARC.cpp:534 #, c-format msgid "" "nd:\n" "%s" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:263 msgid "Not a collection" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:282 src/hed/dmc/srm/DataPointSRM.cpp:316 msgid "StartReading" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:338 src/hed/dmc/arc/DataPointARC.cpp:449 #: src/hed/dmc/arc/DataPointARC.cpp:601 #, c-format msgid "Received transfer URL: %s" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:378 src/hed/dmc/srm/DataPointSRM.cpp:518 msgid "StartWriting" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:490 #, c-format msgid "Calculated checksum: %s" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:554 msgid "Check" msgstr "" #: src/hed/dmc/arc/DataPointARC.cpp:648 #, c-format msgid "Deleted %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:86 #, c-format msgid "Unknown channel %s for stdio protocol" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:93 #, c-format msgid "Failed to open stdio channel %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:94 #, c-format msgid "Failed to open stdio channel %d" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:335 #, c-format msgid "fsync of file %s failed: %s" msgstr "" #:
src/hed/dmc/file/DataPointFile.cpp:340 #: src/hed/dmc/file/DataPointFile.cpp:347 #, c-format msgid "closing file %s failed: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:366 #, c-format msgid "File is not accessible: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:372 #: src/hed/dmc/file/DataPointFile.cpp:459 #, c-format msgid "Can't stat file: %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:420 #: src/hed/dmc/file/DataPointFile.cpp:426 #, c-format msgid "Can't stat stdio channel %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:474 #, c-format msgid "%s is not a directory" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:489 src/hed/dmc/s3/DataPointS3.cpp:461 #: src/hed/dmc/s3/DataPointS3.cpp:571 #, c-format msgid "Failed to read object %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:502 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:412 #, c-format msgid "File is not accessible %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:508 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:418 #, c-format msgid "Can't delete directory %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:515 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:425 #, c-format msgid "Can't delete file %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:525 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:335 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1470 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:440 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:460 #: src/services/a-rex/jura/JobLogFile.cpp:657 #: src/services/a-rex/jura/JobLogFile.cpp:1274 #, c-format msgid "Creating directory %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:533 src/hed/dmc/srm/DataPointSRM.cpp:160 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:474 #, c-format msgid "Renaming %s to %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:535 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:483 #, c-format msgid "Can't rename file %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:566 #, c-format msgid "Failed to open %s for reading: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:581 #: src/hed/dmc/file/DataPointFile.cpp:719 #, c-format msgid "Failed to switch user id to %d/%d" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:587 #, c-format msgid "Failed to create/open file %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:603 msgid "Failed to create thread" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:683 #, c-format msgid "Invalid url: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:692 src/hed/libs/data/FileCache.cpp:603 #, c-format msgid "Failed to create directory %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:708 #: src/hed/dmc/file/DataPointFile.cpp:727 #, c-format msgid "Failed to create file %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:739 #, c-format msgid "setting file %s to size %llu" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:759 #, c-format msgid "Failed to preallocate space for %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:800 src/hed/libs/data/FileCache.cpp:981 #, c-format msgid "Failed to clean up file %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:809 #, c-format msgid "Error during file validation. 
Can't stat file %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:813 #, c-format msgid "" "Error during file validation: Local file size %llu does not match source " "file size %llu for file %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:53 #, c-format msgid "Using proxy %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:54 #, c-format msgid "Using key %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:55 #, c-format msgid "Using cert %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:113 msgid "Locations are missing in destination LFC URL" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:119 #, c-format msgid "Duplicate replica found in LFC: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:121 #, c-format msgid "Adding location: %s - %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:129 #: src/hed/libs/data/DataPointIndex.cpp:161 #, c-format msgid "Add location: url: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:130 #: src/hed/libs/data/DataPointIndex.cpp:162 #, c-format msgid "Add location: metadata: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:150 #: src/hed/dmc/gfal/DataPointGFAL.cpp:310 #, c-format msgid "gfal_open failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:163 #: src/hed/dmc/gfal/DataPointGFAL.cpp:223 #: src/hed/dmc/gfal/DataPointGFAL.cpp:249 #: src/hed/dmc/gfal/DataPointGFAL.cpp:324 #: src/hed/dmc/gfal/DataPointGFAL.cpp:403 #: src/hed/dmc/gfal/DataPointGFAL.cpp:430 #, c-format msgid "gfal_close failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:195 #, c-format msgid "gfal_read failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:237 msgid "StopReading starts waiting for transfer_condition." msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:239 msgid "StopReading finished waiting for transfer_condition." msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:271 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:66 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:71 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:42 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:47 #, c-format msgid "No locations defined for %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:278 #, c-format msgid "Failed to set LFC replicas: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:304 #, c-format msgid "gfal_mkdir failed (%s), trying to write anyway" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:359 #, c-format msgid "DataPointGFAL::write_file got position %d and offset %d, has to seek" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:388 #, c-format msgid "gfal_write failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:418 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:302 msgid "StopWriting starts waiting for transfer_condition." msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:420 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:304 msgid "StopWriting finished waiting for transfer_condition." 
msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:451 #, c-format msgid "gfal_stat failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:496 #, c-format msgid "gfal_listxattr failed, no replica information can be obtained: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:537 #, c-format msgid "gfal_opendir failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:549 #, c-format msgid "List will stat the URL %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:556 #, c-format msgid "gfal_closedir failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:584 #, c-format msgid "gfal_rmdir failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:587 #, c-format msgid "gfal_unlink failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:604 #, c-format msgid "gfal_mkdir failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:619 #, c-format msgid "gfal_rename failed: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:19 #, c-format msgid "Failed to obtain bytes transferred: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:42 #, c-format msgid "Failed to get initiate GFAL2 parameter handle: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:49 #, c-format msgid "Failed to get initiate new GFAL2 context: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:56 #, c-format msgid "Failed to set GFAL2 monitor callback: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:64 #, c-format msgid "Failed to set overwrite option in GFAL2: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:72 #, c-format msgid "Failed to set GFAL2 transfer timeout, will use default: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:84 msgid "Transfer failed" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:92 msgid "Transfer succeeded" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:38 msgid "ftp_complete_callback: success" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:44 #, c-format msgid "ftp_complete_callback: error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:60 msgid "ftp_check_callback" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:62 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:90 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:116 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:135 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:305 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:340 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:678 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:847 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:879 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:913 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1052 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1104 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1114 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1122 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1130 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1138 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1144 #: src/services/gridftpd/commands.cpp:1225 #: src/services/gridftpd/dataread.cpp:76 #: src/services/gridftpd/dataread.cpp:173 #: src/services/gridftpd/datawrite.cpp:59 #: src/services/gridftpd/datawrite.cpp:146 #, c-format msgid "Globus error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:73 msgid "Excessive data received while checking file access" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:89 msgid "Registration of Globus FTP buffer failed - cancel check" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:115 msgid "check_ftp: globus_ftp_client_size failed" msgstr "" #: 
src/hed/dmc/gridftp/DataPointGridFTP.cpp:119 msgid "check_ftp: timeout waiting for size" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:124 msgid "check_ftp: failed to get file's size" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:127 #, c-format msgid "check_ftp: obtained size: %lli" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:134 msgid "check_ftp: globus_ftp_client_modification_time failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:138 msgid "check_ftp: timeout waiting for modification_time" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:143 msgid "check_ftp: failed to get file's modification time" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:148 #, c-format msgid "check_ftp: obtained modification date: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:167 msgid "check_ftp: globus_ftp_client_get failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:174 msgid "check_ftp: globus_ftp_client_register_read" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:185 msgid "check_ftp: timeout waiting for partial get" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:215 #, c-format msgid "File delete failed, attempting directory delete for %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:225 msgid "delete_ftp: globus_ftp_client_delete failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:231 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:252 msgid "delete_ftp: timeout waiting for delete" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:246 msgid "delete_ftp: globus_ftp_client_rmdir failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:301 #, c-format msgid "mkdir_ftp: making %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:309 msgid "mkdir_ftp: timeout waiting for mkdir" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:344 msgid "Timeout waiting for mkdir" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:370 msgid "start_reading_ftp" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:374 msgid "start_reading_ftp: globus_ftp_client_get" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:388 msgid "start_reading_ftp: globus_ftp_client_get failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:399 msgid "start_reading_ftp: globus_thread_create failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:418 msgid "stop_reading_ftp: aborting connection" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:425 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:647 #, c-format msgid "Failed to abort transfer of ftp file: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:426 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:648 msgid "Assuming transfer is already aborted or failed." 
msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:433 msgid "stop_reading_ftp: waiting for transfer to finish" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:435 #, c-format msgid "stop_reading_ftp: exiting: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:449 msgid "ftp_read_thread: get and register buffers" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:455 #, c-format msgid "ftp_read_thread: for_read failed - aborting: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:465 #, c-format msgid "ftp_read_thread: data callback failed - aborting: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:477 #, c-format msgid "ftp_read_thread: Globus error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:490 #, c-format msgid "ftp_read_thread: too many registration failures - abort: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:495 #, c-format msgid "ftp_read_thread: failed to register Globus buffer - will try later: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:508 msgid "ftp_read_thread: waiting for eof" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:512 msgid "ftp_read_thread: waiting for buffers released" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:516 msgid "ftp_read_thread: failed to release buffers - leaking" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:521 msgid "ftp_read_thread: exiting" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:539 #, c-format msgid "ftp_read_callback: failure: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:542 msgid "ftp_read_callback: success" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:558 msgid "Failed to get ftp file" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:594 msgid "start_writing_ftp: mkdir" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:597 msgid "start_writing_ftp: mkdir failed - still trying to write" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:599 msgid "start_writing_ftp: put" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:613 msgid "start_writing_ftp: put failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:623 msgid "start_writing_ftp: globus_thread_create failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:640 msgid "StopWriting: aborting connection" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:664 #, c-format msgid "StopWriting: Calculated checksum %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:668 #, c-format msgid "StopWriting: looking for checksum of %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:677 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:912 msgid "list_files_ftp: globus_ftp_client_cksm failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:681 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:916 msgid "list_files_ftp: timeout waiting for cksum" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:688 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:923 msgid "list_files_ftp: no checksum information possible" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:691 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:926 #, c-format msgid "list_files_ftp: checksum %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:694 msgid "" "Checksum type returned by server is different to requested type, cannot " "compare" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:696 #, c-format msgid "Calculated checksum %s matches checksum reported by server" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:699 #, c-format msgid "" "Checksum 
mismatch between calculated checksum %s and checksum reported by " "server %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:721 msgid "ftp_write_thread: get and register buffers" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:725 msgid "ftp_write_thread: for_write failed - aborting" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:743 msgid "ftp_write_thread: data callback failed - aborting" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:759 msgid "ftp_write_thread: waiting for eof" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:763 msgid "ftp_write_thread: waiting for buffers released" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:771 msgid "ftp_write_thread: failed to release buffers - leaking" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:776 msgid "ftp_write_thread: exiting" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:799 #, c-format msgid "ftp_write_callback: failure: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:802 #, c-format msgid "ftp_write_callback: success %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:817 msgid "Failed to store ftp file" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:825 msgid "ftp_put_complete_callback: success" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:841 #, c-format msgid "list_files_ftp: looking for size of %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:845 msgid "list_files_ftp: globus_ftp_client_size failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:851 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:852 msgid "list_files_ftp: timeout waiting for size" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:858 msgid "list_files_ftp: failed to get file's size" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:870 #, c-format msgid "list_files_ftp: looking for modification time of %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:876 msgid "list_files_ftp: globus_ftp_client_modification_time failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:883 msgid "list_files_ftp: timeout waiting for modification_time" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:891 msgid "list_files_ftp: failed to get file's modification time" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:903 #, c-format msgid "list_files_ftp: looking for checksum of %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:942 #, c-format msgid "Failed to obtain stat from FTP: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:948 msgid "No results returned from stat" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:954 #, c-format msgid "Wrong number of objects (%i) for stat from ftp: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:968 #, c-format msgid "Unexpected path %s returned from server" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1007 #, c-format msgid "Failed to obtain listing from FTP: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1050 msgid "Rename: globus_ftp_client_move failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1056 msgid "Rename: timeout waiting for operation to complete" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1103 msgid "init_handle: globus_ftp_client_handleattr_init failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1112 msgid "init_handle: globus_ftp_client_handleattr_set_gridftp2 failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1121 msgid "init_handle: globus_ftp_client_handle_init failed" msgstr "" #: 
src/hed/dmc/gridftp/DataPointGridFTP.cpp:1128 msgid "init_handle: globus_ftp_client_operationattr_init failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1136 msgid "init_handle: globus_ftp_client_operationattr_set_allow_ipv6 failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1142 msgid "init_handle: globus_ftp_client_operationattr_set_delayed_pasv failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1190 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1218 #, c-format msgid "globus_ftp_client_operationattr_set_authorization: error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1217 msgid "Failed to set credentials for GridFTP transfer" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1223 msgid "Using secure data transfer" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1228 msgid "Using insecure data transfer" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1255 msgid "~DataPoint: destroy ftp_handle" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1258 msgid "~DataPoint: destroy ftp_handle failed - retrying" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1276 msgid "~DataPoint: failed to destroy ftp_handle - leaking" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1296 msgid "" "Missing reference to factory and/or module. It is unsafe to use Globus in " "non-persistent mode - (Grid)FTP code is disabled. Report to developers." msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:224 src/hed/dmc/gridftp/Lister.cpp:292 #: src/hed/dmc/gridftp/Lister.cpp:387 src/hed/dmc/gridftp/Lister.cpp:737 #: src/hed/dmc/gridftp/Lister.cpp:775 #, c-format msgid "Failure: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:226 src/hed/dmc/gridftp/Lister.cpp:246 #: src/hed/dmc/gridftp/Lister.cpp:471 src/hed/dmc/gridftp/Lister.cpp:478 #: src/hed/dmc/gridftp/Lister.cpp:500 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:163 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:196 #, c-format msgid "Response: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:291 msgid "Error getting list of files (in list)" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:293 msgid "Assuming - file not found" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:310 #, c-format msgid "list record: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:365 msgid "Failed reading list of files" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:401 msgid "Failed reading data" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:429 #, c-format msgid "Command: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:433 src/hed/dmc/gridftp/Lister.cpp:474 #: src/hed/mcc/http/PayloadHTTP.cpp:991 msgid "Memory allocation error" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:441 #, c-format msgid "%s failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:445 msgid "Command is being sent" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:450 msgid "Waiting for response" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:455 msgid "Callback got failure" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:541 msgid "Failed in globus_cond_init" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:545 msgid "Failed in globus_mutex_init" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:552 msgid "Failed allocating memory for handle" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:557 msgid "Failed in globus_ftp_control_handle_init" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:565 msgid "Failed to enable IPv6" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:576 src/services/gridftpd/commands.cpp:983 msgid "Closing connection" msgstr "" #: 
src/hed/dmc/gridftp/Lister.cpp:583 src/hed/dmc/gridftp/Lister.cpp:598 msgid "Timeout waiting for Globus callback - leaking connection" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:608 msgid "Closed successfully" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:610 msgid "Closing may have failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:637 msgid "Waiting for globus handle to settle" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:642 #, c-format msgid "Handle is not in proper state %u/%u" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:648 msgid "Globus handle is stuck" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:664 #, c-format msgid "Failed destroying handle: %s. Can't handle such situation." msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:687 #, c-format msgid "EPSV failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:691 msgid "EPSV failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:698 #, c-format msgid "PASV failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:702 msgid "PASV failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:735 msgid "Failed to apply local address to data connection" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:749 msgid "Can't parse host and/or port in response to EPSV/PASV" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:754 #, c-format msgid "Data channel: %d.%d.%d.%d:%d" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:769 #, c-format msgid "Data channel: [%s]:%d" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:773 msgid "Obtained host and address are not acceptable" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:783 msgid "Failed to open data channel" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:801 #, c-format msgid "Unsupported protocol in url %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:813 msgid "Reusing connection" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:837 #, c-format msgid "Failed connecting to server %s:%d" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:843 #, c-format msgid "Failed to connect to server %s:%d" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:859 msgid "Missing authentication information" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:868 src/hed/dmc/gridftp/Lister.cpp:882 #, c-format msgid "Bad authentication information: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:891 src/hed/dmc/gridftp/Lister.cpp:906 #, c-format msgid "Failed authenticating: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:898 msgid "Failed authenticating" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:933 src/hed/dmc/gridftp/Lister.cpp:1089 #, c-format msgid "DCAU failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:937 src/hed/dmc/gridftp/Lister.cpp:1094 msgid "DCAU failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:957 msgid "MLST is not supported - trying LIST" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:973 #, c-format msgid "Immediate completion expected: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:977 msgid "Immediate completion expected" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:990 #, c-format msgid "Missing information in reply: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1024 #, c-format msgid "Missing final reply: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1048 #, c-format msgid "Unexpected immediate completion: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1060 #, c-format msgid "LIST/MLST failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1065 msgid "LIST/MLST failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1115 msgid "MLSD is not supported - trying NLST" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1129 #, c-format msgid "Immediate completion: %s" 
msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1137 #, c-format msgid "NLST/MLSD failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1143 msgid "NLST/MLSD failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1164 #, c-format msgid "Data transfer aborted: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1169 msgid "Data transfer aborted" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1181 msgid "Failed to transfer data" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:388 #: src/hed/dmc/http/DataPointHTTP.cpp:517 #: src/hed/dmc/http/DataPointHTTP.cpp:598 #: src/hed/dmc/http/DataPointHTTP.cpp:1000 #: src/hed/dmc/http/DataPointHTTP.cpp:1141 #: src/hed/dmc/http/DataPointHTTP.cpp:1286 #, c-format msgid "Redirecting to %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:670 #, c-format msgid "Stat: obtained size %llu" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:674 #, c-format msgid "Stat: obtained modification time %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:903 #, c-format msgid "Check: obtained size %llu" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:905 #, c-format msgid "Check: obtained modification time %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:1017 #: src/hed/dmc/http/DataPointHTTP.cpp:1161 #, c-format msgid "HTTP failure %u - %s" msgstr "" #: src/hed/dmc/ldap/DataPointLDAP.cpp:36 msgid "" "Missing reference to factory and/or module. Currently safe unloading of LDAP " "DMC is not supported. Report to developers." msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:175 msgid "SASL Interaction" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:223 #, c-format msgid "Challenge: %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:227 #, c-format msgid "Default: %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:303 #, c-format msgid "LDAPQuery: Initializing connection to %s:%d" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:307 #, c-format msgid "LDAP connection already open to %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:325 #, c-format msgid "Could not open LDAP connection to %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:346 #, c-format msgid "Failed to create ldap bind thread (%s)" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:353 #, c-format msgid "Ldap bind timeout (%s)" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:360 #, c-format msgid "Failed to bind to ldap server (%s)" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:381 #, c-format msgid "Could not set LDAP network timeout (%s)" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:389 #, c-format msgid "Could not set LDAP timelimit (%s)" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:396 #, c-format msgid "Could not set LDAP protocol version (%s)" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:472 #, c-format msgid "LDAPQuery: Querying %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:474 #, c-format msgid " base dn: %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:476 #, c-format msgid " filter: %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:478 msgid " attributes:" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:481 #: src/services/gridftpd/misc/ldapquery.cpp:399 #, c-format msgid " %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:526 src/hed/dmc/ldap/LDAPQuery.cpp:598 #, c-format msgid "%s (%s)" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:550 #, c-format msgid "LDAPQuery: Getting results from %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:553 #, c-format msgid "Error: no LDAP query started to %s" msgstr "" #: src/hed/dmc/ldap/LDAPQuery.cpp:593 #, c-format msgid "LDAP query timed out: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:23 #, c-format msgid 
"Replacing existing token for %s in Rucio token cache" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:36 #, c-format msgid "Found existing token for %s in Rucio token cache with expiry time %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:39 #, c-format msgid "Rucio token for %s has expired or is about to expire" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:101 #, c-format msgid "Extracted nickname %s from credentials to use for RUCIO_ACCOUNT" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:104 msgid "Failed to extract VOMS nickname from proxy" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:106 #, c-format msgid "Using Rucio account %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:146 #, c-format msgid "" "Bad path for %s: Rucio supports read/write at /objectstores and read-only " "at /replicas" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:162 #, c-format msgid "Can't handle URL %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:303 #, c-format msgid "Acquired auth token for %s: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:357 #, c-format msgid "Rucio returned %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:382 #, c-format msgid "Failed to parse Rucio response: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:388 #, c-format msgid "Filename not returned in Rucio response: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:394 #, c-format msgid "Unexpected name returned in Rucio response: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:400 #, c-format msgid "No RSE information returned in Rucio response: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:423 #, c-format msgid "No filesize information returned in Rucio response for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:426 #, c-format msgid "%s: size %llu" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:430 #, c-format msgid "No checksum information returned in Rucio response for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:433 #, c-format msgid "%s: checksum %s" msgstr "" #: src/hed/dmc/s3/DataPointS3.cpp:648 #, c-format msgid "Failed to write object %s: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:67 #, c-format msgid "Check: looking for metadata: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:79 #, c-format msgid "Check: obtained size: %lli" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:85 #, c-format msgid "Check: obtained checksum: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:89 #, c-format msgid "Check: obtained modification date: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:93 msgid "Check: obtained access latency: low (ONLINE)" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:97 msgid "Check: obtained access latency: high (NEARLINE)" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:119 #, c-format msgid "Remove: deleting: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:139 #, c-format msgid "Creating directory: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:190 src/hed/dmc/srm/DataPointSRM.cpp:243 msgid "Calling PrepareReading when request was already prepared!" 
msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:212 #, c-format msgid "File %s is NEARLINE, will make request to bring online" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:222 #, c-format msgid "Bring online request %s is still in queue, should wait" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:227 #, c-format msgid "Bring online request %s finished successfully, file is now ONLINE" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:234 #, c-format msgid "" "Bad logic for %s - bringOnline returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:263 src/hed/dmc/srm/DataPointSRM.cpp:424 msgid "None of the requested transfer protocols are supported" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:278 #, c-format msgid "Get request %s is still in queue, should wait %i seconds" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:286 src/hed/dmc/srm/DataPointSRM.cpp:488 #, c-format msgid "Checking URL returned by SRM: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:301 src/hed/dmc/srm/DataPointSRM.cpp:503 #, c-format msgid "SRM returned no useful Transfer URLs: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:308 #, c-format msgid "" "Bad logic for %s - getTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:318 msgid "StartReading: File was not prepared properly" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:332 src/hed/dmc/srm/DataPointSRM.cpp:534 #, c-format msgid "TURL %s cannot be handled" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:340 src/hed/dmc/srm/DataPointSRM.cpp:542 #, c-format msgid "Redirecting to new URL: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:404 msgid "Calling PrepareWriting when request was already prepared!" 
msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:435 msgid "No space token specified" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:441 msgid "Warning: Using SRM protocol v1 which does not support space tokens" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:444 #, c-format msgid "Using space token description %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:450 #, c-format msgid "Error looking up space tokens matching description %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:457 #, c-format msgid "No space tokens found matching description %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:464 #, c-format msgid "Using space token %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:480 #, c-format msgid "Put request %s is still in queue, should wait %i seconds" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:510 #, c-format msgid "" "Bad logic for %s - putTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:520 msgid "StartWriting: File was not prepared properly" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:593 #, c-format msgid "FinishWriting: looking for metadata: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:610 #, c-format msgid "FinishWriting: obtained checksum: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:613 #, c-format msgid "" "Calculated/supplied transfer checksum %s matches checksum reported by SRM " "destination %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:616 #, c-format msgid "" "Checksum mismatch between calculated/supplied checksum (%s) and checksum " "reported by SRM destination (%s)" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:619 #, c-format msgid "" "Checksum type of SRM (%s) and calculated/supplied checksum (%s) differ, " "cannot compare" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:620 src/hed/dmc/srm/DataPointSRM.cpp:621 msgid "No checksum information from server" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:622 src/hed/dmc/srm/DataPointSRM.cpp:623 msgid "No checksum verification possible" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:629 msgid "Failed to release completed request" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:673 src/hed/dmc/srm/DataPointSRM.cpp:740 #, c-format msgid "ListFiles: looking for metadata: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:806 #, c-format msgid "plugin for transport protocol %s is not installed" msgstr "" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:55 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:94 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:146 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:185 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:225 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:263 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:307 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:369 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:442 msgid "SRM did not return any information" msgstr "" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:320 #, c-format msgid "File could not be moved to Running state: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:376 msgid "SRM did not return any useful information" msgstr "" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:454 msgid "File could not be moved to Done state" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:92 msgid "Could not determine version of server" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:98 #, c-format msgid "Server SRM version: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:103 #, c-format msgid "Server implementation: %s" msgstr "" #: 
src/hed/dmc/srm/srmclient/SRM22Client.cpp:140 #, c-format msgid "Adding space token %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:167 msgid "No request tokens found" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:180 #, c-format msgid "Adding request token %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:241 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:646 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:832 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1389 #, c-format msgid "%s: File request %s in SRM queue. Sleeping for %i seconds" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:279 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:331 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:702 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:768 #, c-format msgid "File is ready! TURL is %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:363 #, c-format msgid "Setting userRequestDescription to %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:418 #, c-format msgid "%s: Bring online request %s in SRM queue. Sleeping for %i seconds" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:461 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1164 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1198 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1232 msgid "No request token specified!" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:528 msgid "Request is reported as ABORTED, but all files are done" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:534 msgid "Request is reported as ABORTED, since it was cancelled" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:540 #, c-format msgid "Request is reported as ABORTED. Reason: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:677 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:749 #, c-format msgid "Path %s is invalid, creating required directories" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:682 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:754 #, c-format msgid "Error creating required directories for %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:855 msgid "Too many files in one request - please try again with fewer files" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:903 msgid "" "Directory size is too large to list in one call, will have to call multiple " "times" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:940 msgid "" "Failure in parsing response from server - some information may be inaccurate" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:946 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:635 #: src/services/a-rex/jura/ApelDestination.cpp:215 #: src/services/a-rex/jura/LutsDestination.cpp:192 #: src/services/gridftpd/misc/ldapquery.cpp:183 #: src/services/gridftpd/misc/ldapquery.cpp:186 #: src/services/gridftpd/misc/ldapquery.cpp:392 #: src/services/gridftpd/misc/ldapquery.cpp:623 #: src/services/gridftpd/misc/ldapquery.cpp:632 #, c-format msgid "%s: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:979 #, c-format msgid "" "Directory size is larger than %i files, will have to call multiple times" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1189 #, c-format msgid "Files associated with request token %s released successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1223 #, c-format msgid "Files associated with request token %s put done successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1258 #, c-format msgid "Files associated with request token %s aborted successfully" msgstr "" 
#: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1275 #, c-format msgid "" "Failed to find metadata info on %s for determining file or directory delete" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1281 msgid "Type is file, calling srmRm" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1285 msgid "Type is dir, calling srmRmDir" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1289 msgid "File type is not available, attempting file delete" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1292 msgid "File delete failed, attempting directory delete" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1317 #, c-format msgid "File %s removed successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1344 #, c-format msgid "Directory %s removed successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1459 #, c-format msgid "Checking for existence of %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1462 #, c-format msgid "File already exists: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1499 #, c-format msgid "Error creating directory %s: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:82 #, c-format msgid "Attempting to contact %s on port %i" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:88 #, c-format msgid "Storing port %i for %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:102 #, c-format msgid "No port succeeded for %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:112 #, c-format msgid "URL %s disagrees with stored SRM info, testing new info" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:118 #, c-format msgid "Replacing old SRM info with new for URL %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:140 #, c-format msgid "SOAP request: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:147 #: src/hed/dmc/srm/srmclient/SRMClient.cpp:176 #, c-format msgid "SOAP fault: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:148 msgid "Reconnecting" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:158 #, c-format msgid "SRM Client status: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:171 #: src/hed/identitymap/ArgusPDPClient.cpp:250 #, c-format msgid "SOAP response: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:76 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:162 #, c-format msgid "Failed to acquire lock on file %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:81 #, c-format msgid "Error reading info from file %s:%s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:95 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:187 #, c-format msgid "Bad or old format detected in file %s, in line %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:100 #, c-format msgid "Cannot convert string %s to int in line %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:203 #, c-format msgid "Error writing srm info file %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:91 #, c-format msgid "Reading %u bytes from byte %llu" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:93 #, c-format msgid "Read %i bytes" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:124 #, c-format msgid "Could not open file %s for reading: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:139 #, c-format msgid "Unable to find file size of %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:203 #, c-format msgid "DataPointXrootd::write_file got position %d and offset %d, has to seek" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:226 #, c-format msgid "xrootd write 
failed: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:235 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:309 #, c-format msgid "xrootd close failed: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:258 #, c-format msgid "Failed to open %s, trying to create parent directories" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:271 #, c-format msgid "xrootd open failed: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:285 #, c-format msgid "close failed: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:327 #, c-format msgid "Read access not allowed for %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:346 #, c-format msgid "Could not stat file %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:381 #, c-format msgid "Failed to open directory %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:396 #, c-format msgid "Error while reading dir %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:446 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:464 #, c-format msgid "Error creating required dirs: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:158 msgid "PDPD location is missing" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:161 #, c-format msgid "PDPD location: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:166 #: src/hed/identitymap/ArgusPEPClient.cpp:129 msgid "Conversion mode is set to SUBJECT" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:169 #: src/hed/identitymap/ArgusPEPClient.cpp:132 msgid "Conversion mode is set to CREAM" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:172 #: src/hed/identitymap/ArgusPEPClient.cpp:135 msgid "Conversion mode is set to EMI" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:175 #: src/hed/identitymap/ArgusPEPClient.cpp:138 #, c-format msgid "Unknown conversion mode %s, using default" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:242 #, c-format msgid "Failed to contact PDP server: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:245 #, c-format msgid "There was no SOAP response return from PDP server: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:360 #: src/hed/identitymap/ArgusPEPClient.cpp:286 #, c-format msgid "Have %i requests to process" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:362 msgid "Creating a client to Argus PDP service" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:375 #, c-format msgid "XACML authorisation request: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:386 #, c-format msgid "XACML authorisation response: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:426 #: src/hed/identitymap/ArgusPEPClient.cpp:333 #, c-format msgid "%s is not authorized to do action %s in resource %s " msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:429 #: src/hed/identitymap/ArgusPDPClient.cpp:434 #: src/hed/identitymap/ArgusPEPClient.cpp:336 msgid "Not authorized" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:439 #: src/hed/identitymap/ArgusPEPClient.cpp:341 #: src/hed/identitymap/IdentityMap.cpp:219 #: src/hed/shc/legacy/LegacyMap.cpp:215 #, c-format msgid "Grid identity is mapped to local identity '%s'" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:566 #: src/hed/identitymap/ArgusPEPClient.cpp:655 msgid "Doing CREAM request" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:580 #: src/hed/identitymap/ArgusPDPClient.cpp:748 #: src/hed/identitymap/ArgusPEPClient.cpp:683 #, c-format msgid "Adding profile-id value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:592 #: 
src/hed/identitymap/ArgusPDPClient.cpp:759 #: src/hed/identitymap/ArgusPEPClient.cpp:694 #, c-format msgid "Adding subject-id value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:600 #: src/hed/identitymap/ArgusPDPClient.cpp:767 #: src/hed/identitymap/ArgusPEPClient.cpp:704 #, c-format msgid "Adding subject-issuer value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:607 #: src/hed/identitymap/ArgusPEPClient.cpp:713 #, c-format msgid "Adding virtual-organization value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:620 #: src/hed/identitymap/ArgusPEPClient.cpp:730 #, c-format msgid "Adding FQAN value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:629 #: src/hed/identitymap/ArgusPEPClient.cpp:739 #, c-format msgid "Adding FQAN/primary value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:639 #: src/hed/identitymap/ArgusPEPClient.cpp:750 #, c-format msgid "Adding cert chain value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:648 #: src/hed/identitymap/ArgusPDPClient.cpp:840 #, c-format msgid "Adding resource-id value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:662 #: src/hed/identitymap/ArgusPDPClient.cpp:863 #: src/hed/identitymap/ArgusPEPClient.cpp:775 #, c-format msgid "Adding action-id value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:666 #: src/hed/identitymap/ArgusPEPClient.cpp:786 #, c-format msgid "CREAM request generation failed: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:732 msgid "Doing EMI request" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:774 #, c-format msgid "Adding Virtual Organization value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:797 #, c-format msgid "Adding VOMS group value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:803 #, c-format msgid "Adding VOMS primary group value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:822 #, c-format msgid "Adding VOMS role value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:829 #, c-format msgid "Adding VOMS primary role value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:846 #, c-format msgid "Adding resource-owner value: %s" msgstr "" #: src/hed/identitymap/ArgusPDPClient.cpp:867 #, c-format msgid "EMI request generation failed: %s" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:119 msgid "PEPD location is missing" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:122 #, c-format msgid "PEPD location: %s" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:126 msgid "Conversion mode is set to DIRECT" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:331 #, c-format msgid "" "Not authorized according to request:\n" "%s" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:361 msgid "Subject of request is null \n" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:366 #, c-format msgid "Can not create XACML SubjectAttribute: %s\n" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:375 msgid "Can not create XACML Resource \n" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:381 #, c-format msgid "Can not create XACML ResourceAttribute: %s\n" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:390 msgid "Can not create XACML Action\n" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:397 #, c-format msgid "Can not create XACML ActionAttribute: %s\n" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:407 msgid "Can not create XACML request\n" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:539 #, c-format msgid "Converting to CREAM action - namespace: %s, 
operation: %s" msgstr "" #: src/hed/identitymap/ArgusPEPClient.cpp:760 #, c-format msgid "Adding resoure-id value: %s" msgstr "" #: src/hed/identitymap/IdentityMap.cpp:196 #, c-format msgid "PDP: %s can not be loaded" msgstr "" #: src/hed/libs/common/ArcLocation.cpp:68 #, c-format msgid "" "Can not determine the install location. Using %s. Please set ARC_LOCATION if " "this is not correct." msgstr "" #: src/hed/libs/common/DateTime.cpp:86 src/hed/libs/common/DateTime.cpp:631 #: src/hed/libs/common/StringConv.h:25 msgid "Empty string" msgstr "" #: src/hed/libs/common/DateTime.cpp:107 #, c-format msgid "Can not parse date: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:130 #, c-format msgid "Can not parse time: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:160 #, c-format msgid "Can not parse time zone offset: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:180 src/hed/libs/common/DateTime.cpp:199 #: src/hed/libs/common/DateTime.cpp:252 src/hed/libs/common/DateTime.cpp:291 #, c-format msgid "Illegal time format: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:230 src/hed/libs/common/DateTime.cpp:283 #, c-format msgid "Can not parse month: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:647 src/hed/libs/common/DateTime.cpp:688 #, c-format msgid "Invalid ISO duration format: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:752 #, c-format msgid "Invalid period string: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:874 msgid "hour" msgid_plural "hours" msgstr[0] "" msgstr[1] "" #: src/hed/libs/common/DateTime.cpp:880 msgid "minute" msgid_plural "minutes" msgstr[0] "" msgstr[1] "" #: src/hed/libs/common/DateTime.cpp:886 msgid "second" msgid_plural "seconds" msgstr[0] "" msgstr[1] "" #: src/hed/libs/common/FileLock.cpp:48 msgid "Cannot determine hostname from gethostname()" msgstr "" #: src/hed/libs/common/FileLock.cpp:97 #, c-format msgid "EACCES Error opening lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:102 #, c-format msgid "Error opening lock file %s in initial check: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:109 #, c-format msgid "Error creating temporary file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:118 #, c-format msgid "Could not create link to lock file %s as it already exists" msgstr "" #: src/hed/libs/common/FileLock.cpp:129 #, c-format msgid "Could not create lock file %s as it already exists" msgstr "" #: src/hed/libs/common/FileLock.cpp:133 #, c-format msgid "Error creating lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:138 #, c-format msgid "Error writing to lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:146 #, c-format msgid "Error linking tmp file %s to lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:155 #, c-format msgid "Error in lock file %s, even though linking did not return an error" msgstr "" #: src/hed/libs/common/FileLock.cpp:164 #, c-format msgid "%li seconds since lock file %s was created" msgstr "" #: src/hed/libs/common/FileLock.cpp:167 #, c-format msgid "Timeout has expired, will remove lock file %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:171 #, c-format msgid "Failed to remove stale lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:183 #, c-format msgid "This process already owns the lock on %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:189 #, c-format msgid "" "The process owning the lock on %s is no longer running, will remove lock" msgstr "" #: src/hed/libs/common/FileLock.cpp:191 #, c-format msgid "Failed to remove file %s: %s" 
msgstr "" #: src/hed/libs/common/FileLock.cpp:200 #, c-format msgid "The file %s is currently locked with a valid lock" msgstr "" #: src/hed/libs/common/FileLock.cpp:215 #, c-format msgid "Failed to unlock file with lock %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:227 #, c-format msgid "Lock file %s doesn't exist" msgstr "" #: src/hed/libs/common/FileLock.cpp:229 #, c-format msgid "Error listing lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:235 #, c-format msgid "Found unexpected empty lock file %s. Must go back to acquire()" msgstr "" #: src/hed/libs/common/FileLock.cpp:241 #, c-format msgid "Error reading lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:245 #, c-format msgid "Error with formatting in lock file %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:255 #, c-format msgid "Lock %s is owned by a different host (%s)" msgstr "" #: src/hed/libs/common/FileLock.cpp:264 #, c-format msgid "Badly formatted pid %s in lock file %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:267 #, c-format msgid "Another process (%s) owns the lock on file %s" msgstr "" #: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(empty)" msgstr "" #: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(null)" msgstr "" #: src/hed/libs/common/Logger.cpp:60 msgid "Invalid log level. Using default " msgstr "" #: src/hed/libs/common/Logger.cpp:125 msgid "Invalid old log level. Using default " msgstr "" #: src/hed/libs/common/OptionParser.cpp:107 #, c-format msgid "Cannot parse integer value '%s' for -%c" msgstr "" #: src/hed/libs/common/OptionParser.cpp:265 msgid "Use -? to get usage description" msgstr "" #: src/hed/libs/common/OptionParser.cpp:342 msgid "Usage:" msgstr "" #: src/hed/libs/common/OptionParser.cpp:345 msgid "OPTION..." msgstr "" #: src/hed/libs/common/OptionParser.cpp:351 msgid "Help Options:" msgstr "" #: src/hed/libs/common/OptionParser.cpp:352 msgid "Show help options" msgstr "" #: src/hed/libs/common/OptionParser.cpp:354 msgid "Application Options:" msgstr "" #: src/hed/libs/common/Profile.cpp:199 src/hed/libs/common/Profile.cpp:273 #: src/hed/libs/common/Profile.cpp:404 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"inisections\" " "attribute cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:205 src/hed/libs/common/Profile.cpp:279 #: src/hed/libs/common/Profile.cpp:411 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initag\" attribute " "cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:419 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initype\" " "attribute cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:422 #, c-format msgid "" "Element \"%s\" in the profile ignored: the \"inidefaultvalue\" attribute " "cannot be specified when the \"inisections\" and \"initag\" attributes have " "not been specified." msgstr "" #: src/hed/libs/common/Profile.cpp:497 #, c-format msgid "" "In the configuration profile the 'initype' attribute on the \"%s\" element " "has a invalid value \"%s\"." 
msgstr "" #: src/hed/libs/common/StringConv.h:31 #, c-format msgid "Conversion failed: %s" msgstr "" #: src/hed/libs/common/StringConv.h:35 #, c-format msgid "Full string not used: %s" msgstr "" #: src/hed/libs/common/Thread.cpp:212 msgid "Maximum number of threads running - puting new request into queue" msgstr "" #: src/hed/libs/common/Thread.cpp:253 #, c-format msgid "Thread exited with Glib error: %s" msgstr "" #: src/hed/libs/common/Thread.cpp:255 #, c-format msgid "Thread exited with Glib exception: %s" msgstr "" #: src/hed/libs/common/Thread.cpp:257 #, c-format msgid "Thread exited with generic exception: %s" msgstr "" #: src/hed/libs/common/URL.cpp:121 #, c-format msgid "URL is not valid: %s" msgstr "" #: src/hed/libs/common/URL.cpp:192 #, c-format msgid "Illegal URL - path must be absolute: %s" msgstr "" #: src/hed/libs/common/URL.cpp:197 #, c-format msgid "Illegal URL - no hostname given: %s" msgstr "" #: src/hed/libs/common/URL.cpp:286 #, c-format msgid "Illegal URL - path must be absolute or empty: %s" msgstr "" #: src/hed/libs/common/URL.cpp:302 #, c-format msgid "Illegal URL - no closing ] for IPv6 address found: %s" msgstr "" #: src/hed/libs/common/URL.cpp:310 #, c-format msgid "" "Illegal URL - closing ] for IPv6 address is followed by illegal token: %s" msgstr "" #: src/hed/libs/common/URL.cpp:326 #, c-format msgid "Invalid port number in %s" msgstr "" #: src/hed/libs/common/URL.cpp:425 #, c-format msgid "Unknown LDAP scope %s - using base" msgstr "" #: src/hed/libs/common/URL.cpp:587 msgid "Attempt to assign relative path to URL - making it absolute" msgstr "" #: src/hed/libs/common/URL.cpp:686 #, c-format msgid "URL option %s does not have format name=value" msgstr "" #: src/hed/libs/common/URL.cpp:1151 #, c-format msgid "urllist %s contains invalid URL: %s" msgstr "" #: src/hed/libs/common/URL.cpp:1156 #, c-format msgid "URL protocol is not urllist: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:30 #: src/hed/libs/common/UserConfig.cpp:681 #: src/hed/libs/common/UserConfig.cpp:690 #: src/hed/libs/common/UserConfig.cpp:696 #: src/hed/libs/common/UserConfig.cpp:718 #: src/hed/libs/common/UserConfig.cpp:728 #: src/hed/libs/common/UserConfig.cpp:740 #: src/hed/libs/common/UserConfig.cpp:760 #, c-format msgid "Multiple %s attributes in configuration file (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:82 #, c-format msgid "Wrong ownership of certificate file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:84 #, c-format msgid "Wrong permissions of certificate file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:86 #, c-format msgid "Can not access certificate file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:93 #, c-format msgid "Wrong ownership of key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:95 #, c-format msgid "Wrong permissions of key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:97 #, c-format msgid "Can not access key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:104 #, c-format msgid "Wrong ownership of proxy file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:106 #, c-format msgid "Wrong permissions of proxy file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:108 #, c-format msgid "Can not access proxy file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:119 msgid "computing" msgstr "" #: src/hed/libs/common/UserConfig.cpp:121 msgid "index" msgstr "" #: src/hed/libs/common/UserConfig.cpp:165 #: src/hed/libs/common/UserConfig.cpp:171 #: src/hed/libs/common/UserConfig.cpp:223 #: 
src/hed/libs/common/UserConfig.cpp:229 #, c-format msgid "System configuration file (%s) contains errors." msgstr "" #: src/hed/libs/common/UserConfig.cpp:176 #: src/hed/libs/common/UserConfig.cpp:234 #, c-format msgid "System configuration file (%s or %s) does not exist." msgstr "" #: src/hed/libs/common/UserConfig.cpp:178 #: src/hed/libs/common/UserConfig.cpp:180 #: src/hed/libs/common/UserConfig.cpp:236 #: src/hed/libs/common/UserConfig.cpp:238 #, c-format msgid "System configuration file (%s) does not exist." msgstr "" #: src/hed/libs/common/UserConfig.cpp:187 #: src/hed/libs/common/UserConfig.cpp:199 #: src/hed/libs/common/UserConfig.cpp:245 #: src/hed/libs/common/UserConfig.cpp:257 #, c-format msgid "User configuration file (%s) contains errors." msgstr "" #: src/hed/libs/common/UserConfig.cpp:192 #: src/hed/libs/common/UserConfig.cpp:250 msgid "No configuration file could be loaded." msgstr "" #: src/hed/libs/common/UserConfig.cpp:195 #: src/hed/libs/common/UserConfig.cpp:253 #, c-format msgid "User configuration file (%s) does not exist or cannot be loaded." msgstr "" #: src/hed/libs/common/UserConfig.cpp:310 #, c-format msgid "" "Unable to parse the specified verbosity (%s) to one of the allowed levels" msgstr "" #: src/hed/libs/common/UserConfig.cpp:322 #, c-format msgid "" "Unsupported job list type '%s', using 'BDB'. Supported types are: BDB, " "SQLITE, XML." msgstr "" #: src/hed/libs/common/UserConfig.cpp:503 #, c-format msgid "Certificate and key ('%s' and '%s') not found in any of the paths: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:505 #, c-format msgid "" "If the proxy or certificate/key does exist, you can manually specify the " "locations via environment variables '%s'/'%s' or '%s', or the '%s'/'%s' or '%" "s' attributes in the client configuration file (e.g. '%s')" msgstr "" #: src/hed/libs/common/UserConfig.cpp:522 #, c-format msgid "" "Can not access CA certificates directory: %s. The certificates will not be " "verified." msgstr "" #: src/hed/libs/common/UserConfig.cpp:532 #, c-format msgid "" "Can not access CA certificate directory: %s. The certificates will not be " "verified." msgstr "" #: src/hed/libs/common/UserConfig.cpp:558 #, c-format msgid "" "Can not find CA certificates directory in default locations:\n" "~/.arc/certificates, ~/.globus/certificates,\n" "%s/etc/certificates, %s/etc/grid-security/certificates,\n" "%s/share/certificates, /etc/grid-security/certificates.\n" "The certificate will not be verified.\n" "If the CA certificates directory does exist, please manually specify the " "locations via env\n" "X509_CERT_DIR, or the cacertificatesdirectory item in client.conf\n" msgstr "" #: src/hed/libs/common/UserConfig.cpp:579 #, c-format msgid "Using proxy file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:582 #, c-format msgid "Using certificate file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:583 #, c-format msgid "Using key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:587 #, c-format msgid "Using CA certificate directory: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:600 #: src/hed/libs/common/UserConfig.cpp:606 #, c-format msgid "Can not access VOMSES file/directory: %s." msgstr "" #: src/hed/libs/common/UserConfig.cpp:612 #, c-format msgid "Can not access VOMS file/directory: %s." 
msgstr "" #: src/hed/libs/common/UserConfig.cpp:631 msgid "" "Can not find voms service configuration file (vomses) in default locations: " "~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/" "grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses" msgstr "" #: src/hed/libs/common/UserConfig.cpp:644 #, c-format msgid "Loading configuration (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:678 #, c-format msgid "" "The value of the timeout attribute in the configuration file (%s) was only " "partially parsed" msgstr "" #: src/hed/libs/common/UserConfig.cpp:703 msgid "" "The brokerarguments attribute can only be used in conjunction with the " "brokername attribute" msgstr "" #: src/hed/libs/common/UserConfig.cpp:715 #, c-format msgid "" "The value of the keysize attribute in the configuration file (%s) was only " "partially parsed" msgstr "" #: src/hed/libs/common/UserConfig.cpp:735 #, c-format msgid "" "Could not convert the slcs attribute value (%s) to an URL instance in " "configuration file (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:781 #, c-format msgid "Specified overlay file (%s) does not exist." msgstr "" #: src/hed/libs/common/UserConfig.cpp:785 #, c-format msgid "" "Unknown attribute %s in common section of configuration file (%s), ignoring " "it" msgstr "" #: src/hed/libs/common/UserConfig.cpp:826 #, c-format msgid "Unknown section %s, ignoring it" msgstr "" #: src/hed/libs/common/UserConfig.cpp:830 #, c-format msgid "Configuration (%s) loaded" msgstr "" #: src/hed/libs/common/UserConfig.cpp:833 #, c-format msgid "Could not load configuration (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:928 #, c-format msgid "UserConfiguration saved to file (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:941 #, c-format msgid "Unable to create %s directory." msgstr "" #: src/hed/libs/common/UserConfig.cpp:950 #, c-format msgid "Configuration example file created (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:952 #, c-format msgid "Unable to copy example configuration from existing configuration (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:956 #, c-format msgid "Cannot copy example configuration (%s), it is not a regular file" msgstr "" #: src/hed/libs/common/UserConfig.cpp:961 #, c-format msgid "Example configuration (%s) not created." msgstr "" #: src/hed/libs/common/UserConfig.cpp:966 #, c-format msgid "The default configuration file (%s) is not a regular file." 
msgstr "" #: src/hed/libs/common/UserConfig.cpp:984 #, c-format msgid "%s directory created" msgstr "" #: src/hed/libs/common/UserConfig.cpp:986 #: src/hed/libs/common/UserConfig.cpp:1025 src/hed/libs/data/DataMover.cpp:684 #, c-format msgid "Failed to create directory %s" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:58 msgid "This VERBOSE message should not be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:62 msgid "This INFO message should be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:73 msgid "This VERBOSE message should now be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:79 msgid "This INFO message should also be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:93 msgid "This message goes to initial destination" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:108 msgid "This message goes to per-thread destination" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:80 msgid "Request failed: No response from SPService" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:84 #: src/hed/libs/communication/ClientSAML2SSO.cpp:137 msgid "Request failed: response from SPService is not as expected" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:92 #, c-format msgid "Authentication Request URL: %s" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:133 msgid "Request failed: No response from IdP" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:184 msgid "Request failed: No response from IdP when doing redirecting" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:188 msgid "" "Request failed: response from IdP is not as expected when doing redirecting" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:245 msgid "Request failed: No response from IdP when doing authentication" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:249 msgid "" "Request failed: response from IdP is not as expected when doing " "authentication" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:294 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:300 msgid "Succeeded to verify the signature under " msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:296 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:303 msgid "Failed to verify the signature under " msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:310 msgid "" "Request failed: No response from SP Service when sending SAML assertion to SP" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:314 msgid "" "Request failed: response from SP Service is not as expected when sending " "SAML assertion to SP" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:325 #, c-format msgid "IdP return some error message: %s" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:353 #: src/hed/libs/communication/ClientSAML2SSO.cpp:398 msgid "SAML2SSO process failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:54 msgid "Creating delegation credential to ARC delegation service" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:64 #: src/hed/libs/communication/ClientX509Delegation.cpp:267 msgid "DelegateCredentialsInit failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:68 #: src/hed/libs/communication/ClientX509Delegation.cpp:122 #: src/hed/libs/communication/ClientX509Delegation.cpp:157 #: src/hed/libs/communication/ClientX509Delegation.cpp:212 #: src/hed/libs/communication/ClientX509Delegation.cpp:271 msgid "There is no SOAP response" msgstr "" #: 
src/hed/libs/communication/ClientX509Delegation.cpp:73 msgid "There is no X509 request in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:78 msgid "There is no Format request in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:86 msgid "There is no Id or X509 request value in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:99 #: src/hed/libs/communication/ClientX509Delegation.cpp:187 msgid "DelegateProxy failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:118 msgid "UpdateCredentials failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:126 msgid "There is no UpdateCredentialsResponse in response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:134 #: src/hed/libs/communication/ClientX509Delegation.cpp:162 #: src/hed/libs/communication/ClientX509Delegation.cpp:217 #: src/hed/libs/communication/ClientX509Delegation.cpp:302 msgid "There is no SOAP connection chain configured" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:140 msgid "Creating delegation to CREAM delegation service" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:153 msgid "Delegation getProxyReq request failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:173 msgid "Creating delegation to CREAM delegation service failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:208 msgid "Delegation putProxy request failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:222 msgid "Creating delegation to CREAM delegation failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:237 msgid "Getting delegation credential from ARC delegation service" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:276 msgid "There is no Delegated X509 token in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:281 msgid "There is no Format delegated token in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:289 msgid "There is no Id or X509 token value in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:298 #, c-format msgid "" "Get delegated credential from delegation service: \n" " %s" msgstr "" #: src/hed/libs/compute/Broker.cpp:62 #, c-format msgid "Performing matchmaking against target (%s)." msgstr "" #: src/hed/libs/compute/Broker.cpp:72 #, c-format msgid "Matchmaking, ExecutionTarget: %s matches job description" msgstr "" #: src/hed/libs/compute/Broker.cpp:145 #, c-format msgid "" "The CA issuer (%s) of the credentials (%s) is not trusted by the target (%s)." msgstr "" #: src/hed/libs/compute/Broker.cpp:153 src/hed/libs/compute/Broker.cpp:171 #, c-format msgid "ComputingShareName of ExecutionTarget (%s) is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:157 src/hed/libs/compute/Broker.cpp:162 #, c-format msgid "ComputingShare (%s) explicitly rejected" msgstr "" #: src/hed/libs/compute/Broker.cpp:175 src/hed/libs/compute/Broker.cpp:180 #, c-format msgid "ComputingShare (%s) does not match selected queue (%s)" msgstr "" #: src/hed/libs/compute/Broker.cpp:189 #, c-format msgid "" "ProcessingStartTime (%s) specified in job description is inside the targets " "downtime period [ %s - %s ]." msgstr "" #: src/hed/libs/compute/Broker.cpp:194 #, c-format msgid "The downtime of the target (%s) is not published. Keeping target." 
msgstr "" #: src/hed/libs/compute/Broker.cpp:200 #, c-format msgid "HealthState of ExecutionTarget (%s) is not OK (%s)" msgstr "" #: src/hed/libs/compute/Broker.cpp:205 #, c-format msgid "Matchmaking, ExecutionTarget: %s, HealthState is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:212 #, c-format msgid "" "Matchmaking, Computing endpoint requirement not satisfied. ExecutionTarget: %" "s" msgstr "" #: src/hed/libs/compute/Broker.cpp:217 #, c-format msgid "Matchmaking, ExecutionTarget: %s, ImplementationName is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:243 #, c-format msgid "" "Matchmaking, %s (%d) is %s than %s (%d) published by the ExecutionTarget." msgstr "" #: src/hed/libs/compute/Broker.cpp:272 #, c-format msgid "" "Matchmaking, The %s scaled %s (%d) is %s than the %s (%d) published by the " "ExecutionTarget." msgstr "" #: src/hed/libs/compute/Broker.cpp:284 #, c-format msgid "Matchmaking, Benchmark %s is not published by the ExecutionTarget." msgstr "" #: src/hed/libs/compute/Broker.cpp:299 #, c-format msgid "" "Matchmaking, MaxTotalCPUTime problem, ExecutionTarget: %d (MaxTotalCPUTime), " "JobDescription: %d (TotalCPUTime)" msgstr "" #: src/hed/libs/compute/Broker.cpp:306 #, c-format msgid "" "Matchmaking, MaxCPUTime problem, ExecutionTarget: %d (MaxCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" msgstr "" #: src/hed/libs/compute/Broker.cpp:311 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxTotalCPUTime or MaxCPUTime not " "defined, assuming no CPU time limit" msgstr "" #: src/hed/libs/compute/Broker.cpp:317 #, c-format msgid "" "Matchmaking, MinCPUTime problem, ExecutionTarget: %d (MinCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" msgstr "" #: src/hed/libs/compute/Broker.cpp:322 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MinCPUTime not defined, assuming no CPU " "time limit" msgstr "" #: src/hed/libs/compute/Broker.cpp:330 #, c-format msgid "" "Matchmaking, MainMemorySize problem, ExecutionTarget: %d (MainMemorySize), " "JobDescription: %d (IndividualPhysicalMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:336 #, c-format msgid "" "Matchmaking, MaxMainMemory problem, ExecutionTarget: %d (MaxMainMemory), " "JobDescription: %d (IndividualPhysicalMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:341 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxMainMemory and MainMemorySize are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:349 #, c-format msgid "" "Matchmaking, MaxVirtualMemory problem, ExecutionTarget: %d " "(MaxVirtualMemory), JobDescription: %d (IndividualVirtualMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:354 #, c-format msgid "Matchmaking, ExecutionTarget: %s, MaxVirtualMemory is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:362 #, c-format msgid "" "Matchmaking, Platform problem, ExecutionTarget: %s (Platform) " "JobDescription: %s (Platform)" msgstr "" #: src/hed/libs/compute/Broker.cpp:367 #, c-format msgid "Matchmaking, ExecutionTarget: %s, Platform is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:375 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, OperatingSystem requirements not satisfied" msgstr "" #: src/hed/libs/compute/Broker.cpp:380 #, c-format msgid "Matchmaking, ExecutionTarget: %s, OperatingSystem is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:388 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, RunTimeEnvironment requirements not " "satisfied" msgstr "" #: 
src/hed/libs/compute/Broker.cpp:393 #, c-format msgid "Matchmaking, ExecutionTarget: %s, ApplicationEnvironments not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:402 #, c-format msgid "" "Matchmaking, NetworkInfo demand not fulfilled, ExecutionTarget do not " "support %s, specified in the JobDescription." msgstr "" #: src/hed/libs/compute/Broker.cpp:406 #, c-format msgid "Matchmaking, ExecutionTarget: %s, NetworkInfo is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:414 #, c-format msgid "" "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (SessionDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:421 #, c-format msgid "" "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (SessionDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:427 src/hed/libs/compute/Broker.cpp:448 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxDiskSpace and WorkingAreaFree are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:435 #, c-format msgid "" "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (DiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:442 #, c-format msgid "" "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (DiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:456 #, c-format msgid "" "Matchmaking, CacheTotal problem, ExecutionTarget: %d MB (CacheTotal); " "JobDescription: %d MB (CacheDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:461 #, c-format msgid "Matchmaking, ExecutionTarget: %s, CacheTotal is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:469 #, c-format msgid "" "Matchmaking, TotalSlots problem, ExecutionTarget: %d (TotalSlots) " "JobDescription: %d (NumberOfProcesses)" msgstr "" #: src/hed/libs/compute/Broker.cpp:475 #, c-format msgid "" "Matchmaking, MaxSlotsPerJob problem, ExecutionTarget: %d (MaxSlotsPerJob) " "JobDescription: %d (NumberOfProcesses)" msgstr "" #: src/hed/libs/compute/Broker.cpp:481 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, TotalSlots and MaxSlotsPerJob are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:489 #, c-format msgid "" "Matchmaking, WorkingAreaLifeTime problem, ExecutionTarget: %s " "(WorkingAreaLifeTime) JobDescription: %s (SessionLifeTime)" msgstr "" #: src/hed/libs/compute/Broker.cpp:494 #, c-format msgid "Matchmaking, ExecutionTarget: %s, WorkingAreaLifeTime is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:502 #, c-format msgid "" "Matchmaking, ConnectivityIn problem, ExecutionTarget: %s (ConnectivityIn) " "JobDescription: %s (InBound)" msgstr "" #: src/hed/libs/compute/Broker.cpp:509 #, c-format msgid "" "Matchmaking, ConnectivityOut problem, ExecutionTarget: %s (ConnectivityOut) " "JobDescription: %s (OutBound)" msgstr "" #: src/hed/libs/compute/Broker.cpp:532 msgid "Unable to sort added jobs. The BrokerPlugin plugin has not been loaded." msgstr "" #: src/hed/libs/compute/Broker.cpp:549 msgid "Unable to match target, marking it as not matching. Broker not valid." msgstr "" #: src/hed/libs/compute/Broker.cpp:585 msgid "Unable to sort ExecutionTarget objects - Invalid Broker object." msgstr "" #: src/hed/libs/compute/Broker.cpp:609 msgid "" "Unable to register job submission. Can't get JobDescription object from " "Broker, Broker is invalid." 
msgstr "" #: src/hed/libs/compute/BrokerPlugin.cpp:89 #, c-format msgid "Broker plugin \"%s\" not found." msgstr "" #: src/hed/libs/compute/BrokerPlugin.cpp:96 #, c-format msgid "Unable to load BrokerPlugin (%s)" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:26 #, c-format msgid "Uniq is replacing service coming from %s with service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:30 #, c-format msgid "Uniq is ignoring service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:37 #, c-format msgid "Uniq is adding service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:60 #, c-format msgid "Adding endpoint (%s) to TargetInformationRetriever" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:63 #, c-format msgid "Adding endpoint (%s) to ServiceEndpointRetriever" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:66 #, c-format msgid "" "Adding endpoint (%s) to both ServiceEndpointRetriever and " "TargetInformationRetriever" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:42 #, c-format msgid "The plugin %s does not support any interfaces, skipping it." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:47 #, c-format msgid "" "The first supported interface of the plugin %s is an empty string, skipping " "the plugin." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:95 #, c-format msgid "Interface on endpoint (%s) %s." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:101 #: src/hed/libs/compute/EntityRetriever.cpp:133 #: src/hed/libs/compute/EntityRetriever.cpp:425 #, c-format msgid "Ignoring endpoint (%s), it is already registered in retriever." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:110 #, c-format msgid "Service Loop: Endpoint %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:112 #, c-format msgid " This endpoint (%s) is STARTED or SUCCESSFUL" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:115 #, c-format msgid "" "Suspending querying of endpoint (%s) since the service at the endpoint is " "already being queried, or has been queried." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:122 #: src/hed/libs/compute/EntityRetriever.cpp:237 #, c-format msgid " Status of endpoint (%s) is %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:126 #, c-format msgid "Setting status (STARTED) for endpoint: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:145 #, c-format msgid "Starting thread to query the endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:147 #: src/hed/libs/compute/EntityRetriever.cpp:289 #, c-format msgid "Failed to start querying the endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:174 #, c-format msgid "Found a registry, will query it recursively: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:211 #, c-format msgid "Setting status (%s) for endpoint: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:231 msgid "Checking for suspended endpoints which should be started." 
msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:241 #, c-format msgid "Found started or successful endpoint (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:253 #, c-format msgid "Found suspended endpoint (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:264 #, c-format msgid "Trying to start suspended endpoint (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:284 #, c-format msgid "" "Starting querying of suspended endpoint (%s) - no other endpoints for this " "service is being queried or has been queried successfully." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:351 #, c-format msgid "Calling plugin %s to query endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:373 #, c-format msgid "" "The interface of this endpoint (%s) is unspecified, will try all possible " "plugins" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:389 #, c-format msgid "Problem loading plugin %s, skipping it." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:393 #, c-format msgid "The endpoint (%s) is not supported by this plugin (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:414 #, c-format msgid "" "New endpoint is created (%s) from the one with the unspecified interface (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:432 #, c-format msgid "Starting sub-thread to query the endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:434 #, c-format msgid "" "Failed to start querying the endpoint on %s (unable to create sub-thread)" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:32 #, c-format msgid "Found %s %s (it was loaded already)" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:41 #, c-format msgid "" "Unable to locate the \"%s\" plugin. Please refer to installation " "instructions and check if package providing support for %s plugin is " "installed" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:42 #, c-format msgid "%s plugin \"%s\" not found." msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:49 #: src/hed/libs/compute/JobControllerPlugin.cpp:100 #: src/hed/libs/compute/JobControllerPlugin.cpp:109 #: src/hed/libs/compute/SubmitterPlugin.cpp:158 #: src/hed/libs/compute/SubmitterPlugin.cpp:168 #, c-format msgid "" "Unable to locate the \"%s\" plugin. Please refer to installation " "instructions and check if package providing support for \"%s\" plugin is " "installed" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:50 #, c-format msgid "%s %s could not be created." msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:55 #, c-format msgid "Loaded %s %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:51 #, c-format msgid "" "Skipping ComputingEndpoint '%s', because it has '%s' interface instead of " "the requested '%s'." 
msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:237 #, c-format msgid "Address: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:238 #, c-format msgid "Place: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:239 #, c-format msgid "Country: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:240 #, c-format msgid "Postal code: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:241 #, c-format msgid "Latitude: %f" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:242 #, c-format msgid "Longitude: %f" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:248 #, c-format msgid "Owner: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:255 #, c-format msgid "ID: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:256 #, c-format msgid "Type: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:261 #, c-format msgid "URL: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:262 #, c-format msgid "Interface: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:264 msgid "Interface versions:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:269 msgid "Interface extensions:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:274 msgid "Capabilities:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:278 #, c-format msgid "Technology: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:280 msgid "Supported Profiles:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:284 #, c-format msgid "Implementor: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:285 #, c-format msgid "Implementation name: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:286 #, c-format msgid "Quality level: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:287 #, c-format msgid "Health state: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:288 #, c-format msgid "Health state info: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:289 #, c-format msgid "Serving state: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:290 #, c-format msgid "Issuer CA: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:292 msgid "Trusted CAs:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:296 #, c-format msgid "Downtime starts: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:297 #, c-format msgid "Downtime ends: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:298 #, c-format msgid "Staging: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:300 msgid "Job descriptions:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:312 #, c-format msgid "Scheme: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:315 #, c-format msgid "Rule: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:327 #, c-format msgid "Mapping queue: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:328 #, c-format msgid "Max wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:329 #, c-format msgid "Max total wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:330 #, c-format msgid "Min wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:331 #, c-format msgid "Default wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:332 #, c-format msgid "Max CPU time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:333 #, c-format msgid "Min CPU time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:334 #, c-format msgid "Default CPU time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:335 #, 
c-format msgid "Max total jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:336 #, c-format msgid "Max running jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:337 #, c-format msgid "Max waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:338 #, c-format msgid "Max pre-LRMS waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:339 #, c-format msgid "Max user running jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:340 #, c-format msgid "Max slots per job: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:341 #, c-format msgid "Max stage in streams: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:342 #, c-format msgid "Max stage out streams: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:343 #, c-format msgid "Scheduling policy: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:344 #, c-format msgid "Max memory: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:345 #, c-format msgid "Max virtual memory: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:346 #, c-format msgid "Max disk space: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:347 #, c-format msgid "Default Storage Service: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:348 msgid "Supports preemption" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:349 msgid "Doesn't support preemption" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:350 #, c-format msgid "Total jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:351 #, c-format msgid "Running jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:352 #, c-format msgid "Local running jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:353 #, c-format msgid "Waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:354 #, c-format msgid "Local waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:355 #, c-format msgid "Suspended jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:356 #, c-format msgid "Local suspended jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:357 #, c-format msgid "Staging jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:358 #, c-format msgid "Pre-LRMS waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:359 #, c-format msgid "Estimated average waiting time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:360 #, c-format msgid "Estimated worst waiting time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:361 #, c-format msgid "Free slots: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:363 msgid "Free slots grouped according to time limits (limit: free slots):" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:366 #, c-format msgid " %s: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:367 #, c-format msgid " unspecified: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:370 #, c-format msgid "Used slots: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:371 #, c-format msgid "Requested slots: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:372 #, c-format msgid "Reservation policy: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:379 #, c-format msgid "Resource manager: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:380 #, c-format msgid " (%s)" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:383 #, c-format msgid "Total physical CPUs: %i" msgstr "" #: 
src/hed/libs/compute/ExecutionTarget.cpp:384 #, c-format msgid "Total logical CPUs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:385 #, c-format msgid "Total slots: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:386 msgid "Supports advance reservations" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:387 msgid "Doesn't support advance reservations" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:388 msgid "Supports bulk submission" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:389 msgid "Doesn't support bulk Submission" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:390 msgid "Homogeneous resource" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:391 msgid "Non-homogeneous resource" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:393 msgid "Network information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:398 msgid "Working area is shared among jobs" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:399 msgid "Working area is not shared among jobs" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:400 #, c-format msgid "Working area total size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:401 #, c-format msgid "Working area free size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:402 #, c-format msgid "Working area life time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:403 #, c-format msgid "Cache area total size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:404 #, c-format msgid "Cache area free size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:410 #, c-format msgid "Platform: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:411 msgid "Execution environment supports inbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:412 msgid "Execution environment does not support inbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:413 msgid "Execution environment supports outbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:414 msgid "Execution environment does not support outbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:415 msgid "Execution environment is a virtual machine" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:416 msgid "Execution environment is a physical machine" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:417 #, c-format msgid "CPU vendor: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:418 #, c-format msgid "CPU model: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:419 #, c-format msgid "CPU version: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:420 #, c-format msgid "CPU clock speed: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:421 #, c-format msgid "Main memory size: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:422 #, c-format msgid "OS family: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:423 #, c-format msgid "OS name: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:424 #, c-format msgid "OS version: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:431 msgid "Computing service:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:455 #, c-format msgid "%d Endpoints" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:460 msgid "Endpoint Information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:472 #, c-format msgid "%d Batch Systems" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:477 msgid "Batch 
System Information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:483 msgid "Installed application environments:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:496 #, c-format msgid "%d Shares" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:501 msgid "Share Information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:507 #, c-format msgid "%d mapping policies" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:511 msgid "Mapping policy:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:527 #, c-format msgid "Execution Target on Computing Service: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:529 #, c-format msgid " Computing endpoint URL: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:531 #, c-format msgid " Computing endpoint interface name: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:533 #: src/hed/libs/compute/Job.cpp:580 #, c-format msgid " Queue: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:536 #, c-format msgid " Mapping queue: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:539 #, c-format msgid " Health state: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:544 msgid "Service information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:549 msgid " Installed application environments:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:556 msgid "Batch system information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:559 msgid "Queue information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:566 msgid " Benchmark information:" msgstr "" #: src/hed/libs/compute/GLUE2.cpp:58 msgid "The ComputingService doesn't advertise its Quality Level." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:117 msgid "The ComputingEndpoint doesn't advertise its Quality Level." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:128 msgid "The ComputingService doesn't advertise its Interface." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:160 msgid "The ComputingEndpoint doesn't advertise its Serving State." msgstr "" #: src/hed/libs/compute/Job.cpp:329 msgid "Unable to detect format of job record." 
msgstr "" #: src/hed/libs/compute/Job.cpp:550 #, c-format msgid "Job: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:552 #, c-format msgid " Name: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:553 #, c-format msgid " State: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:556 #, c-format msgid " Specific state: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:560 src/hed/libs/compute/Job.cpp:584 #, c-format msgid " Waiting Position: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:564 #, c-format msgid " Exit Code: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:568 #, c-format msgid " Job Error: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:573 #, c-format msgid " Owner: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:577 #, c-format msgid " Other Messages: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:582 #, c-format msgid " Requested Slots: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:587 #, c-format msgid " Stdin: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:589 #, c-format msgid " Stdout: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:591 #, c-format msgid " Stderr: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:593 #, c-format msgid " Computing Service Log Directory: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:596 #, c-format msgid " Submitted: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:599 #, c-format msgid " End Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:602 #, c-format msgid " Submitted from: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:605 #, c-format msgid " Submitting client: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:608 #, c-format msgid " Requested CPU Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:612 #, c-format msgid " Used CPU Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:615 #, c-format msgid " Used Wall Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:618 #, c-format msgid " Used Memory: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:622 #, c-format msgid " Results were deleted: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:623 #, c-format msgid " Results must be retrieved before: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:627 #, c-format msgid " Proxy valid until: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:631 #, c-format msgid " Entry valid from: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:634 #, c-format msgid " Entry valid for: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:638 msgid " Old job IDs:" msgstr "" #: src/hed/libs/compute/Job.cpp:646 #, c-format msgid " ID on service: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:647 #, c-format msgid " Service information URL: %s (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:648 #, c-format msgid " Job status URL: %s (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:649 #, c-format msgid " Job management URL: %s (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:650 #, c-format msgid " Stagein directory URL: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:651 #, c-format msgid " Stageout directory URL: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:652 #, c-format msgid " Session directory URL: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:654 msgid " Delegation IDs:" msgstr "" #: src/hed/libs/compute/Job.cpp:670 #, c-format msgid "Unable to handle job (%s), no interface specified." 
msgstr "" #: src/hed/libs/compute/Job.cpp:675 #, c-format msgid "" "Unable to handle job (%s), no plugin associated with the specified interface " "(%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:697 #, c-format msgid "Invalid download destination path specified (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:702 #, c-format msgid "" "Unable to download job (%s), no JobControllerPlugin plugin was set to handle " "the job." msgstr "" #: src/hed/libs/compute/Job.cpp:706 #, c-format msgid "Downloading job: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:710 #, c-format msgid "" "Cant retrieve job files for job (%s) - unable to determine URL of stage out " "directory" msgstr "" #: src/hed/libs/compute/Job.cpp:715 #, c-format msgid "Invalid stage out path specified (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:722 #, c-format msgid "%s directory exist! Skipping job." msgstr "" #: src/hed/libs/compute/Job.cpp:728 #, c-format msgid "Unable to retrieve list of job files to download for job %s" msgstr "" #: src/hed/libs/compute/Job.cpp:733 #, c-format msgid "No files to retrieve for job %s" msgstr "" #: src/hed/libs/compute/Job.cpp:739 #, c-format msgid "Failed to create directory %s! Skipping job." msgstr "" #: src/hed/libs/compute/Job.cpp:752 #, c-format msgid "Failed downloading %s to %s, destination already exist" msgstr "" #: src/hed/libs/compute/Job.cpp:758 #, c-format msgid "Failed downloading %s to %s, unable to remove existing destination" msgstr "" #: src/hed/libs/compute/Job.cpp:764 #, c-format msgid "Failed downloading %s to %s" msgstr "" #: src/hed/libs/compute/Job.cpp:777 src/hed/libs/compute/Job.cpp:782 #, c-format msgid "Unable to list files at %s" msgstr "" #: src/hed/libs/compute/Job.cpp:824 msgid "Now copying (from -> to)" msgstr "" #: src/hed/libs/compute/Job.cpp:825 #, c-format msgid " %s -> %s" msgstr "" #: src/hed/libs/compute/Job.cpp:841 #, c-format msgid "Unable to initialise connection to source: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:852 #, c-format msgid "Unable to initialise connection to destination: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:871 #, c-format msgid "File download failed: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:910 src/hed/libs/compute/Job.cpp:939 #: src/hed/libs/compute/Job.cpp:971 src/hed/libs/compute/Job.cpp:1004 #, c-format msgid "Waiting for lock on file %s" msgstr "" #: src/hed/libs/compute/JobControllerPlugin.cpp:101 #, c-format msgid "JobControllerPlugin plugin \"%s\" not found." 
msgstr "" #: src/hed/libs/compute/JobControllerPlugin.cpp:110 #, c-format msgid "JobControllerPlugin %s could not be created" msgstr "" #: src/hed/libs/compute/JobControllerPlugin.cpp:115 #, c-format msgid "Loaded JobControllerPlugin %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:22 #, c-format msgid ": %d" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:24 #, c-format msgid ": %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:138 msgid " --- DRY RUN --- " msgstr "" #: src/hed/libs/compute/JobDescription.cpp:148 #, c-format msgid " Annotation: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:154 #, c-format msgid " Old activity ID: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:160 #, c-format msgid " Argument: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:171 #, c-format msgid " RemoteLogging (optional): %s (%s)" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:174 #, c-format msgid " RemoteLogging: %s (%s)" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:182 #, c-format msgid " Environment.name: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:183 #, c-format msgid " Environment: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:196 #, c-format msgid " PreExecutable.Argument: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:199 #: src/hed/libs/compute/JobDescription.cpp:217 #, c-format msgid " Exit code for successful execution: %d" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:202 #: src/hed/libs/compute/JobDescription.cpp:220 msgid " No exit code for successful execution specified." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:214 #, c-format msgid " PostExecutable.Argument: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:230 #, c-format msgid " Access control: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:234 #, c-format msgid " Processing start time: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:237 msgid " Notify:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:251 #, c-format msgid " Credential service: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:261 msgid " Operating system requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:279 msgid " Computing endpoint requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:292 msgid " Node access: inbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:295 msgid " Node access: outbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:298 msgid " Node access: inbound and outbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:308 msgid " Job requires exclusive execution" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:311 msgid " Job does not require exclusive execution" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:316 msgid " Run time environment requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:328 msgid " Inputfile element:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:329 #: src/hed/libs/compute/JobDescription.cpp:351 #, c-format msgid " Name: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:331 msgid " Is executable: true" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:335 #, c-format msgid " Sources: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:337 #, c-format msgid " Sources.DelegationID: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:341 #, c-format msgid " Sources.Options: %s = %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:350 msgid " Outputfile element:" 
msgstr "" #: src/hed/libs/compute/JobDescription.cpp:354 #, c-format msgid " Targets: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:356 #, c-format msgid " Targets.DelegationID: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:360 #, c-format msgid " Targets.Options: %s = %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:367 #, c-format msgid " DelegationID element: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:374 #, c-format msgid " Other attributes: [%s], %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:440 msgid "Empty job description source string" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:473 msgid "No job description parsers available" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:475 #, c-format msgid "" "No job description parsers suitable for handling '%s' language are available" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:483 #, c-format msgid "%s parsing error" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:499 msgid "No job description parser was able to interpret job description" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:509 msgid "" "Job description language is not specified, unable to output description." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:521 #, c-format msgid "Generating %s job description output" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:537 #, c-format msgid "Language (%s) not recognized by any job description parsers." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:550 #, c-format msgid "Two input files have identical name '%s'." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:569 #: src/hed/libs/compute/JobDescription.cpp:582 #, c-format msgid "Cannot stat local input file '%s'" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:602 #, c-format msgid "Cannot find local input file '%s' (%s)" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:644 msgid "Unable to select runtime environment" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:651 msgid "Unable to select middleware" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:658 msgid "Unable to select operating system." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:677 #, c-format msgid "No test-job with ID %d found." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:689 #, c-format msgid "Test was defined with ID %d, but some error occurred during parsing it." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:693 #, c-format msgid "No jobdescription resulted at %d test" msgstr "" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:52 #, c-format msgid "JobDescriptionParserPlugin plugin \"%s\" not found." 
msgstr "" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:59 #, c-format msgid "JobDescriptionParserPlugin %s could not be created" msgstr "" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:64 #, c-format msgid "Loaded JobDescriptionParserPlugin %s" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:172 msgid "Unable to create temporary directory" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:180 #, c-format msgid "Unable to create data base environment (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:190 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:194 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:198 #, c-format msgid "Unable to set duplicate flags for secondary key DB (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:204 #, c-format msgid "Unable to create job database (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:208 #, c-format msgid "Unable to create DB for secondary name keys (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:212 #, c-format msgid "Unable to create DB for secondary endpoint keys (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:216 #, c-format msgid "Unable to create DB for secondary service info keys (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:221 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:225 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:229 #, c-format msgid "Unable to associate secondary DB with primary DB (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:232 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:92 #, c-format msgid "Job database created successfully (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:274 #, c-format msgid "Error from BDB: %s: %s" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:277 #, c-format msgid "Error from BDB: %s" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:297 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:137 #: src/hed/libs/compute/JobInformationStorageXML.cpp:27 #, c-format msgid "" "Job list file cannot be created: The parent directory (%s) doesn't exist." msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:301 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:141 #: src/hed/libs/compute/JobInformationStorageXML.cpp:31 #, c-format msgid "Job list file cannot be created: %s is not a directory" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:308 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:148 #: src/hed/libs/compute/JobInformationStorageXML.cpp:38 #, c-format msgid "Job list file (%s) is not a regular file" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:346 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:405 #, c-format msgid "Unable to write key/value pair to job database (%s): Key \"%s\"" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:572 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:435 #: src/hed/libs/compute/JobInformationStorageXML.cpp:137 #, c-format msgid "Unable to truncate job database (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:603 msgid "" "ENOENT: The file or directory does not exist, Or a nonexistent re_source " "file was specified." msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:606 msgid "" "DB_OLD_VERSION: The database cannot be opened without being first upgraded." 
msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:609 msgid "EEXIST: DB_CREATE and DB_EXCL were specified and the database exists." msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:611 msgid "EINVAL" msgstr "" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:614 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:468 #, c-format msgid "Unable to determine error (%d)" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:57 #, c-format msgid "Unable to create data base (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:72 #, c-format msgid "Unable to create jobs table in data base (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:80 #, c-format msgid "Unable to create index for jobs table in data base (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:88 #, c-format msgid "Failed checking database (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:113 #, c-format msgid "Error from SQLite: %s: %s" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:116 #, c-format msgid "Error from SQLite: %s" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:246 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:253 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:260 #, c-format msgid "Unable to write records into job database (%s): Id \"%s\"" msgstr "" #: src/hed/libs/compute/JobInformationStorageXML.cpp:51 #: src/hed/libs/compute/JobInformationStorageXML.cpp:223 #: src/hed/libs/compute/JobInformationStorageXML.cpp:264 #, c-format msgid "Waiting for lock on job list file %s" msgstr "" #: src/hed/libs/compute/JobInformationStorageXML.cpp:162 #, c-format msgid "Will remove %s on service %s." msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:36 msgid "Ignoring job, the job ID is empty" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:41 #, c-format msgid "Ignoring job (%s), the management interface name is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:46 #, c-format msgid "Ignoring job (%s), the job management URL is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:51 #, c-format msgid "Ignoring job (%s), the status interface name is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:56 #, c-format msgid "Ignoring job (%s), the job status URL is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:65 #, c-format msgid "Ignoring job (%s), unable to load JobControllerPlugin for %s" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:72 #, c-format msgid "" "Ignoring job (%s), already tried and were unable to load JobControllerPlugin" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:385 #, c-format msgid "Job resubmission failed: Unable to load broker (%s)" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:400 msgid "Job resubmission aborted because no resource returned any information" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:421 #, c-format msgid "Unable to resubmit job (%s), unable to parse obtained job description" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:443 #, c-format msgid "" "Unable to resubmit job (%s), target information retrieval failed for target: " "%s" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:469 #, c-format msgid "Unable to resubmit job (%s), no targets applicable for submission" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:504 #, c-format msgid "" "Unable to migrate job (%s), job description could not be retrieved remotely" msgstr 
"" #: src/hed/libs/compute/JobSupervisor.cpp:524 msgid "Job migration aborted, no resource returned any information" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:536 #, c-format msgid "Job migration aborted, unable to load broker (%s)" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:552 #, c-format msgid "Unable to migrate job (%s), unable to parse obtained job description" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:573 #, c-format msgid "Unable to load submission plugin for %s interface" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:583 #, c-format msgid "Job migration failed for job (%s), no applicable targets" msgstr "" #: src/hed/libs/compute/Software.cpp:65 src/hed/libs/compute/Software.cpp:94 #: src/hed/libs/compute/Software.cpp:113 #, c-format msgid "%s > %s => false" msgstr "" #: src/hed/libs/compute/Software.cpp:70 src/hed/libs/compute/Software.cpp:83 #: src/hed/libs/compute/Software.cpp:107 #, c-format msgid "%s > %s => true" msgstr "" #: src/hed/libs/compute/Software.cpp:90 src/hed/libs/compute/Software.cpp:102 #, c-format msgid "%s > %s => false: %s contains non numbers in the version part." msgstr "" #: src/hed/libs/compute/Software.cpp:206 src/hed/libs/compute/Software.cpp:217 #, c-format msgid "Requirement \"%s %s\" NOT satisfied." msgstr "" #: src/hed/libs/compute/Software.cpp:212 #, c-format msgid "Requirement \"%s %s\" satisfied." msgstr "" #: src/hed/libs/compute/Software.cpp:221 #, c-format msgid "Requirement \"%s %s\" satisfied by \"%s\"." msgstr "" #: src/hed/libs/compute/Software.cpp:226 msgid "All requirements satisfied." msgstr "" #: src/hed/libs/compute/Submitter.cpp:83 #, c-format msgid "Trying to submit directly to endpoint (%s)" msgstr "" #: src/hed/libs/compute/Submitter.cpp:88 #, c-format msgid "Interface (%s) specified, submitting only to that interface" msgstr "" #: src/hed/libs/compute/Submitter.cpp:106 msgid "Trying all available interfaces" msgstr "" #: src/hed/libs/compute/Submitter.cpp:112 #, c-format msgid "Trying to submit endpoint (%s) using interface (%s) with plugin (%s)." msgstr "" #: src/hed/libs/compute/Submitter.cpp:116 #, c-format msgid "" "Unable to load plugin (%s) for interface (%s) when trying to submit job " "description." msgstr "" #: src/hed/libs/compute/Submitter.cpp:130 #, c-format msgid "No more interfaces to try for endpoint %s." msgstr "" #: src/hed/libs/compute/Submitter.cpp:336 #, c-format msgid "Target %s does not match requested interface(s)." msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:54 msgid "No stagein URL is provided" msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:73 #, c-format msgid "Failed uploading file %s to %s: %s" msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:103 #, c-format msgid "Trying to migrate to %s: Migration to a %s interface is not supported." msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:159 #, c-format msgid "SubmitterPlugin plugin \"%s\" not found." 
msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:169 #, c-format msgid "SubmitterPlugin %s could not be created" msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:174 #, c-format msgid "Loaded SubmitterPlugin %s" msgstr "" #: src/hed/libs/compute/examples/basic_job_submission.cpp:28 msgid "Invalid job description" msgstr "" #: src/hed/libs/compute/examples/basic_job_submission.cpp:47 msgid "Failed to submit job" msgstr "" #: src/hed/libs/compute/examples/basic_job_submission.cpp:54 #, c-format msgid "Failed to write to local job list %s" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:20 msgid "[job description ...]" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:21 msgid "" "This tiny tool can be used for testing the JobDescription's conversion " "abilities." msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:23 msgid "" "The job description also can be a file or a string in JDL, POSIX JSDL, JSDL, " "or XRSL format." msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:27 msgid "" "define the requested format (nordugrid:jsdl, egee:jdl, nordugrid:xrsl, emies:" "adl)" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:28 msgid "format" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:33 msgid "show the original job description" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:43 msgid "Use --help option for detailed usage information" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:50 msgid " [ JobDescription tester ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:74 msgid " [ Parsing the original text ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:80 msgid "Unable to parse." msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:89 msgid " [ egee:jdl ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:91 msgid " [ emies:adl ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:93 msgid " [ nordugrid:jsdl ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:95 msgid " [ nordugrid:xrsl ] " msgstr "" #: src/hed/libs/credential/ARCProxyUtil.cpp:138 msgid "VOMS command is empty" msgstr "" #: src/hed/libs/credential/ARCProxyUtil.cpp:424 #: src/hed/libs/credential/ARCProxyUtil.cpp:1431 msgid "Failed to sign proxy" msgstr "" #: src/hed/libs/credential/ARCProxyUtil.cpp:1317 #, c-format msgid "Please choose the NSS database you would use (1-%d): " msgstr "" #: src/hed/libs/credential/ARCProxyUtil.cpp:1353 #: src/hed/libs/credential/ARCProxyUtil.cpp:1460 msgid "Failed to generate X509 request with NSS" msgstr "" #: src/hed/libs/credential/ARCProxyUtil.cpp:1364 #: src/hed/libs/credential/ARCProxyUtil.cpp:1471 #: src/hed/libs/credential/ARCProxyUtil.cpp:1512 msgid "Failed to create X509 certificate with NSS" msgstr "" #: src/hed/libs/credential/ARCProxyUtil.cpp:1376 #: src/hed/libs/credential/ARCProxyUtil.cpp:1483 #: src/hed/libs/credential/ARCProxyUtil.cpp:1536 msgid "Failed to export X509 certificate from NSS DB" msgstr "" #: src/hed/libs/credential/ARCProxyUtil.cpp:1519 msgid "Failed to import X509 certificate into NSS DB" msgstr "" #: src/hed/libs/credential/ARCProxyUtil.cpp:1528 msgid "Failed to initialize the credential configuration" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:166 #, c-format msgid "Error number in store context: %i" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:167 msgid "Self-signed certificate" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:170 #, c-format msgid "The certificate with subject %s is not valid" msgstr "" #: 
src/hed/libs/credential/CertUtil.cpp:173 #, c-format msgid "" "Can not find issuer certificate for the certificate with subject %s and " "hash: %lu" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:176 #, c-format msgid "Certificate with subject %s has expired" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:179 #, c-format msgid "" "Untrusted self-signed certificate in chain with subject %s and hash: %lu" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:181 #, c-format msgid "Certificate verification error: %s" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:193 msgid "Can not get the certificate type" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:233 msgid "Couldn't verify availability of CRL" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:246 msgid "In the available CRL the lastUpdate field is not valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:253 msgid "The available CRL is not yet valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:262 msgid "In the available CRL, the nextUpdate field is not valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:268 msgid "The available CRL has expired" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:291 #, c-format msgid "Certificate with serial number %s and subject \"%s\" is revoked" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:309 msgid "" "Directory of trusted CAs is not specified/found; Using current path as the " "CA direcroty" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:318 msgid "Can't allocate memory for CA policy path" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:364 #, c-format msgid "Certificate has unknown extension with numeric ID %u and SN %s" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:378 #: src/hed/libs/credential/Credential.cpp:1693 msgid "" "Can not convert DER encoded PROXY_CERT_INFO_EXTENSION extension to internal " "format" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:424 msgid "Trying to check X509 cert with check_cert_type" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:465 msgid "Can't convert DER encoded PROXYCERTINFO extension to internal form" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:469 msgid "Can't get policy from PROXYCERTINFO extension" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:473 msgid "Can't get policy language from PROXYCERTINFO extension" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:505 msgid "The subject does not match the issuer name + proxy CN entry" msgstr "" #: src/hed/libs/credential/Credential.cpp:73 #, c-format msgid "OpenSSL error string: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:196 msgid "Can't get the first byte of input to determine its format" msgstr "" #: src/hed/libs/credential/Credential.cpp:210 msgid "Can't reset the input" msgstr "" #: src/hed/libs/credential/Credential.cpp:236 #: src/hed/libs/credential/Credential.cpp:273 msgid "Can't get the first byte of input BIO to get its format" msgstr "" #: src/hed/libs/credential/Credential.cpp:248 msgid "Can not read certificate/key string" msgstr "" #: src/hed/libs/credential/Credential.cpp:456 #, c-format msgid "Can not find certificate file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:461 #, c-format msgid "Can not read certificate file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:499 msgid "Can not read certificate string" msgstr "" #: src/hed/libs/credential/Credential.cpp:519 msgid "Certificate format is PEM" msgstr "" #: src/hed/libs/credential/Credential.cpp:546 msgid "Certificate format is DER" msgstr "" #: 
src/hed/libs/credential/Credential.cpp:575 msgid "Certificate format is PKCS" msgstr "" #: src/hed/libs/credential/Credential.cpp:602 msgid "Certificate format is unknown" msgstr "" #: src/hed/libs/credential/Credential.cpp:610 #, c-format msgid "Can not find key file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:615 #, c-format msgid "Can not open key file %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:634 msgid "Can not read key string" msgstr "" #: src/hed/libs/credential/Credential.cpp:697 #: src/hed/libs/credential/VOMSUtil.cpp:258 msgid "Failed to lock arccredential library in memory" msgstr "" #: src/hed/libs/credential/Credential.cpp:712 msgid "Certificate verification succeeded" msgstr "" #: src/hed/libs/credential/Credential.cpp:716 msgid "Certificate verification failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:731 #: src/hed/libs/credential/Credential.cpp:751 #: src/hed/libs/credential/Credential.cpp:771 #: src/hed/libs/credential/Credential.cpp:1003 #: src/hed/libs/credential/Credential.cpp:2314 #: src/hed/libs/credential/Credential.cpp:2345 msgid "Failed to initialize extensions member for Credential" msgstr "" #: src/hed/libs/credential/Credential.cpp:814 #, c-format msgid "Unsupported proxy policy language is requested - %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:826 #, c-format msgid "Unsupported proxy version is requested - %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:837 msgid "If you specify a policy you also need to specify a policy language" msgstr "" #: src/hed/libs/credential/Credential.cpp:1008 msgid "Certificate/Proxy path is empty" msgstr "" #: src/hed/libs/credential/Credential.cpp:1067 #: src/hed/libs/credential/Credential.cpp:2856 msgid "Failed to duplicate extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1071 msgid "Failed to add extension into credential extensions" msgstr "" #: src/hed/libs/credential/Credential.cpp:1082 msgid "Certificate information collection failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1124 #: src/hed/libs/credential/Credential.cpp:1129 msgid "Can not convert string into ASN1_OBJECT" msgstr "" #: src/hed/libs/credential/Credential.cpp:1141 msgid "Can not create extension for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:1180 #: src/hed/libs/credential/Credential.cpp:1348 msgid "BN_set_word failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1189 #: src/hed/libs/credential/Credential.cpp:1357 msgid "RSA_generate_key_ex failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1198 #: src/hed/libs/credential/Credential.cpp:1365 msgid "BN_new || RSA_new failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1209 msgid "Created RSA key, proceeding with request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1214 msgid "pkey and rsa_key exist!" msgstr "" #: src/hed/libs/credential/Credential.cpp:1217 msgid "Generate new X509 request!" msgstr "" #: src/hed/libs/credential/Credential.cpp:1222 msgid "Setting subject name!" 
msgstr "" #: src/hed/libs/credential/Credential.cpp:1230 #: src/hed/libs/credential/Credential.cpp:1444 msgid "PEM_write_bio_X509_REQ failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1260 #: src/hed/libs/credential/Credential.cpp:1301 #: src/hed/libs/credential/Credential.cpp:1476 #: src/hed/libs/credential/Credential.cpp:1496 msgid "Can not create BIO for request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1278 msgid "Failed to write request into string" msgstr "" #: src/hed/libs/credential/Credential.cpp:1305 #: src/hed/libs/credential/Credential.cpp:1310 #: src/hed/libs/credential/Credential.cpp:1500 msgid "Can not set writable file for request BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:1316 #: src/hed/libs/credential/Credential.cpp:1505 msgid "Wrote request into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1318 #: src/hed/libs/credential/Credential.cpp:1508 msgid "Failed to write request into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1338 msgid "The credential's private key has already been initialized" msgstr "" #: src/hed/libs/credential/Credential.cpp:1386 msgid "" "Can not duplicate the subject name for the self-signing proxy certificate " "request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1396 msgid "Can not create a new X509_NAME_ENTRY for the proxy certificate request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1414 #: src/hed/libs/credential/Credential.cpp:1421 #: src/hed/libs/credential/Credential.cpp:1943 #: src/hed/libs/credential/Credential.cpp:1951 msgid "" "Can not convert PROXY_CERT_INFO_EXTENSION struct from internal to DER " "encoded format" msgstr "" #: src/hed/libs/credential/Credential.cpp:1451 msgid "Can't convert X509 request from internal to DER encoded format" msgstr "" #: src/hed/libs/credential/Credential.cpp:1461 msgid "Can not generate X509 request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1463 msgid "Can not set private key" msgstr "" #: src/hed/libs/credential/Credential.cpp:1561 msgid "Failed to get private key" msgstr "" #: src/hed/libs/credential/Credential.cpp:1580 msgid "Failed to get public key from RSA object" msgstr "" #: src/hed/libs/credential/Credential.cpp:1588 msgid "Failed to get public key from X509 object" msgstr "" #: src/hed/libs/credential/Credential.cpp:1595 msgid "Failed to get public key" msgstr "" #: src/hed/libs/credential/Credential.cpp:1634 #, c-format msgid "Certiticate chain number %d" msgstr "" #: src/hed/libs/credential/Credential.cpp:1662 msgid "NULL BIO passed to InquireRequest" msgstr "" #: src/hed/libs/credential/Credential.cpp:1665 msgid "PEM_read_bio_X509_REQ failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1669 msgid "d2i_X509_REQ_bio failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1702 msgid "Can not get policy from PROXY_CERT_INFO_EXTENSION extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1706 msgid "Can not get policy language from PROXY_CERT_INFO_EXTENSION extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1722 #, c-format msgid "Cert Type: %d" msgstr "" #: src/hed/libs/credential/Credential.cpp:1735 #: src/hed/libs/credential/Credential.cpp:1754 msgid "Can not create BIO for parsing request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1740 msgid "Read request from a string" msgstr "" #: src/hed/libs/credential/Credential.cpp:1743 msgid "Failed to read request from a string" msgstr "" #: src/hed/libs/credential/Credential.cpp:1758 msgid "Can not set readable file 
for request BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:1763 msgid "Read request from a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1766 msgid "Failed to read request from a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1806 msgid "Can not convert private key to DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:1924 msgid "Credential is not initialized" msgstr "" #: src/hed/libs/credential/Credential.cpp:1930 msgid "Failed to duplicate X509 structure" msgstr "" #: src/hed/libs/credential/Credential.cpp:1935 msgid "Failed to initialize X509 structure" msgstr "" #: src/hed/libs/credential/Credential.cpp:1958 msgid "Can not create extension for PROXY_CERT_INFO" msgstr "" #: src/hed/libs/credential/Credential.cpp:1962 #: src/hed/libs/credential/Credential.cpp:2010 msgid "Can not add X509 extension to proxy cert" msgstr "" #: src/hed/libs/credential/Credential.cpp:1978 msgid "Can not convert keyUsage struct from DER encoded format" msgstr "" #: src/hed/libs/credential/Credential.cpp:1990 #: src/hed/libs/credential/Credential.cpp:1999 msgid "Can not convert keyUsage struct from internal to DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2006 msgid "Can not create extension for keyUsage" msgstr "" #: src/hed/libs/credential/Credential.cpp:2019 msgid "Can not get extended KeyUsage extension from issuer certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2024 msgid "Can not copy extended KeyUsage extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:2029 msgid "Can not add X509 extended KeyUsage extension to new proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2039 msgid "Can not compute digest of public key" msgstr "" #: src/hed/libs/credential/Credential.cpp:2050 msgid "Can not copy the subject name from issuer for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2056 msgid "Can not create name entry CN for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2061 msgid "Can not set CN in proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2069 msgid "Can not set issuer's subject for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2074 msgid "Can not set version number for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2082 msgid "Can not set serial number for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2088 msgid "Can not duplicate serial number for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2094 msgid "Can not set the lifetime for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2098 msgid "Can not set pubkey for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2114 #: src/hed/libs/credential/Credential.cpp:2744 msgid "The credential to be signed is NULL" msgstr "" #: src/hed/libs/credential/Credential.cpp:2118 #: src/hed/libs/credential/Credential.cpp:2748 msgid "The credential to be signed contains no request" msgstr "" #: src/hed/libs/credential/Credential.cpp:2122 #: src/hed/libs/credential/Credential.cpp:2752 msgid "The BIO for output is NULL" msgstr "" #: src/hed/libs/credential/Credential.cpp:2136 #: src/hed/libs/credential/Credential.cpp:2759 msgid "Error when extracting public key from request" msgstr "" #: src/hed/libs/credential/Credential.cpp:2141 #: src/hed/libs/credential/Credential.cpp:2763 msgid "Failed to verify the request" msgstr "" #: 
src/hed/libs/credential/Credential.cpp:2145 msgid "Failed to add issuer's extension into proxy" msgstr "" #: src/hed/libs/credential/Credential.cpp:2169 msgid "Failed to find extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:2181 msgid "Can not get the issuer's private key" msgstr "" #: src/hed/libs/credential/Credential.cpp:2188 #: src/hed/libs/credential/Credential.cpp:2796 msgid "There is no digest in issuer's private key object" msgstr "" #: src/hed/libs/credential/Credential.cpp:2193 #: src/hed/libs/credential/Credential.cpp:2800 #, c-format msgid "%s is an unsupported digest type" msgstr "" #: src/hed/libs/credential/Credential.cpp:2204 #, c-format msgid "" "The signing algorithm %s is not allowed,it should be SHA1 or SHA2 to sign " "certificate requests" msgstr "" #: src/hed/libs/credential/Credential.cpp:2210 msgid "Failed to sign the proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2212 msgid "Succeeded to sign the proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2217 msgid "Failed to verify the signed certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2219 msgid "Succeeded to verify the signed certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2224 #: src/hed/libs/credential/Credential.cpp:2233 msgid "Output the proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2227 msgid "Can not convert signed proxy cert into PEM format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2236 msgid "Can not convert signed proxy cert into DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2252 #: src/hed/libs/credential/Credential.cpp:2275 msgid "Can not create BIO for signed proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2279 msgid "Can not set writable file for signed proxy certificate BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:2284 msgid "Wrote signed proxy certificate into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:2287 msgid "Failed to write signed proxy certificate into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:2323 #: src/hed/libs/credential/Credential.cpp:2363 #, c-format msgid "ERROR:%s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2371 #, c-format msgid "SSL error: %s, libs: %s, func: %s, reason: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2416 #, c-format msgid "unable to load number from: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2421 msgid "error converting number from bin to BIGNUM" msgstr "" #: src/hed/libs/credential/Credential.cpp:2448 msgid "file name too long" msgstr "" #: src/hed/libs/credential/Credential.cpp:2471 msgid "error converting serial to ASN.1 format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2504 #, c-format msgid "load serial from %s failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2509 msgid "add_word failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2514 #, c-format msgid "save serial to %s failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2534 msgid "Error initialising X509 store" msgstr "" #: src/hed/libs/credential/Credential.cpp:2541 msgid "Out of memory when generate random serial" msgstr "" #: src/hed/libs/credential/Credential.cpp:2553 msgid "CA certificate and CA private key do not match" msgstr "" #: src/hed/libs/credential/Credential.cpp:2577 #, c-format msgid "Failed to load extension section: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2614 msgid "malloc error" msgstr "" 
#: src/hed/libs/credential/Credential.cpp:2618 msgid "Subject does not start with '/'" msgstr "" #: src/hed/libs/credential/Credential.cpp:2634 #: src/hed/libs/credential/Credential.cpp:2655 msgid "escape character at end of string" msgstr "" #: src/hed/libs/credential/Credential.cpp:2646 #, c-format msgid "" "end of string encountered while processing type of subject name element #%d" msgstr "" #: src/hed/libs/credential/Credential.cpp:2683 #, c-format msgid "Subject Attribute %s has no known NID, skipped" msgstr "" #: src/hed/libs/credential/Credential.cpp:2687 #, c-format msgid "No value provided for Subject Attribute %s skipped" msgstr "" #: src/hed/libs/credential/Credential.cpp:2729 msgid "Failed to set the pubkey for X509 object by using pubkey from X509_REQ" msgstr "" #: src/hed/libs/credential/Credential.cpp:2739 msgid "The private key for signing is not initialized" msgstr "" #: src/hed/libs/credential/Credential.cpp:2819 #, c-format msgid "Error when loading the extension config file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2823 #, c-format msgid "Error when loading the extension config file: %s on line: %d" msgstr "" #: src/hed/libs/credential/Credential.cpp:2872 msgid "Can not sign a EEC" msgstr "" #: src/hed/libs/credential/Credential.cpp:2876 msgid "Output EEC certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2879 msgid "Can not convert signed EEC cert into DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2893 #: src/hed/libs/credential/Credential.cpp:2912 msgid "Can not create BIO for signed EEC certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2916 msgid "Can not set writable file for signed EEC certificate BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:2921 msgid "Wrote signed EEC certificate into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:2924 msgid "Failed to write signed EEC certificate into a file" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:147 msgid "Error writing raw certificate" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:224 msgid "Failed to add RFC proxy OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:227 #, c-format msgid "Succeeded to add RFC proxy OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:233 msgid "Failed to add anyLanguage OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:236 #: src/hed/libs/credential/NSSUtil.cpp:254 #, c-format msgid "Succeeded to add anyLanguage OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:242 msgid "Failed to add inheritAll OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:245 #, c-format msgid "Succeeded to add inheritAll OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:251 msgid "Failed to add Independent OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:260 msgid "Failed to add VOMS AC sequence OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:263 #, c-format msgid "Succeeded to add VOMS AC sequence OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:292 #, c-format msgid "NSS initialization failed on certificate database: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:303 msgid "Succeeded to initialize NSS" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:325 #, c-format msgid "Failed to read attribute %x from private key." 
msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:377 msgid "Succeeded to get credential" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:378 msgid "Failed to get credential" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:440 msgid "p12 file is empty" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:450 msgid "Unable to write to p12 file" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:466 msgid "Failed to open pk12 file" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:501 msgid "Failed to allocate p12 context" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1211 msgid "Failed to find issuer certificate for proxy certificate" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1362 #, c-format msgid "Failed to authenticate to PKCS11 slot %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1368 #, c-format msgid "Failed to find certificates by nickname: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1373 #, c-format msgid "No user certificate by nickname %s found" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1386 #: src/hed/libs/credential/NSSUtil.cpp:1422 msgid "Certificate does not have a slot" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1392 msgid "Failed to create export context" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1407 msgid "PKCS12 output password not provided" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1414 msgid "PKCS12 add password integrity failed" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1435 msgid "Failed to create key or certificate safe" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1451 msgid "Failed to add certificate and key" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1460 #, c-format msgid "Failed to initialize PKCS12 file: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1465 msgid "Failed to encode PKCS12" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1468 msgid "Succeeded to export PKCS12" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1496 #, c-format msgid "" "There is no certificate named %s found, the certificate could be removed " "when generating CSR" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1502 msgid "Failed to delete certificate" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1516 msgid "The name of the private key to delete is empty" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1521 #: src/hed/libs/credential/NSSUtil.cpp:1605 #, c-format msgid "Failed to authenticate to token %s." 
msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1528 #, c-format msgid "No private key with nickname %s exist in NSS database" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1561 msgid "Failed to delete private key and certificate" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1571 msgid "Failed to delete private key" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1582 #, c-format msgid "Can not find key with name: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1616 #, c-format msgid "Failed to delete private key that attaches to certificate: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1637 msgid "Can not read PEM private key: probably bad password" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1639 msgid "Can not read PEM private key: failed to decrypt" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1641 #: src/hed/libs/credential/NSSUtil.cpp:1643 msgid "Can not read PEM private key: failed to obtain password" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1644 msgid "Can not read PEM private key" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1651 msgid "Failed to convert EVP_PKEY to PKCS8" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1688 msgid "Failed to load private key" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1689 msgid "Succeeded to load PrivateKeyInfo" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1692 msgid "Failed to convert PrivateKeyInfo to EVP_PKEY" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1693 msgid "Succeeded to convert PrivateKeyInfo to EVP_PKEY" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1730 msgid "Failed to import private key" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1733 msgid "Succeeded to import private key" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1746 #: src/hed/libs/credential/NSSUtil.cpp:1788 #: src/hed/libs/credential/NSSUtil.cpp:2920 msgid "Failed to authenticate to key database" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1755 msgid "Succeeded to generate public/private key pair" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1757 msgid "Failed to generate public/private key pair" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1762 msgid "Failed to export private key" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1829 msgid "Failed to create subject name" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1845 msgid "Failed to create certificate request" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1858 msgid "Failed to call PORT_NewArena" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1866 msgid "Failed to encode the certificate request with DER format" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1873 msgid "Unknown key or hash type" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1879 msgid "Failed to sign the certificate request" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1895 msgid "Failed to output the certificate request as ASCII format" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1904 msgid "Failed to output the certificate request as DER format" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1913 #, c-format msgid "Succeeded to output the certificate request into %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1952 #: src/hed/libs/credential/NSSUtil.cpp:1989 msgid "Failed to read data from input file" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1968 msgid "Input is without trailer\n" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1979 msgid "Failed to convert ASCII to DER" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2030 
msgid "Certificate request is invalid" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2252 #, c-format msgid "The policy language: %s is not supported" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2260 #: src/hed/libs/credential/NSSUtil.cpp:2285 #: src/hed/libs/credential/NSSUtil.cpp:2308 #: src/hed/libs/credential/NSSUtil.cpp:2330 msgid "Failed to new arena" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2269 #: src/hed/libs/credential/NSSUtil.cpp:2294 msgid "Failed to create path length" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2272 #: src/hed/libs/credential/NSSUtil.cpp:2297 #: src/hed/libs/credential/NSSUtil.cpp:2317 #: src/hed/libs/credential/NSSUtil.cpp:2339 msgid "Failed to create policy language" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2738 #, c-format msgid "Failed to parse certificate request from CSR file %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2745 #, c-format msgid "Can not find certificate with name %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2776 #, c-format msgid "Proxy subject: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2795 msgid "Failed to start certificate extension" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2800 msgid "Failed to add key usage extension" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2805 msgid "Failed to add proxy certificate information extension" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2809 msgid "Failed to add voms AC extension" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2829 msgid "Failed to retrieve private key for issuer" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2836 msgid "Unknown key or hash type of issuer" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2842 msgid "Failed to set signature algorithm ID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2854 msgid "Failed to encode certificate" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2860 msgid "Failed to allocate item for certificate data" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2866 msgid "Failed to sign encoded certificate data" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2875 #, c-format msgid "Failed to open file %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2886 #, c-format msgid "Succeeded to output certificate to %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2927 #, c-format msgid "Failed to open input certificate file %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2945 msgid "Failed to read input certificate file" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2950 msgid "Failed to get certificate from certificate file" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2957 msgid "Failed to allocate certificate trust" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2962 msgid "Failed to decode trust string" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2971 #: src/hed/libs/credential/NSSUtil.cpp:2988 #, c-format msgid "Failed to authenticate to token %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2976 #: src/hed/libs/credential/NSSUtil.cpp:2993 msgid "Failed to add certificate to token or database" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2979 #: src/hed/libs/credential/NSSUtil.cpp:2982 msgid "Succeeded to import certificate" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2996 #: src/hed/libs/credential/NSSUtil.cpp:2999 #, c-format msgid "Succeeded to change trusts to: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:3026 #, c-format msgid "Failed to import private key from file: %s" msgstr "" #: 
src/hed/libs/credential/NSSUtil.cpp:3028 #, c-format msgid "Failed to import certificate from file: %s" msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:142 #, c-format msgid "" "ERROR: VOMS configuration line contains too many tokens. Expecting 5 or 6. " "Line was: %s" msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:158 #, c-format msgid "" "ERROR: file tree is too deep while scanning VOMS configuration. Max allowed " "nesting is %i." msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:176 #, c-format msgid "ERROR: failed to read file %s while scanning VOMS configuration." msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:181 #, c-format msgid "" "ERROR: VOMS configuration file %s contains too many lines. Max supported " "number is %i." msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:188 #, c-format msgid "" "ERROR: VOMS configuration file %s contains too long line(s). Max supported " "length is %i characters." msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:185 #, c-format msgid "Failed to create OpenSSL object %s %s - %u %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:193 #, c-format msgid "Failed to obtain OpenSSL identifier for %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:346 #, c-format msgid "VOMS: create FQAN: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:384 #, c-format msgid "VOMS: create attribute: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:670 msgid "VOMS: Can not allocate memory for parsing AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:678 msgid "VOMS: Can not allocate memory for storing the order of AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:704 msgid "VOMS: Can not parse AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:734 msgid "VOMS: CA directory or CA file must be provided" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:798 msgid "VOMS: failed to verify AC signature" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:867 #, c-format msgid "VOMS: trust chain to check: %s " msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:875 #, c-format msgid "" "VOMS: the DN in certificate: %s does not match that in trusted DN list: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:881 #, c-format msgid "" "VOMS: the Issuer identity in certificate: %s does not match that in trusted " "DN list: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:916 #, c-format msgid "VOMS: The lsc file %s does not exist" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:922 #, c-format msgid "VOMS: The lsc file %s can not be open" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:970 msgid "" "VOMS: there is no constraints of trusted voms DNs, the certificates stack in " "AC will not be checked." 
msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1003 msgid "VOMS: unable to match certificate chain against VOMS trusted DNs" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1023 msgid "VOMS: AC signature verification failed" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1032 msgid "VOMS: unable to verify certificate chain" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1038 #, c-format msgid "VOMS: cannot validate AC issuer for VO %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1061 #, c-format msgid "VOMS: directory for trusted service certificates: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1087 #, c-format msgid "VOMS: Cannot find certificate of AC issuer for VO %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1109 msgid "VOMS: Can not find AC_ATTR with IETFATTR type" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1116 msgid "VOMS: case of multiple IETFATTR attributes not supported" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1126 msgid "VOMS: case of multiple policyAuthority not supported" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1142 msgid "VOMS: the format of policyAuthority is unsupported - expecting URI" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1151 msgid "" "VOMS: the format of IETFATTRVAL is not supported - expecting OCTET STRING" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1228 msgid "VOMS: the grantor attribute is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1246 msgid "VOMS: the attribute name is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1252 #, c-format msgid "VOMS: the attribute value for %s is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1257 msgid "VOMS: the attribute qualifier is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1289 msgid "" "VOMS: both idcenoRevAvail and authorityKeyIdentifier certificate extensions " "must be present" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1323 #, c-format msgid "VOMS: FQDN of this host %s does not match any target in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1328 msgid "VOMS: the only supported critical extension of the AC is idceTargets" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1343 msgid "VOMS: failed to parse attributes from AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1387 msgid "VOMS: authorityKey is wrong" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1415 msgid "VOMS: missing AC parts" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1432 msgid "VOMS: unsupported time format format in AC - expecting GENERALIZED TIME" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1438 msgid "VOMS: AC is not yet valid" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1445 msgid "VOMS: AC has expired" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1460 msgid "VOMS: AC is not complete - missing Serial or Issuer information" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1465 #, c-format msgid "VOMS: the holder serial number is: %lx" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1466 #, c-format msgid "VOMS: the serial number in AC is: %lx" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1469 #, c-format msgid "" "VOMS: the holder serial number %lx is not the same as the serial number in " "AC %lx, the holder certificate that is used to create a voms proxy could be " "a proxy certificate with a different serial number as the original EEC cert" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1478 msgid "VOMS: the holder information in AC is wrong" msgstr "" #: 
src/hed/libs/credential/VOMSUtil.cpp:1500 #, c-format msgid "VOMS: DN of holder in AC: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1501 #, c-format msgid "VOMS: DN of holder: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1502 #, c-format msgid "VOMS: DN of issuer: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1509 msgid "" "VOMS: the holder name in AC is not related to the distinguished name in " "holder certificate" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1521 #: src/hed/libs/credential/VOMSUtil.cpp:1528 msgid "VOMS: the holder issuerUID is not the same as that in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1541 msgid "VOMS: the holder issuer name is not the same as that in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1551 msgid "VOMS: the issuer information in AC is wrong" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1559 #, c-format msgid "VOMS: the issuer name %s is not the same as that in AC - %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1567 msgid "" "VOMS: the serial number of AC INFO is too long - expecting no more than 20 " "octets" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1597 #: src/hed/libs/credential/VOMSUtil.cpp:1605 #: src/hed/libs/credential/VOMSUtil.cpp:1613 #: src/hed/libs/credential/VOMSUtil.cpp:1621 #: src/hed/libs/credential/VOMSUtil.cpp:1644 msgid "VOMS: unable to extract VO name from AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1635 #, c-format msgid "VOMS: unable to determine hostname of AC from VO name: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1654 msgid "VOMS: can not verify the signature of the AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1660 msgid "VOMS: problems while parsing information in AC" msgstr "" #: src/hed/libs/credential/test/VOMSUtilTest.cpp:128 #, c-format msgid "Line %d.%d of the attributes returned: %s" msgstr "" #: src/hed/libs/credentialstore/ClientVOMS.cpp:149 msgid "voms" msgstr "" #: src/hed/libs/credentialstore/CredentialStore.cpp:194 #: src/hed/libs/credentialstore/CredentialStore.cpp:245 #: src/hed/libs/credentialstore/CredentialStore.cpp:273 #: src/hed/libs/credentialstore/CredentialStore.cpp:336 #: src/hed/libs/credentialstore/CredentialStore.cpp:376 #: src/hed/libs/credentialstore/CredentialStore.cpp:406 #, c-format msgid "MyProxy failure: %s" msgstr "" #: src/hed/libs/crypto/OpenSSL.cpp:68 #, c-format msgid "SSL error: %d - %s:%s:%s" msgstr "" #: src/hed/libs/crypto/OpenSSL.cpp:81 msgid "SSL locks not initialized" msgstr "" #: src/hed/libs/crypto/OpenSSL.cpp:85 #, c-format msgid "wrong SSL lock requested: %i of %i: %i - %s" msgstr "" #: src/hed/libs/crypto/OpenSSL.cpp:112 msgid "Failed to lock arccrypto library in memory" msgstr "" #: src/hed/libs/crypto/OpenSSL.cpp:117 src/hed/libs/crypto/OpenSSL.cpp:128 msgid "Failed to initialize OpenSSL library" msgstr "" #: src/hed/libs/crypto/OpenSSL.cpp:150 msgid "Number of OpenSSL locks changed - reinitializing" msgstr "" #: src/hed/libs/data/DataMover.cpp:111 msgid "No locations found - probably no more physical instances" msgstr "" #: src/hed/libs/data/DataMover.cpp:117 src/hed/libs/data/FileCache.cpp:673 #: src/libs/data-staging/Processor.cpp:458 #: src/libs/data-staging/Processor.cpp:472 #, c-format msgid "Removing %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:130 msgid "This instance was already deleted" msgstr "" #: src/hed/libs/data/DataMover.cpp:136 msgid "Failed to delete physical file" msgstr "" #: src/hed/libs/data/DataMover.cpp:147 #, c-format msgid "Removing metadata in 
%s" msgstr "" #: src/hed/libs/data/DataMover.cpp:151 msgid "Failed to delete meta-information" msgstr "" #: src/hed/libs/data/DataMover.cpp:165 msgid "Failed to remove all physical instances" msgstr "" #: src/hed/libs/data/DataMover.cpp:169 #, c-format msgid "Removing logical file from metadata %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:172 msgid "Failed to delete logical file" msgstr "" #: src/hed/libs/data/DataMover.cpp:179 msgid "Failed to remove instance" msgstr "" #: src/hed/libs/data/DataMover.cpp:228 msgid "DataMover::Transfer : starting new thread" msgstr "" #: src/hed/libs/data/DataMover.cpp:256 #, c-format msgid "Transfer from %s to %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:258 msgid "Not valid source" msgstr "" #: src/hed/libs/data/DataMover.cpp:263 msgid "Not valid destination" msgstr "" #: src/hed/libs/data/DataMover.cpp:283 #: src/services/cache_service/CacheService.cpp:294 #, c-format msgid "Couldn't handle certificate: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:293 src/hed/libs/data/DataMover.cpp:591 #: src/libs/data-staging/Processor.cpp:137 #, c-format msgid "File %s is cached (%s) - checking permissions" msgstr "" #: src/hed/libs/data/DataMover.cpp:297 src/hed/libs/data/DataMover.cpp:610 #: src/hed/libs/data/DataMover.cpp:672 src/libs/data-staging/Processor.cpp:156 msgid "Permission checking passed" msgstr "" #: src/hed/libs/data/DataMover.cpp:298 src/hed/libs/data/DataMover.cpp:630 #: src/hed/libs/data/DataMover.cpp:1136 msgid "Linking/copying cached file" msgstr "" #: src/hed/libs/data/DataMover.cpp:323 #, c-format msgid "No locations for source found: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:327 #, c-format msgid "Failed to resolve source: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:339 src/hed/libs/data/DataMover.cpp:407 #, c-format msgid "No locations for destination found: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:344 src/hed/libs/data/DataMover.cpp:411 #, c-format msgid "Failed to resolve destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:359 #, c-format msgid "No locations for destination different from source found: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:380 #, c-format msgid "DataMover::Transfer: trying to destroy/overwrite destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:391 #, c-format msgid "Failed to delete %s but will still try to copy" msgstr "" #: src/hed/libs/data/DataMover.cpp:394 #, c-format msgid "Failed to delete %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:421 #, c-format msgid "Deleted but still have locations at %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:433 msgid "DataMover: cycle" msgstr "" #: src/hed/libs/data/DataMover.cpp:435 msgid "DataMover: no retries requested - exit" msgstr "" #: src/hed/libs/data/DataMover.cpp:440 msgid "DataMover: source out of tries - exit" msgstr "" #: src/hed/libs/data/DataMover.cpp:442 msgid "DataMover: destination out of tries - exit" msgstr "" #: src/hed/libs/data/DataMover.cpp:450 #, c-format msgid "Real transfer from %s to %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:477 #, c-format msgid "Creating buffer: %lli x %i" msgstr "" #: src/hed/libs/data/DataMover.cpp:493 #, c-format msgid "DataMove::Transfer: no checksum calculation for %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:498 #, c-format msgid "DataMove::Transfer: using supplied checksum %s:%s" msgstr "" #: src/hed/libs/data/DataMover.cpp:522 #, c-format msgid "DataMove::Transfer: will calculate %s checksum" msgstr "" #: src/hed/libs/data/DataMover.cpp:527 msgid 
"Buffer creation failed !" msgstr "" #: src/hed/libs/data/DataMover.cpp:550 #, c-format msgid "URL is mapped to: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:580 src/hed/libs/data/DataMover.cpp:639 #: src/libs/data-staging/Processor.cpp:91 msgid "Cached file is locked - should retry" msgstr "" #: src/hed/libs/data/DataMover.cpp:585 src/libs/data-staging/Processor.cpp:110 msgid "Failed to initiate cache" msgstr "" #: src/hed/libs/data/DataMover.cpp:602 #: src/services/cache_service/CacheService.cpp:366 #, c-format msgid "Permission checking failed: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:604 src/hed/libs/data/DataMover.cpp:664 #: src/hed/libs/data/DataMover.cpp:686 src/hed/libs/data/DataMover.cpp:697 msgid "source.next_location" msgstr "" #: src/hed/libs/data/DataMover.cpp:618 src/libs/data-staging/Processor.cpp:161 #, c-format msgid "Source modification date: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:619 src/libs/data-staging/Processor.cpp:162 #, c-format msgid "Cache creation date: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:625 src/libs/data-staging/Processor.cpp:167 msgid "Cached file is outdated, will re-download" msgstr "" #: src/hed/libs/data/DataMover.cpp:629 src/libs/data-staging/Processor.cpp:173 msgid "Cached copy is still valid" msgstr "" #: src/hed/libs/data/DataMover.cpp:657 msgid "URL is mapped to local access - checking permissions on original URL" msgstr "" #: src/hed/libs/data/DataMover.cpp:661 #, c-format msgid "Permission checking on original URL failed: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:674 msgid "Linking local file" msgstr "" #: src/hed/libs/data/DataMover.cpp:694 #, c-format msgid "Failed to make symbolic link %s to %s : %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:703 #, c-format msgid "Failed to change owner of symbolic link %s to %i" msgstr "" #: src/hed/libs/data/DataMover.cpp:715 #, c-format msgid "cache file: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:741 #, c-format msgid "Failed to stat source %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:743 src/hed/libs/data/DataMover.cpp:758 #: src/hed/libs/data/DataMover.cpp:795 src/hed/libs/data/DataMover.cpp:814 #: src/hed/libs/data/DataMover.cpp:982 src/hed/libs/data/DataMover.cpp:1014 #: src/hed/libs/data/DataMover.cpp:1024 src/hed/libs/data/DataMover.cpp:1101 msgid "(Re)Trying next source" msgstr "" #: src/hed/libs/data/DataMover.cpp:756 #, c-format msgid "Meta info of source and location do not match for %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:770 #, c-format msgid "" "Replica %s has high latency, but no more sources exist so will use this one" msgstr "" #: src/hed/libs/data/DataMover.cpp:774 #, c-format msgid "Replica %s has high latency, trying next source" msgstr "" #: src/hed/libs/data/DataMover.cpp:789 #, c-format msgid "Failed to prepare source: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:805 #, c-format msgid "Failed to start reading from source: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:826 msgid "Metadata of source and destination are different" msgstr "" #: src/hed/libs/data/DataMover.cpp:847 #, c-format msgid "Failed to preregister destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:852 src/hed/libs/data/DataMover.cpp:1125 msgid "destination.next_location" msgstr "" #: src/hed/libs/data/DataMover.cpp:866 #, c-format msgid "Failed to prepare destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:873 src/hed/libs/data/DataMover.cpp:897 #: src/hed/libs/data/DataMover.cpp:1122 #, c-format msgid "" "Failed to unregister 
preregistered lfn. You may need to unregister it "
"manually: %s"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:877 src/hed/libs/data/DataMover.cpp:900
#: src/hed/libs/data/DataMover.cpp:991 src/hed/libs/data/DataMover.cpp:1007
#: src/hed/libs/data/DataMover.cpp:1030 src/hed/libs/data/DataMover.cpp:1077
msgid "(Re)Trying next destination"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:889
#, c-format
msgid "Failed to start writing to destination: %s"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:913
msgid "Failed to start writing to cache"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:921 src/hed/libs/data/DataMover.cpp:969
#: src/hed/libs/data/DataMover.cpp:1148
msgid ""
"Failed to unregister preregistered lfn. You may need to unregister it "
"manually"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:929
msgid "Waiting for buffer"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:936
#, c-format
msgid "Failed updating timestamp on cache lock file %s for file %s: %s"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:941
#, c-format
msgid "buffer: read EOF : %s"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:942
#, c-format
msgid "buffer: write EOF: %s"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:943
#, c-format
msgid "buffer: error : %s, read: %s, write: %s"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:944
msgid "Closing read channel"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:950
msgid "Closing write channel"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:958
msgid "Failed to complete writing to destination"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:974
msgid "Transfer cancelled successfully"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:1019
msgid "Cause of failure unclear - choosing randomly"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:1062
#, c-format
msgid ""
"Checksum mismatch between checksum given as meta option (%s:%s) and "
"calculated checksum (%s)"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:1070
msgid ""
"Failed to unregister preregistered lfn. You may need to unregister it "
"manually"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:1074
msgid "Failed to delete destination, retry may fail"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:1084
msgid "Cannot compare empty checksum"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:1091
#: src/libs/data-staging/DataStagingDelivery.cpp:456
msgid "Checksum type of source and calculated checksum differ, cannot compare"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:1093
#, c-format
msgid "Checksum mismatch between calculated checksum %s and source checksum %s"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:1106
#: src/libs/data-staging/DataStagingDelivery.cpp:472
#, c-format
msgid "Calculated transfer checksum %s matches source checksum"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:1112
#: src/libs/data-staging/DataStagingDelivery.cpp:475
msgid "Checksum not computed"
msgstr ""

#: src/hed/libs/data/DataMover.cpp:1118
#, c-format
msgid "Failed to postregister destination %s"
msgstr ""

#: src/hed/libs/data/DataPoint.cpp:83
#, c-format
msgid "Invalid URL option: %s"
msgstr ""

#: src/hed/libs/data/DataPoint.cpp:254
#, c-format
msgid "Skipping invalid URL option %s"
msgstr ""

#: src/hed/libs/data/DataPoint.cpp:269
msgid ""
"Third party transfer was requested but the corresponding plugin could\n"
" not be loaded. Is the GFAL plugin installed? If not, please install "
"the\n"
" packages 'nordugrid-arc-plugins-gfal' and 'gfal2-all'. Depending on\n"
" your type of installation the package names might differ."
msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:91 #, c-format msgid "Can't handle location %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:183 msgid "Sorting replicas according to URL map" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:187 #, c-format msgid "Replica %s is mapped" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:195 #, c-format msgid "Sorting replicas according to preferred pattern %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:218 #: src/hed/libs/data/DataPointIndex.cpp:236 #, c-format msgid "Excluding replica %s matching pattern !%s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:229 #, c-format msgid "Replica %s matches host pattern %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:247 #, c-format msgid "Replica %s matches pattern %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:263 #, c-format msgid "Replica %s doesn't match preferred pattern or URL map" msgstr "" #: src/hed/libs/data/DataStatus.cpp:12 msgid "Operation completed successfully" msgstr "" #: src/hed/libs/data/DataStatus.cpp:13 msgid "Source is invalid URL" msgstr "" #: src/hed/libs/data/DataStatus.cpp:14 msgid "Destination is invalid URL" msgstr "" #: src/hed/libs/data/DataStatus.cpp:15 msgid "Resolving of index service for source failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:16 msgid "Resolving of index service for destination failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:17 msgid "Can't read from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:18 msgid "Can't write to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:19 msgid "Failed while reading from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:20 msgid "Failed while writing to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:21 msgid "Failed while transferring data" msgstr "" #: src/hed/libs/data/DataStatus.cpp:22 msgid "Failed while finishing reading from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:23 msgid "Failed while finishing writing to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:24 msgid "First stage of registration to index service failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:25 msgid "Last stage of registration to index service failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:26 msgid "Unregistering from index service failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:27 msgid "Error in caching procedure" msgstr "" #: src/hed/libs/data/DataStatus.cpp:28 msgid "Error due to expiration of provided credentials" msgstr "" #: src/hed/libs/data/DataStatus.cpp:29 msgid "Delete error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:30 msgid "No valid location available" msgstr "" #: src/hed/libs/data/DataStatus.cpp:31 msgid "Location already exists" msgstr "" #: src/hed/libs/data/DataStatus.cpp:32 msgid "Operation not supported for this kind of URL" msgstr "" #: src/hed/libs/data/DataStatus.cpp:33 msgid "Feature is not implemented" msgstr "" #: src/hed/libs/data/DataStatus.cpp:34 msgid "Already reading from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:35 msgid "Already writing to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:36 msgid "Read access check failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:37 msgid "Directory listing failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:38 msgid "Object is not suitable for listing" msgstr "" #: src/hed/libs/data/DataStatus.cpp:39 msgid "Failed to obtain information about file" msgstr "" #: src/hed/libs/data/DataStatus.cpp:40 msgid "No such file or directory" 
msgstr "" #: src/hed/libs/data/DataStatus.cpp:41 msgid "Object not initialized (internal error)" msgstr "" #: src/hed/libs/data/DataStatus.cpp:42 msgid "Operating System error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:43 msgid "Failed to stage file(s)" msgstr "" #: src/hed/libs/data/DataStatus.cpp:44 msgid "Inconsistent metadata" msgstr "" #: src/hed/libs/data/DataStatus.cpp:45 msgid "Failed to prepare source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:46 msgid "Should wait for source to be prepared" msgstr "" #: src/hed/libs/data/DataStatus.cpp:47 msgid "Failed to prepare destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:48 msgid "Should wait for destination to be prepared" msgstr "" #: src/hed/libs/data/DataStatus.cpp:49 msgid "Failed to finalize reading from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:50 msgid "Failed to finalize writing to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:51 msgid "Failed to create directory" msgstr "" #: src/hed/libs/data/DataStatus.cpp:52 msgid "Failed to rename URL" msgstr "" #: src/hed/libs/data/DataStatus.cpp:53 msgid "Data was already cached" msgstr "" #: src/hed/libs/data/DataStatus.cpp:54 msgid "Operation cancelled successfully" msgstr "" #: src/hed/libs/data/DataStatus.cpp:55 msgid "Generic error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:56 src/hed/libs/data/DataStatus.cpp:69 msgid "Unknown error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:60 msgid "No error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:61 msgid "Transfer timed out" msgstr "" #: src/hed/libs/data/DataStatus.cpp:62 msgid "Checksum mismatch" msgstr "" #: src/hed/libs/data/DataStatus.cpp:63 msgid "Bad logic" msgstr "" #: src/hed/libs/data/DataStatus.cpp:64 msgid "All results obtained are invalid" msgstr "" #: src/hed/libs/data/DataStatus.cpp:65 msgid "Temporary service error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:66 msgid "Permanent service error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:67 msgid "Error switching uid" msgstr "" #: src/hed/libs/data/DataStatus.cpp:68 msgid "Request timed out" msgstr "" #: src/hed/libs/data/FileCache.cpp:101 msgid "No cache directory specified" msgstr "" #: src/hed/libs/data/FileCache.cpp:118 msgid "No usable caches" msgstr "" #: src/hed/libs/data/FileCache.cpp:127 msgid "No remote cache directory specified" msgstr "" #: src/hed/libs/data/FileCache.cpp:149 msgid "No draining cache directory specified" msgstr "" #: src/hed/libs/data/FileCache.cpp:177 #, c-format msgid "Failed to create cache directory for file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:187 #, c-format msgid "Failed to create any cache directories for %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:194 #, c-format msgid "Failed to change permissions on %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:206 #, c-format msgid "Failed to delete stale cache file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:209 #, c-format msgid "Failed to release lock on file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:248 #, c-format msgid "Found file %s in remote cache at %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:265 #, c-format msgid "Failed to delete stale remote cache file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:269 #, c-format msgid "Failed to release lock on remote cache file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:287 src/hed/libs/data/FileCache.cpp:339 #, c-format msgid "Failed to obtain lock on cache file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:294 
src/hed/libs/data/FileCache.cpp:348 #: src/hed/libs/data/FileCache.cpp:408 #, c-format msgid "Error removing cache file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:296 src/hed/libs/data/FileCache.cpp:314 #: src/hed/libs/data/FileCache.cpp:318 src/hed/libs/data/FileCache.cpp:350 #: src/hed/libs/data/FileCache.cpp:361 #, c-format msgid "Failed to remove lock on %s. Some manual intervention may be required" msgstr "" #: src/hed/libs/data/FileCache.cpp:301 #, c-format msgid "Replicating file %s to local cache file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:304 src/hed/libs/data/FileCache.cpp:611 #, c-format msgid "Failed to copy file %s to %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:309 #, c-format msgid "" "Replicating file %s from remote cache failed due to source being deleted or " "modified" msgstr "" #: src/hed/libs/data/FileCache.cpp:311 #, c-format msgid "Failed to delete bad copy of remote cache file %s at %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:333 #, c-format msgid "Failed looking up attributes of cached file: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:380 src/hed/libs/data/FileCache.cpp:414 #, c-format msgid "Failed to unlock file %s: %s. Manual intervention may be required" msgstr "" #: src/hed/libs/data/FileCache.cpp:397 #, c-format msgid "Invalid lock on file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:403 #, c-format msgid "Failed to remove .meta file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:468 #, c-format msgid "Cache not found for file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:478 #, c-format msgid "" "Cache file %s was modified in the last second, sleeping 1 second to avoid " "race condition" msgstr "" #: src/hed/libs/data/FileCache.cpp:484 src/hed/libs/data/FileCache.cpp:819 #, c-format msgid "Cache file %s does not exist" msgstr "" #: src/hed/libs/data/FileCache.cpp:503 #, c-format msgid "Cache file for %s not found in any local or remote cache" msgstr "" #: src/hed/libs/data/FileCache.cpp:507 #, c-format msgid "Using remote cache file %s for url %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:510 src/hed/libs/data/FileCache.cpp:821 #, c-format msgid "Error accessing cache file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:516 #, c-format msgid "Cannot create directory %s for per-job hard links" msgstr "" #: src/hed/libs/data/FileCache.cpp:521 #, c-format msgid "Cannot change permission of %s: %s " msgstr "" #: src/hed/libs/data/FileCache.cpp:525 #, c-format msgid "Cannot change owner of %s: %s " msgstr "" #: src/hed/libs/data/FileCache.cpp:539 #, c-format msgid "Failed to remove existing hard link at %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:543 src/hed/libs/data/FileCache.cpp:554 #, c-format msgid "Failed to create hard link from %s to %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:549 #, c-format msgid "Cache file %s not found" msgstr "" #: src/hed/libs/data/FileCache.cpp:564 #, c-format msgid "Failed to change permissions or set owner of hard link %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:572 #, c-format msgid "Failed to release lock on cache file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:583 #, c-format msgid "Cache file %s was locked during link/copy, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:588 #, c-format msgid "Cache file %s was deleted during link/copy, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:593 #, c-format msgid "Cache file %s was modified while linking, must start again" msgstr "" #: 
src/hed/libs/data/FileCache.cpp:617 #, c-format msgid "Failed to set executable bit on file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:622 #, c-format msgid "Failed to set executable bit on file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:636 #, c-format msgid "Failed to remove existing symbolic link at %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:640 src/hed/libs/data/FileCache.cpp:645 #, c-format msgid "Failed to create symbolic link from %s to %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:675 #, c-format msgid "Failed to remove cache per-job dir %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:694 src/hed/libs/data/FileCache.cpp:771 #, c-format msgid "Error reading meta file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:699 src/hed/libs/data/FileCache.cpp:776 #, c-format msgid "Error opening meta file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:704 src/hed/libs/data/FileCache.cpp:780 #, c-format msgid "meta file %s is empty" msgstr "" #: src/hed/libs/data/FileCache.cpp:713 #, c-format msgid "" "File %s is already cached at %s under a different URL: %s - will not add DN " "to cached list" msgstr "" #: src/hed/libs/data/FileCache.cpp:733 #, c-format msgid "Bad format detected in file %s, in line %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:750 #, c-format msgid "Could not acquire lock on meta file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:754 #, c-format msgid "Error opening meta file for writing %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:790 #, c-format msgid "DN %s is cached and is valid until %s for URL %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:794 #, c-format msgid "DN %s is cached but has expired for URL %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:845 #, c-format msgid "Failed to acquire lock on cache meta file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:850 #, c-format msgid "Failed to create cache meta file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:865 #, c-format msgid "Failed to read cache meta file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:870 #, c-format msgid "Cache meta file %s is empty, will recreate" msgstr "" #: src/hed/libs/data/FileCache.cpp:875 #, c-format msgid "Cache meta file %s possibly corrupted, will recreate" msgstr "" #: src/hed/libs/data/FileCache.cpp:883 #, c-format msgid "" "File %s is already cached at %s under a different URL: %s - this file will " "not be cached" msgstr "" #: src/hed/libs/data/FileCache.cpp:893 #, c-format msgid "Error looking up attributes of cache meta file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:955 #, c-format msgid "Using cache %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:969 #, c-format msgid "Error getting info from statvfs for the path %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:975 #, c-format msgid "Cache %s: Free space %f GB" msgstr "" #: src/hed/libs/data/URLMap.cpp:33 #, c-format msgid "Can't use URL %s" msgstr "" #: src/hed/libs/data/URLMap.cpp:39 #, c-format msgid "file %s is not accessible" msgstr "" #: src/hed/libs/data/URLMap.cpp:49 #, c-format msgid "Mapping %s to %s" msgstr "" #: src/hed/libs/data/examples/simple_copy.cpp:17 msgid "Usage: copy source destination" msgstr "" #: src/hed/libs/data/examples/simple_copy.cpp:42 #, c-format msgid "Copy failed: %s" msgstr "" #: src/hed/libs/globusutils/GSSCredential.cpp:41 #, c-format msgid "Failed to read proxy file: %s" msgstr "" #: src/hed/libs/globusutils/GSSCredential.cpp:49 #, c-format msgid "Failed to read certificate file: %s" msgstr "" #: 
src/hed/libs/globusutils/GSSCredential.cpp:56
#, c-format
msgid "Failed to read private key file: %s"
msgstr ""

#: src/hed/libs/globusutils/GSSCredential.cpp:82
#, c-format
msgid ""
"Failed to convert GSI credential to GSS credential (major: %d, minor: %d)%s:%"
"s"
msgstr ""

#: src/hed/libs/globusutils/GSSCredential.cpp:94
#, c-format
msgid "Failed to release GSS credential (major: %d, minor: %d):%s:%s"
msgstr ""

#: src/hed/libs/infosys/BootstrapISIS.cpp:26
msgid "Initialize ISIS handler"
msgstr ""

#: src/hed/libs/infosys/BootstrapISIS.cpp:31
#, c-format
msgid "Can't recognize URL: %s"
msgstr ""

#: src/hed/libs/infosys/BootstrapISIS.cpp:48
msgid "Initialize ISIS handler succeeded"
msgstr ""

#: src/hed/libs/infosys/BootstrapISIS.cpp:52
#, c-format
msgid "Remove ISIS (%s) from list"
msgstr ""

#: src/hed/libs/infosys/BootstrapISIS.cpp:72
#, c-format
msgid "getISISList from %s"
msgstr ""

#: src/hed/libs/infosys/BootstrapISIS.cpp:73
#, c-format
msgid "Key %s, Cert: %s, CA: %s"
msgstr ""

#: src/hed/libs/infosys/BootstrapISIS.cpp:105
#, c-format
msgid "ISIS (%s) is not available or did not return a valid response. (%d. reconnection)"
msgstr ""

#: src/hed/libs/infosys/BootstrapISIS.cpp:107
#, c-format
msgid "Connection to the ISIS (%s) succeeded and the list of ISISes was retrieved."
msgstr ""

#: src/hed/libs/infosys/BootstrapISIS.cpp:133
#, c-format
msgid "GetISISList adds this (%s) ISIS into the list."
msgstr ""

#: src/hed/libs/infosys/BootstrapISIS.cpp:145
#, c-format
msgid "Chosen ISIS for communication: %s"
msgstr ""

#: src/hed/libs/infosys/BootstrapISIS.cpp:152
msgid "Get ISIS from list of ISIS handler"
msgstr ""

#: src/hed/libs/infosys/BootstrapISIS.cpp:156
msgid "Here is the end of the infinite calling loop."
msgstr ""

#: src/hed/libs/infosys/BootstrapISIS.cpp:163
msgid "There are no more ISISes available. The list of ISISes is already empty."
msgstr ""

#: src/hed/libs/infosys/InfoCache.cpp:47
#, c-format
msgid "cannot create directory: %s"
msgstr ""

#: src/hed/libs/infosys/InfoCache.cpp:60
#, c-format
msgid "Cache configuration: %s"
msgstr ""

#: src/hed/libs/infosys/InfoCache.cpp:63
msgid "Missing cache root in configuration"
msgstr ""

#: src/hed/libs/infosys/InfoCache.cpp:67
msgid "Missing service ID"
msgstr ""

#: src/hed/libs/infosys/InfoCache.cpp:70
#, c-format
msgid "Cache root: %s"
msgstr ""

#: src/hed/libs/infosys/InfoCache.cpp:76
#, c-format
msgid "Cache directory: %s"
msgstr ""

#: src/hed/libs/infosys/InfoCache.cpp:143
#: src/hed/libs/infosys/InfoCache.cpp:162
#: src/hed/libs/infosys/InfoCache.cpp:181
#: src/hed/libs/infosys/InfoCache.cpp:206
msgid "InfoCache object is not set up"
msgstr ""

#: src/hed/libs/infosys/InfoCache.cpp:147
#: src/hed/libs/infosys/InfoCache.cpp:166
#, c-format
msgid "Invalid path in Set(): %s"
msgstr ""

#: src/hed/libs/infosys/InfoCache.cpp:185
#, c-format
msgid "Invalid path in Get(): %s"
msgstr ""

#: src/hed/libs/infosys/InfoRegister.cpp:23
#, c-format
msgid ""
"InfoRegistrar thread waiting %d seconds for all Register elements to be "
"created."
msgstr ""

#: src/hed/libs/infosys/InfoRegister.cpp:69
#, c-format
msgid ""
"InfoRegister created with config:\n"
"%s"
msgstr ""

#: src/hed/libs/infosys/InfoRegister.cpp:79
#, c-format
msgid "InfoRegister to be registered in Registrar %s"
msgstr ""

#: src/hed/libs/infosys/InfoRegister.cpp:81
msgid "Discarding Registrar because the \"URL\" element is missing or empty."
msgstr ""

#: src/hed/libs/infosys/InfoRegister.cpp:133
#, c-format
msgid "InfoRegistrar id \"%s\" has been found."
msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:140 #, c-format msgid "InfoRegistrar id \"%s\" was not found. New registrar created" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:182 #, c-format msgid "" "Configuration error. Retry: \"%s\" is not a valid value. Default value will " "be used." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:188 #, c-format msgid "Retry: %d" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:197 #, c-format msgid "Key: %s, cert: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:226 msgid "The service won't be registered." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:231 msgid "Configuration error. Missing mandatory \"Period\" element." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:236 msgid "Configuration error. Missing mandatory \"Endpoint\" element." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:241 msgid "Configuration error. Missing mandatory \"Expiration\" element." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:248 #, c-format msgid "" "Service was already registered to the InfoRegistrar connecting to infosys %s." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:284 #, c-format msgid "" "Service is successfully added to the InfoRegistrar connecting to infosys %s." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:301 msgid "Unregistred Service can not be removed." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:348 #: src/hed/libs/infosys/InfoRegister.cpp:411 #, c-format msgid "Key: %s, Cert: %s, Proxy: %s, CADir: %s CAPath" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:359 #: src/hed/libs/infosys/InfoRegister.cpp:646 #, c-format msgid "Response from the ISIS: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:363 #, c-format msgid "Failed to remove registration from %s ISIS" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:366 #, c-format msgid "Successfuly removed registration from ISIS (%s)" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:372 #, c-format msgid "Failed to remove registration from ISIS (%s) - %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:379 #: src/hed/libs/infosys/InfoRegister.cpp:656 #, c-format msgid "Retry connecting to the ISIS (%s) %d time(s)." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:385 #, c-format msgid "ISIS (%s) is not available." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:389 #: src/hed/libs/infosys/InfoRegister.cpp:439 #, c-format msgid "Service removed from InfoRegistrar connecting to infosys %s." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:420 #, c-format msgid "Failed to remove registration from %s EMIRegistry" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:423 #, c-format msgid "Successfuly removed registration from EMIRegistry (%s)" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:429 #: src/hed/libs/infosys/InfoRegister.cpp:957 #, c-format msgid "Retry connecting to the EMIRegistry (%s) %d time(s)." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:435 #, c-format msgid "EMIRegistry (%s) is not available." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:476 #: src/hed/libs/infosys/InfoRegister.cpp:684 #, c-format msgid "Registration starts: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:477 #: src/hed/libs/infosys/InfoRegister.cpp:685 #, c-format msgid "reg_.size(): %d" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:480 #: src/hed/libs/infosys/InfoRegister.cpp:688 msgid "Registrant has no proper URL specified. Registration end." 
msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:510 #: src/hed/libs/infosys/InfoRegister.cpp:713 msgid "Create RegEntry XML element" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:542 #: src/hed/libs/infosys/InfoRegister.cpp:745 msgid "ServiceID attribute calculated from Endpoint Reference" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:546 #: src/hed/libs/infosys/InfoRegister.cpp:749 msgid "Generation Time attribute calculated from current time" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:553 #: src/hed/libs/infosys/InfoRegister.cpp:756 #, c-format msgid "ServiceID stored: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:559 #: src/hed/libs/infosys/InfoRegister.cpp:762 #, c-format msgid "Missing service document provided by the service %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:565 #: src/hed/libs/infosys/InfoRegister.cpp:768 #, c-format msgid "" "Missing MetaServiceAdvertisment or Expiration values provided by the service " "%s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:572 #: src/hed/libs/infosys/InfoRegister.cpp:775 #, c-format msgid "Missing Type value provided by the service %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:580 #: src/hed/libs/infosys/InfoRegister.cpp:783 #, c-format msgid "Missing Endpoint Reference value provided by the service %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:598 #, c-format msgid "Registering to %s ISIS" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:621 #: src/hed/libs/infosys/InfoRegister.cpp:822 #, c-format msgid "Key: %s, Cert: %s, Proxy: %s, CADir: %s, CAFile" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:625 #, c-format msgid "Sent RegEntries: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:639 #, c-format msgid "Error during registration to %s ISIS" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:648 #, c-format msgid "Successful registration to ISIS (%s)" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:652 #, c-format msgid "Failed to register to ISIS (%s) - %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:668 #: src/hed/libs/infosys/InfoRegister.cpp:967 #, c-format msgid "Registration ends: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:669 #: src/hed/libs/infosys/InfoRegister.cpp:968 #, c-format msgid "Waiting period is %d second(s)." msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:679 #: src/hed/libs/infosys/InfoRegister.cpp:978 #, c-format msgid "Registration exit: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:801 #, c-format msgid "Registering to %s EMIRegistry" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:927 #, c-format msgid "Sent entry: %s" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:940 #, c-format msgid "Error during %s to %s EMIRegistry" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:943 #, c-format msgid "Successful %s to EMIRegistry (%s)" msgstr "" #: src/hed/libs/infosys/InfoRegister.cpp:949 #, c-format msgid "Failed to %s to EMIRegistry (%s) - %d" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:25 msgid "Module Manager Init" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:68 msgid "" "Busy plugins found while unloading Module Manager. Waiting for them to be " "released." 
msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:202 #, c-format msgid "Found %s in cache" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:209 #, c-format msgid "Could not locate module %s in following paths:" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:213 #, c-format msgid "\t%s" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:227 #, c-format msgid "Loaded %s" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:271 msgid "Module Manager Init by ModuleManager::setCfg" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:307 #: src/hed/libs/loader/ModuleManager.cpp:320 #, c-format msgid "%s made persistent" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:311 #, c-format msgid "Not found %s in cache" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:325 msgid "Specified module not found in cache" msgstr "" #: src/hed/libs/loader/Plugin.cpp:369 src/hed/libs/loader/Plugin.cpp:574 #, c-format msgid "Could not find loadable module descriptor by name %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:375 src/hed/libs/loader/Plugin.cpp:581 #, c-format msgid "Could not find loadable module by name %s (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:381 src/hed/libs/loader/Plugin.cpp:492 #: src/hed/libs/loader/Plugin.cpp:586 #, c-format msgid "Module %s is not an ARC plugin (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:398 src/hed/libs/loader/Plugin.cpp:502 #: src/hed/libs/loader/Plugin.cpp:608 #, c-format msgid "Module %s failed to reload (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:462 src/hed/libs/loader/Plugin.cpp:475 #, c-format msgid "Loadable module %s contains no requested plugin %s of kind %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:480 #, c-format msgid "Could not find loadable module descriptor by names %s and %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:486 #, c-format msgid "Could not find loadable module by names %s and %s (%s)" msgstr "" #: src/hed/libs/message/MCC.cpp:77 src/hed/libs/message/Service.cpp:25 #, c-format msgid "No security processing/check requested for '%s'" msgstr "" #: src/hed/libs/message/MCC.cpp:85 msgid "Security processing/check failed" msgstr "" #: src/hed/libs/message/MCC.cpp:89 msgid "Security processing/check passed" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:16 msgid "Chain(s) configuration failed" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:133 msgid "SecHandler configuration is not defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:156 msgid "SecHandler has no configuration" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:162 msgid "SecHandler has no name attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:172 #, c-format msgid "Security Handler %s(%s) could not be created" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:176 #, c-format msgid "SecHandler: %s(%s)" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:188 msgid "Component has no name attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:193 msgid "Component has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:202 #, c-format msgid "Component %s(%s) could not be created" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:232 #, c-format msgid "Component's %s(%s) next has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:287 #, c-format msgid "Loaded MCC %s(%s)" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:305 #, c-format msgid "Plexer's (%s) next has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:315 #, c-format msgid "Loaded Plexer %s" msgstr "" #: 
src/hed/libs/message/MCCLoader.cpp:323 msgid "Service has no Name attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:329 msgid "Service has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:338 #, c-format msgid "Service %s(%s) could not be created" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:345 #, c-format msgid "Loaded Service %s(%s)" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:387 #, c-format msgid "Linking MCC %s(%s) to MCC (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:398 #, c-format msgid "Linking MCC %s(%s) to Service (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:407 #, c-format msgid "Linking MCC %s(%s) to Plexer (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:412 #, c-format msgid "MCC %s(%s) - next %s(%s) has no target" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:431 #, c-format msgid "Linking Plexer %s to MCC (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:442 #, c-format msgid "Linking Plexer %s to Service (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:451 #, c-format msgid "Linking Plexer %s to Plexer (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:457 #, c-format msgid "Plexer (%s) - next %s(%s) has no target" msgstr "" #: src/hed/libs/message/Plexer.cpp:31 #, c-format msgid "Bad label: \"%s\"" msgstr "" #: src/hed/libs/message/Plexer.cpp:47 #, c-format msgid "Operation on path \"%s\"" msgstr "" #: src/hed/libs/message/Plexer.cpp:60 #, c-format msgid "No next MCC or Service at path \"%s\"" msgstr "" #: src/hed/libs/message/Service.cpp:33 #, c-format msgid "Security processing/check for '%s' failed" msgstr "" #: src/hed/libs/message/Service.cpp:37 #, c-format msgid "Security processing/check for '%s' passed" msgstr "" #: src/hed/libs/message/Service.cpp:43 msgid "Empty registration collector" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:136 #, c-format msgid "Can not load ARC evaluator object: %s" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:187 #, c-format msgid "Can not load ARC request object: %s" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:228 #, c-format msgid "Can not load policy object: %s" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:276 msgid "Can not load policy object" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:324 msgid "Can not load request object" msgstr "" #: src/hed/libs/security/ArcPDP/PolicyParser.cpp:119 msgid "Can not generate policy object" msgstr "" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:37 #, c-format msgid "Id= %s,Type= %s,Issuer= %s,Value= %s" msgstr "" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:40 #, c-format msgid "No Attribute exists, which can deal with type: %s" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:157 #, c-format msgid "HTTP Error: %d %s" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:222 msgid "Cannot create http payload" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:288 msgid "No next element in the chain" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:297 #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:253 msgid "next element of the chain returned error status" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:306 msgid "next element of the chain returned no payload" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:318 msgid "next element of the chain returned invalid/unsupported payload" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:393 msgid "Error to flush output payload" msgstr "" #: 
src/hed/mcc/http/PayloadHTTP.cpp:306 #, c-format msgid "<< %s" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:355 src/hed/mcc/http/PayloadHTTP.cpp:457 #, c-format msgid "< %s" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:576 msgid "Failed to parse HTTP header" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:837 msgid "Invalid HTTP object can't produce result" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:950 #, c-format msgid "> %s" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:975 msgid "Failed to write header to output stream" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:1000 src/hed/mcc/http/PayloadHTTP.cpp:1006 #: src/hed/mcc/http/PayloadHTTP.cpp:1012 src/hed/mcc/http/PayloadHTTP.cpp:1022 #: src/hed/mcc/http/PayloadHTTP.cpp:1034 src/hed/mcc/http/PayloadHTTP.cpp:1039 #: src/hed/mcc/http/PayloadHTTP.cpp:1044 src/hed/mcc/http/PayloadHTTP.cpp:1052 #: src/hed/mcc/http/PayloadHTTP.cpp:1059 msgid "Failed to write body to output stream" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:31 msgid "Skipping service: no ServicePath found!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:37 msgid "Skipping service: no SchemaPath found!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:89 msgid "Parser Context creation failed!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:98 msgid "Cannot parse schema!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:110 msgid "Empty payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:119 msgid "Could not convert payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:125 #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:212 msgid "Could not create PayloadSOAP!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:196 msgid "Empty input payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:205 msgid "Could not convert incoming payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:232 msgid "Missing schema! Skipping validation..." msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:237 msgid "Could not validate message!" 
msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:245 #: src/hed/mcc/soap/MCCSOAP.cpp:222 msgid "empty next chain element" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:257 #: src/hed/mcc/soap/MCCSOAP.cpp:238 msgid "next element of the chain returned empty payload" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:265 msgid "next element of the chain returned invalid payload" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:192 msgid "empty input payload" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:198 msgid "incoming message is not SOAP" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:215 src/hed/mcc/soap/MCCSOAP.cpp:372 msgid "Security check failed in SOAP MCC for incoming message" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:230 #, c-format msgid "next element of the chain returned error status: %s" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:249 msgid "next element of the chain returned unknown payload - passing through" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:252 src/hed/mcc/soap/MCCSOAP.cpp:265 #: src/hed/mcc/soap/MCCSOAP.cpp:317 msgid "Security check failed in SOAP MCC for outgoing message" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:104 src/hed/mcc/tcp/MCCTCP.cpp:636 msgid "Cannot initialize winsock library" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:119 msgid "Missing Port in Listen element" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:128 msgid "Version in Listen element can't be recognized" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:137 #, c-format msgid "Failed to obtain local address for port %s - %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:139 #, c-format msgid "Failed to obtain local address for %s:%s - %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:146 #, c-format msgid "Trying to listen on TCP port %s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:148 #, c-format msgid "Trying to listen on %s:%s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:154 #, c-format msgid "Failed to create socket for for listening at TCP port %s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:156 #, c-format msgid "Failed to create socket for for listening at %s:%s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:171 #, c-format msgid "" "Failed to limit socket to IPv6 at TCP port %s - may cause errors for IPv4 at " "same port" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:173 #, c-format msgid "" "Failed to limit socket to IPv6 at %s:%s - may cause errors for IPv4 at same " "port" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:181 #, c-format msgid "Failed to bind socket for TCP port %s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:183 #, c-format msgid "Failed to bind socket for %s:%s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:198 #, c-format msgid "Failed to listen at TCP port %s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:200 #, c-format msgid "Failed to listen at %s:%s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:217 #, c-format msgid "Listening on TCP port %s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:219 #, c-format msgid "Listening on %s:%s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:226 #, c-format msgid "Failed to start listening on any address for %s:%s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:228 #, c-format msgid "Failed to start listening on any address for %s:%s(IPv%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:234 msgid "No listening ports initiated" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:245 #, c-format msgid "Setting connections limit to %i, connections over limit will be %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:249 msgid "Failed to start thread for listening" msgstr "" #: 
src/hed/mcc/tcp/MCCTCP.cpp:285 msgid "Failed to start thread for communication" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:315 msgid "Failed while waiting for connection request" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:337 msgid "Failed to accept connection request" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:346 msgid "Too many connections - dropping new one" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:353 msgid "Too many connections - waiting for old to close" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:577 msgid "next chain element called" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:592 msgid "Only Raw Buffer payload is supported for output" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:600 src/hed/mcc/tcp/MCCTCP.cpp:709 #: src/hed/mcc/tls/MCCTLS.cpp:545 msgid "Failed to send content of buffer" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:612 msgid "TCP executor is removed" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:614 #, c-format msgid "Sockets do not match on exit %i != %i" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:642 msgid "No Connect element specified" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:648 msgid "Missing Port in Connect element" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:654 msgid "Missing Host in Connect element" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:685 msgid "TCP client process called" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:71 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:87 #, c-format msgid "Failed to resolve %s (%s)" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:97 #, c-format msgid "Trying to connect %s(%s):%d" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:101 #, c-format msgid "Failed to create socket for connecting to %s(%s):%d - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:114 #, c-format msgid "" "Failed to get TCP socket options for connection to %s(%s):%d - timeout won't " "work - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:121 #, c-format msgid "Failed to connect to %s(%s):%i - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:131 #, c-format msgid "Timeout connecting to %s(%s):%i - %i s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:139 #, c-format msgid "Failed while waiting for connection to %s(%s):%i - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:149 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:158 #, c-format msgid "Failed to connect to %s(%s):%i" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:215 msgid "" "Received message out-of-band (not critical, ERROR level is just for " "debugging purposes)" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:39 msgid "Independent proxy - no rights granted" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:43 msgid "Proxy with all rights inherited" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:51 msgid "Proxy with empty policy - fail on unrecognized policy" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:56 #, c-format msgid "Proxy with specific policy: %s" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:60 msgid "Proxy with ARC Policy" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:62 msgid "Proxy with unknown policy - fail on unrecognized policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:77 #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:128 #, c-format msgid "Was expecting %s at the beginning of \"%s\"" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:82 #, c-format msgid "We only support CAs in Globus signing policy - %s is not supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:87 #, c-format msgid "We only support X509 CAs in Globus signing policy - %s 
is not supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:92 msgid "Missing CA subject in Globus signing policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:102 msgid "Negative rights are not supported in Globus signing policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:106 #, c-format msgid "Unknown rights in Globus signing policy - %s" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:111 #, c-format msgid "" "Only globus rights are supported in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:116 #, c-format msgid "" "Only signing rights are supported in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:133 #, c-format msgid "" "We only support subjects conditions in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:138 #, c-format msgid "" "We only support globus conditions in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:144 msgid "Missing condition subjects in Globus signing policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:220 msgid "Unknown element in Globus signing policy" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:222 msgid "Critical VOMS attribute processing failed" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:230 msgid "VOMS attribute validation failed" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:232 msgid "VOMS attribute is ignored due to processing/validation error" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:424 #, c-format msgid "Failed to establish connection: %s" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:442 src/hed/mcc/tls/MCCTLS.cpp:524 #, c-format msgid "Peer name: %s" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:444 src/hed/mcc/tls/MCCTLS.cpp:526 #, c-format msgid "Identity name: %s" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:446 src/hed/mcc/tls/MCCTLS.cpp:528 #, c-format msgid "CA name: %s" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:452 msgid "Failed to process security attributes in TLS MCC for incoming message" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:461 msgid "Security check failed in TLS MCC for incoming message" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:534 msgid "Security check failed for outgoing TLS message" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:563 msgid "Security check failed for incoming TLS message" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:93 msgid "" "Failed to allocate memory for certificate subject while matching policy." msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:98 msgid "" "Failed to retrieve link to TLS stream. Additional policy matching is skipped." 
msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:128 #, c-format msgid "Certificate %s already expired" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:136 #, c-format msgid "Certificate %s will expire in %s" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:157 msgid "Failed to store application data" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:185 msgid "Failed to retrieve application data from OpenSSL" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:257 src/hed/mcc/tls/PayloadTLSMCC.cpp:351 msgid "Can not create the SSL Context object" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:270 src/hed/mcc/tls/PayloadTLSMCC.cpp:369 msgid "Can't set OpenSSL verify flags" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:289 src/hed/mcc/tls/PayloadTLSMCC.cpp:383 msgid "Can not create the SSL object" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:308 msgid "Failed to establish SSL connection" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:311 src/hed/mcc/tls/PayloadTLSMCC.cpp:398 #, c-format msgid "Using cipher: %s" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:395 msgid "Failed to accept SSL connection" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:448 #, c-format msgid "Failed to shut down SSL: %s" msgstr "" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:47 msgid "" "ArcAuthZ: failed to initiate all PDPs - this instance will be non-functional" msgstr "" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:70 msgid "PDP: missing name attribute" msgstr "" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:74 #, c-format msgid "PDP: %s (%s)" msgstr "" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:79 #, c-format msgid "PDP: %s (%s) can not be loaded" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluationCtx.cpp:251 #, c-format msgid "There are %d RequestItems" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:60 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:55 msgid "Can not parse classname for FunctionFactory from configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:68 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:63 msgid "Can not parse classname for AttributeFactory from configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:76 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:71 msgid "" "Can not parse classname for CombiningAlgorithmFactory from configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:84 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:79 msgid "Can not parse classname for Request from configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:93 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:88 msgid "Can not parse classname for Policy from configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:105 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:100 msgid "Can not dynamically produce AttributeFactory" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:110 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:105 msgid "Can not dynamically produce FnFactory" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:115 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:110 msgid "Can not dynamically produce AlgFacroty" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:126 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:31 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:37 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:121 msgid "Can not create PolicyStore object" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:177 src/hed/shc/test.cpp:183 #: src/hed/shc/testinterface_arc.cpp:102 #: src/hed/shc/testinterface_xacml.cpp:54 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:172 msgid "Can not dynamically produce Request" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:261 #, 
c-format msgid "Result value (0=Permit, 1=Deny, 2=Indeterminate, 3=Not_Applicable): %d" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:110 msgid "Can not find ArcPDPContext" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:139 src/hed/shc/xacmlpdp/XACMLPDP.cpp:117 msgid "Evaluator does not support loadable Combining Algorithms" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:143 src/hed/shc/xacmlpdp/XACMLPDP.cpp:121 #, c-format msgid "Evaluator does not support specified Combining Algorithm - %s" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:155 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:84 #: src/hed/shc/gaclpdp/GACLPDP.cpp:118 src/hed/shc/test.cpp:94 #: src/hed/shc/testinterface_arc.cpp:37 src/hed/shc/testinterface_xacml.cpp:37 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:133 msgid "Can not dynamically produce Evaluator" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:158 msgid "Evaluator for ArcPDP was not loaded" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:165 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:57 #: src/hed/shc/gaclpdp/GACLPDP.cpp:128 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:87 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:143 src/tests/echo/echo.cpp:108 msgid "Missing security object in message" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:173 src/hed/shc/arcpdp/ArcPDP.cpp:181 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:137 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:143 #: src/hed/shc/gaclpdp/GACLPDP.cpp:136 src/hed/shc/gaclpdp/GACLPDP.cpp:144 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:95 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:103 #: src/tests/echo/echo.cpp:116 src/tests/echo/echo.cpp:123 msgid "Failed to convert security information to ARC request" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:189 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:150 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:111 #, c-format msgid "ARC Auth. 
request: %s" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:192 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:153 #: src/hed/shc/gaclpdp/GACLPDP.cpp:155 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:114 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:170 msgid "No requested security information was collected" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:199 msgid "Not authorized from arc.pdp - failed to get reponse from Evaluator" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:245 msgid "Authorized by arc.pdp" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:246 msgid "" "Not authorized by arc.pdp - some of the RequestItem elements do not satisfy " "Policy" msgstr "" #: src/hed/shc/arcpdp/ArcPolicy.cpp:56 src/hed/shc/arcpdp/ArcPolicy.cpp:70 #: src/hed/shc/gaclpdp/GACLPolicy.cpp:46 src/hed/shc/gaclpdp/GACLPolicy.cpp:59 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:48 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:64 msgid "Policy is empty" msgstr "" #: src/hed/shc/arcpdp/ArcPolicy.cpp:114 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:115 #, c-format msgid "PolicyId: %s Alg inside this policy is:-- %s" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:75 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:129 msgid "No delegation policies in this context and message - passing through" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:95 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:109 msgid "Failed to convert security information to ARC policy" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:116 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:123 #, c-format msgid "ARC delegation policy: %s" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:161 msgid "No authorization response was returned" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:164 #, c-format msgid "There are %d requests, which satisfy at least one policy" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:183 msgid "Delegation authorization passed" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:185 msgid "Delegation authorization failed" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:63 msgid "" "Missing CertificatePath element or ProxyPath element, or " " is missing" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:68 msgid "" "Missing or empty KeyPath element, or is missing" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:74 msgid "Missing or empty CertificatePath or CACertificatesDir element" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:81 #, c-format msgid "Delegation role not supported: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:90 #, c-format msgid "Delegation type not supported: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:115 msgid "Failed to acquire delegation context" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:143 #: src/hed/shc/delegationsh/DelegationSH.cpp:254 msgid "Can't create delegation context" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:149 msgid "Delegation handler with delegatee role starts to process" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:152 #: src/services/a-rex/arex.cpp:554 #: src/services/cache_service/CacheService.cpp:529 #: src/services/data-staging/DataDeliveryService.cpp:630 msgid "process: POST" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:159 #: src/services/a-rex/arex.cpp:561 #: src/services/cache_service/CacheService.cpp:538 #: src/services/data-staging/DataDeliveryService.cpp:639 #: src/services/wrappers/java/javawrapper.cpp:140 #: 
src/services/wrappers/python/pythonwrapper.cpp:413 msgid "input is not SOAP" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:166 #, c-format msgid "Delegation service: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:181 #: src/hed/shc/delegationsh/DelegationSH.cpp:188 #, c-format msgid "Can not get the delegation credential: %s from delegation service: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:204 #: src/hed/shc/delegationsh/DelegationSH.cpp:268 #, c-format msgid "Delegated credential identity: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:205 #, c-format msgid "" "The delegated credential got from delegation service is stored into path: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:218 msgid "The endpoint of delgation service should be configured" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:228 #: src/hed/shc/delegationsh/DelegationSH.cpp:340 msgid "Delegation handler with delegatee role ends" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:260 msgid "Delegation handler with delegator role starts to process" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:269 #, c-format msgid "The delegated credential got from path: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:290 #, c-format msgid "Can not create delegation crendential to delegation service: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:328 #: src/services/wrappers/java/javawrapper.cpp:144 msgid "output is not SOAP" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:339 #, c-format msgid "" "Succeeded to send DelegationService: %s and DelegationID: %s info to peer " "service" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:345 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:220 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:101 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:94 msgid "Incoming Message is not SOAP" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:352 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:341 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:123 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:108 msgid "Outgoing Message is not SOAP" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:356 msgid "Delegation handler is not configured" msgstr "" #: src/hed/shc/gaclpdp/GACLPDP.cpp:121 msgid "Evaluator for GACLPDP was not loaded" msgstr "" #: src/hed/shc/gaclpdp/GACLPDP.cpp:152 #, c-format msgid "GACL Auth. 
request: %s" msgstr "" #: src/hed/shc/gaclpdp/GACLPolicy.cpp:50 src/hed/shc/gaclpdp/GACLPolicy.cpp:63 msgid "Policy is not gacl" msgstr "" #: src/hed/shc/legacy/ConfigParser.cpp:13 msgid "Configuration file not specified" msgstr "" #: src/hed/shc/legacy/ConfigParser.cpp:18 #: src/hed/shc/legacy/ConfigParser.cpp:53 #: src/hed/shc/legacy/ConfigParser.cpp:58 msgid "Configuration file can not be read" msgstr "" #: src/hed/shc/legacy/ConfigParser.cpp:68 #, c-format msgid "Configuration file is broken - block name is too short: %s" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:31 src/hed/shc/legacy/LegacyPDP.cpp:96 msgid "Configuration file not specified in ConfigBlock" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:40 src/hed/shc/legacy/LegacyPDP.cpp:105 msgid "BlockName is empty" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:99 #, c-format msgid "Failed processing user mapping command: unixmap %s" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:106 #, c-format msgid "Failed processing user mapping command: unixgroup %s" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:113 #, c-format msgid "Failed processing user mapping command: unixvo %s" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:173 msgid "LegacyMap: no configurations blocks defined" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:195 src/hed/shc/legacy/LegacyPDP.cpp:210 msgid "" "LegacyPDP: there is no ARCLEGACY Sec Attribute defined. Probably ARC Legacy " "Sec Handler is not configured or failed." msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:200 src/hed/shc/legacy/LegacyPDP.cpp:215 msgid "LegacyPDP: ARC Legacy Sec Attribute not recognized." msgstr "" #: src/hed/shc/legacy/LegacyPDP.cpp:115 #, c-format msgid "Failed to parse configuration file %s" msgstr "" #: src/hed/shc/legacy/LegacyPDP.cpp:121 #, c-format msgid "Block %s not found in configuration file %s" msgstr "" #: src/hed/shc/legacy/LegacySecHandler.cpp:36 #: src/hed/shc/legacy/LegacySecHandler.cpp:110 msgid "LegacySecHandler: configuration file not specified" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:146 src/hed/shc/legacy/arc_lcmaps.cpp:163 #, c-format msgid "" "Failed to convert GSI credential to GSS credential (major: %d, minor: %d)" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:172 src/hed/shc/legacy/arc_lcmaps.cpp:189 msgid "Missing subject name" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:177 src/hed/shc/legacy/arc_lcmaps.cpp:194 msgid "Missing path of credentials file" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:182 msgid "Missing name of LCAS library" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:199 #, c-format msgid "Can't load LCAS library %s: %s" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:209 #, c-format msgid "Can't find LCAS functions in a library %s" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:219 msgid "Failed to initialize LCAS" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:234 msgid "Failed to terminate LCAS" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:199 msgid "Missing name of LCMAPS library" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:213 msgid "Can't read policy names" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:224 #, c-format msgid "Can't load LCMAPS library %s: %s" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:236 #, c-format msgid "Can't find LCMAPS functions in a library %s" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:248 msgid "LCMAPS has lcmaps_run" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:249 msgid "LCMAPS has getCredentialData" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:253 msgid "Failed to initialize LCMAPS" msgstr "" #: 
src/hed/shc/legacy/arc_lcmaps.cpp:296 #, c-format msgid "LCMAPS returned invalid GID: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:299 msgid "LCMAPS did not return any GID" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:302 #, c-format msgid "LCMAPS returned UID which has no username: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:305 #, c-format msgid "LCMAPS returned invalid UID: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:308 msgid "LCMAPS did not return any UID" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:318 msgid "Failed to terminate LCMAPS" msgstr "" #: src/hed/shc/legacy/auth.cpp:293 #, c-format msgid "Credentials stored in temporary file %s" msgstr "" #: src/hed/shc/legacy/auth.cpp:302 #, c-format msgid "Assigned to authorization group %s" msgstr "" #: src/hed/shc/legacy/auth.cpp:307 #, c-format msgid "Assigned to VO %s" msgstr "" #: src/hed/shc/legacy/auth_file.cpp:24 #: src/services/gridftpd/auth/auth_file.cpp:24 #, c-format msgid "Failed to read file %s" msgstr "" #: src/hed/shc/legacy/auth_ldap.cpp:22 msgid "LDAP authorization is not supported anymore" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:44 src/hed/shc/legacy/unixmap.cpp:260 #: src/services/gridftpd/auth/auth_plugin.cpp:70 #: src/services/gridftpd/auth/unixmap.cpp:251 #, c-format msgid "Plugin %s returned: %u" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:48 src/hed/shc/legacy/unixmap.cpp:264 #, c-format msgid "Plugin %s timeout after %u seconds" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:51 src/hed/shc/legacy/unixmap.cpp:267 #, c-format msgid "Plugin %s failed to start" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:53 src/hed/shc/legacy/unixmap.cpp:269 #, c-format msgid "Plugin %s printed: %s" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:54 src/hed/shc/legacy/unixmap.cpp:270 #, c-format msgid "Plugin %s error: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:39 #: src/services/gridftpd/auth/auth_voms.cpp:45 msgid "Missing VO in configuration" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:44 #: src/services/gridftpd/auth/auth_voms.cpp:51 msgid "Missing group in configuration" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:49 #: src/services/gridftpd/auth/auth_voms.cpp:57 msgid "Missing role in configuration" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:54 #: src/services/gridftpd/auth/auth_voms.cpp:63 msgid "Missing capabilities in configuration" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:58 #: src/services/gridftpd/auth/auth_voms.cpp:67 #, c-format msgid "Rule: vo: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:59 #: src/services/gridftpd/auth/auth_voms.cpp:68 #, c-format msgid "Rule: group: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:60 #: src/services/gridftpd/auth/auth_voms.cpp:69 #, c-format msgid "Rule: role: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:61 #: src/services/gridftpd/auth/auth_voms.cpp:70 #, c-format msgid "Rule: capabilities: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:64 #: src/services/gridftpd/auth/auth_voms.cpp:77 #, c-format msgid "Match vo: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:71 #, c-format msgid "Matched: %s %s %s %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:86 #: src/services/gridftpd/auth/auth_voms.cpp:98 msgid "Matched nothing" msgstr "" #: src/hed/shc/legacy/simplemap.cpp:71 src/hed/shc/legacy/simplemap.cpp:76 #: src/services/gridftpd/auth/simplemap.cpp:63 #: src/services/gridftpd/auth/simplemap.cpp:68 #, c-format msgid "SimpleMap: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:49 
src/hed/shc/legacy/unixmap.cpp:54 #: src/hed/shc/legacy/unixmap.cpp:98 src/hed/shc/legacy/unixmap.cpp:103 #: src/hed/shc/legacy/unixmap.cpp:147 src/hed/shc/legacy/unixmap.cpp:152 #: src/services/gridftpd/auth/unixmap.cpp:47 #: src/services/gridftpd/auth/unixmap.cpp:52 #: src/services/gridftpd/auth/unixmap.cpp:96 #: src/services/gridftpd/auth/unixmap.cpp:101 #: src/services/gridftpd/auth/unixmap.cpp:145 #: src/services/gridftpd/auth/unixmap.cpp:150 msgid "User name mapping command is empty" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:61 src/services/gridftpd/auth/unixmap.cpp:59 #, c-format msgid "User name mapping has empty group: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:72 src/hed/shc/legacy/unixmap.cpp:121 #: src/hed/shc/legacy/unixmap.cpp:169 #: src/services/gridftpd/auth/unixmap.cpp:70 #: src/services/gridftpd/auth/unixmap.cpp:119 #: src/services/gridftpd/auth/unixmap.cpp:167 #, c-format msgid "User name mapping has empty command: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:110 #: src/services/gridftpd/auth/unixmap.cpp:108 #, c-format msgid "User name mapping has empty VO: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:159 #: src/services/gridftpd/auth/unixmap.cpp:157 #, c-format msgid "User name mapping has empty name: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:208 src/hed/shc/legacy/unixmap.cpp:213 #: src/hed/shc/legacy/unixmap.cpp:229 src/hed/shc/legacy/unixmap.cpp:235 #: src/services/gridftpd/auth/unixmap.cpp:212 #: src/services/gridftpd/auth/unixmap.cpp:217 #: src/services/gridftpd/auth/unixmap.cpp:233 msgid "Plugin (user mapping) command is empty" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:219 #: src/services/gridftpd/auth/unixmap.cpp:223 #, c-format msgid "Plugin (user mapping) timeout is not a number: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:223 #: src/services/gridftpd/auth/unixmap.cpp:227 #, c-format msgid "Plugin (user mapping) timeout is wrong number: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:257 #: src/services/gridftpd/auth/unixmap.cpp:248 #, c-format msgid "Plugin %s returned too much: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:278 msgid "User subject match is missing user subject." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:282 #: src/services/gridftpd/auth/unixmap.cpp:266 #, c-format msgid "Mapfile at %s can't be opened." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:305 #: src/services/gridftpd/auth/unixmap.cpp:290 msgid "User pool call is missing user subject." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:310 #: src/services/gridftpd/auth/unixmap.cpp:295 #, c-format msgid "User pool at %s can't be opened." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:315 #: src/services/gridftpd/auth/unixmap.cpp:300 #, c-format msgid "User pool at %s failed to perform user mapping." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:332 #: src/services/gridftpd/auth/unixmap.cpp:317 #, c-format msgid "User name direct mapping is missing user name: %s." 
msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:48 msgid "Creating a pdpservice client" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:80 msgid "Arc policy can not been carried by SAML2.0 profile of XACML" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:152 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:185 msgid "Policy Decision Service invocation failed" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:170 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:204 msgid "Authorized from remote pdp service" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:171 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:205 msgid "Unauthorized from remote pdp service" msgstr "" #: src/hed/shc/saml2sso_assertionconsumersh/SAML2SSO_AssertionConsumerSH.cpp:69 msgid "Can not get SAMLAssertion SecAttr from message context" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:152 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:44 msgid "Missing or empty CertificatePath element" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:157 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:49 msgid "Missing or empty KeyPath element" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:163 msgid "" "Both of CACertificatePath and CACertificatesDir elements missing or empty" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:175 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:61 msgid "" "Missing or empty CertificatePath or CACertificatesDir element; will only " "check the signature, will not do message authentication" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:179 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:65 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:65 #, c-format msgid "Processing type not supported: %s" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:199 msgid "Failed to parse SAML Token from incoming SOAP" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:209 msgid "Failed to authenticate SAML Token inside the incoming SOAP" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:212 msgid "Succeeded to authenticate SAMLToken" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:275 #, c-format msgid "No response from AA service %s" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:279 #, c-format msgid "SOAP Request to AA service %s failed" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:287 msgid "Cannot find content under response soap message" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:291 msgid "Cannot find under response soap message:" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:308 msgid "The Response is not going to this end" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:315 msgid "The StatusCode is Success" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:335 msgid "Failed to generate SAML Token for outgoing SOAP" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:345 msgid "SAML Token handler is not configured" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:29 #, c-format msgid "Access list location: %s" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:39 msgid "" "No policy file or DNs specified for simplelist.pdp, please set location " "attribute or at least one DN element for simplelist PDP node in " "configuration." 
msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:42 #, c-format msgid "Subject to match: %s" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:45 #, c-format msgid "Policy subject: %s" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:47 #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:73 #, c-format msgid "Authorized from simplelist.pdp: %s" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:54 msgid "" "The policy file setup for simplelist.pdp does not exist, please check " "location attribute for simplelist PDP node in service configuration" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:61 #, c-format msgid "Policy line: %s" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:79 #, c-format msgid "Not authorized from simplelist.pdp: %s" msgstr "" #: src/hed/shc/test.cpp:27 src/hed/shc/testinterface_arc.cpp:26 #: src/hed/shc/testinterface_xacml.cpp:26 msgid "Start test" msgstr "" #: src/hed/shc/test.cpp:101 msgid "Input request from a file: Request.xml" msgstr "" #: src/hed/shc/test.cpp:107 src/hed/shc/test.cpp:197 #: src/hed/shc/testinterface_arc.cpp:124 #, c-format msgid "There is %d subjects, which satisfy at least one policy" msgstr "" #: src/hed/shc/test.cpp:121 #, c-format msgid "Attribute Value (1): %s" msgstr "" #: src/hed/shc/test.cpp:132 msgid "Input request from code" msgstr "" #: src/hed/shc/test.cpp:211 #, c-format msgid "Attribute Value (2): %s" msgstr "" #: src/hed/shc/testinterface_arc.cpp:75 src/hed/shc/testinterface_xacml.cpp:46 msgid "Can not dynamically produce Policy" msgstr "" #: src/hed/shc/testinterface_arc.cpp:138 #, c-format msgid "Attribute Value inside Subject: %s" msgstr "" #: src/hed/shc/testinterface_arc.cpp:148 msgid "The request has passed the policy evaluation" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:43 msgid "Missing or empty PasswordSource element" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:54 #, c-format msgid "Password encoding type not supported: %s" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:59 msgid "Missing or empty Username element" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:79 msgid "The payload of incoming message is empty" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:84 msgid "Failed to cast PayloadSOAP from incoming payload" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:89 msgid "Failed to parse Username Token from incoming SOAP" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:95 msgid "Failed to authenticate Username Token inside the incoming SOAP" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:98 msgid "Succeeded to authenticate UsernameToken" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:108 msgid "The payload of outgoing message is empty" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:113 msgid "Failed to cast PayloadSOAP from outgoing payload" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:119 msgid "Failed to generate Username Token for outgoing SOAP" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:127 msgid "Username Token handler is not configured" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:81 msgid "Failed to parse X509 Token from incoming SOAP" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:85 msgid "Failed to verify X509 Token inside the incoming SOAP" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:89 msgid "Failed to authenticate X509 Token inside the incoming SOAP" msgstr "" #: 
src/hed/shc/x509tokensh/X509TokenSH.cpp:92 msgid "Succeeded to authenticate X509Token" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:102 msgid "Failed to generate X509 Token for outgoing SOAP" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:112 msgid "X509 Token handler is not configured" msgstr "" #: src/hed/shc/xacmlpdp/XACMLApply.cpp:29 msgid "Can not create function: FunctionId does not exist" msgstr "" #: src/hed/shc/xacmlpdp/XACMLApply.cpp:33 #: src/hed/shc/xacmlpdp/XACMLTarget.cpp:40 #, c-format msgid "Can not create function %s" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:88 msgid "Can not find XACMLPDPContext" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:136 msgid "Evaluator for XACMLPDP was not loaded" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:151 src/hed/shc/xacmlpdp/XACMLPDP.cpp:159 msgid "Failed to convert security information to XACML request" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:167 #, c-format msgid "XACML request: %s" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:179 msgid "Authorized from xacml.pdp" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:180 msgid "UnAuthorized from xacml.pdp" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:55 msgid "Can not find element with proper namespace" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:132 msgid "No target available inside the policy" msgstr "" #: src/hed/shc/xacmlpdp/XACMLRequest.cpp:34 msgid "Request is empty" msgstr "" #: src/hed/shc/xacmlpdp/XACMLRequest.cpp:39 msgid "Can not find element with proper namespace" msgstr "" #: src/hed/shc/xacmlpdp/XACMLRule.cpp:35 msgid "Invalid Effect" msgstr "" #: src/hed/shc/xacmlpdp/XACMLRule.cpp:48 msgid "No target available inside the rule" msgstr "" #: src/libs/data-staging/DTR.cpp:86 src/libs/data-staging/DTR.cpp:90 #, c-format msgid "Could not handle endpoint %s" msgstr "" #: src/libs/data-staging/DTR.cpp:100 msgid "Source is the same as destination" msgstr "" #: src/libs/data-staging/DTR.cpp:174 #, c-format msgid "Invalid ID: %s" msgstr "" #: src/libs/data-staging/DTR.cpp:211 #, c-format msgid "%s->%s" msgstr "" #: src/libs/data-staging/DTR.cpp:330 #, c-format msgid "No callback for %s defined" msgstr "" #: src/libs/data-staging/DTR.cpp:345 #, c-format msgid "NULL callback for %s" msgstr "" #: src/libs/data-staging/DTR.cpp:348 #, c-format msgid "Request to push to unknown owner - %u" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:48 #: src/libs/data-staging/DataDelivery.cpp:72 msgid "Received invalid DTR" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:54 #, c-format msgid "Delivery received new DTR %s with source: %s, destination: %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:68 msgid "Received no DTR" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:80 #, c-format msgid "Cancelling DTR %s with source: %s, destination: %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:91 #, c-format msgid "DTR %s requested cancel but no active transfer" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:147 #, c-format msgid "Cleaning up after failure: deleting %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:188 #: src/libs/data-staging/DataDelivery.cpp:263 #: src/libs/data-staging/DataDelivery.cpp:303 #: src/libs/data-staging/DataDelivery.cpp:323 msgid "Failed to delete delivery object or deletion timed out" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:254 #, c-format msgid "Transfer finished: %llu bytes transferred %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:329 msgid "Data delivery loop exited" msgstr 
"" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:137 #, c-format msgid "Bad checksum format %s" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:210 #, c-format msgid "DataDelivery: %s" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:222 #, c-format msgid "DataStagingDelivery exited with code %i" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:241 #, c-format msgid "Transfer killed after %i seconds without communication" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:67 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:315 #, c-format msgid "Connecting to Delivery service at %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:94 #, c-format msgid "Failed to set up credential delegation with %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:106 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:332 #, c-format msgid "Could not connect to service %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:114 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:340 #, c-format msgid "No SOAP response from Delivery service %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:128 #, c-format msgid "Failed to start transfer request: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:135 #, c-format msgid "Bad format in XML response from service at %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:143 #, c-format msgid "Could not make new transfer request: %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:148 #, c-format msgid "Started remote Delivery at %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:181 #, c-format msgid "Failed to send cancel request: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:188 msgid "Failed to cancel: No SOAP response" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:202 #, c-format msgid "Failed to cancel transfer request: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:209 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:290 #, c-format msgid "Bad format in XML response: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:216 #, c-format msgid "Failed to cancel: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:260 msgid "No SOAP response from delivery service" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:281 #, c-format msgid "Failed to query state: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:355 #, c-format msgid "SOAP fault from delivery service at %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:363 #, c-format msgid "Bad format in XML response from delivery service at %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:371 #, c-format msgid "Error pinging delivery service at %s: %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:379 #, c-format msgid "Dir %s allowed at service %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:473 #, c-format msgid "" "DataDelivery log tail:\n" "%s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:486 msgid "Failed locating credentials" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:491 msgid "Failed to initiate client connection" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:497 msgid "Client connection has no entry point" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:134 msgid "Unexpected 
arguments" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:137 msgid "Source URL missing" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:140 msgid "Destination URL missing" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:144 #, c-format msgid "Source URL not valid: %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:148 #, c-format msgid "Destination URL not valid: %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:205 #, c-format msgid "Unknown transfer option: %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:230 #, c-format msgid "Source URL not supported: %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:235 #: src/libs/data-staging/DataStagingDelivery.cpp:254 msgid "No credentials supplied" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:249 #, c-format msgid "Destination URL not supported: %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:298 #, c-format msgid "Will calculate %s checksum" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:309 msgid "Cannot use supplied --size option" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:458 #, c-format msgid "Checksum mismatch between calculated checksum %s and source checksum %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:468 #, c-format msgid "Failed cleaning up destination %s" msgstr "" #: src/libs/data-staging/Processor.cpp:60 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:418 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:435 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:331 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:348 #: src/services/cache_service/CacheService.cpp:114 msgid "Error creating cache" msgstr "" #: src/libs/data-staging/Processor.cpp:86 #, c-format msgid "Forcing re-download of file %s" msgstr "" #: src/libs/data-staging/Processor.cpp:103 #, c-format msgid "Will wait around %is" msgstr "" #: src/libs/data-staging/Processor.cpp:123 #, c-format msgid "Force-checking source of cache file %s" msgstr "" #: src/libs/data-staging/Processor.cpp:126 #, c-format msgid "Source check requested but failed: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:146 msgid "Permission checking failed, will try downloading without using cache" msgstr "" #: src/libs/data-staging/Processor.cpp:177 #, c-format msgid "Will download to cache file %s" msgstr "" #: src/libs/data-staging/Processor.cpp:199 msgid "Looking up source replicas" msgstr "" #: src/libs/data-staging/Processor.cpp:217 #: src/libs/data-staging/Processor.cpp:329 #, c-format msgid "Skipping replica on local host %s" msgstr "" #: src/libs/data-staging/Processor.cpp:225 #: src/libs/data-staging/Processor.cpp:337 #, c-format msgid "No locations left for %s" msgstr "" #: src/libs/data-staging/Processor.cpp:248 #: src/libs/data-staging/Processor.cpp:496 msgid "Resolving destination replicas" msgstr "" #: src/libs/data-staging/Processor.cpp:266 msgid "No locations for destination different from source found" msgstr "" #: src/libs/data-staging/Processor.cpp:278 msgid "Pre-registering destination in index service" msgstr "" #: src/libs/data-staging/Processor.cpp:305 msgid "Resolving source replicas in bulk" msgstr "" #: src/libs/data-staging/Processor.cpp:319 #, c-format msgid "No replicas found for %s" msgstr "" #: src/libs/data-staging/Processor.cpp:361 #, c-format msgid "Checking %s" msgstr "" #: src/libs/data-staging/Processor.cpp:370 #: src/libs/data-staging/Processor.cpp:429 msgid "Metadata of 
replica and index service differ" msgstr "" #: src/libs/data-staging/Processor.cpp:378 #, c-format msgid "Failed checking source replica %s: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:405 msgid "Querying source replicas in bulk" msgstr "" #: src/libs/data-staging/Processor.cpp:417 #, c-format msgid "Failed checking source replica: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:423 msgid "Failed checking source replica" msgstr "" #: src/libs/data-staging/Processor.cpp:464 msgid "Finding existing destination replicas" msgstr "" #: src/libs/data-staging/Processor.cpp:476 #, c-format msgid "Failed to delete replica %s: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:490 #, c-format msgid "Unregistering %s" msgstr "" #: src/libs/data-staging/Processor.cpp:501 msgid "Pre-registering destination" msgstr "" #: src/libs/data-staging/Processor.cpp:507 #, c-format msgid "Failed to pre-clean destination: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:531 msgid "Preparing to stage source" msgstr "" #: src/libs/data-staging/Processor.cpp:544 #, c-format msgid "Source is not ready, will wait %u seconds" msgstr "" #: src/libs/data-staging/Processor.cpp:550 msgid "No physical files found for source" msgstr "" #: src/libs/data-staging/Processor.cpp:569 msgid "Preparing to stage destination" msgstr "" #: src/libs/data-staging/Processor.cpp:582 #, c-format msgid "Destination is not ready, will wait %u seconds" msgstr "" #: src/libs/data-staging/Processor.cpp:588 msgid "No physical files found for destination" msgstr "" #: src/libs/data-staging/Processor.cpp:615 msgid "Releasing source" msgstr "" #: src/libs/data-staging/Processor.cpp:619 #, c-format msgid "There was a problem during post-transfer source handling: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:624 msgid "Releasing destination" msgstr "" #: src/libs/data-staging/Processor.cpp:628 #, c-format msgid "" "There was a problem during post-transfer destination handling after error: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:632 #, c-format msgid "Error with post-transfer destination handling: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:659 msgid "Removing pre-registered destination in index service" msgstr "" #: src/libs/data-staging/Processor.cpp:662 #, c-format msgid "" "Failed to unregister pre-registered destination %s: %s. You may need to " "unregister it manually" msgstr "" #: src/libs/data-staging/Processor.cpp:668 msgid "Registering destination replica" msgstr "" #: src/libs/data-staging/Processor.cpp:671 #, c-format msgid "Failed to register destination replica: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:674 #, c-format msgid "" "Failed to unregister pre-registered destination %s. You may need to " "unregister it manually" msgstr "" #: src/libs/data-staging/Processor.cpp:705 msgid "Error creating cache. Stale locks may remain." msgstr "" #: src/libs/data-staging/Processor.cpp:740 #, c-format msgid "Linking/copying cached file to %s" msgstr "" #: src/libs/data-staging/Processor.cpp:761 #, c-format msgid "Failed linking cache file to %s" msgstr "" #: src/libs/data-staging/Processor.cpp:765 #, c-format msgid "Error linking cache file to %s." 
msgstr "" #: src/libs/data-staging/Processor.cpp:787 #: src/libs/data-staging/Processor.cpp:794 msgid "Adding to bulk request" msgstr "" #: src/libs/data-staging/Scheduler.cpp:174 #, c-format msgid "Using next %s replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:181 #, c-format msgid "No more %s replicas" msgstr "" #: src/libs/data-staging/Scheduler.cpp:183 msgid "Will clean up pre-registered destination" msgstr "" #: src/libs/data-staging/Scheduler.cpp:187 msgid "Will release cache locks" msgstr "" #: src/libs/data-staging/Scheduler.cpp:190 msgid "Moving to end of data staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:199 #, c-format msgid "Source is mapped to %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:203 msgid "Cannot link to source which can be modified, will copy instead" msgstr "" #: src/libs/data-staging/Scheduler.cpp:213 msgid "Cannot link to a remote destination. Will not use mapped URL" msgstr "" #: src/libs/data-staging/Scheduler.cpp:216 msgid "Linking mapped file" msgstr "" #: src/libs/data-staging/Scheduler.cpp:223 #, c-format msgid "Failed to create link: %s. Will not use mapped URL" msgstr "" #: src/libs/data-staging/Scheduler.cpp:235 msgid "Linking mapped file - can't link on Windows" msgstr "" #: src/libs/data-staging/Scheduler.cpp:251 #, c-format msgid "" "Scheduler received new DTR %s with source: %s, destination: %s, assigned to " "transfer share %s with priority %d" msgstr "" #: src/libs/data-staging/Scheduler.cpp:258 msgid "" "File is not cacheable, was requested not to be cached or no cache available, " "skipping cache check" msgstr "" #: src/libs/data-staging/Scheduler.cpp:264 msgid "File is cacheable, will check cache" msgstr "" #: src/libs/data-staging/Scheduler.cpp:267 #: src/libs/data-staging/Scheduler.cpp:293 #, c-format msgid "File is currently being cached, will wait %is" msgstr "" #: src/libs/data-staging/Scheduler.cpp:286 msgid "Timed out while waiting for cache lock" msgstr "" #: src/libs/data-staging/Scheduler.cpp:297 msgid "Checking cache again" msgstr "" #: src/libs/data-staging/Scheduler.cpp:317 msgid "Destination file is in cache" msgstr "" #: src/libs/data-staging/Scheduler.cpp:321 msgid "Source and/or destination is index service, will resolve replicas" msgstr "" #: src/libs/data-staging/Scheduler.cpp:324 msgid "" "Neither source nor destination are index services, will skip resolving " "replicas" msgstr "" #: src/libs/data-staging/Scheduler.cpp:334 msgid "Problem with index service, will release cache lock" msgstr "" #: src/libs/data-staging/Scheduler.cpp:338 msgid "Problem with index service, will proceed to end of data staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:348 msgid "Checking source file is present" msgstr "" #: src/libs/data-staging/Scheduler.cpp:356 msgid "Error with source file, moving to next replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:378 #, c-format msgid "Replica %s has long latency, trying next replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:380 #, c-format msgid "No more replicas, will use %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:383 #, c-format msgid "Checking replica %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:393 msgid "Overwrite requested - will pre-clean destination" msgstr "" #: src/libs/data-staging/Scheduler.cpp:396 msgid "No overwrite requested or allowed, skipping pre-cleaning" msgstr "" #: src/libs/data-staging/Scheduler.cpp:404 msgid "Pre-clean failed, will still try to copy" msgstr "" #: src/libs/data-staging/Scheduler.cpp:411 msgid 
"Source or destination requires staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:415 msgid "No need to stage source or destination, skipping staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:445 msgid "Staging request timed out, will release request" msgstr "" #: src/libs/data-staging/Scheduler.cpp:449 msgid "Querying status of staging request" msgstr "" #: src/libs/data-staging/Scheduler.cpp:458 msgid "Releasing requests" msgstr "" #: src/libs/data-staging/Scheduler.cpp:475 msgid "DTR is ready for transfer, moving to delivery queue" msgstr "" #: src/libs/data-staging/Scheduler.cpp:490 #, c-format msgid "Transfer failed: %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:500 msgid "Releasing request(s) made during staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:503 msgid "Neither source nor destination were staged, skipping releasing requests" msgstr "" #: src/libs/data-staging/Scheduler.cpp:515 msgid "Trying next replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:519 #, c-format msgid "Will %s in destination index service" msgstr "" #: src/libs/data-staging/Scheduler.cpp:523 msgid "Destination is not index service, skipping replica registration" msgstr "" #: src/libs/data-staging/Scheduler.cpp:536 msgid "Error registering replica, moving to end of data staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:544 msgid "Will process cache" msgstr "" #: src/libs/data-staging/Scheduler.cpp:548 msgid "File is not cacheable, skipping cache processing" msgstr "" #: src/libs/data-staging/Scheduler.cpp:562 msgid "Cancellation complete" msgstr "" #: src/libs/data-staging/Scheduler.cpp:576 msgid "Will wait 10s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:582 msgid "Error in cache processing, will retry without caching" msgstr "" #: src/libs/data-staging/Scheduler.cpp:591 msgid "Will retry without caching" msgstr "" #: src/libs/data-staging/Scheduler.cpp:609 msgid "Proxy has expired" msgstr "" #: src/libs/data-staging/Scheduler.cpp:620 #, c-format msgid "%i retries left, will wait until %s before next attempt" msgstr "" #: src/libs/data-staging/Scheduler.cpp:636 msgid "Out of retries" msgstr "" #: src/libs/data-staging/Scheduler.cpp:638 msgid "Permanent failure" msgstr "" #: src/libs/data-staging/Scheduler.cpp:644 msgid "Finished successfully" msgstr "" #: src/libs/data-staging/Scheduler.cpp:654 msgid "Returning to generator" msgstr "" #: src/libs/data-staging/Scheduler.cpp:820 #, c-format msgid "File is smaller than %llu bytes, will use local delivery" msgstr "" #: src/libs/data-staging/Scheduler.cpp:874 #, c-format msgid "Delivery service at %s can copy to %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:882 #, c-format msgid "Delivery service at %s can copy from %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:895 msgid "Could not find any useable delivery service, forcing local transfer" msgstr "" #: src/libs/data-staging/Scheduler.cpp:911 #, c-format msgid "Not using delivery service at %s because it is full" msgstr "" #: src/libs/data-staging/Scheduler.cpp:938 #, c-format msgid "Not using delivery service %s due to previous failure" msgstr "" #: src/libs/data-staging/Scheduler.cpp:948 msgid "No remote delivery services are useable, forcing local delivery" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1149 msgid "Cancelling active transfer" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1165 msgid "Processing thread timed out. 
Restarting DTR" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1233 msgid "Will use bulk request" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1255 msgid "No delivery endpoints available, will try later" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1274 msgid "Scheduler received NULL DTR" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1284 msgid "Scheduler received invalid DTR" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1373 msgid "Scheduler starting up" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1374 msgid "Scheduler configuration:" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1375 #, c-format msgid " Pre-processor slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1376 #, c-format msgid " Delivery slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1377 #, c-format msgid " Post-processor slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1378 #, c-format msgid " Emergency slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1379 #, c-format msgid " Prepared slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1380 #, c-format msgid "" " Shares configuration:\n" "%s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1383 msgid " Delivery service: LOCAL" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1384 #, c-format msgid " Delivery service: %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1389 msgid "Failed to create DTR dump thread" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1406 #: src/services/data-staging/DataDeliveryService.cpp:513 #, c-format msgid "DTR %s cancelled" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:15 msgid "Shutting down scheduler" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:17 msgid "Scheduler stopped, exiting" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:23 #, c-format msgid "Received DTR %s back from scheduler in state %s" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:32 msgid "Generator started" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:33 msgid "Starting DTR threads" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:46 msgid "No valid credentials found, exiting" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:58 #, c-format msgid "Problem creating dtr (source %s, destination %s)" msgstr "" #: src/services/a-rex/arex.cpp:446 #, c-format msgid "Using cached local account '%s'" msgstr "" #: src/services/a-rex/arex.cpp:457 msgid "Will not map to 'root' account by default" msgstr "" #: src/services/a-rex/arex.cpp:470 msgid "No local account name specified" msgstr "" #: src/services/a-rex/arex.cpp:473 #, c-format msgid "Using local account '%s'" msgstr "" #: src/services/a-rex/arex.cpp:494 msgid "Failed to acquire grid-manager's configuration" msgstr "" #: src/services/a-rex/arex.cpp:519 #: src/services/cache_service/CacheService.cpp:572 #: src/services/data-staging/DataDeliveryService.cpp:687 #, c-format msgid "SOAP operation is not supported: %s" msgstr "" #: src/services/a-rex/arex.cpp:532 #, c-format msgid "Connection from %s: %s" msgstr "" #: src/services/a-rex/arex.cpp:534 #, c-format msgid "process: method: %s" msgstr "" #: src/services/a-rex/arex.cpp:535 #, c-format msgid "process: endpoint: %s" msgstr "" #: src/services/a-rex/arex.cpp:546 #, c-format msgid "process: id: %s" msgstr "" #: src/services/a-rex/arex.cpp:547 #, c-format msgid "process: subpath: %s" msgstr "" #: src/services/a-rex/arex.cpp:567 #: src/services/cache_service/CacheService.cpp:546 #: 
src/services/data-staging/DataDeliveryService.cpp:647 #: src/tests/echo/echo.cpp:98 #, c-format msgid "process: request=%s" msgstr "" #: src/services/a-rex/arex.cpp:572 #: src/services/cache_service/CacheService.cpp:551 #: src/services/data-staging/DataDeliveryService.cpp:652 #: src/tests/count/count.cpp:69 msgid "input does not define operation" msgstr "" #: src/services/a-rex/arex.cpp:575 #: src/services/cache_service/CacheService.cpp:554 #: src/services/data-staging/DataDeliveryService.cpp:655 #: src/tests/count/count.cpp:72 #, c-format msgid "process: operation: %s" msgstr "" #: src/services/a-rex/arex.cpp:591 src/services/a-rex/arex.cpp:804 #: src/services/a-rex/arex.cpp:823 src/services/a-rex/arex.cpp:837 #: src/services/a-rex/arex.cpp:847 src/services/a-rex/arex.cpp:862 #: src/services/cache_service/CacheService.cpp:588 #: src/services/data-staging/DataDeliveryService.cpp:703 msgid "Security Handlers processing failed" msgstr "" #: src/services/a-rex/arex.cpp:598 msgid "Can't obtain configuration" msgstr "" #: src/services/a-rex/arex.cpp:614 msgid "process: factory endpoint" msgstr "" #: src/services/a-rex/arex.cpp:798 src/services/a-rex/arex.cpp:815 #: src/services/cache_service/CacheService.cpp:583 #: src/services/data-staging/DataDeliveryService.cpp:698 #: src/tests/echo/echo.cpp:166 #, c-format msgid "process: response=%s" msgstr "" #: src/services/a-rex/arex.cpp:800 msgid "process: response is not SOAP" msgstr "" #: src/services/a-rex/arex.cpp:830 msgid "process: GET" msgstr "" #: src/services/a-rex/arex.cpp:831 #, c-format msgid "GET: id %s path %s" msgstr "" #: src/services/a-rex/arex.cpp:854 msgid "process: PUT" msgstr "" #: src/services/a-rex/arex.cpp:869 #, c-format msgid "process: method %s is not supported" msgstr "" #: src/services/a-rex/arex.cpp:872 msgid "process: method is not defined" msgstr "" #: src/services/a-rex/arex.cpp:908 msgid "Failed to run Grid Manager thread" msgstr "" #: src/services/a-rex/arex.cpp:972 #, c-format msgid "Storing configuration in temporary file %s" msgstr "" #: src/services/a-rex/arex.cpp:977 msgid "Failed to process service configuration" msgstr "" #: src/services/a-rex/arex.cpp:985 #, c-format msgid "Failed to process configuration in %s" msgstr "" #: src/services/a-rex/arex.cpp:991 msgid "No control directory set in configuration" msgstr "" #: src/services/a-rex/arex.cpp:995 msgid "No session directory set in configuration" msgstr "" #: src/services/a-rex/arex.cpp:999 msgid "No LRMS set in configuration" msgstr "" #: src/services/a-rex/arex.cpp:1004 #, c-format msgid "Failed to create control directory %s" msgstr "" #: src/services/a-rex/arex.cpp:1033 #, c-format msgid "Provided LRMSName is not a valid URL: %s" msgstr "" #: src/services/a-rex/arex.cpp:1035 msgid "" "No LRMSName is provided. This is needed if you wish to completely comply " "with the BES specifications." 
msgstr "" #: src/services/a-rex/cachecheck.cpp:34 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:539 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:843 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:424 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:337 #, c-format msgid "Error with cache configuration: %s" msgstr "" #: src/services/a-rex/cachecheck.cpp:50 #: src/services/cache_service/CacheService.cpp:305 msgid "Error with cache configuration" msgstr "" #: src/services/a-rex/cachecheck.cpp:75 #: src/services/cache_service/CacheService.cpp:135 #: src/services/cache_service/CacheService.cpp:330 #, c-format msgid "Looking up URL %s" msgstr "" #: src/services/a-rex/cachecheck.cpp:77 #: src/services/cache_service/CacheService.cpp:144 #, c-format msgid "Cache file is %s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:33 #, c-format msgid "" "ChangeActivityStatus: request = \n" "%s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:38 msgid "ChangeActivityStatus: no ActivityIdentifier found" msgstr "" #: src/services/a-rex/change_activity_status.cpp:47 msgid "ChangeActivityStatus: EPR contains no JobID" msgstr "" #: src/services/a-rex/change_activity_status.cpp:57 #, c-format msgid "ChangeActivityStatus: no job found: %s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:73 msgid "ChangeActivityStatus: missing NewStatus element" msgstr "" #: src/services/a-rex/change_activity_status.cpp:87 msgid "ChangeActivityStatus: Failed to accept delegation" msgstr "" #: src/services/a-rex/change_activity_status.cpp:103 msgid "ChangeActivityStatus: old BES state does not match" msgstr "" #: src/services/a-rex/change_activity_status.cpp:110 msgid "ChangeActivityStatus: old A-REX state does not match" msgstr "" #: src/services/a-rex/change_activity_status.cpp:137 msgid "ChangeActivityStatus: Failed to update credentials" msgstr "" #: src/services/a-rex/change_activity_status.cpp:143 msgid "ChangeActivityStatus: Failed to resume job" msgstr "" #: src/services/a-rex/change_activity_status.cpp:149 #, c-format msgid "ChangeActivityStatus: State change not allowed: from %s/%s to %s/%s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:168 #, c-format msgid "" "ChangeActivityStatus: response = \n" "%s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:213 #: src/services/a-rex/change_activity_status.cpp:217 #, c-format msgid "EMIES:PauseActivity: job %s - %s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:262 #: src/services/a-rex/change_activity_status.cpp:266 #, c-format msgid "EMIES:ResumeActivity: job %s - %s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:311 #: src/services/a-rex/change_activity_status.cpp:316 #, c-format msgid "EMIES:CancelActivity: job %s - %s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:324 #, c-format msgid "job %s cancelled successfully" msgstr "" #: src/services/a-rex/change_activity_status.cpp:370 #: src/services/a-rex/change_activity_status.cpp:385 #, c-format msgid "EMIES:WipeActivity: job %s - %s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:389 #, c-format msgid "job %s (will be) cleaned successfully" msgstr "" #: src/services/a-rex/change_activity_status.cpp:435 #: src/services/a-rex/change_activity_status.cpp:440 #, c-format msgid "EMIES:RestartActivity: job %s - %s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:444 #, c-format msgid "job %s restarted successfully" msgstr "" #: src/services/a-rex/create_activity.cpp:35 #, c-format 
msgid "" "CreateActivity: request = \n" "%s" msgstr "" #: src/services/a-rex/create_activity.cpp:40 msgid "CreateActivity: no job description found" msgstr "" #: src/services/a-rex/create_activity.cpp:48 msgid "CreateActivity: max jobs total limit reached" msgstr "" #: src/services/a-rex/create_activity.cpp:67 msgid "CreateActivity: Failed to accept delegation" msgstr "" #: src/services/a-rex/create_activity.cpp:100 #, c-format msgid "CreateActivity: Failed to create new job: %s" msgstr "" #: src/services/a-rex/create_activity.cpp:102 msgid "CreateActivity: Failed to create new job" msgstr "" #: src/services/a-rex/create_activity.cpp:117 msgid "CreateActivity finished successfully" msgstr "" #: src/services/a-rex/create_activity.cpp:121 #, c-format msgid "" "CreateActivity: response = \n" "%s" msgstr "" #: src/services/a-rex/create_activity.cpp:159 #, c-format msgid "" "EMIES:CreateActivity: request = \n" "%s" msgstr "" #: src/services/a-rex/create_activity.cpp:165 msgid "EMIES:CreateActivity: too many activity descriptions" msgstr "" #: src/services/a-rex/create_activity.cpp:175 msgid "EMIES:CreateActivity: no job description found" msgstr "" #: src/services/a-rex/create_activity.cpp:182 msgid "EMIES:CreateActivity: max jobs total limit reached" msgstr "" #: src/services/a-rex/create_activity.cpp:208 #, c-format msgid "ES:CreateActivity: Failed to create new job: %s" msgstr "" #: src/services/a-rex/create_activity.cpp:224 msgid "EMIES:CreateActivity finished successfully" msgstr "" #: src/services/a-rex/create_activity.cpp:225 #, c-format msgid "New job accepted with id %s" msgstr "" #: src/services/a-rex/create_activity.cpp:229 #, c-format msgid "" "EMIES:CreateActivity: response = \n" "%s" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:55 msgid "Wiping and re-creating whole storage" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:210 #: src/services/a-rex/delegation/DelegationStore.cpp:311 #, c-format msgid "DelegationStore: TouchConsumer failed to create file %s" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:271 msgid "DelegationStore: PeriodicCheckConsumers failed to resume iterator" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:291 #, c-format msgid "" "DelegationStore: PeriodicCheckConsumers failed to remove old delegation %s - " "%s" msgstr "" #: src/services/a-rex/get.cpp:112 #, c-format msgid "Get: there is no job %s - %s" msgstr "" #: src/services/a-rex/get.cpp:123 #, c-format msgid "Get: can't process file %s" msgstr "" #: src/services/a-rex/get.cpp:167 #, c-format msgid "Head: there is no job %s - %s" msgstr "" #: src/services/a-rex/get.cpp:178 #, c-format msgid "Head: can't process file %s" msgstr "" #: src/services/a-rex/get.cpp:190 #, c-format msgid "http_get: start=%llu, end=%llu, burl=%s, hpath=%s" msgstr "" #: src/services/a-rex/get.cpp:357 msgid "Failed to extract credential information" msgstr "" #: src/services/a-rex/get.cpp:360 #, c-format msgid "Checking cache permissions: DN: %s" msgstr "" #: src/services/a-rex/get.cpp:361 #, c-format msgid "Checking cache permissions: VO: %s" msgstr "" #: src/services/a-rex/get.cpp:363 #, c-format msgid "Checking cache permissions: VOMS attr: %s" msgstr "" #: src/services/a-rex/get.cpp:373 #, c-format msgid "Cache access allowed to %s by DN %s" msgstr "" #: src/services/a-rex/get.cpp:376 #, c-format msgid "DN %s doesn't match %s" msgstr "" #: src/services/a-rex/get.cpp:379 #, c-format msgid "Cache access allowed to %s by VO %s" msgstr "" #: 
src/services/a-rex/get.cpp:382 #, c-format msgid "VO %s doesn't match %s" msgstr "" #: src/services/a-rex/get.cpp:388 src/services/a-rex/get.cpp:407 #, c-format msgid "Bad credential value %s in cache access rules" msgstr "" #: src/services/a-rex/get.cpp:396 src/services/a-rex/get.cpp:415 #, c-format msgid "VOMS attr %s matches %s" msgstr "" #: src/services/a-rex/get.cpp:397 #, c-format msgid "Cache access allowed to %s by VO %s and role %s" msgstr "" #: src/services/a-rex/get.cpp:400 src/services/a-rex/get.cpp:419 #, c-format msgid "VOMS attr %s doesn't match %s" msgstr "" #: src/services/a-rex/get.cpp:416 #, c-format msgid "Cache access allowed to %s by VO %s and group %s" msgstr "" #: src/services/a-rex/get.cpp:422 #, c-format msgid "Unknown credential type %s for URL pattern %s" msgstr "" #: src/services/a-rex/get.cpp:428 #, c-format msgid "No match found in cache access rules for %s" msgstr "" #: src/services/a-rex/get.cpp:438 #, c-format msgid "Get from cache: Looking in cache for %s" msgstr "" #: src/services/a-rex/get.cpp:441 #, c-format msgid "Get from cache: Invalid URL %s" msgstr "" #: src/services/a-rex/get.cpp:458 msgid "Get from cache: Error in cache configuration" msgstr "" #: src/services/a-rex/get.cpp:467 msgid "Get from cache: File not in cache" msgstr "" #: src/services/a-rex/get.cpp:470 #, c-format msgid "Get from cache: could not access cached file: %s" msgstr "" #: src/services/a-rex/get.cpp:480 msgid "Get from cache: Cached file is locked" msgstr "" #: src/services/a-rex/get_activity_documents.cpp:29 #, c-format msgid "" "GetActivityDocuments: request = \n" "%s" msgstr "" #: src/services/a-rex/get_activity_documents.cpp:40 msgid "GetActivityDocuments: non-AREX job requested" msgstr "" #: src/services/a-rex/get_activity_documents.cpp:49 #: src/services/a-rex/get_activity_documents.cpp:60 #, c-format msgid "GetActivityDocuments: job %s - %s" msgstr "" #: src/services/a-rex/get_activity_documents.cpp:72 #, c-format msgid "" "GetActivityDocuments: response = \n" "%s" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:35 #, c-format msgid "" "GetActivityStatuses: request = \n" "%s" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:50 #, c-format msgid "GetActivityStatuses: unknown verbosity level requested: %s" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:62 #, c-format msgid "GetActivityStatuses: job %s - can't understand EPR" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:71 #, c-format msgid "GetActivityStatuses: job %s - %s" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:105 #, c-format msgid "" "GetActivityStatuses: response = \n" "%s" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:306 #: src/services/a-rex/get_activity_statuses.cpp:400 #, c-format msgid "EMIES:GetActivityStatus: job %s - %s" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:526 #, c-format msgid "EMIES:GetActivityInfo: job %s - failed to retrieve GLUE2 information" msgstr "" #: src/services/a-rex/get_activity_statuses.cpp:578 #: src/services/a-rex/get_activity_statuses.cpp:585 #, c-format msgid "EMIES:NotifyService: job %s - %s" msgstr "" #: src/services/a-rex/get_factory_attributes_document.cpp:37 #, c-format msgid "" "GetFactoryAttributesDocument: request = \n" "%s" msgstr "" #: src/services/a-rex/get_factory_attributes_document.cpp:62 #, c-format msgid "" "GetFactoryAttributesDocument: response = \n" "%s" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:98 #, c-format msgid "" "Cannot create directories for log file 
%s. Messages will be logged to this " "log" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:104 #, c-format msgid "" "Cannot open cache log file %s: %s. Cache cleaning messages will be logged to " "this log" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:110 #: src/services/a-rex/grid-manager/log/JobLog.cpp:139 #, c-format msgid "Running command %s" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:114 msgid "Failed to start cache clean script" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:115 msgid "Cache cleaning script failed" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:177 msgid "Starting jobs processing thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:178 #, c-format msgid "Used configuration file %s" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:187 #, c-format msgid "" "Error adding communication interface in %s. Maybe another instance of A-REX " "is already running." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:190 #, c-format msgid "" "Error adding communication interface in %s. Maybe permissions are not " "suitable." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:199 #, c-format msgid "" "Error initiating delegation database in %s. Maybe permissions are not " "suitable. Returned error is: %s." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:211 msgid "Failed to start new thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:268 msgid "Failed to start new thread: cache won't be cleaned" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:273 msgid "Picking up left jobs" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:277 msgid "Starting data staging threads" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:281 msgid "Failed to start data staging threads, exiting Grid Manager thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:290 msgid "Starting jobs' monitoring" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:303 #, c-format msgid "Failed to open heartbeat file %s" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:335 #, c-format msgid "Orphan delegation lock detected (%s) - cleaning" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:340 msgid "Failed to obtain delegation locks for cleaning orphaned locks" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:346 msgid "Waking up" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:349 msgid "Stopping jobs processing thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:352 msgid "Exiting jobs processing thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:365 msgid "Shutting down job processing" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:370 msgid "Shutting down data staging threads" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:24 #, c-format msgid "" "Usage: %s -I -U -P -L [-c " "] [-p ] [-d ]" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:74 msgid "Job ID argument is required." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:80 msgid "Path to user's proxy file should be specified." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:86 msgid "User name should be specified." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:92 msgid "Path to .local job status file is required." 
msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:100 msgid "Generating ceID prefix from hostname automatically" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:103 msgid "" "Cannot determine hostname from gethostname() to generate ceID automatically." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:112 #, c-format msgid "ceID prefix is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:120 #, c-format msgid "Getting currect timestamp for BLAH parser log: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:129 msgid "Parsing .local file to obtain job-specific identifiers and info" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:139 #, c-format msgid "globalid is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:142 #, c-format msgid "headnode is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:145 #, c-format msgid "interface is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:149 msgid "There is no local LRMS ID. Message will not be written to BLAH log." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:152 #, c-format msgid "localid is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:155 #, c-format msgid "queue name is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:158 #, c-format msgid "owner subject is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:160 msgid "" "Job did not finished successfully. Message will not be written to BLAH log." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:165 #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:119 msgid "Can not read information from the local job status file" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:181 #, c-format msgid "" "Unsupported submission interface %s. Seems arc-blahp-logger need to be " "updated accordingly :-) Please submit the bug to bugzilla." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:191 msgid "Parsing VOMS AC to get FQANs information" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:210 msgid "No FQAN found. 
Using NULL as userFQAN value" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:224 #, c-format msgid "Assembling BLAH parser log entry: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:229 #, c-format msgid "Writing the info the the BLAH parser log: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:237 #, c-format msgid "Cannot open BLAH log file '%s'" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:24 #, c-format msgid "" "Usage: %s [-N] -P -L [-c ] [-d " "]" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:90 msgid "User proxy file is required but is not specified" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:95 msgid "Local job status file is required" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:114 #, c-format msgid "Making the decision for the queue %s" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:127 #, c-format msgid "Can not parse the configuration file %s" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:144 #, c-format msgid "Can not find queue '%s' in the configuration file" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:168 msgid "No access policy to check, returning success" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:182 #, c-format msgid "CA certificates directory %s does not exist" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:202 msgid "User proxy certificate is not valid" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:207 #, c-format msgid "Getting VOMS AC for: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:221 #, c-format msgid "Checking a match for '%s'" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:228 #, c-format msgid "FQAN '%s' IS a match to '%s'" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:232 #, c-format msgid "Queue '%s' usage is prohibited to FQAN '%s' by the site access policy" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:236 #, c-format msgid "FQAN '%s' IS NOT a match to '%s'" msgstr "" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:242 #, c-format msgid "" "Queue '%s' usage with provided FQANs is prohibited by the site access policy" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:35 #, c-format msgid "Missing cancel-%s-job - job cancellation may not work" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:39 #, c-format msgid "Missing submit-%s-job - job submission to LRMS may not work" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:43 #, c-format msgid "Missing scan-%s-job - may miss when job finished executing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:57 #, c-format msgid "Wrong option in %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:71 #, c-format msgid "Can't read configuration file at %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:80 #, c-format msgid "Can't interpret configuration file %s as XML" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:118 #, c-format msgid "Can't recognize type of configuration file at %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:121 msgid "Could not determine configuration type or configuration is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:169 msgid "No queue name given in queue block name" msgstr 
"" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:176 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:499 msgid "forcedefaultvoms parameter is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:185 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:198 msgid "authorizedvo parameter is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:276 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:604 #, c-format msgid "Wrong number in jobreport_period: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:280 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:608 #, c-format msgid "Wrong number in jobreport_period: %d, minimal value: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:299 msgid "Missing file name in jobreport_logfile" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:316 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:323 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:330 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:337 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:344 #, c-format msgid "Wrong number in maxjobs: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:351 #, c-format msgid "Wrong number in wakeupperiod: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:360 msgid "mail parameter is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:366 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:370 msgid "Wrong number in defaultttl command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:376 msgid "Wrong number in maxrerun command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:382 msgid "defaultlrms is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:398 msgid "State name for plugin is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:402 msgid "Options for plugin are missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:405 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:726 #, c-format msgid "Failed to register plugin for state %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:413 msgid "Wrong number for timeout in plugin command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:429 msgid "Wrong option in fixdirectories" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:441 msgid "Wrong option in delegationdb" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:456 msgid "Session root directory is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:459 msgid "Junk in sessiondir command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:471 msgid "Missing directory in control command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:476 msgid "" "'control' configuration option is no longer supported, please use " "'controldir' instead" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:481 msgid "User for helper program is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:484 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:875 msgid "Only user '.' 
for helper program is supported" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:487 msgid "Helper program is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:632 msgid "Value for maxJobsTracked is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:636 msgid "Value for maxJobsRun is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:640 msgid "Value for maxJobsTotal is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:644 msgid "Value for maxJobsPerDN is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:648 msgid "Value for wakeupPeriod is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:652 msgid "Value for maxScripts is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:664 msgid "serviceMail is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:678 msgid "Type in LRMS is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:691 msgid "LRMS is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:704 msgid "State name for authPlugin is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:709 msgid "Command for authPlugin is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:723 #, c-format msgid "Registering plugin for state %s; options: %s; command: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:739 msgid "Command for localCred is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:745 msgid "Timeout for localCred is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:750 msgid "Timeout for localCred is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:778 msgid "Control element must be present" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:783 msgid "controlDir is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:790 msgid "sessionRootDir is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:800 msgid "Attribute drain for sessionRootDir is incorrect boolean" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:809 msgid "The fixDirectories element is incorrect value" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:816 msgid "The delegationDB element is incorrect value" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:821 msgid "The maxReruns element is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:825 msgid "The noRootPower element is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:829 msgid "The defaultTTL element is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:833 msgid "The defaultTTR element is incorrect number" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:866 msgid "Command in helperUtility is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:871 msgid "Username in helperUtility is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:106 #, c-format msgid "\tSession root dir : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:107 #, c-format msgid "\tControl dir : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:108 #, c-format msgid 
"\tdefault LRMS : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:109 #, c-format msgid "\tdefault queue : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:110 #, c-format msgid "\tdefault ttl : %u" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:115 msgid "No valid caches found in configuration, caching is disabled" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:120 #, c-format msgid "\tCache : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:122 #, c-format msgid "\tCache link dir : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:126 #, c-format msgid "\tRemote cache : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:128 #, c-format msgid "\tRemote cache link: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:130 msgid "\tCache cleaning enabled" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:131 msgid "\tCache cleaning disabled" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:308 msgid "" "Globus location variable substitution is not supported anymore. Please " "specify path directly." msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:412 #, c-format msgid "Starting helper process: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:422 #, c-format msgid "Helper process start failed: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:429 #, c-format msgid "Stopping helper process %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:14 #, c-format msgid "wrong boolean in %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:20 #, c-format msgid "wrong number in %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:49 msgid "Can't read configuration file" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:58 #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:32 msgid "Can't interpret configuration file as XML" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:62 #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:69 msgid "Configuration error" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:75 #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:103 msgid "Can't recognize type of configuration file" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:95 msgid "Bad number in maxdelivery" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:101 msgid "Bad number in maxemergency" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:107 msgid "Bad number in maxprocessor" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:113 msgid "Bad number in maxprepared" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:119 msgid "Bad number in maxtransfertries" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:128 msgid "Bad number in speedcontrol" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:139 #, c-format msgid "Bad number in definedshare %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:148 #, c-format msgid "Bad URL in deliveryservice: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:159 msgid "Bad number in remotesizelimit" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:189 msgid "Bad value for debug" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:203 #: 
src/services/a-rex/grid-manager/conf/StagingConfig.cpp:315 msgid "Bad URL in acix_endpoint" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:258 #, c-format msgid "Bad URL in deliveryService: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:272 msgid "Bad value for logLevel" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:25 msgid "Can't open configuration file" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:47 msgid "Value for 'link' element in mapURL is incorrect" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:53 msgid "Missing 'from' element in mapURL" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:57 msgid "Missing 'to' element in mapURL" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:84 msgid "Not enough parameters in copyurl" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:93 msgid "Not enough parameters in linkurl" msgstr "" #: src/services/a-rex/grid-manager/files/ControlFileContent.cpp:179 #, c-format msgid "Wrong directory in %s" msgstr "" #: src/services/a-rex/grid-manager/files/ControlFileHandling.cpp:100 #, c-format msgid "Failed setting file owner: %s" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:33 msgid "gm-delegations-converter changes format of delegation database." msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:38 #: src/services/a-rex/grid-manager/gm_jobs.cpp:106 #: src/services/a-rex/grid-manager/gm_kick.cpp:24 msgid "use specified configuration file" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:39 #: src/services/a-rex/grid-manager/gm_jobs.cpp:107 #: src/services/a-rex/grid-manager/gm_kick.cpp:25 msgid "file" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:43 #: src/services/a-rex/grid-manager/gm_jobs.cpp:111 msgid "read information from specified control directory" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:44 #: src/services/a-rex/grid-manager/gm_jobs.cpp:112 msgid "dir" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:48 msgid "convert from specified input database format [bdb|sqlite]" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:49 #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:54 msgid "database format" msgstr "" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:53 msgid "convert into specified output database format [bdb|sqlite]" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:96 msgid "gm-jobs displays information on current jobs in the system." 
msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:101 msgid "display more information on each job" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:116 msgid "print summary of jobs in each transfer share" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:121 msgid "do not print list of jobs" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:126 msgid "do not print number of jobs in each state" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:131 msgid "print state of the service" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:136 msgid "show only jobs of user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:137 #: src/services/a-rex/grid-manager/gm_jobs.cpp:147 #: src/services/a-rex/grid-manager/gm_jobs.cpp:157 msgid "dn" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:141 msgid "request to cancel job(s) with specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:142 #: src/services/a-rex/grid-manager/gm_jobs.cpp:152 #: src/services/a-rex/grid-manager/gm_jobs.cpp:162 #: src/services/a-rex/grid-manager/gm_jobs.cpp:172 msgid "id" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:146 msgid "" "request to cancel jobs belonging to user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:151 msgid "request to clean job(s) with specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:156 msgid "" "request to clean jobs belonging to user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:161 msgid "show only jobs with specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:166 msgid "print list of available delegation IDs" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:171 msgid "print delegation token of specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:176 msgid "print main delegation token of specified Job ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:177 msgid "job id" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:181 msgid "" "output requested elements (jobs list, delegation ids and tokens) to file" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:182 msgid "file name" msgstr "" #: src/services/a-rex/grid-manager/gm_kick.cpp:18 msgid "" "gm-kick wakes up the A-REX corresponding to the given control file. If no " "file is given it uses the control directory found in the configuration file." msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:39 #, c-format msgid "Failed to acquire source: %s" msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:44 #, c-format msgid "Failed to resolve %s" msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:61 #, c-format msgid "Failed to check %s" msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:75 msgid "job_description_file [proxy_file]" msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:76 msgid "" "inputcheck checks that input files specified in the job description are " "available and accessible using the credentials in the given proxy file." 
msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:88 msgid "Wrong number of arguments given" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:105 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1009 msgid "Failed to run plugin" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:109 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1015 #, c-format msgid "Plugin failed: %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:158 msgid "empty argument to remotegmdirs" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:165 msgid "bad arguments to remotegmdirs" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:177 msgid "Wrong number in maxjobdesc" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:185 #: src/services/gridftpd/fileplugin/fileplugin.cpp:186 #, c-format msgid "Unsupported configuration command: %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:199 #, c-format msgid "Mapped user:group (%s:%s) not found" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:202 msgid "Failed processing grid-manager configuration" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:205 msgid "" "Cannot use multiple session directories and remotegmdirs at the same time" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:228 msgid "This user is denied to submit new jobs." msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:239 msgid "No control or remote control directories defined in configuration" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:290 #, c-format msgid "Job submission user: %s (%i:%i)" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:294 msgid "Job plugin was not initialised" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:312 msgid "No delegated credentials were passed" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:385 #, c-format msgid "Cancelling job %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:436 #, c-format msgid "Cleaning job %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:472 msgid "Request to open file with storing in progress" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:505 #: src/services/gridftpd/fileplugin/fileplugin.cpp:343 #, c-format msgid "Retrieving file %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:556 #, c-format msgid "Accepting submission of new job or modification request: %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:577 #: src/services/gridftpd/fileplugin/fileplugin.cpp:383 #: src/services/gridftpd/fileplugin/fileplugin.cpp:420 #, c-format msgid "Storing file %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:599 #, c-format msgid "Unknown open mode %i" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:716 #, c-format msgid "action(%s) != request" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:767 msgid "Failed writing job description" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:954 msgid "Failed writing local description" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:964 msgid "Failed writing ACL" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:980 #: 
src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:987 #: src/services/a-rex/job.cpp:587 #, c-format msgid "Failed to run external plugin: %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:991 #: src/services/a-rex/job.cpp:591 #, c-format msgid "Plugin response: %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:993 msgid "Failed to run external plugin" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1025 #, c-format msgid "Failed to create session directory %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1035 msgid "Failed writing status" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1049 #, c-format msgid "Failed to lock delegated credentials: %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1300 #, c-format msgid "Renewing proxy for job %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1312 #, c-format msgid "New proxy expires at %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1315 msgid "Failed to write 'local' information" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1321 msgid "Failed to renew proxy" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1324 msgid "New proxy expiry time is not later than old proxy, not renewing proxy" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1367 #, c-format msgid "Checking file %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1415 msgid "ID contains forbidden characters" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1462 #: src/services/a-rex/job.cpp:781 #, c-format msgid "Failed to create file in %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1487 msgid "Out of tries while allocating new job ID" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1568 #, c-format msgid "Failed to read job's local description for job %s from %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1650 msgid "No non-draining control or session directories available" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1666 #, c-format msgid "Using control directory %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1667 #, c-format msgid "Using session directory %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:26 #, c-format msgid "Failed to read job's ACL for job %s from %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:69 #, c-format msgid "Failed to parse user policy for job %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:74 #, c-format msgid "Failed to load policy evaluator for policy of job %s" msgstr "" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:126 #, c-format msgid "Unknown ACL policy %s for job %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:121 msgid "Exiting Generator thread" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:211 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:225 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:238 msgid "DTRGenerator is not running!" 
msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:214 #, c-format msgid "Received DTR %s during Generator shutdown - may not be processed" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:314 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:324 #, c-format msgid "%s: Trying to remove job from data staging which is still active" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:333 #, c-format msgid "%s: Trying remove job from data staging which does not exist" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:344 #, c-format msgid "%s: Invalid DTR" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:354 #, c-format msgid "%s: Received DTR %s to copy file %s in state %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:366 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:982 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:281 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:304 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:634 #, c-format msgid "%s: Failed reading local information" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:377 #, c-format msgid "%s: DTR %s to copy file %s failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:383 #, c-format msgid "%s: Cancelling other DTRs" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:394 #, c-format msgid "%s: DTR %s to copy to %s failed but is not mandatory" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:404 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:645 #, c-format msgid "%s: Failed to read list of output files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:418 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:555 #, c-format msgid "%s: Failed to read dynamic output files in %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:420 #, c-format msgid "%s: Going through files in list %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:424 #, c-format msgid "%s: Removing %s from dynamic output file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:428 #, c-format msgid "%s: Failed to write back dynamic output files in %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:444 #, c-format msgid "%s: Failed to write list of output files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:448 #, c-format msgid "%s: Failed to write list of output status files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:460 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:656 #, c-format msgid "%s: Failed to read list of input files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:479 #, c-format msgid "%s: Failed to write list of input files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:491 #, c-format msgid "%s: Received DTR with two remote endpoints!" 
msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:502 #: src/services/cache_service/CacheServiceGenerator.cpp:108 #, c-format msgid "No active job id %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:543 #, c-format msgid "%s: Failed to read list of output files, can't clean up session dir" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:567 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:578 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:692 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:817 #, c-format msgid "%s: Failed to clean up session dir" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:576 #, c-format msgid "%s: Failed to read list of input files, can't clean up session dir" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:588 #, c-format msgid "%s: All %s %s successfully" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:592 #, c-format msgid "%s: Some %s failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:607 #, c-format msgid "%s: Received data staging request to %s files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:667 #, c-format msgid "%s: Duplicate file in list of input files: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:714 #, c-format msgid "%s: Reading output files from user generated list in %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:716 #, c-format msgid "%s: Error reading user generated output file list in %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:744 #, c-format msgid "%s: Failed to list output directory %s: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:763 #, c-format msgid "%s: Adding new output file %s: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:785 #, c-format msgid "%s: Two identical output destinations: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:798 #, c-format msgid "%s: Cannot upload two different files %s and %s to same LFN: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:828 #, c-format msgid "%s: Received job in a bad state: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:901 #, c-format msgid "" "%s: Destination file %s was possibly left unfinished from previous A-REX " "run, will overwrite" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:988 #, c-format msgid "%s: Failed writing local information" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1005 #, c-format msgid "%s: Cancelling active DTRs" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1028 #, c-format msgid "%s: Can't read list of input files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1043 #, c-format msgid "%s: Checking user uploadable file: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1048 #, c-format msgid "%s: User has uploaded file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1055 #, c-format msgid "%s: Failed writing changed input file." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1059 #, c-format msgid "%s: Critical error for uploadable file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1076 #, c-format msgid "%s: Uploadable files timed out" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1132 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1158 #, c-format msgid "%s: Can't convert checksum %s to int for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1139 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1153 #, c-format msgid "%s: Can't convert filesize %s to int for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1148 #, c-format msgid "%s: Invalid size/checksum information (%s) for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1170 #, c-format msgid "%s: Invalid file: %s is too big." msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1186 #, c-format msgid "%s: Failed to switch user ID to %d/%d to read file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1192 #, c-format msgid "%s: Failed to open file %s for reading" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1200 #, c-format msgid "%s: Error accessing file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1212 #, c-format msgid "%s: Error reading file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1227 #, c-format msgid "%s: File %s has wrong checksum: %llu. Expected %lli" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1233 #, c-format msgid "%s: Checksum %llu verified for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1245 msgid "" "Found unfinished DTR transfers. It is possible the previous A-REX process " "did not shut down normally" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1252 #, c-format msgid "Found DTR %s for file %s left in transferring state from previous run" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:131 #, c-format msgid "Replacing queue '%s' with '%s'" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:225 #, c-format msgid "Bad name for stdout: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:233 #, c-format msgid "Bad name for stderr: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:291 #, c-format msgid "Bad name for runtime environment: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:336 msgid "Job description file could not be read." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:387 msgid "Bad name for executable: " msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:401 #, c-format msgid "Bad name for executable: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:107 #, c-format msgid "" "%s: Failed reading .local and changing state, job and A-REX may be left in " "an inconsistent state" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:169 #, c-format msgid "Current jobs in system (PREPARING to FINISHING) per-DN (%i entries)" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:171 #, c-format msgid "%s: %i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:177 #, c-format msgid "%s: Destroying" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:182 #, c-format msgid "%s: Can't read state - no comments, just cleaning" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:194 #, c-format msgid "%s: Cleaning control and session directories" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:200 #, c-format msgid "%s: This job may be still running - canceling" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:203 #, c-format msgid "%s: Cancellation failed (probably job finished) - cleaning anyway" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:210 #, c-format msgid "%s: Cancellation probably succeeded - cleaning" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:273 #, c-format msgid "%s: Failed writing list of output files: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:315 #, c-format msgid "%s: Failed creating grami file" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:319 #, c-format msgid "%s: Failed setting executable permissions" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:331 #, c-format msgid "%s: state SUBMIT: starting child: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:334 #, c-format msgid "%s: state CANCELING: starting child: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:336 #, c-format msgid "%s: Job has completed already. No action taken to cancel" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:347 #, c-format msgid "%s: Failed running submission process" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:349 #, c-format msgid "%s: Failed running cancellation process" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:355 #, c-format msgid "%s: LRMS scripts limit of %u is reached - suspending submit/cancel" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:374 #, c-format msgid "" "%s: Job submission to LRMS takes too long, but ID is already obtained. " "Pretending submission is done." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:380 #, c-format msgid "" "%s: Job cancellation takes too long, but diagnostic collection seems to be " "done. Pretending cancellation succeeded." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:388 #, c-format msgid "%s: Job submission to LRMS takes too long. Failing." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:394 #, c-format msgid "%s: Job cancellation takes too long. Failing." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:404 #, c-format msgid "%s: state SUBMIT: child exited with code %i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:409 #, c-format msgid "%s: state CANCELING: child exited with code %i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:417 #, c-format msgid "%s: Job submission to LRMS failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:420 #, c-format msgid "%s: Failed to cancel running job" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:434 #, c-format msgid "%s: Failed obtaining lrms id" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:447 #, c-format msgid "%s: Failed writing local information: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:457 #, c-format msgid "%s: state CANCELING: timeout waiting for cancellation" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:463 #, c-format msgid "%s: state CANCELING: job diagnostics collected" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:531 #, c-format msgid "%s: State: %s: still in data staging" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:550 #, c-format msgid "%s: Job is not allowed to be rerun anymore" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:560 #, c-format msgid "%s: Job failed in unknown state. Won't rerun." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:581 #, c-format msgid "%s: Reprocessing job description failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:588 #, c-format msgid "%s: Failed to read reprocessed list of output files" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:592 #, c-format msgid "%s: Failed to read reprocessed list of input files" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:676 #, c-format msgid "%s: Reading status of new job failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:689 #, c-format msgid "%s: State: ACCEPTED: parsing job description" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:691 #, c-format msgid "%s: Processing job description failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:708 #, c-format msgid "%s: %s: New job belongs to %i/%i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:718 #, c-format msgid "Failed to get DN information from .local file for job %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:732 #, c-format msgid "%s: State: ACCEPTED" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:739 #, c-format msgid "%s: State: ACCEPTED: dryrun" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:751 #, c-format msgid "%s: State: ACCEPTED: has process time %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:757 #, c-format msgid "%s: State: ACCEPTED: moving to PREPARING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:774 #, c-format msgid "%s: State: PREPARING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:778 #, c-format msgid "%s: Failed obtaining local job information." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:829 #, c-format msgid "%s: State: SUBMIT" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:844 #, c-format msgid "%s: State: CANCELING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:860 #, c-format msgid "%s: State: INLRMS" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:868 #, c-format msgid "%s: Job finished" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:872 #, c-format msgid "%s: State: INLRMS: exit message is %i %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:893 #, c-format msgid "%s: State: FINISHING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:922 #, c-format msgid "%s: Job is requested to clean - deleting" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:961 #, c-format msgid "%s: Can't rerun on request" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:963 #, c-format msgid "%s: Can't rerun on request - not a suitable state" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:973 #, c-format msgid "%s: Job is too old - deleting" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1012 #, c-format msgid "%s: Job is ancient - delete rest of information" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1041 #, c-format msgid "%s: Canceling job because of user request" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1107 #, c-format msgid "%s: Job failure detected" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1136 #, c-format msgid "%s: State: %s from %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1163 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1171 #, c-format msgid "%s: Plugin at state %s : %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1177 #, c-format msgid "%s: Plugin execution failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1202 #, c-format msgid "%s: Delete request due to internal problems" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1284 #, c-format msgid "Failed to move file %s to %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1292 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1375 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1464 #, c-format msgid "Failed reading control directory: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1334 #, c-format msgid "Failed reading control directory: %s: %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:153 #, c-format msgid "Invalid checksum in %s for %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:162 #, c-format msgid "Invalid file size in %s for %s " msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:182 #, c-format msgid "Invalid file: %s is too big." msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:196 #, c-format msgid "Error accessing file %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:205 #, c-format msgid "Error reading file %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:216 #, c-format msgid "File %s has wrong CRC." 
msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:240 #, c-format msgid "Failed downloading file %s - %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:246 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:132 msgid "Retrying" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:249 #, c-format msgid "Downloaded file %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:330 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:243 #, c-format msgid "Wrong number of threads: %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:336 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:249 #, c-format msgid "Wrong number of files: %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:342 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:358 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:365 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:372 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:379 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:255 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:271 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:278 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:285 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:292 #, c-format msgid "Bad number: %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:346 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:352 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:259 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:265 msgid "Specified user can't be handled" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:384 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:297 #, c-format msgid "Unsupported option: %c" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:388 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:301 #, c-format msgid "Missing parameter for option %c" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:392 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:305 msgid "Undefined processing error" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:399 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:312 msgid "Missing job id" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:401 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:314 msgid "Missing control directory" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:403 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:316 msgid "Missing session directory" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:446 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:359 #, c-format msgid "Minimal speed: %llu B/s during %i s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:448 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:361 #, c-format msgid "Minimal average speed: %llu B/s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:450 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:363 #, c-format msgid "Maximal inactivity time: %i s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:455 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:368 msgid "Won't use more than 10 threads" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:462 msgid "Downloader started" msgstr "" #: 
src/services/a-rex/grid-manager/loaders/downloader.cpp:488 msgid "Can't read list of input files" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:495 #, c-format msgid "Error: duplicate file in list of input files: %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:518 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:402 msgid "Can't read list of output files" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:523 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:449 msgid "Can't remove junk files" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:531 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:463 msgid "Can't read job local description" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:553 #, c-format msgid "Local source for download: %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:567 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:571 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:512 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:518 #, c-format msgid "Can't accept URL: %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:586 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:535 #, c-format msgid "Failed to initiate file transfer: %s - %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:603 #, c-format msgid "Downloaded %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:628 #, c-format msgid "Failed to download (but may be retried) %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:633 #, c-format msgid "Failed to download %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:642 msgid "Some downloads failed" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:646 msgid "Some downloads failed, but may be retried" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:650 msgid "Failed writing changed input file" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:662 #, c-format msgid "Checking user uploadable file: %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:668 #, c-format msgid "User has uploaded file %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:673 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:706 msgid "Failed writing changed input file." 
msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:677 #, c-format msgid "Critical error for uploadable file %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:694 #, c-format msgid "No changes in uploadable files for %u seconds" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:695 msgid "Uploadable files timed out" msgstr "" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:761 #, c-format msgid "Leaving downloader (%i)" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:126 #, c-format msgid "Failed uploading file %s - %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:135 #, c-format msgid "Uploaded file %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:373 msgid "Uploader started" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:409 #, c-format msgid "Reading output files from user generated list in %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:411 #, c-format msgid "Error reading user generated output file list in %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:424 #, c-format msgid "Two identical output destinations: %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:433 #, c-format msgid "Cannot upload two different files %s and %s to same LFN: %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:496 #, c-format msgid "Local destination for uploader %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:556 #, c-format msgid "Uploaded %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:580 msgid "Failed writing output status file" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:587 #, c-format msgid "Failed to upload (but may be retried) %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:591 #, c-format msgid "Failed to upload %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:600 msgid "Some uploads failed" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:616 #, c-format msgid "Writing back dynamic output file %s" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:618 #, c-format msgid "Failed to rewrite output file list %s. 
Job resuming may not work" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:630 msgid "Some uploads failed, but (some) may be retried" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:637 msgid "Failed writing changed output file" msgstr "" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:649 #, c-format msgid "Leaving uploader (%i)" msgstr "" #: src/services/a-rex/grid-manager/log/JobLog.cpp:118 msgid ": Logger name is not specified" msgstr "" #: src/services/a-rex/grid-manager/log/JobLog.cpp:130 msgid ": Failure creating slot for reporter child process" msgstr "" #: src/services/a-rex/grid-manager/log/JobLog.cpp:143 msgid ": Failure starting reporter child process" msgstr "" #: src/services/a-rex/grid-manager/log/JobsMetrics.cpp:130 #, c-format msgid ": Metrics tool returned error code %i: %s" msgstr "" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:29 msgid "Failed reading local information" msgstr "" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:79 #, c-format msgid "Running mailer command (%s)" msgstr "" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:81 msgid "Failed running mailer" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:71 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:24 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:55 #, c-format msgid "%s: Failure creating slot for child process" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:79 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:30 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:61 #, c-format msgid "%s: Failure creating data storage for child process" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:123 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:41 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:72 #, c-format msgid "%s: Failure starting child process" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:140 #, c-format msgid "%s: Failed to run plugin" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:143 #, c-format msgid "%s: Plugin failed" msgstr "" #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:46 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:77 #, c-format msgid "%s: Failure waiting for child process to finish" msgstr "" #: src/services/a-rex/information_collector.cpp:45 #, c-format msgid "Resource information provider: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:51 msgid "Resource information provider failed" msgstr "" #: src/services/a-rex/information_collector.cpp:55 #, c-format msgid "" "Resource information provider failed with exit status: %i\n" "%s" msgstr "" #: src/services/a-rex/information_collector.cpp:57 #, c-format msgid "" "Resource information provider log:\n" "%s" msgstr "" #: src/services/a-rex/information_collector.cpp:61 msgid "No new informational document assigned" msgstr "" #: src/services/a-rex/information_collector.cpp:63 #, c-format msgid "Obtained XML: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:77 msgid "Informational document is empty" msgstr "" #: src/services/a-rex/information_collector.cpp:88 msgid "Passing service's information from collector to registrator" msgstr "" #: src/services/a-rex/information_collector.cpp:144 #, c-format msgid "" "Registered static information: \n" " doc: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:147 #, c-format msgid "" "Information Registered without static attributes: \n" " doc: %s" 
msgstr "" #: src/services/a-rex/information_collector.cpp:324 msgid "OptimizedInformationContainer failed to create temporary file" msgstr "" #: src/services/a-rex/information_collector.cpp:327 #, c-format msgid "OptimizedInformationContainer created temporary file: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:333 msgid "" "OptimizedInformationContainer failed to store XML document to temporary file" msgstr "" #: src/services/a-rex/information_collector.cpp:342 msgid "OptimizedInformationContainer failed to parse XML" msgstr "" #: src/services/a-rex/information_collector.cpp:353 msgid "OptimizedInformationContainer failed to rename temprary file" msgstr "" #: src/services/a-rex/job.cpp:53 #, c-format msgid "Cannot handle local user %s" msgstr "" #: src/services/a-rex/job.cpp:101 #, c-format msgid "%s: Failed to parse user policy" msgstr "" #: src/services/a-rex/job.cpp:106 #, c-format msgid "%s: Failed to load evaluator for user policy " msgstr "" #: src/services/a-rex/job.cpp:211 #, c-format msgid "%s: Unknown user policy '%s'" msgstr "" #: src/services/a-rex/job.cpp:473 src/services/a-rex/job.cpp:497 #, c-format msgid "Credential expires at %s" msgstr "" #: src/services/a-rex/job.cpp:475 src/services/a-rex/job.cpp:499 #, c-format msgid "Credential handling exception: %s" msgstr "" #: src/services/a-rex/job.cpp:789 #, c-format msgid "Out of tries while allocating new job ID in %s" msgstr "" #: src/services/a-rex/job.cpp:1006 msgid "No non-draining session dirs available" msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:89 #: src/services/a-rex/jura/CARDestination.cpp:49 #: src/services/a-rex/jura/LutsDestination.cpp:71 msgid "ServiceURL missing" msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:97 #: src/services/a-rex/jura/CARDestination.cpp:56 #: src/services/a-rex/jura/LutsDestination.cpp:89 #, c-format msgid "Protocol is %s, should be https" msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:133 #: src/services/a-rex/jura/ApelDestination.cpp:158 #: src/services/a-rex/jura/CARDestination.cpp:95 #: src/services/a-rex/jura/LutsDestination.cpp:120 #: src/services/a-rex/jura/LutsDestination.cpp:144 #, c-format msgid "Ignoring incomplete log file \"%s\"" msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:182 #: src/services/a-rex/jura/CARDestination.cpp:119 #: src/services/a-rex/jura/LutsDestination.cpp:166 #, c-format msgid "Logging UR set of %d URs." msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:185 #: src/services/a-rex/jura/CARDestination.cpp:122 #: src/services/a-rex/jura/Destination.cpp:61 #: src/services/a-rex/jura/LutsDestination.cpp:169 #, c-format msgid "UR set dump: %s" msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:271 #: src/services/a-rex/jura/Destination.cpp:96 #, c-format msgid "Backup file (%s) created." msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:316 #, c-format msgid "APEL message file (%s) created." msgstr "" #: src/services/a-rex/jura/ApelDestination.cpp:370 #: src/services/a-rex/jura/CARAggregation.cpp:208 #, c-format msgid "system retval: %d" msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:73 #, c-format msgid "Aggregation record (%s) not exist, initialize it..." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:77 #, c-format msgid "Aggregation record (%s) initialization successful." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:82 #, c-format msgid "Some error happens during the Aggregation record (%s) initialization." 
msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:88 #, c-format msgid "Aggregation record (%s) read from file successful." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:100 #, c-format msgid "Aggregation record (%s) stored successful." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:103 #, c-format msgid "Some error happens during the Aggregation record (%s) storing." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:156 #, c-format msgid "APEL aggregation message file (%s) created." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:228 #, c-format msgid "year: %s" msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:229 #, c-format msgid "moth: %s" msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:230 #, c-format msgid "queue: %s" msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:238 #: src/services/a-rex/jura/CARAggregation.cpp:404 #, c-format msgid "query: %s" msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:241 #, c-format msgid "list size: %d" msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:359 #, c-format msgid "XML: %s" msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:361 msgid "UPDATE Aggregation Record called." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:415 #: src/services/a-rex/jura/CARAggregation.cpp:465 msgid "Does not sending empty aggregation/synch message." msgstr "" #: src/services/a-rex/jura/CARAggregation.cpp:548 #, c-format msgid "synch message: %s" msgstr "" #: src/services/a-rex/jura/Destination.cpp:123 #, c-format msgid "Sent jobIDs: (nr. of job(s) %d)" msgstr "" #: src/services/a-rex/jura/Destinations.cpp:27 msgid "Unable to create adapter for the specific reporting destination type" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:92 #, c-format msgid "Insert filter element: <%s,%s>" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:105 #, c-format msgid "Not set filter for this URL (%s)." msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:111 #, c-format msgid "Current job's VO name: %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:114 #, c-format msgid "VO filter for host: %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:204 #: src/services/a-rex/jura/JobLogFile.cpp:698 #, c-format msgid "Read archive file %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:209 #: src/services/a-rex/jura/JobLogFile.cpp:703 #, c-format msgid "" "Could not read archive file %s for job log file %s (%s), generating new " "Usage Record" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:298 #: src/services/a-rex/jura/JobLogFile.cpp:827 #, c-format msgid "" "Missing required Usage Record element \"RecordIdentity\", in job log file %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:345 #, c-format msgid "VO (%s) not set for this (%s) SGAS server by VO filter." msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:378 #, c-format msgid "[VO filter] Job log will be not send. %s." 
msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:454 #: src/services/a-rex/jura/JobLogFile.cpp:970 #, c-format msgid "Missing required element \"Status\" in job log file %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:663 #: src/services/a-rex/jura/JobLogFile.cpp:1280 #, c-format msgid "Failed to create archive directory %s: %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:670 #: src/services/a-rex/jura/JobLogFile.cpp:1287 #, c-format msgid "Archiving Usage Record to file %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:676 #: src/services/a-rex/jura/JobLogFile.cpp:1293 #, c-format msgid "Failed to write file %s: %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:1062 #, c-format msgid "Missing required element \"CpuDuration\" in job log file %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:1082 #, c-format msgid "Set non standard bechmark type: %s" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:1095 #, c-format msgid "Ignored incoming benchmark value: %s, Use float value!" msgstr "" #: src/services/a-rex/jura/JobLogFile.cpp:1324 #, c-format msgid "Failed to delete file %s:%s" msgstr "" #: src/services/a-rex/jura/LutsDestination.cpp:223 #, c-format msgid "UsageRecords registration response: %s" msgstr "" #: src/services/a-rex/jura/ReReporter.cpp:53 #, c-format msgid "Initialised, archived job log dir: %s" msgstr "" #: src/services/a-rex/jura/ReReporter.cpp:73 #, c-format msgid "Incoming time range: %s" msgstr "" #: src/services/a-rex/jura/ReReporter.cpp:92 #, c-format msgid "Requested time range: %d.%d.%d. 0:00 - %d.%d.%d. %d:%d " msgstr "" #: src/services/a-rex/jura/ReReporter.cpp:98 #: src/services/a-rex/jura/UsageReporter.cpp:45 msgid "Interactive mode." msgstr "" #: src/services/a-rex/jura/ReReporter.cpp:127 #: src/services/a-rex/jura/UsageReporter.cpp:68 #, c-format msgid "Could not open log directory \"%s\": %s" msgstr "" #: src/services/a-rex/jura/ReReporter.cpp:167 #: src/services/a-rex/jura/UsageReporter.cpp:193 #, c-format msgid "Error reading log directory \"%s\": %s" msgstr "" #: src/services/a-rex/jura/ReReporter.cpp:179 #: src/services/a-rex/jura/UsageReporter.cpp:205 #, c-format msgid "Finished, job log dir: %s" msgstr "" #: src/services/a-rex/jura/UsageReporter.cpp:39 #, c-format msgid "Initialised, job log dir: %s" msgstr "" #: src/services/a-rex/jura/UsageReporter.cpp:41 #, c-format msgid "Expiration time: %d seconds" msgstr "" #: src/services/a-rex/jura/UsageReporter.cpp:80 #, c-format msgid "Could not open output directory \"%s\": %s" msgstr "" #: src/services/a-rex/jura/UsageReporter.cpp:125 #, c-format msgid "Removing outdated job log file %s" msgstr "" #: src/services/a-rex/migrate_activity.cpp:37 #, c-format msgid "" "MigrateActivity: request = \n" "%s" msgstr "" #: src/services/a-rex/migrate_activity.cpp:42 msgid "MigrateActivitys: no ActivityIdentifier found" msgstr "" #: src/services/a-rex/migrate_activity.cpp:51 msgid "MigrateActivity: EPR contains no JobID" msgstr "" #: src/services/a-rex/migrate_activity.cpp:69 msgid "MigrateActivity: Failed to accept delegation" msgstr "" #: src/services/a-rex/migrate_activity.cpp:130 msgid "MigrateActivity: no job description found" msgstr "" #: src/services/a-rex/migrate_activity.cpp:153 #, c-format msgid "Migration XML sent to AREXJob: %s" msgstr "" #: src/services/a-rex/migrate_activity.cpp:180 #, c-format msgid "MigrateActivity: Failed to migrate new job: %s" msgstr "" #: src/services/a-rex/migrate_activity.cpp:182 msgid "MigrateActivity: Failed to migrate new job" msgstr "" #: 
src/services/a-rex/migrate_activity.cpp:198 msgid "MigrateActivity finished successfully" msgstr "" #: src/services/a-rex/migrate_activity.cpp:202 #, c-format msgid "" "MigrateActivity: response = \n" "%s" msgstr "" #: src/services/a-rex/put.cpp:37 #, c-format msgid "Put: there is no job: %s - %s" msgstr "" #: src/services/a-rex/put.cpp:43 #, c-format msgid "Put: there is no payload for file %s in job: %s" msgstr "" #: src/services/a-rex/put.cpp:56 #, c-format msgid "Put: unrecognized payload for file %s in job: %s" msgstr "" #: src/services/a-rex/put.cpp:76 src/services/a-rex/put.cpp:130 #, c-format msgid "Put: failed to create file %s for job %s - %s" msgstr "" #: src/services/a-rex/put.cpp:85 #, c-format msgid "Put: failed to set position of file %s for job %s to %Lu - %s" msgstr "" #: src/services/a-rex/put.cpp:91 #, c-format msgid "Put: failed to allocate memory for file %s in job %s" msgstr "" #: src/services/a-rex/put.cpp:103 #, c-format msgid "Put: failed to write to file %s for job %s - %s" msgstr "" #: src/services/a-rex/terminate_activities.cpp:29 #, c-format msgid "" "TerminateActivities: request = \n" "%s" msgstr "" #: src/services/a-rex/terminate_activities.cpp:40 msgid "TerminateActivities: non-AREX job requested" msgstr "" #: src/services/a-rex/terminate_activities.cpp:49 #, c-format msgid "TerminateActivities: job %s - %s" msgstr "" #: src/services/a-rex/terminate_activities.cpp:69 #, c-format msgid "" "TerminateActivities: response = \n" "%s" msgstr "" #: src/services/a-rex/test.cpp:34 src/tests/count/test_service.cpp:25 #: src/tests/echo/test.cpp:23 src/tests/echo/test_service.cpp:22 msgid "Creating service side chain" msgstr "" #: src/services/a-rex/test.cpp:37 src/tests/count/test_service.cpp:28 #: src/tests/echo/test.cpp:26 src/tests/echo/test_service.cpp:25 msgid "Failed to load service configuration" msgstr "" #: src/services/a-rex/test.cpp:43 src/services/a-rex/test_cache_check.cpp:24 #: src/tests/count/test_client.cpp:23 #: src/tests/echo/echo_test4axis2c/test_client.cpp:20 #: src/tests/echo/test_client.cpp:21 msgid "Creating client side chain" msgstr "" #: src/services/a-rex/test.cpp:49 src/tests/count/test_client.cpp:53 #: src/tests/echo/echo_test4axis2c/test_client.cpp:43 #: src/tests/echo/test_client.cpp:59 msgid "Failed to load client configuration" msgstr "" #: src/services/a-rex/test.cpp:53 src/tests/count/test_client.cpp:57 #: src/tests/echo/echo_test4axis2c/test_client.cpp:47 #: src/tests/echo/test.cpp:58 src/tests/echo/test_client.cpp:63 msgid "Client side MCCs are loaded" msgstr "" #: src/services/a-rex/test.cpp:56 src/tests/count/test_client.cpp:60 #: src/tests/echo/echo_test4axis2c/test_client.cpp:50 #: src/tests/echo/test_client.cpp:66 msgid "Client chain does not have entry point" msgstr "" #: src/services/a-rex/test.cpp:112 src/services/a-rex/test.cpp:191 #: src/services/a-rex/test.cpp:248 src/services/a-rex/test.cpp:296 #: src/services/a-rex/test.cpp:344 src/services/a-rex/test.cpp:392 #: src/tests/count/test_client.cpp:87 #: src/tests/echo/echo_test4axis2c/test_client.cpp:74 #: src/tests/echo/test.cpp:74 src/tests/echo/test_client.cpp:90 msgid "Request failed" msgstr "" #: src/services/a-rex/test.cpp:115 src/services/a-rex/test.cpp:194 #: src/services/a-rex/test.cpp:251 src/services/a-rex/test.cpp:299 #: src/services/a-rex/test.cpp:347 src/services/a-rex/test.cpp:395 #: src/tests/echo/test.cpp:82 msgid "Request succeed!!!" 
msgstr "" #: src/services/a-rex/test.cpp:117 src/services/a-rex/test.cpp:196 #: src/services/a-rex/test.cpp:253 src/services/a-rex/test.cpp:301 #: src/services/a-rex/test.cpp:349 src/services/a-rex/test.cpp:397 #: src/tests/count/test_client.cpp:93 #: src/tests/echo/echo_test4axis2c/test_client.cpp:80 #: src/tests/echo/test.cpp:79 src/tests/echo/test_client.cpp:96 msgid "There is no response" msgstr "" #: src/services/a-rex/test.cpp:125 src/services/a-rex/test.cpp:204 #: src/services/a-rex/test.cpp:261 src/services/a-rex/test.cpp:309 #: src/services/a-rex/test.cpp:357 src/services/a-rex/test.cpp:405 #: src/tests/count/test_client.cpp:100 #: src/tests/echo/echo_test4axis2c/test_client.cpp:87 #: src/tests/echo/test_client.cpp:103 msgid "Response is not SOAP" msgstr "" #: src/services/a-rex/test.cpp:136 msgid "Response is not expected WS-RP" msgstr "" #: src/services/a-rex/update_credentials.cpp:29 #, c-format msgid "" "UpdateCredentials: request = \n" "%s" msgstr "" #: src/services/a-rex/update_credentials.cpp:35 msgid "UpdateCredentials: missing Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:43 msgid "UpdateCredentials: wrong number of Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:51 msgid "UpdateCredentials: wrong number of elements inside Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:60 msgid "UpdateCredentials: EPR contains no JobID" msgstr "" #: src/services/a-rex/update_credentials.cpp:70 #, c-format msgid "UpdateCredentials: no job found: %s" msgstr "" #: src/services/a-rex/update_credentials.cpp:77 msgid "UpdateCredentials: failed to update credentials" msgstr "" #: src/services/a-rex/update_credentials.cpp:85 #, c-format msgid "" "UpdateCredentials: response = \n" "%s" msgstr "" #: src/services/cache_service/CacheService.cpp:52 msgid "No A-REX config file found in cache service configuration" msgstr "" #: src/services/cache_service/CacheService.cpp:56 #, c-format msgid "Using A-REX config file %s" msgstr "" #: src/services/cache_service/CacheService.cpp:60 #, c-format msgid "Failed to process A-REX configuration in %s" msgstr "" #: src/services/cache_service/CacheService.cpp:65 msgid "No caches defined in configuration" msgstr "" #: src/services/cache_service/CacheService.cpp:139 msgid "Empty filename returned from FileCache" msgstr "" #: src/services/cache_service/CacheService.cpp:151 #, c-format msgid "Problem accessing cache file %s: %s" msgstr "" #: src/services/cache_service/CacheService.cpp:200 #: src/services/cache_service/CacheService.cpp:472 msgid "No job ID supplied" msgstr "" #: src/services/cache_service/CacheService.cpp:209 #, c-format msgid "Bad number in priority element: %s" msgstr "" #: src/services/cache_service/CacheService.cpp:218 msgid "No username supplied" msgstr "" #: src/services/cache_service/CacheService.cpp:225 #, c-format msgid "Supplied username %s does not match mapped username %s" msgstr "" #: src/services/cache_service/CacheService.cpp:239 msgid "No session directory found" msgstr "" #: src/services/cache_service/CacheService.cpp:243 #, c-format msgid "Using session dir %s" msgstr "" #: src/services/cache_service/CacheService.cpp:247 #, c-format msgid "Failed to stat session dir %s" msgstr "" #: src/services/cache_service/CacheService.cpp:252 #, c-format msgid "Session dir %s is owned by %i, but current mapped user is %i" msgstr "" #: src/services/cache_service/CacheService.cpp:279 #, c-format msgid "Failed to access proxy of given job id %s at %s" msgstr "" #: 
src/services/cache_service/CacheService.cpp:297 #, c-format msgid "DN is %s" msgstr "" #: src/services/cache_service/CacheService.cpp:373 #, c-format msgid "Permission checking passed for url %s" msgstr "" #: src/services/cache_service/CacheService.cpp:398 #: src/services/cache_service/CacheServiceGenerator.cpp:138 #, c-format msgid "Failed to move %s to %s: %s" msgstr "" #: src/services/cache_service/CacheService.cpp:437 #, c-format msgid "Starting new DTR for %s" msgstr "" #: src/services/cache_service/CacheService.cpp:439 #, c-format msgid "Failed to start new DTR for %s" msgstr "" #: src/services/cache_service/CacheService.cpp:486 #, c-format msgid "Job %s: all files downloaded successfully" msgstr "" #: src/services/cache_service/CacheService.cpp:495 #, c-format msgid "Job %s: Some downloads failed" msgstr "" #: src/services/cache_service/CacheService.cpp:501 #, c-format msgid "Job %s: files still downloading" msgstr "" #: src/services/cache_service/CacheService.cpp:514 msgid "CacheService: Unauthorized" msgstr "" #: src/services/cache_service/CacheService.cpp:523 msgid "No local user mapping found" msgstr "" #: src/services/cache_service/CacheService.cpp:530 #: src/services/data-staging/DataDeliveryService.cpp:631 #, c-format msgid "Identity is %s" msgstr "" #: src/services/cache_service/CacheService.cpp:595 msgid "Only POST is supported in CacheService" msgstr "" #: src/services/cache_service/CacheServiceGenerator.cpp:88 #, c-format msgid "DTR %s finished with state %s" msgstr "" #: src/services/cache_service/CacheServiceGenerator.cpp:127 #, c-format msgid "Could not determine session directory from filename %s" msgstr "" #: src/services/cache_service/CacheServiceGenerator.cpp:168 #, c-format msgid "Invalid DTR for source %s, destination %s" msgstr "" #: src/services/cache_service/CacheServiceGenerator.cpp:210 #, c-format msgid "DTRs still running for job %s" msgstr "" #: src/services/cache_service/CacheServiceGenerator.cpp:219 #, c-format msgid "All DTRs finished for job %s" msgstr "" #: src/services/cache_service/CacheServiceGenerator.cpp:226 #, c-format msgid "Job %s not found" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:58 #, c-format msgid "Archiving DTR %s, state ERROR" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:62 #, c-format msgid "Archiving DTR %s, state %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:166 msgid "No delegation token in request" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:174 msgid "Failed to accept delegation" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:203 #: src/services/data-staging/DataDeliveryService.cpp:210 msgid "ErrorDescription" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:215 #, c-format msgid "All %u process slots used" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:230 #, c-format msgid "Received retry for DTR %s still in transfer" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:237 #, c-format msgid "Replacing DTR %s in state %s with new request" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:248 #, c-format msgid "Storing temp proxy at %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:256 #, c-format msgid "Failed to create temp proxy at %s: %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:263 #, c-format msgid "Failed to change owner of temp proxy at %s to %i:%i: %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:289 msgid "Invalid 
DTR" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:294 #, c-format msgid "Failed to remove temporary proxy %s: %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:395 #, c-format msgid "No such DTR %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:406 #, c-format msgid "DTR %s failed: %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:417 #, c-format msgid "DTR %s finished successfully" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:427 #, c-format msgid "DTR %s still in progress (%lluB transferred)" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:488 #, c-format msgid "No active DTR %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:498 #, c-format msgid "DTR %s was already cancelled" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:507 #, c-format msgid "DTR %s could not be cancelled" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:551 #, c-format msgid "Failed to get load average: %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:575 msgid "Invalid configuration - no allowed IP address specified" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:579 msgid "Invalid configuration - no allowed dirs specified" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:590 msgid "Failed to start archival thread" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:615 msgid "Shutting down data delivery service" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:624 msgid "Unauthorized" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:710 msgid "Only POST is supported in DataDeliveryService" msgstr "" #: src/services/examples/echo_python/EchoService.py:12 msgid "EchoService (python) constructor called" msgstr "" #: src/services/examples/echo_python/EchoService.py:17 #, python-format msgid "EchoService (python) has prefix %(prefix)s and suffix %(suffix)s" msgstr "" #: src/services/examples/echo_python/EchoService.py:24 msgid "EchoService (python) destructor called" msgstr "" #: src/services/examples/echo_python/EchoService.py:54 msgid "EchoService (python) thread test starting" msgstr "" #: src/services/examples/echo_python/EchoService.py:65 #, python-format msgid "EchoService (python) thread test, iteration %(iteration)s %(status)s" msgstr "" #: src/services/examples/echo_python/EchoService.py:91 msgid "EchoService (python) 'Process' called" msgstr "" #: src/services/examples/echo_python/EchoService.py:95 #, python-format msgid "inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:96 #, python-format msgid "inmsg.Attributes().getAll() = %s " msgstr "" #: src/services/examples/echo_python/EchoService.py:97 #, python-format msgid "EchoService (python) got: %s " msgstr "" #: src/services/examples/echo_python/EchoService.py:102 #, python-format msgid "EchoService (python) request_namespace: %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:108 #: src/services/examples/echo_python/EchoService.py:177 #, python-format msgid "outpayload %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:137 msgid "Calling https://localhost:60000/Echo using ClientSOAP" msgstr "" #: src/services/examples/echo_python/EchoService.py:140 msgid "Calling http://localhost:60000/Echo using ClientSOAP" msgstr "" #: src/services/examples/echo_python/EchoService.py:146 #: src/services/examples/echo_python/EchoService.py:161 #, 
python-format msgid "new_payload %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:155 msgid "Calling http://localhost:60000/Echo using httplib" msgstr "" #: src/services/examples/echo_python/EchoService.py:171 msgid "Start waiting 10 sec..." msgstr "" #: src/services/examples/echo_python/EchoService.py:173 msgid "Waiting ends." msgstr "" #: src/services/gridftpd/auth/auth.cpp:312 #, c-format msgid "Unknown authorization command %s" msgstr "" #: src/services/gridftpd/auth/auth.cpp:330 #, c-format msgid "" "The [vo] section labeled '%s' has no file associated and can't be used for " "matching" msgstr "" #: src/services/gridftpd/auth/auth_ldap.cpp:56 #, c-format msgid "Connecting to %s:%i" msgstr "" #: src/services/gridftpd/auth/auth_ldap.cpp:57 #, c-format msgid "Querying at %s" msgstr "" #: src/services/gridftpd/auth/auth_ldap.cpp:62 #, c-format msgid "Failed to query LDAP server %s" msgstr "" #: src/services/gridftpd/auth/auth_ldap.cpp:69 #, c-format msgid "Failed to get results from LDAP server %s" msgstr "" #: src/services/gridftpd/auth/auth_ldap.cpp:83 msgid "LDAP authorization is not supported" msgstr "" #: src/services/gridftpd/auth/auth_plugin.cpp:73 #: src/services/gridftpd/auth/unixmap.cpp:254 #, c-format msgid "Plugin %s failed to run" msgstr "" #: src/services/gridftpd/auth/auth_plugin.cpp:75 #: src/services/gridftpd/auth/unixmap.cpp:256 #, c-format msgid "Plugin %s printed: %u" msgstr "" #: src/services/gridftpd/auth/auth_plugin.cpp:76 #: src/services/gridftpd/auth/unixmap.cpp:257 #, c-format msgid "Plugin %s error: %u" msgstr "" #: src/services/gridftpd/auth/auth_voms.cpp:28 #, c-format msgid "VOMS proxy processing returns: %i - %s" msgstr "" #: src/services/gridftpd/auth/auth_voms.cpp:120 #, c-format msgid "VOMS trust chains: %s" msgstr "" #: src/services/gridftpd/commands.cpp:46 #, c-format msgid "response: %s" msgstr "" #: src/services/gridftpd/commands.cpp:50 #, c-format msgid "Send response failed: %s" msgstr "" #: src/services/gridftpd/commands.cpp:80 msgid "Response sending error" msgstr "" #: src/services/gridftpd/commands.cpp:93 msgid "Closed connection" msgstr "" #: src/services/gridftpd/commands.cpp:131 #, c-format msgid "Socket conversion failed: %s" msgstr "" #: src/services/gridftpd/commands.cpp:141 #, c-format msgid "Failed to obtain own address: %s" msgstr "" #: src/services/gridftpd/commands.cpp:149 #, c-format msgid "Failed to recognize own address type (IPv4 ir IPv6) - %u" msgstr "" #: src/services/gridftpd/commands.cpp:159 #, c-format msgid "Accepted connection on [%s]:%u" msgstr "" #: src/services/gridftpd/commands.cpp:161 #, c-format msgid "Accepted connection on %u.%u.%u.%u:%u" msgstr "" #: src/services/gridftpd/commands.cpp:196 msgid "Accept failed" msgstr "" #: src/services/gridftpd/commands.cpp:204 #: src/services/gridftpd/listener.cpp:415 #, c-format msgid "Accept failed: %s" msgstr "" #: src/services/gridftpd/commands.cpp:219 #, c-format msgid "Accepted connection from [%s]:%u" msgstr "" #: src/services/gridftpd/commands.cpp:221 #, c-format msgid "Accepted connection from %u.%u.%u.%u:%u" msgstr "" #: src/services/gridftpd/commands.cpp:230 msgid "Authenticate in commands failed" msgstr "" #: src/services/gridftpd/commands.cpp:239 msgid "Authentication failure" msgstr "" #: src/services/gridftpd/commands.cpp:247 #, c-format msgid "User subject: %s" msgstr "" #: src/services/gridftpd/commands.cpp:248 #, c-format msgid "Encrypted: %s" msgstr "" #: src/services/gridftpd/commands.cpp:254 msgid "User has no proper configuration associated" 
msgstr "" #: src/services/gridftpd/commands.cpp:262 msgid "" "User has empty virtual directory tree.\n" "Either user has no authorised plugins or there are no plugins configured at " "all." msgstr "" #: src/services/gridftpd/commands.cpp:279 msgid "Read commands in authenticate failed" msgstr "" #: src/services/gridftpd/commands.cpp:410 msgid "Control connection (probably) closed" msgstr "" #: src/services/gridftpd/commands.cpp:444 #: src/services/gridftpd/commands.cpp:723 msgid "Command EPRT" msgstr "" #: src/services/gridftpd/commands.cpp:445 #, c-format msgid "Failed to parse remote addres %s" msgstr "" #: src/services/gridftpd/commands.cpp:467 #, c-format msgid "Command USER %s" msgstr "" #: src/services/gridftpd/commands.cpp:474 msgid "Command CDUP" msgstr "" #: src/services/gridftpd/commands.cpp:480 #, c-format msgid "Command CWD %s" msgstr "" #: src/services/gridftpd/commands.cpp:496 #, c-format msgid "Command MKD %s" msgstr "" #: src/services/gridftpd/commands.cpp:516 #, c-format msgid "Command SIZE %s" msgstr "" #: src/services/gridftpd/commands.cpp:531 #, c-format msgid "Command SBUF: %i" msgstr "" #: src/services/gridftpd/commands.cpp:552 #, c-format msgid "Command MLST %s" msgstr "" #: src/services/gridftpd/commands.cpp:575 #, c-format msgid "Command DELE %s" msgstr "" #: src/services/gridftpd/commands.cpp:590 #, c-format msgid "Command RMD %s" msgstr "" #: src/services/gridftpd/commands.cpp:604 #, c-format msgid "Command TYPE %c" msgstr "" #: src/services/gridftpd/commands.cpp:615 #, c-format msgid "Command MODE %c" msgstr "" #: src/services/gridftpd/commands.cpp:627 msgid "Command ABOR" msgstr "" #: src/services/gridftpd/commands.cpp:640 #, c-format msgid "Command REST %s" msgstr "" #: src/services/gridftpd/commands.cpp:653 #, c-format msgid "Command EPSV %s" msgstr "" #: src/services/gridftpd/commands.cpp:655 msgid "Command SPAS" msgstr "" #: src/services/gridftpd/commands.cpp:657 msgid "Command PASV" msgstr "" #: src/services/gridftpd/commands.cpp:678 msgid "local_pasv failed" msgstr "" #: src/services/gridftpd/commands.cpp:702 msgid "local_spas failed" msgstr "" #: src/services/gridftpd/commands.cpp:725 msgid "Command PORT" msgstr "" #: src/services/gridftpd/commands.cpp:728 msgid "active_data is disabled" msgstr "" #: src/services/gridftpd/commands.cpp:737 msgid "local_port failed" msgstr "" #: src/services/gridftpd/commands.cpp:750 #, c-format msgid "Command MLSD %s" msgstr "" #: src/services/gridftpd/commands.cpp:752 #, c-format msgid "Command NLST %s" msgstr "" #: src/services/gridftpd/commands.cpp:754 #, c-format msgid "Command LIST %s" msgstr "" #: src/services/gridftpd/commands.cpp:805 #, c-format msgid "Command ERET %s" msgstr "" #: src/services/gridftpd/commands.cpp:835 #, c-format msgid "Command RETR %s" msgstr "" #: src/services/gridftpd/commands.cpp:864 #, c-format msgid "Command STOR %s" msgstr "" #: src/services/gridftpd/commands.cpp:892 #, c-format msgid "Command ALLO %i" msgstr "" #: src/services/gridftpd/commands.cpp:915 msgid "Command OPTS" msgstr "" #: src/services/gridftpd/commands.cpp:918 msgid "Command OPTS RETR" msgstr "" #: src/services/gridftpd/commands.cpp:928 #, c-format msgid "Option: %s" msgstr "" #: src/services/gridftpd/commands.cpp:972 msgid "Command NOOP" msgstr "" #: src/services/gridftpd/commands.cpp:976 msgid "Command QUIT" msgstr "" #: src/services/gridftpd/commands.cpp:986 msgid "Failed to close, deleting client" msgstr "" #: src/services/gridftpd/commands.cpp:1000 #, c-format msgid "Command DCAU: %i '%s'" msgstr "" #: 
src/services/gridftpd/commands.cpp:1028 #, c-format msgid "Command PBZS: %s" msgstr "" #: src/services/gridftpd/commands.cpp:1036 #, c-format msgid "Setting pbsz to %lu" msgstr "" #: src/services/gridftpd/commands.cpp:1052 #, c-format msgid "Command PROT: %s" msgstr "" #: src/services/gridftpd/commands.cpp:1077 #, c-format msgid "Command MDTM %s" msgstr "" #: src/services/gridftpd/commands.cpp:1099 #, c-format msgid "Raw command: %s" msgstr "" #: src/services/gridftpd/commands.cpp:1147 msgid "Failed to allocate memory for buffer" msgstr "" #: src/services/gridftpd/commands.cpp:1154 #, c-format msgid "Allocated %u buffers %llu bytes each." msgstr "" #: src/services/gridftpd/commands.cpp:1161 msgid "abort_callback: start" msgstr "" #: src/services/gridftpd/commands.cpp:1164 #, c-format msgid "abort_callback: Globus error: %s" msgstr "" #: src/services/gridftpd/commands.cpp:1178 msgid "make_abort: start" msgstr "" #: src/services/gridftpd/commands.cpp:1190 msgid "Failed to abort data connection - ignoring and recovering" msgstr "" #: src/services/gridftpd/commands.cpp:1198 msgid "make_abort: wait for abort flag to be reset" msgstr "" #: src/services/gridftpd/commands.cpp:1208 msgid "make_abort: leaving" msgstr "" #: src/services/gridftpd/commands.cpp:1223 msgid "check_abort: have Globus error" msgstr "" #: src/services/gridftpd/commands.cpp:1224 msgid "Abort request caused by transfer error" msgstr "" #: src/services/gridftpd/commands.cpp:1227 msgid "check_abort: sending 426" msgstr "" #: src/services/gridftpd/commands.cpp:1248 msgid "Abort request caused by error in transfer function" msgstr "" #: src/services/gridftpd/commands.cpp:1330 msgid "Failed to start timer thread - timeout won't work" msgstr "" #: src/services/gridftpd/commands.cpp:1382 msgid "Killing connection due to timeout" msgstr "" #: src/services/gridftpd/conf/conf_vo.cpp:25 #: src/services/gridftpd/conf/conf_vo.cpp:51 #: src/services/gridftpd/conf/conf_vo.cpp:69 #: src/services/gridftpd/conf/conf_vo.cpp:81 msgid "" "Configuration section [vo] is missing name. Check for presence of name= or " "vo= option." 
msgstr "" #: src/services/gridftpd/conf/daemon.cpp:60 #: src/services/gridftpd/conf/daemon.cpp:183 #, c-format msgid "No such user: %s" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:72 #: src/services/gridftpd/conf/daemon.cpp:195 #, c-format msgid "No such group: %s" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:85 #: src/services/gridftpd/conf/daemon.cpp:208 #, c-format msgid "Improper debug level '%s'" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:127 msgid "Missing option for command daemon" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:132 msgid "Wrong option in daemon" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:142 #, c-format msgid "Improper size of log '%s'" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:151 #, c-format msgid "Improper number of logs '%s'" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:157 #, c-format msgid "Improper argument for logsize '%s'" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:164 msgid "Missing option for command logreopen" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:169 msgid "Wrong option in logreopen" msgstr "" #: src/services/gridftpd/conf/daemon.cpp:253 #, c-format msgid "Failed to open log file %s" msgstr "" #: src/services/gridftpd/conf/environment.cpp:175 msgid "" "Central configuration file is missing at guessed location:\n" " /etc/arc.conf\n" "Use ARC_CONFIG variable for non-standard location" msgstr "" #: src/services/gridftpd/datalist.cpp:101 msgid "Closing channel (list)" msgstr "" #: src/services/gridftpd/datalist.cpp:157 msgid "Data channel connected (list)" msgstr "" #: src/services/gridftpd/dataread.cpp:24 msgid "data_connect_retrieve_callback" msgstr "" #: src/services/gridftpd/dataread.cpp:30 msgid "Data channel connected (retrieve)" msgstr "" #: src/services/gridftpd/dataread.cpp:37 msgid "data_connect_retrieve_callback: allocate_data_buffer" msgstr "" #: src/services/gridftpd/dataread.cpp:40 msgid "data_connect_retrieve_callback: allocate_data_buffer failed" msgstr "" #: src/services/gridftpd/dataread.cpp:48 #, c-format msgid "data_connect_retrieve_callback: check for buffer %u" msgstr "" #: src/services/gridftpd/dataread.cpp:61 #, c-format msgid "Closing channel (retrieve) due to local read error :%s" msgstr "" #: src/services/gridftpd/dataread.cpp:75 #: src/services/gridftpd/dataread.cpp:172 msgid "Buffer registration failed" msgstr "" #: src/services/gridftpd/dataread.cpp:88 msgid "data_retrieve_callback" msgstr "" #: src/services/gridftpd/dataread.cpp:96 #, c-format msgid "Data channel (retrieve) %i %i %i" msgstr "" #: src/services/gridftpd/dataread.cpp:104 msgid "Closing channel (retrieve)" msgstr "" #: src/services/gridftpd/dataread.cpp:110 #: src/services/gridftpd/datawrite.cpp:128 #, c-format msgid "Time spent waiting for network: %.3f ms" msgstr "" #: src/services/gridftpd/dataread.cpp:111 #: src/services/gridftpd/datawrite.cpp:129 #, c-format msgid "Time spent waiting for disc: %.3f ms" msgstr "" #: src/services/gridftpd/dataread.cpp:122 msgid "data_retrieve_callback: lost buffer" msgstr "" #: src/services/gridftpd/dataread.cpp:158 #, c-format msgid "Closing channel (retrieve) due to local read error: %s" msgstr "" #: src/services/gridftpd/datawrite.cpp:24 msgid "data_connect_store_callback" msgstr "" #: src/services/gridftpd/datawrite.cpp:30 msgid "Data channel connected (store)" msgstr "" #: src/services/gridftpd/datawrite.cpp:57 msgid "Failed to register any buffer" msgstr "" #: src/services/gridftpd/datawrite.cpp:76 #, c-format msgid "Data channel (store) %i %i %i" msgstr 
"" #: src/services/gridftpd/datawrite.cpp:89 msgid "data_store_callback: lost buffer" msgstr "" #: src/services/gridftpd/datawrite.cpp:105 #, c-format msgid "Closing channel (store) due to error: %s" msgstr "" #: src/services/gridftpd/datawrite.cpp:115 msgid "Closing channel (store)" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:55 msgid "Can't parse access rights in configuration line" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:61 msgid "Can't parse user:group in configuration line" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:68 msgid "Can't recognize user in configuration line" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:77 msgid "Can't recognize group in configuration line" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:84 #: src/services/gridftpd/fileplugin/fileplugin.cpp:89 msgid "Can't parse or:and in configuration line" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:116 msgid "Can't parse configuration line" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:120 #, c-format msgid "Bad directory name: %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:137 msgid "Can't parse create arguments in configuration line" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:146 msgid "Can't parse mkdir arguments in configuration line" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:163 #, c-format msgid "Bad subcommand in configuration line: %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:175 msgid "Bad mount directory specified" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:177 #, c-format msgid "Mount point %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:215 #: src/services/gridftpd/fileplugin/fileplugin.cpp:273 #, c-format msgid "mkdir failed: %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:226 #, c-format msgid "Warning: mount point %s creation failed." 
msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:329 #, c-format msgid "plugin: open: %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:377 #: src/services/gridftpd/fileplugin/fileplugin.cpp:414 msgid "Not enough space to store file" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:428 #, c-format msgid "open: changing owner for %s, %i, %i" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:435 #, c-format msgid "open: owner: %i %i" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:444 #: src/services/gridftpd/fileplugin/fileplugin.cpp:484 #, c-format msgid "Unknown open mode %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:449 msgid "plugin: close" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:490 msgid "plugin: read" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:496 msgid "Error while reading file" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:506 msgid "plugin: write" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:517 msgid "Zero bytes written to file" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:725 #, c-format msgid "plugin: checkdir: %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:728 #, c-format msgid "plugin: checkdir: access: %s" msgstr "" #: src/services/gridftpd/fileplugin/fileplugin.cpp:737 #, c-format msgid "plugin: checkdir: access: allowed: %s" msgstr "" #: src/services/gridftpd/fileroot.cpp:14 #, c-format msgid "No plugin is configured or authorised for requested path %s" msgstr "" #: src/services/gridftpd/fileroot.cpp:19 msgid "FilePlugin: more unload than load" msgstr "" #: src/services/gridftpd/fileroot.cpp:34 #, c-format msgid "Can't load plugin %s for access point %s" msgstr "" #: src/services/gridftpd/fileroot.cpp:39 src/services/gridftpd/fileroot.cpp:43 #, c-format msgid "Plugin %s for access point %s is broken." msgstr "" #: src/services/gridftpd/fileroot.cpp:47 #, c-format msgid "Plugin %s for access point %s acquire failed (should never happen)." 
msgstr "" #: src/services/gridftpd/fileroot.cpp:54 #, c-format msgid "Destructor with dlclose (%s)" msgstr "" #: src/services/gridftpd/fileroot.cpp:77 #, c-format msgid "FileNode: operator= (%s <- %s) %lu <- %lu" msgstr "" #: src/services/gridftpd/fileroot.cpp:79 msgid "Copying with dlclose" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:32 #: src/services/gridftpd/fileroot_config.cpp:596 msgid "configuration file not found" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:51 msgid "Wrong port number in configuration" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:60 msgid "Wrong maxconnections number in configuration" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:69 msgid "Wrong defaultbuffer number in configuration" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:78 msgid "Wrong maxbuffer number in configuration" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:110 #: src/services/gridftpd/fileroot_config.cpp:118 #, c-format msgid "Can't resolve host %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:152 #: src/services/gridftpd/fileroot_config.cpp:455 #, c-format msgid "couldn't open file %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:167 #: src/services/gridftpd/fileroot_config.cpp:183 #: src/services/gridftpd/fileroot_config.cpp:469 #, c-format msgid "improper attribute for encryption command: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:177 #: src/services/gridftpd/fileroot_config.cpp:479 #: src/services/gridftpd/fileroot_config.cpp:622 msgid "unknown (non-gridmap) user is not allowed" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:207 #: src/services/gridftpd/fileroot_config.cpp:547 #, c-format msgid "Failed processing authorization group %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:216 msgid "couldn't process VO configuration" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:223 #: src/services/gridftpd/fileroot_config.cpp:231 #: src/services/gridftpd/fileroot_config.cpp:239 #: src/services/gridftpd/fileroot_config.cpp:500 #: src/services/gridftpd/fileroot_config.cpp:508 #: src/services/gridftpd/fileroot_config.cpp:516 #, c-format msgid "failed while processing configuration command: %s %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:281 #, c-format msgid "can't parse configuration line: %s %s %s %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:286 #, c-format msgid "bad directory in plugin command: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:298 #: src/services/gridftpd/fileroot_config.cpp:405 #, c-format msgid "Already have directory: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:307 #: src/services/gridftpd/fileroot_config.cpp:408 #, c-format msgid "Registering directory: %s with plugin: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:312 #: src/services/gridftpd/fileroot_config.cpp:421 #, c-format msgid "file node creation failed: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:330 #, c-format msgid "improper attribute for allowactvedata command: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:335 #, c-format msgid "unsupported configuration command: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:359 msgid "Could not determine hostname from gethostname()" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:375 msgid "unnamed group" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:384 msgid "undefined plugin" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:388 msgid 
"undefined virtual plugin path" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:393 #, c-format msgid "bad directory for plugin: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:485 #, c-format msgid "improper attribute for allowunknown command: %s" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:607 msgid "failed to process client identification" msgstr "" #: src/services/gridftpd/fileroot_config.cpp:641 #, c-format msgid "Registering dummy directory: %s" msgstr "" #: src/services/gridftpd/listener.cpp:57 #: src/services/gridftpd/listener.cpp:466 msgid "Activation failed" msgstr "" #: src/services/gridftpd/listener.cpp:66 #: src/services/gridftpd/listener.cpp:172 msgid "Child exited" msgstr "" #: src/services/gridftpd/listener.cpp:78 msgid "Globus connection error" msgstr "" #: src/services/gridftpd/listener.cpp:80 #: src/services/gridftpd/listener.cpp:424 msgid "New connection" msgstr "" #: src/services/gridftpd/listener.cpp:87 msgid "Server stopped" msgstr "" #: src/services/gridftpd/listener.cpp:157 msgid "Error: failed to set handler for SIGTERM" msgstr "" #: src/services/gridftpd/listener.cpp:161 msgid "Starting controlled process" msgstr "" #: src/services/gridftpd/listener.cpp:164 msgid "fork failed" msgstr "" #: src/services/gridftpd/listener.cpp:169 msgid "wait failed - killing child" msgstr "" #: src/services/gridftpd/listener.cpp:174 msgid "Killed with signal: " msgstr "" #: src/services/gridftpd/listener.cpp:176 msgid "Restarting after segmentation violation." msgstr "" #: src/services/gridftpd/listener.cpp:177 msgid "Waiting 1 minute" msgstr "" #: src/services/gridftpd/listener.cpp:239 msgid "Error: failed to set handler for SIGCHLD" msgstr "" #: src/services/gridftpd/listener.cpp:256 msgid "Missing argument" msgstr "" #: src/services/gridftpd/listener.cpp:257 msgid "Unknown option" msgstr "" #: src/services/gridftpd/listener.cpp:264 msgid "Wrong port number" msgstr "" #: src/services/gridftpd/listener.cpp:274 msgid "Wrong number of connections" msgstr "" #: src/services/gridftpd/listener.cpp:281 msgid "Wrong buffer size" msgstr "" #: src/services/gridftpd/listener.cpp:288 msgid "Wrong maximal buffer size" msgstr "" #: src/services/gridftpd/listener.cpp:300 msgid "Failed reading configuration" msgstr "" #: src/services/gridftpd/listener.cpp:331 #, c-format msgid "Failed to obtain local address: %s" msgstr "" #: src/services/gridftpd/listener.cpp:338 #, c-format msgid "Failed to create socket(%s): %s" msgstr "" #: src/services/gridftpd/listener.cpp:352 #, c-format msgid "Failed to limit socket to IPv6: %s" msgstr "" #: src/services/gridftpd/listener.cpp:359 #, c-format msgid "Failed to bind socket(%s): %s" msgstr "" #: src/services/gridftpd/listener.cpp:364 #, c-format msgid "Failed to listen on socket(%s): %s" msgstr "" #: src/services/gridftpd/listener.cpp:371 msgid "Not listening to anything" msgstr "" #: src/services/gridftpd/listener.cpp:374 #, c-format msgid "Some addresses failed. Listening on %u of %u." 
msgstr "" #: src/services/gridftpd/listener.cpp:382 #: src/services/gridftpd/listener.cpp:477 msgid "Listen started" msgstr "" #: src/services/gridftpd/listener.cpp:395 msgid "No valid handles left for listening" msgstr "" #: src/services/gridftpd/listener.cpp:401 #, c-format msgid "Select failed: %s" msgstr "" #: src/services/gridftpd/listener.cpp:422 #, c-format msgid "Have connections: %i, max: %i" msgstr "" #: src/services/gridftpd/listener.cpp:427 #, c-format msgid "Fork failed: %s" msgstr "" #: src/services/gridftpd/listener.cpp:445 msgid "Refusing connection: Connection limit exceeded" msgstr "" #: src/services/gridftpd/listener.cpp:471 msgid "Init failed" msgstr "" #: src/services/gridftpd/listener.cpp:474 msgid "Listen failed" msgstr "" #: src/services/gridftpd/listener.cpp:488 msgid "Listen finished" msgstr "" #: src/services/gridftpd/listener.cpp:493 msgid "Stopping server" msgstr "" #: src/services/gridftpd/listener.cpp:497 msgid "Destroying handle" msgstr "" #: src/services/gridftpd/listener.cpp:500 msgid "Deactivating modules" msgstr "" #: src/services/gridftpd/listener.cpp:508 msgid "Exiting" msgstr "" #: src/services/gridftpd/misc/ldapquery.cpp:253 #, c-format msgid "%s: %s:%i" msgstr "" #: src/services/gridftpd/misc/ldapquery.cpp:390 #: src/services/gridftpd/misc/ldapquery.cpp:468 #, c-format msgid "%s %s" msgstr "" #: src/services/gridftpd/misc/ldapquery.cpp:394 #, c-format msgid " %s: %s" msgstr "" #: src/services/gridftpd/misc/ldapquery.cpp:396 #, c-format msgid " %s:" msgstr "" #: src/services/gridftpd/userspec.cpp:48 #, c-format msgid "Mapfile is missing at %s" msgstr "" #: src/services/gridftpd/userspec.cpp:89 #: src/services/gridftpd/userspec.cpp:215 msgid "There is no local mapping for user" msgstr "" #: src/services/gridftpd/userspec.cpp:92 #: src/services/gridftpd/userspec.cpp:219 msgid "There is no local name for user" msgstr "" #: src/services/gridftpd/userspec.cpp:142 #: src/services/gridftpd/userspec.cpp:233 msgid "No proxy provided" msgstr "" #: src/services/gridftpd/userspec.cpp:144 #, c-format msgid "Proxy/credentials stored at %s" msgstr "" #: src/services/gridftpd/userspec.cpp:147 #: src/services/gridftpd/userspec.cpp:238 #, c-format msgid "Initially mapped to local user: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:150 #: src/services/gridftpd/userspec.cpp:340 #, c-format msgid "Local user %s does not exist" msgstr "" #: src/services/gridftpd/userspec.cpp:155 #: src/services/gridftpd/userspec.cpp:246 #, c-format msgid "Initially mapped to local group: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:158 #: src/services/gridftpd/userspec.cpp:249 #: src/services/gridftpd/userspec.cpp:349 #, c-format msgid "Local group %s does not exist" msgstr "" #: src/services/gridftpd/userspec.cpp:167 #: src/services/gridftpd/userspec.cpp:258 msgid "Running user has no name" msgstr "" #: src/services/gridftpd/userspec.cpp:170 #: src/services/gridftpd/userspec.cpp:261 #, c-format msgid "Mapped to running user: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:180 #: src/services/gridftpd/userspec.cpp:271 #, c-format msgid "Mapped to local id: %i" msgstr "" #: src/services/gridftpd/userspec.cpp:185 #: src/services/gridftpd/userspec.cpp:276 #, c-format msgid "No group %i for mapped user" msgstr "" #: src/services/gridftpd/userspec.cpp:194 #: src/services/gridftpd/userspec.cpp:285 #, c-format msgid "Mapped to local group id: %i" msgstr "" #: src/services/gridftpd/userspec.cpp:195 #: src/services/gridftpd/userspec.cpp:286 #, c-format msgid "Mapped to local group 
name: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:196 #: src/services/gridftpd/userspec.cpp:287 #, c-format msgid "Mapped user's home: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:235 #, c-format msgid "Proxy stored at %s" msgstr "" #: src/services/gridftpd/userspec.cpp:241 msgid "Local user does not exist" msgstr "" #: src/services/gridftpd/userspec.cpp:317 #, c-format msgid "Undefined control sequence: %%%s" msgstr "" #: src/services/gridftpd/userspec.cpp:354 #, c-format msgid "Remapped to local user: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:355 #, c-format msgid "Remapped to local id: %i" msgstr "" #: src/services/gridftpd/userspec.cpp:356 #, c-format msgid "Remapped to local group id: %i" msgstr "" #: src/services/gridftpd/userspec.cpp:357 #, c-format msgid "Remapped to local group name: %s" msgstr "" #: src/services/gridftpd/userspec.cpp:358 #, c-format msgid "Remapped user's home: %s" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:33 #, c-format msgid "config: %s, class name: %s" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:42 msgid "libjvm.so not loadable - check your LD_LIBRARY_PATH" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:52 msgid "libjvm.so does not contain the expected symbols" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:66 msgid "JVM started" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:71 #, c-format msgid "There is no service: %s in your Java class search path" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:79 msgid "There is no constructor function" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:86 #, c-format msgid "%s constructed" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:90 msgid "Destroy JVM" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:183 msgid "Cannot find MCC_Status object" msgstr "" #: src/services/wrappers/java/javawrapper.cpp:197 msgid "Java object returned NULL status" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:107 #, c-format msgid "Loading %u-th Python service" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:111 #, c-format msgid "Initialized %u-th Python service" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:147 msgid "Invalid class name" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:152 #, c-format msgid "class name: %s" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:153 #, c-format msgid "module name: %s" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:210 msgid "Cannot find ARC Config class" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:217 msgid "Config class is not an object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:225 msgid "Cannot get dictionary of module" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:234 msgid "Cannot find service class" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:243 msgid "Cannot create config argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:250 msgid "Cannot convert config to Python object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:273 #, c-format msgid "%s is not an object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:279 msgid "Message class is not an object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:287 msgid "Python Wrapper constructor succeeded" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:303 #, c-format msgid "Python Wrapper destructor (%d)" msgstr "" #: 
src/services/wrappers/python/pythonwrapper.cpp:336 msgid "Python interpreter locked" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:340 msgid "Python interpreter released" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:400 msgid "Python wrapper process called" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:409 msgid "Failed to create input SOAP container" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:419 msgid "Cannot create inmsg argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:433 msgid "Cannot find ARC Message class" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:439 msgid "Cannot convert inmsg to Python object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:448 msgid "Failed to create SOAP containers" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:454 msgid "Cannot create outmsg argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:460 msgid "Cannot convert outmsg to Python object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:516 msgid "Failed to create XMLNode container" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:533 msgid "Cannot find ARC XMLNode class" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:539 msgid "Cannot create doc argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:545 msgid "Cannot convert doc to Python object" msgstr "" #: src/tests/client/test_ClientInterface.cpp:36 #: src/tests/client/test_ClientSAML2SSO.cpp:68 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:78 #: src/tests/echo/test_clientinterface.cpp:41 #: src/tests/echo/test_clientinterface.cpp:132 #: src/tests/echo/test_clientinterface.py:9 msgid "Creating a soap client" msgstr "" #: src/tests/client/test_ClientInterface.cpp:84 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:97 #: src/tests/echo/test_clientinterface.cpp:78 msgid "SOAP invokation failed" msgstr "" #: src/tests/client/test_ClientSAML2SSO.cpp:44 #: src/tests/echo/test_clientinterface.cpp:100 msgid "Creating a http client" msgstr "" #: src/tests/client/test_ClientSAML2SSO.cpp:55 #: src/tests/echo/test_clientinterface.cpp:117 msgid "HTTP with SAML2SSO invokation failed" msgstr "" #: src/tests/client/test_ClientSAML2SSO.cpp:59 #: src/tests/echo/test_clientinterface.cpp:121 msgid "There was no HTTP response" msgstr "" #: src/tests/client/test_ClientSAML2SSO.cpp:77 #: src/tests/echo/test_clientinterface.cpp:145 msgid "SOAP with SAML2SSO invokation failed" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:37 #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:38 #: src/tests/delegation/test_delegation_client.cpp:45 #: src/tests/delegation/test_delegation_client.cpp:76 #: src/tests/echo/test_clientinterface.cpp:172 #: src/tests/echo/test_clientinterface.cpp:194 msgid "Creating a delegation soap client" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:46 #: src/tests/delegation/test_delegation_client.cpp:51 #: src/tests/echo/test_clientinterface.cpp:178 msgid "Delegation to ARC delegation service failed" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:50 #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:49 #: src/tests/delegation/test_delegation_client.cpp:56 #: src/tests/delegation/test_delegation_client.cpp:88 #: src/tests/echo/test_clientinterface.cpp:182 #: src/tests/echo/test_clientinterface.cpp:205 #, c-format msgid "Delegation ID: %s" msgstr "" #: 
src/tests/client/test_ClientX509Delegation_ARC.cpp:55 #, c-format msgid "Can not get the delegation credential: %s from delegation service:%s" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:58 #, c-format msgid "Delegated credential from delegation service: %s" msgstr "" #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:45 #: src/tests/delegation/test_delegation_client.cpp:83 #: src/tests/echo/test_clientinterface.cpp:201 msgid "Delegation to gridsite delegation service failed" msgstr "" #: src/tests/count/count.cpp:58 msgid "Input is not SOAP" msgstr "" #: src/tests/count/count.cpp:89 src/tests/echo/echo.cpp:83 msgid "echo: Unauthorized" msgstr "" #: src/tests/count/count.cpp:98 src/tests/count/count.cpp:104 #, c-format msgid "Request is not supported - %s" msgstr "" #: src/tests/count/test_service.cpp:33 src/tests/echo/test_service.cpp:30 msgid "Service is waiting for requests" msgstr "" #: src/tests/echo/test.cpp:32 msgid "Creating client interface" msgstr "" #: src/tests/echo/test_clientinterface.py:27 msgid "SOAP invocation failed" msgstr "" nordugrid-arc-5.4.2/po/PaxHeaders.7502/Makevars0000644000000000000000000000012711220163762017312 xustar000000000000000027 mtime=1245767666.402411 30 atime=1513200658.492729051 30 ctime=1513200668.594852604 nordugrid-arc-5.4.2/po/Makevars0000644000175000002070000000034611220163762017357 0ustar00mockbuildmock00000000000000DOMAIN = $(PACKAGE) subdir = po top_builddir = .. XGETTEXT_OPTIONS = -kmsg:2 -kIString:1 -kistring:1 -kFindNTrans:1,2 COPYRIGHT_HOLDER = NorduGrid collaboration MSGID_BUGS_ADDRESS = support@nordugrid.org EXTRA_LOCALE_CATEGORIES = nordugrid-arc-5.4.2/po/PaxHeaders.7502/ru.po0000644000000000000000000000013213214316032016573 xustar000000000000000030 mtime=1513200666.323824829 30 atime=1513200668.310849131 30 ctime=1513200668.599852665 nordugrid-arc-5.4.2/po/ru.po0000644000175000002070000433222313214316032016652 0ustar00mockbuildmock00000000000000# translation of Arc.po to Russian # Oxana Smirnova , 2007. # Translation file for the Advanced Resource Connector (Arc) msgid "" msgstr "" "Project-Id-Version: Arc\n" "Report-Msgid-Bugs-To: support@nordugrid.org\n" "POT-Creation-Date: 2017-12-13 22:31+0100\n" "PO-Revision-Date: 2017-09-19 15:16+0200\n" "Last-Translator: Oxana Smirnova \n" "Language-Team: Russian\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Language: ru\n" "X-Generator: Poedit 1.8.7.1\n" "Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%" "10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" "X-Poedit-KeywordsList: msg:2;IString:1;istring:1;FindNTrans:1,2\n" "X-Poedit-Basepath: /home/oxana/CVSROOT/ARC1\n" "X-Poedit-SearchPath-0: src\n" #: src/clients/compute/arccat.cpp:35 src/clients/compute/arcclean.cpp:34 #: src/clients/compute/arcget.cpp:35 src/clients/compute/arckill.cpp:33 #: src/clients/compute/arcrenew.cpp:32 src/clients/compute/arcresub.cpp:35 #: src/clients/compute/arcresume.cpp:32 src/clients/compute/arcstat.cpp:34 msgid "[job ...]" msgstr "[задача ...]" #: src/clients/compute/arccat.cpp:36 msgid "" "The arccat command performs the cat command on the stdout, stderr or grid\n" "manager's error log of the job." 
msgstr "" "Эта команда предназначена для вывода на экран сообщений стандартного\n" "выхода, стандартной ошибки или ошибок системы при исполнении задачи" #: src/clients/compute/arccat.cpp:43 src/clients/compute/arcclean.cpp:41 #: src/clients/compute/arcget.cpp:42 src/clients/compute/arcinfo.cpp:45 #: src/clients/compute/arckill.cpp:40 src/clients/compute/arcrenew.cpp:37 #: src/clients/compute/arcresub.cpp:40 src/clients/compute/arcresume.cpp:37 #: src/clients/compute/arcstat.cpp:42 src/clients/compute/arcsub.cpp:54 #: src/clients/compute/arcsync.cpp:147 src/clients/compute/arctest.cpp:62 #: src/clients/credentials/arcproxy.cpp:475 #: src/clients/credentials/arcproxyalt.cpp:461 src/clients/data/arccp.cpp:641 #: src/clients/data/arcls.cpp:346 src/clients/data/arcmkdir.cpp:125 #: src/clients/data/arcrename.cpp:136 src/clients/data/arcrm.cpp:151 #: src/clients/echo/arcecho.cpp:61 src/clients/saml/saml_assertion_init.cpp:62 #: src/clients/wsrf/arcwsrf.cpp:74 src/hed/daemon/unix/main_unix.cpp:346 #: src/hed/daemon/win32/main_win32.cpp:148 #: src/services/a-rex/jura/jura.cpp:109 #, c-format msgid "%s version %s" msgstr "%s, версия %s" #: src/clients/compute/arccat.cpp:52 src/clients/compute/arcclean.cpp:50 #: src/clients/compute/arcget.cpp:51 src/clients/compute/arcinfo.cpp:53 #: src/clients/compute/arckill.cpp:49 src/clients/compute/arcrenew.cpp:46 #: src/clients/compute/arcresub.cpp:49 src/clients/compute/arcresume.cpp:46 #: src/clients/compute/arcstat.cpp:51 src/clients/compute/arcsub.cpp:63 #: src/clients/compute/arcsync.cpp:156 src/clients/compute/arctest.cpp:84 #: src/clients/credentials/arcproxy.cpp:483 #: src/clients/credentials/arcproxyalt.cpp:469 src/clients/data/arccp.cpp:648 #: src/clients/data/arcls.cpp:354 src/clients/data/arcmkdir.cpp:133 #: src/clients/data/arcrename.cpp:144 src/clients/data/arcrm.cpp:160 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:172 #, c-format msgid "Running command: %s" msgstr "Выполняется команда %s" #: src/clients/compute/arccat.cpp:63 src/clients/compute/arcclean.cpp:61 #: src/clients/compute/arcget.cpp:62 src/clients/compute/arcinfo.cpp:65 #: src/clients/compute/arckill.cpp:60 src/clients/compute/arcrenew.cpp:57 #: src/clients/compute/arcresub.cpp:53 src/clients/compute/arcresume.cpp:50 #: src/clients/compute/arcstat.cpp:62 src/clients/compute/arcsub.cpp:67 #: src/clients/compute/arcsync.cpp:167 src/clients/compute/arctest.cpp:88 #: src/clients/data/arccp.cpp:671 src/clients/data/arcls.cpp:376 #: src/clients/data/arcmkdir.cpp:155 src/clients/data/arcrename.cpp:166 #: src/clients/data/arcrm.cpp:182 src/clients/echo/arcecho.cpp:72 #: src/clients/wsrf/arcwsrf.cpp:101 msgid "Failed configuration initialization" msgstr "Не удалось загрузить настройки" #: src/clients/compute/arccat.cpp:76 src/clients/compute/arcclean.cpp:74 #: src/clients/compute/arcget.cpp:88 src/clients/compute/arckill.cpp:73 #: src/clients/compute/arcrenew.cpp:70 src/clients/compute/arcresub.cpp:81 #: src/clients/compute/arcresume.cpp:70 src/clients/compute/arcstat.cpp:71 #, c-format msgid "Cannot read specified jobid file: %s" msgstr "Не удаётся прочесть указанный файл, содержащий ярлыки задач: %s" #: src/clients/compute/arccat.cpp:87 src/clients/compute/arcclean.cpp:85 #: src/clients/compute/arcget.cpp:99 src/clients/compute/arckill.cpp:84 #: src/clients/compute/arcrenew.cpp:81 src/clients/compute/arcresub.cpp:95 #: src/clients/compute/arcresume.cpp:81 src/clients/compute/arcstat.cpp:105 msgid "No jobs given" msgstr "Задачи не указаны" #: src/clients/compute/arccat.cpp:100 
src/clients/compute/arcclean.cpp:98 #: src/clients/compute/arcget.cpp:112 src/clients/compute/arckill.cpp:97 #: src/clients/compute/arcrenew.cpp:94 src/clients/compute/arcresub.cpp:105 #: src/clients/compute/arcresume.cpp:94 src/clients/compute/arcstat.cpp:117 #, c-format msgid "Job list file (%s) doesn't exist" msgstr "Файл списка задач (%s) не существует" #: src/clients/compute/arccat.cpp:107 src/clients/compute/arcclean.cpp:105 #: src/clients/compute/arcget.cpp:119 src/clients/compute/arckill.cpp:104 #: src/clients/compute/arcrenew.cpp:101 src/clients/compute/arcresub.cpp:112 #: src/clients/compute/arcresume.cpp:101 src/clients/compute/arcstat.cpp:124 #: src/clients/compute/arctest.cpp:298 #, c-format msgid "Unable to read job information from file (%s)" msgstr "Невозможно прочитать информацию о задаче из файла (%s)" #: src/clients/compute/arccat.cpp:116 src/clients/compute/arcclean.cpp:113 #: src/clients/compute/arcget.cpp:127 src/clients/compute/arckill.cpp:112 #: src/clients/compute/arcrenew.cpp:110 src/clients/compute/arcresub.cpp:120 #: src/clients/compute/arcresume.cpp:110 src/clients/compute/arcstat.cpp:133 #, c-format msgid "Warning: Job not found in job list: %s" msgstr "Предупреждение: Задача не обнаружена в списке задач: %s" #: src/clients/compute/arccat.cpp:129 src/clients/compute/arcclean.cpp:168 #: src/clients/compute/arcget.cpp:140 src/clients/compute/arckill.cpp:124 #: src/clients/compute/arcrenew.cpp:122 src/clients/compute/arcresub.cpp:132 #: src/clients/compute/arcresume.cpp:122 msgid "No jobs" msgstr "Задач нет" #: src/clients/compute/arccat.cpp:144 #, c-format msgid "Could not create temporary file \"%s\"" msgstr "Не удалось создать временный файл \"%s\"" #: src/clients/compute/arccat.cpp:145 src/clients/compute/arccat.cpp:151 #, c-format msgid "Cannot create output of %s for any jobs" msgstr "Невозможно создать выход %s ни для одной задачи" #: src/clients/compute/arccat.cpp:152 #, c-format msgid "Invalid destination URL %s" msgstr "Неверный URL цели %s" #: src/clients/compute/arccat.cpp:170 #, c-format msgid "Job deleted: %s" msgstr "Задача удалена: %s" #: src/clients/compute/arccat.cpp:180 #, c-format msgid "Job has not started yet: %s" msgstr "Исполнение задачи ещё не началось: %s" #: src/clients/compute/arccat.cpp:188 #, c-format msgid "Cannot determine the %s location: %s" msgstr "Не удаётся определить расположение %s: %s" #: src/clients/compute/arccat.cpp:196 #, c-format msgid "Cannot create output of %s for job (%s): Invalid source %s" msgstr "Невозможно создать вывод %s для задачи (%s): Недопустимый источник %s" #: src/clients/compute/arccat.cpp:206 #, c-format msgid "Catting %s for job %s" msgstr "Подцепляется %s для задачи %s" #: src/clients/compute/arcclean.cpp:35 msgid "The arcclean command removes a job from the computing resource." msgstr "" "Эта команда используется для удаления результатов работы задач\n" "с удалённого компьютера" #: src/clients/compute/arcclean.cpp:137 msgid "" "You are about to remove jobs from the job list for which no information " "could be\n" "found. NOTE: Recently submitted jobs might not have appeared in the " "information\n" "system, and this action will also remove such jobs." msgstr "" "Из списка задач будут удалены задачи, о которых не обнаружена информация.\n" "ВНИМАНИЕ: задачи, запущенные недавно, могли ещё не появиться в " "информационной\n" "системе, и эта операция удалит также эти задачи." #: src/clients/compute/arcclean.cpp:140 msgid "Are you sure you want to clean jobs missing information?" 
msgstr "Вы уверены, что хотите вычистить задачи с отсутствующей информацией?" #: src/clients/compute/arcclean.cpp:141 src/clients/compute/arcsync.cpp:189 msgid "y" msgstr "y" #: src/clients/compute/arcclean.cpp:141 src/clients/compute/arcsync.cpp:189 msgid "n" msgstr "n" #: src/clients/compute/arcclean.cpp:146 msgid "Jobs missing information will not be cleaned!" msgstr "Задачи с отсутствующей информацией не будут вычищены!" #: src/clients/compute/arcclean.cpp:162 src/clients/compute/arcresub.cpp:155 #: src/clients/compute/arcsub.cpp:208 src/clients/compute/arctest.cpp:302 #, c-format msgid "Warning: Failed to write job information to file (%s)" msgstr "Предупреждение: Сбой записи информации о задаче в файл (%s)" #: src/clients/compute/arcclean.cpp:163 msgid "" " Run 'arcclean -s Undefined' to remove cleaned jobs from job list" msgstr "" " Запустите 'arcclean -s Undefined' для удаления вычищенных задач из " "списка" #: src/clients/compute/arcclean.cpp:172 #, c-format msgid "Jobs processed: %d, deleted: %d" msgstr "Обработано задач: %d, уничтожено: %d" #: src/clients/compute/arcget.cpp:36 msgid "The arcget command is used for retrieving the results from a job." msgstr "Эта команда используется для загрузки результатов работы задач" #: src/clients/compute/arcget.cpp:76 #, c-format msgid "Job download directory from user configuration file: %s " msgstr "Каталог для загрузки задач (настройки пользователя): %s " #: src/clients/compute/arcget.cpp:79 msgid "Job download directory will be created in present working directory. " msgstr "Каталог для загрузки задачи будет создан в текущей рабочей директории" #: src/clients/compute/arcget.cpp:83 #, c-format msgid "Job download directory: %s " msgstr "Каталог для загрузки задач: %s " #: src/clients/compute/arcget.cpp:150 #, c-format msgid "Unable to create directory for storing results (%s) - %s" msgstr "Не удалось создать каталог для сохранения результатов (%s) - %s" #: src/clients/compute/arcget.cpp:160 #, c-format msgid "Results stored at: %s" msgstr "Результаты сохранены в: %s" #: src/clients/compute/arcget.cpp:172 src/clients/compute/arckill.cpp:140 msgid "Warning: Some jobs were not removed from server" msgstr "Предупреждение: некоторые задачи не были удалены с сервера" #: src/clients/compute/arcget.cpp:173 src/clients/compute/arcget.cpp:180 msgid " Use arclean to remove retrieved jobs from job list" msgstr " Используйте arclean для удаления полученных задач из списка" #: src/clients/compute/arcget.cpp:179 src/clients/compute/arckill.cpp:147 #: src/clients/compute/arcresub.cpp:185 #, c-format msgid "Warning: Failed removing jobs from file (%s)" msgstr "Предупреждение: Сбой удаления информации о задачах из файла (%s)" #: src/clients/compute/arcget.cpp:184 #, c-format msgid "" "Jobs processed: %d, successfully retrieved: %d, successfully cleaned: %d" msgstr "Обработано задач: %d, успешно получено: %d, успешно очищено: %d" #: src/clients/compute/arcget.cpp:188 #, c-format msgid "Jobs processed: %d, successfully retrieved: %d" msgstr "Обработано задач: %d, успешно получено: %d" #: src/clients/compute/arcinfo.cpp:34 msgid "[resource ...]" msgstr "[ресурс ...]" #: src/clients/compute/arcinfo.cpp:35 msgid "" "The arcinfo command is used for obtaining the status of computing resources " "on the Grid." 
msgstr "" "Команда arcinfo используется для проверки состояния вычислительных ресурсов " "на Гриде" #: src/clients/compute/arcinfo.cpp:142 msgid "Information endpoint" msgstr "Точка входа для информации" #: src/clients/compute/arcinfo.cpp:149 msgid "Submission endpoint" msgstr "Точка входа для засылки задач" #: src/clients/compute/arcinfo.cpp:151 msgid "status" msgstr "состояние" #: src/clients/compute/arcinfo.cpp:153 msgid "interface" msgstr "интерфейс" #: src/clients/compute/arcinfo.cpp:172 msgid "ERROR: Failed to retrieve information from the following endpoints:" msgstr "ОШИБКА: Не удалось получить информацию через следующие точки входа:" #: src/clients/compute/arcinfo.cpp:185 msgid "ERROR: Failed to retrieve information" msgstr "ОШИБКА: не удалось получить информацию" #: src/clients/compute/arcinfo.cpp:187 msgid "from the following endpoints:" msgstr "через следующие точки входа:" #: src/clients/compute/arckill.cpp:34 msgid "The arckill command is used to kill running jobs." msgstr "Эта команда используется для прерывания исполняющихся задач" #: src/clients/compute/arckill.cpp:141 msgid " Use arcclean to remove retrieved jobs from job list" msgstr " Используйте arcclean для удаления полученных задач из списка" #: src/clients/compute/arckill.cpp:148 msgid "" " Run 'arcclean -s Undefined' to remove killed jobs from job list" msgstr "" " Запустите 'arcclean -s Undefined' для удаления оборванных задач из " "списка" #: src/clients/compute/arckill.cpp:151 #, c-format msgid "Jobs processed: %d, successfully killed: %d, successfully cleaned: %d" msgstr "Обработано задач: %d, успешно оборвано: %d, успешно очищено: %d" #: src/clients/compute/arckill.cpp:153 #, c-format msgid "Jobs processed: %d, successfully killed: %d" msgstr "Обработано задач: %d, успешно оборвано: %d" #: src/clients/compute/arcrenew.cpp:128 #, c-format msgid "Jobs processed: %d, renewed: %d" msgstr "Обработано задач: %d, обновлено: %d" #: src/clients/compute/arcresub.cpp:75 msgid "--same and --not-same cannot be specified together." 
msgstr "--same и --not-same не могут быть заданы одновременно" #: src/clients/compute/arcresub.cpp:144 msgid "No jobs to resubmit with the specified status" msgstr "Нет задач для перезапуска в указанном состоянии" #: src/clients/compute/arcresub.cpp:151 src/clients/compute/arcsub.cpp:194 #, c-format msgid "Job submitted with jobid: %s" msgstr "Задача запущена с ярлыком: %s" #: src/clients/compute/arcresub.cpp:156 msgid " To recover missing jobs, run arcsync" msgstr " Для восполнения недостающих задач, запустите arcsync" #: src/clients/compute/arcresub.cpp:161 #, c-format msgid "Cannot write jobids to file (%s)" msgstr "Невозможно записать ярлыки задач в файл (%s)" #: src/clients/compute/arcresub.cpp:172 #, c-format msgid "" "Resubmission of job (%s) succeeded, but killing the job failed - it will " "still appear in the job list" msgstr "" "Успешно завершена перезасылка задачи (%s), но прервать задачу не удалось - " "она будет присутствовать в списке задач" #: src/clients/compute/arcresub.cpp:181 #, c-format msgid "" "Resubmission of job (%s) succeeded, but cleaning the job failed - it will " "still appear in the job list" msgstr "" "Успешно завершена перезасылка задачи (%s), но очистить задачу не удалось - " "она будет присутствовать в списке задач" #: src/clients/compute/arcresub.cpp:186 msgid " Use arcclean to remove non-existing jobs" msgstr " Используйте arcclean для удаления несуществующих задач" #: src/clients/compute/arcresub.cpp:193 msgid "Job resubmission summary:" msgstr "Сводка перезапуска задач:" #: src/clients/compute/arcresub.cpp:195 #, c-format msgid "%d of %d jobs were resubmitted" msgstr "%d из %d задач были перезапущены" #: src/clients/compute/arcresub.cpp:197 #, c-format msgid "The following %d were not resubmitted" msgstr "Следующие %d не были перезапущены" #: src/clients/compute/arcresume.cpp:128 #, c-format msgid "Jobs processed: %d, resumed: %d" msgstr "Обработано задач: %d, возобновлено: %d" #: src/clients/compute/arcstat.cpp:35 msgid "" "The arcstat command is used for obtaining the status of jobs that have\n" "been submitted to Grid enabled resources." msgstr "" "Команда arcstat используется для вывода информации о состоянии\n" "задач, отправленных на Грид " #: src/clients/compute/arcstat.cpp:79 msgid "The 'sort' and 'rsort' flags cannot be specified at the same time." msgstr "Опции 'sort' и 'rsort' не могут быть указаны одновременно" #: src/clients/compute/arcstat.cpp:149 msgid "No jobs found, try later" msgstr "Не найдено ни одной задачи, попробуйте позже" #: src/clients/compute/arcstat.cpp:176 #, c-format msgid "Status of %d jobs was queried, %d jobs returned information" msgstr "Опрошено состояние %d задач, %d задач отозвались" #: src/clients/compute/arcsub.cpp:46 msgid "[filename ...]" msgstr "[файл ...]" #: src/clients/compute/arcsub.cpp:47 msgid "" "The arcsub command is used for submitting jobs to Grid enabled computing\n" "resources." 
msgstr "" "Эта команда используется для запуска задач на вычислительные\n" "ресурсы Грид" #: src/clients/compute/arcsub.cpp:99 msgid "No job description input specified" msgstr "Не задано описание задачи" #: src/clients/compute/arcsub.cpp:112 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:499 #, c-format msgid "Can not open job description file: %s" msgstr "Невозможно открыть файл с описанием задачи: %s" #: src/clients/compute/arcsub.cpp:140 src/clients/compute/arcsub.cpp:168 msgid "Invalid JobDescription:" msgstr "Неверный элемент JobDescription:" #: src/clients/compute/arcsub.cpp:200 #, c-format msgid "Cannot write job IDs to file (%s)" msgstr "Невозможно записать ярлыки задач в файл (%s) " #: src/clients/compute/arcsub.cpp:205 src/clients/compute/arcsync.cpp:66 #, c-format msgid "Warning: Unable to open job list file (%s), unknown format" msgstr "" "Предупреждение: Невозможно открыть файл списка задач (%s), формат неизвестен" #: src/clients/compute/arcsub.cpp:210 src/clients/compute/arctest.cpp:304 msgid "To recover missing jobs, run arcsync" msgstr "Для восполнения недостающих задач, запустите arcsync" #: src/clients/compute/arcsub.cpp:217 msgid "Job submission summary:" msgstr "Сводка засылки задач:" #: src/clients/compute/arcsub.cpp:219 #, c-format msgid "%d of %d jobs were submitted" msgstr "%d из %d задач были засланы" #: src/clients/compute/arcsub.cpp:221 #, c-format msgid "The following %d were not submitted" msgstr "Следующие %d не были засланы" #: src/clients/compute/arcsub.cpp:228 msgid "Job nr." msgstr "Задача номер" #: src/clients/compute/arcsub.cpp:268 #, c-format msgid "Removing endpoint %s: It has an unrequested interface (%s)." msgstr "Удаляется точка входа %s: она содержит ненужный интерфейс (%s)." #: src/clients/compute/arcsub.cpp:282 #, c-format msgid "ERROR: Unable to load broker %s" msgstr "ОШИБКА: не удалось подгрузить планировщик %s" #: src/clients/compute/arcsub.cpp:286 msgid "" "ERROR: Job submission aborted because no resource returned any information" msgstr "" "ОШИБКА: Обрыв засылки задачи, так как ни один из ресурсов не предоставил " "информацию" #: src/clients/compute/arcsub.cpp:290 msgid "ERROR: One or multiple job descriptions was not submitted." msgstr "ОШИБКА: Одна или несколько задач не были запущены." #: src/clients/compute/arcsub.cpp:304 #, c-format msgid "" "A computing resource using the GridFTP interface was requested, but\n" "%sthe corresponding plugin could not be loaded. Is the plugin installed?\n" "%sIf not, please install the package 'nordugrid-arc-plugins-globus'.\n" "%sDepending on your type of installation the package name might differ." msgstr "" "Запрошен вычислительный ресурс, использующий интерфейс GridFTP, но " "необходимый\n" "%sподключаемый модуль не был подгружен. Устанавливали ли Вы этот модуль?\n" "%sЕсли нет, пожалуйста, установите пакет 'nordugrid-arc-plugins-globus'.\n" "%sНазвание пакета может зависеть от типа вашего дистрибутива." #: src/clients/compute/arcsub.cpp:338 src/clients/compute/arctest.cpp:236 msgid "" "Unable to adapt job description to any resource, no resource information " "could be obtained." msgstr "" "Не удалось адаптировать описание задачи ни к одному ресурсу, т.к. не " "получено никакой информации." 
#: src/clients/compute/arcsub.cpp:339 src/clients/compute/arctest.cpp:237 msgid "Original job description is listed below:" msgstr "Изначальное описание задачи приведено ниже:" #: src/clients/compute/arcsub.cpp:351 #, c-format msgid "Dumping job description aborted: Unable to load broker %s" msgstr "" "Распечатка описания задачи оборвана: Невозможно подгрузить планировщик %s" #: src/clients/compute/arcsub.cpp:368 src/clients/compute/arctest.cpp:317 #, c-format msgid "" "Unable to prepare job description according to needs of the target resource " "(%s)." msgstr "" "Невозможно адаптировать описание задачи в соответствии с требованиями " "назначения (%s)." #: src/clients/compute/arcsub.cpp:384 src/clients/compute/arctest.cpp:330 #, c-format msgid "" "An error occurred during the generation of job description to be sent to %s" msgstr "Возникла ошибка при составлении описания задачи для засылки на %s" #: src/clients/compute/arcsub.cpp:388 src/clients/compute/arctest.cpp:334 #, c-format msgid "Job description to be sent to %s:" msgstr "Описание задачи будет послано на: %s" #: src/clients/compute/arcsub.cpp:406 msgid "" "Unable to prepare job description according to needs of the target resource." msgstr "" "Невозможно адаптировать описание задачи в соответствии с требованиями " "назначения." #: src/clients/compute/arcsync.cpp:76 msgid "Found the following jobs:" msgstr "Обнаружены следующие задачи:" #: src/clients/compute/arcsync.cpp:86 msgid "Total number of jobs found: " msgstr "Количество всех обнаруженных задач:" #: src/clients/compute/arcsync.cpp:98 msgid "Found the following new jobs:" msgstr "Обнаружены следующие новые задачи:" #: src/clients/compute/arcsync.cpp:108 msgid "Total number of new jobs found: " msgstr "Количество обнаруженных новых задач:" #: src/clients/compute/arcsync.cpp:113 #, c-format msgid "ERROR: Failed to write job information to file (%s)" msgstr "ОШИБКА: Сбой записи информации о задаче в файл (%s)" #: src/clients/compute/arcsync.cpp:140 msgid "" "The arcsync command synchronizes your local job list with the information " "at\n" "the given resources or index servers." msgstr "" "Команда arcsync синхронизирует Ваш локальный список задач с информацией\n" "на заданных кластерах или каталогах ресурсов " #: src/clients/compute/arcsync.cpp:183 msgid "" "Synchronizing the local list of active jobs with the information in the\n" "information system can result in some inconsistencies. Very recently " "submitted\n" "jobs might not yet be present, whereas jobs very recently scheduled for\n" "deletion can still be present." msgstr "" "Синхронизация локального списка активных задач с информацией в системе Грид\n" "может привести к некоторым несоответствиям: только что запущенные задачи\n" "могут быть ещё не зарегистрированы в системе, тогда как только что " "удалённые\n" "задачи могут всё ещё присутствовать." #: src/clients/compute/arcsync.cpp:188 msgid "Are you sure you want to synchronize your local job list?" msgstr "Вы уверены, что хотите синхронизировать список локальных задач?" #: src/clients/compute/arcsync.cpp:193 msgid "Cancelling synchronization request" msgstr "Запрос о синхронизации отменяется" #: src/clients/compute/arcsync.cpp:203 msgid "" "No services specified. Please configure default services in the client " "configuration,or specify a cluster or index (-c or -g options, see arcsync -" "h)." msgstr "" "Не задано ни одного сервиса. 
Пожалуйста, настройте сервисы по умолчанию в " "файле настроек клиента, либо укажите явным образом ресурс или каталог " "ресурсов (опции -c или -g, см. arcsync -h)" #: src/clients/compute/arctest.cpp:55 msgid " " msgstr " " #: src/clients/compute/arctest.cpp:56 msgid "The arctest command is used for testing clusters as resources." msgstr "" "Команда arctest используется для проверки кластеров как вычислительных " "ресурсов." #: src/clients/compute/arctest.cpp:68 msgid "" "Nothing to do:\n" "you have to either specify a test job id with -J (--job)\n" "or query information about the certificates with -E (--certificate)\n" msgstr "" "Задание не указано:\n" "Вы должны либо указать номер тестового задания, используя опцию -J (--job),\n" "либо запросить информацию о сертификатах, используя опцию -E (--" "certificate)\n" #: src/clients/compute/arctest.cpp:75 msgid "" "For the 1st test job you also have to specify a runtime value with -r (--" "runtime) option." msgstr "" "Для тестовой задачи номер 1 необходимо задать время исполнения с помощью " "опции -r (--runtime)." #: src/clients/compute/arctest.cpp:109 msgid "Certificate information:" msgstr "Информация о сертификате:" #: src/clients/compute/arctest.cpp:113 msgid "No user-certificate found" msgstr "Сертификат пользователя не обнаружен" #: src/clients/compute/arctest.cpp:116 #, c-format msgid "Certificate: %s" msgstr "Сертификат: %s" #: src/clients/compute/arctest.cpp:118 #, c-format msgid "Subject name: %s" msgstr "Имя субъекта: %s" #: src/clients/compute/arctest.cpp:119 #, c-format msgid "Valid until: %s" msgstr "Действует по: %s" #: src/clients/compute/arctest.cpp:123 msgid "Unable to determine certificate information" msgstr "Не удалось получить информацию о сертификате" #: src/clients/compute/arctest.cpp:127 msgid "Proxy certificate information:" msgstr "Информация о доверенности:" #: src/clients/compute/arctest.cpp:129 msgid "No proxy found" msgstr "Не удалось обнаружить доверенность" #: src/clients/compute/arctest.cpp:132 #, c-format msgid "Proxy: %s" msgstr "Доверенность: %s" #: src/clients/compute/arctest.cpp:133 #, c-format msgid "Proxy-subject: %s" msgstr "Имя субъекта доверенности: %s" #: src/clients/compute/arctest.cpp:135 msgid "Valid for: Proxy expired" msgstr "Доверенность действительна на: Срок действия доверенности вышел" #: src/clients/compute/arctest.cpp:137 msgid "Valid for: Proxy not valid" msgstr "Доверенность действительна на: Доверенность недействительна" #: src/clients/compute/arctest.cpp:139 #, c-format msgid "Valid for: %s" msgstr "Срок действия истекает через: %s" #: src/clients/compute/arctest.cpp:144 #, c-format msgid "Certificate issuer: %s" msgstr "Сертификат выдан: %s" #: src/clients/compute/arctest.cpp:148 msgid "CA-certificates installed:" msgstr "Установленные сертификаты CA:" #: src/clients/compute/arctest.cpp:170 msgid "Unable to detect if issuer certificate is installed." msgstr "Не удалось определить, установлены ли сертификаты центра сертификации" #: src/clients/compute/arctest.cpp:173 msgid "Your issuer's certificate is not installed" msgstr "Не установлен сертификат Вашего центра сертификации" #: src/clients/compute/arctest.cpp:191 #, c-format msgid "No test-job, with ID \"%d\"" msgstr "Тестовая задача под номером \"%d\"не существует." 
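(Editor's note, illustration only.) The ru.po header earlier in this section declares "Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);". This is the expression ngettext() evaluates to pick one of the three Russian msgstr[] variants, and per the xgettext keyword syntax the -kFindNTrans:1,2 option in po/Makevars marks arguments 1 and 2 of that wrapper as the singular/plural msgid pair. A small self-contained check of how the rule maps counts to form indices (a sketch of the header formula, not ARC code):

#include <stdio.h>

/* The Plural-Forms rule from the ru.po header, transcribed literally:
   nplurals=3;
   plural=(n%10==1 && n%100!=11 ? 0
           : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2); */
static unsigned ru_plural(unsigned long n) {
    return (n % 10 == 1 && n % 100 != 11) ? 0
         : (n % 10 >= 2 && n % 10 <= 4 && (n % 100 < 10 || n % 100 >= 20)) ? 1
         : 2;
}

int main(void) {
    /* Expected output: form 0 for 1, 21, 101; form 1 for 2, 22; form 2 for
       5, 11, 25 (the "teens" 11-14 always take form 2). */
    unsigned long samples[] = {1, 2, 5, 11, 21, 22, 25, 101};
    for (size_t i = 0; i < sizeof samples / sizeof samples[0]; ++i)
        printf("n=%lu -> plural form %u\n", samples[i], ru_plural(samples[i]));
    return 0;
}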
#: src/clients/compute/arctest.cpp:209 #, c-format msgid "Unable to load broker %s" msgstr "Невозможно подгрузить брокер %s" #: src/clients/compute/arctest.cpp:212 #: src/hed/libs/compute/BrokerPlugin.cpp:106 #, c-format msgid "Broker %s loaded" msgstr "Подгружен планировщик %s" #: src/clients/compute/arctest.cpp:234 msgid "Test aborted because no resource returned any information" msgstr "" "Обрыв засылки теста, т.к. ни один из ресурсов не предоставил информацию" #: src/clients/compute/arctest.cpp:247 msgid "" "ERROR: Test aborted because no suitable resources were found for the test-job" msgstr "ОШИБКА: Обрыв засылки теста, так как подходящих ресурсов не найдено" #: src/clients/compute/arctest.cpp:249 msgid "" "ERROR: Dumping job description aborted because no suitable resources were " "found for the test-job" msgstr "" "ОШИБКА: Обрыв распечатки описания задачи, так как подходящих ресурсов не " "найдено" #: src/clients/compute/arctest.cpp:258 #, c-format msgid "Submitting test-job %d:" msgstr "Запускается тестовая задача %d:" #: src/clients/compute/arctest.cpp:262 #, c-format msgid "Client version: nordugrid-arc-%s" msgstr "Версия клиента: nordugrid-arc-%s" #: src/clients/compute/arctest.cpp:269 #, c-format msgid "Cannot write jobid (%s) to file (%s)" msgstr "Невозможно записать ярлык задачи (%s) в файл (%s) " #: src/clients/compute/arctest.cpp:270 #, c-format msgid "Test submitted with jobid: %s" msgstr "Тест запущен с ярлыком: %s" #: src/clients/compute/arctest.cpp:285 #, c-format msgid "Computing service: %s" msgstr "Вычислительный сервис: %s" #: src/clients/compute/arctest.cpp:291 msgid "Test failed, no more possible targets" msgstr "Не удалось заслать тест, возможные назначения отсутствуют" #: src/clients/compute/utils.cpp:118 #, c-format msgid "Types of execution services %s is able to submit jobs to:" msgstr "Типы сервисов выполнения, на которые %s может засылать задачи:" #: src/clients/compute/utils.cpp:121 #, c-format msgid "Types of registry services which %s is able collect information from:" msgstr "Типы сервисов регистрации, в которых %s может получить информацию:" #: src/clients/compute/utils.cpp:124 #, c-format msgid "" "Types of local information services which %s is able collect information " "from:" msgstr "" "Типы локальных служб информации, с которых %s может получить информацию:" #: src/clients/compute/utils.cpp:127 #, c-format msgid "" "Types of local information services which %s is able collect job information " "from:" msgstr "" "Типы локальных служб информации, с которых %s может получить информацию о " "задачах:" #: src/clients/compute/utils.cpp:130 #, c-format msgid "Types of services %s is able to manage jobs at:" msgstr "Типы служб,на которых %s может управлять задачами:" #: src/clients/compute/utils.cpp:133 #, c-format msgid "Job description languages supported by %s:" msgstr "Следующие языки описания задач поддерживаются %s:" #: src/clients/compute/utils.cpp:136 #, c-format msgid "Brokers available to %s:" msgstr "Следующие планировщики доступны для %s:" #: src/clients/compute/utils.cpp:159 #, c-format msgid "" "Default broker (%s) is not available. When using %s a broker should be " "specified explicitly (-b option)." msgstr "" "Планировщик по умолчанию (%s) недоступен. При использовании %s планировщик " "должен быть указан явным образом (опция -b)." #: src/clients/compute/utils.cpp:169 msgid "Proxy expired. Job submission aborted. Please run 'arcproxy'!" msgstr "" "Срок действия доверенности вышел. Засылка задачи оборвана. 
Пожалуйста, " "запустите 'arcproxy'!" #: src/clients/compute/utils.cpp:174 msgid "" "Cannot find any proxy. This application currently cannot run without a " "proxy.\n" " If you have the proxy file in a non-default location,\n" " please make sure the path is specified in the client configuration file.\n" " If you don't have a proxy yet, please run 'arcproxy'!" msgstr "" "Не удалось обнаружить доверенность. Это приложение не работает без " "доверенности.\n" " Если Ваша доверенность хранится в нестандартном месте, пожалуйста,\n" " убедитесь, что в настройках клиента указан правильный путь.\n" " Если же Вы пока не создали доверенность, запустите 'arcproxy'!" #: src/clients/compute/utils.cpp:278 msgid "" "select one or more computing elements: name can be an alias for a single CE, " "a group of CEs or a URL" msgstr "" "указать один или более вычислительных ресурсов: имя может быть сокращением " "для одного ресурса, группы ресурсов, или URL" #: src/clients/compute/utils.cpp:280 src/clients/compute/utils.cpp:297 #: src/clients/compute/utils.cpp:404 msgid "name" msgstr "имя" #: src/clients/compute/utils.cpp:285 msgid "" "the computing element specified by URL at the command line should be queried " "using this information interface (possible options: org.nordugrid.ldapng, " "org.nordugrid.ldapglue2, org.nordugrid.wsrfglue2, org.ogf.glue.emies." "resourceinfo)" msgstr "" "вычислительный ресурс, заданный URL в командной строке должен быть опрошен с " "помощью этого информационного интерфейса (возможные варианты: org.nordugrid." "ldapng, org.nordugrid.ldapglue2, org.nordugrid.wsrfglue2, org.ogf.glue.emies." "resourceinfo)" #: src/clients/compute/utils.cpp:289 msgid "interfacename" msgstr "interfacename" #: src/clients/compute/utils.cpp:295 msgid "" "selecting a computing element for the new jobs with a URL or an alias, or " "selecting a group of computing elements with the name of the group" msgstr "" "выбор вычислительного ресурса для новых задач с помощью URL или сокращения, " "или выбор группы элементов с помощью названия группы" #: src/clients/compute/utils.cpp:303 msgid "force migration, ignore kill failure" msgstr "принудительная миграция, игнорируется сбой прерывания" #: src/clients/compute/utils.cpp:309 msgid "keep the files on the server (do not clean)" msgstr "сохранять файлы на сервере (не удалять)" #: src/clients/compute/utils.cpp:315 msgid "do not ask for verification" msgstr "не запрашивать подтверждения" #: src/clients/compute/utils.cpp:319 msgid "truncate the joblist before synchronizing" msgstr "сжать список задач перед синхронизацией" #: src/clients/compute/utils.cpp:325 src/clients/data/arcls.cpp:287 msgid "long format (more information)" msgstr "расширенный формат (дополнительная информация)" #: src/clients/compute/utils.cpp:331 msgid "print a list of services configured in the client.conf" msgstr "вывести список служб, настроенных в client.conf" #: src/clients/compute/utils.cpp:337 msgid "show the stdout of the job (default)" msgstr "вывести стандартный выход задачи (по умолчанию)" #: src/clients/compute/utils.cpp:341 msgid "show the stderr of the job" msgstr "вывести стандартную ошибку задачи" #: src/clients/compute/utils.cpp:345 msgid "show the CE's error log of the job" msgstr "вывести ошибки системы при исполнении задачи" #: src/clients/compute/utils.cpp:351 msgid "" "download directory (the job directory will be created in this directory)" msgstr "каталог загрузки (подкаталог задачи будет создан в этом каталоге)" #: src/clients/compute/utils.cpp:353 msgid "dirname" msgstr 
"каталог" #: src/clients/compute/utils.cpp:357 msgid "use the jobname instead of the short ID as the job directory name" msgstr "" "использовать имя задачи вместо краткого идентификатора в качестве названия " "каталога" #: src/clients/compute/utils.cpp:362 msgid "force download (overwrite existing job directory)" msgstr "принудительная загрузка (перезаписать существующий каталог задачи)" #: src/clients/compute/utils.cpp:368 msgid "instead of the status only the IDs of the selected jobs will be printed" msgstr "вместо состояния будут выведены только ярлыки указанных задач" #: src/clients/compute/utils.cpp:372 msgid "sort jobs according to jobid, submissiontime or jobname" msgstr "сортировать задачи по идентификатору, времени запуска или имени" #: src/clients/compute/utils.cpp:373 src/clients/compute/utils.cpp:376 msgid "order" msgstr "порядок" #: src/clients/compute/utils.cpp:375 msgid "reverse sorting of jobs according to jobid, submissiontime or jobname" msgstr "" "сортировать задачи в обратном порядке по идентификатору, времени запуска или " "имени" #: src/clients/compute/utils.cpp:379 msgid "show jobs where status information is unavailable" msgstr "перечислить задачи, для которых отсутствует информация о состоянии" #: src/clients/compute/utils.cpp:385 msgid "resubmit to the same resource" msgstr "Заслать заново на тот же ресурс" #: src/clients/compute/utils.cpp:389 msgid "do not resubmit to the same resource" msgstr "не перезасылать на тот же ресурс" #: src/clients/compute/utils.cpp:395 msgid "" "remove the job from the local list of jobs even if the job is not found in " "the infosys" msgstr "" "удалить задачу из локального списка, даже если информация о ней \n" "отсутствует" #: src/clients/compute/utils.cpp:402 msgid "" "select one or more registries: name can be an alias for a single registry, a " "group of registries or a URL" msgstr "" "выбрать один или несколько реестров: имя может быть сокращением для одного " "реестра, группы реестров, или URL" #: src/clients/compute/utils.cpp:410 msgid "submit test job given by the number" msgstr "апустить тестовую задачу под соответствующим номером" #: src/clients/compute/utils.cpp:411 src/clients/compute/utils.cpp:415 msgid "int" msgstr "число" #: src/clients/compute/utils.cpp:414 msgid "test job runtime specified by the number" msgstr "время исполнения тестовой задачи" #: src/clients/compute/utils.cpp:421 msgid "only select jobs whose status is statusstr" msgstr "выполнить действие лишь над задачами в указанном состоянии" #: src/clients/compute/utils.cpp:422 msgid "statusstr" msgstr "состояние" #: src/clients/compute/utils.cpp:428 msgid "all jobs" msgstr "все задачи" #: src/clients/compute/utils.cpp:434 msgid "jobdescription string describing the job to be submitted" msgstr "Строка, содержащая описание запускаемой задачи" #: src/clients/compute/utils.cpp:436 src/clients/compute/utils.cpp:442 #: src/clients/credentials/arcproxy.cpp:369 #: src/clients/credentials/arcproxy.cpp:376 #: src/clients/credentials/arcproxy.cpp:394 #: src/clients/credentials/arcproxy.cpp:401 #: src/clients/credentials/arcproxy.cpp:419 #: src/clients/credentials/arcproxy.cpp:423 #: src/clients/credentials/arcproxy.cpp:438 #: src/clients/credentials/arcproxy.cpp:448 #: src/clients/credentials/arcproxy.cpp:452 #: src/clients/credentials/arcproxyalt.cpp:369 #: src/clients/credentials/arcproxyalt.cpp:376 #: src/clients/credentials/arcproxyalt.cpp:399 #: src/clients/credentials/arcproxyalt.cpp:415 #: src/clients/credentials/arcproxyalt.cpp:419 #: 
src/clients/credentials/arcproxyalt.cpp:429 #: src/clients/credentials/arcproxyalt.cpp:433 msgid "string" msgstr "строка" #: src/clients/compute/utils.cpp:440 msgid "jobdescription file describing the job to be submitted" msgstr "Файл, содержащий описание запускаемой задачи" #: src/clients/compute/utils.cpp:448 msgid "select broker method (list available brokers with --listplugins flag)" msgstr "" "выбрать способ планировки (список доступных планировщиков выводится опцией --" "listplugins)" #: src/clients/compute/utils.cpp:449 msgid "broker" msgstr "планировщик" #: src/clients/compute/utils.cpp:452 msgid "the IDs of the submitted jobs will be appended to this file" msgstr "ярлыки запущенных задач будут занесены в этот файл" #: src/clients/compute/utils.cpp:453 src/clients/compute/utils.cpp:475 #: src/clients/compute/utils.cpp:512 src/clients/compute/utils.cpp:520 #: src/clients/credentials/arcproxy.cpp:461 #: src/clients/credentials/arcproxyalt.cpp:447 src/clients/data/arccp.cpp:627 #: src/clients/data/arcls.cpp:332 src/clients/data/arcmkdir.cpp:111 #: src/clients/data/arcrename.cpp:122 src/clients/data/arcrm.cpp:137 #: src/clients/echo/arcecho.cpp:47 src/clients/wsrf/arcwsrf.cpp:57 msgid "filename" msgstr "файл" #: src/clients/compute/utils.cpp:457 msgid "" "only use this interface for submitting (e.g. org.nordugrid.gridftpjob, org." "ogf.glue.emies.activitycreation, org.ogf.bes)" msgstr "" "использовать только этот интерфейс для засылки (например, org.nordugrid." "gridftpjob, org.ogf.glue.emies.activitycreation, org.ogf.bes)" #: src/clients/compute/utils.cpp:459 src/clients/compute/utils.cpp:501 msgid "InterfaceName" msgstr "InterfaceName" #: src/clients/compute/utils.cpp:466 msgid "skip the service with the given URL during service discovery" msgstr "пропустить службу с этим URL при обнаружении служб" #: src/clients/compute/utils.cpp:467 src/clients/compute/utils.cpp:480 #: src/clients/data/arccp.cpp:607 msgid "URL" msgstr "URL" #: src/clients/compute/utils.cpp:474 msgid "a file containing a list of jobIDs" msgstr "файл, содержащий ярлыки задач" #: src/clients/compute/utils.cpp:479 msgid "skip jobs which are on a computing element with a given URL" msgstr "" "пропустить задачи, находящиеся на вычислительном ресурсе с заданным URL" #: src/clients/compute/utils.cpp:485 msgid "submit jobs as dry run (no submission to batch system)" msgstr "запуск задач в режиме холостой прогонки (без засылки на счёт)" #: src/clients/compute/utils.cpp:488 msgid "submit directly - no resource discovery or matchmaking" msgstr "запустить напрямую, без обнаружения и проверки соответствия ресурсов" #: src/clients/compute/utils.cpp:492 msgid "" "do not submit - dump job description in the language accepted by the target" msgstr "" "не выполнять засылку: распечатка описания задачи на языке, приемлемом " "назначением" #: src/clients/compute/utils.cpp:499 msgid "" "only get information about executon targets which support this job " "submission interface (e.g. org.nordugrid.gridftpjob, org.ogf.glue.emies." "activitycreation, org.ogf.bes)" msgstr "" "получить информацию только о тех вычислительных ресурсах, которые " "поддерживают заданный интерфейс для засылки задач (например, org.nordugrid." 
"gridftpjob, org.ogf.glue.emies.activitycreation, org.ogf.bes)" #: src/clients/compute/utils.cpp:506 msgid "prints info about installed user- and CA-certificates" msgstr "" "вывести информацию об установленных сертификатах пользователя и " "сертификационных агентств" #: src/clients/compute/utils.cpp:511 #, c-format msgid "the file storing information about active jobs (default %s)" msgstr "файл с записью информации о задачах на счёте (по умолчанию %s)" #: src/clients/compute/utils.cpp:519 src/clients/credentials/arcproxy.cpp:460 #: src/clients/credentials/arcproxyalt.cpp:446 src/clients/data/arccp.cpp:626 #: src/clients/data/arcls.cpp:331 src/clients/data/arcmkdir.cpp:110 #: src/clients/data/arcrename.cpp:121 src/clients/data/arcrm.cpp:136 #: src/clients/echo/arcecho.cpp:46 src/clients/wsrf/arcwsrf.cpp:56 msgid "configuration file (default ~/.arc/client.conf)" msgstr "файл настроек (по умолчанию ~/.arc/client.conf)" #: src/clients/compute/utils.cpp:522 src/clients/credentials/arcproxy.cpp:455 #: src/clients/credentials/arcproxyalt.cpp:441 src/clients/data/arccp.cpp:621 #: src/clients/data/arcls.cpp:326 src/clients/data/arcmkdir.cpp:105 #: src/clients/data/arcrename.cpp:116 src/clients/data/arcrm.cpp:131 #: src/clients/echo/arcecho.cpp:41 src/clients/wsrf/arcwsrf.cpp:51 msgid "timeout in seconds (default 20)" msgstr "время ожидания в секундах (по умолчанию 20)" #: src/clients/compute/utils.cpp:523 src/clients/credentials/arcproxy.cpp:456 #: src/clients/credentials/arcproxyalt.cpp:442 src/clients/data/arccp.cpp:622 #: src/clients/data/arcls.cpp:327 src/clients/data/arcmkdir.cpp:106 #: src/clients/data/arcrename.cpp:117 src/clients/data/arcrm.cpp:132 #: src/clients/echo/arcecho.cpp:42 src/clients/wsrf/arcwsrf.cpp:52 msgid "seconds" msgstr "секунд(а/ы)" #: src/clients/compute/utils.cpp:526 msgid "list the available plugins" msgstr "перечисление доступных подключаемых модулей" #: src/clients/compute/utils.cpp:530 src/clients/credentials/arcproxy.cpp:465 #: src/clients/credentials/arcproxyalt.cpp:451 src/clients/data/arccp.cpp:631 #: src/clients/data/arcls.cpp:336 src/clients/data/arcmkdir.cpp:115 #: src/clients/data/arcrename.cpp:126 src/clients/data/arcrm.cpp:141 #: src/clients/echo/arcecho.cpp:51 src/clients/saml/saml_assertion_init.cpp:52 #: src/clients/wsrf/arcwsrf.cpp:61 #: src/hed/libs/compute/test_jobdescription.cpp:38 #: src/services/a-rex/grid-manager/inputcheck.cpp:81 msgid "FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG" msgstr "FATAL, ERROR, WARNING, INFO, VERBOSE или DEBUG" #: src/clients/compute/utils.cpp:531 src/clients/credentials/arcproxy.cpp:466 #: src/clients/credentials/arcproxyalt.cpp:452 src/clients/data/arccp.cpp:632 #: src/clients/data/arcls.cpp:337 src/clients/data/arcmkdir.cpp:116 #: src/clients/data/arcrename.cpp:127 src/clients/data/arcrm.cpp:142 #: src/clients/echo/arcecho.cpp:52 src/clients/saml/saml_assertion_init.cpp:53 #: src/clients/wsrf/arcwsrf.cpp:62 #: src/hed/libs/compute/test_jobdescription.cpp:39 #: src/services/a-rex/grid-manager/inputcheck.cpp:82 msgid "debuglevel" msgstr "уровень" #: src/clients/compute/utils.cpp:533 src/clients/credentials/arcproxy.cpp:469 #: src/clients/credentials/arcproxyalt.cpp:455 src/clients/data/arccp.cpp:635 #: src/clients/data/arcls.cpp:340 src/clients/data/arcmkdir.cpp:119 #: src/clients/data/arcrename.cpp:130 src/clients/data/arcrm.cpp:145 #: src/clients/echo/arcecho.cpp:55 src/clients/saml/saml_assertion_init.cpp:56 #: src/clients/wsrf/arcwsrf.cpp:65 msgid "print version information" msgstr "вывести информацию о версии" #: 
src/clients/credentials/arcproxy.cpp:172 #: src/hed/libs/credential/ARCProxyUtil.cpp:1248 #, c-format msgid "There are %d user certificates existing in the NSS database" msgstr "В базе данных NSS обнаружено %d сертификата пользователя" #: src/clients/credentials/arcproxy.cpp:188 #: src/hed/libs/credential/ARCProxyUtil.cpp:1264 #, c-format msgid "Number %d is with nickname: %s%s" msgstr "Номер %d с кратким именем: %s%s" #: src/clients/credentials/arcproxy.cpp:197 #: src/hed/libs/credential/ARCProxyUtil.cpp:1273 #, c-format msgid " expiration time: %s " msgstr " действителен до: %s " #: src/clients/credentials/arcproxy.cpp:201 #: src/hed/libs/credential/ARCProxyUtil.cpp:1277 #, c-format msgid " certificate dn: %s" msgstr " DN сертификата: %s" #: src/clients/credentials/arcproxy.cpp:202 #: src/hed/libs/credential/ARCProxyUtil.cpp:1278 #, c-format msgid " issuer dn: %s" msgstr " DN эмитента: %s" #: src/clients/credentials/arcproxy.cpp:203 #: src/hed/libs/credential/ARCProxyUtil.cpp:1279 #, c-format msgid " serial number: %d" msgstr " Серийный номер: %d" #: src/clients/credentials/arcproxy.cpp:207 #: src/hed/libs/credential/ARCProxyUtil.cpp:1283 #, c-format msgid "Please choose the one you would use (1-%d): " msgstr "Пожалуйста, выберите то, что будет использоваться (1-%d): " #: src/clients/credentials/arcproxy.cpp:272 #: src/clients/credentials/arcproxyalt.cpp:317 msgid "" "The arcproxy command creates a proxy from a key/certificate pair which can\n" "then be used to access grid resources." msgstr "" "Команда arcproxy создаёт доверенность из пары закрытый/открытый ключ\n" "для получения доступа к гриду." #: src/clients/credentials/arcproxy.cpp:274 msgid "" "Supported constraints are:\n" " validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start " "from now)\n" " validityEnd=time\n" " validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod and " "validityEnd\n" " not specified, the default is 12 hours for local proxy, and 168 hours " "for delegated\n" " proxy on myproxy server)\n" " vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, the " "default\n" " is the minimum value of 12 hours and validityPeriod)\n" " myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy " "server,\n" " e.g. 43200 or 12h or 12H; if not specified, the default is the minimum " "value of\n" " 12 hours and validityPeriod (which is lifetime of the delegated proxy on " "myproxy server))\n" " proxyPolicy=policy content\n" " proxyPolicyFile=policy file\n" " keybits=number - length of the key to generate. Default is 1024 bits.\n" " Special value 'inherit' is to use key length of signing certificate.\n" " signingAlgorithm=name - signing algorithm to use for signing public key of " "proxy.\n" " Possible values are sha1, sha2 (alias for sha256), sha224, sha256, " "sha384, sha512\n" " and inherit (use algorithm of signing certificate). 
Default is inherit.\n" " With old systems, only sha1 is acceptable.\n" "\n" "Supported information item names are:\n" " subject - subject name of proxy certificate.\n" " identity - identity subject name of proxy certificate.\n" " issuer - issuer subject name of proxy certificate.\n" " ca - subject name of CA ehich issued initial certificate\n" " path - file system path to file containing proxy.\n" " type - type of proxy certificate.\n" " validityStart - timestamp when proxy validity starts.\n" " validityEnd - timestamp when proxy validity ends.\n" " validityPeriod - duration of proxy validity in seconds.\n" " validityLeft - duration of proxy validity left in seconds.\n" " vomsVO - VO name represented by VOMS attribute\n" " vomsSubject - subject of certificate for which VOMS attribute is issued\n" " vomsIssuer - subject of service which issued VOMS certificate\n" " vomsACvalidityStart - timestamp when VOMS attribute validity starts.\n" " vomsACvalidityEnd - timestamp when VOMS attribute validity ends.\n" " vomsACvalidityPeriod - duration of VOMS attribute validity in seconds.\n" " vomsACvalidityLeft - duration of VOMS attribute validity left in seconds.\n" " proxyPolicy\n" " keybits - size of proxy certificate key in bits.\n" " signingAlgorithm - algorithm used to sign proxy certificate.\n" "Items are printed in requested order and are separated by newline.\n" "If item has multiple values they are printed in same line separated by |.\n" "\n" "Supported password destinations are:\n" " key - for reading private key\n" " myproxy - for accessing credentials at MyProxy service\n" " myproxynew - for creating credentials at MyProxy service\n" " all - for any purspose.\n" "\n" "Supported password sources are:\n" " quoted string (\"password\") - explicitly specified password\n" " int - interactively request password from console\n" " stdin - read password from standard input delimited by newline\n" " file:filename - read password from file named filename\n" " stream:# - read password from input stream number #.\n" " Currently only 0 (standard input) is supported.\n" msgstr "" "Поддерживаемые ограничения:\n" " validityStart=время (например, 2008-05-29T10:20:30Z; если не указано, то " "начинается немедленно)\n" " validityEnd=время\n" " validityPeriod=время (например, 43200, или 12h, или 12H; если не указаны " "ни validityPeriod,\n" " ни validityEnd, то срок действия по умолчанию составляет 12 часов для " "локальной доверенности,\n" " и 168 часов для делегированной доверенности на сервере MyProxy)\n" " vomsACvalidityPeriod=время (например, 43200, или 12h, или 12H; если не " "указано, то используется\n" " наименьшее между 12 часами и значением validityPeriod)\n" " myproxyvalidityPeriod=время (срок годности доверенности, делегированной " "через сервер MyProxy\n" " например, 43200, или 12h, или 12H; если не указано, то используется " "наименьшее между 12 часами\n" " и значением validityPeriod - сроком годности доверенности, " "делегированной через сервер MyProxy)\n" " proxyPolicy=содержимое политики\n" " proxyPolicyFile=файл политики\n" " keybits=число - длина генерируемого ключа. По умолчанию - 1024 бит.\n" " Специальное значение 'inherit' означает использование длины ключа " "подписывающего сертификата.\n" " signingAlgorithm=название - алгоритм, используемый для подписания " "открытого ключа или доверенности.\n" " По умолчанию - sha1. Возможные значения: sha1, sha2 (сокращение от " "sha256), sha224, sha256, sha384,\n" " sha512 и inherit (использовать алгоритм подписывающего сертификата). 
По " "умолчанию используется inherit.\n" " Старые системы поддерживают лишь sha1.\n" "\n" "Поддерживаемые поля информации:\n" " subject - имя субъекта доверенности.\n" " identity - идентифицируемое имя субъекта доверенности.\n" " issuer - имя субъекта, выдавшего доверенность.\n" " ca - имя субъекта агентства, выдавшего исходный сертификат\n" " path - локальный путь к файлу, содержащему доверенность.\n" " type - тип доверенности.\n" " validityStart - время начала действия доверенности.\n" " validityEnd - время окончания действия доверенности.\n" " validityPeriod - продолжительность годности доверенности в секундах.\n" " validityLeft - оставшаяся продолжительность годности доверенности в " "секундах.\n" " vomsVO - имя виртуальной организации, указанное в атрибуте VOMS.\n" " vomsSubject - субъект сертификата, которому был присвоен атрибут VOMS.\n" " vomsIssuer - субъект службы, выдавшей сертификат VOMS.\n" " vomsACvalidityStart - время начала действия атрибута VOMS.\n" " vomsACvalidityEnd - время окончания действия атрибута VOMS.\n" " vomsACvalidityPeriod - продолжительность годности атрибута VOMS в " "секундах.\n" " vomsACvalidityLeft - оставшаяся продолжительность годности атрибута VOMS в " "секундах.\n" " proxyPolicy - содержимое политики\n" " keybits - длина ключа доверенности в битах.\n" " signingAlgorithm - алгоритм, используемый при подписи сертификата.\n" "Значения выводятся в порядке запроса, каждое с новой строки.\n" "Если полю соответствуют несколько значений, они выводятся в строку и " "разделяются |.\n" "\n" "Поддерживаемые назначения паролей:\n" " key - для чтения закрытых ключей\n" " myproxy - для доступа к сертификатам на сервере MyProxy\n" " myproxynew - для создания сертификатов на сервере MyProxy\n" " all - с любой целью.\n" "\n" "Поддерживаемые источники паролей:\n" " quoted string (\"password\") - явно указанный пароль\n" " int - интерактивный запрос на ввод пароля с терминала\n" " stdin - чтение пароля со стандартного ввода по переводу строки\n" " file:filename - чтение пароля из файла filename\n" " stream:# - чтение пароля из входного потока номер #.\n" " На текущий момент поддерживается только 0 (стандартный ввод).\n" #: src/clients/credentials/arcproxy.cpp:334 #: src/clients/credentials/arcproxyalt.cpp:334 msgid "path to the proxy file" msgstr "путь к файлу доверенности" #: src/clients/credentials/arcproxy.cpp:335 #: src/clients/credentials/arcproxy.cpp:339 #: src/clients/credentials/arcproxy.cpp:343 #: src/clients/credentials/arcproxy.cpp:347 #: src/clients/credentials/arcproxy.cpp:351 #: src/clients/credentials/arcproxy.cpp:355 #: src/clients/credentials/arcproxyalt.cpp:335 #: src/clients/credentials/arcproxyalt.cpp:339 #: src/clients/credentials/arcproxyalt.cpp:343 #: src/clients/credentials/arcproxyalt.cpp:347 #: src/clients/credentials/arcproxyalt.cpp:351 #: src/clients/credentials/arcproxyalt.cpp:355 src/clients/data/arccp.cpp:584 #: src/clients/saml/saml_assertion_init.cpp:48 msgid "path" msgstr "путь" #: src/clients/credentials/arcproxy.cpp:338 #: src/clients/credentials/arcproxyalt.cpp:338 msgid "" "path to the certificate file, it can be either PEM, DER, or PKCS12 formated" msgstr "" "путь к файлу сертификата, который может быть в формате PEM, DER, или PKCS12" #: src/clients/credentials/arcproxy.cpp:342 #: src/clients/credentials/arcproxyalt.cpp:342 msgid "" "path to the private key file, if the certificate is in PKCS12 format, then " "no need to give private key" msgstr "" "путь к закрытому ключу; если сертификат указан в формате PKCS12, закрытый " "ключ не 
нужен" #: src/clients/credentials/arcproxy.cpp:346 #: src/clients/credentials/arcproxyalt.cpp:346 msgid "" "path to the trusted certificate directory, only needed for the VOMS client " "functionality" msgstr "" "путь к каталогу с доверяемыми сертификатами, используется только клиентом " "VOMS" #: src/clients/credentials/arcproxy.cpp:350 #: src/clients/credentials/arcproxyalt.cpp:350 msgid "" "path to the top directory of VOMS *.lsc files, only needed for the VOMS " "client functionality" msgstr "" "путь к корневому каталогу с файлами VOMS *.lsc, используется только клиентом " "VOMS" #: src/clients/credentials/arcproxy.cpp:354 #: src/clients/credentials/arcproxyalt.cpp:354 msgid "path to the VOMS server configuration file" msgstr "путь к файлу настроек серверов VOMS" #: src/clients/credentials/arcproxy.cpp:358 #: src/clients/credentials/arcproxyalt.cpp:358 msgid "" "voms<:command>. Specify VOMS server (More than one VOMS server \n" " can be specified like this: --voms VOa:command1 --voms VOb:" "command2). \n" " :command is optional, and is used to ask for specific " "attributes(e.g: roles)\n" " command options are:\n" " all --- put all of this DN's attributes into AC; \n" " list ---list all of the DN's attribute, will not create AC " "extension; \n" " /Role=yourRole --- specify the role, if this DN \n" " has such a role, the role will be put into " "AC \n" " /voname/groupname/Role=yourRole --- specify the VO, group and " "role; if this DN \n" " has such a role, the role will be put into " "AC \n" msgstr "" "voms<:инструкция>. Описание сервера VOMS (несколько серверов задаются\n" " следующим образом: --voms VOa:инструкция1 --voms VOb:" "инструкция2).\n" " <:инструкция> не обязательна и служит для запроса " "дополнительных\n" " атрибутов (например, ролей)\n" " Инструкции:\n" " all --- добавить все атрибуты, доступные данному " "пользователю;\n" " list --- перечислить все атрибуты, доступные данному " "пользователю,\n" " без создания расширения AC; \n" " /Role=вашаРоль --- указать желаемую роль; если данный " "пользователь\n" " может играть такую роль, она будет " "добавлена;\n" " /voname/groupname/Role=вашаРоль --- указать ВО, группу и роль; " "если\n" " данный пользователь может играть такую " "роль, она\n" " будет добавлена.\n" #: src/clients/credentials/arcproxy.cpp:372 #: src/clients/credentials/arcproxyalt.cpp:372 msgid "" "group<:role>. Specify ordering of attributes \n" " Example: --order /knowarc.eu/coredev:Developer,/knowarc.eu/" "testers:Tester \n" " or: --order /knowarc.eu/coredev:Developer --order /knowarc.eu/" "testers:Tester \n" " Note that it does not make sense to specify the order if you have two or " "more different VOMS servers specified" msgstr "" "group<:role>. 
Указанная последовательность атрибутов \n" " Пример: --order /knowarc.eu/coredev:Developer,/knowarc.eu/" "testers:Tester \n" " или: --order /knowarc.eu/coredev:Developer --order /knowarc.eu/" "testers:Tester \n" " Имейте в виду, что при использовании нескольких серверов VOMS не имеет " "смысла указывать последовательность атрибутов" #: src/clients/credentials/arcproxy.cpp:379 #: src/clients/credentials/arcproxyalt.cpp:379 msgid "use GSI communication protocol for contacting VOMS services" msgstr "использовать протокол GSI для контакта служб VOMS" #: src/clients/credentials/arcproxy.cpp:382 #: src/clients/credentials/arcproxyalt.cpp:382 msgid "" "use HTTP communication protocol for contacting VOMS services that provide " "RESTful access \n" " Note for RESTful access, 'list' command and multiple VOMS " "server are not supported\n" msgstr "" "использовать протокол HTTP для связи со службами VOMS, поддерживающими " "доступ типа REST \n" " Внимание: для доступа REST, команда 'list' и множественный " "сервер VOMS не поддерживаются\n" #: src/clients/credentials/arcproxy.cpp:388 msgid "" "this option is not functional (old GSI proxies are not supported anymore)" msgstr "" "эта опция не действует (старые сертификаты доверенности GSI более не " "поддерживаются)" #: src/clients/credentials/arcproxy.cpp:391 msgid "print all information about this proxy." msgstr "вывести всю информацию об этой доверенности." #: src/clients/credentials/arcproxy.cpp:394 msgid "print selected information about this proxy." msgstr "вывести избранную информацию об этой доверенности." #: src/clients/credentials/arcproxy.cpp:397 #: src/clients/credentials/arcproxyalt.cpp:395 msgid "remove proxy" msgstr "удаление доверенности" #: src/clients/credentials/arcproxy.cpp:400 msgid "" "username to MyProxy server (if missing subject of user certificate is used)" msgstr "" "Имя пользователя сервера MyProxy (при отсутствии имени субъекта, или при " "применении сертификата пользователя)" #: src/clients/credentials/arcproxy.cpp:405 #: src/clients/credentials/arcproxyalt.cpp:402 msgid "" "don't prompt for a credential passphrase, when retrieve a \n" " credential from on MyProxy server. \n" " The precondition of this choice is the credential is PUT onto\n" " the MyProxy server without a passphrase by using -R (--" "retrievable_by_cert) \n" " option when being PUTing onto Myproxy server. \n" " This option is specific for the GET command when contacting " "Myproxy server." msgstr "" "не запрашивать пароль учётных данных при получении этих \n" " данных с сервера MyProxy. \n" " Это возможно при условии, если данные были сохранены методом " "PUT\n" " на сервере MyProxy без пароля, используя опцию -R (--" "retrievable_by_cert) \n" " при выполнении операции PUT в отношении сервера Myproxy. \n" " Эта опция используется только командой GET в отношении сервера " "Myproxy." #: src/clients/credentials/arcproxy.cpp:416 #: src/clients/credentials/arcproxyalt.cpp:412 msgid "" "Allow specified entity to retrieve credential without passphrase.\n" " This option is specific for the PUT command when contacting " "Myproxy server." msgstr "" "Разрешить указанному клиенту получать учётные данные без пароля.\n" " Эта опция используется только командой PUT в отношении сервера " "Myproxy." #: src/clients/credentials/arcproxy.cpp:422 #: src/clients/credentials/arcproxyalt.cpp:418 msgid "hostname[:port] of MyProxy server" msgstr "hostname[:port] сервера MyProxy" #: src/clients/credentials/arcproxy.cpp:427 msgid "" "command to MyProxy server. 
The command can be PUT, GET, INFO, NEWPASS or " "DESTROY.\n" " PUT -- put a delegated credentials to the MyProxy server; \n" " GET -- get a delegated credentials from the MyProxy server; \n" " INFO -- get and present information about credentials stored " "at the MyProxy server; \n" " NEWPASS -- change password protecting credentials stored at " "the MyProxy server; \n" " DESTROY -- wipe off credentials stored at the MyProxy " "server; \n" " Local credentials (certificate and key) are not necessary " "except in case of PUT. \n" " MyProxy functionality can be used together with VOMS " "functionality.\n" " --voms and --vomses can be used for Get command if VOMS " "attributes\n" " is required to be included in the proxy.\n" msgstr "" "инструкция серверу MyProxy. Возможны следующие инструкции: PUT, GET, INFO, " "NEWPASS или DESTROY.\n" " PUT -- сохранить делегированный сертификат на сервере " "MyProxy;\n" " GET -- получить делегированный сертификат с сервера MyProxy,\n" " INFO -- вывести информацию о сертификатах, хранящихся на " "сервере MyProxy; \n" " NEWPASS -- изменить пароль, защищающий сертификаты, хранящиеся " "на сервере MyProxy; \n" " DESTROY -- удалить сертификаты, хранящиеся на сервере " "MyProxy; \n" " Личные сертификаты и ключи не требуются, за исключением " "инструкции PUT.\n" " Инструкции MyProxy и VOMS могут использоваться одновременно.\n" " Опции --voms and --vomses могут быть использованы с командой " "Get, если\n" " в доверенность необходимо включить атрибуты VOMS.\n" #: src/clients/credentials/arcproxy.cpp:442 msgid "" "use NSS credential database in default Mozilla profiles, \n" " including Firefox, Seamonkey and Thunderbird.\n" msgstr "" "использовать базу данных параметров доступа NSS из профилей Mozilla \n" " по умолчанию, включая Firefox, Seamonkey и Thunderbird.\n" #: src/clients/credentials/arcproxy.cpp:447 #: src/clients/credentials/arcproxyalt.cpp:432 msgid "proxy constraints" msgstr "ограничения доверенности" #: src/clients/credentials/arcproxy.cpp:451 msgid "password destination=password source" msgstr "назначение пароля=источник пароля" #: src/clients/credentials/arcproxy.cpp:500 #: src/clients/credentials/arcproxy.cpp:1161 #: src/clients/credentials/arcproxyalt.cpp:513 #: src/clients/credentials/arcproxyalt.cpp:556 msgid "Failed configuration initialization." msgstr "Не удалось загрузить настройки." #: src/clients/credentials/arcproxy.cpp:518 #: src/clients/credentials/arcproxyalt.cpp:563 msgid "" "Failed to find certificate and/or private key or files have improper " "permissions or ownership." msgstr "" "Не удалось обнаружить сертификат и/или закрытый ключ, либо у файлов " "неподходящие параметры доступа" #: src/clients/credentials/arcproxy.cpp:519 #: src/clients/credentials/arcproxy.cpp:531 #: src/clients/credentials/arcproxyalt.cpp:564 #: src/clients/credentials/arcproxyalt.cpp:574 msgid "You may try to increase verbosity to get more information." msgstr "" "Вы можете попытаться увеличить уровень детальности для получения " "дополнительной информации." #: src/clients/credentials/arcproxy.cpp:527 #: src/clients/credentials/arcproxyalt.cpp:570 msgid "Failed to find CA certificates" msgstr "Невозможно найти сертификаты CA." #: src/clients/credentials/arcproxy.cpp:528 #: src/clients/credentials/arcproxyalt.cpp:571 msgid "" "Cannot find the CA certificates directory path, please set environment " "variable X509_CERT_DIR, or cacertificatesdirectory in a configuration file." msgstr "" "Не удалось найти каталог с сертификатами агентств CA. 
Пожалуйста, задайте " "переменную среды X509_CERT_DIR, или значение cacertificatesdirectory в файле " "настроек" #: src/clients/credentials/arcproxy.cpp:532 #: src/clients/credentials/arcproxyalt.cpp:575 msgid "" "The CA certificates directory is required for contacting VOMS and MyProxy " "servers." msgstr "" "Каталог сертификатов агентств CA необходим для связи с серверами VOMS и " "MyProxy" #: src/clients/credentials/arcproxy.cpp:544 msgid "" "$X509_VOMS_FILE, and $X509_VOMSES are not set;\n" "User has not specified the location for vomses information;\n" "There is also not vomses location information in user's configuration file;\n" "Can not find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses,\n" "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/" "vomses,\n" "/etc/vomses, /etc/grid-security/vomses, and the location at the " "corresponding sub-directory" msgstr "" "$X509_VOMS_FILE и $X509_VOMSES не настроены;\n" "Пользователь не указал расположение файла vomses;\n" "Расположение файла vomses не найдено в файле настроек пользователя;\n" "Файл vomses не обнаружен в ~/.arc/vomses, ~/.voms/vomses,\n" "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/" "vomses,\n" "/etc/vomses, /etc/grid-security/vomses, а также в соответствующих " "подкаталогах" #: src/clients/credentials/arcproxy.cpp:589 #: src/clients/credentials/arcproxyalt.cpp:604 src/clients/echo/arcecho.cpp:84 msgid "Wrong number of arguments!" msgstr "Недопустимое число аргументов!" #: src/clients/credentials/arcproxy.cpp:597 #: src/clients/credentials/arcproxy.cpp:618 #: src/clients/credentials/arcproxyalt.cpp:612 #: src/clients/credentials/arcproxyalt.cpp:632 msgid "" "Cannot find the path of the proxy file, please setup environment " "X509_USER_PROXY, or proxypath in a configuration file" msgstr "" "Не удалось найти доверенность пользователя. Пожалуйста, задайте переменную " "среды X509_USER_PROXY, или значение proxypath в файле настроек" #: src/clients/credentials/arcproxy.cpp:604 #: src/clients/credentials/arcproxyalt.cpp:621 #, c-format msgid "Cannot remove proxy file at %s" msgstr "Невозможно удалить файл доверенности в %s" #: src/clients/credentials/arcproxy.cpp:606 #: src/clients/credentials/arcproxyalt.cpp:617 #, c-format msgid "Cannot remove proxy file at %s, because it's not there" msgstr "Невозможно удалить файл доверенности в %s, потому что его там нет" #: src/clients/credentials/arcproxy.cpp:624 #: src/clients/credentials/arcproxyalt.cpp:638 #, c-format msgid "" "Cannot find file at %s for getting the proxy. Please make sure this file " "exists." msgstr "" "Не удалось найти файл по адресу %s, содержащий доверенность. Пожалуйста, " "убедитесь, что файл существует." 
#: src/clients/credentials/arcproxy.cpp:630 #: src/clients/credentials/arcproxyalt.cpp:651 #, c-format msgid "Subject: %s" msgstr "Субъект: %s" #: src/clients/credentials/arcproxy.cpp:631 #: src/clients/credentials/arcproxyalt.cpp:652 #, c-format msgid "Issuer: %s" msgstr "Кем выдана: %s" #: src/clients/credentials/arcproxy.cpp:632 #: src/clients/credentials/arcproxyalt.cpp:653 #, c-format msgid "Identity: %s" msgstr "Личные данные: %s" #: src/clients/credentials/arcproxy.cpp:634 #: src/clients/credentials/arcproxyalt.cpp:657 msgid "Time left for proxy: Proxy expired" msgstr "Доверенность действительна на: Срок действия доверенности вышел" #: src/clients/credentials/arcproxy.cpp:636 #: src/clients/credentials/arcproxyalt.cpp:659 msgid "Time left for proxy: Proxy not valid yet" msgstr "Доверенность действительна на: Доверенность пока недействительна" #: src/clients/credentials/arcproxy.cpp:638 #: src/clients/credentials/arcproxyalt.cpp:661 #, c-format msgid "Time left for proxy: %s" msgstr "Доверенность действительна на: %s" #: src/clients/credentials/arcproxy.cpp:639 #: src/clients/credentials/arcproxyalt.cpp:663 #, c-format msgid "Proxy path: %s" msgstr "Расположение доверенности: %s" #: src/clients/credentials/arcproxy.cpp:640 #, c-format msgid "Proxy type: %s" msgstr "Тип доверенности: %s" #: src/clients/credentials/arcproxy.cpp:641 #, c-format msgid "Proxy key length: %i" msgstr "Длина ключа доверенности: %i" #: src/clients/credentials/arcproxy.cpp:642 #, c-format msgid "Proxy signature: %s" msgstr "Подпись доверенности: %s" #: src/clients/credentials/arcproxy.cpp:651 #: src/clients/credentials/arcproxyalt.cpp:675 msgid "AC extension information for VO " msgstr "Информация о расширении AC для VO " #: src/clients/credentials/arcproxy.cpp:654 #: src/clients/credentials/arcproxyalt.cpp:678 msgid "Error detected while parsing this AC" msgstr "Обнаружена ошибка при разборе сертификата атрибута" #: src/clients/credentials/arcproxy.cpp:667 #: src/clients/credentials/arcproxyalt.cpp:691 msgid "AC is invalid: " msgstr "Сертификат атрибута недействителен:" #: src/clients/credentials/arcproxy.cpp:720 #: src/clients/credentials/arcproxyalt.cpp:732 msgid "Time left for AC: AC is not valid yet" msgstr "Сертификат атрибута действителен на: Сертификат пока недействителен" #: src/clients/credentials/arcproxy.cpp:722 #: src/clients/credentials/arcproxyalt.cpp:734 msgid "Time left for AC: AC has expired" msgstr "" "Сертификат атрибута действителен на: Срок действия сертификата закончился" #: src/clients/credentials/arcproxy.cpp:724 #: src/clients/credentials/arcproxyalt.cpp:736 #, c-format msgid "Time left for AC: %s" msgstr "Сертификат атрибута действителен на: %s" #: src/clients/credentials/arcproxy.cpp:815 #, c-format msgid "Information item '%s' is not known" msgstr "Неизвестный тип информации '%s'" #: src/clients/credentials/arcproxy.cpp:824 #: src/clients/credentials/arcproxyalt.cpp:746 msgid "" "Cannot find the user certificate path, please setup environment " "X509_USER_CERT, or certificatepath in a configuration file" msgstr "" "Не удалось найти путь к открытому ключу пользователя. Пожалуйста, задайте " "переменную среды X509_USER_CERT, или значение certificatepath в файле " "настроек" #: src/clients/credentials/arcproxy.cpp:828 #: src/clients/credentials/arcproxyalt.cpp:750 msgid "" "Cannot find the user private key path, please setup environment " "X509_USER_KEY, or keypath in a configuration file" msgstr "" "Не удалось найти закрытый ключ пользователя. 
Пожалуйста, задайте переменную " "среды X509_USER_KEY, или значение keypath в файле настроек" #: src/clients/credentials/arcproxy.cpp:852 #, c-format msgid "" "Cannot parse password source expression %s it must be of type=source format" msgstr "" "Не удалось разобрать выражение %s для источника пароля: формат должен быть " "type=source" #: src/clients/credentials/arcproxy.cpp:869 #, c-format msgid "" "Cannot parse password type %s. Currently supported values are " "'key','myproxy','myproxynew' and 'all'." msgstr "" "Не удалось разобрать тип пароля %s. В настоящий момент поддерживаются " "значения 'key','myproxy','myproxynew' и 'all'." #: src/clients/credentials/arcproxy.cpp:884 #, c-format msgid "" "Cannot parse password source %s it must be of source_type or source_type:" "data format. Supported source types are int,stdin,stream,file." msgstr "" "Не удалось разобрать источник пароля %s. Формат должен быть source_type или " "source_type:data format. Поддерживаются следующие типы источников: int,stdin," "stream,file." #: src/clients/credentials/arcproxy.cpp:898 msgid "Only standard input is currently supported for password source." msgstr "" "На настоящий момент единственным поддерживаемым источником пароля является " "стандартный вход." #: src/clients/credentials/arcproxy.cpp:903 #, c-format msgid "" "Cannot parse password source type %s. Supported source types are int,stdin," "stream,file." msgstr "" "Не удалось разобрать тип источника пароля %s. Поддерживаются следующие типы " "источников: int,stdin,stream,file." #: src/clients/credentials/arcproxy.cpp:942 #: src/clients/credentials/arcproxyalt.cpp:782 msgid "The start, end and period can't be set simultaneously" msgstr "Опции start, end и period не могут быть заданы одновременно" #: src/clients/credentials/arcproxy.cpp:948 #: src/clients/credentials/arcproxyalt.cpp:788 #, c-format msgid "The start time that you set: %s can't be recognized." msgstr "Невозможно распознать заданное Вами время начала: %s" #: src/clients/credentials/arcproxy.cpp:955 #: src/clients/credentials/arcproxyalt.cpp:795 #, c-format msgid "The period that you set: %s can't be recognized." msgstr "Невозможно распознать заданный Вами интервал: %s." #: src/clients/credentials/arcproxy.cpp:962 #: src/clients/credentials/arcproxyalt.cpp:802 #, c-format msgid "The end time that you set: %s can't be recognized." msgstr "Невозможно распознать заданное Вами время окончания: %s" #: src/clients/credentials/arcproxy.cpp:971 #: src/clients/credentials/arcproxyalt.cpp:811 #, c-format msgid "The end time that you set: %s is before start time:%s." msgstr "Заданное Вами время окончания: %s предшествует времени начала: %s" #: src/clients/credentials/arcproxy.cpp:982 #: src/clients/credentials/arcproxyalt.cpp:822 #, c-format msgid "WARNING: The start time that you set: %s is before current time: %s" msgstr "" "ПРЕДУПРЕЖДЕНИЕ: Заданное Вами время начала: %s предшествует текущему " "времени: %s" #: src/clients/credentials/arcproxy.cpp:985 #: src/clients/credentials/arcproxyalt.cpp:825 #, c-format msgid "WARNING: The end time that you set: %s is before current time: %s" msgstr "" "ПРЕДУПРЕЖДЕНИЕ: Заданное Вами время окончания: %s предшествует текущему " "времени: %s" #: src/clients/credentials/arcproxy.cpp:995 #: src/clients/credentials/arcproxyalt.cpp:835 #, c-format msgid "The VOMS AC period that you set: %s can't be recognized." msgstr "Невозможно распознать заданный Вами период VOMS AC: %s." 
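# Translator note (illustrative sketch, not part of the extracted strings): the "назначение пароля=источник пароля" option documented above takes values of the form destination=source, combining one of the destinations key/myproxy/myproxynew/all with one of the sources int, stdin, stream:# or file:filename (or a quoted string), e.g.:
#   key=file:/home/user/keypass    (the file path is a made-up placeholder)
#   myproxynew="secret"
#   all=stdin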
#: src/clients/credentials/arcproxy.cpp:1013 #: src/clients/credentials/arcproxyalt.cpp:853 #, c-format msgid "The MyProxy period that you set: %s can't be recognized." msgstr "Невозможно распознать заданный Вами период MyProxy: %s." #: src/clients/credentials/arcproxy.cpp:1028 #, c-format msgid "The keybits constraint is wrong: %s." msgstr "Недопустимое значение ограничения keybits: %s." #: src/clients/credentials/arcproxy.cpp:1042 #: src/clients/credentials/arcproxyalt.cpp:476 #: src/hed/libs/credential/ARCProxyUtil.cpp:1303 msgid "The NSS database can not be detected in the Firefox profile" msgstr "База данных NSS в профиле Firefox не обнаружена" #: src/clients/credentials/arcproxy.cpp:1051 #: src/hed/libs/credential/ARCProxyUtil.cpp:1311 #, c-format msgid "" "There are %d NSS base directories where the certificate, key, and module " "databases live" msgstr "" "Обнаружено %d основных директорий NSS, содержащих базы данных сертификатов, " "ключей и модулей" #: src/clients/credentials/arcproxy.cpp:1053 #: src/hed/libs/credential/ARCProxyUtil.cpp:1315 #, c-format msgid "Number %d is: %s" msgstr "Номер %d: %s" #: src/clients/credentials/arcproxy.cpp:1055 #, c-format msgid "Please choose the NSS database you would like to use (1-%d): " msgstr "Пожалуйста, выберите базу данных NSS для использования (1-%d): " #: src/clients/credentials/arcproxy.cpp:1071 #: src/clients/credentials/arcproxyalt.cpp:482 #: src/hed/libs/credential/ARCProxyUtil.cpp:1329 #, c-format msgid "NSS database to be accessed: %s\n" msgstr "Будет использоваться база данных NSS %s\n" #: src/clients/credentials/arcproxy.cpp:1142 #: src/hed/libs/credential/ARCProxyUtil.cpp:1503 #, c-format msgid "Certificate to use is: %s" msgstr "Используемый сертификат: %s" #: src/clients/credentials/arcproxy.cpp:1190 #: src/clients/credentials/arcproxy.cpp:1306 #: src/clients/credentials/arcproxyalt.cpp:539 #: src/clients/credentials/arcproxyalt.cpp:955 #: src/hed/libs/credential/ARCProxyUtil.cpp:1560 msgid "Proxy generation succeeded" msgstr "Доверенность успешно создана" #: src/clients/credentials/arcproxy.cpp:1191 #: src/clients/credentials/arcproxy.cpp:1307 #: src/clients/credentials/arcproxyalt.cpp:540 #: src/clients/credentials/arcproxyalt.cpp:956 #: src/hed/libs/credential/ARCProxyUtil.cpp:1561 #, c-format msgid "Your proxy is valid until: %s" msgstr "Ваша доверенность действительна до: %s" #: src/clients/credentials/arcproxy.cpp:1210 msgid "" "The old GSI proxies are not supported anymore. Please do not use -O/--old " "option." msgstr "" "Старые сертификаты доверенности GSI более не поддерживаются. Пожалуйста, не " "используйте опцию -O/--old." #: src/clients/credentials/arcproxy.cpp:1229 src/hed/mcc/tls/MCCTLS.cpp:167 #: src/hed/mcc/tls/MCCTLS.cpp:200 src/hed/mcc/tls/MCCTLS.cpp:226 msgid "VOMS attribute parsing failed" msgstr "Сбой обработки атрибутов VOMS" #: src/clients/credentials/arcproxy.cpp:1231 msgid "Myproxy server did not return proxy with VOMS AC included" msgstr "Сервер Myproxy не прислал сертификат с расширением VOMS AC" #: src/clients/credentials/arcproxy.cpp:1252 #: src/clients/credentials/arcproxyalt.cpp:892 #: src/hed/libs/credential/ARCProxyUtil.cpp:341 msgid "Proxy generation failed: No valid certificate found." msgstr "Сбой создания доверенности: Не обнаружено действительных сертификатов." #: src/clients/credentials/arcproxy.cpp:1258 #: src/clients/credentials/arcproxyalt.cpp:899 #: src/hed/libs/credential/ARCProxyUtil.cpp:348 msgid "Proxy generation failed: No valid private key found." 
msgstr "" "Сбой создания доверенности: Не обнаружено действительных закрытых ключей." #: src/clients/credentials/arcproxy.cpp:1263 #: src/clients/credentials/arcproxyalt.cpp:902 #: src/hed/libs/credential/ARCProxyUtil.cpp:173 #, c-format msgid "Your identity: %s" msgstr "Ваши личные данные: %s" #: src/clients/credentials/arcproxy.cpp:1265 #: src/clients/credentials/arcproxyalt.cpp:907 #: src/hed/libs/credential/ARCProxyUtil.cpp:356 msgid "Proxy generation failed: Certificate has expired." msgstr "Сбой создания доверенности: Срок действия сертификата истёк" #: src/clients/credentials/arcproxy.cpp:1269 #: src/clients/credentials/arcproxyalt.cpp:911 #: src/hed/libs/credential/ARCProxyUtil.cpp:361 msgid "Proxy generation failed: Certificate is not valid yet." msgstr "Сбой создания доверенности: Срок действия сертификата ещё не начался" #: src/clients/credentials/arcproxy.cpp:1280 msgid "Proxy generation failed: Failed to create temporary file." msgstr "Сбой создания доверенности: Сбой создания временного файла." #: src/clients/credentials/arcproxy.cpp:1288 msgid "Proxy generation failed: Failed to retrieve VOMS information." msgstr "Сбой создания доверенности: Сбой получения информации VOMS." #: src/clients/credentials/arcproxy_myproxy.cpp:100 #: src/clients/credentials/arcproxyalt.cpp:1312 #: src/hed/libs/credential/ARCProxyUtil.cpp:844 msgid "Succeeded to get info from MyProxy server" msgstr "Удалось получить информацию с сервера MyProxy" #: src/clients/credentials/arcproxy_myproxy.cpp:144 #: src/clients/credentials/arcproxyalt.cpp:1368 #: src/hed/libs/credential/ARCProxyUtil.cpp:900 msgid "Succeeded to change password on MyProxy server" msgstr "Удалось поменять пароль на сервере MyProxy" #: src/clients/credentials/arcproxy_myproxy.cpp:185 #: src/clients/credentials/arcproxyalt.cpp:1417 #: src/hed/libs/credential/ARCProxyUtil.cpp:949 msgid "Succeeded to destroy credential on MyProxy server" msgstr "Удалось уничтожить доверенность на сервере MyProxy" #: src/clients/credentials/arcproxy_myproxy.cpp:265 #: src/clients/credentials/arcproxyalt.cpp:1506 #: src/hed/libs/credential/ARCProxyUtil.cpp:1038 #, c-format msgid "Succeeded to get a proxy in %s from MyProxy server %s" msgstr "Удалось получить доверенность в %s с сервера MyProxy %s" #: src/clients/credentials/arcproxy_myproxy.cpp:318 #: src/clients/credentials/arcproxyalt.cpp:1565 #: src/hed/libs/credential/ARCProxyUtil.cpp:1097 msgid "Succeeded to put a proxy onto MyProxy server" msgstr "Удалось делегировать доверенность серверу MyProxy" #: src/clients/credentials/arcproxy_proxy.cpp:93 #: src/clients/credentials/arcproxyalt.cpp:1222 #: src/hed/libs/credential/ARCProxyUtil.cpp:403 #: src/hed/libs/credential/ARCProxyUtil.cpp:1410 msgid "Failed to add VOMS AC extension. Your proxy may be incomplete." msgstr "" "Сбой добавления расширения VOMS AC. Ваша доверенность может быть неполной." #: src/clients/credentials/arcproxy_voms.cpp:63 msgid "" "Failed to process VOMS configuration or no suitable configuration lines " "found." msgstr "" "Не удалось обработать настройки VOMS, или не найдены приемлемые строки " "конфигурации." 
#: src/clients/credentials/arcproxy_voms.cpp:75 #, c-format msgid "Failed to parse requested VOMS lifetime: %s" msgstr "Сбой разбора указанного времени действия VOMS: %s" #: src/clients/credentials/arcproxy_voms.cpp:93 #: src/clients/credentials/arcproxyalt.cpp:1051 #: src/hed/libs/credential/ARCProxyUtil.cpp:640 #, c-format msgid "Cannot get VOMS server address information from vomses line: \"%s\"" msgstr "Информация об адресе сервера VOMS отсутствует в строке: \"%s\"" #: src/clients/credentials/arcproxy_voms.cpp:97 #: src/clients/credentials/arcproxy_voms.cpp:99 #: src/clients/credentials/arcproxyalt.cpp:1061 #: src/clients/credentials/arcproxyalt.cpp:1063 #: src/hed/libs/credential/ARCProxyUtil.cpp:650 #: src/hed/libs/credential/ARCProxyUtil.cpp:652 #, c-format msgid "Contacting VOMS server (named %s): %s on port: %s" msgstr "Устанавливается связь с сервером VOMS (по имени %s): %s по порту: %s" #: src/clients/credentials/arcproxy_voms.cpp:105 #, c-format msgid "Failed to parse requested VOMS server port number: %s" msgstr "Сбой разбора указанного номера порта сервера VOMS: %s" #: src/clients/credentials/arcproxy_voms.cpp:122 msgid "List functionality is not supported for RESTful VOMS interface" msgstr "Перечисление не поддерживается для REST-интерфейса VOMS" #: src/clients/credentials/arcproxy_voms.cpp:132 #: src/clients/credentials/arcproxy_voms.cpp:188 #, c-format msgid "" "The VOMS server with the information:\n" "\t%s\n" "can not be reached, please make sure it is available." msgstr "" "Невозможно связаться с сервером VOMS с информацией:\n" "\t%s\n" "Пожалуйста, проверьте, доступен ли этот сервер." #: src/clients/credentials/arcproxy_voms.cpp:133 #: src/clients/credentials/arcproxy_voms.cpp:138 #: src/clients/credentials/arcproxy_voms.cpp:189 #: src/clients/credentials/arcproxy_voms.cpp:194 #, c-format msgid "" "Collected error is:\n" "\t%s" msgstr "" "Полученная ошибка:\n" "\t%s" #: src/clients/credentials/arcproxy_voms.cpp:137 #: src/clients/credentials/arcproxy_voms.cpp:193 #, c-format msgid "No valid response from VOMS server: %s" msgstr "Не получено приемлемого отзыва от сервера VOMS: %s" #: src/clients/credentials/arcproxy_voms.cpp:155 msgid "List functionality is not supported for legacy VOMS interface" msgstr "Перечисление не поддерживается для традиционного интерфейса VOMS" #: src/clients/credentials/arcproxy_voms.cpp:167 #, c-format msgid "Failed to parse VOMS command: %s" msgstr "Не удалось разобрать команду VOMS: %s" #: src/clients/credentials/arcproxy_voms.cpp:204 #, c-format msgid "" "There are %d servers with the same name: %s in your vomses file, but all of " "them can not be reached, or can not return valid message." msgstr "" "В Вашем файле vomses указаны %d серверов с одинаковым именем %s, но ни один " "из них не доступен или не отзывается правильно."
#: src/clients/credentials/arcproxyalt.cpp:108 #: src/hed/libs/credential/ARCProxyUtil.cpp:258 #, c-format msgid "OpenSSL error -- %s" msgstr "Ошибка OpenSSL -- %s" #: src/clients/credentials/arcproxyalt.cpp:109 #: src/hed/libs/credential/ARCProxyUtil.cpp:259 #, c-format msgid "Library : %s" msgstr "Библиотека: %s" #: src/clients/credentials/arcproxyalt.cpp:110 #: src/hed/libs/credential/ARCProxyUtil.cpp:260 #, c-format msgid "Function : %s" msgstr "Функция: %s" #: src/clients/credentials/arcproxyalt.cpp:111 #: src/hed/libs/credential/ARCProxyUtil.cpp:261 #, c-format msgid "Reason : %s" msgstr "Причина: %s" #: src/clients/credentials/arcproxyalt.cpp:167 #: src/hed/libs/credential/ARCProxyUtil.cpp:317 msgid "User interface error" msgstr "Ошибка интерфейса пользователя" #: src/clients/credentials/arcproxyalt.cpp:173 #: src/hed/libs/credential/ARCProxyUtil.cpp:323 msgid "Aborted!" msgstr "Исполнение прервано!" #: src/clients/credentials/arcproxyalt.cpp:319 msgid "" "Supported constraints are:\n" " validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start " "from now)\n" " validityEnd=time\n" " validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod and " "validityEnd\n" " not specified, the default is 12 hours for local proxy, and 168 hours for " "delegated\n" " proxy on myproxy server)\n" " vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, the " "default\n" " is the minimum value of 12 hours and validityPeriod)\n" " myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy " "server,\n" " e.g. 43200 or 12h or 12H; if not specified, the default is the minimum " "value of\n" " 12 hours and validityPeriod (which is lifetime of the delegated proxy on " "myproxy server))\n" " proxyPolicy=policy content\n" " proxyPolicyFile=policy file" msgstr "" "Поддерживаемые ограничения:\n" " validityStart=время (например, 2008-05-29T10:20:30Z; если не указано, то " "начинается немедленно)\n" " validityEnd=время\n" " validityPeriod=время (например, 43200, или 12h, или 12H; если не указаны " "ни validityPeriod,\n" " ни validityEnd, то срок действия по умолчанию составляет 12 часов для " "локальной доверенности,\n" " и 168 часов для делегированной доверенности на сервере MyProxy)\n" " vomsACvalidityPeriod=время (например, 43200, или 12h, или 12H; если не " "указано, то используется\n" " наименьшее между 12 часами и значением validityPeriod)\n" " myproxyvalidityPeriod=время (срок годности доверенности, делегированной " "через сервер MyProxy\n" " например, 43200, или 12h, или 12H; если не указано, то используется " "наименьшее между 12 часами\n" " и значением validityPeriod - сроком годности доверенности, " "делегированной через сервер MyProxy)\n" " proxyPolicy=содержимое политики\n" " proxyPolicyFile=файл политики" #: src/clients/credentials/arcproxyalt.cpp:388 msgid "" "print all information about this proxy. \n" " In order to show the Identity (DN without CN as suffix for " "proxy) \n" " of the certificate, the 'trusted certdir' is needed." msgstr "" "вывести всю информацию о данной доверенности. \n" " Для вывода персональной информации (DN без CN как суффикс " "доверенности) \n" " из сертификата, необходим 'trusted certdir'." #: src/clients/credentials/arcproxyalt.cpp:398 msgid "username to MyProxy server" msgstr "Имя пользователя сервера MyProxy" #: src/clients/credentials/arcproxyalt.cpp:422 msgid "" "command to MyProxy server. 
The command can be PUT or GET.\n" " PUT/put/Put -- put a delegated credential to the MyProxy " "server; \n" " GET/get/Get -- get a delegated credential from the MyProxy " "server, \n" " credential (certificate and key) is not needed in this case. \n" " MyProxy functionality can be used together with VOMS\n" " functionality.\n" msgstr "" "инструкция серверу MyProxy. Возможны две инструкции: PUT и GET:\n" " PUT/put/Put -- сохранить делегированный сертификат на сервере " "MyProxy;\n" " GET/get/Get -- получить делегированный сертификат с сервера " "MyProxy,\n" " в этом случае не требуются личные сертификаты и " "ключи.\n" " Инструкции MyProxy и VOMS могут использоваться одновременно.\n" #: src/clients/credentials/arcproxyalt.cpp:437 msgid "use NSS credential database in the Firefox profile" msgstr "использовать базу данных параметров доступа NSS из профиля Firefox" #: src/clients/credentials/arcproxyalt.cpp:1040 #: src/hed/libs/credential/ARCProxyUtil.cpp:629 #, c-format msgid "There are %d commands to the same VOMS server %s" msgstr "%d инструкций направлено на один и тот же сервер VOMS, %s" #: src/clients/credentials/arcproxyalt.cpp:1094 #: src/hed/libs/credential/ARCProxyUtil.cpp:683 #, c-format msgid "Try to get attribute from VOMS server with order: %s" msgstr "Попытка получить атрибут с сервера VOMS с порядком: %s" #: src/clients/credentials/arcproxyalt.cpp:1097 #: src/hed/libs/credential/ARCProxyUtil.cpp:686 #, c-format msgid "Message sent to VOMS server %s is: %s" msgstr "Сообщение, отправленное на сервер VOMS %s: %s " #: src/clients/credentials/arcproxyalt.cpp:1116 #: src/hed/libs/credential/ARCProxyUtil.cpp:705 #, c-format msgid "" "The VOMS server with the information:\n" "\t%s\n" "can not be reached, please make sure it is available" msgstr "" "Невозможно связаться с сервером VOMS с информацией:\n" "\t%s\n" "пожалуйста, проверьте, доступен ли этот сервер" #: src/clients/credentials/arcproxyalt.cpp:1120 #: src/hed/libs/credential/ARCProxyUtil.cpp:709 msgid "No HTTP response from VOMS server" msgstr "Сервер VOMS не отзывается по HTTP" #: src/clients/credentials/arcproxyalt.cpp:1125 #: src/clients/credentials/arcproxyalt.cpp:1151 #: src/hed/libs/credential/ARCProxyUtil.cpp:714 #: src/hed/libs/credential/ARCProxyUtil.cpp:740 #, c-format msgid "Returned message from VOMS server: %s" msgstr "Сообщение с сервера VOMS: %s " #: src/clients/credentials/arcproxyalt.cpp:1137 #: src/hed/libs/credential/ARCProxyUtil.cpp:726 #, c-format msgid "" "The VOMS server with the information:\n" "\t%s\"\n" "can not be reached, please make sure it is available" msgstr "" "Невозможно связаться с сервером VOMS с информацией:\n" "\t%s\"\n" "пожалуйста, проверьте, доступен ли этот сервер" #: src/clients/credentials/arcproxyalt.cpp:1141 #: src/hed/libs/credential/ARCProxyUtil.cpp:730 msgid "No stream response from VOMS server" msgstr "Сервер VOMS не отзывается" #: src/clients/credentials/arcproxyalt.cpp:1163 #: src/hed/libs/credential/ARCProxyUtil.cpp:752 #, c-format msgid "" "The validity duration of VOMS AC is shortened from %s to %s, due to the " "validity constraint on voms server side.\n" msgstr "" "Срок действия сертификата атрибута VOMS (AC) сокращён с %s до %s, в связи с " "ограничением со стороны сервера VOMS.\n" #: src/clients/credentials/arcproxyalt.cpp:1166 #: src/hed/libs/credential/ARCProxyUtil.cpp:755 #, c-format msgid "" "Cannot get any AC or attributes info from VOMS server: %s;\n" " Returned message from VOMS server: %s\n" msgstr "" "Невозможно получить сертификат атрибута (AC) или информацию 
об атрибутах с " "сервера VOMS: %s;\n" "       Сообщение, возвращённое сервером VOMS: %s\n" #: src/clients/credentials/arcproxyalt.cpp:1171 #: src/hed/libs/credential/ARCProxyUtil.cpp:760 #, c-format msgid "Returned message from VOMS server %s is: %s\n" msgstr "Сообщение, полученное с сервера VOMS %s: %s\n" #: src/clients/credentials/arcproxyalt.cpp:1193 #: src/hed/libs/credential/ARCProxyUtil.cpp:782 #, c-format msgid "The attribute information from VOMS server: %s is list as following:" msgstr "Информация об атрибутах с сервера VOMS: %s содержит:" #: src/clients/credentials/arcproxyalt.cpp:1205 #: src/hed/libs/credential/ARCProxyUtil.cpp:794 #, c-format msgid "" "There are %d servers with the same name: %s in your vomses file, but all of " "them can not be reached, or can not return valid message. But proxy without " "VOMS AC extension will still be generated." msgstr "" "В Вашем файле vomses указаны %d серверов с одинаковым именем %s, но ни один " "из них не доступен или не отзывается правильно. Тем не менее, доверенность " "будет создана, но без расширения VOMS AC." #: src/clients/credentials/arcproxyalt.cpp:1220 #, c-format msgid "Failed to add extension: %s" msgstr "Сбой добавления расширения: %s" #: src/clients/credentials/arcproxyalt.cpp:1238 #: src/hed/libs/credential/ARCProxyUtil.cpp:443 #: src/hed/libs/credential/Credential.cpp:884 #, c-format msgid "Error: can't open policy file: %s" msgstr "Ошибка: невозможно открыть файл политик: %s" #: src/clients/credentials/arcproxyalt.cpp:1248 #: src/hed/libs/credential/ARCProxyUtil.cpp:453 #: src/hed/libs/credential/Credential.cpp:897 #, c-format msgid "Error: policy location: %s is not a regular file" msgstr "Ошибка: местонахождение политик: %s не является стандартным файлом" #: src/clients/credentials/arcproxyalt.cpp:1600 msgid "" "$X509_VOMS_FILE, and $X509_VOMSES are not set;\n" "User has not specify the location for vomses information;\n" "There is also not vomses location information in user's configuration file;\n" "Cannot find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses, " "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/" "vomses, /etc/vomses, /etc/grid-security/vomses, and the location at the " "corresponding sub-directory" msgstr "" "$X509_VOMS_FILE и $X509_VOMSES не настроены;\n" "Пользователь не указал расположение файла vomses;\n" "Расположение файла vomses не найдено в файле настроек пользователя;\n" "Файл vomses не обнаружен в ~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/" "vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/vomses, /etc/vomses, /" "etc/grid-security/vomses, а также в соответствующих подкаталогах" #: src/clients/credentials/arcproxyalt.cpp:1640 #: src/hed/libs/credential/ARCProxyUtil.cpp:552 #, c-format msgid "VOMS line contains wrong number of tokens (%u expected): \"%s\"" msgstr "" "Строка VOMS содержит неверное количество токенов (ожидается %u): \"%s\"" #: src/clients/credentials/arcproxyalt.cpp:1684 #: src/hed/libs/credential/ARCProxyUtil.cpp:596 #, c-format msgid "Cannot get VOMS server %s information from the vomses files" msgstr "Невозможно получить информацию о сервере VOMS %s из файлов vomses" #: src/clients/credentials/test2myproxyserver_get.cpp:89 #: src/clients/credentials/test2myproxyserver_get.cpp:131 #: src/clients/credentials/test2myproxyserver_put.cpp:88 #: src/clients/credentials/test2myproxyserver_put.cpp:182 #: src/clients/credentials/test2vomsserver.cpp:101 msgid "No stream response" msgstr "Не получен ответ с сервера" #: src/clients/credentials/test2myproxyserver_get.cpp:104 #:
src/clients/credentials/test2myproxyserver_get.cpp:143 #: src/clients/credentials/test2myproxyserver_get.cpp:190 #: src/clients/credentials/test2myproxyserver_put.cpp:103 #: src/clients/credentials/test2myproxyserver_put.cpp:116 #: src/clients/credentials/test2myproxyserver_put.cpp:194 #, c-format msgid "Returned msg from myproxy server: %s %d" msgstr "Сервер myproxy возвратил следующее сообщение: %s %d" #: src/clients/credentials/test2myproxyserver_get.cpp:149 #, c-format msgid "There are %d certificates in the returned msg" msgstr "Ответное сообщение содержит %d сертификатов" #: src/clients/credentials/test2myproxyserver_put.cpp:135 msgid "Delegate proxy failed" msgstr "Не удалось делегирование доверенности" #: src/clients/credentials/test2vomsserver.cpp:116 #, c-format msgid "Returned msg from voms server: %s " msgstr "Сообщение с сервера VOMS: %s " #: src/clients/data/arccp.cpp:77 src/clients/data/arccp.cpp:330 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:426 #, c-format msgid "Current transfer FAILED: %s" msgstr "Текущая передача НЕ СОСТОЯЛАСЬ: %s" #: src/clients/data/arccp.cpp:79 src/clients/data/arccp.cpp:135 #: src/clients/data/arccp.cpp:332 src/clients/data/arcls.cpp:224 #: src/clients/data/arcmkdir.cpp:73 src/clients/data/arcrename.cpp:89 #: src/clients/data/arcrm.cpp:95 msgid "This seems like a temporary error, please try again later" msgstr "Похоже на временный сбой - пожалуйста, попытайтесь снова попозже" #: src/clients/data/arccp.cpp:87 src/clients/data/arccp.cpp:96 #, c-format msgid "Unable to copy %s" msgstr "Не удалось скопировать %s" #: src/clients/data/arccp.cpp:88 src/clients/data/arccp.cpp:97 #: src/clients/data/arcls.cpp:150 src/clients/data/arcls.cpp:159 #: src/clients/data/arcmkdir.cpp:55 src/clients/data/arcmkdir.cpp:64 #: src/clients/data/arcrename.cpp:67 src/clients/data/arcrename.cpp:76 #: src/clients/data/arcrm.cpp:68 src/clients/data/arcrm.cpp:80 msgid "Invalid credentials, please check proxy and/or CA certificates" msgstr "" "Недействительные реквизиты доступа, пожалуйста, проверьте сертификат " "доверенности и/или реквизиты органа сертификации" #: src/clients/data/arccp.cpp:94 src/clients/data/arcls.cpp:156 #: src/clients/data/arcmkdir.cpp:61 src/clients/data/arcrename.cpp:73 #: src/clients/data/arcrm.cpp:77 msgid "Proxy expired" msgstr "Срок действия доверенности вышел" #: src/clients/data/arccp.cpp:112 src/clients/data/arccp.cpp:116 #: src/clients/data/arccp.cpp:149 src/clients/data/arccp.cpp:153 #: src/clients/data/arccp.cpp:358 src/clients/data/arccp.cpp:363 #: src/clients/data/arcls.cpp:123 src/clients/data/arcmkdir.cpp:28 #: src/clients/data/arcrename.cpp:29 src/clients/data/arcrename.cpp:33 #: src/clients/data/arcrm.cpp:36 #, c-format msgid "Invalid URL: %s" msgstr "Неверный URL: %s" #: src/clients/data/arccp.cpp:128 msgid "Third party transfer is not supported for these endpoints" msgstr "Для этих точек входа сторонняя пересылка не поддерживается" #: src/clients/data/arccp.cpp:130 msgid "" "Protocol(s) not supported - please check that the relevant gfal2\n" " plugins are installed (gfal2-plugin-* packages)" msgstr "" "Протокол не поддерживается - пожалуйста, убедитесь что\n" " установлены необходимые подключаемые модули gfal2 (пакеты gfal2-" "plugin-*)" #: src/clients/data/arccp.cpp:133 #, c-format msgid "Transfer FAILED: %s" msgstr "Передача НЕ УДАЛАСЬ: %s" #: src/clients/data/arccp.cpp:161 src/clients/data/arccp.cpp:187 #: src/clients/data/arccp.cpp:374 src/clients/data/arccp.cpp:402 #, c-format msgid "Can't read list of sources from file %s" 
msgstr "Невозможно прочесть список источников из файла %s" #: src/clients/data/arccp.cpp:166 src/clients/data/arccp.cpp:202 #: src/clients/data/arccp.cpp:379 src/clients/data/arccp.cpp:418 #, c-format msgid "Can't read list of destinations from file %s" msgstr "Невозможно прочестьсписок назначений из файла %s" #: src/clients/data/arccp.cpp:171 src/clients/data/arccp.cpp:385 msgid "Numbers of sources and destinations do not match" msgstr "Число источников и число назначений не соответствуют друг другу" #: src/clients/data/arccp.cpp:216 msgid "Fileset registration is not supported yet" msgstr "Регистрация наборов файлов пока не поддерживается" #: src/clients/data/arccp.cpp:222 src/clients/data/arccp.cpp:295 #: src/clients/data/arccp.cpp:456 #, c-format msgid "Unsupported source url: %s" msgstr "Неподдерживаемый URL источника: %s" #: src/clients/data/arccp.cpp:226 src/clients/data/arccp.cpp:299 #, c-format msgid "Unsupported destination url: %s" msgstr "Неподдерживаемый URL назначения: %s" #: src/clients/data/arccp.cpp:233 msgid "" "For registration source must be ordinary URL and destination must be " "indexing service" msgstr "" "Для регистрации, источник должен быть задан обычным URL, а назначением " "должен быть каталог ресурсов" #: src/clients/data/arccp.cpp:243 #, c-format msgid "Could not obtain information about source: %s" msgstr "Не удалось получить информацию об источнике: %s" #: src/clients/data/arccp.cpp:250 msgid "" "Metadata of source does not match existing destination. Use the --force " "option to override this." msgstr "" "Метаданные источника и цели не совпадают. Используйте опцию --force для " "принудительного копирования." #: src/clients/data/arccp.cpp:262 msgid "Failed to accept new file/destination" msgstr "Сбой при приёме нового файла/направления" #: src/clients/data/arccp.cpp:268 src/clients/data/arccp.cpp:274 #, c-format msgid "Failed to register new file/destination: %s" msgstr "Сбой при регистрации нового файла/цели: %s" #: src/clients/data/arccp.cpp:436 msgid "Fileset copy to single object is not supported yet" msgstr "Копирование набора файлов в отдельный объект пока не поддерживается" #: src/clients/data/arccp.cpp:446 msgid "Can't extract object's name from source url" msgstr "Невозможно извлечь имя объекта из URL источника" #: src/clients/data/arccp.cpp:465 #, c-format msgid "%s. Cannot copy fileset" msgstr "%s. Невозможно скопировать набор файлов" #: src/clients/data/arccp.cpp:475 src/hed/libs/compute/ExecutionTarget.cpp:254 #: src/hed/libs/compute/ExecutionTarget.cpp:326 #, c-format msgid "Name: %s" msgstr "Имя: %s" #: src/clients/data/arccp.cpp:478 #, c-format msgid "Source: %s" msgstr "Источник: %s" #: src/clients/data/arccp.cpp:479 #, c-format msgid "Destination: %s" msgstr "Назначение: %s" #: src/clients/data/arccp.cpp:485 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:433 msgid "Current transfer complete" msgstr "Текущая передача завершена" #: src/clients/data/arccp.cpp:488 msgid "Some transfers failed" msgstr "Некоторые загрузки не удались" #: src/clients/data/arccp.cpp:498 #, c-format msgid "Directory: %s" msgstr "Каталог: %s" #: src/clients/data/arccp.cpp:518 msgid "Transfer complete" msgstr "Передача данных завершена" #: src/clients/data/arccp.cpp:537 msgid "source destination" msgstr "источник назначение" #: src/clients/data/arccp.cpp:538 msgid "" "The arccp command copies files to, from and between grid storage elements." msgstr "" "Команда arccp копирует файлы на, с и между запоминающими устройствами Грид." 
#: src/clients/data/arccp.cpp:543 msgid "" "use passive transfer (off by default if secure is on, on by default if " "secure is not requested)" msgstr "" "использовать пассивную передачу данных (по умолчанию, опция отключена при " "защищённой передаче, и включена при незащищённой)" #: src/clients/data/arccp.cpp:549 msgid "do not try to force passive transfer" msgstr "не пытаться форсировать пассивный способ передачи данных" #: src/clients/data/arccp.cpp:554 msgid "" "if the destination is an indexing service and not the same as the source and " "the destination is already registered, then the copy is normally not done. " "However, if this option is specified the source is assumed to be a replica " "of the destination created in an uncontrolled way and the copy is done like " "in case of replication. Using this option also skips validation of completed " "transfers." msgstr "" "если назначением задан индексирующий сервис, отличный от источника, и это " "назначение уже зарегистрировано, копирование обычно не допускается. В случае " "же, когда указана эта опция, источник рассматривается как неофициальная " "копия зарегистрированного файла, и копирование производится как в случае " "тиражирования. При использовании этой опции пропускается сверка завершённых " "передач." #: src/clients/data/arccp.cpp:567 msgid "show progress indicator" msgstr "показать индикатор выполнения" #: src/clients/data/arccp.cpp:572 msgid "" "do not transfer, but register source into destination. destination must be a " "meta-url." msgstr "" "зарегистрировать файл, не передавая его - назначением должен быть мета-URL" #: src/clients/data/arccp.cpp:578 msgid "use secure transfer (insecure by default)" msgstr "" "использовать защищённую передачу данных (передача не защищена по умолчанию)" #: src/clients/data/arccp.cpp:583 msgid "path to local cache (use to put file into cache)" msgstr "путь к локальному кэшу (используется для записи файла в кэш)" #: src/clients/data/arccp.cpp:588 src/clients/data/arcls.cpp:300 msgid "operate recursively" msgstr "обработать рекурсивно" #: src/clients/data/arccp.cpp:593 src/clients/data/arcls.cpp:305 msgid "operate recursively up to specified level" msgstr "рекурсивное исполнение до указанного уровня" #: src/clients/data/arccp.cpp:594 src/clients/data/arcls.cpp:306 msgid "level" msgstr "уровень" #: src/clients/data/arccp.cpp:598 msgid "number of retries before failing file transfer" msgstr "количество попыток передачи файла" #: src/clients/data/arccp.cpp:599 msgid "number" msgstr "число" #: src/clients/data/arccp.cpp:603 msgid "" "physical location to write to when destination is an indexing service. Must " "be specified for indexing services which do not automatically generate " "physical locations. Can be specified multiple times - locations will be " "tried in order until one succeeds." msgstr "" "физический адрес для записи, если в качестве назначения указан каталог " "ресурсов. Должен быть указан для каталогов, не генерирующих физические " "адреса автоматически. Несколько значений может быть указано - адреса будут " "перебираться, пока не будет достигнут успех." 
#: src/clients/data/arccp.cpp:611 msgid "" "perform third party transfer, where the destination pulls from the source " "(only available with GFAL plugin)" msgstr "" "выполнить стороннюю пересылку, когда назначение закачивает файл из источника " "(доступно только с модулем GFAL)" #: src/clients/data/arccp.cpp:617 src/clients/data/arcls.cpp:322 #: src/clients/data/arcmkdir.cpp:101 src/clients/data/arcrename.cpp:112 #: src/clients/data/arcrm.cpp:127 msgid "list the available plugins (protocols supported)" msgstr "Показать список доступных модулей (поддерживаемые протоколы)" #: src/clients/data/arccp.cpp:656 src/clients/data/arcls.cpp:362 #: src/clients/data/arcmkdir.cpp:141 src/clients/data/arcrename.cpp:152 #: src/clients/data/arcrm.cpp:168 msgid "Protocol plugins available:" msgstr "Доступны модули для следующих протоколов:" #: src/clients/data/arccp.cpp:681 src/clients/data/arcls.cpp:387 #: src/clients/data/arcmkdir.cpp:165 src/clients/data/arcrename.cpp:175 #: src/clients/data/arcrm.cpp:193 msgid "Wrong number of parameters specified" msgstr "Задано неверное количество параметров" #: src/clients/data/arccp.cpp:686 msgid "Options 'p' and 'n' can't be used simultaneously" msgstr "Опции 'p' и 'n' не могут быть использованы одновременно" #: src/clients/data/arcls.cpp:129 src/clients/data/arcmkdir.cpp:34 #: src/clients/data/arcrm.cpp:43 #, c-format msgid "Can't read list of locations from file %s" msgstr "Невозможно прочесть список адресов из файла %s" #: src/clients/data/arcls.cpp:144 src/clients/data/arcmkdir.cpp:49 #: src/clients/data/arcrename.cpp:61 msgid "Unsupported URL given" msgstr "Заданный URL не поддерживается" #: src/clients/data/arcls.cpp:149 src/clients/data/arcls.cpp:158 #, c-format msgid "Unable to list content of %s" msgstr "Не удалось просмотреть содержимое %s" #: src/clients/data/arcls.cpp:227 msgid "Warning: Failed listing files but some information is obtained" msgstr "" "Предупреждение: Не удалось вывести список файлов, но некоторая информация " "была получена" #: src/clients/data/arcls.cpp:281 src/clients/data/arcmkdir.cpp:90 msgid "url" msgstr "URL" #: src/clients/data/arcls.cpp:282 msgid "" "The arcls command is used for listing files in grid storage elements and " "file\n" "index catalogues." msgstr "" "Команда arcls используется для просмотра информации о файлах,\n" "хранящихся на накопительных устройствах Грид, а также в занесённых\n" "в каталоги данных." 
#: src/clients/data/arcls.cpp:291 msgid "show URLs of file locations" msgstr "вывести адреса физических файлов" #: src/clients/data/arcls.cpp:295 msgid "display all available metadata" msgstr "показать все доступные метаданные" #: src/clients/data/arcls.cpp:309 msgid "" "show only description of requested object, do not list content of directories" msgstr "" "показывать только описание запрашиваемого объекта, не выводить содержимое " "каталогов" #: src/clients/data/arcls.cpp:313 msgid "treat requested object as directory and always try to list content" msgstr "" "интерпретировать запрошенный объект как каталог, и всегда пытаться вывести " "его содержимое" #: src/clients/data/arcls.cpp:317 msgid "check readability of object, does not show any information about object" msgstr "проверить читаемость объекта, не показывать информацию об объекте" #: src/clients/data/arcls.cpp:392 msgid "Incompatible options --nolist and --forcelist requested" msgstr "Запрошены несовместимые опции --nolist и --forcelist" #: src/clients/data/arcls.cpp:397 msgid "Requesting recursion and --nolist has no sense" msgstr "Запрос рекурсивного просмотра и --nolist не имеет смысла" #: src/clients/data/arcmkdir.cpp:54 src/clients/data/arcmkdir.cpp:63 #, c-format msgid "Unable to create directory %s" msgstr "Не удалось создать каталог %s" #: src/clients/data/arcmkdir.cpp:91 msgid "" "The arcmkdir command creates directories on grid storage elements and " "catalogs." msgstr "" "Команда arcmkdir создаёт директории на грид-хранилищах и в каталогах данных" #: src/clients/data/arcmkdir.cpp:96 msgid "make parent directories as needed" msgstr "создавать родительские директории по мере необходимости" #: src/clients/data/arcrename.cpp:41 msgid "Both URLs must have the same protocol, host and port" msgstr "Оба URL должны содержать одинаковый протокол, адрес сервера и порт" #: src/clients/data/arcrename.cpp:51 msgid "Cannot rename to or from root directory" msgstr "Невозможно переместить в корневую директорию или из неё" #: src/clients/data/arcrename.cpp:55 msgid "Cannot rename to the same URL" msgstr "Невозможно переименовать в идентичный URL" #: src/clients/data/arcrename.cpp:66 src/clients/data/arcrename.cpp:75 #, c-format msgid "Unable to rename %s" msgstr "Не удалось переименовать %s" #: src/clients/data/arcrename.cpp:106 msgid "old_url new_url" msgstr "old_url new_url" #: src/clients/data/arcrename.cpp:107 msgid "The arcrename command renames files on grid storage elements." msgstr "Команда arcrename переименовывает файлы на запоминающих устройствах." #: src/clients/data/arcrm.cpp:58 #, c-format msgid "Unsupported URL given: %s" msgstr "Заданный URL не поддерживается: %s" #: src/clients/data/arcrm.cpp:67 src/clients/data/arcrm.cpp:79 #, c-format msgid "Unable to remove file %s" msgstr "Не удалось удалить файл %s" #: src/clients/data/arcrm.cpp:115 msgid "url [url ...]" msgstr "url [url ...]" #: src/clients/data/arcrm.cpp:116 msgid "The arcrm command deletes files and on grid storage elements." msgstr "Команда arcrm удаляет файлы с запоминающих устройств." #: src/clients/data/arcrm.cpp:121 msgid "" "remove logical file name registration even if not all physical instances " "were removed" msgstr "" "удалить логическое имя файла, даже если не все физические копии удалены" #: src/clients/echo/arcecho.cpp:32 msgid "service message" msgstr "сообщение службы" #: src/clients/echo/arcecho.cpp:33 msgid "The arcecho command is a client for the ARC echo service." msgstr "Команда arcecho является клиентским приложением службы ARC echo." 
#: src/clients/echo/arcecho.cpp:35 msgid "" "The service argument is a URL to an ARC echo service.\n" "The message argument is the message the service should return." msgstr "" "Аргументом службы должен быть URL эхо-сервера ARC.\n" "Аргументом сообщения должно быть сообщение, которое этот сервер должен " "возвратить." #: src/clients/echo/arcecho.cpp:105 src/hed/dmc/arc/DataPointARC.cpp:169 #: src/hed/dmc/arc/DataPointARC.cpp:222 src/hed/dmc/arc/DataPointARC.cpp:304 #: src/hed/dmc/arc/DataPointARC.cpp:415 src/hed/dmc/arc/DataPointARC.cpp:510 #: src/hed/dmc/arc/DataPointARC.cpp:574 src/hed/dmc/arc/DataPointARC.cpp:624 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:100 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:174 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:240 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:326 #, c-format msgid "" "Request:\n" "%s" msgstr "" "Запрос:\n" "%s" #: src/clients/echo/arcecho.cpp:119 src/hed/dmc/arc/DataPointARC.cpp:182 #: src/hed/dmc/arc/DataPointARC.cpp:235 src/hed/dmc/arc/DataPointARC.cpp:320 #: src/hed/dmc/arc/DataPointARC.cpp:431 src/hed/dmc/arc/DataPointARC.cpp:524 #: src/hed/dmc/arc/DataPointARC.cpp:587 src/hed/dmc/arc/DataPointARC.cpp:638 #: src/hed/dmc/srm/srmclient/SRMClient.cpp:164 msgid "No SOAP response" msgstr "Нет ответа SOAP" #: src/clients/echo/arcecho.cpp:124 src/hed/acc/UNICORE/UNICOREClient.cpp:531 #: src/hed/dmc/arc/DataPointARC.cpp:187 src/hed/dmc/arc/DataPointARC.cpp:240 #: src/hed/dmc/arc/DataPointARC.cpp:325 src/hed/dmc/arc/DataPointARC.cpp:436 #: src/hed/dmc/arc/DataPointARC.cpp:529 src/hed/dmc/arc/DataPointARC.cpp:592 #: src/hed/dmc/arc/DataPointARC.cpp:643 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:119 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:193 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:267 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:346 #, c-format msgid "" "Response:\n" "%s" msgstr "" "Отзыв:\n" "%s" #: src/clients/saml/saml_assertion_init.cpp:43 msgid "service_url" msgstr "service_url" #: src/clients/saml/saml_assertion_init.cpp:47 msgid "path to config file" msgstr "путь к файлу настроек" #: src/clients/saml/saml_assertion_init.cpp:140 msgid "SOAP Request failed: No response" msgstr "Сбой запроса SOAP: Нет ответа" #: src/clients/saml/saml_assertion_init.cpp:144 msgid "SOAP Request failed: Error" msgstr "Сбой запроса SOAP: Ошибка" #: src/clients/saml/saml_assertion_init.cpp:150 msgid "No in SOAP response" msgstr "Отзыв SOAP не содержит " #: src/clients/saml/saml_assertion_init.cpp:156 msgid "No in SAML response" msgstr " В отклике SAML отсутствует " #: src/clients/saml/saml_assertion_init.cpp:168 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:321 msgid "Succeeded to verify the signature under " msgstr "Подпись успешно подтверждена" #: src/clients/saml/saml_assertion_init.cpp:171 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:324 msgid "Failed to verify the signature under " msgstr "Подпись не подтверждена" #: src/clients/wsrf/arcwsrf.cpp:39 msgid "URL [query]" msgstr "URL [запрос]" #: src/clients/wsrf/arcwsrf.cpp:40 msgid "" "The arcwsrf command is used for obtaining the WS-ResourceProperties of\n" "services." msgstr "" "Команда arcwsrf используется для получения значений WS-ResourceProperties\n" "различных служб." 
#: src/clients/wsrf/arcwsrf.cpp:46 msgid "Request for specific Resource Property" msgstr "Запрос заданного свойства ресурса" #: src/clients/wsrf/arcwsrf.cpp:47 msgid "[-]name" msgstr "[-]имя" #: src/clients/wsrf/arcwsrf.cpp:80 msgid "Missing URL" msgstr "Отсутствует URL" #: src/clients/wsrf/arcwsrf.cpp:85 msgid "Too many parameters" msgstr "Слишком много параметров" #: src/clients/wsrf/arcwsrf.cpp:123 msgid "Query is not a valid XML" msgstr "Запрос не является корректным XML" #: src/clients/wsrf/arcwsrf.cpp:138 msgid "Failed to create WSRP request" msgstr "Не удалось создать запрос WSRP" #: src/clients/wsrf/arcwsrf.cpp:145 msgid "Specified URL is not valid" msgstr "Указанный адрес недействителен" #: src/clients/wsrf/arcwsrf.cpp:157 msgid "Failed to send request" msgstr "Не удалось отправить запрос" #: src/clients/wsrf/arcwsrf.cpp:161 msgid "Failed to obtain SOAP response" msgstr "Не удалось получить отзыв SOAP" #: src/clients/wsrf/arcwsrf.cpp:167 msgid "SOAP fault received" msgstr "Получена ошибка SOAP" #: src/hed/acc/ARC0/FTPControl.cpp:142 #, c-format msgid "Connect: Failed to init handle: %s" msgstr "Соединение: Не удалось инициализировать ссылку: %s" #: src/hed/acc/ARC0/FTPControl.cpp:148 #, c-format msgid "Failed to enable IPv6: %s" msgstr "Сбой включения IPv6: %s" #: src/hed/acc/ARC0/FTPControl.cpp:158 src/hed/acc/ARC0/FTPControl.cpp:172 #, c-format msgid "Connect: Failed to connect: %s" msgstr "Соединение: Сбой соединения с: %s" #: src/hed/acc/ARC0/FTPControl.cpp:165 #, c-format msgid "Connect: Connecting timed out after %d ms" msgstr "Соединение: Время ожидания соединения истекло после %d мс" #: src/hed/acc/ARC0/FTPControl.cpp:185 #, c-format msgid "Connect: Failed to init auth info handle: %s" msgstr "" "Соединение: Сбой инициализации идентификатора информации проверки " "подлинности: %s" #: src/hed/acc/ARC0/FTPControl.cpp:196 src/hed/acc/ARC0/FTPControl.cpp:210 #, c-format msgid "Connect: Failed authentication: %s" msgstr "Соединение: Ошибка проверки подлинности: %s" #: src/hed/acc/ARC0/FTPControl.cpp:203 #, c-format msgid "Connect: Authentication timed out after %d ms" msgstr "Соединение: Время ожидания проверки подлинности истекло после %d мс" #: src/hed/acc/ARC0/FTPControl.cpp:224 src/hed/acc/ARC0/FTPControl.cpp:256 #, c-format msgid "SendCommand: Command: %s" msgstr "SendCommand: Команда: %s" #: src/hed/acc/ARC0/FTPControl.cpp:229 src/hed/acc/ARC0/FTPControl.cpp:240 #: src/hed/acc/ARC0/FTPControl.cpp:260 src/hed/acc/ARC0/FTPControl.cpp:271 #, c-format msgid "SendCommand: Failed: %s" msgstr "Отправка команды: Сбой: %s" #: src/hed/acc/ARC0/FTPControl.cpp:235 src/hed/acc/ARC0/FTPControl.cpp:266 #, c-format msgid "SendCommand: Timed out after %d ms" msgstr "Отправка команды: Время ожидания истекло после %d мс" #: src/hed/acc/ARC0/FTPControl.cpp:243 src/hed/acc/ARC0/FTPControl.cpp:276 #, c-format msgid "SendCommand: Response: %s" msgstr "SendCommand: Отзыв: %s" #: src/hed/acc/ARC0/FTPControl.cpp:293 msgid "SendData: Failed sending EPSV and PASV commands" msgstr "SendData: Сбой отсылки команд EPSV и PASV" #: src/hed/acc/ARC0/FTPControl.cpp:298 src/hed/acc/ARC0/FTPControl.cpp:304 #: src/hed/acc/ARC0/FTPControl.cpp:320 #, c-format msgid "SendData: Server PASV response parsing failed: %s" msgstr "SendData: Сбой разбора отзыва сервера PASV: %s" #: src/hed/acc/ARC0/FTPControl.cpp:330 src/hed/acc/ARC0/FTPControl.cpp:336 #: src/hed/acc/ARC0/FTPControl.cpp:343 src/hed/acc/ARC0/FTPControl.cpp:350 #, c-format msgid "SendData: Server EPSV response parsing failed: %s" msgstr
"SendData: Сбой разбора отзыва сервера EPSV: %s" #: src/hed/acc/ARC0/FTPControl.cpp:357 #, c-format msgid "SendData: Server EPSV response port parsing failed: %s" msgstr "SendData: Сбой разбора порта отзыва сервера EPSV: %s" #: src/hed/acc/ARC0/FTPControl.cpp:366 #, c-format msgid "SendData: Failed to apply local address to data connection: %s" msgstr "" "SendData: Не удалось применить локальный адрес к каналу передачи данных: %s" #: src/hed/acc/ARC0/FTPControl.cpp:372 #, c-format msgid "SendData: Can't parse host and/or port in response to EPSV/PASV: %s" msgstr "" "SendData: Не удалось извлечь адрес узла и/или номер порта из ответа на " "запрос EPSV/PASV: %s" #: src/hed/acc/ARC0/FTPControl.cpp:377 #, c-format msgid "SendData: Data channel: %d.%d.%d.%d:%d" msgstr "SendData: Канал передачи данных: %d.%d.%d.%d:%d" #: src/hed/acc/ARC0/FTPControl.cpp:393 #, c-format msgid "SendData: Data channel: [%s]:%d" msgstr "SendData: Канал передачи данных: [%s]:%d" #: src/hed/acc/ARC0/FTPControl.cpp:398 #, c-format msgid "SendData: Local port failed: %s" msgstr "Отправка данных: Сбой локального порта: %s" #: src/hed/acc/ARC0/FTPControl.cpp:422 msgid "SendData: Failed sending DCAU command" msgstr "Отправка команды: Сбой отправки команды DCAU" #: src/hed/acc/ARC0/FTPControl.cpp:427 msgid "SendData: Failed sending TYPE command" msgstr "Отправка команды: Сбой отправки команды TYPE" #: src/hed/acc/ARC0/FTPControl.cpp:436 #, c-format msgid "SendData: Local type failed: %s" msgstr "Отправка данных: Сбой локального типа: %s" #: src/hed/acc/ARC0/FTPControl.cpp:446 #, c-format msgid "SendData: Failed sending STOR command: %s" msgstr "Отправка данных: Сбой отправки команды STOR: %s" #: src/hed/acc/ARC0/FTPControl.cpp:454 src/hed/acc/ARC0/FTPControl.cpp:475 #, c-format msgid "SendData: Data connect write failed: %s" msgstr "Отправка данных: Сбой соединения и записи данных: %s" #: src/hed/acc/ARC0/FTPControl.cpp:461 src/hed/acc/ARC0/FTPControl.cpp:469 #, c-format msgid "SendData: Data connect write timed out after %d ms" msgstr "" "Пересылка данных: Время ожидания контакта и записи данных истекло после %d мс" #: src/hed/acc/ARC0/FTPControl.cpp:487 src/hed/acc/ARC0/FTPControl.cpp:507 #, c-format msgid "SendData: Data write failed: %s" msgstr "Отправка данных: Сбой записи данных: %s" #: src/hed/acc/ARC0/FTPControl.cpp:493 src/hed/acc/ARC0/FTPControl.cpp:501 #, c-format msgid "SendData: Data write timed out after %d ms" msgstr "Пересылка данных: Время ожидания записи данных истекло после %d мс" #: src/hed/acc/ARC0/FTPControl.cpp:527 src/hed/acc/ARC0/FTPControl.cpp:538 #, c-format msgid "Disconnect: Failed aborting - ignoring: %s" msgstr "Отключение: Сбой прерывания - игнорируется: %s" #: src/hed/acc/ARC0/FTPControl.cpp:530 #, c-format msgid "Disconnect: Data close timed out after %d ms" msgstr "Отключение: Время ожидания закрытия данных истекло после %d мс" #: src/hed/acc/ARC0/FTPControl.cpp:541 #, c-format msgid "Disconnect: Abort timed out after %d ms" msgstr "Отключение: Время ожидания прерывания истекло после %d мс" #: src/hed/acc/ARC0/FTPControl.cpp:549 #, c-format msgid "Disconnect: Failed quitting - ignoring: %s" msgstr "Отключение: Сбой выхода - игнорируется: %s" #: src/hed/acc/ARC0/FTPControl.cpp:552 #, c-format msgid "Disconnect: Quitting timed out after %d ms" msgstr "Отключение: Время ожидания выхода истекло после %d мс" #: src/hed/acc/ARC0/FTPControl.cpp:561 #, c-format msgid "Disconnect: Failed closing - ignoring: %s" msgstr "Отключение: Сбой отключения - игнорируется: %s" #: 
src/hed/acc/ARC0/FTPControl.cpp:567 #, c-format msgid "Disconnect: Closing timed out after %d ms" msgstr "Отключение: Время ожидания отключения истекло после %d мс" #: src/hed/acc/ARC0/FTPControl.cpp:582 msgid "Disconnect: waiting for globus handle to settle" msgstr "Отключение: ожидание стабилизации ссылки globus" #: src/hed/acc/ARC0/FTPControl.cpp:596 msgid "Disconnect: globus handle is stuck." msgstr "Отключение: ссылка globus застряла." #: src/hed/acc/ARC0/FTPControl.cpp:604 #, c-format msgid "Disconnect: Failed destroying handle: %s. Can't handle such situation." msgstr "" "Отключение: Сбой уничтожения ссылки: %s. Невозможно справиться с таким " "положением." #: src/hed/acc/ARC0/FTPControl.cpp:607 msgid "Disconnect: handle destroyed." msgstr "Отключение: ссылка уничтожена." #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:47 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:44 msgid "" "Missing reference to factory and/or module. It is unsafe to use Globus in " "non-persistent mode - SubmitterPlugin for ARC0 is disabled. Report to " "developers." msgstr "" "Отсутствует указание на фабрику и/или модуль. Использование Globus во " "временном режиме небезопасно - SubmitterPlugin для ARC0 отключён. Сообщите " "разработчикам." #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:62 #, c-format msgid "Unable to query job information (%s), invalid URL provided (%s)" msgstr "" "Невозможно опросить информацию о задаче (%s), задан недопустимый URL (%s)" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:74 #, c-format msgid "Jobs left to query: %d" msgstr "Неопрошенных задач: %d" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:83 #, c-format msgid "Querying batch with %d jobs" msgstr "Опрашивается список из %d задач(и)" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:99 msgid "Can't create information handle - is the ARC LDAP DMC plugin available?"
msgstr "" "Не удалось создать ссылку для информации - проверьте, доступен ли " "подгружаемый модуль ARC LDAP DMC" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:132 #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:47 #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:36 #, c-format msgid "Job information not found in the information system: %s" msgstr "Информация о задаче в информационной системе не обнаружена: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:134 msgid "" "This job was very recently submitted and might not yet have reached the " "information system" msgstr "" "Эта задача была запущена лишь недавно, и может быть ещё не зарегистрирована " "в системе" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:236 #, c-format msgid "Cleaning job: %s" msgstr "Удаляется задача: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:240 msgid "Failed to connect for job cleaning" msgstr "Не удалось соединиться для очистки задачи" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:252 msgid "Failed sending CWD command for job cleaning" msgstr "Не удалось отправить инструкцию CWD для очистки задачи" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:259 msgid "Failed sending RMD command for job cleaning" msgstr "Не удалось отправить инструкцию RMD для очистки задачи" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:266 msgid "Failed to disconnect after job cleaning" msgstr "Не удалось отсоединиться после очистки задачи" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:273 msgid "Job cleaning successful" msgstr "Задача успешно удалена" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:284 #, c-format msgid "Cancelling job: %s" msgstr "Прерывание задачи: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:288 msgid "Failed to connect for job cancelling" msgstr "Сбой соединения для прерывания задачи" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:300 msgid "Failed sending CWD command for job cancelling" msgstr "Сбой отправки инструкции CWD для прерывания задачи" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:307 msgid "Failed sending DELE command for job cancelling" msgstr "Сбой отправки инструкции DELE для прерывания задачи" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:314 msgid "Failed to disconnect after job cancelling" msgstr "Сбой отсоединения после прерывания задачи" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:322 msgid "Job cancelling successful" msgstr "Задача успешно оборвана" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:333 #, c-format msgid "Renewing credentials for job: %s" msgstr "Обновление параметров доступа для задачи: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:337 msgid "Failed to connect for credential renewal" msgstr "Сбой установления связи для обновления параметров доступа" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:349 #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:356 msgid "Failed sending CWD command for credentials renewal" msgstr "Сбой отправки инструкции CWD для обновления параметров доступа" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:362 msgid "Failed to disconnect after credentials renewal" msgstr "Сбой отсоединения после обновления параметров доступа" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:369 msgid "Renewal of credentials was successful" msgstr "Параметры доступа успешно обновлены" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:381 #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:111 #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:192 #, c-format msgid "Job %s does not report a resumable state" msgstr "Задача %s не находится в 
возобновляемом состоянии" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:391 #, c-format msgid "Illegal jobID specified (%s)" msgstr "Задан недопустимый ярлык задачи (%s)" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:398 #, c-format msgid "HER: %s" msgstr "HER: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:404 #, c-format msgid "Could not create temporary file: %s" msgstr "Не удалось создать временный файл: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:437 #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:131 #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:210 msgid "Job resuming successful" msgstr "Задача успешно возобновлена" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:473 #, c-format msgid "Trying to retrieve job description of %s from computing resource" msgstr "Попытка получения описания задачи %s с вычислительного ресурса" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:478 #, c-format msgid "invalid jobID: %s" msgstr "Недействительный ярлык задачи: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:520 msgid "clientxrsl found" msgstr "найден оригинал описания задачи в формате XRSL" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:523 msgid "could not find start of clientxrsl" msgstr "невозможно найти начало описания задачи в формате XRSL" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:528 msgid "could not find end of clientxrsl" msgstr "невозможно найти конец описания задачи в формате XRSL" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:541 #, c-format msgid "Job description: %s" msgstr "Описание задачи: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:544 msgid "clientxrsl not found" msgstr "оригинал описания задачи в формате XRSL не найден" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:550 #, c-format msgid "Invalid JobDescription: %s" msgstr "Неверный элемент JobDescription: %s" #: src/hed/acc/ARC0/JobControllerPluginARC0.cpp:553 msgid "Valid JobDescription found" msgstr "Обнаружено действительное описание JobDescription" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:60 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:204 msgid "Submit: Failed to connect" msgstr "Засылка: Сбой связи" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:68 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:212 msgid "Submit: Failed sending CWD command" msgstr "Засылка: Сбой отправки команды CWD" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:79 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:223 msgid "Submit: Failed sending CWD new command" msgstr "Засылка: Сбой отправки команды CWD new" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:106 msgid "Failed to prepare job description." msgstr "Сбой подготовки описания задачи." #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:116 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:260 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:63 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:158 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:87 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:401 #, c-format msgid "Unable to submit job. Job description is not valid in the %s format: %s" msgstr "" "Невозможно заслать задачу. 
Описание задачи в формате %s недействительно: %s" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:123 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:267 msgid "Submit: Failed sending job description" msgstr "Засылка: Сбой отправки описания задачи" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:138 #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:282 msgid "Submit: Failed uploading local input files" msgstr "Засылка: Сбой выгрузки локальных входных файлов" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:193 msgid "" "Submit: service has no suitable information interface - need org.nordugrid." "ldapng" msgstr "" "Засылка: сервис не предоставляет подходящего информационного интерфейса - " "нужен org.nordugrid.ldapng" #: src/hed/acc/ARC0/SubmitterPluginARC0.cpp:250 msgid "Failed to prepare job description to target resources." msgstr "Сбой подготовки описания задачи для засылки по назначению." #: src/hed/acc/ARC1/AREXClient.cpp:58 msgid "Creating an A-REX client" msgstr "Создаётся клиент A-REX" #: src/hed/acc/ARC1/AREXClient.cpp:61 msgid "Unable to create SOAP client used by AREXClient." msgstr "Не удалось создать клиент SOAP использующийся AREXClient." #: src/hed/acc/ARC1/AREXClient.cpp:85 msgid "Failed locating credentials." msgstr "Сбой обнаружения параметров доступа." #: src/hed/acc/ARC1/AREXClient.cpp:94 msgid "Failed initiate client connection." msgstr "Сбой инициализации соединения с клиентом" #: src/hed/acc/ARC1/AREXClient.cpp:102 msgid "Client connection has no entry point." msgstr "Отсутствует точка входа в клиентскую цепь." #: src/hed/acc/ARC1/AREXClient.cpp:113 src/hed/acc/EMIES/EMIESClient.cpp:130 #: src/hed/acc/UNICORE/UNICOREClient.cpp:191 #: src/hed/acc/UNICORE/UNICOREClient.cpp:222 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:505 #: src/services/a-rex/test.cpp:86 msgid "Initiating delegation procedure" msgstr "Инициализация процедуры делегирования:" #: src/hed/acc/ARC1/AREXClient.cpp:115 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:507 msgid "Failed to initiate delegation credentials" msgstr "Сбой инициализации параметров доступа для делегирования" #: src/hed/acc/ARC1/AREXClient.cpp:128 msgid "Re-creating an A-REX client" msgstr "Воссоздаётся клиент A-REX" #: src/hed/acc/ARC1/AREXClient.cpp:146 msgid "AREXClient was not created properly." msgstr "AREXClient не был создан надлежащим образом." #: src/hed/acc/ARC1/AREXClient.cpp:151 src/hed/acc/EMIES/EMIESClient.cpp:174 #, c-format msgid "Processing a %s request" msgstr "Обработка запроса %s" #: src/hed/acc/ARC1/AREXClient.cpp:173 src/hed/acc/CREAM/CREAMClient.cpp:134 #: src/hed/acc/EMIES/EMIESClient.cpp:180 #, c-format msgid "%s request failed" msgstr "Запрос %s не выполнен" #: src/hed/acc/ARC1/AREXClient.cpp:181 src/hed/acc/EMIES/EMIESClient.cpp:189 #, c-format msgid "No response from %s" msgstr "Нет ответа от %s" #: src/hed/acc/ARC1/AREXClient.cpp:190 src/hed/acc/EMIES/EMIESClient.cpp:198 #, c-format msgid "%s request to %s failed with response: %s" msgstr "Запрос %s к %s не выполнен, получен ответ: %s" #: src/hed/acc/ARC1/AREXClient.cpp:195 src/hed/acc/EMIES/EMIESClient.cpp:213 #, c-format msgid "XML response: %s" msgstr "Отзыв XML: %s" #: src/hed/acc/ARC1/AREXClient.cpp:204 #, c-format msgid "%s request to %s failed. No expected response." msgstr "Сбой запроса %s к %s. Отсутствует ожидаемый отклик." 
#: src/hed/acc/ARC1/AREXClient.cpp:218 #, c-format msgid "Creating and sending submit request to %s" msgstr "Создаётся и отправляется запрос на засылку к %s" #: src/hed/acc/ARC1/AREXClient.cpp:234 src/hed/acc/ARC1/AREXClient.cpp:482 #: src/hed/acc/EMIES/EMIESClient.cpp:302 src/hed/acc/EMIES/EMIESClient.cpp:405 #: src/hed/acc/UNICORE/UNICOREClient.cpp:160 #, c-format msgid "Job description to be sent: %s" msgstr "описание засылаемой задачи: %s" #: src/hed/acc/ARC1/AREXClient.cpp:248 src/hed/acc/EMIES/EMIESClient.cpp:491 #: src/hed/acc/EMIES/EMIESClient.cpp:525 src/hed/acc/EMIES/EMIESClient.cpp:581 #, c-format msgid "Creating and sending job information query request to %s" msgstr "Создание и отправка запроса о состоянии задачи на %s" #: src/hed/acc/ARC1/AREXClient.cpp:293 src/hed/acc/ARC1/AREXClient.cpp:336 #, c-format msgid "Unable to retrieve status of job (%s)" msgstr "Ошибка при получении информации о состоянии задачи (%s)" #: src/hed/acc/ARC1/AREXClient.cpp:346 src/hed/acc/EMIES/EMIESClient.cpp:821 #, c-format msgid "Creating and sending service information query request to %s" msgstr "Создание и отправка запроса о состоянии службы на %s" #: src/hed/acc/ARC1/AREXClient.cpp:366 #, c-format msgid "Creating and sending ISIS information query request to %s" msgstr "Создание и отправка запроса об информации ISIS на %s" #: src/hed/acc/ARC1/AREXClient.cpp:383 #, c-format msgid "Service %s of type %s ignored" msgstr "Игнорируется сервис %s типа %s" #: src/hed/acc/ARC1/AREXClient.cpp:386 msgid "No execution services registered in the index service" msgstr "Ни одна служба исполнения не зарегистрирована в службе регистрации" #: src/hed/acc/ARC1/AREXClient.cpp:392 #, c-format msgid "Creating and sending terminate request to %s" msgstr "Создание и отправка запроса о прерывании задачи на %s" #: src/hed/acc/ARC1/AREXClient.cpp:403 #: src/hed/acc/UNICORE/UNICOREClient.cpp:619 #: src/hed/acc/UNICORE/UNICOREClient.cpp:692 msgid "Job termination failed" msgstr "Ошибка прерывания задачи" #: src/hed/acc/ARC1/AREXClient.cpp:414 #, c-format msgid "Creating and sending clean request to %s" msgstr "" "Создание и отправка запроса об удалении результатов работы задачи на %s" #: src/hed/acc/ARC1/AREXClient.cpp:444 #, c-format msgid "Creating and sending job description retrieval request to %s" msgstr "Создание и отправка запроса на получение описания задачи на %s" #: src/hed/acc/ARC1/AREXClient.cpp:464 #, c-format msgid "Creating and sending job migrate request to %s" msgstr "Создание и отправка запроса о миграции задачи на %s" #: src/hed/acc/ARC1/AREXClient.cpp:498 src/hed/acc/EMIES/EMIESClient.cpp:932 #, c-format msgid "Creating and sending job resume request to %s" msgstr "Создание и отправка запроса о возобновлении задачи на %s" #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:100 msgid "Renewal of ARC1 jobs is not supported" msgstr "Возобновление задач ARC1 не поддерживается" #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:117 #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:197 #, c-format msgid "Resuming job: %s at state: %s (%s)" msgstr "Возобновление задачи %s в состоянии %s (%s)" #: src/hed/acc/ARC1/JobControllerPluginARC1.cpp:183 #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:103 #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:139 #, c-format msgid "Failed retrieving job description for job: %s" msgstr "Сбой извлечения описания задачи: %s" #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:42 #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:69 msgid "Failed retrieving job status information" msgstr 
"Не удалось извлечь информацию о состоянии задачи" #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:52 msgid "Cleaning of BES jobs is not supported" msgstr "Очистка результатов задач BES не поддерживается" #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:78 msgid "Renewal of BES jobs is not supported" msgstr "Возобновление задач BES не поддерживается" #: src/hed/acc/ARC1/JobControllerPluginBES.cpp:86 msgid "Resuming BES jobs is not supported" msgstr "Перезапуск задач BES не поддерживается" #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:49 msgid "Collecting Job (A-REX jobs) information." msgstr "Собирается информация о задачах (задачи на A-REX)" #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:53 #, c-format msgid "Failed retrieving job IDs: Unsupported url (%s) given" msgstr "Сбой извлечения ярлыков задач: задан неподдерживаемый URL (%s)" #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:61 msgid "Failed retrieving job IDs" msgstr "Сбой извлечения ярлыков задач" #: src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp:64 msgid "" "Error encoutered during job ID retrieval. All job IDs might not have been " "retrieved" msgstr "" "Сбой в процессе извлечения ярлыков задач: возможно, не все ярлыки извлечены" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:53 msgid "Failed to prepare job description" msgstr "Сбой подготовки описания задачи" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:78 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:173 msgid "No job identifier returned by BES service" msgstr "Служба BES не возвратила ни одного ярлыка задачи" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:99 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:194 #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:310 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:77 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:169 msgid "Failed uploading local input files" msgstr "Сбой выгрузки локальных входных файлов" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:148 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:53 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:136 msgid "Failed to prepare job description to target resources" msgstr "Сбой подготовки описания задачи для засылки по назначению" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:271 msgid "Failed adapting job description to target resources" msgstr "Сбой адаптирования описания задачи для засылки по назначению" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:282 #, c-format msgid "" "Unable to migrate job. Job description is not valid in the %s format: %s" msgstr "" "Невозможно мигрировать задачу. Описание задачи в формате %s недействительно: " "%s" #: src/hed/acc/ARC1/SubmitterPluginARC1.cpp:295 msgid "No job identifier returned by A-REX" msgstr "A-REX не возвратил ни одного ярлыка задачи" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:50 msgid "Querying WSRF GLUE2 computing info endpoint." msgstr "" "Опрашивается точка доступа WSRF GLUE2 к информации о вычислительном ресурсе" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:95 #: src/hed/libs/compute/GLUE2.cpp:53 msgid "The Service doesn't advertise its Type." msgstr "Служба не сообщает о своём типе" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:100 msgid "The Service doesn't advertise its Quality Level." 
msgstr "Служба не сообщает о своём уровне качества" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:120 #, c-format msgid "Generating A-REX target: %s" msgstr "Создаётся назначение A-REX: %s" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:145 #: src/hed/libs/compute/GLUE2.cpp:99 msgid "The ComputingEndpoint has no URL." msgstr "У ComputingEndpoint отсутствует URL." #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:151 #: src/hed/libs/compute/GLUE2.cpp:104 msgid "The Service advertises no Health State." msgstr "Служба не предоставляет информации о состоянии здоровья" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:178 msgid "The Service doesn't advertise its Interface." msgstr "Служба не сообщает о своём интерфейсе" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:210 msgid "The Service doesn't advertise its Serving State." msgstr "Служба не сообщает о своём состоянии обслуживания" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:278 #: src/hed/libs/compute/GLUE2.cpp:247 #, c-format msgid "" "The \"FreeSlotsWithDuration\" attribute published by \"%s\" is wrongly " "formatted. Ignoring it." msgstr "" "Неверно отформатирован атрибут \"FreeSlotsWithDuration\", публикуемый \"%s" "\", - игнорируется." #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:279 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE2.cpp:220 #: src/hed/libs/compute/GLUE2.cpp:248 #, c-format msgid "Wrong format of the \"FreeSlotsWithDuration\" = \"%s\" (\"%s\")" msgstr "Неверный формат \"FreeSlotsWithDuration\" = \"%s\" (\"%s\")" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:398 #: src/hed/libs/compute/GLUE2.cpp:417 #, c-format msgid "" "Couldn't parse benchmark XML:\n" "%s" msgstr "" "Невозможно разобрать эталонный XML:\n" "%s" #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:467 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPNG.cpp:426 #, c-format msgid "Unable to parse the %s.%s value from execution service (%s)." msgstr "" "Невозможно разобрать %s.Получено значение %s от службы исполнения (%s)." #: src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp:468 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPNG.cpp:427 #, c-format msgid "Value of %s.%s is \"%s\"" msgstr "Значение %s.%s: \"%s\"" #: src/hed/acc/Broker/DescriptorsBroker.cpp:14 msgid "Sorting according to free slots in queue" msgstr "Сортировка в соответствии с наличием свободных мест в очереди" #: src/hed/acc/Broker/DescriptorsBroker.cpp:15 msgid "Random sorting" msgstr "Случайная сортировка" #: src/hed/acc/Broker/DescriptorsBroker.cpp:16 msgid "Sorting according to specified benchmark (default \"specint2000\")" msgstr "" "Сортировка в соответствии с указанным эталонным тестом (по умолчанию - " "\"specint2000\")" #: src/hed/acc/Broker/DescriptorsBroker.cpp:17 msgid "Sorting according to input data availability at target" msgstr "" "Сортировка в соответствии с доступностью входных данных в пункте назначения" #: src/hed/acc/Broker/DescriptorsBroker.cpp:18 msgid "Performs neither sorting nor matching" msgstr "Не производится ни сортировки, ни поиска соответствия" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:24 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of waiting " "jobs" msgstr "" "Назначение %s отброшено алгоритмом FastestQueueBroker, т.к. 
не сообщает " "число ожидающих задач" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:27 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of total slots" msgstr "" "Назначение %s отброшено алгоритмом FastestQueueBroker, т.к. не сообщает " "общее число ячеек" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:30 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of free slots" msgstr "" "Назначение %s отброшено алгоритмом FastestQueueBroker, т.к. не сообщает " "число свободных ячеек" #: src/hed/acc/CREAM/CREAMClient.cpp:114 msgid "Creating a CREAM client" msgstr "Создаётся клиент CREAM" #: src/hed/acc/CREAM/CREAMClient.cpp:117 msgid "Unable to create SOAP client used by CREAMClient." msgstr "Не удалось создать клиент SOAP использующийся CREAMClient." #: src/hed/acc/CREAM/CREAMClient.cpp:128 msgid "CREAMClient not created properly" msgstr "CREAMClient не был создан надлежащим образом." #: src/hed/acc/CREAM/CREAMClient.cpp:139 #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:52 #: src/hed/acc/UNICORE/UNICOREClient.cpp:207 #: src/hed/acc/UNICORE/UNICOREClient.cpp:299 #: src/hed/acc/UNICORE/UNICOREClient.cpp:376 #: src/hed/acc/UNICORE/UNICOREClient.cpp:455 #: src/hed/acc/UNICORE/UNICOREClient.cpp:488 #: src/hed/acc/UNICORE/UNICOREClient.cpp:565 #: src/hed/acc/UNICORE/UNICOREClient.cpp:641 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:155 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:188 #: src/tests/client/test_ClientInterface.cpp:88 #: src/tests/client/test_ClientSAML2SSO.cpp:81 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:100 #: src/tests/echo/test_clientinterface.cpp:82 #: src/tests/echo/test_clientinterface.cpp:149 #: src/tests/echo/test_clientinterface.py:29 msgid "There was no SOAP response" msgstr "Нет ответа SOAP" #: src/hed/acc/CREAM/CREAMClient.cpp:148 src/hed/acc/CREAM/CREAMClient.cpp:353 #: src/hed/acc/CREAM/CREAMClient.cpp:374 src/hed/acc/CREAM/CREAMClient.cpp:395 #: src/hed/acc/CREAM/CREAMClient.cpp:414 src/hed/acc/CREAM/CREAMClient.cpp:465 #: src/hed/acc/CREAM/CREAMClient.cpp:494 msgid "Empty response" msgstr "Пустой ответ" #: src/hed/acc/CREAM/CREAMClient.cpp:167 #, c-format msgid "Request failed: %s" msgstr "Сбой запроса: %s" #: src/hed/acc/CREAM/CREAMClient.cpp:175 src/hed/acc/CREAM/CREAMClient.cpp:428 #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:35 #: src/hed/acc/UNICORE/UNICOREClient.cpp:359 msgid "Creating and sending a status request" msgstr "Создание и отправка запроса о состоянии" #: src/hed/acc/CREAM/CREAMClient.cpp:200 msgid "Unable to retrieve job status." msgstr "Невозможно извлечь информацию о состоянии задачи." 
#: src/hed/acc/CREAM/CREAMClient.cpp:340 #: src/hed/acc/UNICORE/UNICOREClient.cpp:549 #: src/hed/acc/UNICORE/UNICOREClient.cpp:628 msgid "Creating and sending request to terminate a job" msgstr "Создание и отправка запроса о прерывании задачи" #: src/hed/acc/CREAM/CREAMClient.cpp:361 msgid "Creating and sending request to clean a job" msgstr "Создание и отправка запроса об удалении результатов работы задачи" #: src/hed/acc/CREAM/CREAMClient.cpp:382 msgid "Creating and sending request to resume a job" msgstr "Создание и отправка запроса о возобновлении задачи" #: src/hed/acc/CREAM/CREAMClient.cpp:403 msgid "Creating and sending request to list jobs" msgstr "Создание и отправка запроса о просмотре списка задач" #: src/hed/acc/CREAM/CREAMClient.cpp:450 msgid "Creating and sending job register request" msgstr "Создание и отправка запроса о регистрации задачи" #: src/hed/acc/CREAM/CREAMClient.cpp:470 src/hed/acc/CREAM/CREAMClient.cpp:499 msgid "No job ID in response" msgstr "Отзыв не содержит ярлыка задачи" #: src/hed/acc/CREAM/CREAMClient.cpp:480 msgid "Creating and sending job start request" msgstr "Создание и отправка запроса о начале задачи" #: src/hed/acc/CREAM/CREAMClient.cpp:508 msgid "Creating delegation" msgstr "Создание делегирования" #: src/hed/acc/CREAM/CREAMClient.cpp:520 msgid "Malformed response: missing getProxyReqReturn" msgstr "Искажённый отзыв: отсутствует getProxyReqReturn" #: src/hed/acc/CREAM/CREAMClient.cpp:531 #, c-format msgid "Delegatable credentials expired: %s" msgstr "Срок действия делегируемых параметров доступа истек: %s" #: src/hed/acc/CREAM/CREAMClient.cpp:541 msgid "Failed signing certificate request" msgstr "Сбой подписи запроса сертификата" #: src/hed/acc/CREAM/CREAMClient.cpp:561 msgid "Failed putting signed delegation certificate to service" msgstr "Сбой при передаче подписанного сертификата делегирования на сервис" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:52 #, c-format msgid "Failed cleaning job: %s" msgstr "Сбой очистки задачи: %s" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:70 #, c-format msgid "Failed canceling job: %s" msgstr "Сбой прерывания задачи: %s" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:84 msgid "Renewal of CREAM jobs is not supported" msgstr "Возобновление задач CREAM не поддерживается" #: src/hed/acc/CREAM/JobControllerPluginCREAM.cpp:98 #, c-format msgid "Failed resuming job: %s" msgstr "Сбой возобновления задачи %s" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:40 msgid "Failed creating signed delegation certificate" msgstr "Сбой создания подписанного сертификата делегирования" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:61 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:153 #: src/hed/acc/UNICORE/UNICOREClient.cpp:115 #, c-format msgid "Unable to submit job. Job description is not valid in the %s format" msgstr "" "Невозможно заслать задачу. 
Описание задачи в формате %s недействительно" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:69 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:161 msgid "Failed registering job" msgstr "Сбой регистрации задачи" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:85 #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:177 msgid "Failed starting job" msgstr "Сбой запуска задачи" #: src/hed/acc/CREAM/SubmitterPluginCREAM.cpp:123 msgid "Failed creating singed delegation certificate" msgstr "Сбой создания подписанного сертификата делегирования" #: src/hed/acc/EMIES/EMIESClient.cpp:79 msgid "Creating an EMI ES client" msgstr "Создаётся клиент EMI ES" #: src/hed/acc/EMIES/EMIESClient.cpp:82 msgid "Unable to create SOAP client used by EMIESClient." msgstr "Не удалось создать клиент SOAP использующийся EMIESClient." #: src/hed/acc/EMIES/EMIESClient.cpp:157 msgid "Re-creating an EMI ES client" msgstr "Воссоздаётся клиент EMI ES" #: src/hed/acc/EMIES/EMIESClient.cpp:223 #, c-format msgid "%s request to %s failed. Unexpected response: %s." msgstr "Запрос %s к %s не выполнен, неожиданный ответ: %s" #: src/hed/acc/EMIES/EMIESClient.cpp:237 src/hed/acc/EMIES/EMIESClient.cpp:344 #, c-format msgid "Creating and sending job submit request to %s" msgstr "Создание и отправка запроса об исполнении задачи на %s" #: src/hed/acc/EMIES/EMIESClient.cpp:415 src/hed/acc/EMIES/EMIESClient.cpp:598 #: src/hed/acc/EMIES/EMIESClient.cpp:1087 #, c-format msgid "New limit for vector queries returned by EMI ES service: %d" msgstr "" "Сервис EMI ES установил новые значения пределов для параллельных запросов: %d" #: src/hed/acc/EMIES/EMIESClient.cpp:423 src/hed/acc/EMIES/EMIESClient.cpp:606 #: src/hed/acc/EMIES/EMIESClient.cpp:1095 #, c-format msgid "" "Error: Service returned a limit higher or equal to current limit (current: %" "d; returned: %d)" msgstr "" "Ошибка: Сервис требует предел, превышающий или равный текущему (текущий: %d; " "требуемый: %d)" #: src/hed/acc/EMIES/EMIESClient.cpp:764 #, c-format msgid "Creating and sending service information request to %s" msgstr "Создание и отправка запроса информации о службе на %s" #: src/hed/acc/EMIES/EMIESClient.cpp:869 src/hed/acc/EMIES/EMIESClient.cpp:890 #, c-format msgid "Creating and sending job clean request to %s" msgstr "" "Создание и отправка запроса об удалении результатов работы задачи на %s" #: src/hed/acc/EMIES/EMIESClient.cpp:911 #, c-format msgid "Creating and sending job suspend request to %s" msgstr "Создание и отправка запроса о приостановке задачи на %s" #: src/hed/acc/EMIES/EMIESClient.cpp:953 #, c-format msgid "Creating and sending job restart request to %s" msgstr "Создание и отправка запроса о перезапуске задачи на %s" #: src/hed/acc/EMIES/EMIESClient.cpp:1010 #, c-format msgid "Creating and sending job notify request to %s" msgstr "Создание и отправка запроса об уведомлении о задаче на %s" #: src/hed/acc/EMIES/EMIESClient.cpp:1065 #, c-format msgid "Creating and sending notify request to %s" msgstr "Создание и отправка запроса об уведомлении на %s" #: src/hed/acc/EMIES/EMIESClient.cpp:1155 #, c-format msgid "Creating and sending job list request to %s" msgstr "Создание и отправка запроса о просмотре задачи на %s" #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:156 #, c-format msgid "Job %s has no delegation associated. Can't renew such job." msgstr "" "С задачей %s не ассоциировано никакого делегирования. Задача не может быть " "обновлена." #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:170 #, c-format msgid "Job %s failed to renew delegation %s - %s." 
msgstr "Задача %s не смогла обновить делегирование %s - %s." #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:247 #, c-format msgid "Failed retrieving information for job: %s" msgstr "Сбой извлечения информации о задаче: %s" #: src/hed/acc/EMIES/JobControllerPluginEMIES.cpp:319 msgid "Retrieving job description of EMI ES jobs is not supported" msgstr "Получение описания задач EMI ES не поддерживается" #: src/hed/acc/EMIES/JobListRetrieverPluginEMIES.cpp:61 #, c-format msgid "Listing jobs succeeded, %d jobs found" msgstr "Задачи успешно перечислены, обнаружено %d задач(и)" #: src/hed/acc/EMIES/JobListRetrieverPluginEMIES.cpp:77 #: src/hed/acc/ldap/JobListRetrieverPluginLDAPNG.cpp:102 #, c-format msgid "" "Skipping retrieved job (%s) because it was submitted via another interface (%" "s)." msgstr "" "Пропускается скачанная задача (%s), так как она была запущена через другой " "интерфейс (%s)." #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:47 msgid "" "Failed to delegate credentials to server - no delegation interface found" msgstr "" "Сбой делегирования параметров доступа на сервер - не обнаружен интерфейс " "делегирования" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:54 #, c-format msgid "Failed to delegate credentials to server - %s" msgstr "Сбой делегирования параметров доступа на сервер - %s" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:77 msgid "Failed preparing job description" msgstr "Не удалось подготовить описание задачи" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:95 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:406 msgid "Unable to submit job. Job description is not valid XML" msgstr "" "Невозможно заслать задачу. Описание задачи не является допустимым файлом XML" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:154 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:482 msgid "No valid job identifier returned by EMI ES" msgstr "EMI ES не возвратил действительных ярлыков задач" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:180 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:499 msgid "Job failed on service side" msgstr "Задача дала сбой на сервере" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:190 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:509 msgid "Failed to obtain state of job" msgstr "Сбой извлечения состояние задачи" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:205 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:521 msgid "Failed to wait for job to allow stage in" msgstr "Сбой ожидания разрешения от задачи на размещение входных файлов" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:228 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:541 msgid "Failed to obtain valid stagein URL for input files" msgstr "Сбой получения допустимых URL для размещения входных файлов" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:248 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:558 #, c-format msgid "Failed uploading local input files to %s" msgstr "Сбой выгрузки локальных входных файлов в %s" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:269 #, c-format msgid "Failed to submit job description: EMIESFault(%s , %s)" msgstr "Сбой засылки описания задачи: EMIESFault(%s , %s)" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:278 #, c-format msgid "Failed to submit job description: UnexpectedError(%s)" msgstr "Сбой засылки описания задачи: UnexpectedError(%s)" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:315 #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:574 msgid "Failed to notify service" msgstr "Сбой уведомления службы" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:367 msgid "Failed preparing job description 
to target resources" msgstr "Не удалось адаптировать описание задачи для засылки по назначению" #: src/hed/acc/EMIES/SubmitterPluginEMIES.cpp:475 #, c-format msgid "Failed to submit job description: %s" msgstr "Сбой засылки описания задачи: %s" #: src/hed/acc/EMIES/TargetInformationRetrieverPluginEMIES.cpp:54 msgid "Collecting EMI-ES GLUE2 computing info endpoint information." msgstr "Собирается информация GLUE2 для точки входа EMI-ES." #: src/hed/acc/EMIES/TargetInformationRetrieverPluginEMIES.cpp:74 msgid "Generating EMIES targets" msgstr "Создаются назначения EMIES" #: src/hed/acc/EMIES/TargetInformationRetrieverPluginEMIES.cpp:83 #, c-format msgid "Generated EMIES target: %s" msgstr "Созданы назначения EMIES: %s" #: src/hed/acc/EMIES/TestEMIESClient.cpp:75 #: src/hed/acc/EMIES/TestEMIESClient.cpp:79 #, c-format msgid "Query returned unexpected element: %s:%s" msgstr "Запрос возвратил неожиданный элемент: %s:%s" #: src/hed/acc/EMIES/TestEMIESClient.cpp:85 #, c-format msgid "Element validation according to GLUE2 schema failed: %s" msgstr "Проверка соответствия элемента схеме GLUE2 не прошла: %s" #: src/hed/acc/EMIES/TestEMIESClient.cpp:114 msgid "Resource query failed" msgstr "Сбой опроса ресурса" #: src/hed/acc/EMIES/TestEMIESClient.cpp:132 msgid "Submission failed" msgstr "Сбой засылки задачи" #: src/hed/acc/EMIES/TestEMIESClient.cpp:143 msgid "Obtaining status failed" msgstr "Сбой определения состояния" #: src/hed/acc/EMIES/TestEMIESClient.cpp:153 msgid "Obtaining information failed" msgstr "Сбой получения информации" #: src/hed/acc/EMIES/TestEMIESClient.cpp:170 msgid "Cleaning failed" msgstr "Сбой очистки" #: src/hed/acc/EMIES/TestEMIESClient.cpp:177 msgid "Notify failed" msgstr "Сбой уведомления" #: src/hed/acc/EMIES/TestEMIESClient.cpp:184 msgid "Kill failed" msgstr "Сбой прерывания задачи" #: src/hed/acc/EMIES/TestEMIESClient.cpp:190 msgid "List failed" msgstr "Сбой перечисления задач" #: src/hed/acc/EMIES/TestEMIESClient.cpp:201 #, c-format msgid "Fetching resource description from %s" msgstr "Получение описания ресурса с %s" #: src/hed/acc/EMIES/TestEMIESClient.cpp:204 #: src/hed/acc/EMIES/TestEMIESClient.cpp:273 #: src/hed/acc/EMIES/TestEMIESClient.cpp:283 #: src/hed/acc/EMIES/TestEMIESClient.cpp:294 #, c-format msgid "Failed to obtain resource description: %s" msgstr "Сбой получения описания ресурса: %s" #: src/hed/acc/EMIES/TestEMIESClient.cpp:213 #: src/hed/acc/EMIES/TestEMIESClient.cpp:217 #, c-format msgid "Resource description contains unexpected element: %s:%s" msgstr "Схема описания ресурса содержит недействительный элемент: %s:%s" #: src/hed/acc/EMIES/TestEMIESClient.cpp:223 msgid "Resource description validation according to GLUE2 schema failed: " msgstr "Проверка соответствия описания ресурса схеме GLUE2 не прошла:" #: src/hed/acc/EMIES/TestEMIESClient.cpp:228 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:560 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:819 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:133 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:173 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1218 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1252 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1434 #: src/hed/identitymap/ArgusPDPClient.cpp:444 #: src/hed/identitymap/ArgusPEPClient.cpp:98 #: src/hed/identitymap/ArgusPEPClient.cpp:345 #: src/hed/libs/common/Thread.cpp:193 src/hed/libs/common/Thread.cpp:196 #: src/hed/libs/common/Thread.cpp:199 #: src/hed/libs/credential/Credential.cpp:1055 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:72 #: 
src/hed/mcc/tcp/PayloadTCPSocket.cpp:88 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:104 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:124 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:134 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:142 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:151 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:160 #: src/hed/mcc/tls/PayloadTLSMCC.cpp:82 src/hed/shc/arcpdp/ArcPDP.cpp:235 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:293 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:247 #: src/services/a-rex/delegation/DelegationStore.cpp:44 #: src/services/a-rex/delegation/DelegationStore.cpp:49 #: src/services/a-rex/delegation/DelegationStore.cpp:54 #: src/services/a-rex/delegation/DelegationStore.cpp:88 #: src/services/a-rex/delegation/DelegationStore.cpp:94 #: src/services/a-rex/grid-manager/inputcheck.cpp:33 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:552 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:620 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:645 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:656 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:667 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:678 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:686 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:692 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:697 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:702 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:707 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:715 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:723 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:734 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:741 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:780 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:790 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:798 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:824 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:893 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:906 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:923 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:935 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1239 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1244 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1273 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1286 #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:373 #, c-format msgid "%s" msgstr "%s" #: src/hed/acc/EMIES/TestEMIESClient.cpp:248 msgid "Resource description is empty" msgstr "Описание ресурса пусто" #: src/hed/acc/EMIES/TestEMIESClient.cpp:255 #, c-format msgid "Resource description provides URL for interface %s: %s" msgstr "Описание ресурса содержит URL интерфейса %s: %s" #: src/hed/acc/EMIES/TestEMIESClient.cpp:260 msgid "Resource description provides no URLs for interfaces" msgstr "Описание ресурса не содержит URL интерфейсов" #: src/hed/acc/EMIES/TestEMIESClient.cpp:263 msgid "Resource description validation passed" msgstr "Прошла проверка описания ресурса" #: src/hed/acc/EMIES/TestEMIESClient.cpp:266 #, c-format msgid "Requesting ComputingService elements of resource description at %s" msgstr "Запрашиваются элементы описания ресурса ComputingService с %s" #: src/hed/acc/EMIES/TestEMIESClient.cpp:271 msgid "Performing /Services/ComputingService query" msgstr "Выполняется запрос 
/Services/ComputingService" #: src/hed/acc/EMIES/TestEMIESClient.cpp:275 #: src/hed/acc/EMIES/TestEMIESClient.cpp:285 #: src/hed/acc/EMIES/TestEMIESClient.cpp:296 msgid "Query returned no elements." msgstr "Результат запроса не содержит элементов." #: src/hed/acc/EMIES/TestEMIESClient.cpp:281 msgid "Performing /ComputingService query" msgstr "Выполняется запрос /ComputingService" #: src/hed/acc/EMIES/TestEMIESClient.cpp:292 msgid "Performing /* query" msgstr "Выполняется запрос /*" #: src/hed/acc/EMIES/TestEMIESClient.cpp:302 msgid "All queries failed" msgstr "Сбой всех запросов" #: src/hed/acc/EMIES/TestEMIESClient.cpp:332 #, c-format msgid "" "Number of ComputingService elements obtained from full document and XPath " "qury do not match: %d != %d" msgstr "" "Количество элементов ComputingService полученных из полного документа и из " "запроса XPath не совпадает: %d != %d" #: src/hed/acc/EMIES/TestEMIESClient.cpp:335 msgid "Resource description query validation passed" msgstr "Проверка соответствия запроса описания ресурса прошла" #: src/hed/acc/EMIES/TestEMIESClient.cpp:337 #, c-format msgid "Unsupported command: %s" msgstr "Неподдерживаемая команда: %s" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:75 #, c-format msgid "[ADLParser] Unsupported EMI ES state %s." msgstr "[ADLParser] Неподдерживаемое состояние EMI ES %s." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:95 #, c-format msgid "[ADLParser] Unsupported internal state %s." msgstr "[ADLParser] Неподдерживаемое внутреннее состояние %s." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:105 #, c-format msgid "[ADLParser] Optional for %s elements are not supported yet." msgstr "[ADLParser] Атрибут optional для элементов %s пока не поддерживается." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:114 #, c-format msgid "[ADLParser] %s element must be boolean." msgstr "[ADLParser] элемент %s должен быть логическим." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:126 #, c-format msgid "[ADLParser] Code in FailIfExitCodeNotEqualTo in %s is not valid number." msgstr "" "[ADLParser] Код в FailIfExitCodeNotEqualTo в %s не является допустимым " "числом." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:364 msgid "[ADLParser] Root element is not ActivityDescription " msgstr "[ADLParser] Корневой элемент не является ActivityDescription " #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:411 #, fuzzy msgid "[ADLParser] priority is too large - using max value 100" msgstr "" "[ARCJSDLParser] слишком высокий приоритет - используется максимальное " "значение 100" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:454 #, c-format msgid "[ADLParser] Unsupported URL %s for RemoteLogging." msgstr "[ADLParser] Неподдерживаемый URL %s в RemoteLogging." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:473 #, c-format msgid "[ADLParser] Wrong time %s in ExpirationTime." msgstr "[ADLParser] ExpirationTime содержит недопустимое время %s." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:502 #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:505 msgid "[ADLParser] Only email Prorocol for Notification is supported yet." msgstr "" "[ADLParser] Пока что поддерживается только email Prorocol для Notification." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:563 msgid "[ADLParser] Missing or wrong value in ProcessesPerSlot." msgstr "[ADLParser] Значение ProcessesPerSlot отсутствует или неверно." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:568 msgid "[ADLParser] Missing or wrong value in ThreadsPerProcess." 
msgstr "[ADLParser] Значение ThreadsPerProcess отсутствует или неверно." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:574 msgid "" "[ADLParser] Missing Name element or value in ParallelEnvironment/Option " "element." msgstr "" "[ADLParser] Отсутствует элемент Name или значение элемента " "ParallelEnvironment/Option." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:591 msgid "[ADLParser] NetworkInfo is not supported yet." msgstr "[ADLParser] NetworkInfo пока что не поддерживается." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:605 #, c-format msgid "[ADLParser] NodeAccess value %s is not supported yet." msgstr "[ADLParser] Значение NodeAccess %s пока что не поддерживается." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:613 msgid "[ADLParser] Missing or wrong value in NumberOfSlots." msgstr "[ADLParser] Значение NumberOfSlots отсутствует или неверно." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:620 msgid "" "[ADLParser] The NumberOfSlots element should be specified, when the value of " "useNumberOfSlots attribute of SlotsPerHost element is \"true\"." msgstr "" "[ADLParser] Значение элемента NumberOfSlots должно быть указано, если " "значение атрибута useNumberOfSlots элемента SlotsPerHost - \"true\"." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:627 msgid "[ADLParser] Missing or wrong value in SlotsPerHost." msgstr "[ADLParser] Значение SlotsPerHost отсутствует или неверно." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:656 msgid "[ADLParser] Missing or wrong value in IndividualPhysicalMemory." msgstr "[ADLParser] Значение IndividualPhysicalMemory отсутствует или неверно." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:666 msgid "[ADLParser] Missing or wrong value in IndividualVirtualMemory." msgstr "[ADLParser] Значение IndividualVirtualMemory отсутствует или неверно." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:676 msgid "[ADLParser] Missing or wrong value in DiskSpaceRequirement." msgstr "[ADLParser] Значение DiskSpaceRequirement отсутствует или неверно." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:690 msgid "[ADLParser] Benchmark is not supported yet." msgstr "[ADLParser] Benchmark пока что не поддерживается." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:698 msgid "[ADLParser] Missing or wrong value in IndividualCPUTime." msgstr "[ADLParser] Значение IndividualCPUTime отсутствует или неверно." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:706 msgid "[ADLParser] Missing or wrong value in TotalCPUTime." msgstr "[ADLParser] Значение TotalCPUTime отсутствует или неверно." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:715 msgid "[ADLParser] Missing or wrong value in WallTime." msgstr "[ADLParser] Значение WallTime отсутствует или неверно." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:735 msgid "[ADLParser] Missing or empty Name in InputFile." msgstr "[ADLParser] Значение Name в InputFile отсутствует или неверно." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:746 #, c-format msgid "[ADLParser] Wrong URI specified in Source - %s." msgstr "[ADLParser] Указан неверный URI в Source - %s." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:768 msgid "[ADLParser] Missing or empty Name in OutputFile." msgstr "[ADLParser] Значение Name в OutputFile отсутствует или неверно." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:774 #, c-format msgid "[ADLParser] Wrong URI specified in Target - %s." msgstr "[ADLParser] Указан неверный URI в Target - %s." 
#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:787 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:792 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:846 #, c-format msgid "Location URI for file %s is invalid" msgstr "Недопустимый URI в Location для файла %s" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:812 #, c-format msgid "[ADLParser] CreationFlag value %s is not supported." msgstr "[ADLParser] Значение CreationFlag %s не поддерживается." #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:75 #, c-format msgid "Unknown operator '%s' in attribute require in Version element" msgstr "Неизвестный оператор '%s' в атрибуте require элемента Version" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:121 #, c-format msgid "Multiple '%s' elements are not supported." msgstr "Множественные элементы '%s' не поддерживаются." #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:136 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:143 #, c-format msgid "The 'exclusiveBound' attribute to the '%s' element is not supported." msgstr "Атрибут 'exclusiveBound' элемента '%s' не поддерживается." #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:150 msgid "The 'epsilon' attribute to the 'Exact' element is not supported." msgstr "Атрибут 'epsilon' элемента 'Exact' не поддерживается." #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:178 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:195 #, c-format msgid "Parsing error: Value of %s element can't be parsed as number" msgstr "Ошибка разбора: Значение элемента %s не может быть разобрано как число" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:183 #, c-format msgid "" "Parsing error: Elements (%s) representing upper range have different values" msgstr "Ошибка разбора: Элементы (%s) задающие верхнюю границу различаются" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:200 #, c-format msgid "" "Parsing error: Elements (%s) representing lower range have different values" msgstr "Ошибка разбора: Элементы (%s) задающие нижнюю границу различаются" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:209 #, c-format msgid "" "Parsing error: Value of lower range (%s) is greater than value of upper " "range (%s)" msgstr "" "Ошибка разбора: Значение нижней границы (%s) превышает значение верхней (%s)" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:296 msgid "[ARCJSDLParser] Not a JSDL - missing JobDescription element" msgstr "[ARCJSDLParser] Это не JSDL - отсутствует элемент JobDescription" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:377 #, c-format msgid "" "[ARCJSDLParser] Error during the parsing: missed the name attributes of the " "\"%s\" Environment" msgstr "" "[ARCJSDLParser] Ошибка при разборе: отсутствует атрибут name в элементе " "Environment \"%s\"" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:424 msgid "[ARCJSDLParser] RemoteLogging URL is wrongly formatted." msgstr "[ARCJSDLParser] Неверный формат RemoteLogging URL" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:440 msgid "[ARCJSDLParser] priority is too large - using max value 100" msgstr "" "[ARCJSDLParser] слишком высокий приоритет - используется максимальное " "значение 100" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:718 msgid "Lower bounded range is not supported for the 'TotalCPUCount' element." msgstr "" "Интервал с нижней границей не поддерживается для элемента 'TotalCPUCount'." 
#: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:736 msgid "" "Parsing the \"require\" attribute of the \"QueueName\" nordugrid-JSDL " "element failed. An invalid comparison operator was used, only \"ne\" or \"eq" "\" are allowed." msgstr "" "Сбой разбора атрибута \"require\" элемента \"QueueName\" из nordugrid-JSDL. " "Используется недопустимый оператор сравнения, допускаются только \"ne\" или " "\"eq\"." #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:787 #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:841 #, c-format msgid "No URI element found in Location for file %s" msgstr "Не обнаружено элементов URI в Location для файла %s" #: src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp:873 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:323 #, c-format msgid "String successfully parsed as %s." msgstr "Строка успешно разобрана как %s." #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:53 #, c-format msgid "[JDLParser] Semicolon (;) is not allowed inside brackets, at '%s;'." msgstr "" "[JDLParser] Точка с запятой (;) не допускается внутри скобок, строка '%s;'." #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:137 #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:141 #, c-format msgid "[JDLParser] This kind of JDL descriptor is not supported yet: %s" msgstr "[JDLParser] Этот тип дескриптора JDL пока не поддерживается: %s" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:144 #, c-format msgid "[JDLParser] Attribute named %s has unknown value: %s" msgstr "[JDLParser] У атрибута %s недействительное значение: %s" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:224 msgid "Not enough outputsandboxdesturi elements!" msgstr "Недостаточно элементов outputsandboxdesturi!" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:306 msgid "" "[JDLParser] Environment variable has been defined without any equals sign." msgstr "" "[JDLParser] Переменная среды задана без использования знаков равенства." #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:503 #, c-format msgid "[JDLParser]: Unknown attribute name: '%s', with value: %s" msgstr "[JDLParser]: Неизвестное название атрибута: '%s', значение: %s" #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:539 msgid "The inputsandboxbaseuri JDL attribute specifies an invalid URL." msgstr "Атрибут JDL inputsandboxbaseuri задаёт недопустимый URL." #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:616 msgid "[JDLParser] Syntax error found during the split function." msgstr "[JDLParser] Обнаружена синтаксическая ошибка при выполнении разбиения." #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:620 msgid "[JDLParser] Lines count is zero or other funny error has occurred." msgstr "[JDLParser] Нулевое количество строк, или другая непонятная ошибка." #: src/hed/acc/JobDescriptionParser/JDLParser.cpp:628 msgid "" "[JDLParser] JDL syntax error. There is at least one equals sign missing " "where it would be expected." msgstr "" "[JDLParser] Синтаксическая ошибка JDL. По крайней мере один из ожидаемых " "знаков равенства отсутствует." 
#: src/hed/acc/JobDescriptionParser/JDLParser.cpp:641 #, c-format msgid "String successfully parsed as %s" msgstr "Строка успешно разобрана как %s" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:42 msgid "Left operand for RSL concatenation does not evaluate to a literal" msgstr "Левый операнд для сцепления RSL не приводится к буквенной константе" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:50 msgid "Right operand for RSL concatenation does not evaluate to a literal" msgstr "Правый операнд для сцепления RSL не приводится к буквенной константе" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:161 msgid "Multi-request operator only allowed at top level" msgstr "Оператор множественности RSL допускается только на верхнем уровне" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:186 msgid "RSL substitution is not a sequence" msgstr "Замена в RSL не является последовательностью" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:192 msgid "RSL substitution sequence is not of length 2" msgstr "Замена в RSL не является последовательностью из двух элементов" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:211 msgid "RSL substitution variable name does not evaluate to a literal" msgstr "Имя переменной для замены RSL не приводится к буквенной константе" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:220 msgid "RSL substitution variable value does not evaluate to a literal" msgstr "Значение переменной для замены RSL не приводится к буквенной константе" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:313 msgid "End of comment not found" msgstr "Не найдено окончание комментария" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:324 msgid "Junk at end of RSL" msgstr "Неразборчивые фрагменты в конце RSL" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:424 msgid "End of single quoted string not found" msgstr "Не обнаружено конца строки в одиночных кавычках" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:441 msgid "End of double quoted string not found" msgstr "Не обнаружено конца строки в двойных кавычках" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:460 #, c-format msgid "End of user delimiter (%s) quoted string not found" msgstr "" "Не обнаружено конца строки, выделенной пользовательским ограничителем (%s)" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:518 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:546 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:625 msgid "')' expected" msgstr "ожидается ')'" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:528 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:609 msgid "'(' expected" msgstr "ожидается '('" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:536 msgid "Variable name expected" msgstr "Ожидается имя переменной" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:541 #, c-format msgid "Variable name (%s) contains invalid character (%s)" msgstr "Имя переменной (%s) содержит неверный символ (%s)" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:557 msgid "Broken string" msgstr "Недопустимая строка" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:570 msgid "No left operand for concatenation operator" msgstr "Отсутствует левый операнд оператора сцепления" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:574 msgid "No right operand for concatenation operator" msgstr "Отсутствует правый операнд оператора сцепления" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:638 msgid "Attribute name expected" msgstr "Ожидается имя атрибута" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:643 #, c-format msgid "Attribute name 
(%s) contains invalid character (%s)" msgstr "Имя атрибута (%s) содержит неверный символ (%s)" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:649 msgid "Relation operator expected" msgstr "Ожидается использование реляционного оператора" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:86 msgid "Error parsing the internally set executables attribute." msgstr "Ошибка разбора переопределённого системой атрибута executables." #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:102 #, c-format msgid "" "File '%s' in the 'executables' attribute is not present in the 'inputfiles' " "attribute" msgstr "" "Файл '%s' перечисленный в атрибуте 'executables' отсутствует в атрибуте " "'inputfiles'" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:120 msgid "The value of the ftpthreads attribute must be a number from 1 to 10" msgstr "Значение атрибута 'ftpthreads' должно быть целым числом от 1 до 10" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:177 msgid "'stdout' attribute must specified when 'join' attribute is specified" msgstr "" "Необходимо задать значение атрибута 'stdout', если задано значение атрибута " "'join'" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:181 msgid "" "Attribute 'join' cannot be specified when both 'stdout' and 'stderr' " "attributes is specified" msgstr "" "Атрибут 'join' не может использоваться, если заданы оба атрибута 'stdout' и " "'stderr'" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:200 msgid "Attributes 'gridtime' and 'cputime' cannot be specified together" msgstr "Атрибуты 'gridtime' и 'cputime' не могут быть заданы одновременно." #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:204 msgid "Attributes 'gridtime' and 'walltime' cannot be specified together" msgstr "Атрибуты 'gridtime' и 'walltime' не могут быть заданы одновременно" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:226 msgid "" "When specifying 'countpernode' attribute, 'count' attribute must also be " "specified" msgstr "" "При задании атрибута 'countpernode', атрибут 'count' также должен быть задан" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:229 msgid "Value of 'countpernode' attribute must be an integer" msgstr "Значение атрибута 'countpernode' должно быть целочисленным" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:287 msgid "No RSL content in job description found" msgstr "В описании задачи не найдено структуры RSL" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:293 msgid "Multi-request job description not allowed in GRIDMANAGER dialect" msgstr "Множественное описание заданий не допускается в диалекте GRIDMANAGER" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:304 msgid "No execuable path specified in GRIDMANAGER dialect" msgstr "Не задан путь к исполняемому файлу в диалекте GRIDMANAGER" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:311 msgid "'action' attribute not allowed in user-side job description" msgstr "" "Использование атрибута 'action' в пользовательском описании задачи не " "допускается" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:314 msgid "Executable path not specified ('executable' attribute)" msgstr "Не задан путь к исполняемому файлу (атрибут 'executable')" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:332 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:350 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:368 #, c-format msgid "Attribute '%s' multiply defined" msgstr "Атрибут '%s' задан несколько раз" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:336 #, c-format msgid "Value of attribute '%s' expected to be 
single value" msgstr "Значение атрибута '%s' неоднозначно" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:341 #, c-format msgid "Value of attribute '%s' expected to be a string" msgstr "Значение атрибута '%s' не является строкой" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:357 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:387 #, c-format msgid "Value of attribute '%s' is not a string" msgstr "Значение атрибута '%s' не является строкой" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:375 #, c-format msgid "Value of attribute '%s' is not sequence" msgstr "Значение атрибута '%s' не является последовательностью" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:379 #, c-format msgid "" "Value of attribute '%s' has wrong sequence length: Expected %d, found %d" msgstr "" "Значение атрибута '%s' содержит последовательность недопустимой длины: " "ожидается %d, получено %d" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:511 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1368 msgid "Unexpected RSL type" msgstr "Неожиданный тип RSL" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:576 msgid "At least two values are needed for the 'inputfiles' attribute" msgstr "Для атрибута 'inputfiles' необходимы как минимум два значения" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:581 msgid "First value of 'inputfiles' attribute (filename) cannot be empty" msgstr "" "Первая часть значения атрибута 'inputfiles' (filename) не может быть пустой" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:606 #, c-format msgid "Invalid URL '%s' for input file '%s'" msgstr "Недопустимый URL '%s' для входного файла '%s'" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:615 #, c-format msgid "Invalid URL option syntax in option '%s' for input file '%s'" msgstr "Недопустимый синтаксис опции URL в опции '%s' для входного файла '%s'" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:625 #, c-format msgid "Invalid URL: '%s' in input file '%s'" msgstr "Недопустимый URL: '%s' во входном файле '%s'" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:694 msgid "At least two values are needed for the 'outputfiles' attribute" msgstr "Для атрибута 'outputfiles' необходимы как минимум два значения" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:699 msgid "First value of 'outputfiles' attribute (filename) cannot be empty" msgstr "" "Первая часть значения атрибута 'outputfiles' (filename) не может быть пустой" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:711 #, c-format msgid "Invalid URL '%s' for output file '%s'" msgstr "Недопустимый URL '%s' для выходного файла '%s'" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:720 #, c-format msgid "Invalid URL option syntax in option '%s' for output file '%s'" msgstr "Недопустимый синтаксис опции URL в опции '%s' для выходного файла '%s'" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:730 #, c-format msgid "Invalid URL: '%s' in output file '%s'" msgstr "Недопустимый URL: '%s' в выходном файле '%s'" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:761 #, c-format msgid "" "Invalid comparison operator '%s' used at 'queue' attribute in 'GRIDMANAGER' " "dialect, only \"=\" is allowed" msgstr "" "Недопустимый оператор сравнения '%s' используется с атрибутом 'queue' в " "диалекте GRIDMANAGER, допускается лишь \"=\"" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:767 #, c-format msgid "" "Invalid comparison operator '%s' used at 'queue' attribute, only \"!=\" or " "\"=\" are allowed." 
msgstr "" "Недопустимый оператор сравнения '%s' используется с атрибутом 'queue', " "допускаются только \"!=\" или \"=\"." #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1029 msgid "The value of the acl XRSL attribute isn't valid XML." msgstr "Значение атрибута XRSL acl не является действительным кодом XML." #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1043 msgid "The cluster XRSL attribute is currently unsupported." msgstr "Атрибут XRSL cluster пока что не поддерживается." #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1059 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it must contain an email " "address" msgstr "" "Синтаксическая ошибка в значении атрибута 'notify' ('%s'), он должен " "содержать адрес электронной почты" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1067 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it must only contain email " "addresses after state flag(s)" msgstr "" "Синтаксическая ошибка в значении атрибута 'notify' ('%s'), он должен " "содержать лишь адреса электронной почты после меток статуса" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1070 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it contains unknown state " "flags" msgstr "" "Синтаксическая ошибка в значении атрибута 'notify' ('%s'), он содержит " "неизвестные метки статуса" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1118 msgid "priority is too large - using max value 100" msgstr "слишком высокий приоритет - используется максимальное значение 100" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1151 #, c-format msgid "Invalid nodeaccess value: %s" msgstr "Недопустимое значение nodeaccess: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1194 msgid "Value of 'count' attribute must be an integer" msgstr "Значение атрибута 'count' должно быть целочисленным" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1224 msgid "Value of 'exclusiveexecution' attribute must either be 'yes' or 'no'" msgstr "" "Значением атрибута 'exclusiveexecution' может быть либо 'yes', либо 'no'" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1270 #, c-format msgid "Invalid action value %s" msgstr "Недопустимое значение action %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1360 #, c-format msgid "The specified Globus attribute (%s) is not supported. %s ignored." msgstr "Указанный атрибут Globus (%s) не поддерживается. %s игнорируется." #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1364 #, c-format msgid "Unknown XRSL attribute: %s - Ignoring it." msgstr "Неизвестный атрибут XRSL: %s - игнорируется." #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1378 #, c-format msgid "Wrong language requested: %s" msgstr "Запрошен неверный язык: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1384 msgid "Missing executable" msgstr "Отсутствует элемент Executable." #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1702 msgid "" "Cannot output XRSL representation: The Resources.SlotRequirement." "NumberOfSlots attribute must be specified when the Resources.SlotRequirement." "SlotsPerHost attribute is specified." msgstr "" "Невозможно вывести представление XRSL: атрибут Resources.SlotRequirement." "NumberOfSlots должен быть задан, если задан атрибут Resources." "SlotRequirement.SlotsPerHost ." 
#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:65 #: src/services/wrappers/python/pythonwrapper.cpp:96 msgid "Failed to initialize main Python thread" msgstr "Сбой запуска головного потока Python" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:71 #: src/services/wrappers/python/pythonwrapper.cpp:101 msgid "Main Python thread was not initialized" msgstr "Головной поток Python не был запущен" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:81 #, c-format msgid "Loading Python broker (%i)" msgstr "Подгрузка Python broker (%i)" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:104 #: src/services/wrappers/python/pythonwrapper.cpp:139 msgid "Main Python thread is not initialized" msgstr "Головной процесс Python не был запущен" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:108 msgid "PythonBroker init" msgstr "Инициализация PythonBroker" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:116 msgid "" "Invalid class name. The broker argument for the PythonBroker should be\n" " Filename.Class.args (args is optional), for example SampleBroker." "MyBroker" msgstr "" "Недопустимое имя класса. Аргумент брокера для PythonBroker должен быть\n" " Filename.Class.args (args не обязательно), например: SampleBroker." "MyBroker" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:122 #, c-format msgid "Class name: %s" msgstr "Название класса: %s" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:123 #, c-format msgid "Module name: %s" msgstr "Название модуля: %s" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:132 #: src/services/wrappers/python/pythonwrapper.cpp:183 msgid "Cannot convert ARC module name to Python string" msgstr "Невозможно перевести название модуля ARC в строку Python" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:140 #: src/services/wrappers/python/pythonwrapper.cpp:191 msgid "Cannot import ARC module" msgstr "Не удалось импортировать модуль ARC" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:149 #: src/services/wrappers/python/pythonwrapper.cpp:201 #: src/services/wrappers/python/pythonwrapper.cpp:426 #: src/services/wrappers/python/pythonwrapper.cpp:526 msgid "Cannot get dictionary of ARC module" msgstr "Ошибка доступа к словарю модуля ARC" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:158 msgid "Cannot find ARC UserConfig class" msgstr "Не удалось найти класс ARC UserConfig" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:166 msgid "UserConfig class is not an object" msgstr "Класс UserConfig не является объектом" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:173 msgid "Cannot find ARC JobDescription class" msgstr "Не удалось найти класс ARC JobDescription" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:181 msgid "JobDescription class is not an object" msgstr "Класс JobDescription не является объектом" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:188 msgid "Cannot find ARC ExecutionTarget class" msgstr "Не удалось найти класс ARC ExecutionTarget" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:196 msgid "ExecutionTarget class is not an object" msgstr "Класс ExecutionTarget не является объектом" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:207 #: src/services/wrappers/python/pythonwrapper.cpp:162 msgid "Cannot convert module name to Python string" msgstr "Невозможно перевести название модуля в строку Python" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:215 #: src/services/wrappers/python/pythonwrapper.cpp:169 msgid "Cannot import module" msgstr "Не удалось импортировать модуль" #: 
src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:224 msgid "Cannot get dictionary of custom broker module" msgstr "Невозможно обнаружить словарь пользовательского модуля планировщика" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:233 msgid "Cannot find custom broker class" msgstr "Не обнаружен класс пользовательского планировщика" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:241 #, c-format msgid "%s class is not an object" msgstr "Класс %s не является объектом" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:247 msgid "Cannot create UserConfig argument" msgstr "Не удалось создать аргумент UserConfig" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:255 msgid "Cannot convert UserConfig to Python object" msgstr "Не удалось преобразовать UserConfig в объект Python" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:263 #: src/services/wrappers/python/pythonwrapper.cpp:258 msgid "Cannot create argument of the constructor" msgstr "Не удалось создать аргумент конструктора" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:272 #: src/services/wrappers/python/pythonwrapper.cpp:266 msgid "Cannot create instance of Python class" msgstr "Не удалось реализовать класс Python" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:278 #, c-format msgid "Python broker constructor called (%d)" msgstr "Вызван Python-конструктор планировщика (%d)" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:302 #, c-format msgid "Python broker destructor called (%d)" msgstr "Вызван Python-деструктор планировщика (%d)" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:311 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:328 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:361 msgid "Cannot create ExecutionTarget argument" msgstr "Невозможно создать аргумент ExecutionTarget" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:319 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:336 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:369 #, c-format msgid "Cannot convert ExecutionTarget (%s) to python object" msgstr "Невозможно преобразовать ExecutionTarget (%s) в объект Python" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:393 msgid "Cannot create JobDescription argument" msgstr "Невозможно создать аргумент JobDescription" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:401 msgid "Cannot convert JobDescription to python object" msgstr "Невозможно преобразовать JobDescription в объект Python" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:422 msgid "Do sorting using user created python broker" msgstr "" "Сортировка с использованием пользовательского python-скрипта планировщика" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:53 msgid "Cannot initialize ARCHERY domain name for query" msgstr "Не удалось инициализировать доменное имя ARCHERY для запроса" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:60 msgid "Cannot create resolver from /etc/resolv.conf" msgstr "Не удалось создать преобразователь из /etc/resolv.conf" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:68 msgid "Cannot query service endpoint TXT records from DNS" msgstr "Не удалось опросить TXT записи точек входа сервисов в DNS" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:79 msgid "Cannot parse service endpoint TXT records." msgstr "Не удалось разобрать TXT записи точек входа сервисов." 
#: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:121 #, c-format msgid "Wrong service record field \"%s\" found in the \"%s\"" msgstr "Недопустимый элемент учётной записи сервиса \"%s\" обнаружен в \"%s\"" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:126 #, c-format msgid "Malformed ARCHERY record found (endpoint url is not defined): %s" msgstr "Обнаружена недопустимая запись ARCHERY (не задан URL точки входа): %s" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:131 #, c-format msgid "Malformed ARCHERY record found (endpoint type is not defined): %s" msgstr "Обнаружена недопустимая запись ARCHERY (не задан тип точки входа): %s" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:135 #, c-format msgid "Found service endpoint %s (type %s)" msgstr "Обнаружена точка входа сервиса %s (тип %s)" #: src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp:150 #, c-format msgid "" "Status for service endpoint \"%s\" is set to inactive in ARCHERY. Skipping." msgstr "" "Статус точки доступа сервиса \"%s\" в ARCHERY указан как неактивный. " "Пропускается." #: src/hed/acc/SER/ServiceEndpointRetrieverPluginEMIR.cpp:101 #, c-format msgid "Found %u service endpoints from the index service at %s" msgstr "Обнаружено %u точек входа служб в каталоге на %s" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:102 msgid "Cleaning of UNICORE jobs is not supported" msgstr "Удаление задач UNICORE не поддерживается" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:131 msgid "Canceling of UNICORE jobs is not supported" msgstr "Прерывание задач UNICORE не поддерживается" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:139 msgid "Renewal of UNICORE jobs is not supported" msgstr "Возобновление задач UNICORE не поддерживается" #: src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp:147 msgid "Resumation of UNICORE jobs is not supported" msgstr "Перезапуск задач UNICORE не поддерживается" #: src/hed/acc/UNICORE/UNICOREClient.cpp:67 msgid "Creating a UNICORE client" msgstr "Создаётся клиент UNICORE" #: src/hed/acc/UNICORE/UNICOREClient.cpp:90 src/services/a-rex/test.cpp:154 #: src/services/a-rex/test.cpp:227 src/services/a-rex/test.cpp:275 #: src/services/a-rex/test.cpp:323 src/services/a-rex/test.cpp:371 #: src/tests/client/test_ClientInterface.cpp:73 #: src/tests/client/test_ClientSAML2SSO.cpp:47 #: src/tests/client/test_ClientSAML2SSO.cpp:71 #: src/tests/count/test_client.cpp:64 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:85 #: src/tests/echo/echo_test4axis2c/test_client.cpp:56 #: src/tests/echo/test.cpp:62 src/tests/echo/test_client.cpp:72 #: src/tests/echo/test_clientinterface.cpp:67 #: src/tests/echo/test_clientinterface.cpp:107 #: src/tests/echo/test_clientinterface.cpp:136 #: src/tests/echo/test_clientinterface.py:19 msgid "Creating and sending request" msgstr "Создание и засылка запроса" #: src/hed/acc/UNICORE/UNICOREClient.cpp:182 msgid "Failed to find delegation credentials in client configuration" msgstr "" "Сбой обнаружения параметров доступа для делегирования в настройках клиента" #: src/hed/acc/UNICORE/UNICOREClient.cpp:194 #: src/hed/acc/UNICORE/UNICOREClient.cpp:224 src/services/a-rex/test.cpp:88 msgid "Failed to initiate delegation" msgstr "Сбой инициализации делегирования" #: src/hed/acc/UNICORE/UNICOREClient.cpp:203 #: src/hed/acc/UNICORE/UNICOREClient.cpp:236 msgid "Submission request failed" msgstr "Сбой запроса отправки задачи" #: src/hed/acc/UNICORE/UNICOREClient.cpp:239 msgid "Submission request succeed" msgstr 
"Запрос о засылке задания удался" #: src/hed/acc/UNICORE/UNICOREClient.cpp:241 msgid "There was no response to a submission request" msgstr "Не поступил ответ на запрос об отправке задачи" #: src/hed/acc/UNICORE/UNICOREClient.cpp:248 msgid "A response to a submission request was not a SOAP message" msgstr "Ответ на запрос о засылке не является сообщением SOAP" #: src/hed/acc/UNICORE/UNICOREClient.cpp:255 #: src/hed/acc/UNICORE/UNICOREClient.cpp:336 #: src/hed/acc/UNICORE/UNICOREClient.cpp:414 #: src/hed/acc/UNICORE/UNICOREClient.cpp:527 #: src/hed/acc/UNICORE/UNICOREClient.cpp:603 #: src/hed/acc/UNICORE/UNICOREClient.cpp:677 msgid "There is no connection chain configured" msgstr "Не настроена цепочка связи" #: src/hed/acc/UNICORE/UNICOREClient.cpp:276 #: src/hed/acc/UNICORE/UNICOREClient.cpp:348 #, c-format msgid "Submission returned failure: %s" msgstr "Сбой при запуске: %s" #: src/hed/acc/UNICORE/UNICOREClient.cpp:277 #: src/hed/acc/UNICORE/UNICOREClient.cpp:349 #, c-format msgid "Submission failed, service returned: %s" msgstr "Сбой при запуске, сервис возвратил ошибку: %s" #: src/hed/acc/UNICORE/UNICOREClient.cpp:284 msgid "Creating and sending a start job request" msgstr "Создание и отправка запроса о запуске задачи" #: src/hed/acc/UNICORE/UNICOREClient.cpp:317 msgid "A start job request failed" msgstr "Ошибка запроса о запуске задачи" #: src/hed/acc/UNICORE/UNICOREClient.cpp:320 msgid "A start job request succeeded" msgstr "Успешный запрос о запуске задачи" #: src/hed/acc/UNICORE/UNICOREClient.cpp:322 msgid "There was no response to a start job request" msgstr "Не поступил ответ на запрос о запуске задачи" #: src/hed/acc/UNICORE/UNICOREClient.cpp:329 msgid "The response of a start job request was not a SOAP message" msgstr "Ответ на запрос о создании задачи не является сообщением SOAP" #: src/hed/acc/UNICORE/UNICOREClient.cpp:395 msgid "A status request failed" msgstr "Сбой запроса о состоянии" #: src/hed/acc/UNICORE/UNICOREClient.cpp:398 msgid "A status request succeed" msgstr "Запрос о состоянии удался" #: src/hed/acc/UNICORE/UNICOREClient.cpp:400 msgid "There was no response to a status request" msgstr "Не поступил ответ на запрос о состоянии" #: src/hed/acc/UNICORE/UNICOREClient.cpp:408 msgid "The response of a status request was not a SOAP message" msgstr "Ответ на запрос о состоянии не является сообщением SOAP" #: src/hed/acc/UNICORE/UNICOREClient.cpp:433 msgid "The job status could not be retrieved" msgstr "Не удалось определить состояние задачи" #: src/hed/acc/UNICORE/UNICOREClient.cpp:444 msgid "Creating and sending an index service query" msgstr "Создание и отправка запроса в каталог ресурсов" #: src/hed/acc/UNICORE/UNICOREClient.cpp:472 msgid "Creating and sending a service status request" msgstr "Создание и отправка запроса о состоянии службы" #: src/hed/acc/UNICORE/UNICOREClient.cpp:508 msgid "A service status request failed" msgstr "Ошибка запроса о состоянии службы" #: src/hed/acc/UNICORE/UNICOREClient.cpp:511 msgid "A service status request succeeded" msgstr "Успешный запрос о состоянии службы" #: src/hed/acc/UNICORE/UNICOREClient.cpp:513 msgid "There was no response to a service status request" msgstr "Не поступил ответ на запрос о состоянии службы" #: src/hed/acc/UNICORE/UNICOREClient.cpp:520 msgid "The response of a service status request was not a SOAP message" msgstr "Ответ на запрос о состоянии сервера не является сообщением SOAP" #: src/hed/acc/UNICORE/UNICOREClient.cpp:537 msgid "The service status could not be retrieved" msgstr "Не удалось определить 
состояние службы" #: src/hed/acc/UNICORE/UNICOREClient.cpp:584 msgid "A job termination request failed" msgstr "Ошибка запроса об обрыве исполнения задачи" #: src/hed/acc/UNICORE/UNICOREClient.cpp:587 msgid "A job termination request succeed" msgstr "Успешный запрос об обрыве исполнения задачи" #: src/hed/acc/UNICORE/UNICOREClient.cpp:589 msgid "There was no response to a job termination request" msgstr "Не поступил ответ на запрос об обрыве исполнения задачи" #: src/hed/acc/UNICORE/UNICOREClient.cpp:596 msgid "The response of a job termination request was not a SOAP message" msgstr "Ответ на запрос о прерывании задачи не является сообщением SOAP" #: src/hed/acc/UNICORE/UNICOREClient.cpp:658 msgid "A job cleaning request failed" msgstr "Ошибка запроса об удалении результатов работы задачи" #: src/hed/acc/UNICORE/UNICOREClient.cpp:661 msgid "A job cleaning request succeed" msgstr "Успешный запрос об удалении результатов работы задачи" #: src/hed/acc/UNICORE/UNICOREClient.cpp:663 msgid "There was no response to a job cleaning request" msgstr "Не поступил ответ на запрос об удалении результатов работы задачи" #: src/hed/acc/UNICORE/UNICOREClient.cpp:670 msgid "The response of a job cleaning request was not a SOAP message" msgstr "Ответ на запрос об удалении задачи не является сообщением SOAP" #: src/hed/acc/ldap/Extractor.h:22 #, c-format msgid "Extractor[%s] (%s): %s = %s" msgstr "Extractor[%s] (%s): %s = %s" #: src/hed/acc/ldap/Extractor.h:113 src/hed/acc/ldap/Extractor.h:130 #, c-format msgid "Extractor[%s] (%s): %s contains %s" msgstr "Extractor[%s] (%s): %s содержит %s" #: src/hed/acc/ldap/JobListRetrieverPluginLDAPGLUE2.cpp:54 #, c-format msgid "Adding endpoint '%s' with interface name %s" msgstr "Добавляется точка доступа '%s' с названием интерфейса %s" #: src/hed/acc/ldap/JobListRetrieverPluginLDAPNG.cpp:63 #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginBDII.cpp:43 #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginEGIIS.cpp:46 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE1.cpp:49 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE2.cpp:47 #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPNG.cpp:59 msgid "Can't create information handle - is the ARC ldap DMC plugin available?" msgstr "" "Не удалось создать ссылку для информации - проверьте, доступен ли " "подгружаемый модуль ARC LDAP DMC" #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginBDII.cpp:85 msgid "Adding CREAM computing service" msgstr "Добавляется вычислительная служба типа CREAM" #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginEGIIS.cpp:79 #, c-format msgid "Unknown entry in EGIIS (%s)" msgstr "Неизвестная запись в EGIIS (%s)" #: src/hed/acc/ldap/ServiceEndpointRetrieverPluginEGIIS.cpp:87 msgid "" "Entry in EGIIS is missing one or more of the attributes 'Mds-Service-type', " "'Mds-Service-hn', 'Mds-Service-port' and/or 'Mds-Service-Ldap-suffix'" msgstr "" "Запись в EGIIS не содержит одного или нескольких атрибутов 'Mds-Service-" "type', 'Mds-Service-hn', 'Mds-Service-port' и/или 'Mds-Service-Ldap-suffix'" #: src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE2.cpp:219 msgid "" "The \"FreeSlotsWithDuration\" attribute is wrongly formatted. Ignoring it." msgstr "Атрибут \"FreeSlotsWithDuration\" неверно сформатирован; игнорируется." 
#: src/hed/daemon/unix/daemon.cpp:74 #, c-format msgid "Daemonization fork failed: %s" msgstr "Не удалось создать дочерний демон: %s" #: src/hed/daemon/unix/daemon.cpp:82 msgid "Watchdog (re)starting application" msgstr "Самоконтроль (пере)запускает приложение" #: src/hed/daemon/unix/daemon.cpp:87 #, c-format msgid "Watchdog fork failed: %s" msgstr "Не удалось создать дочерний сторожевой процесс: %s" #: src/hed/daemon/unix/daemon.cpp:94 msgid "Watchdog starting monitoring" msgstr "Самоконтроль запускает мониторинг" #: src/hed/daemon/unix/daemon.cpp:120 #, c-format msgid "Watchdog detected application exit due to signal %u" msgstr "Самоконтроль обнаружил завершение приложения по сигналу %u" #: src/hed/daemon/unix/daemon.cpp:122 #, c-format msgid "Watchdog detected application exited with code %u" msgstr "Самоконтроль обнаружил приложение, завершившееся с кодом %u" #: src/hed/daemon/unix/daemon.cpp:124 msgid "Watchdog detected application exit" msgstr "Самоконтроль обнаружил завершение приложения" #: src/hed/daemon/unix/daemon.cpp:133 msgid "" "Watchdog exiting because application was purposely killed or exited itself" msgstr "" "Самоконтроль останавливается, потому что приложение было прервано намеренно, " "или завершилось" #: src/hed/daemon/unix/daemon.cpp:140 msgid "Watchdog detected application timeout or error - killing process" msgstr "" "Самоконтроль обнаружил превышение времени ожидания приложения или сбой - " "процесс прерывается" #: src/hed/daemon/unix/daemon.cpp:151 msgid "Watchdog failed to wait till application exited - sending KILL" msgstr "" "Самоконтроль не дождался завершения приложения - посылается сигнал KILL" #: src/hed/daemon/unix/daemon.cpp:163 msgid "Watchdog failed to kill application - giving up and exiting" msgstr "Самоконтроль не смог прервать приложение - прекращение попыток и выход" #: src/hed/daemon/unix/daemon.cpp:184 msgid "Shutdown daemon" msgstr "Останов демона" #: src/hed/daemon/unix/main_unix.cpp:43 src/hed/daemon/win32/main_win32.cpp:27 msgid "shutdown" msgstr "Выключение" #: src/hed/daemon/unix/main_unix.cpp:46 msgid "exit" msgstr " выход " #: src/hed/daemon/unix/main_unix.cpp:92 src/hed/daemon/win32/main_win32.cpp:53 msgid "No server config part of config file" msgstr "В файле настроек отсутствуют настройки сервера" #: src/hed/daemon/unix/main_unix.cpp:163 #: src/hed/daemon/win32/main_win32.cpp:91 #, c-format msgid "Unknown log level %s" msgstr "Неизвестный уровень журналирования %s" #: src/hed/daemon/unix/main_unix.cpp:173 #: src/hed/daemon/win32/main_win32.cpp:100 #, c-format msgid "Failed to open log file: %s" msgstr "Не удалось открыть журнальный файл: %s" #: src/hed/daemon/unix/main_unix.cpp:206 #: src/hed/daemon/win32/main_win32.cpp:126 msgid "Start foreground" msgstr "Запуск с высоким приоритетом" #: src/hed/daemon/unix/main_unix.cpp:255 #, c-format msgid "XML config file %s does not exist" msgstr "Файл настроек XML %s не существует" #: src/hed/daemon/unix/main_unix.cpp:259 src/hed/daemon/unix/main_unix.cpp:274 #: src/hed/daemon/win32/main_win32.cpp:154 #, c-format msgid "Failed to load service configuration from file %s" msgstr "Не удалось загрузить настройки сервиса из файла %s" #: src/hed/daemon/unix/main_unix.cpp:265 #, c-format msgid "INI config file %s does not exist" msgstr "Файл настроек INI %s не существует" #: src/hed/daemon/unix/main_unix.cpp:270 msgid "Error evaluating profile" msgstr "Ошибка проверки профиля" #: src/hed/daemon/unix/main_unix.cpp:286 msgid "Error loading generated configuration" msgstr "Ошибка загрузки сгенерированных 
настроек" #: src/hed/daemon/unix/main_unix.cpp:292 msgid "Error evaulating profile" msgstr "Ошибка проверки профиля" #: src/hed/daemon/unix/main_unix.cpp:297 msgid "Failed to load service configuration from any default config file" msgstr "Не удалось загрузить настройки сервиса ни из какого файла настроек" #: src/hed/daemon/unix/main_unix.cpp:358 msgid "Schema validation error" msgstr "Ошибка проверки схемы" #: src/hed/daemon/unix/main_unix.cpp:373 #: src/hed/daemon/win32/main_win32.cpp:159 msgid "Configuration root element is not " msgstr "Корневой элемент настроек не является " #: src/hed/daemon/unix/main_unix.cpp:389 #, c-format msgid "Cannot switch to group (%s)" msgstr "Невозможно перейти к группе (%s)" #: src/hed/daemon/unix/main_unix.cpp:399 #, c-format msgid "Cannot switch to primary group for user (%s)" msgstr "Невозможно переключить на основную группу для пользователя (%s)" #: src/hed/daemon/unix/main_unix.cpp:404 #, c-format msgid "Cannot switch to user (%s)" msgstr "Невозможно перейти к пользователю (%s)" #: src/hed/daemon/unix/main_unix.cpp:422 #: src/hed/daemon/win32/main_win32.cpp:176 msgid "Failed to load service side MCCs" msgstr "Не удалось загрузить компоненты MCC сервера" #: src/hed/daemon/unix/main_unix.cpp:424 #: src/hed/daemon/win32/main_win32.cpp:178 src/services/a-rex/test.cpp:41 #: src/tests/count/test_service.cpp:32 src/tests/echo/test.cpp:30 #: src/tests/echo/test_service.cpp:29 msgid "Service side MCCs are loaded" msgstr "Подгружены сервисные компоненты цепи сообщений" #: src/hed/daemon/unix/main_unix.cpp:431 #: src/hed/daemon/win32/main_win32.cpp:185 msgid "Unexpected arguments supplied" msgstr "Заданы непредусмотренные аргументы" #: src/hed/dmc/acix/DataPointACIX.cpp:93 #: src/hed/dmc/acix/DataPointACIX.cpp:342 #: src/hed/dmc/rucio/DataPointRucio.cpp:220 #: src/hed/dmc/rucio/DataPointRucio.cpp:438 #, c-format msgid "No locations found for %s" msgstr "Не найдено расположений для %s" #: src/hed/dmc/acix/DataPointACIX.cpp:121 #, c-format msgid "Found none or multiple URLs (%s) in ACIX URL: %s" msgstr " В ACIX URL обнаружено ни одного или несколько URL (%s): %s" #: src/hed/dmc/acix/DataPointACIX.cpp:131 #, c-format msgid "Cannot handle URL %s" msgstr "Невозможно обработать URL %s" #: src/hed/dmc/acix/DataPointACIX.cpp:138 #, c-format msgid "Could not resolve original source of %s: out of time" msgstr "Не удалось определить исходный источник %s: время истекло" #: src/hed/dmc/acix/DataPointACIX.cpp:144 #, c-format msgid "Could not resolve original source of %s: %s" msgstr "Не удалось определить исходный источник %s: %s" #: src/hed/dmc/acix/DataPointACIX.cpp:160 #, c-format msgid "Querying ACIX server at %s" msgstr "Опрашивается сервер ACIX на %s" #: src/hed/dmc/acix/DataPointACIX.cpp:161 #, c-format msgid "Calling acix with query %s" msgstr "Вызов ACIX с запросом %s" #: src/hed/dmc/acix/DataPointACIX.cpp:167 #, c-format msgid "Failed to query ACIX: %s" msgstr "Сбой запроса к ACIX: %s" #: src/hed/dmc/acix/DataPointACIX.cpp:171 #: src/hed/dmc/acix/DataPointACIX.cpp:308 #, c-format msgid "Failed to parse ACIX response: %s" msgstr "Сбой разборки отзыва ACIX: %s" #: src/hed/dmc/acix/DataPointACIX.cpp:298 #, c-format msgid "ACIX returned %s" msgstr "ACIX ответил %s" #: src/hed/dmc/acix/DataPointACIX.cpp:319 #, c-format msgid "No locations for %s" msgstr "Не найдено ни одного местонахождения файла %s" #: src/hed/dmc/acix/DataPointACIX.cpp:325 #, c-format msgid "%s: ACIX Location: %s" msgstr "%s: Местонахождение в ACIX: %s" #: src/hed/dmc/acix/DataPointACIX.cpp:327 #, 
c-format msgid "%s: Location %s not accessible remotely, skipping" msgstr "%s: К расположению %s нет удалённого доступа, пропускается" #: src/hed/dmc/arc/DataPointARC.cpp:68 #, c-format msgid "" "checingBartenderURL: Response:\n" "%s" msgstr "" "checingBartenderURL: Ответ:\n" "%s" #: src/hed/dmc/arc/DataPointARC.cpp:154 src/hed/dmc/arc/DataPointARC.cpp:206 #: src/hed/dmc/arc/DataPointARC.cpp:278 src/hed/dmc/arc/DataPointARC.cpp:375 #: src/hed/dmc/arc/DataPointARC.cpp:548 src/hed/dmc/arc/DataPointARC.cpp:609 msgid "Hostname is not implemented for arc protocol" msgstr "Hostname не поддерживается протоколом arc" #: src/hed/dmc/arc/DataPointARC.cpp:245 src/hed/dmc/arc/DataPointARC.cpp:330 #: src/hed/dmc/arc/DataPointARC.cpp:441 src/hed/dmc/arc/DataPointARC.cpp:534 #, c-format msgid "" "nd:\n" "%s" msgstr "" "nd:\n" "%s" #: src/hed/dmc/arc/DataPointARC.cpp:263 msgid "Not a collection" msgstr "Это не коллекция" #: src/hed/dmc/arc/DataPointARC.cpp:282 src/hed/dmc/srm/DataPointSRM.cpp:316 msgid "StartReading" msgstr "Начало чтения" #: src/hed/dmc/arc/DataPointARC.cpp:338 src/hed/dmc/arc/DataPointARC.cpp:449 #: src/hed/dmc/arc/DataPointARC.cpp:601 #, c-format msgid "Recieved transfer URL: %s" msgstr "Получен транспортный URL: %s" #: src/hed/dmc/arc/DataPointARC.cpp:378 src/hed/dmc/srm/DataPointSRM.cpp:518 msgid "StartWriting" msgstr "Начало записи" #: src/hed/dmc/arc/DataPointARC.cpp:490 #, c-format msgid "Calculated checksum: %s" msgstr "Вычисленная контрольная сума: %s" #: src/hed/dmc/arc/DataPointARC.cpp:554 msgid "Check" msgstr "Проверка" #: src/hed/dmc/arc/DataPointARC.cpp:648 #, c-format msgid "Deleted %s" msgstr "Удалён %s" #: src/hed/dmc/file/DataPointFile.cpp:86 #, c-format msgid "Unknown channel %s for stdio protocol" msgstr "Неизвестный канал %s для протокола stdio" #: src/hed/dmc/file/DataPointFile.cpp:93 #, c-format msgid "Failed to open stdio channel %s" msgstr "Не удалось открыть канал stdio %s" #: src/hed/dmc/file/DataPointFile.cpp:94 #, c-format msgid "Failed to open stdio channel %d" msgstr "Не удалось открыть канал stdio %d" #: src/hed/dmc/file/DataPointFile.cpp:335 #, c-format msgid "fsync of file %s failed: %s" msgstr "Сбой операции fsync на файле %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:340 #: src/hed/dmc/file/DataPointFile.cpp:347 #, c-format msgid "closing file %s failed: %s" msgstr "сбой при закрытии файла %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:366 #, c-format msgid "File is not accessible: %s" msgstr "Файл недоступен: %s" #: src/hed/dmc/file/DataPointFile.cpp:372 #: src/hed/dmc/file/DataPointFile.cpp:459 #, c-format msgid "Can't stat file: %s: %s" msgstr "Невозможно получить статус файла: %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:420 #: src/hed/dmc/file/DataPointFile.cpp:426 #, c-format msgid "Can't stat stdio channel %s" msgstr "Невозможно выполнить операцию stat для канала stdio %s" #: src/hed/dmc/file/DataPointFile.cpp:474 #, c-format msgid "%s is not a directory" msgstr "%s не является каталогом" #: src/hed/dmc/file/DataPointFile.cpp:489 src/hed/dmc/s3/DataPointS3.cpp:461 #: src/hed/dmc/s3/DataPointS3.cpp:571 #, c-format msgid "Failed to read object %s: %s" msgstr "Сбой чтения объекта %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:502 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:412 #, c-format msgid "File is not accessible %s: %s" msgstr "Файл недоступен %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:508 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:418 #, c-format msgid "Can't delete directory %s: %s" msgstr "Невозможно удалить каталог %s: %s" #: 
src/hed/dmc/file/DataPointFile.cpp:515 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:425 #, c-format msgid "Can't delete file %s: %s" msgstr "Невозможно удалить файл %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:525 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:335 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1470 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:440 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:460 #: src/services/a-rex/jura/JobLogFile.cpp:657 #: src/services/a-rex/jura/JobLogFile.cpp:1274 #, c-format msgid "Creating directory %s" msgstr "Создается директория %s" #: src/hed/dmc/file/DataPointFile.cpp:533 src/hed/dmc/srm/DataPointSRM.cpp:160 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:474 #, c-format msgid "Renaming %s to %s" msgstr "%s переименовывается в %s" #: src/hed/dmc/file/DataPointFile.cpp:535 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:483 #, c-format msgid "Can't rename file %s: %s" msgstr "Не удалось переименовать файл %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:566 #, c-format msgid "Failed to open %s for reading: %s" msgstr "Невозможно открыть %s для чтения: %s" #: src/hed/dmc/file/DataPointFile.cpp:581 #: src/hed/dmc/file/DataPointFile.cpp:719 #, c-format msgid "Failed to switch user id to %d/%d" msgstr "Не удалось изменить идентификатор пользователя на %d/%d" #: src/hed/dmc/file/DataPointFile.cpp:587 #, c-format msgid "Failed to create/open file %s: %s" msgstr "Сбой при создании/открытии файла %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:603 msgid "Failed to create thread" msgstr "Не удалось создать поток" #: src/hed/dmc/file/DataPointFile.cpp:683 #, c-format msgid "Invalid url: %s" msgstr "Неверный URL: %s" #: src/hed/dmc/file/DataPointFile.cpp:692 src/hed/libs/data/FileCache.cpp:603 #, c-format msgid "Failed to create directory %s: %s" msgstr "Сбой создания каталога %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:708 #: src/hed/dmc/file/DataPointFile.cpp:727 #, c-format msgid "Failed to create file %s: %s" msgstr "Сбой при создании файла %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:739 #, c-format msgid "setting file %s to size %llu" msgstr "файлу %s присваивается размер %llu" #: src/hed/dmc/file/DataPointFile.cpp:759 #, c-format msgid "Failed to preallocate space for %s" msgstr "Сбой предварительного резервирования места для %s" #: src/hed/dmc/file/DataPointFile.cpp:800 src/hed/libs/data/FileCache.cpp:981 #, c-format msgid "Failed to clean up file %s: %s" msgstr "Сбой при очистке файла %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:809 #, c-format msgid "Error during file validation. Can't stat file %s: %s" msgstr "" "Ошибка при проверке файла. 
Невозможно выполнить операцию stat для файла %s: %" "s" #: src/hed/dmc/file/DataPointFile.cpp:813 #, c-format msgid "" "Error during file validation: Local file size %llu does not match source " "file size %llu for file %s" msgstr "" "Ошибка при сверке: размер локального файла %llu не соответствует размеру " "файла-источника %llu для файла %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:53 #, c-format msgid "Using proxy %s" msgstr "Используется прокси %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:54 #, c-format msgid "Using key %s" msgstr "Используется ключ %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:55 #, c-format msgid "Using cert %s" msgstr "Используется сертификат %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:113 msgid "Locations are missing in destination LFC URL" msgstr "В URL назначения LFC отсутствуют местоположения" #: src/hed/dmc/gfal/DataPointGFAL.cpp:119 #, c-format msgid "Duplicate replica found in LFC: %s" msgstr "В LFC обнаружена идентичная копия: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:121 #, c-format msgid "Adding location: %s - %s" msgstr "Добавляется адрес: %s - %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:129 #: src/hed/libs/data/DataPointIndex.cpp:161 #, c-format msgid "Add location: url: %s" msgstr "Добавление расположения: url: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:130 #: src/hed/libs/data/DataPointIndex.cpp:162 #, c-format msgid "Add location: metadata: %s" msgstr "Добавление расположения: metadata: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:150 #: src/hed/dmc/gfal/DataPointGFAL.cpp:310 #, c-format msgid "gfal_open failed: %s" msgstr "Сбой gfal_open: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:163 #: src/hed/dmc/gfal/DataPointGFAL.cpp:223 #: src/hed/dmc/gfal/DataPointGFAL.cpp:249 #: src/hed/dmc/gfal/DataPointGFAL.cpp:324 #: src/hed/dmc/gfal/DataPointGFAL.cpp:403 #: src/hed/dmc/gfal/DataPointGFAL.cpp:430 #, c-format msgid "gfal_close failed: %s" msgstr "Сбой gfal_close: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:195 #, c-format msgid "gfal_read failed: %s" msgstr "Сбой gfal_read: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:237 msgid "StopReading starts waiting for transfer_condition." msgstr "StopReading начинает ожидание transfer_condition." #: src/hed/dmc/gfal/DataPointGFAL.cpp:239 msgid "StopReading finished waiting for transfer_condition." msgstr "StopReading закончил ожидание transfer_condition." #: src/hed/dmc/gfal/DataPointGFAL.cpp:271 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:66 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:71 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:42 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:47 #, c-format msgid "No locations defined for %s" msgstr "Не найдено ни одного местонахождения файла %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:278 #, c-format msgid "Failed to set LFC replicas: %s" msgstr "Сбой задания копий в LFC: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:304 #, c-format msgid "gfal_mkdir failed (%s), trying to write anyway" msgstr "Сбой в gfal_mkdir (%s), всё же попытаемся записать" #: src/hed/dmc/gfal/DataPointGFAL.cpp:359 #, c-format msgid "DataPointGFAL::write_file got position %d and offset %d, has to seek" msgstr "" "DataPointGFAL::write_file получил на входе адрес %d и сдвиг %d, проводится " "поиск" #: src/hed/dmc/gfal/DataPointGFAL.cpp:388 #, c-format msgid "gfal_write failed: %s" msgstr "Сбой gfal_write: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:418 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:302 msgid "StopWriting starts waiting for transfer_condition." 
msgstr "StopWriting начинает ожидание transfer_condition." #: src/hed/dmc/gfal/DataPointGFAL.cpp:420 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:304 msgid "StopWriting finished waiting for transfer_condition." msgstr "StopWriting закончил ожидание transfer_condition." #: src/hed/dmc/gfal/DataPointGFAL.cpp:451 #, c-format msgid "gfal_stat failed: %s" msgstr "Сбой gfal_stat: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:496 #, c-format msgid "gfal_listxattr failed, no replica information can be obtained: %s" msgstr "Сбой в gfal_listxattr, невозможно получить информацию о копиях: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:537 #, c-format msgid "gfal_opendir failed: %s" msgstr "Сбой gfal_opendir: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:549 #, c-format msgid "List will stat the URL %s" msgstr "Перечисление запросит информацию stat об URL %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:556 #, c-format msgid "gfal_closedir failed: %s" msgstr "Сбой gfal_closedir: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:584 #, c-format msgid "gfal_rmdir failed: %s" msgstr "Сбой gfal_rmdir: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:587 #, c-format msgid "gfal_unlink failed: %s" msgstr "Сбой gfal_unlink: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:604 #, c-format msgid "gfal_mkdir failed: %s" msgstr "Сбой gfal_mkdir: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:619 #, c-format msgid "gfal_rename failed: %s" msgstr "Сбой gfal_rename: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:19 #, c-format msgid "Failed to obtain bytes transferred: %s" msgstr "Сбой определения количества переданных байтов: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:42 #, c-format msgid "Failed to get initiate GFAL2 parameter handle: %s" msgstr "Не удалось получить ссылку параметра GFAL2: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:49 #, c-format msgid "Failed to get initiate new GFAL2 context: %s" msgstr "Не удалось получить новый контекст GFAL2: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:56 #, c-format msgid "Failed to set GFAL2 monitor callback: %s" msgstr "Сбой установки обратного вызова монитора GFAL2: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:64 #, c-format msgid "Failed to set overwrite option in GFAL2: %s" msgstr "Сбой установки опции перезаписи в GFAL2: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:72 #, c-format msgid "Failed to set GFAL2 transfer timeout, will use default: %s" msgstr "" "Сбой установки времени ожидания передачи GFAL2, будет использоваться " "значение по умолчанию: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:84 msgid "Transfer failed" msgstr "Передача не удалась" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:92 msgid "Transfer succeeded" msgstr "Передача удалась" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:38 msgid "ftp_complete_callback: success" msgstr "ftp_complete_callback: успех" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:44 #, c-format msgid "ftp_complete_callback: error: %s" msgstr "ftp_complete_callback: ошибка: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:60 msgid "ftp_check_callback" msgstr "ftp_check_callback" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:62 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:90 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:116 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:135 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:305 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:340 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:678 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:847 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:879 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:913 #: 
src/hed/dmc/gridftp/DataPointGridFTP.cpp:1052 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1104 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1114 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1122 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1130 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1138 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1144 #: src/services/gridftpd/commands.cpp:1225 #: src/services/gridftpd/dataread.cpp:76 #: src/services/gridftpd/dataread.cpp:173 #: src/services/gridftpd/datawrite.cpp:59 #: src/services/gridftpd/datawrite.cpp:146 #, c-format msgid "Globus error: %s" msgstr "Ошибка Globus: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:73 msgid "Excessive data received while checking file access" msgstr "При проверке прав доступа к файлу получены избыточные данные" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:89 msgid "Registration of Globus FTP buffer failed - cancel check" msgstr "Сбой регистрации буфера Globus FTP - проверка прерывается" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:115 msgid "check_ftp: globus_ftp_client_size failed" msgstr "check_ftp: сбой в globus_ftp_client_size" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:119 msgid "check_ftp: timeout waiting for size" msgstr "check_ftp: истекло время ожидания команды size" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:124 msgid "check_ftp: failed to get file's size" msgstr "check_ftp: не удалось определить размер файла" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:127 #, c-format msgid "check_ftp: obtained size: %lli" msgstr "check_ftp: получен размер: %lli" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:134 msgid "check_ftp: globus_ftp_client_modification_time failed" msgstr "check_ftp: сбой в globus_ftp_client_modification_time" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:138 msgid "check_ftp: timeout waiting for modification_time" msgstr "check_ftp: истекло время ожидания modification_time" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:143 msgid "check_ftp: failed to get file's modification time" msgstr "check_ftp: сбой при определении времени изменения файла" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:148 #, c-format msgid "check_ftp: obtained modification date: %s" msgstr "check_ftp: получена дата изменения: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:167 msgid "check_ftp: globus_ftp_client_get failed" msgstr "check_ftp: сбой в globus_ftp_client_get" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:174 msgid "check_ftp: globus_ftp_client_register_read" msgstr "check_ftp: globus_ftp_client_register_read" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:185 msgid "check_ftp: timeout waiting for partial get" msgstr "check_ftp: истекло время ожидания частичной загрузки" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:215 #, c-format msgid "File delete failed, attempting directory delete for %s" msgstr "Сбой при удалении файла, попытка удаления каталога для %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:225 msgid "delete_ftp: globus_ftp_client_delete failed" msgstr "delete_ftp: сбой в globus_ftp_client_delete" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:231 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:252 msgid "delete_ftp: timeout waiting for delete" msgstr "delete_ftp: истекло время ожидания команды delete" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:246 msgid "delete_ftp: globus_ftp_client_rmdir failed" msgstr "delete_ftp: сбой в globus_ftp_client_rmdir" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:301 #, c-format msgid "mkdir_ftp: making %s" msgstr "mkdir_ftp: создаётся %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:309 msgid 
"mkdir_ftp: timeout waiting for mkdir" msgstr "mkdir_ftp: истекло время ожидания команды mkdir" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:344 msgid "Timeout waiting for mkdir" msgstr "Истекло время ожидания команды mkdir" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:370 msgid "start_reading_ftp" msgstr "start_reading_ftp" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:374 msgid "start_reading_ftp: globus_ftp_client_get" msgstr "start_reading_ftp: globus_ftp_client_get" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:388 msgid "start_reading_ftp: globus_ftp_client_get failed" msgstr "start_reading_ftp: сбой в globus_ftp_client_get" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:399 msgid "start_reading_ftp: globus_thread_create failed" msgstr "start_reading_ftp: сбой в globus_thread_create" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:418 msgid "stop_reading_ftp: aborting connection" msgstr "stop_reading_ftp: отменяется соединение" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:425 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:647 #, c-format msgid "Failed to abort transfer of ftp file: %s" msgstr "Сбой прерывания передачи файла по ftp: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:426 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:648 msgid "Assuming transfer is already aborted or failed." msgstr "Предполагаем, что пересылка уже отменена, либо оборвалась" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:433 msgid "stop_reading_ftp: waiting for transfer to finish" msgstr "stop_reading_ftp: ожидание завершения пересылки" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:435 #, c-format msgid "stop_reading_ftp: exiting: %s" msgstr "stop_reading_ftp: выход: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:449 msgid "ftp_read_thread: get and register buffers" msgstr "ftp_read_thread: получение и регистрация буферов" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:455 #, c-format msgid "ftp_read_thread: for_read failed - aborting: %s" msgstr "ftp_read_thread: сбой for_read - прерывание: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:465 #, c-format msgid "ftp_read_thread: data callback failed - aborting: %s" msgstr "ftp_read_thread: сбой обратного вызова данных - прерывание: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:477 #, c-format msgid "ftp_read_thread: Globus error: %s" msgstr "ftp_read_thread: ошибка Globus: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:490 #, c-format msgid "ftp_read_thread: too many registration failures - abort: %s" msgstr "ftp_read_thread: слишком много сбоев регистрации - отмена: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:495 #, c-format msgid "ftp_read_thread: failed to register Globus buffer - will try later: %s" msgstr "" "ftp_read_thread: сбой при регистрации буфера Globus - попробуем попозже: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:508 msgid "ftp_read_thread: waiting for eof" msgstr "ftp_read_thread: ожидание конца файла" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:512 msgid "ftp_read_thread: waiting for buffers released" msgstr "ftp_read_thread: ожидание разблокировки буферов" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:516 msgid "ftp_read_thread: failed to release buffers - leaking" msgstr "ftp_read_thread: сбой сброса буферов - утечка" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:521 msgid "ftp_read_thread: exiting" msgstr "ftp_read_thread: выход" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:539 #, c-format msgid "ftp_read_callback: failure: %s" msgstr "ftp_read_callback: сбой: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:542 msgid "ftp_read_callback: success" msgstr 
"ftp_read_callback: успех" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:558 msgid "Failed to get ftp file" msgstr "Не удалось получить файл ftp" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:594 msgid "start_writing_ftp: mkdir" msgstr "start_writing_ftp: mkdir" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:597 msgid "start_writing_ftp: mkdir failed - still trying to write" msgstr "start_writing_ftp: сбой mkdir - всё же пытаемся записать" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:599 msgid "start_writing_ftp: put" msgstr "start_writing_ftp: put" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:613 msgid "start_writing_ftp: put failed" msgstr "start_writing_ftp: сбой в put" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:623 msgid "start_writing_ftp: globus_thread_create failed" msgstr "start_writing_ftp: сбой в globus_thread_create" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:640 msgid "StopWriting: aborting connection" msgstr "StopWriting: прерывание связи" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:664 #, c-format msgid "StopWriting: Calculated checksum %s" msgstr "StopWriting: Вычислена контрольная сумма %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:668 #, c-format msgid "StopWriting: looking for checksum of %s" msgstr "StopWriting: поиск контрольной суммы %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:677 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:912 msgid "list_files_ftp: globus_ftp_client_cksm failed" msgstr "list_files_ftp: сбой globus_ftp_client_cksm" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:681 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:916 msgid "list_files_ftp: timeout waiting for cksum" msgstr "list_files_ftp: истекло время ожидания проверочной суммы" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:688 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:923 msgid "list_files_ftp: no checksum information possible" msgstr "list_files_ftp: информация о контрольных суммах недоступна" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:691 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:926 #, c-format msgid "list_files_ftp: checksum %s" msgstr "list_files_ftp: проверочная сумма %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:694 msgid "" "Checksum type returned by server is different to requested type, cannot " "compare" msgstr "" "Тип контрольной суммы на сервере отличается от запрошенного, сравнение " "невозможно" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:696 #, c-format msgid "Calculated checksum %s matches checksum reported by server" msgstr "" "Вычисленная проверочная сумма %s совпадает с проверочной суммой сервера" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:699 #, c-format msgid "" "Checksum mismatch between calculated checksum %s and checksum reported by " "server %s" msgstr "" "Несовпадение между вычисленной контрольной суммой %s и контрольной суммой, " "выданной сервером %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:721 msgid "ftp_write_thread: get and register buffers" msgstr "ftp_write_thread: получение и регистрация буферов" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:725 msgid "ftp_write_thread: for_write failed - aborting" msgstr "ftp_write_thread: сбой for_write - прерывание" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:743 msgid "ftp_write_thread: data callback failed - aborting" msgstr "ftp_write_thread: сбой обратного вызова данных - прерывание" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:759 msgid "ftp_write_thread: waiting for eof" msgstr "ftp_read_thread: ожидание конца файла" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:763 msgid "ftp_write_thread: waiting for buffers released" 
msgstr "ftp_write_thread: ожидание разблокировки буферов" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:771 msgid "ftp_write_thread: failed to release buffers - leaking" msgstr "ftp_write_thread: сбой сброса буферов - утечка" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:776 msgid "ftp_write_thread: exiting" msgstr "ftp_write_thread: выход" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:799 #, c-format msgid "ftp_write_callback: failure: %s" msgstr "ftp_write_callback: сбой: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:802 #, c-format msgid "ftp_write_callback: success %s" msgstr "ftp_write_callback: успех %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:817 msgid "Failed to store ftp file" msgstr "Не удалось сохранить файл ftp" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:825 msgid "ftp_put_complete_callback: success" msgstr "ftp_put_complete_callback: успех" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:841 #, c-format msgid "list_files_ftp: looking for size of %s" msgstr "list_files_ftp: поиск размера %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:845 msgid "list_files_ftp: globus_ftp_client_size failed" msgstr "list_files_ftp: сбой в globus_ftp_client_size" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:851 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:852 msgid "list_files_ftp: timeout waiting for size" msgstr "list_files_ftp: истекло время ожидания размера" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:858 msgid "list_files_ftp: failed to get file's size" msgstr "list_files_ftp: не удалось определить размер файла" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:870 #, c-format msgid "list_files_ftp: looking for modification time of %s" msgstr "list_files_ftp: определение времени изменения %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:876 msgid "list_files_ftp: globus_ftp_client_modification_time failed" msgstr "list_files_ftp: сбой globus_ftp_client_modification_time" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:883 msgid "list_files_ftp: timeout waiting for modification_time" msgstr "list_files_ftp: истекло время ожидания modification_time" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:891 msgid "list_files_ftp: failed to get file's modification time" msgstr "list_files_ftp: не удалось определить время изменения файла" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:903 #, c-format msgid "list_files_ftp: looking for checksum of %s" msgstr "list_files_ftp: поиск проверочной суммы %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:942 #, c-format msgid "Failed to obtain stat from FTP: %s" msgstr "Не удалось получить список статус через FTP: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:948 msgid "No results returned from stat" msgstr "Вызов stat не возвратил никаких результатов" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:954 #, c-format msgid "Wrong number of objects (%i) for stat from ftp: %s" msgstr "Неверное количество объектов (%i) для операции stat от ftp: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:968 #, c-format msgid "Unexpected path %s returned from server" msgstr "Сервер возвратил неожиданный путь %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1007 #, c-format msgid "Failed to obtain listing from FTP: %s" msgstr "Не удалось получить список файлов через FTP: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1050 msgid "Rename: globus_ftp_client_move failed" msgstr "Переименование: сбой в globus_ftp_client_move" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1056 msgid "Rename: timeout waiting for operation to complete" msgstr "Переименование: истекло время ожидания завершения операции" #: 
src/hed/dmc/gridftp/DataPointGridFTP.cpp:1103 msgid "init_handle: globus_ftp_client_handleattr_init failed" msgstr "init_handle: сбой в globus_ftp_client_handleattr_init" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1112 msgid "init_handle: globus_ftp_client_handleattr_set_gridftp2 failed" msgstr "init_handle: сбой в globus_ftp_client_handleattr_set_gridftp2" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1121 msgid "init_handle: globus_ftp_client_handle_init failed" msgstr "init_handle: сбой в globus_ftp_client_handle_init" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1128 msgid "init_handle: globus_ftp_client_operationattr_init failed" msgstr "init_handle: сбой в globus_ftp_client_operationattr_init" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1136 msgid "init_handle: globus_ftp_client_operationattr_set_allow_ipv6 failed" msgstr "init_handle: сбой globus_ftp_client_operationattr_set_allow_ipv6" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1142 msgid "init_handle: globus_ftp_client_operationattr_set_delayed_pasv failed" msgstr "init_handle: сбой globus_ftp_client_operationattr_set_delayed_pasv" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1190 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1218 #, c-format msgid "globus_ftp_client_operationattr_set_authorization: error: %s" msgstr "globus_ftp_client_operationattr_set_authorization: ошибка: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1217 msgid "Failed to set credentials for GridFTP transfer" msgstr "Не удалось установить параметры доступа для передачи данных по GridFTP" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1223 msgid "Using secure data transfer" msgstr "Используется защищённая передача данных" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1228 msgid "Using insecure data transfer" msgstr "Используется незащищённая передача данных" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1255 msgid "~DataPoint: destroy ftp_handle" msgstr "~DataPoint: уничтожение ftp_handle" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1258 msgid "~DataPoint: destroy ftp_handle failed - retrying" msgstr "~DataPoint: уничтожение ftp_handle не удалось - новая попытка" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1276 msgid "~DataPoint: failed to destroy ftp_handle - leaking" msgstr "~DataPoint: уничтожение ftp_handle не удалось - утечка" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1296 msgid "" "Missing reference to factory and/or module. It is unsafe to use Globus in " "non-persistent mode - (Grid)FTP code is disabled. Report to developers." msgstr "" "Отсутствует указание на фабрику и/или модуль. Использование Globus в " "неопределённом режиме небезопасно - вызов (Grid)FTP заблокирован. Свяжитесь " "с разработчиками."
#: src/hed/dmc/gridftp/Lister.cpp:224 src/hed/dmc/gridftp/Lister.cpp:292 #: src/hed/dmc/gridftp/Lister.cpp:387 src/hed/dmc/gridftp/Lister.cpp:737 #: src/hed/dmc/gridftp/Lister.cpp:775 #, c-format msgid "Failure: %s" msgstr "Ошибка: %s" #: src/hed/dmc/gridftp/Lister.cpp:226 src/hed/dmc/gridftp/Lister.cpp:246 #: src/hed/dmc/gridftp/Lister.cpp:471 src/hed/dmc/gridftp/Lister.cpp:478 #: src/hed/dmc/gridftp/Lister.cpp:500 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:163 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:196 #, c-format msgid "Response: %s" msgstr "Ответ: %s" #: src/hed/dmc/gridftp/Lister.cpp:291 msgid "Error getting list of files (in list)" msgstr "Не удалось получить список файлов (в list)" #: src/hed/dmc/gridftp/Lister.cpp:293 msgid "Assuming - file not found" msgstr "Предполагается, что файл не найден" #: src/hed/dmc/gridftp/Lister.cpp:310 #, c-format msgid "list record: %s" msgstr "перечисление записи: %s" #: src/hed/dmc/gridftp/Lister.cpp:365 msgid "Failed reading list of files" msgstr "Ошибка чтения списка файлов" #: src/hed/dmc/gridftp/Lister.cpp:401 msgid "Failed reading data" msgstr "Ошибка чтения данных" #: src/hed/dmc/gridftp/Lister.cpp:429 #, c-format msgid "Command: %s" msgstr "Команда: %s" #: src/hed/dmc/gridftp/Lister.cpp:433 src/hed/dmc/gridftp/Lister.cpp:474 #: src/hed/mcc/http/PayloadHTTP.cpp:991 msgid "Memory allocation error" msgstr "Ошибка выделения памяти" #: src/hed/dmc/gridftp/Lister.cpp:441 #, c-format msgid "%s failed" msgstr "%s не удалось" #: src/hed/dmc/gridftp/Lister.cpp:445 msgid "Command is being sent" msgstr "Посылается инструкция" #: src/hed/dmc/gridftp/Lister.cpp:450 msgid "Waiting for response" msgstr "Ожидание отклика" #: src/hed/dmc/gridftp/Lister.cpp:455 msgid "Callback got failure" msgstr "Сбой обратного вызова" #: src/hed/dmc/gridftp/Lister.cpp:541 msgid "Failed in globus_cond_init" msgstr "Сбой в globus_cond_init" #: src/hed/dmc/gridftp/Lister.cpp:545 msgid "Failed in globus_mutex_init" msgstr "Сбой в globus_mutex_init" #: src/hed/dmc/gridftp/Lister.cpp:552 msgid "Failed allocating memory for handle" msgstr "Не удалось зарезервировать память под ссылку" #: src/hed/dmc/gridftp/Lister.cpp:557 msgid "Failed in globus_ftp_control_handle_init" msgstr "Сбой в globus_ftp_control_handle_init" #: src/hed/dmc/gridftp/Lister.cpp:565 msgid "Failed to enable IPv6" msgstr "Не удалось включить IPv6" #: src/hed/dmc/gridftp/Lister.cpp:576 src/services/gridftpd/commands.cpp:983 msgid "Closing connection" msgstr "Прекращение связи" #: src/hed/dmc/gridftp/Lister.cpp:583 src/hed/dmc/gridftp/Lister.cpp:598 msgid "Timeout waiting for Globus callback - leaking connection" msgstr "Истекло время ожидания ответного сообщения Globus - утечка соединения" #: src/hed/dmc/gridftp/Lister.cpp:608 msgid "Closed successfully" msgstr "Успешное прекращение" #: src/hed/dmc/gridftp/Lister.cpp:610 msgid "Closing may have failed" msgstr "Возможно, был сбой при закрытии" #: src/hed/dmc/gridftp/Lister.cpp:637 msgid "Waiting for globus handle to settle" msgstr "Ждём, пока ссылка globus стабилизируется" #: src/hed/dmc/gridftp/Lister.cpp:642 #, c-format msgid "Handle is not in proper state %u/%u" msgstr "Ссылка в недопустимом состоянии %u/%u" #: src/hed/dmc/gridftp/Lister.cpp:648 msgid "Globus handle is stuck" msgstr "Ссылка globus застряла" #: src/hed/dmc/gridftp/Lister.cpp:664 #, c-format msgid "Failed destroying handle: %s. Can't handle such situation." msgstr "" "Не удалось уничтожить ссылку: %s. Невозможно справиться с таким положением."
#: src/hed/dmc/gridftp/Lister.cpp:687 #, c-format msgid "EPSV failed: %s" msgstr "Сбой EPSV: %s" #: src/hed/dmc/gridftp/Lister.cpp:691 msgid "EPSV failed" msgstr "Сбой EPSV" #: src/hed/dmc/gridftp/Lister.cpp:698 #, c-format msgid "PASV failed: %s" msgstr "Инструкция PASV не прошла: %s" #: src/hed/dmc/gridftp/Lister.cpp:702 msgid "PASV failed" msgstr "Инструкция PASV не прошла" #: src/hed/dmc/gridftp/Lister.cpp:735 msgid "Failed to apply local address to data connection" msgstr "Не удалось применить локальный адрес к соединению передачи данных" #: src/hed/dmc/gridftp/Lister.cpp:749 msgid "Can't parse host and/or port in response to EPSV/PASV" msgstr "" "Не удалось извлечь адрес узла и/или номер порта из ответа на запрос EPSV/PASV" #: src/hed/dmc/gridftp/Lister.cpp:754 #, c-format msgid "Data channel: %d.%d.%d.%d:%d" msgstr "Канал передачи данных: %d.%d.%d.%d:%d" #: src/hed/dmc/gridftp/Lister.cpp:769 #, c-format msgid "Data channel: [%s]:%d" msgstr "Канал передачи данных: [%s]:%d" #: src/hed/dmc/gridftp/Lister.cpp:773 msgid "Obtained host and address are not acceptable" msgstr "Полученные адрес и номер порта неприемлемы" #: src/hed/dmc/gridftp/Lister.cpp:783 msgid "Failed to open data channel" msgstr "Не удалось открыть канал передачи данных" #: src/hed/dmc/gridftp/Lister.cpp:801 #, c-format msgid "Unsupported protocol in url %s" msgstr "Неподдерживаемый протокол в URL %s" #: src/hed/dmc/gridftp/Lister.cpp:813 msgid "Reusing connection" msgstr "Повторное использование соединения" #: src/hed/dmc/gridftp/Lister.cpp:837 #, c-format msgid "Failed connecting to server %s:%d" msgstr "Сбой установления связи с сервером %s:%d" #: src/hed/dmc/gridftp/Lister.cpp:843 #, c-format msgid "Failed to connect to server %s:%d" msgstr "Не удалось установить связь с сервером %s:%d" #: src/hed/dmc/gridftp/Lister.cpp:859 msgid "Missing authentication information" msgstr "Отсутствует информация для проверки подлинности" #: src/hed/dmc/gridftp/Lister.cpp:868 src/hed/dmc/gridftp/Lister.cpp:882 #, c-format msgid "Bad authentication information: %s" msgstr "Неприемлемая информация для проверки подлинности: %s" #: src/hed/dmc/gridftp/Lister.cpp:891 src/hed/dmc/gridftp/Lister.cpp:906 #, c-format msgid "Failed authenticating: %s" msgstr "Ошибка проверки подлинности: %s" #: src/hed/dmc/gridftp/Lister.cpp:898 msgid "Failed authenticating" msgstr "Ошибка проверки подлинности" #: src/hed/dmc/gridftp/Lister.cpp:933 src/hed/dmc/gridftp/Lister.cpp:1089 #, c-format msgid "DCAU failed: %s" msgstr "Инструкция DCAU не прошла: %s" #: src/hed/dmc/gridftp/Lister.cpp:937 src/hed/dmc/gridftp/Lister.cpp:1094 msgid "DCAU failed" msgstr "Инструкция DCAU не прошла" #: src/hed/dmc/gridftp/Lister.cpp:957 msgid "MLST is not supported - trying LIST" msgstr "MLST не поддерживается - пробуем LIST" #: src/hed/dmc/gridftp/Lister.cpp:973 #, c-format msgid "Immediate completion expected: %s" msgstr "Ожидается немедленное завершение: %s" #: src/hed/dmc/gridftp/Lister.cpp:977 msgid "Immediate completion expected" msgstr "Ожидается немедленное завершение" #: src/hed/dmc/gridftp/Lister.cpp:990 #, c-format msgid "Missing information in reply: %s" msgstr "Неполная информация в отклике: %s" #: src/hed/dmc/gridftp/Lister.cpp:1024 #, c-format msgid "Missing final reply: %s" msgstr "Отсутствует заключительный отклик: %s" #: src/hed/dmc/gridftp/Lister.cpp:1048 #, c-format msgid "Unexpected immediate completion: %s" msgstr "Неожиданное немедленное завершение: %s" #: src/hed/dmc/gridftp/Lister.cpp:1060 #, c-format msgid "LIST/MLST failed: %s" msgstr "Сбой
LIST/MLST: %s" #: src/hed/dmc/gridftp/Lister.cpp:1065 msgid "LIST/MLST failed" msgstr "Сбой LIST/MLST" #: src/hed/dmc/gridftp/Lister.cpp:1115 msgid "MLSD is not supported - trying NLST" msgstr "MLSD не поддерживается - пробуем NLST" #: src/hed/dmc/gridftp/Lister.cpp:1129 #, c-format msgid "Immediate completion: %s" msgstr "Немедленное завершение: %s" #: src/hed/dmc/gridftp/Lister.cpp:1137 #, c-format msgid "NLST/MLSD failed: %s" msgstr "Инструкция NLST/MLSD не прошла: %s" #: src/hed/dmc/gridftp/Lister.cpp:1143 msgid "NLST/MLSD failed" msgstr "Инструкция NLST/MLSD не прошла" #: src/hed/dmc/gridftp/Lister.cpp:1164 #, c-format msgid "Data transfer aborted: %s" msgstr "Передача данных прервана: %s" #: src/hed/dmc/gridftp/Lister.cpp:1169 msgid "Data transfer aborted" msgstr "Передача данных прервана" #: src/hed/dmc/gridftp/Lister.cpp:1181 msgid "Failed to transfer data" msgstr "Не удалось передать данные" #: src/hed/dmc/http/DataPointHTTP.cpp:388 #: src/hed/dmc/http/DataPointHTTP.cpp:517 #: src/hed/dmc/http/DataPointHTTP.cpp:598 #: src/hed/dmc/http/DataPointHTTP.cpp:1000 #: src/hed/dmc/http/DataPointHTTP.cpp:1141 #: src/hed/dmc/http/DataPointHTTP.cpp:1286 #, c-format msgid "Redirecting to %s" msgstr "Перенаправление к %s" #: src/hed/dmc/http/DataPointHTTP.cpp:670 #, c-format msgid "Stat: obtained size %llu" msgstr "Проверка: получен размер %llu" #: src/hed/dmc/http/DataPointHTTP.cpp:674 #, c-format msgid "Stat: obtained modification time %s" msgstr "Stat: получено время изменения %s" #: src/hed/dmc/http/DataPointHTTP.cpp:903 #, c-format msgid "Check: obtained size %llu" msgstr "Проверка: получен размер %llu" #: src/hed/dmc/http/DataPointHTTP.cpp:905 #, c-format msgid "Check: obtained modification time %s" msgstr "Check: получено время изменения %s" #: src/hed/dmc/http/DataPointHTTP.cpp:1017 #: src/hed/dmc/http/DataPointHTTP.cpp:1161 #, c-format msgid "HTTP failure %u - %s" msgstr "Ошибка HTTP %u - %s" #: src/hed/dmc/ldap/DataPointLDAP.cpp:36 msgid "" "Missing reference to factory and/or module. Currently safe unloading of LDAP " "DMC is not supported. Report to developers." msgstr "" "Отсутствует ссылка на фабрику и/или модуль. В настоящее время безопасная " "выгрузка LDAP DMC не поддерживается. Пожалуйтесь разработчикам." 
#: src/hed/dmc/ldap/LDAPQuery.cpp:175 msgid "SASL Interaction" msgstr "Обмен данными SASL" #: src/hed/dmc/ldap/LDAPQuery.cpp:223 #, c-format msgid "Challenge: %s" msgstr "Запрос: %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:227 #, c-format msgid "Default: %s" msgstr "По умолчанию: %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:303 #, c-format msgid "LDAPQuery: Initializing connection to %s:%d" msgstr "LDAPQuery: устанавливается соединение с %s:%d" #: src/hed/dmc/ldap/LDAPQuery.cpp:307 #, c-format msgid "LDAP connection already open to %s" msgstr "Соединение LDAP с %s уже установлено" #: src/hed/dmc/ldap/LDAPQuery.cpp:325 #, c-format msgid "Could not open LDAP connection to %s" msgstr "Невозможно установить соединение LDAP с %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:346 #, c-format msgid "Failed to create ldap bind thread (%s)" msgstr "Не удалось создать поток для привязки к LDAP (%s)" #: src/hed/dmc/ldap/LDAPQuery.cpp:353 #, c-format msgid "Ldap bind timeout (%s)" msgstr "Истекло время соединения с LDAP (%s)" #: src/hed/dmc/ldap/LDAPQuery.cpp:360 #, c-format msgid "Failed to bind to ldap server (%s)" msgstr "Сбой привязки к серверу LDAP: %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:381 #, c-format msgid "Could not set LDAP network timeout (%s)" msgstr "Не удалось задать время ожидания соединения LDAP (%s)" #: src/hed/dmc/ldap/LDAPQuery.cpp:389 #, c-format msgid "Could not set LDAP timelimit (%s)" msgstr "Не удалось задать время ожидания ответа сервера LDAP (%s)" #: src/hed/dmc/ldap/LDAPQuery.cpp:396 #, c-format msgid "Could not set LDAP protocol version (%s)" msgstr "Не удалось задать версию протокола LDAP (%s)" #: src/hed/dmc/ldap/LDAPQuery.cpp:472 #, c-format msgid "LDAPQuery: Querying %s" msgstr "LDAPQuery: Запрашивается %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:474 #, c-format msgid " base dn: %s" msgstr "базовое ОИ (DN): %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:476 #, c-format msgid " filter: %s" msgstr "фильтр: %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:478 msgid " attributes:" msgstr "атрибуты:" #: src/hed/dmc/ldap/LDAPQuery.cpp:481 #: src/services/gridftpd/misc/ldapquery.cpp:399 #, c-format msgid " %s" msgstr " %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:526 src/hed/dmc/ldap/LDAPQuery.cpp:598 #, c-format msgid "%s (%s)" msgstr "%s (%s)" #: src/hed/dmc/ldap/LDAPQuery.cpp:550 #, c-format msgid "LDAPQuery: Getting results from %s" msgstr "LDAPQuery: Получение результатов с %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:553 #, c-format msgid "Error: no LDAP query started to %s" msgstr "Ошибка: не послан запрос LDAP к %s" #: src/hed/dmc/ldap/LDAPQuery.cpp:593 #, c-format msgid "LDAP query timed out: %s" msgstr "Истекло время ожидания ответа на запрос LDAP: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:23 #, c-format msgid "Replacing existing token for %s in Rucio token cache" msgstr "Заменяется существующий маркер для %s в кэше маркеров Rucio" #: src/hed/dmc/rucio/DataPointRucio.cpp:36 #, c-format msgid "Found existing token for %s in Rucio token cache with expiry time %s" msgstr "" "Обнаружен существующий маркер для %s в кэше маркеров Rucio, истекающий %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:39 #, c-format msgid "Rucio token for %s has expired or is about to expire" msgstr "Срок действия маркера Rucio для %s истёк, или вскоре истечёт" #: src/hed/dmc/rucio/DataPointRucio.cpp:101 #, c-format msgid "Extracted nickname %s from credentials to use for RUCIO_ACCOUNT" msgstr "" "Из параметров доступа выделен псевдоним %s для использования в RUCIO_ACCOUNT" #: src/hed/dmc/rucio/DataPointRucio.cpp:104 msgid "Failed to extract VOMS nickname from proxy"
msgstr "Сбой извлечения псевдонима VOMS из сертификата доверенности" #: src/hed/dmc/rucio/DataPointRucio.cpp:106 #, c-format msgid "Using Rucio account %s" msgstr "Используется учётная запись Rucio %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:146 #, c-format msgid "" "Bad path for %s: Rucio supports read/write at /objectstores and read-only " "at /replicas" msgstr "" "Неверный путь к %s: Rucio поддерживает запись/чтение в /objectstores и лишь " "чтение в /replicas" #: src/hed/dmc/rucio/DataPointRucio.cpp:162 #, c-format msgid "Can't handle URL %s" msgstr "Невозможно обработать URL %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:303 #, c-format msgid "Acquired auth token for %s: %s" msgstr "Получен маркер доступа для %s: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:357 #, c-format msgid "Rucio returned %s" msgstr "Rucio возвратил %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:382 #, c-format msgid "Failed to parse Rucio response: %s" msgstr "Не удалось разобрать отзыв Rucio: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:388 #, c-format msgid "Filename not returned in Rucio response: %s" msgstr "В отзыве Rucio отсутствует имя файла: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:394 #, c-format msgid "Unexpected name returned in Rucio response: %s" msgstr "Отзыв Rucio содержит недопустимое имя: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:400 #, c-format msgid "No RSE information returned in Rucio response: %s" msgstr "В отзыве Rucio отсутствует информация RSE: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:423 #, c-format msgid "No filesize information returned in Rucio response for %s" msgstr "В отзыве Rucio для %s отсутствует информация о размере файла" #: src/hed/dmc/rucio/DataPointRucio.cpp:426 #, c-format msgid "%s: size %llu" msgstr "%s: размер %llu" #: src/hed/dmc/rucio/DataPointRucio.cpp:430 #, c-format msgid "No checksum information returned in Rucio response for %s" msgstr "В отзыве Rucio для %s отсутствует информация о контрольной сумме" #: src/hed/dmc/rucio/DataPointRucio.cpp:433 #, c-format msgid "%s: checksum %s" msgstr "%s: контрольная сумма %s" #: src/hed/dmc/s3/DataPointS3.cpp:648 #, c-format msgid "Failed to write object %s: %s" msgstr "Сбой записи объекта %s: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:67 #, c-format msgid "Check: looking for metadata: %s" msgstr "Проверка: поиск метаданных: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:79 #, c-format msgid "Check: obtained size: %lli" msgstr "Проверка: получен размер: %lli" #: src/hed/dmc/srm/DataPointSRM.cpp:85 #, c-format msgid "Check: obtained checksum: %s" msgstr "Проверка: получена контрольная сумма: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:89 #, c-format msgid "Check: obtained modification date: %s" msgstr "Проверка: получено время изменения: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:93 msgid "Check: obtained access latency: low (ONLINE)" msgstr "Проверка: получена задержка доступа: короткая (ONLINE)" #: src/hed/dmc/srm/DataPointSRM.cpp:97 msgid "Check: obtained access latency: high (NEARLINE)" msgstr "Проверка: получена задержка доступа: длинная (NEARLINE)" #: src/hed/dmc/srm/DataPointSRM.cpp:119 #, c-format msgid "Remove: deleting: %s" msgstr "Remove: удаляется: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:139 #, c-format msgid "Creating directory: %s" msgstr "Создается директория %s" #: src/hed/dmc/srm/DataPointSRM.cpp:190 src/hed/dmc/srm/DataPointSRM.cpp:243 msgid "Calling PrepareReading when request was already prepared!" msgstr "Вызов PrepareReading когда запрос был уже подготовлен!" 
#: src/hed/dmc/srm/DataPointSRM.cpp:212 #, c-format msgid "File %s is NEARLINE, will make request to bring online" msgstr "" "Файл %s в состоянии NEARLINE, будет сделан запрос о размещении на диске" #: src/hed/dmc/srm/DataPointSRM.cpp:222 #, c-format msgid "Bring online request %s is still in queue, should wait" msgstr "Запрос %s на размещение на диске всё ещё в очереди, следует подождать" #: src/hed/dmc/srm/DataPointSRM.cpp:227 #, c-format msgid "Bring online request %s finished successfully, file is now ONLINE" msgstr "" "Запрос %s на размещение на диске успешно выполнен, файл теперь в состоянии " "ONLINE" #: src/hed/dmc/srm/DataPointSRM.cpp:234 #, c-format msgid "" "Bad logic for %s - bringOnline returned ok but SRM request is not finished " "successfully or on going" msgstr "" "Неверная логика в %s - bringOnline завершился успешно, но запрос SRM не " "завершился успехом, либо ещё в процессе" #: src/hed/dmc/srm/DataPointSRM.cpp:263 src/hed/dmc/srm/DataPointSRM.cpp:424 msgid "None of the requested transfer protocols are supported" msgstr "" "Не поддерживается ни один из запрошенных протоколов транспортного уровня" #: src/hed/dmc/srm/DataPointSRM.cpp:278 #, c-format msgid "Get request %s is still in queue, should wait %i seconds" msgstr "Запрос на получение %s всё ещё в очереди, следует подождать %i секунд" #: src/hed/dmc/srm/DataPointSRM.cpp:286 src/hed/dmc/srm/DataPointSRM.cpp:488 #, c-format msgid "Checking URL returned by SRM: %s" msgstr "Проверяется URL выданный SRM: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:301 src/hed/dmc/srm/DataPointSRM.cpp:503 #, c-format msgid "SRM returned no useful Transfer URLs: %s" msgstr "SRM не выдал пригодных для передачи URL: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:308 #, c-format msgid "" "Bad logic for %s - getTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" "Неверная логика в %s - getTURLs завершился успешно, но запрос SRM не " "завершился успехом, либо ещё в процессе" #: src/hed/dmc/srm/DataPointSRM.cpp:318 msgid "StartReading: File was not prepared properly" msgstr "StartReading: Файл не был подготовлен должным образом" #: src/hed/dmc/srm/DataPointSRM.cpp:332 src/hed/dmc/srm/DataPointSRM.cpp:534 #, c-format msgid "TURL %s cannot be handled" msgstr "TURL %s не может быть обработан" #: src/hed/dmc/srm/DataPointSRM.cpp:340 src/hed/dmc/srm/DataPointSRM.cpp:542 #, c-format msgid "Redirecting to new URL: %s" msgstr "Перенаправление к новому URL: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:404 msgid "Calling PrepareWriting when request was already prepared!" msgstr "Вызов PrepareWriting когда запрос был уже подготовлен!" 
#: src/hed/dmc/srm/DataPointSRM.cpp:435 msgid "No space token specified" msgstr "Не указан маркёр пространства памяти" #: src/hed/dmc/srm/DataPointSRM.cpp:441 msgid "Warning: Using SRM protocol v1 which does not support space tokens" msgstr "" "Предупреждение: Используется версия v1 протокола SRM, которая не поддерживает " "маркёры пространства памяти" #: src/hed/dmc/srm/DataPointSRM.cpp:444 #, c-format msgid "Using space token description %s" msgstr "Используется описание маркёра пространства памяти %s" #: src/hed/dmc/srm/DataPointSRM.cpp:450 #, c-format msgid "Error looking up space tokens matching description %s" msgstr "" "Ошибка поиска маркёров пространства памяти, соответствующих описанию %s" #: src/hed/dmc/srm/DataPointSRM.cpp:457 #, c-format msgid "No space tokens found matching description %s" msgstr "Не найдены маркёры пространства памяти, соответствующие описанию %s" #: src/hed/dmc/srm/DataPointSRM.cpp:464 #, c-format msgid "Using space token %s" msgstr "Используется маркёр пространства памяти %s" #: src/hed/dmc/srm/DataPointSRM.cpp:480 #, c-format msgid "Put request %s is still in queue, should wait %i seconds" msgstr "" "Запрос на размещение %s всё ещё в очереди, следует подождать %i секунд" #: src/hed/dmc/srm/DataPointSRM.cpp:510 #, c-format msgid "" "Bad logic for %s - putTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" "Неверная логика в %s - putTURLs завершился успешно, но запрос SRM не " "завершился успехом, либо ещё в процессе" #: src/hed/dmc/srm/DataPointSRM.cpp:520 msgid "StartWriting: File was not prepared properly" msgstr "StartWriting: Файл не был подготовлен должным образом" #: src/hed/dmc/srm/DataPointSRM.cpp:593 #, c-format msgid "FinishWriting: looking for metadata: %s" msgstr "FinishWriting: поиск метаданных: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:610 #, c-format msgid "FinishWriting: obtained checksum: %s" msgstr "FinishWriting: получена контрольная сумма: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:613 #, c-format msgid "" "Calculated/supplied transfer checksum %s matches checksum reported by SRM " "destination %s" msgstr "" "Вычисленная/указанная контрольная сумма %s совпадает с контрольной суммой, " "заявленной точкой назначения SRM %s" #: src/hed/dmc/srm/DataPointSRM.cpp:616 #, c-format msgid "" "Checksum mismatch between calculated/supplied checksum (%s) and checksum " "reported by SRM destination (%s)" msgstr "" "Несовпадение между вычисленной/указанной контрольной суммой %s и контрольной " "суммой, заявленной точкой назначения SRM %s" #: src/hed/dmc/srm/DataPointSRM.cpp:619 #, c-format msgid "" "Checksum type of SRM (%s) and calculated/supplied checksum (%s) differ, " "cannot compare" msgstr "" "Типы контрольной суммы в SRM (%s) и вычисленной/указанной контрольной суммы " "(%s) различаются, сравнение невозможно" #: src/hed/dmc/srm/DataPointSRM.cpp:620 src/hed/dmc/srm/DataPointSRM.cpp:621 msgid "No checksum information from server" msgstr "Сервер не выдал информацию о контрольной сумме" #: src/hed/dmc/srm/DataPointSRM.cpp:622 src/hed/dmc/srm/DataPointSRM.cpp:623 msgid "No checksum verification possible" msgstr "Невозможно подтвердить контрольную сумму" #: src/hed/dmc/srm/DataPointSRM.cpp:629 msgid "Failed to release completed request" msgstr "Сбой сброса завершившегося запроса" #: src/hed/dmc/srm/DataPointSRM.cpp:673 src/hed/dmc/srm/DataPointSRM.cpp:740 #, c-format msgid "ListFiles: looking for metadata: %s" msgstr "ListFiles: поиск метаданных: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:806 #, c-format msgid "plugin for transport protocol %s
is not installed" msgstr "" "не установлен подключаемый модуль для протокола транспортного уровня %s" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:55 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:94 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:146 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:185 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:225 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:263 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:307 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:369 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:442 msgid "SRM did not return any information" msgstr "SRM не возвратил никакой информации" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:320 #, c-format msgid "File could not be moved to Running state: %s" msgstr "Файл не может быть переведён в состояние Running: %s" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:376 msgid "SRM did not return any useful information" msgstr "SRM не возвратил никакой полезной информации" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:454 msgid "File could not be moved to Done state" msgstr "Файл не может быть переведён в состояние Done" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:92 msgid "Could not determine version of server" msgstr "Не удалось определить версию сервера" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:98 #, c-format msgid "Server SRM version: %s" msgstr "Версия сервера SRM: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:103 #, c-format msgid "Server implementation: %s" msgstr "Реализация сервера: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:140 #, c-format msgid "Adding space token %s" msgstr "Добавляется маркёр пространства памяти %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:167 msgid "No request tokens found" msgstr "Не найдены маркёры запроса" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:180 #, c-format msgid "Adding request token %s" msgstr "Добавляется маркёр запроса %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:241 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:646 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:832 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1389 #, c-format msgid "%s: File request %s in SRM queue. Sleeping for %i seconds" msgstr "%s: Запрос файла %s в очереди SRM. Ожидание %i секунд" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:279 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:331 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:702 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:768 #, c-format msgid "File is ready! TURL is %s" msgstr "Файл готов! TURL: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:363 #, c-format msgid "Setting userRequestDescription to %s" msgstr "Установка userRequestDescription в %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:418 #, c-format msgid "%s: Bring online request %s in SRM queue. Sleeping for %i seconds" msgstr "%s: Запрос файла с ленты %s в очереди SRM. Ожидание %i секунд" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:461 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1164 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1198 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1232 msgid "No request token specified!" msgstr "Не указан маркёр запроса!" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:528 msgid "Request is reported as ABORTED, but all files are done" msgstr "Запрос прерван (ABORTED), но все файлы готовы" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:534 msgid "Request is reported as ABORTED, since it was cancelled" msgstr "Запрос прерван (ABORTED), так как он был отменён" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:540 #, c-format msgid "Request is reported as ABORTED. 
Reason: %s" msgstr "Запрос прерван (ABORTED). Причина: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:677 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:749 #, c-format msgid "Path %s is invalid, creating required directories" msgstr "Путь %s недействителен, создаются недостающие директории" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:682 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:754 #, c-format msgid "Error creating required directories for %s" msgstr "Ошибка создания необходимых каталогов для %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:855 msgid "Too many files in one request - please try again with fewer files" msgstr "" "Слишком много файлов на один запрос - пожалуйста, попробуйте снова, с " "меньшим количеством файлов" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:903 msgid "" "Directory size is too large to list in one call, will have to call multiple " "times" msgstr "" "Размер директории слишком велик для распечатки в одном запросе, придётся " "делать несколько запросов" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:940 msgid "" "Failure in parsing response from server - some information may be inaccurate" msgstr "" "Ошибка при разборе отзыва с сервера - информация может быть частично неверной" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:946 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:635 #: src/services/a-rex/jura/ApelDestination.cpp:215 #: src/services/a-rex/jura/LutsDestination.cpp:192 #: src/services/gridftpd/misc/ldapquery.cpp:183 #: src/services/gridftpd/misc/ldapquery.cpp:186 #: src/services/gridftpd/misc/ldapquery.cpp:392 #: src/services/gridftpd/misc/ldapquery.cpp:623 #: src/services/gridftpd/misc/ldapquery.cpp:632 #, c-format msgid "%s: %s" msgstr "%s: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:979 #, c-format msgid "" "Directory size is larger than %i files, will have to call multiple times" msgstr "" "Размер директории превышает %i файлов, придётся делать несколько запросов" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1189 #, c-format msgid "Files associated with request token %s released successfully" msgstr "Файлы, ассоциированные с маркёром запроса %s, успешно разблокированы" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1223 #, c-format msgid "Files associated with request token %s put done successfully" msgstr "Файлы, ассоциированные с маркёром запроса %s, успешно отгружены" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1258 #, c-format msgid "Files associated with request token %s aborted successfully" msgstr "Файлы, ассоциированные с маркёром запроса %s, успешно прерваны" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1275 #, c-format msgid "" "Failed to find metadata info on %s for determining file or directory delete" msgstr "" "Не удалось найти информацию о типе %s, чтобы определить, стирается файл или " "каталог" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1281 msgid "Type is file, calling srmRm" msgstr "Тип file, вызывается srmRm" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1285 msgid "Type is dir, calling srmRmDir" msgstr "Тип dir, вызывается srmRmDir" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1289 msgid "File type is not available, attempting file delete" msgstr "Тип файла недоступен, попытка стереть файл" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1292 msgid "File delete failed, attempting directory delete" msgstr "Сбой при удалении файла, попытка удаления каталога" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1317 #, c-format msgid "File %s removed successfully" msgstr "Успешно удалён файл %s" #: 
src/hed/dmc/srm/srmclient/SRM22Client.cpp:1344 #, c-format msgid "Directory %s removed successfully" msgstr "Успешно удалён каталог %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1459 #, c-format msgid "Checking for existence of %s" msgstr "Проверка существования %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1462 #, c-format msgid "File already exists: %s" msgstr "Файл уже существует: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1499 #, c-format msgid "Error creating directory %s: %s" msgstr "Ошибка создания каталога %s: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:82 #, c-format msgid "Attempting to contact %s on port %i" msgstr "Попытка соединиться с %s по порту %i" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:88 #, c-format msgid "Storing port %i for %s" msgstr "Сохраняется порт %i для %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:102 #, c-format msgid "No port succeeded for %s" msgstr "Не найдено подходящего порта для %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:112 #, c-format msgid "URL %s disagrees with stored SRM info, testing new info" msgstr "" "URL %s не соответствует информации, хранящейся в SRM info; проверяется новая " "информация" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:118 #, c-format msgid "Replacing old SRM info with new for URL %s" msgstr "Замена старой информации в SRM на новую для URL %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:140 #, c-format msgid "SOAP request: %s" msgstr "Запрос SOAP: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:147 #: src/hed/dmc/srm/srmclient/SRMClient.cpp:176 #, c-format msgid "SOAP fault: %s" msgstr "Ошибка SOAP: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:148 msgid "Reconnecting" msgstr "Пересоединение" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:158 #, c-format msgid "SRM Client status: %s" msgstr "Состояние клиента SRM: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:171 #: src/hed/identitymap/ArgusPDPClient.cpp:250 #, c-format msgid "SOAP response: %s" msgstr "Ответ SOAP: %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:76 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:162 #, c-format msgid "Failed to acquire lock on file %s" msgstr "Сбой установки блокировки на файл %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:81 #, c-format msgid "Error reading info from file %s:%s" msgstr "Ошибка чтения информации из файла %s:%s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:95 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:187 #, c-format msgid "Bad or old format detected in file %s, in line %s" msgstr "Обнаружен неверный или устаревший формат в файле %s, строке %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:100 #, c-format msgid "Cannot convert string %s to int in line %s" msgstr "" "Невозможно преобразовать строку %s в целочисленное значение в строке %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:203 #, c-format msgid "Error writing srm info file %s" msgstr "Ошибка записи файла информации SRM %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:91 #, c-format msgid "Reading %u bytes from byte %llu" msgstr "Чтение %u байтов из байта %llu" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:93 #, c-format msgid "Read %i bytes" msgstr "Прочитано %i байт" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:124 #, c-format msgid "Could not open file %s for reading: %s" msgstr "Невозможно открыть файл %s для чтения: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:139 #, c-format msgid "Unable to find file size of %s" msgstr "Не удалось определить размер файла %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:203 #, c-format msgid "DataPointXrootd::write_file got position %d and offset %d, has
to seek" msgstr "" "DataPointXrootd::write_file получил адрес %d и сдвиг %d, проводится поиск" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:226 #, c-format msgid "xrootd write failed: %s" msgstr "Сбой при записи xrootd: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:235 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:309 #, c-format msgid "xrootd close failed: %s" msgstr "Сбой при закрытии xrootd: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:258 #, c-format msgid "Failed to open %s, trying to create parent directories" msgstr "Сбой открытия %s, попытка создания родительских каталогов" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:271 #, c-format msgid "xrootd open failed: %s" msgstr "Сбой при открытии xrootd: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:285 #, c-format msgid "close failed: %s" msgstr "Сбой при закрытии: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:327 #, c-format msgid "Read access not allowed for %s: %s" msgstr "Закрыт доступ на чтение для %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:346 #, c-format msgid "Could not stat file %s: %s" msgstr "Не удалось определить состояние файла %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:381 #, c-format msgid "Failed to open directory %s: %s" msgstr "Не удалось открыть каталог %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:396 #, c-format msgid "Error while reading dir %s: %s" msgstr "Ошибка при чтении каталога %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:446 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:464 #, c-format msgid "Error creating required dirs: %s" msgstr "Ошибка создания требуемых каталогов: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:158 msgid "PDPD location is missing" msgstr "отсутствует расположение PDPD" #: src/hed/identitymap/ArgusPDPClient.cpp:161 #, c-format msgid "PDPD location: %s" msgstr "расположение PDPD: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:166 #: src/hed/identitymap/ArgusPEPClient.cpp:129 msgid "Conversion mode is set to SUBJECT" msgstr "Задан способ преобразования SUBJECT" #: src/hed/identitymap/ArgusPDPClient.cpp:169 #: src/hed/identitymap/ArgusPEPClient.cpp:132 msgid "Conversion mode is set to CREAM" msgstr "Задан способ преобразования CREAM" #: src/hed/identitymap/ArgusPDPClient.cpp:172 #: src/hed/identitymap/ArgusPEPClient.cpp:135 msgid "Conversion mode is set to EMI" msgstr "Задан способ преобразования EMI" #: src/hed/identitymap/ArgusPDPClient.cpp:175 #: src/hed/identitymap/ArgusPEPClient.cpp:138 #, c-format msgid "Unknown conversion mode %s, using default" msgstr "" "Неизвестный способ преобразования %s, используется значение по умолчанию" #: src/hed/identitymap/ArgusPDPClient.cpp:242 #, c-format msgid "Failed to contact PDP server: %s" msgstr "Сбой соединения с сервером PDP: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:245 #, c-format msgid "There was no SOAP response return from PDP server: %s" msgstr "Сервер PDP не возвратил ответ SOAP: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:360 #: src/hed/identitymap/ArgusPEPClient.cpp:286 #, c-format msgid "Have %i requests to process" msgstr "%i запросов для обработки" #: src/hed/identitymap/ArgusPDPClient.cpp:362 msgid "Creating a client to Argus PDP service" msgstr "Создаётся клиент для службы Argus PDP" #: src/hed/identitymap/ArgusPDPClient.cpp:375 #, c-format msgid "XACML authorisation request: %s" msgstr "Запрос авторизации GACL: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:386 #, c-format msgid "XACML authorisation response: %s" msgstr "Отклик допуска XACML: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:426 #: 
src/hed/identitymap/ArgusPEPClient.cpp:333 #, c-format msgid "%s is not authorized to do action %s in resource %s " msgstr "%s не допущен к исполнению действия %s на ресурсе %s " #: src/hed/identitymap/ArgusPDPClient.cpp:429 #: src/hed/identitymap/ArgusPDPClient.cpp:434 #: src/hed/identitymap/ArgusPEPClient.cpp:336 msgid "Not authorized" msgstr "Нет допуска" #: src/hed/identitymap/ArgusPDPClient.cpp:439 #: src/hed/identitymap/ArgusPEPClient.cpp:341 #: src/hed/identitymap/IdentityMap.cpp:219 #: src/hed/shc/legacy/LegacyMap.cpp:215 #, c-format msgid "Grid identity is mapped to local identity '%s'" msgstr "" "Опознавательные признаки Грид поставлены в соответствие местной учётной " "записи '%s'" #: src/hed/identitymap/ArgusPDPClient.cpp:566 #: src/hed/identitymap/ArgusPEPClient.cpp:655 msgid "Doing CREAM request" msgstr "Производится запрос CREAM" #: src/hed/identitymap/ArgusPDPClient.cpp:580 #: src/hed/identitymap/ArgusPDPClient.cpp:748 #: src/hed/identitymap/ArgusPEPClient.cpp:683 #, c-format msgid "Adding profile-id value: %s" msgstr "Добавляется значение profile-id: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:592 #: src/hed/identitymap/ArgusPDPClient.cpp:759 #: src/hed/identitymap/ArgusPEPClient.cpp:694 #, c-format msgid "Adding subject-id value: %s" msgstr "Добавляется значение subject-id: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:600 #: src/hed/identitymap/ArgusPDPClient.cpp:767 #: src/hed/identitymap/ArgusPEPClient.cpp:704 #, c-format msgid "Adding subject-issuer value: %s" msgstr "Добавляется значение subject-issuer: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:607 #: src/hed/identitymap/ArgusPEPClient.cpp:713 #, c-format msgid "Adding virtual-organization value: %s" msgstr "Добавляется значение virtual-organization: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:620 #: src/hed/identitymap/ArgusPEPClient.cpp:730 #, c-format msgid "Adding FQAN value: %s" msgstr "Добавляется значение FQAN: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:629 #: src/hed/identitymap/ArgusPEPClient.cpp:739 #, c-format msgid "Adding FQAN/primary value: %s" msgstr "Добавляется FQAN/первичное значение: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:639 #: src/hed/identitymap/ArgusPEPClient.cpp:750 #, c-format msgid "Adding cert chain value: %s" msgstr "Добавляется значение цепочки сертификатов: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:648 #: src/hed/identitymap/ArgusPDPClient.cpp:840 #, c-format msgid "Adding resource-id value: %s" msgstr "Добавляется значение идентификатора ресурса: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:662 #: src/hed/identitymap/ArgusPDPClient.cpp:863 #: src/hed/identitymap/ArgusPEPClient.cpp:775 #, c-format msgid "Adding action-id value: %s" msgstr "Добавляется значение action-id: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:666 #: src/hed/identitymap/ArgusPEPClient.cpp:786 #, c-format msgid "CREAM request generation failed: %s" msgstr "Не удалось создать запрос CREAM: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:732 msgid "Doing EMI request" msgstr "Производится запрос EMI" #: src/hed/identitymap/ArgusPDPClient.cpp:774 #, c-format msgid "Adding Virtual Organization value: %s" msgstr "Добавляется значение виртуальной организации: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:797 #, c-format msgid "Adding VOMS group value: %s" msgstr "Добавляется значение группы VOMS: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:803 #, c-format msgid "Adding VOMS primary group value: %s" msgstr "Добавляется значение первичной группы VOMS: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:822 
#, c-format msgid "Adding VOMS role value: %s" msgstr "Добавляется значение роли VOMS: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:829 #, c-format msgid "Adding VOMS primary role value: %s" msgstr "Добавляется значение первичной роли VOMS: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:846 #, c-format msgid "Adding resource-owner value: %s" msgstr "Добавляется значение владельца ресурса: %s" #: src/hed/identitymap/ArgusPDPClient.cpp:867 #, c-format msgid "EMI request generation failed: %s" msgstr "Не удалось создать запрос EMI: %s" #: src/hed/identitymap/ArgusPEPClient.cpp:119 msgid "PEPD location is missing" msgstr "отсутствует расположение PEPD" #: src/hed/identitymap/ArgusPEPClient.cpp:122 #, c-format msgid "PEPD location: %s" msgstr "расположение PEPD: %s" #: src/hed/identitymap/ArgusPEPClient.cpp:126 msgid "Conversion mode is set to DIRECT" msgstr "Задан способ преобразования DIRECT" #: src/hed/identitymap/ArgusPEPClient.cpp:331 #, c-format msgid "" "Not authorized according to request:\n" "%s" msgstr "" "Нет допуска согласно запросу:\n" "%s" #: src/hed/identitymap/ArgusPEPClient.cpp:361 msgid "Subject of request is null \n" msgstr "Отсутствует субъект запроса\n" #: src/hed/identitymap/ArgusPEPClient.cpp:366 #, c-format msgid "Can not create XACML SubjectAttribute: %s\n" msgstr "Не удалось создать атрибут XACML SubjectAttribute: %s\n" #: src/hed/identitymap/ArgusPEPClient.cpp:375 msgid "Can not create XACML Resource \n" msgstr "Не удалось создать XACML Resource \n" #: src/hed/identitymap/ArgusPEPClient.cpp:381 #, c-format msgid "Can not create XACML ResourceAttribute: %s\n" msgstr "Не удалось создать атрибут XACML ResourceAttribute: %s\n" #: src/hed/identitymap/ArgusPEPClient.cpp:390 msgid "Can not create XACML Action\n" msgstr "Не удалось создать XACML Action\n" #: src/hed/identitymap/ArgusPEPClient.cpp:397 #, c-format msgid "Can not create XACML ActionAttribute: %s\n" msgstr "Не удалось создать атрибут XACML ActionAttribute: %s\n" #: src/hed/identitymap/ArgusPEPClient.cpp:407 msgid "Can not create XACML request\n" msgstr "Не удалось создать запрос XACML\n" #: src/hed/identitymap/ArgusPEPClient.cpp:539 #, c-format msgid "Converting to CREAM action - namespace: %s, operation: %s" msgstr "Преобразование в действие CREAM - пространство имён: %s, операция: %s" #: src/hed/identitymap/ArgusPEPClient.cpp:760 #, c-format msgid "Adding resoure-id value: %s" msgstr "Добавляется значение resoure-id: %s" #: src/hed/identitymap/IdentityMap.cpp:196 #, c-format msgid "PDP: %s can not be loaded" msgstr "PDP: %s не может быть подгружен" #: src/hed/libs/common/ArcLocation.cpp:68 #, c-format msgid "" "Can not determine the install location. Using %s. Please set ARC_LOCATION if " "this is not correct." msgstr "" "Невозможно определить место установки. Используется %s. 
Если это не " "соответствует действительности, задайте, пожалуйста, переменную ARC_LOCATION" #: src/hed/libs/common/DateTime.cpp:86 src/hed/libs/common/DateTime.cpp:631 #: src/hed/libs/common/StringConv.h:25 msgid "Empty string" msgstr "Пустая строка" #: src/hed/libs/common/DateTime.cpp:107 #, c-format msgid "Can not parse date: %s" msgstr "Невозможно определить дату: %s" #: src/hed/libs/common/DateTime.cpp:130 #, c-format msgid "Can not parse time: %s" msgstr "Невозможно определить время: %s" #: src/hed/libs/common/DateTime.cpp:160 #, c-format msgid "Can not parse time zone offset: %s" msgstr "Невозможно определить часовой пояс: %s" #: src/hed/libs/common/DateTime.cpp:180 src/hed/libs/common/DateTime.cpp:199 #: src/hed/libs/common/DateTime.cpp:252 src/hed/libs/common/DateTime.cpp:291 #, c-format msgid "Illegal time format: %s" msgstr "Недопустимый формат времени: %s" #: src/hed/libs/common/DateTime.cpp:230 src/hed/libs/common/DateTime.cpp:283 #, c-format msgid "Can not parse month: %s" msgstr "Невозможно определить месяц: %s" #: src/hed/libs/common/DateTime.cpp:647 src/hed/libs/common/DateTime.cpp:688 #, c-format msgid "Invalid ISO duration format: %s" msgstr "Неверный ISO-формат продолжительности: %s" #: src/hed/libs/common/DateTime.cpp:752 #, c-format msgid "Invalid period string: %s" msgstr "Недопустимый интервал времени: %s" #: src/hed/libs/common/DateTime.cpp:874 msgid "hour" msgid_plural "hours" msgstr[0] "час" msgstr[1] "часа" msgstr[2] "часов" #: src/hed/libs/common/DateTime.cpp:880 msgid "minute" msgid_plural "minutes" msgstr[0] "минута" msgstr[1] "минуты" msgstr[2] "минут" #: src/hed/libs/common/DateTime.cpp:886 msgid "second" msgid_plural "seconds" msgstr[0] "секунда" msgstr[1] "секунды" msgstr[2] "секунд" #: src/hed/libs/common/FileLock.cpp:48 msgid "Cannot determine hostname from gethostname()" msgstr "Невозможно извлечь имя узла используя gethostname()" #: src/hed/libs/common/FileLock.cpp:97 #, c-format msgid "EACCES Error opening lock file %s: %s" msgstr "EACCES Ошибка открытия файла блокировки %s: %s" #: src/hed/libs/common/FileLock.cpp:102 #, c-format msgid "Error opening lock file %s in initial check: %s" msgstr "" "Ошибка при открытии файла блокировки %s при предварительной проверке: %s" #: src/hed/libs/common/FileLock.cpp:109 #, c-format msgid "Error creating temporary file %s: %s" msgstr "Ошибка создания временного файла %s: %s" #: src/hed/libs/common/FileLock.cpp:118 #, c-format msgid "Could not create link to lock file %s as it already exists" msgstr "" "Невозможно создать ссылку на файл блокировки %s, потому что она уже " "существует" #: src/hed/libs/common/FileLock.cpp:129 #, c-format msgid "Could not create lock file %s as it already exists" msgstr "Невозможно создать файл блокировки %s, потому что он уже существует" #: src/hed/libs/common/FileLock.cpp:133 #, c-format msgid "Error creating lock file %s: %s" msgstr "Ошибка создания файла блокировки %s: %s" #: src/hed/libs/common/FileLock.cpp:138 #, c-format msgid "Error writing to lock file %s: %s" msgstr "Ошибка записи в файл блокировки %s: %s" #: src/hed/libs/common/FileLock.cpp:146 #, c-format msgid "Error linking tmp file %s to lock file %s: %s" msgstr "Не удалось связать временный файл %s с файлом блокировки %s: %s" #: src/hed/libs/common/FileLock.cpp:155 #, c-format msgid "Error in lock file %s, even though linking did not return an error" msgstr "" "Ошибка в файле блокировки %s, несмотря на то, что создание ссылки прошло без " "сбоев" #: src/hed/libs/common/FileLock.cpp:164 #, c-format msgid "%li seconds 
since lock file %s was created" msgstr "%li секунд(ы) с момента создания файла блокировки %s" #: src/hed/libs/common/FileLock.cpp:167 #, c-format msgid "Timeout has expired, will remove lock file %s" msgstr "Время ожидания истекло, файл блокировки %s будет удалён" #: src/hed/libs/common/FileLock.cpp:171 #, c-format msgid "Failed to remove stale lock file %s: %s" msgstr "Сбой удаления устаревшего файла блокировки %s: %s" #: src/hed/libs/common/FileLock.cpp:183 #, c-format msgid "This process already owns the lock on %s" msgstr "У этого процесса уже существует блокировка в %s" #: src/hed/libs/common/FileLock.cpp:189 #, c-format msgid "" "The process owning the lock on %s is no longer running, will remove lock" msgstr "" "Процесс, которому принадлежит блок в %s, больше не существует, блок будет " "удалён" #: src/hed/libs/common/FileLock.cpp:191 #, c-format msgid "Failed to remove file %s: %s" msgstr "Не удалось удалить файл %s: %s" #: src/hed/libs/common/FileLock.cpp:200 #, c-format msgid "The file %s is currently locked with a valid lock" msgstr "Файл %s в настоящий момент заблокирован действительным блоком" #: src/hed/libs/common/FileLock.cpp:215 #, c-format msgid "Failed to unlock file with lock %s: %s" msgstr "Сбой разблокирования файла с блоком %s: %s" #: src/hed/libs/common/FileLock.cpp:227 #, c-format msgid "Lock file %s doesn't exist" msgstr "Файл блокировки %s не существует" #: src/hed/libs/common/FileLock.cpp:229 #, c-format msgid "Error listing lock file %s: %s" msgstr "Ошибка перечисления файла блокировки %s: %s" #: src/hed/libs/common/FileLock.cpp:235 #, c-format msgid "Found unexpected empty lock file %s. Must go back to acquire()" msgstr "" "Найден непредвиденный пустой файл блокировки %s. Необходимо вернуться в " "acquire()" #: src/hed/libs/common/FileLock.cpp:241 #, c-format msgid "Error reading lock file %s: %s" msgstr "Ошибка чтения файла блокировки %s: %s" #: src/hed/libs/common/FileLock.cpp:245 #, c-format msgid "Error with formatting in lock file %s" msgstr "Ошибка формата в файле блокировки %s" #: src/hed/libs/common/FileLock.cpp:255 #, c-format msgid "Lock %s is owned by a different host (%s)" msgstr "Блок %s принадлежит другому процессу (%s)" #: src/hed/libs/common/FileLock.cpp:264 #, c-format msgid "Badly formatted pid %s in lock file %s" msgstr "Неверно сформированный pid %s в файле блокировки %s" #: src/hed/libs/common/FileLock.cpp:267 #, c-format msgid "Another process (%s) owns the lock on file %s" msgstr "Другой процесс (%s) обладает блоком файла %s" #: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(empty)" msgstr "(пусто)" #: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(null)" msgstr "(нулевой)" #: src/hed/libs/common/Logger.cpp:60 msgid "Invalid log level. Using default " msgstr "Неверный уровень отладки. Используется уровень по умолчанию." #: src/hed/libs/common/Logger.cpp:125 msgid "Invalid old log level. Using default " msgstr "Неверный старый уровень отладки. Используется значение по умолчанию" #: src/hed/libs/common/OptionParser.cpp:107 #, c-format msgid "Cannot parse integer value '%s' for -%c" msgstr "Не удаётся разобрать целое значение '%s' для -%c" #: src/hed/libs/common/OptionParser.cpp:265 msgid "Use -? to get usage description" msgstr "Для получения справки используйте \"-?\"." 
#: src/hed/libs/common/OptionParser.cpp:342 msgid "Usage:" msgstr "Использование:" #: src/hed/libs/common/OptionParser.cpp:345 msgid "OPTION..." msgstr "ПАРАМЕТР..." #: src/hed/libs/common/OptionParser.cpp:351 msgid "Help Options:" msgstr "Параметры справки:" #: src/hed/libs/common/OptionParser.cpp:352 msgid "Show help options" msgstr "Показать параметры справки" #: src/hed/libs/common/OptionParser.cpp:354 msgid "Application Options:" msgstr "Параметры приложения:" #: src/hed/libs/common/Profile.cpp:199 src/hed/libs/common/Profile.cpp:273 #: src/hed/libs/common/Profile.cpp:404 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"inisections\" " "attribute cannot be the empty string." msgstr "" "Элемент \"%s\" в профиле игнорируется: значение атрибута \"inisections\" не " "может быть пустой строкой." #: src/hed/libs/common/Profile.cpp:205 src/hed/libs/common/Profile.cpp:279 #: src/hed/libs/common/Profile.cpp:411 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initag\" attribute " "cannot be the empty string." msgstr "" "Элемент \"%s\" в профиле игнорируется: значение атрибута \"initag\" не может " "быть пустой строкой." #: src/hed/libs/common/Profile.cpp:419 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initype\" " "attribute cannot be the empty string." msgstr "" "Элемент \"%s\" в профиле игнорируется: значение атрибута \"initype\" не " "может быть пустой строкой." #: src/hed/libs/common/Profile.cpp:422 #, c-format msgid "" "Element \"%s\" in the profile ignored: the \"inidefaultvalue\" attribute " "cannot be specified when the \"inisections\" and \"initag\" attributes have " "not been specified." msgstr "" "Элемент \"%s\" в профиле игнорируется: значение атрибута \"inidefaultvalue\" " "не может быть задано, когда не заданы значения атрибутов \"inisections\" и " "\"initag\"." #: src/hed/libs/common/Profile.cpp:497 #, c-format msgid "" "In the configuration profile the 'initype' attribute on the \"%s\" element " "has a invalid value \"%s\"." msgstr "" "В профиле настроек атрибут 'initype' элемента \"%s\" имеет недопустимое " "значение \"%s\"." 
#: src/hed/libs/common/StringConv.h:31 #, c-format msgid "Conversion failed: %s" msgstr "Преобразование не удалось: %s" #: src/hed/libs/common/StringConv.h:35 #, c-format msgid "Full string not used: %s" msgstr "Строка использована не полностью: %s" #: src/hed/libs/common/Thread.cpp:212 msgid "Maximum number of threads running - puting new request into queue" msgstr "" "Запущено максимальное количество потоков - новый запрос помещён в очередь" #: src/hed/libs/common/Thread.cpp:253 #, c-format msgid "Thread exited with Glib error: %s" msgstr "Поток завершился ошибкой Glib: %s" #: src/hed/libs/common/Thread.cpp:255 #, c-format msgid "Thread exited with Glib exception: %s" msgstr "Поток завершился исключением Glib: %s" #: src/hed/libs/common/Thread.cpp:257 #, c-format msgid "Thread exited with generic exception: %s" msgstr "Поток завершился общим исключением: %s" #: src/hed/libs/common/URL.cpp:121 #, c-format msgid "URL is not valid: %s" msgstr "Недействительный адрес: %s" #: src/hed/libs/common/URL.cpp:192 #, c-format msgid "Illegal URL - path must be absolute: %s" msgstr "Недопустимый URL - путь должен быть абсолютным: %s" #: src/hed/libs/common/URL.cpp:197 #, c-format msgid "Illegal URL - no hostname given: %s" msgstr "Недопустимый адрес - не содержится имя узла: %s" #: src/hed/libs/common/URL.cpp:286 #, c-format msgid "Illegal URL - path must be absolute or empty: %s" msgstr "Недопустимый URL - путь должен быть абсолютным или пустым: %s" #: src/hed/libs/common/URL.cpp:302 #, c-format msgid "Illegal URL - no closing ] for IPv6 address found: %s" msgstr "" "Недопустимый URL - отсутствует закрывающая скобка ] для адреса IPv6: %s" #: src/hed/libs/common/URL.cpp:310 #, c-format msgid "" "Illegal URL - closing ] for IPv6 address is followed by illegal token: %s" msgstr "" "Недопустимый URL - за закрывающей скобкой ] для адреса IPv6 следует " "недопустимый маркёр: %s" #: src/hed/libs/common/URL.cpp:326 #, c-format msgid "Invalid port number in %s" msgstr "Недопустимый номер порта в %s" #: src/hed/libs/common/URL.cpp:425 #, c-format msgid "Unknown LDAP scope %s - using base" msgstr "Неизвестный контекст LDAP %s - используется base" #: src/hed/libs/common/URL.cpp:587 msgid "Attempt to assign relative path to URL - making it absolute" msgstr "" "Попытка интерпретации относительного пути как URL - заменяется на абсолютный" #: src/hed/libs/common/URL.cpp:686 #, c-format msgid "URL option %s does not have format name=value" msgstr "Опция URL %s не задана в формате имя=значение" #: src/hed/libs/common/URL.cpp:1151 #, c-format msgid "urllist %s contains invalid URL: %s" msgstr "urllist %s содержит недопустимый URL: %s" #: src/hed/libs/common/URL.cpp:1156 #, c-format msgid "URL protocol is not urllist: %s" msgstr "Протокол URL не является urllist: %s" #: src/hed/libs/common/UserConfig.cpp:30 #: src/hed/libs/common/UserConfig.cpp:681 #: src/hed/libs/common/UserConfig.cpp:690 #: src/hed/libs/common/UserConfig.cpp:696 #: src/hed/libs/common/UserConfig.cpp:718 #: src/hed/libs/common/UserConfig.cpp:728 #: src/hed/libs/common/UserConfig.cpp:740 #: src/hed/libs/common/UserConfig.cpp:760 #, c-format msgid "Multiple %s attributes in configuration file (%s)" msgstr "Множественные атрибуты %s в файле настроек (%s)" #: src/hed/libs/common/UserConfig.cpp:82 #, c-format msgid "Wrong ownership of certificate file: %s" msgstr "Неправильная принадлежность файла открытого ключа: %s" #: src/hed/libs/common/UserConfig.cpp:84 #, c-format msgid "Wrong permissions of certificate file: %s" msgstr "Неправильные права доступа к 
файлу открытого ключа: %s" #: src/hed/libs/common/UserConfig.cpp:86 #, c-format msgid "Can not access certificate file: %s" msgstr "Нет доступа к файлу сертификата: %s" #: src/hed/libs/common/UserConfig.cpp:93 #, c-format msgid "Wrong ownership of key file: %s" msgstr "Неправильная принадлежность файла личного ключа: %s" #: src/hed/libs/common/UserConfig.cpp:95 #, c-format msgid "Wrong permissions of key file: %s" msgstr "Неправильные права доступа к файлу личного ключа: %s" #: src/hed/libs/common/UserConfig.cpp:97 #, c-format msgid "Can not access key file: %s" msgstr "Нет доступа к файлу личного ключа: %s" #: src/hed/libs/common/UserConfig.cpp:104 #, c-format msgid "Wrong ownership of proxy file: %s" msgstr "Неправильная принадлежность файла доверенности: %s" #: src/hed/libs/common/UserConfig.cpp:106 #, c-format msgid "Wrong permissions of proxy file: %s" msgstr "Неправильные права доступа к файлу доверенности: %s" #: src/hed/libs/common/UserConfig.cpp:108 #, c-format msgid "Can not access proxy file: %s" msgstr "Нет доступа к файлу доверенности: %s" #: src/hed/libs/common/UserConfig.cpp:119 msgid "computing" msgstr "computing" #: src/hed/libs/common/UserConfig.cpp:121 msgid "index" msgstr "index" #: src/hed/libs/common/UserConfig.cpp:165 #: src/hed/libs/common/UserConfig.cpp:171 #: src/hed/libs/common/UserConfig.cpp:223 #: src/hed/libs/common/UserConfig.cpp:229 #, c-format msgid "System configuration file (%s) contains errors." msgstr "Файл системных настроек (%s) содержит ошибки." #: src/hed/libs/common/UserConfig.cpp:176 #: src/hed/libs/common/UserConfig.cpp:234 #, c-format msgid "System configuration file (%s or %s) does not exist." msgstr "Файл системных настроек (%s or %s) не существует." #: src/hed/libs/common/UserConfig.cpp:178 #: src/hed/libs/common/UserConfig.cpp:180 #: src/hed/libs/common/UserConfig.cpp:236 #: src/hed/libs/common/UserConfig.cpp:238 #, c-format msgid "System configuration file (%s) does not exist." msgstr "Файл системных настроек (%s) не существует." #: src/hed/libs/common/UserConfig.cpp:187 #: src/hed/libs/common/UserConfig.cpp:199 #: src/hed/libs/common/UserConfig.cpp:245 #: src/hed/libs/common/UserConfig.cpp:257 #, c-format msgid "User configuration file (%s) contains errors." msgstr "Файл настроек пользователя (%s) содержит ошибки." #: src/hed/libs/common/UserConfig.cpp:192 #: src/hed/libs/common/UserConfig.cpp:250 msgid "No configuration file could be loaded." msgstr "Файл настроек не может быть подгружен." #: src/hed/libs/common/UserConfig.cpp:195 #: src/hed/libs/common/UserConfig.cpp:253 #, c-format msgid "User configuration file (%s) does not exist or cannot be loaded." msgstr "" "Файл настроек пользователя (%s) не существует или не может быть подгружен." #: src/hed/libs/common/UserConfig.cpp:310 #, c-format msgid "" "Unable to parse the specified verbosity (%s) to one of the allowed levels" msgstr "" "Невозможно сопоставить запрашиваемый уровень отладки (%s) ни с одним из " "допустимых" #: src/hed/libs/common/UserConfig.cpp:322 #, c-format msgid "" "Unsupported job list type '%s', using 'BDB'. Supported types are: BDB, " "SQLITE, XML." msgstr "" "Тип списка задач '%s' не поддерживается, будет использоваться 'BDB'. " "Поддерживаются следующие типы: BDB, SQLITE, XML." 
#: src/hed/libs/common/UserConfig.cpp:503 #, c-format msgid "Certificate and key ('%s' and '%s') not found in any of the paths: %s" msgstr "" "Сертификат и ключ ('%s' и '%s') не обнаружены ни в одном из расположений: %s" #: src/hed/libs/common/UserConfig.cpp:505 #, c-format msgid "" "If the proxy or certificate/key does exist, you can manually specify the " "locations via environment variables '%s'/'%s' or '%s', or the '%s'/'%s' or '%" "s' attributes in the client configuration file (e.g. '%s')" msgstr "" "Если пара сертификат/ключ или файл сертификата доверенности существуют, Вы " "можете вручную указать их расположение с помощью переменных среды '%s'/'%s' " "или '%s', или с помощью атрибутов '%s'/'%s' или '%s' в файле настроек " "клиента (например, '%s')" #: src/hed/libs/common/UserConfig.cpp:522 #, c-format msgid "" "Can not access CA certificates directory: %s. The certificates will not be " "verified." msgstr "" "Не удалось открыть каталог сертификатов CA: %s. Сертификаты не будут " "проверены." #: src/hed/libs/common/UserConfig.cpp:532 #, c-format msgid "" "Can not access CA certificate directory: %s. The certificates will not be " "verified." msgstr "Не удалось открыть каталог сертификатов CA: %s. Сертификаты не будут проверены." #: src/hed/libs/common/UserConfig.cpp:558 #, c-format msgid "" "Can not find CA certificates directory in default locations:\n" "~/.arc/certificates, ~/.globus/certificates,\n" "%s/etc/certificates, %s/etc/grid-security/certificates,\n" "%s/share/certificates, /etc/grid-security/certificates.\n" "The certificate will not be verified.\n" "If the CA certificates directory does exist, please manually specify the " "locations via env\n" "X509_CERT_DIR, or the cacertificatesdirectory item in client.conf\n" msgstr "" "Каталог сертификатов СА не обнаружен ни в одном из стандартных мест:\n" "~/.arc/certificates, ~/.globus/certificates,\n" "%s/etc/certificates, %s/etc/grid-security/certificates,\n" "%s/share/certificates, /etc/grid-security/certificates.\n" "Сертификат не будет подтверждён.\n" "Если каталог сертификатов СА существует, пожалуйста, укажите вручную\n" "его расположения с помощью переменной X509_CERT_DIR, или задайте\n" "cacertificatesdirectory в файле настроек клиента client.conf\n" #: src/hed/libs/common/UserConfig.cpp:579 #, c-format msgid "Using proxy file: %s" msgstr "Используется файл доверенности: %s" #: src/hed/libs/common/UserConfig.cpp:582 #, c-format msgid "Using certificate file: %s" msgstr "Используется файл сертификата: %s" #: src/hed/libs/common/UserConfig.cpp:583 #, c-format msgid "Using key file: %s" msgstr "Используется файл личного ключа: %s" #: src/hed/libs/common/UserConfig.cpp:587 #, c-format msgid "Using CA certificate directory: %s" msgstr "Используется каталог доверенных сертификатов CA: %s" #: src/hed/libs/common/UserConfig.cpp:600 #: src/hed/libs/common/UserConfig.cpp:606 #, c-format msgid "Can not access VOMSES file/directory: %s." msgstr "Невозможно открыть каталог или файл VOMSES: %s." #: src/hed/libs/common/UserConfig.cpp:612 #, c-format msgid "Can not access VOMS file/directory: %s." msgstr "Невозможно открыть каталог или файл VOMS: %s." 
#: src/hed/libs/common/UserConfig.cpp:631 msgid "" "Can not find voms service configuration file (vomses) in default locations: " "~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/" "grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses" msgstr "" "Конфигурация серверов VOMS не обнаружена ни в одном из стандартных " "расположений: ~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, " "$ARC_LOCATION/etc/grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-" "security/vomses" #: src/hed/libs/common/UserConfig.cpp:644 #, c-format msgid "Loading configuration (%s)" msgstr "Чтение файла настроек (%s)" #: src/hed/libs/common/UserConfig.cpp:678 #, c-format msgid "" "The value of the timeout attribute in the configuration file (%s) was only " "partially parsed" msgstr "Значение атрибута timeout (%s) в файле настроек разобрано неполностью" #: src/hed/libs/common/UserConfig.cpp:703 msgid "" "The brokerarguments attribute can only be used in conjunction with the " "brokername attribute" msgstr "" "Атрибут brokerarguments может быть использован только в связи с атрибутом " "brokername" #: src/hed/libs/common/UserConfig.cpp:715 #, c-format msgid "" "The value of the keysize attribute in the configuration file (%s) was only " "partially parsed" msgstr "Значение атрибута keysize (%s) в файле настроек разобрано неполностью" #: src/hed/libs/common/UserConfig.cpp:735 #, c-format msgid "" "Could not convert the slcs attribute value (%s) to an URL instance in " "configuration file (%s)" msgstr "" "Не удалось преобразовать значение атрибута slcs (%s) в файле настроек в URL " "(%s)" #: src/hed/libs/common/UserConfig.cpp:781 #, c-format msgid "Specified overlay file (%s) does not exist." msgstr "Указанный файл с трафаретом (%s) не существует." #: src/hed/libs/common/UserConfig.cpp:785 #, c-format msgid "" "Unknown attribute %s in common section of configuration file (%s), ignoring " "it" msgstr "" "Игнорируется неизвестный атрибут %s в разделе common файла настроек (%s)" #: src/hed/libs/common/UserConfig.cpp:826 #, c-format msgid "Unknown section %s, ignoring it" msgstr "Игнорируется неизвестный раздел %s" #: src/hed/libs/common/UserConfig.cpp:830 #, c-format msgid "Configuration (%s) loaded" msgstr "Настройки (%s) подгружены" #: src/hed/libs/common/UserConfig.cpp:833 #, c-format msgid "Could not load configuration (%s)" msgstr "Не удалось подгрузить настройки (%s)" #: src/hed/libs/common/UserConfig.cpp:928 #, c-format msgid "UserConfiguration saved to file (%s)" msgstr "UserConfiguration сохранены в файле (%s)" #: src/hed/libs/common/UserConfig.cpp:941 #, c-format msgid "Unable to create %s directory." msgstr "Не удалось создать каталог %s." #: src/hed/libs/common/UserConfig.cpp:950 #, c-format msgid "Configuration example file created (%s)" msgstr "Создан шаблонный файл настроек (%s)" #: src/hed/libs/common/UserConfig.cpp:952 #, c-format msgid "Unable to copy example configuration from existing configuration (%s)" msgstr "Не удалось скопировать шаблон настроек из существующих настроек (%s)" #: src/hed/libs/common/UserConfig.cpp:956 #, c-format msgid "Cannot copy example configuration (%s), it is not a regular file" msgstr "" "Не удалось скопировать шаблон настроек (%s), т.к. это нестандартный файл" #: src/hed/libs/common/UserConfig.cpp:961 #, c-format msgid "Example configuration (%s) not created." msgstr "Шаблон настроек (%s) не создан." 
#: src/hed/libs/common/UserConfig.cpp:966 #, c-format msgid "The default configuration file (%s) is not a regular file." msgstr "Файл настроек по умолчанию (%s) не является обычным файлом." #: src/hed/libs/common/UserConfig.cpp:984 #, c-format msgid "%s directory created" msgstr "создан каталог %s" #: src/hed/libs/common/UserConfig.cpp:986 #: src/hed/libs/common/UserConfig.cpp:1025 src/hed/libs/data/DataMover.cpp:684 #, c-format msgid "Failed to create directory %s" msgstr "Не удалось создать каталог %s" #: src/hed/libs/common/test/LoggerTest.cpp:58 msgid "This VERBOSE message should not be seen" msgstr "Этого сообщения VERBOSE не должно быть видно" #: src/hed/libs/common/test/LoggerTest.cpp:62 msgid "This INFO message should be seen" msgstr "Это сообщение INFO должно быть видно" #: src/hed/libs/common/test/LoggerTest.cpp:73 msgid "This VERBOSE message should now be seen" msgstr "Это сообщение VERBOSE теперь должно быть видно" #: src/hed/libs/common/test/LoggerTest.cpp:79 msgid "This INFO message should also be seen" msgstr "Это сообщение INFO тоже должно быть видно" #: src/hed/libs/common/test/LoggerTest.cpp:93 msgid "This message goes to initial destination" msgstr "Это сообщение выводится в изначальное назначение" #: src/hed/libs/common/test/LoggerTest.cpp:108 msgid "This message goes to per-thread destination" msgstr "Это сообщение направляется в каждый поток" #: src/hed/libs/communication/ClientSAML2SSO.cpp:80 msgid "Request failed: No response from SPService" msgstr "Сбой запроса: нет ответа от службы SPService" #: src/hed/libs/communication/ClientSAML2SSO.cpp:84 #: src/hed/libs/communication/ClientSAML2SSO.cpp:137 msgid "Request failed: response from SPService is not as expected" msgstr "Сбой запроса: неверный ответ от службы SPService" #: src/hed/libs/communication/ClientSAML2SSO.cpp:92 #, c-format msgid "Authentication Request URL: %s" msgstr "Адрес URL запроса подтверждения подлинности: %s" #: src/hed/libs/communication/ClientSAML2SSO.cpp:133 msgid "Request failed: No response from IdP" msgstr "Сбой запроса: нет ответа от службы IdP" #: src/hed/libs/communication/ClientSAML2SSO.cpp:184 msgid "Request failed: No response from IdP when doing redirecting" msgstr "Сбой запроса: нет ответа от службы IdP при перенаправлении" #: src/hed/libs/communication/ClientSAML2SSO.cpp:188 msgid "" "Request failed: response from IdP is not as expected when doing redirecting" msgstr "Сбой запроса: неверный ответ от службы IdP при перенаправлении" #: src/hed/libs/communication/ClientSAML2SSO.cpp:245 msgid "Request failed: No response from IdP when doing authentication" msgstr "Сбой запроса: нет ответа от службы IdP при проверке подлинности" #: src/hed/libs/communication/ClientSAML2SSO.cpp:249 msgid "" "Request failed: response from IdP is not as expected when doing " "authentication" msgstr "Сбой запроса: неверный ответ от службы IdP при проверке подлинности" #: src/hed/libs/communication/ClientSAML2SSO.cpp:294 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:300 msgid "Succeeded to verify the signature under " msgstr "Подпись успешно подтверждена" #: src/hed/libs/communication/ClientSAML2SSO.cpp:296 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:303 msgid "Failed to verify the signature under " msgstr "Подпись не подтверждена" #: src/hed/libs/communication/ClientSAML2SSO.cpp:310 msgid "" "Request failed: No response from SP Service when sending SAML assertion to SP" msgstr "" "Сбой запроса: нет ответа от службы SP при отсылке утверждения SAML на SP" #: src/hed/libs/communication/ClientSAML2SSO.cpp:314 msgid "" 
"Request failed: response from SP Service is not as expected when sending " "SAML assertion to SP" msgstr "" "Сбой запроса: неприемлемый ответ от службы SP при отсылке утверждения SAML " "на SP" #: src/hed/libs/communication/ClientSAML2SSO.cpp:325 #, c-format msgid "IdP return some error message: %s" msgstr "Служба IdP выдала сообщение об ошибке: %s" #: src/hed/libs/communication/ClientSAML2SSO.cpp:353 #: src/hed/libs/communication/ClientSAML2SSO.cpp:398 msgid "SAML2SSO process failed" msgstr "Сбой процесса SAML2SSO" #: src/hed/libs/communication/ClientX509Delegation.cpp:54 msgid "Creating delegation credential to ARC delegation service" msgstr "Создание делегируемых параметров доступа для службы делегирования ARC" #: src/hed/libs/communication/ClientX509Delegation.cpp:64 #: src/hed/libs/communication/ClientX509Delegation.cpp:267 msgid "DelegateCredentialsInit failed" msgstr "Сбой в DelegateCredentialsInit" #: src/hed/libs/communication/ClientX509Delegation.cpp:68 #: src/hed/libs/communication/ClientX509Delegation.cpp:122 #: src/hed/libs/communication/ClientX509Delegation.cpp:157 #: src/hed/libs/communication/ClientX509Delegation.cpp:212 #: src/hed/libs/communication/ClientX509Delegation.cpp:271 msgid "There is no SOAP response" msgstr "Нет ответа SOAP" #: src/hed/libs/communication/ClientX509Delegation.cpp:73 msgid "There is no X509 request in the response" msgstr "В ответе отсутствует запрос X509" #: src/hed/libs/communication/ClientX509Delegation.cpp:78 msgid "There is no Format request in the response" msgstr "В ответе отсутствует запрос Format" #: src/hed/libs/communication/ClientX509Delegation.cpp:86 msgid "There is no Id or X509 request value in the response" msgstr "Ответ не содержит Id или значение запроса X509" #: src/hed/libs/communication/ClientX509Delegation.cpp:99 #: src/hed/libs/communication/ClientX509Delegation.cpp:187 msgid "DelegateProxy failed" msgstr "Сбой в DelegateProxy" #: src/hed/libs/communication/ClientX509Delegation.cpp:118 msgid "UpdateCredentials failed" msgstr "Сбой в UpdateCredentials" #: src/hed/libs/communication/ClientX509Delegation.cpp:126 msgid "There is no UpdateCredentialsResponse in response" msgstr "В ответе отсутствует UpdateCredentialsResponse" #: src/hed/libs/communication/ClientX509Delegation.cpp:134 #: src/hed/libs/communication/ClientX509Delegation.cpp:162 #: src/hed/libs/communication/ClientX509Delegation.cpp:217 #: src/hed/libs/communication/ClientX509Delegation.cpp:302 msgid "There is no SOAP connection chain configured" msgstr "Не настроена цепочка соединения SOAP" #: src/hed/libs/communication/ClientX509Delegation.cpp:140 msgid "Creating delegation to CREAM delegation service" msgstr "Создание делегирования для службы делегирования CREAM" #: src/hed/libs/communication/ClientX509Delegation.cpp:153 msgid "Delegation getProxyReq request failed" msgstr "Сбой запроса делегирования getProxyReq" #: src/hed/libs/communication/ClientX509Delegation.cpp:173 msgid "Creating delegation to CREAM delegation service failed" msgstr "Сбой создания делегирования для службы делегирования CREAM" #: src/hed/libs/communication/ClientX509Delegation.cpp:208 msgid "Delegation putProxy request failed" msgstr "Сбой запроса делегирования putProxy" #: src/hed/libs/communication/ClientX509Delegation.cpp:222 msgid "Creating delegation to CREAM delegation failed" msgstr "Сбой создания делегирования для делегирования CREAM" #: src/hed/libs/communication/ClientX509Delegation.cpp:237 msgid "Getting delegation credential from ARC delegation service" msgstr "" "Получение 
делегированных параметров доступа от службы делегирования ARC" #: src/hed/libs/communication/ClientX509Delegation.cpp:276 msgid "There is no Delegated X509 token in the response" msgstr "Ответ не содержит делегированный токен X509" #: src/hed/libs/communication/ClientX509Delegation.cpp:281 msgid "There is no Format delegated token in the response" msgstr "Ответ не содержит делегированный токен в нужном формате" #: src/hed/libs/communication/ClientX509Delegation.cpp:289 msgid "There is no Id or X509 token value in the response" msgstr "Ответ не содержит Id или значение маркёра X509" #: src/hed/libs/communication/ClientX509Delegation.cpp:298 #, c-format msgid "" "Get delegated credential from delegation service: \n" " %s" msgstr "" "Получение делегированных параметров доступа от службы делегирования: \n" " %s" #: src/hed/libs/compute/Broker.cpp:62 #, c-format msgid "Performing matchmaking against target (%s)." msgstr "Производится сравнение с назначением (%s)." #: src/hed/libs/compute/Broker.cpp:72 #, c-format msgid "Matchmaking, ExecutionTarget: %s matches job description" msgstr "Сравнение; ExecutionTarget: %s соответствует описанию задачи" #: src/hed/libs/compute/Broker.cpp:145 #, c-format msgid "" "The CA issuer (%s) of the credentials (%s) is not trusted by the target (%s)." msgstr "" "Агентство (%s), выдавшее сертификат (%s), не относится к доверяемым целью (%" "s)." #: src/hed/libs/compute/Broker.cpp:153 src/hed/libs/compute/Broker.cpp:171 #, c-format msgid "ComputingShareName of ExecutionTarget (%s) is not defined" msgstr "Не определён параметр ComputingShareName атрибута ExecutionTarget (%s)" #: src/hed/libs/compute/Broker.cpp:157 src/hed/libs/compute/Broker.cpp:162 #, c-format msgid "ComputingShare (%s) explicitly rejected" msgstr "Цель ComputingShare (%s) явно отклонена" #: src/hed/libs/compute/Broker.cpp:175 src/hed/libs/compute/Broker.cpp:180 #, c-format msgid "ComputingShare (%s) does not match selected queue (%s)" msgstr "Цель ComputingShare (%s) не соответствует выбранной очереди (%s)" #: src/hed/libs/compute/Broker.cpp:189 #, c-format msgid "" "ProcessingStartTime (%s) specified in job description is inside the targets " "downtime period [ %s - %s ]." msgstr "" "Время начала счёта (%s), указанное в описании задачи, приходится на период " "недоступности цели [ %s - %s ]." #: src/hed/libs/compute/Broker.cpp:194 #, c-format msgid "The downtime of the target (%s) is not published. Keeping target." msgstr "Период недоступности цели (%s) не объявлен. Цель сохраняется." #: src/hed/libs/compute/Broker.cpp:200 #, c-format msgid "HealthState of ExecutionTarget (%s) is not OK (%s)" msgstr "" "Состояние здоровья назначения для исполнения (%s) неудовлетворительное (%s)" #: src/hed/libs/compute/Broker.cpp:205 #, c-format msgid "Matchmaking, ExecutionTarget: %s, HealthState is not defined" msgstr "" "Сравнение; назначение для исполнения: %s, состояние здоровья не определено" #: src/hed/libs/compute/Broker.cpp:212 #, c-format msgid "" "Matchmaking, Computing endpoint requirement not satisfied. ExecutionTarget: %" "s" msgstr "" "Сравнение; не удовлетворено требование к вычислительному ресурсу. Назначение " "для исполнения: %s" #: src/hed/libs/compute/Broker.cpp:217 #, c-format msgid "Matchmaking, ExecutionTarget: %s, ImplementationName is not defined" msgstr "" "Сравнение; назначение для исполнения: %s, значение ImplementationName не " "определено" #: src/hed/libs/compute/Broker.cpp:243 #, c-format msgid "" "Matchmaking, %s (%d) is %s than %s (%d) published by the ExecutionTarget." 
msgstr "" "Сравнение; %s (%d) не соответствует (%s) значению %s (%d), публикуемому " "назначением для исполнения." #: src/hed/libs/compute/Broker.cpp:272 #, c-format msgid "" "Matchmaking, The %s scaled %s (%d) is %s than the %s (%d) published by the " "ExecutionTarget." msgstr "" "Сравнение; приведённое к значению %s значение %s (%d) не соответствует (%s) " "значению %s (%d) публикуемому назначением для исполнения." #: src/hed/libs/compute/Broker.cpp:284 #, c-format msgid "Matchmaking, Benchmark %s is not published by the ExecutionTarget." msgstr "" "Сравнение; значение эталонного теста %s не публикуется назначением для " "исполнения." #: src/hed/libs/compute/Broker.cpp:299 #, c-format msgid "" "Matchmaking, MaxTotalCPUTime problem, ExecutionTarget: %d (MaxTotalCPUTime), " "JobDescription: %d (TotalCPUTime)" msgstr "" "Сравнение; проблема с MaxTotalCPUTime, ExecutionTarget: %d " "(MaxTotalCPUTime), JobDescription: %d (TotalCPUTime)" #: src/hed/libs/compute/Broker.cpp:306 #, c-format msgid "" "Matchmaking, MaxCPUTime problem, ExecutionTarget: %d (MaxCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" msgstr "" "Сравнение; проблема с MaxCPUTime, ExecutionTarget: %d (MaxCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" #: src/hed/libs/compute/Broker.cpp:311 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxTotalCPUTime or MaxCPUTime not " "defined, assuming no CPU time limit" msgstr "" "Сравнение; ExecutionTarget: %s, не задано MaxTotalCPUTime или MaxCPUTime, " "предполагается отсутствие ограничений на процессорное время" #: src/hed/libs/compute/Broker.cpp:317 #, c-format msgid "" "Matchmaking, MinCPUTime problem, ExecutionTarget: %d (MinCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" msgstr "" "Сравнение; проблема с MinCPUTime, ExecutionTarget: %d (MinCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" #: src/hed/libs/compute/Broker.cpp:322 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MinCPUTime not defined, assuming no CPU " "time limit" msgstr "" "Сравнение; ExecutionTarget: %s, не задано MinCPUTime, предполагается " "отсутствие ограничений на процессорное время" #: src/hed/libs/compute/Broker.cpp:330 #, c-format msgid "" "Matchmaking, MainMemorySize problem, ExecutionTarget: %d (MainMemorySize), " "JobDescription: %d (IndividualPhysicalMemory)" msgstr "" "Сравнение; несовпадение MainMemorySize: у назначения для исполнения: %d " "(MainMemorySize), в описании задачи: %d (IndividualPhysicalMemory)" #: src/hed/libs/compute/Broker.cpp:336 #, c-format msgid "" "Matchmaking, MaxMainMemory problem, ExecutionTarget: %d (MaxMainMemory), " "JobDescription: %d (IndividualPhysicalMemory)" msgstr "" "Сравнение; несовпадение MaxMainMemory: у назначения для исполнения: %d " "(MaxMainMemory), в описании задачи: %d (IndividualPhysicalMemory)" #: src/hed/libs/compute/Broker.cpp:341 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxMainMemory and MainMemorySize are not " "defined" msgstr "" "Сравнение; назначение для исполнения: %s, значения MaxMainMemory и " "MainMemorySize не определены" #: src/hed/libs/compute/Broker.cpp:349 #, c-format msgid "" "Matchmaking, MaxVirtualMemory problem, ExecutionTarget: %d " "(MaxVirtualMemory), JobDescription: %d (IndividualVirtualMemory)" msgstr "" "Сравнение; несовпадение MaxVirtualMemory: у назначения для исполнения: %d " "(MaxVirtualMemory), в описании задачи: %d (IndividualVirtualMemory)" #: src/hed/libs/compute/Broker.cpp:354 #, c-format msgid "Matchmaking, ExecutionTarget: %s, 
MaxVirtualMemory is not defined" msgstr "" "Сравнение; назначение для исполнения: %s, значение MaxVirtualMemory не " "определено" #: src/hed/libs/compute/Broker.cpp:362 #, c-format msgid "" "Matchmaking, Platform problem, ExecutionTarget: %s (Platform) " "JobDescription: %s (Platform)" msgstr "" "Сравнение; несовпадение платформ: ExecutionTarget: %s (Platform) " "JobDescription: %s (Platform)" #: src/hed/libs/compute/Broker.cpp:367 #, c-format msgid "Matchmaking, ExecutionTarget: %s, Platform is not defined" msgstr "" "Сравнение; назначение для исполнения: %s, значение Platform не определено" #: src/hed/libs/compute/Broker.cpp:375 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, OperatingSystem requirements not satisfied" msgstr "" "Сравнение; не удовлетворены требования OperatingSystem к ExecutionTarget: %s" #: src/hed/libs/compute/Broker.cpp:380 #, c-format msgid "Matchmaking, ExecutionTarget: %s, OperatingSystem is not defined" msgstr "" "Сравнение; назначение для исполнения: %s, значение OperatingSystem не " "определено" #: src/hed/libs/compute/Broker.cpp:388 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, RunTimeEnvironment requirements not " "satisfied" msgstr "" "Сравнение; не удовлетворены требования RunTimeEnvironment к ExecutionTarget: " "%s" #: src/hed/libs/compute/Broker.cpp:393 #, c-format msgid "Matchmaking, ExecutionTarget: %s, ApplicationEnvironments not defined" msgstr "" "Сравнение; назначение для исполнения: %s, значение ApplicationEnvironments " "не определено" #: src/hed/libs/compute/Broker.cpp:402 #, c-format msgid "" "Matchmaking, NetworkInfo demand not fulfilled, ExecutionTarget do not " "support %s, specified in the JobDescription." msgstr "" "Сравнение; не удовлетворено требование NetworkInfo, назначение для " "исполнения не поддерживает %s, указанное в описании задачи." 
#: src/hed/libs/compute/Broker.cpp:406 #, c-format msgid "Matchmaking, ExecutionTarget: %s, NetworkInfo is not defined" msgstr "" "Сравнение; назначение для исполнения: %s, значение NetworkInfo не определено" #: src/hed/libs/compute/Broker.cpp:414 #, c-format msgid "" "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (SessionDiskSpace)" msgstr "" "Сравнение; несовпадение MaxDiskSpace: у назначения для исполнения: %d MB " "(MaxDiskSpace), в описании задачи: %d MB (SessionDiskSpace)" #: src/hed/libs/compute/Broker.cpp:421 #, c-format msgid "" "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (SessionDiskSpace)" msgstr "" "Сравнение; несовпадение WorkingAreaFree: у назначения для исполнения: %d MB " "(WorkingAreaFree), в описании задачи: %d MB (SessionDiskSpace)" #: src/hed/libs/compute/Broker.cpp:427 src/hed/libs/compute/Broker.cpp:448 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxDiskSpace and WorkingAreaFree are not " "defined" msgstr "" "Сравнение; назначение для исполнения: %s, значения MaxDiskSpace и " "WorkingAreaFree не определено" #: src/hed/libs/compute/Broker.cpp:435 #, c-format msgid "" "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (DiskSpace)" msgstr "" "Сравнение; несовпадение MaxDiskSpace: у назначения для исполнения: %d MB " "(MaxDiskSpace), в описании задачи: %d MB (DiskSpace)" #: src/hed/libs/compute/Broker.cpp:442 #, c-format msgid "" "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (DiskSpace)" msgstr "" "Сравнение; несовпадение WorkingAreaFree: у назначения для исполнения: %d MB " "(WorkingAreaFree), в описании задачи: %d MB (DiskSpace)" #: src/hed/libs/compute/Broker.cpp:456 #, c-format msgid "" "Matchmaking, CacheTotal problem, ExecutionTarget: %d MB (CacheTotal); " "JobDescription: %d MB (CacheDiskSpace)" msgstr "" "Сравнение; несовпадение CacheTotal: у назначения для исполнения: %d MB " "(CacheTotal), в описании задачи: %d MB (CacheDiskSpace)" #: src/hed/libs/compute/Broker.cpp:461 #, c-format msgid "Matchmaking, ExecutionTarget: %s, CacheTotal is not defined" msgstr "" "Сравнение; назначение для исполнения: %s, значение CacheTotal не определено" #: src/hed/libs/compute/Broker.cpp:469 #, c-format msgid "" "Matchmaking, TotalSlots problem, ExecutionTarget: %d (TotalSlots) " "JobDescription: %d (NumberOfProcesses)" msgstr "" "Сравнение; несовпадение TotalSlots: у назначения для исполнения: %d " "(TotalSlots), в описании задачи: %d (NumberOfProcesses)" #: src/hed/libs/compute/Broker.cpp:475 #, c-format msgid "" "Matchmaking, MaxSlotsPerJob problem, ExecutionTarget: %d (MaxSlotsPerJob) " "JobDescription: %d (NumberOfProcesses)" msgstr "" "Сравнение; несовпадение MaxSlotsPerJob: у назначения для исполнения: %d " "(MaxSlotsPerJob), в описании задачи: %d (NumberOfProcesses)" #: src/hed/libs/compute/Broker.cpp:481 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, TotalSlots and MaxSlotsPerJob are not " "defined" msgstr "" "Сравнение; назначение для исполнения: %s, значения TotalSlots и " "MaxSlotsPerJob не определены" #: src/hed/libs/compute/Broker.cpp:489 #, c-format msgid "" "Matchmaking, WorkingAreaLifeTime problem, ExecutionTarget: %s " "(WorkingAreaLifeTime) JobDescription: %s (SessionLifeTime)" msgstr "" "Сравнение; несовпадение WorkingAreaLifeTime: у назначения для исполнения: %s " "(WorkingAreaLifeTime), в описании задачи: %s 
(SessionLifeTime)" #: src/hed/libs/compute/Broker.cpp:494 #, c-format msgid "Matchmaking, ExecutionTarget: %s, WorkingAreaLifeTime is not defined" msgstr "" "Сравнение; назначение для исполнения: %s, значение WorkingAreaLifeTime не " "определено" #: src/hed/libs/compute/Broker.cpp:502 #, c-format msgid "" "Matchmaking, ConnectivityIn problem, ExecutionTarget: %s (ConnectivityIn) " "JobDescription: %s (InBound)" msgstr "" "Сравнение; несовпадение ConnectivityIn: у назначения для исполнения: %s " "(ConnectivityIn), в описании задачи: %s (InBound)" #: src/hed/libs/compute/Broker.cpp:509 #, c-format msgid "" "Matchmaking, ConnectivityOut problem, ExecutionTarget: %s (ConnectivityOut) " "JobDescription: %s (OutBound)" msgstr "" "Сравнение; несовпадение ConnectivityOut: у назначения для исполнения: %s " "(ConnectivityOut), в описании задачи: %s (OutBound)" #: src/hed/libs/compute/Broker.cpp:532 msgid "Unable to sort added jobs. The BrokerPlugin plugin has not been loaded." msgstr "" "Невозможно упорядочить добавленные задачи. Подключаемый модуль BrokerPlugin " "не был подгружен." #: src/hed/libs/compute/Broker.cpp:549 msgid "Unable to match target, marking it as not matching. Broker not valid." msgstr "" "Ресурс не соответствует заданию, помечается как несоответствующий. " "Планировщик недействителен." #: src/hed/libs/compute/Broker.cpp:585 msgid "Unable to sort ExecutionTarget objects - Invalid Broker object." msgstr "" "Невозможно упорядочить объекты ExecutionTarget - недопустимый объект Broker." #: src/hed/libs/compute/Broker.cpp:609 msgid "" "Unable to register job submission. Can't get JobDescription object from " "Broker, Broker is invalid." msgstr "" "Невозможно зарегистрировать засылку задачи. Невозможно получить объект " "JobDescription из планировщика, планировщик недействителен." #: src/hed/libs/compute/BrokerPlugin.cpp:89 #, c-format msgid "Broker plugin \"%s\" not found." msgstr "Подключаемый модуль брокера \"%s\" не обнаружен" #: src/hed/libs/compute/BrokerPlugin.cpp:96 #, c-format msgid "Unable to load BrokerPlugin (%s)" msgstr "Невозможно загрузить модуль BrokerPlugin (%s)" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:26 #, c-format msgid "Uniq is replacing service coming from %s with service coming from %s" msgstr "" "Uniq заменяет сервис, обнаруженный через %s, на сервис, обнаруженный через %s" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:30 #, c-format msgid "Uniq is ignoring service coming from %s" msgstr "Uniq игнорирует сервис, обнаруженный через %s" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:37 #, c-format msgid "Uniq is adding service coming from %s" msgstr "Uniq добавляет сервис, обнаруженный через %s" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:60 #, c-format msgid "Adding endpoint (%s) to TargetInformationRetriever" msgstr "Добавление точки входа (%s) в TargetInformationRetriever" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:63 #, c-format msgid "Adding endpoint (%s) to ServiceEndpointRetriever" msgstr "Добавление точки входа (%s) в ServiceEndpointRetriever" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:66 #, c-format msgid "" "Adding endpoint (%s) to both ServiceEndpointRetriever and " "TargetInformationRetriever" msgstr "" "Точка входа (%s) добавляется как к ServiceEndpointRetriever, так и к " "TargetInformationRetriever" #: src/hed/libs/compute/EntityRetriever.cpp:42 #, c-format msgid "The plugin %s does not support any interfaces, skipping it." 
msgstr "" "Подключаемый модуль %s не поддерживает никаких интерфейсов, пропускается." #: src/hed/libs/compute/EntityRetriever.cpp:47 #, c-format msgid "" "The first supported interface of the plugin %s is an empty string, skipping " "the plugin." msgstr "" "Первый поддерживаемый интерфейс подключаемого модуля %s оказался пустой " "строкой, модуль пропускается." #: src/hed/libs/compute/EntityRetriever.cpp:95 #, c-format msgid "Interface on endpoint (%s) %s." msgstr "Интерфейс точки входа (%s) %s." #: src/hed/libs/compute/EntityRetriever.cpp:101 #: src/hed/libs/compute/EntityRetriever.cpp:133 #: src/hed/libs/compute/EntityRetriever.cpp:425 #, c-format msgid "Ignoring endpoint (%s), it is already registered in retriever." msgstr "" "Игнорируется точка входа (%s), т.к. она уже зарегистрирована в загрузчике." #: src/hed/libs/compute/EntityRetriever.cpp:110 #, c-format msgid "Service Loop: Endpoint %s" msgstr "Цикл по сервисам: точка входа %s" #: src/hed/libs/compute/EntityRetriever.cpp:112 #, c-format msgid " This endpoint (%s) is STARTED or SUCCESSFUL" msgstr " Состояние точки входа (%s) - STARTED или SUCCESSFUL" #: src/hed/libs/compute/EntityRetriever.cpp:115 #, c-format msgid "" "Suspending querying of endpoint (%s) since the service at the endpoint is " "already being queried, or has been queried." msgstr "" "Приостанавливается опрос точки входа (%s), т.к. сервис по этому адресу уже " "опрашивается или опрошен." #: src/hed/libs/compute/EntityRetriever.cpp:122 #: src/hed/libs/compute/EntityRetriever.cpp:237 #, c-format msgid " Status of endpoint (%s) is %s" msgstr " Состояние точки входа (%s): %s" #: src/hed/libs/compute/EntityRetriever.cpp:126 #, c-format msgid "Setting status (STARTED) for endpoint: %s" msgstr "Задаётся состояние (STARTED) для точки входа: %s" #: src/hed/libs/compute/EntityRetriever.cpp:145 #, c-format msgid "Starting thread to query the endpoint on %s" msgstr "Запускается поток для опроса точки доступа %s" #: src/hed/libs/compute/EntityRetriever.cpp:147 #: src/hed/libs/compute/EntityRetriever.cpp:289 #, c-format msgid "Failed to start querying the endpoint on %s" msgstr "Не удалось начать опрос точки входа на %s" #: src/hed/libs/compute/EntityRetriever.cpp:174 #, c-format msgid "Found a registry, will query it recursively: %s" msgstr "Найден реестр, который будет опрошен рекурсивно: %s" #: src/hed/libs/compute/EntityRetriever.cpp:211 #, c-format msgid "Setting status (%s) for endpoint: %s" msgstr "Присваивается состояние (%s) точки входа: %s" #: src/hed/libs/compute/EntityRetriever.cpp:231 msgid "Checking for suspended endpoints which should be started." msgstr "Проверка отложенных точек входа на предмет повторного опроса" #: src/hed/libs/compute/EntityRetriever.cpp:241 #, c-format msgid "Found started or successful endpoint (%s)" msgstr "Найдена точка входа в состоянии STARTED или SUCCESSFUL (%s)" #: src/hed/libs/compute/EntityRetriever.cpp:253 #, c-format msgid "Found suspended endpoint (%s)" msgstr "Обнаружена временно исключённая точка входа (%s)" #: src/hed/libs/compute/EntityRetriever.cpp:264 #, c-format msgid "Trying to start suspended endpoint (%s)" msgstr "Попытка активации временно исключённой точки входа (%s)" #: src/hed/libs/compute/EntityRetriever.cpp:284 #, c-format msgid "" "Starting querying of suspended endpoint (%s) - no other endpoints for this " "service is being queried or has been queried successfully." msgstr "" "Начинается опрос отложенной точки входа (%s) - другие точки входа этого " "сервиса не опрашиваются, либо были уже успешно опрошены." 
#: src/hed/libs/compute/EntityRetriever.cpp:351 #, c-format msgid "Calling plugin %s to query endpoint on %s" msgstr "Вызывается подключаемый модуль %s для опроса точки входа на %s" #: src/hed/libs/compute/EntityRetriever.cpp:373 #, c-format msgid "" "The interface of this endpoint (%s) is unspecified, will try all possible " "plugins" msgstr "" "Интерфейс этой точки доступа (%s) не задан, пробуются все возможные " "подключаемые модули" #: src/hed/libs/compute/EntityRetriever.cpp:389 #, c-format msgid "Problem loading plugin %s, skipping it." msgstr "Проблемы при подключении модуля %s, модуль пропускается." #: src/hed/libs/compute/EntityRetriever.cpp:393 #, c-format msgid "The endpoint (%s) is not supported by this plugin (%s)" msgstr "Точка входа (%s) не поддерживается этим подключаемым модулем (%s)" #: src/hed/libs/compute/EntityRetriever.cpp:414 #, c-format msgid "" "New endpoint is created (%s) from the one with the unspecified interface (%s)" msgstr "" "Создана новая точка доступа (%s) из точки с неизвестным интерфейсом (%s)" #: src/hed/libs/compute/EntityRetriever.cpp:432 #, c-format msgid "Starting sub-thread to query the endpoint on %s" msgstr "Запускается подпоток для опроса точки доступа по %s" #: src/hed/libs/compute/EntityRetriever.cpp:434 #, c-format msgid "" "Failed to start querying the endpoint on %s (unable to create sub-thread)" msgstr "Сбой начала опроса точки доступа по %s (не удалось создать подпоток)" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:32 #, c-format msgid "Found %s %s (it was loaded already)" msgstr "Найден подключаемый модуль %s %s (уже подгружен)" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:41 #, c-format msgid "" "Unable to locate the \"%s\" plugin. Please refer to installation " "instructions and check if package providing support for %s plugin is " "installed" msgstr "" "Не удалось обнаружить подключаемый модуль \"%s\". Пожалуйста, " "проконсультируйтесь с инструкцией по установке и проверьте, установлен ли " "пакет, содержащий модуль \"%s\"" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:42 #, c-format msgid "%s plugin \"%s\" not found." msgstr "Не найден подключаемый модуль %s \"%s\"." #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:49 #: src/hed/libs/compute/JobControllerPlugin.cpp:100 #: src/hed/libs/compute/JobControllerPlugin.cpp:109 #: src/hed/libs/compute/SubmitterPlugin.cpp:158 #: src/hed/libs/compute/SubmitterPlugin.cpp:168 #, c-format msgid "" "Unable to locate the \"%s\" plugin. Please refer to installation " "instructions and check if package providing support for \"%s\" plugin is " "installed" msgstr "" "Не удалось обнаружить подключаемый модуль \"%s\". Пожалуйста, " "проконсультируйтесь с инструкцией по установке и проверьте, установлен ли " "пакет, содержащий модуль \"%s\"" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:50 #, c-format msgid "%s %s could not be created." msgstr "%s %s не может быть создан." #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:55 #, c-format msgid "Loaded %s %s" msgstr "Загружен %s %s" #: src/hed/libs/compute/ExecutionTarget.cpp:51 #, c-format msgid "" "Skipping ComputingEndpoint '%s', because it has '%s' interface instead of " "the requested '%s'." msgstr "" "Пропускается ComputingEndpoint '%s', потому что объявлен интерфейс '%s' " "вместо запрошенного '%s'." 
#: src/hed/libs/compute/ExecutionTarget.cpp:237 #, c-format msgid "Address: %s" msgstr "Адрес: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:238 #, c-format msgid "Place: %s" msgstr "Место: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:239 #, c-format msgid "Country: %s" msgstr "Страна: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:240 #, c-format msgid "Postal code: %s" msgstr "Почтовый индекс: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:241 #, c-format msgid "Latitude: %f" msgstr "Широта: %f" #: src/hed/libs/compute/ExecutionTarget.cpp:242 #, c-format msgid "Longitude: %f" msgstr "Долгота: %f" #: src/hed/libs/compute/ExecutionTarget.cpp:248 #, c-format msgid "Owner: %s" msgstr "Владелец: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:255 #, c-format msgid "ID: %s" msgstr "ID: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:256 #, c-format msgid "Type: %s" msgstr "Тип: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:261 #, c-format msgid "URL: %s" msgstr "URL: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:262 #, c-format msgid "Interface: %s" msgstr "Интерфейс %s" #: src/hed/libs/compute/ExecutionTarget.cpp:264 msgid "Interface versions:" msgstr "Версии интерфейса:" #: src/hed/libs/compute/ExecutionTarget.cpp:269 msgid "Interface extensions:" msgstr "Расширения интерфейса:" #: src/hed/libs/compute/ExecutionTarget.cpp:274 msgid "Capabilities:" msgstr "Возможности:" #: src/hed/libs/compute/ExecutionTarget.cpp:278 #, c-format msgid "Technology: %s" msgstr "Технология: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:280 msgid "Supported Profiles:" msgstr "Поддерживаемые профили:" #: src/hed/libs/compute/ExecutionTarget.cpp:284 #, c-format msgid "Implementor: %s" msgstr "Внедритель: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:285 #, c-format msgid "Implementation name: %s" msgstr "Имя реализации: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:286 #, c-format msgid "Quality level: %s" msgstr "Уровень качества: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:287 #, c-format msgid "Health state: %s" msgstr "Состояние здоровья: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:288 #, c-format msgid "Health state info: %s" msgstr "Информация о состоянии здоровья: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:289 #, c-format msgid "Serving state: %s" msgstr "Состояние обслуживания: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:290 #, c-format msgid "Issuer CA: %s" msgstr "Сертификат выдан CA: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:292 msgid "Trusted CAs:" msgstr "Доверенные центры сертификации:" #: src/hed/libs/compute/ExecutionTarget.cpp:296 #, c-format msgid "Downtime starts: %s" msgstr "Начало простоя: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:297 #, c-format msgid "Downtime ends: %s" msgstr "Конец простоя: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:298 #, c-format msgid "Staging: %s" msgstr "Размещается: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:300 msgid "Job descriptions:" msgstr "Описания задач:" #: src/hed/libs/compute/ExecutionTarget.cpp:312 #, c-format msgid "Scheme: %s" msgstr "Схема: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:315 #, c-format msgid "Rule: %s" msgstr "Правило: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:327 #, c-format msgid "Mapping queue: %s" msgstr "Назначается очередь: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:328 #, c-format msgid "Max wall-time: %s" msgstr "Длительность, наибольшая (по часам): %s" #: src/hed/libs/compute/ExecutionTarget.cpp:329 #, c-format msgid "Max total wall-time: %s" msgstr "Предел 
общего времени (по часам): %s" #: src/hed/libs/compute/ExecutionTarget.cpp:330 #, c-format msgid "Min wall-time: %s" msgstr "Длительность, наименьшая (по часам): %s" #: src/hed/libs/compute/ExecutionTarget.cpp:331 #, c-format msgid "Default wall-time: %s" msgstr "Длительность по умолчанию (по часам): %s" #: src/hed/libs/compute/ExecutionTarget.cpp:332 #, c-format msgid "Max CPU time: %s" msgstr "Длительность, наибольшая (процессорная): %s" #: src/hed/libs/compute/ExecutionTarget.cpp:333 #, c-format msgid "Min CPU time: %s" msgstr "Длительность, наименьшая (процессорная): %s" #: src/hed/libs/compute/ExecutionTarget.cpp:334 #, c-format msgid "Default CPU time: %s" msgstr "Длительность по умолчанию (процессорная): %s" #: src/hed/libs/compute/ExecutionTarget.cpp:335 #, c-format msgid "Max total jobs: %i" msgstr "Всего заданий (предел): %i" #: src/hed/libs/compute/ExecutionTarget.cpp:336 #, c-format msgid "Max running jobs: %i" msgstr "Задачи в счёте (предел): %i" #: src/hed/libs/compute/ExecutionTarget.cpp:337 #, c-format msgid "Max waiting jobs: %i" msgstr "Предел задач в очереди: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:338 #, c-format msgid "Max pre-LRMS waiting jobs: %i" msgstr "Предел задач в очереди до СУПО: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:339 #, c-format msgid "Max user running jobs: %i" msgstr "Задачи пользователя в счёте (предел): %i" #: src/hed/libs/compute/ExecutionTarget.cpp:340 #, c-format msgid "Max slots per job: %i" msgstr "Предел сегментов на задачу: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:341 #, c-format msgid "Max stage in streams: %i" msgstr "Предел потоков размещения: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:342 #, c-format msgid "Max stage out streams: %i" msgstr "Потоки отгрузки (верхний предел): %i" #: src/hed/libs/compute/ExecutionTarget.cpp:343 #, c-format msgid "Scheduling policy: %s" msgstr "Правила планировки: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:344 #, c-format msgid "Max memory: %i" msgstr "Макс. 
память: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:345 #, c-format msgid "Max virtual memory: %i" msgstr "Предел виртуальной памяти: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:346 #, c-format msgid "Max disk space: %i" msgstr "Предел дискового пространства: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:347 #, c-format msgid "Default Storage Service: %s" msgstr "Хранилище по умолчанию: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:348 msgid "Supports preemption" msgstr "Поддержка упреждения" #: src/hed/libs/compute/ExecutionTarget.cpp:349 msgid "Doesn't support preemption" msgstr "Упреждение не поддерживается" #: src/hed/libs/compute/ExecutionTarget.cpp:350 #, c-format msgid "Total jobs: %i" msgstr "Всего задач: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:351 #, c-format msgid "Running jobs: %i" msgstr "Задачи в счёте: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:352 #, c-format msgid "Local running jobs: %i" msgstr "Внутренние задачи в счёте: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:353 #, c-format msgid "Waiting jobs: %i" msgstr "Задачи в очереди: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:354 #, c-format msgid "Local waiting jobs: %i" msgstr "Внутренние задачи в очереди: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:355 #, c-format msgid "Suspended jobs: %i" msgstr "Приостановленные задачи: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:356 #, c-format msgid "Local suspended jobs: %i" msgstr "Внутренние приостановленные задачи: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:357 #, c-format msgid "Staging jobs: %i" msgstr "Задачи, выполняющие размещение данных: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:358 #, c-format msgid "Pre-LRMS waiting jobs: %i" msgstr "Задачи в очереди до СУПО: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:359 #, c-format msgid "Estimated average waiting time: %s" msgstr "Оценка усреднённого времени ожидания: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:360 #, c-format msgid "Estimated worst waiting time: %s" msgstr "Оценка худшего времени ожидания: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:361 #, c-format msgid "Free slots: %i" msgstr "Свободные ядра: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:363 msgid "Free slots grouped according to time limits (limit: free slots):" msgstr "" "Доступные места сгруппированы по предельному времени (предел: доступные " "места):" #: src/hed/libs/compute/ExecutionTarget.cpp:366 #, c-format msgid " %s: %i" msgstr " %s: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:367 #, c-format msgid " unspecified: %i" msgstr " непределённых: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:370 #, c-format msgid "Used slots: %i" msgstr "Использованные ядра: %d" #: src/hed/libs/compute/ExecutionTarget.cpp:371 #, c-format msgid "Requested slots: %i" msgstr "Запрошено сегментов ядер: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:372 #, c-format msgid "Reservation policy: %s" msgstr "Политика бронирования: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:379 #, c-format msgid "Resource manager: %s" msgstr "Система управления: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:380 #, c-format msgid " (%s)" msgstr " (%s)" #: src/hed/libs/compute/ExecutionTarget.cpp:383 #, c-format msgid "Total physical CPUs: %i" msgstr "Общее количество физических процессоров: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:384 #, c-format msgid "Total logical CPUs: %i" msgstr "Общее количество логических процессоров: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:385 #, c-format msgid "Total slots: %i" msgstr 
"Общее количество ядер: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:386 msgid "Supports advance reservations" msgstr "Поддержка предварительного бронирования" #: src/hed/libs/compute/ExecutionTarget.cpp:387 msgid "Doesn't support advance reservations" msgstr "Нет поддержки предварительного бронирования" #: src/hed/libs/compute/ExecutionTarget.cpp:388 msgid "Supports bulk submission" msgstr "Поддерживает групповую засылку" #: src/hed/libs/compute/ExecutionTarget.cpp:389 msgid "Doesn't support bulk Submission" msgstr "Не поддерживает групповую засылку" #: src/hed/libs/compute/ExecutionTarget.cpp:390 msgid "Homogeneous resource" msgstr "Однородный ресурс" #: src/hed/libs/compute/ExecutionTarget.cpp:391 msgid "Non-homogeneous resource" msgstr "Неоднородный ресурс" #: src/hed/libs/compute/ExecutionTarget.cpp:393 msgid "Network information:" msgstr "Информация о сети:" #: src/hed/libs/compute/ExecutionTarget.cpp:398 msgid "Working area is shared among jobs" msgstr "Рабочее пространство используется разными задачами" #: src/hed/libs/compute/ExecutionTarget.cpp:399 msgid "Working area is not shared among jobs" msgstr "Рабочее пространство используется одной задачей" #: src/hed/libs/compute/ExecutionTarget.cpp:400 #, c-format msgid "Working area total size: %i GB" msgstr "Общий объём рабочего пространства: %i GB" #: src/hed/libs/compute/ExecutionTarget.cpp:401 #, c-format msgid "Working area free size: %i GB" msgstr "Свободное рабочее пространство: %i GB" #: src/hed/libs/compute/ExecutionTarget.cpp:402 #, c-format msgid "Working area life time: %s" msgstr "Время жизни рабочего пространства: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:403 #, c-format msgid "Cache area total size: %i GB" msgstr "Общий объём пространства кэша: %i GB" #: src/hed/libs/compute/ExecutionTarget.cpp:404 #, c-format msgid "Cache area free size: %i GB" msgstr "Свободное пространство кэша: %i GB" #: src/hed/libs/compute/ExecutionTarget.cpp:410 #, c-format msgid "Platform: %s" msgstr "Платформа: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:411 msgid "Execution environment supports inbound connections" msgstr "Среда исполнения поддерживает входящие соединения" #: src/hed/libs/compute/ExecutionTarget.cpp:412 msgid "Execution environment does not support inbound connections" msgstr "Среда исполнения не поддерживает входящие соединения" #: src/hed/libs/compute/ExecutionTarget.cpp:413 msgid "Execution environment supports outbound connections" msgstr "Среда исполнения поддерживает исходящие соединения" #: src/hed/libs/compute/ExecutionTarget.cpp:414 msgid "Execution environment does not support outbound connections" msgstr "Среда исполнения не поддерживает исходящие соединения" #: src/hed/libs/compute/ExecutionTarget.cpp:415 msgid "Execution environment is a virtual machine" msgstr "Рабочая среда - виртуальная машина" #: src/hed/libs/compute/ExecutionTarget.cpp:416 msgid "Execution environment is a physical machine" msgstr "Рабочая среда - реальная машина" #: src/hed/libs/compute/ExecutionTarget.cpp:417 #, c-format msgid "CPU vendor: %s" msgstr "Производитель процессора: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:418 #, c-format msgid "CPU model: %s" msgstr "Модель процессора: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:419 #, c-format msgid "CPU version: %s" msgstr "Версия процессора: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:420 #, c-format msgid "CPU clock speed: %i" msgstr "Тактовая частота процессора: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:421 #, c-format msgid "Main memory size: %i" msgstr 
"Объём основной памяти: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:422 #, c-format msgid "OS family: %s" msgstr "Семейство ОС: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:423 #, c-format msgid "OS name: %s" msgstr "Название ОС: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:424 #, c-format msgid "OS version: %s" msgstr "Версия ОС: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:431 msgid "Computing service:" msgstr "Вычислительный сервис:" #: src/hed/libs/compute/ExecutionTarget.cpp:455 #, c-format msgid "%d Endpoints" msgstr "%d точки входа" #: src/hed/libs/compute/ExecutionTarget.cpp:460 msgid "Endpoint Information:" msgstr "Информация о точке входа:" #: src/hed/libs/compute/ExecutionTarget.cpp:472 #, c-format msgid "%d Batch Systems" msgstr "%d системы управления пакетной обработкой" #: src/hed/libs/compute/ExecutionTarget.cpp:477 msgid "Batch System Information:" msgstr "Информация о СУПО:" #: src/hed/libs/compute/ExecutionTarget.cpp:483 msgid "Installed application environments:" msgstr "Установленные рабочие среды:" #: src/hed/libs/compute/ExecutionTarget.cpp:496 #, c-format msgid "%d Shares" msgstr "%d Совместные ресурсы" #: src/hed/libs/compute/ExecutionTarget.cpp:501 msgid "Share Information:" msgstr "Информация о совместном ресурсе:" #: src/hed/libs/compute/ExecutionTarget.cpp:507 #, c-format msgid "%d mapping policies" msgstr "%d правила присвоения" #: src/hed/libs/compute/ExecutionTarget.cpp:511 msgid "Mapping policy:" msgstr "Правило присвоения:" #: src/hed/libs/compute/ExecutionTarget.cpp:527 #, c-format msgid "Execution Target on Computing Service: %s" msgstr "Исполняющий ресурс вычислительного сервиса: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:529 #, c-format msgid " Computing endpoint URL: %s" msgstr " URL точки входа для вычислений: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:531 #, c-format msgid " Computing endpoint interface name: %s" msgstr " Название интерфейса точки входа для вычислений: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:533 #: src/hed/libs/compute/Job.cpp:580 #, c-format msgid " Queue: %s" msgstr "Очередь: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:536 #, c-format msgid " Mapping queue: %s" msgstr " Очередь присвоения: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:539 #, c-format msgid " Health state: %s" msgstr " Состояние здоровья: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:544 msgid "Service information:" msgstr "Сведения о службе:" #: src/hed/libs/compute/ExecutionTarget.cpp:549 msgid " Installed application environments:" msgstr "Установленные рабочие среды:" #: src/hed/libs/compute/ExecutionTarget.cpp:556 msgid "Batch system information:" msgstr "Информация о СУПО:" #: src/hed/libs/compute/ExecutionTarget.cpp:559 msgid "Queue information:" msgstr "Сведения об очереди:" #: src/hed/libs/compute/ExecutionTarget.cpp:566 msgid " Benchmark information:" msgstr " Информация об эталонных тестах:" #: src/hed/libs/compute/GLUE2.cpp:58 msgid "The ComputingService doesn't advertise its Quality Level." msgstr "Служба ComputingService не сообщает о своём уровне качества" #: src/hed/libs/compute/GLUE2.cpp:117 msgid "The ComputingEndpoint doesn't advertise its Quality Level." msgstr "Служба ComputingEndpoint не сообщает о своём уровне качества" #: src/hed/libs/compute/GLUE2.cpp:128 msgid "The ComputingService doesn't advertise its Interface." msgstr "Служба ComputingService не сообщает о своём интерфейсе" #: src/hed/libs/compute/GLUE2.cpp:160 msgid "The ComputingEndpoint doesn't advertise its Serving State." 
msgstr "Служба ComputingEndpoint не сообщает о своём состоянии обслуживания" #: src/hed/libs/compute/Job.cpp:329 msgid "Unable to detect format of job record." msgstr "Невозможно определить формат учётной записи о задаче." #: src/hed/libs/compute/Job.cpp:550 #, c-format msgid "Job: %s" msgstr "Задача: %s" #: src/hed/libs/compute/Job.cpp:552 #, c-format msgid " Name: %s" msgstr "Имя: %s" #: src/hed/libs/compute/Job.cpp:553 #, c-format msgid " State: %s" msgstr " Состояние: %s" #: src/hed/libs/compute/Job.cpp:556 #, c-format msgid " Specific state: %s" msgstr " Специфическое состояние: %s" #: src/hed/libs/compute/Job.cpp:560 src/hed/libs/compute/Job.cpp:584 #, c-format msgid " Waiting Position: %d" msgstr "Положение в очереди: %d" #: src/hed/libs/compute/Job.cpp:564 #, c-format msgid " Exit Code: %d" msgstr "Код выхода: %d" #: src/hed/libs/compute/Job.cpp:568 #, c-format msgid " Job Error: %s" msgstr " Ошибка задачи: %s" #: src/hed/libs/compute/Job.cpp:573 #, c-format msgid " Owner: %s" msgstr "Владелец: %s" #: src/hed/libs/compute/Job.cpp:577 #, c-format msgid " Other Messages: %s" msgstr "Другие сообщения: %s" #: src/hed/libs/compute/Job.cpp:582 #, c-format msgid " Requested Slots: %d" msgstr " Запрошено вакансий: %i" #: src/hed/libs/compute/Job.cpp:587 #, c-format msgid " Stdin: %s" msgstr "Стандартный вход: %s" #: src/hed/libs/compute/Job.cpp:589 #, c-format msgid " Stdout: %s" msgstr "Стандартный выход: %s" #: src/hed/libs/compute/Job.cpp:591 #, c-format msgid " Stderr: %s" msgstr "Стандартная ошибка: %s" #: src/hed/libs/compute/Job.cpp:593 #, c-format msgid " Computing Service Log Directory: %s" msgstr " Каталог, содержащий журнальную запись вычислительного сервиса: %s" #: src/hed/libs/compute/Job.cpp:596 #, c-format msgid " Submitted: %s" msgstr "Заслана: %s" #: src/hed/libs/compute/Job.cpp:599 #, c-format msgid " End Time: %s" msgstr "Время окончания: %s" #: src/hed/libs/compute/Job.cpp:602 #, c-format msgid " Submitted from: %s" msgstr "Засылающий клиент: %s" #: src/hed/libs/compute/Job.cpp:605 #, c-format msgid " Submitting client: %s" msgstr "Версия клиента: %s" #: src/hed/libs/compute/Job.cpp:608 #, c-format msgid " Requested CPU Time: %s" msgstr "Запрошенное процессорное время: %s" #: src/hed/libs/compute/Job.cpp:612 #, c-format msgid " Used CPU Time: %s" msgstr "Использованное процессорное время: %s" #: src/hed/libs/compute/Job.cpp:615 #, c-format msgid " Used Wall Time: %s" msgstr "Использованное время: %s" #: src/hed/libs/compute/Job.cpp:618 #, c-format msgid " Used Memory: %d" msgstr "Использование ОЗУ: %d" #: src/hed/libs/compute/Job.cpp:622 #, c-format msgid " Results were deleted: %s" msgstr "Результаты были удалены: %s" #: src/hed/libs/compute/Job.cpp:623 #, c-format msgid " Results must be retrieved before: %s" msgstr "Результаты должны быть востребованы до: %s" #: src/hed/libs/compute/Job.cpp:627 #, c-format msgid " Proxy valid until: %s" msgstr "Доверенность действительна до: %s" #: src/hed/libs/compute/Job.cpp:631 #, c-format msgid " Entry valid from: %s" msgstr "Запись действительна с: %s" #: src/hed/libs/compute/Job.cpp:634 #, c-format msgid " Entry valid for: %s" msgstr "Запись действительна на: %s" #: src/hed/libs/compute/Job.cpp:638 msgid " Old job IDs:" msgstr " Старый ярлык задачи:" #: src/hed/libs/compute/Job.cpp:646 #, c-format msgid " ID on service: %s" msgstr " ID сервиса: %s" #: src/hed/libs/compute/Job.cpp:647 #, c-format msgid " Service information URL: %s (%s)" msgstr " URL информации о сервисе: %s (%s)" #: src/hed/libs/compute/Job.cpp:648 #, c-format 
msgid " Job status URL: %s (%s)" msgstr " URL состояния задачи: %s (%s)" #: src/hed/libs/compute/Job.cpp:649 #, c-format msgid " Job management URL: %s (%s)" msgstr " URL управления задачей: %s (%s)" #: src/hed/libs/compute/Job.cpp:650 #, c-format msgid " Stagein directory URL: %s" msgstr " URL каталога для загрузки: %s" #: src/hed/libs/compute/Job.cpp:651 #, c-format msgid " Stageout directory URL: %s" msgstr " URL каталога для отгрузки: %s" #: src/hed/libs/compute/Job.cpp:652 #, c-format msgid " Session directory URL: %s" msgstr " URL каталога Грид-сесии: %s" #: src/hed/libs/compute/Job.cpp:654 msgid " Delegation IDs:" msgstr " Идентификаторы делегирования:" #: src/hed/libs/compute/Job.cpp:670 #, c-format msgid "Unable to handle job (%s), no interface specified." msgstr "Невозможно обработать задачу (%s), не указан интерфейс." #: src/hed/libs/compute/Job.cpp:675 #, c-format msgid "" "Unable to handle job (%s), no plugin associated with the specified interface " "(%s)" msgstr "" "Невозможно обработать задачу (%s), для указанного интерфейса (%s) нету " "подключаемых модулей" #: src/hed/libs/compute/Job.cpp:697 #, c-format msgid "Invalid download destination path specified (%s)" msgstr "Указан неверный путь к каталогу загрузки (%s)" #: src/hed/libs/compute/Job.cpp:702 #, c-format msgid "" "Unable to download job (%s), no JobControllerPlugin plugin was set to handle " "the job." msgstr "" "Невозможно загрузить задачу (%s), не был задан модуль JobControllerPlugin " "для работы с задачей." #: src/hed/libs/compute/Job.cpp:706 #, c-format msgid "Downloading job: %s" msgstr "Загружается задача: %s" #: src/hed/libs/compute/Job.cpp:710 #, c-format msgid "" "Cant retrieve job files for job (%s) - unable to determine URL of stage out " "directory" msgstr "" "Не удалось получить выходные файлы задачи (%s) - невозможно определить URL " "для записи" #: src/hed/libs/compute/Job.cpp:715 #, c-format msgid "Invalid stage out path specified (%s)" msgstr "Указан неверный путь для отгрузки(%s)" #: src/hed/libs/compute/Job.cpp:722 #, c-format msgid "%s directory exist! Skipping job." msgstr "Каталог %s уже сушествует! Задача пропускается." #: src/hed/libs/compute/Job.cpp:728 #, c-format msgid "Unable to retrieve list of job files to download for job %s" msgstr "Невозможно получить список загружаемых файлов для задачи %s" #: src/hed/libs/compute/Job.cpp:733 #, c-format msgid "No files to retrieve for job %s" msgstr "Отсутствуют загружаемые файлы для задачи %s" #: src/hed/libs/compute/Job.cpp:739 #, c-format msgid "Failed to create directory %s! Skipping job." msgstr "Сбой создания каталога %s! Задача пропускается." 
#: src/hed/libs/compute/Job.cpp:752 #, c-format msgid "Failed downloading %s to %s, destination already exist" msgstr "Ошибка загрузки %s в %s, назначение уже существует" #: src/hed/libs/compute/Job.cpp:758 #, c-format msgid "Failed downloading %s to %s, unable to remove existing destination" msgstr "Ошибка загрузки %s в %s, невозможно удалить существующее назначение" #: src/hed/libs/compute/Job.cpp:764 #, c-format msgid "Failed downloading %s to %s" msgstr "Ошибка загрузки %s в %s" #: src/hed/libs/compute/Job.cpp:777 src/hed/libs/compute/Job.cpp:782 #, c-format msgid "Unable to list files at %s" msgstr "Невозможно перечислить файлы на %s" #: src/hed/libs/compute/Job.cpp:824 msgid "Now copying (from -> to)" msgstr "Производится копирование (из -> в)" #: src/hed/libs/compute/Job.cpp:825 #, c-format msgid " %s -> %s" msgstr " %s -> %s" #: src/hed/libs/compute/Job.cpp:841 #, c-format msgid "Unable to initialise connection to source: %s" msgstr "Невозможно инициализировать соединение с источником: %s" #: src/hed/libs/compute/Job.cpp:852 #, c-format msgid "Unable to initialise connection to destination: %s" msgstr "Невозможно инициализировать соединение с назначением: %s" #: src/hed/libs/compute/Job.cpp:871 #, c-format msgid "File download failed: %s" msgstr "Невозможно загрузить файл: %s" #: src/hed/libs/compute/Job.cpp:910 src/hed/libs/compute/Job.cpp:939 #: src/hed/libs/compute/Job.cpp:971 src/hed/libs/compute/Job.cpp:1004 #, c-format msgid "Waiting for lock on file %s" msgstr "Ожидание разблокирования файла %s" #: src/hed/libs/compute/JobControllerPlugin.cpp:101 #, c-format msgid "JobControllerPlugin plugin \"%s\" not found." msgstr "Подключаемый модуль JobControllerPlugin \"%s\" не обнаружен." #: src/hed/libs/compute/JobControllerPlugin.cpp:110 #, c-format msgid "JobControllerPlugin %s could not be created" msgstr "Подключаемый модуль JobControllerPlugin %s не может быть создан" #: src/hed/libs/compute/JobControllerPlugin.cpp:115 #, c-format msgid "Loaded JobControllerPlugin %s" msgstr "Подгружен JobControllerPlugin %s" #: src/hed/libs/compute/JobDescription.cpp:22 #, c-format msgid ": %d" msgstr ": %d" #: src/hed/libs/compute/JobDescription.cpp:24 #, c-format msgid ": %s" msgstr ": %s" #: src/hed/libs/compute/JobDescription.cpp:138 msgid " --- DRY RUN --- " msgstr " --- ХОЛОСТАЯ ПРОГОНКА --- " #: src/hed/libs/compute/JobDescription.cpp:148 #, c-format msgid " Annotation: %s" msgstr " Аннотация: %s" #: src/hed/libs/compute/JobDescription.cpp:154 #, c-format msgid " Old activity ID: %s" msgstr " Старый ярлык задания: %s" #: src/hed/libs/compute/JobDescription.cpp:160 #, c-format msgid " Argument: %s" msgstr " Argument: %s" #: src/hed/libs/compute/JobDescription.cpp:171 #, c-format msgid " RemoteLogging (optional): %s (%s)" msgstr " Удалённое журналирование (по выбору): %s (%s)" #: src/hed/libs/compute/JobDescription.cpp:174 #, c-format msgid " RemoteLogging: %s (%s)" msgstr " Удалённое журналирование: %s (%s)" #: src/hed/libs/compute/JobDescription.cpp:182 #, c-format msgid " Environment.name: %s" msgstr " Environment.name: %s" #: src/hed/libs/compute/JobDescription.cpp:183 #, c-format msgid " Environment: %s" msgstr " Environment: %s" #: src/hed/libs/compute/JobDescription.cpp:196 #, c-format msgid " PreExecutable.Argument: %s" msgstr " PreExecutable.Argument: %s" #: src/hed/libs/compute/JobDescription.cpp:199 #: src/hed/libs/compute/JobDescription.cpp:217 #, c-format msgid " Exit code for successful execution: %d" msgstr " Код выхода успешного исполнения: %d" #: src/hed/libs/compute/JobDescription.cpp:202 #:
src/hed/libs/compute/JobDescription.cpp:220 msgid " No exit code for successful execution specified." msgstr " Код выхода для успешного исполнения не указан" #: src/hed/libs/compute/JobDescription.cpp:214 #, c-format msgid " PostExecutable.Argument: %s" msgstr " PostExecutable.Argument: %s" #: src/hed/libs/compute/JobDescription.cpp:230 #, c-format msgid " Access control: %s" msgstr " Контроль доступа: %s" #: src/hed/libs/compute/JobDescription.cpp:234 #, c-format msgid " Processing start time: %s" msgstr " Время начала обработки: %s" #: src/hed/libs/compute/JobDescription.cpp:237 msgid " Notify:" msgstr "Уведомить:" #: src/hed/libs/compute/JobDescription.cpp:251 #, c-format msgid " Credential service: %s" msgstr " Служба параметров доступа: %s" #: src/hed/libs/compute/JobDescription.cpp:261 msgid " Operating system requirements:" msgstr "Требования к операционной системе:" #: src/hed/libs/compute/JobDescription.cpp:279 msgid " Computing endpoint requirements:" msgstr "Требования к вычислительному ресурсу:" #: src/hed/libs/compute/JobDescription.cpp:292 msgid " Node access: inbound" msgstr " Доступ к узлу: входящий" #: src/hed/libs/compute/JobDescription.cpp:295 msgid " Node access: outbound" msgstr " Доступ к узлу: исходящий" #: src/hed/libs/compute/JobDescription.cpp:298 msgid " Node access: inbound and outbound" msgstr " Доступ к узлу: входящий и исходящий" #: src/hed/libs/compute/JobDescription.cpp:308 msgid " Job requires exclusive execution" msgstr " Задача требует эксклюзивного исполнения" #: src/hed/libs/compute/JobDescription.cpp:311 msgid " Job does not require exclusive execution" msgstr " Задача не требует эксклюзивного исполнения" #: src/hed/libs/compute/JobDescription.cpp:316 msgid " Run time environment requirements:" msgstr "Требования среды выполнения:" #: src/hed/libs/compute/JobDescription.cpp:328 msgid " Inputfile element:" msgstr " Элемент Inputfile:" #: src/hed/libs/compute/JobDescription.cpp:329 #: src/hed/libs/compute/JobDescription.cpp:351 #, c-format msgid " Name: %s" msgstr " Name: %s" #: src/hed/libs/compute/JobDescription.cpp:331 msgid " Is executable: true" msgstr " Исполняемый: верно" #: src/hed/libs/compute/JobDescription.cpp:335 #, c-format msgid " Sources: %s" msgstr " Источники: %s" #: src/hed/libs/compute/JobDescription.cpp:337 #, c-format msgid " Sources.DelegationID: %s" msgstr " Sources.DelegationID: %s" #: src/hed/libs/compute/JobDescription.cpp:341 #, c-format msgid " Sources.Options: %s = %s" msgstr " Sources.Options: %s = %s" #: src/hed/libs/compute/JobDescription.cpp:350 msgid " Outputfile element:" msgstr " Элемент Outputfile:" #: src/hed/libs/compute/JobDescription.cpp:354 #, c-format msgid " Targets: %s" msgstr " Назначения: %s" #: src/hed/libs/compute/JobDescription.cpp:356 #, c-format msgid " Targets.DelegationID: %s" msgstr " Targets.DelegationID: %s" #: src/hed/libs/compute/JobDescription.cpp:360 #, c-format msgid " Targets.Options: %s = %s" msgstr " Targets.Options: %s = %s" #: src/hed/libs/compute/JobDescription.cpp:367 #, c-format msgid " DelegationID element: %s" msgstr " Элемент DelegationID: %s" #: src/hed/libs/compute/JobDescription.cpp:374 #, c-format msgid " Other attributes: [%s], %s" msgstr " Другие атрибуты: [%s], %s" #: src/hed/libs/compute/JobDescription.cpp:440 msgid "Empty job description source string" msgstr "Пустое исходное описание задачи" #: src/hed/libs/compute/JobDescription.cpp:473 msgid "No job description parsers available" msgstr "Отсутствуют разборщики описания задания" #: 
src/hed/libs/compute/JobDescription.cpp:475 #, c-format msgid "" "No job description parsers suitable for handling '%s' language are available" msgstr "Нет разборщиков описания задачи, подходящих для обработки языка '%s'" #: src/hed/libs/compute/JobDescription.cpp:483 #, c-format msgid "%s parsing error" msgstr "%s ошибка разборки" #: src/hed/libs/compute/JobDescription.cpp:499 msgid "No job description parser was able to interpret job description" msgstr "Ни один разборщик не смог обработать описание задачи" #: src/hed/libs/compute/JobDescription.cpp:509 msgid "" "Job description language is not specified, unable to output description." msgstr "Язык описания задачи не указан, невозможно вывести описание" #: src/hed/libs/compute/JobDescription.cpp:521 #, c-format msgid "Generating %s job description output" msgstr "Создаётся описание задачи в формате %s " #: src/hed/libs/compute/JobDescription.cpp:537 #, c-format msgid "Language (%s) not recognized by any job description parsers." msgstr "Язык (%s) не опознан ни одним из модулей разборки описаний задач." #: src/hed/libs/compute/JobDescription.cpp:550 #, c-format msgid "Two input files have identical name '%s'." msgstr "Два входных файла с идентичными именами '%s'." #: src/hed/libs/compute/JobDescription.cpp:569 #: src/hed/libs/compute/JobDescription.cpp:582 #, c-format msgid "Cannot stat local input file '%s'" msgstr "Невозможно определить статус локального входного файла '%s'" #: src/hed/libs/compute/JobDescription.cpp:602 #, c-format msgid "Cannot find local input file '%s' (%s)" msgstr "Невозможно обнаружить локальный входной файл '%s' (%s)" #: src/hed/libs/compute/JobDescription.cpp:644 msgid "Unable to select runtime environment" msgstr "Невозможно выбрать среду выполнения" #: src/hed/libs/compute/JobDescription.cpp:651 msgid "Unable to select middleware" msgstr "Невозможно выбрать подпрограммное обеспечение." #: src/hed/libs/compute/JobDescription.cpp:658 msgid "Unable to select operating system." msgstr "Невозможно выбрать операционную систему." #: src/hed/libs/compute/JobDescription.cpp:677 #, c-format msgid "No test-job with ID %d found." msgstr "Тестовая задача под номером %d не найдена." #: src/hed/libs/compute/JobDescription.cpp:689 #, c-format msgid "Test was defined with ID %d, but some error occurred during parsing it." msgstr "" "Тест был создан с идентификатором %d, но при обработке возникла ошибка." #: src/hed/libs/compute/JobDescription.cpp:693 #, c-format msgid "No jobdescription resulted at %d test" msgstr "Для теста %d отсутствует описание задачи" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:52 #, c-format msgid "JobDescriptionParserPlugin plugin \"%s\" not found." 
msgstr "Подключаемый модуль JobDescriptionParserPlugin \"%s\" не обнаружен" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:59 #, c-format msgid "JobDescriptionParserPlugin %s could not be created" msgstr "Подключаемый модуль JobDescriptionParserPlugin %s не может быть создан" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:64 #, c-format msgid "Loaded JobDescriptionParserPlugin %s" msgstr "Подгружен JobDescriptionParserPlugin %s" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:172 msgid "Unable to create temporary directory" msgstr "Не удалось создать временный каталог" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:180 #, c-format msgid "Unable to create data base environment (%s)" msgstr "Не удалось создать окружение для базы данных (%s)" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:190 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:194 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:198 #, c-format msgid "Unable to set duplicate flags for secondary key DB (%s)" msgstr "" "Невозможно установить повторяющиеся метки для вторичной базы данных ключей (%" "s)" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:204 #, c-format msgid "Unable to create job database (%s)" msgstr "Не удалось создать базу данных задач (%s)" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:208 #, c-format msgid "Unable to create DB for secondary name keys (%s)" msgstr "Не удалось создать базу данных для вторичных ключей имён (%s)" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:212 #, c-format msgid "Unable to create DB for secondary endpoint keys (%s)" msgstr "Не удалось создать базу данных для вторичных ключей точек входа (%s)" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:216 #, c-format msgid "Unable to create DB for secondary service info keys (%s)" msgstr "" "Не удалось создать базу данных для вторичных ключей информации о службах (%s)" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:221 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:225 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:229 #, c-format msgid "Unable to associate secondary DB with primary DB (%s)" msgstr "" "Невозможно поставить в соответствие вторичную базу данных первичной (%s)" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:232 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:92 #, c-format msgid "Job database created successfully (%s)" msgstr "Успешно создана база данных задач (%s)" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:274 #, c-format msgid "Error from BDB: %s: %s" msgstr "Ошибка BDB: %s: %s" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:277 #, c-format msgid "Error from BDB: %s" msgstr "Ошибка BDB: %s" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:297 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:137 #: src/hed/libs/compute/JobInformationStorageXML.cpp:27 #, c-format msgid "" "Job list file cannot be created: The parent directory (%s) doesn't exist." msgstr "" "Файл списка задач не может быть создан: родительский каталог (%s) не " "существует." 
#: src/hed/libs/compute/JobInformationStorageBDB.cpp:301 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:141 #: src/hed/libs/compute/JobInformationStorageXML.cpp:31 #, c-format msgid "Job list file cannot be created: %s is not a directory" msgstr "Файл списка задач не может быть создан: %s не является каталогом" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:308 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:148 #: src/hed/libs/compute/JobInformationStorageXML.cpp:38 #, c-format msgid "Job list file (%s) is not a regular file" msgstr "Список задач (%s) не является стандартным файлом" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:346 #: src/hed/libs/compute/JobInformationStorageBDB.cpp:405 #, c-format msgid "Unable to write key/value pair to job database (%s): Key \"%s\"" msgstr "" "Невозможно записать пару ключ/значение в базу данных задач (%s): Ключ \"%s\"" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:572 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:435 #: src/hed/libs/compute/JobInformationStorageXML.cpp:137 #, c-format msgid "Unable to truncate job database (%s)" msgstr "Не удалось укоротить базу данных задач (%s)" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:603 msgid "" "ENOENT: The file or directory does not exist, Or a nonexistent re_source " "file was specified." msgstr "" "ENOENT: Файл или каталог не существует, либо указан несуществующий файл " "re_source." #: src/hed/libs/compute/JobInformationStorageBDB.cpp:606 msgid "" "DB_OLD_VERSION: The database cannot be opened without being first upgraded." msgstr "" "DB_OLD_VERSION: База данных не может быть открыта без предварительного " "обновления версии." #: src/hed/libs/compute/JobInformationStorageBDB.cpp:609 msgid "EEXIST: DB_CREATE and DB_EXCL were specified and the database exists." msgstr "EEXIST: были заданы DB_CREATE и DB_EXCL, и база данных существует."
#: src/hed/libs/compute/JobInformationStorageBDB.cpp:611 msgid "EINVAL" msgstr "EINVAL" #: src/hed/libs/compute/JobInformationStorageBDB.cpp:614 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:468 #, c-format msgid "Unable to determine error (%d)" msgstr "Невозможно распознать ошибку (%d)" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:57 #, c-format msgid "Unable to create data base (%s)" msgstr "Не удалось создать базу данных (%s)" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:72 #, c-format msgid "Unable to create jobs table in data base (%s)" msgstr "Не удалось создать таблицу задач в базе данных (%s)" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:80 #, c-format msgid "Unable to create index for jobs table in data base (%s)" msgstr "Не удалось создать индекс для таблицы задач в базе данных (%s)" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:88 #, c-format msgid "Failed checking database (%s)" msgstr "Сбой сверки базы данных (%s)" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:113 #, c-format msgid "Error from SQLite: %s: %s" msgstr "Ошибка SQLite: %s: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:116 #, c-format msgid "Error from SQLite: %s" msgstr "Ошибка SQLite: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:246 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:253 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:260 #, c-format msgid "Unable to write records into job database (%s): Id \"%s\"" msgstr "Невозможно внести запись в базу данных задач (%s): Id \"%s\"" #: src/hed/libs/compute/JobInformationStorageXML.cpp:51 #: src/hed/libs/compute/JobInformationStorageXML.cpp:223 #: src/hed/libs/compute/JobInformationStorageXML.cpp:264 #, c-format msgid "Waiting for lock on job list file %s" msgstr "Ожидание разблокирования файла списка задач %s" #: src/hed/libs/compute/JobInformationStorageXML.cpp:162 #, c-format msgid "Will remove %s on service %s." msgstr "Задача %s будет удалена с сервиса %s."
#: src/hed/libs/compute/JobSupervisor.cpp:36 msgid "Ignoring job, the job ID is empty" msgstr "Задача игнорируется, так как её ярлык пуст" #: src/hed/libs/compute/JobSupervisor.cpp:41 #, c-format msgid "Ignoring job (%s), the management interface name is unknown" msgstr "Игнорируется задача (%s), отсутствует название интерфейса управления" #: src/hed/libs/compute/JobSupervisor.cpp:46 #, c-format msgid "Ignoring job (%s), the job management URL is unknown" msgstr "Игнорируется задача (%s), отсутствует URL интерфейса управления" #: src/hed/libs/compute/JobSupervisor.cpp:51 #, c-format msgid "Ignoring job (%s), the status interface name is unknown" msgstr "Игнорируется задача (%s), отсутствует название интерфейса состояния" #: src/hed/libs/compute/JobSupervisor.cpp:56 #, c-format msgid "Ignoring job (%s), the job status URL is unknown" msgstr "Игнорируется задача (%s), отсутствует URL состояния задачи" #: src/hed/libs/compute/JobSupervisor.cpp:65 #, c-format msgid "Ignoring job (%s), unable to load JobControllerPlugin for %s" msgstr "" "Игнорируется задача (%s), невозможно подгрузить JobControllerPlugin для %s" #: src/hed/libs/compute/JobSupervisor.cpp:72 #, c-format msgid "" "Ignoring job (%s), already tried and were unable to load JobControllerPlugin" msgstr "" "Игнорируется задача (%s), предыдущая попытка подгрузить JobControllerPlugin " "завершилась неудачей" #: src/hed/libs/compute/JobSupervisor.cpp:385 #, c-format msgid "Job resubmission failed: Unable to load broker (%s)" msgstr "Перезасылка задачи оборвана: Невозможно подгрузить планировщик (%s)" #: src/hed/libs/compute/JobSupervisor.cpp:400 msgid "Job resubmission aborted because no resource returned any information" msgstr "" "Перезасылка задачи оборвана, т.к. ни один из ресурсов не предоставил " "информацию" #: src/hed/libs/compute/JobSupervisor.cpp:421 #, c-format msgid "Unable to resubmit job (%s), unable to parse obtained job description" msgstr "" "Не удалось перезаслать задачу (%s), т.к. невозможно разобрать полученное " "описание задачи" #: src/hed/libs/compute/JobSupervisor.cpp:443 #, c-format msgid "" "Unable to resubmit job (%s), target information retrieval failed for target: " "%s" msgstr "" "Невозможно перезапустить задачу (%s), сбой извлечения информации о цели %s" #: src/hed/libs/compute/JobSupervisor.cpp:469 #, c-format msgid "Unable to resubmit job (%s), no targets applicable for submission" msgstr "Невозможно перезапустить задачу (%s), нет подходящих целей" #: src/hed/libs/compute/JobSupervisor.cpp:504 #, c-format msgid "" "Unable to migrate job (%s), job description could not be retrieved remotely" msgstr "" "Невозможно мигрировать задачу (%s), описание задачи не может быть извлечено " "с удалённого источника" #: src/hed/libs/compute/JobSupervisor.cpp:524 msgid "Job migration aborted, no resource returned any information" msgstr "" "Перенаправление задачи оборвано, т.к. ни один из ресурсов не предоставил " "информацию" #: src/hed/libs/compute/JobSupervisor.cpp:536 #, c-format msgid "Job migration aborted, unable to load broker (%s)" msgstr "" "Перенаправление задачи оборвано, невозможно подгрузить планировщик (%s)" #: src/hed/libs/compute/JobSupervisor.cpp:552 #, c-format msgid "Unable to migrate job (%s), unable to parse obtained job description" msgstr "" "Не удалось перенаправить задачу (%s), т.к. 
невозможно разобрать полученное " "описание задачи" #: src/hed/libs/compute/JobSupervisor.cpp:573 #, c-format msgid "Unable to load submission plugin for %s interface" msgstr "" "Невозможно погрузить подключаемый модуль для запуска задач через интерфейс %s" #: src/hed/libs/compute/JobSupervisor.cpp:583 #, c-format msgid "Job migration failed for job (%s), no applicable targets" msgstr "Не удалось перенаправить задачу (%s), возможные назначения отсутствуют" #: src/hed/libs/compute/Software.cpp:65 src/hed/libs/compute/Software.cpp:94 #: src/hed/libs/compute/Software.cpp:113 #, c-format msgid "%s > %s => false" msgstr "%s > %s => неверно" #: src/hed/libs/compute/Software.cpp:70 src/hed/libs/compute/Software.cpp:83 #: src/hed/libs/compute/Software.cpp:107 #, c-format msgid "%s > %s => true" msgstr "%s > %s => верно" #: src/hed/libs/compute/Software.cpp:90 src/hed/libs/compute/Software.cpp:102 #, c-format msgid "%s > %s => false: %s contains non numbers in the version part." msgstr "%s > %s => неверно: %s содержит нецифровые символы в номере версии." #: src/hed/libs/compute/Software.cpp:206 src/hed/libs/compute/Software.cpp:217 #, c-format msgid "Requirement \"%s %s\" NOT satisfied." msgstr "Требование \"%s %s\" НЕ удовлетворено." #: src/hed/libs/compute/Software.cpp:212 #, c-format msgid "Requirement \"%s %s\" satisfied." msgstr "Требование \"%s %s\" удовлетворено." #: src/hed/libs/compute/Software.cpp:221 #, c-format msgid "Requirement \"%s %s\" satisfied by \"%s\"." msgstr "Требование \"%s %s\" удовлетворено \"%s\"." #: src/hed/libs/compute/Software.cpp:226 msgid "All requirements satisfied." msgstr "Все требования удовлетворены." #: src/hed/libs/compute/Submitter.cpp:83 #, c-format msgid "Trying to submit directly to endpoint (%s)" msgstr "Попытка засылки задачи напрямую к точке входа (%s)" #: src/hed/libs/compute/Submitter.cpp:88 #, c-format msgid "Interface (%s) specified, submitting only to that interface" msgstr "Задан интерфейс (%s), засылка производится только через него" #: src/hed/libs/compute/Submitter.cpp:106 msgid "Trying all available interfaces" msgstr "Пробуются все доступные интерфейсы" #: src/hed/libs/compute/Submitter.cpp:112 #, c-format msgid "Trying to submit endpoint (%s) using interface (%s) with plugin (%s)." msgstr "" "Попытка засылки на точку входа (%s) используя интерфейс (%s) с подключаемым " "модулем (%s)." #: src/hed/libs/compute/Submitter.cpp:116 #, c-format msgid "" "Unable to load plugin (%s) for interface (%s) when trying to submit job " "description." msgstr "" "Невозможно подгрузить модуль (%s) для интерфейса (%s) при попытке засылки " "описания задачи." #: src/hed/libs/compute/Submitter.cpp:130 #, c-format msgid "No more interfaces to try for endpoint %s." msgstr "Опробованы все интерфейсы для точки входа %s." #: src/hed/libs/compute/Submitter.cpp:336 #, c-format msgid "Target %s does not match requested interface(s)." msgstr "Назначение %s не соответствует запрошенному интерфейсу." #: src/hed/libs/compute/SubmitterPlugin.cpp:54 msgid "No stagein URL is provided" msgstr "Не указан URL для загрузки" #: src/hed/libs/compute/SubmitterPlugin.cpp:73 #, c-format msgid "Failed uploading file %s to %s: %s" msgstr "Не удалось отгрузить файл %s в %s: %s" #: src/hed/libs/compute/SubmitterPlugin.cpp:103 #, c-format msgid "Trying to migrate to %s: Migration to a %s interface is not supported." msgstr "Попытка миграции на %s: Миграция на интерфейс %s не поддерживается." 
#: src/hed/libs/compute/SubmitterPlugin.cpp:159 #, c-format msgid "SubmitterPlugin plugin \"%s\" not found." msgstr "Подключаемый модуль SubmitterPlugin \"%s\" не обнаружен." #: src/hed/libs/compute/SubmitterPlugin.cpp:169 #, c-format msgid "SubmitterPlugin %s could not be created" msgstr "Подключаемый модуль SubmitterPlugin %s не может быть создан" #: src/hed/libs/compute/SubmitterPlugin.cpp:174 #, c-format msgid "Loaded SubmitterPlugin %s" msgstr "Подгружен SubmitterPlugin %s" #: src/hed/libs/compute/examples/basic_job_submission.cpp:28 msgid "Invalid job description" msgstr "Недопустимое описание задачи" #: src/hed/libs/compute/examples/basic_job_submission.cpp:47 msgid "Failed to submit job" msgstr "Ошибка запуска задачи" #: src/hed/libs/compute/examples/basic_job_submission.cpp:54 #, c-format msgid "Failed to write to local job list %s" msgstr "Ошибка записи в локальный файл списка задач %s" #: src/hed/libs/compute/test_jobdescription.cpp:20 msgid "[job description ...]" msgstr "[описание задачи...]" #: src/hed/libs/compute/test_jobdescription.cpp:21 msgid "" "This tiny tool can be used for testing the JobDescription's conversion " "abilities." msgstr "" "Эта программулечка может быть использована для проверки способностей " "преобразования JobDescription" #: src/hed/libs/compute/test_jobdescription.cpp:23 msgid "" "The job description also can be a file or a string in JDL, POSIX JSDL, JSDL, " "or XRSL format." msgstr "" "Описание задачи может быть также задано файлом или строкой в формате JDL, " "POSIX JSDL, JSDL, или XRSL." #: src/hed/libs/compute/test_jobdescription.cpp:27 msgid "" "define the requested format (nordugrid:jsdl, egee:jdl, nordugrid:xrsl, emies:" "adl)" msgstr "" "укажите запрашиваемый формат (nordugrid:jsdl, egee:jdl, nordugrid:xrsl, " "emies:adl)" #: src/hed/libs/compute/test_jobdescription.cpp:28 msgid "format" msgstr "формат" #: src/hed/libs/compute/test_jobdescription.cpp:33 msgid "show the original job description" msgstr "показать изначальное описание задачи" #: src/hed/libs/compute/test_jobdescription.cpp:43 msgid "Use --help option for detailed usage information" msgstr "Используйте опцию --help для подробного описания" #: src/hed/libs/compute/test_jobdescription.cpp:50 msgid " [ JobDescription tester ] " msgstr " [ тестировщик JobDescription ] " #: src/hed/libs/compute/test_jobdescription.cpp:74 msgid " [ Parsing the original text ] " msgstr " [ Обрабатывается исходный текст ] " #: src/hed/libs/compute/test_jobdescription.cpp:80 msgid "Unable to parse." msgstr "Не удалось обработать." 
#: src/hed/libs/compute/test_jobdescription.cpp:89 msgid " [ egee:jdl ] " msgstr " [ egee:jdl ] " #: src/hed/libs/compute/test_jobdescription.cpp:91 msgid " [ emies:adl ] " msgstr " [ emies:adl ] " #: src/hed/libs/compute/test_jobdescription.cpp:93 msgid " [ nordugrid:jsdl ] " msgstr " [ nordugrid:jsdl ] " #: src/hed/libs/compute/test_jobdescription.cpp:95 msgid " [ nordugrid:xrsl ] " msgstr " [ nordugrid:xrsl ] " #: src/hed/libs/credential/ARCProxyUtil.cpp:138 msgid "VOMS command is empty" msgstr "Пустая команда VOMS" #: src/hed/libs/credential/ARCProxyUtil.cpp:424 #: src/hed/libs/credential/ARCProxyUtil.cpp:1431 msgid "Failed to sign proxy" msgstr "Сбой подписи доверенности" #: src/hed/libs/credential/ARCProxyUtil.cpp:1317 #, c-format msgid "Please choose the NSS database you would use (1-%d): " msgstr "Пожалуйста, выберите базу данных NSS для использования (1-%d):" #: src/hed/libs/credential/ARCProxyUtil.cpp:1353 #: src/hed/libs/credential/ARCProxyUtil.cpp:1460 msgid "Failed to generate X509 request with NSS" msgstr "Сбой создания запроса X509 с помощью NSS" #: src/hed/libs/credential/ARCProxyUtil.cpp:1364 #: src/hed/libs/credential/ARCProxyUtil.cpp:1471 #: src/hed/libs/credential/ARCProxyUtil.cpp:1512 msgid "Failed to create X509 certificate with NSS" msgstr "Сбой создания сертификата X509 с помощью NSS" #: src/hed/libs/credential/ARCProxyUtil.cpp:1376 #: src/hed/libs/credential/ARCProxyUtil.cpp:1483 #: src/hed/libs/credential/ARCProxyUtil.cpp:1536 msgid "Failed to export X509 certificate from NSS DB" msgstr "Сбой сохранения сертификата X509 из базы данных NSS" #: src/hed/libs/credential/ARCProxyUtil.cpp:1519 msgid "Failed to import X509 certificate into NSS DB" msgstr "Сбой импортирования сертификата X509 в базу данных NSS" #: src/hed/libs/credential/ARCProxyUtil.cpp:1528 msgid "Failed to initialize the credential configuration" msgstr "Сбой инициализации настроек параметров доступа" #: src/hed/libs/credential/CertUtil.cpp:166 #, c-format msgid "Error number in store context: %i" msgstr "Номер ошибки в контексте хранилища: %i" #: src/hed/libs/credential/CertUtil.cpp:167 msgid "Self-signed certificate" msgstr "Самостоятельно подписанный сертификат" #: src/hed/libs/credential/CertUtil.cpp:170 #, c-format msgid "The certificate with subject %s is not valid" msgstr "Сертификат с субъектом %s недействителен" #: src/hed/libs/credential/CertUtil.cpp:173 #, c-format msgid "" "Can not find issuer certificate for the certificate with subject %s and " "hash: %lu" msgstr "" "Невозможно найти сертификат агентства, выдавшего сертификат с субъектом %s и " "отпечатком: %lu" #: src/hed/libs/credential/CertUtil.cpp:176 #, c-format msgid "Certificate with subject %s has expired" msgstr "Срок действия сертификата с субъектом %s истёк" #: src/hed/libs/credential/CertUtil.cpp:179 #, c-format msgid "" "Untrusted self-signed certificate in chain with subject %s and hash: %lu" msgstr "" "Цепочка содержит недоверяемый самоподписанный сертификат с субъектом %s и " "отпечатком: %lu" #: src/hed/libs/credential/CertUtil.cpp:181 #, c-format msgid "Certificate verification error: %s" msgstr "Ошибка проверки сертификата: %s" #: src/hed/libs/credential/CertUtil.cpp:193 msgid "Can not get the certificate type" msgstr "Не удалось определить тип сертификата" #: src/hed/libs/credential/CertUtil.cpp:233 msgid "Couldn't verify availability of CRL" msgstr "Невозможно подтвердить доступность списков отзыва сертификатов (CRL)" #: src/hed/libs/credential/CertUtil.cpp:246 msgid "In the available CRL the lastUpdate field is not 
valid" msgstr "" "В доступном списке отзыва сертификатов (CRL) значение lastUpdate " "недействительно" #: src/hed/libs/credential/CertUtil.cpp:253 msgid "The available CRL is not yet valid" msgstr "Доступный список отзыва сертификатов (CRL) пока недействителен" #: src/hed/libs/credential/CertUtil.cpp:262 msgid "In the available CRL, the nextUpdate field is not valid" msgstr "" "В доступном списке отзыва сертификатов (CRL) значение nextUpdate " "недействительно" #: src/hed/libs/credential/CertUtil.cpp:268 msgid "The available CRL has expired" msgstr "Доступный список отзыва сертификатов (CRL) просрочен" #: src/hed/libs/credential/CertUtil.cpp:291 #, c-format msgid "Certificate with serial number %s and subject \"%s\" is revoked" msgstr "Сертификат с серийным номером %s и субъектом \"%s\" отозван" #: src/hed/libs/credential/CertUtil.cpp:309 msgid "" "Directory of trusted CAs is not specified/found; Using current path as the " "CA direcroty" msgstr "" "Каталог доверяемых агентств не указан/найден; в качестве такового " "используется текущий путь" #: src/hed/libs/credential/CertUtil.cpp:318 msgid "Can't allocate memory for CA policy path" msgstr "Невозможно выделить память для пути к файлу политик агентства" #: src/hed/libs/credential/CertUtil.cpp:364 #, c-format msgid "Certificate has unknown extension with numeric ID %u and SN %s" msgstr "" "Сертификат содержит неизвестное расширение с численным идентификатором %u и " "именем субъекта %s" #: src/hed/libs/credential/CertUtil.cpp:378 #: src/hed/libs/credential/Credential.cpp:1693 msgid "" "Can not convert DER encoded PROXY_CERT_INFO_EXTENSION extension to internal " "format" msgstr "" "Невозможно преобразовать расширение PROXY_CERT_INFO_EXTENSION в кодировке " "DER во внутренний формат" #: src/hed/libs/credential/CertUtil.cpp:424 msgid "Trying to check X509 cert with check_cert_type" msgstr "Попытка проверки сертификата X509 с помощью check_cert_type" #: src/hed/libs/credential/CertUtil.cpp:465 msgid "Can't convert DER encoded PROXYCERTINFO extension to internal form" msgstr "" "Невозможно преобразовать расширение PROXYCERTINFO в кодировке DER во " "внутренний формат" #: src/hed/libs/credential/CertUtil.cpp:469 msgid "Can't get policy from PROXYCERTINFO extension" msgstr "Невозможно извлечь политику из расширения PROXYCERTINFO" #: src/hed/libs/credential/CertUtil.cpp:473 msgid "Can't get policy language from PROXYCERTINFO extension" msgstr "Невозможно извлечь язык политики из расширения PROXYCERTINFO" #: src/hed/libs/credential/CertUtil.cpp:505 msgid "The subject does not match the issuer name + proxy CN entry" msgstr "" "Субъект не соответствует имени выдавшего агентства и атрибуту доверенности CN" #: src/hed/libs/credential/Credential.cpp:73 #, c-format msgid "OpenSSL error string: %s" msgstr "Ошибка OpenSSL: %s" #: src/hed/libs/credential/Credential.cpp:196 msgid "Can't get the first byte of input to determine its format" msgstr "" "Невозможно получить первый байт сертификата, чтобы определить его формат" #: src/hed/libs/credential/Credential.cpp:210 msgid "Can't reset the input" msgstr "Невозможно сбросить ввод" #: src/hed/libs/credential/Credential.cpp:236 #: src/hed/libs/credential/Credential.cpp:273 msgid "Can't get the first byte of input BIO to get its format" msgstr "" "Невозможно получить первый байт сертификата, чтобы определить его формат" #: src/hed/libs/credential/Credential.cpp:248 msgid "Can not read certificate/key string" msgstr "Невозможно прочесть строку сертификата/ключа" #: src/hed/libs/credential/Credential.cpp:456 #, 
c-format msgid "Can not find certificate file: %s" msgstr "Не найден файл сертификата: %s" #: src/hed/libs/credential/Credential.cpp:461 #, c-format msgid "Can not read certificate file: %s" msgstr "Не удалось прочитать файл сертификата: %s" #: src/hed/libs/credential/Credential.cpp:499 msgid "Can not read certificate string" msgstr "Не удалось прочитать сертификат" #: src/hed/libs/credential/Credential.cpp:519 msgid "Certificate format is PEM" msgstr "Сертификат в формате PEM" #: src/hed/libs/credential/Credential.cpp:546 msgid "Certificate format is DER" msgstr "Сертификат в формате DER" #: src/hed/libs/credential/Credential.cpp:575 msgid "Certificate format is PKCS" msgstr "Сертификат в формате PKCS" #: src/hed/libs/credential/Credential.cpp:602 msgid "Certificate format is unknown" msgstr "Формат сертификата неизвестен" #: src/hed/libs/credential/Credential.cpp:610 #, c-format msgid "Can not find key file: %s" msgstr "Не удалось обнаружить файл личного ключа: %s" #: src/hed/libs/credential/Credential.cpp:615 #, c-format msgid "Can not open key file %s" msgstr "Не удалось открыть файл личного ключа %s" #: src/hed/libs/credential/Credential.cpp:634 msgid "Can not read key string" msgstr "Не удалось прочитать личный ключ" #: src/hed/libs/credential/Credential.cpp:697 #: src/hed/libs/credential/VOMSUtil.cpp:258 msgid "Failed to lock arccredential library in memory" msgstr "Невозможно заблокировать библиотеку arccredential в памяти" #: src/hed/libs/credential/Credential.cpp:712 msgid "Certificate verification succeeded" msgstr "Успешное подтверждение сертификата" #: src/hed/libs/credential/Credential.cpp:716 msgid "Certificate verification failed" msgstr "Сертификат не подтверждён" #: src/hed/libs/credential/Credential.cpp:731 #: src/hed/libs/credential/Credential.cpp:751 #: src/hed/libs/credential/Credential.cpp:771 #: src/hed/libs/credential/Credential.cpp:1003 #: src/hed/libs/credential/Credential.cpp:2314 #: src/hed/libs/credential/Credential.cpp:2345 msgid "Failed to initialize extensions member for Credential" msgstr "Сбой инициализации элемента расширения для параметров доступа" #: src/hed/libs/credential/Credential.cpp:814 #, c-format msgid "Unsupported proxy policy language is requested - %s" msgstr "Запрошен неподдерживаемый язык политик сертификата доверенности - %s" #: src/hed/libs/credential/Credential.cpp:826 #, c-format msgid "Unsupported proxy version is requested - %s" msgstr "Запрошена неподдерживаемая версия сертификата доверенности - %s" #: src/hed/libs/credential/Credential.cpp:837 msgid "If you specify a policy you also need to specify a policy language" msgstr "Указывая политику, указывайте также её язык" #: src/hed/libs/credential/Credential.cpp:1008 msgid "Certificate/Proxy path is empty" msgstr "Путь к сертификату/доверенности не задан" #: src/hed/libs/credential/Credential.cpp:1067 #: src/hed/libs/credential/Credential.cpp:2856 msgid "Failed to duplicate extension" msgstr "Не удалось скопировать расширение" #: src/hed/libs/credential/Credential.cpp:1071 msgid "Failed to add extension into credential extensions" msgstr "Не удалось добавить расширение к расширениям параметров доступа" #: src/hed/libs/credential/Credential.cpp:1082 msgid "Certificate information collection failed" msgstr "Сбой сбора информации о сертификате" #: src/hed/libs/credential/Credential.cpp:1124 #: src/hed/libs/credential/Credential.cpp:1129 msgid "Can not convert string into ASN1_OBJECT" msgstr "Невозможно преобразовать строку в ASN1_OBJECT" #: src/hed/libs/credential/Credential.cpp:1141 
msgid "Can not create extension for proxy certificate" msgstr "Невозможно создать расширение для доверенности" #: src/hed/libs/credential/Credential.cpp:1180 #: src/hed/libs/credential/Credential.cpp:1348 msgid "BN_set_word failed" msgstr "Сбой метода BN_set_word" #: src/hed/libs/credential/Credential.cpp:1189 #: src/hed/libs/credential/Credential.cpp:1357 msgid "RSA_generate_key_ex failed" msgstr "Сбой метода RSA_generate_key_ex " #: src/hed/libs/credential/Credential.cpp:1198 #: src/hed/libs/credential/Credential.cpp:1365 msgid "BN_new || RSA_new failed" msgstr "Сбой метода BN_new или RSA_new" #: src/hed/libs/credential/Credential.cpp:1209 msgid "Created RSA key, proceeding with request" msgstr "Создан ключ RSA, теперь обрабатывается запрос" #: src/hed/libs/credential/Credential.cpp:1214 msgid "pkey and rsa_key exist!" msgstr "pkey и rsa_key существуют!" #: src/hed/libs/credential/Credential.cpp:1217 msgid "Generate new X509 request!" msgstr "Создайте новый запрос X509!" #: src/hed/libs/credential/Credential.cpp:1222 msgid "Setting subject name!" msgstr "Задаётся имя субъекта!" #: src/hed/libs/credential/Credential.cpp:1230 #: src/hed/libs/credential/Credential.cpp:1444 msgid "PEM_write_bio_X509_REQ failed" msgstr "Сбой PEM_write_bio_X509_REQ" #: src/hed/libs/credential/Credential.cpp:1260 #: src/hed/libs/credential/Credential.cpp:1301 #: src/hed/libs/credential/Credential.cpp:1476 #: src/hed/libs/credential/Credential.cpp:1496 msgid "Can not create BIO for request" msgstr "Невозможно создать BIO для запроса" #: src/hed/libs/credential/Credential.cpp:1278 msgid "Failed to write request into string" msgstr "Не удалось записать запрос в строку" #: src/hed/libs/credential/Credential.cpp:1305 #: src/hed/libs/credential/Credential.cpp:1310 #: src/hed/libs/credential/Credential.cpp:1500 msgid "Can not set writable file for request BIO" msgstr "Невозможно создать записываемый файл для BIO запроса" #: src/hed/libs/credential/Credential.cpp:1316 #: src/hed/libs/credential/Credential.cpp:1505 msgid "Wrote request into a file" msgstr "Запрос записан в файл" #: src/hed/libs/credential/Credential.cpp:1318 #: src/hed/libs/credential/Credential.cpp:1508 msgid "Failed to write request into a file" msgstr "Не удалось записать запрос в файл" #: src/hed/libs/credential/Credential.cpp:1338 msgid "The credential's private key has already been initialized" msgstr "Закрытый ключ параметров доступа уже инициализирован" #: src/hed/libs/credential/Credential.cpp:1386 msgid "" "Can not duplicate the subject name for the self-signing proxy certificate " "request" msgstr "" "Невозможно дублировать имя субъекта для запроса самозаверяющей доверенности" #: src/hed/libs/credential/Credential.cpp:1396 msgid "Can not create a new X509_NAME_ENTRY for the proxy certificate request" msgstr "" "Невозможно создать новую переменную X509_NAME_ENTRY для запроса доверенности" #: src/hed/libs/credential/Credential.cpp:1414 #: src/hed/libs/credential/Credential.cpp:1421 #: src/hed/libs/credential/Credential.cpp:1943 #: src/hed/libs/credential/Credential.cpp:1951 msgid "" "Can not convert PROXY_CERT_INFO_EXTENSION struct from internal to DER " "encoded format" msgstr "" "Невозможно преобразовать структуру PROXY_CERT_INFO_EXTENSION из внутреннего " "формата в DER" #: src/hed/libs/credential/Credential.cpp:1451 msgid "Can't convert X509 request from internal to DER encoded format" msgstr "Невозможно преобразовать запрос X509 из внутреннего формата в DER" #: src/hed/libs/credential/Credential.cpp:1461 msgid "Can not generate X509 
request" msgstr "Не удалось создать запрос X509" #: src/hed/libs/credential/Credential.cpp:1463 msgid "Can not set private key" msgstr "Не удалось задать закрытый ключ" #: src/hed/libs/credential/Credential.cpp:1561 msgid "Failed to get private key" msgstr "Не удалось получить закрытый ключ" #: src/hed/libs/credential/Credential.cpp:1580 msgid "Failed to get public key from RSA object" msgstr "Невозможно извлечь открытый ключ из объекта RSA" #: src/hed/libs/credential/Credential.cpp:1588 msgid "Failed to get public key from X509 object" msgstr "Невозможно извлечь открытый ключ из объекта X509" #: src/hed/libs/credential/Credential.cpp:1595 msgid "Failed to get public key" msgstr "Не удалось получить открытый ключ" #: src/hed/libs/credential/Credential.cpp:1634 #, c-format msgid "Certiticate chain number %d" msgstr "Номер цепочки сертификатов %d" #: src/hed/libs/credential/Credential.cpp:1662 msgid "NULL BIO passed to InquireRequest" msgstr "NULL BIO передан в InquireRequest" #: src/hed/libs/credential/Credential.cpp:1665 msgid "PEM_read_bio_X509_REQ failed" msgstr "Сбой PEM_read_bio_X509_REQ" #: src/hed/libs/credential/Credential.cpp:1669 msgid "d2i_X509_REQ_bio failed" msgstr "Сбой d2i_X509_REQ_bio" #: src/hed/libs/credential/Credential.cpp:1702 msgid "Can not get policy from PROXY_CERT_INFO_EXTENSION extension" msgstr "Невозможно извлечь политику из расширения PROXY_CERT_INFO_EXTENSION" #: src/hed/libs/credential/Credential.cpp:1706 msgid "Can not get policy language from PROXY_CERT_INFO_EXTENSION extension" msgstr "" "Невозможно извлечь язык политики из расширения PROXY_CERT_INFO_EXTENSION" #: src/hed/libs/credential/Credential.cpp:1722 #, c-format msgid "Cert Type: %d" msgstr "Тип сертификата: %d" #: src/hed/libs/credential/Credential.cpp:1735 #: src/hed/libs/credential/Credential.cpp:1754 msgid "Can not create BIO for parsing request" msgstr "Невозможно создать BIO для разбора запроса" #: src/hed/libs/credential/Credential.cpp:1740 msgid "Read request from a string" msgstr "Чтение запроса из строки" #: src/hed/libs/credential/Credential.cpp:1743 msgid "Failed to read request from a string" msgstr "Сбой при чтении запроса из строки" #: src/hed/libs/credential/Credential.cpp:1758 msgid "Can not set readable file for request BIO" msgstr "Невозможно открыть на чтение файл для запроса BIO" #: src/hed/libs/credential/Credential.cpp:1763 msgid "Read request from a file" msgstr "Прочесть запрос из файла" #: src/hed/libs/credential/Credential.cpp:1766 msgid "Failed to read request from a file" msgstr "Произошёл сбой при чтении запроса из файла" #: src/hed/libs/credential/Credential.cpp:1806 msgid "Can not convert private key to DER format" msgstr "Невозможно преобразовать закрытый ключ в формат DER" #: src/hed/libs/credential/Credential.cpp:1924 msgid "Credential is not initialized" msgstr "Параметры доступа не инициализированы" #: src/hed/libs/credential/Credential.cpp:1930 msgid "Failed to duplicate X509 structure" msgstr "Не удалось скопировать структуру X509" #: src/hed/libs/credential/Credential.cpp:1935 msgid "Failed to initialize X509 structure" msgstr "Не удалось инициализировать структуру X509" #: src/hed/libs/credential/Credential.cpp:1958 msgid "Can not create extension for PROXY_CERT_INFO" msgstr "Невозможно создать расширение для PROXY_CERT_INFO" #: src/hed/libs/credential/Credential.cpp:1962 #: src/hed/libs/credential/Credential.cpp:2010 msgid "Can not add X509 extension to proxy cert" msgstr "Невозможно добавить расширение X509 к доверенности" #: 
src/hed/libs/credential/Credential.cpp:1978 msgid "Can not convert keyUsage struct from DER encoded format" msgstr "Невозможно преобразовать структуру keyUsage из формата кодировки DER" #: src/hed/libs/credential/Credential.cpp:1990 #: src/hed/libs/credential/Credential.cpp:1999 msgid "Can not convert keyUsage struct from internal to DER format" msgstr "" "Невозможно преобразовать структуру keyUsage из внутреннего формата в DER" #: src/hed/libs/credential/Credential.cpp:2006 msgid "Can not create extension for keyUsage" msgstr "Невозможно создать расширение для keyUsage" #: src/hed/libs/credential/Credential.cpp:2019 msgid "Can not get extended KeyUsage extension from issuer certificate" msgstr "" "Невозможно получить расширенное расширение KeyUsage из сертификата агентства" #: src/hed/libs/credential/Credential.cpp:2024 msgid "Can not copy extended KeyUsage extension" msgstr "Невозможно скопировать расширенное расширение KeyUsage" #: src/hed/libs/credential/Credential.cpp:2029 msgid "Can not add X509 extended KeyUsage extension to new proxy certificate" msgstr "" "Невозможно добавить расширенное X509 расширение KeyUsage к новой доверенности" #: src/hed/libs/credential/Credential.cpp:2039 msgid "Can not compute digest of public key" msgstr "Не удалось вычислить дайджест открытого ключа" #: src/hed/libs/credential/Credential.cpp:2050 msgid "Can not copy the subject name from issuer for proxy certificate" msgstr "Невозможно скопировать имя субъекта выдающего агентства в доверенность" #: src/hed/libs/credential/Credential.cpp:2056 msgid "Can not create name entry CN for proxy certificate" msgstr "Невозможно создать компонент названия CN для доверенности" #: src/hed/libs/credential/Credential.cpp:2061 msgid "Can not set CN in proxy certificate" msgstr "Невозможно задать элемент CN в доверенности" #: src/hed/libs/credential/Credential.cpp:2069 msgid "Can not set issuer's subject for proxy certificate" msgstr "Невозможно задать имя выдающего агентства в доверенности" #: src/hed/libs/credential/Credential.cpp:2074 msgid "Can not set version number for proxy certificate" msgstr "Невозможно задать номер версии в доверенности" #: src/hed/libs/credential/Credential.cpp:2082 msgid "Can not set serial number for proxy certificate" msgstr "Невозможно задать серийный номер в доверенности" #: src/hed/libs/credential/Credential.cpp:2088 msgid "Can not duplicate serial number for proxy certificate" msgstr "Невозможно скопировать серийный номер доверенности" #: src/hed/libs/credential/Credential.cpp:2094 msgid "Can not set the lifetime for proxy certificate" msgstr "Невозможно задать срок годности доверенности" #: src/hed/libs/credential/Credential.cpp:2098 msgid "Can not set pubkey for proxy certificate" msgstr "Невозможно задать открытый ключ доверенности" #: src/hed/libs/credential/Credential.cpp:2114 #: src/hed/libs/credential/Credential.cpp:2744 msgid "The credential to be signed is NULL" msgstr "Параметры доступа для подписи имеют значение NULL" #: src/hed/libs/credential/Credential.cpp:2118 #: src/hed/libs/credential/Credential.cpp:2748 msgid "The credential to be signed contains no request" msgstr "Параметры доступа для подписи не содержат запроса" #: src/hed/libs/credential/Credential.cpp:2122 #: src/hed/libs/credential/Credential.cpp:2752 msgid "The BIO for output is NULL" msgstr "BIO для выхода: NULL" #: src/hed/libs/credential/Credential.cpp:2136 #: src/hed/libs/credential/Credential.cpp:2759 msgid "Error when extracting public key from request" msgstr "Ошибка при извлечении открытого ключа из 
запроса" #: src/hed/libs/credential/Credential.cpp:2141 #: src/hed/libs/credential/Credential.cpp:2763 msgid "Failed to verify the request" msgstr "Не удалось подтвердить запрос" #: src/hed/libs/credential/Credential.cpp:2145 msgid "Failed to add issuer's extension into proxy" msgstr "Сбой добавления расширения выдающего агентства в доверенность" #: src/hed/libs/credential/Credential.cpp:2169 msgid "Failed to find extension" msgstr "Не удалось найти расширение" #: src/hed/libs/credential/Credential.cpp:2181 msgid "Can not get the issuer's private key" msgstr "Невозможно извлечь закрытый ключ выдающего агентства" #: src/hed/libs/credential/Credential.cpp:2188 #: src/hed/libs/credential/Credential.cpp:2796 msgid "There is no digest in issuer's private key object" msgstr "В объекте закрытого ключа издателя отсутствует профиль" #: src/hed/libs/credential/Credential.cpp:2193 #: src/hed/libs/credential/Credential.cpp:2800 #, c-format msgid "%s is an unsupported digest type" msgstr "%s не является поддерживаемым типом профиля" #: src/hed/libs/credential/Credential.cpp:2204 #, c-format msgid "" "The signing algorithm %s is not allowed,it should be SHA1 or SHA2 to sign " "certificate requests" msgstr "" "Недопустимый алгоритм подписи %s: запросы сертификата должны подписываться " "SHA1 или SHA2" #: src/hed/libs/credential/Credential.cpp:2210 msgid "Failed to sign the proxy certificate" msgstr "Не удалось подписать доверенность" #: src/hed/libs/credential/Credential.cpp:2212 msgid "Succeeded to sign the proxy certificate" msgstr "Доверенность успешно подписана" #: src/hed/libs/credential/Credential.cpp:2217 msgid "Failed to verify the signed certificate" msgstr "Сбой проверки подписанного сертификата" #: src/hed/libs/credential/Credential.cpp:2219 msgid "Succeeded to verify the signed certificate" msgstr "Подписанный сертификат успешно проверен" #: src/hed/libs/credential/Credential.cpp:2224 #: src/hed/libs/credential/Credential.cpp:2233 msgid "Output the proxy certificate" msgstr "Вывод доверенности" #: src/hed/libs/credential/Credential.cpp:2227 msgid "Can not convert signed proxy cert into PEM format" msgstr "Невозможно преобразовать подписанную доверенность в формат PEM" #: src/hed/libs/credential/Credential.cpp:2236 msgid "Can not convert signed proxy cert into DER format" msgstr "Невозможно преобразовать подписанную доверенность в формат DER" #: src/hed/libs/credential/Credential.cpp:2252 #: src/hed/libs/credential/Credential.cpp:2275 msgid "Can not create BIO for signed proxy certificate" msgstr "" "Невозможно создать неформатированный ввод/вывод BIO для подписанной " "доверенности" #: src/hed/libs/credential/Credential.cpp:2279 msgid "Can not set writable file for signed proxy certificate BIO" msgstr "" "Невозможно открыть на запись файл для неформатированного ввода/вывода " "подписанной доверенности" #: src/hed/libs/credential/Credential.cpp:2284 msgid "Wrote signed proxy certificate into a file" msgstr "Подписанная доверенность записана в файл" #: src/hed/libs/credential/Credential.cpp:2287 msgid "Failed to write signed proxy certificate into a file" msgstr "Сбой записи подписанной доверенности в файл" #: src/hed/libs/credential/Credential.cpp:2323 #: src/hed/libs/credential/Credential.cpp:2363 #, c-format msgid "ERROR:%s" msgstr "ОШИБКА:%s" #: src/hed/libs/credential/Credential.cpp:2371 #, c-format msgid "SSL error: %s, libs: %s, func: %s, reason: %s" msgstr "Ошибка SSL: %s, libs: %s, func: %s, причина: %s" #: src/hed/libs/credential/Credential.cpp:2416 #, c-format msgid "unable to load 
number from: %s" msgstr "невозможно прочесть номер из: %s" #: src/hed/libs/credential/Credential.cpp:2421 msgid "error converting number from bin to BIGNUM" msgstr "ошибка преобразования числа из bin в BIGNUM" #: src/hed/libs/credential/Credential.cpp:2448 msgid "file name too long" msgstr "слишком длинное имя файла" #: src/hed/libs/credential/Credential.cpp:2471 msgid "error converting serial to ASN.1 format" msgstr "ошибка преобразования серийного номера в формат ASN.1" #: src/hed/libs/credential/Credential.cpp:2504 #, c-format msgid "load serial from %s failure" msgstr "сбой чтения серийного номера из %s" #: src/hed/libs/credential/Credential.cpp:2509 msgid "add_word failure" msgstr "Сбой add_word" #: src/hed/libs/credential/Credential.cpp:2514 #, c-format msgid "save serial to %s failure" msgstr "сбой записи серийного номера в %s" #: src/hed/libs/credential/Credential.cpp:2534 msgid "Error initialising X509 store" msgstr "Ошибка при инициализации хранилища X509" #: src/hed/libs/credential/Credential.cpp:2541 msgid "Out of memory when generate random serial" msgstr "Недостаточно памяти для создания случайного серийного номера" #: src/hed/libs/credential/Credential.cpp:2553 msgid "CA certificate and CA private key do not match" msgstr "Сертификат и закрытый ключ агентства не совпадают" #: src/hed/libs/credential/Credential.cpp:2577 #, c-format msgid "Failed to load extension section: %s" msgstr "Сбой загрузки раздела расширений: %s" #: src/hed/libs/credential/Credential.cpp:2614 msgid "malloc error" msgstr "ошибка malloc" #: src/hed/libs/credential/Credential.cpp:2618 msgid "Subject does not start with '/'" msgstr "Субъект не начинается с '/'" #: src/hed/libs/credential/Credential.cpp:2634 #: src/hed/libs/credential/Credential.cpp:2655 msgid "escape character at end of string" msgstr "символ выхода в конце строки" #: src/hed/libs/credential/Credential.cpp:2646 #, c-format msgid "" "end of string encountered while processing type of subject name element #%d" msgstr "достигнут конец строки при обработке типа элемента имени субъекта #%d" #: src/hed/libs/credential/Credential.cpp:2683 #, c-format msgid "Subject Attribute %s has no known NID, skipped" msgstr "Атрибут субъекта %s не содержит известного NID, пропускается" #: src/hed/libs/credential/Credential.cpp:2687 #, c-format msgid "No value provided for Subject Attribute %s skipped" msgstr "Не задана значение атрибута субъекта %s, пропускается" #: src/hed/libs/credential/Credential.cpp:2729 msgid "Failed to set the pubkey for X509 object by using pubkey from X509_REQ" msgstr "" "Не удалось задать открытый ключ для объекта X509 используя открытый ключ из " "X509_REQ" #: src/hed/libs/credential/Credential.cpp:2739 msgid "The private key for signing is not initialized" msgstr "Закрытый ключ для подписи не инициализирован" #: src/hed/libs/credential/Credential.cpp:2819 #, c-format msgid "Error when loading the extension config file: %s" msgstr "Ошибка при загрузке файла настроек расширений: %s" #: src/hed/libs/credential/Credential.cpp:2823 #, c-format msgid "Error when loading the extension config file: %s on line: %d" msgstr "Ошибка при загрузке файла настроек расширений: %s в строке: %d" #: src/hed/libs/credential/Credential.cpp:2872 msgid "Can not sign a EEC" msgstr "Невозможно подписать EEC" #: src/hed/libs/credential/Credential.cpp:2876 msgid "Output EEC certificate" msgstr "Вывод сертификата EEC" #: src/hed/libs/credential/Credential.cpp:2879 msgid "Can not convert signed EEC cert into DER format" msgstr "Невозможно преобразовать 
подписанный сертификат EEC в формат DER" #: src/hed/libs/credential/Credential.cpp:2893 #: src/hed/libs/credential/Credential.cpp:2912 msgid "Can not create BIO for signed EEC certificate" msgstr "" "Невозможно создать неформатированный ввод/вывод BIO для подписанного " "сертификата EEC" #: src/hed/libs/credential/Credential.cpp:2916 msgid "Can not set writable file for signed EEC certificate BIO" msgstr "" "Невозможно открыть на запись файл для неформатированного ввода/вывода " "подписанного сертификата EEC" #: src/hed/libs/credential/Credential.cpp:2921 msgid "Wrote signed EEC certificate into a file" msgstr "Подписанный сертификат EEC записан в файл" #: src/hed/libs/credential/Credential.cpp:2924 msgid "Failed to write signed EEC certificate into a file" msgstr "Сбой записи подписанного сертификата EEC в файл" #: src/hed/libs/credential/NSSUtil.cpp:147 msgid "Error writing raw certificate" msgstr "Ошибка записи исходного сертификата" #: src/hed/libs/credential/NSSUtil.cpp:224 msgid "Failed to add RFC proxy OID" msgstr "Не удалось добавить OID доверенности RFC" #: src/hed/libs/credential/NSSUtil.cpp:227 #, c-format msgid "Succeeded to add RFC proxy OID, tag %d is returned" msgstr "Успешно добавлен OID доверенности RFC, возвращена метка %d" #: src/hed/libs/credential/NSSUtil.cpp:233 msgid "Failed to add anyLanguage OID" msgstr "Не удалось добавить anyLanguage OID " #: src/hed/libs/credential/NSSUtil.cpp:236 #: src/hed/libs/credential/NSSUtil.cpp:254 #, c-format msgid "Succeeded to add anyLanguage OID, tag %d is returned" msgstr "Успешно добавлен OID anyLanguage, возвращена метка %d" #: src/hed/libs/credential/NSSUtil.cpp:242 msgid "Failed to add inheritAll OID" msgstr "Не удалось добавить inheritAll OID" #: src/hed/libs/credential/NSSUtil.cpp:245 #, c-format msgid "Succeeded to add inheritAll OID, tag %d is returned" msgstr "Успешно добавлен OID inheritAll, возвращена метка %d" #: src/hed/libs/credential/NSSUtil.cpp:251 msgid "Failed to add Independent OID" msgstr "Не удалось добавить Independent OID" #: src/hed/libs/credential/NSSUtil.cpp:260 msgid "Failed to add VOMS AC sequence OID" msgstr "Не удалось добавить OID последовательности VOMS AC" #: src/hed/libs/credential/NSSUtil.cpp:263 #, c-format msgid "Succeeded to add VOMS AC sequence OID, tag %d is returned" msgstr "Успешно добавлен OID последовательности VOMS AC, возвращена метка %d " #: src/hed/libs/credential/NSSUtil.cpp:292 #, c-format msgid "NSS initialization failed on certificate database: %s" msgstr "Инициализация NSS оборвалась на базе данных сертификатов: %s" #: src/hed/libs/credential/NSSUtil.cpp:303 msgid "Succeeded to initialize NSS" msgstr "NSS успешно инициализирован" #: src/hed/libs/credential/NSSUtil.cpp:325 #, c-format msgid "Failed to read attribute %x from private key." msgstr "Не удалось прочесть атрибут %x из закрытого ключа" #: src/hed/libs/credential/NSSUtil.cpp:377 msgid "Succeeded to get credential" msgstr "Параметры доступа получены" #: src/hed/libs/credential/NSSUtil.cpp:378 msgid "Failed to get credential" msgstr "Не удалось получить параметры доступа" #: src/hed/libs/credential/NSSUtil.cpp:440 msgid "p12 file is empty" msgstr "Файл сертификата p12 пуст." 
#: src/hed/libs/credential/NSSUtil.cpp:450 msgid "Unable to write to p12 file" msgstr "Сбой записи в файл p12" #: src/hed/libs/credential/NSSUtil.cpp:466 msgid "Failed to open pk12 file" msgstr "Сбой при открытии файла pk12" #: src/hed/libs/credential/NSSUtil.cpp:501 msgid "Failed to allocate p12 context" msgstr "Не удалось зарезервировать контекст p12" #: src/hed/libs/credential/NSSUtil.cpp:1211 msgid "Failed to find issuer certificate for proxy certificate" msgstr "Не удалось обнаружить агентство, выдавшее сертификат доверенности" #: src/hed/libs/credential/NSSUtil.cpp:1362 #, c-format msgid "Failed to authenticate to PKCS11 slot %s" msgstr "Сбой проверки подлинности для ячейки PKCS11 %s" #: src/hed/libs/credential/NSSUtil.cpp:1368 #, c-format msgid "Failed to find certificates by nickname: %s" msgstr "Не удалось обнаружить сертификат по краткому имени: %s" #: src/hed/libs/credential/NSSUtil.cpp:1373 #, c-format msgid "No user certificate by nickname %s found" msgstr "Не удалось обнаружить сертификат пользователя с кратким именем %s" #: src/hed/libs/credential/NSSUtil.cpp:1386 #: src/hed/libs/credential/NSSUtil.cpp:1422 msgid "Certificate does not have a slot" msgstr "У сертификата нет ячейки" #: src/hed/libs/credential/NSSUtil.cpp:1392 msgid "Failed to create export context" msgstr "Не удалось создать контекст для экспорта" #: src/hed/libs/credential/NSSUtil.cpp:1407 msgid "PKCS12 output password not provided" msgstr "Не задан пароль для нового сертификата PKCS12" #: src/hed/libs/credential/NSSUtil.cpp:1414 msgid "PKCS12 add password integrity failed" msgstr "Не удалось задать способ проверки целостности PKCS12 и пароля" #: src/hed/libs/credential/NSSUtil.cpp:1435 msgid "Failed to create key or certificate safe" msgstr "" "Не удалось создать безопасное хранилище для закрытого ключа или сертификата" #: src/hed/libs/credential/NSSUtil.cpp:1451 msgid "Failed to add certificate and key" msgstr "Не удалось добавить закрытый ключ и сертификат" #: src/hed/libs/credential/NSSUtil.cpp:1460 #, c-format msgid "Failed to initialize PKCS12 file: %s" msgstr "Не удалось инициализировать файл PKCS12: %s" #: src/hed/libs/credential/NSSUtil.cpp:1465 msgid "Failed to encode PKCS12" msgstr "Не удалось закодировать в формат PKCS12" #: src/hed/libs/credential/NSSUtil.cpp:1468 msgid "Succeeded to export PKCS12" msgstr "Удалось извлечь сертификат в формате PKCS12" #: src/hed/libs/credential/NSSUtil.cpp:1496 #, c-format msgid "" "There is no certificate named %s found, the certificate could be removed " "when generating CSR" msgstr "" "Не найден сертификат с именем %s, сертификат мог быть удалён при создании CSR" #: src/hed/libs/credential/NSSUtil.cpp:1502 msgid "Failed to delete certificate" msgstr "Не удалось уничтожить сертификат" #: src/hed/libs/credential/NSSUtil.cpp:1516 msgid "The name of the private key to delete is empty" msgstr "Имя закрытого ключа для уничтожения пусто" #: src/hed/libs/credential/NSSUtil.cpp:1521 #: src/hed/libs/credential/NSSUtil.cpp:1605 #, c-format msgid "Failed to authenticate to token %s." msgstr "Не удалось аутентифицироваться к маркёру %s." 
#: src/hed/libs/credential/NSSUtil.cpp:1528 #, c-format msgid "No private key with nickname %s exist in NSS database" msgstr "Закрытый ключ с именем %s отсутствует в базе данных NSS" #: src/hed/libs/credential/NSSUtil.cpp:1561 msgid "Failed to delete private key and certificate" msgstr "Не удалось уничтожить закрытый ключ и сертификат" #: src/hed/libs/credential/NSSUtil.cpp:1571 msgid "Failed to delete private key" msgstr "Не удалось уничтожить закрытый ключ" #: src/hed/libs/credential/NSSUtil.cpp:1582 #, c-format msgid "Can not find key with name: %s" msgstr "Не удалось найти закрытый ключ по имени: %s" #: src/hed/libs/credential/NSSUtil.cpp:1616 #, c-format msgid "Failed to delete private key that attaches to certificate: %s" msgstr "Сбой уничтожения закрытого ключа, прикрепляемого к сертификату: %s" #: src/hed/libs/credential/NSSUtil.cpp:1637 msgid "Can not read PEM private key: probably bad password" msgstr "" "Невозможно прочесть закрытый ключ PEM: возможно, введён неверный пароль" #: src/hed/libs/credential/NSSUtil.cpp:1639 msgid "Can not read PEM private key: failed to decrypt" msgstr "Сбой при чтении файла личного ключа PEM: не удалось расшифровать" #: src/hed/libs/credential/NSSUtil.cpp:1641 #: src/hed/libs/credential/NSSUtil.cpp:1643 msgid "Can not read PEM private key: failed to obtain password" msgstr "Сбой при чтении файла личного ключа PEM: не был введён пароль" #: src/hed/libs/credential/NSSUtil.cpp:1644 msgid "Can not read PEM private key" msgstr "Не удалось прочесть закрытый ключ PEM" #: src/hed/libs/credential/NSSUtil.cpp:1651 msgid "Failed to convert EVP_PKEY to PKCS8" msgstr "Не удалось преобразовать EVP_PKEY в PKCS8" #: src/hed/libs/credential/NSSUtil.cpp:1688 msgid "Failed to load private key" msgstr "Не удалось загрузить закрытый ключ" #: src/hed/libs/credential/NSSUtil.cpp:1689 msgid "Succeeded to load PrivateKeyInfo" msgstr "Успешно подгружен PrivateKeyInfo" #: src/hed/libs/credential/NSSUtil.cpp:1692 msgid "Failed to convert PrivateKeyInfo to EVP_PKEY" msgstr "Сбой преобразования PrivateKeyInfo в EVP_PKEY" #: src/hed/libs/credential/NSSUtil.cpp:1693 msgid "Succeeded to convert PrivateKeyInfo to EVP_PKEY" msgstr "Успешное преобразование PrivateKeyInfo в EVP_PKEY" #: src/hed/libs/credential/NSSUtil.cpp:1730 msgid "Failed to import private key" msgstr "Не удалось получить закрытый ключ" #: src/hed/libs/credential/NSSUtil.cpp:1733 msgid "Succeeded to import private key" msgstr "Закрытый ключ успешно получен" #: src/hed/libs/credential/NSSUtil.cpp:1746 #: src/hed/libs/credential/NSSUtil.cpp:1788 #: src/hed/libs/credential/NSSUtil.cpp:2920 msgid "Failed to authenticate to key database" msgstr "Сбой проверки подлинности на базе данных ключей" #: src/hed/libs/credential/NSSUtil.cpp:1755 msgid "Succeeded to generate public/private key pair" msgstr "Успешное создание пары открытого/закрытого ключей" #: src/hed/libs/credential/NSSUtil.cpp:1757 msgid "Failed to generate public/private key pair" msgstr "Сбой создания пары открытого/закрытого ключей" #: src/hed/libs/credential/NSSUtil.cpp:1762 msgid "Failed to export private key" msgstr "Не удалось сохранить закрытый ключ" #: src/hed/libs/credential/NSSUtil.cpp:1829 msgid "Failed to create subject name" msgstr "Не удалось сформировать имя субъекта" #: src/hed/libs/credential/NSSUtil.cpp:1845 msgid "Failed to create certificate request" msgstr "Не удалось создать запрос сертификата" #: src/hed/libs/credential/NSSUtil.cpp:1858 msgid "Failed to call PORT_NewArena" msgstr "Не удалось вызвать PORT_NewArena" #: 
src/hed/libs/credential/NSSUtil.cpp:1866 msgid "Failed to encode the certificate request with DER format" msgstr "Сбой кодирования запроса сертификата в формате DER" #: src/hed/libs/credential/NSSUtil.cpp:1873 msgid "Unknown key or hash type" msgstr "Неизвестный ключ или тип хеширования" #: src/hed/libs/credential/NSSUtil.cpp:1879 msgid "Failed to sign the certificate request" msgstr "Не удалось подписать запрос сертификата" #: src/hed/libs/credential/NSSUtil.cpp:1895 msgid "Failed to output the certificate request as ASCII format" msgstr "Сбой вывода запроса сертификата в формате ASCII" #: src/hed/libs/credential/NSSUtil.cpp:1904 msgid "Failed to output the certificate request as DER format" msgstr "Сбой вывода запроса сертификата в формате DER" #: src/hed/libs/credential/NSSUtil.cpp:1913 #, c-format msgid "Succeeded to output the certificate request into %s" msgstr "Успешный вывод запроса сертификата в %s" #: src/hed/libs/credential/NSSUtil.cpp:1952 #: src/hed/libs/credential/NSSUtil.cpp:1989 msgid "Failed to read data from input file" msgstr "Невозможно прочитать данные из входного файла" #: src/hed/libs/credential/NSSUtil.cpp:1968 msgid "Input is without trailer\n" msgstr "Входные данные не содержат строки окончания\n" #: src/hed/libs/credential/NSSUtil.cpp:1979 msgid "Failed to convert ASCII to DER" msgstr "Не удалось преобразовать ASCII в DER" #: src/hed/libs/credential/NSSUtil.cpp:2030 msgid "Certificate request is invalid" msgstr "Недопустимый запрос сертификата" #: src/hed/libs/credential/NSSUtil.cpp:2252 #, c-format msgid "The policy language: %s is not supported" msgstr "Язык политик %s не поддерживается" #: src/hed/libs/credential/NSSUtil.cpp:2260 #: src/hed/libs/credential/NSSUtil.cpp:2285 #: src/hed/libs/credential/NSSUtil.cpp:2308 #: src/hed/libs/credential/NSSUtil.cpp:2330 msgid "Failed to new arena" msgstr "Сбой выделения новой области" #: src/hed/libs/credential/NSSUtil.cpp:2269 #: src/hed/libs/credential/NSSUtil.cpp:2294 msgid "Failed to create path length" msgstr "Сбой создания длины пути" #: src/hed/libs/credential/NSSUtil.cpp:2272 #: src/hed/libs/credential/NSSUtil.cpp:2297 #: src/hed/libs/credential/NSSUtil.cpp:2317 #: src/hed/libs/credential/NSSUtil.cpp:2339 msgid "Failed to create policy language" msgstr "Сбой создания языка политик" #: src/hed/libs/credential/NSSUtil.cpp:2738 #, c-format msgid "Failed to parse certificate request from CSR file %s" msgstr "Сбой обработки запроса сертификата из файла CSR %s" #: src/hed/libs/credential/NSSUtil.cpp:2745 #, c-format msgid "Can not find certificate with name %s" msgstr "Не удалось найти сертификат с именем %s" #: src/hed/libs/credential/NSSUtil.cpp:2776 #, c-format msgid "Proxy subject: %s" msgstr "Имя субъекта доверенности: %s" #: src/hed/libs/credential/NSSUtil.cpp:2795 msgid "Failed to start certificate extension" msgstr "Сбой начала создания расширения сертификата" #: src/hed/libs/credential/NSSUtil.cpp:2800 msgid "Failed to add key usage extension" msgstr "Сбой добавления расширения об использовании ключа" #: src/hed/libs/credential/NSSUtil.cpp:2805 msgid "Failed to add proxy certificate information extension" msgstr "Сбой добавления расширения об информации сертификата доверенности" #: src/hed/libs/credential/NSSUtil.cpp:2809 msgid "Failed to add voms AC extension" msgstr "Сбой добавления расширения VOMS AC" #: src/hed/libs/credential/NSSUtil.cpp:2829 msgid "Failed to retrieve private key for issuer" msgstr "Сбой извлечения файла закрытого ключа издателя" #: src/hed/libs/credential/NSSUtil.cpp:2836 msgid 
"Unknown key or hash type of issuer" msgstr "Неизвестный ключ или тип хеширования издателя сертификата" #: src/hed/libs/credential/NSSUtil.cpp:2842 msgid "Failed to set signature algorithm ID" msgstr "Сбой задания ID алгоритма подписи" #: src/hed/libs/credential/NSSUtil.cpp:2854 msgid "Failed to encode certificate" msgstr "Ошибка шифрования сертификата" #: src/hed/libs/credential/NSSUtil.cpp:2860 msgid "Failed to allocate item for certificate data" msgstr "Не удалось зарезервировать элемент для данных о сертификате" #: src/hed/libs/credential/NSSUtil.cpp:2866 msgid "Failed to sign encoded certificate data" msgstr "Сбой подписи данных зашифрованного сертификата" #: src/hed/libs/credential/NSSUtil.cpp:2875 #, c-format msgid "Failed to open file %s" msgstr "Не удалось открыть файл %s" #: src/hed/libs/credential/NSSUtil.cpp:2886 #, c-format msgid "Succeeded to output certificate to %s" msgstr "Успешный вывод сертификата в %s" #: src/hed/libs/credential/NSSUtil.cpp:2927 #, c-format msgid "Failed to open input certificate file %s" msgstr "Сбой открытия файла входного сертификата %s" #: src/hed/libs/credential/NSSUtil.cpp:2945 msgid "Failed to read input certificate file" msgstr "Сбой чтения файла входного сертификата" #: src/hed/libs/credential/NSSUtil.cpp:2950 msgid "Failed to get certificate from certificate file" msgstr "Сбой извлечения сертификата из файла" #: src/hed/libs/credential/NSSUtil.cpp:2957 msgid "Failed to allocate certificate trust" msgstr "Сбой резервирования доверительных отношений сертификата" #: src/hed/libs/credential/NSSUtil.cpp:2962 msgid "Failed to decode trust string" msgstr "Сбой расшифровки описания доверительных отношений" #: src/hed/libs/credential/NSSUtil.cpp:2971 #: src/hed/libs/credential/NSSUtil.cpp:2988 #, c-format msgid "Failed to authenticate to token %s" msgstr "Не удалось аутентифицироваться к маркёру %s" #: src/hed/libs/credential/NSSUtil.cpp:2976 #: src/hed/libs/credential/NSSUtil.cpp:2993 msgid "Failed to add certificate to token or database" msgstr "Сбой добавления сертификата к маркёру или базе данных" #: src/hed/libs/credential/NSSUtil.cpp:2979 #: src/hed/libs/credential/NSSUtil.cpp:2982 msgid "Succeeded to import certificate" msgstr "Успешное импортирование сертификата " #: src/hed/libs/credential/NSSUtil.cpp:2996 #: src/hed/libs/credential/NSSUtil.cpp:2999 #, c-format msgid "Succeeded to change trusts to: %s" msgstr "Успешная смена доверительных отношений на: %s" #: src/hed/libs/credential/NSSUtil.cpp:3026 #, c-format msgid "Failed to import private key from file: %s" msgstr "Сбой импортирования закрытого ключа из файла: %s" #: src/hed/libs/credential/NSSUtil.cpp:3028 #, c-format msgid "Failed to import certificate from file: %s" msgstr "Сбой импортирования сертификата из файла: %s" #: src/hed/libs/credential/VOMSConfig.cpp:142 #, c-format msgid "" "ERROR: VOMS configuration line contains too many tokens. Expecting 5 or 6. " "Line was: %s" msgstr "" "ERROR: строка настройки VOMS содержит избыточное число элементов. Ожидается " "5 или 6. Строка: %s" #: src/hed/libs/credential/VOMSConfig.cpp:158 #, c-format msgid "" "ERROR: file tree is too deep while scanning VOMS configuration. Max allowed " "nesting is %i." msgstr "" "ERROR: каталог содержит слишком много уровней для сканирования настроек " "VOMS. Максимально допустимое число уровней: %i." #: src/hed/libs/credential/VOMSConfig.cpp:176 #, c-format msgid "ERROR: failed to read file %s while scanning VOMS configuration." msgstr "ERROR: сбой чтения файла %s при сканировании настроек VOMS." 
#: src/hed/libs/credential/VOMSConfig.cpp:181 #, c-format msgid "" "ERROR: VOMS configuration file %s contains too many lines. Max supported " "number is %i." msgstr "" "ERROR: файл настроек VOMS %s содержит слишком много строк. Максимально " "допустимое количество: %i." #: src/hed/libs/credential/VOMSConfig.cpp:188 #, c-format msgid "" "ERROR: VOMS configuration file %s contains too long line(s). Max supported " "length is %i characters." msgstr "" "ERROR: файл настроек VOMS %s содержит слишком длинную строку. Максимально " "допустимая длина: %i знаков." #: src/hed/libs/credential/VOMSUtil.cpp:185 #, c-format msgid "Failed to create OpenSSL object %s %s - %u %s" msgstr "Сбой создания объекта OpenSSL %s %s - %u %s" #: src/hed/libs/credential/VOMSUtil.cpp:193 #, c-format msgid "Failed to obtain OpenSSL identifier for %s" msgstr "Сбой получения идентификатора OpenSSL для %s" #: src/hed/libs/credential/VOMSUtil.cpp:346 #, c-format msgid "VOMS: create FQAN: %s" msgstr "VOMS: составление FQAN: %s" #: src/hed/libs/credential/VOMSUtil.cpp:384 #, c-format msgid "VOMS: create attribute: %s" msgstr "VOMS: создание атрибута: %s" #: src/hed/libs/credential/VOMSUtil.cpp:670 msgid "VOMS: Can not allocate memory for parsing AC" msgstr "VOMS: Не удалось зарезервировать память для разбора AC" #: src/hed/libs/credential/VOMSUtil.cpp:678 msgid "VOMS: Can not allocate memory for storing the order of AC" msgstr "" "VOMS: Не удалось зарезервировать память для хранения последовательности AC" #: src/hed/libs/credential/VOMSUtil.cpp:704 msgid "VOMS: Can not parse AC" msgstr "VOMS: Не удалось обработать AC" #: src/hed/libs/credential/VOMSUtil.cpp:734 msgid "VOMS: CA directory or CA file must be provided" msgstr "VOMS: Необходимо задать каталог или файл сертификационного агентства" #: src/hed/libs/credential/VOMSUtil.cpp:798 msgid "VOMS: failed to verify AC signature" msgstr "VOMS: не удалось подтвердить подпись сертификата атрибута" #: src/hed/libs/credential/VOMSUtil.cpp:867 #, c-format msgid "VOMS: trust chain to check: %s " msgstr "VOMS: подтверждается цепочка сертификатов: %s " #: src/hed/libs/credential/VOMSUtil.cpp:875 #, c-format msgid "" "VOMS: the DN in certificate: %s does not match that in trusted DN list: %s" msgstr "" "VOMS: Отличительное имя (DN) в сертификате %s не соответствует таковому в " "доверяемом списке: %s" #: src/hed/libs/credential/VOMSUtil.cpp:881 #, c-format msgid "" "VOMS: the Issuer identity in certificate: %s does not match that in trusted " "DN list: %s" msgstr "" "VOMS: Отличительный признак агентства, выдавшего сертификат %s, не " "соответствует таковому в доверяемом списке: %s" #: src/hed/libs/credential/VOMSUtil.cpp:916 #, c-format msgid "VOMS: The lsc file %s does not exist" msgstr "VOMS: Файл lsc %s не существует" #: src/hed/libs/credential/VOMSUtil.cpp:922 #, c-format msgid "VOMS: The lsc file %s can not be open" msgstr "VOMS: Файл lsc %s не может быть открыт" #: src/hed/libs/credential/VOMSUtil.cpp:970 msgid "" "VOMS: there is no constraints of trusted voms DNs, the certificates stack in " "AC will not be checked." msgstr "" "VOMS: отсутствуют ограничения по отличительным признакам доверяемых VOMS, " "цепочка сертификатов в сертификате атрибута (AC) не будет проверена." 
#: src/hed/libs/credential/VOMSUtil.cpp:1003 msgid "VOMS: unable to match certificate chain against VOMS trusted DNs" msgstr "" "VOMS: невозможно найти цепочку сертификатов, соответствующую доверяемым " "отличительным признакам VOMS" #: src/hed/libs/credential/VOMSUtil.cpp:1023 msgid "VOMS: AC signature verification failed" msgstr "VOMS: сбой подтверждения подписи сертификата атрибута" #: src/hed/libs/credential/VOMSUtil.cpp:1032 msgid "VOMS: unable to verify certificate chain" msgstr "VOMS: невозможно подтвердить цепочку сертификатов" #: src/hed/libs/credential/VOMSUtil.cpp:1038 #, c-format msgid "VOMS: cannot validate AC issuer for VO %s" msgstr "" "VOMS: невозможно удостоверить лицо, выдавшее сертификат атрибута для " "виртуальной организации %s" #: src/hed/libs/credential/VOMSUtil.cpp:1061 #, c-format msgid "VOMS: directory for trusted service certificates: %s" msgstr "VOMS: директория, содержащая сертификаты доверяемых служб: %s" #: src/hed/libs/credential/VOMSUtil.cpp:1087 #, c-format msgid "VOMS: Cannot find certificate of AC issuer for VO %s" msgstr "" "VOMS: невозможно найти сертификат лица, выдавшего сертификат атрибута для " "виртуальной организации %s" #: src/hed/libs/credential/VOMSUtil.cpp:1109 msgid "VOMS: Can not find AC_ATTR with IETFATTR type" msgstr "VOMS: Невозможно найти AC_ATTR типа IETFATTR" #: src/hed/libs/credential/VOMSUtil.cpp:1116 msgid "VOMS: case of multiple IETFATTR attributes not supported" msgstr "VOMS: использование множественных атрибутов IETFATTR не поддерживается" #: src/hed/libs/credential/VOMSUtil.cpp:1126 msgid "VOMS: case of multiple policyAuthority not supported" msgstr "" "VOMS: использование множественных атрибутов policyAuthority не поддерживается" #: src/hed/libs/credential/VOMSUtil.cpp:1142 msgid "VOMS: the format of policyAuthority is unsupported - expecting URI" msgstr "VOMS: недопустимый формат атрибута policyAuthority - ожидается URI" #: src/hed/libs/credential/VOMSUtil.cpp:1151 msgid "" "VOMS: the format of IETFATTRVAL is not supported - expecting OCTET STRING" msgstr "VOMS: недопустимый формат IETFATTRVAL - ожидается OCTET STRING" #: src/hed/libs/credential/VOMSUtil.cpp:1228 msgid "VOMS: the grantor attribute is empty" msgstr "VOMS: атрибут grantor пуст" #: src/hed/libs/credential/VOMSUtil.cpp:1246 msgid "VOMS: the attribute name is empty" msgstr "VOMS: отсутствует имя атрибута" #: src/hed/libs/credential/VOMSUtil.cpp:1252 #, c-format msgid "VOMS: the attribute value for %s is empty" msgstr "VOMS: отсутствует значение атрибута для %s" #: src/hed/libs/credential/VOMSUtil.cpp:1257 msgid "VOMS: the attribute qualifier is empty" msgstr "VOMS: атрибут qualifier пуст" #: src/hed/libs/credential/VOMSUtil.cpp:1289 msgid "" "VOMS: both idcenoRevAvail and authorityKeyIdentifier certificate extensions " "must be present" msgstr "" "VOMS: должны присутствовать оба расширения сертификата, idcenoRevAvail и " "authorityKeyIdentifier" #: src/hed/libs/credential/VOMSUtil.cpp:1323 #, c-format msgid "VOMS: FQDN of this host %s does not match any target in AC" msgstr "" "VOMS: FQDN узла %s не соответствует ни одному из назначений в сертификате " "атрибута (AC)" #: src/hed/libs/credential/VOMSUtil.cpp:1328 msgid "VOMS: the only supported critical extension of the AC is idceTargets" msgstr "" "VOMS: единственным поддерживаемым критическим расширением сертификата " "атрибута (AC) является idceTargets" #: src/hed/libs/credential/VOMSUtil.cpp:1343 msgid "VOMS: failed to parse attributes from AC" msgstr "VOMS: сбой при разборе атрибутов в сертификате атрибута 
(AC)" #: src/hed/libs/credential/VOMSUtil.cpp:1387 msgid "VOMS: authorityKey is wrong" msgstr "VOMS: неверный authorityKey" #: src/hed/libs/credential/VOMSUtil.cpp:1415 msgid "VOMS: missing AC parts" msgstr "VOMS: отсутствуют части AC" #: src/hed/libs/credential/VOMSUtil.cpp:1432 msgid "VOMS: unsupported time format format in AC - expecting GENERALIZED TIME" msgstr "" "VOMS: неверный формат времени в сертификате атрибута (AC) - ожидается " "GENERALIZED TIME" #: src/hed/libs/credential/VOMSUtil.cpp:1438 msgid "VOMS: AC is not yet valid" msgstr "VOMS: сертификат атрибута ещё не действителен" #: src/hed/libs/credential/VOMSUtil.cpp:1445 msgid "VOMS: AC has expired" msgstr "VOMS: срок годности AC вышел" #: src/hed/libs/credential/VOMSUtil.cpp:1460 msgid "VOMS: AC is not complete - missing Serial or Issuer information" msgstr "" "VOMS: Сертификат атрибута (AC) неполон - отсутствует информация об атрибутах " "Serial и/или Issuer" #: src/hed/libs/credential/VOMSUtil.cpp:1465 #, c-format msgid "VOMS: the holder serial number is: %lx" msgstr "VOMS: серийный номер владельца: %lx" #: src/hed/libs/credential/VOMSUtil.cpp:1466 #, c-format msgid "VOMS: the serial number in AC is: %lx" msgstr "VOMS: серийный номер в сертификате атрибута (AC): %lx" #: src/hed/libs/credential/VOMSUtil.cpp:1469 #, c-format msgid "" "VOMS: the holder serial number %lx is not the same as the serial number in " "AC %lx, the holder certificate that is used to create a voms proxy could be " "a proxy certificate with a different serial number as the original EEC cert" msgstr "" "VOMS: серийный номер владельца %lx не совпадает с таковым в сертификате " "атрибута (AC) %lx; сертификат, используемый для создания доверенности VOMS, " "может быть доверенностью с серийным номером, отличным от изначального " "сертификата" #: src/hed/libs/credential/VOMSUtil.cpp:1478 msgid "VOMS: the holder information in AC is wrong" msgstr "VOMS: неверная информация о владельце в сертификате атрибута (AC)" #: src/hed/libs/credential/VOMSUtil.cpp:1500 #, c-format msgid "VOMS: DN of holder in AC: %s" msgstr "VOMS: DN владельца в сертификате атрибута (AC): %s" #: src/hed/libs/credential/VOMSUtil.cpp:1501 #, c-format msgid "VOMS: DN of holder: %s" msgstr "VOMS: DN владельца: %s" #: src/hed/libs/credential/VOMSUtil.cpp:1502 #, c-format msgid "VOMS: DN of issuer: %s" msgstr "VOMS: DN эмитента: %s" #: src/hed/libs/credential/VOMSUtil.cpp:1509 msgid "" "VOMS: the holder name in AC is not related to the distinguished name in " "holder certificate" msgstr "" "VOMS: имя владельца в сертификате атрибута (AC) не имеет отношения к " "отличительному имени в сертификате владельца" #: src/hed/libs/credential/VOMSUtil.cpp:1521 #: src/hed/libs/credential/VOMSUtil.cpp:1528 msgid "VOMS: the holder issuerUID is not the same as that in AC" msgstr "" "VOMS: атрибут issuerUID в сертификате владельца не совпадает с таковым в " "сертификате атрибута (AC)" #: src/hed/libs/credential/VOMSUtil.cpp:1541 msgid "VOMS: the holder issuer name is not the same as that in AC" msgstr "" "VOMS: имя агентства, выдавшего сертификат, не совпадает с таковым в " "сертификате атрибута (AC)" #: src/hed/libs/credential/VOMSUtil.cpp:1551 msgid "VOMS: the issuer information in AC is wrong" msgstr "" "VOMS: неверная информация об агентстве, выдавшем сертификат, в сертификате " "атрибута (AC)" #: src/hed/libs/credential/VOMSUtil.cpp:1559 #, c-format msgid "VOMS: the issuer name %s is not the same as that in AC - %s" msgstr "" "VOMS: имя агентства, выдавшего сертификат - %s - не совпадает с таковым в " 
"сертификате атрибута (AC) - %s" #: src/hed/libs/credential/VOMSUtil.cpp:1567 msgid "" "VOMS: the serial number of AC INFO is too long - expecting no more than 20 " "octets" msgstr "" "VOMS: слишком длинный серийный номер AC INFO - ожидается не более 20-и " "октетов" #: src/hed/libs/credential/VOMSUtil.cpp:1597 #: src/hed/libs/credential/VOMSUtil.cpp:1605 #: src/hed/libs/credential/VOMSUtil.cpp:1613 #: src/hed/libs/credential/VOMSUtil.cpp:1621 #: src/hed/libs/credential/VOMSUtil.cpp:1644 msgid "VOMS: unable to extract VO name from AC" msgstr "" "VOMS: невозможно извлечь название виртуальной организации из сертификата " "атрибута (AC)" #: src/hed/libs/credential/VOMSUtil.cpp:1635 #, c-format msgid "VOMS: unable to determine hostname of AC from VO name: %s" msgstr "" "VOMS: невозможно определить название узла сертификата атрибута (AC) из " "названия виртуальной организации: %s" #: src/hed/libs/credential/VOMSUtil.cpp:1654 msgid "VOMS: can not verify the signature of the AC" msgstr "VOMS: не удалось подтвердить подпись сертификата атрибута" #: src/hed/libs/credential/VOMSUtil.cpp:1660 msgid "VOMS: problems while parsing information in AC" msgstr "VOMS: проблемы при разборке информации в AC" #: src/hed/libs/credential/test/VOMSUtilTest.cpp:128 #, c-format msgid "Line %d.%d of the attributes returned: %s" msgstr "Строка %d.%d атрибутов выдала: %s" #: src/hed/libs/credentialstore/ClientVOMS.cpp:149 msgid "voms" msgstr "VOMS" #: src/hed/libs/credentialstore/CredentialStore.cpp:194 #: src/hed/libs/credentialstore/CredentialStore.cpp:245 #: src/hed/libs/credentialstore/CredentialStore.cpp:273 #: src/hed/libs/credentialstore/CredentialStore.cpp:336 #: src/hed/libs/credentialstore/CredentialStore.cpp:376 #: src/hed/libs/credentialstore/CredentialStore.cpp:406 #, c-format msgid "MyProxy failure: %s" msgstr "Сбой MyProxy: %s" #: src/hed/libs/crypto/OpenSSL.cpp:68 #, c-format msgid "SSL error: %d - %s:%s:%s" msgstr "Ошибка SSL: %d - %s:%s:%s" #: src/hed/libs/crypto/OpenSSL.cpp:81 msgid "SSL locks not initialized" msgstr "Блокировка SSL не инициализирована" #: src/hed/libs/crypto/OpenSSL.cpp:85 #, c-format msgid "wrong SSL lock requested: %i of %i: %i - %s" msgstr "Запрошена неверная блокировка SSL: %i из %i: %i - %s" #: src/hed/libs/crypto/OpenSSL.cpp:112 msgid "Failed to lock arccrypto library in memory" msgstr "Невозможно заблокировать библиотеку arccrypto в памяти" #: src/hed/libs/crypto/OpenSSL.cpp:117 src/hed/libs/crypto/OpenSSL.cpp:128 msgid "Failed to initialize OpenSSL library" msgstr "Ошибка инициализации библиотеки OpenSSL" #: src/hed/libs/crypto/OpenSSL.cpp:150 msgid "Number of OpenSSL locks changed - reinitializing" msgstr "Изменилось число блокировок OpenSSL - повторная инициализация" #: src/hed/libs/data/DataMover.cpp:111 msgid "No locations found - probably no more physical instances" msgstr "Не найдено расположений - возможно, копий больше нет" #: src/hed/libs/data/DataMover.cpp:117 src/hed/libs/data/FileCache.cpp:673 #: src/libs/data-staging/Processor.cpp:458 #: src/libs/data-staging/Processor.cpp:472 #, c-format msgid "Removing %s" msgstr "Удаляется %s" #: src/hed/libs/data/DataMover.cpp:130 msgid "This instance was already deleted" msgstr "Эта копия уже удалена" #: src/hed/libs/data/DataMover.cpp:136 msgid "Failed to delete physical file" msgstr "Сбой при удалении физического файла" #: src/hed/libs/data/DataMover.cpp:147 #, c-format msgid "Removing metadata in %s" msgstr "Удаляются метаданные в %s" #: src/hed/libs/data/DataMover.cpp:151 msgid "Failed to delete meta-information" msgstr 
"Сбой при удалении мета-информации" #: src/hed/libs/data/DataMover.cpp:165 msgid "Failed to remove all physical instances" msgstr "Сбой удаления всех фактических копий" #: src/hed/libs/data/DataMover.cpp:169 #, c-format msgid "Removing logical file from metadata %s" msgstr "Удаляется логический файл из метаданных %s" #: src/hed/libs/data/DataMover.cpp:172 msgid "Failed to delete logical file" msgstr "Сбой при удалении логического файла" #: src/hed/libs/data/DataMover.cpp:179 msgid "Failed to remove instance" msgstr "Не удалось удалить копию" #: src/hed/libs/data/DataMover.cpp:228 msgid "DataMover::Transfer : starting new thread" msgstr "DataMover::Transfer : запуск нового потока" #: src/hed/libs/data/DataMover.cpp:256 #, c-format msgid "Transfer from %s to %s" msgstr "Передача из %s в %s" #: src/hed/libs/data/DataMover.cpp:258 msgid "Not valid source" msgstr "Недействительный источник" #: src/hed/libs/data/DataMover.cpp:263 msgid "Not valid destination" msgstr "Цель недействительна" #: src/hed/libs/data/DataMover.cpp:283 #: src/services/cache_service/CacheService.cpp:294 #, c-format msgid "Couldn't handle certificate: %s" msgstr "Не удалось использовать сертификат: %s" #: src/hed/libs/data/DataMover.cpp:293 src/hed/libs/data/DataMover.cpp:591 #: src/libs/data-staging/Processor.cpp:137 #, c-format msgid "File %s is cached (%s) - checking permissions" msgstr "Файл %s присутствует в кэше (%s) - проверяется допуск" #: src/hed/libs/data/DataMover.cpp:297 src/hed/libs/data/DataMover.cpp:610 #: src/hed/libs/data/DataMover.cpp:672 src/libs/data-staging/Processor.cpp:156 msgid "Permission checking passed" msgstr "Проверка допуска пройдена" #: src/hed/libs/data/DataMover.cpp:298 src/hed/libs/data/DataMover.cpp:630 #: src/hed/libs/data/DataMover.cpp:1136 msgid "Linking/copying cached file" msgstr "Подцепление/копирование файла из кэша" #: src/hed/libs/data/DataMover.cpp:323 #, c-format msgid "No locations for source found: %s" msgstr "Не найдено расположений для файла источника: %s" #: src/hed/libs/data/DataMover.cpp:327 #, c-format msgid "Failed to resolve source: %s" msgstr "Не удалось определить источник: %s" #: src/hed/libs/data/DataMover.cpp:339 src/hed/libs/data/DataMover.cpp:407 #, c-format msgid "No locations for destination found: %s" msgstr "Не найдено физических адресов для назначения: %s" #: src/hed/libs/data/DataMover.cpp:344 src/hed/libs/data/DataMover.cpp:411 #, c-format msgid "Failed to resolve destination: %s" msgstr "Не удалось определить назначение: %s" #: src/hed/libs/data/DataMover.cpp:359 #, c-format msgid "No locations for destination different from source found: %s" msgstr "Не найдено расположений для назначения, отличающихся от источника: %s" #: src/hed/libs/data/DataMover.cpp:380 #, c-format msgid "DataMover::Transfer: trying to destroy/overwrite destination: %s" msgstr "DataMover::Transfer: попытка стереть/перезаписать назначение: %s" #: src/hed/libs/data/DataMover.cpp:391 #, c-format msgid "Failed to delete %s but will still try to copy" msgstr "Сбой удаления %s, всё равно попытаемся скопировать " #: src/hed/libs/data/DataMover.cpp:394 #, c-format msgid "Failed to delete %s" msgstr "Сбой при удалении %s" #: src/hed/libs/data/DataMover.cpp:421 #, c-format msgid "Deleted but still have locations at %s" msgstr "Удалён, но остались копии в %s" #: src/hed/libs/data/DataMover.cpp:433 msgid "DataMover: cycle" msgstr "DataMover: цикл" #: src/hed/libs/data/DataMover.cpp:435 msgid "DataMover: no retries requested - exit" msgstr "DataMover: не запрошено повторных попыток, выход" #: 
src/hed/libs/data/DataMover.cpp:440 msgid "DataMover: source out of tries - exit" msgstr "DataMover: закончились попытки поиска источника - завершение" #: src/hed/libs/data/DataMover.cpp:442 msgid "DataMover: destination out of tries - exit" msgstr "DataMover: закончились попытки поиска назначений - завершение" #: src/hed/libs/data/DataMover.cpp:450 #, c-format msgid "Real transfer from %s to %s" msgstr "Фактическая передача из %s в %s" #: src/hed/libs/data/DataMover.cpp:477 #, c-format msgid "Creating buffer: %lli x %i" msgstr "Создаётся буфер: %lli x %i" #: src/hed/libs/data/DataMover.cpp:493 #, c-format msgid "DataMove::Transfer: no checksum calculation for %s" msgstr "DataMove::Transfer: контрольная сумма для %s не будет вычислена" #: src/hed/libs/data/DataMover.cpp:498 #, c-format msgid "DataMove::Transfer: using supplied checksum %s:%s" msgstr "DataMove::Transfer: используется заданная контрольная сумма %s:%s" #: src/hed/libs/data/DataMover.cpp:522 #, c-format msgid "DataMove::Transfer: will calculate %s checksum" msgstr "DataMove::Transfer: будет вычислена контрольная сумма для %s" #: src/hed/libs/data/DataMover.cpp:527 msgid "Buffer creation failed !" msgstr "Невозможно создать буфер!" #: src/hed/libs/data/DataMover.cpp:550 #, c-format msgid "URL is mapped to: %s" msgstr "URL поставлен в соответствие к: %s" #: src/hed/libs/data/DataMover.cpp:580 src/hed/libs/data/DataMover.cpp:639 #: src/libs/data-staging/Processor.cpp:91 msgid "Cached file is locked - should retry" msgstr "Файл в кэше заблокирован - попытаемся заново" #: src/hed/libs/data/DataMover.cpp:585 src/libs/data-staging/Processor.cpp:110 msgid "Failed to initiate cache" msgstr "Сбой при инициализации кэша" #: src/hed/libs/data/DataMover.cpp:602 #: src/services/cache_service/CacheService.cpp:366 #, c-format msgid "Permission checking failed: %s" msgstr "Проверка прав доступа не удалась: %s" #: src/hed/libs/data/DataMover.cpp:604 src/hed/libs/data/DataMover.cpp:664 #: src/hed/libs/data/DataMover.cpp:686 src/hed/libs/data/DataMover.cpp:697 msgid "source.next_location" msgstr "source.next_location" #: src/hed/libs/data/DataMover.cpp:618 src/libs/data-staging/Processor.cpp:161 #, c-format msgid "Source modification date: %s" msgstr "Дата изменения источника: %s" #: src/hed/libs/data/DataMover.cpp:619 src/libs/data-staging/Processor.cpp:162 #, c-format msgid "Cache creation date: %s" msgstr "Дата создания кэша: %s" #: src/hed/libs/data/DataMover.cpp:625 src/libs/data-staging/Processor.cpp:167 msgid "Cached file is outdated, will re-download" msgstr "Файл в кэше устарел, будет загружен заново" #: src/hed/libs/data/DataMover.cpp:629 src/libs/data-staging/Processor.cpp:173 msgid "Cached copy is still valid" msgstr "Копия в кэше ещё действительна" #: src/hed/libs/data/DataMover.cpp:657 msgid "URL is mapped to local access - checking permissions on original URL" msgstr "" "URL сопоставлен локальному файлу - проверка прав доступа к исходному URL" #: src/hed/libs/data/DataMover.cpp:661 #, c-format msgid "Permission checking on original URL failed: %s" msgstr "Сбой проверки прав доступа к исходному URL: %s" #: src/hed/libs/data/DataMover.cpp:674 msgid "Linking local file" msgstr "Подцепляется локальный файл" #: src/hed/libs/data/DataMover.cpp:694 #, c-format msgid "Failed to make symbolic link %s to %s : %s" msgstr "Сбой при создании символической ссылки %s на %s : %s" #: src/hed/libs/data/DataMover.cpp:703 #, c-format msgid "Failed to change owner of symbolic link %s to %i" msgstr "Невозможно заменить владельца символьной ссылки %s на 
%i" #: src/hed/libs/data/DataMover.cpp:715 #, c-format msgid "cache file: %s" msgstr "кэш-файл: %s" #: src/hed/libs/data/DataMover.cpp:741 #, c-format msgid "Failed to stat source %s" msgstr "Сбой проверки статуса источника %s" #: src/hed/libs/data/DataMover.cpp:743 src/hed/libs/data/DataMover.cpp:758 #: src/hed/libs/data/DataMover.cpp:795 src/hed/libs/data/DataMover.cpp:814 #: src/hed/libs/data/DataMover.cpp:982 src/hed/libs/data/DataMover.cpp:1014 #: src/hed/libs/data/DataMover.cpp:1024 src/hed/libs/data/DataMover.cpp:1101 msgid "(Re)Trying next source" msgstr "Следующий источник" #: src/hed/libs/data/DataMover.cpp:756 #, c-format msgid "Meta info of source and location do not match for %s" msgstr "Мета-информация источника и адрес не соответствуют друг другу для %s" #: src/hed/libs/data/DataMover.cpp:770 #, c-format msgid "" "Replica %s has high latency, but no more sources exist so will use this one" msgstr "" "Копия %s доступна с большой задержкой, но всё равно будет использоваться в " "связи с отсутствием других источников" #: src/hed/libs/data/DataMover.cpp:774 #, c-format msgid "Replica %s has high latency, trying next source" msgstr "Копия %s доступна с большой задержкой, пробуется другой источник" #: src/hed/libs/data/DataMover.cpp:789 #, c-format msgid "Failed to prepare source: %s" msgstr "Не удалось подготовить источник: %s" #: src/hed/libs/data/DataMover.cpp:805 #, c-format msgid "Failed to start reading from source: %s" msgstr "Не удалось начать чтение из источника: %s" #: src/hed/libs/data/DataMover.cpp:826 msgid "Metadata of source and destination are different" msgstr "Метаданные источника и назначения не совпадают" #: src/hed/libs/data/DataMover.cpp:847 #, c-format msgid "Failed to preregister destination: %s" msgstr "Не удалось предварительно зарегистрировать назначение: %s" #: src/hed/libs/data/DataMover.cpp:852 src/hed/libs/data/DataMover.cpp:1125 msgid "destination.next_location" msgstr "destination.next_location" #: src/hed/libs/data/DataMover.cpp:866 #, c-format msgid "Failed to prepare destination: %s" msgstr "Не удалось подготовить назначение: %s" #: src/hed/libs/data/DataMover.cpp:873 src/hed/libs/data/DataMover.cpp:897 #: src/hed/libs/data/DataMover.cpp:1122 #, c-format msgid "" "Failed to unregister preregistered lfn. You may need to unregister it " "manually: %s" msgstr "" "Сбой удаления предварительной записи LFN. Возможно, необходимо удалить её " "вручную: %s" #: src/hed/libs/data/DataMover.cpp:877 src/hed/libs/data/DataMover.cpp:900 #: src/hed/libs/data/DataMover.cpp:991 src/hed/libs/data/DataMover.cpp:1007 #: src/hed/libs/data/DataMover.cpp:1030 src/hed/libs/data/DataMover.cpp:1077 msgid "(Re)Trying next destination" msgstr "Следующее назначение" #: src/hed/libs/data/DataMover.cpp:889 #, c-format msgid "Failed to start writing to destination: %s" msgstr "Сбой начала записи в назначение: %s" #: src/hed/libs/data/DataMover.cpp:913 msgid "Failed to start writing to cache" msgstr "Сбой начала записи в кэш" #: src/hed/libs/data/DataMover.cpp:921 src/hed/libs/data/DataMover.cpp:969 #: src/hed/libs/data/DataMover.cpp:1148 msgid "" "Failed to unregister preregistered lfn. You may need to unregister it " "manually" msgstr "" "Сбой удаления предварительной записи LFN. 
Возможно, необходимо удалить её " "вручную" #: src/hed/libs/data/DataMover.cpp:929 msgid "Waiting for buffer" msgstr "Ожидание буфера" #: src/hed/libs/data/DataMover.cpp:936 #, c-format msgid "Failed updating timestamp on cache lock file %s for file %s: %s" msgstr "" "Сбой обновления метки времени файла блокировки кэша %s для файла %s: %s" #: src/hed/libs/data/DataMover.cpp:941 #, c-format msgid "buffer: read EOF : %s" msgstr "буфер: чтение конца файла : %s" #: src/hed/libs/data/DataMover.cpp:942 #, c-format msgid "buffer: write EOF: %s" msgstr "буфер: запись конца файла: %s" #: src/hed/libs/data/DataMover.cpp:943 #, c-format msgid "buffer: error : %s, read: %s, write: %s" msgstr "буфер: ошибка: %s, чтение: %s, запись: %s" #: src/hed/libs/data/DataMover.cpp:944 msgid "Closing read channel" msgstr "Закрывается канал чтения" #: src/hed/libs/data/DataMover.cpp:950 msgid "Closing write channel" msgstr "Закрывается канал передачи" #: src/hed/libs/data/DataMover.cpp:958 msgid "Failed to complete writing to destination" msgstr "Сбой завершения записи в назначение" #: src/hed/libs/data/DataMover.cpp:974 msgid "Transfer cancelled successfully" msgstr "Передача файлов успешно отменена" #: src/hed/libs/data/DataMover.cpp:1019 msgid "Cause of failure unclear - choosing randomly" msgstr "Причина сбоя не установлена - выбирается случайная копия" #: src/hed/libs/data/DataMover.cpp:1062 #, c-format msgid "" "Checksum mismatch between checksum given as meta option (%s:%s) and " "calculated checksum (%s)" msgstr "" "Несовпадение контрольной суммы, указанной в метаданных (%s:%s), с " "вычисленной (%s)" #: src/hed/libs/data/DataMover.cpp:1070 msgid "" "Failed to unregister preregistered lfn, You may need to unregister it " "manually" msgstr "" "Сбой удаления предварительной записи LFN. 
Возможно, необходимо удалить её " "вручную" #: src/hed/libs/data/DataMover.cpp:1074 msgid "Failed to delete destination, retry may fail" msgstr "" "Не удалось уничтожить назначение, новые попытки могут быть безуспешными" #: src/hed/libs/data/DataMover.cpp:1084 msgid "Cannot compare empty checksum" msgstr "Невозможно сравнить пустую контрольную сумму" #: src/hed/libs/data/DataMover.cpp:1091 #: src/libs/data-staging/DataStagingDelivery.cpp:456 msgid "Checksum type of source and calculated checksum differ, cannot compare" msgstr "" "Тип контрольной суммы источника отличается от вычисленной, сравнение " "невозможно" #: src/hed/libs/data/DataMover.cpp:1093 #, c-format msgid "Checksum mismatch between calcuated checksum %s and source checksum %s" msgstr "" "Несовпадение вычисленной контрольной суммы %s и контрольной суммы источника %" "s" #: src/hed/libs/data/DataMover.cpp:1106 #: src/libs/data-staging/DataStagingDelivery.cpp:472 #, c-format msgid "Calculated transfer checksum %s matches source checksum" msgstr "" "Вычисленная контрольная сумма передачи %s совпадает с контрольной суммой " "источника" #: src/hed/libs/data/DataMover.cpp:1112 #: src/libs/data-staging/DataStagingDelivery.cpp:475 msgid "Checksum not computed" msgstr "Контрольная сумма не вычислена" #: src/hed/libs/data/DataMover.cpp:1118 #, c-format msgid "Failed to postregister destination %s" msgstr "Не удалось зарегистрировать назначение: %s" #: src/hed/libs/data/DataPoint.cpp:83 #, c-format msgid "Invalid URL option: %s" msgstr "Недопустимая опция URL: %s" #: src/hed/libs/data/DataPoint.cpp:254 #, c-format msgid "Skipping invalid URL option %s" msgstr "Пропускается недопустимая опция URL %s" #: src/hed/libs/data/DataPoint.cpp:269 msgid "" "Third party transfer was requested but the corresponding plugin could\n" " not be loaded. Is the GFAL plugin installed? If not, please install " "the\n" " packages 'nordugrid-arc-plugins-gfal' and 'gfal2-all'. Depending on\n" " your type of installation the package names might differ." msgstr "" "Запрошена пересылка файла третьим лицом, но необходимый\n" " подключаемый модуль не был подгружен. Устанавливали ли\n" " Вы модуль GFAL? Если нет, пожалуйста, установите пакеты\n" " 'nordugrid-arc-plugins-gfal' и 'gfal2-all'. Эти названия могут " "зависеть\n" " от типа вашего дистрибутива." 
#: src/hed/libs/data/DataPointIndex.cpp:91 #, c-format msgid "Can't handle location %s" msgstr "Невозможно использовать адрес %s" #: src/hed/libs/data/DataPointIndex.cpp:183 msgid "Sorting replicas according to URL map" msgstr "Копии сортируются в соответствии с расположением URL" #: src/hed/libs/data/DataPointIndex.cpp:187 #, c-format msgid "Replica %s is mapped" msgstr "Копия %s локализована" #: src/hed/libs/data/DataPointIndex.cpp:195 #, c-format msgid "Sorting replicas according to preferred pattern %s" msgstr "Копии сортируются в соответствии с предпочитаемым шаблоном %s" #: src/hed/libs/data/DataPointIndex.cpp:218 #: src/hed/libs/data/DataPointIndex.cpp:236 #, c-format msgid "Excluding replica %s matching pattern !%s" msgstr "Исключается реплика %s в соответствии с шаблоном !%s" #: src/hed/libs/data/DataPointIndex.cpp:229 #, c-format msgid "Replica %s matches host pattern %s" msgstr "Копия %s соответствует шаблону узла %s" #: src/hed/libs/data/DataPointIndex.cpp:247 #, c-format msgid "Replica %s matches pattern %s" msgstr "Копия %s соответствует шаблону %s" #: src/hed/libs/data/DataPointIndex.cpp:263 #, c-format msgid "Replica %s doesn't match preferred pattern or URL map" msgstr "Копия %s не соответствует предпочитаемому шаблону или расположению URL" #: src/hed/libs/data/DataStatus.cpp:12 msgid "Operation completed successfully" msgstr "Операция завершена успешно" #: src/hed/libs/data/DataStatus.cpp:13 msgid "Source is invalid URL" msgstr "Недопустимый URL источника." #: src/hed/libs/data/DataStatus.cpp:14 msgid "Destination is invalid URL" msgstr "Недопустимый URL цели." #: src/hed/libs/data/DataStatus.cpp:15 msgid "Resolving of index service for source failed" msgstr "Сбой обнаружения каталога для источника" #: src/hed/libs/data/DataStatus.cpp:16 msgid "Resolving of index service for destination failed" msgstr "Сбой обнаружения каталога для назначения" #: src/hed/libs/data/DataStatus.cpp:17 msgid "Can't read from source" msgstr "Не удалось считать с источника" #: src/hed/libs/data/DataStatus.cpp:18 msgid "Can't write to destination" msgstr "Не удалось записать в цель" #: src/hed/libs/data/DataStatus.cpp:19 msgid "Failed while reading from source" msgstr "Ошибка чтения из источника" #: src/hed/libs/data/DataStatus.cpp:20 msgid "Failed while writing to destination" msgstr "Ошибка при записи в цель" #: src/hed/libs/data/DataStatus.cpp:21 msgid "Failed while transferring data" msgstr "Сбой при передаче данных" #: src/hed/libs/data/DataStatus.cpp:22 msgid "Failed while finishing reading from source" msgstr "Сбой завершения чтения из источника" #: src/hed/libs/data/DataStatus.cpp:23 msgid "Failed while finishing writing to destination" msgstr "Сбой при завершении записи в назначение" #: src/hed/libs/data/DataStatus.cpp:24 msgid "First stage of registration to index service failed" msgstr "Сбой первого шага регистрации в каталоге" #: src/hed/libs/data/DataStatus.cpp:25 msgid "Last stage of registration to index service failed" msgstr "Сбой последнего шага регистрации в каталоге" #: src/hed/libs/data/DataStatus.cpp:26 msgid "Unregistering from index service failed" msgstr "Сбой удаления регистрации из каталога" #: src/hed/libs/data/DataStatus.cpp:27 msgid "Error in caching procedure" msgstr "Ошибка кэширования" #: src/hed/libs/data/DataStatus.cpp:28 msgid "Error due to expiration of provided credentials" msgstr "" "Ошибка в связи с истечением срока годности предоставленных параметров доступа" #: src/hed/libs/data/DataStatus.cpp:29 msgid "Delete error" msgstr "Ошибка удаления" #:
src/hed/libs/data/DataStatus.cpp:30 msgid "No valid location available" msgstr "Нет допустимых адресов" #: src/hed/libs/data/DataStatus.cpp:31 msgid "Location already exists" msgstr "Такой файл уже существует" #: src/hed/libs/data/DataStatus.cpp:32 msgid "Operation not supported for this kind of URL" msgstr "Эта операция не поддерживается для данного типа URL" #: src/hed/libs/data/DataStatus.cpp:33 msgid "Feature is not implemented" msgstr "Эта функция не реализована" #: src/hed/libs/data/DataStatus.cpp:34 msgid "Already reading from source" msgstr "Чтение из источника уже в процессе" #: src/hed/libs/data/DataStatus.cpp:35 msgid "Already writing to destination" msgstr "Запись в цель уже в процессе" #: src/hed/libs/data/DataStatus.cpp:36 msgid "Read access check failed" msgstr "Не удалось подтвердить наличие доступа на чтение" #: src/hed/libs/data/DataStatus.cpp:37 msgid "Directory listing failed" msgstr "Не удалось вывести список каталога" #: src/hed/libs/data/DataStatus.cpp:38 msgid "Object is not suitable for listing" msgstr "Объект не подходит для перечисления" #: src/hed/libs/data/DataStatus.cpp:39 msgid "Failed to obtain information about file" msgstr "Сбой получения информации о файле" #: src/hed/libs/data/DataStatus.cpp:40 msgid "No such file or directory" msgstr "Нет такого файла или каталога" #: src/hed/libs/data/DataStatus.cpp:41 msgid "Object not initialized (internal error)" msgstr "Объект не инициализирован (внутренняя ошибка)" #: src/hed/libs/data/DataStatus.cpp:42 msgid "Operating System error" msgstr "Ошибка операционной системы" #: src/hed/libs/data/DataStatus.cpp:43 msgid "Failed to stage file(s)" msgstr "Не удалось разместить файл(ы)" #: src/hed/libs/data/DataStatus.cpp:44 msgid "Inconsistent metadata" msgstr "Противоречивые метаданные" #: src/hed/libs/data/DataStatus.cpp:45 msgid "Failed to prepare source" msgstr "Не удалось подготовить источник" #: src/hed/libs/data/DataStatus.cpp:46 msgid "Should wait for source to be prepared" msgstr "Следует подождать, когда источник будет готов" #: src/hed/libs/data/DataStatus.cpp:47 msgid "Failed to prepare destination" msgstr "Не удалось подготовить назначение" #: src/hed/libs/data/DataStatus.cpp:48 msgid "Should wait for destination to be prepared" msgstr "Следует подождать, когда назначение будет готово" #: src/hed/libs/data/DataStatus.cpp:49 msgid "Failed to finalize reading from source" msgstr "Сбой завершения чтения из источника" #: src/hed/libs/data/DataStatus.cpp:50 msgid "Failed to finalize writing to destination" msgstr "Сбой завершения записи в цель" #: src/hed/libs/data/DataStatus.cpp:51 msgid "Failed to create directory" msgstr "Не удалось создать каталог" #: src/hed/libs/data/DataStatus.cpp:52 msgid "Failed to rename URL" msgstr "Не удалось переименовать URL" #: src/hed/libs/data/DataStatus.cpp:53 msgid "Data was already cached" msgstr "Данные уже записаны в кэш" #: src/hed/libs/data/DataStatus.cpp:54 msgid "Operation cancelled successfully" msgstr "Операция успешно прервана" #: src/hed/libs/data/DataStatus.cpp:55 msgid "Generic error" msgstr "Неспецифическая ошибка" #: src/hed/libs/data/DataStatus.cpp:56 src/hed/libs/data/DataStatus.cpp:69 msgid "Unknown error" msgstr "Неизвестная ошибка" #: src/hed/libs/data/DataStatus.cpp:60 msgid "No error" msgstr "Нет ошибок" #: src/hed/libs/data/DataStatus.cpp:61 msgid "Transfer timed out" msgstr "Истечение времени ожидания передачи" #: src/hed/libs/data/DataStatus.cpp:62 msgid "Checksum mismatch" msgstr "Несовпадение контрольных сумм" #: src/hed/libs/data/DataStatus.cpp:63
msgid "Bad logic" msgstr "Неверная логика" #: src/hed/libs/data/DataStatus.cpp:64 msgid "All results obtained are invalid" msgstr "Все полученные результаты неверны" #: src/hed/libs/data/DataStatus.cpp:65 msgid "Temporary service error" msgstr "Преходящая ошибка службы" #: src/hed/libs/data/DataStatus.cpp:66 msgid "Permanent service error" msgstr "Хроническая ошибка службы" #: src/hed/libs/data/DataStatus.cpp:67 msgid "Error switching uid" msgstr "Ошибка смены uid" #: src/hed/libs/data/DataStatus.cpp:68 msgid "Request timed out" msgstr "Истекло время ожидания запроса" #: src/hed/libs/data/FileCache.cpp:101 msgid "No cache directory specified" msgstr "Не указан каталог кэша" #: src/hed/libs/data/FileCache.cpp:118 msgid "No usable caches" msgstr "Нет подходящих кэшей" #: src/hed/libs/data/FileCache.cpp:127 msgid "No remote cache directory specified" msgstr "Не указан удалённый каталог кэша" #: src/hed/libs/data/FileCache.cpp:149 msgid "No draining cache directory specified" msgstr "Не указан каталог кэша для опорожнения" #: src/hed/libs/data/FileCache.cpp:177 #, c-format msgid "Failed to create cache directory for file %s: %s" msgstr "Не удалось создать каталог кэша для файла %s: %s" #: src/hed/libs/data/FileCache.cpp:187 #, c-format msgid "Failed to create any cache directories for %s" msgstr "Не удалось создать каталоги кэша для %s" #: src/hed/libs/data/FileCache.cpp:194 #, c-format msgid "Failed to change permissions on %s: %s" msgstr "Невозможно изменить права доступа к %s: %s" #: src/hed/libs/data/FileCache.cpp:206 #, c-format msgid "Failed to delete stale cache file %s: %s" msgstr "Не удалось удалить устаревший файл кэша %s: %s" #: src/hed/libs/data/FileCache.cpp:209 #, c-format msgid "Failed to release lock on file %s" msgstr "Невозможно разблокировать файл %s" #: src/hed/libs/data/FileCache.cpp:248 #, c-format msgid "Found file %s in remote cache at %s" msgstr "Файл %s обнаружен в удалённом кэше %s" #: src/hed/libs/data/FileCache.cpp:265 #, c-format msgid "Failed to delete stale remote cache file %s: %s" msgstr "Не удалось удалить устаревший удалённо кэшированный файл %s: %s" #: src/hed/libs/data/FileCache.cpp:269 #, c-format msgid "Failed to release lock on remote cache file %s" msgstr "Невозможно разблокировать удалённо кэшированный файл %s" #: src/hed/libs/data/FileCache.cpp:287 src/hed/libs/data/FileCache.cpp:339 #, c-format msgid "Failed to obtain lock on cache file %s" msgstr "Невозможно заблокировать файл в кэше %s" #: src/hed/libs/data/FileCache.cpp:294 src/hed/libs/data/FileCache.cpp:348 #: src/hed/libs/data/FileCache.cpp:408 #, c-format msgid "Error removing cache file %s: %s" msgstr "Ошибка удаления кэшированного файла %s: %s" #: src/hed/libs/data/FileCache.cpp:296 src/hed/libs/data/FileCache.cpp:314 #: src/hed/libs/data/FileCache.cpp:318 src/hed/libs/data/FileCache.cpp:350 #: src/hed/libs/data/FileCache.cpp:361 #, c-format msgid "Failed to remove lock on %s. Some manual intervention may be required" msgstr "" "Сбой разблокирования файла на %s. 
Возможно, необходимо ручное вмешательство" #: src/hed/libs/data/FileCache.cpp:301 #, c-format msgid "Replicating file %s to local cache file %s" msgstr "Копирование файла %s в локальный кэш %s" #: src/hed/libs/data/FileCache.cpp:304 src/hed/libs/data/FileCache.cpp:611 #, c-format msgid "Failed to copy file %s to %s: %s" msgstr "Сбой копирования файла %s в %s: %s" #: src/hed/libs/data/FileCache.cpp:309 #, c-format msgid "" "Replicating file %s from remote cache failed due to source being deleted or " "modified" msgstr "" "Копирование файла %s из удалённого кэша не удалось, т.к. источник был удалён " "или изменён" #: src/hed/libs/data/FileCache.cpp:311 #, c-format msgid "Failed to delete bad copy of remote cache file %s at %s: %s" msgstr "" "Не удалось удалить испорченную копию удалённо кэшированного файла %s в %s: %s" #: src/hed/libs/data/FileCache.cpp:333 #, c-format msgid "Failed looking up attributes of cached file: %s" msgstr "Ошибка поиска атрибутов кэшированного файла: %s" #: src/hed/libs/data/FileCache.cpp:380 src/hed/libs/data/FileCache.cpp:414 #, c-format msgid "Failed to unlock file %s: %s. Manual intervention may be required" msgstr "" "Не удалось разблокировать файл %s: %s. Возможно, необходимо ручное " "вмешательство" #: src/hed/libs/data/FileCache.cpp:397 #, c-format msgid "Invalid lock on file %s" msgstr "Недопустимая блокировка файла %s" #: src/hed/libs/data/FileCache.cpp:403 #, c-format msgid "Failed to remove .meta file %s: %s" msgstr "Сбой удаления файла .meta %s: %s" #: src/hed/libs/data/FileCache.cpp:468 #, c-format msgid "Cache not found for file %s" msgstr "Не обнаружен кэш файла %s" #: src/hed/libs/data/FileCache.cpp:478 #, c-format msgid "" "Cache file %s was modified in the last second, sleeping 1 second to avoid " "race condition" msgstr "" "Кэшированный файл %s был изменён в последний момент, приостановка процесса " "на 1 секунду для предотвращения гонки " #: src/hed/libs/data/FileCache.cpp:484 src/hed/libs/data/FileCache.cpp:819 #, c-format msgid "Cache file %s does not exist" msgstr "Кэшированный файл %s не существует" #: src/hed/libs/data/FileCache.cpp:503 #, c-format msgid "Cache file for %s not found in any local or remote cache" msgstr "" "Кэшированный файл для %s не был обнаружен ни в локальном, ни в удалённом " "кэшах" #: src/hed/libs/data/FileCache.cpp:507 #, c-format msgid "Using remote cache file %s for url %s" msgstr "Используется удалённо кэшированный файл %s для URL %s" #: src/hed/libs/data/FileCache.cpp:510 src/hed/libs/data/FileCache.cpp:821 #, c-format msgid "Error accessing cache file %s: %s" msgstr "Ошибка доступа к кэшированному файлу %s: %s" #: src/hed/libs/data/FileCache.cpp:516 #, c-format msgid "Cannot create directory %s for per-job hard links" msgstr "Невозможно создать каталог %s для жёстких ссылок задач" #: src/hed/libs/data/FileCache.cpp:521 #, c-format msgid "Cannot change permission of %s: %s " msgstr "Не удалось изменить права доступа к %s: %s " #: src/hed/libs/data/FileCache.cpp:525 #, c-format msgid "Cannot change owner of %s: %s " msgstr "Невозможно изменить владельца %s: %s " #: src/hed/libs/data/FileCache.cpp:539 #, c-format msgid "Failed to remove existing hard link at %s: %s" msgstr "Невозможно удалить существующую жёсткую ссылку на %s: %s" #: src/hed/libs/data/FileCache.cpp:543 src/hed/libs/data/FileCache.cpp:554 #, c-format msgid "Failed to create hard link from %s to %s: %s" msgstr "Невозможно создать жёсткую ссылку с %s на %s: %s" #: src/hed/libs/data/FileCache.cpp:549 #, c-format msgid "Cache file %s not found" msgstr 
"Не обнаружен кэшированый файл %s" #: src/hed/libs/data/FileCache.cpp:564 #, c-format msgid "Failed to change permissions or set owner of hard link %s: %s" msgstr "Не удалось сменить права доступа или владельца жёсткой ссылки %s: %s" #: src/hed/libs/data/FileCache.cpp:572 #, c-format msgid "Failed to release lock on cache file %s" msgstr "Невозможно разблокировать файл в кэше %s" #: src/hed/libs/data/FileCache.cpp:583 #, c-format msgid "Cache file %s was locked during link/copy, must start again" msgstr "" "Кэшированный файл %s был заблокирован во время создания ссылки или копии, " "новая попытка" #: src/hed/libs/data/FileCache.cpp:588 #, c-format msgid "Cache file %s was deleted during link/copy, must start again" msgstr "" "Кэшированный файл %s был удалён во время создания ссылки или копии, новая " "попытка" #: src/hed/libs/data/FileCache.cpp:593 #, c-format msgid "Cache file %s was modified while linking, must start again" msgstr "" "Кэшированный файл %s был изменён во время создания ссылки или копии, новая " "попытка" #: src/hed/libs/data/FileCache.cpp:617 #, c-format msgid "Failed to set executable bit on file %s" msgstr "Невозможно выставить исполняемый бит для файла %s" #: src/hed/libs/data/FileCache.cpp:622 #, c-format msgid "Failed to set executable bit on file %s: %s" msgstr "Невозможно выставить исполняемый бит для файла %s: %s" #: src/hed/libs/data/FileCache.cpp:636 #, c-format msgid "Failed to remove existing symbolic link at %s: %s" msgstr "Невозможно удалить существующую символьную ссылку на %s: %s" #: src/hed/libs/data/FileCache.cpp:640 src/hed/libs/data/FileCache.cpp:645 #, c-format msgid "Failed to create symbolic link from %s to %s: %s" msgstr "Невозможно создать символьную ссылку с %s на %s: %s" #: src/hed/libs/data/FileCache.cpp:675 #, c-format msgid "Failed to remove cache per-job dir %s: %s" msgstr "Сбой удаления каталога кэша задач %s: %s" #: src/hed/libs/data/FileCache.cpp:694 src/hed/libs/data/FileCache.cpp:771 #, c-format msgid "Error reading meta file %s: %s" msgstr "Ошибка чтения мета-файла %s: %s" #: src/hed/libs/data/FileCache.cpp:699 src/hed/libs/data/FileCache.cpp:776 #, c-format msgid "Error opening meta file %s" msgstr "Ошибка открытия мета-файла %s" #: src/hed/libs/data/FileCache.cpp:704 src/hed/libs/data/FileCache.cpp:780 #, c-format msgid "meta file %s is empty" msgstr "Мета-файл %s пуст" #: src/hed/libs/data/FileCache.cpp:713 #, c-format msgid "" "File %s is already cached at %s under a different URL: %s - will not add DN " "to cached list" msgstr "" "Файл %s уже кэширован в %s с другим URL: %s - выделенное имя не будет " "добавлено в кэшированный список" #: src/hed/libs/data/FileCache.cpp:733 #, c-format msgid "Bad format detected in file %s, in line %s" msgstr "Обнаружен недопустимый формат в файле %s, строке %s" #: src/hed/libs/data/FileCache.cpp:750 #, c-format msgid "Could not acquire lock on meta file %s" msgstr "Невозможно установить блокировку на мета-файл %s" #: src/hed/libs/data/FileCache.cpp:754 #, c-format msgid "Error opening meta file for writing %s" msgstr "Ошибка открытия мета-файла для записи %s" #: src/hed/libs/data/FileCache.cpp:790 #, c-format msgid "DN %s is cached and is valid until %s for URL %s" msgstr "Выделенное имя %s для URL %s кэшировано, и действительно до %s" #: src/hed/libs/data/FileCache.cpp:794 #, c-format msgid "DN %s is cached but has expired for URL %s" msgstr "Выделенное имя %s для URL %s кэшировано, но уже просрочено" #: src/hed/libs/data/FileCache.cpp:845 #, c-format msgid "Failed to acquire lock on cache meta 
file %s" msgstr "Сбой установки блокировки на кэшированный мета-файл %s" #: src/hed/libs/data/FileCache.cpp:850 #, c-format msgid "Failed to create cache meta file %s" msgstr "Сбой создания мета-файла кэша %s" #: src/hed/libs/data/FileCache.cpp:865 #, c-format msgid "Failed to read cache meta file %s" msgstr "Сбой чтения мета-файла кэша %s" #: src/hed/libs/data/FileCache.cpp:870 #, c-format msgid "Cache meta file %s is empty, will recreate" msgstr "Мета-файл кэша %s пуст, будет воссоздан" #: src/hed/libs/data/FileCache.cpp:875 #, c-format msgid "Cache meta file %s possibly corrupted, will recreate" msgstr "Мета-файл кэша %s, возможно, повреждён, будет воссоздан" #: src/hed/libs/data/FileCache.cpp:883 #, c-format msgid "" "File %s is already cached at %s under a different URL: %s - this file will " "not be cached" msgstr "" "Файл %s уже находится в кэше %s с другим URL: %s - этот файл не будет " "кэширован" #: src/hed/libs/data/FileCache.cpp:893 #, c-format msgid "Error looking up attributes of cache meta file %s: %s" msgstr "Ошибка поиска атрибутов мета-файла кэша %s: %s" #: src/hed/libs/data/FileCache.cpp:955 #, c-format msgid "Using cache %s" msgstr "Используется кэш %s" #: src/hed/libs/data/FileCache.cpp:969 #, c-format msgid "Error getting info from statvfs for the path %s: %s" msgstr "Ошибка получения информации от statvfs для пути %s: %s" #: src/hed/libs/data/FileCache.cpp:975 #, c-format msgid "Cache %s: Free space %f GB" msgstr "Кэш %s: Свободное пространство %f GB" #: src/hed/libs/data/URLMap.cpp:33 #, c-format msgid "Can't use URL %s" msgstr "Невозможно использовать URL %s" #: src/hed/libs/data/URLMap.cpp:39 #, c-format msgid "file %s is not accessible" msgstr "файл %s недоступен" #: src/hed/libs/data/URLMap.cpp:49 #, c-format msgid "Mapping %s to %s" msgstr "%s ставится в соответствие %s" #: src/hed/libs/data/examples/simple_copy.cpp:17 msgid "Usage: copy source destination" msgstr "Использование: copy источник назначение" #: src/hed/libs/data/examples/simple_copy.cpp:42 #, c-format msgid "Copy failed: %s" msgstr "Сбой копирования: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:41 #, c-format msgid "Failed to read proxy file: %s" msgstr "Сбой при чтении файла доверенности: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:49 #, c-format msgid "Failed to read certificate file: %s" msgstr "Сбой при чтении файла сертификата: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:56 #, c-format msgid "Failed to read private key file: %s" msgstr "Сбой при чтении файла личного ключа: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:82 #, fuzzy, c-format msgid "" "Failed to convert GSI credential to GSS credential (major: %d, minor: %d)%s:%" "s" msgstr "" "Не удалось преобразовать параметры доступа GSI в GSS (major: %d, minor: %d)%s" #: src/hed/libs/globusutils/GSSCredential.cpp:94 #, fuzzy, c-format msgid "Failed to release GSS credential (major: %d, minor: %d):%s:%s" msgstr "Не удалось освободить параметры доступа GSS (major: %d, minor: %d):%s" #: src/hed/libs/infosys/BootstrapISIS.cpp:26 msgid "Initialize ISIS handler" msgstr "Инициализация обработчика ISIS" #: src/hed/libs/infosys/BootstrapISIS.cpp:31 #, c-format msgid "Can't recognize URL: %s" msgstr "Неприемлемый URL: %s" #: src/hed/libs/infosys/BootstrapISIS.cpp:48 msgid "Initialize ISIS handler succeeded" msgstr "Успешная инициализация обработчика ISIS" #: src/hed/libs/infosys/BootstrapISIS.cpp:52 #, c-format msgid "Remove ISIS (%s) from list" msgstr "Удаление ISIS (%s) из списка" #: src/hed/libs/infosys/BootstrapISIS.cpp:72 #, 
c-format msgid "getISISList from %s" msgstr "getISISList из %s" #: src/hed/libs/infosys/BootstrapISIS.cpp:73 #, c-format msgid "Key %s, Cert: %s, CA: %s" msgstr "Ключ %s, сертификат: %s, CA: %s" #: src/hed/libs/infosys/BootstrapISIS.cpp:105 #, c-format msgid "ISIS (%s) is not available or not valid response. (%d. reconnection)" msgstr "" "ISIS (%s) недоступен, или получен недопустимый отклик. (%d. Повторное " "соединение)" #: src/hed/libs/infosys/BootstrapISIS.cpp:107 #, c-format msgid "Connection to the ISIS (%s) is success and get the list of ISIS." msgstr "Успешное соединение с ISIS (%s), получение списка сервисов ISIS." #: src/hed/libs/infosys/BootstrapISIS.cpp:133 #, c-format msgid "GetISISList add this (%s) ISIS into the list." msgstr "GetISISList добавляет этот сервис (%s) ISIS в список." #: src/hed/libs/infosys/BootstrapISIS.cpp:145 #, c-format msgid "Chosen ISIS for communication: %s" msgstr "Выбранный для связи ISIS: %s" #: src/hed/libs/infosys/BootstrapISIS.cpp:152 msgid "Get ISIS from list of ISIS handler" msgstr "Извлечение сервиса ISIS из списка обработчиков ISIS" #: src/hed/libs/infosys/BootstrapISIS.cpp:156 msgid "Here is the end of the infinite calling loop." msgstr "Здесь и заканчивается бесконечный цикл запросов." #: src/hed/libs/infosys/BootstrapISIS.cpp:163 msgid "There is no more ISIS available. The list of ISIS's is already empty." msgstr "Доступных севрисов ISIS больше нет. Список ISIS-ов уже опустел." #: src/hed/libs/infosys/InfoCache.cpp:47 #, c-format msgid "cannot create directory: %s" msgstr "не удалось создать каталог: %s" #: src/hed/libs/infosys/InfoCache.cpp:60 #, c-format msgid "Cache configuration: %s" msgstr "Настройки кэша: %s" #: src/hed/libs/infosys/InfoCache.cpp:63 msgid "Missing cache root in configuration" msgstr "В настройках кэша отсутствует корневой каталог" #: src/hed/libs/infosys/InfoCache.cpp:67 msgid "Missing service ID" msgstr "Отсутствует ярлык сервиса" #: src/hed/libs/infosys/InfoCache.cpp:70 #, c-format msgid "Cache root: %s" msgstr "Корневая папка кэша: %s" #: src/hed/libs/infosys/InfoCache.cpp:76 #, c-format msgid "Cache directory: %s" msgstr "Папка кэша: %s" #: src/hed/libs/infosys/InfoCache.cpp:143 #: src/hed/libs/infosys/InfoCache.cpp:162 #: src/hed/libs/infosys/InfoCache.cpp:181 #: src/hed/libs/infosys/InfoCache.cpp:206 msgid "InfoCache object is not set up" msgstr "Объект InfoCache не создан" #: src/hed/libs/infosys/InfoCache.cpp:147 #: src/hed/libs/infosys/InfoCache.cpp:166 #, c-format msgid "Invalid path in Set(): %s" msgstr "Недопустимый путь в Set(): %s" #: src/hed/libs/infosys/InfoCache.cpp:185 #, c-format msgid "Invalid path in Get(): %s" msgstr "Недопустимый путь в Get(): %s" #: src/hed/libs/infosys/InfoRegister.cpp:23 #, c-format msgid "" "InfoRegistrar thread waiting %d seconds for the all Registers elements " "creation." msgstr "" "Поток InfoRegistrar ожидает %d секунд, пока создадутся все элементы " "Registers." #: src/hed/libs/infosys/InfoRegister.cpp:69 #, c-format msgid "" "InfoRegister created with config:\n" "%s" msgstr "" "InfoRegister создан с настройками:\n" "%s" #: src/hed/libs/infosys/InfoRegister.cpp:79 #, c-format msgid "InfoRegister to be registered in Registrar %s" msgstr "InfoRegister будет занесён в Registrar %s" #: src/hed/libs/infosys/InfoRegister.cpp:81 msgid "Discarding Registrar because the \"URL\" element is missing or empty." msgstr "" "Registrar игнорируется, так как элемент \"URL\" отсутствует, либо пуст." 
#: src/hed/libs/infosys/InfoRegister.cpp:133 #, c-format msgid "InfoRegistrar id \"%s\" has been found." msgstr "Обнаружен InfoRegistrar id \"%s\"." #: src/hed/libs/infosys/InfoRegister.cpp:140 #, c-format msgid "InfoRegistrar id \"%s\" was not found. New registrar created" msgstr "InfoRegistrar id \"%s\" не был обнаружен. Создан новый реестр" #: src/hed/libs/infosys/InfoRegister.cpp:182 #, c-format msgid "" "Configuration error. Retry: \"%s\" is not a valid value. Default value will " "be used." msgstr "" "Ошибка настроек. Retry: \"%s\" не является допустимым значением. Будет " "использовано значение по умолчанию." #: src/hed/libs/infosys/InfoRegister.cpp:188 #, c-format msgid "Retry: %d" msgstr "Повторная попытка: %d" #: src/hed/libs/infosys/InfoRegister.cpp:197 #, c-format msgid "Key: %s, cert: %s" msgstr "Ключ: %s, сертификат: %s" #: src/hed/libs/infosys/InfoRegister.cpp:226 msgid "The service won't be registered." msgstr "Сервис не будет зарегистрирован." #: src/hed/libs/infosys/InfoRegister.cpp:231 msgid "Configuration error. Missing mandatory \"Period\" element." msgstr "Ошибка конфигурации. Отсутствует обязательный элемент \"Period\"." #: src/hed/libs/infosys/InfoRegister.cpp:236 msgid "Configuration error. Missing mandatory \"Endpoint\" element." msgstr "Ошибка конфигурации. Отсутствует обязательный элемент \"Endpoint\"." #: src/hed/libs/infosys/InfoRegister.cpp:241 msgid "Configuration error. Missing mandatory \"Expiration\" element." msgstr "Ошибка конфигурации. Отсутствует обязательный элемент \"Expiration\"." #: src/hed/libs/infosys/InfoRegister.cpp:248 #, c-format msgid "" "Service was already registered to the InfoRegistrar connecting to infosys %s." msgstr "" "Сервис был уже занесён в InfoRegistrar, подключённый к информационной " "системе %s." #: src/hed/libs/infosys/InfoRegister.cpp:284 #, c-format msgid "" "Service is successfully added to the InfoRegistrar connecting to infosys %s." msgstr "" "Сервис успешно добавлен в InfoRegistrar, подключённый к информационной " "системе %s." #: src/hed/libs/infosys/InfoRegister.cpp:301 msgid "Unregistred Service can not be removed." msgstr "Незарегистрированная служба не может быть удалена." #: src/hed/libs/infosys/InfoRegister.cpp:348 #: src/hed/libs/infosys/InfoRegister.cpp:411 #, c-format msgid "Key: %s, Cert: %s, Proxy: %s, CADir: %s CAPath" msgstr "Ключ: %s, Сертификат: %s, Доверенность: %s, Каталог CA: %s, путь CA" #: src/hed/libs/infosys/InfoRegister.cpp:359 #: src/hed/libs/infosys/InfoRegister.cpp:646 #, c-format msgid "Response from the ISIS: %s" msgstr "Отклик из ISIS: %s" #: src/hed/libs/infosys/InfoRegister.cpp:363 #, c-format msgid "Failed to remove registration from %s ISIS" msgstr "Не удалось удалить учётную запись с сервера ISIS %s " #: src/hed/libs/infosys/InfoRegister.cpp:366 #, c-format msgid "Successfuly removed registration from ISIS (%s)" msgstr "Успешное удаление учётной записи с сервера ISIS (%s)" #: src/hed/libs/infosys/InfoRegister.cpp:372 #, c-format msgid "Failed to remove registration from ISIS (%s) - %s" msgstr "Не удалось удалить учётную запись с сервера ISIS (%s) - %s" #: src/hed/libs/infosys/InfoRegister.cpp:379 #: src/hed/libs/infosys/InfoRegister.cpp:656 #, c-format msgid "Retry connecting to the ISIS (%s) %d time(s)." msgstr "Повторные попытки связи с сервером ISIS (%s) %d раз." #: src/hed/libs/infosys/InfoRegister.cpp:385 #, c-format msgid "ISIS (%s) is not available."
msgstr "Сервер ISIS (%s) недоступен" #: src/hed/libs/infosys/InfoRegister.cpp:389 #: src/hed/libs/infosys/InfoRegister.cpp:439 #, c-format msgid "Service removed from InfoRegistrar connecting to infosys %s." msgstr "" "Сервис удалён из InfoRegistrar, подключённый к информационной системе %s." #: src/hed/libs/infosys/InfoRegister.cpp:420 #, c-format msgid "Failed to remove registration from %s EMIRegistry" msgstr "Не удалось удалить учётную запись с сервера EMIRegistry %s " #: src/hed/libs/infosys/InfoRegister.cpp:423 #, c-format msgid "Successfuly removed registration from EMIRegistry (%s)" msgstr "Успешное удаление учётной записи с сервера EMIRegistry (%s)" #: src/hed/libs/infosys/InfoRegister.cpp:429 #: src/hed/libs/infosys/InfoRegister.cpp:957 #, c-format msgid "Retry connecting to the EMIRegistry (%s) %d time(s)." msgstr "Попытка повторного соединения с EMIRegistry (%s) %d раз(а)." #: src/hed/libs/infosys/InfoRegister.cpp:435 #, c-format msgid "EMIRegistry (%s) is not available." msgstr "Недоступен сервис EMIRegistry (%s)." #: src/hed/libs/infosys/InfoRegister.cpp:476 #: src/hed/libs/infosys/InfoRegister.cpp:684 #, c-format msgid "Registration starts: %s" msgstr "Регистрация начинается: %s" #: src/hed/libs/infosys/InfoRegister.cpp:477 #: src/hed/libs/infosys/InfoRegister.cpp:685 #, c-format msgid "reg_.size(): %d" msgstr "reg_.size(): %d" #: src/hed/libs/infosys/InfoRegister.cpp:480 #: src/hed/libs/infosys/InfoRegister.cpp:688 msgid "Registrant has no proper URL specified. Registration end." msgstr "Registrant не содержит действительного URL. Регистрация окончена." #: src/hed/libs/infosys/InfoRegister.cpp:510 #: src/hed/libs/infosys/InfoRegister.cpp:713 msgid "Create RegEntry XML element" msgstr "Создание элемента XML RegEntry" #: src/hed/libs/infosys/InfoRegister.cpp:542 #: src/hed/libs/infosys/InfoRegister.cpp:745 msgid "ServiceID attribute calculated from Endpoint Reference" msgstr "Атрибут ServiceID получен из описания точки входа" #: src/hed/libs/infosys/InfoRegister.cpp:546 #: src/hed/libs/infosys/InfoRegister.cpp:749 msgid "Generation Time attribute calculated from current time" msgstr "Атрибут Generation Time получен из текущего времени" #: src/hed/libs/infosys/InfoRegister.cpp:553 #: src/hed/libs/infosys/InfoRegister.cpp:756 #, c-format msgid "ServiceID stored: %s" msgstr "Сохранён ServiceID: %s" #: src/hed/libs/infosys/InfoRegister.cpp:559 #: src/hed/libs/infosys/InfoRegister.cpp:762 #, c-format msgid "Missing service document provided by the service %s" msgstr "Отсутствует документ службы, публикуемый сервисом %s" #: src/hed/libs/infosys/InfoRegister.cpp:565 #: src/hed/libs/infosys/InfoRegister.cpp:768 #, c-format msgid "" "Missing MetaServiceAdvertisment or Expiration values provided by the service " "%s" msgstr "" "Отсутствуют значения атрибутов MetaServiceAdvertisment или Expiration, " "публикуемые сервисом %s" #: src/hed/libs/infosys/InfoRegister.cpp:572 #: src/hed/libs/infosys/InfoRegister.cpp:775 #, c-format msgid "Missing Type value provided by the service %s" msgstr "Отсутствует значение атрибута Type, публикуемое сервисом %s" #: src/hed/libs/infosys/InfoRegister.cpp:580 #: src/hed/libs/infosys/InfoRegister.cpp:783 #, c-format msgid "Missing Endpoint Reference value provided by the service %s" msgstr "" "Отсутствует значение атрибута Endpoint Reference, публикуемое сервисом %s" #: src/hed/libs/infosys/InfoRegister.cpp:598 #, c-format msgid "Registering to %s ISIS" msgstr "Регистрация на сервере ISIS %s " #: src/hed/libs/infosys/InfoRegister.cpp:621 #: 
src/hed/libs/infosys/InfoRegister.cpp:822 #, c-format msgid "Key: %s, Cert: %s, Proxy: %s, CADir: %s, CAFile" msgstr "Ключ: %s, Сертификат: %s, Доверенность: %s, Каталог CA: %s, файл CA" #: src/hed/libs/infosys/InfoRegister.cpp:625 #, c-format msgid "Sent RegEntries: %s" msgstr "Посланы RegEntries: %s" #: src/hed/libs/infosys/InfoRegister.cpp:639 #, c-format msgid "Error during registration to %s ISIS" msgstr "Ошибка при регистрации в сервис ISIS %s" #: src/hed/libs/infosys/InfoRegister.cpp:648 #, c-format msgid "Successful registration to ISIS (%s)" msgstr "Успешная регистрация в сервис ISIS (%s)" #: src/hed/libs/infosys/InfoRegister.cpp:652 #, c-format msgid "Failed to register to ISIS (%s) - %s" msgstr "Сбой регистрации в сервис ISIS (%s) - %s" #: src/hed/libs/infosys/InfoRegister.cpp:668 #: src/hed/libs/infosys/InfoRegister.cpp:967 #, c-format msgid "Registration ends: %s" msgstr "Конец регистрации: %s" #: src/hed/libs/infosys/InfoRegister.cpp:669 #: src/hed/libs/infosys/InfoRegister.cpp:968 #, c-format msgid "Waiting period is %d second(s)." msgstr "Время ожидания - %d секунд(ы)." #: src/hed/libs/infosys/InfoRegister.cpp:679 #: src/hed/libs/infosys/InfoRegister.cpp:978 #, c-format msgid "Registration exit: %s" msgstr "Выход из регистрации: %s" #: src/hed/libs/infosys/InfoRegister.cpp:801 #, c-format msgid "Registering to %s EMIRegistry" msgstr "Регистрация на сервере EMIRegistry %s " #: src/hed/libs/infosys/InfoRegister.cpp:927 #, c-format msgid "Sent entry: %s" msgstr "Отправлена запись: %s" #: src/hed/libs/infosys/InfoRegister.cpp:940 #, c-format msgid "Error during %s to %s EMIRegistry" msgstr "Сбой операции %s по отношению к сервису EMIRegistry %s " #: src/hed/libs/infosys/InfoRegister.cpp:943 #, c-format msgid "Successful %s to EMIRegistry (%s)" msgstr "" "Успешное завершение операции %s по отношению к сервису EMIRegistry (%s)" #: src/hed/libs/infosys/InfoRegister.cpp:949 #, c-format msgid "Failed to %s to EMIRegistry (%s) - %d" msgstr "" "Не удалось выполнить операцию %s по отношению к сервису EMIRegistry (%s) - %d" #: src/hed/libs/loader/ModuleManager.cpp:25 msgid "Module Manager Init" msgstr "Запуск управления модулями " #: src/hed/libs/loader/ModuleManager.cpp:68 msgid "" "Busy plugins found while unloading Module Manager. Waiting for them to be " "released." msgstr "" "В процессе отключения менеджера модулей обнаружены занятые подключаемые " "модули. Ожидается их завершение." 
#: src/hed/libs/loader/ModuleManager.cpp:202 #, c-format msgid "Found %s in cache" msgstr "%s обнаружен в кэше" #: src/hed/libs/loader/ModuleManager.cpp:209 #, c-format msgid "Could not locate module %s in following paths:" msgstr "Невозможно найти модуль %s в следующих местах:" #: src/hed/libs/loader/ModuleManager.cpp:213 #, c-format msgid "\t%s" msgstr "\t%s" #: src/hed/libs/loader/ModuleManager.cpp:227 #, c-format msgid "Loaded %s" msgstr "Подгружен модуль %s " #: src/hed/libs/loader/ModuleManager.cpp:271 msgid "Module Manager Init by ModuleManager::setCfg" msgstr "Инициализация менеджера модулей в ModuleManager::setCfg" #: src/hed/libs/loader/ModuleManager.cpp:307 #: src/hed/libs/loader/ModuleManager.cpp:320 #, c-format msgid "%s made persistent" msgstr "Модуль %s сброшен на диск" #: src/hed/libs/loader/ModuleManager.cpp:311 #, c-format msgid "Not found %s in cache" msgstr "Модуль %s не найден в кэше" #: src/hed/libs/loader/ModuleManager.cpp:325 msgid "Specified module not found in cache" msgstr "Указанные модули не найдены в кэше" #: src/hed/libs/loader/Plugin.cpp:369 src/hed/libs/loader/Plugin.cpp:574 #, c-format msgid "Could not find loadable module descriptor by name %s" msgstr "Не удалось найти дескриптор подгружаемого модуля по имени %s" #: src/hed/libs/loader/Plugin.cpp:375 src/hed/libs/loader/Plugin.cpp:581 #, c-format msgid "Could not find loadable module by name %s (%s)" msgstr "Не удалось найти подгружаемый модуль %s (%s)" #: src/hed/libs/loader/Plugin.cpp:381 src/hed/libs/loader/Plugin.cpp:492 #: src/hed/libs/loader/Plugin.cpp:586 #, c-format msgid "Module %s is not an ARC plugin (%s)" msgstr "Модуль %s не является подключаемым модулем ARC (%s)" #: src/hed/libs/loader/Plugin.cpp:398 src/hed/libs/loader/Plugin.cpp:502 #: src/hed/libs/loader/Plugin.cpp:608 #, c-format msgid "Module %s failed to reload (%s)" msgstr "Не удалось перезагрузить модуль %s (%s)" #: src/hed/libs/loader/Plugin.cpp:462 src/hed/libs/loader/Plugin.cpp:475 #, c-format msgid "Loadable module %s contains no requested plugin %s of kind %s" msgstr "Подгружаемый модуль %s не содержит запрашиваемого модуля %s типа %s" #: src/hed/libs/loader/Plugin.cpp:480 #, c-format msgid "Could not find loadable module descriptor by names %s and %s" msgstr "Невозможно найти дескрипторы подгружаемых модулей по имени %s и %s" #: src/hed/libs/loader/Plugin.cpp:486 #, c-format msgid "Could not find loadable module by names %s and %s (%s)" msgstr "Невозможно найти подгружаемые модули по имени %s и %s (%s)" #: src/hed/libs/message/MCC.cpp:77 src/hed/libs/message/Service.cpp:25 #, c-format msgid "No security processing/check requested for '%s'" msgstr "Обработка/проверка параметров доступа не запрошена для '%s'" #: src/hed/libs/message/MCC.cpp:85 msgid "Security processing/check failed" msgstr "Обработка/проверка параметров доступа не прошла" #: src/hed/libs/message/MCC.cpp:89 msgid "Security processing/check passed" msgstr "Обработка/проверка параметров доступа завершилась успехом" #: src/hed/libs/message/MCCLoader.cpp:16 msgid "Chain(s) configuration failed" msgstr "Не удалось настроить цепочку/и" #: src/hed/libs/message/MCCLoader.cpp:133 msgid "SecHandler configuration is not defined" msgstr "Настройки SecHandler не заданы" #: src/hed/libs/message/MCCLoader.cpp:156 msgid "SecHandler has no configuration" msgstr "Настройки SecHandler отсутствуют" #: src/hed/libs/message/MCCLoader.cpp:162 msgid "SecHandler has no name attribute defined" msgstr "Не задан атрибут name для SecHandler" #: src/hed/libs/message/MCCLoader.cpp:172 #, 
c-format msgid "Security Handler %s(%s) could not be created" msgstr "Обработчик безопасности %s(%s) не может быть создан" #: src/hed/libs/message/MCCLoader.cpp:176 #, c-format msgid "SecHandler: %s(%s)" msgstr "SecHandler: %s(%s)" #: src/hed/libs/message/MCCLoader.cpp:188 msgid "Component has no name attribute defined" msgstr "Для компонента не задан атрибут name" #: src/hed/libs/message/MCCLoader.cpp:193 msgid "Component has no ID attribute defined" msgstr "Для компонента не задан атрибут ID" #: src/hed/libs/message/MCCLoader.cpp:202 #, c-format msgid "Component %s(%s) could not be created" msgstr "Компонента %s(%s) не может быть создана" #: src/hed/libs/message/MCCLoader.cpp:232 #, c-format msgid "Component's %s(%s) next has no ID attribute defined" msgstr "Для компонента %s(%s) отсутствует атрибут ID следующей цели" #: src/hed/libs/message/MCCLoader.cpp:287 #, c-format msgid "Loaded MCC %s(%s)" msgstr "Подгружен MCC %s(%s)" #: src/hed/libs/message/MCCLoader.cpp:305 #, c-format msgid "Plexer's (%s) next has no ID attribute defined" msgstr "Для следующего после %s компонента Plexer не задан атрибут ID" #: src/hed/libs/message/MCCLoader.cpp:315 #, c-format msgid "Loaded Plexer %s" msgstr "Подгружен Plexer %s" #: src/hed/libs/message/MCCLoader.cpp:323 msgid "Service has no Name attribute defined" msgstr "Для службы не задан атрибут Name" #: src/hed/libs/message/MCCLoader.cpp:329 msgid "Service has no ID attribute defined" msgstr "Для службы не задан атрибут ID" #: src/hed/libs/message/MCCLoader.cpp:338 #, c-format msgid "Service %s(%s) could not be created" msgstr "Служба %s(%s) не может быть создана" #: src/hed/libs/message/MCCLoader.cpp:345 #, c-format msgid "Loaded Service %s(%s)" msgstr "Подгружена служба %s(%s)" #: src/hed/libs/message/MCCLoader.cpp:387 #, c-format msgid "Linking MCC %s(%s) to MCC (%s) under %s" msgstr "Подцепление MCC %s(%s) к MCC (%s) в %s" #: src/hed/libs/message/MCCLoader.cpp:398 #, c-format msgid "Linking MCC %s(%s) to Service (%s) under %s" msgstr "Подцепление MCC %s(%s) к службе (%s) в %s" #: src/hed/libs/message/MCCLoader.cpp:407 #, c-format msgid "Linking MCC %s(%s) to Plexer (%s) under %s" msgstr "Подцепление MCC %s(%s) к коммутатору (%s) в %s" #: src/hed/libs/message/MCCLoader.cpp:412 #, c-format msgid "MCC %s(%s) - next %s(%s) has no target" msgstr "MCC %s(%s) - следующий %s(%s) не содержит назначения" #: src/hed/libs/message/MCCLoader.cpp:431 #, c-format msgid "Linking Plexer %s to MCC (%s) under %s" msgstr "Подцепление коммутатора %s к MCC (%s) в %s" #: src/hed/libs/message/MCCLoader.cpp:442 #, c-format msgid "Linking Plexer %s to Service (%s) under %s" msgstr "Подцепление коммутатора %s к службе (%s) в %s" #: src/hed/libs/message/MCCLoader.cpp:451 #, c-format msgid "Linking Plexer %s to Plexer (%s) under %s" msgstr "Подцепление коммутатора %s к коммутатору (%s) в %s" #: src/hed/libs/message/MCCLoader.cpp:457 #, c-format msgid "Plexer (%s) - next %s(%s) has no target" msgstr "Коммутатор (%s) - следующий %s(%s) не содержит назначения" #: src/hed/libs/message/Plexer.cpp:31 #, c-format msgid "Bad label: \"%s\"" msgstr "Плохая метка: \"%s\"" #: src/hed/libs/message/Plexer.cpp:47 #, c-format msgid "Operation on path \"%s\"" msgstr "Действие над путём \"%s\"" #: src/hed/libs/message/Plexer.cpp:60 #, c-format msgid "No next MCC or Service at path \"%s\"" msgstr "Не найдено больше MCC или служб в пути \"%s\"" #: src/hed/libs/message/Service.cpp:33 #, c-format msgid "Security processing/check for '%s' failed" msgstr "Обработка/проверка параметров доступа '%s' не 
прошла" #: src/hed/libs/message/Service.cpp:37 #, c-format msgid "Security processing/check for '%s' passed" msgstr "Обработка/проверка параметров доступа '%s' завершилась успехом" #: src/hed/libs/message/Service.cpp:43 msgid "Empty registration collector" msgstr "Пустой сборщик регистраций" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:136 #, c-format msgid "Can not load ARC evaluator object: %s" msgstr "Невозможно подгрузить объект интерпретатора ARC : %s" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:187 #, c-format msgid "Can not load ARC request object: %s" msgstr "Невозможно подгрузить объект запроса ARC: %s" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:228 #, c-format msgid "Can not load policy object: %s" msgstr "Невозможно подгрузить объект политик: %s" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:276 msgid "Can not load policy object" msgstr "Невозможно подгрузить объект политик" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:324 msgid "Can not load request object" msgstr "Невозможно подгрузить объект запроса" #: src/hed/libs/security/ArcPDP/PolicyParser.cpp:119 msgid "Can not generate policy object" msgstr "Невозможно создать объект правил доступа" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:37 #, c-format msgid "Id= %s,Type= %s,Issuer= %s,Value= %s" msgstr "Id= %s,Тип= %s,Издатель= %s,Значение= %s" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:40 #, c-format msgid "No Attribute exists, which can deal with type: %s" msgstr "Не существует атрибутов, способных трактовать этот тип: %s" #: src/hed/mcc/http/MCCHTTP.cpp:157 #, c-format msgid "HTTP Error: %d %s" msgstr "Ошибка HTTP: %d %s" #: src/hed/mcc/http/MCCHTTP.cpp:222 msgid "Cannot create http payload" msgstr "Не удалось создать нагрузку http" #: src/hed/mcc/http/MCCHTTP.cpp:288 msgid "No next element in the chain" msgstr "Отсутствует следующий элемент цепи" #: src/hed/mcc/http/MCCHTTP.cpp:297 #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:253 msgid "next element of the chain returned error status" msgstr "следующий элемент цепи возвратил статус ошибки" #: src/hed/mcc/http/MCCHTTP.cpp:306 msgid "next element of the chain returned no payload" msgstr "следующий элемент в цепочке возвратил пустую нагрузку" #: src/hed/mcc/http/MCCHTTP.cpp:318 msgid "next element of the chain returned invalid/unsupported payload" msgstr "" "следующий элемент в цепи возвратил недопустимую или неподдерживаемую нагрузку" #: src/hed/mcc/http/MCCHTTP.cpp:393 msgid "Error to flush output payload" msgstr "Ошибка сброса исходящей нагрузки" #: src/hed/mcc/http/PayloadHTTP.cpp:306 #, c-format msgid "<< %s" msgstr "<< %s" #: src/hed/mcc/http/PayloadHTTP.cpp:355 src/hed/mcc/http/PayloadHTTP.cpp:457 #, c-format msgid "< %s" msgstr "< %s" #: src/hed/mcc/http/PayloadHTTP.cpp:576 msgid "Failed to parse HTTP header" msgstr "Сбой разбора заголовка HTTP" #: src/hed/mcc/http/PayloadHTTP.cpp:837 msgid "Invalid HTTP object can't produce result" msgstr "Недопустимый объект HTTP не может дать результат" #: src/hed/mcc/http/PayloadHTTP.cpp:950 #, c-format msgid "> %s" msgstr "> %s" #: src/hed/mcc/http/PayloadHTTP.cpp:975 msgid "Failed to write header to output stream" msgstr "Сбой при записи заголовка в выходной поток" #: src/hed/mcc/http/PayloadHTTP.cpp:1000 src/hed/mcc/http/PayloadHTTP.cpp:1006 #: src/hed/mcc/http/PayloadHTTP.cpp:1012 src/hed/mcc/http/PayloadHTTP.cpp:1022 #: src/hed/mcc/http/PayloadHTTP.cpp:1034 src/hed/mcc/http/PayloadHTTP.cpp:1039 #: src/hed/mcc/http/PayloadHTTP.cpp:1044 
src/hed/mcc/http/PayloadHTTP.cpp:1052 #: src/hed/mcc/http/PayloadHTTP.cpp:1059 msgid "Failed to write body to output stream" msgstr "Сбой при записи тела в выходной поток" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:31 msgid "Skipping service: no ServicePath found!" msgstr "Сервис пропускается: отсутствует ServicePath!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:37 msgid "Skipping service: no SchemaPath found!" msgstr "Сервис пропускается: отсутствует SchemaPath!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:89 msgid "Parser Context creation failed!" msgstr "Не удалось создать контекст анализатора!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:98 msgid "Cannot parse schema!" msgstr "Невозможно интерпретировать схему!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:110 msgid "Empty payload!" msgstr "Пустая нагрузка!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:119 msgid "Could not convert payload!" msgstr "Невозможно преобразовать нагрузку!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:125 #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:212 msgid "Could not create PayloadSOAP!" msgstr "Не удалось создать PayloadSOAP!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:196 msgid "Empty input payload!" msgstr "Пустая нагрузка на входе!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:205 msgid "Could not convert incoming payload!" msgstr "Не удалось преобразовать входную информацию!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:232 msgid "Missing schema! Skipping validation..." msgstr "Схема отсутствует! Сверка пропускается..." #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:237 msgid "Could not validate message!" msgstr "Не удалось подтвердить достоверность сообщения!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:245 #: src/hed/mcc/soap/MCCSOAP.cpp:222 msgid "empty next chain element" msgstr "следующий элемент в цепи пустой" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:257 #: src/hed/mcc/soap/MCCSOAP.cpp:238 msgid "next element of the chain returned empty payload" msgstr "следующий элемент в цепи возвратил пустую нагрузку" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:265 msgid "next element of the chain returned invalid payload" msgstr "следующий элемент в цепи возвратил недопустимую нагрузку" #: src/hed/mcc/soap/MCCSOAP.cpp:192 msgid "empty input payload" msgstr "пустая нагрузка на входе" #: src/hed/mcc/soap/MCCSOAP.cpp:198 msgid "incoming message is not SOAP" msgstr "входящее сообщение не в формате SOAP" #: src/hed/mcc/soap/MCCSOAP.cpp:215 src/hed/mcc/soap/MCCSOAP.cpp:372 msgid "Security check failed in SOAP MCC for incoming message" msgstr "Не прошла проверка безопасности в SOAP MCC для входящего сообщения" #: src/hed/mcc/soap/MCCSOAP.cpp:230 #, c-format msgid "next element of the chain returned error status: %s" msgstr "следующий элемент цепи возвратил статус ошибки: %s" #: src/hed/mcc/soap/MCCSOAP.cpp:249 msgid "next element of the chain returned unknown payload - passing through" msgstr "" "следующий элемент в цепи возвратил неопознанную нагрузку - пропускается" #: src/hed/mcc/soap/MCCSOAP.cpp:252 src/hed/mcc/soap/MCCSOAP.cpp:265 #: src/hed/mcc/soap/MCCSOAP.cpp:317 msgid "Security check failed in SOAP MCC for outgoing message" msgstr "Не прошла проверка безопасности в SOAP MCC для исходящего сообщения" #: src/hed/mcc/tcp/MCCTCP.cpp:104 src/hed/mcc/tcp/MCCTCP.cpp:636 msgid "Cannot initialize winsock library" msgstr "Не удалось инициализировать библиотеку winsock" #: src/hed/mcc/tcp/MCCTCP.cpp:119 msgid "Missing Port in Listen element" msgstr "В элементе Listen
отсутствует номер порта (Port)" #: src/hed/mcc/tcp/MCCTCP.cpp:128 msgid "Version in Listen element can't be recognized" msgstr "Версия в элементе Listen не опознана" #: src/hed/mcc/tcp/MCCTCP.cpp:137 #, c-format msgid "Failed to obtain local address for port %s - %s" msgstr "Не удалось получить локальный адрес для порта %s - %s" #: src/hed/mcc/tcp/MCCTCP.cpp:139 #, c-format msgid "Failed to obtain local address for %s:%s - %s" msgstr "Не удалось получить локальный адрес для %s:%s - %s" #: src/hed/mcc/tcp/MCCTCP.cpp:146 #, c-format msgid "Trying to listen on TCP port %s(%s)" msgstr "Попытка прослушать порт TCP %s(%s)" #: src/hed/mcc/tcp/MCCTCP.cpp:148 #, c-format msgid "Trying to listen on %s:%s(%s)" msgstr "Попытка прослушать %s:%s(%s)" #: src/hed/mcc/tcp/MCCTCP.cpp:154 #, c-format msgid "Failed to create socket for for listening at TCP port %s(%s): %s" msgstr "Не удалось создать сокет для прослушки порта TCP %s(%s): %s" #: src/hed/mcc/tcp/MCCTCP.cpp:156 #, c-format msgid "Failed to create socket for for listening at %s:%s(%s): %s" msgstr "Не удалось создать сокет для прослушки %s:%s(%s): %s" #: src/hed/mcc/tcp/MCCTCP.cpp:171 #, c-format msgid "" "Failed to limit socket to IPv6 at TCP port %s - may cause errors for IPv4 at " "same port" msgstr "" "Не удалось ограничить сокет под IPv6 на порте TCP %s - может привести к " "ошибкам для IPv4 по этому же порту" #: src/hed/mcc/tcp/MCCTCP.cpp:173 #, c-format msgid "" "Failed to limit socket to IPv6 at %s:%s - may cause errors for IPv4 at same " "port" msgstr "" "Не удалось ограничить сокет под IPv6 на %s:%s - может привести к ошибкам для " "IPv4 по этому же порту" #: src/hed/mcc/tcp/MCCTCP.cpp:181 #, c-format msgid "Failed to bind socket for TCP port %s(%s): %s" msgstr "Не удалось связать сокет с портом TCP %s(%s): %s" #: src/hed/mcc/tcp/MCCTCP.cpp:183 #, c-format msgid "Failed to bind socket for %s:%s(%s): %s" msgstr "Не удалось связать сокет с %s:%s(%s): %s" #: src/hed/mcc/tcp/MCCTCP.cpp:198 #, c-format msgid "Failed to listen at TCP port %s(%s): %s" msgstr "Не удалось прослушать порт TCP %s(%s): %s" #: src/hed/mcc/tcp/MCCTCP.cpp:200 #, c-format msgid "Failed to listen at %s:%s(%s): %s" msgstr "Не удалось прослушать %s:%s(%s): %s" #: src/hed/mcc/tcp/MCCTCP.cpp:217 #, c-format msgid "Listening on TCP port %s(%s)" msgstr "Прослушивается порт TCP %s(%s)" #: src/hed/mcc/tcp/MCCTCP.cpp:219 #, c-format msgid "Listening on %s:%s(%s)" msgstr "Прослушивается %s:%s(%s)" #: src/hed/mcc/tcp/MCCTCP.cpp:226 #, c-format msgid "Failed to start listening on any address for %s:%s" msgstr "Не удалось начать прослушивание ни по какому адресу для %s:%s" #: src/hed/mcc/tcp/MCCTCP.cpp:228 #, c-format msgid "Failed to start listening on any address for %s:%s(IPv%s)" msgstr "Не удалось начать прослушивание ни по какому адресу для %s:%s(IPv%s)" #: src/hed/mcc/tcp/MCCTCP.cpp:234 msgid "No listening ports initiated" msgstr "Не инициализированы прослушивающие порты" #: src/hed/mcc/tcp/MCCTCP.cpp:245 #, c-format msgid "Setting connections limit to %i, connections over limit will be %s" msgstr "" "Предельное количество соединений выставляется на %i, соединения сверх " "предела будут переведены в состояние %s" #: src/hed/mcc/tcp/MCCTCP.cpp:249 msgid "Failed to start thread for listening" msgstr "Не удалось запустить поток для прослушивания" #: src/hed/mcc/tcp/MCCTCP.cpp:285 msgid "Failed to start thread for communication" msgstr "Не удалось запустить поток для обмена информацией" #: src/hed/mcc/tcp/MCCTCP.cpp:315 msgid "Failed while waiting for connection request" msgstr 
"Сбой при ожидании запроса на соединение" #: src/hed/mcc/tcp/MCCTCP.cpp:337 msgid "Failed to accept connection request" msgstr "Не удалось принять запрос на соединение" #: src/hed/mcc/tcp/MCCTCP.cpp:346 msgid "Too many connections - dropping new one" msgstr "Слишком много соединений - новое отклонено" #: src/hed/mcc/tcp/MCCTCP.cpp:353 msgid "Too many connections - waiting for old to close" msgstr "Слишком много соединений - ожидание закрытия старых" #: src/hed/mcc/tcp/MCCTCP.cpp:577 msgid "next chain element called" msgstr "вызван следующий элемент в цепи" #: src/hed/mcc/tcp/MCCTCP.cpp:592 msgid "Only Raw Buffer payload is supported for output" msgstr "Для вывода поддерживается только неформатированный буфер" #: src/hed/mcc/tcp/MCCTCP.cpp:600 src/hed/mcc/tcp/MCCTCP.cpp:709 #: src/hed/mcc/tls/MCCTLS.cpp:545 msgid "Failed to send content of buffer" msgstr "Не удалось отправить содержимое буфера" #: src/hed/mcc/tcp/MCCTCP.cpp:612 msgid "TCP executor is removed" msgstr "Удалён исполнитель TCP" #: src/hed/mcc/tcp/MCCTCP.cpp:614 #, c-format msgid "Sockets do not match on exit %i != %i" msgstr "Сокеты на выходе не совпадают %i != %i" #: src/hed/mcc/tcp/MCCTCP.cpp:642 msgid "No Connect element specified" msgstr "Не задан элемент Connect" #: src/hed/mcc/tcp/MCCTCP.cpp:648 msgid "Missing Port in Connect element" msgstr "В элементе Connect отсутствует номер порта (Port)" #: src/hed/mcc/tcp/MCCTCP.cpp:654 msgid "Missing Host in Connect element" msgstr "В элементе Connect отсутствует название узла (Host)" #: src/hed/mcc/tcp/MCCTCP.cpp:685 msgid "TCP client process called" msgstr "Вызван процесс TCP клиента" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:71 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:87 #, c-format msgid "Failed to resolve %s (%s)" msgstr "Сбой при разрешении %s (%s)" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:97 #, c-format msgid "Trying to connect %s(%s):%d" msgstr "Попытка соединения с %s(%s):%d" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:101 #, c-format msgid "Failed to create socket for connecting to %s(%s):%d - %s" msgstr "Не удалось создать сокет для соединения с %s(%s):%d - %s" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:114 #, c-format msgid "" "Failed to get TCP socket options for connection to %s(%s):%d - timeout won't " "work - %s" msgstr "" "Не удалось получить параметры TCP-сокета для соединения с %s(%s):%d - " "прерывание по времени не будет работать - %s" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:121 #, c-format msgid "Failed to connect to %s(%s):%i - %s" msgstr "Не удалось установить соединение с %s(%s):%i - %s" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:131 #, c-format msgid "Timeout connecting to %s(%s):%i - %i s" msgstr "Истекло время ожидания соединения с %s(%s):%i - %i с" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:139 #, c-format msgid "Failed while waiting for connection to %s(%s):%i - %s" msgstr "Сбой при ожидании соединения с %s(%s):%i - %s" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:149 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:158 #, c-format msgid "Failed to connect to %s(%s):%i" msgstr "Не удалось установить соединение с %s(%s):%i" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:215 msgid "" "Received message out-of-band (not critical, ERROR level is just for " "debugging purposes)" msgstr "" "Получено сообщение вне полосы (некритично, уровень ERROR лишь для отладки)" #: src/hed/mcc/tls/DelegationCollector.cpp:39 msgid "Independent proxy - no rights granted" msgstr "Независимая доверенность - права не выделены" #: src/hed/mcc/tls/DelegationCollector.cpp:43 msgid "Proxy with all rights inherited" 
msgstr "Доверенность со всеми унаследованными правами" #: src/hed/mcc/tls/DelegationCollector.cpp:51 msgid "Proxy with empty policy - fail on unrecognized policy" msgstr "Доверенность с незаполненной политикой - отказ по неизвестной политике" #: src/hed/mcc/tls/DelegationCollector.cpp:56 #, c-format msgid "Proxy with specific policy: %s" msgstr "Доверенность с ограниченной политикой: %s" #: src/hed/mcc/tls/DelegationCollector.cpp:60 msgid "Proxy with ARC Policy" msgstr "Доверенность с политикой ARC" #: src/hed/mcc/tls/DelegationCollector.cpp:62 msgid "Proxy with unknown policy - fail on unrecognized policy" msgstr "Доверенность с неизвестной политикой - отказ по неизвестной политике" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:77 #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:128 #, c-format msgid "Was expecting %s at the beginning of \"%s\"" msgstr "Ожидалось %s в начале \"%s\"" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:82 #, c-format msgid "We only support CAs in Globus signing policy - %s is not supported" msgstr "" "Мы поддерживаем только CA в Globus signing policy - %s не поддерживается" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:87 #, c-format msgid "We only support X509 CAs in Globus signing policy - %s is not supported" msgstr "" "Мы поддерживаем только центры сертификации X509 в политике подписи Globus - %" "s не поддерживается" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:92 msgid "Missing CA subject in Globus signing policy" msgstr "Субъект центра сертификации отсутствует в политике подписи Globus" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:102 msgid "Negative rights are not supported in Globus signing policy" msgstr "Отрицательные права не поддерживаются политикой подписи Globus" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:106 #, c-format msgid "Unknown rights in Globus signing policy - %s" msgstr "Неизвестные права в политике подписи Globus - %s" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:111 #, c-format msgid "" "Only globus rights are supported in Globus signing policy - %s is not " "supported" msgstr "" "Мы поддерживаем только права globus в политике подписи Globus - %s не " "поддерживается" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:116 #, c-format msgid "" "Only signing rights are supported in Globus signing policy - %s is not " "supported" msgstr "" "Мы поддерживаем только права подписи в политике подписи Globus - %s не " "поддерживается" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:133 #, c-format msgid "" "We only support subjects conditions in Globus signing policy - %s is not " "supported" msgstr "" "Мы поддерживаем только условия субъекта в политике подписи Globus - %s не " "поддерживается" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:138 #, c-format msgid "" "We only support globus conditions in Globus signing policy - %s is not " "supported" msgstr "" "Мы поддерживаем только условия globus в политике подписи Globus - %s не " "поддерживается" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:144 msgid "Missing condition subjects in Globus signing policy" msgstr "Условия субъекта отсутствуют в политике подписи Globus" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:220 msgid "Unknown element in Globus signing policy" msgstr "Неизвестный элемент в политике подписи Globus" #: src/hed/mcc/tls/MCCTLS.cpp:222 msgid "Critical VOMS attribute processing failed" msgstr "Сбой обработки критического атрибута VOMS" #: src/hed/mcc/tls/MCCTLS.cpp:230 msgid "VOMS attribute validation failed" msgstr "Сбой проверки атрибутов VOMS" #: src/hed/mcc/tls/MCCTLS.cpp:232 msgid "VOMS attribute is ignored due to 
processing/validation error" msgstr "Атрибут VOMS игнорируется из-за ошибки обработки или проверки" #: src/hed/mcc/tls/MCCTLS.cpp:424 #, c-format msgid "Failed to establish connection: %s" msgstr "Сбой установления соединения: %s" #: src/hed/mcc/tls/MCCTLS.cpp:442 src/hed/mcc/tls/MCCTLS.cpp:524 #, c-format msgid "Peer name: %s" msgstr "Имя контакта: %s" #: src/hed/mcc/tls/MCCTLS.cpp:444 src/hed/mcc/tls/MCCTLS.cpp:526 #, c-format msgid "Identity name: %s" msgstr "Выделенное имя: %s" #: src/hed/mcc/tls/MCCTLS.cpp:446 src/hed/mcc/tls/MCCTLS.cpp:528 #, c-format msgid "CA name: %s" msgstr "Имя сертификационного агентства: %s" #: src/hed/mcc/tls/MCCTLS.cpp:452 msgid "Failed to process security attributes in TLS MCC for incoming message" msgstr "" "Не удалось обработать атрибуты безопасности в TLS MCC для входящего сообщения" #: src/hed/mcc/tls/MCCTLS.cpp:461 msgid "Security check failed in TLS MCC for incoming message" msgstr "Не прошла проверка безопасности в TLS MCC для входящего сообщения" #: src/hed/mcc/tls/MCCTLS.cpp:534 msgid "Security check failed for outgoing TLS message" msgstr "Не прошла проверка безопасности для исходящего сообщения TLS" #: src/hed/mcc/tls/MCCTLS.cpp:563 msgid "Security check failed for incoming TLS message" msgstr "Не прошла проверка безопасности для входящего сообщения TLS " #: src/hed/mcc/tls/PayloadTLSMCC.cpp:93 msgid "" "Failed to allocate memory for certificate subject while matching policy." msgstr "" "Не удалось зарезервировать память для имени субъекта сертификата при сверке " "с политиками." #: src/hed/mcc/tls/PayloadTLSMCC.cpp:98 msgid "" "Failed to retrieve link to TLS stream. Additional policy matching is skipped." msgstr "" "Не удалось получить ссылку на поток TLS. Дополнительная сверка политики " "пропускается." #: src/hed/mcc/tls/PayloadTLSMCC.cpp:128 #, c-format msgid "Certificate %s already expired" msgstr "Срок действия сертификата %s уже истёк" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:136 #, c-format msgid "Certificate %s will expire in %s" msgstr "Срок действия сертификата %s истечёт через %s" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:157 msgid "Failed to store application data" msgstr "Не удалось записать данные приложения" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:185 msgid "Failed to retrieve application data from OpenSSL" msgstr "Не удалось получить данные о приложении через OpenSSL" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:257 src/hed/mcc/tls/PayloadTLSMCC.cpp:351 msgid "Can not create the SSL Context object" msgstr "Не удалось создать объект SSL Context" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:270 src/hed/mcc/tls/PayloadTLSMCC.cpp:369 msgid "Can't set OpenSSL verify flags" msgstr "Не удалось выставить метки подтверждения OpenSSL" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:289 src/hed/mcc/tls/PayloadTLSMCC.cpp:383 msgid "Can not create the SSL object" msgstr "Не удалось создать объект SSL" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:308 msgid "Failed to establish SSL connection" msgstr "Не удалось установить соединение SSL" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:311 src/hed/mcc/tls/PayloadTLSMCC.cpp:398 #, c-format msgid "Using cipher: %s" msgstr "Используется алгоритм шифрования %s" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:395 msgid "Failed to accept SSL connection" msgstr "Не удалось принять соединение SSL" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:448 #, c-format msgid "Failed to shut down SSL: %s" msgstr "Не удалось прервать соединение SSL: %s" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:47 msgid "" "ArcAuthZ: failed to initiate all PDPs - this instance will be non-functional" msgstr "" 
"ArcAuthZ: не удалось инициализировать все PDP - этот процесс будет нерабочим" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:70 msgid "PDP: missing name attribute" msgstr "PDP: отсутствует атрибут имени" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:74 #, c-format msgid "PDP: %s (%s)" msgstr "PDP: %s (%s)" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:79 #, c-format msgid "PDP: %s (%s) can not be loaded" msgstr "PDP: %s (%s) не может быть подгружен" #: src/hed/shc/arcpdp/ArcEvaluationCtx.cpp:251 #, c-format msgid "There are %d RequestItems" msgstr "Обнаружено %d элементов запроса" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:60 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:55 msgid "Can not parse classname for FunctionFactory from configuration" msgstr "Не удалось определить имя класса для FunctionFactory из настроек" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:68 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:63 msgid "Can not parse classname for AttributeFactory from configuration" msgstr "Не удалось определить имя класса для AttributeFactory из настроек" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:76 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:71 msgid "" "Can not parse classname for CombiningAlgorithmFactory from configuration" msgstr "" "Не удалось определить имя класса для CombiningAlgorithmFactory из настроек" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:84 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:79 msgid "Can not parse classname for Request from configuration" msgstr "Не удалось определить имя класса для Request из настроек" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:93 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:88 msgid "Can not parse classname for Policy from configuration" msgstr "Не удалось определить имя класса для Policy из настроек" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:105 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:100 msgid "Can not dynamically produce AttributeFactory" msgstr "Не удалось динамически создать AttributeFactory" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:110 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:105 msgid "Can not dynamically produce FnFactory" msgstr "Не удалось динамически создать FnFactory" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:115 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:110 msgid "Can not dynamically produce AlgFacroty" msgstr "Не удалось динамически создать AlgFacroty" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:126 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:31 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:37 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:121 msgid "Can not create PolicyStore object" msgstr "Не удалось создать объект PolicyStore" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:177 src/hed/shc/test.cpp:183 #: src/hed/shc/testinterface_arc.cpp:102 #: src/hed/shc/testinterface_xacml.cpp:54 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:172 msgid "Can not dynamically produce Request" msgstr "Не удалось динамически создать Request" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:261 #, c-format msgid "Result value (0=Permit, 1=Deny, 2=Indeterminate, 3=Not_Applicable): %d" msgstr "Результат (0=Допуск, 1=Отказ, 2=Неопределённый, 3=Неприменим): %d" #: src/hed/shc/arcpdp/ArcPDP.cpp:110 msgid "Can not find ArcPDPContext" msgstr "Не обнаружен ArcPDPContext" #: src/hed/shc/arcpdp/ArcPDP.cpp:139 src/hed/shc/xacmlpdp/XACMLPDP.cpp:117 msgid "Evaluator does not support loadable Combining Algorithms" msgstr "Обработчик не поддерживает подгружаемые алгоритмы комбинирования" #: src/hed/shc/arcpdp/ArcPDP.cpp:143 src/hed/shc/xacmlpdp/XACMLPDP.cpp:121 #, c-format msgid "Evaluator does not support specified Combining Algorithm - %s" msgstr 
"Обработчик не поддерживает указанный алгоритм комбинирования - %s" #: src/hed/shc/arcpdp/ArcPDP.cpp:155 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:84 #: src/hed/shc/gaclpdp/GACLPDP.cpp:118 src/hed/shc/test.cpp:94 #: src/hed/shc/testinterface_arc.cpp:37 src/hed/shc/testinterface_xacml.cpp:37 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:133 msgid "Can not dynamically produce Evaluator" msgstr "Не удалось динамически создать анализатор" #: src/hed/shc/arcpdp/ArcPDP.cpp:158 msgid "Evaluator for ArcPDP was not loaded" msgstr "Обработчик для ArcPDP не был загружен" #: src/hed/shc/arcpdp/ArcPDP.cpp:165 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:57 #: src/hed/shc/gaclpdp/GACLPDP.cpp:128 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:87 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:143 src/tests/echo/echo.cpp:108 msgid "Missing security object in message" msgstr "В сообщении отсутствует объект авторизации" #: src/hed/shc/arcpdp/ArcPDP.cpp:173 src/hed/shc/arcpdp/ArcPDP.cpp:181 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:137 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:143 #: src/hed/shc/gaclpdp/GACLPDP.cpp:136 src/hed/shc/gaclpdp/GACLPDP.cpp:144 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:95 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:103 #: src/tests/echo/echo.cpp:116 src/tests/echo/echo.cpp:123 msgid "Failed to convert security information to ARC request" msgstr "Не удалось преобразовать информацию о защите в запрос ARC" #: src/hed/shc/arcpdp/ArcPDP.cpp:189 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:150 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:111 #, c-format msgid "ARC Auth. request: %s" msgstr "Запрос авторизации ARC: %s" #: src/hed/shc/arcpdp/ArcPDP.cpp:192 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:153 #: src/hed/shc/gaclpdp/GACLPDP.cpp:155 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:114 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:170 msgid "No requested security information was collected" msgstr "Не удалось собрать запрошенную информацию о безопасности" #: src/hed/shc/arcpdp/ArcPDP.cpp:199 msgid "Not authorized from arc.pdp - failed to get reponse from Evaluator" msgstr "Нет допуска от arc.pdp - не удалось получить ответ обработчика" #: src/hed/shc/arcpdp/ArcPDP.cpp:245 msgid "Authorized by arc.pdp" msgstr "Допущен через arc.pdp" #: src/hed/shc/arcpdp/ArcPDP.cpp:246 msgid "" "Not authorized by arc.pdp - some of the RequestItem elements do not satisfy " "Policy" msgstr "" "Нет допуска от arc.pdp - некоторые элементы RequestItem не удовлетворяют " "политике" #: src/hed/shc/arcpdp/ArcPolicy.cpp:56 src/hed/shc/arcpdp/ArcPolicy.cpp:70 #: src/hed/shc/gaclpdp/GACLPolicy.cpp:46 src/hed/shc/gaclpdp/GACLPolicy.cpp:59 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:48 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:64 msgid "Policy is empty" msgstr "Пустые правила" #: src/hed/shc/arcpdp/ArcPolicy.cpp:114 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:115 #, c-format msgid "PolicyId: %s Alg inside this policy is:-- %s" msgstr "PolicyId: %s Внутренний алгоритм политики:-- %s" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:75 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:129 msgid "No delegation policies in this context and message - passing through" msgstr "" "В данном контексте и сообщении отсутствуют политики делегирования - " "пропускается" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:95 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:109 msgid "Failed to convert security information to ARC policy" msgstr "Не удалось преобразовать информацию о безопасности в политику 
ARC" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:116 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:123 #, c-format msgid "ARC delegation policy: %s" msgstr "Политика делегирования ARC: %s" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:161 msgid "No authorization response was returned" msgstr "Не получен ответ о допуске" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:164 #, c-format msgid "There are %d requests, which satisfy at least one policy" msgstr "Обнаружены %d запроса, удовлетворяющих хотя бы одной политике" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:183 msgid "Delegation authorization passed" msgstr "Авторизация делегирование пройдена" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:185 msgid "Delegation authorization failed" msgstr "Авторизация на делегирование не выдана" #: src/hed/shc/delegationsh/DelegationSH.cpp:63 msgid "" "Missing CertificatePath element or ProxyPath element, or " " is missing" msgstr "" "Отсутствует элемент CertificatePath или ProxyPath element, или " "" #: src/hed/shc/delegationsh/DelegationSH.cpp:68 msgid "" "Missing or empty KeyPath element, or is missing" msgstr "" "Элемент KeyPath отсутствует или пуст, либо отсутствует " "" #: src/hed/shc/delegationsh/DelegationSH.cpp:74 msgid "Missing or empty CertificatePath or CACertificatesDir element" msgstr "Элемент CertificatePath или CACertificatesDir отсутствует или пуст" #: src/hed/shc/delegationsh/DelegationSH.cpp:81 #, c-format msgid "Delegation role not supported: %s" msgstr "Неподдерживаемая роль делегирования: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:90 #, c-format msgid "Delegation type not supported: %s" msgstr "Неподдерживаемый тип делегирования: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:115 msgid "Failed to acquire delegation context" msgstr "Не удалось извлечь контекст делегирования" #: src/hed/shc/delegationsh/DelegationSH.cpp:143 #: src/hed/shc/delegationsh/DelegationSH.cpp:254 msgid "Can't create delegation context" msgstr "Не удалось создать контекст делегирования" #: src/hed/shc/delegationsh/DelegationSH.cpp:149 msgid "Delegation handler with delegatee role starts to process" msgstr "Запущен обработчик делегирования с ролью делегата" #: src/hed/shc/delegationsh/DelegationSH.cpp:152 #: src/services/a-rex/arex.cpp:554 #: src/services/cache_service/CacheService.cpp:529 #: src/services/data-staging/DataDeliveryService.cpp:630 msgid "process: POST" msgstr "процесс: POST" #: src/hed/shc/delegationsh/DelegationSH.cpp:159 #: src/services/a-rex/arex.cpp:561 #: src/services/cache_service/CacheService.cpp:538 #: src/services/data-staging/DataDeliveryService.cpp:639 #: src/services/wrappers/java/javawrapper.cpp:140 #: src/services/wrappers/python/pythonwrapper.cpp:413 msgid "input is not SOAP" msgstr "ввод не в формате SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:166 #, c-format msgid "Delegation service: %s" msgstr "Служба делегирования: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:181 #: src/hed/shc/delegationsh/DelegationSH.cpp:188 #, c-format msgid "Can not get the delegation credential: %s from delegation service: %s" msgstr "" "Не удалось получить делегированные параметры доступа: %s от службы " "делегирования:%s" #: src/hed/shc/delegationsh/DelegationSH.cpp:204 #: src/hed/shc/delegationsh/DelegationSH.cpp:268 #, c-format msgid "Delegated credential identity: %s" msgstr "Отличительные признаки делегированных параметров доступа: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:205 #, c-format msgid "" "The delegated credential got from delegation service is stored into 
path: %s" msgstr "" "Делегированные параметры доступа полученные от службы делегирования записаны " "в каталоге: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:218 msgid "The endpoint of delgation service should be configured" msgstr "Конечная точка сервиса делегирования должна быть настроена" #: src/hed/shc/delegationsh/DelegationSH.cpp:228 #: src/hed/shc/delegationsh/DelegationSH.cpp:340 msgid "Delegation handler with delegatee role ends" msgstr "Завершена обработка делегирования с ролью делегата" #: src/hed/shc/delegationsh/DelegationSH.cpp:260 msgid "Delegation handler with delegator role starts to process" msgstr "Запущен обработчик делегирования с ролью поручителя" #: src/hed/shc/delegationsh/DelegationSH.cpp:269 #, c-format msgid "The delegated credential got from path: %s" msgstr "Делегированные параметры доступа извлечены из каталога: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:290 #, c-format msgid "Can not create delegation crendential to delegation service: %s" msgstr "Невозможно создать делегируемый документ для службы делегированию: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:328 #: src/services/wrappers/java/javawrapper.cpp:144 msgid "output is not SOAP" msgstr "вывод не в формате SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:339 #, c-format msgid "" "Succeeded to send DelegationService: %s and DelegationID: %s info to peer " "service" msgstr "" "Информация о DelegationService: %s и DelegationID: %s успешно отправлена " "партнёрскому сервису" #: src/hed/shc/delegationsh/DelegationSH.cpp:345 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:220 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:101 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:94 msgid "Incoming Message is not SOAP" msgstr "Входящее сообщение не в формате SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:352 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:341 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:123 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:108 msgid "Outgoing Message is not SOAP" msgstr "Исходящее сообщение не является сообщением SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:356 msgid "Delegation handler is not configured" msgstr "Обработчик делегирвания не настроен" #: src/hed/shc/gaclpdp/GACLPDP.cpp:121 msgid "Evaluator for GACLPDP was not loaded" msgstr "Обработчик для GACLPDP не был загружен" #: src/hed/shc/gaclpdp/GACLPDP.cpp:152 #, c-format msgid "GACL Auth. 
request: %s" msgstr "Запрос авторизации GACL: %s" #: src/hed/shc/gaclpdp/GACLPolicy.cpp:50 src/hed/shc/gaclpdp/GACLPolicy.cpp:63 msgid "Policy is not gacl" msgstr "Политика не в формате GACL" #: src/hed/shc/legacy/ConfigParser.cpp:13 msgid "Configuration file not specified" msgstr "Не указан файл настроек" #: src/hed/shc/legacy/ConfigParser.cpp:18 #: src/hed/shc/legacy/ConfigParser.cpp:53 #: src/hed/shc/legacy/ConfigParser.cpp:58 msgid "Configuration file can not be read" msgstr "Невозможно прочесть файл настроек" #: src/hed/shc/legacy/ConfigParser.cpp:68 #, c-format msgid "Configuration file is broken - block name is too short: %s" msgstr "Файл настроек испорчен - слишком короткое название блока: %s" #: src/hed/shc/legacy/LegacyMap.cpp:31 src/hed/shc/legacy/LegacyPDP.cpp:96 msgid "Configuration file not specified in ConfigBlock" msgstr "Не указан файл настроек в ConfigBlock" #: src/hed/shc/legacy/LegacyMap.cpp:40 src/hed/shc/legacy/LegacyPDP.cpp:105 msgid "BlockName is empty" msgstr "Не указан BlockName" #: src/hed/shc/legacy/LegacyMap.cpp:99 #, c-format msgid "Failed processing user mapping command: unixmap %s" msgstr "Сбой работы команды соответствия пользователя: unixmap %s" #: src/hed/shc/legacy/LegacyMap.cpp:106 #, c-format msgid "Failed processing user mapping command: unixgroup %s" msgstr "Сбой работы команды соответствия пользователя: unixgroup %s" #: src/hed/shc/legacy/LegacyMap.cpp:113 #, c-format msgid "Failed processing user mapping command: unixvo %s" msgstr "Сбой работы команды соответствия пользователя: unixvo %s" #: src/hed/shc/legacy/LegacyMap.cpp:173 msgid "LegacyMap: no configurations blocks defined" msgstr "LegacyMap: не заданы группы настроек" #: src/hed/shc/legacy/LegacyMap.cpp:195 src/hed/shc/legacy/LegacyPDP.cpp:210 msgid "" "LegacyPDP: there is no ARCLEGACY Sec Attribute defined. Probably ARC Legacy " "Sec Handler is not configured or failed." msgstr "" "LegacyPDP: атрибут безопасности ARCLEGACY не задан. Возможно, обработчик " "безопасности ARC Legacy не настроен, или претерпел сбой." #: src/hed/shc/legacy/LegacyMap.cpp:200 src/hed/shc/legacy/LegacyPDP.cpp:215 msgid "LegacyPDP: ARC Legacy Sec Attribute not recognized." msgstr "LegacyPDP: атрибут безопасности ARC Legacy не опознан." 
#: src/hed/shc/legacy/LegacyPDP.cpp:115 #, c-format msgid "Failed to parse configuration file %s" msgstr "Сбой при разборе файла настроек %s" #: src/hed/shc/legacy/LegacyPDP.cpp:121 #, c-format msgid "Block %s not found in configuration file %s" msgstr "Блок %s не обнаружен в файле настроек %s" #: src/hed/shc/legacy/LegacySecHandler.cpp:36 #: src/hed/shc/legacy/LegacySecHandler.cpp:110 msgid "LegacySecHandler: configuration file not specified" msgstr "LegacySecHandler: не указан файл настроек" #: src/hed/shc/legacy/arc_lcas.cpp:146 src/hed/shc/legacy/arc_lcmaps.cpp:163 #, c-format msgid "" "Failed to convert GSI credential to GSS credential (major: %d, minor: %d)" msgstr "" "Не удалось преобразовать параметры доступа GSI в GSS (major: %d, minor: %d)" #: src/hed/shc/legacy/arc_lcas.cpp:172 src/hed/shc/legacy/arc_lcmaps.cpp:189 msgid "Missing subject name" msgstr "Отсутствует имя субъекта" #: src/hed/shc/legacy/arc_lcas.cpp:177 src/hed/shc/legacy/arc_lcmaps.cpp:194 msgid "Missing path of credentials file" msgstr "Отсутствует путь к файлу параметров доступа" #: src/hed/shc/legacy/arc_lcas.cpp:182 msgid "Missing name of LCAS library" msgstr "Отсутствует имя библиотеки LCAS" #: src/hed/shc/legacy/arc_lcas.cpp:199 #, c-format msgid "Can't load LCAS library %s: %s" msgstr "Невозможно загрузить библиотеку LCAS %s: %s" #: src/hed/shc/legacy/arc_lcas.cpp:209 #, c-format msgid "Can't find LCAS functions in a library %s" msgstr "Не удалось обнаружить функции LCAS в библиотеке %s" #: src/hed/shc/legacy/arc_lcas.cpp:219 msgid "Failed to initialize LCAS" msgstr "Сбой инициализации LCAS" #: src/hed/shc/legacy/arc_lcas.cpp:234 msgid "Failed to terminate LCAS" msgstr "Сбой остановки LCAS" #: src/hed/shc/legacy/arc_lcmaps.cpp:199 msgid "Missing name of LCMAPS library" msgstr "Отсутствует имя библиотеки LCMAPS" #: src/hed/shc/legacy/arc_lcmaps.cpp:213 msgid "Can't read policy names" msgstr "Невозможно прочесть названия политик" #: src/hed/shc/legacy/arc_lcmaps.cpp:224 #, c-format msgid "Can't load LCMAPS library %s: %s" msgstr "Невозможно загрузить библиотеку LCMAPS %s: %s" #: src/hed/shc/legacy/arc_lcmaps.cpp:236 #, c-format msgid "Can't find LCMAPS functions in a library %s" msgstr "Не удалось обнаружить функции LCMAPS в библиотеке %s" #: src/hed/shc/legacy/arc_lcmaps.cpp:248 msgid "LCMAPS has lcmaps_run" msgstr "LCMAPS содержит lcmaps_run" #: src/hed/shc/legacy/arc_lcmaps.cpp:249 msgid "LCMAPS has getCredentialData" msgstr "LCMAPS содержит getCredentialData" #: src/hed/shc/legacy/arc_lcmaps.cpp:253 msgid "Failed to initialize LCMAPS" msgstr "Сбой инициализации LCMAPS" #: src/hed/shc/legacy/arc_lcmaps.cpp:296 #, c-format msgid "LCMAPS returned invalid GID: %u" msgstr "LCMAPS возвратил недопустимый GID: %u" #: src/hed/shc/legacy/arc_lcmaps.cpp:299 msgid "LCMAPS did not return any GID" msgstr "LCMAPS не возвратил никакого GID" #: src/hed/shc/legacy/arc_lcmaps.cpp:302 #, c-format msgid "LCMAPS returned UID which has no username: %u" msgstr "LCMAPS возвратил UID не соответствующий учётной записи: %u" #: src/hed/shc/legacy/arc_lcmaps.cpp:305 #, c-format msgid "LCMAPS returned invalid UID: %u" msgstr "LCMAPS возвратил недопустимый UID: %u" #: src/hed/shc/legacy/arc_lcmaps.cpp:308 msgid "LCMAPS did not return any UID" msgstr "LCMAPS не возвратил никакого UID" #: src/hed/shc/legacy/arc_lcmaps.cpp:318 msgid "Failed to terminate LCMAPS" msgstr "Сбой остановки LCMAPS" #: src/hed/shc/legacy/auth.cpp:293 #, c-format msgid "Credentials stored in temporary file %s" msgstr "Параметры доступа сохранены во временном файле 
%s" #: src/hed/shc/legacy/auth.cpp:302 #, c-format msgid "Assigned to authorization group %s" msgstr "Приписан к группе допуска %s" #: src/hed/shc/legacy/auth.cpp:307 #, c-format msgid "Assigned to VO %s" msgstr "Приписан к VO %s" #: src/hed/shc/legacy/auth_file.cpp:24 #: src/services/gridftpd/auth/auth_file.cpp:24 #, c-format msgid "Failed to read file %s" msgstr "Сбой при чтении файла %s" #: src/hed/shc/legacy/auth_ldap.cpp:22 msgid "LDAP authorization is not supported anymore" msgstr "Авторизация для LDAP больше не поддерживается" #: src/hed/shc/legacy/auth_plugin.cpp:44 src/hed/shc/legacy/unixmap.cpp:260 #: src/services/gridftpd/auth/auth_plugin.cpp:70 #: src/services/gridftpd/auth/unixmap.cpp:251 #, c-format msgid "Plugin %s returned: %u" msgstr "Подключаемый модуль %s ответил: %u" #: src/hed/shc/legacy/auth_plugin.cpp:48 src/hed/shc/legacy/unixmap.cpp:264 #, c-format msgid "Plugin %s timeout after %u seconds" msgstr "Время ожидания подключаемого модуля %s истекло после %u секунд" #: src/hed/shc/legacy/auth_plugin.cpp:51 src/hed/shc/legacy/unixmap.cpp:267 #, c-format msgid "Plugin %s failed to start" msgstr "Подключаемый модуль %s не смог запуститься" #: src/hed/shc/legacy/auth_plugin.cpp:53 src/hed/shc/legacy/unixmap.cpp:269 #, c-format msgid "Plugin %s printed: %s" msgstr "Подключаемый модуль %s вывел на печать: %s" #: src/hed/shc/legacy/auth_plugin.cpp:54 src/hed/shc/legacy/unixmap.cpp:270 #, c-format msgid "Plugin %s error: %s" msgstr "Ошибка подключаемого модуля %s: %s" #: src/hed/shc/legacy/auth_voms.cpp:39 #: src/services/gridftpd/auth/auth_voms.cpp:45 msgid "Missing VO in configuration" msgstr "В настройках отсутствует ВО" #: src/hed/shc/legacy/auth_voms.cpp:44 #: src/services/gridftpd/auth/auth_voms.cpp:51 msgid "Missing group in configuration" msgstr "В настройках отсутствует группа" #: src/hed/shc/legacy/auth_voms.cpp:49 #: src/services/gridftpd/auth/auth_voms.cpp:57 msgid "Missing role in configuration" msgstr "В настройках отсутствует роль" #: src/hed/shc/legacy/auth_voms.cpp:54 #: src/services/gridftpd/auth/auth_voms.cpp:63 msgid "Missing capabilities in configuration" msgstr "В настройках отсутствуют возможности" #: src/hed/shc/legacy/auth_voms.cpp:58 #: src/services/gridftpd/auth/auth_voms.cpp:67 #, c-format msgid "Rule: vo: %s" msgstr "Правило: ВО: %s" #: src/hed/shc/legacy/auth_voms.cpp:59 #: src/services/gridftpd/auth/auth_voms.cpp:68 #, c-format msgid "Rule: group: %s" msgstr "Правило: группа: %s" #: src/hed/shc/legacy/auth_voms.cpp:60 #: src/services/gridftpd/auth/auth_voms.cpp:69 #, c-format msgid "Rule: role: %s" msgstr "Правило: роль: %s" #: src/hed/shc/legacy/auth_voms.cpp:61 #: src/services/gridftpd/auth/auth_voms.cpp:70 #, c-format msgid "Rule: capabilities: %s" msgstr "Правило: возможности: %s" #: src/hed/shc/legacy/auth_voms.cpp:64 #: src/services/gridftpd/auth/auth_voms.cpp:77 #, c-format msgid "Match vo: %s" msgstr "Совпадение ВО: %s" #: src/hed/shc/legacy/auth_voms.cpp:71 #, c-format msgid "Matched: %s %s %s %s" msgstr "Соответствие: %s %s %s %s" #: src/hed/shc/legacy/auth_voms.cpp:86 #: src/services/gridftpd/auth/auth_voms.cpp:98 msgid "Matched nothing" msgstr "Совпадений нет" #: src/hed/shc/legacy/simplemap.cpp:71 src/hed/shc/legacy/simplemap.cpp:76 #: src/services/gridftpd/auth/simplemap.cpp:63 #: src/services/gridftpd/auth/simplemap.cpp:68 #, c-format msgid "SimpleMap: %s" msgstr "SimpleMap: %s" #: src/hed/shc/legacy/unixmap.cpp:49 src/hed/shc/legacy/unixmap.cpp:54 #: src/hed/shc/legacy/unixmap.cpp:98 src/hed/shc/legacy/unixmap.cpp:103 #: 
src/hed/shc/legacy/unixmap.cpp:147 src/hed/shc/legacy/unixmap.cpp:152 #: src/services/gridftpd/auth/unixmap.cpp:47 #: src/services/gridftpd/auth/unixmap.cpp:52 #: src/services/gridftpd/auth/unixmap.cpp:96 #: src/services/gridftpd/auth/unixmap.cpp:101 #: src/services/gridftpd/auth/unixmap.cpp:145 #: src/services/gridftpd/auth/unixmap.cpp:150 msgid "User name mapping command is empty" msgstr "Пустая команда в присвоении имени пользователя" #: src/hed/shc/legacy/unixmap.cpp:61 src/services/gridftpd/auth/unixmap.cpp:59 #, c-format msgid "User name mapping has empty group: %s" msgstr "Пустая группа в присвоении имени пользователя: %s" #: src/hed/shc/legacy/unixmap.cpp:72 src/hed/shc/legacy/unixmap.cpp:121 #: src/hed/shc/legacy/unixmap.cpp:169 #: src/services/gridftpd/auth/unixmap.cpp:70 #: src/services/gridftpd/auth/unixmap.cpp:119 #: src/services/gridftpd/auth/unixmap.cpp:167 #, c-format msgid "User name mapping has empty command: %s" msgstr "Пустая команда в присвоении имени пользователя: %s" #: src/hed/shc/legacy/unixmap.cpp:110 #: src/services/gridftpd/auth/unixmap.cpp:108 #, c-format msgid "User name mapping has empty VO: %s" msgstr "Пустая VO в присвоении имени пользователя: %s" #: src/hed/shc/legacy/unixmap.cpp:159 #: src/services/gridftpd/auth/unixmap.cpp:157 #, c-format msgid "User name mapping has empty name: %s" msgstr "Пустое имя в присвоении имени пользователя: %s" #: src/hed/shc/legacy/unixmap.cpp:208 src/hed/shc/legacy/unixmap.cpp:213 #: src/hed/shc/legacy/unixmap.cpp:229 src/hed/shc/legacy/unixmap.cpp:235 #: src/services/gridftpd/auth/unixmap.cpp:212 #: src/services/gridftpd/auth/unixmap.cpp:217 #: src/services/gridftpd/auth/unixmap.cpp:233 msgid "Plugin (user mapping) command is empty" msgstr "Пустая команда в подключаемом модуле (присвоение имени пользователя)" #: src/hed/shc/legacy/unixmap.cpp:219 #: src/services/gridftpd/auth/unixmap.cpp:223 #, c-format msgid "Plugin (user mapping) timeout is not a number: %s" msgstr "" "Нецифровое значение времени ожидания в подключаемом модуле (присвоение имени " "пользователя): %s" #: src/hed/shc/legacy/unixmap.cpp:223 #: src/services/gridftpd/auth/unixmap.cpp:227 #, c-format msgid "Plugin (user mapping) timeout is wrong number: %s" msgstr "" "Неприемлемое значение времени ожидания в подключаемом модуле (присвоение " "имени пользователя): %s" #: src/hed/shc/legacy/unixmap.cpp:257 #: src/services/gridftpd/auth/unixmap.cpp:248 #, c-format msgid "Plugin %s returned too much: %s" msgstr "Подключаемый модуль %s вернул слишком длинный ответ: %s" #: src/hed/shc/legacy/unixmap.cpp:278 msgid "User subject match is missing user subject." msgstr "Отсутствует субъект пользователя для соответствия субъекта." #: src/hed/shc/legacy/unixmap.cpp:282 #: src/services/gridftpd/auth/unixmap.cpp:266 #, c-format msgid "Mapfile at %s can't be opened." msgstr "Невозможно открыть файл соответствий в %s." #: src/hed/shc/legacy/unixmap.cpp:305 #: src/services/gridftpd/auth/unixmap.cpp:290 msgid "User pool call is missing user subject." msgstr "Отсутствует субъект вызова пула пользователей." #: src/hed/shc/legacy/unixmap.cpp:310 #: src/services/gridftpd/auth/unixmap.cpp:295 #, c-format msgid "User pool at %s can't be opened." msgstr "Невозможно открыть пул пользователей в %s." #: src/hed/shc/legacy/unixmap.cpp:315 #: src/services/gridftpd/auth/unixmap.cpp:300 #, c-format msgid "User pool at %s failed to perform user mapping." 
msgstr "" "Пул пользователей в %s не смог установить соответствие для пользователя" #: src/hed/shc/legacy/unixmap.cpp:332 #: src/services/gridftpd/auth/unixmap.cpp:317 #, c-format msgid "User name direct mapping is missing user name: %s." msgstr "Отсутствует имя пользователя в прямом присвоении имени: %s." #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:48 msgid "Creating a pdpservice client" msgstr "Создаётся клиент pdpservice" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:80 msgid "Arc policy can not been carried by SAML2.0 profile of XACML" msgstr "Политика ARC не может быть задана в профиле SAML2.0 XACML" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:152 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:185 msgid "Policy Decision Service invocation failed" msgstr "Не удалось запустить службу принятия решений по политикам" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:170 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:204 msgid "Authorized from remote pdp service" msgstr "Допущен удалённой службой PDP" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:171 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:205 msgid "Unauthorized from remote pdp service" msgstr "Не допущен удалённой службой PDP" #: src/hed/shc/saml2sso_assertionconsumersh/SAML2SSO_AssertionConsumerSH.cpp:69 msgid "Can not get SAMLAssertion SecAttr from message context" msgstr "Невозможно извлечь SAMLAssertion SecAttr из контекста сообщения" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:152 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:44 msgid "Missing or empty CertificatePath element" msgstr "Элемент CertificatePath отсутствует или пуст" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:157 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:49 msgid "Missing or empty KeyPath element" msgstr "Элемент KeyPath отсутствует или пуст" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:163 msgid "" "Both of CACertificatePath and CACertificatesDir elements missing or empty" msgstr "" "Оба элемента CACertificatePath and CACertificatesDir отсутствуют или пусты" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:175 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:61 msgid "" "Missing or empty CertificatePath or CACertificatesDir element; will only " "check the signature, will not do message authentication" msgstr "" "Элемент CertificatePath или CACertificatesDir отсутствует или пуст; будет " "выполнена лишь проверка подписи, а не удостоверение подлинности сообщения" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:179 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:65 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:65 #, c-format msgid "Processing type not supported: %s" msgstr "Неподдерживаемый тип обработки: %s" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:199 msgid "Failed to parse SAML Token from incoming SOAP" msgstr "Не удалось разобрать токен SAML из входящего документа SOAP" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:209 msgid "Failed to authenticate SAML Token inside the incoming SOAP" msgstr "" "Не удалось установить подлинность токена SAML во входящем документе SOAP" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:212 msgid "Succeeded to authenticate SAMLToken" msgstr "Успешная проверка подлинности токена SAML" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:275 #, c-format msgid "No response from AA service %s" msgstr "Нет ответа от сервера AA %s" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:279 #, c-format msgid "SOAP Request to AA service %s failed" msgstr "Ошибка запроса SOAP к серверу AA %s" #: 
src/hed/shc/samltokensh/SAMLTokenSH.cpp:287 msgid "Cannot find content under response soap message" msgstr "Не удалось найти содержание ответного сообщения SOAP" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:291 msgid "Cannot find under response soap message:" msgstr "Не удалось найти элемент в ответном сообщении SOAP:" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:308 msgid "The Response is not going to this end" msgstr "Отклик досюда не дошёл" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:315 msgid "The StatusCode is Success" msgstr "StatusCode: Success" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:335 msgid "Failed to generate SAML Token for outgoing SOAP" msgstr "Не удалось создать токен SAML для исходящего сообщения SOAP" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:345 msgid "SAML Token handler is not configured" msgstr "Обработчик токена SAML не настроен" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:29 #, c-format msgid "Access list location: %s" msgstr "Местонахождение списка доступа: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:39 msgid "" "No policy file or DNs specified for simplelist.pdp, please set location " "attribute or at least one DN element for simplelist PDP node in " "configuration." msgstr "" "Для simplelist.pdp не задан файл политик или DN; пожалуйста, задайте в " "настройках атрибут location или хотя бы один элемент DN для узла PDP " "simplelist." #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:42 #, c-format msgid "Subject to match: %s" msgstr "Субъект для сверки: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:45 #, c-format msgid "Policy subject: %s" msgstr "Субъект политики: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:47 #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:73 #, c-format msgid "Authorized from simplelist.pdp: %s" msgstr "Допущен через simplelist.pdp: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:54 msgid "" "The policy file setup for simplelist.pdp does not exist, please check " "location attribute for simplelist PDP node in service configuration" msgstr "" "Для simplelist.pdp не задан файл настройки политик; пожалуйста, проверьте " "атрибут location в настройках службы узла PDP simplelist." 
#: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:61 #, c-format msgid "Policy line: %s" msgstr "Строка политики: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:79 #, c-format msgid "Not authorized from simplelist.pdp: %s" msgstr "Не допущен через simplelist.pdp: %s" #: src/hed/shc/test.cpp:27 src/hed/shc/testinterface_arc.cpp:26 #: src/hed/shc/testinterface_xacml.cpp:26 msgid "Start test" msgstr "Начать тест" #: src/hed/shc/test.cpp:101 msgid "Input request from a file: Request.xml" msgstr "Запрос ввода из файла: Request.xml" #: src/hed/shc/test.cpp:107 src/hed/shc/test.cpp:197 #: src/hed/shc/testinterface_arc.cpp:124 #, c-format msgid "There is %d subjects, which satisfy at least one policy" msgstr "Обнаружены %d субъекта, удовлетворяющих хотя бы одной политике" #: src/hed/shc/test.cpp:121 #, c-format msgid "Attribute Value (1): %s" msgstr "Значение атрибута (1): %s" #: src/hed/shc/test.cpp:132 msgid "Input request from code" msgstr "Запрос ввода из программы" #: src/hed/shc/test.cpp:211 #, c-format msgid "Attribute Value (2): %s" msgstr "Значение атрибута (2): %s" #: src/hed/shc/testinterface_arc.cpp:75 src/hed/shc/testinterface_xacml.cpp:46 msgid "Can not dynamically produce Policy" msgstr "Не удалось динамически создать Policy" #: src/hed/shc/testinterface_arc.cpp:138 #, c-format msgid "Attribute Value inside Subject: %s" msgstr "Значение атрибута в субъекте: %s" #: src/hed/shc/testinterface_arc.cpp:148 msgid "The request has passed the policy evaluation" msgstr "Запрос прошёл сверку с политикой" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:43 msgid "Missing or empty PasswordSource element" msgstr "Элемент PasswordSource отсутствует или пуст" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:54 #, c-format msgid "Password encoding type not supported: %s" msgstr "Тип шифрования пароля не поддерживается: %s" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:59 msgid "Missing or empty Username element" msgstr "Элемент Username отсутствует или пуст" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:79 msgid "The payload of incoming message is empty" msgstr "Во входящем сообщении отсутствует полезная нагрузка" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:84 msgid "Failed to cast PayloadSOAP from incoming payload" msgstr "Не удалось создать PayloadSOAP из входящей нагрузки" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:89 msgid "Failed to parse Username Token from incoming SOAP" msgstr "Не удалось разобрать токен Username из входящего документа SOAP" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:95 msgid "Failed to authenticate Username Token inside the incoming SOAP" msgstr "" "Не удалось установить подлинность токена Username во входящем документе SOAP" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:98 msgid "Succeeded to authenticate UsernameToken" msgstr "Успешная проверка подлинности UsernameToken" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:108 msgid "The payload of outgoing message is empty" msgstr "В исходящем сообщении отсутствует полезная нагрузка" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:113 msgid "Failed to cast PayloadSOAP from outgoing payload" msgstr "Не удалось создать PayloadSOAP из исходящей нагрузки" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:119 msgid "Failed to generate Username Token for outgoing SOAP" msgstr "" "Не удалось создать токен имени пользователя для исходящего сообщения SOAP" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:127 msgid "Username Token handler is not configured" msgstr "Обработчик токена 
Username не настроен" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:81 msgid "Failed to parse X509 Token from incoming SOAP" msgstr "Не удалось разобрать токен X509 из входящего документа SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:85 msgid "Failed to verify X509 Token inside the incoming SOAP" msgstr "Не удалось подтвердить токен X509 во входящем документе SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:89 msgid "Failed to authenticate X509 Token inside the incoming SOAP" msgstr "" "Не удалось установить подлинность токена X509 во входящем документе SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:92 msgid "Succeeded to authenticate X509Token" msgstr "Успешное подтверждение подлинности токена X509" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:102 msgid "Failed to generate X509 Token for outgoing SOAP" msgstr "Не удалось создать токен X509 для исходящего сообщения SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:112 msgid "X509 Token handler is not configured" msgstr "Обработчик токена X509 не настроен" #: src/hed/shc/xacmlpdp/XACMLApply.cpp:29 msgid "Can not create function: FunctionId does not exist" msgstr "Невозможно создать функцию: FunctionId не существует" #: src/hed/shc/xacmlpdp/XACMLApply.cpp:33 #: src/hed/shc/xacmlpdp/XACMLTarget.cpp:40 #, c-format msgid "Can not create function %s" msgstr "Невозможно создать функцию %s" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:88 msgid "Can not find XACMLPDPContext" msgstr "Невозможно найти XACMLPDPContext" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:136 msgid "Evaluator for XACMLPDP was not loaded" msgstr "Обработчик для XACMLPDP не был загружен" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:151 src/hed/shc/xacmlpdp/XACMLPDP.cpp:159 msgid "Failed to convert security information to XACML request" msgstr "Не удалось преобразовать информацию о защите в запрос XACML" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:167 #, c-format msgid "XACML request: %s" msgstr "запрос XACML: %s" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:179 msgid "Authorized from xacml.pdp" msgstr "Допущен через xacml.pdp" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:180 msgid "UnAuthorized from xacml.pdp" msgstr "Не допущен через xacml.pdp" #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:55 msgid "Can not find element with proper namespace" msgstr "Невозможно найти элемент с нужным пространством имён" #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:132 msgid "No target available inside the policy" msgstr "Политика не содержит назначений" #: src/hed/shc/xacmlpdp/XACMLRequest.cpp:34 msgid "Request is empty" msgstr "Пустой запрос" #: src/hed/shc/xacmlpdp/XACMLRequest.cpp:39 msgid "Can not find element with proper namespace" msgstr "Невозможно найти элемент с нужным пространством имён" #: src/hed/shc/xacmlpdp/XACMLRule.cpp:35 msgid "Invalid Effect" msgstr "Недопустимый эффект" #: src/hed/shc/xacmlpdp/XACMLRule.cpp:48 msgid "No target available inside the rule" msgstr "Правило не содержит назначений" #: src/libs/data-staging/DTR.cpp:86 src/libs/data-staging/DTR.cpp:90 #, c-format msgid "Could not handle endpoint %s" msgstr "Невозможно обработать точку входа %s" #: src/libs/data-staging/DTR.cpp:100 msgid "Source is the same as destination" msgstr "Источник идентичен назначению" #: src/libs/data-staging/DTR.cpp:174 #, c-format msgid "Invalid ID: %s" msgstr "Неверный ID: %s" #: src/libs/data-staging/DTR.cpp:211 #, c-format msgid "%s->%s" msgstr "%s->%s" #: src/libs/data-staging/DTR.cpp:330 #, c-format msgid "No callback for %s defined" msgstr "Не определена функция обратного вызова для %s" #: src/libs/data-staging/DTR.cpp:345 #, c-format msgid 
"NULL callback for %s" msgstr "Нулевой обратный вызов для %s" #: src/libs/data-staging/DTR.cpp:348 #, c-format msgid "Request to push to unknown owner - %u" msgstr "Попытка передачи неизвестному владельцу - %u" #: src/libs/data-staging/DataDelivery.cpp:48 #: src/libs/data-staging/DataDelivery.cpp:72 msgid "Received invalid DTR" msgstr "Принят неверный запрос DTR" #: src/libs/data-staging/DataDelivery.cpp:54 #, c-format msgid "Delivery received new DTR %s with source: %s, destination: %s" msgstr "" "Служба доставки получила новый запрос DTR %s с источником %s и назначением %s" #: src/libs/data-staging/DataDelivery.cpp:68 msgid "Received no DTR" msgstr "Не получено запросов DTR" #: src/libs/data-staging/DataDelivery.cpp:80 #, c-format msgid "Cancelling DTR %s with source: %s, destination: %s" msgstr "Отменяется DTR %s с источником: %s, назначением: %s" #: src/libs/data-staging/DataDelivery.cpp:91 #, c-format msgid "DTR %s requested cancel but no active transfer" msgstr "DTR %s запросил прерывание, но активные передачи отсутствуют" #: src/libs/data-staging/DataDelivery.cpp:147 #, c-format msgid "Cleaning up after failure: deleting %s" msgstr "Очистка после сбоя: уничтожается %s" #: src/libs/data-staging/DataDelivery.cpp:188 #: src/libs/data-staging/DataDelivery.cpp:263 #: src/libs/data-staging/DataDelivery.cpp:303 #: src/libs/data-staging/DataDelivery.cpp:323 msgid "Failed to delete delivery object or deletion timed out" msgstr "Сбой удаления объекта доставки, или истекло время ожидания удаления" #: src/libs/data-staging/DataDelivery.cpp:254 #, c-format msgid "Transfer finished: %llu bytes transferred %s" msgstr "Передача завершена: %llu байтов передано %s" #: src/libs/data-staging/DataDelivery.cpp:329 msgid "Data delivery loop exited" msgstr "Прерван цикл размещения данных" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:137 #, c-format msgid "Bad checksum format %s" msgstr "Неверный формат контрольной суммы %s" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:210 #, c-format msgid "DataDelivery: %s" msgstr "DataDelivery: %s" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:222 #, c-format msgid "DataStagingDelivery exited with code %i" msgstr "Процесс DataStagingDelivery завершился с кодом %i" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:241 #, c-format msgid "Transfer killed after %i seconds without communication" msgstr "Пересылка оборвана после %i секунд бездействия" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:67 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:315 #, c-format msgid "Connecting to Delivery service at %s" msgstr "Соединяемся со службой доставки на %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:94 #, c-format msgid "Failed to set up credential delegation with %s" msgstr "Сбой установки делегирования прав доступа с %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:106 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:332 #, c-format msgid "Could not connect to service %s: %s" msgstr "Не удалось соединиться со службой %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:114 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:340 #, c-format msgid "No SOAP response from Delivery service %s" msgstr "Нет ответа SOAP от службы доставки %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:128 #, c-format msgid "Failed to start transfer request: %s" msgstr "Сбой запуска запроса на передачу: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:135 #, c-format msgid "Bad format in XML response from service at %s: %s" msgstr 
"Недопустимый формат отзыва XML от сервиса в %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:143 #, c-format msgid "Could not make new transfer request: %s: %s" msgstr "Невозможно создать новый запрос пересылки: %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:148 #, c-format msgid "Started remote Delivery at %s" msgstr "Запущена удалённая служба доставки на %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:181 #, c-format msgid "Failed to send cancel request: %s" msgstr "Сбой отправки запроса на прерывание: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:188 msgid "Failed to cancel: No SOAP response" msgstr "Сбой прерывания: нет ответа SOAP" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:202 #, c-format msgid "Failed to cancel transfer request: %s" msgstr "Сбой прерывания запроса на передачу: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:209 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:290 #, c-format msgid "Bad format in XML response: %s" msgstr "Неверный формат отклика XML: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:216 #, c-format msgid "Failed to cancel: %s" msgstr "Ошибка отмены: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:260 msgid "No SOAP response from delivery service" msgstr "Нет ответа SOAP от службы доставки" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:281 #, c-format msgid "Failed to query state: %s" msgstr "Сбой опроса состояния: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:355 #, c-format msgid "SOAP fault from delivery service at %s: %s" msgstr "Ошибка SOAP службы доставки на %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:363 #, c-format msgid "Bad format in XML response from delivery service at %s: %s" msgstr "Неверный формат отклика XML службы доставки на %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:371 #, c-format msgid "Error pinging delivery service at %s: %s: %s" msgstr "Ошибка связи со службой доставки на %s: %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:379 #, c-format msgid "Dir %s allowed at service %s" msgstr "Каталог %s допускается для службы %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:473 #, c-format msgid "" "DataDelivery log tail:\n" "%s" msgstr "" "Последние записи журнала DataDelivery:\n" "%s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:486 msgid "Failed locating credentials" msgstr "Сбой обнаружения параметров доступа" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:491 msgid "Failed to initiate client connection" msgstr "Сбой запуска соединения с клиентом" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:497 msgid "Client connection has no entry point" msgstr "Отсутствует точка входа в клиентскую цепь" #: src/libs/data-staging/DataStagingDelivery.cpp:134 msgid "Unexpected arguments" msgstr "Непредусмотренные аргументы" #: src/libs/data-staging/DataStagingDelivery.cpp:137 msgid "Source URL missing" msgstr "Отсутствует URL источника" #: src/libs/data-staging/DataStagingDelivery.cpp:140 msgid "Destination URL missing" msgstr "Отсутствует URL назначения" #: src/libs/data-staging/DataStagingDelivery.cpp:144 #, c-format msgid "Source URL not valid: %s" msgstr "Недействительный URL источника: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:148 #, c-format msgid "Destination URL not valid: %s" msgstr "Недействительный URL назначения: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:205 #, c-format msgid "Unknown transfer option: %s" msgstr "Неизвестная опция передачи файлов: %s" #: 
src/libs/data-staging/DataStagingDelivery.cpp:230 #, c-format msgid "Source URL not supported: %s" msgstr "Неподдерживаемый URL источника: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:235 #: src/libs/data-staging/DataStagingDelivery.cpp:254 msgid "No credentials supplied" msgstr "Не указаны параметры доступа" #: src/libs/data-staging/DataStagingDelivery.cpp:249 #, c-format msgid "Destination URL not supported: %s" msgstr "Неподдерживаемый URL назначения: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:298 #, c-format msgid "Will calculate %s checksum" msgstr "Будет вычислена контрольная сумма для %s" #: src/libs/data-staging/DataStagingDelivery.cpp:309 msgid "Cannot use supplied --size option" msgstr "Невозможно использовать заявленную опцию --size" #: src/libs/data-staging/DataStagingDelivery.cpp:458 #, c-format msgid "Checksum mismatch between calculated checksum %s and source checksum %s" msgstr "" "Несовпадение вычисленной контрольной суммы %s и контрольной суммы источника %" "s" #: src/libs/data-staging/DataStagingDelivery.cpp:468 #, c-format msgid "Failed cleaning up destination %s" msgstr "Ошибка очистки цели %s" #: src/libs/data-staging/Processor.cpp:60 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:418 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:435 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:331 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:348 #: src/services/cache_service/CacheService.cpp:114 msgid "Error creating cache" msgstr "Ошибка при создании кэша" #: src/libs/data-staging/Processor.cpp:86 #, c-format msgid "Forcing re-download of file %s" msgstr "Принудительная перезагрузка файла %s" #: src/libs/data-staging/Processor.cpp:103 #, c-format msgid "Will wait around %is" msgstr "Ожидание порядка %i сек" #: src/libs/data-staging/Processor.cpp:123 #, c-format msgid "Force-checking source of cache file %s" msgstr "Принудительная проверка источника кэшированного файла %s" #: src/libs/data-staging/Processor.cpp:126 #, c-format msgid "Source check requested but failed: %s" msgstr "Проверка источника запрошена, но не прошла: %s" #: src/libs/data-staging/Processor.cpp:146 msgid "Permission checking failed, will try downloading without using cache" msgstr "Сбой проверки прав доступа, попытка загрузки без использования кэша" #: src/libs/data-staging/Processor.cpp:177 #, c-format msgid "Will download to cache file %s" msgstr "Будет произведена загрузка в файл кэша %s" #: src/libs/data-staging/Processor.cpp:199 msgid "Looking up source replicas" msgstr "Поиск копий файла-источника" #: src/libs/data-staging/Processor.cpp:217 #: src/libs/data-staging/Processor.cpp:329 #, c-format msgid "Skipping replica on local host %s" msgstr "Пропускается локальная копия %s" #: src/libs/data-staging/Processor.cpp:225 #: src/libs/data-staging/Processor.cpp:337 #, c-format msgid "No locations left for %s" msgstr "Не осталось расположений для %s" #: src/libs/data-staging/Processor.cpp:248 #: src/libs/data-staging/Processor.cpp:496 msgid "Resolving destination replicas" msgstr "Обнаружение копий назначения" #: src/libs/data-staging/Processor.cpp:266 msgid "No locations for destination different from source found" msgstr "Не найдено расположений для назначения, отличающихся от источника" #: src/libs/data-staging/Processor.cpp:278 msgid "Pre-registering destination in index service" msgstr "Предварительная регистрация назначения в каталоге" #: src/libs/data-staging/Processor.cpp:305 msgid "Resolving source replicas in bulk" msgstr "Массовое 
обнаружение копий источника" #: src/libs/data-staging/Processor.cpp:319 #, c-format msgid "No replicas found for %s" msgstr "Не найдено копий для %s" #: src/libs/data-staging/Processor.cpp:361 #, c-format msgid "Checking %s" msgstr "Проверка %s" #: src/libs/data-staging/Processor.cpp:370 #: src/libs/data-staging/Processor.cpp:429 msgid "Metadata of replica and index service differ" msgstr "Метаданные копии отличаются от тех, что в каталоге" #: src/libs/data-staging/Processor.cpp:378 #, c-format msgid "Failed checking source replica %s: %s" msgstr "Сбой проверки копии источника %s: %s" #: src/libs/data-staging/Processor.cpp:405 msgid "Querying source replicas in bulk" msgstr "Массовый опрос копий источника" #: src/libs/data-staging/Processor.cpp:417 #, c-format msgid "Failed checking source replica: %s" msgstr "Сбой проверки копии источника: %s" #: src/libs/data-staging/Processor.cpp:423 msgid "Failed checking source replica" msgstr "Сбой проверки копии источника" #: src/libs/data-staging/Processor.cpp:464 msgid "Finding existing destination replicas" msgstr "Обнаружение существующих копий назначения" #: src/libs/data-staging/Processor.cpp:476 #, c-format msgid "Failed to delete replica %s: %s" msgstr "Сбой удаления копии %s: %s" #: src/libs/data-staging/Processor.cpp:490 #, c-format msgid "Unregistering %s" msgstr "Удаляется запись о %s" #: src/libs/data-staging/Processor.cpp:501 msgid "Pre-registering destination" msgstr "Предварительная регистрация назначения" #: src/libs/data-staging/Processor.cpp:507 #, c-format msgid "Failed to pre-clean destination: %s" msgstr "Сбой предварительной очистки назначения: %s" #: src/libs/data-staging/Processor.cpp:531 msgid "Preparing to stage source" msgstr "Подготовка к размещению файла-источника" #: src/libs/data-staging/Processor.cpp:544 #, c-format msgid "Source is not ready, will wait %u seconds" msgstr "Источник не готов, следующая попытка через %u сек" #: src/libs/data-staging/Processor.cpp:550 msgid "No physical files found for source" msgstr "Не найдено реальных файлов источника" #: src/libs/data-staging/Processor.cpp:569 msgid "Preparing to stage destination" msgstr "Подготовка к размещению назначения" #: src/libs/data-staging/Processor.cpp:582 #, c-format msgid "Destination is not ready, will wait %u seconds" msgstr "Назначение не готово, следующая попытка через %u сек" #: src/libs/data-staging/Processor.cpp:588 msgid "No physical files found for destination" msgstr "Не найдено реальных файлов для назначения" #: src/libs/data-staging/Processor.cpp:615 msgid "Releasing source" msgstr "Сброс источника" #: src/libs/data-staging/Processor.cpp:619 #, c-format msgid "There was a problem during post-transfer source handling: %s" msgstr "Обнаружена проблема при обслуживании источника после пересылки: %s" #: src/libs/data-staging/Processor.cpp:624 msgid "Releasing destination" msgstr "Сброс назначения" #: src/libs/data-staging/Processor.cpp:628 #, c-format msgid "" "There was a problem during post-transfer destination handling after error: %s" msgstr "" "Обнаружена проблема при обслуживании назначения после пересылки (после сбоя): %" "s" #: src/libs/data-staging/Processor.cpp:632 #, c-format msgid "Error with post-transfer destination handling: %s" msgstr "Ошибка обслуживания назначения после пересылки: %s" #: src/libs/data-staging/Processor.cpp:659 msgid "Removing pre-registered destination in index service" msgstr "Отмена предварительной регистрации назначения в каталоге" #: src/libs/data-staging/Processor.cpp:662 #, c-format msgid "" "Failed to 
unregister pre-registered destination %s: %s. You may need to " "unregister it manually" msgstr "" "Не удалось отменить предварительную регистрацию назначения %s: %s. Возможно, " "Вам придётся сделать это вручную" #: src/libs/data-staging/Processor.cpp:668 msgid "Registering destination replica" msgstr "Регистрация копии назначения" #: src/libs/data-staging/Processor.cpp:671 #, c-format msgid "Failed to register destination replica: %s" msgstr "Сбой регистрации копии назначения: %s" #: src/libs/data-staging/Processor.cpp:674 #, c-format msgid "" "Failed to unregister pre-registered destination %s. You may need to " "unregister it manually" msgstr "" "Не удалось отменить предварительную регистрацию назначения %s. Возможно, Вам " "придётся сделать это вручную" #: src/libs/data-staging/Processor.cpp:705 msgid "Error creating cache. Stale locks may remain." msgstr "Ошибка при создании кэша. Возможно, остались старые блокировки." #: src/libs/data-staging/Processor.cpp:740 #, c-format msgid "Linking/copying cached file to %s" msgstr "Создание ссылки/копирование файла из кэша в %s" #: src/libs/data-staging/Processor.cpp:761 #, c-format msgid "Failed linking cache file to %s" msgstr "Сбой создания ссылки на файл из кэша в %s" #: src/libs/data-staging/Processor.cpp:765 #, c-format msgid "Error linking cache file to %s." msgstr "Ошибка создания ссылки на файл из кэша в %s." #: src/libs/data-staging/Processor.cpp:787 #: src/libs/data-staging/Processor.cpp:794 msgid "Adding to bulk request" msgstr "Добавление к массовому запросу" #: src/libs/data-staging/Scheduler.cpp:174 #, c-format msgid "Using next %s replica" msgstr "Используется следующая копия (%s)" #: src/libs/data-staging/Scheduler.cpp:181 #, c-format msgid "No more %s replicas" msgstr "Больше копий нет (%s)" #: src/libs/data-staging/Scheduler.cpp:183 msgid "Will clean up pre-registered destination" msgstr "Предварительно зарегистрированное назначение будет очищено" #: src/libs/data-staging/Scheduler.cpp:187 msgid "Will release cache locks" msgstr "Будут отменены блокировки в кэше" #: src/libs/data-staging/Scheduler.cpp:190 msgid "Moving to end of data staging" msgstr "Переход к завершению размещения данных" #: src/libs/data-staging/Scheduler.cpp:199 #, c-format msgid "Source is mapped to %s" msgstr "Источник поставлен в соответствие %s" #: src/libs/data-staging/Scheduler.cpp:203 msgid "Cannot link to source which can be modified, will copy instead" msgstr "" "Невозможно создать ссылку на источник, который может измениться; будет " "сделана копия" #: src/libs/data-staging/Scheduler.cpp:213 msgid "Cannot link to a remote destination. Will not use mapped URL" msgstr "" "Невозможно создать ссылку на удалённое назначение. Приписанный URL не будет " "использован" #: src/libs/data-staging/Scheduler.cpp:216 msgid "Linking mapped file" msgstr "Создаётся символическая ссылка на соответствующий файл" #: src/libs/data-staging/Scheduler.cpp:223 #, c-format msgid "Failed to create link: %s. Will not use mapped URL" msgstr "Сбой создания ссылки: %s. 
Приписанный URL не будет использован" #: src/libs/data-staging/Scheduler.cpp:235 msgid "Linking mapped file - can't link on Windows" msgstr "" "Создаётся символическая ссылка на соответствующий файл - невыполнимо на " "Windows" #: src/libs/data-staging/Scheduler.cpp:251 #, c-format msgid "" "Scheduler received new DTR %s with source: %s, destination: %s, assigned to " "transfer share %s with priority %d" msgstr "" "Планировщик получил новый запрос DTR %s с источником: %s, назначением: %s, " "приписан к доле %s с приоритетом %d" #: src/libs/data-staging/Scheduler.cpp:258 msgid "" "File is not cacheable, was requested not to be cached or no cache available, " "skipping cache check" msgstr "" "Файл либо не может быть кэширован, либо кэширование не было запрошено, либо " "кэша нет; пропускается проверка кэша" #: src/libs/data-staging/Scheduler.cpp:264 msgid "File is cacheable, will check cache" msgstr "Файл может быть кэширован, проверяется кэш" #: src/libs/data-staging/Scheduler.cpp:267 #: src/libs/data-staging/Scheduler.cpp:293 #, c-format msgid "File is currently being cached, will wait %is" msgstr "Файл ещё кэшируется, ожидание %i сек" #: src/libs/data-staging/Scheduler.cpp:286 msgid "Timed out while waiting for cache lock" msgstr "Истекло время ожидания блокировки кэша" #: src/libs/data-staging/Scheduler.cpp:297 msgid "Checking cache again" msgstr "Кэш проверяется снова" #: src/libs/data-staging/Scheduler.cpp:317 msgid "Destination file is in cache" msgstr "Файл назначения записан в кэш" #: src/libs/data-staging/Scheduler.cpp:321 msgid "Source and/or destination is index service, will resolve replicas" msgstr "" "Источник и/или назначение является каталогом, будет произведён поиск копий" #: src/libs/data-staging/Scheduler.cpp:324 msgid "" "Neither source nor destination are index services, will skip resolving " "replicas" msgstr "" "Ни источник, ни назначение не являются каталогом, поиск копий не будет " "произведён" #: src/libs/data-staging/Scheduler.cpp:334 msgid "Problem with index service, will release cache lock" msgstr "Проблема с каталогом, кэш будет разблокирован" #: src/libs/data-staging/Scheduler.cpp:338 msgid "Problem with index service, will proceed to end of data staging" msgstr "Проблема с каталогом, переходим к завершению размещения данных" #: src/libs/data-staging/Scheduler.cpp:348 msgid "Checking source file is present" msgstr "Проверка наличия файла-источника" #: src/libs/data-staging/Scheduler.cpp:356 msgid "Error with source file, moving to next replica" msgstr "Ошибка в файле источника, пробуем другую копию" #: src/libs/data-staging/Scheduler.cpp:378 #, c-format msgid "Replica %s has long latency, trying next replica" msgstr "У копии %s долгая задержка, пробуем следующую копию" #: src/libs/data-staging/Scheduler.cpp:380 #, c-format msgid "No more replicas, will use %s" msgstr "Больше копий нет, будет использован файл %s" #: src/libs/data-staging/Scheduler.cpp:383 #, c-format msgid "Checking replica %s" msgstr "Проверяется копия %s" #: src/libs/data-staging/Scheduler.cpp:393 msgid "Overwrite requested - will pre-clean destination" msgstr "Запрошена перезапись - назначение будет предварительно очищено" #: src/libs/data-staging/Scheduler.cpp:396 msgid "No overwrite requested or allowed, skipping pre-cleaning" msgstr "" "Перезапись не запрошена или не разрешена, предварительная очистка " "пропускается" #: src/libs/data-staging/Scheduler.cpp:404 msgid "Pre-clean failed, will still try to copy" msgstr "Сбой предварительной очистки, всё же попытаемся скопировать " #: 
src/libs/data-staging/Scheduler.cpp:411 msgid "Source or destination requires staging" msgstr "Источник или назначение требуют размещения с ленточного накопителя" #: src/libs/data-staging/Scheduler.cpp:415 msgid "No need to stage source or destination, skipping staging" msgstr "" "Не требуется размещение с ленточного накопителя ни источника, ни назначения; " "размещение пропускается " #: src/libs/data-staging/Scheduler.cpp:445 msgid "Staging request timed out, will release request" msgstr "Истекло время ожидания запроса на размещение, запрос будет отозван" #: src/libs/data-staging/Scheduler.cpp:449 msgid "Querying status of staging request" msgstr "Опрос состояния запроса на размещение" #: src/libs/data-staging/Scheduler.cpp:458 msgid "Releasing requests" msgstr "Сброс запросов" #: src/libs/data-staging/Scheduler.cpp:475 msgid "DTR is ready for transfer, moving to delivery queue" msgstr "DTR готов к пересылке, переводится в очередь на доставку" #: src/libs/data-staging/Scheduler.cpp:490 #, c-format msgid "Transfer failed: %s" msgstr "Сбой передачи: %s" #: src/libs/data-staging/Scheduler.cpp:500 msgid "Releasing request(s) made during staging" msgstr "Отзыв запросов, сделанных при размещении" #: src/libs/data-staging/Scheduler.cpp:503 msgid "Neither source nor destination were staged, skipping releasing requests" msgstr "" "Ни источник, ни назначение не были размещены с ленточного накопителя, " "пропускается отмена запросов" #: src/libs/data-staging/Scheduler.cpp:515 msgid "Trying next replica" msgstr "Пробуем следующую копию" #: src/libs/data-staging/Scheduler.cpp:519 #, c-format msgid "Will %s in destination index service" msgstr "Будет выполнена операция %s в каталоге назначения" #: src/libs/data-staging/Scheduler.cpp:523 msgid "Destination is not index service, skipping replica registration" msgstr "Назначение не является указателем, пропускается регистрация копии" #: src/libs/data-staging/Scheduler.cpp:536 msgid "Error registering replica, moving to end of data staging" msgstr "Ошибка регистрации копии, переход к завершению размещения" #: src/libs/data-staging/Scheduler.cpp:544 msgid "Will process cache" msgstr "Будет обработан кэш" #: src/libs/data-staging/Scheduler.cpp:548 msgid "File is not cacheable, skipping cache processing" msgstr "Файл не может быть кэширован, пропускается обработка кэша" #: src/libs/data-staging/Scheduler.cpp:562 msgid "Cancellation complete" msgstr "Отмена завершена" #: src/libs/data-staging/Scheduler.cpp:576 msgid "Will wait 10s" msgstr "Ожидание 10 секунд" #: src/libs/data-staging/Scheduler.cpp:582 msgid "Error in cache processing, will retry without caching" msgstr "Ошибка при обработке кэша, попытаемся без кэширования" #: src/libs/data-staging/Scheduler.cpp:591 msgid "Will retry without caching" msgstr "Будет произведена повторная попытка без кэширования" #: src/libs/data-staging/Scheduler.cpp:609 msgid "Proxy has expired" msgstr "Срок действия доверенности истёк" #: src/libs/data-staging/Scheduler.cpp:620 #, c-format msgid "%i retries left, will wait until %s before next attempt" msgstr "Осталось %i попыток, повторная попытка в %s" #: src/libs/data-staging/Scheduler.cpp:636 msgid "Out of retries" msgstr "Достигнут предел количества попыток" #: src/libs/data-staging/Scheduler.cpp:638 msgid "Permanent failure" msgstr "Устойчивый сбой" #: src/libs/data-staging/Scheduler.cpp:644 msgid "Finished successfully" msgstr "Успешное завершение" #: src/libs/data-staging/Scheduler.cpp:654 msgid "Returning to generator" msgstr "Возврат в генератор" #: 
src/libs/data-staging/Scheduler.cpp:820 #, c-format msgid "File is smaller than %llu bytes, will use local delivery" msgstr "Файл меньше %llu байт, будет использована локальная доставка" #: src/libs/data-staging/Scheduler.cpp:874 #, c-format msgid "Delivery service at %s can copy to %s" msgstr "Служба доставки в %s может копировать в %s" #: src/libs/data-staging/Scheduler.cpp:882 #, c-format msgid "Delivery service at %s can copy from %s" msgstr "Служба доставки в %s может копировать из %s" #: src/libs/data-staging/Scheduler.cpp:895 msgid "Could not find any useable delivery service, forcing local transfer" msgstr "" "Не удалось обнаружить подходящую службу доставки, вынужденно используется " "локальная пересылка" #: src/libs/data-staging/Scheduler.cpp:911 #, c-format msgid "Not using delivery service at %s because it is full" msgstr "Служба доставки на %s не используется в связи с переполнением" #: src/libs/data-staging/Scheduler.cpp:938 #, c-format msgid "Not using delivery service %s due to previous failure" msgstr "Служба доставки %s не используется в связи с предыдущим сбоем" #: src/libs/data-staging/Scheduler.cpp:948 msgid "No remote delivery services are useable, forcing local delivery" msgstr "" "Ни одна из удалённых служб доставки не подходит, вынужденно используется " "локальная доставка" #: src/libs/data-staging/Scheduler.cpp:1149 msgid "Cancelling active transfer" msgstr "Отмена активных передач" #: src/libs/data-staging/Scheduler.cpp:1165 msgid "Processing thread timed out. Restarting DTR" msgstr "Вышло время ожидания потока обработки. DTR перезапускается" #: src/libs/data-staging/Scheduler.cpp:1233 msgid "Will use bulk request" msgstr "Будет использован массовый запрос" #: src/libs/data-staging/Scheduler.cpp:1255 msgid "No delivery endpoints available, will try later" msgstr "Нет доступных назначений для отгрузки, попытаемся позже" #: src/libs/data-staging/Scheduler.cpp:1274 msgid "Scheduler received NULL DTR" msgstr "Планировщик получил пустой запрос DTR" #: src/libs/data-staging/Scheduler.cpp:1284 msgid "Scheduler received invalid DTR" msgstr "Планировщик получил недопустимый запрос DTR" #: src/libs/data-staging/Scheduler.cpp:1373 msgid "Scheduler starting up" msgstr "Запуск планировщика" #: src/libs/data-staging/Scheduler.cpp:1374 msgid "Scheduler configuration:" msgstr "Конфигурация планировщика:" #: src/libs/data-staging/Scheduler.cpp:1375 #, c-format msgid " Pre-processor slots: %u" msgstr " Мест для предварительной обработки: %u" #: src/libs/data-staging/Scheduler.cpp:1376 #, c-format msgid " Delivery slots: %u" msgstr " Мест для отгрузки: %u" #: src/libs/data-staging/Scheduler.cpp:1377 #, c-format msgid " Post-processor slots: %u" msgstr " Мест для окончательной обработки: %u" #: src/libs/data-staging/Scheduler.cpp:1378 #, c-format msgid " Emergency slots: %u" msgstr " Мест для срочной обработки: %u" #: src/libs/data-staging/Scheduler.cpp:1379 #, c-format msgid " Prepared slots: %u" msgstr " Подготовленных мест: %u" #: src/libs/data-staging/Scheduler.cpp:1380 #, c-format msgid "" " Shares configuration:\n" "%s" msgstr "" " Конфигурация квот:\n" "%s" #: src/libs/data-staging/Scheduler.cpp:1383 msgid " Delivery service: LOCAL" msgstr " Служба доставки: LOCAL" #: src/libs/data-staging/Scheduler.cpp:1384 #, c-format msgid " Delivery service: %s" msgstr " Служба доставки: %s" #: src/libs/data-staging/Scheduler.cpp:1389 msgid "Failed to create DTR dump thread" msgstr "Не удалось создать поток сброса DTR" #: src/libs/data-staging/Scheduler.cpp:1406 #: 
src/services/data-staging/DataDeliveryService.cpp:513 #, c-format msgid "DTR %s cancelled" msgstr "Запрос DTR %s отменён" #: src/libs/data-staging/examples/Generator.cpp:15 msgid "Shutting down scheduler" msgstr "Планировщик останавливается" #: src/libs/data-staging/examples/Generator.cpp:17 msgid "Scheduler stopped, exiting" msgstr "Планировщик остановлен, выход" #: src/libs/data-staging/examples/Generator.cpp:23 #, c-format msgid "Received DTR %s back from scheduler in state %s" msgstr "Планировщик вернул запрос DTR %s в состоянии %s" #: src/libs/data-staging/examples/Generator.cpp:32 msgid "Generator started" msgstr "Генератор запущен" #: src/libs/data-staging/examples/Generator.cpp:33 msgid "Starting DTR threads" msgstr "Запускаются потоки DTR" #: src/libs/data-staging/examples/Generator.cpp:46 msgid "No valid credentials found, exiting" msgstr "Не найдены действительные параметры доступа, выход" #: src/libs/data-staging/examples/Generator.cpp:58 #, c-format msgid "Problem creating dtr (source %s, destination %s)" msgstr "Проблема при создании DTR (источник %s, назначение %s)" #: src/services/a-rex/arex.cpp:446 #, c-format msgid "Using cached local account '%s'" msgstr "Используется кэшированная местная учётная запись '%s'" #: src/services/a-rex/arex.cpp:457 msgid "Will not map to 'root' account by default" msgstr "По умолчанию привязки к учётной записи 'root' не будет" #: src/services/a-rex/arex.cpp:470 msgid "No local account name specified" msgstr "Не указано имя локальной учётной записи" #: src/services/a-rex/arex.cpp:473 #, c-format msgid "Using local account '%s'" msgstr "Используется локальная учётная запись '%s'" #: src/services/a-rex/arex.cpp:494 msgid "Failed to acquire grid-manager's configuration" msgstr "Не удалось получить настройки grid-manager" #: src/services/a-rex/arex.cpp:519 #: src/services/cache_service/CacheService.cpp:572 #: src/services/data-staging/DataDeliveryService.cpp:687 #, c-format msgid "SOAP operation is not supported: %s" msgstr "Операция SOAP не поддерживается: %s" #: src/services/a-rex/arex.cpp:532 #, c-format msgid "Connection from %s: %s" msgstr "Соединение с %s: %s" #: src/services/a-rex/arex.cpp:534 #, c-format msgid "process: method: %s" msgstr "процесс: метод: %s" #: src/services/a-rex/arex.cpp:535 #, c-format msgid "process: endpoint: %s" msgstr "процесс: конечная точка: %s" #: src/services/a-rex/arex.cpp:546 #, c-format msgid "process: id: %s" msgstr "процесс: идентификатор: %s" #: src/services/a-rex/arex.cpp:547 #, c-format msgid "process: subpath: %s" msgstr "процесс: подкаталог: %s" #: src/services/a-rex/arex.cpp:567 #: src/services/cache_service/CacheService.cpp:546 #: src/services/data-staging/DataDeliveryService.cpp:647 #: src/tests/echo/echo.cpp:98 #, c-format msgid "process: request=%s" msgstr "процесс: запрос=%s" #: src/services/a-rex/arex.cpp:572 #: src/services/cache_service/CacheService.cpp:551 #: src/services/data-staging/DataDeliveryService.cpp:652 #: src/tests/count/count.cpp:69 msgid "input does not define operation" msgstr "не задана операция на вводе" #: src/services/a-rex/arex.cpp:575 #: src/services/cache_service/CacheService.cpp:554 #: src/services/data-staging/DataDeliveryService.cpp:655 #: src/tests/count/count.cpp:72 #, c-format msgid "process: operation: %s" msgstr "процесс: операция: %s" #: src/services/a-rex/arex.cpp:591 src/services/a-rex/arex.cpp:804 #: src/services/a-rex/arex.cpp:823 src/services/a-rex/arex.cpp:837 #: src/services/a-rex/arex.cpp:847 src/services/a-rex/arex.cpp:862 #: 
src/services/cache_service/CacheService.cpp:588 #: src/services/data-staging/DataDeliveryService.cpp:703 msgid "Security Handlers processing failed" msgstr "Сбой в процессе обработки прав доступа" #: src/services/a-rex/arex.cpp:598 msgid "Can't obtain configuration" msgstr "Не удалось получить настройки" #: src/services/a-rex/arex.cpp:614 msgid "process: factory endpoint" msgstr "процесс: конечная точка фабрики" #: src/services/a-rex/arex.cpp:798 src/services/a-rex/arex.cpp:815 #: src/services/cache_service/CacheService.cpp:583 #: src/services/data-staging/DataDeliveryService.cpp:698 #: src/tests/echo/echo.cpp:166 #, c-format msgid "process: response=%s" msgstr "процесс: ответ=%s" #: src/services/a-rex/arex.cpp:800 msgid "process: response is not SOAP" msgstr "процесс: ответ не является документом SOAP" #: src/services/a-rex/arex.cpp:830 msgid "process: GET" msgstr "процесс: GET" #: src/services/a-rex/arex.cpp:831 #, c-format msgid "GET: id %s path %s" msgstr "GET: идентификатор %s путь %s" #: src/services/a-rex/arex.cpp:854 msgid "process: PUT" msgstr "процесс: PUT" #: src/services/a-rex/arex.cpp:869 #, c-format msgid "process: method %s is not supported" msgstr "процесс: метод %s не поддерживается" #: src/services/a-rex/arex.cpp:872 msgid "process: method is not defined" msgstr "процесс: неопределённый метод" #: src/services/a-rex/arex.cpp:908 msgid "Failed to run Grid Manager thread" msgstr "Сбой запуска потока Grid Manager" #: src/services/a-rex/arex.cpp:972 #, c-format msgid "Storing configuration in temporary file %s" msgstr "Запись настроек во временный файл %s" #: src/services/a-rex/arex.cpp:977 msgid "Failed to process service configuration" msgstr "Не удалось обработать настройки сервиса" #: src/services/a-rex/arex.cpp:985 #, c-format msgid "Failed to process configuration in %s" msgstr "Не удалось обработать настройки в %s" #: src/services/a-rex/arex.cpp:991 msgid "No control directory set in configuration" msgstr "Не найден контрольный каталог в файле настроек" #: src/services/a-rex/arex.cpp:995 msgid "No session directory set in configuration" msgstr "Не найден каталог сессии в файле настроек" #: src/services/a-rex/arex.cpp:999 msgid "No LRMS set in configuration" msgstr "Не найдена СУПО в файле настроек" #: src/services/a-rex/arex.cpp:1004 #, c-format msgid "Failed to create control directory %s" msgstr "Ошибка создания контрольного каталога %s" #: src/services/a-rex/arex.cpp:1033 #, c-format msgid "Provided LRMSName is not a valid URL: %s" msgstr "Указанное значение LRMSName не является допустимым URL: %s" #: src/services/a-rex/arex.cpp:1035 msgid "" "No LRMSName is provided. This is needed if you wish to completely comply " "with the BES specifications." msgstr "" "Не задан атрибут LRMSName. Он необходим для полного соответствия " "спецификации интерфейса BES." 
#: src/services/a-rex/cachecheck.cpp:34 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:539 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:843 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:424 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:337 #, c-format msgid "Error with cache configuration: %s" msgstr "Ошибка при настройке кэша: %s" #: src/services/a-rex/cachecheck.cpp:50 #: src/services/cache_service/CacheService.cpp:305 msgid "Error with cache configuration" msgstr "Ошибка при настройке кэша" #: src/services/a-rex/cachecheck.cpp:75 #: src/services/cache_service/CacheService.cpp:135 #: src/services/cache_service/CacheService.cpp:330 #, c-format msgid "Looking up URL %s" msgstr "Поиск URL %s" #: src/services/a-rex/cachecheck.cpp:77 #: src/services/cache_service/CacheService.cpp:144 #, c-format msgid "Cache file is %s" msgstr "Файл кэша: %s" #: src/services/a-rex/change_activity_status.cpp:33 #, c-format msgid "" "ChangeActivityStatus: request = \n" "%s" msgstr "" "ChangeActivityStatus: запрос = \n" "%s" #: src/services/a-rex/change_activity_status.cpp:38 msgid "ChangeActivityStatus: no ActivityIdentifier found" msgstr "ChangeActivityStatus: не найден ActivityIdentifier" #: src/services/a-rex/change_activity_status.cpp:47 msgid "ChangeActivityStatus: EPR contains no JobID" msgstr "ChangeActivityStatus: EPR не содержит JobID" #: src/services/a-rex/change_activity_status.cpp:57 #, c-format msgid "ChangeActivityStatus: no job found: %s" msgstr "ChangeActivityStatus: задача не найдена: %s" #: src/services/a-rex/change_activity_status.cpp:73 msgid "ChangeActivityStatus: missing NewStatus element" msgstr "ChangeActivityStatus: отсутствует элемент NewStatus" #: src/services/a-rex/change_activity_status.cpp:87 msgid "ChangeActivityStatus: Failed to accept delegation" msgstr "ChangeActivityStatus: невозможно принять делегирование" #: src/services/a-rex/change_activity_status.cpp:103 msgid "ChangeActivityStatus: old BES state does not match" msgstr "ChangeActivityStatus: не найдено соответствия старому состоянию BES" #: src/services/a-rex/change_activity_status.cpp:110 msgid "ChangeActivityStatus: old A-REX state does not match" msgstr "ChangeActivityStatus: не найдено соответствия старому состоянию A-REX" #: src/services/a-rex/change_activity_status.cpp:137 msgid "ChangeActivityStatus: Failed to update credentials" msgstr "ChangeActivityStatus: невозможно обновить параметры доступа" #: src/services/a-rex/change_activity_status.cpp:143 msgid "ChangeActivityStatus: Failed to resume job" msgstr "ChangeActivityStatus: невозможно возобновить задачу" #: src/services/a-rex/change_activity_status.cpp:149 #, c-format msgid "ChangeActivityStatus: State change not allowed: from %s/%s to %s/%s" msgstr "" "ChangeActivityStatus: недопустимое изменение состояния: с %s/%s на %s/%s" #: src/services/a-rex/change_activity_status.cpp:168 #, c-format msgid "" "ChangeActivityStatus: response = \n" "%s" msgstr "" "ChangeActivityStatus: ответ = \n" "%s" #: src/services/a-rex/change_activity_status.cpp:213 #: src/services/a-rex/change_activity_status.cpp:217 #, c-format msgid "EMIES:PauseActivity: job %s - %s" msgstr "EMIES:PauseActivity: задача %s - %s" #: src/services/a-rex/change_activity_status.cpp:262 #: src/services/a-rex/change_activity_status.cpp:266 #, c-format msgid "EMIES:ResumeActivity: job %s - %s" msgstr "EMIES:ResumeActivity: задача %s - %s" #: src/services/a-rex/change_activity_status.cpp:311 #: src/services/a-rex/change_activity_status.cpp:316 #, c-format msgid 
"EMIES:CancelActivity: job %s - %s" msgstr "EMIES:CancelActivity: задача %s - %s" #: src/services/a-rex/change_activity_status.cpp:324 #, c-format msgid "job %s cancelled successfully" msgstr "задача %s успешно прервана" #: src/services/a-rex/change_activity_status.cpp:370 #: src/services/a-rex/change_activity_status.cpp:385 #, c-format msgid "EMIES:WipeActivity: job %s - %s" msgstr "EMIES:WipeActivity: задача %s - %s" #: src/services/a-rex/change_activity_status.cpp:389 #, c-format msgid "job %s (will be) cleaned successfully" msgstr "задача %s (будет) успешно удалена" #: src/services/a-rex/change_activity_status.cpp:435 #: src/services/a-rex/change_activity_status.cpp:440 #, c-format msgid "EMIES:RestartActivity: job %s - %s" msgstr "EMIES:RestartActivity: задача %s - %s" #: src/services/a-rex/change_activity_status.cpp:444 #, c-format msgid "job %s restarted successfully" msgstr "задача %s успешно перезапущена" #: src/services/a-rex/create_activity.cpp:35 #, c-format msgid "" "CreateActivity: request = \n" "%s" msgstr "" "CreateActivity: запрос = \n" "%s" #: src/services/a-rex/create_activity.cpp:40 msgid "CreateActivity: no job description found" msgstr "CreateActivity: Описание задачи не найдено" #: src/services/a-rex/create_activity.cpp:48 msgid "CreateActivity: max jobs total limit reached" msgstr "CreateActivity: достигнут максимальный предел общего количества задач" #: src/services/a-rex/create_activity.cpp:67 msgid "CreateActivity: Failed to accept delegation" msgstr "CreateActivity: Сбой при принятии делегирования" #: src/services/a-rex/create_activity.cpp:100 #, c-format msgid "CreateActivity: Failed to create new job: %s" msgstr "CreateActivity: Не удалось создать новую задачу: %s" #: src/services/a-rex/create_activity.cpp:102 msgid "CreateActivity: Failed to create new job" msgstr "CreateActivity: Не удалось создать новую задачу" #: src/services/a-rex/create_activity.cpp:117 msgid "CreateActivity finished successfully" msgstr "CreateActivity закончилось успешно" #: src/services/a-rex/create_activity.cpp:121 #, c-format msgid "" "CreateActivity: response = \n" "%s" msgstr "" "CreateActivity: ответ = \n" "%s" #: src/services/a-rex/create_activity.cpp:159 #, c-format msgid "" "EMIES:CreateActivity: request = \n" "%s" msgstr "" "EMIES:CreateActivity: запрос = \n" "%s" #: src/services/a-rex/create_activity.cpp:165 msgid "EMIES:CreateActivity: too many activity descriptions" msgstr "EMIES:CreateActivity: обнаружено слишком много описаний задач" #: src/services/a-rex/create_activity.cpp:175 msgid "EMIES:CreateActivity: no job description found" msgstr "EMIES:CreateActivity: описание задачи не обнаружено" #: src/services/a-rex/create_activity.cpp:182 msgid "EMIES:CreateActivity: max jobs total limit reached" msgstr "EMIES:CreateActivity: достигнут предел общего числа задач" #: src/services/a-rex/create_activity.cpp:208 #, c-format msgid "ES:CreateActivity: Failed to create new job: %s" msgstr "ES:CreateActivity: Не удалось создать новую задачу: %s" #: src/services/a-rex/create_activity.cpp:224 msgid "EMIES:CreateActivity finished successfully" msgstr "EMIES:CreateActivity успешно завершено" #: src/services/a-rex/create_activity.cpp:225 #, c-format msgid "New job accepted with id %s" msgstr "Новая задача принята с идентификатором %s" #: src/services/a-rex/create_activity.cpp:229 #, c-format msgid "" "EMIES:CreateActivity: response = \n" "%s" msgstr "" "EMIES:CreateActivity: ответ = \n" "%s" #: src/services/a-rex/delegation/DelegationStore.cpp:55 msgid "Wiping and re-creating whole 
storage" msgstr "Уничтожение и воссоздание всего хранилища" #: src/services/a-rex/delegation/DelegationStore.cpp:210 #: src/services/a-rex/delegation/DelegationStore.cpp:311 #, c-format msgid "DelegationStore: TouchConsumer failed to create file %s" msgstr "DelegationStore: TouchConsumer не смог создать файл %s" #: src/services/a-rex/delegation/DelegationStore.cpp:271 msgid "DelegationStore: PeriodicCheckConsumers failed to resume iterator" msgstr "" "DelegationStore: сбой возобновления итератора процессом " "PeriodicCheckConsumers" #: src/services/a-rex/delegation/DelegationStore.cpp:291 #, c-format msgid "" "DelegationStore: PeriodicCheckConsumers failed to remove old delegation %s - " "%s" msgstr "" "DelegationStore: сбой удаления процессом PeriodicCheckConsumers устаревшего " "делегирования %s - %s" #: src/services/a-rex/get.cpp:112 #, c-format msgid "Get: there is no job %s - %s" msgstr "Get: отсутствует задача %s - %s" #: src/services/a-rex/get.cpp:123 #, c-format msgid "Get: can't process file %s" msgstr "Get: невозможно обработать файл %s" #: src/services/a-rex/get.cpp:167 #, c-format msgid "Head: there is no job %s - %s" msgstr "Head: отсутствует задача %s - %s" #: src/services/a-rex/get.cpp:178 #, c-format msgid "Head: can't process file %s" msgstr "Head: невозможно обработать файл %s" #: src/services/a-rex/get.cpp:190 #, c-format msgid "http_get: start=%llu, end=%llu, burl=%s, hpath=%s" msgstr "http_get: start=%llu, end=%llu, burl=%s, hpath=%s" #: src/services/a-rex/get.cpp:357 msgid "Failed to extract credential information" msgstr "Сбой извлечения информации о параметрах доступа" #: src/services/a-rex/get.cpp:360 #, c-format msgid "Checking cache permissions: DN: %s" msgstr "Проверка прав доступа к кэшу: DN: %s" #: src/services/a-rex/get.cpp:361 #, c-format msgid "Checking cache permissions: VO: %s" msgstr "Проверка прав доступа к кэшу: ВО: %s" #: src/services/a-rex/get.cpp:363 #, c-format msgid "Checking cache permissions: VOMS attr: %s" msgstr "Проверка прав доступа к кэшу: атрибуты VOMS: %s" #: src/services/a-rex/get.cpp:373 #, c-format msgid "Cache access allowed to %s by DN %s" msgstr "Доступ к кэшу разрешён для %s пользователю с DN %s" #: src/services/a-rex/get.cpp:376 #, c-format msgid "DN %s doesn't match %s" msgstr "DN %s не совпадает с %s" #: src/services/a-rex/get.cpp:379 #, c-format msgid "Cache access allowed to %s by VO %s" msgstr "Доступ к кэшу разрешён для %s для ВО %s" #: src/services/a-rex/get.cpp:382 #, c-format msgid "VO %s doesn't match %s" msgstr "ВО %s не совпадает с %s" #: src/services/a-rex/get.cpp:388 src/services/a-rex/get.cpp:407 #, c-format msgid "Bad credential value %s in cache access rules" msgstr "Недопустимое значение параметра доступа %s в правилах доступа к кэшу" #: src/services/a-rex/get.cpp:396 src/services/a-rex/get.cpp:415 #, c-format msgid "VOMS attr %s matches %s" msgstr "Атрибут VOMS %s совпадает с %s" #: src/services/a-rex/get.cpp:397 #, c-format msgid "Cache access allowed to %s by VO %s and role %s" msgstr "Доступ к кэшу разрешён для %s для ВО %s и роли %s" #: src/services/a-rex/get.cpp:400 src/services/a-rex/get.cpp:419 #, c-format msgid "VOMS attr %s doesn't match %s" msgstr "Атрибут VOMS %s не совпадает с %s" #: src/services/a-rex/get.cpp:416 #, c-format msgid "Cache access allowed to %s by VO %s and group %s" msgstr "Доступ к кэшу разрешён для %s для ВО %s и группы %s" #: src/services/a-rex/get.cpp:422 #, c-format msgid "Unknown credential type %s for URL pattern %s" msgstr "Неизвестный тип параметра доступа %s для шаблона URL %s" 
#: src/services/a-rex/get.cpp:428 #, c-format msgid "No match found in cache access rules for %s" msgstr "Не найдено соответствия для %s в правилах доступа к кэшу" #: src/services/a-rex/get.cpp:438 #, c-format msgid "Get from cache: Looking in cache for %s" msgstr "Получение из кэша: Поиск %s в кэше" #: src/services/a-rex/get.cpp:441 #, c-format msgid "Get from cache: Invalid URL %s" msgstr "Получение из кэша: Недопустимый URL %s" #: src/services/a-rex/get.cpp:458 msgid "Get from cache: Error in cache configuration" msgstr "Получение из кэша: Ошибка настроек кэша" #: src/services/a-rex/get.cpp:467 msgid "Get from cache: File not in cache" msgstr "Получение из кэша: Файла в кэше нет" #: src/services/a-rex/get.cpp:470 #, c-format msgid "Get from cache: could not access cached file: %s" msgstr "" "Получение из кэша: не удалось получить доступ к кэшированному файлу: %s" #: src/services/a-rex/get.cpp:480 msgid "Get from cache: Cached file is locked" msgstr "Получение из кэша: Кэшированный файл заблокирован" #: src/services/a-rex/get_activity_documents.cpp:29 #, c-format msgid "" "GetActivityDocuments: request = \n" "%s" msgstr "" "GetActivityDocuments: запрос = \n" "%s" #: src/services/a-rex/get_activity_documents.cpp:40 msgid "GetActivityDocuments: non-AREX job requested" msgstr "GetActivityDocuments: запрошенная задача не контролируется AREX" #: src/services/a-rex/get_activity_documents.cpp:49 #: src/services/a-rex/get_activity_documents.cpp:60 #, c-format msgid "GetActivityDocuments: job %s - %s" msgstr "GetActivityDocuments: задача %s - %s" #: src/services/a-rex/get_activity_documents.cpp:72 #, c-format msgid "" "GetActivityDocuments: response = \n" "%s" msgstr "" "GetActivityDocuments: ответ = \n" "%s" #: src/services/a-rex/get_activity_statuses.cpp:35 #, c-format msgid "" "GetActivityStatuses: request = \n" "%s" msgstr "" "GetActivityStatuses: запрос = \n" "%s" #: src/services/a-rex/get_activity_statuses.cpp:50 #, c-format msgid "GetActivityStatuses: unknown verbosity level requested: %s" msgstr "GetActivityStatuses: запрошен неизвестный уровень отладки: %s" #: src/services/a-rex/get_activity_statuses.cpp:62 #, c-format msgid "GetActivityStatuses: job %s - can't understand EPR" msgstr "GetActivityStatuses: задача %s - невозможно интерпретировать EPR" #: src/services/a-rex/get_activity_statuses.cpp:71 #, c-format msgid "GetActivityStatuses: job %s - %s" msgstr "GetActivityStatuses: задача %s - %s" #: src/services/a-rex/get_activity_statuses.cpp:105 #, c-format msgid "" "GetActivityStatuses: response = \n" "%s" msgstr "" "GetActivityStatuses: ответ = \n" "%s" #: src/services/a-rex/get_activity_statuses.cpp:306 #: src/services/a-rex/get_activity_statuses.cpp:400 #, c-format msgid "EMIES:GetActivityStatus: job %s - %s" msgstr "EMIES:GetActivityStatus: задача %s - %s" #: src/services/a-rex/get_activity_statuses.cpp:526 #, c-format msgid "EMIES:GetActivityInfo: job %s - failed to retrieve GLUE2 information" msgstr "" "EMIES:GetActivityInfo: задача %s - не удалось получить информацию по формату " "GLUE2" #: src/services/a-rex/get_activity_statuses.cpp:578 #: src/services/a-rex/get_activity_statuses.cpp:585 #, c-format msgid "EMIES:NotifyService: job %s - %s" msgstr "EMIES:NotifyService: задача %s - %s" #: src/services/a-rex/get_factory_attributes_document.cpp:37 #, c-format msgid "" "GetFactoryAttributesDocument: request = \n" "%s" msgstr "" "GetFactoryAttributesDocument: запрос = \n" "%s" #: src/services/a-rex/get_factory_attributes_document.cpp:62 #, c-format msgid "" 
"GetFactoryAttributesDocument: response = \n" "%s" msgstr "" "GetFactoryAttributesDocument: ответ = \n" "%s" #: src/services/a-rex/grid-manager/GridManager.cpp:98 #, c-format msgid "" "Cannot create directories for log file %s. Messages will be logged to this " "log" msgstr "" "Не удалось создать каталоги для журнального файла %s. Сообщения будут " "записываться в этот журнал" #: src/services/a-rex/grid-manager/GridManager.cpp:104 #, c-format msgid "" "Cannot open cache log file %s: %s. Cache cleaning messages will be logged to " "this log" msgstr "" "Не удалось открыть журнальный файл кэша %s: %s. Сообщения об очистке кэша " "будут записываться в этот журнал" #: src/services/a-rex/grid-manager/GridManager.cpp:110 #: src/services/a-rex/grid-manager/log/JobLog.cpp:139 #, c-format msgid "Running command %s" msgstr "Выполняется команда %s" #: src/services/a-rex/grid-manager/GridManager.cpp:114 msgid "Failed to start cache clean script" msgstr "Не удалось запустить скрипт очистки кэша" #: src/services/a-rex/grid-manager/GridManager.cpp:115 msgid "Cache cleaning script failed" msgstr "Сбой в работе скрипта очистки кэша" #: src/services/a-rex/grid-manager/GridManager.cpp:177 msgid "Starting jobs processing thread" msgstr "Запускается поток обработки задачи" #: src/services/a-rex/grid-manager/GridManager.cpp:178 #, c-format msgid "Used configuration file %s" msgstr "Используется файл настроек %s" #: src/services/a-rex/grid-manager/GridManager.cpp:187 #, c-format msgid "" "Error adding communication interface in %s. Maybe another instance of A-REX " "is already running." msgstr "" "Сбой при добавлении интерфейса связи в %s. Возможно, уже запущен другой " "процесс A-REX." #: src/services/a-rex/grid-manager/GridManager.cpp:190 #, c-format msgid "" "Error adding communication interface in %s. Maybe permissions are not " "suitable." msgstr "" "Сбой при добавлении интерфейса связи в %s. Возможно, отсутствует доступ к " "директории." #: src/services/a-rex/grid-manager/GridManager.cpp:199 #, c-format msgid "" "Error initiating delegation database in %s. Maybe permissions are not " "suitable. Returned error is: %s." msgstr "" "Сбой при создании базы данных делегирования в %s. Возможно, отсутствует " "доступ к директории. Возвращена ошибка %s." 
#: src/services/a-rex/grid-manager/GridManager.cpp:211 msgid "Failed to start new thread" msgstr "Не удалось запустить новый поток" #: src/services/a-rex/grid-manager/GridManager.cpp:268 msgid "Failed to start new thread: cache won't be cleaned" msgstr "Не удалось запустить новый поток: кэш не будет очищен" #: src/services/a-rex/grid-manager/GridManager.cpp:273 msgid "Picking up left jobs" msgstr "Обработка оставшихся задач" #: src/services/a-rex/grid-manager/GridManager.cpp:277 msgid "Starting data staging threads" msgstr "Запускаются потоки размещения данных" #: src/services/a-rex/grid-manager/GridManager.cpp:281 msgid "Failed to start data staging threads, exiting Grid Manager thread" msgstr "" "Не удалось запустить потоки размещения данных, закрывается поток Grid Manager" #: src/services/a-rex/grid-manager/GridManager.cpp:290 msgid "Starting jobs' monitoring" msgstr "Запуск мониторинга задач" #: src/services/a-rex/grid-manager/GridManager.cpp:303 #, c-format msgid "Failed to open heartbeat file %s" msgstr "Не удалось открыть мониторинговый файл %s" #: src/services/a-rex/grid-manager/GridManager.cpp:335 #, c-format msgid "Orphan delegation lock detected (%s) - cleaning" msgstr "Обнаружен неиспользуемый блок делегирования (%s) - очистка" #: src/services/a-rex/grid-manager/GridManager.cpp:340 msgid "Failed to obtain delegation locks for cleaning orphaned locks" msgstr "" "Не удалось получить блоки делегирования для очистки неиспользуемых блоков" #: src/services/a-rex/grid-manager/GridManager.cpp:346 msgid "Waking up" msgstr "Активизация" #: src/services/a-rex/grid-manager/GridManager.cpp:349 msgid "Stopping jobs processing thread" msgstr "Останавливается поток обработки задачи" #: src/services/a-rex/grid-manager/GridManager.cpp:352 msgid "Exiting jobs processing thread" msgstr "Останавливается поток обработки задачи" #: src/services/a-rex/grid-manager/GridManager.cpp:365 msgid "Shutting down job processing" msgstr "Отключение обработки задач" #: src/services/a-rex/grid-manager/GridManager.cpp:370 msgid "Shutting down data staging threads" msgstr "Закрываются потоки размещения данных" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:24 #, c-format msgid "" "Usage: %s -I -U -P -L [-c " "] [-p ] [-d ]" msgstr "" "Использование: %s -I <задача> -U <пользователь> -P <доверенность> -L <файл " "состояния задачи> [-c <префикс ceID>] [-p <префикс журнала> ] [-d <отладка>]" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:74 msgid "Job ID argument is required." msgstr "Требуется аргумент - идентификатор задачи." #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:80 msgid "Path to user's proxy file should be specified." msgstr "Должен быть указан путь к сертификату доверенности пользователя." #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:86 msgid "User name should be specified." msgstr "Должно быть указано имя пользователя." #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:92 msgid "Path to .local job status file is required." msgstr "Требуется путь к файлу состояния задач .local" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:100 msgid "Generating ceID prefix from hostname automatically" msgstr "Автоматическое создание префикса ceID из имени узла" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:103 msgid "" "Cannot determine hostname from gethostname() to generate ceID automatically." msgstr "" "Невозможно определить hostname из gethostname() для автоматического создания " "ceID." 
#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:112 #, c-format msgid "ceID prefix is set to %s" msgstr "Префикс ceID задан как %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:120 #, c-format msgid "Getting currect timestamp for BLAH parser log: %s" msgstr "Создание текущей метки времени для журнала программы разбора BLAH: %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:129 msgid "Parsing .local file to obtain job-specific identifiers and info" msgstr "" "Разбирается файл .local с целью извлечения специфических для задачи " "идентификаторов и информации" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:139 #, c-format msgid "globalid is set to %s" msgstr "globalid задан как %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:142 #, c-format msgid "headnode is set to %s" msgstr "Головной узел задан как %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:145 #, c-format msgid "interface is set to %s" msgstr "Интерфейс задан как %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:149 msgid "There is no local LRMS ID. Message will not be written to BLAH log." msgstr "" "Отсутствует идентификатор СУПО. Сообщение не будет записано в журнал BLAH." #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:152 #, c-format msgid "localid is set to %s" msgstr "localid задан как %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:155 #, c-format msgid "queue name is set to %s" msgstr "Имя очереди задано как %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:158 #, c-format msgid "owner subject is set to %s" msgstr "Имя субъекта владельца задано как %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:160 msgid "" "Job did not finished successfully. Message will not be written to BLAH log." msgstr "" "Задача не завершилась успехом. Сообщение не будет записано в журнал BLAH." #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:165 #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:119 msgid "Can not read information from the local job status file" msgstr "Невозможно прочесть информацию из файла состояния задачи" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:181 #, c-format msgid "" "Unsupported submission interface %s. Seems arc-blahp-logger need to be " "updated accordingly :-) Please submit the bug to bugzilla." msgstr "" "Интерфейс засылки %s не поддерживается. Похоже, arc-blahp-logger пора " "обновить :-) Пожалуйста, опишите проблему в bugzill-е." #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:191 msgid "Parsing VOMS AC to get FQANs information" msgstr "Разборка VOMS AC с целью получения информации о FQAN" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:210 msgid "No FQAN found. Using NULL as userFQAN value" msgstr "" "FQAN не обнаружен. 
В качестве значения userFQAN будет использоваться NULL" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:224 #, c-format msgid "Assembling BLAH parser log entry: %s" msgstr "Формирование записи журнала программы разбора BLAH: %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:229 #, c-format msgid "Writing the info the the BLAH parser log: %s" msgstr "Запись информации в журнал программы разбора BLAH: %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:237 #, c-format msgid "Cannot open BLAH log file '%s'" msgstr "Не удалось открыть журнальный файл BLAH '%s'" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:24 #, c-format msgid "" "Usage: %s [-N] -P -L [-c ] [-d " "]" msgstr "" "Использование: %s [-N] -P <доверенность> -L <файл состояния задачи> [-c " "<файл настроек>] [-d <отладка>]" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:90 msgid "User proxy file is required but is not specified" msgstr "Файл доверенности пользователя необходим, но не указан" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:95 msgid "Local job status file is required" msgstr "Необходимо указать файл состояния задачи" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:114 #, c-format msgid "Making the decision for the queue %s" msgstr "Принимается решение для очереди %s" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:127 #, c-format msgid "Can not parse the configuration file %s" msgstr "Невозможно обработать файл настроек %s" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:144 #, c-format msgid "Can not find queue '%s' in the configuration file" msgstr "Не удалось обнаружить очередь '%s' в файле настроек" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:168 msgid "No access policy to check, returning success" msgstr "Нет политик доступа, нуждающихся в сверке, успешное завершение." 
#: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:182 #, c-format msgid "CA certificates directory %s does not exist" msgstr "Каталог сертификатов агентств CA %s не существует" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:202 msgid "User proxy certificate is not valid" msgstr "Доверенность пользователя недействительна" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:207 #, c-format msgid "Getting VOMS AC for: %s" msgstr "Извлечение сертификата атрибутов VOMS AC для: %s" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:221 #, c-format msgid "Checking a match for '%s'" msgstr "Проверка совпадения для '%s'" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:228 #, c-format msgid "FQAN '%s' IS a match to '%s'" msgstr "Полный атрибут '%s' СОВПАДАЕТ с '%s'" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:232 #, c-format msgid "Queue '%s' usage is prohibited to FQAN '%s' by the site access policy" msgstr "" "Использование очереди '%s' запрещено для полного атрибута '%s' в " "соответствии с локальной политикой доступа" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:236 #, c-format msgid "FQAN '%s' IS NOT a match to '%s'" msgstr "Полный атрибут '%s' НЕ СОВПАДАЕТ с '%s'" #: src/services/a-rex/grid-manager/arc_vomsac_check.cpp:242 #, c-format msgid "" "Queue '%s' usage with provided FQANs is prohibited by the site access policy" msgstr "" "Использование очереди '%s' запрещено для указанных полных атрибутов в " "соответствии с локальной политикой доступа" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:35 #, c-format msgid "Missing cancel-%s-job - job cancellation may not work" msgstr "Не найден скрипт cancel-%s-job - прерывание задачи может не работать" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:39 #, c-format msgid "Missing submit-%s-job - job submission to LRMS may not work" msgstr "" "Не найден скрипт submit-%s-job - засылка задачи в СУПО может не работать" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:43 #, c-format msgid "Missing scan-%s-job - may miss when job finished executing" msgstr "Не найден скрипт scan-%s-job - окончание задачи может быть незамеченным" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:57 #, c-format msgid "Wrong option in %s" msgstr "Неверная опция в %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:71 #, c-format msgid "Can't read configuration file at %s" msgstr "Невозможно прочесть файл настроек в %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:80 #, c-format msgid "Can't interpret configuration file %s as XML" msgstr "Не удалось разобрать файл настроек %s как XML" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:118 #, c-format msgid "Can't recognize type of configuration file at %s" msgstr "Невозможно определить тип файла настроек в %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:121 msgid "Could not determine configuration type or configuration is empty" msgstr "Невозможно определить тип файла настроек, или же он пуст" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:169 msgid "No queue name given in queue block name" msgstr "Не указано название очереди в названии блока queue" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:176 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:499 msgid "forcedefaultvoms parameter is empty" msgstr "Параметр forcedefaultvoms пуст" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:185 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:198 msgid "authorizedvo parameter is empty" msgstr 
"Параметр authorizedvo пуст" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:276 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:604 #, c-format msgid "Wrong number in jobreport_period: %s" msgstr "Недопустимое число в jobreport_period: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:280 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:608 #, c-format msgid "Wrong number in jobreport_period: %d, minimal value: %s" msgstr "Недопустимое число в jobreport_period: %d, наименьшее значение: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:299 msgid "Missing file name in jobreport_logfile" msgstr "Отсутствует имя файла в jobreport_logfile" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:316 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:323 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:330 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:337 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:344 #, c-format msgid "Wrong number in maxjobs: %s" msgstr "Недопустимое число в maxjobs: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:351 #, c-format msgid "Wrong number in wakeupperiod: %s" msgstr "Недопустимое число в wakeupperiod: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:360 msgid "mail parameter is empty" msgstr "Параметр mail пуст" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:366 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:370 msgid "Wrong number in defaultttl command" msgstr "Недопустимое число в команде defaultttl" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:376 msgid "Wrong number in maxrerun command" msgstr "Недопустимое число в команде maxrerun" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:382 msgid "defaultlrms is empty" msgstr "пустое значение defaultlrms" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:398 msgid "State name for plugin is missing" msgstr "Отсутствует наименование состояния подключаемого модуля" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:402 msgid "Options for plugin are missing" msgstr "Отсутствуют опции подключаемого модуля" 
#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:405 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:726 #, c-format msgid "Failed to register plugin for state %s" msgstr "Сбой регистрации подключаемого модуля для состояния %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:413 msgid "Wrong number for timeout in plugin command" msgstr "" "Недопустимое значение времени ожидания в инструкции подключаемого модуля" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:429 msgid "Wrong option in fixdirectories" msgstr "Неверная опция в fixdirectories" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:441 msgid "Wrong option in delegationdb" msgstr "Неверная опция для базы данных delegationdb" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:456 msgid "Session root directory is missing" msgstr "Отсутствует корневая директория сессии" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:459 msgid "Junk in sessiondir command" msgstr "Бессмыслица в команде sessiondir" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:471 msgid "Missing directory in control command" msgstr "В контрольной инструкции пропущен каталог" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:476 msgid "" "'control' configuration option is no longer supported, please use " "'controldir' instead" msgstr "" "Опция настроек 'control' теперь называется 'controldir'; пожалуйста, " "используйте новое название" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:481 msgid "User for helper program is missing" msgstr "Отсутствует пользователь для вспомогательной программы" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:484 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:875 msgid "Only user '.' for helper program is supported" msgstr "Для вспомогательной программы поддерживается только пользователь '.'" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:487 msgid "Helper program is missing" msgstr "Отсутствует вспомогательная программа" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:632 msgid "Value for maxJobsTracked is incorrect number" msgstr "Значение maxJobsTracked не является допустимым числом" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:636 msgid "Value for maxJobsRun is incorrect number" msgstr "Значение maxJobsRun не является допустимым числом" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:640 msgid "Value for maxJobsTotal is incorrect number" msgstr "Значение maxJobsTotal не является допустимым числом" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:644 msgid "Value for maxJobsPerDN is incorrect number" msgstr "Значение maxJobsPerDN не является допустимым числом" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:648 msgid "Value for wakeupPeriod is incorrect number" msgstr "Значение wakeupPeriod не является допустимым числом" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:652 msgid "Value for maxScripts is incorrect number" msgstr "Значение maxScripts не является допустимым числом" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:664 msgid "serviceMail is empty" msgstr "пустой serviceMail" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:678 msgid "Type in LRMS is missing" msgstr "Отсутствует тип в СУПО" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:691 msgid "LRMS is missing" msgstr "Отсутствует СУПО" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:704 msgid "State name for authPlugin is missing" msgstr "Отсутствует наименование состояния модуля authPlugin" #: 
src/services/a-rex/grid-manager/conf/CoreConfig.cpp:709 msgid "Command for authPlugin is missing" msgstr "Отсутствует команда для модуля authPlugin" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:723 #, c-format msgid "Registering plugin for state %s; options: %s; command: %s" msgstr "" "Регистрируется подключаемый модуль для состояния %s; опции: %s; команда: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:739 msgid "Command for localCred is missing" msgstr "Отсутствует команда для модуля localCred" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:745 msgid "Timeout for localCred is missing" msgstr "Отсутствует тайм-аут для модуля localCred" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:750 msgid "Timeout for localCred is incorrect number" msgstr "Недопустимое значение тайм-аута для модуля localCred" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:778 msgid "Control element must be present" msgstr "Элемент Control должен присутствовать" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:783 msgid "controlDir is missing" msgstr "Отсутствует controlDir" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:790 msgid "sessionRootDir is missing" msgstr "Отсутствует sessionRootDir" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:800 msgid "Attribute drain for sessionRootDir is incorrect boolean" msgstr "" "Значение атрибута drain для sessionRootDir не является верным булевским" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:809 msgid "The fixDirectories element is incorrect value" msgstr "Значение элемента fixDirectories неверно" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:816 msgid "The delegationDB element is incorrect value" msgstr "Значение элемента delegationDB неверно" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:821 msgid "The maxReruns element is incorrect number" msgstr "Значение элемента maxReruns не является допустимым числом" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:825 msgid "The noRootPower element is incorrect number" msgstr "Значение элемента noRootPower не является допустимым числом" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:829 msgid "The defaultTTL element is incorrect number" msgstr "Значение элемента defaultTTL не является допустимым числом" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:833 msgid "The defaultTTR element is incorrect number" msgstr "Значение элемента defaultTTR не является допустимым числом" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:866 msgid "Command in helperUtility is missing" msgstr "Отсутствует команда в модуле helperUtility" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:871 msgid "Username in helperUtility is empty" msgstr "Не указано имя пользователя в модуле helperUtility" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:106 #, c-format msgid "\tSession root dir : %s" msgstr "\tКорневой каталог сессии: %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:107 #, c-format msgid "\tControl dir : %s" msgstr "\tКонтрольный каталог: %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:108 #, c-format msgid "\tdefault LRMS : %s" msgstr "\tСУПО по умолчанию : %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:109 #, c-format msgid "\tdefault queue : %s" msgstr "\tочередь по умолчанию : %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:110 #, c-format msgid "\tdefault ttl : %u" msgstr "\tВремя жизни по умолчанию : %u" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:115 msgid "No valid caches 
found in configuration, caching is disabled" msgstr "" "В настройках не обнаружено ни одного приемлемого кэша, кэширование отключено" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:120 #, c-format msgid "\tCache : %s" msgstr "\tКэш : %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:122 #, c-format msgid "\tCache link dir : %s" msgstr "\tКаталог с кэшем ссылок: %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:126 #, c-format msgid "\tRemote cache : %s" msgstr "\tУдалённый кэш : %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:128 #, c-format msgid "\tRemote cache link: %s" msgstr "\tСсылка на удалённый кэш: %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:130 msgid "\tCache cleaning enabled" msgstr "\tОчистка кэша включена" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:131 msgid "\tCache cleaning disabled" msgstr "\tОчистка кэша отключена" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:308 msgid "" "Globus location variable substitution is not supported anymore. Please " "specify path directly." msgstr "" "Переменная, указывающая на расположение Globus, больше не поддерживается. " "Пожалуйста, укажите полный путь." #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:412 #, c-format msgid "Starting helper process: %s" msgstr "Запускается вспомогательный процесс: %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:422 #, c-format msgid "Helper process start failed: %s" msgstr "Сбой при запуске вспомогательного процесса: %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:429 #, c-format msgid "Stopping helper process %s" msgstr "Останавливается вспомогательный процесс %s" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:14 #, c-format msgid "wrong boolean in %s" msgstr "неверная булева переменная в %s" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:20 #, c-format msgid "wrong number in %s" msgstr "неверное число в %s" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:49 msgid "Can't read configuration file" msgstr "Не удалось прочесть файл настроек" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:58 #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:32 msgid "Can't interpret configuration file as XML" msgstr "Не удалось разобрать файл настроек как XML" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:62 #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:69 msgid "Configuration error" msgstr "Ошибка настройки" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:75 #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:103 msgid "Can't recognize type of configuration file" msgstr "Невозможно определить тип файла настроек" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:95 msgid "Bad number in maxdelivery" msgstr "Недопустимое значение maxdelivery" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:101 msgid "Bad number in maxemergency" msgstr "Недопустимое значение maxemergency" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:107 msgid "Bad number in maxprocessor" msgstr "Недопустимое значение maxprocessor" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:113 msgid "Bad number in maxprepared" msgstr "Недопустимое значение maxprepared" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:119 msgid "Bad number in maxtransfertries" msgstr "недопустимое число в maxtransfertries" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:128 msgid "Bad number in speedcontrol" msgstr "Недопустимое значение speedcontrol" #: 
src/services/a-rex/grid-manager/conf/StagingConfig.cpp:139 #, c-format msgid "Bad number in definedshare %s" msgstr "Недопустимое значение definedshare %s" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:148 #, c-format msgid "Bad URL in deliveryservice: %s" msgstr "Недопустимый URL в deliveryservice: %s" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:159 msgid "Bad number in remotesizelimit" msgstr "Недопустимое значение remotesizelimit" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:189 msgid "Bad value for debug" msgstr "Недопустимое значение debug" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:203 #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:315 msgid "Bad URL in acix_endpoint" msgstr "Недопустимый URL в acix_endpoint" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:258 #, c-format msgid "Bad URL in deliveryService: %s" msgstr "Недопустимый URL в deliveryService: %s" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:272 msgid "Bad value for logLevel" msgstr "Недопустимое значение logLevel" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:25 msgid "Can't open configuration file" msgstr "Не удалось открыть файл настроек" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:47 msgid "Value for 'link' element in mapURL is incorrect" msgstr "Значение элемента 'link' в mapURL неверно" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:53 msgid "Missing 'from' element in mapURL" msgstr "Отсутствующий элемент 'from' в mapURL" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:57 msgid "Missing 'to' element in mapURL" msgstr "Отсутствующий элемент 'to' в mapURL" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:84 msgid "Not enough parameters in copyurl" msgstr "Недостаточное количество параметров в copyurl" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:93 msgid "Not enough parameters in linkurl" msgstr "Недостаточное количество параметров в linkurl" #: src/services/a-rex/grid-manager/files/ControlFileContent.cpp:179 #, c-format msgid "Wrong directory in %s" msgstr "Неверный каталог в %s" #: src/services/a-rex/grid-manager/files/ControlFileHandling.cpp:100 #, c-format msgid "Failed setting file owner: %s" msgstr "Не удалось задать владельца файла: %s" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:33 msgid "gm-delegations-converter changes format of delegation database." msgstr "" "gm-delegations-converter преобразовывает формат базы данных делегирования." 
#: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:38 #: src/services/a-rex/grid-manager/gm_jobs.cpp:106 #: src/services/a-rex/grid-manager/gm_kick.cpp:24 msgid "use specified configuration file" msgstr "использовать указанный файл настроек" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:39 #: src/services/a-rex/grid-manager/gm_jobs.cpp:107 #: src/services/a-rex/grid-manager/gm_kick.cpp:25 msgid "file" msgstr "файл" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:43 #: src/services/a-rex/grid-manager/gm_jobs.cpp:111 msgid "read information from specified control directory" msgstr "читать информацию из указанного контрольного каталога" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:44 #: src/services/a-rex/grid-manager/gm_jobs.cpp:112 msgid "dir" msgstr "каталог" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:48 msgid "convert from specified input database format [bdb|sqlite]" msgstr "преобразовать из указанного исходного формата базы данных [bdb|sqlite]" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:49 #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:54 msgid "database format" msgstr "формат базы данных" #: src/services/a-rex/grid-manager/gm_delegations_converter.cpp:53 msgid "convert into specified output database format [bdb|sqlite]" msgstr "преобразовать в указанный выходной формат базы данных [bdb|sqlite]" #: src/services/a-rex/grid-manager/gm_jobs.cpp:96 msgid "gm-jobs displays information on current jobs in the system." msgstr "gm-jobs выводит информацию о текущих заданиях в системе." #: src/services/a-rex/grid-manager/gm_jobs.cpp:101 msgid "display more information on each job" msgstr "вывести больше информации о каждом задании" #: src/services/a-rex/grid-manager/gm_jobs.cpp:116 msgid "print summary of jobs in each transfer share" msgstr "вывести сводку о задачах в каждой из трансферных квот" #: src/services/a-rex/grid-manager/gm_jobs.cpp:121 msgid "do not print list of jobs" msgstr "не выводить список задач" #: src/services/a-rex/grid-manager/gm_jobs.cpp:126 msgid "do not print number of jobs in each state" msgstr "не выводить количество задач в каждом состоянии" #: src/services/a-rex/grid-manager/gm_jobs.cpp:131 msgid "print state of the service" msgstr "вывести состояние сервиса" #: src/services/a-rex/grid-manager/gm_jobs.cpp:136 msgid "show only jobs of user(s) with specified subject name(s)" msgstr "" "показать задачи, принадлежащие пользователям с указанными именами субъекта" #: src/services/a-rex/grid-manager/gm_jobs.cpp:137 #: src/services/a-rex/grid-manager/gm_jobs.cpp:147 #: src/services/a-rex/grid-manager/gm_jobs.cpp:157 msgid "dn" msgstr "DN" #: src/services/a-rex/grid-manager/gm_jobs.cpp:141 msgid "request to cancel job(s) with specified ID(s)" msgstr "запросить обрыв задач с указанными ярлыками" #: src/services/a-rex/grid-manager/gm_jobs.cpp:142 #: src/services/a-rex/grid-manager/gm_jobs.cpp:152 #: src/services/a-rex/grid-manager/gm_jobs.cpp:162 #: src/services/a-rex/grid-manager/gm_jobs.cpp:172 msgid "id" msgstr "ID" #: src/services/a-rex/grid-manager/gm_jobs.cpp:146 msgid "" "request to cancel jobs belonging to user(s) with specified subject name(s)" msgstr "" "запросить обрыв задач, принадлежащих пользователям с указанными именами " "субъекта" #: src/services/a-rex/grid-manager/gm_jobs.cpp:151 msgid "request to clean job(s) with specified ID(s)" msgstr "запросить удаление задач с указанными ярлыками" #: src/services/a-rex/grid-manager/gm_jobs.cpp:156 
msgid "" "request to clean jobs belonging to user(s) with specified subject name(s)" msgstr "" "запросить удаление задач, принадлежащих пользователям с указанными именами " "субъекта" #: src/services/a-rex/grid-manager/gm_jobs.cpp:161 msgid "show only jobs with specified ID(s)" msgstr "показать задачи с указанными ярлыками" #: src/services/a-rex/grid-manager/gm_jobs.cpp:166 msgid "print list of available delegation IDs" msgstr "вывести список доступных идентификаторов делегирования" #: src/services/a-rex/grid-manager/gm_jobs.cpp:171 msgid "print delegation token of specified ID(s)" msgstr "вывести токен делегирования указанного идентификатора" #: src/services/a-rex/grid-manager/gm_jobs.cpp:176 msgid "print main delegation token of specified Job ID(s)" msgstr "вывести основной токен делегирования указанного идентификатора" #: src/services/a-rex/grid-manager/gm_jobs.cpp:177 msgid "job id" msgstr "ID задания" #: src/services/a-rex/grid-manager/gm_jobs.cpp:181 msgid "" "output requested elements (jobs list, delegation ids and tokens) to file" msgstr "" "записать указанные элементы (список задач, идентификаторы и токены " "делегирования) в файл" #: src/services/a-rex/grid-manager/gm_jobs.cpp:182 msgid "file name" msgstr "Название файла" #: src/services/a-rex/grid-manager/gm_kick.cpp:18 msgid "" "gm-kick wakes up the A-REX corresponding to the given control file. If no " "file is given it uses the control directory found in the configuration file." msgstr "" "gm-kick принудительно запускает цикл A-REX в соответствии с указанным " "управляющим файлом. Если файл не указан, используется управляющий каталог из " "файла настроек." #: src/services/a-rex/grid-manager/inputcheck.cpp:39 #, c-format msgid "Failed to acquire source: %s" msgstr "Не удалось получить источник: %s" #: src/services/a-rex/grid-manager/inputcheck.cpp:44 #, c-format msgid "Failed to resolve %s" msgstr "Не удалось разрешить %s" #: src/services/a-rex/grid-manager/inputcheck.cpp:61 #, c-format msgid "Failed to check %s" msgstr "Не удалось проверить %s" #: src/services/a-rex/grid-manager/inputcheck.cpp:75 msgid "job_description_file [proxy_file]" msgstr "job_description_file [proxy_file]" #: src/services/a-rex/grid-manager/inputcheck.cpp:76 msgid "" "inputcheck checks that input files specified in the job description are " "available and accessible using the credentials in the given proxy file." 
msgstr "" "inputcheck проверяет, доступны ли входные файлы, указанные в описании " "задачи, используя параметры доступа в указанном файле доверенности" #: src/services/a-rex/grid-manager/inputcheck.cpp:88 msgid "Wrong number of arguments given" msgstr "Указано неверное количество аргументов" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:105 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1009 msgid "Failed to run plugin" msgstr "Ошибка исполнения модуля" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:109 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1015 #, c-format msgid "Plugin failed: %s" msgstr "Сбой модуля: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:158 msgid "empty argument to remotegmdirs" msgstr "не задан аргумент remotegmdirs" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:165 msgid "bad arguments to remotegmdirs" msgstr "неверные аргументы remotegmdirs" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:177 msgid "Wrong number in maxjobdesc" msgstr "Недопустимое число в maxjobdesc" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:185 #: src/services/gridftpd/fileplugin/fileplugin.cpp:186 #, c-format msgid "Unsupported configuration command: %s" msgstr "Неподдерживаемая инструкция настроек: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:199 #, c-format msgid "Mapped user:group (%s:%s) not found" msgstr "Соответствующие user:group (%s:%s) не обнаружены" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:202 msgid "Failed processing grid-manager configuration" msgstr "Не удалось обработать настройки grid-manager" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:205 msgid "" "Cannot use multiple session directories and remotegmdirs at the same time" msgstr "" "Недопустимо одновременное использование нескольких каталогов сеансов и " "remotegmdirs" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:228 msgid "This user is denied to submit new jobs." msgstr "Этому пользователю отказано в праве запуска новых задач." 
#: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:239 msgid "No control or remote control directories defined in configuration" msgstr "" "В настройках не заданы контрольные директории (локальные либо удалённые)" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:290 #, c-format msgid "Job submission user: %s (%i:%i)" msgstr "Пользователь, отправивший задачу: %s (%i:%i)" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:294 msgid "Job plugin was not initialised" msgstr "Модуль обработки задач не был запущен" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:312 msgid "No delegated credentials were passed" msgstr "Делегированные параметры доступа не переданы" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:385 #, c-format msgid "Cancelling job %s" msgstr "Прерывание задачи %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:436 #, c-format msgid "Cleaning job %s" msgstr "Удаляется задача %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:472 msgid "Request to open file with storing in progress" msgstr "Запрос открытия файла в процессе записи" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:505 #: src/services/gridftpd/fileplugin/fileplugin.cpp:343 #, c-format msgid "Retrieving file %s" msgstr "Получение файла %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:556 #, c-format msgid "Accepting submission of new job or modification request: %s" msgstr "Запрос на засылку новой задачи или изменение старой принят: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:577 #: src/services/gridftpd/fileplugin/fileplugin.cpp:383 #: src/services/gridftpd/fileplugin/fileplugin.cpp:420 #, c-format msgid "Storing file %s" msgstr "Записывается файл %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:599 #, c-format msgid "Unknown open mode %i" msgstr "Неизвестный режим открытия %i" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:716 #, c-format msgid "action(%s) != request" msgstr "action(%s) != request" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:767 msgid "Failed writing job description" msgstr "Не удалось записать описание задачи" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:954 msgid "Failed writing local description" msgstr "Сбой записи локального описания" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:964 msgid "Failed writing ACL" msgstr "Не удалось записать ACL" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:980 #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:987 #: src/services/a-rex/job.cpp:587 #, c-format msgid "Failed to run external plugin: %s" msgstr "Не удалось запустить внешний подключаемый модуль: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:991 #: src/services/a-rex/job.cpp:591 #, c-format msgid "Plugin response: %s" msgstr "Ответ подключаемого модуля: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:993 msgid "Failed to run external plugin" msgstr "Не удалось запустить внешний подключаемый модуль" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1025 #, c-format msgid "Failed to create session directory %s" msgstr "Не удалось создать каталог сессии %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1035 msgid "Failed writing status" msgstr "Не удалось записать состояние" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1049 #, c-format msgid "Failed to lock delegated credentials: %s" msgstr "Невозможно заблокировать делегированные 
параметры доступа: %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1300 #, c-format msgid "Renewing proxy for job %s" msgstr "Обновляется доверенность для задачи %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1312 #, c-format msgid "New proxy expires at %s" msgstr "Срок действия новой доверенности истекает в %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1315 msgid "Failed to write 'local' information" msgstr "Не удалось записать 'локальную' информацию" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1321 msgid "Failed to renew proxy" msgstr "Не удалось обновить доверенность" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1324 msgid "New proxy expiry time is not later than old proxy, not renewing proxy" msgstr "" "Срок действия новой доверенности не дольше старой, доверенность не " "обновляется" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1367 #, c-format msgid "Checking file %s" msgstr "Проверка файла %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1415 msgid "ID contains forbidden characters" msgstr "ID содержит недопустимые символы" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1462 #: src/services/a-rex/job.cpp:781 #, c-format msgid "Failed to create file in %s" msgstr "Не удалось создать файл в %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1487 msgid "Out of tries while allocating new job ID" msgstr "Закончились попытки присвоения нового ярлыка задачи" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1568 #, c-format msgid "Failed to read job's local description for job %s from %s" msgstr "Не удалось прочесть локальное описание для задачи %s из %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1650 msgid "No non-draining control or session directories available" msgstr "" "Нет контрольных каталогов или каталогов сессий не в состоянии разгрузки" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1666 #, c-format msgid "Using control directory %s" msgstr "Используется контрольный каталог %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp:1667 #, c-format msgid "Using session directory %s" msgstr "Используется каталог сессии %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:26 #, c-format msgid "Failed to read job's ACL for job %s from %s" msgstr "Не удалось прочесть правила доступа для задачи %s из %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:69 #, c-format msgid "Failed to parse user policy for job %s" msgstr "Сбой разбора правил допуска пользователя для задачи %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:74 #, c-format msgid "Failed to load policy evaluator for policy of job %s" msgstr "Не удалось подгрузить анализатор для правил допуска задачи %s" #: src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp:126 #, c-format msgid "Unknown ACL policy %s for job %s" msgstr "Неизвестное правило доступа %s для задачи %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:121 msgid "Exiting Generator thread" msgstr "Останавливается поток Generator" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:211 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:225 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:238 msgid "DTRGenerator is not running!" msgstr "DTRGenerator не запущен!" 
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:214 #, c-format msgid "Received DTR %s during Generator shutdown - may not be processed" msgstr "" "Запрос DTR %s получен в процессе закрытия генератора - не может быть " "обработан" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:314 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:324 #, c-format msgid "%s: Trying to remove job from data staging which is still active" msgstr "%s: Попытка удалить задание из активного процесса размещения данных" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:333 #, c-format msgid "%s: Trying remove job from data staging which does not exist" msgstr "" "%s: Попытка удалить задание из несуществующего процесса размещения данных" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:344 #, c-format msgid "%s: Invalid DTR" msgstr "%s: Недействительный запрос DTR" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:354 #, c-format msgid "%s: Received DTR %s to copy file %s in state %s" msgstr "%s: Получен запрос DTR %s на копирование файла %s в состоянии %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:366 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:982 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:281 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:304 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:634 #, c-format msgid "%s: Failed reading local information" msgstr "%s: Не удалось прочесть локальную информацию" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:377 #, c-format msgid "%s: DTR %s to copy file %s failed" msgstr "%s: Сбой запроса DTR %s на копирование файла %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:383 #, c-format msgid "%s: Cancelling other DTRs" msgstr "%s: Прерывание остальных запросов DTR" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:394 #, c-format msgid "%s: DTR %s to copy to %s failed but is not mandatory" msgstr "%s: копирование DTR %s в %s не удалось, но не было обязательным" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:404 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:645 #, c-format msgid "%s: Failed to read list of output files" msgstr "%s: Не удалось прочесть список выходных файлов" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:418 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:555 #, c-format msgid "%s: Failed to read dynamic output files in %s" msgstr "%s: Не удалось прочесть динамический список выходных файлов в %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:420 #, c-format msgid "%s: Going through files in list %s" msgstr "%s: Обрабатываются файлы в списке %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:424 #, c-format msgid "%s: Removing %s from dynamic output file %s" msgstr "%s: Удаляется %s из динамического списка выходных файлов %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:428 #, c-format msgid "%s: Failed to write back dynamic output files in %s" msgstr "%s: Не удалось записать динамические выходные файлы обратно в %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:444 #, c-format msgid "%s: Failed to write list of output files" msgstr "%s: Не удалось записать список выходных файлов" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:448 #, c-format msgid "%s: Failed to write list of output status files" msgstr "%s: Не удалось вывести список состояний выходных файлов " #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:460 #: 
src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:656 #, c-format msgid "%s: Failed to read list of input files" msgstr "%s: Не удалось прочесть список входных файлов" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:479 #, c-format msgid "%s: Failed to write list of input files" msgstr "%s: Не удалось записать список входных файлов" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:491 #, c-format msgid "%s: Received DTR with two remote endpoints!" msgstr "%s: Получен запрос DTR с двумя удалёнными адресами!" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:502 #: src/services/cache_service/CacheServiceGenerator.cpp:108 #, c-format msgid "No active job id %s" msgstr "Нет активной задачи с ярлыком %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:543 #, c-format msgid "%s: Failed to read list of output files, can't clean up session dir" msgstr "" "%s: Не удалось прочесть список выходных файлов, невозможно очистить каталог " "сессии" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:567 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:578 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:692 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:817 #, c-format msgid "%s: Failed to clean up session dir" msgstr "%s: Не удалось очистить каталог сессии" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:576 #, c-format msgid "%s: Failed to read list of input files, can't clean up session dir" msgstr "" "%s: Не удалось прочесть список входных файлов, невозможно очистить каталог " "сессии" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:588 #, c-format msgid "%s: All %s %s successfully" msgstr "%s: Все процессы %s успешно завершились (%s)" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:592 #, c-format msgid "%s: Some %s failed" msgstr "%s: Некоторые процессы %s дали сбой" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:607 #, c-format msgid "%s: Received data staging request to %s files" msgstr "%s: Получен запрос на размещение файлов (%s)" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:667 #, c-format msgid "%s: Duplicate file in list of input files: %s" msgstr "%s: Повторяющееся имя файла в списке входных файлов: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:714 #, c-format msgid "%s: Reading output files from user generated list in %s" msgstr "%s: Чтение выходных файлов в списке пользователя %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:716 #, c-format msgid "%s: Error reading user generated output file list in %s" msgstr "%s: Ошибка чтения списка выходных файлов пользователя в %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:744 #, c-format msgid "%s: Failed to list output directory %s: %s" msgstr "%s: Сбой вывода содержимого каталога назначения %s: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:763 #, c-format msgid "%s: Adding new output file %s: %s" msgstr "%s: Добавление нового файла выхода %s: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:785 #, c-format msgid "%s: Two identical output destinations: %s" msgstr "%s: Два одинаковых назначения для выдачи: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:798 #, c-format msgid "%s: Cannot upload two different files %s and %s to same LFN: %s" msgstr "%s: Невозможно записать два разных файла %s и %s с одним LFN: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:828 #, c-format msgid "%s: Received job in a bad state: %s" msgstr "%s: Задача получена в 
плохом состоянии: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:901 #, c-format msgid "" "%s: Destination file %s was possibly left unfinished from previous A-REX " "run, will overwrite" msgstr "" "%s: Файл назначения %s вероятно остался недописанным после предыдущего " "запуска A-REX, перезапись" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:988 #, c-format msgid "%s: Failed writing local information" msgstr "%s: Не удалось записать локальную информацию" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1005 #, c-format msgid "%s: Cancelling active DTRs" msgstr "%s: Прерывание активных запросов" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1028 #, c-format msgid "%s: Can't read list of input files" msgstr "%s: Невозможно прочесть список входных файлов" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1043 #, c-format msgid "%s: Checking user uploadable file: %s" msgstr "%s: Проверка отгружаемого файла пользователя: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1048 #, c-format msgid "%s: User has uploaded file %s" msgstr "%s: Пользователь отгрузил файл %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1055 #, c-format msgid "%s: Failed writing changed input file." msgstr "%s: Не удалось записать изменившийся входной файл" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1059 #, c-format msgid "%s: Critical error for uploadable file %s" msgstr "%s: Критическая ошибка для отгружаемого файла %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1076 #, c-format msgid "%s: Uploadable files timed out" msgstr "%s: Истекло время ожидания отгружаемых файлов" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1132 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1158 #, c-format msgid "%s: Can't convert checksum %s to int for %s" msgstr "%s: Невозможно преобразовать контрольную сумму файла %s в целое для %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1139 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1153 #, c-format msgid "%s: Can't convert filesize %s to int for %s" msgstr "%s: Невозможно преобразовать размер файла %s в целое для %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1148 #, c-format msgid "%s: Invalid size/checksum information (%s) for %s" msgstr "%s: Неверная информация о размере/контрольной сумме (%s) для %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1170 #, c-format msgid "%s: Invalid file: %s is too big." msgstr "%s: Неверный файл: %s слишком велик." #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1186 #, c-format msgid "%s: Failed to switch user ID to %d/%d to read file %s" msgstr "" "%s: Не удалось изменить идентификатор пользователя на %d/%d для чтения файла " "%s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1192 #, c-format msgid "%s: Failed to open file %s for reading" msgstr "%s: Не удалось открыть файл %s на чтение" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1200 #, c-format msgid "%s: Error accessing file %s" msgstr "%s: Ошибка доступа к файлу %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1212 #, c-format msgid "%s: Error reading file %s" msgstr "%s: Ошибка при чтении файла %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1227 #, c-format msgid "%s: File %s has wrong checksum: %llu. Expected %lli" msgstr "%s: У файла %s неверная контрольная сумма: %llu. 
Ожидалась %lli" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1233 #, c-format msgid "%s: Checksum %llu verified for %s" msgstr "%s: Проверочная сумма %llu подтверждена для %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1245 msgid "" "Found unfinished DTR transfers. It is possible the previous A-REX process " "did not shut down normally" msgstr "" "Найдены незаконченные процессы DTR. Вероятно, предыдущий процесс A-REX " "завершился сбоем" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1252 #, c-format msgid "Found DTR %s for file %s left in transferring state from previous run" msgstr "" "Найден запрос DTR %s для файла %s, оставшийся в состоянии передачи после " "предыдущего запуска" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:131 #, c-format msgid "Replacing queue '%s' with '%s'" msgstr "Очередь '%s' заменяется на '%s'" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:225 #, c-format msgid "Bad name for stdout: %s" msgstr "Недопустимое имя для stdout: %s" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:233 #, c-format msgid "Bad name for stderr: %s" msgstr "Недопустимое имя для stderr: %s" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:291 #, c-format msgid "Bad name for runtime environment: %s" msgstr "Недопустимое название среды выполнения: %s" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:336 msgid "Job description file could not be read." msgstr "Невозможно прочесть файл с описанием задачи." #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:387 msgid "Bad name for executable: " msgstr "Недопустимое имя для исполняемого файла: " #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:401 #, c-format msgid "Bad name for executable: %s" msgstr "Недопустимое имя для исполняемого файла: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:107 #, c-format msgid "" "%s: Failed reading .local and changing state, job and A-REX may be left in " "an inconsistent state" msgstr "" "%s: Ошибка при чтении .local и изменении состояния, задачи и A-REX могут " "оказаться в противоречивом состоянии" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:169 #, c-format msgid "Current jobs in system (PREPARING to FINISHING) per-DN (%i entries)" msgstr "" "Текущие задачи в системе (от PREPARING до FINISHING) на DN (%i записей)" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:171 #, c-format msgid "%s: %i" msgstr "%s: %i" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:177 #, c-format msgid "%s: Destroying" msgstr "%s: Уничтожается" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:182 #, c-format msgid "%s: Can't read state - no comments, just cleaning" msgstr "" "%s: Невозможно прочесть состояние - никаких комментариев, просто чистка" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:194 #, c-format msgid "%s: Cleaning control and session directories" msgstr "%s: Очистка управляющей" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:200 #, c-format msgid "%s: This job may be still running - canceling" msgstr "%s: Эта задача, возможно, ещё исполняется - прерывание" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:203 #, c-format msgid "%s: Cancellation failed (probably job finished) - cleaning anyway" msgstr "" "%s: Прерывание не удалось (вероятно, задача закончилась) - всё равно удаляем" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:210 #, c-format msgid "%s: Cancellation probably succeeded - cleaning" msgstr "%s: 
Прерывание, вероятно, удалось - удаление" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:273 #, c-format msgid "%s: Failed writing list of output files: %s" msgstr "%s: Не удалось записать список выходных файлов: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:315 #, c-format msgid "%s: Failed creating grami file" msgstr "%s: Не удалось создать файл grami" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:319 #, c-format msgid "%s: Failed setting executable permissions" msgstr "%s: Не удалось установить права на исполнение" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:331 #, c-format msgid "%s: state SUBMIT: starting child: %s" msgstr "%s: состояние SUBMIT: запуск дочернего процесса: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:334 #, c-format msgid "%s: state CANCELING: starting child: %s" msgstr "%s: состояние CANCELING: запуск дочернего процесса: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:336 #, c-format msgid "%s: Job has completed already. No action taken to cancel" msgstr "%s:Задача уже завершилась. Действия по прерыванию не применяются " #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:347 #, c-format msgid "%s: Failed running submission process" msgstr "%s: Не удалось выполнить процедуру запуска" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:349 #, c-format msgid "%s: Failed running cancellation process" msgstr "%s: Не удалось выполнить процедуру прерывания" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:355 #, c-format msgid "%s: LRMS scripts limit of %u is reached - suspending submit/cancel" msgstr "" "%s: достигнут предел в %u скриптов СУПО - приостанавливается запуск/" "прерывание" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:374 #, c-format msgid "" "%s: Job submission to LRMS takes too long, but ID is already obtained. " "Pretending submission is done." msgstr "" "%s: Засылка задачи в СУПО происходит слишком медленно, но идентификатор уже " "доступен. Будем считать, что засылка произведена." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:380 #, c-format msgid "" "%s: Job cancellation takes too long, but diagnostic collection seems to be " "done. Pretending cancellation succeeded." msgstr "" "%s: Прерывание задачи происходит слишком медленно, но диагностика уже " "доступна. Будем считать, что прерывание произошло." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:388 #, c-format msgid "%s: Job submission to LRMS takes too long. Failing." msgstr "%s: Засылка задачи в СУПО происходит слишком долго. Сбой." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:394 #, c-format msgid "%s: Job cancellation takes too long. Failing." msgstr "%s: Прерывание задачи происходит слишком долго. Сбой." 
#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:404 #, c-format msgid "%s: state SUBMIT: child exited with code %i" msgstr "%s: состояние: SUBMIT: дочерний процесс завершился с кодом выхода: %i" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:409 #, c-format msgid "%s: state CANCELING: child exited with code %i" msgstr "%s: состояние CANCELING: дочерний процесс завершился с кодом выхода %i" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:417 #, c-format msgid "%s: Job submission to LRMS failed" msgstr "%s: Не удалось направить задачу в СУПО" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:420 #, c-format msgid "%s: Failed to cancel running job" msgstr "%s: Не удалось оборвать исполняющуюся задачу" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:434 #, c-format msgid "%s: Failed obtaining lrms id" msgstr "%s: Не удалось получить номер из СУПО" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:447 #, c-format msgid "%s: Failed writing local information: %s" msgstr "%s: Не удалось записать локальную информацию: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:457 #, c-format msgid "%s: state CANCELING: timeout waiting for cancellation" msgstr "%s: состояние CANCELING: срок ожидания прерывания истёк" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:463 #, c-format msgid "%s: state CANCELING: job diagnostics collected" msgstr "%s: состояние CANCELING: собирается диагностика задачи" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:531 #, c-format msgid "%s: State: %s: still in data staging" msgstr "%s: Состояние: %s: всё ещё в процессе переноса данных" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:550 #, c-format msgid "%s: Job is not allowed to be rerun anymore" msgstr "%s: Задачу нельзя больше перезапускать" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:560 #, c-format msgid "%s: Job failed in unknown state. Won't rerun." msgstr "" "%s: Сбой исполнения задачи в неизвестном состоянии. Перезапуска не будет." 
#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:581 #, c-format msgid "%s: Reprocessing job description failed" msgstr "%s: Сбой повторной обработки описания задачи" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:588 #, c-format msgid "%s: Failed to read reprocessed list of output files" msgstr "%s: Не удалось прочесть переработанный список выходных файлов" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:592 #, c-format msgid "%s: Failed to read reprocessed list of input files" msgstr "%s: Не удалось прочесть переработанный список входных файлов" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:676 #, c-format msgid "%s: Reading status of new job failed" msgstr "%s: Не удалось прочесть состояние новой задачи" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:689 #, c-format msgid "%s: State: ACCEPTED: parsing job description" msgstr "%s: Состояние: ACCEPTED: обрабатывается описание задачи" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:691 #, c-format msgid "%s: Processing job description failed" msgstr "%s: Не удалось обработать описание задачи" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:708 #, c-format msgid "%s: %s: New job belongs to %i/%i" msgstr "%s: %s: Новая задача принадлежит %i/%i" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:718 #, c-format msgid "Failed to get DN information from .local file for job %s" msgstr "Не удалось извлечь информацию о DN из файла .local задачи %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:732 #, c-format msgid "%s: State: ACCEPTED" msgstr "%s: Состояние: ACCEPTED" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:739 #, c-format msgid "%s: State: ACCEPTED: dryrun" msgstr "%s: Состояние: ACCEPTED: dryrun" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:751 #, c-format msgid "%s: State: ACCEPTED: has process time %s" msgstr "%s: Состояние: ACCEPTED: время на исполнение %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:757 #, c-format msgid "%s: State: ACCEPTED: moving to PREPARING" msgstr "%s: состояние ACCEPTED: переход в PREPARING" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:774 #, c-format msgid "%s: State: PREPARING" msgstr "%s: Состояние: PREPARING" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:778 #, c-format msgid "%s: Failed obtaining local job information." 
msgstr "%s: Не удалось извлечь информацию о локальном состоянии задачи" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:829 #, c-format msgid "%s: State: SUBMIT" msgstr "%s: Состояние: SUBMIT" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:844 #, c-format msgid "%s: State: CANCELING" msgstr "%s: Состояние: CANCELING" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:860 #, c-format msgid "%s: State: INLRMS" msgstr "%s: Состояние: INLRMS" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:868 #, c-format msgid "%s: Job finished" msgstr "%s: Задача завершена" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:872 #, c-format msgid "%s: State: INLRMS: exit message is %i %s" msgstr "%s: состояние INLRMS: сообщение на выходе %i %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:893 #, c-format msgid "%s: State: FINISHING" msgstr "%s: Состояние: FINISHING" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:922 #, c-format msgid "%s: Job is requested to clean - deleting" msgstr "%s: Поступил запрос на удаление задачи - удаляется" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:961 #, c-format msgid "%s: Can't rerun on request" msgstr "%s: Перезапуск по требованию невозможен" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:963 #, c-format msgid "%s: Can't rerun on request - not a suitable state" msgstr "%s: Перезапуск по запросу невозможен - неподходящее состояние" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:973 #, c-format msgid "%s: Job is too old - deleting" msgstr "%s: Задача слишком старая - удаляется" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1012 #, c-format msgid "%s: Job is ancient - delete rest of information" msgstr "%s: Задача устарела - удаляется оставшаяся информация" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1041 #, c-format msgid "%s: Canceling job because of user request" msgstr "%s: Прерывание задачи по запросу пользователя" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1107 #, c-format msgid "%s: Job failure detected" msgstr "%s: Обнаружен сбой задачи" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1136 #, c-format msgid "%s: State: %s from %s" msgstr "%s: Состояние: %s после %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1163 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1171 #, c-format msgid "%s: Plugin at state %s : %s" msgstr "%s: Подключаемый модуль в состоянии %s : %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1177 #, c-format msgid "%s: Plugin execution failed" msgstr "%s: Сбой при исполнении подключаемого модуля" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1202 #, c-format msgid "%s: Delete request due to internal problems" msgstr "%s: Удаление запроса в связи с внутренними неполадками" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1284 #, c-format msgid "Failed to move file %s to %s" msgstr "Не удалось переместить файл %s в %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1292 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1375 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1464 #, c-format msgid "Failed reading control directory: %s" msgstr "Сбой при чтении управляющего каталога: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1334 #, c-format msgid "Failed reading control directory: %s: %s" msgstr "Сбой при чтении управляющего каталога: %s: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:153 #, c-format msgid "Invalid checksum in %s for %s" msgstr "Неверная контрольная сумма в %s для %s" #: 
src/services/a-rex/grid-manager/loaders/downloader.cpp:162 #, c-format msgid "Invalid file size in %s for %s " msgstr "Недопустимый размер файла в %s для %s " #: src/services/a-rex/grid-manager/loaders/downloader.cpp:182 #, c-format msgid "Invalid file: %s is too big." msgstr "Неверный файл: %s слишком велик." #: src/services/a-rex/grid-manager/loaders/downloader.cpp:196 #, c-format msgid "Error accessing file %s" msgstr "Ошибка доступа к файлу %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:205 #, c-format msgid "Error reading file %s" msgstr "Ошибка при чтении файла %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:216 #, c-format msgid "File %s has wrong CRC." msgstr "У файла %s неверная контрольная сумма" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:240 #, c-format msgid "Failed downloading file %s - %s" msgstr "Сбой загрузки файла %s - %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:246 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:132 msgid "Retrying" msgstr "Повтор" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:249 #, c-format msgid "Downloaded file %s" msgstr "Загружен файл %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:330 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:243 #, c-format msgid "Wrong number of threads: %s" msgstr "Неверное число потоков: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:336 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:249 #, c-format msgid "Wrong number of files: %s" msgstr "Неверное количество файлов: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:342 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:358 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:365 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:372 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:379 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:255 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:271 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:278 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:285 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:292 #, c-format msgid "Bad number: %s" msgstr "Неверное число: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:346 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:352 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:259 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:265 msgid "Specified user can't be handled" msgstr "Указанный пользователь не может быть обработан" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:384 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:297 #, c-format msgid "Unsupported option: %c" msgstr "Неподдерживаемая опция: %c" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:388 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:301 #, c-format msgid "Missing parameter for option %c" msgstr "Отсутствует параметр для опции %c" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:392 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:305 msgid "Undefined processing error" msgstr "Неопределённая ошибка при обработке" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:399 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:312 msgid "Missing job id" msgstr "Отсутствует ярлык задачи" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:401 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:314 msgid "Missing control 
directory" msgstr "Отсутствует каталог контроля" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:403 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:316 msgid "Missing session directory" msgstr "Отсутствует каталог сеансов" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:446 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:359 #, c-format msgid "Minimal speed: %llu B/s during %i s" msgstr "Минимальная скорость: %llu Б/с в течение %i с" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:448 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:361 #, c-format msgid "Minimal average speed: %llu B/s" msgstr "Минимальная средняя скорость: %llu B/s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:450 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:363 #, c-format msgid "Maximal inactivity time: %i s" msgstr "Максимальное время бездействия: %i s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:455 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:368 msgid "Won't use more than 10 threads" msgstr "Будет использовано не более 10-и потоков" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:462 msgid "Downloader started" msgstr "Загрузчик запущен" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:488 msgid "Can't read list of input files" msgstr "Невозможно прочесть список входных файлов" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:495 #, c-format msgid "Error: duplicate file in list of input files: %s" msgstr "Ошибка: дублированное имя файла в списке входных файлов: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:518 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:402 msgid "Can't read list of output files" msgstr "Невозможно прочесть список выходных файлов" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:523 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:449 msgid "Can't remove junk files" msgstr "Невозможно удалить ненужные файлы" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:531 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:463 msgid "Can't read job local description" msgstr "Невозможно прочесть локальное описание задачи" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:553 #, c-format msgid "Local source for download: %s" msgstr "Локальный источник загрузки: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:567 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:571 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:512 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:518 #, c-format msgid "Can't accept URL: %s" msgstr "Неприемлемый URL: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:586 #: src/services/a-rex/grid-manager/loaders/uploader.cpp:535 #, c-format msgid "Failed to initiate file transfer: %s - %s" msgstr "Невозможно запустить передачу файлов: %s - %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:603 #, c-format msgid "Downloaded %s" msgstr "Загружен %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:628 #, c-format msgid "Failed to download (but may be retried) %s" msgstr "Не удалось загрузить (возможна повторная попытка) %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:633 #, c-format msgid "Failed to download %s" msgstr "Не удалось загрузить %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:642 msgid "Some downloads failed" msgstr "Некоторые загрузки не удались" #: 
src/services/a-rex/grid-manager/loaders/downloader.cpp:646 msgid "Some downloads failed, but may be retried" msgstr "Некоторые загрузки не удались (возможна повторная попытка)" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:650 msgid "Failed writing changed input file" msgstr "Не удалось записать изменившийся входной файл" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:662 #, c-format msgid "Checking user uploadable file: %s" msgstr "Проверка отгружаемого файла пользователя: %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:668 #, c-format msgid "User has uploaded file %s" msgstr "Пользователь отгрузил файл %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:673 #: src/services/a-rex/grid-manager/loaders/downloader.cpp:706 msgid "Failed writing changed input file." msgstr "Не удалось записать изменившийся входной файл" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:677 #, c-format msgid "Critical error for uploadable file %s" msgstr "Критическая ошибка для отгружаемого файла %s" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:694 #, c-format msgid "No changes in uploadable files for %u seconds" msgstr "Никаких изменений в отгружаемых файлах в течение %u сек." #: src/services/a-rex/grid-manager/loaders/downloader.cpp:695 msgid "Uploadable files timed out" msgstr "Истекло время ожидания отгружаемых файлов" #: src/services/a-rex/grid-manager/loaders/downloader.cpp:761 #, c-format msgid "Leaving downloader (%i)" msgstr "Выход из загрузчика (%i)" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:126 #, c-format msgid "Failed uploading file %s - %s" msgstr "Не удалось отгрузить файл %s - %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:135 #, c-format msgid "Uploaded file %s" msgstr "Закачан файл %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:373 msgid "Uploader started" msgstr "Отгрузчик запущен" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:409 #, c-format msgid "Reading output files from user generated list in %s" msgstr "Чтение выходных файлов в списке пользователя %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:411 #, c-format msgid "Error reading user generated output file list in %s" msgstr "Ошибка чтения списка выходных файлов пользователя в %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:424 #, c-format msgid "Two identical output destinations: %s" msgstr "Два одинаковых назначения для выдачи: %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:433 #, c-format msgid "Cannot upload two different files %s and %s to same LFN: %s" msgstr "Невозможно записать два разных файла %s и %s с одним LFN: %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:496 #, c-format msgid "Local destination for uploader %s" msgstr "Локальный файл-приёмник для отгрузчика %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:556 #, c-format msgid "Uploaded %s" msgstr "Отгружен %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:580 msgid "Failed writing output status file" msgstr "Не удалось записать выходной файл состояния" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:587 #, c-format msgid "Failed to upload (but may be retried) %s" msgstr "Не удалось выгрузить (возможна повторная попытка) %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:591 #, c-format msgid "Failed to upload %s" msgstr "Не удалось отгрузить %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:600 msgid "Some uploads failed" msgstr "Некоторые отгрузки не 
удались" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:616 #, c-format msgid "Writing back dynamic output file %s" msgstr "Запись динамического списка выходных файлов %s" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:618 #, c-format msgid "Failed to rewrite output file list %s. Job resuming may not work" msgstr "" "Не удалось перезаписать список выходных файлов %s. Перезапуск задач может не " "работать." #: src/services/a-rex/grid-manager/loaders/uploader.cpp:630 msgid "Some uploads failed, but (some) may be retried" msgstr "" "Некоторые выгрузки не удались (для некоторых возможна повторная попытка)" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:637 msgid "Failed writing changed output file" msgstr "Не удалось записать изменившийся выходной файл" #: src/services/a-rex/grid-manager/loaders/uploader.cpp:649 #, c-format msgid "Leaving uploader (%i)" msgstr "Отгрузчик покидается (%i)" #: src/services/a-rex/grid-manager/log/JobLog.cpp:118 msgid ": Logger name is not specified" msgstr ": Не задано имя регистратора" #: src/services/a-rex/grid-manager/log/JobLog.cpp:130 msgid ": Failure creating slot for reporter child process" msgstr ": Сбой подготовки дочернего процесса для отчёта" #: src/services/a-rex/grid-manager/log/JobLog.cpp:143 msgid ": Failure starting reporter child process" msgstr ": Сбой запуска дочернего процесса для отчёта" #: src/services/a-rex/grid-manager/log/JobsMetrics.cpp:130 #, c-format msgid ": Metrics tool returned error code %i: %s" msgstr ": Средство измерения характеристик выдало ошибку %i: %s" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:29 msgid "Failed reading local information" msgstr "Не удалось прочесть локальную информацию" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:79 #, c-format msgid "Running mailer command (%s)" msgstr "Выполнение команды рассылки (%s)" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:81 msgid "Failed running mailer" msgstr "Не удалось запустить службу рассылки" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:71 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:24 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:55 #, c-format msgid "%s: Failure creating slot for child process" msgstr "%s: Сбой создания области памяти для дочернего процесса" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:79 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:30 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:61 #, c-format msgid "%s: Failure creating data storage for child process" msgstr "%s: Сбой создания хранилища данных для дочернего процесса" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:123 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:41 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:72 #, c-format msgid "%s: Failure starting child process" msgstr "%s: Сбой при запуске дочернего процесса" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:140 #, c-format msgid "%s: Failed to run plugin" msgstr "%s: Сбой при запуске подключаемого модуля" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:143 #, c-format msgid "%s: Plugin failed" msgstr "%s: Сбой подключаемого модуля" #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:46 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:77 #, c-format msgid "%s: Failure waiting for child process to finish" msgstr "%s: Сбой ожидания окончания дочернего процесса" #: src/services/a-rex/information_collector.cpp:45 #, c-format msgid "Resource information provider: %s" msgstr 
"Сборщик информации о ресурсе: %s" #: src/services/a-rex/information_collector.cpp:51 msgid "Resource information provider failed" msgstr "Сбой сборщика информации о ресурсе" #: src/services/a-rex/information_collector.cpp:55 #, c-format msgid "" "Resource information provider failed with exit status: %i\n" "%s" msgstr "" "Сбой сборщика информации о ресурсе с выходным статусом: %i\n" "%s" #: src/services/a-rex/information_collector.cpp:57 #, c-format msgid "" "Resource information provider log:\n" "%s" msgstr "" "Журнал сборщика информации о ресурсе:\n" "%s" #: src/services/a-rex/information_collector.cpp:61 msgid "No new informational document assigned" msgstr "Не присвоено новых информационных документов" #: src/services/a-rex/information_collector.cpp:63 #, c-format msgid "Obtained XML: %s" msgstr "Полученный XML: %s" #: src/services/a-rex/information_collector.cpp:77 msgid "Informational document is empty" msgstr "Пустой информационный документ" #: src/services/a-rex/information_collector.cpp:88 msgid "Passing service's information from collector to registrator" msgstr "Идёт передача информации о службе от сборщика к регистратору" #: src/services/a-rex/information_collector.cpp:144 #, c-format msgid "" "Registered static information: \n" " doc: %s" msgstr "" "Зарегистрирована статическая информация: \n" " документ: %s" #: src/services/a-rex/information_collector.cpp:147 #, c-format msgid "" "Information Registered without static attributes: \n" " doc: %s" msgstr "" "Информация зарегистрирована без статических аттрибутов: \n" " документ: %s" #: src/services/a-rex/information_collector.cpp:324 msgid "OptimizedInformationContainer failed to create temporary file" msgstr "OptimizedInformationContainer не смог создать временный файл" #: src/services/a-rex/information_collector.cpp:327 #, c-format msgid "OptimizedInformationContainer created temporary file: %s" msgstr "OptimizedInformationContainer создал временный файл: %s" #: src/services/a-rex/information_collector.cpp:333 msgid "" "OptimizedInformationContainer failed to store XML document to temporary file" msgstr "" "OptimizedInformationContainer не смог записать документ XML во временный файл" #: src/services/a-rex/information_collector.cpp:342 msgid "OptimizedInformationContainer failed to parse XML" msgstr "OptimizedInformationContainer не смог разобрать XML" #: src/services/a-rex/information_collector.cpp:353 msgid "OptimizedInformationContainer failed to rename temprary file" msgstr "OptimizedInformationContainer не смог переименовать временный файл" #: src/services/a-rex/job.cpp:53 #, c-format msgid "Cannot handle local user %s" msgstr "Невозможно обслужить локального пользователя %s" #: src/services/a-rex/job.cpp:101 #, c-format msgid "%s: Failed to parse user policy" msgstr "%s: Сбой при разборе правил допуска пользователя" #: src/services/a-rex/job.cpp:106 #, c-format msgid "%s: Failed to load evaluator for user policy " msgstr "%s: Не удалось подгрузить анализатор для правил допуска пользователей" #: src/services/a-rex/job.cpp:211 #, c-format msgid "%s: Unknown user policy '%s'" msgstr "%s: Неизвестное правило допуска пользователя '%s'" #: src/services/a-rex/job.cpp:473 src/services/a-rex/job.cpp:497 #, c-format msgid "Credential expires at %s" msgstr "Срок действия параметров доступа истекает к %s" #: src/services/a-rex/job.cpp:475 src/services/a-rex/job.cpp:499 #, c-format msgid "Credential handling exception: %s" msgstr "Сбой обработки параметров доступа: %s" #: src/services/a-rex/job.cpp:789 #, c-format msgid "Out of 
tries while allocating new job ID in %s" msgstr "Закончились попытки присвоения нового ярлыка задачи в %s" #: src/services/a-rex/job.cpp:1006 msgid "No non-draining session dirs available" msgstr "Нет каталогов сессий не в состоянии разгрузки" #: src/services/a-rex/jura/ApelDestination.cpp:89 #: src/services/a-rex/jura/CARDestination.cpp:49 #: src/services/a-rex/jura/LutsDestination.cpp:71 msgid "ServiceURL missing" msgstr "Отсутствует ServiceURL" #: src/services/a-rex/jura/ApelDestination.cpp:97 #: src/services/a-rex/jura/CARDestination.cpp:56 #: src/services/a-rex/jura/LutsDestination.cpp:89 #, c-format msgid "Protocol is %s, should be https" msgstr "Указан протокол %s, а должен быть https" #: src/services/a-rex/jura/ApelDestination.cpp:133 #: src/services/a-rex/jura/ApelDestination.cpp:158 #: src/services/a-rex/jura/CARDestination.cpp:95 #: src/services/a-rex/jura/LutsDestination.cpp:120 #: src/services/a-rex/jura/LutsDestination.cpp:144 #, c-format msgid "Ignoring incomplete log file \"%s\"" msgstr "Игнорируется неполный журнальный файл \"%s\"" #: src/services/a-rex/jura/ApelDestination.cpp:182 #: src/services/a-rex/jura/CARDestination.cpp:119 #: src/services/a-rex/jura/LutsDestination.cpp:166 #, c-format msgid "Logging UR set of %d URs." msgstr "Записывается набор UR из %d записей UR." #: src/services/a-rex/jura/ApelDestination.cpp:185 #: src/services/a-rex/jura/CARDestination.cpp:122 #: src/services/a-rex/jura/Destination.cpp:61 #: src/services/a-rex/jura/LutsDestination.cpp:169 #, c-format msgid "UR set dump: %s" msgstr "Выведен набор записей UR: %s" #: src/services/a-rex/jura/ApelDestination.cpp:271 #: src/services/a-rex/jura/Destination.cpp:96 #, c-format msgid "Backup file (%s) created." msgstr "Создан резервный файл (%s)." #: src/services/a-rex/jura/ApelDestination.cpp:316 #, c-format msgid "APEL message file (%s) created." msgstr "Создан файл сообщений APEL (%s)." #: src/services/a-rex/jura/ApelDestination.cpp:370 #: src/services/a-rex/jura/CARAggregation.cpp:208 #, c-format msgid "system retval: %d" msgstr "Системное значение retval: %d" #: src/services/a-rex/jura/CARAggregation.cpp:73 #, c-format msgid "Aggregation record (%s) not exist, initialize it..." msgstr "" "Агрегированная запись (%s) не существует, производится инициализация..." #: src/services/a-rex/jura/CARAggregation.cpp:77 #, c-format msgid "Aggregation record (%s) initialization successful." msgstr "Агрегированная запись (%s) успешно инициализирована." #: src/services/a-rex/jura/CARAggregation.cpp:82 #, c-format msgid "Some error happens during the Aggregation record (%s) initialization." msgstr "Неизвестная ошибка при инициализации агрегированной записи (%s)." #: src/services/a-rex/jura/CARAggregation.cpp:88 #, c-format msgid "Aggregation record (%s) read from file successful." msgstr "Агрегированная запись (%s) успешно прочитана из файла." #: src/services/a-rex/jura/CARAggregation.cpp:100 #, c-format msgid "Aggregation record (%s) stored successful." msgstr "Агрегированная запись (%s) успешно сохранена." #: src/services/a-rex/jura/CARAggregation.cpp:103 #, c-format msgid "Some error happens during the Aggregation record (%s) storing." msgstr "Неизвестная ошибка при сохранении агрегированной записи (%s)." #: src/services/a-rex/jura/CARAggregation.cpp:156 #, c-format msgid "APEL aggregation message file (%s) created." msgstr "Создан файл агрегированного сообщения APEL (%s)." 
#: src/services/a-rex/jura/CARAggregation.cpp:228 #, c-format msgid "year: %s" msgstr "год: %s" #: src/services/a-rex/jura/CARAggregation.cpp:229 #, c-format msgid "moth: %s" msgstr "месяц: %s" #: src/services/a-rex/jura/CARAggregation.cpp:230 #, c-format msgid "queue: %s" msgstr "очередь: %s" #: src/services/a-rex/jura/CARAggregation.cpp:238 #: src/services/a-rex/jura/CARAggregation.cpp:404 #, c-format msgid "query: %s" msgstr "запрос: %s" #: src/services/a-rex/jura/CARAggregation.cpp:241 #, c-format msgid "list size: %d" msgstr "длина списка: %d" #: src/services/a-rex/jura/CARAggregation.cpp:359 #, c-format msgid "XML: %s" msgstr "XML: %s" #: src/services/a-rex/jura/CARAggregation.cpp:361 msgid "UPDATE Aggregation Record called." msgstr "Вызов метода UPDATE агрегированной записи." #: src/services/a-rex/jura/CARAggregation.cpp:415 #: src/services/a-rex/jura/CARAggregation.cpp:465 msgid "Does not sending empty aggregation/synch message." msgstr "" "Отсылка пустого агрегированного/синхронизационного сообщения не производится." #: src/services/a-rex/jura/CARAggregation.cpp:548 #, c-format msgid "synch message: %s" msgstr "синхронизационное сообщение: %s" #: src/services/a-rex/jura/Destination.cpp:123 #, c-format msgid "Sent jobIDs: (nr. of job(s) %d)" msgstr "Отправленные jobID: (всего %d задач(и))" #: src/services/a-rex/jura/Destinations.cpp:27 msgid "Unable to create adapter for the specific reporting destination type" msgstr "Невозможно создать адаптер для указанного назначения отчётности" #: src/services/a-rex/jura/JobLogFile.cpp:92 #, c-format msgid "Insert filter element: <%s,%s>" msgstr "Вставка элемента фильтра: <%s,%s>" #: src/services/a-rex/jura/JobLogFile.cpp:105 #, c-format msgid "Not set filter for this URL (%s)." msgstr "Для этого URL (%s) фильтр не назначен." #: src/services/a-rex/jura/JobLogFile.cpp:111 #, c-format msgid "Current job's VO name: %s" msgstr "Имя ВО текущей задачи: %s" #: src/services/a-rex/jura/JobLogFile.cpp:114 #, c-format msgid "VO filter for host: %s" msgstr "Фильтр ВО для узла: %s" #: src/services/a-rex/jura/JobLogFile.cpp:204 #: src/services/a-rex/jura/JobLogFile.cpp:698 #, c-format msgid "Read archive file %s" msgstr "Читается архивный файл %s" #: src/services/a-rex/jura/JobLogFile.cpp:209 #: src/services/a-rex/jura/JobLogFile.cpp:703 #, c-format msgid "" "Could not read archive file %s for job log file %s (%s), generating new " "Usage Record" msgstr "" "Невозможно прочесть архивный файл %s для файла журнала задач %s (%s), " "создаётся новая запись Usage Record" #: src/services/a-rex/jura/JobLogFile.cpp:298 #: src/services/a-rex/jura/JobLogFile.cpp:827 #, c-format msgid "" "Missing required Usage Record element \"RecordIdentity\", in job log file %s" msgstr "" "Отсутствует обязательный элемент Usage Record \"RecordIdentity\", в файле " "журнала задач %s" #: src/services/a-rex/jura/JobLogFile.cpp:345 #, c-format msgid "VO (%s) not set for this (%s) SGAS server by VO filter." msgstr "Фильтр ВО (%s) не настроен для этого сервера SGAS (%s)." #: src/services/a-rex/jura/JobLogFile.cpp:378 #, c-format msgid "[VO filter] Job log will be not send. %s." msgstr "[VO filter] запись о задаче не будет отправлена. %s." 
#: src/services/a-rex/jura/JobLogFile.cpp:454 #: src/services/a-rex/jura/JobLogFile.cpp:970 #, c-format msgid "Missing required element \"Status\" in job log file %s" msgstr "Отсутствует обязательный элемент \"Status\" в файле журнала задач %s" #: src/services/a-rex/jura/JobLogFile.cpp:663 #: src/services/a-rex/jura/JobLogFile.cpp:1280 #, c-format msgid "Failed to create archive directory %s: %s" msgstr "Не удалось создать архивный каталог %s: %s" #: src/services/a-rex/jura/JobLogFile.cpp:670 #: src/services/a-rex/jura/JobLogFile.cpp:1287 #, c-format msgid "Archiving Usage Record to file %s" msgstr "Архивирование записи Usage Record в файл %s" #: src/services/a-rex/jura/JobLogFile.cpp:676 #: src/services/a-rex/jura/JobLogFile.cpp:1293 #, c-format msgid "Failed to write file %s: %s" msgstr "Сбой при записи файла %s: %s" #: src/services/a-rex/jura/JobLogFile.cpp:1062 #, c-format msgid "Missing required element \"CpuDuration\" in job log file %s" msgstr "" "Отсутствует обязательный элемент \"CpuDuration\" в файле журнала задач %s" #: src/services/a-rex/jura/JobLogFile.cpp:1082 #, c-format msgid "Set non standard bechmark type: %s" msgstr "Задан нестандартный тип эталонного теста: %s" #: src/services/a-rex/jura/JobLogFile.cpp:1095 #, c-format msgid "Ignored incoming benchmark value: %s, Use float value!" msgstr "" "Игнорируется полученное значение эталонного теста: %s, используйте значение " "с плавающей запятой!" #: src/services/a-rex/jura/JobLogFile.cpp:1324 #, c-format msgid "Failed to delete file %s:%s" msgstr "Не удалось удалить файл %s: %s" #: src/services/a-rex/jura/LutsDestination.cpp:223 #, c-format msgid "UsageRecords registration response: %s" msgstr "Отклик регистрации записи UsageRecords: %s" #: src/services/a-rex/jura/ReReporter.cpp:53 #, c-format msgid "Initialised, archived job log dir: %s" msgstr "" "Инициализирован каталог архивного хранения учётных записей о задачах: %s" #: src/services/a-rex/jura/ReReporter.cpp:73 #, c-format msgid "Incoming time range: %s" msgstr "Промежуток времени засылки: %s" #: src/services/a-rex/jura/ReReporter.cpp:92 #, c-format msgid "Requested time range: %d.%d.%d. 0:00 - %d.%d.%d. %d:%d " msgstr "Запрошенный промежуток времени: %d.%d.%d. 0:00 - %d.%d.%d. %d:%d " #: src/services/a-rex/jura/ReReporter.cpp:98 #: src/services/a-rex/jura/UsageReporter.cpp:45 msgid "Interactive mode." msgstr "Интерактивный режим." 
#: src/services/a-rex/jura/ReReporter.cpp:127 #: src/services/a-rex/jura/UsageReporter.cpp:68 #, c-format msgid "Could not open log directory \"%s\": %s" msgstr "Невозможно открыть каталог с журналами \"%s\": %s" #: src/services/a-rex/jura/ReReporter.cpp:167 #: src/services/a-rex/jura/UsageReporter.cpp:193 #, c-format msgid "Error reading log directory \"%s\": %s" msgstr "Ошибка чтения каталога журналов \"%s\": %s" #: src/services/a-rex/jura/ReReporter.cpp:179 #: src/services/a-rex/jura/UsageReporter.cpp:205 #, c-format msgid "Finished, job log dir: %s" msgstr "Завершено, каталог журнала задач: %s" #: src/services/a-rex/jura/UsageReporter.cpp:39 #, c-format msgid "Initialised, job log dir: %s" msgstr "Запущено, каталог журнала задач: %s" #: src/services/a-rex/jura/UsageReporter.cpp:41 #, c-format msgid "Expiration time: %d seconds" msgstr "Время истечения действительности: %d секунд" #: src/services/a-rex/jura/UsageReporter.cpp:80 #, c-format msgid "Could not open output directory \"%s\": %s" msgstr "Невозможно открыть выходной каталог \"%s\": %s" #: src/services/a-rex/jura/UsageReporter.cpp:125 #, c-format msgid "Removing outdated job log file %s" msgstr "Удаляется устаревший файл журнала задач %s" #: src/services/a-rex/migrate_activity.cpp:37 #, c-format msgid "" "MigrateActivity: request = \n" "%s" msgstr "" "MigrateActivity: запрос = \n" "%s" #: src/services/a-rex/migrate_activity.cpp:42 msgid "MigrateActivitys: no ActivityIdentifier found" msgstr "MigrateActivitys: не обнаружен ActivityIdentifier" #: src/services/a-rex/migrate_activity.cpp:51 msgid "MigrateActivity: EPR contains no JobID" msgstr "MigrateActivity: EPR не содержит JobID" #: src/services/a-rex/migrate_activity.cpp:69 msgid "MigrateActivity: Failed to accept delegation" msgstr "MigrateActivity: невозможно принять делегирование" #: src/services/a-rex/migrate_activity.cpp:130 msgid "MigrateActivity: no job description found" msgstr "MigrateActivity: не обнаружено описание задачи" #: src/services/a-rex/migrate_activity.cpp:153 #, c-format msgid "Migration XML sent to AREXJob: %s" msgstr "Миграционный документ XML послан к AREXJob: %s" #: src/services/a-rex/migrate_activity.cpp:180 #, c-format msgid "MigrateActivity: Failed to migrate new job: %s" msgstr "MigrateActivity: невозможно мигрировать новую задачу: %s" #: src/services/a-rex/migrate_activity.cpp:182 msgid "MigrateActivity: Failed to migrate new job" msgstr "MigrateActivity: невозможно мигрировать новую задачу" #: src/services/a-rex/migrate_activity.cpp:198 msgid "MigrateActivity finished successfully" msgstr "MigrateActivity успешно завершён" #: src/services/a-rex/migrate_activity.cpp:202 #, c-format msgid "" "MigrateActivity: response = \n" "%s" msgstr "" "MigrateActivity: отзыв = \n" "%s" #: src/services/a-rex/put.cpp:37 #, c-format msgid "Put: there is no job: %s - %s" msgstr "Put: задача отсутствует: %s - %s" #: src/services/a-rex/put.cpp:43 #, c-format msgid "Put: there is no payload for file %s in job: %s" msgstr "Put: отсутствует информация о файле %s в задании: %s" #: src/services/a-rex/put.cpp:56 #, c-format msgid "Put: unrecognized payload for file %s in job: %s" msgstr "Put: неприемлемая информация о файле %s в задании: %s" #: src/services/a-rex/put.cpp:76 src/services/a-rex/put.cpp:130 #, c-format msgid "Put: failed to create file %s for job %s - %s" msgstr "Put: не удалось создать файл %s для задания %s - %s" #: src/services/a-rex/put.cpp:85 #, c-format msgid "Put: failed to set position of file %s for job %s to %Lu - %s" msgstr "Put: не удалось 
установить позицию файла %s для задания %s на %Lu - %s" #: src/services/a-rex/put.cpp:91 #, c-format msgid "Put: failed to allocate memory for file %s in job %s" msgstr "Put: не удалось зарезервировать память для файла %s в задании %s" #: src/services/a-rex/put.cpp:103 #, c-format msgid "Put: failed to write to file %s for job %s - %s" msgstr "Put: не удалось записать в файл %s для задания %s - %s" #: src/services/a-rex/terminate_activities.cpp:29 #, c-format msgid "" "TerminateActivities: request = \n" "%s" msgstr "" "TerminateActivities: запрос = \n" "%s" #: src/services/a-rex/terminate_activities.cpp:40 msgid "TerminateActivities: non-AREX job requested" msgstr "TerminateActivities: запрошена задача, несовместимая с AREX" #: src/services/a-rex/terminate_activities.cpp:49 #, c-format msgid "TerminateActivities: job %s - %s" msgstr "TerminateActivities: задача %s - %s" #: src/services/a-rex/terminate_activities.cpp:69 #, c-format msgid "" "TerminateActivities: response = \n" "%s" msgstr "" "TerminateActivities: ответ = \n" "%s" #: src/services/a-rex/test.cpp:34 src/tests/count/test_service.cpp:25 #: src/tests/echo/test.cpp:23 src/tests/echo/test_service.cpp:22 msgid "Creating service side chain" msgstr "Создание цепи на стороне сервиса" #: src/services/a-rex/test.cpp:37 src/tests/count/test_service.cpp:28 #: src/tests/echo/test.cpp:26 src/tests/echo/test_service.cpp:25 msgid "Failed to load service configuration" msgstr "Не удалось загрузить настройки сервиса" #: src/services/a-rex/test.cpp:43 src/services/a-rex/test_cache_check.cpp:24 #: src/tests/count/test_client.cpp:23 #: src/tests/echo/echo_test4axis2c/test_client.cpp:20 #: src/tests/echo/test_client.cpp:21 msgid "Creating client side chain" msgstr "Создание цепи на стороне клиента" #: src/services/a-rex/test.cpp:49 src/tests/count/test_client.cpp:53 #: src/tests/echo/echo_test4axis2c/test_client.cpp:43 #: src/tests/echo/test_client.cpp:59 msgid "Failed to load client configuration" msgstr "Не удалось загрузить настройки клиента" #: src/services/a-rex/test.cpp:53 src/tests/count/test_client.cpp:57 #: src/tests/echo/echo_test4axis2c/test_client.cpp:47 #: src/tests/echo/test.cpp:58 src/tests/echo/test_client.cpp:63 msgid "Client side MCCs are loaded" msgstr "Подгружены клиентские компоненты цепи сообщений" #: src/services/a-rex/test.cpp:56 src/tests/count/test_client.cpp:60 #: src/tests/echo/echo_test4axis2c/test_client.cpp:50 #: src/tests/echo/test_client.cpp:66 msgid "Client chain does not have entry point" msgstr "Отсутствует точка входа в клиентскую цепь" #: src/services/a-rex/test.cpp:112 src/services/a-rex/test.cpp:191 #: src/services/a-rex/test.cpp:248 src/services/a-rex/test.cpp:296 #: src/services/a-rex/test.cpp:344 src/services/a-rex/test.cpp:392 #: src/tests/count/test_client.cpp:87 #: src/tests/echo/echo_test4axis2c/test_client.cpp:74 #: src/tests/echo/test.cpp:74 src/tests/echo/test_client.cpp:90 msgid "Request failed" msgstr "Ошибка при выполнении запроса" #: src/services/a-rex/test.cpp:115 src/services/a-rex/test.cpp:194 #: src/services/a-rex/test.cpp:251 src/services/a-rex/test.cpp:299 #: src/services/a-rex/test.cpp:347 src/services/a-rex/test.cpp:395 #: src/tests/echo/test.cpp:82 msgid "Request succeed!!!" msgstr "Запрос удался!!!" 
#: src/services/a-rex/test.cpp:117 src/services/a-rex/test.cpp:196 #: src/services/a-rex/test.cpp:253 src/services/a-rex/test.cpp:301 #: src/services/a-rex/test.cpp:349 src/services/a-rex/test.cpp:397 #: src/tests/count/test_client.cpp:93 #: src/tests/echo/echo_test4axis2c/test_client.cpp:80 #: src/tests/echo/test.cpp:79 src/tests/echo/test_client.cpp:96 msgid "There is no response" msgstr "Нет ответа" #: src/services/a-rex/test.cpp:125 src/services/a-rex/test.cpp:204 #: src/services/a-rex/test.cpp:261 src/services/a-rex/test.cpp:309 #: src/services/a-rex/test.cpp:357 src/services/a-rex/test.cpp:405 #: src/tests/count/test_client.cpp:100 #: src/tests/echo/echo_test4axis2c/test_client.cpp:87 #: src/tests/echo/test_client.cpp:103 msgid "Response is not SOAP" msgstr "Ответ не в формате SOAP" #: src/services/a-rex/test.cpp:136 msgid "Response is not expected WS-RP" msgstr "Отзыв не является ожидаемым WS-RP" #: src/services/a-rex/update_credentials.cpp:29 #, c-format msgid "" "UpdateCredentials: request = \n" "%s" msgstr "" "UpdateCredentials: запрос = \n" "%s" #: src/services/a-rex/update_credentials.cpp:35 msgid "UpdateCredentials: missing Reference" msgstr "UpdateCredentials: отсутствует ссылка" #: src/services/a-rex/update_credentials.cpp:43 msgid "UpdateCredentials: wrong number of Reference" msgstr "UpdateCredentials: недопустимое количество ссылок" #: src/services/a-rex/update_credentials.cpp:51 msgid "UpdateCredentials: wrong number of elements inside Reference" msgstr "UpdateCredentials: недопустимое число элементов внутри Reference" #: src/services/a-rex/update_credentials.cpp:60 msgid "UpdateCredentials: EPR contains no JobID" msgstr "UpdateCredentials: EPR не содержит JobID" #: src/services/a-rex/update_credentials.cpp:70 #, c-format msgid "UpdateCredentials: no job found: %s" msgstr "UpdateCredentials: задача не обнаружена: %s" #: src/services/a-rex/update_credentials.cpp:77 msgid "UpdateCredentials: failed to update credentials" msgstr "UpdateCredentials: невозможно обновить параметры доступа" #: src/services/a-rex/update_credentials.cpp:85 #, c-format msgid "" "UpdateCredentials: response = \n" "%s" msgstr "" "UpdateCredentials: отзыв = \n" "%s" #: src/services/cache_service/CacheService.cpp:52 msgid "No A-REX config file found in cache service configuration" msgstr "Не обнаружен файл настроек A-REX в настройках службы кэша" #: src/services/cache_service/CacheService.cpp:56 #, c-format msgid "Using A-REX config file %s" msgstr "Используется файл настроек A-REX %s" #: src/services/cache_service/CacheService.cpp:60 #, c-format msgid "Failed to process A-REX configuration in %s" msgstr "Не удалось обработать настройки A-REX в %s" #: src/services/cache_service/CacheService.cpp:65 msgid "No caches defined in configuration" msgstr "Кэш не описан в файле настроек" #: src/services/cache_service/CacheService.cpp:139 msgid "Empty filename returned from FileCache" msgstr "FileCache возвратил пустое имя файла" #: src/services/cache_service/CacheService.cpp:151 #, c-format msgid "Problem accessing cache file %s: %s" msgstr "Проблема при доступе к кэшированному файлу %s: %s" #: src/services/cache_service/CacheService.cpp:200 #: src/services/cache_service/CacheService.cpp:472 msgid "No job ID supplied" msgstr "Не указан ярлык задачи" #: src/services/cache_service/CacheService.cpp:209 #, c-format msgid "Bad number in priority element: %s" msgstr "Недопустимый приоритет: %s" #: src/services/cache_service/CacheService.cpp:218 msgid "No username supplied" msgstr "Не указано имя пользователя" #: 
src/services/cache_service/CacheService.cpp:225 #, c-format msgid "Supplied username %s does not match mapped username %s" msgstr "" "Указанное имя пользователя %s не совпадает с сопоставленным именем " "пользователя %s" #: src/services/cache_service/CacheService.cpp:239 msgid "No session directory found" msgstr "Не найден каталог сессии" #: src/services/cache_service/CacheService.cpp:243 #, c-format msgid "Using session dir %s" msgstr "Используется каталог сессии %s" #: src/services/cache_service/CacheService.cpp:247 #, c-format msgid "Failed to stat session dir %s" msgstr "Не удалось проверить состояние каталога сессии %s" #: src/services/cache_service/CacheService.cpp:252 #, c-format msgid "Session dir %s is owned by %i, but current mapped user is %i" msgstr "Каталог сессии %s принадлежит %i, но текущий пользователь - %i" #: src/services/cache_service/CacheService.cpp:279 #, c-format msgid "Failed to access proxy of given job id %s at %s" msgstr "Сбой доступа к доверенности указанной задачи %s в %s" #: src/services/cache_service/CacheService.cpp:297 #, c-format msgid "DN is %s" msgstr "DN: %s" #: src/services/cache_service/CacheService.cpp:373 #, c-format msgid "Permission checking passed for url %s" msgstr "Проверка прав доступа пройдена для URL %s" #: src/services/cache_service/CacheService.cpp:398 #: src/services/cache_service/CacheServiceGenerator.cpp:138 #, c-format msgid "Failed to move %s to %s: %s" msgstr "Не удалось переместить %s в %s: %s" #: src/services/cache_service/CacheService.cpp:437 #, c-format msgid "Starting new DTR for %s" msgstr "Запускается новый запрос DTR для %s" #: src/services/cache_service/CacheService.cpp:439 #, c-format msgid "Failed to start new DTR for %s" msgstr "Не удалось запустить новый запрос DTR для %s" #: src/services/cache_service/CacheService.cpp:486 #, c-format msgid "Job %s: all files downloaded successfully" msgstr "Задача %s: все файлы успешно загружены" #: src/services/cache_service/CacheService.cpp:495 #, c-format msgid "Job %s: Some downloads failed" msgstr "Задача %s: Сбой некоторых загрузок" #: src/services/cache_service/CacheService.cpp:501 #, c-format msgid "Job %s: files still downloading" msgstr "Задача %s: файлы всё ещё загружаются" #: src/services/cache_service/CacheService.cpp:514 msgid "CacheService: Unauthorized" msgstr "CacheService: Нет допуска" #: src/services/cache_service/CacheService.cpp:523 msgid "No local user mapping found" msgstr "Пользователь не приписан ни к одному локальному имени" #: src/services/cache_service/CacheService.cpp:530 #: src/services/data-staging/DataDeliveryService.cpp:631 #, c-format msgid "Identity is %s" msgstr "Личные данные: %s" #: src/services/cache_service/CacheService.cpp:595 msgid "Only POST is supported in CacheService" msgstr "CacheService поддерживает только POST" #: src/services/cache_service/CacheServiceGenerator.cpp:88 #, c-format msgid "DTR %s finished with state %s" msgstr "DTR %s завершился в состоянии %s" #: src/services/cache_service/CacheServiceGenerator.cpp:127 #, c-format msgid "Could not determine session directory from filename %s" msgstr "Не удалось определить каталог сессии из имени файла %s" #: src/services/cache_service/CacheServiceGenerator.cpp:168 #, c-format msgid "Invalid DTR for source %s, destination %s" msgstr "Недопустимый DTR для источника %s, назначения %s" #: src/services/cache_service/CacheServiceGenerator.cpp:210 #, c-format msgid "DTRs still running for job %s" msgstr "Запросы DTR для задачи %s всё ещё исполняются" #: 
src/services/cache_service/CacheServiceGenerator.cpp:219 #, c-format msgid "All DTRs finished for job %s" msgstr "Все запросы DTR для задачи %s завершены" #: src/services/cache_service/CacheServiceGenerator.cpp:226 #, c-format msgid "Job %s not found" msgstr "Задача %s не обнаружена" #: src/services/data-staging/DataDeliveryService.cpp:58 #, c-format msgid "Archiving DTR %s, state ERROR" msgstr "Архивирование запроса DTR %s, состояние ERROR" #: src/services/data-staging/DataDeliveryService.cpp:62 #, c-format msgid "Archiving DTR %s, state %s" msgstr "Архивирование запроса DTR %s, состояние %s" #: src/services/data-staging/DataDeliveryService.cpp:166 msgid "No delegation token in request" msgstr "В запросе отсутствует токен делегирования" #: src/services/data-staging/DataDeliveryService.cpp:174 msgid "Failed to accept delegation" msgstr "Не удалось принять делегирование" #: src/services/data-staging/DataDeliveryService.cpp:203 #: src/services/data-staging/DataDeliveryService.cpp:210 msgid "ErrorDescription" msgstr "Описание ошибки" #: src/services/data-staging/DataDeliveryService.cpp:215 #, c-format msgid "All %u process slots used" msgstr "Квота на процессы (%u) использована" #: src/services/data-staging/DataDeliveryService.cpp:230 #, c-format msgid "Received retry for DTR %s still in transfer" msgstr "" "Получена повторная попытка запроса DTR %s, всё ещё в состоянии передачи" #: src/services/data-staging/DataDeliveryService.cpp:237 #, c-format msgid "Replacing DTR %s in state %s with new request" msgstr "Запрос DTR %s в состоянии %s заменяется новым запросом" #: src/services/data-staging/DataDeliveryService.cpp:248 #, c-format msgid "Storing temp proxy at %s" msgstr "Сохранение временной доверенности в %s" #: src/services/data-staging/DataDeliveryService.cpp:256 #, c-format msgid "Failed to create temp proxy at %s: %s" msgstr "Не удалось создать временную доверенность в %s: %s" #: src/services/data-staging/DataDeliveryService.cpp:263 #, c-format msgid "Failed to change owner of temp proxy at %s to %i:%i: %s" msgstr "Не удалось поменять владельца временной доверенности в %s на %i:%i: %s" #: src/services/data-staging/DataDeliveryService.cpp:289 msgid "Invalid DTR" msgstr "Недействительный запрос DTR" #: src/services/data-staging/DataDeliveryService.cpp:294 #, c-format msgid "Failed to remove temporary proxy %s: %s" msgstr "Не удалось удалить временную доверенность %s: %s" #: src/services/data-staging/DataDeliveryService.cpp:395 #, c-format msgid "No such DTR %s" msgstr "Нет такого запроса DTR %s" #: src/services/data-staging/DataDeliveryService.cpp:406 #, c-format msgid "DTR %s failed: %s" msgstr "Сбой запроса DTR %s: %s" #: src/services/data-staging/DataDeliveryService.cpp:417 #, c-format msgid "DTR %s finished successfully" msgstr "Запрос DTR %s успешно завершён" #: src/services/data-staging/DataDeliveryService.cpp:427 #, c-format msgid "DTR %s still in progress (%lluB transferred)" msgstr "Запрос DTR %s ещё в процессе (передано %lluB)" #: src/services/data-staging/DataDeliveryService.cpp:488 #, c-format msgid "No active DTR %s" msgstr "Нет активных запросов DTR %s" #: src/services/data-staging/DataDeliveryService.cpp:498 #, c-format msgid "DTR %s was already cancelled" msgstr "Запрос DTR %s уже был прерван" #: src/services/data-staging/DataDeliveryService.cpp:507 #, c-format msgid "DTR %s could not be cancelled" msgstr "Запрос DTR %s не может быть прерван" #: src/services/data-staging/DataDeliveryService.cpp:551 #, c-format msgid "Failed to get load average: %s" msgstr "Сбой вычисления 
усреднённой загруженности: %s" #: src/services/data-staging/DataDeliveryService.cpp:575 msgid "Invalid configuration - no allowed IP address specified" msgstr "Неверная настройка - не указано ни одного допустимого IP-адреса" #: src/services/data-staging/DataDeliveryService.cpp:579 msgid "Invalid configuration - no allowed dirs specified" msgstr "Неверная настройка - не указано ни одного допустимого каталога" #: src/services/data-staging/DataDeliveryService.cpp:590 msgid "Failed to start archival thread" msgstr "Не удалось запустить поток архивирования" #: src/services/data-staging/DataDeliveryService.cpp:615 msgid "Shutting down data delivery service" msgstr "Закрывается служба размещения данных" #: src/services/data-staging/DataDeliveryService.cpp:624 msgid "Unauthorized" msgstr "Доступ закрыт" #: src/services/data-staging/DataDeliveryService.cpp:710 msgid "Only POST is supported in DataDeliveryService" msgstr "DataDeliveryService поддерживает только POST" #: src/services/examples/echo_python/EchoService.py:12 msgid "EchoService (python) constructor called" msgstr "Вызван Python-конструктор EchoService" #: src/services/examples/echo_python/EchoService.py:17 #, python-format msgid "EchoService (python) has prefix %(prefix)s and suffix %(suffix)s" msgstr "" "EchoService (Python) содержит приставку %(prefix)s и суффикс %(suffix)s" #: src/services/examples/echo_python/EchoService.py:24 msgid "EchoService (python) destructor called" msgstr "Вызван Python-деструктор EchoService" #: src/services/examples/echo_python/EchoService.py:54 msgid "EchoService (python) thread test starting" msgstr "Запуск теста потоков службы EchoService (python)" #: src/services/examples/echo_python/EchoService.py:65 #, python-format msgid "EchoService (python) thread test, iteration %(iteration)s %(status)s" msgstr "" "Запуск теста потоков службы EchoService (python), итерация %(iteration)s %" "(status)s" #: src/services/examples/echo_python/EchoService.py:91 msgid "EchoService (python) 'Process' called" msgstr "Вызван 'Process' EchoService (Python)" #: src/services/examples/echo_python/EchoService.py:95 #, python-format msgid "inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %s" msgstr "inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %s" #: src/services/examples/echo_python/EchoService.py:96 #, python-format msgid "inmsg.Attributes().getAll() = %s " msgstr "inmsg.Attributes().getAll() = %s " #: src/services/examples/echo_python/EchoService.py:97 #, python-format msgid "EchoService (python) got: %s " msgstr "EchoService (python) получил: %s " #: src/services/examples/echo_python/EchoService.py:102 #, python-format msgid "EchoService (python) request_namespace: %s" msgstr "EchoService (python) request_namespace: %s" #: src/services/examples/echo_python/EchoService.py:108 #: src/services/examples/echo_python/EchoService.py:177 #, python-format msgid "outpayload %s" msgstr "outpayload %s" #: src/services/examples/echo_python/EchoService.py:137 msgid "Calling https://localhost:60000/Echo using ClientSOAP" msgstr "Вызывается https://localhost:60000/Echo используя ClientSOAP" #: src/services/examples/echo_python/EchoService.py:140 msgid "Calling http://localhost:60000/Echo using ClientSOAP" msgstr "Вызывается http://localhost:60000/Echo используя ClientSOAP" #: src/services/examples/echo_python/EchoService.py:146 #: src/services/examples/echo_python/EchoService.py:161 #, python-format msgid "new_payload %s" msgstr "new_payload %s" #: src/services/examples/echo_python/EchoService.py:155 msgid "Calling http://localhost:60000/Echo using 
httplib" msgstr "Вызывается http://localhost:60000/Echo используя httplib" #: src/services/examples/echo_python/EchoService.py:171 msgid "Start waiting 10 sec..." msgstr "Ждём 10 секунд..." #: src/services/examples/echo_python/EchoService.py:173 msgid "Waiting ends." msgstr "Ожидание завершено." #: src/services/gridftpd/auth/auth.cpp:312 #, c-format msgid "Unknown authorization command %s" msgstr "Неизвестная ошибка команды допуска %s" #: src/services/gridftpd/auth/auth.cpp:330 #, c-format msgid "" "The [vo] section labeled '%s' has no file associated and can't be used for " "matching" msgstr "" "Разделу [vo] с названием '%s' не поставлен в соответствие файл, и он не " "может быть использован для авторизации" #: src/services/gridftpd/auth/auth_ldap.cpp:56 #, c-format msgid "Connecting to %s:%i" msgstr "Соединение с %s:%i" #: src/services/gridftpd/auth/auth_ldap.cpp:57 #, c-format msgid "Querying at %s" msgstr "Запрос к %s" #: src/services/gridftpd/auth/auth_ldap.cpp:62 #, c-format msgid "Failed to query LDAP server %s" msgstr "Не удалось запросить информацию с сервера LDAP %s" #: src/services/gridftpd/auth/auth_ldap.cpp:69 #, c-format msgid "Failed to get results from LDAP server %s" msgstr "Не удалось получить информацию с сервера LDAP %s" #: src/services/gridftpd/auth/auth_ldap.cpp:83 msgid "LDAP authorization is not supported" msgstr "Авторизация для LDAP не поддерживается" #: src/services/gridftpd/auth/auth_plugin.cpp:73 #: src/services/gridftpd/auth/unixmap.cpp:254 #, c-format msgid "Plugin %s failed to run" msgstr "Подключаемый модуль %s не смог запуститься" #: src/services/gridftpd/auth/auth_plugin.cpp:75 #: src/services/gridftpd/auth/unixmap.cpp:256 #, c-format msgid "Plugin %s printed: %u" msgstr "Подключаемый модуль %s вывел на печать: %u" #: src/services/gridftpd/auth/auth_plugin.cpp:76 #: src/services/gridftpd/auth/unixmap.cpp:257 #, c-format msgid "Plugin %s error: %u" msgstr "Ошибка подключаемого модуля %s: %u" #: src/services/gridftpd/auth/auth_voms.cpp:28 #, c-format msgid "VOMS proxy processing returns: %i - %s" msgstr "Обработка доверенности VOMS выдаёт: %i - %s" #: src/services/gridftpd/auth/auth_voms.cpp:120 #, c-format msgid "VOMS trust chains: %s" msgstr "Цепочка сертификатов VOMS: %s" #: src/services/gridftpd/commands.cpp:46 #, c-format msgid "response: %s" msgstr "ответ: %s" #: src/services/gridftpd/commands.cpp:50 #, c-format msgid "Send response failed: %s" msgstr "Сбой отсылки отклика: %s" #: src/services/gridftpd/commands.cpp:80 msgid "Response sending error" msgstr "Ошибка отсылки отклика" #: src/services/gridftpd/commands.cpp:93 msgid "Closed connection" msgstr "Соединение закрыто" #: src/services/gridftpd/commands.cpp:131 #, c-format msgid "Socket conversion failed: %s" msgstr "Ошибка преобразования сокета: %s" #: src/services/gridftpd/commands.cpp:141 #, c-format msgid "Failed to obtain own address: %s" msgstr "Сбой получения собственного адреса: %s" #: src/services/gridftpd/commands.cpp:149 #, c-format msgid "Failed to recognize own address type (IPv4 ir IPv6) - %u" msgstr "Сбой распознавания типа собственного адреса (IPv4 или IPv6) - %u" #: src/services/gridftpd/commands.cpp:159 #, c-format msgid "Accepted connection on [%s]:%u" msgstr "Принято соединение к [%s]:%u" #: src/services/gridftpd/commands.cpp:161 #, c-format msgid "Accepted connection on %u.%u.%u.%u:%u" msgstr "Принято соединение к %u.%u.%u.%u:%u" #: src/services/gridftpd/commands.cpp:196 msgid "Accept failed" msgstr "Сбой принятия" #: src/services/gridftpd/commands.cpp:204 #: 
src/services/gridftpd/listener.cpp:415 #, c-format msgid "Accept failed: %s" msgstr "Сбой принятия: %s" #: src/services/gridftpd/commands.cpp:219 #, c-format msgid "Accepted connection from [%s]:%u" msgstr "Принято соединение с [%s]:%u" #: src/services/gridftpd/commands.cpp:221 #, c-format msgid "Accepted connection from %u.%u.%u.%u:%u" msgstr "Принято соединение с %u.%u.%u.%u:%u" #: src/services/gridftpd/commands.cpp:230 msgid "Authenticate in commands failed" msgstr "Сбой проверки подлинности при исполнении инструкций" #: src/services/gridftpd/commands.cpp:239 msgid "Authentication failure" msgstr "Сбой при проверке подлинности" #: src/services/gridftpd/commands.cpp:247 #, c-format msgid "User subject: %s" msgstr "Субъект сертификата: %s" #: src/services/gridftpd/commands.cpp:248 #, c-format msgid "Encrypted: %s" msgstr "Зашифрован: %s" #: src/services/gridftpd/commands.cpp:254 msgid "User has no proper configuration associated" msgstr "Пользователь не ассоциирован с подходящей настройкой" #: src/services/gridftpd/commands.cpp:262 msgid "" "User has empty virtual directory tree.\n" "Either user has no authorised plugins or there are no plugins configured at " "all." msgstr "" "Дерево виртуального каталога пользователя пусто.\n" "Либо у пользователя нет допущенных расширений, либо расширения вообще не " "настроены." #: src/services/gridftpd/commands.cpp:279 msgid "Read commands in authenticate failed" msgstr "Сбой команд чтения в проверке подлинности" #: src/services/gridftpd/commands.cpp:410 msgid "Control connection (probably) closed" msgstr "Контрольное соединение (наверное) закрыто" #: src/services/gridftpd/commands.cpp:444 #: src/services/gridftpd/commands.cpp:723 msgid "Command EPRT" msgstr "Команда EPRT" #: src/services/gridftpd/commands.cpp:445 #, c-format msgid "Failed to parse remote addres %s" msgstr "Сбой разборки удалённого адреса %s" #: src/services/gridftpd/commands.cpp:467 #, c-format msgid "Command USER %s" msgstr "Команда USER %s" #: src/services/gridftpd/commands.cpp:474 msgid "Command CDUP" msgstr "Команда CDUP" #: src/services/gridftpd/commands.cpp:480 #, c-format msgid "Command CWD %s" msgstr "Команда CWD %s" #: src/services/gridftpd/commands.cpp:496 #, c-format msgid "Command MKD %s" msgstr "Команда MKD %s" #: src/services/gridftpd/commands.cpp:516 #, c-format msgid "Command SIZE %s" msgstr "Команда SIZE %s" #: src/services/gridftpd/commands.cpp:531 #, c-format msgid "Command SBUF: %i" msgstr "Команда SBUF: %i" #: src/services/gridftpd/commands.cpp:552 #, c-format msgid "Command MLST %s" msgstr "Команда MLST %s" #: src/services/gridftpd/commands.cpp:575 #, c-format msgid "Command DELE %s" msgstr "Команда DELE %s" #: src/services/gridftpd/commands.cpp:590 #, c-format msgid "Command RMD %s" msgstr "Команда RMD %s" #: src/services/gridftpd/commands.cpp:604 #, c-format msgid "Command TYPE %c" msgstr "Команда TYPE %c" #: src/services/gridftpd/commands.cpp:615 #, c-format msgid "Command MODE %c" msgstr "Команда MODE %c" #: src/services/gridftpd/commands.cpp:627 msgid "Command ABOR" msgstr "Команда ABOR" #: src/services/gridftpd/commands.cpp:640 #, c-format msgid "Command REST %s" msgstr "Команда REST %s" #: src/services/gridftpd/commands.cpp:653 #, c-format msgid "Command EPSV %s" msgstr "Команда EPSV %s" #: src/services/gridftpd/commands.cpp:655 msgid "Command SPAS" msgstr "Команда SPAS" #: src/services/gridftpd/commands.cpp:657 msgid "Command PASV" msgstr "Команда PASV" #: src/services/gridftpd/commands.cpp:678 msgid "local_pasv failed" msgstr "Сбой local_pasv" #: 
src/services/gridftpd/commands.cpp:702 msgid "local_spas failed" msgstr "Сбой local_spas" #: src/services/gridftpd/commands.cpp:725 msgid "Command PORT" msgstr "Команда PORT" #: src/services/gridftpd/commands.cpp:728 msgid "active_data is disabled" msgstr "active_data отключён" #: src/services/gridftpd/commands.cpp:737 msgid "local_port failed" msgstr "Сбой local_port" #: src/services/gridftpd/commands.cpp:750 #, c-format msgid "Command MLSD %s" msgstr "Команда MLSD %s" #: src/services/gridftpd/commands.cpp:752 #, c-format msgid "Command NLST %s" msgstr "Команда NLST %s" #: src/services/gridftpd/commands.cpp:754 #, c-format msgid "Command LIST %s" msgstr "Команда LIST %s" #: src/services/gridftpd/commands.cpp:805 #, c-format msgid "Command ERET %s" msgstr "Команда ERET %s" #: src/services/gridftpd/commands.cpp:835 #, c-format msgid "Command RETR %s" msgstr "Команда RETR %s" #: src/services/gridftpd/commands.cpp:864 #, c-format msgid "Command STOR %s" msgstr "Команда STOR %s" #: src/services/gridftpd/commands.cpp:892 #, c-format msgid "Command ALLO %i" msgstr "Команда ALLO %i" #: src/services/gridftpd/commands.cpp:915 msgid "Command OPTS" msgstr "Команда OPTS" #: src/services/gridftpd/commands.cpp:918 msgid "Command OPTS RETR" msgstr "Команда OPTS RETR" #: src/services/gridftpd/commands.cpp:928 #, c-format msgid "Option: %s" msgstr "Опция: %s" #: src/services/gridftpd/commands.cpp:972 msgid "Command NOOP" msgstr "Команда NOOP" #: src/services/gridftpd/commands.cpp:976 msgid "Command QUIT" msgstr "Команда QUIT" #: src/services/gridftpd/commands.cpp:986 msgid "Failed to close, deleting client" msgstr "Не удалось закрыть, уничтожается клиент" #: src/services/gridftpd/commands.cpp:1000 #, c-format msgid "Command DCAU: %i '%s'" msgstr "Команда DCAU: %i '%s'" #: src/services/gridftpd/commands.cpp:1028 #, c-format msgid "Command PBZS: %s" msgstr "Команда PBZS: %s" #: src/services/gridftpd/commands.cpp:1036 #, c-format msgid "Setting pbsz to %lu" msgstr "Устанавливается pbsz равным %lu" #: src/services/gridftpd/commands.cpp:1052 #, c-format msgid "Command PROT: %s" msgstr "Команда PROT: %s" #: src/services/gridftpd/commands.cpp:1077 #, c-format msgid "Command MDTM %s" msgstr "Команда MDTM %s" #: src/services/gridftpd/commands.cpp:1099 #, c-format msgid "Raw command: %s" msgstr "Необработанная инструкция: %s" #: src/services/gridftpd/commands.cpp:1147 msgid "Failed to allocate memory for buffer" msgstr "Не удалось зарезервировать память под буфер" #: src/services/gridftpd/commands.cpp:1154 #, c-format msgid "Allocated %u buffers %llu bytes each." msgstr "Выделено %u буферов по %llu байт каждый." 
#: src/services/gridftpd/commands.cpp:1161 msgid "abort_callback: start" msgstr "abort_callback: запуск" #: src/services/gridftpd/commands.cpp:1164 #, c-format msgid "abort_callback: Globus error: %s" msgstr "abort_callback: ошибка Globus: %s" #: src/services/gridftpd/commands.cpp:1178 msgid "make_abort: start" msgstr "make_abort: запуск" #: src/services/gridftpd/commands.cpp:1190 msgid "Failed to abort data connection - ignoring and recovering" msgstr "" "Не удалось оборвать соединение для данных - игнорируем и восстанавливаемся" #: src/services/gridftpd/commands.cpp:1198 msgid "make_abort: wait for abort flag to be reset" msgstr "make_abort: ожидание сброса семафора прерывания" #: src/services/gridftpd/commands.cpp:1208 msgid "make_abort: leaving" msgstr "make_abort: выход" #: src/services/gridftpd/commands.cpp:1223 msgid "check_abort: have Globus error" msgstr "check_abort: получена ошибка Globus" #: src/services/gridftpd/commands.cpp:1224 msgid "Abort request caused by transfer error" msgstr "Запрос на прерывание по причине ошибки передачи" #: src/services/gridftpd/commands.cpp:1227 msgid "check_abort: sending 426" msgstr "check_abort: посылается 426" #: src/services/gridftpd/commands.cpp:1248 msgid "Abort request caused by error in transfer function" msgstr "Запрос на прерывание по причине ошибки в функции передачи" #: src/services/gridftpd/commands.cpp:1330 msgid "Failed to start timer thread - timeout won't work" msgstr "" "Не удалось запустить поток таймера - прерывание по времени не будет работать" #: src/services/gridftpd/commands.cpp:1382 msgid "Killing connection due to timeout" msgstr "Прерывание соединения в связи с истёкшим лимитом времени" #: src/services/gridftpd/conf/conf_vo.cpp:25 #: src/services/gridftpd/conf/conf_vo.cpp:51 #: src/services/gridftpd/conf/conf_vo.cpp:69 #: src/services/gridftpd/conf/conf_vo.cpp:81 msgid "" "Configuration section [vo] is missing name. Check for presence of name= or " "vo= option." msgstr "" "Раздел настроек [vo] не содержит имени. Убедитесь в наличии опций name= или " "vo= ." 
#: src/services/gridftpd/conf/daemon.cpp:60 #: src/services/gridftpd/conf/daemon.cpp:183 #, c-format msgid "No such user: %s" msgstr "Нет такого пользователя: %s" #: src/services/gridftpd/conf/daemon.cpp:72 #: src/services/gridftpd/conf/daemon.cpp:195 #, c-format msgid "No such group: %s" msgstr "Нет такой группы: %s" #: src/services/gridftpd/conf/daemon.cpp:85 #: src/services/gridftpd/conf/daemon.cpp:208 #, c-format msgid "Improper debug level '%s'" msgstr "Недопустимый уровень отладки '%s'" #: src/services/gridftpd/conf/daemon.cpp:127 msgid "Missing option for command daemon" msgstr "Пропущены настраиваемые параметры для командного демона" #: src/services/gridftpd/conf/daemon.cpp:132 msgid "Wrong option in daemon" msgstr "Неверные опции в демоне" #: src/services/gridftpd/conf/daemon.cpp:142 #, c-format msgid "Improper size of log '%s'" msgstr "Недопустимый размер журнала '%s'" #: src/services/gridftpd/conf/daemon.cpp:151 #, c-format msgid "Improper number of logs '%s'" msgstr "Недопустимое количество журналов '%s'" #: src/services/gridftpd/conf/daemon.cpp:157 #, c-format msgid "Improper argument for logsize '%s'" msgstr "Недопустимый аргумент для размера журнала '%s'" #: src/services/gridftpd/conf/daemon.cpp:164 msgid "Missing option for command logreopen" msgstr "Отсутствующая опция для команды logreopen" #: src/services/gridftpd/conf/daemon.cpp:169 msgid "Wrong option in logreopen" msgstr "Неверная опция для logreopen" #: src/services/gridftpd/conf/daemon.cpp:253 #, c-format msgid "Failed to open log file %s" msgstr "Не удалось открыть журнальный файл %s" #: src/services/gridftpd/conf/environment.cpp:175 msgid "" "Central configuration file is missing at guessed location:\n" " /etc/arc.conf\n" "Use ARC_CONFIG variable for non-standard location" msgstr "" "Общий файл настроек отсутствует в обычном месте:\n" " /etc/arc.conf\n" "Используйте переменную среды ARC_CONFIG для необычных мест" #: src/services/gridftpd/datalist.cpp:101 msgid "Closing channel (list)" msgstr "Закрывается канал (перечисление)" #: src/services/gridftpd/datalist.cpp:157 msgid "Data channel connected (list)" msgstr "Канал передачи данных подсоединён (перечисление)" #: src/services/gridftpd/dataread.cpp:24 msgid "data_connect_retrieve_callback" msgstr "data_connect_retrieve_callback" #: src/services/gridftpd/dataread.cpp:30 msgid "Data channel connected (retrieve)" msgstr "Канал передачи данных подсоединён (получение)" #: src/services/gridftpd/dataread.cpp:37 msgid "data_connect_retrieve_callback: allocate_data_buffer" msgstr "data_connect_retrieve_callback: allocate_data_buffer" #: src/services/gridftpd/dataread.cpp:40 msgid "data_connect_retrieve_callback: allocate_data_buffer failed" msgstr "data_connect_retrieve_callback: сбой в allocate_data_buffer" #: src/services/gridftpd/dataread.cpp:48 #, c-format msgid "data_connect_retrieve_callback: check for buffer %u" msgstr "data_connect_retrieve_callback: проверка буфера %u" #: src/services/gridftpd/dataread.cpp:61 #, c-format msgid "Closing channel (retrieve) due to local read error :%s" msgstr "Прерывание канала (получение) в связи с локальной ошибкой чтения :%s" #: src/services/gridftpd/dataread.cpp:75 #: src/services/gridftpd/dataread.cpp:172 msgid "Buffer registration failed" msgstr "Сбой регистрации буфера" #: src/services/gridftpd/dataread.cpp:88 msgid "data_retrieve_callback" msgstr "data_retrieve_callback" #: src/services/gridftpd/dataread.cpp:96 #, c-format msgid "Data channel (retrieve) %i %i %i" msgstr "Канал передачи данных (получение) %i %i %i" #: 
src/services/gridftpd/dataread.cpp:104 msgid "Closing channel (retrieve)" msgstr "Канал закрывается (получение)" #: src/services/gridftpd/dataread.cpp:110 #: src/services/gridftpd/datawrite.cpp:128 #, c-format msgid "Time spent waiting for network: %.3f ms" msgstr "Время, проведённое в ожидании связи: %.3f мс" #: src/services/gridftpd/dataread.cpp:111 #: src/services/gridftpd/datawrite.cpp:129 #, c-format msgid "Time spent waiting for disc: %.3f ms" msgstr "Время, проведённое в ожидании диска: %.3f мс" #: src/services/gridftpd/dataread.cpp:122 msgid "data_retrieve_callback: lost buffer" msgstr "data_retrieve_callback: буфер потерян" #: src/services/gridftpd/dataread.cpp:158 #, c-format msgid "Closing channel (retrieve) due to local read error: %s" msgstr "Закрывается канал (загрузки) в связи с локальной ошибкой: %s" #: src/services/gridftpd/datawrite.cpp:24 msgid "data_connect_store_callback" msgstr "data_connect_store_callback" #: src/services/gridftpd/datawrite.cpp:30 msgid "Data channel connected (store)" msgstr "Канал передачи данных подсоединён (запись)" #: src/services/gridftpd/datawrite.cpp:57 msgid "Failed to register any buffer" msgstr "Не удалось зарегистрировать ни одного буфера" #: src/services/gridftpd/datawrite.cpp:76 #, c-format msgid "Data channel (store) %i %i %i" msgstr "Канал передачи данных (запись) %i %i %i" #: src/services/gridftpd/datawrite.cpp:89 msgid "data_store_callback: lost buffer" msgstr "data_store_callback: буфер потерян" #: src/services/gridftpd/datawrite.cpp:105 #, c-format msgid "Closing channel (store) due to error: %s" msgstr "Прерывание канала (запись) в связи с ошибкой: %s" #: src/services/gridftpd/datawrite.cpp:115 msgid "Closing channel (store)" msgstr "Закрывается канал (запись)" #: src/services/gridftpd/fileplugin/fileplugin.cpp:55 msgid "Can't parse access rights in configuration line" msgstr "Не удалось разобрать права доступа в строке настроек" #: src/services/gridftpd/fileplugin/fileplugin.cpp:61 msgid "Can't parse user:group in configuration line" msgstr "Не удалось разобрать user:group в строке настроек" #: src/services/gridftpd/fileplugin/fileplugin.cpp:68 msgid "Can't recognize user in configuration line" msgstr "Не удалось определить пользователя в строке настроек" #: src/services/gridftpd/fileplugin/fileplugin.cpp:77 msgid "Can't recognize group in configuration line" msgstr "Не удалось определить группу в строке настроек" #: src/services/gridftpd/fileplugin/fileplugin.cpp:84 #: src/services/gridftpd/fileplugin/fileplugin.cpp:89 msgid "Can't parse or:and in configuration line" msgstr "Не удалось разобрать or:and в строке настроек" #: src/services/gridftpd/fileplugin/fileplugin.cpp:116 msgid "Can't parse configuration line" msgstr "Не удалось разобрать строку настроек" #: src/services/gridftpd/fileplugin/fileplugin.cpp:120 #, c-format msgid "Bad directory name: %s" msgstr "Неверное имя каталога: %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:137 msgid "Can't parse create arguments in configuration line" msgstr "Не удалось обработать аргументы create в файле конфигурации" #: src/services/gridftpd/fileplugin/fileplugin.cpp:146 msgid "Can't parse mkdir arguments in configuration line" msgstr "Не удалось обработать аргументы mkdir в файле конфигурации" #: src/services/gridftpd/fileplugin/fileplugin.cpp:163 #, c-format msgid "Bad subcommand in configuration line: %s" msgstr "Неверная инструкция в строке настроек: %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:175 msgid "Bad mount directory specified" msgstr "Указан неподходящий 
каталог для монтирования" #: src/services/gridftpd/fileplugin/fileplugin.cpp:177 #, c-format msgid "Mount point %s" msgstr "Точка подключения %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:215 #: src/services/gridftpd/fileplugin/fileplugin.cpp:273 #, c-format msgid "mkdir failed: %s" msgstr "сбой mkdir: %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:226 #, c-format msgid "Warning: mount point %s creation failed." msgstr "Предупреждение: не удалось создать точку подключения %s." #: src/services/gridftpd/fileplugin/fileplugin.cpp:329 #, c-format msgid "plugin: open: %s" msgstr "подключаемый модуль: открытие: %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:377 #: src/services/gridftpd/fileplugin/fileplugin.cpp:414 msgid "Not enough space to store file" msgstr "Недостаточно свободного места для записи файла" #: src/services/gridftpd/fileplugin/fileplugin.cpp:428 #, c-format msgid "open: changing owner for %s, %i, %i" msgstr "открытие: смена владельца для %s, %i, %i" #: src/services/gridftpd/fileplugin/fileplugin.cpp:435 #, c-format msgid "open: owner: %i %i" msgstr "открытие: владелец: %i %i" #: src/services/gridftpd/fileplugin/fileplugin.cpp:444 #: src/services/gridftpd/fileplugin/fileplugin.cpp:484 #, c-format msgid "Unknown open mode %s" msgstr "Неизвестный режим открытия %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:449 msgid "plugin: close" msgstr "подключаемый модуль: закрытие" #: src/services/gridftpd/fileplugin/fileplugin.cpp:490 msgid "plugin: read" msgstr "подключаемый модуль: чтение" #: src/services/gridftpd/fileplugin/fileplugin.cpp:496 msgid "Error while reading file" msgstr "Ошибка чтения файла%1" #: src/services/gridftpd/fileplugin/fileplugin.cpp:506 msgid "plugin: write" msgstr "подключаемый модуль: запись" #: src/services/gridftpd/fileplugin/fileplugin.cpp:517 msgid "Zero bytes written to file" msgstr "В файл записано ноль байтов" #: src/services/gridftpd/fileplugin/fileplugin.cpp:725 #, c-format msgid "plugin: checkdir: %s" msgstr "подключаемый модуль: проверка каталога: %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:728 #, c-format msgid "plugin: checkdir: access: %s" msgstr "подключаемый модуль: проверка каталога: доступ: %s" #: src/services/gridftpd/fileplugin/fileplugin.cpp:737 #, c-format msgid "plugin: checkdir: access: allowed: %s" msgstr "подключаемый модуль: проверка каталога: доступ: открыт: %s" #: src/services/gridftpd/fileroot.cpp:14 #, c-format msgid "No plugin is configured or authorised for requested path %s" msgstr "" "Отсутствуют настроенные или допущенные расширения по заданному адресу %s" #: src/services/gridftpd/fileroot.cpp:19 msgid "FilePlugin: more unload than load" msgstr "FilePlugin: разгрузок больше, чем загрузок" #: src/services/gridftpd/fileroot.cpp:34 #, c-format msgid "Can't load plugin %s for access point %s" msgstr "Невозможно загрузить подключаемый модуль %s для точки доступа %s" #: src/services/gridftpd/fileroot.cpp:39 src/services/gridftpd/fileroot.cpp:43 #, c-format msgid "Plugin %s for access point %s is broken." msgstr "Расширение %s для точки доступа %s неисправно." #: src/services/gridftpd/fileroot.cpp:47 #, c-format msgid "Plugin %s for access point %s acquire failed (should never happen)." msgstr "" "Расширение %s для точки доступа %s недоступно (никогда не должно случаться)." 
#: src/services/gridftpd/fileroot.cpp:54 #, c-format msgid "Destructor with dlclose (%s)" msgstr "Деструктор с dlclose (%s)" #: src/services/gridftpd/fileroot.cpp:77 #, c-format msgid "FileNode: operator= (%s <- %s) %lu <- %lu" msgstr "FileNode: operator= (%s <- %s) %lu <- %lu" #: src/services/gridftpd/fileroot.cpp:79 msgid "Copying with dlclose" msgstr "Копирование с dlclose" #: src/services/gridftpd/fileroot_config.cpp:32 #: src/services/gridftpd/fileroot_config.cpp:596 msgid "configuration file not found" msgstr "файл настроек не найден" #: src/services/gridftpd/fileroot_config.cpp:51 msgid "Wrong port number in configuration" msgstr "Неприемлемый номер порта в настройках" #: src/services/gridftpd/fileroot_config.cpp:60 msgid "Wrong maxconnections number in configuration" msgstr "Неприемлемое значение maxconnections в настройках" #: src/services/gridftpd/fileroot_config.cpp:69 msgid "Wrong defaultbuffer number in configuration" msgstr "Неприемлемое значение defaultbuffer в настройках" #: src/services/gridftpd/fileroot_config.cpp:78 msgid "Wrong maxbuffer number in configuration" msgstr "Неприемлемое значение maxbuffer в настройках" #: src/services/gridftpd/fileroot_config.cpp:110 #: src/services/gridftpd/fileroot_config.cpp:118 #, c-format msgid "Can't resolve host %s" msgstr "Не удалось найти сервер %s" #: src/services/gridftpd/fileroot_config.cpp:152 #: src/services/gridftpd/fileroot_config.cpp:455 #, c-format msgid "couldn't open file %s" msgstr "не удалось открыть файл %s" #: src/services/gridftpd/fileroot_config.cpp:167 #: src/services/gridftpd/fileroot_config.cpp:183 #: src/services/gridftpd/fileroot_config.cpp:469 #, c-format msgid "improper attribute for encryption command: %s" msgstr "недопустимый атрибут команды шифрования: %s" #: src/services/gridftpd/fileroot_config.cpp:177 #: src/services/gridftpd/fileroot_config.cpp:479 #: src/services/gridftpd/fileroot_config.cpp:622 msgid "unknown (non-gridmap) user is not allowed" msgstr "неизвестный (не занесённый в gridmap) пользователь не допускается" #: src/services/gridftpd/fileroot_config.cpp:207 #: src/services/gridftpd/fileroot_config.cpp:547 #, c-format msgid "Failed processing authorization group %s" msgstr "Не удалось обработать группу допуска %s" #: src/services/gridftpd/fileroot_config.cpp:216 msgid "couldn't process VO configuration" msgstr "не удалось обработать настройки ВО" #: src/services/gridftpd/fileroot_config.cpp:223 #: src/services/gridftpd/fileroot_config.cpp:231 #: src/services/gridftpd/fileroot_config.cpp:239 #: src/services/gridftpd/fileroot_config.cpp:500 #: src/services/gridftpd/fileroot_config.cpp:508 #: src/services/gridftpd/fileroot_config.cpp:516 #, c-format msgid "failed while processing configuration command: %s %s" msgstr "сбой при обработке команды настройки: %s %s" #: src/services/gridftpd/fileroot_config.cpp:281 #, c-format msgid "can't parse configuration line: %s %s %s %s" msgstr "невозможно разобрать строку настроек: %s %s %s %s" #: src/services/gridftpd/fileroot_config.cpp:286 #, c-format msgid "bad directory in plugin command: %s" msgstr "неверный каталог в команде подключаемого модуля: %s" #: src/services/gridftpd/fileroot_config.cpp:298 #: src/services/gridftpd/fileroot_config.cpp:405 #, c-format msgid "Already have directory: %s" msgstr "Каталог %s уже существует" #: src/services/gridftpd/fileroot_config.cpp:307 #: src/services/gridftpd/fileroot_config.cpp:408 #, c-format msgid "Registering directory: %s with plugin: %s" msgstr "Регистрируется каталог: %s с подключаемым модулем: %s" #: 
src/services/gridftpd/fileroot_config.cpp:312 #: src/services/gridftpd/fileroot_config.cpp:421 #, c-format msgid "file node creation failed: %s" msgstr "сбой создания узла файла: %s" #: src/services/gridftpd/fileroot_config.cpp:330 #, c-format msgid "improper attribute for allowactvedata command: %s" msgstr "недопустимый атрибут команды allowactivedata: %s" #: src/services/gridftpd/fileroot_config.cpp:335 #, c-format msgid "unsupported configuration command: %s" msgstr "неподдерживаемая инструкция настроек: %s" #: src/services/gridftpd/fileroot_config.cpp:359 msgid "Could not determine hostname from gethostname()" msgstr "Невозможно определить имя узла используя gethostname()" #: src/services/gridftpd/fileroot_config.cpp:375 msgid "unnamed group" msgstr "группа без имени" #: src/services/gridftpd/fileroot_config.cpp:384 msgid "undefined plugin" msgstr "подключаемый модуль не определён" #: src/services/gridftpd/fileroot_config.cpp:388 msgid "undefined virtual plugin path" msgstr "не задан путь к виртуальному расширению" #: src/services/gridftpd/fileroot_config.cpp:393 #, c-format msgid "bad directory for plugin: %s" msgstr "неверный каталог для подключаемого модуля: %s" #: src/services/gridftpd/fileroot_config.cpp:485 #, c-format msgid "improper attribute for allowunknown command: %s" msgstr "недопустимый атрибут команды allowunknown: %s" #: src/services/gridftpd/fileroot_config.cpp:607 msgid "failed to process client identification" msgstr "Не удалось обработать личные данные клиента" #: src/services/gridftpd/fileroot_config.cpp:641 #, c-format msgid "Registering dummy directory: %s" msgstr "Регистрируется вспомогательный каталог: %s" #: src/services/gridftpd/listener.cpp:57 #: src/services/gridftpd/listener.cpp:466 msgid "Activation failed" msgstr "Ошибка активации" #: src/services/gridftpd/listener.cpp:66 #: src/services/gridftpd/listener.cpp:172 msgid "Child exited" msgstr "Потомок завершил работу" #: src/services/gridftpd/listener.cpp:78 msgid "Globus connection error" msgstr "Ошибка соединения Globus" #: src/services/gridftpd/listener.cpp:80 #: src/services/gridftpd/listener.cpp:424 msgid "New connection" msgstr "Новое соединение" #: src/services/gridftpd/listener.cpp:87 msgid "Server stopped" msgstr "Сервер остановлен" #: src/services/gridftpd/listener.cpp:157 msgid "Error: failed to set handler for SIGTERM" msgstr "Ошибка: не удалось установить обработчик SIGTERM" #: src/services/gridftpd/listener.cpp:161 msgid "Starting controlled process" msgstr "Запускается контролируемый процесс" #: src/services/gridftpd/listener.cpp:164 msgid "fork failed" msgstr "ошибка при выполнении системного вызова fork" #: src/services/gridftpd/listener.cpp:169 msgid "wait failed - killing child" msgstr "ошибка ожидания - прерывание процесса-потомка" #: src/services/gridftpd/listener.cpp:174 msgid "Killed with signal: " msgstr "Прерван сигналом: " #: src/services/gridftpd/listener.cpp:176 msgid "Restarting after segmentation violation." 
msgstr "Перезапуск после нарушения сегментации" #: src/services/gridftpd/listener.cpp:177 msgid "Waiting 1 minute" msgstr "Ожидание: 1 минута" #: src/services/gridftpd/listener.cpp:239 msgid "Error: failed to set handler for SIGCHLD" msgstr "Ошибка: не удалось установить обработчик SIGCHLD" #: src/services/gridftpd/listener.cpp:256 msgid "Missing argument" msgstr "Отсутствует аргумент" #: src/services/gridftpd/listener.cpp:257 msgid "Unknown option" msgstr "Неизвестный параметр" #: src/services/gridftpd/listener.cpp:264 msgid "Wrong port number" msgstr "Недопустимый номер порта" #: src/services/gridftpd/listener.cpp:274 msgid "Wrong number of connections" msgstr "Недопустимое количество подключений" #: src/services/gridftpd/listener.cpp:281 msgid "Wrong buffer size" msgstr "Недопустимый размер буфера" #: src/services/gridftpd/listener.cpp:288 msgid "Wrong maximal buffer size" msgstr "Недопустимый максимальный размер буфера" #: src/services/gridftpd/listener.cpp:300 msgid "Failed reading configuration" msgstr "Сбой чтения файла настроек" #: src/services/gridftpd/listener.cpp:331 #, c-format msgid "Failed to obtain local address: %s" msgstr "Не удалось получить локальный адрес: %s" #: src/services/gridftpd/listener.cpp:338 #, c-format msgid "Failed to create socket(%s): %s" msgstr "Не удалось создать сокет (%s): %s" #: src/services/gridftpd/listener.cpp:352 #, c-format msgid "Failed to limit socket to IPv6: %s" msgstr "Не удалось ограничить сокет до IPv6: %s" #: src/services/gridftpd/listener.cpp:359 #, c-format msgid "Failed to bind socket(%s): %s" msgstr "Не удалось связать сокет (%s): %s" #: src/services/gridftpd/listener.cpp:364 #, c-format msgid "Failed to listen on socket(%s): %s" msgstr "Не удалось прослушать сокет (%s): %s" #: src/services/gridftpd/listener.cpp:371 msgid "Not listening to anything" msgstr "Ничего не прослушивается" #: src/services/gridftpd/listener.cpp:374 #, c-format msgid "Some addresses failed. Listening on %u of %u." msgstr "Некоторые адреса недоступны. Прослушивается %u из %u." 
#: src/services/gridftpd/listener.cpp:382 #: src/services/gridftpd/listener.cpp:477 msgid "Listen started" msgstr "Прослушивание началось" #: src/services/gridftpd/listener.cpp:395 msgid "No valid handles left for listening" msgstr "Не осталось допустимых дескрипторов для прослушивания" #: src/services/gridftpd/listener.cpp:401 #, c-format msgid "Select failed: %s" msgstr "Выбор не удался: %s" #: src/services/gridftpd/listener.cpp:422 #, c-format msgid "Have connections: %i, max: %i" msgstr "Существующих соединений: %i, максимально: %i" #: src/services/gridftpd/listener.cpp:427 #, c-format msgid "Fork failed: %s" msgstr "Ошибка создания дочернего процесса: %s" #: src/services/gridftpd/listener.cpp:445 msgid "Refusing connection: Connection limit exceeded" msgstr "Отказано в соединении: Превышен предел соединений" #: src/services/gridftpd/listener.cpp:471 msgid "Init failed" msgstr "Сбой инициализации" #: src/services/gridftpd/listener.cpp:474 msgid "Listen failed" msgstr "Сбой прослушивания" #: src/services/gridftpd/listener.cpp:488 msgid "Listen finished" msgstr "Прослушивание завершено" #: src/services/gridftpd/listener.cpp:493 msgid "Stopping server" msgstr "Останавливается сервер" #: src/services/gridftpd/listener.cpp:497 msgid "Destroying handle" msgstr "Описатель уничтожается" #: src/services/gridftpd/listener.cpp:500 msgid "Deactivating modules" msgstr "Выгрузка модулей" #: src/services/gridftpd/listener.cpp:508 msgid "Exiting" msgstr "Завершается" #: src/services/gridftpd/misc/ldapquery.cpp:253 #, c-format msgid "%s: %s:%i" msgstr "%s: %s:%i" #: src/services/gridftpd/misc/ldapquery.cpp:390 #: src/services/gridftpd/misc/ldapquery.cpp:468 #, c-format msgid "%s %s" msgstr "%s %s" #: src/services/gridftpd/misc/ldapquery.cpp:394 #, c-format msgid " %s: %s" msgstr " %s: %s" #: src/services/gridftpd/misc/ldapquery.cpp:396 #, c-format msgid " %s:" msgstr " %s:" #: src/services/gridftpd/userspec.cpp:48 #, c-format msgid "Mapfile is missing at %s" msgstr "Файл приписки пользователей отсутствует в %s" #: src/services/gridftpd/userspec.cpp:89 #: src/services/gridftpd/userspec.cpp:215 msgid "There is no local mapping for user" msgstr "Пользователь не приписан ни к одному локальному имени" #: src/services/gridftpd/userspec.cpp:92 #: src/services/gridftpd/userspec.cpp:219 msgid "There is no local name for user" msgstr "Локальное имя приписки пользователя не указано" #: src/services/gridftpd/userspec.cpp:142 #: src/services/gridftpd/userspec.cpp:233 msgid "No proxy provided" msgstr "Отсутствует доверенность" #: src/services/gridftpd/userspec.cpp:144 #, c-format msgid "Proxy/credentials stored at %s" msgstr "Доверенность/параметры доступа сохранены в %s" #: src/services/gridftpd/userspec.cpp:147 #: src/services/gridftpd/userspec.cpp:238 #, c-format msgid "Initially mapped to local user: %s" msgstr "Начальная приписка к локальному имени пользователя: %s" #: src/services/gridftpd/userspec.cpp:150 #: src/services/gridftpd/userspec.cpp:340 #, c-format msgid "Local user %s does not exist" msgstr "Локальный пользователь %s не существует" #: src/services/gridftpd/userspec.cpp:155 #: src/services/gridftpd/userspec.cpp:246 #, c-format msgid "Initially mapped to local group: %s" msgstr "Предварительная привязка к локальной группе: %s" #: src/services/gridftpd/userspec.cpp:158 #: src/services/gridftpd/userspec.cpp:249 #: src/services/gridftpd/userspec.cpp:349 #, c-format msgid "Local group %s does not exist" msgstr "Локальная группа %s не существует"
#: src/services/gridftpd/userspec.cpp:167 #: src/services/gridftpd/userspec.cpp:258 msgid "Running user has no name" msgstr "Текущий пользователь не имеет имени" #: src/services/gridftpd/userspec.cpp:170 #: src/services/gridftpd/userspec.cpp:261 #, c-format msgid "Mapped to running user: %s" msgstr "Привязка к текущему пользователю: %s" #: src/services/gridftpd/userspec.cpp:180 #: src/services/gridftpd/userspec.cpp:271 #, c-format msgid "Mapped to local id: %i" msgstr "Привязка к локальному идентификатору: %i" #: src/services/gridftpd/userspec.cpp:185 #: src/services/gridftpd/userspec.cpp:276 #, c-format msgid "No group %i for mapped user" msgstr "Группа %i для привязанного пользователя отсутствует" #: src/services/gridftpd/userspec.cpp:194 #: src/services/gridftpd/userspec.cpp:285 #, c-format msgid "Mapped to local group id: %i" msgstr "Привязка к локальной группе с идентификатором: %i" #: src/services/gridftpd/userspec.cpp:195 #: src/services/gridftpd/userspec.cpp:286 #, c-format msgid "Mapped to local group name: %s" msgstr "Привязка к локальной группе с именем: %s" #: src/services/gridftpd/userspec.cpp:196 #: src/services/gridftpd/userspec.cpp:287 #, c-format msgid "Mapped user's home: %s" msgstr "Домашний каталог привязанного пользователя: %s" #: src/services/gridftpd/userspec.cpp:235 #, c-format msgid "Proxy stored at %s" msgstr "Доверенность записана в %s" #: src/services/gridftpd/userspec.cpp:241 msgid "Local user does not exist" msgstr "Локальное имя пользователя не существует" #: src/services/gridftpd/userspec.cpp:317 #, c-format msgid "Undefined control sequence: %%%s" msgstr "Неверная управляющая последовательность: %%%s" #: src/services/gridftpd/userspec.cpp:354 #, c-format msgid "Remapped to local user: %s" msgstr "Перепривязка к локальному пользователю: %s" #: src/services/gridftpd/userspec.cpp:355 #, c-format msgid "Remapped to local id: %i" msgstr "Перепривязка к локальному идентификатору: %i" #: src/services/gridftpd/userspec.cpp:356 #, c-format msgid "Remapped to local group id: %i" msgstr "Перепривязка к локальной группе с идентификатором: %i" #: src/services/gridftpd/userspec.cpp:357 #, c-format msgid "Remapped to local group name: %s" msgstr "Перепривязка к локальной группе с именем: %s" #: src/services/gridftpd/userspec.cpp:358 #, c-format msgid "Remapped user's home: %s" msgstr "Домашний каталог перепривязанного пользователя: %s" #: src/services/wrappers/java/javawrapper.cpp:33 #, c-format msgid "config: %s, class name: %s" msgstr "настройки: %s, класс: %s" #: src/services/wrappers/java/javawrapper.cpp:42 msgid "libjvm.so not loadable - check your LD_LIBRARY_PATH" msgstr "libjvm.so не может быть подгружена - проверьте LD_LIBRARY_PATH" #: src/services/wrappers/java/javawrapper.cpp:52 msgid "libjvm.so does not contain the expected symbols" msgstr "libjvm.so не содержит ожидаемых символов" #: src/services/wrappers/java/javawrapper.cpp:66 msgid "JVM started" msgstr "Запущена JVM" #: src/services/wrappers/java/javawrapper.cpp:71 #, c-format msgid "There is no service: %s in your Java class search path" msgstr "Служба %s отсутствует в пути поиска классов Java" #: src/services/wrappers/java/javawrapper.cpp:79 msgid "There is no constructor function" msgstr "Отсутствует конструктор" #: src/services/wrappers/java/javawrapper.cpp:86 #, c-format msgid "%s constructed" msgstr "%s создан" #: src/services/wrappers/java/javawrapper.cpp:90 msgid "Destroy JVM" msgstr "Уничтожение JVM" #: src/services/wrappers/java/javawrapper.cpp:183 msgid "Cannot find MCC_Status object" msgstr "Не удалось обнаружить объект MCC_Status"
#: src/services/wrappers/java/javawrapper.cpp:197 msgid "Java object returned NULL status" msgstr "Объект Java возвратил статус NULL" #: src/services/wrappers/python/pythonwrapper.cpp:107 #, c-format msgid "Loading %u-th Python service" msgstr "Загружается %u-я служба Python" #: src/services/wrappers/python/pythonwrapper.cpp:111 #, c-format msgid "Initialized %u-th Python service" msgstr "Запущена %u-я служба Python" #: src/services/wrappers/python/pythonwrapper.cpp:147 msgid "Invalid class name" msgstr "Неверное название класса" #: src/services/wrappers/python/pythonwrapper.cpp:152 #, c-format msgid "class name: %s" msgstr "название класса: %s" #: src/services/wrappers/python/pythonwrapper.cpp:153 #, c-format msgid "module name: %s" msgstr "название модуля: %s" #: src/services/wrappers/python/pythonwrapper.cpp:210 msgid "Cannot find ARC Config class" msgstr "Не удалось обнаружить класс ARC Config" #: src/services/wrappers/python/pythonwrapper.cpp:217 msgid "Config class is not an object" msgstr "Класс Config не является объектом" #: src/services/wrappers/python/pythonwrapper.cpp:225 msgid "Cannot get dictionary of module" msgstr "Ошибка доступа к словарю модуля" #: src/services/wrappers/python/pythonwrapper.cpp:234 msgid "Cannot find service class" msgstr "Не удалось найти класс сервиса" #: src/services/wrappers/python/pythonwrapper.cpp:243 msgid "Cannot create config argument" msgstr "Не удалось создать аргумент настроек" #: src/services/wrappers/python/pythonwrapper.cpp:250 msgid "Cannot convert config to Python object" msgstr "Не удалось преобразовать настройки в объект Python" #: src/services/wrappers/python/pythonwrapper.cpp:273 #, c-format msgid "%s is not an object" msgstr "%s не является объектом" #: src/services/wrappers/python/pythonwrapper.cpp:279 msgid "Message class is not an object" msgstr "Класс Message не является объектом" #: src/services/wrappers/python/pythonwrapper.cpp:287 msgid "Python Wrapper constructor succeeded" msgstr "Конструктор надстройки Python отработал успешно" #: src/services/wrappers/python/pythonwrapper.cpp:303 #, c-format msgid "Python Wrapper destructor (%d)" msgstr "Деструктор оболочки Python (%d)" #: src/services/wrappers/python/pythonwrapper.cpp:336 msgid "Python interpreter locked" msgstr "Интерпретатор Python заблокирован" #: src/services/wrappers/python/pythonwrapper.cpp:340 msgid "Python interpreter released" msgstr "Интерпретатор Python разблокирован" #: src/services/wrappers/python/pythonwrapper.cpp:400 msgid "Python wrapper process called" msgstr "Вызван процесс Python wrapper" #: src/services/wrappers/python/pythonwrapper.cpp:409 msgid "Failed to create input SOAP container" msgstr "Не удалось создать входной контейнер SOAP" #: src/services/wrappers/python/pythonwrapper.cpp:419 msgid "Cannot create inmsg argument" msgstr "Не удалось создать аргумент inmsg" #: src/services/wrappers/python/pythonwrapper.cpp:433 msgid "Cannot find ARC Message class" msgstr "Не удалось обнаружить класс ARC Message" #: src/services/wrappers/python/pythonwrapper.cpp:439 msgid "Cannot convert inmsg to Python object" msgstr "Не удалось преобразовать inmsg в объект Python" #: src/services/wrappers/python/pythonwrapper.cpp:448 msgid "Failed to create SOAP containers" msgstr "Не удалось создать контейнеры SOAP" #: src/services/wrappers/python/pythonwrapper.cpp:454 msgid "Cannot create outmsg argument" msgstr "Не удалось создать аргумент outmsg" #: src/services/wrappers/python/pythonwrapper.cpp:460 msgid "Cannot convert outmsg to Python object" msgstr "Не удалось преобразовать outmsg в объект Python"
#: src/services/wrappers/python/pythonwrapper.cpp:516 msgid "Failed to create XMLNode container" msgstr "Не удалось создать контейнер XMLNode" #: src/services/wrappers/python/pythonwrapper.cpp:533 msgid "Cannot find ARC XMLNode class" msgstr "Класс ARC XMLNode не найден" #: src/services/wrappers/python/pythonwrapper.cpp:539 msgid "Cannot create doc argument" msgstr "Не удалось создать аргумент doc" #: src/services/wrappers/python/pythonwrapper.cpp:545 msgid "Cannot convert doc to Python object" msgstr "Не удалось преобразовать doc в объект Python" #: src/tests/client/test_ClientInterface.cpp:36 #: src/tests/client/test_ClientSAML2SSO.cpp:68 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:78 #: src/tests/echo/test_clientinterface.cpp:41 #: src/tests/echo/test_clientinterface.cpp:132 #: src/tests/echo/test_clientinterface.py:9 msgid "Creating a soap client" msgstr "Создаётся клиент SOAP" #: src/tests/client/test_ClientInterface.cpp:84 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:97 #: src/tests/echo/test_clientinterface.cpp:78 msgid "SOAP invokation failed" msgstr "Не удалась активизация SOAP" #: src/tests/client/test_ClientSAML2SSO.cpp:44 #: src/tests/echo/test_clientinterface.cpp:100 msgid "Creating a http client" msgstr "Создаётся клиент HTTP" #: src/tests/client/test_ClientSAML2SSO.cpp:55 #: src/tests/echo/test_clientinterface.cpp:117 msgid "HTTP with SAML2SSO invokation failed" msgstr "Активизация HTTP с SAML2SSO не выполнена" #: src/tests/client/test_ClientSAML2SSO.cpp:59 #: src/tests/echo/test_clientinterface.cpp:121 msgid "There was no HTTP response" msgstr "Нет ответа HTTP" #: src/tests/client/test_ClientSAML2SSO.cpp:77 #: src/tests/echo/test_clientinterface.cpp:145 msgid "SOAP with SAML2SSO invokation failed" msgstr "Активизация SOAP с SAML2SSO не выполнена" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:37 #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:38 #: src/tests/delegation/test_delegation_client.cpp:45 #: src/tests/delegation/test_delegation_client.cpp:76 #: src/tests/echo/test_clientinterface.cpp:172 #: src/tests/echo/test_clientinterface.cpp:194 msgid "Creating a delegation soap client" msgstr "Создание клиента SOAP для делегации" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:46 #: src/tests/delegation/test_delegation_client.cpp:51 #: src/tests/echo/test_clientinterface.cpp:178 msgid "Delegation to ARC delegation service failed" msgstr "Сбой делегирования службе делегирования ARC" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:50 #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:49 #: src/tests/delegation/test_delegation_client.cpp:56 #: src/tests/delegation/test_delegation_client.cpp:88 #: src/tests/echo/test_clientinterface.cpp:182 #: src/tests/echo/test_clientinterface.cpp:205 #, c-format msgid "Delegation ID: %s" msgstr "ID делегирования: %s" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:55 #, c-format msgid "Can not get the delegation credential: %s from delegation service:%s" msgstr "" "Не удалось получить делегированные параметры доступа %s от службы " "делегирования:%s" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:58 #, c-format msgid "Delegated credential from delegation service: %s" msgstr "Делегированные параметры доступа от службы делегации: %s" #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:45 #: src/tests/delegation/test_delegation_client.cpp:83 #: src/tests/echo/test_clientinterface.cpp:201
msgid "Delegation to gridsite delegation service failed" msgstr "Сбой делегирования службе делегирования Gridsite" #: src/tests/count/count.cpp:58 msgid "Input is not SOAP" msgstr "Ввод не в формате SOAP" #: src/tests/count/count.cpp:89 src/tests/echo/echo.cpp:83 msgid "echo: Unauthorized" msgstr "echo: Доступ закрыт" #: src/tests/count/count.cpp:98 src/tests/count/count.cpp:104 #, c-format msgid "Request is not supported - %s" msgstr "Запрос не поддерживается - %s" #: src/tests/count/test_service.cpp:33 src/tests/echo/test_service.cpp:30 msgid "Service is waiting for requests" msgstr "Сервис в ожидании запросов" #: src/tests/echo/test.cpp:32 msgid "Creating client interface" msgstr "Создаётся интерфейс клиента" #: src/tests/echo/test_clientinterface.py:27 msgid "SOAP invocation failed" msgstr "Не удалась активизация SOAP" #~ msgid "Using non-RFC proxy so only local delivery can be used" #~ msgstr "" #~ "Используется несовместимая с RFC доверенность, поэтому возможна только " #~ "локальная доставка" #~ msgid "Using non-RFC proxy so forcing local delivery" #~ msgstr "" #~ "Используется несовместимая с RFC доверенность, вынужденно используется " #~ "локальная доставка" #~ msgid "Can not get extension from issuer certificate" #~ msgstr "Невозможно извлечь расширение из сертификата агентства" #~ msgid "use GSI proxy (RFC 3820 compliant proxy is default)" #~ msgstr "" #~ "использовать доверенность GSI (по умолчанию используется\n" #~ " RFC 3820-совместимая доверенность)" #~ msgid "Can not set the STORE_CTX for chain verification" #~ msgstr "Не удалось задать STORE_CTX для подтверждения цепи" #~ msgid "X509_V_ERR_PATH_LENGTH_EXCEEDED" #~ msgstr "X509_V_ERR_PATH_LENGTH_EXCEEDED" #~ msgid "X509_V_ERR_PATH_LENGTH_EXCEEDED --- with proxy" #~ msgstr "X509_V_ERR_PATH_LENGTH_EXCEEDED --- с доверенностью" #~ msgid "X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION" #~ msgstr "X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION" #~ msgid "" #~ "The proxy to be signed should be compatible with the signing certificate: " #~ "(%s) -> (%s)" #~ msgstr "" #~ "Подписываемая доверенность должна быть совместима с подписывающим " #~ "сертификатом: (%s) -> (%s)" #~ msgid "The proxy depth %i is out of maximum limit %i" #~ msgstr "Глубина доверенности %i превышает предел %i" #~ msgid "proxy_depth: %i, path_length: %i" #~ msgstr "proxy_depth: %i, path_length: %i" #~ msgid "" #~ "Can not convert DER encoded PROXYCERTINFO extension to internal format" #~ msgstr "" #~ "Невозможно преобразовать расширение PROXYCERTINFO в кодировке DER во " #~ "внутренний формат" #~ msgid "Found more than one PCI extension" #~ msgstr "Обнаружено более одного расширения PCI" #~ msgid "" #~ "Globus legacy proxies can not carry policy data or path length constraints" #~ msgstr "" #~ "Устаревшие доверенности Globus не могут содержать данные о политиках или " #~ "ограничения по длине пути" #~ msgid "RSA_generate_key failed" #~ msgstr "Сбой метода RSA_generate_key" #~ msgid "Can not get X509V3_EXT_METHOD for %s" #~ msgstr "Невозможно извлечь X509V3_EXT_METHOD для %s" #~ msgid "Can't get X509V3_EXT_METHOD for %s" #~ msgstr "Невозможно извлечь X509V3_EXT_METHOD для %s" #~ msgid "Failed to add extension into proxy" #~ msgstr "Сбой добавления расширения в доверенность" #~ msgid "" #~ "The signing algorithm %s is not allowed,it should be SHA1/SHA2 to sign " #~ "certificate requests" #~ msgstr "" #~ "Недопустимый алгоритм подписи %s: запросы сертификата должны " #~ "подписываться SHA1 или SHA2" #~ msgid "Failed to add extension into EEC certificate" #~ msgstr "Сбой 
добавления расширения в сертификат EEC" #~ msgid "EMIES:WipeActivity: job %s - state is %s, not terminal" #~ msgstr "EMIES:WipeActivity: задача %s - состояние %s, не конечное" #~ msgid "" #~ "Resource information provider timed out: %u seconds. Using heartbeat file " #~ "from now on... Consider increasing infoproviders_timeout in arc.conf" #~ msgstr "" #~ "Истекло время на сбор информации о ресурсе: %u секунд. Проверяется " #~ "контрольный файл... Попробуйте увеличить значение infoproviders_timeout в " #~ "arc.conf" #~ msgid "" #~ "Resource information provider timed out: %u seconds. Checking heartbeat " #~ "file..." #~ msgstr "" #~ "Истекло время на сбор информации о ресурсе: %u секунд. Проверяется " #~ "контрольный файл..." #~ msgid "" #~ "Cannot stat %s. Are infoproviders running? This message will not be " #~ "repeated." #~ msgstr "" #~ "Невозможно проверить %s. Запущены ли сборщики информации? Это сообщение " #~ "не будет больше повторяться." #~ msgid "" #~ "Cannot stat %s. Are infoproviders running? It happened already %d times." #~ msgstr "" #~ "Невозможно проверить %s. Запущены ли сборщики информации? Это уже %d-й " #~ "раз." #~ msgid "" #~ "Checked time: %d | Heartbeat file stat: %d | %s has not beed touched " #~ "before timeout (%d). \n" #~ " The performance is too low, infoproviders will be killed. A-REX " #~ "functionality is not ensured." #~ msgstr "" #~ "Время проверки: %d | Последнее контрольное обновление: %d | %s не " #~ "обновился в срок (%d). \n" #~ " Низкая производительность, сборщики информации будут остановлены. " #~ "Работоспособность A-REX под угрозой." #~ msgid "Found recent heartbeat file %s , waiting other %d seconds" #~ msgstr "" #~ "Обнаружен недавно обновлённый контрольный файл %s , ожидание ещё %d секунд" #~ msgid "Submit: Failed to disconnect after submission" #~ msgstr "Засылка: Сбой отсоединения после засылки" #~ msgid "Unable to copy %s: No valid credentials found" #~ msgstr "" #~ "Невозможно скопировать %s: Не обнаружено действительных параметров доступа" #~ msgid "Unable to list content of %s: No valid credentials found" #~ msgstr "" #~ "Невозможно просмотреть содержимое %s: Не обнаружено действительных " #~ "параметров доступа" #~ msgid "Unable to create directory %s: No valid credentials found" #~ msgstr "" #~ "Невозможно создать директорию %s: Не обнаружено действительных параметров " #~ "доступа" #~ msgid "Unable to rename %s: No valid credentials found" #~ msgstr "" #~ "Невозможно переименовать %s: Не обнаружено действительных параметров " #~ "доступа" #~ msgid "Unable to remove file %s: No valid credentials found" #~ msgstr "" #~ "Невозможно стереть %s: Не обнаружено действительных параметров доступа" #~ msgid "year" #~ msgid_plural "years" #~ msgstr[0] "год" #~ msgstr[1] "года" #~ msgstr[2] "лет" #~ msgid "month" #~ msgid_plural "months" #~ msgstr[0] "месяц" #~ msgstr[1] "месяца" #~ msgstr[2] "месяцев" #~ msgid "day" #~ msgid_plural "days" #~ msgstr[0] "день" #~ msgstr[1] "дня" #~ msgstr[2] "дней" #~ msgid "arc_to_voms - %u attributes" #~ msgstr "arc_to_voms - %u атрибут(а)" #~ msgid "arc_to_voms: attribute: %s" #~ msgstr "arc_to_voms: атрибут: %s" #~ msgid "%s: Failed switching user" #~ msgstr "%s: Сбой при смене пользователя" #~ msgid "Job could have died due to expired proxy: restarting" #~ msgstr "" #~ "Возможно, исполнение задачи прервалось из-за просроченной доверенности: " #~ "перезапуск" #~ msgid "Failed to report renewed proxy to job" #~ msgstr "Не удалось сообщить задаче о новой доверенности" #~ msgid "" #~ "Proxy certificate path was 
not explicitly set or does not exist or has\n" #~ "improper permissions/ownership and not found at default location.\n" #~ "Key/certificate paths were not explicitly set or do not exist or have\n" #~ "improper permissions/ownership and usercert.pem/userkey.pem not found\n" #~ "at default locations:\n" #~ "~/.arc/, ~/.globus/, %s/etc/arc, and ./.\n" #~ "If the proxy or certificate/key does exist, please manually specify the " #~ "locations via env\n" #~ "X509_USER_CERT/X509_USER_KEY or X509_USER_PROXY, or the certificatepath/" #~ "keypath or proxypath\n" #~ "item in client.conf\n" #~ "If the certificate/key does exist, and proxy is needed to be generated, " #~ "please\n" #~ "use arcproxy utility to create a proxy certificate." #~ msgstr "" #~ "Местонахождение доверенности не задано явно, либо не существует,\n" #~ "либо у Вас недостаточные привилегии, а в стандартном месте её нет.\n" #~ "Местонахождения закрытого/открытого ключей не заданы явно, либо их нет,\n" #~ "либо у Вас недостаточные привилегии, а файлов usercert.pem/userkey.pem " #~ "нет\n" #~ "в стандартных местах:\n" #~ "~/.arc/, ~/.globus/, %s/etc/arc, и ./.\n" #~ "Если у Вас есть эти файлы, пожалуйста, укажите вручную их расположения с " #~ "помощью\n" #~ "переменных X509_USER_CERT/X509_USER_KEY и/или X509_USER_PROXY, либо задав " #~ "значения\n" #~ "certificatepath/keypath or proxypath в файле настроек клиента client." #~ "conf\n" #~ "Если у вас есть ключи, но нет доверенности, используйте средство arcproxy " #~ "для её создания." #~ msgid "LDAP authorization is not implemented yet" #~ msgstr "Авторизация для LDAP ещё не реализована" #~ msgid "Match group: %s" #~ msgstr "Совпадение группы: %s" #~ msgid "Match role: %s" #~ msgstr "Совпадение роли: %s" #~ msgid "Match capabilities: %s" #~ msgstr "Совпадение возможности: %s" #~ msgid "Failed writing RSL" #~ msgstr "Сбой записи RSL" #~ msgid "" #~ "Parsing error:\n" #~ "%s" #~ msgstr "" #~ "Ошибка разбора:\n" #~ "%s" #~ msgid "Parsing string using ADLParser" #~ msgstr "Разбор строки с использованием ADLParser" #~ msgid "[ADLParser] Parsing error: %s\n" #~ msgstr "[ADLParser] Ошибка разбора: %s\n" #~ msgid "[ADLParser] Wrong XML structure! " #~ msgstr "[ADLParser] Неверная структура XML! " #~ msgid "Parsing string using ARCJSDLParser" #~ msgstr "Обработка строки с использованием ARCJSDLParser" #~ msgid "[ARCJSDLParser] XML parsing error: %s\n" #~ msgstr "[ARCJSDLParser] Ошибка разбора XML: %s\n" #~ msgid "[ARCJSDLParser] Wrong XML structure! " #~ msgstr "[ARCJSDLParser] Неверная структура XML! " #~ msgid "Parsing string using JDLParser" #~ msgstr "Разбор строки с использованием JDLParser" #~ msgid "" #~ "[JDLParser] There is at least one necessary square bracket missing or " #~ "their order is incorrect. ('[' or ']')" #~ msgstr "" #~ "[JDLParser] По крайней мере одна квадратная скобка отсутствует, или их " #~ "порядок неверен ('[' или ']')." 
#~ msgid "Can't evaluate left operand for RSL concatenation: %s" #~ msgstr "" #~ "Невозможно определить значение левого операнда для подцепления RSL: %s" #~ msgid "Can't evaluate right operand for RSL concatenation: %s" #~ msgstr "" #~ "Невозможно определить значение правого операнда для подцепления RSL: %s" #~ msgid "Can't evaluate RSL list member: %s" #~ msgstr "Невозможно определить значение элемента списка RSL: %s" #~ msgid "Can't evaluate RSL sequence member: %s" #~ msgstr "Невозможно определить значение члена последовательности RSL: %s" #~ msgid "Unknown RSL value type - should not happen" #~ msgstr "Неизвестный тип значения RSL - не должно случаться" #~ msgid "RSL (inside multi) could not be evaluated: %s" #~ msgstr "RSL (внутри множественного описания) не может быть обработан: %s" #~ msgid "RSL could not be evaluated: %s" #~ msgstr "RSL не может быть обработан: %s" #~ msgid "Can't evaluate RSL fragment: %s" #~ msgstr "Невозможно обработать фрагмент RSL: %s" #~ msgid "Can't evaluate RSL substitution variable name: %s" #~ msgstr "Невозможно определить имя переменной для замены RSL: %s" #~ msgid "Can't evaluate RSL substitution variable value: %s" #~ msgstr "Невозможно определить значение переменной для замены RSL: %s" #~ msgid "Can't evaluate RSL condition value: %s" #~ msgstr "Невозможно определить значение условия RSL: %s" #~ msgid "Unknown RSL type - should not happen" #~ msgstr "Неизвестный тип RSL - это не должно случаться" #~ msgid "RSL parsing failed at position %ld" #~ msgstr "Сбой обработки RSL на позиции %ld" #~ msgid "Expected ) at position %ld" #~ msgstr "Ожидается ) на позиции %ld" #~ msgid "Expected ( at position %ld" #~ msgstr "Ожидается ( на позиции %ld" #~ msgid "Expected variable name at position %ld" #~ msgstr "Ожидается название переменной на позиции %ld" #~ msgid "Broken string at position %ld" #~ msgstr "Повреждённая строка на позиции %ld" #~ msgid "RSL parsing error at position %ld" #~ msgstr "Ошибка обработки RSL на позиции %ld" #~ msgid "Expected attribute name at position %ld" #~ msgstr "Ожидается название атрибута на позиции %ld" #~ msgid "Expected relation operator at position %ld" #~ msgstr "Ожидается оператор сравнения на позиции %ld" #~ msgid "Xrsl attribute join is set but attribute stdout is not set" #~ msgstr "Задан атрибут xRSL join, но атрибут stdout пропущен" #~ msgid "Xrsl attribute join is set but attribute stderr is also set" #~ msgstr "Задан атрибут xRSL join, но также задан атрибут stderr" #~ msgid "Parsing string using XRSLParser" #~ msgstr "Обработка строки с использованием XRSLParser" #~ msgid "XRSL parsing error" #~ msgstr "Ошибка обработки XRSL" #~ msgid "filename cannot be empty." #~ msgstr "Имя файла не может быть пустым." #~ msgid "" #~ "Parsing the queue xrsl attribute failed. An invalid comparison operator " #~ "was used, only \"=\" is allowed." #~ msgstr "" #~ "Ошибка разбора атрибута XRSL queue. Используется недопустимый оператор " #~ "сравнения, допускается только \"=\"." #~ msgid "%d Queues" #~ msgstr "%d Очереди" #~ msgid "Queue Information:" #~ msgstr "Сведения об очереди:" #~ msgid "Failed to set GFAL2 user data object: %s" #~ msgstr "Сбой установки объекта данных пользователя GFAL2: %s" #~ msgid "" #~ "Localtransfer is deprecated, but turned on in arc.conf. Job will be " #~ "submitted with localtransfer=no." #~ msgstr "" #~ "Опция localtransfer более не поддерживается, но указана в arc.conf. " #~ "Задача будет запущена с опцией localtransfer=no." #~ msgid "Localtransfer deprecated. Localtransfer has been turned off." 
#~ msgstr "Опция localtransfer более не поддерживается и отключена." #~ msgid "Permission checking failed" #~ msgstr "Проверка прав доступа не удалась" #~ msgid "Cache file valid until: %s" #~ msgstr "Файл в кэше действителен до: %s" #~ msgid "Changing old validity time format to new in %s" #~ msgstr "Заменяется старый формат срока годности на новый в %s" #~ msgid "%s: adding to transfer share %s" #~ msgstr "%s: добавляется к трансферной доле %s" #~ msgid "%s: state: %s: starting new child" #~ msgstr "%s: Состояние: %s: запускается новый дочерний процесс" #~ msgid "%s: State %s: starting child: %s" #~ msgstr "%s: Состояние %s: запускается дочерний процесс: %s" #~ msgid "%s: Failed to run uploader process" #~ msgstr "%s: Не удалось запустить процесс отгрузчика" #~ msgid "%s: Failed to run downloader process" #~ msgstr "%s: Не удалось запустить процесс загрузчика" #~ msgid "%s: State: PREPARING/FINISHING: child is running" #~ msgstr "%s: состояние PREPARING/FINISHING: дочерний процесс исполняется" #~ msgid "%s: State: PREPARING: child exited with code: %i" #~ msgstr "" #~ "%s: состояние PREPARING: дочерний процесс завершился с кодом выхода: %i" #~ msgid "%s: State: FINISHING: child exited with code: %i" #~ msgstr "" #~ "%s: состояние: FINISHING: дочерний процесс завершился с кодом выхода: %i" #~ msgid "%s: State: FINISHING: unrecoverable error detected (exit code 1)" #~ msgstr "" #~ "%s: состояние FINISHING: обнаружена неисправимая ошибка (код выхода 1)" #~ msgid "%s: State: PREPARING: unrecoverable error detected (exit code 1)" #~ msgstr "" #~ "%s: состояние PREPARING: обнаружена неисправимая ошибка (код выхода 1)" #~ msgid "%s: State: PREPARING/FINISHING: retryable error" #~ msgstr "%s: состояние PREPARING/FINISHING: исправимая ошибка" #~ msgid "%s: State: %s: credentials probably expired (exit code %i)" #~ msgstr "" #~ "%s: состояние: %s: вероятно, истёк срок действия параметров доступа (код " #~ "выхода %i)" #~ msgid "%s: State: %s: trying to renew credentials" #~ msgstr "%s: Состояние: %s: попытка обновить параметры доступа" #~ msgid "%s: State: %s: failed to renew credentials" #~ msgstr "%s: Состояние: %s: невозможно обновить параметры доступа" #~ msgid "%s: State: %s: failed to create temporary proxy for renew: %s" #~ msgstr "" #~ "%s: Состояние: %s: не удалось создать временную доверенность для " #~ "обновления: %s" #~ msgid "" #~ "%s: State: %s: some error detected (exit code %i). Recover from such type " #~ "of errors is not supported yet." #~ msgstr "" #~ "%s: Состояние: %s:обнаружена ошибка (код выхода %i). Восстановление после " #~ "такой ошибки пока что не поддерживается." #~ msgid "%s: Data staging failed. No retries left." #~ msgstr "%s: Сбой размещения данных. Все попытки вышли." #~ msgid "" #~ "%s: Download failed. %d retries left. Will wait for %ds before retrying" #~ msgstr "%s: Сбой загрузки. Осталось %d попыток. Повторная попытка через %dс" #~ msgid "%s: Upload failed. No retries left." #~ msgstr "%s: Сбой отгрузки. Все попытки вышли." #~ msgid "" #~ "%s: Upload failed. %d retries left. Will wait for %ds before retrying." #~ msgstr "%s: Сбой отгрузки. Осталось %d попыток. 
Повторная попытка через %dс" #~ msgid "Wrong number in speedcontrol: %s" #~ msgstr "Недопустимое число в speedcontrol: %s" #~ msgid "Wrong number in maxtransfertries" #~ msgstr "Недопустимое число в maxtransfertries" #~ msgid "Empty root directory for GACL plugin" #~ msgstr "Корневой каталог расширения GACL пуст" #~ msgid "Failed to parse default GACL document" #~ msgstr "Не удалось разобрать документ GACL по умолчанию" #~ msgid "Mount point %s creation failed." #~ msgstr "Не удалось создать точку подключения %s." #~ msgid "Creation of top level ACL %s failed." #~ msgstr "Не удалось создать правила доступа высшего уровня %s." #~ msgid "plugin(gacl): open: %s" #~ msgstr "Подключаемый модуль(gacl): открытие: %s" #~ msgid "Failed to parse GACL" #~ msgstr "Невозможно обработать GACL" #~ msgid "GACL without is not allowed" #~ msgstr "Инструкция GACL без недопустима" #~ msgid "Failed to save GACL" #~ msgstr "Невозможно сохранить GACL" #~ msgid "GACL file %s is not an ordinary file" #~ msgstr "Файл GACL %s не является обычным файлом" #~ msgid "GACL description for file %s could not be loaded" #~ msgstr "Правила GACL для файла %s не могут быть загружены" #~ msgid "Request failed: No response" #~ msgstr "Запрос не удался: нет ответа" #~ msgid "Request failed: Error" #~ msgstr "Запрос не удался: ошибка." #~ msgid "Request succeeded!!!" #~ msgstr "Запрос удался!!!" #~ msgid "SP Service name is %s" #~ msgstr "Имя службы провайдера услуг: %s" #~ msgid "SAML Metadata is from %s" #~ msgstr "Метаданные SAML из %s" #~ msgid "saml2SP: Unauthorized" #~ msgstr "SAML2SP: Доступ закрыт" #~ msgid "no input payload" #~ msgstr "пустая нагрузка на входе" #~ msgid "AuthnRequest after deflation: %s" #~ msgstr "AuthnRequest после развёртывания: %s" #~ msgid "Using private key file to sign: %s" #~ msgstr "Используется файл личного ключа для подписи: %s" #~ msgid "After signature: %s" #~ msgstr "После подписи: %s" #~ msgid "Encrypted SAML assertion: %s" #~ msgstr "Зашифрованное утверждение SAML: %s" #~ msgid "Can not decrypt the EncryptedAssertion from SAML response" #~ msgstr "Не удалось расшифровать EncryptedAssertion из отзыва SAML" #~ msgid "Decrypted SAML Assertion: %s" #~ msgstr "Расшифрованное утверждение SAML: %s" #~ msgid "Encrypted name ID: %s" #~ msgstr "Зашифрованный идентификатор имени: %s" #~ msgid "Can not decrypt the EncryptedID from SAML assertion" #~ msgstr "Не удалось расшифровать EncryptedID из утверждения SAML" #~ msgid "Decrypted SAML name ID: %s" #~ msgstr "Расшифрованный идентификатор имени SAML: %s" #~ msgid "saml:Conditions, current time: %s is before the start time: %s" #~ msgstr "saml:Conditions, текущее время: %s раньше времени начала: %s" #~ msgid "saml:Conditions, current time: %s is after the end time: %s" #~ msgstr "saml:Conditions, текущее время: %s позже времени окончания: %s" #~ msgid "saml:Subject, current time is before the start time" #~ msgstr "SAML:Subject, текущее время раньше времени начала" #~ msgid "saml:Subject, current time is after the end time" #~ msgstr "SAML:Subject, текущее время позже времени окончания" #~ msgid "Can not get saml:Assertion or saml:EncryptedAssertion from IdP" #~ msgstr "" #~ "Невозможно получить SAML:Assertion или SAML:EncryptedAssertion от IdP" #~ msgid "Succeeded to verify the signature under " #~ msgstr "Подпись успешно подтверждена" #~ msgid "Failed to verify the signature under " #~ msgstr "Подпись не подтверждена" #~ msgid "" #~ "The NameID inside request is the same as the NameID from the tls " #~ "authentication: %s" #~ msgstr "" #~ "Параметр 
NameID в запросе идентичен NameID при проверке подлинности TLS: %" #~ "s" #~ msgid "" #~ "Access database %s from server %s port %s, with user %s and password %s" #~ msgstr "" #~ "Доступ к базе данных %s на сервере %s по порту %s, как пользователь %s с " #~ "паролем %s" #~ msgid "Can't establish connection to mysql database" #~ msgstr "Не удалось установить связь с базой данных mysql" #~ msgid "Is connected to database? %s" #~ msgstr "Есть ли связь с базой данных? %s" #~ msgid "Query: %s" #~ msgstr "Запрос: %s" #~ msgid "Get result array with %d rows" #~ msgstr "Получен массив результатов из %d строк" #~ msgid "Can not find StatusCode" #~ msgstr "Не обнаружен StatusCode" #~ msgid "" #~ "SAML Assertion parsed from SP Service:\n" #~ "%s" #~ msgstr "" #~ "Утверждение SAML выделенное из сервиса SP:\n" #~ "%s" #~ msgid "Can not get SAMLAssertion SecAttr from outgoing message AuthContext" #~ msgstr "" #~ "Невозможно получить SAMLAssertion SecAttr из исходящего сообщения " #~ "AuthContext" #~ msgid "MessageAuthContext can not be parsed from outgoing message" #~ msgstr "Невозможно выделить MessageAuthContext из исходящего сообщения" #~ msgid "Process: POST" #~ msgstr "Процесс: POST" #~ msgid "SOAP body does not include any request node" #~ msgstr "Тело SOAP не содержит запросов" #~ msgid "Request: %s" #~ msgstr "Запрос: %s" #~ msgid "There is no X509Request node in the request message" #~ msgstr "В запросе отсутствует элемент X509Request" #~ msgid "Composed DN: %s" #~ msgstr "Создан DN: %s" #~ msgid "Process: %s: not supported" #~ msgstr "Процесс: %s: не поддерживается" #~ msgid "get|put [object ...]" #~ msgstr "get|put [объект ...]" #~ msgid "" #~ "The arcacl command retrieves/sets permissions (ACL) of data or computing " #~ "objects." #~ msgstr "" #~ "Команда arcacl используется для проверки и присвоения прав доступа (ACL) " #~ "данным или вычислительному заданию." #~ msgid "Unsupported command %s." #~ msgstr "Неподдерживаемая команда %s" #~ msgid "Cannot read specified jobID file: %s" #~ msgstr "Не удаётся прочесть указанный файл, содержащий ярлыки задач: %s" #~ msgid "No objects given" #~ msgstr "Объекты не указаны" #~ msgid "Processing data object %s" #~ msgstr "Обрабатывается объект данных %s" #~ msgid "Data object %s is not valid URL." #~ msgstr "Файловый объект %s не является допустимым URL." #~ msgid "" #~ "Data object %s is not supported. Only GACL-enabled GridFTP servers are " #~ "supported yet." #~ msgstr "" #~ "Тип %s не поддерживается. Пока что поддерживаются только GridFTP серверы " #~ "с GACL." #~ msgid "URL %s is not supported." #~ msgstr "URL %s не поддерживается." #~ msgid "Object for stdout handling failed." #~ msgstr "Сбой обработки объекта stdout." #~ msgid "Object for stdin handling failed." #~ msgstr "Сбой обработки объекта stdin." #~ msgid "ACL transfer FAILED: %s" #~ msgstr "Сбой пересылки ACL: %s" #~ msgid "" #~ "The arcmigrate command is used for migrating queued jobs to another " #~ "resource.\n" #~ "Note that migration is only supported between A-REX powered resources." #~ msgstr "" #~ "Команда arcmigrate используется для миграции ожидающих задач на другой " #~ "ресурс.\n" #~ "Миграция поддерживается только между службами A-REX." 
#~ msgid "Cannot write job IDs of submitted jobs to file (%s)" #~ msgstr "Невозможно записать ярлыки запущенных задач в файл (%s) " #~ msgid "" #~ "Migration of job (%s) succeeded, but killing the job failed - it will " #~ "still appear in the job list" #~ msgstr "" #~ "Успешно завершена миграция задачи (%s), но прервать задачу не удалось - " #~ "она будет присутствовать в списке задач" #~ msgid "" #~ "Migration of job (%s) succeeded, but cleaning the job failed - it will " #~ "still appear in the job list" #~ msgstr "" #~ "Успешно завершена миграция задачи (%s), но очистить задачу не удалось - " #~ "она будет присутствовать в списке задач" #~ msgid "Job migration summary:" #~ msgstr "Сводка перезасылки задач:" #~ msgid "%d of %d jobs were migrated" #~ msgstr "%d из %d задач были перезасланы" #~ msgid "The following %d were not migrated" #~ msgstr "Следующие %d не были перезасланы" #~ msgid "OpenSSL Error -- %s" #~ msgstr "Ошибка OpenSSL -- %s" #~ msgid "Creating and sending soap request" #~ msgstr "Создание и отправка запроса SOAP" #~ msgid "URL of SLCS service" #~ msgstr "URL службы SLCS" #~ msgid "Identity provider name" #~ msgstr "Имя провайдера идентификационной информации" #~ msgid "User account to identity provider" #~ msgstr "" #~ "Учётная запись пользователя у провайдера идентификационной информации" #~ msgid "Password for user account to identity provider" #~ msgstr "Пароль учётной записи у провайдера идентификационной информации" #~ msgid "Key size of the private key (512, 1024, 2048)" #~ msgstr "Длина секретного ключа (512, 1024, 2048)" #~ msgid "Private key passphrase" #~ msgstr "Пароль секретного ключа:" #~ msgid "passphrase" #~ msgstr "пароль" #~ msgid "Lifetime of the certificate, start with current time, hour as unit" #~ msgstr "Период действия сертификата, начиная с текущего момента, в часах" #~ msgid "period" #~ msgstr "период" #~ msgid "Store directory for key and signed certificate" #~ msgstr "Место для хранения закрытого ключа и подписанного сертификата" #~ msgid "directory" #~ msgstr "каталог" #~ msgid "" #~ "The VOMS server with the information:\n" #~ "\t%s\"\n" #~ "can not be reached, please make sure it is available." #~ msgstr "" #~ "Невозможно связаться с сервером VOMS с информацией:\n" #~ "\t%s\"\n" #~ "Пожалуйста, проверьте, доступен ли этот сервер." #~ msgid "Error: can't read policy file: %s" #~ msgstr "Ошибка: невозможно прочесть файл политик: %s" #~ msgid "" #~ "One of the elements 'Exact', 'UpperBoundedRange', 'LowerBoundedRange', " #~ "'Range', 'Min' or 'Max' was expected." #~ msgstr "" #~ "Ожидался один из элементов 'Exact', 'UpperBoundedRange', " #~ "'LowerBoundedRange', 'Range', 'Min' или 'Max'." #~ msgid "" #~ "Combinations of 'Exact', 'Range', 'UpperBoundedRange'/'LowerBoundedRange' " #~ "and 'Max'/'Min' are not supported." #~ msgstr "" #~ "Комбинации 'Exact', 'Range', 'UpperBoundedRange'/'LowerBoundedRange' и " #~ "'Max'/'Min' не поддерживаются." #~ msgid "Called SAML2SSOHTTPClient constructor" #~ msgstr "Вызван конструктор SAML2SSOHTTPClient" #~ msgid "Relaystate %s" #~ msgstr "Значение RelayState: %s" #~ msgid "Performing SSO with %s " #~ msgstr "Выполняется SSO с %s " #~ msgid "The IdP login is %s" #~ msgstr "Имя пользователя IdP: %s" #~ msgid "Retrieving the remote SimpleSAMLphp installation failed!" #~ msgstr "Сбой доступа к удалённой службе SimpleSAMLphp!" #~ msgid "Getting from Confusa to the IdP page failed!" #~ msgstr "Сбой перехода с Confusa на страницу IdP!" #~ msgid "Successfully redirected from Confusa to the IdP login!" 
#~ msgstr "Успешное перенаправление с Confusa на вход в IdP!" #~ msgid "Getting the user consent for SSO failed!" #~ msgstr "Сбой получения согласия пользователя на SSO!" #~ msgid "Successfully logged in to the IdP!" #~ msgstr "Успешный вход в IdP!" #~ msgid "Directing back from the IdP to Confusa failed!" #~ msgstr "Сбой обратного перенаправления с IdP на Confusa!" #~ msgid "Successfully redirected back from the IdP to Confusa!" #~ msgstr "Успешное обратное перенаправление с IdP на Confusa!" #~ msgid "The used session cookies for the about page is %s" #~ msgstr "Используемые куки для страницы с информацией: %s" #~ msgid "The retrieved DN is %s" #~ msgstr "Полученное выделенное имя (DN): %s" #~ msgid "The location to which the GET is performed is %s" #~ msgstr "Назначение операции GET: %s" #~ msgid "Approving CSR on Confusa's approve page %s" #~ msgstr "Одобрение запроса CSR на странице одобрения Confusa %s" #~ msgid "The cookie sent with approve is %s" #~ msgstr "Куки-файл, посланный с одобрением: %s" #~ msgid "The server location is %s " #~ msgstr "Сервер расположен на %s " #~ msgid "The request URL is %s" #~ msgstr "URL запроса: %s" #~ msgid "Sending OAuth request to signed URL %s" #~ msgstr "Отправка запроса OAuth на подписанный URL %s" #~ msgid "Please login at the following URL " #~ msgstr "Пожалуйста, войдите в систему по данному URL " #~ msgid "Press enter to continue\n" #~ msgstr "Нажмите enter, чтобы продолжить\n" #~ msgid "The about-you request URL is %s" #~ msgstr "URL запроса о данных пользователя: %s" #~ msgid "Approving the certificate signing request at %s" #~ msgstr "Одобряется запрос подписи сертификата на %s" #~ msgid "The OAuth request URL is %s" #~ msgstr "URL запроса OAuth: %s" #~ msgid "The request is NULL!" #~ msgstr "Отсутствует субъект запроса!" #~ msgid "No characters were read from the BIO in public key extraction" #~ msgstr "" #~ "Ни одного символа не было считано с BIO при извлечении открытого ключа" #~ msgid "Could not find any digest for the given name" #~ msgstr "Невозможно найти сводку для указанного имени" #~ msgid "SHA1Sum appears to be empty!" #~ msgstr "Похоже, отсутствует SHA1Sum!" #~ msgid "Could not create a certificate request for subject %s" #~ msgstr "Невозможно создать запрос сертификата для субъекта %s" #~ msgid "Trying to get content %s from XML element, size %d" #~ msgstr "Попытка извлечь содержимое %s из элемента XML, размер %d" #~ msgid "Failed to parse XML file!" #~ msgstr "Сбой при разборе файла формата XML!" 
#~ msgid "extract_body_information(): Body elements not found in passed string" #~ msgstr "" #~ "extract_body_information(): Элемент Body не обнаружен в переданной строке" #~ msgid "post_2_ssoservice_redirect URL is %s" #~ msgstr "URL post_2_ssoservice_redirect: %s" #~ msgid "The consent_page is %s" #~ msgstr "consent_page: %s" #~ msgid "SAML2SSOHTTPClient::processConsent()" #~ msgstr "SAML2SSOHTTPClient::processConsent()" #~ msgid "Trying to open confirm site %s" #~ msgstr "Попытка открыть подтверждённый сайт %s" #~ msgid "Found action is %s" #~ msgstr "Обнаруженное действие: %s" #~ msgid "Post-IdP-authentication action is %s" #~ msgstr "Действие проверки подлинности после IdP: %s" #~ msgid "Used session cookies for the assertion consumer are %s" #~ msgstr "Использованные маркёры для проверки утверждений: %s" #~ msgid "Got over the actual IP login 2 to %s, cookie %s " #~ msgstr "Подключение собственно через вход IP к %s, куки-файл %s " #~ msgid "Posting username/password with the following session cookie %s to %s" #~ msgstr "Передача имени/пароля через следующий куки-файл сессии: %s на %s" #~ msgid "The idp_login_post_info cookie is %s, while the sent cookie was %s" #~ msgstr "" #~ "Куки-файл idp_login_post_info cookie является %s, тогда как отправленный " #~ "куки-файл был %s" #~ msgid "Getting SAML response" #~ msgstr "Ожидание отклика SAML" #~ msgid "Calling post-IdP site %s with relay state %s" #~ msgstr "Вызывается сервер post-IdP %s со статусом передачи %s" #~ msgid "Cookies %s" #~ msgstr "Куки %s" #~ msgid "Called HakaClient::processConsent()" #~ msgstr "Вызван HakaClient::processConsent()" #~ msgid "Checking if consent is necessary" #~ msgstr "Проверяем, необходимо ли согласие пользователя" #~ msgid "User consent to attribute transfer is necessary" #~ msgstr "Необходимо согласие пользователя на передачу атрибутов" #~ msgid "" #~ "Your identity provider will send the following information to the SLCS " #~ "service:" #~ msgstr "" #~ "Следующая информация будет послана Вашим провайдером идентификации на " #~ "сервер SLCS:" #~ msgid "==============================================================================" #~ msgstr "==============================================================================" #~ msgid "Do you consent to the release of that information? (y/n) " #~ msgstr "Согласны ли Вы на передачу этой информации? (y/n)" #~ msgid "Consent confirm redirection URL is %s, cookies %s" #~ msgstr "URL перенаправления при подтверждённом согласии - %s, куки-файлы %s" #~ msgid "" #~ "Missing reference to factory and/or module. It is unsafe to use Globus in " #~ "non-persistent mode - LFC code is disabled. Report to developers." #~ msgstr "" #~ "Отсутствует указание на фабрику и/или модуль. Использование Globus в " #~ "неопределённом режиме небезопасно - обращение к LFC заблокировано. " #~ "Свяжитесь с разработчиками." 
#~ msgid "Cthread_init() error: %s" #~ msgstr "Ошибка Cthread_init(): %s" #~ msgid "LFC resolve timed out" #~ msgstr "Истекло время ожидания разбора LFC" #~ msgid "Error finding replicas: %s" #~ msgstr "Ошибка обнаружения копий: %s" #~ msgid "LFC resolve returned no entries" #~ msgstr "Разбор LFC не выдал записей" #~ msgid "File does not exist in LFC" #~ msgstr "Этот файл не занесён в LFC" #~ msgid "Skipping invalid location: %s - %s" #~ msgstr "Пропускается неверный адрес: %s - %s" #~ msgid "Replica %s already exists for LFN %s" #~ msgstr "Реплика %s уже существует для LFN %s" #~ msgid "Duplicate replica location: %s" #~ msgstr "Идентичное местонахождение реплики: %s" #~ msgid "Resolve: checksum: %s" #~ msgstr "Разбор: контрольная сумма: %s" #~ msgid "Resolve: size: %llu" #~ msgstr "Разбор: размер: %llu" #~ msgid "Resolve: modified: %s" #~ msgstr "Разбор: время изменения: %s" #~ msgid "LFN is missing in LFC (needed for replication)" #~ msgstr "В LFC отсутствует LFN (необходимо для тиражирования)" #~ msgid "LFN already exists in LFC" #~ msgstr "LFN уже зарегистрирован в LFC" #~ msgid "Error starting session: %s" #~ msgstr "Ошибка запуска сессии: %s" #~ msgid "Using supplied guid %s" #~ msgstr "Используется предоставленный guid %s" #~ msgid "Error creating LFC entry: %s" #~ msgstr "Ошибка создания записи каталога LFC: %s" #~ msgid "Error finding info on LFC entry %s which should exist: %s" #~ msgstr "" #~ "Ошибка обнаружения информации о записи LFC %s, которая должна " #~ "существовать: %s" #~ msgid "Error creating LFC entry %s, guid %s: %s" #~ msgstr "Ошибка создания записи каталога LFC %s, guid %s: %s" #~ msgid "Error entering metadata: %s" #~ msgstr "Ошибка при вводе метаданных: %s" #~ msgid "Warning: only md5 and adler32 checksums are supported by LFC" #~ msgstr "" #~ "Предупреждение: LFC поддерживает только проверочные суммы типа md5 и " #~ "adler32" #~ msgid "No GUID defined for LFN - probably not preregistered" #~ msgstr "" #~ "Для LFN не задан GUID - возможно, не пройдена предварительная регистрация" #~ msgid "Error adding replica: %s" #~ msgstr "Ошибка добавления реплики: %s" #~ msgid "Entering checksum type %s, value %s, file size %llu" #~ msgstr "" #~ "Заносится проверочная сумма типа %s, со значением %s, размер файла %llu" #~ msgid "Failed to remove LFN in LFC - You may need to do it by hand" #~ msgstr "" #~ "Не удалось стереть LFN в LFC - возможно, Вам придётся делать это вручную" #~ msgid "Location is missing" #~ msgstr "Отсутствует расположение" #~ msgid "Error getting replicas: %s" #~ msgstr "Ошибка получения реплик: %s" #~ msgid "Failed to remove location from LFC: %s" #~ msgstr "Ошибка при удалении местонахождения из LFC: %s" #~ msgid "Failed to remove LFC directory: directory is not empty" #~ msgstr "Не удалось стереть директорию LFC: директория не пуста" #~ msgid "Failed to remove LFC directory: %s" #~ msgstr "Ошибка при удалении каталога LFC: %s" #~ msgid "Failed to remove LFN in LFC: %s" #~ msgstr "Сбой удаления LFN из LFC: %s" #~ msgid "Error listing file or directory: %s" #~ msgstr "Ошибка вывода файла или каталога: %s" #~ msgid "Not a directory" #~ msgstr "Не является каталогом" #~ msgid "Error opening directory: %s" #~ msgstr "Ошибка при открытии каталога: %s" #~ msgid "Error listing directory: %s" #~ msgstr "Ошибка вывода каталога: %s" #~ msgid "Error listing replicas: %s" #~ msgstr "Ошибка перечисления реплик: %s" #~ msgid "Creating LFC directory %s" #~ msgstr "Создание каталога LFC %s" #~ msgid "Error creating required LFC dirs: %s" #~ msgstr "Ошибка создания 
требуемых директорий LFC: %s" #~ msgid "Cannot rename to root directory" #~ msgstr "Невозможно переименовать в корневой каталог" #~ msgid "Error renaming %s to %s: %s" #~ msgstr "Ошибка переименовывания %s в %s: %s" #~ msgid "Error finding LFN from GUID %s: %s" #~ msgstr "Ошибка извлечения LFN с помощью GUID %s: %s" #~ msgid "GUID %s resolved to LFN %s" #~ msgstr "GUID %s принадлежит LFN %s" #~ msgid "Mismatching protocol/host in bulk resolve!" #~ msgstr "Несовпадающий протокол/сервер в массовом разборе!" #~ msgid "Cannot use a mixture of GUIDs and LFNs in bulk resolve" #~ msgstr "В массовом разборе нельзя использовать смесь GUID-ов и LFN-ов" #~ msgid "Bulk resolve returned no entries" #~ msgstr "Массовый разбор не обнаружил записей" #~ msgid "GUID %s, SFN %s" #~ msgstr "GUID %s, SFN %s" #~ msgid "LFC returned more results than we asked for!" #~ msgstr "LFC выдаёт больше результатов, чем надо!" #~ msgid "Invalid dataset name: %s" #~ msgstr "Неверное название набора данных: %s" #~ msgid "Invalid DQ2 URL %s" #~ msgstr "Недопустимый URL DQ2: %s" #~ msgid "Could not obtain information from AGIS" #~ msgstr "Не удалось получить информацию из AGIS" #~ msgid "No suitable endpoints found in AGIS" #~ msgstr "Не обнаружено подходящих точек входа в AGIS" #~ msgid "Proxy certificate does not have ATLAS VO extension" #~ msgstr "У сертификата доверенности нет расширения ВО ATLAS" #~ msgid "Locations of dataset %s are cached" #~ msgstr "Местоположения набора данных %s в кэше" #~ msgid "No such dataset: %s" #~ msgstr "Нет такого набора данных: %s" #~ msgid "Malformed DQ2 response: %s" #~ msgstr "Искажённый отзыв DQ2: %s" #~ msgid "Dataset %s: DUID %s" #~ msgstr "Набор данных %s: DUID %s" #~ msgid "Location: %s" #~ msgstr "Расположение: %s" #~ msgid "DQ2 returned %s" #~ msgstr "DQ2 ответил %s" #~ msgid "Duplicate location of file %s" #~ msgstr "Дублирующееся расположение файла %s" #~ msgid "Site %s is not deterministic and cannot be used" #~ msgstr "Узел %s не определён однозначно и не может быть использован" #~ msgid "Site %s not found in AGIS info" #~ msgstr "Узел %s не обнаружен в информации AGIS" #~ msgid "Reading cached AGIS data from %s" #~ msgstr "Чтение кэшрованных данных AGIS с %s" #~ msgid "Cannot read cached AGIS info from %s, will re-download: %s" #~ msgstr "" #~ "Не удалось прочесть информацию AGIS с %s, будет произведена перезагрузка: " #~ "%s" #~ msgid "Cached AGIS info is out of date, will re-download" #~ msgstr "Информация AGIS в кэше устарела, будет загружена заново" #~ msgid "Could not refresh AGIS info, cached version will be used: %s" #~ msgstr "" #~ "Не удалось обновить информацию AGIS, будет использована кэшированная " #~ "версия: %s" #~ msgid "Could not download AGIS info: %s" #~ msgstr "Не удалось загрузить информацию AGIS: %s" #~ msgid "AGIS returned %s" #~ msgstr "AGIS ответил %s " #~ msgid "Could not create file %s" #~ msgstr "Не удалось создать файл %s" #~ msgid "Badly formatted output from AGIS" #~ msgstr "Неверно сформированный отзыв AGIS" #~ msgid "%s -> %s" #~ msgstr "%s -> %s" #~ msgid "Recieved token length: %i" #~ msgstr "Длина полученного токена: %i" #~ msgid "GSS accept security context failed: %i/%i%s" #~ msgstr "Сбой принятия контекста безопасности GSS: %i/%i%s" #~ msgid "GSS accept security context: %i/%i" #~ msgstr "Принятие контекста безопасности GSS: %i/%i" #~ msgid "Returned token length: %i" #~ msgstr "Длина выданного токена: %i" #~ msgid "GSS unwrap failed: %i/%i%s" #~ msgstr "Сбой развёртывания GSS: %i/%i%s" #~ msgid "GSS unwrap: %i/%i" #~ msgstr 
"Развёртывание GSS: %i/%i" #~ msgid "Sent token length: %i" #~ msgstr "Длина отправленного токена: %i" #~ msgid "Security check failed in GSI MCC for incoming message" #~ msgstr "Не прошла проверка безопасности в GSI MCC для входящего сообщения" #~ msgid "Security check failed in GSI MCC for outgoing message" #~ msgstr "Не прошла проверка безопасности в GSI MCC для исходящего сообщения" #~ msgid "GSS wrap failed: %i/%i%s" #~ msgstr "Сбой свёртывания GSS: %i/%i%s" #~ msgid "GSS wrap: %i/%i" #~ msgstr "Свёртывание GSS: %i/%i" #~ msgid "Could not resolve peer side's hostname" #~ msgstr "Невозможно разобрать доменное имя узла партнёра" #~ msgid "Peer host name to which this client will access: %s" #~ msgstr "Доменное имя узла, к которому будет совершён доступ: %s" #~ msgid "GSS import name failed: %i/%i%s" #~ msgstr "Сбой извлечения имени GSS: %i/%i%s" #~ msgid "GSS init security context failed: %i/%i%s" #~ msgstr "Сбой инициализации контекста безопасности GSS: %i/%i%s" #~ msgid "GSS init security context: %i/%i" #~ msgstr "Инициализация контекста безопасности GSS: %i/%i" #~ msgid "No payload during GSI context initialisation" #~ msgstr "Отсутствует полезная нагрузка при инициализации контекста GSI" #~ msgid "Transfer protocol is TLS or SSL3" #~ msgstr "Протокол передачи TLS или SSL3" #~ msgid "Transfer protocol is GLOBUS SSL" #~ msgstr "Протокол передачи GLOBUS SSL" #~ msgid "Transfer protocol is SSL2" #~ msgstr "Протокол передачи SSL2" #~ msgid "Transfer protocol is GSI" #~ msgstr "Протокол передачи GSI" #~ msgid "input token length: %i" #~ msgstr "Длина входного токена: %i" #~ msgid "GSS wrap/unwrap failed: %i/%i%s" #~ msgstr "Сбой свёртывания/развёртывания GSS: %i/%i%s" #~ msgid "Output token length: %i" #~ msgstr "Длина выходного токена: %i" #~ msgid "password sources" #~ msgstr "источники пароля" #~ msgid "" #~ "There are %d NSS base directories where the certificate, key, and module " #~ "datbases live" #~ msgstr "" #~ "Обнаружено %d основных директорий NSS, содержащих базы данных " #~ "сертификатов, ключей и модулей" #~ msgid "Writing to xrootd is not (yet) supported" #~ msgstr "Запись по протоколу xrootd (пока) не поддерживается" #~ msgid "Cannot (yet) remove files through xrootd" #~ msgstr "Невозможно (пока) удалить файл через xrootd" #~ msgid "Cannot (yet) create directories through xrootd" #~ msgstr "Невозможно (пока) создать каталог через xrootd" #~ msgid "Cannot (yet) rename files through xrootd" #~ msgstr "Невозможно (пока) переименовать файл через xrootd" #~ msgid "Rucio returned malormed xml: %s" #~ msgstr "Rucio возвратил некорректный XML: %s" #~ msgid "" #~ "Matchmaking, MaxDiskSpace*1024 >= DiskSpace - CacheDiskSpace problem, " #~ "ExecutionTarget: %d MB (MaxDiskSpace); JobDescription: %d MB (DiskSpace), " #~ "%d MB (CacheDiskSpace)" #~ msgstr "" #~ "Сравнение; MaxDiskSpace*1024 >= DiskSpace - несовпадение CacheDiskSpace, " #~ "у назначения для исполнения: %d MB (MaxDiskSpace); в описании задачи: %d " #~ "MB (DiskSpace), %d MB (CacheDiskSpace)" #~ msgid "" #~ "Matchmaking, WorkingAreaFree*1024 >= DiskSpace - CacheDiskSpace problem, " #~ "ExecutionTarget: %d MB (MaxDiskSpace); JobDescription: %d MB (DiskSpace), " #~ "%d MB (CacheDiskSpace)" #~ msgstr "" #~ "Сравнение; WorkingAreaFree*1024 >= DiskSpace - несовпадение " #~ "CacheDiskSpace, у назначения для исполнения: %d MB (MaxDiskSpace); в " #~ "описании задачи: %d MB (DiskSpace), %d MB (CacheDiskSpace)" #~ msgid " State: %s (%s)" #~ msgstr " Состояние: %s (%s)" #~ msgid "Renewal of EMI ES jobs is not supported" #~ msgstr 
"Возобновление задач EMI ES не поддерживается" #~ msgid "" #~ "Could not convert the bartender attribute value (%s) to an URL instance " #~ "in configuration file (%s)" #~ msgstr "" #~ "Не удалось преобразовать значение атрибута bartender (%s) в файле " #~ "настроек в URL (%s)" #~ msgid "Command PASV/SPAS" #~ msgstr "Команда PASV/SPAS" #~ msgid "Wrong number in maxload: %s" #~ msgstr "Недопустимое число в maxload: %s" #~ msgid "Wrong number in maxloadshare: %s" #~ msgstr "Недопустимое число в maxloadshare: %s" #~ msgid "The type of share is not set in maxloadshare" #~ msgstr "Тип квоты не указан в maxloadshare" #~ msgid "share_limit should be located after maxloadshare" #~ msgstr "share_limit должен располагаться после maxloadshare" #~ msgid "The name of share is not set in share_limit" #~ msgstr "Название квоты не указано в share_limit" #~ msgid "Wrong number in share_limit: %s" #~ msgstr "Недопустимое число в share_limit: %s" #~ msgid "" #~ "'newdatastaging' configuration option is deprecated, 'enable_dtr' should " #~ "be used instead" #~ msgstr "" #~ "Опция настроек 'newdatastaging' теперь называется 'enable_dtr'; " #~ "пожалуйста, используйте новое название" #~ msgid "Resume of EMI ES jobs is not supported" #~ msgstr "Продолжение задач EMI ES не поддерживается" #~ msgid "Failed to read input passphrase" #~ msgstr "Не удалось прочесть пароль" #~ msgid "Input phrase is too short (at least %d char)" #~ msgstr "Пароль слишком короткий (используйте по крайней мере %d символов)" #~ msgid "Password input error - code %lu" #~ msgstr "Ошибка ввода пароля - код ошибки %lu" #~ msgid "Password is too short, need at least %u charcters" #~ msgstr "Пароль слишком короткий, используйте хотя бы %u символов." #~ msgid "Password is too long, need at most %u characters" #~ msgstr "Слишком длинный пароль, требуется не более %u символов" #~ msgid "" #~ "ERROR: A computing resource using the GridFTP interface was requested, but" #~ msgstr "" #~ "ОШИБКА: Был запрошен вычислительный ресурс с интерфейсом GridFTP, но" #~ msgid "" #~ " the corresponding plugin could not be loaded. Is the plugin " #~ "installed?" #~ msgstr "" #~ " соответствующий модуль не может быть подгружен. Вы установили этот " #~ "модуль?" #~ msgid "" #~ " If not, please install the package 'nordugrid-arc-plugins-globus'." #~ msgstr "" #~ " Если нет, пожалуйста, установите пакет 'nordugrid-arc-plugins-" #~ "globus'." #~ msgid "" #~ " Depending on your type of installation the package name might " #~ "differ. " #~ msgstr "" #~ " Имя пакета может отличаться, в зависимости от типа вашей системы. " #~ msgid "" #~ "Error: Unable to parse limit in VectorLimitExceededFault response from " #~ "service to an 'int': %s" #~ msgstr "" #~ "Ошибка: Невозможно разобрать предел в отзыве сервиса " #~ "VectorLimitExceededFault как 'int': %s" #~ msgid "" #~ "%s is not a directory, it is needed for the client to function correctly" #~ msgstr "" #~ "%s не является каталогом. 
Он необходим для нормальной работы клиента" #~ msgid "Created empty ARC job list file: %s" #~ msgstr "Создан пустой файл для записи задач ARC: %s" #~ msgid "ERROR: Failed to lock job list file %s" #~ msgstr "Ошибка: Не удалось заблокировать файл списка задач %s" #~ msgid "Please try again later, or manually clean up lock file" #~ msgstr "" #~ "Пожалуйста, попытайтесь заново попозже, или удалите файл блокировки " #~ "вручную" #~ msgid "Could not write meta file %s" #~ msgstr "Не удалось записать мета-файл %s" #~ msgid "DTR %s: Transfer failed: %s" #~ msgstr "DTR %s: Сбой передачи: %s" #~ msgid "DTR %s: No locations defined for %s" #~ msgstr "DTR %s: Не определены расположения %s" #~ msgid "" #~ "DTR %s: Request:\n" #~ "%s" #~ msgstr "" #~ "DTR %s: Запрос:\n" #~ "%s" #~ msgid "" #~ "DTR %s: Response:\n" #~ "%s" #~ msgstr "" #~ "DTR %s: Отклик:\n" #~ "%s" #~ msgid "DTR %s: Failed to cancel: %s" #~ msgstr "DTR %s: Сбой прерывания: %s" #~ msgid "DTR %s: %s" #~ msgstr "DTR %s: %s" #~ msgid "DTR %s: Failed locating credentials" #~ msgstr "DTR %s: Сбой обнаружения параметров доступа" #~ msgid "DTR %s: Failed to initiate client connection" #~ msgstr "DTR %s: Сбой запуска соединения с клиентом" #~ msgid "DTR %s: Client connection has no entry point" #~ msgstr "DTR %s: Отсутствует точка входа для соединения с клиентом" #~ msgid "DTR %s: Initiating delegation procedure" #~ msgstr "DTR %s: Инициализация процедуры делегирования" #~ msgid "DTR %s: Failed to initiate delegation credentials" #~ msgstr "DTR %s: Сбой инициализации делегируемых прав доступа" #~ msgid "DTR %s: Running command: %s" #~ msgstr "DTR %s: Выполняется команда %s" #~ msgid "DTR %s: Error creating cache" #~ msgstr "DTR %s: Ошибка создания кэша" #~ msgid "DTR %s: Forcing re-download of file %s" #~ msgstr "DTR %s: Принудительная перезагрузка файла %s" #~ msgid "DTR %s: Cached file is locked - should retry" #~ msgstr "DTR %s: Кэшированный файл заблокирован - должна быть новая попытка" #~ msgid "DTR %s: Failed to initiate cache" #~ msgstr "DTR %s: Сбой инициализации кэша" #~ msgid "DTR %s: File %s is cached (%s) - checking permissions" #~ msgstr "DTR %s: Файл %s занесён в кэш (%s) - проверка прав доступа" #~ msgid "DTR %s: Permission checking failed" #~ msgstr "DTR %s: Проверка доступа не пройдена" #~ msgid "DTR %s: Permission checking passed" #~ msgstr "DTR %s: Проверка доступа пройдена успешно" #~ msgid "DTR %s: Source modification date: %s" #~ msgstr "DTR %s: Время изменения источника: %s" #~ msgid "DTR %s: Cache creation date: %s" #~ msgstr "DTR %s: Время создания кэша: %s" #~ msgid "DTR %s: Cache file valid until: %s" #~ msgstr "DTR %s: Файл в кэше действителен до: %s" #~ msgid "DTR %s: Cached file is outdated, will re-download" #~ msgstr "DTR %s: Кэшированный файл устарел, будет перезагружен" #~ msgid "DTR %s: Cached copy is still valid" #~ msgstr "DTR %s: Кэшированная копия всё ещё действительна" #~ msgid "DTR %s: No locations for destination different from source found" #~ msgstr "DTR %s: Не обнаружено адресов цели, отличных от источника" #~ msgid "DTR %s: Checking %s" #~ msgstr "DTR %s: Проверяется %s" #~ msgid "DTR %s: Removing %s" #~ msgstr "DTR %s: Удаляется %s" #~ msgid "DTR %s: Linking/copying cached file to %s" #~ msgstr "DTR %s: Создание ссылки/копирование файла из кэша в %s" #~ msgid "WS-interface messages will be logged to %s" #~ msgstr "Сообщения интерфейса WS будут записаны в журнал %s" #~ msgid "Bad number in logsize: %s" #~ msgstr "Недопустимое значение logsize: %s" #~ msgid "Starting grid-manager thread" #~ msgstr 
"Запускается поток Грид-менеджера" #~ msgid "Destroying jobs and waiting for underlying processes to finish" #~ msgstr "Уничтожение задач и ожидание окончания соответствующих им процессов" #~ msgid "Cannot open database" #~ msgstr "Не удалось открыть базу данных" #~ msgid "Cannot abort transaction %s" #~ msgstr "Невозможно прервать транзакцию %s" #~ msgid "put: deadlock handling: try again" #~ msgstr "put: обработка взаимоблокировки, новая попытка" #~ msgid "put: cannot abort transaction: %s" #~ msgstr "put: невозможно оборвать транзакцию: %s" #~ msgid "put: %s" #~ msgstr "put: %s" #~ msgid "get: deadlock handling, try again" #~ msgstr "get: обработка взаимоблокировки, новая попытка" #~ msgid "get: cannot abort transaction: %s" #~ msgstr "get: невозможно оборвать транзакцию: %s" #~ msgid "get: %s" #~ msgstr "get: %s" #~ msgid "del: deadlock handling, try again" #~ msgstr "del: обработка взаимоблокировки, новая попытка" #~ msgid "del: cannot abort transaction: %s" #~ msgstr "del: невозможно оборвать транзакцию: %s" #~ msgid "del: %s" #~ msgstr "del: %s" #~ msgid "get_doc_name: deadlock handling, try again" #~ msgstr "get_doc_name: обработка взаимоблокировки, новая попытка" #~ msgid "get_doc_names: cannot abort transaction: %s" #~ msgstr "get_doc_names: невозможно прервать транзакцию: %s" #~ msgid "Error during the transaction: %s" #~ msgstr "Ошибка при транзакции: %s" #~ msgid "checkpoint: %s" #~ msgstr "контрольная точка: %s" #~ msgid "Failed to create dir %s for temp proxies: %s" #~ msgstr "Не удалось создать каталог %s для временных доверенностей: %s" #~ msgid "Could not write temporary file: %s" #~ msgstr "Не удалось записать временный файл: %s" #~ msgid "Error creating file %s with mkstemp(): %s" #~ msgstr "Ошибка создания файла %s с помощью mkstemp(): %s" #~ msgid "Error writing to tmp lock file %s: %s" #~ msgstr "Ошибка записи во временный файл блокировки %s: %s" #~ msgid "Warning: closing tmp lock file %s failed" #~ msgstr "Предупреждение: сбой закрытия временного файла блокировки %s" #~ msgid "Source probably does not exist" #~ msgstr "Источник скорее всего не существует" #~ msgid "Problems resolving destination" #~ msgstr "Проблемы с разбором направления" #~ msgid "%s: Reprocessing RSL failed" #~ msgstr "%s: Сбой переобработки RSL" #~ msgid "" #~ "Dumping job description aborted because no resource returned any " #~ "information" #~ msgstr "" #~ "Обрыв распечатки описания задачи, т.к. ни один из ресурсов не предоставил " #~ "информацию" #~ msgid "Creating a PDP client" #~ msgstr "Создаётся клиент PDP" #~ msgid "job.Resources.QueueName = %s" #~ msgstr "job.Resources.QueueName = %s" #, fuzzy #~ msgid "PrepareToGet request timed out after %i seconds" #~ msgstr "Не удаётся соединиться с %s:%s (%s), connection timed out" #, fuzzy #~ msgid "Bring online request timed out after %i seconds" #~ msgstr "Не удаётся соединиться с %s:%s (%s), connection timed out" #, fuzzy #~ msgid "PrepareToPut request timed out after %i seconds" #~ msgstr "Не удаётся соединиться с %s:%s (%s), connection timed out" #, fuzzy #~ msgid "Ls request timed out after %i seconds" #~ msgstr "Не удаётся соединиться с %s:%s (%s), connection timed out" #, fuzzy #~ msgid "copy request timed out after %i seconds" #~ msgstr "Не удаётся соединиться с %s:%s (%s), connection timed out" #~ msgid "Could not load GFAL DMC. Please check that this plugin is installed" #~ msgstr "" #~ "Не удалось подгрузить GFAL DMC. Пожалуйста, убедитесь, что этот " #~ "подключаемый модуль установлен." 
#~ msgid "Unable to remove file %s: No valid proxy found" #~ msgstr "Невозможно удалить файл %s: Не обнаружено приемлемой доверенности" #~ msgid "Unable to transfer file %s: No valid credentials found" #~ msgstr "" #~ "Невозможно переслать файл %s: Не обнаружено действительных параметров " #~ "доступа" #~ msgid "Unable to register file %s: No valid credentials found" #~ msgstr "" #~ "Невозможно зарегистрировать файл %s: Не обнаружено действительных " #~ "параметров доступа" #~ msgid "Unable to copy from %s: No valid credentials found" #~ msgstr "" #~ "Невозможно скопировать из %s: Не обнаружено действительных параметров " #~ "доступа" #, fuzzy #~ msgid "arrayOfFileStatuses" #~ msgstr "arrayOfFileStatuses" #, fuzzy #~ msgid "Failed to create reading thread" #~ msgstr "Не удалось создать поток для привязки к LDAP (%s)" #, fuzzy #~ msgid "Failed to create writing thread" #~ msgstr "Не удалось создать поток для привязки к LDAP (%s)" #, fuzzy #~ msgid "DTR %s: Failed to resolve any source replicas" #~ msgstr "Не удалось загрузить источник \"%s\": %s" #, fuzzy #~ msgid "DTR %s: Failed to resolve destination replicas" #~ msgstr "Не удалось зарегистрировать назначение: %s" #, fuzzy #~ msgid "DTR %s: Failed to pre-register destination" #~ msgstr "Сбой при регистрации нового файла/направления" #, fuzzy #~ msgid "DTR %s: Failed checking source replica" #~ msgstr "Ошибка проверки исходного раздела %1." #, fuzzy #~ msgid "DTR %s: Error resolving destination replicas" #~ msgstr "DTR %s: Обнаружение существующих копий назначения" #, fuzzy #~ msgid "DTR %s: Failed to resolve destination" #~ msgstr "Ошибка при записи в цель" #, fuzzy #~ msgid "DTR %s: Failed to prepare source" #~ msgstr "Сбой при доступе к источнику(-ам) конфигурации: %s\n" #, fuzzy #~ msgid "DTR %s: Failed to prepare destination" #~ msgstr "Ошибка при записи в цель" #, fuzzy #~ msgid "The retrieved dn is %s" #~ msgstr "принимается за 1 Гц " #~ msgid "xacml authz request: %s" #~ msgstr "Запрос на допуск XACML: %s" #~ msgid "xacml authz response: %s" #~ msgstr "Отклик допуска XACML: %s" #, fuzzy #~ msgid "Failed initing handle" #~ msgstr "Невозможно инициализировать мутекс" #~ msgid "Bad authentication information" #~ msgstr "Неприемлемая информация для проверки подлинности" #~ msgid "nss db to be accesses: %s\n" #~ msgstr "будет использована база данных NSS %s\n" #~ msgid "Removing temp proxy %s" #~ msgstr "Удаление временной доверенности %s" #~ msgid "Failed to create temporary file in %s - %s" #~ msgstr "Сбой создания временного файла в %s - %s" #~ msgid "Failed to create control (%s) or session (%s) directories" #~ msgstr "Не удалось создать контрольный каталог (%s) или каталог сессии (%s)" #~ msgid "Failed to store configuration into temporary file: %s" #~ msgstr "Сбой записи настроек во временный файл: %s" #, fuzzy #~ msgid "Failed to create/detect control (%s) or session (%s) directories" #~ msgstr "Не удалось создать контрольный каталог (%s) или каталог сессии (%s)" #~ msgid "pretend utility is run by user with given name" #~ msgstr "сделать вид, что утилита запущена пользователем с другим именем" #~ msgid "pretend utility is run by user with given UID" #~ msgstr "сделать вид, что утилита запущена пользователем с другим UID" #~ msgid "Error processing configuration - EXITING" #~ msgstr "Сбой при обработке настроек - ВЫХОД" #~ msgid "No suitable users found in configuration - EXITING" #~ msgstr "В настройках не указано, от чьего имени производить запуск - ВЫХОД" #~ msgid "Can't recognize own username - EXITING" #~ msgstr "Попытка 
запуска от имени неизвестного пользователя - ВЫХОД" #, fuzzy #~ msgid "Processing grid-manager configuration" #~ msgstr "Не удалось обработать настройки grid-manager" #~ msgid "Usage: inputcheck [-h] [-d debug_level] RSL_file [proxy_file]" #~ msgstr "" #~ "Использование: inputcheck [-h] [-d уровень_отладки] RSL_file " #~ "[файл_доверенности]" #~ msgid "Unrecognized option" #~ msgstr "Неопознанная опция" #~ msgid "Option processing error" #~ msgstr "Ошибка при обработке опции" #~ msgid "Environment could not be set up" #~ msgstr "Не удалось настроить среду" #~ msgid "User %s is not valid" #~ msgstr "Недействительный пользователь %s" #~ msgid "No configuration file found" #~ msgstr "Не найден файл настроек" #~ msgid "" #~ "Gridmap user list feature is not supported anymore. Plase use @filename " #~ "to specify user list." #~ msgstr "" #~ "Перехисление пользователей в gridmap больше не поддерживается. " #~ "Пожалуйста, укажите @файл со списком пользователей." #~ msgid "Can't read users in specified file %s" #~ msgstr "Невозможно прочесть пользователей в указанном файле %s" #~ msgid "Wrong number in speedcontrol: " #~ msgstr "Недопустимое число в speedcontrol: " #~ msgid "Wrong option in securetransfer" #~ msgstr "Неверная опция в securetransfer" #~ msgid "Wrong option in passivetransfer" #~ msgstr "Неверная опция в passivetransfer" #~ msgid "Wrong option in norootpower" #~ msgstr "Неверная опция в norootpower" #~ msgid "Wrong option in localtransfer" #~ msgstr "Неверная опция в localtransfer" #~ msgid "defaultttl is empty" #~ msgstr "пустое значение defaultttl" #~ msgid "Junk in defaultttl command" #~ msgstr "Бессмыслица в команде defaultttl" #~ msgid "Junk in maxrerun command" #~ msgstr "Бессмыслица в команде maxrerun" #~ msgid "diskspace is empty" #~ msgstr "пустое значение diskspace" #~ msgid "junk in diskspace command" #~ msgstr "бессмыслица в команде diskspace" #~ msgid "Wrong number in diskspace command" #~ msgstr "Недопустимое число в команде diskspace" #~ msgid "Junk in defaultlrms command" #~ msgstr "Бессмыслица в команде defaultlrms" #~ msgid "Timeout for plugin is missing" #~ msgstr "Отсутствует тайм-аут для подключаемого модуля" #~ msgid "preferredpattern value is missing" #~ msgstr "Отсутствует значение preferredpattern" #~ msgid "Wrong option in newdatastaging" #~ msgstr "Неверная опция в newdatastaging" #~ msgid "Bad URL in delivery_service: %s" #~ msgstr "Недопустимый URL в delivery_service: %s" #~ msgid "Could not add file:/local to delivery services" #~ msgstr "Невозможно добавить file:/local к службам доставки" #~ msgid "Can't read user list in specified file %s" #~ msgstr "Невозможно прочесть список пользователей в указанном файле %s" #~ msgid "Warning: creation of user \"%s\" failed" #~ msgstr "Предупреждение: не удалось создать пользователя \"%s\"" #~ msgid "" #~ "Gridmap user list feature is not supported anymore. Please use @filename " #~ "to specify user list." #~ msgstr "" #~ "Список пользователей в Gridmap больше не поддерживается. Пожалуйста, " #~ "используйте @filename для указания списка пользователей." 
#~ msgid "No username entries in control directory" #~ msgstr "Отсутствуют элементы имени пользователя в контрольном каталоге" #~ msgid "User %s for helperUtility is not configured" #~ msgstr "Не сконфигурирован пользователь %s для модуля helperUtility" #~ msgid "Added user : %s" #~ msgstr "Добавлен пользователь : %s" #~ msgid "%s: No configured user found for uid %i" #~ msgstr "%s: Не обнаружено сконфигурированных пользователей для uid %i" #~ msgid "%s: Added" #~ msgstr "%s: Добавлено" #~ msgid "Error with cache configuration: %s. Cannot clean up files for job %s" #~ msgstr "" #~ "Ошибка в настройке кэша: %s. Невозможно очистить файлы для задачи %s" #~ msgid "Wrong user name" #~ msgstr "Неверное имя пользователя" #~ msgid "No configuration found for user %s in A-REX configuration" #~ msgstr "Не найдено настроек для пользователя %s в настройках A-REX" #~ msgid "No caches configured for user %s" #~ msgstr "Кэш для пользователя %s не настроен" #~ msgid "Peer certificate cannot be extracted" #~ msgstr "Невозможно извлечь сертификат контакта" #~ msgid "Peer cert verification fail" #~ msgstr "Не удалось подтвердить действительность сертификата узла партнёра" #~ msgid "" #~ "Certificate cannot be extracted, make sure it is the case where client " #~ "side authentication is turned off" #~ msgstr "" #~ "Невозможно извлечь сертификат - убедитесь, что проверка подлинности на " #~ "стороне клиента отключена " #~ msgid "Peer certificate chain cannot be extracted" #~ msgstr "Невозможно извлечь цепочку сертификатов узла партнёра" #~ msgid "Can not read file %s with list of trusted VOMS DNs" #~ msgstr "" #~ "Не удалось прочесть файл %s со списком уникальных имён доверяемых " #~ "серверов VOMS" #~ msgid "Can not assign CA location - %s" #~ msgstr "Не удалось приписать местонахождение агентства - %s" #~ msgid "Can not load certificate file - %s" #~ msgstr "Невозможно подгрузить файл сертификата - %s" #~ msgid "Can not load key file - %s" #~ msgstr "Не удалось подгрузить файл секретного ключа - %s" #~ msgid "Private key %s does not match certificate %s" #~ msgstr "Секретный ключ %s не совпадает с сертификатом %s" #~ msgid "Certificate %s failed Globus signing policy" #~ msgstr "Сертификат %s не соответствует политике подписи Globus" #~ msgid "Resumation of CREAM jobs is not supported" #~ msgstr "Перезапуск задач CREAM не поддерживается" #~ msgid "EMIESClient was not created properly." #~ msgstr "EMIESClient не был создан надлежащим образом." #~ msgid "Missing ActivityManager in response from %s" #~ msgstr "Отсутствует элемент ActivityManager в отзыве с %s" #~ msgid "Current transfer FAILED: %s - %s" #~ msgstr "Текущая передача НЕ СОСТОЯЛАСЬ: %s - %s" #~ msgid "" #~ "The inputsandbox JDL attribute is referencing a non-regular file (%s)." #~ msgstr "атрибут JDL inputsandbox JDL ссылается на необычный файл (%s)." 
#~ msgid "NSS set domestic policy failed (%s) on certificate database %s" #~ msgstr "" #~ "Сбой установки локальной политики NSS (%s) для базы данных сертификатов %s" #~ msgid "Failed while transferring data (mostly timeout)" #~ msgstr "Сбой при передаче данных (обычно истечение срока ожидания)" #~ msgid "Cannot create directory %s/%s for cache: %s" #~ msgstr "Не удалось создать каталог %s/%s для кэша: %s" #~ msgid "Failed uploading file: %s - %s" #~ msgstr "Не удалось отгрузить файл %s - %s" #~ msgid "Failed uploading file: %s" #~ msgstr "Не удалось отгрузить файл: %s" #, fuzzy #~ msgid " Cluster: %s" #~ msgstr "Кластер" #, fuzzy #~ msgid " Management Interface: %s" #~ msgstr "Интерфейс управления горячими клавишами" #~ msgid "File download failed: %s - %s" #~ msgstr "Ошибка загрузи файла: %s - %s" #, fuzzy #~ msgid "" #~ "Ignoring job (%s), the Job::InterfaceName attribute must be specified" #~ msgstr "Задача (%s) игнорируется, необходимо указывать атрибут Job::Flavour" #~ msgid "Broker %s could not be created" #~ msgstr "Брокер %s не может быть создан" #~ msgid "Loaded Broker %s" #~ msgstr "Подгружен брокер %s" #~ msgid "" #~ "Will not query endpoint (%s) because another thread is already querying it" #~ msgstr "" #~ "Точка доступа (%s) не будет опрошена, так как её уже опрашивает другой " #~ "поток" #, fuzzy #~ msgid " Local information system URL: %s" #~ msgstr "Канал информации о системе" #, fuzzy #~ msgid " Submission interface name: %s" #~ msgstr "Не найден интерфейс с именем %s" #~ msgid "Location information:" #~ msgstr "Сведения о расположении:" #~ msgid "Domain information:" #~ msgstr "Сведения о домене:" #~ msgid " Service name: %s" #~ msgstr " Имя службы: %s" #~ msgid " Service type: %s" #~ msgstr " Тип службы: %s" #~ msgid "Manager information:" #~ msgstr "Информация о системе управления ресурсом:" #~ msgid " Resource manager version: %s" #~ msgstr " Версия системы управления: %s" #~ msgid "Execution environment information:" #~ msgstr "Информация о рабочих средах:" #~ msgid "Check: obtained creation date: %s" #~ msgstr "Проверка: получена дата создания: %s" #~ msgid "meta_get_data: checksum: %s" #~ msgstr "meta_get_data: контрольная сумма: %s" #~ msgid "meta_get_data: size: %llu" #~ msgstr "meta_get_data: размер: %llu" #~ msgid "meta_get_data: created: %s" #~ msgstr "meta_get_data: создан: %s" #~ msgid "Failed to remove location from LFC" #~ msgstr "Ошибка при удалении местонахождения из LFC" #~ msgid "Contacting %s" #~ msgstr "Запрос %s" #, fuzzy #~ msgid "Warning: can't connect to RLS server %s: %s" #~ msgstr "" #~ "Невозможно подключится к серверу. «%s» не является допустимым адресом." #~ msgid "" #~ "Missing reference to factory and/or module. It is unsafe to use Globus in " #~ "non-persistent mode - RLS code is disabled. Report to developers." #~ msgstr "" #~ "Отсутствует указание на фабрику и/или модуль. Использование Globus в " #~ "неопределённом режиме небезопасно - Обращение к RLS заблокировано. " #~ "Свяжитесь с разработчиками." 
#, fuzzy #~ msgid "Warning: Failed to obtain attributes from %s: %s" #~ msgstr "VOMS: сбой при разборе атрибутов в сертификате атрибута (AC)" #~ msgid "Attribute: %s - %s" #~ msgstr "Атрибут: %s - %s" #~ msgid "RLS URL must contain host" #~ msgstr "RLS URL должен содержать имя узла" #, fuzzy #~ msgid "Source must contain LFN" #~ msgstr "RLS URL должен содержать имя узла" #, fuzzy #~ msgid "Destination must contain LFN" #~ msgstr "" #~ " Назначение \"%s\" должно быть каталогом \n" #~ " %s " #, fuzzy #~ msgid "No locations found for destination" #~ msgstr "Не найдено физических адресов для назначения: %s" #~ msgid "LFN already exists in replica" #~ msgstr "LFN уже зарегистрирован для реплики" #~ msgid "Failed to create GUID in RLS: %s" #~ msgstr "Не удалось создать GUID в RLS: %s" #, fuzzy #~ msgid "There is same LFN in %s" #~ msgstr "Нет подпроекта %1 в SUBDIRS" #~ msgid "Failed to add LFN-GUID to RLS: %s" #~ msgstr "Не удалось добавить LFN-GUID в RLS: %s" #, fuzzy #~ msgid "Warning: failed to add attribute to RLS: %s" #~ msgstr "Не удалось добавить LFN-GUID в RLS: %s" #, fuzzy #~ msgid "Failed to retrieve LFN/LRC: %s" #~ msgstr "Не могу получить список каталогов!" #~ msgid "No LFNs found in %s" #~ msgstr "В %s не обнаружено логических имён файлов" #~ msgid "lfn: %s(%s) - %s" #~ msgstr "LFN: %s(%s) - %s" #~ msgid "lfn: %s - pfn: %s" #~ msgstr "LFN: %s - PFN: %s" #, fuzzy #~ msgid "Rename: failed to rename file" #~ msgstr "" #~ "Не удается переименовать файл '%s' в '%s': сбой функции g_rename(): %s" #, fuzzy #~ msgid "DTR %s: No SOAP response" #~ msgstr "Нет ответа SOAP" #, fuzzy #~ msgid "DTR %s: Starting bulk request" #~ msgstr "Принимать _непрямые запросы" #~ msgid "Cancelling all DTRs" #~ msgstr "Прерывание всех запросов DTR" #, fuzzy #~ msgid "Received back DTR %s" #~ msgstr "Получен запрос DTR %s, состояние %s" #~ msgid "Job submission failed, no more possible targets" #~ msgstr "Не удалось заслать задачу, возможные назначения отсутствуют" #~ msgid "Unable to print job description: No matching target found." #~ msgstr "" #~ "Невозможно вывести описание задачи: Не найдено ни одного подходящего " #~ "назначения." #~ msgid "Fileset copy for this kind of source is not supported" #~ msgstr "" #~ "Копирование набора файлов из источника данного типа не поддерживается" #~ msgid "Failed listing metafiles" #~ msgstr "Перечисление метафайлов не удалось" #~ msgid "Failed listing files" #~ msgstr "Перечисление файлов не удалось" #~ msgid "%s%s" #~ msgstr "%s%s" #~ msgid "Delete failed: %s" #~ msgstr "Сбой при удалении: %s" #, fuzzy #~ msgid "Rename failed: %s" #~ msgstr "переименовать не удалось, %s (%s -> %s)." #, fuzzy #~ msgid "Rename failed: %s (%s)" #~ msgstr "переименовать не удалось, %s (%s -> %s)." #~ msgid "service" #~ msgstr "служба" #~ msgid "The arcsrmping command is a ping client for the SRM service." #~ msgstr "Команда arcsrmping является аналогом утилиты ping для служб SRM." #~ msgid "The service argument is a URL to an SRM service." #~ msgstr "Аргументом службы должен быть URL сервера SRM" #, fuzzy #~ msgid "ExecutionTarget %s added to ExecutionTargetSet" #~ msgstr "Файл добавлен в проект" #~ msgid "AuthRequest(inmsg) = " #~ msgstr "AuthRequest(inmsg) = " #~ msgid "Starting:" #~ msgstr "Запуск:" #~ msgid "Stopping:" #~ msgstr "Остановка:" #~ msgid "%(sn)s.%(rn)s called" #~ msgstr "вызов %(sn)s.%(rn)s" #, fuzzy #~ msgid "No URLs to connect to (in %s)" #~ msgstr "В выделении нет клонов." 
#~ msgid "ERROR connecting to" #~ msgstr "Ошибка связи с" #~ msgid "ERROR connecting to all of these:" #~ msgstr "Ошибка связи с каждым из:" #~ msgid "ID" #~ msgstr "Идентификатор объекта" #~ msgid "ZODBStore constructor called" #~ msgstr "Вызван конструктор ZODBStore" #~ msgid "datadir:" #~ msgstr "datadir:" #~ msgid "TransDBStore constructor called" #~ msgstr "Вызван конструктор TransDBStore" #~ msgid "db environment opened" #~ msgstr "Окружение базы данных открыто" #~ msgid "couldn't find DeadlockRetries, using 5 as default" #~ msgstr "не удалось найти DeadlockRetries, по умолчанию используется 5" #~ msgid "couldn't find SleepTime, using %d as default" #~ msgstr "не удалось найти SleepTime, по умолчанию используется %d" #~ msgid "got deadlock - retrying" #~ msgstr "взаимоблокировка - новая попытка" #~ msgid "Got deadlock error" #~ msgstr "Ошибка взаимоблокировки" #~ msgid "Got rep_dead_handle error" #~ msgstr "Получена ошибка rep_dead_handle" #~ msgid "Error getting %s" #~ msgstr "Ошибка получения %s" #~ msgid "got DBLockDeadlockError" #~ msgstr "получена ошибка DBLockDeadlockError" #~ msgid "retrying transaction" #~ msgstr "новая попытка транзакции" #~ msgid "Deadlock exception, giving up..." #~ msgstr "Ошибка взаимоблокировки - сдаюсь..." #, fuzzy #~ msgid "Read-only db. I'm not a master." #~ msgstr "umount: невозможно перемонтировать %s только для чтения\n" #~ msgid "cannot delete non-existing entries" #~ msgstr "невозможно удалить несуществующие записи" #~ msgid "Error setting %s" #~ msgstr "Ошибка присвоения %s" #~ msgid "db environment closed" #~ msgstr "Окружение базы данных закрыто" #~ msgid "error closing environment" #~ msgstr "ошибка при закрытии среды" #~ msgid "PickleStore constructor called" #~ msgstr "Вызван конструктор PickleStore" #~ msgid "filename:" #~ msgstr "файл:" #~ msgid "StringStore constructor called" #~ msgstr "Вызван конструктор StringStore" #~ msgid "CachedStringStore constructor called" #~ msgstr "Вызван конструктор CachedStringStore" #, fuzzy #~ msgid "Failed to create parent directory, continuing anyway: %s" #~ msgstr "Не удалось создать архивный каталог %s: %s" #, fuzzy #~ msgid "Failed to output the cert req as ascii format" #~ msgstr "Не удалось запустить скрипт очистки кэша" #~ msgid "Not invoking janitor because it's not enabled in the config file" #~ msgstr "Janitor не будет запущен, т.к. 
он не активирован в настройках" #~ msgid "Janitor not enabled and job contains non-deployed RTEs" #~ msgstr "Janitor не запущен, а задача требует отсутствующую среду исполнения" #~ msgid "Janitor not installed and job contains non-deployed RTEs" #~ msgstr "" #~ "Janitor не установлен, а задача требует отсутствующую среду исполнения" #~ msgid "Failed to deploy Janitor" #~ msgstr "Не удалось запустить Janitor" #~ msgid "Janitor timeout while deploying Dynamic RTE(s)" #~ msgstr "" #~ "Время ожидания Janitor вышло при установке динамической среды исполнения" #~ msgid "Janitor not enabled and there are missing RTE(s)" #~ msgstr "Janitor не запущен, а среда исполнения отсутствует" #~ msgid "Janitor failed to deploy Dynamic RTE(s)" #~ msgstr "Janitor не смог установить динамическую среду исполнения" #~ msgid "" #~ "Janitor timeout while removing Dynamic RTE(s) associations (ignoring)" #~ msgstr "" #~ "Время ожидания Janitor вышло при удалении связей динамической среды " #~ "исполнения (игнорируется)" #~ msgid "Janitor failed to remove Dynamic RTE(s) associations (ignoring)" #~ msgstr "" #~ "Janitor не смог удалить связи динамической среды исполнения (игнорируется)" #~ msgid "Janitor executable not found at %s" #~ msgstr "Исполняемый файл Janitor не найден в %s" #~ msgid "Can't run %s" #~ msgstr "Невозможно выполнить %s" #~ msgid "Can't start %s" #~ msgstr "Невозможно запустить %s" #~ msgid "Stopping Master Thread." #~ msgstr "Останавливается основной поток" #~ msgid "Master Thread is deleting threads." #~ msgstr "Головной поток уничтожает потоки." #~ msgid "Master Thread stopped." #~ msgstr "Основной поток остановлен" #~ msgid "Thread %d, Pipes failed" #~ msgstr "Поток %d, сбой перенаправления потоков" #~ msgid "Thread %d, Fork failed" #~ msgstr "Поток %d, сбой почкования" #~ msgid "Thread %d, child is terminating." #~ msgstr "Поток %d, обрывается дочерний поток" #~ msgid "Thread %d is ready." #~ msgstr "Поток %d готов" #~ msgid "Thread %d got Task %d." #~ msgstr "Поток %d получил задание %d." #~ msgid "Thread %d, Input is not SOAP" #~ msgstr "Поток %d, вход не в формате SOAP" #~ msgid "" #~ "Thread %d: Task %d Result:\n" #~ "%s\n" #~ msgstr "" #~ "Поток %d: Задание %d Результат:\n" #~ "%s\n" #~ msgid "Thread %d, TaskQueue returned empty Task." #~ msgstr "Поток %d, TaskQueue вернул пустое задание." #~ msgid " Deconstructing Web Service" #~ msgstr "Ликвидируется веб-служба" #~ msgid " Flushing set and queue" #~ msgstr "Сбрасывются задания и очередь" #~ msgid " Deconstructing is waiting for PerlProcessor" #~ msgstr "Ликвидация ожидает PerlProcessor" #~ msgid " Deconstructing is waiting for TaskQueue" #~ msgstr "Ликвидация ожидает TaskQueue" #~ msgid " Deconstructing is waiting for TaskSet" #~ msgstr "Ликвидация ожидает TaskSet" #~ msgid " Deconstructing Web Service ... done" #~ msgstr "Ликвидируется веб-служба ... готово" #~ msgid "Creating fault! Reason: \"%s\"" #~ msgstr "Создаётся отчёт о сбое! Причина: \"%s\"" #~ msgid "DREWEBSERVICE 1 %d" #~ msgstr "DREWEBSERVICE 1 %d" #~ msgid "DREWEBSERVICE 2 %d" #~ msgstr "DREWEBSERVICE 2 %d" #~ msgid "TaskSet is waiting for objects (%d) still using the set." #~ msgstr "TaskSet ожидает объекты (%d) всё ещё использующие постановку." #~ msgid "Added Task %d to the set. " #~ msgstr "Задание %d добавлено в группу. " #~ msgid "Removed Task %d out of to the set. " #~ msgstr "Задание %d удалено из постановки. " #~ msgid "TaskSet is waiting for objects still using the set." #~ msgstr "TaskSet ожидает объекты всё ещё использующие постановку." 
#~ msgid "Pushed Task %d into the queue. " #~ msgstr "Задание %d переведено в очередь. " #~ msgid "Shifted Task %d out of to the queue. " #~ msgstr "Задание %d передвинуто в очередь. " #~ msgid "Chunk %u: %u - %u" #~ msgstr "Фрагмент %u: %u - %u" #~ msgid "Hopi SlaveMode is active, PUT is only allowed to existing files" #~ msgstr "" #~ "Активирован подчинённый режим Хопи, операция PUT разрешена только для " #~ "существующих файлов" #~ msgid "Removing complete file in slave mode" #~ msgstr "Удаляется весь файл в подчинённом режиме" #~ msgid "Hopi Initialized" #~ msgstr "Hopi запущен" #~ msgid "Hopi DocumentRoot is " #~ msgstr "Hopi DocumentRoot:" #~ msgid "Hopi SlaveMode is on!" #~ msgstr "Включён режим Hopi SlaveMode!" #~ msgid "Hopi shutdown" #~ msgstr "Hopi останавливается" #~ msgid "PUT called" #~ msgstr "Вызван метод PUT" #~ msgid "File size is %u" #~ msgstr "Размер файла: %u" #~ msgid "error reading from HTTP stream" #~ msgstr "Ошибка чтения потока HTTP" #~ msgid "error on write" #~ msgstr "ошибка записи" #~ msgid "Input for PUT operation is neither stream nor buffer" #~ msgstr "Вход операции PUT не является ни потоком, ни буфером" #~ msgid "method=%s, path=%s, url=%s, base=%s" #~ msgstr "метод=%s, путь=%s, URL-адрес=%s, база=%s" #~ msgid "No content provided for PUT operation" #~ msgstr "Не указано содержимое для операции PUT" #~ msgid "Not supported operation" #~ msgstr "Операция не поддерживается" #~ msgid "request node is empty" #~ msgstr "Пустой узел запроса" #~ msgid "Evaluator is not initialized" #~ msgstr "Обработчик не запущен" #~ msgid "Policy(ies) modified - reloading evaluator" #~ msgstr "Норматив(ы) изменен(ы) - перезагрузка анализатора" #~ msgid "NULL response" #~ msgstr "Ответ NULL" #~ msgid "Authorized from Charon service" #~ msgstr "Допущен службой Charon" #~ msgid "" #~ "Not authorized from Charon service; Some of the RequestItem does not " #~ "satisfy Policy" #~ msgstr "" #~ "Не допущен службой Charon; некоторые пункты RequestItem не удовлетворяют " #~ "нормативам" #~ msgid "process: %s: not supported" #~ msgstr "процесс: %s: не поддерживается" #~ msgid "Evaluator: %s" #~ msgstr "Обработчик: %s" #~ msgid "Policy location: %s" #~ msgstr "Расположение правил доступа: %s" #~ msgid "Loading policy from %s" #~ msgstr "Загрузка правил из %s" #~ msgid "Failed loading policy from %s" #~ msgstr "Сбой загрузки правил из %s" #~ msgid "Checking policy modification: %s" #~ msgstr "Проверка изменений в правилах: %s" #~ msgid "Policy removed: %s" #~ msgstr "Правила удалены: %s" #~ msgid "Old policy times: %u/%u" #~ msgstr "Время изменения/создания старых правил: %u/%u" #~ msgid "New policy times: %u/%u" #~ msgstr "Время изменения/создания новых правил: %u/%u" #~ msgid "Policy Decision Request failed" #~ msgstr "Ошибка запроса решения о доступе" #~ msgid "Policy Decision Request succeeded!!!" #~ msgstr "Запрос о принятии решения создан!!!" #~ msgid "ES:CreateActivities: Failed to create new job: %s" #~ msgstr "ES: CreateActivity: Не удалось создать новую задачу: %s" #~ msgid "Not all jobs are cleaned yet" #~ msgstr "Ещё не все задачи вычищены" #~ msgid "Trying again" #~ msgstr "Новая попытка" #~ msgid "Jobs cleaned" #~ msgstr "задач очищено" #~ msgid "Preparing directories" #~ msgstr "Подготовка каталогов" #~ msgid "Empty URL list add to the thread." #~ msgstr "Пустой список URL добавлен к потоку." #~ msgid "Empty message add to the thread." #~ msgstr "В поток добавлено пустое сообщение." 
#~ msgid "Status (%s): Failed" #~ msgstr "Состояние (%s): Сбой" #~ msgid "Status (%s): OK" #~ msgstr "Состояние (%s): Успех" #~ msgid "Empty message won't be send to the neighbors." #~ msgstr "Пустое сообщение не будет разослано соседям." #~ msgid "%s: %d seconds to the next database cleaning." #~ msgstr "%s: %d секунд(ы) до следующей очистки базы данных." #~ msgid "Parsing configuration parameters" #~ msgstr "Обработка параметров настройки" #~ msgid "" #~ "The Endpoint element is defined multiple time in ISIS configuration. The " #~ "'%s' value will be used." #~ msgstr "" #~ "Элемент Endpoint задан несколько раз в настройках ISIS. Будет " #~ "использовано значение '%s'." #~ msgid "Empty endpoint element in the configuration!" #~ msgstr "Пустой элемент endpoint в настройках!" #~ msgid "KeyPath: %s" #~ msgstr "KeyPath: %s" #~ msgid "CertificatePath: %s" #~ msgstr "CertificatePath: %s" #~ msgid "CACertificatesDir: %s" #~ msgstr "CACertificatesDir: %s" #~ msgid "CACertficatePath: %s" #~ msgstr "CACertficatePath: %s" #~ msgid "Missing or empty KeyPath element in the configuration!" #~ msgstr "Пустой или отсутствующий элемент KeyPath в настройках!" #~ msgid "Misisng or empty CertificatePath element in the configuration!" #~ msgstr "Пустой или отсутствующий элемент CertificatePath в настройках!" #~ msgid "Missing or empty ProxyPath element in the configuration!" #~ msgstr "Пустой или отсутствующий элемент ProxyPath в настройках!" #~ msgid "Missing or empty CACertificatesDir element in the configuration!" #~ msgstr "Пустой или отсутствующий элемент CACertificatesDir в настройках!" #~ msgid "Missing or empty CACertificatePath element in the configuration!" #~ msgstr "Пустой или отсутствующий элемент CACertificatePath в настройках!" #~ msgid "" #~ "Configuration error. Retry: \"%d\" is not a valid value. Default value " #~ "will be used." #~ msgstr "" #~ "Ошибйка настроек. Retry: \"%d\" не является допустимым значением. Будет " #~ "использовано значение по умолчанию." #~ msgid "" #~ "The Retry element is defined multiple time in ISIS configuration. The '%" #~ "d' value will be used." #~ msgstr "" #~ "Элемент Retry задан несколько раз в настройках ISIS. Будет использовано " #~ "значение '%d'." #~ msgid "" #~ "Configuration error. Sparsity: \"%d\" is not a valid value. Default value " #~ "will be used." #~ msgstr "" #~ "Ошибйка настроек. Sparsity: \"%d\" не является допустимым значением. " #~ "Будет использовано значение по умолчанию." #~ msgid "" #~ "The Sparsity element is defined multiple time in ISIS configuration. The " #~ "'%d' value will be used." #~ msgstr "" #~ "Элемент Sparsity задан несколько раз в настройках ISIS. Будет " #~ "использовано значение '%d'." #~ msgid "Sparsity: %d" #~ msgstr "Sparsity: %d" #~ msgid "" #~ "Configuration error. ETValid: \"%s\" is not a valid value. Default value " #~ "will be used." #~ msgstr "" #~ "Ошибйка настроек. ETValid: \"%s\" не является допустимым значением. Будет " #~ "использовано значение по умолчанию." #~ msgid "Configuration error. ETValid is empty. Default value will be used." #~ msgstr "" #~ "Ошибйка настроек. Значение ETValid не задано. Будет использовано значение " #~ "по умолчанию." #~ msgid "ETValid: %d seconds" #~ msgstr "ETValid: %d секунд" #~ msgid "" #~ "Configuration error. ETRemove: \"%s\" is not a valid value. Default value " #~ "will be used." #~ msgstr "" #~ "Ошибйка настроек. ETRemove: \"%s\" не является допустимым значением. " #~ "Будет использовано значение по умолчанию." #~ msgid "Configuration error. ETRemove is empty. 
Default value will be used." #~ msgstr "" #~ "Ошибйка настроек. Значение ETRemove не задано. Будет использовано " #~ "значение по умолчанию." #~ msgid "ETRemove: %d seconds" #~ msgstr "ETRemove: %d секунд" #~ msgid "Invalid database path definition" #~ msgstr "Неверное определение пути к базе данных" #~ msgid "The InfoProvider element in ISIS configuration is empty." #~ msgstr "Элемент InfoProvider в настройках ISIS пуст." #~ msgid "RemoveRegistrations message sent to neighbors." #~ msgstr "Сообщение RemoveRegistrations разослано соседям." #~ msgid "ISIS (%s) has %d more thread%s" #~ msgstr "В ISIS (%s) ещё %d поток%s" #~ msgid "ISIS (%s) destroyed." #~ msgstr "ISIS (%s) ликвидирован." #~ msgid "Query received: %s" #~ msgstr "Получен запрос: %s" #~ msgid "Register received: ID=%s; EPR=%s; MsgGenTime=%s" #~ msgstr "Получена регистрация: ID=%s; EPR=%s; MsgGenTime=%s" #~ msgid "RemoveRegistrations received: ID=%s" #~ msgstr "Получен запрос RemoveRegistrations: ID=%s" #~ msgid "GetISISList received" #~ msgstr "Получен запрос GetISISList" #~ msgid "Connect received" #~ msgstr "Получен запрос Connect" #~ msgid "Communication error: input is not SOAP" #~ msgstr "Сбой передачи данных: ввод не в формате SOAP" #~ msgid "Neighbors count recalculate from %d to %d (at ISIS %s)" #~ msgstr "Количество соседей пересчитано с %d на %d (для ISIS %s)" #~ msgid "Query failed at %s, choosing new InfoProvider." #~ msgstr "Запрос к %s не удался, выбираем новый InfoProvider." #~ msgid "Remove ISIS (%s) from the list of InfoProviders." #~ msgstr "Удаление ISIS (%s) из списка InfoProviders." #~ msgid "No InfoProvider is available." #~ msgstr "Нет доступных InfoProvider." #~ msgid "Neighbors count: %d" #~ msgstr "Количество соседей: %d" #~ msgid "Connect status (%s): Failed" #~ msgstr "Состояние соединения (%s): Сбой" #~ msgid "Connect status (%s): OK" #~ msgstr "Состояние соединения (%s): Успех" #~ msgid "Database mass updated." #~ msgstr "База данных массово обновлена." #~ msgid "Error converting maxload parameter %s to integer" #~ msgstr "Ошибка преобразования параметра maxload %s в целое" #~ msgid "Setting max downloads to %u" #~ msgstr "Максимальное число загрузок устанавливается на %u" #~ msgid "Failed writing file with inputs" #~ msgstr "Не удалось записать файл с входными ссылками" #~ msgid "Starting child downloader process" #~ msgstr "Запуск дочернего процесса загрузчика" #~ msgid "Failed to run downloader process for job id %s" #~ msgstr "Сбой запуска процесса загрузчика для задачи %s" #~ msgid "%s: child is running" #~ msgstr "%s: дочерний процесс запущен" #~ msgid "Download process for job %s timed out" #~ msgstr "Процесс загрузки для задачи %s превысил время ожидания" #~ msgid "Downloader exited with code: %i" #~ msgstr "Загрузчик завершил работу с кодом выхода: %i" #~ msgid "TargetRetriver%s initialized with %s service url: %s" #~ msgstr "TargetRetriver%s запущен с URL службы %s: %s" #~ msgid "" #~ "Trying to migrate to %s: Migration to a CREAM resource is not supported." #~ msgstr "Попытка миграции на %s: Миграция на ресурс CREAM не поддерживается." #~ msgid "Failed dowloading %s to %s" #~ msgstr "Не удалось загрузить %s в %s" #~ msgid "Migration for EMI ES is not implemented" #~ msgstr "Миграция для EMI ES не реализована" #~ msgid "Collecting Job (%s jobs) information." #~ msgstr "Собирается информация о задачах (%s задач)" #~ msgid "%s directory exist! This job downloaded previously." #~ msgstr "Каталог %s уже существует! Эта задача была загружена ранее." 
#~ msgid "Cancel of EMI ES jobs is not supported" #~ msgstr "Прерывание задач EMI ES не поддерживается" #~ msgid "" #~ "Trying to migrate to %s: Migration to a legacy ARC resource is not " #~ "supported." #~ msgstr "" #~ "Попытка миграции на %s: Миграция на старый ресурс ARC не поддерживается." #~ msgid "" #~ "Missing reference to factory and/or module. It is unsafe to use Globus in " #~ "non-persistent mode - TargetRetriver for ARC0 is disabled. Report to " #~ "developers." #~ msgstr "" #~ "Отсутствует указание на фабрику и/или модуль. Использование Globus в " #~ "неопределённом режиме небезопасно - TargetRetriever для ARC0 " #~ "заблокирован. Свяжитесь с разработчиками." #~ msgid "" #~ "Trying to migrate to %s: Migration to a UNICORE resource is not supported." #~ msgstr "" #~ "Попытка миграции на %s: Миграция на ресурс UNICORE не поддерживается." #~ msgid "Collecting ExecutionTarget (A-REX/BES) information." #~ msgstr "Сбор информации об ExecutionTarget (A-REX/BES)." #~ msgid "Generating BES target: %s" #~ msgstr "Создаётся назначение BES: %s" #~ msgid "" #~ "Multiple execution environments per queue specified for target: \"%s\". " #~ "Execution environment information will be ignored." #~ msgstr "" #~ "Для назначения \"%s\" указаны множественные рабочие среды очередей. " #~ "Информация о рабочих средах игнорируется." #~ msgid "ComputingShare is associated with the ExecutionEnvironment \"%s\"" #~ msgstr "ComputingShare ассоциирована с ExecutionEnvironment \"%s\"" #~ msgid "ExecutionEnvironment \"%s\" located" #~ msgstr "Обнаружена ExecutionEnvironment \"%s\"" #~ msgid "Getting BES jobs is not supported" #~ msgstr "Извлечение задач BES не поддерживается" #~ msgid "targets.size() = %d" #~ msgstr "targets.size() = %d" #~ msgid "Wrong middleware type: %s" #~ msgstr "Недопустимый тип подпрограммного обеспечению: %s" #~ msgid "Found %u %s execution services from the index service at %s" #~ msgstr "" #~ "Обнаружено %u вычислительных сервисов %s через сервис каталога на %s" #~ msgid "" #~ "Matching against job description,following targets possible for " #~ "BenchmarkBroker: %d" #~ msgstr "" #~ "Сравнение с описанием задачи; следующие назначения рассматриваются в " #~ "алгоритме BenchmarkBroker: %d" #~ msgid "%d. Resource: %s; Queue: %s" #~ msgstr "%d. 
Ресурс: %s; Очередь: %s" #~ msgid "Resource will be ranked according to the %s benchmark scenario" #~ msgstr "Назначение будет упорядочено в соответствии с эталонным тестом %s" #~ msgid "Best targets are: %d" #~ msgstr "Наилучшие цели: %d" #~ msgid "FastestQueueBroker is filtering %d targets" #~ msgstr "FastestQueueBroker перебирает %d назначений" #~ msgid "FastestQueueBroker will rank the following %d targets" #~ msgstr "FastestQueueBroker упорядочивает следующие %d назначений" #~ msgid "" #~ "Matching against job description, following targets possible for " #~ "DataBroker: %d" #~ msgstr "" #~ "Сравнение с описанием задачи; следующие назначения рассматриваются в " #~ "алгоритме DataBroker: %d" #~ msgid "" #~ "Matching against job description, following targets possible for " #~ "RandomBroker: %d" #~ msgstr "" #~ "Сравнение с описанием задачи; следующие назначения рассматриваются в " #~ "алгоритме RandomBroker: %d" #~ msgid "Cannot create Python list" #~ msgstr "Невозможно создать список Python" #~ msgid "Private key of the credential object is NULL" #~ msgstr "Закрытый ключ объекта параметров доступа имеет значение NULL" #~ msgid "Unable to get job (%s), job is deleted" #~ msgstr "Невозможно извлечь задачу (%s), задача удалена" #~ msgid "Unable to get job (%s), it has not finished yet" #~ msgstr "Невозможно извлечь задачу (%s), она ещё не завершилась" #~ msgid "Unable to renew job (%s), job already finished" #~ msgstr "Невозможно возобновить задачу (%s), она уже завершилась" #~ msgid "Unable to resume job (%s), job is %s and cannot be resumed" #~ msgstr "" #~ "Невозможно продолжить задачу (%s), задача в состоянии %s не может быть " #~ "продолжена" #~ msgid "" #~ "Unable to resubmit job (%s), job description could not be retrieved " #~ "remotely" #~ msgstr "" #~ "Невозможно перезапустить задачу (%s), описание задачи не может быть " #~ "извлечено с удалённого источника" #~ msgid "Unable to resubmit job (%s), local input file (%s) has changed" #~ msgstr "" #~ "Невозможно перезапустить задачу (%s), локальный входной файл (%s) " #~ "изменился" #~ msgid "Unable to kill job (%s), job is deleted" #~ msgstr "Невозможно прервать задачу (%s), задача удалена" #~ msgid "Unable to kill job (%s), job has already finished" #~ msgstr "Невозможно прервать задачу (%s), она уже завершилась" #~ msgid "Unable to clean job (%s), job has not finished yet" #~ msgstr "Невозможно вычистить задачу (%s), она ещё не завершилась" #~ msgid "Target (%s) was explicitly rejected." #~ msgstr "Цель (%s) явно отклонена." #~ msgid "Possible targets after prefiltering: %d" #~ msgstr "Возможные назначения после предварительного отбора: %d" #~ msgid "Health State: %s" #~ msgstr "Состояние здоровья: %s" #~ msgid "Target sorting not done, sorting them now" #~ msgstr "Назначения не упорядочены, ведётся сортировка" #~ msgid "For this middleware there are no testjobs defined." #~ msgstr "Для этого Грид-ПО пробных задач пока что нет" #~ msgid "For this middleware only %s testjobs are defined." #~ msgstr "Для этого Грид-ПО существуют только следующие тестовые задачи: %s" #~ msgid "FreeSlots = %d; UsedSlots = %d; WaitingJobs = %d" #~ msgstr "Свободных мест = %d; занятых мест = %d; задач в очереди = %d" #~ msgid "Generating computing target: %s" #~ msgstr "Создаётся назначение для вычисления: %s" #~ msgid "" #~ "Multiple execution environments per queue specified for target. Execution " #~ "environment information will be ignored." #~ msgstr "" #~ "Для цели указаны множественные рабочие среды очередей. 
Информация о " #~ "рабочих средах игнорируется." #~ msgid "Found %ld targets" #~ msgstr "Обнаружено %ld назначений" #~ msgid "Resource: %s" #~ msgstr "Ресурс: %s" #~ msgid "Found %ld jobs" #~ msgstr "Обнаружено %ld задач" #~ msgid " URL: %s:%s" #~ msgstr " URL: %s:%s" #~ msgid "TargetRetriever plugin \"%s\" not found." #~ msgstr "Подключаемый модуль TargetRetriever \"%s\" не обнаружен" #~ msgid "TargetRetriever %s could not be created." #~ msgstr "TargetRetriever %s не может быть создан" #~ msgid "Loaded TargetRetriever %s" #~ msgstr "Подгружен TargetRetriever %s" #~ msgid "Overwriting already defined alias \"%s\"" #~ msgstr "Переопределяется уже заданный псевдоним \"%s\"" #~ msgid "Could not resolve alias \"%s\" it is not defined." #~ msgstr "Не удалось разобрать сокращённое название \"%s\" т.к. оно не задано" #~ msgid "" #~ "The defaultservices attribute value contains a wrongly formated element (%" #~ "s) in configuration file (%s)" #~ msgstr "" #~ "Атрибут defaultservices содержит неверно сформулированный элемент (%s) в " #~ "файле настроек (%s)" #~ msgid "" #~ "The defaultservices attribute value contains an unknown servicetype %s at " #~ "%s in configuration file (%s)" #~ msgstr "" #~ "Значение атрибута defaultservices содержит неизвестный тип сервиса %s в %" #~ "s в файле настроек (%s)" #~ msgid "Adding selected service %s:%s" #~ msgstr "Добавляется выбранный сервис %s:%s" #~ msgid "" #~ "The rejectservices attribute value contains a wrongly formated element (%" #~ "s) in configuration file (%s)" #~ msgstr "" #~ "Атрибут rejectservices содержит неверно сформулированный элемент (%s) в " #~ "файле настроек (%s)" #~ msgid "" #~ "The rejectservices attribute value contains an unknown servicetype %s at %" #~ "s in configuration file (%s)" #~ msgstr "" #~ "Значение атрибута rejectservices содержит неизвестный тип сервиса %s в %s " #~ "в файле настроек (%s)" #~ msgid "Adding rejected service %s:%s" #~ msgstr "Блокируется сервис %s:%s" #~ msgid "rejected" #~ msgstr "отклонён" #~ msgid "Cannot resolve alias \"%s\". Loop detected: %s" #~ msgstr "" #~ "Невозможно разобратьсокращённое названив \"%s\". Обнаружена циклическая " #~ "зависимость: %s" #, fuzzy #~ msgid "Cannot resolve alias %s, it is not defined" #~ msgstr "Не удалось разобрать сокращённое название \"%s\" т.к. оно не задано" #~ msgid "Alias name (%s) contains a unknown servicetype %s at %s" #~ msgstr "Псевдоним (%s) содержит неизвестный тип сервиса %s в %s" #, fuzzy #~ msgid "Adding service %s:%s from resolved alias %s" #~ msgstr "Ошибка при добавлении службы. %s" #~ msgid "Alias (%s) contains a wrongly formatted element (%s)" #~ msgstr "Псевдоним (%s) содержит неверно оформленый элемент (%s)" #, fuzzy #~ msgid "DTR %s: Re-resolving destination replicas" #~ msgstr "Локальный &каталог назначения:" #~ msgid "The testjob ID should be 1, 2 or 3.\n" #~ msgstr "Номер тестовой задачи может быть 1, 2 или 3.\n" #~ msgid "Unable to print job description: No target found." #~ msgstr "Невозможно вывести описание задачи: Не найдено ни одного назначения" #~ msgid "" #~ "Cannot find any proxy. arcresub currently cannot run without a proxy.\n" #~ " If you have the proxy file in a non-default location,\n" #~ " please make sure the path is specified in the client configuration " #~ "file.\n" #~ " If you don't have a proxy yet, please run 'arcproxy'!" #~ msgstr "" #~ "Не удалось обнаружить доверенность. 
В этой версии arcresub не работает " #~ "без доверенности.\n" #~ " Если Ваша доверенность хранится в нестандартном месте, пожалуйста,\n" #~ " убедитесь, что в настройках клиента указан правильный путь.\n" #~ " Если же Вы пока не создали доверенность, запустите 'arcproxy'!" #~ msgid "explicitly select or reject a specific resource" #~ msgstr "явным образом выбрать или отсеять указанный ресурс" #~ msgid "explicitly select or reject a specific resource for new jobs" #~ msgstr "явным образом выбрать или отсеять указанный ресурс для новых задач" #~ msgid "explicitly select or reject an index server" #~ msgstr "явным образом выбрать или отсеять каталог ресурсов" #~ msgid "Unable to find JobController for job %s (plugin type: %s)" #~ msgstr "" #~ "Невозможно обнаружить модуль JobController для задачи %s (тип " #~ "подключаемого модуля: %s)" #~ msgid "No jobs selected for cleaning" #~ msgstr "Не выбраны задачи для вычищения" #~ msgid "No jobs selected for migration" #~ msgstr "Не выбраны задачи для перезасылки" #~ msgid "No queuing jobs to migrate" #~ msgstr "Нет задач в очереди для перезасылки" #~ msgid "No jobs selected for resubmission" #~ msgstr "Не выбраны задачи для перезапуска" #~ msgid "service_url request_file" #~ msgstr "service_url request_file" #~ msgid "url of the policy decision service" #~ msgstr "URL службы принятия решений" #~ msgid "path to request file" #~ msgstr "путь к файлу с запросом" #~ msgid "use SAML 2.0 profile of XACML v2.0 to contact the service" #~ msgstr "для связи со службой используется профиль SAML 2.0 XACML v2.0" #~ msgid "path to the certificate file" #~ msgstr "путь к файлу сертификата" #~ msgid "path to the private key file" #~ msgstr "путь к файлу секретного ключа" #~ msgid "" #~ "Cannot find the path of the certificate/key file, and proxy file, please " #~ "setup environment X509_USER_CERT/X509_USER_KEY, or X509_USER_PROXY,or " #~ "setup certificatepath/keypath, or proxypath in a configuration file" #~ msgstr "" #~ "Не удалось найти путь к открытому/закрытому ключу и доверенности. " #~ "Пожалуйста, задайте переменную среды X509_USER_CERT/X509_USER_KEY, или " #~ "X509_USER_PROXY, или значение certificatepath/keypath, или proxypath в " #~ "файле настроек" #~ msgid "" #~ "CA certificate directory: %s is given by X509_CERT_DIR, but it can't been " #~ "accessed." #~ msgstr "" #~ "Каталог сертификатов агентств CA %s задан X509_CERT_DIR, но не может быть " #~ "прочитан." #~ msgid "" #~ "The start time that you set plus validityPeriod: %s is before current " #~ "time: %s.\n" #~ "Please set the time constraints once again." #~ msgstr "" #~ "Указанное время начала с добавлением срока годности %s предшествует " #~ "текущему времени: %s.\n" #~ "Пожалуйста, задайте сроки снова." #~ msgid "" #~ "The start time that you set plus validityPeriod: %s is after current " #~ "time: %s.\n" #~ "The validityPeriod will be shorten to %s." #~ msgstr "" #~ "Указанное время начала с добавлением срока годности %s позже текущего " #~ "времени: %s.\n" #~ "Срок годности будет сокращён до %s." #~ msgid "" #~ "The start time that you set: %s is before current time: %s.\n" #~ "The current time will be used as start time." #~ msgstr "" #~ "Указанное время начала %s предшествует текущему времени: %s.\n" #~ "Текущее время будет использовано в качестве начального." 
#~ msgid "" #~ "The end time that you set: %s is after the start time plus " #~ "validityPeriod: %s.\n" #~ " The validityPeriod will not be changed.\n" #~ msgstr "" #~ "Указанное время окончания %s позже времени начала с добавлением срока " #~ "годности: %s.\n" #~ "Срок годности не будет изменён.\n" #~ msgid "" #~ "The end time that you set: %s is before the start time plus " #~ "validityPeriod: %s.\n" #~ "The validityPeriod will be shorten to: %s." #~ msgstr "" #~ "Указанное время окончания %s предшествует времени начала с добавлением " #~ "срока годности: %s.\n" #~ "Срок годности будет сокращён до: %s." #~ msgid "" #~ "The end time that you set: %s is before start time: %s.\n" #~ "Please set the time constraints once again.\n" #~ msgstr "" #~ "Указанное время окончания %s предшествует времени начала: %s.\n" #~ "Пожалуйста, задайте сроки снова.\n" #~ msgid " Service_ID's number is not equivalent with the EPR's number!" #~ msgstr "Номер Service_ID отличен от номера в EPR!!" #~ msgid "[ISIS testing ...]" #~ msgstr "[тестирование ISIS ...]" #~ msgid "This tiny tool can be used for testing the ISIS's abilities." #~ msgstr "" #~ "Эта скромная утилита может быть использована для тестирования " #~ "возможностей ISIS" #~ msgid "The method are the folows: Query, Register, RemoveRegistration" #~ msgstr "Следующие методы доступны: Query, Register, RemoveRegistration" #~ msgid "define the URL of the Bootstrap ISIS" #~ msgstr "задать URL начального ISIS" #~ msgid "isis" #~ msgstr "ISIS" #~ msgid "define the URL of the ISIS to connect directly" #~ msgstr "задать URL сервера ISIS для прямого доступа" #~ msgid "define which method are use (Query, Register, RemoveRegistration)" #~ msgstr "задать используемый метод (Query, Register, RemoveRegistration)" #~ msgid "method" #~ msgstr "метод" #~ msgid "get neighbors list from the BootstrapISIS" #~ msgstr "получить список соседей с начального ISIS" #~ msgid " ISIS tester start!" #~ msgstr " Запуск тестера ISIS!" #~ msgid " Not enough or too much parameters! %s" #~ msgstr "Недостаток или избыток параметров! %s" #~ msgid "ByteIOBackend datadir:" #~ msgstr "Директория данных ByteIOBackend:" #~ msgid "ByteIOBackend transferdir:" #~ msgstr "Каталог передачи ByteIOBackend:" #~ msgid "ByteIOService transfer dir:" #~ msgstr "Директория передач ByteIOService:" #~ msgid "Subject:" #~ msgstr "Тема:" #~ msgid "checking" #~ msgstr "проверяется" #~ msgid "HopiBackend datadir:" #~ msgstr "Директория данных HopiBackend:" #~ msgid "HopiBackend transferdir:" #~ msgstr "Каталог передачи HopiBackend:" #~ msgid "ApacheBackend datadir:" #~ msgstr "Директория данных ApacheBackend:" #~ msgid "ApacheBackend transferdir:" #~ msgstr "Каталог передачи ApacheBackend:" #~ msgid "Cannot import backend class %(c)s (reason: %(r)s)" #~ msgstr "" #~ "Невозможно импортировать класс внутреннего интерфейса %(c)s (причина: %(r)" #~ "s)" #~ msgid "Cannot import store class" #~ msgstr "Невозможно импортировать класс хранения" #~ msgid "Cannot set CheckPeriod, MinCheckInterval" #~ msgstr "Невозможно выставить CheckPeriod, MinCheckInterval" #~ msgid "Got Librarian URLs from the config:" #~ msgstr "В настройках обнаружен адрес Библиотекаря:" #~ msgid "" #~ "No Librarian URLs and no ISIS URLs found in the configuration: no self-" #~ "healing!" #~ msgstr "" #~ "В настройках не найдены адреса ни Библиотекаря, ни ISIS: " #~ "самовосстановление невозможно!" 
#~ msgid "Got Bartender URLs from the config:" #~ msgstr "В настройках обнаружен адрес Бармена:" #~ msgid "" #~ "No Bartender URLs and no ISIS URLs found in the configuration: no self-" #~ "healing!" #~ msgstr "" #~ "В настройках не найдены адреса ни Бармена, ни ISIS: самовосстановление " #~ "невозможно!" #~ msgid "Getting Librarians from ISISes" #~ msgstr "Получение списка Библиотекарей из ISIS-ов" #~ msgid "Trying to get Librarian from" #~ msgstr "Попытка получить адрес Библиотекаря из" #~ msgid "Got Librarian from ISIS:" #~ msgstr "Получен адрес Боблиотекаря из ISIS:" #~ msgid "Error in isisLibrarianThread: %s" #~ msgstr "Ошибка в isisLibrarianThread: %s" #~ msgid "Getting Bartenders from ISISes" #~ msgstr "Получение списка Барменов из ISIS-ов" #~ msgid "Trying to get Bartender from" #~ msgstr "Попытка получить адрес Бармена из" #~ msgid "Got Bartender from ISIS:" #~ msgstr "Получен адрес Бармена из ISIS:" #~ msgid "Error in isisBartenderThread: %s" #~ msgstr "Ошибка в isisBartenderThread: %s" #~ msgid "Shepherd" #~ msgstr "Чабан" #~ msgid "" #~ "\n" #~ "CHECKSUM OK" #~ msgstr "" #~ "\n" #~ "CHECKSUM в порядке" #~ msgid "" #~ "\n" #~ "CHECKSUM MISMATCH" #~ msgstr "" #~ "\n" #~ "CHECKSUM не совпадает" #~ msgid "checksum refreshed" #~ msgstr "Контрольная сумма обновлена" #~ msgid "\n" #~ msgstr "\n" #~ msgid "" #~ "\n" #~ "\n" #~ "File" #~ msgstr "" #~ "\n" #~ "\n" #~ "Файл" #~ msgid "" #~ "\n" #~ "\n" #~ "I have an invalid replica of file" #~ msgstr "" #~ "\n" #~ "\n" #~ "Обнаружена неверная копия файла" #, fuzzy #~ msgid "ERROR checking checksum of %(rID)s, reason: %(r)s" #~ msgstr "неверный заголовок: ошибка контрольной суммы" #~ msgid "changeState" #~ msgstr "changeState" #~ msgid "" #~ "\n" #~ "\n" #~ msgstr "" #~ "\n" #~ "\n" #~ msgid "Getting AHash URL from the config" #~ msgstr "В настройках обнаружен адрес А-Хэш" #~ msgid "Got AHash URLs:" #~ msgstr "Получены адреса А-Хэш:" #~ msgid "AHash URL found in the configuration." #~ msgstr "В настройках обнаружен адрес А-Хэш" #, fuzzy #~ msgid "Setting running state to True" #~ msgstr "Создание и отправка запроса о состоянии" #~ msgid "No AHash from the config" #~ msgstr "В настройках нет А-Хэш" #, fuzzy #~ msgid "AHash URL and ISIS URL not found in the configuration." #~ msgstr "В настройках не найдены адреса ни Библиотекаря, ни ISIS." #, fuzzy #~ msgid "Trying to get A-Hash from ISISes" #~ msgstr "Попытка извлечь содержимое %s из элемента XML, размер %d" #~ msgid "Trying to get A-Hash from" #~ msgstr "Попытка получить адрес А-Хэш из" #~ msgid "Got A-Hash from ISIS:" #~ msgstr "Получен адрес А-Хэш из ISIS:" #~ msgid "Error in initThread: %s" #~ msgstr "Ошибка в initThread: %s" #, fuzzy #~ msgid "Error in Librarian's checking thread: %s" #~ msgstr "Ошибка создания новой записи в Librarian: %s" #~ msgid "Error processing report message" #~ msgstr "Ошибка обработки отчёта" #~ msgid "Error traversing: %s" #~ msgstr "Ошибка при проходе: %s" #~ msgid "Error in traverseLN method: %s" #~ msgstr "Ошибка метода traverseLN: %s" #~ msgid "CentralAHash constructor called" #~ msgstr "Вызван конструктор CentralAHash" #~ msgid "Error importing" #~ msgstr "Сбой импортирования" #~ msgid "Error importing class" #~ msgstr "Ошибка импортирования класса" #~ msgid "ReplicatedAHash constructor called" #~ msgstr "Вызван конструктор ReplicatedAHash" #~ msgid "sending message of length" #~ msgstr "отправка сообщения длиной" #~ msgid "sendt message, success=%s" #~ msgstr "сообщение отправлено, success=%s" #~ msgid "processing message..." 
#~ msgstr "обработка сообщения..." #~ msgid "processing message... Finished" #~ msgstr "обработка сообщения... Закончена" #~ msgid "Initialized replication environment" #~ msgstr "Инициализирована среда репликации" #~ msgid "Couldn't start replication manager." #~ msgstr "Не удалось запустить менеджер репликации." #~ msgid "master locking" #~ msgstr "блокирование головного узла" #~ msgid "unlocking" #~ msgstr "разблокируется" #~ msgid "unlocked" #~ msgstr "разблокирован" #~ msgid "couldn't unlock" #~ msgstr "не удалось разблокировать" #~ msgid "checkingThread slept %d s" #~ msgstr "checkingThread ожидал %d с" #, fuzzy #~ msgid "Resolved %d deadlocks" #~ msgstr "Распознан псевдоним «%s» -> %s\n" #, fuzzy #~ msgid "wrote ahash list %s" #~ msgstr "конец списка поиска\n" #, fuzzy #~ msgid "but dbenv wasn't ready." #~ msgstr "Ожидалось завершение процесса %s, но он не был запущен" #, fuzzy #~ msgid "entering start" #~ msgstr "Начальная стрелка" #~ msgid "Couldn't start replication framework" #~ msgstr "Не удалось запустить инфраструктуру репликации" #, fuzzy #~ msgid "entered election thread" #~ msgstr "%<__thread%> перед %" #~ msgid "%s: my role is" #~ msgstr "%s: моя роль" #~ msgid "%s: my role is now" #~ msgstr "%s: моя роль теперь" #~ msgid "Couldn't run election" #~ msgstr "Невозможно провести выборы" #~ msgid "entering startElection" #~ msgstr "вход в startElection" #~ msgid "new role" #~ msgstr "новая роль" #~ msgid "Couldn't begin role" #~ msgstr "Невозможно вступить в роль" #~ msgid "entering send" #~ msgstr "переход в send" #~ msgid "failed to send to" #~ msgstr "сбой отправки на" #~ msgid "entering repSend" #~ msgstr "переход в repSend" #~ msgid "entering sendNewSiteMsg" #~ msgstr "переход в sendNewSiteMsg" #~ msgid "entering sendHeartbeatMsg" #~ msgstr "переход в sendHeartbeatMsg" #~ msgid "entering sendNewMasterMsg" #~ msgstr "переход в sendNewMasterMsg" #~ msgid "entering processMessage from " #~ msgstr "переход в processMessage из " #~ msgid "received message from myself!" #~ msgstr "получено собственное сообщение!" #~ msgid "received master id" #~ msgstr "получен идентификатор головного узла" #~ msgid "received HEARTBEAT_MESSAGE" #~ msgstr "получено сообщение HEARTBEAT_MESSAGE" #~ msgid "received ELECTION_MESSAGE" #~ msgstr "получено сообщение ELECTION_MESSAGE" #~ msgid "received NEWSITE_MESSAGE" #~ msgstr "получено сообщение NEWSITE_MESSAGE" #~ msgid "processing message from %d" #~ msgstr "обработка сообщения от %d" #~ msgid "Got dbnotfound" #~ msgstr "Получено dbnotfound" #~ msgid "couldn't process message" #~ msgstr "не удалось обработать сообщение" #~ msgid "received DB_REP_NEWSITE from %s" #~ msgstr "получено DB_REP_NEWSITE от %s" #~ msgid "received DB_REP_HOLDELECTION" #~ msgstr "получено DB_REP_HOLDELECTION" #~ msgid "REP_NOTPERM returned for LSN %s" #~ msgstr "REP_NOTPERM получено для LSN %s" #~ msgid "REP_IGNORE received" #~ msgstr "получен сигнал REP_IGNORE" #~ msgid "JOIN_FAILURE received" #~ msgstr "получен сигнал JOIN_FAILURE" #~ msgid "I am now a master" #~ msgstr "я теперь главный" #~ msgid "received DB_EVENT_REP_MASTER" #~ msgstr "получено DB_EVENT_REP_MASTER" #~ msgid "I am now a client" #~ msgstr "Я теперь клиент" #~ msgid "Getting permission failed" #~ msgstr "Не удалось получить разрешение" #~ msgid "Write failed" #~ msgstr "Запись не удалась" #~ msgid "New master elected" #~ msgstr "Выбран новый головной узел" #~ msgid "I won the election: I am the MASTER" #~ msgstr "Я победил на выборах: я теперь MASTER" #~ msgid "Oops! Internal DB panic!" #~ msgstr "Ой! 
Внутренняя паника БД!" #~ msgid "accessing gateway: %s" #~ msgstr "соединение с шлюзом: %s" #, fuzzy #~ msgid "This bartender does not support gateway" #~ msgstr "Сервер не поддерживает TLS" #~ msgid "Librarian URL or ISIS URL not found in the configuration." #~ msgstr "В настройках не найдены адреса ни Библиотекаря, ни ISIS." #, fuzzy #~ msgid "Error connecting to ISIS %(iu)s, reason: %(r)s" #~ msgstr "Ошибка подключения к беспроводной сети" #, fuzzy #~ msgid "Error in isisThread: %s" #~ msgstr "Ошибка в libkabc" #~ msgid "adding" #~ msgstr "добавляется" #~ msgid "modifyMetadata response" #~ msgstr "возврат modifyMetadata" #~ msgid "modifyMetadata failed, removing the new librarian entry" #~ msgstr "" #~ "ошибка выполнения modifyMetadata, удаление новой записи библиотекаря" #~ msgid "Error creating new entry in Librarian: %s" #~ msgstr "Ошибка создания новой записи в Librarian: %s" #~ msgid "//// response from the external store:" #~ msgstr "//// ответ внешнего запоминающего устройства:" #~ msgid "location chosen:" #~ msgstr "выбранное расположение:" #, fuzzy #~ msgid "ERROR from the chosen Shepherd" #~ msgstr "Ошибка %s при выборке из %s@%s\n" #~ msgid "addReplica" #~ msgstr "addReplica" #~ msgid "Registered Shepherds in Librarian" #~ msgstr "Зарегистрированные у Библиотекаря Чабаны" #~ msgid "Alive Shepherds:" #~ msgstr "Живые Чабаны:" #~ msgid "LN" #~ msgstr "LN" #~ msgid "metadata" #~ msgstr "метаданные" #~ msgid "Could not read entry" #~ msgstr "Не удалось прочесть запись" #~ msgid "\\/\\/" #~ msgstr "\\/\\/" #~ msgid "removing" #~ msgstr "удаляется" #~ msgid "Proxy store:" #~ msgstr "Хранилище доверенностей:" #~ msgid "Delegation status: " #~ msgstr "Статус делегирования:" #~ msgid "creating proxy file : " #~ msgstr "создаётся файл доверенности:" #~ msgid "Delegation failed: " #~ msgstr "Сбой делегирования:" #~ msgid "ID: " #~ msgstr "ID: " #~ msgid "ProxyStore: %s" #~ msgstr "ProxyStore: %s" #~ msgid "removeCredentials: %s" #~ msgstr "removeCredentials: %s" #~ msgid "proxy store is not accessable." #~ msgstr "хранилище доверенностей недоступно." #~ msgid "Certificate directory is not accessable! Check configuration file." #~ msgstr "" #~ "Каталог с открытыми ключами сертификационных агентств недоступен! " #~ "Проверьте файл настроек." #, fuzzy #~ msgid "Proxy store is not accessable." #~ msgstr "хранилище доверенностей недоступно." #~ msgid "Failed retrieving job information for job: %s" #~ msgstr "Не удалось извлечь информацию о задаче: %s" #~ msgid "Unable to select run time environment" #~ msgstr "Невозможно выбрать среду выполнения" #~ msgid "Submit: Failed to modify job description to be sent to target." #~ msgstr "" #~ "Засылка: Не удалось адаптировать описание задачи для засылки по назначению" #~ msgid "[ADLParser] RemoteSessionAccess is not supported yet." #~ msgstr "[ADLParser] RemoteSessionAccess пока что не поддерживается." #~ msgid "Can't sign a non-limited, non-independent proxy with a limited proxy" #~ msgstr "" #~ "Невозможно подписать неограниченную зависимую доверенность ограниченной " #~ "доверенностью" #~ msgid " Used Slots: %d" #~ msgstr "Использованные ядра: %d" #~ msgid "Job list file not specified." 
#~ msgstr "Не указан файл списка задач" #~ msgid "cFlavour = %s; service = %s" #~ msgstr "cFlavour = %s; service = %s" #~ msgid "" #~ "Unable to get job (%s), job information not found at execution service" #~ msgstr "" #~ "Невозможно извлечь задачу (%s), на сервисе исполнения не обнаружена " #~ "информация о задаче" #~ msgid "" #~ "Unable to kill job (%s), job information not found at execution service" #~ msgstr "" #~ "Невозможно прервать задачу (%s), на сервисе исполнения не обнаружена " #~ "информация о задаче" #~ msgid "Failed killing job (%s)" #~ msgstr "Сбой прерывания задачи (%s)" #~ msgid "" #~ "Unable to renew job (%s), job information not found at execution service" #~ msgstr "" #~ "Невозможно возобновить задачу (%s), на сервисе исполнения не обнаружена " #~ "информация о задаче" #~ msgid "Failed renewing job (%s)" #~ msgstr "Сбой возобновления задачи (%s)" #~ msgid "Unable to resume job (%s), job information not found" #~ msgstr "" #~ "Невозможно продолжить задачу (%s), не обнаружена информация о задаче" #~ msgid "Generating EMIES target: %s" #~ msgstr "Создание цели EMIES: %s" #~ msgid "" #~ "The middleware flavour of the job (%s) does not match that of the job " #~ "controller (%s)" #~ msgstr "" #~ "Тип подпрограммного обеспечения задачи (%s) не соответствует типу " #~ "контроллера (%s)" #~ msgid "Job has not finished yet: %s" #~ msgstr "Задача ещё не завершилась: %s" #~ msgid "Failed downloading job %s" #~ msgstr "Не удалось получить результаты задачи %s" #~ msgid "Failed cleaning job %s" #~ msgstr "Не удалось удалить задачу %s" #~ msgid "Job has already finished: %s" #~ msgstr "Задача уже завершилась: %s" #~ msgid "Failed cancelling job %s" #~ msgstr "Не удалось прервать задачу %s" #~ msgid "" #~ "Job information not found, job %s will only be deleted from local joblist" #~ msgstr "" #~ "Информация о задаче не обнаружена, задача %s будет удалена только из " #~ "локального списка" #~ msgid "Unknown output %s" #~ msgstr "Неизвестный вывод %s" #~ msgid "Job state information not found: %s" #~ msgstr "Не удалось получить информацию о задаче: %s" #~ msgid "Cannot create output of %s for job (%s): Invalid destination %s" #~ msgstr "" #~ "Невозможно создать вывод %s для задачи (%s): Недопустимое назначение %s" #~ msgid "%s from job %s" #~ msgstr "%s из задачи %s" #~ msgid "Cannot migrate job %s, it is not queuing." #~ msgstr "Невозможно мигрировать задачу %s, она не ожидает в очереди." #~ msgid "Job migration failed, for job %s, no more possible targets" #~ msgstr "Миграция задачи %s не удалась, отсутствуют возможные назначения" #~ msgid "Failed to lock job list file %s. Job information will be out of sync" #~ msgstr "" #~ "Не удалось заблокировать файл списка задач %s. 
Информация о задачах будет " #~ "рассинхронизована" #~ msgid "Failed renewing job %s" #~ msgstr "Не удалось обновить доверенность задачи %s" #~ msgid "Failed retrieving job description for job (%s)" #~ msgstr "Не удалось получить описание задачи (%s)" #, fuzzy #~ msgid "Scheduler loop exited" #~ msgstr "%s: программа %s завершилась с кодом %d\n" #~ msgid "No job controller plugins loaded" #~ msgstr "Не подгружен ни один модуль управления задачами" #~ msgid "Credentials renewed" #~ msgstr "Параметры доступа обновлены" #~ msgid "Failed to renew credentials for some or all jobs" #~ msgstr "Не удалось обновить параметры доступа для некоторых или всех задач" #~ msgid "add dryrun option if available" #~ msgstr "добавить холостую прогонку, если возможно" #~ msgid "select broker method (Random (default), FastestQueue, or custom)" #~ msgstr "" #~ "выбрать алгоритм планировщика (Random (по умолчанию), FastestQueue, или " #~ "специальный)" #~ msgid "Job description languages supported by ARC client tools:" #~ msgstr "" #~ "Следующие языки описания задач поддерживаются клиентскими средствами ARC:" #~ msgid "explicitly select or reject a resource holding queued jobs" #~ msgstr "явным образом выбрать или отсеять ресурс, держащий задачи в очереди" #~ msgid "explicitly select or reject a resource to migrate to" #~ msgstr "явным образом выбрать или отсеять назначение миграции" #~ msgid "Brokers available to arcmigrate:" #~ msgstr "Следующие планировщики доступны для arcmigrate:" #~ msgid "Job migration aborted because no resource returned any information" #~ msgstr "" #~ "Обрыв засылки задачи, т.к. ни один из ресурсов не предоставил информацию" #~ msgid "All jobs were resumed" #~ msgstr "Все задачи были возобновлены" #~ msgid "Brokers available to arcresub:" #~ msgstr "Следующие планировщики доступны для arcresub:" #~ msgid "Disregarding %s" #~ msgstr "Игнорируется %s" #~ msgid "Job resubmission failed, unable to parse obtained job description" #~ msgstr "" #~ "Не удалось перезаслать задачу, невозможно разобрать полученное описание " #~ "задачи" #~ msgid "Job resubmitted with new jobid: %s" #~ msgstr "Задача запущена с новым ярлыком: %s" #~ msgid "Job resubmission failed, no more possible targets" #~ msgstr "Не удалось перезаслать задачу, возможные назначения отсутствуют" #~ msgid "Job could not be killed or cleaned" #~ msgstr "Задача не может быть прервана или стёрта" #~ msgid "" #~ "Cannot find any proxy. arcsub currently cannot run without a proxy.\n" #~ " If you have the proxy file in a non-default location,\n" #~ " please make sure the path is specified in the client configuration " #~ "file.\n" #~ " If you don't have a proxy yet, please run 'arcproxy'!" #~ msgstr "" #~ "Не удалось обнаружить доверенность. В этой версии arcsub не работает без " #~ "доверенности.\n" #~ " Если Ваша доверенность хранится в нестандартном месте, пожалуйста,\n" #~ " убедитесь, что в настройках клиента указан правильный путь.\n" #~ " Если же Вы пока не создали доверенность, запустите 'arcproxy'!" #, fuzzy #~ msgid "The request_url is %s" #~ msgstr "Ошибка в ссылке '%1'." #~ msgid "Unable to calculate checksum of local input file %s" #~ msgstr "Не удалось вычислить контрольную сумму локального входного файла %s" #~ msgid "[ADLParser] %s element with false value is not supported yet." #~ msgstr "[ADLParser] Элемент %s с ложным значением пока не поддерживается." #~ msgid "[ADLParser] %s element with true value is not supported yet." #~ msgstr "[ADLParser] Элемент %s с истиным значением пока не поддерживается." 
#~ msgid "" #~ "[ADLParser] Option element inside RuntimeEnvironment is not supported yet." #~ msgstr "" #~ "[ADLParser] Элемент Option внутри RuntimeEnvironment пока что не " #~ "поддерживается." #~ msgid "[ADLParser] ParallelEnvironment is not supported yet." #~ msgstr "[ADLParser] ParallelEnvironment пока что не поддерживается." #~ msgid "[ADLParser] Coprocessor is not supported yet." #~ msgstr "[ADLParser] Coprocessor пока что не поддерживается." #~ msgid " Keep data: true" #~ msgstr " Оставлять данные: верно" #~ msgid "" #~ "[ADLParser] For useNumberOfSlots of SlotsPerHost only false value is " #~ "supported yet." #~ msgstr "" #~ "[ADLParser] Для useNumberOfSlots атрибута SlotsPerHost пока что " #~ "поддерживается только ложное значение." #~ msgid "[ADLParser] ExclusiveExecution is not supported yet." #~ msgstr "[ADLParser] ExclusiveExecution пока что не поддерживается." #, fuzzy #~ msgid "Invalid configuration - no allowed DNs specified" #~ msgstr "Не указан файл настроек" #~ msgid "bind failed" #~ msgstr "сбой привязки" #~ msgid "%s: Failed reading list of output files" #~ msgstr "%s: Не удалось прочесть список выходных файлов" #~ msgid "ARC: acl element wrongly formated - missing Content element" #~ msgstr "" #~ "Невеврный формат элемента ARC: acl element - отсутствует элемент Content" #~ msgid "ARC: unsupported ACL type specified: %s" #~ msgstr "ARC: указан неподдерживаемый тип ACL: %s" #~ msgid "" #~ "[ADLParser] Missing FailIfExitCodeNotEqualTo in %s. Ignoring exit code is " #~ "not supported yet." #~ msgstr "" #~ "[ADLParser] В %s отсутствует FailIfExitCodeNotEqualTo. Игнорирование кода " #~ "выхода пока не поддерживается." #~ msgid "" #~ "[ADLParser] FailIfExitCodeNotEqualTo in %s contain non-zero code. This " #~ "feature is not supported yet." #~ msgstr "" #~ "[ADLParser] FailIfExitCodeNotEqualTo в %s содержит ненулевой код. Такая " #~ "возможность пока что не поддерживается." #~ msgid "[ADLParser] Multiple PreExecutable elements are not supported yet." #~ msgstr "" #~ "[ADLParser] Множественные элементы PreExecutable пока что не " #~ "поддерживаются." #~ msgid "" #~ "[ADLParser] Only SGAS ServiceType for RemoteLogging is supported yet." #~ msgstr "" #~ "[ADLParser] Пока что поддерживается только SGAS ServiceType для " #~ "RemoteLogging." #~ msgid "[ADLParser] For ClientDataPush only false value is supported yet." #~ msgstr "" #~ "[ADLParser] Для ClientDataPush пока что поддерживается только ложное " #~ "значение." #~ msgid "[ADLParser] DelegationID in Source is not supported yet." #~ msgstr "[ADLParser] DelegationID в Source пока что не поддерживается." #~ msgid "[ADLParser] Option in Source is not supported yet." #~ msgstr "[ADLParser] Option в Source пока что не поддерживается." #~ msgid "[ADLParser] DelegationID in Target is not supported yet." #~ msgstr "[ADLParser] DelegationID в Target пока что не поддерживается." #~ msgid "[ADLParser] Option in Target is not supported yet." #~ msgstr "[ADLParser] Option в Target пока что не поддерживается." #~ msgid "" #~ "The JobDescription::operator bool() method is DEPRECATED, use validity " #~ "checks when parsing sting or outputing contents of JobDescription object." #~ msgstr "" #~ "Использование метода JobDescription::operator bool() НЕ РЕКОМЕНДУЕТСЯ, " #~ "проверяйте действительность при разборке строк или выводе содержимого " #~ "объекта JobDescription." #~ msgid "" #~ "The JobDescription::Print method is DEPRECATED, use the JobDescription::" #~ "SaveToStream method instead." 
#~ msgstr "" #~ "Использование метода JobDescription::Print НЕ РЕКОМЕНДУЕТСЯ, используйте " #~ "метод JobDescription::SaveToStream взамен." #~ msgid " User tag: %s" #~ msgstr " Метка пользователя: %s" #~ msgid " Prologue arguments: %s" #~ msgstr " Аргументы пролога: %s" #~ msgid " Epilogue arguments: %s" #~ msgstr " Аргументы эпилога: %s" #~ msgid "" #~ "This method is DEPRECATED, please use the JobDescription::Parse(const " #~ "std::string&, std::list&, const std::string&, const std::" #~ "string&) method instead." #~ msgstr "" #~ "Использование этого метода НЕ РЕКОМЕНДУЕТСЯ, пожалуйста, используйте " #~ "метод JobDescription::Parse(const std::string&, std::" #~ "list&, const std::string&, const std::string&) взамен." #~ msgid "" #~ "This method is DEPRECATED, please use the JobDescription::UnParse(std::" #~ "string&, std::string, const std::string&) method instead." #~ msgstr "" #~ "Использование этого метода НЕ РЕКОМЕНДУЕТСЯ, пожалуйста, используйте " #~ "метод JobDescription::UnParse(std::string&, std::string, const std::" #~ "string&) взамен." #~ msgid "" #~ "The Job::Print method is DEPRECATED, use the Job::SaveToStream method " #~ "instead." #~ msgstr "" #~ "Использование метода Job::Print НЕ РЕКОМЕНДУЕТСЯ, используйте метод Job::" #~ "SaveToStream взамен." #~ msgid "" #~ "The TargetGenerator::GetTargets method is DEPRECATED, use the " #~ "GetExecutionTargets or GetJobs method instead." #~ msgstr "" #~ "Использование метода TargetGenerator::GetTargets НЕ РЕКОМЕНДУЕТСЯ, " #~ "используйте метод GetExecutionTargets or GetJobs взамен." #~ msgid "Running resource (target) discovery" #~ msgstr "Выполняется обнаружение ресурсов (назначений)" #~ msgid "" #~ "The TargetGenerator::ModifyFoundTargets method is DEPRECATED, use the " #~ "FoundTargets method instead." #~ msgstr "" #~ "Использование метода TargetGenerator::ModifyFoundTargets НЕ " #~ "РЕКОМЕНДУЕТСЯ, используйте метод FoundTargets взамен." #~ msgid "" #~ "The TargetGenerator::FoundJobs method is DEPRECATED, use the GetFoundJobs " #~ "method instead." #~ msgstr "" #~ "Использование метода TargetGenerator::FoundJobs НЕ РЕКОМЕНДУЕТСЯ, " #~ "используйте метод GetFoundJobs взамен." #~ msgid "" #~ "The TargetGenerator::AddJob(const XMLNode&) method is DEPRECATED, use the " #~ "AddJob(const Job&) method instead." #~ msgstr "" #~ "Использование метода TargetGenerator::AddJob(const XMLNode&) НЕ " #~ "РЕКОМЕНДУЕТСЯ, используйте метод AddJob(const Job&) взамен." #~ msgid "" #~ "The TargetGenerator::PrintTargetInfo method is DEPRECATED, use the " #~ "TargetGenerator::SaveTargetInfoToStream method instead." #~ msgstr "" #~ "Использование метода TargetGenerator::PrintTargetInfo НЕ РЕКОМЕНДУЕТСЯ, " #~ "используйте метод TargetGenerator::SaveTargetInfoToStream взамен." #~ msgid "" #~ "The JobController::Cat(const std::list&, const std::string&) " #~ "method is DEPRECATED, use the JobController::Cat(std::ostream&, const " #~ "std::list&, const std::string&) method instead." #~ msgstr "" #~ "Использование метода JobController::Cat(const std::list&, " #~ "const std::string&) НЕ РЕКОМЕНДУЕТСЯ, используйте метод JobController::Cat" #~ "(std::ostream&, const std::list&, const std::string&) взамен." #~ msgid "" #~ "Specifying the \"gmlog\" value for the whichfile parameter in the Job::" #~ "Cat method is DEPRECATED, use the \"joblog\" value instead." #~ msgstr "" #~ "Использование значения \"gmlog\" для параметра whichfile в методе Job::" #~ "Cat НЕ РЕКОМЕНДУЕТСЯ, используйте значение \"joblog\" взамен." 
#~ msgid "" #~ "The JobController::PrintJobStatus method is DEPRECATED, use the Job::" #~ "SaveJobStatusToStream method instead." #~ msgstr "" #~ "Использование метода JobController::PrintJobStatus НЕ РЕКОМЕНДУЕТСЯ, " #~ "используйте метод Job::SaveJobStatusToStream взамен." #~ msgid "Failed to lock job list file %s. Job list will be out of sync" #~ msgstr "" #~ "Сбой блокировки файла списка задач %s. Информация о задачах будет " #~ "рассинхронизована" #~ msgid "" #~ "The ExecutionTarget::Print method is DEPRECATED, use the ExecutionTarget::" #~ "SaveToStream method instead." #~ msgstr "" #~ "Использование метода ExecutionTarget::Print НЕ РЕКОМЕНДУЕТСЯ, используйте " #~ "метод ExecutionTarget::SaveToStream взамен." #, fuzzy #~ msgid "CreateActivity: has delegation: %s" #~ msgstr "CreateActivity: Сбой при принятии делегирования" #, fuzzy #~ msgid "Error parsing VOMS AC" #~ msgstr "Обнаружена ошибка при разборе сертификата атрибута" #~ msgid "%s: Data staging finished" #~ msgstr "%s: Размещение данных завершено" #, fuzzy #~ msgid "Error opening lock file %s: %s" #~ msgstr "Ошибка открытия файла вывода" #, fuzzy #~ msgid "Found empty lock file %s" #~ msgstr "Ожидание блокировки файла" #, fuzzy #~ msgid "DTR %s: Failed linking cache file to %s due to existing write lock" #~ msgstr "Копирование файла '%s' из '%s'..." #~ msgid "Cannot determine hostname from uname()" #~ msgstr "Невозможно извлечь имя узла используя uname()" #~ msgid "Error reading meta file %s" #~ msgstr "Ошибка чтения мета-файла %s" #~ msgid "" #~ "File exists in remote cache at %s but is locked. Will download from source" #~ msgstr "" #~ "Файл присутствует в удалённом кэше на %s, но заблокирован. Будет " #~ "проведена загрузка из источника" #~ msgid "Creating temporary link from %s to remote cache file %s" #~ msgstr "Создаётся временная ссылка с %s на удалённо кэшированный файл %s" #~ msgid "" #~ "Failed to create soft link to remote cache: %s. Will download %s from " #~ "source" #~ msgstr "" #~ "Сбой создания гибкой ссылки на удалённый кэш: %s. Будет произведена " #~ "загрузка %s из источника" #~ msgid "" #~ "Could not read target of link %s. Manual intervention may be required to " #~ "remove lock in remote cache" #~ msgstr "" #~ "Невозможно прочесть цель ссылки %s. Возможно, необходимо ручное " #~ "вмешательство для снятия блокировки в удалённом кэше" #~ msgid "" #~ "Failed to unlock remote cache file %s. Manual intervention may be required" #~ msgstr "" #~ "Сбой разблокировки удалённого кэшированного файла %s. Возможно, " #~ "необходимо ручное вмешательство" #~ msgid "Error removing file %s: %s. Manual intervention may be required" #~ msgstr "" #~ "Ошибка при удалении файла %s: %s. Возможно, необходимо ручное " #~ "вмешательство" #~ msgid "Error: Cache file %s does not exist" #~ msgstr "Ошибка: Кэшированный файл %s не существует" #~ msgid "Could not read target of link %s" #~ msgstr "Невозможно прочесть цель ссылки %s" #~ msgid "Couldn't match link target %s to any remote cache" #~ msgstr "Цель ссылки %s не найдена ни в одном удалённом кэше" #~ msgid "Error removing symlink %s: %s. Manual intervention may be required" #~ msgstr "" #~ "Ошибка при удалении символьной ссылки %s: %s. Возможно, необходимо ручное " #~ "вмешательство" #~ msgid "'../' is not allowed in filename" #~ msgstr "'../' не допускается в имени файла" #~ msgid "Your issuer CA's DN: %s." #~ msgstr "Выделенное имя агентства, выдавшего Ваш сертификат: %s." 
#~ msgid "Source is bad URL or can't be used due to some reason" #~ msgstr "" #~ "URL источника недопустим, или не может быть использован по какой-либо " #~ "причине" #~ msgid "Destination is bad URL or can't be used due to some reason" #~ msgstr "" #~ "URL назначения недопустим, или не может быть использован по какой-либо " #~ "причине" #~ msgid "Error deleting location or URL" #~ msgstr "Ошибка удаления расположения или URL" #~ msgid "DataPoint is already reading" #~ msgstr "DataPoint уже читает" #~ msgid "DataPoint is already writing" #~ msgstr "DataPoint уже пишет" #~ msgid "File stating failed" #~ msgstr "Не удалось получить информацию о состоянии файла" #~ msgid "Failed to finish destination" #~ msgstr "Не удалось завершить назначение" #~ msgid "" #~ "Cannot find file at %s for getting the certificate. Please make sure this " #~ "file exists." #~ msgstr "" #~ "Не удалось найти файл по адресу %s, содержащий сертификат. Пожалуйста, " #~ "убедитесь, что файл существует." #~ msgid "Timeleft for AC: %s" #~ msgstr "Оставшееся время для AC: %s" #~ msgid "AC has been expired for: %s" #~ msgstr "Срок действия сертификат атрибута для %s закончился" #, fuzzy #~ msgid "Can get X509V3_EXT_METHOD for %s" #~ msgstr "Невозможно извлечь X509V3_EXT_METHOD для %s" #, fuzzy #~ msgid "Service_doc: %s" #~ msgstr "Файл &DOC:" #, fuzzy #~ msgid "SOAP Fault: %s" #~ msgstr "Получена ошибка SOAP" #~ msgid "Proxy successfully verified." #~ msgstr "Доверенность подтверждена." #~ msgid "Proxy not valid. Job submission aborted. Please run 'arcproxy'!" #~ msgstr "" #~ "Доверенность недействительна. Засылка задачи оборвана. Пожалуйста, " #~ "запустите 'arcproxy'!" #~ msgid "" #~ "Cannot find CA certificates directory. Please specify the location to the " #~ "directory in the client configuration file." #~ msgstr "" #~ "Не удалось найти каталог с открытыми ключами сертификационных агентств. " #~ "Пожалуйста, введите расположение этого каталога в файл настроек клиента." #~ msgid "Local user does not match user of DTR %s" #~ msgstr "Локальный пользователь не соответствует пользователю DTR %s" #~ msgid "" #~ "No services specified. Please specify a cluster or index (-c or -g " #~ "options, see arcsync -h) or set the \"defaultservices\" attribute in the " #~ "client configuration." #~ msgstr "" #~ "Не задано ни одного назначения. Пожалуйста, задайте значение аттрибута " #~ "\"defaultservices\" в файле настроек клиента, либо укажите явным образом " #~ "ресурс или каталог ресурсов (опции -c или -g, см. arcsync -h)" #~ msgid "Failed to read PEM from file %s" #~ msgstr "Не удалось прочесть PEM из файла %s" #~ msgid "" #~ "Failed to read private key from file %s - probably no delegation was done" #~ msgstr "" #~ "Не удалось прочитать файл личного ключа из файла %s - вероятно, не было " #~ "делегирования" #~ msgid "Failed in SSL (sk_X509_new_null)" #~ msgstr "Сбой в SSL (sk_X509_new_null)" #~ msgid "Failed in SSL (sk_X509_insert)" #~ msgstr "Сбой в SSL (sk_X509_insert)" #~ msgid "Error: no VOMS extension found" #~ msgstr "Ошибка: не найдено расширений VOMS" #~ msgid "Shutting down grid-manager thread" #~ msgstr "Прерывается поток Грид-менеджера" #~ msgid "Requirement satisfied. %s %s %s." #~ msgstr "Требование удовлетворено. %s %s %s." #~ msgid "Requirement NOT satisfied. %s %s %s." #~ msgstr "Требование НЕ удовлетворено. %s %s %s." #~ msgid "End of list reached requirement not met." 
#~ msgstr "Достигнут конец списка, требования не удовлетворены" #~ msgid "Can't stat file: %s" #~ msgstr "Невозможно получить статус файла: %s" #~ msgid "File is not accessible: %s - %s" #~ msgstr "Файл недоступен: %s - %s" #, fuzzy #~ msgid "delete_ftp: globus_ftp_client_delete timeout" #~ msgstr "check_ftp: сбой в globus_ftp_client_size" #~ msgid "Transfer FAILED: %s - %s" #~ msgstr "Передача НЕ УДАЛАСЬ: %s - %s" #, fuzzy #~ msgid "" #~ "Cannot find the path of the key file, please setup environment " #~ "X509_USER_KEY, or keypath in a configuration file" #~ msgstr "" #~ "Не удалось найти закрытый ключ пользователя. Пожалуйста, задайте " #~ "переменную среды X509_USER_KEY, или значение keypath в файле конфигурации" #, fuzzy #~ msgid "" #~ "Cannot find file at %s for getting the key. Please make sure this file " #~ "exists." #~ msgstr "" #~ "Не удалось найти файл по адресу %s, содержащий доверенность. Пожалуйста, " #~ "убедитесь, что файл существует." #, fuzzy #~ msgid "[ARCJSDLParser] Failed to create parser context" #~ msgstr "Не удалось зарезервировать память для контекста анализатора" #, fuzzy #~ msgid "[ARCJSDLParser] Validating error" #~ msgstr "%s: ошибка записи файла '%s': %s\n" #~ msgid "Requirements not satisfied." #~ msgstr "Требования не удовлетворены." #, fuzzy #~ msgid "Mismatching url in file %s: %s Expected %s" #~ msgstr "пропущено имя файла в URL" #, fuzzy #~ msgid "Bad separator in file %s: %s" #~ msgstr "неверная строка в файле-каркасе" #, fuzzy #~ msgid "Bad value of expiry time in %s: %s" #~ msgstr "Единица времени определения периода устаревания." #, fuzzy #~ msgid "Illegal testjob-id given" #~ msgstr "Задан недопустимый номер тестовой задачи" #~ msgid "Failed to terminate LCMAPS - has to keep library loaded" #~ msgstr "" #~ "Не удалось прервать LCMAPS - придётся оставить библиотеку подгруженой" #~ msgid "VOMS config: vo: %s" #~ msgstr "Настройки VOMS: ВО: %s" #~ msgid "VOMS config: group: %s" #~ msgstr "Настройки VOMS: группа: %s" #~ msgid "VOMS config: role: %s" #~ msgstr "Настройки VOMS: роль: %s" #~ msgid "VOMS config: capabilities: %s" #~ msgstr "Настройки VOMS: возможности: %s" #, fuzzy #~ msgid "VOMS matched" #~ msgstr "Подходящие подгруппы:" #~ msgid "Failed to terminate LCAS - has to keep library loaded" #~ msgstr "Не удалось прервать LCAS - придётся оставить библиотеку подгруженой" #~ msgid "Disconnect: Failed quitting: %s" #~ msgstr "Отключение: Не удалось выйти: %s" #~ msgid "Failed to close connection 1" #~ msgstr "Не удалось закрыть соединение 1" #~ msgid "Failed to close connection 2" #~ msgstr "Не удалось закрыть соединение 2" #~ msgid "Failed to close connection 3" #~ msgstr "Не удалось закрыть соединение 3" #~ msgid "Using cached configuration: %s" #~ msgstr "Используются сохранённые настройки: %s" #~ msgid "Reading configuration file: %s" #~ msgstr "Чтение файла настроек: %s" #, fuzzy #~ msgid "subject: %s" #~ msgstr "Тема:" #~ msgid "Out of memory" #~ msgstr "Мало памяти" #~ msgid "out of memory" #~ msgstr "мало памяти" #, fuzzy #~ msgid "Error reading valid and existing meta file %s: %s" #~ msgstr "" #~ "\n" #~ "%s: ошибка чтения входного файла '%s': %s\n" #, fuzzy #~ msgid "Error listing dir %s: %s" #~ msgstr "Ошибка перечисления каталогов: %s\n" #, fuzzy #~ msgid "Error reading srm info file %s:%s" #~ msgstr "%s: ошибка чтения вспомогательного файла '%s': %s\n" #, fuzzy #~ msgid "Error creating srm info file %s" #~ msgstr "%s: ошибка закрытия вспомогательного файла '%s': %s\n" #, fuzzy #~ msgid "DTR %s: Cache processing successful" #~ msgstr 
"Задача успешно возобновлена" #, fuzzy #~ msgid "job_id url destination" #~ msgstr "Неверный URL цели." #, fuzzy #~ msgid "link the cache file" #~ msgstr "Обновление кэш-файла" #, fuzzy #~ msgid "copy the cache file" #~ msgstr "Обновление кэш-файла" #~ msgid "file is executable" #~ msgstr "файл является исполняемым файлом" #, fuzzy #~ msgid "uid of destination owner" #~ msgstr "Указать владельца схемы" #, fuzzy #~ msgid "gid of destination owner" #~ msgstr "Указать владельца схемы" #, fuzzy #~ msgid "One of -l and -c must be specified" #~ msgstr "должно быть одним из: C, S, E, P, или пустым" #~ msgid "No configuration specified" #~ msgstr "Файл настроек не указан" #, fuzzy #~ msgid "Error linking/copying cache file" #~ msgstr "Ошибка копирования временного почтового файла: %s" #, fuzzy #~ msgid "Adding %s service %s " #~ msgstr "Ошибка при добавлении службы. %s" #, fuzzy #~ msgid "" #~ "Can not access CA certificate directory: %s. The certificates will not be " #~ "verified" #~ msgstr "Невозможно открыть файл сертификата: %s (%s)" #, fuzzy #~ msgid "" #~ "Trying to migrate to %s: Migration to a ARC GM-powered resource is not " #~ "supported." #~ msgstr "-mhard-float не поддерживается" #~ msgid "Using job list file %s" #~ msgstr "Используется список задач из файла %s" #, fuzzy #~ msgid "Job not found in the job list: %s" #~ msgstr "Задача %s не обнаружена в списке задач." #~ msgid "Failed to use channel stdout" #~ msgstr "Сбой использования канала stdout" #~ msgid "" #~ "Cannot find any proxy. Please specify the path to the proxy file in the " #~ "client configuration file." #~ msgstr "" #~ "Не удалось найти доверенность пользователя. Пожалуйста, введите " #~ "расположение доверенности в файл конфигурации клиента." #, fuzzy #~ msgid "Error allocating memory for info file %s:%s" #~ msgstr "%s: ошибка чтения вспомогательного файла '%s': %s\n" #, fuzzy #~ msgid "Error opening srm info file for writing %s:%s" #~ msgstr "Ошибка: Невозможно открыть файл %s для записи.\n" #, fuzzy #~ msgid "Error allocating memory for srm info file %s:%s" #~ msgstr "%s: ошибка чтения вспомогательного файла '%s': %s\n" #, fuzzy #~ msgid "Error opening srm info file %s:%s" #~ msgstr "%s: ошибка закрытия вспомогательного файла '%s': %s\n" #~ msgid "" #~ "Argument to -g has the format Flavour:URL e.g.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/mds-vo-name=sweden,O=grid\n" #~ "CREAM:ldap://cream.grid.upjs.sk:2170/o=grid\n" #~ "\n" #~ "Argument to -c has the format Flavour:URL e.g.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgstr "" #~ "Аргумент опции -g задаётся по форме Flavour:URL, например:\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/mds-vo-name=sweden,O=grid\n" #~ "CREAM:ldap://cream.grid.upjs.sk:2170/o=grid\n" #~ "\n" #~ "Аргумент опции -c задаётся по форме Flavour:URL, например:\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgid "" #~ "Argument to -c has the format Flavour:URL e.g.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgstr "" #~ "Аргумент опции -c задаётся по форме Flavour:URL, например:\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #, fuzzy #~ msgid "Getting job descriptions from local job file" #~ msgstr "Удалить передачу из списка." #, fuzzy #~ msgid "Disregarding job descriptions from local job file" #~ msgstr "Удалить передачу из списка." 
#, fuzzy #~ msgid "Valid job description found for: %s" #~ msgstr "CreateActivity: Описание задачи не найдено" #, fuzzy #~ msgid "Invalid job description found for: %s" #~ msgstr "CreateActivity: Описание задачи не найдено" #, fuzzy #~ msgid "Job description for %s retrieved locally" #~ msgstr "описание засылаемой задачи: %s" #, fuzzy #~ msgid "Job %s can not be resubmitted" #~ msgstr "Задача не может быть перезапущена" #~ msgid "Job description for %s could not be retrieved locally" #~ msgstr "Описание задачи %s не может быть восстановлено локально" #~ msgid "file where the jobs will be stored" #~ msgstr "Файл для записи ярлыков запущенных задач" #, fuzzy #~ msgid "Incompatible RSL attributes" #~ msgstr "Правка параметров слоя" #, fuzzy #~ msgid "job.Resources.CandidateTarget.size() = %d" #~ msgstr "Размер списка недавно использовавшихся ресурсов" #, fuzzy #~ msgid "Error creating tmp file %s for remote lock with mkstemp(): %s" #~ msgstr "Ошибка создания временного файла скрипта" #, fuzzy #~ msgid "Error writing to tmp lock file for remote lock %s: %s" #~ msgstr "Ожидание блокировки файла" #, fuzzy #~ msgid "Failed to change owner of destination dir to %i: %s" #~ msgstr "" #~ "Не удалось сменить текущий каталог на административный каталог %sinfo" #~ msgid " EndPointURL: %s" #~ msgstr "URL конечной точки: %s" #~ msgid " QueueName: %s" #~ msgstr "Имя очереди: %s" #, fuzzy #~ msgid " QueueName (ignored): %s" #~ msgstr "Игнорируется (устаревшая)" #~ msgid " Target.Mandatory: true" #~ msgstr " Target.Mandatory: true" #~ msgid " DownloadToCache: true" #~ msgstr " DownloadToCache: true" #~ msgid " Directory element:" #~ msgstr " Элемент Directory:" #, fuzzy #~ msgid "URL of ExecutionTarget is not properly defined" #~ msgstr "" #~ "Сравнение, назначение для исполнения: %s, состояние здоровья не " #~ "определено" #, fuzzy #~ msgid "URL of ExecutionTarget is not properly defined: %s." 
#~ msgstr "" #~ "Сравнение, назначение для исполнения: %s, состояние здоровья не " #~ "определено" #~ msgid "Filetransfer created" #~ msgstr "Передача файла начата" #~ msgid "download" #~ msgstr "передача" #~ msgid "Cannot accept destination as URL" #~ msgstr "Назначение должно быть URL" #~ msgid "Stage in" #~ msgstr "Подгрузка файлов" #~ msgid "Stage out" #~ msgstr "Выгрузка файлов" #~ msgid "Cannot collect resource information" #~ msgstr "Не удалось собрать информацию о ресурсе" #~ msgid "No response" #~ msgstr "Нет ответа" #~ msgid "Cannot find job id" #~ msgstr "Не удалось найти идентификатор задачи" #~ msgid "Cannot find scheduler endpoint" #~ msgstr "Не удалось найти конечную точку планировщика" #~ msgid "Status: %s %d" #~ msgstr "Состояние: %s %d" #~ msgid "Process job: %s" #~ msgstr "Обработка задачи: %s" #~ msgid "No scheduler configured" #~ msgstr "Ни одного планировщика не настроено" #~ msgid "Do Request: %s" #~ msgstr "Исполнение запроса: %s" #~ msgid "No free CPU slot" #~ msgstr "Отсутствуют доступные свободные процессоры" #~ msgid "Per: %d" #~ msgstr "Период: %d" #~ msgid "Report status" #~ msgstr "Отчёт о состоянии" #~ msgid "%s reported %s" #~ msgstr "%s сообщает %s" #~ msgid "%s reported" #~ msgstr "%s сообщает" #~ msgid "%s job reported finished" #~ msgstr "Задача %s закончена" #~ msgid "Get activity status changes" #~ msgstr "Получение информации об изменении состояния" #~ msgid "%s new status: %s" #~ msgstr "Новое состояние %s: %s" #~ msgid "Killing %s" #~ msgstr "Прерывается %s" #~ msgid "pre cleanup %s %d" #~ msgstr "предварительная очистка %s %d" #~ msgid "cleanup %s" #~ msgstr "очистка %s" #~ msgid "cleanup 2 %s" #~ msgstr "очистка 2 %s" #~ msgid "PaulService shutdown" #~ msgstr "Выключение PaulService" #~ msgid "Terminate job %s" #~ msgstr "Terminate job %s" #~ msgid "** %s" #~ msgstr "** %s" #~ msgid "Cannot allocate output raw buffer" #~ msgstr "Не удалось зарезервировать буфер вывода" #~ msgid "Permission denied from %s host" #~ msgstr "Сервер %s host отказал в доступе" #~ msgid "Start process" #~ msgstr "Начать процесс" #~ msgid "Invalid JSDL! Missing application section" #~ msgstr "Недопустимый формат JSDL! Отсутствует раздел \"application\"." 
#~ msgid "%s set exception" #~ msgstr "%s присвоена ошибка" #~ msgid "Empty executable" #~ msgstr "Не задан исполняемый файл" #~ msgid "Windows cmd path: %s" #~ msgstr "Путь поиска команд Windows: %s" #~ msgid "Cmd: %s" #~ msgstr "Команда: %s" #~ msgid "StdOut: %s" #~ msgstr "Стандартный выход: %s" #~ msgid "StdErr: %s" #~ msgstr "Стандартная ошибка: %s" #~ msgid "return from run" #~ msgstr "возврат после исполнения" #~ msgid "Error during the application run" #~ msgstr "Ошибка при исполнении приложения" #~ msgid "Exception: %s" #~ msgstr "Ошибка: %s" #~ msgid "SpawnError" #~ msgstr "SpawnError" #~ msgid "Status request failed" #~ msgstr "Запрос о состоянии удался" #~ msgid "Status request succeed" #~ msgstr "Запрос о состоянии удался" #~ msgid "The response to a status request was not a SOAP message" #~ msgstr "Ответ на запрос о состоянии не является сообщением SOAP" #~ msgid "Service status request failed" #~ msgstr "Ошибка запроса о состоянии службы" #~ msgid "Service status request succeed" #~ msgstr "Успешный запрос о состоянии службы" #~ msgid "Job termination request failed" #~ msgstr "ошибка запроса об обрыве исполнения задачи" #~ msgid "Job termination request succeed" #~ msgstr "успешный запрос об обрыве исполнения задачи" #~ msgid "Job cleaning request succeed" #~ msgstr "успешный запрос об удалении результатов работы задачи" #~ msgid "file_name: " #~ msgstr "Название файла:" #~ msgid "Jsdl: " #~ msgstr "JSDL: " #~ msgid "The submited JSDL file's name: " #~ msgstr "Имя заданного файла JSDL: " #~ msgid "Jod Id: " #~ msgstr "Идентификатор задачи:" #~ msgid "sourcefile" #~ msgstr "файл источника" #~ msgid "STATUS: " #~ msgstr "СОСТОЯНИЕ:" #~ msgid "Info from the ISIS" #~ msgstr "Информация из ISIS" #~ msgid "job(s) submit" #~ msgstr "запуск задач(и)" #~ msgid "Wrong Job submitting! URL: " #~ msgstr "Неверный запуск задачи! URL: " #~ msgid " Achitecture: " #~ msgstr " Архитектура: " #~ msgid "Result(s) download" #~ msgstr "Загрузка результатов:" #~ msgid "Download Place: " #~ msgstr "Расположение загруженных файлов:" #~ msgid "Download cycle: start" #~ msgstr "Цикл загрузки: начало" #~ msgid "Current Arhitecture: " #~ msgstr "Текущая архитектура:" #~ msgid "Empty Job ID. Go to the next Job ID." #~ msgstr "Пустой ярлык задачи. Переход к следующему ярлыку." 
#~ msgid "Download url: " #~ msgstr "URL для загрузки:" #~ msgid "Download path: " #~ msgstr "Путь для загрузки:" #~ msgid "Download cycle: end" #~ msgstr "Цикл загрузки: конец" #~ msgid "Finished the compile: " #~ msgstr "Компиляция окончена:" #~ msgid " The SOAP message send and return" #~ msgstr " Отправленное и полученное сообщение SOAP" #~ msgid "Can not create output SOAP payload for delegation service" #~ msgstr "Не удалось создать выходную нагрузку SOAP для службы делегирования" #~ msgid "Can not store proxy certificate" #~ msgstr "Не удалось сохранить доверенность" #~ msgid "" #~ "Delegated credentials:\n" #~ " %s" #~ msgstr "" #~ "Делегированные параметры доступа:\n" #~ " %s" #~ msgid "Can not find the corresponding credential from credential cache" #~ msgstr "Не удалось найти соответствующие параметры доступа в кэше" #~ msgid "Signing proxy on delegation service failed" #~ msgstr "Не удалось заверить доверенность на службе делегации" #~ msgid "Cannot create SOAP fault" #~ msgstr "Невозможно сформулировать ошибку SOAP" #~ msgid "GetActivityStatuses: job %s not found" #~ msgstr "GetActivityStatuses: задача %s не обнаружена" #~ msgid "ChangeActivityStatuses: job %s not found" #~ msgstr "ChangeActivityStatuses: задача %s не обнаружена" #~ msgid "GetActivityDocuments: job %s not found" #~ msgstr "GetActivityDocuments: задача %s не обнаружена" #~ msgid "GetActivityStatuses: job %s" #~ msgstr "GetActivityStatuses: задача %s" #~ msgid "doSched" #~ msgstr "В doSched..." #~ msgid "jobq checkpoint done" #~ msgstr "Контрольная точка jobq пройдена" #~ msgid "" #~ "Count of jobs: %i Count of resources: %i Scheduler period: %i Endpoint: %" #~ "s DBPath: %s" #~ msgstr "" #~ "Количество задач: %i Количество ресурсов: %i Период планировщика: %i " #~ "Конечная точка: %s DBPath: %s" #~ msgid "NEW job: %s" #~ msgstr "НОВАЯ задача: %s" #~ msgid "A-REX ID: %s" #~ msgstr "Идентификатор A-REX: %s" #~ msgid "Sched job ID: %s NOT SUBMITTED" #~ msgstr "Sched задача: %s NOT SUBMITTED" #~ msgid "%s set killed" #~ msgstr "%s оборвано" #~ msgid "%s remove from queue" #~ msgstr "%s удалено из очереди" #~ msgid "Sched job ID: %s (A-REX job ID is empty)" #~ msgstr "Sched задача: %s (пустой ярлык задачи A-REX)" #~ msgid "Job RESCHEDULE: %s" #~ msgstr "задача перепланирована: %s" #~ msgid "JobID: %s state: %s" #~ msgstr "JobID: %s состояние: %s" #~ msgid "doReschedule" #~ msgstr "В doReschedule..." 
#~ msgid "Rescheduled job: %s" #~ msgstr "Rescheduled job: %s" #~ msgid "Error during database open: %s" #~ msgstr "Ошибка при открытии базы данных: %s" #~ msgid "Assigned new informational document" #~ msgstr "Добавлен новый информационный документ" #~ msgid "Failed to create informational document" #~ msgstr "Сбой при создании информационного документа" #~ msgid "%d <> %d" #~ msgstr "%d <> %d" #~ msgid "Cannot get resource ID" #~ msgstr "Невозможно получить идентификатор ресурса" #~ msgid "invalid job id" #~ msgstr "неверный ярлык задачи" #~ msgid "Invalid status report" #~ msgstr "Недопустимые данные о состоянии" #~ msgid "%s reports job status of %s but it is running on %s" #~ msgstr "%s отчитывается о состоянии задачи %s, но она запущена на %s" #~ msgid "%s try to status change: %s->%s" #~ msgstr "%s пытается изменить состояние: %s->%s" #~ msgid "refresh: Cannot abort transaction: %s" #~ msgstr "В refresh: Невозможно прервать передачу: %s" #~ msgid "refresh: Error during transaction: %s" #~ msgstr "обновление: Ошибка при транзакции: %s" #~ msgid "operator[]: Cannot abort transaction: %s" #~ msgstr "operator[]: Не удалось прервать передачу: %s" #~ msgid "remove: Cannot abort transaction: %s" #~ msgstr "удаление: Невозможно оборвать транзакцию: %s" #~ msgid "Job type: single" #~ msgstr "Тип задачи: одиночная" #~ msgid "Job type: collection" #~ msgstr "Тип задачи: набор" #~ msgid "Job type: parallel" #~ msgstr "Тип задачи: параллельная" #~ msgid "Job type: workflownode" #~ msgstr "Тип задачи: узел поточного задания" #, fuzzy #~ msgid "Failed setting signal handler for SIGHUP" #~ msgstr "Не удалось задать владельца файла: %s" #, fuzzy #~ msgid "Failed setting signal handler for SIGCHLD" #~ msgstr "Ошибка: не удалось установить обработчик SIGCHLD" #, fuzzy #~ msgid "Failed setting signal handler for SIGTERM" #~ msgstr "Ошибка: не удалось установить обработчик SIGTERM" #, fuzzy #~ msgid "Failed setting signal handler for SIGINT" #~ msgstr "Не удалось задать владельца файла: %s" #, fuzzy #~ msgid "Failed to create thread for handling signals" #~ msgstr "Не удалось создать контекст GSI: %s" #, fuzzy #~ msgid "Failure creating slot for child process." #~ msgstr "%s: Сбой создания области памяти для дочернего процесса" #, fuzzy #~ msgid "Failure forking child process." #~ msgstr "%s: Сбой при запуске дочернего процесса" #, fuzzy #~ msgid "Timeout waiting for child to finish" #~ msgstr "%s: Сбой ожидания окончания дочернего процесса" #, fuzzy #~ msgid " Failed to start external program: %s" #~ msgstr "Сбой просмотра: не удалось запустить программу %1." #, fuzzy #~ msgid "Failure opening pipes." 
#~ msgstr "Поток %d, сбой перенаправления потоков" #~ msgid "TargetRetriverCREAM initialized with %s service url: %s" #~ msgstr "TargetRetriverCREAM запущен с адресом службы %s: %s" #~ msgid "TargetRetriverARC0 initialized with %s service url: %s" #~ msgstr "TargetRetriverARC0 запущен с URL службы %s:: %s" #~ msgid "TargetRetriverUNICORE initialized with %s service url: %s" #~ msgstr "TargetRetriverUNICORE запущен с URL службы %sl: %s" #~ msgid "TargetRetriverARC1 initialized with %s service url: %s" #~ msgstr "TargetRetriverARC1 запущен с URL службы %s:: %s" #, fuzzy #~ msgid "Failed locating delegation credentials in chain configuration" #~ msgstr "Не удалось обнаружить доверенности в конфигурации клиента" #, fuzzy #~ msgid "Found malformed job state string: %s" #~ msgstr "Не удалось получить информацию о задаче: %s" #, fuzzy #~ msgid "Failed to set PEPd URL: '%s'" #~ msgstr "не удалось установить скан-код %x коду %d\n" #, fuzzy #~ msgid "Failed to create XACML request\n" #~ msgstr "Запрос %s к %s не выполнен, получен ответ: %s" #, fuzzy #~ msgid "Failed to authorize XACML request: %s\n" #~ msgstr "Запрос %s к %s не выполнен, получен ответ: %s" #, fuzzy #~ msgid "Response is null" #~ msgstr "Ключ имеет значение NULL" #, fuzzy #~ msgid "%s is not authorized" #~ msgstr "%s не является объектом" #, fuzzy #~ msgid "Failed to create soft link: %s" #~ msgstr "невозможно создать жесткую ссылку %s на %s" #, fuzzy #~ msgid " XRSL_elements: [%s], %s" #~ msgstr "Дополнительные элементы" #, fuzzy #~ msgid " JDL_elements: [%s], %s" #~ msgstr "Дополнительные элементы" #~ msgid "Try to parse as XRSL" #~ msgstr "Попытка синтаксического разбора как XRSL" #~ msgid "Try to parse as JDL" #~ msgstr "Попытка синтаксического разбора как JDL" #~ msgid "Try to parse as ARCJSDL" #~ msgstr "Попытка синтаксического разбора как ARC JSDL" #~ msgid "Generate JDL output" #~ msgstr "Создание JDL на выходе" #~ msgid "Generating %s output was unsuccessful" #~ msgstr "Создание %s на выходе не удалось" #~ msgid "Generate XRSL output" #~ msgstr "Создание XRSL на выходе" #~ msgid "Generate ARCJSDL output" #~ msgstr "Созадние ARC JSDL на выходе" #~ msgid "Unknown output format: %s" #~ msgstr "Неизвестный формат вывода: %s" #~ msgid " ExecutionCE: %s" #~ msgstr "Исполняющий вычислительный элемент: %s" #, fuzzy #~ msgid "Cannot parse the specified %s service (%s)" #~ msgstr "Не удалось найти класс сервиса" #, fuzzy #~ msgid "The specified %s service (%s) is not a valid URL" #~ msgstr "Заданная папка некорректна" #~ msgid "" #~ "cnd:\n" #~ "%s is a %s" #~ msgstr "" #~ "cnd:\n" #~ "%s is a %s" #, fuzzy #~ msgid "globus_io_cancel failed: %s" #~ msgstr "%s: Не удалось выполнить процедуру прерывания" #, fuzzy #~ msgid "Connect to %s failed: %s" #~ msgstr "Не удалось установить соединение с %s" #, fuzzy #~ msgid "clear_input: %s" #~ msgstr "Очистить ввод" #~ msgid "Connection closed" #~ msgstr "Подключение закрыто" #, fuzzy #~ msgid "Globus error (read): %s" #~ msgstr "%s: ошибка чтения на %s\n" #, fuzzy #~ msgid "*** Server response: %s" #~ msgstr "&Ответ сервера:" #, fuzzy #~ msgid "Globus error (write): %s" #~ msgstr "" #~ "Ошибка:\n" #~ "\n" #~ "Не удалось записать %s\n" #, fuzzy #~ msgid "*** Client request: %s" #~ msgstr "Запрос удался!!!" 
#~ msgid "Authenticating: %s" #~ msgstr "Проверка подлинности: %s" #, fuzzy #~ msgid "Connection to server failed: %s" #~ msgstr "Не удалось прервать связь с сервером" #~ msgid "Failed to read SSL token during authentication" #~ msgstr "Не удалось прочесть токен SSL в процессе проверки подлинности" #, fuzzy #~ msgid "Failed wrapping GSI token: %s" #~ msgstr "Не удалось создать контекст GSI: %s" #, fuzzy #~ msgid "Failed unwrapping GSI token: %s" #~ msgstr "Не удалось создать контекст GSI: %s" #, fuzzy #~ msgid "Urecognized SSL token received" #~ msgstr "получен неверный ответ на согласование по SSL: %c\n" #, fuzzy #~ msgid "Timeout while sending SOAP request" #~ msgstr "Создание и отправка запроса SOAP" #~ msgid "Error sending data to server" #~ msgstr "Ошибка передачи данных на сервер." #, fuzzy #~ msgid "read_response_header: line: %s" #~ msgstr "Слишком длинный заголовок" #, fuzzy #~ msgid "Timeout while reading response header" #~ msgstr "" #~ "Ошибка при чтении заголовка файла:\n" #~ " %1" #, fuzzy #~ msgid "Error while reading response header" #~ msgstr "" #~ "Ошибка при чтении заголовка файла:\n" #~ " %1" #, fuzzy #~ msgid "read_response_header: header finished" #~ msgstr "невозможно считать заголовок архива" #, fuzzy #~ msgid "skip_response_entity" #~ msgstr "Другая внешняя сущность" #, fuzzy #~ msgid "skip_response_entity: no entity" #~ msgstr "сущность не имеет атрибута %s" #~ msgid "Not connected" #~ msgstr "Нет подключения" #, fuzzy #~ msgid "Timeout sending header" #~ msgstr "Шрифт колонтитулов:" #~ msgid "Early response from server" #~ msgstr "Преждевременный ответ сервера" #~ msgid "No response from server received" #~ msgstr "Ответ сервера не получен" #~ msgid "Failed to send body" #~ msgstr "Не удалось отправить тело" #, fuzzy #~ msgid "Failure while receiving entity" #~ msgstr "ошибка при записи данных для категории`%s'" #, fuzzy #~ msgid "Timeout while sending header" #~ msgstr "ошибка при отправке %(message)s ( %(error)s )" #, fuzzy #~ msgid "GET: connection to be closed" #~ msgstr "Не удалось принудительно прервать связь с" #, fuzzy #~ msgid "GET callback returned error" #~ msgstr "Ошибка печати: команда «%s» возвратила %d\n" #, fuzzy #~ msgid "Failed while reading response content" #~ msgstr "ошибка при чтении данных ленты.\n" #, fuzzy #~ msgid "Timeout while reading response content" #~ msgstr "Создание и отправка запроса" #, fuzzy #~ msgid "Error while reading response content" #~ msgstr "Ошибка при чтении %d-ой из %d точек: %s\n" #, fuzzy #~ msgid "GET: calling callback: size: %u" #~ msgstr "Невозможно получить размер диска" #~ msgid "SOAP request failed (%s)" #~ msgstr "Ошибка запроса SOAP (%s)" #, fuzzy #~ msgid "SOAP request failed (srmMkdir)" #~ msgstr "Ошибка запроса SOAP (copy)" #~ msgid "SOAP request failed (get)" #~ msgstr "Ошибка запроса SOAP (get)" #~ msgid "SOAP request failed (getRequestStatus)" #~ msgstr "Ошибка запроса SOAP (getRequestStatus)" #~ msgid "SOAP request failed (put)" #~ msgstr "Ошибка запроса SOAP (put)" #~ msgid "SOAP request failed (copy)" #~ msgstr "Ошибка запроса SOAP (copy)" #~ msgid "SOAP request failed (setFileStatus)" #~ msgstr "Ошибка запроса SOAP (setFileStatus)" #~ msgid "SOAP request failed (getFileMetaData)" #~ msgstr "Ошибка запроса SOAP (getFileMetaData)" #, fuzzy #~ msgid "Response(%i): %s" #~ msgstr "Ответ" #~ msgid "Submission to %s failed, trying next target" #~ msgstr "Сбой засылки задачи на %s, проверка следующего назначения" #~ msgid "" #~ "path to local cache (use to put file into cache). 
The X509_USER_PROXY and " #~ "X509_CERT_DIR environment variables must be set correctly." #~ msgstr "" #~ "путь к локальному кэшу (используйте для сохранения файла в кэше). " #~ "Убедитесь, что переменные среды X509_USER_PROXY и X509_CERT_DIR заданы " #~ "правильно." #, fuzzy #~ msgid "" #~ "The config: \n" #~ "%s \n" #~ msgstr "Конфигурация %1" #~ msgid "%s > %s => false: \\%s contains non numbers in the version part." #~ msgstr "" #~ "%s > %s => неверно: \\%s содержит нецифровые символы в номере версии." #, fuzzy #~ msgid "Can not locate CA certificate directory." #~ msgstr "не удалось прочитать файл корневых сертификатов \"%s\": %s\n" #~ msgid "Client chain configuration: %s" #~ msgstr "Конфигурация цепочкиприжатые клиента: %s" #~ msgid "Cannot import arc module" #~ msgstr "Не удалось импортировать модуль ARC" #, fuzzy #~ msgid "Cannot find arc XMLNode class" #~ msgstr "Класс ARC XMLNode не найден" #, fuzzy #~ msgid "Cannot stat local executable input file %s" #~ msgstr "Невозможно прочесть локальный список задач" #~ msgid "The parsing of the job description was unsuccessful" #~ msgstr "Синтаксическая разборка описания задачи не удалась" #, fuzzy #~ msgid "XRSL parsing problem" #~ msgstr "&Выводить информацию о проблемах" #, fuzzy #~ msgid "Cannot find arc UserConfig class" #~ msgstr "нет файла для класса %s" #, fuzzy #~ msgid "Encrypted saml assertion: %s" #~ msgstr "Разблокирование зашифрованных данных" #, fuzzy #~ msgid "Failed to create/find directory %s : %s" #~ msgstr "Ошибка создания каталога %s/.gnome." #~ msgid "Failed to create/find directory %s, (%d)" #~ msgstr "Ошибка создания/обнаружения каталога %s, (%d)" #, fuzzy #~ msgid "start_reading_srm: looking for metadata: %s" #~ msgstr "Поиск пакетов Gentoo: " #, fuzzy #~ msgid "start_reading_srm: obtained checksum: %s" #~ msgstr "контрольная сумма %x, а должна быть %x" #~ msgid "explicity select or reject an index server" #~ msgstr "явным образом выбрать или отсеять указанный каталог ресурсов" #~ msgid "[job ...]\n" #~ msgstr "[задача ...]\n" #, fuzzy #~ msgid "Cannot find vomses at %s, %s, %s, %s and %s" #~ msgstr "Не удаётся найти устройство диска %1 с плотностью %2." #~ msgid "IdP name" #~ msgstr "Имя IdP" #~ msgid "Configured username is invalid %s" #~ msgstr "Настроенное имя пользователя недопустимо %s" #~ msgid "%s: State FINISHING: starting child: %s" #~ msgstr "%s: состояние FINISHING: запуск дочернего процесса: %s" #~ msgid "%s: State: PREPARING: credentials probably expired (exit code 3)" #~ msgstr "" #~ "%s: состояние PREPARING: вероятно, истёк срок действия мандата (код " #~ "выхода 3)" #~ msgid "" #~ "%s: State: PREPARING: some error detected (exit code %i). Recover from " #~ "such type of errors is not supported yet." #~ msgstr "" #~ "%s: состояние PREPARING:обнаружена ошибка (код выхода %i). Восстановление " #~ "после такой ошибки пока не поддерживается." 
#~ msgid "url of myproxy server" #~ msgstr "URL сервера MyProxy" #~ msgid "Returned msg from myproxy server: %s" #~ msgstr "Сообщение сервера MyProxy: %s" #~ msgid "Myproxy server return failure msg" #~ msgstr "Сервер MyProxy сообщил об ошибке" #~ msgid "ARC_PLUGIN_PATH=%s" #~ msgstr "ARC_PLUGIN_PATH=%s" #~ msgid "Can not read key file: %s" #~ msgstr "Не удалось прочитать файл личного ключа: %s" #, fuzzy #~ msgid "StartReading: obtained size: %lli" #~ msgstr "Дисковый кэш, всего" #, fuzzy #~ msgid "Retrying with gsi protocol...\n" #~ msgstr "Проблема с подтверждением мандата" #, fuzzy #~ msgid "start_reading_ftp: failure" #~ msgstr "ftpfs: чтение каталога FTP %s... %s%s" #~ msgid "failed to send to %d of %s" #~ msgstr "не удалось отправить на %d %s" #~ msgid "%s: Plugin in state %s : %s" #~ msgstr "%s: Подключаемый модуль в состоянии %s : %s" #~ msgid "Will not use caching" #~ msgstr "Кэширование использоваться не будет" #~ msgid "Cannot clean up any cache files" #~ msgstr "Не удалось стереть кэшированые файлы" #, fuzzy #~ msgid "store job descriptions in local sandbox." #~ msgstr "Невозможно открыть файл с описанием задачи: %s" #, fuzzy #~ msgid "Failed to load service configuration form file %s" #~ msgstr "Не удалось загрузить конфигурацию сервиса" #, fuzzy #~ msgid "Contacting VOMS server (named %s): %s on port: %i" #~ msgstr "" #~ "Устанавливается связь с сервером VOMS (по имени %s): %s по порту: %s" #, fuzzy #~ msgid "Getting %s jobs" #~ msgstr "" #~ " -a, -all применить ко всем задачам пользователя" #, fuzzy #~ msgid "Killing %s jobs" #~ msgstr "" #~ " -a, -all применить ко всем задачам пользователя" #, fuzzy #~ msgid "Cleaning %s jobs" #~ msgstr "" #~ " -a, -all применить ко всем задачам пользователя" #, fuzzy #~ msgid "Cannot migrate to a %s cluster." #~ msgstr "Дисплей DMX на который происходит перемещение" #~ msgid "No valid jobdescription found for: %s" #~ msgstr "Не обнаружено допустимых описаний задачи: %s" #, fuzzy #~ msgid "Creating delegation failed" #~ msgstr "%s: Ошибка создания канала" #~ msgid "Job registration failed" #~ msgstr "Ошибка регистрации задачи" #~ msgid "Job starting failed" #~ msgstr "Ошибка запуска задачи" #~ msgid "Could not retrieve job information" #~ msgstr "Не удалось получить информацию о задаче" #~ msgid "Failed to clean job" #~ msgstr "Ошибка удаления задачи" #, fuzzy #~ msgid "The node %s has no %s element." #~ msgstr "Документ `%s' не имеет узла верхнего уровня <%s>\n" #, fuzzy #~ msgid "The response was not a SOAP message" #~ msgstr "" #~ "Содержимое письма не было принято.\n" #~ "%1" #~ msgid "Fetching job state" #~ msgstr "Извлекается состояние задачи" #, fuzzy #~ msgid "The status of the job (%s) could not be retrieved." 
#~ msgstr "Файл не может быть создан" #, fuzzy #~ msgid "The response to a service status request is Fault message: " #~ msgstr "Ответ на запрос о состоянии службы не является сообщением SOAP" #, fuzzy #~ msgid "There was an empty response to an index service query" #~ msgstr "Не поступил ответ на запрос о состоянии службы" #, fuzzy #~ msgid "The response of a index service query was not a SOAP message" #~ msgstr "Ответ на запрос о состоянии не является сообщением SOAP" #, fuzzy #~ msgid "Request failed, service returned: %s" #~ msgstr "Ошибка публикации службы" #, fuzzy #~ msgid "Migration returned failure: %s" #~ msgstr "ошибка выделения памяти" #, fuzzy #~ msgid "Migration failed, service returned: %s" #~ msgstr "Ошибка публикации службы" #, fuzzy #~ msgid "Job resuming failed" #~ msgstr "Не удалось послать задачу" #, fuzzy #~ msgid "Job resumed at state: %s" #~ msgstr "Неверное значение job-state!" #, fuzzy #~ msgid "Failed migrating job" #~ msgstr "Не удалось послать задачу" #~ msgid "Timer kicking" #~ msgstr "Запускается таймер" #~ msgid "Multiple " #~ msgstr "Множественные" #, fuzzy #~ msgid "Multiple timeout attributes in configuration file (%s)" #~ msgstr "Не удалось загрузить конфигурацию сервиса" #, fuzzy #~ msgid "Multiple brokername attributes in configuration file (%s)" #~ msgstr "Не удалось загрузить конфигурацию сервиса" #, fuzzy #~ msgid "Multiple bartender attributes in configuration file (%s)" #~ msgstr "Чтение файла конфигурации: %s" #, fuzzy #~ msgid "Multiple keysize attributes in configuration file (%s)" #~ msgstr "Чтение файла конфигурации: %s" #~ msgid "lasso_assertion_query_new() failed" #~ msgstr "сбой в lasso_assertion_query_new()" #~ msgid "lasso_assertion_query_init_request failed" #~ msgstr "сбой в lasso_assertion_query_init_request" #~ msgid "lasso_assertion_query_build_request_msg failed" #~ msgstr "сбой в lasso_assertion_query_build_request_msg" #~ msgid "assertionRequestBody shouldn't be NULL" #~ msgstr "assertionRequestBody не может быть NULL" #~ msgid "lasso_assertion_query_process_response_msg failed" #~ msgstr "сбой lasso_assertion_query_process_response_msg" #~ msgid "Configuration: Section: %s, Subsection: %s" #~ msgstr "Конфигурация: Раздел: %s, Подраздел: %s" #~ msgid "Configuration: LRMS: %s" #~ msgstr "Конфигурация: СУПО: %s" #~ msgid "Configuration: Queue: %s" #~ msgstr "Конфигурация: Очередь: %s" #~ msgid "process: CreateActivity" #~ msgstr "процесс: CreateActivity" #, fuzzy #~ msgid "Couldn't parse value \"%s\" of benchmark %s. Parse error: \"%s\"." #~ msgstr "Ошибка: невозможно обработать %1 как значение параметра.\n" #, fuzzy #~ msgid "Couldn't parse benchmark string: \"%s\"." #~ msgstr "Невозможно открыть файл проекта" #, fuzzy #~ msgid "lhs > rhs TRUE" #~ msgstr "Перечёркнутый" #, fuzzy #~ msgid "lhs < rhs TRUE" #~ msgstr "Перечёркнутый" #, fuzzy #~ msgid "Failed resolving aliases" #~ msgstr "создание алиасов словаря" #, fuzzy #~ msgid "Matchmaking, ExecutionTarget URL: %s " #~ msgstr "Неправильный URL: %1" #, fuzzy #~ msgid "Resolving alias: %s" #~ msgstr "&Изменить псевдоним..." #, fuzzy #~ msgid "Alias \"%s\" requested but not defined" #~ msgstr "метка %q+D определена, но не используется" #, fuzzy #~ msgid "Done resolving alias: %s" #~ msgstr "Синтаксический анализ сокращённого имени" #, fuzzy #~ msgid "Key is not a file: %s" #~ msgstr "%s: файл %s не является архивом\n" #, fuzzy #~ msgid "The specified configuration file (%s) is not a regular file" #~ msgstr "Файл \"%s\" не является обычным файлом или каталогом." 
#, fuzzy #~ msgid "Unable to load system configuration (%s)" #~ msgstr "Не удалось загрузить конфигурации клиента" #, fuzzy #~ msgid "XML user configuration (%s) loaded" #~ msgstr "Пакет конфигурации успешно загружен." #, fuzzy #~ msgid "INI user configuration (%s) loaded" #~ msgstr "Пакет конфигурации успешно загружен." #~ msgid "SSL_library_init failed" #~ msgstr "Сбой в SSL_library_init" #, fuzzy #~ msgid "timeout in seconds (default " #~ msgstr "время ожидания в секундах (по умолчанию 20)" #~ msgid "select broker method (Random (default), QueueBalance, or custom)" #~ msgstr "" #~ "выбрать алгоритм планировщика (Random (по умолчанию), QueueBalance, или " #~ "специальный)" #~ msgid "ERROR" #~ msgstr "ОШИБКА" #~ msgid "DMCs are loaded" #~ msgstr "Подгружены компоненты цепи сообщений" #~ msgid " And now I am there" #~ msgstr "Вот мы и здесь" #~ msgid "wrong option in cacheregistration" #~ msgstr "неверная опция в cacheregistration" #, fuzzy #~ msgid "Failed to allocate SSL locks" #~ msgstr "Невозможно выделить память для изображения:" #, fuzzy #~ msgid "Current transfer FAILED" #~ msgstr "Сбой переноса файла." #, fuzzy #~ msgid "Creating and sending a service an index service query" #~ msgstr "Создание и отправка запроса о состоянии службы" #, fuzzy #~ msgid "Creating client chain for UNICORE BES service" #~ msgstr "Создаётся интерфейс клиента" #, fuzzy #~ msgid "Request xml structure is: %s" #~ msgstr "Имя файла \"%s\" формата XML на указывает на каталог" #, fuzzy #~ msgid "UnAuthorized from count.pdp!!!" #~ msgstr "Защитить компьютер от несанкционированного использования" #~ msgid "Plugins element has no Name defined" #~ msgstr "В элементе Plugins не задано имя" #, fuzzy #~ msgid "DataManager has no name attribute defined" #~ msgstr "Не задан ни один параметр с именем «%s»" #, fuzzy #~ msgid "DataManager %s(%s) could not be created" #~ msgstr "Файл не может быть создан" #, fuzzy #~ msgid "Loaded DataManager %s(%s)" #~ msgstr "Страница загружена." #, fuzzy #~ msgid "ArcClientComponent has no name attribute defined" #~ msgstr "Не задан ни один параметр с именем «%s»" #, fuzzy #~ msgid "ArcClientComponent %s(%s) could not be created" #~ msgstr "Файл не может быть создан" #, fuzzy #~ msgid "Loaded ArcClientComponent %s(%s)" #~ msgstr "Страница загружена." #, fuzzy #~ msgid "Adding job info to sandbox" #~ msgstr "" #~ "INFO: Ожидаю окончания выполнения задачи…\n" #, fuzzy #~ msgid "Request failed: Error1" #~ msgstr "Ошибка запроса DDE poke" #, fuzzy #~ msgid "Request failed: Error2" #~ msgstr "Ошибка запроса DDE poke" #, fuzzy #~ msgid "Request failed: Error3" #~ msgstr "Ошибка запроса DDE poke" #, fuzzy #~ msgid "Request failed: Error4" #~ msgstr "Ошибка запроса DDE poke" #, fuzzy #~ msgid "Request failed: Error5" #~ msgstr "Ошибка запроса DDE poke" #, fuzzy #~ msgid "Requirements in sub-requirements satisfied." 
#~ msgstr "(Недоступно: зависимости не удовлетворены)" #, fuzzy #~ msgid "Extracting local file list from job description failed" #~ msgstr "Невозможно открыть файл с описанием задачи: %s" #~ msgid "Failed uploading file" #~ msgstr "Ошибка загрузки файла" #~ msgid "Can not access ARC job list file: %s (%s)" #~ msgstr "Невозможно открыть файл задач ARC: %s (%s)" #, fuzzy #~ msgid "Cannot access ARC user config file: %s (%s)" #~ msgstr "Невозможно открыть файл пользовательской конфигурации ARC: %s (%s)" #~ msgid "ARC user config file is not a regular file: %s" #~ msgstr "" #~ "Файл пользовательской конфигурации ARC не является стандартным файлом: %s" #, fuzzy #~ msgid "Could not load system client configuration" #~ msgstr "Не удалось обнаружить системную конфигурацию клиента" #~ msgid "Path is %s" #~ msgstr "Путь: %s" #, fuzzy #~ msgid "File type is neither file or directory" #~ msgstr "" #~ "%1:\n" #~ "Неизвестный тип файла: ни каталог ни файл." #, fuzzy #~ msgid "Cannot migrate from %s clusters." #~ msgstr "недопустимая инициализация %qT из %qT" #, fuzzy #~ msgid "Transfer FAILED" #~ msgstr "Сбой переноса файла." #, fuzzy #~ msgid "path for cache data (if different from -y)" #~ msgstr "" #~ " -Y, -cachedata путь путь к описанию кэша (если отличен от -y)" #, fuzzy #~ msgid "Received AuthURL " #~ msgstr "Получен сигнал" #, fuzzy #~ msgid "Received status " #~ msgstr "Новое состояние %s: %s" #, fuzzy #~ msgid "use the Confusa SLCS service" #~ msgstr "Использовать службу PC-To-Phone" #, fuzzy #~ msgid "Confusa Auth module" #~ msgstr "Модуль поддержки содержимого" #, fuzzy #~ msgid "__del__" #~ msgstr " Удалить " #~ msgid "delete run" #~ msgstr "обрывается исполнение" #, fuzzy #~ msgid "passphrase to myproxy server" #~ msgstr "&Путь к сборнику переводов" #, fuzzy #~ msgid " Implementation Version: %s" #~ msgstr "collect2 версия %s" #, fuzzy #~ msgid " JobName: %s" #~ msgstr "Неверное имя задания" #, fuzzy #~ msgid ", value: %s" #~ msgstr "Ошибка: %s" #, fuzzy #~ msgid " Author: %s" #~ msgstr "Автор:" #, fuzzy #~ msgid " Input: %s" #~ msgstr "Вводить" #, fuzzy #~ msgid " Output: %s" #~ msgstr "ВЫВОД" #, fuzzy #~ msgid " Notification element: " #~ msgstr "Неожиданный элемент" #, fuzzy #~ msgid " Address: %s" #~ msgstr "Адрес - 1 1/8 x 3 1/2 дюйма" #, fuzzy #~ msgid " Total CPU Time: %s" #~ msgstr "%t - использование процессора (система + пользователи)" #, fuzzy #~ msgid " Individual CPU Time: %s" #~ msgstr "Длительность по умолчанию (CPU)" #, fuzzy #~ msgid " Total Wall Time: %s" #~ msgstr "Длительность по умолчанию (по часам)" #, fuzzy #~ msgid " Individual Wall Time: %s" #~ msgstr "Длительность по умолчанию (по часам)" #, fuzzy #~ msgid " Benchmark: %s" #~ msgstr "Неприемлемый эталонный тест" #, fuzzy #~ msgid " value: %d" #~ msgstr "Ошибка: %s" #, fuzzy #~ msgid " time: %s" #~ msgstr "Время окончания" #, fuzzy #~ msgid " OSName: %s" #~ msgstr "название класса: %s" #, fuzzy #~ msgid " OSVersion: %s" #~ msgstr "%s, версия %s" #, fuzzy #~ msgid " DiskSpace: %d" #~ msgstr "Недостаточно места на диске" #, fuzzy #~ msgid " Alias: %s" #~ msgstr "Место" #~ msgid " Latitude: %s" #~ msgstr "Широта: %s" #~ msgid " Longitude: %s" #~ msgstr "Долгота: %s" #, fuzzy #~ msgid " Slots: %d" #~ msgstr "Разъёмы карт..." 
#, fuzzy #~ msgid " RunTimeEnvironment.Version: %s" #~ msgstr "collect2 версия %s" #, fuzzy #~ msgid " Homogeneous: true" #~ msgstr "Перечёркнутый" #, fuzzy #~ msgid " InBound: true" #~ msgstr "Перечёркнутый" #, fuzzy #~ msgid " OutBound: true" #~ msgstr "Исходящие данные:\n" #, fuzzy #~ msgid " Source.Threads: %d" #~ msgstr "X_vid" #, fuzzy #~ msgid " Target.Threads: %d" #~ msgstr "X_vid" #, fuzzy #~ msgid " Target.NeededReplicas: %d" #~ msgstr "Выбор узла назначения" #, fuzzy #~ msgid "Try to parse as POSIX JSDL" #~ msgstr "описание засылаемой задачи: %s" #, fuzzy #~ msgid "[PosixJSDLParser] Failed to create parser context" #~ msgstr "Не удалось создать контекст GSI: %s" #, fuzzy #~ msgid "Invalid notify attribute: %c" #~ msgstr "Недопустимый интервал времени" #, fuzzy #~ msgid "My hash is: %s" #~ msgstr "Контур закрыт." #, fuzzy #~ msgid "RegistrationCollector function is running." #~ msgstr "Код возврата" #, fuzzy #~ msgid "The ServiceID (%s) is found in the database." #~ msgstr "Файл не может быть создан" #, fuzzy #~ msgid "RemoveRegistrations: MGenTime=%s" #~ msgstr "Смысл-конец" #, fuzzy #~ msgid "Connect" #~ msgstr "Нет подключения" #, fuzzy #~ msgid "[PeerID] calculated hash: %s" #~ msgstr "контрольная сумма %x, а должна быть %x" #, fuzzy #~ msgid "[Cert] calculated value: %s" #~ msgstr "контрольная сумма %x, а должна быть %x" #, fuzzy #~ msgid "[Key] calculated value: %s" #~ msgstr "контрольная сумма %x, а должна быть %x" #, fuzzy #~ msgid "[Proxy] calculated value: %s" #~ msgstr "Доверенность действительна до: %s" #, fuzzy #~ msgid "[CaDir] calculated value: %s" #~ msgstr "контрольная сумма %x, а должна быть %x" #, fuzzy #~ msgid "find ServiceID: %s , hash: %d" #~ msgstr "Имя сервиса SP: %s" #, fuzzy #~ msgid "Connect request failed, try again." #~ msgstr "Ошибка при выполнении запроса" #, fuzzy #~ msgid "File size is %ul" #~ msgstr "Файл '%s' имеет размер в ноль байт - используется %s." #~ msgid "years" #~ msgstr "года(лет)" #~ msgid "months" #~ msgstr "месяца(ев)" #~ msgid "days" #~ msgstr "дня(дней)" #~ msgid "hours" #~ msgstr "часа(ов)" #~ msgid "ENV: " #~ msgstr "ENV: " #~ msgid "Broken RSL in NAME" #~ msgstr "Неверный код RSL в NAME" #~ msgid "Broken RSL in clientsoftware" #~ msgstr "Неверный код RSL в clientsoftware" #~ msgid "Broken RSL" #~ msgstr "Неверный код RSL" #, fuzzy #~ msgid "Failed reading RSL" #~ msgstr "Чтение %s раздела %s завершилось неудачей: %s" #, fuzzy #~ msgid "Failed parsing RSL" #~ msgstr "Не удалось проанализировать XML" #~ msgid "Broken RSL in jobid" #~ msgstr "Неверный код RSL в jobid" #, fuzzy #~ msgid "slashes are not allowed in jobid" #~ msgstr "%s: пробелы в имени закладки не разрешаются\n" #~ msgid "Broken RSL in action" #~ msgstr "Неверный код RSL в action" #~ msgid "Broken RSL in queue" #~ msgstr "Неверный код RSL в queue" #~ msgid "Broken RSL in replicacollection" #~ msgstr "Неверный код RSL в replicacollection" #~ msgid "Broken RSL in lifetime" #~ msgstr "Неверный код RSL в lifetime" #~ msgid "Broken RSL in starttime" #~ msgstr "Неверный код RSL в starttime" #~ msgid "Broken RSL in jobname" #~ msgstr "Неверный код RSL в jobname" #~ msgid "Broken RSL in jobreport" #~ msgstr "Неверный код RSL в jobreport" #~ msgid "Broken RSL in rerun" #~ msgstr "Неверный код RSL в rerun" #, fuzzy #~ msgid "Bad integer in rerun" #~ msgstr "переполнение при вычислении целочисленного выражения" #~ msgid "Broken RSL in disk" #~ msgstr "Неверный код RSL в disk" #, fuzzy #~ msgid "disk value is bad" #~ msgstr "Предупреждение: ошибка в подписи." 
#~ msgid "Broken RSL in notify" #~ msgstr "Неверный код RSL в notify" #~ msgid "Broken RSL in arguments" #~ msgstr "Неверный код RSL в arguments" #~ msgid "Broken RSL in inputdata" #~ msgstr "Неверный код RSL в inputdata" #~ msgid "Broken RSL in outputdata" #~ msgstr "Неверный код RSL в outputdata" #~ msgid "Broken RSL in gmlog" #~ msgstr "Неверный код RSL в gmlog" #~ msgid "Broken RSL in stdout" #~ msgstr "Неверный код RSL в stdout" #~ msgid "Broken RSL in stderr" #~ msgstr "Неверный код RSL в stderr" #~ msgid "Broken RSL in ftpthreads" #~ msgstr "Неверный код RSL в ftpthreads" #~ msgid "Broken RSL in cache" #~ msgstr "Неверный код RSL в cache" #~ msgid "Broken RSL in hostname" #~ msgstr "Неверный код RSL в hostname" #~ msgid "Broken RSL in dryrun" #~ msgstr "Неверный код RSL в dryrun" #~ msgid "Broken RSL in credentialserver" #~ msgstr "Неверный код RSL в credentialserver" #~ msgid "Broken RSL in acl" #~ msgstr "Неверный код RSL в acl" #, fuzzy #~ msgid "Failed evaluating RSL" #~ msgstr "Проверка правил фильтра: " #, fuzzy #~ msgid "UNKNOWN RSL STRUCTURE" #~ msgstr "декрементация указателя на неизвестную структуру" #, fuzzy #~ msgid "UNKNOWN RLS ELEMENT" #~ msgstr "Неизвестный атрибут \"%s\"=\"%s\" в тэге <%s>" #, fuzzy #~ msgid "Could not write the private key!" #~ msgstr "локаль '%s' не может быть установлена." #, fuzzy #~ msgid "Host not found: %s" #~ msgstr "Сервер не найден" #, fuzzy #~ msgid "Migration request failed" #~ msgstr "ошибка запроса об обрыве исполнения задачи" #, fuzzy #~ msgid "Migration request succeed" #~ msgstr "успешный запрос об обрыве исполнения задачи" #, fuzzy #~ msgid "There was no response to a migration request" #~ msgstr "Не поступил ответ на запрос об отправке задачи" #, fuzzy #~ msgid "A job resuming request failed" #~ msgstr "Ошибка запроса DDE poke" #, fuzzy #~ msgid "A job resuming request succeed" #~ msgstr "Неизвестный тип задания." #, fuzzy #~ msgid "There was no response to a job resuming request" #~ msgstr "Не поступил ответ на запрос об удалении результатов работы задачи" #, fuzzy #~ msgid "RegistrationCollector create: %s" #~ msgstr "Код возврата" #, fuzzy #~ msgid "Job description successfully stored in sandbox" #~ msgstr "описание засылаемой задачи: %s" #, fuzzy #~ msgid "Maximal UR Set size is: %d" #~ msgstr "Файл '%s' имеет размер в ноль байт - используется %s." #, fuzzy #~ msgid "Deleting %s" #~ msgstr "Идентификатор A-REX: %s" #, fuzzy #~ msgid "Reporting interval is: %d s" #~ msgstr "&Изменить псевдоним..." #~ msgid "show information about clusters and queues" #~ msgstr "вывести информацию о вычислительных ресурсах и очередях" #, fuzzy #~ msgid " Rank: %s" #~ msgstr "Положение в очереди" #, fuzzy #~ msgid "Error during the XML generation!" 
#~ msgstr "Ошибка при нахождении различий" #~ msgid " element: " #~ msgstr " элемент:" #, fuzzy #~ msgid "Can not access user's home directory: %s (%s)" #~ msgstr "" #~ "%s: каталог %s не удалён (является домашним каталогом пользователя %s)\n" #, fuzzy #~ msgid "User's home directory is not a directory: %s" #~ msgstr "" #~ "%s: каталог %s не удалён (является домашним каталогом пользователя %s)\n" #, fuzzy #~ msgid "Can not create ARC user config directory: %s (%s)" #~ msgstr "Невозможно создать пользовательский каталог конфигурации Dia" #, fuzzy #~ msgid "ARC user config directory is not a directory: %s" #~ msgstr "Невозможно создать пользовательский каталог конфигурации Dia" #~ msgid "Created empty ARC user config file: %s" #~ msgstr "Создан пустой файл для пользовательской конфигурации ARC: %s" #, fuzzy #~ msgid "CertificatePath defined, but not KeyPath" #~ msgstr "метка %q+D определена, но не используется" #, fuzzy #~ msgid "Delegation handler with service role starts to process" #~ msgstr "Поддержка ETRN не настроена.\n" #~ msgid "Shepherd chosen:" #~ msgstr "Выбран Чабан:" #~ msgid "Couldn't acquire transaction lock" #~ msgstr "Сбой предохранения транзакции" #~ msgid "Source Path size: %d" #~ msgstr "Длина пути в источнике: %d" #~ msgid "Registration for Service: %s" #~ msgstr "Регистрация сервиса: %s" #~ msgid "Outdated data: %s" #~ msgstr "Устаревшие данные: %s" #~ msgid "SOAP operation not supported: %s" #~ msgstr "Операция SOAP не поддерживается: %s" #~ msgid "Route" #~ msgstr "Маршрут" #~ msgid "Routing to %s" #~ msgstr "Маршрутизация к %s" #~ msgid "error on seek" #~ msgstr "ошибка поиска" #~ msgid "\tCache data dir : %s" #~ msgstr "\tКаталог с кэшем данных: %s" #, fuzzy #~ msgid "Can not parse PKCS12 file" #~ msgstr "Файл \"%file:1\" не может быть открыт" #, fuzzy #~ msgid "No per-job directory specified" #~ msgstr "Не задано описание задачи" #, fuzzy #~ msgid "Number of possible targets : %d" #~ msgstr "Сортировка назначений" #, fuzzy #~ msgid " ReferenceTime.value: %s" #~ msgstr "некорректное значение %%C" #~ msgid "WSRF request failed" #~ msgstr "Ошибка запроса WSRF" #, fuzzy #~ msgid "path to CA directory" #~ msgstr "Путь к каталогу для временных файлов" #~ msgid "" #~ "\n" #~ "\n" #~ "!!!!check_again" #~ msgstr "" #~ "\n" #~ "\n" #~ "!!!!check_again" #~ msgid "" #~ "\n" #~ "\n" #~ "!!!!do_delete" #~ msgstr "" #~ "\n" #~ "\n" #~ "!!!!do_delete" #, fuzzy #~ msgid "Can't acquire delegation context" #~ msgstr "%s: Невозможно получить контекст для %s" #~ msgid "" #~ "Argumentd to -i has the format Flavour:URL e.g.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/mds-vo-name=sweden,O=grid\n" #~ "CREAM:ldap://cream.grid.upjs.sk:2170/o=grid\n" #~ "\n" #~ "Argument to -c has the format Flavour:URL e.g.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgstr "" #~ "Аргумент опции -i задаётся по форме Flavour:URL, например:\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/mds-vo-name=sweden,O=grid\n" #~ "CREAM:ldap://cream.grid.upjs.sk:2170/o=grid\n" #~ "\n" #~ "Аргумент опции -c задаётся по форме Flavour:URL, например:\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgid "Creating an A-REX client." #~ msgstr "Создаётся клиент A-REX" #~ msgid "Client side MCCs are loaded." #~ msgstr "Подгружены клиентские компоненты цепи сообщений" #~ msgid "Failed to find delegation credentials in client configuration." 
#~ msgstr "Не удалось обнаружить доверенности в конфигурации клиента" #~ msgid "There were no response to a submission request." #~ msgstr "Не поступил ответ на запрос об отправке задачи" #~ msgid "A response to a submission request was not a SOAP message." #~ msgstr "Ответ на запрос о запуске задачи не является сообщением SOAP" #~ msgid "Creating and sending a status request." #~ msgstr "Создание и отправка запроса о состоянии" #~ msgid "There were no response to a status request." #~ msgstr "Не поступил ответ на запрос о состоянии" #~ msgid "Creating and sending a service status request." #~ msgstr "Создание и отправка запроса о состоянии службы" #~ msgid "There were no response to a service status request." #~ msgstr "Не поступил ответ на запрос о состоянии службы" #~ msgid "Creating and sending request to terminate a job." #~ msgstr "Создание и отправка запроса о прерывании задачи" #~ msgid "There was no response to a job termination request." #~ msgstr "Не поступил ответ на запрос о прерывании задачи" #~ msgid "There was no response to a job cleaning request." #~ msgstr "Не поступил ответ на запрос об очистке задачи" nordugrid-arc-5.4.2/po/PaxHeaders.7502/de.gmo0000644000000000000000000000013213214316034016703 xustar000000000000000030 mtime=1513200668.502851479 30 atime=1513200668.502851479 30 ctime=1513200668.606852751 nordugrid-arc-5.4.2/po/de.gmo0000644000175000002070000013436113214316034016761 0ustar00mockbuildmock00000000000000w\8'9' @' N' \'i'l' t'~')' '9'9(<(T(m(($(((L(91)"k))I))2*&6*,]*%*%*4*5 +DA+%+?+H+>5,5t,6,,G-I-`-"{-+-)--:./J.Pz..#.$/)/I/f/ //// /-/)0">0a0,0"0#0%0$1'?1(g1!1 1112(2B2Y2v22-2%2)2&$3K3+h3/3)334 4 :4F40V4*444 445! 5B5!Y5 {55H5 5)5)6*A6"l6S6/6"7867=o7#7$7%7-8J8!`888858(92,9-_9+9.9+9::(:>c:::":(;");!L;4n;5;&;.<)/<'Y<=</<3</#=$S='x==)==->12>-d>>)>>>4 ?6??6v??"?*? @#8@ \@/h@@2@;@<"A _A#mA$A"A#A+A,)BVB.gBB B BBBB C%C5CHC XC dC(CCCCCD+DCD`D"wDD"D+DEE 0E>E,[E#E&EEE F]*F(F=FF pGIG'G!H%H"HHHI I&I5AI#wI)I IIjJ%~J#J0J JK 7KCKSK(mK1K)KKLL:LULdLsL LL L%L LM6M6KM*M M M MMMM"N)1N([NNN"N'N2O3O*SO%~O)O%OOP4PKPePPP P P P P PPQ"Q&BQ&iQQ*Q#Q'Q#!R9ER9RLRMSNTS"S#S%S'T,8T0eT0T'TTi UJsUAU1V_2VLV6V%W,EUZڞW5'%)۟(*./Y0%M1H3z\X Dd,0֣BGJB<դ0;C@9437h4,զ,Q/2ɧ-%**P'{%#ɨ0 =\y'Bȩ( )4^p ʪ Ԫު"</E/u=*0??6Ƭڬ8'-(U~ -ͭNJ _Ri!"ޮ"6>/u<406=g=E@@+2ñ8/45Ij1B5)E_)Xϳ@(i D$0Ofk(P$#Cg& 7ʶ76:/qF% (0A r555v1gA >0]=XKp*z_F73qHDk'C|{)ywQ7J<[py S\B?w 8\..&ml2 /r+{}E4X RxV>}o"PlrxFbneo6-)dY]c8NHVL"Dja :WBMiA h30q*dSjvc=/@`@RZa^~|YECf`O#ObuKI?#~Q+ (Gms <4t9&$$;ft_ZT1 9^'N5I[G( zML;g-e UJ!h:s%ukP%!,WiTU26,n %s attributes: base dn: %s filter: %s%s%s (%s)%s failed%s request failed%s request to %s failed with response: %s%s version %s%s: File request %s in SRM queue. Sleeping for %i secondsA response to a submission request was not a SOAP messageA status request failedA status request succeedARC Auth. 
request: %sARC delegation policy: %sAREXClient was not created properly.Aborted!Adding location: %s - %sArcAuthZ: failed to initiate all PDPs - this instance will be non-functionalAre you sure you want to synchronize your local job list?Authorized from remote pdp serviceAuthorized from xacml.pdpBoth of CACertificatePath and CACertificatesDir elements missing or emptyBroker %s loadedCan not create function: FunctionId does not existCan not dynamically produce AlgFacrotyCan not dynamically produce AttributeFactoryCan not dynamically produce EvaluatorCan not dynamically produce FnFactoryCan not find element with proper namespaceCan not find element with proper namespaceCan not get the delegation credential: %s from delegation service:%sCan not open job description file: %sCan not parse classname for AttributeFactory from configurationCan not parse classname for CombiningAlgorithmFactory from configurationCan not parse classname for FunctionFactory from configurationCan not parse classname for Policy from configurationCan not parse classname for Request from configurationCan't create delegation contextCan't create information handle - is the ARC ldap DMC plugin available?Can't read from sourceCan't write to destinationCancelling synchronization requestCannot convert module name to Python stringCannot create argument of the constructorCannot create doc argumentCannot find under response soap message:Cannot find content under response soap messageCannot find file at %s for getting the proxy. Please make sure this file exists.Cannot import moduleChangeActivityStatus: request = %sChangeActivityStatus: response = %sCheck: looking for metadata: %sCheck: obtained checksum: %sCheck: obtained size: %lliChecking URL returned by SRM: %sCleaning job: %sClosed successfullyClosing connectionCommand: %sConnect: Authentication timed out after %d msConnect: Connecting timed out after %d msConnect: Failed authentication: %sConnect: Failed to connect: %sConnect: Failed to init auth info handle: %sConnect: Failed to init handle: %sCould not create temporary file: %sCould not determine version of serverCould not open LDAP connection to %sCould not set LDAP network timeout (%s)Could not set LDAP protocol version (%s)Could not set LDAP timelimit (%s)Couldn't parse benchmark XML: %sCreateActivity: request = %sCreateActivity: response = %sCreating a CREAM clientCreating a UNICORE clientCreating a http clientCreating a pdpservice clientCreating a soap clientCreating an A-REX clientCreating and sending a service status requestCreating and sending a status requestCreating and sending job register requestCreating and sending job start requestCreating and sending requestCreating and sending request to clean a jobCreating and sending request to terminate a jobCreating and sending submit request to %sCreating directory %sCurrent transfer FAILED: %sCurrent transfer completeDCAU failedDCAU failed: %sDN %s is cached and is valid until %s for URL %sDN %s is cached but has expired for URL %sData transfer abortedData transfer aborted: %sDefault: %sDelegation ID: %sDelegation authorization failedDelegation role not supported: %sDelegation service: %sDelegation type not supported: %sDeleted %sDestination: %sDirectory size is larger than %i files, will have to call multiple timesDirectory: %sDisconnect: Closing timed out after %d msDisconnect: Failed closing - ignoring: %sDisconnect: Quitting timed out after %d msDuplicate replica found in LFC: %sError encoutered during job ID retrieval. 
All job IDs might not have been retrievedError opening lock file %s in initial check: %sError: no LDAP query started to %sEvaluator does not support loadable Combining AlgorithmsEvaluator does not support specified Combining Algorithm - %sEvaluator for ArcPDP was not loadedEvaluator for GACLPDP was not loadedEvaluator for XACMLPDP was not loadedFATAL, ERROR, WARNING, INFO, VERBOSE or DEBUGFailed authenticatingFailed connecting to server %s:%dFailed reading dataFailed reading list of filesFailed retrieving job IDsFailed retrieving job IDs: Unsupported url (%s) givenFailed retrieving job status informationFailed sending CWD command for credentials renewalFailed sending CWD command for job cancellingFailed sending CWD command for job cleaningFailed sending DELE command for job cancellingFailed sending RMD command for job cleaningFailed starting jobFailed to authenticate SAML Token inside the incoming SOAPFailed to authenticate Username Token inside the incoming SOAPFailed to authenticate X509 Token inside the incoming SOAPFailed to bind to ldap server (%s)Failed to connect for credential renewalFailed to connect for job cleaningFailed to connect to server %s:%dFailed to convert security information to ARC policyFailed to convert security information to ARC requestFailed to create ldap bind thread (%s)Failed to disconnect after credentials renewalFailed to disconnect after job cancellingFailed to disconnect after job cleaningFailed to find delegation credentials in client configurationFailed to generate SAML Token for outgoing SOAPFailed to generate Username Token for outgoing SOAPFailed to generate X509 Token for outgoing SOAPFailed to initialize OpenSSL libraryFailed to initialize main Python threadFailed to initiate delegationFailed to initiate delegation credentialsFailed to open data channelFailed to parse SAML Token from incoming SOAPFailed to parse Username Token from incoming SOAPFailed to parse X509 Token from incoming SOAPFailed to read object %s: %sFailed to remove cache per-job dir %s: %sFailed to store ftp fileFailed to transfer dataFailed to verify X509 Token inside the incoming SOAPFailed to verify the signature under Failed to verify the signature under Failed to write file %s: %sFailed uploading local input filesFailed while finishing reading from sourceFailed while reading from sourceFailed while writing to destinationFailure: %sFile delete failed, attempting directory deleteFile is not accessible: %sFile type is not available, attempting file deleteFiles associated with request token %s aborted successfullyFiles associated with request token %s released successfullyFunction : %sGetActivityDocuments: request = %sGetActivityDocuments: response = %sGetActivityStatuses: request = %sGetActivityStatuses: response = %sGetFactoryAttributesDocument: request = %sGetFactoryAttributesDocument: response = %sGlobus error: %sGrid identity is mapped to local identity '%s'HER: %sIdentity: %sInitialized %u-th Python serviceInitiating delegation procedureInteractive mode.Invalid EffectInvalid JobDescription:Invalid URL: %sInvalid class nameInvalid url: %sJVM startedJava object returned NULL statusJob %s does not report a resumable stateJob cancelling successfulJob cleaning successfulJob description to be sent: %sJob description: %sJob resuming successfulJob submission summary:Job submitted with jobid: %sJob termination failedLDAP connection already open to %sLDAP query timed out: %sLDAPQuery: Getting results from %sLDAPQuery: Initializing connection to %s:%dLDAPQuery: Querying %sLdap bind 
timeout (%s)Library : %sLoading %u-th Python serviceLocations are missing in destination LFC URLMLSD is not supported - trying NLSTMain Python thread was not initializedMemory allocation errorMigrateActivity: request = %sMigrateActivity: response = %sMissing CertificatePath element or ProxyPath element, or is missingMissing or empty CertificatePath elementMissing or empty CertificatePath or CACertificatesDir elementMissing or empty CertificatePath or CACertificatesDir element; will only check the signature, will not do message authenticationMissing or empty KeyPath elementMissing or empty KeyPath element, or is missingMissing or empty PasswordSource elementMissing or empty Username elementMissing reference to factory and/or module. It is unsafe to use Globus in non-persistent mode - (Grid)FTP code is disabled. Report to developers.Missing security object in messageNLST/MLSD failedNLST/MLSD failed: %sName: %sNo SOAP responseNo authorization response was returnedNo execution services registered in the index serviceNo job identifier returned by A-REXNo job identifier returned by BES serviceNo jobs givenNo policy file or DNs specified for simplelist.pdp, please set location attribute or at least one DN element for simplelist PDP node in configuration.No response from %sNo target available inside the policyNo target available inside the ruleNumber of OpenSSL locks changed - reinitializingOperation completed successfullyOutgoing Message is not SOAPPASV failedPASV failed: %sPDP: %s can not be loadedPassword encoding type not supported: %sPath %s is invalid, creating required directoriesPolicy Decision Service invocation failedPolicy is emptyProcessing a %s requestProtocol is %s, should be httpsProxy generation succeededProxy path: %sProxy type: %sRead archive file %sReason : %sRecieved transfer URL: %sRemoving %sRenewal of credentials was successfulRenewing credentials for job: %sRequest is emptyRequest is reported as ABORTED, but all files are doneRequest is reported as ABORTED, since it was cancelledRequest is reported as ABORTED. 
Reason: %sRequest: %sResponse: %sResponse: %sReusing connectionSASL InteractionSOAP invocation failedSRM did not return any informationSRM did not return any useful informationSRM returned no useful Transfer URLs: %sSSL error: %d - %s:%s:%sSendCommand: Failed: %sSendCommand: Timed out after %d msSendData: Data connect write failed: %sSendData: Data connect write timed out after %d msSendData: Data write failed: %sSendData: Data write timed out after %d msSendData: Failed sending DCAU commandSendData: Failed sending STOR command: %sSendData: Failed sending TYPE commandSendData: Local port failed: %sSendData: Local type failed: %sServer SRM version: %sServer implementation: %sService is waiting for requestsServiceURL missingSome transfers failedSource: %sStart testStartReadingStartWritingSubject: %sSubmission request failedSubmission request succeedSubmit: Failed sending CWD commandSubmit: Failed sending CWD new commandSubmit: Failed sending job descriptionSubmit: Failed to connectSubmit: Failed uploading local input filesSucceeded to authenticate SAMLTokenSucceeded to authenticate UsernameTokenSucceeded to authenticate X509TokenSucceeded to verify the signature under Succeeded to verify the signature under Target %s removed by FastestQueueBroker, doesn't report number of free slotsTarget %s removed by FastestQueueBroker, doesn't report number of total slotsTarget %s removed by FastestQueueBroker, doesn't report number of waiting jobsTerminateActivities: request = %sTerminateActivities: response = %sThe Response is not going to this endThe Service advertises no Health State.The Service doesn't advertise its Interface.The Service doesn't advertise its Quality Level.The Service doesn't advertise its Serving State.The Service doesn't advertise its Type.The StatusCode is SuccessThe arccat command performs the cat command on the stdout, stderr or grid manager's error log of the job.The arccp command copies files to, from and between grid storage elements.The arcget command is used for retrieving the results from a job.The arckill command is used to kill running jobs.The arcls command is used for listing files in grid storage elements and file index catalogues.The delegated credential got from delegation service is stored into path: %sThe endpoint of delgation service should be configuredThe job status could not be retrievedThe request has passed the policy evaluationThe response of a job cleaning request was not a SOAP messageThe response of a job termination request was not a SOAP messageThe response of a service status request was not a SOAP messageThe response of a status request was not a SOAP messageThe service status could not be retrievedThere are %d certificates in the returned msgThere are %d requests, which satisfy at least one policyThere is %d subjects, which satisfy at least one policyThere was no HTTP responseThere was no SOAP responseThere was no response to a job cleaning requestThere was no response to a job termination requestThere was no response to a service status requestThere was no response to a status requestThere was no response to a submission requestThis seems like a temporary error, please try again laterTransfer FAILED: %sTransfer completeType is file, calling srmRmUnable to create SOAP client used by CREAMClient.Unauthorized from remote pdp serviceUnsupported destination url: %sUnsupported protocol in url %sUnsupported source url: %sUpdateCredentials: request = %sUpdateCredentials: response = %sUser interface errorUsername Token handler is not configuredUsing 
insecure data transferUsing secure data transferUsing space token %sVOMS attribute parsing failedWaiting for responseWarning: Using SRM protocol v1 which does not support space tokensWrong number of parameters specifiedX509 Token handler is not configuredXACML request: %sXML response: %sYour identity: %sYour proxy is valid until: %s[-]name[filename ...][job ...]all jobsbrokerchecingBartenderURL: Response: %scheck_ftp: failed to get file's modification timecheck_ftp: failed to get file's sizecheck_ftp: globus_ftp_client_get failedcheck_ftp: globus_ftp_client_modification_time failedcheck_ftp: globus_ftp_client_register_readcheck_ftp: globus_ftp_client_size failedcheck_ftp: timeout waiting for modification_timecheck_ftp: timeout waiting for sizeclass name: %sclientxrsl foundclientxrsl not foundconfiguration file (default ~/.arc/client.conf)could not find end of clientxrslcould not find start of clientxrsldirnamedisplay all available metadatado not ask for verificationdo not try to force passive transferdownload directory (the job directory will be created in this directory)echo: Unauthorizedfilenameforce migration, ignore kill failureftp_complete_callback: error: %sftp_complete_callback: successftp_read_callback: successftp_read_thread: Globus error: %sftp_read_thread: for_read failed - aborting: %sftp_read_thread: get and register buffersftp_read_thread: too many registration failures - abort: %sftp_read_thread: waiting for eofftp_write_thread: for_write failed - abortingftp_write_thread: get and register buffersglobus_ftp_client_operationattr_set_authorization: error: %shourhoursinit_handle: globus_ftp_client_handleattr_init failedinit_handle: globus_ftp_client_handleattr_set_gridftp2 failedinit_handle: globus_ftp_client_operationattr_init failedinput is not SOAPjobdescription file describing the job to be submittedjobdescription string describing the job to be submittedkeep the files on the server (do not clean)levellibjvm.so not loadable - check your LD_LIBRARY_PATHlist_files_ftp: failed to get file's modification timelist_files_ftp: failed to get file's sizelist_files_ftp: globus_ftp_client_modification_time failedlist_files_ftp: globus_ftp_client_size failedlist_files_ftp: looking for modification time of %slist_files_ftp: looking for size of %slist_files_ftp: timeout waiting for modification_timelist_files_ftp: timeout waiting for sizelong format (more information)minuteminutesmodule name: %snnd: %snumbernumber of retries before failing file transferonly select jobs whose status is statusstroperate recursively up to specified leveloutput is not SOAPpathpath to config fileprint version informationremove the job from the local list of jobs even if the job is not found in the infosyssecondsecondssecondssetting file %s to size %llushow URLs of file locationsshow progress indicatorshow the stderr of the jobshow the stdout of the job (default)source destinationstart_reading_ftp: globus_ftp_client_getstart_reading_ftp: globus_ftp_client_get failedstart_reading_ftp: globus_thread_create failedstart_writing_ftp: globus_thread_create failedstart_writing_ftp: mkdir failed - still trying to writestart_writing_ftp: put failedstatusstrstop_reading_ftp: aborting connectionstop_reading_ftp: waiting for transfer to finishstringtimeout in seconds (default 20)urluse secure transfer (insecure by default)yProject-Id-Version: Arc Report-Msgid-Bugs-To: support@nordugrid.org POT-Creation-Date: 2017-12-13 22:31+0100 PO-Revision-Date: 2010-02-25 19:18+0100 Last-Translator: Steffen Möller Language-Team: German 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Language: X-Generator: KBabel 1.11.4 Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2); X-Poedit-Language: Russian X-Poedit-KeywordsList: msg:2;IString:1;istring:1 X-Poedit-Basepath: /home/oxana/CVSROOT/ARC1 X-Poedit-SearchPath-0: src %s Attribute base dn: %s Filter: %s%s%s (%s)%s fehlgeschlagenAnfrage %s schlug fehl%s Anfrage an %s schlug fehl mit Antwort %s%s version %s%s: Datei Anfrage %s in SRM queue. Schlage für %i SekundenEine Antwort zu submission request war keine SOAP messageEine Anfrage nach dem Status schlug fehlDie Anfrage nach dem Status war erfolgreichARC Auth. Anfrage: %sARC delegation policy: %sAREXClient wurde nicht richtig angelegt.Abbruch!Füge location hinzu: %s - %sArcAuthZ: Fehler bei Initiierung wenigstens einer PDP - diese Instanz wird nicht funktional seinSoll die lokale job list wirklich synchronisiert werden?Authorisiert durch remote pdp serviceAuthorisiert durch xaml.pdpSowohl CACertificatePath als auch CACertificatesDir Elemente sind fehlend oder leerBroker %s geladenKann Funktion nicht anlegen: FunctionId existiert nichtKann AlgFactory nicht dynamisch anlegenKann AttributeFactory nicht dynamisch anlegenKann Evaluator nicht dynamisch produzierenKann FnFactory nicht dynamisch anlegenKann element mit passendem namespace nicht findenKann element mit passendem namespace nicht findenKann delegation credential nicht erhalten: %s von delegation service: %sKonnte Datei mit Job Beschreibung nicht öffnen: %sKonnte classname für AttributeFactory nicht von Konfiguration parsenKonnte classname für CombiningAlgorithmFactory nicht von Konfiguration parsenKonnte classname für FunctionFactory nicht von Konfiguration parsenKonnte classname für Policy nicht von Konfiguration parsenKonnte classname für Request nicht von Konfiguration parsenKann delegation context nicht anlegenKann information handle nicht anlegen - ist das ARC LDAP DMC plugin verfügbar?Kann nicht von Quelle lesenKann nicht zu Ziel schreibenAbbruch der SynchronisationsanfrageKann Modul name nicht zu Python Zeichenkette konvertierenKann Argument für den Konstruktor nicht anlegenKann doc Argument nicht anlegenKann in SOAP-Antwort nicht findenKann Inhalt in SOAP-Antwort nicht findenKann Datei nicht bei %s finden, um den Proxy zu erhalten. 
Bitte stellen Sie sicher, dass diese Datei existiert.Kann Modul nicht importierenChangeActivityStatus: запрос = %sChangeActivityStatus: ответ = %sCheck: looking für Metadata: %sCheck: erhielt checksum: %sCheck: erhielt Größe: %lliÜberprüfen der URL zurückgegeben von SRM: %sAufräumen von Job: %sVerbindung erfolgreich geschlossenSchließe VerbindungKommando: %sConnect: Zeitüberschreitung der Authentikation nach %d msConnect: Zeitüberschreitung der Verbindung nach %d msConnect: Authentikation fehlgeschlagen: %sConnect: Verbindung zu %s schlug fehlConnect: Konnte auth info handle nicht initialisieren: %sConnect: Konnte init handle nicht initialisieren: %sKonnte temporäre Datei nicht anlegen: %sKonnte Version des Server nicht bestimmenKonnte LDAP Verbindung nicht öffnen zu %sKonnte LDAP netowrk Zeitüberschreitung nicht setzen (%s)Konnte LDAP Protokoll Version nicht setzen (%s)Konnte LDAP Zeitlimit nicht setzen (%s)Konnte benchmark XML nicht parsen: %sCreateActivity: запрос = %sCreateActivity: ответ = %sAnlegen eines CREAM clientErstellen von UNICORE clientLege HTTP Client anLege pdpservice client anLege SOAP Clietn anLege A-REX client an.Erstlelen und senden einer Service Status AnfrageErstellen und senden einer Status-AnfrageErstellen und senden einer Anfragen, eien Job zu registrierenErstellen und senden einer Anfrage, einen Job zu startenErstellen und senden von AnfrageErstlelen und senden einer Anfragen einen Job zu löschenErstellen und senden von Anfrage, einen Job zu beendenErstelle und sende submit Anfrage an %sLege Verzeichnis %s anAktueller Transfer SCHLUG FEHL: %sAktueller Transfer vollständigDCAU fehlgeschlagenDCAU fehlgeschlagen: %sDN %s wird gecacht und ist gültig bis %s für Datei %sDN %s wird gecacht aber ist abgelaufen für URL %sDatentransfer abgebrochenDatentransfer abgebrochen: %sVoreinstellung: %sDelegation ID: %sDelegation Authorisierung fehlgeschlagenDelegation role nicht unterstützt: %sDelegation service: %sDelegation Typ nicht unterstützt: %sLöschte %sZiel: %sVerzeichnis enthält mehr als %i Dateien, werde Aufruf mehrfach ausführenVerzeichnis: %sDisconnect: Zeitüberschreitung vom Schließen nach %d msDisconnect: Fehler beim Schließen der Verbindung - ignoriert: %sDisconnect: Zeitüberschreitung beim Verlassen nach %d msDoppelte replica gefunden in LFC: %sFehler beim Bestimmen der job ID. 
Womöglich wurde keine job ID erhalten.Fehler bei Öffnen von Lock-Datei %s in initialer Überprüfung: %sFehler: keine LDAP Anfrage gestartet bei %sEvaluator unterstützt ladare Combining Algorithms nichtEvaluator unterstützt die angegebenen Combining Algorithms nicht - %sEvaluator für ArcPDP wurde nicht geladenEvaluator für GACLPDP wurde nicht geladenEvaluator für XACMLPDP wurde nicht geladenFATAL, ERROR, WARNING, INFO, VERBOSE or DEBUGFehler bei AuthentisierenFehler bei Verbinden zu server %s:%dFehler bei Lesen von DatenFehler bei Lesen von DateilisteKonnt job IDs nicht erhalten.Konnte job IDs nicht bestimmen: Nicht unterstützte URL erhalten (%s)Konnte Job Status Information nicht beziehen.Fehler beim Senden von CWD Kommando für Erneuerung von credentialsFehler beim Senden von CWD für den Abbruch eines JobsKonnte CWD Kommando nicht senden um Job aufzuräumenFehler beim Senden von DELE für den Abbruch eines JobsKonnte RMD Kommando nicht senden um Job aufzuräumenKonnte job nicht startenKonnte SAML Token aus eingehender SOAP nicht authentifizierenFehler bei der Authentifikation des Username Token in der einngehenden SOAPFehler bei Authentifizieren von X509 Token in eigehendem SOAPFehler bei Verbinden zu ldap server (%s)Fehler beim Verbindungen für Erneuerung von credentialsKonnte nicht verbinden, um Job aufzuräumenFehler bei Verbinden zu server %s:%dFehler bei Konvertieren von security information für ARC policyFehler bei Konvertierung von security information für ARC AnfrageFehler bei Anlegen von ldap bind thread (%s)Fehler bein Trennen der Verbindung nach Erneuerung der credentialsFehler beim Trennen der Verbindung nach Abbruch von JobKonnte Verbindung nicht trennen nach Aufräumen von JobKonnte delegation credentials in Client Konfiguration nicht findenKonnte SAML Token für ausgehendes SOAP nicht generierenFehler bei Erstellen von Nutzernamen Token für ausgehende SOAPFehler bei Generieren von X509 Token für ausgehende SOAPFehler bei Initialisierung von OpenSSL BibliothekFehler bei Initialisierung des main Python ThreadsInitiierung der Delegation fehlgeschlagenFehler bei der Initialisierung der delegation credentialsFehler bei Öffnen von DatenkanalKonnte SAML Token nicht aus eingehender SOAP herausparsenKonnte Username Token nicht von eingehender SOAP Nachricht herauslesenFehler bei Parsen von X509 Token in eigehendem SOAPFehler bei Lesen von Objekt %s: %sFehler bei Entfernen von cache per-job Verzeichnis %s: %sFehler bei Ablage von FTP DateiFehler bei Transfer von DatenFehler bei Verifizieren von X509 Token in eigehendem SOAPFehler bei der Überprüfung der Signatur unter Fehler bei der Überprüfung der Signatur unter Fehler bei Schreiben zu Datein %s: %sKonnte lokale Inputdateien nicht hochladenFehler bei Abschluß des Lesens von QuelleFehler bei Lesen von QuelleFehler bei Schreiben zu ZielFehler: %sLöschen von Datei schlug fehl, versuche als Verzeichnis zu löschenDatei ist nicht zugreifbar: %sDateitype ist nicht verfügbar, versuche Datei zu löschenDateien assoziiert mit Anfrage Token %s erfolgreich abgebrochenDateien assoziiert mit Anfrage Token %s erfolgreich freigegebenFunktion : %sGetActivityDocuments: запрос = %sGetActivityDocuments: ответ = %sGetActivityStatuses: запрос = %sGetActivityStatuses: ответ = %sGetFactoryAttributesDocument: запрос = %sGetFactoryAttributesDocument: ответ = %sGlobus Fehler: %sGrid Identität wird zugewiesen zu lokaler Identität '%s'HER: %sIdentität: %sInitialisierte %u-th Python servceInitialisierung der Delegations-ProzedurInteraktiver Modus.Ungültiger 
EffektUngültige JobDescription:Ungültige URL: %sUngültiger KlassennameUngültige url: %sJVM gestartetJava Objekt gab NULL status anJob %s berichtet nicht von einem resumable ZustandJob erfolgreich abgebrochenJob erfolgreich aufgeräumt.Zu sendende Job-Beschreibung : %sJob Beschreibung: %sJob erfolgreich resumed.Job Hochladen Zusammenfassung:Job hochgeladen mit Job ID: %sBeendigung des Jobs schlug fehlLDAP Verbindung bereits offen zu %sZeitüberschreibung bei LDAP Anfrage: %sLDAPQuery: Erhalte Ergebnisse von %sLDAPQuery: Initialisiere Verbindung zu %s:%dLDAPQuery: Frage an %sLdap bind timeout (%s)Bibliothek : %sLade %u-th Python ServiceLocations fehlen in destination LFC URLMLSD ist nicht unterstützt - versuche NLSTMain Python Thread wurde nicht initialisiertSpeicherallokationsfehlerMigrateActivity: запрос = %sMigrateActivity: отзыв = %sFehlendes CertificatePath Element oder ProxyPath Element, oder fehltFehlendes oder leeres CertificatePath ElementFehlendes oder leeres CertificatePath oder CACertificatesDir ElementFehlendes oder leeres CertificatePath oder CACertificatesDir Element; werde nur die Signature überprüfen, die Nachricht jedoch nicht authentifizierenFehlendes oder leeres KeyPath ElementFehlendes oder leeres KeyPath Element, oder fehltFehlendes oder leeres PasswordSource ElementFehlendes oder leeres Username ElementFehlende Referenz zu factory und/doer Module. Es ist unsicher, Globus im nicht-persistenten Modus zu nutzen - (Grid)FTP code wurde disabled. Bitte die Entwickler informieren.Fehlendes security Objekt in NachrichtNLST/UMLSD fehlgeschlagenNLST/UMLSD fehlgeschlagen: %sName %sKeine SOAP AntwortEs wurde keine authorization response erwidertKeine execution services in index service registriertA-REX lieferte keinen Job Identifikator zurückKein Job identifier von BES service zurückerhaltenKeine Jobs angegebenKeine Policy Datei oder DNs angegeben für simplelist.pdp, bitte setzen Sie ein location Attribut oder zumindest ein DN Element für den PDP Knoten in der KonfigurationKeine Antwort von %sKein Ziel innerhalb der Policy vorhandenKein Ziel verfügbar in dieser RegelAnzahl von OpenSSL locks verändert - reinitialisierungOperation erfolgreich abgeschlossenAusgehende Nachricht ist kein SOAPPASV fehlgeschlagenPASV fehlgeschlagen: %sPDP: %s kann nicht geladen werdenPasswort Kodierung nicht unterstützt: %sPfad %s ist ungültig, lege benötigte Verzeichnisse anAusführen des Policy Decision Service schlug fehlPolicy is leerVerarbeite %s AnfrageProtokol ist %s, sollte https seinProxy erfolgreich angelegtProxy Pfad: %sProxy Typ: %sLese Archiv Datei %sGrund : %sErhielt transfer URL: %sEntferne %sErneuerung der Credentials war erfolgreichErneuern der credentials für Job %sAnfrage ist leerAnfrage wurde berichtet als ABORTED (abgebrochen), aber alle Dateien wurden bearbeitetAnfrage wurde berichtet als ABORTED (abgebrochen), denn sie wurde abgebrochenAnfrage wurde berichtet als ABORTED (abgebrochen). 
Grund: %sAnfrage: %sAntwort: %sAntwort: %sWiederholte Nutzung von VerbindungSASL InteraktionSOAP Aufruf fehlgeschlagenSRM lieferte keine Information zurückSRM lieferte keinerlei gebrauchbare InformationSRM gab keine nützliche Transfer URLs: %sSSL Fehler: %d - %s:%s:%sSendCommand: Fehler: %sSendCommand: Zeitüberschreitung nach %d msSendData: Fehler bei Datenverbindung zum Schreiben: %sSendData: Zeitüberschreitung bei Datenverbindung zum Schreiben nach %d msSendData: Schreiben von Daten schlug fehl: %sSendData: Zeitüberschreitung beim Schreiben nach %d msSendData: Fehler bei Senden von DCAU KommandoSendData: Fehler bei Senden von STOR Kommando: %sSendData: Fehler bei Senden von TYPE KommandoSendData: Lokaler port schlug fehl: %sSendData: Lokaler type schlug fehl: %sServer SRM version: %sServer Implementation: %sService wartet auf AnfragenServiceURL fehltEinige Transfers schlugen fehlQuelle: %sStarte TestStartReadingStartWritingSubjekt: %sSubmission von Anfrage schlug fehlSubmission von Anfrage ist erfolgtSubmit: Konnte CWD Kommmando nicht sendenSubmit: Konnte CWD new Kommmando nicht sendenSubmit: Fehler bei Senden von Job BeschreibungSubmit: VerbindungsfehlerSubmit; Hochladen der lokalen Inputfiles schlug fehlErfolreiche Anthentifikation von SAMLTOkenErfolgreiche Authentifikation des UsernameTokenX509Token erfolgreich authentifiziertErfolgreiche Überprüfung der Signatur unter Erfolgreiche Verifikation der Signatur unter Ziel %s entfernt durch FastestQueueBroker, die Anzahl freier slots wird nicht genanntZiel %s entfernt durch FastestQueueBroker, die Anzahl vorhandener slots wird nicht genanntZiel %s entfernt durch FastestQueueBroker, die Anzahl wartender Jobs wird nicht genanntTerminateActivities: запрос = %sTerminateActivities: ответ = %sDie Antwort geht nicht bis zu diesem EndeDer Service gibt keinen Health State an.Der Service gibt seine Interface nicht an.Der Service gibt seinen Quality Level nicht an.Der Servcice gibt seinen Serving State nicht an.Der Service gibt seinen Typ nicht an.Der StatusCode ist SuccessЭта команда предназначена для вывода на экран сообщений стандартного выхода, стандартной ошибки или ошибок системы при исполнении задачиMit arccp werden Dateien zu, von und zwischen grid storage Elementen kopiert.Mit arcget erhält man die Ergebnisse eines Jobs.Mit arckill lassen sich laufenden Prozesse beenden.Mit arcls werden Verzeichniss auf grid storage Elementen und Datei Index Katalogen angegebenDas delegierte credential wie erhalten von delegation service is abgelegt unter Pfad: %sDer Endpunkt des delegation service konnte nicht konfiguriert werdenDer Job Status konnte nicht ermittelt werdenDie Anfrage hat die Policy Evaluierung bestandenDie Antwort auf eine Job Löschen-Anfrage war keine SOAP NachrichtDie Antwort zu einer Job Terminierungs-Anfrage war keine SOAP NachrichtDie Antwort zu einer Service Status Anfrage war keine SOAP messageDie Antwort auf eine Status Anfrage war keine SOAP NachrichtDer Service Status konnte nicht ermittelt werdenEs sind %d Zertifikate in der zurückgelieferten Nachricht.Es gibt %d Anfragen, die wenigstens einer Policy Anfrage genügtEs gibt %d Subjekte, die wenigstens eine Policy erfüllenKeine HTTP Antwort erhaltenKeine SOAP response erhaltenKeine Antwort auf eine Job Löschen-Anfrage erhaltenEs gab keine Antwort zu einer Job Terminierungs-AnfrageEs gab keine Antwort zu einer Service Status AnfrageEs gab keine Antwort zu einer Status AnfrageKeine Antwort zu submission request erhaltenDies scheint ein vorübergehender Fehler zu sein, bitte später 
nochmal probierenTransfer FEHLER: %sTransfer vollständigTyp ist Datei, rufe srmRm aufKonnte SOAP client nicht anlegen für CREAMClient.Nicht authorisiert von entferntem PDP serviceNicht unterstützte URL für Ziel: %sNicht-unterstzütztes Protrokoll in URL %sNicht unterstützte URL für Quelle: %sUpdateCredentials: запрос = %sUpdateCredentials: отзыв = %sBenutzungsschnittstellenfehlerNutzernamen Token handler ist nicht konfiguriertNutze unsicheren DatentransferNutze sicheren DatentransferNutze space token %sKonnte VOMS Attribut nicht herauslesenWarte vor AntwortWarnung: Nutze SRM Protokol v1 das keine space tokens unterstütztFalsche Anzahl an Parametern übertragenX509 Token handler ist nicht konfiguriertXACML Anfrage: %sXML Antwort: %sIhre Identität: %sIhr Proxy ist gültig bis: %s[-]Name[dateiname ...][Job ...]alle JobsBrokercheckingBartenderURL: Response: %scheck_ftp: konnte Modification time von Datei nicht erhaltencheck_ftp: konnten Dateigröße nicht bestimmencheck_ftp: globus_ftp_client_get fehlgeschlagencheck_ftp: globus_ftp_client_modification_time fehlgeschlagencheck_ftp: globus_ftp_client_register_readcheck_ftp: globus_ftp_client_size fehlgeschlagencheck_ftp: Zeitüberschreitung bei Warten auf modification_timecheck_ftp: Zeitüberschreitung bei Warten für GrößeKlassenname: %sclientxrsl gefundenclientxrsl nicht gefundenKonfigurationsdatei (Vorteinstellung ~/.arc/client.conf)konnte Ende von clientxrsl nicht findenkonnte Start von clientxrsl nicht findenVerzeichnisnamezeige alle verfügbare Metadatenfrage nicht nach Verifikationversuche nicht, passiven Transfer zu erzwigenDownload-Verzeichnis (das Job-Verzeichnis wird in diesem Verzeichnis abgelegt)echo: UnauthorisiertDateinameerzwinge Migration, ignoriere ein Fehlschlagen des Abbruchs bereits laufender Jobsftp_complete_callback: Fehler: %sftp_complete_callback: erfolgreichftp_read_callback: Erfolgftp_read_thread: Globus Fehler: %sftp_read_thread: for_read fehlgeschlagen - Abbruch: %sftp_read_thread: beziehe und registriere Pufferftp_read_thread: zu viele Registrierungsfehler - Abbruch: %sftp_read_thread: warte auf EOFftp_write_thread: for_write fehlgeschlagen - Abbruchftp_write_thread: Beziehe und Registriere Pufferglobus_ftp_client_operationattr_set_authorisation: Fehler: %sStundenStundeStundeninit_handle: globus_ftp_client_handleattr_init fehlgeschlageninit_handle: globus_ftp_client_handleattr_set_gridftp2 fehlgeschlageninit_handle: globus_ftp_client_operationattr_init fehlgeschlagenEingabe ist kein SOAPDatei mit Job-Beschreibung wird hochgeladenZeichenkette mit Job-Beschreibung wird hochgeladenbehalte die Dateien auf dem Server (dort nicht löschen)Tiefelibjvm.so nicht ladbar - überprüfe LD_LIBRARY_PATHlist_files_ftp: Fehler bei Bezug von Zeitpunkt der letzten Dateiänderunglist_files_ftp: Fehler bei Bezug von Dateigrößelist_files_ftp: globus_ftp_client_modification_time fehlgeschlagenlist_files_ftp: globus_ftp_client_size fehlgeschlagenlist_files_ftp: Bezug von Zeitpunkt der letzten Dateiänderung von %slist_files_ftp: Suche nach Größe von %slist_files_ftp: Zeitüberschreitung bei Warten auf Zeitpunkt der letzten Dateiänderung list_files_ftp: Zeitüberschreitung bei Warten auf Dateigröße ausführliche AusgabeMinutenMinuteMinutenModulname: %snnd: %sNummerAnzahl von Wiederholungen bis zu einem Abbruch der DateiübertragungSelektiere Jobs mit Status statusstrarbeite rekursiv bis zu einer festgelegten TiefeAusgabe ist nicht SOAPPfadPfad zu KonfigurationsdateiAngabe des aktuellen Versionsbezeichnersentferne Job aus lokaler Liste selbst wenn der Job 
dem Infosys nicht bekannt istSekundenSekundeSekundenSekundenSetze Datei %s zu Größe %lluzeige URLs von Datei-Lokalisationenzeige Fortschrittsanzeigezeige stderr des JobsZeige stdout des Jobs (Voreinstellung)Quelle Zielstart_reading_ftp: globus_ftp_client_get fehlgeschlagenstart_reading_ftp: globus_ftp_client_get fehlgeschlagenstart_reading_ftp: globus_thread_create fehlgeschlagenstart_writitng_ftp: globus_thread_create failedstart_writing_ftp: mkdir fehlgeschlagen - versuche weiter zu schreibenstart_writing_ftp: put fehlgeschlagenstatusstrstop_reading_ftp: Abbruch der Verbindungstop_reading_ftp: warte auf Beenden von TransferZeichenketteZeitüberschreitung nach Sekunden (Voreinstellung 20)URLNutze sicheren Transfer (unsicher ist Voreinstellung)jnordugrid-arc-5.4.2/po/PaxHeaders.7502/en@quot.header0000644000000000000000000000013013214315702020373 xustar000000000000000029 mtime=1513200578.38074925 29 atime=1513200578.38074925 30 ctime=1513200668.590852555 nordugrid-arc-5.4.2/po/en@quot.header0000644000175000002070000000226313214315702020446 0ustar00mockbuildmock00000000000000# All this catalog "translates" are quotation characters. # The msgids must be ASCII and therefore cannot contain real quotation # characters, only substitutes like grave accent (0x60), apostrophe (0x27) # and double quote (0x22). These substitutes look strange; see # http://www.cl.cam.ac.uk/~mgk25/ucs/quotes.html # # This catalog translates grave accent (0x60) and apostrophe (0x27) to # left single quotation mark (U+2018) and right single quotation mark (U+2019). # It also translates pairs of apostrophe (0x27) to # left single quotation mark (U+2018) and right single quotation mark (U+2019) # and pairs of quotation mark (0x22) to # left double quotation mark (U+201C) and right double quotation mark (U+201D). # # When output to an UTF-8 terminal, the quotation characters appear perfectly. # When output to an ISO-8859-1 terminal, the single quotation marks are # transliterated to apostrophes (by iconv in glibc 2.2 or newer) or to # grave/acute accent (by libiconv), and the double quotation marks are # transliterated to 0x22. # When output to an ASCII terminal, the single quotation marks are # transliterated to apostrophes, and the double quotation marks are # transliterated to 0x22. # nordugrid-arc-5.4.2/po/PaxHeaders.7502/insert-header.sin0000644000000000000000000000012713214315702021061 xustar000000000000000029 mtime=1513200578.38974936 29 atime=1513200578.38974936 29 ctime=1513200668.59285258 nordugrid-arc-5.4.2/po/insert-header.sin0000644000175000002070000000124013214315702021120 0ustar00mockbuildmock00000000000000# Sed script that inserts the file called HEADER before the header entry. # # At each occurrence of a line starting with "msgid ", we execute the following # commands. At the first occurrence, insert the file. At the following # occurrences, do nothing. The distinction between the first and the following # occurrences is achieved by looking at the hold space. /^msgid /{ x # Test if the hold space is empty. s/m/m/ ta # Yes it was empty. First occurrence. Read the file. r HEADER # Output the file's contents by reading the next line. But don't lose the # current line while doing this. g N bb :a # The hold space was nonempty. Following occurrences. Do nothing. 
x :b } nordugrid-arc-5.4.2/po/PaxHeaders.7502/quot.sed0000644000000000000000000000013013214315702017273 xustar000000000000000029 mtime=1513200578.39874947 29 atime=1513200578.39874947 30 ctime=1513200668.590852555 nordugrid-arc-5.4.2/po/quot.sed0000644000175000002070000000023113214315702017337 0ustar00mockbuildmock00000000000000s/"\([^"]*\)"/“\1”/g s/`\([^`']*\)'/‘\1’/g s/ '\([^`']*\)' / ‘\1’ /g s/ '\([^`']*\)'$/ ‘\1’/g s/^'\([^`']*\)' /‘\1’ /g s/“”/""/g nordugrid-arc-5.4.2/po/PaxHeaders.7502/en@boldquot.header0000644000000000000000000000013013214315702021234 xustar000000000000000029 mtime=1513200578.37174914 29 atime=1513200578.37174914 30 ctime=1513200668.591852567 nordugrid-arc-5.4.2/po/en@boldquot.header0000644000175000002070000000247113214315702021310 0ustar00mockbuildmock00000000000000# All this catalog "translates" are quotation characters. # The msgids must be ASCII and therefore cannot contain real quotation # characters, only substitutes like grave accent (0x60), apostrophe (0x27) # and double quote (0x22). These substitutes look strange; see # http://www.cl.cam.ac.uk/~mgk25/ucs/quotes.html # # This catalog translates grave accent (0x60) and apostrophe (0x27) to # left single quotation mark (U+2018) and right single quotation mark (U+2019). # It also translates pairs of apostrophe (0x27) to # left single quotation mark (U+2018) and right single quotation mark (U+2019) # and pairs of quotation mark (0x22) to # left double quotation mark (U+201C) and right double quotation mark (U+201D). # # When output to an UTF-8 terminal, the quotation characters appear perfectly. # When output to an ISO-8859-1 terminal, the single quotation marks are # transliterated to apostrophes (by iconv in glibc 2.2 or newer) or to # grave/acute accent (by libiconv), and the double quotation marks are # transliterated to 0x22. # When output to an ASCII terminal, the single quotation marks are # transliterated to apostrophes, and the double quotation marks are # transliterated to 0x22. # # This catalog furthermore displays the text between the quotation marks in # bold face, assuming the VT100/XTerm escape sequences. # nordugrid-arc-5.4.2/po/PaxHeaders.7502/ru.gmo0000644000000000000000000000013213214316034016741 xustar000000000000000030 mtime=1513200668.392850134 30 atime=1513200668.389850097 30 ctime=1513200668.604852727 nordugrid-arc-5.4.2/po/ru.gmo0000644000175000002070000231606113214316034017020 0ustar00mockbuildmock00000000000000li `fafef|ffffff g g7gNgegIggHg-g1(h<Zh;hh hhi6iGieiiiiiiiij jj+jEjZjpjjjjj-j #k 1k ?kLk ^khknkkk kk$kk& l!2lTlll}l llllll'm.m@mSm$gmm)mm!mnn 0n1:nln"nnnn nnoo8o MoXouooo o"oo p!p%6p\p#vp!ppppq "q -q 9q DqPqdqsqqqqqqqr#r3rHr]rtu u uuu v7(v*`vvvvvv>vww2wAw!Vw xw www4wxx&x@x)Rx.|x1x xxx yy y 9yCy!Yy{yAy+y+z"0z1Szz1z)zAz.>{m{{?{%{!|,*|)W|!|4|+|\}a}-p}}}7} ~++~W~`t~$~'~%")H r"*-&6-V&B'C2Z35ځ3'D(l/&ł+$(=3f+"ƃ/39M" 1ۄs --݅8/@*p(Ć!e3jB1/K%]7$/+-<#j+'$ 0D(`(,ߊ (DY-k<@֋)A^} ..'5?+u$ ƍ Ӎ;VDsԎ3.328)k)ޏ& E!f9‘" >V o+ݒ$-2R& Ó'Փ %D;cה9Om#"Е%,+H0t2Tؖ-Fbzӗ%%< b2n32՘*3Mj} %|KҚ-L;aL!#8E9~$ݜ"/,=\>;ٝ#9\YΞ"7 3Au@AΟ0Of"|" ܠ";Z"y-*:#1^ cɢ`-` '$ChФ "?b1Vܥ(3\p&Ȧ+4!IVA6 '8VoS.+& R^y # ީ##>0b/êߪ1I]z<;ϫX :d8ج*4Iet$)ϭ:47LXݮ99,f415)SHT')#CgE($R5T7ݲ;)Q/{11ݳ'(7?`&Ǵ-/!Df)+͵*$FB?,ɶ%.K2f2%̷a5rQ&,!%N%t"#45Lg!"%DQjּ1'&6E?|;D =E^D$%#4Xs%?H->v56"9'Q"y/738!l#7 #"2Fy()/.0C)t8:%(:Bc>GGM+)+"-N6|79%9,R) (-/K{251(5,^# ,2)Q{'+*0J*{":Vg*2#"6Y#x/4.*N&y#%+&*=@h&%!)BN`1&4['y:,, L6$:%$&K i/P&pvDzq6;Ah&#-2K/`!<>.UM'V~KWRe*%2P$6'&!Df,;I!P!r$ U 0>,T|   +EL > )_""5=X'  (+61b*2C/61f&42#'$Kpv/,%$6[u  "")2\m9!F,SsGi\yVFCO ! 
#  f*5 ) # Y F V fF {Z  ^ ] FA^ s e- hb  mwh+  8.8  PV2z:  !# |- r= %s Cache : %s Cache cleaning disabled Cache cleaning enabled Cache link dir : %s Control dir : %s Remote cache : %s Remote cache link: %s Session root dir : %s default LRMS : %s default queue : %s default ttl : %u Run 'arcclean -s Undefined' to remove cleaned jobs from job list Run 'arcclean -s Undefined' to remove killed jobs from job list To recover missing jobs, run arcsync Use arcclean to remove non-existing jobs Use arcclean to remove retrieved jobs from job list Use arclean to remove retrieved jobs from job list Is executable: true Name: %s Sources.DelegationID: %s Sources.Options: %s = %s Sources: %s Targets.DelegationID: %s Targets.Options: %s = %s Targets: %s %s certificate dn: %s expiration time: %s issuer dn: %s serial number: %d %s: %s: %i %s: %s Delivery service: %s Delivery service: LOCAL Delivery slots: %u Emergency slots: %u Post-processor slots: %u Pre-processor slots: %u Prepared slots: %u Shares configuration: %s Status of endpoint (%s) is %s This endpoint (%s) is STARTED or SUCCESSFUL attributes: base dn: %s filter: %s unspecified: %i %s -> %s (%s) --- DRY RUN --- Access control: %s Annotation: %s Argument: %s Benchmark information: Computing Service Log Directory: %s Computing endpoint URL: %s Computing endpoint interface name: %s Computing endpoint requirements: Credential service: %s Delegation IDs: DelegationID element: %s End Time: %s Entry valid for: %s Entry valid from: %s Environment.name: %s Environment: %s Exit Code: %d Exit code for successful execution: %d Health state: %s ID on service: %s Inputfile element: Installed application environments: Job Error: %s Job does not require exclusive execution Job management URL: %s (%s) Job requires exclusive execution Job status URL: %s (%s) Mapping queue: %s Name: %s No exit code for successful execution specified. 
Node access: inbound Node access: inbound and outbound Node access: outbound Notify: Old activity ID: %s Old job IDs: Operating system requirements: Other Messages: %s Other attributes: [%s], %s Outputfile element: Owner: %s PostExecutable.Argument: %s PreExecutable.Argument: %s Processing start time: %s Proxy valid until: %s Queue: %s RemoteLogging (optional): %s (%s) RemoteLogging: %s (%s) Requested CPU Time: %s Requested Slots: %d Results must be retrieved before: %s Results were deleted: %s Run time environment requirements: Service information URL: %s (%s) Session directory URL: %s Specific state: %s Stagein directory URL: %s Stageout directory URL: %s State: %s Stderr: %s Stdin: %s Stdout: %s Submitted from: %s Submitted: %s Submitting client: %s Used CPU Time: %s Used Memory: %d Used Wall Time: %s Waiting Position: %d [ JobDescription tester ] [ Parsing the original text ] [ egee:jdl ] [ emies:adl ] [ nordugrid:jsdl ] [ nordugrid:xrsl ] $X509_VOMS_FILE, and $X509_VOMSES are not set; User has not specified the location for vomses information; There is also not vomses location information in user's configuration file; Can not find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses, and the location at the corresponding sub-directory$X509_VOMS_FILE, and $X509_VOMSES are not set; User has not specify the location for vomses information; There is also not vomses location information in user's configuration file; Cannot find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses, and the location at the corresponding sub-directory%d Batch Systems%d Endpoints%d Shares%d mapping policies%d of %d jobs were resubmitted%d of %d jobs were submitted%i retries left, will wait until %s before next attempt%li seconds since lock file %s was created%s%s %s%s %s could not be created.%s (%s)%s > %s => false%s > %s => false: %s contains non numbers in the version part.%s > %s => true%s class is not an object%s constructed%s directory created%s directory exist! Skipping job.%s failed%s is an unsupported digest type%s is not a directory%s is not an object%s is not authorized to do action %s in resource %s %s made persistent%s parsing error%s plugin "%s" not found.%s request failed%s request to %s failed with response: %s%s request to %s failed. No expected response.%s request to %s failed. Unexpected response: %s.%s version %s%s->%s%s. Cannot copy fileset%s: %i%s: %s%s: %s: New job belongs to %i/%i%s: %s:%i%s: ACIX Location: %s%s: Adding new output file %s: %s%s: All %s %s successfully%s: Bring online request %s in SRM queue. 
Sleeping for %i seconds%s: Can't convert checksum %s to int for %s%s: Can't convert filesize %s to int for %s%s: Can't read list of input files%s: Can't read state - no comments, just cleaning%s: Can't rerun on request%s: Can't rerun on request - not a suitable state%s: Canceling job because of user request%s: Cancellation failed (probably job finished) - cleaning anyway%s: Cancellation probably succeeded - cleaning%s: Cancelling active DTRs%s: Cancelling other DTRs%s: Cannot upload two different files %s and %s to same LFN: %s%s: Checking user uploadable file: %s%s: Checksum %llu verified for %s%s: Cleaning control and session directories%s: Critical error for uploadable file %s%s: DTR %s to copy file %s failed%s: DTR %s to copy to %s failed but is not mandatory%s: Delete request due to internal problems%s: Destination file %s was possibly left unfinished from previous A-REX run, will overwrite%s: Destroying%s: Duplicate file in list of input files: %s%s: Error accessing file %s%s: Error reading file %s%s: Error reading user generated output file list in %s%s: Failed creating grami file%s: Failed obtaining local job information.%s: Failed obtaining lrms id%s: Failed reading .local and changing state, job and A-REX may be left in an inconsistent state%s: Failed reading local information%s: Failed running cancellation process%s: Failed running submission process%s: Failed setting executable permissions%s: Failed to cancel running job%s: Failed to clean up session dir%s: Failed to list output directory %s: %s%s: Failed to load evaluator for user policy %s: Failed to open file %s for reading%s: Failed to parse user policy%s: Failed to read dynamic output files in %s%s: Failed to read list of input files%s: Failed to read list of input files, can't clean up session dir%s: Failed to read list of output files%s: Failed to read list of output files, can't clean up session dir%s: Failed to read reprocessed list of input files%s: Failed to read reprocessed list of output files%s: Failed to run plugin%s: Failed to switch user ID to %d/%d to read file %s%s: Failed to write back dynamic output files in %s%s: Failed to write list of input files%s: Failed to write list of output files%s: Failed to write list of output status files%s: Failed writing changed input file.%s: Failed writing list of output files: %s%s: Failed writing local information%s: Failed writing local information: %s%s: Failure creating data storage for child process%s: Failure creating slot for child process%s: Failure starting child process%s: Failure waiting for child process to finish%s: File %s has wrong checksum: %llu. Expected %lli%s: File request %s in SRM queue. Sleeping for %i seconds%s: Going through files in list %s%s: Invalid DTR%s: Invalid file: %s is too big.%s: Invalid size/checksum information (%s) for %s%s: Job cancellation takes too long, but diagnostic collection seems to be done. Pretending cancellation succeeded.%s: Job cancellation takes too long. Failing.%s: Job failed in unknown state. Won't rerun.%s: Job failure detected%s: Job finished%s: Job has completed already. No action taken to cancel%s: Job is ancient - delete rest of information%s: Job is not allowed to be rerun anymore%s: Job is requested to clean - deleting%s: Job is too old - deleting%s: Job submission to LRMS failed%s: Job submission to LRMS takes too long, but ID is already obtained. Pretending submission is done.%s: Job submission to LRMS takes too long. 
Failing.%s: LRMS scripts limit of %u is reached - suspending submit/cancel%s: Location %s not accessible remotely, skipping%s: Plugin at state %s : %s%s: Plugin execution failed%s: Plugin failed%s: Processing job description failed%s: Reading output files from user generated list in %s%s: Reading status of new job failed%s: Received DTR %s to copy file %s in state %s%s: Received DTR with two remote endpoints!%s: Received data staging request to %s files%s: Received job in a bad state: %s%s: Removing %s from dynamic output file %s%s: Reprocessing job description failed%s: Some %s failed%s: State: %s from %s%s: State: %s: still in data staging%s: State: ACCEPTED%s: State: ACCEPTED: dryrun%s: State: ACCEPTED: has process time %s%s: State: ACCEPTED: moving to PREPARING%s: State: ACCEPTED: parsing job description%s: State: CANCELING%s: State: FINISHING%s: State: INLRMS%s: State: INLRMS: exit message is %i %s%s: State: PREPARING%s: State: SUBMIT%s: This job may be still running - canceling%s: Trying remove job from data staging which does not exist%s: Trying to remove job from data staging which is still active%s: Two identical output destinations: %s%s: Unknown user policy '%s'%s: Uploadable files timed out%s: User has uploaded file %s%s: checksum %s%s: size %llu%s: state CANCELING: child exited with code %i%s: state CANCELING: job diagnostics collected%s: state CANCELING: starting child: %s%s: state CANCELING: timeout waiting for cancellation%s: state SUBMIT: child exited with code %i%s: state SUBMIT: starting child: %s'(' expected')' expected'action' attribute not allowed in user-side job description'control' configuration option is no longer supported, please use 'controldir' instead'stdout' attribute must specified when 'join' attribute is specified(Re)Trying next destination(Re)Trying next source(empty)(null)--same and --not-same cannot be specified together.: %d: %s: Failure creating slot for reporter child process: Failure starting reporter child process: Logger name is not specified: Metrics tool returned error code %i: %s< %s<< %s> %sA computing resource using the GridFTP interface was requested, but %sthe corresponding plugin could not be loaded. Is the plugin installed? %sIf not, please install the package 'nordugrid-arc-plugins-globus'. %sDepending on your type of installation the package name might differ.A job cleaning request failedA job cleaning request succeedA job termination request failedA job termination request succeedA response to a submission request was not a SOAP messageA service status request failedA service status request succeededA start job request failedA start job request succeededA status request failedA status request succeedAC extension information for VO AC is invalid: ACIX returned %sAPEL aggregation message file (%s) created.APEL message file (%s) created.ARC Auth. 
request: %sARC delegation policy: %sAREXClient was not created properly.Abort request caused by error in transfer functionAbort request caused by transfer errorAborted!Accept failedAccept failed: %sAccepted connection from %u.%u.%u.%u:%uAccepted connection from [%s]:%uAccepted connection on %u.%u.%u.%u:%uAccepted connection on [%s]:%uAccepting submission of new job or modification request: %sAccess list location: %sAcquired auth token for %s: %sActivation failedAdd location: metadata: %sAdd location: url: %sAdding CREAM computing serviceAdding FQAN value: %sAdding FQAN/primary value: %sAdding VOMS group value: %sAdding VOMS primary group value: %sAdding VOMS primary role value: %sAdding VOMS role value: %sAdding Virtual Organization value: %sAdding action-id value: %sAdding cert chain value: %sAdding endpoint '%s' with interface name %sAdding endpoint (%s) to ServiceEndpointRetrieverAdding endpoint (%s) to TargetInformationRetrieverAdding endpoint (%s) to both ServiceEndpointRetriever and TargetInformationRetrieverAdding location: %s - %sAdding profile-id value: %sAdding request token %sAdding resource-id value: %sAdding resource-owner value: %sAdding resoure-id value: %sAdding space token %sAdding subject-id value: %sAdding subject-issuer value: %sAdding to bulk requestAdding virtual-organization value: %sAddress: %sAggregation record (%s) initialization successful.Aggregation record (%s) not exist, initialize it...Aggregation record (%s) read from file successful.Aggregation record (%s) stored successful.All %u process slots usedAll DTRs finished for job %sAll queries failedAll requirements satisfied.All results obtained are invalidAllocated %u buffers %llu bytes each.Allow specified entity to retrieve credential without passphrase. This option is specific for the PUT command when contacting Myproxy server.Already have directory: %sAlready reading from sourceAlready writing to destinationAn error occurred during the generation of job description to be sent to %sAnother process (%s) owns the lock on file %sApplication Options:Arc policy can not been carried by SAML2.0 profile of XACMLArcAuthZ: failed to initiate all PDPs - this instance will be non-functionalArchiving DTR %s, state %sArchiving DTR %s, state ERRORArchiving Usage Record to file %sAre you sure you want to clean jobs missing information?Are you sure you want to synchronize your local job list?Assembling BLAH parser log entry: %sAssigned to VO %sAssigned to authorization group %sAssuming - file not foundAssuming transfer is already aborted or failed.At least two values are needed for the 'inputfiles' attributeAt least two values are needed for the 'outputfiles' attributeAttempt to assign relative path to URL - making it absoluteAttempting to contact %s on port %iAttribute '%s' multiply definedAttribute 'join' cannot be specified when both 'stdout' and 'stderr' attributes is specifiedAttribute Value (1): %sAttribute Value (2): %sAttribute Value inside Subject: %sAttribute drain for sessionRootDir is incorrect booleanAttribute name (%s) contains invalid character (%s)Attribute name expectedAttributes 'gridtime' and 'cputime' cannot be specified togetherAttributes 'gridtime' and 'walltime' cannot be specified togetherAuthenticate in commands failedAuthentication Request URL: %sAuthentication failureAuthorized by arc.pdpAuthorized from remote pdp serviceAuthorized from simplelist.pdp: %sAuthorized from xacml.pdpBN_new || RSA_new failedBN_set_word failedBackup file (%s) created.Bad URL in acix_endpointBad URL in deliveryService: 
%sBad URL in deliveryservice: %sBad authentication information: %sBad checksum format %sBad credential value %s in cache access rulesBad directory name: %sBad format detected in file %s, in line %sBad format in XML response from delivery service at %s: %sBad format in XML response from service at %s: %sBad format in XML response: %sBad label: "%s"Bad logicBad logic for %s - bringOnline returned ok but SRM request is not finished successfully or on goingBad logic for %s - getTURLs returned ok but SRM request is not finished successfully or on goingBad logic for %s - putTURLs returned ok but SRM request is not finished successfully or on goingBad mount directory specifiedBad name for executable: Bad name for executable: %sBad name for runtime environment: %sBad name for stderr: %sBad name for stdout: %sBad number in definedshare %sBad number in maxdeliveryBad number in maxemergencyBad number in maxpreparedBad number in maxprocessorBad number in maxtransfertriesBad number in priority element: %sBad number in remotesizelimitBad number in speedcontrolBad number: %sBad or old format detected in file %s, in line %sBad path for %s: Rucio supports read/write at /objectstores and read-only at /replicasBad subcommand in configuration line: %sBad value for debugBad value for logLevelBadly formatted pid %s in lock file %sBatch System Information:Batch system information:Block %s not found in configuration file %sBlockName is emptyBoth URLs must have the same protocol, host and portBoth of CACertificatePath and CACertificatesDir elements missing or emptyBring online request %s finished successfully, file is now ONLINEBring online request %s is still in queue, should waitBroken stringBroker %s loadedBroker plugin "%s" not found.Brokers available to %s:Buffer creation failed !Buffer registration failedBusy plugins found while unloading Module Manager. 
Waiting for them to be released.CA certificate and CA private key do not matchCA certificates directory %s does not existCA name: %sCA-certificates installed:CPU clock speed: %iCPU model: %sCPU vendor: %sCPU version: %sCREAM request generation failed: %sCREAMClient not created properlyCache %s: Free space %f GBCache access allowed to %s by DN %sCache access allowed to %s by VO %sCache access allowed to %s by VO %s and group %sCache access allowed to %s by VO %s and role %sCache area free size: %i GBCache area total size: %i GBCache cleaning script failedCache configuration: %sCache creation date: %sCache directory: %sCache file %s does not existCache file %s not foundCache file %s was deleted during link/copy, must start againCache file %s was locked during link/copy, must start againCache file %s was modified in the last second, sleeping 1 second to avoid race conditionCache file %s was modified while linking, must start againCache file for %s not found in any local or remote cacheCache file is %sCache meta file %s is empty, will recreateCache meta file %s possibly corrupted, will recreateCache not found for file %sCache root: %sCacheService: UnauthorizedCached copy is still validCached file is locked - should retryCached file is outdated, will re-downloadCalculated checksum %s matches checksum reported by serverCalculated checksum: %sCalculated transfer checksum %s matches source checksumCalculated/supplied transfer checksum %s matches checksum reported by SRM destination %sCallback got failureCalling PrepareReading when request was already prepared!Calling PrepareWriting when request was already prepared!Calling acix with query %sCalling http://localhost:60000/Echo using ClientSOAPCalling http://localhost:60000/Echo using httplibCalling https://localhost:60000/Echo using ClientSOAPCalling plugin %s to query endpoint on %sCan not access CA certificate directory: %s. The certificates will not be verified.Can not access CA certificates directory: %s. 
The certificates will not be verified.Can not access VOMS file/directory: %s.Can not access VOMSES file/directory: %s.Can not access certificate file: %sCan not access key file: %sCan not access proxy file: %sCan not add X509 extended KeyUsage extension to new proxy certificateCan not add X509 extension to proxy certCan not compute digest of public keyCan not convert DER encoded PROXY_CERT_INFO_EXTENSION extension to internal formatCan not convert PROXY_CERT_INFO_EXTENSION struct from internal to DER encoded formatCan not convert keyUsage struct from DER encoded formatCan not convert keyUsage struct from internal to DER formatCan not convert private key to DER formatCan not convert signed EEC cert into DER formatCan not convert signed proxy cert into DER formatCan not convert signed proxy cert into PEM formatCan not convert string into ASN1_OBJECTCan not copy extended KeyUsage extensionCan not copy the subject name from issuer for proxy certificateCan not create BIO for parsing requestCan not create BIO for requestCan not create BIO for signed EEC certificateCan not create BIO for signed proxy certificateCan not create PolicyStore objectCan not create XACML Action Can not create XACML ActionAttribute: %s Can not create XACML Resource Can not create XACML ResourceAttribute: %s Can not create XACML SubjectAttribute: %s Can not create XACML request Can not create a new X509_NAME_ENTRY for the proxy certificate requestCan not create delegation crendential to delegation service: %sCan not create extension for PROXY_CERT_INFOCan not create extension for keyUsageCan not create extension for proxy certificateCan not create function %sCan not create function: FunctionId does not existCan not create name entry CN for proxy certificateCan not create the SSL Context objectCan not create the SSL objectCan not determine the install location. Using %s. Please set ARC_LOCATION if this is not correct.Can not duplicate serial number for proxy certificateCan not duplicate the subject name for the self-signing proxy certificate requestCan not dynamically produce AlgFacrotyCan not dynamically produce AttributeFactoryCan not dynamically produce EvaluatorCan not dynamically produce FnFactoryCan not dynamically produce PolicyCan not dynamically produce RequestCan not find element with proper namespaceCan not find element with proper namespaceCan not find ArcPDPContextCan not find CA certificates directory in default locations: ~/.arc/certificates, ~/.globus/certificates, %s/etc/certificates, %s/etc/grid-security/certificates, %s/share/certificates, /etc/grid-security/certificates. The certificate will not be verified. 
If the CA certificates directory does exist, please manually specify the locations via env X509_CERT_DIR, or the cacertificatesdirectory item in client.conf Can not find XACMLPDPContextCan not find certificate file: %sCan not find certificate with name %sCan not find issuer certificate for the certificate with subject %s and hash: %luCan not find key file: %sCan not find key with name: %sCan not find queue '%s' in the configuration fileCan not find voms service configuration file (vomses) in default locations: ~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomsesCan not generate X509 requestCan not generate policy objectCan not get SAMLAssertion SecAttr from message contextCan not get extended KeyUsage extension from issuer certificateCan not get policy from PROXY_CERT_INFO_EXTENSION extensionCan not get policy language from PROXY_CERT_INFO_EXTENSION extensionCan not get the certificate typeCan not get the delegation credential: %s from delegation service: %sCan not get the delegation credential: %s from delegation service:%sCan not get the issuer's private keyCan not load ARC evaluator object: %sCan not load ARC request object: %sCan not load policy objectCan not load policy object: %sCan not load request objectCan not open job description file: %sCan not open key file %sCan not parse classname for AttributeFactory from configurationCan not parse classname for CombiningAlgorithmFactory from configurationCan not parse classname for FunctionFactory from configurationCan not parse classname for Policy from configurationCan not parse classname for Request from configurationCan not parse date: %sCan not parse month: %sCan not parse the configuration file %sCan not parse time zone offset: %sCan not parse time: %sCan not read PEM private keyCan not read PEM private key: failed to decryptCan not read PEM private key: failed to obtain passwordCan not read PEM private key: probably bad passwordCan not read certificate file: %sCan not read certificate stringCan not read certificate/key stringCan not read information from the local job status fileCan not read key stringCan not set CN in proxy certificateCan not set issuer's subject for proxy certificateCan not set private keyCan not set pubkey for proxy certificateCan not set readable file for request BIOCan not set serial number for proxy certificateCan not set the lifetime for proxy certificateCan not set version number for proxy certificateCan not set writable file for request BIOCan not set writable file for signed EEC certificate BIOCan not set writable file for signed proxy certificate BIOCan not sign a EECCan't accept URL: %sCan't allocate memory for CA policy pathCan't convert DER encoded PROXYCERTINFO extension to internal formCan't convert X509 request from internal to DER encoded formatCan't create delegation contextCan't create information handle - is the ARC LDAP DMC plugin available?Can't create information handle - is the ARC ldap DMC plugin available?Can't delete directory %s: %sCan't delete file %s: %sCan't extract object's name from source urlCan't find LCAS functions in a library %sCan't find LCMAPS functions in a library %sCan't get policy from PROXYCERTINFO extensionCan't get policy language from PROXYCERTINFO extensionCan't get the first byte of input BIO to get its formatCan't get the first byte of input to determine its formatCan't handle URL %sCan't handle location %sCan't interpret configuration file %s as XMLCan't interpret 
configuration file as XMLCan't load LCAS library %s: %sCan't load LCMAPS library %s: %sCan't load plugin %s for access point %sCan't obtain configurationCan't open configuration fileCan't parse access rights in configuration lineCan't parse configuration lineCan't parse create arguments in configuration lineCan't parse host and/or port in response to EPSV/PASVCan't parse mkdir arguments in configuration lineCan't parse or:and in configuration lineCan't parse user:group in configuration lineCan't read configuration fileCan't read configuration file at %sCan't read from sourceCan't read job local descriptionCan't read list of destinations from file %sCan't read list of input filesCan't read list of locations from file %sCan't read list of output filesCan't read list of sources from file %sCan't read policy namesCan't recognize URL: %sCan't recognize group in configuration lineCan't recognize type of configuration fileCan't recognize type of configuration file at %sCan't recognize user in configuration lineCan't remove junk filesCan't rename file %s: %sCan't reset the inputCan't resolve host %sCan't set OpenSSL verify flagsCan't stat file: %s: %sCan't stat stdio channel %sCan't use URL %sCan't write to destinationCanceling of UNICORE jobs is not supportedCancellation completeCancelling DTR %s with source: %s, destination: %sCancelling active transferCancelling job %sCancelling job: %sCancelling synchronization requestCannot change owner of %s: %s Cannot change permission of %s: %s Cannot compare empty checksumCannot convert ARC module name to Python stringCannot convert ExecutionTarget (%s) to python objectCannot convert JobDescription to python objectCannot convert UserConfig to Python objectCannot convert config to Python objectCannot convert doc to Python objectCannot convert inmsg to Python objectCannot convert module name to Python stringCannot convert outmsg to Python objectCannot convert string %s to int in line %sCannot copy example configuration (%s), it is not a regular fileCannot create ExecutionTarget argumentCannot create JobDescription argumentCannot create UserConfig argumentCannot create argument of the constructorCannot create config argumentCannot create directories for log file %s. Messages will be logged to this logCannot create directory %s for per-job hard linksCannot create doc argumentCannot create http payloadCannot create inmsg argumentCannot create instance of Python classCannot create outmsg argumentCannot create output of %s for any jobsCannot create output of %s for job (%s): Invalid source %sCannot create resolver from /etc/resolv.confCannot determine hostname from gethostname()Cannot determine hostname from gethostname() to generate ceID automatically.Cannot determine the %s location: %sCannot find under response soap message:Cannot find ARC Config classCannot find ARC ExecutionTarget classCannot find ARC JobDescription classCannot find ARC Message classCannot find ARC UserConfig classCannot find ARC XMLNode classCannot find MCC_Status objectCannot find any proxy. This application currently cannot run without a proxy. If you have the proxy file in a non-default location, please make sure the path is specified in the client configuration file. If you don't have a proxy yet, please run 'arcproxy'!Cannot find content under response soap messageCannot find custom broker classCannot find file at %s for getting the proxy. 
Please make sure this file exists.Cannot find local input file '%s' (%s)Cannot find service classCannot find the CA certificates directory path, please set environment variable X509_CERT_DIR, or cacertificatesdirectory in a configuration file.Cannot find the path of the proxy file, please setup environment X509_USER_PROXY, or proxypath in a configuration fileCannot find the user certificate path, please setup environment X509_USER_CERT, or certificatepath in a configuration fileCannot find the user private key path, please setup environment X509_USER_KEY, or keypath in a configuration fileCannot get VOMS server %s information from the vomses filesCannot get VOMS server address information from vomses line: "%s"Cannot get any AC or attributes info from VOMS server: %s; Returned message from VOMS server: %s Cannot get dictionary of ARC moduleCannot get dictionary of custom broker moduleCannot get dictionary of moduleCannot handle URL %sCannot handle local user %sCannot import ARC moduleCannot import moduleCannot initialize ARCHERY domain name for queryCannot initialize winsock libraryCannot link to a remote destination. Will not use mapped URLCannot link to source which can be modified, will copy insteadCannot open BLAH log file '%s'Cannot open cache log file %s: %s. Cache cleaning messages will be logged to this logCannot output XRSL representation: The Resources.SlotRequirement.NumberOfSlots attribute must be specified when the Resources.SlotRequirement.SlotsPerHost attribute is specified.Cannot parse integer value '%s' for -%cCannot parse password source %s it must be of source_type or source_type:data format. Supported source types are int,stdin,stream,file.Cannot parse password source expression %s it must be of type=source formatCannot parse password source type %s. Supported source types are int,stdin,stream,file.Cannot parse password type %s. 
[Translation catalogue residue: the individual msgid strings of the nordugrid-arc gettext catalogue appear here run together, their separators lost in extraction. The span opens with the tail of a message begun earlier ("Currently supported values are 'key','myproxy','myproxynew' and 'all'.") and then lists, in alphabetical order, the untranslated log and error messages from "Cannot parse schema!" up to the truncated "No locations": certificate, proxy and VOMS handling, GridFTP control commands, data staging (DTR) and caching, credential delegation, job submission and management (A-REX, EMI-ES, CREAM, UNICORE), LDAP/ISIS information queries, and brokering/matchmaking. The catalogue structure itself (one msgid/msgstr entry per message) cannot be reconstructed from this dump.]
defined for %sNo locations for %sNo locations for destination different from source foundNo locations for destination different from source found: %sNo locations for destination found: %sNo locations for source found: %sNo locations found - probably no more physical instancesNo locations found for %sNo locations left for %sNo match found in cache access rules for %sNo more %s replicasNo more interfaces to try for endpoint %s.No more replicas, will use %sNo need to stage source or destination, skipping stagingNo new informational document assignedNo next MCC or Service at path "%s"No next element in the chainNo non-draining control or session directories availableNo non-draining session dirs availableNo overwrite requested or allowed, skipping pre-cleaningNo physical files found for destinationNo physical files found for sourceNo plugin is configured or authorised for requested path %sNo policy file or DNs specified for simplelist.pdp, please set location attribute or at least one DN element for simplelist PDP node in configuration.No port succeeded for %sNo private key with nickname %s exist in NSS databaseNo proxy foundNo proxy providedNo queue name given in queue block nameNo remote cache directory specifiedNo remote delivery services are useable, forcing local deliveryNo replicas found for %sNo request token specified!No request tokens foundNo requested security information was collectedNo response from %sNo response from AA service %sNo results returned from statNo right operand for concatenation operatorNo security processing/check requested for '%s'No server config part of config fileNo services specified. Please configure default services in the client configuration,or specify a cluster or index (-c or -g options, see arcsync -h).No session directory foundNo session directory set in configurationNo space token specifiedNo space tokens found matching description %sNo stagein URL is providedNo stream responseNo stream response from VOMS serverNo such DTR %sNo such file or directoryNo such group: %sNo such user: %sNo target available inside the policyNo target available inside the ruleNo test-job with ID %d found.No test-job, with ID "%d"No usable cachesNo user certificate by nickname %s foundNo user-certificate foundNo username suppliedNo valid caches found in configuration, caching is disabledNo valid credentials found, exitingNo valid handles left for listeningNo valid job identifier returned by EMI ESNo valid location availableNo valid response from VOMS server: %sNo value provided for Subject Attribute %s skippedNon-homogeneous resourceNone of the requested transfer protocols are supportedNot a collectionNot authorizedNot authorized according to request: %sNot authorized by arc.pdp - some of the RequestItem elements do not satisfy PolicyNot authorized from arc.pdp - failed to get reponse from EvaluatorNot authorized from simplelist.pdp: %sNot enough outputsandboxdesturi elements!Not enough parameters in copyurlNot enough parameters in linkurlNot enough space to store fileNot found %s in cacheNot listening to anythingNot set filter for this URL (%s).Not using delivery service %s due to previous failureNot using delivery service at %s because it is fullNot valid destinationNot valid sourceNothing to do: you have to either specify a test job id with -J (--job) or query information about the certificates with -E (--certificate) Notify failedNow copying (from -> to)Number %d is with nickname: %s%sNumber %d is: %sNumber of ComputingService elements obtained from full document and XPath 
qury do not match: %d != %dNumber of OpenSSL locks changed - reinitializingNumbers of sources and destinations do not matchOPTION...OS family: %sOS name: %sOS version: %sObject is not suitable for listingObject not initialized (internal error)Obtained XML: %sObtained host and address are not acceptableObtaining information failedObtaining status failedOnly POST is supported in CacheServiceOnly POST is supported in DataDeliveryServiceOnly Raw Buffer payload is supported for outputOnly globus rights are supported in Globus signing policy - %s is not supportedOnly signing rights are supported in Globus signing policy - %s is not supportedOnly standard input is currently supported for password source.Only user '.' for helper program is supportedOpenSSL error -- %sOpenSSL error string: %sOperating System errorOperation cancelled successfullyOperation completed successfullyOperation not supported for this kind of URLOperation on path "%s"OptimizedInformationContainer created temporary file: %sOptimizedInformationContainer failed to create temporary fileOptimizedInformationContainer failed to parse XMLOptimizedInformationContainer failed to rename temprary fileOptimizedInformationContainer failed to store XML document to temporary fileOption: %sOptions 'p' and 'n' can't be used simultaneouslyOptions for plugin are missingOriginal job description is listed below:Orphan delegation lock detected (%s) - cleaningOut of memory when generate random serialOut of retriesOut of tries while allocating new job IDOut of tries while allocating new job ID in %sOutgoing Message is not SOAPOutput EEC certificateOutput the proxy certificateOverwrite requested - will pre-clean destinationOwner: %sPASV failedPASV failed: %sPDP: %s (%s)PDP: %s (%s) can not be loadedPDP: %s can not be loadedPDP: missing name attributePDPD location is missingPDPD location: %sPEM_read_bio_X509_REQ failedPEM_write_bio_X509_REQ failedPEPD location is missingPEPD location: %sPKCS12 add password integrity failedPKCS12 output password not providedParser Context creation failed!Parsing .local file to obtain job-specific identifiers and infoParsing VOMS AC to get FQANs informationParsing error: Elements (%s) representing lower range have different valuesParsing error: Elements (%s) representing upper range have different valuesParsing error: Value of %s element can't be parsed as numberParsing error: Value of lower range (%s) is greater than value of upper range (%s)Parsing the "require" attribute of the "QueueName" nordugrid-JSDL element failed. 
An invalid comparison operator was used, only "ne" or "eq" are allowed.Passing service's information from collector to registratorPassword encoding type not supported: %sPath %s is invalid, creating required directoriesPath to .local job status file is required.Path to user's proxy file should be specified.Peer name: %sPerforming /* queryPerforming /ComputingService queryPerforming /Services/ComputingService queryPerforming matchmaking against target (%s).Performs neither sorting nor matchingPermanent failurePermanent service errorPermission checking failed, will try downloading without using cachePermission checking failed: %sPermission checking on original URL failed: %sPermission checking passedPermission checking passed for url %sPicking up left jobsPlace: %sPlatform: %sPlease choose the NSS database you would like to use (1-%d): Please choose the NSS database you would use (1-%d): Please choose the one you would use (1-%d): Plexer (%s) - next %s(%s) has no targetPlexer's (%s) next has no ID attribute definedPlugin %s error: %sPlugin %s error: %uPlugin %s failed to runPlugin %s failed to startPlugin %s for access point %s acquire failed (should never happen).Plugin %s for access point %s is broken.Plugin %s printed: %sPlugin %s printed: %uPlugin %s returned too much: %sPlugin %s returned: %uPlugin %s timeout after %u secondsPlugin (user mapping) command is emptyPlugin (user mapping) timeout is not a number: %sPlugin (user mapping) timeout is wrong number: %sPlugin failed: %sPlugin response: %sPolicy Decision Service invocation failedPolicy is emptyPolicy is not gaclPolicy line: %sPolicy subject: %sPolicyId: %s Alg inside this policy is:-- %sPostal code: %sPre-LRMS waiting jobs: %iPre-clean failed, will still try to copyPre-registering destinationPre-registering destination in index servicePreparing to stage destinationPreparing to stage sourceProblem accessing cache file %s: %sProblem creating dtr (source %s, destination %s)Problem loading plugin %s, skipping it.Problem with index service, will proceed to end of data stagingProblem with index service, will release cache lockProcessing a %s requestProcessing thread timed out. Restarting DTRProcessing type not supported: %sProcessingStartTime (%s) specified in job description is inside the targets downtime period [ %s - %s ].Protocol is %s, should be httpsProtocol plugins available:Protocol(s) not supported - please check that the relevant gfal2 plugins are installed (gfal2-plugin-* packages)Provided LRMSName is not a valid URL: %sProxy certificate information:Proxy expiredProxy expired. Job submission aborted. 
Please run 'arcproxy'!Proxy generation failed: Certificate has expired.Proxy generation failed: Certificate is not valid yet.Proxy generation failed: Failed to create temporary file.Proxy generation failed: Failed to retrieve VOMS information.Proxy generation failed: No valid certificate found.Proxy generation failed: No valid private key found.Proxy generation succeededProxy has expiredProxy key length: %iProxy path: %sProxy signature: %sProxy stored at %sProxy subject: %sProxy type: %sProxy with ARC PolicyProxy with all rights inheritedProxy with empty policy - fail on unrecognized policyProxy with specific policy: %sProxy with unknown policy - fail on unrecognized policyProxy-subject: %sProxy/credentials stored at %sProxy: %sPut request %s is still in queue, should wait %i secondsPut: failed to allocate memory for file %s in job %sPut: failed to create file %s for job %s - %sPut: failed to set position of file %s for job %s to %Lu - %sPut: failed to write to file %s for job %s - %sPut: there is no job: %s - %sPut: there is no payload for file %s in job: %sPut: unrecognized payload for file %s in job: %sPython Wrapper constructor succeededPython Wrapper destructor (%d)Python broker constructor called (%d)Python broker destructor called (%d)Python interpreter lockedPython interpreter releasedPython wrapper process calledPythonBroker initQuality level: %sQuery is not a valid XMLQuery returned no elements.Query returned unexpected element: %s:%sQuerying ACIX server at %sQuerying WSRF GLUE2 computing info endpoint.Querying at %sQuerying batch with %d jobsQuerying source replicas in bulkQuerying status of staging requestQueue '%s' usage is prohibited to FQAN '%s' by the site access policyQueue '%s' usage with provided FQANs is prohibited by the site access policyQueue information:RSA_generate_key_ex failedRSL substitution is not a sequenceRSL substitution sequence is not of length 2RSL substitution variable name does not evaluate to a literalRSL substitution variable value does not evaluate to a literalRandom sortingRaw command: %sRe-creating an A-REX clientRe-creating an EMI ES clientRead %i bytesRead access check failedRead access not allowed for %s: %sRead archive file %sRead commands in authenticate failedRead request from a fileRead request from a stringReading %u bytes from byte %lluReading output files from user generated list in %sReal transfer from %s to %sReason : %sReceived DTR %s back from scheduler in state %sReceived DTR %s during Generator shutdown - may not be processedReceived invalid DTRReceived message out-of-band (not critical, ERROR level is just for debugging purposes)Received no DTRReceived retry for DTR %s still in transferRecieved transfer URL: %sReconnectingRedirecting to %sRedirecting to new URL: %sRefusing connection: Connection limit exceededRegistered static information: doc: %sRegistering destination replicaRegistering directory: %s with plugin: %sRegistering dummy directory: %sRegistering plugin for state %s; options: %s; command: %sRegistering to %s EMIRegistryRegistering to %s ISISRegistrant has no proper URL specified. 
Registration end.Registration ends: %sRegistration exit: %sRegistration of Globus FTP buffer failed - cancel checkRegistration starts: %sRelation operator expectedReleasing destinationReleasing request(s) made during stagingReleasing requestsReleasing sourceRemapped to local group id: %iRemapped to local group name: %sRemapped to local id: %iRemapped to local user: %sRemapped user's home: %sRemove ISIS (%s) from listRemove: deleting: %sRemoving %sRemoving endpoint %s: It has an unrequested interface (%s).Removing logical file from metadata %sRemoving metadata in %sRemoving outdated job log file %sRemoving pre-registered destination in index serviceRename: globus_ftp_client_move failedRename: timeout waiting for operation to completeRenaming %s to %sRenewal of ARC1 jobs is not supportedRenewal of BES jobs is not supportedRenewal of CREAM jobs is not supportedRenewal of UNICORE jobs is not supportedRenewal of credentials was successfulRenewing credentials for job: %sRenewing proxy for job %sReplacing DTR %s in state %s with new requestReplacing existing token for %s in Rucio token cacheReplacing old SRM info with new for URL %sReplacing queue '%s' with '%s'Replica %s doesn't match preferred pattern or URL mapReplica %s has high latency, but no more sources exist so will use this oneReplica %s has high latency, trying next sourceReplica %s has long latency, trying next replicaReplica %s is mappedReplica %s matches host pattern %sReplica %s matches pattern %sReplicating file %s from remote cache failed due to source being deleted or modifiedReplicating file %s to local cache file %sRequest failedRequest failed: %sRequest failed: No response from IdPRequest failed: No response from IdP when doing authenticationRequest failed: No response from IdP when doing redirectingRequest failed: No response from SP Service when sending SAML assertion to SPRequest failed: No response from SPServiceRequest failed: response from IdP is not as expected when doing authenticationRequest failed: response from IdP is not as expected when doing redirectingRequest failed: response from SP Service is not as expected when sending SAML assertion to SPRequest failed: response from SPService is not as expectedRequest for specific Resource PropertyRequest is emptyRequest is not supported - %sRequest is reported as ABORTED, but all files are doneRequest is reported as ABORTED, since it was cancelledRequest is reported as ABORTED. Reason: %sRequest succeed!!!Request timed outRequest to open file with storing in progressRequest to push to unknown owner - %uRequest: %sRequested slots: %iRequested time range: %d.%d.%d. 0:00 - %d.%d.%d. 
%d:%d Requesting ComputingService elements of resource description at %sRequesting recursion and --nolist has no senseRequirement "%s %s" NOT satisfied.Requirement "%s %s" satisfied by "%s".Requirement "%s %s" satisfied.Reservation policy: %sResolving destination replicasResolving of index service for destination failedResolving of index service for source failedResolving source replicas in bulkResource description contains unexpected element: %s:%sResource description is emptyResource description provides URL for interface %s: %sResource description provides no URLs for interfacesResource description query validation passedResource description validation according to GLUE2 schema failed: Resource description validation passedResource information provider failedResource information provider failed with exit status: %i %sResource information provider log: %sResource information provider: %sResource manager: %sResource query failedResponse from the ISIS: %sResponse is not SOAPResponse is not expected WS-RPResponse sending errorResponse: %sResponse: %sRestarting after segmentation violation.Resubmission of job (%s) succeeded, but cleaning the job failed - it will still appear in the job listResubmission of job (%s) succeeded, but killing the job failed - it will still appear in the job listResult value (0=Permit, 1=Deny, 2=Indeterminate, 3=Not_Applicable): %dResults stored at: %sResumation of UNICORE jobs is not supportedResuming BES jobs is not supportedResuming job: %s at state: %s (%s)Retrieving file %sRetrieving job description of EMI ES jobs is not supportedRetry connecting to the EMIRegistry (%s) %d time(s).Retry connecting to the ISIS (%s) %d time(s).Retry: %dRetryingReturned message from VOMS server %s is: %s Returned message from VOMS server: %sReturned msg from myproxy server: %s %dReturned msg from voms server: %s Returning to generatorReusing connectionRight operand for RSL concatenation does not evaluate to a literalRucio returned %sRucio token for %s has expired or is about to expireRule: %sRule: capabilities: %sRule: group: %sRule: role: %sRule: vo: %sRunning command %sRunning command: %sRunning jobs: %iRunning mailer command (%s)Running user has no nameSAML Token handler is not configuredSAML2SSO process failedSASL InteractionSOAP Request failed: ErrorSOAP Request failed: No responseSOAP Request to AA service %s failedSOAP fault from delivery service at %s: %sSOAP fault receivedSOAP fault: %sSOAP invocation failedSOAP invokation failedSOAP operation is not supported: %sSOAP request: %sSOAP response: %sSOAP with SAML2SSO invokation failedSRM Client status: %sSRM did not return any informationSRM did not return any useful informationSRM returned no useful Transfer URLs: %sSSL error: %d - %s:%s:%sSSL error: %s, libs: %s, func: %s, reason: %sSSL locks not initializedScheduler configuration:Scheduler received NULL DTRScheduler received invalid DTRScheduler received new DTR %s with source: %s, destination: %s, assigned to transfer share %s with priority %dScheduler starting upScheduler stopped, exitingScheduling policy: %sSchema validation errorScheme: %sSecHandler configuration is not definedSecHandler has no configurationSecHandler has no name attribute definedSecHandler: %s(%s)Security Handler %s(%s) could not be createdSecurity Handlers processing failedSecurity check failed for incoming TLS messageSecurity check failed for outgoing TLS messageSecurity check failed in SOAP MCC for incoming messageSecurity check failed in SOAP MCC for outgoing messageSecurity check failed in TLS 
MCC for incoming messageSecurity processing/check failedSecurity processing/check for '%s' failedSecurity processing/check for '%s' passedSecurity processing/check passedSelect failed: %sSelf-signed certificateSend response failed: %sSendCommand: Command: %sSendCommand: Failed: %sSendCommand: Response: %sSendCommand: Timed out after %d msSendData: Can't parse host and/or port in response to EPSV/PASV: %sSendData: Data channel: %d.%d.%d.%d:%dSendData: Data channel: [%s]:%dSendData: Data connect write failed: %sSendData: Data connect write timed out after %d msSendData: Data write failed: %sSendData: Data write timed out after %d msSendData: Failed sending DCAU commandSendData: Failed sending EPSV and PASV commandsSendData: Failed sending STOR command: %sSendData: Failed sending TYPE commandSendData: Failed to apply local address to data connection: %sSendData: Local port failed: %sSendData: Local type failed: %sSendData: Server EPSV response parsing failed: %sSendData: Server EPSV response port parsing failed: %sSendData: Server PASV response parsing failed: %sSent RegEntries: %sSent entry: %sSent jobIDs: (nr. of job(s) %d)Server SRM version: %sServer implementation: %sServer stoppedService %s of type %s ignoredService %s(%s) could not be createdService Loop: Endpoint %sService has no ID attribute definedService has no Name attribute definedService information:Service is successfully added to the InfoRegistrar connecting to infosys %s.Service is waiting for requestsService removed from InfoRegistrar connecting to infosys %s.Service side MCCs are loadedService was already registered to the InfoRegistrar connecting to infosys %s.ServiceID attribute calculated from Endpoint ReferenceServiceID stored: %sServiceURL missingServing state: %sSession dir %s is owned by %i, but current mapped user is %iSession root directory is missingSet non standard bechmark type: %sSetting connections limit to %i, connections over limit will be %sSetting pbsz to %luSetting status (%s) for endpoint: %sSetting status (STARTED) for endpoint: %sSetting subject name!Setting userRequestDescription to %sShare Information:Should wait for destination to be preparedShould wait for source to be preparedShow help optionsShutdown daemonShutting down data delivery serviceShutting down data staging threadsShutting down job processingShutting down schedulerSimpleMap: %sSkipping ComputingEndpoint '%s', because it has '%s' interface instead of the requested '%s'.Skipping invalid URL option %sSkipping replica on local host %sSkipping retrieved job (%s) because it was submitted via another interface (%s).Skipping service: no SchemaPath found!Skipping service: no ServicePath found!Socket conversion failed: %sSockets do not match on exit %i != %iSome addresses failed. 
Listening on %u of %u.Some downloads failedSome downloads failed, but may be retriedSome error happens during the Aggregation record (%s) initialization.Some error happens during the Aggregation record (%s) storing.Some transfers failedSome uploads failedSome uploads failed, but (some) may be retriedSorting according to free slots in queueSorting according to input data availability at targetSorting according to specified benchmark (default "specint2000")Sorting replicas according to URL mapSorting replicas according to preferred pattern %sSource URL missingSource URL not supported: %sSource URL not valid: %sSource and/or destination is index service, will resolve replicasSource check requested but failed: %sSource is invalid URLSource is mapped to %sSource is not ready, will wait %u secondsSource is the same as destinationSource modification date: %sSource or destination requires stagingSource: %sSpecified URL is not validSpecified module not found in cacheSpecified overlay file (%s) does not exist.Specified user can't be handledStaging jobs: %iStaging request timed out, will release requestStaging: %sStart foregroundStart testStart waiting 10 sec...StartReadingStartReading: File was not prepared properlyStartWritingStartWriting: File was not prepared properlyStarted remote Delivery at %sStarting DTR threadsStarting controlled processStarting data staging threadsStarting helper process: %sStarting jobs processing threadStarting jobs' monitoringStarting new DTR for %sStarting querying of suspended endpoint (%s) - no other endpoints for this service is being queried or has been queried successfully.Starting sub-thread to query the endpoint on %sStarting thread to query the endpoint on %sStat: obtained modification time %sStat: obtained size %lluState name for authPlugin is missingState name for plugin is missingStatus for service endpoint "%s" is set to inactive in ARCHERY. 
Skipping.Status of %d jobs was queried, %d jobs returned informationStopReading finished waiting for transfer_condition.StopReading starts waiting for transfer_condition.StopWriting finished waiting for transfer_condition.StopWriting starts waiting for transfer_condition.StopWriting: Calculated checksum %sStopWriting: aborting connectionStopWriting: looking for checksum of %sStopping helper process %sStopping jobs processing threadStopping serverStoring configuration in temporary file %sStoring file %sStoring port %i for %sStoring temp proxy at %sString successfully parsed as %sString successfully parsed as %s.Subject Attribute %s has no known NID, skippedSubject does not start with '/'Subject name: %sSubject of request is null Subject to match: %sSubject: %sSubmission endpointSubmission failedSubmission failed, service returned: %sSubmission request failedSubmission request succeedSubmission returned failure: %sSubmit: Failed sending CWD commandSubmit: Failed sending CWD new commandSubmit: Failed sending job descriptionSubmit: Failed to connectSubmit: Failed uploading local input filesSubmit: service has no suitable information interface - need org.nordugrid.ldapngSubmitterPlugin %s could not be createdSubmitterPlugin plugin "%s" not found.Submitting test-job %d:Succeeded to add RFC proxy OID, tag %d is returnedSucceeded to add VOMS AC sequence OID, tag %d is returnedSucceeded to add anyLanguage OID, tag %d is returnedSucceeded to add inheritAll OID, tag %d is returnedSucceeded to authenticate SAMLTokenSucceeded to authenticate UsernameTokenSucceeded to authenticate X509TokenSucceeded to change password on MyProxy serverSucceeded to change trusts to: %sSucceeded to convert PrivateKeyInfo to EVP_PKEYSucceeded to destroy credential on MyProxy serverSucceeded to export PKCS12Succeeded to generate public/private key pairSucceeded to get a proxy in %s from MyProxy server %sSucceeded to get credentialSucceeded to get info from MyProxy serverSucceeded to import certificateSucceeded to import private keySucceeded to initialize NSSSucceeded to load PrivateKeyInfoSucceeded to output certificate to %sSucceeded to output the certificate request into %sSucceeded to put a proxy onto MyProxy serverSucceeded to send DelegationService: %s and DelegationID: %s info to peer serviceSucceeded to sign the proxy certificateSucceeded to verify the signature under Succeeded to verify the signature under Succeeded to verify the signed certificateSuccessful %s to EMIRegistry (%s)Successful registration to ISIS (%s)Successfuly removed registration from EMIRegistry (%s)Successfuly removed registration from ISIS (%s)Supplied username %s does not match mapped username %sSupported Profiles:Supported constraints are: validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start from now) validityEnd=time validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod and validityEnd not specified, the default is 12 hours for local proxy, and 168 hours for delegated proxy on myproxy server) vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, the default is the minimum value of 12 hours and validityPeriod) myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy server, e.g. 43200 or 12h or 12H; if not specified, the default is the minimum value of 12 hours and validityPeriod (which is lifetime of the delegated proxy on myproxy server)) proxyPolicy=policy content proxyPolicyFile=policy file keybits=number - length of the key to generate. Default is 1024 bits. 
Special value 'inherit' is to use key length of signing certificate. signingAlgorithm=name - signing algorithm to use for signing public key of proxy. Possible values are sha1, sha2 (alias for sha256), sha224, sha256, sha384, sha512 and inherit (use algorithm of signing certificate). Default is inherit. With old systems, only sha1 is acceptable. Supported information item names are: subject - subject name of proxy certificate. identity - identity subject name of proxy certificate. issuer - issuer subject name of proxy certificate. ca - subject name of CA ehich issued initial certificate path - file system path to file containing proxy. type - type of proxy certificate. validityStart - timestamp when proxy validity starts. validityEnd - timestamp when proxy validity ends. validityPeriod - duration of proxy validity in seconds. validityLeft - duration of proxy validity left in seconds. vomsVO - VO name represented by VOMS attribute vomsSubject - subject of certificate for which VOMS attribute is issued vomsIssuer - subject of service which issued VOMS certificate vomsACvalidityStart - timestamp when VOMS attribute validity starts. vomsACvalidityEnd - timestamp when VOMS attribute validity ends. vomsACvalidityPeriod - duration of VOMS attribute validity in seconds. vomsACvalidityLeft - duration of VOMS attribute validity left in seconds. proxyPolicy keybits - size of proxy certificate key in bits. signingAlgorithm - algorithm used to sign proxy certificate. Items are printed in requested order and are separated by newline. If item has multiple values they are printed in same line separated by |. Supported password destinations are: key - for reading private key myproxy - for accessing credentials at MyProxy service myproxynew - for creating credentials at MyProxy service all - for any purspose. Supported password sources are: quoted string ("password") - explicitly specified password int - interactively request password from console stdin - read password from standard input delimited by newline file:filename - read password from file named filename stream:# - read password from input stream number #. Currently only 0 (standard input) is supported. Supported constraints are: validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start from now) validityEnd=time validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod and validityEnd not specified, the default is 12 hours for local proxy, and 168 hours for delegated proxy on myproxy server) vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, the default is the minimum value of 12 hours and validityPeriod) myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy server, e.g. 43200 or 12h or 12H; if not specified, the default is the minimum value of 12 hours and validityPeriod (which is lifetime of the delegated proxy on myproxy server)) proxyPolicy=policy content proxyPolicyFile=policy fileSupports advance reservationsSupports bulk submissionSupports preemptionSuspended jobs: %iSuspending querying of endpoint (%s) since the service at the endpoint is already being queried, or has been queried.Synchronizing the local list of active jobs with the information in the information system can result in some inconsistencies. 
Very recently submitted jobs might not yet be present, whereas jobs very recently scheduled for deletion can still be present.Syntax error in 'notify' attribute value ('%s'), it contains unknown state flagsSyntax error in 'notify' attribute value ('%s'), it must contain an email addressSyntax error in 'notify' attribute value ('%s'), it must only contain email addresses after state flag(s)System configuration file (%s or %s) does not exist.System configuration file (%s) contains errors.System configuration file (%s) does not exist.TCP client process calledTCP executor is removedTURL %s cannot be handledTarget %s does not match requested interface(s).Target %s removed by FastestQueueBroker, doesn't report number of free slotsTarget %s removed by FastestQueueBroker, doesn't report number of total slotsTarget %s removed by FastestQueueBroker, doesn't report number of waiting jobsTechnology: %sTemporary service errorTerminateActivities: job %s - %sTerminateActivities: non-AREX job requestedTerminateActivities: request = %sTerminateActivities: response = %sTest aborted because no resource returned any informationTest failed, no more possible targetsTest submitted with jobid: %sTest was defined with ID %d, but some error occurred during parsing it.The "FreeSlotsWithDuration" attribute is wrongly formatted. Ignoring it.The "FreeSlotsWithDuration" attribute published by "%s" is wrongly formatted. Ignoring it.The 'epsilon' attribute to the 'Exact' element is not supported.The 'exclusiveBound' attribute to the '%s' element is not supported.The 'sort' and 'rsort' flags cannot be specified at the same time.The BIO for output is NULLThe CA certificates directory is required for contacting VOMS and MyProxy servers.The CA issuer (%s) of the credentials (%s) is not trusted by the target (%s).The ComputingEndpoint doesn't advertise its Quality Level.The ComputingEndpoint doesn't advertise its Serving State.The ComputingEndpoint has no URL.The ComputingService doesn't advertise its Interface.The ComputingService doesn't advertise its Quality Level.The MyProxy period that you set: %s can't be recognized.The NSS database can not be detected in the Firefox profileThe Response is not going to this endThe Service advertises no Health State.The Service doesn't advertise its Interface.The Service doesn't advertise its Quality Level.The Service doesn't advertise its Serving State.The Service doesn't advertise its Type.The StatusCode is SuccessThe VOMS AC period that you set: %s can't be recognized.The VOMS server with the information: %s can not be reached, please make sure it is availableThe VOMS server with the information: %s can not be reached, please make sure it is available.The VOMS server with the information: %s" can not be reached, please make sure it is availableThe [vo] section labeled '%s' has no file associated and can't be used for matchingThe arccat command performs the cat command on the stdout, stderr or grid manager's error log of the job.The arcclean command removes a job from the computing resource.The arccp command copies files to, from and between grid storage elements.The arcecho command is a client for the ARC echo service.The arcget command is used for retrieving the results from a job.The arcinfo command is used for obtaining the status of computing resources on the Grid.The arckill command is used to kill running jobs.The arcls command is used for listing files in grid storage elements and file index catalogues.The arcmkdir command creates directories on grid storage elements and 
catalogs.The arcproxy command creates a proxy from a key/certificate pair which can then be used to access grid resources.The arcrename command renames files on grid storage elements.The arcrm command deletes files and on grid storage elements.The arcstat command is used for obtaining the status of jobs that have been submitted to Grid enabled resources.The arcsub command is used for submitting jobs to Grid enabled computing resources.The arcsync command synchronizes your local job list with the information at the given resources or index servers.The arctest command is used for testing clusters as resources.The arcwsrf command is used for obtaining the WS-ResourceProperties of services.The attribute information from VOMS server: %s is list as following:The available CRL has expiredThe available CRL is not yet validThe brokerarguments attribute can only be used in conjunction with the brokername attributeThe certificate with subject %s is not validThe cluster XRSL attribute is currently unsupported.The credential to be signed contains no requestThe credential to be signed is NULLThe credential's private key has already been initializedThe default configuration file (%s) is not a regular file.The defaultTTL element is incorrect numberThe defaultTTR element is incorrect numberThe delegated credential got from delegation service is stored into path: %sThe delegated credential got from path: %sThe delegationDB element is incorrect valueThe downtime of the target (%s) is not published. Keeping target.The end time that you set: %s can't be recognized.The end time that you set: %s is before start time:%s.The endpoint (%s) is not supported by this plugin (%s)The endpoint of delgation service should be configuredThe file %s is currently locked with a valid lockThe first supported interface of the plugin %s is an empty string, skipping the plugin.The fixDirectories element is incorrect valueThe following %d were not resubmittedThe following %d were not submittedThe inputsandboxbaseuri JDL attribute specifies an invalid URL.The interface of this endpoint (%s) is unspecified, will try all possible pluginsThe job description also can be a file or a string in JDL, POSIX JSDL, JSDL, or XRSL format.The job status could not be retrievedThe keybits constraint is wrong: %s.The maxReruns element is incorrect numberThe name of the private key to delete is emptyThe noRootPower element is incorrect numberThe old GSI proxies are not supported anymore. Please do not use -O/--old option.The payload of incoming message is emptyThe payload of outgoing message is emptyThe period that you set: %s can't be recognized.The plugin %s does not support any interfaces, skipping it.The policy file setup for simplelist.pdp does not exist, please check location attribute for simplelist PDP node in service configurationThe policy language: %s is not supportedThe private key for signing is not initializedThe process owning the lock on %s is no longer running, will remove lockThe request has passed the policy evaluationThe response of a job cleaning request was not a SOAP messageThe response of a job termination request was not a SOAP messageThe response of a service status request was not a SOAP messageThe response of a start job request was not a SOAP messageThe response of a status request was not a SOAP messageThe service argument is a URL to an ARC echo service. 
The message argument is the message the service should return.The service status could not be retrievedThe service won't be registered.The signing algorithm %s is not allowed,it should be SHA1 or SHA2 to sign certificate requestsThe specified Globus attribute (%s) is not supported. %s ignored.The start time that you set: %s can't be recognized.The start, end and period can't be set simultaneouslyThe subject does not match the issuer name + proxy CN entryThe validity duration of VOMS AC is shortened from %s to %s, due to the validity constraint on voms server side. The value of the acl XRSL attribute isn't valid XML.The value of the ftpthreads attribute must be a number from 1 to 10The value of the keysize attribute in the configuration file (%s) was only partially parsedThe value of the timeout attribute in the configuration file (%s) was only partially parsedThere are %d NSS base directories where the certificate, key, and module databases liveThere are %d RequestItemsThere are %d certificates in the returned msgThere are %d commands to the same VOMS server %sThere are %d requests, which satisfy at least one policyThere are %d servers with the same name: %s in your vomses file, but all of them can not be reached, or can not return valid message.There are %d servers with the same name: %s in your vomses file, but all of them can not be reached, or can not return valid message. But proxy without VOMS AC extension will still be generated.There are %d user certificates existing in the NSS databaseThere is %d subjects, which satisfy at least one policyThere is no Delegated X509 token in the responseThere is no Format delegated token in the responseThere is no Format request in the responseThere is no Id or X509 request value in the responseThere is no Id or X509 token value in the responseThere is no SOAP connection chain configuredThere is no SOAP responseThere is no UpdateCredentialsResponse in responseThere is no X509 request in the responseThere is no certificate named %s found, the certificate could be removed when generating CSRThere is no connection chain configuredThere is no constructor functionThere is no digest in issuer's private key objectThere is no local LRMS ID. Message will not be written to BLAH log.There is no local mapping for userThere is no local name for userThere is no more ISIS available. The list of ISIS's is already empty.There is no responseThere is no service: %s in your Java class search pathThere was a problem during post-transfer destination handling after error: %sThere was a problem during post-transfer source handling: %sThere was no HTTP responseThere was no SOAP responseThere was no SOAP response return from PDP server: %sThere was no response to a job cleaning requestThere was no response to a job termination requestThere was no response to a service status requestThere was no response to a start job requestThere was no response to a status requestThere was no response to a submission requestThird party transfer is not supported for these endpointsThird party transfer was requested but the corresponding plugin could not be loaded. Is the GFAL plugin installed? If not, please install the packages 'nordugrid-arc-plugins-gfal' and 'gfal2-all'. 
Depending on your type of installation the package names might differ.This INFO message should also be seenThis INFO message should be seenThis VERBOSE message should not be seenThis VERBOSE message should now be seenThis instance was already deletedThis job was very recently submitted and might not yet have reached the information systemThis message goes to initial destinationThis message goes to per-thread destinationThis process already owns the lock on %sThis seems like a temporary error, please try again laterThis tiny tool can be used for testing the JobDescription's conversion abilities.This user is denied to submit new jobs.Thread exited with Glib error: %sThread exited with Glib exception: %sThread exited with generic exception: %sTime left for AC: %sTime left for AC: AC has expiredTime left for AC: AC is not valid yetTime left for proxy: %sTime left for proxy: Proxy expiredTime left for proxy: Proxy not valid yetTime spent waiting for disc: %.3f msTime spent waiting for network: %.3f msTimed out while waiting for cache lockTimeout connecting to %s(%s):%i - %i sTimeout for localCred is incorrect numberTimeout for localCred is missingTimeout has expired, will remove lock file %sTimeout waiting for Globus callback - leaking connectionTimeout waiting for mkdirTo recover missing jobs, run arcsyncToo many connections - dropping new oneToo many connections - waiting for old to closeToo many files in one request - please try again with fewer filesToo many parametersTotal jobs: %iTotal logical CPUs: %iTotal number of jobs found: Total number of new jobs found: Total physical CPUs: %iTotal slots: %iTransfer FAILED: %sTransfer cancelled successfullyTransfer completeTransfer failedTransfer failed: %sTransfer finished: %llu bytes transferred %sTransfer from %s to %sTransfer killed after %i seconds without communicationTransfer succeededTransfer timed outTrusted CAs:Try to get attribute from VOMS server with order: %sTrying all available interfacesTrying next replicaTrying to check X509 cert with check_cert_typeTrying to connect %s(%s):%dTrying to listen on %s:%s(%s)Trying to listen on TCP port %s(%s)Trying to migrate to %s: Migration to a %s interface is not supported.Trying to retrieve job description of %s from computing resourceTrying to start suspended endpoint (%s)Trying to submit directly to endpoint (%s)Trying to submit endpoint (%s) using interface (%s) with plugin (%s).Two identical output destinations: %sTwo input files have identical name '%s'.Type in LRMS is missingType is dir, calling srmRmDirType is file, calling srmRmType: %sTypes of execution services %s is able to submit jobs to:Types of local information services which %s is able collect information from:Types of local information services which %s is able collect job information from:Types of registry services which %s is able collect information from:Types of services %s is able to manage jobs at:UPDATE Aggregation Record called.UR set dump: %sURLURL %s disagrees with stored SRM info, testing new infoURL [query]URL is mapped to local access - checking permissions on original URLURL is mapped to: %sURL is not valid: %sURL option %s does not have format name=valueURL protocol is not urllist: %sURL: %sUnAuthorized from xacml.pdpUnable to adapt job description to any resource, no resource information could be obtained.Unable to associate secondary DB with primary DB (%s)Unable to copy %sUnable to copy example configuration from existing configuration (%s)Unable to create %s directory.Unable to create DB for secondary endpoint 
keys (%s)Unable to create DB for secondary name keys (%s)Unable to create DB for secondary service info keys (%s)Unable to create SOAP client used by AREXClient.Unable to create SOAP client used by CREAMClient.Unable to create SOAP client used by EMIESClient.Unable to create adapter for the specific reporting destination typeUnable to create data base (%s)Unable to create data base environment (%s)Unable to create directory %sUnable to create directory for storing results (%s) - %sUnable to create index for jobs table in data base (%s)Unable to create job database (%s)Unable to create jobs table in data base (%s)Unable to create temporary directoryUnable to detect format of job record.Unable to detect if issuer certificate is installed.Unable to determine certificate informationUnable to determine error (%d)Unable to download job (%s), no JobControllerPlugin plugin was set to handle the job.Unable to find file size of %sUnable to handle job (%s), no interface specified.Unable to handle job (%s), no plugin associated with the specified interface (%s)Unable to initialise connection to destination: %sUnable to initialise connection to source: %sUnable to list content of %sUnable to list files at %sUnable to load BrokerPlugin (%s)Unable to load broker %sUnable to load plugin (%s) for interface (%s) when trying to submit job description.Unable to load submission plugin for %s interfaceUnable to locate the "%s" plugin. Please refer to installation instructions and check if package providing support for "%s" plugin is installedUnable to locate the "%s" plugin. Please refer to installation instructions and check if package providing support for %s plugin is installedUnable to match target, marking it as not matching. Broker not valid.Unable to migrate job (%s), job description could not be retrieved remotelyUnable to migrate job (%s), unable to parse obtained job descriptionUnable to migrate job. Job description is not valid in the %s format: %sUnable to parse the %s.%s value from execution service (%s).Unable to parse the specified verbosity (%s) to one of the allowed levelsUnable to parse.Unable to prepare job description according to needs of the target resource (%s).Unable to prepare job description according to needs of the target resource.Unable to query job information (%s), invalid URL provided (%s)Unable to read job information from file (%s)Unable to register job submission. Can't get JobDescription object from Broker, Broker is invalid.Unable to remove file %sUnable to rename %sUnable to resubmit job (%s), no targets applicable for submissionUnable to resubmit job (%s), target information retrieval failed for target: %sUnable to resubmit job (%s), unable to parse obtained job descriptionUnable to retrieve job status.Unable to retrieve list of job files to download for job %sUnable to retrieve status of job (%s)Unable to select middlewareUnable to select operating system.Unable to select runtime environmentUnable to set duplicate flags for secondary key DB (%s)Unable to sort ExecutionTarget objects - Invalid Broker object.Unable to sort added jobs. The BrokerPlugin plugin has not been loaded.Unable to submit job. Job description is not valid XMLUnable to submit job. Job description is not valid in the %s formatUnable to submit job. 
Job description is not valid in the %s format: %sUnable to truncate job database (%s)Unable to write key/value pair to job database (%s): Key "%s"Unable to write records into job database (%s): Id "%s"Unable to write to p12 fileUnauthorizedUnauthorized from remote pdp serviceUndefined control sequence: %%%sUndefined processing errorUnexpected RSL typeUnexpected argumentsUnexpected arguments suppliedUnexpected immediate completion: %sUnexpected name returned in Rucio response: %sUnexpected path %s returned from serverUniq is adding service coming from %sUniq is ignoring service coming from %sUniq is replacing service coming from %s with service coming from %sUnknown ACL policy %s for job %sUnknown LDAP scope %s - using baseUnknown XRSL attribute: %s - Ignoring it.Unknown attribute %s in common section of configuration file (%s), ignoring itUnknown authorization command %sUnknown channel %s for stdio protocolUnknown conversion mode %s, using defaultUnknown credential type %s for URL pattern %sUnknown element in Globus signing policyUnknown entry in EGIIS (%s)Unknown errorUnknown key or hash typeUnknown key or hash type of issuerUnknown log level %sUnknown open mode %iUnknown open mode %sUnknown operator '%s' in attribute require in Version elementUnknown optionUnknown rights in Globus signing policy - %sUnknown section %s, ignoring itUnknown transfer option: %sUnregistering %sUnregistering from index service failedUnregistred Service can not be removed.Unsupported URL givenUnsupported URL given: %sUnsupported command: %sUnsupported configuration command: %sUnsupported destination url: %sUnsupported job list type '%s', using 'BDB'. Supported types are: BDB, SQLITE, XML.Unsupported option: %cUnsupported protocol in url %sUnsupported proxy policy language is requested - %sUnsupported proxy version is requested - %sUnsupported source url: %sUnsupported submission interface %s. Seems arc-blahp-logger need to be updated accordingly :-) Please submit the bug to bugzilla.Untrusted self-signed certificate in chain with subject %s and hash: %luUpdateCredentials failedUpdateCredentials: EPR contains no JobIDUpdateCredentials: failed to update credentialsUpdateCredentials: missing ReferenceUpdateCredentials: no job found: %sUpdateCredentials: request = %sUpdateCredentials: response = %sUpdateCredentials: wrong number of ReferenceUpdateCredentials: wrong number of elements inside ReferenceUploadable files timed outUploaded %sUploaded file %sUploader startedUsage:Usage: %s -I -U -P -L [-c ] [-p ] [-d ]Usage: %s [-N] -P -L [-c ] [-d ]Usage: copy source destinationUsageRecords registration response: %sUse --help option for detailed usage informationUse -? to get usage descriptionUsed configuration file %sUsed slots: %iUser configuration file (%s) contains errors.User configuration file (%s) does not exist or cannot be loaded.User for helper program is missingUser has empty virtual directory tree. 
Either user has no authorised plugins or there are no plugins configured at all.User has no proper configuration associatedUser has uploaded file %sUser interface errorUser name direct mapping is missing user name: %s.User name mapping command is emptyUser name mapping has empty VO: %sUser name mapping has empty command: %sUser name mapping has empty group: %sUser name mapping has empty name: %sUser name should be specified.User pool at %s can't be opened.User pool at %s failed to perform user mapping.User pool call is missing user subject.User proxy certificate is not validUser proxy file is required but is not specifiedUser subject match is missing user subject.User subject: %sUserConfig class is not an objectUserConfiguration saved to file (%s)Username Token handler is not configuredUsername in helperUtility is emptyUsing A-REX config file %sUsing CA certificate directory: %sUsing Rucio account %sUsing cache %sUsing cached local account '%s'Using cert %sUsing certificate file: %sUsing cipher: %sUsing control directory %sUsing insecure data transferUsing key %sUsing key file: %sUsing local account '%s'Using next %s replicaUsing proxy %sUsing proxy file: %sUsing remote cache file %s for url %sUsing secure data transferUsing session dir %sUsing session directory %sUsing space token %sUsing space token description %sVO %s doesn't match %sVO (%s) not set for this (%s) SGAS server by VO filter.VO filter for host: %sVOMS attr %s doesn't match %sVOMS attr %s matches %sVOMS attribute is ignored due to processing/validation errorVOMS attribute parsing failedVOMS attribute validation failedVOMS command is emptyVOMS line contains wrong number of tokens (%u expected): "%s"VOMS proxy processing returns: %i - %sVOMS trust chains: %sVOMS: AC has expiredVOMS: AC is not complete - missing Serial or Issuer informationVOMS: AC is not yet validVOMS: AC signature verification failedVOMS: CA directory or CA file must be providedVOMS: Can not allocate memory for parsing ACVOMS: Can not allocate memory for storing the order of ACVOMS: Can not find AC_ATTR with IETFATTR typeVOMS: Can not parse ACVOMS: Cannot find certificate of AC issuer for VO %sVOMS: DN of holder in AC: %sVOMS: DN of holder: %sVOMS: DN of issuer: %sVOMS: FQDN of this host %s does not match any target in ACVOMS: The lsc file %s can not be openVOMS: The lsc file %s does not existVOMS: authorityKey is wrongVOMS: both idcenoRevAvail and authorityKeyIdentifier certificate extensions must be presentVOMS: can not verify the signature of the ACVOMS: cannot validate AC issuer for VO %sVOMS: case of multiple IETFATTR attributes not supportedVOMS: case of multiple policyAuthority not supportedVOMS: create FQAN: %sVOMS: create attribute: %sVOMS: directory for trusted service certificates: %sVOMS: failed to parse attributes from ACVOMS: failed to verify AC signatureVOMS: missing AC partsVOMS: problems while parsing information in ACVOMS: the DN in certificate: %s does not match that in trusted DN list: %sVOMS: the Issuer identity in certificate: %s does not match that in trusted DN list: %sVOMS: the attribute name is emptyVOMS: the attribute qualifier is emptyVOMS: the attribute value for %s is emptyVOMS: the format of IETFATTRVAL is not supported - expecting OCTET STRINGVOMS: the format of policyAuthority is unsupported - expecting URIVOMS: the grantor attribute is emptyVOMS: the holder information in AC is wrongVOMS: the holder issuer name is not the same as that in ACVOMS: the holder issuerUID is not the same as that in ACVOMS: the holder name in AC is 
not related to the distinguished name in holder certificateVOMS: the holder serial number %lx is not the same as the serial number in AC %lx, the holder certificate that is used to create a voms proxy could be a proxy certificate with a different serial number as the original EEC certVOMS: the holder serial number is: %lxVOMS: the issuer information in AC is wrongVOMS: the issuer name %s is not the same as that in AC - %sVOMS: the only supported critical extension of the AC is idceTargetsVOMS: the serial number in AC is: %lxVOMS: the serial number of AC INFO is too long - expecting no more than 20 octetsVOMS: there is no constraints of trusted voms DNs, the certificates stack in AC will not be checked.VOMS: trust chain to check: %s VOMS: unable to determine hostname of AC from VO name: %sVOMS: unable to extract VO name from ACVOMS: unable to match certificate chain against VOMS trusted DNsVOMS: unable to verify certificate chainVOMS: unsupported time format format in AC - expecting GENERALIZED TIMEValid JobDescription foundValid for: %sValid for: Proxy expiredValid for: Proxy not validValid until: %sValue for 'link' element in mapURL is incorrectValue for maxJobsPerDN is incorrect numberValue for maxJobsRun is incorrect numberValue for maxJobsTotal is incorrect numberValue for maxJobsTracked is incorrect numberValue for maxScripts is incorrect numberValue for wakeupPeriod is incorrect numberValue of %s.%s is "%s"Value of 'count' attribute must be an integerValue of 'countpernode' attribute must be an integerValue of 'exclusiveexecution' attribute must either be 'yes' or 'no'Value of attribute '%s' expected to be a stringValue of attribute '%s' expected to be single valueValue of attribute '%s' has wrong sequence length: Expected %d, found %dValue of attribute '%s' is not a stringValue of attribute '%s' is not sequenceVariable name (%s) contains invalid character (%s)Variable name expectedVersion in Listen element can't be recognizedWARNING: The end time that you set: %s is before current time: %sWARNING: The start time that you set: %s is before current time: %sWaiting 1 minuteWaiting ends.Waiting for bufferWaiting for globus handle to settleWaiting for lock on file %sWaiting for lock on job list file %sWaiting for responseWaiting jobs: %iWaiting period is %d second(s).Waking upWarning: Failed listing files but some information is obtainedWarning: Failed removing jobs from file (%s)Warning: Failed to write job information to file (%s)Warning: Job not found in job list: %sWarning: Some jobs were not removed from serverWarning: Unable to open job list file (%s), unknown formatWarning: Using SRM protocol v1 which does not support space tokensWarning: mount point %s creation failed.Was expecting %s at the beginning of "%s"Watchdog (re)starting applicationWatchdog detected application exitWatchdog detected application exit due to signal %uWatchdog detected application exited with code %uWatchdog detected application timeout or error - killing processWatchdog exiting because application was purposely killed or exited itselfWatchdog failed to kill application - giving up and exitingWatchdog failed to wait till application exited - sending KILLWatchdog fork failed: %sWatchdog starting monitoringWe only support CAs in Globus signing policy - %s is not supportedWe only support X509 CAs in Globus signing policy - %s is not supportedWe only support globus conditions in Globus signing policy - %s is not supportedWe only support subjects conditions in Globus signing policy - %s is not supportedWhen 
specifying 'countpernode' attribute, 'count' attribute must also be specifiedWill %s in destination index serviceWill calculate %s checksumWill clean up pre-registered destinationWill download to cache file %sWill not map to 'root' account by defaultWill process cacheWill release cache locksWill remove %s on service %s.Will retry without cachingWill use bulk requestWill wait 10sWill wait around %isWiping and re-creating whole storageWon't use more than 10 threadsWorking area free size: %i GBWorking area is not shared among jobsWorking area is shared among jobsWorking area life time: %sWorking area total size: %i GBWriting back dynamic output file %sWriting the info the the BLAH parser log: %sWrong buffer sizeWrong defaultbuffer number in configurationWrong directory in %sWrong format of the "FreeSlotsWithDuration" = "%s" ("%s")Wrong language requested: %sWrong maxbuffer number in configurationWrong maxconnections number in configurationWrong maximal buffer sizeWrong number for timeout in plugin commandWrong number in defaultttl commandWrong number in jobreport_period: %d, minimal value: %sWrong number in jobreport_period: %sWrong number in maxjobdescWrong number in maxjobs: %sWrong number in maxrerun commandWrong number in wakeupperiod: %sWrong number of arguments givenWrong number of arguments!Wrong number of connectionsWrong number of files: %sWrong number of objects (%i) for stat from ftp: %sWrong number of parameters specifiedWrong number of threads: %sWrong option in %sWrong option in daemonWrong option in delegationdbWrong option in fixdirectoriesWrong option in logreopenWrong ownership of certificate file: %sWrong ownership of key file: %sWrong ownership of proxy file: %sWrong permissions of certificate file: %sWrong permissions of key file: %sWrong permissions of proxy file: %sWrong port numberWrong port number in configurationWrong service record field "%s" found in the "%s"Wrote request into a fileWrote signed EEC certificate into a fileWrote signed proxy certificate into a fileX509 Token handler is not configuredXACML authorisation request: %sXACML authorisation response: %sXACML request: %sXML config file %s does not existXML response: %sXML: %sYou are about to remove jobs from the job list for which no information could be found. 
NOTE: Recently submitted jobs might not have appeared in the information system, and this action will also remove such jobs.You may try to increase verbosity to get more information.Your identity: %sYour issuer's certificate is not installedYour proxy is valid until: %sZero bytes written to file[-]name[ADLParser] %s element must be boolean.[ADLParser] Benchmark is not supported yet.[ADLParser] Code in FailIfExitCodeNotEqualTo in %s is not valid number.[ADLParser] CreationFlag value %s is not supported.[ADLParser] Missing Name element or value in ParallelEnvironment/Option element.[ADLParser] Missing or empty Name in InputFile.[ADLParser] Missing or empty Name in OutputFile.[ADLParser] Missing or wrong value in DiskSpaceRequirement.[ADLParser] Missing or wrong value in IndividualCPUTime.[ADLParser] Missing or wrong value in IndividualPhysicalMemory.[ADLParser] Missing or wrong value in IndividualVirtualMemory.[ADLParser] Missing or wrong value in NumberOfSlots.[ADLParser] Missing or wrong value in ProcessesPerSlot.[ADLParser] Missing or wrong value in SlotsPerHost.[ADLParser] Missing or wrong value in ThreadsPerProcess.[ADLParser] Missing or wrong value in TotalCPUTime.[ADLParser] Missing or wrong value in WallTime.[ADLParser] NetworkInfo is not supported yet.[ADLParser] NodeAccess value %s is not supported yet.[ADLParser] Only email Prorocol for Notification is supported yet.[ADLParser] Optional for %s elements are not supported yet.[ADLParser] Root element is not ActivityDescription [ADLParser] The NumberOfSlots element should be specified, when the value of useNumberOfSlots attribute of SlotsPerHost element is "true".[ADLParser] Unsupported EMI ES state %s.[ADLParser] Unsupported URL %s for RemoteLogging.[ADLParser] Unsupported internal state %s.[ADLParser] Wrong URI specified in Source - %s.[ADLParser] Wrong URI specified in Target - %s.[ADLParser] Wrong time %s in ExpirationTime.[ARCJSDLParser] Error during the parsing: missed the name attributes of the "%s" Environment[ARCJSDLParser] Not a JSDL - missing JobDescription element[ARCJSDLParser] RemoteLogging URL is wrongly formatted.[ARCJSDLParser] priority is too large - using max value 100[JDLParser] Attribute named %s has unknown value: %s[JDLParser] Environment variable has been defined without any equals sign.[JDLParser] JDL syntax error. There is at least one equals sign missing where it would be expected.[JDLParser] Lines count is zero or other funny error has occurred.[JDLParser] Semicolon (;) is not allowed inside brackets, at '%s;'.[JDLParser] Syntax error found during the split function.[JDLParser] This kind of JDL descriptor is not supported yet: %s[JDLParser]: Unknown attribute name: '%s', with value: %s[VO filter] Job log will be not send. 
%s.[filename ...][job ...][job description ...][resource ...]a file containing a list of jobIDsabort_callback: Globus error: %sabort_callback: startaction(%s) != requestactive_data is disabledadd_word failureall jobsauthorizedvo parameter is emptybad arguments to remotegmdirsbad directory for plugin: %sbad directory in plugin command: %sbrokerbuffer: error : %s, read: %s, write: %sbuffer: read EOF : %sbuffer: write EOF: %scache file: %scan't parse configuration line: %s %s %s %scannot create directory: %sceID prefix is set to %schecingBartenderURL: Response: %scheck readability of object, does not show any information about objectcheck_abort: have Globus errorcheck_abort: sending 426check_ftp: failed to get file's modification timecheck_ftp: failed to get file's sizecheck_ftp: globus_ftp_client_get failedcheck_ftp: globus_ftp_client_modification_time failedcheck_ftp: globus_ftp_client_register_readcheck_ftp: globus_ftp_client_size failedcheck_ftp: obtained modification date: %scheck_ftp: obtained size: %llicheck_ftp: timeout waiting for modification_timecheck_ftp: timeout waiting for partial getcheck_ftp: timeout waiting for sizeclass name: %sclientxrsl foundclientxrsl not foundclose failed: %sclosing file %s failed: %scommand to MyProxy server. The command can be PUT or GET. PUT/put/Put -- put a delegated credential to the MyProxy server; GET/get/Get -- get a delegated credential from the MyProxy server, credential (certificate and key) is not needed in this case. MyProxy functionality can be used together with VOMS functionality. command to MyProxy server. The command can be PUT, GET, INFO, NEWPASS or DESTROY. PUT -- put a delegated credentials to the MyProxy server; GET -- get a delegated credentials from the MyProxy server; INFO -- get and present information about credentials stored at the MyProxy server; NEWPASS -- change password protecting credentials stored at the MyProxy server; DESTROY -- wipe off credentials stored at the MyProxy server; Local credentials (certificate and key) are not necessary except in case of PUT. MyProxy functionality can be used together with VOMS functionality. --voms and --vomses can be used for Get command if VOMS attributes is required to be included in the proxy. computingconfig: %s, class name: %sconfiguration file (default ~/.arc/client.conf)configuration file not foundcontrolDir is missingconvert from specified input database format [bdb|sqlite]convert into specified output database format [bdb|sqlite]could not find end of clientxrslcould not find start of clientxrslcouldn't open file %scouldn't process VO configurationd2i_X509_REQ_bio faileddata_connect_retrieve_callbackdata_connect_retrieve_callback: allocate_data_bufferdata_connect_retrieve_callback: allocate_data_buffer faileddata_connect_retrieve_callback: check for buffer %udata_connect_store_callbackdata_retrieve_callbackdata_retrieve_callback: lost bufferdata_store_callback: lost bufferdatabase formatdebugleveldefaultlrms is emptydefine the requested format (nordugrid:jsdl, egee:jdl, nordugrid:xrsl, emies:adl)delete_ftp: globus_ftp_client_delete faileddelete_ftp: globus_ftp_client_rmdir faileddelete_ftp: timeout waiting for deletedestination.next_locationdirdirnamedisplay all available metadatadisplay more information on each jobdndo not ask for verificationdo not print list of jobsdo not print number of jobs in each statedo not resubmit to the same resourcedo not submit - dump job description in the language accepted by the targetdo not transfer, but register source into destination. 
destination must be a meta-url.do not try to force passive transferdon't prompt for a credential passphrase, when retrieve a credential from on MyProxy server. The precondition of this choice is the credential is PUT onto the MyProxy server without a passphrase by using -R (--retrievable_by_cert) option when being PUTing onto Myproxy server. This option is specific for the GET command when contacting Myproxy server.download directory (the job directory will be created in this directory)echo: Unauthorizedempty argument to remotegmdirsempty input payloadempty next chain elementend of string encountered while processing type of subject name element #%derror converting number from bin to BIGNUMerror converting serial to ASN.1 formatescape character at end of stringexitfailed to process client identificationfailed while processing configuration command: %s %sfilefile %s is not accessiblefile namefile name too longfile node creation failed: %sfilenameforce download (overwrite existing job directory)force migration, ignore kill failureforcedefaultvoms parameter is emptyfork failedformatfrom the following endpoints:fsync of file %s failed: %sftp_check_callbackftp_complete_callback: error: %sftp_complete_callback: successftp_put_complete_callback: successftp_read_callback: failure: %sftp_read_callback: successftp_read_thread: Globus error: %sftp_read_thread: data callback failed - aborting: %sftp_read_thread: exitingftp_read_thread: failed to register Globus buffer - will try later: %sftp_read_thread: failed to release buffers - leakingftp_read_thread: for_read failed - aborting: %sftp_read_thread: get and register buffersftp_read_thread: too many registration failures - abort: %sftp_read_thread: waiting for buffers releasedftp_read_thread: waiting for eofftp_write_callback: failure: %sftp_write_callback: success %sftp_write_thread: data callback failed - abortingftp_write_thread: exitingftp_write_thread: failed to release buffers - leakingftp_write_thread: for_write failed - abortingftp_write_thread: get and register buffersftp_write_thread: waiting for buffers releasedftp_write_thread: waiting for eofgetISISList from %sgfal_close failed: %sgfal_closedir failed: %sgfal_listxattr failed, no replica information can be obtained: %sgfal_mkdir failed (%s), trying to write anywaygfal_mkdir failed: %sgfal_open failed: %sgfal_opendir failed: %sgfal_read failed: %sgfal_rename failed: %sgfal_rmdir failed: %sgfal_stat failed: %sgfal_unlink failed: %sgfal_write failed: %sglobalid is set to %sglobus_ftp_client_operationattr_set_authorization: error: %sgm-delegations-converter changes format of delegation database.gm-jobs displays information on current jobs in the system.gm-kick wakes up the A-REX corresponding to the given control file. If no file is given it uses the control directory found in the configuration file.group<:role>. Specify ordering of attributes Example: --order /knowarc.eu/coredev:Developer,/knowarc.eu/testers:Tester or: --order /knowarc.eu/coredev:Developer --order /knowarc.eu/testers:Tester Note that it does not make sense to specify the order if you have two or more different VOMS servers specifiedheadnode is set to %shostname[:port] of MyProxy serverhourhourshttp_get: start=%llu, end=%llu, burl=%s, hpath=%sidif the destination is an indexing service and not the same as the source and the destination is already registered, then the copy is normally not done. 
However, if this option is specified the source is assumed to be a replica of the destination created in an uncontrolled way and the copy is done like in case of replication. Using this option also skips validation of completed transfers.improper attribute for allowactvedata command: %simproper attribute for allowunknown command: %simproper attribute for encryption command: %sincoming message is not SOAPindexinit_handle: globus_ftp_client_handle_init failedinit_handle: globus_ftp_client_handleattr_init failedinit_handle: globus_ftp_client_handleattr_set_gridftp2 failedinit_handle: globus_ftp_client_operationattr_init failedinit_handle: globus_ftp_client_operationattr_set_allow_ipv6 failedinit_handle: globus_ftp_client_operationattr_set_delayed_pasv failedinmsg.Attributes().getAll() = %s inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %sinput does not define operationinput is not SOAPinputcheck checks that input files specified in the job description are available and accessible using the credentials in the given proxy file.instead of the status only the IDs of the selected jobs will be printedintinterfaceinterface is set to %sinterfacenameinvalid jobID: %sjob %s (will be) cleaned successfullyjob %s cancelled successfullyjob %s restarted successfullyjob idjob_description_file [proxy_file]jobdescription file describing the job to be submittedjobdescription string describing the job to be submittedkeep the files on the server (do not clean)levellibjvm.so does not contain the expected symbolslibjvm.so not loadable - check your LD_LIBRARY_PATHlist record: %slist size: %dlist the available pluginslist the available plugins (protocols supported)list_files_ftp: checksum %slist_files_ftp: failed to get file's modification timelist_files_ftp: failed to get file's sizelist_files_ftp: globus_ftp_client_cksm failedlist_files_ftp: globus_ftp_client_modification_time failedlist_files_ftp: globus_ftp_client_size failedlist_files_ftp: looking for checksum of %slist_files_ftp: looking for modification time of %slist_files_ftp: looking for size of %slist_files_ftp: no checksum information possiblelist_files_ftp: timeout waiting for cksumlist_files_ftp: timeout waiting for modification_timelist_files_ftp: timeout waiting for sizeload serial from %s failurelocal_pasv failedlocal_port failedlocal_spas failedlocalid is set to %slong format (more information)mail parameter is emptymake parent directories as neededmake_abort: leavingmake_abort: startmake_abort: wait for abort flag to be resetmalloc errormeta file %s is emptyminuteminutesmkdir failed: %smkdir_ftp: making %smkdir_ftp: timeout waiting for mkdirmodule name: %smoth: %snnamend: %snew_payload %snext chain element callednext element of the chain returned empty payloadnext element of the chain returned error statusnext element of the chain returned error status: %snext element of the chain returned invalid payloadnext element of the chain returned invalid/unsupported payloadnext element of the chain returned no payloadnext element of the chain returned unknown payload - passing throughnumbernumber of retries before failing file transferold_url new_urlonly get information about executon targets which support this job submission interface (e.g. org.nordugrid.gridftpjob, org.ogf.glue.emies.activitycreation, org.ogf.bes)only select jobs whose status is statusstronly use this interface for submitting (e.g. 
org.nordugrid.gridftpjob, org.ogf.glue.emies.activitycreation, org.ogf.bes)open: changing owner for %s, %i, %iopen: owner: %i %ioperate recursivelyoperate recursively up to specified levelorderoutpayload %soutput is not SOAPoutput requested elements (jobs list, delegation ids and tokens) to fileowner subject is set to %sp12 file is emptypassword destination=password sourcepathpath to config filepath to local cache (use to put file into cache)path to the VOMS server configuration filepath to the certificate file, it can be either PEM, DER, or PKCS12 formatedpath to the private key file, if the certificate is in PKCS12 format, then no need to give private keypath to the proxy filepath to the top directory of VOMS *.lsc files, only needed for the VOMS client functionalitypath to the trusted certificate directory, only needed for the VOMS client functionalityperform third party transfer, where the destination pulls from the source (only available with GFAL plugin)physical location to write to when destination is an indexing service. Must be specified for indexing services which do not automatically generate physical locations. Can be specified multiple times - locations will be tried in order until one succeeds.pkey and rsa_key exist!plugin for transport protocol %s is not installedplugin: checkdir: %splugin: checkdir: access: %splugin: checkdir: access: allowed: %splugin: closeplugin: open: %splugin: readplugin: writeprint a list of services configured in the client.confprint all information about this proxy.print all information about this proxy. In order to show the Identity (DN without CN as suffix for proxy) of the certificate, the 'trusted certdir' is needed.print delegation token of specified ID(s)print list of available delegation IDsprint main delegation token of specified Job ID(s)print selected information about this proxy.print state of the serviceprint summary of jobs in each transfer shareprint version informationprints info about installed user- and CA-certificatespriority is too large - using max value 100process: GETprocess: POSTprocess: PUTprocess: endpoint: %sprocess: factory endpointprocess: id: %sprocess: method %s is not supportedprocess: method is not definedprocess: method: %sprocess: operation: %sprocess: request=%sprocess: response is not SOAPprocess: response=%sprocess: subpath: %sproxy constraintsquery: %squeue name is set to %squeue: %sread information from specified control directoryreg_.size(): %dremove logical file name registration even if not all physical instances were removedremove proxyremove the job from the local list of jobs even if the job is not found in the infosysrequest to cancel job(s) with specified ID(s)request to cancel jobs belonging to user(s) with specified subject name(s)request to clean job(s) with specified ID(s)request to clean jobs belonging to user(s) with specified subject name(s)response: %sresubmit to the same resourcereverse sorting of jobs according to jobid, submissiontime or jobnamesave serial to %s failuresecondsecondssecondsselect broker method (list available brokers with --listplugins flag)select one or more computing elements: name can be an alias for a single CE, a group of CEs or a URLselect one or more registries: name can be an alias for a single registry, a group of registries or a URLselecting a computing element for the new jobs with a URL or an alias, or selecting a group of computing elements with the name of the groupservice messageserviceMail is emptyservice_urlsessionRootDir is missingsetting file %s to size 
%llushow URLs of file locationsshow jobs where status information is unavailableshow only description of requested object, do not list content of directoriesshow only jobs of user(s) with specified subject name(s)show only jobs with specified ID(s)show progress indicatorshow the CE's error log of the jobshow the original job descriptionshow the stderr of the jobshow the stdout of the job (default)shutdownskip jobs which are on a computing element with a given URLskip the service with the given URL during service discoverysort jobs according to jobid, submissiontime or jobnamesource destinationsource.next_locationstart_reading_ftpstart_reading_ftp: globus_ftp_client_getstart_reading_ftp: globus_ftp_client_get failedstart_reading_ftp: globus_thread_create failedstart_writing_ftp: globus_thread_create failedstart_writing_ftp: mkdirstart_writing_ftp: mkdir failed - still trying to writestart_writing_ftp: putstart_writing_ftp: put failedstatusstatusstrstop_reading_ftp: aborting connectionstop_reading_ftp: exiting: %sstop_reading_ftp: waiting for transfer to finishstringsubmit directly - no resource discovery or matchmakingsubmit jobs as dry run (no submission to batch system)submit test job given by the numbersynch message: %ssystem retval: %dtest job runtime specified by the numberthe IDs of the submitted jobs will be appended to this filethe computing element specified by URL at the command line should be queried using this information interface (possible options: org.nordugrid.ldapng, org.nordugrid.ldapglue2, org.nordugrid.wsrfglue2, org.ogf.glue.emies.resourceinfo)the file storing information about active jobs (default %s)this option is not functional (old GSI proxies are not supported anymore)timeout in seconds (default 20)treat requested object as directory and always try to list contenttruncate the joblist before synchronizingunable to load number from: %sundefined pluginundefined virtual plugin pathunknown (non-gridmap) user is not allowedunnamed groupunsupported configuration command: %surlurl [url ...]urllist %s contains invalid URL: %suse GSI communication protocol for contacting VOMS servicesuse HTTP communication protocol for contacting VOMS services that provide RESTful access Note for RESTful access, 'list' command and multiple VOMS server are not supported use NSS credential database in default Mozilla profiles, including Firefox, Seamonkey and Thunderbird. use NSS credential database in the Firefox profileuse passive transfer (off by default if secure is on, on by default if secure is not requested)use secure transfer (insecure by default)use specified configuration fileuse the jobname instead of the short ID as the job directory nameusername to MyProxy serverusername to MyProxy server (if missing subject of user certificate is used)vomsvoms<:command>. Specify VOMS server (More than one VOMS server can be specified like this: --voms VOa:command1 --voms VOb:command2). 
:command is optional, and is used to ask for specific attributes(e.g: roles) command options are: all --- put all of this DN's attributes into AC; list ---list all of the DN's attribute, will not create AC extension; /Role=yourRole --- specify the role, if this DN has such a role, the role will be put into AC /voname/groupname/Role=yourRole --- specify the VO, group and role; if this DN has such a role, the role will be put into AC wait failed - killing childwrong SSL lock requested: %i of %i: %i - %swrong boolean in %swrong number in %sxrootd close failed: %sxrootd open failed: %sxrootd write failed: %syyear: %s~DataPoint: destroy ftp_handle~DataPoint: destroy ftp_handle failed - retrying~DataPoint: failed to destroy ftp_handle - leakingProject-Id-Version: Arc Report-Msgid-Bugs-To: support@nordugrid.org POT-Creation-Date: 2017-12-13 22:31+0100 PO-Revision-Date: 2017-09-19 15:16+0200 Last-Translator: Oxana Smirnova Language-Team: Russian MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Language: ru X-Generator: Poedit 1.8.7.1 Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2); X-Poedit-KeywordsList: msg:2;IString:1;istring:1;FindNTrans:1,2 X-Poedit-Basepath: /home/oxana/CVSROOT/ARC1 X-Poedit-SearchPath-0: src %s Кэш : %s Очистка кэша отключена Очистка кэша включена Каталог с кэшем ссылок: %s Контрольный каталог: %s Удалённый кэш : %s Ссылка на удалённый кэш: %s Корневой каталог сессии: %s СУПО по умолчанию : %s очередь по умолчанию : %s Время жизни по умолчанию : %u Запустите 'arcclean -s Undefined' для удаления вычищенных задач из списка Запустите 'arcclean -s Undefined' для удаления оборванных задач из списка Для восполнения недостающих задач, запустите arcsync Используйте arcclean для удаления несуществующих задач Используйте arcclean для удаления полученных задач из списка Используйте arclean для удаления полученных задач из списка Исполняемый: верно Name: %s Sources.DelegationID: %s Sources.Options: %s = %s Источники: %s Targets.DelegationID: %s Targets.Options: %s = %s Назначения: %s %s DN сертификата: %s действителен до: %s DN эмитента: %s Серийный номер: %d %s: %s: %i %s: %s Служба доставки: %s Служба доставки: LOCAL Мест для отгрузки: %u Мест для срочной обработки: %u Мест для окончательной обработки: %u Мест для предварительной обработки: %u Подготовленных мест: %u Конфигурация квот: %s Состояние точки входа (%s): %s Состояние точки входа (%s) - STARTED или SUCCESSFULатрибуты:базовое ОИ (DN): %sфильтр: %s непределённых: %i %s -> %s (%s)--- ХОЛОСТАЯ ПРОГОНКА --- Контроль доступа: %s Аннотация: %s Argument: %s Информация об эталонных тестах: Каталог, содержащий журнальную запись вычислительного сервиса: %s URL точки входа для вычислений: %s Название интерфейса точки входа для вычислений: %sТребования к вычислительному ресурсу: Служба параметров доступа: %s Идентификаторы делегирования: Элемент DelegationID: %sВремя окончания: %sЗапись действительна на: %sЗапись действительна с: %s Environment.name: %s Environment: %sКод выхода: %d Код выхода успешного исполнения: %d Состояние здоровья: %s ID сервиса: %s Элемент Inputfile:Установленные рабочие среды: Ошибка задачи: %s Задача не требует эксклюзивного исполнения URL управления задачей: %s (%s) Задача требует эксклюзивного исполнения URL состояния задачи: %s (%s) Очередь присвоения: %sИмя: %s Код выхода для успешного исполнения не указан Доступ к узлу: входящий Доступ к узлу: входящий и исходящий Доступ к узлу: исходящийУведомить: 
Старый ярлык задания: %s Старый ярлык задачи:Требования к операционной системе:Другие сообщения: %s Другие атрибуты: [%s], %s Элемент Outputfile:Владелец: %s PostExecutable.Argument: %s PreExecutable.Argument: %s Время начала обработки: %sДоверенность действительна до: %sОчередь: %s Удалённое журналирование (по выбору): %s (%s) Удалённое журналирование: %s (%s)Запрошенное процессорное время: %s Запрошено вакансий: %iРезультаты должны быть востребованы до: %sРезультаты были удалены: %sТребования среды выполнения: URL информации о сервисе: %s (%s) URL каталога Грид-сесии: %s Специфическое состояние: %s URL каталога для загрузки: %s URL каталога для отгрузки: %s Состояние: %sСтандартная ошибка: %sСтандартный вход: %sСтандартный выход: %sЗасылающий клиент: %sЗаслана: %sВерсия клиента: %sИспользованное процессорное время: %sИспользование ОЗУ: %dИспользованное время: %sПоложение в очереди: %d [ тестировщик JobDescription ] [ Обрабатывается исходный текст ] [ egee:jdl ] [ emies:adl ] [ nordugrid:jsdl ] [ nordugrid:xrsl ] $X509_VOMS_FILE и $X509_VOMSES не настроены; Пользователь не указал расположение файла vomses; Расположение файла vomses не найдено в файле настроек пользователя; Файл vomses не обнаружен в ~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses, а также в соответствующих подкаталогах$X509_VOMS_FILE и $X509_VOMSES не настроены; Пользователь не указал расположение файла vomses; Расположение файла vomses не найдено в файле настроек пользователя; Файл vomses не обнаружен в ~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses, а также в соответствующих подкаталогах%d системы управления пакетной обработкой%d точки входа%d Совместные ресурсы%d правила присвоения%d из %d задач были перезапущены%d из %d задач были засланыОсталось %i попыток, повторная попытка в %s%li секунд(ы) с момента создания файла блокировки %s%s%s %s%s %s не может быть создан.%s (%s)%s > %s => неверно%s > %s => неверно: %s содержит нецифровые символы в номере версии.%s > %s => верноКласс %s не является объектом%s создансоздан каталог %sКаталог %s уже сушествует! Задача пропускается.%s не удалось%s не является поддерживаемым типом профиля%s не является каталогом%s не является объектом%s не допущен к исполнению действия %s на ресурсе %s Модуль %s сброшен на диск%s ошибка разборкиНе найден подключаемый модуль %s "%s".Запрос %s не выполненЗапрос %s к %s не выполнен, получен ответ: %sСбой запроса %s к %s. Отсутствует ожидаемый отклик.Запрос %s к %s не выполнен, неожиданный ответ: %s%s, версия %s%s->%s%s. Невозможно скопировать набор файлов%s: %i%s: %s%s: %s: Новая задача принадлежит %i/%i%s: %s:%i%s: Местонахождение в ACIX: %s%s: Добавление нового файла выхода %s: %s%s: Все процессы %s успешно завершились (%s)%s: Запрос файла с ленты %s в очереди SRM. 
Ожидание %i секунд%s: Невозможно преобразовать контрольную сумму файла %s в целое для %s%s: Невозможно преобразовать размер файла %s в целое для %s%s: Невозможно прочесть список входных файлов%s: Невозможно прочесть состояние - никаких комментариев, просто чистка%s: Перезапуск по требованию невозможен%s: Перезапуск по запросу невозможен - неподходящее состояние%s: Прерывание задачи по запросу пользователя%s: Прерывание не удалось (вероятно, задача закончилась) - всё равно удаляем%s: Прерывание, вероятно, удалось - удаление%s: Прерывание активных запросов%s: Прерывание остальных запросов DTR%s: Невозможно записать два разных файла %s и %s с одним LFN: %s%s: Проверка отгружаемого файла пользователя: %s%s: Проверочная сумма %llu подтверждена для %s%s: Очистка управляющей%s: Критическая ошибка для отгружаемого файла %s%s: Сбой запроса DTR %s на копирование файла %s%s: копирование DTR %s в %s не удалось, но не было обязательным%s: Удаление запроса в связи с внутренними неполадками%s: Файл назначения %s вероятно остался недописанным после предыдущего запуска A-REX, перезапись%s: Уничтожается%s: Повторяющееся имя файла в списке входных файлов: %s%s: Ошибка доступа к файлу %s%s: Ошибка при чтении файла %s%s: Чтение выходных файлов в списке пользователя %s%s: Не удалось создать файл grami%s: Не удалось извлечь информацию о локальном состоянии задачи%s: Не удалось получить номер из СУПО%s: Ошибка при чтении .local и изменении состояния, задачи и A-REX могут оказаться в противоречивом состоянии%s: Не удалось прочесть локальную информацию%s: Не удалось выполнить процедуру прерывания%s: Не удалось выполнить процедуру запуска%s: Не удалось установить права на исполнение%s: Не удалось оборвать исполняющуюся задачу%s: Не удалось очистить каталог сессии%s: Сбой вывода содержимого каталога назначения %s: %s%s: Не удалось подгрузить анализатор для правил допуска пользователей%s: Не удалось открыть файл %s на чтение%s: Сбой при разборе правил допуска пользователя%s: Не удалось прочесть динамический список выходных файлов в %s%s: Не удалось прочесть список входных файлов%s: Не удалось прочесть список входных файлов, невозможно очистить каталог сессии%s: Не удалось прочесть список выходных файлов%s: Не удалось прочесть список выходных файлов, невозможно очистить каталог сессии%s: Не удалось прочесть переработанный список входных файлов%s: Не удалось прочесть переработанный список выходных файлов%s: Сбой при запуске подключаемого модуля%s: Не удалось изменить идентификатор пользователя на %d/%d для чтения файла %s%s: Не удалось записать динамические выходные файлы обратно в %s%s: Не удалось записать список входных файлов%s: Не удалось записать список выходных файлов%s: Не удалось вывести список состояний выходных файлов %s: Не удалось записать изменившийся входной файл%s: Не удалось записать список выходных файлов: %s%s: Не удалось записать локальную информацию%s: Не удалось записать локальную информацию: %s%s: Сбой создания хранилища данных для дочернего процесса%s: Сбой создания области памяти для дочернего процесса%s: Сбой при запуске дочернего процесса%s: Сбой ожидания окончания дочернего процесса%s: У файла %s неверная контрольная сумма: %llu. Ожидалась %lli%s: Запрос файла %s в очереди SRM. Ожидание %i секунд%s: Обрабатываются файлы в списке %s%s: Недействительный запрос DTR%s: Неверный файл: %s слишком велик.%s: Неверная информация о размере/контрольной сумме (%s) для %s%s: Прерывание задачи происходит слишком медленно, но диагностика уже доступна. 
Будем считать, что прерывание произошло.%s: Прерывание задачи происходит слишком долго. Сбой.%s: Сбой исполнения задачи в неизвестном состоянии. Перезапуска не будет.%s: Обнаружен сбой задачи%s: Задача завершена%s:Задача уже завершилась. Действия по прерыванию не применяются %s: Задача устарела - удаляется оставшаяся информация%s: Задачу нельзя больше перезапускать%s: Поступил запрос на удаление задачи - удаляется%s: Задача слишком старая - удаляется%s: Не удалось направить задачу в СУПО%s: Засылка задачи в СУПО происходит слишком медленно, но идентификатор уже доступен. Будем считать, что засылка произведена.%s: Засылка задачи в СУПО происходит слишком долго. Сбой.%s: достигнут предел в %u скриптов СУПО - приостанавливается запуск/прерывание%s: К расположению %s нет удалённого доступа, пропускается%s: Подключаемый модуль в состоянии %s : %s%s: Сбой при исполнении подключаемого модуля%s: Сбой подключаемого модуля%s: Не удалось обработать описание задачи%s: Чтение выходных файлов в списке пользователя %s%s: Не удалось прочесть состояние новой задачи%s: Получен запрос DTR %s на копирование файла %s в состоянии %s%s: Получен запрос DTR с двумя удалёнными адресами!%s: Получен запрос на размещение файлов (%s)%s: Задача получена в плохом состоянии: %s%s: Удаляется %s из динамического списка выходных файлов %s%s: Сбой повторной обработки описания задачи%s: Некоторые процессы %s дали сбой%s: Состояние: %s после %s%s: Состояние: %s: всё ещё в процессе переноса данных%s: Состояние: ACCEPTED%s: Состояние: ACCEPTED: dryrun%s: Состояние: ACCEPTED: время на исполнение %s%s: состояние ACCEPTED: переход в PREPARING%s: Состояние: ACCEPTED: обрабатывается описание задачи%s: Состояние: CANCELING%s: Состояние: FINISHING%s: Состояние: INLRMS%s: состояние INLRMS: сообщение на выходе %i %s%s: Состояние: PREPARING%s: Состояние: SUBMIT%s: Эта задача, возможно, ещё исполняется - прерывание%s: Попытка удалить задание из несуществующего процесса размещения данных%s: Попытка удалить задание из активного процесса размещения данных%s: Два одинаковых назначения для выдачи: %s%s: Неизвестное правило допуска пользователя '%s'%s: Истекло время ожидания отгружаемых файлов%s: Пользователь отгрузил файл %s%s: контрольная сумма %s%s: размер %llu%s: состояние CANCELING: дочерний процесс завершился с кодом выхода %i%s: состояние CANCELING: собирается диагностика задачи%s: состояние CANCELING: запуск дочернего процесса: %s%s: состояние CANCELING: срок ожидания прерывания истёк%s: состояние: SUBMIT: дочерний процесс завершился с кодом выхода: %i%s: состояние SUBMIT: запуск дочернего процесса: %sожидается ')'ожидается ')'Использование атрибута 'action' в пользовательском описании задачи не допускаетсяОпция настроек 'control' теперь называется 'controldir'; пожалуйста, используйте новое названиеНеобходимо задать значение атрибута 'stdout', если задано значение атрибута 'join'Следующее назначениеСледующий источник(пусто)(нулевой)--same и --not-same не могут быть заданы одновременно: %d: %s: Сбой подготовки дочернего процесса для отчёта: Сбой запуска дочернего процесса для отчёта: Не задано имя регистратора: Средство измерения характеристик выдало ошибку %i: %s< %s<< %s> %sЗапрошен вычислительный ресурс, использующий интерфейс GridFTP, но необходимый %sподключаемый модуль не был подгружен. Устанавливали ли Вы этот модуль? %sЕсли нет, пожалуйста, установите пакет 'nordugrid-arc-plugins-globus'. 
%sНазвание пакета может зависеть от типа вашего дистрибутива.Ошибка запроса об удалении результатов работы задачиУспешный запрос об удалении результатов работы задачиОшибка запроса об обрыве исполнения задачиУспешный запрос об обрыве исполнения задачиОтвет на запрос о засылке не является сообщением SOAPОшибка запроса о состоянии службыУспешный запрос о состоянии службыОшибка запроса о запуске задачиУспешный запрос о запуске задачиСбой запроса о состоянииЗапрос о состоянии удалсяИнформация о расширении AC для VO Сертификат атрибута недействителен:ACIX ответил %sСоздан файл агрегированного сообщения APEL (%s).Создан файл сообщений APEL (%s).Запрос авторизации ARC: %sПолитика делегирования ARC: %sAREXClient не был создан надлежащим образом.Запрос на прерывание по причине ошибки в функции передачиЗапрос на прерывание по причине ошибки передачиИсполнение прервано!Сбой принятияСбой принятия: %sПринято соединение с %u.%u.%u.%u:%uПринято соединение с [%s]:%uПринято соединение к %u.%u.%u.%u:%uПринято соединение к [%s]:%uЗапрос на засылку новой задачи или изменение старой принят: %sМестонахождение списка доступа: %sПолучен маркер доступа для %s: %sОшибка активацииДобавление расположения: metadata: %sДобавление расположения: url: %sДобавляется вычислительная служба типа CREAMДобавляется значение FQAN: %sДобавляется FQAN/первичное значение: %sДобавляется значение группы VOMS: %sДобавляется значение первичной группы VOMS: %sДобавляется значение первичной роли VOMS: %sДобавляется значение роли VOMS: %sДобавляется значение виртуальной организации: %sДобавляется значение action-id: %sДобавляется значение цепочки сертификатов: %sДобавляется точка доступа '%s' с названием интерфейса %sДобавление точки входа (%s) в ServiceEndpointRetrieverДобавление точки входа (%s) в TargetInformationRetrieverТочка входа (%s) добавляется как к ServiceEndpointRetriever, так и к TargetInformationRetrieverДобавляется адрес: %s - %sДобавляется значение profile-id: %sДобавляется маркёр запроса %sДобавляется значение идентификатора ресурса: %sДобавляется значение владельца ресурса: %sДобавляется значение resoure-id: %sДобавляется маркёр пространства памяти %sДобавляется значение subject-id: %sДобавляется значение subject-issuer: %sДобавление к массовому запросуДобавляется значение virtual-organization: %sАдрес: %sАгрегированная запись (%s) успешно инициализирована.Агрегированная запись (%s) не существует, производится инициализация...Агрегированная запись (%s) успешно прочитана из файла.Агрегированная запись (%s) успешно сохранена.Квота на процессы (%u) использованаВсе запросы DTR для задачи %s завершеныСбой всех запросовВсе требования удовлетворены.Все полученные результаты неверныВыделено %u буферов по %llu байт каждый.Разрешить указанному клиенту получать учётные данные без пароля. 
Эта опция используется только командой PUT в отношении сервера Myproxy.Каталог %s уже существуетЧтения из источника уже в процессеЗапись в цель уже в процессеВозникла ошибка при составлении описания задачи для засылки на %sДругой процесс (%s) обладает блоком файла %sПараметры приложения:Политика ARC не может быть задана в профиле SAML2.0 XACMLArcAuthZ: не удалось инициализировать все PDP - этот процесс будет нерабочимАрхивирование запроса DTR %s, состояние %sАрхивирование запроса DTR %s, состояние ERRORАрхивирование записи Usage Record в файл %sВы уверены, что хотите вычистить задачи с отсутствующей информацией?Вы уверены, что хотите синхронизировать список локальных задач?Формирование записи журнала программы разбора BLAH: %sПриписан к VO %sПриписан к группе допуска %sПредполагается, что файл не найденПредполагаем, что пересылка уже отменена, либо оборваласьДля атрибута 'inputfiles' необходимы как минимум два значенияДля атрибута 'outputfiles' необходимы как минимум два значенияПопытка интерпретации относительного путь как URL - заменяется на абсолютныйПопытка соединиться с %s по порту %iАтрибут '%s' задан несколько разАтрибут 'join' не может использоваться, если заданы оба атрибута 'stdout' и 'stderr'Значение атрибута (1): %sЗначение атрибута (2): %sЗначение атрибута в субъекте: %sЗначение атрибута drain для sessionRootDir не является верным булевскимИмя атрибута (%s) содержит неверный символ (%s)Ожидается имя атрибутаАтрибуты 'gridtime' и 'cputime' не могут быть заданы одновременно.Атрибуты 'gridtime' и 'walltime' не могут быть заданы одновременноСбой проверки подлинности при исполнении инструкцийАдрес URL запроса подтверждения подлинности: %sСбой при проверке подлинностиДопущен через arc.pdpДопущен удалённой службой PDPДопущен через simplelist.pdp: %sДопущен через xacml.pdpСбой метода BN_new или RSA_newСбой метода BN_set_wordСоздан резервный файл (%s).Недопустимый URL в acix_endpointНедопустимый URL в deliveryService: %sНедопустимый URL в deliveryservice: %sНеприемлемая информация для проверки подлинности: %sНеверный формат контрольной суммы %sНедопустимое значение параметра доступа %s в правилах доступа к кэшуНеверное имя каталога: %sОбнаружен недопустимый формат в файле %s, строке %sНеверный формат отклика XML службы доставки на %s: %sНедопустимый формат отзыва XML от сервиса в %s: %sНеверный формат отклика XML: %sПлохая метка: "%s"Неверная логикаНеверная логика в %s - bringOnline завершился успешно, но запрос SRM не завершился успехом, либо ещё в процессеНеверная логика в %s - getTURLs завершился успешно, но запрос SRM не завершился успехом, либо ещё в процессеНеверная логика в %s - putTURLs завершился успешно, но запрос SRM не завершился успехом, либо ещё в процессеУказан неподходящий каталог для монтированияНедопустимое имя для исполняемого файла: Недопустимое имя для исполняемого файла: %sНедопустимое название среды выполнения: %sНедопустимое имя для stderr: %sНедопустимое имя для stdout: %sНедопустимое значение definedshare %sНедопустимое значение maxdeliveryНедопустимое значение maxemergencyНедопустимое значение maxpreparedНедопустимое значение maxprocessorнедопустимое число в maxtransfertriesНедопустимый приоритет: %sНедопустимое значение remotesizelimitНедопустимое значение speedcontrolНеверное число: %sОбнаружен неверный или устаревший формат в файле %s, строке %sНеверный путь к %s: Rucio поддерживает запись/чтение в /objectstores и лишь чтение в /replicasНеверная инструкция в строке настроек: %sНедопустимое значение debugНедопустимое значение logLevelНеверно сформированный 
pid %s в файле блокировки %sИнформация о СУПО:Информация о СУПО:Блок %s не обнаружен в файле настроек %sНе указан BlockNameОба URL должны содержать одинаковый протокол, адрес сервера и портОба элемента CACertificatePath and CACertificatesDir отсутствуют или пустыЗапрос %s на размещение на диске успешно выполнен, файл теперь в состоянии ONLINEЗапрос %s на размещение на диске всё ещё в очереди, следует подождатьНедопустимая строкаПодгружен планировщик %sПодключаемый модуль брокера "%s" не обнаруженСледующие планировщики доступны для %s:Невозможно создать буфер!Сбой регистрации буфераВ процессе отключения менеджера модулей обнаружены занятые подключаемые модули. Ожидается их завершение.Сертификат и закрытый ключ агентства не совпадаютКаталог сертификатов агентств CA %s не существуетИмя сертификационного агентства: %sУстановленные сертификаты CA:Тактовая частота процессора: %iМодель процессора: %sПроизводитель процессора: %sВерсия процессора: %sНе удалось создать запрос CREAM: %sCREAMClient не был создан надлежащим образом.Кэш %s: Свободное пространство %f GBДоступ к кэшу разрешён для %s пользователю с DN %sДоступ к кэшу разрешён для %s для ВО %sДоступ к кэшу разрешён для %s для ВО %s и группы %sДоступ к кэшу разрешён для %s для ВО %s и роли %sСвободное пространство кэша: %i GBОбщий объём пространства кэша: %i GBСбой в работе скрипта очистки кэшаНастройки кэша: %sДата создания кэша: %sПапка кэша: %sКэшированный файл %s не существуетНе обнаружен кэшированый файл %sКэшированный файл %s был удалён во время создания ссылки или копии, новая попыткаКэшированный файл %s был заблокирован во время создания ссылки или копии, новая попыткаКэшированный файл %s был изменён в последний момент, приостановка процесса на 1 секунду для предотвращения гонки Кэшированный файл %s был изменён во время создания ссылки или копии, новая попыткаКэшированный файл для %s не был обнаружен ни в локальном, ни в удалённом кэшахФайл кэша: %sМета-файл кэша %s пуст, будет воссозданМета-файл кэша %s, возможно, повреждён, будет воссозданНе обнаружен кэш файла %sКорневая папка кэша: %sCacheService: Нет допускаКопия в кэше ещё действительнаФайл в кэше заблокирован - попытаемся зановоФайл в кэше устарел, будет загружен зановоВычисленная проверочная сумма %s совпадает с проверочной суммой сервераВычисленная контрольная сума: %sВычисленная контрольная сумма передачи %s совпадает с контрольной суммой источникаВычисленная/указанная контрольная сумма %s совпадает с контрольной суммой, заявленной точкой назначения SRM %sСбой обратного вызоваВызов PrepareReading когда запрос был уже подготовлен!Вызов PrepareWriting когда запрос был уже подготовлен!Вызов ACIX с запросом %sВызывается http://localhost:60000/Echo используя ClientSOAPВызывается http://localhost:60000/Echo используя httplibВызывается https://localhost:60000/Echo используя ClientSOAPВызывается подключаемый модуль %s для опроса точки входа на %sНе удалось открыть каталог сертификатов CA: %s. Сертификаты .Не удалось открыть каталог сертификатов CA: %s. 
Сертификаты не будут проверены.Невозможно открыть каталог или файл VOMS: %s.Невозможно открыть каталог или файл VOMSES: %s.Нет доступа к файлу сертификата: %sНет доступа к файлу личного ключа: %sНет доступа к файлу доверенности: %sНевозможно добавить расширенное X509 расширение KeyUsage к новой доверенностиНевозможно добавить расширение X509 к доверенностиНе удалось вычислить дайджест открытого ключаНевозможно преобразовать расширение PROXY_CERT_INFO_EXTENSION в кодировке DER во внутренний форматНевозможно преобразовать структуру PROXY_CERT_INFO_EXTENSION из внутреннего формата в DERНевозможно преобразовать структуру keyUsage из формата кодировки DERНевозможно преобразовать структуру keyUsage из внутреннего формата в DERНевозможно преобразовать закрытый ключ в формат DERНевозможно преобразовать подписанный сертификат EEC в формат DERНевозможно преобразовать подписанную доверенность в формат DERНевозможно преобразовать подписанную доверенность в формат PEMНевозможно преобразовать строку в ASN1_OBJECTНевозможно скопировать расширенное расширение KeyUsageНевозможно скопировать имя субъекта выдающего агентства в доверенностьНевозможно создать BIO для разбора запросаНевозможно создать BIO для запросаНевозможно создать неформатированный ввод/вывод BIO для подписанного сертификата EECНевозможно создать неформатированный ввод/вывод BIO для подписанной доверенностиНе удалось создать объект PolicyStoreНе удалось создать XACML Action Не удалось создать атрибут XACML ActionAttribute: %s Не удалось создать XACML Resource Не удалось создать атрибут XACML ResourceAttribute: %s Не удалось создать атрибут XACML SubjectAttribute: %s Не удалось создать запрос XACML Невозможно создать новую переменную X509_NAME_ENTRY для запроса доверенностиНевозможно создать делегируемый документ для службы делегированию: %sНевозможно создать расширение для PROXY_CERT_INFOНевозможно создать расширение для keyUsageНевозможно создать расширение для доверенностиНевозможно создать функцию %sНевозможно создать функцию: FunctionId не существуетНевозможно создать компонент названия CN для доверенностиНе удалось создать объект SSL ContextНе удалось создать объект SSLНевозможно определить место установки. Используется %s. Если это не соответствует действительности, задайте, пожалуйста, переменную ARC_LOCATIONНевозможно скопировать серийный номер доверенностиНевозможно дублировать имя субъекта для запроса самозаверяющей доверенностиНе удалось динамически создать AlgFacrotyНе удалось динамически создать AttributeFactoryНе удалось динамически создать анализаторНе удалось динамически создать FnFactoryНе удалось динамически создать PolicyНе удалось динамически создать RequestНевозможно найти элемент с нужным пространством имёнНевозможно найти элемент с нужным пространством имёнНе обнаружен ArcPDPContextКаталог сертификатов СА не обнаружен ни в одном из стандартных мест: ~/.arc/certificates, ~/.globus/certificates, %s/etc/certificates, %s/etc/grid-security/certificates, %s/share/certificates, /etc/grid-security/certificates. Сертификат не будет подтверждён. 
[Residue of the Russian (ru) gettext message catalog shipped with nordugrid-arc: localized client/server strings covering certificate, proxy and VOMS handling, delegation, GridFTP/LDAP/SRM data transfer, caching, and job management (A-REX, EMI-ES, BES). The msgid/msgstr structure of the catalog was lost in extraction and the individual entries run together without delimiters, so they are not reconstructable here; the catalog content continues beyond this section.]
ни один из ресурсов не предоставил информациюПерезасылка задачи оборвана: Невозможно подгрузить планировщик (%s)Сводка перезапуска задач:Задача успешно возобновленаСводка засылки задач:Пользователь, отправивший задачу: %s (%i:%i)Задача запущена с ярлыком: %sОшибка прерывания задачиЗадача: %sПодключаемый модуль JobControllerPlugin %s не может быть созданПодключаемый модуль JobControllerPlugin "%s" не обнаруженКласс JobDescription не является объектомПодключаемый модуль JobDescriptionParserPlugin %s не может быть созданПодключаемый модуль JobDescriptionParserPlugin "%s" не обнаруженНеопрошенных задач: %dЗадачи с отсутствующей информацией не будут вычищены!Обработано задач: %d, уничтожено: %dОбработано задач: %d, обновлено: %dОбработано задач: %d, возобновлено: %dОбработано задач: %d, успешно оборвано: %dОбработано задач: %d, успешно оборвано: %d, успешно очищено: %dОбработано задач: %d, успешно получено: %dОбработано задач: %d, успешно получено: %d, успешно очищено: %dНеразборчивые фрагменты в конце RSLБессмыслица в команде sessiondirКлюч %s, сертификат: %s, CA: %sКлюч: %s, Сертификат: %s, Доверенность: %s, Каталог CA: %s, путь CAКлюч: %s, Сертификат: %s, Доверенность: %s, Каталог CA: %s, файл CAКлюч: %s, сертификат: %sСбой прерывания задачиПрерван сигналом: Прерывание соединения в связи с истёкшим лимитом времениLCMAPS не возвратил никакого GIDLCMAPS не возвратил никакого UIDLCMAPS содержит getCredentialDataLCMAPS содержит lcmaps_runLCMAPS возвратил UID не соответствующий учётной записи: %uLCMAPS возвратил недопустимый GID: %uLCMAPS возвратил недопустимый UID: %uАвторизация для LDAP не поддерживаетсяАвторизация для LDAP больше не поддерживаетсяСоединение LDAP с %s уже установленоИстекло время ожидания ответа на запрос LDAP: %sLDAPQuery: Получение результатов с %sLDAPQuery: устанавливается соединение с %s:%dLdapQuery: Запрашивается %sСбой LIST/MLSTСбой LIST/MLST: %sОтсутствует СУПОЯзык (%s) не опознан ни одним из модулей разборки описаний задач.Сбой последнего шага регистрации в каталогеШирота: %fИстекло время соединения с LDAP (%s)Выход из загрузчика (%i)Отгрузчик покидается (%i)Левый операнд для сцепления RSL не приводится к буквенной константеLegacyMap: не заданы группы настроекLegacyPDP: атрибут безопасности ARC Legacy не опознан.LegacyPDP: атрибут безопасности ARCLEGACY не задан. 
Возможно, обработчик безопасности ARC Legacy не настроен, или претерпел сбой.LegacySecHandler: не указан файл настроекБиблиотека: %sСтрока %d.%d атрибутов выдала: %sПодцепление MCC %s(%s) к MCC (%s) в %sПодцепление MCC %s(%s) к коммутатору (%s) в %sПодцепление MCC %s(%s) к службе (%s) в %sПодцепление коммутатора %s к MCC (%s) в %sПодцепление коммутатора %s к коммутатору (%s) в %sПодцепление коммутатора %s к службе (%s) в %sПодцепляется локальный файлСоздаётся символическая ссылка на соответствующий файлСоздаётся символическая ссылка на соответствующий файл - невыполнимо на WindowsПодцепление/копирование файла из кэшаСоздание ссылки/копирование файла из кэша в %sСбой перечисления задачПеречисление не поддерживается для REST-интерфейса VOMSПеречисление не поддерживается для традиционного интерфейса VOMSПеречисление запросит информацию stat об URL %sListFiles: поиск метаданных: %sСбой прослушиванияПрослушивание завершеноПрослушивание началосьПрослушивается %s:%s(%s)Прослушивается порт TCP %s(%s)Задачи успешно перечислены, обнаружено %d задач(и)Подгружаемый модуль %s не содержит запрашиваемого модуля %s типа %sПодгружен модуль %s Загружен %s %sПодгружен JobControllerPlugin %sПодгружен JobDescriptionParserPlugin %sПодгружен MCC %s(%s)Подгружен Plexer %sПодгружена служба %s(%s)Подгружен SubmitterPlugin %sЗагружается %u-я служба PythonПодгрузка Python broker (%i)Чтение файла настроек (%s)Локальный файл-приёмник для отгрузчика %sЛокальная группа %s не существуетНеобходимо указать файл состояния задачиВнутренние задачи в счёте: %iЛокальный источник загрузки: %sВнутренние приостановленные задачи: %iЛокальный пользователь %s не существуетЛокальное имя пользователя не существуетВнутренние задачи в очереди: %iНедопустимый URI в Location для файла %sТакой файл уже существуетВ URL назначения LFC отсутствуют местоположенияБлок %s принадлежит другому процессу (%s)Файл блокировки %s не существуетЗаписывается набор UR из %d записей UR.Долгота: %fПоиск URL %sПоиск копий файла-источникаИнтервал с нижней границей не поддерживается для элемента 'TotalCPUCount'.MCC %s(%s) - следующий %s(%s) не содержит назначенияMLSD не поддерживается - пробуем NLSTMLST не поддерживается - пробуем LISTГоловной процесс Python не был запущенГоловной поток Python не был запущенОбъём основной памяти: %iПринимается решение для очереди %sОбнаружена недопустимая запись ARCHERY (не задан тип точки входа): %sОбнаружена недопустимая запись ARCHERY (не задан URL точки входа): %sИскажённый отзыв: отсутствует getProxyReqReturnНевозможно открыть пул пользователей в %s.Файл приписки пользователей отсутствует в %sПривязка к локальной группе с идентификатором: %iПривязка к локальной группе с именем: %sПривязка к локальному идентификатору: %iПривязка к текущему пользователю: %sДомашний каталог привязанного пользователя: %sСоответствующие user:group (%s:%s) не обнаружены%s ставится в соответствие %sПравило присвоения:Назначается очередь: %sСовпадение ВО: %sСовпадений нетСоответствие: %s %s %s %sСравнение; %s (%d) не соответствует (%s) значению %s (%d), публикуемому назначением для исполнения.Сравнение; значение эталонного теста %s не публикуется назначением для исполнения.Сравнение; несовпадение CacheTotal: у назначения для исполнения: %d MB (CacheTotal), в описании задачи: %d MB (CacheDiskSpace)Сравнение; не удовлетворено требование к вычислительному ресурсу. 
Назначение для исполнения: %sСравнение; несовпадение ConnectivityIn: у назначения для исполнения: %s (ConnectivityIn), в описании задачи: %s (InBound)Сравнение; несовпадение ConnectivityOut: у назначения для исполнения: %s (ConnectivityOut), в описании задачи: %s (OutBound)Сравнение; назначение для исполнения: %s, значение OperatingSystem не определеноСравнение; назначение для исполнения: %s, значение CacheTotal не определеноСравнение; назначение для исполнения: %s, состояние здоровья не определеноСравнение; назначение для исполнения: %s, значение ImplementationName не определеноСравнение; назначение для исполнения: %s, значения MaxDiskSpace и WorkingAreaFree не определеноСравнение; ExecutionTarget: %s, не задано MaxTotalCPUTime или MaxCPUTime, предполагается отсутствие ограничений на процессорное времяСравнение; ExecutionTarget: %s, не задано MinCPUTime, предполагается отсутствие ограничений на процессорное времяСравнение; назначение для исполнения: %s, значение NetworkInfo не определеноСравнение; назначение для исполнения: %s, значение Platform не определеноСравнение; не удовлетворены требования RunTimeEnvironment к ExecutionTarget: %sСравнение; назначение для исполнения: %s, значения TotalSlots и MaxSlotsPerJob не определеныСравнение; назначение для исполнения: %s, значение WorkingAreaLifeTime не определеноСравнение; ExecutionTarget: %s соответствует описанию задачиСравнение; назначение для исполнения: %s, значение ApplicationEnvironments не определеноСравнение; назначение для исполнения: %s, значения MaxMainMemory и MainMemorySize не определеныСравнение; назначение для исполнения: %s, значение MaxVirtualMemory не определеноСравнение; не удовлетворены требования OperatingSystem к ExecutionTarget: %sСравнение; несовпадение MainMemorySize: у назначения для исполнения: %d (MainMemorySize), в описании задачи: %d (IndividualPhysicalMemory)Сравнение; проблема с MaxCPUTime, ExecutionTarget: %d (MaxCPUTime), JobDescription: %d (TotalCPUTime/NumberOfSlots)Сравнение; несовпадение MaxDiskSpace: у назначения для исполнения: %d MB (MaxDiskSpace), в описании задачи: %d MB (DiskSpace)Сравнение; несовпадение MaxDiskSpace: у назначения для исполнения: %d MB (MaxDiskSpace), в описании задачи: %d MB (SessionDiskSpace)Сравнение; несовпадение MaxMainMemory: у назначения для исполнения: %d (MaxMainMemory), в описании задачи: %d (IndividualPhysicalMemory)Сравнение; несовпадение MaxSlotsPerJob: у назначения для исполнения: %d (MaxSlotsPerJob), в описании задачи: %d (NumberOfProcesses)Сравнение; проблема с MaxTotalCPUTime, ExecutionTarget: %d (MaxTotalCPUTime), JobDescription: %d (TotalCPUTime)Сравнение; несовпадение MaxVirtualMemory: у назначения для исполнения: %d (MaxVirtualMemory), в описании задачи: %d (IndividualVirtualMemory)Сравнение; проблема с MinCPUTime, ExecutionTarget: %d (MinCPUTime), JobDescription: %d (TotalCPUTime/NumberOfSlots)Сравнение; не удовлетворено требование NetworkInfo, назначение для исполнения не поддерживает %s, указанное в описании задачи.Сравнение; несовпадение платформ: ExecutionTarget: %s (Platform) JobDescription: %s (Platform)Сравнение; приведённое к значению %s значение %s (%d) не соответствует (%s) значению %s (%d) публикуемому назначением для исполнения.Сравнение; несовпадение TotalSlots: у назначения для исполнения: %d (TotalSlots), в описании задачи: %d (NumberOfProcesses)Сравнение; несовпадение WorkingAreaFree: у назначения для исполнения: %d MB (WorkingAreaFree), в описании задачи: %d MB (DiskSpace)Сравнение; несовпадение WorkingAreaFree: у назначения для исполнения: %d 
MB (WorkingAreaFree), в описании задачи: %d MB (SessionDiskSpace)Сравнение; несовпадение WorkingAreaLifeTime: у назначения для исполнения: %s (WorkingAreaLifeTime), в описании задачи: %s (SessionLifeTime)Длительность, наибольшая (процессорная): %sПредел дискового пространства: %iМакс. память: %iПредел задач в очереди до СУПО: %iЗадачи в счёте (предел): %iПредел сегментов на задачу: %iПредел потоков размещения: %iПотоки отгрузки (верхний предел): %iВсего заданий (предел): %iПредел общего времени (по часам): %sЗадачи пользователя в счёте (предел): %iПредел виртуальной памяти: %iПредел задач в очереди: %iДлительность, наибольшая (по часам): %sМаксимальное время бездействия: %i sЗапущено максимальное количество потоков - новый запрос помещён в очередьОшибка выделения памятиКласс Message не является объектомСообщение, отправленное на сервер VOMS %s: %s Мета-информация источника и адрес не соответствуют друг другу для %sМетаданные копии отличаются от тех, что в каталогеМетаданные источника и назначения не совпадаютМетаданные источника и цели не совпадают. Используйте опцию --force для принудительного копирования.MigrateActivity успешно завершёнMigrateActivity: EPR не содержит JobIDMigrateActivity: невозможно принять делегированиеMigrateActivity: невозможно мигрировать новую задачуMigrateActivity: невозможно мигрировать новую задачу: %sMigrateActivity: не обнаружено описание задачиMigrateActivity: запрос = %sMigrateActivity: отзыв = %sMigrateActivitys: не обнаружен ActivityIdentifierМиграционный документ XML послан к AREXJob: %sДлительность, наименьшая (процессорная): %sДлительность, наименьшая (по часам): %sМинимальная средняя скорость: %llu B/sМинимальная скорость: %llu Б/с в течение %i сОтсутствующий элемент 'from' в mapURLОтсутствующий элемент 'to' в mapURLСубъект центра сертификации отсутствует в политике подписи GlobusОтсутствует элемент CertificatePath или ProxyPath element, или Отсутствует значение атрибута Endpoint Reference, публикуемое сервисом %sВ элементе Connect отсутствует название узла (Host)Отсутствуют значения атрибутов MetaServiceAdvertisment или Expiration, публикуемые сервисом %sВ элементе Connect отсутствует номер порта (Port)В элементе Listen отсутствует номер порта (Port)Отсутствует значение атрибута Type, публикуемое сервисом %sОтсутствует URLВ настройках отсутствует ВООтсутствует аргументОтсутствует информация для проверки подлинностиВ настройках кэша отсутствует корневой каталогНе найден скрипт cancel-%s-job - прерывание задачи может не работатьВ настройках отсутствуют возможностиУсловия субъекта отсутствуют в политике подписи GlobusОтсутствует каталог контроляВ контрольной инструкции пропущен каталогОтсутствует элемент Executable.Отсутствует имя файла в jobreport_logfileОтсутствует заключительный отклик: %sВ настройках отсутствует группаНеполная информация в отклике: %sОтсутствует ярлык задачиОтсутствует имя библиотеки LCASОтсутствует имя библиотеки LCMAPSПропущены настраиваемые параметры для командного демонаОтсутствующая опция для команды logreopenЭлемент CertificatePath отсутствует или пустЭлемент CertificatePath или CACertificatesDir отсутствует или пустЭлемент CertificatePath или CACertificatesDir отсутствует или пуст; будет выполнена лишь проверка подписи, а не удостоверение подлинности сообщенияЭлемент KeyPath отсутствует или пустЭлемент KeyPath отсутствует или пуст, либо отсутствует Элемент PasswordSource отсутствует или пустЭлемент Username отсутствует или пустОтсутствует параметр для опции %cОтсутствует путь к файлу параметров доступаОтсутствует ссылка на 
фабрику и/или модуль. В настоящее время безопасная выгрузка LDAP DMC не поддерживается. Пожалуйтесь разработчикам.Отсутствует указание на фабрику и/или модуль. Использование Globus в неопределённом режиме небезопасно - вызов (Grid)FTP заблокирован. Свяжитесь с разработчиками.Отсутствует указание на фабрику и/или модуль. Использование Globus во временном режиме небезопасно - SubmitterPlugin для ARC0 отключён. Сообщите разработчикам.Отсутствует обязательный элемент Usage Record "RecordIdentity", в файле журнала задач %sОтсутствует обязательный элемент "CpuDuration" в файле журнала задач %sОтсутствует обязательный элемент "Status" в файле журнала задач %sВ настройках отсутствует рольНе найден скрипт scan-%s-job - окончание задачи может быть незамеченымСхема отсутствует! Сверка пропускается...В сообщении отсутствует объект авторизацииОтсутствует ярлык сервисаОтсутствует документ службы, публикуемый сервисом %sОтсутствует каталог сеансовОтсутствует имя субъектаНе найден скрипт submit-%s-job - засылка задачи в СУПО может не работатьНе удалось перезагрузить модуль %s (%s)Модуль %s не является подключаемым модулем ARC (%s)Запуск управления модулями Инициализация менеджера модулей в ModuleManager::setCfgНазвание модуля: %sТочка подключения %sЗаканчивается размещение данныхМножественное описание заданий не допускается в диалекте GRIDMANAGERОператор множественности RSL допускается лишь в начале документаМножественные атрибуты %s в файле настроек (%s)Множественные элементы '%s' не поддерживаются.Сбой MyProxy: %sСервер Myproxy не прислал сертификат с расширением VOMS ACИнструкция NLST/MLSD не прошлаИнструкция NLST/MLSD не прошла: %sБудет использоваться база данных NSS %s Инициализация NSS оборвалась на базе данных сертификатов: %sNULL BIO передан в InquireRequestНулевой обратный вызов для %sИмя: %sОтрицательные права не поддерживаются политикой подписи GlobusНи источник, ни назначение не являются каталогом, поиск копий не будет произведёнНи источник, ни назначение не были размещены с ленточного накопителя, пропускается отмена запросовИнформация о сети:Новое соединениеСоздана новая точка доступа (%s) из точки с неизвестным интерфейсом (%s)Новая задача принята с идентификатором %sСервис EMI ES установил новые значения пределов для параллельных запросов: %dСрок действия новой доверенности истекает в %sСрок действия новой доверенности не дольше старой, доверенность не обновляется В отклике SAML отсутствует Отзыв SOAP не содержит Не обнаружен файл настроек A-REX в настройках службы кэшаНе существует атрибутов, способных трактовать этот тип: %sНе задан элемент ConnectFQAN не обнаружен. В качестве значения userFQAN будет использоваться NULLСервер VOMS не отзывается по HTTPНе найдена СУПО в файле настроекНе задан атрибут LRMSName. 
Он необходим для полного соответствия спецификации интерфейса BES.В отзыве Rucio отсутствует информация RSE: %sВ описании задачи не найдено структуры RSLНет ответа SOAPНет ответа SOAP от службы доставки %sНет ответа SOAP от службы доставкиНе обнаружено элементов URI в Location для файла %sНет политик доступа, нуждающихся в сверке, успешное завершение.Нет активных запросов DTR %sНет активной задачи с ярлыком %sНе получен ответ о допускеНе указан каталог кэшаКэш не описан в файле настроекНе определена функция обратного вызова для %sНикаких изменений в отгружаемых файлах в течение %u сек.Сервер не выдал информацию о контрольной суммеВ отзыве Rucio для %s отсутствует информация о контрольной суммеНевозможно подтвердить контрольную суммуФайл настроек не может быть подгружен.Не найден контрольный каталог в файле настроекВ настройках не заданы контрольные директории (локальные либо удалённые)Не указаны параметры доступаДелегированные параметры доступа не переданыВ данном контексте и сообщении отсутствуют политики делегирования - пропускаетсяВ запросе отсутствует токен делегированияНет доступных назначений для отгрузки, попытаемся позжеНе указан каталог кэша для опорожненияНет ошибокНе задан путь к исполняемому файлу в диалекте GRIDMANAGERНи одна служба исполнения не зарегистрирована в службе регистрацииОтсутствуют загружаемые файлы для задачи %sВ отзыве Rucio для %s отсутствует информация о размере файлаГруппа %i для привязанного пользователя отсутствуетОтзыв не содержит ярлыка задачиНе указан ярлык задачиНе задано описание задачиНи один разборщик не смог обработать описание задачиОтсутствуют разборщики описания заданияНет разборщиков описания задачи, подходящих для обработки языка '%s'A-REX не возвратил ни одного ярлыка задачиСлужба BES не возвратила ни одного ярлыка задачиДля теста %d отсутствует описание задачиЗадач нетНе найдено ни одной задачи, попробуйте позжеЗадачи не указаныНет задач для перезапуска в указанном состоянииОтсутствует левый операнд оператора подцепленияНе инициализированы прослушивающие портыНе указано имя локальной учётной записиПользователь не приписан ни к одному локальному имениНе найдено ни одного местонахождения файла %sНе найдено ни одного местонахождения файла %sНе найдено расположений для назначения, отличающихся от источникаНе найдено расположений для назначения, отличающихся от источника: %sНе найдено физических адресов для назначения: %sНе найдено расположений для файла источника: %sНе найдено расположений - возможно, копий больше нетНе найдено расположений для %sНе осталось расположений для %sНе найдено соответствия для %s в правилах доступа к кэшуБольше копий нет (%s)Опробованы все интерфейсы для точки входа %s.Больше копий нет, будет использован файл %sНе требуется размещение с ленточного накопителя ни источника, ни назначения; размещение пропускается Не присвоено новых информационных документовНе найдено больше MCC или служб в пути "%s"Отсутствует следующий элемент цепиНет контрольных каталогов или каталогов сессий не в состоянии разгрузкиНет каталогов сессий не в состоянии разгрузкиПерезапись не запрошена или не разрешена, предварительная очистка пропускаетсяНе найдено реальных файлов для назначенияНе найдено реальных файлов источникаОтсутствуют настроенные или допущенные расширения по заданному адресу %sДля simplelist.pdp не задан файл политик или DN; пожалуйста, задайте в настройках атрибут location или хотя бы один элемент DN для узла PDP simplelist.Не найдено подходящего порта для %sЗакрытый ключ с именем %s отсутствует в базе данных NSSНе удалось 
обнаружить доверенностьОтсутствует доверенностьНе указано название очереди в названии блока queueНе указан удалённый каталог кэшаНи одна из удалённых служб доставки не подходит, вынужденно используется локальная доставкаНе найдено копий для %sНе указан маркёр запроса!Не найдены маркёры запросаНе удалось собрать запрошенную информацию о безопасностиНет ответа от %sНет ответа от сервера AA %sВызов stat не возвратил никаких результатовОтсутствует правый операнд оператора подцепленияОбработка/проверка параметров доступа не запрошена для '%s'В файле настроек отсутствуют настройки сервераНе задано ни одного сервиса. Пожалуйста, настройте сервисы по умолчанию в файле настроек клиента, либо укажите явным образом ресурс или каталог ресурсов (опции -c или -g, см. arcsync -h)Не найден каталог сессииНе найден каталог сессии в файле настроекНе указан маркёр пространства памятиНе найдены маркёры пространства памяти, соответствующие описанию %sНе указан URL для загрузкиНе получен ответ с сервераСервер VOMS не отзываетсяНет такого запроса DTR %sНет такого файла или каталогаНет такой группы: %sНет такого пользователя: %sПолитика не содержит назначенийПравило не содержит назначенийТестовая задача под номером %d не найдена.Тестовая задача под номером "%d"не существует.Нет подходящих кэшейНе удалось обнаружить сертификат пользователя с кратким именем %sСертификат пользователя не обнаруженНе указано имя пользователяВ настройках не обнаружено ни одного приемлемого кэша, кэширование отключеноНе найдены действительные параметры доступа, выходНе осталось допустимых дескрипторов для прослушиванияEMI ES не возвратил действительных ярлыков задачНет допустимых адресовНе получено приемлемого отзыва от сервера VOMS: %sНе задана значение атрибута субъекта %s, пропускаетсяНеоднородный ресурсНе поддерживается ни один из запрошенных протоколов транспортного уровняЭто не коллекцияНет допускаНет допуска согласно запросу: %sНет допуска от arc.pdp - некоторые элементы RequestItem не удовлетворяют политикеНет допуска от arc.pdp - не удалось получить ответ обработчикаНе допущен через simplelist.pdp: %sНедостаточно элементов outputsandboxdesturi!Недостаточное количество параметров в copyurlНедостаточное количество параметров в linkurlНедостаточно свободного места для записи файлаМодуль %s не найден в кэшеНичего не прослушиваетсяДля этого URL (%s) фильтр не назначен.Служба доставки %s не используется в связи с предыдущим сбоемСлужба доставки на %s не используется в связи с переполнениемЦель недействительнаНедействительный источникЗадание не указано: Вы должны либо указать номер тестового задания, используя опцию -J (--job), либо запросить информацию о сертификатах, используя опцию -E (--certificate) Сбой уведомленияПроизводится копирование (из -> в)Номер %d с кратким именем: %s%sНомер %d: %sКоличество элементов ComputingService полученных из полного документа и из запроса XPath не совпадает: %d != %dИзменилось число блокировок OpenSSL - повторная инициализацияЧисло источников и число назначений не соответствуют друг другуПАРАМЕТР...Семейство ОС: %sНазвание ОС: %sВерсия ОС: %sОбъект не подходит для перечисленияОбъект не инициализирован (внутренняя ошибка)Полученный XML: %sПолученные адрес и номер порта неприемлемыСбой получения информацииСбой определения состоянияCacheService поддерживает только POSTDataDeliveryService поддерживает только POSTДля вывода поддерживается только неформатированный буферМы поддерживаем только права globus в политике подписи Globus - %s не поддерживаетсяМы поддерживаем только права подписи в политике 
подписи Globus - %s не поддерживаетсяНа настоящий момент единственным поддерживаемым источником пароля является стандартный вход.Для вспомогательной программы поддерживается только пользователь '.'Ошибка OpenSSL -- %sОшибка OpenSSL: %sОшибка операционной системыОперация успешно прерванаОперация завершена успешноЭта операция не поддерживается для данного типа URLДействие над путём "%s"OptimizedInformationContainer создал временный файл: %sOptimizedInformationContainer не смог создать временный файлOptimizedInformationContainer не смог разобрать XMLOptimizedInformationContainer не смог переименовать временный файлOptimizedInformationContainer не смог записать документ XML во временный файлОпция: %sОпции 'p' и 'n' не могут быть использованы одновременноЭтот модуль не имеет настраиваемых параметров.Изначальное описание задачи приведено ниже:Обнаружен неиспользуемый блок делегирования (%s) - очисткаНедостаточно памяти для создания случайного серийного номераДостигнут предел количества попытокЗакончились попытки присвоения нового ярлыка задачиЗакончились попытки присвоения нового ярлыка задачи в %sИсходящее сообщение не является сообщением SOAPВывод сертификата EECВывод доверенностиЗапрошена перезапись - назначение будет предварительно очищеноВладелец: %sИнструкция PASV не прошлаИнструкция PASV не прошла: %sPDP: %s (%s)PDP: %s (%s) не может быть подгруженPDP: %s не может быть подгруженPDP: отсутствует атрибут имениотсутствует расположение PDPDрасположение PDPD: %sСбой PEM_read_bio_X509_REQСбой PEM_write_bio_X509_REQотсутствует расположение PEPDрасположение PEPD: %sНе удалось задать способ проверки целостности PKCS12 и пароляНе задан пароль для нового сертификата PKCS12Не удалось создать контекст анализатора!Разбирается файл .local с целью извлечения специфических для задачи идентификаторов и информацииРазборка VOMS AC с целью получения информации о FQANОшибка разбора: Элементы (%s) задающие нижнюю границу различаютсяОшибка разбора: Элементы (%s) задающие верхнюю границу различаютсяОшибка разбора: Значение элемента %s не может быть разобрано как числоОшибка разбора: Значение нижней границы (%s) превышает значение верхней (%s)Сбой разбора атрибута "require" элемента "QueueName" из nordugrid-JSDL. 
Используется недопустимый оператор сравнения, допускаются только "ne" или "eq".Идёт передача информации о службе от сборщика к регистраторуТип шифрования пароля не поддерживается: %sПуть %s недействителен, создаются недостающие директорииТребуется путь к файлу состояния задач .localДолжен быть указан путь к сертификату доверенности пользователя.Имя контакта: %sВыполняется запрос /*Выполняется запрос /ComputingServiceВыполняется запрос /Services/ComputingServiceПроизводится сравнение с назначением (%s).Не производится ни сортировки, ни поиска соответствияУстойчивый сбойХроническая ошибка службыСбой проверки прав доступа, попытка загрузки без использования кэшаПроверка прав доступа не удалась: %sСбой проверки прав доступа к исходному URL: %sПроверка допуска пройденаПроверка прав доступа пройдена для URL %sОбработка оставшихся задачМесто: %sПлатформа: %sПожалуйста, выберите базу данных NSS для использования (1-%d): Пожалуйста, выберите базу данных NSS для использования (1-%d):Пожалуйста, выберите то, что будет использоваться (1-%d): Коммутатор (%s) - следующий %s(%s) не содержит назначенияДля следующего после %s компонента Plexer не задан атрибут IDОшибка подключаемого модуля %s: %sОшибка подключаемого модуля %s: %uПодключаемый модуль %s не смог запуститьсяПодключаемый модуль %s не смог запуститьсяРасширение %s для точки доступа %s недоступно (никогда не должно случаться).Расширение %s для точки доступа %s неисправно.Подключаемый модуль %s вывел на печать: %sПодключаемый модуль %s вывел на печать: %uПодключаемый модуль %s ответил слишком длинно: %sПодключаемый модуль %s ответил: %uВремя ожидания подключаемого модуля %s истекло после %u секундПустая команда в подключаемом модуле (присвоение имени пользователя)Нецифровое значение времени ожидания в подключаемом модуле (присвоение имени пользователя): %sНеприемлемое значение времени ожидания в подключаемом модуле (присвоение имени пользователя): %sСбой модуля: %sОтвет подключаемого модуля: %sНе удалось запустить службу принятия решений по политикамПустые правилаПолитика не в формате GACLСтрока политики: %sСубъект политики: %sPolicyId: %s Внутренний алгоритм политики:-- %sПочтовый индекс: %sЗадачи в очереди до СУПО: %iСбой предварительной очистки, всё же попытаемся скопировать Предварительная регистрация назначенияПредварительная регистрация назначения в каталогеПодготовка к размещению назначенияПодготовка к размещению файла-источникаПроблема при доступе к кэшированному файлу %s: %sПроблема при создании DTR (источник %s, назначение %s)Проблемы при подключении модуля %s, модуль пропускается.Проблема с каталогом, переходим к завершению размещения данныхПроблема с каталогом, кэш будет разблокированОбработка запроса %sВышло время ожидания потока обработки. DTR перезапускаетсяНеподдерживаемый тип обработки: %sВремя начала счёта (%s), указанное в описании задачи, приходится на период недоступности цели [ %s - %s ].Указан протокол %s, а должен быть httpsДоступны модули для следующих протоколов:Протокол не поддерживается - пожалуйста, убедитесь что установлены необходимые подключаемые модули gfal2 (пакеты gfal2-plugin-*)Указанное значение LRMSName не является допустимым URL: %sИнформация о доверенности:Срок действия доверенности вышелСрок действия доверенности вышел. Засылка задачи оборвана. 
Пожалуйста, запустите 'arcproxy'!Сбой создания доверенности: Срок действия сертификата истёкСбой создания доверенности: Срок действия сертификата ещё не началсяСбой создания доверенности: Сбой создания временного файла.Сбой создания доверенности: Сбой получения информации VOMS.Сбой создания доверенности: Не обнаружено действительных сертификатов.Сбой создания доверенности: Не обнаружено действительных закрытых ключей.Доверенность успешно созданаСрок действия доверенности истёкДлина ключа доверенности: %iРасположение доверенности: %sПодпись доверенности: %sДоверенность записана в %sИмя субъекта доверенности: %sТип доверенности: %sДоверенность с политикой ARCДоверенность со всеми унаследованными правамиДоверенность с незаполненной политикой - отказ по неизвестной политикеДоверенность с ограниченной политикой: %sДоверенность с неизвестной политикой - отказ по неизвестной политикеИмя субъекта доверенности: %sДоверенность/параметры доступа сохранены в %sДоверенность: %sЗапрос на размещение %s всё ещё в очереди, следует подождать %i секундPut: не удалось зарезервировать память для файла %s в задании %sPut: не удалось создать файл %s для задания %s - %sPut: не удалось установить позицию файла %s для задания %s на %Lu - %sPut: не удалось записать в файл %s для задания %s - %sPut: задача отсутствует: %s - %sPut: отсутствует информация о файле %s в задании: %sPut: неприемлемая информация о файле %s в задании: %sКонструктор надстройки Python отработал успешноДеструктор оболочки Python (%d)Вызван Python-конструктор планировщика (%d)Вызван Python-деструктор планировщика (%d)Интерпретатор Python заблокированИнтерпретатор Python разблокированВызван процесс Python wrapperИнициализация PythonBrokerУровень качества: %sЗапрос не является корректным XMLРезультат запроса не содержит элементов.Запрос возвратил неожиданный элемент: %s:%sОпрашивается сервер ACIX на %sОпрашивается точка доступа WSRF GLUE2 к информации о вычислительном ресурсеЗапрос к %sОпрашивается список из %d задач(и)Массовый опрос копий источникаОпрос состояния запроса на размещениеИспользование очереди '%s' запрещено для полного атрибута '%s' в соответствии с локальной политикой доступаИспользование очереди '%s' запрещено для указанных полных атрибутов в соответствии с локальной политикой доступаСведения об очереди:Сбой метода RSA_generate_key_ex Замена в RSL не является последовательностьюЗамена в RSL не является последовательностью из двух элементовИмя переменной для замены RSL не приводится к буквенной константеЗначение переменной для замены RSL не приводится к буквенной константеСлучайная сортировкаНеобработанная инструкция: %sВоссоздаётся клиент A-REXВоссоздаётся клиент EMI ESПрочитано %i байтНе удалось подтвердить наличие доступа на чтениеЗакрыт доступ на чтение для %s: %sЧитается архивный файл %sСбой команд чтения в проверке подлинностиПрочесть запрос из файлаЧтение запроса из строкиЧтение %u байтов из байта %lluЧтение выходных файлов в списке пользователя %sФактическая передача из %s в %sПричина: %sПланировщик вернул запрос DTR %s в состоянии %sЗапрос DTR %s получен в процессе закрытия генератора - не может быть обработанПринят неверный запрос DTRПолучено сообщение вне полосы (некритично, уровень ERROR лишь для отладки)Не получено запросов DTRПолучена повторная попытка запроса DTR %s, всё ещё в состоянии передачиПолучен транспортный URL: %sПересоединениеПеренаправление к %sПеренаправление к новому URL: %sОтказано в соединении: Превышен предел соединенийЗарегистрирована статическая информация: документ: %sРегистрация копии 
назначенияРегистрируется каталог: %s с подключаемым модулем: %sРегистрируется вспомогательный каталог: %sРегистрируется подключаемый модуль для состояния %s; опции: %s; команда: %sРегистрация на сервере EMIRegistry %s Регистрация на сервере ISIS %s Registrant не содержит действительного URL. Регистрация окончена.Конец регистрации: %sВыход из регистрации: %sСбой регистрации буфера Globus FTP - проверка прерываетсяРегистрация начинается: %sОжидается использование реляционного оператораСброс назначенияОтзыв запросов, сделанных при размещенииСброс запросовСброс источникаПерепривязка к локальной группе с идентификатором: %iПерепривязка к локальной группе с именем: %sПерепривязка к локальному идентификатору: %iПерепривязка к локальному пользователю: %sДомашний каталог перепривязанного пользователя: %sУдаление ISIS (%s) из спискаRemove: удаляется: %sУдаляется %sУдаляется точка входа %s: она содержит ненужный интерфейс (%s).Удаляется логический файл из метаданных %sУдаляются метаданные в %sУдаляется устаревший файл журнала задач %sОтмена предварительной регистрации назначения в каталогеПереименование: сбой в globus_ftp_client_moveПереименование: истекло время ожидания завершения операции%s переименовывается в %sВозобновление задач ARC1 не поддерживаетсяВозобновление задач BES не поддерживаетсяВозобновление задач CREAM не поддерживаетсяВозобновление задач UNICORE не поддерживаетсяПараметры доступа успешно обновленыОбновление параметров доступа для задачи: %sОбновляется доверенность для задачи %sЗапрос DTR %s в состоянии %s заменяется новым запросомЗаменяется существующий маркер для %s в кэше маркеров RucioЗамена старой информации в SRM на новую для URL %sОчередь '%s' заменяется на '%s'Копия %s не соответствует предпочитаемому шаблону или расположению URLКопия %s доступна с большой задержкой, но всё равно будет использоваться в связи с отсутствием других источниковКопия %s доступна с большой задержкой, пробуется другой источникУ копии %s долгая задержка, пробуем следующую копиюКопия %s локализованаКопия %s соответствует шаблону узла %sКопия %s соответствует шаблону %sКопирование файла %s из удалённого кэша не удалось, т.к. источник был удалён или изменёнКопирование файла %s в локальный кэш %sОшибка при выполнении запросаСбой запроса: %sСбой запроса: нет ответа от службы IdPСбой запроса: нет ответа от службы IdP при проверке подлинностиСбой запроса: нет ответа ос службы IdP при перенаправленииСбой запроса: нет ответа от службы SP при отсылке утверждения SAML на SPСбой запроса: нет ответа от службы SPServiceСбой запроса: неверный ответ от службы IdP при проверке подлинностиСбой запроса: неверный ответ от службы IdP при перенаправленииСбой запроса: неприемлемый ответ от службы SP при отсылке утверждения SAML на SPСбой запроса: неверный ответ от службы SPServiceЗапрос заданного свойства ресурсаПустой запросЗапрос не поддерживается - %sЗапрос прерван (ABORTED), но все файлы готовыЗапрос прерван (ABORTED), так как он был отменёнЗапрос прерван (ABORTED). Причина: %sЗапрос удался!!!Истекло время ожидания запросаЗапрос открытия файла в процессе записиПопытка передачи неизвестному владельцу - %uЗапрос: %sЗапрошено сегментов ядер: %iЗапрошенный промежуток времени: %d.%d.%d. 0:00 - %d.%d.%d. 
%d:%d Запрашиваются элементы описания ресурса ComputingService с %sЗапрос рекурсивного просмотра и --nolist не имеет смыслаТребование "%s %s" НЕ удовлетворено.Требование "%s %s" удовлетворено "%s".Требование "%s %s" удовлетворено.Политика бронирования: %sОбнаружение копий назначенияСбой обнаружения каталога для назначенияСбой обнаружения каталога для источникаМассовое обнаружение копий источникаСхема описания ресурса содержит недействительный элемент: %s:%sОписание ресурса пустоОписание ресурса содержит URL интерфейса %s: %sОписание ресурса не содержит URL интерфейсовПроверка соответствия запроса описания ресурса прошлаПроверка соответствия описания ресурса схеме GLUE2 не прошла:Прошла проверка описания ресурсаСбой сборщика информации о ресурсеСбой сборщика информации о ресурсе с выходным статусом: %i %sЖурнал сборщика информации о ресурсе: %sСборщик информации о ресурсе: %sСистема управления: %sСбой опроса ресурсаОтклик из ISIS: %sОтвет не в формате SOAPОтзыв не является ожидаемым WS-RPОшибка отсылки откликаОтзыв: %sОтвет: %sПерезапуск после нарушения сегментацииУспешно завершена перезасылка задачи (%s), но очистить задачу не удалось - она будет присутствовать в списке задачУспешно завершена перезасылка задачи (%s), но прервать задачу не удалось - она будет присутствовать в списке задачРезультат (0=Допуск, 1=Отказ, 2=Неопределённый, 3=Неприменим): %dРезультаты сохранены в: %sПерезапуск задач UNICORE не поддерживаетсяПерезапуск задач BES не поддерживаетсяВозобновление задачи %s в состоянии %s (%s)Получение файла %sПолучение описания задач EMI ES не поддерживаетсяПопытка повторного соединения с EMIRegistry (%s) %d раз(а).Повторные попытки связи с сервером ISIS (%s) %d раз.Повторная попытка: %dПовторСообщение, полученное с сервера VOMS %s: %s Сообщение с сервера VOMS: %s Сервер myproxy возвратил следующее сообщение: %s %dСообщение с сервера VOMS: %s Возврат в генераторПовторное использование соединенияПравый операнд для сцепления RSL не приводится к буквенной константеRucio возвратил %sСрок действия маркера Rucio для %s истёк, или вскоре истечётПравило: %sПравило: возможности: %sПравило: группа: %sПравило: роль: %sПравило: ВО: %sВыполняется команда %sВыполняется команда %sЗадачи в счёте: %iВыполнение команды рассылки (%s)Текущий пользователь не имеет имениОбработчик токена SAML не настроенСбой процесса SAML2SSOОбмен данными SASLСбой запроса SOAP: ОшибкаСбой запроса SOAP: Нет ответаОшибка запроса SOAP к серверу AA %sОшибка SOAP службы доставки на %s: %sПолучена ошибка SOAPОшибка SOAP: %sНе удалась активизация SOAPНе удалось инициализировать SOAPОперация SOAP не поддерживается: %sЗапроса SOAP: %sОтвет SOAP: %sАктивизация SOAP с SAML2SSO не выполненаСостояние клиента SRM: %sSRM не возвратил никакой информацииSRM не возвратил никакой полезной информацииSRM не выдал пригодных для передачи URL: %sОшибка SSL: %d - %s:%s:%sОшибка SSL: %s, libs: %s, func: %s, причина: %sБлокировка SSL не инициализированаКонфигурация планировщика:Планировщик получил пустой запрос DTRПланировщик получил недопустимый запрос DTRПланировщик получил новый запрос DTR %s с источником: %s, назначением: %s, приписан к доле %s с приоритетом %dЗапуск планировщикаПланировщик остановлен, выходПравила планировки: %sОшибка проверки схемыСхема: %sНастройки SecHandler не заданыНастройки SecHandler отсутствуютНе задан атрибут name для SecHandlerSecHandler: %s(%s)Обработчик безопасности %s(%s) не может быть созданСбой в процессе обработки прав доступаНе прошла проверка безопасности для входящего сообщения TLS Не прошла проверка 
безопасности для исходящего сообщения TLSНе прошла проверка безопасности в SOAP MCC для входящего сообщенияНе прошла проверка безопасности в SOAP MCC для исходящего сообщенияНе прошла проверка безопасности в TLS MCC для входящего сообщенияОбработка/проверка параметров доступа не прошлаОбработка/проверка параметров доступа '%s' не прошлаОбработка/проверка параметров доступа '%s' завершилась успехомОбработка/проверка параметров доступа завершилась успехомВыбор не удался: %sСамостоятельно подписанный сертификатСбой отсылки отклика: %sSendCommand: Команда: %sОтправка команды: Сбой: %sSendCommand: Отзыв: %sОтправка команды: Время ожидания истекло после %d мсSendData: Не удалось извлечь адрес узла и/или номер порта из ответа на запрос EPSV/PASV: %sSendData: Канал передачи данных: %d.%d.%d.%d:%dSendData: Канал передачи данных: [%s]:%dОтправка данных: Сбой соединения и записи данных: %sПересылка данных: Время ожидания контакта и записи данных истекло после %d мсОтправка данных: Сбой записи данных: %sПересылка данных: Время ожидания записи данных истекло после %d мсОтправка команды: Сбой отправки команды DCAUSendData: Сбой отсылки команд EPSV и PASVОтправка данных: Сбой отправки команды STOR: %sОтправка команды: Сбой отправки команды TYPESendData: Не удалось применить локальный адрес к каналу передачи данных: %sОтправка данных: Сбой локального порта: %sОтправка данных: Сбой локального типа: %sSendData: Сбой разбора отзыва сервера EPSV: %sSendData: Сбой разбора порта отзыва сервера EPSV: %sSendData: Сбой разбора отзыва сервера PASV: %sПосланы RegEntries: %sОтправлена запись: %sОтправленные jobID: (всего %d задач(и))Версия сервера SRM: %sРеализация сервера: %sСервер остановленИгнорируется сервис %s типа %sСлужба %s(%s) не может быть созданаЦикл по сервисам: точка входа %sДля службы не задан атрибут IDДля службы не задан атрибут NameСведения о службе:Сервис успешно добавлен в InfoRegistrar, подключённый к информационной системе %s.Сервис в ожидании запросовСервис удалён из InfoRegistrar, подключённый к информационной системе %s.Подгружены сервисные компоненты цепи сообщенийСервис был уже занесён в InfoRegistrar, подключённый к информационной системе %s.Атрибут ServiceID получен из описания точки входаСохранён ServiceID: %sОтсутствует ServiceURLСостояние обслуживания: %sКаталог сессии %s принадлежит %i, но текущий пользователь - %iОтсутствует корневая директория сессииЗадан нестандартный тип эталонного теста: %sПредельное количество соединений выставляется на %i, соединения сверх предела будут переведены в состояние %sПосылается pbsz на %luПрисваивается состояние (%s) точки входа: %sЗадаётся состояние (STARTED) для точки входа: %sЗадаётся имя субъекта!Установка userRequestDescription в %sИнформация о совместном ресурсе:Следует подождать, когда назначение будет готовоСледует подождать, когда источник будет готовПоказать параметры справкиОстанов демонаЗакрывается служба размещения данныхЗакрываются потоки размещения данныхОтключение обработки задачПланировщик останавливаетсяSimpleMap: %sПропускается ComputingEndpoint '%s', потому что объявлен интерфейс '%s' вместо запрошенного '%s'.Пропускается недопустимая опция URL %sПропускается локальная копия %sПропускается скачанная задача (%s), так как она была запущена через другой интерфейс (%s).Сервис пропускается: отсутствует SchemaPath!Сервис пропускается: отсутствует ServicePath!Ошибка преобразования сокета: %sСокеты на выходе не совпадают %i != %iНекоторые адреса недоступны. 
Прослушивается %u из %u.Некоторые загрузки не удалисьНекоторые загрузки не удались (возможна повторная попытка)Неизвестная ошибка при инициализации агрегированной записи (%s).Неизвестная ошибка при сохранении агрегированной записи (%s).Некоторые загрузки не удалисьНекоторые отгрузки не удалисьНекоторые выгрузки не удались (для некоторых возможна повторная попытка)Сортировка в соответствии с наличием свободных мест в очередиСортировка в соответствии с доступностью входных данных в пункте назначенияСортировка в соответствии с указанным эталонным тестом (по умолчанию - "specint2000")Копии сортируются в соответствии с расположением URLКопии сортируются в соответствии с предпочитаемым шаблоном %sОтсутствует URL источникаНеподдерживаемый URL источника: %sНедействительный URL источника: %sИсточник и/или назначение является каталогом, будет произведён поиск копийПроверка источника запрошена, но не прошла: %sНедопустимый URL источника.Источник поставлен в соответствие %sИсточник неготов, следующая попытка через %u секИсточник идентичен назначениюДата изменения источника: %sИсточник или назначение требуют размещения с ленточного накопителяИсточник: %sУказанный адрес недействителенУказанные модули не найдены в кэшеУказанный файл с трафаретом (%s) не существует.Указанный пользователь не может быть обработанЗадачи, выполняющие размещение данных: %iИстекло время ожидания запроса на размещение, запрос будет отозванРазмещается: %sЗапуск с высоким приоритетомНачать тестЖдём 10 секунд...Начало чтенияStartReading: Файл не был подготовлен должным образомНачало записиStartWriting: Файл не был подготовлен должным образомЗапущена удалённая служба доставки на %sЗапускаются потоки DTRЗапускается контролируемый процессЗапускаются потоки размещения данныхЗапускается вспомогательный процесс: %sЗапускается поток обработки задачиЗапуск мониторинга задачЗапускается новый запрос DTR для %sНачинается опрос отложенной точки входа (%s) - другие точки входа этого сервиса не опрашиваются, либо были уже успешно опрошены.Запускается подпоток для опроса точки доступа по %sЗапускается поток для опроса точки доступа %sStat: получено время изменения %sПроверка: получен размер %lluОтсутствует наименование состояния модуля authPluginОтсутствует наименование состояния подключаемого модуляСтатус точки доступа сервиса "%s" в ARCHERY указан как неактивный. 
[binary gettext message catalog (.gmo) data: Russian (ru) translation string table for the NorduGrid ARC client/server messages; non-text compiled-catalog content omitted]
nordugrid-arc-5.4.2/po/PaxHeaders.7502/sv.gmo0000644000000000000000000000013213214316034016743 xustar000000000000000030 mtime=1513200668.451850855 30 atime=1513200668.450850843 30 ctime=1513200668.605852739 nordugrid-arc-5.4.2/po/sv.gmo0000644000175000002070000022603313214316034017017 0ustar00mockbuildmock00000000000000
[Payload of po/sv.gmo: the original English message strings of the ARC components (cache manager, grid-manager/A-REX job states, DataMover and GridFTP/LDAP clients, security, delegation and policy handlers, and the arccat/arccp/arcget/arckill/arcls/arcrm command-line tools), followed by their Swedish translations. Catalogue metadata:]
Project-Id-Version: Arc
Report-Msgid-Bugs-To: support@nordugrid.org
POT-Creation-Date: 2017-12-13 22:31+0100
PO-Revision-Date: 2009-06-23 15:28+0200
Last-Translator: name
Language-Team:
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit
Language:
Plural-Forms: nplurals=2; plural=n != 1;
[Swedish translations omitted.]
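The po/*.gmo members above are compiled GNU gettext catalogues built from the editable po/*.po sources shipped in the same directory. As a minimal sketch, assuming a checkout that contains the po/ directory and the standard GNU gettext tools, the shipped catalogues can be regenerated or inspected like this (file names follow the members listed here):

    # Compile an editable catalogue into the binary form found in the tarball;
    # --check includes format-string consistency checks for the printf-style
    # %s/%d placeholders against the original msgids.
    msgfmt --check -o po/sv.gmo po/sv.po

    # Decompile a shipped binary catalogue back into readable msgid/msgstr pairs.
    msgunfmt po/sv.gmo -o sv.recovered.po

    # Quick statistics: how many messages are translated in the Russian catalogue.
    msgfmt --statistics -o /dev/null po/ru.po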
avbryterftp_write_thread: eråll och registrera buffrartimmetimmarinkommande meddelande är inte SOAPinit_handle: globus_ftp_client_handle_init misslyckadesinit_handle: globus_ftp_client_handleattr_init misslyckadesinit_handle: globus_ftp_client_handleattr_set_gridftp2 misslyckadesinit_handle: globus_ftp_client_operationattr_init misslyckadesindata definierar ej operationindata är inte SOAPjobbeskrivningsfil som beskriver jobbet som ska sändas injobbeskrivningssträng som beskriver jobbet som ska sändas inbehåll filerna på servern (städa inte upp)nivålistpost: %slist_files_ftp: misslyckades med att erhålla filens ändringstidlist_files_ftp: misslyckades med att erhålla filens storleklist_files_ftp: globus_ftp_client_modification_time misslyckadeslist_files_ftp: globus_ftp_client_size misslyckadeslist_files_ftp: söker efter ändringstid för %slist_files_ftp: söker efter storlek på %slist_files_ftp: timeout vid väntan på ändringstidlist_files_ftp: timeout vid väntan på storleklångt format (mer information)minutminutermkdir_ftp: skapar %smkdir_ftp: timeout vid väntan på mkdirmodulnamn: %snästa kedjeelement anropatnästa element i kedjan returnerade tom nyttolastnästa element i kedjan returnerade felstatusnästa element i kedjan returnerade ogiltig nyttolastnummerantal försök innan överföring misslyckasvälj endast jobb vars status är statusstrarbeta rekursivt upp till den angivna nivånutdata är inte SOAPsökvägsökväg till inställningsfilsökväg till lokalt cache (använd för att lägga in fil i cache)skriv ut versionsinformationprocess: GETprocess: POSTprocess: PUTprocess: ändpunkt: %sprocess: factoryändpunktprocess: id: %sprocess: metod: %sprocess: operation: %sprocess: begäran=%sprocess: svar=%sprocess: subsökväg: %sproxyvillkorta bort logiska filnamnsregistreringen även om inte alla fysiska kopior tagits bortta bort jobbet från den lokala jobblistan även om jobbet inte hittas i informationssystemetsekundsekundersekunderserviceurlSätter fil %s till storlek %lluvisa URL:er till filens registrerade kopiorvisa fortskridandeindikatorvisa jobbets stderrvisa jobbets stdout (förval)avstängningkälla destinationsource.next_locationstart_reading_ftpstart_reading_ftp: globus_ftp_client_getstart_reading_ftp: globus_ftp_client_get misslyckadesstart_reading_ftp: globus_thread_create misslyckadesstart_writing_ftp: globus_thread_create misslyckadesstart_writing_ftp: mkdirstart_writing_ftp: mkdir misslyckades - försöker fortfarande skrivastart_writing_ftp: putstart_writing_ftp: put misslyckadesstatusstrstop_reading_ftp: avbryter förbindelsestop_reading_ftp: avslutar: %sstop_reading_ftp: väntar på att överföring ska avslutassträngtimeout i sekunder (förval 20)urlurllistan %s innehåller ogiltig URL: %sanvänd säker överföring (osäker som förval)nordugrid-arc-5.4.2/PaxHeaders.7502/aclocal.m40000644000000000000000000000013213214315712017033 xustar000000000000000030 mtime=1513200586.991854568 30 atime=1513200587.096855852 30 ctime=1513200658.600730372 nordugrid-arc-5.4.2/aclocal.m40000644000175000002070000012011613214315712017102 0ustar00mockbuildmock00000000000000# generated automatically by aclocal 1.11.1 -*- Autoconf -*- # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, # 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc. # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.63],, [m4_warning([this file was generated for autoconf 2.63. You have another version of autoconf. It may work, but is not guaranteed to. If you have problems, you may need to regenerate the build system entirely. To do so, use the procedure documented by the package, typically `autoreconf'.])]) # pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*- # # Copyright © 2004 Scott James Remnant . # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # PKG_PROG_PKG_CONFIG([MIN-VERSION]) # ---------------------------------- AC_DEFUN([PKG_PROG_PKG_CONFIG], [m4_pattern_forbid([^_?PKG_[A-Z_]+$]) m4_pattern_allow([^PKG_CONFIG(_PATH)?$]) AC_ARG_VAR([PKG_CONFIG], [path to pkg-config utility])dnl if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then AC_PATH_TOOL([PKG_CONFIG], [pkg-config]) fi if test -n "$PKG_CONFIG"; then _pkg_min_version=m4_default([$1], [0.9.0]) AC_MSG_CHECKING([pkg-config is at least version $_pkg_min_version]) if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) PKG_CONFIG="" fi fi[]dnl ])# PKG_PROG_PKG_CONFIG # PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) # # Check to see whether a particular set of modules exists. Similar # to PKG_CHECK_MODULES(), but does not set variables or print errors. 
# # # Similar to PKG_CHECK_MODULES, make sure that the first instance of # this or PKG_CHECK_MODULES is called, or make sure to call # PKG_CHECK_EXISTS manually # -------------------------------------------------------------- AC_DEFUN([PKG_CHECK_EXISTS], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl if test -n "$PKG_CONFIG" && \ AC_RUN_LOG([$PKG_CONFIG --exists --print-errors "$1"]); then m4_ifval([$2], [$2], [:]) m4_ifvaln([$3], [else $3])dnl fi]) # _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES]) # --------------------------------------------- m4_define([_PKG_CONFIG], [if test -n "$$1"; then pkg_cv_[]$1="$$1" elif test -n "$PKG_CONFIG"; then PKG_CHECK_EXISTS([$3], [pkg_cv_[]$1=`$PKG_CONFIG --[]$2 "$3" 2>/dev/null`], [pkg_failed=yes]) else pkg_failed=untried fi[]dnl ])# _PKG_CONFIG # _PKG_SHORT_ERRORS_SUPPORTED # ----------------------------- AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED], [AC_REQUIRE([PKG_PROG_PKG_CONFIG]) if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi[]dnl ])# _PKG_SHORT_ERRORS_SUPPORTED # PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND], # [ACTION-IF-NOT-FOUND]) # # # Note that if there is a possibility the first call to # PKG_CHECK_MODULES might not happen, you should be sure to include an # explicit call to PKG_PROG_PKG_CONFIG in your configure.ac # # # -------------------------------------------------------------- AC_DEFUN([PKG_CHECK_MODULES], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl AC_ARG_VAR([$1][_LIBS], [linker flags for $1, overriding pkg-config])dnl pkg_failed=no AC_MSG_CHECKING([for $1]) _PKG_CONFIG([$1][_CFLAGS], [cflags], [$2]) _PKG_CONFIG([$1][_LIBS], [libs], [$2]) m4_define([_PKG_TEXT], [Alternatively, you may set the environment variables $1[]_CFLAGS and $1[]_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details.]) if test $pkg_failed = yes; then _PKG_SHORT_ERRORS_SUPPORTED if test $_pkg_short_errors_supported = yes; then $1[]_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "$2" 2>&1` else $1[]_PKG_ERRORS=`$PKG_CONFIG --print-errors "$2" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$$1[]_PKG_ERRORS" >&AS_MESSAGE_LOG_FD ifelse([$4], , [AC_MSG_ERROR(dnl [Package requirements ($2) were not met: $$1_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. _PKG_TEXT ])], [AC_MSG_RESULT([no]) $4]) elif test $pkg_failed = untried; then ifelse([$4], , [AC_MSG_FAILURE(dnl [The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. _PKG_TEXT To get pkg-config, see .])], [$4]) else $1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS $1[]_LIBS=$pkg_cv_[]$1[]_LIBS AC_MSG_RESULT([yes]) ifelse([$3], , :, [$3]) fi[]dnl ])# PKG_CHECK_MODULES # Copyright (C) 2002, 2003, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_AUTOMAKE_VERSION(VERSION) # ---------------------------- # Automake X.Y traces this macro to ensure aclocal.m4 has been # generated from the m4 files accompanying Automake X.Y. # (This private macro should not be called outside this file.) 
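The pkg.m4 macros above are invoked from a project's configure.ac rather than called directly. The following is a minimal sketch of the usual calling pattern; the dependency name libfoo and the variable prefix FOO are placeholders for the example, not something this package actually checks for.

    PKG_PROG_PKG_CONFIG([0.9.0])
    PKG_CHECK_MODULES([FOO], [libfoo >= 1.0],
      [AC_DEFINE([HAVE_LIBFOO], [1], [Define to 1 if libfoo is available])],
      [AC_MSG_ERROR([libfoo >= 1.0 is required])])

On success the macro sets and substitutes FOO_CFLAGS and FOO_LIBS (taken from pkg-config, or from same-named user overrides declared via AC_ARG_VAR), which Makefile.am fragments can then reference.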
AC_DEFUN([AM_AUTOMAKE_VERSION], [am__api_version='1.11' dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to dnl require some minimum version. Point them to the right macro. m4_if([$1], [1.11.1], [], [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl ]) # _AM_AUTOCONF_VERSION(VERSION) # ----------------------------- # aclocal traces this macro to find the Autoconf version. # This is a private macro too. Using m4_define simplifies # the logic in aclocal, which can simply ignore this definition. m4_define([_AM_AUTOCONF_VERSION], []) # AM_SET_CURRENT_AUTOMAKE_VERSION # ------------------------------- # Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. # This function is AC_REQUIREd by AM_INIT_AUTOMAKE. AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], [AM_AUTOMAKE_VERSION([1.11.1])dnl m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl _AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) # AM_AUX_DIR_EXPAND -*- Autoconf -*- # Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets # $ac_aux_dir to `$srcdir/foo'. In other projects, it is set to # `$srcdir', `$srcdir/..', or `$srcdir/../..'. # # Of course, Automake must honor this variable whenever it calls a # tool from the auxiliary directory. The problem is that $srcdir (and # therefore $ac_aux_dir as well) can be either absolute or relative, # depending on how configure is run. This is pretty annoying, since # it makes $ac_aux_dir quite unusable in subdirectories: in the top # source directory, any form will work fine, but in subdirectories a # relative path needs to be adjusted first. # # $ac_aux_dir/missing # fails when called from a subdirectory if $ac_aux_dir is relative # $top_srcdir/$ac_aux_dir/missing # fails if $ac_aux_dir is absolute, # fails when called from a subdirectory in a VPATH build with # a relative $ac_aux_dir # # The reason of the latter failure is that $top_srcdir and $ac_aux_dir # are both prefixed by $srcdir. In an in-source build this is usually # harmless because $srcdir is `.', but things will broke when you # start a VPATH build or use an absolute $srcdir. # # So we could use something similar to $top_srcdir/$ac_aux_dir/missing, # iff we strip the leading $srcdir from $ac_aux_dir. That would be: # am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` # and then we would define $MISSING as # MISSING="\${SHELL} $am_aux_dir/missing" # This will work as long as MISSING is not called from configure, because # unfortunately $(top_srcdir) has no meaning in configure. # However there are other variables, like CC, which are often used in # configure, and could therefore not use this "fixed" $ac_aux_dir. # # Another solution, used here, is to always expand $ac_aux_dir to an # absolute PATH. The drawback is that using absolute paths prevent a # configured tree to be moved without reconfiguration. AC_DEFUN([AM_AUX_DIR_EXPAND], [dnl Rely on autoconf to set up CDPATH properly. AC_PREREQ([2.50])dnl # expand $ac_aux_dir to an absolute path am_aux_dir=`cd $ac_aux_dir && pwd` ]) # AM_CONDITIONAL -*- Autoconf -*- # Copyright (C) 1997, 2000, 2001, 2003, 2004, 2005, 2006, 2008 # Free Software Foundation, Inc. 
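The long comment above AM_AUX_DIR_EXPAND describes why a relative $ac_aux_dir breaks helper-script lookup from subdirectories and VPATH builds. Purely as an illustration of the expansion's effect (the build-aux directory name and the absolute path are assumptions for the example):

    dnl configure.ac
    AC_CONFIG_AUX_DIR([build-aux])
    dnl After AM_AUX_DIR_EXPAND, $am_aux_dir holds an absolute path:
    dnl   am_aux_dir=`cd $ac_aux_dir && pwd`      e.g. /home/user/project/build-aux
    dnl so wrappers such as MISSING="${SHELL} $am_aux_dir/missing" keep working
    dnl no matter which directory configure-time commands run from.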
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 9 # AM_CONDITIONAL(NAME, SHELL-CONDITION) # ------------------------------------- # Define a conditional. AC_DEFUN([AM_CONDITIONAL], [AC_PREREQ(2.52)dnl ifelse([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl AC_SUBST([$1_TRUE])dnl AC_SUBST([$1_FALSE])dnl _AM_SUBST_NOTMAKE([$1_TRUE])dnl _AM_SUBST_NOTMAKE([$1_FALSE])dnl m4_define([_AM_COND_VALUE_$1], [$2])dnl if $2; then $1_TRUE= $1_FALSE='#' else $1_TRUE='#' $1_FALSE= fi AC_CONFIG_COMMANDS_PRE( [if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then AC_MSG_ERROR([[conditional "$1" was never defined. Usually this means the macro was only invoked conditionally.]]) fi])]) # Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2009 # Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 10 # There are a few dirty hacks below to avoid letting `AC_PROG_CC' be # written in clear, in which case automake, when reading aclocal.m4, # will think it sees a *use*, and therefore will trigger all it's # C support machinery. Also note that it means that autoscan, seeing # CC etc. in the Makefile, will ask for an AC_PROG_CC use... # _AM_DEPENDENCIES(NAME) # ---------------------- # See how the compiler implements dependency checking. # NAME is "CC", "CXX", "GCJ", or "OBJC". # We try a few techniques and use that to set a single cache variable. # # We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was # modified to invoke _AM_DEPENDENCIES(CC); we would have a circular # dependency, and given that the user is not expected to run this macro, # just rely on AC_PROG_CC. AC_DEFUN([_AM_DEPENDENCIES], [AC_REQUIRE([AM_SET_DEPDIR])dnl AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl AC_REQUIRE([AM_MAKE_INCLUDE])dnl AC_REQUIRE([AM_DEP_TRACK])dnl ifelse([$1], CC, [depcc="$CC" am_compiler_list=], [$1], CXX, [depcc="$CXX" am_compiler_list=], [$1], OBJC, [depcc="$OBJC" am_compiler_list='gcc3 gcc'], [$1], UPC, [depcc="$UPC" am_compiler_list=], [$1], GCJ, [depcc="$GCJ" am_compiler_list='gcc3 gcc'], [depcc="$$1" am_compiler_list=]) AC_CACHE_CHECK([dependency style of $depcc], [am_cv_$1_dependencies_compiler_type], [if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named `D' -- because `-MD' means `put the output # in D'. mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. 
mkdir sub am_cv_$1_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` fi am__universal=false m4_case([$1], [CC], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac], [CXX], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac]) for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with # Solaris 8's {/usr,}/bin/sh. touch sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with `-c' and `-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle `-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # after this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvisualcpp | msvcmsys) # This compiler won't grok `-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_$1_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_$1_dependencies_compiler_type=none fi ]) AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) AM_CONDITIONAL([am__fastdep$1], [ test "x$enable_dependency_tracking" != xno \ && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) ]) # AM_SET_DEPDIR # ------------- # Choose a directory name for dependency files. 
# This macro is AC_REQUIREd in _AM_DEPENDENCIES AC_DEFUN([AM_SET_DEPDIR], [AC_REQUIRE([AM_SET_LEADING_DOT])dnl AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl ]) # AM_DEP_TRACK # ------------ AC_DEFUN([AM_DEP_TRACK], [AC_ARG_ENABLE(dependency-tracking, [ --disable-dependency-tracking speeds up one-time build --enable-dependency-tracking do not reject slow dependency extractors]) if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' fi AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) AC_SUBST([AMDEPBACKSLASH])dnl _AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl ]) # Generate code to set up dependency tracking. -*- Autoconf -*- # Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008 # Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. #serial 5 # _AM_OUTPUT_DEPENDENCY_COMMANDS # ------------------------------ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], [{ # Autoconf 2.62 quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. case $CONFIG_FILES in *\'*) eval set x "$CONFIG_FILES" ;; *) set x $CONFIG_FILES ;; esac shift for mf do # Strip MF so we end up with the name of the file. mf=`echo "$mf" | sed -e 's/:.*$//'` # Check whether this is an Automake generated Makefile or not. # We used to match only the files named `Makefile.in', but # some people rename them; so instead we look at the file content. # Grep'ing the first line is not enough: some people post-process # each Makefile.in and add a new line on top of each file to say so. # Grep'ing the whole file is not good either: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then dirpart=`AS_DIRNAME("$mf")` else continue fi # Extract the definition of DEPDIR, am__include, and am__quote # from the Makefile without running `make'. DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` test -z "$DEPDIR" && continue am__include=`sed -n 's/^am__include = //p' < "$mf"` test -z "am__include" && continue am__quote=`sed -n 's/^am__quote = //p' < "$mf"` # When using ansi2knr, U may be empty or an underscore; expand it U=`sed -n 's/^U = //p' < "$mf"` # Find all dependency output files, they are included files with # $(DEPDIR) in their names. We invoke sed twice because it is the # simplest approach to changing $(DEPDIR) to its actual value in the # expansion. for file in `sed -n " s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do # Make sure the directory exists. test -f "$dirpart/$file" && continue fdir=`AS_DIRNAME(["$file"])` AS_MKDIR_P([$dirpart/$fdir]) # echo "creating $dirpart/$file" echo '# dummy' > "$dirpart/$file" done done } ])# _AM_OUTPUT_DEPENDENCY_COMMANDS # AM_OUTPUT_DEPENDENCY_COMMANDS # ----------------------------- # This macro should only be invoked once -- use via AC_REQUIRE. # # This code is only required when automatic dependency tracking # is enabled. FIXME. This creates each `.P' file that we will # need in order to bootstrap the dependency handling code. 
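_AM_OUTPUT_DEPENDENCY_COMMANDS above parses each generated Makefile for its DEPDIR, am__include and am__quote settings and pre-creates a stub for every included dependency file, so the first make run does not stop on a missing include. A sketch of the net effect for a hypothetical source file src/foo.c (file names are illustrative):

    # the generated src/Makefile contains, roughly:
    #   include ./$(DEPDIR)/foo.Po
    # the "depfiles" command run by config.status then guarantees the stub exists:
    mkdir -p src/.deps
    echo '# dummy' > src/.deps/foo.Po   # replaced by real dependencies on the first compile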
AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], [AC_CONFIG_COMMANDS([depfiles], [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], [AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"]) ]) # Do all the work for Automake. -*- Autoconf -*- # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, # 2005, 2006, 2008, 2009 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 16 # This macro actually does too much. Some checks are only needed if # your package does certain things. But this isn't really a big deal. # AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) # AM_INIT_AUTOMAKE([OPTIONS]) # ----------------------------------------------- # The call with PACKAGE and VERSION arguments is the old style # call (pre autoconf-2.50), which is being phased out. PACKAGE # and VERSION should now be passed to AC_INIT and removed from # the call to AM_INIT_AUTOMAKE. # We support both call styles for the transition. After # the next Automake release, Autoconf can make the AC_INIT # arguments mandatory, and then we can depend on a new Autoconf # release and drop the old call support. AC_DEFUN([AM_INIT_AUTOMAKE], [AC_PREREQ([2.62])dnl dnl Autoconf wants to disallow AM_ names. We explicitly allow dnl the ones we care about. m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl AC_REQUIRE([AC_PROG_INSTALL])dnl if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I." AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl # test to see if srcdir already configured if test -f $srcdir/config.status; then AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi AC_SUBST([CYGPATH_W]) # Define the identity of the package. dnl Distinguish between old-style and new-style calls. m4_ifval([$2], [m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl AC_SUBST([PACKAGE], [$1])dnl AC_SUBST([VERSION], [$2])], [_AM_SET_OPTIONS([$1])dnl dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. m4_if(m4_ifdef([AC_PACKAGE_NAME], 1)m4_ifdef([AC_PACKAGE_VERSION], 1), 11,, [m4_fatal([AC_INIT should be called with package and version arguments])])dnl AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl _AM_IF_OPTION([no-define],, [AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [Name of package]) AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [Version number of package])])dnl # Some tools Automake needs. AC_REQUIRE([AM_SANITY_CHECK])dnl AC_REQUIRE([AC_ARG_PROGRAM])dnl AM_MISSING_PROG(ACLOCAL, aclocal-${am__api_version}) AM_MISSING_PROG(AUTOCONF, autoconf) AM_MISSING_PROG(AUTOMAKE, automake-${am__api_version}) AM_MISSING_PROG(AUTOHEADER, autoheader) AM_MISSING_PROG(MAKEINFO, makeinfo) AC_REQUIRE([AM_PROG_INSTALL_SH])dnl AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl AC_REQUIRE([AM_PROG_MKDIR_P])dnl # We need awk for the "check" target. The system "awk" is bad on # some platforms. 
AC_REQUIRE([AC_PROG_AWK])dnl AC_REQUIRE([AC_PROG_MAKE_SET])dnl AC_REQUIRE([AM_SET_LEADING_DOT])dnl _AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], [_AM_PROG_TAR([v7])])]) _AM_IF_OPTION([no-dependencies],, [AC_PROVIDE_IFELSE([AC_PROG_CC], [_AM_DEPENDENCIES(CC)], [define([AC_PROG_CC], defn([AC_PROG_CC])[_AM_DEPENDENCIES(CC)])])dnl AC_PROVIDE_IFELSE([AC_PROG_CXX], [_AM_DEPENDENCIES(CXX)], [define([AC_PROG_CXX], defn([AC_PROG_CXX])[_AM_DEPENDENCIES(CXX)])])dnl AC_PROVIDE_IFELSE([AC_PROG_OBJC], [_AM_DEPENDENCIES(OBJC)], [define([AC_PROG_OBJC], defn([AC_PROG_OBJC])[_AM_DEPENDENCIES(OBJC)])])dnl ]) _AM_IF_OPTION([silent-rules], [AC_REQUIRE([AM_SILENT_RULES])])dnl dnl The `parallel-tests' driver may need to know about EXEEXT, so add the dnl `am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This macro dnl is hooked onto _AC_COMPILER_EXEEXT early, see below. AC_CONFIG_COMMANDS_PRE(dnl [m4_provide_if([_AM_COMPILER_EXEEXT], [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl ]) dnl Hook into `_AC_COMPILER_EXEEXT' early to learn its expansion. Do not dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further dnl mangled by Autoconf and run in a shell conditional statement. m4_define([_AC_COMPILER_EXEEXT], m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) # When config.status generates a header, we must update the stamp-h file. # This file resides in the same directory as the config header # that is generated. The stamp files are numbered to have different names. # Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the # loop where config.status creates the headers, so we can generate # our stamp files there. AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], [# Compute $1's index in $config_headers. _am_arg=$1 _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in $_am_arg | $_am_arg:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) # Copyright (C) 2001, 2003, 2005, 2008 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_INSTALL_SH # ------------------ # Define $install_sh. AC_DEFUN([AM_PROG_INSTALL_SH], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl if test x"${install_sh}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; *) install_sh="\${SHELL} $am_aux_dir/install-sh" esac fi AC_SUBST(install_sh)]) # Copyright (C) 2003, 2005 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 2 # Check whether the underlying file-system supports filenames # with a leading dot. For instance MS-DOS doesn't. AC_DEFUN([AM_SET_LEADING_DOT], [rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null AC_SUBST([am__leading_dot])]) # Check to see how 'make' treats includes. -*- Autoconf -*- # Copyright (C) 2001, 2002, 2003, 2005, 2009 Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 4 # AM_MAKE_INCLUDE() # ----------------- # Check to see how make treats includes. AC_DEFUN([AM_MAKE_INCLUDE], [am_make=${MAKE-make} cat > confinc << 'END' am__doit: @echo this is the am__doit target .PHONY: am__doit END # If we don't find an include directive, just comment out the code. AC_MSG_CHECKING([for style of include used by $am_make]) am__include="#" am__quote= _am_result=none # First try GNU make style include. echo "include confinc" > confmf # Ignore all kinds of additional output from `make'. case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=include am__quote= _am_result=GNU ;; esac # Now try BSD make style include. if test "$am__include" = "#"; then echo '.include "confinc"' > confmf case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=.include am__quote="\"" _am_result=BSD ;; esac fi AC_SUBST([am__include]) AC_SUBST([am__quote]) AC_MSG_RESULT([$_am_result]) rm -f confinc confmf ]) # Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- # Copyright (C) 1997, 1999, 2000, 2001, 2003, 2004, 2005, 2008 # Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 6 # AM_MISSING_PROG(NAME, PROGRAM) # ------------------------------ AC_DEFUN([AM_MISSING_PROG], [AC_REQUIRE([AM_MISSING_HAS_RUN]) $1=${$1-"${am_missing_run}$2"} AC_SUBST($1)]) # AM_MISSING_HAS_RUN # ------------------ # Define MISSING if not defined so far and test if it supports --run. # If it does, set am_missing_run to use it, otherwise, to nothing. AC_DEFUN([AM_MISSING_HAS_RUN], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl AC_REQUIRE_AUX_FILE([missing])dnl if test x"${MISSING+set}" != xset; then case $am_aux_dir in *\ * | *\ *) MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; *) MISSING="\${SHELL} $am_aux_dir/missing" ;; esac fi # Use eval to expand $SHELL if eval "$MISSING --run true"; then am_missing_run="$MISSING --run " else am_missing_run= AC_MSG_WARN([`missing' script is too old or missing]) fi ]) # Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_MKDIR_P # --------------- # Check for `mkdir -p'. AC_DEFUN([AM_PROG_MKDIR_P], [AC_PREREQ([2.60])dnl AC_REQUIRE([AC_PROG_MKDIR_P])dnl dnl Automake 1.8 to 1.9.6 used to define mkdir_p. We now use MKDIR_P, dnl while keeping a definition of mkdir_p for backward compatibility. dnl @MKDIR_P@ is magic: AC_OUTPUT adjusts its value for each Makefile. dnl However we cannot define mkdir_p as $(MKDIR_P) for the sake of dnl Makefile.ins that do not define MKDIR_P, so we do our own dnl adjustment using top_builddir (which is defined more often than dnl MKDIR_P). AC_SUBST([mkdir_p], ["$MKDIR_P"])dnl case $mkdir_p in [[\\/$]]* | ?:[[\\/]]*) ;; */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;; esac ]) # Helper functions for option handling. -*- Autoconf -*- # Copyright (C) 2001, 2002, 2003, 2005, 2008 Free Software Foundation, Inc. 
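AM_MISSING_PROG and AM_MISSING_HAS_RUN above route maintainer tools through the missing wrapper so that regenerating build files degrades to a warning when the tool is absent. Combining the two macros, the substituted values end up looking roughly like the sketch below; the absolute path is an illustrative assumption, while the 1.11 suffix follows from am__api_version:

    # MISSING="${SHELL} $am_aux_dir/missing"; am_missing_run="$MISSING --run "
    ACLOCAL='${SHELL} /abs/path/to/aux/missing --run aclocal-1.11'
    AUTOMAKE='${SHELL} /abs/path/to/aux/missing --run automake-1.11'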
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 4 # _AM_MANGLE_OPTION(NAME) # ----------------------- AC_DEFUN([_AM_MANGLE_OPTION], [[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) # _AM_SET_OPTION(NAME) # ------------------------------ # Set option NAME. Presently that only means defining a flag for this option. AC_DEFUN([_AM_SET_OPTION], [m4_define(_AM_MANGLE_OPTION([$1]), 1)]) # _AM_SET_OPTIONS(OPTIONS) # ---------------------------------- # OPTIONS is a space-separated list of Automake options. AC_DEFUN([_AM_SET_OPTIONS], [m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) # _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) # ------------------------------------------- # Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. AC_DEFUN([_AM_IF_OPTION], [m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) # Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_RUN_LOG(COMMAND) # ------------------- # Run COMMAND, save the exit status in ac_status, and log it. # (This has been adapted from Autoconf's _AC_RUN_LOG macro.) AC_DEFUN([AM_RUN_LOG], [{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD (exit $ac_status); }]) # Check to make sure that the build environment is sane. -*- Autoconf -*- # Copyright (C) 1996, 1997, 2000, 2001, 2003, 2005, 2008 # Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 5 # AM_SANITY_CHECK # --------------- AC_DEFUN([AM_SANITY_CHECK], [AC_MSG_CHECKING([whether build environment is sane]) # Just in case sleep 1 echo timestamp > conftest.file # Reject unsafe characters in $srcdir or the absolute working directory # name. Accept space and tab only in the latter. am_lf=' ' case `pwd` in *[[\\\"\#\$\&\'\`$am_lf]]*) AC_MSG_ERROR([unsafe absolute working directory name]);; esac case $srcdir in *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*) AC_MSG_ERROR([unsafe srcdir value: `$srcdir']);; esac # Do `set' in a subshell so we don't clobber the current shell's # arguments. Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` if test "$[*]" = "X"; then # -L didn't work. set X `ls -t "$srcdir/configure" conftest.file` fi rm -f conftest.file if test "$[*]" != "X $srcdir/configure conftest.file" \ && test "$[*]" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. Such a system could not be considered "sane". AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken alias in your environment]) fi test "$[2]" = conftest.file ) then # Ok. : else AC_MSG_ERROR([newly created file is older than distributed files! 
Check your system clock]) fi AC_MSG_RESULT(yes)]) # Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_INSTALL_STRIP # --------------------- # One issue with vendor `install' (even GNU) is that you can't # specify the program used to strip binaries. This is especially # annoying in cross-compiling environments, where the build's strip # is unlikely to handle the host's binaries. # Fortunately install-sh will honor a STRIPPROG variable, so we # always use install-sh in `make install-strip', and initialize # STRIPPROG with the value of the STRIP variable (set by the user). AC_DEFUN([AM_PROG_INSTALL_STRIP], [AC_REQUIRE([AM_PROG_INSTALL_SH])dnl # Installed binaries are usually stripped using `strip' when the user # run `make install-strip'. However `strip' might not be the right # tool to use in cross-compilation environments, therefore Automake # will honor the `STRIP' environment variable to overrule this program. dnl Don't test for $cross_compiling = yes, because it might be `maybe'. if test "$cross_compiling" != no; then AC_CHECK_TOOL([STRIP], [strip], :) fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" AC_SUBST([INSTALL_STRIP_PROGRAM])]) # Copyright (C) 2006, 2008 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 2 # _AM_SUBST_NOTMAKE(VARIABLE) # --------------------------- # Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in. # This macro is traced by Automake. AC_DEFUN([_AM_SUBST_NOTMAKE]) # AM_SUBST_NOTMAKE(VARIABLE) # --------------------------- # Public sister of _AM_SUBST_NOTMAKE. AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) # Check how to create a tarball. -*- Autoconf -*- # Copyright (C) 2004, 2005 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 2 # _AM_PROG_TAR(FORMAT) # -------------------- # Check how to create a tarball in format FORMAT. # FORMAT should be one of `v7', `ustar', or `pax'. # # Substitute a variable $(am__tar) that is a command # writing to stdout a FORMAT-tarball containing the directory # $tardir. # tardir=directory && $(am__tar) > result.tar # # Substitute a variable $(am__untar) that extract such # a tarball read from stdin. # $(am__untar) < result.tar AC_DEFUN([_AM_PROG_TAR], [# Always define AMTAR for backward compatibility. AM_MISSING_PROG([AMTAR], [tar]) m4_if([$1], [v7], [am__tar='${AMTAR} chof - "$$tardir"'; am__untar='${AMTAR} xf -'], [m4_case([$1], [ustar],, [pax],, [m4_fatal([Unknown tar format])]) AC_MSG_CHECKING([how to create a $1 tar archive]) # Loop over all known methods to create a tar archive until one works. _am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' _am_tools=${am_cv_prog_tar_$1-$_am_tools} # Do not fold the above two line into one, because Tru64 sh and # Solaris sh will not grok spaces in the rhs of `-'. 
for _am_tool in $_am_tools do case $_am_tool in gnutar) for _am_tar in tar gnutar gtar; do AM_RUN_LOG([$_am_tar --version]) && break done am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' am__untar="$_am_tar -xf -" ;; plaintar) # Must skip GNU tar: if it does not support --format= it doesn't create # ustar tarball either. (tar --version) >/dev/null 2>&1 && continue am__tar='tar chf - "$$tardir"' am__tar_='tar chf - "$tardir"' am__untar='tar xf -' ;; pax) am__tar='pax -L -x $1 -w "$$tardir"' am__tar_='pax -L -x $1 -w "$tardir"' am__untar='pax -r' ;; cpio) am__tar='find "$$tardir" -print | cpio -o -H $1 -L' am__tar_='find "$tardir" -print | cpio -o -H $1 -L' am__untar='cpio -i -H $1 -d' ;; none) am__tar=false am__tar_=false am__untar=false ;; esac # If the value was cached, stop now. We just wanted to have am__tar # and am__untar set. test -n "${am_cv_prog_tar_$1}" && break # tar/untar a dummy directory, and stop if the command works rm -rf conftest.dir mkdir conftest.dir echo GrepMe > conftest.dir/file AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar]) rm -rf conftest.dir if test -s conftest.tar; then AM_RUN_LOG([$am__untar /dev/null 2>&1 && break fi done rm -rf conftest.dir AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool]) AC_MSG_RESULT([$am_cv_prog_tar_$1])]) AC_SUBST([am__tar]) AC_SUBST([am__untar]) ]) # _AM_PROG_TAR m4_include([m4/ac_cxx_have_dbdeadlockexception.m4]) m4_include([m4/ac_cxx_have_sstream.m4]) m4_include([m4/ac_cxx_namespaces.m4]) m4_include([m4/arc_api.m4]) m4_include([m4/arc_paths.m4]) m4_include([m4/fsusage.m4]) m4_include([m4/gettext.m4]) m4_include([m4/gpt.m4]) m4_include([m4/iconv.m4]) m4_include([m4/lib-ld.m4]) m4_include([m4/lib-link.m4]) m4_include([m4/lib-prefix.m4]) m4_include([m4/libtool.m4]) m4_include([m4/ltoptions.m4]) m4_include([m4/ltsugar.m4]) m4_include([m4/ltversion.m4]) m4_include([m4/lt~obsolete.m4]) m4_include([m4/nls.m4]) m4_include([m4/po.m4]) m4_include([m4/progtest.m4]) nordugrid-arc-5.4.2/PaxHeaders.7502/include0000644000000000000000000000013213214316030016533 xustar000000000000000030 mtime=1513200664.758805688 30 atime=1513200668.715854084 30 ctime=1513200664.758805688 nordugrid-arc-5.4.2/include/0000755000175000002070000000000013214316030016656 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/PaxHeaders.7502/arc0000644000000000000000000000013213214316030017300 xustar000000000000000030 mtime=1513200664.975808342 30 atime=1513200668.715854084 30 ctime=1513200664.975808342 nordugrid-arc-5.4.2/include/arc/0000755000175000002070000000000013214316030017423 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/job0000644000000000000000000000013213214316030020052 xustar000000000000000030 mtime=1513200664.912807571 30 atime=1513200668.715854084 30 ctime=1513200664.912807571 nordugrid-arc-5.4.2/include/arc/job/0000755000175000002070000000000013214316030020175 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/job/PaxHeaders.7502/error.h0000644000000000000000000000012311133371312021433 xustar000000000000000026 mtime=1231942346.06164 27 atime=1513200577.043732 30 ctime=1513200664.911807559 nordugrid-arc-5.4.2/include/arc/job/error.h0000644000175000002070000000005511133371312021501 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/job/error.h" 
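_AM_PROG_TAR, defined earlier in this aclocal.m4, substitutes $(am__tar) and $(am__untar) commands that automake's dist rules drive through the tardir variable, following the usage pattern stated in the macro's own comment. A short sketch of that calling convention, using this package's distribution directory name as the example archive (v7 format shown):

    # in a generated Makefile ($$tardir becomes $tardir in the shell):
    #   tardir=nordugrid-arc-5.4.2 && $(am__tar) > nordugrid-arc-5.4.2.tar
    #   which expands to:  ${AMTAR} chof - "$tardir" > nordugrid-arc-5.4.2.tar
    #   $(am__untar) < nordugrid-arc-5.4.2.tar      # i.e. ${AMTAR} xf -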
nordugrid-arc-5.4.2/include/arc/job/PaxHeaders.7502/runtimeenvironment.h0000644000000000000000000000012311133371312024252 xustar000000000000000026 mtime=1231942346.06164 27 atime=1513200577.042732 30 ctime=1513200664.912807571 nordugrid-arc-5.4.2/include/arc/job/runtimeenvironment.h0000644000175000002070000000007211133371312024317 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/job/runtimeenvironment.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/ArcLocation.h0000644000000000000000000000012410764003731021735 xustar000000000000000027 mtime=1204815833.857085 27 atime=1513200577.051732 30 ctime=1513200664.764805761 nordugrid-arc-5.4.2/include/arc/ArcLocation.h0000644000175000002070000000006310764003731022001 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/ArcLocation.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/ArcConfigIni.h0000644000000000000000000000012312771224360022034 xustar000000000000000026 mtime=1474636016.20072 27 atime=1513200577.074733 30 ctime=1513200664.763805749 nordugrid-arc-5.4.2/include/arc/ArcConfigIni.h0000644000175000002070000000006412771224360022102 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/ArcConfigIni.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/data0000644000000000000000000000013213214316030020211 xustar000000000000000030 mtime=1513200664.888807278 30 atime=1513200668.715854084 30 ctime=1513200664.888807278 nordugrid-arc-5.4.2/include/arc/data/0000755000175000002070000000000013214316030020334 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/data/PaxHeaders.7502/MkDirRecursive.h0000644000000000000000000000012410711536217023350 xustar000000000000000027 mtime=1193720975.377759 27 atime=1513200577.059733 30 ctime=1513200664.887807266 nordugrid-arc-5.4.2/include/arc/data/MkDirRecursive.h0000644000175000002070000000006710711536217023420 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/data/MkDirRecursive.h" nordugrid-arc-5.4.2/include/arc/data/PaxHeaders.7502/DataSpeed.h0000644000000000000000000000012410672011347022302 xustar000000000000000027 mtime=1189614311.554858 27 atime=1513200577.059733 30 ctime=1513200664.881807192 nordugrid-arc-5.4.2/include/arc/data/DataSpeed.h0000644000175000002070000000006210672011347022345 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/data/DataSpeed.h" nordugrid-arc-5.4.2/include/arc/data/PaxHeaders.7502/DataBuffer.h0000644000000000000000000000012311100356223022442 xustar000000000000000027 mtime=1224858771.382722 27 atime=1513200577.061733 29 ctime=1513200664.87180707 nordugrid-arc-5.4.2/include/arc/data/DataBuffer.h0000644000175000002070000000006311100356223022507 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/data/DataBuffer.h" nordugrid-arc-5.4.2/include/arc/data/PaxHeaders.7502/DataMover.h0000644000000000000000000000012410705607674022345 xustar000000000000000027 mtime=1192693692.333737 27 atime=1513200577.060733 30 ctime=1513200664.875807119 nordugrid-arc-5.4.2/include/arc/data/DataMover.h0000644000175000002070000000006210705607674022410 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/data/DataMover.h" nordugrid-arc-5.4.2/include/arc/data/PaxHeaders.7502/FileCacheHash.h0000644000000000000000000000012411176406273023065 xustar000000000000000027 mtime=1241124027.708145 27 atime=1513200577.059733 30 ctime=1513200664.884807229 nordugrid-arc-5.4.2/include/arc/data/FileCacheHash.h0000644000175000002070000000006611176406273023134 
0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/data/FileCacheHash.h" nordugrid-arc-5.4.2/include/arc/data/PaxHeaders.7502/URLMap.h0000644000000000000000000000012410705607674021563 xustar000000000000000027 mtime=1192693692.333737 27 atime=1513200577.060733 30 ctime=1513200664.888807278 nordugrid-arc-5.4.2/include/arc/data/URLMap.h0000644000175000002070000000005710705607674021632 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/data/URLMap.h" nordugrid-arc-5.4.2/include/arc/data/PaxHeaders.7502/DataHandle.h0000644000000000000000000000012410672011347022435 xustar000000000000000027 mtime=1189614311.554858 27 atime=1513200577.059733 30 ctime=1513200664.874807107 nordugrid-arc-5.4.2/include/arc/data/DataHandle.h0000644000175000002070000000006310672011347022501 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/data/DataHandle.h" nordugrid-arc-5.4.2/include/arc/data/PaxHeaders.7502/DataPointDirect.h0000644000000000000000000000012410672011347023466 xustar000000000000000027 mtime=1189614311.554858 27 atime=1513200577.059733 30 ctime=1513200664.878807156 nordugrid-arc-5.4.2/include/arc/data/DataPointDirect.h0000644000175000002070000000007010672011347023530 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/data/DataPointDirect.h" nordugrid-arc-5.4.2/include/arc/data/PaxHeaders.7502/DataPointIndex.h0000644000000000000000000000012410675206226023330 xustar000000000000000027 mtime=1190464662.118294 27 atime=1513200577.059733 30 ctime=1513200664.879807168 nordugrid-arc-5.4.2/include/arc/data/DataPointIndex.h0000644000175000002070000000006710675206226023400 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/data/DataPointIndex.h" nordugrid-arc-5.4.2/include/arc/data/PaxHeaders.7502/FileCache.h0000644000000000000000000000012411062170375022254 xustar000000000000000027 mtime=1221128445.846693 27 atime=1513200577.061733 30 ctime=1513200664.883807217 nordugrid-arc-5.4.2/include/arc/data/FileCache.h0000644000175000002070000000006211062170375022317 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/data/FileCache.h" nordugrid-arc-5.4.2/include/arc/data/PaxHeaders.7502/DataCallback.h0000644000000000000000000000012410705607674022751 xustar000000000000000027 mtime=1192693692.333737 27 atime=1513200577.059733 30 ctime=1513200664.872807082 nordugrid-arc-5.4.2/include/arc/data/DataCallback.h0000644000175000002070000000006510705607674023017 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/data/DataCallback.h" nordugrid-arc-5.4.2/include/arc/data/PaxHeaders.7502/DataStatus.h0000644000000000000000000000012410765736564022547 xustar000000000000000027 mtime=1205321076.224761 27 atime=1513200577.059733 30 ctime=1513200664.882807204 nordugrid-arc-5.4.2/include/arc/data/DataStatus.h0000644000175000002070000000006310765736564022613 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/data/DataStatus.h" nordugrid-arc-5.4.2/include/arc/data/PaxHeaders.7502/DataPoint.h0000644000000000000000000000012410672011347022333 xustar000000000000000027 mtime=1189614311.554858 27 atime=1513200577.061733 30 ctime=1513200664.876807131 nordugrid-arc-5.4.2/include/arc/data/DataPoint.h0000644000175000002070000000006210672011347022376 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/data/DataPoint.h" nordugrid-arc-5.4.2/include/arc/data/PaxHeaders.7502/FileInfo.h0000644000000000000000000000012410675412676022160 xustar000000000000000027 mtime=1190532542.881496 27 atime=1513200577.059733 30 
ctime=1513200664.886807253 nordugrid-arc-5.4.2/include/arc/data/FileInfo.h0000644000175000002070000000006110675412676022222 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/data/FileInfo.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/JobPerfLog.h0000644000000000000000000000012412675602216021536 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200577.065733 30 ctime=1513200664.784806006 nordugrid-arc-5.4.2/include/arc/JobPerfLog.h0000644000175000002070000000006212675602216021601 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/JobPerfLog.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/loader0000644000000000000000000000013213214316030020546 xustar000000000000000030 mtime=1513200664.917807632 30 atime=1513200668.715854084 30 ctime=1513200664.917807632 nordugrid-arc-5.4.2/include/arc/loader/0000755000175000002070000000000013214316030020671 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/loader/PaxHeaders.7502/ModuleManager.h0000644000000000000000000000012310667345044023534 xustar000000000000000027 mtime=1188940324.156506 27 atime=1513200577.099733 29 ctime=1513200664.91680762 nordugrid-arc-5.4.2/include/arc/loader/ModuleManager.h0000644000175000002070000000007010667345044023577 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/loader/ModuleManager.h" nordugrid-arc-5.4.2/include/arc/loader/PaxHeaders.7502/FinderLoader.h0000644000000000000000000000012411244514675023353 xustar000000000000000027 mtime=1251121597.562708 27 atime=1513200577.099733 30 ctime=1513200664.914807596 nordugrid-arc-5.4.2/include/arc/loader/FinderLoader.h0000644000175000002070000000006711244514675023423 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/loader/FinderLoader.h" nordugrid-arc-5.4.2/include/arc/loader/PaxHeaders.7502/Loader.h0000644000000000000000000000012410667345044022223 xustar000000000000000027 mtime=1188940324.156506 27 atime=1513200577.101733 30 ctime=1513200664.915807608 nordugrid-arc-5.4.2/include/arc/loader/Loader.h0000644000175000002070000000006110667345044022265 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/loader/Loader.h" nordugrid-arc-5.4.2/include/arc/loader/PaxHeaders.7502/Plugin.h0000644000000000000000000000012411114777117022251 xustar000000000000000027 mtime=1228144207.795496 27 atime=1513200577.099733 30 ctime=1513200664.917807632 nordugrid-arc-5.4.2/include/arc/loader/Plugin.h0000644000175000002070000000006111114777117022313 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/loader/Plugin.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/DateTime.h0000644000000000000000000000012410672011347021233 xustar000000000000000027 mtime=1189614311.554858 27 atime=1513200577.050732 30 ctime=1513200664.772805859 nordugrid-arc-5.4.2/include/arc/DateTime.h0000644000175000002070000000006010672011347021274 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/DateTime.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/data-staging0000644000000000000000000000013213214316030021643 xustar000000000000000030 mtime=1513200664.870807058 30 atime=1513200668.716854096 30 ctime=1513200664.870807058 nordugrid-arc-5.4.2/include/arc/data-staging/0000755000175000002070000000000013214316030021766 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/data-staging/PaxHeaders.7502/DTRList.h0000644000000000000000000000012411523013225023360 xustar000000000000000027 mtime=1296832149.591951 27 atime=1513200577.098733 30 ctime=1513200664.860806935 
nordugrid-arc-5.4.2/include/arc/data-staging/DTRList.h0000644000175000002070000000006411523013225023425 0ustar00mockbuildmock00000000000000#include "../../../src/libs/data-staging/DTRList.h" nordugrid-arc-5.4.2/include/arc/data-staging/PaxHeaders.7502/Processor.h0000644000000000000000000000012411523013225024052 xustar000000000000000027 mtime=1296832149.591951 27 atime=1513200577.097733 30 ctime=1513200664.867807021 nordugrid-arc-5.4.2/include/arc/data-staging/Processor.h0000644000175000002070000000006611523013225024121 0ustar00mockbuildmock00000000000000#include "../../../src/libs/data-staging/Processor.h" nordugrid-arc-5.4.2/include/arc/data-staging/PaxHeaders.7502/Generator.h0000644000000000000000000000012411523013225024021 xustar000000000000000027 mtime=1296832149.591951 27 atime=1513200577.098733 30 ctime=1513200664.865806996 nordugrid-arc-5.4.2/include/arc/data-staging/Generator.h0000644000175000002070000000006611523013225024070 0ustar00mockbuildmock00000000000000#include "../../../src/libs/data-staging/Generator.h" nordugrid-arc-5.4.2/include/arc/data-staging/PaxHeaders.7502/DataDeliveryComm.h0000644000000000000000000000012411523013225025264 xustar000000000000000027 mtime=1296832149.591951 27 atime=1513200577.098733 30 ctime=1513200664.864806984 nordugrid-arc-5.4.2/include/arc/data-staging/DataDeliveryComm.h0000644000175000002070000000007511523013225025333 0ustar00mockbuildmock00000000000000#include "../../../src/libs/data-staging/DataDeliveryComm.h" nordugrid-arc-5.4.2/include/arc/data-staging/PaxHeaders.7502/DataDelivery.h0000644000000000000000000000012411523013225024450 xustar000000000000000027 mtime=1296832149.591951 27 atime=1513200577.097733 30 ctime=1513200664.863806972 nordugrid-arc-5.4.2/include/arc/data-staging/DataDelivery.h0000644000175000002070000000007111523013225024513 0ustar00mockbuildmock00000000000000#include "../../../src/libs/data-staging/DataDelivery.h" nordugrid-arc-5.4.2/include/arc/data-staging/PaxHeaders.7502/TransferShares.h0000644000000000000000000000012411727764030025042 xustar000000000000000027 mtime=1331685400.508154 27 atime=1513200577.098733 30 ctime=1513200664.870807058 nordugrid-arc-5.4.2/include/arc/data-staging/TransferShares.h0000644000175000002070000000007311727764030025107 0ustar00mockbuildmock00000000000000#include "../../../src/libs/data-staging/TransferShares.h" nordugrid-arc-5.4.2/include/arc/data-staging/PaxHeaders.7502/DTRStatus.h0000644000000000000000000000012411523013225023730 xustar000000000000000027 mtime=1296832149.591951 27 atime=1513200577.096733 30 ctime=1513200664.861806948 nordugrid-arc-5.4.2/include/arc/data-staging/DTRStatus.h0000644000175000002070000000006611523013225023777 0ustar00mockbuildmock00000000000000#include "../../../src/libs/data-staging/DTRStatus.h" nordugrid-arc-5.4.2/include/arc/data-staging/PaxHeaders.7502/DTR.h0000644000000000000000000000012411523013225022524 xustar000000000000000027 mtime=1296832149.591951 27 atime=1513200577.098733 30 ctime=1513200664.859806923 nordugrid-arc-5.4.2/include/arc/data-staging/DTR.h0000644000175000002070000000006011523013225022565 0ustar00mockbuildmock00000000000000#include "../../../src/libs/data-staging/DTR.h" nordugrid-arc-5.4.2/include/arc/data-staging/PaxHeaders.7502/Scheduler.h0000644000000000000000000000012411523013225024011 xustar000000000000000027 mtime=1296832149.591951 27 atime=1513200577.097733 30 ctime=1513200664.868807033 nordugrid-arc-5.4.2/include/arc/data-staging/Scheduler.h0000644000175000002070000000006611523013225024060 
0ustar00mockbuildmock00000000000000#include "../../../src/libs/data-staging/Scheduler.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/XMLNode.h0000644000000000000000000000012410672011347021005 xustar000000000000000027 mtime=1189614311.554858 27 atime=1513200577.054733 30 ctime=1513200664.802806226 nordugrid-arc-5.4.2/include/arc/XMLNode.h0000644000175000002070000000005710672011347021054 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/XMLNode.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/win32.h0000644000000000000000000000012411004026335020472 xustar000000000000000027 mtime=1209019613.327194 27 atime=1513200577.093733 30 ctime=1513200664.975808342 nordugrid-arc-5.4.2/include/arc/win32.h0000644000175000002070000000005511004026335020537 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/win32.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/MysqlWrapper.h0000644000000000000000000000012311204367504022205 xustar000000000000000027 mtime=1242689348.075536 27 atime=1513200577.096733 29 ctime=1513200664.78680603 nordugrid-arc-5.4.2/include/arc/MysqlWrapper.h0000644000175000002070000000006411204367504022253 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/MysqlWrapper.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/User.h0000644000000000000000000000012410720410562020451 xustar000000000000000027 mtime=1195512178.126771 27 atime=1513200577.098733 30 ctime=1513200664.796806153 nordugrid-arc-5.4.2/include/arc/User.h0000644000175000002070000000005410720410562020515 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/User.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/communication0000644000000000000000000000013213214316030022145 xustar000000000000000030 mtime=1513200664.806806275 30 atime=1513200668.716854096 30 ctime=1513200664.806806275 nordugrid-arc-5.4.2/include/arc/communication/0000755000175000002070000000000013214316030022270 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/communication/PaxHeaders.7502/ClientInterface.h0000644000000000000000000000012412042216423025436 xustar000000000000000027 mtime=1351163155.111915 27 atime=1513200577.056733 30 ctime=1513200664.803806238 nordugrid-arc-5.4.2/include/arc/communication/ClientInterface.h0000644000175000002070000000010112042216423025473 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/communication/ClientInterface.h" nordugrid-arc-5.4.2/include/arc/communication/PaxHeaders.7502/ClientSAML2SSO.h0000644000000000000000000000012312042216423024740 xustar000000000000000027 mtime=1351163155.111915 27 atime=1513200577.057733 29 ctime=1513200664.80480625 nordugrid-arc-5.4.2/include/arc/communication/ClientSAML2SSO.h0000644000175000002070000000010012042216423024775 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/communication/ClientSAML2SSO.h" nordugrid-arc-5.4.2/include/arc/communication/PaxHeaders.7502/ClientX509Delegation.h0000644000000000000000000000012412042216423026177 xustar000000000000000027 mtime=1351163155.111915 27 atime=1513200577.057733 30 ctime=1513200664.806806275 nordugrid-arc-5.4.2/include/arc/communication/ClientX509Delegation.h0000644000175000002070000000010612042216423026241 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/communication/ClientX509Delegation.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/credential0000644000000000000000000000013213214316030021412 xustar000000000000000030 mtime=1513200664.852806838 30 atime=1513200668.716854096 30 
ctime=1513200664.852806838 nordugrid-arc-5.4.2/include/arc/credential/0000755000175000002070000000000013214316030021535 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/credential/PaxHeaders.7502/Proxycertinfo.h0000644000000000000000000000012411123663540024524 xustar000000000000000027 mtime=1229940576.652314 27 atime=1513200577.064733 30 ctime=1513200664.848806789 nordugrid-arc-5.4.2/include/arc/credential/Proxycertinfo.h0000644000175000002070000000007411123663540024572 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/credential/Proxycertinfo.h" nordugrid-arc-5.4.2/include/arc/credential/PaxHeaders.7502/VOMSAttribute.h0000644000000000000000000000012411123663540024321 xustar000000000000000027 mtime=1229940576.652314 27 atime=1513200577.062733 30 ctime=1513200664.849806801 nordugrid-arc-5.4.2/include/arc/credential/VOMSAttribute.h0000644000175000002070000000007411123663540024367 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/credential/VOMSAttribute.h" nordugrid-arc-5.4.2/include/arc/credential/PaxHeaders.7502/PasswordSource.h0000644000000000000000000000012412223210636024630 xustar000000000000000027 mtime=1380782494.260249 27 atime=1513200577.062733 30 ctime=1513200664.847806776 nordugrid-arc-5.4.2/include/arc/credential/PasswordSource.h0000644000175000002070000000007612223210636024700 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/credential/PasswordSource.h" nordugrid-arc-5.4.2/include/arc/credential/PaxHeaders.7502/NSSUtil.h0000644000000000000000000000012411722652376023164 xustar000000000000000027 mtime=1330337022.696342 27 atime=1513200577.064733 30 ctime=1513200664.845806752 nordugrid-arc-5.4.2/include/arc/credential/NSSUtil.h0000644000175000002070000000006611722652376023233 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/credential/NSSUtil.h" nordugrid-arc-5.4.2/include/arc/credential/PaxHeaders.7502/VOMSConfig.h0000644000000000000000000000012412372474265023576 xustar000000000000000027 mtime=1407875253.192215 27 atime=1513200577.062733 30 ctime=1513200664.851806825 nordugrid-arc-5.4.2/include/arc/credential/VOMSConfig.h0000644000175000002070000000007112372474265023641 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/credential/VOMSConfig.h" nordugrid-arc-5.4.2/include/arc/credential/PaxHeaders.7502/Credential.h0000644000000000000000000000012311062200563023714 xustar000000000000000027 mtime=1221132659.527943 27 atime=1513200577.062733 29 ctime=1513200664.84480674 nordugrid-arc-5.4.2/include/arc/credential/Credential.h0000644000175000002070000000007211062200563023761 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/credential/Credential.h" nordugrid-arc-5.4.2/include/arc/credential/PaxHeaders.7502/VOMSUtil.h0000644000000000000000000000012411123663540023273 xustar000000000000000027 mtime=1229940576.652314 27 atime=1513200577.062733 30 ctime=1513200664.852806838 nordugrid-arc-5.4.2/include/arc/credential/VOMSUtil.h0000644000175000002070000000006711123663540023343 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/credential/VOMSUtil.h" nordugrid-arc-5.4.2/include/arc/credential/PaxHeaders.7502/CertUtil.h0000644000000000000000000000012411123663540023404 xustar000000000000000027 mtime=1229940576.652314 27 atime=1513200577.062733 30 ctime=1513200664.842806715 nordugrid-arc-5.4.2/include/arc/credential/CertUtil.h0000644000175000002070000000006711123663540023454 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/credential/CertUtil.h" 
nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/ws-security0000644000000000000000000000013213214316030021576 xustar000000000000000030 mtime=1513200664.978808378 30 atime=1513200668.716854096 30 ctime=1513200664.978808378 nordugrid-arc-5.4.2/include/arc/ws-security/0000755000175000002070000000000013214316030021721 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/ws-security/PaxHeaders.7502/SAMLToken.h0000644000000000000000000000012411205022460023561 xustar000000000000000027 mtime=1242834224.755861 27 atime=1513200577.056733 30 ctime=1513200664.977808366 nordugrid-arc-5.4.2/include/arc/ws-security/SAMLToken.h0000644000175000002070000000007111205022460023624 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/ws-security/SAMLToken.h" nordugrid-arc-5.4.2/include/arc/ws-security/PaxHeaders.7502/X509Token.h0000644000000000000000000000012411035661757023515 xustar000000000000000027 mtime=1215783919.762564 27 atime=1513200577.056733 30 ctime=1513200664.978808378 nordugrid-arc-5.4.2/include/arc/ws-security/X509Token.h0000644000175000002070000000007111035661757023560 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/ws-security/X509Token.h" nordugrid-arc-5.4.2/include/arc/ws-security/PaxHeaders.7502/UsernameToken.h0000644000000000000000000000012410730334625024617 xustar000000000000000027 mtime=1197586837.613988 27 atime=1513200577.056733 30 ctime=1513200664.978808378 nordugrid-arc-5.4.2/include/arc/ws-security/UsernameToken.h0000644000175000002070000000007510730334625024666 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/ws-security/UsernameToken.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/ArcConfigFile.h0000644000000000000000000000012312771224360022174 xustar000000000000000026 mtime=1474636016.20072 27 atime=1513200577.074733 30 ctime=1513200664.761805725 nordugrid-arc-5.4.2/include/arc/ArcConfigFile.h0000644000175000002070000000006512771224360022243 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/ArcConfigFile.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/xmlsec0000644000000000000000000000013213214316030020573 xustar000000000000000030 mtime=1513200664.985808464 30 atime=1513200668.716854096 30 ctime=1513200664.985808464 nordugrid-arc-5.4.2/include/arc/xmlsec/0000755000175000002070000000000013214316030020716 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/xmlsec/PaxHeaders.7502/XmlSecUtils.h0000644000000000000000000000012411100075075023241 xustar000000000000000027 mtime=1224768061.139627 27 atime=1513200577.096733 30 ctime=1513200664.984808452 nordugrid-arc-5.4.2/include/arc/xmlsec/XmlSecUtils.h0000644000175000002070000000006611100075075023310 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/xmlsec/XmlSecUtils.h" nordugrid-arc-5.4.2/include/arc/xmlsec/PaxHeaders.7502/XMLSecNode.h0000644000000000000000000000012311100075075022725 xustar000000000000000027 mtime=1224768061.139627 27 atime=1513200577.095733 29 ctime=1513200664.98380844 nordugrid-arc-5.4.2/include/arc/xmlsec/XMLSecNode.h0000644000175000002070000000006511100075075022774 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/xmlsec/XMLSecNode.h" nordugrid-arc-5.4.2/include/arc/xmlsec/PaxHeaders.7502/saml_util.h0000644000000000000000000000012411100075075023016 xustar000000000000000027 mtime=1224768061.139627 27 atime=1513200577.096733 30 ctime=1513200664.985808464 nordugrid-arc-5.4.2/include/arc/xmlsec/saml_util.h0000644000175000002070000000006411100075075023063 
0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/xmlsec/saml_util.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/ArcConfig.h0000644000000000000000000000012410672011347021372 xustar000000000000000027 mtime=1189614311.554858 27 atime=1513200577.074733 30 ctime=1513200664.760805712 nordugrid-arc-5.4.2/include/arc/ArcConfig.h0000644000175000002070000000006110672011347021434 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/ArcConfig.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/Profile.h0000644000000000000000000000012411246646345021151 xustar000000000000000027 mtime=1251691749.533918 27 atime=1513200577.051732 30 ctime=1513200664.789806067 nordugrid-arc-5.4.2/include/arc/Profile.h0000644000175000002070000000005711246646345021220 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/Profile.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/ArcVersion.h0000644000000000000000000000012311605316104021605 xustar000000000000000026 mtime=1310039108.63878 27 atime=1513200577.096733 30 ctime=1513200664.766805786 nordugrid-arc-5.4.2/include/arc/ArcVersion.h0000644000175000002070000000006211605316104021651 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/ArcVersion.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/IniConfig.h0000644000000000000000000000012411244712733021407 xustar000000000000000027 mtime=1251186139.206413 27 atime=1513200577.096733 30 ctime=1513200664.781805969 nordugrid-arc-5.4.2/include/arc/IniConfig.h0000644000175000002070000000006111244712733021451 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/IniConfig.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/external0000644000000000000000000000013213214316030021122 xustar000000000000000030 mtime=1513200664.753805627 30 atime=1513200668.716854096 30 ctime=1513200664.753805627 nordugrid-arc-5.4.2/include/arc/external/0000755000175000002070000000000013214316030021245 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/external/PaxHeaders.7502/libs30000644000000000000000000000013213214316030022136 xustar000000000000000030 mtime=1513200664.901807437 30 atime=1513200668.716854096 30 ctime=1513200664.901807437 nordugrid-arc-5.4.2/include/arc/external/libs3/0000755000175000002070000000000013214316030022261 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/external/libs3/PaxHeaders.7502/libs3.h0000644000000000000000000000012312574514214023414 xustar000000000000000026 mtime=1441962124.10844 27 atime=1513200577.075733 30 ctime=1513200664.894807351 nordugrid-arc-5.4.2/include/arc/external/libs3/libs3.h0000644000175000002070000000006612574514214023464 0ustar00mockbuildmock00000000000000#include "../../../../src/external/libs3/inc/libs3.h" nordugrid-arc-5.4.2/include/arc/external/libs3/PaxHeaders.7502/response_headers_handler.h0000644000000000000000000000012112574514214027424 xustar000000000000000026 mtime=1441962124.10844 27 atime=1513200577.088733 28 ctime=1513200664.8988074 nordugrid-arc-5.4.2/include/arc/external/libs3/response_headers_handler.h0000644000175000002070000000011112574514214027465 0ustar00mockbuildmock00000000000000#include "../../../../src/external/libs3/inc/response_headers_handler.h" nordugrid-arc-5.4.2/include/arc/external/libs3/PaxHeaders.7502/error_parser.h0000644000000000000000000000012312574514214025105 xustar000000000000000026 mtime=1441962124.10844 27 atime=1513200577.087733 30 ctime=1513200664.893807339 
nordugrid-arc-5.4.2/include/arc/external/libs3/error_parser.h0000644000175000002070000000007512574514214025155 0ustar00mockbuildmock00000000000000#include "../../../../src/external/libs3/inc/error_parser.h" nordugrid-arc-5.4.2/include/arc/external/libs3/PaxHeaders.7502/simplexml.h0000644000000000000000000000012312574514214024412 xustar000000000000000026 mtime=1441962124.10844 27 atime=1513200577.075733 30 ctime=1513200664.900807424 nordugrid-arc-5.4.2/include/arc/external/libs3/simplexml.h0000644000175000002070000000007212574514214024457 0ustar00mockbuildmock00000000000000#include "../../../../src/external/libs3/inc/simplexml.h" nordugrid-arc-5.4.2/include/arc/external/libs3/PaxHeaders.7502/request_context.h0000644000000000000000000000012312574514214025634 xustar000000000000000026 mtime=1441962124.10844 27 atime=1513200577.087733 30 ctime=1513200664.897807388 nordugrid-arc-5.4.2/include/arc/external/libs3/request_context.h0000644000175000002070000000010012574514214025671 0ustar00mockbuildmock00000000000000#include "../../../../src/external/libs3/inc/request_context.h" nordugrid-arc-5.4.2/include/arc/external/libs3/PaxHeaders.7502/util.h0000644000000000000000000000012312574514214023355 xustar000000000000000026 mtime=1441962124.10844 27 atime=1513200577.087733 30 ctime=1513200664.901807437 nordugrid-arc-5.4.2/include/arc/external/libs3/util.h0000644000175000002070000000006512574514214023424 0ustar00mockbuildmock00000000000000#include "../../../../src/external/libs3/inc/util.h" nordugrid-arc-5.4.2/include/arc/external/libs3/PaxHeaders.7502/request.h0000644000000000000000000000012312574514214024070 xustar000000000000000026 mtime=1441962124.10844 27 atime=1513200577.088733 30 ctime=1513200664.895807363 nordugrid-arc-5.4.2/include/arc/external/libs3/request.h0000644000175000002070000000007012574514214024133 0ustar00mockbuildmock00000000000000#include "../../../../src/external/libs3/inc/request.h" nordugrid-arc-5.4.2/include/arc/external/PaxHeaders.7502/cJSON0000644000000000000000000000013213214316030022036 xustar000000000000000030 mtime=1513200664.891807314 30 atime=1513200668.716854096 30 ctime=1513200664.891807314 nordugrid-arc-5.4.2/include/arc/external/cJSON/0000755000175000002070000000000013214316030022161 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/external/cJSON/PaxHeaders.7502/cJSON.h0000644000000000000000000000012212267746213023220 xustar000000000000000025 mtime=1390398603.1003 27 atime=1513200577.075733 30 ctime=1513200664.891807314 nordugrid-arc-5.4.2/include/arc/external/cJSON/cJSON.h0000644000175000002070000000006212267746213023265 0ustar00mockbuildmock00000000000000#include "../../../../src/external/cJSON/cJSON.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/UserConfig.h0000644000000000000000000000012411252171755021610 xustar000000000000000027 mtime=1252586477.197519 27 atime=1513200577.053733 30 ctime=1513200664.797806165 nordugrid-arc-5.4.2/include/arc/UserConfig.h0000644000175000002070000000006211252171755021653 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/UserConfig.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/IString.h0000644000000000000000000000012310767565236021135 xustar000000000000000026 mtime=1205791390.11605 27 atime=1513200577.056733 30 ctime=1513200664.780805957 nordugrid-arc-5.4.2/include/arc/IString.h0000644000175000002070000000005710767565236021205 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/IString.h" 
nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/Watchdog.h0000644000000000000000000000012311757661340021307 xustar000000000000000026 mtime=1337942752.88116 27 atime=1513200577.053733 30 ctime=1513200664.800806201 nordugrid-arc-5.4.2/include/arc/Watchdog.h0000644000175000002070000000006011757661340021351 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/Watchdog.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/wsrf0000644000000000000000000000013213214316030020261 xustar000000000000000030 mtime=1513200664.982808427 30 atime=1513200668.716854096 30 ctime=1513200664.982808427 nordugrid-arc-5.4.2/include/arc/wsrf/0000755000175000002070000000000013214316030020404 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/wsrf/PaxHeaders.7502/WSResourceProperties.h0000644000000000000000000000012410667345044024646 xustar000000000000000027 mtime=1188940324.156506 27 atime=1513200577.074733 30 ctime=1513200664.982808427 nordugrid-arc-5.4.2/include/arc/wsrf/WSResourceProperties.h0000644000175000002070000000007510667345044024715 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/wsrf/WSResourceProperties.h" nordugrid-arc-5.4.2/include/arc/wsrf/PaxHeaders.7502/WSRFBaseFault.h0000644000000000000000000000012410667345044023100 xustar000000000000000027 mtime=1188940324.156506 27 atime=1513200577.065733 30 ctime=1513200664.980808403 nordugrid-arc-5.4.2/include/arc/wsrf/WSRFBaseFault.h0000644000175000002070000000006610667345044023147 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/wsrf/WSRFBaseFault.h" nordugrid-arc-5.4.2/include/arc/wsrf/PaxHeaders.7502/WSRF.h0000644000000000000000000000012410667345044021311 xustar000000000000000027 mtime=1188940324.156506 27 atime=1513200577.074733 30 ctime=1513200664.979808391 nordugrid-arc-5.4.2/include/arc/wsrf/WSRF.h0000644000175000002070000000005510667345044021356 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/wsrf/WSRF.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/infosys0000644000000000000000000000013213214316030020772 xustar000000000000000030 mtime=1513200664.910807547 30 atime=1513200668.716854096 30 ctime=1513200664.910807547 nordugrid-arc-5.4.2/include/arc/infosys/0000755000175000002070000000000013214316030021115 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/infosys/PaxHeaders.7502/InformationInterface.h0000644000000000000000000000012410667345044025347 xustar000000000000000027 mtime=1188940324.156506 27 atime=1513200577.094733 30 ctime=1513200664.909807535 nordugrid-arc-5.4.2/include/arc/infosys/InformationInterface.h0000644000175000002070000000010010667345044025403 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/infosys/InformationInterface.h" nordugrid-arc-5.4.2/include/arc/infosys/PaxHeaders.7502/InfoCache.h0000644000000000000000000000012410704723650023053 xustar000000000000000027 mtime=1192470440.711563 27 atime=1513200577.093733 30 ctime=1513200664.905807486 nordugrid-arc-5.4.2/include/arc/infosys/InfoCache.h0000644000175000002070000000006510704723650023121 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/infosys/InfoCache.h" nordugrid-arc-5.4.2/include/arc/infosys/PaxHeaders.7502/InfoRegister.h0000644000000000000000000000012410776613475023650 xustar000000000000000027 mtime=1207637821.872846 27 atime=1513200577.094733 30 ctime=1513200664.908807522 nordugrid-arc-5.4.2/include/arc/infosys/InfoRegister.h0000644000175000002070000000007010776613475023712 0ustar00mockbuildmock00000000000000#include 
"../../../src/hed/libs/infosys/InfoRegister.h" nordugrid-arc-5.4.2/include/arc/infosys/PaxHeaders.7502/InfoFilter.h0000644000000000000000000000012311135335640023271 xustar000000000000000027 mtime=1232452512.714545 27 atime=1513200577.094733 29 ctime=1513200664.90780751 nordugrid-arc-5.4.2/include/arc/infosys/InfoFilter.h0000644000175000002070000000006611135335640023341 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/infosys/InfoFilter.h" nordugrid-arc-5.4.2/include/arc/infosys/PaxHeaders.7502/RegisteredService.h0000644000000000000000000000012411155204636024651 xustar000000000000000027 mtime=1236601246.787844 27 atime=1513200577.093733 30 ctime=1513200664.910807547 nordugrid-arc-5.4.2/include/arc/infosys/RegisteredService.h0000644000175000002070000000007511155204636024720 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/infosys/RegisteredService.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/Thread.h0000644000000000000000000000012410672011347020746 xustar000000000000000027 mtime=1189614311.554858 27 atime=1513200577.054733 30 ctime=1513200664.793806116 nordugrid-arc-5.4.2/include/arc/Thread.h0000644000175000002070000000005610672011347021014 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/Thread.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/Logger.h0000644000000000000000000000012410672011347020756 xustar000000000000000027 mtime=1189614311.554858 27 atime=1513200577.098733 30 ctime=1513200664.785806018 nordugrid-arc-5.4.2/include/arc/Logger.h0000644000175000002070000000005610672011347021024 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/Logger.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/credentialstore0000644000000000000000000000013213214316030022467 xustar000000000000000030 mtime=1513200664.856806886 30 atime=1513200668.716854096 30 ctime=1513200664.856806886 nordugrid-arc-5.4.2/include/arc/credentialstore/0000755000175000002070000000000013214316030022612 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/credentialstore/PaxHeaders.7502/ClientVOMS.h0000644000000000000000000000012412343353401024646 xustar000000000000000027 mtime=1401804545.672799 27 atime=1513200577.053733 30 ctime=1513200664.854806862 nordugrid-arc-5.4.2/include/arc/credentialstore/ClientVOMS.h0000644000175000002070000000007612343353401024716 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/credentialstore/ClientVOMS.h" nordugrid-arc-5.4.2/include/arc/credentialstore/PaxHeaders.7502/CredentialStore.h0000644000000000000000000000012411346725713026025 xustar000000000000000027 mtime=1268493259.448878 27 atime=1513200577.053733 30 ctime=1513200664.856806886 nordugrid-arc-5.4.2/include/arc/credentialstore/CredentialStore.h0000644000175000002070000000010411346725713026065 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/credentialstore/CredentialStore.h" nordugrid-arc-5.4.2/include/arc/credentialstore/PaxHeaders.7502/ClientVOMSRESTful.h0000644000000000000000000000012412343353401026053 xustar000000000000000027 mtime=1401804545.672799 27 atime=1513200577.053733 30 ctime=1513200664.855806874 nordugrid-arc-5.4.2/include/arc/credentialstore/ClientVOMSRESTful.h0000644000175000002070000000010512343353401026114 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/credentialstore/ClientVOMSRESTful.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/security0000644000000000000000000000013013214316030021145 xustar000000000000000029 mtime=1513200664.97480833 30 
atime=1513200668.716854096 29 ctime=1513200664.97480833 nordugrid-arc-5.4.2/include/arc/security/0000755000175000002070000000000013214316030021272 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/security/PaxHeaders.7502/PDP.h0000644000000000000000000000012410705667474022050 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.110733 30 ctime=1513200664.973808317 nordugrid-arc-5.4.2/include/arc/security/PDP.h0000644000175000002070000000006010705667474022111 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/security/PDP.h" nordugrid-arc-5.4.2/include/arc/security/PaxHeaders.7502/ArcPDP0000644000000000000000000000013213214316030022220 xustar000000000000000030 mtime=1513200664.946807987 30 atime=1513200668.716854096 30 ctime=1513200664.946807987 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/0000755000175000002070000000000013214316030022343 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/security/ArcPDP/PaxHeaders.7502/alg0000644000000000000000000000013013214316030022761 xustar000000000000000029 mtime=1513200664.95280806 30 atime=1513200668.716854096 29 ctime=1513200664.95280806 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/alg/0000755000175000002070000000000013214316030023106 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/security/ArcPDP/alg/PaxHeaders.7502/AlgFactory.h0000644000000000000000000000012410705667474025274 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.105733 30 ctime=1513200664.947807999 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/alg/AlgFactory.h0000644000175000002070000000011010705667474025331 0ustar00mockbuildmock00000000000000#include "../../../../../src/hed/libs/security/ArcPDP/alg/AlgFactory.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/alg/PaxHeaders.7502/PermitOverridesAlg.h0000644000000000000000000000012310705667474027007 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.104733 29 ctime=1513200664.95280806 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/alg/PermitOverridesAlg.h0000644000175000002070000000012010705667474027046 0ustar00mockbuildmock00000000000000#include "../../../../../src/hed/libs/security/ArcPDP/alg/PermitOverridesAlg.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/alg/PaxHeaders.7502/DenyOverridesAlg.h0000644000000000000000000000012410705667474026447 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.105733 30 ctime=1513200664.949808024 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/alg/DenyOverridesAlg.h0000644000175000002070000000011610705667474026512 0ustar00mockbuildmock00000000000000#include "../../../../../src/hed/libs/security/ArcPDP/alg/DenyOverridesAlg.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/alg/PaxHeaders.7502/OrderedAlg.h0000644000000000000000000000012411073223740025230 xustar000000000000000027 mtime=1223501792.844198 27 atime=1513200577.105733 30 ctime=1513200664.951808048 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/alg/OrderedAlg.h0000644000175000002070000000011011073223740025265 0ustar00mockbuildmock00000000000000#include "../../../../../src/hed/libs/security/ArcPDP/alg/OrderedAlg.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/alg/PaxHeaders.7502/CombiningAlg.h0000644000000000000000000000012410705667474025572 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.105733 30 ctime=1513200664.949808024 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/alg/CombiningAlg.h0000644000175000002070000000011210705667474025631 
0ustar00mockbuildmock00000000000000#include "../../../../../src/hed/libs/security/ArcPDP/alg/CombiningAlg.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/PaxHeaders.7502/EvaluationCtx.h0000644000000000000000000000012410705667474025264 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.101733 30 ctime=1513200664.935807853 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/EvaluationCtx.h0000644000175000002070000000010410705667474025324 0ustar00mockbuildmock00000000000000#include "../../../../src/hed/libs/security/ArcPDP/EvaluationCtx.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/PaxHeaders.7502/RequestItem.h0000644000000000000000000000012310705667474024744 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.102733 29 ctime=1513200664.94380795 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/RequestItem.h0000644000175000002070000000010210705667474025003 0ustar00mockbuildmock00000000000000#include "../../../../src/hed/libs/security/ArcPDP/RequestItem.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/PaxHeaders.7502/EvaluatorLoader.h0000644000000000000000000000012411016522403025541 xustar000000000000000027 mtime=1211802883.941569 27 atime=1513200577.102733 30 ctime=1513200664.937807877 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/EvaluatorLoader.h0000644000175000002070000000010611016522403025603 0ustar00mockbuildmock00000000000000#include "../../../../src/hed/libs/security/ArcPDP/EvaluatorLoader.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/PaxHeaders.7502/Evaluator.h0000644000000000000000000000012410705667474024440 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.108733 30 ctime=1513200664.936807865 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/Evaluator.h0000644000175000002070000000010010705667474024474 0ustar00mockbuildmock00000000000000#include "../../../../src/hed/libs/security/ArcPDP/Evaluator.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/PaxHeaders.7502/Source.h0000644000000000000000000000012411017526251023716 xustar000000000000000027 mtime=1212066985.979702 27 atime=1513200577.108733 30 ctime=1513200664.946807987 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/Source.h0000644000175000002070000000007511017526251023765 0ustar00mockbuildmock00000000000000#include "../../../../src/hed/libs/security/ArcPDP/Source.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/PaxHeaders.7502/fn0000644000000000000000000000013213214316030022623 xustar000000000000000030 mtime=1513200664.970808281 30 atime=1513200668.716854096 30 ctime=1513200664.970808281 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/fn/0000755000175000002070000000000013214316030022746 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/security/ArcPDP/fn/PaxHeaders.7502/FnFactory.h0000644000000000000000000000012410705667474024774 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.107733 30 ctime=1513200664.967808244 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/fn/FnFactory.h0000644000175000002070000000010610705667474025036 0ustar00mockbuildmock00000000000000#include "../../../../../src/hed/libs/security/ArcPDP/fn/FnFactory.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/fn/PaxHeaders.7502/EqualFunction.h0000644000000000000000000000012310705667474025655 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.107733 29 ctime=1513200664.96580822 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/fn/EqualFunction.h0000644000175000002070000000011210705667474025715 0ustar00mockbuildmock00000000000000#include 
"../../../../../src/hed/libs/security/ArcPDP/fn/EqualFunction.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/fn/PaxHeaders.7502/MatchFunction.h0000644000000000000000000000012410705667474025643 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.105733 30 ctime=1513200664.970808281 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/fn/MatchFunction.h0000644000175000002070000000011210705667474025702 0ustar00mockbuildmock00000000000000#include "../../../../../src/hed/libs/security/ArcPDP/fn/MatchFunction.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/fn/PaxHeaders.7502/Function.h0000644000000000000000000000012410705667474024666 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.105733 30 ctime=1513200664.968808256 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/fn/Function.h0000644000175000002070000000010510705667474024727 0ustar00mockbuildmock00000000000000#include "../../../../../src/hed/libs/security/ArcPDP/fn/Function.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/fn/PaxHeaders.7502/InRangeFunction.h0000644000000000000000000000012410705667474026132 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.105733 30 ctime=1513200664.969808268 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/fn/InRangeFunction.h0000644000175000002070000000011410705667474026173 0ustar00mockbuildmock00000000000000#include "../../../../../src/hed/libs/security/ArcPDP/fn/InRangeFunction.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/PaxHeaders.7502/Request.h0000644000000000000000000000012410705667474024126 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.102733 30 ctime=1513200664.941807926 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/Request.h0000644000175000002070000000007610705667474024176 0ustar00mockbuildmock00000000000000#include "../../../../src/hed/libs/security/ArcPDP/Request.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/PaxHeaders.7502/Result.h0000644000000000000000000000012410705667474023754 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.107733 30 ctime=1513200664.945807975 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/Result.h0000644000175000002070000000007510705667474024023 0ustar00mockbuildmock00000000000000#include "../../../../src/hed/libs/security/ArcPDP/Result.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/PaxHeaders.7502/Response.h0000644000000000000000000000012410705667474024274 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.107733 30 ctime=1513200664.944807963 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/Response.h0000644000175000002070000000007710705667474024345 0ustar00mockbuildmock00000000000000#include "../../../../src/hed/libs/security/ArcPDP/Response.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/PaxHeaders.7502/policy0000644000000000000000000000013213214316030023517 xustar000000000000000030 mtime=1513200664.971808293 30 atime=1513200668.716854096 30 ctime=1513200664.971808293 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/policy/0000755000175000002070000000000013214316030023642 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/security/ArcPDP/policy/PaxHeaders.7502/Policy.h0000644000000000000000000000012410705667474025234 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.102733 30 ctime=1513200664.971808293 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/policy/Policy.h0000644000175000002070000000010710705667474025277 0ustar00mockbuildmock00000000000000#include 
"../../../../../src/hed/libs/security/ArcPDP/policy/Policy.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/PaxHeaders.7502/PolicyParser.h0000644000000000000000000000012411016777252025102 xustar000000000000000027 mtime=1211891370.520373 27 atime=1513200577.102733 30 ctime=1513200664.939807902 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/PolicyParser.h0000644000175000002070000000010311016777252025141 0ustar00mockbuildmock00000000000000#include "../../../../src/hed/libs/security/ArcPDP/PolicyParser.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/PaxHeaders.7502/PolicyStore.h0000644000000000000000000000012411016777252024742 xustar000000000000000027 mtime=1211891370.520373 27 atime=1513200577.104733 30 ctime=1513200664.940807914 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/PolicyStore.h0000644000175000002070000000010211016777252025000 0ustar00mockbuildmock00000000000000#include "../../../../src/hed/libs/security/ArcPDP/PolicyStore.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/PaxHeaders.7502/attr0000644000000000000000000000013213214316030023172 xustar000000000000000030 mtime=1513200664.964808207 30 atime=1513200668.716854096 30 ctime=1513200664.964808207 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/0000755000175000002070000000000013214316030023315 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/PaxHeaders.7502/AttributeFactory.h0000644000000000000000000000012410705667474026743 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.110733 30 ctime=1513200664.955808097 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/AttributeFactory.h0000644000175000002070000000011710705667474027007 0ustar00mockbuildmock00000000000000#include "../../../../../src/hed/libs/security/ArcPDP/attr/AttributeFactory.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/PaxHeaders.7502/AnyURIAttribute.h0000644000000000000000000000012410705667474026443 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.110733 30 ctime=1513200664.953808073 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/AnyURIAttribute.h0000644000175000002070000000011610705667474026506 0ustar00mockbuildmock00000000000000#include "../../../../../src/hed/libs/security/ArcPDP/attr/AnyURIAttribute.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/PaxHeaders.7502/RequestAttribute.h0000644000000000000000000000012410705667474026764 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.110733 30 ctime=1513200664.962808183 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/RequestAttribute.h0000644000175000002070000000011710705667474027030 0ustar00mockbuildmock00000000000000#include "../../../../../src/hed/libs/security/ArcPDP/attr/RequestAttribute.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/PaxHeaders.7502/DateTimeAttribute.h0000644000000000000000000000012410705667474027030 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.108733 30 ctime=1513200664.960808158 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/DateTimeAttribute.h0000644000175000002070000000012010705667474027066 0ustar00mockbuildmock00000000000000#include "../../../../../src/hed/libs/security/ArcPDP/attr/DateTimeAttribute.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/PaxHeaders.7502/StringAttribute.h0000644000000000000000000000012410705667474026602 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.110733 30 ctime=1513200664.963808195 
nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/StringAttribute.h0000644000175000002070000000011610705667474026645 0ustar00mockbuildmock00000000000000#include "../../../../../src/hed/libs/security/ArcPDP/attr/StringAttribute.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/PaxHeaders.7502/GenericAttribute.h0000644000000000000000000000012311007457461026674 xustar000000000000000027 mtime=1209950001.793854 27 atime=1513200577.108733 29 ctime=1513200664.96180817 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/GenericAttribute.h0000644000175000002070000000011711007457461026741 0ustar00mockbuildmock00000000000000#include "../../../../../src/hed/libs/security/ArcPDP/attr/GenericAttribute.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/PaxHeaders.7502/AttributeProxy.h0000644000000000000000000000012410705667474026455 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.110733 30 ctime=1513200664.956808109 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/AttributeProxy.h0000644000175000002070000000011510705667474026517 0ustar00mockbuildmock00000000000000#include "../../../../../src/hed/libs/security/ArcPDP/attr/AttributeProxy.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/PaxHeaders.7502/X500NameAttribute.h0000644000000000000000000000012410705667474026571 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.108733 30 ctime=1513200664.964808207 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/X500NameAttribute.h0000644000175000002070000000012010705667474026627 0ustar00mockbuildmock00000000000000#include "../../../../../src/hed/libs/security/ArcPDP/attr/X500NameAttribute.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/PaxHeaders.7502/AttributeValue.h0000644000000000000000000000012410705667474026410 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.108733 30 ctime=1513200664.957808122 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/AttributeValue.h0000644000175000002070000000011510705667474026452 0ustar00mockbuildmock00000000000000#include "../../../../../src/hed/libs/security/ArcPDP/attr/AttributeValue.h" nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/PaxHeaders.7502/BooleanAttribute.h0000644000000000000000000000012411231633622026672 xustar000000000000000027 mtime=1248278418.671274 27 atime=1513200577.110733 30 ctime=1513200664.958808134 nordugrid-arc-5.4.2/include/arc/security/ArcPDP/attr/BooleanAttribute.h0000644000175000002070000000011711231633622026736 0ustar00mockbuildmock00000000000000#include "../../../../../src/hed/libs/security/ArcPDP/attr/BooleanAttribute.h" nordugrid-arc-5.4.2/include/arc/security/PaxHeaders.7502/ClassLoader.h0000644000000000000000000000012411114777117023610 xustar000000000000000027 mtime=1228144207.795496 27 atime=1513200577.101733 30 ctime=1513200664.972808305 nordugrid-arc-5.4.2/include/arc/security/ClassLoader.h0000644000175000002070000000007011114777117023652 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/security/ClassLoader.h" nordugrid-arc-5.4.2/include/arc/security/PaxHeaders.7502/Security.h0000644000000000000000000000012310705667474023233 xustar000000000000000027 mtime=1192718140.702472 27 atime=1513200577.101733 29 ctime=1513200664.97480833 nordugrid-arc-5.4.2/include/arc/security/Security.h0000644000175000002070000000006510705667474023302 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/security/Security.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/GUID.h0000644000000000000000000000012210672305761020273 
xustar000000000000000026 mtime=1189710833.73153 27 atime=1513200577.050732 29 ctime=1513200664.77780592 nordugrid-arc-5.4.2/include/arc/GUID.h0000644000175000002070000000005410672305761020341 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/GUID.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/FileLock.h0000644000000000000000000000012411033351631021222 xustar000000000000000027 mtime=1215157145.886383 27 atime=1513200577.050732 30 ctime=1513200664.775805896 nordugrid-arc-5.4.2/include/arc/FileLock.h0000644000175000002070000000006011033351631021263 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/FileLock.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/DBInterface.h0000644000000000000000000000012411406056077021652 xustar000000000000000027 mtime=1276664895.271889 27 atime=1513200577.053733 30 ctime=1513200664.771805847 nordugrid-arc-5.4.2/include/arc/DBInterface.h0000644000175000002070000000006311406056077021716 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/DBInterface.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/Run.h0000644000000000000000000000012410702711416020301 xustar000000000000000027 mtime=1191940878.967019 27 atime=1513200577.053733 30 ctime=1513200664.791806091 nordugrid-arc-5.4.2/include/arc/Run.h0000644000175000002070000000005310702711416020344 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/Run.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/Base64.h0000644000000000000000000000012411003356667020571 xustar000000000000000027 mtime=1208868279.320619 27 atime=1513200577.062733 30 ctime=1513200664.767805798 nordugrid-arc-5.4.2/include/arc/Base64.h0000644000175000002070000000005611003356667020637 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/Base64.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/OptionParser.h0000644000000000000000000000012411017471574022172 xustar000000000000000027 mtime=1212052348.966175 27 atime=1513200577.064733 30 ctime=1513200664.788806055 nordugrid-arc-5.4.2/include/arc/OptionParser.h0000644000175000002070000000006411017471574022237 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/OptionParser.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/ArcRegex.h0000644000000000000000000000012410672011347021237 xustar000000000000000027 mtime=1189614311.554858 27 atime=1513200577.093733 30 ctime=1513200664.765805773 nordugrid-arc-5.4.2/include/arc/ArcRegex.h0000644000175000002070000000006010672011347021300 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/ArcRegex.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/FileUtils.h0000644000000000000000000000012411173340741021437 xustar000000000000000027 mtime=1240318433.062595 27 atime=1513200577.051732 30 ctime=1513200664.776805908 nordugrid-arc-5.4.2/include/arc/FileUtils.h0000644000175000002070000000006111173340741021501 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/FileUtils.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/crypto0000644000000000000000000000013213214316030020620 xustar000000000000000030 mtime=1513200664.858806911 30 atime=1513200668.716854096 30 ctime=1513200664.858806911 nordugrid-arc-5.4.2/include/arc/crypto/0000755000175000002070000000000013214316030020743 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/crypto/PaxHeaders.7502/OpenSSL.h0000644000000000000000000000012411223511177022341 xustar000000000000000027 mtime=1246663295.075395 27 atime=1513200577.065733 30 
ctime=1513200664.858806911 nordugrid-arc-5.4.2/include/arc/crypto/OpenSSL.h0000644000175000002070000000006211223511177022404 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/crypto/OpenSSL.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/HostnameResolver.h0000644000000000000000000000012413124220252023027 xustar000000000000000027 mtime=1498489002.318043 27 atime=1513200577.092733 30 ctime=1513200664.778805932 nordugrid-arc-5.4.2/include/arc/HostnameResolver.h0000644000175000002070000000007013124220252023071 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/HostnameResolver.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/IntraProcessCounter.h0000644000000000000000000000012411060677516023523 xustar000000000000000027 mtime=1220771662.084916 27 atime=1513200577.056733 30 ctime=1513200664.782805981 nordugrid-arc-5.4.2/include/arc/IntraProcessCounter.h0000644000175000002070000000007311060677516023570 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/IntraProcessCounter.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/FileAccess.h0000644000000000000000000000012411543103767021546 xustar000000000000000027 mtime=1301055479.912292 27 atime=1513200577.053733 30 ctime=1513200664.774805883 nordugrid-arc-5.4.2/include/arc/FileAccess.h0000644000175000002070000000006211543103767021611 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/FileAccess.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/CheckSum.h0000644000000000000000000000012211616222200021226 xustar000000000000000026 mtime=1312367744.82236 27 atime=1513200577.093733 29 ctime=1513200664.76880581 nordugrid-arc-5.4.2/include/arc/CheckSum.h0000644000175000002070000000006011616222200021271 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/CheckSum.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/Utils.h0000644000000000000000000000012411033351631020632 xustar000000000000000027 mtime=1215157145.886383 27 atime=1513200577.051732 30 ctime=1513200664.799806189 nordugrid-arc-5.4.2/include/arc/Utils.h0000644000175000002070000000005511033351631020677 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/Utils.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/globusutils0000644000000000000000000000013213214316030021654 xustar000000000000000030 mtime=1513200664.904807474 30 atime=1513200668.716854096 30 ctime=1513200664.904807474 nordugrid-arc-5.4.2/include/arc/globusutils/0000755000175000002070000000000013214316030021777 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/globusutils/PaxHeaders.7502/GSSCredential.h0000644000000000000000000000012411110031017024522 xustar000000000000000027 mtime=1226846735.746351 27 atime=1513200577.050732 30 ctime=1513200664.902807449 nordugrid-arc-5.4.2/include/arc/globusutils/GSSCredential.h0000644000175000002070000000007511110031017024571 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/globusutils/GSSCredential.h" nordugrid-arc-5.4.2/include/arc/globusutils/PaxHeaders.7502/GlobusErrorUtils.h0000644000000000000000000000012411110031017025361 xustar000000000000000027 mtime=1226846735.746351 27 atime=1513200577.050732 30 ctime=1513200664.903807461 nordugrid-arc-5.4.2/include/arc/globusutils/GlobusErrorUtils.h0000644000175000002070000000010011110031017025415 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/globusutils/GlobusErrorUtils.h" 
nordugrid-arc-5.4.2/include/arc/globusutils/PaxHeaders.7502/GlobusWorkarounds.h0000644000000000000000000000012411145123512025577 xustar000000000000000027 mtime=1234478922.188083 27 atime=1513200577.050732 30 ctime=1513200664.904807474 nordugrid-arc-5.4.2/include/arc/globusutils/GlobusWorkarounds.h0000644000175000002070000000010111145123512025634 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/globusutils/GlobusWorkarounds.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/message0000644000000000000000000000013013214316030020722 xustar000000000000000029 mtime=1513200664.93480784 30 atime=1513200668.716854096 29 ctime=1513200664.93480784 nordugrid-arc-5.4.2/include/arc/message/0000755000175000002070000000000013214316030021047 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/message/PaxHeaders.7502/MCC_Status.h0000644000000000000000000000012410671562505023136 xustar000000000000000027 mtime=1189537093.804304 27 atime=1513200577.090733 30 ctime=1513200664.920807669 nordugrid-arc-5.4.2/include/arc/message/MCC_Status.h0000644000175000002070000000006610671562505023205 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/MCC_Status.h" nordugrid-arc-5.4.2/include/arc/message/PaxHeaders.7502/PayloadStream.h0000644000000000000000000000012410667345044023740 xustar000000000000000027 mtime=1188940324.156506 27 atime=1513200577.090733 30 ctime=1513200664.927807755 nordugrid-arc-5.4.2/include/arc/message/PayloadStream.h0000644000175000002070000000007110667345044024003 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/PayloadStream.h" nordugrid-arc-5.4.2/include/arc/message/PaxHeaders.7502/PayloadSOAP.h0000644000000000000000000000012410667345044023247 xustar000000000000000027 mtime=1188940324.156506 27 atime=1513200577.088733 30 ctime=1513200664.926807742 nordugrid-arc-5.4.2/include/arc/message/PayloadSOAP.h0000644000175000002070000000006710667345044023317 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/PayloadSOAP.h" nordugrid-arc-5.4.2/include/arc/message/PaxHeaders.7502/SecHandler.h0000644000000000000000000000012411544363267023205 xustar000000000000000027 mtime=1301407415.744771 27 atime=1513200577.091733 30 ctime=1513200664.933807828 nordugrid-arc-5.4.2/include/arc/message/SecHandler.h0000644000175000002070000000006611544363267023254 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/SecHandler.h" nordugrid-arc-5.4.2/include/arc/message/PaxHeaders.7502/Plexer.h0000644000000000000000000000012411114777117022430 xustar000000000000000027 mtime=1228144207.795496 27 atime=1513200577.088733 30 ctime=1513200664.928807767 nordugrid-arc-5.4.2/include/arc/message/Plexer.h0000644000175000002070000000006211114777117022473 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/Plexer.h" nordugrid-arc-5.4.2/include/arc/message/PaxHeaders.7502/MessageAuth.h0000644000000000000000000000012410667345044023401 xustar000000000000000027 mtime=1188940324.156506 27 atime=1513200577.090733 30 ctime=1513200664.924807718 nordugrid-arc-5.4.2/include/arc/message/MessageAuth.h0000644000175000002070000000006710667345044023451 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/MessageAuth.h" nordugrid-arc-5.4.2/include/arc/message/PaxHeaders.7502/SecAttr.h0000644000000000000000000000012411003311132022510 xustar000000000000000027 mtime=1208848986.745027 27 atime=1513200577.090733 30 ctime=1513200664.932807816 
nordugrid-arc-5.4.2/include/arc/message/SecAttr.h0000644000175000002070000000006311003311132022554 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/SecAttr.h" nordugrid-arc-5.4.2/include/arc/message/PaxHeaders.7502/Message.h0000644000000000000000000000012410667345044022557 xustar000000000000000027 mtime=1188940324.156506 27 atime=1513200577.090733 30 ctime=1513200664.921807681 nordugrid-arc-5.4.2/include/arc/message/Message.h0000644000175000002070000000006310667345044022623 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/Message.h" nordugrid-arc-5.4.2/include/arc/message/PaxHeaders.7502/MCCLoader.h0000644000000000000000000000012411114777117022722 xustar000000000000000027 mtime=1228144207.795496 27 atime=1513200577.091733 30 ctime=1513200664.919807657 nordugrid-arc-5.4.2/include/arc/message/MCCLoader.h0000644000175000002070000000006511114777117022770 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/MCCLoader.h" nordugrid-arc-5.4.2/include/arc/message/PaxHeaders.7502/SOAPMessage.h0000644000000000000000000000012410667345044023242 xustar000000000000000027 mtime=1188940324.156506 27 atime=1513200577.091733 30 ctime=1513200664.931807804 nordugrid-arc-5.4.2/include/arc/message/SOAPMessage.h0000644000175000002070000000006710667345044023312 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/SOAPMessage.h" nordugrid-arc-5.4.2/include/arc/message/PaxHeaders.7502/MessageAttributes.h0000644000000000000000000000012410667345044024626 xustar000000000000000027 mtime=1188940324.156506 27 atime=1513200577.091733 30 ctime=1513200664.922807694 nordugrid-arc-5.4.2/include/arc/message/MessageAttributes.h0000644000175000002070000000007510667345044024675 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/MessageAttributes.h" nordugrid-arc-5.4.2/include/arc/message/PaxHeaders.7502/MCC.h0000644000000000000000000000012410671562505021573 xustar000000000000000027 mtime=1189537093.804304 27 atime=1513200577.090733 30 ctime=1513200664.918807645 nordugrid-arc-5.4.2/include/arc/message/MCC.h0000644000175000002070000000005710671562505021642 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/MCC.h" nordugrid-arc-5.4.2/include/arc/message/PaxHeaders.7502/PayloadRaw.h0000644000000000000000000000012310667345044023235 xustar000000000000000027 mtime=1188940324.156506 27 atime=1513200577.090733 29 ctime=1513200664.92580773 nordugrid-arc-5.4.2/include/arc/message/PayloadRaw.h0000644000175000002070000000006610667345044023305 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/PayloadRaw.h" nordugrid-arc-5.4.2/include/arc/message/PaxHeaders.7502/SOAPEnvelope.h0000644000000000000000000000012410667345044023433 xustar000000000000000027 mtime=1188940324.156506 27 atime=1513200577.088733 30 ctime=1513200664.929807779 nordugrid-arc-5.4.2/include/arc/message/SOAPEnvelope.h0000644000175000002070000000007010667345044023475 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/SOAPEnvelope.h" nordugrid-arc-5.4.2/include/arc/message/PaxHeaders.7502/Service.h0000644000000000000000000000012310671562505022570 xustar000000000000000027 mtime=1189537093.804304 27 atime=1513200577.088733 29 ctime=1513200664.93480784 nordugrid-arc-5.4.2/include/arc/message/Service.h0000644000175000002070000000006310671562505022635 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/Service.h" 
nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/delegation0000644000000000000000000000013213214316030021413 xustar000000000000000030 mtime=1513200664.890807302 30 atime=1513200668.716854096 30 ctime=1513200664.890807302 nordugrid-arc-5.4.2/include/arc/delegation/0000755000175000002070000000000013214316030021536 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/delegation/PaxHeaders.7502/DelegationInterface.h0000644000000000000000000000012410673575070025557 xustar000000000000000027 mtime=1190066744.714216 27 atime=1513200577.065733 30 ctime=1513200664.890807302 nordugrid-arc-5.4.2/include/arc/delegation/DelegationInterface.h0000644000175000002070000000010210673575070025615 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/delegation/DelegationInterface.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/ws-addressing0000644000000000000000000000013213214316030022052 xustar000000000000000030 mtime=1513200664.976808354 30 atime=1513200668.716854096 30 ctime=1513200664.976808354 nordugrid-arc-5.4.2/include/arc/ws-addressing/0000755000175000002070000000000013214316030022175 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/ws-addressing/PaxHeaders.7502/WSA.h0000644000000000000000000000012410667345044022753 xustar000000000000000027 mtime=1188940324.156506 27 atime=1513200577.064733 30 ctime=1513200664.976808354 nordugrid-arc-5.4.2/include/arc/ws-addressing/WSA.h0000644000175000002070000000006510667345044023021 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/ws-addressing/WSA.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/URL.h0000644000000000000000000000012310672011347020200 xustar000000000000000027 mtime=1189614311.554858 27 atime=1513200577.093733 29 ctime=1513200664.79580614 nordugrid-arc-5.4.2/include/arc/URL.h0000644000175000002070000000005310672011347020244 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/URL.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/compute0000644000000000000000000000013213214316030020754 xustar000000000000000030 mtime=1513200664.841806703 30 atime=1513200668.716854096 30 ctime=1513200664.841806703 nordugrid-arc-5.4.2/include/arc/compute/0000755000175000002070000000000013214316030021077 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/SubmissionStatus.h0000644000000000000000000000012412051174430024546 xustar000000000000000027 mtime=1352988952.745293 27 atime=1513200577.045732 30 ctime=1513200664.837806654 nordugrid-arc-5.4.2/include/arc/compute/SubmissionStatus.h0000644000175000002070000000007412051174430024614 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/SubmissionStatus.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/JobControllerPlugin.h0000644000000000000000000000012412045235201025141 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200577.043732 30 ctime=1513200664.823806483 nordugrid-arc-5.4.2/include/arc/compute/JobControllerPlugin.h0000644000175000002070000000007712045235201025212 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/JobControllerPlugin.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/RSLParser.h0000644000000000000000000000012412045235201023021 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200577.048732 30 ctime=1513200664.834806617 nordugrid-arc-5.4.2/include/arc/compute/RSLParser.h0000644000175000002070000000006512045235201023067 0ustar00mockbuildmock00000000000000#include 
"../../../src/hed/libs/compute/RSLParser.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/GLUE2.h0000644000000000000000000000012412045235201022022 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200577.048732 30 ctime=1513200664.819806434 nordugrid-arc-5.4.2/include/arc/compute/GLUE2.h0000644000175000002070000000006112045235201022064 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/GLUE2.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/JobDescriptionParserPlugin.h0000644000000000000000000000012312072511126026460 xustar000000000000000027 mtime=1357550166.724559 27 atime=1513200577.045732 29 ctime=1513200664.82680652 nordugrid-arc-5.4.2/include/arc/compute/JobDescriptionParserPlugin.h0000644000175000002070000000010612072511126026523 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/JobDescriptionParserPlugin.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/JobInformationStorage.h0000644000000000000000000000012412110430416025447 xustar000000000000000027 mtime=1361195278.370091 27 atime=1513200577.045732 30 ctime=1513200664.827806532 nordugrid-arc-5.4.2/include/arc/compute/JobInformationStorage.h0000644000175000002070000000010112110430416025504 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/JobInformationStorage.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/GLUE2Entity.h0000644000000000000000000000012412045235201023217 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200577.046732 30 ctime=1513200664.820806446 nordugrid-arc-5.4.2/include/arc/compute/GLUE2Entity.h0000644000175000002070000000006712045235201023267 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/GLUE2Entity.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/JobDescription.h0000644000000000000000000000012412045235201024122 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200577.048732 30 ctime=1513200664.824806495 nordugrid-arc-5.4.2/include/arc/compute/JobDescription.h0000644000175000002070000000007212045235201024166 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/JobDescription.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/ComputingServiceRetriever.h0000644000000000000000000000012412045235201026362 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200577.048732 30 ctime=1513200664.810806324 nordugrid-arc-5.4.2/include/arc/compute/ComputingServiceRetriever.h0000644000175000002070000000010512045235201026423 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/ComputingServiceRetriever.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/EndpointQueryingStatus.h0000644000000000000000000000012312045235201025713 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200577.045732 29 ctime=1513200664.81380636 nordugrid-arc-5.4.2/include/arc/compute/EndpointQueryingStatus.h0000644000175000002070000000010212045235201025752 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/EndpointQueryingStatus.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/Broker.h0000644000000000000000000000012412045235201022430 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200577.048732 30 ctime=1513200664.807806287 nordugrid-arc-5.4.2/include/arc/compute/Broker.h0000644000175000002070000000006212045235201022473 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/Broker.h" 
nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/SubmitterPlugin.h0000644000000000000000000000012412045235201024341 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200577.043732 30 ctime=1513200664.840806691 nordugrid-arc-5.4.2/include/arc/compute/SubmitterPlugin.h0000644000175000002070000000007312045235201024406 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/SubmitterPlugin.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/JobInformationStorageXML.h0000644000000000000000000000012412207410346026037 xustar000000000000000027 mtime=1377702118.737323 27 atime=1513200577.043732 30 ctime=1513200664.830806568 nordugrid-arc-5.4.2/include/arc/compute/JobInformationStorageXML.h0000644000175000002070000000010412207410346026077 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/JobInformationStorageXML.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/ExecutionTarget.h0000644000000000000000000000012412045235201024316 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200577.046732 30 ctime=1513200664.817806409 nordugrid-arc-5.4.2/include/arc/compute/ExecutionTarget.h0000644000175000002070000000007312045235201024363 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/ExecutionTarget.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/JobInformationStorageBDB.h0000644000000000000000000000012412207644127025774 xustar000000000000000027 mtime=1377781847.058035 27 atime=1513200577.047732 30 ctime=1513200664.829806556 nordugrid-arc-5.4.2/include/arc/compute/JobInformationStorageBDB.h0000644000175000002070000000010412207644127026034 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/JobInformationStorageBDB.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/Job.h0000644000000000000000000000012412045235201021716 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200577.046732 30 ctime=1513200664.821806458 nordugrid-arc-5.4.2/include/arc/compute/Job.h0000644000175000002070000000005712045235201021765 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/Job.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/JobSupervisor.h0000644000000000000000000000012412045235201024020 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200577.047732 30 ctime=1513200664.833806605 nordugrid-arc-5.4.2/include/arc/compute/JobSupervisor.h0000644000175000002070000000007112045235201024063 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/JobSupervisor.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/Submitter.h0000644000000000000000000000012412045235201023162 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200577.048732 30 ctime=1513200664.838806666 nordugrid-arc-5.4.2/include/arc/compute/Submitter.h0000644000175000002070000000006512045235201023230 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/Submitter.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/EntityRetriever.h0000644000000000000000000000012412045235201024350 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200577.045732 30 ctime=1513200664.814806373 nordugrid-arc-5.4.2/include/arc/compute/EntityRetriever.h0000644000175000002070000000007312045235201024415 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/EntityRetriever.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/JobState.h0000644000000000000000000000012412045235201022717 
xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200577.046732 30 ctime=1513200664.831806581 nordugrid-arc-5.4.2/include/arc/compute/JobState.h0000644000175000002070000000006412045235201022764 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/JobState.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/Endpoint.h0000644000000000000000000000012412045235201022764 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200577.048732 30 ctime=1513200664.811806336 nordugrid-arc-5.4.2/include/arc/compute/Endpoint.h0000644000175000002070000000006412045235201023031 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/Endpoint.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/BrokerPlugin.h0000644000000000000000000000012412072545154023622 xustar000000000000000027 mtime=1357564524.902889 27 atime=1513200577.045732 30 ctime=1513200664.809806312 nordugrid-arc-5.4.2/include/arc/compute/BrokerPlugin.h0000644000175000002070000000007012072545154023664 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/BrokerPlugin.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/Software.h0000644000000000000000000000012312045235201022775 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200577.043732 29 ctime=1513200664.83580663 nordugrid-arc-5.4.2/include/arc/compute/Software.h0000644000175000002070000000006412045235201023043 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/Software.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/TestACCControl.h0000644000000000000000000000012412045235201023773 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200577.043732 30 ctime=1513200664.841806703 nordugrid-arc-5.4.2/include/arc/compute/TestACCControl.h0000644000175000002070000000007212045235201024037 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/TestACCControl.h" nordugrid-arc-5.4.2/include/arc/compute/PaxHeaders.7502/EntityRetrieverPlugin.h0000644000000000000000000000012412072530566025543 xustar000000000000000027 mtime=1357558134.840799 27 atime=1513200577.048732 30 ctime=1513200664.816806397 nordugrid-arc-5.4.2/include/arc/compute/EntityRetrieverPlugin.h0000644000175000002070000000010112072530566025600 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/EntityRetrieverPlugin.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/Counter.h0000644000000000000000000000012411060677516021166 xustar000000000000000027 mtime=1220771662.084916 27 atime=1513200577.056733 30 ctime=1513200664.770805835 nordugrid-arc-5.4.2/include/arc/Counter.h0000644000175000002070000000005711060677516021235 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/Counter.h" nordugrid-arc-5.4.2/include/arc/PaxHeaders.7502/StringConv.h0000644000000000000000000000012410672011347021633 xustar000000000000000027 mtime=1189614311.554858 27 atime=1513200577.088733 30 ctime=1513200664.792806104 nordugrid-arc-5.4.2/include/arc/StringConv.h0000644000175000002070000000006210672011347021676 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/StringConv.h" nordugrid-arc-5.4.2/include/PaxHeaders.7502/Makefile.am0000644000000000000000000000012713153453763020671 xustar000000000000000027 mtime=1504598003.163227 30 atime=1513200592.015916016 30 ctime=1513200664.757805675 nordugrid-arc-5.4.2/include/Makefile.am0000644000175000002070000000075113153453763020736 0ustar00mockbuildmock00000000000000HEADERFILESCHECK: 
./$(DEPDIR)/HEADERFILES: HEADERFILESCHECK echo "HEADERFILES = \\" > HEADERFILES find $(srcdir) -name \*.h -print | sort | \ sed -e 's|^$(srcdir)/||' -e 's/$$/ \\/' >> HEADERFILES echo "./$(DEPDIR)/HEADERFILES" >> HEADERFILES if diff ./$(DEPDIR)/HEADERFILES HEADERFILES >/dev/null 2>&1 ; then \ rm -f HEADERFILES ; \ else \ mkdir -p ./$(DEPDIR) ; \ mv HEADERFILES ./$(DEPDIR)/HEADERFILES ; \ fi include ./$(DEPDIR)/HEADERFILES EXTRA_DIST = $(HEADERFILES) nordugrid-arc-5.4.2/include/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315720020662 xustar000000000000000030 mtime=1513200592.045916382 30 atime=1513200652.899660646 30 ctime=1513200664.758805688 nordugrid-arc-5.4.2/include/Makefile.in0000644000175000002070000004024713214315720020737 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = include DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = 
@ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = 
@LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ 
jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ EXTRA_DIST = $(HEADERFILES) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign include/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign include/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am HEADERFILESCHECK: ./$(DEPDIR)/HEADERFILES: HEADERFILESCHECK echo "HEADERFILES = \\" > HEADERFILES find $(srcdir) -name \*.h -print | sort | \ sed -e 's|^$(srcdir)/||' -e 's/$$/ \\/' >> HEADERFILES echo "./$(DEPDIR)/HEADERFILES" >> HEADERFILES if diff ./$(DEPDIR)/HEADERFILES HEADERFILES >/dev/null 2>&1 ; then \ rm -f HEADERFILES ; \ else \ mkdir -p ./$(DEPDIR) ; \ mv HEADERFILES ./$(DEPDIR)/HEADERFILES ; \ fi include ./$(DEPDIR)/HEADERFILES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/include/PaxHeaders.7502/.deps0000644000000000000000000000013213214316030017544 xustar000000000000000030 mtime=1513200664.986808476 30 atime=1513200668.716854096 30 ctime=1513200664.986808476 nordugrid-arc-5.4.2/include/.deps/0000755000175000002070000000000013214316030017667 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/include/.deps/PaxHeaders.7502/HEADERFILES0000644000000000000000000000013213214316030021257 xustar000000000000000030 mtime=1513200664.722805247 30 atime=1513200664.729805333 30 ctime=1513200664.986808476 nordugrid-arc-5.4.2/include/.deps/HEADERFILES0000644000175000002070000001224113214316030021325 0ustar00mockbuildmock00000000000000HEADERFILES = \ arc/ArcConfig.h \ arc/ArcConfigFile.h \ arc/ArcConfigIni.h \ arc/ArcLocation.h \ arc/ArcRegex.h \ arc/ArcVersion.h \ arc/Base64.h \ arc/CheckSum.h \ arc/Counter.h \ arc/DBInterface.h \ arc/DateTime.h \ arc/FileAccess.h \ arc/FileLock.h \ arc/FileUtils.h \ arc/GUID.h \ arc/HostnameResolver.h \ arc/IString.h \ arc/IniConfig.h \ arc/IntraProcessCounter.h \ arc/JobPerfLog.h \ arc/Logger.h \ arc/MysqlWrapper.h \ arc/OptionParser.h \ arc/Profile.h \ arc/Run.h \ arc/StringConv.h \ arc/Thread.h \ arc/URL.h \ arc/User.h \ arc/UserConfig.h \ arc/Utils.h \ arc/Watchdog.h \ arc/XMLNode.h \ arc/communication/ClientInterface.h \ arc/communication/ClientSAML2SSO.h \ arc/communication/ClientX509Delegation.h \ arc/compute/Broker.h \ arc/compute/BrokerPlugin.h \ arc/compute/ComputingServiceRetriever.h \ arc/compute/Endpoint.h \ arc/compute/EndpointQueryingStatus.h \ arc/compute/EntityRetriever.h \ arc/compute/EntityRetrieverPlugin.h \ arc/compute/ExecutionTarget.h \ arc/compute/GLUE2.h \ arc/compute/GLUE2Entity.h \ arc/compute/Job.h \ arc/compute/JobControllerPlugin.h \ arc/compute/JobDescription.h \ arc/compute/JobDescriptionParserPlugin.h \ arc/compute/JobInformationStorage.h \ arc/compute/JobInformationStorageBDB.h \ arc/compute/JobInformationStorageXML.h \ arc/compute/JobState.h \ arc/compute/JobSupervisor.h \ arc/compute/RSLParser.h \ arc/compute/Software.h \ arc/compute/SubmissionStatus.h \ arc/compute/Submitter.h \ arc/compute/SubmitterPlugin.h \ arc/compute/TestACCControl.h \ arc/credential/CertUtil.h \ arc/credential/Credential.h \ arc/credential/NSSUtil.h \ arc/credential/PasswordSource.h \ arc/credential/Proxycertinfo.h \ arc/credential/VOMSAttribute.h \ arc/credential/VOMSConfig.h \ arc/credential/VOMSUtil.h \ arc/credentialstore/ClientVOMS.h \ arc/credentialstore/ClientVOMSRESTful.h \ arc/credentialstore/CredentialStore.h \ arc/crypto/OpenSSL.h \ arc/data-staging/DTR.h \ arc/data-staging/DTRList.h \ arc/data-staging/DTRStatus.h \ arc/data-staging/DataDelivery.h \ arc/data-staging/DataDeliveryComm.h \ arc/data-staging/Generator.h \ arc/data-staging/Processor.h \ arc/data-staging/Scheduler.h \ arc/data-staging/TransferShares.h \ arc/data/DataBuffer.h \ arc/data/DataCallback.h \ arc/data/DataHandle.h \ arc/data/DataMover.h \ arc/data/DataPoint.h \ arc/data/DataPointDirect.h \ arc/data/DataPointIndex.h \ arc/data/DataSpeed.h \ arc/data/DataStatus.h \ arc/data/FileCache.h \ arc/data/FileCacheHash.h \ arc/data/FileInfo.h \ arc/data/MkDirRecursive.h \ arc/data/URLMap.h \ arc/delegation/DelegationInterface.h \ arc/external/cJSON/cJSON.h \ arc/external/libs3/error_parser.h \ arc/external/libs3/libs3.h \ arc/external/libs3/request.h \ arc/external/libs3/request_context.h \ arc/external/libs3/response_headers_handler.h \ arc/external/libs3/simplexml.h \ arc/external/libs3/util.h \ 
arc/globusutils/GSSCredential.h \ arc/globusutils/GlobusErrorUtils.h \ arc/globusutils/GlobusWorkarounds.h \ arc/infosys/InfoCache.h \ arc/infosys/InfoFilter.h \ arc/infosys/InfoRegister.h \ arc/infosys/InformationInterface.h \ arc/infosys/RegisteredService.h \ arc/job/error.h \ arc/job/runtimeenvironment.h \ arc/loader/FinderLoader.h \ arc/loader/Loader.h \ arc/loader/ModuleManager.h \ arc/loader/Plugin.h \ arc/message/MCC.h \ arc/message/MCCLoader.h \ arc/message/MCC_Status.h \ arc/message/Message.h \ arc/message/MessageAttributes.h \ arc/message/MessageAuth.h \ arc/message/PayloadRaw.h \ arc/message/PayloadSOAP.h \ arc/message/PayloadStream.h \ arc/message/Plexer.h \ arc/message/SOAPEnvelope.h \ arc/message/SOAPMessage.h \ arc/message/SecAttr.h \ arc/message/SecHandler.h \ arc/message/Service.h \ arc/security/ArcPDP/EvaluationCtx.h \ arc/security/ArcPDP/Evaluator.h \ arc/security/ArcPDP/EvaluatorLoader.h \ arc/security/ArcPDP/PolicyParser.h \ arc/security/ArcPDP/PolicyStore.h \ arc/security/ArcPDP/Request.h \ arc/security/ArcPDP/RequestItem.h \ arc/security/ArcPDP/Response.h \ arc/security/ArcPDP/Result.h \ arc/security/ArcPDP/Source.h \ arc/security/ArcPDP/alg/AlgFactory.h \ arc/security/ArcPDP/alg/CombiningAlg.h \ arc/security/ArcPDP/alg/DenyOverridesAlg.h \ arc/security/ArcPDP/alg/OrderedAlg.h \ arc/security/ArcPDP/alg/PermitOverridesAlg.h \ arc/security/ArcPDP/attr/AnyURIAttribute.h \ arc/security/ArcPDP/attr/AttributeFactory.h \ arc/security/ArcPDP/attr/AttributeProxy.h \ arc/security/ArcPDP/attr/AttributeValue.h \ arc/security/ArcPDP/attr/BooleanAttribute.h \ arc/security/ArcPDP/attr/DateTimeAttribute.h \ arc/security/ArcPDP/attr/GenericAttribute.h \ arc/security/ArcPDP/attr/RequestAttribute.h \ arc/security/ArcPDP/attr/StringAttribute.h \ arc/security/ArcPDP/attr/X500NameAttribute.h \ arc/security/ArcPDP/fn/EqualFunction.h \ arc/security/ArcPDP/fn/FnFactory.h \ arc/security/ArcPDP/fn/Function.h \ arc/security/ArcPDP/fn/InRangeFunction.h \ arc/security/ArcPDP/fn/MatchFunction.h \ arc/security/ArcPDP/policy/Policy.h \ arc/security/ClassLoader.h \ arc/security/PDP.h \ arc/security/Security.h \ arc/win32.h \ arc/ws-addressing/WSA.h \ arc/ws-security/SAMLToken.h \ arc/ws-security/UsernameToken.h \ arc/ws-security/X509Token.h \ arc/wsrf/WSRF.h \ arc/wsrf/WSRFBaseFault.h \ arc/wsrf/WSResourceProperties.h \ arc/xmlsec/XMLSecNode.h \ arc/xmlsec/XmlSecUtils.h \ arc/xmlsec/saml_util.h \ ./.deps/HEADERFILES nordugrid-arc-5.4.2/include/PaxHeaders.7502/README0000644000000000000000000000012110667337355017514 xustar000000000000000024 mtime=1188937453.911 27 atime=1513200577.111733 30 ctime=1513200664.755805651 nordugrid-arc-5.4.2/include/README0000644000175000002070000000200310667337355017557 0ustar00mockbuildmock00000000000000 This directory contains a tree of header files referring to files with the same name located in the source directory src/ - the actual header files. The referred files constitute the API of the ARC HED software. The files' location in this tree is defined by how the API is presented to an external developer. The actual header files are located next to the corresponding source files, and their location is defined by the convenience of the developers of ARC HED. These files are not used during the installation procedure. Upon installation the actual header files get installed under the same layout as the files in this directory. The files in this directory are used throughout all ARC code. All source and header files of the entire source tree must refer to the actual header files through these ones.
Code built outside the source tree should refer either to the installed actual header files or to those located in this directory. Because the two layouts are identical, switching between the two options does not require any changes in the source and header files of the code; only the build procedure needs to be changed. nordugrid-arc-5.4.2/PaxHeaders.7502/ABOUT-NLS0000644000000000000000000000013013214315702016417 xustar000000000000000029 mtime=1513200578.02174486 29 atime=1513200578.02174486 30 ctime=1513200658.610730494 nordugrid-arc-5.4.2/ABOUT-NLS0000644000175000002070000011311313214315702016467 0ustar00mockbuildmock00000000000000Notes on the Free Translation Project ************************************* Free software is going international! The Free Translation Project is a way to get maintainers of free software, translators, and users all together, so that they will gradually become able to speak many languages. A few packages already provide translations for their messages. If you found this `ABOUT-NLS' file inside a distribution, you may assume that the distributed package does use GNU `gettext' internally, itself available at your nearest GNU archive site. But you do _not_ need to install GNU `gettext' prior to configuring, installing or using this package with messages translated. Installers will find here some useful hints. These notes also explain how users should proceed for getting the programs to use the available translations. They tell how people wanting to contribute and work at translations should contact the appropriate team. When reporting bugs in the `intl/' directory or bugs which may be related to internationalization, you should tell about the version of `gettext' which is used. The information can be found in the `intl/VERSION' file, in internationalized packages. Quick configuration advice ========================== If you want to exploit the full power of internationalization, you should configure it using ./configure --with-included-gettext to force usage of internationalizing routines provided within this package, despite the existence of internationalizing capabilities in the operating system where this package is being installed. So far, only the `gettext' implementation in the GNU C library version 2 provides as many features (such as locale alias, message inheritance, automatic charset conversion or plural form handling) as the implementation here. It is also not possible to offer this additional functionality on top of a `catgets' implementation. Future versions of GNU `gettext' will very likely convey even more functionality. So it might be a good idea to change to GNU `gettext' as soon as possible. So you need _not_ provide this option if you are using GNU libc 2 or you have installed a recent copy of the GNU gettext package with the included `libintl'. INSTALL Matters =============== Some packages are "localizable" when properly installed; the programs they contain can be made to speak your own native language. Most such packages use GNU `gettext'. Other packages have their own ways to internationalization, predating GNU `gettext'. By default, this package will be installed to allow translation of messages. It will automatically detect whether the system already provides the GNU `gettext' functions. If not, the GNU `gettext' own library will be used. This library is wholly contained within this package, usually in the `intl/' subdirectory, so prior installation of the GNU `gettext' package is _not_ required.
Installers may use special options at configuration time for changing the default behaviour. The commands: ./configure --with-included-gettext ./configure --disable-nls will respectively bypass any pre-existing `gettext' to use the internationalizing routines provided within this package, or else, _totally_ disable translation of messages. When you already have GNU `gettext' installed on your system and run configure without an option for your new package, `configure' will probably detect the previously built and installed `libintl.a' file and will decide to use this. This might be not what is desirable. You should use the more recent version of the GNU `gettext' library. I.e. if the file `intl/VERSION' shows that the library which comes with this package is more recent, you should use ./configure --with-included-gettext to prevent auto-detection. The configuration process will not test for the `catgets' function and therefore it will not be used. The reason is that even an emulation of `gettext' on top of `catgets' could not provide all the extensions of the GNU `gettext' library. Internationalized packages have usually many `po/LL.po' files, where LL gives an ISO 639 two-letter code identifying the language. Unless translations have been forbidden at `configure' time by using the `--disable-nls' switch, all available translations are installed together with the package. However, the environment variable `LINGUAS' may be set, prior to configuration, to limit the installed set. `LINGUAS' should then contain a space separated list of two-letter codes, stating which languages are allowed. Using This Package ================== As a user, if your language has been installed for this package, you only have to set the `LANG' environment variable to the appropriate `LL_CC' combination. Here `LL' is an ISO 639 two-letter language code, and `CC' is an ISO 3166 two-letter country code. For example, let's suppose that you speak German and live in Germany. At the shell prompt, merely execute `setenv LANG de_DE' (in `csh'), `export LANG; LANG=de_DE' (in `sh') or `export LANG=de_DE' (in `bash'). This can be done from your `.login' or `.profile' file, once and for all. You might think that the country code specification is redundant. But in fact, some languages have dialects in different countries. For example, `de_AT' is used for Austria, and `pt_BR' for Brazil. The country code serves to distinguish the dialects. The locale naming convention of `LL_CC', with `LL' denoting the language and `CC' denoting the country, is the one use on systems based on GNU libc. On other systems, some variations of this scheme are used, such as `LL' or `LL_CC.ENCODING'. You can get the list of locales supported by your system for your country by running the command `locale -a | grep '^LL''. Not all programs have translations for all languages. By default, an English message is shown in place of a nonexistent translation. If you understand other languages, you can set up a priority list of languages. This is done through a different environment variable, called `LANGUAGE'. GNU `gettext' gives preference to `LANGUAGE' over `LANG' for the purpose of message handling, but you still need to have `LANG' set to the primary language; this is required by other parts of the system libraries. For example, some Swedish users who would rather read translations in German than English for when Swedish is not available, set `LANGUAGE' to `sv:de' while leaving `LANG' to `sv_SE'. 
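As a compact illustration of the two stages described above, the configure-time and run-time settings can be sketched as follows; the language codes are only an example and sh/bash syntax is assumed:

   # At configuration time: restrict the installed translations,
   # or turn message translation off entirely.
   LINGUAS="sv de" ./configure
   ./configure --disable-nls
   # At run time: primary locale plus an optional preference list.
   LANG=sv_SE
   LANGUAGE=sv:de
   export LANG LANGUAGE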
In the `LANGUAGE' environment variable, but not in the `LANG' environment variable, `LL_CC' combinations can be abbreviated as `LL' to denote the language's main dialect. For example, `de' is equivalent to `de_DE' (German as spoken in Germany), and `pt' to `pt_PT' (Portuguese as spoken in Portugal) in this context. Translating Teams ================= For the Free Translation Project to be a success, we need interested people who like their own language and write it well, and who are also able to synergize with other translators speaking the same language. Each translation team has its own mailing list. The up-to-date list of teams can be found at the Free Translation Project's homepage, `http://www.iro.umontreal.ca/contrib/po/HTML/', in the "National teams" area. If you'd like to volunteer to _work_ at translating messages, you should become a member of the translating team for your own language. The subscribing address is _not_ the same as the list itself, it has `-request' appended. For example, speakers of Swedish can send a message to `sv-request@li.org', having this message body: subscribe Keep in mind that team members are expected to participate _actively_ in translations, or at solving translational difficulties, rather than merely lurking around. If your team does not exist yet and you want to start one, or if you are unsure about what to do or how to get started, please write to `translation@iro.umontreal.ca' to reach the coordinator for all translator teams. The English team is special. It works at improving and uniformizing the terminology in use. Proven linguistic skill are praised more than programming skill, here. Available Packages ================== Languages are not equally supported in all packages. The following matrix shows the current state of internationalization, as of May 2003. The matrix shows, in regard of each package, for which languages PO files have been submitted to translation coordination, with a translation percentage of at least 50%. 
Ready PO files am az be bg ca cs da de el en en_GB eo es +-------------------------------------------+ a2ps | [] [] [] [] | aegis | () | anubis | | ap-utils | | bash | [] [] [] | batchelor | | bfd | [] [] | binutils | [] [] | bison | [] [] [] | bluez-pin | [] [] | clisp | | clisp | [] [] [] | coreutils | [] [] [] [] | cpio | [] [] [] | darkstat | () [] | diffutils | [] [] [] [] [] [] [] | e2fsprogs | [] [] | enscript | [] [] [] [] | error | [] [] [] [] [] | fetchmail | [] () [] [] [] [] | fileutils | [] [] [] | findutils | [] [] [] [] [] [] | flex | [] [] [] [] | gas | [] | gawk | [] [] [] [] | gcal | [] | gcc | [] [] | gettext | [] [] [] [] [] | gettext-runtime | [] [] [] [] [] | gettext-tools | [] [] | gimp-print | [] [] [] [] [] | gliv | | glunarclock | [] [] [] | gnucash | () [] | gnucash-glossary | [] () [] | gnupg | [] () [] [] [] [] | gpe-calendar | [] | gpe-conf | [] | gpe-contacts | [] | gpe-edit | | gpe-login | [] | gpe-ownerinfo | [] | gpe-sketchbook | [] | gpe-timesheet | | gpe-today | [] | gpe-todo | [] | gphoto2 | [] [] [] [] | gprof | [] [] | gpsdrive | () () () | grep | [] [] [] [] [] | gretl | [] | hello | [] [] [] [] [] [] | id-utils | [] [] | indent | [] [] [] [] | jpilot | [] [] [] [] | jwhois | [] | kbd | [] [] [] [] [] | ld | [] [] | libc | [] [] [] [] [] [] | libgpewidget | [] | libiconv | [] [] [] [] [] | lifelines | [] () | lilypond | [] | lingoteach | | lingoteach_lessons | () () | lynx | [] [] [] [] | m4 | [] [] [] [] | mailutils | [] [] | make | [] [] [] | man-db | [] () [] [] () | mysecretdiary | [] [] [] | nano | [] () [] [] [] | nano_1_0 | [] () [] [] [] | opcodes | [] [] | parted | [] [] [] [] [] | ptx | [] [] [] [] [] | python | | radius | | recode | [] [] [] [] [] [] | screem | | sed | [] [] [] [] [] | sh-utils | [] [] [] | sharutils | [] [] [] [] [] [] | sketch | [] () [] | soundtracker | [] [] [] | sp | [] | tar | [] [] [] [] | texinfo | [] [] [] [] | textutils | [] [] [] [] | tin | () () | util-linux | [] [] [] [] [] | vorbis-tools | [] [] [] | wastesedge | () | wdiff | [] [] [] [] | wget | [] [] [] [] [] [] [] | xchat | [] [] [] | xpad | | +-------------------------------------------+ am az be bg ca cs da de el en en_GB eo es 0 1 4 2 31 17 54 60 14 1 4 12 56 et fa fi fr ga gl he hr hu id it ja ko +----------------------------------------+ a2ps | [] [] [] () () | aegis | | anubis | [] | ap-utils | [] | bash | [] [] | batchelor | [] | bfd | [] [] | binutils | [] [] | bison | [] [] [] [] | bluez-pin | [] [] [] [] | clisp | | clisp | [] | coreutils | [] [] [] [] | cpio | [] [] [] [] | darkstat | () [] [] [] | diffutils | [] [] [] [] [] [] [] | e2fsprogs | | enscript | [] [] | error | [] [] [] [] | fetchmail | [] | fileutils | [] [] [] [] [] | findutils | [] [] [] [] [] [] [] [] [] [] [] | flex | [] [] | gas | [] | gawk | [] [] | gcal | [] | gcc | [] | gettext | [] [] [] | gettext-runtime | [] [] [] [] | gettext-tools | [] | gimp-print | [] [] | gliv | () | glunarclock | [] [] [] [] | gnucash | [] | gnucash-glossary | [] | gnupg | [] [] [] [] [] [] [] | gpe-calendar | [] | gpe-conf | | gpe-contacts | [] | gpe-edit | [] [] | gpe-login | [] | gpe-ownerinfo | [] [] [] | gpe-sketchbook | [] | gpe-timesheet | [] [] [] | gpe-today | [] [] | gpe-todo | [] [] | gphoto2 | [] [] [] | gprof | [] [] | gpsdrive | () [] () () | grep | [] [] [] [] [] [] [] [] [] [] [] | gretl | [] | hello | [] [] [] [] [] [] [] [] [] [] [] [] [] | id-utils | [] [] [] | indent | [] [] [] [] [] [] [] [] | jpilot | [] () | jwhois | [] [] [] [] | kbd | [] | ld | [] | libc | [] [] [] [] [] [] 
| libgpewidget | [] [] [] | libiconv | [] [] [] [] [] [] [] [] | lifelines | () | lilypond | [] | lingoteach | [] [] | lingoteach_lessons | | lynx | [] [] [] [] | m4 | [] [] [] [] | mailutils | | make | [] [] [] [] [] [] | man-db | [] () () | mysecretdiary | [] [] | nano | [] [] [] [] | nano_1_0 | [] [] [] [] | opcodes | [] [] | parted | [] [] [] | ptx | [] [] [] [] [] [] [] | python | | radius | | recode | [] [] [] [] [] [] | screem | | sed | [] [] [] [] [] [] [] [] | sh-utils | [] [] [] [] [] [] | sharutils | [] [] [] [] [] | sketch | [] | soundtracker | [] [] [] | sp | [] () | tar | [] [] [] [] [] [] [] [] [] | texinfo | [] [] [] [] | textutils | [] [] [] [] [] | tin | [] () | util-linux | [] [] [] [] () [] | vorbis-tools | [] | wastesedge | () | wdiff | [] [] [] [] [] | wget | [] [] [] [] [] [] [] [] | xchat | [] [] [] | xpad | | +----------------------------------------+ et fa fi fr ga gl he hr hu id it ja ko 20 1 15 73 14 24 8 10 30 31 19 31 9 lg lt lv ms nb nl nn no pl pt pt_BR ro +----------------------------------------+ a2ps | [] [] () () () [] [] | aegis | () | anubis | [] [] | ap-utils | () | bash | [] | batchelor | | bfd | | binutils | | bison | [] [] [] [] | bluez-pin | [] | clisp | | clisp | [] | coreutils | [] | cpio | [] [] [] | darkstat | [] [] [] [] | diffutils | [] [] [] | e2fsprogs | | enscript | [] [] | error | [] [] | fetchmail | () () | fileutils | [] | findutils | [] [] [] [] | flex | [] | gas | | gawk | [] | gcal | | gcc | | gettext | [] | gettext-runtime | [] | gettext-tools | | gimp-print | [] | gliv | [] | glunarclock | [] | gnucash | | gnucash-glossary | [] [] | gnupg | | gpe-calendar | [] [] | gpe-conf | [] [] | gpe-contacts | [] | gpe-edit | [] [] | gpe-login | [] [] | gpe-ownerinfo | [] [] | gpe-sketchbook | [] [] | gpe-timesheet | [] [] | gpe-today | [] [] | gpe-todo | [] [] | gphoto2 | | gprof | [] | gpsdrive | () () () | grep | [] [] [] [] | gretl | | hello | [] [] [] [] [] [] [] [] [] | id-utils | [] [] [] | indent | [] [] [] | jpilot | () () | jwhois | [] [] [] | kbd | | ld | | libc | [] [] [] [] | libgpewidget | [] [] | libiconv | [] [] | lifelines | | lilypond | [] | lingoteach | | lingoteach_lessons | | lynx | [] [] | m4 | [] [] [] [] | mailutils | | make | [] [] | man-db | [] | mysecretdiary | [] | nano | [] [] [] [] | nano_1_0 | [] [] [] [] | opcodes | [] [] [] | parted | [] [] [] | ptx | [] [] [] [] [] [] [] | python | | radius | | recode | [] [] [] | screem | | sed | [] [] | sh-utils | [] | sharutils | [] | sketch | [] | soundtracker | | sp | | tar | [] [] [] [] [] [] | texinfo | [] | textutils | [] | tin | | util-linux | [] [] | vorbis-tools | [] [] | wastesedge | | wdiff | [] [] [] [] | wget | [] [] [] | xchat | [] [] | xpad | [] | +----------------------------------------+ lg lt lv ms nb nl nn no pl pt pt_BR ro 0 0 2 11 7 26 3 4 18 15 34 34 ru sk sl sr sv ta tr uk vi wa zh_CN zh_TW +-------------------------------------------+ a2ps | [] [] [] [] [] | 16 aegis | () | 0 anubis | [] [] | 5 ap-utils | () | 1 bash | [] | 7 batchelor | | 1 bfd | [] [] [] | 7 binutils | [] [] [] | 7 bison | [] [] | 13 bluez-pin | | 7 clisp | | 0 clisp | | 5 coreutils | [] [] [] [] [] | 14 cpio | [] [] [] | 13 darkstat | [] () () | 9 diffutils | [] [] [] [] | 21 e2fsprogs | [] | 3 enscript | [] [] [] | 11 error | [] [] [] | 14 fetchmail | [] | 7 fileutils | [] [] [] [] [] [] | 15 findutils | [] [] [] [] [] [] | 27 flex | [] [] [] | 10 gas | [] | 3 gawk | [] [] | 9 gcal | [] [] | 4 gcc | [] | 4 gettext | [] [] [] [] [] [] | 15 gettext-runtime | [] [] [] [] [] [] | 16 
gettext-tools | [] [] | 5 gimp-print | [] [] | 10 gliv | | 1 glunarclock | [] [] [] | 11 gnucash | [] [] | 4 gnucash-glossary | [] [] [] | 8 gnupg | [] [] [] [] | 16 gpe-calendar | [] | 5 gpe-conf | | 3 gpe-contacts | [] | 4 gpe-edit | [] | 5 gpe-login | [] | 5 gpe-ownerinfo | [] | 7 gpe-sketchbook | [] | 5 gpe-timesheet | [] | 6 gpe-today | [] | 6 gpe-todo | [] | 6 gphoto2 | [] [] | 9 gprof | [] [] | 7 gpsdrive | [] [] | 3 grep | [] [] [] [] | 24 gretl | | 2 hello | [] [] [] [] [] | 33 id-utils | [] [] [] | 11 indent | [] [] [] [] | 19 jpilot | [] [] [] [] [] | 10 jwhois | () () [] [] | 10 kbd | [] [] | 8 ld | [] [] | 5 libc | [] [] [] [] | 20 libgpewidget | | 6 libiconv | [] [] [] [] [] [] | 21 lifelines | [] | 2 lilypond | [] | 4 lingoteach | | 2 lingoteach_lessons | () | 0 lynx | [] [] [] [] | 14 m4 | [] [] [] | 15 mailutils | | 2 make | [] [] [] [] | 15 man-db | [] | 6 mysecretdiary | [] [] | 8 nano | [] [] [] | 15 nano_1_0 | [] [] [] | 15 opcodes | [] [] | 9 parted | [] [] | 13 ptx | [] [] [] | 22 python | | 0 radius | | 0 recode | [] [] [] [] | 19 screem | [] | 1 sed | [] [] [] [] [] | 20 sh-utils | [] [] [] | 13 sharutils | [] [] [] [] | 16 sketch | [] | 5 soundtracker | [] | 7 sp | [] | 3 tar | [] [] [] [] [] | 24 texinfo | [] [] [] [] | 13 textutils | [] [] [] [] [] | 15 tin | | 1 util-linux | [] [] | 14 vorbis-tools | [] | 7 wastesedge | | 0 wdiff | [] [] [] [] | 17 wget | [] [] [] [] [] [] [] | 25 xchat | [] [] [] | 11 xpad | | 1 +-------------------------------------------+ 50 teams ru sk sl sr sv ta tr uk vi wa zh_CN zh_TW 97 domains 32 19 16 0 56 0 48 10 1 1 12 23 913 Some counters in the preceding matrix are higher than the number of visible blocks let us expect. This is because a few extra PO files are used for implementing regional variants of languages, or language dialects. For a PO file in the matrix above to be effective, the package to which it applies should also have been internationalized and distributed as such by its maintainer. There might be an observable lag between the mere existence a PO file and its wide availability in a distribution. If May 2003 seems to be old, you may fetch a more recent copy of this `ABOUT-NLS' file on most GNU archive sites. The most up-to-date matrix with full percentage details can be found at `http://www.iro.umontreal.ca/contrib/po/HTML/matrix.html'. Using `gettext' in new packages =============================== If you are writing a freely available program and want to internationalize it you are welcome to use GNU `gettext' in your package. Of course you have to respect the GNU Library General Public License which covers the use of the GNU `gettext' library. This means in particular that even non-free programs can use `libintl' as a shared library, whereas only free software can use `libintl' as a static library or use modified versions of `libintl'. Once the sources are changed appropriately and the setup can handle the use of `gettext' the only thing missing are the translations. The Free Translation Project is also available for packages which are not developed inside the GNU project. Therefore the information given above applies also for every other Free Software Project. Contact `translation@iro.umontreal.ca' to make the `.pot' files available to the translation teams. 
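As a rough illustration of the maintainer side of the workflow described above, the `.pot' template and the per-language `.po'/`.mo' files are normally produced with the standard gettext tools; the file names, directory layout and the `_' keyword below are placeholders, not taken from this package:

   # Extract translatable strings from the sources into a template.
   xgettext --keyword=_ -o po/example.pot src/*.c
   # Create a new translation, or merge new strings into an existing one.
   msginit -l de -i po/example.pot -o po/de.po
   msgmerge --update po/de.po po/example.pot
   # Compile the catalog that is installed and looked up at run time.
   msgfmt -o po/de.mo po/de.po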
nordugrid-arc-5.4.2/PaxHeaders.7502/missing0000644000000000000000000000013213214315717016574 xustar000000000000000030 mtime=1513200591.900914607 30 atime=1513200608.567118445 30 ctime=1513200658.619730605 nordugrid-arc-5.4.2/missing0000755000175000002070000002623313214315717016653 0ustar00mockbuildmock00000000000000#! /bin/sh # Common stub for a few missing GNU programs while installing. scriptversion=2009-04-28.21; # UTC # Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003, 2004, 2005, 2006, # 2008, 2009 Free Software Foundation, Inc. # Originally by Fran,cois Pinard , 1996. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. if test $# -eq 0; then echo 1>&2 "Try \`$0 --help' for more information" exit 1 fi run=: sed_output='s/.* --output[ =]\([^ ]*\).*/\1/p' sed_minuso='s/.* -o \([^ ]*\).*/\1/p' # In the cases where this matters, `missing' is being run in the # srcdir already. if test -f configure.ac; then configure_ac=configure.ac else configure_ac=configure.in fi msg="missing on your system" case $1 in --run) # Try to run requested program, and just exit if it succeeds. run= shift "$@" && exit 0 # Exit code 63 means version mismatch. This often happens # when the user try to use an ancient version of a tool on # a file that requires a minimum version. In this case we # we should proceed has if the program had been absent, or # if --run hadn't been passed. if test $? = 63; then run=: msg="probably too old" fi ;; -h|--h|--he|--hel|--help) echo "\ $0 [OPTION]... PROGRAM [ARGUMENT]... Handle \`PROGRAM [ARGUMENT]...' for when PROGRAM is missing, or return an error status if there is no known handling for PROGRAM. Options: -h, --help display this help and exit -v, --version output version information and exit --run try to run the given command, and emulate it if it fails Supported PROGRAM values: aclocal touch file \`aclocal.m4' autoconf touch file \`configure' autoheader touch file \`config.h.in' autom4te touch the output file, or create a stub one automake touch all \`Makefile.in' files bison create \`y.tab.[ch]', if possible, from existing .[ch] flex create \`lex.yy.c', if possible, from existing .c help2man touch the output file lex create \`lex.yy.c', if possible, from existing .c makeinfo touch the output file tar try tar, gnutar, gtar, then tar without non-portable flags yacc create \`y.tab.[ch]', if possible, from existing .[ch] Version suffixes to PROGRAM as well as the prefixes \`gnu-', \`gnu', and \`g' are ignored when checking the name. Send bug reports to ." exit $? ;; -v|--v|--ve|--ver|--vers|--versi|--versio|--version) echo "missing $scriptversion (GNU Automake)" exit $? 
;; -*) echo 1>&2 "$0: Unknown \`$1' option" echo 1>&2 "Try \`$0 --help' for more information" exit 1 ;; esac # normalize program name to check for. program=`echo "$1" | sed ' s/^gnu-//; t s/^gnu//; t s/^g//; t'` # Now exit if we have it, but it failed. Also exit now if we # don't have it and --version was passed (most likely to detect # the program). This is about non-GNU programs, so use $1 not # $program. case $1 in lex*|yacc*) # Not GNU programs, they don't have --version. ;; tar*) if test -n "$run"; then echo 1>&2 "ERROR: \`tar' requires --run" exit 1 elif test "x$2" = "x--version" || test "x$2" = "x--help"; then exit 1 fi ;; *) if test -z "$run" && ($1 --version) > /dev/null 2>&1; then # We have it, but it failed. exit 1 elif test "x$2" = "x--version" || test "x$2" = "x--help"; then # Could not run --version or --help. This is probably someone # running `$TOOL --version' or `$TOOL --help' to check whether # $TOOL exists and not knowing $TOOL uses missing. exit 1 fi ;; esac # If it does not exist, or fails to run (possibly an outdated version), # try to emulate it. case $program in aclocal*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified \`acinclude.m4' or \`${configure_ac}'. You might want to install the \`Automake' and \`Perl' packages. Grab them from any GNU archive site." touch aclocal.m4 ;; autoconf*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified \`${configure_ac}'. You might want to install the \`Autoconf' and \`GNU m4' packages. Grab them from any GNU archive site." touch configure ;; autoheader*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified \`acconfig.h' or \`${configure_ac}'. You might want to install the \`Autoconf' and \`GNU m4' packages. Grab them from any GNU archive site." files=`sed -n 's/^[ ]*A[CM]_CONFIG_HEADER(\([^)]*\)).*/\1/p' ${configure_ac}` test -z "$files" && files="config.h" touch_files= for f in $files; do case $f in *:*) touch_files="$touch_files "`echo "$f" | sed -e 's/^[^:]*://' -e 's/:.*//'`;; *) touch_files="$touch_files $f.in";; esac done touch $touch_files ;; automake*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified \`Makefile.am', \`acinclude.m4' or \`${configure_ac}'. You might want to install the \`Automake' and \`Perl' packages. Grab them from any GNU archive site." find . -type f -name Makefile.am -print | sed 's/\.am$/.in/' | while read f; do touch "$f"; done ;; autom4te*) echo 1>&2 "\ WARNING: \`$1' is needed, but is $msg. You might have modified some files without having the proper tools for further handling them. You can get \`$1' as part of \`Autoconf' from any GNU archive site." file=`echo "$*" | sed -n "$sed_output"` test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` if test -f "$file"; then touch $file else test -z "$file" || exec >$file echo "#! /bin/sh" echo "# Created by GNU Automake missing as a replacement of" echo "# $ $@" echo "exit 0" chmod +x $file exit 1 fi ;; bison*|yacc*) echo 1>&2 "\ WARNING: \`$1' $msg. You should only need it if you modified a \`.y' file. You may need the \`Bison' package in order for those modifications to take effect. You can get \`Bison' from any GNU archive site." rm -f y.tab.c y.tab.h if test $# -ne 1; then eval LASTARG="\${$#}" case $LASTARG in *.y) SRCFILE=`echo "$LASTARG" | sed 's/y$/c/'` if test -f "$SRCFILE"; then cp "$SRCFILE" y.tab.c fi SRCFILE=`echo "$LASTARG" | sed 's/y$/h/'` if test -f "$SRCFILE"; then cp "$SRCFILE" y.tab.h fi ;; esac fi if test ! 
-f y.tab.h; then echo >y.tab.h fi if test ! -f y.tab.c; then echo 'main() { return 0; }' >y.tab.c fi ;; lex*|flex*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified a \`.l' file. You may need the \`Flex' package in order for those modifications to take effect. You can get \`Flex' from any GNU archive site." rm -f lex.yy.c if test $# -ne 1; then eval LASTARG="\${$#}" case $LASTARG in *.l) SRCFILE=`echo "$LASTARG" | sed 's/l$/c/'` if test -f "$SRCFILE"; then cp "$SRCFILE" lex.yy.c fi ;; esac fi if test ! -f lex.yy.c; then echo 'main() { return 0; }' >lex.yy.c fi ;; help2man*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified a dependency of a manual page. You may need the \`Help2man' package in order for those modifications to take effect. You can get \`Help2man' from any GNU archive site." file=`echo "$*" | sed -n "$sed_output"` test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` if test -f "$file"; then touch $file else test -z "$file" || exec >$file echo ".ab help2man is required to generate this page" exit $? fi ;; makeinfo*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified a \`.texi' or \`.texinfo' file, or any other file indirectly affecting the aspect of the manual. The spurious call might also be the consequence of using a buggy \`make' (AIX, DU, IRIX). You might want to install the \`Texinfo' package or the \`GNU make' package. Grab either from any GNU archive site." # The file to touch is that specified with -o ... file=`echo "$*" | sed -n "$sed_output"` test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` if test -z "$file"; then # ... or it is the one specified with @setfilename ... infile=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'` file=`sed -n ' /^@setfilename/{ s/.* \([^ ]*\) *$/\1/ p q }' $infile` # ... or it is derived from the source name (dir/f.texi becomes f.info) test -z "$file" && file=`echo "$infile" | sed 's,.*/,,;s,.[^.]*$,,'`.info fi # If the file does not exist, the user really needs makeinfo; # let's fail without touching anything. test -f $file || exit 1 touch $file ;; tar*) shift # We have already tried tar in the generic part. # Look for gnutar/gtar before invocation to avoid ugly error # messages. if (gnutar --version > /dev/null 2>&1); then gnutar "$@" && exit 0 fi if (gtar --version > /dev/null 2>&1); then gtar "$@" && exit 0 fi firstarg="$1" if shift; then case $firstarg in *o*) firstarg=`echo "$firstarg" | sed s/o//` tar "$firstarg" "$@" && exit 0 ;; esac case $firstarg in *h*) firstarg=`echo "$firstarg" | sed s/h//` tar "$firstarg" "$@" && exit 0 ;; esac fi echo 1>&2 "\ WARNING: I can't seem to be able to run \`tar' with the given arguments. You may want to install GNU tar or Free paxutils, or check the command line arguments." exit 1 ;; *) echo 1>&2 "\ WARNING: \`$1' is needed, and is $msg. You might have modified some files without having the proper tools for further handling them. Check the \`README' file, it often tells you about the needed prerequisites for installing this package. You may also peek at any GNU archive site, in case some other package would contain this missing \`$1' program." 
exit 1 ;; esac exit 0 # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: nordugrid-arc-5.4.2/PaxHeaders.7502/debian0000644000000000000000000000013213214316034016336 xustar000000000000000030 mtime=1513200668.686853729 30 atime=1513200668.716854096 30 ctime=1513200668.686853729 nordugrid-arc-5.4.2/debian/0000755000175000002070000000000013214316034016461 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/debian/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612643220702020453 xustar000000000000000026 mtime=1452089794.51291 30 atime=1513200591.926914925 30 ctime=1513200668.631853057 nordugrid-arc-5.4.2/debian/Makefile.am0000644000175000002070000000064312643220702020521 0ustar00mockbuildmock00000000000000EXTRA_DIST = changelog compat control copyright rules watch source/format \ $(srcdir)/*.install $(srcdir)/*.docs $(srcdir)/*.dirs \ $(srcdir)/*.default $(srcdir)/*.logrotate \ $(srcdir)/*.postinst $(srcdir)/*.postrm \ $(srcdir)/*.lintian-overrides README.Debian README.source changelog: changelog.deb cp -p changelog.deb changelog MAINTAINERCLEANFILES = changelog nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-plugins-globus.lintian-overrides0000644000000000000000000000012312331113144027477 xustar000000000000000026 mtime=1399101028.72906 27 atime=1513200576.920731 30 ctime=1513200668.684853705 nordugrid-arc-5.4.2/debian/nordugrid-arc-plugins-globus.lintian-overrides0000644000175000002070000000023012331113144027540 0ustar00mockbuildmock00000000000000# This library is a utility library for the globus dependent plugins nordugrid-arc-plugins-globus: package-name-doesnt-match-sonames libarcglobusutils3 nordugrid-arc-5.4.2/debian/PaxHeaders.7502/rules0000644000000000000000000000012413153456135017502 xustar000000000000000027 mtime=1504599133.152908 27 atime=1513200576.914731 30 ctime=1513200668.636853118 nordugrid-arc-5.4.2/debian/rules0000755000175000002070000001370613153456135017561 0ustar00mockbuildmock00000000000000#!/usr/bin/make -f -include /usr/share/dpkg/buildflags.mk # Filter out -Wl,-Bsymbolic-functions from default Ubuntu LDFLAGS COMMA = , LDFLAGS := $(filter-out -Wl$(COMMA)-Bsymbolic-functions,$(LDFLAGS)) ifeq ($(shell pkg-config --atleast-version 2.6 sigc++-2.0 && echo 1),1) ifeq ($(shell echo __GNUC__ | gcc -E - | tail -1),5) # Workaround for too new libsigc++/glibmm, too old gcc combination CXXFLAGS += -std=c++11 endif endif CANL_SUPPORT = no ifeq ($(CANL_SUPPORT),yes) CANL_CONFIGURE_OPTION = --enable-canlxx else CANL_CONFIGURE_OPTION = endif configure: configure-stamp configure-stamp: dh_testdir if [ ! 
-f po/POTFILES.in.save ] ; then \ cp -p po/POTFILES.in po/POTFILES.in.save ; \ fi dh_autoreconf CFLAGS="$(CFLAGS)" CXXFLAGS="$(CXXFLAGS)" \ CPPFLAGS="$(CPPFLAGS)" LDFLAGS="$(LDFLAGS) -Wl,-z,defs" \ ./configure --host=$(DEB_HOST_GNU_TYPE) \ --build=$(DEB_BUILD_GNU_TYPE) \ --prefix=/usr \ --libexecdir='$${prefix}/lib' \ --sysconfdir=/etc \ --localstatedir=/var \ --mandir='$${datadir}/man' \ --infodir='$${datadir}/info' \ --disable-doc \ --with-docdir='$${datadir}/doc/nordugrid-arc' \ --with-jnidir='$${datadir}/java' \ --with-jninativedir='$${libdir}/jni' \ --enable-sqlite \ $(CANL_CONFIGURE_OPTION) touch $@ build: build-arch build-indep build-arch: build-stamp build-indep: build-stamp build-stamp: configure-stamp dh_testdir $(MAKE) ifeq ($(filter nocheck,$(DEB_BUILD_OPTIONS)),) $(MAKE) check endif touch $@ clean: dh_testdir dh_testroot if [ -r Makefile ] ; then $(MAKE) distclean ; fi rm -f debian/nordugrid-arc-arex.a-rex.init rm -f debian/nordugrid-arc-hed.arched.init rm -f debian/nordugrid-arc-cache-service.arc-cache-service.init rm -f debian/nordugrid-arc-datadelivery-service.arc-datadelivery-service.init rm -f debian/nordugrid-arc-gridftpd.gridftpd.init rm -f debian/nordugrid-arc-aris.nordugrid-arc-bdii.init rm -f debian/nordugrid-arc-ldap-infosys.nordugrid-arc-ldap-infosys.init rm -f debian/nordugrid-arc-ldap-infosys.nordugrid-arc-slapd.init rm -f debian/nordugrid-arc-ldap-infosys.nordugrid-arc-inforeg.init rm -f debian/nordugrid-arc-egiis.nordugrid-arc-egiis.init rm -f debian/nordugrid-arc-acix-cache.acix-cache.init rm -f debian/nordugrid-arc-acix-index.acix-index.init if [ -f po/POTFILES.in.save ] ; then \ mv po/POTFILES.in.save po/POTFILES.in ; \ fi find python src -name *.pyc -exec rm {} ';' rm -rf src/services/acix/*/test/_trial_temp dh_autoreconf_clean dh_clean configure-stamp build-stamp install: build-stamp dh_testdir dh_testroot dh_prep $(MAKE) DESTDIR=$(CURDIR)/debian/tmp install find $(CURDIR)/debian/tmp -name \*.la -exec rm -fv '{}' ';' rm -f $(CURDIR)/debian/tmp/usr/lib/arc/lib*.a rm -f $(CURDIR)/debian/tmp/usr/lib/libarcglobusutils.so mv debian/tmp/etc/init.d/a-rex \ debian/nordugrid-arc-arex.a-rex.init mv debian/tmp/etc/init.d/arched \ debian/nordugrid-arc-hed.arched.init mv debian/tmp/etc/init.d/arc-cache-service \ debian/nordugrid-arc-cache-service.arc-cache-service.init mv debian/tmp/etc/init.d/arc-datadelivery-service \ debian/nordugrid-arc-datadelivery-service.arc-datadelivery-service.init mv debian/tmp/etc/init.d/gridftpd \ debian/nordugrid-arc-gridftpd.gridftpd.init mv debian/tmp/etc/init.d/nordugrid-arc-bdii \ debian/nordugrid-arc-aris.nordugrid-arc-bdii.init mv debian/tmp/etc/init.d/nordugrid-arc-ldap-infosys \ debian/nordugrid-arc-ldap-infosys.nordugrid-arc-ldap-infosys.init mv debian/tmp/etc/init.d/nordugrid-arc-slapd \ debian/nordugrid-arc-ldap-infosys.nordugrid-arc-slapd.init mv debian/tmp/etc/init.d/nordugrid-arc-inforeg \ debian/nordugrid-arc-ldap-infosys.nordugrid-arc-inforeg.init mv debian/tmp/etc/init.d/nordugrid-arc-egiis \ debian/nordugrid-arc-egiis.nordugrid-arc-egiis.init mv debian/tmp/etc/init.d/acix-cache \ debian/nordugrid-arc-acix-cache.acix-cache.init mv debian/tmp/etc/init.d/acix-index \ debian/nordugrid-arc-acix-index.acix-index.init binary: binary-arch binary-indep binary-arch: install dh_testdir dh_testroot dh_installdirs -a dh_installdocs -a dh_installexamples -a dh_installman -a dh_installlogrotate -a dh_install -a --fail-missing dh_installchangelogs -a dh_installinit -p nordugrid-arc-hed --name arched dh_installinit -p nordugrid-arc-arex 
--name a-rex dh_installinit -p nordugrid-arc-cache-service --name arc-cache-service dh_installinit -p nordugrid-arc-datadelivery-service --name arc-datadelivery-service dh_installinit -p nordugrid-arc-egiis --name nordugrid-arc-egiis -- start 76 2 3 4 5 . stop 24 0 1 6 . dh_installinit -p nordugrid-arc-gridftpd --name gridftpd dh_perl -a [ -x /usr/bin/dh_python2 ] && dh_python2 -a || dh_pysupport -a dh_lintian -a dh_link -a dh_strip -a dh_compress -a -X .pdf dh_fixperms -a dh_makeshlibs -a -X arc-infoindex-slapd-wrapper.so dh_installdeb -a dh_shlibdeps -a dh_gencontrol -a dh_md5sums -a dh_builddeb -a binary-indep: install dh_testdir dh_testroot dh_installdirs -i dh_installdocs -i dh_installexamples -i dh_installman -i dh_installlogrotate -i dh_install -i --fail-missing dh_installchangelogs -i dh_installinit -p nordugrid-arc-aris --name nordugrid-arc-bdii -- start 76 2 3 4 5 . stop 24 0 1 6 . dh_installinit -p nordugrid-arc-ldap-infosys --name nordugrid-arc-ldap-infosys -- stop 20 0 1 2 3 4 5 6 . dh_installinit -p nordugrid-arc-ldap-infosys --name nordugrid-arc-slapd -- start 75 2 3 4 5 . stop 25 0 1 6 . dh_installinit -p nordugrid-arc-ldap-infosys --name nordugrid-arc-inforeg -- start 75 2 3 4 5 . stop 25 0 1 6 . dh_installinit -p nordugrid-arc-acix-cache --name acix-cache dh_installinit -p nordugrid-arc-acix-index --name acix-index dh_perl -i [ -x /usr/bin/dh_python2 ] && dh_python2 -i || dh_pysupport -i dh_lintian -i dh_link -i dh_compress -i -X .pdf dh_fixperms -i dh_installdeb -i dh_gencontrol -i dh_md5sums -i dh_builddeb -i .PHONY: binary binary-arch binary-indep build build-arch build-indep clean configure install nordugrid-arc-5.4.2/debian/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315720020461 xustar000000000000000030 mtime=1513200592.000915833 30 atime=1513200652.915660842 30 ctime=1513200668.631853057 nordugrid-arc-5.4.2/debian/Makefile.in0000644000175000002070000004051613214315720020535 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = debian DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/changelog.deb.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = changelog.deb CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = 
@DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ 
PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ EXTRA_DIST = changelog compat control copyright rules watch source/format \ $(srcdir)/*.install $(srcdir)/*.docs $(srcdir)/*.dirs \ $(srcdir)/*.default $(srcdir)/*.logrotate \ $(srcdir)/*.postinst $(srcdir)/*.postrm \ $(srcdir)/*.lintian-overrides 
README.Debian README.source MAINTAINERCLEANFILES = changelog all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign debian/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign debian/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): changelog.deb: $(top_builddir)/config.status $(srcdir)/changelog.deb.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am changelog: changelog.deb cp -p changelog.deb changelog # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-java.install0000644000000000000000000000012412222312540023457 xustar000000000000000027 mtime=1380554080.164649 27 atime=1513200576.987732 30 ctime=1513200668.653853326 nordugrid-arc-5.4.2/debian/nordugrid-arc-java.install0000644000175000002070000000030412222312540023521 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/jni/libjarc.so debian/tmp/usr/share/java/arc.jar debian/tmp/usr/lib/arc/libjavaservice.so debian/tmp/usr/lib/arc/libjavaservice.apd debian/tmp/usr/share/arc/examples/sdk/*.java nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-ldap-infosys.nordugrid-arc-inforeg.default0000644000000000000000000000012312103734016031315 xustar000000000000000027 mtime=1359984654.688808 27 atime=1513200576.969731 29 ctime=1513200668.67385357 nordugrid-arc-5.4.2/debian/nordugrid-arc-ldap-infosys.nordugrid-arc-inforeg.default0000644000175000002070000000023212103734016031360 0ustar00mockbuildmock00000000000000# To enable nordugrid-arc-inforeg, i.e. to indicate that a readily usable # configuration is in place, comment out or delete the # following line. RUN=no nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-datadelivery-service.arc-datadelivery-servi0000644000000000000000000000012312104473522031557 xustar000000000000000026 mtime=1360164690.67083 27 atime=1513200577.037732 30 ctime=1513200668.670853534 nordugrid-arc-5.4.2/debian/nordugrid-arc-datadelivery-service.arc-datadelivery-service.default0000644000175000002070000000023512104473522033560 0ustar00mockbuildmock00000000000000# To enable arc-datadelivery-service, i.e. to indicate that a readily usable # configuration is in place, comment out or delete the # following line. 
RUN=no nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-arex.logrotate0000644000000000000000000000012412754411363024044 xustar000000000000000027 mtime=1471288051.238835 27 atime=1513200576.991732 30 ctime=1513200668.675853595 nordugrid-arc-5.4.2/debian/nordugrid-arc-arex.logrotate0000644000175000002070000000224112754411363024110 0ustar00mockbuildmock00000000000000/var/log/arc/grid-manager.log { missingok compress delaycompress daily rotate 14 create postrotate kill -HUP `cat /var/run/arched-arex.pid 2> /dev/null` 2> /dev/null || true endscript } /var/log/arc/ws-interface.log { missingok compress delaycompress daily rotate 14 postrotate kill -HUP `cat /var/run/arched-arex.pid 2> /dev/null` 2> /dev/null || true endscript create } /var/log/arc/cache-clean.log { missingok compress delaycompress daily rotate 14 create } /var/log/arc/accounting-jura.log { missingok compress delaycompress daily rotate 14 create } /var/log/arc/perfdata/arex.perflog { missingok compress daily rotate 14 create } /var/log/arc/perfdata/data.perflog { missingok compress daily rotate 14 create } /var/log/arc/perfdata/system.perflog { missingok compress daily rotate 14 create } /var/log/arc/perfdata/backends.perflog { missingok compress daily rotate 14 create } /var/log/arc/perfdata/sysinfo.perflog { missingok compress daily rotate 14 create } /var/log/arc/perfdata/submission.perflog { missingok compress daily rotate 14 create } nordugrid-arc-5.4.2/debian/PaxHeaders.7502/changelog.deb.in0000644000000000000000000000012712375052247021441 xustar000000000000000027 mtime=1408521383.769409 30 atime=1513200652.932661049 30 ctime=1513200668.632853069 nordugrid-arc-5.4.2/debian/changelog.deb.in0000644000175000002070000002160712375052247021511 0ustar00mockbuildmock00000000000000nordugrid-arc (@debianversion@-1) unstable; urgency=low * Unofficial build. 
-- Anders Waananen @DATER@ nordugrid-arc (4.0.0~rc2-1) unstable; urgency=low * 4.0.0 Release Candidate 2 -- Anders Waananen Thu, 07 Nov 2013 11:01:24 +0100 nordugrid-arc (4.0.0~rc1-1) unstable; urgency=low * 4.0.0 Release Candidate 1 -- Anders Waananen Tue, 29 Oct 2013 23:22:57 +0100 nordugrid-arc (3.0.3-1) unstable; urgency=low * 3.0.3 Final Release -- Anders Waananen Fri, 19 Jul 2013 12:05:50 +0200 nordugrid-arc (3.0.2-1) unstable; urgency=low * 3.0.2 Final Release -- Anders Waananen Wed, 12 Jun 2013 15:09:59 +0200 nordugrid-arc (3.0.1-1) unstable; urgency=low * 3.0.1 Final Release -- Anders Waananen Tue, 30 Apr 2013 00:47:43 +0200 nordugrid-arc (3.0.1~rc2-1) unstable; urgency=low * 3.0.1 Release Candidate 2 -- Anders Waananen Fri, 12 Apr 2013 16:56:03 +0200 nordugrid-arc (3.0.1~rc1-1) unstable; urgency=low * 3.0.1 Release Candidate 1 -- Anders Waananen Fri, 12 Apr 2013 13:50:41 +0200 nordugrid-arc (3.0.0-1) unstable; urgency=low * 3.0.0 Final Release -- Anders Waananen Fri, 22 Mar 2013 12:32:51 +0100 nordugrid-arc (3.0.0~rc5-1) unstable; urgency=low * 3.0.0 Release Candidate 5 -- Anders Waananen Wed, 06 Feb 2013 12:12:48 +0100 nordugrid-arc (3.0.0~rc4-1) unstable; urgency=low * 3.0.0 Release Candidate 4 -- Anders Waananen Sat, 02 Feb 2013 01:00:33 +0100 nordugrid-arc (3.0.0~rc3-1) unstable; urgency=low * 3.0.0 Release Candidate 3 -- Anders Waananen Wed, 30 Jan 2013 09:02:17 +0100 nordugrid-arc (3.0.0~rc2-1) unstable; urgency=low * 3.0.0 Release Candidate 2 -- Anders Waananen Mon, 28 Jan 2013 07:55:14 +0100 nordugrid-arc (3.0.0~rc1-1) unstable; urgency=low * 3.0.0 Release Candidate 1 -- Anders Waananen Thu, 06 Dec 2012 22:05:31 +0100 nordugrid-arc (2.0.1-1) unstable; urgency=low * 2.0.1 Final Release -- Anders Waananen Thu, 22 Nov 2012 23:47:19 +0100 nordugrid-arc (2.0.1rc2) unstable; urgency=low * 2.0.1rc2 Release Candidate 2 -- Anders Waananen Thu, 25 Oct 2012 13:00:02 +0200 nordugrid-arc (2.0.1rc1) unstable; urgency=low * 2.0.1rc1 Release Candidate 1 -- Anders Waananen Mon, 27 Aug 2012 13:26:30 +0200 nordugrid-arc (2.0.0-1) unstable; urgency=low * 2.0.0 Final Release -- Mattias Ellert Wed, 23 May 2012 19:27:47 +0200 nordugrid-arc (2.0.0~rc4-1) unstable; urgency=low * 2.0.0 Release Candidate 4 -- Mattias Ellert Mon, 02 Apr 2012 16:06:45 +0200 nordugrid-arc (2.0.0~rc3.1-1) unstable; urgency=low * 2.0.0 Release Candidate 3.1 -- Mattias Ellert Tue, 27 Mar 2012 10:30:23 +0200 nordugrid-arc (2.0.0~rc3-1) unstable; urgency=low * 2.0.0 Release Candidate 3 -- Mattias Ellert Mon, 05 Mar 2012 16:27:32 +0100 nordugrid-arc (2.0.0~rc2-1) unstable; urgency=low * 2.0.0 Release Candidate 2 -- Mattias Ellert Wed, 15 Feb 2012 13:54:17 +0100 nordugrid-arc (1.1.0-1) unstable; urgency=low * 1.1.0 Final Release -- Mattias Ellert Mon, 03 Oct 2011 14:30:45 +0200 nordugrid-arc (1.1.0~rc2-1) unstable; urgency=low * 1.1.0 Release Candidate 2 -- Mattias Ellert Sun, 25 Sep 2011 05:42:22 +0200 nordugrid-arc (1.1.0~rc1-1) unstable; urgency=low * 1.1.0 Release Candidate 1 -- Mattias Ellert Sun, 11 Sep 2011 20:08:33 +0200 nordugrid-arc (1.0.1-1) unstable; urgency=low * 1.0.0 Final Release -- Mattias Ellert Sat, 23 Jul 2011 09:32:53 +0200 nordugrid-arc (1.0.1~rc4-1) unstable; urgency=low * 1.0.1 Release Candidate 4 -- Mattias Ellert Tue, 19 Jul 2011 15:17:05 +0200 nordugrid-arc (1.0.1~rc1-1) unstable; urgency=low * 1.0.1 Release Candidate 1 -- Mattias Ellert Sat, 18 Jun 2011 18:29:09 +0200 nordugrid-arc (1.0.0-1) unstable; urgency=low * 1.0.0 Final Release -- Mattias Ellert Mon, 18 Apr 2011 08:59:55 +0200 nordugrid-arc 
(1.0.0~b5-1) unstable; urgency=low * 1.0.0 Beta Release 5 -- Mattias Ellert Wed, 06 Apr 2011 14:08:52 +0200 nordugrid-arc (1.0.0~b4-1) unstable; urgency=low * 1.0.0 Beta Release 4 -- Mattias Ellert Wed, 23 Mar 2011 15:19:08 +0100 nordugrid-arc (1.0.0~b3-1) unstable; urgency=low * 1.0.0 Beta Release 3 -- Mattias Ellert Thu, 10 Mar 2011 17:05:28 +0100 nordugrid-arc (1.0.0~b2-1) unstable; urgency=low * 1.0.0 Beta Release 2 -- Mattias Ellert Mon, 07 Mar 2011 05:12:30 +0100 nordugrid-arc (1.0.0~b1-1) unstable; urgency=low * 1.0.0 Beta Release 1 -- Mattias Ellert Mon, 14 Feb 2011 17:19:04 +0100 nordugrid-arc-nox (1.2.1-1) unstable; urgency=low * 1.2.1 Final release -- Mattias Ellert Tue, 21 Dec 2010 22:34:02 +0100 nordugrid-arc-nox (1.2.1~rc2-1) unstable; urgency=low * 1.2.1 Release Candidate 2 -- Mattias Ellert Tue, 21 Dec 2010 09:36:46 +0100 nordugrid-arc-nox (1.2.1~rc1-1) unstable; urgency=low * 1.2.1 Release Candidate 1 -- Mattias Ellert Wed, 08 Dec 2010 15:30:37 +0100 nordugrid-arc-nox (1.2.0-1) unstable; urgency=low * 1.2.0 Final release -- Mattias Ellert Fri, 22 Oct 2010 15:25:07 +0200 nordugrid-arc-nox (1.2.0~rc2-1) unstable; urgency=low * 1.2.0 Release Candidate 2 -- Mattias Ellert Thu, 30 Sep 2010 10:11:14 +0200 nordugrid-arc-nox (1.2.0~rc1-1) unstable; urgency=low * 1.2.0 Release Candidate 1 -- Mattias Ellert Mon, 13 Sep 2010 11:14:51 +0200 nordugrid-arc-nox (1.1.0-1) unstable; urgency=low * 1.1.0 Final release -- Mattias Ellert Wed, 05 May 2010 18:31:59 +0200 nordugrid-arc-nox (1.1.0~rc6-1) unstable; urgency=low * 1.1.0 Release Candidate 6 -- Mattias Ellert Mon, 08 Mar 2010 20:36:00 +0100 nordugrid-arc-nox (1.1.0~rc5-2) unstable; urgency=low * Rebuild for Globus Toolkit 5 -- Mattias Ellert Fri, 26 Feb 2010 16:25:39 +0100 nordugrid-arc-nox (1.1.0~rc5-1) unstable; urgency=low * 1.1.0 Release Candidate 5 -- Mattias Ellert Fri, 26 Feb 2010 15:07:39 +0100 nordugrid-arc-nox (1.1.0~rc4-1) unstable; urgency=low * 1.1.0 release candidate 4 -- Mattias Ellert Wed, 24 Feb 2010 12:34:41 +0100 nordugrid-arc-nox (1.1.0~rc3-1) unstable; urgency=low * 1.1.0 release candidate 3 -- Mattias Ellert Mon, 22 Feb 2010 10:20:27 +0100 nordugrid-arc-nox (1.1.0~rc2-1) unstable; urgency=low * 1.1.0 release candidate 2 -- Mattias Ellert Mon, 15 Feb 2010 19:08:07 +0100 nordugrid-arc-nox (1.1.0~rc1-1) unstable; urgency=low * 1.1.0 release candidate 1 -- Mattias Ellert Thu, 11 Feb 2010 19:48:01 +0100 nordugrid-arc-nox (1.0.0-1) unstable; urgency=low * 1.0.0 Final release -- Mattias Ellert Sun, 29 Nov 2009 23:13:41 +0100 nordugrid-arc-nox (1.0.0~rc7-1) unstable; urgency=low * 1.0.0 release candidate 7 -- Mattias Ellert Thu, 19 Nov 2009 15:30:32 +0100 nordugrid-arc-nox (1.0.0~rc6-1) unstable; urgency=low * 1.0.0 release candidate 6 -- Mattias Ellert Thu, 12 Nov 2009 10:12:45 +0100 nordugrid-arc-nox (1.0.0~rc5-1) unstable; urgency=low * 1.0.0 release candidate 5 -- Mattias Ellert Wed, 04 Nov 2009 16:45:22 +0100 nordugrid-arc1 (0.9.4~rc4-1) unstable; urgency=low * 0.9.3 release candidate 4 -- Mattias Ellert Mon, 26 Oct 2009 23:19:55 +0100 nordugrid-arc1 (0.9.4~rc3-1) unstable; urgency=low * 0.9.3 release candidate 3 -- Mattias Ellert Thu, 22 Oct 2009 19:22:31 +0200 nordugrid-arc1 (0.9.4~rc2-1) unstable; urgency=low * 0.9.3 release candidate 2 -- Mattias Ellert Thu, 15 Oct 2009 09:04:24 +0200 nordugrid-arc1 (0.9.3-1) unstable; urgency=low * Final 0.9.3 release -- Mattias Ellert Sun, 27 Sep 2009 01:27:31 +0200 nordugrid-arc1 (0.9.3~rc3-1) unstable; urgency=low * Initial release -- Mattias Ellert Mon, 5 Nov 2007 10:12:49 
-0400 nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-aris.nordugrid-arc-bdii.default0000644000000000000000000000012412064022012027112 xustar000000000000000027 mtime=1355817994.146117 27 atime=1513200576.987732 30 ctime=1513200668.668853509 nordugrid-arc-5.4.2/debian/nordugrid-arc-aris.nordugrid-arc-bdii.default0000644000175000002070000000022712064022012027160 0ustar00mockbuildmock00000000000000# To enable nordugrid-arc-bdii, i.e. to indicate that a readily usable # configuration is in place, comment out or delete the # following line. RUN=no nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-client.install0000644000000000000000000000012412442365766024042 xustar000000000000000027 mtime=1418324982.290198 27 atime=1513200576.911731 30 ctime=1513200668.645853228 nordugrid-arc-5.4.2/debian/nordugrid-arc-client.install0000644000175000002070000000250612442365766024112 0ustar00mockbuildmock00000000000000debian/tmp/usr/bin/arccat debian/tmp/usr/bin/arcclean debian/tmp/usr/bin/arccp debian/tmp/usr/bin/arcecho debian/tmp/usr/bin/arcget debian/tmp/usr/bin/arcinfo debian/tmp/usr/bin/arckill debian/tmp/usr/bin/arcls debian/tmp/usr/bin/arcmkdir debian/tmp/usr/bin/arcproxy debian/tmp/usr/bin/arcrename debian/tmp/usr/bin/arcrenew debian/tmp/usr/bin/arcresub debian/tmp/usr/bin/arcresume debian/tmp/usr/bin/arcrm debian/tmp/usr/bin/arcstat debian/tmp/usr/bin/arcsub debian/tmp/usr/bin/arcsync debian/tmp/usr/bin/arctest debian/tmp/etc/arc/client.conf debian/tmp/usr/share/arc/examples/client.conf debian/tmp/usr/share/man/man1/arccat.1 debian/tmp/usr/share/man/man1/arcclean.1 debian/tmp/usr/share/man/man1/arccp.1 debian/tmp/usr/share/man/man1/arcecho.1 debian/tmp/usr/share/man/man1/arcget.1 debian/tmp/usr/share/man/man1/arcinfo.1 debian/tmp/usr/share/man/man1/arckill.1 debian/tmp/usr/share/man/man1/arcls.1 debian/tmp/usr/share/man/man1/arcmkdir.1 debian/tmp/usr/share/man/man1/arcproxy.1 debian/tmp/usr/share/man/man1/arcrename.1 debian/tmp/usr/share/man/man1/arcrenew.1 debian/tmp/usr/share/man/man1/arcresub.1 debian/tmp/usr/share/man/man1/arcresume.1 debian/tmp/usr/share/man/man1/arcrm.1 debian/tmp/usr/share/man/man1/arcstat.1 debian/tmp/usr/share/man/man1/arcsub.1 debian/tmp/usr/share/man/man1/arcsync.1 debian/tmp/usr/share/man/man1/arctest.1 nordugrid-arc-5.4.2/debian/PaxHeaders.7502/source0000644000000000000000000000013213214316034017636 xustar000000000000000030 mtime=1513200668.638853142 30 atime=1513200668.716854096 30 ctime=1513200668.638853142 nordugrid-arc-5.4.2/debian/source/0000755000175000002070000000000013214316034017761 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/debian/source/PaxHeaders.7502/format0000644000000000000000000000012411400724205021125 xustar000000000000000027 mtime=1275308165.297286 27 atime=1513200576.919731 30 ctime=1513200668.638853142 nordugrid-arc-5.4.2/debian/source/format0000644000175000002070000000001411400724205021165 0ustar00mockbuildmock000000000000003.0 (quilt) nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-arex.install0000644000000000000000000000012412755024121023503 xustar000000000000000027 mtime=1471424593.590537 27 atime=1513200576.992732 30 ctime=1513200668.643853203 nordugrid-arc-5.4.2/debian/nordugrid-arc-arex.install0000644000175000002070000000540112755024121023550 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/arc/cache-clean debian/tmp/usr/lib/arc/cache-list debian/tmp/usr/lib/arc/jura debian/tmp/usr/lib/arc/ssmsend debian/tmp/usr/lib/arc/downloader debian/tmp/usr/lib/arc/gm-* debian/tmp/usr/lib/arc/smtp-send 
debian/tmp/usr/lib/arc/smtp-send.sh debian/tmp/usr/lib/arc/uploader debian/tmp/usr/lib/arc/inputcheck debian/tmp/usr/lib/arc/arc-vomsac-check debian/tmp/usr/lib/arc/arc-config-check debian/tmp/usr/lib/arc/arc-blahp-logger debian/tmp/usr/share/arc/DGAuthplug.py* debian/tmp/usr/share/arc/DGBridgeDataPlugin.py* debian/tmp/usr/share/arc/DGLog2XML.py* debian/tmp/usr/share/arc/cancel-*-job debian/tmp/usr/share/arc/scan-*-job debian/tmp/usr/share/arc/submit-*-job debian/tmp/usr/share/arc/perferator debian/tmp/usr/lib/arc/libarex.so debian/tmp/usr/lib/arc/libarex.apd debian/tmp/usr/share/arc/CEinfo.pl debian/tmp/usr/share/arc/ARC0mod.pm debian/tmp/usr/share/arc/FORKmod.pm debian/tmp/usr/share/arc/Fork.pm debian/tmp/usr/share/arc/SGEmod.pm debian/tmp/usr/share/arc/SGE.pm debian/tmp/usr/share/arc/LL.pm debian/tmp/usr/share/arc/LSF.pm debian/tmp/usr/share/arc/PBS.pm debian/tmp/usr/share/arc/Condor.pm debian/tmp/usr/share/arc/SLURMmod.pm debian/tmp/usr/share/arc/SLURM.pm debian/tmp/usr/share/arc/DGBridge.pm debian/tmp/usr/share/arc/Boinc.pm debian/tmp/usr/share/arc/XmlPrinter.pm debian/tmp/usr/share/arc/InfosysHelper.pm debian/tmp/usr/share/arc/LdifPrinter.pm debian/tmp/usr/share/arc/GLUE2xmlPrinter.pm debian/tmp/usr/share/arc/GLUE2ldifPrinter.pm debian/tmp/usr/share/arc/NGldifPrinter.pm debian/tmp/usr/share/arc/ARC0ClusterInfo.pm debian/tmp/usr/share/arc/ARC1ClusterInfo.pm debian/tmp/usr/share/arc/ConfigCentral.pm debian/tmp/usr/share/arc/GMJobsInfo.pm debian/tmp/usr/share/arc/HostInfo.pm debian/tmp/usr/share/arc/RTEInfo.pm debian/tmp/usr/share/arc/InfoChecker.pm debian/tmp/usr/share/arc/IniParser.pm debian/tmp/usr/share/arc/LRMSInfo.pm debian/tmp/usr/share/arc/Sysinfo.pm debian/tmp/usr/share/arc/LogUtils.pm debian/tmp/usr/share/arc/cancel_common.sh debian/tmp/usr/share/arc/condor_env.pm debian/tmp/usr/share/arc/config_parser.sh debian/tmp/usr/share/arc/configure-*-env.sh debian/tmp/usr/share/arc/submit_common.sh debian/tmp/usr/share/arc/scan_common.sh debian/tmp/usr/share/man/man1/cache-clean.1 debian/tmp/usr/share/man/man1/cache-list.1 debian/tmp/usr/share/man/man1/jura.1 debian/tmp/usr/share/man/man8/gm-*.8 debian/tmp/usr/share/man/man1/arc-config-check.1 debian/tmp/usr/share/man/man8/arc-vomsac-check.8 debian/tmp/usr/share/man/man8/arc-blahp-logger.8 debian/tmp/usr/share/man/man8/a-rex-backtrace-collect.8 debian/tmp/usr/share/arc/ssm/__init__.py* debian/tmp/usr/share/arc/ssm/crypto.py* debian/tmp/usr/share/arc/ssm/ssm2.py* debian/tmp/usr/share/arc/ssm/brokers.py* debian/tmp/usr/share/arc/ssm/sender.cfg debian/tmp/usr/sbin/a-rex-backtrace-collect nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-hed.install0000644000000000000000000000012411605266607023316 xustar000000000000000027 mtime=1310027143.633758 27 atime=1513200576.919731 30 ctime=1513200668.651853301 nordugrid-arc-5.4.2/debian/nordugrid-arc-hed.install0000644000175000002070000000044211605266607023363 0ustar00mockbuildmock00000000000000debian/tmp/usr/sbin/arched debian/tmp/usr/lib/arc/libecho.so debian/tmp/usr/lib/arc/libecho.apd debian/tmp/usr/share/man/man8/arched.8 debian/tmp/usr/share/man/man5/arc.conf.5 debian/tmp/usr/share/arc/profiles debian/tmp/usr/share/arc/examples/config debian/tmp/usr/share/arc/examples/echo nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-ldap-infosys.install0000644000000000000000000000012412103734016025152 xustar000000000000000027 mtime=1359984654.688808 27 atime=1513200576.949731 30 ctime=1513200668.654853338 
nordugrid-arc-5.4.2/debian/nordugrid-arc-ldap-infosys.install0000644000175000002070000000036612103734016025224 0ustar00mockbuildmock00000000000000debian/tmp/usr/share/arc/ldap-schema/nordugrid.schema debian/tmp/usr/share/arc/grid-info-soft-register debian/tmp/usr/share/arc/create-slapd-config debian/tmp/usr/share/arc/create-inforeg-config debian/tmp/usr/share/arc/config_parser_compat.sh nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-python.install0000644000000000000000000000012412242425051024063 xustar000000000000000027 mtime=1384786473.373687 27 atime=1513200576.912731 30 ctime=1513200668.659853399 nordugrid-arc-5.4.2/debian/nordugrid-arc-python.install0000644000175000002070000000170512242425051024133 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/python?.?/site-packages/_arc.*so debian/tmp/usr/lib/python?.?/site-packages/arc/__init__.py* debian/tmp/usr/lib/python?.?/site-packages/arc/common.py* debian/tmp/usr/lib/python?.?/site-packages/arc/loader.py* debian/tmp/usr/lib/python?.?/site-packages/arc/message.py* debian/tmp/usr/lib/python?.?/site-packages/arc/communication.py* debian/tmp/usr/lib/python?.?/site-packages/arc/compute.py* debian/tmp/usr/lib/python?.?/site-packages/arc/credential.py* debian/tmp/usr/lib/python?.?/site-packages/arc/data.py* debian/tmp/usr/lib/python?.?/site-packages/arc/delegation.py* debian/tmp/usr/lib/python?.?/site-packages/arc/security.py* debian/tmp/usr/lib/arc/libaccPythonBroker.so debian/tmp/usr/lib/arc/libpythonservice.so debian/tmp/usr/lib/arc/libaccPythonBroker.apd debian/tmp/usr/lib/arc/libpythonservice.apd debian/tmp/usr/share/arc/examples/PythonBroker debian/tmp/usr/share/arc/examples/sdk/*.py debian/tmp/usr/share/arc/examples/echo_python nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-egiis.install0000644000000000000000000000012411564512026023647 xustar000000000000000027 mtime=1305646102.719953 27 atime=1513200576.920731 30 ctime=1513200668.648853265 nordugrid-arc-5.4.2/debian/nordugrid-arc-egiis.install0000644000175000002070000000044711564512026023721 0ustar00mockbuildmock00000000000000debian/tmp/usr/sbin/arc-infoindex-relay debian/tmp/usr/sbin/arc-infoindex-server debian/tmp/usr/share/man/man8/arc-infoindex-relay.8 debian/tmp/usr/share/man/man8/arc-infoindex-server.8 debian/tmp/usr/lib/arc/arc-infoindex-slapd-wrapper.so debian/tmp/usr/lib/arc/arc-infoindex-slapd-wrapper.apd nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-dev.docs0000644000000000000000000000012412350510553022604 xustar000000000000000027 mtime=1403162987.293169 27 atime=1513200576.920731 30 ctime=1513200668.662853436 nordugrid-arc-5.4.2/debian/nordugrid-arc-dev.docs0000644000175000002070000000003112350510553022643 0ustar00mockbuildmock00000000000000src/hed/shc/arcpdp/*.xsd nordugrid-arc-5.4.2/debian/PaxHeaders.7502/copyright0000644000000000000000000000012412303365417020356 xustar000000000000000027 mtime=1393421071.617194 27 atime=1513200576.949731 30 ctime=1513200668.635853106 nordugrid-arc-5.4.2/debian/copyright0000644000175000002070000001102412303365417020421 0ustar00mockbuildmock00000000000000Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: nordugrid-arc Upstream-Contact: contact@nordigrd.org Source: http://download.nordugrid.org/packages/nordugrid-arc/releases Files: * Copyright: 2006-2013 David Cameron Péter Dóbé Mattias Ellert Thomas Frågåt Ali Gholami Michael Glodek Jørgen Beck Hansen Henrik Thostrup Jensen Daniel Johansson Johan Jönemo Dmytro Karpenko Tamás Kazinczy Marek Kočan Aleksandr Konstantinov 
Balázs Kónya Hajo Nils Krabbenhöft Juha Lento Peter Lundgaard Rosendahl Iván Márton Bjarte Mohn Steffen Möller Zsombor Nagy Aleksei Nazarov Jon Kerr Nilsen Markus Nordén Weizhong Qiang Gábor Rőczei Florido Paganelli Andrii Salnikov Martin Savko Martin Skou Andersen Oxana Smirnova Ferenc Szalai Gábor Szigeti Christian Ulrik Søttrup Adrian Taga Salman Zubair Toor Olli Tourunen Petter Urkedal Anders Wäänänen Thomas Zangerl . University of Copenhagen (Denmark) NORDUnet - Nordic Infrastructure for Research and Education (Denmark) CSC - IT Center for Science Ltd (Finland) University of Lübeck (Germany) NIIFI - National Information Infrastructure Development Institute (Hungary) University of Oslo (Norway) NordForsk (Norway) Pavol Jozef Šafárik University in Košice (Slovakia) Linköping University (Sweden) Lund University (Sweden) Royal Institute of Technology (Sweden) Uppsala University (Sweden) Taras Shevchenko National University of Kyiv (Ukraine) License: Apache-2.0 Files: src/services/a-rex/jura/ssm/* Copyright: 2012 STFC License: Apache-2.0 Files: src/services/acix/core/hashes.py Copyright: Arash Partow - 2002 License: CPL Free use of the General Purpose Hash Function Algorithms Library is permitted under the guidelines and in accordance with the most current version of the Common Public License. http://www.opensource.org/licenses/cpl1.0.php Files: src/external/cJSON/cJSON.c src/external/cJSON/cJSON.h Copyright: 2009 Dave Gamble License: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: . The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. . THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. License: Apache-2.0 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at . http://www.apache.org/licenses/LICENSE-2.0 . Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. . On Debian systems, the complete text of the Apache version 2.0 license can be found in /usr/share/common-licenses/Apache-2.0. 
nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-datadelivery-service.logrotate0000644000000000000000000000012412477513147027225 xustar000000000000000027 mtime=1425970791.470164 27 atime=1513200577.037732 30 ctime=1513200668.676853607 nordugrid-arc-5.4.2/debian/nordugrid-arc-datadelivery-service.logrotate0000644000175000002070000000034412477513147027273 0ustar00mockbuildmock00000000000000/var/log/arc/datadelivery-service.log { missingok compress delaycompress daily rotate 14 create postrotate kill -HUP `cat /var/run/arched-datadelivery-service.pid 2> /dev/null` 2> /dev/null || true endscript } nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-ldap-monitor.install0000644000000000000000000000012311512716067025157 xustar000000000000000027 mtime=1294703671.721346 27 atime=1513200577.037732 29 ctime=1513200668.65585335 nordugrid-arc-5.4.2/debian/nordugrid-arc-ldap-monitor.install0000644000175000002070000000012711512716067025225 0ustar00mockbuildmock00000000000000debian/tmp/usr/share/arc/ldap-monitor/* debian/tmp/usr/share/man/man7/ldap-monitor.7* nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-plugins-needed.install0000644000000000000000000000012412441125533025450 xustar000000000000000027 mtime=1417980763.391041 27 atime=1513200577.036732 30 ctime=1513200668.658853387 nordugrid-arc-5.4.2/debian/nordugrid-arc-plugins-needed.install0000644000175000002070000000342712441125533025523 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/arc/libaccARC1.so debian/tmp/usr/lib/arc/libaccBroker.so debian/tmp/usr/lib/arc/libaccCREAM.so debian/tmp/usr/lib/arc/libaccEMIES.so debian/tmp/usr/lib/arc/libaccJobDescriptionParser.so debian/tmp/usr/lib/arc/libaccSER.so debian/tmp/usr/lib/arc/libaccldap.so debian/tmp/usr/lib/arc/test/libaccTEST.so debian/tmp/usr/lib/arc/libarcshclegacy.so debian/tmp/usr/lib/arc/libarcshc.so debian/tmp/usr/lib/arc/libdmcfile.so debian/tmp/usr/lib/arc/libdmchttp.so debian/tmp/usr/lib/arc/libdmcldap.so debian/tmp/usr/lib/arc/libdmcsrm.so debian/tmp/usr/lib/arc/libdmcrucio.so debian/tmp/usr/lib/arc/libdmcacix.so debian/tmp/usr/lib/arc/libidentitymap.so debian/tmp/usr/lib/arc/libarguspdpclient.so debian/tmp/usr/lib/arc/libmcchttp.so debian/tmp/usr/lib/arc/libmccmsgvalidator.so debian/tmp/usr/lib/arc/libmccsoap.so debian/tmp/usr/lib/arc/libmcctcp.so debian/tmp/usr/lib/arc/libmcctls.so debian/tmp/usr/lib/arc/libaccARC1.apd debian/tmp/usr/lib/arc/libaccBroker.apd debian/tmp/usr/lib/arc/libaccCREAM.apd debian/tmp/usr/lib/arc/libaccEMIES.apd debian/tmp/usr/lib/arc/libaccJobDescriptionParser.apd debian/tmp/usr/lib/arc/libaccSER.apd debian/tmp/usr/lib/arc/libaccldap.apd debian/tmp/usr/lib/arc/test/libaccTEST.apd debian/tmp/usr/lib/arc/libarcshclegacy.apd debian/tmp/usr/lib/arc/libarcshc.apd debian/tmp/usr/lib/arc/libdmcfile.apd debian/tmp/usr/lib/arc/libdmchttp.apd debian/tmp/usr/lib/arc/libmccmsgvalidator.apd debian/tmp/usr/lib/arc/libdmcldap.apd debian/tmp/usr/lib/arc/libdmcsrm.apd debian/tmp/usr/lib/arc/libdmcrucio.apd debian/tmp/usr/lib/arc/libdmcacix.apd debian/tmp/usr/lib/arc/libidentitymap.apd debian/tmp/usr/lib/arc/libarguspdpclient.apd debian/tmp/usr/lib/arc/libmcchttp.apd debian/tmp/usr/lib/arc/libmccsoap.apd debian/tmp/usr/lib/arc/libmcctcp.apd debian/tmp/usr/lib/arc/libmcctls.apd nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-plugins-globus.install0000644000000000000000000000012413213507022025512 xustar000000000000000027 mtime=1513000466.155175 27 atime=1513200577.038732 30 ctime=1513200668.656853363 
nordugrid-arc-5.4.2/debian/nordugrid-arc-plugins-globus.install0000644000175000002070000000041013213507022025552 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/libarcglobusutils.so.* debian/tmp/usr/lib/arc/libaccARC0.so debian/tmp/usr/lib/arc/libdmcgridftp.so debian/tmp/usr/lib/arc/libaccARC0.apd debian/tmp/usr/lib/arc/libdmcgridftp.apd debian/tmp/usr/lib/arc/arc-lcas debian/tmp/usr/lib/arc/arc-lcmaps nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-cache-service.arc-cache-service.default0000644000000000000000000000012411631673513030475 xustar000000000000000027 mtime=1315403595.900079 27 atime=1513200576.914731 30 ctime=1513200668.670853534 nordugrid-arc-5.4.2/debian/nordugrid-arc-cache-service.arc-cache-service.default0000644000175000002070000000022611631673513030542 0ustar00mockbuildmock00000000000000# To enable arc-cache-service, i.e. to indicate that a readily usable # configuration is in place, comment out or delete the # following line. RUN=no nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-acix-cache.acix-cache.default0000644000000000000000000000012312104476745026505 xustar000000000000000026 mtime=1360166373.56548 27 atime=1513200576.988732 30 ctime=1513200668.665853473 nordugrid-arc-5.4.2/debian/nordugrid-arc-acix-cache.acix-cache.default0000644000175000002070000000021712104476745026553 0ustar00mockbuildmock00000000000000# To enable acix-cache, i.e. to indicate that a readily usable # configuration is in place, comment out or delete the # following line. RUN=no nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-acix-cache.install0000644000000000000000000000012412010704531024523 xustar000000000000000027 mtime=1344506201.236211 27 atime=1513200576.914731 30 ctime=1513200668.640853167 nordugrid-arc-5.4.2/debian/nordugrid-arc-acix-cache.install0000644000175000002070000000007412010704531024571 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/python?.?/site-packages/acix/cacheserver nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-egiis.postinst0000644000000000000000000000012412132443013024053 xustar000000000000000027 mtime=1365919243.657646 27 atime=1513200576.987732 30 ctime=1513200668.679853643 nordugrid-arc-5.4.2/debian/nordugrid-arc-egiis.postinst0000644000175000002070000000235612132443013024126 0ustar00mockbuildmock00000000000000#!/bin/sh set -e if [ "$1" = "configure" ] ; then APP_PROFILE=/etc/apparmor.d/usr.sbin.slapd LOCAL_APP_PROFILE=/etc/apparmor.d/local/usr.sbin.slapd if [ ! -r "$LOCAL_APP_PROFILE" ] ; then # Create the local profile if it does not yet exist tmp=`mktemp` cat < "$tmp" # Site-specific additions and overrides for usr.sbin.slapd. # For more details, please see /etc/apparmor.d/local/README. 
EOM mkdir -p `dirname $LOCAL_APP_PROFILE` 2>/dev/null || true mv -f "$tmp" "$LOCAL_APP_PROFILE" chmod 644 "$LOCAL_APP_PROFILE" fi grep -q "AppArmor profile for NorduGrid ARC EGIIS" "$LOCAL_APP_PROFILE" || \ cat <> "$LOCAL_APP_PROFILE" # AppArmor profile for NorduGrid ARC EGIIS START #include /etc/bdii/* r, /usr/share/arc/ldap-schema/* r, /{,var/}run/arc/infosys/bdii-slapd.conf r, /{,var/}run/arc/infosys/giis-fifo wk, /usr/lib/arc/arc-infoindex-slapd-wrapper.so rm, /usr/sbin/arc-infoindex-relay ix, /tmp/* rwk, # AppArmor profile for NorduGrid ARC EGIIS END EOM if [ -r "$APP_PROFILE" ] ; then # Reload the profile if aa-status --enabled 2>/dev/null ; then apparmor_parser -r -T -W "$APP_PROFILE" || true fi fi fi #DEBHELPER# nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-datadelivery-service.install0000644000000000000000000000012311621432155026657 xustar000000000000000027 mtime=1313223789.119505 27 atime=1513200577.039732 29 ctime=1513200668.64685324 nordugrid-arc-5.4.2/debian/nordugrid-arc-datadelivery-service.install0000644000175000002070000000014311621432155026723 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/arc/libdatadeliveryservice.so debian/tmp/usr/lib/arc/libdatadeliveryservice.apd nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-hed.arched.default0000644000000000000000000000012311512716067024515 xustar000000000000000027 mtime=1294703671.721346 27 atime=1513200576.948731 29 ctime=1513200668.67385357 nordugrid-arc-5.4.2/debian/nordugrid-arc-hed.arched.default0000644000175000002070000000021311512716067024557 0ustar00mockbuildmock00000000000000# To enable arched, i.e. to indicate that a readily usable # configuration is in place, comment out or delete the # following line. RUN=no nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-ldap-infosys.nordugrid-arc-slapd.default0000644000000000000000000000012412103734016030770 xustar000000000000000027 mtime=1359984654.688808 27 atime=1513200576.914731 30 ctime=1513200668.674853583 nordugrid-arc-5.4.2/debian/nordugrid-arc-ldap-infosys.nordugrid-arc-slapd.default0000644000175000002070000000023012103734016031030 0ustar00mockbuildmock00000000000000# To enable nordugrid-arc-slapd, i.e. to indicate that a readily usable # configuration is in place, comment out or delete the # following line. 
RUN=no nordugrid-arc-5.4.2/debian/PaxHeaders.7502/compat0000644000000000000000000000012413153454670017635 xustar000000000000000027 mtime=1504598456.143819 27 atime=1513200576.912731 30 ctime=1513200668.633853081 nordugrid-arc-5.4.2/debian/compat0000644000175000002070000000000213153454670017672 0ustar00mockbuildmock000000000000009 nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-cache-service.install0000644000000000000000000000012411512716067025254 xustar000000000000000027 mtime=1294703671.721346 27 atime=1513200576.920731 30 ctime=1513200668.644853216 nordugrid-arc-5.4.2/debian/nordugrid-arc-cache-service.install0000644000175000002070000000012511512716067025317 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/arc/libcacheservice.so debian/tmp/usr/lib/arc/libcacheservice.apd nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-aris.logrotate0000644000000000000000000000012413024226322024031 xustar000000000000000027 mtime=1481714898.713233 27 atime=1513200576.988732 30 ctime=1513200668.676853607 nordugrid-arc-5.4.2/debian/nordugrid-arc-aris.logrotate0000644000175000002070000000062013024226322024074 0ustar00mockbuildmock00000000000000/var/log/arc/infoprovider.log { missingok compress delaycompress daily rotate 14 create } /var/log/arc/inforegistration.log { missingok compress delaycompress daily rotate 14 create } /var/log/arc/bdii/bdii-update.log { missingok compress daily rotate 30 copytruncate } /var/log/arc/perfdata/infosys.perflog { missingok compress daily rotate 14 create } nordugrid-arc-5.4.2/debian/PaxHeaders.7502/libarccommon3.install0000644000000000000000000000012213124434176022542 xustar000000000000000025 mtime=1498560638.1341 27 atime=1513200576.919731 30 ctime=1513200668.639853155 nordugrid-arc-5.4.2/debian/libarccommon3.install0000644000175000002070000000205213124434176022610 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/libarccompute.so.* debian/tmp/usr/lib/libarccommunication.so.* debian/tmp/usr/lib/libarccommon.so.* debian/tmp/usr/lib/libarccredential.so.* debian/tmp/usr/lib/libarccredentialstore.so.* debian/tmp/usr/lib/libarccrypto.so.* debian/tmp/usr/lib/libarcdata.so.* debian/tmp/usr/lib/libarcdatastaging.so.* debian/tmp/usr/lib/libarcloader.so.* debian/tmp/usr/lib/libarcmessage.so.* debian/tmp/usr/lib/libarcsecurity.so.* debian/tmp/usr/lib/libarcinfosys.so.* debian/tmp/usr/lib/libarcws.so.* debian/tmp/usr/lib/libarcwssecurity.so.* debian/tmp/usr/lib/libarcxmlsec.so.* debian/tmp/usr/lib/arc/libmodcrypto.so debian/tmp/usr/lib/arc/libmodcredential.so debian/tmp/usr/lib/arc/libmodcrypto.apd debian/tmp/usr/lib/arc/libmodcredential.apd debian/tmp/usr/lib/arc/arc-file-access debian/tmp/usr/lib/arc/arc-hostname-resolver debian/tmp/usr/lib/arc/DataStagingDelivery debian/tmp/usr/share/arc/examples/arc.conf.reference debian/tmp/usr/share/arc/schema debian/tmp/usr/share/locale/*/LC_MESSAGES/nordugrid-arc.mo debian/tmp/usr/share/arc/test-jobs/test-job-* nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-gridftpd.install0000644000000000000000000000012412442324223024346 xustar000000000000000027 mtime=1418307731.508846 27 atime=1513200576.949731 30 ctime=1513200668.649853277 nordugrid-arc-5.4.2/debian/nordugrid-arc-gridftpd.install0000644000175000002070000000021512442324223024411 0ustar00mockbuildmock00000000000000debian/tmp/usr/sbin/gridftpd debian/tmp/usr/share/man/man8/gridftpd.8 debian/tmp/usr/lib/arc/fileplugin.* debian/tmp/usr/lib/arc/jobplugin.* 
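A note on the logrotate fragments above: they can be exercised without waiting for the daily cron run. This is only a sketch, assuming the fragments are installed under /etc/logrotate.d/ with the usual debhelper naming (the exact file names below are an assumption, not taken from this package):
  # Debug mode: print what logrotate would do for this fragment, without rotating anything
  logrotate -d /etc/logrotate.d/nordugrid-arc-aris
  # Force one rotation, e.g. to check that the postrotate HUP reaches the running daemon
  logrotate -f /etc/logrotate.d/nordugrid-arc-datadelivery-service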
nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-dev.install0000644000000000000000000000012412350510553023322 xustar000000000000000027 mtime=1403162987.293169 27 atime=1513200576.987732 30 ctime=1513200668.647853252 nordugrid-arc-5.4.2/debian/nordugrid-arc-dev.install0000644000175000002070000000050012350510553023362 0ustar00mockbuildmock00000000000000debian/tmp/usr/include/arc debian/tmp/usr/lib/lib*.so debian/tmp/usr/bin/wsdl2hed debian/tmp/usr/share/man/man1/wsdl2hed.1 debian/tmp/usr/bin/arcplugin debian/tmp/usr/share/man/man1/arcplugin.1 debian/tmp/usr/lib/pkgconfig/arcbase.pc debian/tmp/usr/share/arc/examples/sdk/*.cpp debian/tmp/usr/share/arc/examples/sdk/*.h nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-acix-index.install0000644000000000000000000000012412010704531024567 xustar000000000000000027 mtime=1344506201.236211 27 atime=1513200576.991732 30 ctime=1513200668.642853191 nordugrid-arc-5.4.2/debian/nordugrid-arc-acix-index.install0000644000175000002070000000007412010704531024635 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/python?.?/site-packages/acix/indexserver nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-acix-index.acix-index.default0000644000000000000000000000012312104476745026615 xustar000000000000000026 mtime=1360166373.56548 27 atime=1513200577.036732 30 ctime=1513200668.666853485 nordugrid-arc-5.4.2/debian/nordugrid-arc-acix-index.acix-index.default0000644000175000002070000000021712104476745026663 0ustar00mockbuildmock00000000000000# To enable acix-index, i.e. to indicate that a readily usable # configuration is in place, comment out or delete the # following line. RUN=no nordugrid-arc-5.4.2/debian/PaxHeaders.7502/README.source0000644000000000000000000000012411324104700020566 xustar000000000000000027 mtime=1263569344.388956 27 atime=1513200576.991732 30 ctime=1513200668.686853729 nordugrid-arc-5.4.2/debian/README.source0000644000175000002070000000130611324104700020633 0ustar00mockbuildmock00000000000000The source code presented as .orig.tar.gz to Debian is functionally complete. The sources of all images shall be distributed with it. There is however the possibility that some files do exist in the subversion repository, that are not distributed further. This shall be considered a regular consequence of the development process, i.e. the Debian packages are not expected to be built from the development branch. The upstream developers, who are also maintaining this package, invite everyone to contribute actively with the future development of the ARC middleware and suggest to inspect http://svn.nordugrid.org for first steps. -- Steffen Moeller Thu, 14 Jan 2010 12:13:30 +0000 nordugrid-arc-5.4.2/debian/PaxHeaders.7502/README.Debian0000644000000000000000000000012413065017636020467 xustar000000000000000027 mtime=1490296734.511814 27 atime=1513200576.969731 30 ctime=1513200668.685853717 nordugrid-arc-5.4.2/debian/README.Debian0000644000175000002070000000361113065017636020535 0ustar00mockbuildmock00000000000000nordugrid-arc ============= Open Source, Linux distributions and grid computing --------------------------------------------------- Grid Computing is all about having a community that is full of trust in the integrity of its contributors. Everything is logged - but you would not expect anyone intentionally evil amongst yourselves. 
The Debian Society is a forerunner in the formal representation of such collaborative flocks of individuals, and it is much respected throughout the scientific community for the achievements that maintain most of today's computational grids. The development of this second generation of the Advanced Resource Connector was mostly funded by the EU project "KnowARC". The aim of appealing to smaller and/or more heterogeneous communities than traditional High Energy Physics is key to the project. It was foreseen from the beginning to disseminate the development to the Linux community. The developers of ARC are found on the mailing list of NorduGrid (http://www.nordugrid.org), where the project has its roots. You may also be interested in the wiki pages (http://wiki.nordugrid.org) for a summary of first steps to adopt the technology. If you are interested in preparing your own Campus Grid, are working in a larger company with CPU time to harvest for your computations, or simply want to join with your own cluster, then please join in. Comments on the packaging ------------------------- ARC-1 was developed with Debian in mind. No special adaptations were required. For PDF generation, doxygen needs the texlive-extra-utils, texlive-latex-base, texlive-latex-recommended and texlive-latex-extra packages, which might come as a surprise to some. -- Mattias Ellert, Steffen Möller, Balazs Konya, Farid Ould-Saada, Anders Wäänänen, Aleksander Konstantinov, Peter Stefan and all other contributors of the ARC grid middleware. Wed, 09 Dec 2009 13:34:52 +0100 nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-misc-utils.install0000644000000000000000000000012312100452774024637 xustar000000000000000027 mtime=1359107580.567206 27 atime=1513200576.991732 29 ctime=1513200668.65585335 nordugrid-arc-5.4.2/debian/nordugrid-arc-misc-utils.install0000644000175000002070000000046612100452774024703 0ustar00mockbuildmock00000000000000debian/tmp/usr/bin/arcemiestest debian/tmp/usr/bin/arcperftest debian/tmp/usr/bin/arcwsrf debian/tmp/usr/bin/saml_assertion_init debian/tmp/usr/share/man/man1/arcemiestest.1 debian/tmp/usr/share/man/man1/arcperftest.1 debian/tmp/usr/share/man/man1/arcwsrf.1 debian/tmp/usr/share/man/man1/saml_assertion_init.1 nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-arex.a-rex.default0000644000000000000000000000012411551506471024502 xustar000000000000000027 mtime=1302760761.856347 27 atime=1513200576.988732 30 ctime=1513200668.667853497 nordugrid-arc-5.4.2/debian/nordugrid-arc-arex.a-rex.default0000644000175000002070000000021211551506471024542 0ustar00mockbuildmock00000000000000# To enable a-rex, i.e. to indicate that a readily usable # configuration is in place, comment out or delete the # following line.
RUN=no nordugrid-arc-5.4.2/debian/PaxHeaders.7502/watch0000644000000000000000000000012312574532541017457 xustar000000000000000027 mtime=1441969505.680553 27 atime=1513200577.037732 29 ctime=1513200668.63785313 nordugrid-arc-5.4.2/debian/watch0000644000175000002070000000042412574532541017525 0ustar00mockbuildmock00000000000000version = 3 http://download.nordugrid.org/packages/nordugrid-arc/releases/(\d\.\d\.\d)/src/nordugrid-arc-(\d\.\d\.\d)\.tar\.gz debian uupdate #opts=pasv ftp://download.nordugrid.org/packages/nordugrid-arc/releases/([\d\.]+)/src/nordugrid-arc-([\d\.]+)\.tar\.gz debian uupdate nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-egiis.nordugrid-arc-egiis.default0000644000000000000000000000012412064022012027445 xustar000000000000000027 mtime=1355817994.146117 27 atime=1513200577.039732 30 ctime=1513200668.671853546 nordugrid-arc-5.4.2/debian/nordugrid-arc-egiis.nordugrid-arc-egiis.default0000644000175000002070000000023012064022012027505 0ustar00mockbuildmock00000000000000# To enable nordugrid-arc-egiis, i.e. to indicate that a readily usable # configuration is in place, comment out or delete the # following line. RUN=no nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-gridftpd.gridftpd.default0000644000000000000000000000012411551506471026135 xustar000000000000000027 mtime=1302760761.856347 27 atime=1513200576.914731 30 ctime=1513200668.672853558 nordugrid-arc-5.4.2/debian/nordugrid-arc-gridftpd.gridftpd.default0000644000175000002070000000021511551506471026200 0ustar00mockbuildmock00000000000000# To enable gridftpd, i.e. to indicate that a readily usable # configuration is in place, comment out or delete the # following line. RUN=no nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-arex.dirs0000644000000000000000000000012412171515460023001 xustar000000000000000027 mtime=1374067504.812872 27 atime=1513200576.991732 30 ctime=1513200668.663853448 nordugrid-arc-5.4.2/debian/nordugrid-arc-arex.dirs0000644000175000002070000000006212171515460023044 0ustar00mockbuildmock00000000000000var/spool/arc var/spool/arc/ssm var/spool/arc/urs nordugrid-arc-5.4.2/debian/PaxHeaders.7502/control0000644000000000000000000000012413165641137020031 xustar000000000000000027 mtime=1507279455.968913 27 atime=1513200576.969731 30 ctime=1513200668.634853093 nordugrid-arc-5.4.2/debian/control0000644000175000002070000004464113165641137020107 0ustar00mockbuildmock00000000000000Source: nordugrid-arc Section: net Priority: optional Maintainer: Mattias Ellert Uploaders: Anders Waananen Build-Depends: debhelper (>= 9), dh-autoreconf, autopoint, libxml2-dev (>= 2.4.0), libssl-dev, libglibmm-2.4-dev, python-dev, libltdl-dev, libldap2-dev, uuid-dev, libcppunit-dev, pkg-config, libdb++-dev, libxmlsec1-dev (>=1.2.4), libglobus-common-dev, libglobus-gssapi-gsi-dev, libglobus-ftp-client-dev, libglobus-ftp-control-dev, dh-python | python-support, default-jdk | openjdk-6-jdk | java-gcj-compat-dev, junit, openssl, swig, libnss3-dev, pylint, libxml-simple-perl, libdbi-perl, libsqlite3-dev, libinline-python-perl | dash, libldns-dev Standards-Version: 4.0.0 Homepage: http://www.nordugrid.org Package: libarccommon3 Provides: nordugrid-arc-libs, nordugrid-arc1, nordugrid-arc-nox Replaces: nordugrid-arc-libs (<< 1.0.0~), nordugrid-arc1 (<< 1.0.0~), nordugrid-arc-nox (<< 1.3.0~), nordugrid-arc-hed (<< 1.0.1~rc2~), nordugrid-arc-arex (<< 2.0.1~), libarccommon0, libarccommon1, libarccommon2 Conflicts: nordugrid-arc-libs (<< 1.0.0~), nordugrid-arc1 (<< 1.0.0~), nordugrid-arc1-janitor (<< 1.0.0~), 
nordugrid-arc-nox (<< 1.3.0~), nordugrid-arc-httpsd (<< 0.8.2~), nordugrid-arc-logger-server (<< 0.8.2~), nordugrid-arc-nox-charon (<< 1.3.0~), nordugrid-arc-nox-chelonia (<< 1.3.0~), nordugrid-arc-nox-compiler (<< 1.3.0~), nordugrid-arc-nox-delegation (<< 1.3.0~), nordugrid-arc-nox-hopi (<< 1.3.0~), nordugrid-arc-nox-isis (<< 1.3.0~), nordugrid-arc-nox-janitor (<< 1.3.0~), nordugrid-arc-nox-paul (<< 1.3.0~), nordugrid-arc-nox-saml2sp (<< 1.3.0~), nordugrid-arc-nox-slcs (<< 1.3.0~), nordugrid-arc-chelonia (<< 2.0.0~), nordugrid-arc-hopi (<< 2.0.0~), nordugrid-arc-isis (<< 2.0.0~), nordugrid-arc-janitor (<< 2.0.0~), nordugrid-arc-doxygen (<< 4.0.0~), nordugrid-arc-arcproxyalt (<< 5.3.0~rc1~) Breaks: nordugrid-arc-hed (<< 1.0.1~rc2~), nordugrid-arc-arex (<< 2.0.1~), libarccommon0, libarccommon1, libarccommon2 Architecture: any Section: libs Depends: ${shlibs:Depends}, ${misc:Depends} Description: ARC Grid middleware NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . The Advanced Resource Connector (ARC) is a software suite that makes use of Grid technologies to federate heterogeneous computing and data resources across administrative domains. The resulting infrastructure is commonly referred to as a computational or a data Grid, depending on prevalence of CPU-intensive or data-intensive applications. ARC is developed and supported by the NorduGrid Consortium. . Just like the Web, ARC has its roots in the IT infrastructure that was erected to analyze data collected by high energy physics experiments at CERN. While first versions of ARC made heavy use of the Globus Toolkit, the current release does not require Globus as such, though maintains backward compatibility. Package: nordugrid-arc-client Provides: nordugrid-arc1-client, nordugrid-arc-nox-client Replaces: nordugrid-arc1-client (<< 1.0.0~), nordugrid-arc-nox-client (<< 1.3.0~) Conflicts: nordugrid-arc1-client (<< 1.0.0~), nordugrid-arc-nox-client (<< 1.3.0~) Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon3 (= ${binary:Version}), nordugrid-arc-plugins-needed (= ${binary:Version}) Description: ARC command line interface NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . For the regular user of any ARC-based computational Grid, this client package contains (or depends on) all packages that are needed to submit jobs, query their status and retrieve results. Basic Grid file manipulation tools are also offered. Package: nordugrid-arc-hed Provides: nordugrid-arc1-server, nordugrid-arc-nox-hed Replaces: nordugrid-arc1-server (<< 1.0.0~), nordugrid-arc-nox-hed (<< 1.3.0~), nordugrid-arc-nox-client (<< 1.3.0~), nordugrid-arc-client (<< 1.0.1~rc2~) Conflicts: nordugrid-arc1-server (<< 1.0.0~), nordugrid-arc-nox-hed (<< 1.3.0~) Breaks: nordugrid-arc-nox-client (<< 1.3.0~), nordugrid-arc-client (<< 1.0.1~rc2~) Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon3 (= ${binary:Version}), lsb-base (>= 3.0-6) Recommends: nordugrid-arc-python Description: ARC Hosting Environment Daemon NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . The ARC Hosting Environment Daemon (HED). This daemon is a container for ARC services. 
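The *.default files shown earlier all ship with RUN=no; per the comment in each file, a service such as arched is enabled by commenting out or deleting that line and then starting the init script. A minimal sketch, assuming dh_installinit places nordugrid-arc-hed.arched.default at /etc/default/arched and that the init script is named arched (both names are assumptions based on the packaging above):
  # Comment out the RUN=no guard, then start the HED container
  sed -i 's/^RUN=no/#RUN=no/' /etc/default/arched
  invoke-rc.d arched start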
Package: nordugrid-arc-gridftpd Provides: nordugrid-arc-nox-gridftp-server Replaces: nordugrid-arc-nox-gridftp-server (<< 1.3.0~) Conflicts: nordugrid-arc-nox-gridftp-server (<< 1.3.0~) Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon3 (= ${binary:Version}), nordugrid-arc-plugins-needed (= ${binary:Version}), lsb-base (>= 3.0-6) Description: ARC GridFTP server NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . This package contains the ARC GridFTP server, which is implemented using original Globus tools but extended with a plugin framework and other functionality. . Current plugins include: . fileplugin : Standard GridFTP server based on Globus globus-ftp-control jobplugin : Classical ARC job submission interface Package: nordugrid-arc-cache-service Provides: nordugrid-arc-nox-cache-service Replaces: nordugrid-arc-nox-cache-service (<< 1.3.0~) Conflicts: nordugrid-arc-nox-cache-service (<< 1.3.0~) Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon3 (= ${binary:Version}), nordugrid-arc-hed (= ${binary:Version}), nordugrid-arc-plugins-needed (= ${binary:Version}), lsb-base (>= 3.0-6) Description: ARC cache service NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . This package contains the ARC cache service. It provides a way to perform some operations on the ARC Compute Element cache remotely. It can be especially helpful for data-driven job scheduling within pilot job frameworks. Package: nordugrid-arc-datadelivery-service Provides: nordugrid-arc-nox-datadelivery-service Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon3 (= ${binary:Version}), nordugrid-arc-hed (= ${binary:Version}), nordugrid-arc-plugins-needed (= ${binary:Version}), lsb-base (>= 3.0-6) Description: ARC data delivery service NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . This package contains the ARC data delivery service. Package: nordugrid-arc-ldap-infosys Provides: nordugrid-arc-infosys-ldap, nordugrid-arc-nox-ldap-infosys, nordugrid-arc-infoindex Replaces: nordugrid-arc-infosys-ldap (<< 1.0.0~), nordugrid-arc-nox-ldap-infosys (<< 1.3.0~), nordugrid-arc-infoindex (<< 0.8.2~), nordugrid-arc-aris (<< 3.0.0~) Conflicts: nordugrid-arc-infosys-ldap (<< 1.0.0~), nordugrid-arc-nox-ldap-infosys (<< 1.3.0~), nordugrid-arc-infoindex (<< 0.8.2~), nordugrid-arc-aris (<< 3.0.0~) Architecture: all Depends: ${misc:Depends}, slapd, glue-schema (>= 2.0.10), bdii, lsb-base (>= 3.0-6) Description: ARC LDAP infosys service NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . This package contains the LDAP-based information system for ARC. This package is not self-contained. It should be pulled in either by nordugrid-arc-aris to be part of a local information system or by nordugrid-arc-egiis to be part of an EGIIS service.
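To illustrate what the LDAP-based information system described above serves, a site can be queried anonymously with standard OpenLDAP tools. This is only a sketch: the host name is a placeholder, and port 2135 with the Mds-Vo-Name=local,o=grid base are the conventional ARC/MDS defaults rather than something mandated by this package:
  # Anonymous query of an ARC LDAP information system (ARIS); adjust host, port and base for the site
  ldapsearch -x -H ldap://arc-ce.example.org:2135 -b 'Mds-Vo-Name=local,o=grid'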
Package: nordugrid-arc-aris Replaces: nordugrid-arc-infosys-ldap (<< 1.0.0~), nordugrid-arc-nox-ldap-infosys (<< 1.3.0~), nordugrid-arc-infoindex (<< 0.8.2~) Conflicts: nordugrid-arc-infosys-ldap (<< 1.0.0~), nordugrid-arc-nox-ldap-infosys (<< 1.3.0~), nordugrid-arc-infoindex (<< 0.8.2~) Architecture: all Depends: ${misc:Depends}, ${perl:Depends}, nordugrid-arc-ldap-infosys (= ${binary:Version}), bdii, lsb-base (>= 3.0-6) Description: ARC local information system NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . This package contains the local information system for ARC. This package is not self-contained but is closely connected to nordugrid-arc-arex. Package: nordugrid-arc-egiis Provides: nordugrid-arc-nox-giis Replaces: nordugrid-arc-nox-giis (<< 1.3.0~) Conflicts: nordugrid-arc-nox-giis (<< 1.3.0~) Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, nordugrid-arc-ldap-infosys (= ${source:Version}), lsb-base (>= 3.0-6) Description: ARC EGIIS service NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . The EGIIS is the Information Index Service used by ARC. This service is used to set up an LDAP-based server that local information systems (ARIS) register to. Package: nordugrid-arc-ldap-monitor Provides: nordugrid-arc-monitor Replaces: nordugrid-arc-monitor (<< 1.0.0~) Conflicts: nordugrid-arc-monitor (<< 1.0.0~) Architecture: all Depends: ${misc:Depends}, php-common | php5-common, php-ldap | php5-ldap, php-gd | php5-gd Description: ARC LDAP monitor service NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . This package contains the LDAP monitor system for ARC. This package is self-contained. It is used to set up a Web-based monitor which pulls information from ARIS and displays it in a readable manner. Package: nordugrid-arc-ws-monitor Architecture: all Depends: ${misc:Depends} Description: ARC WS monitor service NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . This package contains the WS monitor system for ARC. This package is self-contained. It is used to set up a Web-based monitor which pulls information from ISIS and displays it in a readable manner. Package: nordugrid-arc-arex Provides: nordugrid-arc-grid-manager, nordugrid-arc-server, nordugrid-arc1-arex, nordugrid-arc-nox-arex Replaces: nordugrid-arc-grid-manager (<< 1.0.0~), nordugrid-arc-server (<< 1.0.0~), nordugrid-arc1-arex (<< 1.0.0~), nordugrid-arc-nox-arex (<< 1.3.0~) Conflicts: nordugrid-arc-grid-manager (<< 1.0.0~), nordugrid-arc-server (<< 1.0.0~), nordugrid-arc1-arex (<< 1.0.0~), nordugrid-arc-nox-arex (<< 1.3.0~) Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, ${perl:Depends}, libarccommon3 (= ${binary:Version}), nordugrid-arc-hed (= ${binary:Version}), nordugrid-arc-ldap-infosys (= ${source:Version}), nordugrid-arc-plugins-needed (= ${binary:Version}), nordugrid-arc-python (= ${binary:Version}), libxml-simple-perl, python, python-stompy, python-ldap, lsb-base (>= 3.0-6) Description: ARC Remote EXecution service NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). .
The ARC Resource-coupled EXecution service (AREX) provides a service for the execution of compute jobs - also known as a Compute Element. A-REX also handles transfer of input and output data, as well as input data caching to minimise data traffic. Package: nordugrid-arc-plugins-needed Provides: nordugrid-arc1-plugins-base, nordugrid-arc-nox-plugins-base, nordugrid-arc-plugins-base Replaces: nordugrid-arc1-plugins-base (<< 1.0.0~), nordugrid-arc-nox-plugins-base (<< 1.3.0~), nordugrid-arc-plugins-base (<< 1.0.0~b2~) Conflicts: nordugrid-arc1-plugins-base (<< 1.0.0~), nordugrid-arc-nox-plugins-base (<< 1.3.0~), nordugrid-arc-nox-plugins-globus (<< 1.3.0~), nordugrid-arc-plugins-base (<< 1.0.0~b2~), nordugrid-arc-plugins-globus (<< 1.0.0~b3~) Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon3 (= ${binary:Version}) Description: ARC base plugins NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . ARC base plugins. This includes the Message Chain Components (MCCs) and Data Management Components (DMCs). Package: nordugrid-arc-plugins-globus Provides: nordugrid-arc1-plugins-globus, nordugrid-arc-nox-plugins-globus Replaces: nordugrid-arc1-plugins-globus (<< 1.0.0~), nordugrid-arc-nox-plugins-globus (<< 1.3.0~) Conflicts: nordugrid-arc1-plugins-globus (<< 1.0.0~), nordugrid-arc-nox-plugins-globus (<< 1.3.0~), nordugrid-arc-plugins-needed (<< 1.0.1-2~) Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon3 (= ${binary:Version}) Description: ARC Globus plugins NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . ARC Globus plugins. This includes the Globus dependent Data Management Components (DMCs). These plugins are needed for most traditional Grid infrastructures. Package: nordugrid-arc-acix-core Architecture: all Depends: ${misc:Depends}, ${python:Depends}, python, python-twisted-core, python-twisted-web, python-openssl XB-Python-Version: ${python:Versions} Description: ARC cache index - core components NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . Core components of the ARC Cache Index (ACIX). Package: nordugrid-arc-acix-cache Architecture: all Depends: ${misc:Depends}, ${python:Depends}, python, python-twisted-core, python-twisted-web, python-openssl, nordugrid-arc-acix-core (= ${binary:Version}), lsb-base (>= 3.0-6) XB-Python-Version: ${python:Versions} Description: ARC cache index - cache server NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . Cache server component of the ARC Cache Index (ACIX), usually installed alongside A-REX. This component collects information on the content of an A-REX cache. Package: nordugrid-arc-acix-index Architecture: all Depends: ${misc:Depends}, ${python:Depends}, python, python-twisted-core, python-twisted-web, python-openssl, nordugrid-arc-acix-core (= ${binary:Version}), lsb-base (>= 3.0-6) XB-Python-Version: ${python:Versions} Description: ARC cache index - index server NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . 
Index server component of the ARC Cache Index (ACIX), usually installed independently of any A-REX installation. This component pulls cache content from cache servers and can be queried by clients for the location of cached files. Package: nordugrid-arc-dev Provides: nordugrid-arc-libs-devel, nordugrid-arc1-dev, nordugrid-arc-nox-dev Replaces: nordugrid-arc-libs-devel (<< 1.0.0~), nordugrid-arc1-dev (<< 1.0.0~), nordugrid-arc-nox-dev (<< 1.3.0~) Conflicts: nordugrid-arc-libs-devel (<< 1.0.0~), nordugrid-arc1-dev (<< 1.0.0~), nordugrid-arc-nox-dev (<< 1.3.0~) Architecture: any Section: libdevel Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon3 (= ${binary:Version}), libxml2-dev (>= 2.4.0), libssl-dev, libglibmm-2.4-dev Description: ARC development files NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . Header files and libraries needed to develop applications using ARC. Package: nordugrid-arc-python Provides: nordugrid-arc1-python, nordugrid-arc-nox-python Replaces: nordugrid-arc1-python (<< 1.0.0~), nordugrid-arc-nox-python (<< 1.3.0~) Conflicts: nordugrid-arc1-python (<< 1.0.0~), nordugrid-arc-nox-python (<< 1.3.0~) Architecture: any Section: python Depends: ${shlibs:Depends}, ${misc:Depends}, ${python:Depends}, libarccommon3 (= ${binary:Version}) XB-Python-Version: ${python:Versions} Description: ARC Python wrapper NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . Python bindings for ARC. Package: nordugrid-arc-java Provides: nordugrid-arc1-java, nordugrid-arc-nox-java Replaces: nordugrid-arc1-java (<< 1.0.0~), nordugrid-arc-nox-java (<< 1.3.0~) Conflicts: nordugrid-arc1-java (<< 1.0.0~), nordugrid-arc-nox-java (<< 1.3.0~) Architecture: any Section: java Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon3 (= ${binary:Version}), default-jre | java6-runtime | java5-runtime Description: ARC Java wrapper NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . Java bindings for ARC. Package: nordugrid-arc-gridmap-utils Architecture: all Depends: libnet-ldap-perl, libxml-dom-perl, libcrypt-ssleay-perl, libsoap-lite-perl, libcrypt-openssl-x509-perl, ${perl:Depends}, ${misc:Depends} Recommends: cron Description: NorduGrid authorization tools NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . This package contains the ARC authorization machinery. A prominent tool distributed in this package is the nordugridmap script to map Grid user identities to local Linux accounts. Package: nordugrid-arc-ca-utils Architecture: all Depends: fetch-crl, ${misc:Depends} Description: NorduGrid authentication tools NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . The nordugrid-arc-ca-utils packages has been obsoleted. The functionality of the grid-update-crls tool is provided by the fetch-crl tool in the fetch-crl package. 
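For context on the nordugridmap tool mentioned above: it is typically used to maintain a Globus-style grid-mapfile, in which each line maps a certificate subject DN to a local account. The entry below is purely illustrative (hypothetical DN and user name, not taken from this package):
  # One grid-mapfile entry: quoted subject DN, then the local account it maps to
  "/O=Grid/O=NorduGrid/OU=example.org/CN=Jane Doe" griduser01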
Package: nordugrid-arc-misc-utils Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon3 (= ${binary:Version}) Description: ARC grid middleware - Misc tools NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). . This package contains utilities for various tasks including testing. The package is usually not required by users or sysadmins but is mainly for developers. nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-aris.postrm0000644000000000000000000000012412132441403023353 xustar000000000000000027 mtime=1365918467.545496 27 atime=1513200576.969731 30 ctime=1513200668.681853668 nordugrid-arc-5.4.2/debian/nordugrid-arc-aris.postrm0000644000175000002070000000144612132441403023425 0ustar00mockbuildmock00000000000000#!/bin/sh set -e #DEBHELPER# if [ "$1" = "purge" ] ; then APP_PROFILE=/etc/apparmor.d/usr.sbin.slapd LOCAL_APP_PROFILE=/etc/apparmor.d/local/usr.sbin.slapd if [ -r "$LOCAL_APP_PROFILE" ] ; then sed '/# AppArmor profile for NorduGrid ARC ARIS START/,/# AppArmor profile for NorduGrid ARC ARIS END/d' -i "$LOCAL_APP_PROFILE" fi if [ ! -r "$APP_PROFILE" ] ; then if [ -r "$LOCAL_APP_PROFILE" ] ; then if [ -z "`sed '/^#/d' $LOCAL_APP_PROFILE`" ] ; then rm -f "$LOCAL_APP_PROFILE" || true fi fi rmdir /etc/apparmor.d/local 2>/dev/null || true rmdir /etc/apparmor.d 2>/dev/null || true fi if [ -r "$APP_PROFILE" ] ; then # Reload the profile if aa-status --enabled 2>/dev/null ; then apparmor_parser -r -T -W "$APP_PROFILE" || true fi fi fi nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-ca-utils.lintian-overrides0000644000000000000000000000012411647474414026272 xustar000000000000000027 mtime=1319008524.101213 27 atime=1513200577.037732 30 ctime=1513200668.683853693 nordugrid-arc-5.4.2/debian/nordugrid-arc-ca-utils.lintian-overrides0000644000175000002070000000012311647474414026333 0ustar00mockbuildmock00000000000000# This package is intentionally empty nordugrid-arc-ca-utils: empty-binary-package nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-ws-monitor.install0000644000000000000000000000012411512716067024671 xustar000000000000000027 mtime=1294703671.721346 27 atime=1513200576.912731 30 ctime=1513200668.660853411 nordugrid-arc-5.4.2/debian/nordugrid-arc-ws-monitor.install0000644000175000002070000000012311512716067024732 0ustar00mockbuildmock00000000000000debian/tmp/usr/share/arc/ws-monitor/* debian/tmp/usr/share/man/man7/ws-monitor.7* nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-aris.install0000644000000000000000000000012413153454670023513 xustar000000000000000027 mtime=1504598456.143819 27 atime=1513200576.991732 30 ctime=1513200668.644853216 nordugrid-arc-5.4.2/debian/nordugrid-arc-aris.install0000644000175000002070000000033013153454670023554 0ustar00mockbuildmock00000000000000debian/tmp/usr/share/arc/create-bdii-config debian/tmp/usr/share/arc/glue-generator.pl debian/tmp/usr/share/arc/glite-info-provider-ldap debian/tmp/usr/share/arc/PerfData.pl debian/tmp/usr/share/arc/ConfigParser.pm nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-aris.postinst0000644000000000000000000000012412132441403023712 xustar000000000000000027 mtime=1365918467.545496 27 atime=1513200577.039732 30 ctime=1513200668.678853631 nordugrid-arc-5.4.2/debian/nordugrid-arc-aris.postinst0000644000175000002070000000223512132441403023761 0ustar00mockbuildmock00000000000000#!/bin/sh set -e if [ "$1" = "configure" ] ; then APP_PROFILE=/etc/apparmor.d/usr.sbin.slapd 
LOCAL_APP_PROFILE=/etc/apparmor.d/local/usr.sbin.slapd if [ ! -r "$LOCAL_APP_PROFILE" ] ; then # Create the local profile if it does not yet exist tmp=`mktemp` cat < "$tmp" # Site-specific additions and overrides for usr.sbin.slapd. # For more details, please see /etc/apparmor.d/local/README. EOM mkdir -p `dirname $LOCAL_APP_PROFILE` 2>/dev/null || true mv -f "$tmp" "$LOCAL_APP_PROFILE" chmod 644 "$LOCAL_APP_PROFILE" fi grep -q "AppArmor profile for NorduGrid ARC ARIS" "$LOCAL_APP_PROFILE" || \ cat <> "$LOCAL_APP_PROFILE" # AppArmor profile for NorduGrid ARC ARIS START #include /etc/bdii/* r, /usr/share/arc/ldap-schema/* r, /{,var/}run/arc/infosys/bdii-slapd.conf r, /var/lib/arc/bdii/db/** rwk, /{,var/}run/arc/bdii/db/* w, # AppArmor profile for NorduGrid ARC ARIS END EOM if [ -r "$APP_PROFILE" ] ; then # Reload the profile if aa-status --enabled 2>/dev/null ; then apparmor_parser -r -T -W "$APP_PROFILE" || true fi fi fi #DEBHELPER# nordugrid-arc-5.4.2/debian/PaxHeaders.7502/changelog0000644000000000000000000000013213214316014020263 xustar000000000000000030 mtime=1513200652.936661098 30 atime=1513200658.416728122 30 ctime=1513200668.633853081 nordugrid-arc-5.4.2/debian/changelog0000644000175000002070000002162513214316014020337 0ustar00mockbuildmock00000000000000nordugrid-arc (5.4.2-1) unstable; urgency=low * Unofficial build. -- Anders Waananen Wed, 13 Dec 2017 22:30:47 +0100 nordugrid-arc (4.0.0~rc2-1) unstable; urgency=low * 4.0.0 Release Candidate 2 -- Anders Waananen Thu, 07 Nov 2013 11:01:24 +0100 nordugrid-arc (4.0.0~rc1-1) unstable; urgency=low * 4.0.0 Release Candidate 1 -- Anders Waananen Tue, 29 Oct 2013 23:22:57 +0100 nordugrid-arc (3.0.3-1) unstable; urgency=low * 3.0.3 Final Release -- Anders Waananen Fri, 19 Jul 2013 12:05:50 +0200 nordugrid-arc (3.0.2-1) unstable; urgency=low * 3.0.2 Final Release -- Anders Waananen Wed, 12 Jun 2013 15:09:59 +0200 nordugrid-arc (3.0.1-1) unstable; urgency=low * 3.0.1 Final Release -- Anders Waananen Tue, 30 Apr 2013 00:47:43 +0200 nordugrid-arc (3.0.1~rc2-1) unstable; urgency=low * 3.0.1 Release Candidate 2 -- Anders Waananen Fri, 12 Apr 2013 16:56:03 +0200 nordugrid-arc (3.0.1~rc1-1) unstable; urgency=low * 3.0.1 Release Candidate 1 -- Anders Waananen Fri, 12 Apr 2013 13:50:41 +0200 nordugrid-arc (3.0.0-1) unstable; urgency=low * 3.0.0 Final Release -- Anders Waananen Fri, 22 Mar 2013 12:32:51 +0100 nordugrid-arc (3.0.0~rc5-1) unstable; urgency=low * 3.0.0 Release Candidate 5 -- Anders Waananen Wed, 06 Feb 2013 12:12:48 +0100 nordugrid-arc (3.0.0~rc4-1) unstable; urgency=low * 3.0.0 Release Candidate 4 -- Anders Waananen Sat, 02 Feb 2013 01:00:33 +0100 nordugrid-arc (3.0.0~rc3-1) unstable; urgency=low * 3.0.0 Release Candidate 3 -- Anders Waananen Wed, 30 Jan 2013 09:02:17 +0100 nordugrid-arc (3.0.0~rc2-1) unstable; urgency=low * 3.0.0 Release Candidate 2 -- Anders Waananen Mon, 28 Jan 2013 07:55:14 +0100 nordugrid-arc (3.0.0~rc1-1) unstable; urgency=low * 3.0.0 Release Candidate 1 -- Anders Waananen Thu, 06 Dec 2012 22:05:31 +0100 nordugrid-arc (2.0.1-1) unstable; urgency=low * 2.0.1 Final Release -- Anders Waananen Thu, 22 Nov 2012 23:47:19 +0100 nordugrid-arc (2.0.1rc2) unstable; urgency=low * 2.0.1rc2 Release Candidate 2 -- Anders Waananen Thu, 25 Oct 2012 13:00:02 +0200 nordugrid-arc (2.0.1rc1) unstable; urgency=low * 2.0.1rc1 Release Candidate 1 -- Anders Waananen Mon, 27 Aug 2012 13:26:30 +0200 nordugrid-arc (2.0.0-1) unstable; urgency=low * 2.0.0 Final Release -- Mattias Ellert Wed, 23 May 2012 19:27:47 +0200 nordugrid-arc 
(2.0.0~rc4-1) unstable; urgency=low * 2.0.0 Release Candidate 4 -- Mattias Ellert Mon, 02 Apr 2012 16:06:45 +0200 nordugrid-arc (2.0.0~rc3.1-1) unstable; urgency=low * 2.0.0 Release Candidate 3.1 -- Mattias Ellert Tue, 27 Mar 2012 10:30:23 +0200 nordugrid-arc (2.0.0~rc3-1) unstable; urgency=low * 2.0.0 Release Candidate 3 -- Mattias Ellert Mon, 05 Mar 2012 16:27:32 +0100 nordugrid-arc (2.0.0~rc2-1) unstable; urgency=low * 2.0.0 Release Candidate 2 -- Mattias Ellert Wed, 15 Feb 2012 13:54:17 +0100 nordugrid-arc (1.1.0-1) unstable; urgency=low * 1.1.0 Final Release -- Mattias Ellert Mon, 03 Oct 2011 14:30:45 +0200 nordugrid-arc (1.1.0~rc2-1) unstable; urgency=low * 1.1.0 Release Candidate 2 -- Mattias Ellert Sun, 25 Sep 2011 05:42:22 +0200 nordugrid-arc (1.1.0~rc1-1) unstable; urgency=low * 1.1.0 Release Candidate 1 -- Mattias Ellert Sun, 11 Sep 2011 20:08:33 +0200 nordugrid-arc (1.0.1-1) unstable; urgency=low * 1.0.0 Final Release -- Mattias Ellert Sat, 23 Jul 2011 09:32:53 +0200 nordugrid-arc (1.0.1~rc4-1) unstable; urgency=low * 1.0.1 Release Candidate 4 -- Mattias Ellert Tue, 19 Jul 2011 15:17:05 +0200 nordugrid-arc (1.0.1~rc1-1) unstable; urgency=low * 1.0.1 Release Candidate 1 -- Mattias Ellert Sat, 18 Jun 2011 18:29:09 +0200 nordugrid-arc (1.0.0-1) unstable; urgency=low * 1.0.0 Final Release -- Mattias Ellert Mon, 18 Apr 2011 08:59:55 +0200 nordugrid-arc (1.0.0~b5-1) unstable; urgency=low * 1.0.0 Beta Release 5 -- Mattias Ellert Wed, 06 Apr 2011 14:08:52 +0200 nordugrid-arc (1.0.0~b4-1) unstable; urgency=low * 1.0.0 Beta Release 4 -- Mattias Ellert Wed, 23 Mar 2011 15:19:08 +0100 nordugrid-arc (1.0.0~b3-1) unstable; urgency=low * 1.0.0 Beta Release 3 -- Mattias Ellert Thu, 10 Mar 2011 17:05:28 +0100 nordugrid-arc (1.0.0~b2-1) unstable; urgency=low * 1.0.0 Beta Release 2 -- Mattias Ellert Mon, 07 Mar 2011 05:12:30 +0100 nordugrid-arc (1.0.0~b1-1) unstable; urgency=low * 1.0.0 Beta Release 1 -- Mattias Ellert Mon, 14 Feb 2011 17:19:04 +0100 nordugrid-arc-nox (1.2.1-1) unstable; urgency=low * 1.2.1 Final release -- Mattias Ellert Tue, 21 Dec 2010 22:34:02 +0100 nordugrid-arc-nox (1.2.1~rc2-1) unstable; urgency=low * 1.2.1 Release Candidate 2 -- Mattias Ellert Tue, 21 Dec 2010 09:36:46 +0100 nordugrid-arc-nox (1.2.1~rc1-1) unstable; urgency=low * 1.2.1 Release Candidate 1 -- Mattias Ellert Wed, 08 Dec 2010 15:30:37 +0100 nordugrid-arc-nox (1.2.0-1) unstable; urgency=low * 1.2.0 Final release -- Mattias Ellert Fri, 22 Oct 2010 15:25:07 +0200 nordugrid-arc-nox (1.2.0~rc2-1) unstable; urgency=low * 1.2.0 Release Candidate 2 -- Mattias Ellert Thu, 30 Sep 2010 10:11:14 +0200 nordugrid-arc-nox (1.2.0~rc1-1) unstable; urgency=low * 1.2.0 Release Candidate 1 -- Mattias Ellert Mon, 13 Sep 2010 11:14:51 +0200 nordugrid-arc-nox (1.1.0-1) unstable; urgency=low * 1.1.0 Final release -- Mattias Ellert Wed, 05 May 2010 18:31:59 +0200 nordugrid-arc-nox (1.1.0~rc6-1) unstable; urgency=low * 1.1.0 Release Candidate 6 -- Mattias Ellert Mon, 08 Mar 2010 20:36:00 +0100 nordugrid-arc-nox (1.1.0~rc5-2) unstable; urgency=low * Rebuild for Globus Toolkit 5 -- Mattias Ellert Fri, 26 Feb 2010 16:25:39 +0100 nordugrid-arc-nox (1.1.0~rc5-1) unstable; urgency=low * 1.1.0 Release Candidate 5 -- Mattias Ellert Fri, 26 Feb 2010 15:07:39 +0100 nordugrid-arc-nox (1.1.0~rc4-1) unstable; urgency=low * 1.1.0 release candidate 4 -- Mattias Ellert Wed, 24 Feb 2010 12:34:41 +0100 nordugrid-arc-nox (1.1.0~rc3-1) unstable; urgency=low * 1.1.0 release candidate 3 -- Mattias Ellert Mon, 22 Feb 2010 10:20:27 +0100 nordugrid-arc-nox 
(1.1.0~rc2-1) unstable; urgency=low * 1.1.0 release candidate 2 -- Mattias Ellert Mon, 15 Feb 2010 19:08:07 +0100 nordugrid-arc-nox (1.1.0~rc1-1) unstable; urgency=low * 1.1.0 release candidate 1 -- Mattias Ellert Thu, 11 Feb 2010 19:48:01 +0100 nordugrid-arc-nox (1.0.0-1) unstable; urgency=low * 1.0.0 Final release -- Mattias Ellert Sun, 29 Nov 2009 23:13:41 +0100 nordugrid-arc-nox (1.0.0~rc7-1) unstable; urgency=low * 1.0.0 release candidate 7 -- Mattias Ellert Thu, 19 Nov 2009 15:30:32 +0100 nordugrid-arc-nox (1.0.0~rc6-1) unstable; urgency=low * 1.0.0 release candidate 6 -- Mattias Ellert Thu, 12 Nov 2009 10:12:45 +0100 nordugrid-arc-nox (1.0.0~rc5-1) unstable; urgency=low * 1.0.0 release candidate 5 -- Mattias Ellert Wed, 04 Nov 2009 16:45:22 +0100 nordugrid-arc1 (0.9.4~rc4-1) unstable; urgency=low * 0.9.3 release candidate 4 -- Mattias Ellert Mon, 26 Oct 2009 23:19:55 +0100 nordugrid-arc1 (0.9.4~rc3-1) unstable; urgency=low * 0.9.3 release candidate 3 -- Mattias Ellert Thu, 22 Oct 2009 19:22:31 +0200 nordugrid-arc1 (0.9.4~rc2-1) unstable; urgency=low * 0.9.3 release candidate 2 -- Mattias Ellert Thu, 15 Oct 2009 09:04:24 +0200 nordugrid-arc1 (0.9.3-1) unstable; urgency=low * Final 0.9.3 release -- Mattias Ellert Sun, 27 Sep 2009 01:27:31 +0200 nordugrid-arc1 (0.9.3~rc3-1) unstable; urgency=low * Initial release -- Mattias Ellert Mon, 5 Nov 2007 10:12:49 -0400 nordugrid-arc-5.4.2/debian/PaxHeaders.7502/libarccommon3.docs0000644000000000000000000000012412231166265022025 xustar000000000000000027 mtime=1382345909.085453 27 atime=1513200576.911731 30 ctime=1513200668.661853424 nordugrid-arc-5.4.2/debian/libarccommon3.docs0000644000175000002070000000002612231166265022070 0ustar00mockbuildmock00000000000000README AUTHORS NOTICE nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-gridmap-utils.install0000644000000000000000000000012411567102135025327 xustar000000000000000027 mtime=1306297437.742006 27 atime=1513200576.912731 30 ctime=1513200668.650853289 nordugrid-arc-5.4.2/debian/nordugrid-arc-gridmap-utils.install0000644000175000002070000000016111567102135025372 0ustar00mockbuildmock00000000000000debian/tmp/usr/sbin/nordugridmap debian/tmp/etc/cron.d/nordugridmap debian/tmp/usr/share/man/man8/nordugridmap.8 nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-gridftpd.logrotate0000644000000000000000000000012412477513147024715 xustar000000000000000027 mtime=1425970791.470164 27 atime=1513200576.919731 30 ctime=1513200668.678853631 nordugrid-arc-5.4.2/debian/nordugrid-arc-gridftpd.logrotate0000644000175000002070000000030412477513147024757 0ustar00mockbuildmock00000000000000/var/log/arc/gridftpd.log { missingok compress delaycompress daily rotate 14 create postrotate kill -HUP `cat /var/run/gridftpd.pid 2> /dev/null` 2> /dev/null || true endscript } nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-egiis.postrm0000644000000000000000000000012412132441403023515 xustar000000000000000027 mtime=1365918467.545496 27 atime=1513200576.911731 30 ctime=1513200668.681853668 nordugrid-arc-5.4.2/debian/nordugrid-arc-egiis.postrm0000644000175000002070000000145012132441403023562 0ustar00mockbuildmock00000000000000#!/bin/sh set -e #DEBHELPER# if [ "$1" = "purge" ] ; then APP_PROFILE=/etc/apparmor.d/usr.sbin.slapd LOCAL_APP_PROFILE=/etc/apparmor.d/local/usr.sbin.slapd if [ -r "$LOCAL_APP_PROFILE" ] ; then sed '/# AppArmor profile for NorduGrid ARC EGIIS START/,/# AppArmor profile for NorduGrid ARC EGIIS END/d' -i "$LOCAL_APP_PROFILE" fi if [ ! 
-r "$APP_PROFILE" ] ; then if [ -r "$LOCAL_APP_PROFILE" ] ; then if [ -z "`sed '/^#/d' $LOCAL_APP_PROFILE`" ] ; then rm -f "$LOCAL_APP_PROFILE" || true fi fi rmdir /etc/apparmor.d/local 2>/dev/null || true rmdir /etc/apparmor.d 2>/dev/null || true fi if [ -r "$APP_PROFILE" ] ; then # Reload the profile if aa-status --enabled 2>/dev/null ; then apparmor_parser -r -T -W "$APP_PROFILE" || true fi fi fi nordugrid-arc-5.4.2/debian/PaxHeaders.7502/nordugrid-arc-acix-core.install0000644000000000000000000000012412010234220024401 xustar000000000000000027 mtime=1344354448.896736 27 atime=1513200577.037732 30 ctime=1513200668.641853179 nordugrid-arc-5.4.2/debian/nordugrid-arc-acix-core.install0000644000175000002070000000016212010234220024445 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/python?.?/site-packages/acix/__init__.py* debian/tmp/usr/lib/python?.?/site-packages/acix/core nordugrid-arc-5.4.2/PaxHeaders.7502/config.guess0000644000000000000000000000013213214315717017515 xustar000000000000000030 mtime=1513200591.873914277 30 atime=1513200617.113222964 30 ctime=1513200658.613730531 nordugrid-arc-5.4.2/config.guess0000755000175000002070000012761513214315717017602 0ustar00mockbuildmock00000000000000#! /bin/sh # Attempt to guess a canonical system name. # Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, # 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 # Free Software Foundation, Inc. timestamp='2009-11-20' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA # 02110-1301, USA. # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # Originally written by Per Bothner. Please send patches (context # diff format) to and include a ChangeLog # entry. # # This script attempts to guess a canonical system name similar to # config.sub. If it succeeds, it prints the system name on stdout, and # exits with 0. Otherwise, it exits with 1. # # You can get the latest version of this script from: # http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] Output the configuration name of the system \`$me' is run on. Operation modes: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit Report bugs and patches to ." version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. This is free software; see the source for copying conditions. 
There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. break ;; -* ) echo "$me: invalid option $1$help" >&2 exit 1 ;; * ) break ;; esac done if test $# != 0; then echo "$me: too many arguments$help" >&2 exit 1 fi trap 'exit 1' 1 2 15 # CC_FOR_BUILD -- compiler used by this script. Note that the use of a # compiler to aid in system detection is discouraged as it requires # temporary files to be created and, as you can see below, it is a # headache to deal with in a portable fashion. # Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still # use `HOST_CC' if defined, but it is deprecated. # Portable tmp directory creation inspired by the Autoconf team. set_cc_for_build=' trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; : ${TMPDIR=/tmp} ; { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; dummy=$tmp/dummy ; tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; case $CC_FOR_BUILD,$HOST_CC,$CC in ,,) echo "int x;" > $dummy.c ; for c in cc gcc c89 c99 ; do if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then CC_FOR_BUILD="$c"; break ; fi ; done ; if test x"$CC_FOR_BUILD" = x ; then CC_FOR_BUILD=no_compiler_found ; fi ;; ,,*) CC_FOR_BUILD=$CC ;; ,*,*) CC_FOR_BUILD=$HOST_CC ;; esac ; set_cc_for_build= ;' # This is needed to find uname on a Pyramid OSx when run in the BSD universe. # (ghazi@noc.rutgers.edu 1994-08-24) if (test -f /.attbin/uname) >/dev/null 2>&1 ; then PATH=$PATH:/.attbin ; export PATH fi UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown # Note: order is significant - the case branches are not exclusive. case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in *:NetBSD:*:*) # NetBSD (nbsd) targets should (where applicable) match one or # more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*, # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently # switched to ELF, *-*-netbsd* would select the old # object file format. This provides both forward # compatibility and a consistent mechanism for selecting the # object file format. # # Note: NetBSD doesn't particularly care about the vendor # portion of the name. We always set it to "unknown". 
sysctl="sysctl -n hw.machine_arch" UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ /usr/sbin/$sysctl 2>/dev/null || echo unknown)` case "${UNAME_MACHINE_ARCH}" in armeb) machine=armeb-unknown ;; arm*) machine=arm-unknown ;; sh3el) machine=shl-unknown ;; sh3eb) machine=sh-unknown ;; sh5el) machine=sh5le-unknown ;; *) machine=${UNAME_MACHINE_ARCH}-unknown ;; esac # The Operating System including object format, if it has switched # to ELF recently, or will in the future. case "${UNAME_MACHINE_ARCH}" in arm*|i386|m68k|ns32k|sh3*|sparc|vax) eval $set_cc_for_build if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ELF__ then # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). # Return netbsd for either. FIX? os=netbsd else os=netbsdelf fi ;; *) os=netbsd ;; esac # The OS release # Debian GNU/NetBSD machines have a different userland, and # thus, need a distinct triplet. However, they do not need # kernel version information, so it can be replaced with a # suitable tag, in the style of linux-gnu. case "${UNAME_VERSION}" in Debian*) release='-gnu' ;; *) release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` ;; esac # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: # contains redundant information, the shorter form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. echo "${machine}-${os}${release}" exit ;; *:OpenBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} exit ;; *:ekkoBSD:*:*) echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} exit ;; *:SolidBSD:*:*) echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} exit ;; macppc:MirBSD:*:*) echo powerpc-unknown-mirbsd${UNAME_RELEASE} exit ;; *:MirBSD:*:*) echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} exit ;; alpha:OSF1:*:*) case $UNAME_RELEASE in *4.0) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` ;; *5.*) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` ;; esac # According to Compaq, /usr/sbin/psrinfo has been available on # OSF/1 and Tru64 systems produced since 1995. I hope that # covers most systems running today. This code pipes the CPU # types through head -n 1, so we only detect the type of CPU 0. ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` case "$ALPHA_CPU_TYPE" in "EV4 (21064)") UNAME_MACHINE="alpha" ;; "EV4.5 (21064)") UNAME_MACHINE="alpha" ;; "LCA4 (21066/21068)") UNAME_MACHINE="alpha" ;; "EV5 (21164)") UNAME_MACHINE="alphaev5" ;; "EV5.6 (21164A)") UNAME_MACHINE="alphaev56" ;; "EV5.6 (21164PC)") UNAME_MACHINE="alphapca56" ;; "EV5.7 (21164PC)") UNAME_MACHINE="alphapca57" ;; "EV6 (21264)") UNAME_MACHINE="alphaev6" ;; "EV6.7 (21264A)") UNAME_MACHINE="alphaev67" ;; "EV6.8CB (21264C)") UNAME_MACHINE="alphaev68" ;; "EV6.8AL (21264B)") UNAME_MACHINE="alphaev68" ;; "EV6.8CX (21264D)") UNAME_MACHINE="alphaev68" ;; "EV6.9A (21264/EV69A)") UNAME_MACHINE="alphaev69" ;; "EV7 (21364)") UNAME_MACHINE="alphaev7" ;; "EV7.9 (21364A)") UNAME_MACHINE="alphaev79" ;; esac # A Pn.n version is a patched version. # A Vn.n version is a released version. # A Tn.n version is a released field test version. # A Xn.n version is an unreleased experimental baselevel. # 1.2 uses "1.2" for uname -r. echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` exit ;; Alpha\ *:Windows_NT*:*) # How do we know it's Interix rather than the generic POSIX subsystem? 
# Should we change UNAME_MACHINE based on the output of uname instead # of the specific Alpha model? echo alpha-pc-interix exit ;; 21064:Windows_NT:50:3) echo alpha-dec-winnt3.5 exit ;; Amiga*:UNIX_System_V:4.0:*) echo m68k-unknown-sysv4 exit ;; *:[Aa]miga[Oo][Ss]:*:*) echo ${UNAME_MACHINE}-unknown-amigaos exit ;; *:[Mm]orph[Oo][Ss]:*:*) echo ${UNAME_MACHINE}-unknown-morphos exit ;; *:OS/390:*:*) echo i370-ibm-openedition exit ;; *:z/VM:*:*) echo s390-ibm-zvmoe exit ;; *:OS400:*:*) echo powerpc-ibm-os400 exit ;; arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) echo arm-acorn-riscix${UNAME_RELEASE} exit ;; arm:riscos:*:*|arm:RISCOS:*:*) echo arm-unknown-riscos exit ;; SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) echo hppa1.1-hitachi-hiuxmpp exit ;; Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. if test "`(/bin/universe) 2>/dev/null`" = att ; then echo pyramid-pyramid-sysv3 else echo pyramid-pyramid-bsd fi exit ;; NILE*:*:*:dcosx) echo pyramid-pyramid-svr4 exit ;; DRS?6000:unix:4.0:6*) echo sparc-icl-nx6 exit ;; DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) case `/usr/bin/uname -p` in sparc) echo sparc-icl-nx7; exit ;; esac ;; s390x:SunOS:*:*) echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4H:SunOS:5.*:*) echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) echo i386-pc-auroraux${UNAME_RELEASE} exit ;; i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) eval $set_cc_for_build SUN_ARCH="i386" # If there is a compiler, see if it is configured for 64-bit objects. # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. # This test works for both compilers. if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then SUN_ARCH="x86_64" fi fi echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:6*:*) # According to config.sub, this is the proper way to canonicalize # SunOS6. Hard to guess exactly what SunOS6 will be like, but # it's likely to be more like Solaris than SunOS4. echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:*:*) case "`/usr/bin/arch -k`" in Series*|S4*) UNAME_RELEASE=`uname -v` ;; esac # Japanese Language versions have a version number like `4.1.3-JL'. echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` exit ;; sun3*:SunOS:*:*) echo m68k-sun-sunos${UNAME_RELEASE} exit ;; sun*:*:4.2BSD:*) UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 case "`/bin/arch`" in sun3) echo m68k-sun-sunos${UNAME_RELEASE} ;; sun4) echo sparc-sun-sunos${UNAME_RELEASE} ;; esac exit ;; aushp:SunOS:*:*) echo sparc-auspex-sunos${UNAME_RELEASE} exit ;; # The situation for MiNT is a little confusing. The machine name # can be virtually everything (everything which is not # "atarist" or "atariste" at least should have a processor # > m68000). The system name ranges from "MiNT" over "FreeMiNT" # to the lowercase version "mint" (or "freemint"). Finally # the system name "TOS" denotes a system which is actually not # MiNT. But MiNT is downward compatible to TOS, so this should # be no problem. 
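# Illustrative example (read off the branches that follow): an Atari Falcon
# running FreeMiNT matches `*falcon*:*MiNT:*:*' and is reported as
# m68k-atari-mint${UNAME_RELEASE}, while machines not matched by a more
# specific pattern fall through to m68k-unknown-mint${UNAME_RELEASE}.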
atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) echo m68k-milan-mint${UNAME_RELEASE} exit ;; hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) echo m68k-hades-mint${UNAME_RELEASE} exit ;; *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) echo m68k-unknown-mint${UNAME_RELEASE} exit ;; m68k:machten:*:*) echo m68k-apple-machten${UNAME_RELEASE} exit ;; powerpc:machten:*:*) echo powerpc-apple-machten${UNAME_RELEASE} exit ;; RISC*:Mach:*:*) echo mips-dec-mach_bsd4.3 exit ;; RISC*:ULTRIX:*:*) echo mips-dec-ultrix${UNAME_RELEASE} exit ;; VAX*:ULTRIX*:*:*) echo vax-dec-ultrix${UNAME_RELEASE} exit ;; 2020:CLIX:*:* | 2430:CLIX:*:*) echo clipper-intergraph-clix${UNAME_RELEASE} exit ;; mips:*:*:UMIPS | mips:*:*:RISCos) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #ifdef __cplusplus #include /* for printf() prototype */ int main (int argc, char *argv[]) { #else int main (argc, argv) int argc; char *argv[]; { #endif #if defined (host_mips) && defined (MIPSEB) #if defined (SYSTYPE_SYSV) printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_SVR4) printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); #endif #endif exit (-1); } EOF $CC_FOR_BUILD -o $dummy $dummy.c && dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && SYSTEM_NAME=`$dummy $dummyarg` && { echo "$SYSTEM_NAME"; exit; } echo mips-mips-riscos${UNAME_RELEASE} exit ;; Motorola:PowerMAX_OS:*:*) echo powerpc-motorola-powermax exit ;; Motorola:*:4.3:PL8-*) echo powerpc-harris-powermax exit ;; Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) echo powerpc-harris-powermax exit ;; Night_Hawk:Power_UNIX:*:*) echo powerpc-harris-powerunix exit ;; m88k:CX/UX:7*:*) echo m88k-harris-cxux7 exit ;; m88k:*:4*:R4*) echo m88k-motorola-sysv4 exit ;; m88k:*:3*:R3*) echo m88k-motorola-sysv3 exit ;; AViiON:dgux:*:*) # DG/UX returns AViiON for all architectures UNAME_PROCESSOR=`/usr/bin/uname -p` if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] then if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ [ ${TARGET_BINARY_INTERFACE}x = x ] then echo m88k-dg-dgux${UNAME_RELEASE} else echo m88k-dg-dguxbcs${UNAME_RELEASE} fi else echo i586-dg-dgux${UNAME_RELEASE} fi exit ;; M88*:DolphinOS:*:*) # DolphinOS (SVR3) echo m88k-dolphin-sysv3 exit ;; M88*:*:R3*:*) # Delta 88k system running SVR3 echo m88k-motorola-sysv3 exit ;; XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) echo m88k-tektronix-sysv3 exit ;; Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) echo m68k-tektronix-bsd exit ;; *:IRIX*:*:*) echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` exit ;; ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. 
echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' i*86:AIX:*:*) echo i386-ibm-aix exit ;; ia64:AIX:*:*) if [ -x /usr/bin/oslevel ] ; then IBM_REV=`/usr/bin/oslevel` else IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} fi echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} exit ;; *:AIX:2:3) if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #include main() { if (!__power_pc()) exit(1); puts("powerpc-ibm-aix3.2.5"); exit(0); } EOF if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` then echo "$SYSTEM_NAME" else echo rs6000-ibm-aix3.2.5 fi elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then echo rs6000-ibm-aix3.2.4 else echo rs6000-ibm-aix3.2 fi exit ;; *:AIX:*:[456]) IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then IBM_ARCH=rs6000 else IBM_ARCH=powerpc fi if [ -x /usr/bin/oslevel ] ; then IBM_REV=`/usr/bin/oslevel` else IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} fi echo ${IBM_ARCH}-ibm-aix${IBM_REV} exit ;; *:AIX:*:*) echo rs6000-ibm-aix exit ;; ibmrt:4.4BSD:*|romp-ibm:BSD:*) echo romp-ibm-bsd4.4 exit ;; ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to exit ;; # report: romp-ibm BSD 4.3 *:BOSX:*:*) echo rs6000-bull-bosx exit ;; DPX/2?00:B.O.S.:*:*) echo m68k-bull-sysv3 exit ;; 9000/[34]??:4.3bsd:1.*:*) echo m68k-hp-bsd exit ;; hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) echo m68k-hp-bsd4.4 exit ;; 9000/[34678]??:HP-UX:*:*) HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` case "${UNAME_MACHINE}" in 9000/31? ) HP_ARCH=m68000 ;; 9000/[34]?? ) HP_ARCH=m68k ;; 9000/[678][0-9][0-9]) if [ -x /usr/bin/getconf ]; then sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` case "${sc_cpu_version}" in 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 532) # CPU_PA_RISC2_0 case "${sc_kernel_bits}" in 32) HP_ARCH="hppa2.0n" ;; 64) HP_ARCH="hppa2.0w" ;; '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 esac ;; esac fi if [ "${HP_ARCH}" = "" ]; then eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #define _HPUX_SOURCE #include #include int main () { #if defined(_SC_KERNEL_BITS) long bits = sysconf(_SC_KERNEL_BITS); #endif long cpu = sysconf (_SC_CPU_VERSION); switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0"); break; case CPU_PA_RISC1_1: puts ("hppa1.1"); break; case CPU_PA_RISC2_0: #if defined(_SC_KERNEL_BITS) switch (bits) { case 64: puts ("hppa2.0w"); break; case 32: puts ("hppa2.0n"); break; default: puts ("hppa2.0"); break; } break; #else /* !defined(_SC_KERNEL_BITS) */ puts ("hppa2.0"); break; #endif default: puts ("hppa1.0"); break; } exit (0); } EOF (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` test -z "$HP_ARCH" && HP_ARCH=hppa fi ;; esac if [ ${HP_ARCH} = "hppa2.0w" ] then eval $set_cc_for_build # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler # generating 64-bit code. 
GNU and HP use different nomenclature: # # $ CC_FOR_BUILD=cc ./config.guess # => hppa2.0w-hp-hpux11.23 # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess # => hppa64-hp-hpux11.23 if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | grep -q __LP64__ then HP_ARCH="hppa2.0w" else HP_ARCH="hppa64" fi fi echo ${HP_ARCH}-hp-hpux${HPUX_REV} exit ;; ia64:HP-UX:*:*) HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` echo ia64-hp-hpux${HPUX_REV} exit ;; 3050*:HI-UX:*:*) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #include int main () { long cpu = sysconf (_SC_CPU_VERSION); /* The order matters, because CPU_IS_HP_MC68K erroneously returns true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct results, however. */ if (CPU_IS_PA_RISC (cpu)) { switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; default: puts ("hppa-hitachi-hiuxwe2"); break; } } else if (CPU_IS_HP_MC68K (cpu)) puts ("m68k-hitachi-hiuxwe2"); else puts ("unknown-hitachi-hiuxwe2"); exit (0); } EOF $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && { echo "$SYSTEM_NAME"; exit; } echo unknown-hitachi-hiuxwe2 exit ;; 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) echo hppa1.1-hp-bsd exit ;; 9000/8??:4.3bsd:*:*) echo hppa1.0-hp-bsd exit ;; *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) echo hppa1.0-hp-mpeix exit ;; hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) echo hppa1.1-hp-osf exit ;; hp8??:OSF1:*:*) echo hppa1.0-hp-osf exit ;; i*86:OSF1:*:*) if [ -x /usr/sbin/sysversion ] ; then echo ${UNAME_MACHINE}-unknown-osf1mk else echo ${UNAME_MACHINE}-unknown-osf1 fi exit ;; parisc*:Lites*:*:*) echo hppa1.1-hp-lites exit ;; C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) echo c1-convex-bsd exit ;; C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) if getsysinfo -f scalar_acc then echo c32-convex-bsd else echo c2-convex-bsd fi exit ;; C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) echo c34-convex-bsd exit ;; C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) echo c38-convex-bsd exit ;; C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) echo c4-convex-bsd exit ;; CRAY*Y-MP:*:*:*) echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*[A-Z]90:*:*:*) echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ -e 's/\.[^.]*$/.X/' exit ;; CRAY*TS:*:*:*) echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*T3E:*:*:*) echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*SV1:*:*:*) echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; *:UNICOS/mp:*:*) echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; 5000:UNIX_System_V:4.*:*) FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) echo 
${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} exit ;; sparc*:BSD/OS:*:*) echo sparc-unknown-bsdi${UNAME_RELEASE} exit ;; *:BSD/OS:*:*) echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} exit ;; *:FreeBSD:*:*) case ${UNAME_MACHINE} in pc98) echo i386-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; amd64) echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; *) echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; esac exit ;; i*:CYGWIN*:*) echo ${UNAME_MACHINE}-pc-cygwin exit ;; *:MINGW*:*) echo ${UNAME_MACHINE}-pc-mingw32 exit ;; i*:windows32*:*) # uname -m includes "-pc" on this system. echo ${UNAME_MACHINE}-mingw32 exit ;; i*:PW*:*) echo ${UNAME_MACHINE}-pc-pw32 exit ;; *:Interix*:*) case ${UNAME_MACHINE} in x86) echo i586-pc-interix${UNAME_RELEASE} exit ;; authenticamd | genuineintel | EM64T) echo x86_64-unknown-interix${UNAME_RELEASE} exit ;; IA64) echo ia64-unknown-interix${UNAME_RELEASE} exit ;; esac ;; [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) echo i${UNAME_MACHINE}-pc-mks exit ;; 8664:Windows_NT:*) echo x86_64-pc-mks exit ;; i*:Windows_NT*:* | Pentium*:Windows_NT*:*) # How do we know it's Interix rather than the generic POSIX subsystem? # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we # UNAME_MACHINE based on the output of uname instead of i386? echo i586-pc-interix exit ;; i*:UWIN*:*) echo ${UNAME_MACHINE}-pc-uwin exit ;; amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) echo x86_64-unknown-cygwin exit ;; p*:CYGWIN*:*) echo powerpcle-unknown-cygwin exit ;; prep*:SunOS:5.*:*) echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; *:GNU:*:*) # the GNU system echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` exit ;; *:GNU/*:*:*) # other systems with GNU libc and userland echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu exit ;; i*86:Minix:*:*) echo ${UNAME_MACHINE}-pc-minix exit ;; alpha:Linux:*:*) case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in EV5) UNAME_MACHINE=alphaev5 ;; EV56) UNAME_MACHINE=alphaev56 ;; PCA56) UNAME_MACHINE=alphapca56 ;; PCA57) UNAME_MACHINE=alphapca56 ;; EV6) UNAME_MACHINE=alphaev6 ;; EV67) UNAME_MACHINE=alphaev67 ;; EV68*) UNAME_MACHINE=alphaev68 ;; esac objdump --private-headers /bin/sh | grep -q ld.so.1 if test "$?" 
= 0 ; then LIBC="libc1" ; else LIBC="" ; fi echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC} exit ;; arm*:Linux:*:*) eval $set_cc_for_build if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_EABI__ then echo ${UNAME_MACHINE}-unknown-linux-gnu else echo ${UNAME_MACHINE}-unknown-linux-gnueabi fi exit ;; avr32*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; cris:Linux:*:*) echo cris-axis-linux-gnu exit ;; crisv32:Linux:*:*) echo crisv32-axis-linux-gnu exit ;; frv:Linux:*:*) echo frv-unknown-linux-gnu exit ;; i*86:Linux:*:*) LIBC=gnu eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #ifdef __dietlibc__ LIBC=dietlibc #endif EOF eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'` echo "${UNAME_MACHINE}-pc-linux-${LIBC}" exit ;; ia64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; m32r*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; m68*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; mips:Linux:*:* | mips64:Linux:*:*) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #undef CPU #undef ${UNAME_MACHINE} #undef ${UNAME_MACHINE}el #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) CPU=${UNAME_MACHINE}el #else #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) CPU=${UNAME_MACHINE} #else CPU= #endif #endif EOF eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; } ;; or32:Linux:*:*) echo or32-unknown-linux-gnu exit ;; padre:Linux:*:*) echo sparc-unknown-linux-gnu exit ;; parisc64:Linux:*:* | hppa64:Linux:*:*) echo hppa64-unknown-linux-gnu exit ;; parisc:Linux:*:* | hppa:Linux:*:*) # Look for CPU level case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in PA7*) echo hppa1.1-unknown-linux-gnu ;; PA8*) echo hppa2.0-unknown-linux-gnu ;; *) echo hppa-unknown-linux-gnu ;; esac exit ;; ppc64:Linux:*:*) echo powerpc64-unknown-linux-gnu exit ;; ppc:Linux:*:*) echo powerpc-unknown-linux-gnu exit ;; s390:Linux:*:* | s390x:Linux:*:*) echo ${UNAME_MACHINE}-ibm-linux exit ;; sh64*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; sh*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; sparc:Linux:*:* | sparc64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; vax:Linux:*:*) echo ${UNAME_MACHINE}-dec-linux-gnu exit ;; x86_64:Linux:*:*) echo x86_64-unknown-linux-gnu exit ;; xtensa*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; i*86:DYNIX/ptx:4*:*) # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. # earlier versions are messed up and put the nodename in both # sysname and nodename. echo i386-sequent-sysv4 exit ;; i*86:UNIX_SV:4.2MP:2.*) # Unixware is an offshoot of SVR4, but it has its own version # number series starting with 2... # I am not positive that other SVR4 systems won't match this, # I just have to hope. -- rms. # Use sysv4.2uw... so that sysv4* matches it. echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} exit ;; i*86:OS/2:*:*) # If we were able to find `uname', then EMX Unix compatibility # is probably installed. 
echo ${UNAME_MACHINE}-pc-os2-emx exit ;; i*86:XTS-300:*:STOP) echo ${UNAME_MACHINE}-unknown-stop exit ;; i*86:atheos:*:*) echo ${UNAME_MACHINE}-unknown-atheos exit ;; i*86:syllable:*:*) echo ${UNAME_MACHINE}-pc-syllable exit ;; i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) echo i386-unknown-lynxos${UNAME_RELEASE} exit ;; i*86:*DOS:*:*) echo ${UNAME_MACHINE}-pc-msdosdjgpp exit ;; i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} else echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} fi exit ;; i*86:*:5:[678]*) # UnixWare 7.x, OpenUNIX and OpenServer 6. case `/bin/uname -X | grep "^Machine"` in *486*) UNAME_MACHINE=i486 ;; *Pentium) UNAME_MACHINE=i586 ;; *Pent*|*Celeron) UNAME_MACHINE=i686 ;; esac echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} exit ;; i*86:*:3.2:*) if test -f /usr/options/cb.name; then UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ && UNAME_MACHINE=i586 (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ && UNAME_MACHINE=i686 (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ && UNAME_MACHINE=i686 echo ${UNAME_MACHINE}-pc-sco$UNAME_REL else echo ${UNAME_MACHINE}-pc-sysv32 fi exit ;; pc:*:*:*) # Left here for compatibility: # uname -m prints for DJGPP always 'pc', but it prints nothing about # the processor, so we play safe by assuming i586. # Note: whatever this is, it MUST be the same as what config.sub # prints for the "djgpp" host, or else GDB configury will decide that # this is a cross-build. echo i586-pc-msdosdjgpp exit ;; Intel:Mach:3*:*) echo i386-pc-mach3 exit ;; paragon:*:*:*) echo i860-intel-osf1 exit ;; i860:*:4.*:*) # i860-SVR4 if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 else # Add other i860-SVR4 vendors below as they are discovered. 
echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 fi exit ;; mini*:CTIX:SYS*5:*) # "miniframe" echo m68010-convergent-sysv exit ;; mc68k:UNIX:SYSTEM5:3.51m) echo m68k-convergent-sysv exit ;; M680?0:D-NIX:5.3:*) echo m68k-diab-dnix exit ;; M68*:*:R3V[5678]*:*) test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) OS_REL='' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4; exit; } ;; NCR*:*:4.2:* | MPRAS*:*:4.2:*) OS_REL='.3' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) echo m68k-unknown-lynxos${UNAME_RELEASE} exit ;; mc68030:UNIX_System_V:4.*:*) echo m68k-atari-sysv4 exit ;; TSUNAMI:LynxOS:2.*:*) echo sparc-unknown-lynxos${UNAME_RELEASE} exit ;; rs6000:LynxOS:2.*:*) echo rs6000-unknown-lynxos${UNAME_RELEASE} exit ;; PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) echo powerpc-unknown-lynxos${UNAME_RELEASE} exit ;; SM[BE]S:UNIX_SV:*:*) echo mips-dde-sysv${UNAME_RELEASE} exit ;; RM*:ReliantUNIX-*:*:*) echo mips-sni-sysv4 exit ;; RM*:SINIX-*:*:*) echo mips-sni-sysv4 exit ;; *:SINIX-*:*:*) if uname -p 2>/dev/null >/dev/null ; then UNAME_MACHINE=`(uname -p) 2>/dev/null` echo ${UNAME_MACHINE}-sni-sysv4 else echo ns32k-sni-sysv fi exit ;; PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort # says echo i586-unisys-sysv4 exit ;; *:UNIX_System_V:4*:FTX*) # From Gerald Hewes . # How about differentiating between stratus architectures? -djm echo hppa1.1-stratus-sysv4 exit ;; *:*:*:FTX*) # From seanf@swdc.stratus.com. echo i860-stratus-sysv4 exit ;; i*86:VOS:*:*) # From Paul.Green@stratus.com. echo ${UNAME_MACHINE}-stratus-vos exit ;; *:VOS:*:*) # From Paul.Green@stratus.com. echo hppa1.1-stratus-vos exit ;; mc68*:A/UX:*:*) echo m68k-apple-aux${UNAME_RELEASE} exit ;; news*:NEWS-OS:6*:*) echo mips-sony-newsos6 exit ;; R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) if [ -d /usr/nec ]; then echo mips-nec-sysv${UNAME_RELEASE} else echo mips-unknown-sysv${UNAME_RELEASE} fi exit ;; BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. echo powerpc-be-beos exit ;; BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. echo powerpc-apple-beos exit ;; BePC:BeOS:*:*) # BeOS running on Intel PC compatible. echo i586-pc-beos exit ;; BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
echo i586-pc-haiku exit ;; SX-4:SUPER-UX:*:*) echo sx4-nec-superux${UNAME_RELEASE} exit ;; SX-5:SUPER-UX:*:*) echo sx5-nec-superux${UNAME_RELEASE} exit ;; SX-6:SUPER-UX:*:*) echo sx6-nec-superux${UNAME_RELEASE} exit ;; SX-7:SUPER-UX:*:*) echo sx7-nec-superux${UNAME_RELEASE} exit ;; SX-8:SUPER-UX:*:*) echo sx8-nec-superux${UNAME_RELEASE} exit ;; SX-8R:SUPER-UX:*:*) echo sx8r-nec-superux${UNAME_RELEASE} exit ;; Power*:Rhapsody:*:*) echo powerpc-apple-rhapsody${UNAME_RELEASE} exit ;; *:Rhapsody:*:*) echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} exit ;; *:Darwin:*:*) UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown case $UNAME_PROCESSOR in i386) eval $set_cc_for_build if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then UNAME_PROCESSOR="x86_64" fi fi ;; unknown) UNAME_PROCESSOR=powerpc ;; esac echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} exit ;; *:procnto*:*:* | *:QNX:[0123456789]*:*) UNAME_PROCESSOR=`uname -p` if test "$UNAME_PROCESSOR" = "x86"; then UNAME_PROCESSOR=i386 UNAME_MACHINE=pc fi echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} exit ;; *:QNX:*:4*) echo i386-pc-qnx exit ;; NSE-?:NONSTOP_KERNEL:*:*) echo nse-tandem-nsk${UNAME_RELEASE} exit ;; NSR-?:NONSTOP_KERNEL:*:*) echo nsr-tandem-nsk${UNAME_RELEASE} exit ;; *:NonStop-UX:*:*) echo mips-compaq-nonstopux exit ;; BS2000:POSIX*:*:*) echo bs2000-siemens-sysv exit ;; DS/*:UNIX_System_V:*:*) echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} exit ;; *:Plan9:*:*) # "uname -m" is not consistent, so use $cputype instead. 386 # is converted to i386 for consistency with other x86 # operating systems. if test "$cputype" = "386"; then UNAME_MACHINE=i386 else UNAME_MACHINE="$cputype" fi echo ${UNAME_MACHINE}-unknown-plan9 exit ;; *:TOPS-10:*:*) echo pdp10-unknown-tops10 exit ;; *:TENEX:*:*) echo pdp10-unknown-tenex exit ;; KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) echo pdp10-dec-tops20 exit ;; XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) echo pdp10-xkl-tops20 exit ;; *:TOPS-20:*:*) echo pdp10-unknown-tops20 exit ;; *:ITS:*:*) echo pdp10-unknown-its exit ;; SEI:*:*:SEIUX) echo mips-sei-seiux${UNAME_RELEASE} exit ;; *:DragonFly:*:*) echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` exit ;; *:*VMS:*:*) UNAME_MACHINE=`(uname -p) 2>/dev/null` case "${UNAME_MACHINE}" in A*) echo alpha-dec-vms ; exit ;; I*) echo ia64-dec-vms ; exit ;; V*) echo vax-dec-vms ; exit ;; esac ;; *:XENIX:*:SysV) echo i386-pc-xenix exit ;; i*86:skyos:*:*) echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' exit ;; i*86:rdos:*:*) echo ${UNAME_MACHINE}-pc-rdos exit ;; i*86:AROS:*:*) echo ${UNAME_MACHINE}-pc-aros exit ;; esac #echo '(No uname command or uname output not recognized.)' 1>&2 #echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2 eval $set_cc_for_build cat >$dummy.c < # include #endif main () { #if defined (sony) #if defined (MIPSEB) /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, I don't know.... 
*/ printf ("mips-sony-bsd\n"); exit (0); #else #include printf ("m68k-sony-newsos%s\n", #ifdef NEWSOS4 "4" #else "" #endif ); exit (0); #endif #endif #if defined (__arm) && defined (__acorn) && defined (__unix) printf ("arm-acorn-riscix\n"); exit (0); #endif #if defined (hp300) && !defined (hpux) printf ("m68k-hp-bsd\n"); exit (0); #endif #if defined (NeXT) #if !defined (__ARCHITECTURE__) #define __ARCHITECTURE__ "m68k" #endif int version; version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; if (version < 4) printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); else printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); exit (0); #endif #if defined (MULTIMAX) || defined (n16) #if defined (UMAXV) printf ("ns32k-encore-sysv\n"); exit (0); #else #if defined (CMU) printf ("ns32k-encore-mach\n"); exit (0); #else printf ("ns32k-encore-bsd\n"); exit (0); #endif #endif #endif #if defined (__386BSD__) printf ("i386-pc-bsd\n"); exit (0); #endif #if defined (sequent) #if defined (i386) printf ("i386-sequent-dynix\n"); exit (0); #endif #if defined (ns32000) printf ("ns32k-sequent-dynix\n"); exit (0); #endif #endif #if defined (_SEQUENT_) struct utsname un; uname(&un); if (strncmp(un.version, "V2", 2) == 0) { printf ("i386-sequent-ptx2\n"); exit (0); } if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */ printf ("i386-sequent-ptx1\n"); exit (0); } printf ("i386-sequent-ptx\n"); exit (0); #endif #if defined (vax) # if !defined (ultrix) # include # if defined (BSD) # if BSD == 43 printf ("vax-dec-bsd4.3\n"); exit (0); # else # if BSD == 199006 printf ("vax-dec-bsd4.3reno\n"); exit (0); # else printf ("vax-dec-bsd\n"); exit (0); # endif # endif # else printf ("vax-dec-bsd\n"); exit (0); # endif # else printf ("vax-dec-ultrix\n"); exit (0); # endif #endif #if defined (alliant) && defined (i860) printf ("i860-alliant-bsd\n"); exit (0); #endif exit (1); } EOF $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` && { echo "$SYSTEM_NAME"; exit; } # Apollos put the system type in the environment. test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; } # Convex versions that predate uname can use getsysinfo(1) if [ -x /usr/convex/getsysinfo ] then case `getsysinfo -f cpu_type` in c1*) echo c1-convex-bsd exit ;; c2*) if getsysinfo -f scalar_acc then echo c32-convex-bsd else echo c2-convex-bsd fi exit ;; c34*) echo c34-convex-bsd exit ;; c38*) echo c38-convex-bsd exit ;; c4*) echo c4-convex-bsd exit ;; esac fi cat >&2 < in order to provide the needed information to handle your system. 
config.guess timestamp = $timestamp uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` /bin/uname -X = `(/bin/uname -X) 2>/dev/null` hostinfo = `(hostinfo) 2>/dev/null` /bin/universe = `(/bin/universe) 2>/dev/null` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` /bin/arch = `(/bin/arch) 2>/dev/null` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` UNAME_MACHINE = ${UNAME_MACHINE} UNAME_RELEASE = ${UNAME_RELEASE} UNAME_SYSTEM = ${UNAME_SYSTEM} UNAME_VERSION = ${UNAME_VERSION} EOF exit 1 # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End: nordugrid-arc-5.4.2/PaxHeaders.7502/NEWS0000644000000000000000000000012411060557544015703 xustar000000000000000027 mtime=1220730724.412237 27 atime=1513200573.741692 30 ctime=1513200658.612730519 nordugrid-arc-5.4.2/NEWS0000644000175000002070000000002511060557544015745 0ustar00mockbuildmock00000000000000no news good news :) nordugrid-arc-5.4.2/PaxHeaders.7502/ltmain.sh0000644000000000000000000000013113214315707017016 xustar000000000000000029 mtime=1513200583.93981724 30 atime=1513200658.466728733 30 ctime=1513200658.618730592 nordugrid-arc-5.4.2/ltmain.sh0000755000175000002070000073306013214315707017101 0ustar00mockbuildmock00000000000000# Generated from ltmain.m4sh. # ltmain.sh (GNU libtool) 2.2.6b # Written by Gordon Matzigkeit , 1996 # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, 2007 2008 Free Software Foundation, Inc. # This is free software; see the source for copying conditions. There is NO # warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # GNU Libtool is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # As a special exception to the GNU General Public License, # if you distribute this file as part of a program or library that # is built using GNU Libtool, you may include this file under the # same distribution terms that you use for the rest of that program. # # GNU Libtool is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Libtool; see the file COPYING. If not, a copy # can be downloaded from http://www.gnu.org/licenses/gpl.html, # or obtained by writing to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # Usage: $progname [OPTION]... [MODE-ARG]... # # Provide generalized library-building support services. 
# # --config show all configuration variables # --debug enable verbose shell tracing # -n, --dry-run display commands without modifying any files # --features display basic configuration information and exit # --mode=MODE use operation mode MODE # --preserve-dup-deps don't remove duplicate dependency libraries # --quiet, --silent don't print informational messages # --tag=TAG use configuration variables from tag TAG # -v, --verbose print informational messages (default) # --version print version information # -h, --help print short or long help message # # MODE must be one of the following: # # clean remove files from the build directory # compile compile a source file into a libtool object # execute automatically set library path, then run a program # finish complete the installation of libtool libraries # install install libraries or executables # link create a library or an executable # uninstall remove libraries from an installed directory # # MODE-ARGS vary depending on the MODE. # Try `$progname --help --mode=MODE' for a more detailed description of MODE. # # When reporting a bug, please describe a test case to reproduce it and # include the following information: # # host-triplet: $host # shell: $SHELL # compiler: $LTCC # compiler flags: $LTCFLAGS # linker: $LD (gnu? $with_gnu_ld) # $progname: (GNU libtool) 2.2.6b # automake: $automake_version # autoconf: $autoconf_version # # Report bugs to . PROGRAM=ltmain.sh PACKAGE=libtool VERSION=2.2.6b TIMESTAMP="" package_revision=1.3017 # Be Bourne compatible if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac fi BIN_SH=xpg4; export BIN_SH # for Tru64 DUALCASE=1; export DUALCASE # for MKS sh # NLS nuisances: We save the old values to restore during execute mode. # Only set LANG and LC_ALL to C if already set. # These must not be set unconditionally because not all systems understand # e.g. LANG=C (notably SCO). lt_user_locale= lt_safe_locale= for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES do eval "if test \"\${$lt_var+set}\" = set; then save_$lt_var=\$$lt_var $lt_var=C export $lt_var lt_user_locale=\"$lt_var=\\\$save_\$lt_var; \$lt_user_locale\" lt_safe_locale=\"$lt_var=C; \$lt_safe_locale\" fi" done $lt_unset CDPATH : ${CP="cp -f"} : ${ECHO="echo"} : ${EGREP="/bin/grep -E"} : ${FGREP="/bin/grep -F"} : ${GREP="/bin/grep"} : ${LN_S="ln -s"} : ${MAKE="make"} : ${MKDIR="mkdir"} : ${MV="mv -f"} : ${RM="rm -f"} : ${SED="/bin/sed"} : ${SHELL="${CONFIG_SHELL-/bin/sh}"} : ${Xsed="$SED -e 1s/^X//"} # Global variables: EXIT_SUCCESS=0 EXIT_FAILURE=1 EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing. EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake. exit_status=$EXIT_SUCCESS # Make sure IFS has a sensible default lt_nl=' ' IFS=" $lt_nl" dirname="s,/[^/]*$,," basename="s,^.*/,," # func_dirname_and_basename file append nondir_replacement # perform func_basename and func_dirname in a single function # call: # dirname: Compute the dirname of FILE. If nonempty, # add APPEND to the result, otherwise set result # to NONDIR_REPLACEMENT. # value returned in "$func_dirname_result" # basename: Compute filename of FILE. 
# value retuned in "$func_basename_result" # Implementation must be kept synchronized with func_dirname # and func_basename. For efficiency, we do not delegate to # those functions but instead duplicate the functionality here. func_dirname_and_basename () { # Extract subdirectory from the argument. func_dirname_result=`$ECHO "X${1}" | $Xsed -e "$dirname"` if test "X$func_dirname_result" = "X${1}"; then func_dirname_result="${3}" else func_dirname_result="$func_dirname_result${2}" fi func_basename_result=`$ECHO "X${1}" | $Xsed -e "$basename"` } # Generated shell functions inserted here. # Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh # is ksh but when the shell is invoked as "sh" and the current value of # the _XPG environment variable is not equal to 1 (one), the special # positional parameter $0, within a function call, is the name of the # function. progpath="$0" # The name of this program: # In the unlikely event $progname began with a '-', it would play havoc with # func_echo (imagine progname=-n), so we prepend ./ in that case: func_dirname_and_basename "$progpath" progname=$func_basename_result case $progname in -*) progname=./$progname ;; esac # Make sure we have an absolute path for reexecution: case $progpath in [\\/]*|[A-Za-z]:\\*) ;; *[\\/]*) progdir=$func_dirname_result progdir=`cd "$progdir" && pwd` progpath="$progdir/$progname" ;; *) save_IFS="$IFS" IFS=: for progdir in $PATH; do IFS="$save_IFS" test -x "$progdir/$progname" && break done IFS="$save_IFS" test -n "$progdir" || progdir=`pwd` progpath="$progdir/$progname" ;; esac # Sed substitution that helps us do robust quoting. It backslashifies # metacharacters that are still active within double-quoted strings. Xsed="${SED}"' -e 1s/^X//' sed_quote_subst='s/\([`"$\\]\)/\\\1/g' # Same as above, but do not quote variable references. double_quote_subst='s/\(["`\\]\)/\\\1/g' # Re-`\' parameter expansions in output of double_quote_subst that were # `\'-ed in input to the same. If an odd number of `\' preceded a '$' # in input to double_quote_subst, that '$' was protected from expansion. # Since each input `\' is now two `\'s, look for any number of runs of # four `\'s followed by two `\'s and then a '$'. `\' that '$'. bs='\\' bs2='\\\\' bs4='\\\\\\\\' dollar='\$' sed_double_backslash="\ s/$bs4/&\\ /g s/^$bs2$dollar/$bs&/ s/\\([^$bs]\\)$bs2$dollar/\\1$bs2$bs$dollar/g s/\n//g" # Standard options: opt_dry_run=false opt_help=false opt_quiet=false opt_verbose=false opt_warning=: # func_echo arg... # Echo program name prefixed message, along with the current mode # name if it has been set yet. func_echo () { $ECHO "$progname${mode+: }$mode: $*" } # func_verbose arg... # Echo program name prefixed message in verbose mode only. func_verbose () { $opt_verbose && func_echo ${1+"$@"} # A bug in bash halts the script if the last line of a function # fails when set -e is in force, so we need another command to # work around that: : } # func_error arg... # Echo program name prefixed message to standard error. func_error () { $ECHO "$progname${mode+: }$mode: "${1+"$@"} 1>&2 } # func_warning arg... # Echo program name prefixed warning message to standard error. func_warning () { $opt_warning && $ECHO "$progname${mode+: }$mode: warning: "${1+"$@"} 1>&2 # bash bug again: : } # func_fatal_error arg... # Echo program name prefixed message to standard error, and exit. func_fatal_error () { func_error ${1+"$@"} exit $EXIT_FAILURE } # func_fatal_help arg... 
# Echo program name prefixed message to standard error, followed by # a help hint, and exit. func_fatal_help () { func_error ${1+"$@"} func_fatal_error "$help" } help="Try \`$progname --help' for more information." ## default # func_grep expression filename # Check whether EXPRESSION matches any line of FILENAME, without output. func_grep () { $GREP "$1" "$2" >/dev/null 2>&1 } # func_mkdir_p directory-path # Make sure the entire path to DIRECTORY-PATH is available. func_mkdir_p () { my_directory_path="$1" my_dir_list= if test -n "$my_directory_path" && test "$opt_dry_run" != ":"; then # Protect directory names starting with `-' case $my_directory_path in -*) my_directory_path="./$my_directory_path" ;; esac # While some portion of DIR does not yet exist... while test ! -d "$my_directory_path"; do # ...make a list in topmost first order. Use a colon delimited # list incase some portion of path contains whitespace. my_dir_list="$my_directory_path:$my_dir_list" # If the last portion added has no slash in it, the list is done case $my_directory_path in */*) ;; *) break ;; esac # ...otherwise throw away the child directory and loop my_directory_path=`$ECHO "X$my_directory_path" | $Xsed -e "$dirname"` done my_dir_list=`$ECHO "X$my_dir_list" | $Xsed -e 's,:*$,,'` save_mkdir_p_IFS="$IFS"; IFS=':' for my_dir in $my_dir_list; do IFS="$save_mkdir_p_IFS" # mkdir can fail with a `File exist' error if two processes # try to create one of the directories concurrently. Don't # stop in that case! $MKDIR "$my_dir" 2>/dev/null || : done IFS="$save_mkdir_p_IFS" # Bail out if we (or some other process) failed to create a directory. test -d "$my_directory_path" || \ func_fatal_error "Failed to create \`$1'" fi } # func_mktempdir [string] # Make a temporary directory that won't clash with other running # libtool processes, and avoids race conditions if possible. If # given, STRING is the basename for that directory. func_mktempdir () { my_template="${TMPDIR-/tmp}/${1-$progname}" if test "$opt_dry_run" = ":"; then # Return a directory name, but don't create it in dry-run mode my_tmpdir="${my_template}-$$" else # If mktemp works, use that first and foremost my_tmpdir=`mktemp -d "${my_template}-XXXXXXXX" 2>/dev/null` if test ! -d "$my_tmpdir"; then # Failing that, at least try and use $RANDOM to avoid a race my_tmpdir="${my_template}-${RANDOM-0}$$" save_mktempdir_umask=`umask` umask 0077 $MKDIR "$my_tmpdir" umask $save_mktempdir_umask fi # If we're not in dry-run mode, bomb out on failure test -d "$my_tmpdir" || \ func_fatal_error "cannot create temporary directory \`$my_tmpdir'" fi $ECHO "X$my_tmpdir" | $Xsed } # func_quote_for_eval arg # Aesthetically quote ARG to be evaled later. # This function returns two values: FUNC_QUOTE_FOR_EVAL_RESULT # is double-quoted, suitable for a subsequent eval, whereas # FUNC_QUOTE_FOR_EVAL_UNQUOTED_RESULT has merely all characters # which are still active within double quotes backslashified. func_quote_for_eval () { case $1 in *[\\\`\"\$]*) func_quote_for_eval_unquoted_result=`$ECHO "X$1" | $Xsed -e "$sed_quote_subst"` ;; *) func_quote_for_eval_unquoted_result="$1" ;; esac case $func_quote_for_eval_unquoted_result in # Double-quote args containing shell metacharacters to delay # word splitting, command substitution and and variable # expansion for a subsequent eval. # Many Bourne shells cannot handle close brackets correctly # in scan sets, so we specify it separately. 
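# Illustrative example (assumption, not upstream text): for an argument
# containing a space and a dollar sign,
#   func_quote_for_eval 'hello $USER'
# leaves func_quote_for_eval_unquoted_result as  hello \$USER  and
# func_quote_for_eval_result as  "hello \$USER" , so that a later
#   eval "set dummy $func_quote_for_eval_result"; shift
# re-reads it as a single word with the `$' kept literal.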
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") func_quote_for_eval_result="\"$func_quote_for_eval_unquoted_result\"" ;; *) func_quote_for_eval_result="$func_quote_for_eval_unquoted_result" esac } # func_quote_for_expand arg # Aesthetically quote ARG to be evaled later; same as above, # but do not quote variable references. func_quote_for_expand () { case $1 in *[\\\`\"]*) my_arg=`$ECHO "X$1" | $Xsed \ -e "$double_quote_subst" -e "$sed_double_backslash"` ;; *) my_arg="$1" ;; esac case $my_arg in # Double-quote args containing shell metacharacters to delay # word splitting and command substitution for a subsequent eval. # Many Bourne shells cannot handle close brackets correctly # in scan sets, so we specify it separately. *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") my_arg="\"$my_arg\"" ;; esac func_quote_for_expand_result="$my_arg" } # func_show_eval cmd [fail_exp] # Unless opt_silent is true, then output CMD. Then, if opt_dryrun is # not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP # is given, then evaluate it. func_show_eval () { my_cmd="$1" my_fail_exp="${2-:}" ${opt_silent-false} || { func_quote_for_expand "$my_cmd" eval "func_echo $func_quote_for_expand_result" } if ${opt_dry_run-false}; then :; else eval "$my_cmd" my_status=$? if test "$my_status" -eq 0; then :; else eval "(exit $my_status); $my_fail_exp" fi fi } # func_show_eval_locale cmd [fail_exp] # Unless opt_silent is true, then output CMD. Then, if opt_dryrun is # not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP # is given, then evaluate it. Use the saved locale for evaluation. func_show_eval_locale () { my_cmd="$1" my_fail_exp="${2-:}" ${opt_silent-false} || { func_quote_for_expand "$my_cmd" eval "func_echo $func_quote_for_expand_result" } if ${opt_dry_run-false}; then :; else eval "$lt_user_locale $my_cmd" my_status=$? eval "$lt_safe_locale" if test "$my_status" -eq 0; then :; else eval "(exit $my_status); $my_fail_exp" fi fi } # func_version # Echo version message to standard output and exit. func_version () { $SED -n '/^# '$PROGRAM' (GNU /,/# warranty; / { s/^# // s/^# *$// s/\((C)\)[ 0-9,-]*\( [1-9][0-9]*\)/\1\2/ p }' < "$progpath" exit $? } # func_usage # Echo short help message to standard output and exit. func_usage () { $SED -n '/^# Usage:/,/# -h/ { s/^# // s/^# *$// s/\$progname/'$progname'/ p }' < "$progpath" $ECHO $ECHO "run \`$progname --help | more' for full usage" exit $? } # func_help # Echo long help message to standard output and exit. func_help () { $SED -n '/^# Usage:/,/# Report bugs to/ { s/^# // s/^# *$// s*\$progname*'$progname'* s*\$host*'"$host"'* s*\$SHELL*'"$SHELL"'* s*\$LTCC*'"$LTCC"'* s*\$LTCFLAGS*'"$LTCFLAGS"'* s*\$LD*'"$LD"'* s/\$with_gnu_ld/'"$with_gnu_ld"'/ s/\$automake_version/'"`(automake --version) 2>/dev/null |$SED 1q`"'/ s/\$autoconf_version/'"`(autoconf --version) 2>/dev/null |$SED 1q`"'/ p }' < "$progpath" exit $? } # func_missing_arg argname # Echo program name prefixed message to standard error and set global # exit_cmd. func_missing_arg () { func_error "missing argument for $1" exit_cmd=exit } exit_cmd=: # Check that we have a working $ECHO. if test "X$1" = X--no-reexec; then # Discard the --no-reexec flag, and continue. shift elif test "X$1" = X--fallback-echo; then # Avoid inline document here, it may be left over : elif test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t'; then # Yippee, $ECHO works! : else # Restart under the correct shell, and then maybe $ECHO will work. 
exec $SHELL "$progpath" --no-reexec ${1+"$@"} fi if test "X$1" = X--fallback-echo; then # used as fallback echo shift cat </dev/null 2>&1; then taglist="$taglist $tagname" # Evaluate the configuration. Be careful to quote the path # and the sed script, to avoid splitting on whitespace, but # also don't use non-portable quotes within backquotes within # quotes we have to do it in 2 steps: extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"` eval "$extractedcf" else func_error "ignoring unknown tag $tagname" fi ;; esac } # Parse options once, thoroughly. This comes as soon as possible in # the script to make things like `libtool --version' happen quickly. { # Shorthand for --mode=foo, only valid as the first argument case $1 in clean|clea|cle|cl) shift; set dummy --mode clean ${1+"$@"}; shift ;; compile|compil|compi|comp|com|co|c) shift; set dummy --mode compile ${1+"$@"}; shift ;; execute|execut|execu|exec|exe|ex|e) shift; set dummy --mode execute ${1+"$@"}; shift ;; finish|finis|fini|fin|fi|f) shift; set dummy --mode finish ${1+"$@"}; shift ;; install|instal|insta|inst|ins|in|i) shift; set dummy --mode install ${1+"$@"}; shift ;; link|lin|li|l) shift; set dummy --mode link ${1+"$@"}; shift ;; uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) shift; set dummy --mode uninstall ${1+"$@"}; shift ;; esac # Parse non-mode specific arguments: while test "$#" -gt 0; do opt="$1" shift case $opt in --config) func_config ;; --debug) preserve_args="$preserve_args $opt" func_echo "enabling shell trace mode" opt_debug='set -x' $opt_debug ;; -dlopen) test "$#" -eq 0 && func_missing_arg "$opt" && break execute_dlfiles="$execute_dlfiles $1" shift ;; --dry-run | -n) opt_dry_run=: ;; --features) func_features ;; --finish) mode="finish" ;; --mode) test "$#" -eq 0 && func_missing_arg "$opt" && break case $1 in # Valid mode arguments: clean) ;; compile) ;; execute) ;; finish) ;; install) ;; link) ;; relink) ;; uninstall) ;; # Catch anything else as an error *) func_error "invalid argument for $opt" exit_cmd=exit break ;; esac mode="$1" shift ;; --preserve-dup-deps) opt_duplicate_deps=: ;; --quiet|--silent) preserve_args="$preserve_args $opt" opt_silent=: ;; --verbose| -v) preserve_args="$preserve_args $opt" opt_silent=false ;; --tag) test "$#" -eq 0 && func_missing_arg "$opt" && break preserve_args="$preserve_args $opt $1" func_enable_tag "$1" # tagname is set here shift ;; # Separate optargs to long options: -dlopen=*|--mode=*|--tag=*) func_opt_split "$opt" set dummy "$func_opt_split_opt" "$func_opt_split_arg" ${1+"$@"} shift ;; -\?|-h) func_usage ;; --help) opt_help=: ;; --version) func_version ;; -*) func_fatal_help "unrecognized option \`$opt'" ;; *) nonopt="$opt" break ;; esac done case $host in *cygwin* | *mingw* | *pw32* | *cegcc*) # don't eliminate duplications in $postdeps and $predeps opt_duplicate_compiler_generated_deps=: ;; *) opt_duplicate_compiler_generated_deps=$opt_duplicate_deps ;; esac # Having warned about all mis-specified options, bail out if # anything was wrong. $exit_cmd $EXIT_FAILURE } # func_check_version_match # Ensure that we are using m4 macros, and libtool script from the same # release of libtool. func_check_version_match () { if test "$package_revision" != "$macro_revision"; then if test "$VERSION" != "$macro_version"; then if test -z "$macro_version"; then cat >&2 <<_LT_EOF $progname: Version mismatch error. This is $PACKAGE $VERSION, but the $progname: definition of this LT_INIT comes from an older release. 
$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION $progname: and run autoconf again. _LT_EOF else cat >&2 <<_LT_EOF $progname: Version mismatch error. This is $PACKAGE $VERSION, but the $progname: definition of this LT_INIT comes from $PACKAGE $macro_version. $progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION $progname: and run autoconf again. _LT_EOF fi else cat >&2 <<_LT_EOF $progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision, $progname: but the definition of this LT_INIT comes from revision $macro_revision. $progname: You should recreate aclocal.m4 with macros from revision $package_revision $progname: of $PACKAGE $VERSION and run autoconf again. _LT_EOF fi exit $EXIT_MISMATCH fi } ## ----------- ## ## Main. ## ## ----------- ## $opt_help || { # Sanity checks first: func_check_version_match if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then func_fatal_configuration "not configured to build any kind of library" fi test -z "$mode" && func_fatal_error "error: you must specify a MODE." # Darwin sucks eval std_shrext=\"$shrext_cmds\" # Only execute mode is allowed to have -dlopen flags. if test -n "$execute_dlfiles" && test "$mode" != execute; then func_error "unrecognized option \`-dlopen'" $ECHO "$help" 1>&2 exit $EXIT_FAILURE fi # Change the help message to a mode-specific one. generic_help="$help" help="Try \`$progname --help --mode=$mode' for more information." } # func_lalib_p file # True iff FILE is a libtool `.la' library or `.lo' object file. # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_lalib_p () { test -f "$1" && $SED -e 4q "$1" 2>/dev/null \ | $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1 } # func_lalib_unsafe_p file # True iff FILE is a libtool `.la' library or `.lo' object file. # This function implements the same check as func_lalib_p without # resorting to external programs. To this end, it redirects stdin and # closes it afterwards, without saving the original file descriptor. # As a safety measure, use it only where a negative result would be # fatal anyway. Works if `file' does not exist. func_lalib_unsafe_p () { lalib_p=no if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then for lalib_p_l in 1 2 3 4 do read lalib_p_line case "$lalib_p_line" in \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;; esac done exec 0<&5 5<&- fi test "$lalib_p" = yes } # func_ltwrapper_script_p file # True iff FILE is a libtool wrapper script # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_ltwrapper_script_p () { func_lalib_p "$1" } # func_ltwrapper_executable_p file # True iff FILE is a libtool wrapper executable # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_ltwrapper_executable_p () { func_ltwrapper_exec_suffix= case $1 in *.exe) ;; *) func_ltwrapper_exec_suffix=.exe ;; esac $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1 } # func_ltwrapper_scriptname file # Assumes file is an ltwrapper_executable # uses $file to determine the appropriate filename for a # temporary ltwrapper_script. func_ltwrapper_scriptname () { func_ltwrapper_scriptname_result="" if func_ltwrapper_executable_p "$1"; then func_dirname_and_basename "$1" "" "." 
func_stripname '' '.exe' "$func_basename_result" func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper" fi } # func_ltwrapper_p file # True iff FILE is a libtool wrapper script or wrapper executable # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_ltwrapper_p () { func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1" } # func_execute_cmds commands fail_cmd # Execute tilde-delimited COMMANDS. # If FAIL_CMD is given, eval that upon failure. # FAIL_CMD may read-access the current command in variable CMD! func_execute_cmds () { $opt_debug save_ifs=$IFS; IFS='~' for cmd in $1; do IFS=$save_ifs eval cmd=\"$cmd\" func_show_eval "$cmd" "${2-:}" done IFS=$save_ifs } # func_source file # Source FILE, adding directory component if necessary. # Note that it is not necessary on cygwin/mingw to append a dot to # FILE even if both FILE and FILE.exe exist: automatic-append-.exe # behavior happens only for exec(3), not for open(2)! Also, sourcing # `FILE.' does not work on cygwin managed mounts. func_source () { $opt_debug case $1 in */* | *\\*) . "$1" ;; *) . "./$1" ;; esac } # func_infer_tag arg # Infer tagged configuration to use if any are available and # if one wasn't chosen via the "--tag" command line option. # Only attempt this if the compiler in the base compile # command doesn't match the default compiler. # arg is usually of the form 'gcc ...' func_infer_tag () { $opt_debug if test -n "$available_tags" && test -z "$tagname"; then CC_quoted= for arg in $CC; do func_quote_for_eval "$arg" CC_quoted="$CC_quoted $func_quote_for_eval_result" done case $@ in # Blanks in the command may have been stripped by the calling shell, # but not from the CC environment variable when configure was run. " $CC "* | "$CC "* | " `$ECHO $CC` "* | "`$ECHO $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$ECHO $CC_quoted` "* | "`$ECHO $CC_quoted` "*) ;; # Blanks at the start of $base_compile will cause this to fail # if we don't check for them as well. *) for z in $available_tags; do if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then # Evaluate the configuration. eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" CC_quoted= for arg in $CC; do # Double-quote args containing other shell metacharacters. func_quote_for_eval "$arg" CC_quoted="$CC_quoted $func_quote_for_eval_result" done case "$@ " in " $CC "* | "$CC "* | " `$ECHO $CC` "* | "`$ECHO $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$ECHO $CC_quoted` "* | "`$ECHO $CC_quoted` "*) # The compiler in the base compile command matches # the one in the tagged configuration. # Assume this is the tagged configuration we want. tagname=$z break ;; esac fi done # If $tagname still isn't set, then no tagged configuration # was found and let the user know that the "--tag" command # line option must be used. if test -z "$tagname"; then func_echo "unable to infer tagged configuration" func_fatal_error "specify a tag with \`--tag'" # else # func_verbose "using $tagname tagged configuration" fi ;; esac fi } # func_write_libtool_object output_name pic_name nonpic_name # Create a libtool object file (analogous to a ".la" file), # but don't create it if we're doing a dry run. 
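# Illustrative sketch (assumption, not upstream text): after something like
#   ./libtool --mode=compile gcc -c foo.c
# the generated foo.lo is a small shell-readable description roughly of the
# form
#   # foo.lo - a libtool object file
#   pic_object='.libs/foo.o'
#   non_pic_object='foo.o'
# i.e. it records where the PIC and non-PIC objects were placed.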
func_write_libtool_object () { write_libobj=${1} if test "$build_libtool_libs" = yes; then write_lobj=\'${2}\' else write_lobj=none fi if test "$build_old_libs" = yes; then write_oldobj=\'${3}\' else write_oldobj=none fi $opt_dry_run || { cat >${write_libobj}T <?"'"'"' &()|`$[]' \ && func_warning "libobj name \`$libobj' may not contain shell special characters." func_dirname_and_basename "$obj" "/" "" objname="$func_basename_result" xdir="$func_dirname_result" lobj=${xdir}$objdir/$objname test -z "$base_compile" && \ func_fatal_help "you must specify a compilation command" # Delete any leftover library objects. if test "$build_old_libs" = yes; then removelist="$obj $lobj $libobj ${libobj}T" else removelist="$lobj $libobj ${libobj}T" fi # On Cygwin there's no "real" PIC flag so we must build both object types case $host_os in cygwin* | mingw* | pw32* | os2* | cegcc*) pic_mode=default ;; esac if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then # non-PIC code in shared libraries is not supported pic_mode=default fi # Calculate the filename of the output object if compiler does # not support -o with -c if test "$compiler_c_o" = no; then output_obj=`$ECHO "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext} lockfile="$output_obj.lock" else output_obj= need_locks=no lockfile= fi # Lock this critical section if it is needed # We use this script file to make the link, it avoids creating a new file if test "$need_locks" = yes; then until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do func_echo "Waiting for $lockfile to be removed" sleep 2 done elif test "$need_locks" = warn; then if test -f "$lockfile"; then $ECHO "\ *** ERROR, $lockfile exists and contains: `cat $lockfile 2>/dev/null` This indicates that another process is trying to use the same temporary object file, and libtool could not work around it because your compiler does not support \`-c' and \`-o' together. If you repeat this compilation, it may succeed, by chance, but you had better avoid parallel builds (make -j) in this platform, or get a better compiler." $opt_dry_run || $RM $removelist exit $EXIT_FAILURE fi removelist="$removelist $output_obj" $ECHO "$srcfile" > "$lockfile" fi $opt_dry_run || $RM $removelist removelist="$removelist $lockfile" trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 if test -n "$fix_srcfile_path"; then eval srcfile=\"$fix_srcfile_path\" fi func_quote_for_eval "$srcfile" qsrcfile=$func_quote_for_eval_result # Only build a PIC object if we are building libtool libraries. if test "$build_libtool_libs" = yes; then # Without this assignment, base_compile gets emptied. fbsd_hideous_sh_bug=$base_compile if test "$pic_mode" != no; then command="$base_compile $qsrcfile $pic_flag" else # Don't build PIC code command="$base_compile $qsrcfile" fi func_mkdir_p "$xdir$objdir" if test -z "$output_obj"; then # Place PIC objects in $objdir command="$command -o $lobj" fi func_show_eval_locale "$command" \ 'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE' if test "$need_locks" = warn && test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then $ECHO "\ *** ERROR, $lockfile contains: `cat $lockfile 2>/dev/null` but it should contain: $srcfile This indicates that another process is trying to use the same temporary object file, and libtool could not work around it because your compiler does not support \`-c' and \`-o' together. 
If you repeat this compilation, it may succeed, by chance, but you had better avoid parallel builds (make -j) in this platform, or get a better compiler." $opt_dry_run || $RM $removelist exit $EXIT_FAILURE fi # Just move the object if needed, then go on to compile the next one if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then func_show_eval '$MV "$output_obj" "$lobj"' \ 'error=$?; $opt_dry_run || $RM $removelist; exit $error' fi # Allow error messages only from the first compilation. if test "$suppress_opt" = yes; then suppress_output=' >/dev/null 2>&1' fi fi # Only build a position-dependent object if we build old libraries. if test "$build_old_libs" = yes; then if test "$pic_mode" != yes; then # Don't build PIC code command="$base_compile $qsrcfile$pie_flag" else command="$base_compile $qsrcfile $pic_flag" fi if test "$compiler_c_o" = yes; then command="$command -o $obj" fi # Suppress compiler output if we already did a PIC compilation. command="$command$suppress_output" func_show_eval_locale "$command" \ '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' if test "$need_locks" = warn && test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then $ECHO "\ *** ERROR, $lockfile contains: `cat $lockfile 2>/dev/null` but it should contain: $srcfile This indicates that another process is trying to use the same temporary object file, and libtool could not work around it because your compiler does not support \`-c' and \`-o' together. If you repeat this compilation, it may succeed, by chance, but you had better avoid parallel builds (make -j) in this platform, or get a better compiler." $opt_dry_run || $RM $removelist exit $EXIT_FAILURE fi # Just move the object if needed if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then func_show_eval '$MV "$output_obj" "$obj"' \ 'error=$?; $opt_dry_run || $RM $removelist; exit $error' fi fi $opt_dry_run || { func_write_libtool_object "$libobj" "$objdir/$objname" "$objname" # Unlock the critical section if it was locked if test "$need_locks" != no; then removelist=$lockfile $RM "$lockfile" fi } exit $EXIT_SUCCESS } $opt_help || { test "$mode" = compile && func_mode_compile ${1+"$@"} } func_mode_help () { # We need to display help for each of the modes. case $mode in "") # Generic help is extracted from the usage comments # at the start of this file. func_help ;; clean) $ECHO \ "Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE... Remove files from the build directory. RM is the name of the program to use to delete files associated with each FILE (typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed to RM. If FILE is a libtool library, object or program, all the files associated with it are deleted. Otherwise, only FILE itself is deleted using RM." ;; compile) $ECHO \ "Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE Compile a source file into a libtool library object. This mode accepts the following additional options: -o OUTPUT-FILE set the output file name to OUTPUT-FILE -no-suppress do not suppress compiler output for multiple passes -prefer-pic try to building PIC objects only -prefer-non-pic try to building non-PIC objects only -shared do not build a \`.o' file suitable for static linking -static only build a \`.o' file suitable for static linking COMPILE-COMMAND is a command to be used in creating a \`standard' object file from the given SOURCEFILE. 
The output file name is determined by removing the directory component from SOURCEFILE, then substituting the C source code suffix \`.c' with the library object suffix, \`.lo'." ;; execute) $ECHO \ "Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]... Automatically set library path, then run a program. This mode accepts the following additional options: -dlopen FILE add the directory containing FILE to the library path This mode sets the library path environment variable according to \`-dlopen' flags. If any of the ARGS are libtool executable wrappers, then they are translated into their corresponding uninstalled binary, and any of their required library directories are added to the library path. Then, COMMAND is executed, with ARGS as arguments." ;; finish) $ECHO \ "Usage: $progname [OPTION]... --mode=finish [LIBDIR]... Complete the installation of libtool libraries. Each LIBDIR is a directory that contains libtool libraries. The commands that this mode executes may require superuser privileges. Use the \`--dry-run' option if you just want to see what would be executed." ;; install) $ECHO \ "Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND... Install executables or libraries. INSTALL-COMMAND is the installation command. The first component should be either the \`install' or \`cp' program. The following components of INSTALL-COMMAND are treated specially: -inst-prefix PREFIX-DIR Use PREFIX-DIR as a staging area for installation The rest of the components are interpreted as arguments to that command (only BSD-compatible install options are recognized)." ;; link) $ECHO \ "Usage: $progname [OPTION]... --mode=link LINK-COMMAND... Link object files or libraries together to form another library, or to create an executable program. LINK-COMMAND is a command using the C compiler that you would use to create a program from several object files. 
The following components of LINK-COMMAND are treated specially: -all-static do not do any dynamic linking at all -avoid-version do not add a version suffix if possible -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) -export-symbols SYMFILE try to export only the symbols listed in SYMFILE -export-symbols-regex REGEX try to export only the symbols matching REGEX -LLIBDIR search LIBDIR for required installed libraries -lNAME OUTPUT-FILE requires the installed library libNAME -module build a library that can dlopened -no-fast-install disable the fast-install mode -no-install link a not-installable executable -no-undefined declare that a library does not refer to external symbols -o OUTPUT-FILE create OUTPUT-FILE from the specified objects -objectlist FILE Use a list of object files found in FILE to specify objects -precious-files-regex REGEX don't remove output files matching REGEX -release RELEASE specify package release information -rpath LIBDIR the created library will eventually be installed in LIBDIR -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries -shared only do dynamic linking of libtool libraries -shrext SUFFIX override the standard shared library file extension -static do not do any dynamic linking of uninstalled libtool libraries -static-libtool-libs do not do any dynamic linking of libtool libraries -version-info CURRENT[:REVISION[:AGE]] specify library version info [each variable defaults to 0] -weak LIBNAME declare that the target provides the LIBNAME interface All other options (arguments beginning with \`-') are ignored. Every other argument is treated as a filename. Files ending in \`.la' are treated as uninstalled libtool libraries, other files are standard or library object files. If the OUTPUT-FILE ends in \`.la', then a libtool library is created, only library objects (\`.lo' files) may be specified, and \`-rpath' is required, except when creating a convenience library. If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created using \`ar' and \`ranlib', or on Windows using \`lib'. If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file is created, otherwise an executable program is created." ;; uninstall) $ECHO \ "Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... Remove libraries from an installation directory. RM is the name of the program to use to delete files associated with each FILE (typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed to RM. If FILE is a libtool library, all the files associated with it are deleted. Otherwise, only FILE itself is deleted using RM." ;; *) func_fatal_help "invalid operation mode \`$mode'" ;; esac $ECHO $ECHO "Try \`$progname --help' for more information about other modes." exit $? } # Now that we've collected a possible --mode arg, show help if necessary $opt_help && func_mode_help # func_mode_execute arg... func_mode_execute () { $opt_debug # The first argument is the command name. cmd="$nonopt" test -z "$cmd" && \ func_fatal_help "you must specify a COMMAND" # Handle -dlopen flags immediately. for file in $execute_dlfiles; do test -f "$file" \ || func_fatal_help "\`$file' is not a file" dir= case $file in *.la) # Check to see that this really is a libtool archive. 
func_lalib_unsafe_p "$file" \ || func_fatal_help "\`$lib' is not a valid libtool archive" # Read the libtool library. dlname= library_names= func_source "$file" # Skip this library if it cannot be dlopened. if test -z "$dlname"; then # Warn if it was a shared library. test -n "$library_names" && \ func_warning "\`$file' was not linked with \`-export-dynamic'" continue fi func_dirname "$file" "" "." dir="$func_dirname_result" if test -f "$dir/$objdir/$dlname"; then dir="$dir/$objdir" else if test ! -f "$dir/$dlname"; then func_fatal_error "cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" fi fi ;; *.lo) # Just add the directory containing the .lo file. func_dirname "$file" "" "." dir="$func_dirname_result" ;; *) func_warning "\`-dlopen' is ignored for non-libtool libraries and objects" continue ;; esac # Get the absolute pathname. absdir=`cd "$dir" && pwd` test -n "$absdir" && dir="$absdir" # Now add the directory to shlibpath_var. if eval "test -z \"\$$shlibpath_var\""; then eval "$shlibpath_var=\"\$dir\"" else eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" fi done # This variable tells wrapper scripts just to set shlibpath_var # rather than running their programs. libtool_execute_magic="$magic" # Check if any of the arguments is a wrapper script. args= for file do case $file in -*) ;; *) # Do a test to see if this is really a libtool program. if func_ltwrapper_script_p "$file"; then func_source "$file" # Transform arg to wrapped name. file="$progdir/$program" elif func_ltwrapper_executable_p "$file"; then func_ltwrapper_scriptname "$file" func_source "$func_ltwrapper_scriptname_result" # Transform arg to wrapped name. file="$progdir/$program" fi ;; esac # Quote arguments (to preserve shell metacharacters). func_quote_for_eval "$file" args="$args $func_quote_for_eval_result" done if test "X$opt_dry_run" = Xfalse; then if test -n "$shlibpath_var"; then # Export the shlibpath_var. eval "export $shlibpath_var" fi # Restore saved environment variables for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES do eval "if test \"\${save_$lt_var+set}\" = set; then $lt_var=\$save_$lt_var; export $lt_var else $lt_unset $lt_var fi" done # Now prepare to actually exec the command. exec_cmd="\$cmd$args" else # Display what would be done. if test -n "$shlibpath_var"; then eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\"" $ECHO "export $shlibpath_var" fi $ECHO "$cmd$args" exit $EXIT_SUCCESS fi } test "$mode" = execute && func_mode_execute ${1+"$@"} # func_mode_finish arg... func_mode_finish () { $opt_debug libdirs="$nonopt" admincmds= if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then for dir do libdirs="$libdirs $dir" done for libdir in $libdirs; do if test -n "$finish_cmds"; then # Do each command in the finish commands. func_execute_cmds "$finish_cmds" 'admincmds="$admincmds '"$cmd"'"' fi if test -n "$finish_eval"; then # Do the single finish_eval. eval cmds=\"$finish_eval\" $opt_dry_run || eval "$cmds" || admincmds="$admincmds $cmds" fi done fi # Exit here if they wanted silent mode. 
$opt_silent && exit $EXIT_SUCCESS $ECHO "X----------------------------------------------------------------------" | $Xsed $ECHO "Libraries have been installed in:" for libdir in $libdirs; do $ECHO " $libdir" done $ECHO $ECHO "If you ever happen to want to link against installed libraries" $ECHO "in a given directory, LIBDIR, you must either use libtool, and" $ECHO "specify the full pathname of the library, or use the \`-LLIBDIR'" $ECHO "flag during linking and do at least one of the following:" if test -n "$shlibpath_var"; then $ECHO " - add LIBDIR to the \`$shlibpath_var' environment variable" $ECHO " during execution" fi if test -n "$runpath_var"; then $ECHO " - add LIBDIR to the \`$runpath_var' environment variable" $ECHO " during linking" fi if test -n "$hardcode_libdir_flag_spec"; then libdir=LIBDIR eval flag=\"$hardcode_libdir_flag_spec\" $ECHO " - use the \`$flag' linker flag" fi if test -n "$admincmds"; then $ECHO " - have your system administrator run these commands:$admincmds" fi if test -f /etc/ld.so.conf; then $ECHO " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" fi $ECHO $ECHO "See any operating system documentation about shared libraries for" case $host in solaris2.[6789]|solaris2.1[0-9]) $ECHO "more information, such as the ld(1), crle(1) and ld.so(8) manual" $ECHO "pages." ;; *) $ECHO "more information, such as the ld(1) and ld.so(8) manual pages." ;; esac $ECHO "X----------------------------------------------------------------------" | $Xsed exit $EXIT_SUCCESS } test "$mode" = finish && func_mode_finish ${1+"$@"} # func_mode_install arg... func_mode_install () { $opt_debug # There may be an optional sh(1) argument at the beginning of # install_prog (especially on Windows NT). if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh || # Allow the use of GNU shtool's install command. $ECHO "X$nonopt" | $GREP shtool >/dev/null; then # Aesthetically quote it. func_quote_for_eval "$nonopt" install_prog="$func_quote_for_eval_result " arg=$1 shift else install_prog= arg=$nonopt fi # The real first argument should be the name of the installation program. # Aesthetically quote it. func_quote_for_eval "$arg" install_prog="$install_prog$func_quote_for_eval_result" # We need to accept at least all the BSD install flags. dest= files= opts= prev= install_type= isdir=no stripme= for arg do if test -n "$dest"; then files="$files $dest" dest=$arg continue fi case $arg in -d) isdir=yes ;; -f) case " $install_prog " in *[\\\ /]cp\ *) ;; *) prev=$arg ;; esac ;; -g | -m | -o) prev=$arg ;; -s) stripme=" -s" continue ;; -*) ;; *) # If the previous option needed an argument, then skip it. if test -n "$prev"; then prev= else dest=$arg continue fi ;; esac # Aesthetically quote the argument. func_quote_for_eval "$arg" install_prog="$install_prog $func_quote_for_eval_result" done test -z "$install_prog" && \ func_fatal_help "you must specify an install program" test -n "$prev" && \ func_fatal_help "the \`$prev' option requires an argument" if test -z "$files"; then if test -z "$dest"; then func_fatal_help "no file or destination specified" else func_fatal_help "you must specify a destination" fi fi # Strip any trailing slash from the destination. func_stripname '' '/' "$dest" dest=$func_stripname_result # Check to see that the destination is a directory. test -d "$dest" && isdir=yes if test "$isdir" = yes; then destdir="$dest" destname= else func_dirname_and_basename "$dest" "" "." 
destdir="$func_dirname_result" destname="$func_basename_result" # Not a directory, so check to see that there is only one file specified. set dummy $files; shift test "$#" -gt 1 && \ func_fatal_help "\`$dest' is not a directory" fi case $destdir in [\\/]* | [A-Za-z]:[\\/]*) ;; *) for file in $files; do case $file in *.lo) ;; *) func_fatal_help "\`$destdir' must be an absolute directory name" ;; esac done ;; esac # This variable tells wrapper scripts just to set variables rather # than running their programs. libtool_install_magic="$magic" staticlibs= future_libdirs= current_libdirs= for file in $files; do # Do each installation. case $file in *.$libext) # Do the static libraries later. staticlibs="$staticlibs $file" ;; *.la) # Check to see that this really is a libtool archive. func_lalib_unsafe_p "$file" \ || func_fatal_help "\`$file' is not a valid libtool archive" library_names= old_library= relink_command= func_source "$file" # Add the libdir to current_libdirs if it is the destination. if test "X$destdir" = "X$libdir"; then case "$current_libdirs " in *" $libdir "*) ;; *) current_libdirs="$current_libdirs $libdir" ;; esac else # Note the libdir as a future libdir. case "$future_libdirs " in *" $libdir "*) ;; *) future_libdirs="$future_libdirs $libdir" ;; esac fi func_dirname "$file" "/" "" dir="$func_dirname_result" dir="$dir$objdir" if test -n "$relink_command"; then # Determine the prefix the user has applied to our future dir. inst_prefix_dir=`$ECHO "X$destdir" | $Xsed -e "s%$libdir\$%%"` # Don't allow the user to place us outside of our expected # location b/c this prevents finding dependent libraries that # are installed to the same prefix. # At present, this check doesn't affect windows .dll's that # are installed into $libdir/../bin (currently, that works fine) # but it's something to keep an eye on. test "$inst_prefix_dir" = "$destdir" && \ func_fatal_error "error: cannot install \`$file' to a directory not ending in $libdir" if test -n "$inst_prefix_dir"; then # Stick the inst_prefix_dir data into the link command. relink_command=`$ECHO "X$relink_command" | $Xsed -e "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` else relink_command=`$ECHO "X$relink_command" | $Xsed -e "s%@inst_prefix_dir@%%"` fi func_warning "relinking \`$file'" func_show_eval "$relink_command" \ 'func_fatal_error "error: relink \`$file'\'' with the above command before installing it"' fi # See the names of the shared library. set dummy $library_names; shift if test -n "$1"; then realname="$1" shift srcname="$realname" test -n "$relink_command" && srcname="$realname"T # Install the shared library and build the symlinks. func_show_eval "$install_prog $dir/$srcname $destdir/$realname" \ 'exit $?' tstripme="$stripme" case $host_os in cygwin* | mingw* | pw32* | cegcc*) case $realname in *.dll.a) tstripme="" ;; esac ;; esac if test -n "$tstripme" && test -n "$striplib"; then func_show_eval "$striplib $destdir/$realname" 'exit $?' fi if test "$#" -gt 0; then # Delete the old symlinks, and create new ones. # Try `ln -sf' first, because the `ln' binary might depend on # the symlink we replace! Solaris /bin/ln does not understand -f, # so we also need to try rm && ln -s. for linkname do test "$linkname" != "$realname" \ && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })" done fi # Do each command in the postinstall commands. lib="$destdir/$realname" func_execute_cmds "$postinstall_cmds" 'exit $?' 
fi # Install the pseudo-library for information purposes. func_basename "$file" name="$func_basename_result" instname="$dir/$name"i func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' # Maybe install the static library, too. test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library" ;; *.lo) # Install (i.e. copy) a libtool object. # Figure out destination file name, if it wasn't already specified. if test -n "$destname"; then destfile="$destdir/$destname" else func_basename "$file" destfile="$func_basename_result" destfile="$destdir/$destfile" fi # Deduce the name of the destination old-style object file. case $destfile in *.lo) func_lo2o "$destfile" staticdest=$func_lo2o_result ;; *.$objext) staticdest="$destfile" destfile= ;; *) func_fatal_help "cannot copy a libtool object to \`$destfile'" ;; esac # Install the libtool object if requested. test -n "$destfile" && \ func_show_eval "$install_prog $file $destfile" 'exit $?' # Install the old object if enabled. if test "$build_old_libs" = yes; then # Deduce the name of the old-style object file. func_lo2o "$file" staticobj=$func_lo2o_result func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?' fi exit $EXIT_SUCCESS ;; *) # Figure out destination file name, if it wasn't already specified. if test -n "$destname"; then destfile="$destdir/$destname" else func_basename "$file" destfile="$func_basename_result" destfile="$destdir/$destfile" fi # If the file is missing, and there is a .exe on the end, strip it # because it is most likely a libtool script we actually want to # install stripped_ext="" case $file in *.exe) if test ! -f "$file"; then func_stripname '' '.exe' "$file" file=$func_stripname_result stripped_ext=".exe" fi ;; esac # Do a test to see if this is really a libtool program. case $host in *cygwin* | *mingw*) if func_ltwrapper_executable_p "$file"; then func_ltwrapper_scriptname "$file" wrapper=$func_ltwrapper_scriptname_result else func_stripname '' '.exe' "$file" wrapper=$func_stripname_result fi ;; *) wrapper=$file ;; esac if func_ltwrapper_script_p "$wrapper"; then notinst_deplibs= relink_command= func_source "$wrapper" # Check the variables that should have been set. test -z "$generated_by_libtool_version" && \ func_fatal_error "invalid libtool wrapper script \`$wrapper'" finalize=yes for lib in $notinst_deplibs; do # Check to see that each library is installed. libdir= if test -f "$lib"; then func_source "$lib" fi libfile="$libdir/"`$ECHO "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test if test -n "$libdir" && test ! -f "$libfile"; then func_warning "\`$lib' has not been installed in \`$libdir'" finalize=no fi done relink_command= func_source "$wrapper" outputname= if test "$fast_install" = no && test -n "$relink_command"; then $opt_dry_run || { if test "$finalize" = yes; then tmpdir=`func_mktempdir` func_basename "$file$stripped_ext" file="$func_basename_result" outputname="$tmpdir/$file" # Replace the output file specification. relink_command=`$ECHO "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'` $opt_silent || { func_quote_for_expand "$relink_command" eval "func_echo $func_quote_for_expand_result" } if eval "$relink_command"; then : else func_error "error: relink \`$file' with the above command before installing it" $opt_dry_run || ${RM}r "$tmpdir" continue fi file="$outputname" else func_warning "cannot relink \`$file'" fi } else # Install the binary that we compiled earlier. 
file=`$ECHO "X$file$stripped_ext" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"` fi fi # remove .exe since cygwin /usr/bin/install will append another # one anyway case $install_prog,$host in */usr/bin/install*,*cygwin*) case $file:$destfile in *.exe:*.exe) # this is ok ;; *.exe:*) destfile=$destfile.exe ;; *:*.exe) func_stripname '' '.exe' "$destfile" destfile=$func_stripname_result ;; esac ;; esac func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?' $opt_dry_run || if test -n "$outputname"; then ${RM}r "$tmpdir" fi ;; esac done for file in $staticlibs; do func_basename "$file" name="$func_basename_result" # Set up the ranlib parameters. oldlib="$destdir/$name" func_show_eval "$install_prog \$file \$oldlib" 'exit $?' if test -n "$stripme" && test -n "$old_striplib"; then func_show_eval "$old_striplib $oldlib" 'exit $?' fi # Do each command in the postinstall commands. func_execute_cmds "$old_postinstall_cmds" 'exit $?' done test -n "$future_libdirs" && \ func_warning "remember to run \`$progname --finish$future_libdirs'" if test -n "$current_libdirs"; then # Maybe just do a dry run. $opt_dry_run && current_libdirs=" -n$current_libdirs" exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs' else exit $EXIT_SUCCESS fi } test "$mode" = install && func_mode_install ${1+"$@"} # func_generate_dlsyms outputname originator pic_p # Extract symbols from dlprefiles and create ${outputname}S.o with # a dlpreopen symbol table. func_generate_dlsyms () { $opt_debug my_outputname="$1" my_originator="$2" my_pic_p="${3-no}" my_prefix=`$ECHO "$my_originator" | sed 's%[^a-zA-Z0-9]%_%g'` my_dlsyms= if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then if test -n "$NM" && test -n "$global_symbol_pipe"; then my_dlsyms="${my_outputname}S.c" else func_error "not configured to extract global symbols from dlpreopened files" fi fi if test -n "$my_dlsyms"; then case $my_dlsyms in "") ;; *.c) # Discover the nlist of each of the dlfiles. nlist="$output_objdir/${my_outputname}.nm" func_show_eval "$RM $nlist ${nlist}S ${nlist}T" # Parse the name list into a source file. func_verbose "creating $output_objdir/$my_dlsyms" $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\ /* $my_dlsyms - symbol resolution table for \`$my_outputname' dlsym emulation. */ /* Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION */ #ifdef __cplusplus extern \"C\" { #endif /* External symbol declarations for the compiler. */\ " if test "$dlself" = yes; then func_verbose "generating symbol list for \`$output'" $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist" # Add our own program objects to the symbol list. 
progfiles=`$ECHO "X$objs$old_deplibs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` for progfile in $progfiles; do func_verbose "extracting global C symbols from \`$progfile'" $opt_dry_run || eval "$NM $progfile | $global_symbol_pipe >> '$nlist'" done if test -n "$exclude_expsyms"; then $opt_dry_run || { eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' eval '$MV "$nlist"T "$nlist"' } fi if test -n "$export_symbols_regex"; then $opt_dry_run || { eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' eval '$MV "$nlist"T "$nlist"' } fi # Prepare the list of exported symbols if test -z "$export_symbols"; then export_symbols="$output_objdir/$outputname.exp" $opt_dry_run || { $RM $export_symbols eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' case $host in *cygwin* | *mingw* | *cegcc* ) eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' ;; esac } else $opt_dry_run || { eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' eval '$MV "$nlist"T "$nlist"' case $host in *cygwin | *mingw* | *cegcc* ) eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' ;; esac } fi fi for dlprefile in $dlprefiles; do func_verbose "extracting global C symbols from \`$dlprefile'" func_basename "$dlprefile" name="$func_basename_result" $opt_dry_run || { eval '$ECHO ": $name " >> "$nlist"' eval "$NM $dlprefile 2>/dev/null | $global_symbol_pipe >> '$nlist'" } done $opt_dry_run || { # Make sure we have at least an empty file. test -f "$nlist" || : > "$nlist" if test -n "$exclude_expsyms"; then $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T $MV "$nlist"T "$nlist" fi # Try sorting and uniquifying the output. if $GREP -v "^: " < "$nlist" | if sort -k 3 /dev/null 2>&1; then sort -k 3 else sort +2 fi | uniq > "$nlist"S; then : else $GREP -v "^: " < "$nlist" > "$nlist"S fi if test -f "$nlist"S; then eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"' else $ECHO '/* NONE */' >> "$output_objdir/$my_dlsyms" fi $ECHO >> "$output_objdir/$my_dlsyms" "\ /* The mapping between symbol names and symbols. */ typedef struct { const char *name; void *address; } lt_dlsymlist; " case $host in *cygwin* | *mingw* | *cegcc* ) $ECHO >> "$output_objdir/$my_dlsyms" "\ /* DATA imports from DLLs on WIN32 con't be const, because runtime relocations are performed -- see ld's documentation on pseudo-relocs. 
*/" lt_dlsym_const= ;; *osf5*) echo >> "$output_objdir/$my_dlsyms" "\ /* This system does not cope well with relocations in const data */" lt_dlsym_const= ;; *) lt_dlsym_const=const ;; esac $ECHO >> "$output_objdir/$my_dlsyms" "\ extern $lt_dlsym_const lt_dlsymlist lt_${my_prefix}_LTX_preloaded_symbols[]; $lt_dlsym_const lt_dlsymlist lt_${my_prefix}_LTX_preloaded_symbols[] = {\ { \"$my_originator\", (void *) 0 }," case $need_lib_prefix in no) eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms" ;; *) eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms" ;; esac $ECHO >> "$output_objdir/$my_dlsyms" "\ {0, (void *) 0} }; /* This works around a problem in FreeBSD linker */ #ifdef FREEBSD_WORKAROUND static const void *lt_preloaded_setup() { return lt_${my_prefix}_LTX_preloaded_symbols; } #endif #ifdef __cplusplus } #endif\ " } # !$opt_dry_run pic_flag_for_symtable= case "$compile_command " in *" -static "*) ;; *) case $host in # compiling the symbol table file with pic_flag works around # a FreeBSD bug that causes programs to crash when -lm is # linked before any other PIC object. But we must not use # pic_flag when linking with -static. The problem exists in # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. *-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;; *-*-hpux*) pic_flag_for_symtable=" $pic_flag" ;; *) if test "X$my_pic_p" != Xno; then pic_flag_for_symtable=" $pic_flag" fi ;; esac ;; esac symtab_cflags= for arg in $LTCFLAGS; do case $arg in -pie | -fpie | -fPIE) ;; *) symtab_cflags="$symtab_cflags $arg" ;; esac done # Now compile the dynamic symbol file. func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?' # Clean up the generated files. func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T"' # Transform the symbol file into the correct name. symfileobj="$output_objdir/${my_outputname}S.$objext" case $host in *cygwin* | *mingw* | *cegcc* ) if test -f "$output_objdir/$my_outputname.def"; then compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` else compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"` finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"` fi ;; *) compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"` finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"` ;; esac ;; *) func_fatal_error "unknown suffix for \`$my_dlsyms'" ;; esac else # We keep going just in case the user didn't refer to # lt_preloaded_symbols. The linker will fail if global_symbol_pipe # really was required. # Nullify the symbol file. compile_command=`$ECHO "X$compile_command" | $Xsed -e "s% @SYMFILE@%%"` finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"` fi } # func_win32_libid arg # return the library type of file 'arg' # # Need a lot of goo to handle *both* DLLs and import libs # Has to be a shell function in order to 'eat' the argument # that is supplied when $file_magic_command is called. 
func_win32_libid () { $opt_debug win32_libid_type="unknown" win32_fileres=`file -L $1 2>/dev/null` case $win32_fileres in *ar\ archive\ import\ library*) # definitely import win32_libid_type="x86 archive import" ;; *ar\ archive*) # could be an import, or static if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | $EGREP 'file format pe-i386(.*architecture: i386)?' >/dev/null ; then win32_nmres=`eval $NM -f posix -A $1 | $SED -n -e ' 1,100{ / I /{ s,.*,import, p q } }'` case $win32_nmres in import*) win32_libid_type="x86 archive import";; *) win32_libid_type="x86 archive static";; esac fi ;; *DLL*) win32_libid_type="x86 DLL" ;; *executable*) # but shell scripts are "executable" too... case $win32_fileres in *MS\ Windows\ PE\ Intel*) win32_libid_type="x86 DLL" ;; esac ;; esac $ECHO "$win32_libid_type" } # func_extract_an_archive dir oldlib func_extract_an_archive () { $opt_debug f_ex_an_ar_dir="$1"; shift f_ex_an_ar_oldlib="$1" func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" 'exit $?' if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then : else func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" fi } # func_extract_archives gentop oldlib ... func_extract_archives () { $opt_debug my_gentop="$1"; shift my_oldlibs=${1+"$@"} my_oldobjs="" my_xlib="" my_xabs="" my_xdir="" for my_xlib in $my_oldlibs; do # Extract the objects. case $my_xlib in [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;; *) my_xabs=`pwd`"/$my_xlib" ;; esac func_basename "$my_xlib" my_xlib="$func_basename_result" my_xlib_u=$my_xlib while :; do case " $extracted_archives " in *" $my_xlib_u "*) func_arith $extracted_serial + 1 extracted_serial=$func_arith_result my_xlib_u=lt$extracted_serial-$my_xlib ;; *) break ;; esac done extracted_archives="$extracted_archives $my_xlib_u" my_xdir="$my_gentop/$my_xlib_u" func_mkdir_p "$my_xdir" case $host in *-darwin*) func_verbose "Extracting $my_xabs" # Do not bother doing anything if just a dry run $opt_dry_run || { darwin_orig_dir=`pwd` cd $my_xdir || exit $? 
darwin_archive=$my_xabs darwin_curdir=`pwd` darwin_base_archive=`basename "$darwin_archive"` darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true` if test -n "$darwin_arches"; then darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'` darwin_arch= func_verbose "$darwin_base_archive has multiple architectures $darwin_arches" for darwin_arch in $darwin_arches ; do func_mkdir_p "unfat-$$/${darwin_base_archive}-${darwin_arch}" $LIPO -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}" cd "unfat-$$/${darwin_base_archive}-${darwin_arch}" func_extract_an_archive "`pwd`" "${darwin_base_archive}" cd "$darwin_curdir" $RM "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" done # $darwin_arches ## Okay now we've a bunch of thin objects, gotta fatten them up :) darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$basename" | sort -u` darwin_file= darwin_files= for darwin_file in $darwin_filelist; do darwin_files=`find unfat-$$ -name $darwin_file -print | $NL2SP` $LIPO -create -output "$darwin_file" $darwin_files done # $darwin_filelist $RM -rf unfat-$$ cd "$darwin_orig_dir" else cd $darwin_orig_dir func_extract_an_archive "$my_xdir" "$my_xabs" fi # $darwin_arches } # !$opt_dry_run ;; *) func_extract_an_archive "$my_xdir" "$my_xabs" ;; esac my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP` done func_extract_archives_result="$my_oldobjs" } # func_emit_wrapper_part1 [arg=no] # # Emit the first part of a libtool wrapper script on stdout. # For more information, see the description associated with # func_emit_wrapper(), below. func_emit_wrapper_part1 () { func_emit_wrapper_part1_arg1=no if test -n "$1" ; then func_emit_wrapper_part1_arg1=$1 fi $ECHO "\ #! $SHELL # $output - temporary wrapper script for $objdir/$outputname # Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION # # The $output program cannot be directly executed until all the libtool # libraries that it depends on are installed. # # This wrapper script should never be moved out of the build directory. # If it is, it will not operate correctly. # Sed substitution that helps us do robust quoting. It backslashifies # metacharacters that are still active within double-quoted strings. Xsed='${SED} -e 1s/^X//' sed_quote_subst='$sed_quote_subst' # Be Bourne compatible if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac fi BIN_SH=xpg4; export BIN_SH # for Tru64 DUALCASE=1; export DUALCASE # for MKS sh # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH relink_command=\"$relink_command\" # This environment variable determines our operation mode. if test \"\$libtool_install_magic\" = \"$magic\"; then # install mode needs the following variables: generated_by_libtool_version='$macro_version' notinst_deplibs='$notinst_deplibs' else # When we are sourced in execute mode, \$file and \$ECHO are already set. if test \"\$libtool_execute_magic\" != \"$magic\"; then ECHO=\"$qecho\" file=\"\$0\" # Make sure echo works. 
if test \"X\$1\" = X--no-reexec; then # Discard the --no-reexec flag, and continue. shift elif test \"X\`{ \$ECHO '\t'; } 2>/dev/null\`\" = 'X\t'; then # Yippee, \$ECHO works! : else # Restart under the correct shell, and then maybe \$ECHO will work. exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"} fi fi\ " $ECHO "\ # Find the directory that this script lives in. thisdir=\`\$ECHO \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\` test \"x\$thisdir\" = \"x\$file\" && thisdir=. # Follow symbolic links until we get to the real thisdir. file=\`ls -ld \"\$file\" | ${SED} -n 's/.*-> //p'\` while test -n \"\$file\"; do destdir=\`\$ECHO \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\` # If there was a directory component, then change thisdir. if test \"x\$destdir\" != \"x\$file\"; then case \"\$destdir\" in [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; *) thisdir=\"\$thisdir/\$destdir\" ;; esac fi file=\`\$ECHO \"X\$file\" | \$Xsed -e 's%^.*/%%'\` file=\`ls -ld \"\$thisdir/\$file\" | ${SED} -n 's/.*-> //p'\` done " } # end: func_emit_wrapper_part1 # func_emit_wrapper_part2 [arg=no] # # Emit the second part of a libtool wrapper script on stdout. # For more information, see the description associated with # func_emit_wrapper(), below. func_emit_wrapper_part2 () { func_emit_wrapper_part2_arg1=no if test -n "$1" ; then func_emit_wrapper_part2_arg1=$1 fi $ECHO "\ # Usually 'no', except on cygwin/mingw when embedded into # the cwrapper. WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_part2_arg1 if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then # special case for '.' if test \"\$thisdir\" = \".\"; then thisdir=\`pwd\` fi # remove .libs from thisdir case \"\$thisdir\" in *[\\\\/]$objdir ) thisdir=\`\$ECHO \"X\$thisdir\" | \$Xsed -e 's%[\\\\/][^\\\\/]*$%%'\` ;; $objdir ) thisdir=. ;; esac fi # Try to get the absolute directory name. absdir=\`cd \"\$thisdir\" && pwd\` test -n \"\$absdir\" && thisdir=\"\$absdir\" " if test "$fast_install" = yes; then $ECHO "\ program=lt-'$outputname'$exeext progdir=\"\$thisdir/$objdir\" if test ! -f \"\$progdir/\$program\" || { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\ test \"X\$file\" != \"X\$progdir/\$program\"; }; then file=\"\$\$-\$program\" if test ! -d \"\$progdir\"; then $MKDIR \"\$progdir\" else $RM \"\$progdir/\$file\" fi" $ECHO "\ # relink executable if necessary if test -n \"\$relink_command\"; then if relink_command_output=\`eval \$relink_command 2>&1\`; then : else $ECHO \"\$relink_command_output\" >&2 $RM \"\$progdir/\$file\" exit 1 fi fi $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || { $RM \"\$progdir/\$program\"; $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; } $RM \"\$progdir/\$file\" fi" else $ECHO "\ program='$outputname' progdir=\"\$thisdir/$objdir\" " fi $ECHO "\ if test -f \"\$progdir/\$program\"; then" # Export our shlibpath_var if we have one. if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then $ECHO "\ # Add our own library path to $shlibpath_var $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" # Some systems cannot cope with colon-terminated $shlibpath_var # The second colon is a workaround for a bug in BeOS R4 sed $shlibpath_var=\`\$ECHO \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\` export $shlibpath_var " fi # fixup the dll searchpath if we need to. 
if test -n "$dllsearchpath"; then $ECHO "\ # Add the dll search path components to the executable PATH PATH=$dllsearchpath:\$PATH " fi $ECHO "\ if test \"\$libtool_execute_magic\" != \"$magic\"; then # Run the actual program with our arguments. " case $host in # Backslashes separate directories on plain windows *-*-mingw | *-*-os2* | *-cegcc*) $ECHO "\ exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} " ;; *) $ECHO "\ exec \"\$progdir/\$program\" \${1+\"\$@\"} " ;; esac $ECHO "\ \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 exit 1 fi else # The program doesn't exist. \$ECHO \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2 \$ECHO \"This script is just a wrapper for \$program.\" 1>&2 $ECHO \"See the $PACKAGE documentation for more information.\" 1>&2 exit 1 fi fi\ " } # end: func_emit_wrapper_part2 # func_emit_wrapper [arg=no] # # Emit a libtool wrapper script on stdout. # Don't directly open a file because we may want to # incorporate the script contents within a cygwin/mingw # wrapper executable. Must ONLY be called from within # func_mode_link because it depends on a number of variables # set therein. # # ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR # variable will take. If 'yes', then the emitted script # will assume that the directory in which it is stored is # the $objdir directory. This is a cygwin/mingw-specific # behavior. func_emit_wrapper () { func_emit_wrapper_arg1=no if test -n "$1" ; then func_emit_wrapper_arg1=$1 fi # split this up so that func_emit_cwrapperexe_src # can call each part independently. func_emit_wrapper_part1 "${func_emit_wrapper_arg1}" func_emit_wrapper_part2 "${func_emit_wrapper_arg1}" } # func_to_host_path arg # # Convert paths to host format when used with build tools. # Intended for use with "native" mingw (where libtool itself # is running under the msys shell), or in the following cross- # build environments: # $build $host # mingw (msys) mingw [e.g. native] # cygwin mingw # *nix + wine mingw # where wine is equipped with the `winepath' executable. # In the native mingw case, the (msys) shell automatically # converts paths for any non-msys applications it launches, # but that facility isn't available from inside the cwrapper. # Similar accommodations are necessary for $host mingw and # $build cygwin. Calling this function does no harm for other # $host/$build combinations not listed above. # # ARG is the path (on $build) that should be converted to # the proper representation for $host. The result is stored # in $func_to_host_path_result. func_to_host_path () { func_to_host_path_result="$1" if test -n "$1" ; then case $host in *mingw* ) lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' case $build in *mingw* ) # actually, msys # awkward: cmd appends spaces to result lt_sed_strip_trailing_spaces="s/[ ]*\$//" func_to_host_path_tmp1=`( cmd //c echo "$1" |\ $SED -e "$lt_sed_strip_trailing_spaces" ) 2>/dev/null || echo ""` func_to_host_path_result=`echo "$func_to_host_path_tmp1" |\ $SED -e "$lt_sed_naive_backslashify"` ;; *cygwin* ) func_to_host_path_tmp1=`cygpath -w "$1"` func_to_host_path_result=`echo "$func_to_host_path_tmp1" |\ $SED -e "$lt_sed_naive_backslashify"` ;; * ) # Unfortunately, winepath does not exit with a non-zero # error code, so we are forced to check the contents of # stdout. On the other hand, if the command is not # found, the shell will set an exit code of 127 and print # *an error message* to stdout. 
So we must check for both # error code of zero AND non-empty stdout, which explains # the odd construction: func_to_host_path_tmp1=`winepath -w "$1" 2>/dev/null` if test "$?" -eq 0 && test -n "${func_to_host_path_tmp1}"; then func_to_host_path_result=`echo "$func_to_host_path_tmp1" |\ $SED -e "$lt_sed_naive_backslashify"` else # Allow warning below. func_to_host_path_result="" fi ;; esac if test -z "$func_to_host_path_result" ; then func_error "Could not determine host path corresponding to" func_error " '$1'" func_error "Continuing, but uninstalled executables may not work." # Fallback: func_to_host_path_result="$1" fi ;; esac fi } # end: func_to_host_path # func_to_host_pathlist arg # # Convert pathlists to host format when used with build tools. # See func_to_host_path(), above. This function supports the # following $build/$host combinations (but does no harm for # combinations not listed here): # $build $host # mingw (msys) mingw [e.g. native] # cygwin mingw # *nix + wine mingw # # Path separators are also converted from $build format to # $host format. If ARG begins or ends with a path separator # character, it is preserved (but converted to $host format) # on output. # # ARG is a pathlist (on $build) that should be converted to # the proper representation on $host. The result is stored # in $func_to_host_pathlist_result. func_to_host_pathlist () { func_to_host_pathlist_result="$1" if test -n "$1" ; then case $host in *mingw* ) lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' # Remove leading and trailing path separator characters from # ARG. msys behavior is inconsistent here, cygpath turns them # into '.;' and ';.', and winepath ignores them completely. func_to_host_pathlist_tmp2="$1" # Once set for this call, this variable should not be # reassigned. It is used in tha fallback case. func_to_host_pathlist_tmp1=`echo "$func_to_host_pathlist_tmp2" |\ $SED -e 's|^:*||' -e 's|:*$||'` case $build in *mingw* ) # Actually, msys. # Awkward: cmd appends spaces to result. lt_sed_strip_trailing_spaces="s/[ ]*\$//" func_to_host_pathlist_tmp2=`( cmd //c echo "$func_to_host_pathlist_tmp1" |\ $SED -e "$lt_sed_strip_trailing_spaces" ) 2>/dev/null || echo ""` func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp2" |\ $SED -e "$lt_sed_naive_backslashify"` ;; *cygwin* ) func_to_host_pathlist_tmp2=`cygpath -w -p "$func_to_host_pathlist_tmp1"` func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp2" |\ $SED -e "$lt_sed_naive_backslashify"` ;; * ) # unfortunately, winepath doesn't convert pathlists func_to_host_pathlist_result="" func_to_host_pathlist_oldIFS=$IFS IFS=: for func_to_host_pathlist_f in $func_to_host_pathlist_tmp1 ; do IFS=$func_to_host_pathlist_oldIFS if test -n "$func_to_host_pathlist_f" ; then func_to_host_path "$func_to_host_pathlist_f" if test -n "$func_to_host_path_result" ; then if test -z "$func_to_host_pathlist_result" ; then func_to_host_pathlist_result="$func_to_host_path_result" else func_to_host_pathlist_result="$func_to_host_pathlist_result;$func_to_host_path_result" fi fi fi IFS=: done IFS=$func_to_host_pathlist_oldIFS ;; esac if test -z "$func_to_host_pathlist_result" ; then func_error "Could not determine the host path(s) corresponding to" func_error " '$1'" func_error "Continuing, but uninstalled executables may not work." # Fallback. This may break if $1 contains DOS-style drive # specifications. 
The fix is not to complicate the expression # below, but for the user to provide a working wine installation # with winepath so that path translation in the cross-to-mingw # case works properly. lt_replace_pathsep_nix_to_dos="s|:|;|g" func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp1" |\ $SED -e "$lt_replace_pathsep_nix_to_dos"` fi # Now, add the leading and trailing path separators back case "$1" in :* ) func_to_host_pathlist_result=";$func_to_host_pathlist_result" ;; esac case "$1" in *: ) func_to_host_pathlist_result="$func_to_host_pathlist_result;" ;; esac ;; esac fi } # end: func_to_host_pathlist # func_emit_cwrapperexe_src # emit the source code for a wrapper executable on stdout # Must ONLY be called from within func_mode_link because # it depends on a number of variable set therein. func_emit_cwrapperexe_src () { cat < #include #ifdef _MSC_VER # include # include # include # define setmode _setmode #else # include # include # ifdef __CYGWIN__ # include # define HAVE_SETENV # ifdef __STRICT_ANSI__ char *realpath (const char *, char *); int putenv (char *); int setenv (const char *, const char *, int); # endif # endif #endif #include #include #include #include #include #include #include #include #if defined(PATH_MAX) # define LT_PATHMAX PATH_MAX #elif defined(MAXPATHLEN) # define LT_PATHMAX MAXPATHLEN #else # define LT_PATHMAX 1024 #endif #ifndef S_IXOTH # define S_IXOTH 0 #endif #ifndef S_IXGRP # define S_IXGRP 0 #endif #ifdef _MSC_VER # define S_IXUSR _S_IEXEC # define stat _stat # ifndef _INTPTR_T_DEFINED # define intptr_t int # endif #endif #ifndef DIR_SEPARATOR # define DIR_SEPARATOR '/' # define PATH_SEPARATOR ':' #endif #if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \ defined (__OS2__) # define HAVE_DOS_BASED_FILE_SYSTEM # define FOPEN_WB "wb" # ifndef DIR_SEPARATOR_2 # define DIR_SEPARATOR_2 '\\' # endif # ifndef PATH_SEPARATOR_2 # define PATH_SEPARATOR_2 ';' # endif #endif #ifndef DIR_SEPARATOR_2 # define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) #else /* DIR_SEPARATOR_2 */ # define IS_DIR_SEPARATOR(ch) \ (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) #endif /* DIR_SEPARATOR_2 */ #ifndef PATH_SEPARATOR_2 # define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) #else /* PATH_SEPARATOR_2 */ # define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) #endif /* PATH_SEPARATOR_2 */ #ifdef __CYGWIN__ # define FOPEN_WB "wb" #endif #ifndef FOPEN_WB # define FOPEN_WB "w" #endif #ifndef _O_BINARY # define _O_BINARY 0 #endif #define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) #define XFREE(stale) do { \ if (stale) { free ((void *) stale); stale = 0; } \ } while (0) #undef LTWRAPPER_DEBUGPRINTF #if defined DEBUGWRAPPER # define LTWRAPPER_DEBUGPRINTF(args) ltwrapper_debugprintf args static void ltwrapper_debugprintf (const char *fmt, ...) 
{ va_list args; va_start (args, fmt); (void) vfprintf (stderr, fmt, args); va_end (args); } #else # define LTWRAPPER_DEBUGPRINTF(args) #endif const char *program_name = NULL; void *xmalloc (size_t num); char *xstrdup (const char *string); const char *base_name (const char *name); char *find_executable (const char *wrapper); char *chase_symlinks (const char *pathspec); int make_executable (const char *path); int check_executable (const char *path); char *strendzap (char *str, const char *pat); void lt_fatal (const char *message, ...); void lt_setenv (const char *name, const char *value); char *lt_extend_str (const char *orig_value, const char *add, int to_end); void lt_opt_process_env_set (const char *arg); void lt_opt_process_env_prepend (const char *arg); void lt_opt_process_env_append (const char *arg); int lt_split_name_value (const char *arg, char** name, char** value); void lt_update_exe_path (const char *name, const char *value); void lt_update_lib_path (const char *name, const char *value); static const char *script_text_part1 = EOF func_emit_wrapper_part1 yes | $SED -e 's/\([\\"]\)/\\\1/g' \ -e 's/^/ "/' -e 's/$/\\n"/' echo ";" cat <"))); for (i = 0; i < newargc; i++) { LTWRAPPER_DEBUGPRINTF (("(main) newargz[%d] : %s\n", i, (newargz[i] ? newargz[i] : ""))); } EOF case $host_os in mingw*) cat <<"EOF" /* execv doesn't actually work on mingw as expected on unix */ rval = _spawnv (_P_WAIT, lt_argv_zero, (const char * const *) newargz); if (rval == -1) { /* failed to start process */ LTWRAPPER_DEBUGPRINTF (("(main) failed to launch target \"%s\": errno = %d\n", lt_argv_zero, errno)); return 127; } return rval; EOF ;; *) cat <<"EOF" execv (lt_argv_zero, newargz); return rval; /* =127, but avoids unused variable warning */ EOF ;; esac cat <<"EOF" } void * xmalloc (size_t num) { void *p = (void *) malloc (num); if (!p) lt_fatal ("Memory exhausted"); return p; } char * xstrdup (const char *string) { return string ? strcpy ((char *) xmalloc (strlen (string) + 1), string) : NULL; } const char * base_name (const char *name) { const char *base; #if defined (HAVE_DOS_BASED_FILE_SYSTEM) /* Skip over the disk name in MSDOS pathnames. */ if (isalpha ((unsigned char) name[0]) && name[1] == ':') name += 2; #endif for (base = name; *name; name++) if (IS_DIR_SEPARATOR (*name)) base = name + 1; return base; } int check_executable (const char *path) { struct stat st; LTWRAPPER_DEBUGPRINTF (("(check_executable) : %s\n", path ? (*path ? path : "EMPTY!") : "NULL!")); if ((!path) || (!*path)) return 0; if ((stat (path, &st) >= 0) && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) return 1; else return 0; } int make_executable (const char *path) { int rval = 0; struct stat st; LTWRAPPER_DEBUGPRINTF (("(make_executable) : %s\n", path ? (*path ? path : "EMPTY!") : "NULL!")); if ((!path) || (!*path)) return 0; if (stat (path, &st) >= 0) { rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR); } return rval; } /* Searches for the full path of the wrapper. Returns newly allocated full path name if found, NULL otherwise Does not chase symlinks, even on platforms that support them. */ char * find_executable (const char *wrapper) { int has_slash = 0; const char *p; const char *p_next; /* static buffer for getcwd */ char tmp[LT_PATHMAX + 1]; int tmp_len; char *concat_name; LTWRAPPER_DEBUGPRINTF (("(find_executable) : %s\n", wrapper ? (*wrapper ? wrapper : "EMPTY!") : "NULL!")); if ((wrapper == NULL) || (*wrapper == '\0')) return NULL; /* Absolute path? 
*/ #if defined (HAVE_DOS_BASED_FILE_SYSTEM) if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':') { concat_name = xstrdup (wrapper); if (check_executable (concat_name)) return concat_name; XFREE (concat_name); } else { #endif if (IS_DIR_SEPARATOR (wrapper[0])) { concat_name = xstrdup (wrapper); if (check_executable (concat_name)) return concat_name; XFREE (concat_name); } #if defined (HAVE_DOS_BASED_FILE_SYSTEM) } #endif for (p = wrapper; *p; p++) if (*p == '/') { has_slash = 1; break; } if (!has_slash) { /* no slashes; search PATH */ const char *path = getenv ("PATH"); if (path != NULL) { for (p = path; *p; p = p_next) { const char *q; size_t p_len; for (q = p; *q; q++) if (IS_PATH_SEPARATOR (*q)) break; p_len = q - p; p_next = (*q == '\0' ? q : q + 1); if (p_len == 0) { /* empty path: current directory */ if (getcwd (tmp, LT_PATHMAX) == NULL) lt_fatal ("getcwd failed"); tmp_len = strlen (tmp); concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); memcpy (concat_name, tmp, tmp_len); concat_name[tmp_len] = '/'; strcpy (concat_name + tmp_len + 1, wrapper); } else { concat_name = XMALLOC (char, p_len + 1 + strlen (wrapper) + 1); memcpy (concat_name, p, p_len); concat_name[p_len] = '/'; strcpy (concat_name + p_len + 1, wrapper); } if (check_executable (concat_name)) return concat_name; XFREE (concat_name); } } /* not found in PATH; assume curdir */ } /* Relative path | not found in path: prepend cwd */ if (getcwd (tmp, LT_PATHMAX) == NULL) lt_fatal ("getcwd failed"); tmp_len = strlen (tmp); concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); memcpy (concat_name, tmp, tmp_len); concat_name[tmp_len] = '/'; strcpy (concat_name + tmp_len + 1, wrapper); if (check_executable (concat_name)) return concat_name; XFREE (concat_name); return NULL; } char * chase_symlinks (const char *pathspec) { #ifndef S_ISLNK return xstrdup (pathspec); #else char buf[LT_PATHMAX]; struct stat s; char *tmp_pathspec = xstrdup (pathspec); char *p; int has_symlinks = 0; while (strlen (tmp_pathspec) && !has_symlinks) { LTWRAPPER_DEBUGPRINTF (("checking path component for symlinks: %s\n", tmp_pathspec)); if (lstat (tmp_pathspec, &s) == 0) { if (S_ISLNK (s.st_mode) != 0) { has_symlinks = 1; break; } /* search backwards for last DIR_SEPARATOR */ p = tmp_pathspec + strlen (tmp_pathspec) - 1; while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) p--; if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) { /* no more DIR_SEPARATORS left */ break; } *p = '\0'; } else { char *errstr = strerror (errno); lt_fatal ("Error accessing file %s (%s)", tmp_pathspec, errstr); } } XFREE (tmp_pathspec); if (!has_symlinks) { return xstrdup (pathspec); } tmp_pathspec = realpath (pathspec, buf); if (tmp_pathspec == 0) { lt_fatal ("Could not follow symlinks for %s", pathspec); } return xstrdup (tmp_pathspec); #endif } char * strendzap (char *str, const char *pat) { size_t len, patlen; assert (str != NULL); assert (pat != NULL); len = strlen (str); patlen = strlen (pat); if (patlen <= len) { str += len - patlen; if (strcmp (str, pat) == 0) *str = '\0'; } return str; } static void lt_error_core (int exit_status, const char *mode, const char *message, va_list ap) { fprintf (stderr, "%s: %s: ", program_name, mode); vfprintf (stderr, message, ap); fprintf (stderr, ".\n"); if (exit_status >= 0) exit (exit_status); } void lt_fatal (const char *message, ...) 
{ va_list ap; va_start (ap, message); lt_error_core (EXIT_FAILURE, "FATAL", message, ap); va_end (ap); } void lt_setenv (const char *name, const char *value) { LTWRAPPER_DEBUGPRINTF (("(lt_setenv) setting '%s' to '%s'\n", (name ? name : ""), (value ? value : ""))); { #ifdef HAVE_SETENV /* always make a copy, for consistency with !HAVE_SETENV */ char *str = xstrdup (value); setenv (name, str, 1); #else int len = strlen (name) + 1 + strlen (value) + 1; char *str = XMALLOC (char, len); sprintf (str, "%s=%s", name, value); if (putenv (str) != EXIT_SUCCESS) { XFREE (str); } #endif } } char * lt_extend_str (const char *orig_value, const char *add, int to_end) { char *new_value; if (orig_value && *orig_value) { int orig_value_len = strlen (orig_value); int add_len = strlen (add); new_value = XMALLOC (char, add_len + orig_value_len + 1); if (to_end) { strcpy (new_value, orig_value); strcpy (new_value + orig_value_len, add); } else { strcpy (new_value, add); strcpy (new_value + add_len, orig_value); } } else { new_value = xstrdup (add); } return new_value; } int lt_split_name_value (const char *arg, char** name, char** value) { const char *p; int len; if (!arg || !*arg) return 1; p = strchr (arg, (int)'='); if (!p) return 1; *value = xstrdup (++p); len = strlen (arg) - strlen (*value); *name = XMALLOC (char, len); strncpy (*name, arg, len-1); (*name)[len - 1] = '\0'; return 0; } void lt_opt_process_env_set (const char *arg) { char *name = NULL; char *value = NULL; if (lt_split_name_value (arg, &name, &value) != 0) { XFREE (name); XFREE (value); lt_fatal ("bad argument for %s: '%s'", env_set_opt, arg); } lt_setenv (name, value); XFREE (name); XFREE (value); } void lt_opt_process_env_prepend (const char *arg) { char *name = NULL; char *value = NULL; char *new_value = NULL; if (lt_split_name_value (arg, &name, &value) != 0) { XFREE (name); XFREE (value); lt_fatal ("bad argument for %s: '%s'", env_prepend_opt, arg); } new_value = lt_extend_str (getenv (name), value, 0); lt_setenv (name, new_value); XFREE (new_value); XFREE (name); XFREE (value); } void lt_opt_process_env_append (const char *arg) { char *name = NULL; char *value = NULL; char *new_value = NULL; if (lt_split_name_value (arg, &name, &value) != 0) { XFREE (name); XFREE (value); lt_fatal ("bad argument for %s: '%s'", env_append_opt, arg); } new_value = lt_extend_str (getenv (name), value, 1); lt_setenv (name, new_value); XFREE (new_value); XFREE (name); XFREE (value); } void lt_update_exe_path (const char *name, const char *value) { LTWRAPPER_DEBUGPRINTF (("(lt_update_exe_path) modifying '%s' by prepending '%s'\n", (name ? name : ""), (value ? value : ""))); if (name && *name && value && *value) { char *new_value = lt_extend_str (getenv (name), value, 0); /* some systems can't cope with a ':'-terminated path #' */ int len = strlen (new_value); while (((len = strlen (new_value)) > 0) && IS_PATH_SEPARATOR (new_value[len-1])) { new_value[len-1] = '\0'; } lt_setenv (name, new_value); XFREE (new_value); } } void lt_update_lib_path (const char *name, const char *value) { LTWRAPPER_DEBUGPRINTF (("(lt_update_lib_path) modifying '%s' by prepending '%s'\n", (name ? name : ""), (value ? value : ""))); if (name && *name && value && *value) { char *new_value = lt_extend_str (getenv (name), value, 0); lt_setenv (name, new_value); XFREE (new_value); } } EOF } # end: func_emit_cwrapperexe_src # func_mode_link arg... 
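# Link mode.  The function below parses the link-mode command line
# (-rpath, -L, -l, -version-info, -module, -export-symbols, ...),
# resolves .la dependencies over the conv/scan/dlopen/dlpreopen/link
# passes, and then builds the requested library, object or program.
# A typical invocation looks roughly like
#   libtool --mode=link $CC -o libfoo.la foo.lo bar.lo \
#           -rpath /usr/local/lib -version-info 3:2:1
# (illustrative only; the actual flags come from the generated Makefiles).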
func_mode_link () { $opt_debug case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) # It is impossible to link a dll without this setting, and # we shouldn't force the makefile maintainer to figure out # which system we are compiling for in order to pass an extra # flag for every libtool invocation. # allow_undefined=no # FIXME: Unfortunately, there are problems with the above when trying # to make a dll which has undefined symbols, in which case not # even a static library is built. For now, we need to specify # -no-undefined on the libtool link line when we can be certain # that all symbols are satisfied, otherwise we get a static library. allow_undefined=yes ;; *) allow_undefined=yes ;; esac libtool_args=$nonopt base_compile="$nonopt $@" compile_command=$nonopt finalize_command=$nonopt compile_rpath= finalize_rpath= compile_shlibpath= finalize_shlibpath= convenience= old_convenience= deplibs= old_deplibs= compiler_flags= linker_flags= dllsearchpath= lib_search_path=`pwd` inst_prefix_dir= new_inherited_linker_flags= avoid_version=no dlfiles= dlprefiles= dlself=no export_dynamic=no export_symbols= export_symbols_regex= generated= libobjs= ltlibs= module=no no_install=no objs= non_pic_objects= precious_files_regex= prefer_static_libs=no preload=no prev= prevarg= release= rpath= xrpath= perm_rpath= temp_rpath= thread_safe=no vinfo= vinfo_number=no weak_libs= single_module="${wl}-single_module" func_infer_tag $base_compile # We need to know -static, to get the right output filenames. for arg do case $arg in -shared) test "$build_libtool_libs" != yes && \ func_fatal_configuration "can not build a shared library" build_old_libs=no break ;; -all-static | -static | -static-libtool-libs) case $arg in -all-static) if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then func_warning "complete static linking is impossible in this configuration" fi if test -n "$link_static_flag"; then dlopen_self=$dlopen_self_static fi prefer_static_libs=yes ;; -static) if test -z "$pic_flag" && test -n "$link_static_flag"; then dlopen_self=$dlopen_self_static fi prefer_static_libs=built ;; -static-libtool-libs) if test -z "$pic_flag" && test -n "$link_static_flag"; then dlopen_self=$dlopen_self_static fi prefer_static_libs=yes ;; esac build_libtool_libs=no build_old_libs=yes break ;; esac done # See if our shared archives depend on static archives. test -n "$old_archive_from_new_cmds" && build_old_libs=yes # Go through the arguments, transforming them on the way. while test "$#" -gt 0; do arg="$1" shift func_quote_for_eval "$arg" qarg=$func_quote_for_eval_unquoted_result func_append libtool_args " $func_quote_for_eval_result" # If the previous option needs an argument, assign it. if test -n "$prev"; then case $prev in output) func_append compile_command " @OUTPUT@" func_append finalize_command " @OUTPUT@" ;; esac case $prev in dlfiles|dlprefiles) if test "$preload" = no; then # Add the symbol object into the linking commands. func_append compile_command " @SYMFILE@" func_append finalize_command " @SYMFILE@" preload=yes fi case $arg in *.la | *.lo) ;; # We handle these cases below. 
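	  # The `force' and `self' keywords handled below are special arguments
	  # to -dlopen/-dlpreopen, treated here rather than as file names.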
force) if test "$dlself" = no; then dlself=needless export_dynamic=yes fi prev= continue ;; self) if test "$prev" = dlprefiles; then dlself=yes elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then dlself=yes else dlself=needless export_dynamic=yes fi prev= continue ;; *) if test "$prev" = dlfiles; then dlfiles="$dlfiles $arg" else dlprefiles="$dlprefiles $arg" fi prev= continue ;; esac ;; expsyms) export_symbols="$arg" test -f "$arg" \ || func_fatal_error "symbol file \`$arg' does not exist" prev= continue ;; expsyms_regex) export_symbols_regex="$arg" prev= continue ;; framework) case $host in *-*-darwin*) case "$deplibs " in *" $qarg.ltframework "*) ;; *) deplibs="$deplibs $qarg.ltframework" # this is fixed later ;; esac ;; esac prev= continue ;; inst_prefix) inst_prefix_dir="$arg" prev= continue ;; objectlist) if test -f "$arg"; then save_arg=$arg moreargs= for fil in `cat "$save_arg"` do # moreargs="$moreargs $fil" arg=$fil # A libtool-controlled object. # Check to see that this really is a libtool object. if func_lalib_unsafe_p "$arg"; then pic_object= non_pic_object= # Read the .lo file func_source "$arg" if test -z "$pic_object" || test -z "$non_pic_object" || test "$pic_object" = none && test "$non_pic_object" = none; then func_fatal_error "cannot find name of object for \`$arg'" fi # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir="$func_dirname_result" if test "$pic_object" != none; then # Prepend the subdirectory the object is found in. pic_object="$xdir$pic_object" if test "$prev" = dlfiles; then if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then dlfiles="$dlfiles $pic_object" prev= continue else # If libtool objects are unsupported, then we need to preload. prev=dlprefiles fi fi # CHECK ME: I think I busted this. -Ossama if test "$prev" = dlprefiles; then # Preload the old-style object. dlprefiles="$dlprefiles $pic_object" prev= fi # A PIC object. func_append libobjs " $pic_object" arg="$pic_object" fi # Non-PIC object. if test "$non_pic_object" != none; then # Prepend the subdirectory the object is found in. non_pic_object="$xdir$non_pic_object" # A standard non-PIC object func_append non_pic_objects " $non_pic_object" if test -z "$pic_object" || test "$pic_object" = none ; then arg="$non_pic_object" fi else # If the PIC object exists, use it instead. # $xdir was prepended to $pic_object above. non_pic_object="$pic_object" func_append non_pic_objects " $non_pic_object" fi else # Only an error if not doing a dry-run. if $opt_dry_run; then # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir="$func_dirname_result" func_lo2o "$arg" pic_object=$xdir$objdir/$func_lo2o_result non_pic_object=$xdir$func_lo2o_result func_append libobjs " $pic_object" func_append non_pic_objects " $non_pic_object" else func_fatal_error "\`$arg' is not a valid libtool object" fi fi done else func_fatal_error "link input file \`$arg' does not exist" fi arg=$save_arg prev= continue ;; precious_regex) precious_files_regex="$arg" prev= continue ;; release) release="-$arg" prev= continue ;; rpath | xrpath) # We need an absolute path. 
case $arg in [\\/]* | [A-Za-z]:[\\/]*) ;; *) func_fatal_error "only absolute run-paths are allowed" ;; esac if test "$prev" = rpath; then case "$rpath " in *" $arg "*) ;; *) rpath="$rpath $arg" ;; esac else case "$xrpath " in *" $arg "*) ;; *) xrpath="$xrpath $arg" ;; esac fi prev= continue ;; shrext) shrext_cmds="$arg" prev= continue ;; weak) weak_libs="$weak_libs $arg" prev= continue ;; xcclinker) linker_flags="$linker_flags $qarg" compiler_flags="$compiler_flags $qarg" prev= func_append compile_command " $qarg" func_append finalize_command " $qarg" continue ;; xcompiler) compiler_flags="$compiler_flags $qarg" prev= func_append compile_command " $qarg" func_append finalize_command " $qarg" continue ;; xlinker) linker_flags="$linker_flags $qarg" compiler_flags="$compiler_flags $wl$qarg" prev= func_append compile_command " $wl$qarg" func_append finalize_command " $wl$qarg" continue ;; *) eval "$prev=\"\$arg\"" prev= continue ;; esac fi # test -n "$prev" prevarg="$arg" case $arg in -all-static) if test -n "$link_static_flag"; then # See comment for -static flag below, for more details. func_append compile_command " $link_static_flag" func_append finalize_command " $link_static_flag" fi continue ;; -allow-undefined) # FIXME: remove this flag sometime in the future. func_fatal_error "\`-allow-undefined' must not be used because it is the default" ;; -avoid-version) avoid_version=yes continue ;; -dlopen) prev=dlfiles continue ;; -dlpreopen) prev=dlprefiles continue ;; -export-dynamic) export_dynamic=yes continue ;; -export-symbols | -export-symbols-regex) if test -n "$export_symbols" || test -n "$export_symbols_regex"; then func_fatal_error "more than one -exported-symbols argument is not allowed" fi if test "X$arg" = "X-export-symbols"; then prev=expsyms else prev=expsyms_regex fi continue ;; -framework) prev=framework continue ;; -inst-prefix-dir) prev=inst_prefix continue ;; # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* # so, if we see these flags be careful not to treat them like -L -L[A-Z][A-Z]*:*) case $with_gcc/$host in no/*-*-irix* | /*-*-irix*) func_append compile_command " $arg" func_append finalize_command " $arg" ;; esac continue ;; -L*) func_stripname '-L' '' "$arg" dir=$func_stripname_result if test -z "$dir"; then if test "$#" -gt 0; then func_fatal_error "require no space between \`-L' and \`$1'" else func_fatal_error "need path for \`-L' option" fi fi # We need an absolute path. 
case $dir in [\\/]* | [A-Za-z]:[\\/]*) ;; *) absdir=`cd "$dir" && pwd` test -z "$absdir" && \ func_fatal_error "cannot determine absolute directory name of \`$dir'" dir="$absdir" ;; esac case "$deplibs " in *" -L$dir "*) ;; *) deplibs="$deplibs -L$dir" lib_search_path="$lib_search_path $dir" ;; esac case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) testbindir=`$ECHO "X$dir" | $Xsed -e 's*/lib$*/bin*'` case :$dllsearchpath: in *":$dir:"*) ;; ::) dllsearchpath=$dir;; *) dllsearchpath="$dllsearchpath:$dir";; esac case :$dllsearchpath: in *":$testbindir:"*) ;; ::) dllsearchpath=$testbindir;; *) dllsearchpath="$dllsearchpath:$testbindir";; esac ;; esac continue ;; -l*) if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc*) # These systems don't actually have a C or math library (as such) continue ;; *-*-os2*) # These systems don't actually have a C library (as such) test "X$arg" = "X-lc" && continue ;; *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) # Do not include libc due to us having libc/libc_r. test "X$arg" = "X-lc" && continue ;; *-*-rhapsody* | *-*-darwin1.[012]) # Rhapsody C and math libraries are in the System framework deplibs="$deplibs System.ltframework" continue ;; *-*-sco3.2v5* | *-*-sco5v6*) # Causes problems with __ctype test "X$arg" = "X-lc" && continue ;; *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) # Compiler inserts libc in the correct place for threads to work test "X$arg" = "X-lc" && continue ;; esac elif test "X$arg" = "X-lc_r"; then case $host in *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) # Do not include libc_r directly, use -pthread flag. continue ;; esac fi deplibs="$deplibs $arg" continue ;; -module) module=yes continue ;; # Tru64 UNIX uses -model [arg] to determine the layout of C++ # classes, name mangling, and exception handling. # Darwin uses the -arch flag to determine output architecture. -model|-arch|-isysroot) compiler_flags="$compiler_flags $arg" func_append compile_command " $arg" func_append finalize_command " $arg" prev=xcompiler continue ;; -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads) compiler_flags="$compiler_flags $arg" func_append compile_command " $arg" func_append finalize_command " $arg" case "$new_inherited_linker_flags " in *" $arg "*) ;; * ) new_inherited_linker_flags="$new_inherited_linker_flags $arg" ;; esac continue ;; -multi_module) single_module="${wl}-multi_module" continue ;; -no-fast-install) fast_install=no continue ;; -no-install) case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*) # The PATH hackery in wrapper scripts is required on Windows # and Darwin in order for the loader to find any dlls it needs. func_warning "\`-no-install' is ignored for $host" func_warning "assuming \`-no-fast-install' instead" fast_install=no ;; *) no_install=yes ;; esac continue ;; -no-undefined) allow_undefined=no continue ;; -objectlist) prev=objectlist continue ;; -o) prev=output ;; -precious-files-regex) prev=precious_regex continue ;; -release) prev=release continue ;; -rpath) prev=rpath continue ;; -R) prev=xrpath continue ;; -R*) func_stripname '-R' '' "$arg" dir=$func_stripname_result # We need an absolute path. case $dir in [\\/]* | [A-Za-z]:[\\/]*) ;; *) func_fatal_error "only absolute run-paths are allowed" ;; esac case "$xrpath " in *" $dir "*) ;; *) xrpath="$xrpath $dir" ;; esac continue ;; -shared) # The effects of -shared are defined in a previous loop. 
continue ;; -shrext) prev=shrext continue ;; -static | -static-libtool-libs) # The effects of -static are defined in a previous loop. # We used to do the same as -all-static on platforms that # didn't have a PIC flag, but the assumption that the effects # would be equivalent was wrong. It would break on at least # Digital Unix and AIX. continue ;; -thread-safe) thread_safe=yes continue ;; -version-info) prev=vinfo continue ;; -version-number) prev=vinfo vinfo_number=yes continue ;; -weak) prev=weak continue ;; -Wc,*) func_stripname '-Wc,' '' "$arg" args=$func_stripname_result arg= save_ifs="$IFS"; IFS=',' for flag in $args; do IFS="$save_ifs" func_quote_for_eval "$flag" arg="$arg $wl$func_quote_for_eval_result" compiler_flags="$compiler_flags $func_quote_for_eval_result" done IFS="$save_ifs" func_stripname ' ' '' "$arg" arg=$func_stripname_result ;; -Wl,*) func_stripname '-Wl,' '' "$arg" args=$func_stripname_result arg= save_ifs="$IFS"; IFS=',' for flag in $args; do IFS="$save_ifs" func_quote_for_eval "$flag" arg="$arg $wl$func_quote_for_eval_result" compiler_flags="$compiler_flags $wl$func_quote_for_eval_result" linker_flags="$linker_flags $func_quote_for_eval_result" done IFS="$save_ifs" func_stripname ' ' '' "$arg" arg=$func_stripname_result ;; -Xcompiler) prev=xcompiler continue ;; -Xlinker) prev=xlinker continue ;; -XCClinker) prev=xcclinker continue ;; # -msg_* for osf cc -msg_*) func_quote_for_eval "$arg" arg="$func_quote_for_eval_result" ;; # -64, -mips[0-9] enable 64-bit mode on the SGI compiler # -r[0-9][0-9]* specifies the processor on the SGI compiler # -xarch=*, -xtarget=* enable 64-bit mode on the Sun compiler # +DA*, +DD* enable 64-bit mode on the HP compiler # -q* pass through compiler args for the IBM compiler # -m*, -t[45]*, -txscale* pass through architecture-specific # compiler args for GCC # -F/path gives path to uninstalled frameworks, gcc on darwin # -p, -pg, --coverage, -fprofile-* pass through profiling flag for GCC # @file GCC response files -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*) func_quote_for_eval "$arg" arg="$func_quote_for_eval_result" func_append compile_command " $arg" func_append finalize_command " $arg" compiler_flags="$compiler_flags $arg" continue ;; # Some other compiler flag. -* | +*) func_quote_for_eval "$arg" arg="$func_quote_for_eval_result" ;; *.$objext) # A standard object. objs="$objs $arg" ;; *.lo) # A libtool-controlled object. # Check to see that this really is a libtool object. if func_lalib_unsafe_p "$arg"; then pic_object= non_pic_object= # Read the .lo file func_source "$arg" if test -z "$pic_object" || test -z "$non_pic_object" || test "$pic_object" = none && test "$non_pic_object" = none; then func_fatal_error "cannot find name of object for \`$arg'" fi # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir="$func_dirname_result" if test "$pic_object" != none; then # Prepend the subdirectory the object is found in. pic_object="$xdir$pic_object" if test "$prev" = dlfiles; then if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then dlfiles="$dlfiles $pic_object" prev= continue else # If libtool objects are unsupported, then we need to preload. prev=dlprefiles fi fi # CHECK ME: I think I busted this. -Ossama if test "$prev" = dlprefiles; then # Preload the old-style object. dlprefiles="$dlprefiles $pic_object" prev= fi # A PIC object. func_append libobjs " $pic_object" arg="$pic_object" fi # Non-PIC object. 
if test "$non_pic_object" != none; then # Prepend the subdirectory the object is found in. non_pic_object="$xdir$non_pic_object" # A standard non-PIC object func_append non_pic_objects " $non_pic_object" if test -z "$pic_object" || test "$pic_object" = none ; then arg="$non_pic_object" fi else # If the PIC object exists, use it instead. # $xdir was prepended to $pic_object above. non_pic_object="$pic_object" func_append non_pic_objects " $non_pic_object" fi else # Only an error if not doing a dry-run. if $opt_dry_run; then # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir="$func_dirname_result" func_lo2o "$arg" pic_object=$xdir$objdir/$func_lo2o_result non_pic_object=$xdir$func_lo2o_result func_append libobjs " $pic_object" func_append non_pic_objects " $non_pic_object" else func_fatal_error "\`$arg' is not a valid libtool object" fi fi ;; *.$libext) # An archive. deplibs="$deplibs $arg" old_deplibs="$old_deplibs $arg" continue ;; *.la) # A libtool-controlled library. if test "$prev" = dlfiles; then # This library was specified with -dlopen. dlfiles="$dlfiles $arg" prev= elif test "$prev" = dlprefiles; then # The library was specified with -dlpreopen. dlprefiles="$dlprefiles $arg" prev= else deplibs="$deplibs $arg" fi continue ;; # Some other compiler argument. *) # Unknown arguments in both finalize_command and compile_command need # to be aesthetically quoted because they are evaled later. func_quote_for_eval "$arg" arg="$func_quote_for_eval_result" ;; esac # arg # Now actually substitute the argument into the commands. if test -n "$arg"; then func_append compile_command " $arg" func_append finalize_command " $arg" fi done # argument parsing loop test -n "$prev" && \ func_fatal_help "the \`$prevarg' option requires an argument" if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then eval arg=\"$export_dynamic_flag_spec\" func_append compile_command " $arg" func_append finalize_command " $arg" fi oldlibs= # calculate the name of the file, without its directory func_basename "$output" outputname="$func_basename_result" libobjs_save="$libobjs" if test -n "$shlibpath_var"; then # get the directories listed in $shlibpath_var eval shlib_search_path=\`\$ECHO \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\` else shlib_search_path= fi eval sys_lib_search_path=\"$sys_lib_search_path_spec\" eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" func_dirname "$output" "/" "" output_objdir="$func_dirname_result$objdir" # Create the object directory. func_mkdir_p "$output_objdir" # Determine the type of output case $output in "") func_fatal_help "you must specify an output file" ;; *.$libext) linkmode=oldlib ;; *.lo | *.$objext) linkmode=obj ;; *.la) linkmode=lib ;; *) linkmode=prog ;; # Anything else should be a program. esac specialdeplibs= libs= # Find all interdependent deplibs by searching for libraries # that are linked more than once (e.g. -la -lb -la) for deplib in $deplibs; do if $opt_duplicate_deps ; then case "$libs " in *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; esac fi libs="$libs $deplib" done if test "$linkmode" = lib; then libs="$predeps $libs $compiler_lib_search_path $postdeps" # Compute libraries that are listed more than once in $predeps # $postdeps and mark them as special (i.e., whose duplicates are # not to be eliminated). 
pre_post_deps= if $opt_duplicate_compiler_generated_deps; then for pre_post_dep in $predeps $postdeps; do case "$pre_post_deps " in *" $pre_post_dep "*) specialdeplibs="$specialdeplibs $pre_post_deps" ;; esac pre_post_deps="$pre_post_deps $pre_post_dep" done fi pre_post_deps= fi deplibs= newdependency_libs= newlib_search_path= need_relink=no # whether we're linking any uninstalled libtool libraries notinst_deplibs= # not-installed libtool libraries notinst_path= # paths that contain not-installed libtool libraries case $linkmode in lib) passes="conv dlpreopen link" for file in $dlfiles $dlprefiles; do case $file in *.la) ;; *) func_fatal_help "libraries can \`-dlopen' only libtool libraries: $file" ;; esac done ;; prog) compile_deplibs= finalize_deplibs= alldeplibs=no newdlfiles= newdlprefiles= passes="conv scan dlopen dlpreopen link" ;; *) passes="conv" ;; esac for pass in $passes; do # The preopen pass in lib mode reverses $deplibs; put it back here # so that -L comes before libs that need it for instance... if test "$linkmode,$pass" = "lib,link"; then ## FIXME: Find the place where the list is rebuilt in the wrong ## order, and fix it there properly tmp_deplibs= for deplib in $deplibs; do tmp_deplibs="$deplib $tmp_deplibs" done deplibs="$tmp_deplibs" fi if test "$linkmode,$pass" = "lib,link" || test "$linkmode,$pass" = "prog,scan"; then libs="$deplibs" deplibs= fi if test "$linkmode" = prog; then case $pass in dlopen) libs="$dlfiles" ;; dlpreopen) libs="$dlprefiles" ;; link) libs="$deplibs %DEPLIBS% $dependency_libs" ;; esac fi if test "$linkmode,$pass" = "lib,dlpreopen"; then # Collect and forward deplibs of preopened libtool libs for lib in $dlprefiles; do # Ignore non-libtool-libs dependency_libs= case $lib in *.la) func_source "$lib" ;; esac # Collect preopened libtool deplibs, except any this library # has declared as weak libs for deplib in $dependency_libs; do deplib_base=`$ECHO "X$deplib" | $Xsed -e "$basename"` case " $weak_libs " in *" $deplib_base "*) ;; *) deplibs="$deplibs $deplib" ;; esac done done libs="$dlprefiles" fi if test "$pass" = dlopen; then # Collect dlpreopened libraries save_deplibs="$deplibs" deplibs= fi for deplib in $libs; do lib= found=no case $deplib in -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads) if test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else compiler_flags="$compiler_flags $deplib" if test "$linkmode" = lib ; then case "$new_inherited_linker_flags " in *" $deplib "*) ;; * ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;; esac fi fi continue ;; -l*) if test "$linkmode" != lib && test "$linkmode" != prog; then func_warning "\`-l' is ignored for archives/objects" continue fi func_stripname '-l' '' "$deplib" name=$func_stripname_result if test "$linkmode" = lib; then searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" else searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" fi for searchdir in $searchdirs; do for search_ext in .la $std_shrext .so .a; do # Search the libtool library lib="$searchdir/lib${name}${search_ext}" if test -f "$lib"; then if test "$search_ext" = ".la"; then found=yes else found=no fi break 2 fi done done if test "$found" != yes; then # deplib doesn't seem to be a libtool library if test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib 
$finalize_deplibs" else deplibs="$deplib $deplibs" test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" fi continue else # deplib is a libtool library # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, # We need to do some special things here, and not later. if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then case " $predeps $postdeps " in *" $deplib "*) if func_lalib_p "$lib"; then library_names= old_library= func_source "$lib" for l in $old_library $library_names; do ll="$l" done if test "X$ll" = "X$old_library" ; then # only static version available found=no func_dirname "$lib" "" "." ladir="$func_dirname_result" lib=$ladir/$old_library if test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else deplibs="$deplib $deplibs" test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" fi continue fi fi ;; *) ;; esac fi fi ;; # -l *.ltframework) if test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else deplibs="$deplib $deplibs" if test "$linkmode" = lib ; then case "$new_inherited_linker_flags " in *" $deplib "*) ;; * ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;; esac fi fi continue ;; -L*) case $linkmode in lib) deplibs="$deplib $deplibs" test "$pass" = conv && continue newdependency_libs="$deplib $newdependency_libs" func_stripname '-L' '' "$deplib" newlib_search_path="$newlib_search_path $func_stripname_result" ;; prog) if test "$pass" = conv; then deplibs="$deplib $deplibs" continue fi if test "$pass" = scan; then deplibs="$deplib $deplibs" else compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" fi func_stripname '-L' '' "$deplib" newlib_search_path="$newlib_search_path $func_stripname_result" ;; *) func_warning "\`-L' is ignored for archives/objects" ;; esac # linkmode continue ;; # -L -R*) if test "$pass" = link; then func_stripname '-R' '' "$deplib" dir=$func_stripname_result # Make sure the xrpath contains only unique directories. case "$xrpath " in *" $dir "*) ;; *) xrpath="$xrpath $dir" ;; esac fi deplibs="$deplib $deplibs" continue ;; *.la) lib="$deplib" ;; *.$libext) if test "$pass" = conv; then deplibs="$deplib $deplibs" continue fi case $linkmode in lib) # Linking convenience modules into shared libraries is allowed, # but linking other static libraries is non-portable. case " $dlpreconveniencelibs " in *" $deplib "*) ;; *) valid_a_lib=no case $deplibs_check_method in match_pattern*) set dummy $deplibs_check_method; shift match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` if eval "\$ECHO \"X$deplib\"" 2>/dev/null | $Xsed -e 10q \ | $EGREP "$match_pattern_regex" > /dev/null; then valid_a_lib=yes fi ;; pass_all) valid_a_lib=yes ;; esac if test "$valid_a_lib" != yes; then $ECHO $ECHO "*** Warning: Trying to link with static lib archive $deplib." $ECHO "*** I have the capability to make that library automatically link in when" $ECHO "*** you link to this library. But I can only do this if you have a" $ECHO "*** shared version of the library, which you do not appear to have" $ECHO "*** because the file extensions .$libext of this argument makes me believe" $ECHO "*** that it is just a static archive that I should not use here." else $ECHO $ECHO "*** Warning: Linking the shared library $output against the" $ECHO "*** static library $deplib is not portable!" 
deplibs="$deplib $deplibs" fi ;; esac continue ;; prog) if test "$pass" != link; then deplibs="$deplib $deplibs" else compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" fi continue ;; esac # linkmode ;; # *.$libext *.lo | *.$objext) if test "$pass" = conv; then deplibs="$deplib $deplibs" elif test "$linkmode" = prog; then if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then # If there is no dlopen support or we're linking statically, # we need to preload. newdlprefiles="$newdlprefiles $deplib" compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else newdlfiles="$newdlfiles $deplib" fi fi continue ;; %DEPLIBS%) alldeplibs=yes continue ;; esac # case $deplib if test "$found" = yes || test -f "$lib"; then : else func_fatal_error "cannot find the library \`$lib' or unhandled argument \`$deplib'" fi # Check to see that this really is a libtool archive. func_lalib_unsafe_p "$lib" \ || func_fatal_error "\`$lib' is not a valid libtool archive" func_dirname "$lib" "" "." ladir="$func_dirname_result" dlname= dlopen= dlpreopen= libdir= library_names= old_library= inherited_linker_flags= # If the library was installed with an old release of libtool, # it will not redefine variables installed, or shouldnotlink installed=yes shouldnotlink=no avoidtemprpath= # Read the .la file func_source "$lib" # Convert "-framework foo" to "foo.ltframework" if test -n "$inherited_linker_flags"; then tmp_inherited_linker_flags=`$ECHO "X$inherited_linker_flags" | $Xsed -e 's/-framework \([^ $]*\)/\1.ltframework/g'` for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do case " $new_inherited_linker_flags " in *" $tmp_inherited_linker_flag "*) ;; *) new_inherited_linker_flags="$new_inherited_linker_flags $tmp_inherited_linker_flag";; esac done fi dependency_libs=`$ECHO "X $dependency_libs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` if test "$linkmode,$pass" = "lib,link" || test "$linkmode,$pass" = "prog,scan" || { test "$linkmode" != prog && test "$linkmode" != lib; }; then test -n "$dlopen" && dlfiles="$dlfiles $dlopen" test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen" fi if test "$pass" = conv; then # Only check for convenience libraries deplibs="$lib $deplibs" if test -z "$libdir"; then if test -z "$old_library"; then func_fatal_error "cannot find name of link library for \`$lib'" fi # It is a libtool convenience library, so add in its objects. convenience="$convenience $ladir/$objdir/$old_library" old_convenience="$old_convenience $ladir/$objdir/$old_library" elif test "$linkmode" != prog && test "$linkmode" != lib; then func_fatal_error "\`$lib' is not a convenience library" fi tmp_libs= for deplib in $dependency_libs; do deplibs="$deplib $deplibs" if $opt_duplicate_deps ; then case "$tmp_libs " in *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; esac fi tmp_libs="$tmp_libs $deplib" done continue fi # $pass = conv # Get the name of the library we link against. linklib= for l in $old_library $library_names; do linklib="$l" done if test -z "$linklib"; then func_fatal_error "cannot find name of link library for \`$lib'" fi # This library was specified with -dlopen. 
if test "$pass" = dlopen; then if test -z "$libdir"; then func_fatal_error "cannot -dlopen a convenience library: \`$lib'" fi if test -z "$dlname" || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then # If there is no dlname, no dlopen support or we're linking # statically, we need to preload. We also need to preload any # dependent libraries so libltdl's deplib preloader doesn't # bomb out in the load deplibs phase. dlprefiles="$dlprefiles $lib $dependency_libs" else newdlfiles="$newdlfiles $lib" fi continue fi # $pass = dlopen # We need an absolute path. case $ladir in [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;; *) abs_ladir=`cd "$ladir" && pwd` if test -z "$abs_ladir"; then func_warning "cannot determine absolute directory name of \`$ladir'" func_warning "passing it literally to the linker, although it might fail" abs_ladir="$ladir" fi ;; esac func_basename "$lib" laname="$func_basename_result" # Find the relevant object directory and library name. if test "X$installed" = Xyes; then if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then func_warning "library \`$lib' was moved." dir="$ladir" absdir="$abs_ladir" libdir="$abs_ladir" else dir="$libdir" absdir="$libdir" fi test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes else if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then dir="$ladir" absdir="$abs_ladir" # Remove this search path later notinst_path="$notinst_path $abs_ladir" else dir="$ladir/$objdir" absdir="$abs_ladir/$objdir" # Remove this search path later notinst_path="$notinst_path $abs_ladir" fi fi # $installed = yes func_stripname 'lib' '.la' "$laname" name=$func_stripname_result # This library was specified with -dlpreopen. if test "$pass" = dlpreopen; then if test -z "$libdir" && test "$linkmode" = prog; then func_fatal_error "only libraries may -dlpreopen a convenience library: \`$lib'" fi # Prefer using a static library (so that no silly _DYNAMIC symbols # are required to link). if test -n "$old_library"; then newdlprefiles="$newdlprefiles $dir/$old_library" # Keep a list of preopened convenience libraries to check # that they are being used correctly in the link pass. test -z "$libdir" && \ dlpreconveniencelibs="$dlpreconveniencelibs $dir/$old_library" # Otherwise, use the dlname, so that lt_dlopen finds it. elif test -n "$dlname"; then newdlprefiles="$newdlprefiles $dir/$dlname" else newdlprefiles="$newdlprefiles $dir/$linklib" fi fi # $pass = dlpreopen if test -z "$libdir"; then # Link the convenience library if test "$linkmode" = lib; then deplibs="$dir/$old_library $deplibs" elif test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$dir/$old_library $compile_deplibs" finalize_deplibs="$dir/$old_library $finalize_deplibs" else deplibs="$lib $deplibs" # used for prog,scan pass fi continue fi if test "$linkmode" = prog && test "$pass" != link; then newlib_search_path="$newlib_search_path $ladir" deplibs="$lib $deplibs" linkalldeplibs=no if test "$link_all_deplibs" != no || test -z "$library_names" || test "$build_libtool_libs" = no; then linkalldeplibs=yes fi tmp_libs= for deplib in $dependency_libs; do case $deplib in -L*) func_stripname '-L' '' "$deplib" newlib_search_path="$newlib_search_path $func_stripname_result" ;; esac # Need to link against all dependency_libs? 
if test "$linkalldeplibs" = yes; then deplibs="$deplib $deplibs" else # Need to hardcode shared library paths # or/and link against static libraries newdependency_libs="$deplib $newdependency_libs" fi if $opt_duplicate_deps ; then case "$tmp_libs " in *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; esac fi tmp_libs="$tmp_libs $deplib" done # for deplib continue fi # $linkmode = prog... if test "$linkmode,$pass" = "prog,link"; then if test -n "$library_names" && { { test "$prefer_static_libs" = no || test "$prefer_static_libs,$installed" = "built,yes"; } || test -z "$old_library"; }; then # We need to hardcode the library path if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then # Make sure the rpath contains only unique directories. case "$temp_rpath:" in *"$absdir:"*) ;; *) temp_rpath="$temp_rpath$absdir:" ;; esac fi # Hardcode the library path. # Skip directories that are in the system default run-time # search path. case " $sys_lib_dlsearch_path " in *" $absdir "*) ;; *) case "$compile_rpath " in *" $absdir "*) ;; *) compile_rpath="$compile_rpath $absdir" esac ;; esac case " $sys_lib_dlsearch_path " in *" $libdir "*) ;; *) case "$finalize_rpath " in *" $libdir "*) ;; *) finalize_rpath="$finalize_rpath $libdir" esac ;; esac fi # $linkmode,$pass = prog,link... if test "$alldeplibs" = yes && { test "$deplibs_check_method" = pass_all || { test "$build_libtool_libs" = yes && test -n "$library_names"; }; }; then # We only need to search for static libraries continue fi fi link_static=no # Whether the deplib will be linked statically use_static_libs=$prefer_static_libs if test "$use_static_libs" = built && test "$installed" = yes; then use_static_libs=no fi if test -n "$library_names" && { test "$use_static_libs" = no || test -z "$old_library"; }; then case $host in *cygwin* | *mingw* | *cegcc*) # No point in relinking DLLs because paths are not encoded notinst_deplibs="$notinst_deplibs $lib" need_relink=no ;; *) if test "$installed" = no; then notinst_deplibs="$notinst_deplibs $lib" need_relink=yes fi ;; esac # This is a shared library # Warn about portability, can't link against -module's on some # systems (darwin). Don't bleat about dlopened modules though! dlopenmodule="" for dlpremoduletest in $dlprefiles; do if test "X$dlpremoduletest" = "X$lib"; then dlopenmodule="$dlpremoduletest" break fi done if test -z "$dlopenmodule" && test "$shouldnotlink" = yes && test "$pass" = link; then $ECHO if test "$linkmode" = prog; then $ECHO "*** Warning: Linking the executable $output against the loadable module" else $ECHO "*** Warning: Linking the shared library $output against the loadable module" fi $ECHO "*** $linklib is not portable!" fi if test "$linkmode" = lib && test "$hardcode_into_libs" = yes; then # Hardcode the library path. # Skip directories that are in the system default run-time # search path. case " $sys_lib_dlsearch_path " in *" $absdir "*) ;; *) case "$compile_rpath " in *" $absdir "*) ;; *) compile_rpath="$compile_rpath $absdir" esac ;; esac case " $sys_lib_dlsearch_path " in *" $libdir "*) ;; *) case "$finalize_rpath " in *" $libdir "*) ;; *) finalize_rpath="$finalize_rpath $libdir" esac ;; esac fi if test -n "$old_archive_from_expsyms_cmds"; then # figure out the soname set dummy $library_names shift realname="$1" shift libname=`eval "\\$ECHO \"$libname_spec\""` # use dlname if we got it. it's perfectly good, no? 
if test -n "$dlname"; then soname="$dlname" elif test -n "$soname_spec"; then # bleh windows case $host in *cygwin* | mingw* | *cegcc*) func_arith $current - $age major=$func_arith_result versuffix="-$major" ;; esac eval soname=\"$soname_spec\" else soname="$realname" fi # Make a new name for the extract_expsyms_cmds to use soroot="$soname" func_basename "$soroot" soname="$func_basename_result" func_stripname 'lib' '.dll' "$soname" newlib=libimp-$func_stripname_result.a # If the library has no export list, then create one now if test -f "$output_objdir/$soname-def"; then : else func_verbose "extracting exported symbol list from \`$soname'" func_execute_cmds "$extract_expsyms_cmds" 'exit $?' fi # Create $newlib if test -f "$output_objdir/$newlib"; then :; else func_verbose "generating import library for \`$soname'" func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?' fi # make sure the library variables are pointing to the new library dir=$output_objdir linklib=$newlib fi # test -n "$old_archive_from_expsyms_cmds" if test "$linkmode" = prog || test "$mode" != relink; then add_shlibpath= add_dir= add= lib_linked=yes case $hardcode_action in immediate | unsupported) if test "$hardcode_direct" = no; then add="$dir/$linklib" case $host in *-*-sco3.2v5.0.[024]*) add_dir="-L$dir" ;; *-*-sysv4*uw2*) add_dir="-L$dir" ;; *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ *-*-unixware7*) add_dir="-L$dir" ;; *-*-darwin* ) # if the lib is a (non-dlopened) module then we can not # link against it, someone is ignoring the earlier warnings if /usr/bin/file -L $add 2> /dev/null | $GREP ": [^:]* bundle" >/dev/null ; then if test "X$dlopenmodule" != "X$lib"; then $ECHO "*** Warning: lib $linklib is a module, not a shared library" if test -z "$old_library" ; then $ECHO $ECHO "*** And there doesn't seem to be a static archive available" $ECHO "*** The link will probably fail, sorry" else add="$dir/$old_library" fi elif test -n "$old_library"; then add="$dir/$old_library" fi fi esac elif test "$hardcode_minus_L" = no; then case $host in *-*-sunos*) add_shlibpath="$dir" ;; esac add_dir="-L$dir" add="-l$name" elif test "$hardcode_shlibpath_var" = no; then add_shlibpath="$dir" add="-l$name" else lib_linked=no fi ;; relink) if test "$hardcode_direct" = yes && test "$hardcode_direct_absolute" = no; then add="$dir/$linklib" elif test "$hardcode_minus_L" = yes; then add_dir="-L$dir" # Try looking first in the location we're being installed to. 
if test -n "$inst_prefix_dir"; then case $libdir in [\\/]*) add_dir="$add_dir -L$inst_prefix_dir$libdir" ;; esac fi add="-l$name" elif test "$hardcode_shlibpath_var" = yes; then add_shlibpath="$dir" add="-l$name" else lib_linked=no fi ;; *) lib_linked=no ;; esac if test "$lib_linked" != yes; then func_fatal_configuration "unsupported hardcode properties" fi if test -n "$add_shlibpath"; then case :$compile_shlibpath: in *":$add_shlibpath:"*) ;; *) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;; esac fi if test "$linkmode" = prog; then test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" test -n "$add" && compile_deplibs="$add $compile_deplibs" else test -n "$add_dir" && deplibs="$add_dir $deplibs" test -n "$add" && deplibs="$add $deplibs" if test "$hardcode_direct" != yes && test "$hardcode_minus_L" != yes && test "$hardcode_shlibpath_var" = yes; then case :$finalize_shlibpath: in *":$libdir:"*) ;; *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; esac fi fi fi if test "$linkmode" = prog || test "$mode" = relink; then add_shlibpath= add_dir= add= # Finalize command for both is simple: just hardcode it. if test "$hardcode_direct" = yes && test "$hardcode_direct_absolute" = no; then add="$libdir/$linklib" elif test "$hardcode_minus_L" = yes; then add_dir="-L$libdir" add="-l$name" elif test "$hardcode_shlibpath_var" = yes; then case :$finalize_shlibpath: in *":$libdir:"*) ;; *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; esac add="-l$name" elif test "$hardcode_automatic" = yes; then if test -n "$inst_prefix_dir" && test -f "$inst_prefix_dir$libdir/$linklib" ; then add="$inst_prefix_dir$libdir/$linklib" else add="$libdir/$linklib" fi else # We cannot seem to hardcode it, guess we'll fake it. add_dir="-L$libdir" # Try looking first in the location we're being installed to. if test -n "$inst_prefix_dir"; then case $libdir in [\\/]*) add_dir="$add_dir -L$inst_prefix_dir$libdir" ;; esac fi add="-l$name" fi if test "$linkmode" = prog; then test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" test -n "$add" && finalize_deplibs="$add $finalize_deplibs" else test -n "$add_dir" && deplibs="$add_dir $deplibs" test -n "$add" && deplibs="$add $deplibs" fi fi elif test "$linkmode" = prog; then # Here we assume that one of hardcode_direct or hardcode_minus_L # is not unsupported. This is valid on all known static and # shared platforms. if test "$hardcode_direct" != unsupported; then test -n "$old_library" && linklib="$old_library" compile_deplibs="$dir/$linklib $compile_deplibs" finalize_deplibs="$dir/$linklib $finalize_deplibs" else compile_deplibs="-l$name -L$dir $compile_deplibs" finalize_deplibs="-l$name -L$dir $finalize_deplibs" fi elif test "$build_libtool_libs" = yes; then # Not a shared library if test "$deplibs_check_method" != pass_all; then # We're trying link a shared library against a static one # but the system doesn't support it. # Just print a warning and add the library to dependency_libs so # that the program can be linked against the static library. $ECHO $ECHO "*** Warning: This system can not link to static lib archive $lib." $ECHO "*** I have the capability to make that library automatically link in when" $ECHO "*** you link to this library. But I can only do this if you have a" $ECHO "*** shared version of the library, which you do not appear to have." 
if test "$module" = yes; then $ECHO "*** But as you try to build a module library, libtool will still create " $ECHO "*** a static module, that should work as long as the dlopening application" $ECHO "*** is linked with the -dlopen flag to resolve symbols at runtime." if test -z "$global_symbol_pipe"; then $ECHO $ECHO "*** However, this would only work if libtool was able to extract symbol" $ECHO "*** lists from a program, using \`nm' or equivalent, but libtool could" $ECHO "*** not find such a program. So, this module is probably useless." $ECHO "*** \`nm' from GNU binutils and a full rebuild may help." fi if test "$build_old_libs" = no; then build_libtool_libs=module build_old_libs=yes else build_libtool_libs=no fi fi else deplibs="$dir/$old_library $deplibs" link_static=yes fi fi # link shared/static library? if test "$linkmode" = lib; then if test -n "$dependency_libs" && { test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes || test "$link_static" = yes; }; then # Extract -R from dependency_libs temp_deplibs= for libdir in $dependency_libs; do case $libdir in -R*) func_stripname '-R' '' "$libdir" temp_xrpath=$func_stripname_result case " $xrpath " in *" $temp_xrpath "*) ;; *) xrpath="$xrpath $temp_xrpath";; esac;; *) temp_deplibs="$temp_deplibs $libdir";; esac done dependency_libs="$temp_deplibs" fi newlib_search_path="$newlib_search_path $absdir" # Link against this library test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs" # ... and its dependency_libs tmp_libs= for deplib in $dependency_libs; do newdependency_libs="$deplib $newdependency_libs" if $opt_duplicate_deps ; then case "$tmp_libs " in *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; esac fi tmp_libs="$tmp_libs $deplib" done if test "$link_all_deplibs" != no; then # Add the search paths of all dependency libraries for deplib in $dependency_libs; do case $deplib in -L*) path="$deplib" ;; *.la) func_dirname "$deplib" "" "." dir="$func_dirname_result" # We need an absolute path. 
case $dir in [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;; *) absdir=`cd "$dir" && pwd` if test -z "$absdir"; then func_warning "cannot determine absolute directory name of \`$dir'" absdir="$dir" fi ;; esac if $GREP "^installed=no" $deplib > /dev/null; then case $host in *-*-darwin*) depdepl= eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` if test -n "$deplibrary_names" ; then for tmp in $deplibrary_names ; do depdepl=$tmp done if test -f "$absdir/$objdir/$depdepl" ; then depdepl="$absdir/$objdir/$depdepl" darwin_install_name=`${OTOOL} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` if test -z "$darwin_install_name"; then darwin_install_name=`${OTOOL64} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` fi compiler_flags="$compiler_flags ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}" linker_flags="$linker_flags -dylib_file ${darwin_install_name}:${depdepl}" path= fi fi ;; *) path="-L$absdir/$objdir" ;; esac else eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` test -z "$libdir" && \ func_fatal_error "\`$deplib' is not a valid libtool archive" test "$absdir" != "$libdir" && \ func_warning "\`$deplib' seems to be moved" path="-L$absdir" fi ;; esac case " $deplibs " in *" $path "*) ;; *) deplibs="$path $deplibs" ;; esac done fi # link_all_deplibs != no fi # linkmode = lib done # for deplib in $libs if test "$pass" = link; then if test "$linkmode" = "prog"; then compile_deplibs="$new_inherited_linker_flags $compile_deplibs" finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs" else compiler_flags="$compiler_flags "`$ECHO "X $new_inherited_linker_flags" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` fi fi dependency_libs="$newdependency_libs" if test "$pass" = dlpreopen; then # Link the dlpreopened libraries before other libraries for deplib in $save_deplibs; do deplibs="$deplib $deplibs" done fi if test "$pass" != dlopen; then if test "$pass" != conv; then # Make sure lib_search_path contains only unique directories. lib_search_path= for dir in $newlib_search_path; do case "$lib_search_path " in *" $dir "*) ;; *) lib_search_path="$lib_search_path $dir" ;; esac done newlib_search_path= fi if test "$linkmode,$pass" != "prog,link"; then vars="deplibs" else vars="compile_deplibs finalize_deplibs" fi for var in $vars dependency_libs; do # Add libraries to $var in reverse order eval tmp_libs=\"\$$var\" new_libs= for deplib in $tmp_libs; do # FIXME: Pedantically, this is the right thing to do, so # that some nasty dependency loop isn't accidentally # broken: #new_libs="$deplib $new_libs" # Pragmatically, this seems to cause very few problems in # practice: case $deplib in -L*) new_libs="$deplib $new_libs" ;; -R*) ;; *) # And here is the reason: when a library appears more # than once as an explicit dependence of a library, or # is implicitly linked in more than once by the # compiler, it is considered special, and multiple # occurrences thereof are not removed. Compare this # with having the same library being listed as a # dependency of multiple other libraries: in this case, # we know (pedantically, we assume) the library does not # need to be listed more than once, so we keep only the # last copy. This is not always right, but it is rare # enough that we require users that really mean to play # such unportable linking tricks to link the library # using -Wl,-lname, so that libtool does not consider it # for duplicate removal. 
case " $specialdeplibs " in *" $deplib "*) new_libs="$deplib $new_libs" ;; *) case " $new_libs " in *" $deplib "*) ;; *) new_libs="$deplib $new_libs" ;; esac ;; esac ;; esac done tmp_libs= for deplib in $new_libs; do case $deplib in -L*) case " $tmp_libs " in *" $deplib "*) ;; *) tmp_libs="$tmp_libs $deplib" ;; esac ;; *) tmp_libs="$tmp_libs $deplib" ;; esac done eval $var=\"$tmp_libs\" done # for var fi # Last step: remove runtime libs from dependency_libs # (they stay in deplibs) tmp_libs= for i in $dependency_libs ; do case " $predeps $postdeps $compiler_lib_search_path " in *" $i "*) i="" ;; esac if test -n "$i" ; then tmp_libs="$tmp_libs $i" fi done dependency_libs=$tmp_libs done # for pass if test "$linkmode" = prog; then dlfiles="$newdlfiles" fi if test "$linkmode" = prog || test "$linkmode" = lib; then dlprefiles="$newdlprefiles" fi case $linkmode in oldlib) if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then func_warning "\`-dlopen' is ignored for archives" fi case " $deplibs" in *\ -l* | *\ -L*) func_warning "\`-l' and \`-L' are ignored for archives" ;; esac test -n "$rpath" && \ func_warning "\`-rpath' is ignored for archives" test -n "$xrpath" && \ func_warning "\`-R' is ignored for archives" test -n "$vinfo" && \ func_warning "\`-version-info/-version-number' is ignored for archives" test -n "$release" && \ func_warning "\`-release' is ignored for archives" test -n "$export_symbols$export_symbols_regex" && \ func_warning "\`-export-symbols' is ignored for archives" # Now set the variables for building old libraries. build_libtool_libs=no oldlibs="$output" objs="$objs$old_deplibs" ;; lib) # Make sure we only generate libraries of the form `libNAME.la'. case $outputname in lib*) func_stripname 'lib' '.la' "$outputname" name=$func_stripname_result eval shared_ext=\"$shrext_cmds\" eval libname=\"$libname_spec\" ;; *) test "$module" = no && \ func_fatal_help "libtool library \`$output' must begin with \`lib'" if test "$need_lib_prefix" != no; then # Add the "lib" prefix for modules if required func_stripname '' '.la' "$outputname" name=$func_stripname_result eval shared_ext=\"$shrext_cmds\" eval libname=\"$libname_spec\" else func_stripname '' '.la' "$outputname" libname=$func_stripname_result fi ;; esac if test -n "$objs"; then if test "$deplibs_check_method" != pass_all; then func_fatal_error "cannot build libtool library \`$output' from non-libtool objects on this host:$objs" else $ECHO $ECHO "*** Warning: Linking the shared library $output against the non-libtool" $ECHO "*** objects $objs is not portable!" libobjs="$libobjs $objs" fi fi test "$dlself" != no && \ func_warning "\`-dlopen self' is ignored for libtool libraries" set dummy $rpath shift test "$#" -gt 1 && \ func_warning "ignoring multiple \`-rpath's for a libtool library" install_libdir="$1" oldlibs= if test -z "$rpath"; then if test "$build_libtool_libs" = yes; then # Building a libtool convenience library. # Some compilers have problems with a `.al' extension so # convenience libraries should have the same extension an # archive normally would. oldlibs="$output_objdir/$libname.$libext $oldlibs" build_libtool_libs=convenience build_old_libs=yes fi test -n "$vinfo" && \ func_warning "\`-version-info/-version-number' is ignored for convenience libraries" test -n "$release" && \ func_warning "\`-release' is ignored for convenience libraries" else # Parse the version information argument. 
save_ifs="$IFS"; IFS=':' set dummy $vinfo 0 0 0 shift IFS="$save_ifs" test -n "$7" && \ func_fatal_help "too many parameters to \`-version-info'" # convert absolute version numbers to libtool ages # this retains compatibility with .la files and attempts # to make the code below a bit more comprehensible case $vinfo_number in yes) number_major="$1" number_minor="$2" number_revision="$3" # # There are really only two kinds -- those that # use the current revision as the major version # and those that subtract age and use age as # a minor version. But, then there is irix # which has an extra 1 added just for fun # case $version_type in darwin|linux|osf|windows|none) func_arith $number_major + $number_minor current=$func_arith_result age="$number_minor" revision="$number_revision" ;; freebsd-aout|freebsd-elf|sunos) current="$number_major" revision="$number_minor" age="0" ;; irix|nonstopux) func_arith $number_major + $number_minor current=$func_arith_result age="$number_minor" revision="$number_minor" lt_irix_increment=no ;; esac ;; no) current="$1" revision="$2" age="$3" ;; esac # Check that each of the things are valid numbers. case $current in 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; *) func_error "CURRENT \`$current' must be a nonnegative integer" func_fatal_error "\`$vinfo' is not valid version information" ;; esac case $revision in 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; *) func_error "REVISION \`$revision' must be a nonnegative integer" func_fatal_error "\`$vinfo' is not valid version information" ;; esac case $age in 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; *) func_error "AGE \`$age' must be a nonnegative integer" func_fatal_error "\`$vinfo' is not valid version information" ;; esac if test "$age" -gt "$current"; then func_error "AGE \`$age' is greater than the current interface number \`$current'" func_fatal_error "\`$vinfo' is not valid version information" fi # Calculate the version variables. major= versuffix= verstring= case $version_type in none) ;; darwin) # Like Linux, but with the current version available in # verstring for coding it into the library header func_arith $current - $age major=.$func_arith_result versuffix="$major.$age.$revision" # Darwin ld doesn't like 0 for these options... func_arith $current + 1 minor_current=$func_arith_result xlcverstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision" verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" ;; freebsd-aout) major=".$current" versuffix=".$current.$revision"; ;; freebsd-elf) major=".$current" versuffix=".$current" ;; irix | nonstopux) if test "X$lt_irix_increment" = "Xno"; then func_arith $current - $age else func_arith $current - $age + 1 fi major=$func_arith_result case $version_type in nonstopux) verstring_prefix=nonstopux ;; *) verstring_prefix=sgi ;; esac verstring="$verstring_prefix$major.$revision" # Add in all the interfaces that we are compatible with. loop=$revision while test "$loop" -ne 0; do func_arith $revision - $loop iface=$func_arith_result func_arith $loop - 1 loop=$func_arith_result verstring="$verstring_prefix$major.$iface:$verstring" done # Before this point, $major must not contain `.'. 
major=.$major versuffix="$major.$revision" ;; linux) func_arith $current - $age major=.$func_arith_result versuffix="$major.$age.$revision" ;; osf) func_arith $current - $age major=.$func_arith_result versuffix=".$current.$age.$revision" verstring="$current.$age.$revision" # Add in all the interfaces that we are compatible with. loop=$age while test "$loop" -ne 0; do func_arith $current - $loop iface=$func_arith_result func_arith $loop - 1 loop=$func_arith_result verstring="$verstring:${iface}.0" done # Make executables depend on our current version. verstring="$verstring:${current}.0" ;; qnx) major=".$current" versuffix=".$current" ;; sunos) major=".$current" versuffix=".$current.$revision" ;; windows) # Use '-' rather than '.', since we only want one # extension on DOS 8.3 filesystems. func_arith $current - $age major=$func_arith_result versuffix="-$major" ;; *) func_fatal_configuration "unknown library version type \`$version_type'" ;; esac # Clear the version info if we defaulted, and they specified a release. if test -z "$vinfo" && test -n "$release"; then major= case $version_type in darwin) # we can't check for "0.0" in archive_cmds due to quoting # problems, so we reset it completely verstring= ;; *) verstring="0.0" ;; esac if test "$need_version" = no; then versuffix= else versuffix=".0.0" fi fi # Remove version info from name if versioning should be avoided if test "$avoid_version" = yes && test "$need_version" = no; then major= versuffix= verstring="" fi # Check to see if the archive will have undefined symbols. if test "$allow_undefined" = yes; then if test "$allow_undefined_flag" = unsupported; then func_warning "undefined symbols not allowed in $host shared libraries" build_libtool_libs=no build_old_libs=yes fi else # Don't allow undefined symbols. allow_undefined_flag="$no_undefined_flag" fi fi func_generate_dlsyms "$libname" "$libname" "yes" libobjs="$libobjs $symfileobj" test "X$libobjs" = "X " && libobjs= if test "$mode" != relink; then # Remove our outputs, but don't remove object files since they # may have been created when compiling PIC objects. removelist= tempremovelist=`$ECHO "$output_objdir/*"` for p in $tempremovelist; do case $p in *.$objext | *.gcno) ;; $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*) if test "X$precious_files_regex" != "X"; then if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 then continue fi fi removelist="$removelist $p" ;; *) ;; esac done test -n "$removelist" && \ func_show_eval "${RM}r \$removelist" fi # Now set the variables for building old libraries. if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then oldlibs="$oldlibs $output_objdir/$libname.$libext" # Transform .lo files to .o files. oldobjs="$objs "`$ECHO "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP` fi # Eliminate all temporary directories. #for path in $notinst_path; do # lib_search_path=`$ECHO "X$lib_search_path " | $Xsed -e "s% $path % %g"` # deplibs=`$ECHO "X$deplibs " | $Xsed -e "s% -L$path % %g"` # dependency_libs=`$ECHO "X$dependency_libs " | $Xsed -e "s% -L$path % %g"` #done if test -n "$xrpath"; then # If the user specified any rpath flags, then add them. 
temp_xrpath= for libdir in $xrpath; do temp_xrpath="$temp_xrpath -R$libdir" case "$finalize_rpath " in *" $libdir "*) ;; *) finalize_rpath="$finalize_rpath $libdir" ;; esac done if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then dependency_libs="$temp_xrpath $dependency_libs" fi fi # Make sure dlfiles contains only unique files that won't be dlpreopened old_dlfiles="$dlfiles" dlfiles= for lib in $old_dlfiles; do case " $dlprefiles $dlfiles " in *" $lib "*) ;; *) dlfiles="$dlfiles $lib" ;; esac done # Make sure dlprefiles contains only unique files old_dlprefiles="$dlprefiles" dlprefiles= for lib in $old_dlprefiles; do case "$dlprefiles " in *" $lib "*) ;; *) dlprefiles="$dlprefiles $lib" ;; esac done if test "$build_libtool_libs" = yes; then if test -n "$rpath"; then case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc*) # these systems don't actually have a c library (as such)! ;; *-*-rhapsody* | *-*-darwin1.[012]) # Rhapsody C library is in the System framework deplibs="$deplibs System.ltframework" ;; *-*-netbsd*) # Don't link with libc until the a.out ld.so is fixed. ;; *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) # Do not include libc due to us having libc/libc_r. ;; *-*-sco3.2v5* | *-*-sco5v6*) # Causes problems with __ctype ;; *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) # Compiler inserts libc in the correct place for threads to work ;; *) # Add libc to deplibs on all other systems if necessary. if test "$build_libtool_need_lc" = "yes"; then deplibs="$deplibs -lc" fi ;; esac fi # Transform deplibs into only deplibs that can be linked in shared. name_save=$name libname_save=$libname release_save=$release versuffix_save=$versuffix major_save=$major # I'm not sure if I'm treating the release correctly. I think # release should show up in the -l (ie -lgmp5) so we don't want to # add it in twice. Is that correct? release="" versuffix="" major="" newdeplibs= droppeddeps=no case $deplibs_check_method in pass_all) # Don't check for shared/static. Everything works. # This might be a little naive. We might want to check # whether the library exists or not. But this is on # osf3 & osf4 and I'm not really sure... Just # implementing what was already the behavior. newdeplibs=$deplibs ;; test_compile) # This code stresses the "libraries are programs" paradigm to its # limits. Maybe even breaks it. We compile a program, linking it # against the deplibs as a proxy for the library. Then we can check # whether they linked in statically or dynamically with ldd. $opt_dry_run || $RM conftest.c cat > conftest.c </dev/null` for potent_lib in $potential_libs; do # Follow soft links. if ls -lLd "$potent_lib" 2>/dev/null | $GREP " -> " >/dev/null; then continue fi # The statement above tries to avoid entering an # endless loop below, in case of cyclic links. # We might still enter an endless loop, since a link # loop can be closed while we follow links, # but so what? potlib="$potent_lib" while test -h "$potlib" 2>/dev/null; do potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'` case $potliblink in [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";; *) potlib=`$ECHO "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";; esac done if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | $SED -e 10q | $EGREP "$file_magic_regex" > /dev/null; then newdeplibs="$newdeplibs $a_deplib" a_deplib="" break 2 fi done done fi if test -n "$a_deplib" ; then droppeddeps=yes $ECHO $ECHO "*** Warning: linker path does not have real file for library $a_deplib." 
$ECHO "*** I have the capability to make that library automatically link in when" $ECHO "*** you link to this library. But I can only do this if you have a" $ECHO "*** shared version of the library, which you do not appear to have" $ECHO "*** because I did check the linker path looking for a file starting" if test -z "$potlib" ; then $ECHO "*** with $libname but no candidates were found. (...for file magic test)" else $ECHO "*** with $libname and none of the candidates passed a file format test" $ECHO "*** using a file magic. Last file checked: $potlib" fi fi ;; *) # Add a -L argument. newdeplibs="$newdeplibs $a_deplib" ;; esac done # Gone through all deplibs. ;; match_pattern*) set dummy $deplibs_check_method; shift match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` for a_deplib in $deplibs; do case $a_deplib in -l*) func_stripname -l '' "$a_deplib" name=$func_stripname_result if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then case " $predeps $postdeps " in *" $a_deplib "*) newdeplibs="$newdeplibs $a_deplib" a_deplib="" ;; esac fi if test -n "$a_deplib" ; then libname=`eval "\\$ECHO \"$libname_spec\""` for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do potential_libs=`ls $i/$libname[.-]* 2>/dev/null` for potent_lib in $potential_libs; do potlib="$potent_lib" # see symlink-check above in file_magic test if eval "\$ECHO \"X$potent_lib\"" 2>/dev/null | $Xsed -e 10q | \ $EGREP "$match_pattern_regex" > /dev/null; then newdeplibs="$newdeplibs $a_deplib" a_deplib="" break 2 fi done done fi if test -n "$a_deplib" ; then droppeddeps=yes $ECHO $ECHO "*** Warning: linker path does not have real file for library $a_deplib." $ECHO "*** I have the capability to make that library automatically link in when" $ECHO "*** you link to this library. But I can only do this if you have a" $ECHO "*** shared version of the library, which you do not appear to have" $ECHO "*** because I did check the linker path looking for a file starting" if test -z "$potlib" ; then $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)" else $ECHO "*** with $libname and none of the candidates passed a file format test" $ECHO "*** using a regex pattern. Last file checked: $potlib" fi fi ;; *) # Add a -L argument. newdeplibs="$newdeplibs $a_deplib" ;; esac done # Gone through all deplibs. ;; none | unknown | *) newdeplibs="" tmp_deplibs=`$ECHO "X $deplibs" | $Xsed \ -e 's/ -lc$//' -e 's/ -[LR][^ ]*//g'` if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then for i in $predeps $postdeps ; do # can't use Xsed below, because $i might contain '/' tmp_deplibs=`$ECHO "X $tmp_deplibs" | $Xsed -e "s,$i,,"` done fi if $ECHO "X $tmp_deplibs" | $Xsed -e 's/[ ]//g' | $GREP . >/dev/null; then $ECHO if test "X$deplibs_check_method" = "Xnone"; then $ECHO "*** Warning: inter-library dependencies are not supported in this platform." else $ECHO "*** Warning: inter-library dependencies are not known to be supported." fi $ECHO "*** All declared inter-library dependencies are being dropped." 
droppeddeps=yes fi ;; esac versuffix=$versuffix_save major=$major_save release=$release_save libname=$libname_save name=$name_save case $host in *-*-rhapsody* | *-*-darwin1.[012]) # On Rhapsody replace the C library with the System framework newdeplibs=`$ECHO "X $newdeplibs" | $Xsed -e 's/ -lc / System.ltframework /'` ;; esac if test "$droppeddeps" = yes; then if test "$module" = yes; then $ECHO $ECHO "*** Warning: libtool could not satisfy all declared inter-library" $ECHO "*** dependencies of module $libname. Therefore, libtool will create" $ECHO "*** a static module, that should work as long as the dlopening" $ECHO "*** application is linked with the -dlopen flag." if test -z "$global_symbol_pipe"; then $ECHO $ECHO "*** However, this would only work if libtool was able to extract symbol" $ECHO "*** lists from a program, using \`nm' or equivalent, but libtool could" $ECHO "*** not find such a program. So, this module is probably useless." $ECHO "*** \`nm' from GNU binutils and a full rebuild may help." fi if test "$build_old_libs" = no; then oldlibs="$output_objdir/$libname.$libext" build_libtool_libs=module build_old_libs=yes else build_libtool_libs=no fi else $ECHO "*** The inter-library dependencies that have been dropped here will be" $ECHO "*** automatically added whenever a program is linked with this library" $ECHO "*** or is declared to -dlopen it." if test "$allow_undefined" = no; then $ECHO $ECHO "*** Since this library must not contain undefined symbols," $ECHO "*** because either the platform does not support them or" $ECHO "*** it was explicitly requested with -no-undefined," $ECHO "*** libtool will only create a static version of it." if test "$build_old_libs" = no; then oldlibs="$output_objdir/$libname.$libext" build_libtool_libs=module build_old_libs=yes else build_libtool_libs=no fi fi fi fi # Done checking deplibs! deplibs=$newdeplibs fi # Time to change all our "foo.ltframework" stuff back to "-framework foo" case $host in *-*-darwin*) newdeplibs=`$ECHO "X $newdeplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` new_inherited_linker_flags=`$ECHO "X $new_inherited_linker_flags" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` deplibs=`$ECHO "X $deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` ;; esac # move library search paths that coincide with paths to not yet # installed libraries to the beginning of the library search list new_libs= for path in $notinst_path; do case " $new_libs " in *" -L$path/$objdir "*) ;; *) case " $deplibs " in *" -L$path/$objdir "*) new_libs="$new_libs -L$path/$objdir" ;; esac ;; esac done for deplib in $deplibs; do case $deplib in -L*) case " $new_libs " in *" $deplib "*) ;; *) new_libs="$new_libs $deplib" ;; esac ;; *) new_libs="$new_libs $deplib" ;; esac done deplibs="$new_libs" # All the library-specific variables (install_libdir is set above). library_names= old_library= dlname= # Test again, we may have decided not to build it any more if test "$build_libtool_libs" = yes; then if test "$hardcode_into_libs" = yes; then # Hardcode the library paths hardcode_libdirs= dep_rpath= rpath="$finalize_rpath" test "$mode" != relink && rpath="$compile_rpath$rpath" for libdir in $rpath; do if test -n "$hardcode_libdir_flag_spec"; then if test -n "$hardcode_libdir_separator"; then if test -z "$hardcode_libdirs"; then hardcode_libdirs="$libdir" else # Just accumulate the unique libdirs. 
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) ;; *) hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" ;; esac fi else eval flag=\"$hardcode_libdir_flag_spec\" dep_rpath="$dep_rpath $flag" fi elif test -n "$runpath_var"; then case "$perm_rpath " in *" $libdir "*) ;; *) perm_rpath="$perm_rpath $libdir" ;; esac fi done # Substitute the hardcoded libdirs into the rpath. if test -n "$hardcode_libdir_separator" && test -n "$hardcode_libdirs"; then libdir="$hardcode_libdirs" if test -n "$hardcode_libdir_flag_spec_ld"; then eval dep_rpath=\"$hardcode_libdir_flag_spec_ld\" else eval dep_rpath=\"$hardcode_libdir_flag_spec\" fi fi if test -n "$runpath_var" && test -n "$perm_rpath"; then # We should set the runpath_var. rpath= for dir in $perm_rpath; do rpath="$rpath$dir:" done eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" fi test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" fi shlibpath="$finalize_shlibpath" test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath" if test -n "$shlibpath"; then eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" fi # Get the real and link names of the library. eval shared_ext=\"$shrext_cmds\" eval library_names=\"$library_names_spec\" set dummy $library_names shift realname="$1" shift if test -n "$soname_spec"; then eval soname=\"$soname_spec\" else soname="$realname" fi if test -z "$dlname"; then dlname=$soname fi lib="$output_objdir/$realname" linknames= for link do linknames="$linknames $link" done # Use standard objects if they are pic test -z "$pic_flag" && libobjs=`$ECHO "X$libobjs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` test "X$libobjs" = "X " && libobjs= delfiles= if test -n "$export_symbols" && test -n "$include_expsyms"; then $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" export_symbols="$output_objdir/$libname.uexp" delfiles="$delfiles $export_symbols" fi orig_export_symbols= case $host_os in cygwin* | mingw* | cegcc*) if test -n "$export_symbols" && test -z "$export_symbols_regex"; then # exporting using user supplied symfile if test "x`$SED 1q $export_symbols`" != xEXPORTS; then # and it's NOT already a .def file. Must figure out # which of the given symbols are data symbols and tag # them as such. So, trigger use of export_symbols_cmds. # export_symbols gets reassigned inside the "prepare # the list of exported symbols" if statement, so the # include_expsyms logic still works. orig_export_symbols="$export_symbols" export_symbols= always_export_symbols=yes fi fi ;; esac # Prepare the list of exported symbols if test -z "$export_symbols"; then if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then func_verbose "generating symbol list for \`$libname.la'" export_symbols="$output_objdir/$libname.exp" $opt_dry_run || $RM $export_symbols cmds=$export_symbols_cmds save_ifs="$IFS"; IFS='~' for cmd in $cmds; do IFS="$save_ifs" eval cmd=\"$cmd\" func_len " $cmd" len=$func_len_result if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then func_show_eval "$cmd" 'exit $?' skipped_export=false else # The command line is too long to execute in one step. func_verbose "using reloadable object file for export list..." skipped_export=: # Break out early, otherwise skipped_export may be # set to false by a later but shorter cmd. 
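# --- Editorial sketch (not part of the original script) ---
# After $library_names_spec and $soname_spec are expanded above, a typical
# ELF platform ends up with a real file, an soname and link names along the
# lines shown by the helper below.  All values are invented for illustration;
# the real names depend on $libname, $major and $versuffix computed earlier
# and on the platform's library_names_spec.  The function is never called.
lt_sketch_library_names ()
{
  echo "realname=libfoo.so.2.1.2 soname=libfoo.so.2 linknames='libfoo.so.2 libfoo.so'"
}
# --- end editorial sketch ---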
break fi done IFS="$save_ifs" if test -n "$export_symbols_regex" && test "X$skipped_export" != "X:"; then func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' func_show_eval '$MV "${export_symbols}T" "$export_symbols"' fi fi fi if test -n "$export_symbols" && test -n "$include_expsyms"; then tmp_export_symbols="$export_symbols" test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" $opt_dry_run || eval '$ECHO "X$include_expsyms" | $Xsed | $SP2NL >> "$tmp_export_symbols"' fi if test "X$skipped_export" != "X:" && test -n "$orig_export_symbols"; then # The given exports_symbols file has to be filtered, so filter it. func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" # FIXME: $output_objdir/$libname.filter potentially contains lots of # 's' commands which not all seds can handle. GNU sed should be fine # though. Also, the filter scales superlinearly with the number of # global variables. join(1) would be nice here, but unfortunately # isn't a blessed tool. $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter delfiles="$delfiles $export_symbols $output_objdir/$libname.filter" export_symbols=$output_objdir/$libname.def $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols fi tmp_deplibs= for test_deplib in $deplibs; do case " $convenience " in *" $test_deplib "*) ;; *) tmp_deplibs="$tmp_deplibs $test_deplib" ;; esac done deplibs="$tmp_deplibs" if test -n "$convenience"; then if test -n "$whole_archive_flag_spec" && test "$compiler_needs_object" = yes && test -z "$libobjs"; then # extract the archives, so we have objects to list. # TODO: could optimize this to just extract one archive. whole_archive_flag_spec= fi if test -n "$whole_archive_flag_spec"; then save_libobjs=$libobjs eval libobjs=\"\$libobjs $whole_archive_flag_spec\" test "X$libobjs" = "X " && libobjs= else gentop="$output_objdir/${outputname}x" generated="$generated $gentop" func_extract_archives $gentop $convenience libobjs="$libobjs $func_extract_archives_result" test "X$libobjs" = "X " && libobjs= fi fi if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then eval flag=\"$thread_safe_flag_spec\" linker_flags="$linker_flags $flag" fi # Make a backup of the uninstalled library when relinking if test "$mode" = relink; then $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? fi # Do each of the archive commands. if test "$module" = yes && test -n "$module_cmds" ; then if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then eval test_cmds=\"$module_expsym_cmds\" cmds=$module_expsym_cmds else eval test_cmds=\"$module_cmds\" cmds=$module_cmds fi else if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then eval test_cmds=\"$archive_expsym_cmds\" cmds=$archive_expsym_cmds else eval test_cmds=\"$archive_cmds\" cmds=$archive_cmds fi fi if test "X$skipped_export" != "X:" && func_len " $test_cmds" && len=$func_len_result && test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then : else # The command line is too long to link in one step, link piecewise # or, if using GNU ld and skipped_export is not :, use a linker # script. # Save the value of $output and $libobjs because we want to # use them later. 
If we have whole_archive_flag_spec, we # want to use save_libobjs as it was before # whole_archive_flag_spec was expanded, because we can't # assume the linker understands whole_archive_flag_spec. # This may have to be revisited, in case too many # convenience libraries get linked in and end up exceeding # the spec. if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then save_libobjs=$libobjs fi save_output=$output output_la=`$ECHO "X$output" | $Xsed -e "$basename"` # Clear the reloadable object creation command queue and # initialize k to one. test_cmds= concat_cmds= objlist= last_robj= k=1 if test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "$with_gnu_ld" = yes; then output=${output_objdir}/${output_la}.lnkscript func_verbose "creating GNU ld script: $output" $ECHO 'INPUT (' > $output for obj in $save_libobjs do $ECHO "$obj" >> $output done $ECHO ')' >> $output delfiles="$delfiles $output" elif test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "X$file_list_spec" != X; then output=${output_objdir}/${output_la}.lnk func_verbose "creating linker input file list: $output" : > $output set x $save_libobjs shift firstobj= if test "$compiler_needs_object" = yes; then firstobj="$1 " shift fi for obj do $ECHO "$obj" >> $output done delfiles="$delfiles $output" output=$firstobj\"$file_list_spec$output\" else if test -n "$save_libobjs"; then func_verbose "creating reloadable object files..." output=$output_objdir/$output_la-${k}.$objext eval test_cmds=\"$reload_cmds\" func_len " $test_cmds" len0=$func_len_result len=$len0 # Loop over the list of objects to be linked. for obj in $save_libobjs do func_len " $obj" func_arith $len + $func_len_result len=$func_arith_result if test "X$objlist" = X || test "$len" -lt "$max_cmd_len"; then func_append objlist " $obj" else # The command $test_cmds is almost too long, add a # command to the queue. if test "$k" -eq 1 ; then # The first file doesn't have a previous command to add. eval concat_cmds=\"$reload_cmds $objlist $last_robj\" else # All subsequent reloadable object files will link in # the last one created. eval concat_cmds=\"\$concat_cmds~$reload_cmds $objlist $last_robj~\$RM $last_robj\" fi last_robj=$output_objdir/$output_la-${k}.$objext func_arith $k + 1 k=$func_arith_result output=$output_objdir/$output_la-${k}.$objext objlist=$obj func_len " $last_robj" func_arith $len0 + $func_len_result len=$func_arith_result fi done # Handle the remaining objects by creating one last # reloadable object file. All subsequent reloadable object # files will link in the last one created. test -z "$concat_cmds" || concat_cmds=$concat_cmds~ eval concat_cmds=\"\${concat_cmds}$reload_cmds $objlist $last_robj\" if test -n "$last_robj"; then eval concat_cmds=\"\${concat_cmds}~\$RM $last_robj\" fi delfiles="$delfiles $output" else output= fi if ${skipped_export-false}; then func_verbose "generating symbol list for \`$libname.la'" export_symbols="$output_objdir/$libname.exp" $opt_dry_run || $RM $export_symbols libobjs=$output # Append the command to create the export file. test -z "$concat_cmds" || concat_cmds=$concat_cmds~ eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" if test -n "$last_robj"; then eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" fi fi test -n "$save_libobjs" && func_verbose "creating a temporary reloadable object file: $output" # Loop through the commands generated above and execute them. 
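# --- Editorial sketch (not part of the original script) ---
# When the object list is too long for a single command line and GNU ld is in
# use, the code above writes a tiny linker script that simply lists the
# objects inside an INPUT ( ... ) block and passes that file to the link
# command in place of the objects themselves.  The hypothetical helper below
# reproduces only that generation step; the script name and object list are
# made up for illustration and the function is never invoked here.
lt_sketch_write_ld_script ()
{
  # $1 = output script name, remaining arguments = object files
  sketch_out=$1; shift
  echo 'INPUT (' > "$sketch_out"
  for sketch_obj
  do
    echo "$sketch_obj" >> "$sketch_out"
  done
  echo ')' >> "$sketch_out"
}
# Example: lt_sketch_write_ld_script objs.lnkscript a.o b.o
# --- end editorial sketch ---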
save_ifs="$IFS"; IFS='~' for cmd in $concat_cmds; do IFS="$save_ifs" $opt_silent || { func_quote_for_expand "$cmd" eval "func_echo $func_quote_for_expand_result" } $opt_dry_run || eval "$cmd" || { lt_exit=$? # Restore the uninstalled library and exit if test "$mode" = relink; then ( cd "$output_objdir" && \ $RM "${realname}T" && \ $MV "${realname}U" "$realname" ) fi exit $lt_exit } done IFS="$save_ifs" if test -n "$export_symbols_regex" && ${skipped_export-false}; then func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' func_show_eval '$MV "${export_symbols}T" "$export_symbols"' fi fi if ${skipped_export-false}; then if test -n "$export_symbols" && test -n "$include_expsyms"; then tmp_export_symbols="$export_symbols" test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" $opt_dry_run || eval '$ECHO "X$include_expsyms" | $Xsed | $SP2NL >> "$tmp_export_symbols"' fi if test -n "$orig_export_symbols"; then # The given exports_symbols file has to be filtered, so filter it. func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" # FIXME: $output_objdir/$libname.filter potentially contains lots of # 's' commands which not all seds can handle. GNU sed should be fine # though. Also, the filter scales superlinearly with the number of # global variables. join(1) would be nice here, but unfortunately # isn't a blessed tool. $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter delfiles="$delfiles $export_symbols $output_objdir/$libname.filter" export_symbols=$output_objdir/$libname.def $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols fi fi libobjs=$output # Restore the value of output. output=$save_output if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then eval libobjs=\"\$libobjs $whole_archive_flag_spec\" test "X$libobjs" = "X " && libobjs= fi # Expand the library linking commands again to reset the # value of $libobjs for piecewise linking. # Do each of the archive commands. if test "$module" = yes && test -n "$module_cmds" ; then if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then cmds=$module_expsym_cmds else cmds=$module_cmds fi else if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then cmds=$archive_expsym_cmds else cmds=$archive_cmds fi fi fi if test -n "$delfiles"; then # Append the command to remove temporary files to $cmds. eval cmds=\"\$cmds~\$RM $delfiles\" fi # Add any objects from preloaded convenience libraries if test -n "$dlprefiles"; then gentop="$output_objdir/${outputname}x" generated="$generated $gentop" func_extract_archives $gentop $dlprefiles libobjs="$libobjs $func_extract_archives_result" test "X$libobjs" = "X " && libobjs= fi save_ifs="$IFS"; IFS='~' for cmd in $cmds; do IFS="$save_ifs" eval cmd=\"$cmd\" $opt_silent || { func_quote_for_expand "$cmd" eval "func_echo $func_quote_for_expand_result" } $opt_dry_run || eval "$cmd" || { lt_exit=$? # Restore the uninstalled library and exit if test "$mode" = relink; then ( cd "$output_objdir" && \ $RM "${realname}T" && \ $MV "${realname}U" "$realname" ) fi exit $lt_exit } done IFS="$save_ifs" # Restore the uninstalled library and exit if test "$mode" = relink; then $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? 
if test -n "$convenience"; then if test -z "$whole_archive_flag_spec"; then func_show_eval '${RM}r "$gentop"' fi fi exit $EXIT_SUCCESS fi # Create links to the real library. for linkname in $linknames; do if test "$realname" != "$linkname"; then func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?' fi done # If -module or -export-dynamic was specified, set the dlname. if test "$module" = yes || test "$export_dynamic" = yes; then # On all known operating systems, these are identical. dlname="$soname" fi fi ;; obj) if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then func_warning "\`-dlopen' is ignored for objects" fi case " $deplibs" in *\ -l* | *\ -L*) func_warning "\`-l' and \`-L' are ignored for objects" ;; esac test -n "$rpath" && \ func_warning "\`-rpath' is ignored for objects" test -n "$xrpath" && \ func_warning "\`-R' is ignored for objects" test -n "$vinfo" && \ func_warning "\`-version-info' is ignored for objects" test -n "$release" && \ func_warning "\`-release' is ignored for objects" case $output in *.lo) test -n "$objs$old_deplibs" && \ func_fatal_error "cannot build library object \`$output' from non-libtool objects" libobj=$output func_lo2o "$libobj" obj=$func_lo2o_result ;; *) libobj= obj="$output" ;; esac # Delete the old objects. $opt_dry_run || $RM $obj $libobj # Objects from convenience libraries. This assumes # single-version convenience libraries. Whenever we create # different ones for PIC/non-PIC, this we'll have to duplicate # the extraction. reload_conv_objs= gentop= # reload_cmds runs $LD directly, so let us get rid of # -Wl from whole_archive_flag_spec and hope we can get by with # turning comma into space.. wl= if test -n "$convenience"; then if test -n "$whole_archive_flag_spec"; then eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" reload_conv_objs=$reload_objs\ `$ECHO "X$tmp_whole_archive_flags" | $Xsed -e 's|,| |g'` else gentop="$output_objdir/${obj}x" generated="$generated $gentop" func_extract_archives $gentop $convenience reload_conv_objs="$reload_objs $func_extract_archives_result" fi fi # Create the old-style object. reload_objs="$objs$old_deplibs "`$ECHO "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test output="$obj" func_execute_cmds "$reload_cmds" 'exit $?' # Exit if we aren't doing a library object file. if test -z "$libobj"; then if test -n "$gentop"; then func_show_eval '${RM}r "$gentop"' fi exit $EXIT_SUCCESS fi if test "$build_libtool_libs" != yes; then if test -n "$gentop"; then func_show_eval '${RM}r "$gentop"' fi # Create an invalid libtool object if no PIC, so that we don't # accidentally link it into a program. # $show "echo timestamp > $libobj" # $opt_dry_run || eval "echo timestamp > $libobj" || exit $? exit $EXIT_SUCCESS fi if test -n "$pic_flag" || test "$pic_mode" != default; then # Only do commands if we really have different PIC objects. reload_objs="$libobjs $reload_conv_objs" output="$libobj" func_execute_cmds "$reload_cmds" 'exit $?' 
fi if test -n "$gentop"; then func_show_eval '${RM}r "$gentop"' fi exit $EXIT_SUCCESS ;; prog) case $host in *cygwin*) func_stripname '' '.exe' "$output" output=$func_stripname_result.exe;; esac test -n "$vinfo" && \ func_warning "\`-version-info' is ignored for programs" test -n "$release" && \ func_warning "\`-release' is ignored for programs" test "$preload" = yes \ && test "$dlopen_support" = unknown \ && test "$dlopen_self" = unknown \ && test "$dlopen_self_static" = unknown && \ func_warning "\`LT_INIT([dlopen])' not used. Assuming no dlopen support." case $host in *-*-rhapsody* | *-*-darwin1.[012]) # On Rhapsody replace the C library is the System framework compile_deplibs=`$ECHO "X $compile_deplibs" | $Xsed -e 's/ -lc / System.ltframework /'` finalize_deplibs=`$ECHO "X $finalize_deplibs" | $Xsed -e 's/ -lc / System.ltframework /'` ;; esac case $host in *-*-darwin*) # Don't allow lazy linking, it breaks C++ global constructors # But is supposedly fixed on 10.4 or later (yay!). if test "$tagname" = CXX ; then case ${MACOSX_DEPLOYMENT_TARGET-10.0} in 10.[0123]) compile_command="$compile_command ${wl}-bind_at_load" finalize_command="$finalize_command ${wl}-bind_at_load" ;; esac fi # Time to change all our "foo.ltframework" stuff back to "-framework foo" compile_deplibs=`$ECHO "X $compile_deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` finalize_deplibs=`$ECHO "X $finalize_deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` ;; esac # move library search paths that coincide with paths to not yet # installed libraries to the beginning of the library search list new_libs= for path in $notinst_path; do case " $new_libs " in *" -L$path/$objdir "*) ;; *) case " $compile_deplibs " in *" -L$path/$objdir "*) new_libs="$new_libs -L$path/$objdir" ;; esac ;; esac done for deplib in $compile_deplibs; do case $deplib in -L*) case " $new_libs " in *" $deplib "*) ;; *) new_libs="$new_libs $deplib" ;; esac ;; *) new_libs="$new_libs $deplib" ;; esac done compile_deplibs="$new_libs" compile_command="$compile_command $compile_deplibs" finalize_command="$finalize_command $finalize_deplibs" if test -n "$rpath$xrpath"; then # If the user specified any rpath flags, then add them. for libdir in $rpath $xrpath; do # This is the magic to use -rpath. case "$finalize_rpath " in *" $libdir "*) ;; *) finalize_rpath="$finalize_rpath $libdir" ;; esac done fi # Now hardcode the library paths rpath= hardcode_libdirs= for libdir in $compile_rpath $finalize_rpath; do if test -n "$hardcode_libdir_flag_spec"; then if test -n "$hardcode_libdir_separator"; then if test -z "$hardcode_libdirs"; then hardcode_libdirs="$libdir" else # Just accumulate the unique libdirs. 
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) ;; *) hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" ;; esac fi else eval flag=\"$hardcode_libdir_flag_spec\" rpath="$rpath $flag" fi elif test -n "$runpath_var"; then case "$perm_rpath " in *" $libdir "*) ;; *) perm_rpath="$perm_rpath $libdir" ;; esac fi case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) testbindir=`${ECHO} "$libdir" | ${SED} -e 's*/lib$*/bin*'` case :$dllsearchpath: in *":$libdir:"*) ;; ::) dllsearchpath=$libdir;; *) dllsearchpath="$dllsearchpath:$libdir";; esac case :$dllsearchpath: in *":$testbindir:"*) ;; ::) dllsearchpath=$testbindir;; *) dllsearchpath="$dllsearchpath:$testbindir";; esac ;; esac done # Substitute the hardcoded libdirs into the rpath. if test -n "$hardcode_libdir_separator" && test -n "$hardcode_libdirs"; then libdir="$hardcode_libdirs" eval rpath=\" $hardcode_libdir_flag_spec\" fi compile_rpath="$rpath" rpath= hardcode_libdirs= for libdir in $finalize_rpath; do if test -n "$hardcode_libdir_flag_spec"; then if test -n "$hardcode_libdir_separator"; then if test -z "$hardcode_libdirs"; then hardcode_libdirs="$libdir" else # Just accumulate the unique libdirs. case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) ;; *) hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" ;; esac fi else eval flag=\"$hardcode_libdir_flag_spec\" rpath="$rpath $flag" fi elif test -n "$runpath_var"; then case "$finalize_perm_rpath " in *" $libdir "*) ;; *) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;; esac fi done # Substitute the hardcoded libdirs into the rpath. if test -n "$hardcode_libdir_separator" && test -n "$hardcode_libdirs"; then libdir="$hardcode_libdirs" eval rpath=\" $hardcode_libdir_flag_spec\" fi finalize_rpath="$rpath" if test -n "$libobjs" && test "$build_old_libs" = yes; then # Transform all the library objects into standard objects. compile_command=`$ECHO "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` finalize_command=`$ECHO "X$finalize_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` fi func_generate_dlsyms "$outputname" "@PROGRAM@" "no" # template prelinking step if test -n "$prelink_cmds"; then func_execute_cmds "$prelink_cmds" 'exit $?' fi wrappers_required=yes case $host in *cygwin* | *mingw* ) if test "$build_libtool_libs" != yes; then wrappers_required=no fi ;; *cegcc) # Disable wrappers for cegcc, we are cross compiling anyway. wrappers_required=no ;; *) if test "$need_relink" = no || test "$build_libtool_libs" != yes; then wrappers_required=no fi ;; esac if test "$wrappers_required" = no; then # Replace the output file specification. compile_command=`$ECHO "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'` link_command="$compile_command$compile_rpath" # We have no uninstalled library dependencies, so finalize right now. exit_status=0 func_show_eval "$link_command" 'exit_status=$?' # Delete the generated files. 
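# --- Editorial sketch (not part of the original script) ---
# For Windows-style hosts the loop above derives a candidate bin directory
# from each library directory with a sed expression that uses '*' as its
# delimiter: 's*/lib$*/bin*' rewrites a trailing "/lib" into "/bin".
# A hypothetical stand-alone rendering of that one-liner (never called):
lt_sketch_lib_to_bin ()
{
  # e.g.  lt_sketch_lib_to_bin /usr/local/lib   ->  /usr/local/bin
  echo "$1" | sed -e 's*/lib$*/bin*'
}
# --- end editorial sketch ---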
if test -f "$output_objdir/${outputname}S.${objext}"; then func_show_eval '$RM "$output_objdir/${outputname}S.${objext}"' fi exit $exit_status fi if test -n "$compile_shlibpath$finalize_shlibpath"; then compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" fi if test -n "$finalize_shlibpath"; then finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" fi compile_var= finalize_var= if test -n "$runpath_var"; then if test -n "$perm_rpath"; then # We should set the runpath_var. rpath= for dir in $perm_rpath; do rpath="$rpath$dir:" done compile_var="$runpath_var=\"$rpath\$$runpath_var\" " fi if test -n "$finalize_perm_rpath"; then # We should set the runpath_var. rpath= for dir in $finalize_perm_rpath; do rpath="$rpath$dir:" done finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " fi fi if test "$no_install" = yes; then # We don't need to create a wrapper script. link_command="$compile_var$compile_command$compile_rpath" # Replace the output file specification. link_command=`$ECHO "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'` # Delete the old output file. $opt_dry_run || $RM $output # Link the executable and exit func_show_eval "$link_command" 'exit $?' exit $EXIT_SUCCESS fi if test "$hardcode_action" = relink; then # Fast installation is not supported link_command="$compile_var$compile_command$compile_rpath" relink_command="$finalize_var$finalize_command$finalize_rpath" func_warning "this platform does not like uninstalled shared libraries" func_warning "\`$output' will be relinked during installation" else if test "$fast_install" != no; then link_command="$finalize_var$compile_command$finalize_rpath" if test "$fast_install" = yes; then relink_command=`$ECHO "X$compile_var$compile_command$compile_rpath" | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g'` else # fast_install is set to needless relink_command= fi else link_command="$compile_var$compile_command$compile_rpath" relink_command="$finalize_var$finalize_command$finalize_rpath" fi fi # Replace the output file specification. link_command=`$ECHO "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` # Delete the old output files. $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname func_show_eval "$link_command" 'exit $?' # Now create the wrapper script. func_verbose "creating $output" # Quote the relink command for shipping. if test -n "$relink_command"; then # Preserve any variables that may affect compiler behavior for var in $variables_saved_for_relink; do if eval test -z \"\${$var+set}\"; then relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" elif eval var_value=\$$var; test -z "$var_value"; then relink_command="$var=; export $var; $relink_command" else func_quote_for_eval "$var_value" relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" fi done relink_command="(cd `pwd`; $relink_command)" relink_command=`$ECHO "X$relink_command" | $Xsed -e "$sed_quote_subst"` fi # Quote $ECHO for shipping. if test "X$ECHO" = "X$SHELL $progpath --fallback-echo"; then case $progpath in [\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $progpath --fallback-echo";; *) qecho="$SHELL `pwd`/$progpath --fallback-echo";; esac qecho=`$ECHO "X$qecho" | $Xsed -e "$sed_quote_subst"` else qecho=`$ECHO "X$ECHO" | $Xsed -e "$sed_quote_subst"` fi # Only actually do things if not in dry run mode. 
$opt_dry_run || { # win32 will think the script is a binary if it has # a .exe suffix, so we strip it off here. case $output in *.exe) func_stripname '' '.exe' "$output" output=$func_stripname_result ;; esac # test for cygwin because mv fails w/o .exe extensions case $host in *cygwin*) exeext=.exe func_stripname '' '.exe' "$outputname" outputname=$func_stripname_result ;; *) exeext= ;; esac case $host in *cygwin* | *mingw* ) func_dirname_and_basename "$output" "" "." output_name=$func_basename_result output_path=$func_dirname_result cwrappersource="$output_path/$objdir/lt-$output_name.c" cwrapper="$output_path/$output_name.exe" $RM $cwrappersource $cwrapper trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 func_emit_cwrapperexe_src > $cwrappersource # The wrapper executable is built using the $host compiler, # because it contains $host paths and files. If cross- # compiling, it, like the target executable, must be # executed on the $host or under an emulation environment. $opt_dry_run || { $LTCC $LTCFLAGS -o $cwrapper $cwrappersource $STRIP $cwrapper } # Now, create the wrapper script for func_source use: func_ltwrapper_scriptname $cwrapper $RM $func_ltwrapper_scriptname_result trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15 $opt_dry_run || { # note: this script will not be executed, so do not chmod. if test "x$build" = "x$host" ; then $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result else func_emit_wrapper no > $func_ltwrapper_scriptname_result fi } ;; * ) $RM $output trap "$RM $output; exit $EXIT_FAILURE" 1 2 15 func_emit_wrapper no > $output chmod +x $output ;; esac } exit $EXIT_SUCCESS ;; esac # See if we need to build an old-fashioned archive. for oldlib in $oldlibs; do if test "$build_libtool_libs" = convenience; then oldobjs="$libobjs_save $symfileobj" addlibs="$convenience" build_libtool_libs=no else if test "$build_libtool_libs" = module; then oldobjs="$libobjs_save" build_libtool_libs=no else oldobjs="$old_deplibs $non_pic_objects" if test "$preload" = yes && test -f "$symfileobj"; then oldobjs="$oldobjs $symfileobj" fi fi addlibs="$old_convenience" fi if test -n "$addlibs"; then gentop="$output_objdir/${outputname}x" generated="$generated $gentop" func_extract_archives $gentop $addlibs oldobjs="$oldobjs $func_extract_archives_result" fi # Do each command in the archive commands. if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then cmds=$old_archive_from_new_cmds else # Add any objects from preloaded convenience libraries if test -n "$dlprefiles"; then gentop="$output_objdir/${outputname}x" generated="$generated $gentop" func_extract_archives $gentop $dlprefiles oldobjs="$oldobjs $func_extract_archives_result" fi # POSIX demands no paths to be encoded in archives. We have # to avoid creating archives with duplicate basenames if we # might have to extract them afterwards, e.g., when creating a # static archive out of a convenience library, or when linking # the entirety of a libtool archive into another (currently # not supported by libtool). if (for obj in $oldobjs do func_basename "$obj" $ECHO "$func_basename_result" done | sort | sort -uc >/dev/null 2>&1); then : else $ECHO "copying selected object files to avoid basename conflicts..." 
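# --- Editorial sketch (not part of the original script) ---
# The archive code above detects duplicate object basenames by printing every
# basename, sorting the list, and letting `sort -uc` fail when the sorted
# list is not already unique.  A hypothetical stand-alone version of that
# check (defined for illustration only, never called):
lt_sketch_has_dup_basenames ()
{
  # arguments: object paths; returns 0 (true) if two of them share a basename
  for sketch_obj
  do
    basename "$sketch_obj"
  done | sort | sort -uc >/dev/null 2>&1 && return 1
  return 0
}
# Example: lt_sketch_has_dup_basenames sub1/a.o sub2/a.o && echo "conflict"
# --- end editorial sketch ---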
gentop="$output_objdir/${outputname}x" generated="$generated $gentop" func_mkdir_p "$gentop" save_oldobjs=$oldobjs oldobjs= counter=1 for obj in $save_oldobjs do func_basename "$obj" objbase="$func_basename_result" case " $oldobjs " in " ") oldobjs=$obj ;; *[\ /]"$objbase "*) while :; do # Make sure we don't pick an alternate name that also # overlaps. newobj=lt$counter-$objbase func_arith $counter + 1 counter=$func_arith_result case " $oldobjs " in *[\ /]"$newobj "*) ;; *) if test ! -f "$gentop/$newobj"; then break; fi ;; esac done func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" oldobjs="$oldobjs $gentop/$newobj" ;; *) oldobjs="$oldobjs $obj" ;; esac done fi eval cmds=\"$old_archive_cmds\" func_len " $cmds" len=$func_len_result if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then cmds=$old_archive_cmds else # the command line is too long to link in one step, link in parts func_verbose "using piecewise archive linking..." save_RANLIB=$RANLIB RANLIB=: objlist= concat_cmds= save_oldobjs=$oldobjs oldobjs= # Is there a better way of finding the last object in the list? for obj in $save_oldobjs do last_oldobj=$obj done eval test_cmds=\"$old_archive_cmds\" func_len " $test_cmds" len0=$func_len_result len=$len0 for obj in $save_oldobjs do func_len " $obj" func_arith $len + $func_len_result len=$func_arith_result func_append objlist " $obj" if test "$len" -lt "$max_cmd_len"; then : else # the above command should be used before it gets too long oldobjs=$objlist if test "$obj" = "$last_oldobj" ; then RANLIB=$save_RANLIB fi test -z "$concat_cmds" || concat_cmds=$concat_cmds~ eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\" objlist= len=$len0 fi done RANLIB=$save_RANLIB oldobjs=$objlist if test "X$oldobjs" = "X" ; then eval cmds=\"\$concat_cmds\" else eval cmds=\"\$concat_cmds~\$old_archive_cmds\" fi fi fi func_execute_cmds "$cmds" 'exit $?' done test -n "$generated" && \ func_show_eval "${RM}r$generated" # Now create the libtool archive. case $output in *.la) old_library= test "$build_old_libs" = yes && old_library="$libname.$libext" func_verbose "creating $output" # Preserve any variables that may affect compiler behavior for var in $variables_saved_for_relink; do if eval test -z \"\${$var+set}\"; then relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" elif eval var_value=\$$var; test -z "$var_value"; then relink_command="$var=; export $var; $relink_command" else func_quote_for_eval "$var_value" relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" fi done # Quote the link command for shipping. relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" relink_command=`$ECHO "X$relink_command" | $Xsed -e "$sed_quote_subst"` if test "$hardcode_automatic" = yes ; then relink_command= fi # Only create the output if not a dry run. 
$opt_dry_run || { for installed in no yes; do if test "$installed" = yes; then if test -z "$install_libdir"; then break fi output="$output_objdir/$outputname"i # Replace all uninstalled libtool libraries with the installed ones newdependency_libs= for deplib in $dependency_libs; do case $deplib in *.la) func_basename "$deplib" name="$func_basename_result" eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` test -z "$libdir" && \ func_fatal_error "\`$deplib' is not a valid libtool archive" newdependency_libs="$newdependency_libs $libdir/$name" ;; *) newdependency_libs="$newdependency_libs $deplib" ;; esac done dependency_libs="$newdependency_libs" newdlfiles= for lib in $dlfiles; do case $lib in *.la) func_basename "$lib" name="$func_basename_result" eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` test -z "$libdir" && \ func_fatal_error "\`$lib' is not a valid libtool archive" newdlfiles="$newdlfiles $libdir/$name" ;; *) newdlfiles="$newdlfiles $lib" ;; esac done dlfiles="$newdlfiles" newdlprefiles= for lib in $dlprefiles; do case $lib in *.la) # Only pass preopened files to the pseudo-archive (for # eventual linking with the app. that links it) if we # didn't already link the preopened objects directly into # the library: func_basename "$lib" name="$func_basename_result" eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` test -z "$libdir" && \ func_fatal_error "\`$lib' is not a valid libtool archive" newdlprefiles="$newdlprefiles $libdir/$name" ;; esac done dlprefiles="$newdlprefiles" else newdlfiles= for lib in $dlfiles; do case $lib in [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; *) abs=`pwd`"/$lib" ;; esac newdlfiles="$newdlfiles $abs" done dlfiles="$newdlfiles" newdlprefiles= for lib in $dlprefiles; do case $lib in [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; *) abs=`pwd`"/$lib" ;; esac newdlprefiles="$newdlprefiles $abs" done dlprefiles="$newdlprefiles" fi $RM $output # place dlname in correct position for cygwin tdlname=$dlname case $host,$output,$installed,$module,$dlname in *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) tdlname=../bin/$dlname ;; esac $ECHO > $output "\ # $outputname - a libtool library file # Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION # # Please DO NOT delete this file! # It is necessary for linking the library. # The name that we can dlopen(3). dlname='$tdlname' # Names of this library. library_names='$library_names' # The name of the static archive. old_library='$old_library' # Linker flags that can not go in dependency_libs. inherited_linker_flags='$new_inherited_linker_flags' # Libraries that this one depends upon. dependency_libs='$dependency_libs' # Names of additional weak libraries provided by this library weak_library_names='$weak_libs' # Version information for $libname. current=$current age=$age revision=$revision # Is this an already installed library? installed=$installed # Should we warn about portability when linking against -modules? shouldnotlink=$module # Files to dlopen/dlpreopen dlopen='$dlfiles' dlpreopen='$dlprefiles' # Directory that this library needs to be installed in: libdir='$install_libdir'" if test "$installed" = no && test "$need_relink" = yes; then $ECHO >> $output "\ relink_command=\"$relink_command\"" fi done } # Do a symbolic link so that the libtool archive can be found in # LD_LIBRARY_PATH before the program is installed. func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?' 
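# --- Editorial sketch (not part of the original script) ---
# The block above writes the textual .la libtool archive twice (an
# uninstalled and an installed flavour).  The helper below prints a trimmed,
# hypothetical example of what such a file ends up containing; every value is
# invented for illustration and the function is never invoked.
lt_sketch_show_la ()
{
  cat <<'LT_SKETCH_EOF'
# libfoo.la - a libtool library file
dlname='libfoo.so.2'
library_names='libfoo.so.2.1.2 libfoo.so.2 libfoo.so'
old_library='libfoo.a'
dependency_libs=' -L/usr/lib -lbar'
installed=yes
libdir='/usr/lib'
LT_SKETCH_EOF
}
# --- end editorial sketch ---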
;; esac exit $EXIT_SUCCESS } { test "$mode" = link || test "$mode" = relink; } && func_mode_link ${1+"$@"} # func_mode_uninstall arg... func_mode_uninstall () { $opt_debug RM="$nonopt" files= rmforce= exit_status=0 # This variable tells wrapper scripts just to set variables rather # than running their programs. libtool_install_magic="$magic" for arg do case $arg in -f) RM="$RM $arg"; rmforce=yes ;; -*) RM="$RM $arg" ;; *) files="$files $arg" ;; esac done test -z "$RM" && \ func_fatal_help "you must specify an RM program" rmdirs= origobjdir="$objdir" for file in $files; do func_dirname "$file" "" "." dir="$func_dirname_result" if test "X$dir" = X.; then objdir="$origobjdir" else objdir="$dir/$origobjdir" fi func_basename "$file" name="$func_basename_result" test "$mode" = uninstall && objdir="$dir" # Remember objdir for removal later, being careful to avoid duplicates if test "$mode" = clean; then case " $rmdirs " in *" $objdir "*) ;; *) rmdirs="$rmdirs $objdir" ;; esac fi # Don't error if the file doesn't exist and rm -f was used. if { test -L "$file"; } >/dev/null 2>&1 || { test -h "$file"; } >/dev/null 2>&1 || test -f "$file"; then : elif test -d "$file"; then exit_status=1 continue elif test "$rmforce" = yes; then continue fi rmfiles="$file" case $name in *.la) # Possibly a libtool archive, so verify it. if func_lalib_p "$file"; then func_source $dir/$name # Delete the libtool libraries and symlinks. for n in $library_names; do rmfiles="$rmfiles $objdir/$n" done test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library" case "$mode" in clean) case " $library_names " in # " " in the beginning catches empty $dlname *" $dlname "*) ;; *) rmfiles="$rmfiles $objdir/$dlname" ;; esac test -n "$libdir" && rmfiles="$rmfiles $objdir/$name $objdir/${name}i" ;; uninstall) if test -n "$library_names"; then # Do each command in the postuninstall commands. func_execute_cmds "$postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' fi if test -n "$old_library"; then # Do each command in the old_postuninstall commands. func_execute_cmds "$old_postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' fi # FIXME: should reinstall the best remaining shared library. ;; esac fi ;; *.lo) # Possibly a libtool object, so verify it. if func_lalib_p "$file"; then # Read the .lo file func_source $dir/$name # Add PIC object to the list of files to remove. if test -n "$pic_object" && test "$pic_object" != none; then rmfiles="$rmfiles $dir/$pic_object" fi # Add non-PIC object to the list of files to remove. if test -n "$non_pic_object" && test "$non_pic_object" != none; then rmfiles="$rmfiles $dir/$non_pic_object" fi fi ;; *) if test "$mode" = clean ; then noexename=$name case $file in *.exe) func_stripname '' '.exe' "$file" file=$func_stripname_result func_stripname '' '.exe' "$name" noexename=$func_stripname_result # $file with .exe has already been added to rmfiles, # add $file without .exe rmfiles="$rmfiles $file" ;; esac # Do a test to see if this is a libtool program. 
if func_ltwrapper_p "$file"; then if func_ltwrapper_executable_p "$file"; then func_ltwrapper_scriptname "$file" relink_command= func_source $func_ltwrapper_scriptname_result rmfiles="$rmfiles $func_ltwrapper_scriptname_result" else relink_command= func_source $dir/$noexename fi # note $name still contains .exe if it was in $file originally # as does the version of $file that was added into $rmfiles rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}" if test "$fast_install" = yes && test -n "$relink_command"; then rmfiles="$rmfiles $objdir/lt-$name" fi if test "X$noexename" != "X$name" ; then rmfiles="$rmfiles $objdir/lt-${noexename}.c" fi fi fi ;; esac func_show_eval "$RM $rmfiles" 'exit_status=1' done objdir="$origobjdir" # Try to remove the ${objdir}s in the directories where we deleted files for dir in $rmdirs; do if test -d "$dir"; then func_show_eval "rmdir $dir >/dev/null 2>&1" fi done exit $exit_status } { test "$mode" = uninstall || test "$mode" = clean; } && func_mode_uninstall ${1+"$@"} test -z "$mode" && { help="$generic_help" func_fatal_help "you must specify a MODE" } test -z "$exec_cmd" && \ func_fatal_help "invalid operation mode \`$mode'" if test -n "$exec_cmd"; then eval exec "$exec_cmd" exit $EXIT_FAILURE fi exit $exit_status # The TAGs below are defined such that we never get into a situation # in which we disable both kinds of libraries. Given conflicting # choices, we go for a static library, that is the most portable, # since we can't tell whether shared libraries were disabled because # the user asked for that or because the platform doesn't support # them. This is particularly important on AIX, because we don't # support having both static and shared libraries enabled at the same # time on that platform, so we default to a shared-only configuration. # If a disable-shared tag is given, we'll fallback to a static-only # configuration. But we'll never go from static-only to shared-only. 
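# --- Editorial sketch (not part of the original script) ---
# func_mode_uninstall above backs both `--mode=clean` (remove build products
# from the build tree) and `--mode=uninstall` (remove installed files and run
# any post-uninstall commands).  Typical invocations look like the ones
# below; the paths are hypothetical, and the argument right after the mode is
# the RM program that the function insists on being given.  The function is
# never called from here.
lt_sketch_uninstall_examples ()
{
  ./libtool --mode=clean     rm -f libfoo.la foo.lo
  ./libtool --mode=uninstall rm -f /usr/local/lib/libfoo.la
}
# --- end editorial sketch ---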
# ### BEGIN LIBTOOL TAG CONFIG: disable-shared build_libtool_libs=no build_old_libs=yes # ### END LIBTOOL TAG CONFIG: disable-shared # ### BEGIN LIBTOOL TAG CONFIG: disable-static build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` # ### END LIBTOOL TAG CONFIG: disable-static # Local Variables: # mode:shell-script # sh-indentation:2 # End: # vi:sw=2 nordugrid-arc-5.4.2/PaxHeaders.7502/selinux0000644000000000000000000000013213214316022016600 xustar000000000000000030 mtime=1513200658.630730739 30 atime=1513200668.716854096 30 ctime=1513200658.630730739 nordugrid-arc-5.4.2/selinux/0000755000175000002070000000000013214316022016723 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/selinux/PaxHeaders.7502/nordugrid-arc-egiis.fc0000644000000000000000000000012411547505233023041 xustar000000000000000027 mtime=1302235803.113841 27 atime=1513200577.119733 30 ctime=1513200658.630730739 nordugrid-arc-5.4.2/selinux/nordugrid-arc-egiis.fc0000644000175000002070000000012111547505233023100 0ustar00mockbuildmock00000000000000/usr/sbin/arc-infoindex-relay gen_context(system_u:object_r:arc_relay_exec_t,s0) nordugrid-arc-5.4.2/selinux/PaxHeaders.7502/nordugrid-arc-egiis.te0000644000000000000000000000012411547505233023061 xustar000000000000000027 mtime=1302235803.113841 27 atime=1513200577.119733 30 ctime=1513200658.629730727 nordugrid-arc-5.4.2/selinux/nordugrid-arc-egiis.te0000644000175000002070000000164611547505233023135 0ustar00mockbuildmock00000000000000module nordugrid-arc-egiis 1.0.0; require { type slapd_t; type tmp_t; type var_run_t; attribute file_type; attribute exec_type; class file { execute execute_no_trans open read }; class fifo_file { create getattr lock open read unlink write }; class process { signal }; } type arc_relay_exec_t, file_type, exec_type; allow slapd_t arc_relay_exec_t:file execute; allow slapd_t arc_relay_exec_t:file execute_no_trans; allow slapd_t arc_relay_exec_t:file open; allow slapd_t arc_relay_exec_t:file read; allow slapd_t self:fifo_file getattr; allow slapd_t self:process signal; allow slapd_t tmp_t:fifo_file create; allow slapd_t tmp_t:fifo_file getattr; allow slapd_t tmp_t:fifo_file open; allow slapd_t tmp_t:fifo_file read; allow slapd_t tmp_t:fifo_file unlink; allow slapd_t var_run_t:fifo_file getattr; allow slapd_t var_run_t:fifo_file lock; allow slapd_t var_run_t:fifo_file open; allow slapd_t var_run_t:fifo_file write; nordugrid-arc-5.4.2/PaxHeaders.7502/autogen.sh0000644000000000000000000000012611264700565017203 xustar000000000000000027 mtime=1255375221.441498 30 atime=1513200577.122733864 29 ctime=1513200658.62673069 nordugrid-arc-5.4.2/autogen.sh0000755000175000002070000000170511264700565017254 0ustar00mockbuildmock00000000000000#!/bin/sh # # autogen.sh glue # # Requires: automake 1.9, autoconf 2.57+ # Conflicts: autoconf 2.13 set -x cleanup() { find . -type d -name autom4te.cache -print | xargs rm -rf \; find . -type f \( -name missing -o -name install-sh \ -o -name mkinstalldirs \ -o -name depcomp -o -name ltmain.sh -o -name configure \ -o -name config.sub -o -name config.guess \ -o -name Makefile.in -o -name config.h.in -o -name aclocal.m4 \ -o -name autoscan.log -o -name configure.scan -o -name config.log \ -o -name config.status -o -name config.h -o -name stamp-h1 \ -o -name Makefile -o -name libtool \) \ -print | xargs rm -f } if [ "x$1" = "xclean" ]; then cleanup exit fi # Refresh GNU autotools toolchain. echo Cleaning autotools files... 
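# --- Editorial sketch (not part of the original script) ---
# Typical bootstrap sequence built on this autogen.sh; the function below is
# never called and the --prefix value is an arbitrary example, so the
# behaviour of the script is unchanged.
sketch_bootstrap_example() {
    ./autogen.sh                   # regenerate configure & friends via autoreconf
    ./configure --prefix=/opt/arc  # hypothetical install prefix
    make
}
# To remove the generated autotools files again, run:  ./autogen.sh clean
# --- end editorial sketch ---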
cleanup type glibtoolize > /dev/null 2>&1 && export LIBTOOLIZE=glibtoolize echo Running autoreconf... autoreconf --verbose --force --install exit 0 nordugrid-arc-5.4.2/PaxHeaders.7502/mingw-nordugrid-arc.spec.in0000644000000000000000000000012713214315176022343 xustar000000000000000027 mtime=1513200254.761812 30 atime=1513200652.966661465 30 ctime=1513200658.605730433 nordugrid-arc-5.4.2/mingw-nordugrid-arc.spec.in0000644000175000002070000005024313214315176022411 0ustar00mockbuildmock00000000000000%{?mingw_package_header} %global mingw_pkg_name @PACKAGE@ # Cross-compilation platforms supported %global mingw_build_win32 1 %global mingw_build_win64 1 # External (currently unofficial) dependencies) %define with_mingw32_python 0 %define with_mingw32_xmlsec1 1 %define with_mingw32_globus 0 %define with_mingw64_python 0 %define with_mingw64_xmlsec1 1 %define with_mingw64_globus 0 %define pkgdir arc Name: mingw-%{mingw_pkg_name} Version: @VERSION@ Release: 1%{?dist} Summary: ARC Group: System Environment/Daemons License: ASL 2.0 URL: http://www.nordugrid.org/ Source: http://download.nordugrid.org/packages/nordugrid-arc/releases/%{version}/src/%{mingw_pkg_name}-%{version}.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) BuildArch: noarch # mingw32 %if %{mingw_build_win32} BuildRequires: mingw32-filesystem >= 65 BuildRequires: mingw32-binutils BuildRequires: mingw32-gcc BuildRequires: mingw32-gcc-c++ BuildRequires: mingw32-runtime BuildRequires: mingw32-headers BuildRequires: mingw32-dlfcn BuildRequires: mingw32-gettext %if %{with_mingw32_python} BuildRequires: mingw32-python %endif BuildRequires: mingw32-glibmm24 BuildRequires: mingw32-glib2 BuildRequires: mingw32-libxml2 BuildRequires: mingw32-openssl BuildRequires: mingw32-libgnurx %if %{with_mingw32_xmlsec1} BuildRequires: mingw32-xmlsec1 %endif BuildRequires: mingw32-cppunit BuildRequires: mingw32-libdb BuildRequires: mingw32-canl-c++ %if %{with_mingw32_globus} BuildRequires: mingw32-globus-common BuildRequires: mingw32-globus-ftp-client BuildRequires: mingw32-globus-ftp-control %endif %endif # mingw64 %if %{mingw_build_win64} BuildRequires: mingw64-filesystem >= 65 BuildRequires: mingw64-binutils BuildRequires: mingw64-gcc BuildRequires: mingw64-gcc-c++ BuildRequires: mingw64-runtime BuildRequires: mingw64-headers BuildRequires: mingw64-dlfcn BuildRequires: mingw64-gettext %if %{with_mingw64_python} BuildRequires: mingw64-python %endif BuildRequires: mingw64-glibmm24 BuildRequires: mingw64-glib2 BuildRequires: mingw64-libxml2 BuildRequires: mingw64-openssl BuildRequires: mingw64-libgnurx %if %{with_mingw64_xmlsec1} BuildRequires: mingw64-xmlsec1 %endif BuildRequires: mingw64-cppunit BuildRequires: mingw64-libdb BuildRequires: mingw64-canl-c++ %if %{with_mingw64_globus} BuildRequires: mingw64-globus-common BuildRequires: mingw64-globus-ftp-client BuildRequires: mingw64-globus-ftp-control %endif %endif BuildRequires: pkgconfig BuildRequires: swig %description NorduGrid ARC %if %{mingw_build_win32} %package -n mingw32-%{mingw_pkg_name} Summary: ARC core libraries Group: System Environment/Libraries Requires: mingw32-openssl %description -n mingw32-%{mingw_pkg_name} NorduGrid ARC core libraries %package -n mingw32-%{mingw_pkg_name}-client Summary: ARC command line interface Group: Applications/Internet Requires: mingw32-%{mingw_pkg_name} = %{version} Requires: mingw32-%{mingw_pkg_name}-plugins-needed = %{version} %description -n mingw32-%{mingw_pkg_name}-client ARC command line interface. 
%package -n mingw32-%{mingw_pkg_name}-hed Summary: ARC Hosting Environment Daemon Group: System Environment/Libraries Requires: mingw32-%{mingw_pkg_name} = %{version} %description -n mingw32-%{mingw_pkg_name}-hed ARC Hosting Environment Daemon (HED). %package -n mingw32-%{mingw_pkg_name}-plugins-needed Summary: ARC base plugins Group: System Environment/Libraries Requires: mingw32-%{mingw_pkg_name} = %{version} %description -n mingw32-%{mingw_pkg_name}-plugins-needed ARC base plugins. This includes the Message Chain Components (MCCs) and Data Manager Components (DMCs). %if %{with_mingw32_globus} %package -n mingw32-%{mingw_pkg_name}-plugins-globus Summary: ARC Globus plugins Group: System Environment/Libraries Requires: mingw32-%{mingw_pkg_name} = %{version} Requires: globus_common Requires: globus_ftp_client Requires: globus_ftp_control %description -n mingw32-%{mingw_pkg_name}-plugins-globus ARC Globus plugins. This includes the Globus dependent Data Manager Components (DMCs): libdmcgridftp.so %endif %package -n mingw32-%{mingw_pkg_name}-devel Summary: ARC development files Group: Development/Libraries Requires: mingw32-%{mingw_pkg_name} = %{version} Requires: mingw32-glibmm24 Requires: mingw32-glib2 Requires: mingw32-libxml2 Requires: mingw32-openssl %description -n mingw32-%{mingw_pkg_name}-devel Development files for ARC %package -n mingw32-%{mingw_pkg_name}-misc-utils Summary: NorduGrid misc tools Group: Applications/Internet Requires: mingw32-%{mingw_pkg_name} = %{version}-%{release} Requires: mingw32-%{mingw_pkg_name}-plugins-needed = %{version}-%{release} %description -n mingw32-%{mingw_pkg_name}-misc-utils NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains various utilities for various tasks including testing. The package is usually not required by users or sysadmins but mainly for developers. %if %{with_mingw32_python} %package -n mingw32-%{mingw_pkg_name}-python Summary: ARC Python wrapper Group: Development/Libraries Requires: mingw32-%{mingw_pkg_name} = %{version} Requires: python %description -n mingw32-%{mingw_pkg_name}-python Python wrapper for ARC %endif %endif # mingw64 %if %{mingw_build_win64} %package -n mingw64-%{mingw_pkg_name} Summary: ARC core libraries Group: System Environment/Libraries Requires: mingw64-openssl %description -n mingw64-%{mingw_pkg_name} NorduGrid ARC core libraries %package -n mingw64-%{mingw_pkg_name}-client Summary: ARC command line interface Group: Applications/Internet Requires: mingw64-%{mingw_pkg_name} = %{version} Requires: mingw64-%{mingw_pkg_name}-plugins-needed = %{version} %description -n mingw64-%{mingw_pkg_name}-client ARC command line interface. %package -n mingw64-%{mingw_pkg_name}-hed Summary: ARC Hosting Environment Daemon Group: System Environment/Libraries Requires: mingw64-%{mingw_pkg_name} = %{version} %description -n mingw64-%{mingw_pkg_name}-hed ARC Hosting Environment Daemon (HED). %package -n mingw64-%{mingw_pkg_name}-plugins-needed Summary: ARC base plugins Group: System Environment/Libraries Requires: mingw64-%{mingw_pkg_name} = %{version} %description -n mingw64-%{mingw_pkg_name}-plugins-needed ARC base plugins. This includes the Message Chain Components (MCCs) and Data Manager Components (DMCs). 
%if %{with_mingw64_globus} %package -n mingw64-%{mingw_pkg_name}-plugins-globus Summary: ARC Globus plugins Group: System Environment/Libraries Requires: mingw64-%{mingw_pkg_name} = %{version} Requires: globus_common Requires: globus_ftp_client Requires: globus_ftp_control %description -n mingw64-%{mingw_pkg_name}-plugins-globus ARC Globus plugins. This includes the Globus dependent Data Manager Components (DMCs): libdmcgridftp.so %endif %package -n mingw64-%{mingw_pkg_name}-devel Summary: ARC development files Group: Development/Libraries Requires: mingw64-%{mingw_pkg_name} = %{version} Requires: mingw64-glibmm24 Requires: mingw64-glib2 Requires: mingw64-libxml2 Requires: mingw64-openssl %description -n mingw64-%{mingw_pkg_name}-devel Development files for ARC %package -n mingw64-%{mingw_pkg_name}-misc-utils Summary: NorduGrid misc tools Group: Applications/Internet Requires: mingw64-%{mingw_pkg_name} = %{version}-%{release} Requires: mingw64-%{mingw_pkg_name}-plugins-needed = %{version}-%{release} %description -n mingw64-%{mingw_pkg_name}-misc-utils NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains various utilities for various tasks including testing. The package is usually not required by users or sysadmins but mainly for developers. %if %{with_mingw64_python} %package -n mingw64-%{mingw_pkg_name}-python Summary: ARC Python wrapper Group: Development/Libraries Requires: mingw64-%{mingw_pkg_name} = %{version} Requires: python %description -n mingw64-%{mingw_pkg_name}-python Python wrapper for ARC %endif %endif %{?mingw_debug_package} %prep %setup -q -n %{mingw_pkg_name}-%{version} %build %mingw_configure \ --enable-canlxx \ --disable-java \ --disable-doc \ --disable-ws-monitor \ --disable-ldap-monitor \ --disable-acix \ --disable-static LDFLAGS=-Wl,--enable-auto-import %mingw_make %{?_smp_mflags} #make check %install rm -rf $RPM_BUILD_ROOT %mingw_make_install DESTDIR=$RPM_BUILD_ROOT find $RPM_BUILD_ROOT -type f -name \*.la -exec rm -fv '{}' ';' #mkdir -p $RPM_BUILD_ROOT/etc/init.d #cp -p src/hed/daemon/scripts/arched.redhat $RPM_BUILD_ROOT/etc/init.d/arched #chmod +x $RPM_BUILD_ROOT/etc/init.d/arched # RPM does its own doc handling rm -fr $RPM_BUILD_ROOT%{mingw32_datadir}/doc/%{mingw_pkg_name}/ rm -fr $RPM_BUILD_ROOT%{mingw64_datadir}/doc/%{mingw_pkg_name}/ rm -fr $RPM_BUILD_ROOT%{mingw32_datadir}/arc/examples/sdk/*.py rm -fr $RPM_BUILD_ROOT%{mingw64_datadir}/arc/examples/sdk/*.py %mingw_find_lang %{mingw_pkg_name} %clean rm -rf $RPM_BUILD_ROOT %if %{mingw_build_win32} %files -n mingw32-%{mingw_pkg_name} -f mingw32-%{mingw_pkg_name}.lang %defattr(-,root,root,-) %doc README AUTHORS LICENSE NOTICE ChangeLog %{mingw32_bindir}/lib*.dll %{mingw32_libdir}/%{pkgdir}/libmodcrypto.dll* %{mingw32_libdir}/%{pkgdir}/libmodcredential.dll* %{mingw32_datadir}/%{pkgdir}/schema %{mingw32_libdir}/%{pkgdir}/arc-file-access.exe %{mingw32_libdir}/%{pkgdir}/DataStagingDelivery.exe %dir %{mingw32_datadir}/%{pkgdir}/test-jobs %{mingw32_datadir}/%{pkgdir}/test-jobs/test-job-* %files -n mingw32-%{mingw_pkg_name}-client %defattr(-,root,root,-) %{mingw32_datadir}/%{pkgdir}/examples/client.conf # # Executables # %{mingw32_bindir}/arcecho.exe %{mingw32_bindir}/arcinfo.exe %{mingw32_bindir}/arcproxy.exe %{mingw32_bindir}/arcproxyalt.exe %{mingw32_bindir}/arccat.exe %{mingw32_bindir}/arccp.exe %{mingw32_bindir}/arcls.exe %{mingw32_bindir}/arcrm.exe %{mingw32_bindir}/arcmkdir.exe 
%{mingw32_bindir}/arcrename.exe %if %{with_mingw32_xmlsec1} %endif %{mingw32_bindir}/arcstat.exe %{mingw32_bindir}/arcsub.exe %{mingw32_bindir}/arcsync.exe %{mingw32_bindir}/arcresub.exe %{mingw32_bindir}/arcget.exe %{mingw32_bindir}/arcclean.exe %{mingw32_bindir}/arckill.exe %{mingw32_bindir}/arcrenew.exe %{mingw32_bindir}/arcresume.exe %{mingw32_bindir}/arctest.exe # %{mingw32_sysconfdir}/arc/client.conf # # Man pages # %doc %{mingw32_mandir}/man1/arcecho.1* %doc %{mingw32_mandir}/man1/arcinfo.1* %doc %{mingw32_mandir}/man1/arcproxy.1* %if %{with_mingw32_xmlsec1} %endif %doc %{mingw32_mandir}/man1/arccat.1* %doc %{mingw32_mandir}/man1/arccp.1* %doc %{mingw32_mandir}/man1/arcls.1* %doc %{mingw32_mandir}/man1/arcrm.1* %doc %{mingw32_mandir}/man1/arcmkdir.1* %doc %{mingw32_mandir}/man1/arcrename.1* %doc %{mingw32_mandir}/man1/arcstat.1* %doc %{mingw32_mandir}/man1/arcsub.1* %doc %{mingw32_mandir}/man1/arcsync.1* %doc %{mingw32_mandir}/man1/arcresub.1* %doc %{mingw32_mandir}/man1/arcget.1* %doc %{mingw32_mandir}/man1/arcclean.1* %doc %{mingw32_mandir}/man1/arckill.1* %doc %{mingw32_mandir}/man1/arcrenew.1* %doc %{mingw32_mandir}/man1/arcresume.1* %doc %{mingw32_mandir}/man1/arctest.1* %doc %{mingw32_datadir}/%{pkgdir}/examples/echo/echo.wsdl %files -n mingw32-%{mingw_pkg_name}-hed %defattr(-,root,root,-) %{mingw32_sbindir}/arched.exe %doc %{mingw32_mandir}/man8/arched.8* %doc %{mingw32_mandir}/man5/arc.conf.5* %{mingw32_datadir}/%{pkgdir}/profiles %{mingw32_datadir}/%{pkgdir}/examples/config %{mingw32_datadir}/%{pkgdir}/examples/arc.conf.reference %doc %{mingw32_datadir}/%{pkgdir}/examples/echo/echo_service.xml.example %{mingw32_libdir}/%{pkgdir}/libecho.dll* %files -n mingw32-%{mingw_pkg_name}-devel %defattr(-,root,root,-) %{mingw32_includedir}/%{pkgdir} %{mingw32_libdir}/lib*.dll.a %{mingw32_bindir}/wsdl2hed.exe %doc %{mingw32_mandir}/man1/wsdl2hed.1* %{mingw32_bindir}/arcplugin.exe %doc %{mingw32_mandir}/man1/arcplugin.1* %{mingw32_libdir}/pkgconfig/arcbase.pc %doc %{mingw32_datadir}/%{pkgdir}/examples/sdk/*.cpp %doc %{mingw32_datadir}/%{pkgdir}/examples/sdk/*.h %files -n mingw32-%{mingw_pkg_name}-plugins-needed %defattr(-,root,root,-) %{mingw32_libdir}/%{pkgdir}/libmcchttp.dll* %{mingw32_libdir}/%{pkgdir}/libmccmsgvalidator.dll* %{mingw32_libdir}/%{pkgdir}/libmccsoap.dll* %{mingw32_libdir}/%{pkgdir}/libmcctcp.dll* %{mingw32_libdir}/%{pkgdir}/libmcctls.dll* %{mingw32_libdir}/%{pkgdir}/libdmcfile.dll* %{mingw32_libdir}/%{pkgdir}/libdmchttp.dll* %{mingw32_libdir}/%{pkgdir}/libdmcldap.dll* %{mingw32_libdir}/%{pkgdir}/libdmcsrm.dll* %{mingw32_libdir}/%{pkgdir}/libdmcrucio.dll* %{mingw32_libdir}/%{pkgdir}/libdmcacix.dll* %{mingw32_libdir}/%{pkgdir}/libarcshc.dll* %{mingw32_libdir}/%{pkgdir}/libarcshclegacy.dll* %{mingw32_libdir}/%{pkgdir}/libidentitymap.dll* %{mingw32_libdir}/%{pkgdir}/libarguspdpclient.dll* %{mingw32_libdir}/%{pkgdir}/libaccARC1.dll* %{mingw32_libdir}/%{pkgdir}/libaccBroker.dll* %{mingw32_libdir}/%{pkgdir}/libaccCREAM.dll* %{mingw32_libdir}/%{pkgdir}/libaccEMIES.dll* %{mingw32_libdir}/%{pkgdir}/libaccSER.dll* %{mingw32_libdir}/%{pkgdir}/libaccldap.dll* #%{mingw32_libdir}/%{pkgdir}/libaccUNICORE.dll* %{mingw32_libdir}/%{pkgdir}/libaccJobDescriptionParser.dll* %{mingw32_libdir}/%{pkgdir}/test/libaccTEST.dll* %if %{with_mingw32_globus} %files -n mingw32-%{mingw_pkg_name}-plugins-globus %defattr(-,root,root,-) %{mingw32_libdir}/%{pkgdir}/libdmcgridftp.dll* %{mingw32_libdir}/%{pkgdir}/libaccARC0.dll* %{mingw32_libexecdir}/%{pkgdir}/arc-lcas.exe 
%{mingw32_libexecdir}/%{pkgdir}/arc-lcmaps.exe %endif %files -n mingw32-%{mingw_pkg_name}-misc-utils %defattr(-,root,root,-) %{mingw32_bindir}/arcemiestest.exe %{mingw32_bindir}/arcperftest.exe %{mingw32_bindir}/arcwsrf.exe %doc %{mingw32_mandir}/man1/arcemiestest.1* %doc %{mingw32_mandir}/man1/arcperftest.1* %doc %{mingw32_mandir}/man1/arcwsrf.1* %if %{with_mingw32_xmlsec1} %{mingw32_bindir}/saml_assertion_init.exe %doc %{mingw32_mandir}/man1/saml_assertion_init.1* %endif %if %{with_mingw32_python} %files -n mingw32-%{mingw_pkg_name}-python %defattr(-,root,root,-) %{mingw32_libdir}/python?.?/site-packages/_arc*.dll* %{mingw32_libdir}/python?.?/site-packages/arc/__init__.py* %{mingw32_libdir}/python?.?/site-packages/arc/common.py* %{mingw32_libdir}/python?.?/site-packages/arc/loader.py* %{mingw32_libdir}/python?.?/site-packages/arc/message.py* %{mingw32_libdir}/python?.?/site-packages/arc/communication.py* %{mingw32_libdir}/python?.?/site-packages/arc/compute.py* %{mingw32_libdir}/python?.?/site-packages/arc/credential.py* %{mingw32_libdir}/python?.?/site-packages/arc/data.py* %{mingw32_libdir}/python?.?/site-packages/arc/delegation.py* %{mingw32_libdir}/python?.?/site-packages/arc/security.py* %{mingw32_libdir}/%{pkgdir}/libpythonservice.dll* %{mingw32_libdir}/%{pkgdir}/libaccPythonBroker.dll* %doc %{mingw32_datadir}/%{pkgdir}/examples/PythonBroker/ACIXBroker.py* %doc %{mingw32_datadir}/%{pkgdir}/examples/PythonBroker/SampleBroker.py* %doc %{mingw32_datadir}/%{pkgdir}/examples/sdk/*.py* %doc %{mingw32_datadir}/%{pkgdir}/examples/echo_python/* %endif %endif # # mingw64 # %if %{mingw_build_win64} %files -n mingw64-%{mingw_pkg_name} -f mingw64-%{mingw_pkg_name}.lang %defattr(-,root,root,-) %doc README AUTHORS LICENSE NOTICE ChangeLog %{mingw64_bindir}/lib*.dll %{mingw64_libdir}/%{pkgdir}/libmodcrypto.dll* %{mingw64_libdir}/%{pkgdir}/libmodcredential.dll* %{mingw64_datadir}/%{pkgdir}/schema %{mingw64_libdir}/%{pkgdir}/arc-file-access.exe %{mingw64_libdir}/%{pkgdir}/DataStagingDelivery.exe %dir %{mingw64_datadir}/%{pkgdir}/test-jobs %{mingw64_datadir}/%{pkgdir}/test-jobs/test-job-* %files -n mingw64-%{mingw_pkg_name}-client %defattr(-,root,root,-) %{mingw64_datadir}/%{pkgdir}/examples/client.conf # # Executables # %{mingw64_bindir}/arcecho.exe %{mingw64_bindir}/arcinfo.exe %{mingw64_bindir}/arcproxy.exe %{mingw64_bindir}/arcproxyalt.exe %{mingw64_bindir}/arccat.exe %{mingw64_bindir}/arccp.exe %{mingw64_bindir}/arcls.exe %{mingw64_bindir}/arcrm.exe %{mingw64_bindir}/arcmkdir.exe %{mingw64_bindir}/arcrename.exe %if %{with_mingw64_xmlsec1} %endif %{mingw64_bindir}/arcstat.exe %{mingw64_bindir}/arcsub.exe %{mingw64_bindir}/arcsync.exe %{mingw64_bindir}/arcresub.exe %{mingw64_bindir}/arcget.exe %{mingw64_bindir}/arcclean.exe %{mingw64_bindir}/arckill.exe %{mingw64_bindir}/arcrenew.exe %{mingw64_bindir}/arcresume.exe %{mingw64_bindir}/arctest.exe # %{mingw64_sysconfdir}/arc/client.conf # # Man pages # %doc %{mingw64_mandir}/man1/arcecho.1* %doc %{mingw64_mandir}/man1/arcinfo.1* %doc %{mingw64_mandir}/man1/arcproxy.1* %if %{with_mingw64_xmlsec1} %endif %doc %{mingw64_mandir}/man1/arccat.1* %doc %{mingw64_mandir}/man1/arccp.1* %doc %{mingw64_mandir}/man1/arcls.1* %doc %{mingw64_mandir}/man1/arcrm.1* %doc %{mingw64_mandir}/man1/arcmkdir.1* %doc %{mingw64_mandir}/man1/arcrename.1* %doc %{mingw64_mandir}/man1/arcstat.1* %doc %{mingw64_mandir}/man1/arcsub.1* %doc %{mingw64_mandir}/man1/arcsync.1* %doc %{mingw64_mandir}/man1/arcresub.1* %doc %{mingw64_mandir}/man1/arcget.1* %doc 
%{mingw64_mandir}/man1/arcclean.1* %doc %{mingw64_mandir}/man1/arckill.1* %doc %{mingw64_mandir}/man1/arcrenew.1* %doc %{mingw64_mandir}/man1/arcresume.1* %doc %{mingw64_mandir}/man1/arctest.1* %doc %{mingw64_datadir}/%{pkgdir}/examples/echo/echo.wsdl %files -n mingw64-%{mingw_pkg_name}-hed %defattr(-,root,root,-) %{mingw64_sbindir}/arched.exe %doc %{mingw64_mandir}/man8/arched.8* %doc %{mingw64_mandir}/man5/arc.conf.5* %{mingw64_datadir}/%{pkgdir}/profiles %{mingw64_datadir}/%{pkgdir}/examples/config %{mingw64_datadir}/%{pkgdir}/examples/arc.conf.reference %doc %{mingw64_datadir}/%{pkgdir}/examples/echo/echo_service.xml.example %{mingw64_libdir}/%{pkgdir}/libecho.dll* %files -n mingw64-%{mingw_pkg_name}-devel %defattr(-,root,root,-) %{mingw64_includedir}/%{pkgdir} %{mingw64_libdir}/lib*.dll.a %{mingw64_bindir}/wsdl2hed.exe %doc %{mingw64_mandir}/man1/wsdl2hed.1* %{mingw64_bindir}/arcplugin.exe %doc %{mingw64_mandir}/man1/arcplugin.1* %{mingw64_libdir}/pkgconfig/arcbase.pc %doc %{mingw64_datadir}/%{pkgdir}/examples/sdk/*.cpp %doc %{mingw64_datadir}/%{pkgdir}/examples/sdk/*.h %files -n mingw64-%{mingw_pkg_name}-plugins-needed %defattr(-,root,root,-) %{mingw64_libdir}/%{pkgdir}/libmcchttp.dll* %{mingw64_libdir}/%{pkgdir}/libmccmsgvalidator.dll* %{mingw64_libdir}/%{pkgdir}/libmccsoap.dll* %{mingw64_libdir}/%{pkgdir}/libmcctcp.dll* %{mingw64_libdir}/%{pkgdir}/libmcctls.dll* %{mingw64_libdir}/%{pkgdir}/libdmcfile.dll* %{mingw64_libdir}/%{pkgdir}/libdmchttp.dll* %{mingw64_libdir}/%{pkgdir}/libdmcldap.dll* %{mingw64_libdir}/%{pkgdir}/libdmcsrm.dll* %{mingw64_libdir}/%{pkgdir}/libdmcrucio.dll* %{mingw64_libdir}/%{pkgdir}/libdmcacix.dll* %{mingw64_libdir}/%{pkgdir}/libarcshc.dll* %{mingw64_libdir}/%{pkgdir}/libarcshclegacy.dll* %{mingw64_libdir}/%{pkgdir}/libidentitymap.dll* %{mingw64_libdir}/%{pkgdir}/libarguspdpclient.dll* %{mingw64_libdir}/%{pkgdir}/libaccARC1.dll* %{mingw64_libdir}/%{pkgdir}/libaccBroker.dll* %{mingw64_libdir}/%{pkgdir}/libaccCREAM.dll* %{mingw64_libdir}/%{pkgdir}/libaccEMIES.dll* %{mingw64_libdir}/%{pkgdir}/libaccSER.dll* %{mingw64_libdir}/%{pkgdir}/libaccldap.dll* #%{mingw64_libdir}/%{pkgdir}/libaccUNICORE.dll* %{mingw64_libdir}/%{pkgdir}/libaccJobDescriptionParser.dll* %{mingw64_libdir}/%{pkgdir}/test/libaccTEST.dll* %if %{with_mingw64_globus} %files -n mingw64-%{mingw_pkg_name}-plugins-globus %defattr(-,root,root,-) %{mingw64_libdir}/%{pkgdir}/libdmcgridftp.dll* %{mingw64_libdir}/%{pkgdir}/libaccARC0.dll* %{mingw64_libexecdir}/%{pkgdir}/arc-lcas.exe %{mingw64_libexecdir}/%{pkgdir}/arc-lcmaps.exe %endif %files -n mingw64-%{mingw_pkg_name}-misc-utils %defattr(-,root,root,-) %{mingw64_bindir}/arcemiestest.exe %{mingw64_bindir}/arcperftest.exe %{mingw64_bindir}/arcwsrf.exe %doc %{mingw64_mandir}/man1/arcemiestest.1* %doc %{mingw64_mandir}/man1/arcperftest.1* %doc %{mingw64_mandir}/man1/arcwsrf.1* %if %{with_mingw64_xmlsec1} %{mingw64_bindir}/saml_assertion_init.exe %doc %{mingw64_mandir}/man1/saml_assertion_init.1* %endif %if %{with_mingw64_python} %files -n mingw64-%{mingw_pkg_name}-python %defattr(-,root,root,-) %{mingw64_libdir}/python?.?/site-packages/_arc.dll* %{mingw64_libdir}/python?.?/site-packages/arc.py* %{mingw64_libdir}/%{pkgdir}/libpythonservice.dll* %{mingw64_libdir}/%{pkgdir}/libaccPythonBroker.dll* %doc %{mingw64_datadir}/%{pkgdir}/examples/PythonBroker/SampleBroker.py* %doc %{mingw64_datadir}/%{pkgdir}/examples/sdk/*.py* %doc %{mingw64_datadir}/%{pkgdir}/examples/echo_python/* %endif %endif %changelog * @SPECDATE@ Anders Waananen - @VERSION@-1 - Initial release 
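# A hedged build sketch, assuming the stock Fedora MinGW packaging
# workflow rather than anything stated in this spec: the noarch
# cross-compiled packages are typically produced with
#   rpmbuild -bs mingw-nordugrid-arc.spec
#   mock -r CONFIG mingw-nordugrid-arc-*.src.rpm
# where CONFIG is a placeholder for a mock chroot configuration that
# provides the mingw32-*/mingw64-* toolchain listed in BuildRequires.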
nordugrid-arc-5.4.2/PaxHeaders.7502/m40000644000000000000000000000013213214316022015431 xustar000000000000000030 mtime=1513200658.597730335 30 atime=1513200668.716854096 30 ctime=1513200658.597730335 nordugrid-arc-5.4.2/m4/0000755000175000002070000000000013214316022015554 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/m4/PaxHeaders.7502/ac_cxx_have_dbdeadlockexception.m40000644000000000000000000000012711070412255024302 xustar000000000000000027 mtime=1222775981.821677 30 atime=1513200579.482762728 30 ctime=1513200658.580730127 nordugrid-arc-5.4.2/m4/ac_cxx_have_dbdeadlockexception.m40000644000175000002070000000137711070412255024354 0ustar00mockbuildmock00000000000000dnl @synopsis AC_DBCXX_HAVE_DBDEADLOCKEXCEPTION dnl dnl If the Berkeley DB C++ API provides DbDeadlockException, define HAVE_DBDEADLOCKEXCEPTION. dnl dnl @author Ben Stanley dnl @version $Id: ac_cxx_have_sstream.m4 3830 2005-06-24 07:01:15Z waananen $ dnl AC_DEFUN([AC_DBCXX_HAVE_DBDEADLOCKEXCEPTION], [AC_CACHE_CHECK(whether the Berkeley DB has DbDeadlockException, ac_cv_dbcxx_dbdeadlockexception, [ AC_LANG_SAVE AC_LANG_CPLUSPLUS AC_TRY_COMPILE([#include ],[try { } catch(DbDeadlockException&) { }; return 0;], ac_cv_dbcxx_have_dbdeadlockexception=yes, ac_cv_dbcxx_have_dbdeadlockexception=no) AC_LANG_RESTORE ]) if test "$ac_cv_dbcxx_have_dbdeadlockexception" = yes; then AC_DEFINE(HAVE_DBDEADLOCKEXCEPTION,,[define if the Berkeley DB has DbDeadlockException]) fi ]) nordugrid-arc-5.4.2/m4/PaxHeaders.7502/gettext.m40000644000000000000000000000013213214315702017440 xustar000000000000000030 mtime=1513200578.123746107 30 atime=1513200579.478762679 30 ctime=1513200658.587730213 nordugrid-arc-5.4.2/m4/gettext.m40000644000175000002070000004052113214315702017510 0ustar00mockbuildmock00000000000000# gettext.m4 serial 20 (gettext-0.12) dnl Copyright (C) 1995-2003 Free Software Foundation, Inc. dnl This file is free software, distributed under the terms of the GNU dnl General Public License. As a special exception to the GNU General dnl Public License, this file may be distributed as part of a program dnl that contains a configuration script generated by Autoconf, under dnl the same distribution terms as the rest of that program. dnl dnl This file can be used in projects which are not available under dnl the GNU General Public License or the GNU Library General Public dnl License but which still want to provide support for the GNU gettext dnl functionality. dnl Please note that the actual code of the GNU gettext library is covered dnl by the GNU Library General Public License, and the rest of the GNU dnl gettext package is covered by the GNU General Public License. dnl They are *not* in the public domain. dnl Authors: dnl Ulrich Drepper , 1995-2000. dnl Bruno Haible , 2000-2003. dnl Macro to add for using GNU gettext. dnl Usage: AM_GNU_GETTEXT([INTLSYMBOL], [NEEDSYMBOL], [INTLDIR]). dnl INTLSYMBOL can be one of 'external', 'no-libtool', 'use-libtool'. The dnl default (if it is not specified or empty) is 'no-libtool'. dnl INTLSYMBOL should be 'external' for packages with no intl directory, dnl and 'no-libtool' or 'use-libtool' for packages with an intl directory. dnl If INTLSYMBOL is 'use-libtool', then a libtool library dnl $(top_builddir)/intl/libintl.la will be created (shared and/or static, dnl depending on --{enable,disable}-{shared,static} and on the presence of dnl AM-DISABLE-SHARED). If INTLSYMBOL is 'no-libtool', a static library dnl $(top_builddir)/intl/libintl.a will be created. 
dnl If NEEDSYMBOL is specified and is 'need-ngettext', then GNU gettext dnl implementations (in libc or libintl) without the ngettext() function dnl will be ignored. If NEEDSYMBOL is specified and is dnl 'need-formatstring-macros', then GNU gettext implementations that don't dnl support the ISO C 99 formatstring macros will be ignored. dnl INTLDIR is used to find the intl libraries. If empty, dnl the value `$(top_builddir)/intl/' is used. dnl dnl The result of the configuration is one of three cases: dnl 1) GNU gettext, as included in the intl subdirectory, will be compiled dnl and used. dnl Catalog format: GNU --> install in $(datadir) dnl Catalog extension: .mo after installation, .gmo in source tree dnl 2) GNU gettext has been found in the system's C library. dnl Catalog format: GNU --> install in $(datadir) dnl Catalog extension: .mo after installation, .gmo in source tree dnl 3) No internationalization, always use English msgid. dnl Catalog format: none dnl Catalog extension: none dnl If INTLSYMBOL is 'external', only cases 2 and 3 can occur. dnl The use of .gmo is historical (it was needed to avoid overwriting the dnl GNU format catalogs when building on a platform with an X/Open gettext), dnl but we keep it in order not to force irrelevant filename changes on the dnl maintainers. dnl AC_DEFUN([AM_GNU_GETTEXT], [ dnl Argument checking. ifelse([$1], [], , [ifelse([$1], [external], , [ifelse([$1], [no-libtool], , [ifelse([$1], [use-libtool], , [errprint([ERROR: invalid first argument to AM_GNU_GETTEXT ])])])])]) ifelse([$2], [], , [ifelse([$2], [need-ngettext], , [ifelse([$2], [need-formatstring-macros], , [errprint([ERROR: invalid second argument to AM_GNU_GETTEXT ])])])]) define(gt_included_intl, ifelse([$1], [external], [no], [yes])) define(gt_libtool_suffix_prefix, ifelse([$1], [use-libtool], [l], [])) AC_REQUIRE([AM_PO_SUBDIRS])dnl ifelse(gt_included_intl, yes, [ AC_REQUIRE([AM_INTL_SUBDIR])dnl ]) dnl Prerequisites of AC_LIB_LINKFLAGS_BODY. AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) AC_REQUIRE([AC_LIB_RPATH]) dnl Sometimes libintl requires libiconv, so first search for libiconv. dnl Ideally we would do this search only after the dnl if test "$USE_NLS" = "yes"; then dnl if test "$gt_cv_func_gnugettext_libc" != "yes"; then dnl tests. But if configure.in invokes AM_ICONV after AM_GNU_GETTEXT dnl the configure script would need to contain the same shell code dnl again, outside any 'if'. There are two solutions: dnl - Invoke AM_ICONV_LINKFLAGS_BODY here, outside any 'if'. dnl - Control the expansions in more detail using AC_PROVIDE_IFELSE. dnl Since AC_PROVIDE_IFELSE is only in autoconf >= 2.52 and not dnl documented, we avoid it. ifelse(gt_included_intl, yes, , [ AC_REQUIRE([AM_ICONV_LINKFLAGS_BODY]) ]) dnl Set USE_NLS. AM_NLS ifelse(gt_included_intl, yes, [ BUILD_INCLUDED_LIBINTL=no USE_INCLUDED_LIBINTL=no ]) LIBINTL= LTLIBINTL= POSUB= dnl If we use NLS figure out what method if test "$USE_NLS" = "yes"; then gt_use_preinstalled_gnugettext=no ifelse(gt_included_intl, yes, [ AC_MSG_CHECKING([whether included gettext is requested]) AC_ARG_WITH(included-gettext, [ --with-included-gettext use the GNU gettext library included here], nls_cv_force_use_gnu_gettext=$withval, nls_cv_force_use_gnu_gettext=no) AC_MSG_RESULT($nls_cv_force_use_gnu_gettext) nls_cv_use_gnu_gettext="$nls_cv_force_use_gnu_gettext" if test "$nls_cv_force_use_gnu_gettext" != "yes"; then ]) dnl User does not insist on using GNU NLS library. Figure out what dnl to use. If GNU gettext is available we use this. 
Else we have dnl to fall back to GNU NLS library. dnl Add a version number to the cache macros. define([gt_api_version], ifelse([$2], [need-formatstring-macros], 3, ifelse([$2], [need-ngettext], 2, 1))) define([gt_cv_func_gnugettext_libc], [gt_cv_func_gnugettext]gt_api_version[_libc]) define([gt_cv_func_gnugettext_libintl], [gt_cv_func_gnugettext]gt_api_version[_libintl]) AC_CACHE_CHECK([for GNU gettext in libc], gt_cv_func_gnugettext_libc, [AC_TRY_LINK([#include ]ifelse([$2], [need-formatstring-macros], [#ifndef __GNU_GETTEXT_SUPPORTED_REVISION #define __GNU_GETTEXT_SUPPORTED_REVISION(major) ((major) == 0 ? 0 : -1) #endif changequote(,)dnl typedef int array [2 * (__GNU_GETTEXT_SUPPORTED_REVISION(0) >= 1) - 1]; changequote([,])dnl ], [])[extern int _nl_msg_cat_cntr; extern int *_nl_domain_bindings;], [bindtextdomain ("", ""); return (int) gettext ("")]ifelse([$2], [need-ngettext], [ + (int) ngettext ("", "", 0)], [])[ + _nl_msg_cat_cntr + *_nl_domain_bindings], gt_cv_func_gnugettext_libc=yes, gt_cv_func_gnugettext_libc=no)]) if test "$gt_cv_func_gnugettext_libc" != "yes"; then dnl Sometimes libintl requires libiconv, so first search for libiconv. ifelse(gt_included_intl, yes, , [ AM_ICONV_LINK ]) dnl Search for libintl and define LIBINTL, LTLIBINTL and INCINTL dnl accordingly. Don't use AC_LIB_LINKFLAGS_BODY([intl],[iconv]) dnl because that would add "-liconv" to LIBINTL and LTLIBINTL dnl even if libiconv doesn't exist. AC_LIB_LINKFLAGS_BODY([intl]) AC_CACHE_CHECK([for GNU gettext in libintl], gt_cv_func_gnugettext_libintl, [gt_save_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS $INCINTL" gt_save_LIBS="$LIBS" LIBS="$LIBS $LIBINTL" dnl Now see whether libintl exists and does not depend on libiconv. AC_TRY_LINK([#include ]ifelse([$2], [need-formatstring-macros], [#ifndef __GNU_GETTEXT_SUPPORTED_REVISION #define __GNU_GETTEXT_SUPPORTED_REVISION(major) ((major) == 0 ? 0 : -1) #endif changequote(,)dnl typedef int array [2 * (__GNU_GETTEXT_SUPPORTED_REVISION(0) >= 1) - 1]; changequote([,])dnl ], [])[extern int _nl_msg_cat_cntr; extern #ifdef __cplusplus "C" #endif const char *_nl_expand_alias ();], [bindtextdomain ("", ""); return (int) gettext ("")]ifelse([$2], [need-ngettext], [ + (int) ngettext ("", "", 0)], [])[ + _nl_msg_cat_cntr + *_nl_expand_alias (0)], gt_cv_func_gnugettext_libintl=yes, gt_cv_func_gnugettext_libintl=no) dnl Now see whether libintl exists and depends on libiconv. if test "$gt_cv_func_gnugettext_libintl" != yes && test -n "$LIBICONV"; then LIBS="$LIBS $LIBICONV" AC_TRY_LINK([#include ]ifelse([$2], [need-formatstring-macros], [#ifndef __GNU_GETTEXT_SUPPORTED_REVISION #define __GNU_GETTEXT_SUPPORTED_REVISION(major) ((major) == 0 ? 0 : -1) #endif changequote(,)dnl typedef int array [2 * (__GNU_GETTEXT_SUPPORTED_REVISION(0) >= 1) - 1]; changequote([,])dnl ], [])[extern int _nl_msg_cat_cntr; extern #ifdef __cplusplus "C" #endif const char *_nl_expand_alias ();], [bindtextdomain ("", ""); return (int) gettext ("")]ifelse([$2], [need-ngettext], [ + (int) ngettext ("", "", 0)], [])[ + _nl_msg_cat_cntr + *_nl_expand_alias (0)], [LIBINTL="$LIBINTL $LIBICONV" LTLIBINTL="$LTLIBINTL $LTLIBICONV" gt_cv_func_gnugettext_libintl=yes ]) fi CPPFLAGS="$gt_save_CPPFLAGS" LIBS="$gt_save_LIBS"]) fi dnl If an already present or preinstalled GNU gettext() is found, dnl use it. But if this macro is used in GNU gettext, and GNU dnl gettext is already preinstalled in libintl, we update this dnl libintl. (Cf. the install rule in intl/Makefile.in.) 
if test "$gt_cv_func_gnugettext_libc" = "yes" \ || { test "$gt_cv_func_gnugettext_libintl" = "yes" \ && test "$PACKAGE" != gettext-runtime \ && test "$PACKAGE" != gettext-tools; }; then gt_use_preinstalled_gnugettext=yes else dnl Reset the values set by searching for libintl. LIBINTL= LTLIBINTL= INCINTL= fi ifelse(gt_included_intl, yes, [ if test "$gt_use_preinstalled_gnugettext" != "yes"; then dnl GNU gettext is not found in the C library. dnl Fall back on included GNU gettext library. nls_cv_use_gnu_gettext=yes fi fi if test "$nls_cv_use_gnu_gettext" = "yes"; then dnl Mark actions used to generate GNU NLS library. BUILD_INCLUDED_LIBINTL=yes USE_INCLUDED_LIBINTL=yes LIBINTL="ifelse([$3],[],\${top_builddir}/intl,[$3])/libintl.[]gt_libtool_suffix_prefix[]a $LIBICONV" LTLIBINTL="ifelse([$3],[],\${top_builddir}/intl,[$3])/libintl.[]gt_libtool_suffix_prefix[]a $LTLIBICONV" LIBS=`echo " $LIBS " | sed -e 's/ -lintl / /' -e 's/^ //' -e 's/ $//'` fi if test "$gt_use_preinstalled_gnugettext" = "yes" \ || test "$nls_cv_use_gnu_gettext" = "yes"; then dnl Mark actions to use GNU gettext tools. CATOBJEXT=.gmo fi ]) if test "$gt_use_preinstalled_gnugettext" = "yes" \ || test "$nls_cv_use_gnu_gettext" = "yes"; then AC_DEFINE(ENABLE_NLS, 1, [Define to 1 if translation of program messages to the user's native language is requested.]) else USE_NLS=no fi fi AC_MSG_CHECKING([whether to use NLS]) AC_MSG_RESULT([$USE_NLS]) if test "$USE_NLS" = "yes"; then AC_MSG_CHECKING([where the gettext function comes from]) if test "$gt_use_preinstalled_gnugettext" = "yes"; then if test "$gt_cv_func_gnugettext_libintl" = "yes"; then gt_source="external libintl" else gt_source="libc" fi else gt_source="included intl directory" fi AC_MSG_RESULT([$gt_source]) fi if test "$USE_NLS" = "yes"; then if test "$gt_use_preinstalled_gnugettext" = "yes"; then if test "$gt_cv_func_gnugettext_libintl" = "yes"; then AC_MSG_CHECKING([how to link with libintl]) AC_MSG_RESULT([$LIBINTL]) AC_LIB_APPENDTOVAR([CPPFLAGS], [$INCINTL]) fi dnl For backward compatibility. Some packages may be using this. AC_DEFINE(HAVE_GETTEXT, 1, [Define if the GNU gettext() function is already present or preinstalled.]) AC_DEFINE(HAVE_DCGETTEXT, 1, [Define if the GNU dcgettext() function is already present or preinstalled.]) fi dnl We need to process the po/ directory. POSUB=po fi ifelse(gt_included_intl, yes, [ dnl If this is used in GNU gettext we have to set BUILD_INCLUDED_LIBINTL dnl to 'yes' because some of the testsuite requires it. if test "$PACKAGE" = gettext-runtime || test "$PACKAGE" = gettext-tools; then BUILD_INCLUDED_LIBINTL=yes fi dnl Make all variables we use known to autoconf. AC_SUBST(BUILD_INCLUDED_LIBINTL) AC_SUBST(USE_INCLUDED_LIBINTL) AC_SUBST(CATOBJEXT) dnl For backward compatibility. Some configure.ins may be using this. nls_cv_header_intl= nls_cv_header_libgt= dnl For backward compatibility. Some Makefiles may be using this. DATADIRNAME=share AC_SUBST(DATADIRNAME) dnl For backward compatibility. Some Makefiles may be using this. INSTOBJEXT=.mo AC_SUBST(INSTOBJEXT) dnl For backward compatibility. Some Makefiles may be using this. GENCAT=gencat AC_SUBST(GENCAT) dnl For backward compatibility. Some Makefiles may be using this. if test "$USE_INCLUDED_LIBINTL" = yes; then INTLOBJS="\$(GETTOBJS)" fi AC_SUBST(INTLOBJS) dnl Enable libtool support if the surrounding package wishes it. INTL_LIBTOOL_SUFFIX_PREFIX=gt_libtool_suffix_prefix AC_SUBST(INTL_LIBTOOL_SUFFIX_PREFIX) ]) dnl For backward compatibility. Some Makefiles may be using this. 
INTLLIBS="$LIBINTL" AC_SUBST(INTLLIBS) dnl Make all documented variables known to autoconf. AC_SUBST(LIBINTL) AC_SUBST(LTLIBINTL) AC_SUBST(POSUB) ]) dnl Checks for all prerequisites of the intl subdirectory, dnl except for INTL_LIBTOOL_SUFFIX_PREFIX (and possibly LIBTOOL), INTLOBJS, dnl USE_INCLUDED_LIBINTL, BUILD_INCLUDED_LIBINTL. AC_DEFUN([AM_INTL_SUBDIR], [ AC_REQUIRE([AC_PROG_INSTALL])dnl AC_REQUIRE([AM_MKINSTALLDIRS])dnl AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_PROG_RANLIB])dnl AC_REQUIRE([AC_ISC_POSIX])dnl AC_REQUIRE([AC_HEADER_STDC])dnl AC_REQUIRE([AC_C_CONST])dnl AC_REQUIRE([AC_C_INLINE])dnl AC_REQUIRE([AC_TYPE_OFF_T])dnl AC_REQUIRE([AC_TYPE_SIZE_T])dnl AC_REQUIRE([AC_FUNC_ALLOCA])dnl AC_REQUIRE([AC_FUNC_MMAP])dnl AC_REQUIRE([jm_GLIBC21])dnl AC_REQUIRE([gt_INTDIV0])dnl AC_REQUIRE([jm_AC_TYPE_UINTMAX_T])dnl AC_REQUIRE([gt_HEADER_INTTYPES_H])dnl AC_REQUIRE([gt_INTTYPES_PRI])dnl AC_CHECK_HEADERS([argz.h limits.h locale.h nl_types.h malloc.h stddef.h \ stdlib.h string.h unistd.h sys/param.h]) AC_CHECK_FUNCS([feof_unlocked fgets_unlocked getc_unlocked getcwd getegid \ geteuid getgid getuid mempcpy munmap putenv setenv setlocale stpcpy \ strcasecmp strdup strtoul tsearch __argz_count __argz_stringify __argz_next \ __fsetlocking]) AM_ICONV AM_LANGINFO_CODESET if test $ac_cv_header_locale_h = yes; then AM_LC_MESSAGES fi dnl intl/plural.c is generated from intl/plural.y. It requires bison, dnl because plural.y uses bison specific features. It requires at least dnl bison-1.26 because earlier versions generate a plural.c that doesn't dnl compile. dnl bison is only needed for the maintainer (who touches plural.y). But in dnl order to avoid separate Makefiles or --enable-maintainer-mode, we put dnl the rule in general Makefile. Now, some people carelessly touch the dnl files or have a broken "make" program, hence the plural.c rule will dnl sometimes fire. To avoid an error, defines BISON to ":" if it is not dnl present or too old. AC_CHECK_PROGS([INTLBISON], [bison]) if test -z "$INTLBISON"; then ac_verc_fail=yes else dnl Found it, now check the version. AC_MSG_CHECKING([version of bison]) changequote(<<,>>)dnl ac_prog_version=`$INTLBISON --version 2>&1 | sed -n 's/^.*GNU Bison.* \([0-9]*\.[0-9.]*\).*$/\1/p'` case $ac_prog_version in '') ac_prog_version="v. ?.??, bad"; ac_verc_fail=yes;; 1.2[6-9]* | 1.[3-9][0-9]* | [2-9].*) changequote([,])dnl ac_prog_version="$ac_prog_version, ok"; ac_verc_fail=no;; *) ac_prog_version="$ac_prog_version, bad"; ac_verc_fail=yes;; esac AC_MSG_RESULT([$ac_prog_version]) fi if test $ac_verc_fail = yes; then INTLBISON=: fi ]) dnl Usage: AM_GNU_GETTEXT_VERSION([gettext-version]) AC_DEFUN([AM_GNU_GETTEXT_VERSION], []) nordugrid-arc-5.4.2/m4/PaxHeaders.7502/lt~obsolete.m40000644000000000000000000000013213214315710020325 xustar000000000000000030 mtime=1513200584.514824272 30 atime=1513200584.693826462 30 ctime=1513200658.594730299 nordugrid-arc-5.4.2/m4/lt~obsolete.m40000644000175000002070000001311313214315710020372 0ustar00mockbuildmock00000000000000# lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*- # # Copyright (C) 2004, 2005, 2007 Free Software Foundation, Inc. # Written by Scott James Remnant, 2004. # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 4 lt~obsolete.m4 # These exist entirely to fool aclocal when bootstrapping libtool. 
# # In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN) # which have later been changed to m4_define as they aren't part of the # exported API, or moved to Autoconf or Automake where they belong. # # The trouble is, aclocal is a bit thick. It'll see the old AC_DEFUN # in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us # using a macro with the same name in our local m4/libtool.m4 it'll # pull the old libtool.m4 in (it doesn't see our shiny new m4_define # and doesn't know about Autoconf macros at all.) # # So we provide this file, which has a silly filename so it's always # included after everything else. This provides aclocal with the # AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything # because those macros already exist, or will be overwritten later. # We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6. # # Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here. # Yes, that means every name once taken will need to remain here until # we give up compatibility with versions before 1.7, at which point # we need to keep only those names which we still refer to. # This is to help aclocal find these macros, as it can't see m4_define. AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])]) m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])]) m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])]) m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])]) m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])]) m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])]) m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])]) m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])]) m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])]) m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])]) m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])]) m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])]) m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])]) m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])]) m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])]) m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])]) m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])]) m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])]) m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])]) m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])]) m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])]) m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])]) m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])]) m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])]) m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])]) m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])]) m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])]) m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], [AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])]) m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])]) m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])]) m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])]) m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])]) m4_ifndef([_LT_COMPILER_BOILERPLATE], 
[AC_DEFUN([_LT_COMPILER_BOILERPLATE])]) m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])]) m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])]) m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])]) m4_ifndef([_LT_AC_CHECK_DLFCN], [AC_DEFUN([_LT_AC_CHECK_DLFCN])]) m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])]) m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])]) m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])]) m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])]) m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])]) m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])]) m4_ifndef([AC_LIBTOOL_RC], [AC_DEFUN([AC_LIBTOOL_RC])]) m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])]) m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])]) m4_ifndef([_LT_AC_LANG_CXX_CONFIG], [AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])]) m4_ifndef([_LT_AC_LANG_F77_CONFIG], [AC_DEFUN([_LT_AC_LANG_F77_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])]) m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], [AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])]) m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])]) m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])]) m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])]) nordugrid-arc-5.4.2/m4/PaxHeaders.7502/fsusage.m40000644000000000000000000000012710776701515017430 xustar000000000000000027 mtime=1207665485.080762 30 atime=1513200579.480762704 30 ctime=1513200658.586730201 nordugrid-arc-5.4.2/m4/fsusage.m40000644000175000002070000001752210776701515017501 0ustar00mockbuildmock00000000000000#serial 23 # Obtaining file system usage information. # Copyright (C) 1997, 1998, 2000, 2001, 2003-2007 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # Written by Jim Meyering. AC_DEFUN([gl_FSUSAGE], [ AC_CHECK_HEADERS(sys/param.h) AC_CHECK_HEADERS(sys/vfs.h sys/fs_types.h) AC_CHECK_HEADERS(sys/mount.h, [], [], [AC_INCLUDES_DEFAULT [#if HAVE_SYS_PARAM_H #include #endif]]) gl_FILE_SYSTEM_USAGE([gl_cv_fs_space=yes], [gl_cv_fs_space=no]) if test $gl_cv_fs_space = yes; then AC_LIBOBJ(fsusage) gl_PREREQ_FSUSAGE_EXTRA fi ]) # Try to determine how a program can obtain file system usage information. # If successful, define the appropriate symbol (see fsusage.c) and # execute ACTION-IF-FOUND. Otherwise, execute ACTION-IF-NOT-FOUND. # # gl_FILE_SYSTEM_USAGE([ACTION-IF-FOUND[, ACTION-IF-NOT-FOUND]]) AC_DEFUN([gl_FILE_SYSTEM_USAGE], [ AC_MSG_NOTICE([checking how to get file system space usage]) ac_fsusage_space=no # Perform only the link test since it seems there are no variants of the # statvfs function. This check is more than just AC_CHECK_FUNCS(statvfs) # because that got a false positive on SCO OSR5. Adding the declaration # of a `struct statvfs' causes this test to fail (as it should) on such # systems. That system is reported to work fine with STAT_STATFS4 which # is what it gets when this test fails. 
if test $ac_fsusage_space = no; then # SVR4 AC_CACHE_CHECK([for statvfs function (SVR4)], fu_cv_sys_stat_statvfs, [AC_TRY_LINK([#include #if defined __GLIBC__ && !defined __BEOS__ Do not use statvfs on systems with GNU libc, because that function stats all preceding entries in /proc/mounts, and that makes df hang if even one of the corresponding file systems is hard-mounted, but not available. statvfs in GNU libc on BeOS operates differently: it only makes a system call. #endif #ifdef __osf__ "Do not use Tru64's statvfs implementation" #endif #include ], [struct statvfs fsd; statvfs (0, &fsd);], fu_cv_sys_stat_statvfs=yes, fu_cv_sys_stat_statvfs=no)]) if test $fu_cv_sys_stat_statvfs = yes; then ac_fsusage_space=yes AC_DEFINE(STAT_STATVFS, 1, [ Define if there is a function named statvfs. (SVR4)]) fi fi if test $ac_fsusage_space = no; then # DEC Alpha running OSF/1 AC_MSG_CHECKING([for 3-argument statfs function (DEC OSF/1)]) AC_CACHE_VAL(fu_cv_sys_stat_statfs3_osf1, [AC_TRY_RUN([ #include #include #include int main () { struct statfs fsd; fsd.f_fsize = 0; return statfs (".", &fsd, sizeof (struct statfs)) != 0; }], fu_cv_sys_stat_statfs3_osf1=yes, fu_cv_sys_stat_statfs3_osf1=no, fu_cv_sys_stat_statfs3_osf1=no)]) AC_MSG_RESULT($fu_cv_sys_stat_statfs3_osf1) if test $fu_cv_sys_stat_statfs3_osf1 = yes; then ac_fsusage_space=yes AC_DEFINE(STAT_STATFS3_OSF1, 1, [ Define if statfs takes 3 args. (DEC Alpha running OSF/1)]) fi fi if test $ac_fsusage_space = no; then # AIX AC_MSG_CHECKING([for two-argument statfs with statfs.bsize dnl member (AIX, 4.3BSD)]) AC_CACHE_VAL(fu_cv_sys_stat_statfs2_bsize, [AC_TRY_RUN([ #ifdef HAVE_SYS_PARAM_H #include #endif #ifdef HAVE_SYS_MOUNT_H #include #endif #ifdef HAVE_SYS_VFS_H #include #endif int main () { struct statfs fsd; fsd.f_bsize = 0; return statfs (".", &fsd) != 0; }], fu_cv_sys_stat_statfs2_bsize=yes, fu_cv_sys_stat_statfs2_bsize=no, fu_cv_sys_stat_statfs2_bsize=no)]) AC_MSG_RESULT($fu_cv_sys_stat_statfs2_bsize) if test $fu_cv_sys_stat_statfs2_bsize = yes; then ac_fsusage_space=yes AC_DEFINE(STAT_STATFS2_BSIZE, 1, [ Define if statfs takes 2 args and struct statfs has a field named f_bsize. (4.3BSD, SunOS 4, HP-UX, AIX PS/2)]) fi fi if test $ac_fsusage_space = no; then # SVR3 AC_MSG_CHECKING([for four-argument statfs (AIX-3.2.5, SVR3)]) AC_CACHE_VAL(fu_cv_sys_stat_statfs4, [AC_TRY_RUN([#include #include int main () { struct statfs fsd; return statfs (".", &fsd, sizeof fsd, 0) != 0; }], fu_cv_sys_stat_statfs4=yes, fu_cv_sys_stat_statfs4=no, fu_cv_sys_stat_statfs4=no)]) AC_MSG_RESULT($fu_cv_sys_stat_statfs4) if test $fu_cv_sys_stat_statfs4 = yes; then ac_fsusage_space=yes AC_DEFINE(STAT_STATFS4, 1, [ Define if statfs takes 4 args. (SVR3, Dynix, Irix, Dolphin)]) fi fi if test $ac_fsusage_space = no; then # 4.4BSD and NetBSD AC_MSG_CHECKING([for two-argument statfs with statfs.fsize dnl member (4.4BSD and NetBSD)]) AC_CACHE_VAL(fu_cv_sys_stat_statfs2_fsize, [AC_TRY_RUN([#include #ifdef HAVE_SYS_PARAM_H #include #endif #ifdef HAVE_SYS_MOUNT_H #include #endif int main () { struct statfs fsd; fsd.f_fsize = 0; return statfs (".", &fsd) != 0; }], fu_cv_sys_stat_statfs2_fsize=yes, fu_cv_sys_stat_statfs2_fsize=no, fu_cv_sys_stat_statfs2_fsize=no)]) AC_MSG_RESULT($fu_cv_sys_stat_statfs2_fsize) if test $fu_cv_sys_stat_statfs2_fsize = yes; then ac_fsusage_space=yes AC_DEFINE(STAT_STATFS2_FSIZE, 1, [ Define if statfs takes 2 args and struct statfs has a field named f_fsize. 
(4.4BSD, NetBSD)]) fi fi if test $ac_fsusage_space = no; then # Ultrix AC_MSG_CHECKING([for two-argument statfs with struct fs_data (Ultrix)]) AC_CACHE_VAL(fu_cv_sys_stat_fs_data, [AC_TRY_RUN([#include #ifdef HAVE_SYS_PARAM_H #include #endif #ifdef HAVE_SYS_MOUNT_H #include #endif #ifdef HAVE_SYS_FS_TYPES_H #include #endif int main () { struct fs_data fsd; /* Ultrix's statfs returns 1 for success, 0 for not mounted, -1 for failure. */ return statfs (".", &fsd) != 1; }], fu_cv_sys_stat_fs_data=yes, fu_cv_sys_stat_fs_data=no, fu_cv_sys_stat_fs_data=no)]) AC_MSG_RESULT($fu_cv_sys_stat_fs_data) if test $fu_cv_sys_stat_fs_data = yes; then ac_fsusage_space=yes AC_DEFINE(STAT_STATFS2_FS_DATA, 1, [ Define if statfs takes 2 args and the second argument has type struct fs_data. (Ultrix)]) fi fi if test $ac_fsusage_space = no; then # SVR2 AC_TRY_CPP([#include ], AC_DEFINE(STAT_READ_FILSYS, 1, [Define if there is no specific function for reading file systems usage information and you have the header file. (SVR2)]) ac_fsusage_space=yes) fi AS_IF([test $ac_fsusage_space = yes], [$1], [$2]) ]) # Check for SunOS statfs brokenness wrt partitions 2GB and larger. # If exists and struct statfs has a member named f_spare, # enable the work-around code in fsusage.c. AC_DEFUN([gl_STATFS_TRUNCATES], [ AC_MSG_CHECKING([for statfs that truncates block counts]) AC_CACHE_VAL(fu_cv_sys_truncating_statfs, [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #if !defined(sun) && !defined(__sun) choke -- this is a workaround for a Sun-specific problem #endif #include #include ]], [[struct statfs t; long c = *(t.f_spare); if (c) return 0;]])], [fu_cv_sys_truncating_statfs=yes], [fu_cv_sys_truncating_statfs=no])]) if test $fu_cv_sys_truncating_statfs = yes; then AC_DEFINE(STATFS_TRUNCATES_BLOCK_COUNTS, 1, [Define if the block counts reported by statfs may be truncated to 2GB and the correct values may be stored in the f_spare array. (SunOS 4.1.2, 4.1.3, and 4.1.3_U1 are reported to have this problem. SunOS 4.1.1 seems not to be affected.)]) fi AC_MSG_RESULT($fu_cv_sys_truncating_statfs) ]) # Prerequisites of lib/fsusage.c not done by gl_FILE_SYSTEM_USAGE. AC_DEFUN([gl_PREREQ_FSUSAGE_EXTRA], [ AC_CHECK_HEADERS(dustat.h sys/fs/s5param.h sys/filsys.h sys/statfs.h) gl_STATFS_TRUNCATES ]) nordugrid-arc-5.4.2/m4/PaxHeaders.7502/ltversion.m40000644000000000000000000000013213214315710020000 xustar000000000000000030 mtime=1513200584.378822609 30 atime=1513200584.696826498 30 ctime=1513200658.594730299 nordugrid-arc-5.4.2/m4/ltversion.m40000644000175000002070000000127713214315710020055 0ustar00mockbuildmock00000000000000# ltversion.m4 -- version numbers -*- Autoconf -*- # # Copyright (C) 2004 Free Software Foundation, Inc. # Written by Scott James Remnant, 2004 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # Generated from ltversion.in. 
# serial 3017 ltversion.m4 # This file is part of GNU Libtool m4_define([LT_PACKAGE_VERSION], [2.2.6b]) m4_define([LT_PACKAGE_REVISION], [1.3017]) AC_DEFUN([LTVERSION_VERSION], [macro_version='2.2.6b' macro_revision='1.3017' _LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) _LT_DECL(, macro_revision, 0) ]) nordugrid-arc-5.4.2/m4/PaxHeaders.7502/ac_cxx_have_sstream.m40000644000000000000000000000012610656572556022010 xustar000000000000000027 mtime=1186657646.676048 30 atime=1513200579.482762728 29 ctime=1513200658.58173014 nordugrid-arc-5.4.2/m4/ac_cxx_have_sstream.m40000644000175000002070000000134110656572556022052 0ustar00mockbuildmock00000000000000dnl @synopsis AC_CXX_HAVE_SSTREAM dnl dnl If the C++ library has a working stringstream, define HAVE_SSTREAM. dnl dnl @author Ben Stanley dnl @version $Id: ac_cxx_have_sstream.m4 3830 2005-06-24 07:01:15Z waananen $ dnl AC_DEFUN([AC_CXX_HAVE_SSTREAM], [AC_CACHE_CHECK(whether the compiler has stringstream, ac_cv_cxx_have_sstream, [AC_REQUIRE([AC_CXX_NAMESPACES]) AC_LANG_SAVE AC_LANG_CPLUSPLUS AC_TRY_COMPILE([#include #ifdef HAVE_NAMESPACES using namespace std; #endif],[stringstream message; message << "Hello"; return 0;], ac_cv_cxx_have_sstream=yes, ac_cv_cxx_have_sstream=no) AC_LANG_RESTORE ]) if test "$ac_cv_cxx_have_sstream" = yes; then AC_DEFINE(HAVE_SSTREAM,,[define if the compiler has stringstream]) fi ]) nordugrid-arc-5.4.2/m4/PaxHeaders.7502/gpt.m40000644000000000000000000000012711134117550016552 xustar000000000000000027 mtime=1232117608.531681 30 atime=1513200579.477762667 30 ctime=1513200658.588730225 nordugrid-arc-5.4.2/m4/gpt.m40000644000175000002070000000615711134117550016625 0ustar00mockbuildmock00000000000000# globus.m4 -*- Autoconf -*- # Macros to for compiling and linking against globus/gpt packages AC_DEFUN([GPT_PROG_GPT_FLAVOR_CONFIGURATION], [ AC_ARG_VAR([GPT_FLAVOR_CONFIGURATION], [path to gpt-flavor-configuration]) if test "x$ac_cv_env_GPT_FLAVOR_CONFIGURATION_set" != "xset"; then AC_PATH_TOOL([GPT_FLAVOR_CONFIGURATION], [gpt-flavor-configuration], [], $PATH:/usr/sbin:/opt/gpt/sbin) fi if test -f "$GPT_FLAVOR_CONFIGURATION" && test "x$GPT_LOCATION" = "x"; then GPT_LOCATION=`dirname $GPT_FLAVOR_CONFIGURATION` GPT_LOCATION=`dirname $GPT_LOCATION` export GPT_LOCATION fi ]) AC_DEFUN([GPT_PROG_GPT_QUERY], [ AC_ARG_VAR([GPT_QUERY], [path to gpt-query]) if test "x$ac_cv_env_GPT_QUERY_set" != "xset"; then AC_PATH_TOOL([GPT_QUERY], [gpt-query], [], $PATH:/usr/sbin:/opt/gpt/sbin) fi if test -f "$GPT_QUERY" && test "x$GPT_LOCATION" = "x"; then GPT_LOCATION=`dirname $GPT_QUERY` GPT_LOCATION=`dirname $GPT_LOCATION` export GPT_LOCATION fi ]) AC_DEFUN([GPT_PROG_GLOBUS_MAKEFILE_HEADER], [ AC_ARG_VAR([GLOBUS_MAKEFILE_HEADER], [path to globus-makefile-header]) if test "x$ac_cv_env_GLOBUS_MAKEFILE_HEADER_set" != "xset"; then AC_PATH_TOOL([GLOBUS_MAKEFILE_HEADER], [globus-makefile-header], [], $PATH:/opt/globus/bin) fi if test -f "$GLOBUS_MAKEFILE_HEADER" && test "x$GLOBUS_LOCATION" = "x"; then GLOBUS_LOCATION=`dirname $GLOBUS_MAKEFILE_HEADER` GLOBUS_LOCATION=`dirname $GLOBUS_LOCATION` export GLOBUS_LOCATION fi ]) AC_DEFUN([GPT_ARG_GPT_FLAVOR], [ AC_REQUIRE([GPT_PROG_GPT_FLAVOR_CONFIGURATION]) AC_MSG_CHECKING([for gpt flavor]) AC_ARG_WITH([flavor], AC_HELP_STRING([--with-flavor=(flavor)], [Specify the gpt build flavor [[autodetect]]]), [GPT_FLAVOR=$withval], if test -n "$GPT_FLAVOR_CONFIGURATION" ; then [GPT_FLAVOR=`$GPT_FLAVOR_CONFIGURATION | \\ grep '^[[a-zA-Z]].*:$' | cut -f1 -d: | grep thr | tail -1`] fi) if test -n 
"$GPT_FLAVOR"; then AC_MSG_RESULT($GPT_FLAVOR) else AC_MSG_RESULT([none detected, is globus_core-devel installed?]) fi ]) AC_DEFUN([GPT_PKG_VERSION], [ AC_REQUIRE([GPT_PROG_GPT_QUERY]) AC_REQUIRE([GPT_ARG_GPT_FLAVOR]) if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_[]$1[]_version=`$GPT_QUERY $1[]-[]$GPT_FLAVOR[]-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi ]) AC_DEFUN([GPT_PKG], [ AC_REQUIRE([GPT_PROG_GLOBUS_MAKEFILE_HEADER]) AC_REQUIRE([GPT_ARG_GPT_FLAVOR]) AC_MSG_CHECKING([for $1]) GPT_PKG_VERSION($1) if test -n "$gpt_cv_[]$1[]_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR $1 | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_[]$1[]_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_[]$1[]_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_[]$1[]_version"; then AC_MSG_RESULT($gpt_cv_[]$1[]_version) m4_toupper([$1])[]_VERSION=$gpt_cv_[]$1[]_version m4_toupper([$1])[]_LIBS=$gpt_cv_[]$1[]_libs m4_toupper([$1])[]_CFLAGS=$gpt_cv_[]$1[]_cflags else AC_MSG_RESULT(no) fi ]) nordugrid-arc-5.4.2/m4/PaxHeaders.7502/arc_api.m40000644000000000000000000000012712133203240017346 xustar000000000000000027 mtime=1366099616.110521 30 atime=1513200579.481762716 30 ctime=1513200658.583730164 nordugrid-arc-5.4.2/m4/arc_api.m40000644000175000002070000000400112133203240017403 0ustar00mockbuildmock00000000000000 # # ARC Public API # AC_DEFUN([ARC_API], [ ARCCLIENT_LIBS='$(top_builddir)/src/hed/libs/compute/libarccompute.la' ARCCLIENT_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCCLIENT_LIBS) AC_SUBST(ARCCLIENT_CFLAGS) ARCCOMMON_LIBS='$(top_builddir)/src/hed/libs/common/libarccommon.la' ARCCOMMON_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCCOMMON_LIBS) AC_SUBST(ARCCOMMON_CFLAGS) ARCCREDENTIAL_LIBS='$(top_builddir)/src/hed/libs/credential/libarccredential.la' ARCCREDENTIAL_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCCREDENTIAL_LIBS) AC_SUBST(ARCCREDENTIAL_CFLAGS) ARCDATA_LIBS='$(top_builddir)/src/hed/libs/data/libarcdata.la' ARCDATA_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCDATA_LIBS) AC_SUBST(ARCDATA_CFLAGS) ARCJOB_LIBS='$(top_builddir)/src/hed/libs/job/libarcjob.la' ARCJOB_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCJOB_LIBS) AC_SUBST(ARCJOB_CFLAGS) ARCLOADER_LIBS='$(top_builddir)/src/hed/libs/loader/libarcloader.la' ARCLOADER_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCLOADER_LIBS) AC_SUBST(ARCLOADER_CFLAGS) ARCMESSAGE_LIBS='$(top_builddir)/src/hed/libs/message/libarcmessage.la' ARCMESSAGE_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCMESSAGE_LIBS) AC_SUBST(ARCMESSAGE_CFLAGS) ARCSECURITY_LIBS='$(top_builddir)/src/hed/libs/security/libarcsecurity.la' ARCSECURITY_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCSECURITY_LIBS) AC_SUBST(ARCSECURITY_CFLAGS) ARCINFOSYS_LIBS='$(top_builddir)/src/hed/libs/infosys/libarcinfosys.la' ARCINFOSYS_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCINFOSYS_LIBS) AC_SUBST(ARCINFOSYS_CFLAGS) ARCWS_LIBS='$(top_builddir)/src/hed/libs/ws/libarcws.la' ARCWS_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCWS_LIBS) AC_SUBST(ARCWS_CFLAGS) ARCWSSECURITY_LIBS='$(top_builddir)/src/hed/libs/ws-security/libarcwssecurity.la' ARCWSSECURITY_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCWSSECURITY_LIBS) AC_SUBST(ARCWSSECURITY_CFLAGS) ARCXMLSEC_LIBS='$(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la' ARCXMLSEC_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCXMLSEC_LIBS) AC_SUBST(ARCXMLSEC_CFLAGS) ]) 
nordugrid-arc-5.4.2/m4/PaxHeaders.7502/progtest.m40000644000000000000000000000013213214315702017623 xustar000000000000000030 mtime=1513200578.282748052 30 atime=1513200579.470762582 30 ctime=1513200658.597730335 nordugrid-arc-5.4.2/m4/progtest.m40000644000175000002070000000563413214315702017701 0ustar00mockbuildmock00000000000000# progtest.m4 serial 3 (gettext-0.12) dnl Copyright (C) 1996-2003 Free Software Foundation, Inc. dnl This file is free software, distributed under the terms of the GNU dnl General Public License. As a special exception to the GNU General dnl Public License, this file may be distributed as part of a program dnl that contains a configuration script generated by Autoconf, under dnl the same distribution terms as the rest of that program. dnl dnl This file can can be used in projects which are not available under dnl the GNU General Public License or the GNU Library General Public dnl License but which still want to provide support for the GNU gettext dnl functionality. dnl Please note that the actual code of the GNU gettext library is covered dnl by the GNU Library General Public License, and the rest of the GNU dnl gettext package package is covered by the GNU General Public License. dnl They are *not* in the public domain. dnl Authors: dnl Ulrich Drepper , 1996. # Search path for a program which passes the given test. dnl AM_PATH_PROG_WITH_TEST(VARIABLE, PROG-TO-CHECK-FOR, dnl TEST-PERFORMED-ON-FOUND_PROGRAM [, VALUE-IF-NOT-FOUND [, PATH]]) AC_DEFUN([AM_PATH_PROG_WITH_TEST], [ # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi # Find out how to test for executable files. Don't use a zero-byte file, # as systems may use methods other than mode bits to determine executability. cat >conf$$.file <<_ASEOF #! /bin/sh exit 0 _ASEOF chmod +x conf$$.file if test -x conf$$.file >/dev/null 2>&1; then ac_executable_p="test -x" else ac_executable_p="test -f" fi rm -f conf$$.file # Extract the first word of "$2", so it can be a program name with args. set dummy $2; ac_word=[$]2 AC_MSG_CHECKING([for $ac_word]) AC_CACHE_VAL(ac_cv_path_$1, [case "[$]$1" in [[\\/]]* | ?:[[\\/]]*) ac_cv_path_$1="[$]$1" # Let the user override the test with a path. ;; *) ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in ifelse([$5], , $PATH, [$5]); do IFS="$ac_save_IFS" test -z "$ac_dir" && ac_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then if [$3]; then ac_cv_path_$1="$ac_dir/$ac_word$ac_exec_ext" break 2 fi fi done done IFS="$ac_save_IFS" dnl If no 4th arg is given, leave the cache variable unset, dnl so AC_PATH_PROGS will keep looking. ifelse([$4], , , [ test -z "[$]ac_cv_path_$1" && ac_cv_path_$1="$4" ])dnl ;; esac])dnl $1="$ac_cv_path_$1" if test ifelse([$4], , [-n "[$]$1"], ["[$]$1" != "$4"]); then AC_MSG_RESULT([$]$1) else AC_MSG_RESULT(no) fi AC_SUBST($1)dnl ]) nordugrid-arc-5.4.2/m4/PaxHeaders.7502/ltsugar.m40000644000000000000000000000013213214315710017434 xustar000000000000000030 mtime=1513200584.240820921 30 atime=1513200584.696826498 30 ctime=1513200658.593730287 nordugrid-arc-5.4.2/m4/ltsugar.m40000644000175000002070000001042413214315710017503 0ustar00mockbuildmock00000000000000# ltsugar.m4 -- libtool m4 base layer. 
-*-Autoconf-*- # # Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc. # Written by Gary V. Vaughan, 2004 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 6 ltsugar.m4 # This is to help aclocal find these macros, as it can't see m4_define. AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])]) # lt_join(SEP, ARG1, [ARG2...]) # ----------------------------- # Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their # associated separator. # Needed until we can rely on m4_join from Autoconf 2.62, since all earlier # versions in m4sugar had bugs. m4_define([lt_join], [m4_if([$#], [1], [], [$#], [2], [[$2]], [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])]) m4_define([_lt_join], [m4_if([$#$2], [2], [], [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])]) # lt_car(LIST) # lt_cdr(LIST) # ------------ # Manipulate m4 lists. # These macros are necessary as long as will still need to support # Autoconf-2.59 which quotes differently. m4_define([lt_car], [[$1]]) m4_define([lt_cdr], [m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])], [$#], 1, [], [m4_dquote(m4_shift($@))])]) m4_define([lt_unquote], $1) # lt_append(MACRO-NAME, STRING, [SEPARATOR]) # ------------------------------------------ # Redefine MACRO-NAME to hold its former content plus `SEPARATOR'`STRING'. # Note that neither SEPARATOR nor STRING are expanded; they are appended # to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked). # No SEPARATOR is output if MACRO-NAME was previously undefined (different # than defined and empty). # # This macro is needed until we can rely on Autoconf 2.62, since earlier # versions of m4sugar mistakenly expanded SEPARATOR but not STRING. m4_define([lt_append], [m4_define([$1], m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])]) # lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...]) # ---------------------------------------------------------- # Produce a SEP delimited list of all paired combinations of elements of # PREFIX-LIST with SUFFIX1 through SUFFIXn. Each element of the list # has the form PREFIXmINFIXSUFFIXn. # Needed until we can rely on m4_combine added in Autoconf 2.62. m4_define([lt_combine], [m4_if(m4_eval([$# > 3]), [1], [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl [[m4_foreach([_Lt_prefix], [$2], [m4_foreach([_Lt_suffix], ]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[, [_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])]) # lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ]) # ----------------------------------------------------------------------- # Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited # by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ. 
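# (Added illustration; not part of the upstream ltsugar.m4.)  For example, if
# `opts' currently expands to `-a -b', then
#   lt_if_append_uniq([opts], [-c], [ ], [added], [dup])
# redefines `opts' to `-a -b -c' and expands to `added', whereas appending
# `-b' again leaves `opts' unchanged and expands to `dup'.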
m4_define([lt_if_append_uniq], [m4_ifdef([$1], [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1], [lt_append([$1], [$2], [$3])$4], [$5])], [lt_append([$1], [$2], [$3])$4])]) # lt_dict_add(DICT, KEY, VALUE) # ----------------------------- m4_define([lt_dict_add], [m4_define([$1($2)], [$3])]) # lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE) # -------------------------------------------- m4_define([lt_dict_add_subkey], [m4_define([$1($2:$3)], [$4])]) # lt_dict_fetch(DICT, KEY, [SUBKEY]) # ---------------------------------- m4_define([lt_dict_fetch], [m4_ifval([$3], m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]), m4_ifdef([$1($2)], [m4_defn([$1($2)])]))]) # lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE]) # ----------------------------------------------------------------- m4_define([lt_if_dict_fetch], [m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4], [$5], [$6])]) # lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...]) # -------------------------------------------------------------- m4_define([lt_dict_filter], [m4_if([$5], [], [], [lt_join(m4_quote(m4_default([$4], [[, ]])), lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]), [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl ]) nordugrid-arc-5.4.2/m4/PaxHeaders.7502/lib-link.m40000644000000000000000000000013113214315702017454 xustar000000000000000030 mtime=1513200578.236747489 30 atime=1513200579.472762606 29 ctime=1513200658.59073025 nordugrid-arc-5.4.2/m4/lib-link.m40000644000175000002070000005534313214315702017535 0ustar00mockbuildmock00000000000000# lib-link.m4 serial 4 (gettext-0.12) dnl Copyright (C) 2001-2003 Free Software Foundation, Inc. dnl This file is free software, distributed under the terms of the GNU dnl General Public License. As a special exception to the GNU General dnl Public License, this file may be distributed as part of a program dnl that contains a configuration script generated by Autoconf, under dnl the same distribution terms as the rest of that program. dnl From Bruno Haible. dnl AC_LIB_LINKFLAGS(name [, dependencies]) searches for libname and dnl the libraries corresponding to explicit and implicit dependencies. dnl Sets and AC_SUBSTs the LIB${NAME} and LTLIB${NAME} variables and dnl augments the CPPFLAGS variable. AC_DEFUN([AC_LIB_LINKFLAGS], [ AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) AC_REQUIRE([AC_LIB_RPATH]) define([Name],[translit([$1],[./-], [___])]) define([NAME],[translit([$1],[abcdefghijklmnopqrstuvwxyz./-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ___])]) AC_CACHE_CHECK([how to link with lib[]$1], [ac_cv_lib[]Name[]_libs], [ AC_LIB_LINKFLAGS_BODY([$1], [$2]) ac_cv_lib[]Name[]_libs="$LIB[]NAME" ac_cv_lib[]Name[]_ltlibs="$LTLIB[]NAME" ac_cv_lib[]Name[]_cppflags="$INC[]NAME" ]) LIB[]NAME="$ac_cv_lib[]Name[]_libs" LTLIB[]NAME="$ac_cv_lib[]Name[]_ltlibs" INC[]NAME="$ac_cv_lib[]Name[]_cppflags" AC_LIB_APPENDTOVAR([CPPFLAGS], [$INC]NAME) AC_SUBST([LIB]NAME) AC_SUBST([LTLIB]NAME) dnl Also set HAVE_LIB[]NAME so that AC_LIB_HAVE_LINKFLAGS can reuse the dnl results of this search when this library appears as a dependency. HAVE_LIB[]NAME=yes undefine([Name]) undefine([NAME]) ]) dnl AC_LIB_HAVE_LINKFLAGS(name, dependencies, includes, testcode) dnl searches for libname and the libraries corresponding to explicit and dnl implicit dependencies, together with the specified include files and dnl the ability to compile and link the specified testcode. 
If found, it dnl sets and AC_SUBSTs HAVE_LIB${NAME}=yes and the LIB${NAME} and dnl LTLIB${NAME} variables and augments the CPPFLAGS variable, and dnl #defines HAVE_LIB${NAME} to 1. Otherwise, it sets and AC_SUBSTs dnl HAVE_LIB${NAME}=no and LIB${NAME} and LTLIB${NAME} to empty. AC_DEFUN([AC_LIB_HAVE_LINKFLAGS], [ AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) AC_REQUIRE([AC_LIB_RPATH]) define([Name],[translit([$1],[./-], [___])]) define([NAME],[translit([$1],[abcdefghijklmnopqrstuvwxyz./-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ___])]) dnl Search for lib[]Name and define LIB[]NAME, LTLIB[]NAME and INC[]NAME dnl accordingly. AC_LIB_LINKFLAGS_BODY([$1], [$2]) dnl Add $INC[]NAME to CPPFLAGS before performing the following checks, dnl because if the user has installed lib[]Name and not disabled its use dnl via --without-lib[]Name-prefix, he wants to use it. ac_save_CPPFLAGS="$CPPFLAGS" AC_LIB_APPENDTOVAR([CPPFLAGS], [$INC]NAME) AC_CACHE_CHECK([for lib[]$1], [ac_cv_lib[]Name], [ ac_save_LIBS="$LIBS" LIBS="$LIBS $LIB[]NAME" AC_TRY_LINK([$3], [$4], [ac_cv_lib[]Name=yes], [ac_cv_lib[]Name=no]) LIBS="$ac_save_LIBS" ]) if test "$ac_cv_lib[]Name" = yes; then HAVE_LIB[]NAME=yes AC_DEFINE([HAVE_LIB]NAME, 1, [Define if you have the $1 library.]) AC_MSG_CHECKING([how to link with lib[]$1]) AC_MSG_RESULT([$LIB[]NAME]) else HAVE_LIB[]NAME=no dnl If $LIB[]NAME didn't lead to a usable library, we don't need dnl $INC[]NAME either. CPPFLAGS="$ac_save_CPPFLAGS" LIB[]NAME= LTLIB[]NAME= fi AC_SUBST([HAVE_LIB]NAME) AC_SUBST([LIB]NAME) AC_SUBST([LTLIB]NAME) undefine([Name]) undefine([NAME]) ]) dnl Determine the platform dependent parameters needed to use rpath: dnl libext, shlibext, hardcode_libdir_flag_spec, hardcode_libdir_separator, dnl hardcode_direct, hardcode_minus_L. AC_DEFUN([AC_LIB_RPATH], [ AC_REQUIRE([AC_PROG_CC]) dnl we use $CC, $GCC, $LDFLAGS AC_REQUIRE([AC_LIB_PROG_LD]) dnl we use $LD, $with_gnu_ld AC_REQUIRE([AC_CANONICAL_HOST]) dnl we use $host AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT]) dnl we use $ac_aux_dir AC_CACHE_CHECK([for shared library run path origin], acl_cv_rpath, [ CC="$CC" GCC="$GCC" LDFLAGS="$LDFLAGS" LD="$LD" with_gnu_ld="$with_gnu_ld" \ ${CONFIG_SHELL-/bin/sh} "$ac_aux_dir/config.rpath" "$host" > conftest.sh . ./conftest.sh rm -f ./conftest.sh acl_cv_rpath=done ]) wl="$acl_cv_wl" libext="$acl_cv_libext" shlibext="$acl_cv_shlibext" hardcode_libdir_flag_spec="$acl_cv_hardcode_libdir_flag_spec" hardcode_libdir_separator="$acl_cv_hardcode_libdir_separator" hardcode_direct="$acl_cv_hardcode_direct" hardcode_minus_L="$acl_cv_hardcode_minus_L" dnl Determine whether the user wants rpath handling at all. AC_ARG_ENABLE(rpath, [ --disable-rpath do not hardcode runtime library paths], :, enable_rpath=yes) ]) dnl AC_LIB_LINKFLAGS_BODY(name [, dependencies]) searches for libname and dnl the libraries corresponding to explicit and implicit dependencies. dnl Sets the LIB${NAME}, LTLIB${NAME} and INC${NAME} variables. AC_DEFUN([AC_LIB_LINKFLAGS_BODY], [ define([NAME],[translit([$1],[abcdefghijklmnopqrstuvwxyz./-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ___])]) dnl By default, look in $includedir and $libdir. 
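  dnl (Editorial summary; not part of the upstream gettext macro.)  The code
  dnl below first honours --with-lib$1-prefix=DIR, adding DIR/include and
  dnl DIR/lib to the search, and then walks lib$1 plus its dependencies
  dnl breadth-first: shared libraries are preferred over static ones, run-path
  dnl directories are collected for later -rpath/-R handling, and additional
  dnl dependencies are pulled from any libtool .la files that are found.  For
  dnl instance, AC_LIB_LINKFLAGS_BODY([iconv]) ends up defining LIBICONV,
  dnl LTLIBICONV and INCICONV.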
use_additional=yes AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" ]) AC_LIB_ARG_WITH([lib$1-prefix], [ --with-lib$1-prefix[=DIR] search for lib$1 in DIR/include and DIR/lib --without-lib$1-prefix don't search for lib$1 in includedir and libdir], [ if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" ]) else additional_includedir="$withval/include" additional_libdir="$withval/lib" fi fi ]) dnl Search the library and its dependencies in $additional_libdir and dnl $LDFLAGS. Using breadth-first-seach. LIB[]NAME= LTLIB[]NAME= INC[]NAME= rpathdirs= ltrpathdirs= names_already_handled= names_next_round='$1 $2' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= for name in $names_this_round; do already_handled= for n in $names_already_handled; do if test "$n" = "$name"; then already_handled=yes break fi done if test -z "$already_handled"; then names_already_handled="$names_already_handled $name" dnl See if it was already located by an earlier AC_LIB_LINKFLAGS dnl or AC_LIB_HAVE_LINKFLAGS call. uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./-|ABCDEFGHIJKLMNOPQRSTUVWXYZ___|'` eval value=\"\$HAVE_LIB$uppername\" if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" test -z "$value" || LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$value" eval value=\"\$LTLIB$uppername\" test -z "$value" || LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }$value" else dnl An earlier call to AC_LIB_HAVE_LINKFLAGS has determined dnl that this library doesn't exist. So just drop it. : fi else dnl Search the library lib$name in $additional_libdir and $LDFLAGS dnl and the already constructed $LIBNAME/$LTLIBNAME. found_dir= found_la= found_so= found_a= if test $use_additional = yes; then if test -n "$shlibext" && test -f "$additional_libdir/lib$name.$shlibext"; then found_dir="$additional_libdir" found_so="$additional_libdir/lib$name.$shlibext" if test -f "$additional_libdir/lib$name.la"; then found_la="$additional_libdir/lib$name.la" fi else if test -f "$additional_libdir/lib$name.$libext"; then found_dir="$additional_libdir" found_a="$additional_libdir/lib$name.$libext" if test -f "$additional_libdir/lib$name.la"; then found_la="$additional_libdir/lib$name.la" fi fi fi fi if test "X$found_dir" = "X"; then for x in $LDFLAGS $LTLIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) case "$x" in -L*) dir=`echo "X$x" | sed -e 's/^X-L//'` if test -n "$shlibext" && test -f "$dir/lib$name.$shlibext"; then found_dir="$dir" found_so="$dir/lib$name.$shlibext" if test -f "$dir/lib$name.la"; then found_la="$dir/lib$name.la" fi else if test -f "$dir/lib$name.$libext"; then found_dir="$dir" found_a="$dir/lib$name.$libext" if test -f "$dir/lib$name.la"; then found_la="$dir/lib$name.la" fi fi fi ;; esac if test "X$found_dir" != "X"; then break fi done fi if test "X$found_dir" != "X"; then dnl Found the library. LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then dnl Linking with a shared library. We attempt to hardcode its dnl directory into the executable's runpath, unless it's the dnl standard /usr/lib. if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/lib"; then dnl No hardcoding is needed. 
LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" else dnl Use an explicit option to hardcode DIR into the resulting dnl binary. dnl Potentially add DIR to ltrpathdirs. dnl The ltrpathdirs will be appended to $LTLIBNAME at the end. haveit= for x in $ltrpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $found_dir" fi dnl The hardcoding into $LIBNAME is system dependent. if test "$hardcode_direct" = yes; then dnl Using DIR/libNAME.so during linking hardcodes DIR into the dnl resulting binary. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" else if test -n "$hardcode_libdir_flag_spec" && test "$hardcode_minus_L" = no; then dnl Use an explicit option to hardcode DIR into the resulting dnl binary. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" dnl Potentially add DIR to rpathdirs. dnl The rpathdirs will be appended to $LIBNAME at the end. haveit= for x in $rpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $found_dir" fi else dnl Rely on "-L$found_dir". dnl But don't add it if it's already contained in the LDFLAGS dnl or the already constructed $LIBNAME haveit= for x in $LDFLAGS $LIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$found_dir" fi if test "$hardcode_minus_L" != no; then dnl FIXME: Not sure whether we should use dnl "-L$found_dir -l$name" or "-L$found_dir $found_so" dnl here. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" else dnl We cannot use $hardcode_runpath_var and LD_RUN_PATH dnl here, because this doesn't fit in flags passed to the dnl compiler. So give up. No hardcoding. This affects only dnl very old systems. dnl FIXME: Not sure whether we should use dnl "-L$found_dir -l$name" or "-L$found_dir $found_so" dnl here. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then dnl Linking with a static library. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_a" else dnl We shouldn't come here, but anyway it's good to have a dnl fallback. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$found_dir -l$name" fi fi dnl Assume the include files are nearby. additional_includedir= case "$found_dir" in */lib | */lib/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e 's,/lib/*$,,'` additional_includedir="$basedir/include" ;; esac if test "X$additional_includedir" != "X"; then dnl Potentially add $additional_includedir to $INCNAME. dnl But don't add it dnl 1. if it's the standard /usr/include, dnl 2. if it's /usr/local/include and we are using GCC on Linux, dnl 3. if it's already present in $CPPFLAGS or the already dnl constructed $INCNAME, dnl 4. if it doesn't exist as a directory. if test "X$additional_includedir" != "X/usr/include"; then haveit= if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux*) haveit=yes;; esac fi fi if test -z "$haveit"; then for x in $CPPFLAGS $INC[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then dnl Really add $additional_includedir to $INCNAME. INC[]NAME="${INC[]NAME}${INC[]NAME:+ }-I$additional_includedir" fi fi fi fi fi dnl Look for dependencies. if test -n "$found_la"; then dnl Read the .la file. 
It defines the variables dnl dlname, library_names, old_library, dependency_libs, current, dnl age, revision, installed, dlopen, dlpreopen, libdir. save_libdir="$libdir" case "$found_la" in */* | *\\*) . "$found_la" ;; *) . "./$found_la" ;; esac libdir="$save_libdir" dnl We use only dependency_libs. for dep in $dependency_libs; do case "$dep" in -L*) additional_libdir=`echo "X$dep" | sed -e 's/^X-L//'` dnl Potentially add $additional_libdir to $LIBNAME and $LTLIBNAME. dnl But don't add it dnl 1. if it's the standard /usr/lib, dnl 2. if it's /usr/local/lib and we are using GCC on Linux, dnl 3. if it's already present in $LDFLAGS or the already dnl constructed $LIBNAME, dnl 4. if it doesn't exist as a directory. if test "X$additional_libdir" != "X/usr/lib"; then haveit= if test "X$additional_libdir" = "X/usr/local/lib"; then if test -n "$GCC"; then case $host_os in linux*) haveit=yes;; esac fi fi if test -z "$haveit"; then haveit= for x in $LDFLAGS $LIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then dnl Really add $additional_libdir to $LIBNAME. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$additional_libdir" fi fi haveit= for x in $LDFLAGS $LTLIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then dnl Really add $additional_libdir to $LTLIBNAME. LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-L$additional_libdir" fi fi fi fi ;; -R*) dir=`echo "X$dep" | sed -e 's/^X-R//'` if test "$enable_rpath" != no; then dnl Potentially add DIR to rpathdirs. dnl The rpathdirs will be appended to $LIBNAME at the end. haveit= for x in $rpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $dir" fi dnl Potentially add DIR to ltrpathdirs. dnl The ltrpathdirs will be appended to $LTLIBNAME at the end. haveit= for x in $ltrpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $dir" fi fi ;; -l*) dnl Handle this in the next round. names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` ;; *.la) dnl Handle this in the next round. Throw away the .la's dnl directory; it is already contained in a preceding -L dnl option. names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) dnl Most likely an immediate library name. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$dep" LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }$dep" ;; esac done fi else dnl Didn't find the library; assume it is in the system directories dnl known to the linker and runtime loader. (All the system dnl directories known to the linker should also be known to the dnl runtime loader, otherwise the system is severely misconfigured.) LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-l$name" LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-l$name" fi fi fi done done if test "X$rpathdirs" != "X"; then if test -n "$hardcode_libdir_separator"; then dnl Weird platform: only the last -rpath option counts, the user must dnl pass all path elements in one option. We can arrange that for a dnl single library, but not when more than one $LIBNAMEs are used. alldirs= for found_dir in $rpathdirs; do alldirs="${alldirs}${alldirs:+$hardcode_libdir_separator}$found_dir" done dnl Note: hardcode_libdir_flag_spec uses $libdir and $wl. 
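        dnl (Editorial note; not part of the upstream macro.)  $libdir is
        dnl rebound to the separator-joined directory list only for the eval
        dnl below, so that $hardcode_libdir_flag_spec (which refers to $libdir
        dnl and $wl) expands to a single run-path option covering all of the
        dnl collected directories; the original $libdir is restored right
        dnl afterwards.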
acl_save_libdir="$libdir" libdir="$alldirs" eval flag=\"$hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$flag" else dnl The -rpath options are cumulative. for found_dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then dnl When using libtool, the option that works for both libraries and dnl executables is -R. The -R options are cumulative. for found_dir in $ltrpathdirs; do LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-R$found_dir" done fi ]) dnl AC_LIB_APPENDTOVAR(VAR, CONTENTS) appends the elements of CONTENTS to VAR, dnl unless already present in VAR. dnl Works only for CPPFLAGS, not for LIB* variables because that sometimes dnl contains two or three consecutive elements that belong together. AC_DEFUN([AC_LIB_APPENDTOVAR], [ for element in [$2]; do haveit= for x in $[$1]; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X$element"; then haveit=yes break fi done if test -z "$haveit"; then [$1]="${[$1]}${[$1]:+ }$element" fi done ]) nordugrid-arc-5.4.2/m4/PaxHeaders.7502/lib-prefix.m40000644000000000000000000000013213214315702020015 xustar000000000000000030 mtime=1513200578.247747624 30 atime=1513200579.472762606 30 ctime=1513200658.591730262 nordugrid-arc-5.4.2/m4/lib-prefix.m40000644000175000002070000001250513214315702020066 0ustar00mockbuildmock00000000000000# lib-prefix.m4 serial 2 (gettext-0.12) dnl Copyright (C) 2001-2003 Free Software Foundation, Inc. dnl This file is free software, distributed under the terms of the GNU dnl General Public License. As a special exception to the GNU General dnl Public License, this file may be distributed as part of a program dnl that contains a configuration script generated by Autoconf, under dnl the same distribution terms as the rest of that program. dnl From Bruno Haible. dnl AC_LIB_ARG_WITH is synonymous to AC_ARG_WITH in autoconf-2.13, and dnl similar to AC_ARG_WITH in autoconf 2.52...2.57 except that is doesn't dnl require excessive bracketing. ifdef([AC_HELP_STRING], [AC_DEFUN([AC_LIB_ARG_WITH], [AC_ARG_WITH([$1],[[$2]],[$3],[$4])])], [AC_DEFUN([AC_LIB_ARG_WITH], [AC_ARG_WITH([$1],[$2],[$3],[$4])])]) dnl AC_LIB_PREFIX adds to the CPPFLAGS and LDFLAGS the flags that are needed dnl to access previously installed libraries. The basic assumption is that dnl a user will want packages to use other packages he previously installed dnl with the same --prefix option. dnl This macro is not needed if only AC_LIB_LINKFLAGS is used to locate dnl libraries, but is otherwise very convenient. AC_DEFUN([AC_LIB_PREFIX], [ AC_BEFORE([$0], [AC_LIB_LINKFLAGS]) AC_REQUIRE([AC_PROG_CC]) AC_REQUIRE([AC_CANONICAL_HOST]) AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) dnl By default, look in $includedir and $libdir. 
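  dnl (Editorial summary; not part of the upstream gettext macro.)  The code
  dnl below augments CPPFLAGS with -I$DIR/include and LDFLAGS with -L$DIR/lib
  dnl for the prefix chosen via --with-lib-prefix (defaulting to the configured
  dnl $includedir and $libdir), skipping /usr/include and /usr/lib, directories
  dnl already present in the flags, /usr/local directories that GCC on Linux
  dnl searches anyway, and directories that do not exist.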
use_additional=yes AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" ]) AC_LIB_ARG_WITH([lib-prefix], [ --with-lib-prefix[=DIR] search for libraries in DIR/include and DIR/lib --without-lib-prefix don't search for libraries in includedir and libdir], [ if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" ]) else additional_includedir="$withval/include" additional_libdir="$withval/lib" fi fi ]) if test $use_additional = yes; then dnl Potentially add $additional_includedir to $CPPFLAGS. dnl But don't add it dnl 1. if it's the standard /usr/include, dnl 2. if it's already present in $CPPFLAGS, dnl 3. if it's /usr/local/include and we are using GCC on Linux, dnl 4. if it doesn't exist as a directory. if test "X$additional_includedir" != "X/usr/include"; then haveit= for x in $CPPFLAGS; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux*) haveit=yes;; esac fi fi if test -z "$haveit"; then if test -d "$additional_includedir"; then dnl Really add $additional_includedir to $CPPFLAGS. CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }-I$additional_includedir" fi fi fi fi dnl Potentially add $additional_libdir to $LDFLAGS. dnl But don't add it dnl 1. if it's the standard /usr/lib, dnl 2. if it's already present in $LDFLAGS, dnl 3. if it's /usr/local/lib and we are using GCC on Linux, dnl 4. if it doesn't exist as a directory. if test "X$additional_libdir" != "X/usr/lib"; then haveit= for x in $LDFLAGS; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test "X$additional_libdir" = "X/usr/local/lib"; then if test -n "$GCC"; then case $host_os in linux*) haveit=yes;; esac fi fi if test -z "$haveit"; then if test -d "$additional_libdir"; then dnl Really add $additional_libdir to $LDFLAGS. LDFLAGS="${LDFLAGS}${LDFLAGS:+ }-L$additional_libdir" fi fi fi fi fi ]) dnl AC_LIB_PREPARE_PREFIX creates variables acl_final_prefix, dnl acl_final_exec_prefix, containing the values to which $prefix and dnl $exec_prefix will expand at the end of the configure script. AC_DEFUN([AC_LIB_PREPARE_PREFIX], [ dnl Unfortunately, prefix and exec_prefix get only finally determined dnl at the end of configure. if test "X$prefix" = "XNONE"; then acl_final_prefix="$ac_default_prefix" else acl_final_prefix="$prefix" fi if test "X$exec_prefix" = "XNONE"; then acl_final_exec_prefix='${prefix}' else acl_final_exec_prefix="$exec_prefix" fi acl_save_prefix="$prefix" prefix="$acl_final_prefix" eval acl_final_exec_prefix=\"$acl_final_exec_prefix\" prefix="$acl_save_prefix" ]) dnl AC_LIB_WITH_FINAL_PREFIX([statement]) evaluates statement, with the dnl variables prefix and exec_prefix bound to the values they will have dnl at the end of the configure script. 
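dnl (Added illustration; not part of the upstream gettext macro.)  A typical
dnl call, following the pattern used elsewhere in this file, is
dnl   AC_LIB_WITH_FINAL_PREFIX([eval final_libdir=\"$libdir\"])
dnl which expands $libdir with $prefix and $exec_prefix bound to their final
dnl values (resolving the `NONE' defaults) instead of their mid-configure
dnl settings; "final_libdir" is just an example variable name.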
AC_DEFUN([AC_LIB_WITH_FINAL_PREFIX], [ acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" $1 exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" ]) nordugrid-arc-5.4.2/m4/PaxHeaders.7502/lib-ld.m40000644000000000000000000000013213214315702017117 xustar000000000000000030 mtime=1513200578.225747355 30 atime=1513200579.475762643 30 ctime=1513200658.589730238 nordugrid-arc-5.4.2/m4/lib-ld.m40000644000175000002070000000676113214315702017177 0ustar00mockbuildmock00000000000000# lib-ld.m4 serial 2 (gettext-0.12) dnl Copyright (C) 1996-2003 Free Software Foundation, Inc. dnl This file is free software, distributed under the terms of the GNU dnl General Public License. As a special exception to the GNU General dnl Public License, this file may be distributed as part of a program dnl that contains a configuration script generated by Autoconf, under dnl the same distribution terms as the rest of that program. dnl Subroutines of libtool.m4, dnl with replacements s/AC_/AC_LIB/ and s/lt_cv/acl_cv/ to avoid collision dnl with libtool.m4. dnl From libtool-1.4. Sets the variable with_gnu_ld to yes or no. AC_DEFUN([AC_LIB_PROG_LD_GNU], [AC_CACHE_CHECK([if the linker ($LD) is GNU ld], acl_cv_prog_gnu_ld, [# I'd rather use --version here, but apparently some GNU ld's only accept -v. if $LD -v 2>&1 &5; then acl_cv_prog_gnu_ld=yes else acl_cv_prog_gnu_ld=no fi]) with_gnu_ld=$acl_cv_prog_gnu_ld ]) dnl From libtool-1.4. Sets the variable LD. AC_DEFUN([AC_LIB_PROG_LD], [AC_ARG_WITH(gnu-ld, [ --with-gnu-ld assume the C compiler uses GNU ld [default=no]], test "$withval" = no || with_gnu_ld=yes, with_gnu_ld=no) AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([AC_CANONICAL_HOST])dnl # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. AC_MSG_CHECKING([for ld used by GCC]) case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [[\\/]* | [A-Za-z]:[\\/]*)] [re_direlt='/[^/][^/]*/\.\./'] # Canonicalize the path of ld ac_prog=`echo $ac_prog| sed 's%\\\\%/%g'` while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then AC_MSG_CHECKING([for GNU ld]) else AC_MSG_CHECKING([for non-GNU ld]) fi AC_CACHE_VAL(acl_cv_path_LD, [if test -z "$LD"; then IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR-:}" for ac_dir in $PATH; do test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then acl_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some GNU ld's only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. 
if "$acl_cv_path_LD" -v 2>&1 < /dev/null | egrep '(GNU|with BFD)' > /dev/null; then test "$with_gnu_ld" != no && break else test "$with_gnu_ld" != yes && break fi fi done IFS="$ac_save_ifs" else acl_cv_path_LD="$LD" # Let the user override the test with a path. fi]) LD="$acl_cv_path_LD" if test -n "$LD"; then AC_MSG_RESULT($LD) else AC_MSG_RESULT(no) fi test -z "$LD" && AC_MSG_ERROR([no acceptable ld found in \$PATH]) AC_LIB_PROG_LD_GNU ]) nordugrid-arc-5.4.2/m4/PaxHeaders.7502/arc_paths.m40000644000000000000000000000012711523746055017736 xustar000000000000000027 mtime=1297075245.799527 30 atime=1513200579.481762716 30 ctime=1513200658.585730189 nordugrid-arc-5.4.2/m4/arc_paths.m40000644000175000002070000000522611523746055020005 0ustar00mockbuildmock00000000000000dnl dnl Substitite some relative paths dnl AC_DEFUN([ARC_RELATIVE_PATHS], [ AC_REQUIRE([ARC_RELATIVE_PATHS_INIT]) AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) AC_LIB_WITH_FINAL_PREFIX([ eval instprefix="\"${exec_prefix}\"" eval arc_libdir="\"${libdir}\"" eval arc_bindir="\"${bindir}\"" eval arc_sbindir="\"${sbindir}\"" eval arc_pkglibdir="\"${libdir}/arc\"" eval arc_pkglibexecdir="\"${libexecdir}/arc\"" # It seems arc_datadir should be evaluated twice to be expanded fully. eval arc_datadir="\"${datadir}/arc\"" eval arc_datadir="\"${arc_datadir}\"" ]) libsubdir=`get_relative_path "$instprefix" "$arc_libdir"` pkglibsubdir=`get_relative_path "$instprefix" "$arc_pkglibdir"` pkglibexecsubdir=`get_relative_path "$instprefix" "$arc_pkglibexecdir"` pkgdatasubdir=`get_relative_path "$instprefix" "$arc_datadir"` pkglibdir_rel_to_pkglibexecdir=`get_relative_path "$arc_pkglibexecdir" "$arc_pkglibdir"` sbindir_rel_to_pkglibexecdir=`get_relative_path "$arc_pkglibexecdir" "$arc_sbindir"` bindir_rel_to_pkglibexecdir=`get_relative_path "$arc_pkglibexecdir" "$arc_bindir"` pkgdatadir_rel_to_pkglibexecdir=`get_relative_path "$arc_pkglibexecdir" "$arc_datadir"` AC_MSG_NOTICE([pkglib subdirectory is: $pkglibsubdir]) AC_MSG_NOTICE([pkglibexec subdirectory is: $pkglibexecsubdir]) AC_MSG_NOTICE([relative path of pkglib to pkglibexec is: $pkglibdir_rel_to_pkglibexecdir]) AC_SUBST([libsubdir]) AC_SUBST([pkglibsubdir]) AC_SUBST([pkglibexecsubdir]) AC_SUBST([pkglibdir_rel_to_pkglibexecdir]) AC_SUBST([sbindir_rel_to_pkglibexecdir]) AC_SUBST([bindir_rel_to_pkglibexecdir]) AC_SUBST([pkgdatadir_rel_to_pkglibexecdir]) AC_SUBST([pkgdatasubdir]) AC_DEFINE_UNQUOTED([INSTPREFIX], ["${instprefix}"], [installation prefix]) AC_DEFINE_UNQUOTED([LIBSUBDIR], ["${libsubdir}"], [library installation subdirectory]) AC_DEFINE_UNQUOTED([PKGLIBSUBDIR], ["${pkglibsubdir}"], [plugin installation subdirectory]) AC_DEFINE_UNQUOTED([PKGLIBEXECSUBDIR], ["${pkglibexecsubdir}"], [helper programs installation subdirectory]) AC_DEFINE_UNQUOTED([PKGDATASUBDIR], ["${pkgdatasubdir}"], [package data subdirectory]) ]) AC_DEFUN([ARC_RELATIVE_PATHS_INIT], [ get_relative_path() { olddir=`echo $[]1 | sed -e 's|/+|/|g' -e 's|^/||' -e 's|/*$|/|'` newdir=`echo $[]2 | sed -e 's|/+|/|g' -e 's|^/||' -e 's|/*$|/|'` O_IFS=$IFS IFS=/ relative="" common="" for i in $olddir; do if echo "$newdir" | grep -q "^$common$i/"; then common="$common$i/" else relative="../$relative" fi done IFS=$O_IFS echo $newdir | sed "s|^$common|$relative|" | sed 's|/*$||' } ]) nordugrid-arc-5.4.2/m4/PaxHeaders.7502/ltoptions.m40000644000000000000000000000013213214315710020006 xustar000000000000000030 mtime=1513200584.103819246 30 atime=1513200584.696826498 30 ctime=1513200658.592730274 
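[Illustrative example; not part of the distributed tarball.] The get_relative_path shell helper defined in arc_paths.m4 above converts one absolute directory into a path relative to another; ARC_RELATIVE_PATHS uses it to derive values such as PKGLIBSUBDIR and pkglibdir_rel_to_pkglibexecdir. With a standard /usr layout it behaves like this:

    get_relative_path "/usr" "/usr/lib/arc"             # prints: lib/arc
    get_relative_path "/usr/libexec/arc" "/usr/lib/arc" # prints: ../../lib/arc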
nordugrid-arc-5.4.2/m4/ltoptions.m40000644000175000002070000002724213214315710020063 0ustar00mockbuildmock00000000000000# Helper functions for option handling. -*- Autoconf -*- # # Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc. # Written by Gary V. Vaughan, 2004 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 6 ltoptions.m4 # This is to help aclocal find these macros, as it can't see m4_define. AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])]) # _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME) # ------------------------------------------ m4_define([_LT_MANGLE_OPTION], [[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])]) # _LT_SET_OPTION(MACRO-NAME, OPTION-NAME) # --------------------------------------- # Set option OPTION-NAME for macro MACRO-NAME, and if there is a # matching handler defined, dispatch to it. Other OPTION-NAMEs are # saved as a flag. m4_define([_LT_SET_OPTION], [m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]), _LT_MANGLE_DEFUN([$1], [$2]), [m4_warning([Unknown $1 option `$2'])])[]dnl ]) # _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET]) # ------------------------------------------------------------ # Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. m4_define([_LT_IF_OPTION], [m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])]) # _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET) # ------------------------------------------------------- # Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME # are set. m4_define([_LT_UNLESS_OPTIONS], [m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option), [m4_define([$0_found])])])[]dnl m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3 ])[]dnl ]) # _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST) # ---------------------------------------- # OPTION-LIST is a space-separated list of Libtool options associated # with MACRO-NAME. If any OPTION has a matching handler declared with # LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about # the unknown option and exit. m4_defun([_LT_SET_OPTIONS], [# Set options m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), [_LT_SET_OPTION([$1], _LT_Option)]) m4_if([$1],[LT_INIT],[ dnl dnl Simply set some default values (i.e off) if boolean options were not dnl specified: _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no ]) _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no ]) dnl dnl If no reference was made to various pairs of opposing options, then dnl we run the default mode handler for the pair. For example, if neither dnl `shared' nor `disable-shared' was passed, we enable building of shared dnl archives by default: _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED]) _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC]) _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC]) _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install], [_LT_ENABLE_FAST_INSTALL]) ]) ])# _LT_SET_OPTIONS ## --------------------------------- ## ## Macros to handle LT_INIT options. 
## ## --------------------------------- ## # _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME) # ----------------------------------------- m4_define([_LT_MANGLE_DEFUN], [[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])]) # LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE) # ----------------------------------------------- m4_define([LT_OPTION_DEFINE], [m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl ])# LT_OPTION_DEFINE # dlopen # ------ LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes ]) AU_DEFUN([AC_LIBTOOL_DLOPEN], [_LT_SET_OPTION([LT_INIT], [dlopen]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `dlopen' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], []) # win32-dll # --------- # Declare package support for building win32 dll's. LT_OPTION_DEFINE([LT_INIT], [win32-dll], [enable_win32_dll=yes case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-cegcc*) AC_CHECK_TOOL(AS, as, false) AC_CHECK_TOOL(DLLTOOL, dlltool, false) AC_CHECK_TOOL(OBJDUMP, objdump, false) ;; esac test -z "$AS" && AS=as _LT_DECL([], [AS], [0], [Assembler program])dnl test -z "$DLLTOOL" && DLLTOOL=dlltool _LT_DECL([], [DLLTOOL], [0], [DLL creation program])dnl test -z "$OBJDUMP" && OBJDUMP=objdump _LT_DECL([], [OBJDUMP], [0], [Object dumper program])dnl ])# win32-dll AU_DEFUN([AC_LIBTOOL_WIN32_DLL], [AC_REQUIRE([AC_CANONICAL_HOST])dnl _LT_SET_OPTION([LT_INIT], [win32-dll]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `win32-dll' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], []) # _LT_ENABLE_SHARED([DEFAULT]) # ---------------------------- # implement the --enable-shared flag, and supports the `shared' and # `disable-shared' LT_INIT options. # DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. m4_define([_LT_ENABLE_SHARED], [m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl AC_ARG_ENABLE([shared], [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@], [build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])], [p=${PACKAGE-default} case $enableval in yes) enable_shared=yes ;; no) enable_shared=no ;; *) enable_shared=no # Look at the argument we got. We use all the common list separators. lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_shared=yes fi done IFS="$lt_save_ifs" ;; esac], [enable_shared=]_LT_ENABLE_SHARED_DEFAULT) _LT_DECL([build_libtool_libs], [enable_shared], [0], [Whether or not to build shared libraries]) ])# _LT_ENABLE_SHARED LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])]) LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])]) # Old names: AC_DEFUN([AC_ENABLE_SHARED], [_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared]) ]) AC_DEFUN([AC_DISABLE_SHARED], [_LT_SET_OPTION([LT_INIT], [disable-shared]) ]) AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AM_ENABLE_SHARED], []) dnl AC_DEFUN([AM_DISABLE_SHARED], []) # _LT_ENABLE_STATIC([DEFAULT]) # ---------------------------- # implement the --enable-static flag, and support the `static' and # `disable-static' LT_INIT options. # DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. 
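# (Added illustration; not part of the upstream ltoptions.m4.)  For example,
# LT_INIT([disable-static]) in configure.ac expands this with DEFAULT=no, so
# static archives are skipped unless the user passes --enable-static; with
# --enable-static=PKGS they are re-enabled only if the current $PACKAGE name
# appears in the comma-separated PKGS list.  The `shared'/`disable-shared'
# pair above works the same way for shared libraries.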
m4_define([_LT_ENABLE_STATIC], [m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl AC_ARG_ENABLE([static], [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@], [build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])], [p=${PACKAGE-default} case $enableval in yes) enable_static=yes ;; no) enable_static=no ;; *) enable_static=no # Look at the argument we got. We use all the common list separators. lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_static=yes fi done IFS="$lt_save_ifs" ;; esac], [enable_static=]_LT_ENABLE_STATIC_DEFAULT) _LT_DECL([build_old_libs], [enable_static], [0], [Whether or not to build static libraries]) ])# _LT_ENABLE_STATIC LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])]) LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])]) # Old names: AC_DEFUN([AC_ENABLE_STATIC], [_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static]) ]) AC_DEFUN([AC_DISABLE_STATIC], [_LT_SET_OPTION([LT_INIT], [disable-static]) ]) AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AM_ENABLE_STATIC], []) dnl AC_DEFUN([AM_DISABLE_STATIC], []) # _LT_ENABLE_FAST_INSTALL([DEFAULT]) # ---------------------------------- # implement the --enable-fast-install flag, and support the `fast-install' # and `disable-fast-install' LT_INIT options. # DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. m4_define([_LT_ENABLE_FAST_INSTALL], [m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl AC_ARG_ENABLE([fast-install], [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], [p=${PACKAGE-default} case $enableval in yes) enable_fast_install=yes ;; no) enable_fast_install=no ;; *) enable_fast_install=no # Look at the argument we got. We use all the common list separators. lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_fast_install=yes fi done IFS="$lt_save_ifs" ;; esac], [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT) _LT_DECL([fast_install], [enable_fast_install], [0], [Whether or not to optimize for fast installation])dnl ])# _LT_ENABLE_FAST_INSTALL LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])]) LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])]) # Old names: AU_DEFUN([AC_ENABLE_FAST_INSTALL], [_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `fast-install' option into LT_INIT's first parameter.]) ]) AU_DEFUN([AC_DISABLE_FAST_INSTALL], [_LT_SET_OPTION([LT_INIT], [disable-fast-install]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `disable-fast-install' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], []) dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], []) # _LT_WITH_PIC([MODE]) # -------------------- # implement the --with-pic flag, and support the `pic-only' and `no-pic' # LT_INIT options. # MODE is either `yes' or `no'. If omitted, it defaults to `both'. 
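# (Added illustration; not part of the upstream ltoptions.m4.)  Concretely,
# ./configure --with-pic asks libtool to build only PIC objects and
# --without-pic only non-PIC objects; with neither flag pic_mode stays
# `default' and libtool builds whichever kind each target needs.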
m4_define([_LT_WITH_PIC], [AC_ARG_WITH([pic], [AS_HELP_STRING([--with-pic], [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], [pic_mode="$withval"], [pic_mode=default]) test -z "$pic_mode" && pic_mode=m4_default([$1], [default]) _LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl ])# _LT_WITH_PIC LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])]) LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])]) # Old name: AU_DEFUN([AC_LIBTOOL_PICMODE], [_LT_SET_OPTION([LT_INIT], [pic-only]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `pic-only' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_PICMODE], []) ## ----------------- ## ## LTDL_INIT Options ## ## ----------------- ## m4_define([_LTDL_MODE], []) LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive], [m4_define([_LTDL_MODE], [nonrecursive])]) LT_OPTION_DEFINE([LTDL_INIT], [recursive], [m4_define([_LTDL_MODE], [recursive])]) LT_OPTION_DEFINE([LTDL_INIT], [subproject], [m4_define([_LTDL_MODE], [subproject])]) m4_define([_LTDL_TYPE], []) LT_OPTION_DEFINE([LTDL_INIT], [installable], [m4_define([_LTDL_TYPE], [installable])]) LT_OPTION_DEFINE([LTDL_INIT], [convenience], [m4_define([_LTDL_TYPE], [convenience])]) nordugrid-arc-5.4.2/m4/PaxHeaders.7502/libtool.m40000644000000000000000000000013213214315707017425 xustar000000000000000030 mtime=1513200583.977817705 30 atime=1513200584.698826523 30 ctime=1513200658.592730274 nordugrid-arc-5.4.2/m4/libtool.m40000644000175000002070000077341113214315707017510 0ustar00mockbuildmock00000000000000# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- # # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, # 2006, 2007, 2008 Free Software Foundation, Inc. # Written by Gordon Matzigkeit, 1996 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. m4_define([_LT_COPYING], [dnl # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, # 2006, 2007, 2008 Free Software Foundation, Inc. # Written by Gordon Matzigkeit, 1996 # # This file is part of GNU Libtool. # # GNU Libtool is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of # the License, or (at your option) any later version. # # As a special exception to the GNU General Public License, # if you distribute this file as part of a program or library that # is built using GNU Libtool, you may include this file under the # same distribution terms that you use for the rest of that program. # # GNU Libtool is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Libtool; see the file COPYING. If not, a copy # can be downloaded from http://www.gnu.org/licenses/gpl.html, or # obtained by writing to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. ]) # serial 56 LT_INIT # LT_PREREQ(VERSION) # ------------------ # Complain and exit if this libtool version is less that VERSION. 
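# (Added illustration; not part of the upstream libtool.m4.)  For example,
# placing LT_PREREQ([2.2.6]) just before LT_INIT in configure.ac makes
# aclocal/autoconf abort with "Libtool version 2.2.6 or higher is required"
# if the build tree is regenerated against an older libtool.m4.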
m4_defun([LT_PREREQ], [m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1, [m4_default([$3], [m4_fatal([Libtool version $1 or higher is required], 63)])], [$2])]) # _LT_CHECK_BUILDDIR # ------------------ # Complain if the absolute build directory name contains unusual characters m4_defun([_LT_CHECK_BUILDDIR], [case `pwd` in *\ * | *\ *) AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;; esac ]) # LT_INIT([OPTIONS]) # ------------------ AC_DEFUN([LT_INIT], [AC_PREREQ([2.58])dnl We use AC_INCLUDES_DEFAULT AC_BEFORE([$0], [LT_LANG])dnl AC_BEFORE([$0], [LT_OUTPUT])dnl AC_BEFORE([$0], [LTDL_INIT])dnl m4_require([_LT_CHECK_BUILDDIR])dnl dnl Autoconf doesn't catch unexpanded LT_ macros by default: m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4 dnl unless we require an AC_DEFUNed macro: AC_REQUIRE([LTOPTIONS_VERSION])dnl AC_REQUIRE([LTSUGAR_VERSION])dnl AC_REQUIRE([LTVERSION_VERSION])dnl AC_REQUIRE([LTOBSOLETE_VERSION])dnl m4_require([_LT_PROG_LTMAIN])dnl dnl Parse OPTIONS _LT_SET_OPTIONS([$0], [$1]) # This can be used to rebuild libtool when needed LIBTOOL_DEPS="$ltmain" # Always use our own libtool. LIBTOOL='$(SHELL) $(top_builddir)/libtool' AC_SUBST(LIBTOOL)dnl _LT_SETUP # Only expand once: m4_define([LT_INIT]) ])# LT_INIT # Old names: AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT]) AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_PROG_LIBTOOL], []) dnl AC_DEFUN([AM_PROG_LIBTOOL], []) # _LT_CC_BASENAME(CC) # ------------------- # Calculate cc_basename. Skip known compiler wrappers and cross-prefix. m4_defun([_LT_CC_BASENAME], [for cc_temp in $1""; do case $cc_temp in compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; \-*) ;; *) break;; esac done cc_basename=`$ECHO "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` ]) # _LT_FILEUTILS_DEFAULTS # ---------------------- # It is okay to use these file commands and assume they have been set # sensibly after `m4_require([_LT_FILEUTILS_DEFAULTS])'. m4_defun([_LT_FILEUTILS_DEFAULTS], [: ${CP="cp -f"} : ${MV="mv -f"} : ${RM="rm -f"} ])# _LT_FILEUTILS_DEFAULTS # _LT_SETUP # --------- m4_defun([_LT_SETUP], [AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_CANONICAL_BUILD])dnl _LT_DECL([], [host_alias], [0], [The host system])dnl _LT_DECL([], [host], [0])dnl _LT_DECL([], [host_os], [0])dnl dnl _LT_DECL([], [build_alias], [0], [The build system])dnl _LT_DECL([], [build], [0])dnl _LT_DECL([], [build_os], [0])dnl dnl AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([LT_PATH_LD])dnl AC_REQUIRE([LT_PATH_NM])dnl dnl AC_REQUIRE([AC_PROG_LN_S])dnl test -z "$LN_S" && LN_S="ln -s" _LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl dnl AC_REQUIRE([LT_CMD_MAX_LEN])dnl _LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl _LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_CHECK_SHELL_FEATURES])dnl m4_require([_LT_CMD_RELOAD])dnl m4_require([_LT_CHECK_MAGIC_METHOD])dnl m4_require([_LT_CMD_OLD_ARCHIVE])dnl m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl _LT_CONFIG_LIBTOOL_INIT([ # See if we are running on zsh, and set the options which allow our # commands through without removal of \ escapes INIT. 
if test -n "\${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi ]) if test -n "${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi _LT_CHECK_OBJDIR m4_require([_LT_TAG_COMPILER])dnl _LT_PROG_ECHO_BACKSLASH case $host_os in aix3*) # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test "X${COLLECT_NAMES+set}" != Xset; then COLLECT_NAMES= export COLLECT_NAMES fi ;; esac # Sed substitution that helps us do robust quoting. It backslashifies # metacharacters that are still active within double-quoted strings. sed_quote_subst='s/\([["`$\\]]\)/\\\1/g' # Same as above, but do not quote variable references. double_quote_subst='s/\([["`\\]]\)/\\\1/g' # Sed substitution to delay expansion of an escaped shell variable in a # double_quote_subst'ed string. delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' # Sed substitution to delay expansion of an escaped single quote. delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' # Sed substitution to avoid accidental globbing in evaled expressions no_glob_subst='s/\*/\\\*/g' # Global variables: ofile=libtool can_build_shared=yes # All known linkers require a `.a' archive for static linking (except MSVC, # which needs '.lib'). libext=a with_gnu_ld="$lt_cv_prog_gnu_ld" old_CC="$CC" old_CFLAGS="$CFLAGS" # Set sane defaults for various variables test -z "$CC" && CC=cc test -z "$LTCC" && LTCC=$CC test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS test -z "$LD" && LD=ld test -z "$ac_objext" && ac_objext=o _LT_CC_BASENAME([$compiler]) # Only perform the check for file, if the check method requires it test -z "$MAGIC_CMD" && MAGIC_CMD=file case $deplibs_check_method in file_magic*) if test "$file_magic_cmd" = '$MAGIC_CMD'; then _LT_PATH_MAGIC fi ;; esac # Use C for the default configuration in the libtool script LT_SUPPORTED_TAG([CC]) _LT_LANG_C_CONFIG _LT_LANG_DEFAULT_CONFIG _LT_CONFIG_COMMANDS ])# _LT_SETUP # _LT_PROG_LTMAIN # --------------- # Note that this code is called both from `configure', and `config.status' # now that we use AC_CONFIG_COMMANDS to generate libtool. Notably, # `config.status' has no value for ac_aux_dir unless we are using Automake, # so we pass a copy along to make sure it has a sensible value anyway. m4_defun([_LT_PROG_LTMAIN], [m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl _LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir']) ltmain="$ac_aux_dir/ltmain.sh" ])# _LT_PROG_LTMAIN ## ------------------------------------- ## ## Accumulate code for creating libtool. ## ## ------------------------------------- ## # So that we can recreate a full libtool script including additional # tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS # in macros and then make a single call at the end using the `libtool' # label. # _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS]) # ---------------------------------------- # Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later. m4_define([_LT_CONFIG_LIBTOOL_INIT], [m4_ifval([$1], [m4_append([_LT_OUTPUT_LIBTOOL_INIT], [$1 ])])]) # Initialize. m4_define([_LT_OUTPUT_LIBTOOL_INIT]) # _LT_CONFIG_LIBTOOL([COMMANDS]) # ------------------------------ # Register COMMANDS to be passed to AC_CONFIG_COMMANDS later. m4_define([_LT_CONFIG_LIBTOOL], [m4_ifval([$1], [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS], [$1 ])])]) # Initialize. 
m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS]) # _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS]) # ----------------------------------------------------- m4_defun([_LT_CONFIG_SAVE_COMMANDS], [_LT_CONFIG_LIBTOOL([$1]) _LT_CONFIG_LIBTOOL_INIT([$2]) ]) # _LT_FORMAT_COMMENT([COMMENT]) # ----------------------------- # Add leading comment marks to the start of each line, and a trailing # full-stop to the whole comment if one is not present already. m4_define([_LT_FORMAT_COMMENT], [m4_ifval([$1], [ m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])], [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.]) )]) ## ------------------------ ## ## FIXME: Eliminate VARNAME ## ## ------------------------ ## # _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?]) # ------------------------------------------------------------------- # CONFIGNAME is the name given to the value in the libtool script. # VARNAME is the (base) name used in the configure script. # VALUE may be 0, 1 or 2 for a computed quote escaped value based on # VARNAME. Any other value will be used directly. m4_define([_LT_DECL], [lt_if_append_uniq([lt_decl_varnames], [$2], [, ], [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name], [m4_ifval([$1], [$1], [$2])]) lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3]) m4_ifval([$4], [lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])]) lt_dict_add_subkey([lt_decl_dict], [$2], [tagged?], [m4_ifval([$5], [yes], [no])])]) ]) # _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION]) # -------------------------------------------------------- m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])]) # lt_decl_tag_varnames([SEPARATOR], [VARNAME1...]) # ------------------------------------------------ m4_define([lt_decl_tag_varnames], [_lt_decl_filter([tagged?], [yes], $@)]) # _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..]) # --------------------------------------------------------- m4_define([_lt_decl_filter], [m4_case([$#], [0], [m4_fatal([$0: too few arguments: $#])], [1], [m4_fatal([$0: too few arguments: $#: $1])], [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)], [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)], [lt_dict_filter([lt_decl_dict], $@)])[]dnl ]) # lt_decl_quote_varnames([SEPARATOR], [VARNAME1...]) # -------------------------------------------------- m4_define([lt_decl_quote_varnames], [_lt_decl_filter([value], [1], $@)]) # lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...]) # --------------------------------------------------- m4_define([lt_decl_dquote_varnames], [_lt_decl_filter([value], [2], $@)]) # lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...]) # --------------------------------------------------- m4_define([lt_decl_varnames_tagged], [m4_assert([$# <= 2])dnl _$0(m4_quote(m4_default([$1], [[, ]])), m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]), m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))]) m4_define([_lt_decl_varnames_tagged], [m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])]) # lt_decl_all_varnames([SEPARATOR], [VARNAME1...]) # ------------------------------------------------ m4_define([lt_decl_all_varnames], [_$0(m4_quote(m4_default([$1], [[, ]])), m4_if([$2], [], m4_quote(lt_decl_varnames), m4_quote(m4_shift($@))))[]dnl ]) m4_define([_lt_decl_all_varnames], [lt_join($@, lt_decl_varnames_tagged([$1], lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl ]) # _LT_CONFIG_STATUS_DECLARE([VARNAME]) # ------------------------------------ # Quote a variable value, and forward 
it to `config.status' so that its # declaration there will have the same value as in `configure'. VARNAME # must have a single quote delimited value for this to work. m4_define([_LT_CONFIG_STATUS_DECLARE], [$1='`$ECHO "X$][$1" | $Xsed -e "$delay_single_quote_subst"`']) # _LT_CONFIG_STATUS_DECLARATIONS # ------------------------------ # We delimit libtool config variables with single quotes, so when # we write them to config.status, we have to be sure to quote all # embedded single quotes properly. In configure, this macro expands # each variable declared with _LT_DECL (and _LT_TAGDECL) into: # # ='`$ECHO "X$" | $Xsed -e "$delay_single_quote_subst"`' m4_defun([_LT_CONFIG_STATUS_DECLARATIONS], [m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames), [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])]) # _LT_LIBTOOL_TAGS # ---------------- # Output comment and list of tags supported by the script m4_defun([_LT_LIBTOOL_TAGS], [_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl available_tags="_LT_TAGS"dnl ]) # _LT_LIBTOOL_DECLARE(VARNAME, [TAG]) # ----------------------------------- # Extract the dictionary values for VARNAME (optionally with TAG) and # expand to a commented shell variable setting: # # # Some comment about what VAR is for. # visible_name=$lt_internal_name m4_define([_LT_LIBTOOL_DECLARE], [_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [description])))[]dnl m4_pushdef([_libtool_name], m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])), [0], [_libtool_name=[$]$1], [1], [_libtool_name=$lt_[]$1], [2], [_libtool_name=$lt_[]$1], [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl ]) # _LT_LIBTOOL_CONFIG_VARS # ----------------------- # Produce commented declarations of non-tagged libtool config variables # suitable for insertion in the LIBTOOL CONFIG section of the `libtool' # script. Tagged libtool config variables (even for the LIBTOOL CONFIG # section) are produced by _LT_LIBTOOL_TAG_VARS. m4_defun([_LT_LIBTOOL_CONFIG_VARS], [m4_foreach([_lt_var], m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)), [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])]) # _LT_LIBTOOL_TAG_VARS(TAG) # ------------------------- m4_define([_LT_LIBTOOL_TAG_VARS], [m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames), [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])]) # _LT_TAGVAR(VARNAME, [TAGNAME]) # ------------------------------ m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])]) # _LT_CONFIG_COMMANDS # ------------------- # Send accumulated output to $CONFIG_STATUS. Thanks to the lists of # variables for single and double quote escaping we saved from calls # to _LT_DECL, we can put quote escaped variables declarations # into `config.status', and then the shell code to quote escape them in # for loops in `config.status'. Finally, any additional code accumulated # from calls to _LT_CONFIG_LIBTOOL_INIT is expanded. 
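#
# Illustrative sketch only (not part of libtool): when the accumulated code is
# destined for config.status, the net effect is roughly a single call of the
# form
#
#   AC_CONFIG_COMMANDS([libtool],
#     [<commands collected with _LT_CONFIG_LIBTOOL, i.e. the code that writes "$ofile">],
#     [<init code collected with _LT_CONFIG_LIBTOOL_INIT, plus the quote-escaped
#      variable declarations produced by _LT_CONFIG_STATUS_DECLARATIONS>])
#
# If LT_OUTPUT was called earlier, config.status instead just re-runs the
# already generated $CONFIG_LT script, as handled in the macro below.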
m4_defun([_LT_CONFIG_COMMANDS], [AC_PROVIDE_IFELSE([LT_OUTPUT], dnl If the libtool generation code has been placed in $CONFIG_LT, dnl instead of duplicating it all over again into config.status, dnl then we will have config.status run $CONFIG_LT later, so it dnl needs to know what name is stored there: [AC_CONFIG_COMMANDS([libtool], [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])], dnl If the libtool generation code is destined for config.status, dnl expand the accumulated commands and init code now: [AC_CONFIG_COMMANDS([libtool], [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])]) ])#_LT_CONFIG_COMMANDS # Initialize. m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT], [ # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH sed_quote_subst='$sed_quote_subst' double_quote_subst='$double_quote_subst' delay_variable_subst='$delay_variable_subst' _LT_CONFIG_STATUS_DECLARATIONS LTCC='$LTCC' LTCFLAGS='$LTCFLAGS' compiler='$compiler_DEFAULT' # Quote evaled strings. for var in lt_decl_all_varnames([[ \ ]], lt_decl_quote_varnames); do case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in *[[\\\\\\\`\\"\\\$]]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done # Double-quote double-evaled strings. for var in lt_decl_all_varnames([[ \ ]], lt_decl_dquote_varnames); do case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in *[[\\\\\\\`\\"\\\$]]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done # Fix-up fallback echo if it was mangled by the above quoting rules. case \$lt_ECHO in *'\\\[$]0 --fallback-echo"')dnl " lt_ECHO=\`\$ECHO "X\$lt_ECHO" | \$Xsed -e 's/\\\\\\\\\\\\\\\[$]0 --fallback-echo"\[$]/\[$]0 --fallback-echo"/'\` ;; esac _LT_OUTPUT_LIBTOOL_INIT ]) # LT_OUTPUT # --------- # This macro allows early generation of the libtool script (before # AC_OUTPUT is called), incase it is used in configure for compilation # tests. AC_DEFUN([LT_OUTPUT], [: ${CONFIG_LT=./config.lt} AC_MSG_NOTICE([creating $CONFIG_LT]) cat >"$CONFIG_LT" <<_LTEOF #! $SHELL # Generated by $as_me. # Run this file to recreate a libtool stub with the current configuration. lt_cl_silent=false SHELL=\${CONFIG_SHELL-$SHELL} _LTEOF cat >>"$CONFIG_LT" <<\_LTEOF AS_SHELL_SANITIZE _AS_PREPARE exec AS_MESSAGE_FD>&1 exec AS_MESSAGE_LOG_FD>>config.log { echo AS_BOX([Running $as_me.]) } >&AS_MESSAGE_LOG_FD lt_cl_help="\ \`$as_me' creates a local libtool stub from the current configuration, for use in further configure time tests before the real libtool is generated. Usage: $[0] [[OPTIONS]] -h, --help print this help, then exit -V, --version print version number, then exit -q, --quiet do not print progress messages -d, --debug don't remove temporary files Report bugs to ." lt_cl_version="\ m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION]) configured by $[0], generated by m4_PACKAGE_STRING. Copyright (C) 2008 Free Software Foundation, Inc. This config.lt script is free software; the Free Software Foundation gives unlimited permision to copy, distribute and modify it." 
while test $[#] != 0 do case $[1] in --version | --v* | -V ) echo "$lt_cl_version"; exit 0 ;; --help | --h* | -h ) echo "$lt_cl_help"; exit 0 ;; --debug | --d* | -d ) debug=: ;; --quiet | --q* | --silent | --s* | -q ) lt_cl_silent=: ;; -*) AC_MSG_ERROR([unrecognized option: $[1] Try \`$[0] --help' for more information.]) ;; *) AC_MSG_ERROR([unrecognized argument: $[1] Try \`$[0] --help' for more information.]) ;; esac shift done if $lt_cl_silent; then exec AS_MESSAGE_FD>/dev/null fi _LTEOF cat >>"$CONFIG_LT" <<_LTEOF _LT_OUTPUT_LIBTOOL_COMMANDS_INIT _LTEOF cat >>"$CONFIG_LT" <<\_LTEOF AC_MSG_NOTICE([creating $ofile]) _LT_OUTPUT_LIBTOOL_COMMANDS AS_EXIT(0) _LTEOF chmod +x "$CONFIG_LT" # configure is writing to config.log, but config.lt does its own redirection, # appending to config.log, which fails on DOS, as config.log is still kept # open by configure. Here we exec the FD to /dev/null, effectively closing # config.log, so it can be properly (re)opened and appended to by config.lt. if test "$no_create" != yes; then lt_cl_success=: test "$silent" = yes && lt_config_lt_args="$lt_config_lt_args --quiet" exec AS_MESSAGE_LOG_FD>/dev/null $SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false exec AS_MESSAGE_LOG_FD>>config.log $lt_cl_success || AS_EXIT(1) fi ])# LT_OUTPUT # _LT_CONFIG(TAG) # --------------- # If TAG is the built-in tag, create an initial libtool script with a # default configuration from the untagged config vars. Otherwise add code # to config.status for appending the configuration named by TAG from the # matching tagged config vars. m4_defun([_LT_CONFIG], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl _LT_CONFIG_SAVE_COMMANDS([ m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl m4_if(_LT_TAG, [C], [ # See if we are running on zsh, and set the options which allow our # commands through without removal of \ escapes. if test -n "${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi cfgfile="${ofile}T" trap "$RM \"$cfgfile\"; exit 1" 1 2 15 $RM "$cfgfile" cat <<_LT_EOF >> "$cfgfile" #! $SHELL # `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. # Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION # Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: # NOTE: Changes made to this file will be lost: look at ltmain.sh. # _LT_COPYING _LT_LIBTOOL_TAGS # ### BEGIN LIBTOOL CONFIG _LT_LIBTOOL_CONFIG_VARS _LT_LIBTOOL_TAG_VARS # ### END LIBTOOL CONFIG _LT_EOF case $host_os in aix3*) cat <<\_LT_EOF >> "$cfgfile" # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test "X${COLLECT_NAMES+set}" != Xset; then COLLECT_NAMES= export COLLECT_NAMES fi _LT_EOF ;; esac _LT_PROG_LTMAIN # We use sed instead of cat because bash on DJGPP gets confused if # if finds mixed CR/LF and LF-only lines. Since sed operates in # text mode, it properly converts lines to CR/LF. This bash problem # is reportedly fixed, but why not run on old versions too? 
sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ || (rm -f "$cfgfile"; exit 1) _LT_PROG_XSI_SHELLFNS sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ || (rm -f "$cfgfile"; exit 1) mv -f "$cfgfile" "$ofile" || (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") chmod +x "$ofile" ], [cat <<_LT_EOF >> "$ofile" dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded dnl in a comment (ie after a #). # ### BEGIN LIBTOOL TAG CONFIG: $1 _LT_LIBTOOL_TAG_VARS(_LT_TAG) # ### END LIBTOOL TAG CONFIG: $1 _LT_EOF ])dnl /m4_if ], [m4_if([$1], [], [ PACKAGE='$PACKAGE' VERSION='$VERSION' TIMESTAMP='$TIMESTAMP' RM='$RM' ofile='$ofile'], []) ])dnl /_LT_CONFIG_SAVE_COMMANDS ])# _LT_CONFIG # LT_SUPPORTED_TAG(TAG) # --------------------- # Trace this macro to discover what tags are supported by the libtool # --tag option, using: # autoconf --trace 'LT_SUPPORTED_TAG:$1' AC_DEFUN([LT_SUPPORTED_TAG], []) # C support is built-in for now m4_define([_LT_LANG_C_enabled], []) m4_define([_LT_TAGS], []) # LT_LANG(LANG) # ------------- # Enable libtool support for the given language if not already enabled. AC_DEFUN([LT_LANG], [AC_BEFORE([$0], [LT_OUTPUT])dnl m4_case([$1], [C], [_LT_LANG(C)], [C++], [_LT_LANG(CXX)], [Java], [_LT_LANG(GCJ)], [Fortran 77], [_LT_LANG(F77)], [Fortran], [_LT_LANG(FC)], [Windows Resource], [_LT_LANG(RC)], [m4_ifdef([_LT_LANG_]$1[_CONFIG], [_LT_LANG($1)], [m4_fatal([$0: unsupported language: "$1"])])])dnl ])# LT_LANG # _LT_LANG(LANGNAME) # ------------------ m4_defun([_LT_LANG], [m4_ifdef([_LT_LANG_]$1[_enabled], [], [LT_SUPPORTED_TAG([$1])dnl m4_append([_LT_TAGS], [$1 ])dnl m4_define([_LT_LANG_]$1[_enabled], [])dnl _LT_LANG_$1_CONFIG($1)])dnl ])# _LT_LANG # _LT_LANG_DEFAULT_CONFIG # ----------------------- m4_defun([_LT_LANG_DEFAULT_CONFIG], [AC_PROVIDE_IFELSE([AC_PROG_CXX], [LT_LANG(CXX)], [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])]) AC_PROVIDE_IFELSE([AC_PROG_F77], [LT_LANG(F77)], [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])]) AC_PROVIDE_IFELSE([AC_PROG_FC], [LT_LANG(FC)], [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])]) dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal dnl pulling things in needlessly. 
AC_PROVIDE_IFELSE([AC_PROG_GCJ], [LT_LANG(GCJ)], [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], [LT_LANG(GCJ)], [AC_PROVIDE_IFELSE([LT_PROG_GCJ], [LT_LANG(GCJ)], [m4_ifdef([AC_PROG_GCJ], [m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])]) m4_ifdef([A][M_PROG_GCJ], [m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])]) m4_ifdef([LT_PROG_GCJ], [m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])]) AC_PROVIDE_IFELSE([LT_PROG_RC], [LT_LANG(RC)], [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])]) ])# _LT_LANG_DEFAULT_CONFIG # Obsolete macros: AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)]) AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)]) AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)]) AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_CXX], []) dnl AC_DEFUN([AC_LIBTOOL_F77], []) dnl AC_DEFUN([AC_LIBTOOL_FC], []) dnl AC_DEFUN([AC_LIBTOOL_GCJ], []) # _LT_TAG_COMPILER # ---------------- m4_defun([_LT_TAG_COMPILER], [AC_REQUIRE([AC_PROG_CC])dnl _LT_DECL([LTCC], [CC], [1], [A C compiler])dnl _LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl _LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl _LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC ])# _LT_TAG_COMPILER # _LT_COMPILER_BOILERPLATE # ------------------------ # Check for compiler boilerplate output or warnings with # the simple compiler test code. m4_defun([_LT_COMPILER_BOILERPLATE], [m4_require([_LT_DECL_SED])dnl ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" >conftest.$ac_ext eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_compiler_boilerplate=`cat conftest.err` $RM conftest* ])# _LT_COMPILER_BOILERPLATE # _LT_LINKER_BOILERPLATE # ---------------------- # Check for linker boilerplate output or warnings with # the simple link test code. m4_defun([_LT_LINKER_BOILERPLATE], [m4_require([_LT_DECL_SED])dnl ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` $RM -r conftest* ])# _LT_LINKER_BOILERPLATE # _LT_REQUIRED_DARWIN_CHECKS # ------------------------- m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[ case $host_os in rhapsody* | darwin*) AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:]) AC_CHECK_TOOL([NMEDIT], [nmedit], [:]) AC_CHECK_TOOL([LIPO], [lipo], [:]) AC_CHECK_TOOL([OTOOL], [otool], [:]) AC_CHECK_TOOL([OTOOL64], [otool64], [:]) _LT_DECL([], [DSYMUTIL], [1], [Tool to manipulate archived DWARF debug symbol files on Mac OS X]) _LT_DECL([], [NMEDIT], [1], [Tool to change global to local symbols on Mac OS X]) _LT_DECL([], [LIPO], [1], [Tool to manipulate fat objects and archives on Mac OS X]) _LT_DECL([], [OTOOL], [1], [ldd/readelf like tool for Mach-O binaries on Mac OS X]) _LT_DECL([], [OTOOL64], [1], [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4]) AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod], [lt_cv_apple_cc_single_mod=no if test -z "${LT_MULTI_MODULE}"; then # By default we will add the -single_module flag. You can override # by either setting the environment variable LT_MULTI_MODULE # non-empty at configure time, or by adding -multi_module to the # link flags. 
rm -rf libconftest.dylib* echo "int foo(void){return 1;}" > conftest.c echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c 2>conftest.err _lt_result=$? if test -f libconftest.dylib && test ! -s conftest.err && test $_lt_result = 0; then lt_cv_apple_cc_single_mod=yes else cat conftest.err >&AS_MESSAGE_LOG_FD fi rm -rf libconftest.dylib* rm -f conftest.* fi]) AC_CACHE_CHECK([for -exported_symbols_list linker flag], [lt_cv_ld_exported_symbols_list], [lt_cv_ld_exported_symbols_list=no save_LDFLAGS=$LDFLAGS echo "_main" > conftest.sym LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], [lt_cv_ld_exported_symbols_list=yes], [lt_cv_ld_exported_symbols_list=no]) LDFLAGS="$save_LDFLAGS" ]) case $host_os in rhapsody* | darwin1.[[012]]) _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; darwin1.*) _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; darwin*) # darwin 5.x on # if running on 10.5 or later, the deployment target defaults # to the OS version, if on x86, and 10.4, the deployment # target defaults to 10.4. Don't you love it? case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in 10.0,*86*-darwin8*|10.0,*-darwin[[91]]*) _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; 10.[[012]]*) _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; 10.*) _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; esac ;; esac if test "$lt_cv_apple_cc_single_mod" = "yes"; then _lt_dar_single_mod='$single_module' fi if test "$lt_cv_ld_exported_symbols_list" = "yes"; then _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' else _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' fi if test "$DSYMUTIL" != ":"; then _lt_dsymutil='~$DSYMUTIL $lib || :' else _lt_dsymutil= fi ;; esac ]) # _LT_DARWIN_LINKER_FEATURES # -------------------------- # Checks for linker and compiler features on darwin m4_defun([_LT_DARWIN_LINKER_FEATURES], [ m4_require([_LT_REQUIRED_DARWIN_CHECKS]) _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_automatic, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported _LT_TAGVAR(whole_archive_flag_spec, $1)='' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(allow_undefined_flag, $1)="$_lt_dar_allow_undefined" case $cc_basename in ifort*) _lt_dar_can_shared=yes ;; *) _lt_dar_can_shared=$GCC ;; esac if test "$_lt_dar_can_shared" = "yes"; then output_verbose_link_cmd=echo _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" _LT_TAGVAR(module_expsym_cmds, $1)="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs 
\$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" m4_if([$1], [CXX], [ if test "$lt_cv_apple_cc_single_mod" != "yes"; then _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}" _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" fi ],[]) else _LT_TAGVAR(ld_shlibs, $1)=no fi ]) # _LT_SYS_MODULE_PATH_AIX # ----------------------- # Links a minimal program and checks the executable # for the system default hardcoded library path. In most cases, # this is /usr/lib:/lib, but when the MPI compilers are used # the location of the communication and MPI libs are included too. # If we don't find anything, use the default library path according # to the aix ld manual. m4_defun([_LT_SYS_MODULE_PATH_AIX], [m4_require([_LT_DECL_SED])dnl AC_LINK_IFELSE(AC_LANG_PROGRAM,[ lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/ p } }' aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi],[]) if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ])# _LT_SYS_MODULE_PATH_AIX # _LT_SHELL_INIT(ARG) # ------------------- m4_define([_LT_SHELL_INIT], [ifdef([AC_DIVERSION_NOTICE], [AC_DIVERT_PUSH(AC_DIVERSION_NOTICE)], [AC_DIVERT_PUSH(NOTICE)]) $1 AC_DIVERT_POP ])# _LT_SHELL_INIT # _LT_PROG_ECHO_BACKSLASH # ----------------------- # Add some code to the start of the generated configure script which # will find an echo command which doesn't interpret backslashes. m4_defun([_LT_PROG_ECHO_BACKSLASH], [_LT_SHELL_INIT([ # Check that we are running under the correct shell. SHELL=${CONFIG_SHELL-/bin/sh} case X$lt_ECHO in X*--fallback-echo) # Remove one level of quotation (which was required for Make). ECHO=`echo "$lt_ECHO" | sed 's,\\\\\[$]\\[$]0,'[$]0','` ;; esac ECHO=${lt_ECHO-echo} if test "X[$]1" = X--no-reexec; then # Discard the --no-reexec flag, and continue. shift elif test "X[$]1" = X--fallback-echo; then # Avoid inline document here, it may be left over : elif test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' ; then # Yippee, $ECHO works! : else # Restart under the correct shell. exec $SHELL "[$]0" --no-reexec ${1+"[$]@"} fi if test "X[$]1" = X--fallback-echo; then # used as fallback echo shift cat <<_LT_EOF [$]* _LT_EOF exit 0 fi # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH if test -z "$lt_ECHO"; then if test "X${echo_test_string+set}" != Xset; then # find a string as large as possible, as long as the shell can cope with it for cmd in 'sed 50q "[$]0"' 'sed 20q "[$]0"' 'sed 10q "[$]0"' 'sed 2q "[$]0"' 'echo test'; do # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ... 
if { echo_test_string=`eval $cmd`; } 2>/dev/null && { test "X$echo_test_string" = "X$echo_test_string"; } 2>/dev/null then break fi done fi if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' && echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then : else # The Solaris, AIX, and Digital Unix default echo programs unquote # backslashes. This makes it impossible to quote backslashes using # echo "$something" | sed 's/\\/\\\\/g' # # So, first we look for a working echo in the user's PATH. lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for dir in $PATH /usr/ucb; do IFS="$lt_save_ifs" if (test -f $dir/echo || test -f $dir/echo$ac_exeext) && test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' && echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then ECHO="$dir/echo" break fi done IFS="$lt_save_ifs" if test "X$ECHO" = Xecho; then # We didn't find a better echo, so look for alternatives. if test "X`{ print -r '\t'; } 2>/dev/null`" = 'X\t' && echo_testing_string=`{ print -r "$echo_test_string"; } 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then # This shell has a builtin print -r that does the trick. ECHO='print -r' elif { test -f /bin/ksh || test -f /bin/ksh$ac_exeext; } && test "X$CONFIG_SHELL" != X/bin/ksh; then # If we have ksh, try running configure again with it. ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh} export ORIGINAL_CONFIG_SHELL CONFIG_SHELL=/bin/ksh export CONFIG_SHELL exec $CONFIG_SHELL "[$]0" --no-reexec ${1+"[$]@"} else # Try using printf. ECHO='printf %s\n' if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' && echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then # Cool, printf works : elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` && test "X$echo_testing_string" = 'X\t' && echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL export CONFIG_SHELL SHELL="$CONFIG_SHELL" export SHELL ECHO="$CONFIG_SHELL [$]0 --fallback-echo" elif echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` && test "X$echo_testing_string" = 'X\t' && echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then ECHO="$CONFIG_SHELL [$]0 --fallback-echo" else # maybe with a smaller string... prev=: for cmd in 'echo test' 'sed 2q "[$]0"' 'sed 10q "[$]0"' 'sed 20q "[$]0"' 'sed 50q "[$]0"'; do if { test "X$echo_test_string" = "X`eval $cmd`"; } 2>/dev/null then break fi prev="$cmd" done if test "$prev" != 'sed 50q "[$]0"'; then echo_test_string=`eval $prev` export echo_test_string exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "[$]0" ${1+"[$]@"} else # Oops. We lost completely, so just stick with echo. ECHO=echo fi fi fi fi fi fi # Copy echo and quote the copy suitably for passing to libtool from # the Makefile, instead of quoting the original, which is used later. 
lt_ECHO=$ECHO if test "X$lt_ECHO" = "X$CONFIG_SHELL [$]0 --fallback-echo"; then lt_ECHO="$CONFIG_SHELL \\\$\[$]0 --fallback-echo" fi AC_SUBST(lt_ECHO) ]) _LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts]) _LT_DECL([], [ECHO], [1], [An echo program that does not interpret backslashes]) ])# _LT_PROG_ECHO_BACKSLASH # _LT_ENABLE_LOCK # --------------- m4_defun([_LT_ENABLE_LOCK], [AC_ARG_ENABLE([libtool-lock], [AS_HELP_STRING([--disable-libtool-lock], [avoid locking (might break parallel builds)])]) test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes # Some flags need to be propagated to the compiler or linker for good # libtool support. case $host in ia64-*-hpux*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then case `/usr/bin/file conftest.$ac_objext` in *ELF-32*) HPUX_IA64_MODE="32" ;; *ELF-64*) HPUX_IA64_MODE="64" ;; esac fi rm -rf conftest* ;; *-*-irix6*) # Find out which ABI we are using. echo '[#]line __oline__ "configure"' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then if test "$lt_cv_prog_gnu_ld" = yes; then case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -melf32bsmip" ;; *N32*) LD="${LD-ld} -melf32bmipn32" ;; *64-bit*) LD="${LD-ld} -melf64bmip" ;; esac else case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -32" ;; *N32*) LD="${LD-ld} -n32" ;; *64-bit*) LD="${LD-ld} -64" ;; esac fi fi rm -rf conftest* ;; x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then case `/usr/bin/file conftest.o` in *32-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_i386_fbsd" ;; x86_64-*linux*) LD="${LD-ld} -m elf_i386" ;; ppc64-*linux*|powerpc64-*linux*) LD="${LD-ld} -m elf32ppclinux" ;; s390x-*linux*) LD="${LD-ld} -m elf_s390" ;; sparc64-*linux*) LD="${LD-ld} -m elf32_sparc" ;; esac ;; *64-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_x86_64_fbsd" ;; x86_64-*linux*) LD="${LD-ld} -m elf_x86_64" ;; ppc*-*linux*|powerpc*-*linux*) LD="${LD-ld} -m elf64ppc" ;; s390*-*linux*|s390*-*tpf*) LD="${LD-ld} -m elf64_s390" ;; sparc*-*linux*) LD="${LD-ld} -m elf64_sparc" ;; esac ;; esac fi rm -rf conftest* ;; *-*-sco3.2v5*) # On SCO OpenServer 5, we need -belf to get full-featured binaries. SAVE_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS -belf" AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, [AC_LANG_PUSH(C) AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) AC_LANG_POP]) if test x"$lt_cv_cc_needs_belf" != x"yes"; then # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf CFLAGS="$SAVE_CFLAGS" fi ;; sparc*-*solaris*) # Find out which ABI we are using. 
echo 'int i;' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then case `/usr/bin/file conftest.o` in *64-bit*) case $lt_cv_prog_gnu_ld in yes*) LD="${LD-ld} -m elf64_sparc" ;; *) if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then LD="${LD-ld} -64" fi ;; esac ;; esac fi rm -rf conftest* ;; esac need_locks="$enable_libtool_lock" ])# _LT_ENABLE_LOCK # _LT_CMD_OLD_ARCHIVE # ------------------- m4_defun([_LT_CMD_OLD_ARCHIVE], [AC_CHECK_TOOL(AR, ar, false) test -z "$AR" && AR=ar test -z "$AR_FLAGS" && AR_FLAGS=cru _LT_DECL([], [AR], [1], [The archiver]) _LT_DECL([], [AR_FLAGS], [1]) AC_CHECK_TOOL(STRIP, strip, :) test -z "$STRIP" && STRIP=: _LT_DECL([], [STRIP], [1], [A symbol stripping program]) AC_CHECK_TOOL(RANLIB, ranlib, :) test -z "$RANLIB" && RANLIB=: _LT_DECL([], [RANLIB], [1], [Commands used to install an old-style archive]) # Determine commands to create old-style static archives. old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' old_postinstall_cmds='chmod 644 $oldlib' old_postuninstall_cmds= if test -n "$RANLIB"; then case $host_os in openbsd*) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib" ;; *) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib" ;; esac old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" fi _LT_DECL([], [old_postinstall_cmds], [2]) _LT_DECL([], [old_postuninstall_cmds], [2]) _LT_TAGDECL([], [old_archive_cmds], [2], [Commands used to build an old-style archive]) ])# _LT_CMD_OLD_ARCHIVE # _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, # [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) # ---------------------------------------------------------------- # Check whether the given compiler option works AC_DEFUN([_LT_COMPILER_OPTION], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_SED])dnl AC_CACHE_CHECK([$1], [$2], [$2=no m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$3" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&AS_MESSAGE_LOG_FD echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then $2=yes fi fi $RM conftest* ]) if test x"[$]$2" = xyes; then m4_if([$5], , :, [$5]) else m4_if([$6], , :, [$6]) fi ])# _LT_COMPILER_OPTION # Old name: AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], []) # _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, # [ACTION-SUCCESS], [ACTION-FAILURE]) # ---------------------------------------------------- # Check whether the given linker option works AC_DEFUN([_LT_LINKER_OPTION], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_SED])dnl AC_CACHE_CHECK([$1], [$2], [$2=no save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS $3" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&AS_MESSAGE_LOG_FD $ECHO "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then $2=yes fi else $2=yes fi fi $RM -r conftest* LDFLAGS="$save_LDFLAGS" ]) if test x"[$]$2" = xyes; then m4_if([$4], , :, [$4]) else m4_if([$5], , :, [$5]) fi ])# _LT_LINKER_OPTION # Old name: AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], []) # LT_CMD_MAX_LEN #--------------- AC_DEFUN([LT_CMD_MAX_LEN], [AC_REQUIRE([AC_CANONICAL_HOST])dnl # find the maximum length of command line arguments AC_MSG_CHECKING([the maximum length of command line arguments]) AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl i=0 teststring="ABCD" case $build_os in msdosdjgpp*) # On DJGPP, this test can blow up pretty badly due to problems in libc # (any single argument exceeding 2000 bytes causes a buffer overrun # during glob expansion). Even if it were fixed, the result of this # check would be larger than it should be. lt_cv_sys_max_cmd_len=12288; # 12K is about right ;; gnu*) # Under GNU Hurd, this test is not required because there is # no limit to the length of command line arguments. # Libtool will interpret -1 as no limit whatsoever lt_cv_sys_max_cmd_len=-1; ;; cygwin* | mingw* | cegcc*) # On Win9x/ME, this test blows up -- it succeeds, but takes # about 5 minutes as the teststring grows exponentially. # Worse, since 9x/ME are not pre-emptively multitasking, # you end up with a "frozen" computer, even though with patience # the test eventually succeeds (with a max line length of 256k). # Instead, let's just punt: use the minimum linelength reported by # all of the supported platforms: 8192 (on NT/2K/XP). lt_cv_sys_max_cmd_len=8192; ;; amigaos*) # On AmigaOS with pdksh, this test takes hours, literally. # So we just punt and use a minimum line length of 8192. lt_cv_sys_max_cmd_len=8192; ;; netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) # This has been around since 386BSD, at least. Likely further. 
if test -x /sbin/sysctl; then lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` elif test -x /usr/sbin/sysctl; then lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` else lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs fi # And add a safety zone lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` ;; interix*) # We know the value 262144 and hardcode it with a safety zone (like BSD) lt_cv_sys_max_cmd_len=196608 ;; osf*) # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not # nice to cause kernel panics so lets avoid the loop below. # First set a reasonable default. lt_cv_sys_max_cmd_len=16384 # if test -x /sbin/sysconfig; then case `/sbin/sysconfig -q proc exec_disable_arg_limit` in *1*) lt_cv_sys_max_cmd_len=-1 ;; esac fi ;; sco3.2v5*) lt_cv_sys_max_cmd_len=102400 ;; sysv5* | sco5v6* | sysv4.2uw2*) kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` if test -n "$kargmax"; then lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'` else lt_cv_sys_max_cmd_len=32768 fi ;; *) lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` if test -n "$lt_cv_sys_max_cmd_len"; then lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` else # Make teststring a little bigger before we do anything with it. # a 1K string should be a reasonable start. for i in 1 2 3 4 5 6 7 8 ; do teststring=$teststring$teststring done SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} # If test is not a shell built-in, we'll probably end up computing a # maximum length that is only half of the actual maximum length, but # we can't tell. while { test "X"`$SHELL [$]0 --fallback-echo "X$teststring$teststring" 2>/dev/null` \ = "XX$teststring$teststring"; } >/dev/null 2>&1 && test $i != 17 # 1/2 MB should be enough do i=`expr $i + 1` teststring=$teststring$teststring done # Only check the string length outside the loop. lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` teststring= # Add a significant safety factor because C++ compilers can tack on # massive amounts of additional arguments before passing them to the # linker. It appears as though 1/2 is a usable value. 
lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` fi ;; esac ]) if test -n $lt_cv_sys_max_cmd_len ; then AC_MSG_RESULT($lt_cv_sys_max_cmd_len) else AC_MSG_RESULT(none) fi max_cmd_len=$lt_cv_sys_max_cmd_len _LT_DECL([], [max_cmd_len], [0], [What is the maximum length of a command?]) ])# LT_CMD_MAX_LEN # Old name: AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], []) # _LT_HEADER_DLFCN # ---------------- m4_defun([_LT_HEADER_DLFCN], [AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl ])# _LT_HEADER_DLFCN # _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE, # ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING) # ---------------------------------------------------------------- m4_defun([_LT_TRY_DLOPEN_SELF], [m4_require([_LT_HEADER_DLFCN])dnl if test "$cross_compiling" = yes; then : [$4] else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF [#line __oline__ "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include #endif #include #ifdef RTLD_GLOBAL # define LT_DLGLOBAL RTLD_GLOBAL #else # ifdef DL_GLOBAL # define LT_DLGLOBAL DL_GLOBAL # else # define LT_DLGLOBAL 0 # endif #endif /* We may have to define LT_DLLAZY_OR_NOW in the command line if we find out it does not work in some platform. */ #ifndef LT_DLLAZY_OR_NOW # ifdef RTLD_LAZY # define LT_DLLAZY_OR_NOW RTLD_LAZY # else # ifdef DL_LAZY # define LT_DLLAZY_OR_NOW DL_LAZY # else # ifdef RTLD_NOW # define LT_DLLAZY_OR_NOW RTLD_NOW # else # ifdef DL_NOW # define LT_DLLAZY_OR_NOW DL_NOW # else # define LT_DLLAZY_OR_NOW 0 # endif # endif # endif # endif #endif void fnord() { int i=42;} int main () { void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); int status = $lt_dlunknown; if (self) { if (dlsym (self,"fnord")) status = $lt_dlno_uscore; else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; /* dlclose (self); */ } else puts (dlerror ()); return status; }] _LT_EOF if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null lt_status=$? 
case x$lt_status in x$lt_dlno_uscore) $1 ;; x$lt_dlneed_uscore) $2 ;; x$lt_dlunknown|x*) $3 ;; esac else : # compilation failed $3 fi fi rm -fr conftest* ])# _LT_TRY_DLOPEN_SELF # LT_SYS_DLOPEN_SELF # ------------------ AC_DEFUN([LT_SYS_DLOPEN_SELF], [m4_require([_LT_HEADER_DLFCN])dnl if test "x$enable_dlopen" != xyes; then enable_dlopen=unknown enable_dlopen_self=unknown enable_dlopen_self_static=unknown else lt_cv_dlopen=no lt_cv_dlopen_libs= case $host_os in beos*) lt_cv_dlopen="load_add_on" lt_cv_dlopen_libs= lt_cv_dlopen_self=yes ;; mingw* | pw32* | cegcc*) lt_cv_dlopen="LoadLibrary" lt_cv_dlopen_libs= ;; cygwin*) lt_cv_dlopen="dlopen" lt_cv_dlopen_libs= ;; darwin*) # if libdl is installed we need to link against it AC_CHECK_LIB([dl], [dlopen], [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],[ lt_cv_dlopen="dyld" lt_cv_dlopen_libs= lt_cv_dlopen_self=yes ]) ;; *) AC_CHECK_FUNC([shl_load], [lt_cv_dlopen="shl_load"], [AC_CHECK_LIB([dld], [shl_load], [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"], [AC_CHECK_FUNC([dlopen], [lt_cv_dlopen="dlopen"], [AC_CHECK_LIB([dl], [dlopen], [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"], [AC_CHECK_LIB([svld], [dlopen], [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"], [AC_CHECK_LIB([dld], [dld_link], [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"]) ]) ]) ]) ]) ]) ;; esac if test "x$lt_cv_dlopen" != xno; then enable_dlopen=yes else enable_dlopen=no fi case $lt_cv_dlopen in dlopen) save_CPPFLAGS="$CPPFLAGS" test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" save_LDFLAGS="$LDFLAGS" wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" save_LIBS="$LIBS" LIBS="$lt_cv_dlopen_libs $LIBS" AC_CACHE_CHECK([whether a program can dlopen itself], lt_cv_dlopen_self, [dnl _LT_TRY_DLOPEN_SELF( lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) ]) if test "x$lt_cv_dlopen_self" = xyes; then wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" AC_CACHE_CHECK([whether a statically linked program can dlopen itself], lt_cv_dlopen_self_static, [dnl _LT_TRY_DLOPEN_SELF( lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) ]) fi CPPFLAGS="$save_CPPFLAGS" LDFLAGS="$save_LDFLAGS" LIBS="$save_LIBS" ;; esac case $lt_cv_dlopen_self in yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; *) enable_dlopen_self=unknown ;; esac case $lt_cv_dlopen_self_static in yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; *) enable_dlopen_self_static=unknown ;; esac fi _LT_DECL([dlopen_support], [enable_dlopen], [0], [Whether dlopen is supported]) _LT_DECL([dlopen_self], [enable_dlopen_self], [0], [Whether dlopen of programs is supported]) _LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0], [Whether dlopen of statically linked programs is supported]) ])# LT_SYS_DLOPEN_SELF # Old name: AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], []) # _LT_COMPILER_C_O([TAGNAME]) # --------------------------- # Check to see if options -c and -o are simultaneously supported by compiler. # This macro does not hard code the compiler like AC_PROG_CC_C_O. 
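#
# Illustrative sketch only (not part of libtool): stripped of the boilerplate
# filtering, the probe below amounts to something like this, run in a scratch
# directory (the test source here is a hypothetical stand-in for
# $lt_simple_compile_test_code):
#
#   mkdir out
#   echo 'int main (void) { return 0; }' > conftest.c
#   $CC -c conftest.c -o out/conftest2.o 2> out/conftest.err
#   if test -s out/conftest2.o && test ! -s out/conftest.err; then
#     echo "compiler accepts -c -o"
#   fi
#
# The real check additionally compares conftest.err against the compiler's
# usual boilerplate output, so harmless banner messages do not count as a
# failure.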
m4_defun([_LT_COMPILER_C_O], [m4_require([_LT_DECL_SED])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_TAG_COMPILER])dnl AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)], [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&AS_MESSAGE_LOG_FD echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes fi fi chmod u+w . 2>&AS_MESSAGE_LOG_FD $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* ]) _LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1], [Does compiler simultaneously support -c and -o options?]) ])# _LT_COMPILER_C_O # _LT_COMPILER_FILE_LOCKS([TAGNAME]) # ---------------------------------- # Check to see if we can do hard links to lock some files if needed m4_defun([_LT_COMPILER_FILE_LOCKS], [m4_require([_LT_ENABLE_LOCK])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl _LT_COMPILER_C_O([$1]) hard_links="nottested" if test "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" = no && test "$need_locks" != no; then # do not overwrite the value of need_locks provided by the user AC_MSG_CHECKING([if we can lock with hard links]) hard_links=yes $RM conftest* ln conftest.a conftest.b 2>/dev/null && hard_links=no touch conftest.a ln conftest.a conftest.b 2>&5 || hard_links=no ln conftest.a conftest.b 2>/dev/null && hard_links=no AC_MSG_RESULT([$hard_links]) if test "$hard_links" = no; then AC_MSG_WARN([`$CC' does not support `-c -o', so `make -j' may be unsafe]) need_locks=warn fi else need_locks=no fi _LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?]) ])# _LT_COMPILER_FILE_LOCKS # _LT_CHECK_OBJDIR # ---------------- m4_defun([_LT_CHECK_OBJDIR], [AC_CACHE_CHECK([for objdir], [lt_cv_objdir], [rm -f .libs 2>/dev/null mkdir .libs 2>/dev/null if test -d .libs; then lt_cv_objdir=.libs else # MS-DOS does not allow filenames that begin with a dot. 
lt_cv_objdir=_libs fi rmdir .libs 2>/dev/null]) objdir=$lt_cv_objdir _LT_DECL([], [objdir], [0], [The name of the directory that contains temporary libtool files])dnl m4_pattern_allow([LT_OBJDIR])dnl AC_DEFINE_UNQUOTED(LT_OBJDIR, "$lt_cv_objdir/", [Define to the sub-directory in which libtool stores uninstalled libraries.]) ])# _LT_CHECK_OBJDIR # _LT_LINKER_HARDCODE_LIBPATH([TAGNAME]) # -------------------------------------- # Check hardcoding attributes. m4_defun([_LT_LINKER_HARDCODE_LIBPATH], [AC_MSG_CHECKING([how to hardcode library paths into programs]) _LT_TAGVAR(hardcode_action, $1)= if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" || test -n "$_LT_TAGVAR(runpath_var, $1)" || test "X$_LT_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then # We can hardcode non-existent directories. if test "$_LT_TAGVAR(hardcode_direct, $1)" != no && # If the only mechanism to avoid hardcoding is shlibpath_var, we # have to relink, otherwise we might link with an installed library # when we should be linking with a yet-to-be-installed one ## test "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" != no && test "$_LT_TAGVAR(hardcode_minus_L, $1)" != no; then # Linking always hardcodes the temporary library directory. _LT_TAGVAR(hardcode_action, $1)=relink else # We can link without hardcoding, and we can hardcode nonexisting dirs. _LT_TAGVAR(hardcode_action, $1)=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. _LT_TAGVAR(hardcode_action, $1)=unsupported fi AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)]) if test "$_LT_TAGVAR(hardcode_action, $1)" = relink || test "$_LT_TAGVAR(inherit_rpath, $1)" = yes; then # Fast installation is not supported enable_fast_install=no elif test "$shlibpath_overrides_runpath" = yes || test "$enable_shared" = no; then # Fast installation is not necessary enable_fast_install=needless fi _LT_TAGDECL([], [hardcode_action], [0], [How to hardcode a shared library path into an executable]) ])# _LT_LINKER_HARDCODE_LIBPATH # _LT_CMD_STRIPLIB # ---------------- m4_defun([_LT_CMD_STRIPLIB], [m4_require([_LT_DECL_EGREP]) striplib= old_striplib= AC_MSG_CHECKING([whether stripping libraries is possible]) if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" test -z "$striplib" && striplib="$STRIP --strip-unneeded" AC_MSG_RESULT([yes]) else # FIXME - insert some real tests, host_os isn't really good enough case $host_os in darwin*) if test -n "$STRIP" ; then striplib="$STRIP -x" old_striplib="$STRIP -S" AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) fi ;; *) AC_MSG_RESULT([no]) ;; esac fi _LT_DECL([], [old_striplib], [1], [Commands to strip libraries]) _LT_DECL([], [striplib], [1]) ])# _LT_CMD_STRIPLIB # _LT_SYS_DYNAMIC_LINKER([TAG]) # ----------------------------- # PORTME Fill in your ld.so characteristics m4_defun([_LT_SYS_DYNAMIC_LINKER], [AC_REQUIRE([AC_CANONICAL_HOST])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_OBJDUMP])dnl m4_require([_LT_DECL_SED])dnl AC_MSG_CHECKING([dynamic linker characteristics]) m4_if([$1], [], [ if test "$GCC" = yes; then case $host_os in darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; *) lt_awk_arg="/^libraries:/" ;; esac lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e "s,=/,/,g"` if $ECHO "$lt_search_path_spec" | $GREP ';' >/dev/null ; then # if the path contains ";" then we assume it to be the separator # otherwise default to the 
standard path separator (i.e. ":") - it is # assumed that no part of a normal pathname contains ";" but that should # okay in the real world where ";" in dirpaths is itself problematic. lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e 's/;/ /g'` else lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi # Ok, now we have the path, separated by spaces, we can step through it # and add multilib dir if necessary. lt_tmp_lt_search_path_spec= lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` for lt_sys_path in $lt_search_path_spec; do if test -d "$lt_sys_path/$lt_multi_os_dir"; then lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" else test -d "$lt_sys_path" && \ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" fi done lt_search_path_spec=`$ECHO $lt_tmp_lt_search_path_spec | awk ' BEGIN {RS=" "; FS="/|\n";} { lt_foo=""; lt_count=0; for (lt_i = NF; lt_i > 0; lt_i--) { if ($lt_i != "" && $lt_i != ".") { if ($lt_i == "..") { lt_count++; } else { if (lt_count == 0) { lt_foo="/" $lt_i lt_foo; } else { lt_count--; } } } } if (lt_foo != "") { lt_freq[[lt_foo]]++; } if (lt_freq[[lt_foo]] == 1) { print lt_foo; } }'` sys_lib_search_path_spec=`$ECHO $lt_search_path_spec` else sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" fi]) library_names_spec= libname_spec='lib$name' soname_spec= shrext_cmds=".so" postinstall_cmds= postuninstall_cmds= finish_cmds= finish_eval= shlibpath_var= shlibpath_overrides_runpath=unknown version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" need_lib_prefix=unknown hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments need_version=unknown case $host_os in aix3*) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' shlibpath_var=LIBPATH # AIX 3 has no versioning support, so we append a major version to the name. soname_spec='${libname}${release}${shared_ext}$major' ;; aix[[4-9]]*) version_type=linux need_lib_prefix=no need_version=no hardcode_into_libs=yes if test "$host_cpu" = ia64; then # AIX 5 supports IA64 library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH else # With GCC up to 2.95.x, collect2 would create an import file # for dependence libraries. The import file would start with # the line `#! .'. This would cause the generated library to # depend on `.', always an invalid library. This was fixed in # development snapshots of GCC prior to 3.0. case $host_os in aix4 | aix4.[[01]] | aix4.[[01]].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then : else can_build_shared=no fi ;; esac # AIX (on Power*) has no versioning support, so currently we can not hardcode correct # soname into executable. Probably we can add versioning support to # collect2, so additional links can be useful in future. if test "$aix_use_runtimelinking" = yes; then # If using run time linking (on AIX 4.2 or later) use lib.so # instead of lib.a to let people know that these are not # typical AIX shared libraries. 
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' else # We preserve .a as extension for shared libraries through AIX4.2 # and later when we are not doing run time linking. library_names_spec='${libname}${release}.a $libname.a' soname_spec='${libname}${release}${shared_ext}$major' fi shlibpath_var=LIBPATH fi ;; amigaos*) case $host_cpu in powerpc) # Since July 2007 AmigaOS4 officially supports .so libraries. # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ;; m68k) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$ECHO "X$lib" | $Xsed -e '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; esac ;; beos*) library_names_spec='${libname}${shared_ext}' dynamic_linker="$host_os ld.so" shlibpath_var=LIBRARY_PATH ;; bsdi[[45]]*) version_type=linux need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" # the default ld.so.conf also contains /usr/contrib/lib and # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow # libtool to hard-code these into programs ;; cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=".dll" need_version=no need_lib_prefix=no case $GCC,$host_os in yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \${file}`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" ;; mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' sys_lib_search_path_spec=`$CC -print-search-dirs | $GREP "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then # It is most probably a Windows format PATH printed by # mingw gcc, but we are running on Cygwin. Gcc prints its search # path with ; separators, and with drive letters. 
We can handle the # drive letters (cygwin fileutils understands them), so leave them, # especially as we might pass files found there to a mingw objdump, # which wouldn't understand a cygwinified path. Ahh. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi ;; pw32*) # pw32 DLLs use 'pw' prefix rather than 'lib' library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' ;; esac ;; *) library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib' ;; esac dynamic_linker='Win32 ld.exe' # FIXME: first we should search . and the directory the executable is in shlibpath_var=PATH ;; darwin* | rhapsody*) dynamic_linker="$host_os dyld" version_type=darwin need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' soname_spec='${libname}${release}${major}$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' m4_if([$1], [],[ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"]) sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; dgux*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; freebsd1*) dynamic_linker=no ;; freebsd* | dragonfly*) # DragonFly does not have aout. When/if they implement a new # versioning mechanism, adjust this. if test -x /usr/bin/objformat; then objformat=`/usr/bin/objformat` else case $host_os in freebsd[[123]]*) objformat=aout ;; *) objformat=elf ;; esac fi version_type=freebsd-$objformat case $version_type in freebsd-elf*) library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' need_version=no need_lib_prefix=no ;; freebsd-*) library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' need_version=yes ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in freebsd2*) shlibpath_overrides_runpath=yes ;; freebsd3.[[01]]* | freebsdelf3.[[01]]*) shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; *) # from 4.6 on, and DragonFly shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; esac ;; gnu*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH hardcode_into_libs=yes ;; hpux9* | hpux10* | hpux11*) # Give a soname corresponding to the major version so that dld.sl refuses to # link against other versions. version_type=sunos need_lib_prefix=no need_version=no case $host_cpu in ia64*) shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
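# Editor's sketch (illustrative only; the values below are invented, the real
# ones are computed by ltmain.sh at link time, where $major and $versuffix
# normally carry a leading dot): the *_spec strings assigned throughout this
# case statement are shell templates that are expanded later with eval, e.g.
# the ELF-style library_names_spec used by several branches here:
( libname=libfoo; release=; shared_ext=.so; major=.2; versuffix=.2.0.1
  demo_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
  eval echo "demo library names: $demo_spec" )
# prints: demo library names: libfoo.so.2.0.1 libfoo.so.2 libfoo.so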
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' if test "X$HPUX_IA64_MODE" = X32; then sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" else sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" fi sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; hppa*64*) shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' ;; esac # HP-UX runs *really* slowly unless shared libraries are mode 555. postinstall_cmds='chmod 555 $lib' ;; interix[[3-9]]*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; irix5* | irix6* | nonstopux*) case $host_os in nonstopux*) version_type=nonstopux ;; *) if test "$lt_cv_prog_gnu_ld" = yes; then version_type=linux else version_type=irix fi ;; esac need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' case $host_os in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in # libtool.m4 will add one of these switches to LD *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= libmagic=32-bit;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 libmagic=N32;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 libmagic=64-bit;; *) libsuff= shlibsuff= libmagic=never-match;; esac ;; esac shlibpath_var=LD_LIBRARY${shlibsuff}_PATH shlibpath_overrides_runpath=no sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" hardcode_into_libs=yes ;; # No shared lib support for Linux oldld, aout, or coff. linux*oldld* | linux*aout* | linux*coff*) dynamic_linker=no ;; # This must be Linux ELF. 
linux* | k*bsd*-gnu) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \ LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\"" AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null], [shlibpath_overrides_runpath=yes])]) LDFLAGS=$save_LDFLAGS libdir=$save_libdir # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes # Add ABI-specific directories to the system library path. sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" # Append ld.so.conf contents to the search path if test -f /etc/ld.so.conf; then lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. Since this was broken with cross compilers, # most powerpc-linux boxes support dynamic linking these days and # people can always --disable-shared, the test was removed, and we # assume the GNU/Linux dynamic linker is in use. dynamic_linker='GNU/Linux ld.so' ;; netbsd*) version_type=sunos need_lib_prefix=no need_version=no if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='NetBSD ld.elf_so' fi shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; newsos6) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; *nto* | *qnx*) version_type=qnx need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='ldqnx.so' ;; openbsd*) version_type=sunos sys_lib_dlsearch_path_spec="/usr/lib" need_lib_prefix=no # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
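# Editor's sketch (GNU/Linux with glibc assumed; deliberately simplified, it
# ignores the "include" directives that the awk program above does follow):
# the extra run-time search directories come straight from /etc/ld.so.conf.
# On many distributions that file contains only an include line, so this may
# print an empty list.
if test -f /etc/ld.so.conf; then
  demo_extra=`sed -e 's/#.*//' -e '/^include/d' -e '/^$/d' /etc/ld.so.conf | tr '\n' ' '`
  echo "extra dlsearch dirs (top-level ld.so.conf only): $demo_extra"
fi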
case $host_os in openbsd3.3 | openbsd3.3.*) need_version=yes ;; *) need_version=no ;; esac library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then case $host_os in openbsd2.[[89]] | openbsd2.[[89]].*) shlibpath_overrides_runpath=no ;; *) shlibpath_overrides_runpath=yes ;; esac else shlibpath_overrides_runpath=yes fi ;; os2*) libname_spec='$name' shrext_cmds=".dll" need_lib_prefix=no library_names_spec='$libname${shared_ext} $libname.a' dynamic_linker='OS/2 ld.exe' shlibpath_var=LIBPATH ;; osf3* | osf4* | osf5*) version_type=osf need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" ;; rdos*) dynamic_linker=no ;; solaris*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes # ldd complains unless libraries are executable postinstall_cmds='chmod +x $lib' ;; sunos4*) version_type=sunos library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes if test "$with_gnu_ld" = yes; then need_lib_prefix=no fi need_version=yes ;; sysv4 | sysv4.3*) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH case $host_vendor in sni) shlibpath_overrides_runpath=no need_lib_prefix=no runpath_var=LD_RUN_PATH ;; siemens) need_lib_prefix=no ;; motorola) need_lib_prefix=no need_version=no shlibpath_overrides_runpath=no sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ;; esac ;; sysv4*MP*) if test -d /usr/nec ;then version_type=linux library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' soname_spec='$libname${shared_ext}.$major' shlibpath_var=LD_LIBRARY_PATH fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) version_type=freebsd-elf need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test "$with_gnu_ld" = yes; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ;; esac fi sys_lib_dlsearch_path_spec='/usr/lib' ;; tpf*) # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; uts4*) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac AC_MSG_RESULT([$dynamic_linker]) test "$dynamic_linker" = no && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test "$GCC" = yes; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" fi if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" fi _LT_DECL([], [variables_saved_for_relink], [1], [Variables whose values should be saved in libtool wrapper scripts and restored at link time]) _LT_DECL([], [need_lib_prefix], [0], [Do we need the "lib" prefix for modules?]) _LT_DECL([], [need_version], [0], [Do we need a version for libraries?]) _LT_DECL([], [version_type], [0], [Library versioning type]) _LT_DECL([], [runpath_var], [0], [Shared library runtime path variable]) _LT_DECL([], [shlibpath_var], [0],[Shared library path variable]) _LT_DECL([], [shlibpath_overrides_runpath], [0], [Is shlibpath searched before the hard-coded library search path?]) _LT_DECL([], [libname_spec], [1], [Format of library name prefix]) _LT_DECL([], [library_names_spec], [1], [[List of archive names. First name is the real one, the rest are links. The last name is the one that the linker finds with -lNAME]]) _LT_DECL([], [soname_spec], [1], [[The coded name of the library, if different from the real name]]) _LT_DECL([], [postinstall_cmds], [2], [Command to use after installation of a shared archive]) _LT_DECL([], [postuninstall_cmds], [2], [Command to use after uninstallation of a shared archive]) _LT_DECL([], [finish_cmds], [2], [Commands used to finish a libtool library installation in a directory]) _LT_DECL([], [finish_eval], [1], [[As "finish_cmds", except a single script fragment to be evaled but not shown]]) _LT_DECL([], [hardcode_into_libs], [0], [Whether we should hardcode library paths into libraries]) _LT_DECL([], [sys_lib_search_path_spec], [2], [Compile-time system search path for libraries]) _LT_DECL([], [sys_lib_dlsearch_path_spec], [2], [Run-time system search path for libraries]) ])# _LT_SYS_DYNAMIC_LINKER # _LT_PATH_TOOL_PREFIX(TOOL) # -------------------------- # find a file program which can recognize shared library AC_DEFUN([_LT_PATH_TOOL_PREFIX], [m4_require([_LT_DECL_EGREP])dnl AC_MSG_CHECKING([for $1]) AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, [case $MAGIC_CMD in [[\\/*] | ?:[\\/]*]) lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. ;; *) lt_save_MAGIC_CMD="$MAGIC_CMD" lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR dnl $ac_dummy forces splitting on constant user-supplied paths. dnl POSIX.2 word splitting is done only on the output of word expansions, dnl not every word. This closes a longstanding sh security hole. ac_dummy="m4_if([$2], , $PATH, [$2])" for ac_dir in $ac_dummy; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. 
if test -f $ac_dir/$1; then lt_cv_path_MAGIC_CMD="$ac_dir/$1" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS="$lt_save_ifs" MAGIC_CMD="$lt_save_MAGIC_CMD" ;; esac]) MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if test -n "$MAGIC_CMD"; then AC_MSG_RESULT($MAGIC_CMD) else AC_MSG_RESULT(no) fi _LT_DECL([], [MAGIC_CMD], [0], [Used to examine libraries when file_magic_cmd begins with "file"])dnl ])# _LT_PATH_TOOL_PREFIX # Old name: AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], []) # _LT_PATH_MAGIC # -------------- # find a file program which can recognize a shared library m4_defun([_LT_PATH_MAGIC], [_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) if test -z "$lt_cv_path_MAGIC_CMD"; then if test -n "$ac_tool_prefix"; then _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) else MAGIC_CMD=: fi fi ])# _LT_PATH_MAGIC # LT_PATH_LD # ---------- # find the pathname to the GNU or non-GNU linker AC_DEFUN([LT_PATH_LD], [AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_CANONICAL_BUILD])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_DECL_EGREP])dnl AC_ARG_WITH([gnu-ld], [AS_HELP_STRING([--with-gnu-ld], [assume the C compiler uses GNU ld @<:@default=no@:>@])], [test "$withval" = no || with_gnu_ld=yes], [with_gnu_ld=no])dnl ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. AC_MSG_CHECKING([for ld used by $CC]) case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [[\\/]]* | ?:[[\\/]]*) re_direlt='/[[^/]][[^/]]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then AC_MSG_CHECKING([for GNU ld]) else AC_MSG_CHECKING([for non-GNU ld]) fi AC_CACHE_VAL(lt_cv_path_LD, [if test -z "$LD"; then lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. 
# Break only if it was the GNU/non-GNU ld that we prefer. case `"$lt_cv_path_LD" -v 2>&1 &1 /dev/null 2>&1; then lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' else lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' lt_cv_file_magic_cmd='$OBJDUMP -f' fi ;; cegcc) # use the weaker test based on 'objdump'. See mingw*. lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' lt_cv_file_magic_cmd='$OBJDUMP -f' ;; darwin* | rhapsody*) lt_cv_deplibs_check_method=pass_all ;; freebsd* | dragonfly*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then case $host_cpu in i*86 ) # Not sure whether the presence of OpenBSD here was a mistake. # Let's accept both of them until this is cleared up. lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` ;; esac else lt_cv_deplibs_check_method=pass_all fi ;; gnu*) lt_cv_deplibs_check_method=pass_all ;; hpux10.20* | hpux11*) lt_cv_file_magic_cmd=/usr/bin/file case $host_cpu in ia64*) lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so ;; hppa*64*) [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - PA-RISC [0-9].[0-9]'] lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl ;; *) lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]].[[0-9]]) shared library' lt_cv_file_magic_test_file=/usr/lib/libc.sl ;; esac ;; interix[[3-9]]*) # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$' ;; irix5* | irix6* | nonstopux*) case $LD in *-32|*"-32 ") libmagic=32-bit;; *-n32|*"-n32 ") libmagic=N32;; *-64|*"-64 ") libmagic=64-bit;; *) libmagic=never-match;; esac lt_cv_deplibs_check_method=pass_all ;; # This must be Linux ELF. 
linux* | k*bsd*-gnu) lt_cv_deplibs_check_method=pass_all ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' fi ;; newos6*) lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=/usr/lib/libnls.so ;; *nto* | *qnx*) lt_cv_deplibs_check_method=pass_all ;; openbsd*) if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' fi ;; osf3* | osf4* | osf5*) lt_cv_deplibs_check_method=pass_all ;; rdos*) lt_cv_deplibs_check_method=pass_all ;; solaris*) lt_cv_deplibs_check_method=pass_all ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) lt_cv_deplibs_check_method=pass_all ;; sysv4 | sysv4.3*) case $host_vendor in motorola) lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` ;; ncr) lt_cv_deplibs_check_method=pass_all ;; sequent) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' ;; sni) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" lt_cv_file_magic_test_file=/lib/libc.so ;; siemens) lt_cv_deplibs_check_method=pass_all ;; pc) lt_cv_deplibs_check_method=pass_all ;; esac ;; tpf*) lt_cv_deplibs_check_method=pass_all ;; esac ]) file_magic_cmd=$lt_cv_file_magic_cmd deplibs_check_method=$lt_cv_deplibs_check_method test -z "$deplibs_check_method" && deplibs_check_method=unknown _LT_DECL([], [deplibs_check_method], [1], [Method to check whether dependent libraries are shared objects]) _LT_DECL([], [file_magic_cmd], [1], [Command to use when deplibs_check_method == "file_magic"]) ])# _LT_CHECK_MAGIC_METHOD # LT_PATH_NM # ---------- # find the pathname to a BSD- or MS-compatible name lister AC_DEFUN([LT_PATH_NM], [AC_REQUIRE([AC_PROG_CC])dnl AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM, [if test -n "$NM"; then # Let the user override the test. lt_cv_path_NM="$NM" else lt_nm_to_check="${ac_tool_prefix}nm" if test -n "$ac_tool_prefix" && test "$build" = "$host"; then lt_nm_to_check="$lt_nm_to_check nm" fi for lt_tmp_nm in $lt_nm_to_check; do lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. tmp_nm="$ac_dir/$lt_tmp_nm" if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then # Check to see if the nm accepts a BSD-compat flag. 
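# Editor's sketch of what a "file_magic" entry above amounts to (the path is
# only an example and the pattern is looser than the host-specific regexes
# used here): run `file` on a candidate and grep its description.
demo_candidate=/lib/libc.so.6         # example path; pick any shared library
if test -f "$demo_candidate" &&
   /usr/bin/file "$demo_candidate" 2>/dev/null | grep 'shared object' >/dev/null; then
  echo "$demo_candidate is recognized as a shared library"
fi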
# Adding the `sed 1q' prevents false positives on HP-UX, which says: # nm: unknown option "B" ignored # Tru64's nm complains that /dev/null is an invalid object file case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in */dev/null* | *'Invalid file or object type'*) lt_cv_path_NM="$tmp_nm -B" break ;; *) case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in */dev/null*) lt_cv_path_NM="$tmp_nm -p" break ;; *) lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but continue # so that we can try to find one that supports BSD flags ;; esac ;; esac fi done IFS="$lt_save_ifs" done : ${lt_cv_path_NM=no} fi]) if test "$lt_cv_path_NM" != "no"; then NM="$lt_cv_path_NM" else # Didn't find any BSD compatible name lister, look for dumpbin. AC_CHECK_TOOLS(DUMPBIN, ["dumpbin -symbols" "link -dump -symbols"], :) AC_SUBST([DUMPBIN]) if test "$DUMPBIN" != ":"; then NM="$DUMPBIN" fi fi test -z "$NM" && NM=nm AC_SUBST([NM]) _LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface], [lt_cv_nm_interface="BSD nm" echo "int some_variable = 0;" > conftest.$ac_ext (eval echo "\"\$as_me:__oline__: $ac_compile\"" >&AS_MESSAGE_LOG_FD) (eval "$ac_compile" 2>conftest.err) cat conftest.err >&AS_MESSAGE_LOG_FD (eval echo "\"\$as_me:__oline__: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD) (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) cat conftest.err >&AS_MESSAGE_LOG_FD (eval echo "\"\$as_me:__oline__: output\"" >&AS_MESSAGE_LOG_FD) cat conftest.out >&AS_MESSAGE_LOG_FD if $GREP 'External.*some_variable' conftest.out > /dev/null; then lt_cv_nm_interface="MS dumpbin" fi rm -f conftest*]) ])# LT_PATH_NM # Old names: AU_ALIAS([AM_PROG_NM], [LT_PATH_NM]) AU_ALIAS([AC_PROG_NM], [LT_PATH_NM]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AM_PROG_NM], []) dnl AC_DEFUN([AC_PROG_NM], []) # LT_LIB_M # -------- # check for math library AC_DEFUN([LT_LIB_M], [AC_REQUIRE([AC_CANONICAL_HOST])dnl LIBM= case $host in *-*-beos* | *-*-cygwin* | *-*-pw32* | *-*-darwin*) # These system don't have libm, or don't need it ;; *-ncr-sysv4.3*) AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw") AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm") ;; *) AC_CHECK_LIB(m, cos, LIBM="-lm") ;; esac AC_SUBST([LIBM]) ])# LT_LIB_M # Old name: AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_CHECK_LIBM], []) # _LT_COMPILER_NO_RTTI([TAGNAME]) # ------------------------------- m4_defun([_LT_COMPILER_NO_RTTI], [m4_require([_LT_TAG_COMPILER])dnl _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= if test "$GCC" = yes; then _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], lt_cv_prog_compiler_rtti_exceptions, [-fno-rtti -fno-exceptions], [], [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) fi _LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1], [Compiler flag to turn off builtin functions]) ])# _LT_COMPILER_NO_RTTI # _LT_CMD_GLOBAL_SYMBOLS # ---------------------- m4_defun([_LT_CMD_GLOBAL_SYMBOLS], [AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([LT_PATH_NM])dnl AC_REQUIRE([LT_PATH_LD])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_TAG_COMPILER])dnl # Check for command to grab the raw symbol name followed by C symbol from nm. 
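# Editor's sketch of the idea behind the LT_LIB_M probe above (hand-rolled,
# without the autoconf cache machinery; the demo_* names are invented): see
# whether cos() can be linked with -lm.
cat > demo_libm.c <<'_DEMO_EOF'
double cos(double);
int main(void) { return (int) cos(0.0); }
_DEMO_EOF
if ${CC-cc} demo_libm.c -lm -o demo_libm 2>/dev/null; then
  echo "LIBM=-lm"
fi
rm -f demo_libm.c demo_libm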
AC_MSG_CHECKING([command to parse $NM output from $compiler object]) AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], [ # These are sane defaults that work on at least a few old systems. # [They come from Ultrix. What could be older than Ultrix?!! ;)] # Character class describing NM global symbol codes. symcode='[[BCDEGRST]]' # Regexp to match symbols that can be accessed directly from C. sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' # Define system-specific variables. case $host_os in aix*) symcode='[[BCDT]]' ;; cygwin* | mingw* | pw32* | cegcc*) symcode='[[ABCDGISTW]]' ;; hpux*) if test "$host_cpu" = ia64; then symcode='[[ABCDEGRST]]' fi ;; irix* | nonstopux*) symcode='[[BCDEGRST]]' ;; osf*) symcode='[[BCDEGQRST]]' ;; solaris*) symcode='[[BDRT]]' ;; sco3.2v5*) symcode='[[DT]]' ;; sysv4.2uw2*) symcode='[[DT]]' ;; sysv5* | sco5v6* | unixware* | OpenUNIX*) symcode='[[ABDT]]' ;; sysv4) symcode='[[DFNSTU]]' ;; esac # If we're using GNU nm, then use its standard symbol codes. case `$NM -V 2>&1` in *GNU* | *'with BFD'*) symcode='[[ABCDGIRSTW]]' ;; esac # Transform an extracted symbol line into a proper C declaration. # Some systems (esp. on ia64) link data and code symbols differently, # so use this general approach. lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" # Transform an extracted symbol line into symbol name and symbol address lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p'" lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"lib\2\", (void *) \&\2},/p'" # Handle CRLF in mingw tool chain opt_cr= case $build_os in mingw*) opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp ;; esac # Try without a prefix underscore, then with it. for ac_symprfx in "" "_"; do # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. symxfrm="\\1 $ac_symprfx\\2 \\2" # Write the raw and C identifiers. if test "$lt_cv_nm_interface" = "MS dumpbin"; then # Fake it for dumpbin and say T for any non-static function # and D for any global variable. # Also find C++ and __fastcall symbols from MSVC++, # which start with @ or ?. lt_cv_sys_global_symbol_pipe="$AWK ['"\ " {last_section=section; section=\$ 3};"\ " /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ " \$ 0!~/External *\|/{next};"\ " / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ " {if(hide[section]) next};"\ " {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\ " {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ " s[1]~/^[@?]/{print s[1], s[1]; next};"\ " s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ " ' prfx=^$ac_symprfx]" else lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" fi # Check to see that the pipe works correctly. pipe_works=no rm -f conftest* cat > conftest.$ac_ext <<_LT_EOF #ifdef __cplusplus extern "C" { #endif char nm_test_var; void nm_test_func(void); void nm_test_func(void){} #ifdef __cplusplus } #endif int main(){nm_test_var='a';nm_test_func();return(0);} _LT_EOF if AC_TRY_EVAL(ac_compile); then # Now try to grab the symbols. 
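# Editor's illustration (standalone shell with invented demo_* names; inside
# this m4 file bracket expressions are written doubled, e.g. [[BCDEGRST]],
# only because [ and ] are m4 quote characters): given BSD-style nm output of
# the form "ADDRESS CODE name", the pipe built above emits "CODE name C-name".
cat > demo_sym.c <<'_DEMO_EOF'
int demo_var = 1;
void demo_func(void) {}
_DEMO_EOF
if ${CC-cc} -c demo_sym.c -o demo_sym.o 2>/dev/null; then
  ${NM-nm} demo_sym.o |
    sed -n -e 's/^.* \([BCDEGRST]\)  *\([_A-Za-z][_A-Za-z0-9]*\)$/\1 \2 \2/p'
fi
# typically prints (an extra leading underscore appears on a.out/Mach-O hosts,
# which is what the $ac_symprfx loop takes care of):
#   T demo_func demo_func
#   D demo_var demo_var
rm -f demo_sym.c demo_sym.o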
nlist=conftest.nm if AC_TRY_EVAL(NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) && test -s "$nlist"; then # Try sorting and uniquifying the output. if sort "$nlist" | uniq > "$nlist"T; then mv -f "$nlist"T "$nlist" else rm -f "$nlist"T fi # Make sure that we snagged all the symbols we need. if $GREP ' nm_test_var$' "$nlist" >/dev/null; then if $GREP ' nm_test_func$' "$nlist" >/dev/null; then cat <<_LT_EOF > conftest.$ac_ext #ifdef __cplusplus extern "C" { #endif _LT_EOF # Now generate the symbol file. eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' cat <<_LT_EOF >> conftest.$ac_ext /* The mapping between symbol names and symbols. */ const struct { const char *name; void *address; } lt__PROGRAM__LTX_preloaded_symbols[[]] = { { "@PROGRAM@", (void *) 0 }, _LT_EOF $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext cat <<\_LT_EOF >> conftest.$ac_ext {0, (void *) 0} }; /* This works around a problem in FreeBSD linker */ #ifdef FREEBSD_WORKAROUND static const void *lt_preloaded_setup() { return lt__PROGRAM__LTX_preloaded_symbols; } #endif #ifdef __cplusplus } #endif _LT_EOF # Now try linking the two files. mv conftest.$ac_objext conftstm.$ac_objext lt_save_LIBS="$LIBS" lt_save_CFLAGS="$CFLAGS" LIBS="conftstm.$ac_objext" CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then pipe_works=yes fi LIBS="$lt_save_LIBS" CFLAGS="$lt_save_CFLAGS" else echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD fi else echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD fi else echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD fi else echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD cat conftest.$ac_ext >&5 fi rm -rf conftest* conftst* # Do not use the global_symbol_pipe unless it works. if test "$pipe_works" = yes; then break else lt_cv_sys_global_symbol_pipe= fi done ]) if test -z "$lt_cv_sys_global_symbol_pipe"; then lt_cv_sys_global_symbol_to_cdecl= fi if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then AC_MSG_RESULT(failed) else AC_MSG_RESULT(ok) fi _LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1], [Take the output of nm and produce a listing of raw symbols and C names]) _LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1], [Transform the output of nm in a proper C declaration]) _LT_DECL([global_symbol_to_c_name_address], [lt_cv_sys_global_symbol_to_c_name_address], [1], [Transform the output of nm in a C name address pair]) _LT_DECL([global_symbol_to_c_name_address_lib_prefix], [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1], [Transform the output of nm in a C name address pair when lib prefix is needed]) ]) # _LT_CMD_GLOBAL_SYMBOLS # _LT_COMPILER_PIC([TAGNAME]) # --------------------------- m4_defun([_LT_COMPILER_PIC], [m4_require([_LT_TAG_COMPILER])dnl _LT_TAGVAR(lt_prog_compiler_wl, $1)= _LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_static, $1)= AC_MSG_CHECKING([for $compiler option to produce PIC]) m4_if([$1], [CXX], [ # C++ specific cases for pic, static, wl, etc. if test "$GXX" = yes; then _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' case $host_os in aix*) # All AIX code is PIC. 
if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the `-m68020' flag to GCC prevents building anything better, # like `-m68040'. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' ;; *djgpp*) # DJGPP does not support shared libraries at all _LT_TAGVAR(lt_prog_compiler_pic, $1)= ;; interix[[3-9]]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic fi ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac else case $host_os in aix[[4-9]]*) # All AIX code is PIC. if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' else _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' fi ;; chorus*) case $cc_basename in cxch68*) # Green Hills C++ Compiler # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" ;; esac ;; dgux*) case $cc_basename in ec++*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ;; ghcx*) # Green Hills C++ Compiler _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ;; *) ;; esac ;; freebsd* | dragonfly*) # FreeBSD uses GNU C++ ;; hpux9* | hpux10* | hpux11*) case $cc_basename in CC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' if test "$host_cpu" != ia64; then _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' fi ;; aCC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' ;; esac ;; *) ;; esac ;; interix*) # This is c89, which is MS Visual C++ (no shared libs) # Anyone wants to do a port? ;; irix5* | irix6* | nonstopux*) case $cc_basename in CC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' # CC pic flag -KPIC is the default. 
;; *) ;; esac ;; linux* | k*bsd*-gnu) case $cc_basename in KCC*) # KAI C++ Compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; ecpc* ) # old Intel C++ for x86_64 which still supported -KPIC. _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; icpc* ) # Intel C++, used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; pgCC* | pgcpp*) # Portland Group C++ compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; cxx*) # Compaq C++ # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. _LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; xlc* | xlC*) # IBM XL 8.0 on PPC _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ;; esac ;; esac ;; lynxos*) ;; m88k*) ;; mvs*) case $cc_basename in cxx*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' ;; *) ;; esac ;; netbsd*) ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' ;; RCC*) # Rational C++ 2.4.1 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ;; cxx*) # Digital/Compaq C++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. _LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; *) ;; esac ;; psos*) ;; solaris*) case $cc_basename in CC*) # Sun C++ 4.2, 5.x and Centerline C++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ;; gcx*) # Green Hills C++ Compiler _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' ;; *) ;; esac ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; lcc*) # Lucid _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ;; *) ;; esac ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) case $cc_basename in CC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ;; *) ;; esac ;; vxworks*) ;; *) _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; esac fi ], [ if test "$GCC" = yes; then _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' case $host_os in aix*) # All AIX code is PIC. 
if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the `-m68020' flag to GCC prevents building anything better, # like `-m68040'. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) # +Z the default ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac ;; interix[[3-9]]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; msdosdjgpp*) # Just because we use GCC doesn't mean we suddenly get shared libraries # on systems that don't support them. _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no enable_shared=no ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic fi ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac else # PORTME Check for flag to pass linker flags through the system compiler. case $host_os in aix*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' else _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' fi ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; hpux9* | hpux10* | hpux11*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but # not for PA HP-UX. case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' ;; esac # Is there a better lt_prog_compiler_static that works with the bundled CC? _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' ;; irix5* | irix6* | nonstopux*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # PIC (with -KPIC) is the default. _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; linux* | k*bsd*-gnu) case $cc_basename in # old Intel for x86_64 which still supported -KPIC. ecc*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; # icc used to be incompatible with GCC. 
# ICC 10 doesn't accept -KPIC any more. icc* | ifort*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; # Lahey Fortran 8.1. lf95*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared' _LT_TAGVAR(lt_prog_compiler_static, $1)='--static' ;; pgcc* | pgf77* | pgf90* | pgf95*) # Portland Group compilers (*not* the Pentium gcc compiler, # which looks to be a dead project) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; ccc*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # All Alpha code is PIC. _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; xl*) # IBM XL C 8.0/Fortran 10.1 on PPC _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C 5.9 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ;; *Sun\ F*) # Sun Fortran 8.3 passes all unrecognized flags to the linker _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='' ;; esac ;; esac ;; newsos6) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; osf3* | osf4* | osf5*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # All OSF/1 code is PIC. _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; rdos*) _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; solaris*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' case $cc_basename in f77* | f90* | f95*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; *) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; esac ;; sunos4*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; sysv4 | sysv4.2uw2* | sysv4.3*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; sysv4*MP*) if test -d /usr/nec ;then _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' fi ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; unicos*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; uts4*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; *) _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; esac fi ]) case $host_os in # For platforms which do not support PIC, -DPIC is meaningless: *djgpp*) _LT_TAGVAR(lt_prog_compiler_pic, $1)= ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])" ;; esac AC_MSG_RESULT([$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) _LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], [How to pass a linker flag through the compiler]) # # Check to make sure the PIC flag actually 
works. # if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works], [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)], [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [], [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in "" | " "*) ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;; esac], [_LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) fi _LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1], [Additional compiler flags for building library objects]) # # Check to make sure the static flag actually works. # wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\" _LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1), $lt_tmp_static_flag, [], [_LT_TAGVAR(lt_prog_compiler_static, $1)=]) _LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1], [Compiler flag to prevent dynamic linking]) ])# _LT_COMPILER_PIC # _LT_LINKER_SHLIBS([TAGNAME]) # ---------------------------- # See if the linker supports building shared libraries. m4_defun([_LT_LINKER_SHLIBS], [AC_REQUIRE([LT_PATH_LD])dnl AC_REQUIRE([LT_PATH_NM])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl m4_require([_LT_TAG_COMPILER])dnl AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) m4_if([$1], [CXX], [ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' case $host_os in aix[[4-9]]*) # If we're using GNU nm, then we don't want the "-C" option. 
# -C means demangle to AIX nm, but means don't demangle with GNU nm if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' else _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' fi ;; pw32*) _LT_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds" ;; cygwin* | mingw* | cegcc*) _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;/^.*[[ ]]__nm__/s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' ;; *) _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ;; esac _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] ], [ runpath_var= _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_cmds, $1)= _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(compiler_needs_object, $1)=no _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(old_archive_from_new_cmds, $1)= _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)= _LT_TAGVAR(thread_safe_flag_spec, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= # include_expsyms should be a list of space-separated symbols to be *always* # included in the symbol list _LT_TAGVAR(include_expsyms, $1)= # exclude_expsyms can be an extended regexp of symbols to exclude # it will be wrapped by ` (' and `)$', so one must not match beginning or # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', # as well as any symbol that contains `d'. _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out # platforms (ab)use it in PIC code, but their linkers get confused if # the symbol is explicitly referenced. Since portable code cannot # rely on this symbol name, it's probably fine to never include it in # preloaded symbol tables. # Exclude shared library initialization/finalization symbols. dnl Note also adjust exclude_expsyms for C++ above. extract_expsyms_cmds= case $host_os in cygwin* | mingw* | pw32* | cegcc*) # FIXME: the MSVC++ port hasn't been tested in a loooong time # When not using gcc, we currently assume that we are using # Microsoft Visual C++. 
if test "$GCC" != yes; then with_gnu_ld=no fi ;; interix*) # we just hope/assume this is gcc and not c89 (= MSVC++) with_gnu_ld=yes ;; openbsd*) with_gnu_ld=no ;; esac _LT_TAGVAR(ld_shlibs, $1)=yes if test "$with_gnu_ld" = yes; then # If archive_cmds runs LD, not CC, wlarc should be empty wlarc='${wl}' # Set some defaults for GNU ld with shared library support. These # are reset later if shared libraries are not supported. Putting them # here allows them to be overridden if necessary. runpath_var=LD_RUN_PATH _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' # ancient GNU ld didn't support --whole-archive et. al. if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' else _LT_TAGVAR(whole_archive_flag_spec, $1)= fi supports_anon_versioning=no case `$LD -v 2>&1` in *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... *\ 2.11.*) ;; # other 2.11 versions *) supports_anon_versioning=yes ;; esac # See if GNU ld supports shared libraries. case $host_os in aix[[3-9]]*) # On AIX/PPC, the GNU linker is very broken if test "$host_cpu" != ia64; then _LT_TAGVAR(ld_shlibs, $1)=no cat <<_LT_EOF 1>&2 *** Warning: the GNU linker, at least up to release 2.9.1, is reported *** to be unable to reliably create shared libraries on AIX. *** Therefore, libtool is disabling shared libraries support. If you *** really care for shared libraries, you may want to modify your PATH *** so that a non-GNU linker is found, and then restart. _LT_EOF fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='' ;; m68k) _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes ;; esac ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(allow_undefined_flag, $1)=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; cygwin* | mingw* | pw32* | cegcc*) # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, # as there is no search path for DLLs. 
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file (1st line # is EXPORTS), use it as is; otherwise, prepend... _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; interix[[3-9]]*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
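# A worked example of the image-base arithmetic used in the archive_cmds just
# below (editorial note): the expression
#
#   expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280
#
# yields N in 0..2047, scales it by 262144 (256 KiB) and offsets it by
# 1342177280 (0x50000000), so the chosen base is 256 KiB-aligned and lies in
# the range 0x50000000 .. 0x6FFC0000 (since 2047 * 262144 = 0x1FFC0000).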
_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; gnu* | linux* | tpf* | k*bsd*-gnu) tmp_diet=no if test "$host_os" = linux-dietlibc; then case $cc_basename in diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) esac fi if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ && test "$tmp_diet" = no then tmp_addflag= tmp_sharedflag='-shared' case $cc_basename,$host_cpu in pgcc*) # Portland Group C compiler _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' tmp_addflag=' $pic_flag' ;; pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' tmp_addflag=' $pic_flag -Mnomain' ;; ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 tmp_addflag=' -i_dynamic' ;; efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 tmp_addflag=' -i_dynamic -nofor_main' ;; ifc* | ifort*) # Intel Fortran compiler tmp_addflag=' -nofor_main' ;; lf95*) # Lahey Fortran 8.1 _LT_TAGVAR(whole_archive_flag_spec, $1)= tmp_sharedflag='--shared' ;; xl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below) tmp_sharedflag='-qmkshrobj' tmp_addflag= ;; esac case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C 5.9 _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' _LT_TAGVAR(compiler_needs_object, $1)=yes tmp_sharedflag='-G' ;; *Sun\ F*) # Sun Fortran 8.3 tmp_sharedflag='-G' ;; esac _LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' if test "x$supports_anon_versioning" = xyes; then _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' fi case $cc_basename in xlf*) # IBM XL Fortran 10.1 on PPC cannot create shared libs itself _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='-rpath $libdir' _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' if test "x$supports_anon_versioning" = xyes; then _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $LD -shared $libobjs 
$deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' fi ;; esac else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' wlarc= else _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' fi ;; solaris*) if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then _LT_TAGVAR(ld_shlibs, $1)=no cat <<_LT_EOF 1>&2 *** Warning: The releases 2.8.* of the GNU linker cannot reliably *** create shared libraries on Solaris systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.9.1 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) case `$LD -v 2>&1` in *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) _LT_TAGVAR(ld_shlibs, $1)=no cat <<_LT_EOF 1>&2 *** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not *** reliably create shared libraries on SCO systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.16.91.0.3 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF ;; *) # For security reasons, it is highly recommended that you always # use absolute paths for naming shared libraries, and exclude the # DT_RUNPATH tag from executables and libraries. But doing so # requires that you compile everything twice, which is a pain. 
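# Editorial aside on the DT_RUNPATH remark above: whether a library or
# executable ends up carrying such a tag can be checked on ELF systems with
#
#   readelf -d .libs/libfoo.so.1 | grep -E 'R(UN)?PATH'
#
# (hypothetical library name; this prints the RPATH/RUNPATH entries, if any,
# that the hardcode_libdir_flag_spec settings in this file encode at link
# time).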
if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; sunos4*) _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' wlarc= _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac if test "$_LT_TAGVAR(ld_shlibs, $1)" = no; then runpath_var= _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= fi else # PORTME fill in a description of your system's linker (not GNU ld) case $host_os in aix3*) _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=yes _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' # Note: this linker hardcodes the directories in LIBPATH if there # are no directories specified by -L. _LT_TAGVAR(hardcode_minus_L, $1)=yes if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then # Neither direct hardcoding nor static linking is supported with a # broken collect2. _LT_TAGVAR(hardcode_direct, $1)=unsupported fi ;; aix[[4-9]]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag="" else # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to AIX nm, but means don't demangle with GNU nm if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' else _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' fi aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) for ld_flag in $LDFLAGS; do if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then aix_use_runtimelinking=yes break fi done ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
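# Editorial usage sketch for the TOC-overflow remark above: on AIX the
# suggested workarounds are typically applied at configure time, for example
#
#   ./configure CFLAGS="-mminimal-toc" CXXFLAGS="-mminimal-toc"
#
# or, if that is still not enough,
#
#   ./configure LDFLAGS="-Wl,-bbigtoc"
#
# (illustrative invocations only; any package-specific options are unaffected).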
_LT_TAGVAR(archive_cmds, $1)='' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' if test "$GCC" = yes; then case $host_os in aix4.[[012]]|aix4.[[012]].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 _LT_TAGVAR(hardcode_direct, $1)=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)= fi ;; esac shared_flag='-shared' if test "$aix_use_runtimelinking" = yes; then shared_flag="$shared_flag "'${wl}-G' fi else # not using gcc if test "$host_cpu" = ia64; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test "$aix_use_runtimelinking" = yes; then shared_flag='${wl}-G' else shared_flag='${wl}-bM:SRE' fi fi fi _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to export. _LT_TAGVAR(always_export_symbols, $1)=yes if test "$aix_use_runtimelinking" = yes; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. _LT_TAGVAR(allow_undefined_flag, $1)='-berok' # Determine the default libpath from the value encoded in an # empty executable. _LT_SYS_MODULE_PATH_AIX _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" else if test "$host_cpu" = ia64; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. _LT_SYS_MODULE_PATH_AIX _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' # Exported symbols can be pulled into shared objects from archives _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' _LT_TAGVAR(archive_cmds_need_lc, $1)=yes # This is similar to how AIX traditionally builds its shared libraries. 
_LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' fi fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='' ;; m68k) _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes ;; esac ;; bsdi[[45]]*) _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic ;; cygwin* | mingw* | pw32* | cegcc*) # When not using gcc, we currently assume that we are using # Microsoft Visual C++. # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `$ECHO "X$deplibs" | $Xsed -e '\''s/ -lc$//'\''` -link -dll~linknames=' # The linker will automatically build a .lib file if we build a DLL. _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' # FIXME: Should let the user specify the lib program. _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' _LT_TAGVAR(fix_srcfile_path, $1)='`cygpath -w "$srcfile"`' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ;; darwin* | rhapsody*) _LT_DARWIN_LINKER_FEATURES($1) ;; dgux*) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; freebsd1*) _LT_TAGVAR(ld_shlibs, $1)=no ;; # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor # support. Future versions do this automatically, but an explicit c++rt0.o # does not break anything, and helps significantly (at the cost of a little # extra space). freebsd2.2*) _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; # Unfortunately, older versions of FreeBSD 2 do not have this feature. freebsd2*) _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
freebsd* | dragonfly*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; hpux9*) if test "$GCC" = yes; then _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' else _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(hardcode_direct, $1)=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' ;; hpux10*) if test "$GCC" = yes -a "$with_gnu_ld" = no; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' fi if test "$with_gnu_ld" = no; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='+b $libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. _LT_TAGVAR(hardcode_minus_L, $1)=yes fi ;; hpux11*) if test "$GCC" = yes -a "$with_gnu_ld" = no; then case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ;; esac else case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ;; esac fi if test "$with_gnu_ld" = no; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: case $host_cpu in hppa*64*|ia64*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. 
_LT_TAGVAR(hardcode_minus_L, $1)=yes ;; esac fi ;; irix5* | irix6* | nonstopux*) if test "$GCC" = yes; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' # Try to use the -exported_symbol ld option, if it does not # work, assume that -exports_file does not work either and # implicitly export all symbols. save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" AC_LINK_IFELSE(int foo(void) {}, _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' ) LDFLAGS="$save_LDFLAGS" else _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' fi _LT_TAGVAR(archive_cmds_need_lc, $1)='no' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(inherit_rpath, $1)=yes _LT_TAGVAR(link_all_deplibs, $1)=yes ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out else _LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; newsos6) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *nto* | *qnx*) ;; openbsd*) if test -f /usr/libexec/ld.so; then _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=yes if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' else case $host_os in openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*) _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' ;; esac fi else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; os2*) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes 
_LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$ECHO DATA >> $output_objdir/$libname.def~$ECHO " SINGLE NONSHARED" >> $output_objdir/$libname.def~$ECHO EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' _LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' ;; osf3*) if test "$GCC" = yes; then _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' else _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' fi _LT_TAGVAR(archive_cmds_need_lc, $1)='no' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: ;; osf4* | osf5*) # as osf3* with the addition of -msym flag if test "$GCC" = yes; then _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' else _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' # Both c and cxx compiler support -rpath directly _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' fi _LT_TAGVAR(archive_cmds_need_lc, $1)='no' _LT_TAGVAR(hardcode_libdir_separator, $1)=: ;; solaris*) _LT_TAGVAR(no_undefined_flag, $1)=' -z defs' if test "$GCC" = yes; then wlarc='${wl}' _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' else case `$CC -V 2>&1` in *"Compilers 5.0"*) wlarc='' _LT_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' 
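# Illustrative sketch (editorial): the $lib.exp file generated by the
# archive_expsym_cmds above and below is a linker map of this shape, shown
# here for hypothetical exported symbols foo_init and foo_run:
#
#   { global:
#   foo_init;
#   foo_run;
#   local: *; };
#
# It is handed to the link via -M (native ld) or -Wl,-M (through the compiler
# driver), so every symbol not listed stays local to the library.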
_LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' ;; *) wlarc='${wl}' _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ;; esac fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no case $host_os in solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands `-z linker_flag'. GCC discards it without `$wl', # but is careful enough not to reorder. # Supported since Solaris 2.6 (maybe 2.5.1?) if test "$GCC" = yes; then _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' else _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' fi ;; esac _LT_TAGVAR(link_all_deplibs, $1)=yes ;; sunos4*) if test "x$host_vendor" = xsequent; then # Use $CC to link under sequent, because it throws in some extra .o # files that make .init and .fini sections work. _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; sysv4) case $host_vendor in sni) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true??? ;; siemens) ## LD is ld it makes a PLAMLIB ## CC just makes a GrossModule. 
_LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' _LT_TAGVAR(hardcode_direct, $1)=no ;; motorola) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie ;; esac runpath_var='LD_RUN_PATH' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; sysv4.3*) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no runpath_var=LD_RUN_PATH hardcode_runpath_var=yes _LT_TAGVAR(ld_shlibs, $1)=yes fi ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no runpath_var='LD_RUN_PATH' if test "$GCC" = yes; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We can NOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. 
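# Editorial aside on the -z defs / -z text remark above: -z defs makes the
# link fail on any unresolved reference, so a hypothetical
#
#   cc -G -z defs -o libfoo.so foo.o
#
# fails if foo.o calls libc functions while -lc is not on the line, whereas
# -z text (used below) only rejects relocations against read-only text
# segments and therefore still tolerates the deliberately missing -lc.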
_LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' runpath_var='LD_RUN_PATH' if test "$GCC" = yes; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; uts4*) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) _LT_TAGVAR(ld_shlibs, $1)=no ;; esac if test x$host_vendor = xsni; then case $host in sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Blargedynsym' ;; esac fi fi ]) AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no _LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld _LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl _LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl _LT_DECL([], [extract_expsyms_cmds], [2], [The commands to extract the exported symbol list from a shared archive]) # # Do we need to explicitly link libc? # case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in x|xyes) # Assume -lc should be added _LT_TAGVAR(archive_cmds_need_lc, $1)=yes if test "$enable_shared" = yes && test "$GCC" = yes; then case $_LT_TAGVAR(archive_cmds, $1) in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. AC_MSG_CHECKING([whether -lc should be explicitly linked in]) $RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if AC_TRY_EVAL(ac_compile) 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1) compiler_flags=-v linker_flags=-v verstring= output_objdir=. 
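# Editorial sketch of the "do we need -lc" probe that starts above and
# continues below, expressed as a plain-shell approximation (hypothetical gcc
# toolchain; the real test re-runs the configured archive_cmds with
# compiler_flags=-v instead of a hard-coded gcc call):
#
#   echo 'int some_variable = 0;' > conftest.c
#   gcc -c conftest.c
#   if gcc -shared -v conftest.o -o conftest.so 2>&1 | grep ' -lc ' >/dev/null
#   then echo "driver already passes -lc; archive_cmds_need_lc=no"
#   else echo "libtool must add -lc itself; archive_cmds_need_lc=yes"
#   fi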
libname=conftest lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1) _LT_TAGVAR(allow_undefined_flag, $1)= if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) then _LT_TAGVAR(archive_cmds_need_lc, $1)=no else _LT_TAGVAR(archive_cmds_need_lc, $1)=yes fi _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* AC_MSG_RESULT([$_LT_TAGVAR(archive_cmds_need_lc, $1)]) ;; esac fi ;; esac _LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0], [Whether or not to add -lc for building shared libraries]) _LT_TAGDECL([allow_libtool_libs_with_static_runtimes], [enable_shared_with_static_runtimes], [0], [Whether or not to disallow shared libs when runtime libs are static]) _LT_TAGDECL([], [export_dynamic_flag_spec], [1], [Compiler flag to allow reflexive dlopens]) _LT_TAGDECL([], [whole_archive_flag_spec], [1], [Compiler flag to generate shared objects directly from archives]) _LT_TAGDECL([], [compiler_needs_object], [1], [Whether the compiler copes with passing no objects directly]) _LT_TAGDECL([], [old_archive_from_new_cmds], [2], [Create an old-style archive from a shared archive]) _LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2], [Create a temporary old-style archive to link instead of a shared archive]) _LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive]) _LT_TAGDECL([], [archive_expsym_cmds], [2]) _LT_TAGDECL([], [module_cmds], [2], [Commands used to build a loadable module if different from building a shared archive.]) _LT_TAGDECL([], [module_expsym_cmds], [2]) _LT_TAGDECL([], [with_gnu_ld], [1], [Whether we are building with GNU ld or not]) _LT_TAGDECL([], [allow_undefined_flag], [1], [Flag that allows shared libraries with undefined symbols to be built]) _LT_TAGDECL([], [no_undefined_flag], [1], [Flag that enforces no undefined symbols]) _LT_TAGDECL([], [hardcode_libdir_flag_spec], [1], [Flag to hardcode $libdir into a binary during linking. This must work even if $libdir does not exist]) _LT_TAGDECL([], [hardcode_libdir_flag_spec_ld], [1], [[If ld is used when linking, flag to hardcode $libdir into a binary during linking. 
This must work even if $libdir does not exist]]) _LT_TAGDECL([], [hardcode_libdir_separator], [1], [Whether we need a single "-rpath" flag with a separated argument]) _LT_TAGDECL([], [hardcode_direct], [0], [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the resulting binary]) _LT_TAGDECL([], [hardcode_direct_absolute], [0], [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the resulting binary and the resulting library dependency is "absolute", i.e impossible to change by setting ${shlibpath_var} if the library is relocated]) _LT_TAGDECL([], [hardcode_minus_L], [0], [Set to "yes" if using the -LDIR flag during linking hardcodes DIR into the resulting binary]) _LT_TAGDECL([], [hardcode_shlibpath_var], [0], [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into the resulting binary]) _LT_TAGDECL([], [hardcode_automatic], [0], [Set to "yes" if building a shared library automatically hardcodes DIR into the library and all subsequent libraries and executables linked against it]) _LT_TAGDECL([], [inherit_rpath], [0], [Set to yes if linker adds runtime paths of dependent libraries to runtime path list]) _LT_TAGDECL([], [link_all_deplibs], [0], [Whether libtool must link a program against all its dependency libraries]) _LT_TAGDECL([], [fix_srcfile_path], [1], [Fix the shell variable $srcfile for the compiler]) _LT_TAGDECL([], [always_export_symbols], [0], [Set to "yes" if exported symbols are required]) _LT_TAGDECL([], [export_symbols_cmds], [2], [The commands to list exported symbols]) _LT_TAGDECL([], [exclude_expsyms], [1], [Symbols that should not be listed in the preloaded symbols]) _LT_TAGDECL([], [include_expsyms], [1], [Symbols that must always be exported]) _LT_TAGDECL([], [prelink_cmds], [2], [Commands necessary for linking programs (against libraries) with templates]) _LT_TAGDECL([], [file_list_spec], [1], [Specify filename containing input files]) dnl FIXME: Not yet implemented dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1], dnl [Compiler flag to generate thread safe objects]) ])# _LT_LINKER_SHLIBS # _LT_LANG_C_CONFIG([TAG]) # ------------------------ # Ensure that the configuration variables for a C compiler are suitably # defined. These variables are subsequently used by _LT_CONFIG to write # the compiler configuration to `libtool'. m4_defun([_LT_LANG_C_CONFIG], [m4_require([_LT_DECL_EGREP])dnl lt_save_CC="$CC" AC_LANG_PUSH(C) # Source file extension for C test sources. ac_ext=c # Object file extension for compiled C test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(){return(0);}' _LT_TAG_COMPILER # Save the default compiler, since it gets overwritten when the other # tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. compiler_DEFAULT=$CC # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... 
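# Editorial sketch (an assumption about the generated output, not taken from
# this file): the _LT_TAGDECL declarations above cause each of these variables
# to be written into the libtool script that configure produces, once per
# language tag, in sections of roughly this form:
#
#   # ### BEGIN LIBTOOL TAG CONFIG: CXX
#   archive_cmds="\$CC -shared -nostdlib ... -o \$lib"
#   hardcode_libdir_flag_spec="\${wl}-rpath \${wl}\$libdir"
#   # ### END LIBTOOL TAG CONFIG: CXX
#
# so that ltmain.sh can look up the per-tag link behaviour at build time.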
if test -n "$compiler"; then _LT_COMPILER_NO_RTTI($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) LT_SYS_DLOPEN_SELF _LT_CMD_STRIPLIB # Report which library types will actually be built AC_MSG_CHECKING([if libtool supports shared libraries]) AC_MSG_RESULT([$can_build_shared]) AC_MSG_CHECKING([whether to build shared libraries]) test "$can_build_shared" = "no" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test "$enable_shared" = yes && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[[4-9]]*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi ;; esac AC_MSG_RESULT([$enable_shared]) AC_MSG_CHECKING([whether to build static libraries]) # Make sure either enable_shared or enable_static is yes. test "$enable_shared" = yes || enable_static=yes AC_MSG_RESULT([$enable_static]) _LT_CONFIG($1) fi AC_LANG_POP CC="$lt_save_CC" ])# _LT_LANG_C_CONFIG # _LT_PROG_CXX # ------------ # Since AC_PROG_CXX is broken, in that it returns g++ if there is no c++ # compiler, we have our own version here. m4_defun([_LT_PROG_CXX], [ pushdef([AC_MSG_ERROR], [_lt_caught_CXX_error=yes]) AC_PROG_CXX if test -n "$CXX" && ( test "X$CXX" != "Xno" && ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || (test "X$CXX" != "Xg++"))) ; then AC_PROG_CXXCPP else _lt_caught_CXX_error=yes fi popdef([AC_MSG_ERROR]) ])# _LT_PROG_CXX dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([_LT_PROG_CXX], []) # _LT_LANG_CXX_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for a C++ compiler are suitably # defined. These variables are subsequently used by _LT_CONFIG to write # the compiler configuration to `libtool'. m4_defun([_LT_LANG_CXX_CONFIG], [AC_REQUIRE([_LT_PROG_CXX])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_EGREP])dnl AC_LANG_PUSH(C++) _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(compiler_needs_object, $1)=no _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(no_undefined_flag, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no # Source file extension for C++ test sources. ac_ext=cpp # Object file extension for compiled C++ test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # No sense in running all these tests if we already determined that # the CXX compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. 
if test "$_lt_caught_CXX_error" != yes; then # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC=$CC lt_save_LD=$LD lt_save_GCC=$GCC GCC=$GXX lt_save_with_gnu_ld=$with_gnu_ld lt_save_path_LD=$lt_cv_path_LD if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx else $as_unset lt_cv_prog_gnu_ld fi if test -n "${lt_cv_path_LDCXX+set}"; then lt_cv_path_LD=$lt_cv_path_LDCXX else $as_unset lt_cv_path_LD fi test -z "${LDCXX+set}" || LD=$LDCXX CC=${CXX-"c++"} compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) if test -n "$compiler"; then # We don't want -fno-exception when compiling C++ code, so set the # no_builtin_flag separately if test "$GXX" = yes; then _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' else _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= fi if test "$GXX" = yes; then # Set up default GNU C++ configuration LT_PATH_LD # Check if GNU C++ uses GNU ld as the underlying linker, since the # archiving commands below assume that GNU ld is being used. if test "$with_gnu_ld" = yes; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' # If archive_cmds runs LD, not CC, wlarc should be empty # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to # investigate it a little bit more. (MM) wlarc='${wl}' # ancient GNU ld didn't support --whole-archive et. al. if eval "`$CC -print-prog-name=ld` --help 2>&1" | $GREP 'no-whole-archive' > /dev/null; then _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' else _LT_TAGVAR(whole_archive_flag_spec, $1)= fi else with_gnu_ld=no wlarc= # A generic and very simple default shared library creation # command for GNU C++ for the case where it uses the native # linker, instead of GNU ld. If possible, this setting should # overridden to take advantage of the native linker features on # the platform it is being used on. _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' fi # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' else GXX=no with_gnu_ld=no wlarc= fi # PORTME: fill in a description of your system's C++ link characteristics AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) _LT_TAGVAR(ld_shlibs, $1)=yes case $host_os in aix3*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; aix[[4-9]]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag="" else aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) for ld_flag in $LDFLAGS; do case $ld_flag in *-brtl*) aix_use_runtimelinking=yes break ;; esac done ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. _LT_TAGVAR(archive_cmds, $1)='' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' if test "$GXX" = yes; then case $host_os in aix4.[[012]]|aix4.[[012]].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 _LT_TAGVAR(hardcode_direct, $1)=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)= fi esac shared_flag='-shared' if test "$aix_use_runtimelinking" = yes; then shared_flag="$shared_flag "'${wl}-G' fi else # not using gcc if test "$host_cpu" = ia64; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test "$aix_use_runtimelinking" = yes; then shared_flag='${wl}-G' else shared_flag='${wl}-bM:SRE' fi fi fi _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to # export. _LT_TAGVAR(always_export_symbols, $1)=yes if test "$aix_use_runtimelinking" = yes; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. _LT_TAGVAR(allow_undefined_flag, $1)='-berok' # Determine the default libpath from the value encoded in an empty # executable. 
_LT_SYS_MODULE_PATH_AIX _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" else if test "$host_cpu" = ia64; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. _LT_SYS_MODULE_PATH_AIX _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' # Exported symbols can be pulled into shared objects from archives _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' _LT_TAGVAR(archive_cmds_need_lc, $1)=yes # This is similar to how AIX traditionally builds its shared # libraries. _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' fi fi ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(allow_undefined_flag, $1)=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; chorus*) case $cc_basename in *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; cygwin* | mingw* | pw32* | cegcc*) # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, # as there is no search path for DLLs. _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file (1st line # is EXPORTS), use it as is; otherwise, prepend... 
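# Illustrative sketch of the .def handling described above (editorial,
# hypothetical file names): before linking the DLL, the export list is turned
# into a module-definition file roughly like this
#
#   if test "x`sed 1q libfoo.sym`" = xEXPORTS; then
#     cp libfoo.sym .libs/libfoo.dll.def
#   else
#     echo EXPORTS > .libs/libfoo.dll.def
#     cat libfoo.sym >> .libs/libfoo.dll.def
#   fi
#
# and the .def file is then passed to the compiler driver together with
# -Wl,--enable-auto-image-base and --out-implib to produce the import library.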
_LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; darwin* | rhapsody*) _LT_DARWIN_LINKER_FEATURES($1) ;; dgux*) case $cc_basename in ec++*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; ghcx*) # Green Hills C++ Compiler # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; freebsd[[12]]*) # C++ shared libraries reported to be fairly broken before # switch to ELF _LT_TAGVAR(ld_shlibs, $1)=no ;; freebsd-elf*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; freebsd* | dragonfly*) # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF # conventions _LT_TAGVAR(ld_shlibs, $1)=yes ;; gnu*) ;; hpux9*) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, # but as the default # location of the library. case $cc_basename in CC*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; aCC*) _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' ;; *) if test "$GXX" = yes; then _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' else # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; hpux10*|hpux11*) if test $with_gnu_ld = no; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: case $host_cpu in hppa*64*|ia64*) ;; *) _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' ;; esac fi case $host_cpu in hppa*64*|ia64*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, # but as the default # location of the library. 
;; esac case $cc_basename in CC*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; aCC*) case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' ;; *) if test "$GXX" = yes; then if test $with_gnu_ld = no; then case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac fi else # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; interix[[3-9]]*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; irix5* | irix6*) case $cc_basename in CC*) # SGI C++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' # Archives containing C++ object files must be created using # "CC -ar", where "CC" is the IRIX C++ compiler. 
This is # necessary to make sure instantiated templates are included # in the archive. _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' ;; *) if test "$GXX" = yes; then if test "$with_gnu_ld" = no; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' else _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` -o $lib' fi fi _LT_TAGVAR(link_all_deplibs, $1)=yes ;; esac _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(inherit_rpath, $1)=yes ;; linux* | k*bsd*-gnu) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' # Archives containing C++ object files must be created using # "CC -Bstatic", where "CC" is the KAI C++ compiler. _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; icpc* | ecpc* ) # Intel C++ with_gnu_ld=yes # version 8.0 and above of icpc choke on multiply defined symbols # if we add $predep_objects and $postdep_objects, however 7.1 and # earlier do not add the objects themselves. 
case `$CC -V 2>&1` in *"Version 7."*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ;; *) # Version 8.0 or newer tmp_idyn= case $host_cpu in ia64*) tmp_idyn=' -i_dynamic';; esac _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ;; esac _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' ;; pgCC* | pgcpp*) # Portland Group C++ compiler case `$CC -V` in *pgCC\ [[1-5]]* | *pgcpp\ [[1-5]]*) _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ compile_command="$compile_command `find $tpldir -name \*.o | $NL2SP`"' _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | $NL2SP`~ $RANLIB $oldlib' _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' ;; *) # Version 6 will use weak symbols _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' ;; esac _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' ;; cxx*) # Compaq C++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' runpath_var=LD_RUN_PATH _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' 
_LT_TAGVAR(hardcode_libdir_separator, $1)=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`$ECHO "X$templist" | $Xsed -e "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' ;; xl*) # IBM XL 8.0 on PPC, with GNU ld _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' if test "x$supports_anon_versioning" = xyes; then _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' fi ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' _LT_TAGVAR(compiler_needs_object, $1)=yes # Not sure whether something based on # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 # would be better. output_verbose_link_cmd='echo' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. 
_LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' ;; esac ;; esac ;; lynxos*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; m88k*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; mvs*) case $cc_basename in cxx*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' wlarc= _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no fi # Workaround some broken pre-1.5 toolchains output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' ;; *nto* | *qnx*) _LT_TAGVAR(ld_shlibs, $1)=yes ;; openbsd2*) # C++ shared libraries are fairly broken _LT_TAGVAR(ld_shlibs, $1)=no ;; openbsd*) if test -f /usr/libexec/ld.so; then _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' fi output_verbose_link_cmd=echo else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Archives containing C++ object files must be created using # the KAI C++ compiler. 
case $host in osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;; esac ;; RCC*) # Rational C++ 2.4.1 # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; cxx*) case $host in osf3*) _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && $ECHO "X${wl}-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' ;; *) _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ echo "-hidden">> $lib.exp~ $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~ $RM $lib.exp' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' ;; esac _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`$ECHO "X$templist" | $Xsed -e "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' ;; *) if test "$GXX" = yes && test "$with_gnu_ld" = no; then _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' case $host in osf3*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ;; esac _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' else # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; psos*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; lcc*) # Lucid # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; solaris*) case $cc_basename in CC*) # Sun C++ 4.2, 5.x and Centerline C++ _LT_TAGVAR(archive_cmds_need_lc,$1)=yes _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no case $host_os in solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands `-z linker_flag'. # Supported since Solaris 2.6 (maybe 2.5.1?) _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' ;; esac _LT_TAGVAR(link_all_deplibs, $1)=yes output_verbose_link_cmd='echo' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' ;; gcx*) # Green Hills C++ Compiler _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' # The C++ compiler must be used to create the archive. _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' ;; *) # GNU C++ compiler with Solaris linker if test "$GXX" = yes && test "$with_gnu_ld" = no; then _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs' if $CC --version | $GREP -v '^2\.7' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' else # g++ 2.7 appears to require `-G' NOT `-shared' on this # platform. 
_LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir' case $host_os in solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; *) _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' ;; esac fi ;; esac ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no runpath_var='LD_RUN_PATH' case $cc_basename in CC*) _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We can NOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. 
_LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' runpath_var='LD_RUN_PATH' case $cc_basename in CC*) _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; vxworks*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no _LT_TAGVAR(GCC, $1)="$GXX" _LT_TAGVAR(LD, $1)="$LD" ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... _LT_SYS_HIDDEN_LIBDEPS($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi # test -n "$compiler" CC=$lt_save_CC LDCXX=$LD LD=$lt_save_LD GCC=$lt_save_GCC with_gnu_ld=$lt_save_with_gnu_ld lt_cv_path_LDCXX=$lt_cv_path_LD lt_cv_path_LD=$lt_save_path_LD lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld fi # test "$_lt_caught_CXX_error" != yes AC_LANG_POP ])# _LT_LANG_CXX_CONFIG # _LT_SYS_HIDDEN_LIBDEPS([TAGNAME]) # --------------------------------- # Figure out "hidden" library dependencies from verbose # compiler output when linking a shared library. # Parse the compiler output and extract the necessary # objects, libraries and library flags. m4_defun([_LT_SYS_HIDDEN_LIBDEPS], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl # Dependencies to place before and after the object being linked: _LT_TAGVAR(predep_objects, $1)= _LT_TAGVAR(postdep_objects, $1)= _LT_TAGVAR(predeps, $1)= _LT_TAGVAR(postdeps, $1)= _LT_TAGVAR(compiler_lib_search_path, $1)= dnl we can't use the lt_simple_compile_test_code here, dnl because it contains code intended for an executable, dnl not a library. It's possible we should let each dnl tag define a new lt_????_link_test_code variable, dnl but it's only used here... 
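# A minimal stand-alone sketch (not part of the original libtool.m4 text) of
# the probe implemented below: compile a tiny source file, relink it with
# verbose output, and sort every -L/-l/object token into predeps (seen before
# the conftest object) or postdeps (seen after it).  It assumes a g++ driver
# in PATH and uses hd_-prefixed scratch names; illustrative only.
if command -v g++ >/dev/null 2>&1; then
  cat > hd_conftest.cpp <<'HD_EOF'
int a; void foo (void) { a = 0; }
HD_EOF
  g++ -c -fPIC hd_conftest.cpp -o hd_conftest.o
  hd_predeps=; hd_postdeps=; hd_seen=no
  # The verbose link line prints the real ld/collect2 invocation; tokens
  # before hd_conftest.o are pre-dependencies, tokens after it post-dependencies.
  for hd_tok in `g++ -shared -v hd_conftest.o -o libhd_conftest.so 2>&1 | grep hd_conftest.o`; do
    case $hd_tok in
      *hd_conftest.o) hd_seen=yes ;;
      -L* | -l* | *.o)
        if test "$hd_seen" = no; then hd_predeps="$hd_predeps $hd_tok"
        else hd_postdeps="$hd_postdeps $hd_tok"; fi ;;
    esac
  done
  echo "predeps: $hd_predeps"
  echo "postdeps:$hd_postdeps"
  rm -f hd_conftest.cpp hd_conftest.o libhd_conftest.so
fi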
m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF int a; void foo (void) { a = 0; } _LT_EOF ], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF class Foo { public: Foo (void) { a = 0; } private: int a; }; _LT_EOF ], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF subroutine foo implicit none integer*4 a a=0 return end _LT_EOF ], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF subroutine foo implicit none integer a a=0 return end _LT_EOF ], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF public class foo { private int a; public void bar (void) { a = 0; } }; _LT_EOF ]) dnl Parse the compiler output and extract the necessary dnl objects, libraries and library flags. if AC_TRY_EVAL(ac_compile); then # Parse the compiler output and extract the necessary # objects, libraries and library flags. # Sentinel used to keep track of whether or not we are before # the conftest object file. pre_test_object_deps_done=no for p in `eval "$output_verbose_link_cmd"`; do case $p in -L* | -R* | -l*) # Some compilers place space between "-{L,R}" and the path. # Remove the space. if test $p = "-L" || test $p = "-R"; then prev=$p continue else prev= fi if test "$pre_test_object_deps_done" = no; then case $p in -L* | -R*) # Internal compiler library paths should come after those # provided the user. The postdeps already come after the # user supplied libs so there is no need to process them. if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then _LT_TAGVAR(compiler_lib_search_path, $1)="${prev}${p}" else _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} ${prev}${p}" fi ;; # The "-l" case would never come before the object being # linked, so don't bother handling this case. esac else if test -z "$_LT_TAGVAR(postdeps, $1)"; then _LT_TAGVAR(postdeps, $1)="${prev}${p}" else _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} ${prev}${p}" fi fi ;; *.$objext) # This assumes that the test object file only shows up # once in the compiler output. if test "$p" = "conftest.$objext"; then pre_test_object_deps_done=yes continue fi if test "$pre_test_object_deps_done" = no; then if test -z "$_LT_TAGVAR(predep_objects, $1)"; then _LT_TAGVAR(predep_objects, $1)="$p" else _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p" fi else if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then _LT_TAGVAR(postdep_objects, $1)="$p" else _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p" fi fi ;; *) ;; # Ignore the rest. esac done # Clean up. rm -f a.out a.exe else echo "libtool.m4: error: problem compiling $1 test program" fi $RM -f confest.$objext # PORTME: override above test on systems where it is broken m4_if([$1], [CXX], [case $host_os in interix[[3-9]]*) # Interix 3.5 installs completely hosed .la files for C++, so rather than # hack all around it, let's just trust "g++" to DTRT. _LT_TAGVAR(predep_objects,$1)= _LT_TAGVAR(postdep_objects,$1)= _LT_TAGVAR(postdeps,$1)= ;; linux*) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 # The more standards-conforming stlport4 library is # incompatible with the Cstd library. Avoid specifying # it if it's in CXXFLAGS. Ignore libCrun as # -library=stlport4 depends on it. case " $CXX $CXXFLAGS " in *" -library=stlport4 "*) solaris_use_stlport4=yes ;; esac if test "$solaris_use_stlport4" != yes; then _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' fi ;; esac ;; solaris*) case $cc_basename in CC*) # The more standards-conforming stlport4 library is # incompatible with the Cstd library. 
Avoid specifying # it if it's in CXXFLAGS. Ignore libCrun as # -library=stlport4 depends on it. case " $CXX $CXXFLAGS " in *" -library=stlport4 "*) solaris_use_stlport4=yes ;; esac # Adding this requires a known-good setup of shared libraries for # Sun compiler versions before 5.6, else PIC objects from an old # archive will be linked into the output, leading to subtle bugs. if test "$solaris_use_stlport4" != yes; then _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' fi ;; esac ;; esac ]) case " $_LT_TAGVAR(postdeps, $1) " in *" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; esac _LT_TAGVAR(compiler_lib_search_dirs, $1)= if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | ${SED} -e 's! -L! !g' -e 's!^ !!'` fi _LT_TAGDECL([], [compiler_lib_search_dirs], [1], [The directories searched by this compiler when creating a shared library]) _LT_TAGDECL([], [predep_objects], [1], [Dependencies to place before and after the objects being linked to create a shared library]) _LT_TAGDECL([], [postdep_objects], [1]) _LT_TAGDECL([], [predeps], [1]) _LT_TAGDECL([], [postdeps], [1]) _LT_TAGDECL([], [compiler_lib_search_path], [1], [The library search path used internally by the compiler when linking a shared library]) ])# _LT_SYS_HIDDEN_LIBDEPS # _LT_PROG_F77 # ------------ # Since AC_PROG_F77 is broken, in that it returns the empty string # if there is no fortran compiler, we have our own version here. m4_defun([_LT_PROG_F77], [ pushdef([AC_MSG_ERROR], [_lt_disable_F77=yes]) AC_PROG_F77 if test -z "$F77" || test "X$F77" = "Xno"; then _lt_disable_F77=yes fi popdef([AC_MSG_ERROR]) ])# _LT_PROG_F77 dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([_LT_PROG_F77], []) # _LT_LANG_F77_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for a Fortran 77 compiler are # suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to `libtool'. m4_defun([_LT_LANG_F77_CONFIG], [AC_REQUIRE([_LT_PROG_F77])dnl AC_LANG_PUSH(Fortran 77) _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(no_undefined_flag, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no # Source file extension for f77 test sources. ac_ext=f # Object file extension for compiled f77 test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # No sense in running all these tests if we already determined that # the F77 compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. 
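# A minimal stand-alone sketch (not part of the original libtool.m4 text) of
# the guard used below: if no working Fortran 77 compiler was detected, the
# F77-specific probes are skipped so that shared settings such as
# enable_shared are never derived from a non-working compiler.  $F77 here is
# whatever an earlier compiler check produced (possibly empty).
_sketch_disable_F77=no
if test -z "$F77" || test "X$F77" = "Xno"; then
  _sketch_disable_F77=yes
fi
if test "$_sketch_disable_F77" != yes; then
  echo "configuring the F77 libtool tag with $F77"
else
  echo "no usable F77 compiler; skipping the F77 libtool tag"
fi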
if test "$_lt_disable_F77" != yes; then # Code to be used in simple compile tests lt_simple_compile_test_code="\ subroutine t return end " # Code to be used in simple link tests lt_simple_link_test_code="\ program t end " # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC="$CC" lt_save_GCC=$GCC CC=${F77-"f77"} compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) GCC=$G77 if test -n "$compiler"; then AC_MSG_CHECKING([if libtool supports shared libraries]) AC_MSG_RESULT([$can_build_shared]) AC_MSG_CHECKING([whether to build shared libraries]) test "$can_build_shared" = "no" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test "$enable_shared" = yes && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[[4-9]]*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi ;; esac AC_MSG_RESULT([$enable_shared]) AC_MSG_CHECKING([whether to build static libraries]) # Make sure either enable_shared or enable_static is yes. test "$enable_shared" = yes || enable_static=yes AC_MSG_RESULT([$enable_static]) _LT_TAGVAR(GCC, $1)="$G77" _LT_TAGVAR(LD, $1)="$LD" ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi # test -n "$compiler" GCC=$lt_save_GCC CC="$lt_save_CC" fi # test "$_lt_disable_F77" != yes AC_LANG_POP ])# _LT_LANG_F77_CONFIG # _LT_PROG_FC # ----------- # Since AC_PROG_FC is broken, in that it returns the empty string # if there is no fortran compiler, we have our own version here. m4_defun([_LT_PROG_FC], [ pushdef([AC_MSG_ERROR], [_lt_disable_FC=yes]) AC_PROG_FC if test -z "$FC" || test "X$FC" = "Xno"; then _lt_disable_FC=yes fi popdef([AC_MSG_ERROR]) ])# _LT_PROG_FC dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([_LT_PROG_FC], []) # _LT_LANG_FC_CONFIG([TAG]) # ------------------------- # Ensure that the configuration variables for a Fortran compiler are # suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to `libtool'. 
m4_defun([_LT_LANG_FC_CONFIG], [AC_REQUIRE([_LT_PROG_FC])dnl AC_LANG_PUSH(Fortran) _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(no_undefined_flag, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no # Source file extension for fc test sources. ac_ext=${ac_fc_srcext-f} # Object file extension for compiled fc test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # No sense in running all these tests if we already determined that # the FC compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. if test "$_lt_disable_FC" != yes; then # Code to be used in simple compile tests lt_simple_compile_test_code="\ subroutine t return end " # Code to be used in simple link tests lt_simple_link_test_code="\ program t end " # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC="$CC" lt_save_GCC=$GCC CC=${FC-"f95"} compiler=$CC GCC=$ac_cv_fc_compiler_gnu _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) if test -n "$compiler"; then AC_MSG_CHECKING([if libtool supports shared libraries]) AC_MSG_RESULT([$can_build_shared]) AC_MSG_CHECKING([whether to build shared libraries]) test "$can_build_shared" = "no" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test "$enable_shared" = yes && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[[4-9]]*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi ;; esac AC_MSG_RESULT([$enable_shared]) AC_MSG_CHECKING([whether to build static libraries]) # Make sure either enable_shared or enable_static is yes. test "$enable_shared" = yes || enable_static=yes AC_MSG_RESULT([$enable_static]) _LT_TAGVAR(GCC, $1)="$ac_cv_fc_compiler_gnu" _LT_TAGVAR(LD, $1)="$LD" ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... _LT_SYS_HIDDEN_LIBDEPS($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi # test -n "$compiler" GCC=$lt_save_GCC CC="$lt_save_CC" fi # test "$_lt_disable_FC" != yes AC_LANG_POP ])# _LT_LANG_FC_CONFIG # _LT_LANG_GCJ_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for the GNU Java Compiler compiler # are suitably defined. 
These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to `libtool'. m4_defun([_LT_LANG_GCJ_CONFIG], [AC_REQUIRE([LT_PROG_GCJ])dnl AC_LANG_SAVE # Source file extension for Java test sources. ac_ext=java # Object file extension for compiled Java test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="class foo {}" # Code to be used in simple link tests lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC="$CC" lt_save_GCC=$GCC GCC=yes CC=${GCJ-"gcj"} compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_TAGVAR(LD, $1)="$LD" _LT_CC_BASENAME([$compiler]) # GCJ did not exist at the time GCC didn't implicitly link libc in. _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... if test -n "$compiler"; then _LT_COMPILER_NO_RTTI($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi AC_LANG_RESTORE GCC=$lt_save_GCC CC="$lt_save_CC" ])# _LT_LANG_GCJ_CONFIG # _LT_LANG_RC_CONFIG([TAG]) # ------------------------- # Ensure that the configuration variables for the Windows resource compiler # are suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to `libtool'. m4_defun([_LT_LANG_RC_CONFIG], [AC_REQUIRE([LT_PROG_RC])dnl AC_LANG_SAVE # Source file extension for RC test sources. ac_ext=rc # Object file extension for compiled RC test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }' # Code to be used in simple link tests lt_simple_link_test_code="$lt_simple_compile_test_code" # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC="$CC" lt_save_GCC=$GCC GCC= CC=${RC-"windres"} compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes if test -n "$compiler"; then : _LT_CONFIG($1) fi GCC=$lt_save_GCC AC_LANG_RESTORE CC="$lt_save_CC" ])# _LT_LANG_RC_CONFIG # LT_PROG_GCJ # ----------- AC_DEFUN([LT_PROG_GCJ], [m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ], [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ], [AC_CHECK_TOOL(GCJ, gcj,) test "x${GCJFLAGS+set}" = xset || GCJFLAGS="-g -O2" AC_SUBST(GCJFLAGS)])])[]dnl ]) # Old name: AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([LT_AC_PROG_GCJ], []) # LT_PROG_RC # ---------- AC_DEFUN([LT_PROG_RC], [AC_CHECK_TOOL(RC, windres,) ]) # Old name: AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([LT_AC_PROG_RC], []) # _LT_DECL_EGREP # -------------- # If we don't have a new enough Autoconf to choose the best grep # available, choose the one first in the user's PATH. 
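# A minimal stand-alone sketch (not part of the original libtool.m4 text):
# "choose the one first in the user's PATH" simply means walking $PATH from
# left to right and taking the first executable with the wanted name.  The
# helper name first_in_path and the ggrep/grep candidates are illustrative.
first_in_path () {
  fip_name=${1}
  fip_save_IFS=$IFS; IFS=:
  for fip_dir in $PATH; do
    IFS=$fip_save_IFS
    test -z "$fip_dir" && fip_dir=.
    if test -x "$fip_dir/$fip_name"; then
      echo "$fip_dir/$fip_name"
      return 0
    fi
  done
  IFS=$fip_save_IFS
  return 1
}
GREP=`first_in_path ggrep` || GREP=`first_in_path grep` || GREP=grep
echo "using GREP=$GREP"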
m4_defun([_LT_DECL_EGREP], [AC_REQUIRE([AC_PROG_EGREP])dnl AC_REQUIRE([AC_PROG_FGREP])dnl test -z "$GREP" && GREP=grep _LT_DECL([], [GREP], [1], [A grep program that handles long lines]) _LT_DECL([], [EGREP], [1], [An ERE matcher]) _LT_DECL([], [FGREP], [1], [A literal string matcher]) dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too AC_SUBST([GREP]) ]) # _LT_DECL_OBJDUMP # -------------- # If we don't have a new enough Autoconf to choose the best objdump # available, choose the one first in the user's PATH. m4_defun([_LT_DECL_OBJDUMP], [AC_CHECK_TOOL(OBJDUMP, objdump, false) test -z "$OBJDUMP" && OBJDUMP=objdump _LT_DECL([], [OBJDUMP], [1], [An object symbol dumper]) AC_SUBST([OBJDUMP]) ]) # _LT_DECL_SED # ------------ # Check for a fully-functional sed program, that truncates # as few characters as possible. Prefer GNU sed if found. m4_defun([_LT_DECL_SED], [AC_PROG_SED test -z "$SED" && SED=sed Xsed="$SED -e 1s/^X//" _LT_DECL([], [SED], [1], [A sed program that does not truncate output]) _LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"], [Sed that helps us avoid accidentally triggering echo(1) options like -n]) ])# _LT_DECL_SED m4_ifndef([AC_PROG_SED], [ ############################################################ # NOTE: This macro has been submitted for inclusion into # # GNU Autoconf as AC_PROG_SED. When it is available in # # a released version of Autoconf we should remove this # # macro and use it instead. # ############################################################ m4_defun([AC_PROG_SED], [AC_MSG_CHECKING([for a sed that does not truncate output]) AC_CACHE_VAL(lt_cv_path_SED, [# Loop through the user's path and test for sed and gsed. # Then use that list of sed's as ones to test for truncation. as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for lt_ac_prog in sed gsed; do for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" fi done done done IFS=$as_save_IFS lt_ac_max=0 lt_ac_count=0 # Add /usr/xpg4/bin/sed as it is typically found on Solaris # along with /bin/sed that truncates output. for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do test ! -f $lt_ac_sed && continue cat /dev/null > conftest.in lt_ac_count=0 echo $ECHO_N "0123456789$ECHO_C" >conftest.in # Check for GNU sed and select it if it is found. if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then lt_cv_path_SED=$lt_ac_sed break fi while true; do cat conftest.in conftest.in >conftest.tmp mv conftest.tmp conftest.in cp conftest.in conftest.nl echo >>conftest.nl $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break cmp -s conftest.out conftest.nl || break # 10000 chars as input seems more than enough test $lt_ac_count -gt 10 && break lt_ac_count=`expr $lt_ac_count + 1` if test $lt_ac_count -gt $lt_ac_max; then lt_ac_max=$lt_ac_count lt_cv_path_SED=$lt_ac_sed fi done done ]) SED=$lt_cv_path_SED AC_SUBST([SED]) AC_MSG_RESULT([$SED]) ])#AC_PROG_SED ])#m4_ifndef # Old name: AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([LT_AC_PROG_SED], []) # _LT_CHECK_SHELL_FEATURES # ------------------------ # Find out whether the shell is Bourne or XSI compatible, # or has some other useful features. 
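# A minimal stand-alone sketch (not part of the original libtool.m4 text) of
# the XSI probe defined below: run the suspect constructs in a subshell so a
# pre-POSIX /bin/sh merely fails the test instead of aborting the script.
# The sample string a/b/c is arbitrary.
sketch_xsi_shell=no
( _d=a/b/c
  test "${_d##*/},${_d%/*}," = "c,a/b," \
    && eval 'test $(( 1 + 1 )) -eq 2 && test "${#_d}" -eq 5'
) >/dev/null 2>&1 && sketch_xsi_shell=yes
echo "shell understands the XSI constructs: $sketch_xsi_shell"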
m4_defun([_LT_CHECK_SHELL_FEATURES], [AC_MSG_CHECKING([whether the shell understands some XSI constructs]) # Try some XSI features xsi_shell=no ( _lt_dummy="a/b/c" test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ = c,a/b,, \ && eval 'test $(( 1 + 1 )) -eq 2 \ && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ && xsi_shell=yes AC_MSG_RESULT([$xsi_shell]) _LT_CONFIG_LIBTOOL_INIT([xsi_shell='$xsi_shell']) AC_MSG_CHECKING([whether the shell understands "+="]) lt_shell_append=no ( foo=bar; set foo baz; eval "$[1]+=\$[2]" && test "$foo" = barbaz ) \ >/dev/null 2>&1 \ && lt_shell_append=yes AC_MSG_RESULT([$lt_shell_append]) _LT_CONFIG_LIBTOOL_INIT([lt_shell_append='$lt_shell_append']) if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then lt_unset=unset else lt_unset=false fi _LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl # test EBCDIC or ASCII case `echo X|tr X '\101'` in A) # ASCII based system # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr lt_SP2NL='tr \040 \012' lt_NL2SP='tr \015\012 \040\040' ;; *) # EBCDIC based system lt_SP2NL='tr \100 \n' lt_NL2SP='tr \r\n \100\100' ;; esac _LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl _LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl ])# _LT_CHECK_SHELL_FEATURES # _LT_PROG_XSI_SHELLFNS # --------------------- # Bourne and XSI compatible variants of some useful shell functions. m4_defun([_LT_PROG_XSI_SHELLFNS], [case $xsi_shell in yes) cat << \_LT_EOF >> "$cfgfile" # func_dirname file append nondir_replacement # Compute the dirname of FILE. If nonempty, add APPEND to the result, # otherwise set result to NONDIR_REPLACEMENT. func_dirname () { case ${1} in */*) func_dirname_result="${1%/*}${2}" ;; * ) func_dirname_result="${3}" ;; esac } # func_basename file func_basename () { func_basename_result="${1##*/}" } # func_dirname_and_basename file append nondir_replacement # perform func_basename and func_dirname in a single function # call: # dirname: Compute the dirname of FILE. If nonempty, # add APPEND to the result, otherwise set result # to NONDIR_REPLACEMENT. # value returned in "$func_dirname_result" # basename: Compute filename of FILE. # value retuned in "$func_basename_result" # Implementation must be kept synchronized with func_dirname # and func_basename. For efficiency, we do not delegate to # those functions but instead duplicate the functionality here. func_dirname_and_basename () { case ${1} in */*) func_dirname_result="${1%/*}${2}" ;; * ) func_dirname_result="${3}" ;; esac func_basename_result="${1##*/}" } # func_stripname prefix suffix name # strip PREFIX and SUFFIX off of NAME. # PREFIX and SUFFIX must not contain globbing or regex special # characters, hashes, percent signs, but SUFFIX may contain a leading # dot (in which case that matches only a dot). func_stripname () { # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are # positional parameters, so assign one to ordinary parameter first. func_stripname_result=${3} func_stripname_result=${func_stripname_result#"${1}"} func_stripname_result=${func_stripname_result%"${2}"} } # func_opt_split func_opt_split () { func_opt_split_opt=${1%%=*} func_opt_split_arg=${1#*=} } # func_lo2o object func_lo2o () { case ${1} in *.lo) func_lo2o_result=${1%.lo}.${objext} ;; *) func_lo2o_result=${1} ;; esac } # func_xform libobj-or-source func_xform () { func_xform_result=${1%.*}.lo } # func_arith arithmetic-term... 
func_arith () { func_arith_result=$(( $[*] )) } # func_len string # STRING may not start with a hyphen. func_len () { func_len_result=${#1} } _LT_EOF ;; *) # Bourne compatible functions. cat << \_LT_EOF >> "$cfgfile" # func_dirname file append nondir_replacement # Compute the dirname of FILE. If nonempty, add APPEND to the result, # otherwise set result to NONDIR_REPLACEMENT. func_dirname () { # Extract subdirectory from the argument. func_dirname_result=`$ECHO "X${1}" | $Xsed -e "$dirname"` if test "X$func_dirname_result" = "X${1}"; then func_dirname_result="${3}" else func_dirname_result="$func_dirname_result${2}" fi } # func_basename file func_basename () { func_basename_result=`$ECHO "X${1}" | $Xsed -e "$basename"` } dnl func_dirname_and_basename dnl A portable version of this function is already defined in general.m4sh dnl so there is no need for it here. # func_stripname prefix suffix name # strip PREFIX and SUFFIX off of NAME. # PREFIX and SUFFIX must not contain globbing or regex special # characters, hashes, percent signs, but SUFFIX may contain a leading # dot (in which case that matches only a dot). # func_strip_suffix prefix name func_stripname () { case ${2} in .*) func_stripname_result=`$ECHO "X${3}" \ | $Xsed -e "s%^${1}%%" -e "s%\\\\${2}\$%%"`;; *) func_stripname_result=`$ECHO "X${3}" \ | $Xsed -e "s%^${1}%%" -e "s%${2}\$%%"`;; esac } # sed scripts: my_sed_long_opt='1s/^\(-[[^=]]*\)=.*/\1/;q' my_sed_long_arg='1s/^-[[^=]]*=//' # func_opt_split func_opt_split () { func_opt_split_opt=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_opt"` func_opt_split_arg=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_arg"` } # func_lo2o object func_lo2o () { func_lo2o_result=`$ECHO "X${1}" | $Xsed -e "$lo2o"` } # func_xform libobj-or-source func_xform () { func_xform_result=`$ECHO "X${1}" | $Xsed -e 's/\.[[^.]]*$/.lo/'` } # func_arith arithmetic-term... func_arith () { func_arith_result=`expr "$[@]"` } # func_len string # STRING may not start with a hyphen. func_len () { func_len_result=`expr "$[1]" : ".*" 2>/dev/null || echo $max_cmd_len` } _LT_EOF esac case $lt_shell_append in yes) cat << \_LT_EOF >> "$cfgfile" # func_append var value # Append VALUE to the end of shell variable VAR. func_append () { eval "$[1]+=\$[2]" } _LT_EOF ;; *) cat << \_LT_EOF >> "$cfgfile" # func_append var value # Append VALUE to the end of shell variable VAR. func_append () { eval "$[1]=\$$[1]\$[2]" } _LT_EOF ;; esac ]) nordugrid-arc-5.4.2/m4/PaxHeaders.7502/nls.m40000644000000000000000000000013113214315702016547 xustar000000000000000029 mtime=1513200578.25974777 30 atime=1513200579.471762594 30 ctime=1513200658.595730311 nordugrid-arc-5.4.2/m4/nls.m40000644000175000002070000000350513214315702016621 0ustar00mockbuildmock00000000000000# nls.m4 serial 1 (gettext-0.12) dnl Copyright (C) 1995-2003 Free Software Foundation, Inc. dnl This file is free software, distributed under the terms of the GNU dnl General Public License. As a special exception to the GNU General dnl Public License, this file may be distributed as part of a program dnl that contains a configuration script generated by Autoconf, under dnl the same distribution terms as the rest of that program. dnl dnl This file can can be used in projects which are not available under dnl the GNU General Public License or the GNU Library General Public dnl License but which still want to provide support for the GNU gettext dnl functionality. 
dnl Please note that the actual code of the GNU gettext library is covered dnl by the GNU Library General Public License, and the rest of the GNU dnl gettext package package is covered by the GNU General Public License. dnl They are *not* in the public domain. dnl Authors: dnl Ulrich Drepper , 1995-2000. dnl Bruno Haible , 2000-2003. AC_DEFUN([AM_NLS], [ AC_MSG_CHECKING([whether NLS is requested]) dnl Default is enabled NLS AC_ARG_ENABLE(nls, [ --disable-nls do not use Native Language Support], USE_NLS=$enableval, USE_NLS=yes) AC_MSG_RESULT($USE_NLS) AC_SUBST(USE_NLS) ]) AC_DEFUN([AM_MKINSTALLDIRS], [ dnl If the AC_CONFIG_AUX_DIR macro for autoconf is used we possibly dnl find the mkinstalldirs script in another subdir but $(top_srcdir). dnl Try to locate it. MKINSTALLDIRS= if test -n "$ac_aux_dir"; then case "$ac_aux_dir" in /*) MKINSTALLDIRS="$ac_aux_dir/mkinstalldirs" ;; *) MKINSTALLDIRS="\$(top_builddir)/$ac_aux_dir/mkinstalldirs" ;; esac fi if test -z "$MKINSTALLDIRS"; then MKINSTALLDIRS="\$(top_srcdir)/mkinstalldirs" fi AC_SUBST(MKINSTALLDIRS) ]) nordugrid-arc-5.4.2/m4/PaxHeaders.7502/iconv.m40000644000000000000000000000013213214315702017072 xustar000000000000000030 mtime=1513200578.146746388 30 atime=1513200579.477762667 30 ctime=1513200658.588730225 nordugrid-arc-5.4.2/m4/iconv.m40000644000175000002070000000665313214315702017152 0ustar00mockbuildmock00000000000000# iconv.m4 serial AM4 (gettext-0.11.3) dnl Copyright (C) 2000-2002 Free Software Foundation, Inc. dnl This file is free software, distributed under the terms of the GNU dnl General Public License. As a special exception to the GNU General dnl Public License, this file may be distributed as part of a program dnl that contains a configuration script generated by Autoconf, under dnl the same distribution terms as the rest of that program. dnl From Bruno Haible. AC_DEFUN([AM_ICONV_LINKFLAGS_BODY], [ dnl Prerequisites of AC_LIB_LINKFLAGS_BODY. AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) AC_REQUIRE([AC_LIB_RPATH]) dnl Search for libiconv and define LIBICONV, LTLIBICONV and INCICONV dnl accordingly. AC_LIB_LINKFLAGS_BODY([iconv]) ]) AC_DEFUN([AM_ICONV_LINK], [ dnl Some systems have iconv in libc, some have it in libiconv (OSF/1 and dnl those with the standalone portable GNU libiconv installed). dnl Search for libiconv and define LIBICONV, LTLIBICONV and INCICONV dnl accordingly. AC_REQUIRE([AM_ICONV_LINKFLAGS_BODY]) dnl Add $INCICONV to CPPFLAGS before performing the following checks, dnl because if the user has installed libiconv and not disabled its use dnl via --without-libiconv-prefix, he wants to use it. The first dnl AC_TRY_LINK will then fail, the second AC_TRY_LINK will succeed. 
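# A minimal stand-alone sketch (not part of the original iconv.m4 text) of
# the two-step probe performed below: first try to link an
# iconv_open/iconv/iconv_close program against libc alone, and only if that
# fails retry with -liconv.  The ${CC-cc} driver and the temporary file
# names are assumptions made for illustration.
cat > iconv_conftest.c <<'ICONV_EOF'
#include <stdlib.h>
#include <iconv.h>
int main (void)
{
  iconv_t cd = iconv_open ("", "");
  iconv (cd, NULL, NULL, NULL, NULL);
  iconv_close (cd);
  return 0;
}
ICONV_EOF
sketch_func_iconv=no; sketch_lib_iconv=no
if ${CC-cc} iconv_conftest.c -o iconv_conftest >/dev/null 2>&1; then
  sketch_func_iconv=yes                        # iconv lives in libc
elif ${CC-cc} iconv_conftest.c -liconv -o iconv_conftest >/dev/null 2>&1; then
  sketch_func_iconv=yes; sketch_lib_iconv=yes  # needs the standalone libiconv
fi
echo "iconv usable: $sketch_func_iconv (separate -liconv needed: $sketch_lib_iconv)"
rm -f iconv_conftest.c iconv_conftest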
am_save_CPPFLAGS="$CPPFLAGS" AC_LIB_APPENDTOVAR([CPPFLAGS], [$INCICONV]) AC_CACHE_CHECK(for iconv, am_cv_func_iconv, [ am_cv_func_iconv="no, consider installing GNU libiconv" am_cv_lib_iconv=no AC_TRY_LINK([#include #include ], [iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd);], am_cv_func_iconv=yes) if test "$am_cv_func_iconv" != yes; then am_save_LIBS="$LIBS" LIBS="$LIBS $LIBICONV" AC_TRY_LINK([#include #include ], [iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd);], am_cv_lib_iconv=yes am_cv_func_iconv=yes) LIBS="$am_save_LIBS" fi ]) if test "$am_cv_func_iconv" = yes; then AC_DEFINE(HAVE_ICONV, 1, [Define if you have the iconv() function.]) fi if test "$am_cv_lib_iconv" = yes; then AC_MSG_CHECKING([how to link with libiconv]) AC_MSG_RESULT([$LIBICONV]) else dnl If $LIBICONV didn't lead to a usable library, we don't need $INCICONV dnl either. CPPFLAGS="$am_save_CPPFLAGS" LIBICONV= LTLIBICONV= fi AC_SUBST(LIBICONV) AC_SUBST(LTLIBICONV) ]) AC_DEFUN([AM_ICONV], [ AM_ICONV_LINK if test "$am_cv_func_iconv" = yes; then AC_MSG_CHECKING([for iconv declaration]) AC_CACHE_VAL(am_cv_proto_iconv, [ AC_TRY_COMPILE([ #include #include extern #ifdef __cplusplus "C" #endif #if defined(__STDC__) || defined(__cplusplus) size_t iconv (iconv_t cd, char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft); #else size_t iconv(); #endif ], [], am_cv_proto_iconv_arg1="", am_cv_proto_iconv_arg1="const") am_cv_proto_iconv="extern size_t iconv (iconv_t cd, $am_cv_proto_iconv_arg1 char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft);"]) am_cv_proto_iconv=`echo "[$]am_cv_proto_iconv" | tr -s ' ' | sed -e 's/( /(/'` AC_MSG_RESULT([$]{ac_t:- }[$]am_cv_proto_iconv) AC_DEFINE_UNQUOTED(ICONV_CONST, $am_cv_proto_iconv_arg1, [Define as const if the declaration of iconv() needs const.]) fi ]) nordugrid-arc-5.4.2/m4/PaxHeaders.7502/po.m40000644000000000000000000000013213214315702016372 xustar000000000000000030 mtime=1513200578.271747917 30 atime=1513200579.470762582 30 ctime=1513200658.596730323 nordugrid-arc-5.4.2/m4/po.m40000644000175000002070000002152013214315702016440 0ustar00mockbuildmock00000000000000# po.m4 serial 1 (gettext-0.12) dnl Copyright (C) 1995-2003 Free Software Foundation, Inc. dnl This file is free software, distributed under the terms of the GNU dnl General Public License. As a special exception to the GNU General dnl Public License, this file may be distributed as part of a program dnl that contains a configuration script generated by Autoconf, under dnl the same distribution terms as the rest of that program. dnl dnl This file can can be used in projects which are not available under dnl the GNU General Public License or the GNU Library General Public dnl License but which still want to provide support for the GNU gettext dnl functionality. dnl Please note that the actual code of the GNU gettext library is covered dnl by the GNU Library General Public License, and the rest of the GNU dnl gettext package package is covered by the GNU General Public License. dnl They are *not* in the public domain. dnl Authors: dnl Ulrich Drepper , 1995-2000. dnl Bruno Haible , 2000-2003. dnl Checks for all prerequisites of the po subdirectory. AC_DEFUN([AM_PO_SUBDIRS], [ AC_REQUIRE([AC_PROG_MAKE_SET])dnl AC_REQUIRE([AC_PROG_INSTALL])dnl AC_REQUIRE([AM_MKINSTALLDIRS])dnl AC_REQUIRE([AM_NLS])dnl dnl Perform the following tests also if --disable-nls has been given, dnl because they are needed for "make dist" to work. 
dnl Search for GNU msgfmt in the PATH. dnl The first test excludes Solaris msgfmt and early GNU msgfmt versions. dnl The second test excludes FreeBSD msgfmt. AM_PATH_PROG_WITH_TEST(MSGFMT, msgfmt, [$ac_dir/$ac_word --statistics /dev/null >/dev/null 2>&1 && (if $ac_dir/$ac_word --statistics /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi)], :) AC_PATH_PROG(GMSGFMT, gmsgfmt, $MSGFMT) dnl Search for GNU xgettext 0.12 or newer in the PATH. dnl The first test excludes Solaris xgettext and early GNU xgettext versions. dnl The second test excludes FreeBSD xgettext. AM_PATH_PROG_WITH_TEST(XGETTEXT, xgettext, [$ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null >/dev/null 2>&1 && (if $ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi)], :) dnl Remove leftover from FreeBSD xgettext call. rm -f messages.po dnl Search for GNU msgmerge 0.11 or newer in the PATH. AM_PATH_PROG_WITH_TEST(MSGMERGE, msgmerge, [$ac_dir/$ac_word --update -q /dev/null /dev/null >/dev/null 2>&1], :) dnl This could go away some day; the PATH_PROG_WITH_TEST already does it. dnl Test whether we really found GNU msgfmt. if test "$GMSGFMT" != ":"; then dnl If it is no GNU msgfmt we define it as : so that the dnl Makefiles still can work. if $GMSGFMT --statistics /dev/null >/dev/null 2>&1 && (if $GMSGFMT --statistics /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then : ; else GMSGFMT=`echo "$GMSGFMT" | sed -e 's,^.*/,,'` AC_MSG_RESULT( [found $GMSGFMT program is not GNU msgfmt; ignore it]) GMSGFMT=":" fi fi dnl This could go away some day; the PATH_PROG_WITH_TEST already does it. dnl Test whether we really found GNU xgettext. if test "$XGETTEXT" != ":"; then dnl If it is no GNU xgettext we define it as : so that the dnl Makefiles still can work. if $XGETTEXT --omit-header --copyright-holder= --msgid-bugs-address= /dev/null >/dev/null 2>&1 && (if $XGETTEXT --omit-header --copyright-holder= --msgid-bugs-address= /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then : ; else AC_MSG_RESULT( [found xgettext program is not GNU xgettext; ignore it]) XGETTEXT=":" fi dnl Remove leftover from FreeBSD xgettext call. rm -f messages.po fi AC_OUTPUT_COMMANDS([ for ac_file in $CONFIG_FILES; do # Support "outfile[:infile[:infile...]]" case "$ac_file" in *:*) ac_file=`echo "$ac_file"|sed 's%:.*%%'` ;; esac # PO directories have a Makefile.in generated from Makefile.in.in. case "$ac_file" in */Makefile.in) # Adjust a relative srcdir. ac_dir=`echo "$ac_file"|sed 's%/[^/][^/]*$%%'` ac_dir_suffix="/`echo "$ac_dir"|sed 's%^\./%%'`" ac_dots=`echo "$ac_dir_suffix"|sed 's%/[^/]*%../%g'` # In autoconf-2.13 it is called $ac_given_srcdir. # In autoconf-2.50 it is called $srcdir. test -n "$ac_given_srcdir" || ac_given_srcdir="$srcdir" case "$ac_given_srcdir" in .) 
top_srcdir=`echo $ac_dots|sed 's%/$%%'` ;; /*) top_srcdir="$ac_given_srcdir" ;; *) top_srcdir="$ac_dots$ac_given_srcdir" ;; esac if test -f "$ac_given_srcdir/$ac_dir/POTFILES.in"; then rm -f "$ac_dir/POTFILES" test -n "$as_me" && echo "$as_me: creating $ac_dir/POTFILES" || echo "creating $ac_dir/POTFILES" cat "$ac_given_srcdir/$ac_dir/POTFILES.in" | sed -e "/^#/d" -e "/^[ ]*\$/d" -e "s,.*, $top_srcdir/& \\\\," | sed -e "\$s/\(.*\) \\\\/\1/" > "$ac_dir/POTFILES" POMAKEFILEDEPS="POTFILES.in" # ALL_LINGUAS, POFILES, GMOFILES, UPDATEPOFILES, DUMMYPOFILES depend # on $ac_dir but don't depend on user-specified configuration # parameters. if test -f "$ac_given_srcdir/$ac_dir/LINGUAS"; then # The LINGUAS file contains the set of available languages. if test -n "$OBSOLETE_ALL_LINGUAS"; then test -n "$as_me" && echo "$as_me: setting ALL_LINGUAS in configure.in is obsolete" || echo "setting ALL_LINGUAS in configure.in is obsolete" fi ALL_LINGUAS_=`sed -e "/^#/d" "$ac_given_srcdir/$ac_dir/LINGUAS"` # Hide the ALL_LINGUAS assigment from automake. eval 'ALL_LINGUAS''=$ALL_LINGUAS_' POMAKEFILEDEPS="$POMAKEFILEDEPS LINGUAS" else # The set of available languages was given in configure.in. eval 'ALL_LINGUAS''=$OBSOLETE_ALL_LINGUAS' fi case "$ac_given_srcdir" in .) srcdirpre= ;; *) srcdirpre='$(srcdir)/' ;; esac POFILES= GMOFILES= UPDATEPOFILES= DUMMYPOFILES= for lang in $ALL_LINGUAS; do POFILES="$POFILES $srcdirpre$lang.po" GMOFILES="$GMOFILES $srcdirpre$lang.gmo" UPDATEPOFILES="$UPDATEPOFILES $lang.po-update" DUMMYPOFILES="$DUMMYPOFILES $lang.nop" done # CATALOGS depends on both $ac_dir and the user's LINGUAS # environment variable. INST_LINGUAS= if test -n "$ALL_LINGUAS"; then for presentlang in $ALL_LINGUAS; do useit=no if test "%UNSET%" != "$LINGUAS"; then desiredlanguages="$LINGUAS" else desiredlanguages="$ALL_LINGUAS" fi for desiredlang in $desiredlanguages; do # Use the presentlang catalog if desiredlang is # a. equal to presentlang, or # b. a variant of presentlang (because in this case, # presentlang can be used as a fallback for messages # which are not translated in the desiredlang catalog). case "$desiredlang" in "$presentlang"*) useit=yes;; esac done if test $useit = yes; then INST_LINGUAS="$INST_LINGUAS $presentlang" fi done fi CATALOGS= if test -n "$INST_LINGUAS"; then for lang in $INST_LINGUAS; do CATALOGS="$CATALOGS $lang.gmo" done fi test -n "$as_me" && echo "$as_me: creating $ac_dir/Makefile" || echo "creating $ac_dir/Makefile" sed -e "/^POTFILES =/r $ac_dir/POTFILES" -e "/^# Makevars/r $ac_given_srcdir/$ac_dir/Makevars" -e "s|@POFILES@|$POFILES|g" -e "s|@GMOFILES@|$GMOFILES|g" -e "s|@UPDATEPOFILES@|$UPDATEPOFILES|g" -e "s|@DUMMYPOFILES@|$DUMMYPOFILES|g" -e "s|@CATALOGS@|$CATALOGS|g" -e "s|@POMAKEFILEDEPS@|$POMAKEFILEDEPS|g" "$ac_dir/Makefile.in" > "$ac_dir/Makefile" for f in "$ac_given_srcdir/$ac_dir"/Rules-*; do if test -f "$f"; then case "$f" in *.orig | *.bak | *~) ;; *) cat "$f" >> "$ac_dir/Makefile" ;; esac fi done fi ;; esac done], [# Capture the value of obsolete ALL_LINGUAS because we need it to compute # POFILES, GMOFILES, UPDATEPOFILES, DUMMYPOFILES, CATALOGS. But hide it # from automake. eval 'OBSOLETE_ALL_LINGUAS''="$ALL_LINGUAS"' # Capture the value of LINGUAS because we need it to compute CATALOGS. 
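# Illustrative example of the catalog selection above: with ALL_LINGUAS set
# to "de sv" and a user environment of LINGUAS=de_AT, only the de catalog is
# selected (de_AT is treated as a variant that falls back to de), so CATALOGS
# ends up containing just de.gmo; when LINGUAS is unset (the %UNSET% sentinel
# below), every catalog listed in ALL_LINGUAS is installed.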
LINGUAS="${LINGUAS-%UNSET%}" ]) ]) nordugrid-arc-5.4.2/m4/PaxHeaders.7502/ac_cxx_namespaces.m40000644000000000000000000000012710656572556021447 xustar000000000000000027 mtime=1186657646.676048 30 atime=1513200579.482762728 30 ctime=1513200658.582730152 nordugrid-arc-5.4.2/m4/ac_cxx_namespaces.m40000644000175000002070000000130610656572556021511 0ustar00mockbuildmock00000000000000dnl @synopsis AC_CXX_NAMESPACES dnl dnl If the compiler can prevent names clashes using namespaces, define dnl HAVE_NAMESPACES. dnl dnl @version $Id: ac_cxx_namespaces.m4 3830 2005-06-24 07:01:15Z waananen $ dnl @author Luc Maisonobe dnl AC_DEFUN([AC_CXX_NAMESPACES], [AC_CACHE_CHECK(whether the compiler implements namespaces, ac_cv_cxx_namespaces, [AC_LANG_SAVE AC_LANG_CPLUSPLUS AC_TRY_COMPILE([namespace Outer { namespace Inner { int i = 0; }}], [using namespace Outer::Inner; return i;], ac_cv_cxx_namespaces=yes, ac_cv_cxx_namespaces=no) AC_LANG_RESTORE ]) if test "$ac_cv_cxx_namespaces" = yes; then AC_DEFINE(HAVE_NAMESPACES,,[define if the compiler implements namespaces]) fi ]) nordugrid-arc-5.4.2/PaxHeaders.7502/depcomp0000644000000000000000000000013213214315736016553 xustar000000000000000030 mtime=1513200606.310090837 30 atime=1513200611.193150559 30 ctime=1513200658.616730568 nordugrid-arc-5.4.2/depcomp0000755000175000002070000004426713214315736016641 0ustar00mockbuildmock00000000000000#! /bin/sh # depcomp - compile a program generating dependencies as side-effects scriptversion=2009-04-28.21; # UTC # Copyright (C) 1999, 2000, 2003, 2004, 2005, 2006, 2007, 2009 Free # Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # Originally written by Alexandre Oliva . case $1 in '') echo "$0: No command. Try \`$0 --help' for more information." 1>&2 exit 1; ;; -h | --h*) cat <<\EOF Usage: depcomp [--help] [--version] PROGRAM [ARGS] Run PROGRAMS ARGS to compile a file, generating dependencies as side-effects. Environment variables: depmode Dependency tracking mode. source Source file read by `PROGRAMS ARGS'. object Object file output by `PROGRAMS ARGS'. DEPDIR directory where to store dependencies. depfile Dependency file to output. tmpdepfile Temporary file to use when outputing dependencies. libtool Whether libtool is used (yes/no). Report bugs to . EOF exit $? ;; -v | --v*) echo "depcomp $scriptversion" exit $? ;; esac if test -z "$depmode" || test -z "$source" || test -z "$object"; then echo "depcomp: Variables source, object and depmode must be set" 1>&2 exit 1 fi # Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po. 
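# Invocation sketch (values illustrative, not normative): automake-generated
# rules call this script with the compile command as its arguments and the
# controlling variables in the environment, roughly
#
#   source=sub/bar.c object=sub/bar.o libtool=no \
#   DEPDIR=.deps depmode=gcc3 depfile=sub/.deps/bar.Po \
#   ./depcomp gcc -c -o sub/bar.o sub/bar.c
#
# With the defaults computed below, object=sub/bar.o yields
# depfile=sub/.deps/bar.Po and tmpdepfile=sub/.deps/bar.TPo.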
depfile=${depfile-`echo "$object" | sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`} tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`} rm -f "$tmpdepfile" # Some modes work just like other modes, but use different flags. We # parameterize here, but still list the modes in the big case below, # to make depend.m4 easier to write. Note that we *cannot* use a case # here, because this file can only contain one case statement. if test "$depmode" = hp; then # HP compiler uses -M and no extra arg. gccflag=-M depmode=gcc fi if test "$depmode" = dashXmstdout; then # This is just like dashmstdout with a different argument. dashmflag=-xM depmode=dashmstdout fi cygpath_u="cygpath -u -f -" if test "$depmode" = msvcmsys; then # This is just like msvisualcpp but w/o cygpath translation. # Just convert the backslash-escaped backslashes to single forward # slashes to satisfy depend.m4 cygpath_u="sed s,\\\\\\\\,/,g" depmode=msvisualcpp fi case "$depmode" in gcc3) ## gcc 3 implements dependency tracking that does exactly what ## we want. Yay! Note: for some reason libtool 1.4 doesn't like ## it if -MD -MP comes after the -MF stuff. Hmm. ## Unfortunately, FreeBSD c89 acceptance of flags depends upon ## the command line argument order; so add the flags where they ## appear in depend2.am. Note that the slowdown incurred here ## affects only configure: in makefiles, %FASTDEP% shortcuts this. for arg do case $arg in -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;; *) set fnord "$@" "$arg" ;; esac shift # fnord shift # $arg done "$@" stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile" exit $stat fi mv "$tmpdepfile" "$depfile" ;; gcc) ## There are various ways to get dependency output from gcc. Here's ## why we pick this rather obscure method: ## - Don't want to use -MD because we'd like the dependencies to end ## up in a subdir. Having to rename by hand is ugly. ## (We might end up doing this anyway to support other compilers.) ## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like ## -MM, not -M (despite what the docs say). ## - Using -M directly means running the compiler twice (even worse ## than renaming). if test -z "$gccflag"; then gccflag=-MD, fi "$@" -Wp,"$gccflag$tmpdepfile" stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" echo "$object : \\" > "$depfile" alpha=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz ## The second -e expression handles DOS-style file names with drive letters. sed -e 's/^[^:]*: / /' \ -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile" ## This next piece of magic avoids the `deleted header file' problem. ## The problem is that when a header file which appears in a .P file ## is deleted, the dependency causes make to die (because there is ## typically no way to rebuild the header). We avoid this by adding ## dummy dependencies for each header file. Too bad gcc doesn't do ## this for us directly. tr ' ' ' ' < "$tmpdepfile" | ## Some versions of gcc put a space before the `:'. On the theory ## that the space means something, we add a space to the output as ## well. ## Some versions of the HPUX 10.20 sed can't process this invocation ## correctly. Breaking it into two sed invocations is a workaround. sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; hp) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. 
This case will never be run, # since it is checked for above. exit 1 ;; sgi) if test "$libtool" = yes; then "$@" "-Wp,-MDupdate,$tmpdepfile" else "$@" -MDupdate "$tmpdepfile" fi stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files echo "$object : \\" > "$depfile" # Clip off the initial element (the dependent). Don't try to be # clever and replace this with sed code, as IRIX sed won't handle # lines with more than a fixed number of characters (4096 in # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines; # the IRIX cc adds comments like `#:fec' to the end of the # dependency line. tr ' ' ' ' < "$tmpdepfile" \ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' | \ tr ' ' ' ' >> "$depfile" echo >> "$depfile" # The second pass generates a dummy entry for each header file. tr ' ' ' ' < "$tmpdepfile" \ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \ >> "$depfile" else # The sourcefile does not contain any dependencies, so just # store a dummy comment line, to avoid errors with the Makefile # "include basename.Plo" scheme. echo "#dummy" > "$depfile" fi rm -f "$tmpdepfile" ;; aix) # The C for AIX Compiler uses -M and outputs the dependencies # in a .u file. In older versions, this file always lives in the # current directory. Also, the AIX compiler puts `$object:' at the # start of each line; $object doesn't have directory information. # Version 6 uses the directory in both cases. dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` test "x$dir" = "x$object" && dir= base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` if test "$libtool" = yes; then tmpdepfile1=$dir$base.u tmpdepfile2=$base.u tmpdepfile3=$dir.libs/$base.u "$@" -Wc,-M else tmpdepfile1=$dir$base.u tmpdepfile2=$dir$base.u tmpdepfile3=$dir$base.u "$@" -M fi stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" do test -f "$tmpdepfile" && break done if test -f "$tmpdepfile"; then # Each line is of the form `foo.o: dependent.h'. # Do two passes, one to just change these to # `$object: dependent.h' and one to simply `dependent.h:'. sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile" # That's a tab and a space in the []. sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile" else # The sourcefile does not contain any dependencies, so just # store a dummy comment line, to avoid errors with the Makefile # "include basename.Plo" scheme. echo "#dummy" > "$depfile" fi rm -f "$tmpdepfile" ;; icc) # Intel's C compiler understands `-MD -MF file'. However on # icc -MD -MF foo.d -c -o sub/foo.o sub/foo.c # ICC 7.0 will fill foo.d with something like # foo.o: sub/foo.c # foo.o: sub/foo.h # which is wrong. We want: # sub/foo.o: sub/foo.c # sub/foo.o: sub/foo.h # sub/foo.c: # sub/foo.h: # ICC 7.1 will output # foo.o: sub/foo.c sub/foo.h # and will wrap long lines using \ : # foo.o: sub/foo.c ... \ # sub/foo.h ... \ # ... "$@" -MD -MF "$tmpdepfile" stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" # Each line is of the form `foo.o: dependent.h', # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'. # Do two passes, one to just change these to # `$object: dependent.h' and one to simply `dependent.h:'. 
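# For example (hypothetical paths, with object=sub/foo.o): an ICC 7.0 style
# line such as
#   foo.o: sub/foo.h
# becomes, after the first sed below,
#   sub/foo.o : sub/foo.h
# while the second pass emits the dummy target
#   sub/foo.h :
# so a header that is later deleted does not break the generated .Po file.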
sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile" # Some versions of the HPUX 10.20 sed can't process this invocation # correctly. Breaking it into two sed invocations is a workaround. sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; hp2) # The "hp" stanza above does not work with aCC (C++) and HP's ia64 # compilers, which have integrated preprocessors. The correct option # to use with these is +Maked; it writes dependencies to a file named # 'foo.d', which lands next to the object file, wherever that # happens to be. # Much of this is similar to the tru64 case; see comments there. dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` test "x$dir" = "x$object" && dir= base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` if test "$libtool" = yes; then tmpdepfile1=$dir$base.d tmpdepfile2=$dir.libs/$base.d "$@" -Wc,+Maked else tmpdepfile1=$dir$base.d tmpdepfile2=$dir$base.d "$@" +Maked fi stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile1" "$tmpdepfile2" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" do test -f "$tmpdepfile" && break done if test -f "$tmpdepfile"; then sed -e "s,^.*\.[a-z]*:,$object:," "$tmpdepfile" > "$depfile" # Add `dependent.h:' lines. sed -ne '2,${ s/^ *// s/ \\*$// s/$/:/ p }' "$tmpdepfile" >> "$depfile" else echo "#dummy" > "$depfile" fi rm -f "$tmpdepfile" "$tmpdepfile2" ;; tru64) # The Tru64 compiler uses -MD to generate dependencies as a side # effect. `cc -MD -o foo.o ...' puts the dependencies into `foo.o.d'. # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put # dependencies in `foo.d' instead, so we check for that too. # Subdirectories are respected. dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` test "x$dir" = "x$object" && dir= base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` if test "$libtool" = yes; then # With Tru64 cc, shared objects can also be used to make a # static library. This mechanism is used in libtool 1.4 series to # handle both shared and static libraries in a single compilation. # With libtool 1.4, dependencies were output in $dir.libs/$base.lo.d. # # With libtool 1.5 this exception was removed, and libtool now # generates 2 separate objects for the 2 libraries. These two # compilations output dependencies in $dir.libs/$base.o.d and # in $dir$base.o.d. We have to check for both files, because # one of the two compilations can be disabled. We should prefer # $dir$base.o.d over $dir.libs/$base.o.d because the latter is # automatically cleaned when .libs/ is deleted, while ignoring # the former would cause a distcleancheck panic. tmpdepfile1=$dir.libs/$base.lo.d # libtool 1.4 tmpdepfile2=$dir$base.o.d # libtool 1.5 tmpdepfile3=$dir.libs/$base.o.d # libtool 1.5 tmpdepfile4=$dir.libs/$base.d # Compaq CCC V6.2-504 "$@" -Wc,-MD else tmpdepfile1=$dir$base.o.d tmpdepfile2=$dir$base.d tmpdepfile3=$dir$base.d tmpdepfile4=$dir$base.d "$@" -MD fi stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4" do test -f "$tmpdepfile" && break done if test -f "$tmpdepfile"; then sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile" # That's a tab and a space in the []. 
sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile" else echo "#dummy" > "$depfile" fi rm -f "$tmpdepfile" ;; #nosideeffect) # This comment above is used by automake to tell side-effect # dependency tracking mechanisms from slower ones. dashmstdout) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout, regardless of -o. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # Remove `-o $object'. IFS=" " for arg do case $arg in -o) shift ;; $object) shift ;; *) set fnord "$@" "$arg" shift # fnord shift # $arg ;; esac done test -z "$dashmflag" && dashmflag=-M # Require at least two characters before searching for `:' # in the target name. This is to cope with DOS-style filenames: # a dependency such as `c:/foo/bar' could be seen as target `c' otherwise. "$@" $dashmflag | sed 's:^[ ]*[^: ][^:][^:]*\:[ ]*:'"$object"'\: :' > "$tmpdepfile" rm -f "$depfile" cat < "$tmpdepfile" > "$depfile" tr ' ' ' ' < "$tmpdepfile" | \ ## Some versions of the HPUX 10.20 sed can't process this invocation ## correctly. Breaking it into two sed invocations is a workaround. sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; dashXmstdout) # This case only exists to satisfy depend.m4. It is never actually # run, as this mode is specially recognized in the preamble. exit 1 ;; makedepend) "$@" || exit $? # Remove any Libtool call if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # X makedepend shift cleared=no eat=no for arg do case $cleared in no) set ""; shift cleared=yes ;; esac if test $eat = yes; then eat=no continue fi case "$arg" in -D*|-I*) set fnord "$@" "$arg"; shift ;; # Strip any option that makedepend may not understand. Remove # the object too, otherwise makedepend will parse it as a source file. -arch) eat=yes ;; -*|$object) ;; *) set fnord "$@" "$arg"; shift ;; esac done obj_suffix=`echo "$object" | sed 's/^.*\././'` touch "$tmpdepfile" ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@" rm -f "$depfile" cat < "$tmpdepfile" > "$depfile" sed '1,2d' "$tmpdepfile" | tr ' ' ' ' | \ ## Some versions of the HPUX 10.20 sed can't process this invocation ## correctly. Breaking it into two sed invocations is a workaround. sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" "$tmpdepfile".bak ;; cpp) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # Remove `-o $object'. IFS=" " for arg do case $arg in -o) shift ;; $object) shift ;; *) set fnord "$@" "$arg" shift # fnord shift # $arg ;; esac done "$@" -E | sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' | sed '$ s: \\$::' > "$tmpdepfile" rm -f "$depfile" echo "$object : \\" > "$depfile" cat < "$tmpdepfile" >> "$depfile" sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; msvisualcpp) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout. "$@" || exit $? # Remove the call to Libtool. 
if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi IFS=" " for arg do case "$arg" in -o) shift ;; $object) shift ;; "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI") set fnord "$@" shift shift ;; *) set fnord "$@" "$arg" shift shift ;; esac done "$@" -E 2>/dev/null | sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile" rm -f "$depfile" echo "$object : \\" > "$depfile" sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s:: \1 \\:p' >> "$depfile" echo " " >> "$depfile" sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile" rm -f "$tmpdepfile" ;; msvcmsys) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; none) exec "$@" ;; *) echo "Unknown depmode $depmode" 1>&2 exit 1 ;; esac exit 0 # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: nordugrid-arc-5.4.2/PaxHeaders.7502/config.sub0000644000000000000000000000013213214315717017160 xustar000000000000000030 mtime=1513200591.875914301 30 atime=1513200617.101222817 30 ctime=1513200658.615730556 nordugrid-arc-5.4.2/config.sub0000755000175000002070000010316713214315717017241 0ustar00mockbuildmock00000000000000#! /bin/sh # Configuration validation subroutine script. # Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, # 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 # Free Software Foundation, Inc. timestamp='2009-11-20' # This file is (in principle) common to ALL GNU software. # The presence of a machine in this file suggests that SOME GNU software # can handle that machine. It does not imply ALL GNU software can. # # This file is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA # 02110-1301, USA. # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # Please send patches to . Submit a context # diff and a properly formatted GNU ChangeLog entry. # # Configuration subroutine to validate and canonicalize a configuration type. # Supply the specified configuration type as an argument. # If it is invalid, we print an error message on stderr and exit with code 1. # Otherwise, we print the canonical config type on stdout and succeed. 
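# Usage sketch (inputs illustrative; the canonical output is produced by the
# tables below):
#   ./config.sub i686-linux     ->  i686-pc-linux-gnu
#   ./config.sub sun4           ->  sparc-sun-sunos4.1.1
#   ./config.sub decstation     ->  mips-dec-ultrix4.2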
# You can get the latest version of this script from: # http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD # This file is supposed to be the same for all GNU packages # and recognize all the CPU types, system types and aliases # that are meaningful with *any* GNU software. # Each package is responsible for reporting which valid configurations # it does not support. The user should be able to distinguish # a failure to support a valid configuration from a meaningless # configuration. # The goal of this file is to map all the various variations of a given # machine specification into a single specification in the form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM # or in some cases, the newer four-part form: # CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM # It is wrong to echo any other type of specification. me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] CPU-MFR-OPSYS $0 [OPTION] ALIAS Canonicalize a configuration name. Operation modes: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit Report bugs and patches to ." version="\ GNU config.sub ($timestamp) Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. break ;; -* ) echo "$me: invalid option $1$help" exit 1 ;; *local*) # First pass through any local machine types. echo $1 exit ;; * ) break ;; esac done case $# in 0) echo "$me: missing argument$help" >&2 exit 1;; 1) ;; *) echo "$me: too many arguments$help" >&2 exit 1;; esac # Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). # Here we must recognize all the valid KERNEL-OS combinations. maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` case $maybe_os in nto-qnx* | linux-gnu* | linux-dietlibc | linux-newlib* | linux-uclibc* | \ uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | \ kopensolaris*-gnu* | \ storm-chaos* | os2-emx* | rtmk-nova*) os=-$maybe_os basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` ;; *) basic_machine=`echo $1 | sed 's/-[^-]*$//'` if [ $basic_machine != $1 ] then os=`echo $1 | sed 's/.*-/-/'` else os=; fi ;; esac ### Let's recognize common machines as not being operating systems so ### that things like config.sub decstation-3100 work. We also ### recognize some manufacturers as not being operating systems, so we ### can provide default operating systems below. case $os in -sun*os*) # Prevent following clause from handling this invalid input. 
;; -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ -apple | -axis | -knuth | -cray | -microblaze) os= basic_machine=$1 ;; -bluegene*) os=-cnk ;; -sim | -cisco | -oki | -wec | -winbond) os= basic_machine=$1 ;; -scout) ;; -wrs) os=-vxworks basic_machine=$1 ;; -chorusos*) os=-chorusos basic_machine=$1 ;; -chorusrdb) os=-chorusrdb basic_machine=$1 ;; -hiux*) os=-hiuxwe2 ;; -sco6) os=-sco5v6 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco5) os=-sco3.2v5 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco4) os=-sco3.2v4 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco3.2.[4-9]*) os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco3.2v[4-9]*) # Don't forget version if it is 3.2v4 or newer. basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco5v6*) # Don't forget version if it is 3.2v4 or newer. basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco*) os=-sco3.2v2 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -udk*) basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -isc) os=-isc2.2 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -clix*) basic_machine=clipper-intergraph ;; -isc*) basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -lynx*) os=-lynxos ;; -ptx*) basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` ;; -windowsnt*) os=`echo $os | sed -e 's/windowsnt/winnt/'` ;; -psos*) os=-psos ;; -mint | -mint[0-9]*) basic_machine=m68k-atari os=-mint ;; esac # Decode aliases for certain CPU-COMPANY combinations. case $basic_machine in # Recognize the basic CPU types without company name. # Some are omitted here because they have special meanings below. 
1750a | 580 \ | a29k \ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ | am33_2.0 \ | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \ | bfin \ | c4x | clipper \ | d10v | d30v | dlx | dsp16xx \ | fido | fr30 | frv \ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ | i370 | i860 | i960 | ia64 \ | ip2k | iq2000 \ | lm32 \ | m32c | m32r | m32rle | m68000 | m68k | m88k \ | maxq | mb | microblaze | mcore | mep | metag \ | mips | mipsbe | mipseb | mipsel | mipsle \ | mips16 \ | mips64 | mips64el \ | mips64octeon | mips64octeonel \ | mips64orion | mips64orionel \ | mips64r5900 | mips64r5900el \ | mips64vr | mips64vrel \ | mips64vr4100 | mips64vr4100el \ | mips64vr4300 | mips64vr4300el \ | mips64vr5000 | mips64vr5000el \ | mips64vr5900 | mips64vr5900el \ | mipsisa32 | mipsisa32el \ | mipsisa32r2 | mipsisa32r2el \ | mipsisa64 | mipsisa64el \ | mipsisa64r2 | mipsisa64r2el \ | mipsisa64sb1 | mipsisa64sb1el \ | mipsisa64sr71k | mipsisa64sr71kel \ | mipstx39 | mipstx39el \ | mn10200 | mn10300 \ | moxie \ | mt \ | msp430 \ | nios | nios2 \ | ns16k | ns32k \ | or32 \ | pdp10 | pdp11 | pj | pjl \ | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \ | pyramid \ | rx \ | score \ | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ | sh64 | sh64le \ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ | spu | strongarm \ | tahoe | thumb | tic4x | tic80 | tron \ | ubicom32 \ | v850 | v850e \ | we32k \ | x86 | xc16x | xscale | xscalee[bl] | xstormy16 | xtensa \ | z8k | z80) basic_machine=$basic_machine-unknown ;; m6811 | m68hc11 | m6812 | m68hc12 | picochip) # Motorola 68HC11/12. basic_machine=$basic_machine-unknown os=-none ;; m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) ;; ms1) basic_machine=mt-unknown ;; # We use `pc' rather than `unknown' # because (1) that's what they normally are, and # (2) the word "unknown" tends to confuse beginning users. i*86 | x86_64) basic_machine=$basic_machine-pc ;; # Object if more than one company name word. *-*-*) echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 exit 1 ;; # Recognize the basic CPU types with company name. 
580-* \ | a29k-* \ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ | avr-* | avr32-* \ | bfin-* | bs2000-* \ | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \ | clipper-* | craynv-* | cydra-* \ | d10v-* | d30v-* | dlx-* \ | elxsi-* \ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ | h8300-* | h8500-* \ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ | i*86-* | i860-* | i960-* | ia64-* \ | ip2k-* | iq2000-* \ | lm32-* \ | m32c-* | m32r-* | m32rle-* \ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ | mips16-* \ | mips64-* | mips64el-* \ | mips64octeon-* | mips64octeonel-* \ | mips64orion-* | mips64orionel-* \ | mips64r5900-* | mips64r5900el-* \ | mips64vr-* | mips64vrel-* \ | mips64vr4100-* | mips64vr4100el-* \ | mips64vr4300-* | mips64vr4300el-* \ | mips64vr5000-* | mips64vr5000el-* \ | mips64vr5900-* | mips64vr5900el-* \ | mipsisa32-* | mipsisa32el-* \ | mipsisa32r2-* | mipsisa32r2el-* \ | mipsisa64-* | mipsisa64el-* \ | mipsisa64r2-* | mipsisa64r2el-* \ | mipsisa64sb1-* | mipsisa64sb1el-* \ | mipsisa64sr71k-* | mipsisa64sr71kel-* \ | mipstx39-* | mipstx39el-* \ | mmix-* \ | mt-* \ | msp430-* \ | nios-* | nios2-* \ | none-* | np1-* | ns16k-* | ns32k-* \ | orion-* \ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \ | pyramid-* \ | romp-* | rs6000-* | rx-* \ | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ | sparclite-* \ | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | strongarm-* | sv1-* | sx?-* \ | tahoe-* | thumb-* \ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* | tile-* \ | tron-* \ | ubicom32-* \ | v850-* | v850e-* | vax-* \ | we32k-* \ | x86-* | x86_64-* | xc16x-* | xps100-* | xscale-* | xscalee[bl]-* \ | xstormy16-* | xtensa*-* \ | ymp-* \ | z8k-* | z80-*) ;; # Recognize the basic CPU types without company name, with glob match. xtensa*) basic_machine=$basic_machine-unknown ;; # Recognize the various machine names and aliases which stand # for a CPU type and a company and sometimes even an OS. 
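# For instance, a bare CPU name from the lists above is completed with a
# default company: "i686" and "x86_64" become i686-pc and x86_64-pc, while
# most others (e.g. "mips") become CPU-unknown, so that
#   ./config.sub x86_64-linux   ->  x86_64-pc-linux-gnu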
386bsd) basic_machine=i386-unknown os=-bsd ;; 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) basic_machine=m68000-att ;; 3b*) basic_machine=we32k-att ;; a29khif) basic_machine=a29k-amd os=-udi ;; abacus) basic_machine=abacus-unknown ;; adobe68k) basic_machine=m68010-adobe os=-scout ;; alliant | fx80) basic_machine=fx80-alliant ;; altos | altos3068) basic_machine=m68k-altos ;; am29k) basic_machine=a29k-none os=-bsd ;; amd64) basic_machine=x86_64-pc ;; amd64-*) basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` ;; amdahl) basic_machine=580-amdahl os=-sysv ;; amiga | amiga-*) basic_machine=m68k-unknown ;; amigaos | amigados) basic_machine=m68k-unknown os=-amigaos ;; amigaunix | amix) basic_machine=m68k-unknown os=-sysv4 ;; apollo68) basic_machine=m68k-apollo os=-sysv ;; apollo68bsd) basic_machine=m68k-apollo os=-bsd ;; aros) basic_machine=i386-pc os=-aros ;; aux) basic_machine=m68k-apple os=-aux ;; balance) basic_machine=ns32k-sequent os=-dynix ;; blackfin) basic_machine=bfin-unknown os=-linux ;; blackfin-*) basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` os=-linux ;; bluegene*) basic_machine=powerpc-ibm os=-cnk ;; c90) basic_machine=c90-cray os=-unicos ;; cegcc) basic_machine=arm-unknown os=-cegcc ;; convex-c1) basic_machine=c1-convex os=-bsd ;; convex-c2) basic_machine=c2-convex os=-bsd ;; convex-c32) basic_machine=c32-convex os=-bsd ;; convex-c34) basic_machine=c34-convex os=-bsd ;; convex-c38) basic_machine=c38-convex os=-bsd ;; cray | j90) basic_machine=j90-cray os=-unicos ;; craynv) basic_machine=craynv-cray os=-unicosmp ;; cr16) basic_machine=cr16-unknown os=-elf ;; crds | unos) basic_machine=m68k-crds ;; crisv32 | crisv32-* | etraxfs*) basic_machine=crisv32-axis ;; cris | cris-* | etrax*) basic_machine=cris-axis ;; crx) basic_machine=crx-unknown os=-elf ;; da30 | da30-*) basic_machine=m68k-da30 ;; decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) basic_machine=mips-dec ;; decsystem10* | dec10*) basic_machine=pdp10-dec os=-tops10 ;; decsystem20* | dec20*) basic_machine=pdp10-dec os=-tops20 ;; delta | 3300 | motorola-3300 | motorola-delta \ | 3300-motorola | delta-motorola) basic_machine=m68k-motorola ;; delta88) basic_machine=m88k-motorola os=-sysv3 ;; dicos) basic_machine=i686-pc os=-dicos ;; djgpp) basic_machine=i586-pc os=-msdosdjgpp ;; dpx20 | dpx20-*) basic_machine=rs6000-bull os=-bosx ;; dpx2* | dpx2*-bull) basic_machine=m68k-bull os=-sysv3 ;; ebmon29k) basic_machine=a29k-amd os=-ebmon ;; elxsi) basic_machine=elxsi-elxsi os=-bsd ;; encore | umax | mmax) basic_machine=ns32k-encore ;; es1800 | OSE68k | ose68k | ose | OSE) basic_machine=m68k-ericsson os=-ose ;; fx2800) basic_machine=i860-alliant ;; genix) basic_machine=ns32k-ns ;; gmicro) basic_machine=tron-gmicro os=-sysv ;; go32) basic_machine=i386-pc os=-go32 ;; h3050r* | hiux*) basic_machine=hppa1.1-hitachi os=-hiuxwe2 ;; h8300hms) basic_machine=h8300-hitachi os=-hms ;; h8300xray) basic_machine=h8300-hitachi os=-xray ;; h8500hms) basic_machine=h8500-hitachi os=-hms ;; harris) basic_machine=m88k-harris os=-sysv3 ;; hp300-*) basic_machine=m68k-hp ;; hp300bsd) basic_machine=m68k-hp os=-bsd ;; hp300hpux) basic_machine=m68k-hp os=-hpux ;; hp3k9[0-9][0-9] | hp9[0-9][0-9]) basic_machine=hppa1.0-hp ;; hp9k2[0-9][0-9] | hp9k31[0-9]) basic_machine=m68000-hp ;; hp9k3[2-9][0-9]) basic_machine=m68k-hp ;; hp9k6[0-9][0-9] | hp6[0-9][0-9]) basic_machine=hppa1.0-hp ;; hp9k7[0-79][0-9] | hp7[0-79][0-9]) basic_machine=hppa1.1-hp ;; hp9k78[0-9] | hp78[0-9]) # FIXME: really hppa2.0-hp 
basic_machine=hppa1.1-hp ;; hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) # FIXME: really hppa2.0-hp basic_machine=hppa1.1-hp ;; hp9k8[0-9][13679] | hp8[0-9][13679]) basic_machine=hppa1.1-hp ;; hp9k8[0-9][0-9] | hp8[0-9][0-9]) basic_machine=hppa1.0-hp ;; hppa-next) os=-nextstep3 ;; hppaosf) basic_machine=hppa1.1-hp os=-osf ;; hppro) basic_machine=hppa1.1-hp os=-proelf ;; i370-ibm* | ibm*) basic_machine=i370-ibm ;; # I'm not sure what "Sysv32" means. Should this be sysv3.2? i*86v32) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-sysv32 ;; i*86v4*) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-sysv4 ;; i*86v) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-sysv ;; i*86sol2) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-solaris2 ;; i386mach) basic_machine=i386-mach os=-mach ;; i386-vsta | vsta) basic_machine=i386-unknown os=-vsta ;; iris | iris4d) basic_machine=mips-sgi case $os in -irix*) ;; *) os=-irix4 ;; esac ;; isi68 | isi) basic_machine=m68k-isi os=-sysv ;; m68knommu) basic_machine=m68k-unknown os=-linux ;; m68knommu-*) basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` os=-linux ;; m88k-omron*) basic_machine=m88k-omron ;; magnum | m3230) basic_machine=mips-mips os=-sysv ;; merlin) basic_machine=ns32k-utek os=-sysv ;; microblaze) basic_machine=microblaze-xilinx ;; mingw32) basic_machine=i386-pc os=-mingw32 ;; mingw32ce) basic_machine=arm-unknown os=-mingw32ce ;; miniframe) basic_machine=m68000-convergent ;; *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) basic_machine=m68k-atari os=-mint ;; mips3*-*) basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` ;; mips3*) basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown ;; monitor) basic_machine=m68k-rom68k os=-coff ;; morphos) basic_machine=powerpc-unknown os=-morphos ;; msdos) basic_machine=i386-pc os=-msdos ;; ms1-*) basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` ;; mvs) basic_machine=i370-ibm os=-mvs ;; ncr3000) basic_machine=i486-ncr os=-sysv4 ;; netbsd386) basic_machine=i386-unknown os=-netbsd ;; netwinder) basic_machine=armv4l-rebel os=-linux ;; news | news700 | news800 | news900) basic_machine=m68k-sony os=-newsos ;; news1000) basic_machine=m68030-sony os=-newsos ;; news-3600 | risc-news) basic_machine=mips-sony os=-newsos ;; necv70) basic_machine=v70-nec os=-sysv ;; next | m*-next ) basic_machine=m68k-next case $os in -nextstep* ) ;; -ns2*) os=-nextstep2 ;; *) os=-nextstep3 ;; esac ;; nh3000) basic_machine=m68k-harris os=-cxux ;; nh[45]000) basic_machine=m88k-harris os=-cxux ;; nindy960) basic_machine=i960-intel os=-nindy ;; mon960) basic_machine=i960-intel os=-mon960 ;; nonstopux) basic_machine=mips-compaq os=-nonstopux ;; np1) basic_machine=np1-gould ;; nsr-tandem) basic_machine=nsr-tandem ;; op50n-* | op60c-*) basic_machine=hppa1.1-oki os=-proelf ;; openrisc | openrisc-*) basic_machine=or32-unknown ;; os400) basic_machine=powerpc-ibm os=-os400 ;; OSE68000 | ose68000) basic_machine=m68000-ericsson os=-ose ;; os68k) basic_machine=m68k-none os=-os68k ;; pa-hitachi) basic_machine=hppa1.1-hitachi os=-hiuxwe2 ;; paragon) basic_machine=i860-intel os=-osf ;; parisc) basic_machine=hppa-unknown os=-linux ;; parisc-*) basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` os=-linux ;; pbd) basic_machine=sparc-tti ;; pbb) basic_machine=m68k-tti ;; pc532 | pc532-*) basic_machine=ns32k-pc532 ;; pc98) basic_machine=i386-pc ;; pc98-*) basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentium | p5 | k5 | k6 | nexgen 
| viac3) basic_machine=i586-pc ;; pentiumpro | p6 | 6x86 | athlon | athlon_*) basic_machine=i686-pc ;; pentiumii | pentium2 | pentiumiii | pentium3) basic_machine=i686-pc ;; pentium4) basic_machine=i786-pc ;; pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentiumpro-* | p6-* | 6x86-* | athlon-*) basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentium4-*) basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pn) basic_machine=pn-gould ;; power) basic_machine=power-ibm ;; ppc) basic_machine=powerpc-unknown ;; ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ppcle | powerpclittle | ppc-le | powerpc-little) basic_machine=powerpcle-unknown ;; ppcle-* | powerpclittle-*) basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ppc64) basic_machine=powerpc64-unknown ;; ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ppc64le | powerpc64little | ppc64-le | powerpc64-little) basic_machine=powerpc64le-unknown ;; ppc64le-* | powerpc64little-*) basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ps2) basic_machine=i386-ibm ;; pw32) basic_machine=i586-unknown os=-pw32 ;; rdos) basic_machine=i386-pc os=-rdos ;; rom68k) basic_machine=m68k-rom68k os=-coff ;; rm[46]00) basic_machine=mips-siemens ;; rtpc | rtpc-*) basic_machine=romp-ibm ;; s390 | s390-*) basic_machine=s390-ibm ;; s390x | s390x-*) basic_machine=s390x-ibm ;; sa29200) basic_machine=a29k-amd os=-udi ;; sb1) basic_machine=mipsisa64sb1-unknown ;; sb1el) basic_machine=mipsisa64sb1el-unknown ;; sde) basic_machine=mipsisa32-sde os=-elf ;; sei) basic_machine=mips-sei os=-seiux ;; sequent) basic_machine=i386-sequent ;; sh) basic_machine=sh-hitachi os=-hms ;; sh5el) basic_machine=sh5le-unknown ;; sh64) basic_machine=sh64-unknown ;; sparclite-wrs | simso-wrs) basic_machine=sparclite-wrs os=-vxworks ;; sps7) basic_machine=m68k-bull os=-sysv2 ;; spur) basic_machine=spur-unknown ;; st2000) basic_machine=m68k-tandem ;; stratus) basic_machine=i860-stratus os=-sysv4 ;; sun2) basic_machine=m68000-sun ;; sun2os3) basic_machine=m68000-sun os=-sunos3 ;; sun2os4) basic_machine=m68000-sun os=-sunos4 ;; sun3os3) basic_machine=m68k-sun os=-sunos3 ;; sun3os4) basic_machine=m68k-sun os=-sunos4 ;; sun4os3) basic_machine=sparc-sun os=-sunos3 ;; sun4os4) basic_machine=sparc-sun os=-sunos4 ;; sun4sol2) basic_machine=sparc-sun os=-solaris2 ;; sun3 | sun3-*) basic_machine=m68k-sun ;; sun4) basic_machine=sparc-sun ;; sun386 | sun386i | roadrunner) basic_machine=i386-sun ;; sv1) basic_machine=sv1-cray os=-unicos ;; symmetry) basic_machine=i386-sequent os=-dynix ;; t3e) basic_machine=alphaev5-cray os=-unicos ;; t90) basic_machine=t90-cray os=-unicos ;; tic54x | c54x*) basic_machine=tic54x-unknown os=-coff ;; tic55x | c55x*) basic_machine=tic55x-unknown os=-coff ;; tic6x | c6x*) basic_machine=tic6x-unknown os=-coff ;; tile*) basic_machine=tile-unknown os=-linux-gnu ;; tx39) basic_machine=mipstx39-unknown ;; tx39el) basic_machine=mipstx39el-unknown ;; toad1) basic_machine=pdp10-xkl os=-tops20 ;; tower | tower-32) basic_machine=m68k-ncr ;; tpf) basic_machine=s390x-ibm os=-tpf ;; udi29k) basic_machine=a29k-amd os=-udi ;; ultra3) basic_machine=a29k-nyu os=-sym1 ;; v810 | necv810) basic_machine=v810-nec os=-none ;; vaxv) basic_machine=vax-dec os=-sysv ;; vms) basic_machine=vax-dec os=-vms ;; 
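# As an illustration of the alias handling above, Pentium-class names are
# folded into the matching ix86 CPU:
#   ./config.sub pentium3-linux ->  i686-pc-linux-gnu
#   ./config.sub pentium4       ->  i786-pc-none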
vpp*|vx|vx-*) basic_machine=f301-fujitsu ;; vxworks960) basic_machine=i960-wrs os=-vxworks ;; vxworks68) basic_machine=m68k-wrs os=-vxworks ;; vxworks29k) basic_machine=a29k-wrs os=-vxworks ;; w65*) basic_machine=w65-wdc os=-none ;; w89k-*) basic_machine=hppa1.1-winbond os=-proelf ;; xbox) basic_machine=i686-pc os=-mingw32 ;; xps | xps100) basic_machine=xps100-honeywell ;; ymp) basic_machine=ymp-cray os=-unicos ;; z8k-*-coff) basic_machine=z8k-unknown os=-sim ;; z80-*-coff) basic_machine=z80-unknown os=-sim ;; none) basic_machine=none-none os=-none ;; # Here we handle the default manufacturer of certain CPU types. It is in # some cases the only manufacturer, in others, it is the most popular. w89k) basic_machine=hppa1.1-winbond ;; op50n) basic_machine=hppa1.1-oki ;; op60c) basic_machine=hppa1.1-oki ;; romp) basic_machine=romp-ibm ;; mmix) basic_machine=mmix-knuth ;; rs6000) basic_machine=rs6000-ibm ;; vax) basic_machine=vax-dec ;; pdp10) # there are many clones, so DEC is not a safe bet basic_machine=pdp10-unknown ;; pdp11) basic_machine=pdp11-dec ;; we32k) basic_machine=we32k-att ;; sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) basic_machine=sh-unknown ;; sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) basic_machine=sparc-sun ;; cydra) basic_machine=cydra-cydrome ;; orion) basic_machine=orion-highlevel ;; orion105) basic_machine=clipper-highlevel ;; mac | mpw | mac-mpw) basic_machine=m68k-apple ;; pmac | pmac-mpw) basic_machine=powerpc-apple ;; *-unknown) # Make sure to match an already-canonicalized machine name. ;; *) echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 exit 1 ;; esac # Here we canonicalize certain aliases for manufacturers. case $basic_machine in *-digital*) basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` ;; *-commodore*) basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` ;; *) ;; esac # Decode manufacturer-specific aliases for certain operating systems. if [ x"$os" != x"" ] then case $os in # First match some system type aliases # that might get confused with valid system types. # -solaris* is a basic system type, with this one exception. -auroraux) os=-auroraux ;; -solaris1 | -solaris1.*) os=`echo $os | sed -e 's|solaris1|sunos4|'` ;; -solaris) os=-solaris2 ;; -svr4*) os=-sysv4 ;; -unixware*) os=-sysv4.2uw ;; -gnu/linux*) os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` ;; # First accept the basic system types. # The portable systems comes first. # Each alternative MUST END IN A *, to match a version number. # -sysv* is not here because it comes later, after sysvr4. 
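# Examples of the OS normalization performed by the aliases above and the
# accepted list below (inputs illustrative):
#   ./config.sub sparc-sun-solaris    ->  sparc-sun-solaris2
#   ./config.sub sparc-sun-sunos5.8   ->  sparc-sun-solaris2.8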
-gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ | -sym* | -kopensolaris* \ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ | -aos* | -aros* \ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ | -openbsd* | -solidbsd* \ | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ | -chorusos* | -chorusrdb* | -cegcc* \ | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ | -mingw32* | -linux-gnu* | -linux-newlib* | -linux-uclibc* \ | -uxpv* | -beos* | -mpeix* | -udk* \ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*) # Remember, each alternative MUST END IN *, to match a version number. ;; -qnx*) case $basic_machine in x86-* | i*86-*) ;; *) os=-nto$os ;; esac ;; -nto-qnx*) ;; -nto*) os=`echo $os | sed -e 's|nto|nto-qnx|'` ;; -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) ;; -mac*) os=`echo $os | sed -e 's|mac|macos|'` ;; -linux-dietlibc) os=-linux-dietlibc ;; -linux*) os=`echo $os | sed -e 's|linux|linux-gnu|'` ;; -sunos5*) os=`echo $os | sed -e 's|sunos5|solaris2|'` ;; -sunos6*) os=`echo $os | sed -e 's|sunos6|solaris3|'` ;; -opened*) os=-openedition ;; -os400*) os=-os400 ;; -wince*) os=-wince ;; -osfrose*) os=-osfrose ;; -osf*) os=-osf ;; -utek*) os=-bsd ;; -dynix*) os=-bsd ;; -acis*) os=-aos ;; -atheos*) os=-atheos ;; -syllable*) os=-syllable ;; -386bsd) os=-bsd ;; -ctix* | -uts*) os=-sysv ;; -nova*) os=-rtmk-nova ;; -ns2 ) os=-nextstep2 ;; -nsk*) os=-nsk ;; # Preserve the version number of sinix5. -sinix5.*) os=`echo $os | sed -e 's|sinix|sysv|'` ;; -sinix*) os=-sysv4 ;; -tpf*) os=-tpf ;; -triton*) os=-sysv3 ;; -oss*) os=-sysv3 ;; -svr4) os=-sysv4 ;; -svr3) os=-sysv3 ;; -sysvr4) os=-sysv4 ;; # This must come after -sysvr4. -sysv*) ;; -ose*) os=-ose ;; -es1800*) os=-ose ;; -xenix) os=-xenix ;; -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) os=-mint ;; -aros*) os=-aros ;; -kaos*) os=-kaos ;; -zvmoe) os=-zvmoe ;; -dicos*) os=-dicos ;; -none) ;; *) # Get rid of the `-' at the beginning of $os. os=`echo $os | sed 's/[^-]*-//'` echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 exit 1 ;; esac else # Here we handle the default operating systems that come with various machines. # The value should be what the vendor currently ships out the door with their # machine or put another way, the most popular os provided with the machine. # Note that if you're going to try to match "-MANUFACTURER" here (say, # "-sun"), then you have to tell the case statement up towards the top # that MANUFACTURER isn't an operating system. 
Otherwise, code above # will signal an error saying that MANUFACTURER isn't an operating # system, and we'll never get to this point. case $basic_machine in score-*) os=-elf ;; spu-*) os=-elf ;; *-acorn) os=-riscix1.2 ;; arm*-rebel) os=-linux ;; arm*-semi) os=-aout ;; c4x-* | tic4x-*) os=-coff ;; # This must come before the *-dec entry. pdp10-*) os=-tops20 ;; pdp11-*) os=-none ;; *-dec | vax-*) os=-ultrix4.2 ;; m68*-apollo) os=-domain ;; i386-sun) os=-sunos4.0.2 ;; m68000-sun) os=-sunos3 # This also exists in the configure program, but was not the # default. # os=-sunos4 ;; m68*-cisco) os=-aout ;; mep-*) os=-elf ;; mips*-cisco) os=-elf ;; mips*-*) os=-elf ;; or32-*) os=-coff ;; *-tti) # must be before sparc entry or we get the wrong os. os=-sysv3 ;; sparc-* | *-sun) os=-sunos4.1.1 ;; *-be) os=-beos ;; *-haiku) os=-haiku ;; *-ibm) os=-aix ;; *-knuth) os=-mmixware ;; *-wec) os=-proelf ;; *-winbond) os=-proelf ;; *-oki) os=-proelf ;; *-hp) os=-hpux ;; *-hitachi) os=-hiux ;; i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) os=-sysv ;; *-cbm) os=-amigaos ;; *-dg) os=-dgux ;; *-dolphin) os=-sysv3 ;; m68k-ccur) os=-rtu ;; m88k-omron*) os=-luna ;; *-next ) os=-nextstep ;; *-sequent) os=-ptx ;; *-crds) os=-unos ;; *-ns) os=-genix ;; i370-*) os=-mvs ;; *-next) os=-nextstep3 ;; *-gould) os=-sysv ;; *-highlevel) os=-bsd ;; *-encore) os=-bsd ;; *-sgi) os=-irix ;; *-siemens) os=-sysv4 ;; *-masscomp) os=-rtu ;; f30[01]-fujitsu | f700-fujitsu) os=-uxpv ;; *-rom68k) os=-coff ;; *-*bug) os=-coff ;; *-apple) os=-macos ;; *-atari*) os=-mint ;; *) os=-none ;; esac fi # Here we handle the case where we know the os, and the CPU type, but not the # manufacturer. We pick the logical manufacturer. vendor=unknown case $basic_machine in *-unknown) case $os in -riscix*) vendor=acorn ;; -sunos*) vendor=sun ;; -cnk*|-aix*) vendor=ibm ;; -beos*) vendor=be ;; -hpux*) vendor=hp ;; -mpeix*) vendor=hp ;; -hiux*) vendor=hitachi ;; -unos*) vendor=crds ;; -dgux*) vendor=dg ;; -luna*) vendor=omron ;; -genix*) vendor=ns ;; -mvs* | -opened*) vendor=ibm ;; -os400*) vendor=ibm ;; -ptx*) vendor=sequent ;; -tpf*) vendor=ibm ;; -vxsim* | -vxworks* | -windiss*) vendor=wrs ;; -aux*) vendor=apple ;; -hms*) vendor=hitachi ;; -mpw* | -macos*) vendor=apple ;; -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) vendor=atari ;; -vos*) vendor=stratus ;; esac basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` ;; esac echo $basic_machine$os exit # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End: nordugrid-arc-5.4.2/PaxHeaders.7502/configure.ac0000644000000000000000000000012513153455031017464 xustar000000000000000027 mtime=1504598553.682268 29 atime=1513200577.68574075 29 ctime=1513200658.59973036 nordugrid-arc-5.4.2/configure.ac0000644000175000002070000030712613153455031017541 0ustar00mockbuildmock00000000000000# -*- Autoconf -*- # Process this file with autoconf to produce a configure script. 
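# A typical bootstrap from a source checkout is, for illustration,
#   autoreconf -f -i
#   ./configure
#   make
# whereas a distributed release tarball already contains the generated
# configure script, so only the last two steps are needed there.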
AC_PREREQ(2.56) AC_INIT([nordugrid-arc],m4_normalize(m4_include(VERSION)),[http://bugzilla.nordugrid.org/]) dnl serial-tests is not recognized before 1.12, and required after 1.13 m4_define([serial_tests], [ m4_esyscmd([case `${AUTOMAKE:-automake} --version | head -n 1` in *1.11.*|*1.10.*|*1.9.*);; *) echo serial-tests;; esac]) ]) AM_INIT_AUTOMAKE([foreign 1.9 tar-pax] serial_tests) AC_CONFIG_SRCDIR([Makefile.am]) AC_CONFIG_HEADERS([config.h]) baseversion=`echo $VERSION | sed 's/[[^0-9.]].*//'` preversion=`echo $VERSION | sed 's/^[[0-9.]]*//'` if test "x$baseversion" = "x" ; then baseversion=$VERSION preversion="" fi if test "x$preversion" = "x" ; then fedorarelease="1" fedorasetupopts="-q" debianversion="$baseversion" else fedorarelease="0.$preversion" fedorasetupopts="-q -n %{name}-%{version}$preversion" debianversion="$baseversion~$preversion" fi AC_SUBST(baseversion) AC_SUBST(preversion) AC_SUBST(fedorarelease) AC_SUBST(fedorasetupopts) AC_SUBST(debianversion) # This macro was introduced in autoconf 2.57g? but we currently only require 2.56 m4_ifdef([AC_CONFIG_MACRO_DIR], [AC_CONFIG_MACRO_DIR([m4])]) m4_pattern_allow([AC_PATH_PROG]) m4_pattern_allow([AC_MSG_WARN]) AC_PROG_CXX AC_PROG_CC_STDC AC_PROG_CPP AC_GNU_SOURCE AC_PROG_AWK AC_PROG_INSTALL AC_PROG_LN_S AC_PROG_MAKE_SET AC_DISABLE_STATIC AC_LIBTOOL_WIN32_DLL AM_PROG_LIBTOOL AC_CHECK_PROG(PERL, perl, perl, :) # EL-5 compatibility. $(mkdir_p) is now obsolete. test -n "$MKDIR_P" || MKDIR_P="$mkdir_p" AC_SUBST([MKDIR_P]) dnl --with-docdir since older autoconf (<2.60) does not support --docdir AC_ARG_WITH([docdir], AC_HELP_STRING([--with-docdir=DIR], [Install documentation in DIR [[default: ${datadir}/doc]]]), [case $withval in yes|no) AC_MSG_ERROR([Invalid DIR]) ;; *) docdir="$withval" ;; esac ], [ if test -z "$docdir"; then docdir='${datadir}/doc/${PACKAGE}' fi ]) AC_SUBST(docdir) # Use arc for "pkgdir" instead of nordugrid-arc (@PACKAGE@) pkgdatadir='${datadir}/arc' pkgincludedir='${includedir}/arc' pkglibdir='${libdir}/arc' pkglibexecdir='${libexecdir}/arc' AC_SUBST(pkgdatadir) AC_SUBST(pkgincludedir) AC_SUBST(pkglibdir) AC_SUBST(pkglibexecdir) ARC_API ARC_RELATIVE_PATHS AC_ARG_WITH(systemd-units-location, AC_HELP_STRING([--with-systemd-units-location=], [Location of the systemd unit files. [[None]]]), [ unitsdir="$withval" ], [ unitsdir= ] ) AM_CONDITIONAL([SYSTEMD_UNITS_ENABLED],[test "x$unitsdir" != "x"]) AC_MSG_RESULT($unitsdir) AC_SUBST(unitsdir) AC_ARG_WITH(sysv-scripts-location, AC_HELP_STRING([--with-sysv-scripts-location=], [Location of the SYSV init scripts. [[autodetect]]]), [ initddir="$withval" ], [ initddir= case "${host}" in *linux* | *kfreebsd* | *gnu* ) for i in init.d rc.d/init.d rc.d; do if test -d "/etc/$i" -a ! -h "/etc/$i" ; then initddir="$sysconfdir/$i" break fi done if test -z "$initddir"; then AC_MSG_WARN(could not find a suitable location for the SYSV init scripts - not installing) fi ;; esac ] ) AM_CONDITIONAL([SYSV_SCRIPTS_ENABLED],[test "x$unitsdir" = "x" && test "x$initddir" != "x"]) AC_MSG_RESULT($initddir) AC_SUBST(initddir) AC_ARG_WITH(cron-scripts-prefix, AC_HELP_STRING([--with-cron-scripts-prefix=], [Specify the location of the cron directory. 
[[SYSCONFDIR/cron.d]]]), [ cronddir="$withval" ], [ cronddir="$sysconfdir/cron.d" ] ) AC_SUBST(cronddir) # gettext AM_GNU_GETTEXT([external]) # Using Autoconf 2.60 or later you will get a warning during configure: # config.status: WARNING: 'po/Makefile.in.in' seems to ignore the --datarootdir setting # This warning can be removed by bumping the gettext version requirement below from 0.12 to at least 0.15 # See more: info Autoconf "Changed Directory Variables" AM_GNU_GETTEXT_VERSION([0.12]) [[ -r $srcdir/po/POTFILES.in ]] || touch $srcdir/po/POTFILES.in # Portable 64bit file offsets AC_SYS_LARGEFILE # pkg-config needed for many checks AC_PATH_TOOL(PKG_CONFIG, pkg-config, no) if test "x$PKG_CONFIG" = "xno"; then AC_MSG_ERROR([ *** pkg-config not found]) else pkgconfigdir=${libdir}/pkgconfig AC_SUBST(pkgconfigdir) fi # Default enable/disable switches # Features enables_ldap=yes enables_mysql=no enables_swig_python=yes enables_swig_java=yes # Features directly related to components enables_cppunit=yes enables_java=yes enables_junit=yes enables_python=yes enables_altpython=yes enables_pylint=yes enables_mock_dmc=no enables_gfal=no enables_s3=no enables_xrootd=yes enables_argus=no enables_xmlsec1=yes enables_dbjstore=yes enables_ldns=yes # Libraries and plugins # Currently no fine-grained choice is supported. # Also this variable is used to check if source # build is needed at all because no component can # be built without HED. enables_hed=yes # Services enables_a_rex_service=yes enables_gridftpd_service=yes enables_ldap_service=yes enables_giis_service=yes enables_cache_service=yes enables_datadelivery_service=yes enables_ldap_monitor=yes enables_ws_monitor=yes # Clients enables_compute_client=yes enables_credentials_client=yes enables_echo_client=yes enables_data_client=yes enables_jura_client=yes enables_saml_client=yes enables_wsrf_client=yes enables_unicore_client=no enables_emies_client=yes # Documentation enables_doc=yes # ACIX cache index enables_acix=yes # Handle group enable/disable switches AC_ARG_ENABLE(all, AC_HELP_STRING([--disable-all], [disables all buildable components. Can be overwritten with --enable-* for group or specific component. It is also possible to use --enable-all to overwrite defaults for most of components.]), [ enables_a_rex_service=$enableval enables_gridftpd_service=$enableval enables_ldap_service=$enableval enables_giis_service=$enableval enables_ldap_monitor=$enableval enables_ws_monitor=$enableval enables_cache_service=$enableval enables_datadelivery_service=$enableval enables_compute_client=$enableval enables_credentials_client=$enableval enables_echo_client=$enableval enables_data_client=$enableval enables_jura_client=$enableval enables_saml_client=$enableval enables_wsrf_client=$enableval enables_emies_client=$enableval enables_hed=$enableval enables_java=$enableval enables_junit=$enableval enables_python=$enableval enables_altpython=$enableval enables_pylint=$enableval enables_mock_dmc=$enableval enables_gfal=$enableval enables_s3=$enableval enables_xrootd=$enableval enables_xmlsec1=$enableval enables_argus=$enableval enables_cppunit=$enableval enables_doc=$enableval enables_acix=$enableval enables_dbjstore=$enableval enables_ldns=$enableval ], []) AC_ARG_ENABLE(all-clients, AC_HELP_STRING([--disable-all-clients], [disables all buildable client components. Can be overwritten with --enable-* for specific component. 
It is also possible to use --enable-all-clients to overwrite defaults and --enable-all.]), [ enables_compute_client=$enableval enables_credentials_client=$enableval enables_echo_client=$enableval enables_data_client=$enableval enables_jura_client=$enableval enables_saml_client=$enableval enables_wsrf_client=$enableval enables_emies_client=$enableval enables_doc=$enableval ], []) AC_ARG_ENABLE(all-data-clients, AC_HELP_STRING([--disable-all-data-clients], [disables all buildable client components providing data handling abilities. Can be overwritten with --enable-* for specific component. It is also possible to use --enable-all-data-clients to overwrite defaults, --enable-all and --enable-all-clients.]), [ enables_data_client=$enableval ], []) AC_ARG_ENABLE(all-services, AC_HELP_STRING([--disable-all-services], [disables all buildable service componets. Can be overwritten with --enable-* for specific component. It is also possible to use --enable-all-services to overwrite defaults and --enable-all.]), [ enables_a_rex_service=$enableval enables_gridftpd_service=$enableval enables_ldap_service=$enableval enables_giis_service=$enableval enables_ldap_monitor=$enableval enables_cache_service=$enableval enables_datadelivery_service=$enableval enables_acix=$enableval ], []) # Be pedantic about compiler warnings. AC_ARG_ENABLE(pedantic-compile, AC_HELP_STRING([--enable-pedantic-compile], [add pedantic compiler flags]), [enables_pedantic_compile="yes"], [enables_pedantic_compile="no"]) if test "x$enables_pedantic_compile" = "xyes"; then # This check need to be enhanced. It won't work in case of cross-compilation # and if path to compiler is explicitly specified. if test x"$CXX" = x"g++"; then # GNU C/C++ flags AM_CXXFLAGS="-Wall -Wextra -Werror -Wno-sign-compare -Wno-unused" SAVE_CPPFLAGS=$CPPFLAGS AC_LANG_SAVE AC_LANG_CPLUSPLUS CPPFLAGS="$CPPFLAGS -Wno-unused-result" AC_TRY_COMPILE([],[], [ AM_CXXFLAGS="$AM_CXXFLAGS -Wno-unused-result" ], [ AC_MSG_NOTICE([compilation flag -Wno-unused-result is not supported]) ] ) AC_LANG_RESTORE CPPFLAGS=$SAVE_CPPFLAGS else # TODO: set generic flags for generic compiler AM_CXXFLAGS="" fi AC_SUBST(AM_CXXFLAGS) fi AM_CONDITIONAL([PEDANTIC_COMPILE], [test "x$enables_pedantic_compile" = "xyes"]) # Enable/disable switches for third-party. # Swig AC_ARG_ENABLE(swig-python, AC_HELP_STRING([--disable-swig-python], [disable SWIG python bindings]), [enables_swig_python=$enableval],[]) AC_ARG_ENABLE(swig-java, AC_HELP_STRING([--disable-swig-java], [disable SWIG java bindings]), [enables_swig_java=$enableval],[]) AC_ARG_ENABLE(swig, AC_HELP_STRING([--disable-swig], [disable all bindings through SWIG]), [enables_swig_python=$enableval enables_swig_java=$enableval],[]) if test "$enables_swig_python" = "yes" || test "$enables_swig_java" = "yes"; then AC_CHECK_PROGS(SWIG, swig) if test "x$SWIG" = "x"; then enables_swig="no" else swigver=`$SWIG -version 2>&1 | grep Version | sed 's/.* //'` swigver1=`echo $swigver | cut -d. -f1` swigver2=`echo $swigver | cut -d. -f2` swigver3=`echo $swigver | cut -d. -f3` if test $swigver1 -lt 1 || ( test $swigver1 -eq 1 && ( \ test $swigver2 -lt 3 || ( test $swigver2 -eq 3 && ( \ test $swigver3 -lt 25 ) ) ) ) ; then AC_MSG_NOTICE([swig is too old (< 1.3.25)]) SWIG="" enables_swig="no" elif test $swigver1 -eq 1 && test $swigver2 -eq 3 && test $swigver3 -eq 38 ; then AC_MSG_NOTICE([swig version 1.3.38 has bug which prevents it from being used for this software. 
Please upgrade or downgrade.]) SWIG="" enables_swig="no" else SWIG2="no" if test $swigver1 -ge 2 then SWIG2="yes" fi AC_SUBST(SWIG2) SWIG_PYTHON_NAMING="SwigPy" # In SWIG version 1.3.37 naming was changed from "PySwig" to "SwigPy". if test $swigver1 -lt 1 || ( test $swigver1 -eq 1 && ( \ test $swigver2 -lt 3 || ( test $swigver2 -eq 3 && ( \ test $swigver3 -lt 37 ) ) ) ) ; then SWIG_PYTHON_NAMING="PySwig" fi AC_SUBST(SWIG_PYTHON_NAMING) fi fi else SWIG="" fi AM_CONDITIONAL([SWIG_ENABLED],[test "x$enables_swig" = "xyes"]) AC_ARG_ENABLE(hed, AC_HELP_STRING([--disable-hed], [disable building HED libraries and plugins. Do not do that unless You do not want to build anything. Even in that case better use --disable-all.]), [enables_hed=$enableval],[]) # Java dnl Check if Java is explicitly disabled. if test "$enables_hed" = "yes"; then JAVAC_FLAGS= JDK_CFLAGS= AC_ARG_ENABLE(java, AC_HELP_STRING([--disable-java], [disable Java components]), [enables_java=$enableval enables_swig_java=$enableval],[]) if test "$enables_java" = "yes"; then AC_ARG_WITH(jdk, AC_HELP_STRING([--with-jdk=(JDK)], [path to JDK. If unset the system JDK will be used])) JPATH= if test "x$with_jdk" != "x"; then # User specified JDK! JPATH=$with_jdk else # Look for system JDK. for i in /usr/lib/jvm/java-*-openjdk* /usr/lib/jvm/java-*-icedtea* /usr/lib/jvm/java-*-gcj* /usr/lib/jvm/java-*-sun* /usr/lib/gcc/*-redhat-linux/*; do if test -f $i/include/jni.h; then JPATH=$i break fi done fi if test "x$JPATH" != "x"; then JDK_CFLAGS="-I$JPATH/include" # Any extra includes? Look for them. JAVA_EXTRA_INCLUDE= case "${host}" in *-pc-mingw32 | *-pc-cygwin) JAVA_EXTRA_INCLUDE="win32" ;; *linux* | *kfreebsd* | *gnu* ) JAVA_EXTRA_INCLUDE="linux" ;; *solaris*) JAVA_EXTRA_INCLUDE="solaris" ;; esac if test "x$JAVA_EXTRA_INCLUDE" != "x" && test -d $JPATH/include/$JAVA_EXTRA_INCLUDE; then JDK_CFLAGS="$JDK_CFLAGS $JDK_CFLAGS/$JAVA_EXTRA_INCLUDE" fi SAVE_CPPFLAGS=$CPPFLAGS CPPFLAGS="$CPPFLAGS $JDK_CFLAGS" AC_CHECK_HEADERS(jni.h,[jni_h="yes"],[jni_h="no"]) CPPFLAGS=$SAVE_CPPFLAGS AC_PATH_PROGS(JAVA, java gij, , $JPATH/bin:/usr/bin) AC_PATH_PROGS(JAVAC, javac gcj ecj, , $JPATH/bin:/usr/bin) AC_PATH_PROGS(JAR, fastjar jar, , $JPATH/bin:/usr/bin) break fi if test "x$with_jdk" = "x" && test "x$JDK_CFLAGS" = "x"; then # If JDK was not found in any of the above locations try system locations. AC_CHECK_HEADERS(jni.h JavaVM/jni.h,[jni_h="yes"; break],[jni_h="no"]) AC_CHECK_PROGS(JAVA, java gij) AC_CHECK_PROGS(JAVAC, javac gcj ecj) AC_CHECK_PROGS(JAR, fastjar jar) fi # Set absolute limit on Java heap size allocation instead of letting # the java virtual machine and compiler (on Linux) allocate a quarter # of the total memory. JAVA_VM_FLAGS="-Xms50M -Xmx50M" JAVAC_VM_FLAGS="-J-Xms50M -J-Xmx50M" JAR_JFLAGS="" # Old versions of fastjar does not ignore -J options if test "x$JAR" != "x" && test "x`basename $JAR`" = "xjar"; then JAR_JFLAGS="$JAVAC_VM_FLAGS" fi if test "x$JAVAC" != "x" && test "x`basename $JAVAC`" = "xgcj"; then JAVAC_FLAGS="-C -fsource=1.5 -ftarget=1.5 --classpath=." elif test "x$JAVAC" != "x" && test "x`basename $JAVAC`" = "xjavac"; then JAVAC_FLAGS="$JAVAC_VM_FLAGS -source 1.5 -target 1.5 -cp ." 
fi if test "x$JAVA" != "x" && test "x`basename $JAVA`" = "xjava"; then JAVA_FLAGS="$JAVA_VM_FLAGS" fi # Check if version is 1.5 (Iterable was first introduced in 1.5) echo "public abstract class TestIterable implements Iterable< Object > {}" > TestIterable.java if $JAVAC $JAVAC_FLAGS TestIterable.java >/dev/null 2>&1; then java_is_15_or_above="yes" fi rm -f TestIterable.java TestIterable.class if test "x$JAVAC" = "x"; then AC_MSG_NOTICE([Missing Java compiler - skipping Java components]) enables_java="no" elif test "x$JAR" = "x"; then AC_MSG_NOTICE([Missing Java archiver - skipping Java components]) enables_java="no" elif test "x$jni_h" != "xyes"; then AC_MSG_NOTICE([Missing Java headers - skipping Java components]) enables_java="no" else AC_MSG_NOTICE([Java available: $JAVAC]) fi if test "x$enables_java" != "xyes"; then AC_MSG_NOTICE([Missing Java - skipping Java bindings]) enables_swig_java="no" elif ! test -f java/arc_wrap.cpp && test "x$enables_swig_java" != "xyes"; then AC_MSG_NOTICE([Missing pre-compiled Java wrapper and SWIG - skipping Java bindings]) enables_swig_java="no" fi fi fi AC_MSG_NOTICE([Java enabled: $enables_java]) AC_MSG_NOTICE([Java SWIG binding enabled: $enables_swig_java]) AM_CONDITIONAL([JAVA_ENABLED],[test "x$enables_java" = "xyes"]) AM_CONDITIONAL([JAVA_SWIG_ENABLED],[test "x$enables_swig_java" = "xyes"]) AM_CONDITIONAL([JAVA_IS_15_OR_ABOVE], [test "x$java_is_15_or_above" = "xyes"]) AC_SUBST(JAVA) AC_SUBST(JAR_JFLAGS) AC_SUBST(JAVA_FLAGS) AC_SUBST(JAVAC_FLAGS) AC_SUBST(JDK_CFLAGS) AC_ARG_WITH([jnidir], AC_HELP_STRING([--with-jnidir=DIR], [Install jar in DIR [[default: ${libdir}/java]]]), [case $withval in yes|no) AC_MSG_ERROR([Invalid DIR]) ;; *) jnidir="$withval" ;; esac ], [ if test -z "$jnidir"; then jnidir='${libdir}/java' fi ]) AC_SUBST(jnidir) AC_ARG_WITH([jninativedir], AC_HELP_STRING([--with-jninativedir=DIR], [Install jar in DIR [[default: ${pkglibdir}]]]), [case $withval in yes|no) AC_MSG_ERROR([Invalid DIR]) ;; *) jninativedir="$withval" ;; esac ], [ if test -z "$jninativedir"; then jninativedir='${pkglibdir}' fi ]) AC_SUBST(jninativedir) # Try to find junit - used for unit testing of Java bindings. dnl Check if JUnit is explicitly disabled. if test "$enables_hed" = "yes"; then AC_ARG_ENABLE(junit, AC_HELP_STRING([--disable-junit], [disable unit testing of Java bindings]), [enables_junit=$enableval],[]) if test "$enables_java" = "yes" && test "$enables_junit" = "yes"; then if ! 
test -f /usr/share/java/junit.jar; then enables_junit="no" fi else enables_junit="no" fi fi AM_CONDITIONAL([JUNIT_ENABLED], [test "x$enables_junit" = "xyes"]) AC_MSG_NOTICE([Java unit testing enabled: $enables_junit]) # Python if test "$enables_hed" = "yes"; then AC_ARG_ENABLE(python, AC_HELP_STRING([--disable-python], [disable Python components]), [enables_python=$enableval enables_swig_python=$enableval], []) if test "$enables_python" = "yes"; then AC_ARG_WITH(python, AC_HELP_STRING([--with-python=(PYTHON)], [specify python program from PATH])) # We do not look for python binary when cross-compiling # but we need to make the variable non-empty if test "${build}" = "${host}"; then AC_CHECK_PROGS(PYTHON, $with_python python) else PYTHON=python fi if test "X$PYTHON" != "X"; then PYNAME=`basename $PYTHON` PKG_CHECK_MODULES(PYTHON, $PYNAME, [ PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` ],[ PYTHON_VERSION=`$PYTHON -c 'import sys; print(sys.version[[:3]])'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[[0]])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" AC_CHECK_LIB([python$PYTHON_VERSION], [Py_Initialize],[ AC_MSG_NOTICE([No additional path to python library needed]) PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS"],[ LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will use the cached value AC_CHECK_LIB([python$PYTHON_VERSION], [Py_Finalize],[ AC_MSG_NOTICE([Adding path to python library]) PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS"],[ PYTHON_LIBS=""])]) LDFLAGS=$SAVE_LDFLAGS ]) AC_SUBST(PYTHON_VERSION) AC_SUBST(PYTHON_CFLAGS) AC_SUBST(PYTHON_LIBS) if test "${build}" = "${host}"; then PYTHON_SOABI=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SOABI'))" | sed s/None//` else PYTHON_SOABI="" fi AC_SUBST(PYTHON_SOABI) AC_ARG_WITH(python-site-arch, AC_HELP_STRING([--with-python-site-arch=directory], [Directory where Python modules will be installed - default is to query the Python binary])) if test "X$PYTHON_SITE_ARCH" = "X"; then if test "${build}" = "${host}"; then PYTHON_SITE_ARCH=`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_lib(1,0,"${prefix}"))'` else PYTHON_SITE_ARCH="${libdir}/python${PYTHON_VERSION}/site-packages" fi fi AC_SUBST(PYTHON_SITE_ARCH) AC_ARG_WITH(python-site-lib, AC_HELP_STRING([--with-python-site-lib=directory], [Directory where Python modules will be installed - default is to query the Python binary])) if test "X$PYTHON_SITE_LIB" = "X"; then if test "${build}" = "${host}"; then PYTHON_SITE_LIB=`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_lib(0,0,"${prefix}"))'` else PYTHON_SITE_LIB="${libdir}/python${PYTHON_VERSION}/site-packages" fi fi AC_SUBST(PYTHON_SITE_LIB) SAVE_LDFLAGS=$LDFLAGS SAVE_CPPFLAGS=$CPPFLAGS LDFLAGS="$LDFLAGS $PYTHON_LIBS" CPPFLAGS="$CPPFLAGS $PYTHON_CFLAGS" AC_CHECK_HEADER(Python.h, [pythonh="yes"], [pythonh="no"]) 
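# Illustrative note (example values only, they depend on the local Python build): on a 64-bit Red Hat style host the distutils queries above typically yield
#   PYTHON_SITE_ARCH = ${prefix}/lib64/python2.7/site-packages   (platform-specific modules)
#   PYTHON_SITE_LIB  = ${prefix}/lib/python2.7/site-packages     (pure Python modules)
# ${prefix} is passed to Python unexpanded (single quotes), apparently so that it can still be substituted by make at install time.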
AC_TRY_COMPILE([#include <Python.h>], [Py_InitializeEx(0)],[ AC_MSG_NOTICE([Python supports skipping registration of signal handlers during initialization]) AC_DEFINE(HAVE_PYTHON_INITIALIZE_EX,,[Define if you have Py_InitializeEx function]) enables_python_service="yes" ],[ AC_MSG_NOTICE([Python does not support skipping registration of signal handlers during initialization, since its version is below 2.4]) enables_python_service="no" ]) LDFLAGS=$SAVE_LDFLAGS CPPFLAGS=$SAVE_CPPFLAGS fi if test "X$PYTHON" = "X"; then AC_MSG_NOTICE([Missing Python - skipping Python components]) enables_python=no elif test "X$PYTHON_SITE_ARCH" = "X" || test "X$PYTHON_SITE_LIB" = "X"; then AC_MSG_NOTICE([Missing python site packages location - skipping Python components]) enables_python=no else AC_MSG_NOTICE([Python available: $PYTHON_VERSION]) fi if test "x$enables_python" != "xyes"; then AC_MSG_NOTICE([Missing Python - skipping Python bindings]) enables_swig_python=no elif test "X$PYTHON_LIBS" = "X"; then AC_MSG_NOTICE([Missing Python library - skipping Python bindings]) enables_swig_python=no elif test "X$pythonh" != "Xyes"; then AC_MSG_NOTICE([Missing Python header - skipping Python bindings]) enables_swig_python=no elif ! test -f python/arc_wrap.cpp && test "x$enables_swig_python" != "xyes"; then AC_MSG_NOTICE([Missing pre-compiled Python wrapper and SWIG - skipping Python bindings]) enables_swig_python=no fi fi fi AC_MSG_NOTICE([Python enabled: $enables_python]) AC_MSG_NOTICE([Python SWIG bindings enabled: $enables_swig_python]) AM_CONDITIONAL([PYTHON_ENABLED],[test "x$enables_python" = "xyes"]) AM_CONDITIONAL([PYTHON3], [test "x$enables_python" = "xyes" && test "x$PYTHON_MAJOR" = "x3"]) AM_CONDITIONAL([PYTHON_SWIG_ENABLED],[test "x$enables_swig_python" = "xyes"]) AM_CONDITIONAL([PYTHON_SERVICE],[test "x$enables_swig_python" = "xyes" && test "x$enables_python_service" = "xyes"]) # Alternative Python if test "$enables_hed" = "yes"; then AC_ARG_ENABLE(altpython, AC_HELP_STRING([--disable-altpython], [disable alternative Python bindings]), [enables_altpython=$enableval], []) if test "$enables_altpython" = "yes"; then AC_ARG_WITH(altpython, AC_HELP_STRING([--with-altpython=(PYTHON)], [specify alternative python program from PATH])) AC_CHECK_PROGS(ALTPYTHON, $with_altpython) if test "X$ALTPYTHON" != "X"; then ALTPYNAME=`basename $ALTPYTHON` PKG_CHECK_MODULES(ALTPYTHON, $ALTPYNAME, [ ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` ],[ ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(sys.version[[:3]])'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[[0]])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" AC_CHECK_LIB([python$ALTPYTHON_VERSION], [Py_Initialize],[ AC_MSG_NOTICE([No additional path to python library needed]) ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS"],[ LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will use the cached value
AC_CHECK_LIB([python$ALTPYTHON_VERSION], [Py_Finalize],[ AC_MSG_NOTICE([Adding path to python library]) ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS"],[ ALTPYTHON_LIBS=""])]) LDFLAGS=$SAVE_LDFLAGS ]) AC_SUBST(ALTPYTHON_VERSION) AC_SUBST(ALTPYTHON_CFLAGS) AC_SUBST(ALTPYTHON_LIBS) ALTPYTHON_SOABI=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SOABI'))" | sed s/None//` AC_SUBST(ALTPYTHON_SOABI) AC_ARG_WITH(altpython-site-arch, AC_HELP_STRING([--with-altpython-site-arch=directory], [Directory where Python modules will be installed - default is to query the Python binary])) if test "X$ALTPYTHON_SITE_ARCH" = "X"; then ALTPYTHON_SITE_ARCH=`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_lib(1,0,"${prefix}"))'` fi AC_SUBST(ALTPYTHON_SITE_ARCH) AC_ARG_WITH(altpython-site-lib, AC_HELP_STRING([--with-altpython-site-lib=directory], [Directory where Python modules will be installed - default is to query the Python binary])) if test "X$ALTPYTHON_SITE_LIB" = "X"; then ALTPYTHON_SITE_LIB=`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_lib(0,0,"${prefix}"))'` fi AC_SUBST(ALTPYTHON_SITE_LIB) SAVE_LDFLAGS=$LDFLAGS SAVE_CPPFLAGS=$CPPFLAGS LDFLAGS="$LDFLAGS $ALTPYTHON_LIBS" CPPFLAGS="$CPPFLAGS $ALTPYTHON_CFLAGS" AC_CHECK_HEADER(Python.h, [altpythonh="yes"], [altpythonh="no"]) LDFLAGS=$SAVE_LDFLAGS CPPFLAGS=$SAVE_CPPFLAGS fi if test "X$ALTPYTHON" = "X"; then AC_MSG_NOTICE([Missing alternative Python - skipping alternative Python]) enables_altpython=no elif test "X$ALTPYTHON_LIBS" = "X"; then AC_MSG_NOTICE([Missing alternative Python library - skipping alternative Python bindings]) enables_altpython=no elif test "X$altpythonh" != "Xyes"; then AC_MSG_NOTICE([Missing alternative Python header - skipping alternative Python bindings]) enables_altpython=no elif test "X$ALTPYTHON_SITE_ARCH" = "X" || test "X$ALTPYTHON_SITE_LIB" = "X"; then AC_MSG_NOTICE([Missing python site packages location - skipping alternative Python bindings]) enables_altpython=no else AC_MSG_NOTICE([Alternative Python available: $ALTPYTHON_VERSION]) fi if test "x$enables_altpython" != "xyes"; then AC_MSG_NOTICE([Missing alternative Python - skipping alternative Python bindings]) enables_altpython=no elif ! test -f python/arc_wrap.cpp && test "x$enables_swig_python" != "xyes"; then AC_MSG_NOTICE([Missing pre-compiled Python wrapper and SWIG - skipping alternative Python bindings]) enables_altpython=no fi fi fi AC_MSG_NOTICE([Alternative Python enabled: $enables_altpython]) AM_CONDITIONAL([ALTPYTHON_ENABLED],[test "x$enables_altpython" = "xyes"]) AM_CONDITIONAL([ALTPYTHON3], [test "x$enables_altpython" = "xyes" && test "x$ALTPYTHON_MAJOR" = "x3"]) # check for pylint dnl Check if pylint is explicitly disabled. if test "$enables_hed" = "yes"; then AC_ARG_ENABLE(pylint, AC_HELP_STRING([--disable-pylint], [disable python example checking using pylint]), [enables_pylint=$enableval],[]) AC_CHECK_PROGS(PYLINT, pylint) if test "x$PYLINT" = "x"; then enables_pylint="no" else PYLINT_VERSION=`$PYLINT --version 2> /dev/null | sed -n 's/^pylint \([[0-9.]]*\).*/\1/p'` # Check if pylint supports the following arguments, otherwise disable pylint (python example checking). # Do not generate report # Disable convention and recommendation messages - we are only interested in fatals, errors and warnings. 
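# Illustrative note: the probe below simply runs pylint on an empty input with the candidate options, roughly
#   pylint --reports=no --disable=C,R /dev/null
# and keeps the options only if that invocation exits with status 0.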
PYLINT_ARGS="--reports=no --disable=C,R" if $PYLINT $PYLINT_ARGS /dev/null > /dev/null 2>&1 ; then AC_MSG_NOTICE([pylint version $PYLINT_VERSION found - version ok]) enables_pylint="yes" else AC_MSG_NOTICE([pylint version $PYLINT_VERSION found - bad version]) enables_pylint="no" PYLINT_ARGS="" fi AC_SUBST(PYLINT_ARGS) fi # Check if the --disable=W0221 option is supported # W0221: Disable arguments differ messages since Swig uses tuple syntax (*args). if test "$enables_pylint" = "yes"; then PYLINT_ARGS_ARGUMENTS_DIFFER="--disable=W0221" if ! $PYLINT $PYLINT_ARGS $PYLINT_ARGS_ARGUMENTS_DIFFER /dev/null > /dev/null 2>&1 ; then PYLINT_ARGS_ARGUMENTS_DIFFER="" fi AC_SUBST(PYLINT_ARGS_ARGUMENTS_DIFFER) fi fi AM_CONDITIONAL([PYLINT_ENABLED], [test "x$enables_pylint" = "xyes"]) AC_MSG_NOTICE([Python example checking with pylint enabled: $enables_pylint]) # check gthread if test "$enables_hed" = "yes"; then PKG_CHECK_MODULES(GTHREAD, [gthread-2.0 >= 2.4.7]) AC_SUBST(GTHREAD_CFLAGS) AC_SUBST(GTHREAD_LIBS) fi # check glibmm # check for giomm which became a part of glibmm as of version 2.16 if test "$enables_hed" = "yes"; then "$PKG_CONFIG" giomm-2.4 if test "$?" = '1'; then PKG_CHECK_MODULES(GLIBMM, [glibmm-2.4 >= 2.4.7]) else PKG_CHECK_MODULES(GLIBMM, [giomm-2.4]) AC_DEFINE(HAVE_GIOMM,, [define if giomm is supported in glibmm]) fi AC_SUBST(GLIBMM_CFLAGS) AC_SUBST(GLIBMM_LIBS) SAVE_CPPFLAGS=$CPPFLAGS AC_LANG_SAVE AC_LANG_CPLUSPLUS CPPFLAGS="$CPPFLAGS $GLIBMM_CFLAGS" AC_CHECK_HEADER([glibmm/optioncontext.h], [ AC_TRY_COMPILE([#include ], [Glib::OptionContext ctx; ctx.set_summary("summary")], [ AC_DEFINE(HAVE_GLIBMM_OPTIONCONTEXT_SET_SUMMARY,, [define if glibmm has Glib::OptionContext::set_summary()]) AC_MSG_NOTICE([using glibmm command line parsing]) ], [ AC_MSG_NOTICE([using getopt_long command line parsing]) ] ) AC_TRY_COMPILE([#include ], [Glib::OptionContext ctx; ctx.get_help();],[ AC_DEFINE(HAVE_GLIBMM_OPTIONCONTEXT_GET_HELP,, [define if glibmm has Glib::OptionContext::get_help()]) ], [ ] ) ]) AC_TRY_COMPILE([#include ],[Glib::SignalChildWatch watch = Glib::signal_child_watch();],[glibmm_childwatch=yes],[glibmm_childwatch=no]) if test "$glibmm_childwatch" = yes; then AC_DEFINE(HAVE_GLIBMM_CHILDWATCH,,[define if glibmm has support for controlling the state of child processes]) else AC_MSG_NOTICE([WARNING: glibmm has no API for controlling child processes - results of external processes may be inconsistent]) fi AC_TRY_COMPILE([#include ],[Glib::ModuleFlags flags = Glib::MODULE_BIND_LOCAL;],[glibmm_bind_local=yes],[glibmm_bind_local=no]) if test "$glibmm_bind_local" = yes; then AC_DEFINE(HAVE_GLIBMM_BIND_LOCAL,,[define if glibmm has support for local symbol resolution in shared libraries]) else AC_MSG_NOTICE([WARNING: glibmm has no way to limit scope of symbols of shared libraries. Make sure external libraries used by plugins have no conflicting symbols. HINT: use Globus compiled against system OpenSSL library.]) fi AC_TRY_COMPILE([#include ],[Glib::getenv("");],[glibmm_getenv=yes],[glibmm_getenv=no]) if test "$glibmm_getenv" = yes; then AC_DEFINE(HAVE_GLIBMM_GETENV,,[define if glibmm has getenv operations]) else AC_MSG_NOTICE([WARNING: glibmm has no support for getenv. Usage of libc getenv is unsafe in multi-threaded applications.]) fi AC_TRY_COMPILE([#include ],[Glib::setenv("", "");],[glibmm_setenv=yes],[glibmm_setenv=no]) if test "$glibmm_setenv" = yes; then AC_DEFINE(HAVE_GLIBMM_SETENV,,[define if glibmm has setenv operations]) else AC_MSG_NOTICE([WARNING: glibmm has no support for setenv.
Usage of libc setenv may be unsafe in multi-threaded applications.]) fi AC_TRY_COMPILE([#include ],[Glib::unsetenv("");],[glibmm_unsetenv=yes],[glibmm_unsetenv=no]) if test "$glibmm_unsetenv" = yes; then AC_DEFINE(HAVE_GLIBMM_UNSETENV,,[define if glibmm has unsetenv operations]) else AC_MSG_NOTICE([WARNING: glibmm has no support for unsetenv. Usage of libc unsetenv may be unsafe in multi-threaded applications.]) fi AC_TRY_COMPILE([#include ],[Glib::listenv();],[glibmm_listenv=yes],[glibmm_listenv=no]) if test "$glibmm_listenv" = yes; then AC_DEFINE(HAVE_GLIBMM_LISTENV,,[define if glibmm has listenv operations]) else AC_MSG_NOTICE([WARNING: glibmm has no support for listenv. Usage of libc environ is unsafe in multi-threaded applications.]) fi AC_LANG_RESTORE CPPFLAGS=$SAVE_CPPFLAGS fi # check libxml if test "$enables_hed" = "yes"; then PKG_CHECK_MODULES(LIBXML2, [libxml-2.0 >= 2.4.0]) AC_SUBST(LIBXML2_CFLAGS) AC_SUBST(LIBXML2_LIBS) fi # check openssl if test "$enables_hed" = "yes"; then PKG_CHECK_MODULES(OPENSSL, [openssl >= 1.0.0]) PKG_CHECK_MODULES(OPENSSL_1_1, [openssl >= 1.1.0], [ OPENSSL_CFLAGS="$OPENSSL_CFLAGS -DOPENSSL_API_COMPAT=0x10100000L" AC_MSG_NOTICE([Forcing off deprecated functions for OpenSSL >= 1.1]) ], [ AC_MSG_NOTICE([OpenSSL is pre-1.1]) ]) AC_SUBST(OPENSSL_CFLAGS) AC_SUBST(OPENSSL_LIBS) fi # Check for available *_method functions in OpenSSL SAVE_CPPFLAGS=$CPPFLAGS SAVE_LIBS=$LIBS CPPFLAGS="$CPPFLAGS $OPENSSL_CFLAGS" LIBS="$LIBS $OPENSSL_LIBS" AC_LANG_PUSH([C++]) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[ #include <openssl/ssl.h> void _test(void) { (void)SSLv3_method(); } ]])], [AC_DEFINE(HAVE_SSLV3_METHOD,1,[define if SSLv3_method is available])], [AC_MSG_NOTICE([No SSLv3_method function available])]) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[ #include <openssl/ssl.h> void _test(void) { (void)TLSv1_method(); } ]])], [AC_DEFINE(HAVE_TLSV1_METHOD,1,[define if TLSv1_method is available])], [AC_MSG_NOTICE([No TLSv1_method function available])]) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[ #include <openssl/ssl.h> void _test(void) { (void)TLSv1_1_method(); } ]])], [AC_DEFINE(HAVE_TLSV1_1_METHOD,1,[define if TLSv1_1_method is available])], [AC_MSG_NOTICE([No TLSv1_1_method function available])]) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[ #include <openssl/ssl.h> void _test(void) { (void)TLSv1_2_method(); } ]])], [AC_DEFINE(HAVE_TLSV1_2_METHOD,1,[define if TLSv1_2_method is available])], [AC_MSG_NOTICE([No TLSv1_2_method function available])]) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[ #include <openssl/ssl.h> void _test(void) { (void)TLS_method(); } ]])], [AC_DEFINE(HAVE_TLS_METHOD,1,[define if TLS_method is available])], [AC_MSG_NOTICE([No TLS_method function available])]) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[ #include <openssl/ssl.h> void _test(void) { (void)DTLSv1_method(); } ]])], [AC_DEFINE(HAVE_DTLSV1_METHOD,1,[define if DTLSv1_method is available])], [AC_MSG_NOTICE([No DTLSv1_method function available])]) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[ #include <openssl/ssl.h> void _test(void) { (void)DTLSv1_2_method(); } ]])], [AC_DEFINE(HAVE_DTLSV1_2_METHOD,1,[define if DTLSv1_2_method is available])], [AC_MSG_NOTICE([No DTLSv1_2_method function available])]) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[ #include <openssl/ssl.h> void _test(void) { (void)DTLS_method(); } ]])], [AC_DEFINE(HAVE_DTLS_METHOD,1,[define if DTLS_method is available])], [AC_MSG_NOTICE([No DTLS_method function available])]) AC_LANG_POP([C++]) CPPFLAGS=$SAVE_CPPFLAGS LIBS=$SAVE_LIBS #check mozilla nss enables_nss=yes NSS_INSTALLED=no dnl Check if nss lib is explicitly disabled, default is enable. 
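# Illustrative note (assumes nss.pc is on the pkg-config search path): the PKG_CHECK_MODULES(NSS, [nss >= 3.10]) probe below is roughly equivalent to
#   if pkg-config --exists 'nss >= 3.10'; then
#     NSS_CFLAGS=`pkg-config --cflags nss`
#     NSS_LIBS=`pkg-config --libs nss`
#   fi
# with the result ending up in the NSS_* variables that are AC_SUBSTed afterwards.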
AC_ARG_ENABLE(nss, AC_HELP_STRING([--disable-nss], [disable use of the mozilla nss library]),[enables_nss="$enableval"],[]) if test "$enables_nss" = "yes"; then PKG_CHECK_MODULES(NSS, [nss >= 3.10], [NSS_INSTALLED=yes] , [ AC_MSG_WARN([Cannot locate nss lib]) NSS_INSTALLED=no enables_nss=no ]) if test "x$NSS_INSTALLED" = "xyes" ; then AC_DEFINE(HAVE_NSS,,[define if NSS is enabled and available]) fi fi AC_SUBST(NSS_CFLAGS) AC_SUBST(NSS_LIBS) AM_CONDITIONAL([NSS_ENABLED], test x$NSS_INSTALLED = xyes) #check SQLite enables_sqlite=no SQLITE_INSTALLED=no dnl Check if sqlite is explicitly enabled, default is disable. AC_ARG_ENABLE(sqlite, AC_HELP_STRING([--enable-sqlite], [enable use of the SQLite (not affected by --enable-all)]),[enables_sqlite="$enableval"],[]) if test "$enables_sqlite" = "yes"; then PKG_CHECK_MODULES(SQLITE, [sqlite3 >= 3.6], [SQLITE_INSTALLED=yes] , [ AC_MSG_WARN([Cannot locate SQLite newer than 3.6]) SQLITE_INSTALLED=no enables_sqlite=no ]) if test "x$SQLITE_INSTALLED" = "xyes" ; then AC_DEFINE(HAVE_SQLITE,,[define if SQLite is enabled and available]) # Check for finction available since 3.8 SAVE_CFLAGS=$CFLAGS SAVE_LIBS=$LIBS CFLAGS="$CFLAGS $SQLITE_CFLAGS" LIBS="$LIBS $SQLITE_LIBS" AC_CHECK_FUNCS(sqlite3_errstr) CFLAGS=$SAVE_CFLAGS LIBS=$SAVE_LIBS fi fi AC_SUBST(SQLITE_CFLAGS) AC_SUBST(SQLITE_LIBS) AM_CONDITIONAL([SQLITE_ENABLED], test x$SQLITE_INSTALLED = xyes) #check emi common authentiation library enables_canlxx=no AC_ARG_ENABLE(canlxx, AC_HELP_STRING([--enable-canlxx], [enable use of EMI common authentication libraries]), [enables_canlxx="$enableval"], []) if test "x$enables_canlxx" = "xyes"; then AC_ARG_WITH(canlxx, AC_HELP_STRING([--with-canlxx=PATH], [CANL++ installation path]), [ if test "x$PKG_CONFIG_PATH" != "x"; then PKG_CONFIG_PATH="$withval/lib/pkgconfig:$PKG_CONFIG_PATH" else PKG_CONFIG_PATH="$withval/lib/pkgconfig" fi ] ) AC_MSG_NOTICE([PKG_CONFIG_PATH for CANL++ is: $PKG_CONFIG_PATH]) PKG_CHECK_MODULES(CANLXX, [canl-c++], [], [ AC_MSG_NOTICE([Failed to find EMI common authentication libraries]) enables_canlxx=no ]) fi if test "x$enables_canlxx" = "xyes" ; then AC_DEFINE(HAVE_CANLXX,,[define if CANL++ is enabled and available]) fi AC_SUBST(CANLXX_CFLAGS) AC_SUBST(CANLXX_LIBS) AM_CONDITIONAL(CANLXX_ENABLED, test "x$enables_canlxx" = "xyes") # check cppunit if test "$enables_hed" = "yes"; then AC_ARG_ENABLE(cppunit, AC_HELP_STRING([--disable-cppunit], [disable cppunit-based UNIT testing of code]),[enables_cppunit=$enableval],[]) if test "$enables_cppunit" = "yes"; then PKG_CHECK_MODULES(CPPUNIT, [cppunit],[], [AC_PATH_PROG(CPPUNIT_CONFIG, cppunit-config, no) if test "x$CPPUNIT_CONFIG" = "xno"; then AC_MSG_WARN([cppunit-config not found - no UNIT testing will be performed]) CPPUNIT_CFLAGS= CPPUNIT_LIBS= enables_cppunit="no" else CPPUNIT_CFLAGS="`$CPPUNIT_CONFIG --cflags`" CPPUNIT_LIBS="`$CPPUNIT_CONFIG --libs`" fi]) if test "x$CPPUNIT_CONFIG" != "xno" || test "x$CPPUNIT_PKG_ERRORS" != "x" then TEST_DIR=test else enables_cppunit=no TEST_DIR= fi fi AC_SUBST(CPPUNIT_CFLAGS) AC_SUBST(CPPUNIT_LIBS) AC_SUBST(TEST_DIR) else enables_cppunit="no" fi # check ldns library if test "$enables_compute_client" = "yes"; then AC_ARG_ENABLE(ldns, AC_HELP_STRING([--disable-ldns], [disable ldns library usage (makes ARCHERY client unavailable) ]),[enables_ldns=$enableval],[]) if test "$enables_ldns" = "yes"; then PKG_CHECK_MODULES(LDNS, [ldns],[], [AC_PATH_PROG(LDNS_CONFIG, ldns-config, no) if test "x$LDNS_CONFIG" = "xno"; then AC_CHECK_HEADER([ldns/ldns.h], [AC_CHECK_LIB([ldns], 
[ldns_dname_new_frm_str], [ LDNS_CFLAGS="$LDNS_CFLAGS" LDNS_LIBS="$LDNS_LIBS -lldns" ], [enables_ldns="no"]) ],[enables_ldns="no"]) else LDNS_CFLAGS="`$LDNS_CONFIG --cflags`" LDNS_LIBS="`$LDNS_CONFIG --libs`" fi ]) if test "$enables_ldns" = "no"; then AC_MSG_WARN([ldns library was not found. Compute clients will be built without ARCHERY support.]) fi fi else enables_ldns="no" fi if test "x$enables_ldns" = "xyes" ; then AC_DEFINE(HAVE_LDNS,,[define if LDNS is enabled and available]) else LDNS_CFLAGS= LDNS_LIBS= fi AC_SUBST(LDNS_CFLAGS) AC_SUBST(LDNS_LIBS) AM_CONDITIONAL(LDNS_ENABLED, test "x$enables_ldns" = "xyes") ############################## # # Check xmlsec1 # ############################# MACOSX="" case "${host}" in *darwin*) MACOSX="yes" ;; esac if test "x$MACOSX" = "xyes"; then AC_DEFINE(_MACOSX,,[Define if compiling for MacOSX]) fi AM_CONDITIONAL([MACOSX], [ test "x$MACOSX" = "xyes"]) if test "$enables_hed" = "yes"; then XMLSEC_MIN_VERSION="1.2.4" XMLSEC_OPENSSL_MIN_VERSION="1.2.4" XMLSEC_CONFIG="${XMLSEC1_CONFIG:-xmlsec1-config}" XMLSEC_CFLAGS="" XMLSEC_LIBS="" XMLSEC_INSTALLED=no dnl Check if xmlsec1 is explicitly disabled, default is enable. AC_ARG_ENABLE(xmlsec1, AC_HELP_STRING([--disable-xmlsec1], [disable features which need xmlsec1 library]),[enables_xmlsec1=$enableval],[]) if test "x$enables_xmlsec1" = "xyes"; then AC_ARG_WITH(xmlsec1, [ --with-xmlsec1=(PATH) xmlsec1 location]) if test "x$with_xmlsec1" = "x" ; then PKG_CHECK_MODULES(XMLSEC, [xmlsec1 >= $XMLSEC_MIN_VERSION], [XMLSEC_INSTALLED=yes], [XMLSEC_INSTALLED=no]) if test "x$XMLSEC_INSTALLED" = "xyes" ; then PKG_CHECK_MODULES(XMLSEC_OPENSSL, [xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION], [XMLSEC_INSTALLED=yes],[XMLSEC_INSTALLED=no]) fi # Find number of backslashes in XMLSEC_CFLAGS n=$(echo $XMLSEC_CFLAGS|sed 's/.*-DXMLSEC_CRYPTO=\([[^ ]]*\).*/\1/'|tr -d '[[A-Za-z0-1\n"]]'| wc -c) # Fixes due to bugs in pkg-config and/or xmlsec1 # # 0: Indicates a bug in pkg-config which removes the escaping of the quotes # 2: Correct value with escaped quotes # 6: Old xmlsec1 version which used 3 back-slashes to escape quotes # See eg. https://bugzilla.redhat.com/show_bug.cgi?id=675334 # Make sure that the quotes are escaped with single backslash if test $n = 0 -o $n = 6; then AC_MSG_NOTICE([Working around bad combination of pkgconfig and xmlsec1 with $n back-slashes]) XMLSEC_CFLAGS=$(echo $XMLSEC_CFLAGS|sed 's/\(.*-DXMLSEC_CRYPTO=\)\\*"\([[^ \\"]]*\)\\*" \(.*\)/\1\\"\2\\" \3/') XMLSEC_OPENSSL_CFLAGS=$(echo $XMLSEC_OPENSSL_CFLAGS|sed 's/\(.*-DXMLSEC_CRYPTO=\)\\*"\([[^ \\"]]*\)\\*" \(.*\)/\1\\"\2\\" \3/') fi fi if test "x$XMLSEC_INSTALLED" = "xno" -a "x$MACOSX" != "xyes"; then AC_MSG_CHECKING(for xmlsec1 libraries >= $XMLSEC_MIN_VERSION) if test "x$with_xmlsec1" != "x" ; then XMLSEC_CONFIG=$with_xmlsec1/bin/$XMLSEC_CONFIG fi "$XMLSEC_CONFIG" --version 2>/dev/null 1>/dev/null if test "$?" != '0' ; then AC_MSG_WARN(Could not find xmlsec1 anywhere; The xml security related functionality will not be compiled) else vers=`$XMLSEC_CONFIG --version 2>/dev/null | awk -F. '{ printf "%d", ($1 * 1000 + $2) * 1000 + $3;}'` minvers=`echo $XMLSEC_MIN_VERSION | awk -F. 
'{ printf "%d", ($1 * 1000 + $2) * 1000 + $3;}'` if test "$vers" -ge "$minvers" ; then XMLSEC_LIBS="`$XMLSEC_CONFIG --libs`" XMLSEC_CFLAGS="`$XMLSEC_CONFIG --cflags`" #check the xmlsec1-openssl here if test "x$PKG_CONFIG_PATH" != "x"; then PKG_CONFIG_PATH="$with_xmlsec1/lib/pkgconfig:$PKG_CONFIG_PATH" else PKG_CONFIG_PATH="$with_xmlsec1/lib/pkgconfig" fi PKG_CHECK_MODULES(XMLSEC_OPENSSL, [xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION], [XMLSEC_INSTALLED=yes],[XMLSEC_INSTALLED=no]) else AC_MSG_WARN(You need at least xmlsec1 $XMLSEC_MIN_VERSION for this version of arc1) fi fi elif test "x$XMLSEC_INSTALLED" = "xno" -a "x$MACOSX" = "xyes"; then #MACOSX has no "ldd" which is needed by xmlsec1-config, so here simply we use PKG_CHECK_MODULES if test "x$PKG_CONFIG_PATH" != "x"; then PKG_CONFIG_PATH="$with_xmlsec1/lib/pkgconfig:$PKG_CONFIG_PATH" else PKG_CONFIG_PATH="$with_xmlsec1/lib/pkgconfig" fi PKG_CHECK_MODULES(XMLSEC, [xmlsec1 >= $XMLSEC_MIN_VERSION], [XMLSEC_INSTALLED=yes], [XMLSEC_INSTALLED=no]) if test "x$XMLSEC_INSTALLED" = "xyes" ; then PKG_CHECK_MODULES(XMLSEC_OPENSSL, [xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION], [XMLSEC_INSTALLED=yes],[XMLSEC_INSTALLED=no]) fi fi AC_SUBST(XMLSEC_CFLAGS) AC_SUBST(XMLSEC_LIBS) AC_SUBST(XMLSEC_OPENSSL_CFLAGS) AC_SUBST(XMLSEC_OPENSSL_LIBS) #AC_SUBST(XMLSEC_CONFIG) #AC_SUBST(XMLSEC_MIN_VERSION) enables_xmlsec1="$XMLSEC_INSTALLED" fi else enables_xmlsec1="no" fi ######################### # # Check libmysqlclient # ######################### MYSQL_INSTALLED=no if test "$enables_hed" = "yes"; then MYSQL_CONFIG="mysql_config" MYSQL_CFLAGS="" MYSQL_LIBS="" dnl Check if libmysqlclient is explicitly enabled, default is disable. AC_ARG_ENABLE(mysql, AC_HELP_STRING([--enable-mysql], [enable use of the MySQL client library]),[enables_mysql="$enableval"],[]) # Ask user for path to libmysqlclient if test "x$enables_mysql" = "xyes"; then AC_ARG_WITH(mysql, [ --with-mysql=(PATH) prefix of MySQL installation. e.g. /usr/local or /usr]) AC_MSG_CHECKING(for mysql client library) if test "x$with_mysql" != "x" ; then MYSQL_CONFIG=$with_mysql/bin/$MYSQL_CONFIG fi if ! $MYSQL_CONFIG --version > /dev/null 2>&1 ; then AC_MSG_ERROR(Could not find mysql C library anywhere (see config.log for details).) 
fi MYSQL_LIBS="`$MYSQL_CONFIG --libs`" MYSQL_CFLAGS="`$MYSQL_CONFIG --cflags`" MYSQL_INSTALLED="yes" AC_SUBST(MYSQL_LIBS) AC_SUBST(MYSQL_CFLAGS) enables_mysql=$MYSQL_INSTALLED fi AC_MSG_NOTICE([MySQL client library enabled: $MYSQL_INSTALLED]) fi AM_CONDITIONAL([MYSQL_LIBRARY_ENABLED],[test "x$MYSQL_INSTALLED" = "xyes"]) # Check ldap-monitor AC_ARG_ENABLE(ldap_monitor, AC_HELP_STRING([--enable-ldap-monitor], [enable use of the ldap monitor]),[enables_ldap_monitor="$enableval"],[]) if test "x$enables_ldap_monitor" = "xyes"; then AC_ARG_WITH(ldap_monitor, [ --with-ldap-monitor=(PATH) where to install the monitor, eg /var/www/ldap-monitor or /usr/share/arc/ldap-monitor]) AC_MSG_CHECKING(for ldap-monitor installation path) if test "x$with_ldap_monitor" != "x" ; then ldap_monitor_prefix=$with_ldap_monitor else ldap_monitor_prefix=${datadir}/arc/ldap-monitor fi AC_MSG_RESULT([$ldap_monitor_prefix]) AC_SUBST(ldap_monitor_prefix) fi # Check ws-monitor AC_ARG_ENABLE(ws_monitor, AC_HELP_STRING([--enable-ws-monitor], [enable use of the ws monitor]),[enables_ws_monitor="$enableval"],[]) if test "x$enables_ws_monitor" = "xyes"; then AC_ARG_WITH(ws_monitor, [ --with-ws-monitor=(PATH) where to install the monitor, eg /var/www/ws-monitor or /usr/share/arc/ws-monitor]) AC_MSG_CHECKING(for ws-monitor installation path) if test "x$with_ws_monitor" != "x" ; then ws_monitor_prefix=$with_ws_monitor else ws_monitor_prefix=${datadir}/arc/ws-monitor fi AC_MSG_RESULT([$ws_monitor_prefix]) AC_SUBST(ws_monitor_prefix) fi # check zlib ZLIB_CFLAGS= ZLIB_LDFLAGS= ZLIB_LIBS= if test "$enables_hed" = "yes"; then SAVE_CPPFLAGS=$CPPFLAGS SAVE_LDFLAGS=$LDFLAGS AC_ARG_WITH(zlib, AC_HELP_STRING([--with-zlib=PATH], [where zlib is installed]), [ if test -d "$withval"; then ZLIB_CFLAGS="${CPPFLAGS} -I$withval/include" ZLIB_LDFLAGS="${LDFLAGS} -L$withval/lib" fi ] ) CPPFLAGS="$CPPFLAGS $ZLIB_CFLAGS" LDFLAGS="$LDFLAGS $ZLIB_LDFLAGS" AC_CHECK_HEADER([zlib.h],[ZLIB_CFLAGS="$ZLIB_CFLAGS"],AC_MSG_ERROR([unable to find zlib header files])) AC_CHECK_LIB([z],[deflateInit2_],[ZLIB_LIBS="$ZLIB_LDFLAGS -lz"],AC_MSG_ERROR([unable to link with zlib library])) CPPFLAGS=$SAVE_CPPFLAGS LDFLAGS=$SAVE_LDFLAGS fi AC_SUBST(ZLIB_CFLAGS) AC_SUBST(ZLIB_LIBS) # check ARGUS ARGUS_CFLAGS= ARGUS_LIBS= AC_ARG_ENABLE(argus, AC_HELP_STRING([--enable-argus], [enable use of Argus PEP V2 libraries]),[enables_argus="$enableval"],[]) if test "x$enables_argus" = "xyes"; then AC_ARG_WITH(argus, AC_HELP_STRING([--with-argus=PATH], [ARGUS PEP installation path]), [ if test "x$PKG_CONFIG_PATH" != "x"; then PKG_CONFIG_PATH="$withval/lib/pkgconfig:$PKG_CONFIG_PATH" else PKG_CONFIG_PATH="$withval/lib/pkgconfig" fi ] ) PKG_CHECK_MODULES(ARGUS, [libargus-pep >= 2.0.0], [], [ AC_MSG_NOTICE([Failed to find Argus PEP libraries with version >= 2]) enables_argus=no ]) fi AC_SUBST(ARGUS_CFLAGS) AC_SUBST(ARGUS_LIBS) AM_CONDITIONAL(ARGUS_ENABLED, test "x$enables_argus" = "xyes") ############################################### # # Check for Berkeley DB C++ # ############################################### DBCXX_LIBS="" DBCXX_CPPFLAGS= if test "$enables_hed" = "yes"; then # # Allow the user to specify db_cxx.h location (we will still check though) # dbcxx_include_paths= AC_ARG_WITH(dbcxx-include, [ --with-dbcxx-include=PATH Specify path to db_cxx.h], [ if test "x$withval" = "xyes" ; then AC_MSG_ERROR([--with-dbcxx-include requires PATH argument]) fi if test "x$withval" != "xno" ; then dbcxx_include_paths=$withval fi ] ) # # Allow the user to specify DB4 library location (we will still 
check though) # db4_library_path= AC_ARG_WITH(db4-library-path, [ --with-db4-library-path=PATH Specify path to DB4 library], [ if test "x$withval" = "xyes" ; then AC_MSG_ERROR([--with-db4-library-path requires PATH argument]) fi if test "x$withval" != "xno" ; then db4_library_path=$withval fi ] ) AC_LANG_SAVE AC_LANG_CPLUSPLUS # # If user did not specify location we start by searching at the standard locations # if test "x$dbcxx_include_paths" = "x" then AC_MSG_NOTICE([Looking for db_cxx.h in standard locations]) AC_CHECK_HEADERS(db_cxx.h,HAVE_DBCXX=yes,HAVE_DBCXX=no) # If the user did not provide a location we have some good suggestions dbcxx_include_paths="/usr/include/db4 /usr/include/db44 /usr/include/db43" else HAVE_DBCXX=no fi # # Now Look for db_cxx.h in non-standard locations # if test "$HAVE_DBCXX" = no then for dbcxx_dir in $dbcxx_include_paths do SAVE_CPPFLAGS=$CPPFLAGS DBCXX_CPPFLAGS=-I$dbcxx_dir CPPFLAGS="$CPPFLAGS $DBCXX_CPPFLAGS" # Disable Autoconf caching unset ac_cv_header_db_cxx_h AC_MSG_NOTICE([Looking for db_cxx.h in $dbcxx_dir]) AC_CHECK_HEADERS(db_cxx.h,HAVE_DBCXX=yes,HAVE_DBCXX=no) CPPFLAGS=$SAVE_CPPFLAGS # If a db_cxx.h was found we break and keep the current value of DBCXX_CPPFLAGS if test "$HAVE_DBCXX" = yes then break fi DBCXX_CPPFLAGS= done fi AC_SUBST(DBCXX_CPPFLAGS) if test "x$db4_library_path" != "x" then db4_library_path="-L$db4_library_path" fi if test "$HAVE_DBCXX" = no then DBCXX_LIBS="" else SAVE_LDFLAGS=$LDFLAGS SAVE_CXXFLAGS=$CXXFLAGS case $host in *-*-mingw*) CXXFLAGS="-I$dbcxx_dir $CXXFLAGS" ;; *) # pthread needed for RH9 LDFLAGS="$LDFLAGS -lpthread" ;; esac LDFLAGS="$LDFLAGS $db4_library_path" for db_ver in "" -4.7 -4.3 -4.2 do AC_CHECK_LIB(db_cxx$db_ver,main,DBCXX_LIBS="$db4_library_path -ldb_cxx$db_ver",DBCXX_LIBS="") if test "$DBCXX_LIBS" = "" then AC_MSG_WARN([BerkeleyDB library libdb_cxx$db_ver was not found!]) else break fi done if test "$DBCXX_LIBS" = "" then AC_MSG_WARN([No BerkeleyDB library found!]) fi LDFLAGS=$SAVE_LDFLAGS CXXFLAGS=$SAVE_CXXFLAGS fi AC_SUBST(DBCXX_LIBS) if test ! 
"x$DBCXX_LIBS" = "x" then # Mingw need -I$dbcxx_dir AC_DEFINE(HAVE_DBCXX, , [define if Berkeley DB C++ binding is available]) SAVE_CXXFLAGS=$CXXFLAGS CXXFLAGS="-I$dbcxx_dir $CXXFLAGS" AC_DBCXX_HAVE_DBDEADLOCKEXCEPTION CXXFLAGS=$SAVE_CXXFLAGS fi AC_LANG_RESTORE fi # DBJSTORE (storing jobs information in BDB) AC_ARG_ENABLE(dbjstore, AC_HELP_STRING([--disable-dbjstore], [disable storing local jobs information in BDB]), [enables_dbjstore=$enableval],[]) if test "$enables_dbjstore" = "yes"; then if test "x$DBCXX_LIBS" = "x" ; then AC_MSG_NOTICE([For storing jobs in BDB C++ API is needed (dbcxx) - disabling]) enables_dbjstore="no" fi fi AC_MSG_NOTICE([Storing jobs in BDB enabled: $enables_dbjstore]) AM_CONDITIONAL([DBJSTORE_ENABLED],[test "x$enables_dbjstore" = "xyes"]) if test "x$enables_dbjstore" = "xyes"; then AC_DEFINE(DBJSTORE_ENABLED,, [define if to build job information in BDB storage]) fi # globus/gpt packages if test "$enables_hed" = "yes"; then PKG_CHECK_MODULES(GLOBUS_COMMON, [globus-common], [ GLOBUS_COMMON_VERSION=`$PKG_CONFIG --modversion globus-common`], [ GPT_PKG(globus_common) ]) AC_SUBST(GLOBUS_COMMON_CFLAGS) AC_SUBST(GLOBUS_COMMON_LIBS) PKG_CHECK_MODULES(GLOBUS_GSSAPI_GSI, [globus-gssapi-gsi], [ GLOBUS_GSSAPI_GSI_VERSION=`$PKG_CONFIG --modversion globus-gssapi-gsi`], [ GPT_PKG(globus_gssapi_gsi) ]) AC_SUBST(GLOBUS_GSSAPI_GSI_CFLAGS) AC_SUBST(GLOBUS_GSSAPI_GSI_LIBS) PKG_CHECK_MODULES(GLOBUS_GSS_ASSIST, [globus-gss-assist], [ GLOBUS_GSS_ASSIST_VERSION=`$PKG_CONFIG --modversion globus-gss-assist`], [ GPT_PKG(globus_gss_assist) ]) AC_SUBST(GLOBUS_GSS_ASSIST_CFLAGS) AC_SUBST(GLOBUS_GSS_ASSIST_LIBS) PKG_CHECK_MODULES(GLOBUS_GSI_CALLBACK, [globus-gsi-callback], [ GLOBUS_GSI_CALLBACK_VERSION=`$PKG_CONFIG --modversion globus-gsi-callback`], [ GPT_PKG(globus_gsi_callback) ]) AC_SUBST(GLOBUS_GSI_CALLBACK_CFLAGS) AC_SUBST(GLOBUS_GSI_CALLBACK_LIBS) PKG_CHECK_MODULES(GLOBUS_FTP_CLIENT, [globus-ftp-client], [ GLOBUS_FTP_CLIENT_VERSION=`$PKG_CONFIG --modversion globus-ftp-client`], [ GPT_PKG(globus_ftp_client) ]) AC_SUBST(GLOBUS_FTP_CLIENT_CFLAGS) AC_SUBST(GLOBUS_FTP_CLIENT_LIBS) PKG_CHECK_MODULES(GLOBUS_FTP_CONTROL, [globus-ftp-control], [ GLOBUS_FTP_CONTROL_VERSION=`$PKG_CONFIG --modversion globus-ftp-control`], [ GPT_PKG(globus_ftp_control) ]) AC_SUBST(GLOBUS_FTP_CONTROL_CFLAGS) AC_SUBST(GLOBUS_FTP_CONTROL_LIBS) PKG_CHECK_MODULES(GLOBUS_IO, [globus-io], [ GLOBUS_IO_VERSION=`$PKG_CONFIG --modversion globus-io`], [ GPT_PKG(globus_io) ]) AC_SUBST(GLOBUS_IO_CFLAGS) AC_SUBST(GLOBUS_IO_LIBS) PKG_CHECK_MODULES(GLOBUS_GSI_CERT_UTILS, [globus-gsi-cert-utils], [ GLOBUS_GSI_CERT_UTILS_VERSION=`$PKG_CONFIG --modversion globus-gsi-cert-utils`], [ GPT_PKG(globus_gsi_cert_utils) ]) AC_SUBST(GLOBUS_GSI_CERT_UTILS_CFLAGS) AC_SUBST(GLOBUS_GSI_CERT_UTILS_LIBS) PKG_CHECK_MODULES(GLOBUS_GSI_CREDENTIAL, [globus-gsi-credential], [ GLOBUS_GSI_CREDENTIAL_VERSION=`$PKG_CONFIG --modversion globus-gsi-credential`], [ GPT_PKG(globus_gsi_credential) ]) AC_SUBST(GLOBUS_GSI_CREDENTIAL_CFLAGS) AC_SUBST(GLOBUS_GSI_CREDENTIAL_LIBS) PKG_CHECK_MODULES(GLOBUS_OPENSSL_MODULE, [globus-openssl-module], [ GLOBUS_OPENSSL_MODULE_VERSION=`$PKG_CONFIG --modversion globus-openssl-module`], [ GPT_PKG(globus_openssl_module) ]) AC_SUBST(GLOBUS_OPENSSL_MODULE_CFLAGS) AC_SUBST(GLOBUS_OPENSSL_MODULE_LIBS) # Check for new globus thread model selection SAVE_CFLAGS=$CFLAGS SAVE_LIBS=$LIBS CFLAGS="$CFLAGS $GLOBUS_COMMON_CFLAGS" LIBS="$LIBS $GLOBUS_COMMON_LIBS" AC_CHECK_FUNCS(globus_thread_set_model) CFLAGS=$SAVE_CFLAGS LIBS=$SAVE_LIBS # Check 
for gridftp-v2 SAVE_CFLAGS=$CFLAGS SAVE_LIBS=$LIBS CFLAGS="$CFLAGS $GLOBUS_FTP_CLIENT_CFLAGS" LIBS="$LIBS $GLOBUS_FTP_CLIENT_LIBS" AC_CHECK_FUNCS(globus_ftp_client_handleattr_set_gridftp2) CFLAGS=$SAVE_CFLAGS LIBS=$SAVE_LIBS globus_openssl_detected= PKG_CHECK_MODULES(GLOBUS_OPENSSL, [globus-openssl], [ GLOBUS_OPENSSL_VERSION=`$PKG_CONFIG --modversion globus-openssl`], [ GPT_PKG(globus_openssl) ]) if test ! "x$GLOBUS_OPENSSL_LIBS" = "x" ; then globus_openssl_detected=`echo "$GLOBUS_OPENSSL_LIBS" | grep "lssl_$GPT_FLAVOR"` if test ! "x$globus_openssl_detected" = "x" ; then globus_openssl_detected="yes" fi fi if test "x$globus_openssl_detected" = "xyes" ; then AC_MSG_RESULT([ Globus own OpenSSL library detected. In order to avoid runtime conflicts following components will be disabled: GridFTP DMC, SRM DMC, GSI MCC. To enable these components use Globus compiled for system OpenSSL. ]) GLOBUS_FTP_CLIENT_VERSION= GLOBUS_FTP_CONTROL_VERSION= GLOBUS_IO_VERSION= GLOBUS_GSSAPI_GSI_VERSION= fi if test "x$GLOBUS_IO_VERSION" = "x"; then IO_VERSION_MAJOR=0 else IO_VERSION_MAJOR=`echo "$GLOBUS_IO_VERSION" | sed 's/^\([[^.]]*\).*/\1/'`; fi AC_DEFINE_UNQUOTED(GLOBUS_IO_VERSION,$IO_VERSION_MAJOR,[Globus IO version]) if test "x$GLOBUS_GSSAPI_GSI_VERSION" = "x"; then GLOBUS_GSSAPI_GSI_VERSION_MAJOR=0 GLOBUS_GSSAPI_GSI_VERSION_MINOR=0 else GLOBUS_GSSAPI_GSI_VERSION_MAJOR=`echo "$GLOBUS_GSSAPI_GSI_VERSION" | sed 's/^\([[^.]]*\).*/\1/'`; GLOBUS_GSSAPI_GSI_VERSION_MINOR=`echo "$GLOBUS_GSSAPI_GSI_VERSION" | sed 's/^[[^.]]*\.\([[^.]]*\).*/\1/'`; fi if test "$GLOBUS_GSSAPI_GSI_VERSION_MAJOR" -lt "12"; then GLOBUS_GSSAPI_GSI_OLD_OPENSSL=1 elif test "$GLOBUS_GSSAPI_GSI_VERSION_MAJOR" -eq "12"; then if test "$GLOBUS_GSSAPI_GSI_VERSION_MINOR" -lt "2"; then GLOBUS_GSSAPI_GSI_OLD_OPENSSL=1 else GLOBUS_GSSAPI_GSI_OLD_OPENSSL=0 fi else GLOBUS_GSSAPI_GSI_OLD_OPENSSL=0 fi AC_DEFINE_UNQUOTED(GLOBUS_GSSAPI_GSI_VERSION,$GSSAPI_GSI_VERSION_MAJOR,[Globus GSSAPI GSI version]) AC_DEFINE_UNQUOTED(GLOBUS_GSSAPI_GSI_OLD_OPENSSL,$GLOBUS_GSSAPI_GSI_OLD_OPENSSL,[Globus GSSAPI GSI is for OpenSSL post-1.1]) dnl dnl DEFAULT_GLOBUS_LOCATION dnl AC_MSG_CHECKING(for DEFAULT_GLOBUS_LOCATION) # GLOBUS_LOCATION is set by GPT macros DEFAULT_GLOBUS_LOCATION="$GLOBUS_LOCATION" AC_MSG_RESULT($DEFAULT_GLOBUS_LOCATION) AC_SUBST(DEFAULT_GLOBUS_LOCATION) #check lcas DEFAULT_LCAS_LOCATION=/opt/glite LCAS_LOCATION= LCAS_CFLAGS= LCAS_LIBS= AC_ARG_WITH(lcas-location, [ --with-lcas-location= Specify the LCAS installation path. [[/opt/glite]]], [ LCAS_LOCATION=$with_lcas_location if test ! -d $LCAS_LOCATION; then AC_MSG_WARN([LCAS_LOCATION ($LCAS_LOCATION) does not exist]) LCAS_LOCATION= fi ],[ if test "x$LCAS_LOCATION" = "x"; then LCAS_LOCATION=$DEFAULT_LCAS_LOCATION fi if test ! -d $LCAS_LOCATION; then LCAS_LOCATION= fi ] ) if test "x$LCAS_LOCATION" != "x"; then LCAS_CFLAGS=$LCAS_LOCATION/include/glite/security/lcas if test ! -d $LCAS_CFLAGS; then LCAS_CFLAGS=$LCAS_LOCATION/include/lcas if test ! 
-d $LCAS_CFLAGS; then LCAS_CFLAGS=$LCAS_LOCATION/include fi fi LCAS_CFLAGS=-I$LCAS_CFLAGS SAVE_CPPFLAGS=$CPPFLAGS CPPFLAGS="$LCAS_CFLAGS $GLOBUS_GSSAPI_GSI_CFLAGS" AC_CHECK_HEADERS([lcas.h], LCAS_LDFLAGS= if test -d $LCAS_LOCATION/lib64; then LCAS_LDFLAGS="-L$LCAS_LOCATION/lib64 $GLOBUS_GSSAPI_GSI_LIBS" else LCAS_LDFLAGS="-L$LCAS_LOCATION/lib $GLOBUS_GSSAPI_GSI_LIBS" fi SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $LCAS_LDFLAGS" AC_CHECK_LIB(lcas,lcas_init, LCAS_LIBS="$LCAS_LDFLAGS -llcas",LCAS_LOCATION="",) LDFLAGS=$SAVE_LDFLAGS , LCAS_LOCATION="" ) CPPFLAGS=$SAVE_CPPFLAGS fi if test "x$LCAS_LOCATION" != "x"; then AC_DEFINE(HAVE_LCAS,, [define if lcas is available]) AC_SUBST(LCAS_LOCATION) AC_SUBST(LCAS_CFLAGS) AC_SUBST(LCAS_LIBS) fi #check lcmaps DEFAULT_LCMAPS_LOCATION=/opt/glite LCMAPS_LOCATION= LCMAPS_CFLAGS= LCMAPS_LIBS= AC_ARG_WITH(lcmaps-location, [ --with-lcmaps-location= Specify the LCMAPS installation path. [[/opt/glite]]], [ LCMAPS_LOCATION=$with_lcmaps_location if test ! -d $LCMAPS_LOCATION; then AC_MSG_WARN([LCMAPS_LOCATION ($LCMAPS_LOCATION) does not exist]) LCMAPS_LOCATION= fi ],[ if test "x$LCMAPS_LOCATION" = "x"; then LCMAPS_LOCATION=$DEFAULT_LCMAPS_LOCATION fi if test ! -d $LCMAPS_LOCATION; then LCMAPS_LOCATION= fi ] ) if test "x$LCMAPS_LOCATION" != "x"; then LCMAPS_CFLAGS=$LCMAPS_LOCATION/include/glite/security/lcmaps if test ! -d $LCMAPS_CFLAGS; then LCMAPS_CFLAGS=$LCMAPS_LOCATION/include/lcmaps if test ! -d $LCMAPS_CFLAGS; then LCMAPS_CFLAGS=$LCMAPS_LOCATION/include fi fi LCMAPS_CFLAGS=-I$LCMAPS_CFLAGS SAVE_CPPFLAGS=$CPPFLAGS CPPFLAGS="$LCMAPS_CFLAGS $GLOBUS_GSSAPI_GSI_CFLAGS" AC_CHECK_HEADERS([lcmaps.h], LCMAPS_LDFLAGS= if test -d $LCMAPS_LOCATION/lib64; then LCMAPS_LDFLAGS="-L$LCMAPS_LOCATION/lib64 $GLOBUS_GSSAPI_GSI_LIBS" else LCMAPS_LDFLAGS="-L$LCMAPS_LOCATION/lib $GLOBUS_GSSAPI_GSI_LIBS" fi SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $LCMAPS_LDFLAGS" AC_CHECK_LIB(lcmaps,lcmaps_init, LCMAPS_LIBS="$LCMAPS_LDFLAGS -llcmaps",LCMAPS_LOCATION="",) LDFLAGS=$SAVE_LDFLAGS , LCMAPS_LOCATION="" ) CPPFLAGS=$SAVE_CPPFLAGS fi if test "x$LCMAPS_LOCATION" != "x"; then AC_DEFINE(HAVE_LCMAPS,, [define if lcmaps is available]) AC_SUBST(LCMAPS_LOCATION) AC_SUBST(LCMAPS_CFLAGS) AC_SUBST(LCMAPS_LIBS) fi # Check if mock DMC is enabled AC_ARG_ENABLE(mock-dmc, AC_HELP_STRING([--enable-mock-dmc], [enable mock DMC, default is disable]),[enables_mock_dmc="$enableval"],[]) # Check for GFAL2 AC_ARG_ENABLE(gfal, AC_HELP_STRING([--enable-gfal], [enable the GFAL support, default is disable]),[enables_gfal="$enableval"],[]) if test "x$enables_gfal" = "xyes"; then PKG_CHECK_MODULES(GFAL2, gfal_transfer, [], [enables_gfal="no"]) AC_SUBST(GFAL2_CFLAGS) AC_SUBST(GFAL2_LIBS) fi # Check for S3 AC_ARG_ENABLE(s3, AC_HELP_STRING([--enable-s3], [enable the S3 support, default is disable]),[enables_s3="$enableval"],[]) if test "x$enables_s3" = "xyes"; then AC_ARG_WITH(s3, [ --with-s3=(PATH) libs3 location]) if test ! 
"x$with_s3" = "x" ; then S3_LOCATION="$with_s3" S3_CPPFLAGS="-I$S3_LOCATION/include" if test -d $S3_LOCATION/lib64; then S3_LDFLAGS="-L$S3_LOCATION/lib64" else S3_LDFLAGS="-L$S3_LOCATION/lib" fi fi SAVE_CPPFLAGS=$CPPFLAGS CPPFLAGS="$CPPFLAGS $S3_CPPFLAGS" AC_CHECK_HEADER(libs3.h, [], [enables_s3="no"]) CPPFLAGS=$SAVE_CPPFLAGS SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $S3_LDFLAGS" AC_CHECK_LIB([s3], [S3_initialize], [S3_LIBS="$S3_LDFLAGS -ls3"], [enables_s3="no"]) LDFLAGS=$SAVE_LDFLAGS AC_SUBST(S3_CPPFLAGS) AC_SUBST(S3_LIBS) if test x$enables_s3 = xyes then if s3 help 2>&1 | grep -q -- '--timeout' then AC_DEFINE([HAVE_S3_TIMEOUT],,[Define if S3 API has timeouts]) fi fi fi # Check for xrootd (c++) AC_LANG_SAVE AC_LANG_CPLUSPLUS AC_ARG_ENABLE(xrootd, AC_HELP_STRING([--disable-xrootd], [disable the xrootd support, default is enable]),[enables_xrootd="$enableval"],[]) if test "x$enables_xrootd" = "xyes"; then XROOTD_CPPFLAGS="-I/usr/include/xrootd" AC_ARG_WITH(xrootd, [ --with-xrootd=(PATH) Xrootd location]) if test ! "x$with_xrootd" = "x" ; then XROOTD_LOCATION="$with_xrootd" XROOTD_CPPFLAGS="-I$XROOTD_LOCATION/include/xrootd" if test -d $XROOTD_LOCATION/lib64; then XROOTD_LDFLAGS="-L$XROOTD_LOCATION/lib64" else XROOTD_LDFLAGS="-L$XROOTD_LOCATION/lib" fi fi SAVE_CPPFLAGS=$CPPFLAGS CPPFLAGS="$CPPFLAGS $XROOTD_CPPFLAGS" AC_CHECK_HEADER(XrdPosix/XrdPosixXrootd.hh, [], [enables_xrootd="no"] [#include ]) CPPFLAGS=$SAVE_CPPFLAGS SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $XROOTD_LDFLAGS" AC_CHECK_LIB([XrdPosix], [_init], [XROOTD_LIBS="$XROOTD_LDFLAGS -lXrdPosix"], [enables_xrootd="no"]) LDFLAGS=$SAVE_LDFLAGS fi AC_SUBST(XROOTD_CPPFLAGS) AC_SUBST(XROOTD_LIBS) fi AC_LANG_RESTORE # Setup conditionals AM_CONDITIONAL([GLOBUSUTILS_ENABLED], test -n "$GLOBUS_COMMON_VERSION") AM_CONDITIONAL([GRIDFTP_ENABLED], test -n "$GLOBUS_FTP_CLIENT_VERSION") AM_CONDITIONAL([MOCK_DMC_ENABLED], test x$enables_mock_dmc = xyes) AM_CONDITIONAL([GFAL_ENABLED], test x$enables_gfal = xyes) AM_CONDITIONAL([S3_DMC_ENABLED], test x$enables_s3 = xyes) AM_CONDITIONAL([XROOTD_ENABLED], test x$enables_xrootd = xyes) AM_CONDITIONAL([XMLSEC_ENABLED], test x$XMLSEC_INSTALLED = xyes) AM_CONDITIONAL([CPPUNIT_ENABLED], test x$enables_cppunit = xyes) enables_srm_dmc=no if test "$enables_hed" = "yes"; then enables_srm_dmc=yes fi if test "$enables_giis_service" = "yes"; then if test "$enables_ldap_service" = "no"; then enables_ldap_service="yes" fi fi AM_CONDITIONAL([SRM_DMC_ENABLED],[test "x$enables_srm_dmc" = "xyes"]) # Setup defines if test -n "$GLOBUS_COMMON_VERSION"; then AC_DEFINE(HAVE_GLOBUS,,[define if GLOBUS is available]) fi if test x"$XMLSEC_INSTALLED" = xyes; then AC_DEFINE(HAVE_XMLSEC,,[define if XMLSEC package is available]) fi # Setup messages for reporting enables_gridftp=no if test -n "$GLOBUS_FTP_CLIENT_VERSION" ; then enables_gridftp=yes; fi enables_dbcxx=no if test -n "$DBCXX_LIBS" ; then enables_dbcxx=yes; fi # Check for LDAP if test "$enables_hed" = "yes"; then LDAP=no AC_ARG_ENABLE(ldap, AC_HELP_STRING([--disable-ldap], [disable the LDAP support - requires OpenLDAP]),[enables_ldap="$enableval"],[]) if test "x$enables_ldap" = "xyes"; then AC_CHECK_HEADER(ldap.h, [ LDAP=yes SAVE_LDFLAGS=$LDFLAGS case "${host}" in *-*-mingw32): ;; *): LDFLAGS=-lpthread ;; esac AC_CHECK_LIB([ldap_r], [ldap_first_message], [ AC_CHECK_LIB([ldap_r], [ldap_initialize], [ AC_DEFINE(HAVE_LDAP_INITIALIZE,[],[Define if you have ldap_initialize function]) ]) LDAP_LIBS=-lldap_r ], [ AC_CHECK_LIB([ldap], [ldap_first_message], [ AC_CHECK_LIB([ldap], 
[ldap_initialize], [ AC_DEFINE(HAVE_LDAP_INITIALIZE,[],[Define if you have ldap_initialize function]) ]) LDAP_LIBS=-lldap ], [ LDAP=no ]) ]) AC_CHECK_LIB([lber], [ber_init], [LDAP_LIBS="$LDAP_LIBS -llber"], []) AC_SUBST(LDAP_LIBS) LDFLAGS=$SAVE_LDFLAGS ], [ LDAP=no # Try native LDAP on Win32 if OpenLDAP fails case "${host}" in *-*-mingw32) AC_CHECK_HEADER([winldap.h], [ AC_CHECK_LIB([wldap32], [ldap_init], [ LDAP=yes LDAP_LIBS="-lwldap32" AC_DEFINE(USE_WIN32_LDAP_API,,[Define if using WIN32 LDAP API]) ]) ], [], [[#include ]]) ;; esac ]) enables_ldap="$LDAP" fi else enables_ldap="no" fi AM_CONDITIONAL([LDAP_ENABLED], test x$LDAP = xyes) if test "x$LDAP" = "xyes"; then AC_DEFINE(HAVE_LDAP,[],[Define if OpenLDAP is available]) fi # Check for the uuid lib UUID_LIBS="" if test "$enables_hed" = "yes"; then AC_CHECK_HEADER(uuid/uuid.h, [ AC_CHECK_FUNC([uuid_generate], [UUID_LIBS=], [ AC_CHECK_LIB([uuid], [uuid_generate], [UUID_LIBS=-luuid], [ AC_MSG_NOTICE([Can't find library containing uuid implementation]) ]) ]) ], [AC_MSG_NOTICE([Can't find uuid header])]) AC_SUBST(UUID_LIBS) LIBS="$LIBS $UUID_LIBS" fi # Check for dlopen DLOPEN_LIBS="" if test "$enables_hed" = "yes"; then AC_CHECK_FUNC([dlopen], [DLOPEN_LIBS=], [ AC_CHECK_LIB([dl], [dlopen], [DLOPEN_LIBS=-ldl], [ AC_MSG_NOTICE([Can't find library containing dlopen implementation]) ]) ]) AC_SUBST(DLOPEN_LIBS) fi # Check for clock_gettime AC_SEARCH_LIBS([clock_gettime], [rt]) # check for fsusage if test "$enables_hed" = "yes"; then gl_FSUSAGE fi if test "$enables_hed" = "yes"; then # Checks for header files. AC_HEADER_DIRENT AC_HEADER_STDC AC_HEADER_SYS_WAIT AC_CHECK_HEADERS([arpa/inet.h fcntl.h float.h limits.h netdb.h netinet/in.h sasl.h sasl/sasl.h stdint.h stdlib.h string.h sys/file.h sys/socket.h sys/vfs.h unistd.h uuid/uuid.h getopt.h]) AC_CXX_HAVE_SSTREAM # Checks for typedefs, structures, and compiler characteristics. AC_HEADER_STDBOOL AC_C_CONST AC_TYPE_UID_T AC_C_INLINE AC_TYPE_MODE_T AC_TYPE_OFF_T AC_TYPE_PID_T AC_TYPE_SIZE_T AC_CHECK_MEMBERS([struct stat.st_blksize]) AC_HEADER_TIME AC_STRUCT_TM AC_CHECK_TYPES([ptrdiff_t]) # Checks for library functions. 
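# Note (illustrative only; the exact set of defines depends on the build host): each function located by the AC_CHECK_FUNCS list below becomes a HAVE_<NAME> macro in config.h, e.g. a successful probe for mkdtemp results in
#   #define HAVE_MKDTEMP 1
# which the sources can then guard with "#ifdef HAVE_MKDTEMP".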
AC_FUNC_CHOWN AC_FUNC_CLOSEDIR_VOID AC_FUNC_ERROR_AT_LINE AC_FUNC_FORK AC_FUNC_LSTAT AC_FUNC_LSTAT_FOLLOWS_SLASHED_SYMLINK AC_FUNC_MEMCMP AC_FUNC_MKTIME # GNU compatible *ALLOC functions are available on Windows # The test will however fail when cross-compiling with mingw case "${host}" in *-*-mingw32) : ;; *) AC_FUNC_MALLOC AC_FUNC_REALLOC ;; esac AC_FUNC_SELECT_ARGTYPES AC_TYPE_SIGNAL AC_FUNC_STRERROR_R AC_FUNC_STAT AC_CHECK_FUNCS([acl dup2 floor ftruncate gethostname getdomainname getpid gmtime_r lchown localtime_r memchr memmove memset mkdir mkfifo regcomp rmdir select setenv socket strcasecmp strchr strcspn strdup strerror strncasecmp strstr strtol strtoul strtoull timegm tzset unsetenv getopt_long_only getgrouplist mkdtemp posix_fallocate readdir_r [mkstemp] mktemp]) AC_CHECK_LIB([resolv], [res_query], [LIBRESOLV=-lresolv], [LIBRESOLV=]) AC_CHECK_LIB([resolv], [__dn_skipname], [LIBRESOLV=-lresolv], [LIBRESOLV=]) AC_CHECK_LIB([nsl], [gethostbyname], [LIBRESOLV="$LIBRESOLV -lnsl"], []) AC_CHECK_LIB([nsl], [getdomainname]) AC_SUBST(LIBRESOLV) fi # check for platfom specific extra libraries and flags EXTRA_LIBS="" REGEX_LIBS="" SOCKET_LIBS="" WIN32="" case "${host}" in *-*-mingw32) WIN32="yes" REGEX_LIBS="-lregex" SOCKET_LIBS="-lws2_32" EXTRA_LIBS="-lole32" # its required to libtool generate .dlls on win32 using mingw LDFLAGS="$LDFLAGS -no-undefined" AC_DEFINE(HAVE_GETDOMAINNAME,,[windows has gethostname method]) AC_CHECK_PROGS(WINDRES, windres.exe ${host}-windres) ;; *solaris*) SOCKET_LIBS="-lsocket" CFLAGS="$CFLAGS -D_POSIX_PTHREAD_SEMANTICS" CXXFLAGS="$CXXFLAGS -D_POSIX_PTHREAD_SEMANTICS" ;; *) AC_DEFINE(HAVE_GETDOMAINNAME,,[let's suppose the unix-alike (except solaris) has gethostname method]) ;; esac AC_SUBST(EXTRA_LIBS) AC_SUBST(REGEX_LIBS) AC_SUBST(SOCKET_LIBS) AM_CONDITIONAL([WIN32], [ test "x$WIN32" = "xyes" ]) case " $LDFLAGS " in " -Wl,--no-undefined ") ;; " -Wl,-no-undefined ") ;; " -Wl,-z -Wl,defs ") ;; " -Wl,-z,defs ") ;; *) case "${host}" in *darwin*);; *solaris*);; *) LDFLAGS="$LDFLAGS -Wl,--no-undefined" ;; esac ;; esac AC_CHECK_PROGS(PDFLATEX, pdflatex) AC_CHECK_PROGS(DOXYGEN, doxygen) AC_CHECK_PROGS(DOT, dot) # Check if user asks to skip documentation build AC_ARG_ENABLE(doc, AC_HELP_STRING([--disable-doc], [disable building documentation (requires doxygen and pdflatex)]),[enables_doc=$enableval],[]) #if test "x$enables_doc" = "xyes"; then # There is no point disabling docs due to missing tools since the pdf # files are both in svn and in the dist tarball # if test "x$PDFLATEX" = "x"; then # enables_doc="no" # AC_MSG_NOTICE([WARNING: Missing pdflatex - documentation won't be built]) # elif test "x$DOXYGEN" = "x"; then # enables_doc="no" # AC_MSG_NOTICE([WARNING: Missing doxygen - documentation won't be built]) # elif test "x$DOT" = "x"; then # enables_doc="no" # AC_MSG_NOTICE([WARNING: Missing dot - documentation won't be built]) # fi #fi AC_MSG_NOTICE([Documentation enabled: $enables_doc]) AM_CONDITIONAL([DOC_ENABLED],[test "x$enables_doc" = "xyes"]) AM_CONDITIONAL([PYDOXYGEN],[test -f python/python/arc/index.xml -o "x$DOXYGEN" != "x"]) AM_CONDITIONAL([ALTPYDOXYGEN],[test -f python/altpython/arc/index.xml -o "x$DOXYGEN" != "x"]) # Check for explicitly and implicitely disabled services if test "x$WIN32" = "xyes" ; then AC_MSG_NOTICE([In WIN32 environment many (all) services are not supported yet]) fi # A-Rex AC_ARG_ENABLE(a_rex_service, AC_HELP_STRING([--disable-a-rex-service], [disable building A-Rex service]), [enables_a_rex_service=$enableval],[]) if test 
"$enables_a_rex_service" = "yes"; then if test "x$WIN32" = "xyes" ; then AC_MSG_NOTICE([A-Rex can't be built for WIN32 environment - disabling]) enables_a_rex_service="no" elif test "x$DBCXX_LIBS" = "x" ; then AC_MSG_NOTICE([A-Rex can't be built without C++ API for DB4.x - disabling]) enables_a_rex_service="no" fi fi AC_MSG_NOTICE([A-Rex service enabled: $enables_a_rex_service]) AM_CONDITIONAL([A_REX_SERVICE_ENABLED],[test "x$enables_a_rex_service" = "xyes"]) # Gridftpd AC_ARG_ENABLE(gridftpd_service, AC_HELP_STRING([--disable-gridftpd-service], [disable building Gridftpd service]), [enables_gridftpd_service=$enableval],[]) if test "$enables_gridftpd_service" = "yes"; then if test "x$WIN32" = "xyes" ; then AC_MSG_NOTICE([Gridftpd can not be built for WIN32 environment - disabling]) enables_gridftpd_service="no" fi gridftpd_service_globus_pkgs="globus-common globus-io globus-gsi-credential globus-openssl-module globus-ftp-control" gridftpd_service_globus_pkgs_missing="" for pkg in $gridftpd_service_globus_pkgs do var=`echo '$'$pkg|tr '[\-a-z]' '[_A-Z]'|sed 's/$/_VERSION/'` if test -z "`eval echo $var`" then gridftpd_service_globus_pkgs_missing="$gridftpd_service_globus_pkgs_missing $pkg" fi done if test -n "$gridftpd_service_globus_pkgs_missing" ; then AC_MSG_NOTICE([GridFTP service can not be built (missing development packages for$gridftpd_service_globus_pkgs_missing) - disabling]) enables_gridftpd_service="no" fi #check for struct statfs AC_CHECK_FUNCS([fstatfs]) AC_CHECK_HEADERS([sys/param.h sys/statfs.h sys/mount.h sys/vfs.h])dnl AC_CHECK_MEMBERS([struct statfs.f_type],,, [$ac_includes_default #if HAVE_SYS_STATFS_H #include #endif #if HAVE_SYS_MOUNT_H #include #endif #if HAVE_SYS_VFS_H #include #endif]) fi AC_MSG_NOTICE([Gridftpd service enabled: $enables_gridftpd_service]) AM_CONDITIONAL([GRIDFTPD_SERVICE_ENABLED],[test "x$enables_gridftpd_service" = "xyes"]) # LDAP service AC_ARG_ENABLE(ldap_service, AC_HELP_STRING([--disable-ldap-service], [disable building LDAP Infosystem Service]), [enables_ldap_service=$enableval],[]) if test "$enables_ldap_service" = "yes"; then if test "x$WIN32" = "xyes" ; then AC_MSG_NOTICE([LDAP infosystem can't be built for WIN32 environment - disabling]) enables_ldap_service="no" fi fi AC_MSG_NOTICE([LDAP Infosystem service enabled: $enables_ldap_service]) AM_CONDITIONAL([LDAP_SERVICE_ENABLED],[test "x$enables_ldap_service" = "xyes"]) # GIIS service AC_ARG_ENABLE(giis_service, AC_HELP_STRING([--disable-giis-service], [disable building GIIS Service]), [enables_giis_service=$enableval],[]) if test "$enables_giis_service" = "yes"; then if test "x$WIN32" = "xyes" ; then AC_MSG_NOTICE([GIIS can't be built for WIN32 environment - disabling]) enables_giis_service="no" fi AC_CHECK_HEADERS(lber.h, [],[enables_giis_service=no]) AC_CHECK_HEADERS(ldap_features.h, [],[enables_giis_service=no]) fi if test "$enables_ldap_service" = "no"; then if test "$enables_giis_service" = "yes"; then enables_giis_service="no" AC_MSG_NOTICE([WARNING: GIIS service can't be enabled without LDAP Infosys, disabling GIIS]) fi fi AC_MSG_NOTICE([GIIS service enabled: $enables_giis_service]) AM_CONDITIONAL([GIIS_SERVICE_ENABLED],[test "x$enables_giis_service" = "xyes"]) # LDAP monitor AC_ARG_ENABLE(ldap_monitor, AC_HELP_STRING([--disable-ldap-monitor], [disable building LDAP Monitor]), [enables_ldap_monitor=$enableval],[]) AC_MSG_NOTICE([LDAP Monitor enabled: $enables_ldap_monitor]) AM_CONDITIONAL([LDAP_MONITOR_ENABLED],[test "x$enables_ldap_monitor" = "xyes"]) # WS monitor 
AC_ARG_ENABLE(ws_monitor, AC_HELP_STRING([--disable-ws-monitor], [disable building WS Monitor]), [enables_ws_monitor=$enableval],[])
AC_MSG_NOTICE([WS Monitor enabled: $enables_ws_monitor])
AM_CONDITIONAL([WS_MONITOR_ENABLED],[test "x$enables_ws_monitor" = "xyes"])
# Cache service
AC_ARG_ENABLE(cache_service, AC_HELP_STRING([--disable-cache-service], [disable building cache service]), [enables_cache_service=$enableval],[])
if test "$enables_cache_service" = "yes"; then
  if test "x$WIN32" = "xyes" ; then
    enables_cache_service="no"
    AC_MSG_NOTICE([Cache service can't be built for WIN32 environment - disabling])
  elif test ! "x$enables_a_rex_service" = "xyes" ; then
    enables_cache_service="no"
    AC_MSG_NOTICE([Cache service can't be built without A-REX - disabling])
  fi
fi
AC_MSG_NOTICE([Cache service enabled: $enables_cache_service])
AM_CONDITIONAL([CACHE_SERVICE_ENABLED],[test "x$enables_cache_service" = "xyes"])
AM_CONDITIONAL([CACHE_WEBSERVICE_ENABLED],[test "x$enables_cache_service" = "xyes"])
# DataDelivery service
AC_ARG_ENABLE(datadelivery_service, AC_HELP_STRING([--disable-datadelivery-service], [disable building DataDelivery service]), [enables_datadelivery_service=$enableval],[])
if test "$enables_datadelivery_service" = "yes"; then
  if test "x$WIN32" = "xyes" ; then
    enables_datadelivery_service="no"
    AC_MSG_NOTICE([DataDelivery service can't be built for WIN32 environment - disabling])
  fi
fi
AC_MSG_NOTICE([DataDelivery service enabled: $enables_datadelivery_service])
AM_CONDITIONAL([DATADELIVERY_SERVICE_ENABLED],[test "x$enables_datadelivery_service" = "xyes"])
# ACIX service
AC_ARG_ENABLE(acix, AC_HELP_STRING([--disable-acix], [disable building ACIX service]), [enables_acix=$enableval],[])
dnl AM_PATH_PYTHON([2.4], [], [enables_acix="no"])
if test "x$PYTHON_VERSION" = "x2.2" -o "x$PYTHON_VERSION" = "x2.3"
then
  enables_acix="no"
fi
AC_MSG_NOTICE([ACIX enabled: $enables_acix])
AM_CONDITIONAL([ACIX_ENABLED],[test "x$enables_acix" = "xyes"])
# trial command (from python-twisted-core) is used for acix unit tests
AC_CHECK_PROGS(TRIAL, trial)
if test "x$TRIAL" = "x"; then
  AC_MSG_NOTICE([trial not found - ACIX unit tests will be skipped])
fi
# unit tests also require python >=2.6
AM_CONDITIONAL([ACIX_TESTS_ENABLED], [test "x$TRIAL" != "x" && test "x$PYTHON_VERSION" != "x2.4" && test "x$PYTHON_VERSION" != "x2.5"])
# Check for explicitly and implicitly disabled clients
AC_ARG_ENABLE(compute_client, AC_HELP_STRING([--disable-compute-client], [disable building compute (job management) client tools]), [enables_compute_client=$enableval],[])
AC_MSG_NOTICE([Compute client tools enabled: $enables_compute_client])
AM_CONDITIONAL([COMPUTE_CLIENT_ENABLED],[test "x$enables_compute_client" = "xyes"])
AC_ARG_ENABLE(credentials_client, AC_HELP_STRING([--disable-credentials-client], [disable building client tools for handling X.509 credentials]), [enables_credentials_client=$enableval],[])
AC_MSG_NOTICE([Credentials client tools enabled: $enables_credentials_client])
AM_CONDITIONAL([CREDENTIALS_CLIENT_ENABLED],[test "x$enables_credentials_client" = "xyes"])
AC_ARG_ENABLE(echo_client, AC_HELP_STRING([--disable-echo-client], [disable building client tools for communicating with Echo service]), [enables_echo_client=$enableval],[])
AC_MSG_NOTICE([Echo client tool enabled: $enables_echo_client])
AM_CONDITIONAL([ECHO_CLIENT_ENABLED],[test "x$enables_echo_client" = "xyes"])
AC_ARG_ENABLE(data_client, AC_HELP_STRING([--disable-data-client], [disable building generic client tools for handling data]),
[enables_data_client=$enableval],[])
AC_MSG_NOTICE([Data client tools enabled: $enables_data_client])
AM_CONDITIONAL([DATA_CLIENT_ENABLED],[test "x$enables_data_client" = "xyes"])
AC_ARG_ENABLE(jura_client, AC_HELP_STRING([--disable-jura-client], [disable building client tool for communicating with JURA]), [enables_jura_client=$enableval],[])
AC_MSG_NOTICE([JURA client tool enabled: $enables_jura_client])
AM_CONDITIONAL([JURA_CLIENT_ENABLED],[test "x$enables_jura_client" = "xyes"])
AC_ARG_ENABLE(saml_client, AC_HELP_STRING([--disable-saml-client], [disable building client tool for communicating with SAML-based VOMS service]), [enables_saml_client=$enableval],[])
if test "$enables_saml_client" = "yes"; then
  if test "$enables_xmlsec1" != "yes" ; then
    enables_saml_client="no"
    AC_MSG_NOTICE([SAML client requires xmlsec1 - disabling])
  fi
fi
AC_MSG_NOTICE([SAML client tool enabled: $enables_saml_client])
AM_CONDITIONAL([SAML_CLIENT_ENABLED],[test "x$enables_saml_client" = "xyes"])
AC_ARG_ENABLE(wsrf_client, AC_HELP_STRING([--disable-wsrf-client], [disable building client tools for querying WSRF-enabled services.]), [enables_wsrf_client=$enableval],[])
AC_MSG_NOTICE([WSRF client tool enabled: $enables_wsrf_client])
AM_CONDITIONAL([WSRF_CLIENT_ENABLED],[test "x$enables_wsrf_client" = "xyes"])
AC_ARG_ENABLE(unicore_client, AC_HELP_STRING([--enable-unicore-client], [enable building UNICORE-related plugins (not affected by --enable-all).]), [enables_unicore_client=$enableval],[])
AC_MSG_NOTICE([UNICORE plugin(s) enabled: $enables_unicore_client])
AM_CONDITIONAL([UNICORE_ENABLED],[test "x$enables_unicore_client" = "xyes"])
AC_ARG_ENABLE(emies_client, AC_HELP_STRING([--disable-emies-client], [disable building EMI ES-related client plugins.]), [enables_emies_client=$enableval],[])
AC_MSG_NOTICE([EMI ES plugin(s) enabled: $enables_emies_client])
AM_CONDITIONAL([EMIES_ENABLED],[test "x$enables_emies_client" = "xyes"])
# Check for consistency among disabled components
if test "$enables_hed" = "no"; then
  if test "$enables_a_rex_service" = "yes" -o \
          "$enables_cache_service" = "yes" -o \
          "$enables_datadelivery_service" = "yes" -o \
          "$enables_compute_client" = "yes" -o \
          "$enables_credentials_client" = "yes" -o \
          "$enables_echo_client" = "yes" -o \
          "$enables_data_client" = "yes" -o \
          "$enables_jura_client" = "yes" -o \
          "$enables_saml_client" = "yes" -o \
          "$enables_wsrf_client" = "yes" \
  ; then
    AC_MSG_ERROR(HED is needed for building any of the client or service tools. Please enable HED by using --enable-hed.)
fi fi AM_CONDITIONAL([HED_ENABLED],[test "x$enables_hed" = "xyes"]) # A-Rex specific hack for backend scripts pbs_bin_path=/usr/bin pbs_log_path=/var/spool/pbs/server_logs tmp_dir=/tmp gnu_time=/usr/bin/time case "${host}" in *solaris* | *darwin* ) # hostname -f does not work on Solaris, OS X nodename="hostname" ;; *) nodename="/bin/hostname -f" ;; esac arc_location=$prefix AC_SUBST(arc_location) AC_SUBST(pbs_bin_path) AC_SUBST(pbs_log_path) AC_SUBST(tmp_dir) AC_SUBST(gnu_time) AC_SUBST(nodename) # Shell for the job control scripts case $host_os in solaris* ) posix_shell='/usr/xpg4/bin/sh' ;; * ) posix_shell='/bin/sh' ;; esac AC_SUBST(posix_shell) DATE=`date +%Y-%m-%d` AC_SUBST(DATE) #DATER=`date -R` DATER=`date +'%a, %d %b %Y %H:%M:%S %z'` AC_SUBST(DATER) SPECDATE=`LANG=C date +"%a %b %d %Y"` AC_SUBST(SPECDATE) AH_BOTTOM([#ifdef WIN32 #include #endif]) AC_CONFIG_FILES([Makefile src/Makefile src/external/Makefile src/external/cJSON/Makefile src/hed/Makefile src/hed/libs/compute/Makefile src/hed/libs/compute/test/Makefile src/hed/libs/compute/examples/Makefile src/hed/libs/common/Makefile src/hed/libs/common/test/Makefile src/hed/libs/communication/Makefile src/hed/libs/credential/Makefile src/hed/libs/credential/test/Makefile src/hed/libs/credentialmod/Makefile src/hed/libs/crypto/Makefile src/hed/libs/cryptomod/Makefile src/hed/libs/data/Makefile src/hed/libs/data/cache-clean src/hed/libs/data/cache-clean.1 src/hed/libs/data/cache-list src/hed/libs/data/cache-list.1 src/hed/libs/data/test/Makefile src/hed/libs/data/examples/Makefile src/hed/libs/Makefile src/hed/libs/loader/Makefile src/hed/libs/loader/schema/Makefile src/hed/libs/loader/test/Makefile src/hed/libs/message/Makefile src/hed/libs/message/test/Makefile src/hed/libs/security/Makefile src/hed/libs/security/ArcPDP/Makefile src/hed/libs/security/ArcPDP/attr/Makefile src/hed/libs/security/ArcPDP/policy/Makefile src/hed/libs/security/ArcPDP/alg/Makefile src/hed/libs/security/ArcPDP/fn/Makefile src/hed/libs/credentialstore/Makefile src/hed/libs/wsrf/Makefile src/hed/libs/ws-addressing/Makefile src/hed/libs/ws-security/Makefile src/hed/libs/ws-security/test/Makefile src/hed/libs/infosys/Makefile src/hed/libs/infosys/schema/Makefile src/hed/libs/infosys/test/Makefile src/hed/libs/delegation/Makefile src/hed/libs/delegation/test/Makefile src/hed/libs/ws/Makefile src/hed/libs/xmlsec/Makefile src/hed/libs/globusutils/Makefile src/hed/daemon/Makefile src/hed/daemon/scripts/Makefile src/hed/daemon/schema/Makefile src/hed/daemon/unix/Makefile src/hed/daemon/win32/Makefile src/hed/mcc/Makefile src/hed/mcc/soap/Makefile src/hed/mcc/tcp/Makefile src/hed/mcc/tcp/schema/Makefile src/hed/mcc/http/Makefile src/hed/mcc/http/schema/Makefile src/hed/mcc/tls/Makefile src/hed/mcc/tls/schema/Makefile src/hed/mcc/msgvalidator/Makefile src/hed/mcc/msgvalidator/schema/Makefile src/hed/acc/Makefile src/hed/acc/ARC0/Makefile src/hed/acc/ARC1/Makefile src/hed/acc/ARC1/test/Makefile src/hed/acc/EMIES/Makefile src/hed/acc/EMIES/arcemiestest.1 src/hed/acc/EMIES/schema/Makefile src/hed/acc/CREAM/Makefile src/hed/acc/UNICORE/Makefile src/hed/acc/Broker/Makefile src/hed/acc/Broker/test/Makefile src/hed/acc/PythonBroker/Makefile src/hed/acc/JobDescriptionParser/Makefile src/hed/acc/JobDescriptionParser/test/Makefile src/hed/acc/SER/Makefile src/hed/acc/ldap/Makefile src/hed/acc/TEST/Makefile src/hed/dmc/Makefile src/hed/dmc/file/Makefile src/hed/dmc/gridftp/Makefile src/hed/dmc/http/Makefile src/hed/dmc/ldap/Makefile src/hed/dmc/srm/Makefile 
src/hed/dmc/srm/srmclient/Makefile src/hed/dmc/gfal/Makefile src/hed/dmc/xrootd/Makefile src/hed/dmc/mock/Makefile src/hed/dmc/acix/Makefile src/hed/dmc/rucio/Makefile src/hed/dmc/s3/Makefile src/hed/profiles/general/general.xml src/hed/shc/Makefile src/hed/shc/arcpdp/Makefile src/hed/shc/arcpdp/schema/Makefile src/hed/shc/xacmlpdp/Makefile src/hed/shc/xacmlpdp/schema/Makefile src/hed/shc/delegationpdp/Makefile src/hed/shc/delegationpdp/schema/Makefile src/hed/shc/gaclpdp/Makefile src/hed/shc/pdpserviceinvoker/Makefile src/hed/shc/pdpserviceinvoker/schema/Makefile src/hed/shc/allowpdp/Makefile src/hed/shc/denypdp/Makefile src/hed/shc/simplelistpdp/Makefile src/hed/shc/simplelistpdp/schema/Makefile src/hed/shc/arcauthzsh/Makefile src/hed/shc/arcauthzsh/schema/Makefile src/hed/shc/usernametokensh/Makefile src/hed/shc/usernametokensh/schema/Makefile src/hed/shc/x509tokensh/Makefile src/hed/shc/x509tokensh/schema/Makefile src/hed/shc/samltokensh/Makefile src/hed/shc/samltokensh/schema/Makefile src/hed/shc/saml2sso_assertionconsumersh/Makefile src/hed/shc/delegationsh/Makefile src/hed/shc/delegationsh/schema/Makefile src/hed/shc/legacy/Makefile src/hed/shc/legacy/schema/Makefile src/hed/identitymap/Makefile src/hed/identitymap/schema/Makefile src/libs/Makefile src/libs/data-staging/Makefile src/libs/data-staging/test/Makefile src/libs/data-staging/examples/Makefile src/services/Makefile src/services/a-rex/Makefile src/services/a-rex/a-rex src/services/a-rex/a-rex.service src/services/a-rex/a-rex-start src/services/a-rex/a-rex-backtrace-collect src/services/a-rex/a-rex-backtrace-collect.8 src/services/a-rex/perferator src/services/a-rex/grid-manager/arc-vomsac-check.8 src/services/a-rex/grid-manager/arc-blahp-logger.8 src/services/a-rex/grid-manager/gm-jobs.8 src/services/a-rex/grid-manager/gm-delegations-converter.8 src/services/a-rex/delegation/Makefile src/services/a-rex/grid-manager/Makefile src/services/a-rex/grid-manager/conf/Makefile src/services/a-rex/grid-manager/files/Makefile src/services/a-rex/grid-manager/jobs/Makefile src/services/a-rex/grid-manager/jobplugin/Makefile src/services/a-rex/grid-manager/loaders/Makefile src/services/a-rex/grid-manager/log/Makefile src/services/a-rex/grid-manager/mail/Makefile src/services/a-rex/grid-manager/misc/Makefile src/services/a-rex/grid-manager/run/Makefile src/services/a-rex/grid-manager/arc-config-check.1 src/services/a-rex/infoproviders/Makefile src/services/a-rex/infoproviders/CEinfo.pl src/services/a-rex/infoproviders/PerfData.pl src/services/a-rex/infoproviders/test/Makefile src/services/a-rex/jura/Makefile src/services/a-rex/jura/jura.1 src/services/a-rex/jura/ssm/Makefile src/services/a-rex/ldif/Makefile src/services/a-rex/lrms/Makefile src/services/a-rex/lrms/submit_common.sh src/services/a-rex/lrms/scan_common.sh src/services/a-rex/lrms/condor/Makefile src/services/a-rex/lrms/condor/scan-condor-job src/services/a-rex/lrms/condor/cancel-condor-job src/services/a-rex/lrms/condor/submit-condor-job src/services/a-rex/lrms/fork/Makefile src/services/a-rex/lrms/fork/scan-fork-job src/services/a-rex/lrms/fork/submit-fork-job src/services/a-rex/lrms/fork/cancel-fork-job src/services/a-rex/lrms/ll/Makefile src/services/a-rex/lrms/ll/submit-ll-job src/services/a-rex/lrms/ll/cancel-ll-job src/services/a-rex/lrms/ll/scan-ll-job src/services/a-rex/lrms/lsf/Makefile src/services/a-rex/lrms/lsf/submit-lsf-job src/services/a-rex/lrms/lsf/cancel-lsf-job src/services/a-rex/lrms/lsf/scan-lsf-job src/services/a-rex/lrms/pbs/Makefile 
src/services/a-rex/lrms/pbs/submit-pbs-job src/services/a-rex/lrms/pbs/cancel-pbs-job src/services/a-rex/lrms/pbs/scan-pbs-job src/services/a-rex/lrms/pbs/configure-pbs-env.sh src/services/a-rex/lrms/sge/Makefile src/services/a-rex/lrms/sge/submit-sge-job src/services/a-rex/lrms/sge/scan-sge-job src/services/a-rex/lrms/sge/cancel-sge-job src/services/a-rex/lrms/slurm/Makefile src/services/a-rex/lrms/slurm/submit-SLURM-job src/services/a-rex/lrms/slurm/scan-SLURM-job src/services/a-rex/lrms/slurm/cancel-SLURM-job src/services/a-rex/lrms/dgbridge/Makefile src/services/a-rex/lrms/dgbridge/submit-DGBridge-job src/services/a-rex/lrms/dgbridge/scan-DGBridge-job src/services/a-rex/lrms/dgbridge/cancel-DGBridge-job src/services/a-rex/lrms/boinc/Makefile src/services/a-rex/lrms/boinc/submit-boinc-job src/services/a-rex/lrms/boinc/scan-boinc-job src/services/a-rex/lrms/boinc/cancel-boinc-job src/services/a-rex/schema/Makefile src/services/acix/Makefile src/services/acix/cacheserver/Makefile src/services/acix/cacheserver/acix-cache.service src/services/acix/cacheserver/test/Makefile src/services/acix/core/Makefile src/services/acix/core/test/Makefile src/services/acix/indexserver/Makefile src/services/acix/indexserver/acix-index.service src/services/acix/indexserver/test/Makefile src/services/cache_service/Makefile src/services/cache_service/arc-cache-service src/services/cache_service/arc-cache-service.service src/services/cache_service/arc-cache-service-start src/services/data-staging/Makefile src/services/data-staging/arc-datadelivery-service src/services/data-staging/arc-datadelivery-service.service src/services/data-staging/arc-datadelivery-service-start src/services/gridftpd/Makefile src/services/gridftpd/gridftpd.init src/services/gridftpd/gridftpd.service src/services/gridftpd/gridftpd-start src/services/gridftpd/gridftpd.8 src/services/gridftpd/auth/Makefile src/services/gridftpd/conf/Makefile src/services/gridftpd/misc/Makefile src/services/gridftpd/run/Makefile src/services/gridftpd/fileplugin/Makefile src/services/ldap-infosys/Makefile src/services/ldap-infosys/create-bdii-config src/services/ldap-infosys/create-inforeg-config src/services/ldap-infosys/create-slapd-config src/services/ldap-infosys/nordugrid-arc-bdii src/services/ldap-infosys/nordugrid-arc-egiis src/services/ldap-infosys/nordugrid-arc-inforeg src/services/ldap-infosys/nordugrid-arc-ldap-infosys src/services/ldap-infosys/nordugrid-arc-slapd src/services/ldap-infosys/giis/Makefile src/services/ldap-infosys/giis/arc-infoindex-relay.8 src/services/ldap-infosys/giis/arc-infoindex-server.8 src/services/ldap-monitor/Makefile src/services/ldap-monitor/ldap-monitor src/services/ldap-monitor/README src/services/ldap-monitor/man/Makefile src/services/ldap-monitor/man/ldap-monitor.7 src/services/ldap-monitor/includes/Makefile src/services/ldap-monitor/mon-icons/Makefile src/services/ldap-monitor/lang/Makefile src/services/ws-monitor/Makefile src/services/ws-monitor/ws-monitor src/services/ws-monitor/README src/services/ws-monitor/man/Makefile src/services/ws-monitor/man/ws-monitor.7 src/services/ws-monitor/includes/Makefile src/services/ws-monitor/mon-icons/Makefile src/services/ws-monitor/lang/Makefile src/services/examples/Makefile src/services/examples/echo_java/Makefile src/services/examples/echo_python/Makefile src/services/wrappers/Makefile src/services/wrappers/java/Makefile src/services/wrappers/java/schema/Makefile src/services/wrappers/python/Makefile src/services/wrappers/python/schema/Makefile src/clients/Makefile 
src/clients/data/Makefile src/clients/data/arccp.1 src/clients/data/arcls.1 src/clients/data/arcrm.1 src/clients/data/arcmkdir.1 src/clients/data/arcrename.1 src/clients/echo/Makefile src/clients/echo/arcecho.1 src/clients/credentials/Makefile src/clients/credentials/arcproxy.1 src/clients/saml/Makefile src/clients/saml/saml_assertion_init.1 src/clients/compute/Makefile src/clients/compute/arcstat.1 src/clients/compute/arcinfo.1 src/clients/compute/arcsub.1 src/clients/compute/arcclean.1 src/clients/compute/arckill.1 src/clients/compute/arcget.1 src/clients/compute/arccat.1 src/clients/compute/arcresub.1 src/clients/compute/arcsync.1 src/clients/compute/arcrenew.1 src/clients/compute/arcresume.1 src/clients/compute/arctest.1 src/clients/wsrf/arcwsrf.1 src/clients/wsrf/Makefile src/tests/Makefile src/tests/echo/Makefile src/tests/echo/perftest.1 src/tests/echo/echo_service.xml.example src/tests/echo/schema/Makefile src/tests/policy-delegation/Makefile src/tests/delegation/Makefile src/tests/translator/Makefile src/tests/xpath/Makefile src/tests/arcpolicy/Makefile src/tests/perf/Makefile src/tests/perf/arcperftest.1 src/tests/client/Makefile src/utils/hed/wsdl2hed.1 src/utils/hed/arcplugin.1 src/utils/hed/Makefile src/utils/gridmap/nordugridmap.cron src/utils/gridmap/nordugridmap.8 src/utils/gridmap/Makefile src/utils/Makefile src/doc/Makefile src/doc/arc.conf.5 swig/Makefile java/Makefile java/test/Makefile java/test/strip_test_file_name_and_run_junit java/examples/Makefile python/Makefile python/Doxyfile.api python/python/Makefile python/python/arc/Makefile python/altpython/Makefile python/altpython/arc/Makefile python/test/Makefile python/test/python/Makefile python/test/altpython/Makefile python/examples/Makefile po/Makefile.in include/Makefile debian/Makefile debian/changelog.deb nordugrid-arc.spec mingw-nordugrid-arc.spec src/hed/daemon/arched.8 src/hed/daemon/scripts/arched src/hed/daemon/scripts/arched.service src/hed/daemon/scripts/arched-start arcbase.pc nsis/Makefile nsis/arc.nsis src/doxygen/Makefile ]) AC_OUTPUT AC_MSG_RESULT([ Unit testing: ${enables_cppunit} Java binding: ${enables_swig_java} Python binding: ${enables_swig_python} ($PYTHON_VERSION) Alt.Python binding: ${enables_altpython} ($ALTPYTHON_VERSION) Available third-party features: GridFTP: ${enables_gridftp} GFAL: ${enables_gfal} S3: ${enables_s3} Xrootd: ${enables_xrootd} MYSQL CLIENT LIB: ${enables_mysql} LDAP: ${enables_ldap} xmlsec1: ${enables_xmlsec1} ARGUS: ${enables_argus} NSS: ${enables_nss} CANL++: ${enables_canlxx} BDB C++: ${enables_dbcxx} SQLite: ${enables_sqlite} LDNS: ${enables_ldns} Enabled features: Local jobs info in BDB: ${enables_dbjstore} Included components: HED: ${enables_hed} A-REX service: ${enables_a_rex_service} GRIDFTPD service: ${enables_gridftpd_service} LDAP Info service: ${enables_ldap_service} GIIS service: ${enables_giis_service} CACHE service: ${enables_cache_service} DATADELIVERY service: ${enables_datadelivery_service} ACIX service: ${enables_acix} COMPUTE clients: ${enables_compute_client} DATA clients: ${enables_data_client} CREDENTIAL clients: ${enables_credentials_client} ECHO client: ${enables_echo_client} JURA client: ${enables_jura_client} SAML VOMS client: ${enables_saml_client} WSRF client: ${enables_wsrf_client} UNICORE client (ACC): ${enables_unicore_client} EMI ES client (ACC): ${enables_emies_client} SRM client (DMC): ${enables_srm_dmc} Documentation: ${enables_doc} Monitoring: LDAP Monitor ${enables_ldap_monitor} WS Monitor ${enables_ws_monitor} ]) 
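# Every file listed in AC_CONFIG_FILES above is produced by config.status from a
# matching .in template, with @VARIABLE@ markers replaced by the values chosen
# during configure (posix_shell, nodename, DATE and so on).  A much simplified
# sketch of that substitution step, using a throw-away template of our own:
posix_shell=/bin/sh
nodename='/bin/hostname -f'
printf '#!@posix_shell@\nHOSTNAME=`@nodename@`\n' > example.in   # hypothetical template
sed -e "s|@posix_shell@|$posix_shell|g" \
    -e "s|@nodename@|$nodename|g" example.in > example
cat example   # prints a script starting with #!/bin/sh that runs /bin/hostname -f
rm -f example.in example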
nordugrid-arc-5.4.2/PaxHeaders.7502/AUTHORS0000644000000000000000000000012412504065715016252 xustar000000000000000027 mtime=1427139533.287816 27 atime=1513200574.158697 30 ctime=1513200658.611730507 nordugrid-arc-5.4.2/AUTHORS0000644000175000002070000000455312504065715016326 0ustar00mockbuildmock00000000000000 Individual contributors to the source code (2001-2014) ------------------------------------------------------ David Cameron Péter Dóbé Mattias Ellert Thomas Frågåt Ali Gholami Michael Glodek Jørgen Beck Hansen Henrik Thostrup Jensen Daniel Johansson Johan Jönemo Dmytro Karpenko Tamás Kazinczy Marek Kočan Aleksandr Konstantinov Balázs Kónya Hajo Nils Krabbenhöft Andrew Lahiff Juha Lento Peter Lundgaard Rosendahl Iván Márton Luca Mazzaferro Bjarte Mohn Steffen Möller Zsombor Nagy Aleksei Nazarov Jon Kerr Nilsen Markus Nordén Weizhong Qiang Gábor Rőczei Florido Paganelli Andrii Salnikov Martin Savko Martin Skou Andersen Oxana Smirnova Ferenc Szalai Gábor Szigeti Christian Ulrik Søttrup Adrian Taga Salman Zubair Toor Olli Tourunen Petter Urkedal Wenjing Wu Anders Wäänänen Thomas Zangerl Organisations employing contributors (2001-2014) ------------------------------------------------ University of Copenhagen (Denmark) NORDUnet - Nordic Infrastructure for Research and Education (Denmark) CSC - IT Center for Science Ltd (Finland) University of Lübeck (Germany) NIIFI - National Information Infrastructure Development Institute (Hungary) University of Oslo (Norway) NordForsk (Norway) Pavol Jozef Šafárik University in Košice (Slovakia) Linköping University (Sweden) Lund University (Sweden) Royal Institute of Technology (Sweden) Uppsala University (Sweden) Taras Shevchenko National University of Kyiv (Ukraine) nordugrid-arc-5.4.2/PaxHeaders.7502/LICENSE0000644000000000000000000000012412113212161016170 xustar000000000000000027 mtime=1361908849.952411 27 atime=1513200577.039732 30 ctime=1513200658.627730702 nordugrid-arc-5.4.2/LICENSE0000644000175000002070000002367612113212161016253 0ustar00mockbuildmock00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS nordugrid-arc-5.4.2/PaxHeaders.7502/java0000644000000000000000000000013213214316031016032 xustar000000000000000030 mtime=1513200665.051809271 30 atime=1513200668.716854096 30 ctime=1513200665.051809271 nordugrid-arc-5.4.2/java/0000755000175000002070000000000013214316031016155 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/java/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712222312540020146 xustar000000000000000027 mtime=1380554080.164649 30 atime=1513200592.060916565 30 ctime=1513200665.042809161 nordugrid-arc-5.4.2/java/Makefile.am0000644000175000002070000001020212222312540020203 0ustar00mockbuildmock00000000000000JAVA_PATH = nordugrid/arc JAVA_INTERFACE_SRC = EndpointConsumer.java JobConsumer.java ComputingServiceTypeConsumer.java JAVA_BUILD_PATH = $(top_builddir)/java/$(JAVA_PATH)/ JAVA_INTERFACE_SRC_MOVED = $(addprefix $(JAVA_BUILD_PATH), $(JAVA_INTERFACE_SRC)) jninative_LTLIBRARIES = libjarc.la jni_DATA = arc.jar if MACOSX SHREXT = -shrext .dylib else SHREXT = endif if JAVA_IS_15_OR_ABOVE SWIG_IS_JAVA_15_OR_ABOVE = -DJAVA_IS_15_OR_ABOVE else SWIG_IS_JAVA_15_OR_ABOVE = -DJAVA_IS_NOT_15_OR_ABOVE endif if DBJSTORE_ENABLED SWIG_IS_DBJSTORE_ENABLED = -DDBJSTORE_ENABLED else SWIG_IS_DBJSTORE_ENABLED = endif libjarc_la_SOURCES = arc_init.cpp arc_jni.h nodist_libjarc_la_SOURCES = arc_wrap.cpp libjarc_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(JDK_CFLAGS) $(ZLIB_CFLAGS) $(DBCXX_CPPFLAGS) \ -fno-strict-aliasing $(AM_CXXFLAGS) libjarc_la_LIBADD = \ $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(ZLIB_LIBS) $(DBCXX_LIBS) libjarc_la_LDFLAGS = -no-undefined 
-avoid-version -module $(SHREXT) SWIG_OUTPUTS = arc_wrap.cpp arc_wrap.h $(JAVA_PATH)/*.java CLEANFILES = $(SWIG_OUTPUTS) $(JAVA_PATH)/*.class arc.jar @AMDEP_TRUE@include ./$(DEPDIR)/arc_wrap.deps arc_wrap.cpp: $(top_srcdir)/swig/*.i mkdir -p $(DEPDIR) grep -h '^#' $(top_srcdir)/swig/*.i | \ $(CXXCOMPILE) $(libjarc_la_CXXFLAGS) -M -MT arc_wrap.cpp -MT $(JAVA_PATH)/arc.java -MP -MF "$(DEPDIR)/arc_wrap.deps" -x c++ - rm -rf $(JAVA_PATH) mkdir -p $(JAVA_PATH) $(SWIG) -v -c++ -java -nodefaultctor -nodefaultdtor -module arc \ -package nordugrid.arc -o arc_wrap.cpp \ -outdir $(JAVA_PATH) \ $(SWIG_IS_JAVA_15_OR_ABOVE) $(SWIG_IS_DBJSTORE_ENABLED) \ -I/usr/include -I$(top_srcdir)/include \ $(OPENSSL_CFLAGS) $(top_srcdir)/swig/Arc.i sed 's%#include %#include %' < \ arc_wrap.cpp > arc_wrap.cpp.tmp mv arc_wrap.cpp.tmp arc_wrap.cpp # When mapping a template with a template class argument no space is # inserted between the two right angle brackets. sed 's/>>(new/> >(new/g' arc_wrap.cpp > arc_wrap.cpp.tmp mv arc_wrap.cpp.tmp arc_wrap.cpp # When mapping a template with a template class argument no space is # inserted between the two right angle brackets. sed 's/>>(self->/> >(self->/g' arc_wrap.cpp > arc_wrap.cpp.tmp mv arc_wrap.cpp.tmp arc_wrap.cpp # When mapping a template with another template class as argument, and # that template class takes two classes as argument, then older swigs # put parentheses around the two class arguments, e.g. T<(A,B)>, not # valid syntax should be T instead. sed 's/<(\([,:[:alnum:]]*\))>/<\1>/g' arc_wrap.cpp > arc_wrap.cpp.tmp mv arc_wrap.cpp.tmp arc_wrap.cpp for javafile in `ls $(JAVA_PATH)/*.java` ; do \ echo >> $(DEPDIR)/arc_wrap.deps ; \ echo `sed s/.java$$/.class/ <<< $${javafile}`: $${javafile} \ >> $(DEPDIR)/arc_wrap.deps ; \ echo >> $(DEPDIR)/arc_wrap.deps ; \ echo arc.jar: `sed s/.java$$/.class/ <<< $${javafile}` \ >> $(DEPDIR)/arc_wrap.deps ; \ done $(JAVA_INTERFACE_SRC_MOVED): $(JAVA_BUILD_PATH)% : $(JAVA_INTERFACE_SRC) cp -p $(top_srcdir)/java/$* $@ $(JAVA_PATH)/*.java: arc_wrap.cpp $(JAVA_INTERFACE_SRC_MOVED) $(JAVA_PATH)/arc.class: $(JAVA_PATH)/arc.java $(JAVA_INTERFACE_SRC_MOVED) .java.class: $(JAVA_INTERFACE_SRC_MOVED) $(JAVAC) $(JAVAC_FLAGS) $< arc.jar: $(JAVA_PATH)/arc.class $(subst .java,.class,$(wildcard $(JAVA_PATH)/*.java)) rm -f $@ $(JAR) cf $@ $(JAVA_PATH)/*.class $(JAR_JFLAGS) DIST_SUBDIRS = test examples SUBDIRS = $(TEST_DIR) examples EXTRA_DIST = $(JAVA_INTERFACE_SRC) # Try not to build these objects in parallel in order to save memory .NOTPARALLEL: %.o %.class %.so nordugrid-arc-5.4.2/java/PaxHeaders.7502/arc_jni.h0000644000000000000000000000012411462265301017674 xustar000000000000000027 mtime=1288268481.804064 27 atime=1513200574.074696 30 ctime=1513200665.045809198 nordugrid-arc-5.4.2/java/arc_jni.h0000644000175000002070000000033211462265301017737 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #ifndef __JNI_INCLUDE_ARC__ #define __JNI_INCLUDE_ARC__ #ifdef HAVE_JNI_H #include #else #ifdef HAVE_JAVAVM_JNI_H #include #endif #endif #endif nordugrid-arc-5.4.2/java/PaxHeaders.7502/ComputingServiceTypeConsumer.java0000644000000000000000000000012412124602004024614 xustar000000000000000027 mtime=1364395012.873263 27 atime=1513200574.027696 30 ctime=1513200665.048809235 nordugrid-arc-5.4.2/java/ComputingServiceTypeConsumer.java0000644000175000002070000000017512124602004024664 0ustar00mockbuildmock00000000000000package nordugrid.arc; public interface ComputingServiceTypeConsumer { public void addEntity(ComputingServiceType e); } 
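# The sed post-processing in java/Makefile.am above exists because older SWIG
# releases emit ">>" for nested template arguments, which pre-C++11 compilers
# reject.  A standalone demo of the same substitution on an invented line of
# generated wrapper code:
echo 'result = new std::map<std::string,std::list<int>>(new_map);' \
  | sed 's/>>(new/> >(new/g'
# prints: result = new std::map<std::string,std::list<int> >(new_map);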
nordugrid-arc-5.4.2/java/PaxHeaders.7502/arc_init.cpp0000644000000000000000000000012411446704400020412 xustar000000000000000027 mtime=1285261568.493821 27 atime=1513200574.028696 30 ctime=1513200665.044809186 nordugrid-arc-5.4.2/java/arc_init.cpp0000644000175000002070000000163011446704400020457 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include __attribute__((constructor)) void __arc_init(void) { Glib::Module* module = NULL; std::string modulepath; #ifdef _MACOSX // According http://developer.apple.com/java/faq/development.html // java libraries have jnilib suffix on MacOSX modulepath = std::string("libjarc.")+".jnilib"; #else modulepath = std::string("libjarc.")+G_MODULE_SUFFIX; #endif #ifdef HAVE_GLIBMM_BIND_LOCAL module = new Glib::Module(modulepath,Glib::ModuleFlags(0)); #else module = new Glib::Module(modulepath); #endif if(module != NULL) { if(*module) return; delete module; }; Arc::Logger::getRootLogger().msg(Arc::WARNING,"Failed to export symbols of ARC java module"); } nordugrid-arc-5.4.2/java/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315720020160 xustar000000000000000030 mtime=1513200592.227918607 30 atime=1513200652.667657808 30 ctime=1513200665.043809173 nordugrid-arc-5.4.2/java/Makefile.in0000644000175000002070000011046313214315720020233 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
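# arc_init.cpp above re-opens the libjarc module with global symbol binding as soon
# as the JVM loads it, so that ARC plugins loaded later can resolve its symbols.
# From the Java side the binding is used roughly like this; the install paths and
# the application class name are illustrative only and depend on the chosen
# jnidir/jninativedir:
java -cp /usr/lib64/arc/java/arc.jar:. \
     -Djava.library.path=/usr/lib64/arc/java \
     MyArcExample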
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = java DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(jninativedir)" "$(DESTDIR)$(jnidir)" LTLIBRARIES = $(jninative_LTLIBRARIES) am__DEPENDENCIES_1 = libjarc_la_DEPENDENCIES = $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libjarc_la_OBJECTS = libjarc_la-arc_init.lo nodist_libjarc_la_OBJECTS = libjarc_la-arc_wrap.lo libjarc_la_OBJECTS = $(am_libjarc_la_OBJECTS) \ $(nodist_libjarc_la_OBJECTS) libjarc_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libjarc_la_CXXFLAGS) \ $(CXXFLAGS) 
$(libjarc_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libjarc_la_SOURCES) $(nodist_libjarc_la_SOURCES) DIST_SOURCES = $(libjarc_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive DATA = $(jni_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = 
@ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL 
= @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = 
@pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ JAVA_PATH = nordugrid/arc JAVA_INTERFACE_SRC = EndpointConsumer.java JobConsumer.java ComputingServiceTypeConsumer.java JAVA_BUILD_PATH = $(top_builddir)/java/$(JAVA_PATH)/ JAVA_INTERFACE_SRC_MOVED = $(addprefix $(JAVA_BUILD_PATH), $(JAVA_INTERFACE_SRC)) jninative_LTLIBRARIES = libjarc.la jni_DATA = arc.jar @MACOSX_FALSE@SHREXT = @MACOSX_TRUE@SHREXT = -shrext .dylib @JAVA_IS_15_OR_ABOVE_FALSE@SWIG_IS_JAVA_15_OR_ABOVE = -DJAVA_IS_NOT_15_OR_ABOVE @JAVA_IS_15_OR_ABOVE_TRUE@SWIG_IS_JAVA_15_OR_ABOVE = -DJAVA_IS_15_OR_ABOVE @DBJSTORE_ENABLED_FALSE@SWIG_IS_DBJSTORE_ENABLED = @DBJSTORE_ENABLED_TRUE@SWIG_IS_DBJSTORE_ENABLED = -DDBJSTORE_ENABLED libjarc_la_SOURCES = arc_init.cpp arc_jni.h nodist_libjarc_la_SOURCES = arc_wrap.cpp libjarc_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(JDK_CFLAGS) $(ZLIB_CFLAGS) $(DBCXX_CPPFLAGS) \ -fno-strict-aliasing $(AM_CXXFLAGS) libjarc_la_LIBADD = \ $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(ZLIB_LIBS) $(DBCXX_LIBS) libjarc_la_LDFLAGS = -no-undefined -avoid-version -module $(SHREXT) SWIG_OUTPUTS = arc_wrap.cpp arc_wrap.h $(JAVA_PATH)/*.java CLEANFILES = $(SWIG_OUTPUTS) $(JAVA_PATH)/*.class arc.jar DIST_SUBDIRS = test examples SUBDIRS = $(TEST_DIR) examples EXTRA_DIST = $(JAVA_INTERFACE_SRC) all: all-recursive .SUFFIXES: .SUFFIXES: .class .cpp .java .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign java/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign java/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-jninativeLTLIBRARIES: $(jninative_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(jninativedir)" || $(MKDIR_P) "$(DESTDIR)$(jninativedir)" @list='$(jninative_LTLIBRARIES)'; test -n "$(jninativedir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(jninativedir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(jninativedir)"; \ } uninstall-jninativeLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(jninative_LTLIBRARIES)'; test -n "$(jninativedir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(jninativedir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(jninativedir)/$$f"; \ done clean-jninativeLTLIBRARIES: -test -z "$(jninative_LTLIBRARIES)" || rm -f $(jninative_LTLIBRARIES) @list='$(jninative_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libjarc.la: $(libjarc_la_OBJECTS) $(libjarc_la_DEPENDENCIES) $(libjarc_la_LINK) -rpath $(jninativedir) $(libjarc_la_OBJECTS) $(libjarc_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libjarc_la-arc_init.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libjarc_la-arc_wrap.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< 
libjarc_la-arc_init.lo: arc_init.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjarc_la_CXXFLAGS) $(CXXFLAGS) -MT libjarc_la-arc_init.lo -MD -MP -MF $(DEPDIR)/libjarc_la-arc_init.Tpo -c -o libjarc_la-arc_init.lo `test -f 'arc_init.cpp' || echo '$(srcdir)/'`arc_init.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libjarc_la-arc_init.Tpo $(DEPDIR)/libjarc_la-arc_init.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arc_init.cpp' object='libjarc_la-arc_init.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjarc_la_CXXFLAGS) $(CXXFLAGS) -c -o libjarc_la-arc_init.lo `test -f 'arc_init.cpp' || echo '$(srcdir)/'`arc_init.cpp libjarc_la-arc_wrap.lo: arc_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjarc_la_CXXFLAGS) $(CXXFLAGS) -MT libjarc_la-arc_wrap.lo -MD -MP -MF $(DEPDIR)/libjarc_la-arc_wrap.Tpo -c -o libjarc_la-arc_wrap.lo `test -f 'arc_wrap.cpp' || echo '$(srcdir)/'`arc_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libjarc_la-arc_wrap.Tpo $(DEPDIR)/libjarc_la-arc_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arc_wrap.cpp' object='libjarc_la-arc_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjarc_la_CXXFLAGS) $(CXXFLAGS) -c -o libjarc_la-arc_wrap.lo `test -f 'arc_wrap.cpp' || echo '$(srcdir)/'`arc_wrap.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-jniDATA: $(jni_DATA) @$(NORMAL_INSTALL) test -z "$(jnidir)" || $(MKDIR_P) "$(DESTDIR)$(jnidir)" @list='$(jni_DATA)'; test -n "$(jnidir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(jnidir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(jnidir)" || exit $$?; \ done uninstall-jniDATA: @$(NORMAL_UNINSTALL) @list='$(jni_DATA)'; test -n "$(jnidir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(jnidir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(jnidir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
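A minimal sketch of option (2) from the Automake comment above, i.e. overriding `make' variables on the command line instead of editing the generated Makefiles; CXXFLAGS and JAVAC_FLAGS are variables defined in this Makefile, but the flag values shown are only illustrative assumptions.
    # Option (2): override variables for a single invocation on the command
    # line (the values shown are examples only, not project defaults).
    make CXXFLAGS="-O2 -g" JAVAC_FLAGS="-Xlint:deprecation"
    # Option (1): if a value is fixed in config.status, edit config.status in
    # the top build directory and let make regenerate the Makefiles.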
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(DATA) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(jninativedir)" "$(DESTDIR)$(jnidir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
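The install-strip rule above routes a stripped installation through the regular install target, and every install rule in this file honours DESTDIR. A minimal usage sketch, assuming a hypothetical staging root /tmp/arc-stage:
    # Stage a stripped install of the Java bindings under a scratch root
    # instead of the configured prefix (the path is illustrative).
    make install-strip DESTDIR=/tmp/arc-stage
    # The JNI library and arc.jar land under the configured jninativedir
    # and jnidir inside that root.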
clean: clean-recursive clean-am: clean-generic clean-jninativeLTLIBRARIES clean-libtool \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-jniDATA install-jninativeLTLIBRARIES install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-jniDATA uninstall-jninativeLTLIBRARIES .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic \ clean-jninativeLTLIBRARIES clean-libtool ctags ctags-recursive \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-jniDATA install-jninativeLTLIBRARIES \ install-man install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am uninstall-jniDATA \ uninstall-jninativeLTLIBRARIES @AMDEP_TRUE@include ./$(DEPDIR)/arc_wrap.deps arc_wrap.cpp: $(top_srcdir)/swig/*.i mkdir -p $(DEPDIR) grep -h '^#' $(top_srcdir)/swig/*.i | \ $(CXXCOMPILE) $(libjarc_la_CXXFLAGS) -M -MT arc_wrap.cpp -MT $(JAVA_PATH)/arc.java -MP -MF "$(DEPDIR)/arc_wrap.deps" -x c++ - rm -rf $(JAVA_PATH) mkdir -p $(JAVA_PATH) $(SWIG) -v -c++ -java -nodefaultctor -nodefaultdtor -module arc \ -package nordugrid.arc -o arc_wrap.cpp \ -outdir $(JAVA_PATH) \ $(SWIG_IS_JAVA_15_OR_ABOVE) $(SWIG_IS_DBJSTORE_ENABLED) \ -I/usr/include -I$(top_srcdir)/include \ $(OPENSSL_CFLAGS) $(top_srcdir)/swig/Arc.i sed 's%#include %#include %' < \ arc_wrap.cpp > arc_wrap.cpp.tmp mv arc_wrap.cpp.tmp arc_wrap.cpp # When mapping a template with a template class argument no space is # inserted between the two right angle brackets. sed 's/>>(new/> >(new/g' arc_wrap.cpp > arc_wrap.cpp.tmp mv arc_wrap.cpp.tmp arc_wrap.cpp # When mapping a template with a template class argument no space is # inserted between the two right angle brackets. sed 's/>>(self->/> >(self->/g' arc_wrap.cpp > arc_wrap.cpp.tmp mv arc_wrap.cpp.tmp arc_wrap.cpp # When mapping a template with another template class as argument, and # that template class takes two classes as argument, then older swigs # put parentheses around the two class arguments, e.g. T<(A,B)>, not # valid syntax, should be T<A,B> instead. 
sed 's/<(\([,:[:alnum:]]*\))>/<\1>/g' arc_wrap.cpp > arc_wrap.cpp.tmp mv arc_wrap.cpp.tmp arc_wrap.cpp for javafile in `ls $(JAVA_PATH)/*.java` ; do \ echo >> $(DEPDIR)/arc_wrap.deps ; \ echo `sed s/.java$$/.class/ <<< $${javafile}`: $${javafile} \ >> $(DEPDIR)/arc_wrap.deps ; \ echo >> $(DEPDIR)/arc_wrap.deps ; \ echo arc.jar: `sed s/.java$$/.class/ <<< $${javafile}` \ >> $(DEPDIR)/arc_wrap.deps ; \ done $(JAVA_INTERFACE_SRC_MOVED): $(JAVA_BUILD_PATH)% : $(JAVA_INTERFACE_SRC) cp -p $(top_srcdir)/java/$* $@ $(JAVA_PATH)/*.java: arc_wrap.cpp $(JAVA_INTERFACE_SRC_MOVED) $(JAVA_PATH)/arc.class: $(JAVA_PATH)/arc.java $(JAVA_INTERFACE_SRC_MOVED) .java.class: $(JAVA_INTERFACE_SRC_MOVED) $(JAVAC) $(JAVAC_FLAGS) $< arc.jar: $(JAVA_PATH)/arc.class $(subst .java,.class,$(wildcard $(JAVA_PATH)/*.java)) rm -f $@ $(JAR) cf $@ $(JAVA_PATH)/*.class $(JAR_JFLAGS) # Try not to build these objects in parallel in order to save memory .NOTPARALLEL: %.o %.class %.so # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/java/PaxHeaders.7502/test0000644000000000000000000000013013214316031017007 xustar000000000000000029 mtime=1513200665.07380954 30 atime=1513200668.716854096 29 ctime=1513200665.07380954 nordugrid-arc-5.4.2/java/test/0000755000175000002070000000000013214316031017134 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/java/test/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712075622673021146 xustar000000000000000027 mtime=1358374331.920823 30 atime=1513200592.310919622 30 ctime=1513200665.070809504 nordugrid-arc-5.4.2/java/test/Makefile.am0000644000175000002070000000126112075622673021210 0ustar00mockbuildmock00000000000000JAVAUNITTESTS = \ MappingOf_time_t_and_uint32_t_CTypesToJavaRegressionTest nodist_TESTSCRIPTS = \ $(JAVAUNITTESTS:=.class) if JUNIT_ENABLED CLASSPATH = $(top_builddir)/java/arc.jar:.:/usr/share/java/junit.jar TESTS_ENVIRONMENT = \ ARC_PLUGIN_PATH=$(top_builddir)/src/hed/acc/TEST/.libs \ CLASSPATH=$(CLASSPATH) \ JAVA_FLAGS="$(JAVA_FLAGS)" \ LD_LIBRARY_PATH=$(top_builddir)/java/.libs \ $(SHELL) $(top_builddir)/java/test/strip_test_file_name_and_run_junit TESTS = $(nodist_TESTSCRIPTS) else CLASSPATH = TESTS = endif check_SCRIPTS = $(nodist_TESTSCRIPTS) CLEANFILES = *.class .java.class: $(JAVAC) $(JAVAC_FLAGS) -cp $(CLASSPATH) -d . $< EXTRA_DIST = $(JAVAUNITTESTS:=.java) nordugrid-arc-5.4.2/java/test/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315720021137 xustar000000000000000030 mtime=1513200592.343920025 30 atime=1513200652.684658016 30 ctime=1513200665.071809516 nordugrid-arc-5.4.2/java/test/Makefile.in0000644000175000002070000004737713214315720021227 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
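The java/test/Makefile.am above wires the JUnit regression test into `make check' through TESTS_ENVIRONMENT and the strip_test_file_name_and_run_junit wrapper. A minimal sketch of the equivalent manual invocation from the top build directory, assuming junit.jar is in /usr/share/java exactly as that Makefile.am does (paths are taken from the build files, not re-verified here):
    # Compile the regression test against the generated bindings plus JUnit,
    # then run it with the JNI library and the test plugins on the search
    # paths, mirroring TESTS_ENVIRONMENT in java/test/Makefile.am.
    CLASSPATH=java/arc.jar:.:/usr/share/java/junit.jar
    javac -cp "$CLASSPATH" -d . \
        java/test/MappingOf_time_t_and_uint32_t_CTypesToJavaRegressionTest.java
    ARC_PLUGIN_PATH=src/hed/acc/TEST/.libs \
    LD_LIBRARY_PATH=java/.libs \
    java -cp "$CLASSPATH" junit.textui.TestRunner \
        MappingOf_time_t_and_uint32_t_CTypesToJavaRegressionTest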
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ @JUNIT_ENABLED_TRUE@TESTS = $(am__EXEEXT_2) subdir = java/test DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/strip_test_file_name_and_run_junit.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = strip_test_file_name_and_run_junit CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__tty_colors = \ red=; grn=; lgn=; blu=; std= am__EXEEXT_1 = MappingOf_time_t_and_uint32_t_CTypesToJavaRegressionTest.class am__EXEEXT_2 = $(am__EXEEXT_1) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = 
@CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ 
PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ JAVAUNITTESTS = \ 
MappingOf_time_t_and_uint32_t_CTypesToJavaRegressionTest nodist_TESTSCRIPTS = \ $(JAVAUNITTESTS:=.class) @JUNIT_ENABLED_FALSE@CLASSPATH = @JUNIT_ENABLED_TRUE@CLASSPATH = $(top_builddir)/java/arc.jar:.:/usr/share/java/junit.jar @JUNIT_ENABLED_TRUE@TESTS_ENVIRONMENT = \ @JUNIT_ENABLED_TRUE@ ARC_PLUGIN_PATH=$(top_builddir)/src/hed/acc/TEST/.libs \ @JUNIT_ENABLED_TRUE@ CLASSPATH=$(CLASSPATH) \ @JUNIT_ENABLED_TRUE@ JAVA_FLAGS="$(JAVA_FLAGS)" \ @JUNIT_ENABLED_TRUE@ LD_LIBRARY_PATH=$(top_builddir)/java/.libs \ @JUNIT_ENABLED_TRUE@ $(SHELL) $(top_builddir)/java/test/strip_test_file_name_and_run_junit check_SCRIPTS = $(nodist_TESTSCRIPTS) CLEANFILES = *.class EXTRA_DIST = $(JAVAUNITTESTS:=.java) all: all-am .SUFFIXES: .SUFFIXES: .class .java $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign java/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign java/test/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): strip_test_file_name_and_run_junit: $(top_builddir)/config.status $(srcdir)/strip_test_file_name_and_run_junit.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags: TAGS TAGS: ctags: CTAGS CTAGS: check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_SCRIPTS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: all all-am check check-TESTS check-am clean clean-generic \ clean-libtool distclean distclean-generic distclean-libtool \ distdir dvi dvi-am html html-am info info-am install \ install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am .java.class: $(JAVAC) $(JAVAC_FLAGS) -cp $(CLASSPATH) -d . $< # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/java/test/PaxHeaders.7502/MappingOf_time_t_and_uint32_t_CTypesToJavaRegressionTe0000644000000000000000000000012212075221710031533 xustar000000000000000026 mtime=1358242760.99832 27 atime=1513200574.157697 29 ctime=1513200665.07380954 nordugrid-arc-5.4.2/java/test/MappingOf_time_t_and_uint32_t_CTypesToJavaRegressionTest.java0000644000175000002070000000712312075221710033074 0ustar00mockbuildmock00000000000000import java.lang.reflect.Method; import java.lang.reflect.Constructor; import junit.framework.Test; import junit.framework.TestCase; import junit.framework.TestSuite; import junit.framework.Assert.*; import nordugrid.arc.Time; import nordugrid.arc.Period; public class MappingOf_time_t_and_uint32_t_CTypesToJavaRegressionTest extends TestCase { public void checkMappedTypeOf_time_t_CType() { try { Class[] types = new Class[1]; types[0] = long.class; Time.class.getConstructor(types); } catch (Exception e) { fail("In class Time: Constructor taking type \"long\" as argument was expected to exist but it was not found."); } try { assertEquals("In class Time: Method \"GetTime\" was expected to return a value of type \"long\".", Long.TYPE, Time.class.getMethod("GetTime", new Class[0]).getReturnType()); } catch (Exception e) { fail("In class Time: Method \"GetTime\" was expected to exist but it was not found."); } try { Class[] types = new Class[1]; types[0] = long.class; Period.class.getConstructor(types); } catch (Exception e) { fail("In class Period: Constructor taking type \"long\" as argument was expected to exist but it was not found."); } try { assertEquals("In class Period: Method \"GetPeriod\" was expected to return a value of type \"long\".", Long.TYPE, Period.class.getMethod("GetPeriod", new Class[0]).getReturnType()); } catch (Exception e) { fail("In class Period: Method \"GetPeriod\" was expected to exist but it was not found."); } } public void checkMappedTypeOf_uint32_t_CType() { try { Class[] types = new Class[2]; types[0] = long.class; types[1] = int.class; Time.class.getConstructor(types); } catch (Exception e) { fail("In class Time: Constructor taking types \"(long, int)\" as argument was expected to exist but it was not found."); } try { assertEquals("In class Time: Method \"GetTimeNanoseconds\" was expected to return a value of type \"long\".", Long.TYPE, Time.class.getMethod("GetTimeNanoseconds", new Class[0]).getReturnType()); } catch (Exception e) { fail("In class Time: Method \"GetTimeNanoseconds\" was expected to exist but it was not found."); } try { Class[] types = new Class[2]; types[0] = long.class; types[1] = int.class; Period.class.getConstructor(types); } catch (Exception e) { fail("In class Period: Constructor taking types \"(long, int)\" as argument was expected to exist but it was not found."); } try { assertEquals("In class Period: Method \"GetPeriodNanoseconds\" was expected to return a value of type \"long\".", Long.TYPE, Period.class.getMethod("GetPeriodNanoseconds", new Class[0]).getReturnType()); } catch (Exception e) { fail("In class Period: Method \"GetPeriodNanoseconds\" was expected to exist but it was not found."); } } public static Test suite() { TestSuite suite = new TestSuite(); suite.addTest( new MappingOf_time_t_and_uint32_t_CTypesToJavaRegressionTest() { protected void runTest() { checkMappedTypeOf_time_t_CType(); checkMappedTypeOf_uint32_t_CType(); } } ); return suite; } } nordugrid-arc-5.4.2/java/test/PaxHeaders.7502/strip_test_file_name_and_run_junit.in0000644000000000000000000000012712075600273026550 
xustar000000000000000027 mtime=1358364859.979204 30 atime=1513200652.700658212 30 ctime=1513200665.072809528 nordugrid-arc-5.4.2/java/test/strip_test_file_name_and_run_junit.in0000644000175000002070000000043712075600273026616 0ustar00mockbuildmock00000000000000#!/bin/bash export ARC_PLUGIN_PATH=${ARC_PLUGIN_PATH} export CLASSPATH=${CLASSPATH} export LD_LIBRARY_PATH=${LD_LIBRARY_PATH} # Remove leading './' from first argument class=${1#./} # Remove trailing '.class' class=${class%.class} @JAVA@ ${JAVA_FLAGS} junit.textui.TestRunner ${class} nordugrid-arc-5.4.2/java/PaxHeaders.7502/JobConsumer.java0000644000000000000000000000012412124602004021176 xustar000000000000000027 mtime=1364395012.873263 27 atime=1513200574.027696 30 ctime=1513200665.047809222 nordugrid-arc-5.4.2/java/JobConsumer.java0000644000175000002070000000013312124602004021240 0ustar00mockbuildmock00000000000000package nordugrid.arc; public interface JobConsumer { public void addEntity(Job e); } nordugrid-arc-5.4.2/java/PaxHeaders.7502/examples0000644000000000000000000000013213214316031017650 xustar000000000000000030 mtime=1513200665.098809846 30 atime=1513200668.716854096 30 ctime=1513200665.098809846 nordugrid-arc-5.4.2/java/examples/0000755000175000002070000000000013214316031017773 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/java/examples/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712132017616021772 xustar000000000000000027 mtime=1365778318.355138 30 atime=1513200592.243918802 30 ctime=1513200665.094809797 nordugrid-arc-5.4.2/java/examples/Makefile.am0000644000175000002070000000101012132017616022024 0ustar00mockbuildmock00000000000000if JAVA_IS_15_OR_ABOVE JAVA_15_EXAMPLES = ResourceDiscovery.java else JAVA_15_EXAMPLES = endif EXAMPLES = BasicJobSubmission.java DTRGenerator.java $(JAVA_15_EXAMPLES) if JAVA_SWIG_ENABLED CLASSPATH = $(top_builddir)/java/arc.jar:. TESTS_ENVIRONMENT = $(JAVAC) $(JAVAC_FLAGS) -cp $(CLASSPATH) -d . TESTS = $(EXAMPLES) else CLASSPATH = TESTS = endif check_SCRIPTS = $(EXAMPLES) CLEANFILES = *.class exampledir = $(pkgdatadir)/examples/sdk example_DATA = $(EXAMPLES) EXTRA_DIST = $(EXAMPLES) ResourceDiscovery.java nordugrid-arc-5.4.2/java/examples/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315720021776 xustar000000000000000030 mtime=1513200592.294919426 30 atime=1513200652.715658395 30 ctime=1513200665.095809809 nordugrid-arc-5.4.2/java/examples/Makefile.in0000644000175000002070000005237313214315720022056 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
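java/examples/Makefile.am above compiles each SDK example against arc.jar as its check step. A minimal sketch of the same compilation done by hand from the top directory of a non-VPATH build (the example name comes from EXAMPLES above; run-time details are assumptions, not verified):
    # Compile an SDK example exactly as the examples Makefile's check rule
    # does: javac against the generated arc.jar.
    javac -cp java/arc.jar:. -d . java/examples/BasicJobSubmission.java
    # Running the resulting class additionally needs the native libjarc
    # visible to the JVM, e.g. LD_LIBRARY_PATH=java/.libs as in the JUnit
    # test setup (assumption: same loading mechanism as the tests).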
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ @JAVA_SWIG_ENABLED_TRUE@TESTS = $(am__EXEEXT_2) subdir = java/examples DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(exampledir)" DATA = $(example_DATA) am__tty_colors = \ red=; grn=; lgn=; blu=; std= @JAVA_IS_15_OR_ABOVE_TRUE@am__EXEEXT_1 = ResourceDiscovery.java am__EXEEXT_2 = BasicJobSubmission.java DTRGenerator.java \ $(am__EXEEXT_1) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS 
= @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = 
@LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ 
mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @JAVA_IS_15_OR_ABOVE_FALSE@JAVA_15_EXAMPLES = @JAVA_IS_15_OR_ABOVE_TRUE@JAVA_15_EXAMPLES = ResourceDiscovery.java EXAMPLES = BasicJobSubmission.java DTRGenerator.java $(JAVA_15_EXAMPLES) @JAVA_SWIG_ENABLED_FALSE@CLASSPATH = @JAVA_SWIG_ENABLED_TRUE@CLASSPATH = $(top_builddir)/java/arc.jar:. @JAVA_SWIG_ENABLED_TRUE@TESTS_ENVIRONMENT = $(JAVAC) $(JAVAC_FLAGS) -cp $(CLASSPATH) -d . check_SCRIPTS = $(EXAMPLES) CLEANFILES = *.class exampledir = $(pkgdatadir)/examples/sdk example_DATA = $(EXAMPLES) EXTRA_DIST = $(EXAMPLES) ResourceDiscovery.java all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign java/examples/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign java/examples/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-exampleDATA: $(example_DATA) @$(NORMAL_INSTALL) test -z "$(exampledir)" || $(MKDIR_P) "$(DESTDIR)$(exampledir)" @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(exampledir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(exampledir)" || exit $$?; \ done uninstall-exampleDATA: @$(NORMAL_UNINSTALL) @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(exampledir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(exampledir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_SCRIPTS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(exampledir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-exampleDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-exampleDATA .MAKE: check-am install-am install-strip .PHONY: all all-am check check-TESTS check-am clean clean-generic \ clean-libtool distclean distclean-generic distclean-libtool \ distdir dvi dvi-am html html-am info info-am install \ install-am install-data install-data-am install-dvi \ install-dvi-am install-exampleDATA install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \ uninstall-exampleDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/java/examples/PaxHeaders.7502/DTRGenerator.java0000644000000000000000000000012412116062401023070 xustar000000000000000027 mtime=1362650369.397264 27 atime=1513200574.123697 30 ctime=1513200665.097809834 nordugrid-arc-5.4.2/java/examples/DTRGenerator.java0000644000175000002070000000654612116062401023150 0ustar00mockbuildmock00000000000000// // The nordugrid-arc-java package is required. To compile and run this example: // // export CLASSPATH=/usr/lib64/java/arc.jar:. 
// export LD_LIBRARY_PATH=/usr/lib64/java // javac DTRGenerator.java // java DTRGenerator /bin/ls /tmp/dtrtest // // The PATHs above may vary depending on ARC install location and system // architecture. import nordugrid.arc.*; // For the sake of brevity in this example import everything from arc // Implementation of DTR Generator. // Cannot inherit from DTRCallback as it is a pure virtual class and swig does not // create a default constructor, so extend Scheduler which inherits from DTRCallback. class DTRGenerator extends Scheduler { private Logger logger; private LogDestination logdest; private SimpleCondition cond; // Create a new Generator and set up logging to stdout public DTRGenerator() { logger = new Logger(Logger.getRootLogger(), "Generator"); logdest = new LogStream_ostream(arc.getStdout()); Logger.getRootLogger().addDestination(logdest); Logger.getRootLogger().setThreshold(LogLevel.DEBUG); cond = new SimpleCondition(); } // Implementation of callback from DTRCallback public void receiveDTR(DTRPointer dtr) { // root logger is disabled in Scheduler thread so need to add it here Logger.getRootLogger().addDestination(logdest); logger.msg(LogLevel.INFO, "Received DTR " + dtr.get_id() + " in state " + dtr.get_status().str()); Logger.getRootLogger().removeDestinations(); cond.signal(); } // Run the transfer and wait for the callback on completion private void run(final String source, final String dest) { // Set log level for DTR (must be done before starting Scheduler) DTR.setLOG_LEVEL(LogLevel.DEBUG); // Start Scheduler thread Scheduler scheduler = new Scheduler(); scheduler.start(); // UserConfig contains information such as the location of credentials UserConfig cfg = new UserConfig(); // The ID can be used to group DTRs together String id = "1234"; // Logger for DTRs DTRLogger dtrlog = arc.createDTRLogger(Logger.getRootLogger(), "DTR"); dtrlog.addDestination(logdest); // Use current user's uid for the transfer User user = new User(); // Create a DTR DTRPointer dtr = arc.createDTRPtr(source, dest, cfg, id, user.get_uid(), dtrlog); logger.msg(LogLevel.INFO, "Created DTR "+ dtr.get_id()); // Register this callback in order to receive completed DTRs dtr.registerCallback(this, StagingProcesses.GENERATOR); // This line must be here in order to pass the DTR to the Scheduler dtr.registerCallback(scheduler, StagingProcesses.SCHEDULER); // Push the DTR to the Scheduler DTR.push(dtr, StagingProcesses.SCHEDULER); // Wait until callback is called // Note: SimpleCondition.wait() is renamed to _wait() as wait() is a java.lang.Object method cond._wait(); // DTR is finished, so stop Scheduler scheduler.stop(); } public static void main(String[] args) { if (args.length != 2) { System.out.println("Usage: java DTRGenerator source destination"); return; } DTRGenerator gen = new DTRGenerator(); gen.run(args[0], args[1]); } } nordugrid-arc-5.4.2/java/examples/PaxHeaders.7502/ResourceDiscovery.java0000644000000000000000000000012412124602004024245 xustar000000000000000027 mtime=1364395012.873263 27 atime=1513200574.075696 30 ctime=1513200665.098809846 nordugrid-arc-5.4.2/java/examples/ResourceDiscovery.java0000644000175000002070000000414412124602004024315 0ustar00mockbuildmock00000000000000// // The nordugrid-arc-java package is required. To compile and run this example: // // export CLASSPATH=/usr/lib64/java/arc.jar:. 
// export LD_LIBRARY_PATH=/usr/lib64/java // javac ResourceDiscovery.java // java ResourceDiscovery ldap://index1.nordugrid.org/Mds-vo-name=NorduGrid,o=grid // // The PATHs above may vary depending on ARC install location and system // architecture. import nordugrid.arc.*; // For the sake of brevity in this example import everything from arc public class ResourceDiscovery implements ComputingServiceTypeConsumer { public int i; public ResourceDiscovery() { i = 0; } public static void main(String[] args) { // Set up logging to stderr with level VERBOSE (a lot of output will be shown) LogStream_ostream logstdout = new LogStream_ostream(nordugrid.arc.arc.getStdout()); logstdout.setFormat(nordugrid.arc.LogFormat.ShortFormat); Logger.getRootLogger().addDestination(logstdout); Logger.getRootLogger().setThreshold(nordugrid.arc.LogLevel.VERBOSE); Logger logger = new Logger(Logger.getRootLogger(), "resourcediscovery"); // Create Endpoint object from the passed argument (registry or index service) Endpoint e = new Endpoint(args[0], Endpoint.CapabilityEnum.REGISTRY); // This object holds various attributes, including proxy location and selected services. UserConfig uc = new UserConfig(""); // Example Java consumer ResourceDiscovery rd = new ResourceDiscovery(); // Create a instance for discovering computing services at the registry service. ComputingServiceRetriever csr = new ComputingServiceRetriever(uc); csr.addConsumer(rd); csr.addEndpoint(e); // Add endpoint ... which initiates discovery. csr._wait(); // Wait for results to be retrieved. for (ComputingServiceType cst : csr) { System.out.println(cst); } System.out.println(rd.i + " services found."); } public void addEntity(ComputingServiceType cst) { i += 1; } } nordugrid-arc-5.4.2/java/examples/PaxHeaders.7502/BasicJobSubmission.java0000644000000000000000000000012412721014565024332 xustar000000000000000027 mtime=1464080757.755156 27 atime=1513200574.075696 30 ctime=1513200665.096809822 nordugrid-arc-5.4.2/java/examples/BasicJobSubmission.java0000644000175000002070000000565012721014565024405 0ustar00mockbuildmock00000000000000import nordugrid.arc.Logger; import nordugrid.arc.LogStream_ostream; import nordugrid.arc.UserConfig; import nordugrid.arc.Endpoint; import nordugrid.arc.EndpointList; import nordugrid.arc.Job; import nordugrid.arc.JobList; import nordugrid.arc.JobDescription; import nordugrid.arc.JobDescriptionList; import nordugrid.arc.Submitter; import nordugrid.arc.JobInformationStorageXML; public class BasicJobSubmission { public static void main(String argv[]) { // Set up logging to stderr with level VERBOSE (a lot of output will be shown) LogStream_ostream logstdout = new LogStream_ostream(nordugrid.arc.arc.getStdout()); logstdout.setFormat(nordugrid.arc.LogFormat.ShortFormat); Logger.getRootLogger().addDestination(logstdout); Logger.getRootLogger().setThreshold(nordugrid.arc.LogLevel.VERBOSE); Logger logger = new Logger(Logger.getRootLogger(), "jobsubmit"); // UserConfig contains information on credentials and default services to use. // This form of the constructor is necessary to initialise the local job list. 
UserConfig usercfg = new UserConfig("", ""); // Simple job description which outputs hostname to stdout String jobdesc = "&(executable=/bin/hostname)(stdout=stdout)"; // Parse job description JobDescriptionList jobdescs = new JobDescriptionList(); if (!JobDescription.Parse(jobdesc, jobdescs).toBool()) { logger.msg(nordugrid.arc.LogLevel.ERROR, "Invalid job description"); System.exit(1); } /* * Use 'JobDescription.ParseFromFile("helloworld.xrsl", jobdescs)' * to parse job description from file. */ // Use top-level NorduGrid information index to find resources Endpoint index = new Endpoint("ldap://index1.nordugrid.org:2135/Mds-Vo-name=NorduGrid,o=grid", Endpoint.CapabilityEnum.REGISTRY, "org.nordugrid.ldapegiis"); EndpointList services = new EndpointList(); services.add(index); // Do the submission JobList jobs = new JobList(); Submitter submitter = new Submitter(usercfg); if (!submitter.BrokeredSubmit(services, jobdescs, jobs).equals(nordugrid.arc.SubmissionStatus.SubmissionStatusType.NONE)) { logger.msg(nordugrid.arc.LogLevel.ERROR, "Failed to submit job"); System.exit(1); } // Write information on submitted job to local job list (~/.arc/jobs.xml) JobInformationStorageXML jobList = new JobInformationStorageXML(usercfg.JobListFile()); if (!jobList.Write(jobs)) { logger.msg(nordugrid.arc.LogLevel.WARNING, "Failed to write to local job list " + usercfg.JobListFile()); } // Job submitted ok System.out.println("Job submitted with job id " + jobs.begin().next().getJobID()); return; } } nordugrid-arc-5.4.2/java/PaxHeaders.7502/EndpointConsumer.java0000644000000000000000000000012312124602004022243 xustar000000000000000027 mtime=1364395012.873263 27 atime=1513200574.074696 29 ctime=1513200665.04680921 nordugrid-arc-5.4.2/java/EndpointConsumer.java0000644000175000002070000000014512124602004022311 0ustar00mockbuildmock00000000000000package nordugrid.arc; public interface EndpointConsumer { public void addEntity(Endpoint e); } nordugrid-arc-5.4.2/java/PaxHeaders.7502/README0000644000000000000000000000012410621372302016772 xustar000000000000000027 mtime=1178989762.348759 27 atime=1513200574.075696 30 ctime=1513200665.041809149 nordugrid-arc-5.4.2/java/README0000644000175000002070000000025410621372302017040 0ustar00mockbuildmock00000000000000XMLNode.h: - Identifier warning for friend methods in a namespace (XMLMatchName) https://sourceforge.net/tracker/?func=detail&atid=101645&aid=1692787&group_id=1645 nordugrid-arc-5.4.2/PaxHeaders.7502/py-compile0000644000000000000000000000013213214315733017177 xustar000000000000000030 mtime=1513200603.513056628 30 atime=1513200603.513056628 30 ctime=1513200658.621730629 nordugrid-arc-5.4.2/py-compile0000755000175000002070000001013513214315733017250 0ustar00mockbuildmock00000000000000#!/bin/sh # py-compile - Compile a Python program scriptversion=2009-04-28.21; # UTC # Copyright (C) 2000, 2001, 2003, 2004, 2005, 2008, 2009 Free Software # Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # This file is maintained in Automake, please report # bugs to or send patches to # . if [ -z "$PYTHON" ]; then PYTHON=python fi basedir= destdir= files= while test $# -ne 0; do case "$1" in --basedir) basedir=$2 if test -z "$basedir"; then echo "$0: Missing argument to --basedir." 1>&2 exit 1 fi shift ;; --destdir) destdir=$2 if test -z "$destdir"; then echo "$0: Missing argument to --destdir." 1>&2 exit 1 fi shift ;; -h|--h*) cat <<\EOF Usage: py-compile [--help] [--version] [--basedir DIR] [--destdir DIR] FILES..." Byte compile some python scripts FILES. Use --destdir to specify any leading directory path to the FILES that you don't want to include in the byte compiled file. Specify --basedir for any additional path information you do want to be shown in the byte compiled file. Example: py-compile --destdir /tmp/pkg-root --basedir /usr/share/test test.py test2.py Report bugs to . EOF exit $? ;; -v|--v*) echo "py-compile $scriptversion" exit $? ;; *) files="$files $1" ;; esac shift done if test -z "$files"; then echo "$0: No files given. Try \`$0 --help' for more information." 1>&2 exit 1 fi # if basedir was given, then it should be prepended to filenames before # byte compilation. if [ -z "$basedir" ]; then pathtrans="path = file" else pathtrans="path = os.path.join('$basedir', file)" fi # if destdir was given, then it needs to be prepended to the filename to # byte compile but not go into the compiled file. if [ -z "$destdir" ]; then filetrans="filepath = path" else filetrans="filepath = os.path.normpath('$destdir' + os.sep + path)" fi $PYTHON -c " import sys, os, py_compile files = '''$files''' sys.stdout.write('Byte-compiling python modules...\n') for file in files.split(): $pathtrans $filetrans if not os.path.exists(filepath) or not (len(filepath) >= 3 and filepath[-3:] == '.py'): continue sys.stdout.write(file) sys.stdout.flush() py_compile.compile(filepath, filepath + 'c', path) sys.stdout.write('\n')" || exit $? # this will fail for python < 1.5, but that doesn't matter ... 
$PYTHON -O -c " import sys, os, py_compile files = '''$files''' sys.stdout.write('Byte-compiling python modules (optimized versions) ...\n') for file in files.split(): $pathtrans $filetrans if not os.path.exists(filepath) or not (len(filepath) >= 3 and filepath[-3:] == '.py'): continue sys.stdout.write(file) sys.stdout.flush() py_compile.compile(filepath, filepath + 'o', path) sys.stdout.write('\n')" 2>/dev/null || : # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: nordugrid-arc-5.4.2/PaxHeaders.7502/nordugrid-arc.SlackBuild0000644000000000000000000000012411513564546021706 xustar000000000000000027 mtime=1294920038.448863 27 atime=1513200573.741692 30 ctime=1513200658.625730678 nordugrid-arc-5.4.2/nordugrid-arc.SlackBuild0000755000175000002070000000267611513564546021771 0ustar00mockbuildmock00000000000000#!/bin/sh PACKAGE="nordugrid-arc" if [ -z "$VERSION" ] ; then VERSION=`grep AC_INIT configure.ac | head -n 1 | sed 's/^[^(]*(\([^)]*\)).*/\1/;t exit;s/.*//;:exit' | sed 's/^[^,]*,\([^,]*\).*/\1/;t exit;s/.*//;:exit' | sed 's/^\[\([^]]*\).*/\1/'` fi VERSION=${VERSION:=trunk} ARCH=${ARCH:=i486} BUILD=${BUILD:=1} CONFIGOPT=${CONFIGOPT:=} TMP=${TMP:=/tmp} SRCDIR=`pwd` BASEDIR=`mktemp -d "$TMP/arc.XXXXXX"` BUILDDIR="$BASEDIR/build" INSTALLDIR="$BASEDIR/install" mkdir "$BUILDDIR" mkdir "$INSTALLDIR" cd "$BUILDDIR" # We can't build ARC outside source yet. So just # copy it to have clean environment and continue. #"$SRCDIR/configure" --prefix=/usr $CONFIGOPT cp -r $SRCDIR/* "$BUILDDIR" ./configure --prefix=/usr $CONFIGOPT make make install "DESTDIR=$INSTALLDIR" mkdir "$INSTALLDIR/install" cat <"$INSTALLDIR/install/slack-desc" |-----handy-ruler------------------------------------------------------| nordugrid-arc: The Advanced Resource Connector (ARC) middleware, introduced by the nordugrid-arc: NorduGrid Collaboration (www.nordugrid.org), is an open source software nordugrid-arc: solution enabling production quality computational and data grids. nordugrid-arc: nordugrid-arc: nordugrid-arc: nordugrid-arc: nordugrid-arc: nordugrid-arc: nordugrid-arc: nordugrid-arc: EOM cd $INSTALLDIR PKGFILE="$BASEDIR/$PACKAGE-$VERSION-$ARCH-$BUILD.txz" makepkg -l y -c n "$PKGFILE" cd "$SRCDIR" mv "$PKGFILE" "$SRCDIR/" rm -rf "$BASEDIR" nordugrid-arc-5.4.2/PaxHeaders.7502/NOTICE0000644000000000000000000000012212267746213016111 xustar000000000000000025 mtime=1390398603.1003 27 atime=1513200574.158697 30 ctime=1513200658.628730715 nordugrid-arc-5.4.2/NOTICE0000644000175000002070000000340112267746213016156 0ustar00mockbuildmock00000000000000Advanced Resource Connector (ARC) This product includes Advanced Resource Connector (ARC) software. The software is developed by the NorduGrid collaboration (http://www.nordugrid.org) with financial support from the European Commission and Nordic Research Councils. Unless stated otherwise, the Copyright is collectively owned by individual contributors and contributing organisations as listed in the AUTHORS file. The software is licensed under the Apache License, Version 2.0 (the "License"); you may not use files from this software distribution except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Support for APEL SSM in JURA is provided by the APEL code licensed under Apache 2.0 and owned by STFC (authors: Kevin Haines, Will Rogers). This code is in src/services/a-rex/jura/ssm/. The original software is available from http://apel.github.com/apel/ Some hash function code in ACIX is provided by the General Purpose Hash Function Algorithms Library licensed under the Common Public License and written by Arash Partow. This code is in src/services/acix/core/hashes.py. The original software is available from http://www.partow.net/programming/hashfunctions/index.html Support for json parsing is provided by the cJSON library written by Dave Gamble and licensed under the MIT license. This code is in src/external/cJSON. The original software is available from http://cjson.sourceforge.net/ nordugrid-arc-5.4.2/PaxHeaders.7502/src0000644000000000000000000000013213214316022015700 xustar000000000000000030 mtime=1513200658.903734078 30 atime=1513200668.716854096 30 ctime=1513200658.903734078 nordugrid-arc-5.4.2/src/0000755000175000002070000000000013214316022016023 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612574532570020034 xustar000000000000000027 mtime=1441969528.390811 29 atime=1513200592.91992707 30 ctime=1513200658.651730996 nordugrid-arc-5.4.2/src/Makefile.am0000644000175000002070000000153412574532570020102 0ustar00mockbuildmock00000000000000if HED_ENABLED if DOC_ENABLED BUILD_SOURCES = external doc hed libs tests services clients utils doxygen else BUILD_SOURCES = external doc hed libs tests services clients utils endif else BUILD_SOURCES = endif SUBDIRS = $(BUILD_SOURCES) DIST_SUBDIRS = external doc hed libs tests services clients utils doxygen # This won't work in case of cross-compilation. Please # some autotools experts fix it. 
if HED_ENABLED install-exec-hook: if test "x$(build_triplet)" = "x$(host_triplet)"; then env LD_LIBRARY_PATH=$(DESTDIR)$(libdir):$(LD_LIBRARY_PATH) $(top_builddir)/src/utils/hed/arcplugin$(EXEEXT) -c $(DESTDIR)$(pkglibdir) -c $(DESTDIR)$(pkglibdir)/test; else echo "No .apd files since we are cross-compiling"; fi uninstall-local: test "x$(build_triplet)" = "x$(host_triplet)" && rm -f $(DESTDIR)$(pkglibdir)/*.apd $(DESTDIR)$(pkglibdir)/test/*.apd endif nordugrid-arc-5.4.2/src/PaxHeaders.7502/doc0000644000000000000000000000013213214316022016445 xustar000000000000000030 mtime=1513200658.733731999 30 atime=1513200668.716854096 30 ctime=1513200658.733731999 nordugrid-arc-5.4.2/src/doc/0000755000175000002070000000000013214316022016570 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/doc/PaxHeaders.7502/arc.conf.5.in0000644000000000000000000000012713153455371020727 xustar000000000000000027 mtime=1504598777.935254 30 atime=1513200652.625657295 30 ctime=1513200658.732731987 nordugrid-arc-5.4.2/src/doc/arc.conf.5.in0000644000175000002070000032634513153455371021006 0ustar00mockbuildmock00000000000000.TH arc.conf 5 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid ARC" .SH NAME arc.conf \- ARC configuration .SH SYNOPSIS .B /etc/arc.conf .B ${ARC_LOCATION}/etc/arc.conf .SH DESCRIPTION ARC has two separate configuration files - one for client tools and another for services. This document describes the services configuration file. For client configuration please see "ARC Clients User Manual" at http://www.nordugrid.org/documents/arc-ui.pdf ARC configuration uses a plain-text "ini-style" format. It is also possible to use an XML format, however that is outside the scope of this document. The configuration file consists of several configuration blocks. Each configuration block is identified by a keyword and contains the configuration options for a specific part of the ARC middleware. Each configuration block starts with its identifying keyword inside square brackets. Thereafter follows one or more attribute value pairs written one on each line in the following format (note that the attribute names are CASE-SENSITIVE): .nf .B [keyword1] .BR attribute1 ="value1" .BR attribute2 ="value2" .B [keyword2] .BR attribute ="value" .fi If the ARC_LOCATION environment variable is set the ARC configuration file located at ${ARC_LOCATION}/etc/arc.conf is read first. If this file is not present or the relevant configuration information is not found in this file, the file at /etc/arc.conf is read. .SH The [common] block The parameters set within this block are available for all the other blocks. These are the configuration parameters shared by the different components of ARC (e.g. grid-manager, infosys) .TP .B hostname hostname - the FQDN of the frontend node, optional in the common block but MUST be set in the cluster block .IR Example: .br hostname="myhost.org" .TP .B x509_voms_dir x509_voms_dir path - the path to the directory containing *.lsc files needed for checking validity of VOMS extensions. If not specified default value /etc/grid-security/vomsdir is used. .IR Example: .br x509_voms_dir="/etc/grid-security/vomsdir" .TP .B lrms ARC supports various LRMS flavours, as listed in this section. For detailed description of options please refer to ARC CE sysadmin guide: http://www.nordugrid.org/documents/arc-ce-sysadm-guide.pdf .B ONLY ONE LRMS IS ALLOWED. MULTIPLE lrms ENTRIES WILL TRIGGER UNEXPECTED BEHAVIOUR. 
.B lrms sets the type of the Local Resource Management System (queue system), and optionally - the default queue name, separated with a blank space: .B lrmstype queue_name. For .B lrmstype, the following systems are supported and can be chosen (one per server): fork - simple forking of jobs to the same node as the server sge - (Sun/Oracle) Grid Engine condor - Condor pbs - PBS lsf - LSF ll - LoadLeveler slurm - SLURM dgbridge - Desktop Grid PBS has many flavours; ARC currently supports OpenPBS, PBSPro, ScalablePBS and Torque (the official name for ScalablePBS). There is no need to specify the flavour or the version number of the PBS, simply write 'pbs'. Similarly, there is no need to specify (Sun/Oracle) Grid Engine versions and flavours. "lrmstype" MUST be set here, it is a MANDATORY parameter! The optional .B "queue" parameter specifies the default Grid queue of the LRMS. Jobs will be submitted to this queue if they do not specify a queue name in the job description. The queue name must match one of the [queue/queue_name] block labels, see below. .IR Example: .br lrms="pbs gridlong" .br lrms="pbs" .SH PBS options .TP .B pbs_bin_path the path to the qstat, pbsnodes, qmgr etc PBS binaries, no need to set if PBS is not used .IR Example: .br pbs_bin_path="/usr/bin" .TP .B pbs_log_path the path of the PBS server logfiles which are used by A-REX to determine whether a PBS job is completed. If not specified, A-REX will use qstat for that. .IR Example: .br pbs_log_path="/var/spool/pbs/server_logs" .br .SH Condor options .TP .B condor_rank condor_rank - If you are not happy with the way Condor picks nodes when running jobs, you can define your own ranking algorithm by optionally setting the condor_rank attribute. condor_rank should be set to a ClassAd float expression that you could use in the Rank attribute in a Condor job description. Obviously no need to set if Condor is not used. An example: .IR Example: .br condor_rank="(1-LoadAvg/2)*(1-LoadAvg/2)*Memory/1000*KFlops/1000000" .TP .B condor_bin_path condor_bin_path - Path to Condor binaries. Must be set if Condor is used. .IR Example: .br condor_bin_path=/opt/condor/bin .br .TP .B condor_config condor_config - Path to Condor config file. Must be set if Condor is used and the config file is not in its default location (/etc/condor/condor_config or ~/condor/condor_config). The full path to the file should be given. .IR Example: .br condor_config=/opt/condor/etc/condor_config .br .SH SGE options .TP .B sge_bin_path sge_bin_path - Path to Sun Grid Engine (SGE) binaries, MUST be set if SGE is the LRMS used .IR Example: .br sge_bin_path="/opt/n1ge6/bin/lx24-x86" .TP .B sge_root sge_root - Path to SGE installation directory. MUST be set if SGE is used. .IR Example: .br sge_root="/opt/n1ge6" .TP .B sge_cell sge_cell - The name of the SGE cell to use. This option is only necessary in case SGE is set up with a cell name different from 'default'. .IR Example: .br sge_cell="default" .TP .B sge_qmaster_port sge_qmaster_port, sge_execd_port - these options should be used in case SGE command line clients require the SGE_QMASTER_PORT and SGE_EXECD_PORT environment variables to be set. Usually they are not necessary.
.IR Example: .br sge_qmaster_port="536" .br sge_execd_port="537" .SH SLURM options .TP .B slurm_bin_path slurm_bin_path - Path to SLURM binaries, must be set if installed outside of normal $PATH .IR Example: .br slurm_bin_path="/usr/bin" .TP .B slurm_wakeupperiod How long should infosys wait before querying SLURM for new data (seconds) .IR Example: .br slurm_wakeupperiod="15" .TP .B slurm_use_sacct Should ARC use sacct instead of scontrol to get information on finished jobs. Requires that accounting is turned on in SLURM. Default is "no". .IR Example: .br slurm_use_sacct="yes" .SH LSF options .TP .B lsf_bin_path the PATH to LSF bin folder no need to set if LSF is not used .IR Example: .br lsf_bin_path="/usr/local/lsf/bin/" .TP .B lsf_profile_path the PATH to profile.lsf no need to set if LSF is not used .IR Example: .br lsf_profile_path="/usr/share/lsf/conf" .br .SH LL options .TP .B ll_bin_path the PATH to the LoadLeveler bin folder no need to set if LoadLeveler is not used .IR Example: .br ll_bin_path="/opt/ibmll/LoadL/full/bin" .TP .B ll_consumable_resources support for a LoadLeveler setup using Consumable Resources no need to set if LoadLeveler is not used .IR Example: .br ll_consumable_resources="yes" .SH Desktop Grid options .TP .B dgbridge_stage_dir Desktop Bridge www publish dir .IR Example: .br dgbridge_stage_dir="/var/www/DGBridge" .TP .B dgbridge_stage_prepend Desktop Bridge URL prefix pointing to dgbridge_stage_dir .IR Example: .br dgbridge_stage_prepend="http://edgi-bridge.example.com/DGBridge/" .SH Boinc options .TP .B boinc_db_host boinc_db_port boinc_db_name boinc_db_user boinc_db_pass Connection details for the Boinc database. .IR Example: .br boinc_db_host="localhost" .br boinc_db_port="3306" .br boinc_db_name="myproject" .br boinc_db_user="boinc" .br boinc_db_pass="password" .TP .B boinc_app_id = id ID of the app handled by this CE. Setting this option makes database queries much faster in large projects with many apps. .IR Example: .br boinc_app_id="1" .SH Other [common] options .TP .B globus_tcp_port_range globus_tcp_port_range, globus_udp_port_range - Firewall configuration In a firewalled environment the software which uses GSI needs to know what ports are available. The full documentation can be found at: http://dev.globus.org/wiki/FirewallHowTo These variable are similar to the Globus environment variables: GLOBUS_TCP_PORT_RANGE and GLOBUS_UDP_PORT_RANGE. These variables are not limited to [common], but can be set individually for each service in corresponding section: [grid-manager], [gridftpd] Example: .IR Example: .br globus_tcp_port_range="9000,12000" .br globus_udp_port_range="9000,12000" .TP .B x509_user_key x509_user_cert, x509_user_key - Server credentials location. 
These variables are similar to the GSI environment variables: X509_USER_KEY and X509_USER_CERT These variables are not limited to [common], but can be set individually for each service in corresponding section: [grid-manager], [gridftpd], [nordugridmap] .IR Example: .br x509_user_key="/etc/grid-security/hostkey.pem" .br x509_user_cert="/etc/grid-security/hostcert.pem" .TP .B x509_cert_dir x509_cert_dir - Location of trusted CA certificates This variable is similar to the GSI environment variable: X509_CERT_DIR This variable is not limited to [common], but can be set individually for each service in corresponding section: [grid-manager], [gridftpd] .IR Example: .br x509_cert_dir="/etc/grid-security/certificates" .TP .B gridmap gridmap - The gridmap file location This variable is similar to the GSI environment variable: GRIDMAP This variable is not limited to [common], but can be set individually for each service in corresponding section: [grid-manager], [gridftpd] The default is /etc/grid-security/grid-mapfile .IR Example: .br gridmap="/etc/grid-security/grid-mapfile" .TP .B voms_processing voms_processing - Defines how to behave if errors in VOMS AC processing detected. relaxed - use everything that passed validation. standard - same as relaxed but fail if parsing errors took place and VOMS extension is marked as critical. This is the default. strict - fail if any parsing error was discovered. noerrors - fail if any parsing or validation error happened. This command can also be used in [grid-manager] and [gridftpd] blocks. .IR Example: .br voms_processing="standard" .TP .B voms_trust_chain voms_trust_chain - Define the DN chain that the host services trust when the VOMS AC from peer VOMS proxy certificate is parsed and validated. There can be multiple "voms_trust_chain" existing, each one corresponds to a VOMS server. This variable is similar to the information in *.lsc file, but with two differences: 1, You don't need to create a *.lsc file per VOMS server, but create a chain per VOMS server; 2, Regular expressions are supported when matching the DNs. This variable is not limited to [common], but can be used in [grid-manager] and [gridftpd] blocks. This variable should be used together with voms_processing. This variable will overwrite the information in *.lsc if *.lsc exists. .nf .IR Example: .br voms_trust_chain = "/O=Grid/O=NorduGrid/CN=host/arthur.hep.lu.se" "/O=Grid/O=NorduGrid/CN=NorduGrid Certification Authority" .br voms_trust_chain = "/O=Grid/O=NorduGrid/CN=host/emi-arc.eu" "/O=Grid/O=NorduGrid/CN=NorduGrid Certification Authority" .br voms_trust_chain = "^/O=Grid/O=NorduGrid" .fi .TP .B enable_perflog_reporting enable_perflog_reporting expert-debug-on/no - Switch on or off performance reporting. Default is no. Only switch on if you specifically need it, and are aware of the possible local root exploit due to permissive directory. .IR Example: .br enable_perflog_reporting="expert-debug-on" .TP .B perflogdir perflogdir logdir - Directory where performance logs should be stored. Default is /var/log/arc/perflogs .IR Example: .br perflogdir="/var/log/arc/perflogs" .SH [vo] block [vo] block is used to define VOs and generate mapfiles from user list maintained by VO databases. VO block is a configuration block for the nordugridmap utility. Please note that [vo] block processing by nordugridmap utility depend on parameters defined in the [nordugridmap] block. [vo] block by itself does not affect authorization of client/user. 
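To illustrate how the attributes described below fit together, a minimal, purely illustrative [vo] block is sketched here; the VO name, file path, source URL and mapped account are placeholders rather than defaults:
.nf
[vo]
id="vo_1"
vo="myvo"
file="/etc/grid-security/VOs/myvo-users"
source="vomss://voms.example.org:8443/voms/myvo"
mapped_unixid="griduser"
.fi
The individual attributes are described in the following entries.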
For authorization purposes the label defined by the vo="" attribute may be used in a [group] block with the 'vo' rule. Also, mapfiles generated by the nordugridmap utility can be used with the 'file' rule. .TP .B id id blockid - specifies the unique configuration block id (this does not affect the nordugridmap utility) .IR Example: .br id="vo_1" .TP .B vo vo vo_name - specifies the VO name, this name can be used in other blocks. MUST be given. .IR Example: .br vo="nordugrid" .TP .B file file path - output gridmap-file where the GENERATED mapping list will be stored. See the parameters below to define how to generate this file. If the same file is specified as output for different [vo] blocks, nordugridmap will automatically merge entries in the given block order. Default is '/etc/grid-security/gridmapfile'. .IR Example: .br file="/etc/grid-security/VOs/atlas-users" .TP .B source source URL - the URL of the VO database which is assigned to this VO. nordugridmap will use this URL to automatically generate and keep up to date the userlist (mapfile) specified by the 'file' attribute. URL is a multivalued attribute, several sources can be specified for the [vo] block and all the users from those sources will be merged into the same file. The source URLs are processed in the given order. Currently supported URL types are: http(s):// - URL to plain text file. File should contain a list of DNs with optional issuer certificate authority DN (see require_issuerdn): "user DN" ["issuer DN"] voms(s):// - URL to VOMS-Admin interface nordugrid - add NorduGrid VO members ldap:// - expect LDAP-schema formatted VO Group file:// - local file (stand-alone or dynamically generated by nordugridmap). File should contain a list of DNs with optional mapped unixid: "user DN" [mapped user ID] The result of optional mapped unixid processing depends on the mapuser_processing option settings. vo:// - reference to another [vo] configuration block edg-mkgridmap:// - local configuration file used by the edg-mkgridmap tool. nordugridmap will parse the configuration from the file and process it as an additional [vo] block that will be referred to automatically in place of the URL specified. This allows easy migration from an edg-mkgridmap solution without rewriting your previous configuration (NOTE that the rarely used 'auth' directive and 'AUTO' mapping options are not supported). You can use either vo:// or file:// entries to specify dependencies between [vo] blocks, but using vo:// is the recommended way. For each separate source URL it is possible to override some parameter values. You can use the following syntax to perform this: source="URL < parameter1=value1 parameter2=value2" You can override the following parameters: mapped_unixid for http(s),voms(s),ldap and file URLs cache_enable for http(s),voms(s),ldap and file URLs voms_method for voms(s) URLs mapuser_processing for file URLs with mapped_unixid='' overrides (controls mapped_unixid overriding behaviour for the URL) .IR Example: .br source="vomss://voms.ndgf.org:8443/voms/nordugrid.org" .br source="vomss://lcg-voms.cern.ch:8443/voms/atlas?/atlas/Role=VO-Admin < mapped_unixid=atlasadmin" .br source="vomss://kuiken.nikhef.nl:8443/voms/gin.ggf.org < voms_method=get" .br source="http://www.nordugrid.org/developers.dn" .br source="ldap://grid-vo.nikhef.nl/ou=lcg1,o=atlas,dc=eu-datagrid,dc=org" .br source="file:///etc/grid-security/priviliged_users.dn" .br source="vo://nordugrid_community" .br source="nordugrid" .TP .B mapped_unixid mapped_unixid unixid - the local UNIXID which is used in the generated grid-mapfile by the nordugridmap utility.
If any of the sources have already provided mapping information (file:// or vo://) the behaviour depends on the 'mapuser_processing' [nordugridmap] block configuration: mapuser_processing = 'overwrite': ignore already provided mapping and apply mapped_unixid for all sources mapuser_processing = 'keep': apply mapped_unixid only for sources that do not already have mapping information A [vo] block can only have one UNIXID. If 'mapped_unixid' is not specified, the behaviour depends on the 'allow_empty_unixid' [nordugridmap] block configuration value: allow_empty_unixid = 'yes': an empty value will be used for mapped_unixid, which means that nordugridmap will generate only the list of DNs without mapping (consider using mapuser_processing='overwrite' along with this option, or sources that do not provide previously defined mapping information) allow_empty_unixid = 'no': skip users without mapping information (if no mapping information is provided by the sources) .IR Example: .br mapped_unixid="gridtest" .TP .B voms_fqan_map voms_fqan_map fqan unixid - the local UNIXID which is used to map voms(s) sources with the specific FQAN given. Several voms_fqan_map entries can be specified for a [vo] block. For each voms(s) source in the [vo] block and every voms_fqan_map record a separate source record will be automatically generated with mapped_unixid overridden to the specified one. Sources are generated in the given voms_fqan_map order. The original voms(s) source URLs are processed LAST. This simplifies configuration, especially in redundant setups where several VOMS servers are used for the same VO. .IR Example: .br voms_fqan_map="/atlas/Role=VO-Admin atlasadmin" .br voms_fqan_map="/atlas/Role=production atlasprod" .TP .B require_issuerdn require_issuerdn yes/no - another nordugridmap option. YES would map only those DNs obtained from the URLs which have the corresponding public CA packages installed. Default is 'no'. Note that some sources do not provide issuer information (like voms(s):// or file://). If these sources are used within a [vo] block and require_issuerdn is set to 'yes', the behaviour depends on the issuer_processing [nordugridmap] block configuration: issuer_processing = 'relaxed': check only those records that have issuer information provided, allow other sources issuer_processing = 'strict': if issuer information was not found the record is filtered out and will not be passed into the mapfile .IR Example: .br require_issuerdn="no" .TP .B filter filter ACL string - An ACL filter for the nordugridmap utility. Multiple allow/deny statements are possible. The fetched DNs are filtered against the specified rules before they are added to the generated mapfile. * can be used as a wildcard. You may run nordugridmap with the --test command line option to see how the filters you specified work. If at least one allow filter is specified, an implicit deny is used at the end of the ACL. If only deny filters are present, an implicit allow is used at the end. .IR Example: .br filter="deny *infn*" .br filter="allow *NorduGrid*" .SH [group] Authorisation block These configuration blocks define the rules used to determine which authorization group a user belongs to. The group should not be mistaken for a virtual organisation (VO). A group may match a single VO if only a single check (rule) on VO membership is performed. It is however more common to allow multiple VOs in a single group. ARC also allows many other ways to assign users to groups. Technically, permissions are only granted to groups, not directly to VOs. A block specifies a single authorization group.
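As a further illustration, and assuming (as described below) that a rule's sign is prepended directly to the rule name, a [group] block combining some of the rules documented in the following entries might look like this; the subject and VO values are placeholders:
.nf
[group/users]
name="users"
-subject="/O=Grid/O=Example/CN=Banned User"
vo="myvo"
.fi
Rules are evaluated in order (see below), so the explicitly listed subject is rejected first, while members of the VO defined in an earlier [vo] block with vo="myvo" are accepted.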
There may be multiple [group] blocks in the configuration, defining multiple authorization groups. A block can be specified in two ways - either using a [group/group1]-style subblock declaration per group, or just [group]. The two formats are equivalent. Every block (until the beginning of the next block or the end of the file) defines one authorization group. .B IMPORTANT: Rules in a group are processed in their order of appearance. The first matching rule decides the membership of the user in a group and the processing STOPS. There are positively and negatively matching rules. If a rule is matched positively then the user tested is accepted into the respective group and further processing is stopped. Upon a negative match the user would be rejected for that group - processing stops too. The sign of a rule is determined by prepending the rule with '+' (for positive) or '-' (for negative) signs. '+' is the default and can be omitted. A rule may also be prepended with '!' to invert the result of the rule, which will let the rule match the complement of users. That complement operator ('!') may be combined with the operator for positive or negative matching. A group MUST be defined before it may be used. In this respect the arc.conf is ORDER SENSITIVE. The authorization groups can be used in [gridftpd] and in its sub-blocks. The syntax of their specification varies with the service they are used for. For using authorization groups and VO blocks in the HED framework please read "Security Framework of ARC" at http://www.nordugrid.org/documents/arc-security-documentation.pdf .TP .B name name group_name - Specify the name of the group. If there is no such command in the block, the name of the subblock is used instead (that is what subblocks are used for). For example [group/users]. .IR Example: .br name="users" .TP .B subject subject certificate_subject - Rule to match a specific subject of the user's X.509 certificate. No masks, patterns or regular expressions are allowed. For more information about X.509 refer to http://www.wikipedia.org/wiki/X509 .IR Example: .br subject="/O=Grid/O=Big VO/CN=Main Boss" .TP .B file file path - Start reading rules from another file. That file has a slightly different format. It can't contain blocks, and commands are separated from arguments by a space. Also the word "subject" in the subject command may be skipped. That makes it convenient to directly add gridmap-like lists to an authorization group. .IR Example: .br file="/etc/grid-security/local_users" .TP .B voms voms vo group role capabilities - Match a VOMS attribute in the user's credential. Use '*' to match any value. More information about VOMS can be found at http://grid-auth.infn.it .IR Example: .br voms="nordugrid /nordugrid/Guests * *" .TP .B group group group_name [group_name ...] - Match a user already belonging to one of the specified groups. Groups referred to here must be defined earlier in the configuration file. Multiple group names may be specified for this rule. That allows creating a hierarchical structure of authorization groups, e.g. 'clients' are those which are 'users' and 'admins'. .IR Example: .br group="local_admins" .TP .B plugin plugin timeout path [argument ...] - Run an external executable or a function from a shared library. The rule is matched if the plugin returns 0. In the arguments the following substitutions are supported: %D - subject of certificate %P - path to proxy For more about plugins read the documentation. .IR Example: .br plugin="10 /opt/external/bin/permis %P" .TP .B lcas lcas library directory database - Call LCAS functions to check the rule.
Here library is the path to the LCAS shared library, either absolute or relative to directory; directory is the path to the LCAS installation directory, equivalent to the LCAS_DIR variable; database is the path to the LCAS database, equivalent to the LCAS_DB_FILE variable. Each argument except library is optional and may be either skipped or replaced with '*'. .IR Example: .br lcas="" .TP .B remote remote URL ... - Check the user's credentials against a remote service. Only DN groups stored in LDAP directories are supported. Multiple URLs are allowed in this rule. .IR Example: .br remote="ldap://grid-vo.nordugrid.org/ou=People,dc=nordugrid,dc=org" .TP .B vo vo vo_name ... - Match a user belonging to a VO specified by "vo=vo_name" as configured in one of the PREVIOUSLY defined [vo] blocks. Multiple VO names are allowed for this rule. .IR Example: .br vo="nordugrid" .TP .B all all - Matches any user identity. This command requires no arguments but still can be written as all="" or all= for consistency. .IR Example: .br all="" .SH The [grid-manager] block The [grid-manager] block configures the part of the A-REX service hosted in .B arched taking care of the grid tasks on the frontend (stage-in/stage-out, LRMS job submission, caching, etc.). The name of this block is historical and comes from the times when this functionality was handled by a separate process called grid-manager. This block also configures the WS interfaces of the A-REX service hosted by the same container. .TP .B controldir controldir path - The directory for A-REX's internal job log files, not needed on the nodes. .IR Example: .br controldir="/var/spool/nordugrid/jobstatus" .TP .B sessiondir sessiondir path [drain] - the directory which holds the sessiondirs of the grid jobs. Multiple session directories may be specified by specifying multiple sessiondir commands. In this case jobs are spread evenly over the session directories. If sessiondir="*" is set, the session directory will be spread over the ${HOME}/.jobs directories of every locally mapped unix user. It is preferred to use common session directories. The path may be followed by "drain", in which case no new jobs will be assigned to that sessiondir, but current jobs will still be processed and accessible. .IR Example: .br sessiondir="/scratch/grid" .br sessiondir="/mnt/grid drain" .TP .B runtimedir runtimedir path - The directory which holds the runtime environment scripts; it should be available on the nodes as well! The runtime environments are automatically detected and advertised in the information system. .IR Example: .br runtimedir="/SOFTWARE/runtime" .TP .B scratchdir scratchdir path - path on the computing node to move the session directory to before execution. If defined, it should contain the path to the directory on the computing node which can be used to store a job's files during execution. Sets the environment variable RUNTIME_LOCAL_SCRATCH_DIR. The default is not to move the session directory before execution. .IR Example: .br scratchdir="/local/scratch/" .TP .B shared_scratch shared_scratch path - path on the frontend where scratchdir can be found. If defined, it should contain the path corresponding to that set in scratchdir as seen on the frontend machine. Sets the environment variable RUNTIME_FRONTEND_SEES_NODE. .IR Example: .br shared_scratch="/mnt/scratch" .TP .B nodename nodename path - command to obtain the hostname of the computing node. .IR Example: .br nodename="/bin/hostname" .TP .B cachedir cachedir cache_path [link_path] - specifies a directory to store cached data.
Multiple cache directories may be specified by specifying multiple cachedir commands. Cached data will be distributed evenly over the caches. Specifying no cachedir command or commands with an empty path disables caching. Optional link_path specifies the path at which the cache_path is accessible on computing nodes, if it is different from the path on the A-REX host. Example: cache="/shared/cache /frontend/jobcache" If "link-path" is set to '.' files are not soft-linked, but copied to session directory. If a cache directory needs to be drained, then cachedir should specify "drain" as the link path, in which case no new files will be added to the cache. .IR Example: .br cachedir="/scratch/cache" .br cachedir="/fs1/cache drain" .TP .B remotecachedir remotecachedir cache_path [link_path] - specifies caches which are under the control of other A-REXs, but which this A-REX can have read-only access to. Multiple remote cache directories may be specified by specifying multiple remotecachedir commands. If a file is not available in paths specified by cachedir, A-REX looks in remote caches. link_path has the same meaning as in cachedir, but the special path ``replicate'' means files will be replicated from remote caches to local caches when they are requested. .IR Example: .br remotecachedir="/mnt/fs1/cache replicate" .TP .B cachesize cachesize max min - specifies high and low watermarks for space used by cache, as a percentage of the space on the file system on which the cache directory is located. When the max is exceeded, files will be deleted to bring the used space down to the min level. It is a good idea to have the cache on its own separate file system. To turn off this feature "cachesize" without parameters can be specified. .IR Example: .br cachesize="80 70" .TP .B cachelifetime If cache cleaning is enabled, files accessed less recently than the given time period will be deleted. Example values of this option are 1800, 90s, 24h, 30d. When no suffix is given the unit is seconds. .IR Example: .br cachelifetime="30d" .TP .B cacheshared cacheshared yes|no - specifies whether the caches share a filesystem with other data. If set to yes then cache-clean calculates the size of the cache instead of using filesystem used space. .IR Example: .br cacheshared="yes" .TP .B cachespacetool cachespacetool path [options] - specifies an alternative tool to "df" that cache-clean should use to obtain space information on the cache file system. The output of this command must be "total_bytes used_bytes". The cache directory is passed as the last argument to this command. .IR Example: .br cachespacetool="/etc/getspace.sh" .TP .B cachelogfile cachelogfile path - specifies the filename where output of the cache-clean tool should be logged. Defaults to /var/log/arc/cache-clean.log. .IR Example: .br cachelogfile="/tmp/cache-clean.log" .TP .B cacheloglevel cacheloglevel level - specifies the level of logging by the cache-clean tool, between 0 (FATAL) and 5 (DEBUG). Defaults to 3 (INFO). .IR Example: .br cacheloglevel="4" .TP .B cachecleantimeout cachecleantimeout time - the timeout in seconds for running the cache-clean tool. If using a large cache or slow file system this value can be increased to allow the cleaning to complete. Defaults to 3600 (1 hour). .IR Example: .br cachecleantimeout="10000" .TP .B cacheaccess cacheaccess rule - rules for allowing access to files in the cache remotely through the A-REX web interface. A rule has three parts: 1. Regular expression defining a URL pattern 2. 
Credential attribute to match against a client's credential 3. Regular expression defining a credential value to match against a client's credential A client is allowed to access the cached file if a URL pattern matches the cached file URL and the client's credential has the attribute and matches the value required for that pattern. Possible values for credential attribute are dn, voms:vo, voms:role and voms:group. Remote cache access requires that the A-REX web interface is enabled via arex_mount_point. .IR Examples: .br cacheaccess="gsiftp://host.org/private/data/.* voms:vo myvo:production" .br cacheaccess="gsiftp://host.org/private/data/ng/.* dn /O=Grid/O=NorduGrid/.*" .TP .B enable_cache_service enable_cache_service yes|no - Turn on or off the cache service interface. If turned on the cache service must be installed and the A-REX WS interface must be enabled via arex_mount_point. The interface is accessible at the same host and port as given inn arex_mount_point with path /cacheservice. Default is off. .IR Example: .br enable_cache_service="yes" .TP .B user user user[:group] - Switch to a non root user/group after startup. Use with caution. .IR Example: .br user="grid" .TP .B debug debug debuglevel - Set debug level of the arched daemon hosting A-REX service, between 0 (FATAL) and 5 (DEBUG). Defaults to 3 (INFO). .IR Example: .br debug="2" .TP .B logfile logfile path - Specify log file location. If using an external log rotation tool be careful to make sure it matches the path specified here. Default log file is "/var/log/arc/grid-manager.log" .IR Example: .br logfile="/var/log/arc/grid-manager.log" .TP .B wslogfile wslogfile path - Specify log file location for WS-interface operations. This file is only created if the WS-interface is enabled through the arex_mount_point option. The logsize, logreopen and debug options also apply to this file. If using an external log rotation tool be careful to make sure it matches the path specified here. It is possible to specify the same file as logfile to combine the logs. Default is /var/log/arc/ws-interface.log. .IR Example: .br wslogfile="/var/log/arc/ws-interface.log" .TP .B logsize logsize size [number] - 'Size' specifies in bytes how big log file is allowed to grow (approximately). If log file exceeds specified size it is renamed into logfile.0. And logfile.0 is renamed into logfile.1, etc. up to 'number' logfiles. Don't set logsize if you don't want to enable the ARC logrotation because another logrotation tool is used. .IR Example: .br logsize="100000 2" .TP .B logreopen logreopen yes|no - Specifies if log file must be closed after each record is added. By default arched keeps log file open. This option can be used to make behaviour of arched compatible with external log rotation utilities. .IR Example: .br logreopen="no" .TP .B pidfile pidfile path - Specify location of file containing PID of daemon process. This is useful for automatic start/stop scripts. .IR Example: .br pidfile="/var/run/arched-arex.pid" .TP .B gnu_time the gnu time command, default /usr/bin/time .IR Example: .br gnu_time="/usr/bin/time" .TP .B shared_filesystem if computing node can access session directory at frontend, defaults to 'yes' .IR Example: .br shared_filesystem="yes" .TP .B mail specifies the email address from where the notification mails are sent, .IR Example: .br mail="grid.support@somewhere.org" .TP .B joblog joblog path - specifies where to store specialized log about started and finished jobs. If path is empty or no such command - log is not written. 
This log is not used by any other part of ARC, so keep it disabled unless needed. .IR Example: .br joblog="/var/log/arc/gm-jobs.log" .TP .B jobreport jobreport [URL ...] [timeout] - instructs A-REX to report all started and finished jobs to the logger service at 'URL'. Multiple URLs and multiple jobreport commands are allowed. In that case the job info will be sent to all of them. Timeout specifies how long (in days) to try to pass information before giving up. Suggested value is 30 days. .IR Example: .br jobreport="https://grid.uio.no:8001/logger" .TP .B jobreport_publisher jobreport publisher - name of the accounting records publisher. .IR Example: .br jobreport_publisher="jura" .TP .B jobreport_credentials jobreport credentials path [key_file [cert_file [ca_dir]]] - specifies the credentials for accessing the accounting service. .IR Example: .br jobreport_credentials="/etc/grid-security/hostkey.pem /etc/grid-security/hostcert.pem /etc/grid-security/certificates" .TP .B jobreport_options jobreport options [name:value, ...] - specifies additional parameters for the jobreporter. .IR Example: .br jobreport_options="urbatch:50,archiving:/tmp/archive,topic:/topic/global.accounting.cpu.central" .TP .B jobreport_logfile jobreport logfile - name of the file to store stderr of the publisher executable. .IR Example: .br jobreport_logfile="/var/log/arc/jura.log" .TP .B max_job_control_requests max_job_control_requests number - max number of simultaneously processed job management requests over the WS interface - like job submission, cancel, status check etc. Default value is 100. .IR Example: .br max_job_control_requests="100" .TP .B max_infosys_requests max_infosys_requests number - max number of simultaneously processed resource info requests over the WS interface. Default value is 1. .IR Example: .br max_infosys_requests="1" .TP .B max_data_transfer_requests max_data_transfer_requests number - max number of simultaneously processed data transfer requests over the WS interface - like data staging. Default value is 100. .IR Example: .br max_data_transfer_requests="100" .TP .B maxjobs maxjobs number1 number2 number3 number4 number5 - specifies the maximum allowed number of jobs. number1 - jobs which are not in FINISHED state (jobs tracked in RAM) number2 - jobs being run (SUBMITTING, INLRMS states) number3 - jobs being processed per DN number4 - jobs in whole system number5 - LRMS scripts limit (jobs in SUBMITTING and CANCELING) A missing number or -1 means no limit. .IR Example: .br maxjobs="10000 10 2000" .TP .B wakeupperiod wakeupperiod time - specifies how often A-REX checks for newly arrived jobs, job state change requests, etc. That is the responsiveness of A-REX. 'time' is the time period in seconds. Default is 3 minutes. Usually this command is not needed because important state changes also trigger out-of-schedule checks. NOTE: This parameter does not affect the responsiveness of backend scripts - especially scan-*-job. That means that the upper estimate of the time needed to detect that a job has finished executing is the responsiveness of the backend script plus wakeupperiod. .IR Example: .br wakeupperiod="180" .TP .B defaultttl defaultttl [ttl [ttr]] - ttl is the time in seconds for how long a session directory will survive after job execution has finished. If not specified the default is 1 week. ttr is how long information about a job will be kept after the session directory is deleted. If not specified, the ttr default is one month. .IR Example: .br defaultttl="259200"
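.P As an illustration only (the values below are arbitrary and should be tuned per site), the job-limit and housekeeping options above can be combined in the [grid-manager] block like this: .br maxjobs="10000 200 -1 -1 -1" .br wakeupperiod="180" .br defaultttl="604800 2592000"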
.TP .B authplugin authplugin state options plugin_path - Every time a job goes to 'state', run the 'plugin_path' executable. Options consist of key=value pairs separated by ','. Possible keys are timeout - wait for the result no longer than 'value' seconds (timeout= can be omitted). onsuccess,onfailure,ontimeout - what to do if the plugin exits with exit code 0, a non-zero exit code, or the timeout is reached. Possible actions are: pass - continue executing the job, fail - cancel the job, log - write a message about the problem to the log and continue executing the job. .IR Example: .br authplugin="ACCEPTED timeout=10 /usr/libexec/arc/bank %C/job.%I.local %S" .TP .B authplugin ARC is distributed with the plugin "inputcheck". Its purpose is to check if input files requested in the job's RSL are accessible from this machine. It is better to run it before the job enters the cluster. It accepts 2 arguments: the names of the files containing the RSL and the credentials' proxy. This plugin is only guaranteed to work for jobs submitted through the legacy GridFTP interface, as this is the only interface for which credentials in the form of proxy certificate files are guaranteed to exist. .IR Example: .br authplugin="ACCEPTED 60 /usr/libexec/arc/inputcheck %C/job.%I.description %C/job.%I.proxy" .TP .B authplugin ARC is distributed with the plugin "arc-vomsac-check". Its purpose is to enforce per-queue access policies based on VOMS attributes present in the user's proxy certificate. The plugin should be run before the job enters the cluster. It requires 2 arguments: the path to the job information .local file and the path to the credentials file. Enforced per-queue access policies are configured with the 'ac_policy' option in the [queue/name] configuration block. .IR Example: .br authplugin="ACCEPTED 60 /usr/libexec/arc/arc-vomsac-check -L %C/job.%I.local -P %C/job.%I.proxy" .TP .B localcred localcred timeout plugin_path - Every time an external executable is run this plugin will be called. Its purpose is to set non-unix permissions/credentials on running tasks. Note: the process itself can still be run under the root account. If plugin_path looks like somename@somepath, then function 'somename' from the shared library located at 'somepath' will be called (timeout is not effective in that case). A-REX must be run as root to use this option. Comment it out unless you really know what you are doing. .IR Example: .br localcred="0 acquire@/opt/nordugrid/lib/afs.so %C/job.%I.proxy" .TP .B norootpower norootpower yes|no - if set to yes, all job management processes will switch to the mapped user's identity while accessing the session directory. This is useful if the session directory is on NFS with root squashing turned on. Default is no. .IR Example: .br norootpower="yes" .TP .B allowsubmit allowsubmit [group ...] - list of authorization groups of users allowed to submit new jobs while "allownew=no" is active in the jobplugin configuration. Multiple commands are allowed. .IR Example: .br allowsubmit="mygroup" .br allowsubmit="yourgroup" .TP .B helper helper user executable arguments - associates an external program with A-REX. This program will be kept running under the account of the user specified by 'user'. Currently only '.' is supported as username, corresponding to the user running A-REX. Every time this executable finishes it will be started again. This helper plugin mechanism can be used as an alternative to /etc/init.d or cron to (re)start external processes. .IR Example: .br helper=". /usr/local/bin/myutility"
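.P As an illustration of the authplugin option string described earlier in this block (the state is only an example and the hook path is a hypothetical script): .br authplugin="FINISHED timeout=30,onfailure=log,ontimeout=log /usr/local/bin/job-finished-hook %C/job.%I.local"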
.TP .B tmpdir tmpdir - the directory for temporary files used by A-REX, default is /tmp .IR Example: .br tmpdir="/tmp" .TP .B maxrerun maxrerun - specifies how many times a job can be rerun if it failed in the LRMS. Default value is 5. This is only an upper limit, the actual rerun value is set by the user in the xRSL. .IR Example: .br maxrerun="5" .TP .B globus_tcp_port_range globus_tcp_port_range, globus_udp_port_range - Firewall configuration. .IR Example: .br globus_tcp_port_range="9000,12000" .br globus_udp_port_range="9000,12000" .TP .B x509_user_key x509_user_cert, x509_user_key - Location of credentials for the service. These may be used by any module or external utility which needs to contact another service not on behalf of the user who submitted the job. .IR Example: .br x509_user_key="/etc/grid-security/hostkey.pem" .br x509_user_cert="/etc/grid-security/hostcert.pem" .TP .B x509_cert_dir x509_cert_dir - Location of trusted CA certificates .IR Example: .br x509_cert_dir="/etc/grid-security/certificates" .TP .B http_proxy http_proxy - http proxy server location .IR Example: .br http_proxy="proxy.mydomain.org:3128" .TP .B fixdirectories fixdirectories yes|missing|no - specifies whether during startup A-REX should create all directories needed for its operation and set suitable default permissions. If "no" is specified then A-REX does nothing to prepare its operational environment. In case of "missing" A-REX only creates and sets permissions for directories which are not present yet. For "yes" all directories are created and permissions for all used directories are set to default safe values. Default behaviour is as if "yes" is specified. .IR Example: .br fixdirectories="yes" .TP .B arex_mount_point arex_mount_point - enables web services interfaces, including job execution and information system. The argument is an https URL defining the endpoint port and path: https://<hostname>:<port>/<path> In order to submit a job, a client must specify the exact published path. Make sure the chosen port is not blocked by a firewall or other security rules. .IR Example: .br arex_mount_point="https://piff.hep.lu.se:443/arex" .TP .B enable_arc_interface enable_arc_interface yes|no - turns on or off ARC's own WS interface based on OGSA BES and WSRF. If enabled the interface can be accessed at the URL specified by arex_mount_point (this option must also be specified). Default is yes. .IR Example: .br enable_arc_interface="yes" .TP .B enable_emies_interface enable_emies_interface - enable the EMI Execution Service interface. If enabled the interface can be accessed at the URL specified in arex_mount_point (this option must also be specified). .IR Example: .br enable_emies_interface="yes" .TP .B arguspep_endpoint arguspep_endpoint - specifies the URL of the Argus PEPD service (by default, the Argus PEPD service runs on port 8154 with path /authz) to use for authorization and user mapping. Note that if the "requireClientCertAuthentication" item of pepd.ini (the configuration of the Argus PEPD service, default is false) is set to 'true', then https must be used, otherwise http is appropriate. If specified, Argus is contacted for every operation requested through the WS interface (see arex_mount_point). .IR Example: .br arguspep_endpoint="https://somehost.somedomain:8154/authz"
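.P A minimal sketch (the host names here are placeholders) of enabling the WS interface together with Argus-based authorization, combining the arex_mount_point and arguspep_endpoint options above: .br arex_mount_point="https://ce.example.org:443/arex" .br arguspep_endpoint="https://argus.example.org:8154/authz"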
.TP .B arguspep_profile arguspep_profile - defines which communication profile to use while communicating with the Argus PEPD service. Possible values are: direct - pass all authorization attributes (only for debugging) subject - pass only the subject name of the client cream - makes A-REX pretend it is a gLite CREAM service. This is the recommended profile for interoperability with gLite. emi - new profile developed in the EMI project. This is the default option. .IR Example: .br arguspep_profile="cream" .TP .B arguspep_usermap arguspep_usermap - specifies whether the response from the Argus service may define the mapping of the client to a local account. Possible values are 'yes' and 'no'. Default is 'no'. Argus is contacted after all other user mapping is performed. Hence it can overwrite all other decisions. .IR Example: .br arguspep_usermap="no" .TP .B arguspdp_endpoint arguspdp_endpoint - specifies the URL of the Argus PDP service (by default, the Argus PDP service runs on port 8152 with path /authz) to use for authorization and user mapping. Note that if the "requireClientCertAuthentication" item of pdp.ini (the configuration of the Argus PDP service, default is false) is set to 'true', then https must be used, otherwise http is appropriate. If specified, Argus is contacted for every operation requested through the WS interface (see arex_mount_point). .IR Example: .br arguspdp_endpoint="https://somehost.somedomain:8152/authz" .TP .B arguspdp_profile arguspdp_profile - defines which communication profile to use while communicating with the Argus PDP service. Possible values are: subject - pass only the subject name of the client cream - makes A-REX pretend it is a gLite CREAM service. This is the recommended profile for interoperability with gLite. emi - new profile developed in the EMI project. This is the default option. .IR Example: .br arguspdp_profile="cream" .TP .B arguspdp_acceptnotapplicable arguspdp_acceptnotapplicable - specifies whether the "NotApplicable" decision returned by the Argus PDP service is treated as a reason to deny the request. Default is no, which treats "NotApplicable" as a reason to deny the request. .IR Example: .br arguspdp_acceptnotapplicable="no" .TP .B watchdog watchdog - specifies if an additional watchdog process is spawned to restart the main process if it is stuck or dies. Possible values are 'yes' and 'no'. Default is 'no'. .IR Example: .br watchdog="no" .TP .B groupcfg groupcfg group_name [group_name ...] - specifies authorization groups for the grid-manager to accept. The main location of this parameter is inside the [gridftpd/jobs] block. The 'groupcfg' located here is only effective if the computing service is configured without the GridFTP interface and hence the [gridftpd/jobs] block is missing. .IR Example: .br groupcfg="users" .TP .B unixmap unixgroup unixvo unixmap [unixname][:unixgroup] rule - more sophisticated mapping to local account .br unixgroup group rule - more sophisticated mapping to local account for specific authorization groups. .br unixvo vo rule - more sophisticated mapping to local account for users belonging to the specified VO. .br The main location for these parameters is the [gridftpd] section. If located here they are only active if the computing service is configured without the GridFTP interface and hence the [gridftpd/jobs] block is missing. For more detailed information see section [gridftpd] and read the "ARC Computing Element. System Administrator guide" manual. .IR Example: .br unixmap="nobody:nogroup all" .br unixgroup="users simplepool /etc/grid-security/pool/users" .br unixvo="ATLAS unixuser atlas:atlas" .TP .B allowunknown allowunknown yes|no - check user subject against grid-mapfile. The main location for this parameter is the [gridftpd] section.
If located here it is only active if computing service is configured without GridFTP interface and hence [gridftpd/jobs] block is missing. For more detailed information see section [gridftpd]. .IR Example: .br allowunknown="no" .TP .B delegationdb delegationdb db_name - specify which DB to use to store delegations. Currently supported db_names are bdb and sqlite. Default is bdb. .IR Example: .br delegationdb="bdb" .TP .B forcedefaultvoms forcedefaultvoms VOMS_FQAN - specify VOMS FQAN which user will be assigned if his/her credentials contain no VOMS attributes. To assign different values to different queues put this command into [queue] block. .IR Example: .br forcedefaultvoms="/vo/group/subgroup" .SH [data-staging] block [data-staging] block configures DTR data staging parameters. .TP .B debug debug - Log level for transfer logging in job.id.errors files, between 0 (FATAL) and 5 (DEBUG). Default is to use value set by debug option in [grid-manager] section. .IR Example: .br debug="4" .TP .B maxdelivery maxdelivery - Maximum number of concurrent file transfers, i.e. active transfers using network bandwidth. This is the total number for the whole system including any remote staging hosts. Default is 10. .IR Example: .br maxdelivery="40" .TP .B maxprocessor maxprocessor - Maximum number of concurrent files in each pre- and post- processing state, eg cache check or replica resolution. Default is 10. .IR Example: .br maxprocessor="20" .TP .B maxemergency maxemergency - Maximum "emergency" slots which can be assigned to transfer shares when all slots up to the limits configured by the above two options are used by other shares. This ensures shares cannot be blocked by others. Default is 1. .IR Example: .br maxemergency="5" .TP .B maxprepared maxprepared - Maximum number of files in a prepared state, i.e. pinned on a remote storage such as SRM for transfer. A good value is a small multiple of maxdelivery. Default is 200. .IR Example: .br maxprepared="250" .TP .B sharetype sharetype - Scheme to assign transfer shares. Possible values are dn, voms:vo, voms:role and voms:group. .IR Example: .br sharetype="voms:role" .TP .B definedshare definedshare - Defines a share with a fixed priority, different from the default (50). Priority is an integer between 1 (lowest) and 100 (highest). .IR Example: .br definedshare="myvo:production 80" .br definedshare="myvo:student 20" .TP .B dtrlog dtrlog - A file in which data staging state information (for monitoring and recovery purposes) is periodically dumped. Default is controldir/dtrstate.log .IR Example: .br dtrlog="/tmp/dtrstate.log" .TP .B central_logfile central_logfile - A file in which all data staging messages from every job will be logged (in addition to their job.id.errors files). If this option is not present or the path is empty the log file is not created. Note this file is not automatically controlled by logrotate. .IR Example: .br central_logfile="/var/log/arc/datastaging.log" .TP .B deliveryservice The following 4 options are used to configure multi-host data staging. deliveryservice - URL to a data delivery service which can perform remote data staging .IR Example: .br deliveryservice="https://myhost.org:60003/datadeliveryservice" .TP .B localdelivery localdelivery - If any deliveryservice is defined, this option determines whether local data transfer is also performed. Default is no. .IR Example: .br localdelivery="yes" .TP .B remotesizelimit remotesizelimit - Lower limit on file size (in bytes) of files that remote hosts should transfer. 
Can be used to increase performance by transferring small files using local processes. .IR Example: .br remotesizelimit="100000" .TP .B usehostcert usehostcert - Whether the A-REX host certificate should be used for communication with remote hosts instead of the users' proxies. Default is no. .IR Example: .br usehostcert="yes" .TP .B acix_endpoint acix_endpoint URL - the ARC Cache Index specified here will be queried for every input file specified in a job description and any replicas found in sites with accessible caches will be added to the replica list of the input file. The replicas will be tried in the order specified by .B preferredpattern. .IR Example: .br acix_endpoint="https://cacheindex.ndgf.org:6443/data/index" .TP .B securetransfer securetransfer yes|no - if the data connection allows choosing, use secure or non-secure data transfer. Currently only works for gridftp. Default is no. .IR Example: .br securetransfer="no" .TP .B passivetransfer passivetransfer yes|no - If yes, gridftp transfers are passive. Setting this option to yes can solve transfer problems caused by firewalls. Default is no. .IR Example: .br passivetransfer="no" .TP .B httpgetpartial httpgetpartial yes|no - If yes, HTTP GET transfers may transfer data in chunks/parts. If no, data is always transferred in one piece. Default is yes. .IR Example: .br httpgetpartial="yes" .TP .B speedcontrol speedcontrol min_speed min_time min_average_speed max_inactivity - specifies how slow data transfer must be to trigger an error. The transfer is canceled if the speed is below min_speed bytes per second for at least min_time seconds, or if the average rate is below min_average_speed bytes per second, or no data was transferred for longer than max_inactivity seconds. A value of zero turns the feature off. Default is "0 300 0 300" .IR Example: .br speedcontrol="0 300 0 300" .TP .B preferredpattern preferredpattern pattern - specifies a preferred pattern on which to sort multiple replicas of an input file. It consists of one or more patterns separated by a pipe character (|) listed in order of preference. Replicas will be ordered by the earliest match. If the dollar character ($) is used at the end of a pattern, the pattern will be matched to the end of the hostname of the replica. If an exclamation mark (!) is used at the beginning of a pattern, any replicas matching the pattern will be excluded from the sorted replicas. .IR Example: .br preferredpattern="srm://myhost.ac.uk|.uk$|ndgf.org$|!badhost.org$" .TP .B copyurl copyurl url_head local_path - specifies that URLs starting from 'url_head' should be accessed in a different way (most probably unix open). The 'url_head' part of the URL will be replaced with 'local_path' and the file from the obtained path will be copied to the session directory. NOTE: 'local_path' can also be of URL type. You can have several copyurl lines. .IR Example: .br copyurl="gsiftp://example.org:2811/data/ gsiftp://example.org/data/" .br copyurl="gsiftp://example2.org:2811/data/ gsiftp://example2.org/data/" .TP .B linkurl linkurl url_head local_path [node_path] - identical to 'copyurl', only the file won't be copied, but a soft-link will be created. The 'local_path' specifies the way to access the file from the gatekeeper, and is used to check permissions. The 'node_path' specifies how the file can be accessed from computing nodes, and will be used for soft-link creation. If 'node_path' is missing, 'local_path' will be used.
you can have multiple linkurl settings .IR Example: .br linkurl="gsiftp://somewhere.org/data /data" .br linkurl="gsiftp://example.org:2811/data/ /scratch/data/" .TP .B maxtransfertries maxtransfertries - the maximum number of times download and upload will be attempted per job (retries are only performed if an error is judged to be temporary) .IR Example: .br maxtransfertries="10" .SH [gridftpd] block The .B [gridftpd] block configures the gridftpd server .TP .B user user user[:group] - Switch to a non root user/group after startup WARNING: Make sure that the certificate files are owned by the user/group specified by this option. Default value is root. .IR Example: .br user="grid" .TP .B debug debug debuglevel - Set debug level of the gridftpd daemon, between 0 (FATAL) and 5 (DEBUG). Default is 3 (INFO). .IR Example: .br debug="2" .TP .B daemon daemon yes|no - Whether GFS is run in daemon mode. Default is yes. .IR Example: .br daemon="yes" .TP .B logfile logfile path - Set logfile location .IR Example: .br logfile="/var/log/arc/gridftpd.log" .TP .B logsize logsize size [number] - 'Size' specifies in bytes how big log file is allowed to grow (approximately). If log file exceeds specified size it is renamed into logfile.0. And logfile.0 is renamed into logfile.1, etc. up to 'number' logfiles. Don't set logsize if you don't want to enable the ARC logrotation because another logrotation tool is used. .IR Example: .br logsize="100000 2" .TP .B pidfile pidfile path - Specify location of file containig PID of daemon process. This is useful for automatic star/stop scripts. .IR Example: .br pidfile="/var/run/gridftpd.pid" .TP .B port port bindport - Port to listen on (default 2811) .IR Example: .br port="2811" .TP .B pluginpath pluginpath - directory where the plugin libraries are installed, default is $ARC_LOCATION/lib(64)/arc .IR Example: .br pluginpath="/usr/lib/arc/" .TP .B encryption encryption yes|no - should data encryption be allowed, default is no, encryption is very heavy .IR Example: .br encryption="no" .TP .B include include - Include contents of another configuration file. .IR Example: .br include="path" .TP .B allowunknown allowunknown yes|no - if no, check user subject against grid-mapfile and reject if missing. By default unknown (not in the grid-mapfile) grid users are rejected .IR Example: .br allowunknown="no" .TP .B allowactivedata yes|no - if no, only passive data transfer is allowed. By default both passive and active data transfers are allowed. .IR Example .br allowactivedata="yes" .TP .B maxconnections maxconnections - maximum number of connections accepted by a gridftpd server. Default is 100. .IR Example: .br maxconnections="200" .TP .B defaultbuffer defaultbuffer size - defines size of every buffer for data reading/writing. Default is 65536. The actual value may decrease if the cumulative size of all buffers exceeds value specified by maxbuffer. .IR Example: .br defaultbuffer="65536" .TP .B maxbuffer maxbuffer size - defines maximal amount of memory in bytes to be allocated for all data reading/writing buffers. Default is 640kB. The number of buffers is (max {3, min {41, 2P + 1}}), where P is the parallelism level requested by the client. Hence, even without parallel streams enabled number of buffers will be 3. 
.IR Example: .br maxbuffer="655360" .TP .B globus_tcp_port_range globus_tcp_port_range, globus_udp_port_range - Firewall configuration .IR Example: .br globus_tcp_port_range="9000,12000" .br globus_udp_port_range="9000,12000" .TP .B firewall firewall - hostname or IP address to use in response to the PASV command instead of the IP address of a network interface of the computer. .IR Example: .br firewall="hostname" .TP .B x509_user_key x509_user_cert, x509_user_key - Server credentials location .IR Example: .br x509_user_key="/etc/grid-security/hostkey.pem" .br x509_user_cert="/etc/grid-security/hostcert.pem" .TP .B x509_cert_dir x509_cert_dir - Location of trusted CA certificates .IR Example: .br x509_cert_dir="/etc/grid-security/certificates" .TP .B gridmap gridmap - The gridmap file location. The default is /etc/grid-security/grid-mapfile .IR Example: .br gridmap="/etc/grid-security/grid-mapfile" .TP .B unixmap unixmap [unixname][:unixgroup] rule - more sophisticated way to map the Grid identity of a client to a local account. If a client matches 'rule' it is assigned the specified unix identity or one generated by the rule. Mapping commands are processed sequentially and processing stops at the first successful one (like in the [group] section). For possible rules read the "ARC Computing Element. System Administrator guide" manual. All rules defined in the [group] section can be used. There are also additional rules which produce not only a yes/no result but also give back the user and group names to which mapping should happen. The way it works is quite complex so it is better to read the full documentation. For safety reasons, if sophisticated mapping is used it is better to finish the mapping sequence with a default mapping to a nonexistent or safe account. .IR Example: .br unixmap="nobody:nogroup all" .TP .B unixgroup unixgroup group rule - do mapping only for users belonging to the specified authorization 'group'. It is similar to an additional filter for the unixmap command which filters out all users not belonging to the specified authorization group. Only rules which generate unix user and group names may be used in this command. Please read the "ARC Computing Element System Administrator Guide" for more information. .IR Example: .br unixgroup="users simplepool /etc/grid-security/pool/users" .TP .B unixvo unixvo vo rule - do mapping only for users belonging to the specified VO. Only rules which generate a unix identity name may be used in this command. Please read the "ARC Computing Element. System Administrator Guide" for more information. This command is similar to 'unixgroup' described above and exists for convenience for setups which base mapping on the VOs users belong to. .IR Example: .br unixvo="ATLAS unixuser atlas:atlas" .SH [gridftpd/filedir] block [gridftpd/filedir] "fileplugin" storage subblock for "exporting" a directory using the gridftpd's fileplugin plugin. gridftp plugins are shared libraries. "filedir" is a unique label. The access control is set by using the "dir" configuration option. .TP .B plugin plugin name - specifies the name of the shared library to be loaded, relative to "pluginpath". The next line is a MUST for a gridftp file server with "fileplugin", don't change anything .IR Example: .br plugin="fileplugin.so" .TP .B groupcfg groupcfg group_name [group_name ...] - specifies authorization groups for which this plugin is activated. In case groupcfg is not used the plugin is loaded for every mapped grid user. Multiple names may be specified, delimited by blank space. Group names are as specified in [group] sections. .IR Example: .br groupcfg="users"
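.P A sketch only (the block label and paths below are placeholders; the path, mount and dir options are described below) of a minimal read-only export: .br [gridftpd/pub] .br plugin="fileplugin.so" .br path="/pub" .br mount="/export/pub" .br dir="/ nouser read cd dirlist"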
.TP .B path the name of the virtual directory served by the gridftp server, REQUIRED. The exported storage area is accessible as gsiftp://my_server/topdir. "topdir" is just an example, call the virtual path anything you like, even "/" is a valid choice. .IR Example: .br path="/topdir" .TP .B mount the physical directory corresponding to the virtual one: gsiftp://my_server/topdir will give access to the /scratch/grid directory on my_server, REQUIRED .IR Example: .br mount="/scratch/grid" .TP .B dir dir - this is the access control parameter, you can have several "dir" lines controlling different directories within the same block. dir path options - specifies access rules for accessing files in 'path' (relative to virtual and real path) and all the files and directories below. 'options' are: nouser - do not use local file system rights, only use those specified in this line owner - check only file owner access rights group - check only group access rights other - check only "others" access rights if none of the above is specified, usual unix access rights are applied. read - allow reading files delete - allow deleting files append - allow appending files (does not allow creation) overwrite - allow overwriting already existing files (does not allow creation, file attributes are not changed) dirlist - allow obtaining list of the files cd - allow making this directory current create owner:group permissions_or:permissions_and - allow creating new files. The file will be owned by 'owner' and the owning group will be 'group'. If '*' is used, the user/group to which the connected user is mapped will be used. The permissions will be set to permissions_or & permissions_and. (the second number is reserved for future usage). mkdir owner:group permissions_or:permissions_and - allow creating new directories. .IR Example: .br Set permissions on mounted directory: .br dir="/ nouser read cd dirlist delete create *:* 664:664 mkdir *:* 775:775" .IR Example: .br Adjust permissions on some subdirectories: .br dir="/section1 nouser read mkdir *:* 700:700 cd dirlist" .br dir="/section2 nouser read mkdir *:* 700:700 cd dirlist" .SH [gridftpd/jobs] subblock [gridftpd/jobs] subblock which creates the job submission interface, using the jobplugin of the gridftpd service. gridftp plugins are shared libraries. 'jobs' is a unique label. .TP .B path the path to the virtual gridftpd directory which is used during the job submission. MUST be set. .IR Example: .br path="/jobs" .TP .B plugin plugin name - specifies the name of the shared library to be loaded, relative to "pluginpath". The next line is a MUST for a job submission service via gridftpd "jobplugin", don't change anything! .IR Example: .br plugin="jobplugin.so" .TP .B groupcfg groupcfg group_name [group_name ...] - specifies authorization groups for which this plugin is activated. In case groupcfg is not used the plugin is loaded for every mapped grid user. .IR Example: .br groupcfg="users" .TP .B allownew The 'allownew' configuration parameter sets whether the grid resource accepts submission of new jobs. This parameter can be used to close down a grid. The default is yes. .IR Example: .br allownew="yes" .TP .B remotegmdirs remotegmdirs controldir sessiondir - Specifies control and session directories to which jobs can be submitted but which are under the control of another A-REX. The corresponding controldir and sessiondir parameters must be defined in another A-REX's configuration. Multiple remotegmdirs can be specified.
.IR Example: .br remotegmdirs="/mnt/host1/control /mnt/host1/session" .TP .B maxjobdesc maxjobdesc size - specifies maximal allowed size of job description in bytes. Default value is 5MB. If value is missing or 0 size is not limited. .IR Example: .br maxjobdesc="5242880" .TP .B configfile configfile service_configuration_path - If [gridftpd] and [grid-manager] configuration parts are located in separate files this configuration option allows to link them. The service_configuration_path points to configuration file containing [grid-manager] section. Use this option only if You really know what You are doing. .IR Example: .br configfile="/etc/arc.conf" .SH [infosys] block [infosys] block configures the hosting environment of the Information services (Local Info Tree, Index Service, Registrations, see the Information System manual) provided by the OpenLDAP slapd server. .TP .B infosys_compat infosys_compat - Setting this variable will cause ARC to use the old infoproviders. Basically, the new version uses A-REX to create LDIF while the old version uses a BDII provider-script to do it. The new version is required for GLUE2 output. .IR Example: .br infosys_compat="disable" .TP .B infoproviders_timeout infoproviders_timeout - this only applies to new infoproviders. it changes A-REX behaviour with respect to a single infoprovider run. Increase this value if you have many jobs in the controldir and infoproviders need more time to process. The value is in seconds. Default is 600 seconds. .IR Example: .br infoproviders_timeout = "600" .TP .B debug debug - sets the debug level/verbosity of the startup script {0 or 1}. Default is 0. .IR Example: .br debug="1" .TP .B hostname hostname - the hostname of the machine running the slapd service will be the bind for slapd. If not present, will be taken from the [common] block or guessed .IR Example: .br hostname="my.testbox" .TP .B port port - the port where the slapd service runs. Default infosys port is 2135. .IR Example: .br port="2135" .TP .B slapd_loglevel slapd_loglevel - sets the native slapd loglevel (see man slapd). Slapd logs via syslog. The default is set to no-logging (0) and it is RECOMMENDED not to be changed in a production environment. Non-zero slap_loglevel value causes serious performance decrease. .IR Example: .br slapd_loglevel="0" .TP .B slapd_hostnamebind slapd_hostnamebind - may be used to set the hostname part of the network interface to which the slapd process will bind. Most of the cases no need to set since the hostname configuration parameter is already sufficient. The default is empty. The example below will bind the slapd process to all the network interfaces available on the server. .IR Example: .br slapd_hostnamebind="*" .TP .B threads threads - the native slapd threads parameter, default is 32. If you run an Index service too you should modify this value. .IR Example: .br threads="128" .TP .B timelimit timelimit - the native slapd timelimit parameter. Maximum number of seconds the slapd server will spend answering a search request. Default is 3600. You probably want a much lower value. .IR Example: .br timelimit="1800" .TP .B idletimeout idletimeout - the native slapd idletimeout parameter. Maximum number of seconds the slapd server will wait before forcibly closing idle client connections. Its value must be larger than the value of "timelimit" option. If not set, it defaults to timelimit + 1. .IR Example: .br idletimeout="1800" .TP .B ldap_schema_dir ldap_schema_dir - allows to explicitly specify a path to the schema files. 
Note that this doesn't override standard location, but adds the specified path to the standard locations /etc/ldap and /etc/openldap. If you plan to relocate Glue1 and GLUE2 schemas, all these should be in the same directory that you specify here. this option does NOT apply to nordugrid.schema file. Such file has a release dependent location. Default is to use only standard locations described above. .IR Example: .br ldap_schema_dir="/nfs/ldap/schema/" .TP .B oldconfsuffix oldconfsuffix .suffix - sets the suffix of the backup files of the low-level slapd configuration files in case they are regenerated. Default is ".oldconfig". .IR Example: .br oldconfsuffix=".oldconfig" .TP .B overwrite_config overwrite_config yes|no - determines if the infosys startup scripts should generate new low-level slapd configuration files. By default the low-level configuration files are regenerated with every server startup making use of the values specified in the arc.conf. .IR Example: .br overwrite_config="yes" .TP .B registrationlog registrationlog path - specifies the logfile for the registration processes initiated by your machine. Default is "/var/log/arc/inforegistration.log" .IR Example: .br registrationlog="/var/log/arc/inforegistration.log" .TP .B providerlog providerlog path - Specifies log file location for the information provider scripts. The feature is only available with >= 0.5.26 tag. Default is "/var/log/arc/infoprovider.log" .IR Example: .br providerlog="/var/log/arc/infoprovider.log" .TP .B provider_loglevel provider_loglevel - loglevel for the infoprovider scripts (0-5). The default is 1 (critical errors are logged) .IR Example: .br provider_loglevel="2" .TP .B user user unix_user - the unix user running the infosys processes such as the slapd, the registrations and infoprovider scripts. By default the ldap-user is used, you can run it as root if you wish. In case of non-root value you must make sure that the A-REX directories and their content are readable by the 'user' and the 'user' has access to the full LRMS information including jobs submitted by other users. The A-REX directories (controldir, sessiondir runtimedir, cachedir) are specified in the [grid-manager] block .IR Example: .br user="root" .TP .B giis_location giis_location - If giis_location is not set, ARC_LOCATION will be used instead. .IR Example: .br giis_location="/usr/" .TP .B infosys_nordugrid These three variables decide which schema should be used for publishing data. They can all be enabled at the same time. Default is to enable nordugrid mds and disable glue. infosys_nordugrid - Enables NorduGrid schema .IR Example: .br infosys_nordugrid="enable" .TP .B infosys_glue12 infosys_glue12 - Enables glue1.2/1.3 schema If infosys_glue12 is enabled, then resource_location, resource_latitude and resource_longitude need to be set in the [infosys/glue12] block. These variables do not have default values. The rest of the variables defaults are showcased below. .IR Example: .br infosys_glue12="disable" .TP .B infosys_glue2_ldap infosys_glue2 - Enables GLUE2 schema .IR Example: .br infosys_glue2_ldap="disable" .TP .B infosys_glue2_ldap_showactivities infosys_glue2_ldap_showactivities - Enables GLUE2 ComputingActivities to appear in the LDAP rendering they're currently disabled by default. .IR Example: .br infosys_glue2_ldap_showactivities="disable" .TP .B infosys_glue2_service_qualitylevel infosys_glue2_service_qualitylevel - Allows a sysadmin to define a different GLUE2 QualityLevel for A-REX. This can be used for operations. 
default: production Allowed value is one of: "production", "pre-production", "testing", "development" Refer to GLUE2 documentation for the meaning of these strings. .IR Example: .br infosys_glue2_service_qualitylevel="production" .TP .B slapd slapd - Configure where the slapd command is located, default is: /usr/sbin/slapd .IR Example: .br slapd="/usr/sbin/slapd" .TP .B slapadd slapadd - Configure where the slapadd command is located, default is: /usr/sbin/slapadd .IR Example: .br slapadd="/usr/sbin/slapadd" .SH BDII specific Starting from 11.05, Nordugrid ARC only supports BDII5. These variables are usually automatically set by ARC, and are here mostly for debug purposes and to tweak exotic BDII5 installations. In general, a sysadmin should not set these. .TP .B bdii_debug_level bdii_debug_level - set the following to DEBUG to check bdii errors in bdii-update.log useful not to enable slapd logs reducing performance issues. .IR Example: .br bdii_debug_level="ERROR" .TP .B provider_timeout provider_timeout - This variable allows a system administrator to modify the behaviour of bdii-update. This is the time BDII waits for the scripts generated by A-REX infoproviders to produce their output. Default is 300 seconds. .IR Example: .br provider_timeout=300 .TP .B infosys_debug infosys_debug - This variable disables/enables an ldap-database containing information about the ldap database itself on "o=infosys" it is very useful for debugging. Default is enabled. .IR Example: .br infosys_debug="disable" .P BDII5 uses the following variables. These might change depending on BDII version. ARC sets them by inspecting distributed bdii configuration files. .B DO NOT CHANGE UNLESS YOU KNOW WHAT YOU'RE DOING .TP .B bdii_location bdii_location - The installation directory for the BDII. Default is /usr .IR Example: .br bdii_location="/usr" .TP .B bdii_var_dir bdii_var_dir - Contains BDII pid files and slapd pid files .IR Example: .br bdii_var_dir="/var/run/arc/bdii" .TP .B bdii_log_dir bdii_log_dir - Contains infosys logs .IR Example: .br bdii_log_dir="/var/log/arc/bdii" .TP .B bdii_tmp_dir bdii_tmp_dir - Contains provider scripts .IR Example: .br bdii_tmp_dir="/var/tmp/arc/bdii" .TP .B bdii_lib_dir bdii_lib_dir - Contains slapd databases .IR Example: .br bdii_lib_dir="/var/lib/arc/bdii" .TP .B bdii_update_pid_file bdii_update_pid_file, slapd_pid_file - Allows to change bdii-update and slapd pidfiles filename and location .IR Example: .br bdii_update_pid_file="/var/run/arc/bdii-update.pid" .br slapd_pid_file="$bdii_var_dir/db/slapd.pid" .TP .B bdii_database bdii_database - Configure what ldap database backend should be used, default is: bdb .IR Example: .br bdii_database="bdb" .P The following options are for tweaking only. Usually one should not configure them. They change the BDII configuration file generated by ARC. Please consult BDII manual for details. .TP .B bdii_conf bdii_conf - Location of the bdii configuration file. ARC modifies the original and sets it as default /var/run/arc/infosys/bdii.conf .IR Example: .br bdii_conf="/var/run/arc/infosys/bdii.conf" .P Command line options used to run bdii-update. ARC finds it looking into bdii configuration. 
default: ${bdii_location}/sbin/bdii-update .B bdii_update_cmd .br .B bdii_archive_size .br .B bdii_db_config .br .B bdii_breathe_time .br .B bdii_delete_delay .br .B bdii_read_timeout .br .B bdii_run_dir .br .B bindmethod .br .B cachettl .br .B db_archive .br .B db_checkpoint .SH EGIIS-related commands .TP .B giis_fifo giis_fifo - path to the fifo used by EGIIS. Default is /var/run/arc/giis-fifo. This file is automatically created by ARC, the option is only for tweaking. .IR Example: .br giis_fifo=/var/run/arc/giis-fifo .P LDAP parameters of the cluster.pl (old) infoprovider, use the defaults, do NOT change them unless you know what you are doing. .TP .B cachetime cachetime affects old infoproviders, and forces the validity time of the record. .IR Example: .br cachetime="30" .TP .B sizelimit sizelimit affects registration to EGIIS .IR Example: .br sizelimit="10" .TP .B slapd_cron_checkpoint slapd_cron_checkpoint - LDAP checkpoint enable/disable. This option was introduced to solve bug #2032, to reduce the number of log files produced by BDII. It is usually not needed, but if BDII produces large logs and a huge number of files, it should help solve the related issues. .IR Example: .br slapd_cron_checkpoint="enable" .SH [infosys/glue12] block This block holds information that is needed by the glue 1.2 generation. This is only necessary if infosys_glue12 is enabled. .TP .B resource_location These variables need to be set if infosys_glue12 is enabled. IMPORTANT: no slashes or backslashes here! Example: "Kastrup, Denmark" .IR Example: .br resource_location="" .TP .B resource_latitude Example: "55.75000" .IR Example: .br resource_latitude="" .TP .B resource_longitude Example: "12.41670" .IR Example: .br resource_longitude="" .TP .B cpu_scaling_reference_si00 Example 2400 .IR Example: .br cpu_scaling_reference_si00="" .TP .B processor_other_description Example Cores=3,Benchmark=9.8-HEP-SPEC06 .IR Example: .br processor_other_description="" .TP .B glue_site_web Example http://www.ndgf.org .IR Example: .br glue_site_web="" .TP .B glue_site_unique_id Example NDGF-T1 .IR Example: .br glue_site_unique_id="" .TP .B provide_glue_site_info This variable decides if the GlueSite should be published. In case you want to set up a more complicated setup with several publishers of data to a GlueSite, then you may wish to tweak this parameter. .IR Example: .br provide_glue_site_info="true" .SH [infosys/site/sitename] block [infosys/site/sitename] Site BDII configuration block, this block is used to configure ARC to generate a site-bdii that can be registered in GOCDB etc. to make it part of a gLite network. The sitename part should describe the site-bdii being generated. .TP .B unique_id The unique id used to identify this site, e.g. "NDGF-T1" .IR Example: .br unique_id="" .TP .B url The URL is of the format: ldap://host.domain:2170/mds-vo-name=something,o=grid and should point to the resource-bdii .IR Example: .br url="" .SH [infosys/admindomain] block [infosys/admindomain] GLUE2 AdminDomain configuration block, to configure administrative items of the cluster. These values affect neither the glue12 nor the nordugrid renderings. If the whole block is not specified, it will default to an AdminDomain called UNDEFINEDVALUE. .TP .B name name - the Name attribute for the domain. This will show in the top-BDII to group the resources belonging to this cluster. To group several clusters under the same AdminDomain, just use the same name. If not specified, it will default to UNDEFINEDVALUE. .IR Example: .br name="ARC-TESTDOMAIN"
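.P A sketch of a complete [infosys/admindomain] block, reusing the example values from this section (the remaining options are described below): .br [infosys/admindomain] .br name="ARC-TESTDOMAIN" .br description="ARC test Domain" .br www="http://www.nordugrid.org/" .br owner=admin@nordugrid.org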
.TP .B description description - description of this domain. Not mandatory. .IR Example: .br description="ARC test Domain" .TP .B www www - URL pointing at a site holding information about the AdminDomain. Not mandatory. .IR Example: .br www="http://www.nordugrid.org/" .TP .B distributed distributed - set this to yes if the domain is distributed, that is, if the resources belonging to the domain are considered geographically distributed. .IR Example: .br distributed=yes .TP .B owner owner - contact email of a responsible subject for the domain .IR Example: .br owner=admin@nordugrid.org .TP .B otherinfo otherinfo - fills the OtherInfo GLUE2 field. No need to set; used only for future development. .IR Example: .br otherinfo=Test Other info .SH [infosys/index/indexname] block [infosys/index/indexname] Index Service block configures and enables an Information Index Service. A separate Index block is required for every Index Service you may run on the given machine. The 'indexname' constitutes the 'mds-vo-name=indexname,o=grid' LDAP suffix characterizing the Index Service. .TP .B name name - The unique (within the hosting machine) name of the Index Service. Its value becomes part of the LDAP suffix of the Index Service: (mds-vo-name=value of the name attribute, o=grid) .IR Example: .br name="indexname" .TP .B allowreg allowreg - Implements registration filtering within an Index Service. Sets the Local Information Trees or lower level Index Services allowed to register to the Index Service. List each allowed registrant with the allowreg attribute. WARNING: specifying allowreg implies setting up a strict filtering, only the matching registrants will be able to register to the Index. The wildcard * can be used in allowreg. Several allowreg lines can be used. Some examples: -All the Swedish machines can register regardless of whether they are resources or Indices allowreg="*.se:2135" -Cluster resources from Denmark can register allowreg="*.dk:2135/nordugrid-cluster-name=*, Mds-Vo-name=local, o=grid" -Storage resources from HIP, Finland can register allowreg="*hip.fi:2135/nordugrid-se-name=*, Mds-Vo-name=local, o=grid" -The index1.sweden.se can register as a Sweden Index (and only as a Sweden Index) allowreg="index1.sweden.se:2135/Mds-vo-Name=Sweden,o=Grid" -Any Index Service can register allowreg="*:2135/Mds-vo-Name=*,o=Grid" .IR Example: .br allowreg="trusted.host.org.se:2135/Mds-vo-Name=Trusted-Index,o=Grid" .SH [infosys/index/indexname/registration/registrationname] block [infosys/index/indexname/registration/registrationname] Index service registration block. This block enables a registration process initiated by the 'indexname' Index Service (configured previously) to a target Index Service. NorduGrid maintains a webpage with information on major Index Services: http://www.nordugrid.org/NorduGridMDS/index_service.html .TP .B targethostname targethostname - the hostname of the machine running the registration target Index Service .IR Example: .br targethostname="index.myinstitute.org" .TP .B targetport targetport - the port on which the target Index Service is running. The default is the 2135 Infosys port. .IR Example: .br targetport="2135" .TP .B targetsuffix targetsuffix - the LDAP suffix of the target Index Service .IR Example: .br targetsuffix="mds-vo-name=BigIndex,o=grid" .TP .B regperiod regperiod - The registration period in seconds, the registration messages are continuously sent according to the regperiod. Default is 120 sec.
.IR Example: .br regperiod="300" .TP .B registranthostname registranthostname - the hostname of the machine sending the registrations. This attribute inherits its value from the [common] and [infosys] blocks, most cases no need to set. .IR Example: .br registranthostname="myhost.org" .TP .B registrantport registrantport - the port of the slapd service hosting the registrant Index Service. The attribute inherits its value from the [infosys] block (and therefore defaults to 2135) .IR Example: .br registrantport="2135" .TP .B registrantsuffix registrantsuffix - the LDAP suffix of the registrant Index Service. It is automatically determined from the registration block name, therefore most of the cases no need to specify. In this case the default registrantsuffix will be: "Mds-Vo-name=indexname" please mind uppercase/lowercase characters in the above string when defining allowreg in an index! Don't set it unless you want to overwrite the default. .IR Example: .br registrantsuffix="mds-vo-name=indexname,o=grid" .br .SH [cluster] block This block configures how your cluster is seen on the grid monitor (infosys point of view). Please consult the Infosys manual for detailed information on cluster attributes. If you want your cluster (configured below) to appear in the infosys (on the monitor) you also need to create a cluster registration block (see the next block). .TP .B hostname hostname - the FQDN of the frontend node, if the hostname is not set already in the common block then it MUST be set here .IR Example: .br hostname="myhost.org" .TP .B interactive_contactstring interactive_contactstring - the contact string for interactive logins, set this if the cluster supports some sort of grid-enabled interactive login (gsi-ssh), multivalued .IR Example: .br interactive_contactstring="gsissh://frontend.cluster:2200" .TP .B cluster_alias alias - an arbitrary alias name of the cluster, optional .IR Example: .br cluster_alias="Big Blue Cluster in Nowhere" .TP .B comment comment - a free text field for additional comments on the cluster in a single line, no newline character is allowed! .IR Example: .br comment="This cluster is specially designed for XYZ applications: www.xyz.org" .TP .B cluster_location cluster_location - The geographical location of the cluster, preferably specified as a postal code with a two letter country prefix .IR Example: .br cluster_location="DK-2100" .TP .B cluster_owner cluster_owner - it can be used to indicate the owner of a resource, multiple entries can be used .IR Example: .br cluster_owner="World Grid Project" .br cluster_owner="University of NeverLand" .TP .B authorizedvo authorizedvo - this attribute is used to advertise which VOs are authorized on the cluster. Multiple entries are allowed. This entries will be shown in GLUE2 AccessPolicy and MappingPolicy objects. .IR Example: .br authorizedvo="developer.nordugrid.org" .br authorizedvo="community.nordugrid.org" .TP .B clustersupport clustersupport - this is the support email address of the resource, multiple entries can be used .IR Example: .br clustersupport="grid.support@mysite.org" .br clustersupport="grid.support@myproject.org" .TP .B lrmsconfig lrmsconfig - an optional free text field to describe the configuration of your Local Resource Management System (batch system). .IR Example: .br lrmsconfig="single job per processor" .TP .B homogeneity homogeneity - determines whether the cluster consists of identical NODES with respect to cputype, memory, installed software (opsys). 
The frontend is NOT needed to be homogeneous with the nodes. In case of inhomogeneous nodes, try to arrange the nodes into homogeneous groups assigned to a queue and use queue-level attributes. Possible values: True,False, the default is True. False will trigger multiple GLUE2 ExecutionEnvironments to be published if applicable. .IR Example: .br homogeneity="True" .TP .B architecture architecture - sets the hardware architecture of the NODES. The "architecture" is defined as the output of the "uname -m" (e.g. i686). Use this cluster attribute if only the NODES are homogeneous with respect to the architecture. Otherwise the queue-level attribute may be used for inhomogeneous nodes. If the frontend's architecture agrees to the nodes, the "adotf" (Automatically Determine On The Frontend) can be used to request automatic determination. .IR Example: .br architecture="adotf" .TP .B opsys opsys - this multivalued attribute is meant to describe the operating system of the computing NODES. Set it to the opsys distribution of the NODES and not the frontend! opsys can also be used to describe the kernel or libc version in case those differ from the originally shipped ones. The distribution name should be given as distroname-version.number, where spaces are not allowed. Kernel version should come in the form kernelname-version.number. If the NODES are inhomogeneous with respect to this attribute do NOT set it on cluster level, arrange your nodes into homogeneous groups assigned to a queue and use queue-level attributes. .IR Example: .br opsys="Linux-2.6.18" .br opsys="glibc-2.5.58" .br opsys="CentOS-5.6" .TP .B nodecpu nodecpu - this is the cputype of the homogeneous nodes. The string is constructed from the /proc/cpuinfo as the value of "model name" and "@" and value of "cpu MHz". Do NOT set this attribute on cluster level if the NODES are inhomogeneous with respect to cputype, instead arrange the nodes into homogeneous groups assigned to a queue and use queue-level attributes. Setting the nodecpu="adotf" will result in Automatic Determination On The Frontend, which should only be used if the frontend has the same cputype as the homogeneous nodes. .IR Example: .br nodecpu="AMD Duron(tm) Processor @ 700 MHz" .TP .B nodememory nodememory - this is the amount of memory (specified in MB) on the node which can be guaranteed to be available for the application. Please note in most cases it is less than the physical memory installed in the nodes. Do NOT set this attribute on cluster level if the NODES are inhomogeneous with respect to their memories, instead arrange the nodes into homogeneous groups assigned to a queue and use queue-level attributes. .IR Example: .br nodememory="512" .TP .B defaultmemory defaultmemory - If a user submits a job without specifying how much memory should be used, this value will be taken first. The order is: xrsl -> defaultmemory -> nodememory -> 1GB. This is the amount of memory (specified in MB) that a job will request(per rank). .IR Example: .br defaultmemory="512" .TP .B benchmark benchmark name value - this optional multivalued attribute can be used to specify benchmark results on the cluster level. Use this cluster attribute if only the NODES are homogeneous with respect to the benchmark performance. Otherwise the similar queue-level attribute should be used. Please try to use one of standard benchmark names given below if possible. 
.IR Example:
.br
benchmark="SPECINT2000 222"
.br
benchmark="SPECFP2000 333"
.TP
.B middleware
middleware - this multivalued attribute shows the installed grid software on the cluster. nordugrid and globus-ng are automatically set, so there is no need to specify middleware=nordugrid or middleware=globus
.IR Example:
.br
middleware="my grid software"
.TP
.B nodeaccess
nodeaccess - determines how the nodes can connect to the internet. Not setting anything means the nodes are sitting on a private isolated network. "outbound" access means the nodes can connect to the outside world while "inbound" access means the nodes can be connected from outside. inbound & outbound access together means the nodes are sitting on a fully open network.
.IR Example:
.br
nodeaccess="inbound"
.br
nodeaccess="outbound"
.TP
.B dedicated_node_string
dedicated_node_string - the string which is used in the PBS node configuration to distinguish the grid nodes from the rest. Suppose only a subset of nodes are available for grid jobs, and these nodes have a common "node property" string; in this case the dedicated_node_string should be set to this value and only the nodes with the corresponding "pbs node property" are counted as grid-enabled nodes. Setting the dedicated_node_string to the value of the "pbs node property" of the grid-enabled nodes will influence how the totalcpus and user freecpus are calculated. You don't need to set this attribute if your cluster is fully available for the grid and your cluster's PBS configuration does not use the "node property" method to assign certain nodes to grid queues. You shouldn't use this configuration option unless you make sure your PBS configuration makes use of the above described setup.
.IR Example:
.br
dedicated_node_string="gridnode"
.TP
.B localse
localse - this multivalued parameter tells the BROKER that certain URLs (and locations below that) should be considered "locally" available to the cluster.
.IR Example:
.br
localse="gsiftp://my.storage/data1/"
.br
localse="gsiftp://my.storage/data2/"
.TP
.B gm_mount_point
gm_mount_point - this is the same as the "path" from the [gridftpd/jobs] block. The default is "/jobs". Will be cleaned up later, do NOT touch it.
.IR Example:
.br
gm_mount_point="/jobs"
.TP
.B gm_port
gm_port - this is the same as the "port" from the [gridftpd] block. The default is "2811". Will be cleaned up later.
.IR Example:
.br
gm_port="2811"
.TP
.B cpudistribution
cpudistribution - this is the CPU distribution over nodes given in the form ncpu:m, where n is the number of CPUs per machine and m is the number of such machines. For example, 1cpu:3,2cpu:4,4cpu:1 represents a cluster with 3 single CPU machines, 4 dual CPU machines and one machine with 4 CPUs. This command is needed to tweak and overwrite the values returned by the underlying LRMS. In general there is no need to configure it.
.IR Example:
.br
cpudistribution=1cpu:3,2cpu:4,4cpu:1
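.PP
For illustration only, a minimal [cluster] block assembled from the attributes above could look as follows; all values are the site-specific examples used on this page, not defaults or recommendations:
.br
[cluster]
.br
hostname="myhost.org"
.br
cluster_alias="Big Blue Cluster in Nowhere"
.br
cluster_location="DK-2100"
.br
cluster_owner="University of NeverLand"
.br
clustersupport="grid.support@mysite.org"
.br
authorizedvo="community.nordugrid.org"
.br
homogeneity="True"
.br
architecture="adotf"
.br
opsys="CentOS-5.6"
.br
nodecpu="adotf"
.br
nodememory="512"
.br
nodeaccess="outbound"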
.SH [infosys/cluster/registration/registrationname] block
Computing resource (cluster) registration block configures and enables the registration process of a computing resource to an Index Service. A cluster can register to several Index Services; in that case each registration process should have its own block. NorduGrid maintains a webpage with information on major Index Services: http://www.nordugrid.org/NorduGridMDS/index_service.html
.TP
.B targethostname
targethostname - see description earlier
.IR Example:
.br
targethostname="index.myinstitute.org"
.TP
.B targetport
targetport - see description earlier
.IR Example:
.br
targetport="2135"
.TP
.B targetsuffix
targetsuffix - see description earlier
.IR Example:
.br
targetsuffix="mds-vo-name=BigIndex,o=grid"
.TP
.B regperiod
regperiod - see description earlier
.IR Example:
.br
regperiod="300"
.TP
.B registranthostname
registranthostname - see description earlier
.IR Example:
.br
registranthostname="myhost.org"
.TP
.B registrantport
registrantport - see description earlier
.IR Example:
.br
registrantport="2135"
.TP
.B registrantsuffix
registrantsuffix - the LDAP suffix of the registrant cluster resource. It is automatically determined from the [infosys] block and the registration blockname. In this case the default registrantsuffix will be: "nordugrid-cluster-name=hostname,Mds-Vo-name=local,o=Grid". Please mind uppercase/lowercase characters above when defining allowreg in an index! Don't set it unless you want to overwrite the default.
.IR Example:
.br
registrantsuffix="nordugrid-cluster-name=myhost.org,Mds-Vo-name=local,o=grid"
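.PP
As a sketch, a registration block pointing the cluster to an Index Service could be assembled as below; the block label and all values are illustrative examples reused from this page:
.br
[infosys/cluster/registration/toBigIndex]
.br
targethostname="index.myinstitute.org"
.br
targetport="2135"
.br
targetsuffix="mds-vo-name=BigIndex,o=grid"
.br
regperiod="300"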
.SH [queue/queue_name] block
Each grid-enabled queue should have a separate queue block. The queuename should be used as a label in the block name. A queue can represent a PBS/LSF/SGE/SLURM/LL queue, a SGE pool, a Condor pool or a single machine in case the 'fork' type of LRMS is specified in the [common] block. Queues don't need to be registered (there is no queue registration block); once you have configured your cluster to register to an Index Service the queue entries (configured with this block) will automatically be there. Please consult the ARC Information System manual for detailed information on queue attributes: http://www.nordugrid.org/documents/arc_infosys.pdf Use the queue_name for labeling the block. The special name 'fork' should be used for labeling the queue block in case you specified the 'fork' type of LRMS in the [common] block.
.TP
.B name
name sets the name of the grid-enabled queue. It MUST match the queue_name label of the corresponding queue block, see above. Use "fork" if you specified the 'fork' type of LRMS in the [common] block. The queue name MUST be specified, even if the queue block is already correctly labeled.
.IR Example:
.br
name="gridlong"
.TP
.B homogeneity
homogeneity - determines whether the queue consists of identical NODES with respect to cputype, memory, installed software (opsys). In case of inhomogeneous nodes, try to arrange the nodes into homogeneous groups and assign them to a queue. Possible values: True, False; the default is True.
.IR Example:
.br
homogeneity="True"
.TP
.B scheduling_policy
scheduling_policy - this optional parameter specifies the scheduling policy of the queue. PBS by default offers the FIFO scheduler; many sites run MAUI. At the moment FIFO & MAUI are supported. If you have a MAUI scheduler you should specify the "MAUI" value since it modifies the way the queue resources are calculated. By default the "FIFO" scheduler is assumed.
.IR Example:
.br
scheduling_policy="FIFO"
.TP
.B comment
comment - a free text field for additional comments on the queue in a single line, no newline character is allowed!
.IR Example:
.br
comment="This queue is nothing more than a condor pool"
.TP
.B maui_bin_path
maui_bin_path - set this parameter to the path of the maui commands like showbf in case you specified the "MAUI" scheduling_policy above. This parameter can be set in the [common] block as well.
.IR Example:
.br
maui_bin_path="/usr/local/bin"
.TP
.B queue_node_string
queue_node_string - In PBS you can assign nodes to a queue (or a queue to nodes) by using the "node property" PBS node configuration method and assigning the marked nodes to the queue (setting the resources_default.neednodes = queue_node_string for that queue). This parameter should contain the "node property" string of the queue-assigned nodes. Setting the queue_node_string changes how the queue-totalcpus and user freecpus are determined for this queue. Essentially, the queue_node_string value is used to construct the nodes= string in the PBS script, such as nodes=count:queue_node_string where count is taken from the job description (1 if not specified). You shouldn't use this option unless you are sure that your PBS configuration makes use of the above configuration. Read the NorduGrid PBS instructions for more information: http://www.nordugrid.org/documents/pbs-config.html
.IR Example:
.br
queue_node_string="gridlong_nodes"
.br
queue_node_string="ppn=4:ib"
.TP
.B sge_jobopts
sge_jobopts - additional SGE options to be used when submitting jobs to SGE from this queue. If in doubt, leave it commented out
.IR Example:
.br
sge_jobopts="-P atlas -r yes"
.TP
.B condor_requirements
condor_requirements - only needed if using Condor. It needs to be defined for each queue. Use this option to determine which nodes belong to the current queue. The value of 'condor_requirements' must be a valid constraints string which is recognized by a condor_status -constraint '....' command. It can reference pre-defined ClassAd attributes (like Memory, Opsys, Arch, HasJava, etc) but also custom ClassAd attributes. To define a custom attribute on a condor node, just add two lines like the ones below in the `hostname`.local config file on the node:
.br
NORDUGRID_RESOURCE=TRUE
.br
STARTD_EXPRS = NORDUGRID_RESOURCE, $(STARTD_EXPRS)
.br
A job submitted to this queue is allowed to run on any node which satisfies the 'condor_requirements' constraint. If 'condor_requirements' is not set, jobs will be allowed to run on any of the nodes in the pool. When configuring multiple queues, you can differentiate them based on memory size or disk space, for example:
.IR Example:
.br
condor_requirements="(OpSys == "linux" && NORDUGRID_RESOURCE && Memory >= 1000 && Memory < 2000)"
.TP
.B lsf_architecture
CPU architecture to request when submitting jobs to LSF. Use only if you know what you are doing.
.IR Example:
.br
lsf_architecture="PowerPC"
.TP
.B totalcpus
totalcpus - manually sets the number of cpus assigned to the queue. There is no need to specify the parameter in case the queue_node_string method was used to assign nodes to the queue (in this case it is dynamically calculated and the static value is overwritten) or when the queue has access to the entire cluster (in this case the cluster-level totalcpus is the relevant parameter). Use this static parameter only if some special method is applied to assign a subset of totalcpus to the queue.
.IR Example:
.br
totalcpus="32"
.TP
.B nodecpu
queue-level configuration parameters: nodecpu, nodememory, architecture, opsys and benchmark should be set if they are homogeneous over the nodes assigned to the queue AND they are different from the cluster-level value.
Their meanings are described in the cluster block. Usage: this queue collects nodes with "nodememory=512" while another queue has nodes with "nodememory=256" -> don't set the cluster attributes but use the queue-level attributes. When the frontend's architecture or cputype agrees with the queue nodes, "adotf" (Automatically Determine On The Frontend) can be used to request automatic determination of architecture or nodecpu.
.IR Example:
.br
nodecpu="adotf"
.br
nodememory="512"
.br
architecture="adotf"
.br
opsys="Fedora 16"
.br
opsys="Linux-3.0"
.br
benchmark="SPECINT2000 222"
.br
benchmark="SPECFP2000 333"
.TP
.B ac_policy
queue access policy rules based on VOMS attributes in the user's proxy certificate (requires the arc-vomsac-check plugin to be enabled). Matching rules have the following format: ac_policy="[+/-]VOMS: " Please read the arc-vomsac-check manual page for more information.
.IR Example:
.br
ac_policy="-VOMS: /badvo"
.br
ac_policy="VOMS: /.*/Role=production"
.TP
.B authorizedvo
authorizedvo - this attribute is used to advertise which VOs are authorized on the specific queue. Multiple entries are allowed. These entries will be shown in the MappingPolicy objects. If something is already defined in the [cluster] block, the shown VOs will be the union set of those defined in [cluster] with those specific to this [queue] block.
.IR Example:
.br
authorizedvo="LocalUsers"
.br
authorizedvo="atlas"
.br
authorizedvo="community.nordugrid.org"
.TP
.B cachetime
this affects old infoproviders, and forces the validity time of the record.
.IR Example:
.br
cachetime="30"
.TP
.B sizelimit
sizelimit affects registration to EGIIS
.IR Example:
.br
sizelimit="5000"
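.PP
For illustration only, a simple queue block for a queue named gridlong could be assembled from the attributes above as follows; all values are examples taken from this page, not recommendations:
.br
[queue/gridlong]
.br
name="gridlong"
.br
homogeneity="True"
.br
scheduling_policy="FIFO"
.br
nodecpu="adotf"
.br
nodememory="512"
.br
architecture="adotf"
.br
opsys="Linux-3.0"
.br
authorizedvo="atlas"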
.SH [registration/emir] block
Services registration into EMIR block configures and enables the registration process of the services enabled in this configuration file into the EMI indexing service (EMIR).
.TP
.B emirurls
Comma-separated list of URLs of EMIR services which are to accept the registration. This is mandatory.
.IR Example:
.br
emirurls="https://somehost:60002/emir"
.TP
.B validity
Time in seconds for which registration records should stay valid.
.IR Example:
.br
validity=600
.TP
.B period
Time in seconds defining how often the registration record should be sent to the registration service.
.IR Example:
.br
period=60
.TP
.B disablereg_xbes
disablereg_xbes may be used to selectively disable registration of the A-REX service. Possible values are yes and no. Default is no.
.IR Example:
.br
disablereg_xbes="no"
.SH [nordugridmap] block
[nordugridmap] block configuration is used to fine-tune the behaviour of nordugridmap - an ARC tool used to generate grid-mapfiles. Please refer to the [vo] block description for information on how to specify VO sources for mapfile generation. This section sets up general VO-independent parameters.
.TP
.B x509_user_key
x509_user_cert, x509_user_key - public certificate and private key to be used when fetching sources over TLS (https:// and vomss:// source retrieval rely on this parameter). If not specified, values defined in the [common] section will be used. If there is also no [common] section, the X509_USER_{CERT,KEY} variables are used. Default is '/etc/grid-security/host{cert,key}.pem'
.IR Example:
.br
x509_user_key="/etc/grid-security/hostkey.pem"
.br
x509_user_cert="/etc/grid-security/hostcert.pem"
.TP
.B x509_cert_dir
x509_cert_dir - the directory containing CA certificates. This information is needed by the 'require_issuerdn' [vo] block option. Default is '/etc/grid-security/certificates/'.
.IR Example:
.br
x509_cert_dir="/etc/grid-security/certificates/"
.TP
.B generate_vomapfile
generate_vomapfile - controls whether nordugridmap will generate the vo-mapfile used by arc-ur-logger. Default is 'yes'.
.IR Example:
.br
generate_vomapfile="yes"
.TP
.B vomapfile
vomapfile - path to the vo-mapfile location. Default is /etc/grid-security/grid-vo-mapfile
.IR Example:
.br
vomapfile="/etc/grid-security/grid-vo-mapfile"
.TP
.B log_to_file
log_to_file - controls whether the logging output of nordugridmap will be saved to a file. Default is 'no' (STDERR is used).
.IR Example:
.br
log_to_file="yes"
.TP
.B logfile
logfile - specify the nordugridmap log file location when in use. Default is '/var/log/arc/nordugridmap.log'.
.IR Example:
.br
logfile="/var/log/arc/nordugridmap.log"
.TP
.B cache_enable
cache_enable - controls whether caching of external sources will be used. Default is 'yes'.
.IR Example:
.br
cache_enable="yes"
.TP
.B cachedir
cachedir - specify the path where cached sources will be stored. Default is '/var/spool/nordugrid/gridmapcache/'
.IR Example:
.br
cachedir="/var/spool/nordugrid/gridmapcache/"
.TP
.B cachetime
cachetime - controls how long (in seconds) cached information remains valid. Default is 259200 (3 days).
.IR Example:
.br
cachetime="259200"
.TP
.B issuer_processing
issuer_processing - controls the behaviour of the [vo] block require_issuerdn parameter. Valid values are 'relaxed' and 'strict'. Please see the 'require_issuerdn' description in the [vo] block for details. Default is 'relaxed'.
.IR Example:
.br
issuer_processing="relaxed"
.TP
.B mapuser_processing
mapuser_processing - controls the behaviour of [vo] block mapped_unixid parameter usage. Valid values are 'overwrite' and 'keep'. Please see the 'mapped_unixid' description in the [vo] block for details. Default is 'keep'.
.IR Example:
.br
mapuser_processing="keep"
.TP
.B allow_empty_unixid
allow_empty_unixid - controls whether an empty (or unspecified) 'mapped_unixid' [vo] block option is allowed to be used. Please see the 'mapped_unixid' description of the [vo] block for details. Default is 'no'.
.IR Example:
.br
allow_empty_unixid="no"
.TP
.B voms_method
voms_method - controls how to get information from voms(s) sources. Valid values are: soap - call the SOAP method directly using SOAP::Lite; get - use the old implementation that manually parses the XML response. Default is 'soap'.
.IR Example:
.br
voms_method="soap"
.TP
.B debug
debug level - controls the verbosity of nordugridmap output. Valid values are:
.br
0 - FATAL - only critical fatal errors are shown
.br
1 - ERROR - errors, including non-critical ones, are shown
.br
2 - WARNING (default) - configuration errors that can be ignored
.br
3 - INFO - processing information
.br
4 - VERBOSE - a bit more processing information
.br
5 - DEBUG - a lot of processing information
.br
When a test run is requested (--test command line option of nordugridmap) the debug level is automatically set to 5 (DEBUG). Default is 2 (WARNING).
.IR Example:
.br
debug="4"
.TP
.B fetch_timeout
fetch_timeout - controls how long (in seconds) nordugridmap will wait for external source retrieval. Default is 15.
.IR Example:
.br
fetch_timeout="15"
.SH [acix/cacheserver] block
The cache server component of ACIX runs alongside A-REX. It periodically scans the cache directories and composes a Bloom filter of cache content which can be pulled by an ACIX index server.
.TP
.B hostname
Hostname on which the cache server listens. Default is all available interfaces.
.IR Example:
.br
hostname="myhost.org"
.TP
.B port
Port on which the cache server listens. Default is 5443.
.IR Example: .br port="6000" .TP .B logfile Log file location for the cache server. Default is /var/log/arc/acix-cache.log .IR Example: .br logfile="/tmp/acix-cache.log" .TP .B cachedump Whether to make a dump of the cache contents in a file at $TMP/ARC-ACIX/timestamp each time the cache server runs. Default is no. .IR Example: .br cachedump="yes" .SH [acix/indexserver] block The index server component of ACIX collects cache content filters from a set of cache servers configured in this block. The index server can be queried for the location of cached files. .TP .B cacheserver ACIX cache servers from which to pull information .IR Example: .br cacheserver="https://some.host:5443/data/cache" .br cacheserver="https://another.host:5443/data/cache" .SH [gangliarc] block Gangliarc provides monitoring of ARC-specific metrics through ganglia. It can be run with zero configuration or customised with options in the [gangliarc] block. .TP .B frequency The period between each information gathering cycle, in seconds. Default is 20. .IR Example: .br frequency="30" .TP .B gmetric_exec Path to gmetric executable. Default is /usr/bin/gmetric. .IR Example: .br gmetric_exec="/usr/local/bin/gmetric" .TP .B logfile log file of the daemon. Default is /var/log/arc/gangliarc.log. .IR Example: .br logfile="/tmp/gangliarc.log" .TP .B pidfile pid file of the daemon. Default is /var/run/gangliarc.pid. .IR Example: .br pidfile="/tmp/gangliarc.pid" .TP .B python_bin_path path to python executable. Default is /usr/bin/python. .IR Example: .br python_bin_path="/usr/local/bin/python" .TP .B metrics the metrics to be monitored. Default is "all". metrics takes a comma-separated list of one or more of the following metrics: - staging -- number of tasks in different data staging states - cache -- free cache space - session -- free session directory space - heartbeat -- last modification time of A-REX heartbeat - processingjobs -- the number of jobs currently being processed by ARC (jobs between PREPARING and FINISHING states) - failedjobs -- the number of failed jobs per last 100 finished - jobstates -- number of jobs in different A-REX internal stages - all -- all of the above metrics .IR Example: .br metrics="all" nordugrid-arc-5.4.2/src/doc/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712232152633020567 xustar000000000000000027 mtime=1382602139.212547 30 atime=1513200593.482933956 30 ctime=1513200658.730731962 nordugrid-arc-5.4.2/src/doc/Makefile.am0000644000175000002070000000017012232152633020627 0ustar00mockbuildmock00000000000000man_MANS = arc.conf.5 exampledir = $(pkgdatadir)/examples example_DATA = arc.conf.reference EXTRA_DIST = $(example_DATA)nordugrid-arc-5.4.2/src/doc/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315721020573 xustar000000000000000029 mtime=1513200593.51593436 30 atime=1513200652.608657087 30 ctime=1513200658.731731974 nordugrid-arc-5.4.2/src/doc/Makefile.in0000644000175000002070000005055313214315721020652 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/doc DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/arc.conf.5.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = arc.conf.5 CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' man5dir = $(mandir)/man5 am__installdirs = "$(DESTDIR)$(man5dir)" "$(DESTDIR)$(exampledir)" NROFF = nroff MANS = $(man_MANS) DATA = $(example_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = 
@ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ 
LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = 
@libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ man_MANS = arc.conf.5 exampledir = $(pkgdatadir)/examples example_DATA = arc.conf.reference EXTRA_DIST = $(example_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/doc/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/doc/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): arc.conf.5: $(top_builddir)/config.status $(srcdir)/arc.conf.5.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man5: $(man_MANS) @$(NORMAL_INSTALL) test -z "$(man5dir)" || $(MKDIR_P) "$(DESTDIR)$(man5dir)" @list=''; test -n "$(man5dir)" || exit 0; \ { for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.5[a-z]*$$/p'; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^5][0-9a-z]*$$,5,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man5dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man5dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man5dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man5dir)" || exit $$?; }; \ done; } uninstall-man5: 
@$(NORMAL_UNINSTALL) @list=''; test -n "$(man5dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.5[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^5][0-9a-z]*$$,5,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ test -z "$$files" || { \ echo " ( cd '$(DESTDIR)$(man5dir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(man5dir)" && rm -f $$files; } install-exampleDATA: $(example_DATA) @$(NORMAL_INSTALL) test -z "$(exampledir)" || $(MKDIR_P) "$(DESTDIR)$(exampledir)" @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(exampledir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(exampledir)" || exit $$?; \ done uninstall-exampleDATA: @$(NORMAL_UNINSTALL) @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(exampledir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(exampledir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @list='$(MANS)'; if test -n "$$list"; then \ list=`for p in $$list; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \ if test -n "$$list" && \ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(MANS) $(DATA) installdirs: for dir in "$(DESTDIR)$(man5dir)" "$(DESTDIR)$(exampledir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-exampleDATA install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man5 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-exampleDATA uninstall-man uninstall-man: uninstall-man5 .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exampleDATA install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-man5 install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am uninstall uninstall-am uninstall-exampleDATA \ uninstall-man uninstall-man5 # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/doc/PaxHeaders.7502/arc.conf.reference0000644000000000000000000000012413153455371022111 xustar000000000000000027 mtime=1504598777.935254 27 atime=1513200576.775729 30 ctime=1513200658.733731999 nordugrid-arc-5.4.2/src/doc/arc.conf.reference0000644000175000002070000032706613153455371022174 0ustar00mockbuildmock00000000000000#################################################################### ## ## Configuration blocks and variables for the ARC services via the arc.conf. ## ## NEVER USE THIS DOCUMENT AS A CONFIGURATION FILE! 
## There are out-of-the-box minimal configuration templates provided ## for all services. ## ## The arc.conf configuration file consists of blocks like ## [common], [grid-manager], [gridftpd], [gridftpd/name], [group], ## [data-staging], [nordugridmap], [vo], [cluster], [queue/name], ## [infosys], [infosys/index/name], [infosys/index/name/registration/name]. ## ## A block is identified by its blockname. A blockname consists of ## keywords and optionally block identifiers: [gridftpd/jobs] here 'gridftpd' is ## a keyword while 'jobs' is an identifier. Block names must be UNIQUE ([vo] is ## the only exception). A block starts with a unique [blockname] and ends with ## the next [blockname] directive. Currently the following keywords are used in ## the block names: common, grid-manager, gridftpd, group, data-staging, ## nordugridmap, vo, infosys, registration, cluster, queue, logger, acix ## ## Some parts of the configuration file are order-dependent, blocks may require ## other blocks be defined earlier (especially the authorization blocks). ## ## The arc.conf configures all the ARC services, and enabling an ARC service on ## a resource requires the presence of the appropriate configuration blocks. ## The ARC services are the following: gridftpd, A-REX (grid-manager block), ## infosys, infosys registration. The nordugridmap utility is also configured by ## the [nordugridmap] and [vo] block(s) of this file. ## ## For example, to connect a computing cluster to the grid and for it to ## accept grid jobs you should have at least the [common], [grid-manager], ## [infosys], [cluster], [queue/queue_name], [infosys/cluster/registration/name] ## blocks configured. For classic setup with GridFTP interface [gridftpd] and ## [gridftpd/jobs] section are also necessary. ## ## As another example we give the necessary blocks for an Index Service: ## [infosys], [infosys/index/name], [infosys/index/name/registration/name] ## ## Configuration blocks contain (variable, variable value) pairs following the ## syntax: variable="the variable value comes in quotes". ## The quotes around the variable value are a MUST! ## Note that the variable names are CASE-SENSITIVE! ## Unset configuration options (variables) take the default values. ## ## Below we give a detailed description of all the configuration options of the ## different configuration blocks. ## ## WARNING: this file will not work as a configuration template! ## ## Developers: This file is parsed by configuration validation scripts and so ## it is important that it follows the correct syntax. For each block or ## option please add explanatory text with two ## and a space at the ## beginning of the line and then an example with a single # and no spaces at ## the beginning of the line. ## ##################################################################### ### The [common] block ############################################## ## The parameters set within this block are available for all the other blocks. ## These are the config parameters shared by the different components of ARC ## (e.g. grid-manager, infosys) #[common] ## hostname - the FQDN of the frontend node, optional in the common block but ## MUST be set in the cluster block #hostname="myhost.org" ## x509_voms_dir path - the path to the directory containing *.lsc files ## needed for checking validity of VOMS extensions. If not specified default ## value /etc/grid-security/vomsdir is used. 
#x509_voms_dir="/etc/grid-security/vomsdir" ### LRMS ## ARC supports various LRMS flavours, as listed in this section. For detailed ## description of options please refer to ARC CE sysadmin guide: ## http://www.nordugrid.org/documents/arc-ce-sysadm-guide.pdf ## ONLY ONE LRMS IS ALLOWED. MULTIPLE lrms ENTRIES WILL TRIGGER UNEXPECTED ## BEHAVIOUR. ## ## lrms sets the type of the Local Resource Management System (queue system), ## and optionally - the default queue name, separated with a blank space: ## "lrmstype queue_name". For lrmstype, the following systems are supported ## and can be chosen (one per server): ## fork - simple forking of jobs to the same node as the server ## sge - (Sun/Oracle) Grid Engine ## condor - Condor ## pbs - PBS ## lsf - LSF ## ll - LoadLeveler ## slurm - SLURM ## boinc - Boinc ## PBS has many flavours, ARC currenly supports OpenPBS, PBSPro, ScalablePBS ## and Torque (the official name for ScalablePBS). There is no need to specify ## the flavour or the version number of the PBS, simply write 'pbs'. Similarly, ## there is no need to specify (Sun/Oracle) Grid Engine versions and flavours. ## "lrmstype" MUST be set here, it is a MANDATORY parameter! ## The optional "queue" parameter specifies the default Grid queue of the ## LRMS. Jobs will be submitted to this queue if they do not specify queue ## name in job description. Queue name must match one of the [queue/queue_name] ## block labels, see below. ## ### PBS #lrms="pbs gridlong" #lrms="pbs" ## the path to the qstat,pbsnodes,qmgr etc PBS binaries, ## no need to set if PBS is not used #pbs_bin_path="/usr/bin" ## the path of the PBS server logfiles which are used by A-REX to determine ## whether a PBS job is completed. If not specified, A-REX will use qstat for that. #pbs_log_path="/var/spool/pbs/server_logs" ### Condor #lrms="condor" ## condor_rank - If you are not happy with the way Condor picks nodes when ## running jobs, you can define your own ranking algorithm by optionally ## setting the condor_rank attribute. condor_rank should be set to a ## ClassAd float expression that you could use in the Rank attribute ## in a Condor job description. ## Obviously no need to set if Condor is not used. An example: #condor_rank="(1-LoadAvg/2)*(1-LoadAvg/2)*Memory/1000*KFlops/1000000" ## condor_bin_path - Path to Condor binaries. Must be set if Condor ## is used. #condor_bin_path=/opt/condor/bin ## condor_bin_path - Path to Condor config file. Must be set if Condor ## is used and the config file is not in its default location ## (/etc/condor/condor_config or ~/condor/condor_config). ## The full path to the file should be given. #condor_config=/opt/condor/etc/condor_config ### SGE #lrms="sge" ## sge_bin_path - Path to Sun Grid Engine (SGE) binaries, ## MUST be set if SGE is the LRMS used #sge_bin_path="/opt/n1ge6/bin/lx24-x86" ## sge_root - Path to SGE installation directory. MUST be set if SGE is used. #sge_root="/opt/n1ge6" ## sge_cell - The name of the SGE cell to use. This option is only necessary ## in case SGE is set up with a cell name different from 'default' #sge_cell="default" ## sge_qmaster_port, sge_execd_port - these options should be used in case SGE ## command line clients requre SGE_QMASTER_PORT and SGE_EXECD_PORT environment ## variables to be set. Usually they are not necessary. 
#sge_qmaster_port="536" #sge_execd_port="537" ### SLURM #lrms="slurm" ## slurm_bin_path - Path to SLURM binaries, must be set if installed ## outside of normal $PATH #slurm_bin_path="/usr/bin" ## How long should infosys wait before querying SLURM for new data (seconds) #slurm_wakeupperiod="15" ## Should ARC use sacct instead of scontrol to get information about finished jobs #slurm_use_sacct="yes" ### LSF #lrms="lsf" ## the PATH to LSF bin folder ## no need to set if LSF is not used #lsf_bin_path="/usr/local/lsf/bin/" ## the PATH to profile.lsf ## no need to set if LSF is not used #lsf_profile_path="/usr/share/lsf/conf" ### LoadLeveler #lrms="ll" ## the PATH to the LoadLeveler bin folder ## no need to set if LoadLeveler is not used #ll_bin_path="/opt/ibmll/LoadL/full/bin" ## support for a LoadLeveler setup using Consumable Resources ## no need to set if LoadLeveler is not used #ll_consumable_resources="yes" ### Desktop Bridge #lrms="dgbridge" ## Desktop Bridge www publish dir #dgbridge_stage_dir="/var/www/DGBridge" ## Desktop Bridge url prefix pointing to dgbridge_stage_dir #dgbridge_stage_prepend="http://edgi-bridge.example.com/DGBridge/" ### Boinc #lrms="boinc" ## Connection strings for the boinc database #boinc_db_host="localhost" #boinc_db_port="3306" #boinc_db_name="myproject" #boinc_db_user="boinc" #boinc_db_pass="password" ## boinc_app_id - ID of the app handled by this CE. Setting this option ## makes database queries much faster in large projects with many apps. #boinc_app_id="1" ### Globus specifics ## globus_tcp_port_range, globus_udp_port_range - Firewall configuration ## In a firewalled environment the software which uses GSI needs to know what ## ports are available. The full documentation can be found at: ## http://dev.globus.org/wiki/FirewallHowTo ## These variable are similar to the Globus enviroment variables: ## GLOBUS_TCP_PORT_RANGE and GLOBUS_UDP_PORT_RANGE. ## These variables are not limited to [common], but can be set individually ## for each service in corresponding section: [grid-manager], [gridftpd] ## Example: #globus_tcp_port_range="9000,12000" #globus_udp_port_range="9000,12000" ### Certificates ## x509_user_cert, x509_user_key - Server credentials location. ## These variables are similar to the GSI enviroment variables: ## X509_USER_KEY and X509_USER_CERT ## These variables are not limited to [common], but can be set individually ## for each service in corresponding section: [grid-manager], [gridftpd], [nordugridmap] #x509_user_key="/etc/grid-security/hostkey.pem" #x509_user_cert="/etc/grid-security/hostcert.pem" ## x509_cert_dir - Location of trusted CA certificates ## This variable is similar to the GSI enviroment variable: X509_CERT_DIR ## This variable is not limited to [common], but can be set individually ## for each service in corresponding section: [grid-manager], [gridftpd] #x509_cert_dir="/etc/grid-security/certificates" ### Authorization ## gridmap - The gridmap file location ## This variable is similar to the GSI enviroment variable: GRIDMAP ## This variable is not limited to [common], but can be set individually ## for each service in corresponding section: [grid-manager], [gridftpd] ## The default is /etc/grid-security/grid-mapfile #gridmap="/etc/grid-security/grid-mapfile" ## voms_processing - Defines how to behave if errors in VOMS AC processing detected. ## relaxed - use everything that passed validation. ## standard - same as relaxed but fail if parsing errors took place and ## VOMS extension is marked as critical. This is a default. 
## strict - fail if any parsing error was discovered.
## noerrors - fail if any parsing or validation error happened.
## This command can also be used in [grid-manager] and [gridftpd] blocks.
#voms_processing="standard"
## voms_trust_chain - Define the DN chain that the host services trust when the
## voms AC from a peer voms proxy certificate is parsed and validated.
## There can be multiple "voms_trust_chain" entries, each one corresponding to a voms server.
## This variable is similar to the information in a *.lsc file, but with two differences:
## 1, you don't need to create a *.lsc file per VOMS server, but create a chain per VOMS server;
## 2, regular expressions are supported when matching the DNs.
## This variable is not limited to [common], but can be used in [grid-manager]
## and [gridftpd] blocks.
## This variable should be used together with voms_processing.
## This variable will overwrite the information in *.lsc if *.lsc exists.
#voms_trust_chain = "/O=Grid/O=NorduGrid/CN=host/arthur.hep.lu.se" "/O=Grid/O=NorduGrid/CN=NorduGrid Certification Authority"
#voms_trust_chain = "/O=Grid/O=NorduGrid/CN=host/emi-arc.eu" "/O=Grid/O=NorduGrid/CN=NorduGrid Certification Authority"
#voms_trust_chain = "^/O=Grid/O=NorduGrid"
## enable_perflog_reporting expert-debug-on/no - Switch on or off performance reporting.
## Default is no
#enable_perflog_reporting="expert-debug-on"
## perflogdir logdir - Directory where performance logs should be stored.
## Default is /var/log/arc/perfdata
#perflogdir="/var/log/arc/perfdata"
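## As an illustrative sketch only (all values below are the examples used
## elsewhere in this file and must be adapted to the site), a minimal [common]
## block for a PBS-based setup could look like:
#[common]
#hostname="myhost.org"
#lrms="pbs gridlong"
#pbs_bin_path="/usr/bin"
#x509_user_key="/etc/grid-security/hostkey.pem"
#x509_user_cert="/etc/grid-security/hostcert.pem"
#x509_cert_dir="/etc/grid-security/certificates"
#gridmap="/etc/grid-security/grid-mapfile"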
### [vo] block #####################################################
##
## [vo] block is used to define VOs and generate mapfiles from user
## lists maintained by VO databases. The VO block is a configuration block
## for the nordugridmap utility.
## Please note that [vo] block processing by the nordugridmap utility
## depends on parameters defined in the [nordugridmap] block.
## A [vo] block by itself does not affect authorization of a client/user. For
## that, the label defined by the vo="" attribute may be used in a [group] block
## with the 'vo' rule. Also mapfiles generated by the nordugridmap utility can be
## used with the 'file' rule.
##
#[vo]
## id blockid - specifies the unique configuration block id (this does not
## affect the nordugridmap utility)
#id="vo_1"
## vo vo_name - specifies the VO name, this name can be used in other blocks.
## MUST be given.
#vo="nordugrid"
## file path - output gridmap-file where the GENERATED mapping list will be
## stored. See parameters below to define how to generate this file.
## If the same file is specified as output for different [vo] blocks,
## nordugridmap will automatically merge entries in the given block order.
## Default is '/etc/grid-security/gridmapfile'.
#file="/etc/grid-security/VOs/atlas-users"
## source url - the URL of the VO database which is assigned to this VO.
## The nordugridmap will use this URL to automatically generate and keep
## up-to-date the userlist (mapfile) specified by the 'file' attribute.
##
## url is a multivalued attribute, several sources can be specified for
## the [vo] block and all the users from those sources will be merged
## into the same file. The source URLs are processed in the given order.
##
## Currently supported URL types are:
## http(s):// - URL to a plain text file. The file should contain a list
## of DNs with an optional issuer certificate authority DN
## (see require_issuerdn): "user DN" ["issuer DN"]
## voms(s):// - URL to a VOMS-Admin interface
## nordugrid - add NorduGrid VO members
## file:// - local file (stand-alone or dynamically generated by
## nordugridmap). The file should contain a list of DNs with
## an optional mapped unixid: "user DN" [mapped user ID]
## The result of optional mapped unixid processing depends
## on the mapuser_processing option settings.
## vo:// - reference to another [vo] configuration block
##
## You can use either vo:// or file:// entries to specify dependencies
## between [vo] blocks, but using vo:// is the recommended way.
##
## For each separate source URL it is possible to override some parameter
## values. You can use the following syntax to perform this:
## source="URL < parameter1=value1 parameter2=value2"
## You can override the following parameters:
## mapped_unixid for http(s),voms(s),ldap and file URLs
## cache_enable for http(s),voms(s),ldap and file URLs
## voms_method for voms(s) URLs
## mapuser_processing for file URLs with mapped_unixid='' overridden
## (controls mapped_unixid overriding behaviour for the URL)
#source="vomss://voms.ndgf.org:8443/voms/nordugrid.org"
#source="vomss://lcg-voms.cern.ch:8443/voms/atlas?/atlas/Role=VO-Admin < mapped_unixid=atlasadmin"
#source="vomss://kuiken.nikhef.nl:8443/voms/gin.ggf.org < voms_method=get"
#source="http://www.nordugrid.org/developers.dn"
#source="file:///etc/grid-security/priviliged_users.dn"
#source="vo://nordugrid_community"
#source="nordugrid"
## mapped_unixid unixid - the local UNIXID which is used in the generated
## grid-mapfile by the nordugridmap utility.
##
## If any of the sources have already provided mapping information (file://
## or vo://) the behavior depends on the 'mapuser_processing' [nordugridmap] block
## configuration:
## mapuser_processing = 'overwrite': ignore already provided mapping and
## apply mapped_unixid for all sources
## mapuser_processing = 'keep': apply mapped_unixid only for sources that
## do not already have mapping information
##
## A [vo] block can only have one UNIXID.
## If 'mapped_unixid' is not specified the behavior depends on the 'allow_empty_unixid'
## [nordugridmap] block configuration value:
## allow_empty_unixid = 'yes': an empty value will be used for mapped_unixid
## which means that nordugridmap will generate only
## the list of DNs without mapping (consider using
## mapuser_processing='overwrite' along with this
## option or sources that do not provide previously
## defined mapping information)
## allow_empty_unixid = 'no': skip users without mapping information (if
## no mapping information is provided by sources)
##
#mapped_unixid="gridtest"
## voms_fqan_map fqan unixid - the local UNIXID which is used to map voms(s)
## sources with the specific FQAN given.
##
## Several voms_fqan_map can be specified for a [vo] block.
## For each voms(s) source in the [vo] block and every voms_fqan_map record
## a separate source record will be automatically generated with mapped_unixid
## overridden to the specified one.
##
## Sources are generated in the given voms_fqan_map order. The original voms(s)
## source URL is processed LAST.
##
## This allows simplifying the configuration, especially in redundancy cases when
## several VOMS servers are used for the same VO.
##
#voms_fqan_map="/atlas/Role=VO-Admin atlasadmin"
#voms_fqan_map="/atlas/Role=production atlasprod"
## filter ACL string - An ACL filter for the nordugridmap utility. Multiple
## allow/deny statements are possible.
The fetched DNs are filtered against ## the specified rules before they are added to the generated mapfile. ## ## * can be used as a wildcard. You may run the nordugridmap with the --test ## command line option to see how the filters you specified work. ## ## If at least one allow filter is specified implicit deny is used at the end ## of ACL. If only deny filters are present - implicit allow used at the end. #filter="deny *infn*" #filter="allow *NorduGrid*" ###################################################################### ### [group] Authorisation block ###################################### ## ## These configuration blocks define rules used to define to which ## authorization group a user belongs. The group should not be mistaken ## for a virtual organisation (VO). A group may match a single vo if ## only a single check (rule) on vo membership is perfomed. It is ## however more common to allow multiple VOs in a single group. ARC ## also allows many other ways to assign users to groups. Technically, ## permissions are only granted to groups, not directly to VOs. ## ## The block specifies single authorization group. Ther may be multiple ## [group] blocks in configuration defining multiple authorization ## groups. ## ## The block can be specified in two ways - either using [group/group1] ## like subblock decalration per group or just [group]. The two formats ## are equivalent. Every block (till the beginning of next block or the ## end of the file) defines one authorization group. ## ## IMPORTANT: Rules in a group are processed in their order of appearance. ## The first matching rule decides the membership of a the user to a group ## and the processing STOPS. There are positively and negatively matching ## rules. If a rule is matched positively then the user tested is accepted ## into the respective group and further processing is stopped. Upon a ## negative match the user would be rejected for that group - processing ## stops too. The sign of rule is determined by prepending the rule with ## '+' (for positive) or '-' (for negative) signs. '+' is default and can ## be omitted. A rule may also be prepended with '!' to invert result of rule, ## which will let the rule match the complement of users. That complement ## operator ('!') may be combined with the operator for positive or negative ## matching. ## ## A group MUST be defined before it may be used. In this respect the ## arc.conf is ORDER SENSITIVE. ## ## The authorization groups can be used in [gridftpd] ## and in its sub-blocks. The syntax of their specification varies with ## the service they are used for. ## For using authorization groups and VO blocks in HED framework please ## read "Security Framework of ARC" at ## http://www.nordugrid.org/documents/arc-security-documentation.pdf ## #[group] ## name group_name - Specify name of group. If there is no such command ## in block, name of subblock is used instead (that is what subblocks ## are used for). For example [group/users] #name="users" ## subject certificate_subject - Rule to match specific subject of user's ## X.509 certificate. No masks, patterns and regular expressions are allowed. ## For more information about X.509 refer to http://www.wikipedia.org/wiki/X509 #subject="/O=Grid/O=Big VO/CN=Main Boss" ## file path - Start reading rules from another file. That file has a bit ## different format. It can't contain blocks and commands are separated ## from arguments by space. Also word "subject" in subject command may be ## skipped. 
That makes it convenient to directly add gridmap-like lists to an
## authorization group.
#file="/etc/grid-security/local_users"
## voms vo group role capabilities - Match VOMS attribute in user's credential.
## Use '*' to match any value. More information about VOMS can be found at
## http://grid-auth.infn.it
#voms="nordugrid /nordugrid/Guests * *"
## group group_name [group_name ...] - Match user already belonging to one
## of the specified groups. Groups referred to here must be defined earlier in the
## configuration file. Multiple group names may be specified for this rule.
## That allows creating a hierarchical structure of authorization groups like
## 'clients' are those which are 'users' and 'admins'.
#group="local_admins"
## plugin timeout path [argument ...] - Run an external executable or a
## function from a shared library. The rule is matched if the plugin returns 0.
## In arguments the following substitutions are supported:
##   %D - subject of certificate
##   %P - path to proxy
## For more about plugins read the documentation.
#plugin="10 /opt/external/bin/permis %P"
## lcas library directory database - Call LCAS functions to check the rule.
## Here library is the path to the shared library of LCAS, either absolute or
## relative to directory; directory is the path to the LCAS installation directory,
## equivalent of the LCAS_DIR variable; database is the path to the LCAS database,
## equivalent to the LCAS_DB_FILE variable. Each argument except library is
## optional and may be either skipped or replaced with '*'.
#lcas=""
## remote URL ... - Check user's credentials against a remote service. Only
## DN groups stored at LDAP directories are supported. Multiple URLs are
## allowed in this rule.
#remote="ldap://grid-vo.nordugrid.org/ou=People,dc=nordugrid,dc=org"
## vo vo_name ... - Match user belonging to a VO specified by "vo=vo_name" as
## configured in one of the PREVIOUSLY defined [vo] blocks. Multiple VO names
## are allowed for this rule.
#vo="nordugrid"
## all - Matches any user identity. This command requires no arguments but
## still can be written as all="" or all= for consistency.
#all=""
######################################################################
### The [grid-manager] block #########################################
## The [grid-manager] block configures the part of the A-REX service hosted
## in *arched* and taking care of the grid tasks on the frontend
## (stagein/stageout, LRMS job submission, caching, etc.). The name of this
## block is historical and comes from the times when this functionality was
## handled by a separate process called grid-manager. This section also
## configures the WS interfaces of the A-REX service hosted by the same container.
#[grid-manager]
## controldir path - The directory holding the A-REX's internal job log files,
## not needed on the nodes.
#controldir="/var/spool/nordugrid/jobstatus"
## sessiondir path [drain] - the directory which holds the sessiondirs of the grid jobs.
## Multiple session directories may be specified by specifying multiple sessiondir
## commands. In this case jobs are spread evenly over the session directories.
## If sessiondir="*" is set, the session directory will be spread over the
## ${HOME}/.jobs directories of every locally mapped unix user. It is preferred
## to use common session directories. The path may be followed by "drain", in
## which case no new jobs will be assigned to that sessiondir, but current jobs
## will still be processed and accessible.
#sessiondir="/scratch/grid" #sessiondir="/mnt/grid drain" ## runtimedir path - The directory which holds the runtimeenvironment scripts, ## should be available on the nodes as well! the runtimeenvironments are ## automatically detected and advertised in the information system. #runtimedir="/SOFTWARE/runtime" ## scratchdir path - path on computing node to move session directory to before ## execution. If defined should contain the path to the directory on the ## computing node which can be used to store a jobs' files during execution. ## Sets the environment variable RUNTIME_LOCAL_SCRATCH_DIR. Default is not to ## move session directory before execution. #scratchdir="/local/scratch/" ## shared_scratch path - path on frontend where scratchdir can be found. If ## defined should contain the path corresponding to that set in scratchdir as ## seen on the frontend machine. Sets the environment variable ## RUNTIME_FRONTEND_SEES_NODE. #shared_scratch="/mnt/scratch" ## nodename path - command to obtain hostname of computing node. #nodename="/bin/hostname" ## cachedir cache_path [link_path] - specifies a directory to store cached ## data. Multiple cache directories may be specified by specifying multiple ## cachedir commands. Cached data will be distributed evenly over the caches. ## Specifying no cachedir command or commands with an empty path disables caching. ## Optional link_path specifies the path at which the cache_path is accessible on ## computing nodes, if it is different from the path on the A-REX host. ## Example: cache="/shared/cache /frontend/jobcache" ## If "link-path" is set to '.' files are not soft-linked, but copied to session ## directory. If a cache directory needs to be drained, then cachedir ## should specify "drain" as the link path, in which case no new files will be ## added to the cache. #cachedir="/scratch/cache" #cachedir="/fs1/cache drain" ## remotecachedir cache_path [link_path] - specifies caches which are under ## the control of other A-REXs, but which this A-REX can have read-only access to. ## Multiple remote cache directories may be specified by specifying multiple ## remotecachedir commands. If a file is not available in paths specified by ## cachedir, A-REX looks in remote caches. link_path has the same meaning as in ## cachedir, but the special path ``replicate'' means files will ## be replicated from remote caches to local caches when they are requested. #remotecachedir="/mnt/fs1/cache replicate" ## cachesize max min - specifies high and low watermarks for space used ## by cache, as a percentage of the space on the file system on which ## the cache directory is located. When the max is exceeded, files will ## be deleted to bring the used space down to the min level. It is a ## good idea to have the cache on its own separate file system. To turn ## off this feature "cachesize" without parameters can be specified. #cachesize="80 70" ## If cache cleaning is enabled, files accessed less recently than the given ## time period will be deleted. Example values of this option are 1800, 90s, 24h, ## 30d. When no suffix is given the unit is seconds. #cachelifetime="30d" ## cacheshared yes|no - specifies whether the caches share a filesystem with ## other data. If set to yes then cache-clean calculates the size of the cache ## instead of using filesystem used space. #cacheshared="yes" ## cachespacetool path [options] - specifies an alternative tool to "df" that ## cache-clean should use to obtain space information on the cache file system. 
## The output of this command must be "total_bytes used_bytes". The cache ## directory is passed as the last argument to this command. #cachespacetool="/etc/getspace.sh" ## cachelogfile path - specifies the filename where output of the cache-clean ## tool should be logged. Defaults to /var/log/arc/cache-clean.log. #cachelogfile="/tmp/cache-clean.log" ## cacheloglevel level - specifies the level of logging by the cache-clean ## tool, between 0 (FATAL) and 5 (DEBUG). Defaults to 3 (INFO). #cacheloglevel="4" ## cachecleantimeout time - the timeout in seconds for running the cache-clean ## tool. If using a large cache or slow file system this value can be ## increased to allow the cleaning to complete. Defaults to 3600 (1 hour). #cachecleantimeout="10000" ## cacheaccess rule - rules for allowing access to files in the cache remotely ## through the A-REX web interface. A rule has three parts: ## 1. Regular expression defining a URL pattern ## 2. Credential attribute to match against a client's credential ## 3. Regular expression defining a credential value to match against a ## client's credential ## A client is allowed to access the cached file if a URL pattern matches the ## cached file URL and the client's credential has the attribute and matches ## the value required for that pattern. Possible values for credential ## attribute are dn, voms:vo, voms:role and voms:group. Remote cache access ## requires that the A-REX web interface is enabled via arex_mount_point. #cacheaccess="gsiftp://host.org/private/data/.* voms:vo myvo:production" #cacheaccess="gsiftp://host.org/private/data/ng/.* dn /O=Grid/O=NorduGrid/.*" ## enable_cache_service yes|no - Turn on or off the cache service interface. ## If turned on the cache service must be installed and the A-REX WS interface ## must be enabled via arex_mount_point The interface is accessible at the same ## host and port as given in arex_mount_point with path /cacheservice. Default ## is off. #enable_cache_service="yes" ## user user[:group] - Switch to a non root user/group after startup. ## Use with caution. #user="grid" ## debug debuglevel - Set debug level of the arched daemon hosting A-REX service, ## between 0 (FATAL) and 5 (DEBUG). Defaults to 3 (INFO). #debug="2" ## logfile path - Specify A-REX log file location. If using an external log ## rotation tool be careful to make sure it matches the path specified here. ## Default log file is /var/log/arc/grid-manager.log #logfile="/var/log/arc/grid-manager.log" ## wslogfile path - Specify log file location for WS-interface operations. This ## file is only created if the WS-interface is enabled through the ## arex_mount_point option. The logsize, logreopen and debug options also apply ## to this file. If using an external log rotation tool be careful to make sure ## it matches the path specified here. It is possible to specify the same file ## as logfile to combine the logs. Default is /var/log/arc/ws-interface.log #wslogfile="/var/log/arc/ws-interface.log" ## logsize size [number] - 'Size' specifies in bytes how big log file is ## allowed to grow (approximately). If log file exceeds specified size ## it is renamed into logfile.0. And logfile.0 is renamed into ## logfile.1, etc. up to 'number' logfiles. Don't set logsize if you don't ## want to enable the ARC logrotation because another logrotation tool is used. #logsize="100000 2" ## logreopen yes|no - Specifies if log file must be closed after each record is ## added. By default arched keeps log file open. 
This option can be used to
## make the behavior of arched compatible with external log rotation utilities.
#logreopen="no"
## pidfile path - Specify the location of the file containing the PID of the daemon process.
## This is useful for automatic start/stop scripts.
#pidfile="/var/run/arched-arex.pid"
## the gnu time command, default /usr/bin/time
#gnu_time="/usr/bin/time"
## if the computing node can access the session directory at the frontend, defaults to 'yes'
#shared_filesystem="yes"
## specifies the email address from where the notification mails are sent
#mail="grid.support@somewhere.org"
## joblog path - specifies where to store a specialized log about started
## and finished jobs. If the path is empty or there is no such command - the log is not written.
## This log is not used by any other part of ARC, so keep it disabled unless
## needed.
#joblog="/var/log/arc/gm-jobs.log"
## jobreport [url ...] [timeout] - tells A-REX to report all started and finished jobs
## to the logger service at 'url'. Multiple urls and multiple jobreport commands
## are allowed. In that case the job info will be sent to all of them.
## Timeout specifies how long (in days) to try to pass the information before
## giving up. Suggested value is 30 days.
#jobreport="https://grid.uio.no:8001/logger"
## jobreport_vo_filters [VO URL[,VO URL]...] - a filter option that selects
## which VOs are sent to which configured jobreport server.
## When adding a new URL here (one that does not exist in the jobreport list) the
## URL also needs to be added to the jobreport element. Multiple VOs are allowed BUT
## only one jobreport_vo_filters is allowed.
#jobreport_vo_filters="vo1 url1, vo2 url2"
## jobreport publisher - name of the accounting records publisher.
#jobreport_publisher="jura"
## jobreport credentials path [key_file [cert_file [ca_dir]]] - specifies the
## credentials for accessing the accounting service.
#jobreport_credentials="/etc/grid-security/hostkey.pem /etc/grid-security/hostcert.pem /etc/grid-security/certificates"
## jobreport options [name:value, ...] - specifies additional parameters for
## the jobreporter.
#jobreport_options="urbatch:50,archiving:/tmp/archive,topic:/topic/global.accounting.cpu.central
#                   gocdb_name:GRID_UIO_NO,benchmark_type:HEPSPEC,benchmark_value:2.0,use_ssl:false,
#                   localid_prefix:prefix_for_SGAS"
# the options element can be one of the following:
#  - urbatch:size
#      default size: 50 by SGAS
#                    1000 by APEL
#  - archiving:directory for the archiving files
#  - topic:name of the topic
#  - gocdb_name:GOCDB name of CE
#  - benchmark_type:type of benchmark (Si2k, Sf2k, HEPSPEC)
#  - benchmark_value:value of benchmark
#  - benchmark_description:additional description for a benchmark
#  - use_ssl:true or false
#      default value: false
#  - localid_prefix:prefix string for the SGAS records
## jobreport logfile - name of the file to store stderr of the publisher executable.
#jobreport_logfile="/var/log/arc/jura.log"
## max_job_control_requests number - max number of simultaneously processed job management
## requests over the WS interface - like job submission, cancel, status check etc.
## Default value is 100.
#max_job_control_requests="100"
## max_infosys_requests number - max number of simultaneously processed resource info
## requests over the WS interface. Default value is 1.
#max_infosys_requests="1"
## max_data_transfer_requests number - max number of simultaneously processed data transfer
## requests over the WS interface - like data staging.
## Default value is 100.
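## For illustration only: a frontend expecting heavy WS job management traffic
## might raise the first limit and keep the data staging limit at its default
## (the values below are arbitrary examples, not tuning advice):
#max_job_control_requests="200"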
#max_data_transfer_requests="100" ## maxjobs number1 number2 number3 number4 number5 - specifies maximum allowed number of jobs. ## number1 - jobs which are not in FINISHED state (jobs tracked in RAM) ## number2 - jobs being run (SUBMITTING, INLRMS states) ## number3 - jobs being processed per DN ## number4 - jobs in whole system ## number5 - LRMS scripts limit (jobs in SUBMITTING and CANCELING) ## Missing number or -1 means no limit. #maxjobs="10000 10 2000" ## wakeupperiod time - specifies how often A-REX cheks for new jobs ## arrived, job state change requests, etc. That is resposivity of ## A-REX. 'time' is time period in seconds. Default is 3 minutes. ## Usually this command is not needed because important state changes ## are also trigering out-of-schedule checks. ## NOTE: This parameter does not affect responsivity of backend scripts - ## especially scan-*-job. That means that upper estimation of time for ## detecting job finished executing is sum of responsivity of backend ## script + wakeupperiod. #wakeupperiod="180" ## defaultttl [ttl [ttr]] - ttl is the time in seconds for how long a session ## directory will survive after job execution has finished. If not specified ## the default is 1 week. ttr is how long information about a job will be kept ## after the session directory is deleted. If not specified, the ttr default is ## one month. #defaultttl="259200" ## authplugin state options plugin_path - Every time job goes to 'state' ## run 'plugin_path' executable. Options consist of key=value pairs separated ## by ','. Possible keys are ## timeout - wait for result no longer that 'value' seconds (timeout= can be ## omitted). ## onsuccess,onfailure,ontimeout - what to do if plugin exited with exit ## code 0, not 0, timeout achieved. Possible actions are: ## pass - continue executing job, ## fail - cancel job, ## log - write to log fail about problem and continue executing job. #authplugin="ACCEPTED timeout=10 /usr/libexec/arc/bank %C/job.%I.local %S" ## ARC is distributed with the plugin "inputcheck". It's purpose is ## to check if input files requested in job's RSL are accessible ## from this machine. It is better to run it before job enters cluster. ## It accepts 2 arguments: names of files containing RSL and credentials' ## proxy. This plugin is only guaranteed to work for job submitted through ## the legacy GridFTP interface, as this is the only interface for which ## credentials in the form of proxy certificate files are guaranteed to ## exist. #authplugin="ACCEPTED 60 /usr/libexec/arc/inputcheck %C/job.%I.description %C/job.%I.proxy" ## ARC is distributed with the plugin "arc-vomsac-check". It's purpose is to enforce ## per-queue access policies based on VOMS attributes present in user's ## proxy-certificate. Plugin should be run before job enters the cluster. ## It requires 2 argments: path to job information .local file and path to ## credentials file. ## Enforced per-queue access policies are configured with 'ac_policy' option ## in the [queue/name] configuration block. #authplugin="ACCEPTED 60 /usr/libexec/arc/arc-vomsac-check -L %C/job.%I.local -P %C/job.%I.proxy" ## localcred timeout plugin_path - Every time an external executable ## is run this plugin will be called. Its purpose is to set non-unix ## permissions/credentials on running tasks. Note: the process itself ## can still be run under the root account. 
If plugin_path looks like ## somename@somepath, then function 'somename' from the shared library ## located at 'somepath' will be called (timeout is not effective in ## that case). ## A-REX must be run as root to use this option. ## Comment it out unless you really know what you are doing. #localcred="0 acquire@/opt/nordugrid/lib/afs.so %C/job.%I.proxy" ## norootpower yes|no - if set to yes, all job management proccesses ## will switch to mapped user's identity while accessing session ## directory. This is useful if session directory is on NFS ## root squashing turned on. Default is no. #norootpower="yes" ## allowsubmit [group ...] - list of authorization groups of users allowed ## to submit new jobs while "allownew=no" is active in jobplugin ## configuration. Multiple commands are allowed. #allowsubmit="mygroup" #allowsubmit="yourgroup" ## helper user executable arguments - associates an external program with ## A-REX. This program will be kept running under the account of the user ## specified by username. Currently only ’.’ is supported as username, ## corresponding to the user running A-REX. Every time this executable finishes ## it will be started again. This helper plugin mechanism can be used as an ## alternative to /etc/init.d or cron to (re)start external processes. #helper=". /usr/local/bin/myutility" ## tmpdir - used by the A-REX, default is /tmp #tmpdir="/tmp" ## maxrerun - specifies how many times job can be rerun if it failed in LRMS. ## Default value is 5. This is only an upper limit, the actual rerun value is set ## by the user in his xrsl. #maxrerun="5" ## globus_tcp_port_range, globus_udp_port_range - Firewall configuration. #globus_tcp_port_range="9000,12000" #globus_udp_port_range="9000,12000" ## x509_user_cert, x509_user_key - Location of credentials for service. ## These may be used by any module or external utility which need to ## contact another service not on behalf of user who submited job. #x509_user_key="/etc/grid-security/hostkey.pem" #x509_user_cert="/etc/grid-security/hostcert.pem" ## x509_cert_dir - Location of trusted CA certificates #x509_cert_dir="/etc/grid-security/certificates" ## http_proxy - http proxy server location #http_proxy="proxy.mydomain.org:3128" ## fixdirectories yes|missing|no - specifies during startup A-REX should ## create all directories needed for it operation and set suitable default ## permissions. If "no" is specified then A-REX does nothing to prepare its ## operational environment. In case of "missing" A-REX only creates and ## sets permissions for directories which are not present yet. For "yes" ## all directories are created and permisisons for all used directories are ## set to default safe values. Default behavior is as if "yes" is specified. #fixdirectories="yes" ## arex_mount_point - enables web services interfaces, including ## job execution and information system. The argument is ## an https URL defining the endpoint port and path: ## https://:/ ## In order to submit job a client must specify the exact published ## path. Make sure the chosen port is not blocked by firewall ## or other security rules. #arex_mount_point="https://piff.hep.lu.se:60000/arex" ## enable_arc_interface yes|no - turns on or off the ARC own WS interface ## based on OGSA BES and WSRF. If enabled the interface can be accessed at ## the URL specified by arex_mount_point (this option must also be ## specified). Default is yes. #enable_arc_interface="yes" ## enable_emies_interface - enable the EMI Execution Service interface. 
If enabled the interface can be accessed at the URL specified in
## arex_mount_point (this option must also be specified).
#enable_emies_interface="yes"
## arguspep_endpoint - specifies the URL of the Argus PEPD service to use for
## authorization and user mapping.
## Note that if the "requireClientCertAuthentication" item (default is false)
## of pepd.ini (the configuration of the Argus PEPD service) is set to 'true', then
## https must be used, otherwise http is appropriate.
## If specified, Argus is contacted for every operation requested
## through the WS interface (see arex_mount_point).
#arguspep_endpoint="https://somehost.somedomain:8154/authz"
## arguspep_profile - defines which communication profile to use while communicating
## with the Argus PEPD service. Possible values are:
##   direct  - pass all authorization attributes (only for debugging)
##   subject - pass only the subject name of the client
##   cream   - makes A-REX pretend it is a gLite CREAM service. This is the
##             recommended profile for interoperability with gLite.
##   emi     - new profile developed in the EMI project. This is the default option.
#arguspep_profile="cream"
## arguspep_usermap - specifies whether the response from the Argus service may define the mapping
## of the client to a local account. Possible values are 'yes' and 'no'. Default is 'no'.
## Argus is contacted after all other user mapping is performed. Hence it can
## overwrite all other decisions.
#arguspep_usermap="no"
## arguspdp_endpoint - specifies the URL of the Argus PDP service to use for
## authorization and user mapping.
## Note that if the "requireClientCertAuthentication" item (default is false)
## of pdp.ini (the configuration of the Argus PDP service) is set to 'true', then
## https must be used, otherwise http is appropriate.
## If specified, Argus is contacted for every operation requested
## through the WS interface (see arex_mount_point).
#arguspdp_endpoint="https://somehost.somedomain:8152/authz"
## arguspdp_profile - defines which communication profile to use while communicating
## with the Argus PDP service. Possible values are:
##   subject - pass only the subject name of the client
##   cream   - makes A-REX pretend it is a gLite CREAM service. This is the
##             recommended profile for interoperability with gLite.
##   emi     - new profile developed in the EMI project. This is the default option.
#arguspdp_profile="cream"
## arguspdp_acceptnotapplicable - specify if the "NotApplicable" decision returned by the Argus
## PDP service is treated as a reason to deny the request. Default is 'no', which treats
## "NotApplicable" as a reason to deny the request.
#arguspdp_acceptnotapplicable="no"
## watchdog - specifies if an additional watchdog process is spawned to restart
## the main process if it is stuck or dies. Possible values are 'yes' and 'no'.
## Default is 'no'.
#watchdog="no"
## groupcfg group_name [group_name ...] - specifies the authorization groups
## for grid-manager to accept. The main location of this parameter is
## inside the [gridftpd/jobs] block. The 'groupcfg' located here is only
## effective if the computing service is configured without the GridFTP interface
## and hence the [gridftpd/jobs] block is missing.
#groupcfg="users"
## unixmap [unixname][:unixgroup] rule - more sophisticated mapping to a
## local account
## unixgroup group rule - more sophisticated mapping to a local account
## for specific authorization groups.
## unixvo vo rule - more sophisticated mapping to a local account for
## users belonging to the specified VO.
## The main location for these parameters is the [gridftpd] section.
If ## located here they are only active if computing service is configured ## without GridFTP interface and hence [gridftpd/jobs] block is missing. ## For more detailed information see section [gridftpd] and read "ARC ## Computing Element. System Administrator guide" manual. #unixmap="nobody:nogroup all" #unixgroup="users simplepool /etc/grid-security/pool/users" #unixvo="ATLAS unixuser atlas:atlas" ## allowunknown yes|no - check user subject against grid-mapfile. ## The main location for this parameter is [gridftpd] section. If ## located here it is only active if computing service is configured ## without GridFTP interface and hence [gridftpd/jobs] block is missing. ## For more detailed information see section [gridftpd]. #allowunknown="no" ## delegationdb db_name - specify which DB to use to store delegations. ## Currently supported db_names are bdb and sqlite. Default is bdb. #delegationdb="bdb" ## forcedefaultvoms VOMS_FQAN - specify VOMS FQAN which user will be ## assigned if his/her credentials contain no VOMS attributes. ## To assign different values to different queues put this command ## into [queue] block. #forcedefaultvoms="/vo/group/subgroup" ## arex-ganglia implementation. Can run alongside the standalone gangliarc ## To enable, the path to gmetric must be set, otherwise the histograms will ## not show up. Path is usually /usr/bin, but should naturally point to your ## specific ganglia location #enable_ganglia="yes" #ganglialocation="/usr/bin" #################################################################### ### [data-staging] block ########################################### ## ## [data-staging] block configures DTR data staging parameters ## #[data-staging] ## debug - Log level for transfer logging in job.id.errors files, between ## 0 (FATAL) and 5 (DEBUG). Default is to use value set by debug option in ## [grid-manager] section. #debug="4" ## maxdelivery - Maximum number of concurrent file transfers, i.e. active ## transfers using network bandwidth. This is the total number for the whole ## system including any remote staging hosts. Default is 10. #maxdelivery="40" ## maxprocessor - Maximum number of concurrent files in each pre- and post- ## processing state, eg cache check or replica resolution. Default is 10. #maxprocessor="20" ## maxemergency - Maximum "emergency" slots which can be assigned to transfer ## shares when all slots up to the limits configured by the above two options ## are used by other shares. This ensures shares cannot be blocked by others. ## Default is 1. #maxemergency="5" ## maxprepared - Maximum number of files in a prepared state, i.e. pinned on a ## remote storage such as SRM for transfer. A good value is a small multiple of ## maxdelivery. Default is 200. #maxprepared="250" ## sharetype - Scheme to assign transfer shares. Possible values are dn, ## voms:vo, voms:role and voms:group. #sharetype="voms:role" ## definedshare - Defines a share with a fixed priority, different from the ## default (50). Priority is an integer between 1 (lowest) and 100 (highest). #definedshare="myvo:production 80" #definedshare="myvo:student 20" ## dtrlog - A file in which data staging state information (for monitoring and ## recovery purposes) is periodically dumped. Default is controldir/dtrstate.log #dtrlog="/tmp/dtrstate.log" ## central_logfile - A file in which all data staging messages from every job ## will be logged (in addition to their job.id.errors files). If this option is ## not present or the path is empty the log file is not created. 
Note this file ## is not automatically controlled by logrotate. #central_logfile="/var/log/arc/datastaging.log" ## The following 4 options are used to configure multi-host data staging. ## ## deliveryservice - URL to a data delivery service which can perform remote ## data staging #deliveryservice="https://myhost.org:60003/datadeliveryservice" ## localdelivery - If any deliveryservice is defined, this option determines ## whether local data transfer is also performed. Default is no. #localdelivery="yes" ## remotesizelimit - Lower limit on file size (in bytes) of files that remote ## hosts should transfer. Can be used to increase performance by transferring ## small files using local processes. #remotesizelimit="100000" ## usehostcert - Whether the A-REX host certificate should be used for ## communication with remote hosts instead of the users' proxies. Default is no. #usehostcert="yes" ## acix_endpoint URL - the ARC Cache Index specified here will be queried for ## every input file specified in a job description and any replicas found in ## sites with accessible caches will be added to the replica list of the input ## file. The replicas will be tried in the order specified by preferredpattern. #acix_endpoint="https://cacheindex.ndgf.org:6443/data/index" ## securetransfer yes|no - if data connection allows to choose use ## secure|non-secure data transfer. Currently only works for gridftp. ## default is no #securetransfer="no" ## passivetransfer yes|no - If yes, gridftp transfers are passive. Setting ## this option to yes can solve transfer problems caused by firewalls. ## default is no #passivetransfer="no" ## localtransfer yes|no - If yes, then the data download from Grid to the ## session directory (stagein) will be part of the batch job (prior to the ## execution of the binary). Default is no. #localtransfer="no" ## httpgetpartial yes|no - If yes, HTTP GET transfers may transfer data in ## chunks/parts. If no - data is always transfered in one piece. ## default is yes #httpgetpartial="yes" ## speedcontrol min_speed min_time min_average_speed max_inactivity - specifies ## how slow data transfer must be to trigger error. Tranfer is canceled if ## speed is below min_speed bytes per second for at least min_time seconds, ## or if average rate is below min_average_speed bytes per second, or no data ## was transfered for longer than max_inactivity seconds. Value of zero turns ## feature off. Default is "0 300 0 300" #speedcontrol="0 300 0 300" ## preferredpattern pattern - specifies a preferred pattern on which ## to sort multiple replicas of an input file. It consists of one or ## more patterns separated by a pipe character (|) listed in order of ## preference. Replicas will be ordered by the earliest match. If the ## dollar character ($) is used at the end of a pattern, the pattern ## will be matched to the end of the hostname of the replica. If an ## exclamation mark (!) is used at the beginning of a pattern, any replicas ## matching the pattern will be excluded from the sorted replicas. #preferredpattern="srm://myhost.ac.uk|.uk$|ndgf.org$|badhost.org$" ## copyurl url_head local_path - specifies that URLs, starting from 'url_head' ## should be accessed in a different way (most probaly unix open). The ## 'url_head' part of the URL will be replaced with 'local_path' and ## file from obtained path will be copied to the session directory. ## NOTE: 'local_path' can also be of URL type. 
## you can have several copyurl lines #copyurl="gsiftp://example.org:2811/data/ gsiftp://example.org/data/" #copyurl="gsiftp://example2.org:2811/data/ gsiftp://example2.org/data/" ## linkurl url_head local_path [node_path] - identical to 'copyurl', only ## file won't be copied, but soft-link will be created. The 'local_path' ## specifies the way to access the file from the gatekeeper, and is used ## to check permissions. The 'node_path' specifies how the file can be ## accessed from computing nodes, and will be used for soft-link creation. ## If 'node_path' is missing - 'local_path' will be used. ## you can have multiple linkurl settings #linkurl="gsiftp://somewhere.org/data /data" #linkurl="gsiftp://example.org:2811/data/ /scratch/data/" ## maxtransfertries - the maximum number of times download and upload will ## be attempted per job (retries are only performed if an error is judged ## to be temporary) #maxtransfertries="10" #################################################################### ## ### [gridftpd] block configures the gridftpd server ################ ## ## #[gridftpd] ## user user[:group] - Switch to a non root user/group after startup ## WARNING: Make sure that the certificate files are owned by the user/group ## specified by this option. Default value is root. #user="grid" ## debug debuglevel - Set debug level of the gridftpd daemon, between ## 0 (FATAL) and 5 (DEBUG). Default is 3 (INFO). #debug="2" ## daemon yes|no - Whether GFS is run in daemon mode. Default is yes. #daemon="yes" ## logfile path - Set logfile location #logfile="/var/log/arc/gridftpd.log" ## logsize size [number] - 'Size' specifies in bytes how big log file is ## allowed to grow (approximately). If log file exceeds specified size ## it is renamed into logfile.0. And logfile.0 is renamed into ## logfile.1, etc. up to 'number' logfiles. Don't set logsize if you don't ## want to enable the ARC logrotation because another logrotation tool is used. #logsize="100000 2" ## pidfile path - Specify location of file containig PID of daemon process. ## This is useful for automatic star/stop scripts. #pidfile="/var/run/gridftpd.pid" ## port bindport - Port to listen on (default 2811) #port="2811" ## pluginpath - directory where the plugin libraries are installed, default is ## $ARC_LOCATION/lib(64)/arc #pluginpath="/usr/lib/arc/" ## encryption yes|no - should data encryption be allowed, default is no, ## encryption is very heavy #encryption="no" ## include - Include contents of another config file. #include="path" ## allowunknown yes|no - if no, check user subject against grid-mapfile and ## reject if missing. By default unknown (not in the grid-mapfile) grid users ## are rejected #allowunknown="no" ## allowactivedata yes|no - if no, only passive data transfer is allowed. ## By default both passive and active data transfers are allowed. allowactivedata="yes" ## maxconnections - maximum number of connections accepted by a gridftpd server. ## Default is 100. #maxconnections="200" ## defaultbuffer size - defines size of every buffer for data ## reading/writing. Default is 65536. The actual value may decrease if the ## cumulative size of all buffers exceeds value specified by maxbuffer. #defaultbuffer="65536" ## maxbuffer size - defines maximal amount of memory in bytes to be ## allocated for all data reading/writing buffers. Default is 640kB. ## The number of buffers is (max {3, min {41, 2P + 1}}), where P is the ## parallelism level requested by the client. 
Hence, even without parallel ## streams enabled number of buffers will be 3. #maxbuffer="655360" ## globus_tcp_port_range, globus_udp_port_range - Firewall configuration #globus_tcp_port_range="9000,12000" #globus_udp_port_range="9000,12000" ## firewall - hostname or IP addres to use in response to PASV command ## instead of IP address of a network interface of computer. #firewall="hostname" ## x509_user_cert, x509_user_key - Server credentials location #x509_user_key="/etc/grid-security/hostkey.pem" #x509_user_cert="/etc/grid-security/hostcert.pem" ## x509_cert_dir - Location of trusted CA certificates #x509_cert_dir="/etc/grid-security/certificates" ## gridmap - The gridmap file location ## The default is /etc/grid-security/grid-mapfile #gridmap="/etc/grid-security/grid-mapfile" ## unixmap [unixname][:unixgroup] rule - more sophisticated way to map ## Grid identity of client to local account. If client matches 'rule' ## it's assigned specified unix identity or one generated by rule. ## Mapping commands are processed sequentially and processing stops ## at first successful one (like in [group] section). For possible rules ## read "ARC Computing Element. System Administrator guide" manual. All ## rules defined in [group] section canbe used. There are also additional ## rules which produce not only yes/no result but also give back user and ## group names to which mapping should happen. The way it works is quite ## complex so it is better to read full documentation. ## For safety reasons if sophisticated mapping is used it is better to ## finish mapping sequence with default mapping to nonexistent or safe ## account. #unixmap="nobody:nogroup all" ## unixgroup group rule - do mapping only for users belonging to ## specified authorization 'group'. It is similar to an additional filter ## for unixmap command which filters out all users not belonging to specified ## authorization group. Only rules which generate unix user and group names ## may be used in this command. Please read "ARC Computing Element System ## Administrator Guide" for more information. #unixgroup="users simplepool /etc/grid-security/pool/users" ## unixvo vo rule - do mapping only for users belonging to specified VO. ## Only rules which generate unix identity name may be used in this command. ## Please read "ARC Computing Element. System Administrator Guide" for more ## information. This command is similar to 'unixgroup' described above and ## exists for convenience for setups which base mapping on VOs users belong to. #unixvo="ATLAS unixuser atlas:atlas" ############################################################ ## ### [gridftpd/filedir] block ##################################### ## ## [gridftpd/filedir] "fileplugin" storage block ## subblock for "exporting" a directory using the gridftpd's fileplugin plugin. ## gridftp plugins are shared libraries. ## "filedir" is a unique label. The access control is set by using the ## "dir" config option ## #[gridftpd/filedir] ## plugin name - specifies name of shared library to be loaded relative to ## "pluginpath". ## The next line is MUST for a gridftp file server with "fileplugin", don't ## change anything #plugin="fileplugin.so" ## groupcfg group_name [group_name ...] - specifies authorization groups ## for which this plugin is activated. In case groupcfg is not used the ## plugin is loaded for every mapped grid user. Multiple names were ## may be specified delimited by blank space. Group names are as specified ## in [group] sections. 
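## Putting the fileplugin options together (path, mount and dir are described
## just below), a minimal read-only export could look like this illustrative
## sketch (paths are placeholders taken from the examples in this block):
#[gridftpd/filedir]
#plugin="fileplugin.so"
#path="/topdir"
#mount="/scratch/grid"
#dir="/ nouser read cd dirlist"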
#groupcfg="users" ## the name of the virtual directory served by the gridftp server, REQUIRED ## the exported storage area is accessible as gsiftp://my_server/topdir. ## "topdir" is just an example, call the virtual path anything you like, ## even "/" is a valid choice. #path="/topdir" ## the physical directory corresponding to the virtual one: ## gsiftp://my_server/topdir will give access to the ## /scratch/grid directory on my_server, REQUIRED #mount="/scratch/grid" ## dir - this is the access control parameter, you can have several "dir" lines ## controlling different directories within then same block ## ## dir path options - specifies access rules for accessing files in 'path' ## (relative to virtual and real path) and all the files and directories below. ## 'options' are: ## nouser - do not use local file system rights, only use those ## specifies in this line ## owner - check only file owner access rights ## group - check only group access rights ## other - check only "others" access rights ## if none of the above specified usual unix access rights are applied. ## read - allow reading files ## delete - allow deleting files ## append - allow appending files (does not allow creation) ## overwrite - allow overwriting already existing files (does not ## allow creation, file attributes are not changed) ## dirlist - allow obtaining list of the files ## cd - allow to make this directory current ## create owner:group permissions_or:permissions_and - allow creating ## new files. File will be owned by 'owner' and owning group ## will be 'group'. If '*' is used, the user/group to which ## connected user is mapped will be used. The permissions ## will be set to permissions_or & permissions_and. (second ## number is reserved for the future usage). ## ## mkdir owner:group permissions_or:permissions_and - allow creating new directories. ## some examples: ## Set permissions on mounted directory #dir="/ nouser read cd dirlist delete create *:* 664:664 mkdir *:* 775:775" ## Adjust permissions on some subdirectories #dir="/section1 nouser read mkdir *:* 700:700 cd dirlist" #dir="/section2 nouser read mkdir *:* 700:700 cd dirlist" ############################################################ ## ### [gridftpd/jobs] subblock ############################### ## [gridftpd/jobs] subblock which creates the jobsubmission interface, ## using the jobplugin of the gridftpd service. ## gridftp plugins are shared libraries. 'jobs' is a unique label. #[gridftpd/jobs] ## the path to the virtual gridftpd directory which is used during the ## job submission. MUST be set. #path="/jobs" ## plugin name - specifies name of shared library to be loaded relative to ## "pluginpath". ## The next line is MUST for a job submission service via gridftpd ## "jobplugin", don't change anything! #plugin="jobplugin.so" ## groupcfg group_name [group_name ...] - specifies authorization groups ## for which this plugin is activated. In case groupcfg is not used the ## plugin is loaded for every mapped grid user. #groupcfg="users" ## The 'allownew' config parameter sets if the grid resource accepts ## submission of new jobs. This parameter can be used to close down a grid. ## The default is yes #allownew="yes" ## remotegmdirs controldir sessiondir - Specifies control ## and session directories to which jobs can be submitted but which are ## under the control of another A-REX. The corresponding controldir and ## sessiondir parameters must be defined in another A-REX's ## configuration. Multiple remotegmdirs can be specified. 
#remotegmdirs="/mnt/host1/control /mnt/host1/session" ## maxjobdesc size - specifies maximal allowed size of job description ## in bytes. Default value is 5MB. If value is missing or 0 size is not ## limited. #maxjobdesc="5242880" ## configfile service_configuration_path - If [gridftpd] and [grid-manager] ## configuration parts are located in separate files this configuration ## option allows to link them. The service_configuration_path points ## to configuration file containing [grid-manager] section. ## Use this option only if You really know what You are doing. #configfile="/etc/arc.conf" #################################################################### ## ### [infosys] block ################################################ ## [infosys] block configures the hosting environment of the ## Information services (Local Info Tree, Index Service, Registrations, ## see the Information System manual) provided by the ## OpenLDAP slapd server. #[infosys] ## infosys_compat - Setting this variable will cause ARC to use the old ## infoproviders. ## NOTE: this only applies to ARC < 13.11. Old infoproviders ## have been REMOVED from release 13.11 on. ##Basically, the new version uses A-REX to create LDIF ## while the old version uses a BDII provider-script to do it. The new ## version is required for GLUE2 output. #infosys_compat="disable" ## infoproviders_timeout - this only applies to new infoproviders. ## it changes A-REX behaviour with respect to a single infoprovider run. ## Increase this value if you have many jobs in the controldir and ## infoproviders need more time to process. ## The value is in seconds. ## Default is 10800 seconds. #infoproviders_timeout="10800" ## validity_ttl - allows the sysadmin to define a specific validity ## time for the records in the information system. The number is in ## seconds. This number appears "as is" in the GLUE2 Validity fields. ## Default is 10800 seconds. #validity_ttl="10800" ## debug - sets the debug level/verbosity of the startup script {0 or 1}. ## Default is 0. #debug="1" ### Openldap specifics ## hostname - the hostname of the machine running the slapd service ## will be the bind for slapd. If not present, will be ## taken from the [common] block or guessed #hostname="my.testbox" ## port - the port where the slapd service runs. Default infosys port is 2135. #port="2135" ## slapd_loglevel - sets the native slapd loglevel (see man slapd). ## Slapd logs via syslog. The default is set to no-logging (0) and it is ## RECOMMENDED not to be changed in a production environment. ## Non-zero slap_loglevel value causes serious performance decrease. #slapd_loglevel="0" ## slapd_hostnamebind - may be used to set the hostname part of the ## network interface to which the slapd process will bind. Most of ## the cases no need to set since the hostname config parameter is already ## sufficient. The default is empty. The example below will bind the slapd ## process to all the network interfaces available on the server. #slapd_hostnamebind="*" ## threads - the native slapd threads parameter, default is 32. If you run an ## Index service too you should modify this value. #threads="128" ## timelimit - the native slapd timelimit parameter. Maximum number of seconds ## the slapd server will spend answering a search request. Default is 3600. ## You probably want a much lower value. #timelimit="1800" ## idletimeout - the native slapd idletimeout parameter. Maximum number of ## seconds the slapd server will wait before forcibly closing idle client ## connections. 
It's value must be larger than the value of "timelimit" option. ## If not set, it defaults to timelimit + 1. #idletimeout="1800" ## ldap_schema_dir - allows to explicitly specify a path to the schema ## files. Note that this doesn't override standard location, but adds ## the specified path to the standard locations /etc/ldap and /etc/openldap. ## If you plan to relocate Glue1 and GLUE2 schemas, all these should be ## in the same directory that you specify here. ## this option does NOT apply to nordugrid.schema file. Such file has a ## release dependent location. ## Default is to use only standard locations described above. #ldap_schema_dir="/nfs/ldap/schema/" ## oldconfsuffix .suffix - sets the suffix of the backup files of the ## low-level slapd config files in case they are regenerated. Default is ## ".oldconfig". #oldconfsuffix=".oldconfig" ## overwrite_config yes|no - determines if the infosys startup scripts ## should generate new low-level slapd configuration files. By default the ## low-level configuration files are regenerated with every server startup ## making use of the values specified in the arc.conf. #overwrite_config="yes" ### Logging ## registrationlog path - specifies the logfile for the registration processes ## initiated by your machine. Default is "/var/log/arc/inforegistration.log" #registrationlog="/var/log/arc/inforegistration.log" ## providerlog path - Specifies log file location for the information ## provider scripts. The feature is only available with >= 0.5.26 tag. ## Default is "/var/log/arc/infoprovider.log" #providerlog="/var/log/arc/infoprovider.log" ## provider_loglevel - loglevel for the infoprovider scripts (0-5). ## The default is 1 (critical errors are logged) #provider_loglevel="2" ### Other ## user unix_user - the unix user running the infosys processes such as ## the slapd, the registrations and infoprovider scripts. ## By default the ldap-user is used, you can run it as root if you wish. ## In case of non-root value you must make sure that the A-REX ## directories and their content are readable by the 'user' and the 'user' ## has access to the full LRMS information including jobs submitted by ## other users. The A-REX directories (controldir, sessiondir ## runtimedir, cachedir) are specified in the [grid-manager] block #user="root" ## giis_location - If giis_location is not set, ARC_LOCATION will be ## used instead. #giis_location="/usr/" ## infosys_ldap_run_dir - path where NorduGrid/GLUE2 LDAP ldif file ## will be generated, and where the fifo to sync between infoproviders ## and BDII will be generated. Default: /var/run/arc/infosys/ #infosys_ldap_run_dir="/var/run/arc/infosys/" ## These three variables decide which schema should be used for ## publishing data. They can all be enabled at the same time. Default is ## to enable nordugrid mds and disable glue. ## infosys_nordugrid - Enables NorduGrid schema #infosys_nordugrid="enable" ## infosys_glue12 - Enables glue1.2/1.3 schema ## If infosys_glue12 is enabled, then resource_location, ## resource_latitude and resource_longitude need to be set in the ## [infosys/glue12] block. These variables do not have default values. ## The rest of the variables defaults are showcased below. #infosys_glue12="disable" ## infosys_glue2 - Enables GLUE2 schema #infosys_glue2_ldap="disable" ## infosys_glue2_ldap_showactivities - Enables GLUE2 ComputingActivities ## to appear in the LDAP rendering ## they're currently disabled by default. 
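## As an illustration only, a site publishing both the NorduGrid schema and the
## GLUE2 LDAP rendering (while keeping glue1.2 off) might set:
#infosys_nordugrid="enable"
#infosys_glue12="disable"
#infosys_glue2_ldap="enable"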
#infosys_glue2_ldap_showactivities="disable" ## infosys_glue2_service_qualitylevel - Allows a sysadmin to define a ## different GLUE2 QualityLevel for A-REX. ## This can be used for operations. ## default: production ## Allowed value is one of: "production", "pre-production", "testing", ## "development" ## Refer to GLUE2 documentation for the meaning of these strings. #infosys_glue2_service_qualitylevel="production" ### Slapd specific ## slapd - Configure where the slapd command is located, default is: ## /usr/sbin/slapd #slapd="/usr/sbin/slapd" ## slapadd - Configure where the slapadd command is located, default is: ## /usr/sbin/slapadd #slapadd="/usr/sbin/slapadd" ### BDII specific ## Starting from 11.05, Nordugrid ARC only supports BDII5. ## These variables are usually automatically set by ARC, and ## are here mostly for debug purposes and to tweak exotic ## BDII5 installations. In general, a sysadmin should not set these. ## bdii_debug_level - set the following to DEBUG to check bdii errors ## in bdii-update.log ## useful not to enable slapd logs reducing performance issues. #bdii_debug_level="ERROR" ## provider_timeout - This variable allows a system administrator to ## modify the behaviour of bdii-update. This is the time BDII waits ## for the scripts generated by A-REX infoproviders to produce ## their output. ## Default is 10800 seconds. #provider_timeout=108000 ## infosys_debug - This variable disables/enables an ldap-database ## containing information about the ldap database itself on "o=infosys" ## it is very useful for debugging. Default is enabled. #infosys_debug="disable" ## BDII5 uses these variables. These might change depending on BDII version. ## ARC sets them by inspecting distributed bdii configuration files. ## DO NOT CHANGE UNLESS YOU KNOW WHAT YOU'RE DOING ## bdii_location - The installation directory for the BDII. ## Default is /usr #bdii_location="/usr" ## bdii_var_dir - Contains BDII pid files and slapd pid files #bdii_var_dir="/var/run/arc/bdii" ## bdii_log_dir - Contains infosys logs #bdii_log_dir="/var/log/arc/bdii" ## bdii_tmp_dir - Contains provider scripts #bdii_tmp_dir="/var/tmp/arc/bdii" ## bdii_lib_dir - Contains slapd databases #bdii_lib_dir="/var/lib/arc/bdii" ## bdii_update_pid_file, slapd_pid_file - Allows to change bdii-update ## and slapd pidfiles filename and location #bdii_update_pid_file="/var/run/arc/bdii-update.pid" #slapd_pid_file="$bdii_var_dir/db/slapd.pid" ## bdii_database - Configure what ldap database backend should be used, ## default is: bdb #bdii_database="bdb" ## The following options are for tweaking only. Usually one should not ## configure them. They change the BDII configuration file generated ## by ARC. ## Please consult BDII manual for details. ## bdii_conf - Location of the bdii config file. ## ARC modifies the original and sets it ## as default /var/run/arc/infosys/bdii.conf #bdii_conf="/var/run/arc/infosys/bdii.conf" ## Command line options used to run bdii-update. ## ARC finds it looking into bdii configuration. ## default: ${bdii_location}/sbin/bdii-update #bdii_update_cmd #bdii_archive_size #bdii_db_config #bdii_breathe_time #bdii_delete_delay #bdii_read_timeout #bdii_run_dir #bindmethod #cachettl #db_archive #db_checkpoint ## EGIIS-related commands ## giis_fifo - path to fifo used by EGIIS. ## default is /var/run/arc/giis-fifo ## This file is automatically created by ARC, the option ## is only for tweaking. 
#giis_fifo=/var/run/arc/giis-fifo
## LDAP parameters of the cluster.pl (old) infoprovider, use the defaults,
## do NOT change them unless you know what you are doing
## cachetime affects old infoproviders, and forces the validity time of the record.
#cachetime="30"
## sizelimit affects registration to egiis
#sizelimit="10"
## slapd_cron_checkpoint - LDAP checkpoint enable/disable
## This option was introduced to solve bug #2032, to reduce the number
## of log files produced by BDII. It is usually not needed,
## but if BDII produces large logs and a huge number of files,
## it should help to solve the related issues.
#slapd_cron_checkpoint="enable"
####################################################################
##
### [infosys/glue12] block #########################################
##
## This block holds information that is needed by the glue 1.2
## generation. This is only necessary if infosys_glue12 is enabled.
##
#[infosys/glue12]
## These variables need to be set if infosys_glue12 is enabled.
## IMPORTANT: no slashes or backslashes here!
## Example: "Kastrup, Denmark"
#resource_location=""
## Example: "55.75000"
#resource_latitude=""
## Example: "12.41670"
#resource_longitude=""
## Example 2400
#cpu_scaling_reference_si00=""
## Example Cores=3,Benchmark=9.8-HEP-SPEC06
#processor_other_description=""
## Example http://www.ndgf.org
#glue_site_web=""
## Example NDGF-T1
#glue_site_unique_id=""
## This variable decides if the GlueSite should be published. In case
## you want a more complicated setup with several publishers
## of data to a GlueSite, you may wish to tweak this parameter.
#provide_glue_site_info="true"
####################################################################
##
### [infosys/site/sitename] block ##################################
##
## [infosys/site/sitename] Site BDII configuration block; this block is
## used to configure ARC to generate a site-bdii that can be registered
## in GOCDB etc. to make it a part of a gLite network. The sitename
## part should describe the site-bdii being generated.
#[infosys/site/sitename]
## The unique id used to identify this site, eg "NDGF-T1"
#unique_id=""
## The url is of the format:
## ldap://host.domain:2170/mds-vo-name=something,o=grid and should
## point to the resource-bdii
#url=""
####################################################################
##
### [infosys/admindomain] block ##################################
##
## [infosys/admindomain] GLUE2 AdminDomain configuration block,
## to configure administrative items of the cluster. These values
## affect neither the glue12 nor the nordugrid renderings.
## If the whole block is not specified, it will default to an AdminDomain
## called UNDEFINEDVALUE.
#[infosys/admindomain]
## name - the Name attribute for the domain. This will show
## in the top-BDII to group the resources belonging to this cluster.
## To group a bunch of clusters under the same AdminDomain,
## just use the same name.
## If not specified, it will default to UNDEFINEDVALUE.
## Example:
#name="ARC-TESTDOMAIN"
## description - description of this domain. Not mandatory.
#description="ARC test Domain"
## www - url pointing at a site holding information about the
## AdminDomain. Not mandatory.
## Example:
#www="http://www.nordugrid.org/"
## distributed - set this to yes if the domain is distributed,
## that is, if the resources belonging to the domain
## are considered geographically distributed.
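## For illustration, a complete AdminDomain block for a distributed domain
## might combine the attributes above and below (values are just examples):
#[infosys/admindomain]
#name="ARC-TESTDOMAIN"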
####################################################################
##
### [infosys/index/indexname] block ################################
##
## [infosys/index/indexname] Index Service block configures and enables
## an Information Index Service. A separate Index block is required for
## every Index Service you may run on the given machine.
## The 'indexname' constitutes the
## 'mds-vo-name=indexname,o=grid' LDAP suffix characterizing the Index Service.
#[infosys/index/indexname]
## name - The unique (within the hosting machine) name of the
## Index Service. Its value becomes part of the LDAP suffix
## of the Index Service:
## (mds-vo-name=value of the name attribute, o=grid)
#name="indexname"
## allowregistration - Implements registration filtering within an Index Service.
## Sets the Local Information Trees or lower level Index Services
## allowed to register to the Index Service. List each allowed registrant
## with the allowreg attribute.
## WARNING: specifying allowreg implies strict filtering;
## only the matching registrants will be able to register to the Index.
## The wildcard * can be used in allowreg. Several allowreg lines can be used.
## Some examples:
## -All the Swedish machines can register, regardless of whether they are resources or Indices
## allowreg="*.se:2135"
## -Cluster resources from Denmark can register
## allowreg="*.dk:2135/nordugrid-cluster-name=*, Mds-Vo-name=local, o=grid"
## -Storage resources from HIP, Finland can register
## allowreg="*hip.fi:2135/nordugrid-se-name=*, Mds-Vo-name=local, o=grid"
## -The host index1.sweden.se can register as a Sweden Index (and only as a Sweden Index)
## allowreg="index1.sweden.se:2135/Mds-vo-Name=Sweden,o=Grid"
## -Any Index Service can register
## allowreg="*:2135/Mds-vo-Name=*,o=Grid"
#allowreg="trusted.host.org.se:2135/Mds-vo-Name=Trusted-Index,o=Grid"
####################################################################
##
### [infosys/index/indexname/registration/registrationname] block #
## [infosys/index/indexname/registration/registrationname]
## Index service registration block
## This block enables a registration process initiated by the
## 'indexname' Index Service (configured previously)
## to a target Index Service.
## NorduGrid maintains a webpage with information on major
## Index Services:
## http://www.nordugrid.org/NorduGridMDS/index_service.html
#[infosys/index/indexname/registration/registrationname]
## targethostname - the hostname of the machine running the registration target
## Index Service
#targethostname="index.myinstitute.org"
## targetport - the port on which the target Index Service is running.
## The default is the 2135 Infosys port.
#targetport="2135"
## targetsuffix - the LDAP suffix of the target Index Service
#targetsuffix="mds-vo-name=BigIndex,o=grid"
## regperiod - The registration period in seconds; the registration messages are
## continuously sent according to the regperiod. Default is 120 sec.
#regperiod="300"
## registranthostname - the hostname of the machine sending the registrations.
## This attribute inherits its value from the [common] and [infosys] blocks;
## in most cases there is no need to set it.
#registranthostname="myhost.org"
## registrantport - the port of the slapd service hosting the
## registrant Index Service.
The attribute inherits its value from the ## [infosys] block (and therefore defaults to 2135) #registrantport="2135" ## registrantsuffix - the LDAP suffix of the registrant Index Service. ## It is automatically determined from the registration block name, ## therefore most of the cases no need to specify. ## In this case the default registrantsuffix will be: ## "Mds-Vo-name=indexname" ## please mind uppercase/lowercase characters in the above string ## when defining allowreg in an index! ## Don't set it unless you want to overwrite the default. #registrantsuffix="mds-vo-name=indexname,o=grid" ## timeout - The suggested timeout to be included in the registration. ## Default: 120. #timeout="120" ## ttl - The suggested TTL to be included in the registration. The default ## is twice the registration period. #ttl="600" #################################################################### # ### [cluster] block ################################################ ## This block configures how your cluster is seen on the grid monitor (infosys ## point of view). Please consult the Infosys manual for detailed information ## on cluster attributes. ## If you want your cluster (configured below) to appear in the infosys ## (on the monitor) you also need to create a cluster registration block ## (see the next block). #[cluster] ## hostname - the FQDN of the frontend node, if the hostname is not set already ## in the common block then it MUST be set here #hostname="myhost.org" ## interactive_contactstring - the contact string for interactive logins, set this ## if the cluster supports some sort of grid-enabled interactive login (gsi-ssh), ## multivalued #interactive_contactstring="gsissh://frontend.cluster:2200" ## alias - an arbitrary alias name of the cluster, optional #cluster_alias="Big Blue Cluster in Nowhere" ## comment - a free text field for additional comments on the cluster in a single ## line, no newline character is allowed! #comment="This cluster is specially designed for XYZ applications: www.xyz.org" ## cluster_location - The geographical location of the cluster, preferably ## specified as a postal code with a two letter country prefix #cluster_location="DK-2100" ## cluster_owner - it can be used to indicate the owner of a resource, multiple ## entries can be used #cluster_owner="World Grid Project" #cluster_owner="University of NeverLand" ## authorizedvo - this attribute is used to advertise which VOs are authorized ## on the cluster. Multiple entries are allowed. ## Add only one VO for each authorizedvo entry. Multiple VOs in the same line ## will cause errors. ## These entries will be shown in all GLUE2 AccessPolicy and MappingPolicy ## objects, that is, they will apply for all Endpoints(Interfaces) and all ## Shares except the one representing the bare queue. #authorizedvo="developer.nordugrid.org" #authorizedvo="community.nordugrid.org" ## clustersupport - this is the support email address of the resource, multiple ## entries can be used #clustersupport="grid.support@mysite.org" #clustersupport="grid.support@myproject.org" ## lrmsconfig - an optional free text field to describe the configuration of your ## Local Resource Management System (batch system). #lrmsconfig="single job per processor" ## homogeneity - determines whether the cluster consists of identical NODES with ## respect to cputype, memory, installed software (opsys). The frontend is NOT ## needed to be homogeneous with the nodes. 
In case of inhomogeneous nodes, try ## to arrange the nodes into homogeneous groups assigned to a queue and use ## queue-level attributes. Possible values: True,False, the default is True. ## False will trigger multiple GLUE2 ExecutionEnvironments to be published ## if applicable. #homogeneity="True" ## architecture - sets the hardware architecture of the NODES. The "architecture" ## is defined as the output of the "uname -m" (e.g. i686). Use this cluster ## attribute if only the NODES are homogeneous with respect to the architecture. ## Otherwise the queue-level attribute may be used for inhomogeneous nodes. If ## the frontend's architecture agrees to the nodes, the "adotf" (Automatically ## Determine On The Frontend) can be used to request automatic determination. #architecture="adotf" ## opsys - this multivalued attribute is meant to describe the operating system ## of the computing NODES. Set it to the opsys distribution of the NODES and not ## the frontend! opsys can also be used to describe the kernel or libc version ## in case those differ from the originally shipped ones. The distribution name ## should be given as distroname-version.number, where spaces are not allowed. ## Kernel version should come in the form kernelname-version.number. ## If the NODES are inhomogeneous with respect to this attribute do NOT set it on ## cluster level, arrange your nodes into homogeneous groups assigned to a queue ## and use queue-level attributes. ## opsys will be used to fill GLUE2 OSName, OSVersion and OSFamily unless ## these values are explicitly defined. See below for their usage. #opsys="Linux-2.6.18" #opsys="glibc-2.5.58" #opsys="CentOS-5.6" ## nodecpu - this is the cputype of the homogeneous nodes. The string is ## constructed from the /proc/cpuinfo as the value of "model name" and "@" and ## value of "cpu MHz". Do NOT set this attribute on cluster level if the NODES ## are inhomogeneous with respect to cputype, instead arrange the nodes into ## homogeneous groups assigned to a queue and use queue-level attributes. Setting ## the nodecpu="adotf" will result in Automatic Determination On The Frontend, ## which should only be used if the frontend has the same cputype as the ## homogeneous nodes. #nodecpu="AMD Duron(tm) Processor @ 700 MHz" ## nodememory - this is the amount of memory (specified in MB) on the node ## which can be guaranteed to be available for the application. Please note ## in most cases it is less than the physical memory installed in the nodes. ## Do NOT set this attribute on cluster level if the NODES are inhomogeneous ## with respect to their memories, instead arrange the nodes into homogeneous ## groups assigned to a queue and use queue-level attributes. #nodememory="512" ## defaultmemory - If a user submits a job without specifying how much ## memory should be used, this value will be taken first. The order is: ## xrsl -> defaultmemory -> nodememory -> 1GB. This is the amount of ## memory (specified in MB) that a job will request(per rank). #defaultmemory="512" ## benchmark name value - this optional multivalued attribute can be used to ## specify benchmark results on the cluster level. Use this cluster attribute ## if only the NODES are homogeneous with respect to the benchmark performance. ## Otherwise the similar queue-level attribute should be used. Please try to ## use one of standard benchmark names given below if possible. 
#benchmark="SPECINT2000 222" #benchmark="SPECFP2000 333" ## middleware - the multivalued attribute shows the installed grid software on ## the cluster, nordugrid and globus-ng is automatically set, no need to specify ## middleware=nordugrid or middleware=globus #middleware="my grid software" ## nodeaccess - determines how the nodes can connect to the internet. Not setting ## anything means the nodes are sitting on a private isolated network. "outbound" ## access means the nodes can connect to the outside world while "inbound" access ## means the nodes can be connected from outside. inbound & outbound access ## together means the nodes are sitting on a fully open network. #nodeaccess="inbound" #nodeaccess="outbound" ## dedicated_node_string - the string which is used in the PBS node config to ## distinguish the grid nodes from the rest. Suppose only a subset of nodes are ## available for grid jobs, and these nodes have a common "node property" string, ## this case the dedicated_node_string should be set to this value and only the ## nodes with the corresponding "pbs node property" are counted as grid enabled ## nodes. Setting the dedicated_node_string to the value of the "pbs node ## property" of the grid-enabled nodes will influence how the totalcpus, user ## freecpus is calculated. You don't need to set this attribute if your cluster ## is fully available for the grid and your cluster's PBS config does not use ## the "node property" method to assign certain nodes to grid queues. You ## shouldn't use this config option unless you make sure your PBS config makes ## use of the above described setup. #dedicated_node_string="gridnode" ## localse - this multivalued parameter tells the BROKER that certain URLs (and ## locations below that) should be considered "locally" available to the cluster. #localse="gsiftp://my.storage/data1/" #localse="gsiftp://my.storage/data2/" ## gm_mount_point - this is the same as the "path" from the [gridftpd/jobs] ## block. The default is "/jobs". Will be cleaned up later, do NOT touch it. #gm_mount_point="/jobs" ## gm_port - this is the same as the "port" from the [gridftpd] block. The ## default is "2811". Will be cleaned up later. #gm_port="2811" ## cpudistribution - this is the CPU distribution over nodes ## given in the form: ncpu:m where ## n is the number of CPUs per machine ## m is the number of such computers ## Example: 1cpu:3,2cpu:4,4cpu:1 represents a cluster with ## 3 single CPU machines, 4 dual CPU machines, one machine with 4 CPUs. ## This command is needed to tweak and overwrite the values returned ## by the underlying LRMS. In general there is no need to configure it. #cpudistribution=1cpu:3,2cpu:4,4cpu:1 ### GLUE2 specific attributes ## OSName, OSVersion and OSFamily are a replacement for nordugrid opsys ## configuration variable. They define which operating system is running ## on the hardware (ExecutionEnvironment) behind a ComputingShare ## (a specific set of resources, for example a batch system queue) ## These strings are lowercase text and they should be ## listed and existing in the GLUE2 open enumerations at: ## https://github.com/OGF-GLUE/Enumerations ## However the sysadmin is free to enter new values if these are not ## present in the above registry. 
## If defined, these commands will have the following effect:
## GLUE2 rendering: their values override whatever is defined in opsys
## NorduGrid: their value will be added as a new entry to the
## existing nordugrid-cluster-opsys or nordugrid-queue-opsys attribute,
## with the following format:
## nordugrid-queue-opsys: -
## OSName - Name of the operating system distribution or product name. Lowercase.
## Example: centos
## Example: windowsxp
# OSName="Ubuntu"
## OSVersion - Vendor specific string that identifies the operating system
## version.
## Example: 6.6
# OSVersion="12.04"
## OSFamily - Name that identifies a family of operating systems, e.g. linux
## kernel or windows kernel based. Lowercase.
## Example: solaris
## Example: windows
# OSFamily="linux"
####################################################################
##
## [infosys/cluster/registration/registrationname] block ############
## Computing resource (cluster) registration block
## configures and enables the registration process of a
## computing resource to an Index Service.
## A cluster can register to several Index Services; in that case
## each registration process should have its own block.
## NorduGrid maintains a webpage with information on major
## Index Services:
## http://www.nordugrid.org/NorduGridMDS/index_service.html
#[infosys/cluster/registration/registrationname]
## targethostname - see description earlier
#targethostname="index.myinstitute.org"
## targetport - see description earlier
#targetport="2135"
## targetsuffix - see description earlier
#targetsuffix="mds-vo-name=BigIndex,o=grid"
## regperiod - see description earlier
#regperiod="300"
## registranthostname - see description earlier
#registranthostname="myhost.org"
## registrantport - see description earlier
#registrantport="2135"
## registrantsuffix - the LDAP suffix of the registrant cluster resource.
## It is automatically determined from the [infosys] block and the
## registration blockname. In this case the default registrantsuffix will be:
## "nordugrid-cluster-name=hostname,Mds-Vo-name=local,o=Grid"
## Please mind uppercase/lowercase characters above if defining
## allowreg in an index!
## Don't set it unless you want to overwrite the default.
#registrantsuffix="nordugrid-cluster-name=myhost.org,Mds-Vo-name=local,o=grid"
## timeout - The suggested timeout to be included in the registration.
## Default: 45.
#timeout="45"
## ttl - The suggested TTL to be included in the registration. The default
## is twice the registration period.
#ttl="600"
####################################################################
##
### [queue/queue_name] block #######################################
##
## Each grid-enabled queue should have a separate queue block.
## The queue_name should be used as a label in the block name.
## A queue can represent a PBS/LSF/SGE/SLURM/LL queue, an SGE pool, a Condor
## pool or a single machine in case the 'fork' type of LRMS is specified in
## the [common] block.
## Queues don't need to be registered (there is no queue registration block);
## once you have configured your cluster to register to an Index Service,
## the queue entries (configured with this block) will automatically be there.
## Please consult the ARC Information System manual for detailed information
## on queue attributes:
## http://www.nordugrid.org/documents/arc_infosys.pdf
##
## Use the queue_name for labeling the block. The special name 'fork' should be
## used for labeling the queue block in case you specified the 'fork' type of
## LRMS in the [common] block.
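## For the single-machine 'fork' LRMS case mentioned above, a complete queue
## block can be as small as the sketch below (the comment text is illustrative);
## the full per-attribute reference for a batch-system queue follows.
#[queue/fork]
#name="fork"
#homogeneity="True"
#comment="Single frontend machine served by the fork LRMS"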
#[queue/gridlong]
## name - sets the name of the grid-enabled queue. It MUST match the queue_name
## label of the corresponding queue block, see above.
## Use "fork" if you specified the 'fork' type of LRMS in the [common] block.
## The queue name MUST be specified, even if the queue block is already correctly
## labeled.
#name="gridlong"
## homogeneity - determines whether the queue consists of identical NODES with
## respect to cputype, memory, installed software (opsys).
## In case of inhomogeneous nodes, try to arrange the nodes into homogeneous
## groups and assign them to a queue.
## Possible values: True, False; the default is True.
#homogeneity="True"
## scheduling_policy - this optional parameter tells the scheduling policy of
## the queue. PBS by default offers the FIFO scheduler; many sites run MAUI.
## At the moment FIFO & MAUI are supported. If you have a MAUI scheduler you
## should specify the "MAUI" value since it modifies the way the queue resources
## are calculated. By default the "FIFO" scheduler is assumed.
#scheduling_policy="FIFO"
## comment - a free text field for additional comments on the queue in a single
## line, no newline character is allowed!
#comment="This queue is nothing more than a condor pool"
## maui_bin_path - set this parameter to the path of the MAUI commands (like
## showbf) in case you specified the "MAUI" scheduling_policy above. This
## parameter can be set in the [common] block as well.
#maui_bin_path="/usr/local/bin"
## queue_node_string - In PBS you can assign nodes to a queue (or a queue to
## nodes) by using the "node property" PBS node config method and assigning the
## marked nodes to the queue (setting the resources_default.neednodes =
## queue_node_string for that queue). This parameter should contain the "node
## property" string of the queue-assigned nodes. Setting the queue_node_string
## changes how the queue-totalcpus, user freecpus are determined for this queue.
## Essentially, the queue_node_string value is used to construct the nodes= string
## in the PBS script, such as nodes=count:queue_node_string where count is taken from
## the job description (1 if not specified). You shouldn't use this option unless
## you are sure that your PBS configuration makes use of the above configuration.
## Read the NorduGrid PBS instructions for more information:
## http://www.nordugrid.org/documents/pbs-config.html
#queue_node_string="gridlong_nodes"
#queue_node_string="ppn=4:ib"
## sge_jobopts - additional SGE options to be used when submitting jobs to SGE
## from this queue. If in doubt, leave it commented out.
#sge_jobopts="-P atlas -r yes"
## condor_requirements - only needed if using Condor. It needs to be defined for
## each queue. Use this option to determine which nodes belong to the current
## queue. The value of 'condor_requirements' must be a valid constraints string
## which is recognized by a condor_status -constraint '....' command. It can
## reference pre-defined ClassAd attributes (like Memory, Opsys, Arch, HasJava,
## etc.) but also custom ClassAd attributes. To define a custom attribute on a
## condor node, just add two lines like the ones below in the `hostname`.local
## config file on the node:
## NORDUGRID_RESOURCE=TRUE
## STARTD_EXPRS = NORDUGRID_RESOURCE, $(STARTD_EXPRS)
## A job submitted to this queue is allowed to run on any node which satisfies
## the 'condor_requirements' constraint. If 'condor_requirements' is not set,
## jobs will be allowed to run on any of the nodes in the pool. When configuring
## multiple queues, you can differentiate them based on memory size or disk
## space, for example:
##
#condor_requirements="(OpSys == "linux" && NORDUGRID_RESOURCE && Memory >= 1000 && Memory < 2000)"
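## As a sketch of that approach (the queue names and memory thresholds below are
## purely hypothetical), two queues could reuse the custom NORDUGRID_RESOURCE
## attribute defined above and split the pool by node memory:
## in a hypothetical [queue/gridsmall] block:
#condor_requirements="(OpSys == "linux" && NORDUGRID_RESOURCE && Memory < 2000)"
## and in a hypothetical [queue/gridbig] block:
#condor_requirements="(OpSys == "linux" && NORDUGRID_RESOURCE && Memory >= 2000)"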
## lsf_architecture - CPU architecture to request when submitting jobs to LSF.
## Use only if you know what you are doing.
#lsf_architecture="PowerPC"
## totalcpus - manually sets the number of cpus assigned to the queue. No need to
## specify the parameter in case the queue_node_string method was used to assign
## nodes to the queue (in this case it is dynamically calculated and the static
## value is overwritten) or when the queue has access to the entire cluster
## (in this case the cluster-level totalcpus is the relevant parameter). Use this
## static parameter only if some special method is applied to assign a subset of
## totalcpus to the queue.
#totalcpus="32"
## queue-level configuration parameters: nodecpu, nodememory, architecture, opsys
## and benchmark should be set if they are homogeneous over the nodes assigned
## to the queue AND they are different from the cluster-level value.
## Their meanings are described in the cluster block. Usage: this queue collects
## nodes with "nodememory=512" while another queue has nodes with
## "nodememory=256" -> don't set the cluster attributes but use the queue-level
## attributes. When the frontend's architecture or cputype agrees with the queue
## nodes, "adotf" (Automatically Determine On The Frontend) can be used to
## request automatic determination of architecture or nodecpu.
## For GLUE2, fine-tuned configuration of OSName, OSVersion, OSFamily
## is allowed as described in the [cluster] block.
#nodecpu="adotf"
#nodememory="512"
#architecture="adotf"
#opsys="Fedora 16"
#opsys="Linux-3.0"
#OSName="Fedora"
#OSVersion="16"
#OSFamily="linux"
#benchmark="SPECINT2000 222"
#benchmark="SPECFP2000 333"
## ac_policy - queue access policy rules based on VOMS attributes in the user's
## proxy certificate (requires the arc-vomsac-check plugin to be enabled).
## Matching rules have the following format:
## ac_policy="[+/-]VOMS: "
## Please read the arc-vomsac-check manual page for more information.
#ac_policy="-VOMS: /badvo"
#ac_policy="VOMS: /.*/Role=production"
## maxslotsperjob - this configures the GLUE2 MaxSlotsPerJob value
## on a particular queue (see the GLUE2 definition).
## This value is usually generated by the LRMS
## infocollectors, but there are cases in which a system administrator
## might like to tweak it.
## The default is to publish what is returned by the LRMS, and if nothing is
## returned, NOT to publish the MaxSlotsPerJob attribute.
## If a system administrator sets the value here, that value will be
## published instead, regardless of what the LRMS returns.
## Each LRMS might have a different meaning for this value.
#maxslotsperjob="5"
## authorizedvo - this attribute is used to advertise which VOs are authorized
## on each specific queue. Multiple entries are allowed.
## Add only one VO for each authorizedvo entry. Multiple VOs in the same line
## will cause errors.
## These entries will be shown in the MappingPolicy objects, that is,
## they will apply to the Shares that correspond to each VO.
## A ComputingShare object will be created for the bare queue to
## gather overall queue statistics that are NOT VO-related.
## NOTE: if you have also configured authorizedvo in the [cluster] block,
## the values placed in the [queue/*] blocks will override the values in [cluster].
##
#authorizedvo="LocalUsers"
#authorizedvo="atlas"
#authorizedvo="community.nordugrid.org"
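## To illustrate the override rule in the NOTE above, consider this standalone
## sketch (VO names reused from the examples in this file, not a recommendation):
## with
#authorizedvo="community.nordugrid.org"
## in the [cluster] block and
#authorizedvo="atlas"
## in a [queue/gridlong] block, the gridlong Share would advertise only the
## atlas VO, while queues without their own authorizedvo entries would be
## expected to keep the cluster-level value, as described in the [cluster] block.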
## LDAP parameters of the queue+jobs+users.pl (old) infoprovider; use the defaults and
## do NOT change them unless you know what you are doing.
## cachetime affects the old infoproviders and forces the validity time of the record.
#cachetime="30"
## sizelimit affects registration to EGIIS
#sizelimit="5000"
####################################################################
##
### [registration/emir] block ######################################
##
## The services registration into EMIR block
## configures and enables the registration of the
## services enabled in this configuration file into the EMI
## indexing service (EMIR).
## Currently only implemented for A-REX.
##
#[registration/emir]
## emirurls - comma-separated list of URLs of EMIR services which are to accept
## the registration. This is mandatory.
#emirurls="https://somehost:60002/emir"
## validity - time in seconds for which registration records should stay valid.
#validity=600
## period - how often (in seconds) the registration record should be sent to the
## registration service.
#period=60
## disablereg_xbes - may be used to selectively disable registration of the
## A-REX service. Possible values are yes and no. Default is no.
#disablereg_xbes="no"
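## Putting the above together, a minimal EMIR registration block might look
## like this sketch (the EMIR endpoint is the placeholder value from above,
## not a real service):
#[registration/emir]
#emirurls="https://somehost:60002/emir"
#validity=600
#period=60
#disablereg_xbes="no"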
####################################################################
### [nordugridmap] block ###########################################
##
## The [nordugridmap] block configuration is used to fine-tune the behavior
## of nordugridmap - an ARC tool used to generate grid-mapfiles.
## Please refer to the [vo] block description for information on how
## to specify VO sources for mapfile generation. This section sets up
## general VO-independent parameters.
##
#[nordugridmap]
## x509_user_cert, x509_user_key - public certificate and private key
## to be used when fetching sources over TLS (https:// and vomss://
## source retrieval relies on these parameters).
## If not specified, the values defined in the [common] section will be used.
## If there is also no [common] section, the X509_USER_{CERT,KEY} variables
## are used. Default is '/etc/grid-security/host{cert,key}.pem'.
#x509_user_key="/etc/grid-security/hostkey.pem"
#x509_user_cert="/etc/grid-security/hostcert.pem"
## x509_cert_dir - the directory containing CA certificates.
## Default is '/etc/grid-security/certificates/'.
#x509_cert_dir="/etc/grid-security/certificates/"
## gridmap_owner username - owner of the generated gridmapfiles.
## Default is the owner of the grid-manager process (default is 'root').
#gridmap_owner="root"
## gridmap_group groupname - group of the generated gridmapfiles.
## Default is 'root'.
#gridmap_group="root"
## gridmap_permissions filemode - permissions of the generated gridmapfiles.
## Default is '0600'.
#gridmap_permissions="0600"
## log_to_file - controls whether the logging output of nordugridmap
## will be saved to a file. If the value is 'no' nordugridmap will write all
## information to STDERR. Default is 'yes'.
#log_to_file="yes"
## logfile - specifies the nordugridmap log file location when in use.
## Default is '/var/log/arc/nordugridmap.log'.
#logfile="/var/log/arc/nordugridmap.log"
## cache_enable - controls whether caching of external sources
## will be used. Default is 'yes'.
#cache_enable="yes"
## cachedir - specifies the path where cached sources will be stored.
## Default is '/var/spool/nordugrid/gridmapcache/'.
#cachedir="/var/spool/nordugrid/gridmapcache/"
## cachetime - controls how long (in seconds) cached information
## remains valid. Default is 259200 (3 days).
#cachetime="259200"
## mapuser_processing - controls how the mapped_unixid parameter of the
## [vo] block is used. Valid values are 'overwrite' and 'keep'.
## Please see the 'mapped_unixid' description in the [vo] block for details.
## Default is 'keep'.
#mapuser_processing="keep"
## allow_empty_unixid - controls whether an empty (or unspecified)
## 'mapped_unixid' [vo] block option is allowed to be used.
## Please see the 'mapped_unixid' description of the [vo] block for details.
## Default is 'no'.
#allow_empty_unixid="no"
## voms_method - controls how to get information from VOMS(S) sources.
## Valid values are:
## soap - call the SOAP method directly using SOAP::Lite
## get - use the old implementation that manually parses the XML response
## Default is 'soap'.
#voms_method="soap"
## debug level - controls the verbosity of nordugridmap output. Valid
## values are:
## 0 - FATAL - only critical fatal errors are shown
## 1 - ERROR - errors, including non-critical ones, are shown
## 2 - WARNING (default) - configuration errors that can be ignored
## 3 - INFO - processing information
## 4 - VERBOSE - a bit more processing information
## 5 - DEBUG - a lot of processing information
##
## When a test run is requested (--test command line option of
## nordugridmap) the debug level is automatically set to 5 (DEBUG).
## Default is 2 (WARNING).
#debug="4"
## fetch_timeout - controls how long (in seconds) nordugridmap will
## wait for external source retrieval. Default is 15.
#fetch_timeout="15"
####################################################################
##
### The [acix/cacheserver] block ###################################
##
## The cache server component of ACIX runs alongside A-REX. It
## periodically scans the cache directories and composes a Bloom
## filter of the cache content which can be pulled by an ACIX index
## server.
#[acix/cacheserver]
## hostname - Hostname on which the cache server listens. Default is all
## available interfaces.
#hostname="myhost.org"
## port - Port on which the cache server listens. Default is 5443.
#port="6000"
## logfile - Log file location for the cache server. Default is
## /var/log/arc/acix-cache.log
#logfile="/tmp/acix-cache.log"
## cachedump - Whether to make a dump of the cache contents in a file at
## $TMP/ARC-ACIX/timestamp each time the cache server runs. Default is no.
#cachedump="yes"
####################################################################
##
### The [acix/indexserver] block ###################################
##
## The index server component of ACIX collects cache content filters
## from a set of cache servers configured in this block. The index
## server can be queried for the location of cached files.
#[acix/indexserver]
## cacheserver - ACIX cache servers from which to pull information
#cacheserver="https://some.host:5443/data/cache"
#cacheserver="https://another.host:5443/data/cache"
####################################################################
##
### The [gangliarc] block ##########################################
##
## Gangliarc provides monitoring of ARC-specific metrics through ganglia.
## It can be run with zero configuration or customised with options in the
## [gangliarc] block.
#[gangliarc]
## frequency - The period between each information gathering cycle, in seconds.
## Default is 20.
#frequency="30"
## gmetric_exec - Path to the gmetric executable. Default is /usr/bin/gmetric.
#gmetric_exec="/usr/local/bin/gmetric"
## logfile - log file of the daemon. Default is /var/log/arc/gangliarc.log.
#logfile="/tmp/gangliarc.log"
## pidfile - pid file of the daemon.
Default is /var/run/gangliarc.pid. #pidfile="/tmp/gangliarc.pid" ## python_bin_path - path to python executable. Default is /usr/bin/python. #python_bin_path="/usr/local/bin/python" ## metrics - the metrics to be monitored. Default is "all". ## metrics takes a comma-separated list of one or more of the following metrics: ## - staging -- number of tasks in different data staging states ## - cache -- free cache space ## - session -- free session directory space ## - heartbeat -- last modification time of A-REX heartbeat ## - processingjobs -- the number of jobs currently being processed by ARC (jobs ## between PREPARING and FINISHING states) ## - failedjobs -- the number of failed jobs per last 100 finished ## - jobstates -- number of jobs in different A-REX internal stages ## - all -- all of the above metrics #metrics="all" nordugrid-arc-5.4.2/src/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315720020026 xustar000000000000000030 mtime=1513200592.952927474 30 atime=1513200647.735597487 30 ctime=1513200658.651730996 nordugrid-arc-5.4.2/src/Makefile.in0000644000175000002070000005755013214315720020110 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ 
ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ 
LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = 
@libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @DOC_ENABLED_FALSE@@HED_ENABLED_TRUE@BUILD_SOURCES = external doc hed libs tests services clients utils @DOC_ENABLED_TRUE@@HED_ENABLED_TRUE@BUILD_SOURCES = external doc hed libs tests services clients utils doxygen @HED_ENABLED_FALSE@BUILD_SOURCES = SUBDIRS = $(BUILD_SOURCES) DIST_SUBDIRS = external doc hed libs tests services clients utils doxygen all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
@HED_ENABLED_FALSE@uninstall-local: @HED_ENABLED_FALSE@install-exec-hook: clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: @$(NORMAL_INSTALL) $(MAKE) $(AM_MAKEFLAGS) install-exec-hook install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-local .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-exec-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-exec-hook install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs installdirs-am \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am uninstall-local # This won't work in case of cross-compilation. Please # some autotools experts fix it. @HED_ENABLED_TRUE@install-exec-hook: @HED_ENABLED_TRUE@ if test "x$(build_triplet)" = "x$(host_triplet)"; then env LD_LIBRARY_PATH=$(DESTDIR)$(libdir):$(LD_LIBRARY_PATH) $(top_builddir)/src/utils/hed/arcplugin$(EXEEXT) -c $(DESTDIR)$(pkglibdir) -c $(DESTDIR)$(pkglibdir)/test; else echo "No .apd files since we are cross-compiling"; fi @HED_ENABLED_TRUE@uninstall-local: @HED_ENABLED_TRUE@ test "x$(build_triplet)" = "x$(host_triplet)" && rm -f $(DESTDIR)$(pkglibdir)/*.apd $(DESTDIR)$(pkglibdir)/test/*.apd # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/PaxHeaders.7502/doxygen0000644000000000000000000000013213214316030017354 xustar000000000000000030 mtime=1513200664.702805003 30 atime=1513200668.716854096 30 ctime=1513200664.702805003 nordugrid-arc-5.4.2/src/doxygen/0000755000175000002070000000000013214316030017477 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/doxygen/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612733561102021477 xustar000000000000000026 mtime=1466884674.11683 30 atime=1513200593.530934543 30 ctime=1513200664.692804881 nordugrid-arc-5.4.2/src/doxygen/Makefile.am0000644000175000002070000000757312733561102021556 0ustar00mockbuildmock00000000000000SWIG_DEPENDS = \ $(top_srcdir)/swig/common.i \ $(top_srcdir)/swig/credential.i \ $(top_srcdir)/swig/data.i \ $(top_srcdir)/swig/compute.i INPUT = $(top_srcdir)/src/hed/libs/common \ $(top_srcdir)/src/hed/libs/credential \ $(top_srcdir)/src/hed/libs/data \ $(top_srcdir)/src/libs/data-staging \ $(top_srcdir)/src/hed/libs/compute CPP_EXAMPLES = $(top_srcdir)/src/hed/libs/common/examples \ $(top_srcdir)/src/hed/libs/credential/examples \ $(top_srcdir)/src/hed/libs/data/examples \ $(top_srcdir)/src/libs/data-staging/examples \ $(top_srcdir)/src/hed/libs/compute/examples PYTHON_EXAMPLES = $(top_srcdir)/python/examples JAVA_EXAMPLES = $(top_srcdir)/java/examples SPECIALISATION_MAPPINGS = JobState JobDescription SPECIALISATION_MAPPINGS_JobState = \ $(top_srcdir)/src/hed/acc/ARC0/JobStateARC0.cpp \ $(top_srcdir)/src/hed/acc/EMIES/JobStateEMIES.cpp \ $(top_srcdir)/src/hed/acc/ARC1/JobStateBES.cpp \ $(top_srcdir)/src/hed/acc/ARC1/JobStateARC1.cpp \ $(top_srcdir)/src/hed/acc/CREAM/JobStateCREAM.cpp SPECIALISATION_MAPPINGS_JobDescription = \ $(top_srcdir)/src/hed/acc/JobDescriptionParser/XRSLParser.cpp \ $(top_srcdir)/src/hed/acc/JobDescriptionParser/ADLParser.cpp \ $(top_srcdir)/src/hed/acc/JobDescriptionParser/JDLParser.cpp .SECONDEXPANSION: $(addsuffix _Mapping.dox, $(SPECIALISATION_MAPPINGS)): %_Mapping.dox: $(srcdir)/create-mapping-documentation.py $(top_srcdir)/src/hed/libs/compute/%.h $$(SPECIALISATION_MAPPINGS_%) $(PYTHON) $^ $*_Mapping.dox Doxyfile.SDK.build: $(top_srcdir)/src/doxygen/Doxyfile.SDK cp $(srcdir)/Doxyfile.SDK Doxyfile.SDK.build sed "s/@TOP_SRCDIR@/$(subst /,\/,$(top_srcdir))/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build sed "s/@INPUT@/$(subst /,\/,$(INPUT) $(addsuffix _Mapping.dox, $(SPECIALISATION_MAPPINGS)))/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build sed "s/@EXAMPLES@/$(subst /,\/,$(CPP_EXAMPLES) $(PYTHON_EXAMPLES) $(JAVA_EXAMPLES))/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build sed "s/Doxyfile.SDK.layout.xml/Doxyfile.SDK.build.layout.xml/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build for mapping in $(SPECIALISATION_MAPPINGS); do \ sed "s/^FILTER_PATTERNS[[:space:]]*=/& *\/$${mapping}.h=$(subst /,\/,$(srcdir))\/adapt-and-filter-mapping-attributes.sed/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp;\ mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build;\ done Doxyfile.SDK.build.layout.xml: $(top_srcdir)/src/doxygen/Doxyfile.SDK.layout.xml cp $(srcdir)/Doxyfile.SDK.layout.xml Doxyfile.SDK.build.layout.xml SDKDEPENDENCIES = Doxyfile.SDK.build Doxyfile.SDK.build.layout.xml \ $(srcdir)/add-bindings-deviations-to-dox.py \ $(srcdir)/add-java-getter-setter-method-notice.py \ $(srcdir)/images/arcsdk.png \ 
$(srcdir)/adapt-and-filter-mapping-attributes.sed \ $(SWIG_DEPENDS) \ $(wildcard $(addsuffix /*.h, $(INPUT))) \ $(wildcard $(addsuffix /*.cpp, $(CPP_EXAMPLES))) \ $(wildcard $(addsuffix /*.py, $(PYTHON_EXAMPLES))) \ $(wildcard $(addsuffix /*.java, $(JAVA_EXAMPLES))) \ $(addsuffix _Mapping.dox, $(SPECIALISATION_MAPPINGS)) SDK: $(SDKDEPENDENCIES) doxygen -v | awk -F . '{ exit !($$1 >= 2 || $$1 == 1 && $$2 >= 8) }' || (echo "doxygen version 1.8.0 or greater required (version $$(doxygen -v) found)" && exit 1) doxygen Doxyfile.SDK.build # Postprocessing: Add deviations from SDK API for language bindings (Python and Java). for file in $(SWIG_DEPENDS); do $(PYTHON) $(srcdir)/add-bindings-deviations-to-dox.py $${file} SDK/html; done $(PYTHON) $(srcdir)/add-java-getter-setter-method-notice.py SDK/html EXTRA_DIST = Doxyfile.SDK Doxyfile.SDK.layout.xml images/arcsdk.png \ add-bindings-deviations-to-dox.py \ add-java-getter-setter-method-notice.py \ adapt-and-filter-mapping-attributes.sed \ create-mapping-documentation.py CLEANFILES = SDK nordugrid-arc-5.4.2/src/doxygen/PaxHeaders.7502/images0000644000000000000000000000013213214316030020621 xustar000000000000000030 mtime=1513200664.698804954 30 atime=1513200668.716854096 30 ctime=1513200664.698804954 nordugrid-arc-5.4.2/src/doxygen/images/0000755000175000002070000000000013214316030020744 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/doxygen/images/PaxHeaders.7502/arcsdk.png0000644000000000000000000000012412116355577022677 xustar000000000000000027 mtime=1362746239.263982 27 atime=1513200574.217698 30 ctime=1513200664.698804954 nordugrid-arc-5.4.2/src/doxygen/images/arcsdk.png0000644000175000002070000032432712116355577022757 0ustar00mockbuildmock00000000000000PNG  IHDR sBIT|d pHYs B(xtEXtSoftwarewww.inkscape.org< IDATxwxǙ?.zg;)*Ջe[]dɖ9N8)\ߥI.qrTqkmYV!% v? . HA )bwg@/ywfaAGOO* BaQk`:ufCc1d[^6!ξFND#ȓ T[(+:tyty<!ĒqCYi N'تJSp@9^^m04crƍ4aZZZ^vC]q'hijzMMM0ϹM8z,>پO!0 Xuwvv %22t#999vZQ j)CQ-RlmT$d4bdhhDVNt'YQ)ZP|~?:aw8a0kJ*e?ueB~U ]ca`NEVD{[>,2,F xV(g%dLl#*:Z^ -r )= kOn'VK;8pt>,_1&nby-p:]J|P(r{@Ȅ!p\Q/Q/ѐ1^p/2B$d43Σ*$8*fx<(@ B,^8 .ք3a 79K|fsEcW0<'W\GNIz؂UnϞN>Appڎg/Go%媔JN)\6yAiiQ[htը%:}vG`cɀPW 1"Bp ́JQ|1\~nήHN2.7^{w8XAjJA/8sJr30p*͟Lhk;{gC[\&L|_8EJ%44̜1e\UB!ry?|"de7H$/"#-qYx, ?/3 pӰZkl{; xǖq$//8x$xx勑E!D}p 啖U[2lr9V7bRF*8G[G8ڰYrs'd4j+*/UfӧsP@!Bt! ObOCwɸvEU7. !eY\vJJ+ !'eS`px l :xn]5Ge̝]}74##= Hnٓ9\S0Fq=s+ م$ժO"BHԤ$`n!\n,mֆ8v,6;6_x{{"%9)Ai` +.VZ^GOCH>hi%h͚1?;Jf /OS8}2nys >ł? q` R0 ;AQq!x{(@ BƙIiv8s>}@TᒤMHl!K{=>J7{$s&ɨ˲p8]uac@Z&\Q W܈`FF^)Q xA@KK`9xV+Q+jH;qFA9ya/xfB!L fL}8pSDӀeYQnG=e9`2WwA2W&Ad0,?70Wl~y'.sN9 Wbbs(P`0cZިG\-W=@(P#N61,brN&VFȗ"wܒ \n.T ]KƺZd:ty؝xHca6mbWrZ:y8\b ,RXU@q@>?p‡v6ER,ٓ(G&rms@[-eB}V¡Ώ+AEAW`åZ?{x b rSM_2>T6J҈IJ,L&.g0*P9KaKUuh wDP^ ߋFeu}R*!B^.>)L2'!"8t=G^rmA_, ؊:ܺfJ4#=5 M*Ss$ۊ]{}S}kqg+Ww.N'*qɱ2_“49W=v VJ|u7Y0_\c]V),Wn`݂@dC)J0}Vyx=8'F063Uw?ӇF~^&xbw/QA=L`jw xa/nwe]xhOݮA~ xy DENaqbQ+;~npw7/ |J~Sǂ'Q,cJ53yB۵<^Ra2854Z؃/nYn5WX5Wymn\zNj]ЩA]!.i>T=Y4&.\*啵(5 8}Bk1&̙9Mܞ6%U5^ Xq砺Anj͙9-$@; F>$Sae5X^&kVބd3Z8ق%(>|L&'b2Rîpyڕ{L&#Qt_ \A?x@V!d {97ES3B'q}/V5Y{`y?+]6|zOn;Ay# n~.'IXo $/Mvpc;1ZE냻5nX8 LLG߈}ba\>qݹ}&i@Zj}-XX69wQRV# ̜1]=8}asgN?Q0mrDiW%@8oԔDzɘ6a`0%/SNEU-*P[߄ cY)xp_x]G:_|lSlvv M-p\7G=㾶  6iL">.`^ᐍu݁=A]0 bpS_ϭG]}3lvdb\.ÔI.^5&ٷa21=~N|^4mAŋfcvq 0zdr~G&Y87.* [N^#is;$8Щ|n-LQ 1ECV[yyn ^ ip3F nDZ Mj[9:Ş3ܻl /fJn_“xǮw-h8;YgEOHpPGר15CG@ùJ?#nJܷ\t&VAlsQ*SҎ*1&#bMF%pM ĠAFp sai@gW7~FfsS#}RtZپO/%&.א\}C3>d. 
[binary PNG image data for nordugrid-arc-5.4.2/src/doxygen/images/arcsdk.png omitted]

ZCVV`݋Glajr-3Fho kح?-w*7,v |&aG&&.cFꊬ+lĤ3hF{Չ|CtXR%7?7ae8ďe2Fq>ֽXɡM̘sI6%Thgr-tLW%jUvxiuDN3%cz$iy~5ns {qK3Ÿv7E)يZ+ʶqF|uL"Q58:GlKb !NSnTi$~ֵW.”,o#Y;du52kiow+ ][{c2!swlkЁozi ˁow mۺo[! UTHn;T n&=f< K €|g߲ rW&Q+X-.JJhs`oﻛA0;Zх"ڃ+ls2L^Ƕklq$\^r "i{o1/ygt:d?%u7a8:M6Vo_WKÍ+l]FRn m3?Sh-Vμ(Z2xE|<=[z!.ĤUj >hߏYAo{jorTN 'qL(){{I%Wuf 4)UcT7:Z\8;R_4wA4}ZM 7+Ciq0ZbKSwfz `kA/ؿO[< 9q|58pW}B'N{p{gFF]OXN $sfku|ƄqWsw̿wֵr /zEDD9匃*JtqF._ hk[7^G%u_{>C~lhXb/2zO#hZL&qFaվCW#!'TەT?07u9J:؉JVG3zD%}SC Gaë aCS좦n =bvHh8ZˌvsQ#H"9M֓46_erďңs$;D|`Uv6^Mʄ]6e;0ץ|w+0r>ÞϺӞ9Gna o;‡kOگܼ2?-[^g׮]TUU^/|"ŋSTTDhh_˅pv:Ʈݜ>d҅q[ Ғ5Kh$9;ӛv& qlw+l H}&4zuviRK,=0E̻i|/{QS"RC ߚvWKY*yRyhuMRjɗ9DFJ:Yy&T67L>7©5+oޒ3ɜy}O%O :ՈPHXb6[߬z=vE"UەlQW!6Rn%-??i]X3")ʸs_і тpmy_o+sy46v+ '$=]NkӟĚ5kp:̘1|d3>ihhpM7ĥ^p?R,X0?XMX:q^W^y<w6lO>5@/ ]_|Shm^]]O?4v(&Lٳ),,#_oߧ&"""DEEO+]{bj*>ӱ̞=|e~wy|RSS))) 2{n}]^z%~ӟ+,,.hH|j5 ^ss3#vuuuyljDY[o!Isϥ?k٦Mg466*O"..~+Wa~m EN[KQ)eAdž1DT=z]C^E22DLIxktݘ:C"&'.QMDT)/<%#!L>]Vk(-q!mK֮M'uR80^H Ϳ/Z<[ƕUF{S(Io\@5,+MNw'A1hR'SKN(-=&N~4{ERy4PH"zo3DhH %ql( ف!soL\iC䊑&B,s _Ǐ m’Xڛ"^xBL8 ~''66{(nF8G^z%yt:{/駟*n&O~eޓ+G][9^xo@]];v>`ܸq'<$''?%K0sL*+[2e /"~-<f^{% _-@O~\fHZcgd{n߃&/{Fה ~aj.I~G2I%=D1gwH-33CpzX,+t49ч 1iA4ziIa\q[8PI%Z.{ ƱgMtoƟKZ^DРңpM} )BʄD 1~fȚ]1sf$3[ ,]B?wOonEFyeΝ;ZZZ3g~~ԩ3}CQRR͘1csNZc2 MXRlm[y0rEFF2k,f͚?OMMMG9 eA}>LLN&^ѩBj-jH$) IDATAȥ|bGEUUDrץK]jjjzoh9w:8^F'zLnz=III̝;;y{ Gn3f DNN555~0AA@<ă &a Ytq}>83gwuߺ0Tu'$;]Mhz\'=zBJJJ ޳9it8N>sn&T*U@ #xA $Ф:}CФ^׿5q/_}~-< կy>.Vj L,p䦦&Mٳ1̝;w@JW_}v8rȀdQIj.oD1ssu}_c zw}Jvv6j];JW1*?2334iz A pҍ{)$s{I'? jZLyt:v~˗spwb67n2<(?0nNƎKVVW\qE1}}Μ9dee / u<&;1i1c(|A>SbbbXb|&L{ $#GpA%Kh]AL6w?܌|c? PE8sx')(((Ҹjzjq 2e j &ILLT2m޼b &MRf퍌T3 ʲ?ϰl9rXRSSNJRqG;0[{OEBbل39ea$Ղ`4Yz5=r^}Uo 22DoK/DEEaaavɤ%iر,\<SLX &}j߬YKpwtNqI!x֒v=ޚuLgK'9Wkۦ&3VɓOl`ݻwc"2=}vol*[ۢ, 7yKtv2&po{mvlمDC,Oǽn F7YTQ3NcF6fޯQ!܇M}K믿fL8?eR;wdO 5!,s _c?Ue:iTZɺ`9rfs/4Y.b9YWQԸƎ'!i1oۺo$C2)"$^jPuD1hCi˾h.pC41Iϴ%'>ĉxy"8P BN]{`'} >, >IA[DtH7a[ԷWpw20ь줒T$EdV HmA[?r\.\r%,_|& SSH3gCMB")ЄOBxPAф` EhѢ.pCAAAPAAAAAAPAAAAAAP 8QcG%*őSQHfv$GduQ3#_nHsz4aLJZQ"IEAy QjJNEYX )ߞnQ3C]P#SuQAPWAIC] snPQP/ε 02 (@=UT|;z 0R   (D   B 0sg.grq9x g6"GAFͺBJ T6cwPK*#eaaV. aC]XCgG v JTL!. A Wni}1wv(˴j5vV$g" 2Ǣz|ӉFrRw۬V;wwDt\ NyjyڎyE=EvF|oV%xF_`o)x=>K9;[M eeWD .jz|x=79{AgTaբ5ǡ:f~Ǝ_?` XRu#* 1Wy˰ۉN1N* h¨܉hZM4UVjn$,2Qq:loClPjEg02x<^֖oH\b쩧bd$IO> N'EEETVVx7nr(..7B~~>MMM3qDq:ٳ <iiiL>^]cc#֗c0ȕcǎݮt)))rM|||r`0a~ٽ{7)S᷾Bn=7n8m9ɡN666b6IOOh4z9tL& )**jɓ;jСCTUUh4i9)((T2dk~:jĦq9h_@Di'P% #6]|BV75P Kog_g_LmQGɵFҧFRF'?gҲ_Jpfd\iyςRF'*ĥď\a#~tv:ʃĢR}_4(AZ^p ś-[N _>5k8;=S.AَVZo0 ;ti1>˟̘=?AF>9W_y~MFV{|{l~W%-PmCx!IOAxt +8ڛG:[Oh*u1}~Ǎ'|Bhhu7n@]]f+V .+Vh)x甛ؗ_~;ol>3rssXt)|6?0K.UG?RoxYr%cК3f ^+W9m6]t֭[yf͛;NKK[n#,LXnR77P{UW)tMxGy饗Qo>^{[n}1uT6nH\\\@+5\#<£>l…[fnV>Sso2{lN! :h,,YE yTGCD A!sF*P YL gIy>nCuȻ4KY|6˜yu"1yK[TpJFrCkjU#O {)`;}y7 dLd=,5կ2 uR8!$vZ KBRIX-.̕AAص' #H˝Hޝz2&M!ux:,Jp3}jNVo ul.$6e:>z#eD'%=&O#>=U^1粫0TQ_!o?'5B A^{5*++yPT7Q`Zh4{x^fW;0vwv%8XlUVa4i#;;:֯_OII wMWT&MD}}=s;ޘ1c0JWJŊ+Xr%eee˷~ErW_Us=qƑVܹoKxx8߿_Py狋#99Fh/^zba r-lڴ (Ν#//qvYf o. ^OEEN0͛j~)&/;vp!VXAaa"s Y`V4j2O85ι%@A9~<(RRI$?*D :$MbpO$>h]݇#s]Z`kU|^-S.'uhIkђƅF<.m NLIM]cnA"*Հ.#E| TuͯdWbwɦa7tܹ ȝ@ysOKQ' 7z߲Qu#|Ŝˮ`¢əqkZOt4׋Kyza|IqFFF@db͚5v;gE$&L@AAA_zoSO='|0a ,`ʶ?8<}'?ᡇRo߾]USwyć~<իuQQƍrULL z@ס'xKJJ {%::ロg}^xGy/7SO=ڲ{n1gf3J @^^JKKYJNpyJjj*].(LCFۣNgkHosȞmBh(+)=%ԇoG#tFQkz<>\Aht򗧳5OKkPqգcumQZV߿[˞G+r5}0Uk/k] #H|XZz '7?~ o)rW0>t{ϟΙU 8{f^x)[7uZ[ߨzcH|yyy<#P^^̙3ٲ/AϳV[{\s ))))7fyPh裏||c]~jf3G+*OLL $)ztyCCCYl}kʟaƍJWv. K/Uxo߾}nXp٪[֩4dBLZwyw;!&}* ;M\fKyPgots4 r A,FjZ-2Nvk:&55Qʔk(x cXu7%vii1:ʬ DMlf\d- !AF CF:(/nj? 
WˏA( A0{e"NV%3=gIFX2jذamƦ OҗO09ѿPpj}D"-[ӧuZn&5h}rvСCeS}?^T*ŋ0+wFGG}V$~\cZaaۑtO8]y' 0w8jGdJ &Ԟ2\;͡BUBX. nUpJ7C x<fF/H͏@9~ [(<$BTHmGA"p8GlZ׭)#.hED<X .b3?ϻB aaίb]z;[ 8l9D?!WXp c -tդ @y v3P'C$Te?v6'/# !D>t:}dܹþП׋\~ڵklZÇX0_t)D"nܸz,X˗/͛{nuGz۷oGii), <8KKKiӦAA[4\lEm.]z;<z NȔB䯍 8@xc`𾔿mfByfv<^0s~Ŵҋ#m,.~ ÃVf?)^Rӟ݇PüUσ ! $wg J(O ߶!Ayзy4? PWzNqqyWsm38=辣MASamp%BeǃD`ڴihnnFEsحF.cƍ_ .Nϟ.Jyyy>غu+}R_Gdd$F#|M=zF~)[\ZZW^yf:&k.8q3ge'hڲe p]$''###Xf_+ӿVh҂͛7ԩSΝ;\%K v%%%j5k[j*zسgJP̛7o?hkkr1A B.^EC;_Xzt:ٳǯg@v܉N|cpU_:߿\.qqqxw⧟~keH$Ž;i&f۷Sۗl2$%%?LX,6dee>x\"aaCv|x~LM |yxƘh4K} _XMM Z-ΝЧ.]dBxx8T*U/j"!!PUUUBrr2 1555!++ b4qyy^lV G!j!˗Q[[ iz! &A!<d"1diu"Xp\PMtB"A"c.\z0544@" 111(c B! P*C*/bA? lX9@;Bfa&ڕB4!LFTh8 ') K | !MN!lX)Fpm8 ͦ[ ژ〃y |nr)4K[{8S1Ȏ]B!b5Q X6c,Qqx!q^HM!2uQO)B!BB!BB!BB!BB!BRRBƊ@0vi|>\.ט]gm&H!q^BFWW7 Ř]/22cv=Be</ aAȤx`2uiR׷ Zh= BYYY{f9c!dz~&23g P(DZtܤ>BF :]+3=B8^kXPYY ,J2Y(i2&St"))i\SWWj5P8vuLU^ftt`1oField Documentation\n': i += 1 inFieldDocumentation = True continue regMatch = re.match('

.*', doxHTMLFileLines[i]) if regMatch: i += 1 inFieldDocumentation = False continue if doxHTMLFileLines[i] == '
\n': fieldName = None i += 1 while i < len(doxHTMLFileLines): regMatch = re.match('\s+.*::([A-Za-z0-9_]+)', doxHTMLFileLines[i]) if regMatch: fieldName = regMatch.group(1) elif doxHTMLFileLines[i] == '
\n' and fieldName: doxHTMLFile.write('
Java interface deviation
The member is only accessible through the get' + fieldName + ' and set' + fieldName + ' methods in the Java interface
') doxHTMLFile.write(doxHTMLFileLines[i]) break doxHTMLFile.write(doxHTMLFileLines[i]) i += 1 continue i += 1 doxHTMLFile.close() nordugrid-arc-5.4.2/src/doxygen/PaxHeaders.7502/Doxyfile.SDK.layout.xml0000644000000000000000000000012412117640070023700 xustar000000000000000027 mtime=1363099704.123892 27 atime=1513200574.245698 30 ctime=1513200664.697804942 nordugrid-arc-5.4.2/src/doxygen/Doxyfile.SDK.layout.xml0000644000175000002070000001341412117640070023750 0ustar00mockbuildmock00000000000000 nordugrid-arc-5.4.2/src/doxygen/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315721021504 xustar000000000000000030 mtime=1513200593.563934947 30 atime=1513200653.094663031 30 ctime=1513200664.694804905 nordugrid-arc-5.4.2/src/doxygen/Makefile.in0000644000175000002070000004715713214315721021570 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/doxygen DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ 
ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = 
@JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = 
@initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SWIG_DEPENDS = \ $(top_srcdir)/swig/common.i \ $(top_srcdir)/swig/credential.i \ $(top_srcdir)/swig/data.i \ $(top_srcdir)/swig/compute.i INPUT = $(top_srcdir)/src/hed/libs/common \ $(top_srcdir)/src/hed/libs/credential \ $(top_srcdir)/src/hed/libs/data \ $(top_srcdir)/src/libs/data-staging \ $(top_srcdir)/src/hed/libs/compute CPP_EXAMPLES = $(top_srcdir)/src/hed/libs/common/examples \ $(top_srcdir)/src/hed/libs/credential/examples \ $(top_srcdir)/src/hed/libs/data/examples \ $(top_srcdir)/src/libs/data-staging/examples \ $(top_srcdir)/src/hed/libs/compute/examples PYTHON_EXAMPLES = $(top_srcdir)/python/examples JAVA_EXAMPLES = $(top_srcdir)/java/examples SPECIALISATION_MAPPINGS = JobState JobDescription SPECIALISATION_MAPPINGS_JobState = \ $(top_srcdir)/src/hed/acc/ARC0/JobStateARC0.cpp \ $(top_srcdir)/src/hed/acc/EMIES/JobStateEMIES.cpp \ $(top_srcdir)/src/hed/acc/ARC1/JobStateBES.cpp \ $(top_srcdir)/src/hed/acc/ARC1/JobStateARC1.cpp \ $(top_srcdir)/src/hed/acc/CREAM/JobStateCREAM.cpp SPECIALISATION_MAPPINGS_JobDescription = \ $(top_srcdir)/src/hed/acc/JobDescriptionParser/XRSLParser.cpp \ $(top_srcdir)/src/hed/acc/JobDescriptionParser/ADLParser.cpp \ $(top_srcdir)/src/hed/acc/JobDescriptionParser/JDLParser.cpp SDKDEPENDENCIES = Doxyfile.SDK.build Doxyfile.SDK.build.layout.xml \ $(srcdir)/add-bindings-deviations-to-dox.py \ $(srcdir)/add-java-getter-setter-method-notice.py \ $(srcdir)/images/arcsdk.png \ $(srcdir)/adapt-and-filter-mapping-attributes.sed \ $(SWIG_DEPENDS) \ $(wildcard $(addsuffix /*.h, $(INPUT))) \ $(wildcard $(addsuffix /*.cpp, $(CPP_EXAMPLES))) \ $(wildcard $(addsuffix /*.py, $(PYTHON_EXAMPLES))) \ $(wildcard $(addsuffix /*.java, $(JAVA_EXAMPLES))) \ $(addsuffix _Mapping.dox, $(SPECIALISATION_MAPPINGS)) EXTRA_DIST = Doxyfile.SDK Doxyfile.SDK.layout.xml images/arcsdk.png \ add-bindings-deviations-to-dox.py \ add-java-getter-setter-method-notice.py \ adapt-and-filter-mapping-attributes.sed \ create-mapping-documentation.py CLEANFILES = SDK all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign 
src/doxygen/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/doxygen/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am .SECONDEXPANSION: $(addsuffix _Mapping.dox, $(SPECIALISATION_MAPPINGS)): %_Mapping.dox: $(srcdir)/create-mapping-documentation.py $(top_srcdir)/src/hed/libs/compute/%.h $$(SPECIALISATION_MAPPINGS_%) $(PYTHON) $^ $*_Mapping.dox Doxyfile.SDK.build: $(top_srcdir)/src/doxygen/Doxyfile.SDK cp $(srcdir)/Doxyfile.SDK Doxyfile.SDK.build sed "s/@TOP_SRCDIR@/$(subst /,\/,$(top_srcdir))/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build sed "s/@INPUT@/$(subst /,\/,$(INPUT) $(addsuffix _Mapping.dox, $(SPECIALISATION_MAPPINGS)))/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build sed "s/@EXAMPLES@/$(subst /,\/,$(CPP_EXAMPLES) $(PYTHON_EXAMPLES) $(JAVA_EXAMPLES))/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build sed "s/Doxyfile.SDK.layout.xml/Doxyfile.SDK.build.layout.xml/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build for mapping in $(SPECIALISATION_MAPPINGS); do \ sed "s/^FILTER_PATTERNS[[:space:]]*=/& *\/$${mapping}.h=$(subst /,\/,$(srcdir))\/adapt-and-filter-mapping-attributes.sed/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp;\ mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build;\ done Doxyfile.SDK.build.layout.xml: $(top_srcdir)/src/doxygen/Doxyfile.SDK.layout.xml cp $(srcdir)/Doxyfile.SDK.layout.xml Doxyfile.SDK.build.layout.xml SDK: $(SDKDEPENDENCIES) doxygen -v | awk -F . '{ exit !($$1 >= 2 || $$1 == 1 && $$2 >= 8) }' || (echo "doxygen version 1.8.0 or greater required (version $$(doxygen -v) found)" && exit 1) doxygen Doxyfile.SDK.build # Postprocessing: Add deviations from SDK API for language bindings (Python and Java). for file in $(SWIG_DEPENDS); do $(PYTHON) $(srcdir)/add-bindings-deviations-to-dox.py $${file} SDK/html; done $(PYTHON) $(srcdir)/add-java-getter-setter-method-notice.py SDK/html # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/doxygen/PaxHeaders.7502/adapt-and-filter-mapping-attributes.sed0000644000000000000000000000012412267225537027102 xustar000000000000000027 mtime=1390226271.218164 27 atime=1513200574.201698 30 ctime=1513200664.701804991 nordugrid-arc-5.4.2/src/doxygen/adapt-and-filter-mapping-attributes.sed0000755000175000002070000000225312267225537027154 0ustar00mockbuildmock00000000000000#!/bin/sed -f # Copy mapdef ID to buffer /\\mapdef / { # Copy current line to buffer h # Remove every thing but mapdef ID. s/.*\\mapdef \([^[:space:]]\+\)[[:space:]]\+.*/\1/ # Swap buffer with pattern space. x } # Remove \mapdef attribute plus associated description. End at first empty line, # line with asterisks (*) or line with asterisks followed by slash (/) modulo # spaces. /\\mapdef /,/^[[:space:]]*\**\/\?[[:space:]]*$/ { /^[[:space:]]*\**\/\?[[:space:]]*$/ ! d } # Replace mapdefattr command with link to attribute mapping. /\\mapdefattr/ { # Append buffer (prefixed with new line) to pattern space. This should be the # mapdef ID copied above. Thus the assumption is that the mapdef command must # come before the mapdefattr command. G # Replace \mapdefattr line with a link pointing to mapping of specific # attribute. # mapdefattr name mapdef ID s/\\mapdefattr[[:space:]]\+\([^[:space:]]\+\)[[:space:]]\+[^[:space:]]\+\n\(.*\)$/\2.html#attr_\1/ s/[^[:space:]]\+$/Attribute mapping specific to this field\/value.<\/a>/ # :: should be transformed to _ in URLs. s/::/_/g } nordugrid-arc-5.4.2/src/doxygen/PaxHeaders.7502/add-bindings-deviations-to-dox.py0000644000000000000000000000012412153361305025710 xustar000000000000000027 mtime=1370350277.274744 27 atime=1513200574.201698 30 ctime=1513200664.699804966 nordugrid-arc-5.4.2/src/doxygen/add-bindings-deviations-to-dox.py0000644000175000002070000001675712153361305025775 0ustar00mockbuildmock00000000000000#!/usr/bin/python # ''' Script for parsing Swig interface files (.i) and extracting renames (%rename) and ignores (%s), and adding that information to the doxygen generated HTML. Usage: add-bindings-deviations-to-dox.py E.g.: add-bindings-deviations-to-dox.py swig-interface.i dox/html Limitations: * Unable to handle #else or #elif statements. * Unable to handle templates. ''' import sys, re from os.path import isfile # Location of swig file filename = sys.argv[1] # Location of generated doxygen HTML documentation sdkDocumentationLocation = sys.argv[2] # Use list to deal with scoping of #if and #ifdef statements. inIfdef = [] # Use dictionary below to group %rename and %ignore statements per HTML file. expressionsFound = {} f = open(filename, "r") for line in f: line = line.strip() regMatch = re.match('\A#if(n?def)?\s+(\w+)', line) if regMatch: inIfdef.append(regMatch.group(2)) #print " #ifdef %s" % inIfdef continue regMatch = re.search('\A#endif', line) if regMatch: #print " #endif // %s" % inIfdef inIfdef.pop() continue regMatch = re.match('%ignore\s+([^;]+)', line) if regMatch: ignoredName = regMatch.group(1) #print "Expression ignored: %s" % ignoredName regMatch = re.match('\A(Arc|ArcCredential|AuthN|DataStaging)::([^:<]+)(<([^:>]+(::[^:>]+)+)>)?::(.*)', ignoredName) if regMatch: namespaceName, className, _, templateParameters, _, methodName = regMatch.groups() if templateParameters: #print "Found template: %s::%s<%s>::%s" % (namespaceName, className, templateParameters, methodName) print "Error: Unable to handle template signatures %s" % ignoredName continue #print " Ignoring method '%s' in class '%s' in Arc namespace." 
% (methodName, className) sdkFNOfIgnoredInstance = sdkDocumentationLocation + '/class' + namespaceName + '_1_1' + className + '.html' if not expressionsFound.has_key(sdkFNOfIgnoredInstance): expressionsFound[sdkFNOfIgnoredInstance] = [] ignoreScope = ["Python"] if "SWIGPYTHON" in inIfdef else ["Python", "Java"] ignoreScope = ["Java"] if "SWIGJAVA" in inIfdef else ignoreScope expressionsFound[sdkFNOfIgnoredInstance].append({"text" : "Method is unavailable", "scope" : ignoreScope, "name" : methodName}) continue print "Error: Couldn't parse ignore signature %s" % ignoredName continue regMatch = re.match('%rename\(([^)]+)\)\s+([^;]+)', line) if regMatch: #print "Expression '%s' renamed to '%s'" % (regMatch.group(2), regMatch.group(1)) toName, renameFullName = regMatch.groups() regMatch = re.match('\A(Arc|ArcCredential|AuthN|DataStaging)::([^:<]+)(<([^:>]+(::[^:>]+)+)>)?::(.*)', renameFullName) if regMatch: namespaceName, className, _, templateParameters, _, methodName = regMatch.groups() if templateParameters: #print "Found template: %s::%s<%s>::%s" % (namespaceName, className, templateParameters, methodName) print "Error: Unable to handle template signatures %s" % renameFullName continue #print " Ignoring method '%s' in class '%s' in Arc namespace." % (methodName, className) sdkFNOfRenamedInstance = sdkDocumentationLocation + '/class' + namespaceName + '_1_1' + className + '.html' if not expressionsFound.has_key(sdkFNOfRenamedInstance): expressionsFound[sdkFNOfRenamedInstance] = [] renameScope = ["Python"] if "SWIGPYTHON" in inIfdef else ["Python", "Java"] renameScope = ["Java"] if "SWIGJAVA" in inIfdef else renameScope expressionsFound[sdkFNOfRenamedInstance].append({"text" : "Renamed to " + toName + "", "scope" : renameScope, "name" : methodName}) continue print "Error: Couldn't parse rename signature %s" % renameFullName continue f.close() #print expressionsFound for filename, v in expressionsFound.iteritems(): if not isfile(filename): print "Error: No such file %s" % filename continue doxHTMLFile = open(filename, "r") doxHTMLFileLines = doxHTMLFile.readlines() doxHTMLFile.close() doxHTMLFile = open(filename, "w") i = 0 while i < len(doxHTMLFileLines): doxHTMLFile.write(doxHTMLFileLines[i]) regMatch = re.match('\s+(.+)', doxHTMLFileLines[i]) if not regMatch: i += 1 continue doxMethodName = regMatch.group(1).strip() #print doxMethodName for entry in v: regMatch = re.match("(operator\(\)|[^(]+)" "(\(([^(]*)\))?" 
"\s*(const)?", entry["name"]) if regMatch: methodName, _, methodParameters, isConst = regMatch.groups() #print "Method name: '%s'; Parameters: '%s'; isConst: %s" % (methodName, methodParameters, str(bool(isConst))) #print "'%s\Z', %s" % (methodName.strip(), doxMethodName) doxMethodName = doxMethodName.replace(">", ">") if doxMethodName.endswith(methodName.strip()): #print "Method '%s' found in file '%s' as '%s'" % (methodName, filename, doxMethodName) isInsideMemdocDiv = False methodParameters = methodParameters.split(",") if methodParameters else [] while True: i += 1 regMatch = re.match('\s+(.+)', doxHTMLFileLines[i]) if regMatch: doxParam = regMatch.group(1).replace(" ", "").replace(" &", "\s*&").strip() doxParam = re.sub(']*>', '', doxParam) # Remove anchor tags if len(methodParameters) == 0: if doxParam != "void": # Doesn't match that in HTML document doxHTMLFile.write(doxHTMLFileLines[i]) break elif re.match(doxParam, methodParameters[0]): methodParameters.pop(0) elif isInsideMemdocDiv and re.match('', doxHTMLFileLines[i]): if len(methodParameters) > 0: # Doesn't match that in HTML document doxHTMLFile.write(doxHTMLFileLines[i]) break for scope in entry["scope"]: doxHTMLFile.write('
' + scope + ' interface deviation
' + entry["text"] + ' in ' + scope + ' interface
') v.remove(entry) doxHTMLFile.write(doxHTMLFileLines[i]) break elif re.search('
', doxHTMLFileLines[i]): isInsideMemdocDiv = True doxHTMLFile.write(doxHTMLFileLines[i]) break else: print "Error: Unable to parse method signature %s" % entry["name"] i += 1 doxHTMLFile.close() if v: print "Error: The following methods was not found in the HTML file '%s':" % filename for entry in v: print " %s" % entry["name"] print "??? => Is there a API description in the corresponding header file for these?" nordugrid-arc-5.4.2/src/doxygen/PaxHeaders.7502/Doxyfile.SDK0000644000000000000000000000012412574532370021577 xustar000000000000000027 mtime=1441969400.372727 27 atime=1513200574.174697 30 ctime=1513200664.695804917 nordugrid-arc-5.4.2/src/doxygen/Doxyfile.SDK0000644000175000002070000023414012574532370021650 0ustar00mockbuildmock00000000000000# Doxyfile 1.8.3.1 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" "). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or sequence of words) that should # identify the project. Note that if you do not use Doxywizard you need # to put quotes around the project name if it contains spaces. PROJECT_NAME = "ARC SDK" # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer # a quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = # With the PROJECT_LOGO tag one can specify an logo or icon that is # included in the documentation. The maximum height of the logo should not # exceed 55 pixels and the maximum width should not exceed 200 pixels. # Doxygen will copy the logo to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = SDK # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. 
CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Tradditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = YES # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = NO # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. Note that you specify absolute paths here, but also # relative paths, which will be relative from the directory where doxygen is # started. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. 
Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful if your file system # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding # "class=itcl::class" will allow you to use the command class in the # itcl::class meaning. TCL_SUBST = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. 
Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, # and language is one of the parsers supported by doxygen: IDL, Java, # Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, # C++. For instance to make doxygen treat .inc files as Fortran files (default # is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note # that for custom extensions you also need to set FILE_PATTERNS otherwise the # files are not read by doxygen. EXTENSION_MAPPING = # If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all # comments according to the Markdown format, which allows for more readable # documentation. See http://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you # can mix doxygen, HTML, and XML commands with Markdown formatting. # Disable only in case of backward compatibilities issues. MARKDOWN_SUPPORT = YES # When enabled doxygen tries to link words that correspond to documented classes, # or namespaces to their corresponding documentation. Such a link can be # prevented in individual cases by by putting a % sign in front of the word or # globally by setting AUTOLINK_SUPPORT to NO. AUTOLINK_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also makes the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate # getter and setter methods for a property. Setting this option to YES (the # default) will make doxygen replace the get and set methods by a property in # the documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. 
Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and # unions are shown inside the group in which they are included (e.g. using # @ingroup) instead of on a separate page (for HTML and Man pages) or # section (for LaTeX and RTF). INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and # unions with only public data fields will be shown inline in the documentation # of the scope in which they are defined (i.e. file, namespace, or group # documentation), provided this scope is documented. If set to NO (the default), # structs, classes, and unions are shown on a separate page (for HTML and Man # pages) or section (for LaTeX and RTF). INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penalty. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will roughly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. SYMBOL_CACHE_SIZE = 0 # Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be # set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given # their name and scope. Since this can be an expensive process and often the # same symbol appear multiple times in the code, doxygen keeps a cache of # pre-resolved symbols. If the cache is too small doxygen will become slower. # If the cache is too large, memory is wasted. The cache size is given by this # formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. 
# Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_PACKAGE tag is set to YES all members with package or internal # scope will be included in the documentation. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespaces are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. 
HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen # will list include files with double quotes in the documentation # rather than with sharp brackets. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen # will sort the (brief and detailed) documentation of class members so that # constructors and destructors are listed first. If set to NO (the default) # the constructors will appear in the respective orders defined by # SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. # This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO # and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to # do proper type resolution of all parameters of a function it will reject a # match between the prototype and the implementation of a member function even # if there is only one candidate or it is obvious which candidate to choose # by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen # will still accept a match between prototype and implementation in such cases. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. 
GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if section-label ... \endif # and \cond section-label ... \endcond blocks. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or macro consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and macros in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. # You can optionally specify a file name after the option, if omitted # DoxygenLayout.xml will be used as the name of the layout file. LAYOUT_FILE = Doxyfile.SDK.layout.xml # The CITE_BIB_FILES tag can be used to specify one or more bib files # containing the references data. This must be a list of .bib files. The # .bib extension is automatically appended if omitted. Using this command # requires the bibtex tool to be installed. See also # http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style # of the bibliography can be controlled using LATEX_BIB_STYLE. To use this # feature you need bibtex and perl available in the search path. Do not use # file names with spaces, bibtex cannot handle them. CITE_BIB_FILES = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. 
WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # The WARN_NO_PARAMDOC option can be enabled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = @INPUT@ # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh # *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py # *.f90 *.f *.for *.vhd *.vhdl FILE_PATTERNS = *.h *.dox # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # Note that relative paths are relative to the directory from which doxygen is # run. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. 
EXCLUDE_SYMLINKS = YES # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = */test* # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = @EXAMPLES@ # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = @TOP_SRCDIR@/src/doxygen/images/ # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty or if # non of the patterns match the file name, INPUT_FILTER is applied. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) # and it is also possible to disable source filtering for a specific pattern # using *.ext= (so without naming a filter). This option only has effect when # FILTER_SOURCE_FILES is enabled. FILTER_SOURCE_PATTERNS = # If the USE_MD_FILE_AS_MAINPAGE tag refers to the name of a markdown file that # is part of the input, its contents will be placed on the main page (index.html). # This can be useful if you have a project on for instance GitHub and want reuse # the introduction page also for the doxygen output. 
USE_MDFILE_AS_MAINPAGE = #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C, C++ and Fortran comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = YES # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. 
# If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. Note that when using a custom header you are responsible # for the proper inclusion of any scripts and style sheets that doxygen # needs, which is dependent on the configuration options used. # It is advised to generate a default header using "doxygen -w html # header.html footer.html stylesheet.css YourConfigFile" and then modify # that header. Note that the header is subject to change so you typically # have to redo this when upgrading to a newer version of doxygen or when # changing the value of configuration settings such as GENERATE_TREEVIEW! HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If left blank doxygen will # generate a default style sheet. Note that it is recommended to use # HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this # tag will in the future become obsolete. HTML_STYLESHEET = # The HTML_EXTRA_STYLESHEET tag can be used to specify an additional # user-defined cascading style sheet that is included after the standard # style sheets created by doxygen. Using this option one can overrule # certain style aspects. This is preferred over using HTML_STYLESHEET # since it does not replace the standard style sheet and is therefor more # robust against future updates. Doxygen will copy the style sheet file to # the output directory. HTML_EXTRA_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that # the files will be copied as-is; there are no commands or markers available. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. # Doxygen will adjust the colors in the style sheet and background images # according to this color. Hue is specified as an angle on a colorwheel, # see http://en.wikipedia.org/wiki/Hue for more information. # For instance the value 0 represents red, 60 is yellow, 120 is green, # 180 is cyan, 240 is blue, 300 purple, and 360 is red again. # The allowed range is 0 to 359. HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of # the colors in the HTML output. For a value of 0 the output will use # grayscales only. A value of 255 will produce the most vivid colors. HTML_COLORSTYLE_SAT = 100 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to # the luminance component of the colors in the HTML output. 
Values below # 100 gradually make the output lighter, whereas values above 100 make # the output darker. The value divided by 100 is the actual gamma applied, # so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, # and 100 does not change the gamma. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting # this to NO can help when comparing the output of multiple runs. HTML_TIMESTAMP = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. HTML_DYNAMIC_SECTIONS = NO # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of # entries shown in the various tree structured indices initially; the user # can expand and collapse entries dynamically later on. Doxygen will expand # the tree to such a level that at most the specified number of entries are # visible (unless a fully collapsed tree already exceeds this amount). # So setting the number of entries 1 will produce a full collapsed tree by # default. 0 is a special value representing an infinite number of entries # and will result in a full expanded tree by default. HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely # identify the documentation publisher. This should be a reverse domain-name # style string, e.g. com.mycompany.MyDocSet.documentation. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. 
CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated # that can be used as input for Qt's qhelpgenerator to generate a # Qt Compressed Help (.qch) of the generated HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to # add. For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see # # Qt Help Project / Custom Filters. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's # filter section matches. # # Qt Help Project / Filter Attributes. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files # will be generated, which together with the HTML files, form an Eclipse help # plugin. To install this plugin and make it available under the help contents # menu in Eclipse, the contents of the directory containing the HTML and XML # files needs to be copied into the plugins directory of eclipse. The name of # the directory within the plugins directory should be the same as # the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before # the help appears. GENERATE_ECLIPSEHELP = NO # A unique identifier for the eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have # this name. 
ECLIPSE_DOC_ID = org.doxygen.Project # The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) # at top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. Since the tabs have the same information as the # navigation tree you can set this option to NO if you already set # GENERATE_TREEVIEW to YES. DISABLE_INDEX = NO # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). # Windows users are probably better off using the HTML help feature. # Since the tree basically has the same information as the tab index you # could consider to set DISABLE_INDEX to NO when enabling this option. GENERATE_TREEVIEW = NO # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values # (range [0,1..20]) that doxygen will group on one line in the generated HTML # documentation. Note that a value of 0 will completely suppress the enum # values from appearing in the overview section. ENUM_VALUES_PER_LINE = 4 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open # links to external symbols imported via tag files in a separate window. EXT_LINKS_IN_WINDOW = NO # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are # not supported properly for IE 6.0, but are supported on all modern browsers. # Note that when changing this option you need to delete any form_*.png files # in the HTML output before the changes have effect. FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax # (see http://www.mathjax.org) which uses client side Javascript for the # rendering instead of using prerendered bitmaps. Use this if you do not # have LaTeX installed or if you want to formulas look prettier in the HTML # output. When enabled you may also need to install MathJax separately and # configure the path to it using the MATHJAX_RELPATH option. USE_MATHJAX = NO # When MathJax is enabled you can set the default output format to be used for # thA MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and # SVG. The default value is HTML-CSS, which is slower, but has the best # compatibility. MATHJAX_FORMAT = HTML-CSS # When MathJax is enabled you need to specify the location relative to the # HTML output directory using the MATHJAX_RELPATH option. The destination # directory should contain the MathJax.js script. For instance, if the mathjax # directory is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. 
The default value points to # the MathJax Content Delivery Network so you can quickly see the result without # installing MathJax. # However, it is strongly recommended to install a local # copy of MathJax from http://www.mathjax.org before deployment. MATHJAX_RELPATH = http://www.mathjax.org/mathjax # The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension # names that should be enabled during MathJax rendering. MATHJAX_EXTENSIONS = # When the SEARCHENGINE tag is enabled doxygen will generate a search box # for the HTML output. The underlying search engine uses javascript # and DHTML and should work on any modern browser. Note that when using # HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets # (GENERATE_DOCSET) there is already a search function so this one should # typically be disabled. For large projects the javascript based search engine # can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. SEARCHENGINE = NO # When the SERVER_BASED_SEARCH tag is enabled the search engine will be # implemented using a web server instead of a web client using Javascript. # There are two flavours of web server based search depending on the # EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for # searching and an index file used by the script. When EXTERNAL_SEARCH is # enabled the indexing and searching needs to be provided by external tools. # See the manual for details. SERVER_BASED_SEARCH = NO # When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP # script for searching. Instead the search results are written to an XML file # which needs to be processed by an external indexer. Doxygen will invoke an # external search engine pointed to by the SEARCHENGINE_URL option to obtain # the search results. Doxygen ships with an example indexer (doxyindexer) and # search engine (doxysearch.cgi) which are based on the open source search engine # library Xapian. See the manual for configuration details. EXTERNAL_SEARCH = NO # The SEARCHENGINE_URL should point to a search engine hosted by a web server # which will returned the search results when EXTERNAL_SEARCH is enabled. # Doxygen ships with an example search engine (doxysearch) which is based on # the open source search engine library Xapian. See the manual for configuration # details. SEARCHENGINE_URL = # When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed # search data is written to a file for indexing by an external tool. With the # SEARCHDATA_FILE tag the name of this file can be specified. SEARCHDATA_FILE = searchdata.xml # When SERVER_BASED_SEARCH AND EXTERNAL_SEARCH are both enabled the # EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is # useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple # projects and redirect the results back to the right project. EXTERNAL_SEARCH_ID = # The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen # projects other than the one defined by this configuration file, but that are # all added to the same external search index. Each project needs to have a # unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id # of to a relative location where the documentation can be found. # The format is: EXTRA_SEARCH_MAPPINGS = id1=loc1 id2=loc2 ... 
EXTRA_SEARCH_MAPPINGS = #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. # Note that when enabling USE_PDFLATEX this option is only used for # generating bitmaps for formulas in the HTML output, but not in the # Makefile that is written to the output directory. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for # the generated latex document. The footer should contain everything after # the last chapter. If it is left blank doxygen will generate a # standard footer. Notice: only use this tag if you know what you are doing! LATEX_FOOTER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include # source code with syntax highlighting in the LaTeX output. # Note that which sources are shown also depends on other settings # such as SOURCE_BROWSER. 
LATEX_SOURCE_CODE = NO # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See # http://en.wikipedia.org/wiki/BibTeX for more info. LATEX_BIB_STYLE = plain #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load style sheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. 
XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # pointed to by INCLUDE_PATH will be searched when a #include is found. 
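A small, hypothetical C++ fragment showing why the preprocessor options just described matter: with ENABLE_PREPROCESSING = YES Doxygen evaluates the #ifdef below and documents the guarded method only if the macro is visible to it (for example by listing it in the PREDEFINED tag discussed next), and with MACRO_EXPANSION = YES it expands convenience macros so the expanded declaration is what ends up in the documentation. All names here are illustrative and not part of nordugrid-arc.

// Hypothetical convenience macro; its expansion is only visible to doxygen
// when MACRO_EXPANSION = YES.
#define ARC_DEPRECATED __attribute__((deprecated))

class TransferStats {
public:
#ifdef HAVE_CHECKSUM_SUPPORT
  /// Appears in the generated documentation only if the preprocessor
  /// sees HAVE_CHECKSUM_SUPPORT, e.g. via the PREDEFINED tag.
  bool verify_checksum() const;
#endif
  /// Old entry point kept for backwards compatibility.
  void legacy_transfer() ARC_DEPRECATED;
};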
SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition that # overrules the definition found in the source code. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all references to function-like macros # that are alone on a line, have an all uppercase name, and do not end with a # semicolon, because these will confuse the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. For each # tag file the location of the external documentation should be added. The # format of a tag file without this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths # or URLs. Note that each tag file must have a unique name (where the name does # NOT include the path). If a tag file is not located in the directory in which # doxygen is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. 
Setting the tag to NO turns the diagrams off. Note that # this option also works with HAVE_DOT disabled, but it is recommended to # install and use dot, since it yields more powerful graphs. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # The DOT_NUM_THREADS specifies the number of dot invocations doxygen is # allowed to run in parallel. When set to 0 (the default) doxygen will # base this on the number of processors available in the system. You can set it # explicitly to a value larger than 0 to get control over the balance # between CPU load and processing speed. DOT_NUM_THREADS = 0 # By default doxygen will use the Helvetica font for all dot files that # doxygen generates. When you want a differently looking font you can specify # the font name using DOT_FONTNAME. You need to make sure dot is able to find # the font, which can be done by putting it in a standard location or by setting # the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the # directory containing the font. DOT_FONTNAME = Helvetica # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the Helvetica font. # If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to # set the path where dot can find it. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If the UML_LOOK tag is enabled, the fields and methods are shown inside # the class node. If there are many fields or methods and many nodes the # graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS # threshold limits the number of items for each type to make the size more # managable. Set this to 0 for no limit. Note that the threshold may be # exceeded by 50% before the limit is enforced. 
UML_LIMIT_NUM_FIELDS = 10 # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will generate a graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are svg, png, jpg, or gif. # If left blank png will be used. If you choose svg you need to set # HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible in IE 9+ (other browsers do not have this requirement). DOT_IMAGE_FORMAT = png # If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to # enable generation of interactive SVG images that allow zooming and panning. # Note that this requires a modern browser other than Internet Explorer. # Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you # need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible. Older versions of IE do not have SVG support. INTERACTIVE_SVG = NO # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The MSCFILE_DIRS tag can be used to specify one or more directories that # contain msc files that are included in the documentation (see the # \mscfile command). MSCFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. 
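As a minimal, purely illustrative example of what the diagram options above operate on: with CLASS_DIAGRAMS = YES (or CLASS_GRAPH = YES together with HAVE_DOT = YES) Doxygen draws an inheritance diagram for a class pair like the one below, and COLLABORATION_GRAPH additionally draws edges for members that reference other documented classes. The class names are not from this package.

/// Abstract base class for delivery back-ends (illustrative only).
class DeliveryBackend {
public:
  virtual ~DeliveryBackend() {}
  /// Perform the transfer; returns true on success.
  virtual bool transfer() = 0;
};

/// Appears as a derived node in the inheritance diagram of DeliveryBackend.
class LocalDelivery : public DeliveryBackend {
public:
  virtual bool transfer();
private:
  /// Drawn as a collaboration edge to Logger when COLLABORATION_GRAPH = YES.
  class Logger* log_;
};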
Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = YES # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES nordugrid-arc-5.4.2/src/doxygen/PaxHeaders.7502/create-mapping-documentation.py0000644000000000000000000000012412267231263025562 xustar000000000000000027 mtime=1390228147.136231 27 atime=1513200574.231698 30 ctime=1513200664.702805003 nordugrid-arc-5.4.2/src/doxygen/create-mapping-documentation.py0000644000175000002070000002256212267231263025636 0ustar00mockbuildmock00000000000000#!/usr/bin/python # TODO: Document how to use. # TODO: Add list of the plugins which provides the mappings. # TODO: Deal with multiple values. # TODO: Deal with fixed values. # TODO: Deal with conditional values. # TODO: Deal with units # TODO: Deal with expressions # TODO: Deal with attributes in specialisation not mapped to library # TODO: Deal with attributes in library which is not mapped to specialisation # # # Usable commands and syntax: # Use in library files: # \mapdef \n # \mapdefattr # Use in specialisation files: # \mapname \n # \mapattr {->|<-} [""] # \mapnote import sys, re # File to write documentation to outfilename = sys.argv[-1] sourcefilename = sys.argv[1] # Find files which contains documentation on mappings, i.e. specifies the \mapfile attribute. 
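To make the annotation syntax listed in the script header above concrete, here is a hedged sketch of how such Doxygen comments might look; the attribute, plugin and id names are purely illustrative and are not claimed to match the real libarccompute attributes. The first block would live in the library header given as the first argument (scanned for \mapdef and \mapdefattr), the second in one of the plugin source files (scanned for \mapname, \mapattr and \mapnote).

// Library header passed as the first argument:
/// \mapdef attributemapping Mapping of job description attributes
/// Describes how submission plugins translate their native job
/// description attributes to and from the libarccompute representation.
///
/// \mapdefattr JobDescription::Identification::JobName Arc
/// \mapdefattr JobDescription::Application::Executable Arc

// Plugin (specialisation) source file passed as one of the middle arguments:
/// \mapname EMIES EMI-ES
/// Mapping used by a hypothetical EMI-ES submission plugin.
/// \mapattr JobName -> JobDescription::Identification::JobName
/// \mapattr Executable <- JobDescription::Application::Executable "only the basename is used"
/// \mapnote Attributes not listed here are ignored by this plugin.

Invoked as create-mapping-documentation.py <library file> <plugin files ...> <output file>, the script collects such annotations into a Doxygen page with one table per library attribute and one table per plugin.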
mapfiles = sys.argv[2:-1] mapdef = {"id" : "", "name" : "", "description" : "", "attributes" : [], "attributeprefixes" : []} inMapdef = False justAfterMapdef = False # Go through library file sourcefile = open(sourcefilename, "r") i = 0 for line in sourcefile: i += 1 line = line.strip().lstrip("*").lstrip() if line[0:3] == "///": line = line.lstrip("/").lstrip() if justAfterMapdef: if line == "" or line == "/": justAfterMapdef = False continue mapdef["description"] += line + " " continue elif line[0:12] == "\mapdefattr ": regMatch = re.match("([^\s]+)\s+([^\s]+)", line[12:].lstrip()) if not regMatch: print "ERROR: Wrong format of the \mapdefattr attribute in '%s' file on line %d" % (sourcefilename, i) sys.exit(1) mapdef["attributes"].append(regMatch.group(1)) mapdef["attributeprefixes"].append(regMatch.group(2)) elif line[0:8] == "\mapdef ": regMatch = re.match("(\w+)\s+(.+)", line[8:].lstrip()) if not regMatch: print "ERROR: Wrong format of the \mapdef attribute in '%s' file on line %d" % (sourcefilename, i) sys.exit(1) mapdef["id"] = regMatch.group(1) mapdef["name"] = regMatch.group(2) inMapdef = True justAfterMapdef = True continue sourcefile.close() # Go through specialisation files mappings = [] for filename in mapfiles: m = {"id" : "", "name" : "", "description" : [], "notes" : [], "attributes" : {}} for attr in mapdef["attributes"]: m["attributes"][attr] = {} m["attributes"][attr]["in"] = [] m["attributes"][attr]["out"] = [] m["attributes"][attr]["in-note"] = [] m["attributes"][attr]["out-note"] = [] f = open(filename, "r") justAfterMapName = False i = 0 for line in f: i += 1 line = line.strip() if line[0:3] != "///": justAfterMapName = False continue line = line[3:].lstrip() if line[0:9] == "\mapname ": regMatch = re.match("(\w+)\s+(.+)", line[9:].lstrip()) if not regMatch: print "ERROR: Wrong format of the \mapname command in '%s' file on line %d" % (filename, i) sys.exit(1) m["id"] = regMatch.group(1) m["name"] = regMatch.group(2) justAfterMapName = True elif line[0:9] == "\mapnote ": justAfterMapdef = False m["notes"].append(line[9:].lstrip()) elif line[0:9] == "\mapattr ": justAfterMapdef = False # -> [""] regMatch = re.match("(.+)\s+->\s+([^\s]+)(?:\s+\"([^\"]+)\")?", line[9:]) if regMatch: if not m["attributes"].has_key(regMatch.group(2)): print "ERROR: The '%s' attribute present in file '%s' on line %d is not defined in file '%s'" % (regMatch.group(2), filename, i, sourcefilename) sys.exit(1) m["attributes"][regMatch.group(2)]["in"].append(regMatch.group(1)) if regMatch.group(3): m["attributes"][regMatch.group(2)]["in-note"].append(regMatch.group(3)) continue regMatch = re.match("(.+)\s+<-\s+([^\s]+)(?:\s+\"([^\"]+)\")?", line[9:]) if regMatch: if not m["attributes"].has_key(regMatch.group(2)): print "ERROR: The '%s' attribute present in file '%s' on line %d is not defined in file '%s'" % (regMatch.group(2), filename, i, sourcefilename) sys.exit(1) m["attributes"][regMatch.group(2)]["out"].append(regMatch.group(1)) if regMatch.group(3): m["attributes"][regMatch.group(2)]["out-note"].append(regMatch.group(3)) continue elif justAfterMapName: m["description"].append(line) mappings.append(m) f.close() # Write mapping to doxygen formatted file. outfile = open(outfilename, "w") outfile.write("/** \n") outfile.write("\\page {id} {name}\n{description}\n".format(**mapdef)) outfile.write("\\tableofcontents\n") # Create mapping per lib. 
attribute outfile.write("\\section attr Grouped by libarccompute attributes\n") for i in range(len(mapdef["attributes"])): outfile.write("\n\\subsection attr_{formatted_attr} {attr}\n".format(formatted_attr = re.sub('::', "_", mapdef["attributes"][i]), attr = mapdef["attributes"][i])) outfile.write("\\ref {prefix}::{attr} \"Attribute description\"\n\n".format(attr = mapdef["attributes"][i], prefix = mapdef["attributeprefixes"][i])) has_input = has_output = False attributes_to_write_to_table = "" for m in mappings: has_input = has_input or m["attributes"][mapdef["attributes"][i]]["in"] has_output = has_output or m["attributes"][mapdef["attributes"][i]]["out"] notes = [] for m in mappings: attr = m["attributes"][mapdef["attributes"][i]] if attr["in"] or attr["out"]: attributes_to_write_to_table += "| %s |" % (m["name"]) if has_input: attributes_to_write_to_table += " %s" % (",
".join(attr["in"])) if attr["in-note"]: attributes_to_write_to_table += "[%s]" % ("][".join(str(x) for x in range(len(notes)+1, len(notes)+1+len(attr["in-note"])))) notes += attr["in-note"] attributes_to_write_to_table += " |" if has_output else "" if has_output: attributes_to_write_to_table += " %s" % (",
".join(attr["out"])) if attr["out-note"]: attributes_to_write_to_table += "[%s]" % ("][".join(str(x) for x in range(len(notes)+1, len(notes)+1+len(attr["out-note"])))) notes += attr["out-note"] attributes_to_write_to_table += " |\n" if attributes_to_write_to_table and (has_input or has_output): table_header = "| Specialisation" table_header += " | Input" if has_input else "" table_header += " | Output" if has_output else "" outfile.write(table_header + " |\n") outfile.write(re.sub(r'[ \w]', '-', table_header) + " |\n") outfile.write(attributes_to_write_to_table) if notes: outfile.write("Notes:
  1. %s
" % ("
  • ".join(notes))) else: outfile.write("No specialisations maps attributes to this field/value.\n") # Create mapping per specialisation outfile.write("\\section specialisation Grouped by plugin\n") for m in mappings: outfile.write("\n\\subsection specialisation_{id} {name}\n".format(**m)) if m["description"]: outfile.write(" ".join(m["description"]) + "\n") if len(m["notes"]) > 0: outfile.write('
    \n
    Note
    \n') for note in m["notes"]: outfile.write('
    ' + note+ '
    \n') outfile.write('
    \n') has_input = has_output = False for attr, m_attrs in m["attributes"].iteritems(): has_input = has_input or bool(m_attrs["in"]) has_output = has_output or bool(m_attrs["out"]) table_header = "| Input " if has_input else "" table_header += "| Lib. attr. |" table_header += " Output |" if has_output else "" outfile.write(table_header + "\n") outfile.write(re.sub(r'[. \w]', '-', table_header) + "\n") notes = [] for attr, m_attrs in m["attributes"].iteritems(): if not m_attrs["in"] and not m_attrs["out"]: continue line = "" if has_input: line += "| %s" % (", ".join(m_attrs["in"])) if m_attrs["in-note"]: line += "[%s]" % ("][".join(str(x) for x in range(len(notes)+1, len(notes)+1+len(m_attrs["in-note"])))) notes += m_attrs["in-note"] line += " " line += "| \\ref Arc::" + attr + ' "' + attr + '" |' if has_output: line += " %s" % (", ".join(m_attrs["out"])) if m_attrs["out-note"]: line += "[%s]" % ("][".join(str(x) for x in range(len(notes)+1, len(notes)+1+len(m_attrs["out-note"])))) notes += m_attrs["out-note"] line += " |" outfile.write(line + '\n') if notes: outfile.write("Notes:
    1. %s
    " % ("
  • ".join(notes))) outfile.write("**/\n") nordugrid-arc-5.4.2/src/PaxHeaders.7502/libs0000644000000000000000000000012613214316026016640 xustar000000000000000028 mtime=1513200662.0697728 30 atime=1513200668.716854096 28 ctime=1513200662.0697728 nordugrid-arc-5.4.2/src/libs/0000755000175000002070000000000013214316026016760 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/libs/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711523013225020746 xustar000000000000000027 mtime=1296832149.591951 30 atime=1513200601.338030027 30 ctime=1513200662.066772763 nordugrid-arc-5.4.2/src/libs/Makefile.am0000644000175000002070000000006311523013225021007 0ustar00mockbuildmock00000000000000SUBDIRS = data-staging DIST_SUBDIRS = data-staging nordugrid-arc-5.4.2/src/libs/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315731020761 xustar000000000000000030 mtime=1513200601.369030406 30 atime=1513200649.644620835 30 ctime=1513200662.067772775 nordugrid-arc-5.4.2/src/libs/Makefile.in0000644000175000002070000005545113214315731021041 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/libs DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive 
installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ 
GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = 
@SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = data-staging DIST_SUBDIRS = data-staging all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/libs/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/libs/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/libs/PaxHeaders.7502/data-staging0000644000000000000000000000013213214316026021200 xustar000000000000000030 mtime=1513200662.115773362 30 atime=1513200668.716854096 30 ctime=1513200662.115773362 nordugrid-arc-5.4.2/src/libs/data-staging/0000755000175000002070000000000013214316026021323 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/Makefile.am0000644000000000000000000000012713153455410023320 xustar000000000000000027 mtime=1504598792.949892 30 atime=1513200601.384030589 30 ctime=1513200662.099773167 nordugrid-arc-5.4.2/src/libs/data-staging/Makefile.am0000644000175000002070000000271613153455410023370 0ustar00mockbuildmock00000000000000DIST_SUBDIRS = test examples SUBDIRS = . 
$(TEST_DIR) examples lib_LTLIBRARIES = libarcdatastaging.la libarcdatastaging_ladir = $(pkgincludedir)/data-staging libarcdatastaging_la_HEADERS = DataDelivery.h DataDeliveryComm.h \ DataDeliveryLocalComm.h DataDeliveryRemoteComm.h DTR.h DTRList.h \ DTRStatus.h Processor.h Scheduler.h TransferShares.h libarcdatastaging_la_SOURCES = DataDelivery.cpp DataDeliveryComm.cpp \ DataDeliveryLocalComm.cpp DataDeliveryRemoteComm.cpp DTR.cpp DTRList.cpp \ DTRStatus.cpp Processor.cpp Scheduler.cpp TransferShares.cpp libarcdatastaging_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libarcdatastaging_la_LIBADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(GLIBMM_LIBS) libarcdatastaging_la_LDFLAGS = -version-info 3:0:0 pgmpkglibdir = $(pkglibdir) pgmpkglib_PROGRAMS = DataStagingDelivery DataStagingDelivery_SOURCES = DataStagingDelivery.cpp DataStagingDelivery_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) DataStagingDelivery_LDADD = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/DTRList.h0000644000000000000000000000012412675602216022725 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200576.794729 30 ctime=1513200662.094773106 nordugrid-arc-5.4.2/src/libs/data-staging/DTRList.h0000644000175000002070000001120112675602216022765 0ustar00mockbuildmock00000000000000#ifndef DTRLIST_H_ #define DTRLIST_H_ #include #include "DTR.h" namespace DataStaging { /// Global list of all active DTRs in the system. /** * This class contains several methods for filtering the list by owner, state * etc. * \ingroup datastaging * \headerfile DTRList.h arc/data-staging/DTRList.h */ class DTRList { private: /// Internal list of DTRs std::list DTRs; /// Lock to protect list during modification Arc::SimpleCondition Lock; /// Internal set of sources that are currently being cached std::set CachingSources; /// Lock to protect caching sources set during modification Arc::SimpleCondition CachingLock; public: /// Put a new DTR into the list. bool add_dtr(DTR_ptr DTRToAdd); /// Remove a DTR from the list. bool delete_dtr(DTR_ptr DTRToDelete); /// Filter the queue to select DTRs owned by a specified process. /** * @param OwnerToFilter The owner to filter on * @param FilteredList This list is filled with filtered DTRs */ bool filter_dtrs_by_owner(StagingProcesses OwnerToFilter, std::list& FilteredList); /// Returns the number of DTRs owned by a particular process int number_of_dtrs_by_owner(StagingProcesses OwnerToFilter); /// Filter the queue to select DTRs with particular status. /** * If we have only one common queue for all DTRs, this method is * necessary to make virtual queues for the DTRs about to go into the * pre-, post-processor or delivery stages. * @param StatusToFilter DTR status to filter on * @param FilteredList This list is filled with filtered DTRs */ bool filter_dtrs_by_status(DTRStatus::DTRStatusType StatusToFilter, std::list& FilteredList); /// Filter the queue to select DTRs with particular statuses. 
/** * @param StatusesToFilter Vector of DTR statuses to filter on * @param FilteredList This list is filled with filtered DTRs */ bool filter_dtrs_by_statuses(const std::vector& StatusesToFilter, std::list& FilteredList); /// Filter the queue to select DTRs with particular statuses. /** * @param StatusesToFilter Vector of DTR statuses to filter on * @param FilteredList This map is filled with filtered DTRs, * one list per state. */ bool filter_dtrs_by_statuses(const std::vector& StatusesToFilter, std::map >& FilteredList); /// Select DTRs that are about to go to the specified process. /** * This selection is actually a virtual queue for pre-, post-processor * and delivery. * @param NextReceiver The process to filter on * @param FilteredList This list is filled with filtered DTRs */ bool filter_dtrs_by_next_receiver(StagingProcesses NextReceiver, std::list& FilteredList); /// Select DTRs that have just arrived from pre-, post-processor, delivery or generator. /** * These DTRs need some reaction from the scheduler. This selection is * actually a virtual queue of DTRs that need to be processed. * @param FilteredList This list is filled with filtered DTRs */ bool filter_pending_dtrs(std::list& FilteredList); /// Get the list of DTRs corresponding to the given job ID. /** * @param jobid Job id to filter on * @param FilteredList This list is filled with filtered DTRs */ bool filter_dtrs_by_job(const std::string& jobid, std::list& FilteredList); /// Update the caching set, add a DTR (only if it is CACHEABLE). void caching_started(DTR_ptr request); /// Update the caching set, removing a DTR. void caching_finished(DTR_ptr request); /// Returns true if the DTR's source is currently in the caching set. bool is_being_cached(DTR_ptr DTRToCheck); /// Returns true if there are no DTRs in the list bool empty(); /// Get the list of all job IDs std::list all_jobs(); /// Return the size of the DTR list unsigned int size(); /// Dump state of all current DTRs to a destination, eg file, database, url... /** * Currently only file is supported. * @param path Path to the file in which to dump state. */ void dumpState(const std::string& path); }; } // namespace DataStaging #endif /*DTRLIST_H_*/ nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/Processor.h0000644000000000000000000000012212462175330023411 xustar000000000000000026 mtime=1422457560.71215 27 atime=1513200576.788729 29 ctime=1513200662.09677313 nordugrid-arc-5.4.2/src/libs/data-staging/Processor.h0000644000175000002070000000744112462175330023466 0ustar00mockbuildmock00000000000000#ifndef PROCESSOR_H_ #define PROCESSOR_H_ #include #include "DTR.h" namespace DataStaging { /// The Processor performs pre- and post-transfer operations. /** * The Processor takes care of everything that should happen before * and after a transfer takes place. Calling receiveDTR() spawns a * thread to perform the required operation depending on the DTR state. 
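 *
 * A minimal usage sketch (illustrative only; it assumes an existing DTR_ptr named dtr
 * created elsewhere, with a scheduler already registered as its callback):
 * \code
 * DataStaging::Processor processor;
 * processor.start();         // currently a no-op, kept for interface symmetry
 * processor.receiveDTR(dtr); // spawns a thread; the DTR is pushed back to the scheduler when done
 * processor.stop();          // waits for all spawned threads to finish
 * \endcode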
* \ingroup datastaging * \headerfile Processor.h arc/data-staging/Processor.h */ class Processor: public DTRCallback { private: /// Private copy constructor because Processor should not be copied Processor(const Processor&); /// Private assignment operator because Processor should not be copied Processor& operator=(const Processor&); /// Class used to pass information to spawned thread class ThreadArgument { public: Processor* proc; DTR_ptr dtr; ThreadArgument(Processor* proc_, DTR_ptr dtr_):proc(proc_),dtr(dtr_) { }; }; /// Class used to pass information to spawned thread (for bulk operations) class BulkThreadArgument { public: Processor* proc; std::list<DTR_ptr> dtrs; BulkThreadArgument(Processor* proc_, const std::list<DTR_ptr>& dtrs_):proc(proc_),dtrs(dtrs_) { }; }; /// Counter of active threads Arc::SimpleCounter thread_count; /// List of DTRs to be processed in bulk. Filled between receiveDTR /// receiving a DTR with bulk_start on and receiving one with bulk_end on. /// It is up to the caller to make sure that all the requests are suitable /// for bulk handling. The list is cleared after the DTR with bulk_end set. std::list<DTR_ptr> bulk_list; /// Our hostname static std::string hostname; /* Thread methods which deal with each state */ /// Check the cache to see if the file already exists static void DTRCheckCache(void* arg); /// Resolve replicas of source and destination static void DTRResolve(void* arg); /// Bulk resolve replicas of source and destination static void DTRBulkResolve(void* arg); /// Check if source exists static void DTRQueryReplica(void* arg); /// Bulk check if source exists static void DTRBulkQueryReplica(void* arg); /// Remove destination file before creating a new version static void DTRPreClean(void *arg); /// Call external services to prepare physical files for reading/writing static void DTRStagePrepare(void* arg); /// Release requests made during DTRStagePrepare static void DTRReleaseRequest(void* arg); /// Register destination file in catalog static void DTRRegisterReplica(void* arg); /// Link cached file to final destination static void DTRProcessCache(void* arg); public: /// Constructor Processor(); /// Destructor waits for all active threads to stop. ~Processor() { stop(); }; /// Start Processor. /** * This method currently does nothing. It is here only so that all data staging * classes look alike. It is still better to call it before starting to use * the object, because it may do something in the future. */ void start(void); /// Stop Processor. /** * This method waits for all started threads to end and then exits. Since * threads are short-lived it is better to wait for them rather than interrupt them. */ void stop(void); /// Send a DTR to the Processor. /** * The DTR is sent to the Processor through this method when some * long-latency processing is to be performed, e.g. contacting a * remote service. The Processor spawns a thread to do the processing, * and then returns. The thread pushes the DTR back to the scheduler when * it is finished. */ virtual void receiveDTR(DTR_ptr dtr); }; } // namespace DataStaging #endif /* PROCESSOR_H_ */ nordugrid-arc-5.4.2/src/libs/data-staging/Makefile.in # Makefile.in generated by automake 1.11.1 from Makefile.am. 
# @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ pgmpkglib_PROGRAMS = DataStagingDelivery$(EXEEXT) subdir = src/libs/data-staging DIST_COMMON = README $(libarcdatastaging_la_HEADERS) \ $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(pgmpkglibdir)" \ "$(DESTDIR)$(libarcdatastaging_ladir)" LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = libarcdatastaging_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ 
$(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(am__DEPENDENCIES_1) am_libarcdatastaging_la_OBJECTS = \ libarcdatastaging_la-DataDelivery.lo \ libarcdatastaging_la-DataDeliveryComm.lo \ libarcdatastaging_la-DataDeliveryLocalComm.lo \ libarcdatastaging_la-DataDeliveryRemoteComm.lo \ libarcdatastaging_la-DTR.lo libarcdatastaging_la-DTRList.lo \ libarcdatastaging_la-DTRStatus.lo \ libarcdatastaging_la-Processor.lo \ libarcdatastaging_la-Scheduler.lo \ libarcdatastaging_la-TransferShares.lo libarcdatastaging_la_OBJECTS = $(am_libarcdatastaging_la_OBJECTS) libarcdatastaging_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) \ $(libarcdatastaging_la_LDFLAGS) $(LDFLAGS) -o $@ PROGRAMS = $(pgmpkglib_PROGRAMS) am_DataStagingDelivery_OBJECTS = \ DataStagingDelivery-DataStagingDelivery.$(OBJEXT) DataStagingDelivery_OBJECTS = $(am_DataStagingDelivery_OBJECTS) DataStagingDelivery_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) DataStagingDelivery_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(DataStagingDelivery_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcdatastaging_la_SOURCES) \ $(DataStagingDelivery_SOURCES) DIST_SOURCES = $(libarcdatastaging_la_SOURCES) \ $(DataStagingDelivery_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive HEADERS = $(libarcdatastaging_la_HEADERS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ 
pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ 
GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = 
@builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ DIST_SUBDIRS = test examples SUBDIRS = . $(TEST_DIR) examples lib_LTLIBRARIES = libarcdatastaging.la libarcdatastaging_ladir = $(pkgincludedir)/data-staging libarcdatastaging_la_HEADERS = DataDelivery.h DataDeliveryComm.h \ DataDeliveryLocalComm.h DataDeliveryRemoteComm.h DTR.h DTRList.h \ DTRStatus.h Processor.h Scheduler.h TransferShares.h libarcdatastaging_la_SOURCES = DataDelivery.cpp DataDeliveryComm.cpp \ DataDeliveryLocalComm.cpp DataDeliveryRemoteComm.cpp DTR.cpp DTRList.cpp \ DTRStatus.cpp Processor.cpp Scheduler.cpp TransferShares.cpp libarcdatastaging_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libarcdatastaging_la_LIBADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(GLIBMM_LIBS) libarcdatastaging_la_LDFLAGS = -version-info 3:0:0 pgmpkglibdir = $(pkglibdir) DataStagingDelivery_SOURCES = DataStagingDelivery.cpp DataStagingDelivery_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) DataStagingDelivery_LDADD = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/libs/data-staging/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign 
src/libs/data-staging/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcdatastaging.la: $(libarcdatastaging_la_OBJECTS) $(libarcdatastaging_la_DEPENDENCIES) $(libarcdatastaging_la_LINK) -rpath $(libdir) $(libarcdatastaging_la_OBJECTS) $(libarcdatastaging_la_LIBADD) $(LIBS) install-pgmpkglibPROGRAMS: $(pgmpkglib_PROGRAMS) @$(NORMAL_INSTALL) test -z "$(pgmpkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pgmpkglibdir)" @list='$(pgmpkglib_PROGRAMS)'; test -n "$(pgmpkglibdir)" || list=; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p || test -f $$p1; \ then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(pgmpkglibdir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(pgmpkglibdir)$$dir" || exit $$?; \ } \ ; done uninstall-pgmpkglibPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(pgmpkglib_PROGRAMS)'; test -n "$(pgmpkglibdir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed 
-e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pgmpkglibdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pgmpkglibdir)" && rm -f $$files clean-pgmpkglibPROGRAMS: @list='$(pgmpkglib_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list DataStagingDelivery$(EXEEXT): $(DataStagingDelivery_OBJECTS) $(DataStagingDelivery_DEPENDENCIES) @rm -f DataStagingDelivery$(EXEEXT) $(DataStagingDelivery_LINK) $(DataStagingDelivery_OBJECTS) $(DataStagingDelivery_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DataStagingDelivery-DataStagingDelivery.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-DTR.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-DTRList.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-DTRStatus.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-DataDelivery.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-DataDeliveryComm.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-DataDeliveryLocalComm.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-DataDeliveryRemoteComm.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-Processor.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-Scheduler.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-TransferShares.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcdatastaging_la-DataDelivery.lo: DataDelivery.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-DataDelivery.lo -MD -MP -MF $(DEPDIR)/libarcdatastaging_la-DataDelivery.Tpo -c -o libarcdatastaging_la-DataDelivery.lo `test -f 'DataDelivery.cpp' || echo 
'$(srcdir)/'`DataDelivery.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdatastaging_la-DataDelivery.Tpo $(DEPDIR)/libarcdatastaging_la-DataDelivery.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataDelivery.cpp' object='libarcdatastaging_la-DataDelivery.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-DataDelivery.lo `test -f 'DataDelivery.cpp' || echo '$(srcdir)/'`DataDelivery.cpp libarcdatastaging_la-DataDeliveryComm.lo: DataDeliveryComm.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-DataDeliveryComm.lo -MD -MP -MF $(DEPDIR)/libarcdatastaging_la-DataDeliveryComm.Tpo -c -o libarcdatastaging_la-DataDeliveryComm.lo `test -f 'DataDeliveryComm.cpp' || echo '$(srcdir)/'`DataDeliveryComm.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdatastaging_la-DataDeliveryComm.Tpo $(DEPDIR)/libarcdatastaging_la-DataDeliveryComm.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataDeliveryComm.cpp' object='libarcdatastaging_la-DataDeliveryComm.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-DataDeliveryComm.lo `test -f 'DataDeliveryComm.cpp' || echo '$(srcdir)/'`DataDeliveryComm.cpp libarcdatastaging_la-DataDeliveryLocalComm.lo: DataDeliveryLocalComm.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-DataDeliveryLocalComm.lo -MD -MP -MF $(DEPDIR)/libarcdatastaging_la-DataDeliveryLocalComm.Tpo -c -o libarcdatastaging_la-DataDeliveryLocalComm.lo `test -f 'DataDeliveryLocalComm.cpp' || echo '$(srcdir)/'`DataDeliveryLocalComm.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdatastaging_la-DataDeliveryLocalComm.Tpo $(DEPDIR)/libarcdatastaging_la-DataDeliveryLocalComm.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataDeliveryLocalComm.cpp' object='libarcdatastaging_la-DataDeliveryLocalComm.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-DataDeliveryLocalComm.lo `test -f 'DataDeliveryLocalComm.cpp' || echo '$(srcdir)/'`DataDeliveryLocalComm.cpp libarcdatastaging_la-DataDeliveryRemoteComm.lo: DataDeliveryRemoteComm.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-DataDeliveryRemoteComm.lo -MD -MP 
-MF $(DEPDIR)/libarcdatastaging_la-DataDeliveryRemoteComm.Tpo -c -o libarcdatastaging_la-DataDeliveryRemoteComm.lo `test -f 'DataDeliveryRemoteComm.cpp' || echo '$(srcdir)/'`DataDeliveryRemoteComm.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdatastaging_la-DataDeliveryRemoteComm.Tpo $(DEPDIR)/libarcdatastaging_la-DataDeliveryRemoteComm.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataDeliveryRemoteComm.cpp' object='libarcdatastaging_la-DataDeliveryRemoteComm.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-DataDeliveryRemoteComm.lo `test -f 'DataDeliveryRemoteComm.cpp' || echo '$(srcdir)/'`DataDeliveryRemoteComm.cpp libarcdatastaging_la-DTR.lo: DTR.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-DTR.lo -MD -MP -MF $(DEPDIR)/libarcdatastaging_la-DTR.Tpo -c -o libarcdatastaging_la-DTR.lo `test -f 'DTR.cpp' || echo '$(srcdir)/'`DTR.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdatastaging_la-DTR.Tpo $(DEPDIR)/libarcdatastaging_la-DTR.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DTR.cpp' object='libarcdatastaging_la-DTR.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-DTR.lo `test -f 'DTR.cpp' || echo '$(srcdir)/'`DTR.cpp libarcdatastaging_la-DTRList.lo: DTRList.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-DTRList.lo -MD -MP -MF $(DEPDIR)/libarcdatastaging_la-DTRList.Tpo -c -o libarcdatastaging_la-DTRList.lo `test -f 'DTRList.cpp' || echo '$(srcdir)/'`DTRList.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdatastaging_la-DTRList.Tpo $(DEPDIR)/libarcdatastaging_la-DTRList.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DTRList.cpp' object='libarcdatastaging_la-DTRList.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-DTRList.lo `test -f 'DTRList.cpp' || echo '$(srcdir)/'`DTRList.cpp libarcdatastaging_la-DTRStatus.lo: DTRStatus.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-DTRStatus.lo -MD -MP -MF $(DEPDIR)/libarcdatastaging_la-DTRStatus.Tpo -c -o libarcdatastaging_la-DTRStatus.lo `test -f 'DTRStatus.cpp' || echo '$(srcdir)/'`DTRStatus.cpp @am__fastdepCXX_TRUE@ 
$(am__mv) $(DEPDIR)/libarcdatastaging_la-DTRStatus.Tpo $(DEPDIR)/libarcdatastaging_la-DTRStatus.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DTRStatus.cpp' object='libarcdatastaging_la-DTRStatus.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-DTRStatus.lo `test -f 'DTRStatus.cpp' || echo '$(srcdir)/'`DTRStatus.cpp libarcdatastaging_la-Processor.lo: Processor.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-Processor.lo -MD -MP -MF $(DEPDIR)/libarcdatastaging_la-Processor.Tpo -c -o libarcdatastaging_la-Processor.lo `test -f 'Processor.cpp' || echo '$(srcdir)/'`Processor.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdatastaging_la-Processor.Tpo $(DEPDIR)/libarcdatastaging_la-Processor.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Processor.cpp' object='libarcdatastaging_la-Processor.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-Processor.lo `test -f 'Processor.cpp' || echo '$(srcdir)/'`Processor.cpp libarcdatastaging_la-Scheduler.lo: Scheduler.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-Scheduler.lo -MD -MP -MF $(DEPDIR)/libarcdatastaging_la-Scheduler.Tpo -c -o libarcdatastaging_la-Scheduler.lo `test -f 'Scheduler.cpp' || echo '$(srcdir)/'`Scheduler.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdatastaging_la-Scheduler.Tpo $(DEPDIR)/libarcdatastaging_la-Scheduler.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Scheduler.cpp' object='libarcdatastaging_la-Scheduler.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-Scheduler.lo `test -f 'Scheduler.cpp' || echo '$(srcdir)/'`Scheduler.cpp libarcdatastaging_la-TransferShares.lo: TransferShares.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-TransferShares.lo -MD -MP -MF $(DEPDIR)/libarcdatastaging_la-TransferShares.Tpo -c -o libarcdatastaging_la-TransferShares.lo `test -f 'TransferShares.cpp' || echo '$(srcdir)/'`TransferShares.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdatastaging_la-TransferShares.Tpo $(DEPDIR)/libarcdatastaging_la-TransferShares.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='TransferShares.cpp' 
object='libarcdatastaging_la-TransferShares.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-TransferShares.lo `test -f 'TransferShares.cpp' || echo '$(srcdir)/'`TransferShares.cpp DataStagingDelivery-DataStagingDelivery.o: DataStagingDelivery.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DataStagingDelivery_CXXFLAGS) $(CXXFLAGS) -MT DataStagingDelivery-DataStagingDelivery.o -MD -MP -MF $(DEPDIR)/DataStagingDelivery-DataStagingDelivery.Tpo -c -o DataStagingDelivery-DataStagingDelivery.o `test -f 'DataStagingDelivery.cpp' || echo '$(srcdir)/'`DataStagingDelivery.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/DataStagingDelivery-DataStagingDelivery.Tpo $(DEPDIR)/DataStagingDelivery-DataStagingDelivery.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataStagingDelivery.cpp' object='DataStagingDelivery-DataStagingDelivery.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DataStagingDelivery_CXXFLAGS) $(CXXFLAGS) -c -o DataStagingDelivery-DataStagingDelivery.o `test -f 'DataStagingDelivery.cpp' || echo '$(srcdir)/'`DataStagingDelivery.cpp DataStagingDelivery-DataStagingDelivery.obj: DataStagingDelivery.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DataStagingDelivery_CXXFLAGS) $(CXXFLAGS) -MT DataStagingDelivery-DataStagingDelivery.obj -MD -MP -MF $(DEPDIR)/DataStagingDelivery-DataStagingDelivery.Tpo -c -o DataStagingDelivery-DataStagingDelivery.obj `if test -f 'DataStagingDelivery.cpp'; then $(CYGPATH_W) 'DataStagingDelivery.cpp'; else $(CYGPATH_W) '$(srcdir)/DataStagingDelivery.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/DataStagingDelivery-DataStagingDelivery.Tpo $(DEPDIR)/DataStagingDelivery-DataStagingDelivery.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataStagingDelivery.cpp' object='DataStagingDelivery-DataStagingDelivery.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DataStagingDelivery_CXXFLAGS) $(CXXFLAGS) -c -o DataStagingDelivery-DataStagingDelivery.obj `if test -f 'DataStagingDelivery.cpp'; then $(CYGPATH_W) 'DataStagingDelivery.cpp'; else $(CYGPATH_W) '$(srcdir)/DataStagingDelivery.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libarcdatastaging_laHEADERS: $(libarcdatastaging_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarcdatastaging_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarcdatastaging_ladir)" @list='$(libarcdatastaging_la_HEADERS)'; test -n "$(libarcdatastaging_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarcdatastaging_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarcdatastaging_ladir)" || exit $$?; \ done uninstall-libarcdatastaging_laHEADERS: @$(NORMAL_UNINSTALL) 
@list='$(libarcdatastaging_la_HEADERS)'; test -n "$(libarcdatastaging_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarcdatastaging_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libarcdatastaging_ladir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) $(HEADERS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(pgmpkglibdir)" "$(DESTDIR)$(libarcdatastaging_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ clean-pgmpkglibPROGRAMS mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-libarcdatastaging_laHEADERS \ install-pgmpkglibPROGRAMS install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-libLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-libLTLIBRARIES \ uninstall-libarcdatastaging_laHEADERS \ uninstall-pgmpkglibPROGRAMS .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic \ clean-libLTLIBRARIES clean-libtool clean-pgmpkglibPROGRAMS \ ctags ctags-recursive distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-libLTLIBRARIES \ install-libarcdatastaging_laHEADERS install-man install-pdf \ install-pdf-am install-pgmpkglibPROGRAMS install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am \ uninstall-libLTLIBRARIES uninstall-libarcdatastaging_laHEADERS \ uninstall-pgmpkglibPROGRAMS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/DataDeliveryLocalComm.h0000644000000000000000000000012313214315176025577 xustar000000000000000027 mtime=1513200254.761812 26 atime=1513200576.80973 30 ctime=1513200662.090773057 nordugrid-arc-5.4.2/src/libs/data-staging/DataDeliveryLocalComm.h0000644000175000002070000000260113214315176025644 0ustar00mockbuildmock00000000000000#ifndef DATADELIVERYLOCALCOMM_H_ #define DATADELIVERYLOCALCOMM_H_ #include #include "DataDeliveryComm.h" namespace DataStaging { /// This class starts, monitors and controls a local Delivery process. 
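/// A minimal sketch of how such a transfer is typically driven (illustrative only; dtr and
/// params are assumed to exist already, and instances are normally obtained through the
/// DataDeliveryComm::CreateInstance() factory rather than constructed directly):
/// \code
/// DataStaging::DataDeliveryComm* comm =
///     DataStaging::DataDeliveryComm::CreateInstance(dtr, params); // starts the transfer
/// DataStaging::DataDeliveryComm::Status s = comm->GetStatus();    // progress filled in by the handler
/// delete comm;                                                    // stops the transfer and cleans up
/// \endcode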
/** * \ingroup datastaging * \headerfile DataDeliveryLocalComm.h arc/data-staging/DataDeliveryLocalComm.h */ class DataDeliveryLocalComm : public DataDeliveryComm { public: /// Starts child process DataDeliveryLocalComm(DTR_ptr dtr, const TransferParameters& params); /// This stops the child process virtual ~DataDeliveryLocalComm(); /// Read from stdout of child to get status virtual void PullStatus(); /// Fills allowed_dirs with "/" since local Delivery can access everywhere static bool CheckComm(DTR_ptr dtr, std::vector<std::string>& allowed_dirs, std::string& load_avg); /// Returns true if child process exists virtual operator bool() const { return (child_ != NULL); }; /// Returns true if child process does not exist virtual bool operator!() const { return (child_ == NULL); }; private: /// Child process Arc::Run* child_; /// Stdin of child, used to pass credentials std::string stdin_; /// Temporary credentials location std::string tmp_proxy_; /// Time last communication was received from child Arc::Time last_comm; }; } // namespace DataStaging #endif /* DATADELIVERYLOCALCOMM_H_ */ nordugrid-arc-5.4.2/src/libs/data-staging/DTRStatus.cpp #ifdef HAVE_CONFIG_H #include <config.h> #endif #include "DTRStatus.h" namespace DataStaging { // to do states static const DTRStatus::DTRStatusType to_process_states[] = { DTRStatus::CHECK_CACHE, DTRStatus::RESOLVE, DTRStatus::QUERY_REPLICA, DTRStatus::PRE_CLEAN, DTRStatus::STAGE_PREPARE, DTRStatus::TRANSFER, DTRStatus::RELEASE_REQUEST, DTRStatus::REGISTER_REPLICA, DTRStatus::PROCESS_CACHE }; // doing states static const DTRStatus::DTRStatusType processing_states[] = { DTRStatus::CHECKING_CACHE, DTRStatus::RESOLVING, DTRStatus::QUERYING_REPLICA, DTRStatus::PRE_CLEANING, DTRStatus::STAGING_PREPARING, DTRStatus::TRANSFERRING, DTRStatus::RELEASING_REQUEST, DTRStatus::REGISTERING_REPLICA, DTRStatus::PROCESSING_CACHE }; static const DTRStatus::DTRStatusType staged_states[] = { DTRStatus::STAGING_PREPARING, DTRStatus::STAGING_PREPARING_WAIT, DTRStatus::STAGED_PREPARED, DTRStatus::TRANSFER, DTRStatus::TRANSFERRING, DTRStatus::TRANSFERRING_CANCEL, }; const std::vector<DTRStatus::DTRStatusType> DTRStatus::ToProcessStates(to_process_states, to_process_states + sizeof to_process_states / sizeof to_process_states[0]); const std::vector<DTRStatus::DTRStatusType> DTRStatus::ProcessingStates(processing_states, processing_states + sizeof processing_states / sizeof processing_states[0]); const std::vector<DTRStatus::DTRStatusType> DTRStatus::StagedStates(staged_states, staged_states + sizeof staged_states / sizeof staged_states[0]); static const std::string status_string[DTRStatus::NULL_STATE + 1] = { "NEW", "CHECK_CACHE", "CHECKING_CACHE", "CACHE_WAIT", "CACHE_CHECKED", "RESOLVE", "RESOLVING", "RESOLVED", "QUERY_REPLICA", "QUERYING_REPLICA", "REPLICA_QUERIED", "PRE_CLEAN", "PRE_CLEANING", "PRE_CLEANED", "STAGE_PREPARE", "STAGING_PREPARING", "STAGING_PREPARING_WAIT", "STAGED_PREPARED", "TRANSFER", "TRANSFERRING", "TRANSFERRING_CANCEL", "TRANSFERRED", "RELEASE_REQUEST", "RELEASING_REQUEST", "REQUEST_RELEASED", "REGISTER_REPLICA", "REGISTERING_REPLICA", "REPLICA_REGISTERED", "PROCESS_CACHE", "PROCESSING_CACHE", "CACHE_PROCESSED", "DONE", "CANCELLED", "CANCELLED_FINISHED", "ERROR", "NULL_STATE" }; std::string DTRStatus::str() const { return status_string[status]; } } // namespace 
DataStaging nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/DataDeliveryComm.h0000644000000000000000000000012412675602216024631 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200576.798729 30 ctime=1513200662.089773044 nordugrid-arc-5.4.2/src/libs/data-staging/DataDeliveryComm.h0000644000175000002070000001431212675602216024677 0ustar00mockbuildmock00000000000000#ifndef DATA_DELIVERY_COMM_H_ #define DATA_DELIVERY_COMM_H_ #include "DTR.h" namespace DataStaging { class DataDeliveryCommHandler; /// This class provides an abstract interface for the Delivery layer. /** * Different implementations provide different ways of providing Delivery * functionality. DataDeliveryLocalComm launches a local process to perform * the transfer and DataDeliveryRemoteComm contacts a remote service which * performs the transfer. The implementation is chosen depending on what is * set in the DTR, which the Scheduler should set based on various factors. * * CreateInstance() should be used to get a pointer to the instantiated * object. This also starts the transfer. Deleting this object stops the * transfer and cleans up any used resources. A singleton instance of * DataDeliveryCommHandler regularly polls all active transfers using * PullStatus() and fills the Status object with current information, * which can be obtained through GetStatus(). * \ingroup datastaging * \headerfile DataDeliveryComm.h arc/data-staging/DataDeliveryComm.h */ class DataDeliveryComm { friend class DataDeliveryCommHandler; public: /// Communication status with transfer enum CommStatusType { CommInit, ///< Initializing/starting transfer, rest of information not valid CommNoError, ///< Communication going on smoothly CommTimeout, ///< Communication experienced timeout CommClosed, ///< Communication channel was closed CommExited, ///< Transfer exited. Mostly same as CommClosed but exit detected before pipe closed CommFailed ///< Transfer failed. If we have CommFailed and no error code ///< reported that normally means segfault or external kill. 
}; #pragma pack(4) /// Plain C struct to pass information from executing process back to main thread /** \ingroup datastaging */ struct Status { CommStatusType commstatus; ///< Communication state (filled by main thread) time_t timestamp; ///< Time when information was generated (filled externally) DTRStatus::DTRStatusType status; ///< Generic status DTRErrorStatus::DTRErrorStatusType error; ///< Error type DTRErrorStatus::DTRErrorLocation error_location; ///< Where error happened char error_desc[1024]; ///< Error description unsigned int streams; ///< Number of transfer streams active unsigned long long int transferred;///< Number of bytes transferred unsigned long long int offset; ///< Last position to which file has no missing pieces unsigned long long int size; ///< File size as obtained by protocol unsigned int speed; ///< Current transfer speed in bytes/sec during last ~minute char checksum[128]; ///< Calculated checksum unsigned long long int transfer_time; ///< Time in ns to complete transfer (0 if not completed) }; #pragma pack() protected: /// Current status of transfer Status status_; /// Latest status of transfer is read into this buffer Status status_buf_; /// Reading position of Status buffer unsigned int status_pos_; /// Lock to protect access to status Glib::Mutex lock_; /// Pointer to singleton handler of all DataDeliveryComm objects DataDeliveryCommHandler* handler_; /// Transfer limits TransferParameters transfer_params; /// Time transfer was started Arc::Time start_; /// Logger object. Pointer to DTR's Logger. DTRLogger logger_; /// Check for new state and fill state accordingly. /** * This method is periodically called by the comm handler to obtain status * info. It detects communication and delivery failures and delivery * termination. */ virtual void PullStatus() = 0; /// Start transfer with parameters taken from DTR and supplied transfer limits. /** * Constructor should not be used directly, CreateInstance() should be used * instead. */ DataDeliveryComm(DTR_ptr dtr, const TransferParameters& params); public: /// Factory method to get DataDeliveryComm instance. static DataDeliveryComm* CreateInstance(DTR_ptr dtr, const TransferParameters& params); /// Destroy object. This stops any ongoing transfer and cleans up resources. virtual ~DataDeliveryComm() {}; /// Obtain status of transfer Status GetStatus() const; /// Check the delivery method is available. Calls CheckComm of the appropriate subclass. 
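  // A minimal sketch of probing availability and monitoring progress
  // (illustrative only; "dtr" is assumed to be a prepared DTR_ptr and "comm"
  // a DataDeliveryComm* obtained from CreateInstance()):
  //
  //   std::vector<std::string> dirs;
  //   std::string load;
  //   if (DataDeliveryComm::CheckComm(dtr, dirs, load)) {
  //     DataDeliveryComm::Status s = comm->GetStatus();
  //     if (s.commstatus == DataDeliveryComm::CommFailed) {
  //       std::string reason = comm->GetError();  // human-readable error
  //     }
  //   }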
/** * \param dtr DTR from which credentials are used * \param allowed_dirs filled with list of dirs that this comm is allowed * to read/write * \param load_avg filled with the load average reported by the service * \return true if selected delivery method is available */ static bool CheckComm(DTR_ptr dtr, std::vector& allowed_dirs, std::string& load_avg); /// Get explanation of error std::string GetError() const { return status_.error_desc; }; /// Returns true if transfer is currently active virtual operator bool() const = 0; /// Returns true if transfer is currently not active virtual bool operator!() const = 0; }; /// Singleton class handling all active DataDeliveryComm objects /** * \ingroup datastaging * \headerfile DataDeliveryComm.h arc/data-staging/DataDeliveryComm.h */ class DataDeliveryCommHandler { private: Glib::Mutex lock_; static void func(void* arg); std::list items_; static DataDeliveryCommHandler* comm_handler; /// Constructor is private - getInstance() should be used instead DataDeliveryCommHandler(); DataDeliveryCommHandler(const DataDeliveryCommHandler&); DataDeliveryCommHandler& operator=(const DataDeliveryCommHandler&); public: ~DataDeliveryCommHandler() {}; /// Add a new DataDeliveryComm instance to the handler void Add(DataDeliveryComm* item); /// Remove a DataDeliveryComm instance from the handler void Remove(DataDeliveryComm* item); /// Get the singleton instance of the handler static DataDeliveryCommHandler* getInstance(); }; } // namespace DataStaging #endif // DATA_DELIVERY_COMM_H_ nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/DataDelivery.h0000644000000000000000000000012312075743241024012 xustar000000000000000027 mtime=1358415521.722217 26 atime=1513200576.81873 30 ctime=1513200662.088773032 nordugrid-arc-5.4.2/src/libs/data-staging/DataDelivery.h0000644000175000002070000000636012075743241024065 0ustar00mockbuildmock00000000000000#ifndef DATA_DELIVERY_H_ #define DATA_DELIVERY_H_ #include #include #include #include "DTR.h" #include "DTRList.h" #include "DTRStatus.h" namespace DataStaging { /// DataDelivery transfers data between specified physical locations. /** * start() must be called to start the delivery thread for processing DTRs * and stop() should be called to stop it (this waits for all data transfers * to exit). stop() is also called in the destructor. * * All meta-operations for a DTR such as resolving replicas must be done * before sending to DataDelivery. Calling receiveDTR() starts a new process * which performs data transfer as specified in DTR. * \ingroup datastaging * \headerfile DataDelivery.h arc/data-staging/DataDelivery.h */ class DataDelivery: public DTRCallback { private: /// lock for DTRs list Arc::SimpleCondition dtr_list_lock; /// Wrapper class around delivery process handler class delivery_pair_t; /// DTRs which delivery process has in its queue std::list dtr_list; /// Transfer limits TransferParameters transfer_params; /// Logger object static Arc::Logger logger; /// Flag describing delivery state. Used to decide whether to keep running main loop ProcessState delivery_state; /// Condition to signal end of running Arc::SimpleCondition run_signal; /// Condition on which main thread waits, so it can wake up immediately /// when a new transfer arrives Arc::SimpleCondition cond; /// Thread to start new Delivery process static void start_delivery(void* arg); /// Thread to stop Delivery process static void stop_delivery(void* arg); /// Delete delivery_pair_t object. 
Starts a new thread which calls stop_delivery() bool delete_delivery_pair(delivery_pair_t* dp); /// Static version of main_thread, used when thread is created static void main_thread(void* arg); /// Main thread, which runs until stopped void main_thread(void); /// Copy constructor is private because DataDelivery should not be copied DataDelivery(const DataDelivery&); /// Assignment constructor is private because DataDelivery should not be copied DataDelivery& operator=(const DataDelivery&); public: /// Constructor. DataDelivery(); /// Destructor calls stop() and waits for cancelled processes to exit. ~DataDelivery() { stop(); }; /// Pass a DTR to Delivery. /** * This method is called by the scheduler to pass a DTR to the delivery. * The DataDelivery starts the data transfer either using a local process * or by sending a request to a remote delivery service, and then returns. * DataDelivery's own thread then monitors the transfer. */ virtual void receiveDTR(DTR_ptr request); /// Stop the transfer corresponding to the given DTR. bool cancelDTR(DTR_ptr request); /// Start the Delivery thread, which runs until stop() is called. bool start(); /// Tell the delivery to stop all transfers and threads and exit. bool stop(); /// Set transfer limits. void SetTransferParameters(const TransferParameters& params); }; } // namespace DataStaging #endif /*DATA_DELIVERY_H_*/ nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/test0000644000000000000000000000013213214316026022157 xustar000000000000000030 mtime=1513200662.143773705 30 atime=1513200668.716854096 30 ctime=1513200662.143773705 nordugrid-arc-5.4.2/src/libs/data-staging/test/0000755000175000002070000000000013214316026022302 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/libs/data-staging/test/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712060141370024270 xustar000000000000000027 mtime=1354810104.605745 30 atime=1513200601.543032534 30 ctime=1513200662.138773644 nordugrid-arc-5.4.2/src/libs/data-staging/test/Makefile.am0000644000175000002070000000343412060141370024336 0ustar00mockbuildmock00000000000000# Tests require mock DMC which can be enabled via configure --enable-mock-dmc if MOCK_DMC_ENABLED TESTS = DTRTest ProcessorTest DeliveryTest else TESTS = endif check_PROGRAMS = $(TESTS) TESTS_ENVIRONMENT = env ARC_PLUGIN_PATH=$(top_builddir)/src/hed/dmc/mock/.libs:$(top_builddir)/src/hed/dmc/file/.libs DTRTest_SOURCES = $(top_srcdir)/src/Test.cpp DTRTest.cpp DTRTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) DTRTest_LDADD = ../libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) ProcessorTest_SOURCES = $(top_srcdir)/src/Test.cpp ProcessorTest.cpp ProcessorTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) ProcessorTest_LDADD = ../libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) DeliveryTest_SOURCES = $(top_srcdir)/src/Test.cpp DeliveryTest.cpp DeliveryTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) DeliveryTest_LDADD = ../libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) noinst_PROGRAMS = 
DTRMemTest DTRMemTest_SOURCES = DTRMemTest.cpp DTRMemTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) DTRMemTest_LDADD = ../libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) nordugrid-arc-5.4.2/src/libs/data-staging/test/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315731024303 xustar000000000000000030 mtime=1513200601.607033317 30 atime=1513200649.676621227 30 ctime=1513200662.139773656 nordugrid-arc-5.4.2/src/libs/data-staging/test/Makefile.in0000644000175000002070000012302113214315731024350 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ @MOCK_DMC_ENABLED_TRUE@TESTS = DTRTest$(EXEEXT) ProcessorTest$(EXEEXT) \ @MOCK_DMC_ENABLED_TRUE@ DeliveryTest$(EXEEXT) check_PROGRAMS = $(am__EXEEXT_1) noinst_PROGRAMS = DTRMemTest$(EXEEXT) subdir = src/libs/data-staging/test DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = @MOCK_DMC_ENABLED_TRUE@am__EXEEXT_1 = DTRTest$(EXEEXT) \ @MOCK_DMC_ENABLED_TRUE@ ProcessorTest$(EXEEXT) \ @MOCK_DMC_ENABLED_TRUE@ DeliveryTest$(EXEEXT) PROGRAMS = $(noinst_PROGRAMS) am_DTRMemTest_OBJECTS = DTRMemTest-DTRMemTest.$(OBJEXT) DTRMemTest_OBJECTS = $(am_DTRMemTest_OBJECTS) am__DEPENDENCIES_1 = DTRMemTest_DEPENDENCIES = ../libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(am__DEPENDENCIES_1) 
$(am__DEPENDENCIES_1) DTRMemTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(DTRMemTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_DTRTest_OBJECTS = DTRTest-Test.$(OBJEXT) DTRTest-DTRTest.$(OBJEXT) DTRTest_OBJECTS = $(am_DTRTest_OBJECTS) DTRTest_DEPENDENCIES = ../libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) DTRTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(DTRTest_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_DeliveryTest_OBJECTS = DeliveryTest-Test.$(OBJEXT) \ DeliveryTest-DeliveryTest.$(OBJEXT) DeliveryTest_OBJECTS = $(am_DeliveryTest_OBJECTS) DeliveryTest_DEPENDENCIES = ../libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) DeliveryTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(DeliveryTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_ProcessorTest_OBJECTS = ProcessorTest-Test.$(OBJEXT) \ ProcessorTest-ProcessorTest.$(OBJEXT) ProcessorTest_OBJECTS = $(am_ProcessorTest_OBJECTS) ProcessorTest_DEPENDENCIES = ../libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) ProcessorTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(ProcessorTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(DTRMemTest_SOURCES) $(DTRTest_SOURCES) \ $(DeliveryTest_SOURCES) $(ProcessorTest_SOURCES) DIST_SOURCES = $(DTRMemTest_SOURCES) $(DTRTest_SOURCES) \ $(DeliveryTest_SOURCES) $(ProcessorTest_SOURCES) ETAGS = etags CTAGS = ctags am__tty_colors = \ red=; grn=; lgn=; blu=; std= DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = 
@ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = 
@LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = 
@pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ TESTS_ENVIRONMENT = env ARC_PLUGIN_PATH=$(top_builddir)/src/hed/dmc/mock/.libs:$(top_builddir)/src/hed/dmc/file/.libs DTRTest_SOURCES = $(top_srcdir)/src/Test.cpp DTRTest.cpp DTRTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) DTRTest_LDADD = ../libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) ProcessorTest_SOURCES = $(top_srcdir)/src/Test.cpp ProcessorTest.cpp ProcessorTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) ProcessorTest_LDADD = ../libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) DeliveryTest_SOURCES = $(top_srcdir)/src/Test.cpp DeliveryTest.cpp DeliveryTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) DeliveryTest_LDADD = ../libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) DTRMemTest_SOURCES = DTRMemTest.cpp DTRMemTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) DTRMemTest_LDADD = ../libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/libs/data-staging/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/libs/data-staging/test/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-checkPROGRAMS: @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list DTRMemTest$(EXEEXT): $(DTRMemTest_OBJECTS) $(DTRMemTest_DEPENDENCIES) @rm -f DTRMemTest$(EXEEXT) $(DTRMemTest_LINK) $(DTRMemTest_OBJECTS) $(DTRMemTest_LDADD) $(LIBS) DTRTest$(EXEEXT): $(DTRTest_OBJECTS) $(DTRTest_DEPENDENCIES) @rm -f DTRTest$(EXEEXT) $(DTRTest_LINK) $(DTRTest_OBJECTS) $(DTRTest_LDADD) $(LIBS) DeliveryTest$(EXEEXT): $(DeliveryTest_OBJECTS) $(DeliveryTest_DEPENDENCIES) @rm -f DeliveryTest$(EXEEXT) $(DeliveryTest_LINK) $(DeliveryTest_OBJECTS) $(DeliveryTest_LDADD) $(LIBS) ProcessorTest$(EXEEXT): $(ProcessorTest_OBJECTS) $(ProcessorTest_DEPENDENCIES) @rm -f ProcessorTest$(EXEEXT) $(ProcessorTest_LINK) $(ProcessorTest_OBJECTS) $(ProcessorTest_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DTRMemTest-DTRMemTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DTRTest-DTRTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DTRTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DeliveryTest-DeliveryTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DeliveryTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ProcessorTest-ProcessorTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ProcessorTest-Test.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' 
libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< DTRMemTest-DTRMemTest.o: DTRMemTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRMemTest_CXXFLAGS) $(CXXFLAGS) -MT DTRMemTest-DTRMemTest.o -MD -MP -MF $(DEPDIR)/DTRMemTest-DTRMemTest.Tpo -c -o DTRMemTest-DTRMemTest.o `test -f 'DTRMemTest.cpp' || echo '$(srcdir)/'`DTRMemTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/DTRMemTest-DTRMemTest.Tpo $(DEPDIR)/DTRMemTest-DTRMemTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DTRMemTest.cpp' object='DTRMemTest-DTRMemTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRMemTest_CXXFLAGS) $(CXXFLAGS) -c -o DTRMemTest-DTRMemTest.o `test -f 'DTRMemTest.cpp' || echo '$(srcdir)/'`DTRMemTest.cpp DTRMemTest-DTRMemTest.obj: DTRMemTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRMemTest_CXXFLAGS) $(CXXFLAGS) -MT DTRMemTest-DTRMemTest.obj -MD -MP -MF $(DEPDIR)/DTRMemTest-DTRMemTest.Tpo -c -o DTRMemTest-DTRMemTest.obj `if test -f 'DTRMemTest.cpp'; then $(CYGPATH_W) 'DTRMemTest.cpp'; else $(CYGPATH_W) '$(srcdir)/DTRMemTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/DTRMemTest-DTRMemTest.Tpo $(DEPDIR)/DTRMemTest-DTRMemTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DTRMemTest.cpp' object='DTRMemTest-DTRMemTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRMemTest_CXXFLAGS) $(CXXFLAGS) -c -o DTRMemTest-DTRMemTest.obj `if test -f 'DTRMemTest.cpp'; then $(CYGPATH_W) 'DTRMemTest.cpp'; else $(CYGPATH_W) '$(srcdir)/DTRMemTest.cpp'; fi` DTRTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRTest_CXXFLAGS) $(CXXFLAGS) -MT DTRTest-Test.o -MD -MP -MF $(DEPDIR)/DTRTest-Test.Tpo -c -o DTRTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/DTRTest-Test.Tpo $(DEPDIR)/DTRTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='DTRTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRTest_CXXFLAGS) $(CXXFLAGS) -c -o DTRTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp DTRTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRTest_CXXFLAGS) $(CXXFLAGS) -MT DTRTest-Test.obj -MD -MP -MF $(DEPDIR)/DTRTest-Test.Tpo -c -o DTRTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/DTRTest-Test.Tpo $(DEPDIR)/DTRTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='DTRTest-Test.obj' libtool=no @AMDEPBACKSLASH@ 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRTest_CXXFLAGS) $(CXXFLAGS) -c -o DTRTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` DTRTest-DTRTest.o: DTRTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRTest_CXXFLAGS) $(CXXFLAGS) -MT DTRTest-DTRTest.o -MD -MP -MF $(DEPDIR)/DTRTest-DTRTest.Tpo -c -o DTRTest-DTRTest.o `test -f 'DTRTest.cpp' || echo '$(srcdir)/'`DTRTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/DTRTest-DTRTest.Tpo $(DEPDIR)/DTRTest-DTRTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DTRTest.cpp' object='DTRTest-DTRTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRTest_CXXFLAGS) $(CXXFLAGS) -c -o DTRTest-DTRTest.o `test -f 'DTRTest.cpp' || echo '$(srcdir)/'`DTRTest.cpp DTRTest-DTRTest.obj: DTRTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRTest_CXXFLAGS) $(CXXFLAGS) -MT DTRTest-DTRTest.obj -MD -MP -MF $(DEPDIR)/DTRTest-DTRTest.Tpo -c -o DTRTest-DTRTest.obj `if test -f 'DTRTest.cpp'; then $(CYGPATH_W) 'DTRTest.cpp'; else $(CYGPATH_W) '$(srcdir)/DTRTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/DTRTest-DTRTest.Tpo $(DEPDIR)/DTRTest-DTRTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DTRTest.cpp' object='DTRTest-DTRTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRTest_CXXFLAGS) $(CXXFLAGS) -c -o DTRTest-DTRTest.obj `if test -f 'DTRTest.cpp'; then $(CYGPATH_W) 'DTRTest.cpp'; else $(CYGPATH_W) '$(srcdir)/DTRTest.cpp'; fi` DeliveryTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DeliveryTest_CXXFLAGS) $(CXXFLAGS) -MT DeliveryTest-Test.o -MD -MP -MF $(DEPDIR)/DeliveryTest-Test.Tpo -c -o DeliveryTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/DeliveryTest-Test.Tpo $(DEPDIR)/DeliveryTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='DeliveryTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DeliveryTest_CXXFLAGS) $(CXXFLAGS) -c -o DeliveryTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp DeliveryTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DeliveryTest_CXXFLAGS) $(CXXFLAGS) -MT DeliveryTest-Test.obj -MD -MP -MF $(DEPDIR)/DeliveryTest-Test.Tpo -c -o DeliveryTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/DeliveryTest-Test.Tpo 
$(DEPDIR)/DeliveryTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='DeliveryTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DeliveryTest_CXXFLAGS) $(CXXFLAGS) -c -o DeliveryTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` DeliveryTest-DeliveryTest.o: DeliveryTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DeliveryTest_CXXFLAGS) $(CXXFLAGS) -MT DeliveryTest-DeliveryTest.o -MD -MP -MF $(DEPDIR)/DeliveryTest-DeliveryTest.Tpo -c -o DeliveryTest-DeliveryTest.o `test -f 'DeliveryTest.cpp' || echo '$(srcdir)/'`DeliveryTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/DeliveryTest-DeliveryTest.Tpo $(DEPDIR)/DeliveryTest-DeliveryTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DeliveryTest.cpp' object='DeliveryTest-DeliveryTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DeliveryTest_CXXFLAGS) $(CXXFLAGS) -c -o DeliveryTest-DeliveryTest.o `test -f 'DeliveryTest.cpp' || echo '$(srcdir)/'`DeliveryTest.cpp DeliveryTest-DeliveryTest.obj: DeliveryTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DeliveryTest_CXXFLAGS) $(CXXFLAGS) -MT DeliveryTest-DeliveryTest.obj -MD -MP -MF $(DEPDIR)/DeliveryTest-DeliveryTest.Tpo -c -o DeliveryTest-DeliveryTest.obj `if test -f 'DeliveryTest.cpp'; then $(CYGPATH_W) 'DeliveryTest.cpp'; else $(CYGPATH_W) '$(srcdir)/DeliveryTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/DeliveryTest-DeliveryTest.Tpo $(DEPDIR)/DeliveryTest-DeliveryTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DeliveryTest.cpp' object='DeliveryTest-DeliveryTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DeliveryTest_CXXFLAGS) $(CXXFLAGS) -c -o DeliveryTest-DeliveryTest.obj `if test -f 'DeliveryTest.cpp'; then $(CYGPATH_W) 'DeliveryTest.cpp'; else $(CYGPATH_W) '$(srcdir)/DeliveryTest.cpp'; fi` ProcessorTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProcessorTest_CXXFLAGS) $(CXXFLAGS) -MT ProcessorTest-Test.o -MD -MP -MF $(DEPDIR)/ProcessorTest-Test.Tpo -c -o ProcessorTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ProcessorTest-Test.Tpo $(DEPDIR)/ProcessorTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ProcessorTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProcessorTest_CXXFLAGS) $(CXXFLAGS) -c -o ProcessorTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp ProcessorTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ 
$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProcessorTest_CXXFLAGS) $(CXXFLAGS) -MT ProcessorTest-Test.obj -MD -MP -MF $(DEPDIR)/ProcessorTest-Test.Tpo -c -o ProcessorTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ProcessorTest-Test.Tpo $(DEPDIR)/ProcessorTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ProcessorTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProcessorTest_CXXFLAGS) $(CXXFLAGS) -c -o ProcessorTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` ProcessorTest-ProcessorTest.o: ProcessorTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProcessorTest_CXXFLAGS) $(CXXFLAGS) -MT ProcessorTest-ProcessorTest.o -MD -MP -MF $(DEPDIR)/ProcessorTest-ProcessorTest.Tpo -c -o ProcessorTest-ProcessorTest.o `test -f 'ProcessorTest.cpp' || echo '$(srcdir)/'`ProcessorTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ProcessorTest-ProcessorTest.Tpo $(DEPDIR)/ProcessorTest-ProcessorTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ProcessorTest.cpp' object='ProcessorTest-ProcessorTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProcessorTest_CXXFLAGS) $(CXXFLAGS) -c -o ProcessorTest-ProcessorTest.o `test -f 'ProcessorTest.cpp' || echo '$(srcdir)/'`ProcessorTest.cpp ProcessorTest-ProcessorTest.obj: ProcessorTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProcessorTest_CXXFLAGS) $(CXXFLAGS) -MT ProcessorTest-ProcessorTest.obj -MD -MP -MF $(DEPDIR)/ProcessorTest-ProcessorTest.Tpo -c -o ProcessorTest-ProcessorTest.obj `if test -f 'ProcessorTest.cpp'; then $(CYGPATH_W) 'ProcessorTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ProcessorTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ProcessorTest-ProcessorTest.Tpo $(DEPDIR)/ProcessorTest-ProcessorTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ProcessorTest.cpp' object='ProcessorTest-ProcessorTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProcessorTest_CXXFLAGS) $(CXXFLAGS) -c -o ProcessorTest-ProcessorTest.obj `if test -f 'ProcessorTest.cpp'; then $(CYGPATH_W) 'ProcessorTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ProcessorTest.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) 
$(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? -ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 
's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile $(PROGRAMS) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-checkPROGRAMS clean-generic clean-libtool \ clean-noinstPROGRAMS mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-TESTS check-am clean \ clean-checkPROGRAMS clean-generic clean-libtool \ clean-noinstPROGRAMS ctags distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/libs/data-staging/test/PaxHeaders.7502/DeliveryTest.cpp0000644000000000000000000000012412365443155025376 xustar000000000000000027 mtime=1406551661.397686 27 atime=1513200576.803729 30 ctime=1513200662.142773693 nordugrid-arc-5.4.2/src/libs/data-staging/test/DeliveryTest.cpp0000644000175000002070000001124412365443155025445 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "../DTRStatus.h" #include "../DTR.h" #include "../DataDelivery.h" class DeliveryTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(DeliveryTest); CPPUNIT_TEST(TestDeliverySimple); CPPUNIT_TEST(TestDeliveryFailure); CPPUNIT_TEST(TestDeliveryUnsupported); CPPUNIT_TEST_SUITE_END(); public: void TestDeliverySimple(); void TestDeliveryFailure(); void TestDeliveryUnsupported(); void setUp(); void tearDown(); private: DataStaging::DTRLogger logger; Arc::UserConfig cfg; }; void DeliveryTest::setUp() { // Hack to make sure DataStagingDelivery executable in the parent dir is used // A fake ARC location is used and a symlink is created in the libexec subdir // to the DataStagingDelivery in the parent dir. TODO: maybe put a test flag // in DTR code which tells it to use this local executable. 
Arc::DirCreate(std::string("../tmp/")+std::string(PKGLIBEXECSUBDIR), S_IRWXU, true); Arc::ArcLocation::Init("../tmp/x/x"); Arc::FileLink("../../../DataStagingDelivery", std::string("../tmp/")+std::string(PKGLIBSUBDIR)+std::string("/DataStagingDelivery"), true); logger = new Arc::Logger(Arc::Logger::getRootLogger(), "DataStagingTest"); } void DeliveryTest::tearDown() { Arc::DirDelete("../tmp"); } void DeliveryTest::TestDeliverySimple() { std::string source("mock://mocksrc/1"); std::string destination("mock://mockdest/1"); std::string jobid("1234"); DataStaging::DTR_ptr dtr(new DataStaging::DTR(source,destination,cfg,jobid,Arc::User().get_uid(),logger)); CPPUNIT_ASSERT(*dtr); // Pass DTR to Delivery DataStaging::DataDelivery delivery; delivery.start(); delivery.receiveDTR(dtr); DataStaging::DTRStatus status = dtr->get_status(); // Wait for result. It must be either ERROR or TRANSFERRED at end. // During transfer state may be NULL or TRANSFERRING for(int cnt=0;;++cnt) { status = dtr->get_status(); if(status == DataStaging::DTRStatus::ERROR) { break; } else if(status == DataStaging::DTRStatus::TRANSFERRED) { break; } else if(status == DataStaging::DTRStatus::TRANSFERRING) { } else if(status == DataStaging::DTRStatus::NULL_STATE) { } else { break; } CPPUNIT_ASSERT(cnt < 300); // 30s limit on transfer time Glib::usleep(100000); } CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::TRANSFERRED, status.GetStatus()); CPPUNIT_ASSERT_EQUAL_MESSAGE(dtr->get_error_status().GetDesc(), DataStaging::DTRErrorStatus::NONE_ERROR, dtr->get_error_status().GetErrorStatus()); } void DeliveryTest::TestDeliveryFailure() { std::string source("fail://mocksrc/1"); std::string destination("fail://mockdest/1"); std::string jobid("1234"); DataStaging::DTR_ptr dtr(new DataStaging::DTR(source,destination,cfg,jobid,Arc::User().get_uid(),logger)); CPPUNIT_ASSERT(*dtr); // Pass DTR to Delivery DataStaging::DataDelivery delivery; delivery.start(); delivery.receiveDTR(dtr); DataStaging::DTRStatus status = dtr->get_status(); // Wait for result. It must be either ERROR or TRANSFERRED at end. 
// During transfer state may be NULL or TRANSFERRING for(int cnt=0;;++cnt) { status = dtr->get_status(); if(status == DataStaging::DTRStatus::ERROR) { break; } else if(status == DataStaging::DTRStatus::TRANSFERRED) { break; } else if(status == DataStaging::DTRStatus::TRANSFERRING) { } else if(status == DataStaging::DTRStatus::NULL_STATE) { } else { break; } CPPUNIT_ASSERT(cnt < 200); // 20s limit on transfer time Glib::usleep(100000); } CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::TRANSFERRED, status.GetStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::TEMPORARY_REMOTE_ERROR, dtr->get_error_status().GetErrorStatus()); } void DeliveryTest::TestDeliveryUnsupported() { std::string source("proto://host/file"); std::string destination("mock://mockdest/1"); std::string jobid("1234"); DataStaging::DTR_ptr dtr(new DataStaging::DTR(source,destination,cfg,jobid,Arc::User().get_uid(),logger)); CPPUNIT_ASSERT(!(*dtr)); // Pass DTR to Delivery DataStaging::DataDelivery delivery; delivery.start(); delivery.receiveDTR(dtr); // DTR should be checked by delivery and immediately set to TRANSFERRED // with error status set to LOGIC error CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::TRANSFERRED, dtr->get_status().GetStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::INTERNAL_LOGIC_ERROR, dtr->get_error_status().GetErrorStatus()); } CPPUNIT_TEST_SUITE_REGISTRATION(DeliveryTest); nordugrid-arc-5.4.2/src/libs/data-staging/test/PaxHeaders.7502/DTRMemTest.cpp0000644000000000000000000000012412060141370024665 xustar000000000000000027 mtime=1354810104.605745 27 atime=1513200576.803729 30 ctime=1513200662.140773668 nordugrid-arc-5.4.2/src/libs/data-staging/test/DTRMemTest.cpp0000644000175000002070000000155612060141370024741 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "../DTR.h" /** * A simple program to create a number of DTR objects. The memory * consumption can be checked running a program such as top. 
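 *
 * Usage sketch (assuming the built binary is called DTRMemTest and is run
 * from the build directory; an illustration only, not part of the test suite):
 *   ./DTRMemTest & top -p $!
 * which gives a rough per-DTR memory footprint once all objects are created.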
*/ int main() { std::string jobid("123456789"); std::string source("mock://mocksrc/1"); std::string destination("mock://mockdest/1"); Arc::UserConfig cfg; const int no_dtrs = 10000; std::cout<<"Creating "< #endif #include #include #include #include #include #include "../DTRStatus.h" #include "../Processor.h" class ProcessorTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(ProcessorTest); CPPUNIT_TEST(TestPreClean); CPPUNIT_TEST(TestCacheCheck); CPPUNIT_TEST(TestResolve); CPPUNIT_TEST(TestQueryReplica); CPPUNIT_TEST(TestReplicaRegister); CPPUNIT_TEST(TestCacheProcess); CPPUNIT_TEST_SUITE_END(); public: void TestPreClean(); void TestCacheCheck(); void TestResolve(); void TestQueryReplica(); void TestReplicaRegister(); void TestCacheProcess(); void setUp(); void tearDown(); private: DataStaging::DTRLogger logger; Arc::UserConfig cfg; std::string tmpdir; }; void ProcessorTest::setUp() { logger = new Arc::Logger(Arc::Logger::getRootLogger(), "DataStagingTest"); } void ProcessorTest::tearDown() { if (!tmpdir.empty()) Arc::DirDelete(tmpdir); } void ProcessorTest::TestPreClean() { // Note: mock doesn't really delete, but reports success std::string jobid("123456789"); std::string source("mock://mocksrc/1"); std::string destination("mock://mockdest/1"); DataStaging::DTR_ptr dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logger); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); dtr->set_status(DataStaging::DTRStatus::PRE_CLEAN); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); DataStaging::Processor processor; processor.start(); processor.receiveDTR(dtr); // sleep while thread deletes while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::PRE_CLEANED) Glib::usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::NONE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::PRE_CLEANED, dtr->get_status().GetStatus()); // use a non-existent file destination = "fail://badhost/file1"; dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logger); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); dtr->set_status(DataStaging::DTRStatus::PRE_CLEAN); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); processor.receiveDTR(dtr); // sleep while thread deletes while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::PRE_CLEANED) Glib::usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::TEMPORARY_REMOTE_ERROR, dtr->get_error_status().GetErrorStatus()); // PRE_CLEANED is the correct status even after an error CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::PRE_CLEANED, dtr->get_status().GetStatus()); } void ProcessorTest::TestCacheCheck() { // create tmp cache dir for test CPPUNIT_ASSERT(Arc::TmpDirCreate(tmpdir)); std::string session(tmpdir); session += "/session"; std::string cache_dir(tmpdir); cache_dir += "/cache"; DataStaging::DTRCacheParameters cache_param; cache_param.cache_dirs.push_back(cache_dir); // use non-cacheable input and check it cannot be not cached std::string jobid("123456789"); std::string source("mock://mocksrc;cache=no/1"); std::string destination(std::string(session+"/file1")); DataStaging::DTR_ptr dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logger); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); CPPUNIT_ASSERT(dtr->get_cache_state() == DataStaging::NON_CACHEABLE); dtr->set_cache_parameters(cache_param); // use cacheable input - set invariant since mock does not set a modification // time and so cache file will 
appear outdated source = "mock://mocksrc;cache=invariant/1"; dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logger); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); CPPUNIT_ASSERT(dtr->get_cache_state() == DataStaging::CACHEABLE); dtr->set_cache_parameters(cache_param); dtr->set_status(DataStaging::DTRStatus::CHECK_CACHE); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); std::string cache_file(cache_dir + "/data/58/32ec5285b5990e13fd6628af93ea2b751dac7b"); DataStaging::Processor processor; processor.start(); processor.receiveDTR(dtr); // sleep while thread checks cache while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::CACHE_CHECKED) Glib::usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::CACHE_CHECKED, dtr->get_status().GetStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::CACHEABLE, dtr->get_cache_state()); CPPUNIT_ASSERT_EQUAL(cache_file, dtr->get_cache_file()); // locked file std::string lock_file(cache_file + ".lock"); int fd = ::open(lock_file.c_str(), O_WRONLY|O_CREAT, S_IRUSR|S_IWUSR); CPPUNIT_ASSERT(fd); char lock_contents[] = "1@localhost"; CPPUNIT_ASSERT(write(fd, lock_contents, sizeof(lock_contents)) > 0); CPPUNIT_ASSERT_EQUAL(0, close(fd)); dtr->set_status(DataStaging::DTRStatus::CHECK_CACHE); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); processor.receiveDTR(dtr); // sleep while thread checks cache while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::CACHE_WAIT) Glib::usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::CACHE_WAIT, dtr->get_status().GetStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::CACHE_LOCKED, dtr->get_cache_state()); CPPUNIT_ASSERT_EQUAL(0, remove(lock_file.c_str())); // write cache file fd = ::open(cache_file.c_str(), O_WRONLY|O_CREAT, S_IRUSR|S_IWUSR); CPPUNIT_ASSERT(fd); char cache_file_contents[] = "abcde"; CPPUNIT_ASSERT(write(fd, cache_file_contents, sizeof(cache_file_contents)) > 0); CPPUNIT_ASSERT_EQUAL(0, close(fd)); // check again, should return already present dtr->set_status(DataStaging::DTRStatus::CHECK_CACHE); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); processor.receiveDTR(dtr); // sleep while thread checks cache while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::CACHE_CHECKED) Glib::usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::CACHE_CHECKED, dtr->get_status().GetStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::CACHE_ALREADY_PRESENT, dtr->get_cache_state()); CPPUNIT_ASSERT_EQUAL(0, remove(cache_file.c_str())); // test files using guids are handled properly source = "mock://mocksrc/1:guid=4a2b61aa-1e57-4d32-9f23-873a9c9b9aed"; dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logger); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); CPPUNIT_ASSERT(dtr->get_cache_state() == DataStaging::CACHEABLE); dtr->set_cache_parameters(cache_param); dtr->set_status(DataStaging::DTRStatus::CHECK_CACHE); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); cache_file = cache_dir + "/data/ba/bb0555ddfccde73069558aacfe512ea42c8c79"; processor.receiveDTR(dtr); // sleep while thread checks cache while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::CACHE_CHECKED) Glib::usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::CACHE_CHECKED, dtr->get_status().GetStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::CACHEABLE, dtr->get_cache_state()); CPPUNIT_ASSERT_EQUAL(cache_file, dtr->get_cache_file()); } void ProcessorTest::TestResolve() { // Note: using mock in resolve doesn't really test resolving since mock is 
// not a DataPointIndex DataStaging::Processor processor; processor.start(); std::string jobid("123456789"); // resolve a good source std::string source("mock://mocksrc/1"); std::string destination("mock://mockdest/1"); DataStaging::DTR_ptr dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logger); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); dtr->set_status(DataStaging::DTRStatus::RESOLVE); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); processor.receiveDTR(dtr); // sleep while thread resolves while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::RESOLVED) Glib::usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::NONE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::RESOLVED, dtr->get_status().GetStatus()); // check that it found replicas CPPUNIT_ASSERT(dtr->get_source()->HaveLocations()); /* This part can be uncommented if a mock index DataPoint exists // pre-register a good destination source = "mock://mocksrc/1"; destination = "mockindex://mock://mockdest/1@mockindexdest/1"; dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logger); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); dtr->set_status(DataStaging::DTRStatus::RESOLVE); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); processor.receiveDTR(dtr); // sleep while thread resolves while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::RESOLVED) Glib::usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::NONE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::RESOLVED, dtr->get_status().GetStatus()); // check that it added the destination replica CPPUNIT_ASSERT(dtr->get_destination()->HaveLocations()); CPPUNIT_ASSERT_EQUAL(std::string("mock://mockdest/1"), dtr->get_destination()->CurrentLocation().str()); std::list files; CPPUNIT_ASSERT(dtr->get_destination()->List(files)); CPPUNIT_ASSERT_EQUAL(1, (int)files.size()); CPPUNIT_ASSERT_EQUAL(std::string("mockindex://mockindexdest/1"), files.front().GetName()); // test replication source = "mockindex://mockdestindex/ABCDE"; destination = "mockindex://mock://mockdest/ABCDE@mockindexdest/ABCDE"; dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logger); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); dtr->set_status(DataStaging::DTRStatus::RESOLVE); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); dtr->set_replication(true); // usually set automatically by scheduler processor.receiveDTR(dtr); // sleep while thread resolves while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::RESOLVED) Glib::usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::NONE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::RESOLVED, dtr->get_status().GetStatus()); // check that it found replicas CPPUNIT_ASSERT(dtr->get_source()->HaveLocations()); // check that it added the destination replica CPPUNIT_ASSERT(dtr->get_destination()->HaveLocations()); CPPUNIT_ASSERT_EQUAL(std::string("mock://mockdest/ABCDE"), dtr->get_destination()->CurrentLocation().str()); // copy to an existing LFN from a different LFN source = "mock://mocksrc/2"; destination = "mockindex://mock://mockdest/2@mockindexdest/ABCDE"; dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logger); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); dtr->set_status(DataStaging::DTRStatus::RESOLVE); DataStaging::DTR::push(dtr, 
DataStaging::PRE_PROCESSOR); processor.receiveDTR(dtr); // sleep while thread resolves while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::RESOLVED) Glib::usleep(100); // will fail since force_registration is not set CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::PERMANENT_REMOTE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::RESOLVED, dtr->get_status().GetStatus()); // set force registration and try again dtr->set_force_registration(true); dtr->reset_error_status(); dtr->set_status(DataStaging::DTRStatus::RESOLVE); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); processor.receiveDTR(dtr); // sleep while thread resolves while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::RESOLVED) Glib::usleep(100); // should be successful now CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::NONE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::RESOLVED, dtr->get_status().GetStatus()); // check that it added the destination replica CPPUNIT_ASSERT(dtr->get_destination()->HaveLocations()); CPPUNIT_ASSERT_EQUAL(std::string("mock://mockdest/2"), dtr->get_destination()->CurrentLocation().str()); */ } void ProcessorTest::TestQueryReplica() { // query a valid file std::string jobid("123456789"); std::string source("mock://mocksrc/1"); std::string destination("mock://mockdest/1"); DataStaging::DTR_ptr dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logger); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); DataStaging::Processor processor; processor.start(); dtr->set_status(DataStaging::DTRStatus::QUERY_REPLICA); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); processor.receiveDTR(dtr); // sleep while replica is queried while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::REPLICA_QUERIED) Glib::usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::NONE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::REPLICA_QUERIED, dtr->get_status().GetStatus()); // invalid file source = "fail://mocksrc/1"; dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logger); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); dtr->set_status(DataStaging::DTRStatus::QUERY_REPLICA); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); processor.receiveDTR(dtr); // sleep while replica is queried while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::REPLICA_QUERIED) Glib::usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::TEMPORARY_REMOTE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::REPLICA_QUERIED, dtr->get_status().GetStatus()); } void ProcessorTest::TestReplicaRegister() { /* Needs mock index DMC DataStaging::Processor processor; processor.start(); std::string jobid("123456789"); // register a file std::string source("mock://mocksrc/1"); std::string destination("mockindex://mock://mockdest/1@mockindexdest/1"); DataStaging::DTR_ptr dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logger); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); // have to resolve first CPPUNIT_ASSERT(dtr->get_destination()->Resolve(false).Passed()); CPPUNIT_ASSERT(dtr->get_destination()->HaveLocations()); CPPUNIT_ASSERT_EQUAL(std::string("mock://mockdest/1"), dtr->get_destination()->CurrentLocation().str()); // pre-register CPPUNIT_ASSERT(dtr->get_destination()->PreRegister(false, false).Passed()); // 
post-register dtr->set_status(DataStaging::DTRStatus::REGISTER_REPLICA); DataStaging::DTR::push(dtr, DataStaging::POST_PROCESSOR); processor.receiveDTR(dtr); // sleep while thread resgisters while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::REPLICA_REGISTERED) Glib::usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::NONE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::REPLICA_REGISTERED, dtr->get_status().GetStatus()); // check registration is ok Arc::FileInfo file; Arc::DataPoint::DataPointInfoType verb = (Arc::DataPoint::DataPointInfoType)(Arc::DataPoint::INFO_TYPE_CONTENT | Arc::DataPoint::INFO_TYPE_STRUCT); CPPUNIT_ASSERT(dtr->get_destination()->Stat(file, verb).Passed()); std::list replicas = file.GetURLs(); CPPUNIT_ASSERT_EQUAL(1, (int)replicas.size()); CPPUNIT_ASSERT_EQUAL(std::string("mock://mockdest/1"), replicas.front().str()); // clean up CPPUNIT_ASSERT(dtr->get_destination()->Unregister(true).Passed()); */ } void ProcessorTest::TestCacheProcess() { CPPUNIT_ASSERT(Arc::TmpDirCreate(tmpdir)); std::string session(tmpdir); session += "/session"; std::string cache_dir(tmpdir); cache_dir += "/cache"; DataStaging::DTRCacheParameters cache_param; cache_param.cache_dirs.push_back(cache_dir); std::string jobid("123456789"); std::string source("mock://mocksrc/1"); std::string destination(std::string(session+"/file1")); DataStaging::DTR_ptr dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logger); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); CPPUNIT_ASSERT(dtr->get_cache_state() == DataStaging::CACHEABLE); dtr->set_cache_parameters(cache_param); // process with no cache file present std::string cache_file(cache_dir + "/data/58/32ec5285b5990e13fd6628af93ea2b751dac7b"); remove(cache_file.c_str()); DataStaging::Processor processor; processor.start(); dtr->set_status(DataStaging::DTRStatus::PROCESS_CACHE); DataStaging::DTR::push(dtr, DataStaging::POST_PROCESSOR); processor.receiveDTR(dtr); // sleep while cache is processed while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::CACHE_PROCESSED) Glib::usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::CACHE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::CACHE_PROCESSED, dtr->get_status().GetStatus()); // create cache file and try again CPPUNIT_ASSERT(Arc::DirCreate(std::string(cache_dir+"/data/58"), 0700, true)); int fd = ::open(cache_file.c_str(), O_WRONLY|O_CREAT, S_IRUSR|S_IWUSR); CPPUNIT_ASSERT(fd); char cache_file_contents[] = "abcde"; CPPUNIT_ASSERT_EQUAL_MESSAGE(Arc::StrError(errno), (int)sizeof(cache_file_contents), (int)write(fd, cache_file_contents, sizeof(cache_file_contents))); CPPUNIT_ASSERT_EQUAL(0, close(fd)); dtr->reset_error_status(); dtr->set_status(DataStaging::DTRStatus::PROCESS_CACHE); DataStaging::DTR::push(dtr, DataStaging::POST_PROCESSOR); processor.receiveDTR(dtr); // sleep while cache is processed while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::CACHE_PROCESSED) Glib::usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::NONE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::CACHE_PROCESSED, dtr->get_status().GetStatus()); // check correct links exist struct stat st; CPPUNIT_ASSERT_EQUAL(0, stat(std::string(cache_dir + "/joblinks/123456789/file1").c_str(), &st)); CPPUNIT_ASSERT_EQUAL(0, stat(std::string(session + "/file1").c_str(), &st)); } 
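// Minimal standalone runner sketch (kept as a comment on purpose: the build
// normally links this fixture into its own CppUnit runner, so defining main()
// here would clash; treat this as an illustration, not project code):
//
//   #include <cppunit/ui/text/TestRunner.h>
//   #include <cppunit/extensions/TestFactoryRegistry.h>
//
//   int main() {
//     CppUnit::TextUi::TestRunner runner;
//     // picks up every suite registered via CPPUNIT_TEST_SUITE_REGISTRATION
//     runner.addTest(CppUnit::TestFactoryRegistry::getRegistry().makeTest());
//     return runner.run() ? 0 : 1;  // run() returns true when all tests pass
//   }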
CPPUNIT_TEST_SUITE_REGISTRATION(ProcessorTest); nordugrid-arc-5.4.2/src/libs/data-staging/test/PaxHeaders.7502/DTRTest.cpp0000644000000000000000000000012412060141370024226 xustar000000000000000027 mtime=1354810104.605745 27 atime=1513200576.803729 30 ctime=1513200662.141773681 nordugrid-arc-5.4.2/src/libs/data-staging/test/DTRTest.cpp0000644000175000002070000000532012060141370024273 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "../DTR.h" class DTRTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(DTRTest); CPPUNIT_TEST(TestDTRConstructor); CPPUNIT_TEST(TestDTREndpoints); CPPUNIT_TEST_SUITE_END(); public: void TestDTRConstructor(); void TestDTREndpoints(); void setUp(); void tearDown(); private: DataStaging::DTRLogger logger; Arc::UserConfig cfg; }; void DTRTest::setUp() { logger = new Arc::Logger(Arc::Logger::getRootLogger(), "DataStagingTest"); } void DTRTest::tearDown() { } void DTRTest::TestDTRConstructor() { std::string jobid("123456789"); std::string source("mock://mocksrc/1"); std::string destination("mock://mockdest/1"); DataStaging::DTR_ptr dtr(new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logger)); CPPUNIT_ASSERT(*dtr); CPPUNIT_ASSERT(!dtr->get_id().empty()); // Copy constructor DataStaging::DTR_ptr dtr2(dtr); CPPUNIT_ASSERT(*dtr2); CPPUNIT_ASSERT_EQUAL(dtr->get_id(), dtr2->get_id()); // a new DataHandle object is created for the new DTR so they should // not be equal. Why does this test pass???? CPPUNIT_ASSERT_EQUAL(dtr->get_source(), dtr2->get_source()); CPPUNIT_ASSERT_EQUAL(dtr->get_owner(), dtr2->get_owner()); CPPUNIT_ASSERT_EQUAL(dtr->get_status().GetStatus(), dtr2->get_status().GetStatus()); // check that creating and destroying a copy doesn't affect the original { DataStaging::DTR_ptr dtr3(dtr); CPPUNIT_ASSERT(*dtr3); } CPPUNIT_ASSERT_EQUAL(std::string("mock://mocksrc/1"), dtr->get_source()->str()); // make a bad DTR source = "myprocotol://blabla/file1"; DataStaging::DTR_ptr dtr4(new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logger)); CPPUNIT_ASSERT(!(*dtr4)); // bad DTR copying to itself DataStaging::DTR_ptr dtr5(new DataStaging::DTR(source, source, cfg, jobid, Arc::User().get_uid(), logger)); CPPUNIT_ASSERT(!(*dtr5)); } void DTRTest::TestDTREndpoints() { std::string jobid("123456789"); std::string source("mock://mocksrc/1"); std::string destination("mock://mockdest/1"); DataStaging::DTR_ptr dtr(new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logger)); CPPUNIT_ASSERT(*dtr); CPPUNIT_ASSERT_EQUAL(std::string("mock://mocksrc/1"), dtr->get_source()->str()); CPPUNIT_ASSERT_EQUAL(std::string("mock://mockdest/1"), dtr->get_destination()->str()); // create a bad url source = "mock:/file1"; DataStaging::DTR_ptr dtrbad(new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logger)); CPPUNIT_ASSERT(!dtrbad->get_source()->GetURL()); // TODO DTR validity } CPPUNIT_TEST_SUITE_REGISTRATION(DTRTest); nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/DTR.cpp0000644000000000000000000000012413153455410022416 xustar000000000000000027 mtime=1504598792.949892 27 atime=1513200576.792729 30 ctime=1513200662.107773265 nordugrid-arc-5.4.2/src/libs/data-staging/DTR.cpp0000644000175000002070000003516513153455410022475 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "Processor.h" #include "DataDelivery.h" #include "Scheduler.h" #include "DTR.h" namespace 
DataStaging { static const char* const owner_name[] = { "GENERATOR", "SCHEDULER", "PRE-PROCESSOR", "DELIVERY", "POST-PROCESSOR" }; static const char* get_owner_name(StagingProcesses proc) { if(((int)proc) < 0) return ""; if(((int)proc) >= sizeof(owner_name)/sizeof(const char*)) return ""; return owner_name[proc]; } const Arc::URL DTR::LOCAL_DELIVERY("file:/local"); Arc::LogLevel DTR::LOG_LEVEL(Arc::WARNING); DTR::DTR(const std::string& source, const std::string& destination, const Arc::UserConfig& usercfg, const std::string& jobid, const uid_t& uid, DTRLogger log) : DTR_ID(""), source_url(source), destination_url(destination), cfg(usercfg), source_endpoint(source_url, cfg), destination_endpoint(destination_url, cfg), source_url_str(source_url.str()), destination_url_str(destination_url.str()), use_acix(false), user(uid), parent_job_id(jobid), priority(50), transfershare("_default"), sub_share(""), tries_left(1), initial_tries(1), replication(false), force_registration(false), status(DTRStatus::NEW,"Created by the generator"), bytes_transferred(0), transfer_time(0), created(time(NULL)), cancel_request(false), bulk_start(false), bulk_end(false), source_supports_bulk(false), mandatory(true), delivery_endpoint(LOCAL_DELIVERY), use_host_cert_for_remote_delivery(false), current_owner(GENERATOR), logger(log), delete_log_destinations(true), perf_record(perf_log) { if (!logger) { // use root logger if none is supplied logger = new Arc::Logger(Arc::Logger::getRootLogger(), "DTR"); logger->addDestinations(Arc::Logger::getRootLogger().getDestinations()); } log_destinations = logger->getDestinations(); // check that endpoints can be handled if (!source_endpoint) { logger->msg(Arc::ERROR, "Could not handle endpoint %s", source); return; } if (!destination_endpoint) { logger->msg(Arc::ERROR, "Could not handle endpoint %s", destination); return; } // Some validation checks if (source_url == destination_url) { // It is possible to replicate inside an index service // The physical replicas will be checked in RESOLVING if (source_endpoint->IsIndex() && destination_endpoint->IsIndex()) { replication = true; } else { logger->msg(Arc::ERROR, "Source is the same as destination"); set_error_status(DTRErrorStatus::SELF_REPLICATION_ERROR, DTRErrorStatus::NO_ERROR_LOCATION, "Cannot replicate a file to itself"); return; } } // set insecure by default. Real value will come from configuration source_endpoint->SetSecure(false); destination_endpoint->SetSecure(false); // check for bulk support - call bulk methods with empty list std::list datapoints; if (source_endpoint->IsIndex()) { if (source_endpoint->Resolve(true, datapoints) == Arc::DataStatus::Success) source_supports_bulk = true; } else { std::list files; if (source_endpoint->Stat(files, datapoints) == Arc::DataStatus::Success) source_supports_bulk = true; } #ifdef WIN32 cache_state = NON_CACHEABLE; #else cache_state = (source_endpoint->Cache() && destination_endpoint->Local()) ? 
CACHEABLE : NON_CACHEABLE; #endif if (source_url.Option("failureallowed") == "yes" || destination_url.Option("failureallowed") == "yes") { mandatory = false; } /* Think how to populate transfer parameters */ mark_modification(); set_timeout(60); // setting ID last means all the previous steps have to pass for the DTR to be valid DTR_ID = Arc::UUID(); // Prefix all log messages for this DTR with the short ID for (std::list::iterator dest = log_destinations.begin(); dest != log_destinations.end(); ++dest) { (*dest)->setPrefix("DTR " + get_short_id() + ": "); } } void DTR::registerCallback(DTRCallback* cb, StagingProcesses owner) { lock.lock(); proc_callback[owner].push_back(cb); lock.unlock(); } void DTR::reset() { // remove resolved locations if (source_endpoint->IsIndex()) { source_endpoint->ClearLocations(); } // clear any transfer locations source_endpoint->ClearTransferLocations(); // reset retry count to 1 source_endpoint->SetTries(1); if (destination_endpoint->IsIndex()) { destination_endpoint->ClearLocations(); } destination_endpoint->ClearTransferLocations(); destination_endpoint->SetTries(1); // empty cache and map info cache_file.clear(); mapped_source.clear(); bytes_transferred = 0; transfer_time = 0; reset_error_status(); } void DTR::set_id(const std::string& id) { // sanity check - regular expressions would be useful here if (id.length() != DTR_ID.length()) { logger->msg(Arc::WARNING, "Invalid ID: %s", id); } else { DTR_ID = id; // Change logging prefix to new ID for (std::list::iterator dest = log_destinations.begin(); dest != log_destinations.end(); ++dest) { (*dest)->setPrefix("DTR " + get_short_id() + ": "); } } } std::string DTR::get_short_id() const { if(DTR_ID.length() < 8) return DTR_ID; std::string short_id(DTR_ID.substr(0,4)+"..."+DTR_ID.substr(DTR_ID.length()-4)); return short_id; } void DTR::set_priority(int pri) { // limit priority between 1 and 100 if (pri <= 0) pri = 1; if (pri > 100) pri = 100; priority = pri; mark_modification(); } void DTR::set_tries_left(unsigned int tries) { initial_tries = tries; tries_left = initial_tries; } void DTR::decrease_tries_left() { if (tries_left > 0) tries_left--; } void DTR::set_status(DTRStatus stat) { logger->msg(Arc::VERBOSE, "%s->%s", status.str(), stat.str()); lock.lock(); status = stat; lock.unlock(); mark_modification(); } DTRStatus DTR::get_status() { lock.lock(); DTRStatus s = status; lock.unlock(); return s; } void DTR::set_error_status(DTRErrorStatus::DTRErrorStatusType error_stat, DTRErrorStatus::DTRErrorLocation error_loc, const std::string& desc) { lock.lock(); error_status = DTRErrorStatus(error_stat, status.GetStatus(), error_loc, desc); lock.unlock(); mark_modification(); } void DTR::reset_error_status() { lock.lock(); error_status = DTRErrorStatus(); lock.unlock(); mark_modification(); } DTRErrorStatus DTR::get_error_status() { lock.lock(); DTRErrorStatus s = error_status; lock.unlock(); return s; } void DTR::set_bytes_transferred(unsigned long long int bytes) { bytes_transferred = bytes; } void DTR::set_transfer_time(unsigned long long int t) { transfer_time = t; } void DTR::set_cache_file(const std::string& filename) { cache_file = filename; mark_modification(); } void DTR::set_cache_state(CacheState state) { cache_state = state; mark_modification(); } void DTR::set_cancel_request() { cancel_request = true; // set process time to now so it is picked up straight away set_process_time(0); mark_modification(); } void DTR::set_process_time(const Arc::Period& process_time) { Arc::Time t; t = t + process_time; 
next_process_time.SetTime(t.GetTime(), t.GetTimeNanoseconds()); } bool DTR::bulk_possible() { if (status == DTRStatus::RESOLVE && source_supports_bulk) return true; if (status == DTRStatus::QUERY_REPLICA) { std::list files; std::list datapoints; if (source_endpoint->CurrentLocationHandle()->Stat(files, datapoints) == Arc::DataStatus::Success) return true; } return false; } void DTR::clean_log_destinations(Arc::LogDestination* exclude) { // This method makes sure log_destinations is in sync with the LogDestination // objects inside logger. First clear internal list (not deleting objects) log_destinations.clear(); // Then delete objects inside logger if requested if (logger) { if (delete_log_destinations) logger->deleteDestinations(exclude); else logger->removeDestinations(); } } std::list DTR::get_callbacks(const std::map >& proc_callback, StagingProcesses owner) { std::list l; lock.lock(); std::map >::const_iterator c = proc_callback.find(owner); if(c == proc_callback.end()) { lock.unlock(); return l; } l = c->second; lock.unlock(); return l; } void DTR::push(DTR_ptr dtr, StagingProcesses new_owner) { /* This function contains necessary operations * to pass the pointer to this DTR to another * process and make sure that the process accepted it */ dtr->lock.lock(); dtr->current_owner = new_owner; dtr->lock.unlock(); std::list callbacks = dtr->get_callbacks(dtr->proc_callback,dtr->current_owner); if (callbacks.empty()) dtr->logger->msg(Arc::INFO, "No callback for %s defined", get_owner_name(dtr->current_owner)); for (std::list::iterator callback = callbacks.begin(); callback != callbacks.end(); ++callback) { switch(dtr->current_owner) { case GENERATOR: case SCHEDULER: case PRE_PROCESSOR: case DELIVERY: case POST_PROCESSOR: { // call registered callback if (*callback) (*callback)->receiveDTR(dtr); else dtr->logger->msg(Arc::WARNING, "NULL callback for %s", get_owner_name(dtr->current_owner)); } break; default: // impossible dtr->logger->msg(Arc::INFO, "Request to push to unknown owner - %u", (unsigned int)dtr->current_owner); break; } } dtr->mark_modification(); } bool DTR::suspend() { /* This function will contain necessary operations * to stop the transfer in the DTR */ mark_modification(); return true; } bool DTR::is_destined_for_pre_processor() const { return (status == DTRStatus::PRE_CLEAN || status == DTRStatus::CHECK_CACHE || status == DTRStatus::RESOLVE || status == DTRStatus::QUERY_REPLICA || status == DTRStatus::STAGE_PREPARE); } bool DTR::is_destined_for_post_processor() const { return (status == DTRStatus::RELEASE_REQUEST || status == DTRStatus::REGISTER_REPLICA || status == DTRStatus::PROCESS_CACHE); } bool DTR::is_destined_for_delivery() const { return (status == DTRStatus::TRANSFER); } bool DTR::came_from_pre_processor() const { return (status == DTRStatus::PRE_CLEANED || status == DTRStatus::CACHE_WAIT || status == DTRStatus::CACHE_CHECKED || status == DTRStatus::RESOLVED || status == DTRStatus::REPLICA_QUERIED || status == DTRStatus::STAGING_PREPARING_WAIT || status == DTRStatus::STAGED_PREPARED); } bool DTR::came_from_post_processor() const { return (status == DTRStatus::REQUEST_RELEASED || status == DTRStatus::REPLICA_REGISTERED || status == DTRStatus::CACHE_PROCESSED); } bool DTR::came_from_delivery() const { return (status == DTRStatus::TRANSFERRED); } bool DTR::came_from_generator() const { return (status == DTRStatus::NEW); } bool DTR::is_in_final_state() const { return (status == DTRStatus::DONE || status == DTRStatus::CANCELLED || status == DTRStatus::ERROR); } void 
DTR::set_transfer_share(const std::string& share_name) { lock.lock(); transfershare = share_name; if (!sub_share.empty()) transfershare += "-" + sub_share; lock.unlock(); } DTRCacheParameters::DTRCacheParameters(std::vector caches, std::vector remote_caches, std::vector drain_caches): cache_dirs(caches), remote_cache_dirs(remote_caches), drain_cache_dirs(drain_caches) { } DTRCredentialInfo::DTRCredentialInfo(const std::string& DN, const Arc::Time& expirytime, const std::list vomsfqans): DN(DN), expirytime(expirytime), vomsfqans(vomsfqans) { } std::string DTRCredentialInfo::extractVOMSVO() const { if (vomsfqans.empty()) return ""; std::vector parts; Arc::tokenize(*(vomsfqans.begin()), parts, "/"); return parts.at(0); } std::string DTRCredentialInfo::extractVOMSGroup() const { if (vomsfqans.empty()) return ""; std::string vomsvo; for (std::list::const_iterator i = vomsfqans.begin(); i != vomsfqans.end(); ++i) { std::vector parts; Arc::tokenize(*i, parts, "/"); if (vomsvo.empty()) vomsvo = parts.at(0); if (parts.size() > 1 && parts.at(1).find("Role=") != 0) { return std::string(vomsvo+":"+parts.at(1)); } } return std::string(vomsvo + ":null"); } std::string DTRCredentialInfo::extractVOMSRole() const { if (vomsfqans.empty()) return ""; std::string vomsvo; for (std::list::const_iterator i = vomsfqans.begin(); i != vomsfqans.end(); ++i) { std::vector parts; Arc::tokenize(*i, parts, "/"); if (vomsvo.empty()) vomsvo = parts.at(0); if (parts.size() > 1 && parts.at(1).find("Role=") == 0) { return std::string(parts.at(0)+":"+parts.at(1).substr(5)); } } return std::string(vomsvo + ":null"); } DTR_ptr createDTRPtr(const std::string& source, const std::string& destination, const Arc::UserConfig& usercfg, const std::string& jobid, const uid_t& uid, DTRLogger log) { return DTR_ptr(new DTR(source, destination, usercfg, jobid, uid, log)); } DTRLogger createDTRLogger(Arc::Logger& parent, const std::string& subdomain) { return DTRLogger(new Arc::Logger(parent, subdomain)); } } // namespace DataStaging nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/DTRList.cpp0000644000000000000000000000012412675602216023260 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200576.791729 30 ctime=1513200662.108773277 nordugrid-arc-5.4.2/src/libs/data-staging/DTRList.cpp0000644000175000002070000001506012675602216023327 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "DTRList.h" namespace DataStaging { bool DTRList::add_dtr(DTR_ptr DTRToAdd) { Lock.lock(); DTRs.push_back(DTRToAdd); Lock.unlock(); // Added successfully return true; } bool DTRList::delete_dtr(DTR_ptr DTRToDelete) { Lock.lock(); DTRs.remove(DTRToDelete); Lock.unlock(); // Deleted successfully return true; } bool DTRList::filter_dtrs_by_owner(StagingProcesses OwnerToFilter, std::list& FilteredList){ std::list::iterator it; Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it) if((*it)->get_owner() == OwnerToFilter) FilteredList.push_back(*it); Lock.unlock(); // Filtered successfully return true; } int DTRList::number_of_dtrs_by_owner(StagingProcesses OwnerToFilter){ std::list::iterator it; int counter = 0; Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it) if((*it)->get_owner() == OwnerToFilter) counter++; Lock.unlock(); // Filtered successfully return counter; } bool DTRList::filter_dtrs_by_status(DTRStatus::DTRStatusType StatusToFilter, std::list& FilteredList){ std::vector StatusesToFilter(1, StatusToFilter); return filter_dtrs_by_statuses(StatusesToFilter, FilteredList); } 
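  // Typical call pattern (illustrative only; the variable names below are
  // assumptions, not taken from project code):
  //   std::list<DTR_ptr> finished;
  //   dtr_list.filter_dtrs_by_status(DTRStatus::TRANSFERRED, finished);
  // The single-status overload above simply wraps the multi-status version
  // that follows, so both take the same internal lock while iterating.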
bool DTRList::filter_dtrs_by_statuses(const std::vector& StatusesToFilter, std::list& FilteredList){ std::list::iterator it; Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it) { for (std::vector::const_iterator i = StatusesToFilter.begin(); i != StatusesToFilter.end(); ++i) { if((*it)->get_status().GetStatus() == *i) { FilteredList.push_back(*it); break; } } } Lock.unlock(); // Filtered successfully return true; } bool DTRList::filter_dtrs_by_statuses(const std::vector& StatusesToFilter, std::map >& FilteredList) { std::list::iterator it; Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it) { for (std::vector::const_iterator i = StatusesToFilter.begin(); i != StatusesToFilter.end(); ++i) { if((*it)->get_status().GetStatus() == *i) { FilteredList[*i].push_back(*it); break; } } } Lock.unlock(); // Filtered successfully return true; } bool DTRList::filter_dtrs_by_next_receiver(StagingProcesses NextReceiver, std::list& FilteredList) { std::list::iterator it; switch(NextReceiver){ case PRE_PROCESSOR: { Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it) if((*it)->is_destined_for_pre_processor()) FilteredList.push_back(*it); Lock.unlock(); return true; } case POST_PROCESSOR: { Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it) if((*it)->is_destined_for_post_processor()) FilteredList.push_back(*it); Lock.unlock(); return true; } case DELIVERY: { Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it) if((*it)->is_destined_for_delivery()) FilteredList.push_back(*it); Lock.unlock(); return true; } default: // A strange receiver requested return false; } } bool DTRList::filter_pending_dtrs(std::list& FilteredList){ std::list::iterator it; Arc::Time now; Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it){ if( ((*it)->came_from_pre_processor() || (*it)->came_from_post_processor() || (*it)->came_from_delivery() || (*it)->came_from_generator()) && ((*it)->get_process_time() <= now) ) FilteredList.push_back(*it); } Lock.unlock(); // Filtered successfully return true; } bool DTRList::filter_dtrs_by_job(const std::string& jobid, std::list& FilteredList) { std::list::iterator it; Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it) if((*it)->get_parent_job_id() == jobid) FilteredList.push_back(*it); Lock.unlock(); // Filtered successfully return true; } void DTRList::caching_started(DTR_ptr request) { CachingLock.lock(); CachingSources.insert(request->get_source_str()); CachingLock.unlock(); } void DTRList::caching_finished(DTR_ptr request) { CachingLock.lock(); CachingSources.erase(request->get_source_str()); CachingLock.unlock(); } bool DTRList::is_being_cached(DTR_ptr DTRToCheck) { CachingLock.lock(); bool caching = (CachingSources.find(DTRToCheck->get_source_str()) != CachingSources.end()); CachingLock.unlock(); return caching; } bool DTRList::empty() { Lock.lock(); bool empty = DTRs.empty(); Lock.unlock(); return empty; } unsigned int DTRList::size() { Lock.lock(); unsigned int size = DTRs.size(); Lock.unlock(); return size; } std::list DTRList::all_jobs() { std::list alljobs; std::list::iterator it; Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it) { std::list::iterator i = alljobs.begin(); for (; i != alljobs.end(); ++i) { if (*i == (*it)->get_parent_job_id()) break; } if (i == alljobs.end()) alljobs.push_back((*it)->get_parent_job_id()); } Lock.unlock(); return alljobs; } void DTRList::dumpState(const std::string& path) { // only files supported for now - simply overwrite path std::string data; Lock.lock(); for(std::list::iterator it = 
DTRs.begin();it != DTRs.end(); ++it) { data += (*it)->get_id() + " " + (*it)->get_status().str() + " " + Arc::tostring((*it)->get_priority()) + " " + (*it)->get_transfer_share(); // add destination for recovery after crash if ((*it)->get_status() == DTRStatus::TRANSFERRING || (*it)->get_status() == DTRStatus::TRANSFER) { data += " " + (*it)->get_destination()->CurrentLocation().fullstr(); data += " " + (*it)->get_delivery_endpoint().Host(); } data += "\n"; } Lock.unlock(); Arc::FileCreate(path, data); } } // namespace DataStaging nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/TransferShares.h0000644000000000000000000000012413153455410024364 xustar000000000000000027 mtime=1504598792.949892 27 atime=1513200576.805729 30 ctime=1513200662.098773154 nordugrid-arc-5.4.2/src/libs/data-staging/TransferShares.h0000644000175000002070000001171013153455410024431 0ustar00mockbuildmock00000000000000#ifndef TRANSFERSHARES_H_ #define TRANSFERSHARES_H_ #include #include "DTR.h" namespace DataStaging { /// TransferSharesConf describes the configuration of TransferShares. /** * It allows reference shares to be defined with certain priorities. An * instance of this class is used when creating a TransferShares object. * \ingroup datastaging * \headerfile TransferShares.h arc/data-staging/TransferShares.h */ class TransferSharesConf { public: /// The criterion for assigning a share to a DTR enum ShareType { /// Shares are defined per DN of the user's proxy USER, /// Shares are defined per VOMS VO of the user's proxy VO, /// Shares are defined per VOMS group of the user's proxy GROUP, /// Shares are defined per VOMS role of the user's proxy ROLE, /// No share criterion - all DTRs will be assigned to a single share NONE }; private: /// ReferenceShares are special shares defined in the configuration with /// specific priorities. The "_default" share must always be defined. std::map ReferenceShares; /// Configured share type ShareType shareType; public: /// Construct a new TransferSharesConf with given share type and reference shares TransferSharesConf(const std::string& type, const std::map& ref_shares); /// Construct a new TransferSharesConf with no defined shares or policy TransferSharesConf(); /// Set the share type void set_share_type(const std::string& type); /// Add a reference share void set_reference_share(const std::string& RefShare, int Priority); /// Set reference shares void set_reference_shares(const std::map& shares); /// Returns true if the given share is a reference share bool is_configured(const std::string& ShareToCheck); /// Get the priority of this share int get_basic_priority(const std::string& ShareToCheck); /// Return human-readable configuration of shares std::string conf() const; /// Get the name of the share the DTR should be assigned to and the proxy type std::string extract_share_info(DTR_ptr DTRToExtract); }; /// TransferShares is used to implement fair-sharing and priorities. /** * TransferShares defines the algorithm used to prioritise and share * transfers among different users or groups. Configuration information on * the share type and reference shares is held in a TransferSharesConf * instance. The Scheduler uses TransferShares to determine which DTRs in the * queue for each process go first. The calculation is based on the * configuration and the currently active shares (the DTRs already in the * process). can_start() is the method called by the Scheduler to * determine whether a particular share has an available slot in the process. 
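 *
 * Rough illustration (the numbers are assumptions, not defaults from any
 * configuration): with reference shares "_default" at priority 50 and
 * "atlas" at priority 100, both active, calculate_shares(12) grants the
 * "atlas" share roughly twice as many of the 12 slots as "_default", and
 * can_start("atlas") keeps returning true until those granted slots are
 * exhausted.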
* \ingroup datastaging * \headerfile TransferShares.h arc/data-staging/TransferShares.h */ class TransferShares { private: /// Configuration of share type and reference shares TransferSharesConf conf; /// Shares which are active, ie running or in the queue, and number of DTRs std::map ActiveShares; /// How many transfer slots each active share can grab std::map ActiveSharesSlots; public: /// Create a new TransferShares with default configuration TransferShares() {}; /// Create a new TransferShares with given configuration TransferShares(const TransferSharesConf& shares_conf); /// Empty destructor ~TransferShares(){}; /// Set a new configuration, if a new reference share gets added for example void set_shares_conf(const TransferSharesConf& share_conf); /// Calculate how many slots to assign to each active share. /** * This method is called each time the Scheduler loops to calculate the * number of slots to assign to each share, based on the current number * of active shares and the shares' relative priorities. */ void calculate_shares(int TotalNumberOfSlots); /// Increase by one the active count for the given share. Called when a new DTR enters the queue. void increase_transfer_share(const std::string& ShareToIncrease); /// Decrease by one the active count for the given share. Called when a completed DTR leaves the queue. void decrease_transfer_share(const std::string& ShareToDecrease); /// Decrease by one the number of slots available to the given share. /** * Called when there is a slot already used by this share to reduce the * number available. */ void decrease_number_of_slots(const std::string& ShareToDecrease); /// Returns true if there is a slot available for the given share bool can_start(const std::string& ShareToStart); /// Returns the map of active shares std::map active_shares() const; }; // class TransferShares } // namespace DataStaging #endif /* TRANSFERSHARES_H_ */ nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/Processor.cpp0000644000000000000000000000012313153455410023743 xustar000000000000000027 mtime=1504598792.949892 26 atime=1513200576.81673 30 ctime=1513200662.110773301 nordugrid-arc-5.4.2/src/libs/data-staging/Processor.cpp0000644000175000002070000012065513153455410024022 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "DTRStatus.h" #include "Processor.h" namespace DataStaging { std::string Processor::hostname; /** Set up logging. Should be called at the start of each thread method. */ void setUpLogger(DTR_ptr request) { // disconnect this thread's root logger Arc::Logger::getRootLogger().setThreadContext(); Arc::Logger::getRootLogger().removeDestinations(); Arc::Logger::getRootLogger().addDestinations(request->get_logger()->getDestinations()); // now disconnect the DTR logger - the root logger is enabled and we // don't want duplicate messages. IMPORTANT: the DTR logger must be // re-enabled at the end of the thread request->disconnect_logger(); } Processor::Processor() { // Get hostname, needed to exclude ACIX replicas on localhost char hostn[256]; if (gethostname(hostn, sizeof(hostn)) == 0){ hostname = hostn; } } /* Thread methods for each state of the DTR */ void Processor::DTRCheckCache(void* arg) { ThreadArgument* targ = (ThreadArgument*)arg; DTR_ptr request = targ->dtr; delete targ; setUpLogger(request); // IMPORTANT: This method creates a lock on the cached file for // this DTR. 
It must be released at some point using ProcessCache // Create cache using configuration Arc::FileCache cache(request->get_cache_parameters().cache_dirs, request->get_cache_parameters().remote_cache_dirs, request->get_cache_parameters().drain_cache_dirs, request->get_parent_job_id(), request->get_local_user().get_uid(), request->get_local_user().get_gid()); if (!cache) { request->get_logger()->msg(Arc::ERROR, "Error creating cache"); request->set_cache_state(CACHE_SKIP); request->set_error_status(DTRErrorStatus::CACHE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Failed to create cache"); request->set_status(DTRStatus::CACHE_CHECKED); request->connect_logger(); DTR::push(request, SCHEDULER); return; } // DN is used for checking cache permissions std::string dn = request->get_credential_info().getDN(); Arc::Time exp_time = request->get_credential_info().getExpiryTime(); std::string canonic_url(request->get_source()->GetURL().plainstr()); std::string cacheoption(request->get_source()->GetURL().Option("cache")); // add guid if present // TODO handle guids better in URL class so we don't need to care here if (!request->get_source()->GetURL().MetaDataOption("guid").empty()) canonic_url += ":guid=" + request->get_source()->GetURL().MetaDataOption("guid"); bool is_in_cache = false; bool is_locked = false; bool use_remote = true; // check for forced re-download option bool renew = (cacheoption == "renew"); if (renew) request->get_logger()->msg(Arc::VERBOSE, "Forcing re-download of file %s", canonic_url); for (;;) { if (!cache.Start(canonic_url, is_in_cache, is_locked, use_remote, renew)) { if (is_locked) { request->get_logger()->msg(Arc::WARNING, "Cached file is locked - should retry"); request->set_cache_state(CACHE_LOCKED); request->set_status(DTRStatus::CACHE_WAIT); // set a flat wait time with some randomness, fine-grained to minimise lock clashes // this may change in future eg be taken from configuration or increase over time time_t cache_wait_time = 10; time_t randomness = (rand() % cache_wait_time) - (cache_wait_time/2); cache_wait_time += randomness; // add random number of milliseconds uint32_t nano_randomness = (rand() % 1000) * 1000000; Arc::Period cache_wait_period(cache_wait_time, nano_randomness); request->get_logger()->msg(Arc::INFO, "Will wait around %is", cache_wait_time); request->set_process_time(cache_wait_period); request->connect_logger(); DTR::push(request, SCHEDULER); return; } request->get_logger()->msg(Arc::ERROR, "Failed to initiate cache"); request->set_cache_state(CACHE_SKIP); request->set_error_status(DTRErrorStatus::CACHE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Failed to initiate cache"); break; } request->set_cache_file(cache.File(canonic_url)); if (is_in_cache) { // Whether cache file is outdated bool outdated = (cacheoption != "invariant"); // Check source if requested if (cacheoption == "check") { request->get_logger()->msg(Arc::INFO, "Force-checking source of cache file %s", cache.File(canonic_url)); Arc::DataStatus cres = request->get_source()->Check(true); if (!cres.Passed()) { request->get_logger()->msg(Arc::ERROR, "Source check requested but failed: %s", std::string(cres)); // Try again skipping cache, maybe this is not worth it request->set_cache_state(CACHE_SKIP); request->set_error_status(DTRErrorStatus::CACHE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Failed to check source for " + canonic_url + ": " + std::string(cres)); break; } } else { // just need to check permissions request->get_logger()->msg(Arc::INFO, "File %s is cached (%s) - checking 
permissions", canonic_url, cache.File(canonic_url)); // check the list of cached DNs if (cache.CheckDN(canonic_url, dn)) { outdated = false; // If DN is cached then don't check creation date } else { Arc::DataStatus cres = request->get_source()->Check(cacheoption != "invariant"); if (!cres.Passed()) { request->get_logger()->msg(Arc::ERROR, "Permission checking failed, will try downloading without using cache"); request->set_cache_state(CACHE_SKIP); request->set_error_status(DTRErrorStatus::CACHE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Failed to check cache permissions for " + canonic_url + ": " + std::string(cres)); break; } cache.AddDN(canonic_url, dn, exp_time); } } request->get_logger()->msg(Arc::INFO, "Permission checking passed"); // check if file is fresh enough if (request->get_source()->CheckModified() && cache.CheckCreated(canonic_url)) { Arc::Time sourcetime = request->get_source()->GetModified(); Arc::Time cachetime = cache.GetCreated(canonic_url); request->get_logger()->msg(Arc::VERBOSE, "Source modification date: %s", sourcetime.str()); request->get_logger()->msg(Arc::VERBOSE, "Cache creation date: %s", cachetime.str()); if (sourcetime <= cachetime) outdated = false; } if (outdated) { request->get_logger()->msg(Arc::INFO, "Cached file is outdated, will re-download"); use_remote = false; renew = true; continue; } // cached file is present and valid request->get_logger()->msg(Arc::VERBOSE, "Cached copy is still valid"); request->set_cache_state(CACHE_ALREADY_PRESENT); } else { // file is not there but we are ready to download it request->get_logger()->msg(Arc::VERBOSE, "Will download to cache file %s", request->get_cache_file()); request->set_cache_state(CACHEABLE); } break; } request->set_status(DTRStatus::CACHE_CHECKED); request->connect_logger(); DTR::push(request, SCHEDULER); } void Processor::DTRResolve(void* arg) { // call request->source.Resolve() to get replicas // call request->destination.Resolve() to check supplied replicas // call request->destination.PreRegister() to lock destination LFN ThreadArgument* targ = (ThreadArgument*)arg; DTR_ptr request = targ->dtr; delete targ; setUpLogger(request); // check for source replicas if (request->get_source()->IsIndex()) { request->get_logger()->msg(Arc::VERBOSE, "Looking up source replicas"); Arc::DataStatus res = request->get_source()->Resolve(true); if (!res.Passed() || !request->get_source()->HaveLocations() || !request->get_source()->LocationValid()) { request->get_logger()->msg(Arc::ERROR, std::string(res)); request->set_error_status(res.Retryable() ? 
DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "Could not resolve any source replicas for " + request->get_source()->str() + ": " + std::string(res)); request->set_status(DTRStatus::RESOLVED); request->connect_logger(); DTR::push(request, SCHEDULER); return; } } // If using ACIX, remove sources on our own host if (request->get_use_acix()) { int tries = request->get_source()->GetTries(); while (request->get_source()->LocationValid()) { if (request->get_source()->CurrentLocation().Host() == Processor::hostname) { request->get_logger()->msg(Arc::INFO, "Skipping replica on local host %s", request->get_source()->CurrentLocation().str()); request->get_source()->RemoveLocation(); } else { request->get_source()->NextLocation(); } } // Check that there are still replicas to use if (!request->get_source()->HaveLocations()) { request->get_logger()->msg(Arc::ERROR, "No locations left for %s", request->get_source()->str()); request->set_error_status(DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "Could not resolve any source replicas for " + request->get_source()->str()); request->set_status(DTRStatus::RESOLVED); request->connect_logger(); DTR::push(request, SCHEDULER); return; } // reset retries request->get_source()->SetTries(tries); } // If overwrite is requested, the resolving and pre-registering of the // destination will be done in the pre-clean stage after deleting. if (!request->is_replication() && request->get_destination()->GetURL().Option("overwrite") == "yes") { request->set_status(DTRStatus::RESOLVED); request->connect_logger(); DTR::push(request, SCHEDULER); return; } // Check replicas supplied for destination if (request->get_destination()->IsIndex()) { request->get_logger()->msg(Arc::VERBOSE, "Resolving destination replicas"); Arc::DataStatus res = request->get_destination()->Resolve(false); if (!res.Passed() || !request->get_destination()->HaveLocations() || !request->get_destination()->LocationValid()) { request->get_logger()->msg(Arc::ERROR, std::string(res)); request->set_error_status(res.Retryable() ? DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Could not resolve any destination replicas for " + request->get_destination()->str() + ": " + std::string(res)); request->set_status(DTRStatus::RESOLVED); request->connect_logger(); DTR::push(request, SCHEDULER); return; } } // check that replication is possible if (request->is_replication()) { // we do not want to replicate to same physical file request->get_destination()->RemoveLocations(*(request->get_source())); if (!request->get_destination()->HaveLocations()) { request->get_logger()->msg(Arc::ERROR, "No locations for destination different from source found"); request->set_error_status(DTRErrorStatus::SELF_REPLICATION_ERROR, DTRErrorStatus::NO_ERROR_LOCATION, "No locations for destination different from source found for " + request->get_destination()->str()); request->set_status(DTRStatus::RESOLVED); request->connect_logger(); DTR::push(request, SCHEDULER); return; } } // pre-register destination if (request->get_destination()->IsIndex()) { request->get_logger()->msg(Arc::VERBOSE, "Pre-registering destination in index service"); Arc::DataStatus res = request->get_destination()->PreRegister(request->is_replication(), request->is_force_registration()); if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, std::string(res)); request->set_error_status(res.Retryable() ? 
DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Could not pre-register destination " + request->get_destination()->str() + ": " + std::string(res)); } } // finished with resolving - send back to scheduler request->set_status(DTRStatus::RESOLVED); request->connect_logger(); DTR::push(request, SCHEDULER); } void Processor::DTRBulkResolve(void* arg) { // call request->source.BulkResolve() to get replicas // NOTE only source resolution can be done in bulk BulkThreadArgument* targ = (BulkThreadArgument*)arg; std::list requests = targ->dtrs; delete targ; if (requests.empty()) return; std::list sources; for (std::list::iterator i = requests.begin(); i != requests.end(); ++i) { setUpLogger(*i); (*i)->get_logger()->msg(Arc::VERBOSE, "Resolving source replicas in bulk"); sources.push_back(&(*((*i)->get_source()))); // nasty... } // check for source replicas Arc::DataStatus res = requests.front()->get_source()->Resolve(true, sources); for (std::list::iterator i = requests.begin(); i != requests.end(); ++i) { DTR_ptr request = *i; if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, std::string(res)); request->set_error_status(res.Retryable() ? DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "Could not resolve any source replicas for " + request->get_source()->str() + ": " + std::string(res)); } else if (!request->get_source()->HaveLocations() || !request->get_source()->LocationValid()) { request->get_logger()->msg(Arc::ERROR, "No replicas found for %s", request->get_source()->str()); request->set_error_status(DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "No replicas found for " + request->get_source()->str()); } // If using ACIX, remove sources on our own host if (request->get_use_acix()) { int tries = request->get_source()->GetTries(); while (request->get_source()->LocationValid()) { if (request->get_source()->CurrentLocation().Host() == Processor::hostname) { request->get_logger()->msg(Arc::INFO, "Skipping replica on local host %s", request->get_source()->CurrentLocation().str()); request->get_source()->RemoveLocation(); } else { request->get_source()->NextLocation(); } } // Check that there are still replicas to use if (!request->get_source()->HaveLocations()) { request->get_logger()->msg(Arc::ERROR, "No locations left for %s", request->get_source()->str()); request->set_error_status(DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "Could not resolve any source replicas for " + request->get_source()->str()); } // reset retries request->get_source()->SetTries(tries); } request->set_status(DTRStatus::RESOLVED); request->connect_logger(); DTR::push(request, SCHEDULER); } } void Processor::DTRQueryReplica(void* arg) { // check source is ok and obtain metadata ThreadArgument* targ = (ThreadArgument*)arg; DTR_ptr request = targ->dtr; delete targ; setUpLogger(request); Arc::DataStatus res; request->get_logger()->msg(Arc::INFO, "Checking %s", request->get_source()->CurrentLocation().str()); if (request->get_source()->IsIndex()) { res = request->get_source()->CompareLocationMetadata(); } else { Arc::FileInfo file; res = request->get_source()->Stat(file, Arc::DataPoint::INFO_TYPE_CONTENT); } if (res == Arc::DataStatus::InconsistentMetadataError) { request->get_logger()->msg(Arc::ERROR, "Metadata of replica and index service differ"); request->set_error_status(DTRErrorStatus::PERMANENT_REMOTE_ERROR, 
DTRErrorStatus::ERROR_SOURCE, "Metadata of replica and index service differ for " + request->get_source()->CurrentLocation().str() + " and " + request->get_source()->str()); } else if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, "Failed checking source replica %s: %s", request->get_source()->CurrentLocation().str(), std::string(res) ); request->set_error_status(res.Retryable() ? DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "Failed checking source replica " + request->get_source()->CurrentLocation().str() + ": " + std::string(res)); } else { // assign metadata to destination request->get_destination()->SetMeta(*request->get_source()); } // finished querying - send back to scheduler request->set_status(DTRStatus::REPLICA_QUERIED); request->connect_logger(); DTR::push(request, SCHEDULER); } void Processor::DTRBulkQueryReplica(void* arg) { BulkThreadArgument* targ = (BulkThreadArgument*)arg; std::list requests = targ->dtrs; delete targ; if (requests.empty()) return; std::list sources; for (std::list::iterator i = requests.begin(); i != requests.end(); ++i) { setUpLogger(*i); (*i)->get_logger()->msg(Arc::VERBOSE, "Querying source replicas in bulk"); sources.push_back((*i)->get_source()->CurrentLocationHandle()); } // Query source std::list files; Arc::DataStatus res = sources.front()->Stat(files, sources, Arc::DataPoint::INFO_TYPE_CONTENT); std::list::const_iterator file = files.begin(); for (std::list::iterator i = requests.begin(); i != requests.end(); ++i, ++file) { DTR_ptr request = *i; if (!res.Passed() || files.size() != requests.size()) { request->get_logger()->msg(Arc::ERROR, "Failed checking source replica: %s", std::string(res)); request->set_error_status(res.Retryable() ? 
DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "Failed checking source replica " + request->get_source()->CurrentLocation().str() + ": " + std::string(res)); } else if (!*file) { request->get_logger()->msg(Arc::ERROR, "Failed checking source replica"); request->set_error_status(DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "Failed checking source replica " + request->get_source()->CurrentLocation().str()); } else if (request->get_source()->IsIndex() && !request->get_source()->CompareMeta(*(request->get_source()->CurrentLocationHandle()))) { request->get_logger()->msg(Arc::ERROR, "Metadata of replica and index service differ"); request->set_error_status(DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "Metadata of replica and index service differ for " + request->get_source()->CurrentLocation().str() + " and " + request->get_source()->str()); } else { // assign metadata to destination request->get_destination()->SetMeta(*request->get_source()); } request->set_status(DTRStatus::REPLICA_QUERIED); request->connect_logger(); DTR::push(request, SCHEDULER); } } void Processor::DTRPreClean(void *arg) { // for physical files call Remove() // for index services delete entry and all existing replicas // only if the entry already exists ThreadArgument* targ = (ThreadArgument*)arg; DTR_ptr request = targ->dtr; delete targ; setUpLogger(request); Arc::DataStatus res = Arc::DataStatus::Success; if (!request->get_destination()->IsIndex()) { request->get_logger()->msg(Arc::INFO, "Removing %s", request->get_destination()->CurrentLocation().str()); res = request->get_destination()->Remove(); } else { // get existing locations Arc::DataHandle dest(request->get_destination()->GetURL(), request->get_destination()->GetUserConfig()); request->get_logger()->msg(Arc::VERBOSE, "Finding existing destination replicas"); res = dest->Resolve(true); if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, std::string(res)); } else { if (dest->HaveLocations()) { while (dest->LocationValid()) { request->get_logger()->msg(Arc::INFO, "Removing %s", dest->CurrentLocation().str()); res = dest->Remove(); if (!res.Passed()) { // if we fail to delete one replica then bail out request->get_logger()->msg(Arc::ERROR, "Failed to delete replica %s: %s", dest->CurrentLocation().str(), std::string(res)); break; } // unregister this replica from the index // not critical if this fails as will be removed in the next step dest->Unregister(false); // next replica dest->RemoveLocation(); } } if (!dest->HaveLocations()) { // all replicas were deleted successfully, now unregister the LFN request->get_logger()->msg(Arc::INFO, "Unregistering %s", dest->str()); res = dest->Unregister(true); } } // if deletion was successful resolve destination and pre-register if (!dest->HaveLocations()) { request->get_logger()->msg(Arc::VERBOSE, "Resolving destination replicas"); res = request->get_destination()->Resolve(false); if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, std::string(res)); } else { request->get_logger()->msg(Arc::VERBOSE, "Pre-registering destination"); res = request->get_destination()->PreRegister(false, request->is_force_registration()); } } } if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, "Failed to pre-clean destination: %s", std::string(res)); request->set_error_status(res.Retryable() ? 
DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Failed to pre-clean destination " + request->get_destination()->str() + ": " + std::string(res)); } request->set_status(DTRStatus::PRE_CLEANED); request->connect_logger(); DTR::push(request, SCHEDULER); } void Processor::DTRStagePrepare(void* arg) { // Only valid for stageable (SRM-like) protocols. // Call request->source.PrepareReading() to get TURL for reading or query status of request // and/or request->destination.PrepareWriting() to get TURL for writing or query status of request ThreadArgument* targ = (ThreadArgument*)arg; DTR_ptr request = targ->dtr; delete targ; setUpLogger(request); // first source - if stageable and not already staged yet if (request->get_source()->IsStageable() && request->get_source()->TransferLocations().empty()) { // give default wait time for cases where no wait time is given by the remote service unsigned int source_wait_time = 10; request->get_logger()->msg(Arc::VERBOSE, "Preparing to stage source"); Arc::DataStatus res = request->get_source()->PrepareReading(0, source_wait_time); if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, std::string(res)); request->set_error_status(res.Retryable() ? DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "Failed to prepare source " + request->get_source()->CurrentLocation().str() + ": " + std::string(res)); } else if (res == Arc::DataStatus::ReadPrepareWait) { // if timeout then don't wait - scheduler will deal with it immediately if (Arc::Time() < request->get_timeout()) { if (source_wait_time > 60) source_wait_time = 60; request->set_process_time(source_wait_time); request->get_logger()->msg(Arc::VERBOSE, "Source is not ready, will wait %u seconds", source_wait_time); } request->set_status(DTRStatus::STAGING_PREPARING_WAIT); } else { if (request->get_source()->TransferLocations().empty()) { request->get_logger()->msg(Arc::ERROR, "No physical files found for source"); request->set_error_status(DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "No physical files found for source " + request->get_source()->CurrentLocation().str()); } else { // TODO order physical files according to eg preferred pattern } } } if (request->error()) { request->set_status(DTRStatus::STAGED_PREPARED); request->connect_logger(); DTR::push(request, SCHEDULER); return; } // now destination - if stageable and not already staged yet if (request->get_destination()->IsStageable() && request->get_destination()->TransferLocations().empty()) { // give default wait time for cases where no wait time is given by the remote service unsigned int dest_wait_time = 10; request->get_logger()->msg(Arc::VERBOSE, "Preparing to stage destination"); Arc::DataStatus res = request->get_destination()->PrepareWriting(0, dest_wait_time); if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, std::string(res)); request->set_error_status(res.Retryable() ? 
DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Failed to prepare destination " + request->get_destination()->CurrentLocation().str() + ": " + std::string(res)); } else if (res == Arc::DataStatus::WritePrepareWait) { // if timeout then don't wait - scheduler will deal with it immediately if (Arc::Time() < request->get_timeout()) { if (dest_wait_time > 60) dest_wait_time = 60; request->set_process_time(dest_wait_time); request->get_logger()->msg(Arc::VERBOSE, "Destination is not ready, will wait %u seconds", dest_wait_time); } request->set_status(DTRStatus::STAGING_PREPARING_WAIT); } else { if (request->get_destination()->TransferLocations().empty()) { request->get_logger()->msg(Arc::ERROR, "No physical files found for destination"); request->set_error_status(DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "No physical files found for destination " + request->get_destination()->CurrentLocation().str()); } else { // TODO choose best physical file } } } // set to staged prepared if we don't have to wait for source or destination if (request->get_status() != DTRStatus::STAGING_PREPARING_WAIT) request->set_status(DTRStatus::STAGED_PREPARED); request->connect_logger(); DTR::push(request, SCHEDULER); } void Processor::DTRReleaseRequest(void* arg) { // only valid for stageable (SRM-like) protocols. call request->source.FinishReading() and/or // request->destination.FinishWriting() to release or abort requests ThreadArgument* targ = (ThreadArgument*)arg; DTR_ptr request = targ->dtr; delete targ; setUpLogger(request); Arc::DataStatus res; if (request->get_source()->IsStageable()) { request->get_logger()->msg(Arc::VERBOSE, "Releasing source"); res = request->get_source()->FinishReading(request->error() || request->cancel_requested()); if (!res.Passed()) { // an error here is not critical to the transfer request->get_logger()->msg(Arc::WARNING, "There was a problem during post-transfer source handling: %s", std::string(res)); } } if (request->get_destination()->IsStageable()) { request->get_logger()->msg(Arc::VERBOSE, "Releasing destination"); res = request->get_destination()->FinishWriting(request->error() || request->cancel_requested()); if (!res.Passed()) { if (request->error()) { request->get_logger()->msg(Arc::WARNING, "There was a problem during post-transfer destination handling after error: %s", std::string(res)); } else { request->get_logger()->msg(Arc::ERROR, "Error with post-transfer destination handling: %s", std::string(res)); request->set_error_status(res.Retryable() ? DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Error with post-transfer destination handling of " + request->get_destination()->CurrentLocation().str() + ": " + std::string(res)); } } } request->set_status(DTRStatus::REQUEST_RELEASED); request->connect_logger(); DTR::push(request, SCHEDULER); } void Processor::DTRRegisterReplica(void* arg) { // call request->destination.Register() to add new replica and metadata for normal workflow // call request->destination.PreUnregister() to delete LFN placed during // RESOLVE stage for error workflow ThreadArgument* targ = (ThreadArgument*)arg; DTR_ptr request = targ->dtr; delete targ; setUpLogger(request); // TODO: If the copy completed before request was cancelled, unregistering // here will lead to dark data. 
Need to check for successful copy if (request->error() || request->cancel_requested()) { request->get_logger()->msg(Arc::VERBOSE, "Removing pre-registered destination in index service"); Arc::DataStatus res = request->get_destination()->PreUnregister(request->is_replication()); if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, "Failed to unregister pre-registered destination %s: %s." " You may need to unregister it manually", request->get_destination()->str(), std::string(res)); } } else { request->get_logger()->msg(Arc::VERBOSE, "Registering destination replica"); Arc::DataStatus res = request->get_destination()->PostRegister(request->is_replication()); if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, "Failed to register destination replica: %s", std::string(res)); if (!request->get_destination()->PreUnregister(request->is_replication()).Passed()) { request->get_logger()->msg(Arc::ERROR, "Failed to unregister pre-registered destination %s." " You may need to unregister it manually", request->get_destination()->str()); } request->set_error_status(res.Retryable() ? DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Could not post-register destination " + request->get_destination()->str() + ": " + std::string(res)); } } // finished with registration - send back to scheduler request->set_status(DTRStatus::REPLICA_REGISTERED); request->connect_logger(); DTR::push(request, SCHEDULER); } void Processor::DTRProcessCache(void* arg) { // link or copy cached file to session dir, or release locks in case // of error or deciding not to use cache (for example because of a mapped link) ThreadArgument* targ = (ThreadArgument*)arg; DTR_ptr request = targ->dtr; delete targ; setUpLogger(request); Arc::FileCache cache(request->get_cache_parameters().cache_dirs, request->get_cache_parameters().remote_cache_dirs, request->get_cache_parameters().drain_cache_dirs, request->get_parent_job_id(), request->get_local_user().get_uid(), request->get_local_user().get_gid()); if (!cache) { request->get_logger()->msg(Arc::ERROR, "Error creating cache. Stale locks may remain."); request->set_error_status(DTRErrorStatus::CACHE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Failed to create cache for " + request->get_source()->str()); request->set_status(DTRStatus::CACHE_PROCESSED); request->connect_logger(); DTR::push(request, SCHEDULER); return; } std::string canonic_url(request->get_source()->GetURL().plainstr()); // add guid if present if (!request->get_source()->GetURL().MetaDataOption("guid").empty()) canonic_url += ":guid=" + request->get_source()->GetURL().MetaDataOption("guid"); // don't link if error, cancellation or cache not being used if (request->error() || request->cancel_requested() || request->get_cache_state() == CACHE_NOT_USED) { // release locks if they were acquired if (request->get_cache_state() == CACHEABLE || request->get_cache_state() == CACHE_NOT_USED) { if (request->error() || request->cancel_requested()) { cache.StopAndDelete(canonic_url); } else { cache.Stop(canonic_url); } } request->set_status(DTRStatus::CACHE_PROCESSED); request->connect_logger(); DTR::push(request, SCHEDULER); return; } // check options for whether to copy or link bool executable = (request->get_source()->GetURL().Option("exec") == "yes") ? true : false; bool cache_copy = (request->get_source()->GetURL().Option("cache") == "copy") ? 
true : false; request->get_logger()->msg(Arc::INFO, "Linking/copying cached file to %s", request->get_destination()->CurrentLocation().Path()); bool was_downloaded = (request->get_cache_state() == CACHE_DOWNLOADED) ? true : false; if (was_downloaded) { // Add DN to cached permissions std::string dn = request->get_credential_info().getDN(); Arc::Time exp_time = request->get_credential_info().getExpiryTime(); cache.AddDN(canonic_url, dn, exp_time); } bool try_again = false; if (!cache.Link(request->get_destination()->CurrentLocation().Path(), canonic_url, cache_copy, executable, was_downloaded, try_again)) { if (try_again) { // set cache status to CACHE_LOCKED, so that the Scheduler will try again request->set_cache_state(CACHE_LOCKED); request->get_logger()->msg(Arc::WARNING, "Failed linking cache file to %s", request->get_destination()->CurrentLocation().Path()); } else { request->get_logger()->msg(Arc::ERROR, "Error linking cache file to %s.", request->get_destination()->CurrentLocation().Path()); } request->set_error_status(DTRErrorStatus::CACHE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Failed to link/copy cache file to session dir"); } if (was_downloaded) cache.Stop(canonic_url); request->set_status(DTRStatus::CACHE_PROCESSED); request->connect_logger(); DTR::push(request, SCHEDULER); } /* main process method called from DTR::push() */ void Processor::receiveDTR(DTR_ptr request) { BulkThreadArgument* bulk_arg = NULL; ThreadArgument* arg = NULL; // first deal with bulk if (request->get_bulk_end()) { // end of bulk request->get_logger()->msg(Arc::VERBOSE, "Adding to bulk request"); request->set_bulk_end(false); bulk_list.push_back(request); bulk_arg = new BulkThreadArgument(this, bulk_list); bulk_list.clear(); } else if (request->get_bulk_start() || !bulk_list.empty()) { // filling bulk list request->get_logger()->msg(Arc::VERBOSE, "Adding to bulk request"); bulk_list.push_back(request); if (request->get_bulk_start()) request->set_bulk_start(false); } else { // non-bulk request arg = new ThreadArgument(this, request); } // switch through the expected DTR states switch (request->get_status().GetStatus()) { // pre-processor states case DTRStatus::CHECK_CACHE: { request->set_status(DTRStatus::CHECKING_CACHE); Arc::CreateThreadFunction(&DTRCheckCache, (void*)arg, &thread_count); }; break; case DTRStatus::RESOLVE: { request->set_status(DTRStatus::RESOLVING); if (bulk_arg) Arc::CreateThreadFunction(&DTRBulkResolve, (void*)bulk_arg, &thread_count); else if (arg) Arc::CreateThreadFunction(&DTRResolve, (void*)arg, &thread_count); }; break; case DTRStatus::QUERY_REPLICA: { request->set_status(DTRStatus::QUERYING_REPLICA); if (bulk_arg) Arc::CreateThreadFunction(&DTRBulkQueryReplica, (void*)bulk_arg, &thread_count); else if (arg) Arc::CreateThreadFunction(&DTRQueryReplica, (void*)arg, &thread_count); }; break; case DTRStatus::PRE_CLEAN: { request->set_status(DTRStatus::PRE_CLEANING); Arc::CreateThreadFunction(&DTRPreClean, (void*)arg, &thread_count); }; break; case DTRStatus::STAGE_PREPARE: { request->set_status(DTRStatus::STAGING_PREPARING); Arc::CreateThreadFunction(&DTRStagePrepare, (void*)arg, &thread_count); }; break; // post-processor states case DTRStatus::RELEASE_REQUEST: { request->set_status(DTRStatus::RELEASING_REQUEST); Arc::CreateThreadFunction(&DTRReleaseRequest, (void*)arg, &thread_count); }; break; case DTRStatus::REGISTER_REPLICA: { request->set_status(DTRStatus::REGISTERING_REPLICA); Arc::CreateThreadFunction(&DTRRegisterReplica, (void*)arg, &thread_count); }; break; case 
DTRStatus::PROCESS_CACHE: { request->set_status(DTRStatus::PROCESSING_CACHE); Arc::CreateThreadFunction(&DTRProcessCache, (void*)arg, &thread_count); }; break; default: { // unexpected state - report error request->set_error_status(DTRErrorStatus::INTERNAL_LOGIC_ERROR, DTRErrorStatus::ERROR_UNKNOWN, "Received a DTR in an unexpected state ("+request->get_status().str()+") in processor"); DTR::push(request, SCHEDULER); if (arg) delete arg; if (bulk_arg) delete bulk_arg; }; break; } } void Processor::start(void) { } void Processor::stop(void) { // threads are short lived so wait for them to complete rather than interrupting thread_count.wait(60*1000); } } // namespace DataStaging nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/DTRStatus.h0000644000000000000000000000012412075743241023273 xustar000000000000000027 mtime=1358415521.722217 27 atime=1513200576.805729 30 ctime=1513200662.095773118 nordugrid-arc-5.4.2/src/libs/data-staging/DTRStatus.h0000644000175000002070000002164612075743241023351 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_DTRSTATUS_H__ #define __ARC_DTRSTATUS_H__ #include #include namespace DataStaging { /// Class representing the status of a DTR. /** * \ingroup datastaging * \headerfile DTRStatus.h arc/data-staging/DTRStatus.h */ class DTRStatus { public: /// Possible state values enum DTRStatusType { // ORDER IS IMPORTANT!! /// Just created NEW, /// Check the cache for the file may be already there CHECK_CACHE, /// Checking the cache CHECKING_CACHE, /// Cache file is locked, waiting for its release CACHE_WAIT, /// Cache check completed CACHE_CHECKED, /// Resolve a meta-protocol RESOLVE, /// Resolving replicas RESOLVING, /// Replica resolution completed RESOLVED, /// Query a replica QUERY_REPLICA, /// Replica is being queried QUERYING_REPLICA, /// Replica was queried REPLICA_QUERIED, /// The destination should be deleted PRE_CLEAN, /// Deleting the destination PRE_CLEANING, /// The destination file has been deleted PRE_CLEANED, /// Prepare or stage the source and/or destination STAGE_PREPARE, /// Making a staging or preparing request STAGING_PREPARING, /// Wait for the status of the staging/preparing request STAGING_PREPARING_WAIT, /// Staging/preparing request completed STAGED_PREPARED, /// Transfer ready and can be started TRANSFER, /// Transfer is going TRANSFERRING, /// Transfer is on-going but scheduled for cancellation TRANSFERRING_CANCEL, /// Transfer completed TRANSFERRED, /// Transfer finished, release requests on the storage RELEASE_REQUEST, /// Releasing staging/preparing request RELEASING_REQUEST, /// Release of staging/preparing request completed REQUEST_RELEASED, /// Register a new replica of the destination REGISTER_REPLICA, /// Registering a replica in an index service REGISTERING_REPLICA, /// Replica registration completed REPLICA_REGISTERED, /// Destination is cacheable, process cache PROCESS_CACHE, /// Releasing locks and copying/linking cache files to the session dir PROCESSING_CACHE, /// Cache processing completed CACHE_PROCESSED, /// Everything completed successfully DONE, /// Cancellation request fulfilled successfully CANCELLED, /// Cancellation request fulfilled but DTR also completed transfer successfully CANCELLED_FINISHED, /// Error occured ERROR, /// "Stateless" DTR NULL_STATE }; /// Make new DTRStatus with given status and optional description. 
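    /// A minimal illustrative sketch (not from the ARC sources) of combining the
    /// constructor and comparison operators declared below:
    /// \code
    ///   DataStaging::DTRStatus s(DataStaging::DTRStatus::CHECK_CACHE,
    ///                            "waiting for cache check");
    ///   if (s == DataStaging::DTRStatus::CHECK_CACHE)
    ///     std::string state_text = s.str(); // human-readable name of the state
    /// \endcode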
DTRStatus(const DTRStatusType& status, std::string desc="") : status(status), desc(desc) {} /// Make new DTRStatus with default NEW status DTRStatus() : status(NEW), desc ("") {} /// Returns true if this status is the same as the given DTRStatusType bool operator==(const DTRStatusType& s) const { return status == s; } /// Returns true if this status is the same as the given DTRStatus bool operator==(const DTRStatus& s) const { return status == s.status; } /// Returns true if this status is not the same as the given DTRStatusType bool operator!=(const DTRStatusType& s) const { return status != s; } /// Returns true if this status is not the same as the given DTRStatus bool operator!=(const DTRStatus& s) const { return status != s.status; } /// Make a new DTRStatus with the same status as the given DTRStatusType DTRStatus& operator=(const DTRStatusType& s) { status = s; return *this; } /// Returns a string representation of the current state std::string str() const; /// Set the detailed description of the current state void SetDesc(const std::string& d) { desc = d; } /// Get the detailed description of the current state std::string GetDesc() const { return desc; } /// Get the DTRStatusType of the current state DTRStatusType GetStatus() const { return status; } // The actions in the following two vectors must match /// Vector of states with a to be processed action, eg CHECK_CACHE static const std::vector ToProcessStates; /// Vector of states with a processing action, eg CHECKING_CACHE static const std::vector ProcessingStates; /// Vector of states where a DTR is staged - used to limit the number of staged files static const std::vector StagedStates; private: /// status code DTRStatusType status; /// description set by the owner process std::string desc; }; // DTRStatus /// A class to represent error states reported by various components. 
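  /// Throughout the processor code above, an error is recorded with an error
  /// type, an error location (source, destination or transfer) and a free-text
  /// description; this class bundles those together with the DTR state in
  /// which the error occurred.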
/** * \ingroup datastaging * \headerfile DTRStatus.h arc/data-staging/DTRStatus.h */ class DTRErrorStatus { public: /// A list of error types enum DTRErrorStatusType { /// No error NONE_ERROR, /// Internal error in Data Staging logic INTERNAL_LOGIC_ERROR, /// Internal processing error, like losing contact with external process INTERNAL_PROCESS_ERROR, /// Attempt to replicate a file to itself SELF_REPLICATION_ERROR, /// Permanent error with cache CACHE_ERROR, /// Temporary error with remote service TEMPORARY_REMOTE_ERROR, /// Permanent error with remote service PERMANENT_REMOTE_ERROR, /// Error with local file LOCAL_FILE_ERROR, /// Transfer rate was too slow TRANSFER_SPEED_ERROR, /// Waited for too long to become staging STAGING_TIMEOUT_ERROR }; /// Describes where the error occurred enum DTRErrorLocation { /// No error NO_ERROR_LOCATION, /// Error with source ERROR_SOURCE, /// Error with destination ERROR_DESTINATION, /// Error during transfer not directly related to source or destination ERROR_TRANSFER, /// Error occurred in an unknown location ERROR_UNKNOWN }; /// Create a new DTRErrorStatus with given error states /** * @param status Type of error * @param error_state DTR state in which the error occurred * @param location Location of error (at source, destination or during transfer) * @param desc Text description of error */ DTRErrorStatus(DTRErrorStatusType status, DTRStatus::DTRStatusType error_state, DTRErrorLocation location, const std::string& desc = ""): error_status(status), last_error_state(error_state), error_location(location), desc(desc) {}; /// Create a new DTRErrorStatus with default none/null error states DTRErrorStatus() : error_status(NONE_ERROR), last_error_state(DTRStatus::NULL_STATE), error_location(NO_ERROR_LOCATION), desc("") {}; /// Returns the error type DTRErrorStatusType GetErrorStatus() const { return error_status; } /// Returns the state in which the error occurred DTRStatus::DTRStatusType GetLastErrorState() const { return last_error_state.GetStatus(); } /// Returns the location at which the error occurred DTRErrorLocation GetErrorLocation() const { return error_location; } /// Returns the error description std::string GetDesc() const { return desc; } /// Returns true if this error status is the same as the given DTRErrorStatusType bool operator==(const DTRErrorStatusType& s) const { return error_status == s; } /// Returns true if this error status is the same as the given DTRErrorStatus bool operator==(const DTRErrorStatus& s) const { return error_status == s.error_status; } /// Returns true if this error status is not the same as the given DTRErrorStatusType bool operator!=(const DTRErrorStatusType& s) const { return error_status != s; } /// Returns true if this error status is not the same as the given DTRErrorStatus bool operator!=(const DTRErrorStatus& s) const { return error_status != s.error_status; } /// Make a new DTRErrorStatus with the same error status as the given DTRErrorStatusType DTRErrorStatus& operator=(const DTRErrorStatusType& s) { error_status = s; return *this; } private: /// error state DTRErrorStatusType error_status; /// state that error occurred in DTRStatus last_error_state; /// place where the error occurred DTRErrorLocation error_location; /// description of error std::string desc; }; } // namespace DataStaging #endif /*__ARC_DTRSTATUS_H_*/ nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/DataDeliveryComm.cpp0000644000000000000000000000012412675602216025164 xustar000000000000000027 mtime=1459029134.924374 27 
atime=1513200576.792729 30 ctime=1513200662.103773216 nordugrid-arc-5.4.2/src/libs/data-staging/DataDeliveryComm.cpp0000644000175000002070000000571012675602216025234 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "DataDeliveryComm.h" #include "DataDeliveryRemoteComm.h" #include "DataDeliveryLocalComm.h" namespace DataStaging { DataDeliveryComm* DataDeliveryComm::CreateInstance(DTR_ptr dtr, const TransferParameters& params) { if (!dtr->get_delivery_endpoint() || dtr->get_delivery_endpoint() == DTR::LOCAL_DELIVERY) return new DataDeliveryLocalComm(dtr, params); return new DataDeliveryRemoteComm(dtr, params); } DataDeliveryComm::DataDeliveryComm(DTR_ptr dtr, const TransferParameters& params) : status_pos_(0),transfer_params(params),logger_(dtr->get_logger()) { handler_= DataDeliveryCommHandler::getInstance(); } DataDeliveryComm::Status DataDeliveryComm::GetStatus(void) const { Glib::Mutex::Lock lock(*(const_cast(&lock_))); DataDeliveryComm::Status tmp = status_; return tmp; } bool DataDeliveryComm::CheckComm(DTR_ptr dtr, std::vector& allowed_dirs, std::string& load_avg) { if (!dtr->get_delivery_endpoint() || dtr->get_delivery_endpoint() == DTR::LOCAL_DELIVERY) return DataDeliveryLocalComm::CheckComm(dtr, allowed_dirs, load_avg); return DataDeliveryRemoteComm::CheckComm(dtr, allowed_dirs, load_avg); } DataDeliveryCommHandler::DataDeliveryCommHandler(void) { Glib::Mutex::Lock lock(lock_); Arc::CreateThreadFunction(&func,this); } void DataDeliveryCommHandler::Add(DataDeliveryComm* item) { Glib::Mutex::Lock lock(lock_); items_.push_back(item); } void DataDeliveryCommHandler::Remove(DataDeliveryComm* item) { Glib::Mutex::Lock lock(lock_); for(std::list::iterator i = items_.begin(); i!=items_.end();) { if(*i == item) { i=items_.erase(i); } else { ++i; } } } DataDeliveryCommHandler* DataDeliveryCommHandler::comm_handler = NULL; DataDeliveryCommHandler* DataDeliveryCommHandler::getInstance() { if(comm_handler) return comm_handler; return (comm_handler = new DataDeliveryCommHandler); } // This is a dedicated thread which periodically checks for // new state reported by comm instances and modifies states accordingly void DataDeliveryCommHandler::func(void* arg) { if(!arg) return; // disconnect from root logger since messages are logged to per-DTR Logger Arc::Logger::getRootLogger().setThreadContext(); Arc::Logger::getRootLogger().removeDestinations(); // We do not need extremely low latency, so this // thread simply polls for data 2 times per second. DataDeliveryCommHandler& it = *(DataDeliveryCommHandler*)arg; for(;;) { { Glib::Mutex::Lock lock(it.lock_); for(std::list::iterator i = it.items_.begin(); i != it.items_.end();++i) { DataDeliveryComm* comm = *i; if(comm) comm->PullStatus(); } } Glib::usleep(500000); } } } // namespace DataStaging nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/DataDeliveryRemoteComm.h0000644000000000000000000000012312675602216026004 xustar000000000000000027 mtime=1459029134.924374 26 atime=1513200576.81073 30 ctime=1513200662.091773069 nordugrid-arc-5.4.2/src/libs/data-staging/DataDeliveryRemoteComm.h0000644000175000002070000000472312675602216026060 0ustar00mockbuildmock00000000000000#ifndef DATADELIVERYREMOTECOMM_H_ #define DATADELIVERYREMOTECOMM_H_ #include #include #include #include "DataDeliveryComm.h" namespace DataStaging { /// This class contacts a remote service to make a Delivery request. 
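  /// DataDeliveryComm::CreateInstance() (see DataDeliveryComm.cpp) returns an
  /// instance of this class when the DTR specifies a delivery endpoint other
  /// than DTR::LOCAL_DELIVERY; otherwise a DataDeliveryLocalComm is created,
  /// which runs the transfer in a local DataStagingDelivery process.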
/** * \ingroup datastaging * \headerfile DataDeliveryRemoteComm.h arc/data-staging/DataDeliveryRemoteComm.h */ class DataDeliveryRemoteComm : public DataDeliveryComm { public: /// Send the transfer request to the remote service. DataDeliveryRemoteComm(DTR_ptr dtr, const TransferParameters& params); /// If transfer is still ongoing, sends a cancellation message to the service. virtual ~DataDeliveryRemoteComm(); /// Read status from service virtual void PullStatus(); /// Pings service to find allowed dirs static bool CheckComm(DTR_ptr dtr, std::vector& allowed_dirs, std::string& load_avg); /// Returns true if service is still processing request virtual operator bool() const { return valid; }; /// Returns true if service is not processing request or down virtual bool operator!() const { return !valid; }; private: /// Connection to service Arc::ClientSOAP* client; /// Full DTR ID std::string dtr_full_id; /// Retries allowed after failing to query transfer status, so that a /// transfer is not lost due to temporary communication problem. If a /// transfer fails to start it is handled by the normal DTR retries. int query_retries; /// MCC configuration for connecting to service Arc::MCCConfig cfg; /// Endpoint of remote delivery service Arc::URL endpoint; /// Connection timeout int timeout; /// Flag to say whether transfer is running and service is still up bool valid; /// Logger object (main log, not DTR's log) static Arc::Logger logger; /// Cancel a DTR, by sending a cancel request to the service void CancelDTR(); /// Fill Status object with data in node. If empty fields are initialised /// to default values. void FillStatus(const Arc::XMLNode& node = Arc::XMLNode()); /// Set up delegation so the credentials can be used by the service bool SetupDelegation(Arc::XMLNode& op, const Arc::UserConfig& usercfg); /// Handle a fault during query of service. 
Attempts to reconnect void HandleQueryFault(const std::string& err=""); }; } // namespace DataStaging #endif /* DATADELIVERYREMOTECOMM_H_ */ nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/DataStagingDelivery.cpp0000644000000000000000000000012412675602216025665 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200576.794729 30 ctime=1513200662.113773338 nordugrid-arc-5.4.2/src/libs/data-staging/DataStagingDelivery.cpp0000644000175000002070000004400312675602216025733 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "DataDeliveryComm.h" using namespace Arc; static Arc::Logger logger(Arc::Logger::getRootLogger(), "DataDelivery"); static bool delivery_shutdown = false; static void sig_shutdown(int) { if(delivery_shutdown) _exit(0); delivery_shutdown = true; } static void ReportStatus(DataStaging::DTRStatus::DTRStatusType st, DataStaging::DTRErrorStatus::DTRErrorStatusType err, DataStaging::DTRErrorStatus::DTRErrorLocation err_loc, const std::string& err_desc, unsigned long long int transferred, unsigned long long int size, Arc::Time transfer_start_time, const std::string& checksum = "") { static DataStaging::DataDeliveryComm::Status status; static unsigned int status_pos = 0; static bool status_changed = true; unsigned long long int transfer_time = 0; if (transfer_start_time != Arc::Time(0)) { Arc::Period p = Arc::Time() - transfer_start_time; transfer_time = p.GetPeriod() * 1000000000 + p.GetPeriodNanoseconds(); } // Filling status.commstatus = DataStaging::DataDeliveryComm::CommNoError; status.timestamp = ::time(NULL); status.status = st; status.error = err; status.error_location = err_loc; strncpy(status.error_desc,err_desc.c_str(),sizeof(status.error_desc)); status.streams = 0; status.transferred = transferred; status.size = size; status.transfer_time = transfer_time; status.offset = 0; status.speed = 0; strncpy(status.checksum, checksum.c_str(), sizeof(status.checksum)); if(status_pos == 0) { status_changed=true; }; if(status_changed) { for(;;) { ssize_t l = ::write(STDOUT_FILENO,((char*)&status)+status_pos,sizeof(status)-status_pos); if(l == -1) { // error, parent exited? 
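          // A failed write is not retried here; status_pos and status_changed
          // are left untouched so the next ReportStatus() call resumes the
          // write from the same offset.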
break; } else if(l == 0) { // will happen if stdout is non-blocking break; } else { status_pos+=l; }; if(status_pos >= sizeof(status)) { status_pos=0; status_changed=false; break; }; }; }; } static unsigned long long int GetFileSize(const DataPoint& source, const DataPoint& dest) { if(source.CheckSize()) return source.GetSize(); if(dest.CheckSize()) return dest.GetSize(); return 0; } int main(int argc,char* argv[]) { Arc::Time start_time; // log to stderr Arc::Logger::getRootLogger().setThreshold(Arc::VERBOSE); //TODO: configurable Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::EmptyFormat); Arc::Logger::getRootLogger().addDestination(logcerr); // Collecting parameters // --surl: source URL // --durl: destination URL // --sopt: any URL option, credential - path to file storing credentials // --dopt: any URL option, credential - path to file storing credentials // --topt: minspeed, minspeedtime, minavgspeed, maxinacttime, avgtime // --size: total size of data to be transferred // --cstype: checksum type to calculate // --csvalue: checksum value of source file to validate against // surl, durl, cstype and csvalue may be given only once // sopt, dopt, topt may be given multiple times // type of credentials is detected automatically, so far only // X.509 proxies or key+certificate are accepted std::string source_str; std::string dest_str; std::list source_opts; std::list dest_opts; std::list transfer_opts; std::string size; std::string checksum_type; std::string checksum_value; std::string source_cred_path; std::string dest_cred_path; std::string source_ca_path; std::string dest_ca_path; OptionParser opt; opt.AddOption(0,"surl","","source URL",source_str); opt.AddOption(0,"durl","","destination URL",dest_str); opt.AddOption(0,"sopt","","source options",source_opts); opt.AddOption(0,"dopt","","destination options",dest_opts); opt.AddOption(0,"topt","","transfer options",transfer_opts); opt.AddOption(0,"size","","total size",size); opt.AddOption(0,"cstype","","checksum type",checksum_type); opt.AddOption(0,"csvalue","","checksum value",checksum_value); if(opt.Parse(argc,argv).size() != 0) { logger.msg(ERROR, "Unexpected arguments"); return -1; }; if(source_str.empty()) { logger.msg(ERROR, "Source URL missing"); return -1; }; if(dest_str.empty()) { logger.msg(ERROR, "Destination URL missing"); return -1; }; URL source_url(source_str); if(!source_url) { logger.msg(ERROR, "Source URL not valid: %s", source_str); return -1; }; URL dest_url(dest_str); if(!dest_url) { logger.msg(ERROR, "Destination URL not valid: %s", dest_str); return -1; }; for(std::list::iterator o = source_opts.begin(); o != source_opts.end();++o) { std::string::size_type p = o->find('='); if(p == std::string::npos) { source_url.AddOption(*o); } else { std::string name = o->substr(0,p); if(name == "credential") { source_cred_path = o->substr(p+1); } else if(name == "ca") { source_ca_path = o->substr(p+1); } else { source_url.AddOption(*o); }; }; }; for(std::list::iterator o = dest_opts.begin(); o != dest_opts.end();++o) { std::string::size_type p = o->find('='); if(p == std::string::npos) { dest_url.AddOption(*o); } else { std::string name = o->substr(0,p); if(name == "credential") { dest_cred_path = o->substr(p+1); } else if(name == "ca") { dest_ca_path = o->substr(p+1); } else { dest_url.AddOption(*o); }; }; }; DataBuffer buffer; buffer.speed.verbose(true); unsigned long long int minspeed = 0; time_t minspeedtime = 0; for(std::list::iterator o = transfer_opts.begin(); o != transfer_opts.end();++o) { 
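    // Each --topt option is parsed as a name=value pair; the recognised names
    // (minspeed, minspeedtime, minavgspeed, maxinacttime, avgtime) feed the
    // DataBuffer speed monitoring configured below.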
std::string::size_type p = o->find('='); if(p != std::string::npos) { std::string name = o->substr(0,p); unsigned long long int value; if(stringto(o->substr(p+1),value)) { if(name == "minspeed") { minspeed=value; } else if(name == "minspeedtime") { minspeedtime=value; } else if(name == "minavgspeed") { buffer.speed.set_min_average_speed(value); } else if(name == "maxinacttime") { buffer.speed.set_max_inactivity_time(value); } else if(name == "avgtime") { buffer.speed.set_base(value); } else { logger.msg(ERROR, "Unknown transfer option: %s", name); _exit(-1); } }; }; } buffer.speed.set_min_speed(minspeed,minspeedtime); // Checksum objects must be destroyed after DataHandles CheckSumAny crc; CheckSumAny crc_source; CheckSumAny crc_dest; // Read credential from stdin if available std::string proxy_cred; std::getline(std::cin, proxy_cred, '\0'); initializeCredentialsType source_cred(initializeCredentialsType::SkipCredentials); UserConfig source_cfg(source_cred); if(!source_cred_path.empty()) source_cfg.ProxyPath(source_cred_path); else if (!proxy_cred.empty()) source_cfg.CredentialString(proxy_cred); if(!source_ca_path.empty()) source_cfg.CACertificatesDirectory(source_ca_path); //source_cfg.UtilsDirPath(...); - probably not needed DataHandle source(source_url,source_cfg); if(!source) { logger.msg(ERROR, "Source URL not supported: %s", source_url.str()); _exit(-1); //return -1; }; if (source->RequiresCredentialsInFile() && source_cred_path.empty()) { logger.msg(ERROR, "No credentials supplied"); _exit(-1); } source->SetSecure(false); source->Passive(true); initializeCredentialsType dest_cred(initializeCredentialsType::SkipCredentials); UserConfig dest_cfg(dest_cred); if(!dest_cred_path.empty()) dest_cfg.ProxyPath(dest_cred_path); else if (!proxy_cred.empty()) dest_cfg.CredentialString(proxy_cred); if(!dest_ca_path.empty()) dest_cfg.CACertificatesDirectory(dest_ca_path); //dest_cfg.UtilsDirPath(...); - probably not needed DataHandle dest(dest_url,dest_cfg); if(!dest) { logger.msg(ERROR, "Destination URL not supported: %s", dest_url.str()); _exit(-1); //return -1; }; if (dest->RequiresCredentialsInFile() && dest_cred_path.empty()) { logger.msg(ERROR, "No credentials supplied"); _exit(-1); } dest->SetSecure(false); dest->Passive(true); // set X509* for 3rd party tools which need it (eg GFAL) if (!source_cfg.ProxyPath().empty()) { SetEnv("X509_USER_PROXY", source_cfg.ProxyPath()); if (!source_cfg.CACertificatesDirectory().empty()) SetEnv("X509_CERT_DIR", source_cfg.CACertificatesDirectory()); // those tools also use hostcert by default if the user is root... 
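    // so point X509_USER_CERT/X509_USER_KEY at the proxy too, rather than
    // letting them fall back to the host certificate and key.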
if (getuid() == 0) { SetEnv("X509_USER_CERT", source_cfg.ProxyPath()); SetEnv("X509_USER_KEY", source_cfg.ProxyPath()); } } // set signal handlers signal(SIGTERM, sig_shutdown); signal(SIGINT, sig_shutdown); // Filling initial report buffer ReportStatus(DataStaging::DTRStatus::NULL_STATE, DataStaging::DTRErrorStatus::NONE_ERROR, DataStaging::DTRErrorStatus::NO_ERROR_LOCATION, "",0,0,0,""); // if checksum type is supplied, use that type, otherwise use default for the // destination (if checksum is supported by the destination protocol) std::string crc_type(""); if (!checksum_type.empty()) { crc_type = checksum_type; if (!checksum_value.empty()) source->SetCheckSum(checksum_type+':'+checksum_value); } else if (dest->AcceptsMeta() || dest->ProvidesMeta()) { crc_type = dest->DefaultCheckSum(); } if (!crc_type.empty()) { crc = crc_type.c_str(); crc_source = crc_type.c_str(); crc_dest = crc_type.c_str(); if (crc.Type() != CheckSumAny::none) logger.msg(INFO, "Will calculate %s checksum", crc_type); source->AddCheckSumObject(&crc_source); dest->AddCheckSumObject(&crc_dest); } buffer.set(&crc); if (!size.empty()) { unsigned long long int total_size; if (stringto(size, total_size)) { dest->SetSize(total_size); } else { logger.msg(WARNING, "Cannot use supplied --size option"); } } // Initiating transfer DataStatus source_st = source->StartReading(buffer); if(!source_st) { ReportStatus(DataStaging::DTRStatus::TRANSFERRED, (source_url.Protocol()!="file") ? (source_st.Retryable() ? DataStaging::DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DataStaging::DTRErrorStatus::PERMANENT_REMOTE_ERROR) : DataStaging::DTRErrorStatus::LOCAL_FILE_ERROR, DataStaging::DTRErrorStatus::ERROR_SOURCE, std::string("Failed reading from source: ")+source->CurrentLocation().str()+ " : "+std::string(source_st), 0,0,0); _exit(-1); //return -1; }; DataStatus dest_st = dest->StartWriting(buffer); if(!dest_st) { ReportStatus(DataStaging::DTRStatus::TRANSFERRED, (dest_url.Protocol() != "file") ? (dest_st.Retryable() ? 
DataStaging::DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DataStaging::DTRErrorStatus::PERMANENT_REMOTE_ERROR) : DataStaging::DTRErrorStatus::LOCAL_FILE_ERROR, DataStaging::DTRErrorStatus::ERROR_DESTINATION, std::string("Failed writing to destination: ")+dest->CurrentLocation().str()+ " : "+std::string(dest_st), 0,0,0); _exit(-1); //return -1; }; // While transfer is running in another threads // here we periodically report status to parent bool eof_reached = false; for(;!buffer.error() && !delivery_shutdown;) { if(buffer.eof_read() && buffer.eof_write()) { eof_reached = true; break; }; ReportStatus(DataStaging::DTRStatus::TRANSFERRING, DataStaging::DTRErrorStatus::NONE_ERROR, DataStaging::DTRErrorStatus::NO_ERROR_LOCATION, "", buffer.speed.transferred_size(), GetFileSize(*source,*dest),0); buffer.wait_any(); }; if (delivery_shutdown) { ReportStatus(DataStaging::DTRStatus::TRANSFERRED, DataStaging::DTRErrorStatus::INTERNAL_PROCESS_ERROR, DataStaging::DTRErrorStatus::ERROR_TRANSFER, "DataStagingProcess process killed", buffer.speed.transferred_size(), GetFileSize(*source,*dest),0); dest->StopWriting(); _exit(-1); } ReportStatus(DataStaging::DTRStatus::TRANSFERRING, DataStaging::DTRErrorStatus::NONE_ERROR, DataStaging::DTRErrorStatus::NO_ERROR_LOCATION, "", buffer.speed.transferred_size(), GetFileSize(*source,*dest),0); bool source_failed = buffer.error_read(); bool dest_failed = buffer.error_write(); dest_st = dest->StopWriting(); source_st = source->StopReading(); bool reported = false; // Error at source or destination if(source_failed || !source_st) { std::string err("Failed reading from source: "+source->CurrentLocation().str()); // If error reported in read callback, use that instead if (source->GetFailureReason() != DataStatus::UnknownError) source_st = source->GetFailureReason(); if (!source_st) err += " : " + std::string(source_st); ReportStatus(DataStaging::DTRStatus::TRANSFERRED, (source_url.Protocol() != "file") ? (((!source_st && source_st.Retryable()) || buffer.speed.transferred_size() > 0) ? DataStaging::DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DataStaging::DTRErrorStatus::PERMANENT_REMOTE_ERROR) : DataStaging::DTRErrorStatus::LOCAL_FILE_ERROR, DataStaging::DTRErrorStatus::ERROR_SOURCE, err, buffer.speed.transferred_size(), GetFileSize(*source,*dest), start_time); reported = true; }; if(dest_failed || !dest_st) { std::string err("Failed writing to destination: "+dest->CurrentLocation().str()); // If error reported in write callback, use that instead if (dest->GetFailureReason() != DataStatus::UnknownError) dest_st = dest->GetFailureReason(); if (!dest_st) err += " : " + std::string(dest_st); ReportStatus(DataStaging::DTRStatus::TRANSFERRED, (dest_url.Protocol() != "file") ? (((!dest_st && dest_st.Retryable()) || buffer.speed.transferred_size() > 0) ? 
DataStaging::DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DataStaging::DTRErrorStatus::PERMANENT_REMOTE_ERROR) : DataStaging::DTRErrorStatus::LOCAL_FILE_ERROR, DataStaging::DTRErrorStatus::ERROR_DESTINATION, err, buffer.speed.transferred_size(), GetFileSize(*source,*dest), start_time); reported = true; }; // Transfer error, usually timeout if(!eof_reached) { if((!dest_failed) && (!source_failed)) { ReportStatus(DataStaging::DTRStatus::TRANSFERRED, DataStaging::DTRErrorStatus::TRANSFER_SPEED_ERROR, DataStaging::DTRErrorStatus::ERROR_UNKNOWN, "Transfer timed out", buffer.speed.transferred_size(), GetFileSize(*source,*dest), start_time); reported = true; }; }; // checksum validation against supplied value std::string calc_csum; if (crc && buffer.checksum_valid()) { char buf[100]; crc.print(buf,100); calc_csum = buf; } else if(crc_source) { char buf[100]; crc_source.print(buf,100); calc_csum = buf; } else if(crc_dest) { char buf[100]; crc_dest.print(buf,100); calc_csum = buf; } if (!reported && !calc_csum.empty() && crc.Type() != CheckSumAny::none) { // compare calculated to any checksum given as an option if (source->CheckCheckSum()) { // Check the checksum types match. Some buggy GridFTP servers return a // different checksum type than requested so also check that the checksum // length matches before comparing. if (calc_csum.substr(0, calc_csum.find(":")) != checksum_type || calc_csum.substr(calc_csum.find(":")+1).length() != checksum_value.length()) { logger.msg(INFO, "Checksum type of source and calculated checksum differ, cannot compare"); } else if (calc_csum.substr(calc_csum.find(":")+1) != Arc::lower(checksum_value)) { logger.msg(ERROR, "Checksum mismatch between calculated checksum %s and source checksum %s", calc_csum, source->GetCheckSum()); ReportStatus(DataStaging::DTRStatus::TRANSFERRED, DataStaging::DTRErrorStatus::TRANSFER_SPEED_ERROR, DataStaging::DTRErrorStatus::ERROR_UNKNOWN, "Checksum mismatch", 0,0,start_time); reported = true; eof_reached = false; // TODO general error flag is better than this // Delete destination if (!dest->Remove().Passed()) { logger.msg(WARNING, "Failed cleaning up destination %s", dest->GetURL().str()); } } else logger.msg(INFO, "Calculated transfer checksum %s matches source checksum", calc_csum); } } else { logger.msg(VERBOSE, "Checksum not computed"); } if(!reported) { ReportStatus(DataStaging::DTRStatus::TRANSFERRED, DataStaging::DTRErrorStatus::NONE_ERROR, DataStaging::DTRErrorStatus::NO_ERROR_LOCATION, "", buffer.speed.transferred_size(), GetFileSize(*source,*dest), start_time, calc_csum); }; _exit(eof_reached?0:1); //return eof_reached?0:1; } nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/DataDeliveryLocalComm.cpp0000644000000000000000000000012313214315176026132 xustar000000000000000027 mtime=1513200254.761812 26 atime=1513200576.80773 30 ctime=1513200662.104773228 nordugrid-arc-5.4.2/src/libs/data-staging/DataDeliveryLocalComm.cpp0000644000175000002070000002334213214315176026204 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "DataDeliveryLocalComm.h" namespace DataStaging { // Check if needed and create copy of proxy with suitable ownership static std::string prepare_proxy(const std::string& proxy_path, int child_uid, int child_gid) { #ifdef WIN32 return ""; #else if (proxy_path.empty()) return ""; // No credentials int my_uid = (int)::getuid(); if (my_uid != 0) return ""; // Can't switch user id if (child_uid == 0) return ""; // Not switching if (child_uid == my_uid) return 
""; // Not switching // Check ownership of credentials. struct ::stat st; if(!Arc::FileStat(proxy_path,&st,true)) return ""; // Can't stat - won't read if(st.st_uid == child_uid) return ""; // Owned by child // Ownership may prevent reading of file. std::string proxy_content; if(!Arc::FileRead(proxy_path, proxy_content)) return ""; // Creating temporary file // Probably not most effective solution. But makes sure // access permissions are set properly. std::string proxy_new_path; if(!Arc::TmpFileCreate(proxy_new_path, proxy_content, child_uid, child_gid, S_IRUSR|S_IWUSR)) { if (!proxy_new_path.empty()) Arc::FileDelete(proxy_new_path); return ""; } return proxy_new_path; #endif } DataDeliveryLocalComm::DataDeliveryLocalComm(DTR_ptr dtr, const TransferParameters& params) : DataDeliveryComm(dtr, params),child_(NULL),last_comm(Arc::Time()) { if(!dtr->get_source()) return; if(!dtr->get_destination()) return; { Glib::Mutex::Lock lock(lock_); // Initial empty status memset(&status_,0,sizeof(status_)); status_.commstatus = CommInit; status_pos_ = 0; // Generate options for child std::list args; std::string execpath = Arc::ArcLocation::GetLibDir()+G_DIR_SEPARATOR_S+"DataStagingDelivery"; args.push_back(execpath); // check for alternative source or destination eg cache, mapped URL, TURL std::string surl; if (!dtr->get_mapped_source().empty()) { surl = dtr->get_mapped_source(); } else if (!dtr->get_source()->TransferLocations().empty()) { surl = dtr->get_source()->TransferLocations()[0].fullstr(); } else { logger_->msg(Arc::ERROR, "No locations defined for %s", dtr->get_source()->str()); return; } if (dtr->get_destination()->TransferLocations().empty()) { logger_->msg(Arc::ERROR, "No locations defined for %s", dtr->get_destination()->str()); return; } std::string durl = dtr->get_destination()->TransferLocations()[0].fullstr(); bool caching = false; if ((dtr->get_cache_state() == CACHEABLE) && !dtr->get_cache_file().empty()) { durl = dtr->get_cache_file(); caching = true; } int child_uid = 0; int child_gid = 0; if(!caching) { child_uid = dtr->get_local_user().get_uid(); child_gid = dtr->get_local_user().get_gid(); } args.push_back("--surl"); args.push_back(surl); args.push_back("--durl"); args.push_back(durl); // Check if credentials are needed for source/dest Arc::DataHandle surl_h(surl, dtr->get_usercfg()); Arc::DataHandle durl_h(durl, dtr->get_usercfg()); if (!dtr->get_usercfg().CredentialString().empty() && surl_h && !surl_h->RequiresCredentialsInFile() && durl_h && !durl_h->RequiresCredentialsInFile()) { // If file-based credentials are not required then send through stdin stdin_ = dtr->get_usercfg().CredentialString(); } else { // If child is going to be run under different user ID // we must ensure it will be able to read credentials. 
tmp_proxy_ = prepare_proxy(dtr->get_usercfg().ProxyPath(), child_uid, child_gid); if (!tmp_proxy_.empty()) { args.push_back("--sopt"); args.push_back("credential="+tmp_proxy_); args.push_back("--dopt"); args.push_back("credential="+tmp_proxy_); } else if(!dtr->get_usercfg().ProxyPath().empty()) { args.push_back("--sopt"); args.push_back("credential="+dtr->get_usercfg().ProxyPath()); args.push_back("--dopt"); args.push_back("credential="+dtr->get_usercfg().ProxyPath()); } } if (!dtr->get_usercfg().CACertificatesDirectory().empty()) { args.push_back("--sopt"); args.push_back("ca="+dtr->get_usercfg().CACertificatesDirectory()); args.push_back("--dopt"); args.push_back("ca="+dtr->get_usercfg().CACertificatesDirectory()); } args.push_back("--topt"); args.push_back("minspeed="+Arc::tostring(transfer_params.min_current_bandwidth)); args.push_back("--topt"); args.push_back("minspeedtime="+Arc::tostring(transfer_params.averaging_time)); args.push_back("--topt"); args.push_back("minavgspeed="+Arc::tostring(transfer_params.min_average_bandwidth)); args.push_back("--topt"); args.push_back("maxinacttime="+Arc::tostring(transfer_params.max_inactivity_time)); if (dtr->get_source()->CheckSize()) { args.push_back("--size"); args.push_back(Arc::tostring(dtr->get_source()->GetSize())); } if (dtr->get_source()->CheckCheckSum()) { std::string csum(dtr->get_source()->GetCheckSum()); std::string::size_type pos(csum.find(':')); if (pos == std::string::npos || pos == csum.length()-1) { logger_->msg(Arc::WARNING, "Bad checksum format %s", csum); } else { args.push_back("--cstype"); args.push_back(csum.substr(0, pos)); args.push_back("--csvalue"); args.push_back(csum.substr(pos+1)); } } else if (!dtr->get_destination()->GetURL().MetaDataOption("checksumtype").empty()) { args.push_back("--cstype"); args.push_back(dtr->get_destination()->GetURL().MetaDataOption("checksumtype")); if (!dtr->get_destination()->GetURL().MetaDataOption("checksumvalue").empty()) { args.push_back("--csvalue"); args.push_back(dtr->get_destination()->GetURL().MetaDataOption("checksumvalue")); } } else if (!dtr->get_destination()->GetURL().Option("checksum").empty()) { args.push_back("--cstype"); args.push_back(dtr->get_destination()->GetURL().Option("checksum")); } else if (dtr->get_destination()->AcceptsMeta() || dtr->get_destination()->ProvidesMeta()) { args.push_back("--cstype"); args.push_back(dtr->get_destination()->DefaultCheckSum()); } child_ = new Arc::Run(args); // Set up pipes child_->KeepStdout(false); child_->KeepStderr(false); child_->KeepStdin(false); child_->AssignUserId(child_uid); child_->AssignGroupId(child_gid); child_->AssignStdin(stdin_); // Start child std::string cmd; for(std::list::iterator arg = args.begin();arg!=args.end();++arg) { cmd += *arg; cmd += " "; } logger_->msg(Arc::DEBUG, "Running command: %s", cmd); if(!child_->Start()) { delete child_; child_=NULL; return; } } handler_->Add(this); } DataDeliveryLocalComm::~DataDeliveryLocalComm(void) { { Glib::Mutex::Lock lock(lock_); if(child_) { child_->Kill(10); // Give it a chance delete child_; child_=NULL; // And then kill for sure } } if(!tmp_proxy_.empty()) Arc::FileDelete(tmp_proxy_); if(handler_) handler_->Remove(this); } void DataDeliveryLocalComm::PullStatus(void) { Glib::Mutex::Lock lock(lock_); if(!child_) return; for(;;) { if(status_pos_ < sizeof(status_buf_)) { int l; // TODO: direct redirect for(;;) { char buf[1024+1]; l = child_->ReadStderr(0,buf,sizeof(buf)-1); if(l <= 0) break; buf[l] = 0; char* start = buf; for(;*start;) { char* end = 
strchr(start,'\n'); if(end) *end = 0; logger_->msg(Arc::INFO, "DataDelivery: %s", start); if(!end) break; start = end + 1; } } l = child_->ReadStdout(0,((char*)&status_buf_)+status_pos_,sizeof(status_buf_)-status_pos_); if(l == -1) { // child error or closed comm if(child_->Running()) { status_.commstatus = CommClosed; } else { status_.commstatus = CommExited; if(child_->Result() != 0) { logger_->msg(Arc::ERROR, "DataStagingDelivery exited with code %i", child_->Result()); status_.commstatus = CommFailed; } } delete child_; child_=NULL; return; } if(l == 0) break; status_pos_+=l; last_comm = Arc::Time(); } if(status_pos_ >= sizeof(status_buf_)) { status_buf_.error_desc[sizeof(status_buf_.error_desc)-1] = 0; status_=status_buf_; status_pos_-=sizeof(status_buf_); } } // check for stuck child process (no report through comm channel) Arc::Period t = Arc::Time() - last_comm; if (transfer_params.max_inactivity_time > 0 && t >= transfer_params.max_inactivity_time*2) { logger_->msg(Arc::ERROR, "Transfer killed after %i seconds without communication", t.GetPeriod()); child_->Kill(1); delete child_; child_ = NULL; } } bool DataDeliveryLocalComm::CheckComm(DTR_ptr dtr, std::vector& allowed_dirs, std::string& load_avg) { allowed_dirs.push_back("/"); double avg[3]; if (getloadavg(avg, 3) != 3) { load_avg = "-1"; } else { load_avg = Arc::tostring(avg[1]); } return true; } } // namespace DataStaging nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/examples0000644000000000000000000000013213214316026023016 xustar000000000000000030 mtime=1513200662.169774023 30 atime=1513200668.716854096 30 ctime=1513200662.169774023 nordugrid-arc-5.4.2/src/libs/data-staging/examples/0000755000175000002070000000000013214316026023141 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/libs/data-staging/examples/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612115114117025126 xustar000000000000000027 mtime=1362401359.636778 29 atime=1513200601.47403169 30 ctime=1513200662.165773974 nordugrid-arc-5.4.2/src/libs/data-staging/examples/Makefile.am0000644000175000002070000000104712115114117025173 0ustar00mockbuildmock00000000000000check_PROGRAMS = generator generator_SOURCES = generator-main.cpp Generator.h Generator.cpp generator_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) generator_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ ../libarcdatastaging.la $(GLIBMM_LIBS) exampledir = $(pkgdatadir)/examples/sdk example_DATA = $(generator_SOURCES) EXTRA_DIST = $(generator_SOURCES) nordugrid-arc-5.4.2/src/libs/data-staging/examples/PaxHeaders.7502/Generator.h0000644000000000000000000000012412110726523025174 xustar000000000000000027 mtime=1361292627.682019 27 atime=1513200576.798729 30 ctime=1513200662.168774011 nordugrid-arc-5.4.2/src/libs/data-staging/examples/Generator.h0000644000175000002070000000253012110726523025241 0ustar00mockbuildmock00000000000000#ifndef GENERATOR_H_ #define GENERATOR_H_ #include #include #include // This Generator basic implementation shows how a Generator can // be written. It has one method, run(), which creates a single DTR // and submits it to the Scheduler. 
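// A minimal usage sketch, mirroring generator-main.cpp in this directory
// (the mock:// URLs require ARC to be configured with --enable-mock-dmc):
//
//   Generator generator;
//   generator.start();                                    // start Scheduler and DTR threads
//   generator.run("mock://mocksrc/mock.0", "mock://mockdest/mock.0");
//   while (generator.counter.get() > 0) sleep(1);         // wait until receiveDTR() is called
//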
class Generator: public DataStaging::DTRCallback { private: // Condition to wait on until DTR has finished static Arc::SimpleCondition cond; // DTR Scheduler DataStaging::Scheduler scheduler; // Logger object static Arc::Logger logger; // Root LogDestinations to be used in receiveDTR std::list root_destinations; public: // Counter for main to know how many DTRs are in the system Arc::SimpleCounter counter; // Create a new Generator. start() must be called to start DTR threads. Generator(); // Stop Generator and DTR threads ~Generator(); // Implementation of callback from DTRCallback - the callback method used // when DTR processing is complete to pass the DTR back to the generator. // It decrements counter. virtual void receiveDTR(DataStaging::DTR_ptr dtr); // Start Generator and DTR threads void start(); // Submit a DTR with given source and destination. Increments counter. void run(const std::string& source, const std::string& destination); }; #endif /* GENERATOR_H_ */ nordugrid-arc-5.4.2/src/libs/data-staging/examples/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315731025142 xustar000000000000000030 mtime=1513200601.527032338 30 atime=1513200649.694621447 30 ctime=1513200662.166773986 nordugrid-arc-5.4.2/src/libs/data-staging/examples/Makefile.in0000644000175000002070000006671313214315731025225 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ check_PROGRAMS = generator$(EXEEXT) subdir = src/libs/data-staging/examples DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am_generator_OBJECTS = generator-generator-main.$(OBJEXT) \ generator-Generator.$(OBJEXT) generator_OBJECTS = $(am_generator_OBJECTS) am__DEPENDENCIES_1 = generator_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ ../libarcdatastaging.la $(am__DEPENDENCIES_1) generator_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(generator_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(generator_SOURCES) DIST_SOURCES = $(generator_SOURCES) am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` 
am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(exampledir)" DATA = $(example_DATA) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = 
@GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ 
XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ generator_SOURCES = generator-main.cpp Generator.h Generator.cpp generator_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) generator_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ ../libarcdatastaging.la $(GLIBMM_LIBS) exampledir = $(pkgdatadir)/examples/sdk example_DATA = $(generator_SOURCES) EXTRA_DIST = $(generator_SOURCES) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/libs/data-staging/examples/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/libs/data-staging/examples/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-checkPROGRAMS: @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list generator$(EXEEXT): $(generator_OBJECTS) $(generator_DEPENDENCIES) @rm -f generator$(EXEEXT) $(generator_LINK) $(generator_OBJECTS) $(generator_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/generator-Generator.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/generator-generator-main.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< generator-generator-main.o: generator-main.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(generator_CXXFLAGS) $(CXXFLAGS) -MT generator-generator-main.o -MD -MP -MF $(DEPDIR)/generator-generator-main.Tpo -c -o generator-generator-main.o `test -f 'generator-main.cpp' || echo '$(srcdir)/'`generator-main.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/generator-generator-main.Tpo $(DEPDIR)/generator-generator-main.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='generator-main.cpp' object='generator-generator-main.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(generator_CXXFLAGS) $(CXXFLAGS) -c -o generator-generator-main.o `test -f 'generator-main.cpp' || echo '$(srcdir)/'`generator-main.cpp generator-generator-main.obj: generator-main.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(generator_CXXFLAGS) $(CXXFLAGS) -MT generator-generator-main.obj -MD -MP -MF $(DEPDIR)/generator-generator-main.Tpo -c -o generator-generator-main.obj `if test -f 'generator-main.cpp'; then $(CYGPATH_W) 'generator-main.cpp'; else $(CYGPATH_W) '$(srcdir)/generator-main.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/generator-generator-main.Tpo $(DEPDIR)/generator-generator-main.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='generator-main.cpp' object='generator-generator-main.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(generator_CXXFLAGS) $(CXXFLAGS) -c -o generator-generator-main.obj `if test -f 'generator-main.cpp'; then $(CYGPATH_W) 'generator-main.cpp'; else $(CYGPATH_W) '$(srcdir)/generator-main.cpp'; fi` generator-Generator.o: Generator.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(generator_CXXFLAGS) $(CXXFLAGS) -MT generator-Generator.o -MD -MP -MF $(DEPDIR)/generator-Generator.Tpo -c -o generator-Generator.o `test -f 'Generator.cpp' || echo '$(srcdir)/'`Generator.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/generator-Generator.Tpo $(DEPDIR)/generator-Generator.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Generator.cpp' object='generator-Generator.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(generator_CXXFLAGS) $(CXXFLAGS) -c -o generator-Generator.o `test -f 'Generator.cpp' || echo '$(srcdir)/'`Generator.cpp generator-Generator.obj: Generator.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(generator_CXXFLAGS) $(CXXFLAGS) -MT generator-Generator.obj -MD -MP -MF $(DEPDIR)/generator-Generator.Tpo -c -o generator-Generator.obj `if test -f 'Generator.cpp'; then $(CYGPATH_W) 'Generator.cpp'; else $(CYGPATH_W) '$(srcdir)/Generator.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/generator-Generator.Tpo $(DEPDIR)/generator-Generator.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Generator.cpp' object='generator-Generator.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(generator_CXXFLAGS) $(CXXFLAGS) -c -o generator-Generator.obj `if test -f 'Generator.cpp'; then $(CYGPATH_W) 'Generator.cpp'; else $(CYGPATH_W) '$(srcdir)/Generator.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-exampleDATA: $(example_DATA) @$(NORMAL_INSTALL) test -z "$(exampledir)" || $(MKDIR_P) "$(DESTDIR)$(exampledir)" @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(exampledir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(exampledir)" || exit $$?; \ done uninstall-exampleDATA: @$(NORMAL_UNINSTALL) @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(exampledir)' && rm -f" 
$$files ")"; \ cd "$(DESTDIR)$(exampledir)" && rm -f $$files ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(exampledir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-checkPROGRAMS clean-generic clean-libtool \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-exampleDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-exampleDATA .MAKE: check-am install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean \ clean-checkPROGRAMS clean-generic clean-libtool ctags \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exampleDATA \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-exampleDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/libs/data-staging/examples/PaxHeaders.7502/Generator.cpp0000644000000000000000000000012413153456243025536 xustar000000000000000027 mtime=1504599203.178558 27 atime=1513200576.797729 30 ctime=1513200662.170774035 nordugrid-arc-5.4.2/src/libs/data-staging/examples/Generator.cpp0000644000175000002070000000455313153456243025612 0ustar00mockbuildmock00000000000000#include #include #include "Generator.h" Arc::Logger Generator::logger(Arc::Logger::getRootLogger(), "Generator"); Arc::SimpleCondition Generator::cond; Generator::Generator() { // Set up logging root_destinations = Arc::Logger::getRootLogger().getDestinations(); DataStaging::DTR::LOG_LEVEL = Arc::Logger::getRootLogger().getThreshold(); } Generator::~Generator() { logger.msg(Arc::INFO, "Shutting down scheduler"); scheduler.stop(); logger.msg(Arc::INFO, "Scheduler stopped, exiting"); } void Generator::receiveDTR(DataStaging::DTR_ptr dtr) { // root logger is disabled in Scheduler thread so need to add it here Arc::Logger::getRootLogger().addDestinations(root_destinations); logger.msg(Arc::INFO, "Received DTR %s back from scheduler in state %s", dtr->get_id(), dtr->get_status().str()); Arc::Logger::getRootLogger().removeDestinations(); // DTR logger destinations can be destroyed when DTR has finished dtr->clean_log_destinations(); counter.dec(); } void Generator::start() { // Starting scheduler with default configuration logger.msg(Arc::INFO, "Generator started"); logger.msg(Arc::INFO, "Starting DTR threads"); scheduler.SetDumpLocation("/tmp/dtr.log"); scheduler.start(); } void Generator::run(const std::string& source, const std::string& destination) { std::string job_id = Arc::UUID(); Arc::initializeCredentialsType cred_type(Arc::initializeCredentialsType::TryCredentials); Arc::UserConfig cfg(cred_type); // check credentials if (!Arc::Credential::IsCredentialsValid(cfg)) { logger.msg(Arc::ERROR, "No valid credentials found, exiting"); return; } cfg.UtilsDirPath(Arc::UserConfig::ARCUSERDIRECTORY); DataStaging::DTRLogger log(new Arc::Logger(Arc::Logger::getRootLogger(), "DataStaging")); Arc::LogDestination * dest = new Arc::LogStream(std::cout); log->addDestination(*dest); DataStaging::DTR_ptr dtr(new DataStaging::DTR(source, destination, cfg, job_id, Arc::User().get_uid(), log)); if (!(*dtr)) { logger.msg(Arc::ERROR, "Problem creating dtr (source %s, destination %s)", source, destination); return; } // register callback with DTR dtr->registerCallback(this,DataStaging::GENERATOR); dtr->registerCallback(&scheduler,DataStaging::SCHEDULER); dtr->set_tries_left(5); DataStaging::DTR::push(dtr, DataStaging::SCHEDULER); counter.inc(); } nordugrid-arc-5.4.2/src/libs/data-staging/examples/PaxHeaders.7502/generator-main.cpp0000644000000000000000000000012412207406206026511 xustar000000000000000027 mtime=1377700998.944273 27 atime=1513200576.798729 30 ctime=1513200662.167773999 nordugrid-arc-5.4.2/src/libs/data-staging/examples/generator-main.cpp0000644000175000002070000000376712207406206026573 0ustar00mockbuildmock00000000000000/* // To compile this example requires that nordugrid-arc-devel be installed. 
It // also requires including headers of external libraries used by ARC core code: // // g++ -o generator `pkg-config --cflags glibmm-2.4` -I/usr/include/libxml2 \ // -larcdatastaging Generator.cpp Generator.h generator-main.cpp // // If ARC is installed in a non-standard location, the options // -L ARC_LOCATION/lib and -I ARC_LOCATION/include should also be used */ #ifdef HAVE_CONFIG_H #include #endif #include #include #include "Generator.h" static Arc::SimpleCounter counter; static bool run = true; static void do_shutdown(int) { run = false; } static void usage() { std::cout << "Usage: generator [num mock transfers]" << std::endl; std::cout << " generator source destination" << std::endl; std::cout << "To use mock transfers ARC must be built with configure --enable-mock-dmc" << std::endl; std::cout << "The default number of mock transfers is 10" << std::endl; } int main(int argc, char** argv) { signal(SIGTTOU,SIG_IGN); signal(SIGTTIN,SIG_IGN); signal(SIGINT, do_shutdown); // Log to stderr Arc::LogStream logcerr(std::cerr); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::INFO); Generator generator; int num = 10; if (argc == 1 || argc == 2) { // run mock a number of times if (argc == 2 && (std::string(argv[1]) == "-h" || !Arc::stringto(argv[1], num))) { usage(); return 1; } generator.start(); for (int i = 0; i < num; ++i) { std::string source = "mock://mocksrc/mock." + Arc::tostring(i); std::string destination = "mock://mockdest/mock." + Arc::tostring(i); generator.run(source, destination); } } else if (argc == 3) { // run with given source and destination generator.start(); generator.run(argv[1], argv[2]); } else { usage(); return 1; } while (generator.counter.get() > 0 && run) { sleep(1); } return 0; } nordugrid-arc-5.4.2/src/libs/data-staging/examples/PaxHeaders.7502/README0000644000000000000000000000012312110726523023754 xustar000000000000000027 mtime=1361292627.682019 27 atime=1513200576.798729 29 ctime=1513200662.16377395 nordugrid-arc-5.4.2/src/libs/data-staging/examples/README0000644000175000002070000000006012110726523024016 0ustar00mockbuildmock00000000000000Examples of how to use the data staging library.nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/DataDeliveryRemoteComm.cpp0000644000000000000000000000012212754411434026334 xustar000000000000000027 mtime=1471288092.080352 26 atime=1513200576.80773 29 ctime=1513200662.10577324 nordugrid-arc-5.4.2/src/libs/data-staging/DataDeliveryRemoteComm.cpp0000644000175000002070000004341712754411434026414 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "DataDeliveryRemoteComm.h" namespace DataStaging { Arc::Logger DataDeliveryRemoteComm::logger(Arc::Logger::getRootLogger(), "DataStaging.DataDeliveryRemoteComm"); DataDeliveryRemoteComm::DataDeliveryRemoteComm(DTR_ptr dtr, const TransferParameters& params) : DataDeliveryComm(dtr, params), client(NULL), dtr_full_id(dtr->get_id()), query_retries(20), endpoint(dtr->get_delivery_endpoint()), timeout(dtr->get_usercfg().Timeout()), valid(false) { { Glib::Mutex::Lock lock(lock_); // Initial empty status memset(&status_,0,sizeof(status_)); FillStatus(); } if(!dtr->get_source()) return; if(!dtr->get_destination()) return; // check for alternative source or destination eg cache, mapped URL, TURL std::string surl; if (!dtr->get_mapped_source().empty()) { surl = dtr->get_mapped_source(); } else if (!dtr->get_source()->TransferLocations().empty()) { surl = 
dtr->get_source()->TransferLocations()[0].fullstr(); } else { logger_->msg(Arc::ERROR, "No locations defined for %s", dtr->get_source()->str()); return; } if (dtr->get_destination()->TransferLocations().empty()) { logger_->msg(Arc::ERROR, "No locations defined for %s", dtr->get_destination()->str()); return; } std::string durl = dtr->get_destination()->TransferLocations()[0].fullstr(); bool caching = false; if ((dtr->get_cache_state() == CACHEABLE) && !dtr->get_cache_file().empty()) { durl = dtr->get_cache_file(); caching = true; } if (dtr->host_cert_for_remote_delivery()) { Arc::initializeCredentialsType cred_type(Arc::initializeCredentialsType::TryCredentials); Arc::UserConfig host_cfg(cred_type); host_cfg.ProxyPath(""); // to force using cert/key files instead of non-existent proxy host_cfg.ApplyToConfig(cfg); } else { dtr->get_usercfg().ApplyToConfig(cfg); } // connect to service and make a new transfer request logger_->msg(Arc::VERBOSE, "Connecting to Delivery service at %s", endpoint.str()); client = new Arc::ClientSOAP(cfg, endpoint, timeout); Arc::NS ns; Arc::PayloadSOAP request(ns); Arc::XMLNode dtrnode = request.NewChild("DataDeliveryStart").NewChild("DTR"); dtrnode.NewChild("ID") = dtr_full_id; dtrnode.NewChild("Source") = surl; dtrnode.NewChild("Destination") = durl; if (dtr->get_source()->CheckSize()) dtrnode.NewChild("Size") = Arc::tostring(dtr->get_source()->GetSize()); if (dtr->get_source()->CheckCheckSum()) dtrnode.NewChild("CheckSum") = dtr->get_source()->GetCheckSum(); dtrnode.NewChild("Uid") = Arc::tostring(dtr->get_local_user().get_uid()); dtrnode.NewChild("Gid") = Arc::tostring(dtr->get_local_user().get_gid()); // transfer parameters dtrnode.NewChild("MinAverageSpeed") = Arc::tostring(params.min_average_bandwidth); dtrnode.NewChild("AverageTime") = Arc::tostring(params.averaging_time); dtrnode.NewChild("MinCurrentSpeed") = Arc::tostring(params.min_current_bandwidth); dtrnode.NewChild("MaxInactivityTime") = Arc::tostring(params.max_inactivity_time); // caching if (caching) dtrnode.NewChild("Caching") = "true"; else dtrnode.NewChild("Caching") = "false"; // delegate credentials Arc::XMLNode op = request.Child(0); if (!SetupDelegation(op, dtr->get_usercfg())) { logger_->msg(Arc::ERROR, "Failed to set up credential delegation with %s", endpoint.str()); return; } std::string xml; request.GetXML(xml, true); logger_->msg(Arc::DEBUG, "Request:\n%s", xml); Arc::PayloadSOAP *response = NULL; Arc::MCC_Status status = client->process(&request, &response); if (!status) { logger_->msg(Arc::ERROR, "Could not connect to service %s: %s", endpoint.str(), (std::string)status); if (response) delete response; return; } if (!response) { logger_->msg(Arc::ERROR, "No SOAP response from Delivery service %s", endpoint.str()); return; } response->GetXML(xml, true); logger_->msg(Arc::DEBUG, "Response:\n%s", xml); if (response->IsFault()) { Arc::SOAPFault& fault = *response->Fault(); std::string err("SOAP fault: %s", fault.Code()); for (int n = 0;;++n) { if (fault.Reason(n).empty()) break; err += ": " + fault.Reason(n); } logger_->msg(Arc::ERROR, "Failed to start transfer request: %s", err); delete response; return; } Arc::XMLNode resultnode = (*response)["DataDeliveryStartResponse"]["DataDeliveryStartResult"]["Result"][0]; if (!resultnode || !resultnode["ResultCode"]) { logger_->msg(Arc::ERROR, "Bad format in XML response from service at %s: %s", endpoint.str(), xml); delete response; return; } std::string resultcode = (std::string)(resultnode["ResultCode"]); if (resultcode != "OK") { 
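// The service refused to start the transfer; the ErrorDescription element of
// the Result node carries the reason, which is logged below before giving up.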
logger_->msg(Arc::ERROR, "Could not make new transfer request: %s: %s", resultcode, (std::string)(resultnode[0]["ErrorDescription"])); delete response; return; } logger_->msg(Arc::INFO, "Started remote Delivery at %s", endpoint.str()); delete response; valid = true; handler_->Add(this); } DataDeliveryRemoteComm::~DataDeliveryRemoteComm() { // If transfer is still going, send cancellation request to service if (valid) CancelDTR(); if (handler_) handler_->Remove(this); Glib::Mutex::Lock lock(lock_); delete client; } void DataDeliveryRemoteComm::CancelDTR() { Glib::Mutex::Lock lock(lock_); if (!client) return; Arc::NS ns; Arc::PayloadSOAP request(ns); Arc::XMLNode dtrnode = request.NewChild("DataDeliveryCancel").NewChild("DTR"); dtrnode.NewChild("ID") = dtr_full_id; std::string xml; request.GetXML(xml, true); logger_->msg(Arc::DEBUG, "Request:\n%s", xml); Arc::PayloadSOAP *response = NULL; Arc::MCC_Status status = client->process(&request, &response); if (!status) { logger_->msg(Arc::ERROR, "Failed to send cancel request: %s", (std::string)status); if (response) delete response; return; } if (!response) { logger_->msg(Arc::ERROR, "Failed to cancel: No SOAP response"); return; } response->GetXML(xml, true); logger_->msg(Arc::DEBUG, "Response:\n%s", xml); if (response->IsFault()) { Arc::SOAPFault& fault = *response->Fault(); std::string err("SOAP fault: %s", fault.Code()); for (int n = 0;;++n) { if (fault.Reason(n).empty()) break; err += ": " + fault.Reason(n); } logger_->msg(Arc::ERROR, "Failed to cancel transfer request: %s", err); delete response; return; } Arc::XMLNode resultnode = (*response)["DataDeliveryCancelResponse"]["DataDeliveryCancelResult"]["Result"][0]; if (!resultnode || !resultnode["ResultCode"]) { logger_->msg(Arc::ERROR, "Bad format in XML response: %s", xml); delete response; return; } if ((std::string)resultnode["ResultCode"] != "OK") { Arc::XMLNode errnode = resultnode["ErrorDescription"]; logger_->msg(Arc::ERROR, "Failed to cancel: %s", (std::string)errnode); } delete response; } void DataDeliveryRemoteComm::PullStatus() { // send query request to service and fill status_ Glib::Mutex::Lock lock(lock_); if (!client) return; // check time since last query - check every second for the first 20s and // after every 5s // TODO be more intelligent, using transfer rate and file size if (Arc::Time() - start_ < 20 && Arc::Time() - Arc::Time(status_.timestamp) < 1) return; if (Arc::Time() - start_ > 20 && Arc::Time() - Arc::Time(status_.timestamp) < 5) return; Arc::NS ns; Arc::PayloadSOAP request(ns); Arc::XMLNode dtrnode = request.NewChild("DataDeliveryQuery").NewChild("DTR"); dtrnode.NewChild("ID") = dtr_full_id; std::string xml; request.GetXML(xml, true); logger_->msg(Arc::DEBUG, "Request:\n%s", xml); Arc::PayloadSOAP *response = NULL; Arc::MCC_Status status = client->process(&request, &response); if (!status) { logger_->msg(Arc::ERROR, "%s", (std::string)status); status_.commstatus = CommFailed; if (response) delete response; valid = false; return; } if (!response) { if (--query_retries > 0) { HandleQueryFault("No SOAP response from delivery service"); return; } logger_->msg(Arc::ERROR, "No SOAP response from delivery service"); status_.commstatus = CommFailed; valid = false; return; } response->GetXML(xml, true); logger_->msg(Arc::DEBUG, "Response:\n%s", xml); if (response->IsFault()) { Arc::SOAPFault& fault = *response->Fault(); std::string err("SOAP fault: %s", fault.Code()); for (int n = 0;;++n) { if (fault.Reason(n).empty()) break; err += ": " + fault.Reason(n); } delete 
response; if (--query_retries > 0) { HandleQueryFault("Failed to query state: " + err); return; } logger_->msg(Arc::ERROR, "Failed to query state: %s", err); status_.commstatus = CommFailed; strncpy(status_.error_desc, "SOAP error in connection with delivery service", sizeof(status_.error_desc)); valid = false; return; } Arc::XMLNode resultnode = (*response)["DataDeliveryQueryResponse"]["DataDeliveryQueryResult"]["Result"][0]; if (!resultnode || !resultnode["ResultCode"]) { logger_->msg(Arc::ERROR, "Bad format in XML response: %s", xml); delete response; status_.commstatus = CommFailed; valid = false; return; } // Fill status fields with results from service FillStatus(resultnode[0]); delete response; } bool DataDeliveryRemoteComm::CheckComm(DTR_ptr dtr, std::vector& allowed_dirs, std::string& load_avg) { // call Ping Arc::MCCConfig cfg; if (dtr->host_cert_for_remote_delivery()) { Arc::initializeCredentialsType cred_type(Arc::initializeCredentialsType::TryCredentials); Arc::UserConfig host_cfg(cred_type); host_cfg.ProxyPath(""); // to force using cert/key files instead of non-existent proxy host_cfg.ApplyToConfig(cfg); } else { dtr->get_usercfg().ApplyToConfig(cfg); } dtr->get_logger()->msg(Arc::VERBOSE, "Connecting to Delivery service at %s", dtr->get_delivery_endpoint().str()); Arc::ClientSOAP client(cfg, dtr->get_delivery_endpoint(), dtr->get_usercfg().Timeout()); Arc::NS ns; Arc::PayloadSOAP request(ns); Arc::XMLNode ping = request.NewChild("DataDeliveryPing"); std::string xml; request.GetXML(xml, true); dtr->get_logger()->msg(Arc::DEBUG, "Request:\n%s", xml); Arc::PayloadSOAP *response = NULL; Arc::MCC_Status status = client.process(&request, &response); if (!status) { dtr->get_logger()->msg(Arc::ERROR, "Could not connect to service %s: %s", dtr->get_delivery_endpoint().str(), (std::string)status); if (response) delete response; return false; } if (!response) { dtr->get_logger()->msg(Arc::ERROR, "No SOAP response from Delivery service %s", dtr->get_delivery_endpoint().str()); return false; } response->GetXML(xml, true); dtr->get_logger()->msg(Arc::DEBUG, "Response:\n%s", xml); if (response->IsFault()) { Arc::SOAPFault& fault = *response->Fault(); std::string err("SOAP fault: %s", fault.Code()); for (int n = 0;;++n) { if (fault.Reason(n).empty()) break; err += ": " + fault.Reason(n); } dtr->get_logger()->msg(Arc::ERROR, "SOAP fault from delivery service at %s: %s", dtr->get_delivery_endpoint().str(), err); delete response; return false; } Arc::XMLNode resultnode = (*response)["DataDeliveryPingResponse"]["DataDeliveryPingResult"]["Result"][0]; if (!resultnode || !resultnode["ResultCode"]) { dtr->get_logger()->msg(Arc::ERROR, "Bad format in XML response from delivery service at %s: %s", dtr->get_delivery_endpoint().str(), xml); delete response; return false; } std::string resultcode = (std::string)(resultnode["ResultCode"]); if (resultcode != "OK") { dtr->get_logger()->msg(Arc::ERROR, "Error pinging delivery service at %s: %s: %s", dtr->get_delivery_endpoint().str(), resultcode, (std::string)(resultnode[0]["ErrorDescription"])); delete response; return false; } for (Arc::XMLNode dir = resultnode["AllowedDir"]; dir; ++dir) { allowed_dirs.push_back((std::string)dir); dtr->get_logger()->msg(Arc::DEBUG, "Dir %s allowed at service %s", (std::string)dir, dtr->get_delivery_endpoint().str()); } if (resultnode["LoadAvg"]) { load_avg = (std::string)(resultnode["LoadAvg"]); } else { load_avg = "-1"; } delete response; return true; } void DataDeliveryRemoteComm::FillStatus(const Arc::XMLNode& node) 
{ if (!node) { // initial state std::string empty(""); status_.commstatus = DataDeliveryComm::CommInit; status_.timestamp = ::time(NULL); status_.status = DTRStatus::NULL_STATE; status_.error = DTRErrorStatus::NONE_ERROR; status_.error_location = DTRErrorStatus::NO_ERROR_LOCATION; strncpy(status_.error_desc, empty.c_str(), sizeof(status_.error_desc)); status_.streams = 0; status_.transferred = 0; status_.size = 0; status_.transfer_time = 0; status_.offset = 0; status_.speed = 0; strncpy(status_.checksum, empty.c_str(), sizeof(status_.checksum)); return; } Arc::XMLNode datanode = node["ResultCode"]; if (std::string(datanode) == "TRANSFERRED") { status_.commstatus = CommExited; status_.status = DTRStatus::TRANSFERRED; } else if (std::string(datanode) == "TRANSFER_ERROR") { status_.commstatus = CommFailed; status_.status = DTRStatus::TRANSFERRED; } else if (std::string(datanode) == "SERVICE_ERROR") { status_.commstatus = CommFailed; status_.status = DTRStatus::TRANSFERRED; } else { status_.commstatus = CommNoError; status_.status = DTRStatus::TRANSFERRING; } status_.timestamp = time(NULL); datanode = node["ErrorStatus"]; if (datanode) { int error_status; Arc::stringto(std::string(datanode), error_status); status_.error = (DTRErrorStatus::DTRErrorStatusType)error_status; } datanode = node["ErrorLocation"]; if (datanode) { int error_location; Arc::stringto(std::string(datanode), error_location); status_.error_location = (DTRErrorStatus::DTRErrorLocation)error_location; } datanode = node["ErrorDescription"]; if (datanode) { strncpy(status_.error_desc, ((std::string)datanode).c_str(), sizeof(status_.error_desc)); } datanode = node["BytesTransferred"]; if (datanode) { unsigned long long int bytes; Arc::stringto(std::string(datanode), bytes); status_.transferred = bytes; } datanode = node["TransferTime"]; if (datanode) { unsigned long long int t; Arc::stringto(std::string(datanode), t); status_.transfer_time = t; } // TODO size, offset, speed (currently not used) datanode = node["CheckSum"]; if (datanode) { strncpy(status_.checksum, ((std::string)datanode).c_str(), sizeof(status_.checksum)); } // if terminal state, write log if (status_.commstatus != CommNoError) { // log message is limited to 2048 chars so just print last few lines std::string log = (std::string)node["Log"]; if (!log.empty()) { if (log.size() > 2000) log = log.substr(log.find('\n', log.size()-2000)); logger_->msg(Arc::INFO, "DataDelivery log tail:\n%s", log); } valid = false; } } bool DataDeliveryRemoteComm::SetupDelegation(Arc::XMLNode& op, const Arc::UserConfig& usercfg) { const std::string& cert = (!usercfg.ProxyPath().empty() ? usercfg.ProxyPath() : usercfg.CertificatePath()); const std::string& key = (!usercfg.ProxyPath().empty() ? 
usercfg.ProxyPath() : usercfg.KeyPath()); const std::string& credentials = usercfg.CredentialString(); if (credentials.empty() && (key.empty() || cert.empty())) { logger_->msg(Arc::VERBOSE, "Failed locating credentials"); return false; } if(!client->Load()) { logger_->msg(Arc::VERBOSE, "Failed to initiate client connection"); return false; } Arc::MCC* entry = client->GetEntry(); if(!entry) { logger_->msg(Arc::VERBOSE, "Client connection has no entry point"); return false; } Arc::DelegationProviderSOAP * deleg = NULL; // Use in-memory credentials if set in UserConfig if (!credentials.empty()) deleg = new Arc::DelegationProviderSOAP(credentials); else deleg = new Arc::DelegationProviderSOAP(cert, key); logger_->msg(Arc::VERBOSE, "Initiating delegation procedure"); if (!deleg->DelegateCredentialsInit(*entry, &(client->GetContext()))) { logger_->msg(Arc::VERBOSE, "Failed to initiate delegation credentials"); delete deleg; return false; } deleg->DelegatedToken(op); delete deleg; return true; } void DataDeliveryRemoteComm::HandleQueryFault(const std::string& err) { // Just return without changing status logger_->msg(Arc::WARNING, err); status_.timestamp = time(NULL); // A reconnect may be needed after losing connection delete client; client = new Arc::ClientSOAP(cfg, endpoint, timeout); } } // namespace DataStaging nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/DTR.h0000644000000000000000000000012413153455410022063 xustar000000000000000027 mtime=1504598792.949892 27 atime=1513200576.800729 30 ctime=1513200662.093773093 nordugrid-arc-5.4.2/src/libs/data-staging/DTR.h0000644000175000002070000006553113153455410022142 0ustar00mockbuildmock00000000000000// Summary page of data staging for doxygen namespace DataStaging { /** * \defgroup datastaging ARC data staging (libarcdatastaging) * * ARC data staging components form a complete data transfer management system. * Whereas \ref data is a library for data access, enabling several types of * operation on data files on the Grid using a variety of access protocols, * \ref datastaging is a framework for managed data transfer to and from the * Grid. The data staging system is designed to run as a persistent process, to * execute data transfers on demand. Data transfers are defined and fed into * the system, and then notification is given when they complete. No knowledge * is required of the internal workings of the Grid, a user only needs to * specify URLs representing the source and destination of the transfer. * * The system is highly configurable and features an intelligent priority, * fair-share and error handling mechanism, as well as the ability to spread * data transfer across multiple hosts using ARC's DataDelivery service. It is * used by ARC's Computing Element (A-REX) for pre- and post- job data transfer * of input and output files. Note that this system is primarily for data * transfer to and from local files and that third-party transfer is not * supported. It is designed for the case of pulling or pushing data between * the Grid and a local file system, rather than a service for transfer between * two Grid storage elements. It is possible to transfer data between two * remote endpoints, but all data flows through the client. * * Simple examples of how to use libarcdatastaging are shown for several * languages in the \ref dtrgenerator "DTR examples page". In all the examples * a Generator class receives as input a source and destination, and creates * a DTR which describes the data transfer. 
It is then passed to the Scheduler * and the Generator defines a receiveDTR() method for the Scheduler to calls * to notify that the transfer has finished. The examples all allow using the * Generator as a basic copy tool from the command line to copy a single file. * * For more information see http://wiki.nordugrid.org/index.php/Data_Staging */ } // namespace DataStaging #ifndef DTR_H_ #define DTR_H_ #include #include #include #include #include #include #include #include #include #include "DTRStatus.h" #ifdef WIN32 #ifndef uid_t #define uid_t int #endif #ifndef gid_t #define gid_t int #endif #endif /// DataStaging contains all components for data transfer scheduling and execution. namespace DataStaging { class DTR; /// Provides automatic memory management of DTRs and thread-safe destruction. /** \ingroup datastaging */ typedef Arc::ThreadedPointer DTR_ptr; /// The DTR's Logger object can be used outside the DTR object with DTRLogger. /** \ingroup datastaging */ typedef Arc::ThreadedPointer DTRLogger; /// Components of the data staging framework /** \ingroup datastaging */ enum StagingProcesses { GENERATOR, ///< Creator of new DTRs and receiver of completed DTRs SCHEDULER, ///< Controls queues and moves DTRs bewteen other components when necessary PRE_PROCESSOR, ///< Performs all pre-transfer operations DELIVERY, ///< Performs physical transfer POST_PROCESSOR ///< Performs all post-transfer operations }; /// Internal state of StagingProcesses /** \ingroup datastaging */ enum ProcessState { INITIATED, ///< Process is ready to start RUNNING, ///< Process is running TO_STOP, ///< Process has been instructed to stop STOPPED ///< Proecess has stopped }; /// Represents limits and properties of a DTR transfer. These generally apply to all DTRs. /** * \ingroup datastaging * \headerfile DTR.h arc/data-staging/DTR.h */ class TransferParameters { public: /// Minimum average bandwidth in bytes/sec. /** * If the average bandwidth used over the whole transfer drops below this * level the transfer will be killed. */ unsigned long long int min_average_bandwidth; /// Maximum inactivity time in sec. /** * If transfer stops for longer than this time it will be killed. */ unsigned int max_inactivity_time; /// Minimum current bandwidth in bytes/sec. /** * If bandwidth averaged over the previous averaging_time seconds is less * than min_current_bandwidth the transfer will be killed (allows transfers * which slow down to be killed quicker). */ unsigned long long int min_current_bandwidth; /// The time in seconds over which to average the calculation of min_current_bandwidth. unsigned int averaging_time; /// Constructor. Initialises all values to zero. TransferParameters() : min_average_bandwidth(0), max_inactivity_time(0), min_current_bandwidth(0), averaging_time(0) {}; }; /// The configured cache directories /** * \ingroup datastaging * \headerfile DTR.h arc/data-staging/DTR.h */ class DTRCacheParameters { public: /// List of (cache dir [link dir]) std::vector cache_dirs; /// List of (cache dir [link dir]) for remote caches std::vector remote_cache_dirs; /// List of draining caches. Not necessary for data staging but here for completeness. 
std::vector drain_cache_dirs; /// Constructor with empty lists initialised DTRCacheParameters(void) {}; /// Constructor with supplied cache lists DTRCacheParameters(std::vector caches, std::vector remote_caches, std::vector drain_caches); }; /// Class for storing credential information /** * To avoid handling credentials directly this class is used to hold * information in simple string/time attributes. It should be filled before * the DTR is started. * \ingroup datastaging * \headerfile DTR.h arc/data-staging/DTR.h */ class DTRCredentialInfo { public: /// Default constructor DTRCredentialInfo() {}; /// Constructor with supplied credential info DTRCredentialInfo(const std::string& DN, const Arc::Time& expirytime, const std::list vomsfqans); /// Get the DN std::string getDN() const { return DN; }; /// Get the expiry time Arc::Time getExpiryTime() const { return expirytime; }; /// Get the VOMS VO std::string extractVOMSVO() const; /// Get the VOMS Group (first in the supplied list of fqans) std::string extractVOMSGroup() const; /// Get the VOMS Role (first in the supplied list of fqans) std::string extractVOMSRole() const; private: std::string DN; Arc::Time expirytime; std::list vomsfqans; }; /// Represents possible cache states of this DTR /** \ingroup datastaging */ enum CacheState { CACHEABLE, ///< Source should be cached NON_CACHEABLE, ///< Source should not be cached CACHE_ALREADY_PRESENT, ///< Source is available in cache from before CACHE_DOWNLOADED, ///< Source has just been downloaded and put in cache CACHE_LOCKED, ///< Cache file is locked CACHE_SKIP, ///< Source is cacheable but due to some problem should not be cached CACHE_NOT_USED ///< Cache was started but was not used }; /// The base class from which all callback-enabled classes should be derived. /** * This class is a container for a callback method which is called when a * DTR is to be passed to a component. Several components in data staging * (eg Scheduler, Generator) are subclasses of DTRCallback, which allows * them to receive DTRs through the callback system. * \ingroup datastaging * \headerfile DTR.h arc/data-staging/DTR.h */ class DTRCallback { public: /// Empty virtual destructor virtual ~DTRCallback() {}; /// Defines the callback method called when a DTR is pushed to this object. /** * The automatic memory management of DTR_ptr ensures that the DTR object * is only deleted when the last copy is deleted. */ virtual void receiveDTR(DTR_ptr dtr) = 0; // TODO //virtual void suspendDTR(DTR& dtr) = 0; //virtual void cancelDTR(DTR& dtr) = 0; }; /// Data Transfer Request. /** * DTR stands for Data Transfer Request and a DTR describes a data transfer * between two endpoints, a source and a destination. There are several * parameters and options relating to the transfer contained in a DTR. * The normal workflow is for a Generator to create a DTR and send it to the * Scheduler for processing using DTR::push(SCHEDULER). If the Generator is a * subclass of DTRCallback, when the Scheduler has finished with the DTR * the DTRCallback::receiveDTR() callback method is called. * * DTRs should always be used through the Arc::ThreadedPointer DTR_ptr. This * ensures proper memory management when passing DTRs among various threads. * To enforce this policy the copy constructor and assignment operator are * private. * * A lock protects member variables that are likely to be accessed and * modified by multiple threads. 
* \ingroup datastaging * \headerfile DTR.h arc/data-staging/DTR.h */ class DTR { private: /// Identifier std::string DTR_ID; /// UserConfig and URL objects. Needed as DataHandle keeps a reference to them. Arc::URL source_url; Arc::URL destination_url; Arc::UserConfig cfg; /// Source file Arc::DataHandle source_endpoint; /// Destination file Arc::DataHandle destination_endpoint; /// Source file as a string std::string source_url_str; /// Destination file as a string std::string destination_url_str; /// Endpoint of cached file. /* Kept as string so we don't need to duplicate DataHandle properties * of destination. Delivery should check if this is set and if so use * it as destination. */ std::string cache_file; /// Cache configuration DTRCacheParameters cache_parameters; /// Cache state for this DTR CacheState cache_state; /// Whether ACIX is used as a source bool use_acix; /// Local user information Arc::User user; /// Credential information DTRCredentialInfo credentials; /// Job that requested the transfer. Could be used as a generic way of grouping DTRs. std::string parent_job_id; /// A flattened number set by the scheduler int priority; /// Transfer share this DTR belongs to std::string transfershare; /// This string can be used to form sub-sets of transfer shares. /** It is appended to transfershare. It can be used by the Generator * for example to split uploads and downloads into separate shares or * make shares for different endpoints. */ std::string sub_share; /// Number of attempts left to complete this DTR unsigned int tries_left; /// Initial number of attempts unsigned int initial_tries; /// A flag to say whether the DTR is replicating inside the same LFN of an index service bool replication; /// A flag to say whether to forcibly register the destination in an index service. /** Even if the source is not the same file, the destination will be * registered to an existing LFN. It should be set to true in * the case where an output file is uploaded to several locations but * with the same index service LFN */ bool force_registration; /// The file that the current source is mapped to. /** Delivery should check if this is set and if so use this as source. */ std::string mapped_source; /// Status of the DTR DTRStatus status; /// Error status of the DTR DTRErrorStatus error_status; /// Number of bytes transferred so far unsigned long long int bytes_transferred; // TODO and/or offset? /// Time taken in ns to complete transfer (0 if incomplete) unsigned long long int transfer_time; /** Timing variables **/ /// When should we finish the current action Arc::Time timeout; /// Creation time Arc::Time created; /// Modification time Arc::Time last_modified; /// Wait until this time before doing more processing Arc::Time next_process_time; /// True if some process requested cancellation bool cancel_request; /// Bulk start flag bool bulk_start; /// Bulk end flag bool bulk_end; /// Whether bulk operations are supported for the source bool source_supports_bulk; /// Flag to say whether success of the DTR is mandatory bool mandatory; /// Endpoint of delivery service this DTR is scheduled for. /** By default it is LOCAL_DELIVERY so local Delivery is used. */ Arc::URL delivery_endpoint; /// List of problematic endpoints - those which the DTR definitely cannot use std::vector problematic_delivery_endpoints; /// Whether to use host instead of user credentials for contacting remote delivery services. 
bool use_host_cert_for_remote_delivery; /// The process in charge of this DTR right now StagingProcesses current_owner; /// Logger object. /** Creation and deletion of this object should be managed * in the Generator and a pointer passed in the DTR constructor. */ DTRLogger logger; /// Log Destinations. /** This list is kept here so that the Logger can be connected and * disconnected in threads which have their own root logger * to avoid duplicate messages */ std::list log_destinations; /// Flag to say whether to delete LogDestinations. /** Set to true when a DTR thread is stuck or lost so it doesn't crash when * waking up after DTR has finished */ bool delete_log_destinations; /// Performance metric logger Arc::JobPerfLog perf_log; /// Performance record used for recording transfer time Arc::JobPerfRecord perf_record; /// List of callback methods called when DTR moves between processes std::map > proc_callback; /// Lock to avoid collisions while changing DTR properties Arc::SimpleCondition lock; /** Possible fields (types, names and so on are subject to change) ** /// DTRs that are grouped must have the same number here int affiliation; /// History of recent statuses DTRStatus::DTRStatusType *history_of_statuses; **/ /* Methods */ /// Change modification time void mark_modification () { last_modified.SetTime(time(NULL)); }; /// Get the list of callbacks for this owner. Protected by lock. std::list get_callbacks(const std::map >& proc_callback, StagingProcesses owner); /// Private and not implemented because DTR_ptr should always be used. DTR& operator=(const DTR& dtr); DTR(const DTR& dtr); DTR(); public: /// URL that is used to denote local Delivery should be used static const Arc::URL LOCAL_DELIVERY; /// Log level for all DTR activity static Arc::LogLevel LOG_LEVEL; /// Normal constructor. /** Construct a new DTR. * @param source Endpoint from which to read data * @param destination Endpoint to which to write data * @param usercfg Provides some user configuration information * @param jobid ID of the job associated with this data transfer * @param uid UID to use when accessing local file system if source * or destination is a local file. If this is different to the current * uid then the current uid must have sufficient privileges to change uid. * @param log ThreadedPointer containing log object. If NULL the root * logger is used. */ DTR(const std::string& source, const std::string& destination, const Arc::UserConfig& usercfg, const std::string& jobid, const uid_t& uid, DTRLogger log); /// Empty destructor ~DTR() {}; /// Is DTR valid? operator bool() const { return (!DTR_ID.empty()); } /// Is DTR not valid? bool operator!() const { return (DTR_ID.empty()); } /// Register callback objects to be used during DTR processing. /** * Objects deriving from DTRCallback can be registered with this method. * The callback method of these objects will then be called when the DTR * is passed to the specified owner. Protected by lock. */ void registerCallback(DTRCallback* cb, StagingProcesses owner); /// Reset information held on this DTR, such as resolved replicas, error state etc. /** * Useful when a failed DTR is to be retried. */ void reset(); /// Set the ID of this DTR. Useful when passing DTR between processes. void set_id(const std::string& id); /// Get the ID of this DTR std::string get_id() const { return DTR_ID; }; /// Get an abbreviated version of the DTR ID - useful to reduce logging verbosity std::string get_short_id() const; /// Get source handle. 
Return by reference since DataHandle cannot be copied Arc::DataHandle& get_source() { return source_endpoint; }; /// Get destination handle. Return by reference since DataHandle cannot be copied Arc::DataHandle& get_destination() { return destination_endpoint; }; /// Get source as a string std::string get_source_str() const { return source_url_str; }; /// Get destination as a string std::string get_destination_str() const { return destination_url_str; }; /// Get the UserConfig object associated with this DTR const Arc::UserConfig& get_usercfg() const { return cfg; }; /// Set the timeout for processing this DTR void set_timeout(time_t value) { timeout.SetTime(Arc::Time().GetTime() + value); }; /// Get the timeout for processing this DTR Arc::Time get_timeout() const { return timeout; }; /// Set the next processing time to current time + given time void set_process_time(const Arc::Period& process_time); /// Get the next processing time for the DTR Arc::Time get_process_time() const { return next_process_time; }; /// Get the creation time Arc::Time get_creation_time() const { return created; }; /// Get the modification time Arc::Time get_modification_time() const { return last_modified; }; /// Get the parent job ID std::string get_parent_job_id() const { return parent_job_id; }; /// Set the priority void set_priority(int pri); /// Get the priority int get_priority() const { return priority; }; /// Set credential info void set_credential_info(const DTRCredentialInfo& cred) { credentials = cred; }; /// Get credential info const DTRCredentialInfo& get_credential_info() const { return credentials; }; /// Set the transfer share. sub_share is automatically added to transfershare. void set_transfer_share(const std::string& share_name); /// Get the transfer share. sub_share is automatically added to transfershare. std::string get_transfer_share() const { return transfershare; }; /// Set sub-share void set_sub_share(const std::string& share) { sub_share = share; }; /// Get sub-share std::string get_sub_share() const { return sub_share; }; /// Set the number of attempts remaining void set_tries_left(unsigned int tries); /// Get the number of attempts remaining unsigned int get_tries_left() const { return tries_left; }; /// Get the initial number of attempts (set by set_tries_left()) unsigned int get_initial_tries() const { return initial_tries; } /// Decrease attempt number void decrease_tries_left(); /// Set the status. Protected by lock. void set_status(DTRStatus stat); /// Get the status. Protected by lock. DTRStatus get_status(); /// Set the error status. /** * The DTRErrorStatus last error state field is set to the current status * of the DTR. Protected by lock. */ void set_error_status(DTRErrorStatus::DTRErrorStatusType error_stat, DTRErrorStatus::DTRErrorLocation error_loc, const std::string& desc=""); /// Set the error status back to NONE_ERROR and clear other fields void reset_error_status(); /// Get the error status. 
DTRErrorStatus get_error_status(); /// Set bytes transferred (should be set by whatever is controlling the transfer) void set_bytes_transferred(unsigned long long int bytes); /// Get current number of bytes transferred unsigned long long int get_bytes_transferred() const { return bytes_transferred; }; /// Set transfer time (should be set by whatever is controlling the transfer) void set_transfer_time(unsigned long long int t); /// Get transfer time unsigned long long int get_transfer_time() const { return transfer_time; }; /// Set the DTR to be cancelled void set_cancel_request(); /// Returns true if cancellation has been requested bool cancel_requested() const { return cancel_request; }; /// Set delivery endpoint void set_delivery_endpoint(const Arc::URL& endpoint) { delivery_endpoint = endpoint; }; /// Returns delivery endpoint const Arc::URL& get_delivery_endpoint() const { return delivery_endpoint; }; /// Add problematic endpoint. /** * Should only be those endpoints where there is a problem with the service * itself and not the transfer. */ void add_problematic_delivery_service(const Arc::URL& endpoint) { problematic_delivery_endpoints.push_back(endpoint); }; /// Get all problematic endpoints const std::vector& get_problematic_delivery_services() const { return problematic_delivery_endpoints; }; /// Set the flag for using host certificate for contacting remote delivery services void host_cert_for_remote_delivery(bool host) { use_host_cert_for_remote_delivery = host; }; /// Get the flag for using host certificate for contacting remote delivery services bool host_cert_for_remote_delivery() const { return use_host_cert_for_remote_delivery; }; /// Set cache filename void set_cache_file(const std::string& filename); /// Get cache filename std::string get_cache_file() const { return cache_file; }; /// Set cache parameters void set_cache_parameters(const DTRCacheParameters& param) { cache_parameters = param; }; /// Get cache parameters const DTRCacheParameters& get_cache_parameters() const { return cache_parameters; }; /// Set the cache state void set_cache_state(CacheState state); /// Get the cache state CacheState get_cache_state() const { return cache_state; }; /// Set whether ACIX is a source void set_use_acix(bool acix) { use_acix = acix; }; /// Get whether ACIX is a source bool get_use_acix() const { return use_acix; }; /// Set the mapped file void set_mapped_source(const std::string& file = "") { mapped_source = file; }; /// Get the mapped file std::string get_mapped_source() const { return mapped_source; }; /// Find the DTR owner StagingProcesses get_owner() const { return current_owner; }; /// Get the local user information Arc::User get_local_user() const { return user; }; /// Set replication flag void set_replication(bool rep) { replication = rep; }; /// Get replication flag bool is_replication() const { return replication; }; /// Set force replication flag void set_force_registration(bool force) { force_registration = force; }; /// Get force replication flag bool is_force_registration() const { return force_registration; }; /// Set bulk start flag void set_bulk_start(bool value) { bulk_start = value; }; /// Get bulk start flag bool get_bulk_start() const { return bulk_start; }; /// Set bulk end flag void set_bulk_end(bool value) { bulk_end = value; }; /// Get bulk start flag bool get_bulk_end() const { return bulk_end; }; /// Whether bulk operation is possible according to current state and src/dest bool bulk_possible(); /// Whether DTR success is mandatory bool is_mandatory() 
const { return mandatory; }; /// Get Logger object, so that processes can log to this DTR's log const DTRLogger& get_logger() const { return logger; }; /// Connect log destinations to logger. Only needs to be done after disconnect() void connect_logger() { if (logger) logger->setDestinations(log_destinations); }; /// Disconnect log destinations from logger. void disconnect_logger() { if (logger) logger->removeDestinations(); }; /// Set whether or not to delete log destinations in delete_logger_destinations() void set_delete_log_destinations(bool del) { delete_log_destinations = del; }; /// Clean log destinations. Takes care of cleaning internal list and list in logger. void clean_log_destinations(Arc::LogDestination* exclude=NULL); /// Pass the DTR from one process to another. Protected by lock. static void push(DTR_ptr dtr, StagingProcesses new_owner); /// Suspend the DTR which is in doing transfer in the delivery process bool suspend(); /// Did an error happen? bool error() const { return (error_status != DTRErrorStatus::NONE_ERROR); } /// Returns true if this DTR is about to go into the pre-processor bool is_destined_for_pre_processor() const; /// Returns true if this DTR is about to go into the post-processor bool is_destined_for_post_processor() const; /// Returns true if this DTR is about to go into delivery bool is_destined_for_delivery() const; /// Returns true if this DTR just came from the pre-processor bool came_from_pre_processor() const; /// Returns true if this DTR just came from the post-processor bool came_from_post_processor() const; /// Returns true if this DTR just came from delivery bool came_from_delivery() const; /// Returns true if this DTR just came from the generator bool came_from_generator() const; /// Returns true if this DTR is in a final state (finished, failed or cancelled) bool is_in_final_state() const; /// Get the performance log Arc::JobPerfLog& get_job_perf_log() { return perf_log; }; /// Get the performance log record Arc::JobPerfRecord& get_job_perf_record() { return perf_record; }; }; /// Helper method to create smart pointer, only for swig bindings DTR_ptr createDTRPtr(const std::string& source, const std::string& destination, const Arc::UserConfig& usercfg, const std::string& jobid, const uid_t& uid, DTRLogger log); /// Helper method to create smart pointer, only for swig bindings DTRLogger createDTRLogger(Arc::Logger& parent, const std::string& subdomain); } // namespace DataStaging #endif /*DTR_H_*/ nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/DataDelivery.cpp0000644000000000000000000000012312675602216024347 xustar000000000000000027 mtime=1459029134.924374 26 atime=1513200576.81073 30 ctime=1513200662.102773203 nordugrid-arc-5.4.2/src/libs/data-staging/DataDelivery.cpp0000644000175000002070000003134212675602216024420 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "DataDeliveryComm.h" #include "DataDelivery.h" namespace DataStaging { Arc::Logger DataDelivery::logger(Arc::Logger::getRootLogger(), "DataStaging.DataDelivery"); /// Wrapper class around DataDeliveryComm class DataDelivery::delivery_pair_t { public: DTR_ptr dtr; TransferParameters params; DataDeliveryComm* comm; bool cancelled; Arc::SimpleCounter thread_count; delivery_pair_t(DTR_ptr request, const TransferParameters& params); ~delivery_pair_t(); void start(); }; DataDelivery::delivery_pair_t::delivery_pair_t(DTR_ptr request, const TransferParameters& params) :dtr(request),params(params),comm(NULL),cancelled(false) {} 
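
// A minimal usage sketch (kept out of the build with #if 0) of how a
// Generator-side component might configure the Scheduler declared in
// Scheduler.h before submitting DTRs. The slot counts, the "atlas"/"cms"
// share names, the replica pattern and the dump path are illustrative
// placeholders only; the relevant ARC headers (<map>, <string>,
// arc/data-staging/Scheduler.h) are assumed to be included.
#if 0
static void configure_and_start_scheduler(DataStaging::Scheduler& scheduler) {
  // Limits on concurrent DTRs per stage:
  // (pre-processor, post-processor, delivery, emergency, staged-prepared)
  scheduler.SetSlots(20, 20, 10, 2, 200);

  // Kill transfers averaging under ~100kB/s or inactive for over 5 minutes
  DataStaging::TransferParameters limits;
  limits.min_average_bandwidth = 100 * 1024;
  limits.max_inactivity_time = 300;
  scheduler.SetTransferParameters(limits);

  // Split transfer slots into per-VO shares with different weights
  std::map<std::string, int> ref_shares;
  ref_shares["atlas"] = 80;
  ref_shares["cms"] = 40;
  scheduler.SetTransferSharesConf(DataStaging::TransferSharesConf("voms:vo", ref_shares));

  // Prefer replicas whose hostnames match these patterns, in this order
  scheduler.SetPreferredPattern(".mysite.org$|.uk$");

  // Periodically dump DTR state to a file (only file paths are supported)
  scheduler.SetDumpLocation("/tmp/dtrs.log");

  // The configuration above is only effective if done before start()
  scheduler.start();
}
#endif
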
DataDelivery::delivery_pair_t::~delivery_pair_t() { if (comm) delete comm; } void DataDelivery::delivery_pair_t::start() { comm = DataDeliveryComm::CreateInstance(dtr, params); } DataDelivery::DataDelivery(): delivery_state(INITIATED) { } bool DataDelivery::start() { if(delivery_state == RUNNING || delivery_state == TO_STOP) return false; delivery_state = RUNNING; Arc::CreateThreadFunction(&main_thread,this); return true; } void DataDelivery::receiveDTR(DTR_ptr dtr) { if(!(*dtr)) { logger.msg(Arc::ERROR, "Received invalid DTR"); dtr->set_error_status(DTRErrorStatus::INTERNAL_LOGIC_ERROR, DTRErrorStatus::ERROR_UNKNOWN, "Invalid DTR"); dtr->set_status(DTRStatus::TRANSFERRED); DTR::push(dtr, SCHEDULER); return; } dtr->get_logger()->msg(Arc::INFO, "Delivery received new DTR %s with source: %s, destination: %s", dtr->get_id(), dtr->get_source()->CurrentLocation().str(), dtr->get_destination()->CurrentLocation().str()); dtr->set_status(DTRStatus::TRANSFERRING); delivery_pair_t* d = new delivery_pair_t(dtr, transfer_params); dtr_list_lock.lock(); dtr_list.push_back(d); dtr_list_lock.unlock(); cond.signal(); return; } bool DataDelivery::cancelDTR(DTR_ptr request) { if(!request) { logger.msg(Arc::ERROR, "Received no DTR"); return false; } if(!(*request)) { logger.msg(Arc::ERROR, "Received invalid DTR"); request->set_status(DTRStatus::ERROR); return false; } dtr_list_lock.lock(); for (std::list::iterator i = dtr_list.begin(); i != dtr_list.end(); ++i) { delivery_pair_t* ip = *i; if (ip->dtr->get_id() == request->get_id()) { request->get_logger()->msg(Arc::INFO, "Cancelling DTR %s with source: %s, destination: %s", request->get_id(), request->get_source()->str(), request->get_destination()->str()); ip->cancelled = true; ip->dtr->set_status(DTRStatus::TRANSFERRING_CANCEL); dtr_list_lock.unlock(); cond.signal(); return true; } } // DTR is not in the active transfer list, probably because it just finished dtr_list_lock.unlock(); request->get_logger()->msg(Arc::WARNING, "DTR %s requested cancel but no active transfer", request->get_id()); // if request is already TRANSFERRED, no need to push to Scheduler again if (request->get_status() != DTRStatus::TRANSFERRED) { request->set_status(DTRStatus::TRANSFERRED); DTR::push(request, SCHEDULER); } return true; } bool DataDelivery::stop() { if(delivery_state != RUNNING) return false; delivery_state = TO_STOP; cond.signal(); run_signal.wait(); delivery_state = STOPPED; return true; } void DataDelivery::SetTransferParameters(const TransferParameters& params) { transfer_params = params; } void DataDelivery::start_delivery(void* arg) { delivery_pair_t* dp = (delivery_pair_t*)arg; dp->start(); } void DataDelivery::stop_delivery(void* arg) { delivery_pair_t* dp = (delivery_pair_t*)arg; delete dp->comm; dp->comm = NULL; // In case transfer finished before getting cancel signal, delete destination if (dp->cancelled || dp->dtr->error()) dp->dtr->get_destination()->Remove(); } bool DataDelivery::delete_delivery_pair(delivery_pair_t* dp) { bool res = Arc::CreateThreadFunction(&stop_delivery, dp, &dp->thread_count); if (res) { res = dp->thread_count.wait(300*1000); } if (res) delete dp; return res; } // Delete DTR destination, called after losing contact with delivery process static void delete_dtr_destination(DTR_ptr dtr) { Arc::URL dest(dtr->get_destination()->CurrentLocation()); // Check for TURL if (!dtr->get_destination()->TransferLocations().empty()) { dest = dtr->get_destination()->TransferLocations().front(); } // Check for cache file if ((dtr->get_cache_state() == 
CACHEABLE) && !dtr->get_cache_file().empty()) { dest = dtr->get_cache_file(); } dtr->get_logger()->msg(Arc::VERBOSE, "Cleaning up after failure: deleting %s", dest.str()); Arc::DataHandle h(dest, dtr->get_usercfg()); if (h) h->Remove(); } void DataDelivery::main_thread (void* arg) { DataDelivery* it = (DataDelivery*)arg; it->main_thread(); } void DataDelivery::main_thread (void) { // disconnect from root logger so // messages are logged to per-DTR Logger Arc::Logger::getRootLogger().setThreadContext(); Arc::Logger::getRootLogger().removeDestinations(); Arc::Logger::getRootLogger().setThreshold(DTR::LOG_LEVEL); while(delivery_state != TO_STOP){ dtr_list_lock.lock(); std::list::iterator d = dtr_list.begin(); dtr_list_lock.unlock(); for(;;) { dtr_list_lock.lock(); if(d == dtr_list.end()) { dtr_list_lock.unlock(); break; } dtr_list_lock.unlock(); delivery_pair_t* dp = *d; // first check for cancellation if (dp->cancelled) { dtr_list_lock.lock(); d = dtr_list.erase(d); dtr_list_lock.unlock(); // deleting delivery_pair_t kills the spawned process // Do this before passing back to Scheduler to avoid race condition // of DTR being deleted before Comm object has finished with it. // With ThreadedPointer this may not be a problem any more. DTR_ptr tmp = dp->dtr; if (!delete_delivery_pair(dp)) { tmp->get_logger()->msg(Arc::ERROR, "Failed to delete delivery object or deletion timed out"); } tmp->get_job_perf_record().End("SchedulerTransferTime_"+tmp->get_delivery_endpoint().Host()); tmp->set_status(DTRStatus::TRANSFERRED); DTR::push(tmp, SCHEDULER); continue; } // check for new transfer if (!dp->comm) { dp->dtr->get_job_perf_record().Start(dp->dtr->get_short_id()); // Connecting to a remote delivery service can hang in rare cases, // so launch a separate thread with a timeout bool res = Arc::CreateThreadFunction(&start_delivery, dp, &dp->thread_count); if (res) { res = dp->thread_count.wait(300*1000); } if (!res) { // error or timeout - in this case do not delete dp since if the // thread timed out it may wake up at some point. Better to have a // small memory leak than seg fault. 
dtr_list_lock.lock(); d = dtr_list.erase(d); dtr_list_lock.unlock(); DTR_ptr tmp = dp->dtr; tmp->set_error_status(DTRErrorStatus::INTERNAL_PROCESS_ERROR, DTRErrorStatus::NO_ERROR_LOCATION, "Failed to start thread to start delivery or thread timed out"); tmp->get_job_perf_record().End("SchedulerTransferTime_"+tmp->get_delivery_endpoint().Host()); tmp->set_status(DTRStatus::TRANSFERRED); DTR::push(tmp, SCHEDULER); } else { dtr_list_lock.lock(); ++d; dtr_list_lock.unlock(); } continue; } // ongoing transfer - get status DataDeliveryComm::Status status; status = dp->comm->GetStatus(); dp->dtr->set_bytes_transferred(status.transferred); if((status.commstatus == DataDeliveryComm::CommExited) || (status.commstatus == DataDeliveryComm::CommClosed) || (status.commstatus == DataDeliveryComm::CommFailed)) { // Transfer finished - either successfully or with error dtr_list_lock.lock(); d = dtr_list.erase(d); dtr_list_lock.unlock(); if ((status.commstatus == DataDeliveryComm::CommFailed) || (status.error != DTRErrorStatus::NONE_ERROR)) { if (status.error == DTRErrorStatus::NONE_ERROR) { // Lost track of process - delete destination so it can be tried again delete_dtr_destination(dp->dtr); status.error = DTRErrorStatus::INTERNAL_PROCESS_ERROR; } dp->dtr->set_error_status(status.error,status.error_location, status.error_desc[0]?status.error_desc:dp->comm->GetError().c_str()); } else if (status.checksum) { dp->dtr->get_destination()->SetCheckSum(status.checksum); } dp->dtr->get_logger()->msg(Arc::INFO, "Transfer finished: %llu bytes transferred %s", status.transferred, (status.checksum[0] ? ": checksum "+std::string(status.checksum) : " ")); timespec dummy; dp->dtr->get_job_perf_log().Log("DeliveryTransferTime_"+dp->dtr->get_delivery_endpoint().Host(), dp->dtr->get_short_id()+"\t"+Arc::tostring(status.transfer_time), dummy, dummy); dp->dtr->set_transfer_time(status.transfer_time); DTR_ptr tmp = dp->dtr; if (!delete_delivery_pair(dp)) { tmp->get_logger()->msg(Arc::ERROR, "Failed to delete delivery object or deletion timed out"); } tmp->get_job_perf_record().End("SchedulerTransferTime_"+tmp->get_delivery_endpoint().Host()); tmp->set_status(DTRStatus::TRANSFERRED); DTR::push(tmp, SCHEDULER); continue; } if(!(*(dp->comm))) { // Error happened - either delivery process is stuck or could not start dtr_list_lock.lock(); d = dtr_list.erase(d); dtr_list_lock.unlock(); std::string comm_err = dp->comm->GetError(); if (status.commstatus == DataDeliveryComm::CommInit) { if (comm_err.empty()) comm_err = "Failed to start delivery process"; if (dp->dtr->get_delivery_endpoint() == DTR::LOCAL_DELIVERY) { // Serious problem, so mark permanent error dp->dtr->set_error_status(DTRErrorStatus::INTERNAL_LOGIC_ERROR, DTRErrorStatus::ERROR_TRANSFER, comm_err); } else { // Failing to start on remote service should be retried dp->dtr->add_problematic_delivery_service(dp->dtr->get_delivery_endpoint()); dp->dtr->set_error_status(DTRErrorStatus::INTERNAL_PROCESS_ERROR, DTRErrorStatus::ERROR_TRANSFER, comm_err); } } else { if (comm_err.empty()) comm_err = "Connection with delivery process lost"; // delete destination so it can be tried again delete_dtr_destination(dp->dtr); dp->dtr->set_error_status(DTRErrorStatus::INTERNAL_PROCESS_ERROR, DTRErrorStatus::ERROR_TRANSFER, comm_err); } DTR_ptr tmp = dp->dtr; if (!delete_delivery_pair(dp)) { tmp->get_logger()->msg(Arc::ERROR, "Failed to delete delivery object or deletion timed out"); } tmp->get_job_perf_record().End("SchedulerTransferTime_"+tmp->get_delivery_endpoint().Host()); 
tmp->set_status(DTRStatus::TRANSFERRED); DTR::push(tmp, SCHEDULER); continue; } dtr_list_lock.lock(); ++d; dtr_list_lock.unlock(); } // Go through main loop every half a second or when new transfer arrives cond.wait(100); } // Kill any transfers still running dtr_list_lock.lock(); for (std::list::iterator d = dtr_list.begin(); d != dtr_list.end();) { DTR_ptr tmp = (*d)->dtr; if (!delete_delivery_pair(*d)) { tmp->get_logger()->msg(Arc::ERROR, "Failed to delete delivery object or deletion timed out"); } d = dtr_list.erase(d); } dtr_list_lock.unlock(); logger.msg(Arc::INFO, "Data delivery loop exited"); run_signal.signal(); } } // namespace DataStaging nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/Scheduler.h0000644000000000000000000000012312675602216023355 xustar000000000000000027 mtime=1459029134.924374 26 atime=1513200576.81873 30 ctime=1513200662.097773142 nordugrid-arc-5.4.2/src/libs/data-staging/Scheduler.h0000644000175000002070000002716312675602216023434 0ustar00mockbuildmock00000000000000#ifndef SCHEDULER_H_ #define SCHEDULER_H_ #include #include #include #include #include #include "DTR.h" #include "DTRList.h" #include "Processor.h" #include "DataDelivery.h" #include "TransferShares.h" namespace DataStaging { /// The Scheduler is the control centre of the data staging framework. /** * The Scheduler manages a global list of DTRs and schedules when they should * go into the next state or be sent to other processes. The DTR priority is * used to decide each DTR's position in a queue. * \ingroup datastaging * \headerfile Scheduler.h arc/data-staging/Scheduler.h */ class Scheduler: public DTRCallback { private: /// All the DTRs the scheduler is aware of. /** The DTR comes to this list once received from the generator * and leaves the list only when pushed back to the generator. */ DTRList DtrList; /// A list of jobs that have been requested to be cancelled. /** External threads add items to this list, and the Scheduler * processes it during the main loop. */ std::list cancelled_jobs; /// A list of DTRs to process std::list events; /// Map of transfer shares to staged DTRs. Filled each event processing loop std::map > staged_queue; /// A lock for the cancelled jobs list Arc::SimpleCondition cancelled_jobs_lock; /// Configuration of transfer shares TransferSharesConf transferSharesConf; /// URLMap containing information on any local mappings defined in the configuration Arc::URLMap url_map; /// Preferred pattern to match replicas defined in configuration std::string preferred_pattern; /// Lock to protect multi-threaded access to start() and stop() Arc::SimpleCondition state_lock; /// Lock for events list Arc::SimpleCondition event_lock; /// Condition to signal end of running Arc::SimpleCondition run_signal; /// Condition to signal end of dump thread Arc::SimpleCondition dump_signal; /// Limit on number of DTRs in pre-processor unsigned int PreProcessorSlots; /// Limit on number of DTRs in delivery unsigned int DeliverySlots; /// Limit on number of DTRs in post-processor unsigned int PostProcessorSlots; /// Limit on number of emergency DTRs in each state unsigned int EmergencySlots; /// Limit on number of staged-prepared files, per share unsigned int StagedPreparedSlots; /// Where to dump DTR state. Currently only a path to a file is supported. 
std::string dumplocation; /// Performance metrics logger Arc::JobPerfLog job_perf_log; /// Endpoints of delivery services from configuration std::vector configured_delivery_services; /// Map of delivery services and directories they can access, filled after /// querying all services when the first DTR is processed std::map > usable_delivery_services; /// Timestamp of last check of delivery services Arc::Time delivery_last_checked; /// File size limit (in bytes) under which local transfer is used unsigned long long int remote_size_limit; /// Counter of transfers per delivery service std::map delivery_hosts; /// Logger object static Arc::Logger logger; /// Root logger destinations, to use when logging non-DTR specific messages std::list root_destinations; /// Flag describing scheduler state. Used to decide whether to keep running main loop. ProcessState scheduler_state; /// Processor object Processor processor; /// Delivery object DataDelivery delivery; /// Static instance of Scheduler static Scheduler* scheduler_instance; /// Lock for multiple threads getting static Scheduler instance static Glib::Mutex instance_lock; /// Copy constructor is private because Scheduler should not be copied Scheduler(const Scheduler&); // should not happen /// Assignment operator is private because Scheduler should not be copied Scheduler& operator=(const Scheduler&); // should not happen /* Functions to process every state of the DTR during normal workflow */ /// Process a DTR in the NEW state void ProcessDTRNEW(DTR_ptr request); /// Process a DTR in the CACHE_WAIT state void ProcessDTRCACHE_WAIT(DTR_ptr request); /// Process a DTR in the CACHE_CHECKED state void ProcessDTRCACHE_CHECKED(DTR_ptr request); /// Process a DTR in the RESOLVED state void ProcessDTRRESOLVED(DTR_ptr request); /// Process a DTR in the REPLICA_QUERIED state void ProcessDTRREPLICA_QUERIED(DTR_ptr request); /// Process a DTR in the PRE_CLEANED state void ProcessDTRPRE_CLEANED(DTR_ptr request); /// Process a DTR in the STAGING_PREPARING_WAIT state void ProcessDTRSTAGING_PREPARING_WAIT(DTR_ptr request); /// Process a DTR in the STAGED_PREPARED state void ProcessDTRSTAGED_PREPARED(DTR_ptr request); /// Process a DTR in the TRANSFERRED state void ProcessDTRTRANSFERRED(DTR_ptr request); /// Process a DTR in the REQUEST_RELEASED state void ProcessDTRREQUEST_RELEASED(DTR_ptr request); /// Process a DTR in the REPLICA_REGISTERED state void ProcessDTRREPLICA_REGISTERED(DTR_ptr request); /// Process a DTR in the CACHE_PROCESSED state void ProcessDTRCACHE_PROCESSED(DTR_ptr request); /// Process a DTR in a final state /* This is a special function to deal with states after which * the DTR is returned to the generator, i.e. DONE, ERROR, CANCELLED */ void ProcessDTRFINAL_STATE(DTR_ptr request); /// Log a message to the root logger. This sends the message to the log /// destinations attached to the root logger at the point the Scheduler /// was started. void log_to_root_logger(Arc::LogLevel level, const std::string& message); /// Call the appropriate Process method depending on the DTR state void map_state_and_process(DTR_ptr request); /// Maps the DTR to the appropriate state when it is cancelled. /** This is a separate function, since cancellation request * can arrive at any time, breaking the normal workflow. */ void map_cancel_state(DTR_ptr request); /// Map a DTR stuck in a processing state to new state from which it can /// recover and retry. 
void map_stuck_state(DTR_ptr request); /// Choose a delivery service for the DTR, based on the file system paths /// each service can access. These paths are determined by calling all the /// configured services when the first DTR is received. void choose_delivery_service(DTR_ptr request); /// Go through all DTRs waiting to go into a processing state and decide /// whether to push them into that state, depending on shares and limits. void revise_queues(); /// Add a new event for the Scheduler to process. Used in receiveDTR(). void add_event(DTR_ptr event); /// Process the pool of DTRs which have arrived from other processes void process_events(void); /// Move to the next replica in the DTR. /** Utility function which should be called in the case of error * if the next replica should be tried. It takes care of sending * the DTR to the appropriate state, depending on whether or not * there are more replicas to try. */ void next_replica(DTR_ptr request); /// Handle a DTR whose source is mapped to another URL. /** If a file is mapped, this method should be called to deal * with the mapping. It sets the mapped_file attribute of * request to mapped_url. Returns true if the processing was * successful. */ bool handle_mapped_source(DTR_ptr request, Arc::URL& mapped_url); /// Thread method for dumping state static void dump_thread(void* arg); /// Static version of main_thread, used when thread is created static void main_thread(void* arg); /// Main thread, which runs until stopped void main_thread(void); public: /// Get static instance of Scheduler, to use one DTR instance with multiple generators. /** * Configuration of Scheduler by Set* methods can only be done before * start() is called, so undetermined behaviour can result from multiple * threads simultaneously calling Set* then start(). It is safer to make * sure that all threads use the same configuration (calling start() twice * is harmless). It is also better to make sure that threads call stop() in * a roughly coordinated way, i.e. all generators stop at the same time. */ static Scheduler* getInstance(); /// Constructor, to be used when only one Generator uses this Scheduler. Scheduler(); /// Destructor calls stop(), which cancels all DTRs and waits for them to complete ~Scheduler() { stop(); }; /* The following Set/Add methods are only effective when called before start() */ /// Set number of slots for processor and delivery stages void SetSlots(int pre_processor = 0, int post_processor = 0, int delivery = 0, int emergency = 0, int staged_prepared = 0); /// Add URL mapping entry. See Arc::URLMap. void AddURLMapping(const Arc::URL& template_url, const Arc::URL& replacement_url, const Arc::URL& access_url = Arc::URL()); /// Replace all URL mapping entries void SetURLMapping(const Arc::URLMap& mapping = Arc::URLMap()); /// Set the preferred pattern for ordering replicas. /** * This pattern will be used in the case of an index service URL with * multiple physical replicas and allows sorting of those replicas in order * of preference. It consists of one or more patterns separated by a pipe * character (|) listed in order of preference. If the dollar character ($) * is used at the end of a pattern, the pattern will be matched to the end * of the hostname of the replica. 
Example: "srm://myhost.org|.uk$|.ch$" */ void SetPreferredPattern(const std::string& pattern); /// Set TransferShares configuration void SetTransferSharesConf(const TransferSharesConf& share_conf); /// Set transfer limits void SetTransferParameters(const TransferParameters& params); /// Set the list of delivery services. DTR::LOCAL_DELIVERY means local Delivery. void SetDeliveryServices(const std::vector& endpoints); /// Set the remote transfer size limit void SetRemoteSizeLimit(unsigned long long int limit); /// Set location for periodic dump of DTR state (only file paths currently supported) void SetDumpLocation(const std::string& location); /// Set JobPerfLog object for performance metrics logging void SetJobPerfLog(const Arc::JobPerfLog& perf_log); /// Start scheduling activity. /** * This method must be called after all configuration parameters are set * properly. Scheduler can be stopped either by calling stop() method or * by destroying its instance. */ bool start(void); /// Callback method implemented from DTRCallback. /** * This method is called by the generator when it wants to pass a DTR * to the scheduler and when other processes send a DTR back to the * scheduler after processing. */ virtual void receiveDTR(DTR_ptr dtr); /// Tell the Scheduler to cancel all the DTRs in the given job description bool cancelDTRs(const std::string& jobid); /// Tell the Scheduler to shut down all threads and exit. /** * All active DTRs are cancelled and this method waits until they finish * (all DTRs go to CANCELLED state) */ bool stop(); }; } // namespace DataStaging #endif /*SCHEDULER_H_*/ nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/TransferShares.cpp0000644000000000000000000000012413153455410024717 xustar000000000000000027 mtime=1504598792.949892 27 atime=1513200576.796729 30 ctime=1513200662.112773326 nordugrid-arc-5.4.2/src/libs/data-staging/TransferShares.cpp0000644000175000002070000001322113153455410024763 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "TransferShares.h" #include #include namespace DataStaging { TransferSharesConf::TransferSharesConf(const std::string& type, const std::map& ref_shares) { set_share_type(type); set_reference_shares(ref_shares); } TransferSharesConf::TransferSharesConf() : shareType(NONE) { ReferenceShares["_default"] = 50; } void TransferSharesConf::set_share_type(const std::string& type) { if (Arc::lower(type) == "dn") shareType = USER; else if (Arc::lower(type) == "voms:vo") shareType = VO; else if (Arc::lower(type) == "voms:role") shareType = ROLE; else if (Arc::lower(type) == "voms:group") shareType = GROUP; else shareType = NONE; } bool TransferSharesConf::is_configured(const std::string& ShareToCheck) { return (ReferenceShares.find(ShareToCheck) != ReferenceShares.end()); } int TransferSharesConf::get_basic_priority(const std::string& ShareToCheck) { if (!is_configured(ShareToCheck)) return ReferenceShares["_default"]; return ReferenceShares[ShareToCheck]; } void TransferSharesConf::set_reference_share(const std::string& RefShare, int Priority) { ReferenceShares[RefShare] = Priority; } void TransferSharesConf::set_reference_shares(const std::map& shares) { ReferenceShares = shares; // there should always be a _default share defined if (ReferenceShares.find("_default") == ReferenceShares.end()) ReferenceShares["_default"] = 50; } std::string TransferSharesConf::conf() const { std::string conf; conf += " Share type: "; switch (shareType){ case USER: conf += "DN"; break; case VO: conf += "VOMS VO"; break; 
case GROUP: conf += "VOMS group"; break; case ROLE: conf += "VOMS role"; break; case NONE: conf += "None"; break; default: // Something really strange conf += "unknown"; break; } if (!ReferenceShares.empty()) { for (std::map::const_iterator i = ReferenceShares.begin(); i != ReferenceShares.end(); ++i) { conf += "\n Reference share " + i->first + ", priority " + Arc::tostring(i->second); } } return conf; } std::string TransferSharesConf::extract_share_info(DTR_ptr DTRToExtract) { DTRCredentialInfo cred = DTRToExtract->get_credential_info(); switch (shareType){ case USER: return cred.getDN(); case VO: return cred.extractVOMSVO(); case GROUP: return cred.extractVOMSGroup(); case ROLE: return cred.extractVOMSRole(); case NONE: return "_default"; default: // Something really strange return ""; } } TransferShares::TransferShares(const TransferSharesConf& shares_conf) : conf(shares_conf) { ActiveShares.clear(); ActiveSharesSlots.clear(); } void TransferShares::set_shares_conf(const TransferSharesConf& shares_conf) { conf = shares_conf; } void TransferShares::calculate_shares(int TotalNumberOfSlots) { ActiveSharesSlots.clear(); // clear active shares with 0 count // and compute the summarized priority of other active shares std::map::iterator i; int SummarizedPriority = 0; int TotalQueued = 0; for (i = ActiveShares.begin(); i != ActiveShares.end(); ){ if (i->second == 0) { ActiveShares.erase(i++); } else { SummarizedPriority += conf.get_basic_priority(i->first); TotalQueued += i->second; ++i; } } int slots_used = 0; // first calculate shares based on the share priority for (i = ActiveShares.begin(); i != ActiveShares.end(); i++){ // Number of slots for this share is its priority divided by total // priorities of all active shares multiplied by the total number of slots int slots = int(::floor(float(conf.get_basic_priority(i->first)) / float(SummarizedPriority) * float(TotalNumberOfSlots))); if (slots > i->second) { // Don't assign more slots than the share needs ActiveSharesSlots[i->first] = i->second; } else if (slots == 0) { // Some shares can receive 0 slots. // It can happen when there are lots of shares active // or one share has enormously big priority. 
// There should be no 0 in the number of slots, so every // share has at least theoretical possibility to start ActiveSharesSlots[i->first] = 1; } else { ActiveSharesSlots[i->first] = slots; } slots_used += ActiveSharesSlots[i->first]; } // now assign unused slots among shares with more DTRs than slots while (slots_used < TotalQueued && slots_used < TotalNumberOfSlots) { // TODO share slots using priorities for (i = ActiveShares.begin(); i != ActiveShares.end(); i++){ if (ActiveSharesSlots[i->first] < ActiveShares[i->first]) { ActiveSharesSlots[i->first]++; slots_used++; if (slots_used >= TotalQueued || slots_used >= TotalNumberOfSlots) break; } } } } void TransferShares::increase_transfer_share(const std::string& ShareToIncrease) { ActiveShares[ShareToIncrease]++; } void TransferShares::decrease_transfer_share(const std::string& ShareToDecrease) { ActiveShares[ShareToDecrease]--; } void TransferShares::decrease_number_of_slots(const std::string& ShareToDecrease) { ActiveSharesSlots[ShareToDecrease]--; } bool TransferShares::can_start(const std::string& ShareToStart) { return (ActiveSharesSlots[ShareToStart] > 0); } std::map TransferShares::active_shares() const { return ActiveShares; } } nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/Scheduler.cpp0000644000000000000000000000012313153455410023702 xustar000000000000000027 mtime=1504598792.949892 26 atime=1513200576.81473 30 ctime=1513200662.111773314 nordugrid-arc-5.4.2/src/libs/data-staging/Scheduler.cpp0000644000175000002070000017375213153455410023767 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "Scheduler.h" #include "DataDeliveryRemoteComm.h" namespace DataStaging { Arc::Logger Scheduler::logger(Arc::Logger::getRootLogger(), "DataStaging.Scheduler"); Scheduler* Scheduler::scheduler_instance = NULL; Glib::Mutex Scheduler::instance_lock; Scheduler* Scheduler::getInstance() { Glib::Mutex::Lock lock(instance_lock); if (!scheduler_instance) { scheduler_instance = new Scheduler(); } return scheduler_instance; } Scheduler::Scheduler(): remote_size_limit(0), scheduler_state(INITIATED) { // Conservative defaults PreProcessorSlots = 20; DeliverySlots = 10; PostProcessorSlots = 20; EmergencySlots = 2; StagedPreparedSlots = 200; } void Scheduler::SetSlots(int pre_processor, int post_processor, int delivery, int emergency, int staged_prepared) { if (scheduler_state == INITIATED) { if(pre_processor > 0) PreProcessorSlots = pre_processor; if(post_processor > 0) PostProcessorSlots = post_processor; if(delivery > 0) DeliverySlots = delivery; if(emergency > 0) EmergencySlots = emergency; if(staged_prepared > 0) StagedPreparedSlots = staged_prepared; } } void Scheduler::AddURLMapping(const Arc::URL& template_url, const Arc::URL& replacement_url, const Arc::URL& access_url) { if (scheduler_state == INITIATED) url_map.add(template_url,replacement_url,access_url); // else should log warning, but logger is disconnected } void Scheduler::SetURLMapping(const Arc::URLMap& mapping) { if (scheduler_state == INITIATED) url_map = mapping; } void Scheduler::SetPreferredPattern(const std::string& pattern) { if (scheduler_state == INITIATED) preferred_pattern = pattern; } void Scheduler::SetTransferSharesConf(const TransferSharesConf& share_conf) { if (scheduler_state == INITIATED) transferSharesConf = share_conf; } void Scheduler::SetTransferParameters(const TransferParameters& params) { delivery.SetTransferParameters(params); } void Scheduler::SetDeliveryServices(const 
std::vector& endpoints) { if (scheduler_state == INITIATED) configured_delivery_services = endpoints; } void Scheduler::SetRemoteSizeLimit(unsigned long long int limit) { if (scheduler_state == INITIATED) remote_size_limit = limit; } void Scheduler::SetDumpLocation(const std::string& location) { dumplocation = location; } void Scheduler::SetJobPerfLog(const Arc::JobPerfLog& perf_log) { job_perf_log = perf_log; } bool Scheduler::start(void) { state_lock.lock(); if(scheduler_state == RUNNING || scheduler_state == TO_STOP) { state_lock.unlock(); return false; } scheduler_state = RUNNING; state_lock.unlock(); processor.start(); delivery.start(); // if no delivery services set, then use local if (configured_delivery_services.empty()) { std::vector services; services.push_back(DTR::LOCAL_DELIVERY); configured_delivery_services = services; } Arc::CreateThreadFunction(&main_thread, this); return true; } void Scheduler::log_to_root_logger(Arc::LogLevel level, const std::string& message) { Arc::Logger::getRootLogger().addDestinations(root_destinations); logger.msg(level, message); Arc::Logger::getRootLogger().removeDestinations(); } /* Function to sort the list of the pointers to DTRs * according to the priorities the DTRs have. * DTRs with higher priority go first to the beginning, * with lower -- to the end */ bool dtr_sort_predicate(DTR_ptr dtr1, DTR_ptr dtr2) { return dtr1->get_priority() > dtr2->get_priority(); } void Scheduler::next_replica(DTR_ptr request) { if (!request->error()) { // bad logic request->set_error_status(DTRErrorStatus::INTERNAL_LOGIC_ERROR, DTRErrorStatus::ERROR_UNKNOWN, "Bad logic: next_replica called when there is no error"); // TODO: how to deal with these internal errors? return; } // Logic of whether to go for next source or destination bool source_error(false); if (request->get_error_status().GetErrorLocation() == DTRErrorStatus::ERROR_SOURCE) source_error = true; else if (request->get_error_status().GetErrorLocation() == DTRErrorStatus::ERROR_DESTINATION) source_error = false; else if (request->get_source()->IsIndex() && !request->get_destination()->IsIndex()) source_error = true; else if (!request->get_source()->IsIndex() && request->get_destination()->IsIndex()) source_error = false; else if (!request->get_source()->LastLocation() && request->get_destination()->LastLocation()) source_error = true; else if (request->get_source()->LastLocation() && !request->get_destination()->LastLocation()) source_error = false; else // Unknown error location, and either both are index services with remaining // replicas or neither are index services. Choose source in this case. source_error = true; bool replica_exists; if (source_error) { // reset mapped file request->set_mapped_source(); replica_exists = request->get_source()->NextLocation(); } else { replica_exists = request->get_destination()->NextLocation(); } if (replica_exists) { // Use next replica // Clear the error flag to resume normal workflow request->reset_error_status(); request->get_logger()->msg(Arc::INFO, "Using next %s replica", source_error ? "source" : "destination"); // Perhaps not necessary to query replica again if the error was in the destination // but the error could have been caused by a source problem during transfer request->set_status(DTRStatus::QUERY_REPLICA); } else { // No replicas - move to appropriate state for the post-processor to do cleanup request->get_logger()->msg(Arc::ERROR, "No more %s replicas", source_error ? 
"source" : "destination"); if (request->get_destination()->IsIndex()) { request->get_logger()->msg(Arc::VERBOSE, "Will clean up pre-registered destination"); request->set_status(DTRStatus::REGISTER_REPLICA); } else if (!request->get_cache_parameters().cache_dirs.empty() && (request->get_cache_state() == CACHE_ALREADY_PRESENT || request->get_cache_state() == CACHEABLE)) { request->get_logger()->msg(Arc::VERBOSE, "Will release cache locks"); request->set_status(DTRStatus::PROCESS_CACHE); } else { // nothing to clean up - set to end state request->get_logger()->msg(Arc::VERBOSE, "Moving to end of data staging"); request->set_status(DTRStatus::CACHE_PROCESSED); } } } bool Scheduler::handle_mapped_source(DTR_ptr request, Arc::URL& mapped_url) { // The DTR source is mapped to another place so set the mapped location in request. // If mapped_url is set delivery will use it as source request->get_logger()->msg(Arc::INFO, "Source is mapped to %s", mapped_url.str()); if (!request->get_source()->ReadOnly() && mapped_url.Protocol() == "link") { // read-write access means user can potentially modify source, so copy instead request->get_logger()->msg(Arc::WARNING, "Cannot link to source which can be modified, will copy instead"); mapped_url.ChangeProtocol("file"); } if (mapped_url.Protocol() == "link") { #ifndef WIN32 // If the map is a link then do the link here and set to TRANSFERRED. Local file // copies should still have to wait in the queue. For links we should also // turn off caching, remembering that we still need to release any cache // locks later if necessary. if (!request->get_destination()->Local()) { request->get_logger()->msg(Arc::ERROR, "Cannot link to a remote destination. Will not use mapped URL"); } else { request->get_logger()->msg(Arc::INFO, "Linking mapped file"); // Access session dir under mapped user if (!Arc::FileLink(mapped_url.Path(), request->get_destination()->CurrentLocation().Path(), request->get_local_user().get_uid(), request->get_local_user().get_gid(), true)) { request->get_logger()->msg(Arc::ERROR, "Failed to create link: %s. 
Will not use mapped URL", Arc::StrError(errno)); } else { // successful link, so turn off caching, set to TRANSFERRED and return request->set_mapped_source(mapped_url.str()); if (request->get_cache_state() == CACHEABLE) request->set_cache_state(CACHE_NOT_USED); request->set_status(DTRStatus::TRANSFERRED); return true; } } #else request->get_logger()->msg(Arc::ERROR, "Linking mapped file - can't link on Windows"); #endif } else { // Ready to copy mapped file // Assume that mapped urls are not index services or stageable // TODO: handle case when mapped url is index request->set_mapped_source(mapped_url.str()); request->set_status(DTRStatus::STAGED_PREPARED); return true; } return false; } void Scheduler::ProcessDTRNEW(DTR_ptr request){ request->get_logger()->msg(Arc::INFO, "Scheduler received new DTR %s with source: %s," " destination: %s, assigned to transfer share %s with priority %d", request->get_id(), request->get_source()->str(), request->get_destination()->str(), request->get_transfer_share(), request->get_priority()); // Normal workflow is CHECK_CACHE if (request->get_cache_state() == NON_CACHEABLE || request->get_cache_parameters().cache_dirs.empty()) { request->get_logger()->msg(Arc::VERBOSE, "File is not cacheable, was requested not to be cached or no cache available, skipping cache check"); request->set_status(DTRStatus::CACHE_CHECKED); } else { // Cache checking should have quite a long timeout as it may // take a long time to download a big file or there is a long delivery queue request->set_timeout(86400); request->get_logger()->msg(Arc::VERBOSE, "File is cacheable, will check cache"); if (DtrList.is_being_cached(request)) { Arc::Period cache_wait_period(10); request->get_logger()->msg(Arc::VERBOSE, "File is currently being cached, will wait %is", cache_wait_period.GetPeriod()); request->set_process_time(cache_wait_period); request->set_status(DTRStatus::CACHE_WAIT); } else { request->set_status(DTRStatus::CHECK_CACHE); } } } void Scheduler::ProcessDTRCACHE_WAIT(DTR_ptr request){ // The waiting time should be calculated within DTRList so // by the time we are here we know to query the cache again // If we timed out on it send to CACHE_PROCESSED where it // may be retried without caching if(request->get_timeout() < time(NULL)) { request->set_error_status(DTRErrorStatus::CACHE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Timed out while waiting for cache for " + request->get_source()->str()); request->get_logger()->msg(Arc::ERROR, "Timed out while waiting for cache lock"); request->set_status(DTRStatus::CACHE_PROCESSED); } else if (DtrList.is_being_cached(request)) { // TODO A low priority DTR holding the cache lock can block a high priority DTR // downloading the same file. Here we should cancel the low priority one to let // the high priority one go through Arc::Period cache_wait_period(10); request->get_logger()->msg(Arc::VERBOSE, "File is currently being cached, will wait %is", cache_wait_period.GetPeriod()); request->set_process_time(cache_wait_period); } else { // Try to check cache again request->get_logger()->msg(Arc::VERBOSE, "Checking cache again"); request->set_status(DTRStatus::CHECK_CACHE); } } void Scheduler::ProcessDTRCACHE_CHECKED(DTR_ptr request){ // There's no need to check additionally for cache error // If the error has occurred -- we just proceed the normal // workflow as if it was not cached at all. // But we should clear error flag if it was set by the pre-processor //setting timeout back to 1 hour, was set to 1 day in ProcessDTRNEW(). 
request->set_timeout(3600); request->reset_error_status(); if (request->get_cache_state() == CACHEABLE) DtrList.caching_started(request); if(request->get_cache_state() == CACHE_ALREADY_PRESENT){ // File is on place already. After the post-processor // the DTR is DONE. request->get_logger()->msg(Arc::VERBOSE, "Destination file is in cache"); request->set_status(DTRStatus::PROCESS_CACHE); } else if (request->get_source()->IsIndex() || request->get_destination()->IsIndex()) { // The Normal workflow -- RESOLVE request->get_logger()->msg(Arc::VERBOSE, "Source and/or destination is index service, will resolve replicas"); request->set_status(DTRStatus::RESOLVE); } else { request->get_logger()->msg(Arc::VERBOSE, "Neither source nor destination are index services, will skip resolving replicas"); request->set_status(DTRStatus::RESOLVED); } } void Scheduler::ProcessDTRRESOLVED(DTR_ptr request){ if(request->error()){ // It's impossible to download anything, since no replica location is resolved // if cacheable, move to PROCESS_CACHE, the post-processor will do the cleanup if (request->get_cache_state() == CACHEABLE && !request->get_cache_parameters().cache_dirs.empty()) { request->get_logger()->msg(Arc::ERROR, "Problem with index service, will release cache lock"); request->set_status(DTRStatus::PROCESS_CACHE); // else go to end state } else { request->get_logger()->msg(Arc::ERROR, "Problem with index service, will proceed to end of data staging"); request->set_status(DTRStatus::CACHE_PROCESSED); } } else { // Normal workflow is QUERY_REPLICA // Should we always do this? // logic to choose best replica - sort according to configured preference request->get_source()->SortLocations(preferred_pattern, url_map); // Access latency is not known until replica is queried request->get_logger()->msg(Arc::VERBOSE, "Checking source file is present"); request->set_status(DTRStatus::QUERY_REPLICA); } } void Scheduler::ProcessDTRREPLICA_QUERIED(DTR_ptr request){ if(request->error()){ // go to next replica or exit with error request->get_logger()->msg(Arc::ERROR, "Error with source file, moving to next replica"); next_replica(request); return; } if (request->get_source()->CheckSize()) { // Log performance metric with size of DTR timespec dummy; job_perf_log.Log("DTRSize", request->get_short_id()+"\t"+Arc::tostring(request->get_source()->GetSize()), dummy, dummy); } // Check if the replica is mapped if (url_map) { Arc::URL mapped_url(request->get_source()->CurrentLocation()); if (url_map.map(mapped_url)) { if (handle_mapped_source(request, mapped_url)) return; } } if (request->get_mapped_source().empty() && request->get_source()->GetAccessLatency() == Arc::DataPoint::ACCESS_LATENCY_LARGE) { // If the current source location is long latency, try the next replica // TODO add this replica to the end of location list, so that if there // are problems with other replicas, we eventually come back to this one request->get_logger()->msg(Arc::INFO, "Replica %s has long latency, trying next replica", request->get_source()->CurrentLocation().str()); if (request->get_source()->LastLocation()) { request->get_logger()->msg(Arc::INFO, "No more replicas, will use %s", request->get_source()->CurrentLocation().str()); } else { request->get_source()->NextLocation(); request->get_logger()->msg(Arc::VERBOSE, "Checking replica %s", request->get_source()->CurrentLocation().str()); request->set_status(DTRStatus::QUERY_REPLICA); return; } } // Normal workflow is PRE_CLEAN state // Delete destination if requested in URL options and not 
replication if (!request->is_replication() && (request->get_destination()->GetURL().Option("overwrite") == "yes" || request->get_destination()->CurrentLocation().Option("overwrite") == "yes")) { request->get_logger()->msg(Arc::VERBOSE, "Overwrite requested - will pre-clean destination"); request->set_status(DTRStatus::PRE_CLEAN); } else { request->get_logger()->msg(Arc::VERBOSE, "No overwrite requested or allowed, skipping pre-cleaning"); request->set_status(DTRStatus::PRE_CLEANED); } } void Scheduler::ProcessDTRPRE_CLEANED(DTR_ptr request){ // If an error occurred in pre-cleaning, try to copy anyway if (request->error()) request->get_logger()->msg(Arc::INFO, "Pre-clean failed, will still try to copy"); request->reset_error_status(); if (request->get_source()->IsStageable() || request->get_destination()->IsStageable()) { // Normal workflow is STAGE_PREPARE // Need to set the timeout to prevent from waiting for too long request->set_timeout(3600); // processor will take care of staging source or destination or both request->get_logger()->msg(Arc::VERBOSE, "Source or destination requires staging"); request->set_status(DTRStatus::STAGE_PREPARE); } else { request->get_logger()->msg(Arc::VERBOSE, "No need to stage source or destination, skipping staging"); request->set_status(DTRStatus::STAGED_PREPARED); } } void Scheduler::ProcessDTRSTAGING_PREPARING_WAIT(DTR_ptr request){ // The waiting time should be calculated within DTRList so // by the time we are here we know to query the request again // If there's timeout -- it's error case if(request->get_timeout() < time(NULL)){ // With a special error status we signal to the post-processor // that after releasing request this DTR should go into // QUERY_REPLICA again if necessary // Here we can't tell at which end the timeout was, so make an educated guess if (request->get_source()->IsStageable() && !request->get_destination()->IsStageable()) request->set_error_status(DTRErrorStatus::STAGING_TIMEOUT_ERROR, DTRErrorStatus::ERROR_SOURCE, "Stage request for source file timed out"); else if (!request->get_source()->IsStageable() && request->get_destination()->IsStageable()) request->set_error_status(DTRErrorStatus::STAGING_TIMEOUT_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Stage request for destination file timed out"); else // both endpoints are stageable - don't know the error location request->set_error_status(DTRErrorStatus::STAGING_TIMEOUT_ERROR, DTRErrorStatus::ERROR_UNKNOWN, "Stage request for source or destination file timed out"); // Let the post-processor do the job request->get_logger()->msg(Arc::ERROR, "Staging request timed out, will release request"); request->set_status(DTRStatus::RELEASE_REQUEST); } else { // Normal workflow is STAGE_PREPARE again request->get_logger()->msg(Arc::VERBOSE, "Querying status of staging request"); request->set_status(DTRStatus::STAGE_PREPARE); } } void Scheduler::ProcessDTRSTAGED_PREPARED(DTR_ptr request){ if(request->error()){ // We have to try another replica if the source failed to stage // but first we have to release any requests request->get_logger()->msg(Arc::VERBOSE, "Releasing requests"); request->set_status(DTRStatus::RELEASE_REQUEST); return; } if (url_map && request->get_mapped_source().empty() && request->get_source()->IsStageable()) { // check if any TURLs are mapped std::vector turls = request->get_source()->TransferLocations(); for (std::vector::iterator i = turls.begin(); i != turls.end(); ++i) { Arc::URL mapped_url(i->fullstr()); if (url_map.map(mapped_url)) { if 
(handle_mapped_source(request, mapped_url)) return; } } } // After normal workflow the DTR is ready for delivery request->get_logger()->msg(Arc::VERBOSE, "DTR is ready for transfer, moving to delivery queue"); // set long timeout for waiting for transfer slot // (setting timeouts for active transfers is done in Delivery) request->set_timeout(7200); request->set_status(DTRStatus::TRANSFER); } void Scheduler::ProcessDTRTRANSFERRED(DTR_ptr request){ // We don't check if error has happened - if it has the post-processor // will take needed steps in RELEASE_REQUEST in any case. The error flag // will work now as a sign to return the DTR to QUERY_REPLICA again. // Delivery will clean up destination physical file on error if (request->error()) request->get_logger()->msg(Arc::ERROR, "Transfer failed: %s", request->get_error_status().GetDesc()); // Resuming normal workflow after the DTR has finished transferring // The next state is RELEASE_REQUEST // if cacheable and no cancellation or error, mark the DTR as CACHE_DOWNLOADED // Might be better to do this in delivery instead if (!request->cancel_requested() && !request->error() && request->get_cache_state() == CACHEABLE) request->set_cache_state(CACHE_DOWNLOADED); if (request->get_source()->IsStageable() || request->get_destination()->IsStageable()) { request->get_logger()->msg(Arc::VERBOSE, "Releasing request(s) made during staging"); request->set_status(DTRStatus::RELEASE_REQUEST); } else { request->get_logger()->msg(Arc::VERBOSE, "Neither source nor destination were staged, skipping releasing requests"); request->set_status(DTRStatus::REQUEST_RELEASED); } } void Scheduler::ProcessDTRREQUEST_RELEASED(DTR_ptr request){ // if the post-processor had troubles releasing the request, continue // normal workflow and the DTR will be cleaned up. If the error // originates from before (like Transfer errors, staging errors) // and is not from destination, we need to query another replica if (request->error() && request->get_error_status().GetLastErrorState() != DTRStatus::RELEASING_REQUEST) { request->get_logger()->msg(Arc::ERROR, "Trying next replica"); next_replica(request); } else if (request->get_destination()->IsIndex()) { // Normal workflow is REGISTER_REPLICA request->get_logger()->msg(Arc::VERBOSE, "Will %s in destination index service", ((request->error() || request->cancel_requested()) ? "unregister":"register")); request->set_status(DTRStatus::REGISTER_REPLICA); } else { request->get_logger()->msg(Arc::VERBOSE, "Destination is not index service, skipping replica registration"); request->set_status(DTRStatus::REPLICA_REGISTERED); } } void Scheduler::ProcessDTRREPLICA_REGISTERED(DTR_ptr request){ // If there was a problem registering the destination file, // using a different source replica won't help, so pass to final step // (remote destinations can't be cached). The post-processor should have // taken care of deleting the physical file. 
If the error originates from // before, follow normal workflow and processor will clean up if(request->error() && request->get_error_status().GetLastErrorState() == DTRStatus::REGISTERING_REPLICA) { request->get_logger()->msg(Arc::ERROR, "Error registering replica, moving to end of data staging"); request->set_status(DTRStatus::CACHE_PROCESSED); } else if (!request->get_cache_parameters().cache_dirs.empty() && (request->get_cache_state() == CACHE_ALREADY_PRESENT || request->get_cache_state() == CACHE_DOWNLOADED || request->get_cache_state() == CACHEABLE || request->get_cache_state() == CACHE_NOT_USED)) { // Normal workflow is PROCESS_CACHE request->get_logger()->msg(Arc::VERBOSE, "Will process cache"); request->set_status(DTRStatus::PROCESS_CACHE); } else { // not a cacheable file request->get_logger()->msg(Arc::VERBOSE, "File is not cacheable, skipping cache processing"); request->set_status(DTRStatus::CACHE_PROCESSED); } } void Scheduler::ProcessDTRCACHE_PROCESSED(DTR_ptr request){ // Final stage within scheduler. Retries are initiated from here if necessary, // otherwise report success or failure to generator // First remove from caching list DtrList.caching_finished(request); if (request->cancel_requested()) { // Cancellation steps finished request->get_logger()->msg(Arc::VERBOSE, "Cancellation complete"); request->set_status(DTRStatus::CANCELLED); } else if(request->error()) { // If the error occurred in cache processing we send back // to REPLICA_QUERIED to try the same replica again without cache, // or to CACHE_CHECKED if the file was already in cache, or to NEW // to try again if there was a locking problem during link. If there // was a cache timeout we also go back to CACHE_CHECKED. If in // another place we are finished and report error to generator if (request->get_error_status().GetLastErrorState() == DTRStatus::PROCESSING_CACHE) { if (request->get_cache_state() == CACHE_LOCKED) { // set a flat wait time of 10s Arc::Period cache_wait_period(10); request->get_logger()->msg(Arc::INFO, "Will wait 10s"); request->set_process_time(cache_wait_period); request->set_cache_state(CACHEABLE); request->set_status(DTRStatus::NEW); } else { request->get_logger()->msg(Arc::ERROR, "Error in cache processing, will retry without caching"); if (request->get_cache_state() == CACHE_ALREADY_PRESENT) request->set_status(DTRStatus::CACHE_CHECKED); else request->set_status(DTRStatus::REPLICA_QUERIED); request->set_cache_state(CACHE_SKIP); } request->reset_error_status(); return; } else if (request->get_error_status().GetLastErrorState() == DTRStatus::CACHE_WAIT) { request->get_logger()->msg(Arc::ERROR, "Will retry without caching"); request->set_cache_state(CACHE_SKIP); request->reset_error_status(); request->set_status(DTRStatus::CACHE_CHECKED); return; } else { request->decrease_tries_left(); // Here we decide to retry based on whether the error is // temporary or not and the configured retry strategy if (request->get_error_status().GetErrorStatus() == DTRErrorStatus::TEMPORARY_REMOTE_ERROR || request->get_error_status().GetErrorStatus() == DTRErrorStatus::TRANSFER_SPEED_ERROR || request->get_error_status().GetErrorStatus() == DTRErrorStatus::INTERNAL_PROCESS_ERROR) { if (request->get_tries_left() > 0) { // Check if credentials are ok if (request->get_source()->RequiresCredentials() || request->get_destination()->RequiresCredentials()) { Arc::Time exp_time = request->get_credential_info().getExpiryTime(); if (exp_time < Arc::Time()) { request->get_logger()->msg(Arc::WARNING, "Proxy has expired"); 
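// An expired proxy cannot be fixed by waiting and retrying, so the back-off logic
// below is skipped and the DTR goes straight to the ERROR state. Otherwise the wait
// before the next attempt is 10*(attempts already made)^2 seconds - e.g. with 4
// initial tries the waits are 10*1*1=10s, 10*2*2=40s and 10*3*3=90s, the
// "10s, 40s, 90s" progression noted below.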
// Append this information to the error string DTRErrorStatus status = request->get_error_status(); request->set_error_status(status.GetErrorStatus(), status.GetErrorLocation(), status.GetDesc()+" (Proxy expired)"); request->set_status(DTRStatus::ERROR); return; } } // exponential back off - 10s, 40s, 90s, ... request->set_process_time(10*(request->get_initial_tries()-request->get_tries_left())* (request->get_initial_tries()-request->get_tries_left())); request->get_logger()->msg(Arc::INFO, "%i retries left, will wait until %s before next attempt", request->get_tries_left(), request->get_process_time().str()); // set state depending on where the error occurred if (request->get_error_status().GetLastErrorState() == DTRStatus::REGISTERING_REPLICA) { request->set_status(DTRStatus::REGISTER_REPLICA); } else if (request->get_error_status().GetLastErrorState() == DTRStatus::RELEASING_REQUEST) { request->set_status(DTRStatus::RELEASE_REQUEST); } else { // If error happened before or during transfer set back to NEW // Reset DTR information set during this transfer request->reset(); request->set_status(DTRStatus::NEW); } return; } else request->get_logger()->msg(Arc::ERROR, "Out of retries"); } request->get_logger()->msg(Arc::ERROR, "Permanent failure"); request->set_status(DTRStatus::ERROR); } } else { // Normal workflow is completed for this DTR successfully request->get_logger()->msg(Arc::INFO, "Finished successfully"); request->set_status(DTRStatus::DONE); } } void Scheduler::ProcessDTRFINAL_STATE(DTR_ptr request){ // This is the only place where the DTR is returned to the generator // and deleted from the global list // Return to the generator request->get_logger()->msg(Arc::INFO, "Returning to generator"); DTR::push(request, GENERATOR); // Delete from the global list DtrList.delete_dtr(request); } void Scheduler::map_state_and_process(DTR_ptr request){ // For cancelled DTRs set the appropriate post-processor state if(request->cancel_requested()) map_cancel_state(request); // Loop until the DTR is sent somewhere for some action to be done // This is more efficient because many DTRs will skip some states and // we don't want to have to wait for the full list to be processed before // advancing to the next state Arc::Time now; while((request->came_from_pre_processor() || request->came_from_delivery() || request->came_from_post_processor() || request->came_from_generator()) && request->get_process_time() <= now) { switch (request->get_status().GetStatus()) { case DTRStatus::NEW: ProcessDTRNEW(request); continue; case DTRStatus::CACHE_WAIT: ProcessDTRCACHE_WAIT(request); continue; case DTRStatus::CACHE_CHECKED: ProcessDTRCACHE_CHECKED(request); continue; case DTRStatus::RESOLVED: ProcessDTRRESOLVED(request); continue; case DTRStatus::REPLICA_QUERIED: ProcessDTRREPLICA_QUERIED(request); continue; case DTRStatus::PRE_CLEANED: ProcessDTRPRE_CLEANED(request); continue; case DTRStatus::STAGING_PREPARING_WAIT: ProcessDTRSTAGING_PREPARING_WAIT(request); continue; case DTRStatus::STAGED_PREPARED: ProcessDTRSTAGED_PREPARED(request); continue; case DTRStatus::TRANSFERRED: ProcessDTRTRANSFERRED(request); continue; case DTRStatus::REQUEST_RELEASED: ProcessDTRREQUEST_RELEASED(request); continue; case DTRStatus::REPLICA_REGISTERED: ProcessDTRREPLICA_REGISTERED(request); continue; case DTRStatus::CACHE_PROCESSED: ProcessDTRCACHE_PROCESSED(request); continue; default: break; //DoNothing } } } void Scheduler::map_cancel_state(DTR_ptr request){ switch (request->get_status().GetStatus()) { case DTRStatus::NEW: case 
DTRStatus::CHECK_CACHE: case DTRStatus::CACHE_WAIT: { // Nothing has yet been done to require cleanup or additional // activities. Return to the generator via CACHE_PROCESSED. request->set_status(DTRStatus::CACHE_PROCESSED); } break; case DTRStatus::CACHE_CHECKED: case DTRStatus::RESOLVE: { // The cache may have been started, so set to // REPLICA_REIGSTERED to allow post-processor to clean up cache request->set_status(DTRStatus::REPLICA_REGISTERED); } break; case DTRStatus::RESOLVED: case DTRStatus::QUERY_REPLICA: case DTRStatus::REPLICA_QUERIED: case DTRStatus::PRE_CLEAN: case DTRStatus::PRE_CLEANED: case DTRStatus::STAGE_PREPARE: { // At this stage we may have registered a file in an // index service so set to REQUEST_RELEASED to allow // the post-processor to clean it up request->set_status(DTRStatus::REQUEST_RELEASED); } break; case DTRStatus::STAGING_PREPARING_WAIT: case DTRStatus::STAGED_PREPARED: case DTRStatus::TRANSFER: { // At this stage we in addition to cache work // may already have pending requests. // The post-processor should take care of it too request->set_status(DTRStatus::TRANSFERRED); } break; case DTRStatus::TRANSFERRED: case DTRStatus::RELEASE_REQUEST: case DTRStatus::REQUEST_RELEASED: case DTRStatus::REGISTER_REPLICA: case DTRStatus::REPLICA_REGISTERED: case DTRStatus::PROCESS_CACHE: case DTRStatus::CACHE_PROCESSED: { // post-processing states // If the request was cancelled during the transfer, the delivery // should have cleaned up the destination file. If after the // transfer we have to decide whether to clean up or not. /* delete_destination_file() */ // No other action required here, just let the normal workflow // resume and the post-processor will take care of clean up } break; default: break; //Do Nothing } } void Scheduler::map_stuck_state(DTR_ptr request) { switch (request->get_status().GetStatus()) { case DTRStatus::CHECKING_CACHE: { // The cache may have been started, so set to // REPLICA_REIGSTERED to allow post-processor to clean up cache request->set_status(DTRStatus::REPLICA_REGISTERED); } break; case DTRStatus::RESOLVING: case DTRStatus::QUERYING_REPLICA: case DTRStatus::PRE_CLEANING: { // At this stage we may have registered a file in an // index service so set to REQUEST_RELEASED to allow // the post-processor to clean it up request->set_status(DTRStatus::REQUEST_RELEASED); } break; case DTRStatus::STAGING_PREPARING: { // At this stage we in addition to cache work // may already have pending requests. 
// The post-processor should take care of it too request->set_status(DTRStatus::TRANSFERRED); } break; // For post-processor states simply move on to next state case DTRStatus::RELEASING_REQUEST: { request->set_status(DTRStatus::REQUEST_RELEASED); } break; case DTRStatus::REGISTERING_REPLICA: { request->set_status(DTRStatus::REPLICA_REGISTERED); } break; case DTRStatus::PROCESSING_CACHE: { request->set_status(DTRStatus::CACHE_PROCESSED); } break; default: break; // Unexpected state - do nothing } } void Scheduler::add_event(DTR_ptr event) { event_lock.lock(); events.push_back(event); event_lock.unlock(); } void Scheduler::choose_delivery_service(DTR_ptr request) { if (configured_delivery_services.empty()) return; // Only local is configured if (configured_delivery_services.size() == 1 && configured_delivery_services.front() == DTR::LOCAL_DELIVERY) return; // Check for size limit under which local should be used if (remote_size_limit > 0 && request->get_source()->CheckSize() && request->get_source()->GetSize() < remote_size_limit) { request->get_logger()->msg(Arc::INFO, "File is smaller than %llu bytes, will use local delivery", remote_size_limit); request->set_delivery_endpoint(DTR::LOCAL_DELIVERY); return; } // Remember current endpoint Arc::URL delivery_endpoint(request->get_delivery_endpoint()); // Check delivery services when the first DTR is processed, and every 5 // minutes after that. The ones that work are the only ones that will be // used until the next check. // This method assumes that the DTR has permission on all services, // which may not be true if DN filtering is used on those services. if (usable_delivery_services.empty() || Arc::Time() - delivery_last_checked > 300) { delivery_last_checked = Arc::Time(); usable_delivery_services.clear(); for (std::vector::iterator service = configured_delivery_services.begin(); service != configured_delivery_services.end(); ++service) { request->set_delivery_endpoint(*service); std::vector allowed_dirs; std::string load_avg; if (!DataDeliveryComm::CheckComm(request, allowed_dirs, load_avg)) { log_to_root_logger(Arc::WARNING, "Error with delivery service at " + request->get_delivery_endpoint().str() + " - This service will not be used"); } else { usable_delivery_services[*service] = allowed_dirs; // This is not a timing measurement so use dummy timestamps timespec dummy; job_perf_log.Log("DTR_load_" + service->Host(), load_avg, dummy, dummy); } } request->set_delivery_endpoint(delivery_endpoint); if (usable_delivery_services.empty()) { log_to_root_logger(Arc::ERROR, "No usable delivery services found, will use local delivery"); return; } } // Make a list of the delivery services that this DTR can use std::vector possible_delivery_services; bool can_use_local = false; for (std::map >::iterator service = usable_delivery_services.begin(); service != usable_delivery_services.end(); ++service) { if (service->first == DTR::LOCAL_DELIVERY) can_use_local = true; for (std::vector::iterator dir = service->second.begin(); dir != service->second.end(); ++dir) { if (request->get_destination()->Local()) { // check for caching std::string dest = request->get_destination()->TransferLocations()[0].Path(); if ((request->get_cache_state() == CACHEABLE) && !request->get_cache_file().empty()) dest = request->get_cache_file(); if (dest.find(*dir) == 0) { request->get_logger()->msg(Arc::DEBUG, "Delivery service at %s can copy to %s", service->first.str(), *dir); possible_delivery_services.push_back(service->first); break; } } else if 
(request->get_source()->Local()) { if (request->get_source()->TransferLocations()[0].Path().find(*dir) == 0) { request->get_logger()->msg(Arc::DEBUG, "Delivery service at %s can copy from %s", service->first.str(), *dir); possible_delivery_services.push_back(service->first); break; } } else { // copy between two remote endpoints so any service is ok possible_delivery_services.push_back(service->first); break; } } } if (possible_delivery_services.empty()) { request->get_logger()->msg(Arc::WARNING, "Could not find any useable delivery service," " forcing local transfer"); request->set_delivery_endpoint(DTR::LOCAL_DELIVERY); return; } // only local if (possible_delivery_services.size() == 1 && can_use_local) { request->set_delivery_endpoint(DTR::LOCAL_DELIVERY); return; } // Exclude full services with transfers greater than slots/no services for (std::vector::iterator possible = possible_delivery_services.begin(); possible != possible_delivery_services.end();) { if (delivery_hosts[possible->Host()] > (int)(DeliverySlots/configured_delivery_services.size())) { request->get_logger()->msg(Arc::DEBUG, "Not using delivery service at %s because it is full", possible->str()); possible = possible_delivery_services.erase(possible); } else { ++possible; } } // If none left then we should not use local but wait if (possible_delivery_services.empty()) { request->set_delivery_endpoint(Arc::URL()); return; } // First try, use any service if (request->get_tries_left() == request->get_initial_tries()) { delivery_endpoint = possible_delivery_services.at(rand() % possible_delivery_services.size()); request->set_delivery_endpoint(delivery_endpoint); return; } // Retry, try not to use a previous problematic service. If all are // problematic then default to local (even if not configured) for (std::vector::iterator possible = possible_delivery_services.begin(); possible != possible_delivery_services.end();) { std::vector::const_iterator problem = request->get_problematic_delivery_services().begin(); while (problem != request->get_problematic_delivery_services().end()) { if (*possible == *problem) { request->get_logger()->msg(Arc::VERBOSE, "Not using delivery service %s due to previous failure", problem->str()); possible = possible_delivery_services.erase(possible); break; } ++problem; } if (problem == request->get_problematic_delivery_services().end()) ++possible; } if (possible_delivery_services.empty()) { // force local if (!can_use_local) request->get_logger()->msg(Arc::WARNING, "No remote delivery services " "are useable, forcing local delivery"); request->set_delivery_endpoint(DTR::LOCAL_DELIVERY); } else { // Find a random service different from the previous one, looping a // limited number of times in case all delivery services are the same url Arc::URL ep(possible_delivery_services.at(rand() % possible_delivery_services.size())); for (unsigned int i = 0; ep == delivery_endpoint && i < possible_delivery_services.size() * 10; ++i) { ep = possible_delivery_services.at(rand() % possible_delivery_services.size()); } request->set_delivery_endpoint(ep); } } void Scheduler::process_events(void){ Arc::Time now; event_lock.lock(); for (std::list::iterator event = events.begin(); event != events.end();) { DTR_ptr tmp = *event; event_lock.unlock(); if (tmp->get_process_time() <= now) { map_state_and_process(tmp); // If final state, the DTR is returned to the generator and deleted if (tmp->is_in_final_state()) { ProcessDTRFINAL_STATE(tmp); event_lock.lock(); event = events.erase(event); continue; } // If the event was 
sent on to a queue, erase it from the list if (tmp->is_destined_for_pre_processor() || tmp->is_destined_for_delivery() || tmp->is_destined_for_post_processor()) { event_lock.lock(); event = events.erase(event); continue; } } event_lock.lock(); ++event; } event_lock.unlock(); } void Scheduler::revise_queues() { // The DTRs ready to go into a processing state std::map<DTRStatus::DTRStatusType, std::list<DTR_ptr> > DTRQueueStates; DtrList.filter_dtrs_by_statuses(DTRStatus::ToProcessStates, DTRQueueStates); // The active DTRs currently in processing states std::map<DTRStatus::DTRStatusType, std::list<DTR_ptr> > DTRRunningStates; DtrList.filter_dtrs_by_statuses(DTRStatus::ProcessingStates, DTRRunningStates); // Get the number of current transfers for each delivery service for // enforcing limits per server delivery_hosts.clear(); for (std::list<DTR_ptr>::const_iterator i = DTRRunningStates[DTRStatus::TRANSFERRING].begin(); i != DTRRunningStates[DTRStatus::TRANSFERRING].end(); i++) { delivery_hosts[(*i)->get_delivery_endpoint().Host()]++; } // Get all the DTRs in a staged state staged_queue.clear(); std::list<DTR_ptr> staged_queue_list; DtrList.filter_dtrs_by_statuses(DTRStatus::StagedStates, staged_queue_list); // filter out stageable DTRs per transfer share, putting the highest // priority at the front for (std::list<DTR_ptr>::iterator i = staged_queue_list.begin(); i != staged_queue_list.end(); ++i) { if ((*i)->get_source()->IsStageable() || (*i)->get_destination()->IsStageable()) { std::list<DTR_ptr>& queue = staged_queue[(*i)->get_transfer_share()]; if (!queue.empty() && (*i)->get_priority() > queue.front()->get_priority()) { queue.push_front(*i); } else { queue.push_back(*i); } } } Arc::Time now; // Go through "to process" states, work out shares and push DTRs for (unsigned int i = 0; i < DTRStatus::ToProcessStates.size(); ++i) { std::list<DTR_ptr> DTRQueue = DTRQueueStates[DTRStatus::ToProcessStates.at(i)]; std::list<DTR_ptr> ActiveDTRs = DTRRunningStates[DTRStatus::ProcessingStates.at(i)]; if (DTRQueue.empty() && ActiveDTRs.empty()) continue; // Map of job id to list of DTRs, used for grouping bulk requests std::map<std::string, std::set<DTR_ptr> > bulk_requests; // Transfer shares for this queue TransferShares transferShares(transferSharesConf); // Sort the DTR queue according to the priorities the DTRs have. // Highest priority will be at the beginning of the list. DTRQueue.sort(dtr_sort_predicate); int highest_priority = 0; // First go over the queue and check for cancellation and timeout for (std::list<DTR_ptr>::iterator dtr = DTRQueue.begin(); dtr != DTRQueue.end();) { DTR_ptr tmp = *dtr; if (dtr == DTRQueue.begin()) highest_priority = tmp->get_priority(); // There's no check for cancellation requests for the post-processor. // Most DTRs with cancellation requests will go to the post-processor // for cleanups, hold releases, etc., so the cancellation requests // don't break normal workflow in the post-processor (as opposed // to any other process), but instead act just as a sign that the // post-processor should do additional cleanup activities. if (tmp->is_destined_for_pre_processor() || tmp->is_destined_for_delivery()) { // The cancellation requests break the normal workflow. A cancelled // request will either go back to generator or be put into a // post-processor state for clean up. if (tmp->cancel_requested()) { map_cancel_state(tmp); add_event(tmp); dtr = DTRQueue.erase(dtr); continue; } } // To avoid the situation where DTRs get blocked due to higher // priority DTRs, DTRs that have passed their timeout should have their // priority boosted. 
But this should only happen if there are higher // priority DTRs, since there could be a large queue of low priority DTRs // which, after having their priority boosted, would then block new // high priority requests. // The simple solution here is to increase priority by 1 every 5 minutes. // There is plenty of scope for more intelligent solutions. // TODO reset priority back to original value once past this stage. if (tmp->get_timeout() < now && tmp->get_priority() < highest_priority) { tmp->set_priority(tmp->get_priority() + 1); tmp->set_timeout(300); } // STAGE_PREPARE is a special case where we have to apply a limit to // avoid preparing too many files and then pins expire while in the // transfer queue. In future it may be better to limit per remote host. // For now count DTRs staging and transferring in this share and apply // limit. In order not to block the highest priority DTRs here we allow // them to bypass the limit. if (DTRStatus::ToProcessStates.at(i) == DTRStatus::STAGE_PREPARE) { if (staged_queue[tmp->get_transfer_share()].size() < StagedPreparedSlots || staged_queue[tmp->get_transfer_share()].front()->get_priority() < tmp->get_priority() ) { // Reset timeout tmp->set_timeout(3600); // add to the staging queue and sort to put highest priority first staged_queue[tmp->get_transfer_share()].push_front(tmp); staged_queue[tmp->get_transfer_share()].sort(dtr_sort_predicate); } else { // Past limit - this DTR cannot be processed this time so erase from queue dtr = DTRQueue.erase(dtr); continue; } } // check if bulk operation is possible for this DTR. To keep it simple // there is only one bulk request per job per revise_queues loop if (tmp->bulk_possible()) { std::string jobid(tmp->get_parent_job_id()); if (bulk_requests.find(jobid) == bulk_requests.end()) { std::set bulk_list; bulk_list.insert(tmp); bulk_requests[jobid] = bulk_list; } else { DTR_ptr first_bulk = *bulk_requests[jobid].begin(); // Only source bulk operations supported at the moment and limit to 100 if (bulk_requests[jobid].size() < 100 && first_bulk->get_source()->GetURL().Protocol() == tmp->get_source()->GetURL().Protocol() && first_bulk->get_source()->GetURL().Host() == tmp->get_source()->GetURL().Host() && first_bulk->get_source()->CurrentLocation().Protocol() == tmp->get_source()->CurrentLocation().Protocol() && first_bulk->get_source()->CurrentLocation().Host() == tmp->get_source()->CurrentLocation().Host() && // This is because we cannot have a mix of LFNs and GUIDs when querying a catalog like LFC first_bulk->get_source()->GetURL().MetaDataOption("guid").length() == tmp->get_source()->GetURL().MetaDataOption("guid").length()) { bulk_requests[jobid].insert(tmp); } } } transferShares.increase_transfer_share(tmp->get_transfer_share()); ++dtr; } // Go over the active DTRs and add to transfer share for (std::list::iterator dtr = ActiveDTRs.begin(); dtr != ActiveDTRs.end();) { DTR_ptr tmp = *dtr; if (tmp->get_status() == DTRStatus::TRANSFERRING) { // If the DTR is in Delivery, check for cancellation. The pre- and // post-processor DTRs don't get cancelled here but are allowed to // continue processing. if ( tmp->cancel_requested()) { tmp->get_logger()->msg(Arc::INFO, "Cancelling active transfer"); delivery.cancelDTR(tmp); dtr = ActiveDTRs.erase(dtr); continue; } } else if (tmp->get_modification_time() + 3600 < now) { // Stuck in processing thread for more than one hour - assume a hang // and try to recover and retry. It is potentially dangerous if a // stuck thread wakes up. 
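// Recovery works by setting an INTERNAL_PROCESS_ERROR below and calling
// map_stuck_state(), which moves the DTR ahead to a later state in the workflow so
// that the remaining post-processor cleanup (releasing staging requests, cache locks)
// still happens; ProcessDTRCACHE_PROCESSED then treats the internal error as
// retryable and can schedule another attempt.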
// Need to re-connect logger as it was disconnected in Processor thread tmp->connect_logger(); // Tell DTR not to delete LogDestinations - this creates a memory leak // but avoids crashes when stuck threads wake up. A proper fix could // be using autopointers in Logger tmp->set_delete_log_destinations(false); tmp->get_logger()->msg(Arc::WARNING, "Processing thread timed out. Restarting DTR"); tmp->set_error_status(DTRErrorStatus::INTERNAL_PROCESS_ERROR, DTRErrorStatus::NO_ERROR_LOCATION, "Processor thread timed out"); map_stuck_state(tmp); add_event(tmp); ++dtr; continue; } transferShares.increase_transfer_share((*dtr)->get_transfer_share()); ++dtr; } // If the queue is empty we can go straight to the next state if (DTRQueue.empty()) continue; // Slot limit for this state unsigned int slot_limit = DeliverySlots; if (DTRQueue.front()->is_destined_for_pre_processor()) slot_limit = PreProcessorSlots; else if (DTRQueue.front()->is_destined_for_post_processor()) slot_limit = PostProcessorSlots; // Calculate the slots available for each active share transferShares.calculate_shares(slot_limit); // Shares which have at least one DTR active and running. // Shares can only use emergency slots if they are not in this list. std::set active_shares; unsigned int running = ActiveDTRs.size(); // Go over the active DTRs again and decrease slots in corresponding shares for (std::list::iterator dtr = ActiveDTRs.begin(); dtr != ActiveDTRs.end(); ++dtr) { transferShares.decrease_number_of_slots((*dtr)->get_transfer_share()); active_shares.insert((*dtr)->get_transfer_share()); } // Now at the beginning of the queue we have DTRs that should be // launched first. Launch them, but with respect to the transfer shares. for (std::list::iterator dtr = DTRQueue.begin(); dtr != DTRQueue.end(); ++dtr) { DTR_ptr tmp = *dtr; // Check if there are any shares left in the queue which might need // an emergency share - if not we are done if (running >= slot_limit && transferShares.active_shares().size() == active_shares.size()) break; // Check if this DTR is still in a queue state (was not sent already // in a bulk operation) if (tmp->get_status() != DTRStatus::ToProcessStates.at(i)) continue; // Are there slots left for this share? 
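// (A sketch of the accounting, assuming calculate_shares() above divides slot_limit
//  between the currently active shares in proportion to their configured priorities:
//  with slot_limit = 10 and two shares of priority 3 and 1, the first share would get
//  roughly 7-8 slots and the second the remainder. Once 'running' reaches slot_limit,
//  only shares with no active DTR may still start, drawing on the EmergencySlots
//  checked at the bottom of this loop.)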
bool can_start = transferShares.can_start(tmp->get_transfer_share()); // Check if it is possible to use an emergency share if (running >= slot_limit && active_shares.find(tmp->get_transfer_share()) != active_shares.end()) { can_start = false; } if (can_start) { transferShares.decrease_number_of_slots(tmp->get_transfer_share()); // Send to processor/delivery if (tmp->is_destined_for_pre_processor()) { // Check for bulk if (tmp->bulk_possible()) { std::set bulk_set(bulk_requests[tmp->get_parent_job_id()]); if (bulk_set.size() > 1 && bulk_set.find(tmp) != bulk_set.end()) { tmp->get_logger()->msg(Arc::INFO, "Will use bulk request"); unsigned int dtr_no = 0; for (std::set::iterator i = bulk_set.begin(); i != bulk_set.end(); ++i) { if (dtr_no == 0) (*i)->set_bulk_start(true); if (dtr_no == bulk_set.size() - 1) (*i)->set_bulk_end(true); DTR::push(*i, PRE_PROCESSOR); ++dtr_no; } } else { DTR::push(tmp, PRE_PROCESSOR); } } else { DTR::push(tmp, PRE_PROCESSOR); } } else if (tmp->is_destined_for_post_processor()) DTR::push(tmp, POST_PROCESSOR); else if (tmp->is_destined_for_delivery()) { choose_delivery_service(tmp); if (!tmp->get_delivery_endpoint()) { // With a large queue waiting for delivery and different dirs per // delivery service this could slow things down as it could go // through every DTR in the queue tmp->get_logger()->msg(Arc::DEBUG, "No delivery endpoints available, will try later"); continue; } DTR::push(tmp, DELIVERY); delivery_hosts[tmp->get_delivery_endpoint().Host()]++; } ++running; active_shares.insert(tmp->get_transfer_share()); } // Hard limit with all emergency slots used if (running == slot_limit + EmergencySlots) break; } } } void Scheduler::receiveDTR(DTR_ptr request){ if (!request) { logger.msg(Arc::ERROR, "Scheduler received NULL DTR"); return; } if (request->get_status() != DTRStatus::NEW) { add_event(request); return; } // New DTR - first check it is valid if (!(*request)) { logger.msg(Arc::ERROR, "Scheduler received invalid DTR"); request->set_status(DTRStatus::ERROR); DTR::push(request, GENERATOR); return; } request->registerCallback(&processor,PRE_PROCESSOR); request->registerCallback(&processor,POST_PROCESSOR); request->registerCallback(&delivery,DELIVERY); /* Shares part*/ // First, get the transfer share this dtr should belong to std::string DtrTransferShare = transferSharesConf.extract_share_info(request); // If no share information could be obtained, use default share if (DtrTransferShare.empty()) DtrTransferShare = "_default"; // If this share is a reference share, we have to add the sub-share // to the reference list bool in_reference = transferSharesConf.is_configured(DtrTransferShare); int priority = transferSharesConf.get_basic_priority(DtrTransferShare); request->set_transfer_share(DtrTransferShare); DtrTransferShare = request->get_transfer_share(); // Now the sub-share is added to DtrTransferShare, add it to reference // shares if appropriate and update each TransferShare if (in_reference && !transferSharesConf.is_configured(DtrTransferShare)) { transferSharesConf.set_reference_share(DtrTransferShare, priority); } // Compute the priority this DTR receives - this is the priority of the // share adjusted by the priority of the parent job request->set_priority(int(transferSharesConf.get_basic_priority(DtrTransferShare) * request->get_priority() * 0.01)); /* Shares part ends*/ DtrList.add_dtr(request); add_event(request); } bool Scheduler::cancelDTRs(const std::string& jobid) { cancelled_jobs_lock.lock(); cancelled_jobs.push_back(jobid); 
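// Only the job id is recorded here; the scheduler's main loop (main_thread() below)
// picks it up on its next iteration and sets the cancel flag on every DTR belonging
// to this job.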
cancelled_jobs_lock.unlock(); return true; } void Scheduler::dump_thread(void* arg) { Scheduler* sched = (Scheduler*)arg; while (sched->scheduler_state == RUNNING && !sched->dumplocation.empty()) { // every second, dump state sched->DtrList.dumpState(sched->dumplocation); // Performance metric - total number of DTRs in the system timespec dummy; sched->job_perf_log.Log("DTR_total", Arc::tostring(sched->DtrList.size()), dummy, dummy); if (sched->dump_signal.wait(1000)) break; // notified by signal() } } bool Scheduler::stop() { state_lock.lock(); if(scheduler_state != RUNNING) { state_lock.unlock(); return false; } // cancel all jobs std::list alljobs = DtrList.all_jobs(); cancelled_jobs_lock.lock(); for (std::list::iterator job = alljobs.begin(); job != alljobs.end(); ++job) cancelled_jobs.push_back(*job); cancelled_jobs_lock.unlock(); // signal main loop to stop and wait for completion of all DTRs scheduler_state = TO_STOP; run_signal.wait(); scheduler_state = STOPPED; state_lock.unlock(); return true; } void Scheduler::main_thread (void* arg) { Scheduler* it = (Scheduler*)arg; it->main_thread(); } void Scheduler::main_thread (void) { logger.msg(Arc::INFO, "Scheduler starting up"); logger.msg(Arc::INFO, "Scheduler configuration:"); logger.msg(Arc::INFO, " Pre-processor slots: %u", PreProcessorSlots); logger.msg(Arc::INFO, " Delivery slots: %u", DeliverySlots); logger.msg(Arc::INFO, " Post-processor slots: %u", PostProcessorSlots); logger.msg(Arc::INFO, " Emergency slots: %u", EmergencySlots); logger.msg(Arc::INFO, " Prepared slots: %u", StagedPreparedSlots); logger.msg(Arc::INFO, " Shares configuration:\n%s", transferSharesConf.conf()); for (std::vector::iterator i = configured_delivery_services.begin(); i != configured_delivery_services.end(); ++i) { if (*i == DTR::LOCAL_DELIVERY) logger.msg(Arc::INFO, " Delivery service: LOCAL"); else logger.msg(Arc::INFO, " Delivery service: %s", i->str()); } // Start thread dumping DTR state if (!Arc::CreateThreadFunction(&dump_thread, this)) logger.msg(Arc::ERROR, "Failed to create DTR dump thread"); // Disconnect from root logger so that messages are logged to per-DTR Logger Arc::Logger::getRootLogger().setThreadContext(); root_destinations = Arc::Logger::getRootLogger().getDestinations(); Arc::Logger::getRootLogger().removeDestinations(); Arc::Logger::getRootLogger().setThreshold(DTR::LOG_LEVEL); while(scheduler_state != TO_STOP || !DtrList.empty()) { // first check for cancelled jobs cancelled_jobs_lock.lock(); std::list::iterator jobid = cancelled_jobs.begin(); for (;jobid != cancelled_jobs.end();) { std::list requests; DtrList.filter_dtrs_by_job(*jobid, requests); for (std::list::iterator dtr = requests.begin(); dtr != requests.end(); ++dtr) { (*dtr)->set_cancel_request(); (*dtr)->get_logger()->msg(Arc::INFO, "DTR %s cancelled", (*dtr)->get_id()); } jobid = cancelled_jobs.erase(jobid); } cancelled_jobs_lock.unlock(); // Dealing with pending events, i.e. 
DTRs from another processes process_events(); // Revise all the internal queues and take actions revise_queues(); Glib::usleep(50000); } // make sure final state is dumped before exit dump_signal.signal(); if (!dumplocation.empty()) DtrList.dumpState(dumplocation); log_to_root_logger(Arc::INFO, "Scheduler loop exited"); run_signal.signal(); } } // namespace DataStaging nordugrid-arc-5.4.2/src/libs/data-staging/PaxHeaders.7502/README0000644000000000000000000000012211523013225022130 xustar000000000000000027 mtime=1296832149.591951 26 atime=1513200576.81073 29 ctime=1513200662.08777302 nordugrid-arc-5.4.2/src/libs/data-staging/README0000644000175000002070000000033611523013225022201 0ustar00mockbuildmock00000000000000New data staging implementation. This code provides an advanced mechanism for data transfer and scheduling. It replaces most code in the loaders/ directory and other directories previously used for data transfer handling. nordugrid-arc-5.4.2/src/libs/PaxHeaders.7502/README0000644000000000000000000000012311523013225017566 xustar000000000000000027 mtime=1296832149.591951 26 atime=1513200576.83073 30 ctime=1513200662.065772751 nordugrid-arc-5.4.2/src/libs/README0000644000175000002070000000010511523013225017630 0ustar00mockbuildmock00000000000000ARC libraries. Libraries related to HED can be found in src/hed/libs.nordugrid-arc-5.4.2/src/PaxHeaders.7502/services0000644000000000000000000000013213214316026017527 xustar000000000000000030 mtime=1513200662.437777301 30 atime=1513200668.716854096 30 ctime=1513200662.437777301 nordugrid-arc-5.4.2/src/services/0000755000175000002070000000000013214316026017652 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/PaxHeaders.7502/acix0000644000000000000000000000013213214316030020446 xustar000000000000000030 mtime=1513200664.247799438 30 atime=1513200668.716854096 30 ctime=1513200664.247799438 nordugrid-arc-5.4.2/src/services/acix/0000755000175000002070000000000013214316030020571 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/acix/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712364472122022575 xustar000000000000000027 mtime=1406301266.326292 30 atime=1513200603.498056445 30 ctime=1513200664.246799426 nordugrid-arc-5.4.2/src/services/acix/Makefile.am0000644000175000002070000000163612364472122022645 0ustar00mockbuildmock00000000000000 if PYTHON_ENABLED pythondir = $(PYTHON_SITE_LIB)/acix nobase_python_PYTHON = __init__.py \ core/__init__.py \ core/bitvector.py \ core/bloomfilter.py \ core/cacheclient.py \ core/hashes.py \ core/indexclient.py \ core/ssl.py \ cacheserver/__init__.py \ cacheserver/cache.py \ cacheserver/cacheresource.py \ cacheserver/cachesetup.py \ cacheserver/pscan.py \ indexserver/__init__.py \ indexserver/index.py \ indexserver/indexresource.py \ indexserver/indexsetup.py SUBDIRS = core cacheserver indexserver DIST_SUBDIRS = core cacheserver indexserver endif nordugrid-arc-5.4.2/src/services/acix/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315733022600 xustar000000000000000030 mtime=1513200603.539056946 29 atime=1513200650.87063583 30 ctime=1513200664.247799438 nordugrid-arc-5.4.2/src/services/acix/Makefile.in0000644000175000002070000006662313214315733022664 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/acix DIST_COMMON = README $(am__nobase_python_PYTHON_DIST) \ $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive am__nobase_python_PYTHON_DIST = __init__.py core/__init__.py \ core/bitvector.py core/bloomfilter.py core/cacheclient.py \ core/hashes.py core/indexclient.py core/ssl.py \ cacheserver/__init__.py cacheserver/cache.py \ cacheserver/cacheresource.py cacheserver/cachesetup.py \ cacheserver/pscan.py indexserver/__init__.py \ indexserver/index.py indexserver/indexresource.py \ indexserver/indexsetup.py am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' 
am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pythondir)" py_compile = $(top_srcdir)/py-compile RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ 
GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = 
@SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @PYTHON_ENABLED_TRUE@pythondir = $(PYTHON_SITE_LIB)/acix @PYTHON_ENABLED_TRUE@nobase_python_PYTHON = __init__.py \ @PYTHON_ENABLED_TRUE@ core/__init__.py \ @PYTHON_ENABLED_TRUE@ core/bitvector.py \ @PYTHON_ENABLED_TRUE@ core/bloomfilter.py \ @PYTHON_ENABLED_TRUE@ core/cacheclient.py \ @PYTHON_ENABLED_TRUE@ core/hashes.py \ @PYTHON_ENABLED_TRUE@ core/indexclient.py \ @PYTHON_ENABLED_TRUE@ core/ssl.py \ @PYTHON_ENABLED_TRUE@ cacheserver/__init__.py \ @PYTHON_ENABLED_TRUE@ cacheserver/cache.py \ @PYTHON_ENABLED_TRUE@ cacheserver/cacheresource.py \ @PYTHON_ENABLED_TRUE@ cacheserver/cachesetup.py \ @PYTHON_ENABLED_TRUE@ cacheserver/pscan.py \ 
@PYTHON_ENABLED_TRUE@ indexserver/__init__.py \ @PYTHON_ENABLED_TRUE@ indexserver/index.py \ @PYTHON_ENABLED_TRUE@ indexserver/indexresource.py \ @PYTHON_ENABLED_TRUE@ indexserver/indexsetup.py @PYTHON_ENABLED_TRUE@SUBDIRS = core cacheserver indexserver @PYTHON_ENABLED_TRUE@DIST_SUBDIRS = core cacheserver indexserver all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/acix/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/acix/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-nobase_pythonPYTHON: $(nobase_python_PYTHON) @$(NORMAL_INSTALL) test -z "$(pythondir)" || $(MKDIR_P) "$(DESTDIR)$(pythondir)" @list='$(nobase_python_PYTHON)'; test -n "$(pythondir)" || list=; \ $(am__nobase_list) | while read dir files; do \ xfiles=; for p in $$files; do \ if test -f "$$p"; then b=; else b="$(srcdir)/"; fi; \ if test -f "$$b$$p"; then xfiles="$$xfiles $$b$$p"; dlist="$$dlist $$p"; \ else :; fi; done; \ test -z "$$xfiles" || { \ test "x$$dir" = x. || { \ echo "$(MKDIR_P) '$(DESTDIR)$(pythondir)/$$dir'"; \ $(MKDIR_P) "$(DESTDIR)$(pythondir)/$$dir"; }; \ echo " $(INSTALL_DATA) $$xfiles '$(DESTDIR)$(pythondir)/$$dir'"; \ $(INSTALL_DATA) $$xfiles "$(DESTDIR)$(pythondir)/$$dir" || exit $$?; }; \ if test -n "$$dlist"; then \ if test -z "$(DESTDIR)"; then \ PYTHON=$(PYTHON) $(py_compile) --basedir "$(pythondir)" $$dlist; \ else \ PYTHON=$(PYTHON) $(py_compile) --destdir "$(DESTDIR)" --basedir "$(pythondir)" $$dlist; \ fi; \ else :; fi \ done uninstall-nobase_pythonPYTHON: @$(NORMAL_UNINSTALL) @list='$(nobase_python_PYTHON)'; test -n "$(pythondir)" || list=; \ $(am__nobase_strip_setup); files=`$(am__nobase_strip)`; \ test -n "$$files" || exit 0; \ filesc=`echo "$$files" | sed 's|$$|c|'`; \ fileso=`echo "$$files" | sed 's|$$|o|'`; \ echo " ( cd '$(DESTDIR)$(pythondir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pythondir)" && rm -f $$files || exit $$?; \ echo " ( cd '$(DESTDIR)$(pythondir)' && rm -f" $$filesc ")"; \ cd "$(DESTDIR)$(pythondir)" && rm -f $$filesc || exit $$?; \ echo " ( cd '$(DESTDIR)$(pythondir)' && rm -f" $$fileso ")"; \ cd "$(DESTDIR)$(pythondir)" && rm -f $$fileso # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. 
# To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pythondir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
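For reference, the install-nobase_pythonPYTHON rule above installs the acix modules under $(pythondir) and then hands the installed list to automake's $(py_compile) helper so that byte-compiled files are generated next to them. A minimal, purely illustrative sketch of that byte-compilation step using only the Python standard library (the directory argument is a placeholder, not one of the package's real install paths):

# Illustrative only: roughly what the $(py_compile) helper achieves for the
# installed acix modules, i.e. byte-compile every .py file under a directory.
import compileall
import sys

def byte_compile(basedir):
    # basedir stands in for $(DESTDIR)$(pythondir); supply it on the command line.
    ok = compileall.compile_dir(basedir, quiet=1)
    return 0 if ok else 1

if __name__ == '__main__':
    sys.exit(byte_compile(sys.argv[1] if len(sys.argv) > 1 else '.'))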
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-nobase_pythonPYTHON install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-nobase_pythonPYTHON .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-nobase_pythonPYTHON \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-recursive uninstall uninstall-am \ uninstall-nobase_pythonPYTHON # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/acix/PaxHeaders.7502/indexserver0000644000000000000000000000013213214316030023004 xustar000000000000000030 mtime=1513200664.353800734 30 atime=1513200668.716854096 30 ctime=1513200664.353800734 nordugrid-arc-5.4.2/src/services/acix/indexserver/0000755000175000002070000000000013214316030023127 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/acix/indexserver/PaxHeaders.7502/indexresource.py0000644000000000000000000000012312046514637026331 xustar000000000000000026 mtime=1352309151.21856 27 atime=1513200575.619715 30 ctime=1513200664.245799414 nordugrid-arc-5.4.2/src/services/acix/indexserver/indexresource.py0000644000175000002070000000177312046514637026407 0ustar00mockbuildmock00000000000000""" resource to query urls for """ # json module is stock in Python 2.6, for Python 2.5 we use simplejson try: import json except ImportError: import simplejson as json from twisted.python import log from twisted.web import resource class IndexResource(resource.Resource): isLeaf = True def __init__(self, index): resource.Resource.__init__(self) self.index = index def render_GET(self, request): log.msg("Index get. 
Args:" + str(request.args)) try: urls = request.args['url'][0].split(',') except KeyError, e: log.msg("Couldn't get url argument from request") request.setResponseCode(400) return "Couldn't get url argument from request" log.msg("Query for urls: " + str(urls)) result = self.index.query(urls) rv = json.dumps(result) request.setHeader('Content-type', 'application/json') request.setHeader('Content-length', str(len(rv))) return rv nordugrid-arc-5.4.2/src/services/acix/indexserver/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712754431715025141 xustar000000000000000027 mtime=1471296461.229277 30 atime=1513200603.777059857 30 ctime=1513200664.349800685 nordugrid-arc-5.4.2/src/services/acix/indexserver/Makefile.am0000644000175000002070000000070112754431715025201 0ustar00mockbuildmock00000000000000if SYSV_SCRIPTS_ENABLED INDEXSERVER_SCRIPT = acix-index else INDEXSERVER_SCRIPT = endif dist_initd_SCRIPTS = $(INDEXSERVER_SCRIPT) if SYSTEMD_UNITS_ENABLED INDEXSERVER_UNIT = acix-index.service INDEXSERVER_UNIT_WRAPPER = acix-index-start else INDEXSERVER_UNIT = INDEXSERVER_UNIT_WRAPPER = endif units_DATA = $(INDEXSERVER_UNIT) pkgdata_SCRIPTS = $(INDEXSERVER_UNIT_WRAPPER) EXTRA_DIST = acix-index-start SUBDIRS = $(TEST_DIR) dist_SUBDIRS = test nordugrid-arc-5.4.2/src/services/acix/indexserver/PaxHeaders.7502/acix-index0000644000000000000000000000012412623100535025042 xustar000000000000000027 mtime=1447854429.759916 27 atime=1513200575.615715 30 ctime=1513200664.349800685 nordugrid-arc-5.4.2/src/services/acix/indexserver/acix-index0000755000175000002070000001240712623100535025116 0ustar00mockbuildmock00000000000000#!/bin/sh # ARC cache index server # # chkconfig: 2345 75 25 # description: The ARC cache index server collects cache information from \ # cache servers and can be queried for the locations of cached \ # files. ### BEGIN INIT INFO # Provides: acix-index # Required-Start: $network $local_fs # Required-Stop: $network $local_fs # Should-Start: $time # Should-Stop: $time # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: ARC cacheindex, index server # Description: The ARC cache index server collects cache information # from cache servers and can be queried for the locations # of cached files. ### END INIT INFO # source function library if [ -f /etc/init.d/functions ]; then . /etc/init.d/functions log_success_msg() { echo -n "$@" success "$@" echo } log_warning_msg() { echo -n "$@" warning "$@" echo } log_failure_msg() { echo -n "$@" failure "$@" echo } elif [ -f /lib/lsb/init-functions ]; then . /lib/lsb/init-functions else echo "Error: Cannot source neither init.d nor lsb functions" exit 1 fi PIDFILE=/var/run/acix-index.pid LOGFILE=/var/log/arc/acix-index.log LOGD=`dirname $LOGFILE` LOGN=`basename $LOGFILE` if [ ! -d $LOGD ]; then mkdir -p $LOGD fi prog=twistd RUN=yes # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/acix-index ]; then . /etc/sysconfig/acix-index elif [ -r /etc/default/acix-index ]; then . 
/etc/default/acix-index fi if [ `id -u` = 0 ] ; then # Debian does not have /var/lock/subsys if [ -d /var/lock/subsys ]; then LOCKFILE=/var/lock/subsys/acix-index else LOCKFILE=/var/lock/acix-index fi else LOCKFILE=$HOME/acix-index.lock fi APPSTART=" from acix import indexserver; from twisted.python import log; from twisted.python.logfile import LogFile; application = indexserver.createApplication(); log.startLogging(LogFile('$LOGN', '$LOGD', rotateLength=1000000, maxRotatedFiles=25)) " do_start() { if [ "$RUN" != "yes" ] ; then echo "acix-index service is disabled, please adjust the configuration to your" echo "needs and then set RUN to 'yes' in /etc/default/acix-index to enable it." return 0 fi # ARC_CONFIG if [ "x$ARC_CONFIG" = "x" ] && [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi if [ ! -r "$ARC_CONFIG" ]; then log_failure_msg "ARC configuration not found (usually /etc/arc.conf)" exit 1 fi echo -n "Starting ARC cache index..." # Check if we are already running if [ -f $PIDFILE ]; then read pid < $PIDFILE if [ "x$pid" != "x" ]; then ps -p "$pid" -o comm 2>/dev/null | grep "^$prog$" 1>/dev/null 2>/dev/null if [ $? -eq 0 ] ; then log_success_msg "already running (pid $pid)" return 0 fi fi rm -f "$PIDFILE" "$LOCKFILE" fi TACFILE=`mktemp` || exit 1 echo $APPSTART > $TACFILE $prog --pidfile $PIDFILE -y $TACFILE -l $LOGFILE RETVAL=$? rm -f $TACFILE if [ $RETVAL -eq 0 ]; then touch $LOCKFILE log_success_msg else log_failure_msg fi return $RETVAL } do_stop() { echo -n "Stopping ARC cache index..." if [ -f "$PIDFILE" ]; then read pid < "$PIDFILE" if [ ! -z "$pid" ] ; then kill "$pid" RETVAL=$? if [ $RETVAL -eq 0 ]; then log_success_msg else log_failure_msg fi timeout=2; # for stopping nicely while ( ps -p "$pid" -o comm 2>/dev/null | grep "^$prog$" 1>/dev/null 2>/dev/null ) && [ $timeout -ge 1 ] ; do sleep 1 timeout=$(($timeout - 1)) done [ $timeout -lt 1 ] && kill -9 "$pid" 1>/dev/null 2>&1 rm -f "$PIDFILE" "$LOCKFILE" else RETVAL=1 log_failure_msg "$prog shutdown - pidfile is empty" fi else RETVAL=0 log_success_msg "$prog shutdown - already stopped" fi return $RETVAL } do_status() { if [ -f "$PIDFILE" ]; then read pid < "$PIDFILE" if [ "$pid" != "" ]; then if ps -p "$pid" > /dev/null; then echo "$1 (pid $pid) is running..." return 0 fi echo "$1 stopped but pid file exists" return 1 fi fi if [ -f $LOCKFILE ]; then echo "$1 stopped but lockfile exists" return 2 fi echo "$1 is stopped" return 3 } do_restart() { do_stop do_start } case "$1" in start) do_start ;; stop) do_stop ;; restart|reload|force-reload) do_restart ;; condrestart|try-restart) [ -f $LOCKFILE ] && do_restart || : ;; status) do_status $prog ;; *) echo "Usage: $0 {start|stop|restart|status|reload|condrestart|try-restart}" exit 1 ;; esac exit 0 nordugrid-arc-5.4.2/src/services/acix/indexserver/PaxHeaders.7502/acix-index-start0000644000000000000000000000012412754431715026211 xustar000000000000000027 mtime=1471296461.229277 27 atime=1513200575.619715 30 ctime=1513200664.352800722 nordugrid-arc-5.4.2/src/services/acix/indexserver/acix-index-start0000755000175000002070000000341612754431715026265 0ustar00mockbuildmock00000000000000#!/bin/sh PIDFILE=/var/run/acix-index.pid DEFAULT_LOGFILE=/var/log/arc/acix-index.log prog=twistd RUN=yes # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/acix-index ]; then . /etc/sysconfig/acix-index elif [ -r /etc/default/acix-index ]; then . 
/etc/default/acix-index fi if [ "$RUN" != "yes" ] ; then echo "acix-index service is disabled, please adjust the configuration to your" echo "needs and then set RUN to 'yes' in /etc/default/acix-index to enable it." return 0 fi # ARC_CONFIG if [ "x$ARC_CONFIG" = "x" ] && [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi if [ ! -r "$ARC_CONFIG" ]; then echo "ARC configuration not found (usually /etc/arc.conf)" exit 1 fi CONFIG_SECTION=acix\\/indexserver # read in cacheserver section from arc.conf # this will put the read values into the environment, e.g., $logfile eval `sed -e 's/[[:space:]]*\=[[:space:]]*/=/g' \ -e 's/;.*$//' \ -e 's/[[:space:]]*$//' \ -e 's/^[[:space:]]*//' \ -e "s/^\(.*\)=\([^\"']*\)$/\1=\"\2\"/" \ < $ARC_CONFIG \ | sed -n -e "/^\[$CONFIG_SECTION\]/,/^\s*\[/{/^[^;].*\=.*/p;}" ` LOGFILE=${logfile:-$DEFAULT_LOGFILE} LOGD=`dirname $LOGFILE` LOGN=`basename $LOGFILE` if [ ! -d $LOGD ]; then mkdir -p $LOGD fi APPSTART=" from acix import indexserver; from twisted.python import log; from twisted.python.logfile import LogFile; application = indexserver.createApplication(); log.startLogging(LogFile('$LOGN', '$LOGD', rotateLength=1000000, maxRotatedFiles=25)) " TACFILE=`mktemp` || exit 1 echo $APPSTART > $TACFILE exec $prog --pidfile $PIDFILE -y $TACFILE -l $LOGFILE rm -f $TACFILE nordugrid-arc-5.4.2/src/services/acix/indexserver/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315733025137 xustar000000000000000030 mtime=1513200603.819060371 30 atime=1513200650.963636967 30 ctime=1513200664.350800698 nordugrid-arc-5.4.2/src/services/acix/indexserver/Makefile.in0000644000175000002070000007145513214315733025221 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
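The IndexResource.render_GET handler shown earlier answers a plain HTTP GET whose 'url' argument carries a comma-separated list of URLs, queries the index, and returns the result as a JSON body with Content-type application/json. A minimal client-side sketch of that exchange, written in the same Python 2 style as the service code; the endpoint host, port and path below are placeholders, not values defined anywhere in this package:

# Illustrative ACIX index query client; the endpoint URL is an assumption.
import json
import urllib
import urllib2

def query_index(index_endpoint, urls):
    # index_endpoint might look like 'http://localhost:6080/index' (hypothetical).
    query = urllib.urlencode({'url': ','.join(urls)})
    response = urllib2.urlopen(index_endpoint + '?' + query)
    # render_GET above returns json.dumps(result) with Content-type application/json,
    # so the body decodes straight back into Python objects.
    return json.loads(response.read())

# Example use (hypothetical URLs):
#   locations = query_index('http://localhost:6080/index',
#                           ['http://example.org/data/file1',
#                            'srm://srm.example.org/data/file2'])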
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/acix/indexserver DIST_COMMON = $(am__dist_initd_SCRIPTS_DIST) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in $(srcdir)/acix-index.service.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = acix-index.service CONFIG_CLEAN_VPATH_FILES = am__dist_initd_SCRIPTS_DIST = acix-index am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(initddir)" "$(DESTDIR)$(pkgdatadir)" \ "$(DESTDIR)$(unitsdir)" SCRIPTS = $(dist_initd_SCRIPTS) $(pkgdata_SCRIPTS) SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive DATA = $(units_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ 
sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = 
@GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = 
@ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @SYSV_SCRIPTS_ENABLED_FALSE@INDEXSERVER_SCRIPT = @SYSV_SCRIPTS_ENABLED_TRUE@INDEXSERVER_SCRIPT = acix-index dist_initd_SCRIPTS = $(INDEXSERVER_SCRIPT) @SYSTEMD_UNITS_ENABLED_FALSE@INDEXSERVER_UNIT = @SYSTEMD_UNITS_ENABLED_TRUE@INDEXSERVER_UNIT = acix-index.service @SYSTEMD_UNITS_ENABLED_FALSE@INDEXSERVER_UNIT_WRAPPER = @SYSTEMD_UNITS_ENABLED_TRUE@INDEXSERVER_UNIT_WRAPPER = acix-index-start units_DATA = $(INDEXSERVER_UNIT) pkgdata_SCRIPTS = $(INDEXSERVER_UNIT_WRAPPER) EXTRA_DIST = acix-index-start SUBDIRS = $(TEST_DIR) dist_SUBDIRS = test all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/acix/indexserver/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/acix/indexserver/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): acix-index.service: $(top_builddir)/config.status $(srcdir)/acix-index.service.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-dist_initdSCRIPTS: $(dist_initd_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(initddir)" || $(MKDIR_P) "$(DESTDIR)$(initddir)" @list='$(dist_initd_SCRIPTS)'; test -n "$(initddir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(initddir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(initddir)$$dir" || exit $$?; \ } \ ; done uninstall-dist_initdSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(dist_initd_SCRIPTS)'; test -n "$(initddir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(initddir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(initddir)" && rm -f $$files install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files 
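The acix-index-start wrapper installed by the pkgdata_SCRIPTS rules above pulls its settings (for example logfile, falling back to /var/log/arc/acix-index.log) out of the [acix/indexserver] block of arc.conf with a sed/eval pipeline. A rough, purely illustrative Python equivalent of that parse; the section name, key and default come from the script itself, the rest is a sketch:

# Illustrative only: read key=value pairs from one arc.conf block, roughly
# mirroring the sed/eval pipeline used in acix-index-start.
def read_arc_conf_section(path, section):
    values = {}
    in_section = False
    for line in open(path):
        line = line.strip()
        if not line or line.startswith(';'):
            continue                              # skip blanks and comments
        if line.startswith('['):
            in_section = (line == '[%s]' % section)
            continue
        if in_section and '=' in line:
            key, _, value = line.partition('=')
            values[key.strip()] = value.strip().strip('"')
    return values

# logfile = read_arc_conf_section('/etc/arc.conf', 'acix/indexserver').get(
#     'logfile', '/var/log/arc/acix-index.log')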
mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-unitsDATA: $(units_DATA) @$(NORMAL_INSTALL) test -z "$(unitsdir)" || $(MKDIR_P) "$(DESTDIR)$(unitsdir)" @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(unitsdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(unitsdir)" || exit $$?; \ done uninstall-unitsDATA: @$(NORMAL_UNINSTALL) @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(unitsdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(unitsdir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . 
|| ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(SCRIPTS) $(DATA) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(initddir)" "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(unitsdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dist_initdSCRIPTS install-pkgdataSCRIPTS \ install-unitsDATA install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-dist_initdSCRIPTS uninstall-pkgdataSCRIPTS \ uninstall-unitsDATA .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dist_initdSCRIPTS install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkgdataSCRIPTS install-ps \ install-ps-am install-strip install-unitsDATA installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am uninstall-dist_initdSCRIPTS \ uninstall-pkgdataSCRIPTS uninstall-unitsDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/acix/indexserver/PaxHeaders.7502/acix-index.service.in0000644000000000000000000000012612754431715027124 xustar000000000000000027 mtime=1471296461.229277 30 atime=1513200650.979637163 29 ctime=1513200664.35180071 nordugrid-arc-5.4.2/src/services/acix/indexserver/acix-index.service.in0000644000175000002070000000032512754431715027167 0ustar00mockbuildmock00000000000000[Unit] Description=ARC cache index server After=network.target local_fs.target [Service] Type=forking PIDFile=/var/run/acix-index.pid ExecStart=@pkgdatadir@/acix-index-start [Install] WantedBy=multi-user.target nordugrid-arc-5.4.2/src/services/acix/indexserver/PaxHeaders.7502/test0000644000000000000000000000013213214316030023763 xustar000000000000000030 mtime=1513200664.367800906 30 atime=1513200668.716854096 30 ctime=1513200664.367800906 nordugrid-arc-5.4.2/src/services/acix/indexserver/test/0000755000175000002070000000000013214316030024106 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/acix/indexserver/test/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712014647710026111 xustar000000000000000027 mtime=1345540040.509752 30 atime=1513200603.836060579 30 ctime=1513200664.365800881 nordugrid-arc-5.4.2/src/services/acix/indexserver/test/Makefile.am0000644000175000002070000000033012014647710026147 0ustar00mockbuildmock00000000000000TESTSCRIPTS = test_system.py if ACIX_TESTS_ENABLED TESTS_ENVIRONMENT = PYTHONPATH=$(top_srcdir)/src/services $(TRIAL) TESTS = $(TESTSCRIPTS) else TESTS = endif check_SCRIPTS = $(TESTS) EXTRA_DIST = $(TESTSCRIPTS) nordugrid-arc-5.4.2/src/services/acix/indexserver/test/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315733026116 xustar000000000000000030 mtime=1513200603.872061019 30 atime=1513200650.994637347 30 ctime=1513200664.366800893 nordugrid-arc-5.4.2/src/services/acix/indexserver/test/Makefile.in0000644000175000002070000004546113214315733026176 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
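The test/Makefile.am above runs the modules listed in TESTSCRIPTS (here test_system.py) through $(TRIAL) with PYTHONPATH pointed at the src/services tree, so the ACIX tests are Twisted trial test cases. The real test_system.py is not reproduced in this listing; the following is only a generic sketch of the shape of module trial expects, with placeholder names and assertions:

# Generic twisted.trial test module sketch -- NOT the package's test_system.py.
from twisted.internet import defer
from twisted.trial import unittest

class ExampleSystemTest(unittest.TestCase):

    def test_plain_assertion(self):
        self.assertEqual(1 + 1, 2)               # trivial synchronous check

    @defer.inlineCallbacks
    def test_deferred_result(self):
        # trial waits for yielded/returned Deferreds before deciding pass/fail
        result = yield defer.succeed('ok')       # placeholder for a real client call
        self.assertEqual(result, 'ok')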
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ @ACIX_TESTS_ENABLED_TRUE@TESTS = $(TESTSCRIPTS) subdir = src/services/acix/indexserver/test DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__tty_colors = \ red=; grn=; lgn=; blu=; std= DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = 
@DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = 
@PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ TESTSCRIPTS = test_system.py @ACIX_TESTS_ENABLED_TRUE@TESTS_ENVIRONMENT = PYTHONPATH=$(top_srcdir)/src/services $(TRIAL) check_SCRIPTS = $(TESTS) EXTRA_DIST = $(TESTSCRIPTS) all: 
all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/acix/indexserver/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/acix/indexserver/test/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags: TAGS TAGS: ctags: CTAGS CTAGS: check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_SCRIPTS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: all all-am check check-TESTS check-am clean clean-generic \ clean-libtool distclean distclean-generic distclean-libtool \ distdir dvi dvi-am html html-am info info-am install \ install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/acix/indexserver/test/PaxHeaders.7502/test_system.py0000644000000000000000000000012412242373672027015 xustar000000000000000027 mtime=1384773562.388247 27 atime=1513200575.617715 30 ctime=1513200664.367800906 nordugrid-arc-5.4.2/src/services/acix/indexserver/test/test_system.py0000644000175000002070000000763112242373672027071 0ustar00mockbuildmock00000000000000""" setup two cache resources, and one index resources, and try out the whole things and see if it works :-) """ import hashlib from twisted.trial import unittest from twisted.internet import reactor, defer from twisted.web import resource, server from acix.core import indexclient from acix.cacheserver import cache, cacheresource from acix.indexserver import index, indexresource TEST_URLS1 = [ 'srm://srm.ndgf.org/pnfs/ndgf.org/data/ops/sam-test/testfile', 'gsiftp://grid.tsl.uu.se:2811/storage/sam/testfile'] TEST_URLS2 = [ 'lfc://lfc1.ndgf.org//grid/ops.ndgf.org/sam/testfile', 'srm://srm.ndgf.org/pnfs/ndgf.org/data/ops/sam-test/testfile'] class TestScanner: def __init__(self, urls): self.urls = urls def dir(self): return "testscanner (no dir)" def scan(self, filter): for url in self.urls: filter(hashlib.sha1(url).hexdigest()) return defer.succeed(None) class SystemTest(unittest.TestCase): cport1 = 4080 cport2 = 4081 xport = 4082 @defer.inlineCallbacks def setUp(self): # cheap trick to get multiple hostnames on one host self.cache_urls = [ 'http://localhost:%i/cache' % self.cport1, 'http://127.0.0.1:%i/cache' % self.cport2 ] scanner1 = TestScanner(TEST_URLS1) scanner2 = TestScanner(TEST_URLS2) self.cs1 = cache.Cache(scanner1, 10000, 60, '') self.cs2 = cache.Cache(scanner2, 10000, 60, 'http://127.0.0.1/arex/cache') self.idx = index.CacheIndex(self.cache_urls) cr1 = cacheresource.CacheResource(self.cs1) cr2 = cacheresource.CacheResource(self.cs2) idxr = indexresource.IndexResource(self.idx) c1siteroot = resource.Resource() c1siteroot.putChild('cache', cr1) c1site = server.Site(c1siteroot) c2siteroot = resource.Resource() c2siteroot.putChild('cache', cr2) c2site = server.Site(c2siteroot) idx_siteroot = resource.Resource() idx_siteroot.putChild('index', idxr) idx_site = server.Site(idx_siteroot) yield self.cs1.startService() yield self.cs2.startService() self.iport1 = reactor.listenTCP(self.cport1, c1site) self.iport2 = reactor.listenTCP(self.cport2, c2site) #yield self.idx.startService() yield self.idx.renewIndex() # ensure that we have fetched cache self.iport3 = reactor.listenTCP(self.xport, idx_site) self.index_url = "http://localhost:%i/index" % (self.xport) @defer.inlineCallbacks def tearDown(self): yield self.cs1.stopService() yield self.cs2.stopService() #yield self.idx.stopService() yield self.iport1.stopListening() yield self.iport2.stopListening() yield self.iport3.stopListening() @defer.inlineCallbacks def testIndexQuery(self): urls1 = [ TEST_URLS1[1] ] result = yield indexclient.queryIndex(self.index_url, urls1) self.failUnlessIn(urls1[0], result) locations = result[urls1[0]] self.failUnlessEqual(locations, [u'localhost']) urls2 = [ TEST_URLS1[0] ] result = yield indexclient.queryIndex(self.index_url, urls2) self.failUnlessIn(urls2[0], result) locations = result[urls2[0]] self.failUnlessEqual(len(locations), 2) self.failUnlessIn(u'localhost', locations) self.failUnlessIn(u'http://127.0.0.1/arex/cache', locations) urls3 = [ 'srm://host/no_such_file' ] result = yield indexclient.queryIndex(self.index_url, urls3) self.failUnlessIn(urls3[0], result) self.failUnlessEqual(result, {urls3[0]: 
[]}) urls4 = [ TEST_URLS2[0] ] result = yield indexclient.queryIndex(self.index_url, urls4) self.failUnlessIn(urls4[0], result) locations = result[urls4[0]] self.failUnlessEqual(locations, [u'http://127.0.0.1/arex/cache']) nordugrid-arc-5.4.2/src/services/acix/indexserver/PaxHeaders.7502/__init__.py0000644000000000000000000000012412010730234025172 xustar000000000000000027 mtime=1344516252.982799 27 atime=1513200575.619715 30 ctime=1513200664.243799389 nordugrid-arc-5.4.2/src/services/acix/indexserver/__init__.py0000644000175000002070000000014012010730234025232 0ustar00mockbuildmock00000000000000from acix.indexserver import indexsetup createApplication = indexsetup.createIndexApplication nordugrid-arc-5.4.2/src/services/acix/indexserver/PaxHeaders.7502/indexsetup.py0000644000000000000000000000012312046514637025642 xustar000000000000000026 mtime=1352309151.21856 27 atime=1513200575.617715 30 ctime=1513200664.246799426 nordugrid-arc-5.4.2/src/services/acix/indexserver/indexsetup.py0000644000175000002070000000337312046514637025716 0ustar00mockbuildmock00000000000000import os from twisted.application import internet, service from twisted.web import resource, server from acix.core import ssl from acix.indexserver import index, indexresource # -- constants SSL_DEFAULT = True INDEX_TCP_PORT = 6080 INDEX_SSL_PORT = 6443 DEFAULT_INDEX_REFRESH_INTERVAL = 301 # seconds between updating cache ARC_CONF = '/etc/arc.conf' def getCacheServers(): cache_dirs = [] config = ARC_CONF if 'ARC_CONFIG' in os.environ: config = os.environ['ARC_CONFIG'] for line in file(config): if line.startswith('cacheserver'): args = line.split('=', 2)[1] cache_dir = args.replace('"', '').strip() cache_dirs.append(cache_dir) # use localhost as default if none defined if len(cache_dirs) == 0: cache_dirs.append('https://localhost:5443/data/cache') return cache_dirs def createIndexApplication(use_ssl=SSL_DEFAULT, port=None, refresh_interval=DEFAULT_INDEX_REFRESH_INTERVAL): # monkey-patch fix for dealing with low url-length limit from twisted.protocols import basic basic.LineReceiver.MAX_LENGTH = 65535 ci = index.CacheIndex(getCacheServers(), refresh_interval) siteroot = resource.Resource() dataroot = resource.Resource() dataroot.putChild('index', indexresource.IndexResource(ci)) siteroot.putChild('data', dataroot) site = server.Site(siteroot) application = service.Application("arc-indexserver") ci.setServiceParent(application) if use_ssl: cf = ssl.ContextFactory() internet.SSLServer(port or INDEX_SSL_PORT, site, cf).setServiceParent(application) else: internet.TCPServer(port or INDEX_TCP_PORT, site).setServiceParent(application) return application nordugrid-arc-5.4.2/src/services/acix/indexserver/PaxHeaders.7502/index.py0000644000000000000000000000012312415470135024553 xustar000000000000000026 mtime=1412853853.46933 27 atime=1513200575.617715 30 ctime=1513200664.244799401 nordugrid-arc-5.4.2/src/services/acix/indexserver/index.py0000644000175000002070000000451412415470135024625 0ustar00mockbuildmock00000000000000""" resource to fetch a bloom filter from """ import hashlib import urlparse from twisted.python import log from twisted.internet import defer, task from twisted.application import service from acix.core import bloomfilter, cacheclient, ssl class CacheIndex(service.Service): def __init__(self, urls, refresh_interval=300): self.urls = urls self.refresh_interval = refresh_interval # seconds self.index_task = task.LoopingCall(self.renewIndex) self.filters = {} # host -> filter mapping def startService(self): 
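        # Descriptive note on existing code: startService/stopService hook into the
        # twisted.application.service lifecycle. Starting the service kicks off the
        # LoopingCall created in __init__, which invokes renewIndex() every
        # refresh_interval seconds; renewIndex() re-fetches the bloom filter from each
        # configured cache server URL and stores it in self.filters keyed by cache URL
        # (or hostname when the cache does not report its own access URL).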
self.index_task.start(self.refresh_interval) def stopService(self): self.index_task.stop() def renewIndex(self): log.msg("Renewing index") dl = [] cf = ssl.ContextFactory(verify=True) for url in self.urls: log.msg("Fetching cache from: " + url) d = cacheclient.retrieveCache(url, cf) d.addCallback(self._updateCache, url) d.addErrback(self._failedCacheRetrieval, url) dl.append(d) return defer.DeferredList(dl) def _updateCache(self, result, url): hashes, cache_time, cache, cache_url = result if not cache_url: host = urlparse.urlparse(url).netloc if ':' in host: host = host.split(':')[0] cache_url = host if not cache: log.msg("No cache info returned from %s" % cache_url) return try: size = len(cache) * 8 self.filters[cache_url] = bloomfilter.BloomFilter(size=size, bits=cache, hashes=hashes) except Exception, e: log.err(e) log.msg("New cache added for " + cache_url) def _failedCacheRetrieval(self, failure, url): log.msg("Failed to retrieve cache index from %s. Reason: %s" % (url, failure.getErrorMessage())) def query(self, keys): results = {} for host, filter_ in self.filters.items(): for key in keys: khash = hashlib.sha1(key).hexdigest() hosts = results.setdefault(key, []) log.msg("Query: %s for %s" % (khash, host)) if khash in filter_: log.msg("Found match for %s at %s" % (key, host)) hosts.append(host) return results nordugrid-arc-5.4.2/src/services/acix/PaxHeaders.7502/__init__.py0000644000000000000000000000012312007731345022645 xustar000000000000000026 mtime=1344254693.57267 27 atime=1513200575.619715 30 ctime=1513200664.231799242 nordugrid-arc-5.4.2/src/services/acix/__init__.py0000644000175000002070000000000012007731345022701 0ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/acix/PaxHeaders.7502/cacheserver0000644000000000000000000000013213214316030022740 xustar000000000000000030 mtime=1513200664.317800294 30 atime=1513200668.716854096 30 ctime=1513200664.317800294 nordugrid-arc-5.4.2/src/services/acix/cacheserver/0000755000175000002070000000000013214316030023063 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/acix/cacheserver/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712754431715025075 xustar000000000000000027 mtime=1471296461.229277 30 atime=1513200603.557057166 30 ctime=1513200664.312800233 nordugrid-arc-5.4.2/src/services/acix/cacheserver/Makefile.am0000644000175000002070000000070112754431715025135 0ustar00mockbuildmock00000000000000if SYSV_SCRIPTS_ENABLED CACHESERVER_SCRIPT = acix-cache else CACHESERVER_SCRIPT = endif dist_initd_SCRIPTS = $(CACHESERVER_SCRIPT) if SYSTEMD_UNITS_ENABLED CACHESERVER_UNIT = acix-cache.service CACHESERVER_UNIT_WRAPPER = acix-cache-start else CACHESERVER_UNIT = CACHESERVER_UNIT_WRAPPER = endif units_DATA = $(CACHESERVER_UNIT) pkgdata_SCRIPTS = $(CACHESERVER_UNIT_WRAPPER) EXTRA_DIST = acix-cache-start SUBDIRS = $(TEST_DIR) dist_SUBDIRS = test nordugrid-arc-5.4.2/src/services/acix/cacheserver/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315733025073 xustar000000000000000030 mtime=1513200603.600057692 30 atime=1513200650.886636026 30 ctime=1513200664.313800245 nordugrid-arc-5.4.2/src/services/acix/cacheserver/Makefile.in0000644000175000002070000007145513214315733025155 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/acix/cacheserver DIST_COMMON = $(am__dist_initd_SCRIPTS_DIST) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in $(srcdir)/acix-cache.service.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = acix-cache.service CONFIG_CLEAN_VPATH_FILES = am__dist_initd_SCRIPTS_DIST = acix-cache am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(initddir)" "$(DESTDIR)$(pkgdatadir)" \ "$(DESTDIR)$(unitsdir)" SCRIPTS = $(dist_initd_SCRIPTS) $(pkgdata_SCRIPTS) SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive DATA = $(units_DATA) RECURSIVE_CLEAN_TARGETS = 
mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ 
GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = 
@SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @SYSV_SCRIPTS_ENABLED_FALSE@CACHESERVER_SCRIPT = @SYSV_SCRIPTS_ENABLED_TRUE@CACHESERVER_SCRIPT = acix-cache dist_initd_SCRIPTS = $(CACHESERVER_SCRIPT) @SYSTEMD_UNITS_ENABLED_FALSE@CACHESERVER_UNIT = @SYSTEMD_UNITS_ENABLED_TRUE@CACHESERVER_UNIT = acix-cache.service @SYSTEMD_UNITS_ENABLED_FALSE@CACHESERVER_UNIT_WRAPPER = @SYSTEMD_UNITS_ENABLED_TRUE@CACHESERVER_UNIT_WRAPPER = acix-cache-start units_DATA = $(CACHESERVER_UNIT) pkgdata_SCRIPTS = $(CACHESERVER_UNIT_WRAPPER) EXTRA_DIST = acix-cache-start SUBDIRS = $(TEST_DIR) dist_SUBDIRS = test all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; 
\ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/acix/cacheserver/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/acix/cacheserver/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): acix-cache.service: $(top_builddir)/config.status $(srcdir)/acix-cache.service.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-dist_initdSCRIPTS: $(dist_initd_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(initddir)" || $(MKDIR_P) "$(DESTDIR)$(initddir)" @list='$(dist_initd_SCRIPTS)'; test -n "$(initddir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(initddir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(initddir)$$dir" || exit $$?; \ } \ ; done uninstall-dist_initdSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(dist_initd_SCRIPTS)'; test -n "$(initddir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(initddir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(initddir)" && rm -f $$files install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: 
@$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-unitsDATA: $(units_DATA) @$(NORMAL_INSTALL) test -z "$(unitsdir)" || $(MKDIR_P) "$(DESTDIR)$(unitsdir)" @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(unitsdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(unitsdir)" || exit $$?; \ done uninstall-unitsDATA: @$(NORMAL_UNINSTALL) @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(unitsdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(unitsdir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . 
|| ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(SCRIPTS) $(DATA) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(initddir)" "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(unitsdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dist_initdSCRIPTS install-pkgdataSCRIPTS \ install-unitsDATA install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-dist_initdSCRIPTS uninstall-pkgdataSCRIPTS \ uninstall-unitsDATA .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dist_initdSCRIPTS install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkgdataSCRIPTS install-ps \ install-ps-am install-strip install-unitsDATA installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am uninstall-dist_initdSCRIPTS \ uninstall-pkgdataSCRIPTS uninstall-unitsDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/acix/cacheserver/PaxHeaders.7502/pscan.py0000644000000000000000000000012312706354762024516 xustar000000000000000026 mtime=1461311986.82806 27 atime=1513200575.634715 30 ctime=1513200664.242799377 nordugrid-arc-5.4.2/src/services/acix/cacheserver/pscan.py0000644000175000002070000001250612706354762024570 0ustar00mockbuildmock00000000000000""" Scans the ARC cache directory, but in another process in order to avoid blocking the twisted reactor. This is done in a not-so-nice way, where we create a python in a temporary file and execute that program. 
""" import os import tempfile import time from twisted.python import log from twisted.internet import reactor, defer, protocol from twisted.protocols import basic ARC_CONF = '/etc/arc.conf' DATA_CACHE_SUBDIR = 'data' SCAN_PROGRAM_DUMP = '''#generated by arc cacheindex import os import shelve import tempfile import time f = None t = time.time() dump_file = '%s' if dump_file: f = tempfile.NamedTemporaryFile(delete=False) m = shelve.open(tempfile.gettempdir() + '/ARC-ACIX/.db') for dirpath, dirnames, filenames in os.walk('%s'): for filename in filenames: if filename.endswith('.meta') and os.path.exists(os.path.join(dirpath, filename[:-5])): url = dirpath.rsplit('/')[-1] + filename.split('.')[0] print url + "\\r\\n", if dump_file and time.time() < t + 300: # Don't spend more than 5 mins looking up URLs try: murl = m[url] except KeyError: # first line of meta is url murl = '' with open(os.path.join(dirpath, filename)) as mf: murl = mf.readline().strip() m[url] = murl f.write(murl+'\\n') if dump_file: f.close() m.close() os.rename(f.name, dump_file) ''' class URLReceiver(basic.LineReceiver): def __init__(self, filter): self.filter = filter def lineReceived(self, line): self.filter(line.strip()) class ScanProtocol(protocol.ProcessProtocol): def __init__(self, filter, d): self.url_receiver = URLReceiver(filter) self.d = d def outReceived(self, data): self.url_receiver.dataReceived(data) def errReceived(self, data): log.msg("Error data received from scanning program. Oh noes: %s" % data) def processEnded(self, reason): if reason.value.exitCode == 0: self.d.callback(None) return # everything is just peachy log.err(reason) self.d.callback(reason) def getARCCacheDirs(): cache_dirs = [] config = ARC_CONF if 'ARC_CONFIG' in os.environ: config = os.environ['ARC_CONFIG'] ingm = False for line in file(config): if line.startswith('['): ingm = line.startswith('[grid-manager]') if ingm and line.startswith('cachedir') or line.startswith('remotecachedir'): args = line.split('=', 2)[1] cache_dir = args.split(' ')[0].replace('"', '').strip() cache_dirs.append(cache_dir) return cache_dirs class CacheScanner: def __init__(self, cache_dir=None, cache_dump=False): if cache_dir is None: cache_dir = getARCCacheDirs() # compat with older configs, where cache_dir is a string # but we need to support multiple cache dirs if type(cache_dir) is str: cache_dir = [cache_dir] self.cache_dir = cache_dir self.cache_dump = cache_dump def dir(self): return self.cache_dir def scan(self, filter): defs = [] dump_file = '' if self.cache_dump: dump_file = '%s/ARC-ACIX/%s' % (tempfile.gettempdir(), str(int(time.time()))) try: os.mkdir('%s/ARC-ACIX' % tempfile.gettempdir()) except: pass for cd in self.cache_dir: program = SCAN_PROGRAM_DUMP % (dump_file, cd) tf = tempfile.NamedTemporaryFile() tf.write(program) # ensure file content is in kernel before spawning process tf.flush() d = defer.Deferred() pp = ScanProtocol(filter, d) pt = reactor.spawnProcess(pp, 'python', args=['python', tf.name]) def err(failure): log.err(failure) return failure def passthru(result, _): return result d.addErrback(err) # The semantics of the temporary file is that it will automatically # get deleted once it gets garbage collected. This means that if we # don't use the tf variable or set the delete flag to False, the # file will get deleted before we start using it. 
Unfortuantely # Python 2.5 and earlier does not support the delete flag, so # instead we keep the variable for the temporary file in use, # dealying its deletion until the filter has been generated, hence # the bogus passthru. d.addBoth(passthru, tf) defs.append(d) return defer.DeferredList(defs) @defer.inlineCallbacks def main(): import sys, time #cache_dirs = sys.argv[2:] #print "Cache dirs", cache_dirs class Count: def __init__(self): self.count = 0 def gotHash(self, hash): print hash self.count += 1 c = Count() t0 = time.time() #yield CacheScanner(cache_dirs).scan(c.gotHash) yield CacheScanner().scan(c.gotHash) td = time.time() - t0 print "Scan time:", td print "Objects scanned: ", c.count reactor.stop() if __name__ == '__main__': reactor.callWhenRunning(main) reactor.run() nordugrid-arc-5.4.2/src/services/acix/cacheserver/PaxHeaders.7502/test0000644000000000000000000000013213214316030023717 xustar000000000000000030 mtime=1513200664.335800514 30 atime=1513200668.716854096 30 ctime=1513200664.335800514 nordugrid-arc-5.4.2/src/services/acix/cacheserver/test/0000755000175000002070000000000013214316030024042 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/acix/cacheserver/test/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712014647710026045 xustar000000000000000027 mtime=1345540040.509752 30 atime=1513200603.618057912 30 ctime=1513200664.332800478 nordugrid-arc-5.4.2/src/services/acix/cacheserver/test/Makefile.am0000644000175000002070000000035412014647710026111 0ustar00mockbuildmock00000000000000TESTSCRIPTS = test_cacheresource.py test_scan.py if ACIX_TESTS_ENABLED TESTS_ENVIRONMENT = PYTHONPATH=$(top_srcdir)/src/services $(TRIAL) TESTS = $(TESTSCRIPTS) else TESTS = endif check_SCRIPTS = $(TESTS) EXTRA_DIST = $(TESTSCRIPTS) nordugrid-arc-5.4.2/src/services/acix/cacheserver/test/PaxHeaders.7502/test_cacheresource.py0000644000000000000000000000012412242373672030240 xustar000000000000000027 mtime=1384773562.388247 27 atime=1513200575.636715 30 ctime=1513200664.334800502 nordugrid-arc-5.4.2/src/services/acix/cacheserver/test/test_cacheresource.py0000644000175000002070000000332512242373672030310 0ustar00mockbuildmock00000000000000from twisted.trial import unittest from twisted.internet import reactor, defer from twisted.web import resource, server from acix.core import bloomfilter, cacheclient from acix.cacheserver import cache, cacheresource TEST_URLS1 = [ 'srm://srm.ndgf.org/biogrid/db/uniprot/UniProt12.6/uniprot_sprot.fasta.gz', 'gsiftp://grid.tsl.uu.se:2811/storage/sam/testfile'] class TestScanner: def __init__(self, urls): self.urls = urls def dir(self): return "testscanner (no dir)" def scan(self, filter): for url in self.urls: filter(url) d = defer.Deferred() d.callback(None) return d class CacheResourceTest(unittest.TestCase): port = 4080 @defer.inlineCallbacks def setUp(self): scanner = TestScanner(TEST_URLS1) self.cs = cache.Cache(scanner, 10000, 60, '') cr = cacheresource.CacheResource(self.cs) siteroot = resource.Resource() siteroot.putChild('cache', cr) site = server.Site(siteroot) yield self.cs.startService() self.iport = reactor.listenTCP(self.port, site) self.cache_url = "http://localhost:%i/cache" % (self.port) @defer.inlineCallbacks def tearDown(self): yield self.cs.stopService() yield self.iport.stopListening() @defer.inlineCallbacks def testCacheRetrieval(self): hashes, cache_time, cache, cache_url = yield cacheclient.retrieveCache(self.cache_url) size = len(cache) * 8 bf = bloomfilter.BloomFilter(size, bits=cache, hashes=hashes) for url in 
TEST_URLS1: self.assertTrue(url in bf) self.assertFalse("gahh" in bf) self.assertFalse("whuu" in bf) nordugrid-arc-5.4.2/src/services/acix/cacheserver/test/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315733026051 xustar000000000000000030 mtime=1513200603.653058341 30 atime=1513200650.916636393 29 ctime=1513200664.33380049 nordugrid-arc-5.4.2/src/services/acix/cacheserver/test/Makefile.in0000644000175000002070000004550513214315733026131 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ @ACIX_TESTS_ENABLED_TRUE@TESTS = $(TESTSCRIPTS) subdir = src/services/acix/cacheserver/test DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__tty_colors = \ red=; grn=; lgn=; blu=; std= DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = 
@ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ 
LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO 
= @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ TESTSCRIPTS = test_cacheresource.py test_scan.py @ACIX_TESTS_ENABLED_TRUE@TESTS_ENVIRONMENT = PYTHONPATH=$(top_srcdir)/src/services $(TRIAL) check_SCRIPTS = $(TESTS) EXTRA_DIST = $(TESTSCRIPTS) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/acix/cacheserver/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/acix/cacheserver/test/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags: TAGS TAGS: ctags: CTAGS CTAGS: check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_SCRIPTS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: all all-am check check-TESTS check-am clean clean-generic \ clean-libtool distclean distclean-generic distclean-libtool \ distdir dvi dvi-am html html-am info info-am install \ install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/acix/cacheserver/test/PaxHeaders.7502/test_scan.py0000644000000000000000000000012412707112435026343 xustar000000000000000027 mtime=1461490973.022692 27 atime=1513200575.637715 30 ctime=1513200664.335800514 nordugrid-arc-5.4.2/src/services/acix/cacheserver/test/test_scan.py0000644000175000002070000000564612707112435026423 0ustar00mockbuildmock00000000000000import os import shutil import tempfile from twisted.trial import unittest from twisted.internet import defer from acix.cacheserver import pscan class ScanTestCase(unittest.TestCase): def setUp(self): # fill caches self.tmpdir = tempfile.mkdtemp(prefix='/tmp/acix-test-cache') os.mkdir(self.tmpdir+'/cache') os.mkdir(self.tmpdir+'/cache/data') os.mkdir(self.tmpdir+'/cache/data/6b') f = open(self.tmpdir+'/cache/data/6b/27f066ef9e22d2e3e40c668cae72e9e163fafd.meta', 'w') f.write('http://localhost/file1') f.close() f = open(self.tmpdir+'/cache/data/6b/27f066ef9e22d2e3e40c668cae72e9e163fafd', 'w') f.write('1234') f.close() os.mkdir(self.tmpdir+'/cache/data/a5') f = open(self.tmpdir+'/cache/data/a5/7c87cedbb464eb765a9fa8b8d506686cf0d0ee.meta', 'w') f.write('http://localhost/file2') f.close() f = open(self.tmpdir+'/cache/data/a5/7c87cedbb464eb765a9fa8b8d506686cf0d0ee', 'w') f.write('1234') f.close() self.tmpdir2 = tempfile.mkdtemp(prefix='/tmp/acix-test-cache2') os.mkdir(self.tmpdir2+'/cache') os.mkdir(self.tmpdir2+'/cache/data') os.mkdir(self.tmpdir2+'/cache/data/9f') f = open(self.tmpdir2+'/cache/data/9f/4f96f6aada65ef3dafce1af2e36ba8428aeb03.meta', 'w') f.write('http://localhost/file3') f.close() f = open(self.tmpdir2+'/cache/data/9f/4f96f6aada65ef3dafce1af2e36ba8428aeb03', 'w') f.write('1234') f.close() os.mkdir(self.tmpdir2+'/cache/data/dc') f = open(self.tmpdir2+'/cache/data/dc/294265ad76c92fe388f4f3c452734b10064ac2.meta', 'w') f.write('http://localhost/file4') f.close() f = open(self.tmpdir2+'/cache/data/dc/294265ad76c92fe388f4f3c452734b10064ac2', 'w') f.write('1234') f.close() def tearDown(self): shutil.rmtree(self.tmpdir) shutil.rmtree(self.tmpdir2) @defer.inlineCallbacks def testScan(self): scanner = pscan.CacheScanner(self.tmpdir+'/cache') l = [] yield scanner.scan(lambda url : l.append(url)) self.failUnlessIn('6b27f066ef9e22d2e3e40c668cae72e9e163fafd', l) self.failUnlessIn('a57c87cedbb464eb765a9fa8b8d506686cf0d0ee', l) self.failIfIn('abc', l) self.failIfIn('some_other_thing', l) @defer.inlineCallbacks def testScanMultipleDirs(self): scanner = pscan.CacheScanner([self.tmpdir+'/cache', self.tmpdir2+'/cache']) l = [] yield scanner.scan(lambda url : l.append(url)) # from the first dir self.failUnlessIn('a57c87cedbb464eb765a9fa8b8d506686cf0d0ee', l) self.failUnlessIn('6b27f066ef9e22d2e3e40c668cae72e9e163fafd', l) # from the second dir self.failUnlessIn('9f4f96f6aada65ef3dafce1af2e36ba8428aeb03', l) self.failUnlessIn('dc294265ad76c92fe388f4f3c452734b10064ac2', l) nordugrid-arc-5.4.2/src/services/acix/cacheserver/PaxHeaders.7502/__init__.py0000644000000000000000000000012212046514637025144 xustar000000000000000026 mtime=1352309151.21856 27 atime=1513200575.639715 29 ctime=1513200664.23979934 nordugrid-arc-5.4.2/src/services/acix/cacheserver/__init__.py0000644000175000002070000000014012046514637025206 0ustar00mockbuildmock00000000000000from acix.cacheserver import cachesetup createApplication = cachesetup.createCacheApplication nordugrid-arc-5.4.2/src/services/acix/cacheserver/PaxHeaders.7502/acix-cache-start0000644000000000000000000000012312754431715026100 xustar000000000000000027 
mtime=1471296461.229277 27 atime=1513200575.637715 29 ctime=1513200664.31580027 nordugrid-arc-5.4.2/src/services/acix/cacheserver/acix-cache-start0000755000175000002070000000341612754431715026155 0ustar00mockbuildmock00000000000000#!/bin/sh PIDFILE=/var/run/acix-cache.pid DEFAULT_LOGFILE=/var/log/arc/acix-cache.log prog=twistd RUN=yes # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/acix-cache ]; then . /etc/sysconfig/acix-cache elif [ -r /etc/default/acix-cache ]; then . /etc/default/acix-cache fi if [ "$RUN" != "yes" ] ; then echo "acix-cache service is disabled, please adjust the configuration to your" echo "needs and then set RUN to 'yes' in /etc/default/acix-cache to enable it." return 0 fi # ARC_CONFIG if [ "x$ARC_CONFIG" = "x" ] && [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi if [ ! -r "$ARC_CONFIG" ]; then echo "ARC configuration not found (usually /etc/arc.conf)" exit 1 fi CONFIG_SECTION=acix\\/cacheserver # read in cacheserver section from arc.conf # this will put the read values into the environment, e.g., $logfile eval `sed -e 's/[[:space:]]*\=[[:space:]]*/=/g' \ -e 's/;.*$//' \ -e 's/[[:space:]]*$//' \ -e 's/^[[:space:]]*//' \ -e "s/^\(.*\)=\([^\"']*\)$/\1=\"\2\"/" \ < $ARC_CONFIG \ | sed -n -e "/^\[$CONFIG_SECTION\]/,/^\s*\[/{/^[^;].*\=.*/p;}" ` LOGFILE=${logfile:-$DEFAULT_LOGFILE} LOGD=`dirname $LOGFILE` LOGN=`basename $LOGFILE` if [ ! -d $LOGD ]; then mkdir -p $LOGD fi APPSTART=" from acix import cacheserver; from twisted.python import log; from twisted.python.logfile import LogFile; application = cacheserver.createApplication(); log.startLogging(LogFile('$LOGN', '$LOGD', rotateLength=1000000, maxRotatedFiles=25)) " TACFILE=`mktemp` || exit 1 echo $APPSTART > $TACFILE exec $prog --pidfile $PIDFILE -y $TACFILE -l $LOGFILE rm -f $TACFILE nordugrid-arc-5.4.2/src/services/acix/cacheserver/PaxHeaders.7502/acix-cache.service.in0000644000000000000000000000012712754431715027015 xustar000000000000000027 mtime=1471296461.229277 30 atime=1513200650.902636221 30 ctime=1513200664.314800257 nordugrid-arc-5.4.2/src/services/acix/cacheserver/acix-cache.service.in0000644000175000002070000000033312754431715027056 0ustar00mockbuildmock00000000000000[Unit] Description=ARC cache index cache server After=network.target local_fs.target [Service] Type=forking PIDFile=/var/run/acix-cache.pid ExecStart=@pkgdatadir@/acix-cache-start [Install] WantedBy=multi-user.target nordugrid-arc-5.4.2/src/services/acix/cacheserver/PaxHeaders.7502/cacheresource.py0000644000000000000000000000012312415470135026213 xustar000000000000000026 mtime=1412853853.46933 27 atime=1513200575.638715 30 ctime=1513200664.240799352 nordugrid-arc-5.4.2/src/services/acix/cacheserver/cacheresource.py0000644000175000002070000000200012415470135026251 0ustar00mockbuildmock00000000000000""" resource to fetch a bloom filter from """ from twisted.python import log from twisted.web import resource class CacheResource(resource.Resource): isLeaf = True def __init__(self, cache_service): resource.Resource.__init__(self) self.cache_service = cache_service def render_GET(self, request): client = request.getClient() + "/" + request.getClientIP() log.msg("GET request on cache from %s" % client) gen_time, hashes, cache, cache_url = self.cache_service.getCache() request.setHeader('Content-type', 'application/vnd.org.ndgf.acix.bloomfilter') if not cache: log.msg("Cache content has not been built yet") cache = '' 
gen_time = 0 request.setHeader('Content-length', str(len(cache))) request.setHeader("x-hashes", ','.join(hashes)) request.setHeader("x-cache-time", str(gen_time)) request.setHeader("x-cache-url", cache_url) return cache nordugrid-arc-5.4.2/src/services/acix/cacheserver/PaxHeaders.7502/cache.py0000644000000000000000000000012312771222566024453 xustar000000000000000027 mtime=1474635126.501877 27 atime=1513200575.649715 29 ctime=1513200664.23979934 nordugrid-arc-5.4.2/src/services/acix/cacheserver/cache.py0000644000175000002070000000614012771222566024522 0ustar00mockbuildmock00000000000000""" resource to fetch a bloom filter from """ import time from twisted.python import log from twisted.internet import task from twisted.application import service from acix.core import bloomfilter CAPACITY_CHUNK = 10000 # 10k entries is the least we bother with WATERMARK_LOW = 10000 # 10k entries -> 35k memory, no reason to go lower class _Counter: def __init__(self): self.n = 0 def up(self): self.n += 1 class Cache(service.Service): def __init__(self, scanner, capacity, refresh_interval, cache_url): self.scanner = scanner self.capacity = capacity self.refresh_interval = refresh_interval self.cache_url = cache_url self.cache_task = task.LoopingCall(self.renewCache) self.cache = None self.generation_time = None self.hashes = [] def startService(self): log.msg("-" * 60) log.msg("Starting cache service") log.msg(" Directory : %s" % self.scanner.dir()) log.msg(" Capacity : %s" % self.capacity) log.msg(" Refresh interval : %i" % self.refresh_interval) log.msg("-" * 60) self.cache_task.start(self.refresh_interval) def stopService(self): self.cache_task.stop() def renewCache(self): n_bits = bloomfilter.calculateSize(capacity=self.capacity) log.msg("Renewing cache. Filter capacity %i, size: %i bits" % (self.capacity, n_bits)) filter = bloomfilter.BloomFilter(n_bits) file_counter = _Counter() def addEntry(key): file_counter.up() filter.add(key) t0 = time.time() d = self.scanner.scan(addEntry) d.addCallback(self._scanDone, filter, t0, file_counter) return d def _scanDone(self, _, filter, t0, file_counter): td = time.time() - t0 self.cache = filter.serialize() self.generation_time = time.time() self.hashes = filter.get_hashes() log.msg("Cache updated. Time taken: %f seconds. Entries: %i" % (round(td, 2), file_counter.n)) if file_counter.n == 0: log.msg("No file entries registered. Possible misconfiguration.") return self.checkCapacity(file_counter.n) def checkCapacity(self, n_files): if n_files > self.capacity: log.msg("Filter capacity exceeded. Capacity: %i. Files: %i" % (self.capacity, n_files)) self.capacity = (round(n_files/float(CAPACITY_CHUNK))+1) * CAPACITY_CHUNK log.msg("Filter capacity expanded to %i (will take effect on next cache run)" % self.capacity) return if self.capacity / float(n_files) > 3.0 and self.capacity > WATERMARK_LOW: # filter under 1/3 full log.msg("Filter capacity underutilized. Capacity: %i. 
Files: %i" % (self.capacity, n_files)) self.capacity = max(self.capacity - CAPACITY_CHUNK, WATERMARK_LOW) log.msg("Filter capacity reduced to %i (will take effect on next cache run)" % self.capacity) def getCache(self): return self.generation_time, self.hashes, self.cache, self.cache_url nordugrid-arc-5.4.2/src/services/acix/cacheserver/PaxHeaders.7502/acix-cache0000644000000000000000000000012412623100535024732 xustar000000000000000027 mtime=1447854429.759916 27 atime=1513200575.638715 30 ctime=1513200664.311800221 nordugrid-arc-5.4.2/src/services/acix/cacheserver/acix-cache0000755000175000002070000001330512623100535025004 0ustar00mockbuildmock00000000000000#!/bin/sh # ARC cache index cache server # # chkconfig: 2345 75 25 # description: The ARC cache server collects cache information which \ # can be pulled by a cache index. ### BEGIN INIT INFO # Provides: acix-cache # Required-Start: $network $local_fs # Required-Stop: $network $local_fs # Should-Start: $time # Should-Stop: $time # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: ARC cacheindex, cache server # Description: The ARC cache server collects cache information which # can be pulled by a cache index. ### END INIT INFO # source function library if [ -f /etc/init.d/functions ]; then . /etc/init.d/functions log_success_msg() { echo -n "$@" success "$@" echo } log_warning_msg() { echo -n "$@" warning "$@" echo } log_failure_msg() { echo -n "$@" failure "$@" echo } elif [ -f /lib/lsb/init-functions ]; then . /lib/lsb/init-functions else echo "Error: Cannot source neither init.d nor lsb functions" exit 1 fi PIDFILE=/var/run/acix-cache.pid DEFAULT_LOGFILE=/var/log/arc/acix-cache.log prog=twistd RUN=yes # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/acix-cache ]; then . /etc/sysconfig/acix-cache elif [ -r /etc/default/acix-cache ]; then . /etc/default/acix-cache fi if [ `id -u` = 0 ] ; then # Debian does not have /var/lock/subsys if [ -d /var/lock/subsys ]; then LOCKFILE=/var/lock/subsys/acix-cache else LOCKFILE=/var/lock/acix-cache fi else LOCKFILE=$HOME/acix-cache.lock fi do_start() { if [ "$RUN" != "yes" ] ; then echo "acix-cache service is disabled, please adjust the configuration to your" echo "needs and then set RUN to 'yes' in /etc/default/acix-cache to enable it." return 0 fi # ARC_CONFIG if [ "x$ARC_CONFIG" = "x" ] && [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi if [ ! -r "$ARC_CONFIG" ]; then log_failure_msg "ARC configuration not found (usually /etc/arc.conf)" exit 1 fi echo -n "Starting ARC cache server..." # Check if we are already running if [ -f $PIDFILE ]; then read pid < $PIDFILE if [ "x$pid" != "x" ]; then ps -p "$pid" -o comm 2>/dev/null | grep "^$prog$" 1>/dev/null 2>/dev/null if [ $? -eq 0 ] ; then log_success_msg "already running (pid $pid)" return 0 fi fi rm -f "$PIDFILE" "$LOCKFILE" fi CONFIG_SECTION=acix\\/cacheserver # read in cacheserver section from arc.conf # this will put the read values into the environment, e.g., $logfile eval `sed -e 's/[[:space:]]*\=[[:space:]]*/=/g' \ -e 's/;.*$//' \ -e 's/[[:space:]]*$//' \ -e 's/^[[:space:]]*//' \ -e "s/^\(.*\)=\([^\"']*\)$/\1=\"\2\"/" \ < $ARC_CONFIG \ | sed -n -e "/^\[$CONFIG_SECTION\]/,/^\s*\[/{/^[^;].*\=.*/p;}" ` LOGFILE=${logfile:-$DEFAULT_LOGFILE} LOGD=`dirname $LOGFILE` LOGN=`basename $LOGFILE` if [ ! 
-d $LOGD ]; then mkdir -p $LOGD fi APPSTART=" from acix import cacheserver; from twisted.python import log; from twisted.python.logfile import LogFile; application = cacheserver.createApplication(); log.startLogging(LogFile('$LOGN', '$LOGD', rotateLength=1000000, maxRotatedFiles=25)) " TACFILE=`mktemp` || exit 1 echo $APPSTART > $TACFILE $prog --pidfile $PIDFILE -y $TACFILE -l $LOGFILE RETVAL=$? rm -f $TACFILE if [ $RETVAL -eq 0 ]; then touch $LOCKFILE log_success_msg else log_failure_msg fi return $RETVAL } do_stop() { echo -n "Stopping ARC cache server..." if [ -f "$PIDFILE" ]; then read pid < "$PIDFILE" if [ ! -z "$pid" ] ; then kill "$pid" RETVAL=$? if [ $RETVAL -eq 0 ]; then log_success_msg else log_failure_msg fi timeout=2; # for stopping nicely while ( ps -p "$pid" -o comm 2>/dev/null | grep "^$prog$" 1>/dev/null 2>/dev/null ) && [ $timeout -ge 1 ] ; do sleep 1 timeout=$(($timeout - 1)) done [ $timeout -lt 1 ] && kill -9 "$pid" 1>/dev/null 2>&1 rm -f "$PIDFILE" "$LOCKFILE" else RETVAL=1 log_failure_msg "$prog shutdown - pidfile is empty" fi else RETVAL=0 log_success_msg "$prog shutdown - already stopped" fi return $RETVAL } do_status() { if [ -f "$PIDFILE" ]; then read pid < "$PIDFILE" if [ "$pid" != "" ]; then if ps -p "$pid" > /dev/null; then echo "$1 (pid $pid) is running..." return 0 fi echo "$1 stopped but pid file exists" return 1 fi fi if [ -f $LOCKFILE ]; then echo "$1 stopped but lockfile exists" return 2 fi echo "$1 is stopped" return 3 } do_restart() { do_stop do_start } case "$1" in start) do_start ;; stop) do_stop ;; restart|reload|force-reload) do_restart ;; condrestart|try-restart) [ -f $LOCKFILE ] && do_restart || : ;; status) do_status $prog ;; *) echo "Usage: $0 {start|stop|restart|status|reload|condrestart|try-restart}" exit 1 ;; esac exit 0 nordugrid-arc-5.4.2/src/services/acix/cacheserver/PaxHeaders.7502/cachesetup.py0000644000000000000000000000012412771225424025531 xustar000000000000000027 mtime=1474636564.219421 27 atime=1513200575.634715 30 ctime=1513200664.241799365 nordugrid-arc-5.4.2/src/services/acix/cacheserver/cachesetup.py0000644000175000002070000000545612771225424025610 0ustar00mockbuildmock00000000000000import os from twisted.application import internet, service from twisted.web import resource, server from acix.core import ssl from acix.cacheserver import pscan, cache, cacheresource # -- constants SSL_DEFAULT = True CACHE_INTERFACE = '' CACHE_TCP_PORT = 5080 CACHE_SSL_PORT = 5443 DEFAULT_CAPACITY = 30000 # number of files in cache DEFAULT_CACHE_REFRESH_INTERVAL = 600 # seconds between updating cache ARC_CONF = '/etc/arc.conf' def getCacheConf(): '''Return a tuple of (cache_url, cache_dump, cache_host, cache_port)''' # Use cache access URL if mount point and at least one cacheaccess is defined cache_url = '' cache_dump = False cache_host = CACHE_INTERFACE cache_port = CACHE_SSL_PORT config = ARC_CONF if 'ARC_CONFIG' in os.environ: config = os.environ['ARC_CONFIG'] cacheaccess = False block = '' for line in file(config): if line.startswith('['): block = line[1:-2] if line.startswith('arex_mount_point'): args = line.split('=', 2)[1] url = args.replace('"', '').strip() cache_url = url + '/cache' if line.startswith('cacheaccess'): cacheaccess = True if (line.startswith('cachedump') and line.find('yes') != -1) or \ (line.startswith('cache_dump') and line.find('yes') != -1): cache_dump = True if block == 'acix/cacheserver' and line.startswith('port'): args = line.split('=', 2)[1] cache_port = int(args.replace('"', '').strip()) if block == 
'acix/cacheserver' and line.startswith('hostname'): args = line.split('=', 2)[1] cache_host = args.replace('"', '').strip() if not cacheaccess: cache_url = '' return (cache_url, cache_dump, cache_host, cache_port) def createCacheApplication(use_ssl=SSL_DEFAULT, port=None, cache_dir=None, capacity=DEFAULT_CAPACITY, refresh_interval=DEFAULT_CACHE_REFRESH_INTERVAL): (cache_url, cache_dump, cache_host, cache_port) = getCacheConf() scanner = pscan.CacheScanner(cache_dir, cache_dump) cs = cache.Cache(scanner, capacity, refresh_interval, cache_url) cr = cacheresource.CacheResource(cs) siteroot = resource.Resource() dataroot = resource.Resource() dataroot.putChild('cache', cr) siteroot.putChild('data', dataroot) site = server.Site(siteroot) # setup application application = service.Application("arc-cacheserver") cs.setServiceParent(application) if use_ssl: cf = ssl.ContextFactory() internet.SSLServer(port or cache_port, site, cf, interface=cache_host).setServiceParent(application) else: internet.TCPServer(port or CACHE_TCP_PORT, site, interface=cache_host).setServiceParent(application) return application nordugrid-arc-5.4.2/src/services/acix/PaxHeaders.7502/README0000644000000000000000000000012312242373672021422 xustar000000000000000027 mtime=1384773562.388247 27 atime=1513200575.615715 29 ctime=1513200664.23079923 nordugrid-arc-5.4.2/src/services/acix/README0000644000175000002070000001021112242373672021463 0ustar00mockbuildmock00000000000000ARC Cache IndeX - allows publishing of cache contents from several sites to an index, which can be queried for data-aware brokering. It consists of two components: a cache server which runs alongside A-REX and gathers cache content information, and a cache index to which the server publishes the content using a Bloom filter to reduce the data volume. Several cache servers can publish to one index. Required software: * Python. Only 2.4, 2.5 and 2.6 have been tested. Unit tests can only be run on Python >= 2.6. * Twisted Core and twisted web (python-twisted-core and python-twisted-web) * pyOpenSSL (package name python-openssl in Ubuntu) * (Python 2.4 only) python-hashlib ACIX Cache Server: ----------------- This is the component which runs on each CE collecting cache information. Usually no configuration is necessary, but it is possible to specify a custom logfile location by setting the logfile parameter in arc.conf, like this: --- [acix/cacheserver] logfile="/tmp/arc-cacheserver.log" --- Starting instructions: /etc/init.d/acix-cache start Update your rc* catalogs accordingly. You can stop the daemon with: $ /etc/init.d/acix-cache stop You can inspect the log file to check that everything is running. It is located at /var/log/arc/acix-cache.log. An initial warning about the creation of zombie process is typically generated (no zombie processes from the program has been observed). If any zombie processes are observed, please file a bug report. Send the URL at which your cache filter is located at, to the index admins(s). Unless you changed anything in the configuration, this will be: https://HOST_FQDN:5443/data/cache This is important as the index server pulls the cache filter from your site (the filter doesn't get registered automatically). If you have both arex_mount_point and at least one cacheaccess rule defined in arc.conf then the URL for remote cache access will be sent to the index, otherwise just the hostname is used. 
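To verify that the cache server is publishing a usable filter, you can fetch
and inspect it with the bundled acix Python modules. The snippet below is a
minimal sketch (Python 2 / Twisted), not part of the shipped tools: it assumes
the acix package is on PYTHONPATH, that HOST_FQDN and the example key are
replaced with your own values, and that the service is reachable with your
credentials. It mirrors what the test_cacheresource.py unit test does. Note
that the keys stored in the filter are the cache hash names produced by the
scanner (see test_scan.py), not the original URLs.

---
from twisted.internet import reactor, defer
from acix.core import bloomfilter, cacheclient

# Placeholder values - replace with your own host and a hash from your cache
CACHE_URL = 'https://HOST_FQDN:5443/data/cache'
KEY = '6b27f066ef9e22d2e3e40c668cae72e9e163fafd'

@defer.inlineCallbacks
def check(key):
    try:
        # retrieveCache returns the hash names used to build the filter, the
        # generation time, the raw filter bits and the advertised cache access
        # URL (same call as in test_cacheresource.py)
        hashes, cache_time, bits, cache_url = yield cacheclient.retrieveCache(CACHE_URL)
        # Rebuild the Bloom filter locally and test membership of the key
        bf = bloomfilter.BloomFilter(len(bits) * 8, bits=bits, hashes=hashes)
        print key, 'in cache filter:', key in bf
    finally:
        reactor.stop()

reactor.callWhenRunning(check, KEY)
reactor.run()
---
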
ACIX Index Server: ----------------- This is the index of registered caches which is queried by users to discover locations of cached files. To configure, edit /etc/arc.conf to include cache server URLs corresponding to the sites to be indexed. --- [acix/indexserver] cacheserver="https://myhost:5443/data/cache" cacheserver="https://anotherhost:5443/data/cache" --- Starting instructions. $ /etc/init.d/acix-index start Update your rc* catalogs accordingly. You can stop the daemon with: $ /etc/init.d/acix-index stop A log file is at /var/log/arc/acix-index.log. By default the index server will listen on port 6443 (ssl+http) so you need to open this port (or the configured port) in the firewall. It is possible to configure port, use of ssl, and the index refresh interval. See the indexsetup.py file (a bit of Python understanding is required). Clients: ------- To query an index server, construct a URL, like this: https://orval.grid.aau.dk:6443/data/index?url=http://www.nordugrid.org:80/data/echo.sh Here you ask the index services located at https://orval.grid.aau.dk:6443/data/index for the location(s) of the file http://www.nordugrid.org:80/data/echo.sh It is possible to query for multiple files by comma-seperating the files, e.g.: index?url=http://www.nordugrid.org:80/data/echo.sh,http://www.nordugrid.org:80/data/echo.sh Remember to quote/urlencode the strings when performing the get (wget and curl will do this automatically, but most http libraries won't) The result a JSON encoded datastructure with the top level structure being a dictionary/hash-table with the mapping: url -> [machines], where [machines] is a list of the machines on which the files are cached. You should always use a JSON parser to decode the result (the string might be escaped). If a machine has enabled remote cache access then a URL at which cache files may be accessed is shown, otherwise just the hostname is used. To access a cached file remotely, simply append the URL of the original file to the cache access endpoint and call HTTP GET (or use wget, curl, arccp,...), eg https://arex.host/arex/cache/http://www.nordugrid.org:80/data/echo.sh Some encoding of the original URL may be necessary depending on the tool you use. nordugrid-arc-5.4.2/src/services/acix/PaxHeaders.7502/core0000644000000000000000000000013213214316030021376 xustar000000000000000030 mtime=1513200664.269799707 30 atime=1513200668.716854096 30 ctime=1513200664.269799707 nordugrid-arc-5.4.2/src/services/acix/core/0000755000175000002070000000000013214316030021521 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/acix/core/PaxHeaders.7502/Makefile.am0000644000000000000000000000012512007731345023522 xustar000000000000000026 mtime=1344254693.57267 30 atime=1513200603.670058548 29 ctime=1513200664.26679967 nordugrid-arc-5.4.2/src/services/acix/core/Makefile.am0000644000175000002070000000005212007731345023563 0ustar00mockbuildmock00000000000000SUBDIRS = $(TEST_DIR) dist_SUBDIRS = test nordugrid-arc-5.4.2/src/services/acix/core/PaxHeaders.7502/bloomfilter.py0000644000000000000000000000012312046514637024362 xustar000000000000000026 mtime=1352309151.21856 27 atime=1513200575.620715 30 ctime=1513200664.234799279 nordugrid-arc-5.4.2/src/services/acix/core/bloomfilter.py0000644000175000002070000000632412046514637024435 0ustar00mockbuildmock00000000000000""" Bloom Filter for Acix. 
Heavily inspired from: http://stackoverflow.com/questions/311202/modern-high-performance-bloom-filter-in-python but modified to use bitarray instead of BitVector, as serialization utterly sucks for the latter. The calculateSize is more or less copied from pybloom (which also doesn't support serialization and restore in a sensibile way. The hash library is from: http://www.partow.net/programming/hashfunctions/index.html """ import math from acix.core import bitvector, hashes # Note: These names are used to identify hashes used to generate a bloom # filter between machines, i.e., they are used in the protocol. # Do NOT change unless you are REALLY certain you know what you are doing HASHES = { 'rs' : hashes.RSHash, 'js' : hashes.JSHash, 'pjw' : hashes.PJWHash, 'elf' : hashes.ELFHash, 'bkdr' : hashes.BKDRHash, 'sdbm' : hashes.SDBMHash, 'djb' : hashes.DJBHash, 'dek' : hashes.DEKHash, 'bp' : hashes.BPHash, 'fnv' : hashes.FNVHash, } # These hashes have been tested to be reasonably fast # By all means try to avoid the rs hash, as it is awfully slow. DEFAULT_HASHES = [ 'dek', 'elf', 'djb', 'sdbm' ] def calculateSize(capacity, error_rate=0.001): slices = math.ceil(math.log(1 / error_rate, 2)) # the error_rate constraint assumes a fill rate of 1/2 # so we double the capacity to simplify the API bits = math.ceil( (2 * capacity * abs(math.log(error_rate))) / (slices * (math.log(2) ** 2))) size = int(slices * bits) ROUND_TO = 32 # make sure we return a multiple of 32 (otherwise bitvector serialization will explode) if size % ROUND_TO != 0: mp = math.ceil(size / ROUND_TO) size = int(mp * ROUND_TO) return size class BloomFilter(object): def __init__(self, size=None, bits=None, hashes=None): self.size = size if bits is None: self.bits = bitvector.BitVector(size) else: assert size == len(bits) * 8, "Size and bit length does not match (%i,%i)" % (size, len(bits)) self.bits = bitvector.BitVector(size, bits) self.used_hashes = [] self.hashes = [] if hashes is None: hashes = DEFAULT_HASHES[:] for hash in hashes: self.used_hashes.append(hash) self.hashes.append(HASHES[hash]) def __contains__(self, key): for i in self._indexes(key): if not self.bits[i]: return False return True def add(self, key): for i in self._indexes(key): self.bits[i] = 1 def _indexes(self, key): ords = [ ord(c) for c in key ] return [ hash(ords) % self.size for hash in self.hashes ] def get_hashes(self): return self.used_hashes[:] def serialize(self): return str(self.bits) if __name__ == '__main__': import time from acix.cacheserver import pscan try: scanner = pscan.CacheScanner() except IOError: scanner = pscan.CacheScanner('test/cache') bf = BloomFilter(1000672) t0 = time.time() scanner.scan(bf.add) td = time.time() - t0 print "Time taken for bloom filter build: %s" % td nordugrid-arc-5.4.2/src/services/acix/core/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315733023531 xustar000000000000000030 mtime=1513200603.706058989 30 atime=1513200650.931636576 30 ctime=1513200664.267799683 nordugrid-arc-5.4.2/src/services/acix/core/Makefile.in0000644000175000002070000005553513214315733023614 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/acix/core DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = 
@ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ 
INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = 
@exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = $(TEST_DIR) dist_SUBDIRS = test all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/acix/core/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/acix/core/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/acix/core/PaxHeaders.7502/ssl.py0000644000000000000000000000012412417747511022647 xustar000000000000000027 mtime=1413467977.785832 27 atime=1513200575.622715 30 ctime=1513200664.238799328 nordugrid-arc-5.4.2/src/services/acix/core/ssl.py0000644000175000002070000000323712417747511022721 0ustar00mockbuildmock00000000000000import os from OpenSSL import SSL DEFAULT_HOST_KEY = '/etc/grid-security/hostkey.pem' DEFAULT_HOST_CERT = '/etc/grid-security/hostcert.pem' DEFAULT_CERTIFICATES = '/etc/grid-security/certificates' class ContextFactory: def __init__(self, key_path=DEFAULT_HOST_KEY, cert_path=DEFAULT_HOST_CERT, verify=False, ca_dir=None): self.key_path = key_path self.cert_path = cert_path self.verify = verify self.ca_dir = ca_dir if self.verify and ca_dir is None: self.ca_dir = DEFAULT_CERTIFICATES self.ctx = None def getContext(self): if self.ctx is not None: return self.ctx ctx = SSL.Context(SSL.SSLv23_METHOD) # this also allows tls 1.0 ctx.set_options(SSL.OP_NO_SSLv2) # ssl2 is unsafe ctx.set_options(SSL.OP_NO_SSLv3) # ssl3 is also unsafe ctx.use_privatekey_file(self.key_path) ctx.use_certificate_file(self.cert_path) ctx.check_privatekey() # sanity check def verify_callback(conn, x509, error_number, error_depth, allowed): # just return what openssl thinks is right return allowed if self.verify: ctx.set_verify(SSL.VERIFY_PEER, verify_callback) calist = [ ca for ca in os.listdir(self.ca_dir) if ca.endswith('.0') ] for ca in calist: # openssl wants absolute paths ca = os.path.join(self.ca_dir, ca) ctx.load_verify_locations(ca) if self.ctx is None: self.ctx = ctx return ctx if __name__ == '__main__': cf = ContextFactory() ctx = cf.getContext() print ctx nordugrid-arc-5.4.2/src/services/acix/core/PaxHeaders.7502/test0000644000000000000000000000013213214316030022355 xustar000000000000000030 mtime=1513200664.291799976 30 atime=1513200668.716854096 30 ctime=1513200664.291799976 nordugrid-arc-5.4.2/src/services/acix/core/test/0000755000175000002070000000000013214316030022500 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/acix/core/test/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712014647710024503 xustar000000000000000027 mtime=1345540040.509752 30 atime=1513200603.724059209 30 ctime=1513200664.289799952 nordugrid-arc-5.4.2/src/services/acix/core/test/Makefile.am0000644000175000002070000000033512014647710024546 0ustar00mockbuildmock00000000000000TESTSCRIPTS = test_bloomfilter.py if ACIX_TESTS_ENABLED TESTS_ENVIRONMENT = PYTHONPATH=$(top_srcdir)/src/services $(TRIAL) TESTS = $(TESTSCRIPTS) else TESTS = endif check_SCRIPTS = $(TESTS) EXTRA_DIST = $(TESTSCRIPTS) nordugrid-arc-5.4.2/src/services/acix/core/test/PaxHeaders.7502/test_bloomfilter.py0000644000000000000000000000012312046514637026400 xustar000000000000000026 mtime=1352309151.21856 27 atime=1513200575.624715 30 ctime=1513200664.291799976 nordugrid-arc-5.4.2/src/services/acix/core/test/test_bloomfilter.py0000644000175000002070000000242512046514637026451 0ustar00mockbuildmock00000000000000from twisted.trial import unittest from acix.core import bloomfilter KEYS = ['one', 'two', 'three', 'four'] FALSE_KEYS = ['five', 'six', 'seven' ] SIZE = 160 class BloomFilterTestCase(unittest.TestCase): def setUp(self): self.bf = bloomfilter.BloomFilter(SIZE) def testContains(self): for key in KEYS: self.bf.add(key) for key in KEYS: self.failUnlessIn(key, self.bf) for key in FALSE_KEYS: self.failIfIn(key, self.bf) def testSerialization(self): for key in KEYS: self.bf.add(key) s = 
self.bf.serialize() bf2 = bloomfilter.BloomFilter(SIZE, s) for key in KEYS: self.failUnlessIn(key, bf2) for key in FALSE_KEYS: self.failIfIn(key, bf2) def testReconstruction(self): # create filter with some non-standard hashes... bf1 = bloomfilter.BloomFilter(SIZE, hashes=['js', 'dek', 'sdbm']) for key in KEYS: bf1.add(key) # just to be sure for key in KEYS: self.failUnlessIn(key, bf1) for key in FALSE_KEYS: self.failIfIn(key, bf1) # reconstruct bf2 = bloomfilter.BloomFilter(SIZE, bits=bf1.serialize(), hashes=bf1.get_hashes()) for key in KEYS: self.failUnlessIn(key, bf2) for key in FALSE_KEYS: self.failIfIn(key, bf2) nordugrid-arc-5.4.2/src/services/acix/core/test/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315733024507 xustar000000000000000030 mtime=1513200603.760059649 29 atime=1513200650.94663676 30 ctime=1513200664.290799964 nordugrid-arc-5.4.2/src/services/acix/core/test/Makefile.in0000644000175000002070000004544113214315733024566 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ @ACIX_TESTS_ENABLED_TRUE@TESTS = $(TESTSCRIPTS) subdir = src/services/acix/core/test DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__tty_colors = \ red=; grn=; lgn=; blu=; std= DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = 
@ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = 
@INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = 
@fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ TESTSCRIPTS = test_bloomfilter.py @ACIX_TESTS_ENABLED_TRUE@TESTS_ENVIRONMENT = PYTHONPATH=$(top_srcdir)/src/services $(TRIAL) check_SCRIPTS = $(TESTS) EXTRA_DIST = $(TESTSCRIPTS) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/acix/core/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/acix/core/test/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags: TAGS TAGS: ctags: CTAGS CTAGS: check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_SCRIPTS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: all all-am check check-TESTS check-am clean clean-generic \ clean-libtool distclean distclean-generic distclean-libtool \ distdir dvi dvi-am html html-am info info-am install \ install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/acix/core/PaxHeaders.7502/__init__.py0000644000000000000000000000012312007731345023575 xustar000000000000000026 mtime=1344254693.57267 27 atime=1513200575.621715 30 ctime=1513200664.232799254 nordugrid-arc-5.4.2/src/services/acix/core/__init__.py0000644000175000002070000000000012007731345023631 0ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/acix/core/PaxHeaders.7502/cacheclient.py0000644000000000000000000000012312345604550024302 xustar000000000000000026 mtime=1402407272.36452 27 atime=1513200575.624715 30 ctime=1513200664.235799291 nordugrid-arc-5.4.2/src/services/acix/core/cacheclient.py0000644000175000002070000000344012345604550024351 0ustar00mockbuildmock00000000000000""" Client for retrieving cache. 
""" from urlparse import urlparse from twisted.python import log from twisted.internet import reactor from twisted.web import client HEADER_HASHES = 'x-hashes' HEADER_CACHE_TIME = 'x-cache-time' HEADER_CACHE_URL = 'x-cache-url' class InvalidCacheReplyError(Exception): pass def retrieveCache(url, contextFactory=None): # mostly copied from twisted.web.client """ Returns a deferred, which will fire with a tuple consisting of a the hashes, generation-time, and the cache. """ u = urlparse(url) factory = client.HTTPClientFactory(url) factory.noisy = False if u.scheme == 'https': from twisted.internet import ssl if contextFactory is None: contextFactory = ssl.ClientContextFactory() reactor.connectSSL(u.hostname, u.port, factory, contextFactory) else: reactor.connectTCP(u.hostname, u.port, factory) factory.deferred.addCallback(_gotCache, factory, url) return factory.deferred def _gotCache(result, factory, url): log.msg("Got reply from cache service %s" % url) try: hashes = factory.response_headers[HEADER_HASHES] cache_time = factory.response_headers[HEADER_CACHE_TIME] except KeyError, e: raise InvalidCacheReplyError(str(e)) try: cache_url = factory.response_headers[HEADER_CACHE_URL][0] except KeyError, e: # Site may not expose cache to outside cache_url = '' #log.msg("Raw cache headers. Hashes: %s. Cache time: %s." % (hashes, cache_time)) assert len(hashes) == 1, "Got more than one hash header" assert len(cache_time) == 1, "Got more than one cache time header" hashes = hashes[0].split(',') cache_time = float(cache_time[0]) return hashes, cache_time, result, cache_url nordugrid-arc-5.4.2/src/services/acix/core/PaxHeaders.7502/bitvector.py0000644000000000000000000000012312046514637024045 xustar000000000000000026 mtime=1352309151.21856 27 atime=1513200575.622715 30 ctime=1513200664.233799267 nordugrid-arc-5.4.2/src/services/acix/core/bitvector.py0000644000175000002070000000212412046514637024112 0ustar00mockbuildmock00000000000000""" Custom bitvector implementation, as most other suck. Both BitVector and bitarray has problems with serialization, which is rather critical for us. There might be endian issues. Author: Henrik Thostrup Jensen """ import array ARRAY_TYPE = 'B' TYPE_SIZE = 8 class BitVector: def __init__(self, n_bits, bits=None): assert n_bits % TYPE_SIZE == 0, "Size must be a multiple of %i" % TYPE_SIZE if bits is None: self.bits = array.array(ARRAY_TYPE , [0] * (n_bits / TYPE_SIZE)) else: assert n_bits == len(bits) * 8, "Size and given bits does not match" self.bits = array.array(ARRAY_TYPE) self.bits.fromstring(bits) def __setitem__(self, index, value): assert value == 1, "Only possible to set bits" l = self.bits[index // TYPE_SIZE] self.bits[index // TYPE_SIZE] = l | 1 << (index % TYPE_SIZE) def __getitem__(self, index): l = self.bits[index // TYPE_SIZE] return (l >> (index % TYPE_SIZE)) & 1 def __str__(self): return self.bits.tostring() nordugrid-arc-5.4.2/src/services/acix/core/PaxHeaders.7502/indexclient.py0000644000000000000000000000012312046514637024352 xustar000000000000000026 mtime=1352309151.21856 27 atime=1513200575.624715 30 ctime=1513200664.237799316 nordugrid-arc-5.4.2/src/services/acix/core/indexclient.py0000644000175000002070000000170512046514637024423 0ustar00mockbuildmock00000000000000""" Client for retrieving cache. Note that json is only available on python >= 2.6. 
""" import json from urllib import quote from twisted.python import log from twisted.web import client class InvalidIndexReplyError(Exception): pass def queryIndex(index_url, urls): for url in urls: assert ',' not in urls, "Commas ',' not allowed in urls currently" eurls = [ quote(url) for url in urls ] url = index_url + "?url=" + ','.join(eurls) d = client.getPage(url) d.addCallback(_gotResult, index_url) d.addErrback(_indexError, index_url) return d def _gotResult(result, index_url): log.msg("Got reply from index service %s" % index_url) try: decoded_result = json.loads(result) return decoded_result except ValueError, e: raise InvalidIndexReplyError(str(e)) def _indexError(failure, index_url): log.msg("Error while getting index results:") log.err(failure) return failure nordugrid-arc-5.4.2/src/services/acix/core/PaxHeaders.7502/hashes.py0000644000000000000000000000012312046514637023317 xustar000000000000000026 mtime=1352309151.21856 27 atime=1513200575.624715 30 ctime=1513200664.236799303 nordugrid-arc-5.4.2/src/services/acix/core/hashes.py0000644000175000002070000000645612046514637023400 0ustar00mockbuildmock00000000000000# #************************************************************************** #* * #* General Purpose Hash Function Algorithms Library * #* * #* Author: Arash Partow - 2002 * #* URL: http://www.partow.net * #* URL: http://www.partow.net/programming/hashfunctions/index.html * #* * #* Modified by Henrik Thostrup Jensen to operate on int * #* arrays instead of strings (large optimization when performing several * #* hashes of the same string. (2009) * #* * #* Copyright notice: * #* Free use of the General Purpose Hash Function Algorithms Library is * #* permitted under the guidelines and in accordance with the most current * #* version of the Common Public License. * #* http://www.opensource.org/licenses/cpl.php * #* * #************************************************************************** # def RSHash(key): a = 378551 b = 63689 hash = 0 for k in key: hash = hash * a + k a = a * b return hash def JSHash(key): hash = 1315423911 for k in key: hash ^= ((hash << 5) + k + (hash >> 2)) return hash def PJWHash(key): BitsInUnsignedInt = 4 * 8 ThreeQuarters = long((BitsInUnsignedInt * 3) / 4) OneEighth = long(BitsInUnsignedInt / 8) HighBits = (0xFFFFFFFF) << (BitsInUnsignedInt - OneEighth) hash = 0 test = 0 for k in key: hash = (hash << OneEighth) + k test = hash & HighBits if test != 0: hash = (( hash ^ (test >> ThreeQuarters)) & (~HighBits)); return (hash & 0x7FFFFFFF) def ELFHash(key): hash = 0 x = 0 for k in key: hash = (hash << 4) + k x = hash & 0xF0000000 if x != 0: hash ^= (x >> 24) hash &= ~x return hash def BKDRHash(key): seed = 131 # 31 131 1313 13131 131313 etc.. 
hash = 0 for k in key: hash = (hash * seed) + k return hash def SDBMHash(key): hash = 0 for k in key: hash = k + (hash << 6) + (hash << 16) - hash; return hash def DJBHash(key): hash = 5381 for k in key: hash = ((hash << 5) + hash) + k return hash def DEKHash(key): hash = len(key); for k in key: hash = ((hash << 5) ^ (hash >> 27)) ^ k return hash def BPHash(key): hash = 0 for k in key: hash = hash << 7 ^ k return hash def FNVHash(key): fnv_prime = 0x811C9DC5 hash = 0 for k in key: hash *= fnv_prime hash ^= k return hash ## requres index, so we don't use it #def APHash(key): # hash = 0xAAAAAAAA # for k in key: # if ((i & 1) == 0): # hash ^= ((hash << 7) ^ k * (hash >> 3)) # else: # hash ^= (~((hash << 11) + k ^ (hash >> 5))) # return hash nordugrid-arc-5.4.2/src/services/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712440333267021652 xustar000000000000000027 mtime=1417787063.331373 30 atime=1513200601.623033513 30 ctime=1513200662.427777178 nordugrid-arc-5.4.2/src/services/Makefile.am0000644000175000002070000000163212440333267021716 0ustar00mockbuildmock00000000000000if A_REX_SERVICE_ENABLED AREX_SERVICE = a-rex else AREX_SERVICE = endif if GRIDFTPD_SERVICE_ENABLED GRIDFTPD_SERVICE = gridftpd else GRIDFTPD_SERVICE = endif if LDAP_SERVICE_ENABLED LDAP_SERVICE = ldap-infosys else LDAP_SERVICE = endif if LDAP_MONITOR_ENABLED LDAP_MONITOR = ldap-monitor else LDAP_MONITOR = endif if WS_MONITOR_ENABLED WS_MONITOR = ws-monitor else WS_MONITOR = endif if CACHE_SERVICE_ENABLED CACHE_SERVICE = cache_service else CACHE_SERVICE = endif if DATADELIVERY_SERVICE_ENABLED DATADELIVERY_SERVICE = data-staging else DATADELIVERY_SERVICE = endif if ACIX_ENABLED ACIX = acix else ACIX = endif SUBDIRS = $(GRIDFTPD_SERVICE) $(AREX_SERVICE) $(LDAP_SERVICE) \ $(LDAP_MONITOR) $(WS_MONITOR) \ $(CACHE_SERVICE) \ $(DATADELIVERY_SERVICE) \ $(ACIX) \ wrappers examples DIST_SUBDIRS = gridftpd a-rex ldap-infosys \ ldap-monitor ws-monitor \ cache_service \ data-staging wrappers examples acix nordugrid-arc-5.4.2/src/services/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315731021653 xustar000000000000000030 mtime=1513200601.659033953 30 atime=1513200649.712621667 30 ctime=1513200662.428777191 nordugrid-arc-5.4.2/src/services/Makefile.in0000644000175000002070000005750213214315731021732 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
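The acix core pieces above fit together roughly as follows: a key (for example a cache URL) is converted once into an array of integers, run through several of the general-purpose hash functions from hashes.py, and the resulting bit positions are recorded in a BitVector; a membership test re-hashes the key and checks that all of its positions are set, which is the Bloom-filter behaviour exercised by test_bloomfilter.py. The sketch below is only a minimal, self-contained illustration of that idea under those assumptions, not the actual acix.core.bloomfilter module: the names TinyBloom, djb_hash, sdbm_hash and N_BITS are invented for the example, and only two hash functions are inlined (the real filter refers to hashes by short names such as 'js', 'dek' and 'sdbm', as seen in the test above).

import array

N_BITS = 160  # must be a multiple of 8, mirroring the BitVector requirement above

def djb_hash(ints):
    # DJB-style hash over a list of integers, as in hashes.py
    h = 5381
    for k in ints:
        h = ((h << 5) + h) + k
    return h

def sdbm_hash(ints):
    # SDBM-style hash over a list of integers, as in hashes.py
    h = 0
    for k in ints:
        h = k + (h << 6) + (h << 16) - h
    return h

class TinyBloom(object):
    """Illustrative Bloom filter; the production code lives in acix.core.bloomfilter."""

    def __init__(self, n_bits=N_BITS):
        self.n_bits = n_bits
        self.bits = array.array('B', [0] * (n_bits // 8))  # byte-backed bit vector
        self.hashes = (djb_hash, sdbm_hash)

    def _positions(self, key):
        ints = [ord(c) for c in key]  # hash the key once as an int array
        return [h(ints) % self.n_bits for h in self.hashes]

    def add(self, key):
        for pos in self._positions(key):
            self.bits[pos // 8] |= 1 << (pos % 8)

    def __contains__(self, key):
        return all((self.bits[pos // 8] >> (pos % 8)) & 1 for pos in self._positions(key))

if __name__ == '__main__':
    bf = TinyBloom()
    for key in ('one', 'two', 'three'):
        bf.add(key)
    print('%s %s' % ('one' in bf, 'five' in bf))  # True False, barring an unlikely false positive

In the real service the filter is shipped between components in serialized form (compare BitVector.__str__ above and the serialize()/get_hashes() calls in test_bloomfilter.py), which is why these modules care about a compact, endian-stable byte representation.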
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ 
ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ 
LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = 
@libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @A_REX_SERVICE_ENABLED_FALSE@AREX_SERVICE = @A_REX_SERVICE_ENABLED_TRUE@AREX_SERVICE = a-rex @GRIDFTPD_SERVICE_ENABLED_FALSE@GRIDFTPD_SERVICE = @GRIDFTPD_SERVICE_ENABLED_TRUE@GRIDFTPD_SERVICE = gridftpd @LDAP_SERVICE_ENABLED_FALSE@LDAP_SERVICE = @LDAP_SERVICE_ENABLED_TRUE@LDAP_SERVICE = ldap-infosys @LDAP_MONITOR_ENABLED_FALSE@LDAP_MONITOR = @LDAP_MONITOR_ENABLED_TRUE@LDAP_MONITOR = ldap-monitor @WS_MONITOR_ENABLED_FALSE@WS_MONITOR = @WS_MONITOR_ENABLED_TRUE@WS_MONITOR = ws-monitor @CACHE_SERVICE_ENABLED_FALSE@CACHE_SERVICE = @CACHE_SERVICE_ENABLED_TRUE@CACHE_SERVICE = cache_service @DATADELIVERY_SERVICE_ENABLED_FALSE@DATADELIVERY_SERVICE = @DATADELIVERY_SERVICE_ENABLED_TRUE@DATADELIVERY_SERVICE = data-staging @ACIX_ENABLED_FALSE@ACIX = @ACIX_ENABLED_TRUE@ACIX = acix SUBDIRS = $(GRIDFTPD_SERVICE) $(AREX_SERVICE) $(LDAP_SERVICE) \ $(LDAP_MONITOR) $(WS_MONITOR) \ $(CACHE_SERVICE) \ $(DATADELIVERY_SERVICE) \ $(ACIX) \ wrappers examples DIST_SUBDIRS = gridftpd a-rex ldap-infosys \ ldap-monitor ws-monitor \ cache_service \ data-staging wrappers examples acix all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. 
# To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
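# The $(RECURSIVE_TARGETS) and $(RECURSIVE_CLEAN_TARGETS) rules above walk the
# directories in $(SUBDIRS), which contains only the services enabled by
# configure (gridftpd, a-rex, ldap-infosys, the LDAP and WS monitors,
# cache_service, data-staging, acix) plus wrappers and examples.  The
# distclean-* and maintainer-clean-* targets switch to $(DIST_SUBDIRS), so
# every service directory shipped in the tarball is cleaned regardless of
# whether it was enabled for this particular build.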
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/PaxHeaders.7502/data-staging0000644000000000000000000000013213214316030022065 xustar000000000000000030 mtime=1513200664.015796601 30 atime=1513200668.716854096 30 ctime=1513200664.015796601 nordugrid-arc-5.4.2/src/services/data-staging/0000755000175000002070000000000013214316030022210 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/data-staging/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612754431715024221 xustar000000000000000027 mtime=1471296461.229277 29 atime=1513200603.96206212 30 ctime=1513200664.010796539 nordugrid-arc-5.4.2/src/services/data-staging/Makefile.am0000644000175000002070000000227212754431715024267 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libdatadeliveryservice.la if SYSV_SCRIPTS_ENABLED DATA_DELIVERY_SCRIPT = arc-datadelivery-service else DATA_DELIVERY_SCRIPT = endif initd_SCRIPTS = $(DATA_DELIVERY_SCRIPT) if SYSTEMD_UNITS_ENABLED DATA_DELIVERY_UNIT = arc-datadelivery-service.service DATA_DELIVERY_UNIT_WRAPPER = arc-datadelivery-service-start else DATA_DELIVERY_UNIT = DATA_DELIVERY_UNIT_WRAPPER = endif units_DATA = $(DATA_DELIVERY_UNIT) pkgdata_SCRIPTS = $(DATA_DELIVERY_UNIT_WRAPPER) libdatadeliveryservice_la_SOURCES = DataDeliveryService.h DataDeliveryService.cpp libdatadeliveryservice_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libdatadeliveryservice_la_LIBADD = \ $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(GLIBMM_LIBS) libdatadeliveryservice_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/services/data-staging/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315734024220 xustar000000000000000030 mtime=1513200604.014062756 29 atime=1513200651.07663835 30 ctime=1513200664.011796552 nordugrid-arc-5.4.2/src/services/data-staging/Makefile.in0000644000175000002070000007621713214315734024304 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
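# Generated counterpart of the Makefile.am above: the SYSV_SCRIPTS_ENABLED and
# SYSTEMD_UNITS_ENABLED automake conditionals reappear further down as
# @SYSV_SCRIPTS_ENABLED_TRUE@/@SYSV_SCRIPTS_ENABLED_FALSE@ and
# @SYSTEMD_UNITS_ENABLED_TRUE@/@SYSTEMD_UNITS_ENABLED_FALSE@ line prefixes.
# config.status replaces one side of each pair with an empty string and the
# other with '#', so only the enabled variant of the init-script and systemd
# unit variables remains active in the final Makefile.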
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/data-staging DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/arc-datadelivery-service-start.in \ $(srcdir)/arc-datadelivery-service.in \ $(srcdir)/arc-datadelivery-service.service.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = arc-datadelivery-service \ arc-datadelivery-service.service \ arc-datadelivery-service-start CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(initddir)" \ "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(unitsdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libdatadeliveryservice_la_DEPENDENCIES = \ $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(am__DEPENDENCIES_1) am_libdatadeliveryservice_la_OBJECTS = \ libdatadeliveryservice_la-DataDeliveryService.lo libdatadeliveryservice_la_OBJECTS = \ $(am_libdatadeliveryservice_la_OBJECTS) libdatadeliveryservice_la_LINK = $(LIBTOOL) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link 
$(CXXLD) \ $(libdatadeliveryservice_la_CXXFLAGS) $(CXXFLAGS) \ $(libdatadeliveryservice_la_LDFLAGS) $(LDFLAGS) -o $@ SCRIPTS = $(initd_SCRIPTS) $(pkgdata_SCRIPTS) DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libdatadeliveryservice_la_SOURCES) DIST_SOURCES = $(libdatadeliveryservice_la_SOURCES) DATA = $(units_DATA) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = 
@ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ 
PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libdatadeliveryservice.la @SYSV_SCRIPTS_ENABLED_FALSE@DATA_DELIVERY_SCRIPT = @SYSV_SCRIPTS_ENABLED_TRUE@DATA_DELIVERY_SCRIPT = arc-datadelivery-service initd_SCRIPTS = $(DATA_DELIVERY_SCRIPT) @SYSTEMD_UNITS_ENABLED_FALSE@DATA_DELIVERY_UNIT = @SYSTEMD_UNITS_ENABLED_TRUE@DATA_DELIVERY_UNIT = arc-datadelivery-service.service @SYSTEMD_UNITS_ENABLED_FALSE@DATA_DELIVERY_UNIT_WRAPPER = 
@SYSTEMD_UNITS_ENABLED_TRUE@DATA_DELIVERY_UNIT_WRAPPER = arc-datadelivery-service-start units_DATA = $(DATA_DELIVERY_UNIT) pkgdata_SCRIPTS = $(DATA_DELIVERY_UNIT_WRAPPER) libdatadeliveryservice_la_SOURCES = DataDeliveryService.h DataDeliveryService.cpp libdatadeliveryservice_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libdatadeliveryservice_la_LIBADD = \ $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(GLIBMM_LIBS) libdatadeliveryservice_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/data-staging/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/data-staging/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): arc-datadelivery-service: $(top_builddir)/config.status $(srcdir)/arc-datadelivery-service.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arc-datadelivery-service.service: $(top_builddir)/config.status $(srcdir)/arc-datadelivery-service.service.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arc-datadelivery-service-start: $(top_builddir)/config.status $(srcdir)/arc-datadelivery-service-start.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f 
"$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libdatadeliveryservice.la: $(libdatadeliveryservice_la_OBJECTS) $(libdatadeliveryservice_la_DEPENDENCIES) $(libdatadeliveryservice_la_LINK) -rpath $(pkglibdir) $(libdatadeliveryservice_la_OBJECTS) $(libdatadeliveryservice_la_LIBADD) $(LIBS) install-initdSCRIPTS: $(initd_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(initddir)" || $(MKDIR_P) "$(DESTDIR)$(initddir)" @list='$(initd_SCRIPTS)'; test -n "$(initddir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(initddir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(initddir)$$dir" || exit $$?; \ } \ ; done uninstall-initdSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(initd_SCRIPTS)'; test -n "$(initddir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(initddir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(initddir)" && rm -f $$files install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdatadeliveryservice_la-DataDeliveryService.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF 
$(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libdatadeliveryservice_la-DataDeliveryService.lo: DataDeliveryService.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdatadeliveryservice_la_CXXFLAGS) $(CXXFLAGS) -MT libdatadeliveryservice_la-DataDeliveryService.lo -MD -MP -MF $(DEPDIR)/libdatadeliveryservice_la-DataDeliveryService.Tpo -c -o libdatadeliveryservice_la-DataDeliveryService.lo `test -f 'DataDeliveryService.cpp' || echo '$(srcdir)/'`DataDeliveryService.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdatadeliveryservice_la-DataDeliveryService.Tpo $(DEPDIR)/libdatadeliveryservice_la-DataDeliveryService.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataDeliveryService.cpp' object='libdatadeliveryservice_la-DataDeliveryService.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdatadeliveryservice_la_CXXFLAGS) $(CXXFLAGS) -c -o libdatadeliveryservice_la-DataDeliveryService.lo `test -f 'DataDeliveryService.cpp' || echo '$(srcdir)/'`DataDeliveryService.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-unitsDATA: $(units_DATA) @$(NORMAL_INSTALL) test -z "$(unitsdir)" || $(MKDIR_P) "$(DESTDIR)$(unitsdir)" @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(unitsdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(unitsdir)" || exit $$?; \ done uninstall-unitsDATA: @$(NORMAL_UNINSTALL) @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(unitsdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(unitsdir)" && rm -f $$files ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; 
}'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(initddir)" "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(unitsdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
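# Install layout for this service: the libdatadeliveryservice.la loadable
# module (linked with -module -avoid-version) is installed into $(pkglibdir),
# the SysV init script arc-datadelivery-service into $(initddir), the systemd
# unit arc-datadelivery-service.service into $(unitsdir), and the
# arc-datadelivery-service-start wrapper, which the unit's ExecStart invokes,
# into $(pkgdatadir).
#
# Minimal post-install sketch (an illustration, not part of this build system):
# on a systemd host one would typically run
#   systemctl daemon-reload
#   systemctl enable --now arc-datadelivery-service
# while on a SysV host the installed init script is used instead:
#   service arc-datadelivery-service start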
clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-initdSCRIPTS install-pkgdataSCRIPTS \ install-unitsDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-initdSCRIPTS uninstall-pkgdataSCRIPTS \ uninstall-pkglibLTLIBRARIES uninstall-unitsDATA .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am \ install-initdSCRIPTS install-man install-pdf install-pdf-am \ install-pkgdataSCRIPTS install-pkglibLTLIBRARIES install-ps \ install-ps-am install-strip install-unitsDATA installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-initdSCRIPTS \ uninstall-pkgdataSCRIPTS uninstall-pkglibLTLIBRARIES \ uninstall-unitsDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/data-staging/PaxHeaders.7502/DataDeliveryService.h0000644000000000000000000000012312133525051026215 xustar000000000000000026 mtime=1366207017.17064 27 atime=1513200575.483713 30 ctime=1513200664.015796601 nordugrid-arc-5.4.2/src/services/data-staging/DataDeliveryService.h0000644000175000002070000001144012133525051026263 0ustar00mockbuildmock00000000000000#ifndef DATADELIVERYSERVICE_H_ #define DATADELIVERYSERVICE_H_ #include #include #include #include #include #include #include namespace DataStaging { /// Service for the Delivery layer of data staging. /** * This service starts and controls data transfers. It assumes that the * files in any request submitted are ready for immediate transfer and * so do not need to be resolved or prepared in any way. * * It implements DTRCallback to get callbacks when a DTR has finished * transfer. * * Status codes in results returned: * - OK - successful submission/cancellation * - TRANSFERRING - transfer still ongoing * - TRANSFERRED - transfer finished successfully * - TRANSFER_ERROR - transfer failed * - SERVICE_ERROR - something went wrong in the service itself * * An internal list of active transfers is held in memory. After the first * query of a finished transfer (successful or not) the DTR is moved to an * archived list where only summary information is kept about the transfer * (DTR ID, state and short error description). 
The DTR object is then * deleted. This archived list is also kept in memory. In case a transfer is * never queried, a separate thread moves any transfers which completed more * than one hour ago to the archived list. */ class DataDeliveryService: public Arc::RegisteredService, DTRCallback { /// Managed pointer to stringstream used to hold log output typedef Arc::ThreadedPointer sstream_ptr; private: /// Construct a SOAP error message with optional extra reason string Arc::MCC_Status make_soap_fault(Arc::Message& outmsg, const std::string& reason = ""); /// DataDeliveryService namespace Arc::NS ns; /// Directories the service is allowed to copy files from or to std::list allowed_dirs; /// Process limit read from cache service configuration unsigned int max_processes; /// Current processes - using gint to guarantee atomic thread-safe operations gint current_processes; /// Internal list of active DTRs, mapped to the stream with the transfer log std::map active_dtrs; /// Lock for active DTRs list Arc::SimpleCondition active_dtrs_lock; /// Archived list of finished DTRs, just ID and final state and short explanation /// TODO: save to file, DB? std::map > archived_dtrs; /// Lock for archive DTRs list Arc::SimpleCondition archived_dtrs_lock; /// Object to manage Delivery processes DataDelivery delivery; /// Container for delegated credentials Arc::DelegationContainerSOAP delegation; /// Directory in which to store temporary delegated proxies std::string tmp_proxy_dir; /// Root logger destinations, to use when logging messages in methods /// called from Delivery layer where root logger is disabled std::list root_destinations; /// Logger object static Arc::Logger logger; /// Log a message to root destinations void LogToRootLogger(Arc::LogLevel level, const std::string& message); /// Static version of ArchivalThread, used when thread is created static void ArchivalThread(void* arg); /// Archival thread void ArchivalThread(void); /// Sanity check on file sources and destinations bool CheckInput(const std::string& url, const Arc::UserConfig& usercfg, Arc::XMLNode& resultelement, bool& require_credential_file); /* individual operations */ /// Start a new transfer Arc::MCC_Status Start(Arc::XMLNode in, Arc::XMLNode out); /// Query status of transfer Arc::MCC_Status Query(Arc::XMLNode in, Arc::XMLNode out); /// Cancel a transfer Arc::MCC_Status Cancel(Arc::XMLNode in, Arc::XMLNode out); /// Check service is ok and return service information Arc::MCC_Status Ping(Arc::XMLNode in, Arc::XMLNode out); public: /// Make a new DataDeliveryService. Sets up the process handler. DataDeliveryService(Arc::Config *cfg, Arc::PluginArgument* parg); /// Destroy the DataDeliveryService virtual ~DataDeliveryService(); /// Main method called by HED when service is invoked. Directs call to appropriate internal method. virtual Arc::MCC_Status process(Arc::Message &inmsg, Arc::Message &outmsg); /// Implementation of callback method from DTRCallback virtual void receiveDTR(DTR_ptr dtr); /// Supplies information on the service for use in the information system. 
bool RegistrationCollector(Arc::XMLNode &doc); }; } // namespace DataStaging #endif /* DATADELIVERYSERVICE_H_ */ nordugrid-arc-5.4.2/src/services/data-staging/PaxHeaders.7502/arc-datadelivery-service.in0000644000000000000000000000012613153455353027371 xustar000000000000000026 mtime=1504598763.23014 30 atime=1513200651.095638582 30 ctime=1513200664.013796576 nordugrid-arc-5.4.2/src/services/data-staging/arc-datadelivery-service.in0000644000175000002070000002052113153455353027434 0ustar00mockbuildmock00000000000000#!/bin/bash # # Init file for the DataDelivery service # # chkconfig: 2345 87 13 # description: ARC DataDelivery service # processname: arched ### BEGIN INIT INFO # Provides: arc-datadelivery-service # Required-Start: $local_fs $remote_fs # Required-Stop: $local_fs $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: ARC DataDelivery service # Description: ARC DataDelivery service ### END INIT INFO # source function library if [ -f /etc/init.d/functions ]; then . /etc/init.d/functions log_success_msg() { echo -n "$@" success "$@" echo } log_warning_msg() { echo -n "$@" warning "$@" echo } log_failure_msg() { echo -n "$@" failure "$@" echo } elif [ -f /lib/lsb/init-functions ]; then . /lib/lsb/init-functions else echo "Error: Cannot source neither init.d nor lsb functions" exit 1 fi add_library_path() { location="$1" if [ ! "x$location" = "x" ] ; then if [ ! "$location" = "/usr" ] ; then libdir="$location/lib" libdir64="$location/lib64" if [ -d "$libdir64" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir64" else LD_LIBRARY_PATH="$libdir64:$LD_LIBRARY_PATH" fi fi if [ -d "$libdir" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir" else LD_LIBRARY_PATH="$libdir:$LD_LIBRARY_PATH" fi fi fi fi } prog=arched RUN=yes # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/arc-datadelivery-service ]; then . /etc/sysconfig/arc-datadelivery-service elif [ -r /etc/default/arc-datadelivery-service ]; then . /etc/default/arc-datadelivery-service fi # GLOBUS_LOCATION GLOBUS_LOCATION=${GLOBUS_LOCATION:-@DEFAULT_GLOBUS_LOCATION@} if [ ! -d "$GLOBUS_LOCATION" ]; then log_failure_msg "GLOBUS_LOCATION ($GLOBUS_LOCATION) not found" exit 1 fi export GLOBUS_LOCATION # ARC_LOCATION ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then log_failure_msg "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi export ARC_LOCATION # Needed to get pid file readconfigvar() { fname=$1 if [ ! -r "$fname" ]; then return fi bname="[$2]" vname=$3 value= cat "$fname" | grep -e '^\[' -e "^${vname}[[:space:]]*=" | { while true; do read line if [ ! $? = 0 ] ; then return fi if [ "$line" = "$bname" ] ; then while true ; do read line if [ ! $? 
= 0 ] ; then return fi lstart=`echo "$line" | head -c 1` if [ "$lstart" = '[' ] ; then return fi vlname=`echo "$line" | sed 's/=.*//;t;s/.*//'` vlname=`eval "echo $vlname"` if [ "$vlname" = "$vname" ] ; then val=`echo "$line" | sed 's/[^=]*=//'` eval "echo $val" return fi done fi done } } # ARC_CONFIG if [ "x$ARC_CONFIG" = "x" ]; then if [ -r $ARC_LOCATION/etc/arc.conf ]; then ARC_CONFIG=$ARC_LOCATION/etc/arc.conf elif [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi fi PID_FILE=`readconfigvar "$ARC_CONFIG" common pidfile` if [ `id -u` = 0 ] ; then # Debian does not have /var/lock/subsys if [ -d /var/lock/subsys ]; then LOCKFILE=/var/lock/subsys/$prog-datadelivery-service else LOCKFILE=/var/lock/$prog-datadelivery-service fi if [ "x$PID_FILE" = "x" ]; then PID_FILE=/var/run/$prog-datadelivery-service.pid fi else LOCKFILE=$HOME/$prog-datadelivery-service.lock if [ "x$PID_FILE" = "x" ]; then PID_FILE=$HOME/$prog-datadelivery-service.pid fi fi LOG_FILE=`readconfigvar "$ARC_CONFIG" common logfile` if [ "x$LOG_FILE" = "x" ]; then LOG_FILE=/var/log/arc/datadelivery-service.log fi prepare() { CMD="$ARC_LOCATION/sbin/$prog" if [ ! -x "$CMD" ]; then log_failure_msg "Missing executable" exit 1 fi if [ ! -r "$ARC_CONFIG" ]; then log_failure_msg "ARC configuration not found (usually /etc/arc.conf)" exit 1 fi # check that at least service is defined grep -e '^\[datadelivery-service\]' $ARC_CONFIG 1>/dev/null 2>&1 if [ $? -ne 0 ] ; then log_failure_msg "Data delivery service not defined in configuration" exit 1 fi # check that if service is insecure no allowed_dns are defined SECURE=`readconfigvar "$ARC_CONFIG" common secure` ALLOWEDDN=`readconfigvar "$ARC_CONFIG" datadelivery-service allowed_dn` if [ "$SECURE" = "no" ]; then if [ "x$ALLOWEDDN" != "x" ]; then log_failure_msg "allowed_dn cannot be used with secure=no" exit 1 fi fi # Assuming ini style config CMD="$CMD -i '$ARC_CONFIG' -p '$PID_FILE' -l '$LOG_FILE'" add_library_path "$GLOBUS_LOCATION" if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@ else LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@:$LD_LIBRARY_PATH fi export LD_LIBRARY_PATH cd / } start() { if [ "$RUN" != "yes" ] ; then echo "arc-datadelivery-service disabled, please adjust the configuration to your" echo "needs and then set RUN to 'yes' in /etc/default/arc-datadelivery-service to" echo "enable it." return 0 fi echo -n "Starting $prog: " # Check if we are already running if [ -f $PID_FILE ]; then read pid < $PID_FILE if [ "x$pid" != "x" ]; then ps -p "$pid" -o comm 2>/dev/null | grep "^$prog$" 1>/dev/null 2>/dev/null if [ $? -eq 0 ] ; then log_success_msg "already running (pid $pid)" return 0 fi fi rm -f "$PID_FILE" "$LOCKFILE" fi prepare eval "$CMD" RETVAL=$? if [ $RETVAL -eq 0 ]; then touch $LOCKFILE log_success_msg else log_failure_msg fi return $RETVAL } stop() { echo -n "Stopping $prog: " if [ -f "$PID_FILE" ]; then read pid < "$PID_FILE" if [ ! -z "$pid" ] ; then kill "$pid" RETVAL=$? 
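    # Report the outcome of the TERM signal, then give arched up to 10 seconds
    # to exit before escalating to KILL and removing the pid and lock files.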
if [ $RETVAL -eq 0 ]; then log_success_msg else log_failure_msg fi timeout=10; # enough time to kill any active processes while ( ps -p "$pid" -o comm 2>/dev/null | grep "^$prog$" 1>/dev/null 2>/dev/null ) && [ $timeout -ge 1 ] ; do sleep 1 timeout=$(($timeout - 1)) done [ $timeout -lt 1 ] && kill -9 "$pid" 1>/dev/null 2>&1 rm -f "$PID_FILE" "$LOCKFILE" else RETVAL=1 log_failure_msg "$prog shutdown - pidfile is empty" fi else RETVAL=0 log_success_msg "$prog shutdown - already stopped" fi return $RETVAL } status() { if [ -f "$PID_FILE" ]; then read pid < "$PID_FILE" if [ "$pid" != "" ]; then if ps -p "$pid" > /dev/null; then echo "$1 (pid $pid) is running..." return 0 fi echo "$1 stopped but pid file exists" return 1 fi fi if [ -f $LOCKFILE ]; then echo "$1 stopped but lockfile exists" return 2 fi echo "$1 is stopped" return 3 } restart() { stop start } case "$1" in start) start ;; stop) stop ;; status) status $prog ;; restart | force-reload) restart ;; reload) ;; condrestart | try-restart) [ -f $LOCKFILE ] && restart || : ;; *) echo "Usage: $0 {start|stop|status|restart|force-reload|reload|condrestart|try-restart}" exit 1 ;; esac exit $? nordugrid-arc-5.4.2/src/services/data-staging/PaxHeaders.7502/arc-datadelivery-service.service.in0000644000000000000000000000012712754431715031033 xustar000000000000000027 mtime=1471296461.229277 30 atime=1513200651.110638765 30 ctime=1513200664.014796588 nordugrid-arc-5.4.2/src/services/data-staging/arc-datadelivery-service.service.in0000644000175000002070000000037212754431715031077 0ustar00mockbuildmock00000000000000[Unit] Description=A-REX datadelivery service After=local_fs.target remote_fs.target [Service] Type=forking PIDFile=/var/run/arched-datadelivery-service.pid ExecStart=@pkgdatadir@/arc-datadelivery-service-start [Install] WantedBy=multi-user.target nordugrid-arc-5.4.2/src/services/data-staging/PaxHeaders.7502/DataDeliveryService.cpp0000644000000000000000000000012412754411434026561 xustar000000000000000027 mtime=1471288092.080352 27 atime=1513200575.481713 30 ctime=1513200664.016796613 nordugrid-arc-5.4.2/src/services/data-staging/DataDeliveryService.cpp0000644000175000002070000006606612754411434026644 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include "DataDeliveryService.h" namespace DataStaging { static Arc::Plugin *get_service(Arc::PluginArgument* arg) { Arc::ServicePluginArgument* srvarg = arg?dynamic_cast(arg):NULL; if(!srvarg) return NULL; DataDeliveryService* s = new DataDeliveryService((Arc::Config*)(*srvarg),arg); if (*s) return s; delete s; return NULL; } Arc::Logger DataDeliveryService::logger(Arc::Logger::rootLogger, "DataDeliveryService"); void DataDeliveryService::ArchivalThread(void* arg) { DataDeliveryService* service = (DataDeliveryService*)arg; service->ArchivalThread(); } void DataDeliveryService::ArchivalThread() { // archive every 10 mins DTRs older than 1 hour // TODO: configurable, save to disk? 
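    // The sweep below runs every 'frequency' seconds: any DTR whose last
    // modification is more than an hour old and which is no longer in the
    // TRANSFERRING state is summarised into archived_dtrs (final state plus a
    // short error description), its per-transfer log destinations are freed,
    // and it is removed from active_dtrs.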
int frequency = 600; while (true) { sleep(frequency); Arc::Time timelimit(Arc::Time()-Arc::Period(3600)); active_dtrs_lock.lock(); for (std::map::iterator i = active_dtrs.begin(); i != active_dtrs.end();) { DTR_ptr dtr = i->first; if (dtr->get_modification_time() < timelimit && dtr->get_status() != DTRStatus::TRANSFERRING) { archived_dtrs_lock.lock(); if (dtr->error()) { logger.msg(Arc::VERBOSE, "Archiving DTR %s, state ERROR", dtr->get_id()); archived_dtrs[dtr->get_id()] = std::pair("TRANSFER_ERROR", dtr->get_error_status().GetDesc()); } else { logger.msg(Arc::VERBOSE, "Archiving DTR %s, state %s", dtr->get_id(), dtr->get_status().str()); archived_dtrs[dtr->get_id()] = std::pair("TRANSFERRED", ""); } archived_dtrs_lock.unlock(); // clean up DTR memory - delete DTR LogDestinations dtr->clean_log_destinations(); active_dtrs.erase(i++); } else ++i; } active_dtrs_lock.unlock(); } } bool DataDeliveryService::CheckInput(const std::string& url, const Arc::UserConfig& usercfg, Arc::XMLNode& resultelement, bool& require_credential_file) { Arc::DataHandle h(url, usercfg); if (!h || !(*h)) { resultelement.NewChild("ErrorDescription") = "Can't handle URL " + url; return false; } if (h->Local()) { std::string path(h->GetURL().Path()); if (path.find("../") != std::string::npos) { resultelement.NewChild("ErrorDescription") = "'../' is not allowed in filename"; return false; } bool allowed = false; for (std::list::iterator i = allowed_dirs.begin(); i != allowed_dirs.end(); ++i) { if (path.find(*i) == 0) allowed = true; } if (!allowed) { resultelement.NewChild("ErrorDescription") = "Access denied to path " + path; return false; } } if (h->RequiresCredentialsInFile()) require_credential_file = true; return true; } void DataDeliveryService::LogToRootLogger(Arc::LogLevel level, const std::string& message) { Arc::Logger::getRootLogger().addDestinations(root_destinations); logger.msg(level, message); Arc::Logger::getRootLogger().removeDestinations(); } void DataDeliveryService::receiveDTR(DTR_ptr dtr) { LogToRootLogger(Arc::INFO, "Received DTR "+dtr->get_id()+" from Delivery in state "+dtr->get_status().str()); // delete temp proxy file if it was created if (dtr->get_source()->RequiresCredentialsInFile() || dtr->get_destination()->RequiresCredentialsInFile()) { std::string proxy_file(tmp_proxy_dir+"/DTR."+dtr->get_id()+".proxy"); LogToRootLogger(Arc::DEBUG, "Removing temp proxy "+proxy_file); if (unlink(proxy_file.c_str()) != 0 && errno != ENOENT) { LogToRootLogger(Arc::WARNING, "Failed to remove temporary proxy "+proxy_file+": "+Arc::StrError(errno)); } } if (current_processes > 0) --current_processes; } /* Accepts: id url url 1000 1000 true 12345 adler32:12345678 100 60 100 120 ... Returns id SERVICE_ERROR ... ... 
*/ Arc::MCC_Status DataDeliveryService::Start(Arc::XMLNode in, Arc::XMLNode out) { Arc::XMLNode resp = out.NewChild("DataDeliveryStartResponse"); Arc::XMLNode results = resp.NewChild("DataDeliveryStartResult"); // Save credentials to temp file and set in UserConfig Arc::XMLNode delegated_token = in["DataDeliveryStart"]["deleg:DelegatedToken"]; if (!delegated_token) { logger.msg(Arc::ERROR, "No delegation token in request"); return Arc::MCC_Status(Arc::GENERIC_ERROR, "DataDeliveryService", "No delegation token received"); } // Check credentials were already delegated std::string credential; if (!delegation.DelegatedToken(credential, delegated_token)) { // Failed to accept delegation logger.msg(Arc::ERROR, "Failed to accept delegation"); return Arc::MCC_Status(Arc::GENERIC_ERROR, "DataDeliveryService", "Failed to accept delegation"); } for(int n = 0;;++n) { Arc::XMLNode dtrnode = in["DataDeliveryStart"]["DTR"][n]; if (!dtrnode) break; std::string dtrid((std::string)dtrnode["ID"]); std::string src((std::string)dtrnode["Source"]); std::string dest((std::string)dtrnode["Destination"]); int uid = Arc::stringtoi((std::string)dtrnode["Uid"]); int gid = Arc::stringtoi((std::string)dtrnode["Gid"]); if (dtrnode["Caching"] == "true") { uid = Arc::User().get_uid(); gid = Arc::User().get_gid(); } // proxy path will be set later Arc::initializeCredentialsType cred_type(Arc::initializeCredentialsType::SkipCredentials); Arc::UserConfig usercfg(cred_type); bool require_credential_file = false; Arc::XMLNode resultelement = results.NewChild("Result"); resultelement.NewChild("ID") = dtrid; if (!CheckInput(src, usercfg, resultelement, require_credential_file)) { resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement["ErrorDescription"] = (std::string)resultelement["ErrorDescription"] + ": Cannot use source"; logger.msg(Arc::ERROR, (std::string)resultelement["ErrorDescription"]); continue; } if (!CheckInput(dest, usercfg, resultelement, require_credential_file)) { resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement["ErrorDescription"] = (std::string)resultelement["ErrorDescription"] + ": Cannot use destination"; logger.msg(Arc::ERROR, (std::string)resultelement["ErrorDescription"]); continue; } if (current_processes >= max_processes) { logger.msg(Arc::WARNING, "All %u process slots used", max_processes); resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement.NewChild("ErrorDescription") = "No free process slot available"; continue; } // check if dtrid is in the active list - if so it is probably a retry active_dtrs_lock.lock(); std::map::iterator i = active_dtrs.begin(); for (; i != active_dtrs.end(); ++i) { if (i->first->get_id() == dtrid) break; } if (i != active_dtrs.end()) { if (i->first->get_status() == DTRStatus::TRANSFERRING) { logger.msg(Arc::ERROR, "Received retry for DTR %s still in transfer", dtrid); resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement.NewChild("ErrorDescription") = "DTR is still in transfer"; active_dtrs_lock.unlock(); continue; } // Erase this DTR from active list logger.msg(Arc::VERBOSE, "Replacing DTR %s in state %s with new request", dtrid, i->first->get_status().str()); i->first->clean_log_destinations(); active_dtrs.erase(i); } active_dtrs_lock.unlock(); std::string proxy_file(tmp_proxy_dir+"/DTR."+dtrid+".proxy"); if (require_credential_file) { // Store proxy, only readable by user. Use DTR job id as proxy name. 
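      // (Illustration, based on the code below and on tmp_proxy_dir as set in
      //  the constructor: the credential typically ends up in a file such as
      //  /tmp/arc/DTR.<id>.proxy, created with mode 0600 and then chowned to
      //  the DTR's uid:gid so the transfer process can read it.)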
// TODO: it is inefficient to create a file for every DTR, better to // use some kind of proxy store logger.msg(Arc::VERBOSE, "Storing temp proxy at %s", proxy_file); bool proxy_result = Arc::FileCreate(proxy_file, credential, 0, 0, S_IRUSR | S_IWUSR); if (!proxy_result && errno == ENOENT) { Arc::DirCreate(tmp_proxy_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH, true); proxy_result = Arc::FileCreate(proxy_file, credential); } if (!proxy_result) { logger.msg(Arc::ERROR, "Failed to create temp proxy at %s: %s", proxy_file, Arc::StrError(errno)); resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement.NewChild("ErrorDescription") = "Failed to store temporary proxy"; continue; } if (chown(proxy_file.c_str(), uid, gid) != 0) { logger.msg(Arc::ERROR, "Failed to change owner of temp proxy at %s to %i:%i: %s", proxy_file, uid, gid, Arc::StrError(errno)); resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement.NewChild("ErrorDescription") = "Failed to store temporary proxy"; continue; } usercfg.ProxyPath(proxy_file); } else { usercfg.CredentialString(credential); } // Logger for this DTR. Uses a string stream so log can easily be sent // back to the client. LogStream keeps a reference to the stream so we // cannot delete it until deleting LogStream. These pointers are // deleted when the DTR is archived. sstream_ptr stream(new std::stringstream()); Arc::LogDestination * output = new Arc::LogStream(*stream); output->setFormat(Arc::MediumFormat); DTRLogger log(new Arc::Logger(Arc::Logger::getRootLogger(), "DataStaging")); log->removeDestinations(); log->addDestination(*output); std::string groupid(Arc::UUID()); DTR_ptr dtr(new DTR(src, dest, usercfg, groupid, uid, log)); if (!(*dtr)) { logger.msg(Arc::ERROR, "Invalid DTR"); resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement.NewChild("ErrorDescription") = "Could not create DTR"; log->deleteDestinations(); if (unlink(proxy_file.c_str()) != 0 && errno != ENOENT) { logger.msg(Arc::WARNING, "Failed to remove temporary proxy %s: %s", proxy_file, Arc::StrError(errno)); } continue; } ++current_processes; // Set source checksum to validate against if (dtrnode["CheckSum"]) dtr->get_source()->SetCheckSum((std::string)dtrnode["CheckSum"]); // Set filesize for protocols which need it if (dtrnode["Size"]) dtr->get_source()->SetSize(Arc::stringtoull((std::string)dtrnode["Size"])); // Get the callbacks sent to Scheduler and connect Delivery dtr->registerCallback(this, SCHEDULER); dtr->registerCallback(&delivery, DELIVERY); // Set transfer limits TransferParameters transfer_params; if (dtrnode["MinAverageSpeed"]) transfer_params.min_average_bandwidth = Arc::stringtoull((std::string)dtrnode["MinAverageSpeed"]); if (dtrnode["AverageTime"]) transfer_params.averaging_time = Arc::stringtoui((std::string)dtrnode["AverageTime"]); if (dtrnode["MinCurrentSpeed"]) transfer_params.min_current_bandwidth = Arc::stringtoull((std::string)dtrnode["MinCurrentSpeed"]); if (dtrnode["MaxInactivityTime"]) transfer_params.max_inactivity_time = Arc::stringtoui((std::string)dtrnode["MaxInactivityTime"]); delivery.SetTransferParameters(transfer_params); dtr->set_id(dtrid); dtr->set_status(DTRStatus::TRANSFER); DTR::push(dtr, DELIVERY); // Add to active list active_dtrs_lock.lock(); active_dtrs[dtr] = stream; active_dtrs_lock.unlock(); resultelement.NewChild("ResultCode") = "OK"; } return Arc::MCC_Status(Arc::STATUS_OK); } /* Accepts: id ... Returns: id ERROR ... 2 1 ... 1234 123456789 adler32:a123a45 ... 
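   (Markup lost in this copy. Hedged sketch, inferred from the element names
    handled by Query() below; namespaces omitted, the real schema may differ:

    request:   <DataDeliveryQuery> <DTR> <ID>id</ID> </DTR> ... </DataDeliveryQuery>

    response:  one <Result> per DTR carrying <ID> and <ResultCode>
               (TRANSFERRED, TRANSFERRING, TRANSFER_ERROR or SERVICE_ERROR),
               plus <Log> and <BytesTransferred>; on error also
               <ErrorDescription>, <ErrorStatus> and <ErrorLocation>, and for
               finished transfers <TransferTime> and, if available, <CheckSum>.)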
*/ Arc::MCC_Status DataDeliveryService::Query(Arc::XMLNode in, Arc::XMLNode out) { Arc::XMLNode resp = out.NewChild("DataDeliveryQueryResponse"); Arc::XMLNode results = resp.NewChild("DataDeliveryQueryResult"); for(int n = 0;;++n) { Arc::XMLNode dtrnode = in["DataDeliveryQuery"]["DTR"][n]; if (!dtrnode) break; std::string dtrid((std::string)dtrnode["ID"]); Arc::XMLNode resultelement = results.NewChild("Result"); resultelement.NewChild("ID") = dtrid; active_dtrs_lock.lock(); std::map::iterator dtr_it = active_dtrs.begin(); for (; dtr_it != active_dtrs.end(); ++dtr_it) { if (dtr_it->first->get_id() == dtrid) break; } if (dtr_it == active_dtrs.end()) { active_dtrs_lock.unlock(); // if not in active list, look in archived list archived_dtrs_lock.lock(); std::map >::const_iterator arc_it = archived_dtrs.find(dtrid); if (arc_it != archived_dtrs.end()) { resultelement.NewChild("ResultCode") = archived_dtrs[dtrid].first; resultelement.NewChild("ErrorDescription") = archived_dtrs[dtrid].second; archived_dtrs_lock.unlock(); continue; } archived_dtrs_lock.unlock(); logger.msg(Arc::ERROR, "No such DTR %s", dtrid); resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement.NewChild("ErrorDescription") = "No such DTR"; continue; } DTR_ptr dtr = dtr_it->first; resultelement.NewChild("Log") = dtr_it->second->str(); resultelement.NewChild("BytesTransferred") = Arc::tostring(dtr->get_bytes_transferred()); if (dtr->error()) { logger.msg(Arc::INFO, "DTR %s failed: %s", dtrid, dtr->get_error_status().GetDesc()); resultelement.NewChild("ResultCode") = "TRANSFER_ERROR"; resultelement.NewChild("ErrorDescription") = dtr->get_error_status().GetDesc(); resultelement.NewChild("ErrorStatus") = Arc::tostring(dtr->get_error_status().GetErrorStatus()); resultelement.NewChild("ErrorLocation") = Arc::tostring(dtr->get_error_status().GetErrorLocation()); resultelement.NewChild("TransferTime") = Arc::tostring(dtr->get_transfer_time()); archived_dtrs_lock.lock(); archived_dtrs[dtrid] = std::pair("TRANSFER_ERROR", dtr->get_error_status().GetDesc()); archived_dtrs_lock.unlock(); } else if (dtr->get_status() == DTRStatus::TRANSFERRED) { logger.msg(Arc::INFO, "DTR %s finished successfully", dtrid); resultelement.NewChild("ResultCode") = "TRANSFERRED"; resultelement.NewChild("TransferTime") = Arc::tostring(dtr->get_transfer_time()); // pass calculated checksum back to Scheduler (eg to insert in catalog) if (dtr->get_destination()->CheckCheckSum()) resultelement.NewChild("CheckSum") = dtr->get_destination()->GetCheckSum(); archived_dtrs_lock.lock(); archived_dtrs[dtrid] = std::pair("TRANSFERRED", ""); archived_dtrs_lock.unlock(); } else { logger.msg(Arc::VERBOSE, "DTR %s still in progress (%lluB transferred)", dtrid, dtr->get_bytes_transferred()); resultelement.NewChild("ResultCode") = "TRANSFERRING"; active_dtrs_lock.unlock(); return Arc::MCC_Status(Arc::STATUS_OK); } // Terminal state - clean up DTR LogDestinations dtr->clean_log_destinations(); //delete dtr_it->second; active_dtrs.erase(dtr_it); active_dtrs_lock.unlock(); } return Arc::MCC_Status(Arc::STATUS_OK); } /* Accepts: id ... Returns: id ERROR ... ... 
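   (Markup lost in this copy. Hedged sketch, inferred from the element names
    used by Cancel() below; namespaces omitted:

    request:   <DataDeliveryCancel> <DTR> <ID>id</ID> </DTR> ... </DataDeliveryCancel>

    response:  one <Result> per DTR with <ID>, <ResultCode> (OK or
               SERVICE_ERROR) and, on failure, an <ErrorDescription>.)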
*/ Arc::MCC_Status DataDeliveryService::Cancel(Arc::XMLNode in, Arc::XMLNode out) { Arc::XMLNode resp = out.NewChild("DataDeliveryCancelResponse"); Arc::XMLNode results = resp.NewChild("DataDeliveryCancelResult"); for (int n = 0;;++n) { Arc::XMLNode dtrnode = in["DataDeliveryCancel"]["DTR"][n]; if (!dtrnode) break; std::string dtrid((std::string)dtrnode["ID"]); Arc::XMLNode resultelement = results.NewChild("Result"); resultelement.NewChild("ID") = dtrid; // Check if DTR is still in active list active_dtrs_lock.lock(); std::map::iterator dtr_it = active_dtrs.begin(); for (; dtr_it != active_dtrs.end(); ++dtr_it) { if (dtr_it->first->get_id() == dtrid) break; } if (dtr_it == active_dtrs.end()) { active_dtrs_lock.unlock(); logger.msg(Arc::ERROR, "No active DTR %s", dtrid); resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement.NewChild("ErrorDescription") = "No such active DTR"; continue; } // DTR could be already finished, but report successful cancel anyway DTR_ptr dtr = dtr_it->first; if (dtr->get_status() == DTRStatus::TRANSFERRING_CANCEL) { active_dtrs_lock.unlock(); logger.msg(Arc::ERROR, "DTR %s was already cancelled", dtrid); resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement.NewChild("ErrorDescription") = "DTR already cancelled"; continue; } // Delivery will automatically kill running process if (!delivery.cancelDTR(dtr)) { active_dtrs_lock.unlock(); logger.msg(Arc::ERROR, "DTR %s could not be cancelled", dtrid); resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement.NewChild("ErrorDescription") = "DTR could not be cancelled"; continue; } logger.msg(Arc::INFO, "DTR %s cancelled", dtr->get_id()); resultelement.NewChild("ResultCode") = "OK"; active_dtrs_lock.unlock(); } return Arc::MCC_Status(Arc::STATUS_OK); } /* Accepts: Returns: ERROR ... /var/arc/cache 6.5 ... ... 
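   (Markup lost in this copy. Hedged sketch, inferred from the elements built
    by Ping() below; namespaces omitted. The request appears to carry no
    arguments beyond the operation element itself:

    response:  a single <Result> with <ResultCode>, one <AllowedDir> element
               per configured directory (e.g. /var/arc/cache) and <LoadAvg>
               with the 5-minute load average, or -1 if it cannot be read.)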
*/ Arc::MCC_Status DataDeliveryService::Ping(Arc::XMLNode in, Arc::XMLNode out) { Arc::XMLNode resultelement = out.NewChild("DataDeliveryPingResponse").NewChild("DataDeliveryPingResult").NewChild("Result"); resultelement.NewChild("ResultCode") = "OK"; for (std::list::iterator dir = allowed_dirs.begin(); dir != allowed_dirs.end(); ++dir) { resultelement.NewChild("AllowedDir") = *dir; } // Send the 5 min load average double avg[3]; if (getloadavg(avg, 3) != 3) { logger.msg(Arc::WARNING, "Failed to get load average: %s", Arc::StrError()); resultelement.NewChild("LoadAvg") = "-1"; } else { resultelement.NewChild("LoadAvg") = Arc::tostring(avg[1]); } return Arc::MCC_Status(Arc::STATUS_OK); } DataDeliveryService::DataDeliveryService(Arc::Config *cfg, Arc::PluginArgument* parg) : RegisteredService(cfg,parg), max_processes(100), current_processes(0) { valid = false; // Set medium format for logging root_destinations = Arc::Logger::getRootLogger().getDestinations(); for (std::list::iterator i = root_destinations.begin(); i != root_destinations.end(); ++i) { (*i)->setFormat(Arc::MediumFormat); } // Check configuration - at least one allowed IP address and dir must be specified if (!(*cfg)["SecHandler"]["PDP"]["Policy"]["Rule"]["Subjects"]["Subject"]) { logger.msg(Arc::ERROR, "Invalid configuration - no allowed IP address specified"); return; } if (!(*cfg)["AllowedDir"]) { logger.msg(Arc::ERROR, "Invalid configuration - no allowed dirs specified"); return; } for (int n = 0;;++n) { Arc::XMLNode allowed_dir = (*cfg)["AllowedDir"][n]; if (!allowed_dir) break; allowed_dirs.push_back((std::string)allowed_dir); } // Start archival thread if (!Arc::CreateThreadFunction(ArchivalThread, this)) { logger.msg(Arc::ERROR, "Failed to start archival thread"); return; } // Create tmp dir for proxies // TODO get from configuration tmp_proxy_dir = "/tmp/arc"; // clear any proxies left behind from previous bad shutdown Arc::DirDelete(tmp_proxy_dir); // Set restrictive umask umask(0077); // Set log level for DTR DataStaging::DTR::LOG_LEVEL = Arc::Logger::getRootLogger().getThreshold(); // Start new DataDelivery delivery.start(); valid = true; } DataDeliveryService::~DataDeliveryService() { // Stop accepting new requests and cancel all active transfers // DataDelivery destructor automatically calls stop() valid = false; // clear any proxies left behind Arc::DirDelete(tmp_proxy_dir); logger.msg(Arc::INFO, "Shutting down data delivery service"); } Arc::MCC_Status DataDeliveryService::process(Arc::Message &inmsg, Arc::Message &outmsg) { if (!valid) return make_soap_fault(outmsg, "Service is not valid"); // Check authorization if(!ProcessSecHandlers(inmsg, "incoming")) { logger.msg(Arc::ERROR, "Unauthorized"); return make_soap_fault(outmsg, "Authorization failed"); } std::string method = inmsg.Attributes()->get("HTTP:METHOD"); if(method == "POST") { logger.msg(Arc::VERBOSE, "process: POST"); logger.msg(Arc::VERBOSE, "Identity is %s", inmsg.Attributes()->get("TLS:PEERDN")); // Both input and output are supposed to be SOAP // Extracting payload Arc::PayloadSOAP* inpayload = NULL; try { inpayload = dynamic_cast(inmsg.Payload()); } catch(std::exception& e) { }; if(!inpayload) { logger.msg(Arc::ERROR, "input is not SOAP"); return make_soap_fault(outmsg); } // Applying known namespaces inpayload->Namespaces(ns); if(logger.getThreshold() <= Arc::DEBUG) { std::string str; inpayload->GetDoc(str, true); logger.msg(Arc::DEBUG, "process: request=%s",str); } // Analyzing request Arc::XMLNode op = inpayload->Child(0); if(!op) { 
logger.msg(Arc::ERROR, "input does not define operation"); return make_soap_fault(outmsg); } logger.msg(Arc::VERBOSE, "process: operation: %s",op.Name()); Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(ns); outpayload->Namespaces(ns); Arc::MCC_Status result(Arc::STATUS_OK); // choose operation // Make a new request if (MatchXMLName(op,"DataDeliveryStart")) { result = Start(*inpayload, *outpayload); } // Query a request else if (MatchXMLName(op,"DataDeliveryQuery")) { result = Query(*inpayload, *outpayload); } // Cancel a request else if (MatchXMLName(op,"DataDeliveryCancel")) { result = Cancel(*inpayload, *outpayload); } // ping service else if (MatchXMLName(op,"DataDeliveryPing")) { result = Ping(*inpayload, *outpayload); } // Delegate credentials. Should be called before making a new request else if (delegation.MatchNamespace(*inpayload)) { if (!delegation.Process(*inpayload, *outpayload)) { delete outpayload; return make_soap_fault(outmsg); } } // Unknown operation else { logger.msg(Arc::ERROR, "SOAP operation is not supported: %s", op.Name()); delete outpayload; return make_soap_fault(outmsg); } if (!result) return make_soap_fault(outmsg, result.getExplanation()); if (logger.getThreshold() <= Arc::DEBUG) { std::string str; outpayload->GetDoc(str, true); logger.msg(Arc::DEBUG, "process: response=%s", str); } outmsg.Payload(outpayload); if (!ProcessSecHandlers(outmsg,"outgoing")) { logger.msg(Arc::ERROR, "Security Handlers processing failed"); delete outmsg.Payload(NULL); return Arc::MCC_Status(); } } else { // only POST supported logger.msg(Arc::ERROR, "Only POST is supported in DataDeliveryService"); return Arc::MCC_Status(); } return Arc::MCC_Status(Arc::STATUS_OK); } bool DataDeliveryService::RegistrationCollector(Arc::XMLNode &doc) { Arc::NS isis_ns; isis_ns["isis"] = "http://www.nordugrid.org/schemas/isis/2008/08"; Arc::XMLNode regentry(isis_ns, "RegEntry"); regentry.NewChild("SrcAdv").NewChild("Type") = "org.nordugrid.execution.datadeliveryservice"; regentry.New(doc); return true; } Arc::MCC_Status DataDeliveryService::make_soap_fault(Arc::Message& outmsg, const std::string& reason) { Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(ns,true); Arc::SOAPFault* fault = outpayload?outpayload->Fault():NULL; if(fault) { fault->Code(Arc::SOAPFault::Sender); if (reason.empty()) fault->Reason("Failed processing request"); else fault->Reason("Failed processing request: "+reason); } outmsg.Payload(outpayload); return Arc::MCC_Status(Arc::STATUS_OK); } } // namespace DataStaging extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "datadeliveryservice", "HED:SERVICE", NULL, 0, &DataStaging::get_service }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/services/data-staging/PaxHeaders.7502/arc-datadelivery-service-start.in0000644000000000000000000000012613153455353030524 xustar000000000000000026 mtime=1504598763.23014 30 atime=1513200651.126638961 30 ctime=1513200664.012796564 nordugrid-arc-5.4.2/src/services/data-staging/arc-datadelivery-service-start.in0000644000175000002070000001120313153455353030564 0ustar00mockbuildmock00000000000000#!/bin/bash add_library_path() { location="$1" if [ ! "x$location" = "x" ] ; then if [ ! 
"$location" = "/usr" ] ; then libdir="$location/lib" libdir64="$location/lib64" if [ -d "$libdir64" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir64" else LD_LIBRARY_PATH="$libdir64:$LD_LIBRARY_PATH" fi fi if [ -d "$libdir" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir" else LD_LIBRARY_PATH="$libdir:$LD_LIBRARY_PATH" fi fi fi fi } prog=arched RUN=yes # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/arc-datadelivery-service ]; then . /etc/sysconfig/arc-datadelivery-service elif [ -r /etc/default/arc-datadelivery-service ]; then . /etc/default/arc-datadelivery-service fi # GLOBUS_LOCATION GLOBUS_LOCATION=${GLOBUS_LOCATION:-@DEFAULT_GLOBUS_LOCATION@} if [ ! -d "$GLOBUS_LOCATION" ]; then echo "GLOBUS_LOCATION ($GLOBUS_LOCATION) not found" exit 1 fi export GLOBUS_LOCATION # ARC_LOCATION ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then echo "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi export ARC_LOCATION # Needed to get pid file readconfigvar() { fname=$1 if [ ! -r "$fname" ]; then return fi bname="[$2]" vname=$3 value= cat "$fname" | grep -e '^\[' -e "^${vname}[[:space:]]*=" | { while true; do read line if [ ! $? = 0 ] ; then return fi if [ "$line" = "$bname" ] ; then while true ; do read line if [ ! $? = 0 ] ; then return fi lstart=`echo "$line" | head -c 1` if [ "$lstart" = '[' ] ; then return fi vlname=`echo "$line" | sed 's/=.*//;t;s/.*//'` vlname=`eval "echo $vlname"` if [ "$vlname" = "$vname" ] ; then val=`echo "$line" | sed 's/[^=]*=//'` eval "echo $val" return fi done fi done } } # ARC_CONFIG if [ "x$ARC_CONFIG" = "x" ]; then if [ -r $ARC_LOCATION/etc/arc.conf ]; then ARC_CONFIG=$ARC_LOCATION/etc/arc.conf elif [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi fi PID_FILE=`readconfigvar "$ARC_CONFIG" common pidfile` if [ `id -u` = 0 ] ; then if [ "x$PID_FILE" = "x" ]; then PID_FILE=/var/run/$prog-datadelivery-service.pid fi else if [ "x$PID_FILE" = "x" ]; then PID_FILE=$HOME/$prog-datadelivery-service.pid fi fi LOG_FILE=`readconfigvar "$ARC_CONFIG" common logfile` if [ "x$LOG_FILE" = "x" ]; then LOG_FILE=/var/log/arc/datadelivery-service.log fi prepare() { CMD="$ARC_LOCATION/sbin/$prog" if [ ! -x "$CMD" ]; then echo "Missing executable" exit 1 fi if [ ! -r "$ARC_CONFIG" ]; then echo "ARC configuration not found (usually /etc/arc.conf)" exit 1 fi # check that at least service is defined grep -e '^\[datadelivery-service\]' $ARC_CONFIG 1>/dev/null 2>&1 if [ $? -ne 0 ] ; then echo "Data delivery service not defined in configuration" exit 1 fi # check that if service is insecure no allowed_dns are defined SECURE=`readconfigvar "$ARC_CONFIG" common secure` ALLOWEDDN=`readconfigvar "$ARC_CONFIG" datadelivery-service allowed_dn` if [ "$SECURE" = "no" ]; then if [ "x$ALLOWEDDN" != "x" ]; then echo "allowed_dn cannot be used with secure=no" exit 1 fi fi # Assuming ini style config CMD="$CMD -i '$ARC_CONFIG' -p '$PID_FILE' -l '$LOG_FILE'" add_library_path "$GLOBUS_LOCATION" if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@ else LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@:$LD_LIBRARY_PATH fi export LD_LIBRARY_PATH cd / } if [ "$RUN" != "yes" ] ; then echo "arc-datadelivery-service disabled, please adjust the configuration to your" echo "needs and then set RUN to 'yes' in /etc/default/arc-datadelivery-service to" echo "enable it." 
return 0 fi prepare exec "$CMD" nordugrid-arc-5.4.2/src/services/data-staging/PaxHeaders.7502/README0000644000000000000000000000012311620001662023023 xustar000000000000000026 mtime=1312818098.16033 27 atime=1513200575.481713 30 ctime=1513200664.009796527 nordugrid-arc-5.4.2/src/services/data-staging/README0000644000175000002070000000011211620001662023063 0ustar00mockbuildmock00000000000000DataDeliveryService is a HED service for executing data transfer requests.nordugrid-arc-5.4.2/src/services/PaxHeaders.7502/ldap-monitor0000644000000000000000000000013213214316027022135 xustar000000000000000030 mtime=1513200663.650792136 30 atime=1513200668.716854096 30 ctime=1513200663.650792136 nordugrid-arc-5.4.2/src/services/ldap-monitor/0000755000175000002070000000000013214316027022260 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712047045306024254 xustar000000000000000027 mtime=1352420038.089673 30 atime=1513200604.758071855 30 ctime=1513200663.643792051 nordugrid-arc-5.4.2/src/services/ldap-monitor/Makefile.am0000644000175000002070000000032512047045306024316 0ustar00mockbuildmock00000000000000SUBDIRS = man mon-icons lang includes monitordir = @ldap_monitor_prefix@ dist_monitor_DATA = $(srcdir)/*.php $(srcdir)/*.js monitor_DATA = README install-data-local: $(MKDIR_P) $(DESTDIR)$(monitordir)/cache nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/userlist.php0000644000000000000000000000012411605357421024603 xustar000000000000000027 mtime=1310056209.254814 27 atime=1513200575.650715 30 ctime=1513200663.639792002 nordugrid-arc-5.4.2/src/services/ldap-monitor/userlist.php0000644000175000002070000002051311605357421024651 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; $giislist = &$toppage->giislist; // Header table $toppage->tabletop("",$toptitle." $family"); // Array defining the attributes to be returned $lim = array( "dn", USR_USSN, USR_CPUS, USR_QUEU, USR_DISK ); $ulim = array( "dn", JOB_NAME, JOB_EQUE, JOB_ECLU, JOB_GOWN, JOB_SUBM, JOB_STAT, JOB_USET, JOB_ERRS, JOB_CPUS ); if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 20; $tout = 20; if( $debug ) dbgmsg("
    :::> ".$errors["114"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); // ldapsearch filter string for jobs $filter = "(&(objectclass=".OBJ_USER.")(".USR_USSN."=$uname))"; $ufilter = "(&(objectclass=".OBJ_AJOB.")(".JOB_GOWN."=$uname))"; $gentries = recursive_giis_info($giislist,"cluster",$errors,$debug); $nc = count($gentries); if ( !$nc ) { $errno = "1"; echo "
    ".$errors[$errno]."\n"; return $errno; } $dsarray = array (); $hnarray = array (); $pnarray = array (); $sitetag = array (); /* a tag to skip duplicated entries */ for ( $k = 0; $k < $nc; $k++ ) { $clhost = $gentries[$k]["host"]; $clport = $gentries[$k]["port"]; $clconn = ldap_connect($clhost,$clport); if ( $clconn && !$sitetag[$clhost] ) { array_push($dsarray,$clconn); array_push($hnarray,$clhost); array_push($pnarray,$clport); $sitetag[$clhost] = 1; /* filtering tag */ } } $nhosts = count($dsarray); if ( !$nhosts ) { // NO SITES REPLY... $errno = "2"; echo "
    ".$errors[$errno]."\n"; return $errno; } // Search all clusters for (a) allowed queues and (b) for user jobs $uiarray = @ldap_search($dsarray,DN_LOCAL,$filter,$lim,0,0,$tlim,LDAP_DEREF_NEVER); // Loop on results: first go queues // HTML table initialisation $utable = new LmTable("userres",$strings["userres"]); $urowcont = array(); $dnmsg = "".$errors["420"].": ".$uname; $utable->adderror($dnmsg, "#cccccc"); $nauclu = 0; $goodds = array(); $goodhn = array(); $goodpn = array(); for ( $ids = 0; $ids < $nhosts; $ids++ ) { $ui = $uiarray[$ids]; $hn = $hnarray[$ids]; $pn = $pnarray[$ids]; $dst = $dsarray[$ids]; $curl = popup("clusdes.php?host=$hn&port=$pn",700,620,1,$lang,$debug); if ($dst && $ui) { $nqueues = @ldap_count_entries($dst,$ui); if ($nqueues > 0) { $nauclu++; array_push($goodds,$dst); array_push($goodhn,$hn); array_push($goodpn,$pn); // If there are valid entries, tabulate results $allres = ldap_get_entries($dst,$ui); $results = ldap_purge($allres); $nqueues = $allres["count"]; // define("CMPKEY",USR_CPUS); // usort($allres,"ldap_entry_comp"); // loop on queues for ($j=0; $j<$nqueues; $j++) { $parts = ldap_explode_dn($allres[$j]["dn"],0); foreach ($parts as $part) { $pair = explode("=",$part); switch ( $pair[0] ) { case CLU_NAME: $ucluster = $pair[1]; break; case QUE_NAME: $uqueue = $pair[1]; break; } } if ( $debug == 2 ) dbgmsg("$hn -- $ucluster
    "); $qurl = popup("quelist.php?host=$ucluster&port=$pn&qname=$uqueue",750,430,6,$lang,$debug); $curl = popup("clusdes.php?host=$ucluster&port=$pn",700,620,1,$lang,$debug); $fcpu = $allres[$j][USR_CPUS][0]; $fproc = freeproc($fcpu); $fdisk = $allres[$j][USR_DISK][0]; $exque = $allres[$j][USR_QUEU][0]; $urowcont[] = "$ucluster:$uqueue"; $urowcont[] = $fcpu; $urowcont[] = $exque; $urowcont[] = $fdisk; $utable->addrow($urowcont); $urowcont = array(); } } else { $utable->adderror("".$errors["11"]." $hn"); } } else { $utable->adderror("$hn ".$errors["12"].""); } @ldap_free_result($ui); } $utable->adderror("".$errors["421"].$nauclu.$errors["422"]."", "#0099FF"); $utable->close(); echo "
    \n"; $srarray = @ldap_search($goodds,DN_LOCAL,$ufilter,$ulim,0,0,$tlim,LDAP_DEREF_NEVER); // HTML table initialisation $jtable = new LmTable($module,$toppage->$module); $rowcont = array(); $jcount = 0; $nghosts = count($goodds); for ( $ids = 0; $ids < $nghosts; $ids++ ) { $sr = $srarray[$ids]; $dst = $goodds[$ids]; $gpn = $goodpn[$ids]; $ghn = $goodhn[$ids]; if ($dst && $sr) { // If search returned, check that there are valid entries $nmatch = @ldap_count_entries($dst,$sr); if ($nmatch > 0) { // If there are valid entries, tabulate results $allentries = ldap_get_entries($dst,$sr); $entries = ldap_purge($allentries); $njobs = $entries["count"]; define("CMPKEY",JOB_SUBM); usort($entries,"ldap_entry_comp"); // loop on jobs for ($i=1; $i<$njobs+1; $i++) { $jobdn = rawurlencode($entries[$i]["dn"]); $curstat = $entries[$i][JOB_STAT][0]; $stahead = substr($curstat,0,12); if ($stahead=="FINISHED at:") { $ftime = substr(strrchr($curstat, " "), 1); $ftime = cnvtime($ftime); $curstat = "FINISHED at: ".$ftime; } $jname = htmlentities($entries[$i][JOB_NAME][0]); $jobname = ($entries[$i][JOB_NAME][0]) ? $jname : "N/A"; $queue = ($entries[$i][JOB_EQUE][0]) ? $entries[$i][JOB_EQUE][0] : "N/A"; $cluster = ($entries[$i][JOB_ECLU][0]) ? $entries[$i][JOB_ECLU][0] : "N/A"; $time = ($entries[$i][JOB_USET][0]) ? $entries[$i][JOB_USET][0] : "N/A"; $ncpus = ($entries[$i][JOB_CPUS][0]) ? $entries[$i][JOB_CPUS][0] : ""; $error = ($entries[$i][JOB_ERRS][0]); if ( $error ) $error = ( preg_match("/user/i",$error) ) ? "X" : "!"; if ( $debug == 2 ) dbgmsg("$ghn --- $cluster
    "); $newwin = popup("jobstat.php?host=$cluster&port=$gpn&status=$status&jobdn=$jobdn",750,430,4,$lang,$debug); $quewin = popup("quelist.php?host=$cluster&port=$gpn&qname=$queue",750,430,6,$lang,$debug); $clstring = popup("clusdes.php?host=$cluster&port=$gpn",700,620,1,$lang,$debug); $jcount++; // filling the table $rowcont[] = "$jcount $error"; $rowcont[] = "$jobname"; $rowcont[] = "$curstat"; $rowcont[] = "$time"; $rowcont[] = "$cluster"; $rowcont[] = "$queue"; $rowcont[] = "$ncpus"; $jtable->addrow($rowcont); $rowcont = array(); } } } @ldap_free_result($sr); } if ( !$jcount ) $jtable->adderror("".$errors["13"].$family.""); $jtable->close(); return 0; // Done $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/lang0000644000000000000000000000013213214316027023056 xustar000000000000000030 mtime=1513200663.747793323 30 atime=1513200668.717854109 30 ctime=1513200663.747793323 nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/0000755000175000002070000000000013214316027023201 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711634415166025203 xustar000000000000000027 mtime=1316100726.768006 30 atime=1513200604.859073091 30 ctime=1513200663.738793213 nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/Makefile.am0000644000175000002070000000016311634415166025245 0ustar00mockbuildmock00000000000000monitorlangdir = @ldap_monitor_prefix@/lang monitorlang_DATA = $(srcdir)/*.inc EXTRA_DIST = $(monitorlang_DATA) nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/PaxHeaders.7502/de.inc0000644000000000000000000000012412050701227024214 xustar000000000000000027 mtime=1352893079.143025 27 atime=1513200575.701716 30 ctime=1513200663.740793237 nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/de.inc0000644000175000002070000014167212050701227024274 0ustar00mockbuildmock00000000000000 N/A bezeichnet einen Job ohne Namen.
    X bezeichnet einen Job, der durch den Nutzer abgebrochen wurde.
    ! bezeichnet einen Job, der nicht erfolgreich ausgeführt wurde.
    Klicken Sie auf den jeweiligen Jobnamen für eine detaillierte Beschreibung."; $str_nam = "Name des Nutzers wie im user certificate spezifiziert. Klicken Sie auf den Namen, um eine Liste aller Ressourcen zu erhalten, die für diesen Nutzer zur Verfügung stehen, sowie eine Liste aller Jobs dieses Users im System."; $str_sta = "Jobstatus wie angegeben durch den Grid Manager (GM) und das LRMS; Jobs durchlaufen die Zustände in der folgenden Reihenfolge:
    ACCEPTED – Job wurde submitted, aber er wird noch nicht ausgeführt.
    PREPARING – Eingabedateien werden übertragen
    SUBMITTING – Interaktion mit dem LRMS
    INLRMS – der Job ist unter der Kontrolle des LRMS; sein interner Zustand wird durch das Infosystem bestimmt. Mögliche solche Zustände sind:
    : Q – Job ist in der Queue (queued)
    : U – Job wurde unterbrochen (suspended) wegen eines anderen Prozesses (PBSPro)
    : S – Job wurde unterbrochen (suspended) (Condor)
    : R, run – Job wird ausgeführt
    : E – Job wird beendet (PBS)
    FINISHING – Ausgabedateien werden durch den GM transferiert
    FINISHED – Job wurde beendet, eine Zeitmarke (time stamp) wird durch das Infosystem hinzugefügt
    CANCELING – Job wurde abgebrochen
    DELETED – Job wurde nicht nach dem Download des Anwenders, sondern durch den GM wegen Überschreitung der Ablauffrist (expiration date) gelöscht.
    Jeder der Zustände kann durch den Prefix PENDING gekennzeichnet sein. Der GM versucht dann, diesen Zustand für diesen Job zu erreichen."; $str_tim = "CPU-Zeit des Jobs, gemessen in Minuten."; $str_mem = "Speicherbedarf des Jobs, gemessen in KB."; $str_cpu = "Die Anzahl genutzter Prozessoren des Jobs."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Grid Monitor", "help" => "
    Diese Übersicht zeigt alle Teilnehmer, die an der Spitze des ARC registiert sind. Sie sind primär nach ihrem Land sortiert und dann anhand deren Namen. Ausgewählte Parameter werden überwacht: Cluster alias, die Anzahl aller CPUs und solcher reserviert für lokale Jobs, die Anzahl laufender und wartender Aufträge. Nutzen Sie die "Search" Funktion, um andere Charakteristica von Clustern, Queues, Aufträgen, etc. zu vergleichen
    Land
    ".$clickable.". Landesflagge und -name wie abgeleitet von der resource-Beschreibung. Anklicken, um Informationen zur Gridnutzung dieses Landes zu sehen.
    Cluster
    ".$clickable.". Alternativer Name des Cluster wie durch dessen owner festgelegt. Es werden maximal 22 Zeichen dargestellt. Durch Anlicken werden detaillierte Informationen zum Cluster dargestellt.
    CPUs
    Gesamtanzahl der CPUs im Cluster. NB! Nur ein Teil dieser mag tatsächlich auch für Grid Nutzer verfügbar sein.
    Last (Prozesse:Grid+lokal)
    ".$clickable.". Relative Auslastung des Clusters, abgeleitet von der Anzahl belegter CPUs. Graue Balken stellen die mit lokalen Jobs belegten CPUs dar, rote Balken solche, die von über das Grid submitteten Jobs beansprucht sind. Klicke auf die Balken, um eine detaillierte Liste aller Jobs zu erhalten, inklusive der Anzahl genutzter Prozessoren je Job.
    Wartend
    ".$clickable.". Anzahl aller wartenden Jobs auf dem Cluster, angezeigt als die Anzahl solcher durch das Grid submitteter Jobs plus die Anzahl derjenigen, die lokal submitted wurden. Klicke auf die erste Nummer, um die Liste der wartenden Grid-Jobs zu erhalten.
    ", "Land" => 30, "Site" => 160, "CPUs" => 10, "Last (Prozesse: Grid+lokal)" => 210, "In einer Queue" => 10 ), "clusdes" => array("0" => "Details einer Resource zu", "help" => "
    Attribut
    ".$clickable.". Cluster Attributename".$str_att."
    Wert
    ".$str_val."
    Queue
    ".$clickable.". Namen von batch queues verfügbar für ARC Nutzer, wie festgelegt durch die owner des Clusters." .$str_que."
    Status
    Queue status. Eine operationelle Queue hat typischerweise den Status active.
    CPU (min)
    Zeitbegrenzung für einen Job. Der erste Wert ist die untere Grenze, der zweite die obere. Wenn keine Begrenzungen gesetzt sind (es wird dann jede Lauflänge akzeptiert), wird N/A angezeigt.
    Running
    Die Anzahl von Jobs, die in der Queue aktiv sind. Die Gesamtanzahl der Jobs wird angezeigt, mit der Anzahl belegter Prozessoren in Klammern. NB! Für Jobs mit Parallelverarbeitung kann diese Anzahl deutlich höher sein als die Anzahl der Jobs.
    Queueing
    Anzahl von Jobs, die auf deren Ausführung warten. Die Gesamtanzahl wird angezeigt mit der Anzahl durch das Grid submitteter Jobs in Klammern.
    ", "Queue" => 0, "Mapping Queue" => 0, "Status" => 0, "Limiten (min)" => 0, "CPUs" => 0, "Running" => 0, "Queueing" => 0 ), "jobstat" => array("0" => "Jobs at:Job ID", "help" => "
    JOB LIST:
    Job name
    ".$clickable.". Name des Jobs wie durch den owner festgelegt. Wenn kein Name zugewiesen wurde, so wird "N/A" angezeigt. Bei Klick auf dem Namen wird eine detaillierte Beschreibung des Jobs angezeigt.
    Owner
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Queue
    ".$clickable.". Name der Queue in der der Job ausgeführt wird. ".$str_que."
    CPUs
    ".$str_cpu."
    JOB DETAILS:
    Attribut
    ".$clickable.". Job Attributname".$str_att."
    Wert
    ".$str_val."
    ", "Jobname" => 0, "Eigner" => 0, "Status" => 0, "CPU (min)" => 0, "Queue" => 0, "CPUs" => 0 ), "volist" => array("0" => "Virtuelle Organisationen", "help" => "
    Virtuelle Organisation
    ".$clickable.". Gruppe von Anwendern, üblicherweise gemeinsame Aktivitäten und Ressourcen teilend. Wenigstens ein Cluster des ARC akzeptiert diese. Klicken Sie auf den Namen, um eine Liste der Mitglieder zu erhalten.
    Mitglieder
    Anzahl der Mitglieder.
    Verwaltet durch
    LDAP Server der die Mitglieder-Datenbank hält.
    ", "Virtuelle Organisation" => 0, "Mitglieder" => 0, "Verwaltet durch" => 0 ), "vousers" => array("0" => "Grid User Base", "help" => "
    Name
    ".$clickable.". ".$str_nam."
    Affiliation
    Des Nutzers Arbeitgeber wie durch den VO manager angegeben. Kann freigelassen werden.
    E-mail
    ".$clickable.". Des Nutzers eMail-Adresse wie angegeben durch den VO Manager. Darf freigelassen werden. Durch Anklicken der Adresse kann eine eMail an den Nutzer gesendet werden.
    ", "#" => 0, "Name" => 0, "Zugehörigkeit" => 0, "E-mail" => 0 ), "userlist" => array("0" => "Information für", "help" => "
    Cluster:queue
    ".$clickable.". Namen der Cluster und deren Queues (getrennt durch einen Doppelpunkt ":"), auf welche ein Nutzer Zugriff hat. Ist ein Nutzer nicht autorisiert, wird die Nachricht "Not authorised at host ..." angezeigt. Bei Anklicken des Clusternamens wird die Beschreibung des Clusters gegeben, genauso wie bei einer Auswahl der Queue.
    Freie CPUs
    Die Anzahl von freien CPUs, die für eine bestimmte Queue für einen bestimmten Nutzer zu einem bestimmten Moment, ggf. eingeschränkt durch die Angabe der maximalen Laufzeit (Angabe in Minuten), verfügbar sind. Zum Beispiel bedeutet "3", daß 3 CPUs für einen Job unbeschränkter Laufzeit verfügbar sind. "4:360" beschreibt die Verfügbarkeit von vier Jobs für nicht länger al 6 Stunden. "10:180 30" bedeutet, daß 10 CPUs verfügbar sind für Jobs, die nicht länger rechnen als 3 Stunden, sowie weitere 30 für Jobs mit unbeschränkter Laufzeit. "0" bedeutet, daß keine CPUs verfügbar sind und neue Jobs entsprechend warten müssen.
    Wartenden Jobs
    Anzahl von Jobs des Anwenders, die in der Warteschlange vor einem neuen Job sind. Die Zahl "0" bedeutet, dass der Job sofort ausgeführt wird. NB! Dies ist nur eine Abschätzung, durch den Einfluss lokaler Administratoren ist eine sichere Angabe nicht möglich.
    Freier Diskplatz (MB)
    Für einen Nutzer verfügbarer Diskplatz (in Megabytes). NB! Dies ist nur eine Abschätzung, die meisten Cluster haben keine solchen Quotas festgelegt.
    Jobname
    ".$clickable.". ".$str_job."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Cluster
    ".$clickable.". Name des Clusters bei dem der Job ausgeführt wird. Bei Klick auf den Namen werden detaillierte Informationen zu dem Cluster präsentiert.
    Queue
    ".$clickable.". Name der Queue, in der der Job ausgeführt wird oder wurde. ".$str_que."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Jobname" => 0, "Status" => 0, "CPU (min)" => 0, "Cluster" => 0, "Queue" => 0, "CPUs" => 0 ), "attlist" => array("0" => "Attributwerte", "help" => "
    Objekt
    ".$clickable.". Name des Objektes, dessen Attribute angezeigt werden. Es kann ein Cluster sein, dessen Queue, ein Job, ein Anwender etc. Nach einer Auswahl durch Anklicken der Zeichenkette werden detaillierte Informationen angezeigt.
    Attribute
    Für jedes Objekt werden ein oder mehrere Attribute angezeigt. Der Spaltentitel ist der Klarname des Attributes, von einigen MDS-spezifischen Attributen abgesehen; der Inhalt entspricht den Werten, wie sie im Informationssystem abgelegt sind.
    ", "Object" => 0, "Attribute" => 0 ), "quelist" => array("0" => "Queue", "help" => "
    Attribut
    ".$clickable.". Name des Queue Attributs".$str_att."
    Wert
    ".$str_val."
    Jobname
    ".$clickable.". ".$str_job."
    Eigner
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Speicher (KB)
    ".$str_mem."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Jobname" => 0, "Eigner" => 0, "Status" => 0, "CPU (min)" => 0, "Speicher (KB)" => 0, "CPUs" => 0 ), "sestat" => array("0" => "Storage Elements", "help" => "
    Alias
    Storage Element Zweitname wie festgelegt im Informationssystem. Maximal 15 Zeichen werden angezeigt.
    Tot. space
    Totaler Plattenplatz (GB).
    Freier Plattenplatz
    Verfügbarer Plattenplatz in GB.
    Name
    Name des Storage Elements, bestehend aus einem logischen Namen und dem Namen des Hosts, getrennt durch einen Doppelpunk ":"). Der logische Name wird nur für die interne Verwaltung genutzt, um verschiedene Einheiten auf demselben System zu unterscheiden.
    Basis URL
    URL des Storage Elements, üblich ist die Verwendung des gsiftp:// Protokols. Dieser URL dient als Basis für den Zugriff auf Dateien.
    Typ
    Storage Element typ. "gridftp-based" beschreibt Plattenplatz der über die GridFTP Schnittstelle verfügbar ist.
    ", "#" => 0, "Alias" => 0, // "Tot. Platz" => 0, "Freier/Tot. Platz, GB" => 0, "Name" => 0, "Basis URL" => 0, "Typ" => 0 ), "allusers" => array("0" => "Authorised Grid Users:Active Grid Users", "help" => "
    Name
    ".$clickable.". ".$str_nam."
    Zugehörigkeit:
    Das Institut oder die Firma, die den Anwender beschäftigt. Der Eintrag ist abgeleitet vom personal certificate
    Jobs
    Zählt alle Jobs des Anwenders im System (running, pending, finished oder deleted)
    Sites
    Gibt an, wieviele teilnehmende Cluster Aufträge dieses Nutzers annehmen.
    ", "#" => 0, "Name" => 0, "Zugehörigkeit" => 0, "Jobs" => 0, "Sites" => 0 ), "userres" => array("0" => "", "Cluster:queue" => 0, "Freie CPUs" => 0, "Wartenden Jobs" => 0, "Freier Plattenplatz (MB)" => 0 ), "ldapdump" => array("0" => "", "Attribut" => 0, "Wert" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info gültig von (GMT)", "Mds-validto" => "Info gültig bis (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Front-end domain Name", "nordugrid-cluster-aliasname" => "Cluster Alias", "nordugrid-cluster-contactstring" => "Kontakt", "nordugrid-cluster-interactive-contactstring" => "Interactiver Kontakt", "nordugrid-cluster-comment" => "Kommentar", "nordugrid-cluster-support" => "E-mail Kontakt", "nordugrid-cluster-acl" => "Autorisierte VOs", "nordugrid-cluster-lrms-type" => "LRMS Typ", "nordugrid-cluster-lrms-version" => "LRMS Version", "nordugrid-cluster-lrms-config" => "LRMS Details", "nordugrid-cluster-architecture" => "Architektur", "nordugrid-cluster-opsys" => "Operating System", "nordugrid-cluster-homogeneity" => "Homogener Cluster", "nordugrid-cluster-nodecpu" => "CPU Typ (langsamster)", "nordugrid-cluster-nodememory" => "Memory (MB, kleinster)", "nordugrid-cluster-totalcpus" => "CPUs, gesamt", "nordugrid-cluster-cpudistribution" => "CPU:Hosts", "nordugrid-cluster-benchmark" => "Benchmark", "nordugrid-cluster-sessiondir-free" => "Plattenplatz, verfügbar (MB)", "nordugrid-cluster-sessiondir-total" => "Plattenplatz, gesamt (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Lebensdauer der Grid Session (min)", "nordugrid-cluster-cache-free" => "Cache size, verfügbar (MB)", "nordugrid-cluster-cache-total" => "Cache size, gesamt (MB)", "nordugrid-cluster-runtimeenvironment" => "Runtime environment", "nordugrid-cluster-localse" => "Storage Element, lokal", "nordugrid-cluster-middleware" => "Grid middleware", "nordugrid-cluster-totaljobs" => "Jobs, totale Anzahl", "nordugrid-cluster-usedcpus" => "CPUs, belegt", "nordugrid-cluster-queuedjobs" => "Jobs, in Queue wartend", "nordugrid-cluster-prelrmsqueued" => "Grid jobs, noch nicht submitted", "nordugrid-cluster-location" => "Postleitzahl", "nordugrid-cluster-owner" => "Eigner", "nordugrid-cluster-issuerca" => "Zertifikat-Aussteller", "nordugrid-cluster-issuerca-hash" => "Zertifikat-Aussteller's kash", "nordugrid-cluster-trustedca" => "Akzeptierte Zertificat-Aussteller", "nordugrid-cluster-nodeaccess" => "IP Konnektivität der Hosts", "nordugrid-cluster-gridarea" => "Session area (OBSOLETE)", "nordugrid-cluster-gridspace" => "Grid Plattenplatz (OBSOLETE)", "nordugrid-cluster-opsysdistribution" => "OS Distribution (OBSOLETE)", "nordugrid-cluster-runningjobs" => "Jobs, running (OBSOLETE)", "nordugrid-cluster-credentialexpirationtime" => "Credential expiration time", "nordugrid-queue-name" => "Queue Name", "nordugrid-queue-comment" => "Kommentar", "nordugrid-queue-status" => "Queue Status", "nordugrid-queue-running" => "CPUs, belegt", "nordugrid-queue-localqueued" => "Local jobs, queued", "nordugrid-queue-prelrmsqueued" => "Grid jobs, noch nicht submitted", "nordugrid-queue-queued" => "Jobs, queued (OBSOLETE)", "nordugrid-queue-maxrunning" => "Jobs, running (max)", "nordugrid-queue-maxqueuable" => "Jobs, queueable (max)", "nordugrid-queue-maxuserrun" => "Jobs pro Unix User (max)", "nordugrid-queue-maxcputime" => "CPU Zeit, max. (min)", "nordugrid-queue-mincputime" => "CPU Zeit, min. 
(min)", "nordugrid-queue-defaultcputime" => "CPU Zeit, default (min)", "nordugrid-queue-maxwalltime" => "Zeit auf Küchenuhr, max. (min)", "nordugrid-queue-minwalltime" => "Zeit auf Küchenuhr, min. (min)", "nordugrid-queue-defaultwalltime" => "Zeit auf Küchenuhr, default (min)", "nordugrid-queue-schedulingpolicy" => "Scheduling policy", "nordugrid-queue-totalcpus" => "CPUs, gesamt", "nordugrid-queue-nodecpu" => "CPU Typ", "nordugrid-queue-nodememory" => "Speicher (MB)", "nordugrid-queue-architecture" => "Architektur", "nordugrid-queue-opsys" => "Betriebssystem", "nordugrid-queue-homogeneity" => "Homogene Queue", "nordugrid-queue-gridrunning" => "CPUs occupied by Grid jobs", "nordugrid-queue-gridqueued" => "Grid jobs, queued", "nordugrid-queue-benchmark" => "Benchmark", "nordugrid-queue-assignedcpunumber" => "CPUs je Queue (OBSOLETE)", "nordugrid-queue-assignedcputype" => "CPU typ (OBSOLETE)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Eigner", "nordugrid-job-execcluster" => "Execution Cluster", "nordugrid-job-execqueue" => "Execution Queue", "nordugrid-job-stdout" => "Standard output file", "nordugrid-job-stderr" => "Standard error file", "nordugrid-job-stdin" => "Standard input file", "nordugrid-job-reqcputime" => "Verlangte CPU Zeit", "nordugrid-job-reqwalltime" => "Verlangte Zeit auf Küchenuhr", "nordugrid-job-status" => "Status", "nordugrid-job-queuerank" => "Position in der Queue", "nordugrid-job-comment" => "LRMS Kommentar", "nordugrid-job-submissionui" => "Submitted von", "nordugrid-job-submissiontime" => "Submitted um (GMT)", "nordugrid-job-usedcputime" => "Benötigte CPU Zeit", "nordugrid-job-usedwalltime" => "Benötigte Zeit auf Küchenuhr", "nordugrid-job-completiontime" => "Job beendet um (GMT)", "nordugrid-job-sessiondirerasetime" => "Job gelöscht um (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxy Verfallzeit (GMT)", "nordugrid-job-usedmem" => "Verwendeter Speicher (KB)", "nordugrid-job-errors" => "Errors", "nordugrid-job-exitcode" => "Exit Code", "nordugrid-job-jobname" => "Name", "nordugrid-job-runtimeenvironment" => "Runtime environment", "nordugrid-job-cpucount" => "Angeforderte CPUs", "nordugrid-job-executionnodes" => "Aufrührende Rechner", "nordugrid-job-gmlog" => "GM Logdatei", "nordugrid-job-clientsoftware" => "Version des Clients", "nordugrid-job-rerunable" => "Rerunnable", "nordugrid-job-reqcput" => "Requested time (OBSOLETE)", "nordugrid-job-gridlog" => "Gridlog file (OBSOLETE)", "nordugrid-job-lrmscomment" => "LRMS comment (OBSOLETE)", "nordugrid-authuser-name" => "Name", "nordugrid-authuser-sn" => "Subject Name", "nordugrid-authuser-freecpus" => "Freie CPUs", "nordugrid-authuser-diskspace" => "Freier Plattenplatz (MB)", "nordugrid-authuser-queuelength" => "Wartende Jobs des Users", "nordugrid-se-name" => "Name", "nordugrid-se-aliasname" => "Storage Element Alias", "nordugrid-se-type" => "Storage Element Typ", "nordugrid-se-acl" => "Autorisierte VOs", "nordugrid-se-freespace" => "Freier Plattenplatz (MB)", "nordugrid-se-totalspace" => "Gesamter Plattenplatz (MB)", "nordugrid-se-url" => "Kontakt URL", "nordugrid-se-baseurl" => "Kontakt URL (OBSOLETE)", "nordugrid-se-accesscontrol" => "Zugangskontrolle", "nordugrid-se-authuser" => "Zugelassene User (DN)", "nordugrid-se-location" => "Postleitzahl", "nordugrid-se-owner" => "Eigner", "nordugrid-se-middleware" => "Middleware", "nordugrid-se-issuerca" => "Zertifikat-Aussteller", "nordugrid-se-issuerca-hash" => "Zertifikat-Aussteller ID", "nordugrid-se-trustedca" => "Vertrauten 
Zertifikat-Ausstellern", "nordugrid-se-comment" => "Kommentar", "nordugrid-rc-name" => "Domainname", "nordugrid-rc-aliasname" => "Replica Catalog Alias", "nordugrid-rc-baseurl" => "Kontakt URL", "nordugrid-rc-authuser" => "Zugelassene User (DN)", "nordugrid-rc-location" => "Postleitzahl", "nordugrid-rc-owner" => "Eigner", "nordugrid-rc-issuerca" => "Zertifikat-Aussteller" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Die top-level resource Indizes konnten nicht gelesen werden", "2" => "Keiner der lokalen Indizes konnte erreicht werden", // ? "3" => " schlechte Konfiguration oder Zeitüberschreitung bei der Anfrage", "4" => "Keine Grid Jobs gefunden", "5" => "Keine Information gefunden", "6" => "Server nicht verfügbar", "7" => " - später neu laden", "8" => "Keine Informationen zur Queue gefunden", "9" => "Keine Ei nträge gefunden.", "10" => "Keine Nutzer gefunden.", "11" => "Bei diesem Host nicht autorisiert", "12" => "antwortet nicht", "13" => "Keine jüngst submitteten Jobs gefunden für ", // debug messages "101" => " Monitor timeouts für GRIS: ", "102" => " sek für Verbindung und ", "103" => " sek beim Suchen", "104" => " sek verbracht beim Suchen", "105" => "Zeige Ressourcen nur in ", "106" => "Polled top-level Indizes: ", "107" => "Erhielt geographische Ortsangaben, gescante Sites: ", // ? "108" => " sites geographisch geordnet", "109" => "Suche nach Cluster Attributen", "110" => "Suche for Queue Attributen", "111" => "Keine Daten von ", "112" => " funktioniert in ", // ? "113" => " hat keine Resourcen anzubieten", "114" => " Monitor timeouts für GIIS: ", "115" => "Überspringe GRIS: ", "116" => "nicht ein ", "117" => "Teste Verbindung: ", "118" => "OK", "119" => "Entdeckte bislang Ressourcen der folgenden Art ", "120" => "LDAP Fehler beim Suchen ", "121" => " Status bei ", "122" => "Blacklisted: ", "123" => "Registrant gefunden für ", "124" => "Suche nach SE Attributen", "125" => "Suche nach Nutzern", "126" => "Suche nach jobs", "127" => " hat Job ", "128" => " obwohl nicht autorisiert", "129" => "Kann die Objektdaten nicht erhalten: Fehler ", "130" => " Monitor timeouts für EMIR: ", // icon titles "301" => "Update", "302" => "Drucken", "303" => "Hilfe", "304" => "Schließen", "305" => "Rot", "306" => "Grün", "307" => "Alle Nutzer", "308" => "Aktive Nutzer", "309" => "Suchen", "310" => "Storage", "311" => "VOs", "312" => "Flagge von ", "313" => " Grid Prozesse und ", "314" => " lokale Prozesse", // auxilliary strings "401" => "Prozesse", "402" => "Grid", "403" => "Lokal", "404" => "Globus", "405" => "TOTAL", "406" => " sites", "407" => "eine Menge", "408" => " GB", "409" => " ALLE", "410" => "Cluster", "411" => "Queue", "412" => "Job", "413" => "Nutzer", "414" => "Storage", "415" => "Replica Cat.", "416" => "Definiere Attribute, die für das Objekt anzuzeigen sind: ", "417" => "AND von allen Ausdrücken wird gesucht", // ? "418" => "Feld ganz rechts freilassen, um alles anzuzeigen", "419" => "Personalisierte Anzeige von Ressourcen", "420" => "Eindeutiger Name", "421" => "Kann insgesamt nutzen ", "422" => " sites", "423" => "Resource / Object:", "424" => "Nr. von Attributen (def. 
6):", "425" => "Objekt", "426" => "Nächstes", "427" => "Auswahl", "428" => "Reset", "429" => "ANZEIGEN" ), // Post code conversion "tlconvert" => array ( "Australia" => "Australien", "Austria" => "Österreich", "Armenia" => "Armenien", "Algeria" => "Algerien", "Belgium" => "Belgien", "Bulgaria" => "Bulgarien", "Canada" => "Canada", "China" => "China", "Czechia" => "Tschechien", "Denmark" => "Dänemark", "Estonia" => "Estland", "Finland" => "Finnland", "France" => "Frankreich", "Georgia" => "Georgien", "Germany" => "Deutschland", "Greece" => "Griechenland", "Hungary" => "Ungarn", "Iceland" => "Island", "Ireland" => "Irland", "Italy" => "Italien", "Japan" => "Japan", "Latvia" => "Lettland", "Lithuania" => "Litauen", "Morocco" => "Marokko", "Netherlands" => "Niederlande", "Norway" => "Norwegen", "Poland" => "Polen", "Portugal" => "Portugal", "Romania" => "Rumänien", "Russia" => "Russland", "SriLanka" => "Sri Lanka", "Sweden" => "Schweden", "Slovakia" => "Slowakei", "Slovenia" => "Slowenien", "Switzerland" => "Schweiz", "Turkey" => "Türkei", "UK" => "UK", "Ukraine" => "Ukraine", "USA" => "USA" ) ); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/PaxHeaders.7502/sk.inc0000644000000000000000000000012412050701227024241 xustar000000000000000027 mtime=1352893079.143025 27 atime=1513200575.711716 30 ctime=1513200663.745793298 nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/sk.inc0000644000175000002070000015155512050701227024322 0ustar00mockbuildmock00000000000000ARC
    ."; $str_val = "Hodnota atribútu v Informačnom Systéme."; $str_que = "Obyčajne sa jednotlivé rady navzájom líšia prípustným trvaním úloh, prípadne rôznymi skupinami užívateľov. Po kliknutí na príslušnú radu získate podrobné informácie vrátane zoznamu bežiacich, čakajúcich a ukončených úloh."; $str_job = "Názov úlohy zvolený užívateľom.
    N/A znamená, že vlastník nepriradil úlohe žiadne meno.
    X znamená, že úloha bola ukončená vlastníkom.
    ! znamená, že pri plnení úlohy došlo k chybe.
    Kliknutím zobrazíte podrobné informácie o úlohe."; $str_nam = "Meno užívateľa, podľa jeho osobného certifikátu. Kliknutím získate zoznam všetkých jemu dostupných zdrojov, ako aj zoznam všetkých úloh spustených týmto užívateľom momentálne v systéme."; $str_sta = "Stav úlohy: podľa Gridového Manažéra (GM) a systému správy lokálnych zdrojov (LRMS). Poradie možných stavov je na nasledujúce:
    ACCEPTED – úloha je prijatá, ale jej vykonávanie ešte nezačalo
    PREPARING – sťahujú sa vstupné súbory
    SUBMITTING – informácie sa posielajú do LRMS
    INLRMS – správa úlohy predaná LRMS; informačný systém zabezpečuje informácie o vnútornom stave úlohy. Možné sú nasledujúce stavy:
    : Q – úloha čaká v rade
    : U – úloha je pozastavená na preťaženom pracovnom uzle (PBSPro)
    : S – úloha je pozastavená (Condor)
    : R, run – úloha sa vykonáva
    : E – úloha sa ukončuje (PBS)
    FINISHING – výstupné súbory sú prenášané na miesto určenia
    FINISHED – úloha je ukončená; čas ukončenia je stanovený informačným systémom
    CANCELING – úloha sa ruší
    DELETED – výstupy úlohy nezmazané užívateľom, ale zmazané GM po expiračnej dobe
    Ku každému stavu môže byť pridaná predpona \"PENDING:\", ktorá značí, že GM práve nemôže prejsť k nasledujúcemu kroku v dôsledku vnútorných obmedzení."; $str_tim = "CPU čas spotrebovaný úlohou, v minútach."; $str_mem = "Rozsah operačnej pamäte využívanej úlohou, v KB."; $str_cpu = "Počet CPU využívaných úlohou."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Grid Monitor", "help" => "
    V tomto okne sa zobrazujú všetky výpočtové zdroje, ktoré sa registrujú do najvyššieho indexovacieho servisu ARC. Tabuľka je usporiadaná podľa anglického názvu príslušnej krajiny a v sekcii pre danú krajinu podľa názvu hlavného stroja daného zdroja (výpočtového klástra). Pre každý kláster sa uvádzajú nasledujúce parametre: názov, celkové množstvo procesorov, počet obsadených procesorov a tiež počet bežiacich a čakajúcich úloh tak spustených cez Grid ako aj lokálne. Použite utilitu "Vyhľadávanie" pre prezeranie a porovnanie s parametrami ostatných klástrov, rád, úloh atď.
    Krajina
    ".$clickable.". Vlajka a názov krajiny. Názov krajiny určený z dostupného popisu výpočtového zdroja. Kliknite pre zobrazenie zdrojov výlučne z danej krajiny.
    Zdroj
    ".$clickable.". Názov zdroja (obyčajne výpočtového klástra) daný jeho majiteľom. Maximálna dĺžka zobrazeného reťazca je 22 znakov. Kliknite na názov pre podrobné informácie o zdroji.
    CPU
    Celkový počet CPU zdroja. Pozor! Je možné, že iba časť je využiteľná cez Grid.
    Záťaž (procesy)
    ".$clickable.". Relatívne využitie zdroja, zodpovedajúce počtu zaťažených CPU. Sivá úsečka zodpovedá počtu procesorov obsadených lokálnymi úlohami, zelená úsečka zodpovedá procesorom vykonávajúcim Gridové úlohy. Kliknite pre podrobné informácie o všetkých Gridových úlohách bežiacich na zdroji, vrátane informácií o počte procesorov na každú úlohu.
    Čakajúce
    ".$clickable.". Počet všetkých úloh čakajúcich v rade na danom zdroji, uvádzaný ako súčet Gridových a lokálnych úloh. Kliknite na prvé z čísel pre podrobné informácie o všetkých Gridových úlohách čakajúcich v rade.
    ", "Krajina" => 30, "Zdroj" => 160, "Počet CPU" => 10, "Záťaž (procesy: Grid+lokálne)" => 210, "Čakajúce" => 10 ), "clusdes" => array( "0" => "Opis zdroja", "help" => "
    Atribút
    ".$clickable.". Názov atribútov zdroja".$str_att."
    Hodnota
    ".$str_val."
    Rada
    ".$clickable.". Názvy (dané majiteľom zdroja) rád, dostupných Gridovým užívateľom. ".$str_que."
    Stav
    Stav rady. Fungujúca rada obyčajne udáva stav active.
    Trvanie (min)
Limit trvania úlohy v danej rade - ak je limit stanovený - v minútach procesorového času. Prvá zobrazená hodnota je dolný limit, druhá predstavuje horné ohraničenie tohto parametra. Ak limity nie sú stanovené (úlohy ľubovoľného času trvania sú akceptované), zobrazí sa reťazec N/A.
    Bežiace
    Počet úloh spracovávaných v rade. Zobrazuje sa celkový počet úloh, pričom počet procesorov obsadených Gridovými úlohami je uvedený v zátvorkách, napr. (Grid: 12). Pozor! Pri paralelných mnohoprocesorových úlohách môže byť číslo v zátvorkách väčšie ako počet úloh.
    Čakajúce
    Počet úloh čakajúcich na spustenie v rade. Zobrazuje sa celkový počet úloh, pričom množstvo úloh spustených cez Grid je uvedené v zátvorkách, napr. (Grid: 235)
    ", "Rada" => 0, "Mapping Queue" => 0, "Stav" => 0, "Trvanie (min)" => 0, "Počet CPU" => 0, "Bežiace" => 0, "Čakajúce" => 0 ), "jobstat" => array( "0" => "Úlohy na:identifikátor úlohy", "help" => "
    ZOZNAM ÚLOH:
    Názov úlohy
    ".$clickable.". Názov úlohy daný užívateľom. "N/A" znamená, že užívateľ úlohe názov nepriradil. Po kliknutí sa zobrazí detailný popis úlohy.
    Užívateľ
    ".$clickable.". ".$str_nam."
    Stav
    ".$str_sta."
    CPU čas (min)
    ".$str_tim."
    Rada
    ".$clickable.". Názov rady, v ktorej sa úloha vykonáva. ".$str_que."
    Procesory
    ".$str_cpu."
    OPIS ÚLOHY:
    Atribút
    ".$clickable.". Názvy atribútov úlohy".$str_att."
    Hodnota
    ".$str_val."
    ", "Názov úlohy" => 0, "Užívateľ" => 0, "Stav" => 0, "CPU čas (min)" => 0, "Rada" => 0, "Počet CPU" => 0 ), "volist" => array( "0" => "Virtuálne organizácie", "help" => "
    Virtuálne organizácie
    ".$clickable.". Skupina užívateľov, obyčajne zdieľajúca spoločné aktivity a zdroje, autorizovaná na aspoň jednom zdroji zapojenom v ARC. Po kliknutí sa zobrazí zoznam členov skupiny.
    Členovia
    Počet členov skupiny.
    Obsluhuje sa
    Adresa servera spravujúceho databázu s údajmi o členoch skupiny.
    ", "Virtuálna organizácia" => 0, "Členovia" => 0, "Obsluhuje sa" => 0 ), "vousers" => array( "0" => "Užívatelia", "help" => "
    Meno
    ".$clickable.". ".$str_nam."
    Pracovisko
    Pracovisko užívateľa, podľa zápisu v databáze. Nepovinný údaj.
    Elektronická pošta
    ".$clickable.". Adresa elektronickej poštovej schránky užívateľa, podľa zápisu v databáze. Nepovinný údaj. Kliknite na adresu pre poslanie správy užívateľovi.
    ", "#" => 0, "Meno" => 0, "Pracovisko" => 0, "Elektronická pošta" => 0 ), "userlist" => array( "0" => "Informácia pre", "help" => "
    Zdroj:rada
    ".$clickable.". Názvy zdrojov (klástrov) a zodpovedajúcich rád lokálnych systémov správy úloh (LRMS) (oddelené dvojbodkou,":"), na ktorých je užívateľ oprávnený posielať úlohy. Ak užívateľ nie je oprávnený, objaví sa správa: "Not authorised at host ...". Po kliknutí na názov klástra sa zobrazí jeho podrobný opis. Pre získanie podrobností o rade kliknite na jej názov.
    Voľné CPU
    Počet voľných CPU v danej rade, pre daného užívateľa, v danom čase. V prípade, že rada využíva časové ohraničenia na prípustné trvanie behu úlohy, je tento údaj zobrazený za číslom reprezentujúcim počet procesorov (v minútach, oddelený dvojbodkou). Napríklad "3" znamená dostupnosť troch voľných CPU pre úlohy s akoukoľvek dĺžkou trvania; "4:360" označuje dostupnosť štyroch voľných CPU, pre úlohy s dobou trvania behu nie dlhšou ako šesť hodín; "10:180:30" znamená, že je dostupných desať CPU pre úlohy s trvaním nepresahujúcim 3 hodiny a ďalších tridsať procesorov, ktoré môžu prijať úlohy s neobmedzeným časom behu; "0" znamená, že v danom momente nie sú žiadne voľné CPU a úlohy budú zaradené do príslušnej rady ako čakajúce.
    Úlohy v rade
    Počet úloh užívateľa v zozname čakajúcich úloh pred novou úlohou, zaslanou daným užívateľom. Počet "0" znamená, že úloha by sa mala začať vykonávať okamžite. POZOR! Je to odhad, ktorý nemusí zohľadňovať všetky lokálne nastavenia správcu zdroja.
    Voľný diskový priestor (MB)
    Diskový priestor dostupný pre užívateľa v danej rade (v megabajtoch). POZOR! Ide len o odhad, väčšina zdrojov nepodporuje resp. nevyužíva kvóty na diskový priestor.
    Názov úlohy
    ".$clickable.". ".$str_job."
    Stav
    ".$str_sta."
    Čas (min)
    ".$str_tim."
    Zdroj
    ".$clickable.". Názov zdroja (obyčajne klástra), na ktorom sa úloha vykonáva. Po kliknutí sa zobrazia podrobné informácie o zdroji.
    Rada
    ".$clickable.". Názov rady v LRMS, v ktorej sa vykonávajúcej úlohu. ".$str_que."
    Počet CPU
    ".$str_cpu."
    ", "" => 0, "Názov úlohy" => 0, "Stav" => 0, "Čas (min)" => 0, "Zdroj" => 0, "Rada" => 0, "Počet CPU" => 0 ), "attlist" => array( "0" => "Attribute values", "help" => "
    Objekt
    ".$clickable.". Názov objektu, atribúty ktorého sú zobrazené. Môže ísť o názov rady klástra, názov úlohy, meno užívateľa atď. Po kliknutí sa zobrazia podrobné informácie o objekte.
    Atribút
    Pre každý objekt v tabuľke je možné uviesť jeden alebo viacero atribútov. V hlavičke stĺpca je uvedený názov atribútu, upravený do jednoducho čitateľnej formy (s výnimkou niekoľkých atribútov špecifických pre systém MDS), obsah jednotlivých stĺpcov predstavujú význam týchto atribútov podľa toho, ako sú popísané v Informačnom Systéme.
    ", "Objekt" => 0, "Atribút" => 0 ), "quelist" => array( "0" => "Popis rady", "help" => "
    Atribút
    ".$clickable.". Názvy atribútov rady".$str_att."
    Hodnota
    ".$str_val."
    Názov úlohy
    ".$clickable.". ".$str_job."
    Majiteľ
    ".$clickable.". ".$str_nam."
    Stav
    ".$str_sta."
    CPU čas (min)
    ".$str_tim."
    Pamäť (KB)
    ".$str_mem."
    Počet CPU
    ".$str_cpu."
    ", "" => 0, "Názov úlohy" => 0, "Majiteľ" => 0, "Stav" => 0, "CPU čas (min)" => 0, "Operačná pamäť (KB)" => 0, "Počet CPU" => 0 ), "sestat" => array( "0" => "Úložné zariadenia", "help" => "
    Názov
    Názov úložného zariadenia zaregistrovaný v Informačnom Systéme. Zobrazených maximálne 15 znakov.
    Celková kapacita
    Celkový diskový priestor v GB.
    Voľná kapacita
    Momentálne dostupný diskový priestor v GB.
Celý názov
    Názov úložného zariadenia skladajúci sa z názvu logickej jednotky a názvu servera (rozdelených dvojbodkou). Logický názov sa využíva len pre účely Informačného Systému pre zjednodušenie rozoznávania rôznych úložných zariadení, nachádzajúcich sa na jednom a tom istom servere.
    Bázová URL
    URL úložného zariadenia, obyčajne využívajúc protokol gsiftp. Použite túto URL ako bázu pre prístup k súborom.
    Typ
    Typ úložného zariadenia. Typ "gridftp-based" označuje úložnú jednotku dostupnú cez GridFTP rozhranie.
    ", "#" => 0, "Názov" => 0, // "Celková kapacita" => 0, "Voľná/celková kapacita v GB" => 0, "Názov" => 0, "Bázová URL" => 0, "Typ" => 0 ), "allusers" => array( "0" => "Autorizovaný užívatelia:Aktívny užívatelia", "help" => "
    Meno
    ".$clickable.". ".$str_nam."
    Pracovisko
    Pracovisko užívateľa, podľa informácií v jeho osobnom certifikáte
    Úlohy
    Počet všetkých užívateľových úloh v systéme (bežiacich, čakajúcich, ukončených a vymazaných)
    Zdroje
    Počet klástrov, na ktoré má daný užívateľ prístup
    ", "#" => 0, "Meno" => 0, "pracovisko" => 0, "Úlohy" => 0, "Zdroje" => 0 ), "userres" => array( "0" => "", "Zdroj:rada" => 0, "Voľné CPU" => 0, "Úlohy v rade" => 0, "Voľný diskový priestor (MB)" => 0 ), "ldapdump" => array( "0" => "", "Atribút" => 0, "Hodnota" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Údaje platné od (GMT)", "Mds-validto" => "Údaje platné do (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Meno hlavného stroja", "nordugrid-cluster-aliasname" => "Názov", "nordugrid-cluster-contactstring" => "Kontaktná adresa", "nordugrid-cluster-interactive-contactstring" => "Interaktívna adresa", "nordugrid-cluster-comment" => "Komentár", "nordugrid-cluster-support" => "Elektronická adresa zodpovednej osoby", "nordugrid-cluster-acl" => "Autorizované VO", "nordugrid-cluster-lrms-type" => "typ LRMS", "nordugrid-cluster-lrms-version" => "verzia LRMS", "nordugrid-cluster-lrms-config" => "podrobnosti o LRMS", "nordugrid-cluster-architecture" => "Architektúra", "nordugrid-cluster-opsys" => "Operačný systém", "nordugrid-cluster-homogeneity" => "Homogenita klástra", "nordugrid-cluster-nodecpu" => "Typ procesoru (najslabšieho)", "nordugrid-cluster-nodememory" => "Pamäť (MB, najmenšia)", "nordugrid-cluster-totalcpus" => "Počet CPU celkovo", "nordugrid-cluster-cpudistribution" => "Počet CPU na jeden uzol", "nordugrid-cluster-benchmark" => "Etalónový test (Benchmark)", "nordugrid-cluster-sessiondir-free" => "Diskový priestor, dostupný (MB)", "nordugrid-cluster-sessiondir-total" => "Diskový priestor, celkový (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Doba života gridovej úlohy (min)", "nordugrid-cluster-cache-free" => "Kešová pamäť, dostupná (MB)", "nordugrid-cluster-cache-total" => "Kešová pamäť, celková (MB)", "nordugrid-cluster-runtimeenvironment" => "Pracovné prostredie", "nordugrid-cluster-localse" => "Lokálne úložné zariadenie", "nordugrid-cluster-middleware" => "Gridové rozhranie (middleware)", "nordugrid-clAliasuster-totaljobs" => "Úlohy, celkovo", "nordugrid-cluster-usedcpus" => "CPU, obsadené", "nordugrid-cluster-queuedjobs" => "Úlohy v rade (ZASTARANÉ)", "nordugrid-cluster-prelrmsqueued" => "Gridové úlohy čakajúce na zaslanie", "nordugrid-cluster-location" => "Poštové smerovacie číslo", "nordugrid-cluster-owner" => "Majiteľ", "nordugrid-cluster-issuerca" => "Certifikačná autorita", "nordugrid-cluster-issuerca-hash" => "Hash-kód certifikačnej autority", "nordugrid-cluster-trustedca" => "Akceptované certifikačné autority", "nordugrid-cluster-nodeaccess" => "IP-konektivita uzlov", "nordugrid-cluster-gridarea" => "Priestor gridovej úlohy (ZASTARANÉ)", "nordugrid-cluster-gridspace" => "Gridový diskový priestor (ZASTARANÉ)", "nordugrid-cluster-opsysdistribution" => "Distribúcia OS (ZASTARANÉ)", "nordugrid-cluster-runningjobs" => "Bežiace úlohy (ZASTARANÉ)", "nordugrid-cluster-credentialexpirationtime" => "Credential expiration time", "nordugrid-queue-name" => "Názov rady", "nordugrid-queue-comment" => "Komentár", "nordugrid-queue-status" => "Stav rady", "nordugrid-queue-running" => "Všetky obsadené CPU", "nordugrid-queue-localqueued" => "Lokálne úlohy v rade", "nordugrid-queue-prelrmsqueued" => "Gridové úlohy čakajúce na zaslanie do rady", "nordugrid-queue-queued" => "Úlohy v rade (ZASTARANÉ)", "nordugrid-queue-maxrunning" => "Bežiace úlohy (max)", "nordugrid-queue-maxqueuable" => "Počet úloh v rade (max)", "nordugrid-queue-maxuserrun" => "Počet úloh na užívateľa (max)", "nordugrid-queue-maxcputime" => "CPU čas, maximum 
(min.)", "nordugrid-queue-mincputime" => "CPU čas, minimum (min.)", "nordugrid-queue-defaultcputime" => "CPU čas, bez udania (min.)", "nordugrid-queue-maxwalltime" => "Trvanie, maximum (min.)", "nordugrid-queue-minwalltime" => "Trvanie, minimum (min.)", "nordugrid-queue-defaultwalltime" => "Trvanie, bez udania (min.)", "nordugrid-queue-schedulingpolicy" => "Pravidlá rozvrhu úloh", "nordugrid-queue-totalcpus" => "Celkový počet CPU", "nordugrid-queue-nodecpu" => "Typ CPU", "nordugrid-queue-nodememory" => "Operačná pamäť (MB)", "nordugrid-queue-architecture" => "Architektúra", "nordugrid-queue-opsys" => "Operačný systém", "nordugrid-queue-homogeneity" => "Homogenita rady", "nordugrid-queue-gridrunning" => "CPU obsadené Gridovými úlohami", "nordugrid-queue-gridqueued" => "Gridové úlohy v rade", "nordugrid-queue-benchmark" => "Etalónový test - Benchmark", "nordugrid-queue-assignedcpunumber" => "Počet CPU v rade (ZASTARANÉ)", "nordugrid-queue-assignedcputype" => "Typ CPU v rade (ZASTARANÉ)", "nordugrid-job-globalid" => "Identifikátor", "nordugrid-job-globalowner" => "Majiteľ", "nordugrid-job-execcluster" => "Vykonávajúci kláster", "nordugrid-job-execqueue" => "Vykonávajúca rada", "nordugrid-job-stdout" => "Štandardný výstup", "nordugrid-job-stderr" => "Štandardný chybový výstup", "nordugrid-job-stdin" => "Štandardný vstup", "nordugrid-job-reqcputime" => "Požadovaná CPU čas", "nordugrid-job-reqwalltime" => "Požadovaný čas trvania", "nordugrid-job-status" => "Stav úlohy", "nordugrid-job-queuerank" => "Pozícia úlohy v rade", "nordugrid-job-comment" => "LRMS komentár", "nordugrid-job-submissionui" => "Stroj, z ktorého bola úloha zaslaná", "nordugrid-job-submissiontime" => "Čas zaslania (GMT)", "nordugrid-job-usedcputime" => "Použitý CPU čas", "nordugrid-job-usedwalltime" => "Doterajšie trvanie úlohy", "nordugrid-job-completiontime" => "Čas ukončenia (GMT)", "nordugrid-job-sessiondirerasetime" => "Čas vymazania (GMT)", "nordugrid-job-proxyexpirationtime" => "Čas vypršania proxy certifikátu (GMT)", "nordugrid-job-usedmem" => "Použitá pamäť (KB)", "nordugrid-job-errors" => "Chyby", "nordugrid-job-exitcode" => "Návratová hodnota", "nordugrid-job-jobname" => "Názov", "nordugrid-job-runtimeenvironment" => "Pracovné prostredie", "nordugrid-job-cpucount" => "Požadovaný počet CPU", "nordugrid-job-executionnodes" => "Vykonávajúce uzly", "nordugrid-job-gmlog" => "GM log súbor", "nordugrid-job-clientsoftware" => "Verzia klienta", "nordugrid-job-rerunable" => "Znovuspustiteľnosť", "nordugrid-job-reqcput" => "Požadovaný čas (ZASTARANÉ)", "nordugrid-job-gridlog" => "Gridlog súbor (ZASTARANÉ)", "nordugrid-job-lrmscomment" => "LRMS komentár (ZASTARANÉ)", "nordugrid-authuser-name" => "Meno", "nordugrid-authuser-sn" => "Subjekt", "nordugrid-authuser-freecpus" => "Voľné CPU", "nordugrid-authuser-diskspace" => "Voľný úložný priestor (MB)", "nordugrid-authuser-queuelength" => "Úlohy užívateľa v rade", "nordugrid-se-name" => "Celý názov", "nordugrid-se-aliasname" => "Názov", "nordugrid-se-type" => "Typ", "nordugrid-se-acl" => "Autorizované VO", "nordugrid-se-freespace" => "Voľný priestor (MB)", "nordugrid-se-totalspace" => "Celkový priestor (MB)", "nordugrid-se-url" => "Kontaktná URL adresa", "nordugrid-se-baseurl" => "Kontaktná základná URL adresa (ZASTARANÉ)", "nordugrid-se-accesscontrol" => "Kontrola prístupu", "nordugrid-se-authuser" => "Autorizovaný užívateľ (DN)", "nordugrid-se-location" => "Poštové smerovacie číslo", "nordugrid-se-owner" => "Majiteľ", "nordugrid-se-middleware" => "Gridové rozhranie", 
"nordugrid-se-issuerca" => "Certifikačná autorita", "nordugrid-se-issuerca-hash" => "Hash-kód certifikačnej autority", "nordugrid-se-trustedca" => "Akceptované certifikačné autority", "nordugrid-se-comment" => "Komentár", "nordugrid-rc-name" => "Názov domény", "nordugrid-rc-aliasname" => "Názov", "nordugrid-rc-baseurl" => "Kontaktná URL adresa", "nordugrid-rc-authuser" => "Autorizovaní užívatelia (DN)", "nordugrid-rc-location" => "Poštové smerovacie číslo", "nordugrid-rc-owner" => "Majiteľ", "nordugrid-rc-issuerca" => "Certifikačná autorita" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Nemožno prečítať údaje z indexu vyššej úrovne", "2" => "Žiaden z lokálnych indexov neodpovedá", "3" => " nesprávna konfigurácia alebo uplynul čas požiadavky", "4" => "Žiadna gridová úloha", "5" => "Žiadna informácia", "6" => "Služba je nedostupná", "7" => " - pokúste sa obnoviť neskôr", "8" => "Informácie o rade nedostupné", "9" => "Žiadne údaje", "11" => "Žiadny užívatelia", "11" => "Neautorizovaný na danom zdroji", "12" => "neodpovedá", "13" => "Momentálne niet úloh od daného užívateľa ", // debug messages "101" => " Časový limit pre spojenie s lokálnym indexom: ", "102" => " s pre spojenie a ", "103" => " s pre hľadanie", "104" => " s strávených hľadaním", "105" => "Zobrazenie zdrojov výlučne v ", "106" => "Dopytované indexy vyššej úrovne: ", "107" => "Prijaté geografické koordináty zdrojov, preskenované zdroje: ", "108" => " zdrojov usporiadaných podľa geografickej polohy", "109" => "Vyhľadávanie atribútov klástra", "110" => "Vyhľadávanie atribútov rady", "111" => "Niet údajov od ", "112" => " fungujúcich v krajine: ", "113" => " žiadne ponúkané zdroje", "114" => " Časový limit pre spojenie s globálnym indexom: ", "115" => "Ignoruje sa zdroj: ", "116" => "nezodpovedá typu ", "117" => "Preverovanie spojenia: ", "118" => "V poriadku", "119" => "Doteraz objavených zdrojov typu ", "120" => "Chyba LDAP pri hľadaní ", "121" => " stav v ", "122" => "Zablokované: ", "123" => "Objavený registrant ", "124" => "Vyhľadávanie atribútov úložných zariadení", "125" => "Vyhľadávanie užívateľov", "126" => "Vyhľadávanie úloh", "127" => " spustil úlohu ", "128" => " nemajúc autorizáciu", "129" => "Niet údajov o objekte: chyba ", "130" => " Časový limit pre spojenie s EMIR: ", // icon titles "301" => "Obnoviť", "302" => "Tlač", "303" => "Pomoc", "304" => "Zatvoriť", "305" => "Zelená", "306" => "Sivá", "307" => "Všetci užívatelia", "308" => "Aktívny užívatelia", "309" => "Vyhľadávanie", "310" => "Úložiská", "311" => "Virtuálne organizácie", "312" => "Vlajka krajiny: ", "313" => " (gridové procesy), ", "314" => " (lokálne procesy)", // auxilliary strings "401" => "Procesy", "402" => "Grid", "403" => "Lokálne", "404" => "Svet", "405" => "CELKOVO", "406" => " zdrojov", "407" => "mnoho", "408" => " GB", "409" => " VŠETKY", "410" => "Kláster", "411" => "Rada", "412" => "Úloha", "413" => "Užívateľ", "414" => "Úložisko", "415" => "Katalóg replík", "416" => "Zadajte atribúty, ktoré sa majú zobraziť pre vybraný objekt: ", "417" => "Vyhľadávanie sa vykonáva pre logické A všetkých zadaných výrazov", "418" => "Ponechajte pravé pole prázdne ak filter nie je potrebný", "419" => "Prezeranie zdrojov alebo objektov podľa výberu", "420" => "Plný názov (DN)", "421" => "Môže použiť celkovo ", "422" => " zdrojov", "423" => "Zdroj / objekt:", "424" => "Počet atribútov (6 automaticky):", "425" => "Objekt", "426" => "Ďalší", "427" => "Vyberte", "428" => "Znovunačítať", "429" => "UKÁZAŤ" ), // Post code conversion: only 
for [en]! "tlconvert" => array ( "AU" => "Austrália", "AT" => "Rakúsko", "AM" => "Arménsko", "DZ" => "Alžírsko", "BE" => "Belgicko", "BG" => "Bulharsko", "CA" => "Kanada", "CN" => "Čína", "CZ" => "Česko", "DK" => "Dánsko", "EE" => "Estónsko", "FI" => "Fínsko", "FIN" => "Fínsko", "SF" => "Fínsko", "FR" => "Francúzsko", "GE" => "Gruzínsko", "DE" => "Nemecko", "D" => "Nemecko", "GR" => "Grécko", "HU" => "Maďarsko", "IS" => "Island", "IR" => "Írsko", "IE" => "Írsko", "IT" => "Taliansko", "JP" => "Japonsko", "KEK" => "Japonsko", "TOKYO" => "Japonsko", "LV" => "Lotyšsko", "LT" => "Litva", "MA" => "Maroko", "NL" => "Holandsko", "NO" => "Nórsko", "N" => "Nórsko", "PL" => "Poľsko", "PT" => "Portugalsko", "RO" => "Rumunsko", "RU" => "Rusko", "LK" => "Srí Lanka", "SE" => "Švédsko", "SK" => "Slovensko", "SI" => "Slovinsko", "CH" => "Švajčiarsko", "TR" => "Turecko", "UK" => "UK", "UA" => "Ukrajina", "COM" => "USA", "GOV" => "USA", "USA" => "USA", "US" => "USA", "Australia" => "Austrália", "Austria" => "Rakúsko", "Armenia" => "Arménsko", "Algeria" => "Alžírsko", "Belgium" => "Belgicko", "Bulgaria" => "Bulharsko", "Canada" => "Kanada", "China" => "Čína", "Czechia" => "Česko", "Denmark" => "Dánsko", "Estonia" => "Estónsko", "Finland" => "Fínsko", "France" => "Francúzsko", "Georgia" => "Gruzínsko", "Germany" => "Nemecko", "Greece" => "Grécko", "Hungary" => "Maďarsko", "Iceland" => "Island", "Ireland" => "Írsko", "Italy" => "Taliansko", "Japan" => "Japonsko", "Latvia" => "Lotyšsko", "Lithuania" => "Litva", "Morocco" => "Maroko", "Netherlands" => "Holandsko", "Norway" => "Nórsko", "Poland" => "Poľsko", "Portugal" => "Portugalsko", "Romania" => "Rumunsko", "Russia" => "Rusko", "SriLanka" => "Srí Lanka", "Sweden" => "Švédsko", "Slovakia" => "Slovensko", "Slovenia" => "Slovinsko", "Switzerland" => "Švajčiarsko", "Turkey" => "Turecko", "UK" => "Veľká Británia", "Ukraine" => "Ukrajina", "USA" => "USA", "World" => "Svet" ) ); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/PaxHeaders.7502/fi.inc0000644000000000000000000000012412050701227024222 xustar000000000000000027 mtime=1352893079.143025 27 atime=1513200575.714716 30 ctime=1513200663.742793262 nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/fi.inc0000644000175000002070000013701612050701227024277 0ustar00mockbuildmock00000000000000 N/A tarkoittaa: käyttäjä ei antanut tyolle nimeä.
    X tarkoittaa: käyttäjä tappoi tyonsä.
    ! tarkoittaa: tyon suoritus epäonnistui.
    Valitse tyon nimi jos haluat tyon tarkemmat tiedot."; $str_nam = "Käyttäjän nimi, siten kuin se on käyttäjän varmenteessa. Valitse käyttäjän nimi jos haluat tietoa resursseista jotka ovat hänen käytettävissään ja käyttäjän ajossa olevista toistä."; $str_sta = "Työn tila, siten kuin Grid Manager (GM) ja jonosuoritusohjelma (LRMS) sen kertoivat. Tilat ovat:
    ACCEPTED – tyo lähetetty
    PREPARING – haetaan syötetiedostoja
    SUBMITTING – lähetys jononsuoritusohjelmaan (LRMS) menossa
    INLRMS – tyo on jononsuoritusohjelman armoilla; Tietojärjestelmä lisää seuraavat LRMSn sisäiset tilat:
    : Q – jonossa,
: U – jono on jäädytetty väliaikaisesti koska tietokone on kuormitettu (PBSPro)
: S – jono on jäädytetty (Condor)
    : R, run – työtä suoritetaan
    : E – tyo on loppuvaiheessa (PBS)
    FINISHING – GM siirtää tyon tulostiedostoja
    FINISHED – tyo suoritettu loppuun; tietojärjestelmä lisää aikaleimaa
    CANCELING – tyo peruutetaan
DELETED – käyttäjä ei siirtänyt tulosteita, GM poisti ne koska maksimiaika ylittyi
    Kaikkiin tiloihin voi liittya PENDING: -etuliite, joka tarkoittaa etta GM yrittää siirtää työtä seuraavaan tilaan"; $str_tim = "Tyon käyttämä prosessoriaika minuutteina."; $str_mem = "Tyon käyttämä muisti, KB"; $str_cpu = "Tyon käyttämien prosessorien lukumäärä."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Grid Monitor", "help" => "
    Kohteet jotka rekisteroityvät ARCin luettelopalveluun lajiteltuna maan ja tietokoneen nimen mukaisesti. Kohteista rekisteroidään seuraavat ominaisuudet: klusterin alias-nimi, prosessorikapasiteetti, ajossa olevat ja jonottavat työt (sekä Grid-toiminnoilla lahetetyt etta paikalliset). Käytä "Search" toimintoa jos haluat vertailla muita klusterin, jonon tai tyon ominaisuuksia
    Maa
    ".$clickable.". Maa (lippu ja nimi) kuten annettu resurssien kuvauksessa. Valitse maa jos haluat näyttää vain taman maan tiedot.
    Klusteri
    ".$clickable.". Klusterin alias-nimi kuten omistaja on sen antanut. Naytetään max 22 merkkia. Valitse alias jos haluat tarkemman kuvauksen klusterista.
    Prosessoreita
Klusterin prosessorien kokonaismäärä. Huom! Grid-käyttäjien saatavilla voi olla näistä vain osa.
    Kuorma (prosesseja:Grid+paikallinen)
    ".$clickable.". Klusterin suhteelinen kuorma, eli kuormitettujen prosessorien määrä. Harmaat palkit vastaavat prosessoreita jotka suorittavat paikallisesti lahetettyha töitä, punaiset palkit Grid-töitä. Valitse palkki jos haluat tarkempaa tietoa Grid-toistä joita suoritetaan klusterissa.
    Jonottamassa
    ".$clickable.". Klusterissa jonottavien toiden lukumäärä, Grid työt sekä paikallisesti lähetetyt työt. Valitse ensimmainen numero jos haluat tarkempaa jonottavista Grid-toista.
    ", "Maa" => 30, "Kohde" => 160, "Prosesseja" => 10, "Kuorma (prosesseja: Grid+paikall.)" => 210, "Jonottamassa" => 10 ), "clusdes" => array("0" => "Resource Details for", "help" => "
    Ominaisuus
    ".$clickable." Klusterin ominaisuuden nimi".$str_att."
    Arvo
    ".$str_val."
    Jono
    ".$clickable.". Jonon nimi siten kuin jonon omistaja on sen maarittanyt. ".$str_que."
    Tila
    Jonon tila. Toiminnassa oleva jono ilmoittaa yleensa tilan active.
    Prosessorit (min)
Jonon töiden aikarajoitus (jos annettu) prosessoriminuutteina. Näytetään ala- ja yläraja. N/A näytetään jos rajoituksia ei ole (kaikenkestoiset työt sallitaan).
    Ajossa
    Ajossa olevat jonon työt. Toiden kokonaismäärä, suluissa Grid-töitä suorittavien prosessorien kokonaismäärä. Huom: rinnakkaisille multiprosessoritoille suluissa oleva numero voi olla suurempi kuin toiden määrä.
    Jonottamassa
Työt jotka odottavat suoritukseen pääsyä jonossa. Töiden kokonaismäärä ja Grid-toiminnoilla lähetetyt suluissa esim. (Grid: 235)
    ", "Jono" => 0, "Mapping Queue" => 0, "Tila" => 0, "Rajoitukset (min)" => 0, "Prosessoreita" => 0, "Ajossa" => 0, "Jonottamassa" => 0 ), "jobstat" => array("0" => "Jobs at:Job ID", "help" => "
    TYÖT:
    Tyon nimi
    ".$clickable.". Tyon nimi (omistajan antama). Jos omistaja ei antanut tyolle nimea, näytetään " style={color:red;}>N/A" . Valitse nimi jos haluat kuvauksen tyostä.
    Omistaja
    ".$clickable.". ".$str_nam."
    Tila
    ".$str_sta."
    Prosessoreita (min)
    ".$str_tim."
    Jono
    ".$clickable.". Eräajojono, jossa työtä suoritetaan. ".$str_que."
    Prosessoreita
    ".$str_cpu."
    TYÖT (YKSITYISKOHTAISESTI):
    Ominaisuus
    ".$clickable.". Tyon ominaisuus".$str_att."
    Arvo
    ".$str_val."
    ", "Tyon nimi" => 0, "Omistaja" => 0, "Tila" => 0, "Prosessoreita (min)" => 0, "Jono" => 0, "Prosessoreita" => 0 ), "volist" => array("0" => "Virtual Organisations", "help" => "
    Virtuaaliorganisaatio (VO)
    ".$clickable.". Ryhmä käyttäjiä jotka käyttävät samanlaisia resursseja ARC-tietkoneissa. Valitse ryhmän nimi jos haluta listan ryhmän jäsenistä.
    Jäseniä
    Ryhmän jäsenten määrä.
    Served by
    LDAP palvelin johon ryhmä/jäsenyystiedot talletetaan.
    ", "Virtuaaliorganisaatio (VO)" => 0, "Jäsenet" => 0, "Palvelin" => 0 ), "vousers" => array("0" => "Grid User Base", "help" => "
    Nimi
    ".$clickable.". ".$str_nam."
    Organisaatio
    ".$clickable.". Käyttäjän organisaatio siinä muodossa kuin VO'n hallinnoija on sen antanut (voi olla myos tyhjä).
    Sähkopostiosoite
    ".$clickable.". Käyttäjän sähkopostiosoite siinä muodossa kuin VO'n hallinnoija on sen antanut (voi olla myos tyhjä). Valitsemalla sähkopostiosoitteen voit lähettää käyttäjälle sähkopostia.
    ", "#" => 0, "Nimi" => 0, "Organisaatio" => 0, "Sähkopostiosoite" => 0 ), "userlist" => array("0" => "Information for", "help" => "
    Klusteri:jono
    ".$clickable.". Klusterit ja niiden jonot (kaksoispisteella erotettuina, ":") joihin käyttäja voi lähettää töitä. Jos käyttäjällä ei ole oikeutta lähettää työtä, tuloste on "Not authorised at host ..." Valitse klusterin nimi jos haluat yksityiskohtaisen kuvauksen klusterista. Valitse jonon nimi jos haluat yksityiskohtaisen kuvauksen jonosta.
    Vapaita prosessoreita
Tälle käyttäjälle, annetussa jonossa saatavilla olevien prosessorien määrä. Tämän jälkeen saattaa ilmetä myös maksimiarvo joka kertoo kuinka monta minuuttia prosessori on käytettävissä. "3" tarkoittaa: 3 prosessoria käytettävissä ilman aikarajaa. "4:360" tarkoittaa: 4 prosessoria korkeintaan kuudeksi tunniksi. "0" tarkoittaa: ei prosessoreita saatavilla ja työt jonottavat kunnes niitä vapautuu.
    Jonossa olevia töitä
    TyöT jotka todennäkoisesti suoritetaan ennen uutta jonoon tulevaa. "0" tarkoittaa: tyo suoritetaan heti. Huom! Tämä on arvio, jonon paikalliskaytanto saattaa muuttaa prioriteetteja.
    Vapaa tila (MB)
    Käyttäjälle tarjolla oleva levytila tässä jonossa (megatavuina). Huom! Tämä on arvio, koska klusterit eivät tarjoa levykiintioitä.
    Tyon nimi
    ".$clickable.". ".$str_job."
    Tila
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Klusteri
    ".$clickable.". Klusteri kossa tyo suoritetaan/suoritettiin.
    Queue
    ".$clickable.". Jono jossa tyo suoritetaan/suoritettiin. ".$str_que."
    Prosessoreita
    ".$str_cpu."
    ", "" => 0, "Tyon nimi" => 0, "Tila" => 0, "Prosessoreita (min)" => 0, "Klusteri" => 0, "Jono" => 0, "Prosessoreita" => 0 ), "attlist" => array("0" => "Attribute values", "help" => "
    Object
    ".$clickable.". Objekti jonka ominaisuuksia tarkastellaan. Objekti voi olla klusteri, jono, tyo, käyttäjä jne. Valitsemalla objektin sen kuvauksen.
    Ominaisuus
    Ominaisuuksia ja niiden arvoja tulostetaan yksi tai usempia per kohde. Sarakkeen otsikko on ihmiselle ymmarrettävässä muodossa (poislukien jotkin MDS-spesifit ominaisuudet). Sarake sisältää vastaavan ominaisuuden arvot tälle kohteelle (arvot saadaan tietojärjestelmästä).
    ", "Objekti" => 0, "Ominaisuus" => 0 ), "quelist" => array("0" => "Jono", "help" => "
    Ominaisuus
    ".$clickable.". Name of a queue attribute".$str_att."
    Arvo
    ".$str_val."
    Tyon nimi
    ".$clickable.". ".$str_job."
    Omistaja
    ".$clickable.". ".$str_nam."
    Tila
    ".$str_sta."
    Prosessoreita (min)
    ".$str_tim."
    Muisti (KB)
    ".$str_mem."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Tyon nimi" => 0, "Omistaja" => 0, "Tila" => 0, "Prosessoreita (min)" => 0, "Muisti (KB)" => 0, "Prosessoreita" => 0 ), "sestat" => array("0" => "Storage Elements", "help" => "
    Alias-nimi
    Talletuselementin nimi siinä muodossa kuin se on tietojärjestelmässä (IS), max. 15 merkkiä näytetään.
    Tilaa kaikkiaan
    Kokonaislevytila, GB.
    Vapaa tila
    Tälle hetkellä vapaana oleva levytila, GB.
    Name
    Talletuselementin nimi, looginen nimi ja tietokoneen nimi kaksoispisteella eroteltuna. Loogista nimeä käyttää vain tietojärjestelmä (IS), jotta voidaan erottaa eri talletuselementit samalla koneella.
    URLin alku
Talletuselementin URL, usein gsiftp://.. Tämä URL edeltää yksittäisiä tiedostoja tai hakemistoja.
    Type
    Talletuselementin tyyppi. "gridftp-based" tarkoittaa tietovarantoa jossa GridFTP liittymä.
    ", "#" => 0, "Alias-nimi" => 0, // "Tilaa kaikkiaan" => 0, "Vapaa/kaikkiaan tila, GB" => 0, "Nimi" => 0, "URLin alku" => 0, "Typpi" => 0 ), "allusers" => array("0" => "Grid kayttäjät joille käytto sallittu:Aktiiviset Grid käyttäjät", "help" => "
    Nimi
    ".$clickable.". ".$str_nam."
    Organisaatio
    Käyttäjän organisaatio, tieto saatu varmenteesta
    TyöT
    Kaikki käyttäjien työt (ajossa, odottamassa, suoritettu tai poistettu)
    Kohteet
    Kuinka monta kohdetta tämä käyttäjä voi käyttää
    ", "#" => 0, "Nimi" => 0, "Organisaatio" => 0, "TöIta" => 0, "Kohteita" => 0 ), "userres" => array("0" => "", "Klusteri:jono" => 0, "Vapaita prosessoreita" => 0, "Jonossa olevia töitä" => 0, "Vapaata levytilaa (MB)" => 0 ), "ldapdump" => array("0" => "", "Ominaisuus" => 0, "Arvo" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info valid from (GMT)", "Mds-validto" => "Info valid to (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Edustakoneen domain", "nordugrid-cluster-aliasname" => "Klusterin alias-nimi", "nordugrid-cluster-contactstring" => "Kontakti", "nordugrid-cluster-interactive-contactstring" => "Interaktiivisten toiden kontakti", "nordugrid-cluster-comment" => "Kommentti", "nordugrid-cluster-support" => "Tukipalvelun sähkoposti", "nordugrid-cluster-acl" => "Authorised VOs", "nordugrid-cluster-lrms-type" => "Jononhallintaohjelmiston tyyppi", "nordugrid-cluster-lrms-version" => "Jononhallintaohjelmiston versio", "nordugrid-cluster-lrms-config" => "Jononhallintaohjelmisto, tarkemmin", "nordugrid-cluster-architecture" => "Arkkitehtuuri", "nordugrid-cluster-opsys" => "Käyttojärjestelmä", "nordugrid-cluster-homogeneity" => "Klusterin arkkitehtuuri yhtenainen ", "nordugrid-cluster-nodecpu" => "Prosessorin tyyppi (hitain)", "nordugrid-cluster-nodememory" => "Muisti (MB, pienin määrä)", "nordugrid-cluster-totalcpus" => "Prosessoreita kaikkiaan", "nordugrid-cluster-cpudistribution" => "Prosessoreita tietokonetta kohti", "nordugrid-cluster-benchmark" => "Suoritustesti", "nordugrid-cluster-sessiondir-free" => "Levytila, saatavilla (MB)", "nordugrid-cluster-sessiondir-total" => "Levytila kaikkiaan (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Sessiohakemiston elinaika (min)", "nordugrid-cluster-cache-free" => "Valimuistin koko, saatavilla (MB)", "nordugrid-cluster-cache-total" => "Valimuistin koko kaikkiaan (MB)", "nordugrid-cluster-runtimeenvironment" => "Ajoaikainen ymparisto", "nordugrid-cluster-localse" => "Paikallinen talletuselementti (SE)", "nordugrid-cluster-middleware" => "Väliohjelmisto", "nordugrid-cluster-totaljobs" => "Töiden kokonaismäärä", "nordugrid-cluster-usedcpus" => "Prosessoreita varattu", "nordugrid-cluster-queuedjobs" => "TöItä jonossa", "nordugrid-cluster-prelrmsqueued" => "Grid–töitä odottamassa", "nordugrid-cluster-location" => "Postinumero", "nordugrid-cluster-owner" => "Omistaja", "nordugrid-cluster-issuerca" => "Varmenteen myontäjä", "nordugrid-cluster-issuerca-hash" => "Varmenteen myontäjän hajakoodi", "nordugrid-cluster-trustedca" => "Luotetut varmenteen myontäjä", "nordugrid-cluster-nodeaccess" => "Laskentasolmun internet-yhteys", "nordugrid-cluster-gridarea" => "Sessiotila (VANHENTUNUT)", "nordugrid-cluster-gridspace" => "Grid levytila (VANHENTUNUT)", "nordugrid-cluster-opsysdistribution" => "Käyttojärjestelmän jakelunimi (VANHENTUNUT)", "nordugrid-cluster-runningjobs" => "TöItä, ajossa (VANHENTUNUT)", "nordugrid-cluster-credentialexpirationtime" => "Credential expiration time", "nordugrid-queue-name" => "Jonon nimi", "nordugrid-queue-comment" => "Kommentti", "nordugrid-queue-status" => "Jonon tila", "nordugrid-queue-running" => "Prosessoreita varattu", "nordugrid-queue-localqueued" => "Paikallisia töitä jonossa", "nordugrid-queue-prelrmsqueued" => "Grid–töitä odottamassa", "nordugrid-queue-queued" => "Jonossa olevia töitä (VANHENTUNUT)", "nordugrid-queue-maxrunning" => "Ajossa olevia töitä (max)", "nordugrid-queue-maxqueuable" => "TöItä jotka voivat jonottaa (max)", "nordugrid-queue-maxuserrun" 
=> "TöItä käyttäjää kohti (max)", "nordugrid-queue-maxcputime" => "Prosessoriaika, max. (minuutteja)", "nordugrid-queue-mincputime" => "Prosessoriaika, min. (minuutteja)", "nordugrid-queue-defaultcputime" => "Prosessoriaika, oletusarvo (minuutteja)", "nordugrid-queue-maxwalltime" => "Kokonaisaika, max. (minuutteja)", "nordugrid-queue-minwalltime" => "Kokonaisaika, min. (minuutteja)", "nordugrid-queue-defaultwalltime" => "Kokonaisaika, oletusarvo (minuutteja)", "nordugrid-queue-schedulingpolicy" => "Schedulointipolitiikka", "nordugrid-queue-totalcpus" => "Prosessoreita kaikkiaan", "nordugrid-queue-nodecpu" => "Prosessrin tyyppi", "nordugrid-queue-nodememory" => "Muistia (MB)", "nordugrid-queue-architecture" => "Arkkitehtuuri", "nordugrid-queue-opsys" => "Käyttojärjestelmä", "nordugrid-queue-homogeneity" => "Jonon arkkitehtuuri yhtenäinen", "nordugrid-queue-gridrunning" => "Grid–töiden käyttämät prosessorit", "nordugrid-queue-gridqueued" => "Grid työt, jonossa", "nordugrid-queue-benchmark" => "Mitattu suorituskyky", "nordugrid-queue-assignedcpunumber" => "Prosessoreita jonoa kohti (VANHENTUNUT)", "nordugrid-queue-assignedcputype" => "Prosessorin tyyppi (VANHENTUNUT)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Omistaja", "nordugrid-job-execcluster" => "Suoritusklusteri", "nordugrid-job-execqueue" => "Suoritusjono", "nordugrid-job-stdout" => "Standardi tulostiedosto", "nordugrid-job-stderr" => "Standardi virhetiedosto ", "nordugrid-job-stdin" => "Standardi syotetiedosto", "nordugrid-job-reqcputime" => "Pyydetty prosessoriaika", "nordugrid-job-reqwalltime" => "Pyydetty kokonaisaika", "nordugrid-job-status" => "Tyon tila", "nordugrid-job-queuerank" => "Paikka jonossa", "nordugrid-job-comment" => "Jonosuoritusohjelman kommentti", "nordugrid-job-submissionui" => "Lähetetty koneesta", "nordugrid-job-submissiontime" => "Lähetysaika (GMT)", "nordugrid-job-usedcputime" => "Käytetty prosessoriaika", "nordugrid-job-usedwalltime" => "Käytetty kokonaisaika", "nordugrid-job-completiontime" => "Saatu suoritettua (GMT)", "nordugrid-job-sessiondirerasetime" => "Poistamisaika (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxyn käyttoaika loppuu (GMT)", "nordugrid-job-usedmem" => "Käytetty muisti (KB)", "nordugrid-job-errors" => "Virheet", "nordugrid-job-exitcode" => "Poistumiskoodi", "nordugrid-job-jobname" => "Nimi", "nordugrid-job-runtimeenvironment" => "Ajoaikainen ympäristo", "nordugrid-job-cpucount" => "Pyydetyt prosessorit", "nordugrid-job-executionnodes" => "Suoritusnoodi", "nordugrid-job-gmlog" => "GM log -tiedosto", "nordugrid-job-clientsoftware" => "Asiakasohjelmiston nimi", "nordugrid-job-rerunable" => "Uudelleen ajettavissa", "nordugrid-job-reqcput" => "Pyydetty suoritusaika (VANHENTUNUT)", "nordugrid-job-lrmscomment" => "Jonosuoritusohjelman kommentti (VANHENTUNUT)", "nordugrid-job-gridlog" => "Gridlog tiedosto (VANHENTUNUT)", "nordugrid-authuser-name" => "Nimi", "nordugrid-authuser-sn" => "Subject-nimi", "nordugrid-authuser-freecpus" => "Vapaita prosessoreita", "nordugrid-authuser-diskspace" => "Vapaa levytila (MB)", "nordugrid-authuser-queuelength" => "KДyttДjДn tЖitД jonossa", "nordugrid-se-name" => "Nimi", "nordugrid-se-aliasname" => "Talletuselementin alias-nimi", "nordugrid-se-type" => "Talletuselementin tyyppi", "nordugrid-se-acl" => "Autorisoidut VOt", "nordugrid-se-freespace" => "Vapaa tila (MB)", "nordugrid-se-totalspace" => "Kokonaistila (MB)", "nordugrid-se-url" => "Yhteys-URL", "nordugrid-se-baseurl" => "Yhteys-URL (VANHENTUNUT)", 
"nordugrid-se-accesscontrol" => "Kayttokontrolli", "nordugrid-se-authuser" => "Auktorisoitu käyttäjä (DN)", "nordugrid-se-location" => "Postinumero", "nordugrid-se-owner" => "Omistaja", "nordugrid-se-middleware" => "VДliohjelmisto", "nordugrid-se-issuerca" => "Varmenteen myontäjä", "nordugrid-se-issuerca-hash" => "Varmenteen myontäjän hajakoodi", "nordugrid-se-trustedca" => "Luotetut varmenteen myontäjä", "nordugrid-se-comment" => "Kommentteja", "nordugrid-rc-name" => "Domainin nimi", "nordugrid-rc-aliasname" => "Replica Catalog alias", "nordugrid-rc-baseurl" => "Yhteys-URL", "nordugrid-rc-authuser" => "Auktorisoitu käyttäjä (DN)", "nordugrid-rc-location" => "Postinumero", "nordugrid-rc-owner" => "Omistaja", "nordugrid-rc-issuerca" => "Varmenteen myontäjä (CA)" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Ei voitu lukea ylätason indeksejä", "2" => "Ei saatu yhteyttä paikallisiin indeksipalveluihin", "3" => " viallinen konfiguraatio tai pyyynnolle annettu aika ylittyi", "4" => "Ei Grid-töitä", "5" => "Ei loytynyt tietoa", "6" => "Tietokone ei saavutettavissa", "7" => " - hae uudestaan myohemmin", "8" => "Ei jonotietoa", "9" => "Ei kohderiveja", "10" => "Ei käyttäjiä", "11" => "Ei oikeutta käyttää tietokonetta", "12" => "ei vastaa", "13" => "Ei töitä ", // debug messages "101" => " Monitoriprosesille annettu aika: GRIS: ", "102" => " sekuntia yhteyksien luomiseen ", "103" => " sekuntia käytetty etsimisprosessissa", "104" => " sekuntia käytetty etsimiseen", "105" => "Näytetään vain resurssit: ", "106" => "Tutkittu ylimman tason indeksit: ", "107" => "Maantieteelliset kohteet haettu, lisataan tietoa: ", "108" => " kohteet jarjestetty maantieteellisesti", "109" => "Etsi klusterin ominaisuuksilla", "110" => "Etsi jonon ominaisuuksilla", "111" => "Ei dataa kohteesta ", "112" => " on toiminnassa: ", "113" => " ei resursseja tarjolla", "114" => " Monitoriprosessille annettu aika ylittyi, GIIS: ", "115" => "Jätetään valiin GRIS: ", "116" => "ei ole ", "117" => "Tarkintan yhteyttä: ", "118" => "OK", "119" => "Siihen mennessä loytynyt seuraavanlaisia resursseja ", "120" => "LDAP etsinnässä virhe ", "121" => " status ", "122" => "Mustalla listalla: ", "123" => "Rekisteroitynyt ", "124" => "Etsi tallennuselementin (SE) ominaisuuskai", "125" => "Etsi käyttäjiä", "126" => "Etsi töitä", "127" => " tyo ", "128" => " ei käyttooikeutta", "129" => "Virhe: ei tietoa kohteesta ", "130" => " Monitoriprosessille annettu aika ylittyi, EMIR: ", // icon titles "301" => "Lataa uudestaan", "302" => "Tulosta", "303" => "Ohjeet", "304" => "Sulje", "305" => "Punainen", "306" => "Harmaa", "307" => "Kaikki käyttäjät", "308" => "Aktiiviset käyttäjät", "309" => "Hae", "310" => "Tietovarannot", "311" => "Virtuaaliorganisaatiot", "312" => "Lippu: ", "313" => " Grid prosessit ja ", "314" => " paikalliset prosessit", // auxilliary strings "401" => "Prosessit", "402" => "Grid", "403" => "Paikallinen", "404" => "Maailma", "405" => "TOTAL", "406" => " kohdetta ", "407" => "paljon", "408" => " GB", "409" => " KAIKKI", "410" => "Klusteri", "411" => "Jono", "412" => "Tyo", "413" => "Kayttäjä", "414" => "Tietovaranto", "415" => "Replica Cat.", "416" => "Valitse ominaisuudet jotka näytetään: ", "417" => "Kaikkien valintojen kombinaation näytetään", "418" => "Jata oikeanpuoleinen kenttä tyhjäksi jos haluat kaikki tulokset näyttoon", "419" => "Näyta valitut resurssit tai kohteet", "420" => "Distinguished name", "421" => "käytettävissä ", "422" => " kohdetta", "423" => "Resurssi / objekti:", "424" => 
"Ominaisuuksia (def. 6):", "425" => "Objekti", "426" => "Seuraava", "427" => "Valise yksi", "428" => "Tyhjennä valinnat", "429" => "NÄYTÄ" ), // Post code conversion "tlconvert" => array ( "Australia" => "Australia", "Austria" => "Itävalta", "Armenia" => "Armenia", "Algeria" => "Algeria", "Belgium" => "Belgia", "Bulgaria" => "Bulgaria", "Canada" => "Kanada", "Czechia" => "Tsekki", "China" => "Kiina", "Denmark" => "Tanska", "Estonia" => "Eesti", "Finland" => "Suomi", "France" => "Ranska", "Georgia" => "Georgia", "Germany" => "Saksa", "Greece" => "Kreikka", "Hungary" => "Unkari", "Iceland" => "Islanti", "Ireland" => "Irlanti", "Italy" => "Italia", "Japan" => "Japani", "Latvia" => "Latvia", "Lithuania" => "Liettua", "Morocco" => "Marokko", "Netherlands" => "Alankomaat", "Norway" => "Norja", "Poland" => "Puola", "Portugal" => "Portugali", "Romania" => "Romania", "Russia" => "Venäjä", "SriLanka" => "Sri Lanka", "Sweden" => "Ruotsi", "Slovakia" => "Slovakia", "Slovenia" => "Slovenia", "Switzerland" => "Sveitsi", "Turkey" => "Turkki", "UK" => "Iso-Britannia", "Ukraine" => "Ukraina", "USA" => "USA" ) ); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/PaxHeaders.7502/ru.inc0000644000000000000000000000012412050701227024252 xustar000000000000000027 mtime=1352893079.143025 27 atime=1513200575.692716 30 ctime=1513200663.744793286 nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/ru.inc0000644000175000002070000017575112050701227024337 0ustar00mockbuildmock00000000000000ARC
    ."; $str_val = "Значение атрибута, записанное в Информационной Системе."; $str_que = "Обычно очереди различаются либо по допустимой продолжительности счёта, либо по допущенной группе пользователей. По щелчку выводится полное описание очереди, включающее список всех известных задач: в счёте, в очереди и закончившихся."; $str_job = " Имя задачи, присвоенное хозяином.
    N/A означает, что хозяин не присвоил никакого имени.
    X означает, что хозяин отменил исполнение задачи.
    ! означает, что при исполнении задачи произошла ошибка.
    По щелчку выводится подробное описание задачи."; $str_nam = "Имя пользователя, в соответствии с его личным сертификатом. По щелчку выводится сводная таблица всех Грид-ресурсов, доступных данному пользователю, и список всех его задач, зарегистрированных на данный момент в системе."; $str_sta = "Состояние задачи: стадия прогресса в ГМ или статус в СУПО. Последовательность возможных состояний такова:
    ACCEPTED – задача принята, но исполнение ещё не началось
    PREPARING – подгружаются необходимые входные данные
    SUBMITTING – посылается задание в СУПО
    INLRMS – управление задачей передано в СУПО; информационная система обеспечивает сведения о внутреннем состоянии задачи. Возможны следующие состояния:
    : Q – задача ожидает в очереди
: U – задача приостановлена на перегруженном узле (PBSPro)
    : S – задача приостановлена (Condor)
    : R, run – задача исполняется
    : E – задача заканчивается (PBS)
    FINISHING – выходные данные пересылаются по назначению
    FINISHED – задача завершена; информационная система добавляет метку времени окончания
    CANCELING – задача отменяется
    DELETED – результаты задачи не затребованы хозяином, но уничтожены сервером по истечении времени хранения (обычно 24 часа).
    К каждому состоянию может быть добавлена приставка \"PENDING:\", что означает, что ГМ не может в данный момент перейти к следующему этапу исполнения из-за соответствующих внутренних ограничений."; $str_tim = "Процессорное время, затраченное задачей, в минутах."; $str_mem = "Объём оперативной памяти, используемый задачей на текущий момент, в килобайтах"; $str_cpu = "Число процессоров, занимаемых задачей."; // Actual messages $message = array ( // Table headers and help "loadmon" => array( "0" => "Грид-монитор", "help" => "
    В этом окне приведена сводная таблица всех вычислительных ресурсов, регистрирующихся в списки высшего уровня ARC. Таблица упорядочена по английскому названию страны, и в каждой стране – по имени головной машины. Для каждого ресурса выведены следующие параметры: название, общее число процессоров, число занятых процессоров, а также количество заданий в очереди, как засланных через Грид, так и местных. Используйте утилиту \"Поиск\" для просмотра и сравнения других параметров кластеров, очередей, задач и т.д..
    Страна
    ".$clickable.". Флаг и название страны, как следует из доступного описания ресурса. По щелчку выводится сводная таблица только для этой страны.
    Ресурс
    ".$clickable.". Название ресурса (обычно, кластера), присвоенное владельцем. Длина строки не должна превышать 22 символа. По щелчку выводится полное описание ресурса (кластера).
    ЦП
    Общее число центральных процессоров в кластере. Внимание! Лишь часть из них может быть доступна Грид-пользователям.
    Загрузка (процессы)
    ".$clickable.". Относительная загрузка кластера, исходя из числа занятых процессоров. Серая полоса соответствует количеству процессоров, занятых под местные задачи, тогда как красная полоса указывает на количество процессоров, исполняющих Грид-задачи. По щелчку выводится сводка всех активных Грид-задач на кластере, включающая информацию о числе процессоров на каждую задачу.
    Ожидают
    ".$clickable.". Число всех задач, ожидающих в очереди на данном кластере, представленное в виде суммы Грид- и локальных задач. По щелчку на первой цифре выводится сводка всех задач в очереди, засланных через Грид.
    ", "Страна" => 30, "Ресурс" => 160, "ЦП" => 10, "Загрузка (процессы)" => 210, "Ожидают" => 10 ), "clusdes" => array( "0" => "Описание ресурса", "help" => "
    Атрибут
    ".$clickable.". Названия атрибутов кластера".$str_att."
    Значение
    ".$str_val."
    Очередь
    ".$clickable.". Названия очередей (присвоенные владельцами), доступных для Грид-пользователей. ".$str_que."
    Состояние
    Состояние очереди. Работающая очередь обычно выдаёт состояние active.
    Длительность (мин)
    Пределы по времени на продолжительность обработки задания в очереди, если таковые установлены, в минутах процессорного времени. Первое значение соответствует нижнему пределу, второе – верхнему. Если пределы не установлены (т.е., очередь принимает задачи любой продолжительности), выводится метка N/A.
    Считаются
    Число задач, считающихся в очереди. Показано общее число задач, причём число процессоров, занятых под Грид-задачи, указано в скобках, например: (Грид: 12). Внимание! При наличии параллельных многопроцессорных задач, число в скобках может превышать общее число задач.
    Ожидают
    Число заданий, ожидающих исполнения в очереди. Показано общее число задач, причём количество заданий, засланных через Грид, указано в скобках, например: (Грид: 235).
    ", "Очередь" => 0, "Mapping Queue" => 0, "Состояние" => 0, "Длительность (мин)" => 0, "ЦП" => 0, "Считаются" => 0, "Ожидают" => 0 ), "jobstat" => array( "0" => "Задачи на:Ярлык задачи", "help" => "
    СПИСОК ЗАДАЧ:
    Имя задачи
    ".$clickable.". Имя задачи, присвоенное хозяином. N/A означает, что хозяин не присвоил никакого имени. По щелчку выводится подробное описание задачи.
    Хозяин
    ".$clickable.". ".$str_nam."
    Состояние
    ".$str_sta."
    Время (мин)
    ".$str_tim."
    Очередь
    ".$clickable.". Название очереди СУПО, в которой происходит исполнение задачи.".$str_que."
    ЦП
    ".$str_cpu."
    ОПИСАНИЕ ЗАДАЧИ:
    Атрибут
    ".$clickable.". Названия атрибутов задачи.".$str_att."
    Значение
    ".$str_val."
    ", "Имя задачи" => 0, "Хозяин" => 0, "Состояние" => 0, "Время (мин)" => 0, "Очередь" => 0, "ЦП" => 0 ), "volist" => array( "0" => "Виртуальные организации", "help" => "
    Виртуальные организации
    ".$clickable.". Группа пользователей – обычно объединяемых совместной целью или ресурсами, – допущенная к работе по крайней мере на одном из ресурсов ARC. По щелчку выводится список членов группы.
    Члены
    Количество членов группы.
    Обслуживается
    Адрес сервера, поддерживающего базу данных членов группы.
    ", "Виртуальная оргаизация" => 0, "Члены" => 0, "Обслуживается" => 0 ), "vousers" => array( "0" => "Пользователи", "help" => "
    Имя
    ".$clickable.". ".$str_nam."
    Место работы
    Место работы пользователя, в соответствии с записью в базе данных. Необязательно.
    Электронная почта
    ".$clickable.". Адрес электронной почты пользователя, в соответствии с записью в базе данных. Необязательно. По щелчку создается сообщение для пользователя.
    ", "#" => 0, "Имя" => 0, "Место работы" => 0, "Электронная почта" => 0 ), "userlist" => array( "0" => "Информация для", "help" => "
    Ресурс:очередь
    ".$clickable.". Названия ресурсов (кластеров) и соответствующих очередей СУПО (разделённые двоеточием), доступных данному пользователю. Если доступ закрыт, выводится сообщение "Нет доступа к ресурсу". По щелчку на названии кластера выводится полное описание ресурса (кластера). По щелчку на названии очереди выводится полное описание очереди.
    Свободные ЦП.
    Число свободных центральных процессоров, доступных в данной очереди для данного пользователя на данный момент времени. Если очередь имеет ограничения по времени на исполнение задач, этот предел указан после числа процессоров (в минутах, разделяется двоеточием). Например, "3" означает, что 3 процессора доступно для сколь угодно продолжительных задач; "4:360" означает, что 4 процессора доступно для задач, не превышающих 6 часов; "10:180 30" означает, что 10 процессоров доступно для задач, не превышающих 3 часов, и 30 процессоров доступно для сколь угодно продолжительных задач; "0" означает, что свободных ресурсов нет, и задачи будут направлены на ожидание в очереди.
    Задачи в очереди
    Количество задач пользователя, расположенных в списке ожидания перед новой задачей, засланной от имени данного пользователя. Число "0" означает, что задача предположительно будет запущена на счёт немедленно. Внимание! Это лишь предположительные значения, которые могут быть изменены локальными операторами.
    Диск, доступно (Мб)
    Пространство на локальном жёстком диске, доступное данному пользователю в данной очереди (в мегабайтах). Внимание! Это лишь предположительные значения, т.к. большинство кластеров не поддерживают дисковые квоты.
    Имя задачи
    ".$clickable.". ".$str_job."
    Состояние
    ".$str_sta."
    Время (мин)
    ".$str_tim."
    Ресурс
    ".$clickable.". Имя ресурса (обычно, кластера), на котором происходит исполнение. задачи. По щелчку выводится полное описание ресурса (кластера).
    Очередь
    ".$clickable.". Название очереди СУПО, в которой происходит исполнение задачи. ".$str_que."
    ЦП
    ".$str_cpu."
    ", "" => 0, "Имя задачи" => 0, "Состояние" => 0, "Время (мин)" => 0, "Ресурс" => 0, "Очередь" => 0, "ЦП" => 0 ), "attlist" => array( "0" => "Значения атрибутов", "help" => "
    Объект
    ".$clickable." Название объекта, атрибуты которого перечислены в строке. Это может быть имя кластера, имя очереди, имя задачи, имя пользователя и.т.д.. По щелчку выводится подробное описание объекта.
    Атрибут
    Для каждого объекта в таблице приведены значения одного или нескольких его атрибутов. В заголовке столбца указано название атрибута, интерпретированное для простоты чтения (за исключением нескольких атрибутов, специфичных для системы MDS), а содержимым каждого столбца являются значения соответствующих атрибутов, записанные в Информационной Системе.
    ", "Объект" => 0, "Атрибут" => 0 ), "quelist" => array( "0" => "Очередь", "help" => "
    Атрибут
    ".$clickable.". Названия атрибутов очереди".$str_att."
    Значение
    ".$str_val."
    Имя задачи
    ".$clickable.". ".$str_job."
    Хозяин
    ".$clickable.". ".$str_nam."
    Состояние
    ".$str_sta."
    Время (мин)
    ".$str_tim."
    ОЗУ (Кб)
    ".$str_mem."
    ЦП
    ".$str_cpu."
    ", "" => 0, "Имя задачи" => 0, "Хозяин" => 0, "Состояние" => 0, "Время (мин)" => 0, "ОЗУ (Кб)" => 0, "ЦП" => 0 ), "sestat" => array( "0" => "Накопительные устройства", "help" => "
    Название
    Название накопительного устройства, зарегистрированное в Информационной Системе. Максимально допустимая длина: 15 символов.
    Весь объём
    Полный объём диска, Гб.
    Свободно
    Доступное пространство на диске в настоящий момент, Гб.
    Имя
    Имя накопительного устройства, состоящее из логического имени и имени сервера (разделённое двоеточием). Логическое имя используется только Информационной Системой, для простоты распознавания разных накопительных устройств, находящихся на одном и том же сервере.
    URL базы
    URL накопительного устройства, обычно по протоколу gsiftp://. Используйте этот адрес как базовый для доступа к файлам.
    Тип
    Тип накопительного устройства. Тип "gridftp-based" означает что это дисковый накопитель с интерфейсом GridFTP.
    ", "#" => 0, "Название" => 0, // "Весь объём" => 0, "Свободно/весь объём, Гб"=> 0, "Имя" => 0, "URL базы" => 0, "Тип" => 0 ), "allusers" => array( "0" => "Допущенные пользователи:Активные пользователи", "help" => "
    Имя
    ".$clickable.". ".$str_nam."
    Место работы
    Место работы пользователя, в соответствии с записью в его сертификате.
    Задачи
    Число всех задач пользователя, находящихся в системе (в счёте, в очереди и закончившихся).
    Ресурсы
    Число кластеров, на которых данный пользователь имеет допуск.
    ", "#" => 0, "Имя" => 0, "Место работы" => 0, "Задачи" => 0, "Ресурсы" => 0 ), "userres" => array( "0" => "", "Ресурс:очередь" => 0, "Свободные ЦП" => 0, "Задачи в очереди" => 0, "Диск, доступно (Мб)" => 0 ), "ldapdump" => array( "0" => "", "Атрибут" => 0, "Значение" => 0 ), "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Данные действительны с (GMT)", "Mds-validto" => "Данные действительны по (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Имя головной машины", "nordugrid-cluster-aliasname" => "Название", "nordugrid-cluster-contactstring" => "Контактный адрес", "nordugrid-cluster-interactive-contactstring" => "Интерактивный адрес", "nordugrid-cluster-comment" => "Комментарий", "nordugrid-cluster-support" => "Адрес ответственного", "nordugrid-cluster-acl" => "Допущенные ВО", "nordugrid-cluster-lrms-type" => "СУПО, тип", "nordugrid-cluster-lrms-version" => "СУПО, версия", "nordugrid-cluster-lrms-config" => "СУПО, подробности", "nordugrid-cluster-architecture" => "Архитектура", "nordugrid-cluster-opsys" => "Операционная система", "nordugrid-cluster-homogeneity" => "Однородность ресурса", "nordugrid-cluster-nodecpu" => "Процессор, тип (худший)", "nordugrid-cluster-nodememory" => "ОЗУ (Мб, наименьшее)", "nordugrid-cluster-totalcpus" => "Процессоры, всего", "nordugrid-cluster-cpudistribution" => "Процессоры:узлы", "nordugrid-cluster-benchmark" => "Эталонный тест", "nordugrid-cluster-sessiondir-free" => "Диск, доступно (Мб)", "nordugrid-cluster-sessiondir-total" => "Диск, весь объём (Мб)", "nordugrid-cluster-sessiondir-lifetime"=> "Время жизни Грид-сессии (мин)", "nordugrid-cluster-cache-free" => "Дисковый кэш, свободно (Мб)", "nordugrid-cluster-cache-total" => "Дисковый кэш, всего (Мб)", "nordugrid-cluster-runtimeenvironment" => "Рабочая среда", "nordugrid-cluster-localse" => "Локальный накопитель", "nordugrid-cluster-middleware" => "Грид-ПО", "nordugrid-cluster-totaljobs" => "Задачи, всего", "nordugrid-cluster-usedcpus" => "Процессоры, занятые", "nordugrid-cluster-queuedjobs" => "Задачи в очереди (УСТАРЕВШИЙ)", "nordugrid-cluster-prelrmsqueued" => "Грид-задачи, ждущие засылки", "nordugrid-cluster-location" => "Почтовый индекс", "nordugrid-cluster-owner" => "Владелец", "nordugrid-cluster-issuerca" => "Центр сертификации", "nordugrid-cluster-issuerca-hash" => "Хеш-код центра сертификации", "nordugrid-cluster-trustedca" => "Доверяемые центры сертификации", "nordugrid-cluster-nodeaccess" => "IP-соединение узлов", "nordugrid-cluster-gridarea" => "Адрес сессий (УСТАРЕВШИЙ)", "nordugrid-cluster-gridspace" => "Грид-диск (УСТАРЕВШИЙ)", "nordugrid-cluster-opsysdistribution" => "Дистрибутив ОС (УСТАРЕВШИЙ)", "nordugrid-cluster-runningjobs" => "Задачи в счёте (УСТАРЕВШИЙ)", "nordugrid-cluster-credentialexpirationtime" => "Срок действия сертификата", "nordugrid-queue-name" => "Имя очереди", "nordugrid-queue-comment" => "Комментарий", "nordugrid-queue-status" => "Состояние очереди", "nordugrid-queue-running" => "Все занятые процессоры", "nordugrid-queue-localqueued" => "Локальные задачи в очереди", "nordugrid-queue-prelrmsqueued" => "Грид-задачи, ждущие засылки", "nordugrid-queue-queued" => "Задачи в очереди (УСТАРЕВШИЙ)", "nordugrid-queue-maxrunning" => "Задачи в счёте (предел)", "nordugrid-queue-maxqueuable" => "Задачи в очереди (предел)", "nordugrid-queue-maxuserrun" => "Задачи на пользователя (предел)", "nordugrid-queue-maxcputime" => "Время процессора, наибольшее (мин)", "nordugrid-queue-mincputime" => "Время процессора, наименьшее (мин)", 
"nordugrid-queue-defaultcputime" => "Время процессора, по умолчанию (мин)", "nordugrid-queue-maxwalltime" => "Продолжительность, наибольшая (мин)", "nordugrid-queue-minwalltime" => "Продолжительность, наименьшая (мин)", "nordugrid-queue-defaultwalltime" => "Продолжительность, по умолчанию (мин)", "nordugrid-queue-schedulingpolicy" => "Правила планировки", "nordugrid-queue-totalcpus" => "Процессоры, всего", "nordugrid-queue-nodecpu" => "Процессор, тип", "nordugrid-queue-nodememory" => "ОЗУ (Мб)", "nordugrid-queue-architecture" => "Архитектура", "nordugrid-queue-opsys" => "Операционная система", "nordugrid-queue-homogeneity" => "Однородность очереди", "nordugrid-queue-gridrunning" => "Процессоры под грид-задачами", "nordugrid-queue-gridqueued" => "Грид-задачи в очереди", "nordugrid-queue-benchmark" => "Эталонный тест", "nordugrid-queue-assignedcpunumber" => "Процессоры (УСТАРЕВШИЙ)", "nordugrid-queue-assignedcputype" => "Тип процессора (УСТАРЕВШИЙ)", "nordugrid-job-globalid" => "Ярлык", "nordugrid-job-globalowner" => "Хозяин", "nordugrid-job-execcluster" => "Выполняющий кластер", "nordugrid-job-execqueue" => "Выполняющая очередь", "nordugrid-job-stdout" => "Стандартный выход", "nordugrid-job-stderr" => "Стандартная ошибка", "nordugrid-job-stdin" => "Стандартный вход", "nordugrid-job-reqcputime" => "Запрошенное процессорное время", "nordugrid-job-reqwalltime" => "Запрошенное время", "nordugrid-job-status" => "Состояние", "nordugrid-job-queuerank" => "Положение в очереди", "nordugrid-job-comment" => "Комментарий СУПО", "nordugrid-job-submissionui" => "Засылающий клиент", "nordugrid-job-submissiontime" => "Время засылки (GMT)", "nordugrid-job-usedcputime" => "Использованное процессорное время", "nordugrid-job-usedwalltime" => "Использованное время", "nordugrid-job-completiontime" => "Время окончания (GMT)", "nordugrid-job-sessiondirerasetime" => "Срок уничтожения (GMT)", "nordugrid-job-proxyexpirationtime" => "Окончание доверенности (GMT)", "nordugrid-job-usedmem" => "Использование ОЗУ (Кб)", "nordugrid-job-errors" => "Ошибки", "nordugrid-job-exitcode" => "Код возврата", "nordugrid-job-jobname" => "Имя", "nordugrid-job-runtimeenvironment" => "Рабочая среда", "nordugrid-job-cpucount" => "Запрошено процессоров", "nordugrid-job-executionnodes" => "Выполняющие узлы", "nordugrid-job-gmlog" => "Журнальная запись ГМ", "nordugrid-job-clientsoftware" => "Версия клиента", "nordugrid-job-rerunable" => "Перезапускаемость", "nordugrid-job-reqcput" => "Запрошенное время (УСТАРЕВШИЙ)", "nordugrid-job-gridlog" => "Грид-запись (УСТАРЕВШИЙ)", "nordugrid-job-lrmscomment" => "Комментарий СУПО (УСТАРЕВШИЙ)", "nordugrid-authuser-name" => "Имя", "nordugrid-authuser-sn" => "Субъект", "nordugrid-authuser-freecpus" => "Свободные ЦП", "nordugrid-authuser-diskspace" => "Диск, доступно (Мб)", "nordugrid-authuser-queuelength" => "Задачи пользователя в очереди", "nordugrid-se-name" => "Условное имя", "nordugrid-se-aliasname" => "Название", "nordugrid-se-type" => "Тип", "nordugrid-se-acl" => "Допущенные ВО", "nordugrid-se-freespace" => "Свободный объём (Мб)", "nordugrid-se-totalspace" => "Весь объём (Мб)", "nordugrid-se-url" => "Контактный адрес", "nordugrid-se-baseurl" => "Контактный адрес (УСТАРЕВШИЙ)", "nordugrid-se-accesscontrol" => "Контроль доступа", "nordugrid-se-authuser" => "Допущенные пользователи (DN)", "nordugrid-se-location" => "Почтовый индекс", "nordugrid-se-owner" => "Владелец", "nordugrid-se-middleware" => "Грид-ПО", "nordugrid-se-issuerca" => "Центр сертификации", "nordugrid-se-issuerca-hash" => "Хеш-код 
центра сертификации", "nordugrid-se-trustedca" => "Доверяемые центры сертификации", "nordugrid-se-comment" => "Комментарий", "nordugrid-rc-name" => "Доменное имя", "nordugrid-rc-aliasname" => "Название", "nordugrid-rc-baseurl" => "Контактный адрес", "nordugrid-rc-authuser" => "Допущенные пользователи (DN)", "nordugrid-rc-location" => "Почтовый индекс", "nordugrid-rc-owner" => "Владелец", "nordugrid-rc-issuerca" => "Сертификат выдан" ), "errors" => array( "1" => "Невозможно прочесть списки высшего уровня", "2" => "Ни один из местных списков не отзывается", "3" => " неверная конфигурация или истекло время запроса", "4" => "Нет Грид-задач", "5" => "Нет информации", "6" => "Служба недоступна", "7" => " - попробуйте обновить позже", "8" => "Нет информации об очереди", "9" => "Нет данных", "10" => "Нет пользователей", "11" => "Нет доступа к ресурсу", "12" => "не отзывается", "13" => "На настоящий момент нет задач пользователя ", "101" => " Время на связь с локальным списком: ", "102" => " с на соединение и ", "103" => " с на поиск", "104" => " с затрачено на поиск", "105" => "Перечисление ресурсов: ", "106" => "Опрошено списков верхнего уровня: ", "107" => "Получены географические координаты, просканировано ресурсов: ", "108" => " ресурсов упорядочено по геополитическому признаку", "109" => "Поиск атрибутов кластера", "110" => "Поиск атрибутов очереди", "111" => "Нет данных с ", "112" => " функционирует в стране: ", "113" => " не располагает ресурсами", "114" => " Время на связь с глобальным списком: ", "115" => "Игнорируется ресурс: ", "116" => "не соответствует типу ", "117" => "Проверка связи: ", "118" => "есть", "119" => "На данный момент обнаружено ресурсов типа ", "120" => "Ошибка LDAP при поиске на ", "121" => "-состояние на ", "122" => "Заблокирован: ", "123" => "Обнаружен регистрант ", "124" => "Поиск атрибутов накопителей", "125" => "Поиск пользователей", "126" => "Поиск задач", "127" => " запустил(а) задачу ", "128" => " не будучи допущенным(ой)", "129" => "Нет информации об объекте: ошибка ", "130" => " Время на связь с глобальным списком: ", "301" => "Перезагрузить", "302" => "Печать", "303" => "Помощь", "304" => "Закрыть", "305" => "Красный", "306" => "Серый", "307" => "Все пользователи", "308" => "Активные пользователи", "309" => "Поиск", "310" => "Накопители", "311" => "Виртуальные организации", "312" => "Флаг страны: ", "313" => " Грид-процессов и ", "314" => " местных процессов", "401" => "Процессы", "402" => "Грид", "403" => "местные", "404" => "Мир", "405" => "ВСЕГО", "406" => " объектов", "407" => "куча", "408" => " Гб", "409" => " ВСЕ", "410" => "Кластер", "411" => "Очередь", "412" => "Задача", "413" => "Пользователь", "414" => "Накопитель", "415" => "Каталог реплик", "416" => "Задайте атрибуты для просмотра; выбранный объект: ", "417" => "Поиск проводится для логического И всех выражений", "418" => "Не заполняйте правое поле, если фильтр не нужен", "419" => "Просмотр ресурсов или объектов по выбору", "420" => "Выделенное имя", "421" => "Может использовать ", "422" => " кластеров", "423" => "Ресурс / объект:", "424" => "Кол.-во атрибутов (6 по ум.):", "425" => "Объект", "426" => "Дальше", "427" => "Выберите", "428" => "Очистить", "429" => "ПОКАЗАТЬ" ), // Country name conversion, no postcode! 
"tlconvert" => array ( "Australia" => "Австралия", "Austria" => "Австрия", "Armenia" => "Армения", "Algeria" => "Алжир", "Belgium" => "Бельгия", "Bulgaria" => "Болгария", "Canada" => "Канада", "China" => "Китай", "Czechia" => "Чехия", "Denmark" => "Дания", "Estonia" => "Эстония", "Finland" => "Финляндия", "France" => "Франция", "Georgia" => "Грузия", "Germany" => "Германия", "Greece" => "Греция", "Hungary" => "Венгрия", "Iceland" => "Исландия", "Ireland" => "Ирландия", "Italy" => "Италия", "Japan" => "Япония", "Latvia" => "Латвия", "Lithuania" => "Литва", "Morocco" => "Марокко", "Netherlands" => "Нидерланды", "Norway" => "Норвегия", "Poland" => "Польша", "Portugal" => "Португалия", "Romania" => "Румыния", "Russia" => "Россия", "SriLanka" => "Шри-Ланка", "Sweden" => "Швеция", "Slovakia" => "Словакия", "Slovenia" => "Словения", "Switzerland" => "Швейцария", "Turkey" => "Турция", "UK" => "Великобритания", "Ukraine" => "Украина", "USA" => "США", "World" => "Мир" ) ); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315734025204 xustar000000000000000030 mtime=1513200604.893073507 30 atime=1513200651.612644905 30 ctime=1513200663.739793225 nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/Makefile.in0000644000175000002070000004362413214315734025263 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/ldap-monitor/lang DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(monitorlangdir)" DATA = $(monitorlang_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = 
@ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = 
@LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ 
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ monitorlangdir = @ldap_monitor_prefix@/lang monitorlang_DATA = $(srcdir)/*.inc EXTRA_DIST = $(monitorlang_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/ldap-monitor/lang/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/ldap-monitor/lang/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-monitorlangDATA: $(monitorlang_DATA) @$(NORMAL_INSTALL) test -z "$(monitorlangdir)" || $(MKDIR_P) "$(DESTDIR)$(monitorlangdir)" @list='$(monitorlang_DATA)'; test -n "$(monitorlangdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(monitorlangdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(monitorlangdir)" || exit $$?; \ done uninstall-monitorlangDATA: @$(NORMAL_UNINSTALL) @list='$(monitorlang_DATA)'; test -n "$(monitorlangdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(monitorlangdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(monitorlangdir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if 
test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(monitorlangdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-monitorlangDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-monitorlangDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man \ install-monitorlangDATA install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am uninstall uninstall-am uninstall-monitorlangDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/PaxHeaders.7502/no.inc0000644000000000000000000000012412050701227024240 xustar000000000000000027 mtime=1352893079.143025 27 atime=1513200575.708716 30 ctime=1513200663.744793286 nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/no.inc0000644000175000002070000013711012050701227024310 0ustar00mockbuildmock00000000000000 N/A indikerer at eier ikke har tildelt jobben et navn.
    X indikerer at eier har avbrutt jobben.
    ! indikerer at jobben ikke ble fullført.
    Klikk på et navn for å få en detaljert beskrivelse av jobben."; $str_nam = "Brukernavn som spesifisert i det personlige sertifikatet. Klikk på et navn for resurser tilgjengelige for denne brukeren og dennes jobber i systemet."; $str_sta = "Jobbstatus som returnert av gridmanageren (GM) og lokalt resursmanagementsystem LRMS. Kronologisk er tilstandene :
    ACCEPTED – jobben er sendt, men ikke behandlet.
    PREPARING – inputfiler hentes.
    SUBMITTING – forhandlinger med LRMS pågår
    INLRMS – jobben er overført til LRMS. Informasjonssystemet lagrer lokal status. Mulige tilstander er :
    : Q – jobben er i køen
    : U – jobben er satt på vent på en opptatt maskin (PBSPro)
    : S – jobben er satt på vent (Condor)
    : R, run – jobben kjøres.
    : E – jobben avsluttes (PBS)
    FINISHING – outputfiler overføres av GM.
    FINISHED – jobben er avsluttet; tidsstempel legges til av informasjonssystemet.
    CANCELING – jobben avbrytes.
    DELETED – jobben er ikke ryddet opp av eier, men slettet av GM på grunn av overgått lagringstid.
    Alla disse tilstandene kan meldes med prefikset PENDING: som betyr at GM prøver å flytte jobben over i neste tilstand."; $str_tim = "CPU-tid i minutter brukt av jobben."; $str_mem = "Minne i KB brukt av jobben."; $str_cpu = "Antall prosessorer brukt av jobben."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Gridmonitor", "help" => "
    Denne siden viser alle klynger (sites) som har registrert seg i indekstjenesten til ARC, sortert etter land og deretter maskinnavn. Følgende klyngeparametere listes : klyngealias, total CPU-kapasitet og antall kjørende og ventende jobber, både lokale jobber og gridjobber. Bruk søkefunksjonen hvis annen informasjon om klynger, køer, jobber eller lignende er ønsket.
    Land
    ".$clickable.". Flagg og navn på land hentet fra tilgjengelige resursbeskrivelser. Klikk for å få opp informasjon om et land.
    Klynger
    ".$clickable.". Klyngealias tildelt av eier. Maksimalt vises 22 tegn. Klikk på aliaset for en detaljert klyngebeskrivelse.
    CPU-er
    Totalt antall CPU-er i en klynge. OBS! Muligens er bare noen av disse tilgjengelige for gridbrukere.
    Belastning (prosesser: grid + lokalt)
    ".$clickable.". Relativ klyngebelastning som tilsvarer antall opptatte CPU-er. Grå felt viser antall prosessorer som kjører lokale jobber, røde felt viser antall CPU-er som kjører gridjobber. Klikk på feltet for en detaljert liste over alle gridjobber som kjøres på klyngen, inklusive antall prosessorer per jobb.
    Ventende
    ".$clickable.". Totalt antall jobber som venter på klyngen, vises som antall ventende gridjobber pluss antall ventende lokale jobber. Klikk på det første sifferet for å liste ventende gridjobber på klyngen.
    ", "Land" => 30, "Klynge" => 160, "CPU-er" => 10, "Belastning (prosesser: grid + lokalt)" => 210, "Ventende" => 10 ), "clusdes" => array("0" => "Resursinformasjon for", "help" => "
    Attributt
    ".$clickable.". Klyngeattributtnavn".$str_att."
    Verdi
    ".$str_val."
    ".$clickable.". Klyngeeiers navn på batchkøene som er tilgjengelige for ARC brukere.".$str_que."
    Status
    Køstatus. Fungerende køer viser normalt status active.
    Tidsgrenser (min)
    Tidsgrense for jobblengde per kø, hvis definert, i CPU-minutter. Den første verdien er den nedre grensen, den andre den øvre. Hvis ingen grenser er definert, dvs. alle jobber er tillatt, vises N/A
    Kjøres
    Antall kjørende jobber. Totalt antall jobber vises med antall prosessorer med gridjobber i parentes, f.eks. (Grid: 12). OBS! For parallelle multiprosessorjobber kan nummeret i parentes være større enn antall jobber.
    Køer
    Antall jobber i køen. Totalt antall jobber vises med gridjobber i parentes, f.eks. (Grid: 235)
    ", "Kø" => 0, "Mapping Queue" => 0, "Status" => 0, "Tidsgrenser (min)" => 0, "CPU-er" => 0, "Kjøres" => 0, "Køer" => 0 ), "jobstat" => array("0" => "Jobber på:Jobb-ID", "help" => "
    JOBBLISTE:
    Jobbnavn
    ".$clickable.". ".$str_job."
    Eier
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    ".$clickable.". Navn på batchkø hvor jobben kjøres. ".$str_que."
    CPU-er
    ".$str_cpu."
    JOBBINFORMASJON:
    Attributt
    ".$clickable.". Jobbattributtnavn".$str_att."
    Verdi
    ".$str_val."
    ", "Jobbnavn" => 0, "Eier" => 0, "Status" => 0, "CPU (min)" => 0, "Kø" => 0, "CPU-er" => 0 ), "volist" => array("0" => "Virtuelle organisasjoner", "help" => "
    Virtuell organisasjon VO
    ".$clickable.". En gruppe brukere som ofte arbeider med det samme og bruker de samme resursene. En VO er autorisert på minst en ARC klynge. Klikk på navnet for å få en liste over medlemmene.
    Medlemmer
    Antall medlemmer.
    Tjener
    LDAP-tjener som huser databasen med medlemskapene.
    ", "Virtuell organisasjon" => 0, "Medlemmer" => 0, "Tjener" => 0 ), "vousers" => array("0" => "Gridbrukerbase", "help" => "
    Navn
    ".$clickable.". ".$str_nam."
    Tilknytning
    Brukerens hjemmeinstitutt registrert av en VO manager. Kan være tomt.
    E-post
    ".$clickable.". Brukerens e-post registrert av en VO-manager. Kan være tomt. Klikk på adressen for å sende en e-post til brukeren.
    ", "#" => 0, "Navn" => 0, "Tilknytning" => 0, "E-post" => 0 ), "userlist" => array("0" => "Informasjon om", "help" => "
    Klynge:kø
    ".$clickable.". Navn på klynge og dens respektive køer (separert med et kolon, ":") som brukerern er autorisert til å sende jobber til. Hvis brukeren ikke er autorisert vises meldingen "Not authorised at host ...". Klikk på klyngenavn for å få detaljert klyngebeskrivelse. Klikk på kønavn for å få detaljert købeskrivelse.
    Ledige CPU-er
    Antall ledige CPU-er i køen for denne brukeren i øyeblikket, iblant med en øvre tidsgrense i minutter. F.eks. "3" betyr tre CPU-er tilgjengelige for en jobb med ubegrenset kjøringstid; "4:360" indikerer at det finnes fire CPU-er tilgjengelige for jobber kortere enn seks timer; "10:180 30" betyr at det finnes ti CPU-er tilgjengelige for jobber som ikke overgår tre timer, pluss 30 CPU-er tilgjengelige for jobber av valgfri lengde; "0" betyr at det ikke finnes noen CPU-er tilgjenglige for øyeblikket og at jobben kommer til å bli satt i kø.
    Ventende jobber
    Antall brukerens forventede jobber foran i køen for denne brukeren. "0" betyr at jobben forventes å kjøres umiddelbart. OBS! Dette er kun et estimat som kan overkjøres av lokale regler.
    Ledig disk (MB)
    Diskplass tilgjengelig for brukeren i en gitt kø (i megabyte). OBS! Dette er kun et estimat da de fleste klynger ikke tilbyr faste diskkvoter.
    Jobbnavn
    ".$clickable.". ".$str_job."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Klynge
    ".$clickable.". Navn på klynge hvor jobben kjøres / ble kjørt. Klikk på klyngenavn for detaljert informasjon om klyngen.
    ".$clickable.". Navn på batchkøen hvor jobben kjøres / ble kjørt. ".$str_que."
    CPU-er
    ".$str_cpu."
    ", "" => 0, "Jobbnavn" => 0, "Status" => 0, "CPU (min)" => 0, "Klynger" => 0, "Kø" => 0, "CPU-er" => 0 ), "attlist" => array("0" => "Attributtverdi", "help" => "
    Objekt
    ".$clickable.". Navn på objektet hvis attributter vises. Det kan være et klyngenavn, et kønavn, et jobbnavn, et brukernavn osv. Klikk på navnet for å få en detaljert beskrivelse av objektet.
    Attributt
    For hvert objekt kan en eller flere attributtverdier listes. Kolonnetittelen er det menneskelesbare attributtnavnet (bortsett fra noen MDS-spesifikke attributter), og kolonnens innhold er attributtverdiene per objekt slik de er registrert i informasjonssystemet.
    ", "Objekt" => 0, "Attributt" => 0 ), "quelist" => array("0" => "Kø", "help" => "
    Attributt
    ".$clickable.". Køattributtnavn".$str_att."
    Verdi
    ".$str_val."
    Jobbnavn
    ".$clickable.". ".$str_job."
    Eier
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Minne (KB)
    ".$str_mem."
    CPU-er
    ".$str_cpu."
    ", "" => 0, "Jobbnavn" => 0, "Eier" => 0, "Status" => 0, "CPU (min)" => 0, "Minne (KB)" => 0, "CPU-er" => 0 ), "sestat" => array("0" => "Lagringselementer", "help" => "
    Alias
    Lagringselementets alias som angitt i informasjonssystemet. Det vises maksimalt 15 tegn.
    Total plass
    Total diskplass (GB).
    Ledig plass
    Diskplass tilgjengelig for øyeblikket (GB).
    Navn
    Lagringselementets navn. Både logisk navn og maskinnavn (separert med et kolon, ":") angis. Det logiske navnet brukes av informasjonssystemet for å skille mellom ulike lagringselementer på samme maskin.
    Base-URL
    Lagringselementets URL, oftest en gsiftp:// protokoll. Bruk URL som basis for tilgang til filer.
    Type
    Lagringselementets type. "gridftp-based" indikerer disklagring med gridftp-grensesnitt.
    ", "#" => 0, "Alias" => 0, // "Total plass" => 0, "Ledig/total plass" => 0, "Navn" => 0, "Base-URL" => 0, "Type" => 0 ), "allusers" => array("0" => "Autoriserte gridbrukere:Aktive gridbrukere", "help" => "
    Navn
    ".$clickable.". ".$str_nam."
    Tilknytning
    Brukerens tilknytning som spesifisert i det personlige sertifikatet.
    Jobber
    Totalt antall jobber som denne brukeren har i systemet (kjørende, ventende, ferdige eller slettede).
    Klynger
    Viser antall klynger som denne brukeren er autorisert på.
    ", "#" => 0, "Navn" => 0, "Tilknytning" => 0, "Jobber" => 0, "Klynger" => 0 ), "userres" => array("0" => "", "Klynge:kø" => 0, "Ledige CPU-er" => 0, "Ventende jobber" => 0, "Ledig disk (MB)" => 0 ), "ldapdump" => array("0" => "", "Attributt" => 0, "Verdi" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info gyldig f.o.m. (GMT)", "Mds-validto" => "Info gyldig t.o.m. (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Front-end domenenavn", "nordugrid-cluster-aliasname" => "Klyngealias", "nordugrid-cluster-contactstring" => "Kontaktstreng", "nordugrid-cluster-interactive-contactstring" => "Interaktiv kontakt", "nordugrid-cluster-comment" => "Kommentar", "nordugrid-cluster-support" => "E-postkontakt", "nordugrid-cluster-acl" => "Autoriserte VO-er", "nordugrid-cluster-lrms-type" => "LRMS-type", "nordugrid-cluster-lrms-version" => "LRMS-versjon", "nordugrid-cluster-lrms-config" => "LRMS-detaljer", "nordugrid-cluster-architecture" => "Arkitektur", "nordugrid-cluster-opsys" => "Operativsystem", "nordugrid-cluster-homogeneity" => "Homogen klynge", "nordugrid-cluster-nodecpu" => "CPU-type (langsomste)", "nordugrid-cluster-nodememory" => "Minne (MB, minste)", "nordugrid-cluster-totalcpus" => "CPU-er, totalt", "nordugrid-cluster-cpudistribution" => "CPU:maskiner", "nordugrid-cluster-benchmark" => "Benchmark", "nordugrid-cluster-sessiondir-free" => "Diskplass tilgjengelig (MB)", "nordugrid-cluster-sessiondir-total" => "Diskplass totalt (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Gridsesjonens levetid (min)", "nordugrid-cluster-cache-free" => "Cachestørrelse tilgjengelig (MB)", "nordugrid-cluster-cache-total" => "Cachestørrelse totalt (MB)", "nordugrid-cluster-runtimeenvironment" => "Runtimemiljø", "nordugrid-cluster-localse" => "Lagringselement, lokalt", "nordugrid-cluster-middleware" => "Grid-middleware", "nordugrid-cluster-totaljobs" => "Jobber, totalt antall", "nordugrid-cluster-usedcpus" => "CPU-er, opptatte", "nordugrid-cluster-queuedjobs" => "Jobber, ventende", "nordugrid-cluster-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-cluster-location" => "Postnummer", "nordugrid-cluster-owner" => "Eier", "nordugrid-cluster-issuerca" => "Sertifikatutstedere", "nordugrid-cluster-issuerca-hash" => "Certificate issuer's hash", "nordugrid-cluster-trustedca" => "Trusted certificate issuers", "nordugrid-cluster-nodeaccess" => "Node-IP-Oppkobling", "nordugrid-cluster-gridarea" => "Sesjonsområde (Utgått)", "nordugrid-cluster-gridspace" => "Griddiskplass (Utgått)", "nordugrid-cluster-opsysdistribution" => "OS-distribusjon (Utgått)", "nordugrid-cluster-runningjobs" => "Kjørende jobber (Utgått)", "nordugrid-cluster-credentialexpirationtime" => "Credential expiration time", "nordugrid-queue-name" => "Kønavn", "nordugrid-queue-comment" => "Kommentar", "nordugrid-queue-status" => "Køstatus", "nordugrid-queue-running" => "CPU-er, opptatte", "nordugrid-queue-localqueued" => "Local jobs, queued", "nordugrid-queue-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-queue-queued" => "Jobber, ventende (Utgått)", "nordugrid-queue-maxrunning" => "Jobber, kjørende (max)", "nordugrid-queue-maxqueuable" => "Jobber, ventende (max)", "nordugrid-queue-maxuserrun" => "Jobber per unixbruker (max)", "nordugrid-queue-maxcputime" => "CPU-tid, max. (min)", "nordugrid-queue-mincputime" => "CPU-tid, min. (min)", "nordugrid-queue-defaultcputime" => "CPU-tid, spesifisert (min)", "nordugrid-queue-maxwalltime" => "Klokketid, max. 
(min)", "nordugrid-queue-minwalltime" => "Klokketid, min. (min)", "nordugrid-queue-defaultwalltime" => "Klokketid, spesifisert (min)", "nordugrid-queue-schedulingpolicy" => "Scheduleringspolicy", "nordugrid-queue-totalcpus" => "CPU-er, totalt", "nordugrid-queue-nodecpu" => "CPU-type", "nordugrid-queue-nodememory" => "Minne (MB)", "nordugrid-queue-architecture" => "Arkitektur", "nordugrid-queue-opsys" => "Operativsystem", "nordugrid-queue-homogeneity" => "Homogen kø", "nordugrid-queue-gridrunning" => "CPUs occupied by Grid jobs", "nordugrid-queue-gridqueued" => "Gridjobber, ventende", "nordugrid-queue-benchmark" => "Benchmark", "nordugrid-queue-assignedcpunumber" => "CPU-er per kø (FÖRLEGAD)", "nordugrid-queue-assignedcputype" => "CPU-type (Utgått)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Eier", "nordugrid-job-execcluster" => "Eksekveringsklynge", "nordugrid-job-execqueue" => "Eksekveringskø", "nordugrid-job-stdout" => "Standard outputfil", "nordugrid-job-stderr" => "Standard errorfil", "nordugrid-job-stdin" => "Standard inputfil", "nordugrid-job-reqcputime" => "Forlangt CPU-tid", "nordugrid-job-reqwalltime" => "Forlangt klokketid", "nordugrid-job-status" => "Status", "nordugrid-job-queuerank" => "Plass i køen", "nordugrid-job-comment" => "LRMS-kommentar", "nordugrid-job-submissionui" => "Innsendingsmaskin", "nordugrid-job-submissiontime" => "Innsendingstid (GMT)", "nordugrid-job-usedcputime" => "Brukt CPU-tid", "nordugrid-job-usedwalltime" => "Brukt klokketid", "nordugrid-job-completiontime" => "Avslutningstid (GMT)", "nordugrid-job-sessiondirerasetime" => "Slettetid (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxy forfallstid (GMT)", "nordugrid-job-usedmem" => "Brukt minne (KB)", "nordugrid-job-errors" => "Feil", "nordugrid-job-exitcode" => "Returkode", "nordugrid-job-jobname" => "Navn", "nordugrid-job-runtimeenvironment" => "Runtimemiljø", "nordugrid-job-cpucount" => "Forlangte CPU-er", "nordugrid-job-executionnodes" => "Ekseekveringsnoder", "nordugrid-job-gmlog" => "GM loggfil", "nordugrid-job-clientsoftware" => "Klientversjon", "nordugrid-job-rerunable" => "Omkjørbar", "nordugrid-job-reqcput" => "Forlangt tid (Utgått)", "nordugrid-job-gridlog" => "Gridloggfil (Utgått)", "nordugrid-job-lrmscomment" => "LRMS-kommentar (Utgått)", "nordugrid-authuser-name" => "Navn", "nordugrid-authuser-sn" => "Subjektnavn", "nordugrid-authuser-freecpus" => "Ledige CPU-er", "nordugrid-authuser-diskspace" => "Ledig diskplass (MB)", "nordugrid-authuser-queuelength" => "User's queued jobs", "nordugrid-se-name" => "Navn", "nordugrid-se-aliasname" => "Lagringselementalias", "nordugrid-se-type" => "Lagringselementtype", "nordugrid-se-acl" => "Autoriserte VO-er", "nordugrid-se-freespace" => "Ledig plass (MB)", "nordugrid-se-totalspace" => "Totalt utrymme (MB)", "nordugrid-se-url" => "Kontakt-URL", "nordugrid-se-baseurl" => "Kontakt-URL (Utgått)", "nordugrid-se-accesscontrol" => "Tilgangskontroll", "nordugrid-se-authuser" => "Autorisert bruker", "nordugrid-se-location" => "Postnummer", "nordugrid-se-owner" => "Eier", "nordugrid-se-middleware" => "Grid-middleware", "nordugrid-se-issuerca" => "Sertifikatutsteder", "nordugrid-se-issuerca-hash" => "Certificate issuer's hash", "nordugrid-se-trustedca" => "Trusted certificate issuers", "nordugrid-se-comment" => "Kommentar", "nordugrid-rc-name" => "Domenenavn", "nordugrid-rc-aliasname" => "Replikkakatalog-Alias", "nordugrid-rc-baseurl" => "Kontakt-URL", "nordugrid-rc-authuser" => "Autorisert bruker (DN)", "nordugrid-rc-location" => 
"Postnummer", "nordugrid-rc-owner" => "Eier", "nordugrid-rc-issuerca" => "Sertifikatutsteder" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Kan ikke lese toppnivå indekstjenere", "2" => "Ingen av de lokale indekstjenerne returnerte oppkoblingen", "3" => " dålig konfigurering eller begäran drog över tiden", "4" => "Ingen gridjobber funnet", "5" => "Ingen informasjon funnet", "6" => "Tjener utilgjengelig", "7" => " - reload senere", "8" => "Ingen køinformasjon funnet", "9" => "Ingen poster funnet", "10" => "Ingen brukere funnet", "11" => "Ikke autorisert på ", "12" => "svarer ikke", "13" => "Ingen nye jobber funnet for ", // debug messages "101" => " Monitor timeout for GRIS: ", "102" => " sek for oppkobling og ", "103" => " sek for søk", "104" => " sek brukere for søk", "105" => "Viser resurser også i ", "106" => "Spurte toppnivå indekstjenere: ", "107" => "Fikk geografiske data, skannede klynger: ", "108" => " klynger sortert etter geografiske data", "109" => "Søk etter klyngeattributter", "110" => "Søk etter køattributter", "111" => "Ingen data fra ", "112" => " Er oppe i ", "113" => " har ingen resurser å tilby", "114" => " Monitor timeout for GIIS: ", "115" => "Hopper over GRIS: ", "116" => "ikke en ", "117" => "Verifiserer oppkobling: ", "118" => "OK", "119" => "Hittil, detekterte resurser av slag ", "120" => "LDAP-feil ved søk etter ", "121" => " status ved ", "122" => "Svartelistede: ", "123" => "Registrert funnet for ", "124" => "Søk etter lagringselementattributter", "125" => "Søk etter brukere", "126" => "Søk etter jobb", "127" => " har jobb ", "128" => " uten være autorisert", "129" => "Kan ikke lade objektdata: feil ", "130" => " Monitor timeout for EMIR: ", // icon titles "301" => "Reload", "302" => "Skriv ut", "303" => "Hjelp", "304" => "Lukk", "305" => "Rød", "306" => "Grå", "307" => "Alle brukere", "308" => "Aktive brukere", "309" => "Søk", "310" => "Lagringsenheter", "311" => "VO-er", "312" => "Flagg for ", "313" => " gridprosesser og ", "314" => " lokale prosesser", // auxilliary strings "401" => "Prosesser", "402" => "Grid", "403" => "Lokalt", "404" => "Verden", "405" => "TOTALT", "406" => " klynger", "407" => "en masse", "408" => " GB", "409" => " ALLE", "410" => "Klynge", "411" => "Kø", "412" => "Jobb", "413" => "Bruker", "414" => "Lagringsenhet", "415" => "Replikkakatalog", "416" => "Definer søkeattributter for objekt : ", "417" => "Det søkes logisk OG av alle uttrykkene.", "418" => "La det høyre feltet stå tomt for å vise alt.", "419" => "Vis resurser eller objekt som samsvarer med ditt valg", "420" => "Særskilt navn", "421" => "Kan bruke totalt ", "422" => " klynger", "423" => "Resurs / objekt:", "424" => "Antall attributter (standard er 6):", "425" => "Objekt", "426" => "Neste", "427" => "Velg", "428" => "Gjenopprett", "429" => "VIS" ), // Post code conversion "tlconvert" => array ( "Australia" => "Australia", "Austria" => "Österrike", "Armenia" => "Armenia", "Algeria" => "Algerie", "Belgium" => "Belgia", "Bulgaria" => "Bulgaria", "Canada" => "Canada", "China" => "Kina", "Czechia" => "Tsjekkia", "Denmark" => "Danmark", "Estonia" => "Estland", "Finland" => "Finland", "France" => "Frankrike", "Georgia" => "Georgia", "Germany" => "Tyskland", "Greece" => "Hellas", "Hungary" => "Ungarn", "Iceland" => "Island", "Ireland" => "Irland", "Italy" => "Italia", "Japan" => "Japan", "Latvia" => "Lettland", "Lithuania" => "Litauen", "Morocco" => "Marokko", "Netherlands" => "Nederland", "Norway" => "Norge", "Poland" => "Polen", "Portugal" => "Portugal", 
"Romania" => "Romania", "Russia" => "Russland", "SriLanka" => "Sri Lanka", "Sweden" => "Sverige", "Slovakia" => "Slovakia", "Slovenia" => "Slovenia", "Switzerland" => "Sveits", "Turkey" => "Tyrkia", "UK" => "Storbritannia", "Ukraine" => "Ukraina", "USA" => "USA", "World" => "Verden" ) ); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/PaxHeaders.7502/da.inc0000644000000000000000000000012412050701227024210 xustar000000000000000027 mtime=1352893079.143025 27 atime=1513200575.689716 30 ctime=1513200663.740793237 nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/da.inc0000644000175000002070000014253012050701227024262 0ustar00mockbuildmock00000000000000 N/A viser at brugeren ikke tildelte et navn.
    X viser at jobbet er slået ihjel af ejeren.
    ! viser at jobbet fejlede i systemet.
    Tryk på et navn for at få en detaljeret beskrivelse af jobbet."; $str_nam = "Navn på brugeren som angivet i det personlige certifikat. Tryk på navnet for at få en liste af alle ressourcer tilgængelige for denne bruger og alle brugerens job i systemet."; $str_sta = "Jobstatus som returneret af Gridmanageren (GM) og LRMS. Tilstandene er i sekventiel rækkefølge:
    ACCEPTED – jobbet er overført til systemet men endnu ikke behandlet
    PREPARING – inputfilerne hentes
    SUBMITTING – der udveksles data med LRMS
    INLRMS – jobbet overføres til LRMS; intern status tilføjes af informationssystemet. Mulige tilstande er:
    : Q – jobbet venter i kø
    : U – jobbets udførelse er udskudt på en travl knude (PBSPro)
    : S – jobbets udførelse er udskudt (Condor)
    : R, run – jobbet kører
    : E – jobbet er færdigt (PBS)
    FINISHING – uddatafilerne overføres af GM
    FINISHED – jobbet er færdigt; tidsstemplet tilføjes af informationssystemet
    CANCELING – jobbet afbrydes
    DELETED – jobbet er ikke afryddet på anmodning af brugeren men fjernet af GM fordi udløbstiden er passeret
    Hver tilstand kan rapporteres med et PENDING præfiks som betyder at GM forsøger at rykke jobbet op i næste tilstand"; $str_tim = "CPU-tid brugt af jobbet, minutter."; $str_mem = "Lager (RAM) brugt af jobbet, KB"; $str_cpu = "Antal processorer brugt af jobbet."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Grid Monitor", "help" => "
    Denne skærmside viser alle steder, der registrerer sig hos den øverste ARC indekseringstjeneste sorteret først efter land så efter maskinnavn. Udvalgte lokale parametre overvåges: klyngealias, kø, job, o.s.v. Brug "Search" tjenesten hvis du vil sammenligne klynger, køer, job, osv.
    Land
    ".$clickable.". Landets flag og navn fra tilgængelige ressourcer. Tryk for at vise landeinformation.
    Klynge
    ".$clickable.". Klyngealias som tildelt af ejeren. Højst 22 tegn vises. Tryk på aliaset for en detaljeret klyngebeskrivelse.
    CPU-er
    Totalt antal CPU-er i en klynge. NB! Kun en del af disse kan bruges af Grid-brugere.
    Belastning (processer:Grid + lokale)
    ".$clickable.". Relativ klyngebelastning, svarende til antallet af optagede CPU-er . Grå felter viser processorer optaget af lokale job, røde felter viser CPU-er optaget af Grid-job. Tryk på feltet for at få en detaljeret liste med alle kørende Grid-job på klyngen , inklusiv antallet af processorer per job.
    job I Kø
    ".$clickable.". Antal job i køen på klyngen, vist som antallet Grid-job plus antal lokale job i køen. Tryk på det første antal for at få en liste af Grid-job i køen på klyngen
    ", "Land" => 30, "Sted" => 160, "CPU-er" => 10, "Belastning (processer: Grid+lokale)" => 210, "I kø" => 10 ), "clusdes" => array("0" => "Ressourcedetaljer for", "help" => "
    Attribut
    ".$clickable.". Klyngeattributnavn".$str_att."
    Værdi
    ".$str_val."
    ".$clickable.". Navn på batchkøer til rådighed for ARC brugere, som angivet af klyngeejere. ".$str_que."
    Status
    Køstatus. Fungerende køer viser som regel active status.
    CPU (min)
    Tidsgrænse for varigheden af job per kø, hvis sat, i CPU-minutter. Den første værdi er den nedre grænse, den anden er den øvre grænse. Hvis der ikke er en grænse (job med enhver varighed accepteres), vises N/A.
    Kørende
    Antal job der udføres i køen. Det totale antal job vises, med antallet af processorer optaget af Grid-job vist i parentes, fx (Grid: 12). NB! For parallelle multiprocessorjob kan tallet i parentes være større end antallet af job.
    I kø
    Antallet af job, der venter i køen på at komme til at køre. Det totale antal vises, med Grid-job i parentes, fx (Grid: 235)
    ", "Kø" => 0, "Mapping Queue" => 0, "Status" => 0, "Grænse (min)" => 0, "CPU-er" => 0, "kørende" => 0, "I Kø" => 0 ), "jobstat" => array("0" => "Job på:Job ID", "help" => "
    JOBLISTE:
    Job navn
    ".$clickable.". Navn på et job som tildelt af ejeren. Hvis der ikke er tildelt et navn, vises "N/A". Tryk på et navn for at få en detaljeret beskrivelse af jobbet.
    Ejer
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    ".$clickable.". Navn på batchkøen, hvor jobbet udføres. ".$str_que."
    CPU'er
    ".$str_cpu."
    JOBDETALJER:
    Attribut
    ".$clickable.". Jobattributens navn".$str_att."
    Værdi
    ".$str_val."
    ", "Job navn" => 0, "Ejer" => 0, "Status" => 0, "CPU (min)" => 0, "Kø" => 0, "CPU'er" => 0 ), "volist" => array("0" => "Virtuel Organisations", "help" => "
    Virtuel Organisation
    ".$clickable.". Group of users, typically sharing common activities and resources, authorised at at least one ARC-enabled site. Click on the navn to get the list of group members.
    Members
    Number of group members.
    Served by
    LDAP server that supports the group membership database.
    ", "Virtuel Organisation" => 0, "Medlemmer" => 0, "Server" => 0 ), "vousers" => array("0" => "Gridbrugerbase", "help" => "
    Navn
    ".$clickable.". ".$str_nam."
    Tilknytning
    Brugerens hjemmeinstitut som anmeldt af VO bestyreren. Kan være tom.
    E-mail
    ".$clickable.". Brugerens e-mail som anmeldt af VO bestyreren. Kan være tom. Tryk på adressen for at sende en e-mail til brugeren.
    ", "#" => 0, "Navn" => 0, "Tilknytning" => 0, "E-mail" => 0 ), "userlist" => array("0" => "Information om", "help" => "
    Klynge:kø
    ".$clickable.". Navne på klynger og respektive køer (adskilt af kolon, ":") hvor en bruger er autoriseret til at indlevere job. Hvis brugeren ikke er autoriseret, vises beskeden: "Ikke autoriseret på vært ...". Tryk på et klyngenavn for at få en detaljeret beskrivelse af klyngen. Tryk på et kønavn for at få en detaljeret beskrivelse af køen
    Ledige CPU'er
    ".$clickable.". Det aktuelle antal ledige CPU'er i en given kø til rådighed for brugeren, evt. tilføjet den øvre grænse for varigheden af jobs (i minutter). Fx betyder "3" at der er 3 CPU'er til rådighed for et job med ubegrænset køretid; "4:360" angiver at der er 4 CPU'er til rådighed for job, der kører mindre end 6 timer; "10:180 30" betyder at der er 10 CPU'er til rådighed for job, der kører mindre end 3 timer, samt 30 CPU'er til rådighed for jobs, der kan køre en vilkårlig tid; "0" betyder at der ikke er nogen CPU'er til rådighed for tiden, og nye job vil havne i en ventekø.
    Job i ventekø
    Antal brugerjob, der forventes at være foran en brugers nye job i en ventekø (for denne bruger). Antallet "0" betyder at jobbet forventes udført med det samme. NB! Det er kun et estimat, som kan tilsidesættes af lokale regler.
    Ledig diskplads (MB)
    Diskplads til rådighed for brugeren i en given kø (i MegaBytes). NB! Det er kun et estimat, da de færreste klynger kan tilbyde faste diskpladskvoter.
    Jobnavn
    ".$clickable.". ".$str_job."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Klynge
    ".$clickable.". Navn på klyngen hvor jobbet udføres. Tryk på et klyngenavn for at få detaljerede oplysninger om klyngen.
    ".$clickable.". Navn på batchkøen hvor jobbet udføres eller blev udført. ".$str_que."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Jobnavn" => 0, "Status" => 0, "CPU (min)" => 0, "Klynge" => 0, "Kø" => 0, "CPU'er" => 0 ), "attlist" => array("0" => "Attributværdier", "help" => "
    Objekt
    ".$clickable.". Navn på objektet hvis attributter vises. Det kan være et klyngenavn, et klyngekønavn, et jobnavn, et brugernavn osv. Tryk på navneteksten for at få en detaljeret beskrivelse af objektet.
    Attribut
    For hvert objekt kan en eller flere værdier vises. Kolonnetitlen er det menneskelæselige navn (bortset fra visse MDS-specifikke attributter) og indholdet i kolonnen er attributværdier for objektet som de blev indtastet i informationssystemet.
    ", "Objekt" => 0, "Attribut" => 0 ), "quelist" => array("0" => "Kø", "help" => "
    Attribut
    ".$clickable.". Navn på en køattribut".$str_att."
    Værdi
    ".$str_val."
    Jobnavn
    ".$clickable.". ".$str_job."
    Ejer
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Lager (RAM) (KB)
    ".$str_mem."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Jobnavn" => 0, "Ejer" => 0, "Status" => 0, "CPU (min)" => 0, "Lager (RAM) (KB)" => 0, "CPU'er" => 0 ), "sestat" => array("0" => "Lagerenhed", "help" => "
    Alias
    Lagerenhedens alias som angivet i informationssystemet. Højst 15 tegn vises.
    Total lagerplads
    Total lagerplads på harddisken, GigaByte.
    Ledig Plads
    Aktuel Ledig diskplads, GigaByte.
    Navn
    Lagerenhedens navn, bestående af et logisk navn og et værtsnavn (adskilt af kolon, ":"). Det logiske navn bruges kun af hensyn til informationssystemet, for at skelne mellem forskellige lagringsenheder på den samme maskine.
    Grund-URL
    Lagringsenhedens URL, som regel en gsiftp:// protokol. Brug denne URL som udgangspunkt for at tilgå filer.
    Type
    Lagringselement type. "gridftp-baseret" angiver en harddisk med GridFTP-grænseflade.
    ", "#" => 0, "Alias" => 0, // "Total lagerplads" => 0, "Ledig/Total Diskplads, GB" => 0, "Navn" => 0, "Grund-URL" => 0, "Type" => 0 ), "allusers" => array("0" => "Authoriserede Gridbrugere:Aktive Gridbrugere", "help" => "
    Navn
    ".$clickable.". ".$str_nam."
    Tilknytning
    Brugertilknytning, uddraget af det personlige certifikat
    Job
    Antal brugerjob i systemet (kørende, ventende, afsluttede eller slettede)
    Steder
    Viser hvor mange steder brugeren er autoriseret
    ", "#" => 0, "Navn" => 0, "Tilknytning" => 0, "Job" => 0, "Steder" => 0 ), "userres" => array("0" => "", "Klynge:kø" => 0, "Ledige CPU'er" => 0, "Job, i ventekø" => 0, "Ledig diskplads (MegaByte)" => 0 ), "ldapdump" => array("0" => "", "Attribut" => 0, "Værdi" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objektklasse", "Mds-validfrom" => "Info gyldig fra (GMT)", "Mds-validto" => "Info gyldig til (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Frontend domænenavn", "nordugrid-cluster-aliasname" => "Klyngealias", "nordugrid-cluster-contactstring" => "Kontakttekst", "nordugrid-cluster-interactive-contactstring" => "Interaktiv kontakt", "nordugrid-cluster-comment" => "Kommentar", "nordugrid-cluster-support" => "E-mail kontact", "nordugrid-cluster-acl" => "Authoriserede VO'er", "nordugrid-cluster-lrms-type" => "LRMS type", "nordugrid-cluster-lrms-version" => "LRMS version", "nordugrid-cluster-lrms-config" => "LRMS detaljer", "nordugrid-cluster-architecture" => "Arkitektur", "nordugrid-cluster-opsys" => "Styresystem", "nordugrid-cluster-homogeneity" => "Homogen klynge", "nordugrid-cluster-nodecpu" => "CPU type (langsomste)", "nordugrid-cluster-nodememory" => "Lager (MB, mindst)", "nordugrid-cluster-totalcpus" => "CPU's, i alt", "nordugrid-cluster-cpudistribution" => "CPU:maskiner", "nordugrid-cluster-benchmark" => "Benchmark", "nordugrid-cluster-sessiondir-free" => "Harddiskplads, til rådighed (MB)", "nordugrid-cluster-sessiondir-total" => "Harddiskplads, i alt (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Gridsession levetid (min)", "nordugrid-cluster-cache-free" => "Cachestørrelse, til rådighed (MB)", "nordugrid-cluster-cache-total" => "Cachestørrelse, i alt (MB)", "nordugrid-cluster-runtimeenvironment" => "Køretidsomgivelser", "nordugrid-cluster-localse" => "Lagringsenhed, lokal", "nordugrid-cluster-middleware" => "Gridmiddleware", "nordugrid-cluster-totaljobs" => "Jobs, samlet antal", "nordugrid-cluster-usedcpus" => "CPU'er, optagede", "nordugrid-cluster-queuedjobs" => "Jobs, i kø", "nordugrid-cluster-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-cluster-location" => "Postnummer", "nordugrid-cluster-owner" => "Ejer", "nordugrid-cluster-issuerca" => "Certifikatudsteder", "nordugrid-cluster-issuerca-hash" => "Certificate issuer's hash", "nordugrid-cluster-trustedca" => "Trusted certificate issuers", "nordugrid-cluster-nodeaccess" => "Node IP sammenhæng", "nordugrid-cluster-gridarea" => "Session area (FORфLDET)", "nordugrid-cluster-gridspace" => "Griddiskplads (FORфLDET)", "nordugrid-cluster-opsysdistribution" => "OS fordeling (FORфLDET)", "nordugrid-cluster-runningjobs" => "Job, kørende (FORфLDET)", "nordugrid-cluster-credentialexpirationtime" => "Credential expiration time", "nordugrid-queue-name" => "Kønavn", "nordugrid-queue-comment" => "Kommentar", "nordugrid-queue-status" => "Køstatus", "nordugrid-queue-running" => "CPU'er, optagede", "nordugrid-queue-localqueued" => "Local jobs, queued", "nordugrid-queue-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-queue-queued" => "Job, i ventekø (FORфLDET)", "nordugrid-queue-maxrunning" => "Jobs, kørende (max)", "nordugrid-queue-maxqueuable" => "Jobs, kan udskydes (max)", "nordugrid-queue-maxuserrun" => "Jobs per Unixbruger (max)", "nordugrid-queue-maxcputime" => "CPU-tid, max. (minutter)", "nordugrid-queue-mincputime" => "CPU-tid, min. (minutter)", "nordugrid-queue-defaultcputime" => "CPU-tid, default (minutter)", "nordugrid-queue-maxwalltime" => "Vægurstid, max. 
(minutter)", "nordugrid-queue-minwalltime" => "Vægurstid, min. (minutter)", "nordugrid-queue-defaultwalltime" => "Vægurstid, default (minutter)", "nordugrid-queue-schedulingpolicy" => "Skeduleringspolitik", "nordugrid-queue-totalcpus" => "CPU'er, i alt", "nordugrid-queue-nodecpu" => "CPUtype", "nordugrid-queue-nodememory" => "Lager (RAM) (MB)", "nordugrid-queue-architecture" => "Arkitektur", "nordugrid-queue-opsys" => "Styresystem", "nordugrid-queue-homogeneity" => "Homogen kø", "nordugrid-queue-gridrunning" => "CPU'er, optagede af Gridjobs", "nordugrid-queue-gridqueued" => "Gridjobs, i ventekø", "nordugrid-queue-benchmark" => "Benchmark", "nordugrid-queue-assignedcpunumber" => "CPU'er per kø (FORфLDET)", "nordugrid-queue-assignedcputype" => "CPUtype (FORфLDET)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Ejer", "nordugrid-job-execcluster" => "Execution cluster", "nordugrid-job-execqueue" => "Aktivkø", "nordugrid-job-stdout" => "Standard uddatafil", "nordugrid-job-stderr" => "Standard fejlfil", "nordugrid-job-stdin" => "Standard inddatafile", "nordugrid-job-reqcputime" => "Anmodet CPU-tid", "nordugrid-job-reqwalltime" => "Anmodet vægurstid", "nordugrid-job-status" => "Status", "nordugrid-job-queuerank" => "Position i køen", "nordugrid-job-comment" => "LRMS kommentar", "nordugrid-job-submissionui" => "Indleveringsmaskine", "nordugrid-job-submissiontime" => "Indleveringstid (GMT)", "nordugrid-job-usedcputime" => "Forbrugt CPU-tid", "nordugrid-job-usedwalltime" => "Forbrugt vægurstid", "nordugrid-job-completiontime" => "Job afslutningstidspunkt (GMT)", "nordugrid-job-sessiondirerasetime" => "Sletningstidspunkt (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxy udløbstidspunkt (GMT)", "nordugrid-job-usedmem" => "Benyttet lager (RAM) (KB)", "nordugrid-job-errors" => "Fejl", "nordugrid-job-exitcode" => "Afslutningskode", "nordugrid-job-jobname" => "Navn", "nordugrid-job-runtimeenvironment" => "Køretidsomgivelser", "nordugrid-job-cpucount" => "Anmodede CPU'er", "nordugrid-job-executionnodes" => "Udførelsesknuder", "nordugrid-job-gmlog" => "GM logfil", "nordugrid-job-clientsoftware" => "klientversion", "nordugrid-job-rerunable" => "Genkørbare", "nordugrid-job-reqcput" => "Anmodet tid (Forældet)", "nordugrid-job-gridlog" => "Gridlogfil (Forældet)", "nordugrid-job-lrmscomment" => "LRMS kommentar (Forældet)", "nordugrid-authuser-name" => "Navn", "nordugrid-authuser-sn" => "Subjektnavn", "nordugrid-authuser-freecpus" => "Ledige CPU'er", "nordugrid-authuser-diskspace" => "Ledig harddiskplads (MB)", "nordugrid-authuser-queuelength" => "User's queued jobs", "nordugrid-se-name" => "Nabn", "nordugrid-se-aliasname" => "Lagerelement alias", "nordugrid-se-type" => "Lagerelement type", "nordugrid-se-acl" => "Authoriserede VO'er", "nordugrid-se-freespace" => "Legid plads (MB)", "nordugrid-se-totalspace" => "Total plads (MB)", "nordugrid-se-url" => "Kontakt URL", "nordugrid-se-baseurl" => "Kontact URL (Forældet)", "nordugrid-se-accesscontrol" => "Adgangskontrol", "nordugrid-se-authuser" => "Autoriseret bruger (DN)", "nordugrid-se-location" => "Postnummer", "nordugrid-se-owner" => "Ejer", "nordugrid-se-middleware" => "Middleware", "nordugrid-se-issuerca" => "Certifikatudsteder", "nordugrid-se-issuerca-hash" => "Certificate issuer's hash", "nordugrid-se-trustedca" => "Trusted certificate issuers", "nordugrid-se-comment" => "Kommentar", "nordugrid-rc-name" => "Domænenavn", "nordugrid-rc-aliasname" => "Replikeringskatalogalias", "nordugrid-rc-baseurl" => "Kontact URL", 
"nordugrid-rc-authuser" => "Autoriseret bruger (DN)", "nordugrid-rc-location" => "Postnummer", "nordugrid-rc-owner" => "Ejer", "nordugrid-rc-issuerca" => "Certifikatudsteder" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Kan ikke topniveau ressourceindekser", "2" => "ingen af de lokale indekser returnerede en forbindelse", "3" => " dårlig konfigurations eller anmodning udløb", "4" => "Ingen Gridjobs fundet", "5" => "ingen information fundet", "6" => "Server ikke tilgængelig", "7" => " - genlæs senere", "8" => "ingen køinformationer fundet", "9" => "Ingen indgange fundet", "10" => "Ingen brugere fundet", "11" => "Ingen autoriserede på værten", "12" => "svarer ikke", "13" => "Fandt ingen nylige jobs for ", // debug messages "101" => " Monitor timeout for GRIS: ", "102" => " sek on forbindelse og ", "103" => " sek on søgning", "104" => " sek brugt på at søge", "105" => "Viser kun ressourcer i ", "106" => "Spurgte topniveau indeksservere: ", "107" => "Fik geokrafiske placeringer, skanned steder: ", "108" => " steder sorteret efter geografisk placering", "109" => "Leder efter klyngeattributter", "110" => "Leder efter køattributter", "111" => "Ingen daya fra ", "112" => " er oppe i ", "113" => " tilbyder ingen ressourcer", "114" => " Monitor timeouts for GIIS: ", "115" => "springer over GRIS: ", "116" => "ikke en ", "117" => "Checker forbindelse: ", "118" => "OK", "119" => "Så vidt, opdagede ressource at typen ", "120" => "LDAP fejl ved søgning ", "121" => " status ved ", "122" => "Sortlistet: ", "123" => "Registrant fundet for ", "124" => "Led efter SE attributter", "125" => "Led efter brugere", "126" => "Led efter jobs", "127" => " har job ", "128" => " men ikke autoriseret", "129" => "Kan ikke få objektdata: fejl ", "130" => " Monitor timeouts for EMIR: ", // icon titles "301" => "Genlæs", "302" => "Udskriv", "303" => "Hjælp", "304" => "Luk", "305" => "Rød", "306" => "Grå", "307" => "Alle brugere", "308" => "Aktive brugere", "309" => "Søg", "310" => "Lager", "311" => "VO-er", "312" => "Flaget for ", "313" => " Grid processer og ", "314" => " locale processer", // auxilliary strings "401" => "Processer", "402" => "Grid", "403" => "Lokal", "404" => "Verden", "405" => "TOTAL", "406" => " steder", "407" => "en masse", "408" => " GB", "409" => " ALLE", "410" => "Klynge", "411" => "Kø", "412" => "Job", "413" => "Bruger", "414" => "Lager", "415" => "Replikerings Kat.", "416" => "Definer attributter for at vise objektet: ", "417" => "logisk OG af alle udtrykkene findes", "418" => "Efterlad feltet længst til højre tomt for vise alt", "419" => "Vis de ressourcer eller objekter, du vil", "420" => "Distinguished name", "421" => "Kan bruge i alt ", "422" => " steder", "423" => "Ressource / objekt:", "424" => "Ant. attributter (def. 6):", "425" => "Objekt", "426" => "Næste", "427" => "Vælg een", "428" => "Nulstil", "429" => "VIS" ), // Post code conversion: only for [en]! 
"tlconvert" => array ( "Australia" => "Australien", "Austria" => "ьstrig", "Armenia" => "Armenien", "Algeria" => "Algeriet", "Belgium" => "Belgien", "Bulgaria" => "Bulgarien", "Canada" => "Canada", "China" => "Kina", "Czechia" => "Tjekkiet", "Denmark" => "Danmark", "Estonia" => "Estland", "Finland" => "Finland", "France" => "Frankrig", "Georgia" => "Georgien", "Germany" => "Tyskland", "Greece" => "Grækenland", "Hungary" => "Ungarn", "Iceland" => "Island", "Ireland" => "Irland", "Italy" => "Italien", "Japan" => "Japan", "Latvia" => "Letland", "Lithuania" => "Lithauen", "Morocco" => "Marocco", "Netherlands" => "Nederlandene", "Norway" => "Norge", "Poland" => "Polen", "Portugal" => "Portugal", "Romania" => "Rumænien", "Russia" => "Rusland", "SriLanka" => "Sri Lanka", "Sweden" => "Sverige", "Slovakia" => "Slovakiet", "Slovenia" => "Slovenien", "Switzerland" => "Schweiz", "Turkey" => "Tyrkiet", "UK" => "UK", "Ukraine" => "Ukraine", "USA" => "USA", "World" => "Verden" ) ); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/PaxHeaders.7502/uk.inc0000644000000000000000000000012412050701227024243 xustar000000000000000027 mtime=1352893079.143025 27 atime=1513200575.717716 30 ctime=1513200663.747793323 nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/uk.inc0000644000175000002070000017333512050701227024324 0ustar00mockbuildmock00000000000000 // -- Author: oxana.smirnova@hep.lu.se // Some common strings: $clickable = "ПОСИЛАННЯ"; $str_att = ", інтерпретировані для простоти читання (за виключенням декількох атрибутів, специфічних для системи MDS). За кліком выводяться значення цього атрибута на всіх відомих ресурсах ARC."; $str_val = "Значення атрибута, записане в інформаційній системі."; $str_que = "Зазвичай черги розрізняються або за допустимою тривалістю обрахунку, або за допущеною групою користувачів. За кліком виводиться повний опис черги, що містить список всіх відомих завдань: в обрахунку, в черзі та завершених."; $str_job = " Ім'я завдання, присвоєне хазяїном.
    N/A означає, що хазяїн не присвоїв ніякого імені.
    X означає, що хазяїн відмінив виконання завдання.
    ! означає, що при виконанні завдання виникла помилка.
    За кліком виводиться детальний опис завдання."; $str_nam = "Ім'я користувача, у відповідності до його особистого сертифікату. За кліком виводиться зведена таблиця всіх Грід-ресурсів, доступних даному користувачу, і список всіх його завдань, зареєстрованих на разі в системі."; $str_sta = "Стан завдання: стадія прогресу в ГМ або стан в ЛСКР. Послідовність можливих станів така:
    ACCEPTED – завдання прийнято, але виконання ще не почалось
    PREPARING – підвантажуються необхідні вхідні дані
    SUBMITTING – завдання направляється до ЛСКР
    INLRMS – управління завданням передано в ЛСКР; інформаційна система висвітлює відомості про внутрішній стан завдання. Можливі наступні стани:
    : Q – завдання очікує у черзі
    : U – завдання призупинено на перевантаженому вузлі (PBSPro)
    : S – завдання призупинено (Condor)
    : R, run – завдання виконується
    : E – завдання завершується (PBS)
    FINISHING – вихідні дані пересилаются за призначенням
    FINISHED – завдання завершене; інформаційна система додає мітку часу завершення
    CANCELING – завдання відміняється
    DELETED – результати завдання не були вивантажені його хазяїном і були знищені сервером після того як сплив час зберігання (зазвичай 24 години).
    До кожного стану може бути додана приставка \"PENDING:\", що означає, що ГМ не може в даний момент перейти до наступного етапу виконання через відповідні внутрішні обмеження."; $str_tim = "Процесорний час, витрачений завданням, у хвилинах."; $str_mem = "Об'єм оперативної пам'яті, що використовує завдання на разі, в кілобайтах"; $str_cpu = "Число процесорів, що займає завдання."; // Actual messages $message = array ( // Table headers and help "loadmon" => array( "0" => "Грід-монітор", "help" => "
    У цьому вікні приведена таблиця всіх обчислювальних ресурсів, що реєструються у списки вищого рівня ARC. Таблиця впорядкована по англійській назві країни, і в кожній країні – за іменем керуючої машини. Для кожного ресурса виведені наступні параметри: назва, загальна кількість процесорів, число зайнятих процесорів, а також кількість завдань у черзі, як засланих через Грід, так і місцевих. Використовуйте утиліту \"Пошук\" для огляду та порівняння інших параметрів кластерів, черг, завдань і т.д.
    Країна
    ".$clickable.". Прапор та назва країни, як слідує із доступного опису ресурсу. За кліком виводиться зведена таблиця тільки для цієї країни.
    Ресурс
    ".$clickable.". Назва ресурса (зазвичай кластера), присвоєна власником. Довжина рядка не повинна перевищувати 22 символа. За кліком виводиться повний опис ресурсу (кластера).
    ЦП
    Загальна кількість процесорів (ядер) у кластері. Увага! Тільки частина з них може бути доступна користувачам грід.
    Завантаженість (процесори)
    ".$clickable.". Відносна завантаженість кластера, виходячи із числа зайнятих процесорів. Сіра смуга відповідає кількості процесорів, зайнятих під місцеві завдання, а червона смуга вказує кількість процесорів, що виконують грід-завдання. За кліком виводиться список всіх активних грід-завдань на кластері, включаючи інформацію про число процесорів на кожне завдання.
    Очікують
    ".$clickable.". Число всіх завдань, що стоять у черзі на даному кластері, представлене у вигляді суми грід- і локальних завдань. За кліком на першій цифрі виводиться список всіх завдань у черзі, засланих через грід.
    ", "Країна" => 30, "Ресурс" => 160, "ЦП" => 10, "Завантаженість (процесори)" => 210, "Очікують" => 10 ), "clusdes" => array( "0" => "Опис ресурсу", "help" => "
    Атрибут
    ".$clickable.". Назви атрибутів кластера".$str_att."
    Значення
    ".$str_val."
    Черга
    ".$clickable.". Назви черг (присвоєні власниками), що є доступними для грід-користувачів. ".$str_que."
    Стан
    Стан черги. Активна черга зазвичай видає стан active.
    Тривалість (хв)
    Межі по часу на тривалість перебування завдання в черзі, якщо такі встановлені, у хвилинах процесорного часу. Перше значення відповідає нижній межі, друге – верхній. Якщо межі не встановлені (тобто черга приймає завдання будь-якої тривалості), виводиться мітка N/A.
    Рахуються
    Число завдань, що обраховуються в черзі. Показано загальне число завдань, причому число процесорів, зайнятих під грід-завдання, вказано в дужках, наприклад: (Грід: 12). Увага! За наявності паралельних багатопроцесорних завдань, число в дужках може перевищувати загальне число завдань.
    Очікують
    Число завдань, що чекують на виконання в черзі. Показано загальне число завдань, причому кількість завдань, засланих через Грід, вказано в дужках, наприклад: (Грід: 235).
    ", "Черга" => 0, "Mapping Queue" => 0, "Стан" => 0, "Тривалість (хв)" => 0, "ЦП" => 0, "Рахуються" => 0, "Очікують" => 0 ), "jobstat" => array( "0" => "завдання на:Ярлик завдання", "help" => "
    СПИСОК завдань:
    Ім'я завдання
    ".$clickable.". Ім'я завдання, присвоєне хазяїном. N/A означає, що хазяїн не присвоїв ниякого імені. За кліком виводиться детальний опис завдання.
    Хазяїн
    ".$clickable.". ".$str_nam."
    Стан
    ".$str_sta."
    Час (хв)
    ".$str_tim."
    Черга
    ".$clickable.". Назва черги ЛСКР, у котрій проходить виконання завдання.".$str_que."
    ЦП
    ".$str_cpu."
    ОПИС завдання:
    Атрибут
    ".$clickable.". Назви атрибутів завдання.".$str_att."
    Значення
    ".$str_val."
    ", "Ім'я завдання" => 0, "Хазяїн" => 0, "Стан" => 0, "Час (хв)" => 0, "Черга" => 0, "ЦП" => 0 ), "volist" => array( "0" => "Віртуальні організації", "help" => "
    Віртуальні організації
    ".$clickable.". Група користувачів – зазвичай об'єднаних спільною ціллю чи ресурсами, – допущена до работи хоча б на одному із ресурсів ARC. За кліком виводиться список членів групи.
    Члени
    Кількість членів групи.
    Обслуговується
    Адреса сервера, що підтримує базу даних членів групи.
    ", "Віртуальна організація" => 0, "Члени" => 0, "Обслуговується" => 0 ), "vousers" => array( "0" => "Користувачі", "help" => "
    Ім'я
    ".$clickable.". ".$str_nam."
    Місце роботи
    Місце роботи користувача, у відповідності до запису у базі даних. Необов'язково.
    Електронна пошта
    ".$clickable.". Адреса електронної пошти користувача, у відповідності до запису у базі даних. Необов'язково. За кліком створюється лист для користувача.
    ", "№" => 0, "Ім'я" => 0, "Місце роботи" => 0, "Електронна пошта" => 0 ), "userlist" => array( "0" => "Інформація про", "help" => "
    Ресурс:черга
    ".$clickable.". Назви ресурсів (кластерів) та відповідних черг ЛСКР (разділені двокрапкою), доступних даному користувачу. Якщо доступ закритий, виводиться повідомлення "Немає доступу до ресурсу". За кліком на назві кластера виводиться повний опис ресурса (кластера). За кліком на назві черги виводиться повний опис черги.
    вільних процесорів.
    Число вільних центральних процесорів, доступних у даній черзі для даного користувача на даний момент часу. Якщо черга має обмеження за часом на виконання завдань, ця межа вказана після числа процесорів (у хвилинах, розділяється двокрапкою). Наприклад, "3" означає, що 3 процесора доступно для завдань будь-якої тривалості; "4:360" означає, що 4 процесора доступно для завдань, час виконання яких не перевищує 6 годин; "10:180 30" означає, що 10 процесорів доступно для завдань, час виконання яких не перевищує 3 годин, і 30 процесорів доступно для завдань будь-якої тривалості; "0" означає, що вільних ресурсів немає, і завдання будуть направлені на очікування в черзі.
    завдань у черзі
    Кількість завдань користувача, що знаходяться у списку очікування перед новим завданням, засланим від імені даного користувача. Число "0" означає, що завдання можливо буде запущене на обрахунок негайно. Увага! Це лише приблизні значення, які можуть бути змінені локальними операторами.
    доступний простір на сховищі (Мб)
    Простір на локальному жорсткому диску, доступне даному користувачу у даній черзі (в мегабайтах). Увага! Це лише приблизні значення, оскільки більшість кластерів не підтримують дискові квоти.
    Ім'я завдання
    ".$clickable.". ".$str_job."
    Стан
    ".$str_sta."
    Час (хв)
    ".$str_tim."
    Ресурс
    ".$clickable.". Ім'я ресурсу (зазвичай кластера), на котрому проходить виконання. завдання. За кліком виводиться повний опис ресурсу (кластера).
    Черга
    ".$clickable.". Назва черги ЛСКР, у якій проходить виконання завдання. ".$str_que."
    ЦП
    ".$str_cpu."
    ", "" => 0, "Ім'я завдання" => 0, "Стан" => 0, "Час (хв)" => 0, "Ресурс" => 0, "Черга" => 0, "ЦП" => 0 ), "attlist" => array( "0" => "Значення атрибутів", "help" => "
    Об'єкт
    ".$clickable." Назва об'єкта, атрибути якого перераховані у рядку. Це може бути ім'я кластера, черги, завдання, користувача і т.д.. За кліком виводиться змістовний опис об'єкту.
    Атрибут
    Для кожного об'єкта в таблиці приведені значення одного чи декількох його атрибутів. У заголовку стовпця вказано назва атрибута, інтерпретована для простоти читання (за виключенням декількох атрибутів, специфічних для системи MDS), а вмістом кожного стовпця являються значення відповідних атрибутів, що записані в інформаційній системі.
    ", "Об'єкт" => 0, "Атрибут" => 0 ), "quelist" => array( "0" => "Черга", "help" => "
    Атрибут
    ".$clickable.". Назва атрибутів черги".$str_att."
    Значення
    ".$str_val."
    Ім'я завдання
    ".$clickable.". ".$str_job."
    Хазяїн
    ".$clickable.". ".$str_nam."
    Стан
    ".$str_sta."
    Час (хв)
    ".$str_tim."
    ОЗУ (Кб)
    ".$str_mem."
    ЦП
    ".$str_cpu."
    ", "" => 0, "І'мя завдання" => 0, "Хазяїн" => 0, "Стан" => 0, "Час (хв)" => 0, "ОЗУ (Кб)" => 0, "ЦП" => 0 ), "sestat" => array( "0" => "Зберігальні пристрої", "help" => "
    Назва
    Назва зберігального пристрою, зареєстрована в інформаційній системі. Максимально допустима довжина: 15 символів.
    Весь об'єм
    Повний об'єм диску, Гб.
    Вільно
    Доступний простір на диску на разі, Гб.
    Ім'я
    Ім'я зберігального пристрою, складене із логічного імені та імені сервера (розділених двокрапкою). Логічне ім'я використовується тільки інформаційною системою для розрізнення зберігальних пристроїв на одному й тому ж сервері.
    URL бази
    URL зберігального пристрою, зазвичай протоколу gsiftp://. Використовуйте цю адресу як базову для доступу до файлів.
    Тип
    Тип зберігального пристрою. Тип "gridftp-based" означає що це дисковий накопичувач з інтерфейсом GridFTP.
    ", "№" => 0, "Назва" => 0, // "Весь об'єм" => 0, "Вільний/весь об'єм, Гб" => 0, "Ім'я" => 0, "URL бази" => 0, "Тип" => 0 ), "allusers" => array( "0" => "Допущені користувачі:Активні користувачі", "help" => "
    Ім'я
    ".$clickable.". ".$str_nam."
    Місце роботи
    Місце роботи користувача, у відповідності із записом у його сертифікаті.
    Завдання
    Число всіх завдань користувача, що знаходяться в системі (на обрахунку, в черзі та завершених).
    Ресурси
    Число кластерів, до яких даний користувач має допуск.
    ", "№" => 0, "Ім'я" => 0, "Місце роботи" => 0, "Завдання" => 0, "Ресурси" => 0 ), "userres" => array( "0" => "", "Ресурс:черга" => 0, "вільних процесорів" => 0, "завдань у черзі" => 0, "вільний простір на сховищі (Мб)" => 0 ), "ldapdump" => array( "0" => "", "Атрибут" => 0, "Значення" => 0 ), "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Відомості дійсні з (GMT)", "Mds-validto" => "Відомості дійсні до (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Ім'я керуючої машини", "nordugrid-cluster-aliasname" => "Назва", "nordugrid-cluster-contactstring" => "Точка входу", "nordugrid-cluster-interactive-contactstring" => "Інтерактивна точка входу", "nordugrid-cluster-comment" => "Коментар", "nordugrid-cluster-support" => "Технічна підтримка", "nordugrid-cluster-acl" => "Допущені ВО", "nordugrid-cluster-lrms-type" => "ЛСКР, тип", "nordugrid-cluster-lrms-version" => "ЛСКР, версія", "nordugrid-cluster-lrms-config" => "ЛСКР, подробиці", "nordugrid-cluster-architecture" => "Архітектура", "nordugrid-cluster-opsys" => "Операційна система", "nordugrid-cluster-homogeneity" => "Однорідність ресурсу", "nordugrid-cluster-nodecpu" => "Процесор, тип (найгірший)", "nordugrid-cluster-nodememory" => "ОЗУ (Мб, найменьше)", "nordugrid-cluster-totalcpus" => "Процесори, усього", "nordugrid-cluster-cpudistribution" => "Процесори: вузли", "nordugrid-cluster-benchmark" => "Еталонний тест", "nordugrid-cluster-sessiondir-free" => "Сховище, доступно (Мб)", "nordugrid-cluster-sessiondir-total" => "Сховище, весь об'єм (Мб)", "nordugrid-cluster-sessiondir-lifetime"=> "Час життя грід-сеансу (хв)", "nordugrid-cluster-cache-free" => "Дисковий кеш, вільно (Мб)", "nordugrid-cluster-cache-total" => "Дисковий кеш, усього (Мб)", "nordugrid-cluster-runtimeenvironment" => "Робоче середовище", "nordugrid-cluster-localse" => "Локальний накопичувач", "nordugrid-cluster-middleware" => "Грід-ПЗ", "nordugrid-cluster-totaljobs" => "завдань, всього", "nordugrid-cluster-usedcpus" => "Процесори, зайняті", "nordugrid-cluster-queuedjobs" => "завдань у черзі (ЗАСТАРІЛИЙ)", "nordugrid-cluster-prelrmsqueued" => "Грід-завдань, що очікують на засилку", "nordugrid-cluster-location" => "Поштовий індекс", "nordugrid-cluster-owner" => "Власник", "nordugrid-cluster-issuerca" => "Центр сертифікації", "nordugrid-cluster-issuerca-hash" => "Хеш-код центра сертификації", "nordugrid-cluster-trustedca" => "Довірені центри сертификації", "nordugrid-cluster-nodeaccess" => "IP-з'єднання вузлів", "nordugrid-cluster-gridarea" => "Адреса сеансів (ЗАСТАРІЛИЙ)", "nordugrid-cluster-gridspace" => "Грід-диск (ЗАСТАРІЛИЙ)", "nordugrid-cluster-opsysdistribution" => "Дистрибутив ОС (ЗАСТАРІЛИЙ)", "nordugrid-cluster-runningjobs" => "завдань в обрахунку (ЗАСТАРІЛИЙ)", "nordugrid-cluster-credentialexpirationtime" => "Термін дії сертифікату", "nordugrid-queue-name" => "І'мя черги", "nordugrid-queue-comment" => "Коментар", "nordugrid-queue-status" => "Стан черги", "nordugrid-queue-running" => "завдань в обрахунку", "nordugrid-queue-localqueued" => "Локальні завдання у черзі", "nordugrid-queue-prelrmsqueued" => "Грід-завдання, що очікують на засилку", "nordugrid-queue-queued" => "завдань в черзі (ЗАСТАРІЛИЙ)", "nordugrid-queue-maxrunning" => "завдань в обрахунку (межа)", "nordugrid-queue-maxqueuable" => "завдань в черзі (межа)", "nordugrid-queue-maxuserrun" => "завдань на користувача (межа)", "nordugrid-queue-maxcputime" => "Тривалість, найбільша (хв)", "nordugrid-queue-mincputime" => "Тривалість, найменьша (ха)", "nordugrid-queue-defaultcputime" 
=> "Тривалість, за замовчуванням (хв)", "nordugrid-queue-schedulingpolicy" => "Правила планування", "nordugrid-queue-totalcpus" => "Процесори, всього", "nordugrid-queue-nodecpu" => "Процесор, тип", "nordugrid-queue-nodememory" => "ОЗУ (Мб)", "nordugrid-queue-architecture" => "Архітектура", "nordugrid-queue-opsys" => "Операційна система", "nordugrid-queue-homogeneity" => "Однорідність черги", "nordugrid-queue-gridrunning" => "Грід-завдань в обрахунку", "nordugrid-queue-gridqueued" => "Грід-завдань в черзі", "nordugrid-queue-benchmark" => "Еталонний тест", "nordugrid-queue-assignedcpunumber" => "Процесори (ЗАСТАРІЛИЙ)", "nordugrid-queue-assignedcputype" => "Тип процесора (ЗАСТАРІЛИЙ)", "nordugrid-job-globalid" => "Ярлик", "nordugrid-job-globalowner" => "Хазяїн", "nordugrid-job-execcluster" => "Виконуючий кластер", "nordugrid-job-execqueue" => "Виконуюча черга", "nordugrid-job-stdout" => "Стандартний потік виведення", "nordugrid-job-stderr" => "Стандартний потік видачі помилок", "nordugrid-job-stdin" => "Стандартний потік введення", "nordugrid-job-reqcputime" => "Запитаний процессорний час", "nordugrid-job-reqwalltime" => "Запитаний реальний час", "nordugrid-job-status" => "Стан", "nordugrid-job-queuerank" => "Положення в черзі", "nordugrid-job-comment" => "Коментар ЛСКР", "nordugrid-job-submissionui" => "Засилаючий клієнт", "nordugrid-job-submissiontime" => "Час засилки (GMT)", "nordugrid-job-usedcputime" => "Використаний процесорний час", "nordugrid-job-usedwalltime" => "Використаний реальний час", "nordugrid-job-completiontime" => "Час завершення (GMT)", "nordugrid-job-sessiondirerasetime" => "Срок знищення (GMT)", "nordugrid-job-proxyexpirationtime" => "Закінчення довірення (GMT)", "nordugrid-job-usedmem" => "Використання ОЗУ (Кб)", "nordugrid-job-errors" => "Помилки", "nordugrid-job-exitcode" => "Код повернення", "nordugrid-job-jobname" => "Ім'я", "nordugrid-job-runtimeenvironment" => "Рабоче середовище", "nordugrid-job-cpucount" => "Запитано процесорів", "nordugrid-job-executionnodes" => "Виконуючі вузли", "nordugrid-job-gmlog" => "Журнальний запис ГМ", "nordugrid-job-clientsoftware" => "Версія клієнта", "nordugrid-job-rerunable" => "Можливість перезапуску", "nordugrid-job-reqcput" => "Запитаний час (ЗАСТАРІЛИЙ)", "nordugrid-job-gridlog" => "Грід-запис (ЗАСТАРІЛИЙ)", "nordugrid-job-lrmscomment" => "Коментар ЛСКР (ЗАСТАРІЛИЙ)", "nordugrid-authuser-name" => "Ім'я", "nordugrid-authuser-sn" => "Суб'єкт", "nordugrid-authuser-freecpus" => "Вільні ЦП", "nordugrid-authuser-diskspace" => "Диск, доступно (Мб)", "nordugrid-authuser-queuelength" => "завдань користувача в черзі", "nordugrid-se-name" => "Умовне ім'я", "nordugrid-se-aliasname" => "Назва", "nordugrid-se-type" => "Тип", "nordugrid-se-acl" => "Допущені ВО", "nordugrid-se-freespace" => "Вільний об'єм (Мб)", "nordugrid-se-totalspace" => "Весь об'єм (Мб)", "nordugrid-se-url" => "Адреса доступу", "nordugrid-se-baseurl" => "Адреса доступу (ЗАСТАРІЛИЙ)", "nordugrid-se-accesscontrol" => "Контроль доступу", "nordugrid-se-authuser" => "Допущені користувачі (DN)", "nordugrid-se-location" => "Поштовий індекс", "nordugrid-se-owner" => "Власник", "nordugrid-se-middleware" => "Грід-ПЗ", "nordugrid-se-issuerca" => "Центр сертифікації", "nordugrid-se-issuerca-hash" => "Хеш-код центра сертификації", "nordugrid-se-trustedca" => "Довірені центи сертификації", "nordugrid-se-comment" => "Коментар", "nordugrid-rc-name" => "Доменне ім'я", "nordugrid-rc-aliasname" => "Назва", "nordugrid-rc-baseurl" => "Контактна адреса", "nordugrid-rc-authuser" => "Допущені 
користувачі (DN)", "nordugrid-rc-location" => "Поштовий індекс", "nordugrid-rc-owner" => "Власник", "nordugrid-rc-issuerca" => "Сертифікат виданий" ), "errors" => array( "1" => "Неможливо опитати каталоги вищого рівня", "2" => "Жоден із місцевих каталогів не відзивається", "3" => " невірна конфігурація або сплив час запиту", "4" => "Немає грід-завдань", "5" => "Немає інформації", "6" => "Служба недоступна", "7" => " - спробуйте поновити пізніше", "8" => "Немає інформації про чергу", "9" => "Немає даних", "10" => "Немає користувачів", "11" => "Немає доступу до ресурсу", "12" => "не відзиваєтся", "13" => "На разі немає завдань користувача ", "101" => " Час на зв'язок із локальним каталогом: ", "102" => " с на з'єднання та ", "103" => " с на пошук", "104" => " с затрачено на пошук", "105" => "Перерахування ресурсів: ", "106" => "Опитано каталогів верхнього рівня: ", "107" => "Отримані географічні координати, проскановано ресурсів: ", "108" => " ресурсів впорядковано за геополітичною ознакою", "109" => "Пошук атрибутів кластера", "110" => "Пошук атрибутів черги", "111" => "Немає даних з ", "112" => " функціонує в країні: ", "113" => " не має ресурсів", "114" => " Час на з'вязок із глобальним каталогом: ", "115" => "Ігнорується ресурс: ", "116" => "не відповідає типу ", "117" => "Перевірка зв'язку: ", "118" => "так!", "119" => "На разі виявлено ресурсів типу ", "120" => "Помилка LDAP при пошуку на ", "121" => "-стан на ", "122" => "Заблокований: ", "123" => "Виявлено реєстранта ", "124" => "Пошук атрибутів накопичувачів", "125" => "Пошук користувачів", "126" => "Пошук завдань", "127" => " запустив(ла) завдання ", "128" => " не будучи допущеним(ою)", "129" => "Немає інформації про об'єкт: помилка ", "130" => " Час на з'вязок із глобальним каталогом: ", "301" => "Перезавантажити", "302" => "Друк", "303" => "Допомога", "304" => "Закрити", "305" => "Червоний", "306" => "Сірий", "307" => "Всі користувачі", "308" => "Активні користувачі", "309" => "Пошук", "310" => "Накопичувачі", "311" => "Віртуальні организації", "312" => "Прапор країни: ", "313" => " процесорів під грід та ", "314" => " процесорів під місцеві", "401" => "Процеси", "402" => "Грід", "403" => "місцеві", "404" => "Світ", "405" => "ЗАГАЛОМ", "406" => " об'єктів", "407" => "купа", "408" => " Гб", "409" => " ВСІ", "410" => "Кластер", "411" => "Черга", "412" => "завдання", "413" => "Користувач", "414" => "Накопичувач", "415" => "Каталог реплік", "416" => "Задайте атрибути для огляду; вибраний об'єкт: ", "417" => "Пошук проводиться для логічного І всіх виразів", "418" => "Не заповнюйте праве поле, якщо фільтр непотрібен", "419" => "Огляд ресурсів чи об'єктів за вибором", "420" => "Виділене ім'я", "421" => "Може використовувати ", "422" => " кластерів", "423" => "Ресурс / об'єкт:", "424" => "Кількість атрибутів (6 за зам.):", "425" => "Об'єкт", "426" => "Далі", "427" => "Виберіть", "428" => "Очистити", "429" => "ПОКАЗАТИ" ), // Country name conversion, no postcode! 
"tlconvert" => array ( "Australia" => "Австралія", "Austria" => "Австрія", "Armenia" => "Арменія", "Algeria" => "Алжир", "Belgium" => "Бельгія", "Bulgaria" => "Болгарія", "Canada" => "Канада", "China" => "Китай", "Czechia" => "Чехія", "Denmark" => "Данія", "Estonia" => "Естонія", "Finland" => "Фінляндія", "France" => "Франція", "Georgia" => "Грузія", "Germany" => "Німеччина", "Greece" => "Греція", "Hungary" => "Угорщина", "Iceland" => "Ісландія", "Ireland" => "Ірландія", "Italy" => "Італія", "Japan" => "Японія", "Latvia" => "Латвія", "Lithuania" => "Литва", "Morocco" => "Марокко", "Netherlands" => "Нідерланди", "Norway" => "Норвегія", "Poland" => "Польща", "Portugal" => "Португалія", "Romania" => "Румунія", "Russia" => "Росія", "SriLanka" => "Шрі-Ланка", "Sweden" => "Швеція", "Slovakia" => "Словаччина", "Slovenia" => "Словенія", "Switzerland" => "Швейцарія", "Turkey" => "Туреччина", "UK" => "Великобританія", "Ukraine" => "Україна", "USA" => "США", "World" => "Світ" ) ); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/PaxHeaders.7502/en.inc0000644000000000000000000000012413153453630024235 xustar000000000000000027 mtime=1504597912.048594 27 atime=1513200575.687716 30 ctime=1513200663.741793249 nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/en.inc0000644000175000002070000014401013153453630024302 0ustar00mockbuildmock00000000000000 N/A indicates that user did not assign any name.
    X indicates that the job has been killed by the owner
    ! indicates that the job failed in the system
    Click on a name to get a detailed description of the job."; $str_nam = "Name of the user, as specified in the personal certificate. Click on a name to get the list of all the resources available for this user and all the jobs by this user which are currently in the system."; $str_sta = "Job status as returned by the Grid Manager (GM) and LRMS. In sequential order, the states are:
    ACCEPTED – job submitted but not yet processed
    PREPARING – input files are being retrieved
    SUBMITTING – interaction with LRMS ongoing
    INLRMS – the job is transferred to the LRMS; internal status is added by the infosystem. Possible states are:
    : Q – job is queued
    : U – job is in a suspended state on a busy node (PBSPro)
    : S – job is in a suspended state (Condor)
    : R, run – job is running
    : E – job is finishing (PBS)
    FINISHING – output files are being transferred by the GM
    FINISHED – job is finished; time stamp is added by the infosystem
    CANCELING – job is being cancelled
    DELETED – job not cleaned upon user request but removed by the GM due to expiration time
    Each of the states can be reported with the PENDING: prefix, meaning the GM is attempting to move the job to the next state"; $str_tim = "CPU time used by the job, minutes."; $str_mem = "Memory consumed by the job, KB"; $str_cpu = "Number of processors used by the job."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Grid Monitor", "help" => "
    This screen displays all the sites registering to the top ARC indexing service, sorted by country then by host name. Selected site parameters are monitored: cluster alias, total CPU capacity and number of running and queued jobs, both Grid and local ones. Use the "Search" utility if you want to compare other cluster, queue, job, etc. characteristics
    Country
    ".$clickable.". Country flag and name as deduced from available resource descriptions. Click to show only this country info.
    Cluster
    ".$clickable.". Cluster alias as assigned by the owner. Maximal displayed length is 22 characters. Click on the alias to get a detailed cluster description.
    CPUs
    Total number of CPUs in a cluster. NB! Only a fraction of those can actually be available for the Grid users.
    Load (processes:Grid+local)
    ".$clickable.". Relative cluster load, corresponding to the occupied CPUs count. Grey bars indicate processors occupied by the localy submitted jobs, while red bars show CPUs occupied by jobs submitted via Grid. Click on the bar to get the detailed list of all the running Grid jobs on the cluster, including amount of processors per job.
    Queueing
    ".$clickable.". Number of all queued jobs on the cluster, shown as number of queueing grid jobs plus number of locally submitted queueing jobs. Click the first number to get the list of queued Grid jobs on the cluster.
    ", "Country" => 30, "Site" => 160, "CPUs" => 10, "Load (processes: Grid+local)" => 210, "Queueing" => 10 ), "clusdes" => array("0" => "Resource Details for", "help" => "
    Attribute
    ".$clickable.". Cluster attribute name".$str_att."
    Value
    ".$str_val."
    Queue
    ".$clickable.". Names of batch queues available for the ARC users, as set by cluster owners. ".$str_que."
    Status
    Queue status. Operating queue typically reports active status.
    CPU (min)
    Time limit for job duration per queue, if set, in CPU-minutes. First displayed value is the lower limit, second - the upper one. If limits are not set (jobs of any duration are accepted), N/A tag is shown.
    Running
    Number of jobs running in the queue. Total number of jobs is shown, with number of processors occupied by Grid-submitted jobs displayed in parentheses, e.g. (Grid: 12). NB! For parallel multiprocessor jobs, number in parentheses can be larger than number of jobs.
    Queueing
    Number of jobs awaiting execution in the queue. Total number of jobs is shown, with Grid-submitted jobs displayed in parentheses, e.g. (Grid: 235)
    ", "Queue" => 0, "Mapping Queue" => 0, "Status" => 0, "Limits (min)" => 0, "CPUs" => 0, "Running" => 0, "Queueing" => 0 ), "jobstat" => array("0" => "Jobs at:Job ID", "help" => "
    JOB LIST:
    Job name
    ".$clickable.". Name of a job as assigned by the owner. If no name has been assigned, "N/A" is displayed. Click on a name to get a detailed description of the job.
    Owner
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Queue
    ".$clickable.". Name of the batch queue in which the job is being executed. ".$str_que."
    CPUs
    ".$str_cpu."
    JOB DETAILS:
    Attribute
    ".$clickable.". Job attribute name".$str_att."
    Value
    ".$str_val."
    ", "Job name" => 0, "Owner" => 0, "Status" => 0, "CPU (min)" => 0, "Queue" => 0, "CPUs" => 0 ), "volist" => array("0" => "Virtual Organisations", "help" => "
    Virtual Organisation
    ".$clickable.". Group of users, typically sharing common activities and resources, authorised at at least one ARC-enabled site. Click on the name to get the list of group members.
    Members
    Number of group members.
    Served by
    LDAP server that supports the group membership database.
    ", "Virtual Organisation" => 0, "Members" => 0, "Served by" => 0 ), "vousers" => array("0" => "Grid User Base", "help" => "
    Name
    ".$clickable.". ".$str_nam."
    Affiliation
    User's home institute as entered by a VO manager. Can be empty.
    E-mail
    ".$clickable.". Users e-mail as entered by a VO manager. Can be empty. Click the address to send an e-mail to the user.
    ", "#" => 0, "Name" => 0, "Affiliation" => 0, "E-mail" => 0 ), "userlist" => array("0" => "Information for", "help" => "
    Cluster:queue
    ".$clickable.". Names of clusters and respective queues (separated by a column, ":") where a user is authorized to submit jobs. If a user is not authorized, message "Not authorised at host ..." is displayed. Click a cluster name to get a detailed cluster description. Click on a queue name to get a detailed queue description.
    Free CPUs
    Number of free CPUs available in a given queue for the user at this moment of time, optionally appended with the upper time limit value (in minutes). For example, "3" means 3 CPUs available for a job of unlimited running time; "4:360" indicates there are 4 CPUs available for jobs not longer than 6 hours; "10:180 30" means there are 10 CPUs available for jobs not exceeding 3 hours, plus 30 CPUs available for jobs of any length; "0" means there are no CPUs available at the moment, and the jobs will be placed in a waiting queue.
    Queued jobs
    Number of the user's jobs expected to sit ahead of a newly submitted job (for this user) in the waiting queue. A value of "0" means the job is expected to be executed immediately. NB! This is only an estimate, which can be overridden by local policies.
    Free disk (MB)
    Disk space available for the user in a given queue (in Megabytes). NB! This is only an estimation, as most clusters do not provide fixed disk quotas.
    Job name
    ".$clickable.". ".$str_job."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Cluster
    ".$clickable.". Name of the cluster at which the job is being executed. Click on a cluster name to get detailed information about the cluster.
    Queue
    ".$clickable.". Name of the batch queue in which the job is/was executed. ".$str_que."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Job name" => 0, "Status" => 0, "CPU (min)" => 0, "Cluster" => 0, "Queue" => 0, "CPUs" => 0 ), "attlist" => array("0" => "Attribute values", "help" => "
    Object
    ".$clickable.". Name of the object which attributes are displayed. It can be a cluster name, a clusters queue name, a job name, a user name etc. Click on the string to get a detailed decscription of the object.
    Attribute
    For each object, one or more attribute values can be listed. The column title is the human-readable attribute name (except for some MDS-specific attributes), and the column contents are the attribute values per object as entered in the Information System.
    ", "Object" => 0, "Attribute" => 0 ), "quelist" => array("0" => "Queue", "help" => "
    Attribute
    ".$clickable.". Name of a queue attribute".$str_att."
    Value
    ".$str_val."
    Job name
    ".$clickable.". ".$str_job."
    Owner
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Memory (KB)
    ".$str_mem."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Job name" => 0, "Owner" => 0, "Status" => 0, "CPU (min)" => 0, "Memory (KB)" => 0, "CPUs" => 0 ), "sestat" => array("0" => "Storage Elements", "help" => "
    Alias
    Storage element alias as specified in the Information System. Maximal displayed length is 15 characters.
    Tot. space
    Total disk space, GB.
    Free space
    Disk space available at the moment, GB.
    Name
    Storage element name, consisting of a logical name and host name (separated by a colon, ":"). The logical name is used only for information system purposes, to distinguish between different storage elements hosted by the same machine.
    Base URL
    URL for the storage element, typically a gsiftp:// protocol. Use this URL as the base to access files.
    Type
    Storage element type. "gridftp-based" indicates a disk storage with GridFTP interface.
    ", "#" => 0, "Alias" => 0, // "Tot. space" => 0, "Free/total space, Gb" => 0, "Name" => 0, "Base URL" => 0, "Type" => 0 ), "allusers" => array("0" => "Authorised Grid Users:Active Grid Users", "help" => "
    Name
    ".$clickable.". ".$str_nam."
    Affiliation
    User's affiliation, derived from the personal certificate
    Jobs
    Count of all user jobs in the system (running, pending, finished or deleted)
    Sites
    Shows how many sites authorise this user
    ", "#" => 0, "Name" => 0, "Affiliaton" => 0, "Jobs" => 0, "Sites" => 0 ), "userres" => array("0" => "", "Cluster:queue" => 0, "Free CPUs" => 0, "Queued jobs" => 0, "Free disk (MB)" => 0 ), "ldapdump" => array("0" => "", "Attribute" => 0, "Value" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info valid from (GMT)", "Mds-validto" => "Info valid to (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Front-end domain name", "nordugrid-cluster-aliasname" => "Cluster alias", "nordugrid-cluster-contactstring" => "Contact string", "nordugrid-cluster-interactive-contactstring" => "Interactive contact", "nordugrid-cluster-comment" => "Comment", "nordugrid-cluster-support" => "E-mail contact", "nordugrid-cluster-acl" => "Authorised VOs", "nordugrid-cluster-lrms-type" => "LRMS type", "nordugrid-cluster-lrms-version" => "LRMS version", "nordugrid-cluster-lrms-config" => "LRMS details", "nordugrid-cluster-architecture" => "Architecture", "nordugrid-cluster-opsys" => "Operating system", "nordugrid-cluster-homogeneity" => "Homogeneous cluster", "nordugrid-cluster-nodecpu" => "CPU type (slowest)", "nordugrid-cluster-nodememory" => "Memory (MB, smallest)", "nordugrid-cluster-totalcpus" => "CPUs, total", "nordugrid-cluster-cpudistribution" => "CPU:machines", "nordugrid-cluster-benchmark" => "Benchmark", "nordugrid-cluster-sessiondir-free" => "Disk space, available (MB)", "nordugrid-cluster-sessiondir-total" => "Disk space, total (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Grid session lifetime (min)", "nordugrid-cluster-cache-free" => "Cache size, available (MB)", "nordugrid-cluster-cache-total" => "Cache size, total (MB)", "nordugrid-cluster-runtimeenvironment" => "Runtime environment", "nordugrid-cluster-localse" => "Storage Element, local", "nordugrid-cluster-middleware" => "Grid middleware", "nordugrid-cluster-totaljobs" => "Jobs, total amount", "nordugrid-cluster-usedcpus" => "CPUs, occupied", "nordugrid-cluster-queuedjobs" => "Jobs, queued", "nordugrid-cluster-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-cluster-location" => "Postal code", "nordugrid-cluster-owner" => "Owner", "nordugrid-cluster-issuerca" => "Certificate issuer", "nordugrid-cluster-issuerca-hash" => "Certificate issuer's hash", "nordugrid-cluster-trustedca" => "Trusted certificate issuers", "nordugrid-cluster-nodeaccess" => "Node IP connectivity", "nordugrid-cluster-gridarea" => "Session area (OBSOLETE)", "nordugrid-cluster-gridspace" => "Grid disk space (OBSOLETE)", "nordugrid-cluster-opsysdistribution" => "OS distribution (OBSOLETE)", "nordugrid-cluster-runningjobs" => "Jobs, running (OBSOLETE)", "nordugrid-cluster-credentialexpirationtime" => "Credential expiration time", "nordugrid-queue-name" => "Queue name", "nordugrid-queue-comment" => "Comment", "nordugrid-queue-status" => "Queue status", "nordugrid-queue-running" => "Total occupied CPUs", "nordugrid-queue-localqueued" => "Local jobs, queued", "nordugrid-queue-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-queue-queued" => "Jobs, queued (OBSOLETE)", "nordugrid-queue-maxrunning" => "Jobs, running (max)", "nordugrid-queue-maxqueuable" => "Jobs, queueable (max)", "nordugrid-queue-maxuserrun" => "Jobs per Unix user (max)", "nordugrid-queue-maxcputime" => "CPU time, max. (minutes)", "nordugrid-queue-mincputime" => "CPU time, min. (minutes)", "nordugrid-queue-defaultcputime" => "CPU time, default (minutes)", "nordugrid-queue-maxwalltime" => "Walltime, max. 
(minutes)", "nordugrid-queue-minwalltime" => "Walltime, min. (minutes)", "nordugrid-queue-defaultwalltime" => "Walltime, default (minutes)", "nordugrid-queue-schedulingpolicy" => "Scheduling policy", "nordugrid-queue-totalcpus" => "CPUs, total", "nordugrid-queue-nodecpu" => "CPU type", "nordugrid-queue-nodememory" => "Memory (MB)", "nordugrid-queue-architecture" => "Architecture", "nordugrid-queue-opsys" => "Operating system", "nordugrid-queue-homogeneity" => "Homogeneous queue", "nordugrid-queue-gridrunning" => "CPUs occupied by Grid jobs", "nordugrid-queue-gridqueued" => "Grid jobs, queued", "nordugrid-queue-benchmark" => "Benchmark", "nordugrid-queue-assignedcpunumber" => "CPUs per queue (OBSOLETE)", "nordugrid-queue-assignedcputype" => "CPU type (OBSOLETE)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Owner", "nordugrid-job-execcluster" => "Execution cluster", "nordugrid-job-execqueue" => "Execution queue", "nordugrid-job-stdout" => "Standard output file", "nordugrid-job-stderr" => "Standard error file", "nordugrid-job-stdin" => "Standard input file", "nordugrid-job-reqcputime" => "Requested CPU time", "nordugrid-job-reqwalltime" => "Requested wall clock time", "nordugrid-job-status" => "Status", "nordugrid-job-queuerank" => "Position in the queue", "nordugrid-job-comment" => "LRMS comment", "nordugrid-job-submissionui" => "Submission machine", "nordugrid-job-submissiontime" => "Submission time (GMT)", "nordugrid-job-usedcputime" => "Used CPU time", "nordugrid-job-usedwalltime" => "Used wall clock time", "nordugrid-job-completiontime" => "Job completion time (GMT)", "nordugrid-job-sessiondirerasetime" => "Erase time (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxy expiration time (GMT)", "nordugrid-job-usedmem" => "Used memory (KB)", "nordugrid-job-errors" => "Errors", "nordugrid-job-exitcode" => "Exit code", "nordugrid-job-jobname" => "Name", "nordugrid-job-runtimeenvironment" => "Runtime environment", "nordugrid-job-cpucount" => "Requested CPUs", "nordugrid-job-executionnodes" => "Execution nodes", "nordugrid-job-gmlog" => "GM log file", "nordugrid-job-clientsoftware" => "Client version", "nordugrid-job-rerunable" => "Rerunnable", "nordugrid-job-reqcput" => "Requested time (OBSOLETE)", "nordugrid-job-gridlog" => "Gridlog file (OBSOLETE)", "nordugrid-job-lrmscomment" => "LRMS comment (OBSOLETE)", "nordugrid-authuser-name" => "Name", "nordugrid-authuser-sn" => "Subject Name", "nordugrid-authuser-freecpus" => "Free CPUs", "nordugrid-authuser-diskspace" => "Free disk space (MB)", "nordugrid-authuser-queuelength" => "User's queued jobs", "nordugrid-se-name" => "Name", "nordugrid-se-aliasname" => "Storage element alias", "nordugrid-se-type" => "Storage element type", "nordugrid-se-acl" => "Authorised VOs", "nordugrid-se-freespace" => "Free space (MB)", "nordugrid-se-totalspace" => "Total space (MB)", "nordugrid-se-url" => "Contact URL", "nordugrid-se-baseurl" => "Contact URL (OBSOLETE)", "nordugrid-se-accesscontrol" => "Access control", "nordugrid-se-authuser" => "Authorised user (DN)", "nordugrid-se-location" => "Postal code", "nordugrid-se-owner" => "Owner", "nordugrid-se-middleware" => "Middleware", "nordugrid-se-issuerca" => "Certificate issuer", "nordugrid-se-issuerca-hash" => "Certificate issuer's hash", "nordugrid-se-trustedca" => "Trusted certificate issuers", "nordugrid-se-comment" => "Comment", "nordugrid-rc-name" => "Domain name", "nordugrid-rc-aliasname" => "Replica Catalog alias", "nordugrid-rc-baseurl" => "Contact URL", "nordugrid-rc-authuser" => 
"Authorised user (DN)", "nordugrid-rc-location" => "Postal code", "nordugrid-rc-owner" => "Owner", "nordugrid-rc-issuerca" => "Certificate issuer" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Can not read top-level resource indices", "2" => "None of the local indices returned connection", "3" => " bad configuration or request timed out", "4" => "No Grid jobs found", "5" => "No information found", "6" => "Server unavailable", "7" => " - refresh later", "8" => "No queue information found", "9" => "No entries found", "10" => "No users found", "11" => "Not authorised at host", "12" => "does not answer", "13" => "No recent jobs found for ", // debug messages "101" => " Monitor timeouts for GRIS: ", "102" => " sec on connection and ", "103" => " sec on search", "104" => " sec spent searching", "105" => "Showing resources only in ", "106" => "Polled top-level indices: ", "107" => "Got geographical locations, scanned sites: ", "108" => " sites arranged by geographical location", "109" => "Search for cluster attributes", "110" => "Search for queue attributes", "111" => "No data from ", "112" => " is up in ", "113" => " has no resources to offer", "114" => " Monitor timeouts for GIIS: ", "115" => "Skipping GRIS: ", "116" => "not a ", "117" => "Checking connection: ", "118" => "OK", "119" => "That far, detected resources of kind ", "120" => "LDAP error searching ", "121" => " status at ", "122" => "Blacklisted: ", "123" => "Registrant found for ", "124" => "Search for SE attributes", "125" => "Search for users", "126" => "Search for jobs", "127" => " has job ", "128" => " while not being authorized", "129" => "Can not get object data: error ", "130" => " Monitor timeouts for EMIR: ", "131" => " Monitor timeouts for ARCHERY depends on OS DNS resolver settings (In DNS cache we trust!)", "132" => "Failed to query the following ARCHERY endpoint: ", "133" => "Reached the recursive loop limit while querying ARCHERY endpoint: ", "134" => "Unsupported ARCHERY endpoint type %s for endpoint URL %s", // icon titles "301" => "Refresh", "302" => "Print", "303" => "Help", "304" => "Close", "305" => "Red", "306" => "Grey", "307" => "All users", "308" => "Active users", "309" => "Search", "310" => "Storage", "311" => "VOs", "312" => "Flag of ", "313" => " Grid processes and ", "314" => " local processes", // auxilliary strings "401" => "Processes", "402" => "Grid", "403" => "Local", "404" => "World", "405" => "TOTAL", "406" => " sites", "407" => "a lot of", "408" => " GB", "409" => " ALL", "410" => "Cluster", "411" => "Queue", "412" => "Job", "413" => "User", "414" => "Storage", "415" => "Replica Cat.", "416" => "Define attributes to display for the object: ", "417" => "AND of all the expressions will be matched", "418" => "Leave the righmost field empty to show everything", "419" => "Display resources or objects of your choice", "420" => "Distinguished name", "421" => "Can use a total of ", "422" => " sites", "423" => "Resource / object:", "424" => "Nr.of attributes (def. 6):", "425" => "Object", "426" => "Next", "427" => "Select one", "428" => "Reset", "429" => "SHOW" ), // Post code conversion: only for [en]! 
"tlconvert" => array ( "AU" => "Australia", "AT" => "Austria", "AM" => "Armenia", "DZ" => "Algeria", "BE" => "Belgium", "BG" => "Bulgaria", "CA" => "Canada", "CN" => "China", "CZ" => "Czechia", "DK" => "Denmark", "EE" => "Estonia", "FI" => "Finland", "FIN" => "Finland", "SF" => "Finland", "FR" => "France", "GE" => "Georgia", "DE" => "Germany", "D" => "Germany", "GR" => "Greece", "HU" => "Hungary", "IS" => "Iceland", "IR" => "Ireland", "IE" => "Ireland", "IT" => "Italy", "JP" => "Japan", "KEK" => "Japan", "TOKYO" => "Japan", "LV" => "Latvia", "LT" => "Lithuania", "MA" => "Morocco", "NL" => "Netherlands", "NO" => "Norway", "N" => "Norway", "PL" => "Poland", "PT" => "Portugal", "RO" => "Romania", "RU" => "Russia", "LK" => "SriLanka", "SE" => "Sweden", "SK" => "Slovakia", "SI" => "Slovenia", "CH" => "Switzerland", "TR" => "Turkey", "UK" => "UK", "UA" => "Ukraine", "COM" => "USA", "GOV" => "USA", "USA" => "USA", "US" => "USA" ) ); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/PaxHeaders.7502/us.inc0000644000000000000000000000012412050701227024253 xustar000000000000000027 mtime=1352893079.143025 27 atime=1513200575.698716 30 ctime=1513200663.747793323 nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/us.inc0000644000175000002070000014277512050701227024340 0ustar00mockbuildmock00000000000000 [<2-letter code>] // -- Translation: // -- Author: oxana.smirnova@hep.lu.se // Some common strings: $clickable = "CLICKABLE"; $str_att = ", human-readable except of some MDS-specific attributes. Click on the attribute name to get the list of the attribute values across the ARC universe."; $str_val = "Attribute value as entered in the Information System."; $str_que = "Typically, different queues correspond to different allowed task duration, or to different groups of users. Click on a queue name to get detailed information about the queue, including running, queued, and finished tasks."; $str_job = "Name of a job as assigned by the job owner.
    N/A indicates that the user did not assign any name.
    X indicates that the job has been killed by the owner
    ! indicates that the job failed in the system
    Click on a name to get a detailed description of the job."; $str_nam = "Name of the user, as specified in the personal certificate. Click on a name to get the list of all the resources available for this user and all the jobs by this user which are currently in the system."; $str_sta = "Job status as returned by the Grid Manager (GM) and LRMS. In sequential order, the states are:
    ACCEPTED – job submitted but not yet processed
    PREPARING – input files are being retrieved
    SUBMITTING – interaction with LRMS ongoing
    INLRMS – the job has been transferred to the LRMS; an internal status is added by the infosystem. Possible states are:
    : Q – job is queued
    : U – job is in a suspended state on a busy node (PBSPro)
    : S – job is in a suspended state (Condor)
    : R, run – job is running
    : E – job is finishing (PBS)
    FINISHING – output files are being transferred by the GM
    FINISHED – job is finished; time stamp is added by the infosystem
    CANCELING – job is being cancelled
    DELETED – job was not cleaned up upon user request but was removed by the GM due to the expiration time
    Each of the states can be reported with the PENDING: prefix, meaning the GM is attempting to move the job to the next state"; $str_tim = "CPU time used by the job, minutes."; $str_mem = "Memory consumed by the job, KB"; $str_cpu = "Number of processors used by the job."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Grid Monitor", "help" => "
    This screen displays all the sites registering to the top ARC indexing service, sorted by country and then by host name. Selected site parameters are monitored: cluster alias, total CPU capacity, and number of running and queued jobs, both Grid and local. Use the "Search" utility if you want to compare other cluster, queue, job etc. characteristics
    Country
    ".$clickable.". Country flag and name as deduced from available resource descriptions. Click to show only this country info.
    Cluster
    ".$clickable.". Cluster alias as assigned by the owner. Maximal displayed length is 22 characters. Click on the alias to get a detailed cluster description.
    CPUs
    Total number of CPUs in a cluster. NB! Only a fraction of these may actually be available to Grid users.
    Load (processes:Grid+local)
    ".$clickable.". Relative cluster load, corresponding to the occupied CPUs count. Grey bars indicate processors occupied by the localy submitted jobs, while red bars show CPUs occupied by jobs submitted via Grid. Click on the bar to get the detailed list of all the running Grid jobs on the cluster, including amount of processors per job.
    Queueing
    ".$clickable.". Number of all queued jobs on the cluster, shown as number of queueing grid jobs plus number of locally submitted queueing jobs. Click the first number to get the list of queued Grid jobs on the cluster.
    ", "Country" => 30, "Site" => 160, "CPUs" => 10, "Load (processes: Grid+local)" => 210, "Queueing" => 10 ), "clusdes" => array("0" => "Resource Details for", "help" => "
    Attribute
    ".$clickable.". Cluster attribute name".$str_att."
    Value
    ".$str_val."
    Queue
    ".$clickable.". Names of batch queues available for the ARC users, as set by cluster owners. ".$str_que."
    Status
    Queue status. An operating queue typically reports active status.
    CPU (min)
    Time limit for job duration per queue, if set, in CPU-minutes. The first displayed value is the lower limit, the second the upper one. If no limits are set (jobs of any duration are accepted), an N/A tag is shown.
    Running
    Number of jobs running in the queue. The total number of jobs is shown, with the number of processors occupied by Grid-submitted jobs displayed in parentheses, e.g. (Grid: 12). NB! For parallel multiprocessor jobs, the number in parentheses can be larger than the number of jobs.
    Queueing
    Number of jobs awaiting execution in the queue. The total number of jobs is shown, with Grid-submitted jobs displayed in parentheses, e.g. (Grid: 235)
    ", "Queue" => 0, "Mapping Queue" => 0, "Status" => 0, "Limits (min)" => 0, "CPUs" => 0, "Running" => 0, "Queueing" => 0 ), "jobstat" => array("0" => "Jobs at:Job ID", "help" => "
    JOB LIST:
    Job name
    ".$clickable.". Name of a job as assigned by the owner. If no name has been assigned, "N/A" is displayed. Click on a name to get a detailed description of the job.
    Owner
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Queue
    ".$clickable.". Name of the batch queue in which the job is being executed. ".$str_que."
    CPUs
    ".$str_cpu."
    JOB DETAILS:
    Attribute
    ".$clickable.". Job attribute name".$str_att."
    Value
    ".$str_val."
    ", "Job name" => 0, "Owner" => 0, "Status" => 0, "CPU (min)" => 0, "Queue" => 0, "CPUs" => 0 ), "volist" => array("0" => "Virtual Organisations", "help" => "
    Virtual Organisation
    ".$clickable.". Group of users, typically sharing common activities and resources, authorised at at least one ARC-enabled site. Click on the name to get the list of group members.
    Members
    Number of group members.
    Served by
    LDAP server that hosts the group membership database.
    ", "Virtual Organisation" => 0, "Members" => 0, "Served by" => 0 ), "vousers" => array("0" => "Grid User Base", "help" => "
    Name
    ".$clickable.". ".$str_nam."
    Affiliation
    User's home institute, as entered by a VO manager. Can be empty.
    E-mail
    ".$clickable.". Users e-mail as entered by a VO manager. Can be empty. Click the address to send an e-mail to the user.
    ", "#" => 0, "Name" => 0, "Affiliation" => 0, "E-mail" => 0 ), "userlist" => array("0" => "Information for", "help" => "
    Cluster:queue
    ".$clickable.". Names of clusters and respective queues (separated by a column, ":") where a user is authorized to submit jobs. If a user is not authorized, message "Not authorised at host ..." is displayed. Click a cluster name to get a detailed cluster description. Click on a queue name to get a detailed queue description.
    Free CPUs
    Number of free CPUs available in a given queue for the user at this moment of time, optionally appended with the upper time limit value (in minutes). For example, "3" means 3 CPUs available for a job of unlimited running time; "4:360" indicates there are 4 CPUs available for jobs not longer than 6 hours; "10:180 30" means there are 10 CPUs available for jobs not exceeding 3 hours, plus 30 CPUs available for jobs of any length; "0" means there are no CPUs available at the moment, and the jobs will be placed in a waiting queue.
    Queued jobs
    Number of the user's jobs expected to sit ahead of a newly submitted job (for this user) in the waiting queue. A value of "0" means the job is expected to be executed immediately. NB! This is only an estimate, which can be overridden by local policies.
    Free disk (MB)
    Disk space available for the user in a given queue (in megabytes). NB! This is only an estimate, as most clusters do not provide fixed disk quotas.
    Job name
    ".$clickable.". ".$str_job."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Cluster
    ".$clickable.". Name of the cluster at which the job is being executed. Click on a cluster name to get detailed information about the cluster.
    Queue
    ".$clickable.". Name of the batch queue in which the job is/was executed. ".$str_que."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Job name" => 0, "Status" => 0, "CPU (min)" => 0, "Cluster" => 0, "Queue" => 0, "CPUs" => 0 ), "attlist" => array("0" => "Attribute values", "help" => "
    Object
    ".$clickable.". Name of the object which attributes are displayed. It can be a cluster name, a clusters queue name, a job name, a user name etc. Click on the string to get a detailed decscription of the object.
    Attribute
    For each object, one or more attribute values can be listed. The column title is the human-readable attribute name (except for some MDS-specific attributes), and the column contents are the attribute values per object, as entered in the Information System.
    ", "Object" => 0, "Attribute" => 0 ), "quelist" => array("0" => "Queue", "help" => "
    Attribute
    ".$clickable.". Name of a queue attribute".$str_att."
    Value
    ".$str_val."
    Job name
    ".$clickable.". ".$str_job."
    Owner
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Memory (KB)
    ".$str_mem."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Job name" => 0, "Owner" => 0, "Status" => 0, "CPU (min)" => 0, "Memory (KB)" => 0, "CPUs" => 0 ), "sestat" => array("0" => "Storage Elements", "help" => "
    Alias
    Storage element alias as specified in the Information System. The maximum displayed length is 15 characters.
    Tot. space
    Total disk space, GB.
    Free space
    Disk space available at the moment, GB.
    Name
    Storage element name, consisting of a logical name and a host name (separated by a colon, ":"). The logical name is used only for information system purposes, to distinguish between different storage elements hosted by the same machine.
    Base URL
    URL for the storage element, typically using the gsiftp:// protocol. Use this URL as the base to access files.
    Type
    Storage element type. "gridftp-based" indicates disk storage with a GridFTP interface.
    ", "#" => 0, "Alias" => 0, "Free/tot. space, GB" => 0, "Name" => 0, "Base URL" => 0, "Type" => 0 ), "allusers" => array("0" => "Authorised Grid Users:Active Grid Users", "help" => "
    Name
    ".$clickable.". ".$str_nam."
    Affiliation
    User's affiliation, derived from the personal certificate
    Jobs
    Count of all user jobs in the system (running, pending, finished or deleted)
    Sites
    Shows how many sites authorise this user
    ", "#" => 0, "Name" => 0, "Affiliaton" => 0, "Jobs" => 0, "Sites" => 0 ), "userres" => array("0" => "", "Cluster:queue" => 0, "Free CPUs" => 0, "Queued jobs" => 0, "Free disk (MB)" => 0 ), "ldapdump" => array("0" => "", "Attribute" => 0, "Value" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info valid from (GMT)", "Mds-validto" => "Info valid to (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Front-end domain name", "nordugrid-cluster-aliasname" => "Cluster alias", "nordugrid-cluster-contactstring" => "Contact string", "nordugrid-cluster-interactive-contactstring" => "Interactive contact", "nordugrid-cluster-comment" => "Comment", "nordugrid-cluster-support" => "E-mail contact", "nordugrid-cluster-acl" => "Authorised VOs", "nordugrid-cluster-lrms-type" => "LRMS type", "nordugrid-cluster-lrms-version" => "LRMS version", "nordugrid-cluster-lrms-config" => "LRMS details", "nordugrid-cluster-architecture" => "Architecture", "nordugrid-cluster-opsys" => "Operating system", "nordugrid-cluster-homogeneity" => "Homogeneous cluster", "nordugrid-cluster-nodecpu" => "CPU type (slowest)", "nordugrid-cluster-nodememory" => "Memory (MB, smallest)", "nordugrid-cluster-totalcpus" => "CPUs, total", "nordugrid-cluster-cpudistribution" => "CPU:machines", "nordugrid-cluster-benchmark" => "Benchmark", "nordugrid-cluster-sessiondir-free" => "Disk space, available (MB)", "nordugrid-cluster-sessiondir-total" => "Disk space, total (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Grid session lifetime (min)", "nordugrid-cluster-cache-free" => "Cache size, available (MB)", "nordugrid-cluster-cache-total" => "Cache size, total (MB)", "nordugrid-cluster-runtimeenvironment" => "Runtime environment", "nordugrid-cluster-localse" => "Storage Element, local", "nordugrid-cluster-middleware" => "Grid middleware", "nordugrid-cluster-totaljobs" => "Jobs, total amount", "nordugrid-cluster-usedcpus" => "CPUs, occupied", "nordugrid-cluster-queuedjobs" => "Jobs, queued", "nordugrid-cluster-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-cluster-location" => "Postal code", "nordugrid-cluster-owner" => "Owner", "nordugrid-cluster-issuerca" => "Certificate issuer", "nordugrid-cluster-issuerca-hash" => "Certificate issuer's hash", "nordugrid-cluster-trustedca" => "Trusted certificate issuers", "nordugrid-cluster-nodeaccess" => "Node IP connectivity", "nordugrid-cluster-gridarea" => "Session area (OBSOLETE)", "nordugrid-cluster-gridspace" => "Grid disk space (OBSOLETE)", "nordugrid-cluster-opsysdistribution" => "OS distribution (OBSOLETE)", "nordugrid-cluster-runningjobs" => "Jobs, running (OBSOLETE)", "nordugrid-cluster-credentialexpirationtime" => "Credential expiration time", "nordugrid-queue-name" => "Queue name", "nordugrid-queue-comment" => "Comment", "nordugrid-queue-status" => "Queue status", "nordugrid-queue-running" => "Total occupied CPUs", "nordugrid-queue-localqueued" => "Local jobs, queued", "nordugrid-queue-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-queue-queued" => "Jobs, queued (OBSOLETE)", "nordugrid-queue-maxrunning" => "Jobs, running (max)", "nordugrid-queue-maxqueuable" => "Jobs, queueable (max)", "nordugrid-queue-maxuserrun" => "Jobs per Unix user (max)", "nordugrid-queue-maxcputime" => "CPU time, max. (minutes)", "nordugrid-queue-mincputime" => "CPU time, min. (minutes)", "nordugrid-queue-defaultcputime" => "CPU time, default (minutes)", "nordugrid-queue-maxwalltime" => "Walltime, max. 
(minutes)", "nordugrid-queue-minwalltime" => "Walltime, min. (minutes)", "nordugrid-queue-defaultwalltime" => "Walltime, default (minutes)", "nordugrid-queue-schedulingpolicy" => "Scheduling policy", "nordugrid-queue-totalcpus" => "CPUs, total", "nordugrid-queue-nodecpu" => "CPU type", "nordugrid-queue-nodememory" => "Memory (MB)", "nordugrid-queue-architecture" => "Architecture", "nordugrid-queue-opsys" => "Operating system", "nordugrid-queue-homogeneity" => "Homogeneous queue", "nordugrid-queue-gridrunning" => "CPUs occupied by Grid jobs", "nordugrid-queue-gridqueued" => "Grid jobs, queued", "nordugrid-queue-benchmark" => "Benchmark", "nordugrid-queue-assignedcpunumber" => "CPUs per queue (OBSOLETE)", "nordugrid-queue-assignedcputype" => "CPU type (OBSOLETE)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Owner", "nordugrid-job-execcluster" => "Execution cluster", "nordugrid-job-execqueue" => "Execution queue", "nordugrid-job-stdout" => "Standard output file", "nordugrid-job-stderr" => "Standard error file", "nordugrid-job-stdin" => "Standard input file", "nordugrid-job-reqcputime" => "Requested CPU time", "nordugrid-job-reqwalltime" => "Requested wall clock time", "nordugrid-job-status" => "Status", "nordugrid-job-queuerank" => "Position in the queue", "nordugrid-job-comment" => "LRMS comment", "nordugrid-job-submissionui" => "Submission machine", "nordugrid-job-submissiontime" => "Submission time (GMT)", "nordugrid-job-usedcputime" => "Used CPU time", "nordugrid-job-usedwalltime" => "Used wall clock time", "nordugrid-job-completiontime" => "Completion time (GMT)", "nordugrid-job-sessiondirerasetime" => "Erase time (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxy expiration time (GMT)", "nordugrid-job-usedmem" => "Used memory (KB)", "nordugrid-job-errors" => "Errors", "nordugrid-job-exitcode" => "Exit code", "nordugrid-job-jobname" => "Name", "nordugrid-job-runtimeenvironment" => "Runtime environment", "nordugrid-job-cpucount" => "Requested CPUs", "nordugrid-job-executionnodes" => "Execution nodes", "nordugrid-job-gmlog" => "GM log file", "nordugrid-job-clientsoftware" => "Client version", "nordugrid-job-rerunable" => "Rerunnable", "nordugrid-job-reqcput" => "Requested time (OBSOLETE)", "nordugrid-job-gridlog" => "Gridlog file (OBSOLETE)", "nordugrid-job-lrmscomment" => "LRMS comment (OBSOLETE)", "nordugrid-authuser-name" => "Name", "nordugrid-authuser-sn" => "Subject Name", "nordugrid-authuser-freecpus" => "Free CPUs", "nordugrid-authuser-diskspace" => "Free disk space (MB)", "nordugrid-authuser-queuelength" => "User's queued jobs", "nordugrid-se-name" => "Name", "nordugrid-se-aliasname" => "Storage element alias", "nordugrid-se-type" => "Storage element type", "nordugrid-se-acl" => "Authorised VOs", "nordugrid-se-freespace" => "Free space (MB)", "nordugrid-se-totalspace" => "Total space (MB)", "nordugrid-se-url" => "Contact URL", "nordugrid-se-baseurl" => "Contact URL (OBSOLETE)", "nordugrid-se-accesscontrol" => "Access control", "nordugrid-se-authuser" => "Authorised user (DN)", "nordugrid-se-location" => "Postal code", "nordugrid-se-owner" => "Owner", "nordugrid-se-middleware" => "Middleware", "nordugrid-se-issuerca" => "Certificate issuer", "nordugrid-se-issuerca-hash" => "Certificate issuer's hash", "nordugrid-se-trustedca" => "Trusted certificate issuers", "nordugrid-se-comment" => "Comment", "nordugrid-rc-name" => "Domain name", "nordugrid-rc-aliasname" => "Replica Catalog alias", "nordugrid-rc-baseurl" => "Contact URL", "nordugrid-rc-authuser" => 
"Authorised user (DN)", "nordugrid-rc-location" => "Postal code", "nordugrid-rc-owner" => "Owner", "nordugrid-rc-issuerca" => "Certificate issuer" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Can not read top-level resource indices", "2" => "None of the local indices returned connection", "3" => " bad configuration or request timed out", "4" => "No Grid jobs found", "5" => "No information found", "6" => "Server unavailable", "7" => " - refresh later", "8" => "No queue information found", "9" => "No entries found", "10" => "No users found", "11" => "Not authorised at host", "12" => "does not answer", "13" => "No recent jobs found for ", // debug messages "101" => " Monitor timeouts for GRIS: ", "102" => " sec on connection and ", "103" => " sec on search", "104" => " sec spent searching", "105" => "Showing resources only in ", "106" => "Polled top-level indices: ", "107" => "Got geographical locations, scanned sites: ", "108" => " sites arranged by geographical location", "109" => "Search for cluster attributes", "110" => "Search for queue attributes", "111" => "No data from ", "112" => " is up in ", "113" => " has no resources to offer", "114" => " Monitor timeouts for GIIS: ", "115" => "Skipping GRIS: ", "116" => "not a ", "117" => "Checking connection: ", "118" => "OK", "119" => "That far, detected resources of kind ", "120" => "LDAP error searching ", "121" => " status at ", "122" => "Blacklisted: ", "123" => "Registrant found for ", "124" => "Search for SE attributes", "125" => "Search for users", "126" => "Search for jobs", "127" => " has job ", "128" => " while not being authorized", "129" => "Can not get object data: error ", "130" => " Monitor timeouts for EMIR: ", // icon titles "301" => "Refresh", "302" => "Print", "303" => "Help", "304" => "Close", "305" => "Red", "306" => "Grey", "307" => "All users", "308" => "Active users", "309" => "Search", "310" => "Storage", "311" => "VOs", "312" => "Flag of ", "313" => " Grid processes and ", "314" => " local processes", // auxilliary strings "401" => "Processes", "402" => "Grid", "403" => "Local", "404" => "World", "405" => "TOTAL", "406" => " sites", "407" => "a lot of", "408" => " GB", "409" => " ALL", "410" => "Cluster", "411" => "Queue", "412" => "Job", "413" => "User", "414" => "Storage", "415" => "Replica Cat.", "416" => "Define attributes to display for the object: ", "417" => "AND of all the expressions will be matched", "418" => "Leave the righmost field empty to show everything", "419" => "Display resources or objects of your choice", "420" => "Distinguished name", "421" => "Can use a total of ", "422" => " sites", "423" => "Resource / object:", "424" => "Nr.of attributes (def. 
6):", "425" => "Object", "426" => "Next", "427" => "Select one", "428" => "Reset", "429" => "SHOW" ), // Post code conversion "tlconvert" => array ( "Australia" => "Australia", "Austria" => "Austria", "Armenia" => "Armenia", "Algeria" => "Algeria", "Belgium" => "Belgium", "Bulgaria" => "Bulgaria", "Canada" => "Canada", "China" => "China", "Czechia" => "Czechia", "Denmark" => "Denmark", "Estonia" => "Estonia", "Finland" => "Finland", "France" => "France", "Georgia" => "Georgia", "Germany" => "Germany", "Greece" => "Greece", "Hungary" => "Hungary", "Iceland" => "Iceland", "Ireland" => "Ireland", "Ireland" => "Ireland", "Italy" => "Italy", "Japan" => "Japan", "Latvia" => "Latvia", "Lithuania" => "Lithuania", "Morocco" => "Morocco", "Netherlands" => "Netherlands", "Norway" => "Norway", "Poland" => "Poland", "Portugal" => "Portugal", "Romania" => "Romania", "Russia" => "Russia", "SriLanka" => "Sri Lanka", "Sweden" => "Sweden", "Slovakia" => "Slovakia", "Slovenia" => "Slovenia", "Switzerland" => "Switzerland", "Turkey" => "Turkey", "UK" => "UK", "Ukraine" => "Ukraine", "USA" => "USA" ) ); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/PaxHeaders.7502/fr.inc0000644000000000000000000000012412050701227024233 xustar000000000000000027 mtime=1352893079.143025 27 atime=1513200575.696716 30 ctime=1513200663.742793262 nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/fr.inc0000644000175000002070000014632012050701227024306 0ustar00mockbuildmock00000000000000 N/A indique que l'utilisateur n'a pas donné de nom.
    X indique que le job a été tué par le propriétaire.
    ! indique que le job a échoué dans le systÈme
    Cliquer sur un nom pour voir une description détaillée du job."; $str_nam = "Nom de l'utilisateur, tel que spécifié dans le certificat personnel. Cliquer sur un nom pour voir la list de toutes les ressource disponibles pour cet utilisateur et tous les jobs soumis par cet utilisateur qui sont actuellement dans le système."; $str_sta = "Statut du job tel que fourni par le Grid Manager (GM) et LRMS. Dans l'ordre, les états sont :
    ACCEPTED – job soumis mais non encore pris en charge
    PREPARING – les fichiers d'entrée sont en train d'être récupérés
    SUBMITTING – interaction avec LRMS en cours
    INLRMS – le job est transféré au LRMS; un statut interne est ajouté par l'infosystem. Les états possible sont :
    : Q – le job est en attente
    : U – le job est suspendu dans un node occupé (PBSPro)
    : S – le job est suspendu (Condor)
    : R, run – le job est en cours
    : E – le job se termine (PBS)
    FINISHING – les fichiers de sortie sont en train d'être transférés par le GM
    FINISHED – le job est terminé; un indicateur temporel est ajouté par l'infosystem
    CANCELING – le job est en train d'être annulé
    DELETED – le job n'a pas été supprimé par l'utilisateur mais par le GM à cause de la date d'expiration
    Chaque état peut être donné avec le préfixe PENDING:, ce qui signifie que le GM essaie de déplacer le job vers l'état suivant"; $str_tim = "Temps CPU utilisé par le job, en minutes."; $str_mem = "Mémoire consomée par le job, en ko"; $str_cpu = "Nombre de processeurs utilisés par le job."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Grid Monitor", "help" => "
    Cet écran montre tous les sites enrégistrés dans l'indexing service d'ARC, triés par pays puis par nom de site. Une selection de paramètres de site sont affichés : alias du cluster, capacité CPU totale et nombre de jobs courants et en attente, aussi bien du Grid que locaux. Utiliser "Search" pour comparer d'autres caractéristiques de cluster, file, job etc...
    Pays
    ".$clickable.". Drapeau et nom du pays, extrait des descriptions de ressource disponibles. Cliquer pour montrer les information concernant ce pays uniquement.
    Cluster
    ".$clickable.". Alias du cluster assigné par le propriétaire. La longueur maximale affichée est 22 caractÈres. Cliquer sur l'alias pour voir une description détaillée du cluster.
    CPU
    nombre total de CPU dans un cluster. NB! Seule une fraction de ceux-ci est effectivement accessible aux utilisateurs du Grid.
    Charge (processus:Grid+local)
    ".$clickable.". Charge relative du cluster, correspondant au nombre de CPU occupés. Les barres grises indiquent les processeurs occupés par les jobs soumis localement, les barres rouges montrent les CPU occupés par des jobs soumis à travers le Grid. Cliquer sur la barre pour voir la liste détaillée de tous les jobs d'origine Grid dans le cluster, y compris le nombre de processus par job.
    Files d'attente
    ".$clickable.". Nombre total de jobs en attente dans le cluster, montré comme le nombre de jobs du grid en attente plus le nombre de jobs en attente soumis localement. Cliquer sur le premier nombre pour voir la liste des jobs du Grid en attente dans le cluster.
    ", "Pays" => 30, "Cluster" => 160, "CPU" => 10, "Charge (processus: Grid+local)" => 210, "File d'attente" => 10 ), "clusdes" => array("0" => "Details des ressources pour", "help" => "
    Attribut
    ".$clickable.". Nom de l'attribut de cluster".$str_att."
    Valeur
    ".$str_val."
    File d'attente
    ".$clickable.". Noms des files batch disponibles pour les utilisateurs d'ARC, donné par le propriétaire du cluster. ".$str_que."
    Statut
    Statut des files. Une file active indiquera typiquement le statut : active.
    CPU (min)
    Limite de durée pour un job dans une file, si elle existe, en minutes. La première valeur affichée est la limite basse, la seconde la limite haute. Si les limites ne sont pas données (jobs de durée quelconque acceptés), le symbole N/A est affiché.
    En cours
    Nombre de jobs en cours dans la file. Le nombre total de jobs est indiqué, avec le nombre de processeurs occupś par des jobs du Grid entre parenthèses, par ex. (Grid:12). NB! Pour les jobs multiprocesseurs en parallèle, le nombre entre parenthèse peut être plus élevé que le nombre de jobs.
    En attente
    Nombre de jobs en attente d'execution dans la file. Le nombre total de jobs est affiché, avec les jobs du Grid entre parenthèse, par ex. (Grid: 235)
    ", "File d'attente" => 0, "Mapping Queue" => 0, "Statut" => 0, "Limites (min)" => 0, "CPU" => 0, "En cours" => 0, "En attente" => 0 ), "jobstat" => array("0" => "Jobs à:Job ID", "help" => "
    LIST DES JOBS:
    Nom du job
    ".$clickable.". Nom d'un job, assigné par le propriétaire. Si aucun nom n'a été assigné, "N/A" est affiché. Cliquer sur un nom pour voir une description détaillée du job.
    Propriétaire
    ".$clickable.". ".$str_nam."
    Statut
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    File d'attente
    ".$clickable.". Nom de la file batch dans laquelle le job est exécuté. ".$str_que."
    CPU
    ".$str_cpu."
    DETAILS DU JOB:
    Attribut
    ".$clickable.". Nom de l'attribut du job".$str_att."
    Valeur
    ".$str_val."
    ", "Nom du job" => 0, "propriétaire" => 0, "Statut" => 0, "CPU (min)" => 0, "Filer d'attente" => 0, "CPU" => 0 ), "volist" => array("0" => "Virtual Organisations", "help" => "
    Virtual Organisation
    ".$clickable.". Groupe d'utilisateurs, généralement partageant la même activités et les mêmes ressources, autorisé à au moins un site ARC. Cliquer sur le nom pour voir la liste des membres du groupe.
    Membres
    Nombre de membres du groupe.
    Desservi par
    serveur LDAP qui supporte la base de données des membres du groupe.
    ", "Virtual Organisation" => 0, "Membres" => 0, "Desservi par" => 0 ), "vousers" => array("0" => "Utilisateurs du Grid", "help" => "
    Nom
    ".$clickable.". ".$str_nam."
    Affiliation
    Institut d'origine de l'utilisateur, entré par le VO manager. Peut être vide.
    Adresse électronique
    ".$clickable.". Adresse électronique de l'utilisateur, entré par le VO manager. Peut être vide. Cliquer sur l'adresse pour envoyer un courriel à l'utilisateur.
    ", "#" => 0, "Nom" => 0, "Affiliation" => 0, "Adresse èlectronique" => 0 ), "userlist" => array("0" => "Information pour", "help" => "
    Cluster:file
    ".$clickable.". Noms des clusters et file respective (separés par deux points, ":") où un utilisateur est autorisé à soumettre des jobs. Si un utilisateur n'est pas autorisé, le message "Not authorised at host ..." est affiché. Cliquer sur un nom de cluster pour voir une description détaillée du cluster. Cliquer sur un nom de file pour voir une description détaillée de la file.
    CPU libres
    Nombre de CPU libres disponibles dans une file données pour l'utilisateur à cet instant, eventuellement associé avec la durée maximum (en minutes) Par exemple, "3" signifie 3 CPU disponible pour un job de durée illimitée; "4:360" indique qu'il y a 4 CPU disponibles pour des jobs de moins de 6 heures; "10:180 30" signifie qu'il y a 10 CPU disponibles pour des jobs n'excédant pas 3 heures, plus 30 CPU disponibles pour des jobs de n'omporte quelle durée; "0" signifie qu'il n'y a pas de CPU disponible à cet instant, et les jobs seront placés dans une file d'attente.
    Jobs en attente
    Nombre de jobs d'utilisateur qui seront avant un nouveau job (pour cet utilisateur) dans une file d'attente. Un nombre de "0" signifie que le job devrait être exécuté immédiatement. NB! Ceci n'est qu'une estimation, qui peut être outrepassée par des politiques locales.
    Disque libre (Mo)
    Espace disque disponible pour l'utilisateur dans une file donnée (en Mégaoctets). NB! Ceci n'est qu'une estimation, étant donné que la plupart des clusters ne fournissent pas de quotas fixes.
    Nom du job
    ".$clickable.". ".$str_job."
    Statut
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Cluster
    ".$clickable.". Nom du cluster où le job est exécuté. Cliquer sur un nom de cluster pour voir des information détaillées sur le cluster.
    Queue
    ".$clickable.". Nom de la file batch dans laquelle le job est/était exécuté. ".$str_que."
    CPU
    ".$str_cpu."
    ", "" => 0, "Nom du job" => 0, "Statut" => 0, "CPU (min)" => 0, "Cluster" => 0, "File" => 0, "CPU" => 0 ), "attlist" => array("0" => "Valeur des attributs", "help" => "
    Objet
    ".$clickable.". Nom de l'objet dont les attributs sont affichés Ce peut être le nom d'un cluster, d'une file de clusters, d'un job, d'un utilisateur etc... Cliquer sur le texte pour voir une description détaillée de l'objet.
    Attribut
    Pour chaque objet, un ou plusieurs attributs peuvent Être listés Le titre de la colonne est une version "human-readable" du nom de l'attribut (sauf pour certain attributs MDS-specifiques), et le contenu de la colonne est la valeur de l'attribut par objet, telle qu'elle est entrée dans l'Information System.
    ", "Objet" => 0, "Attribut" => 0 ), "quelist" => array("0" => "File", "help" => "
    Attribut
    ".$clickable.". Nom d'un attribut de file".$str_att."
    Valeur
    ".$str_val."
    Nom du job
    ".$clickable.". ".$str_job."
    Propriétaire
    ".$clickable.". ".$str_nam."
    Statut
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Memoire (KB)
    ".$str_mem."
    CPU
    ".$str_cpu."
    ", "" => 0, "Nom du job" => 0, "Propriétaire" => 0, "Statut" => 0, "CPU (min)" => 0, "Memoire (ko)" => 0, "CPU" => 0 ), "sestat" => array("0" => "Elements de Stockage", "help" => "
    Alias
    Alias de l'Element de Stockage specifié dans l'Information System. Longueur maximal affichée de 15 caractères.
    Espace total
    Espace disque total, Go.
    Espace libre
    Espace disque disponible sur le moment, Go.
    Nom
    Nom de l'Element de Stockage, composé d'un nom logique et d'un nom d'hôte (séparés par deux points, ":"). Le nom logique est utilisé uniquement pour le système d'information, pour distinguer différents élément de stockages accueillis par la même machine-hôte.
    URL de base
    URL pour l'élément de stockage, généralement un protocole gsiftp:// Utiliser cet URL comme base pour accéder aux fichiers.
    Type
    Type d'élément de stockage. "gridftp-based" indique un stockage disque avec une interface GridFTP.
    ", "#" => 0, "Alias" => 0, "Espace libre/total, GB" => 0, "Nom" => 0, "URL de base" => 0, "Type" => 0 ), "allusers" => array("0" => "Utilisateurs Grid autorisés:Utilisateurs Grid actifs", "help" => "
    Nom
    ".$clickable.". ".$str_nam."
    Affiliation
    Affiliation de l'utilisateur, dérivé du certificat personnel
    Jobs
    Compteur de tous les jobs d'utilisateur dans le système (en cours, en attente, terminés ou supprimés)
    Sites
    Affiche le nombre de sites qui admettent cet utilisateur
    ", "#" => 0, "Nom" => 0, "Affiliaton" => 0, "Jobs" => 0, "Sites" => 0 ), "userres" => array("0" => "", "Cluster:file" => 0, "CPU libres" => 0, "Jobs en attente" => 0, "Espace disque libre (Mo)" => 0 ), "ldapdump" => array("0" => "", "Attribut" => 0, "Valeur" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info valide depuis (GMT)", "Mds-validto" => "Info valide jusqu'à (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Nom du domaine Front-end", "nordugrid-cluster-aliasname" => "Alias du cluster", "nordugrid-cluster-contactstring" => "Fil de contact", "nordugrid-cluster-interactive-contactstring" => "Contact interactif", "nordugrid-cluster-comment" => "Commentaire", "nordugrid-cluster-support" => "Contact courriel", "nordugrid-cluster-acl" => "VO authorisés", "nordugrid-cluster-lrms-type" => "type de LRMS", "nordugrid-cluster-lrms-version" => "Version de LRMS", "nordugrid-cluster-lrms-config" => "détails de LRMS", "nordugrid-cluster-architecture" => "Architecture", "nordugrid-cluster-opsys" => "Système d'exploitation", "nordugrid-cluster-homogeneity" => "Cluster homogène", "nordugrid-cluster-nodecpu" => "type de CPU (le plus lent)", "nordugrid-cluster-nodememory" => "Memoire (Mo, la plus petite)", "nordugrid-cluster-totalcpus" => "CPU, total", "nordugrid-cluster-cpudistribution" => "CPU:machines", "nordugrid-cluster-benchmark" => "Benchmark", "nordugrid-cluster-sessiondir-free" => "Espace disque, disponible (Mo)", "nordugrid-cluster-sessiondir-total" => "Espace disque, total (Mo)", "nordugrid-cluster-sessiondir-lifetime"=> "Durée de vie d'une session Grid (min)", "nordugrid-cluster-cache-free" => "Taille du cache, disponible (Mo)", "nordugrid-cluster-cache-total" => "Taille du cache, total (Mo)", "nordugrid-cluster-runtimeenvironment" => "Moteur d'exécution (runtime environment)", "nordugrid-cluster-localse" => "Element de stockage, local", "nordugrid-cluster-middleware" => "Intergiciel (middleware) du Grid", "nordugrid-cluster-totaljobs" => "Jobs, quantité totale", "nordugrid-cluster-usedcpus" => "CPU, occupés", "nordugrid-cluster-queuedjobs" => "Jobs, en attente", "nordugrid-cluster-prelrmsqueued" => "Jobs Grid, en attente d'être soumis", "nordugrid-cluster-location" => "Code postal", "nordugrid-cluster-owner" => "Propriétaire", "nordugrid-cluster-issuerca" => "Fournisseur du certificat", "nordugrid-cluster-issuerca-hash" => "Hachage du fournisseur du certificat", "nordugrid-cluster-trustedca" => "Fournisseurs de certificat fiables", "nordugrid-cluster-nodeaccess" => "IP-connectivité du node", "nordugrid-cluster-gridarea" => "zone de la session (OBSOLETE)", "nordugrid-cluster-gridspace" => "Espace disque Grid (OBSOLETE)", "nordugrid-cluster-opsysdistribution" => "OS distribution (OBSOLETE)", "nordugrid-cluster-runningjobs" => "Jobs, en cours (OBSOLETE)", "nordugrid-cluster-credentialexpirationtime" => "Credential expiration time", "nordugrid-queue-name" => "Nom de file", "nordugrid-queue-comment" => "Commentaire", "nordugrid-queue-status" => "Statut de file", "nordugrid-queue-running" => "CPU, occupés", "nordugrid-queue-localqueued" => "Jobs locaux, en attente", "nordugrid-queue-prelrmsqueued" => "Jobs Grid, en attente d'être soumis", "nordugrid-queue-queued" => "Jobs, en attente (OBSOLETE)", "nordugrid-queue-maxrunning" => "Jobs, en cours (max)", "nordugrid-queue-maxqueuable" => "Jobs, pouvant être mis en attente (max)", "nordugrid-queue-maxuserrun" => "Jobs par utilisateur Unix (max)", "nordugrid-queue-maxcputime" => "Temps 
CPU, max. (minutes)", "nordugrid-queue-mincputime" => "Temps CPU, min. (minutes)", "nordugrid-queue-defaultcputime" => "Temps CPU, default (minutes)", "nordugrid-queue-maxwalltime" => "Temps d'horloge, max. (minutes)", "nordugrid-queue-minwalltime" => "Temps d'horloge, min. (minutes)", "nordugrid-queue-defaultwalltime" => "Temps d'horloge, defaut (minutes)", "nordugrid-queue-schedulingpolicy" => "Scheduling policy", "nordugrid-queue-totalcpus" => "CPU, total", "nordugrid-queue-nodecpu" => "type de CPU", "nordugrid-queue-nodememory" => "Memoire (Mo)", "nordugrid-queue-architecture" => "Architecture", "nordugrid-queue-opsys" => "System d'exploitation", "nordugrid-queue-homogeneity" => "File homogène", "nordugrid-queue-gridrunning" => "CPU, occupés par jobs Grid", "nordugrid-queue-gridqueued" => "Jobs Grid, en attente", "nordugrid-queue-benchmark" => "Benchmark", "nordugrid-queue-assignedcpunumber" => "CPU par file (OBSOLETE)", "nordugrid-queue-assignedcputype" => "Type de CPU (OBSOLETE)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Propriétaire", "nordugrid-job-execcluster" => "Cluster d'execution", "nordugrid-job-execqueue" => "File d'execution", "nordugrid-job-stdout" => "Fichier de sortie standard", "nordugrid-job-stderr" => "Fichier d'erreur standard", "nordugrid-job-stdin" => "Fichier d'entrée standard", "nordugrid-job-reqcputime" => "Temps CPU requis", "nordugrid-job-reqwalltime" => "Temps d'horloge requis", "nordugrid-job-status" => "Statut", "nordugrid-job-queuerank" => "Position dans la file", "nordugrid-job-comment" => "Commentaire LRMS", "nordugrid-job-submissionui" => "Machine de soumission", "nordugrid-job-submissiontime" => "Date de soumission (GMT)", "nordugrid-job-usedcputime" => "Temps CPU utilisé", "nordugrid-job-usedwalltime" => "Temps d'horloge utilisé", "nordugrid-job-completiontime" => "Date de termination (GMT)", "nordugrid-job-sessiondirerasetime" => "Date de suppression (GMT)", "nordugrid-job-proxyexpirationtime" => "Date d'expiration du proxy (GMT)", "nordugrid-job-usedmem" => "Memoire utilisée (Ko)", "nordugrid-job-errors" => "Erreurs", "nordugrid-job-exitcode" => "Code de sortie", "nordugrid-job-jobname" => "Nom", "nordugrid-job-runtimeenvironment" => "Moteur d'exécution (runtime environment)", "nordugrid-job-cpucount" => "CPU requis", "nordugrid-job-executionnodes" => "Nodes d'exécution", "nordugrid-job-gmlog" => "Fichier de journal du GM", "nordugrid-job-clientsoftware" => "Version du client", "nordugrid-job-rerunable" => "Réexecutable", "nordugrid-job-reqcput" => "Temps requis (OBSOLETE)", "nordugrid-job-gridlog" => "Fichier Gridlog (OBSOLETE)", "nordugrid-job-lrmscomment" => "commentaire LRMS (OBSOLETE)", "nordugrid-authuser-name" => "Nom", "nordugrid-authuser-sn" => "Nom du sujet", "nordugrid-authuser-freecpus" => "CPU libres", "nordugrid-authuser-diskspace" => "Espace disque libre (Mo)", "nordugrid-authuser-queuelength" => "Jobs en attente de l'utilisateur", "nordugrid-se-name" => "Nom", "nordugrid-se-aliasname" => "Alias de l'élément de stockage", "nordugrid-se-type" => "Type d'élément de stockage", "nordugrid-se-acl" => "VO autorisés", "nordugrid-se-freespace" => "Espace libre (Mo)", "nordugrid-se-totalspace" => "Espace total (Mo)", "nordugrid-se-url" => "URL de contact", "nordugrid-se-baseurl" => "URL de contact (OBSOLETE)", "nordugrid-se-accesscontrol" => "Contrôle d'accès", "nordugrid-se-authuser" => "Utilisateur autorisé (DN)", "nordugrid-se-location" => "Code postal", "nordugrid-se-owner" => "Propriétaire", 
"nordugrid-se-middleware" => "Intergiciel (middleware)", "nordugrid-se-issuerca" => "Fournisseur de certificate", "nordugrid-se-issuerca-hash" => "Hachage du fournisseur de certificat", "nordugrid-se-trustedca" => "Fournisseurs de certificat fiables", "nordugrid-se-comment" => "Commentaire", "nordugrid-rc-name" => "Nom de domaine", "nordugrid-rc-aliasname" => "Alias du duplicata du Catalogue", "nordugrid-rc-baseurl" => "URL de contact", "nordugrid-rc-authuser" => "Utilisateur autorisé (DN)", "nordugrid-rc-location" => "Code postal", "nordugrid-rc-owner" => "Propriétaire", "nordugrid-rc-issuerca" => "Fournisseur de certificat" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Impossible de lire les index de ressource", "2" => "Aucun des index locaux ne retourne de connexion", "3" => " mauvaise configuration ou la requête a expiré", "4" => "Aucun job Grid trouvé", "5" => "Aucune information trouvée", "6" => "Serveur indisponible", "7" => " - rafraîchir plus tard", "8" => "Aucune information de liste trouvée", "9" => "Aucune entrée trouvée", "10" => "Aucun utilisateur trouvé", "11" => "Non autorisé chez l'hôte", "12" => "Ne répond pas", "13" => "Aucun job récent trouvé pour ", // debug messages "101" => " Monitor timeouts pour GRIS: ", "102" => " sec pendant la connection et ", "103" => " sec pendant la recherche", "104" => " sec en recherche", "105" => "N'affiche les ressources qu'en ", "106" => "Polled top-level indices: ", "107" => "Situations géographique obtenues, sites scannés: ", "108" => " sites rangés par situation géographique", "109" => "Recherche d'attributs du cluster", "110" => "Recherche d'attributs de la file", "111" => "Aucune donnée de ", "112" => " is up in ", "113" => " n'a aucune ressource à proposer", "114" => " Monitor timeouts for GIIS: ", "115" => "Saute GRIS: ", "116" => "pas un ", "117" => "Vérifie la connexion: ", "118" => "OK", "119" => "Jusqu'ici, a détecté des ressource de genre ", "120" => "Erreur LDAP en cherchant ", "121" => " statut à ", "122" => "Sur liste noire: ", "123" => "Registrant found for ", "124" => "Recherche d'attributs de SE", "125" => "Recherche d'utilisateurs", "126" => "Recherche de jobs", "127" => " a un job ", "128" => " alors que ce n'est pas autorisé", "129" => "Impossible d'obtenir les données d'object: erreur ", "130" => " Monitor timeouts for EMIR: ", // icon titles "301" => "Rafraîchir", "302" => "Imprimer", "303" => "Aide", "304" => "Fermer", "305" => "Rouge", "306" => "Gris", "307" => "Tous utilisateurs", "308" => "Utilisateurs actifs", "309" => "Rechercher", "310" => "Stockage", "311" => "VO", "312" => "Drapeau de ", "313" => " processus Grid et ", "314" => " processus locaux", // auxilliary strings "401" => "Processus", "402" => "Grid", "403" => "Local", "404" => "Monde", "405" => "TOTAL", "406" => " sites", "407" => "beaucoup de", "408" => " Go", "409" => " ALL", "410" => "Cluster", "411" => "File", "412" => "Job", "413" => "Utilisateur", "414" => "Stockage", "415" => "Duplicata Cat.", "416" => "Définir les attributs à afficher pour l'objet: ", "417" => "Le produit logique (ET) de toutes les expressions va être testé", "418" => "Laisser le champ de droite vide pour tout afficher", "419" => "Afficher les ressources ou objets de votre choix", "420" => "Nom Distinct", "421" => "Peut utiliser un total de ", "422" => " sites", "423" => "Ressource / objet:", "424" => "Nr. des attributs (def. 
6):", "425" => "Objet", "426" => "Suivant", "427" => "Choisir un", "428" => "Reinitialiser", "429" => "AFFICHER" ), // Post code conversion "tlconvert" => array ( "Australia" => "Australie", "Austria" => "Autriche", "Armenia" => "Armenie", "Algeria" => "Algerie", "Belgium" => "Belgique", "Bulgaria" => "Bulgarie", "Canada" => "Canada", "China" => "Chine", "Czechia" => "République Tchèque", "Denmark" => "Danemark", "Estonia" => "Estonie", "Finland" => "Finlande", "France" => "France", "Georgia" => "Georgie", "Germany" => "Allemagne", "Greece" => "Grèce", "Hungary" => "Hongrie", "Iceland" => "Islande", "Ireland" => "Irlande", "Italy" => "Italie", "Japan" => "Japon", "Latvia" => "Lettonie", "Lithuania" => "Lithuanie", "Morocco" => "Maroc", "Netherlands" => "Pays-Bas", "Norway" => "Norvège", "Poland" => "Pologne", "Portugal" => "Portugal", "Romania" => "Roumanie", "Russia" => "Russie", "SriLanka" => "Sri Lanka", "Sweden" => "Suède", "Slovakia" => "Slovaquie", "Slovenia" => "Slovenie", "Switzerland" => "Suisse", "Turkey" => "Turquie", "UK" => "Grande-Bretagne", "Ukraine" => "Ukraine", "USA" => "USA" ) ); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/PaxHeaders.7502/sv.inc0000644000000000000000000000012312050701227024253 xustar000000000000000027 mtime=1352893079.143025 27 atime=1513200575.703716 29 ctime=1513200663.74679331 nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/sv.inc0000644000175000002070000014266212050701227024334 0ustar00mockbuildmock00000000000000 N/A betyder att ägaren inte tilldelat ett jobbnamn
    X betyder att jobbet dödats av ägaren
    ! betyder att jobbet inte fullbordades i systemet
    Klicka på ett namn för att få en detaljerad beskrivning av jobbet."; $str_nam = "Användarens namn såsom specificerat i det personliga cerifikatet. Klicka på ett namn för att få en lista över alla resurser som är tillgängliga för denna användare och denna användares alla jobb som för närvarande finns i systemet."; $str_sta = "Jobbstatus returnerad av gridmanagern (GM) och LRMS. Tillstånden är i tidsordning:
    ACCEPTED – jobbet har skickats in men är ännu ej behandlat
    PREPARING – indatafiler hämtas
    SUBMITTING – växelverkan med LRMS pågår
    INLRMS – jobbet har överförts till LRMS; intern status läggs till av informationsstystemet. Möjliga tillstånd är:
    : Q – jobbet är köat
    : U – jobbet är i ett uppskjutet tillstånd på en upptagen nod (PBSPro)
    : S – jobbet är i ett uppskjutet tillstånd (Condor)
    : R, run – jobbet exekveras
    : E – jobbet avslutas (PBS)
    FINISHING – utdatafiler överförs av GM
    FINISHED – jobbet är avslutat; tidsstämpel läggs till av informationssystemet
    CANCELING – jobbet håller på att avbrytas
    DELETED – jobbet har inte tagits bort på begäran av användaren utan av GM p.g.a. att maximala lagringstiden har passerat
    Alla dessa tillstånd kan rapporteras med prefixet PENDING:, vilket betyder att GM försöker a flytta jobbet till nästa tillstånd"; $str_tim = "CPU-tid som jobbet använt, minuter."; $str_mem = "Minne som jobbet använt, KB."; $str_cpu = "Antal processorer som jobbet använt."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Gridmonitor", "help" => "
    Denna sida visar alla kluster som registrerar sig till ARCs indexservice, sorterade efter land och därefter värdnamn. Utvalda klusterparametrar monitoreras: klusteralias, total CPU-kapacitet och antal jobb som exekveras och köar på klustret, såväl gridjobb som lokala jobb. Använd sökfuntionen om du vill jämföra annan kluster-, kö- och jobbinformation.
    Land
    ".$clickable.". Landslagga och landsnamn härledda från tillgängliga resursbeskrivningar. Klicka för att visa endast detta lands information.
    Kluster
    ".$clickable.". Klusteralias tilldelat av ägaren. Maximal visad längd är 22 tecken. Klicka på detta alias för att få en detaljerad klusterbeskrivning.
    CPU:er
    Totalt antal CPU:er i ett kluster. OBS! Endast en del av dessa kan vara tillgängliga för gridanvändare.
    Belastning (processer: grid + lokala)
    ".$clickable.". Relativ klusterbelastning, motsvarande antalet upptagna CPU:er. Grå fält markerar processorer upptagna av de lokalt inskickade jobben, medan röda fält visar CPU:er upptagna av jobb som skickats in via grid. Klicka på fältet för att få en detaljerad lista av alla gridjobb som exekveras på klustret, inklusive antalet processorer per job.
    Köande
    ".$clickable.". Totalt antal jobb som köar på klustret, visat som antalet köande gridjobb plus antalet lokalt inskickade köande jobb. Klicka på den första siffran för att få en lista av köande gridjob på klustret.
    ", "Land" => 30, "Kluster" => 160, "CPU:er" => 10, "Belastning (processer: grid + lokala)" => 210, "Köande" => 10 ), "clusdes" => array("0" => "Resursinformation för", "help" => "
    Attribut
    ".$clickable.". Klusterattributnamn".$str_att."
    Värde
    ".$str_val."
    ".$clickable.". Namn på batchköer tillgängliga för ARCanvändarna uppsatta av klusterägarna. ".$str_que."
    Status
    Köstatus. Fungerande köer visar normalt status active.
    Tidsgränser (min)
    Tidsgräns för jobblängd per kö, om definierad, i CPU-minuter. Det första visade värdet är den nedre gränsen, det andra den övre. Om inga gränser är definierade (jobb med alla längder är tillåtna), visas N/A
    Exekveras
    Antal jobb som exekveras i kön. Det totala antalet jobb visas, med antalet processorer upptagna av gridjobb i parentes, t.ex. (Grid: 12). OBS! För parallella multiprocessorjobb kan numret i parentes vara större än antalet jobb.
    Köar
    Antal jobb som väntar på att exekveras i kön. Det totala antalet jobb visas, med gridjobb visade i parentes, t.ex. (Grid: 235)
    ", "Kö" => 0, "Mapping Queue" => 0, "Status" => 0, "Tidsgränser (min)" => 0, "CPU:er" => 0, "Exekveras" => 0, "Köar" => 0 ), "jobstat" => array("0" => "Jobb på:Jobb-ID", "help" => "
    JOBBLISTA:
    Jobbnamn
    ".$clickable.". ".$str_job."
    Ägare
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    ".$clickable.". Namn på batchkön i vilken jobbet exekveras. ".$str_que."
    CPU:er
    ".$str_cpu."
    JOBBINFORMATION:
    Attribut
    ".$clickable.". Jobbattributnamn".$str_att."
    Värde
    ".$str_val."
    ", "Jobbnamn" => 0, "Ägare" => 0, "Status" => 0, "CPU (min)" => 0, "Kö" => 0, "CPU:er" => 0 ), "volist" => array("0" => "Virtuella organisationer", "help" => "
    Virtuell organisation
    ".$clickable.". Användargrupp, delar oftast gemensamma activiteter och resurser, autoriserad på åtminstone ett ARC-kluster. Klicka på namnet för att få en lista med gruppmedlemmar.
    Medlemmar
    Antal gruppmedlemmar.
    Server
    LDAP-server som huserar gruppmedlemsskapsdatabasen.
    ", "Virtuell organisation" => 0, "Medlemmar" => 0, "Server" => 0 ), "vousers" => array("0" => "Gridanvändarbas", "help" => "
    Namn
    ".$clickable.". ".$str_nam."
    Anknytning
    Användarens heminstitut inmatat av VO-managern. Kan vara tomt.
    E-post
    ".$clickable.". Användarens e-post inmatad av en VO-manager. Kan vara tomt. Klicka på adressen för att sända ett e-brev till användaren.
    ", "#" => 0, "Namn" => 0, "Anknytning" => 0, "E-post" => 0 ), "userlist" => array("0" => "Information för", "help" => "
    Kluster:kö
    ".$clickable.". Namn på kluster och dess respektive köer (separade med ett kolon, ":") där en användare är autoriserad att skicka in jobb. Om en avändare inte är autoriserad visas meddelendet "Not authorised at host ...". Klicka på ett klusternamn för att få en detaljerad klusterbeskrivning. Klicka på ett könamn föt att få en detaljerad köbeskrivning.
    Fria CPU:er
    Antal fria CPU:er tillgängliga i en given kö för denna användare vid detta tillfälle, ibland med en övre tidsgräns (i minuter) bifogad. T.ex. "3" betyder 3 CPU:er tillgängliga för ett jobb med obegränsad exekveringstid; "4:360" indikerar att det finns 4 CPU:er tillgängliga för jobb som inte är längre än 6 timmar; "10:180 30" betyder att det finns 10 CPU:er tillgängliga för jobb som inte övergår 3 timmar, plus 30 CPU:er tillgängliga för jobb av valfri längd; "0" betyder att det inte finns några CPU:er tillgängliga för tillfället, och att jobben kommer att placeras i kö.
    Köade jobb
    Antal användarens jobb som förväntas stå före ett nytt inskickat jobb (för denna användare) i en kö. "0" betyder att jobbet förväntas exekveras omedelbart. OBS! Detta är endast en uppskattning, som kan åsidosättas av lokala regler.
    Fri disk (MB)
    Diskutrymme gillgängligt för användaren i en given kö (i megabyte). OBS! Detta är endast en uppskattning, då de flesta kluster inte erbjuder fasta diskkvoter.
    Jobbnamn
    ".$clickable.". ".$str_job."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Kluster
    ".$clickable.". Namn på det kluster på vilket jobbet exekvera(de)s. Klicka på ett klusternamn för att få detaljerad information om klustret.
    ".$clickable.". Name på den batchkö i vilken jobbet exekvera(de)s. ".$str_que."
    CPU:er
    ".$str_cpu."
    ", "" => 0, "Jobbnamn" => 0, "Status" => 0, "CPU (min)" => 0, "Kluster" => 0, "Kö" => 0, "CPU:er" => 0 ), "attlist" => array("0" => "Attributvärden", "help" => "
    Objekt
    ".$clickable.". Namn på det objekt vars attribut visas. Det kan vara ett klusternamn, ett klusters könamn, ett jobbnamn, ett användarnamn etc. Klicka på namnet för att få en detaljerad beskrivning av objektet.
    Attribut
    För varje objekt, ett eller flera attributvärden kan listas. Kolumntiteln är det human-readable attributnamnet (förutom för några MDS-specifika attribut), och Kolumnens innehåll är attributvärden per objekt inmatade i informationssystemet.
    ", "Objekt" => 0, "Attribut" => 0 ), "quelist" => array("0" => "Kö", "help" => "
    Attribut
    ".$clickable.". Köattributnamn".$str_att."
    Värde
    ".$str_val."
    Jobbnamn
    ".$clickable.". ".$str_job."
    Ägare
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Minne (KB)
    ".$str_mem."
    CPU:er
    ".$str_cpu."
    ", "" => 0, "Jobbnamn" => 0, "Ägare" => 0, "Status" => 0, "CPU (min)" => 0, "Minne (KB)" => 0, "CPU:er" => 0 ), "sestat" => array("0" => "Lagringselement", "help" => "
    Alias
    Lagringselementalias specificerat i informationssytemet. Maximal visad längd är 15 tecken.
    Totalt utrymme
    Totalt diskutrymme, GB.
    Fritt utrymme
    Diskutrymme tillgängligt för tillfället, GB.
    Namn
    Lagringselementnamn, bestående av ett logiskt namn och värdnamn (separerade av ett kolon, ":"). Det logiska namnet används endast för informationssystemsyften, för att särskilja olika lagringselement som huserar på samma maskin.
    Bas-URL
    Lagringselementats URL, oftast ett gsiftp:// protocol. Använd denna URL som bas för att komma åt filer.
    Typ
    Lagringselementtyp. "gridftp-based" indikerar disklagring med gridftp-gränssnitt.
    ", "#" => 0, "Alias" => 0, // "Totalt utrymme" => 0, "Fritt/totalt utrymme, GB" => 0, "Namn" => 0, "Bas-URL" => 0, "Typ" => 0 ), "allusers" => array("0" => "Autoriserade gridanvändare:Aktiva gridanvändare", "help" => "
    Namn
    ".$clickable.". ".$str_nam."
    Anknytning
    Användarens anknytning, härledd från det personliga certifikatet
    Jobb
    Totalt antal jobb från denna användarens i systemet (exekveras, avvaktande, avslutade eller borttagna)
    Kluster
    Visar hur många kluster som autoriserar denna användare
    ", "#" => 0, "Namn" => 0, "Anknytning" => 0, "Jobb" => 0, "Kluster" => 0 ), "userres" => array("0" => "", "Kluster:kö" => 0, "Fria CPU:er" => 0, "Köade jobb" => 0, "Fri disk (MB)" => 0 ), "ldapdump" => array("0" => "", "Attribut" => 0, "Värde" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info giltig fr.o.m. (GMT)", "Mds-validto" => "Info giltig t.o.m. (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Front-end domännamn", "nordugrid-cluster-aliasname" => "Klusteralias", "nordugrid-cluster-contactstring" => "Kontaktsträng", "nordugrid-cluster-interactive-contactstring" => "Interaktiv kontakt", "nordugrid-cluster-comment" => "Kommentar", "nordugrid-cluster-support" => "E-postkontakt", "nordugrid-cluster-acl" => "Auktoriserade VO:er", "nordugrid-cluster-lrms-type" => "LRMS-typ", "nordugrid-cluster-lrms-version" => "LRMS-version", "nordugrid-cluster-lrms-config" => "LRMS-detaljer", "nordugrid-cluster-architecture" => "Arkitektur", "nordugrid-cluster-opsys" => "Operativsystem", "nordugrid-cluster-homogeneity" => "Homogent kluster", "nordugrid-cluster-nodecpu" => "CPU-typ (långsammast)", "nordugrid-cluster-nodememory" => "Minne (MB, minsta)", "nordugrid-cluster-totalcpus" => "CPU:er, totalt", "nordugrid-cluster-cpudistribution" => "CPU:er, per maskin", "nordugrid-cluster-benchmark" => "Benchmark", "nordugrid-cluster-sessiondir-free" => "Diskutrymme, tillgängligt (MB)", "nordugrid-cluster-sessiondir-total" => "Diskutrymme, totalt (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Gridsessionens livstid (min)", "nordugrid-cluster-cache-free" => "Cachestorlek, tillgängligt (MB)", "nordugrid-cluster-cache-total" => "Cachestorlek, totalt (MB)", "nordugrid-cluster-runtimeenvironment" => "Runtime-miljö", "nordugrid-cluster-localse" => "Lagringselement, lokalt", "nordugrid-cluster-middleware" => "Grid-middleware", "nordugrid-cluster-totaljobs" => "Jobb, totalt antal", "nordugrid-cluster-usedcpus" => "CPU:er, upptagna", "nordugrid-cluster-queuedjobs" => "Jobb, köade", "nordugrid-cluster-prelrmsqueued" => "Gridjobb, köade före LRMS", "nordugrid-cluster-location" => "Postnummer", "nordugrid-cluster-owner" => "Ägare", "nordugrid-cluster-issuerca" => "Certifikatutfärdare", "nordugrid-cluster-issuerca-hash" => "Certifikatutfärdares hashsumma", "nordugrid-cluster-trustedca" => "Betrodd certifikatutfärdare", "nordugrid-cluster-nodeaccess" => "Nod-IP-uppkoppling", "nordugrid-cluster-gridarea" => "Sessionsarea (FÖRLEGAD)", "nordugrid-cluster-gridspace" => "Griddiskutrymme (FÖRLEGAD)", "nordugrid-cluster-opsysdistribution" => "OS-distribution (FÖRLEGAD)", "nordugrid-cluster-runningjobs" => "Jobb, exekveras (FÖRLEGAD)", "nordugrid-cluster-credentialexpirationtime" => "Credential expiration time", "nordugrid-queue-name" => "Könamn", "nordugrid-queue-comment" => "Kommentar", "nordugrid-queue-status" => "Köstatus", "nordugrid-queue-running" => "CPU:er, upptagna", "nordugrid-queue-localqueued" => "Lokala jobb, köade", "nordugrid-queue-prelrmsqueued" => "Grid jobb, köade före LRMS", "nordugrid-queue-queued" => "Jobb, köade (FÖRLEGAD)", "nordugrid-queue-maxrunning" => "Jobb, exekveras (max)", "nordugrid-queue-maxqueuable" => "Jobb, köbara (max)", "nordugrid-queue-maxuserrun" => "Jobb per unixanvändare (max)", "nordugrid-queue-maxcputime" => "CPU-tid, max. (minuter)", "nordugrid-queue-mincputime" => "CPU-tid, min. (minuter)", "nordugrid-queue-defaultcputime" => "CPU-tid, förvald (minuter)", "nordugrid-queue-maxwalltime" => "Klocktid, max. 
(minuter)", "nordugrid-queue-minwalltime" => "Klocktid, min. (minuter)", "nordugrid-queue-defaultwalltime" => "Klocktid, förvald (minuter)", "nordugrid-queue-schedulingpolicy" => "Scheduleringspolicy", "nordugrid-queue-totalcpus" => "CPU:er, totalt", "nordugrid-queue-nodecpu" => "CPU-typ", "nordugrid-queue-nodememory" => "Minne (MB)", "nordugrid-queue-architecture" => "Arkitektur", "nordugrid-queue-opsys" => "Operativsystem", "nordugrid-queue-homogeneity" => "Homogen kö", "nordugrid-queue-gridrunning" => "CPU:er, upptagna av gridjobb", "nordugrid-queue-gridqueued" => "Gridjobb, köade", "nordugrid-queue-benchmark" => "Benchmark", "nordugrid-queue-assignedcpunumber" => "CPU:er per kö (FÖRLEGAD)", "nordugrid-queue-assignedcputype" => "CPU-typ (FÖRLEGAD)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Ägare", "nordugrid-job-execcluster" => "Exekveringskluster", "nordugrid-job-execqueue" => "Exekveringskö", "nordugrid-job-stdout" => "Standard output-fil", "nordugrid-job-stderr" => "Standard error-fil", "nordugrid-job-stdin" => "Standard input-fil", "nordugrid-job-reqcputime" => "Begärd CPU-tid", "nordugrid-job-reqwalltime" => "Begärd klocktid", "nordugrid-job-status" => "Status", "nordugrid-job-queuerank" => "Plats i kön", "nordugrid-job-comment" => "LRMS-kommentar", "nordugrid-job-submissionui" => "Inskickningsmaskin", "nordugrid-job-submissiontime" => "Inskickningstid (GMT)", "nordugrid-job-usedcputime" => "Använd CPU-tid", "nordugrid-job-usedwalltime" => "Använd klocktid", "nordugrid-job-completiontime" => "Avslutningstid (GMT)", "nordugrid-job-sessiondirerasetime" => "Raderingstid (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxyförfallotid (GMT)", "nordugrid-job-usedmem" => "Använt minne (KB)", "nordugrid-job-errors" => "Fel", "nordugrid-job-exitcode" => "Returkod", "nordugrid-job-jobname" => "Namn", "nordugrid-job-runtimeenvironment" => "Runtimemiljö", "nordugrid-job-cpucount" => "Begärda CPU:er", "nordugrid-job-executionnodes" => "Exekveringsnoder", "nordugrid-job-gmlog" => "GM loggfil", "nordugrid-job-clientsoftware" => "Klientversion", "nordugrid-job-rerunable" => "Omkörbart", "nordugrid-job-reqcput" => "Begärd tid (FÖRLEGAD)", "nordugrid-job-gridlog" => "Gridloggfil (FÖRLEGAD)", "nordugrid-job-lrmscomment" => "LRMS-kommentar (FÖRLEGAD)", "nordugrid-authuser-name" => "Namn", "nordugrid-authuser-sn" => "Subjektnamn", "nordugrid-authuser-freecpus" => "Fria CPU:er", "nordugrid-authuser-diskspace" => "Fritt diskutrymme (MB)", "nordugrid-authuser-queuelength" => "Användarens kölängd", "nordugrid-se-name" => "Namn", "nordugrid-se-aliasname" => "Lagringselementalias", "nordugrid-se-type" => "Lagringselementtyp", "nordugrid-se-acl" => "Auktoriserade VO:er", "nordugrid-se-freespace" => "Fritt utrymme (MB)", "nordugrid-se-totalspace" => "Totalt utrymme (MB)", "nordugrid-se-url" => "Kontakt-URL", "nordugrid-se-baseurl" => "Kontakt-URL (FÖRLEGAD)", "nordugrid-se-accesscontrol" => "Access kontroll", "nordugrid-se-authuser" => "Auktoriserad användare (DN)", "nordugrid-se-location" => "Postnummer", "nordugrid-se-owner" => "Ägare", "nordugrid-se-middleware" => "Grid-middleware", "nordugrid-se-issuerca" => "Certifikatutfärdare", "nordugrid-se-issuerca-hash" => "Certifikatutfärdares hashsumma", "nordugrid-se-trustedca" => "Betrodd certifikatutfärdare", "nordugrid-se-comment" => "Kommentar", "nordugrid-rc-name" => "Domännamn", "nordugrid-rc-aliasname" => "Replica Catalog-alias", "nordugrid-rc-baseurl" => "Kontakt-URL", "nordugrid-rc-authuser" => "Auktoriserad användare (DN)", 
"nordugrid-rc-location" => "Postnummer", "nordugrid-rc-owner" => "Ägare", "nordugrid-rc-issuerca" => "Certifikatutfärdare" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Kan inte läsa topp-nivå-indexservrarna", "2" => "Ingen av de lokala indexservrarna returnerade uppkopplingen", "3" => " dålig konfigurering eller begäran drog över tiden", "4" => "Inga gridjobb funna", "5" => "Ingen information funnen", "6" => "Server otillgänglig", "7" => " - ladda om senare", "8" => "Ingen köinformation funnen", "9" => "Inga poster funna", "10" => "Inga användare funna", "11" => "Inte autoriserad på värden", "12" => "svarar inte", "13" => "Inga nya jobb funna för ", // debug messages "101" => " Monitor-time-out för GRIS: ", "102" => " sek för uppkoppling och ", "103" => " sek för sökning", "104" => " sek använda för sökning", "105" => "Visar resurser endast i ", "106" => "Frågade topp-nivå-indexservrar: ", "107" => "Fick geografiska data, skannade kluster: ", "108" => " kluster sorterade efter geografiska data", "109" => "Sökning efter klusterattribut", "110" => "Sökning efter köattribut", "111" => "Inga data från ", "112" => " är uppe i ", "113" => " har inga resurser att erbjuda", "114" => " Monitor-time-out för GIIS: ", "115" => "Hoppar över GRIS: ", "116" => "inte en ", "117" => "Verifierar uppkoppling: ", "118" => "OK", "119" => "Hittills, detekterade resurser av slag ", "120" => "LDAP-fel vid sökning efter ", "121" => " status vid ", "122" => "Svartlistad: ", "123" => "Registrant funnen för ", "124" => "Sökning efter lagringselementattribut", "125" => "Sökning efter användare", "126" => "Sökning efter jobb", "127" => " har jobb ", "128" => " utan att vara auktoriserad", "129" => "Kan inte hämta objektdata: fel ", "130" => " Monitor-time-out för EMIR: ", // icon titles "301" => "Ladda om", "302" => "Skriv ut", "303" => "Hjälp", "304" => "Stäng", "305" => "Röd", "306" => "Grå", "307" => "Alla användare", "308" => "Aktiva användare", "309" => "Sök", "310" => "Lagring", "311" => "VO:ar", "312" => "Flagga för ", "313" => " Gridprocesser and ", "314" => " lokala processer", // auxilliary strings "401" => "Processer", "402" => "Grid", "403" => "Lokala", "404" => "Världen", "405" => "TOTALT", "406" => " kluster", "407" => "en massa", "408" => " GB", "409" => " ALLA", "410" => "Kluster", "411" => "Kö", "412" => "Jobb", "413" => "Användare", "414" => "Lagring", "415" => "Replikakatalog", "416" => "Definera attribut att visa för objektet: ", "417" => "logiskt OCH av alla uttryck kommer att hittas", "418" => "Lämna det högra fältet tomt för att visa allt", "419" => "Visa resurser eller objekt enligt ditt val", "420" => "Särskijlande namn", "421" => "Kan använda totalt ", "422" => " kluster", "423" => "Resurs / objekt:", "424" => "Antal attribut (förval 6):", "425" => "Objekt", "426" => "Nästa", "427" => "Välj ett", "428" => "Återställ", "429" => "VISA" ), // Post code conversion "tlconvert" => array ( "Australia" => "Australien", "Austria" => "Österrike", "Armenia" => "Armenien", "Algeria" => "Algeriet", "Belgium" => "Belgien", "Bulgaria" => "Bulgarien", "Canada" => "Canada", "China" => "Kina", "Czechia" => "Tjeckien", "Denmark" => "Danmark", "Estonia" => "Estland", "Finland" => "Finland", "France" => "Frankrike", "Georgia" => "Georgien", "Germany" => "Tyskland", "Greece" => "Grekland", "Hungary" => "Ungern", "Iceland" => "Island", "Ireland" => "Irland", "Italy" => "Italien", "Japan" => "Japan", "Latvia" => "Lettland", "Lithuania" => "Litauen", "Morocco" => "Marocko", "Netherlands" => 
"Nederländerna", "Norway" => "Norge", "Poland" => "Polen", "Portugal" => "Portugal", "Romania" => "Rumänien", "Russia" => "Ryssland", "SriLanka" => "Sri Lanka", "Sweden" => "Sverige", "Slovakia" => "Slovakien", "Slovenia" => "Slovenien", "Switzerland" => "Schweiz", "Turkey" => "Turkiet", "UK" => "Storbritannien", "Ukraine" => "Ukraina", "USA" => "USA", "World" => "Världen" ) ); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/PaxHeaders.7502/hu.inc0000644000000000000000000000012412050701227024240 xustar000000000000000027 mtime=1352893079.143025 27 atime=1513200575.706716 30 ctime=1513200663.743793274 nordugrid-arc-5.4.2/src/services/ldap-monitor/lang/hu.inc0000644000175000002070000014220212050701227024306 0ustar00mockbuildmock00000000000000 N/A:  ez utóbbi azt jelenti, hogy a felhasználó nem adott meg neki nevet.
    X:  ez azt jelenti, hogy a job-ot a tulajdonosa "megölte"
    !:  ez azt jelenti, hogy a job futása közben hiba lépett fel a rendszerben
    Kattintson a névre, hogy bővebb információt kapjon a job-ról."; $str_nam = "A felhasználó neve, ahogy az a személyes tanúsítványában meg van adva. Kattintson a névre azért, hogy megkapja a felhasználó számára elérhető összes erőforrás listáját és a hozzá tartozó összes job-ot, ami a rendszerben éppen jelen van."; $str_sta = "A job állapota, amit a Grid Menedzser (GM) és az LRMS ad vissza. Szekvenciális sorrendben az állapotok a következők:
    ACCEPTED – a job elküldve, de még nincs feldolgozás alatt
    PREPARING – bemeneti állományok kinyerése.
    SUBMITTING – az interakció az LRMS-sel folyamatban van
    INLRMS – a job átküldve az LRMS-nek; a belső állapotot az információs rendszer adja hozzá. A lehetséges állapotok:
    Q – a job a várakozósorban van
    U – a job egy felfüggesztett állapotban van egy elfoglalt csomópontban (PBSPro)
    S – a job egy felfüggesztett állapotban van (Condor)
    R – a job fut
    E – a job véget ért (PBS)
    FINISHING – a kimeneti fájlok átvitele megtörténik a GM segítségével
    FINISHED – a job véget ért; az időbélyeget az információs rendszer adja hozzá
    CANCELING – a job megszakítása folyamatban van
    DELETED – a job-ot nem a felhasználó kérésére törölték, hanem a GM távolította el, mert lejárt a határideje
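// --- Illustrative sketch (not part of the original hu.inc) -----------------
// The GM/LRMS states listed above can be grouped programmatically. The helper
// below is a hypothetical example only: the "INLRMS: X" and "PENDING" forms
// are assumptions based on this help text, not a confirmed format used by the
// monitor itself.
function classify_job_state($status)
{
    // Strip an optional "PENDING" prefix (see the note that follows below)
    if (strpos($status, "PENDING") === 0) {
        $status = trim(substr($status, strlen("PENDING")), ": ");
    }
    // Inner LRMS states are assumed to be reported as e.g. "INLRMS: R"
    if (strpos($status, "INLRMS") === 0) {
        $inner = strtoupper(trim(substr($status, strlen("INLRMS")), ": "));
        return ($inner == "R" || $inner == "E") ? "running" : "queued";
    }
    switch ($status) {
        case "ACCEPTED":
        case "PREPARING":
        case "SUBMITTING":
            return "preprocessing";
        case "FINISHING":
        case "CANCELING":
            return "postprocessing";
        case "FINISHED":
        case "DELETED":
            return "done";
        default:
            return "unknown";
    }
}
// Example: classify_job_state("INLRMS: Q") returns "queued".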
    Minden állapot jelenthető a PENDING előtaggal is, ami azt jelenti, hogy a GM a job-ot a következő állapotba próbálja átvinni"; $str_tim = "A job által lefoglalt CPU idő (perc)."; $str_mem = "A job által lefoglalt memória (KB)."; $str_cpu = "Azon processzorok száma, amit a job használ."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Grid Monitorozó", "help" => "
    Ez a képernyő az összes olyan site-ot mutatja, amely a legfelső szintű ARC indexelő szolgáltatásoknál regisztrálva van; a lista először ország, majd hoszt név szerint van rendezve. A következő site paraméterek vannak monitorozva: klaszter alias, teljes CPU kapacitás és a futó ill. várakozósoros jobok száma (Grid-es és helyi együttesen). Használja a "Keresés" segédeszközt, ha egyéb klaszter-, várakozósor-, job- stb. jellemzőt szeretne megjeleníteni.
    Ország
    ".$clickable.". Ország zászló és név, ez az elérhető leírásokból száramzik. Kattintson ide, hogy csak ennek az országnak az információit lássa.
    Klaszter
    ".$clickable.". A klaszter álnevét a tulajdonos jelöli ki. Maximum 22 karakter hosszúságú lehet. Kattintson az aliasre, hogy részletesebb információt kapjon a klaszterről.
    CPU-k
    A klaszterben lévő összes CPU száma. Csak ezek töredékét tudják éppen elérni a grid-es felhasználók.
    Betöltés (feldolgoz:Grid+helyi)
    ".$clickable.". Relatív klaszter betöltés, megfelelően a foglalt CPU-k számához. A szürke sáv azt mutatja, hogy a processzorokat helyileg elküldött job-ok foglalják le, a piros sáv pedig azt, hogy a CPU-kat a grid-ről küldött job-ok foglalják le. Kattintson a sávra, hogy részletes információt kapjon a klaszteren futó Grid-es job-okról, ebben benne foglaltatik az is, hogy egy job-hoz hány darab processzor tartozik.
    Várakozólistában
    ".$clickable.". A klaszterben lévő összes várakozósoros job száma, megmutatja a várakozósoros grid-es job-okat plusz a helyileg elküldött várakozósoros job-okat. Kattintson az első számra ahhoz, hogy a klaszterben lévő várakozósoros grid-es jobok listáját megkapja.
    ", "Ország" => 30, "Site" => 160, "CPU-k" => 10, "Betöltés (feldolgoz: Grid+helyi)" => 210, "Várólistán" => 10 ), "clusdes" => array("0" => "Erőforrás részletek a következőkről:", "help" => "
    Attribútum
    ".$clickable.". Klaszter attribútum név".$str_att."
    Érték
    ".$str_val."
    Várakozósor
    ".$clickable.". Azon kötegelt várakozósoroknak a nevei, amik az ARC felhasználók számára elérhetőek, ezt a klaszter tulajdonosa állítja be. ".$str_que."
    Állapot
    A várakozósor állapota. A működő várakozósorok tipikusan aktív állapotot jeleznek.
    CPU (min)
    Időkorlát a várakozósorba küldött job-ok időtartamára, CPU-percben megadva. Az első megjelenő érték az alsó korlát, a második a felső korlát. Ha nincsenek korlátok beállítva (bármilyen időtartamú job elfogadásra kerül), akkor N/A jelenik meg.
    Futás
    Azon job-ok száma, amik a várakozósorban futnak. Az összes job száma látható, zárójelben a grid-job-ok által elfoglalt processzorok számával, pl. (Grid: 12). Párhuzamos, többprocesszoros feladatok esetén a zárójelek közötti szám nagyobb is lehet, mint a feladatok száma.
    Várakozólistán
    Azon job-ok száma, melyek a várakozósorban a futtatásra várnak. Az összes job száma látható, a zárójelben jelzett grid-feladatokkal együtt, például (Grid: 235)
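// --- Illustrative sketch (not part of the original hu.inc) -----------------
// The queue columns described above show a lower and an upper CPU-time limit
// (N/A when none is configured) and "total (Grid: N)" for the running and
// queued counts. These helpers are hypothetical formatting examples only;
// limits are assumed to be minutes (nordugrid-queue-mincputime /
// nordugrid-queue-maxcputime), counts are plain integers.
function format_cpu_limits($min, $max)
{
    if (!$min && !$max) return "N/A";                  // no limits configured
    return ($min ? $min : 0) . " - " . ($max ? $max : "inf");
}
function format_job_count($total, $grid)
{
    return $total . " (Grid: " . $grid . ")";          // e.g. "40 (Grid: 12)"
}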
    ", "Várakozósor" => 0, "LRMS várakozósor" => 0, "Állapot" => 0, "Korlátok (min)" => 0, "CPU-k" => 0, "Futás" => 0, "Várólistán" => 0 ), "jobstat" => array("0" => "Job helye:Job ID", "help" => "
    JOB LISTA
    Job név
    ".$clickable.". A job neve, amit a tulajdonos jelöl ki. Ha nincsen név kijelölve, akkor a következőt látjuk: "N/A". Kattintson a névre, hogy megkapja a job részletes leírását.
    Tulajdonos
    ".$clickable.". ".$str_nam."
    Állapot
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Várakozósor
    ".$clickable.". A kötegelt várakozósor neve, amiben a job végrehajtódott. ".$str_que."
    CPU-k
    ".$str_cpu."
    JOB RÉSZLETEK
    Attribútum
    ".$clickable.". Job attribútum név".$str_att."
    Érték
    ".$str_val."
    ", "Job név" => 0, "Tulajdonos" => 0, "Állapot" => 0, "CPU (min)" => 0, "Várakozósor" => 0, "CPU-k" => 0 ), "volist" => array("0" => "Virtuális szervezetek", "help" => "
    Virtuális szervezet
    ".$clickable.". Felhasználók csoportja, tipikusan közös feladatokat és erőforrásokat osztanak meg egymással, az engedélyezés legalább egy ARC-os site-on megtörténik. Kattintson a névre, hogy megkapja a csoport tajainak a listáját.
    Tagok
    A csoport tagjainak a száma.
    Kiszolgáló
    LDAP szerver, amely a csoport tagsági adatbázisát szolgáltatja.
    ", "Virtuális szervezet" => 0, "Tagok" => 0, "Kiszolgáló" => 0 ), "vousers" => array("0" => "Grid User Base", "help" => "
    Név
    ".$clickable.". ".$str_nam."
    Kapcsolatok
    A felhasználó saját intézménye, amelyet a VO menedzser ad meg. Üres is lehet.
    E-mail
    ".$clickable.". Felhasználók E-mail címe, amit egy VO menedzser ad meg. üres is lehet. Kattintson a címre, hogy levelet küldhessen a felhasználó E-mail címére.
    ", "#" => 0, "Név" => 0, "Kapcsolatok" => 0, "E-mail" => 0 ), "userlist" => array("0" => "Információszerzés", "help" => "
    Klaszter:várakozósor
    ".$clickable.". A klaszterek nevei és a megfelelő várakozósorok(oszlopokkal elválasztva, ":"), ahol a felhasználó job küldésekre jogosult. Ha a felhasználó nem jogosult, akkor a következő üzenet fog megjelenni: "Nincs megfelelő jogosultsága ennél a hosztnál". Kattintson a klaszter nevére, hogy egy részletesebb leírást kapjon a klaszterről. Kattintson a várakozósor nevére, hogy egy részletesebb leírást kapjon a várakozósorról.
    Szabad CPU-k
    A felhasználó számára az adott várakozósorban ebben a pillanatban elérhető szabad CPU-k száma, opcionálisan kiegészítve a felső időkorláttal (percben értendő). Például a "3" azt jelenti, hogy 3 CPU használható korlátlan ideig; a "4:360" azt mutatja, hogy 4 CPU használható, de csak 6 órán keresztül; a "10:180 30" azt jelenti, hogy 10 CPU áll rendelkezésre 3 órára, és ezen felül további 30 CPU korlátlan időre; a "0" azt jelenti, hogy pillanatnyilag nem áll rendelkezésre CPU, és a job-ok várakozólistára kerülnek. A formátum feldolgozására lásd az illusztrációs vázlatot a lista végén.
    Várakosósorban elhelyezett job-ok
    A felhasználó azon job-jainak a száma, amiknek várhatóan várakoznia kell egy újonnan elküldött job előtt a várakozási sorban. A "0" száma azt jelenti, hogy a job remélhetőleg azonnal lefuthat. Ez csupán egy becslés, amit a helyi irányelvek felülbírálhatnak.
    Szabad lemez terület (MB)
    A felhasználó számára elérhető szabad lemezterület egy adott várakozósorban (MB). Ez csupán egy értékelés, a legtöbb klaszter nem nyújt fix lemez kvótákat.
    Job név
    ".$clickable.". ".$str_job."
    Állapot
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Klaszter
    ".$clickable.". A klaszter neve, amelyben a feladat éppen fut. Kattintson a klaszter nevére, hogy egy részletesebb leírást kapjon.
    Várakozósor
    ".$clickable.". Azon várakozósor neve, amiben a job lefuttot, vagy le fog futni. ".$str_que."
    CPU-k
    ".$str_cpu."
    ", "" => 0, "Job név" => 0, "Állapot" => 0, "CPU (min)" => 0, "Klaszter" => 0, "Várakozósor" => 0, "CPU-k" => 0 ), "attlist" => array("0" => "Attribútum értékek", "help" => "
    Objektum
    ".$clickable.". Az objektumok neve, ezek lesznek megjelenítve. Ez lehet klaszter név, egy klaszter várakozósorának a neve, egy job név, egy felhasználói név stb. Kattintson a szövegre, hogy egy részletesebb leírást kapjon az objektumról.
    Attribútumok
    Minden objektum számára egy vagy több attribútum értéket lehet kilistáztatni. Az oszlop címe egy emberi olvasásra szánt név (kivéve néhány MDS specifikus attribútumot), az oszlopok attribútum értékeket tartalmaznak az adott objektumról, ahogy az az információs rendszerbe be lett írva .
    ", "Objektum" => 0, "Attribútum" => 0 ), "quelist" => array("0" => "Várakozósor", "help" => "
    Attribútum
    ".$clickable.". Egy várakozósor attribútumának a neve".$str_att."
    Érték
    ".$str_val."
    Job név
    ".$clickable.". ".$str_job."
    Tulajdonos
    ".$clickable.". ".$str_nam."
    Állapot
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Memória (KB)
    ".$str_mem."
    CPU-k
    ".$str_cpu."
    ", "" => 0, "Job név" => 0, "Tulajdonos" => 0, "Állapot" => 0, "CPU (min)" => 0, "Memória (KB)" => 0, "CPU-k" => 0 ), "sestat" => array("0" => "Adattároló elemek", "help" => "
    Alias
    Az adattároló elem álneve az információs rendszerben van meghatározva. Maximális megjeleníthető hosszúság: 15 karakter
    Összes lemezterület
    Összes lemezterület (GB).
    Szabad terület
    Pillanatnyilag ennyi szabad terület van, (GB)
    Név
    Adattároló elem neve, egy logikai névből és egy hoszt névből áll (kettősponttal elválasztva, ":"). A logikai nevet az információs rendszer használja azért, hogy megkülönböztesse a különböző adattároló elemeket ugyanazon a gépen.
    Alap URL
    Az adattároló elem URL-je tipikusan egy gsiftp:// protokoll. Ezt használja alapból ahhoz, hogy elérje a fájlokat.
    Típus
    Az adattároló elem típusa. A "gridftp-based" a GridFTP interfészen keresztül elérhető lemezes tárolót jelzi.
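// --- Illustrative sketch (not part of the original hu.inc) -----------------
// Hypothetical helpers for the storage-element columns described above:
// nordugrid-se-name is "logicalname:hostname", free/total space is published
// in MB while the table shows GB. Names and rounding below are assumptions.
function split_se_name($sename)
{
    $pos = strpos($sename, ":");
    if ($pos === false) return array("logical" => $sename, "host" => "");
    return array(
        "logical" => substr($sename, 0, $pos),
        "host"    => substr($sename, $pos + 1),
    );
}
function se_space_gb($freemb, $totalmb)
{
    // e.g. se_space_gb(512000, 1024000) returns "500.0 / 1000.0"
    return sprintf("%.1f / %.1f", $freemb / 1024, $totalmb / 1024);
}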
    ", "#" => 0, "Alias" => 0, "Szabad/összes hely, GB" => 0, "Név" => 0, "Alap URL" => 0, "Típus" => 0 ), "allusers" => array("0" => "Megbízható Grid felhasználók:Aktív Grid felhasználók", "help" => "
    Név
    ".$clickable.". ".$str_nam."
    Kapcsolat
    A felhasználó intézményi hovatartozása, a személyes tanúsítványából származtatva.
    Job-ok
    A felhasználó összes, a rendszerben lévő job-jának a száma (futó, függőben lévő, befejezett vagy törölt)
    Site-ok
    Megmutatja, hogy hány darab site engedélyezi ezt a felhasználót
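// --- Illustrative sketch (not part of the original hu.inc) -----------------
// The "Job-ok" and "Site-ok" columns described above are per-user totals over
// all polled clusters; the real counting is done in allusers.php further down
// in this package. The sketch below only illustrates the idea; the input
// array shapes are assumptions, not the LDAP result format the monitor uses.
function count_users($authusers, $jobs)
{
    // $authusers: list of array("sn" => subject name, "host" => cluster host)
    // $jobs:      list of array("owner" => subject name)
    $users = array();
    foreach ($authusers as $entry) {
        $sn = $entry["sn"];
        if (!isset($users[$sn])) $users[$sn] = array("jobs" => 0, "hosts" => 0);
        $users[$sn]["hosts"]++;     // one more site authorizing this user
    }
    foreach ($jobs as $job) {
        $sn = $job["owner"];
        if (!isset($users[$sn])) $users[$sn] = array("jobs" => 0, "hosts" => 0);
        $users[$sn]["jobs"]++;      // one more job owned by this user
    }
    return $users;
}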
    ", "#" => 0, "Név" => 0, "Kapcsolatok" => 0, "Job-ok" => 0, "Site-ok" => 0 ), "userres" => array("0" => "", "Klaszter:várakozósor" => 0, "Szabad CPU-k" => 0, "Várakozólistára helyezett job-ok" => 0, "Szabad lemezterület (MB)" => 0 ), "ldapdump" => array("0" => "", "Attribútum" => 0, "Érték" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "érvényes információ tőle (GMT)", "Mds-validto" => "érvényes információ neki (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Front-end tartomány név", "nordugrid-cluster-aliasname" => "Klaszter alias", "nordugrid-cluster-contactstring" => "Elérhetőségi szöveg", "nordugrid-cluster-interactive-contactstring" => "Interaktív elérhetőség", "nordugrid-cluster-comment" => "Megjegyzés", "nordugrid-cluster-support" => "E-mail cím", "nordugrid-cluster-acl" => "Engedélyezett VO-k", "nordugrid-cluster-lrms-type" => "LRMS típus", "nordugrid-cluster-lrms-version" => "LRMS verzió", "nordugrid-cluster-lrms-config" => "LRMS részletek", "nordugrid-cluster-architecture" => "Architektúra", "nordugrid-cluster-opsys" => "Operációs rendszer", "nordugrid-cluster-homogeneity" => "Homogén klaszter", "nordugrid-cluster-nodecpu" => "CPU típus (leglassabb)", "nordugrid-cluster-nodememory" => "Memória (MB, legkisebb)", "nordugrid-cluster-totalcpus" => "CPU-k, összesen", "nordugrid-cluster-cpudistribution" => "CPU:gépek", "nordugrid-cluster-benchmark" => "Teljesítmény értékelés", "nordugrid-cluster-sessiondir-free" => "Lemez terület, elérhető (MB)", "nordugrid-cluster-sessiondir-total" => "Lemez terület, összesen (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Grid session élettartam (min)", "nordugrid-cluster-cache-free" => "Cache méret, elérhető (MB)", "nordugrid-cluster-cache-total" => "Cache méret, összesen (MB)", "nordugrid-cluster-runtimeenvironment" => "Futásidejű környezet", "nordugrid-cluster-localse" => "Adattárolási elem, helyi", "nordugrid-cluster-middleware" => "Grid köztesréteg", "nordugrid-cluster-totaljobs" => "Job-ok, teljes összeg", "nordugrid-cluster-usedcpus" => "CPU-k, foglalt", "nordugrid-cluster-queuedjobs" => "Job-ok, várakozólistás", "nordugrid-cluster-prelrmsqueued" => "Grid job-ok, várakozó részfeladat", "nordugrid-cluster-location" => "Irányítószám", "nordugrid-cluster-owner" => "Tulajdonos", "nordugrid-cluster-issuerca" => "Tanúsítvány kibocsájtó", "nordugrid-cluster-issuerca-hash" => "Tanúsítvány kibocsájtó-s hash", "nordugrid-cluster-trustedca" => "Megbízható tanúsítvány kibocsájtók", "nordugrid-cluster-nodeaccess" => "Csomópont IP összekapcsolhatóság", "nordugrid-cluster-gridarea" => "Session terület (ELAVULT)", "nordugrid-cluster-gridspace" => "Grid lemez terület (ELAVULT)", "nordugrid-cluster-opsysdistribution" => "OS disztribúció (ELAVULT)", "nordugrid-cluster-runningjobs" => "Job-ok, futás (ELAVULT)", "nordugrid-cluster-credentialexpirationtime" => "Credential expiration time", "nordugrid-queue-name" => "A várakozósor neve", "nordugrid-queue-comment" => "Megjegyzés", "nordugrid-queue-status" => "A várakozósor állapota", "nordugrid-queue-running" => "CPU-k, foglalt", "nordugrid-queue-localqueued" => "Helyi job-ok, várakozólistás", "nordugrid-queue-prelrmsqueued" => "Grid job-ok, várakozó részfeladat", "nordugrid-queue-queued" => "Job-ok, várakozólistás (ELAVULT)", "nordugrid-queue-maxrunning" => "Job-ok, futás(max)", "nordugrid-queue-maxqueuable" => "Job-ok, várakozólistába tehető (max)", "nordugrid-queue-maxuserrun" => "Unix felhasználókénti job-ok (max)", "nordugrid-queue-maxcputime" => 
"CPU idő, max. (perc)", "nordugrid-queue-mincputime" => "CPU idő, min. (perc)", "nordugrid-queue-defaultcputime" => "CPU idő, alap. (perc)", "nordugrid-queue-maxwalltime" => ""Wall clock" idő, max. (perc)", "nordugrid-queue-minwalltime" => ""Wall clock" idő, min. (perc)", "nordugrid-queue-defaultwalltime" => ""Wall clock" idő, alap. (perc)", "nordugrid-queue-schedulingpolicy" => "Ütemezési politika", "nordugrid-queue-totalcpus" => "CPU-k, összesen", "nordugrid-queue-nodecpu" => "CPU típusa", "nordugrid-queue-nodememory" => "Memória (MB)", "nordugrid-queue-architecture" => "Architektúra", "nordugrid-queue-opsys" => "Operációs rendszer", "nordugrid-queue-homogeneity" => "Homogén várakozósor", "nordugrid-queue-gridrunning" => "CPUs occupied by Grid jobs", "nordugrid-queue-gridqueued" => "Grid job-ok, várakozólistás", "nordugrid-queue-benchmark" => "Teljesítmény értékelés", "nordugrid-queue-assignedcpunumber" => "Várakozósoronkénti CPU-k (ELAVULT)", "nordugrid-queue-assignedcputype" => "CPU típus (ELAVULT)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Tulajdonos", "nordugrid-job-execcluster" => "Végrehajtási klaszter", "nordugrid-job-execqueue" => "Végrehajtási várakozósor", "nordugrid-job-stdout" => "Szabványos kimeneti fájl", "nordugrid-job-stderr" => "Szabványos hiba fájl", "nordugrid-job-stdin" => "Szabványos bemeneti fájl", "nordugrid-job-reqcputime" => "Kért CPU idő", "nordugrid-job-reqwalltime" => "Kért $quot;wall clock$quot; idő", "nordugrid-job-status" => "Állapot", "nordugrid-job-queuerank" => "A városkozási sorban lévő pozíciója", "nordugrid-job-comment" => "LRMS megjegyzés", "nordugrid-job-submissionui" => "Submission machine", "nordugrid-job-submissiontime" => "Részfeladat idő (GMT)", "nordugrid-job-usedcputime" => "Felhasznált CPU idő", "nordugrid-job-usedwalltime" => "Felhasznált "wall clock" idő", "nordugrid-job-completiontime" => "Elkészítési idő (GMT)", "nordugrid-job-sessiondirerasetime" => "Törlési idő (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxy lejárati idő (GMT)", "nordugrid-job-usedmem" => "Felhasznált memória (KB)", "nordugrid-job-errors" => "Hibák", "nordugrid-job-exitcode" => "Kilépési kód", "nordugrid-job-jobname" => "Név", "nordugrid-job-runtimeenvironment" => "Futásidejű környezet", "nordugrid-job-cpucount" => "Kért CPU-k", "nordugrid-job-executionnodes" => "Végrehajtási csomópontok", "nordugrid-job-gmlog" => "GM napló fájl", "nordugrid-job-clientsoftware" => "Kliens verzió", "nordugrid-job-rerunable" => "újra futtatható", "nordugrid-job-reqcput" => "Kért idő (ELAVULT)", "nordugrid-job-gridlog" => "Grid napló fájl (ELAVULT)", "nordugrid-job-lrmscomment" => "LRMS megjegyzés (ELAVULT)", "nordugrid-authuser-name" => "Név", "nordugrid-authuser-sn" => "Téma neve", "nordugrid-authuser-freecpus" => "Szabad CPU-k", "nordugrid-authuser-diskspace" => "Szabad hely (MB)", "nordugrid-authuser-queuelength" => "A felhasználó várakozósoros job-jai", "nordugrid-se-name" => "Név", "nordugrid-se-aliasname" => "Az adattároló elem álneve", "nordugrid-se-type" => "Az adattároló elem típusa", "nordugrid-se-acl" => "Engedélyezett VO-k", "nordugrid-se-freespace" => "Szabad hely (MB)", "nordugrid-se-totalspace" => "Összes lemezterület (MB)", "nordugrid-se-url" => "URL elérhetőség", "nordugrid-se-baseurl" => "URL elérhetőség (ELAVULT)", "nordugrid-se-accesscontrol" => "Hozzáférés ellenőrzése", "nordugrid-se-authuser" => "Engedélyezett felhasználó (DN)", "nordugrid-se-location" => "Irányítószám", "nordugrid-se-owner" => "Tulajdonos", "nordugrid-se-middleware" 
=> "Köztesréteg", "nordugrid-se-issuerca" => "Tanúsítvány kibocsátó", "nordugrid-se-issuerca-hash" => "Tanúsítvány kibocsátó hash-e", "nordugrid-se-trustedca" => "Megbízható tanúsítvány kibocsájtók", "nordugrid-se-comment" => "Megjegyzés", "nordugrid-rc-name" => "Tartomány név", "nordugrid-rc-aliasname" => "Replika katalógus alias", "nordugrid-rc-baseurl" => "URL elérhetőség", "nordugrid-rc-authuser" => "Engedélyezett felhasználó (DN)", "nordugrid-rc-location" => "Irányítószám", "nordugrid-rc-owner" => "Tulajdonos", "nordugrid-rc-issuerca" => "Tanúsítvány kibocsátó" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Nem tudom olvasni a top level GIS és EMIR szerverek index szolgáltatásait", "2" => "Egyik helyi index sem jelzett vissza kapcsolatot", "3" => " rossz konfiguráció, vagy kérési időtúllépés", "4" => "Grid-es job nem található", "5" => "Nincs információ", "6" => "A szervert nem lehet elérni", "7" => " - frissítés később", "8" => "Nincs információ a várakozási sorról", "9" => "Nem található bejegyzés", "10" => "Nincs felhasználó", "11" => "Nincs megfelelő jogosultsága ennél a hosztnál ", "12" => "nincs válasz", "13" => "Nincsenek nemrégi feladatok ", // debug messages "101" => " Monitorozási időtúllépések a GRIS esetén: ", "102" => " mp kapcsolódáskor és ", "103" => " mp kereséskor", "104" => " mp (keresésre szánt idő)", "105" => "Az erőforrásokat csupán a következőben mutatja meg ", "106" => "Lekérdezett felsőszintű indexek: ", "107" => "Kapott földrajzi helyek, átvizsgált site-ok: ", "108" => " site-ok intézése földrajzi helyek szerint", "109" => "Klaszter attribútumok keresése", "110" => "A várakozási sor attribútumainak a keresése", "111" => "Nincs adat erről ", "112" => " működőképes ", "113" => " nincs erőforrása, amit felkínálhat", "114" => " Monitorozási időtúllépések a GIIS esetén: ", "115" => "GRIS kihagyása: ", "116" => "nem egy ", "117" => "Kapcsolat ellenőrzése: ", "118" => "OK", "119" => "Eddig, ebből a fajta erőforrásból ", "120" => "LDAP hiba keresése ", "121" => " állapot ", "122" => "Fekete listára került: ", "123" => "Regisztálót találtam a következő számára ", "124" => "SE-s attribútumok keresése", "125" => "Felhasználók keresése", "126" => "Jobok keresése", "127" => " van job-ja ", "128" => " amíg nincsen engedélyezve ", "129" => "Nem lehet elérni az objektum adatait: hiba ", "130" => "Monitorozási időtúllépések az EMIR esetén: ", // icon titles "301" => "Frissítés", "302" => "Nyomtatás", "303" => "Súgó", "304" => "Bezár", "305" => "Piros", "306" => "Szürke", "307" => "Minden felhasználó", "308" => "Aktív felhasználók", "309" => "Keresés", "310" => "Adattároló", "311" => "VO-k", "312" => "Zászlaja ", "313" => " Grid-es feldolgozás és ", "314" => " helyi feldolgozás", // auxiliary strings "401" => "Feldolgoz", "402" => "Grid", "403" => "Helyi", "404" => "Világ", "405" => "TELJES", "406" => " site-ok", "407" => "rengeteg", "408" => " GB", "409" => " MIND", "410" => "Klaszter", "411" => "Várakozási sor", "412" => "Job", "413" => "Felhasználó", "414" => "Adattároló", "415" => "Replika katalógus", "416" => "Attribútumok megadása az objektum megjelenítése miatt: ", "417" => "Minden kifejezés összevetésre fog kerülni", "418" => "A jobb szélső mezőt hagyja üresen azért, hogy mindent lásson", "419" => "A kiválasztott erőforrások vagy objektumok megjelenítése", "420" => "Megkülönböztető név", "421" => "Összesen használni tud ", "422" => " site-ot", "423" => "Erőforrás / objektum:", "424" => "Attribútumok száma (alap.: 6):", "425" => "Objektum", 
"426" => "Következő", "427" => "Válassz ki egyet", "428" => "Törlés", "429" => "Mutat" ), // Post code conversion "tlconvert" => array ( "Australia" => "Ausztrália", "Austria" => "Ausztria", "Armenia" => "Örményország", "Algeria" => "Algéria", "Belgium" => "Belgium", "Bulgaria" => "Bulgária", "Canada" => "Kanada", "China" => "Kína", "Czechia" => "Cseszlovákia", "Denmark" => "Dánia", "Estonia" => "észtország", "Finland" => "Finnország", "France" => "Franciaország", "Georgia" => "GrúÉzia", "Germany" => "Németország", "Greece" => "Görögország", "Hungary" => "Magyarország", "Iceland" => "Izland", "Ireland" => "írország", "Italy" => "Olaszország", "Japan" => "Japán", "Latvia" => "Lettország", "Lithuania" => "Litvánia", "Morocco" => "Marokkó", "Netherlands" => "Hollandia", "Norway" => "Norvégia", "Poland" => "Lengyelország", "Portugal" => "Portugália", "Romania" => "Románia", "Russia" => "Oroszország", "SriLanka" => "Sri Lanka", "Sweden" => "Svédország", "Slovakia" => "Szlovákia", "Slovenia" => "Szlovénia", "Switzerland" => "Svájc", "Turkey" => "Törökország", "UK" => "UK", "Ukraine" => "Ukrajna", "USA" => "USA" ) ); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/allusers.php0000644000000000000000000000012411605357421024563 xustar000000000000000027 mtime=1310056209.254814 27 atime=1513200575.687716 30 ctime=1513200663.630791892 nordugrid-arc-5.4.2/src/services/ldap-monitor/allusers.php0000644000175000002070000001421611605357421024634 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; $giislist = &$toppage->giislist; $yazyk = &$toppage->language; // Array defining the attributes to be returned $lim = array( "dn", USR_USSN ); /* need only SN per each user */ $jlim = array( "dn", JOB_GOWN ); /* Job owner only is needed */ if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 20; $tout = 20; if( $debug ) dbgmsg("
    :::> ".$errors["114"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); // Header table $titles = explode(":",$toptitle); // two alternative titles, separated by column $gtitle = $titles[0]; if ( $jobnum ) $gtitle = $titles[1]; $toppage->tabletop($gtitle,""); $family = cnvname($ussn); // ldapsearch filter string for jobs $filter = "(objectclass=".OBJ_USER.")"; /* Find all users */ $jfilter = "(objectclass=".OBJ_AJOB.")"; /* Find all jobs */ $gentries = recursive_giis_info($giislist,"cluster",$errors,$debug); $nc = count($gentries); if ( !$nc ) { // NO SITES FOUND! $errno = "1"; echo "
    ".$errors[$errno]."\n"; return $errno; } $dsarray = array (); $hnarray = array (); $sitetag = array (); /* a tag to skip duplicated entries */ for ( $k = 0; $k < $nc; $k++ ) { $clhost = $gentries[$k]["host"]; $clport = $gentries[$k]["port"]; $clconn = ldap_connect($clhost,$clport); if ( $clconn && !$sitetag[$clhost] ) { array_push($dsarray,$clconn); array_push($hnarray,$clhost); $sitetag[$clhost] = 1; /* filtering tag */ } } $nhosts = count($dsarray); if ( !$nhosts ) { // NO SITES REPLY... $errno = "2"; echo "
    ".$errors[$errno]."\n"; return $errno; } // Search all clusters for users $uiarray = array(); $ts1 = time(); $uiarray = @ldap_search($dsarray,DN_LOCAL,$filter,$lim,0,0,$tlim,LDAP_DEREF_NEVER); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["125"]." (".($ts2-$ts1).$errors["104"].")
    "); // Search all clusters for jobs $jiarray = array(); $ts1 = time(); $jiarray = @ldap_search($dsarray,DN_LOCAL,$jfilter,$jlim,0,0,$tlim,LDAP_DEREF_NEVER); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["126"]." (".($ts2-$ts1).$errors["104"].")
    "); // Loop on clusters; building user list $usrlist = array (); for ( $ids = 0; $ids < $nhosts; $ids++ ) { $ui = array (); $ui = $uiarray[$ids]; $ji = array (); $ji = $jiarray[$ids]; $dst = array (); $dst = $dsarray[$ids]; if ($dst && $ui) { $nusers = @ldap_count_entries($dst,$ui); $njobs = @ldap_count_entries($dst,$ji); if ($nusers > 0 || $njobs > 0) { // If there are valid entries, tabulate results $allres = array(); $allres = @ldap_get_entries($dst,$ui); $results = ldap_purge($allres,USR_USSN,$debug); $alljobs = array(); $alljobs = @ldap_get_entries($dst,$ji); // $nusers = $allres["count"]; $nusers = $results["count"]; $njobs = $alljobs["count"]; // loop on users, filling $usrlist[$ussn]["name"] and counting $usrlist[$ussn]["hosts"] for ($j=0; $j<$nusers; $j++) { // $ussn = $allres[$j][USR_USSN][0]; $ussn = $results[$j][USR_USSN][0]; $family = cnvname($ussn, 2); if ( $family == "host" || strlen($family) < 2 ) continue; $ussn = trim($ussn); $ussn = addslashes($ussn); // In case $ussn contains escape characters if ( !$usrlist[$ussn] ) { $usrlist[$ussn]["name"] = $family; $usrlist[$ussn]["org"] = getorg($ussn); $usrlist[$ussn]["jobs"] = 0; $usrlist[$ussn]["hosts"] = 0; } $usrlist[$ussn]["hosts"]++; } // loop on jobs, filling $usrlist[$jown]["jobs"] for ($k=0; $k<$njobs; $k++) { $jdn = $alljobs[$k]["dn"]; $jown = $alljobs[$k][JOB_GOWN][0]; $family = cnvname($jown, 2); if ( $family == "host" || strlen($family) < 2 ) continue; $jown = addslashes($jown); // In case $jown contains escape characters if ( !$usrlist[$jown] ) { // Shouldn't be happening, but... $usrlist[$jown]["name"] = $family; $usrlist[$jown]["org"] = getorg($jown); $usrlist[$jown]["jobs"] = 0; if( $debug == 2 ) dbgmsg("$family".$errors["127"]."$jdn".$errors["128"]."
    "); } $usrlist[$jown]["jobs"]++; } } } } uasort($usrlist,"hncmp"); // HTML table initialisation $utable = new LmTableSp($module,$toppage->$module); $urowcont = array(); if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $count = 0; foreach ( $usrlist as $ussn => $data ) { // if ( $count > 9 ) continue; $name = $data["name"]; $org = $data["org"]; $nhosts = 0; $nhosts = $data["hosts"]; $jcount = 0; $jcount = $data["jobs"]; if ( $jcount < $jobnum ) continue; /* In case list only those with jobs */ $count++; $encuname = rawurlencode($ussn); $usrwin = popup("userlist.php?owner=$encuname",700,500,5,$lang,$debug); $urowcont[] = $count; $urowcont[] = "$name"; $urowcont[] = $org; $urowcont[] = $jcount; $urowcont[] = $nhosts; $utable->addrow($urowcont); $urowcont = array(); } $utable->close(); return 0; // Done $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/man0000644000000000000000000000013213214316027022710 xustar000000000000000030 mtime=1513200663.671792393 30 atime=1513200668.717854109 30 ctime=1513200663.671792393 nordugrid-arc-5.4.2/src/services/ldap-monitor/man/0000755000175000002070000000000013214316027023033 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ldap-monitor/man/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712047045306025027 xustar000000000000000027 mtime=1352420038.089673 30 atime=1513200604.909073702 30 ctime=1513200663.670792381 nordugrid-arc-5.4.2/src/services/ldap-monitor/man/Makefile.am0000644000175000002070000000003212047045306025064 0ustar00mockbuildmock00000000000000man_MANS = ldap-monitor.7 nordugrid-arc-5.4.2/src/services/ldap-monitor/man/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315734025036 xustar000000000000000030 mtime=1513200604.942074106 30 atime=1513200651.549644134 30 ctime=1513200663.670792381 nordugrid-arc-5.4.2/src/services/ldap-monitor/man/Makefile.in0000644000175000002070000004663213214315734025117 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/ldap-monitor/man DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/ldap-monitor.7.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = ldap-monitor.7 CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' man7dir = $(mandir)/man7 am__installdirs = "$(DESTDIR)$(man7dir)" NROFF = nroff MANS = $(man_MANS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = 
@ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV 
= @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = 
@pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ man_MANS = ldap-monitor.7 all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/ldap-monitor/man/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/ldap-monitor/man/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): ldap-monitor.7: $(top_builddir)/config.status $(srcdir)/ldap-monitor.7.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man7: $(man_MANS) @$(NORMAL_INSTALL) test -z "$(man7dir)" || $(MKDIR_P) "$(DESTDIR)$(man7dir)" @list=''; test -n "$(man7dir)" || exit 0; \ { for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.7[a-z]*$$/p'; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^7][0-9a-z]*$$,7,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man7dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man7dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man7dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man7dir)" || exit $$?; }; \ done; } uninstall-man7: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man7dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.7[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^7][0-9a-z]*$$,7,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; 
\ test -z "$$files" || { \ echo " ( cd '$(DESTDIR)$(man7dir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(man7dir)" && rm -f $$files; } tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @list='$(MANS)'; if test -n "$$list"; then \ list=`for p in $$list; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \ if test -n "$$list" && \ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(MANS) installdirs: for dir in "$(DESTDIR)$(man7dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man7 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-man uninstall-man: uninstall-man7 .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-man7 \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-man uninstall-man7 # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/ldap-monitor/man/PaxHeaders.7502/ldap-monitor.7.in0000644000000000000000000000012611560417362026100 xustar000000000000000027 mtime=1304567538.361844 29 atime=1513200651.56564433 30 ctime=1513200663.671792393 nordugrid-arc-5.4.2/src/services/ldap-monitor/man/ldap-monitor.7.in0000644000175000002070000000370611560417362026151 0ustar00mockbuildmock00000000000000.TH ldap-monitor 7 "2003-03-03" "NorduGrid @VERSION@" "NorduGrid Toolkit" .SH NAME ldap-monitor \- Real-time NorduGrid monitoring tool .SH DESCRIPTION .B "LDAP Grid Monitor" is a set of .B PHP and .B Java scripts, providing a Web interface to the .B NorduGrid Information System. Should be working for any similar .B LDAP based service. .SH REQUIREMENTS .IP "LDAP library" e.g., http://www.openldap.org .IP "GD library" http://www.boutell.com/gd .IP "PHP4 library" http://www.php.net, must be compiled with LDAP and GD extensions .IP "HTTP server" must be compiled with PHP4 .IP "Globus MDS" http://www.globus.org/mds, or a similar .B LDAP based service .IP "Virtual Organisation" Is optional .SH INSTALLATION Copy all the files in a folder, accessible by the HTTP server. Modify .I settings.inc according to your MDS structure and liking. Run the whole stuff by loading .I loadmon.php into your favorite browser. .SH FILES .I loadmon.php .RS To monitor several servers at once, add hosts and DNs to the .IR $arrhost and, correspondingly, .IR $arrbdn arrays in .I loadmon.php .RE .I isattr.inc .I cnvname.inc .I cnvalias.inc .RS Making output more human-readable: modify .IR isattr.inc, .IR cnvname.inc, .IR cnvalias.inc. Otherwise, these files are not needed. .RE .I blacklist.inc .RS To prevent sites from being polled, modify array entries in .IR blacklist.inc. Otherwise, the file is not needed. .RE .I vo-users.php .RS Not needed when working without a Virtual Organisation. 
In such a case, remove the corresponding link from .I loadmon.php . .RE .I jobstat.php .RS When working without the .B NorduGrid Information System: to make sure that the job status is defined properly, edit .I jobstat.php (look for .B adjustment instructions in the code). .SH AUTHOR Oxana Smirnova .SH "SEE ALSO" .BR ngsub (1), .BR ngstat (1), .BR ngdel (1), .BR ngget (1), .BR ngsync (1), .BR ngcopy (1), .BR ngremove (1) nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315734024263 xustar000000000000000030 mtime=1513200604.794072296 30 atime=1513200651.496643486 30 ctime=1513200663.644792063 nordugrid-arc-5.4.2/src/services/ldap-monitor/Makefile.in0000644000175000002070000006451213214315734024341 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/ldap-monitor DIST_COMMON = $(dist_monitor_DATA) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in $(srcdir)/README.in \ $(srcdir)/ldap-monitor.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = ldap-monitor README CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | 
sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(monitordir)" "$(DESTDIR)$(monitordir)" DATA = $(dist_monitor_DATA) $(monitor_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = 
@CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = 
@PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = 
@top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = man mon-icons lang includes monitordir = @ldap_monitor_prefix@ dist_monitor_DATA = $(srcdir)/*.php $(srcdir)/*.js monitor_DATA = README all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/ldap-monitor/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/ldap-monitor/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): ldap-monitor: $(top_builddir)/config.status $(srcdir)/ldap-monitor.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ README: $(top_builddir)/config.status $(srcdir)/README.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_monitorDATA: $(dist_monitor_DATA) @$(NORMAL_INSTALL) test -z "$(monitordir)" || $(MKDIR_P) "$(DESTDIR)$(monitordir)" @list='$(dist_monitor_DATA)'; test -n "$(monitordir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(monitordir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(monitordir)" || exit $$?; \ done uninstall-dist_monitorDATA: @$(NORMAL_UNINSTALL) @list='$(dist_monitor_DATA)'; test -n "$(monitordir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(monitordir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(monitordir)" && rm -f $$files install-monitorDATA: $(monitor_DATA) @$(NORMAL_INSTALL) test -z "$(monitordir)" || $(MKDIR_P) "$(DESTDIR)$(monitordir)" @list='$(monitor_DATA)'; test -n "$(monitordir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(monitordir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(monitordir)" || exit $$?; \ done uninstall-monitorDATA: @$(NORMAL_UNINSTALL) @list='$(monitor_DATA)'; test -n "$(monitordir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(monitordir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(monitordir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. 
# To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(DATA) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(monitordir)" "$(DESTDIR)$(monitordir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-data-local install-dist_monitorDATA \ install-monitorDATA install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-dist_monitorDATA uninstall-monitorDATA .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-data-local install-dist_monitorDATA \ install-dvi install-dvi-am install-exec install-exec-am \ install-html install-html-am install-info install-info-am \ install-man install-monitorDATA install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am uninstall-dist_monitorDATA \ uninstall-monitorDATA install-data-local: $(MKDIR_P) $(DESTDIR)$(monitordir)/cache # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/help.php0000644000000000000000000000012411605357421023661 xustar000000000000000027 mtime=1310056209.254814 27 atime=1513200575.683716 30 ctime=1513200663.635791953 nordugrid-arc-5.4.2/src/services/ldap-monitor/help.php0000644000175000002070000000072611605357421023733 0ustar00mockbuildmock00000000000000$module; $helptext = $data["help"]; echo $helptext; // Done $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/volist.php0000644000000000000000000000012411605357421024251 xustar000000000000000027 mtime=1310056209.254814 27 atime=1513200575.666716 30 ctime=1513200663.641792026 nordugrid-arc-5.4.2/src/services/ldap-monitor/volist.php0000644000175000002070000001403711605357421024323 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; // Header table $toppage->tabletop("".$toptitle."

    "); // The main function $vos = array ( array ( "name" => "NorduGrid members", "server" => "grid-vo.nordugrid.org", "port" => "389", "dn" => "ou=people,dc=nordugrid,dc=org" ), array ( "name" => "NorduGrid guests", "server" => "https://www.pdc.kth.se/grid/swegrid-vo", "port" => "", "dn" => "vo.ng-guest-vo" ), array ( "name" => "NorduGrid developers", "server" => "http://www.nordugrid.org", "port" => "", "dn" => "", "group" => "developers.dn" ), array ( "name" => "NorduGrid tutorials", "server" => "grid-vo.nordugrid.org", "port" => "389", "dn" => "ou=tutorial,dc=nordugrid,dc=org" ), array ( "name" => "ATLAS test users (SWEGRID)", "server" => "https://www.pdc.kth.se", "port" => "", "dn" => "", "group" => "grid/swegrid-vo/vo.atlas-testusers-vo" ), /* array ( "name" => "NorduGrid services", "server" => "grid-vo.nordugrid.org", "port" => "389", "dn" => "ou=services,dc=nordugrid,dc=org" ), */ array ( "name" => "BaBar", "server" => "babar-vo.gridpp.ac.uk", "port" => "389", "dn" => "ou=babar,dc=gridpp,dc=ac,dc=uk" ), array ( "name" => "EDG ALICE", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=alice,dc=eu-datagrid,dc=org" ), array ( "name" => "EDG ATLAS", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=atlas,dc=eu-datagrid,dc=org" ), array ( "name" => "LCG ATLAS", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=atlas,dc=eu-datagrid,dc=org", "group" => "ou=lcg1" ), array ( "name" => "EDG CMS", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=cms,dc=eu-datagrid,dc=org" ), array ( "name" => "EDG LHC-B", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=lhcb,dc=eu-datagrid,dc=org" ), array ( "name" => "EDG D0", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=dzero,dc=eu-datagrid,dc=org", "group" => "ou=testbed1" ), array ( "name" => "EDG Earth Observation", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=earthob,dc=eu-datagrid,dc=org" ), array ( "name" => "EDG Genomics", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=biomedical,dc=eu-datagrid,dc=org", "group" => "ou=genomics" ), array ( "name" => "EDG Medical Imaging", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=biomedical,dc=eu-datagrid,dc=org", "group" => "ou=medical imaging" ), array ( "name" => "EDG ITeam", "server" => "marianne.in2p3.fr", "port" => "389", "dn" => "o=testbed,dc=eu-datagrid,dc=org", "group" => "ou=ITeam" ), array ( "name" => "EDG TSTG", "server" => "marianne.in2p3.fr", "port" => "389", "dn" => "o=testbed,dc=eu-datagrid,dc=org", "group" => "ou=TSTG" ), array ( "name" => "EDG Tutorials", "server" => "marianne.in2p3.fr", "port" => "389", "dn" => "o=testbed,dc=eu-datagrid,dc=org", "group" => "ou=EDGtutorial" ), array ( "name" => "EDG WP6", "server" => "marianne.in2p3.fr", "port" => "389", "dn" => "o=testbed,dc=eu-datagrid,dc=org", "group" => "ou=wp6" ) ); $votable = new LmTableSp($module,$toppage->$module); $rowcont = array (); foreach ( $vos as $contact ) { $server = $contact["server"]; $port = $contact["port"]; $dn = $contact["dn"]; $group = $contact["group"]; $nusers = ""; if ( $dn ) { // open ldap connection $ds = ldap_connect($server,$port); if ($ds) { if ( $group ) { $newfilter = "(objectclass=*)"; $newdn = $group.",".$dn; $newlim = array("dn","member"); $sr = @ldap_search($ds,$newdn,$newfilter,$newlim,0,0,10,LDAP_DEREF_NEVER); $groupdesc = @ldap_get_entries($ds,$sr); $nusers = $groupdesc[0]["member"]["count"]; } else { $sr = 
@ldap_search($ds,$dn,"(objectclass=organizationalPerson)",array("dn"),0,0,10,LDAP_DEREF_NEVER); if ($sr) $nusers = @ldap_count_entries($ds,$sr); } } $vostring = popup("vo-users.php?host=$server&port=$port&vo=$dn&group=$group",750,300,6,$lang,$debug); } else { $url = $server."/".$group; $users = file($url); $nusers = count($users); $vostring = popup($url,750,300,6,$lang,$debug); } $rowcont[] = "".$contact["name"].""; $rowcont[] = $nusers; $rowcont[] = $server; $votable->addrow($rowcont); $rowcont = array (); } $votable->close; $toppage->close; /* group http://www.nbi.dk/~waananen/ngssc2003.txt ### Datagrid VO Groups and their user mappings */ ?>nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/clusdes.php0000644000000000000000000000012412050701227024363 xustar000000000000000027 mtime=1352893079.143025 27 atime=1513200575.719716 30 ctime=1513200663.632791916 nordugrid-arc-5.4.2/src/services/ldap-monitor/clusdes.php0000644000175000002070000001673312050701227024442 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; // Header table $toppage->tabletop("","".$toptitle." $host"); // Array defining the attributes to be returned $qlim = array( QUE_NAME, QUE_QUED, QUE_GQUE, QUE_PQUE, QUE_LQUE, QUE_RUNG, QUE_GRUN, QUE_ASCP, QUE_MAXT, QUE_MINT, QUE_STAT ); // ldapsearch filter strings for cluster and queues $qfilter = "(objectclass=".OBJ_QUEU.")"; $dn = DN_LOCAL; if ($schema == "GLUE2") { $qlim = array( GQUE_NAME, GQUE_MAPQ, GQUE_QUED, GQUE_GQUE, GQUE_PQUE, GQUE_LQUE, GQUE_RUNG, GQUE_GRUN, GQUE_ASCP, GQUE_MAXT, GQUE_MINT, GQUE_STAT ); // ldapsearch filter strings for cluster and queues $qfilter = "(objectclass=".GOBJ_QUEU.")"; $dn = DN_GLUE; } if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 15; $tout = 20; if( $debug ) dbgmsg("
    :::> ".$errors["101"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); // establish connection to the requested LDAP server $chost = $host; if ( $isse ) $chost=substr(strstr($host,":"),1); $ds = ldap_connect($chost,$port); if ($ds) { // If contact OK, search for clusters $ts1 = time(); if ( $isse ) { $exclude = array(SEL_USER); if ( $dn == DN_LOCAL ) $thisdn = ldap_nice_dump($strings,$ds,SEL_NAME."=".$host.",".$dn,$exclude); if ( $dn == DN_GLUE ) { $querydn = SEL_NAME."=".$host.":arex,GLUE2GroupID=services,".DN_GLUE;//TODO: change SEL_NAME $thisdn = ldap_nice_dump($strings,$ds,$querydn,$exclude); } } else { if ( $dn == DN_LOCAL ) $thisdn = ldap_nice_dump($strings,$ds,CLU_NAME."=".$host.",".$dn); if ( $dn == DN_GLUE ) { $querydn = "GLUE2ServiceID=urn:ogf:ComputingService:".$host.":arex,GLUE2GroupID=services,".DN_GLUE; $thisdn = ldap_nice_dump($strings,$ds,$querydn); } } $ts2 = time(); if($debug) dbgmsg("
    ".$errors["109"]." (".($ts2-$ts1).$errors["104"].")
    "); if ( strlen($thisdn) < 4 && $debug ) dbgmsg("
    ".$errors["129"].$thisdn."

    "); echo "
    "; // Loop on queues (if everything works) if ($thisdn != 1 && !$isse) { $ts1 = time(); $qsr = @ldap_search($ds,$dn,$qfilter,$qlim,0,0,$tlim,LDAP_DEREF_NEVER); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["110"]." (".($ts2-$ts1).$errors["104"].")
    "); // Fall back to conventional LDAP // if (!$qsr) $qsr = @ldap_search($ds,$dn,$qfilter,$qlim,0,0,$tlim,LDAP_DEREF_NEVER); } if ($qsr) { // If search returned, check that there are valid entries $nqmatch = @ldap_count_entries($ds,$qsr); if ($nqmatch > 0) { // If there are valid entries, tabulate results $qentries = @ldap_get_entries($ds,$qsr); $nqueues = $qentries["count"]; // HTML table initialisation $qtable = new LmTableSp($module,$toppage->$module,$schema); // loop on the rest of attributes define("CMPKEY",QUE_MAXT); usort($qentries,"quetcmp"); for ($k=1; $k<$nqueues+1; $k++) { if ( $dn == DN_LOCAL ) { $qname = $qentries[$k][QUE_NAME][0]; $qstatus = $qentries[$k][QUE_STAT][0]; // $queued = @$qentries[$k][QUE_QUED][0]; $queued = @($qentries[$k][QUE_QUED][0]) ? ($entries[$k][QUE_QUED][0]) : 0; /* deprecated since 0.5.38 */ $locque = @($qentries[$k][QUE_LQUE][0]) ? ($qentries[$k][QUE_LQUE][0]) : 0; /* new since 0.5.38 */ $run = @($qentries[$k][QUE_RUNG][0]) ? ($qentries[$k][QUE_RUNG][0]) : 0; $cpumin = @($qentries[$k][QUE_MINT][0]) ? $qentries[$k][QUE_MINT][0] : "0"; $cpumax = @($qentries[$k][QUE_MAXT][0]) ? $qentries[$k][QUE_MAXT][0] : ">"; $cpu = @($qentries[$k][QUE_ASCP][0]) ? $qentries[$k][QUE_ASCP][0] : "N/A"; $gridque = @($qentries[$k][QUE_GQUE][0]) ? $qentries[$k][QUE_GQUE][0] : "0"; $gmque = @($qentries[$k][QUE_PQUE][0]) ? ($qentries[$k][QUE_PQUE][0]) : 0; /* new since 0.5.38 */ $gridrun = @($qentries[$k][QUE_GRUN][0]) ? $qentries[$k][QUE_GRUN][0] : "0"; $quewin = popup("quelist.php?host=$host&port=$port&qname=$qname&schema=$schema",750,430,6,$lang,$debug); } if ( $dn == DN_GLUE ) { $qname = $qentries[$k][GQUE_NAME][0]; $mapque = $qentries[$k][GQUE_MAPQ][0]; $qstatus = $qentries[$k][GQUE_STAT][0]; // $queued = @$qentries[$k][GQUE_QUED][0]; $queued = @($qentries[$k][GQUE_QUED][0]) ? ($entries[$k][GQUE_QUED][0]) : 0; /* deprecated since 0.5.38 */ $locque = @($qentries[$k][GQUE_LQUE][0]) ? ($qentries[$k][GQUE_LQUE][0]) : 0; /* new since 0.5.38 */ $run = @($qentries[$k][GQUE_RUNG][0]) ? ($qentries[$k][GQUE_RUNG][0]) : 0; $cpumin = @($qentries[$k][GQUE_MINT][0]) ? $qentries[$k][GQUE_MINT][0] : "0"; $cpumax = @($qentries[$k][GQUE_MAXT][0]) ? $qentries[$k][GQUE_MAXT][0] : ">"; $cpu = @($qentries[$k][GQUE_ASCP][0]) ? $qentries[$k][GQUE_ASCP][0] : "N/A"; $gridque = @($qentries[$k][GQUE_GQUE][0]) ? $qentries[$k][GQUE_GQUE][0] : "0"; $gmque = @($qentries[$k][GQUE_PQUE][0]) ? ($qentries[$k][GQUE_PQUE][0]) : 0; /* new since 0.5.38 */ $gridrun = @($qentries[$k][GQUE_GRUN][0]) ? $qentries[$k][GQUE_GRUN][0] : "0"; $quewin = popup("quelist.php?host=$host&port=$port&qname=$qname&schema=$schema",750,430,6,$lang,$debug); } $gridque = $gridque + $gmque; if ( $queued == 0 ) $queued = $locque + $gridque; // filling the table $qrowcont[] = "$qname"; if ( $mapque ) { $qrowcont[] = "$mapque"; } $qrowcont[] = "$qstatus"; $qrowcont[] = "$cpumin – $cpumax"; $qrowcont[] = "$cpu"; $qrowcont[] = "$run (".$errors["402"].": $gridrun)"; $qrowcont[] = "$queued (".$errors["402"].": $gridque)"; $qtable->addrow($qrowcont); $qrowcont = array (); } $qtable->close(); } else { $errno = 8; echo "
    ".$errors["8"]."\n"; return $errno; } } elseif ( !$isse ) { $errno = 5; echo "
    ".$errors["5"]."\n"; return $errno; } @ldap_free_result($qsr); @ldap_close($ds); return 0; } else { $errno = 6; echo "
    ".$errors[$errno]."\n"; return $errno; } // Done $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/mon-icons0000644000000000000000000000013213214316027024037 xustar000000000000000030 mtime=1513200663.726793066 30 atime=1513200668.717854109 30 ctime=1513200663.726793066 nordugrid-arc-5.4.2/src/services/ldap-monitor/mon-icons/0000755000175000002070000000000013214316027024162 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ldap-monitor/mon-icons/PaxHeaders.7502/Russia.png0000644000000000000000000000012411506337146026100 xustar000000000000000027 mtime=1293532774.731583 27 atime=1513200575.657715 30 ctime=1513200663.709792858 nordugrid-arc-5.4.2/src/services/ldap-monitor/mon-icons/Russia.png0000644000175000002070000000040011506337146026137 0ustar00mockbuildmock00000000000000PNG  IHDR `.gAMA7tEXtSoftwareAdobe ImageReadyqe<*PLTE$'3S6^\Dl3S6^\Dl$''v\IDATxbEĀ.@ʉXY 0133;`f $@ \Ȁ xx@102 F_CFY9IENDB`nordugrid-arc-5.4.2/src/services/ldap-monitor/mon-icons/PaxHeaders.7502/Makefile.am0000644000000000000000000000012611634415166026163 xustar000000000000000027 mtime=1316100726.768006 30 atime=1513200604.958074301 29 ctime=1513200663.68379254 nordugrid-arc-5.4.2/src/services/ldap-monitor/mon-icons/Makefile.am0000644000175000002070000000023411634415166026225 0ustar00mockbuildmock00000000000000monitoriconsdir = @ldap_monitor_prefix@/mon-icons monitoricons_DATA = $(srcdir)/*.png $(srcdir)/*.php $(srcdir)/*.gif EXTRA_DIST = $(monitoricons_DATA) nordugrid-arc-5.4.2/src/services/ldap-monitor/mon-icons/PaxHeaders.7502/Finland.png0000644000000000000000000000012411506337146026205 xustar000000000000000027 mtime=1293532774.731583 27 atime=1513200575.659715 30 ctime=1513200663.695792687 nordugrid-arc-5.4.2/src/services/ldap-monitor/mon-icons/Finland.png0000644000175000002070000000050211506337146026247 0ustar00mockbuildmock00000000000000PNG  IHDR 2IbKGD pHYs  ~tIME?IDATxڕ;r0 DIg"((R Ev}/+>{je q:o™,ȱ#堊1u28hxqO@:1*J1;EO;KZFg_i)=^ nc%es9ņ+d U6U/x C< ~48Qp p#F{2IENDB`nordugrid-arc-5.4.2/src/services/ldap-monitor/mon-icons/PaxHeaders.7502/Slovakia.png0000644000000000000000000000012311506337146026402 xustar000000000000000027 mtime=1293532774.731583 27 atime=1513200575.658715 29 ctime=1513200663.71079287 nordugrid-arc-5.4.2/src/services/ldap-monitor/mon-icons/Slovakia.png0000644000175000002070000000050211506337146026445 0ustar00mockbuildmock00000000000000PNG  IHDR `.gAMA7tEXtSoftwareAdobe ImageReadyqe<`PLTElGd柮B/OSn꯻xOj[CAp`;_;Z'???F?]-Q,Cz_蝬_x_x fhIDATxbDĀ.@`NV&V@17+/3rr @@E`Fkt–\ vLx=J,M4K)~IENDB`nordugrid-arc-5.4.2/src/services/ldap-monitor/mon-icons/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315734026165 xustar000000000000000030 mtime=1513200604.990074693 30 atime=1513200651.596644709 30 ctime=1513200663.684792552 nordugrid-arc-5.4.2/src/services/ldap-monitor/mon-icons/Makefile.in0000644000175000002070000004373713214315734026251 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/ldap-monitor/mon-icons DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(monitoriconsdir)" DATA = $(monitoricons_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ 
ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = 
@LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = 
@lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ monitoriconsdir = @ldap_monitor_prefix@/mon-icons monitoricons_DATA = $(srcdir)/*.png $(srcdir)/*.php $(srcdir)/*.gif EXTRA_DIST = $(monitoricons_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/ldap-monitor/mon-icons/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/ldap-monitor/mon-icons/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-monitoriconsDATA: $(monitoricons_DATA) @$(NORMAL_INSTALL) test -z "$(monitoriconsdir)" || $(MKDIR_P) "$(DESTDIR)$(monitoriconsdir)" @list='$(monitoricons_DATA)'; test -n "$(monitoriconsdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(monitoriconsdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(monitoriconsdir)" || exit $$?; \ done uninstall-monitoriconsDATA: @$(NORMAL_UNINSTALL) @list='$(monitoricons_DATA)'; test -n "$(monitoriconsdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(monitoriconsdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(monitoriconsdir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e 
"s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(monitoriconsdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-monitoriconsDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-monitoriconsDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man \ install-monitoriconsDATA install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am uninstall uninstall-am uninstall-monitoriconsDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT:

[ nordugrid-arc-5.4.2/src/services/ldap-monitor/mon-icons/ -- binary PNG/GIF image data omitted. These entries are the country-flag and status icons installed as monitoricons_DATA by the Makefile above; the files visible in this span are Spain.png, Slovenia.png, Portugal.png, Denmark.png, Latvia.png, USA.png, icon-close.png, Georgia.png, Poland.png, Netherlands.png, Germany.png, Romania.png, Morocco.png, Switzerland.png, Algeria.png, Ireland.png, spacer.gif, Sweden.png, icon-folks.png, World.png, Bulgaria.png, Czechia.png, icon-run.png, icon-refresh.png, Armenia.png, Italy.png, Hungary.png, Greece.png, icon-help.png, Japan.png, Austria.png, Lithuania.png, Norway.png, UK.png, Australia.png, China.png, icon-disk.png, Turkey.png, Iceland.png, Estonia.png and icon-vo.png, plus a few entries whose headers were garbled together with the binary data. The small PHP image generators icon_led.php, icon_back.php, icon_spacer.php and icon_start.php also belong to this directory, but their source was removed together with HTML-like tags during text extraction; the surviving part of icon_bar.php is kept below. ]

nordugrid-arc-5.4.2/src/services/ldap-monitor/mon-icons/icon_bar.php (fragment; the opening of the file was lost in extraction):
 9) {$x3 = $x1-16;} if (strlen($text) > 5) $x3 = 84; if (strlen($text) > 10) $x3 = 36; $im = @imagecreate(200,$y); $bgcolor = imagecolorallocate($im,204,204,204); $red = imagecolorallocate($im,97,144,0); $grey = imagecolorallocate($im,176,176,176); // $white = imagecolorallocate($im,255,255,255); $white = imagecolorallocate($im,48,48,48); if ( $x1 < $x3 ) $white = imagecolorallocate($im,82,82,82); if ( $x1 ) imagefilledrectangle($im,0,0,$x1,$y,$grey); if ( $xg1 ) imagefilledrectangle($im,0,0,$xg1,$y,$red); imagestring ($im, 3, $x3, 0, $text, $white); imagepng ($im); ImageDestroy($im); } $x = $_GET["x"]; $xg = $_GET["xg"]; $y = $_GET["y"]; $text = $_GET["text"]; do_bar($x,$xg,$y,$text); ?>

nordugrid-arc-5.4.2/src/services/ldap-monitor/discover.php (fragment; the opening of the file was lost in extraction):
title); $strings = &$toppage->strings; $giislist = &$toppage->giislist; $isattr = &$toppage->isattr; $errors = &$toppage->errors; require_once('attlist.inc'); $itself = $_SERVER["PHP_SELF"]; $ifsub = $_POST["submit"] ? TRUE : FALSE ; $ifsel = $_POST["select"] ? TRUE : FALSE ; echo "
    \n"; if ( $ifsub ) { // Call attributes list function for all selected arguments $request = $_POST; $attributes = array (); $signs = array (); $filters = array (); $attributes = $request["attributes"]; $signs = $request["signs"]; $filters = $request["filters"]; $thething = $request["scope"]; if ( $thething == "job" || $thething == "queue" || $thething == "authuser" ) $thething = "cluster"; // $attwin = popup("attlist.php?attribute=$encatt",650,300,7,$lang,$debug); do_attlist($thething,$attributes,$signs,$filters,$strings,$giislist); echo "
     "; echo " \n
    "; } elseif ( $ifsel ) { // If selection of search object and nr. of attributres is made, display options: $scope = $_POST; $object = $scope["object"]; $nlines = $scope["nlines"]; if ( !$nlines ) $nlines = 6; echo "

    ".$errors["416"].$object."

    \n"; echo "
    ".$errors["417"]."
    \n"; echo "
    ".$errors["418"]."


    \n"; $attwin = popup($itself,650,300,7,$lang,$debug); echo "
    "; echo "\n"; $rcol = "#ccffff"; for ( $i = 0; $i < $nlines; $i++ ) { echo "\n"; echo "\n"; echo "\n"; } echo "\n"; echo "
    "; echo "

      
    \n"; echo " \n"; } else { echo "

    ".$errors["419"]."

    \n"; echo "
    "; echo "

    ".$errors["423"]." \n"; echo "  ".$errors["424"]." \n"; echo "  \n"; echo "

    \n"; } echo "
    \n"; $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/vo-users.php0000644000000000000000000000012411605357421024514 xustar000000000000000027 mtime=1310056209.254814 27 atime=1513200575.680716 30 ctime=1513200663.640792014 nordugrid-arc-5.4.2/src/services/ldap-monitor/vo-users.php0000644000175000002070000000772411605357421024573 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; // Header table $toppage->tabletop("","".$toptitle.""); // ldap search filter string for jobs $ufilter = "(objectclass=".OBJ_PERS.")"; $ulim = array ( "dn", VO_USSN, VO_USCN, VO_DESC, VO_INST, VO_MAIL ); if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 10; $tout = 15; if( $debug ) dbgmsg("
    :::> ".$errors["114"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); // Establish connection to the requested VO server if( $debug ) dbgmsg($errors["117"].$host.":".$port); $ds = ldap_connect($host,$port); if ($ds) { // If contact OK, search for people $ts1 = time(); $sr = @ldap_search($ds,$vo,$ufilter,$ulim,0,0,$tlim,LDAP_DEREF_NEVER,$tout); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["125"]." (".($ts2-$ts1).$errors["104"].")
    "); if ($sr) { // If search returned, check that there are valid entries $nmatch = @ldap_count_entries($ds,$sr); if ($nmatch > 0) { // If there are valid entries, tabulate results $entries = @ldap_get_entries($ds,$sr); $nusers = $entries["count"]; define("CMPKEY",VO_USSN); usort($entries,"ldap_entry_comp"); // HTML table initialization $utable = new LmTable($module,$toppage->$module); // loop on users $uscnt = 0; for ($i=1; $i<$nusers+1; $i++) { $dn = $entries[$i]["dn"]; if ( $group ) { $newfilter = "(member=$dn)"; $newdn = $group.",".$vo; $newlim = array("dn"); $gcheck = @ldap_search($ds,$newdn,$newfilter,$newlim,0,0,$tlim,LDAP_DEREF_NEVER,$tout); if ( !ldap_count_entries($ds,$gcheck) ) continue; } $usname = $entries[$i][VO_USCN][0]; // $usname = utf2cyr($usname,"n"); // $ussn = strstr($entries[$i][VO_DESC][0],"/"); $ussn = substr(strstr($entries[$i][VO_DESC][0],"subject="),8); $ussn = trim($ussn); $encuname = rawurlencode($ussn); $org = $entries[$i][VO_INST][0]; // $org = utf8_decode($org); $mail = $entries[$i][VO_MAIL][0]; $mailstr = "mailto:".$mail; $usrwin = popup("userlist.php?owner=$encuname",700,500,5,$lang,$debug); // filling the table $uscnt++; $urowcont[] = $uscnt; $urowcont[] = "$usname"; $urowcont[] = "$org"; $urowcont[] = "$mail"; $utable->addrow($urowcont); $urowcont = array (); } $utable->close(); } else { $errno = 10; echo "
    ".$errors[$errno]."\n"; return $errno; } } else { $errno = 5; echo "
    ".$errors[$errno]."\n"; return $errno; } ldap_free_result($sr); ldap_close($ds); return 0; } else { $errno = 6; echo "
    ".$errors[$errno]."\n"; return $errno; } // Done $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/loadmon.php0000644000000000000000000000012413153453630024361 xustar000000000000000027 mtime=1504597912.048594 27 atime=1513200575.683716 30 ctime=1513200663.637791977 nordugrid-arc-5.4.2/src/services/ldap-monitor/loadmon.php0000644000175000002070000006032013153453630024427 0ustar00mockbuildmock00000000000000module; $strings = &$toppage->strings; $errors = &$toppage->errors; $giislist = &$toppage->giislist; $emirslist= &$toppage->emirslist; $cert = &$toppage->cert; $yazyk = &$toppage->language; $archery_list = &$toppage->archery_list; // Header table $toptit = date("Y-m-d T H:i:s"); $toppage->tabletop("".EXTRA_TITLE." ".$toppage->title."

    ","$toptit"); //********************* Schema changing ****************************** $other_schema = "GLUE2"; if ( $schema == "GLUE2" ) $other_schema = "NG"; $_GET["schema"] = $other_schema; $get_options = ""; $keys = array_keys($_GET); foreach ($_GET as $key => $value) { if ( $key == $keys[0] ) { $get_options = "?"; } else { $get_options = $get_options."&"; } $get_options = $get_options."$key=$value"; } //TODO: use translate messages echo "Current data rendered according to $schema schema.
    "; echo "Schema switching to: $other_schema

    "; //********************** Legend - only needed for this module ********************* echo "
    \n"; echo "".$errors["401"].":\n"; echo "\"".$errors["305"]."\"".$errors["402"]." \"".$errors["306"]."\"".$errors["403"]."\n"; echo ""; $sewin = popup("sestat.php",650,200,8,$lang,$debug); $discwin = popup("discover.php",700,400,9,$lang,$debug); $vostring = popup("volist.php",440,330,11,$lang,$debug); $usstring = popup("allusers.php",650,700,12,$lang,$debug); $acstring = popup("allusers.php?limit=1",500,600,12,$lang,$debug); echo "
    \n"; //******** Authorised users echo "\"".$errors["307"]."\" \n"; //******** Active users echo "\"".$errors["308"]."\" \n"; //******** Search echo "\"".$errors["309"]."\" \n"; //******** Storage echo "\"".$errors["310"]."\" \n"; //******** Virtual Organisations echo "\"".$errors["311"]."\"\n"; echo "
    \n"; echo "
    \n"; //****************************** End of legend **************************************** // Some debug output if ( $debug ) { ob_end_flush(); ob_implicit_flush(); dbgmsg("
    ARC ".$toppage->getVersion()."
    "); } $tcont = array(); // array with rows, to be sorted $cachefile = CACHE_LOCATION."/loadmon-$schema-".$yazyk; $tcont = get_from_cache($cachefile,120); // If cache exists, skip ldapsearch if ( !$tcont || $debug || $display != "all" ) { // Do LDAP search $tcont = array(); // Setting time limits for ldapsearch $tlim = 10; $tout = 11; if($debug) dbgmsg("
    :::> ".$errors["101"].$tlim.$errors["102"].$tout.$errors["103"]." <:::
    "); // ldapsearch filter string for clusters and queues $filter="(|(objectClass=".OBJ_CLUS.")(objectClass=".OBJ_QUEU.")(objectclass=".GOBJ_CLUS.")(objectclass=".GOBJ_QUEU.")(objectClass=".GOBJ_MAN.")(objectClass=".GOBJ_LOC."))"; // Array defining the attributes to be returned $lim = array( "dn", GCLU_ANAM, GCLU_ZIPC, GCLU_TCPU, GCLU_UCPU, GCLU_TJOB, GCLU_QJOB, GCLU_PQUE, GQUE_STAT, GQUE_GQUE, GQUE_QUED, GQUE_LQUE, GQUE_PQUE, GQUE_RUNG, GQUE_GRUN, CLU_ANAM, CLU_ZIPC, CLU_TCPU, CLU_UCPU, CLU_TJOB, CLU_QJOB, CLU_PQUE, QUE_STAT, QUE_GQUE, QUE_QUED, QUE_LQUE, QUE_PQUE, QUE_RUNG, QUE_GRUN ); // Adjusting cluster display filter $showvo = ""; if ( substr($display,0,2) == "vo" ) { $showvo = substr(strrchr($display,"="),1); if ($debug) dbgmsg(" ::: ".$errors["105"]."$showvo"); } if ( $display != "all" && !$showvo ) $filter = "(&".$filstr."(".$display."))"; //========================= GET CLUSTER LIST ============================ $gentries = array(); // EGIIS if ( ! empty($giislist) ) { $ngiis = count($giislist); $ts1 = time(); $gentries = recursive_giis_info($giislist,"cluster",$errors,$debug); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["106"].$ngiis." (".($ts2-$ts1).$errors["104"].")
    "); } // EMIR if ( ! empty($emirslist)) $gentries = emirs_info($emirslist,"cluster",$errors,$gentries,$debug,$cert); // ARCHERY if ( ! empty($archery_list) ) $gentries = array_merge($gentries, archery_info($archery_list, $schema, $errors, $debug)); //======================================================================= $nc = count($gentries); if ( !$nc ) { // NO SITES FOUND! $errno = "1"; echo "
    ".$errors[$errno]."\n"; return $errno; } else { if ( $debug == 2 ) { dbgmsg("

    ".$errors["119"]."cluster: ".$nc."
    "); foreach ( $gentries as $num=>$val ) dbgmsg($val["host"].":".$val["base"]."
    "); } } $dsarray = array (); $hnarray = array (); $pnarray = array (); $dnarray = array (); $sitetag = array (); /* a tag to skip duplicated entries */ // Purging cluster entries for ( $k = 0; $k < $nc; $k++ ) { $clhost = $gentries[$k]["host"]; $clport = $gentries[$k]["port"]; $basedn = $gentries[$k]["base"]; $fp = @fsockopen($clhost, $clport, $errno, $errstr, 2); $clconn = ldap_connect($clhost,$clport); if ( $fp && $clconn && !@$sitetag[$clhost] ) { fclose($fp); array_push($dsarray,$clconn); array_push($hnarray,$clhost); array_push($pnarray,$clport); array_push($dnarray,$basedn); @ldap_set_option($clconn, LDAP_OPT_NETWORK_TIMEOUT, $tout); $sitetag[$clhost] = 1; /* filtering tag */ if ($debug==2) dbgmsg("$k - $clhost:$clport "); } elseif ( $fp && $clconn && @$sitetag[$clhost] ) { fclose($fp); if ( $schema == "GLUE2"){ // Add only the base option $index = array_keys($hnarray, $clhost); $dnarray[$index[0]] = DN_GLUE; } elseif ( $schema == "NG"){ // Add only the base option $index = array_keys($hnarray, $clhost); $dnarray[$index[0]] = DN_LOCAL; } else { array_push($dsarray,$clconn); array_push($hnarray,$clhost); array_push($pnarray,$clport); array_push($dnarray,$basedn); } } } $nhosts = count($dsarray); if( $debug == 2 ) dbgmsg("
    ".$nhosts.$errors["108"]."
    "); if ( !$nhosts ) { // NO SITES REPLY... $errno = "2"; echo "
    ".$errors[$errno]."\n"; return $errno; } // Search all clusters and queues $ts1 = time(); $srarray = @ldap_search($dsarray,$dnarray,$filter,$lim,0,0,$tlim,LDAP_DEREF_NEVER); // If using the patched LDAP //$srarray = @ldap_search($dsarray,DN_LOCAL,$filter,$lim,0,0,$tlim,LDAP_DEREF_NEVER,$tout); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["109"]." (".($ts2-$ts1).$errors["104"].")
    "); /* * $ts1 = time(); * $qsarray = @ldap_search($dsarray,DN_LOCAL,$qfilstr,$qlim,0,0,$tlim,LDAP_DEREF_NEVER); * // Fall back to a conventional LDAP * // if ( !count($qsrarray)) $qsarray = @ldap_search($dsarray,DN_LOCAL,$qfilstr,$qlim,0,0,$tlim,LDAP_DEREF_NEVER); * $ts2 = time(); if($debug) dbgmsg("
    ".$errors["110"]." (".($ts2-$ts1).$errors["104"].")
    "); */ // Loop on clusters for ( $ids = 0; $ids < $nhosts; $ids++ ) { $entries = array(); $jentries = array(); $gentries = array(); $rowcont = array(); $sr = $srarray[$ids]; $hn = $hnarray[$ids]; $pn = $pnarray[$ids]; $ds = $dsarray[$ids]; $nr = @ldap_count_entries($ds,$sr); if ( !$sr || !$ds || !$nr ) { $error = ldap_error($ds); if ( $error == "Success" ) $error = $errors["3"]; if ( $debug ) dbgmsg("".$errors["111"]."$hn ($error)
    "); $sr = FALSE; } if ($ds && $sr) { $entries = @ldap_get_entries($ds,$sr); $nclusters = $entries["count"]; /* Actually, cluster and queue blocks, 2+ */ if ( !$nclusters ) { if ( $debug ) dbgmsg("$hn:".$errors["3"]."
    "); continue; } $nclu = 0; $nqueues = 0; $allqrun = 0; $gridjobs = 0; $allqueued = 0; $gridqueued = 0; $lrmsqueued = 0; $prequeued = 0; $totgridq = 0; $toflag2 = FALSE; $stopflag = FALSE; for ($i=0; $i<$nclusters; $i++) { $curdn = $entries[$i]["dn"]; $preflength = strrpos($curdn,","); $basedn = substr($curdn,$preflength+1); $allbasedn = strtolower(substr($curdn,$preflength-17)); if ($basedn == DN_GLUE) { // check if it is a site or a job; count $preflength = strpos($curdn,":"); $preflength = strpos($curdn,":",$preflength+1); $object = substr($curdn,$preflength+1,strpos($curdn,":",$preflength+1)-$preflength-1); if ($object=="ComputingService") { $dnparts = ldap_explode_dn($curdn,0); $endpointArray=explode(":",$dnparts[0]); $curname = $endpointArray[3]; $curport = $pn; $curalias = $entries[$i][GCLU_ANAM][0]; // Manipulate alias: replace the string if necessary and cut off at 22 characters; strip HTML tags if (file_exists("cnvalias.inc")) include('cnvalias.inc'); $curalias = strip_tags($curalias); if ( strlen($curalias) > 22 ) $curalias = substr($curalias,0,21) . ">"; $totqueued = @($entries[$i][GCLU_QJOB][0]) ? $entries[$i][GCLU_QJOB][0] : 0; /* deprecated since 0.5.38 */ $gmqueued = @($entries[$i][GCLU_PQUE][0]) ? $entries[$i][GCLU_PQUE][0] : 0; /* new since 0.5.38 */ $clstring = popup("clusdes.php?host=$curname&port=$curport&schema=$schema",700,620,1,$lang,$debug); $nclu++; } elseif ($object=="ComputingManager") { $curtotcpu = @($entries[$i][GCLU_TCPU][0]) ? $entries[$i][GCLU_TCPU][0] : 0; if ( !$curtotcpu && $debug ) dbgmsg("$curname".$errors["113"]."
    "); $curtotjobs = @($entries[$i][GCLU_TJOB][0]) ? $entries[$i][GCLU_TJOB][0] : 0; $curusedcpu = @($entries[$i][GCLU_UCPU][0]) ? $entries[$i][GCLU_UCPU][0] : -1; } elseif ($object=="ComputingShare") { $qstatus = $entries[$i][GQUE_STAT][0]; if ( $qstatus != "production" ) $stopflag = TRUE; $allqrun += @($entries[$i][GQUE_RUNG][0]) ? ($entries[$i][GQUE_RUNG][0]) : 0; $gridjobs += @($entries[$i][GQUE_GRUN][0]) ? ($entries[$i][GQUE_GRUN][0]) : 0; $gridqueued += @($entries[$i][GQUE_GQUE][0]) ? ($entries[$i][GQUE_GQUE][0]) : 0; $allqueued += @($entries[$i][GQUE_QUED][0]) ? ($entries[$i][GQUE_QUED][0]) : 0; /* deprecated since 0.5.38 */ $lrmsqueued += @($entries[$i][GQUE_LQUE][0]) ? ($entries[$i][GQUE_LQUE][0]) : 0; /* new since 0.5.38 */ $prequeued += @($entries[$i][GQUE_PQUE][0]) ? ($entries[$i][GQUE_PQUE][0]) : 0; /* new since 0.5.38 */ $nqueues++; } elseif ($object=="Location") { $dnparts = ldap_explode_dn($curdn,0); $endpointArray=explode(":",$dnparts[0]); $curname = $endpointArray[3]; $curport = $pn; // Country name massaging $vo = guess_country($curname,$entries[$i][GCLU_ZIPC][0]); if ($debug==2) dbgmsg("$ids: $curname".$errors["112"]."$vo
    "); $vostring = $_SERVER['PHP_SELF']."?display=vo=$vo"; if ( $lang != "default") $vostring .= "&lang=".$lang; if ( $debug ) $vostring .= "&debug=".$debug; $country = $vo; if ( $yazyk !== "en" ) $country = $strings["tlconvert"][$vo]; $country_content = "\"".$errors["312"]."\" ".$country." "; if (!in_array($country_content,$rowcont)){ $rowcont[] = $country_content; } } } elseif ($allbasedn == DN_LOCAL) { // check if it is a site or a job; count $preflength = strpos($curdn,"-"); $object = substr($curdn,$preflength+1,strpos($curdn,"-",$preflength+1)-$preflength-1); if ($object=="cluster") { $dnparts = ldap_explode_dn($curdn,0); $curname = substr(strstr($dnparts[0],"="),1); $curport = $pn; // Country name massaging $vo = guess_country($curname,$entries[$i][CLU_ZIPC][0]); if ($debug==2) dbgmsg("$ids: $curname".$errors["112"]."$vo
    "); $vostring = $_SERVER['PHP_SELF']."?display=vo=$vo"; if ( $lang != "default") $vostring .= "&lang=".$lang; if ( $debug ) $vostring .= "&debug=".$debug; $country = $vo; if ( $yazyk !== "en" ) $country = $strings["tlconvert"][$vo]; $rowcont[] = "\"".$errors["312"]."\" ".$country." "; $curtotcpu = @($entries[$i][CLU_TCPU][0]) ? $entries[$i][CLU_TCPU][0] : 0; if ( !$curtotcpu && $debug ) dbgmsg("$curname".$errors["113"]."
    "); $curalias = $entries[$i][CLU_ANAM][0]; // Manipulate alias: replace the string if necessary and cut off at 22 characters; strip HTML tags if (file_exists("cnvalias.inc")) include('cnvalias.inc'); $curalias = strip_tags($curalias); if ( strlen($curalias) > 22 ) $curalias = substr($curalias,0,21) . ">"; $curtotjobs = @($entries[$i][CLU_TJOB][0]) ? $entries[$i][CLU_TJOB][0] : 0; $curusedcpu = @($entries[$i][CLU_UCPU][0]) ? $entries[$i][CLU_UCPU][0] : -1; $totqueued = @($entries[$i][CLU_QJOB][0]) ? $entries[$i][CLU_QJOB][0] : 0; /* deprecated since 0.5.38 */ $gmqueued = @($entries[$i][CLU_PQUE][0]) ? $entries[$i][CLU_PQUE][0] : 0; /* new since 0.5.38 */ $clstring = popup("clusdes.php?host=$curname&port=$curport",700,620,1,$lang,$debug); $nclu++; } elseif ($object=="queue") { $qstatus = $entries[$i][QUE_STAT][0]; if ( $qstatus != "active" ) $stopflag = TRUE; $allqrun += @($entries[$i][QUE_RUNG][0]) ? ($entries[$i][QUE_RUNG][0]) : 0; $gridjobs += @($entries[$i][QUE_GRUN][0]) ? ($entries[$i][QUE_GRUN][0]) : 0; $gridqueued += @($entries[$i][QUE_GQUE][0]) ? ($entries[$i][QUE_GQUE][0]) : 0; $allqueued += @($entries[$i][QUE_QUED][0]) ? ($entries[$i][QUE_QUED][0]) : 0; /* deprecated since 0.5.38 */ $lrmsqueued += @($entries[$i][QUE_LQUE][0]) ? ($entries[$i][QUE_LQUE][0]) : 0; /* new since 0.5.38 */ $prequeued += @($entries[$i][QUE_PQUE][0]) ? ($entries[$i][QUE_PQUE][0]) : 0; /* new since 0.5.38 */ $nqueues++; } } } if ( !$nclu && $nqueues ) { if ( $debug ) dbgmsg("$hn:".$errors["3"].": ".$errors["111"].$errors["410"]."
    "); continue; } if ( $nclu > 1 && $debug ) dbgmsg("$hn:".$errors["3"].": $nclu ".$errors["406"]."
    "); if (!$nqueues) $toflag2 = TRUE; if ($debug==2 && $prequeued != $gmqueued) dbgmsg("$curname: cluster-prelrmsqueued != sum(queue-prelrmsqueued)"); $allrun = ($curusedcpu < 0) ? $allqrun : $curusedcpu; if ($gridjobs > $allrun) $gridjobs = $allrun; /* For versions < 0.5.38: * Some Grid jobs are counted towards $totqueued and not towards $allqueued * (those in GM), so $totqueued - $allqueued = $gmqueued, * and $truegridq = $gmqueued + $gridqueued * and $nongridq = $totqueued - $truegridq == $allqueued - $gridqueued * hence $truegridq = $totqueued - $nongridq */ $nongridq = ($totqueued) ? $allqueued - $gridqueued : $lrmsqueued; $truegridq = ($totqueued) ? $totqueued - $nongridq : $gridqueued + $prequeued; // temporary hack: // $truegridq = $gridqueued; // $formtgq = sprintf(" s",$truegridq); $formngq = sprintf("\ \;s",$nongridq); $localrun = $allrun - $gridjobs; $gridload = ($curtotcpu > 0) ? $gridjobs/$curtotcpu : 0; $clusload = ($curtotcpu > 0) ? $allrun/$curtotcpu : 0; $tstring = urlencode("$gridjobs+$localrun"); $jrstring = popup("jobstat.php?host=$curname&port=$curport&status=Running&jobdn=all",600,500,2,$lang,$debug); $jqstring = popup("jobstat.php?host=$curname&port=$curport&status=Queueing&jobdn=all",600,500,2,$lang,$debug); if ( $schema == "GLUE2"){ $jrstring = popup("jobstat.php?host=$curname&port=$curport&status=Running&jobdn=all&schema=$schema",600,500,2,$lang,$debug); $jqstring = popup("jobstat.php?host=$curname&port=$curport&status=Queueing&jobdn=all&schema=$schema",600,500,2,$lang,$debug); } if ( $toflag2 ) { $tstring .= " (no queue info)"; // not sure if this is localizeable at all } elseif ( $stopflag ) { $tstring .= " (queue inactive)"; // not sure if this is localizeable at all } // Add a cluster row $rowcont[] = " $curalias"; $rowcont[] = "$curtotcpu"; if ( $curtotcpu ) { $rowcont[] = "\"$gridjobs+$localrun\""; } else { $rowcont[] = "\"$gridjobs+$localrun\""; } // $rowcont[] = "$totqueued"; $rowcont[] = "$truegridq+$nongridq"; // Not adding anymore, cache instead // $ctable->addrow($rowcont); $tcont[] = $rowcont; $rowcont = array (); } } // Dump the collected table cache_table($cachefile,$tcont); } // HTML table initialization $ctable = new LmTableSp($module,$toppage->$module); // Sort /** possible ordering keywords: * country - sort by country, default * cpu - sort by advertised CPU number * grun - sort by number of running Grid jobs */ $ostring = "comp_by_".$order; usort($tcont,$ostring); $nrows = count($tcont); $votolink = array(); $affiliation = array(); foreach ( $tcont as $trow ) { $vo = $trow[0]; $vo = substr(stristr($vo,"./mon-icons/"),12); $vo = substr($vo,0,strpos($vo,".")); if ( !in_array($vo,$votolink) ) $votolink[]=$vo; array_push($affiliation,$vo); } $affcnt = array_count_values($affiliation); $prevvo = "boo"; $sumcpu = 0; $sumgridjobs = 0; $sumlocljobs = 0; $sumclusters = 0; $sumgridqueued = 0; $sumloclqueued = 0; //$sumqueued = 0; // actual loop foreach ( $tcont as $trow ) { $gridjobs = $trow[3]; $gridjobs = substr(stristr($gridjobs,"alt=\""),5); $gridjobs = substr($gridjobs,0,strpos($gridjobs,"+")); $localrun = $trow[3]; $localrun = substr(stristr($localrun,"+"),1); $localrun = substr($localrun,0,strpos($localrun,"\" w")); $truegridq = $trow[4]; $truegridq = substr(stristr($truegridq,""),3); $truegridq = substr($truegridq,0,strpos($truegridq,"")); $nongridq = $trow[4]; $nongridq = substr(stristr($nongridq,"+"),1); $vo = $trow[0]; $vo = substr(stristr($vo,"./mon-icons/"),12); $vo = substr($vo,0,strpos($vo,".")); if ( @$showvo && $showvo != $vo ) 
continue; $sumcpu += $trow[2]; $sumgridjobs += $gridjobs; $sumlocljobs += $localrun; $sumgridqueued += $truegridq; $sumloclqueued += $nongridq; // $sumqueued += $totqueued; $sumclusters ++; if ( $vo != $prevvo && $order == "country" ) { // start new country rowspan $prevvo = $vo; $vostring = $trow[0]; $ctable->addspacer("#000099"); $ctable->rowspan( $affcnt[$vo], $vostring, "#FFF2DF" ); $tcrow = array_shift($trow); $ctable->addrow($trow); } else { if ( $order == "country" ) $tcrow = array_shift($trow); $ctable->addrow($trow); } } $tcont = array(); $ctable->addspacer("#990000"); $rowcont[] = "".$errors["405"].""; $rowcont[] = "$sumclusters".$errors["406"].""; $rowcont[] = "$sumcpu"; $rowcont[] = "$sumgridjobs + $sumlocljobs"; $rowcont[] = "$sumgridqueued + $sumloclqueued"; // $rowcont[] = "$sumqueued"; $ctable->addrow($rowcont, "#ffffff"); $ctable->close(); // To change language, link back to ALL $linkback = $_SERVER['PHP_SELF']; if ( $debug ) { $linkback .= "?debug=".$debug; $separator = "&"; } else { $separator = "?"; } // Show flags if only one country is chosen if ( @$showvo ) { echo "
    \n"; foreach ( $votolink as $volink ) { $vostring = $_SERVER['PHP_SELF']."?display=vo=$volink"; if ( $lang != "default" ) $vostring .= "&lang=".$lang; if ( $debug ) $vostring .= "&debug=".$debug; $voimage = "\"".$errors["312"]."\""; echo "$voimage  "; } if ( $lang != "default") $linkall = $linkback.$separator."lang=".$lang; echo "".$errors["409"]."
    \n"; // Show ALL echo "
    \n"; } else { // Show languages $translations = scandir(getcwd()."/lang"); echo "

    \n"; foreach ( $translations as $transfile ) { $twoletcod = substr($transfile,0,2); if ( stristr($transfile,".") == ".inc" && $twoletcod != "us" ) { $linklang = $linkback.$separator."lang=".$twoletcod; echo "$twoletcod  "; } } echo "
    \n"; } return 0; // Done $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/mylo.js0000644000000000000000000000012411506337146023540 xustar000000000000000027 mtime=1293532774.731583 27 atime=1513200575.721716 30 ctime=1513200663.642792039 nordugrid-arc-5.4.2/src/services/ldap-monitor/mylo.js0000644000175000002070000000051711506337146023610 0ustar00mockbuildmock00000000000000function mylo (fnam,lnam,dom1,dom2){ if ( lnam == "" ) { var name = fnam; } else { var name = fnam + "." + lnam; } var host = dom1 + "." + dom2; var complete = name + "@" + host; output = "" + complete + ""; document.write(output); return output; } nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/attlist.php0000644000000000000000000000012411605357421024415 xustar000000000000000027 mtime=1310056209.254814 27 atime=1513200575.680716 30 ctime=1513200663.631791904 nordugrid-arc-5.4.2/src/services/ldap-monitor/attlist.php0000644000175000002070000000224511605357421024465 0ustar00mockbuildmock00000000000000title); $strings = &$toppage->strings; $giislist = &$toppage->giislist; require_once('attlist.inc'); $object = $_GET["object"]; $attribute = $_GET["attribute"]; $filter = $_GET["filter"]; if ( !$filter ) $filter=""; if ( !$object ) $object="cluster"; $attribute = rawurldecode($attribute); $filter = rawurldecode($filter); if ( $attribute[1]==":") { $attribute = unserialize($attribute); $filter = unserialize($filter); $attributes = $attribute; $filters = $filter; $n = count($attributes); $signs = array_fill(0,$n,"="); } else { $attributes = array ($attribute); $signs = array ("="); $filters = array ($filter); } do_attlist($object,$attributes,$signs,$filters,$strings,$giislist); // Done $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/quelist.php0000644000000000000000000000012312050460110024377 xustar000000000000000027 mtime=1352818760.684994 27 atime=1513200575.683716 29 ctime=1513200663.63879199 nordugrid-arc-5.4.2/src/services/ldap-monitor/quelist.php0000644000175000002070000001675512050460110024463 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; $clstring = popup("clusdes.php?host=$host&port=$port&schema=$schema",700,620,1,$lang,$debug); // Header table $toppage->tabletop("","".$toptitle." ".$qname." (".$host.")"); $lim = array( "dn", JOB_NAME, JOB_GOWN, JOB_SUBM, JOB_STAT, JOB_COMP, JOB_USET, JOB_USEM, JOB_ERRS, JOB_CPUS, JOB_EQUE ); if ( $schema == "GLUE2") { $lim = array( "dn", GJOB_NAME, GJOB_GOWN, GJOB_SUBM, GJOB_STAT, GJOB_COMP, GJOB_USET, GJOB_USEM, GJOB_ERRS, GJOB_CPUS, GJOB_EQUE ); } if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 15; $tout = 20; if( $debug ) dbgmsg("
    :::> ".$errors["101"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); // ldapsearch filter strings for cluster and queues $filstr = "(objectclass=".OBJ_AJOB.")"; $dn = DN_LOCAL; $topdn = DN_GLOBL; if ( $schema == "GLUE2") { $filstr = "(objectclass=".GOBJ_AJOB.")"; $dn = "GLUE2GroupID=services,".DN_GLUE; } // Establish connection to the requested LDAP server $ds = ldap_connect($host,$port); if ($ds) { // If contact OK, search for NorduGrid clusters $basedn = QUE_NAME."=".$qname.",".CLU_NAME."=".$host.","; $locdn = $basedn.$dn; if ( $schema == "GLUE2") { $basedn = GQUE_NAME."=".$qname.",".GCLU_NAME."=".$host.","; $basedn = "GLUE2ShareID=urn:ogf:ComputingShare:".$host.":".$qname.",GLUE2ServiceID=urn:ogf:ComputingService:".$host.":arex,"; $locdn = $basedn.$dn; } $aaa = ldap_nice_dump($strings,$ds,$locdn); echo "
    "; $ts1 = time(); $sr = ldap_search($ds,$dn,$filstr,$lim,0,0,$tlim,LDAP_DEREF_NEVER); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["110"]." (".($ts2-$ts1).$errors["104"].")
    "); // Fall back to conventional LDAP // if (!$sr) $sr = ldap_search($ds,$dn,$filstr,$lim,0,0,$tlim,LDAP_DEREF_NEVER); if ($sr) { $nmatch = ldap_count_entries($ds,$sr); if ($nmatch > 0) { $entries = ldap_get_entries($ds,$sr); $njobs = $entries["count"]; define("CMPKEY",JOB_SUBM); if ( $schema == "GLUE2") define("CMPKEY",GJOB_SUBM); usort($entries,"ldap_entry_comp"); // HTML table initialisation $ltable = new LmTable($module,$toppage->$module); // loop on jobs $nj = 0; for ($i=1; $i<$njobs+1; $i++) { if ( $schema == "GLUE2") { $equeue = $entries[$i][GJOB_EQUE][0]; if ( $equeue !== $qname ) { if ( $debug == 2 ) dbgmsg($equeue." != ".$qname); continue; } $jobdn = rawurlencode($entries[$i]["dn"]); $curstat = $entries[$i][GJOB_STAT][0]; $stahead = substr($curstat,0,12); $ftime = ""; if ($stahead=="FINISHED at:") { $ftime = substr(strrchr($curstat, " "), 1); } elseif ($curstat=="FINISHED") { $ftime = $entries[$i][GJOB_COMP][0]; } if ( $ftime ) { $ftime = cnvtime($ftime); $curstat = "FINISHED at: ".$ftime; } $uname = $entries[$i][GJOB_GOWN][0]; $encuname = rawurlencode($uname); $family = cnvname($uname, 2); $jname = htmlentities($entries[$i][GJOB_NAME][0]); $jobname = ($entries[$i][GJOB_NAME][0]) ? $jname : "N/A"; $time = ($entries[$i][GJOB_USET][0]) ? $entries[$i][GJOB_USET][0] : ""; $memory = ($entries[$i][GJOB_USEM][0]) ? $entries[$i][GJOB_USEM][0] : ""; $ncpus = ($entries[$i][GJOB_CPUS][0]) ? $entries[$i][GJOB_CPUS][0] : ""; $error = ($entries[$i][GJOB_ERRS][0]); if ( $error ) $error = ( preg_match("/user/i",$error) ) ? "X" : "!"; $status = "All"; $newwin = popup("jobstat.php?host=$host&port=$port&status=$status&jobdn=$jobdn&schema=$schema",750,430,4,$lang,$debug); $usrwin = popup("userlist.php?bdn=$topdn&owner=$encuname&schema=$schema",700,500,5,$lang,$debug); } else { //NG schema parse $equeue = $entries[$i][JOB_EQUE][0]; if ( $equeue !== $qname ) { if ( $debug == 2 ) dbgmsg($equeue." != ".$qname); continue; } $jobdn = rawurlencode($entries[$i]["dn"]); $curstat = $entries[$i][JOB_STAT][0]; $stahead = substr($curstat,0,12); $ftime = ""; if ($stahead=="FINISHED at:") { $ftime = substr(strrchr($curstat, " "), 1); } elseif ($curstat=="FINISHED") { $ftime = $entries[$i][JOB_COMP][0]; } if ( $ftime ) { $ftime = cnvtime($ftime); $curstat = "FINISHED at: ".$ftime; } $uname = $entries[$i][JOB_GOWN][0]; $encuname = rawurlencode($uname); $family = cnvname($uname, 2); $jname = htmlentities($entries[$i][JOB_NAME][0]); $jobname = ($entries[$i][JOB_NAME][0]) ? $jname : "N/A"; $time = ($entries[$i][JOB_USET][0]) ? $entries[$i][JOB_USET][0] : ""; $memory = ($entries[$i][JOB_USEM][0]) ? $entries[$i][JOB_USEM][0] : ""; $ncpus = ($entries[$i][JOB_CPUS][0]) ? $entries[$i][JOB_CPUS][0] : ""; $error = ($entries[$i][JOB_ERRS][0]); if ( $error ) $error = ( preg_match("/user/i",$error) ) ? "X" : "!"; $status = "All"; $newwin = popup("jobstat.php?host=$host&port=$port&status=$status&jobdn=$jobdn",750,430,4,$lang,$debug); $usrwin = popup("userlist.php?bdn=$topdn&owner=$encuname",700,500,5,$lang,$debug); } // filling the table $nj++; $lrowcont[] = "$nj $error"; $lrowcont[] = "$jobname"; $lrowcont[] = "$family"; $lrowcont[] = "$curstat"; $lrowcont[] = "$time"; $lrowcont[] = "$memory"; $lrowcont[] = "$ncpus"; $ltable->addrow($lrowcont); $lrowcont = array (); } $ltable->close(); } else { $errno = "4"; echo "
    ".$errors[$errno]."\n"; return $errno; } } else { $errno = "5"; echo "
    ".$errors[$errno]."\n"; return $errno; } ldap_free_result($sr); ldap_close($ds); return 0; } else { $errno = "6"; echo "
    ".$errors[$errno]."\n"; return $errno; } // Done $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/README.in0000644000000000000000000000012711506337146023512 xustar000000000000000027 mtime=1293532774.731583 30 atime=1513200651.526643853 30 ctime=1513200663.645792075 nordugrid-arc-5.4.2/src/services/ldap-monitor/README.in0000644000175000002070000000520511506337146023556 0ustar00mockbuildmock00000000000000NorduGrid ARC version @VERSION@ Grid Monitor ============ Description ----------- Set of PHP scripts, providing a Web interface to the NorduGrid Information System. Should be working for any similar LDAP-based service, if the schema configuration is done carefuly. The directory contains: cache - directory for front page cache includes - directory with common methods and configuration file (settings.inc) lang - directory with localizations man - directory for the man page mon-icons - directory with icons allusers.php - list grid users attlist.php - show values of selected attributes on the grid clusdes.php - show cluster or storage information discover.php - list attributes specific for an object for consecutive search ldap-monitor.in - lynx call for the monitor (template) help.php - print help jobstat.php - show running/other jobs in a queue loadmon.php - main grid monitor script Makefile.am - Makefile template monitor.js - Java script for pop-up screens mylo.js - Java script for mail addresses quelist.php - show queue details and jobs README.in - README file (template) sestat.php - list storage elements userlist.php - show allowed sites and list of jobs of a user volist.php - static list of some VOs vo-users.php - lists users in a VO Requirements ------------ - GD library (http://www.boutell.com/gd/) - LDAP library (e.g., http://www.openldap.org) - PHP4 or PHP5 (http://www.php.net) compiled with LDAP and GD extensions - HTTP server compiled with PHP4 or PHP5 - Working ARC information system instance or a similar LDAP-based service - Optional: running Virtual Organisation LDAP-based service Installation ------------ 1. Copy all the files in a folder, accessible by the HTTP server 2. Verify that this folder contains a directory called "cache" and that it is writeable by the HTTP server. If your server is configured to have write access only to a specific location, such as "../htdata", modify CACHE_LOCATION value in "includes/settings.inc" accordingly 3. Modify "includes/settings.inc" according to your infosystem structure and liking: most likely, you want to modify the $giislist array by removing some GIISes/GRISes and adding other(s) 4. Run the whole stuff by loading "loadmon.php" into your browser Fine tuning ----------- - Making output more human-readable: modify "/lang/*.inc", "includes/cnvname.inc", "includes/cnvalias.inc". - Preventing sites from being polled: modify "includes/blacklist.inc". Otherwise, the file is not needed. 
Contact ------- Oxana Smirnova, oxana.smirnova@hep.lu.se nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/ldap-monitor.in0000644000000000000000000000012611506337146025161 xustar000000000000000027 mtime=1293532774.731583 29 atime=1513200651.51164367 30 ctime=1513200663.646792087 nordugrid-arc-5.4.2/src/services/ldap-monitor/ldap-monitor.in0000755000175000002070000000010411506337146025222 0ustar00mockbuildmock00000000000000#!/bin/sh lynx http://localhost/@monitor_local_prefix@/loadmon.php nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/sestat.php0000644000000000000000000000012411605357421024234 xustar000000000000000027 mtime=1310056209.254814 27 atime=1513200575.719716 30 ctime=1513200663.639792002 nordugrid-arc-5.4.2/src/services/ldap-monitor/sestat.php0000644000175000002070000001375311605357421024312 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; $giislist = &$toppage->giislist; // Header table $toppage->tabletop("".$toptitle."

    ",""); if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 10; $tout = 15; if($debug) dbgmsg("
    :::> ".$errors["101"].$tlim.$errors["102"].$tout.$errors["103"]." <:::
    "); // Arrays defining the attributes to be returned $lim = array( "dn", SEL_NAME, SEL_ANAM, SEL_CURL, SEL_BURL, SEL_TYPE, SEL_FREE, SEL_TOTA ); // ldapsearch filter strings for clusters and queues $filstr = "(objectclass=".OBJ_STEL.")"; // Top GIIS server: get all from the pre-defined list $ngiis = count($giislist); $ts1 = time(); $gentries = recursive_giis_info($giislist,"nordugrid-SE",$errors,$debug); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["106"].$ngiis." (".($ts2-$ts1).$errors["104"].")
    "); $nc = count($gentries); if ( !$nc ) { // NO SITES FOUND! $errno = "1"; echo "
    ".$errors[$errno]."\n"; return $errno; } $dsarray = array (); $hnarray = array (); $sitetag = array (); /* a tag to skip duplicated entries */ for ( $k = 0; $k < $nc; $k++ ) { $clhost = $gentries[$k]["host"]; $clport = $gentries[$k]["port"]; $clconn = ldap_connect($clhost,$clport); if ( $clconn && !$sitetag[$clhost] ) { array_push($dsarray,$clconn); array_push($hnarray,$clhost); $sitetag[$clhost] = 1; /* filtering tag */ if ($debug==2) dbgmsg("$k - $clhost:$clport "); } } $nhosts = count($dsarray); if ( !$nhosts ) { // NO SITES REPLY... $errno = "2"; echo "
    ".$errors[$errno]."\n"; return $errno; } // Search all SEs $ts1 = time(); $srarray = @ldap_search($dsarray,DN_LOCAL,$filstr,$lim,0,0,$tlim,LDAP_DEREF_NEVER); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["124"]." (".($ts2-$ts1).$errors["104"].")
    "); $ctable = new LmTableSp($module,$toppage->$module); // Loop on SEs $senum = 0; $space = 0; $capacity = 0; for ( $ids = 0; $ids < $nhosts; $ids++ ) { $sr = $srarray[$ids]; $ds = $dsarray[$ids]; $hn = $hnarray[$ids]; /* host name, for debugging */ if ($ds && $sr) { $entries = @ldap_get_entries($ds,$sr); $nclusters = $entries["count"]; /* May be several SEs! */ if ( !$nclusters ) continue; for ( $i = 0; $i < $nclusters; $i++) { $senum++; $curdn = $entries[$i]["dn"]; $curname = $entries[$i][SEL_NAME][0]; $curalias = $entries[$i][SEL_ANAM][0]; $curspace = ( $entries[$i][SEL_FREE][0] ) ? $entries[$i][SEL_FREE][0] : 0; // $curcapacity = ( $entries[$i][SEL_TOTA][0] ) ? $entries[$i][SEL_TOTA][0] : $errors["407"]; $curcapacity = ( $entries[$i][SEL_TOTA][0] ) ? $entries[$i][SEL_TOTA][0] : $curspace; $cururl = ( $entries[$i][SEL_BURL][0] ) ? $entries[$i][SEL_BURL][0] : $entries[$i][SEL_CURL][0]; $curtype = $entries[$i][SEL_TYPE][0]; $clstring = popup("clusdes.php?host=$curname&port=$curport&isse=1&debug=$debug",700,620,1,$lang,$debug); $curspace = intval($curspace/1000); $occupancy = 1; // by default, all occupied $space += $curspace; // if ( $curcapacity != $errors["407"] ) { if ( $curcapacity != 0 ) { $curcapacity = intval($curcapacity/1000); $occupancy = ($curcapacity - $curspace)/$curcapacity; $capacity += $curcapacity; } $tstring = $curspace."/".$curcapacity; $tlen = strlen($tstring); if ($tlen<11) { $nspaces = 11 - $tlen; for ( $is = 0; $is < $nspaces; $is++ ) $tstring .= " "; } $tstring = urlencode($tstring); if ($debug==2) dbgmsg("$senum: $curname at $hn
    "); if ( strlen($curalias) > 15 ) $curalias = substr($curalias,0,15) . ">"; // $clstring = popup("clusdes.php?host=$curname&port=2135",700,620,1,$lang,$debug); $rowcont[] = "$senum"; $rowcont[] = " $curalias"; $rowcont[] = "\"$tstring\""; // $rowcont[] = $curcapacity.$errors["408"]; // $rowcont[] = $curspace.$errors["408"]; $rowcont[] = "$curname"; $rowcont[] = "$cururl"; $rowcont[] = "$curtype"; $ctable->addrow($rowcont); $rowcont = array (); } } $entries = array(); $jentries = array(); $gentries = array(); } $occupancy = ($capacity - $space)/$capacity; $tstring = $space."/".$capacity; $ctable->addspacer("#ffcc33"); $rowcont[] = " "; $rowcont[] = "".$errors["405"].""; $rowcont[] = "\"$tstring\""; //$rowcont[] = "$capacity".$errors["408"].""; //$rowcont[] = "$space".$errors["408"].""; $rowcont[] = " "; $rowcont[] = " "; $rowcont[] = " "; $ctable->addrow($rowcont, "#ffffff"); $ctable->close(); return 0; // Done $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/jobstat.php0000644000000000000000000000012412050711011024357 xustar000000000000000027 mtime=1352897033.257021 27 atime=1513200575.721716 30 ctime=1513200663.636791965 nordugrid-arc-5.4.2/src/services/ldap-monitor/jobstat.php0000644000175000002070000001755612050711011024442 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; // Header table $titles = explode(":",$toptitle); // two alternative titles, separated by column if ($jobdn=="all") { $clstring = popup("clusdes.php?host=$host&port=$port&schema=$schema",700,620,1,$lang,$debug); $gtitle = "".$titles[0]." $host"; } else { $jobdn = rawurldecode($jobdn); $jobdn = preg_replace("/\"/","",$jobdn); $dn_pieces = ldap_explode_dn($jobdn,1); $jobgid = $dn_pieces[0]; $gtitle = "".$titles[1].": $jobgid"; } $toppage->tabletop("",$gtitle); // Arrays defining the attributes to be returned $lim = array( "dn", JOB_NAME, JOB_EQUE, JOB_GOWN, JOB_STAT, JOB_USET, JOB_SUBM, JOB_CPUS ); // ldapsearch filter string for jobs $filstr="(objectclass=".OBJ_AJOB.")"; if ( $schema == "GLUE2") { $lim = array( "dn", GJOB_NAME, GJOB_EQUE, GJOB_GOWN, GJOB_STAT, GJOB_USET, GJOB_SUBM, GJOB_CPUS ); $filstr="(objectclass=".GOBJ_AJOB.")"; } if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 15; $tout = 20; if( $debug ) dbgmsg("
    :::> ".$errors["101"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); // Establish connection to the requested LDAP server $ds = ldap_connect($host,$port); $bdn = DN_LOCAL; $topdn = DN_GLOBL; if ( $schema == "GLUE2") { $bdn = DN_GLUE; if ($jobdn != "all") $bdn = ""; } if ($ds) { // Single job info dump and quit if ($jobdn != "all") { // $basedn = explode("Mds",$jobdn); $basedn = preg_split("/mds/i",$jobdn); $locdn = $basedn[0].$bdn; $thisdn = ldap_nice_dump($strings,$ds,$locdn); ldap_close($ds); return 0; } // Loop over all the jobs $sr = @ldap_search($ds,$bdn,$filstr,$lim,0,0,$tlim,LDAP_DEREF_NEVER); // Fall back to conventional LDAP // if (!$sr) $sr = @ldap_search($ds,$bdn,$filstr,$lim,0,0,$tlim,LDAP_DEREF_NEVER); if ($sr) { // If search returned, check that there are valid entries $nmatch = ldap_count_entries($ds,$sr); if ($nmatch > 0) { // HTML table initialisation $jtable = new LmTable($module,$toppage->$module); // If there are valid entries, tabulate results $entries = ldap_get_entries($ds,$sr); $njobs = $entries["count"]; define("CMPKEY",JOB_SUBM); if ( $schema == "GLUE2") define("CMPKEY",GJOB_SUBM); usort($entries,"ldap_entry_comp"); // loop on jobs $jcount = 0; for ($i=1; $i<$njobs+1; $i++) { $jdn = rawurlencode($entries[$i]["dn"]); $curstat = $entries[$i][JOB_STAT][0]; if ( $schema == "GLUE2") { $curstat = $entries[$i][GJOB_STAT][0]; } /* * The following flags may need an adjustment, * depending on the Job Status provider */ // Running job: statail == "R" or "run" // $statail = substr($curstat,-3); $statail = substr(strstr($curstat,"INLRMS:"),7); $statail = trim($statail); // Queued job: stahead != "FIN" && statail != "R" and "run" etc $stahead = substr($curstat,0,3); $flagrun = ( $status == "Running" && ( $statail == "R" || /* PBS */ $statail == "S" || /* suspended by Condor */ $statail == "run" ) /* easypdc */ ); $flagque = ( $status != "Running" && $statail != "R" && $statail != "S" && $statail != "run" && $stahead != "FIN" && $stahead != "FAI" && $stahead != "EXE" && $stahead != "KIL" && $stahead != "DEL" ); /* No changes necessary below */ $flagact = ($flagrun || $flagque)?1:0; if ($flagact == 1 || $status == "All" ) { if ( $schema == "GLUE2") { $uname = $entries[$i][GJOB_GOWN][0]; $encuname = rawurlencode($uname); $uname = addslashes($uname); // In case $uname contains escape characters $family = cnvname($uname, 2); $jname = htmlentities($entries[$i][JOB_NAME][0]); $jobname = ($entries[$i][GJOB_NAME][0]) ? $jname : "N/A"; $queue = ($entries[$i][GJOB_EQUE][0]) ? $entries[$i][GJOB_EQUE][0] : ""; $time = ($entries[$i][GJOB_USET][0]) ? $entries[$i][GJOB_USET][0] : ""; $ncpus = ($entries[$i][GJOB_CPUS][0]) ? $entries[$i][GJOB_CPUS][0] : ""; $newwin = popup("jobstat.php?host=$host&port=$port&status=$status&jobdn=$jdn&schema=$schema",750,430,4,$lang,$debug); $quewin = popup("quelist.php?host=$host&port=$port&qname=$queue&schema=$schema",750,430,6,$lang,$debug); $usrwin = popup("userlist.php?bdn=$topdn&owner=$encuname&schema=$schema",700,500,5,$lang,$debug); } else { $uname = $entries[$i][JOB_GOWN][0]; $encuname = rawurlencode($uname); $uname = addslashes($uname); // In case $uname contains escape characters $family = cnvname($uname, 2); $jname = htmlentities($entries[$i][JOB_NAME][0]); $jobname = ($entries[$i][JOB_NAME][0]) ? $jname : "N/A"; $queue = ($entries[$i][JOB_EQUE][0]) ? $entries[$i][JOB_EQUE][0] : ""; $time = ($entries[$i][JOB_USET][0]) ? $entries[$i][JOB_USET][0] : ""; $ncpus = ($entries[$i][JOB_CPUS][0]) ? 
$entries[$i][JOB_CPUS][0] : ""; $newwin = popup("jobstat.php?host=$host&port=$port&status=$status&jobdn=$jdn",750,430,4,$lang,$debug); $quewin = popup("quelist.php?host=$host&port=$port&qname=$queue",750,430,6,$lang,$debug); $usrwin = popup("userlist.php?bdn=$topdn&owner=$encuname",700,500,5,$lang,$debug); } $jcount++; // filling the table $jrowcont[] = "$jcount $jobname"; $jrowcont[] = "$family"; $jrowcont[] = "$curstat"; $jrowcont[] = "$time"; $jrowcont[] = "$queue"; $jrowcont[] = "$ncpus"; $jtable->addrow($jrowcont); $jrowcont = array (); } } if ($jcount == 0) $jtable->adderror("".$errors["4"].": ".$status.""); $jtable->close(); } else { echo "
    ".$errors["4"]."".$errors["7"]."\n"; } } else { echo "
    ".$errors["4"]."".$errors["7"]."\n"; } $entries = array(); @ldap_free_result($sr); ldap_close($ds); return 0; } else { $errno = "6"; echo "
    ".$errors[$errno]."\n"; return $errno; } // Done $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/monitor.js0000644000000000000000000000012411506337146024247 xustar000000000000000027 mtime=1293532774.731583 27 atime=1513200575.665716 30 ctime=1513200663.641792026 nordugrid-arc-5.4.2/src/services/ldap-monitor/monitor.js0000644000175000002070000000322711506337146024320 0ustar00mockbuildmock00000000000000function ngurl(link) { var wloc="http://"+document.domain+link; var vtest=link; var prot=vtest.substring(0,4); var vhttp="http"; if (prot == vhttp) { var wloc=link } return wloc; } function monitor(link,x,y,n) { // "n" is needed to keep dedicated windows for each monitor type // function ngurl() adds HTTP contact string, if needed // wloc=ngurl(link); var ua = ' ' + navigator.userAgent.toLowerCase(); var is_opera = ua.indexOf('opera'); var is_lynx = ua.indexOf('lynx'); var is_konqueror = ua.indexOf('konqueror'); wloc = link; browser = navigator.appName; if ( is_opera>0 || is_lynx>0 || is_konqueror>0 ) { window.location = wloc; } else { aaa=open("","win"+n,"innerWidth="+x+",innerHeight="+y+",resizable=1,scrollbars=1,width="+x+",height="+y); aaa.document.encoding = "text/html; charset=utf-8"; aaa.document.clear(); aaa.document.writeln(""); aaa.document.writeln(""); aaa.document.writeln("NorduGrid"); aaa.document.writeln(""); aaa.document.writeln(""); aaa.document.writeln(""); aaa.document.writeln("






    "); aaa.document.writeln("Collecting information..."); aaa.document.writeln("

    "); aaa.document.writeln(""); aaa.document.writeln(""); aaa.document.close(); aaa.document.location.href=wloc; aaa.document.close(); } } nordugrid-arc-5.4.2/src/services/ldap-monitor/PaxHeaders.7502/includes0000644000000000000000000000013213214316027023743 xustar000000000000000030 mtime=1513200663.776793677 30 atime=1513200668.717854109 30 ctime=1513200663.776793677 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/0000755000175000002070000000000013214316027024066 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/Makefile.am0000644000000000000000000000012611634415166026067 xustar000000000000000027 mtime=1316100726.768006 30 atime=1513200604.809072479 29 ctime=1513200663.75979347 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/Makefile.am0000644000175000002070000000016411634415166026133 0ustar00mockbuildmock00000000000000monitorincdir = @ldap_monitor_prefix@/includes monitorinc_DATA = $(srcdir)/*.inc EXTRA_DIST = $(monitorinc_DATA) nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/comfun.inc0000644000000000000000000000012411506337146026012 xustar000000000000000027 mtime=1293532774.731583 27 atime=1513200575.670716 30 ctime=1513200663.766793555 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/comfun.inc0000644000175000002070000000570011506337146026061 0ustar00mockbuildmock00000000000000 XXX   */ $geo1 = $a[0]; $geo2 = $b[0]; $geo1 = substr(stristr($geo1,""),3); $geo2 = substr(stristr($geo2,""),3); $geo1 = substr($geo1,0,strpos($geo1,"<")); $geo2 = substr($geo2,0,strpos($geo2,"<")); $ali1 = $a[1]; $ali2 = $b[1]; $ali1 = substr(stristr($ali1,""),3); $ali2 = substr(stristr($ali2,""),3); $ali1 = substr($ali1,0,strpos($ali1,"<")); $ali2 = substr($ali2,0,strpos($ali2,"<")); $cmpgeo = strcasecmp ($geo1,$geo2); $cmpali = strcasecmp ($ali1,$ali2); if ( !$cmpgeo ) return $cmpali; return $cmpgeo; } /** * @return int * @param a array * @param b array * @desc Compares by CPU */ function comp_by_cpu ($a, $b) { $cpu1 = $a[2]; $cpu2 = $b[2]; $cmpcpu = $cpu2 - $cpu1; return $cmpcpu; } /** * @return int * @param a array * @param b array * @desc Compares by grid running jobs */ function comp_by_grun ($a, $b) { $sum1 = $a[3]; $sum2 = $b[3]; // echo $sum1." vs ".$sum2."
    "; $sum1 = substr(stristr($sum1,"alt=\""),5); $sum2 = substr(stristr($sum2,"alt=\""),5); $sum1 = substr($sum1,0,strpos($sum1,"+")); $sum2 = substr($sum2,0,strpos($sum2,"+")); $cmpsum = $sum2 - $sum1; return $cmpsum; } ?>nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/blacklist.inc0000644000000000000000000000012411506337146026473 xustar000000000000000027 mtime=1293532774.731583 27 atime=1513200575.678716 30 ctime=1513200663.762793506 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/blacklist.inc0000644000175000002070000000257311506337146026547 0ustar00mockbuildmock00000000000000"0", "grid.fi.uib.no"=>"0", "dc2.uio.no"=>"0", "dc1.uio.no"=>"0", "dc3.uio.no"=>"0", "dc4.uio.no"=>"0", "fire.ii.uib.no"=>"0", "hydra.ii.uib.no"=>"0", "grid.nbi.dk"=>"0", "lscf.nbi.dk"=>"0", "hepax1.nbi.dk"=>"0", "morpheus.nbi.dk"=>"0", "heppc08.nbi.dk"=>"0", "grid.uni-c.dk"=>"0", "tambohuse.imada.sdu.dk"=>"0", "gridrouter.imada.sdu.dk"=>"0", "tiger.imada.sdu.dk"=>"0", "cbs202.cbs.dtu.dk"=>"0", "gridgate.it.dtu.dk"=>"0", "amigos24.diku.dk"=>"0", "nroot.hip.fi"=>"0", "grid.hip.fi"=>"0", "hirmu.hip.fi"=>"0", "pc19.hip.fi"=>"0", "pc30.hip.helsinki.fi"=>"0", "testbed0.hip.helsinki.fi"=>"0", "pchip04.cern.ch"=>"0", "quark.hep.lu.se"=>"0", "farm.hep.lu.se"=>"0", "hathi.hep.lu.se"=>"0", "grid.tsl.uu.se"=>"0", "grid.scfab.se"=>"0", "bambi.quark.lu.se"=>"0", "nexus.swegrid.se"=>"0", "hagrid.it.uu.se"=>"0", "ingrid.hpc2n.umu.se"=>"0", "sigrid.lunarc.lu.se"=>"0", "bluesmoke.nsc.liu.se"=>"0", "g01n01.pdc.kth.se"=>"0", "ingvar.nsc.liu.se"=>"0", "seth.hpc2n.umu.se"=>"0", "banan.hpc2n.umu.se"=>"0", "jakarta.hpc2n.umu.se"=>"0", "gridum2.cs.umu.se"=>"0", "gridum1.cs.umu.se"=>"0", "sleipner.byggmek.lth.se"=>"0", "grendel.it.uu.se"=>"0", "login-3.monolith.nsc.liu.se"=>"0", "vls.science.upjs.sk"=>"0", "213-35-172-38-dsl.plus.estpak.ee"=>"0", "cm-gw.phys.ualberta.ca"=>"0", "tgrid.icepp.s.u-tokyo.ac.jp"=>"0", "hmx00.kek.jp"=>"0", "dummy"=>"0", "dummy"=>"0"); ?>nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/locale.inc0000644000000000000000000000012311765334733025767 xustar000000000000000026 mtime=1339406811.15757 27 atime=1513200575.674716 30 ctime=1513200663.772793628 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/locale.inc0000644000175000002070000007054111765334733026044 0ustar00mockbuildmock00000000000000 array ( // Table headers "loadmon" => array( "0" => "Grid Monitor", "Country" => 30, "Site" => 160, "CPUs" => 10, "Load (processes: Grid+local)" => 210, "Queueing" => 10 ), "clusdes" => array("0" => "Cluster Details for", "Queue" => 0, "Status" => 0, "Limits (min)" => 0, "CPUs" => 0, "Running" => 0, "Queueing" => 0 ), "jobstat" => array("0" => "Jobs at:Job ID", "Job name" => 0, "Owner" => 0, "Status" => 0, "CPU (min)" => 0, "Queue" => 0, "CPUs" => 0 ), "volist" => array("0" => "Virtual Organisations", "Virtual Organisation" => 0, "Members" => 0, "Served by" => 0 ), "vousers" => array("0" => "Grid User Base", "#" => 0, "Name" => 0, "Affiliation" => 0, "E-mail" => 0 ), "userlist" => array("0" => "Information for", "" => 0, "Job name" => 0, "Status" => 0, "CPU (min)" => 0, "Cluster" => 0, "Queue" => 0, "CPUs" => 0 ), "userres" => array("0" => "", "Cluster:queue" => 0, "Free CPUs" => 0, "Exp. 
queue length" => 0, "Free disk (MB)" => 0 ), "attlist" => array("0" => "Attribute values", "Resource" => 0, "Current value" => 0 ), "quelist" => array("0" => "Details for the queue", "" => 0, "Job name" => 0, "Owner" => 0, "Status" => 0, "CPU (min)" => 0, "Memory (KB)" => 0, "CPUs" => 0 ), "sestat" => array("0" => "Storage Elements", "#" => 0, "Alias" => 0, "Tot. space" => 0, "Free space" => 0, "Name" => 0, "Base URL" => 0, "Type" => 0 ), "allusers" => array("0" => "Authorised Grid Users:Active Grid Users", "#" => 0, "Name" => 0, "Affiliaton" => 0, "Jobs" => 0, "Sites" => 0 ), "ldapdump" => array("0" => "", "Attribute" => 0, "Value" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info valid from (GMT)", "Mds-validto" => "Info valid to (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Front-end domain name", "nordugrid-cluster-aliasname" => "Cluster alias", "nordugrid-cluster-contactstring" => "Contact string", "nordugrid-cluster-interactive-contactstring" => "Interactive contact", "nordugrid-cluster-comment" => "Comment", "nordugrid-cluster-support" => "E-mail contact", "nordugrid-cluster-lrms-type" => "LRMS type", "nordugrid-cluster-lrms-version" => "LRMS version", "nordugrid-cluster-lrms-config" => "LRMS details", "nordugrid-cluster-architecture" => "Architecture", "nordugrid-cluster-opsys" => "Operating system", "nordugrid-cluster-homogeneity" => "Homogeneous cluster", "nordugrid-cluster-nodecpu" => "CPU type (slowest)", "nordugrid-cluster-nodememory" => "Memory (MB, smallest)", "nordugrid-cluster-totalcpus" => "CPUs, total", "nordugrid-cluster-cpudistribution" => "CPU:machines", "nordugrid-cluster-sessiondir-free" => "Disk space, available (MB)", "nordugrid-cluster-sessiondir-total" => "Disk space, total (MB)", "nordugrid-cluster-cache-free" => "Cache size, available (MB)", "nordugrid-cluster-cache-total" => "Cache size, total (MB)", "nordugrid-cluster-runtimeenvironment" => "Runtime environment", "nordugrid-cluster-localse" => "Storage Element, local", "nordugrid-cluster-middleware" => "Grid middleware", "nordugrid-cluster-totaljobs" => "Jobs, total amount", "nordugrid-cluster-usedcpus" => "CPUs, occupied", "nordugrid-cluster-queuedjobs" => "Jobs, queued", "nordugrid-cluster-location" => "Postal code", "nordugrid-cluster-owner" => "Owner", "nordugrid-cluster-issuerca" => "Certificate issuer", "nordugrid-cluster-nodeaccess" => "Node IP connectivity", "nordugrid-cluster-gridarea" => "Session area (OBSOLETE)", "nordugrid-cluster-gridspace" => "Grid disk space (OBSOLETE)", "nordugrid-cluster-opsysdistribution" => "OS distribution (OBSOLETE)", "nordugrid-cluster-runningjobs" => "Jobs, running (OBSOLETE)", "nordugrid-queue-name" => "Queue name", "nordugrid-queue-status" => "Queue status", "nordugrid-queue-running" => "Jobs, running", "nordugrid-queue-queued" => "Jobs, queued", "nordugrid-queue-maxrunning" => "Jobs, running (max)", "nordugrid-queue-maxqueuable" => "Jobs, queueable (max)", "nordugrid-queue-maxuserrun" => "Jobs per Unix user (max)", "nordugrid-queue-maxcputime" => "CPU time, max. (minutes)", "nordugrid-queue-mincputime" => "CPU time, min. 
(minutes)", "nordugrid-queue-defaultcputime" => "CPU time, default (minutes)", "nordugrid-queue-schedulingpolicy" => "Scheduling policy", "nordugrid-queue-totalcpus" => "CPUs, total", "nordugrid-queue-nodecpu" => "CPU type", "nordugrid-queue-nodememory" => "Memory (MB)", "nordugrid-queue-architecture" => "Architecture", "nordugrid-queue-opsys" => "Operating system", "nordugrid-queue-gridrunning" => "Grid jobs, running", "nordugrid-queue-gridqueued" => "Grid jobs, queued", "nordugrid-queue-assignedcpunumber" => "CPUs per queue (OBSOLETE)", "nordugrid-queue-assignedcputype" => "CPU type (OBSOLETE)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Owner", "nordugrid-job-execcluster" => "Execution cluster", "nordugrid-job-execqueue" => "Execution queue", "nordugrid-job-stdout" => "Standard output file", "nordugrid-job-stderr" => "Standard error file", "nordugrid-job-stdin" => "Standard input file", "nordugrid-job-reqcput" => "Requested CPU time", "nordugrid-job-status" => "Status", "nordugrid-job-queuerank" => "Position in the queue", "nordugrid-job-lrmscomment" => "LRMS comment", "nordugrid-job-submissionui" => "Submission machine", "nordugrid-job-submissiontime" => "Submission time (GMT)", "nordugrid-job-usedcputime" => "Used CPU time", "nordugrid-job-usedwalltime" => "Used wall time", "nordugrid-job-sessiondirerasetime" => "Erase time (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxy expiration time (GMT)", "nordugrid-job-usedmem" => "Used memory (KB)", "nordugrid-job-errors" => "Errors", "nordugrid-job-jobname" => "Name", "nordugrid-job-runtimeenvironment" => "Runtime environment", "nordugrid-job-cpucount" => "Requested CPUs", "nordugrid-job-executionnodes" => "Execution nodes", "nordugrid-job-gmlog" => "GM log file", "nordugrid-job-gridlog" => "Gridlog file (OBSOLETE)", "nordugrid-job-clientsoftware" => "Client version", "nordugrid-authuser-name" => "Name", "nordugrid-authuser-sn" => "Subject Name", "nordugrid-authuser-freecpus" => "Free CPUs", "nordugrid-authuser-diskspace" => "Free disk space (MB)", "nordugrid-authuser-queuelength" => "Experienced queue length", "nordugrid-se-name" => "Domain name", "nordugrid-se-aliasname" => "Storage element alias", "nordugrid-se-type" => "Storage element type", "nordugrid-se-freespace" => "Free space (GB)", "nordugrid-se-totalspace" => "Total space (GB)", "nordugrid-se-baseurl" => "Contact URL", "nordugrid-se-authuser" => "Authorised user (DN)", "nordugrid-se-location" => "Postal code", "nordugrid-se-owner" => "Owner", "nordugrid-se-issuerca" => "Certificate issuer", "nordugrid-se-comment" => "Comment", "nordugrid-rc-name" => "Domain name", "nordugrid-rc-aliasname" => "Replica Catalog alias", "nordugrid-rc-baseurl" => "Contact URL", "nordugrid-rc-authuser" => "Authorised user (DN)", "nordugrid-rc-location" => "Postal code", "nordugrid-rc-owner" => "Owner", "nordugrid-rc-issuerca" => "Certificate issuer" ), // Errors, warnings etc "errors" => array( "1" => "Can not read top-level resource indices", "2" => "None of the local indices returned connection", "3" => " bad configuration or request timed out", "4" => "No Grid jobs found", "5" => "No information found", "6" => "Server unavailable", "7" => " - refresh later", "101" => " Monitor timeouts for GRIS: ", "102" => " sec on connection and ", "103" => " sec on search", "104" => " sec spent searching", "105" => "Showing resources only in ", "106" => "Polled top-level indices: ", "107" => "Got geographical locations, scanned sites: ", "108" => " sites arranged by geographical 
location", "109" => "Search for cluster attributes", "110" => "Search for queue attributes", "111" => "No data from ", "112" => " is up in ", "113" => " has no resources to offer", "114" => " Monitor timeouts for GIIS: ", "115" => "Skipping GRIS: ", "116" => "not a ", "117" => "Checking connection: ", "118" => "OK", "119" => "That far, detected resources of kind ", "120" => "LDAP error searching ", "121" => " status at ", "122" => "Blacklisted: ", "123" => "Registrant found for ", "124" => "Search for SE attributes", "125" => "Search for users", "126" => "Search for jobs", "127" => " has job ", "128" => " while not being authorized", "129" => "Can not get object data: error ", "130" => " Monitor timeouts for EMIR: ", "301" => "Refresh", "302" => "Print", "303" => "Help", "304" => "Close", "305" => "Red", "306" => "Grey", "307" => "All users", "308" => "Active users", "309" => "Search", "310" => "Storage", "311" => "VOs", "312" => "Flag of ", "313" => " Grid processes and ", "314" => " local processes", "401" => "Processes", "402" => "Grid", "403" => "Local", "404" => "World", "405" => "TOTAL", "406" => " sites", "407" => "a lot of", "408" => " GB", "409" => " ALL" ), // Post code conversion: only for [en]! "tlconvert" => array ( "AU" => "Australia", "CA" => "Canada", "CH" => "Switzerland", "DK" => "Denmark", "EE" => "Estonia", "FI" => "Finland", "FIN" => "Finland", "SF" => "Finland", "DE" => "Germany", "JP" => "Japan", "NO" => "Norway", "N" => "Norway", "SE" => "Sweden", "SK" => "Slovakia", "SI" => "Slovenia", "KEK" => "Japan", "TOKYO" => "Japan" ) ), "ru" => array ( // Table headers "loadmon" => array("0" => "Грид-монитор", "Страна" => 0, "Ресурс" => 0, "ЦП" => 0, "Загрузка" => 0, "Ожидают" => 0 ), "clusdes" => array("0" => "Описание кластера", "Очередь" => 0, "Состояние" => 0, "Длительность (мин)" => 0, "ЦП" => 0, "Считаются" => 0, "Ожидают" => 0 ), "jobstat" => array("0" => "Задачи на:Номер задачи", "Имя задачи" => 0, "Хозяин" => 0, "Состояние" => 0, "Время (мин)" => 0, "Очередь" => 0, "ЦП" => 0 ), "volist" => array("0" => "Виртуальные организации", "Виртуальная оргаизация" => 0, "Члены" => 0, "Обслуживается" => 0 ), "vousers" => array("0" => "Пользователи", "#" => 0, "Имя" => 0, "Место работы" => 0, "Электронная почта" => 0 ), "userlist" => array("0" => "Информация для", "" => 0, "Имя задачи" => 0, "Состояние" => 0, "Время (мин)" => 0, "Ресурс" => 0, "Очередь" => 0, "ЦП" => 0 ), "userres" => array("0" => "", "Ресурс:очередь" => 0, "Свободные ЦП" => 0, "Длина очереди" => 0, "Диск, доступно (Мб)" => 0 ), "attlist" => array("0" => "Значения аттрибутов", "Ресурс" => 0, "Значение" => 0 ), "quelist" => array("0" => "Описание очереди", "" => 0, "Имя задачи" => 0, "Хозяин" => 0, "Состояние" => 0, "Время (мин)" => 0, "ОЗУ (КБ)" => 0, "ЦП" => 0 ), "sestat" => array("0" => "Внешние запоминающие устройства", "#" => 0, "Название" => 0, "Весь объём" => 0, "Свободно" => 0, "Имя" => 0, "URL базы" => 0, "Тип" => 0 ), "allusers" => array("0" => "Допущенные пользователи:Активные пользователи", "#" => 0, "Имя" => 0, "Место работы" => 0, "Задачи" => 0, "Ресурсы" => 0 ), "ldapdump" => array("0" => "", "Аттрибут" => 0, "Значение" => 0 ), "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Данные действительны с (GMT)", "Mds-validto" => "Данные действительны по (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Имя головной машины", "nordugrid-cluster-aliasname" => "Название", "nordugrid-cluster-contactstring" => "Контактный адрес", "nordugrid-cluster-interactive-contactstring" => 
"Интерактивный адрес", "nordugrid-cluster-comment" => "Комментарий", "nordugrid-cluster-support" => "Е-почта ответственного", "nordugrid-cluster-lrms-type" => "СУПО, тип", "nordugrid-cluster-lrms-version" => "СУПО, версия", "nordugrid-cluster-lrms-config" => "СУПО, подробности", "nordugrid-cluster-architecture" => "Архитектура", "nordugrid-cluster-opsys" => "Операционная система", "nordugrid-cluster-homogeneity" => "Гомогенность ресурса", "nordugrid-cluster-nodecpu" => "Процессор, тип (худший)", "nordugrid-cluster-nodememory" => "ОЗУ (Мб, наименьшее)", "nordugrid-cluster-totalcpus" => "Процессоры, всего", "nordugrid-cluster-cpudistribution" => "Процессоры:узлы", "nordugrid-cluster-sessiondir-free" => "Диск, доступно (Мб)", "nordugrid-cluster-sessiondir-total" => "Диск, весь объём (Мб)", "nordugrid-cluster-cache-free" => "Дисковый кэш, свободно (Мб)", "nordugrid-cluster-cache-total" => "Дисковый кэш, всего (Мб)", "nordugrid-cluster-runtimeenvironment" => "Рабочая среда", "nordugrid-cluster-localse" => "ВЗУ, локальное", "nordugrid-cluster-middleware" => "Грид-ПО", "nordugrid-cluster-totaljobs" => "Задачи, всего", "nordugrid-cluster-usedcpus" => "Процессоры, занятые", "nordugrid-cluster-queuedjobs" => "Задачи, в очереди", "nordugrid-cluster-location" => "Почтовый индекс", "nordugrid-cluster-owner" => "Владелец", "nordugrid-cluster-issuerca" => "Сертификат выдан", "nordugrid-cluster-nodeaccess" => "IP-соединение узлов", "nordugrid-cluster-gridarea" => "Адрес сессий (СТАРЫЙ)", "nordugrid-cluster-gridspace" => "Грид-диск (СТАРЫЙ)", "nordugrid-cluster-opsysdistribution" => "Дистрибутив ОС (СТАРЫЙ)", "nordugrid-cluster-runningjobs" => "Задачи, в счёте (СТАРЫЙ)", "nordugrid-queue-name" => "Имя очереди", "nordugrid-queue-status" => "Состояние очереди", "nordugrid-queue-running" => "Задачи, в счёте", "nordugrid-queue-queued" => "Задачи, в очереди", "nordugrid-queue-maxrunning" => "Задачи, в счёте (предел)", "nordugrid-queue-maxqueuable" => "Задачи, в очереди (предел)", "nordugrid-queue-maxuserrun" => "Задачи на пользователя (предел)", "nordugrid-queue-maxcputime" => "Длительность, наиб. (мин)", "nordugrid-queue-mincputime" => "Длительность, наим. (мин)", "nordugrid-queue-defaultcputime" => "Длительность, по ум. 
(мин)", "nordugrid-queue-schedulingpolicy" => "Правила планировки", "nordugrid-queue-totalcpus" => "Процессоры, всего", "nordugrid-queue-nodecpu" => "Процессор, тип", "nordugrid-queue-nodememory" => "ОЗУ (Мб)", "nordugrid-queue-architecture" => "Архитектура", "nordugrid-queue-opsys" => "Операционная система", "nordugrid-queue-gridrunning" => "Грид-задачи, в счёте", "nordugrid-queue-gridqueued" => "Грид-задачи, в очереди", "nordugrid-queue-assignedcpunumber" => "Процессоры (СТАРЫЙ)", "nordugrid-queue-assignedcputype" => "Тип процессора (СТАРЫЙ)", "nordugrid-job-globalid" => "Номер", "nordugrid-job-globalowner" => "Хозяин", "nordugrid-job-execcluster" => "Выполняющий кластер", "nordugrid-job-execqueue" => "Выполняющая очередь", "nordugrid-job-stdout" => "Стандартный выход", "nordugrid-job-stderr" => "Стандартная ошибка", "nordugrid-job-stdin" => "Стандартный вход", "nordugrid-job-reqcput" => "Запрошенное время", "nordugrid-job-status" => "Состояние", "nordugrid-job-queuerank" => "Положение в очереди", "nordugrid-job-lrmscomment" => "Комментарий СУПО", "nordugrid-job-submissionui" => "Засылающий клиент", "nordugrid-job-submissiontime" => "Время засылки (GMT)", "nordugrid-job-usedcputime" => "Использованное время ЦП", "nordugrid-job-usedwalltime" => "Использованное время", "nordugrid-job-sessiondirerasetime" => "Срок уничтожения (GMT)", "nordugrid-job-proxyexpirationtime" => "Окончание доверенности (GMT)", "nordugrid-job-usedmem" => "Использование ОЗУ (Кб)", "nordugrid-job-errors" => "Ошибки", "nordugrid-job-jobname" => "Имя", "nordugrid-job-runtimeenvironment" => "Рабочая среда", "nordugrid-job-cpucount" => "Запрошено процессоров", "nordugrid-job-executionnodes" => "Выполняющие узлы", "nordugrid-job-gmlog" => "Журнальная запись ГМ", "nordugrid-job-gridlog" => "Грид-запись (СТАРЫЙ)", "nordugrid-job-clientsoftware" => "Версия клиента", "nordugrid-authuser-name" => "Имя", "nordugrid-authuser-sn" => "Субъект", "nordugrid-authuser-freecpus" => "Свободные ЦП", "nordugrid-authuser-diskspace" => "Диск, доступно (Мб)", "nordugrid-authuser-queuelength" => "Длина очереди", "nordugrid-se-name" => "Доменное имя", "nordugrid-se-aliasname" => "Название", "nordugrid-se-type" => "Тип", "nordugrid-se-freespace" => "Свободный объём (Гб)", "nordugrid-se-totalspace" => "Весь объём (Гб)", "nordugrid-se-baseurl" => "Контактный адрес", "nordugrid-se-authuser" => "Допущенные ползьзователи (DN)", "nordugrid-se-location" => "Почтовый индекс", "nordugrid-se-owner" => "Владелец", "nordugrid-se-issuerca" => "Сертификат выдан", "nordugrid-se-comment" => "Комментарий", "nordugrid-rc-name" => "Доменное имя", "nordugrid-rc-aliasname" => "Название", "nordugrid-rc-baseurl" => "Контактный адрес", "nordugrid-rc-authuser" => "Допущенные пользователи (DN)", "nordugrid-rc-location" => "Почтовый индекс", "nordugrid-rc-owner" => "Владелец", "nordugrid-rc-issuerca" => "Сертификат выдан" ), "errors" => array( "1" => "Невозможно прочесть списки высшего уровня", "2" => "Ни один из местных списков не отзывается", "3" => " неверная конфигурация или истекло время запроса", "4" => "Не обнаружено Грид-задач", "5" => "Нет информации", "6" => "Служба недоступна", "7" => " - попробуйте обновить поззже", "101" => " Время на связь с локальным списком: ", "102" => " с на соединение и ", "103" => " с на поиск", "104" => " с затрачено на поиск", "105" => "Перечисление ресурсов: ", "106" => "Опрошено списков верхнего уровня: ", "107" => "Получены географические координаты, просканировано ресурсов: ", "108" => " ресурсов упорядочено по геополитическому 
признаку", "109" => "Поиск аттрибутов кластера", "110" => "Поиск аттрибутов очереди", "111" => "Нет данных с ", "112" => " фукционирует в стране: ", "113" => " не располагает ресурсами", "114" => " Время на связь с глобальным списком: ", "115" => "Игнорируется ресурс: ", "116" => "не соответствует типу ", "117" => "Проверка связи: ", "118" => "есть", "119" => "На данный момент обнаружено ресурсов типа ", "120" => "Ошибка LDAP при поиске на ", "121" => "-состояние на ", "122" => "Заблокирован: ", "123" => "Обнаружен регистрант ", "124" => "Поиск аттрибутов ВЗУ", "125" => "Поиск пользователей", "126" => "Поиск задач", "127" => " запустил(а) задачу ", "128" => " не будучи допущенным(ой)", "301" => "Перезагрузить", "302" => "Печать", "303" => "Помощь", "304" => "Закрыть", "305" => "Красный", "306" => "Серый", "307" => "Все пользователи", "308" => "Активные пользователи", "309" => "Поиск", "310" => "ВЗУ", "311" => "Виртуальные организации", "312" => "Флаг страны: ", "313" => " Грид-процессов и ", "314" => " местных процессов", "401" => "Процессы", "402" => "Грид", "403" => "местные", "404" => "Мир", "405" => "ВСЕГО", "406" => " ресурс(а)(ов)", "407" => "куча", "408" => " Гб", "409" => " ВСЕ" ), // Country name conversion, no postcode! "tlconvert" => array ( "Australia" => "Австралия", "Canada" => "Канада", "Switzerland" => "Швейцария", "Denmark" => "Дания", "Estonia" => "Эстония", "Finland" => "Финляндия", "Germany" => "Германия", "Japan" => "Япония", "Norway" => "Норвегия", "Sweden" => "Швеция", "Slovakia" => "Словакия", "Slovenia" => "Словения", "World" => "Мир" ) ) ); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315734026071 xustar000000000000000030 mtime=1513200604.843072895 30 atime=1513200651.580644514 30 ctime=1513200663.760793482 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/Makefile.in0000644000175000002070000004361513214315734026150 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/ldap-monitor/includes DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(monitorincdir)" DATA = $(monitorinc_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS 
= @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = 
@LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ 
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ monitorincdir = @ldap_monitor_prefix@/includes monitorinc_DATA = $(srcdir)/*.inc EXTRA_DIST = $(monitorinc_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/ldap-monitor/includes/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/ldap-monitor/includes/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-monitorincDATA: $(monitorinc_DATA) @$(NORMAL_INSTALL) test -z "$(monitorincdir)" || $(MKDIR_P) "$(DESTDIR)$(monitorincdir)" @list='$(monitorinc_DATA)'; test -n "$(monitorincdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(monitorincdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(monitorincdir)" || exit $$?; \ done uninstall-monitorincDATA: @$(NORMAL_UNINSTALL) @list='$(monitorinc_DATA)'; test -n "$(monitorincdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(monitorincdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(monitorincdir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if 
test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(monitorincdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-monitorincDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-monitorincDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man \ install-monitorincDATA install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am uninstall uninstall-am uninstall-monitorincDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/attlist.inc0000644000000000000000000000012411506337146026207 xustar000000000000000027 mtime=1293532774.731583 27 atime=1513200575.668716 30 ctime=1513200663.761793494 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/attlist.inc0000644000175000002070000001533311506337146026261 0ustar00mockbuildmock00000000000000".$errors[$errno]."\n"; return $errno; } $dsarray = array (); $hnarray = array (); $pnarray = array (); $sitetag = array (); /* a tag to skip duplicated entries */ for ( $k = 0; $k < $nc; $k++ ) { $clhost = $gentries[$k]["host"]; $clport = $gentries[$k]["port"]; $clconn = ldap_connect($clhost,$clport); if ( $clconn && !$sitetag[$clhost] ) { array_push($dsarray,$clconn); array_push($pnarray,$clport); $sitetag[$clhost] = 1; /* filtering tag */ } } $nhosts = count($dsarray); if ( !$nhosts ) { // NO SITES REPLY... $errno = "2"; echo "
    ".$errors[$errno]."\n"; return $errno; } // Search all clusters $srarray = @ldap_search($dsarray,DN_LOCAL,$filter,$lim,0,0,$tlim,LDAP_DEREF_NEVER); echo "\n"; // HTML table initialisation array_unshift($engatts,$errors["425"]); $jtable = new LmTableFree($engatts); $rowcont = array(); $tabcont = array(); $rc = 0; for ( $ids = 0; $ids < $nhosts; $ids++ ) { $sr = $srarray[$ids]; $dst = $dsarray[$ids]; $pn = $pnarray[$ids]; if ($dst && $sr) { // If search returned, check that there are valid entries $nmatch = @ldap_count_entries($dst,$sr); if ($nmatch > 0) { // If there are valid entries, tabulate results $allentries = ldap_get_entries($dst,$sr); $entries = ldap_purge($allentries); if ( $object == OBJ_AJOB ) { define("CMPKEY",JOB_STAT); //usort($entries,"ldap_entry_comp"); } $nclus = $entries["count"]; for ($i=0; $i<$nclus; $i++) { $cluster = "N/A"; $queue = "N/A"; $job = "N/A"; $currdn = $entries[$i]["dn"]; $currdn = preg_replace("/\"/","",$currdn); $dnparts = ldap_explode_dn($currdn,0); foreach ($dnparts as $part) { $pair = explode("=",$part); switch ( $pair[0] ) { case CLU_NAME: $cluster = $pair[1]; break; case SEL_NAME: $se = $pair[1]; break; case QUE_NAME: $queue = $pair[1]; break; case JOB_GLID: $job = $pair[1]; $encjob = rawurlencode($currdn); break; } } $sort = "cluster"; // 410: cluster; 411: queue; 412: job; 413: user; 414: SE switch ( $object ) { case OBJ_CLUS: $resource = $errors["410"]." $cluster"; $winstring = popup("clusdes.php?host=$cluster&port=$pn",700,620,1); break; case OBJ_QUEU: $resource = $errors["410"]." $cluster, ".$errors["411"]." $queue"; $winstring = popup("quelist.php?host=$cluster&port=$pn&qname=$queue",750,430,6); break; case OBJ_USER: $resource = $errors["410"]." $cluster, ".$errors["411"]." $queue"; $winstring = popup("quelist.php?host=$cluster&port=$pn&qname=$queue",750,430,6); break; case OBJ_AJOB: $resource = $errors["412"]." $job"; $winstring = popup("jobstat.php?host=$cluster&port=$pn&status=&jobdn=$encjob",750,430,4); break; case OBJ_STEL: $resource = $errors["414"]." $se"; $winstring = ""; break; } $rc++; $rowcont[0] = ( $winstring ) ? "$rc $resource" : "$rc $resource"; // determine maximum row count per object $vcount = 0; foreach ( $attributes as $attribute ) { if ( !$attribute ) continue; $ccount = $entries[$i][$attribute]["count"]; $vcount = ( $ccount > $vcount ) ? 
$ccount : $vcount; } if ($vcount == 0) $jtable->adderror($resource); $attrtag = array(); for ( $j = 0; $j < $vcount; $j++ ) { $attval = ""; $attcheck = FALSE; for ( $k = 0; $k < $n ; $k++ ) { $attribute = $attributes[$k]; if ( !$attribute || @in_array($attribute,$attrtag[$j]) ) continue; if ( $entries[$i][$attribute][$j] ) { $attval = $entries[$i][$attribute][$j]; $attcheck = TRUE; } else { $attval = " "; } // Some time-stamp readability adjustment if (substr(strrchr($attribute, "-"), 1) == "sessiondirerasetime" || substr(strrchr($attribute, "-"), 1) == "submissiontime" || substr($attribute,0,9) == "Mds-valid" ) $attval=cnvtime($attval); $rowcont[] = htmlentities($attval); $attrtag[$j][] = $attribute; } if ( $attcheck ) { $tabcont[] = $rowcont; } else { $rc--; } // if ( $attcheck ) $jtable->addrow($rowcont); $rowcont = array(); $rowcont[0] = " "; } } } } @ldap_free_result($sr); } foreach ( $tabcont as $row ) $jtable->addrow($row,""); $jtable->close(); return 0; } ?>nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/ldap_nice_dump.inc0000644000000000000000000000012412050711011027444 xustar000000000000000027 mtime=1352897033.257021 27 atime=1513200575.678716 30 ctime=1513200663.769793592 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/ldap_nice_dump.inc0000644000175000002070000001040612050711011027512 0ustar00mockbuildmock00000000000000"; // Plain LDAP dump for the DN $filstr = "(objectclass=*)"; if ( strpos($dn, DN_GLUE) ) { $filstr = "(|(objectClass=".GOBJ_CLUS.")(objectClass=".GOBJ_MAN.")(objectClass=".GOBJ_LOC.")(objectClass=".GOBJ_QUEU.")(objectClass=".GOBJ_CON."))"; if ( strpos(strtolower(" ".$dn), GJOB_GLID) ) { $filstr = "(|(objectClass=".GOBJ_AJOB."))"; } } $sr = @ldap_search($ds,$dn,$filstr,array("*"),0,0,$tlim,LDAP_DEREF_NEVER); if ($sr) { // If search returned, check that there are valid entries $nmatch = ldap_count_entries($ds,$sr); if ($nmatch > 0) { // If there are valid entries, tabulate results $first = ldap_first_entry($ds,$sr); if ( !strpos($dn, DN_GLUE) ) $nmatch = 1; for ( $j=0; $j<$nmatch; $j++){ $entries = ldap_get_attributes($ds,$first); $nfields = $entries["count"]; // get the Distinguished Name $thisdn = ldap_get_dn($ds,$first); // HTML table initialisation $dtable = new LmTableSp("ldapdump",$strings["ldapdump"]); // add the DN entry $drowcont = array("".$errors["420"]."",$thisdn); $dtable->addrow($drowcont, "#cccccc"); $drowcont = array(); // loop on the rest of attributes for ($i=0; $i<$nfields; $i++) { $curatt = $entries[$i]; if ( $exclude && in_array($curatt,$exclude) ) continue; $engatt = ($isattr[$curatt]) ? $isattr[$curatt] : $curatt; $nval = $entries[$curatt]["count"]; $encatt = rawurlencode($curatt); $attwin = popup("attlist.php?attribute=$encatt",650,300,7); $attstring = @( $mdsattr[$curatt] ) ? 
"$engatt" : "$engatt"; $drowcont[0] = $attstring; $drowcont[1] = " "; if ($nval==0) $dtable->addrow($drowcont); $drowcont[1] = ""; if ( $nval > 4 ) $drowcont[1] = $fhead; for ($k=0; $k<$nval; $k++) { $curval = $entries[$curatt][$k]; // Strip HTML tags some smart folks are adding $curval = strip_tags($curval); // Some time-stamp readability adjustment if ( strlen($curval) == 15 && $curval{14} == "Z" ) $curval=cnvtime($curval); $encval = htmlspecialchars($curval,ENT_QUOTES,"UTF-8"); // E-mail masquerading for short lists (dunno what to do with long lists) if (strpos($curval,"@",1) && $nval<5) { $m = mylo ($curval); if ( $m[0] ) $encval = ""; } if ( $nval > 4 ) { $drowcont[1] .= "$encval"; if ( $k < $nval-1 ) $drowcont[1] .= "\n"; } else { $drowcont[1] .= $encval; if ( $k < $nval-1 ) $drowcont[1] .= "
     "; } } if ( $nval > 4 ) $drowcont[1] .= $ftail; $dtable->addrow($drowcont); } $dtable->close(); echo "
    "; $first = ldap_next_entry($ds,$first); } ldap_free_result($sr); return $thisdn; } else { $errno = 9; echo "
    ".$errors[$errno]."\n"; return $errno; } } else { $errno = 5; echo "
    ".$errors[$errno]."\n"; return $errno; } } ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/cnvtime.inc0000644000000000000000000000012411506337146026170 xustar000000000000000027 mtime=1293532774.731583 27 atime=1513200575.678716 30 ctime=1513200663.765793543 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/cnvtime.inc0000644000175000002070000000055211506337146026237 0ustar00mockbuildmock00000000000000 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/archery.inc0000644000000000000000000000012413153453630026155 xustar000000000000000027 mtime=1504597912.048594 27 atime=1513200575.673716 30 ctime=1513200663.760793482 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/archery.inc0000644000175000002070000001260413153453630026225 0ustar00mockbuildmock00000000000000" .$error_str."
  • \n"); } /** * @param $hostname * @return int */ function check_blacklist($hostname) { global $blacklist; if ( ! isset($blacklist) ) { return 0; } if ( in_array($hostname, $blacklist) ) { return $blacklist[$hostname]; } return 0; } /** * @param $endpoint * @param $errors * @param $debug * @return array */ function query_dns_archery($endpoint, $errors, $debug) { $archery_endpoints = array(); // get dns record hostname according to ARCHERY entree point convention if (substr($endpoint, 0, 6) == 'dns://') { $dns_endpoint = substr($endpoint, 6); } else { $dns_endpoint = '_archery.' . $endpoint; } // perform query $dnsquery = dns_get_record($dns_endpoint, DNS_TXT); if ( $dnsquery === FALSE ) { if ( $debug ) dbgerr_html($errors["132"].$endpoint); return $archery_endpoints; } // parse query foreach ( $dnsquery as $dnsrr ) { if (isset($dnsrr['entries'])) { foreach ($dnsrr['entries'] as $dnsrr_value) { $erecord_arr = array(); $akv = explode(' ', $dnsrr_value); foreach ($akv as $kv) { $ae = explode('=', $kv, 2); if ( count($ae) == 2 ) { $erecord_arr[$ae[0]] = $ae[1]; } } $archery_endpoints[] = $erecord_arr; } } else { if ( $debug ) dbgerr_html($errors["132"].$endpoint); } } return $archery_endpoints; } /** * @param $endpoint * @param $schema * @param $errors * @param int $debug * @param int $looplimit * @return array */ function recursive_archery_info ($endpoint, $schema, $errors, $debug=0, $looplimit=5) { $endpoints = array(); // Just in case recursion limit if ( $looplimit == 0 ) { dbgerr_html($errors["133"].$endpoint); return $endpoints; } // Query archery for endpoints info $archery_endpoints = query_dns_archery($endpoint, $errors, $debug); foreach ($archery_endpoints as $ainfo) { if ($ainfo['t'] == 'org.nordugrid.archery') { $more_endpoints = recursive_archery_info ($ainfo['u'], $schema, $errors, $debug=0, $looplimit-1); $endpoints = array_merge($endpoints, $more_endpoints); } elseif ($ainfo['t'] == 'org.nordugrid.ldapegiis') { //TODO: invoke egiis query continue; } elseif ($ainfo['t'] == 'org.nordugrid.emir') { //TODO: invoke emir query (or maybe we should abandone it) continue; } elseif ($ainfo['t'] == 'org.nordugrid.ldapng') { if ( $schema !== 'NG' ) continue; // ldap://:2135/Mds-Vo-Name=local,o=grid $parsed_url = array(); if ( preg_match('/^ldap:\/\/(?P[^:]+):(?[0-9]+)\/(?P.*)/', $ainfo['u'], $parsed_url) ) { if ( check_blacklist($parsed_url['host'])) { if ( $debug ) dbgerr_html($errors["122"].$parsed_url['host']); continue; } $endpoints[] = array ( 'host' => $parsed_url['host'], 'port' => $parsed_url['port'], 'base' => "nordugrid-cluster-name=".$parsed_url['host'].",".$parsed_url['base'] ); } } elseif ($ainfo['t'] == 'org.nordugrid.ldapglue2') { if ( $schema !== 'GLUE2' ) continue; // ldap://:2135/o=glue $parsed_url = array(); if ( preg_match('/^ldap:\/\/(?P[^:]+):(?[0-9]+)\/(?P.*)/', $ainfo['u'], $parsed_url) ) { if ( check_blacklist($parsed_url['host'])) { if ( $debug ) dbgerr_html($errors["122"].$parsed_url['host']); continue; } $endpoints[] = array ( 'host' => $parsed_url['host'], 'port' => $parsed_url['port'], // dirty hack, monitor only works with array of ldapng endpoints even for GLUE2 :-) 'base' => "nordugrid-cluster-name=".$parsed_url['host'].",".DN_LOCAL ); } } elseif ($ainfo['t'] == 'org.ogf.glue.emies.resourceinfo') { // silently skip EMIES endpoints for now continue; } else { if ($debug) dbgerr_html(sprintf($errors["134"],$ainfo['t'],$ainfo['u'])); } } return $endpoints; } /** * @return array * @param archery_list array * @param schema string * @param debug integer * @param 
loopcnt integer * @desc Returns list of LDAP endpoints */ function archery_info($archery_list, $schema, $errors, $debug="0") { // show the debug message regarding ARCHERY timeouts if($debug && ! empty($archery_list)) { dbgmsg("
    :::> " . $errors["131"] . " <:::

    "); } // start recursively querying ARCHERY $entries = array(); foreach ( $archery_list as $archery ) { $entries = array_merge($entries, recursive_archery_info($archery['endpoint'], $schema, $errors, $debug)); } return $entries; } ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/headfoot.inc0000644000000000000000000000012313153453630026310 xustar000000000000000027 mtime=1504597912.048594 27 atime=1513200575.670716 29 ctime=1513200663.76879358 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/headfoot.inc0000644000175000002070000002175013153453630026363 0ustar00mockbuildmock00000000000000\n"; //echo "\n"; echo "\n"; echo "\n"; echo "\n"; echo "\n"; echo "\n"; echo "\n"; if ( $wintyp ) { $this->module = $wintyp; // Localize $yaccept = @$_SERVER["HTTP_ACCEPT_LANGUAGE"] ; if ( !$yaccept ) $yaccept = "en"; if ( FORCE_LANG != "default" ) $yaccept = FORCE_LANG; $yazyk = "en"; $yazyki = explode(",",$yaccept); foreach ( $yazyki as $option ) { if ( $yazyk != "en" ) continue; $option = trim($option); $option = substr($option,0,2); // some sniffing // touch("test/".$option); // echo "\n"; $locfile = $option.".inc"; if ( !file_exists("lang/".$locfile) ) continue; $yazyk = $option; } $locfile = $yazyk.".inc"; include $locfile; setlocale(LC_ALL, $yazyk); $this->language = $yazyk; $this->strings = $message; $this->errors = $message["errors"]; $this->countries = $message["tlconvert"]; $this->mdsattr = $message["mdsattr"]; $this->isattr = $message["isattr"]; // Assigns $this->clusdes = $message["clusdes"]; $this->$wintyp = $message[$wintyp]; $toptitle = $message[$wintyp][0]; // Set page parameters require ('settings.inc'); $inpnam = implode("_",array("def",$wintyp)); // Page style definitions (see settings.inc) // Sets top window title $this->title = ( $toptitle ) ? $toptitle : ""; // Refresh rate $this->refresh = (${$inpnam}["refresh"]) ? ${$inpnam}["refresh"] : 0; // Background and link colors $this->bg = (${$inpnam}["bgcolor"]) ? ${$inpnam}["bgcolor"] : "#ffffff"; $this->lc = (${$inpnam}["lcolor"]) ? ${$inpnam}["lcolor"] : "#cc0000"; // Dumps the header HTML code $titles = explode(":",$this->title); // sometimes titles are many echo "".$titles[0]." ".$extratitle."\n"; if ( $this->refresh ) echo "\n"; echo "\n"; // define giislist if ( ! isset($emirslist) ) { $emirslist = array (); } if ( ! isset($archery_list)) { $archery_list = array (); } $this->giislist = $giislist; $this->emirslist = $emirslist; $this->cert = $cert; $this->archery_list = $archery_list; } // Finishes HTML header, starts document body echo "\n"; echo "\n"; echo "
    \n"; } /** * @return void * @param errors array * @param title string * @param subtitle string * @desc Makes an opening Monitor header */ function tabletop ( $toptitle="", $subtitle="" ) { // function tabletop() $lang = FORCE_LANG; echo "\n"; echo "\n"; echo "\n"; echo "
    ".$toptitle."
    ".$subtitle."\n"; echo " errors["301"]."\" alt=\"".$this->errors["301"]."\">\n"; echo " \n"; echo " errors["302"]."\" alt=\"".$this->errors["302"]."\">\n"; echo " module."&lang=".$lang."',400,300,10);\" onClick=\"javascript:monitor('help.php?module=".$this->module."',400,300,10);\">\n"; echo " errors["303"]."\" alt=\"".$this->errors["303"]."\">\n"; echo " \n"; echo " errors["304"]."\" alt=\"".$this->errors["304"]."\">\n"; echo "
    \n"; } /** * @return string * @desc returns version number from README */ function getVersion () { $v = "N/A"; if ( file_exists("README") ) { $readme = fopen("README","r"); $fline = fgets($readme); $v = substr(stristr($fline,"version "),8); fclose($readme); } $this->version = $v; return $v; } /** * @return void * @desc Closes an HTML document */ function close () { // Closes the HTML document echo "\n
    \n"; echo "\n"; ob_end_flush(); ob_implicit_flush(); } } /** * Below are some generic functions, non-class-specific * * function dbgmsg ( string ) : prints out a message and flushes output; useful for debugging * function popup ( string, int, int, int ) : opens up a new window, depending on the client */ /** * @return void * @param dbgtxt string * @desc Outputs a debug message outside the table */ function dbgmsg( $dbgtxt="Debug" ) { echo "$dbgtxt\n"; flush(); } /** * @return void * @param contact string * @param x int * @param y int * @param n int * @desc Returns a new monitor window URL */ $agent = @$_SERVER["HTTP_USER_AGENT"] ; if ( !defined("USERAGENT") ) define("USERAGENT",$agent); function popup( $contact, $x=400, $y=300, $n=1, $lang, $debug ) { ( USERAGENT ) ? $agent = USERAGENT : $agent = "lynx"; if ( preg_match("/opera/i",$agent) || preg_match("/lynx/i",$agent) || preg_match("/konqueror/i",$agent) ) return $contact; // $link = "javascript:monitor('".$contact."',$x,$y,$n)"; if ( $lang != "default" && $lang != FALSE ) { if ( strpos($contact,"?") ) { $contact .= "&" ; } else { $contact .= "?" ; } $contact .= "lang=$lang"; } if ( $debug ) { if ( strpos($contact,"?") ) { $contact .= "&" ; } else { $contact .= "?" ; } $contact .= "debug=$debug"; } $link = $contact."\" target=\"win".$n."\" onClick=\"monitor('".$contact."',$x,$y,$n); return false"; return $link; } ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/ldap_purge.inc0000644000000000000000000000012411506337146026645 xustar000000000000000027 mtime=1293532774.731583 27 atime=1513200575.668716 30 ctime=1513200663.770793604 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/ldap_purge.inc0000644000175000002070000000176611506337146026724 0ustar00mockbuildmock00000000000000### purged DN:".$curdn."

    \n"; } } $entries["count"] = $storesize; return $entries; } ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/lmtable.inc0000644000000000000000000000012412050701227026131 xustar000000000000000027 mtime=1352893079.143025 27 atime=1513200575.667716 30 ctime=1513200663.771793616 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/lmtable.inc0000644000175000002070000002165112050701227026203 0ustar00mockbuildmock00000000000000color_header = (${$inpnam}["thcolor"]) ? ${$inpnam}["thcolor"] : "#999999"; $this->color_bg = (${$inpnam}["tbcolor"]) ? ${$inpnam}["tbcolor"] : "#f0f0f0"; $this->font_title = (${$inpnam}["thfont"]) ? ${$inpnam}["thfont"] : "color=\"#ffffff\""; $this->font_main = (${$inpnam}["tbfont"]) ? ${$inpnam}["tbfont"] : "color=\"#000000\""; $this->columns = $locset; $this->ncols = 0; echo "color_bg."\">\n"; echo "color_header."\">\n"; $colnr = 0; if ( $wintyp == "clusdes" && $schema != "GLUE2" ) { $position = 3; $keys = array_keys($locset); unset($locset[$keys[$position]]); } foreach ( $locset as $colnam => $colwid) { if ( $colnam == "0" || $colnam == "help" ) continue; $this->ncols ++; $colnr++; $value = $colnam; if ( $schema == "GLUE2" && $value == "Queue") { $value = "Share Name"; } // Specific sorting links for the front module if ( $wintyp == "loadmon" ) { // Keep old arguments, if any, except of order $allargs = ""; foreach ( $_GET as $argm => $argval ) { if ( $argm == "order" ) continue; $allargs .= $argm."=".$argval."&"; } $str1 = "font_title.">".$value.""; if ( $colnr == 1 ) $value = $str1."country".$str2; elseif ( $colnr == 3 ) $value = $str1."cpu".$str2; elseif ( $colnr == 4 ) $value = $str1."grun".$str2; } $width = ($colwid)?$colwid:"1%"; echo "\n"; } echo "\n"; } /** * @return void * @param contents array * @desc Draws a table row */ function addrow( $contents, $bgcol="" ) { if ( count($contents) != $this->ncols ) { $this->adderror("Incompatible data"); return 1; } $this->contents = $contents; if ($bgcol) { echo "\n"; } else { echo "\n"; } foreach ($contents as $colent) { $value = $colent; echo "\n"; } echo "\n"; } /** * @return void * @param color string * @desc Draws a spanning row containing a spacer */ function addspacer( $color="#000000" ) { echo "\n"; echo ""; echo "\n"; } /** * @return void * @param errtxt string * @desc Draws a spanning row containing error message */ function adderror( $errtxt="Error", $bgcol="" ) { $this->errtxt = $errtxt; echo "\n"; echo ""; echo "\n"; } /** * @return void * @param errtxt string * @param nrows integer * @param color string * @desc Adds a cell spanning $nrows rows */ function rowspan( $nrows, $errtxt=" ", $color="#ffffcc" ) { $this->errtxt = $errtxt; $ncols = $this->ncols - 1; $nrows = $nrows + 1; echo "\n"; echo ""; echo ""; echo "\n"; } /** * @return void * @desc Closes a table */ function close() { echo "
    font_title."> $value 
    font_main."> $value 
    ncols."\" bgcolor=\"$color\" height=\"0\">\"\"
    ncols."\""; if ($bgcol) echo " bgcolor=\"$bgcol\""; echo ">font_main."> $errtxt
     $errtxt\"\"
    \n"; # ob_end_flush(); ob_implicit_flush(FALSE); } } class LmTableSp extends LmTable { var $spcolor; /** * @return void * @param contents array * @param color string * @desc Draws a table row with a spacer above */ function addrow( $contents, $bgcol="", $color="#ffffff" ) { $ncols = count($contents); $this->contents = $contents; if ($bgcol) { echo "\n"; } else { echo "\n"; } foreach ($contents as $colent) { $value = $colent; echo "font_main."> $value \n"; } echo "\n"; echo "\n"; echo "\"\""; echo "\n"; } /** * @return void * @param errtxt string * @param color string * @desc Draws a spanning row containing error message */ function adderror( $errtxt="Error", $color="#ffffff", $bgcol="" ) { $this->errtxt = $errtxt; $ncols = $this->ncols; $tospan = $this->rowspan; if ( $tospan ) $ncols = $ncols - 1; echo "\n"; echo "\"\""; echo "\n"; echo "\n"; echo "ncols."\""; if ($bgcol) echo " bgcolor=\"$bgcol\""; echo ">font_main."> $errtxt"; echo "\n"; } /** * @return void * @param errtxt string * @param nrows integer * @param color string * @desc Adds a cell spanning $nrows rows */ function rowspan( $nrows, $errtxt=" ", $color="#ffffcc" ) { $this->errtxt = $errtxt; $ncols = $this->ncols - 1; $nrows = (2 * $nrows) + 1; echo "\n"; echo " $errtxt"; echo "\"\""; echo "\n"; } } class LmTableFree extends LmTableSp { /** * @return LmTableFree * @param headers array * @desc Starts an HTML table */ function LmTableFree( $headers ) { ob_implicit_flush(0); ob_start(); $this->color_header = "#666666"; $this->color_bg = "#f0f0f0"; $this->font_title = "color=\"#ffffff\""; $this->font_main = "color=\"#000000\""; $this->columns = count($headers); $this->ncols = 0; echo "color_bg."\">\n"; echo "color_header."\">\n"; foreach ( $headers as $colnam ) { $this->ncols ++; $value = $colnam; $width = "1%"; echo "\n"; } echo "\n"; } } ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/postcode.inc0000644000000000000000000000012411506337146026343 xustar000000000000000027 mtime=1293532774.731583 27 atime=1513200575.668716 30 ctime=1513200663.774793653 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/postcode.inc0000644000175000002070000000560411506337146026415 0ustar00mockbuildmock00000000000000$tout sec t/o"; if ( !$record ) continue; $nrecords = $record["count"]; /* should be 1 */ for ($m = 0; $m < $nrecords; $m++) { $curcod = $record[$m][CLU_ZIPC][0]; if ( $curcod ) $cllist[$idx]["zvoname"] = cnvvo($curcod,$curnam); } } return($cllist); } /** * @return string * @param curnam string * @desc Guesses geographical location of a cluster */ function guess_country($curnam, $zip) { // Dumb domain name guess by 2 last letters $zvoname = cnvvo("",$curnam); // overwrite the previous decision if country code is set in the postal code if ( $zip ) $zvoname = cnvvo($zip,$curnam); return $zvoname; } ?>nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/mylo.inc0000644000000000000000000000012411506337146025503 xustar000000000000000027 mtime=1293532774.731583 27 atime=1513200575.667716 30 ctime=1513200663.773793641 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/mylo.inc0000644000175000002070000000124011506337146025545 0ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/cnvname.inc0000644000000000000000000000012411506337146026152 xustar000000000000000027 mtime=1293532774.731583 27 atime=1513200575.670716 30 ctime=1513200663.764793531 
nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/cnvname.inc0000644000175000002070000000401111506337146026213 0ustar00mockbuildmock00000000000000 1 && $family != "Doe") { /* catch for the tutorials */ $doestr = substr($family,1,1); /* returns "1" if it is a number, or a letter if it's a name */ if ( preg_match("/[0-9]/",$doestr) ) { $number = array_pop($names); $family = end($names); } // $family = substr(strrchr($uname, " "), 1); $name = $cn{0}."."; /* First letter of the name (doesn't work with 8-bit strings) */ if ( $flag == 2 ) $name = $names[0]; eval("\$name = \"$name\";"); $family = $name." ".$family; } else { $family = $cn; } if ( !$family ) return $uname /* Give up */; return $family; } /** * @return string * @param uname string * @desc Takes user DN and attempts to extract her affiliation */ function getorg ( $uname ) { $uname = trim($uname); $pieces = explode("/L=", $uname); if ( count($pieces) == 1 ) $pieces = explode("/DC=", $uname); if ( count($pieces) == 1 ) $pieces = explode("/OU=", $uname); if ( count($pieces) == 1 ) $pieces = explode("/O=", $uname); $org = end($pieces); $tailpos = strpos($org, "/"); if ( $tailpos ) $org = substr($org,0,$tailpos); return $org; } ?>nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/settings.inc0000644000000000000000000000012413153453630026360 xustar000000000000000027 mtime=1504597912.048594 27 atime=1513200575.676716 30 ctime=1513200663.775793665 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/settings.inc0000644000175000002070000004163513153453630026436 0ustar00mockbuildmock00000000000000 "index1.nordugrid.org", "port" => "2135", "base" => "mds-vo-name=NorduGrid,o=grid", "vo" => "NorduGrid"), array("host" => "index2.nordugrid.org", "port" => "2135", "base" => "mds-vo-name=NorduGrid,o=grid", "vo" => "NorduGrid"), array("host" => "index3.nordugrid.org", "port" => "2135", "base" => "mds-vo-name=NorduGrid,o=grid", "vo" => "NorduGrid"), array("host" => "index4.nordugrid.org", "port" => "2135", "base" => "mds-vo-name=NorduGrid,o=grid", "vo" => "NorduGrid") //*** A country-level GIIS example, use as many as you wish to monitor: //, //array("host" => "f9pc18.ijs.si", // "port" => "2135", // "base" => "mds-vo-name=Slovenia,o=grid", // "vo" => "Slovenia") //*** A single site GRIS example, use as many as you wish to monitor: //, //array("host" => "gridmaster.pzr.uni-rostock.de", // "port" => "2135", // "base" => "nordugrid-cluster-name=gridmaster.pzr.uni-rostock.de,mds-vo-name=local,o=grid", // "vo" => "Germany") ); // list of ARCHERY endpoints to query $archery_list = array ( array ( "endpoint" => "grid.org.ua", // TODO: add country grouping identifier if needed (something not sounds like VO :-)) ) ); // list of available EMIRs /* $emirslist = array( array("schema" => "https", "host" => "testbed-emi5.grid.upjs.sk", "port" => "54321", "base" => "mds-vo-name=NorduGrid,o=grid") ); /* * To set up TLS connection to EMIR server client certificate is mandatory: * Convert client certificate from *.pfx (pkcs12) into *.pem with openssl (if needed): * > openssl pkcs12 -in keys.pfx -out keys.pem */ $cert = "/var/www/ldap-monitor-svn/includes/test.pem"; $cert_pass = 'emi'; // base DNs for searches: local (GRIS), global (GIIS), VO if ( !defined("DN_LOCAL") ) define("DN_LOCAL","mds-vo-name=local,o=grid"); if ( !defined("DN_GLUE") ) define("DN_GLUE","o=glue"); if ( !defined("DN_GLOBL") ) define("DN_GLOBL","mds-vo-name=NorduGrid,o=grid"); if ( !defined("DN_VIORG") ) define("DN_VIORG","dc=nordugrid,dc=org"); if ( !defined("DN_PEOPL") ) 
define("DN_PEOPL","ou=people,dc=nordugrid,dc=org"); if ( !defined("DN_GUEST") ) define("DN_GUEST","ou=guests,dc=nordugrid,dc=org"); if ( !defined("DN_TUTOR") ) define("DN_TUTOR","ou=tutorial,dc=nordugrid,dc=org"); if ( !defined("DN_SERVS") ) define("DN_SERVS","ou=services,dc=nordugrid,dc=org"); if ( !defined("DN_RECAT") ) define("DN_RECAT","rc=NorduGrid,dc=nordugrid,dc=org"); // Information system classes and attributes namespace prefix, "nordugrid" if ( !defined("IS_PREFX") ) define("IS_PREFX","nordugrid"); if ( !defined("IS_PREFXG") ) define("IS_PREFXG","glue2"); // Cache location (use ../htdata when installing directly in ./htdocs) if ( !defined("CACHE_LOCATION") ) define("CACHE_LOCATION","cache"); // Extra title to be added to "Grid Monitor" (e.g. My Favorite) if ( !defined("EXTRA_TITLE") ) define("EXTRA_TITLE",""); //========================================================================= // =================== no need to change things below ===================== //========================================================================= // objectclasses if ( !defined("OBJ_CLUS") ) define("OBJ_CLUS",IS_PREFX."-cluster"); if ( !defined("OBJ_STEL") ) define("OBJ_STEL",IS_PREFX."-se"); if ( !defined("OBJ_QUEU") ) define("OBJ_QUEU",IS_PREFX."-queue"); if ( !defined("OBJ_AJOB") ) define("OBJ_AJOB",IS_PREFX."-job"); if ( !defined("OBJ_USER") ) define("OBJ_USER",IS_PREFX."-authuser"); //GLUE2 if ( !defined("GOBJ_CLUS") ) define("GOBJ_CLUS",IS_PREFXG."ComputingService"); if ( !defined("GOBJ_STEL") ) define("GOBJ_STEL",IS_PREFXG."-se"); if ( !defined("GOBJ_QUEU") ) define("GOBJ_QUEU",IS_PREFXG."ComputingShare"); if ( !defined("GOBJ_AJOB") ) define("GOBJ_AJOB",IS_PREFXG."ComputingActivity"); if ( !defined("GOBJ_USER") ) define("GOBJ_USER",IS_PREFXG."-authuser"); if ( !defined("GOBJ_LOC") ) define("GOBJ_LOC",IS_PREFXG."Location"); if ( !defined("GOBJ_MAN") ) define("GOBJ_MAN",IS_PREFXG."Manager"); if ( !defined("GOBJ_CON") ) define("GOBJ_CON",IS_PREFXG."Contact"); if ( !defined("OBJ_PERS") ) define("OBJ_PERS","organizationalPerson"); if ( !defined("OBJ_RCOL") ) define("OBJ_RCOL","GlobusReplicaLogicalCollection"); /* RC Logical Collection object */ if ( !defined("OBJ_RFIL") ) define("OBJ_RFIL","GlobusReplicaLogicalFile"); /* RC Logical File object */ if ( !defined("OBJ_RFIN") ) define("OBJ_RFIN","GlobusReplicaFileInfo"); /* RC File Info object */ if ( !defined("OBJ_RSEL") ) define("OBJ_RSEL","GlobusReplicaInfo"); /* RC Info object */ // attributes //NG if ( !defined("CLU_NAME") ) define("CLU_NAME",IS_PREFX."-cluster-name"); if ( !defined("CLU_ANAM") ) define("CLU_ANAM",IS_PREFX."-cluster-aliasname"); if ( !defined("CLU_ZIPC") ) define("CLU_ZIPC",IS_PREFX."-cluster-location"); if ( !defined("CLU_TCPU") ) define("CLU_TCPU",IS_PREFX."-cluster-totalcpus"); if ( !defined("CLU_UCPU") ) define("CLU_UCPU",IS_PREFX."-cluster-usedcpus"); if ( !defined("CLU_TJOB") ) define("CLU_TJOB",IS_PREFX."-cluster-totaljobs"); if ( !defined("CLU_QJOB") ) define("CLU_QJOB",IS_PREFX."-cluster-queuedjobs"); /* deprecated since 0.5.38 */ if ( !defined("CLU_OWNR") ) define("CLU_OWNR",IS_PREFX."-cluster-owner"); if ( !defined("CLU_SUPP") ) define("CLU_SUPP",IS_PREFX."-cluster-support"); if ( !defined("CLU_PQUE") ) define("CLU_PQUE",IS_PREFX."-cluster-prelrmsqueued"); /* new since 0.5.38 */ if ( !defined("SEL_NAME") ) define("SEL_NAME",IS_PREFX."-se-name"); if ( !defined("SEL_BURL") ) define("SEL_BURL",IS_PREFX."-se-baseurl"); /* gone since 0.5.26 */ if ( !defined("SEL_CURL") ) define("SEL_CURL",IS_PREFX."-se-url"); /* in 
place since 0.5.26 */ if ( !defined("SEL_ANAM") ) define("SEL_ANAM",IS_PREFX."-se-aliasname"); if ( !defined("SEL_TYPE") ) define("SEL_TYPE",IS_PREFX."-se-type"); if ( !defined("SEL_FREE") ) define("SEL_FREE",IS_PREFX."-se-freespace"); if ( !defined("SEL_TOTA") ) define("SEL_TOTA",IS_PREFX."-se-totalspace"); if ( !defined("SEL_USER") ) define("SEL_USER",IS_PREFX."-se-authuser"); if ( !defined("QUE_NAME") ) define("QUE_NAME",IS_PREFX."-queue-name"); if ( !defined("QUE_STAT") ) define("QUE_STAT",IS_PREFX."-queue-status"); if ( !defined("QUE_RUNG") ) define("QUE_RUNG",IS_PREFX."-queue-running"); if ( !defined("QUE_GRUN") ) define("QUE_GRUN",IS_PREFX."-queue-gridrunning"); if ( !defined("QUE_MAXR") ) define("QUE_MAXR",IS_PREFX."-queue-maxrunning"); if ( !defined("QUE_QUED") ) define("QUE_QUED",IS_PREFX."-queue-queued"); /* deprecated since 0.5.38 */ if ( !defined("QUE_LQUE") ) define("QUE_LQUE",IS_PREFX."-queue-localqueued"); /* new since 0.5.38 */ if ( !defined("QUE_PQUE") ) define("QUE_PQUE",IS_PREFX."-queue-prelrmsqueued"); /* new since 0.5.38 */ if ( !defined("QUE_GQUE") ) define("QUE_GQUE",IS_PREFX."-queue-gridqueued"); if ( !defined("QUE_MAXQ") ) define("QUE_MAXQ",IS_PREFX."-queue-maxqueuable"); if ( !defined("QUE_ASCP") ) define("QUE_ASCP",IS_PREFX."-queue-totalcpus"); if ( !defined("QUE_MINT") ) define("QUE_MINT",IS_PREFX."-queue-mincputime"); if ( !defined("QUE_MAXT") ) define("QUE_MAXT",IS_PREFX."-queue-maxcputime"); if ( !defined("JOB_GLID") ) define("JOB_GLID",IS_PREFX."-job-globalid"); if ( !defined("JOB_NAME") ) define("JOB_NAME",IS_PREFX."-job-jobname"); if ( !defined("JOB_STAT") ) define("JOB_STAT",IS_PREFX."-job-status"); if ( !defined("JOB_EQUE") ) define("JOB_EQUE",IS_PREFX."-job-execqueue"); if ( !defined("JOB_ECLU") ) define("JOB_ECLU",IS_PREFX."-job-execcluster"); if ( !defined("JOB_GOWN") ) define("JOB_GOWN",IS_PREFX."-job-globalowner"); if ( !defined("JOB_USET") ) define("JOB_USET",IS_PREFX."-job-usedcputime"); if ( !defined("JOB_USEM") ) define("JOB_USEM",IS_PREFX."-job-usedmem"); if ( !defined("JOB_SUBM") ) define("JOB_SUBM",IS_PREFX."-job-submissiontime"); if ( !defined("JOB_COMP") ) define("JOB_COMP",IS_PREFX."-job-completiontime"); if ( !defined("JOB_ERRS") ) define("JOB_ERRS",IS_PREFX."-job-errors"); if ( !defined("JOB_CPUS") ) define("JOB_CPUS",IS_PREFX."-job-cpucount"); if ( !defined("USR_NAME") ) define("USR_NAME",IS_PREFX."-authuser-name"); if ( !defined("USR_USSN") ) define("USR_USSN",IS_PREFX."-authuser-sn"); if ( !defined("USR_CPUS") ) define("USR_CPUS",IS_PREFX."-authuser-freecpus"); if ( !defined("USR_QUEU") ) define("USR_QUEU",IS_PREFX."-authuser-queuelength"); if ( !defined("USR_DISK") ) define("USR_DISK",IS_PREFX."-authuser-diskspace"); //GLUE2 if ( !defined("GCLU_NAME") ) define("GCLU_NAME",IS_PREFXG."entityname"); if ( !defined("GCLU_ANAM") ) define("GCLU_ANAM",IS_PREFXG."entityname"); if ( !defined("GCLU_ZIPC") ) define("GCLU_ZIPC",IS_PREFXG."locationpostcode"); if ( !defined("GCLU_TCPU") ) define("GCLU_TCPU",IS_PREFXG."computingmanagertotallogicalcpus"); if ( !defined("GCLU_UCPU") ) define("GCLU_UCPU",IS_PREFXG."-cluster-usedcpus"); if ( !defined("GCLU_TJOB") ) define("GCLU_TJOB",IS_PREFXG."computingservicetotaljobs"); if ( !defined("GCLU_QJOB") ) define("GCLU_QJOB",IS_PREFXG."-cluster-queuedjobs"); /* deprecated since 0.5.38 */ if ( !defined("GCLU_OWNR") ) define("GCLU_OWNR",IS_PREFXG."-cluster-owner"); if ( !defined("GCLU_SUPP") ) define("GCLU_SUPP",IS_PREFXG."-cluster-support"); if ( !defined("GCLU_PQUE") ) 
define("GCLU_PQUE",IS_PREFXG."computingserviceprelrmswaitingjobs"); /* new since 0.5.38 */ if ( !defined("GSEL_NAME") ) define("GSEL_NAME",IS_PREFXG."-se-name"); if ( !defined("GSEL_BURL") ) define("GSEL_BURL",IS_PREFXG."-se-baseurl"); /* gone since 0.5.26 */ if ( !defined("GSEL_CURL") ) define("GSEL_CURL",IS_PREFXG."-se-url"); /* in place since 0.5.26 */ if ( !defined("GSEL_ANAM") ) define("GSEL_ANAM",IS_PREFXG."-se-aliasname"); if ( !defined("GSEL_TYPE") ) define("GSEL_TYPE",IS_PREFXG."-se-type"); if ( !defined("GSEL_FREE") ) define("GSEL_FREE",IS_PREFXG."-se-freespace"); if ( !defined("GSEL_TOTA") ) define("GSEL_TOTA",IS_PREFXG."-se-totalspace"); if ( !defined("GSEL_USER") ) define("GSEL_USER",IS_PREFXG."-se-authuser"); if ( !defined("GQUE_NAME") ) define("GQUE_NAME",IS_PREFXG."entityname"); if ( !defined("GQUE_MAPQ") ) define("GQUE_MAPQ",IS_PREFXG."computingsharemappingqueue"); if ( !defined("GQUE_STAT") ) define("GQUE_STAT",IS_PREFXG."computingshareservingstate"); if ( !defined("GQUE_RUNG") ) define("GQUE_RUNG",IS_PREFXG."computingsharelocalrunningjobs"); if ( !defined("GQUE_GRUN") ) define("GQUE_GRUN",IS_PREFXG."computingsharerunningjobs"); if ( !defined("GQUE_MAXR") ) define("GQUE_MAXR",IS_PREFXG."computingsharemaxrunningjobs"); if ( !defined("GQUE_QUED") ) define("GQUE_QUED",IS_PREFXG."-queue-queued"); /* deprecated since 0.5.38 */ if ( !defined("GQUE_LQUE") ) define("GQUE_LQUE",IS_PREFXG."computingsharelocalwaitingjobs"); /* new since 0.5.38 */ if ( !defined("GQUE_PQUE") ) define("GQUE_PQUE",IS_PREFXG."computingshareprelrmswaitingjobs"); /* new since 0.5.38 */ if ( !defined("GQUE_GQUE") ) define("GQUE_GQUE",IS_PREFXG."computingsharewaitingjobs"); if ( !defined("GQUE_MAXQ") ) define("GQUE_MAXQ",IS_PREFXG."-queue-maxqueuable"); if ( !defined("GQUE_ASCP") ) define("GQUE_ASCP",IS_PREFXG."-queue-totalcpus"); if ( !defined("GQUE_MINT") ) define("GQUE_MINT",IS_PREFXG."-queue-mincputime"); if ( !defined("GQUE_MAXT") ) define("GQUE_MAXT",IS_PREFXG."-queue-maxcputime"); if ( !defined("GJOB_GLID") ) define("GJOB_GLID",IS_PREFXG."activityid"); if ( !defined("GJOB_NAME") ) define("GJOB_NAME",IS_PREFXG."name"); if ( !defined("GJOB_STAT") ) define("GJOB_STAT",IS_PREFXG."computingactivitystate"); if ( !defined("GJOB_EQUE") ) define("GJOB_EQUE",IS_PREFXG."computingactivityqueue"); if ( !defined("GJOB_ECLU") ) define("GJOB_ECLU",IS_PREFXG."computingactivityexecutionnode"); if ( !defined("GJOB_GOWN") ) define("GJOB_GOWN",IS_PREFXG."computingactivityowner"); if ( !defined("GJOB_USET") ) define("GJOB_USET",IS_PREFXG."computingactivityusedtotalcputime"); if ( !defined("GJOB_USEM") ) define("GJOB_USEM",IS_PREFXG."computingactivityusedmainmemory"); if ( !defined("GJOB_SUBM") ) define("GJOB_SUBM",IS_PREFXG."computingactivitysubmissiontime"); if ( !defined("GJOB_COMP") ) define("GJOB_COMP",IS_PREFXG."computingactivitycomputingmanagerendtime"); if ( !defined("GJOB_ERRS") ) define("GJOB_ERRS",IS_PREFXG."computingactivityerror"); if ( !defined("GJOB_CPUS") ) define("GJOB_CPUS",IS_PREFXG."computingactivityrequestedslots"); if ( !defined("GUSR_NAME") ) define("GUSR_NAME",IS_PREFXG."-authuser-name"); if ( !defined("GUSR_USSN") ) define("GUSR_USSN",IS_PREFXG."-authuser-sn"); if ( !defined("GUSR_CPUS") ) define("GUSR_CPUS",IS_PREFXG."-authuser-freecpus"); if ( !defined("GUSR_QUEU") ) define("GUSR_QUEU",IS_PREFXG."-authuser-queuelength"); if ( !defined("GUSR_DISK") ) define("GUSR_DISK",IS_PREFXG."-authuser-diskspace"); if ( !defined("VO_USCN" ) ) define("VO_USCN" ,"cn"); if ( !defined("VO_USSN" ) ) define("VO_USSN" 
,"sn"); if ( !defined("VO_DESC" ) ) define("VO_DESC" ,"description"); if ( !defined("VO_MAIL" ) ) define("VO_MAIL" ,"mail"); if ( !defined("VO_INST" ) ) define("VO_INST" ,"o"); //************************************* Grid Monitor top window style ****************************** $def_loadmon = array( "refresh" => 120, "bgcolor" => "#ffffff", "thcolor" => "#005659", "lcolor" => "#005659", "tbcolor" => "#ffecb5", "thfont" => "face=\"sans-serif\" color=#ffffff", "tbfont" => "face=\"sans-serif\"" ); //************************************* Cluster description style ********************************** $def_clusdes = array ( //"title" => $theaders["clusdes"][0], "refresh" => 600, "bgcolor" => "#ffcc33", "thcolor" => "#000099", "lcolor" => "#000099", "tbcolor" => "#ffffcc", "thfont" => "face=\"sans-serif\" color=\"#ffffff\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); //*************************************** Job statistics style ************************************* $def_jobstat = array ( "refresh" => 600, "bgcolor" => "#ffffff", "thcolor" => "#ffcc33", "lcolor" => "#000099", "tbcolor" => "#ffffcc", "thfont" => "face=\"sans-serif\" color=\"#000000\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); //******************************************* VO list style *************************************** $def_volist = array( "refresh" => 0, "bgcolor" => "#ffffff", "thcolor" => "#ffcc33", "lcolor" => "#ffff00", "tbcolor" => "#cc0033", "thfont" => "face=\"sans-serif\" color=\"#993300\"", "tbfont" => "face=\"sans-serif\" color=\"#ffffff\"" ); //***************************************** VO user base style ************************************* $def_vousers = array( "refresh" => 0, "bgcolor" => "#ffffff", "thcolor" => "#ffcc33", "lcolor" => "#ffcccc", "tbcolor" => "#000099", "thfont" => "face=\"sans-serif\" color=\"#000000\"", "tbfont" => "face=\"sans-serif\" color=\"#ffffff\"" ); //***************************************** User job list style ************************************ $def_userlist = array( "refresh" => 0, "bgcolor" => "#ffffcc", "thcolor" => "#ffcc33", "lcolor" => "#000099", "tbcolor" => "#ffffff", "thfont" => "face=\"sans-serif\" color=\"#000000\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); $def_userres = array( "thcolor" => "#000099", "tbcolor" => "#ffffcc", "thfont" => "face=\"sans-serif\" color=\"#ffffff\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); //**************************************** Attribute list style ************************************ $def_attlist = array( "refresh" => 0, "bgcolor" => "#ffffff", "thcolor" => "#000099", "lcolor" => "#000099", "tbcolor" => "#ccffff", "thfont" => "face=\"sans-serif\" color=\"#ffffff\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); //****************************************** Queue job list style ********************************** $def_quelist = array( "refresh" => 300, "bgcolor" => "#ffffff", "thcolor" => "#000099", "lcolor" => "#000099", "tbcolor" => "#ffffcc", "thfont" => "face=\"sans-serif\" color=\"#ffffff\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); //******************************************* SE info style *************************************** $def_sestat = array( "refresh" => 300, "bgcolor" => "#ffffff", "thcolor" => "#ffcc33", "lcolor" => "#003300", "tbcolor" => "#CCCC99", "thfont" => "face=\"sans-serif\" color=\"#990000\"", "tbfont" => "face=\"sans-serif\" color=\"#000000\"" ); //******************************************* Users info style 
*************************************** $def_allusers = array( "refresh" => 0, "bgcolor" => "#ffffff", "thcolor" => "#339966", "lcolor" => "#003300", "tbcolor" => "#ccffcc", "thfont" => "face=\"sans-serif\" color=\"#ffffff\"", "tbfont" => "face=\"sans-serif\" color=\"#000000\"" ); //***************************** LDAP parameters dump style - no need to modify ********************* $def_ldapdump = array( "thcolor" => "#000099", "tbcolor" => "#ffffcc", "thfont" => "face=\"sans-serif\" color=\"#ffffff\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/toreload.inc0000644000000000000000000000012411506337146026334 xustar000000000000000027 mtime=1293532774.731583 27 atime=1513200575.678716 30 ctime=1513200663.776793677 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/toreload.inc0000644000175000002070000000060611506337146026403 0ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/cache.inc0000644000000000000000000000012411506337146025566 xustar000000000000000027 mtime=1293532774.731583 27 atime=1513200575.677716 30 ctime=1513200663.763793518 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/cache.inc0000644000175000002070000000255311506337146025640 0ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/emirs_info.inc0000644000000000000000000000012413153453630026652 xustar000000000000000027 mtime=1504597912.048594 27 atime=1513200575.671716 30 ctime=1513200663.767793567 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/emirs_info.inc0000644000175000002070000001525513153453630026727 0ustar00mockbuildmock00000000000000 "https", * "host" => "testbed-emi5.grid.upjs.sk, * "port" => "54321", * "base" => "mds-vo-name=NorduGrid,o=grid"),...) */ $tlim = 2; $tout = 5; if($debug && !empty($emirslist)) dbgmsg("
    :::> ".$errors["130"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); $nemirs = count($emirslist); $counter = count($gentries); $tag = array(); $entries = $gentries; $blacklist = array(); if ( file_exists("blacklist.inc") ) include('blacklist.inc'); // Loop on entered EMIR sites // If a host is blacklisted, skip // If a host is a cluster (GRIS), keep and skip // If a host is any other local GRIS, skip for ( $ig = 0; $ig < $nemirs; $ig++ ) { $eschema = $emirslist[$ig]["schema"]; $ehost = $emirslist[$ig]["host"]; if ( @$blacklist[$ehost] ) { if ( $debug ) dbgmsg("
    ".$errors["122"]."$ehost
    \n"); continue; } $eport = $emirslist[$ig]["port"]; $ebase = $emirslist[$ig]["base"]; if ( preg_match("/$element/i",$ebase) ) { // Invent a "fake DN" for host tagging and skip duplicated entries $fakedn = "hn=".$ehost.", ".$ebase; if ( @$tag[$fakedn] ) continue; $tag[$fakedn] = 1; continue; } elseif ( preg_match("/local/i",$ebase) ) { if ( $debug ) dbgmsg("
    ".$errors["115"].$ehost." (".$errors["116"].$element.")"); continue; } if ( $debug == 2 ) dbgmsg("
    ".$errors["117"]."$ehost..."); // Connection to EMIR $path = "services"; $query = "Service_Endpoint_Capability=information.discovery.resource&Service_Endpoint_Technology=ldap"; $res = http_request('GET', $eschema."://".$ehost.":".$eport."/".$path."?".$query, $data, $cert, $referer=''); if ($res["status"] == "ok"){ if ( $debug == 2 ) dbgmsg($errors["118"]); $json_a=json_decode($res["content"], true); $nrecords = count($json_a); for ($i = 0; $i < $nrecords; $i++) { $url = parse_url($json_a[$i]["Service_Endpoint_URL"]); $curhost = $url["host"]; $curhstat = $json_a[$i]["Service_Endpoint_HealthState"]; $cursstat = $json_a[$i]["Service_Endpoint_ServingState"]; /* * URL structure: * * ldapng: ldap://:2135/Mds-Vo-Name=local,o=grid * * ldapglue1: ldap://:2135/Mds-Vo-Name=resource,o=grid * * ldapglue2: ldap://:2135/o=glue */ // Introduce "fake" DN for tagging purpose - helps skipping sites registering twice $fakedn = "hn=".$url["host"].", ".$url["base"]; //if ( @$tag[$fakedn] ) continue; // Note: We need all enpoint about a service!!! if ( @$blacklist[$curhost] ) { if ( $debug>0 ) dbgmsg("
    ".$errors["122"]."$curhost
    \n"); continue; } $curstat = (($curhstat == "ok")&&($cursstat == "production")) ? "VALID": "healt state: '".$curhstate."', serving state: '".$cursstate."'"; if ( $curstat != "VALID" ) { if ( $debug ) dbgmsg("
    $curstat".$errors["121"]."$fakedn
    \n"); //continue; } $entries[$counter]["host"] = $url["host"]; $entries[$counter]["port"] = $url["port"]; $entries[$counter]["base"] = substr($url["path"],1); if ( $debug == 2 ) dbgmsg("
    ".$errors["123"]."$base: $fakedn
    \n"); $tag[$fakedn] = 1; $counter++; } } } if ( $debug == 2 ) dbgmsg("
    "); // Some debugging printout if ( $debug == 2 ) { dbgmsg("

    ".$errors["119"].$element.": ".$counter."
    "); foreach ( $entries as $num=>$val ) dbgmsg($val["host"].":".$val["port"]."/".$val["base"]."
    "); } return $entries; } /* * Send http request to the given URL of the server. */ function http_request($type, $url, $data, $cert, $referer='') { // Convert the data array into URL Parameters like a=b&foo=bar etc. $data = http_build_query($data); // parse the given URL $url = parse_url($url); if ($url['scheme'] != 'https' && $url['scheme'] != 'http') { die('Error: Only HTTP(S) request are supported !'); } // extract host and path: $host = $url['host']; $port = $url['port']; $path = $url['path']; $query= $url['query']; // open a socket connection on the given port - timeout: 30 sec $fp = stream_socket_client($host.":".$port, $errno, $errstr, 30); if ($url['scheme'] == 'https') { // add secure properties $context = stream_context_create(); $result = stream_context_set_option($context, 'ssl', 'local_cert', $cert); $result = stream_context_set_option($context, 'ssl', 'passphrase', $cert_pass); // open a secure socket connection on the given port - timeout: 30 sec $fp = stream_socket_client("ssl://".$host.":".$port, $errno, $errstr, 30, STREAM_CLIENT_CONNECT,$context); } if ($fp){ // send the request headers: fputs($fp, $type." $path?$query HTTP/1.1\r\n"); fputs($fp, "Host: $host\r\n"); if ($referer != '') fputs($fp, "Referer: $referer\r\n"); fputs($fp, "Content-type: application/x-www-form-urlencoded\r\n"); fputs($fp, "Content-length: ". strlen($data) ."\r\n"); fputs($fp, "Connection: close\r\n\r\n"); fputs($fp, $data); $result = ''; while(!feof($fp)) { // receive the results of the request $result .= fgets($fp, 128); } } else { return array( 'status' => 'err', 'error' => "$errstr ($errno)" ); } // close the socket connection: fclose($fp); // split the result header from the content $result = explode("\r\n\r\n", $result, 2); $header = isset($result[0]) ? $result[0] : ''; $content = isset($result[1]) ? $result[1] : ''; // return as structured array: return array( 'status' => 'ok', 'header' => $header, 'content' => $content ); } ?> nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/cnvalias.inc0000644000000000000000000000012411506337146026323 xustar000000000000000027 mtime=1293532774.731583 27 atime=1513200575.676716 30 ctime=1513200663.763793518 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/cnvalias.inc0000644000175000002070000000302611506337146026371 0ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/PaxHeaders.7502/recursive_giis_info.inc0000644000000000000000000000012412223274207030553 xustar000000000000000027 mtime=1380808839.583104 27 atime=1513200575.672716 30 ctime=1513200663.774793653 nordugrid-arc-5.4.2/src/services/ldap-monitor/includes/recursive_giis_info.inc0000644000175000002070000001253312223274207030624 0ustar00mockbuildmock00000000000000 "grid.nbi.dk", * "port" => "2135", * "base" => "mds-vo-name=NorduGrid,o=grid"),...) */ $loopcnt++; $tlim = 2; $tout = 5; if($debug && count($giislist) < 5) dbgmsg("
    :::> ".$errors["114"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); $greg = array(); $gfilter = "(objectclass=mds*)"; $ngiis = count($giislist); $counter = 0; $tag = array(); $dsarray = array(); $dnarray = array(); $hnarray = array(); $entries = array(); $blacklist = array(); @include('blacklist.inc'); // uses blacklist if it is in includes or current path // Loop on entered sites // If a host is blacklisted, skip // If a host is a cluster (ARIS), keep and skip // If a host is any other local ARIS, skip for ( $ig = 0; $ig < $ngiis; $ig++ ) { $ghost = $giislist[$ig]["host"]; if ( @$blacklist[$ghost] ) { if ( $debug ) dbgmsg("
    ".$errors["122"]."$ghost
    \n"); continue; } $gport = $giislist[$ig]["port"]; $gbase = $giislist[$ig]["base"]; if ( preg_match("/$element/i",$gbase) ) { // Invent a "fake DN" for host tagging and skip duplicated entries $fakedn = "hn=".$ghost.", ".$gbase; if ( @$tag[$fakedn] ) continue; $tag[$fakedn] = 1; array_push($entries,$giislist[$ig]); $counter++; continue; } elseif ( preg_match("/local/i",$gbase) ) { if ( $debug ) dbgmsg("
    ".$errors["115"].$ghost." (".$errors["116"].$element.")"); continue; } if ( $debug == 2 ) dbgmsg("
    ".$errors["117"]."$ghost..."); $fp = @fsockopen($ghost, $gport, $errno, $errstr, 2); $gconn = ldap_connect($ghost,$gport); if ( $fp && $gconn ) { fclose($fp); if ( $debug == 2 ) dbgmsg($errors["118"]); array_push($dsarray,$gconn); array_push($dnarray,$gbase); array_push($hnarray,$ghost); } if ( $debug == 2 ) dbgmsg("
    "); } // Some debugging printout if ( $debug == 2 ) { dbgmsg("

    ".$errors["119"].$element.": ".$counter."
    "); foreach ( $entries as $num=>$val ) dbgmsg($val["host"].":".$val["base"]."
    "); } // Check if there is underlying structure $srarray = @ldap_read($dsarray,$dnarray,$gfilter,$greg,0,0,$tlim,LDAP_DEREF_NEVER); // If using the pached LDAP //$srarray = @ldap_read($dsarray,$dnarray,$gfilter,$greg,0,0,$tlim,LDAP_DEREF_NEVER,$tout); // Debug: check if something eventualy timeouts or something if ( $debug ) { $nconns = count($dsarray); for ( $ii = 0; $ii < $nconns; $ii++ ) { $ldconn = $dsarray[$ii]; $hnconn = $hnarray[$ii]; if ( ldap_errno($ldconn) != 0x00 ) { $ldaperrmess = ldap_error($ldconn); dbgmsg("".$errors["120"].$hnconn.": ".$ldaperrmess."
    "); } } } $nhosts = count($srarray); // If EGIISes are found, loop on contacted EGIISes if ( $nhosts ) { $truecount = 0; for( $ids = 0; $ids < $nhosts; $ids++ ) { // suppose N hosts answered (nhosts), each returned M lower registrants (nrecords) // some of lower registrants are the same and have to be purged // and everything should be re-arranged in a new common array $sr = $srarray[$ids]; $ds = $dsarray[$ids]; $base = $dnarray[$ids]; if ($sr) $truecount++; $record = @ldap_get_entries($ds,$sr); $nrecords = $record["count"]; // Per each contacted EGIIS, loop on potential lower-level EGIISes/clusters for ($i = 0; $i < $nrecords; $i++) { $curdn = $record[$i]["dn"]; $curhost = $record[$i]["mds-service-hn"][0]; $curstat = $record[$i]["mds-reg-status"][0]; $curport = $record[$i]["mds-service-port"][0]; $cursuff = $record[$i]["mds-service-ldap-suffix"][0]; // Introduce "fake" DN for tagging purpose - helps skipping sites registering twice $fakedn = "hn=".$curhost.", ".$cursuff; if ( @$tag[$fakedn] ) continue; if ( @$blacklist[$curhost] ) { if ( $debug>0 ) dbgmsg("
    ".$errors["122"]."$curhost
    \n"); continue; } if ( $curstat != "VALID" ) { if ( $debug ) dbgmsg("
    $curstat".$errors["121"]."$fakedn
    \n"); continue; } // array_push($entries,$record[$i]); $entries[$counter]["host"] = $curhost; $entries[$counter]["port"] = $curport; $entries[$counter]["base"] = $cursuff; if ( $debug == 2 ) dbgmsg("
    ".$errors["123"]."$base: $fakedn
    \n"); $tag[$fakedn] = 1; $counter++; } } // Array $entries contains all possible stuff which registers to a EGIIS // Keep recursing if ($truecount && $loopcnt < 10 ) $entries = recursive_giis_info($entries,$element,$errors,$debug,$loopcnt); } return $entries; } ?> nordugrid-arc-5.4.2/src/services/PaxHeaders.7502/ws-monitor0000644000000000000000000000013213214316027021646 xustar000000000000000030 mtime=1513200663.817794179 30 atime=1513200668.717854109 30 ctime=1513200663.817794179 nordugrid-arc-5.4.2/src/services/ws-monitor/0000755000175000002070000000000013214316027021771 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712047045306023765 xustar000000000000000027 mtime=1352420038.089673 30 atime=1513200605.281078252 30 ctime=1513200663.810794093 nordugrid-arc-5.4.2/src/services/ws-monitor/Makefile.am0000755000175000002070000000032312047045306024030 0ustar00mockbuildmock00000000000000SUBDIRS = man mon-icons lang includes monitordir = @ws_monitor_prefix@ dist_monitor_DATA = $(srcdir)/*.php $(srcdir)/*.js monitor_DATA = README install-data-local: $(MKDIR_P) $(DESTDIR)$(monitordir)/cache nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/userlist.php0000644000000000000000000000012411524504547024317 xustar000000000000000027 mtime=1297254759.916296 27 atime=1513200576.666728 30 ctime=1513200663.806794044 nordugrid-arc-5.4.2/src/services/ws-monitor/userlist.php0000755000175000002070000002142411524504547024372 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; $isislist = &$toppage->isislist; $cert = &$toppage->cert; // Header table $toppage->tabletop("",$toptitle." $family"); // Array defining the attributes to be returned $lim = array( "dn", USR_USSN, USR_CPUS, USR_QUEU, USR_DISK ); $ulim = array( "dn", JOB_NAME, JOB_EQUE, JOB_ECLU, JOB_GOWN, JOB_SUBM, JOB_STAT, JOB_USET, JOB_ERRS, JOB_CPUS ); if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 20; $tout = 20; if( $debug ) dbgmsg("
    :::> ".$errors["114"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); $type = 'DB'; $isis = new $type($cert,$isislist); $isis->connect($debug); $gentries = $isis->get_infos(); $nc = count($gentries); if ( !$nc ) { $errno = "1"; echo "
    ".$errors[$errno]."\n"; $toppage->close(); return $errno; } $dsarray = array (); $hostsarray = array (); $hnarray = array (); $pnarray = array (); $sitetag = array (); /* a tag to skip duplicated entries */ // Purging cluster entries $ncc=0; foreach ( $gentries as $vo) { $sitec = count($vo)/2; $ncc += $sitec; for ( $i = 0; $i < $sitec; $i++ ) { array_push($hostsarray,(string)$vo[(string)$vo[$i]]["EPR"]); } } for ( $k = 0; $k < count($hostsarray); $k++ ) { $clport = $gentries[$k]["port"]; $clhost = $hostsarray[$k]; $clconn = $isis->cluster_info($clhost,$debug); if ( $clconn && !$sitetag[$clhost] ) { array_push($dsarray,$clconn); array_push($hnarray,$clhost); array_push($pnarray,$clport); $sitetag[$clhost] = 1; /* filtering tag */ } } $nhosts = count($dsarray); if ( !$nhosts ) { // NO SITES REPLY... $errno = "2"; echo "
    ".$errors[$errno]."\n"; $toppage->close(); return $errno; } // HTML table initialisation $utable = new LmTable("userres",$strings["userres"]); $urowcont = array(); $dnmsg = "".$errors["420"].": ".$uname; $utable->adderror($dnmsg, "#cccccc"); $nauclu = 0; $goodds = array(); $goodhn = array(); $goodpn = array(); for ( $ids = 0; $ids < $nhosts; $ids++ ) { $hn = $hnarray[$ids]; $pn = $pnarray[$ids]; $dst = $dsarray[$ids]; $curl = popup("clusdes.php?host=$hn&port=$pn",700,620,1); if ($dst) { $nqueues = count($dst->Domains->AdminDomain->Services->ComputingService->ComputingEndpoint->Associations->ComputingShareID); if ($nqueues > 0) { $nauclu++; array_push($goodds,$dst); array_push($goodhn,$hn); array_push($goodpn,$pn); // If there are valid entries, tabulate results $allres = $dst->Domains->AdminDomain->Services->ComputingService; $queues = $allres->ComputingShares->ComputingShare; $queues_new = $allres->ComputingShare; $queues = @($queues) ? $queues : $queues_new ; $nqueues = count($queues); // define("CMPKEY",USR_CPUS); // usort($allres,"ldap_entry_comp"); // loop on queues for ($j=0; $j<$nqueues; $j++) { $ucluster = $hn; $uqueue = $queues[$j]->MappingQueue; if ( $debug == 2 ) dbgmsg("$hn -- $ucluster
    "); $qurl = popup("quelist.php?host=$ucluster&port=$pn&qname=$uqueue",750,430,6); $curl = popup("clusdes.php?host=$ucluster&port=$pn",700,620,1); // for FreeCPUs $computingmanager = $allres->ComputingManager; $curtotcpu = @($computingmanager->{CLU_TCPU}) ? $computingmanager->{CLU_TCPU} : $computingmanager->TotalSlots; $curusedcpu = @($computingmanager->SlotsUsedByGridJobs && $computingmanager->SlotsUsedByLocalJobs) ? $computingmanager->SlotsUsedByLocalJobs + $computingmanager->SlotsUsedByGridJobs : 0; $fcpu = $curtotcpu - $curusedcpu; $fproc = freeproc($fcpu); $fdisk = @($allres[$j][USR_DISK][0]) ? $allres[$j][USR_DISK][0] : "N/A"; //$exque = $allres[$j][USR_QUEU][0]; $exque = $queues[$j]->{QUE_GQUE}; $urowcont[] = "$ucluster:$uqueue"; $urowcont[] = $fcpu; $urowcont[] = $exque; $urowcont[] = $fdisk; $utable->addrow($urowcont); $urowcont = array(); } } else { $utable->adderror("".$errors["11"]." $hn"); } } else { $utable->adderror("$hn ".$errors["12"].""); } } $utable->adderror("".$errors["421"].$nauclu.$errors["422"]."", "#0099FF"); $utable->close(); echo "
    \n"; // HTML table initialisation $jtable = new LmTable($module,$toppage->$module); $rowcont = array(); $jcount = 0; $nghosts = count($goodds); for ( $ids = 0; $ids < $nghosts; $ids++ ) { $dst = $goodds[$ids]; $gpn = $goodpn[$ids]; $ghn = $goodhn[$ids]; if ($dst) { // If search returned, check that there are valid entries $allentries = $dst->Domains->AdminDomain->Services->ComputingService->ComputingEndpoint->ComputingActivities->ComputingActivity; $nmatch = count($allentries); if ($nmatch > 0) { // If there are valid entries, tabulate results $entries = $allentries; $njobs = $nmatch; define("CMPKEY",JOB_SUBM); //usort($entries,"ldap_entry_comp"); // loop on jobs for ($i=0; $i<$njobs; $i++) { if ( $owner != $entries[$i]->{JOB_GOWN}) continue; $jobdn = rawurlencode($entries[$i]->{JOB_GOWN}); $curstat = $entries[$i]->{JOB_STAT}; $stahead = substr($curstat,10,8); if ($stahead=="FINISHED") { $ftime = substr(strrchr($curstat, " "), 1); $ftime = $entries[$i]->{JOB_COMP}; $ftime = cnvtime($ftime); $curstat = "FINISHED at: ".$ftime; } $jname = htmlentities($entries[$i]->{JOB_NAME}); $jobname = ($entries[$i]->{JOB_NAME}) ? $jname : "N/A"; $queue = ($entries[$i]->{JOB_EQUE}) ? $entries[$i]->{JOB_EQUE} : "N/A"; $cluster = $ghn; $time = ($entries[$i]->{JOB_USET}) ? $entries[$i]->{JOB_USET} : "N/A"; $ncpus = ($entries[$i]->{JOB_CPUS}) ? $entries[$i]->{JOB_CPUS} : ""; $error = ($entries[$i]->{JOB_ERRS}); if ( $error ) $error = ( preg_match("/user/i",$error) ) ? "X" : "!"; if ( $debug == 2 ) dbgmsg("$ghn --- $cluster
    "); $newwin = popup("jobstat.php?host=$cluster&port=$gpn&status=$status&jobdn=$jobdn",750,430,4); $quewin = popup("quelist.php?host=$cluster&port=$gpn&qname=$queue",750,430,6); $clstring = popup("clusdes.php?host=$cluster&port=$gpn",700,620,1); $jcount++; // filling the table $rowcont[] = "$jcount $error"; $rowcont[] = "$jobname"; $rowcont[] = "$curstat"; $rowcont[] = "$time"; $rowcont[] = "$cluster"; $rowcont[] = "$queue"; $rowcont[] = "$ncpus"; $jtable->addrow($rowcont); $rowcont = array(); } } } } if ( !$jcount ) $jtable->adderror("".$errors["13"].$family.""); $jtable->close(); $toppage->close(); return 0; // Done $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/lang0000644000000000000000000000013213214316027022567 xustar000000000000000030 mtime=1513200663.933795598 30 atime=1513200668.717854109 30 ctime=1513200663.933795598 nordugrid-arc-5.4.2/src/services/ws-monitor/lang/0000755000175000002070000000000013214316027022712 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ws-monitor/lang/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711634415166024714 xustar000000000000000027 mtime=1316100726.768006 30 atime=1513200605.381079475 30 ctime=1513200663.930795561 nordugrid-arc-5.4.2/src/services/ws-monitor/lang/Makefile.am0000755000175000002070000000016111634415166024757 0ustar00mockbuildmock00000000000000monitorlangdir = @ws_monitor_prefix@/lang monitorlang_DATA = $(srcdir)/*.inc EXTRA_DIST = $(monitorlang_DATA) nordugrid-arc-5.4.2/src/services/ws-monitor/lang/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315735024716 xustar000000000000000030 mtime=1513200605.413079866 30 atime=1513200651.741646483 30 ctime=1513200663.931795573 nordugrid-arc-5.4.2/src/services/ws-monitor/lang/Makefile.in0000644000175000002070000004361413214315735024774 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/ws-monitor/lang DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(monitorlangdir)" DATA = $(monitorlang_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = 
@ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = 
@LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ 
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ monitorlangdir = @ws_monitor_prefix@/lang monitorlang_DATA = $(srcdir)/*.inc EXTRA_DIST = $(monitorlang_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/ws-monitor/lang/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/ws-monitor/lang/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-monitorlangDATA: $(monitorlang_DATA) @$(NORMAL_INSTALL) test -z "$(monitorlangdir)" || $(MKDIR_P) "$(DESTDIR)$(monitorlangdir)" @list='$(monitorlang_DATA)'; test -n "$(monitorlangdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(monitorlangdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(monitorlangdir)" || exit $$?; \ done uninstall-monitorlangDATA: @$(NORMAL_UNINSTALL) @list='$(monitorlang_DATA)'; test -n "$(monitorlangdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(monitorlangdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(monitorlangdir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test 
-d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(monitorlangdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-monitorlangDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-monitorlangDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man \ install-monitorlangDATA install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am uninstall uninstall-am uninstall-monitorlangDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/ws-monitor/lang/PaxHeaders.7502/en.inc0000644000000000000000000000012411506361170023743 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.720728 30 ctime=1513200663.932795585 nordugrid-arc-5.4.2/src/services/ws-monitor/lang/en.inc0000755000175000002070000014247511506361170024030 0ustar00mockbuildmock00000000000000 N/A indicates that user did not assign any name.
    X indicates that the job has been killed by the owner
    ! indicates that the job failed in the system
    Click on a name to get a detailed description of the job."; $str_nam = "Name of the user, as specified in the personal certificate. Click on a name to get the list of all the resources available for this user and all the jobs by this user which are currently in the system."; $str_sta = "Job status as returned by the Grid Manager (GM) and LRMS. In sequential order, the states are:
    ACCEPTED – job submitted but not yet processed
    PREPARING – input files are being retrieved
    SUBMITTING – interaction with LRMS ongoing
    INLRMS – the job is transferred to the LRMS; internal status is added by the infosystem. Possible states are:
    : Q – job is queued
    : U – job is in a suspended state on a busy node (PBSPro)
    : S – job is in a suspended state (Condor)
    : R, run – job is running
    : E – job is finishing (PBS)
    FINISHING – output files are being transferred by the GM
    FINISHED – job is finished; time stamp is added by the infosystem
    CANCELING – job is being cancelled
    DELETED – job not cleaned upon user request but removed by the GM due to expiration time
    Each of the states can be reported with the PENDING: prefix, meaning the GM is attempting to move the job to the next state"; $str_tim = "CPU time used by the job, seconds."; $str_mem = "Memory consumed by the job, MB"; $str_cpu = "Number of processors used by the job."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Grid Monitor", "help" => "
    This screen displays all the sites registering to the top ARC indexing service, sorted by country and then by host name. Selected site parameters are monitored: cluster alias, total CPU capacity, and the number of running and queued jobs, both Grid and local ones. Use the &quot;Search&quot; utility if you want to compare other cluster, queue, job, etc. characteristics
    Country
    ".$clickable.". Country flag and name as deduced from available resource descriptions. Click to show only this country info.
    Cluster
    ".$clickable.". Cluster alias as assigned by the owner. Maximal displayed length is 22 characters. Click on the alias to get a detailed cluster description.
    CPUs
    Total number of CPUs in a cluster. NB! Only a fraction of those may actually be available to the Grid users.
    Load (processes:Grid+local)
    ".$clickable.". Relative cluster load, corresponding to the occupied CPUs count. Grey bars indicate processors occupied by the localy submitted jobs, while red bars show CPUs occupied by jobs submitted via Grid. Click on the bar to get the detailed list of all the running Grid jobs on the cluster, including amount of processors per job.
    Queueing
    ".$clickable.". Number of all queued jobs on the cluster, shown as number of queueing grid jobs plus number of locally submitted queueing jobs. Click the first number to get the list of queued Grid jobs on the cluster.
    ", "Country" => 30, "Site" => 160, "CPUs" => 10, "Load (processes: Grid+local)" => 210, "Queueing" => 10 ), "clusdes" => array("0" => "Resource Details for", "help" => "
    Attribute
    ".$clickable.". Cluster attribute name".$str_att."
    Value
    ".$str_val."
    Queue
    ".$clickable.". Names of batch queues available for the ARC users, as set by cluster owners. ".$str_que."
    Status
    Queue status. Operating queue typically reports active status.
    CPU (sec)
    Time limit for job duration per queue, if set, in CPU-seconds. The first displayed value is the lower limit, the second the upper one. If limits are not set (jobs of any duration are accepted), the N/A tag is shown.
    Running
    Number of jobs running in the queue. Total number of jobs is shown, with number of processors occupied by Grid-submitted jobs displayed in parentheses, e.g. (Grid: 12). NB! For parallel multiprocessor jobs, number in parentheses can be larger than number of jobs.
    Queueing
    Number of jobs awaiting execution in the queue. Total number of jobs is shown, with Grid-submitted jobs displayed in parentheses, e.g. (Grid: 235)
    ", "Queue" => 0, "Status" => 0, "Limits (sec)" => 0, "CPUs" => 0, "Running" => 0, "Queueing" => 0 ), "jobstat" => array("0" => "Jobs at:Job ID", "help" => "
    JOB LIST:
    Job name
    ".$clickable.". Name of a job as assigned by the owner. If no name has been assigned, "N/A" is displayed. Click on a name to get a detailed description of the job.
    Owner
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (sec)
    ".$str_tim."
    Queue
    ".$clickable.". Name of the batch queue in which the job is being executed. ".$str_que."
    CPUs
    ".$str_cpu."
    JOB DETAILS:
    Attribute
    ".$clickable.". Job attribute name".$str_att."
    Value
    ".$str_val."
    ", "Job name" => 0, "Owner" => 0, "Status" => 0, "CPU (sec)" => 0, "Queue" => 0, "CPUs" => 0 ), "volist" => array("0" => "Virtual Organisations", "help" => "
    Virtual Organisation
    ".$clickable.". Group of users, typically sharing common activities and resources, authorised at at least one ARC-enabled site. Click on the name to get the list of group members.
    Members
    Number of group members.
    Served by
    LDAP server that supports the group membership database.
    ", "Virtual Organisation" => 0, "Members" => 0, "Served by" => 0 ), "vousers" => array("0" => "Grid User Base", "help" => "
    Name
    ".$clickable.". ".$str_nam."
    Affiliation
    User's home institute as entered by a VO manager. Can be empty.
    E-mail
    ".$clickable.". Users e-mail as entered by a VO manager. Can be empty. Click the address to send an e-mail to the user.
    ", "#" => 0, "Name" => 0, "Affiliation" => 0, "E-mail" => 0 ), "userlist" => array("0" => "Information for", "help" => "
    Cluster:queue
    ".$clickable.". Names of clusters and respective queues (separated by a column, ":") where a user is authorized to submit jobs. If a user is not authorized, message "Not authorised at host ..." is displayed. Click a cluster name to get a detailed cluster description. Click on a queue name to get a detailed queue description.
    Free CPUs
    Number of free CPUs available in a given queue for the user at this moment of time, optionally appended with the upper time limit value (in minutes). For example, "3" means 3 CPUs available for a job of unlimited running time; "4:360" indicates there are 4 CPUs available for jobs not longer than 6 hours; "10:180 30" means there are 10 CPUs available for jobs not exceeding 3 hours, plus 30 CPUs available for jobs of any length; "0" means there are no CPUs available at the moment, and the jobs will be placed in a waiting queue.
    Queued jobs
    Number of the user's jobs expected to sit ahead of a newly submitted job (for this user) in a waiting queue. A value of &quot;0&quot; means the job is expected to be executed immediately. NB! This is only an estimate, which can be overridden by local policies.
    Free disk (GB)
    Disk space available for the user in a given queue (in Gigabytes). NB! This is only an estimate, as most clusters do not provide fixed disk quotas.
    Job name
    ".$clickable.". ".$str_job."
    Status
    ".$str_sta."
    CPU (sec)
    ".$str_tim."
    Cluster
    ".$clickable.". Name of the cluster at which the job is being executed. Click on a cluster name to get detailed information about the cluster.
    Queue
    ".$clickable.". Name of the batch queue in which the job is/was executed. ".$str_que."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Job name" => 0, "Status" => 0, "CPU (sec)" => 0, "Cluster" => 0, "Queue" => 0, "CPUs" => 0 ), "attlist" => array("0" => "Attribute values", "help" => "
    Object
    ".$clickable.". Name of the object which attributes are displayed. It can be a cluster name, a clusters queue name, a job name, a user name etc. Click on the string to get a detailed decscription of the object.
    Attribute
    For each object, one or more attribute values can be listed. The column title is the human-readable attribute name (except for some MDS-specific attributes), and the column contents are the attribute values per object as entered in the Information System.
    ", "Object" => 0, "Attribute" => 0 ), "quelist" => array("0" => "Details for the queue", "help" => "
    Attribute
    ".$clickable.". Name of a queue attribute".$str_att."
    Value
    ".$str_val."
    Job name
    ".$clickable.". ".$str_job."
    Owner
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (sec)
    ".$str_tim."
    Memory (MB)
    ".$str_mem."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Job name" => 0, "Owner" => 0, "Status" => 0, "CPU (sec)" => 0, "Memory (MB)" => 0, "CPUs" => 0 ), "sestat" => array("0" => "Storage Elements", "help" => "
    Alias
    Storage element alias as specified in the Information System. Maximal displayed length is 15 characters.
    Tot. space
    Total disk space, GB.
    Free space
    Disk space available at the moment, GB.
    Name
    Storage element name, consisting of a logical name and a host name (separated by a colon, &quot;:&quot;). The logical name is used only for information system purposes, to distinguish between different storage elements hosted by the same machine.
    Base URL
    URL for the storage element, typically using the gsiftp:// protocol. Use this URL as the base to access files.
    Type
    Storage element type. &quot;gridftp-based&quot; indicates a disk storage with a GridFTP interface.
    ", "#" => 0, "Alias" => 0, // "Tot. space" => 0, "Free/total space, Gb" => 0, "Name" => 0, "Base URL" => 0, "Type" => 0 ), "allusers" => array("0" => "Authorised Grid Users:Active Grid Users", "help" => "
    Name
    ".$clickable.". ".$str_nam."
    Affiliation
    User's affiliation, derived from the personal certificate
    Jobs
    Count of all user jobs in the system (running, pending, finished or deleted)
    Sites
    Shows how many sites authorise this user
    ", "#" => 0, "Name" => 0, "Affiliaton" => 0, "Jobs" => 0, "Sites" => 0 ), "userres" => array("0" => "", "Cluster:queue" => 0, "Free CPUs" => 0, "Queued jobs" => 0, "Free disk (GB)" => 0 ), "ldapdump" => array("0" => "", "Attribute" => 0, "Value" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info valid from (GMT)", "Mds-validto" => "Info valid to (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Front-end domain name", "nordugrid-cluster-aliasname" => "Cluster alias", "nordugrid-cluster-contactstring" => "Contact string", "nordugrid-cluster-interactive-contactstring" => "Interactive contact", "nordugrid-cluster-comment" => "Comment", "nordugrid-cluster-support" => "E-mail contact", "nordugrid-cluster-acl" => "Authorised VOs", "nordugrid-cluster-lrms-type" => "LRMS type", "nordugrid-cluster-lrms-version" => "LRMS version", "nordugrid-cluster-lrms-config" => "LRMS details", "nordugrid-cluster-architecture" => "Architecture", "nordugrid-cluster-opsys" => "Operating system", "nordugrid-cluster-homogeneity" => "Homogeneous cluster", "nordugrid-cluster-nodecpu" => "CPU type (slowest)", "nordugrid-cluster-nodememory" => "Memory (MB, smallest)", "nordugrid-cluster-totalcpus" => "CPUs, total", "nordugrid-cluster-cpudistribution" => "CPU:machines", "nordugrid-cluster-benchmark" => "Benchmark", "nordugrid-cluster-sessiondir-free" => "Disk space, available (MB)", "nordugrid-cluster-sessiondir-total" => "Disk space, total (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Grid session lifetime (min)", "nordugrid-cluster-cache-free" => "Cache size, available (MB)", "nordugrid-cluster-cache-total" => "Cache size, total (MB)", "nordugrid-cluster-runtimeenvironment" => "Runtime environment", "nordugrid-cluster-localse" => "Storage Element, local", "nordugrid-cluster-middleware" => "Grid middleware", "nordugrid-cluster-totaljobs" => "Jobs, total amount", "nordugrid-cluster-usedcpus" => "CPUs, occupied", "nordugrid-cluster-queuedjobs" => "Jobs, queued", "nordugrid-cluster-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-cluster-location" => "Postal code", "nordugrid-cluster-owner" => "Owner", "nordugrid-cluster-issuerca" => "Certificate issuer", "nordugrid-cluster-issuerca-hash" => "Certificate issuer's hash", "nordugrid-cluster-trustedca" => "Trusted certificate issuers", "nordugrid-cluster-nodeaccess" => "Node IP connectivity", "nordugrid-cluster-gridarea" => "Session area (OBSOLETE)", "nordugrid-cluster-gridspace" => "Grid disk space (OBSOLETE)", "nordugrid-cluster-opsysdistribution" => "OS distribution (OBSOLETE)", "nordugrid-cluster-runningjobs" => "Jobs, running (OBSOLETE)", "nordugrid-queue-name" => "Queue name", "nordugrid-queue-comment" => "Comment", "nordugrid-queue-status" => "Queue status", "nordugrid-queue-running" => "Total occupied CPUs", "nordugrid-queue-localqueued" => "Local jobs, queued", "nordugrid-queue-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-queue-queued" => "Jobs, queued (OBSOLETE)", "nordugrid-queue-maxrunning" => "Jobs, running (max)", "nordugrid-queue-maxqueuable" => "Jobs, queueable (max)", "nordugrid-queue-maxuserrun" => "Jobs per Unix user (max)", "nordugrid-queue-maxcputime" => "CPU time, max. (minutes)", "nordugrid-queue-mincputime" => "CPU time, min. (minutes)", "nordugrid-queue-defaultcputime" => "CPU time, default (minutes)", "nordugrid-queue-maxwalltime" => "Walltime, max. (minutes)", "nordugrid-queue-minwalltime" => "Walltime, min. 
(minutes)", "nordugrid-queue-defaultwalltime" => "Walltime, default (minutes)", "nordugrid-queue-schedulingpolicy" => "Scheduling policy", "nordugrid-queue-totalcpus" => "CPUs, total", "nordugrid-queue-nodecpu" => "CPU type", "nordugrid-queue-nodememory" => "Memory (MB)", "nordugrid-queue-architecture" => "Architecture", "nordugrid-queue-opsys" => "Operating system", "nordugrid-queue-homogeneity" => "Homogeneous queue", "nordugrid-queue-gridrunning" => "CPUs occupied by Grid jobs", "nordugrid-queue-gridqueued" => "Grid jobs, queued", "nordugrid-queue-benchmark" => "Benchmark", "nordugrid-queue-assignedcpunumber" => "CPUs per queue (OBSOLETE)", "nordugrid-queue-assignedcputype" => "CPU type (OBSOLETE)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Owner", "nordugrid-job-execcluster" => "Execution cluster", "nordugrid-job-execqueue" => "Execution queue", "nordugrid-job-stdout" => "Standard output file", "nordugrid-job-stderr" => "Standard error file", "nordugrid-job-stdin" => "Standard input file", "nordugrid-job-reqcputime" => "Requested CPU time", "nordugrid-job-reqwalltime" => "Requested wall clock time", "nordugrid-job-status" => "Status", "nordugrid-job-queuerank" => "Position in the queue", "nordugrid-job-comment" => "LRMS comment", "nordugrid-job-submissionui" => "Submission machine", "nordugrid-job-submissiontime" => "Submission time (GMT)", "nordugrid-job-usedcputime" => "Used CPU time", "nordugrid-job-usedwalltime" => "Used wall clock time", "nordugrid-job-completiontime" => "Job completion time (GMT)", "nordugrid-job-sessiondirerasetime" => "Erase time (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxy expiration time (GMT)", "nordugrid-job-usedmem" => "Used memory (KB)", "nordugrid-job-errors" => "Errors", "nordugrid-job-exitcode" => "Exit code", "nordugrid-job-jobname" => "Name", "nordugrid-job-runtimeenvironment" => "Runtime environment", "nordugrid-job-cpucount" => "Requested CPUs", "nordugrid-job-executionnodes" => "Execution nodes", "nordugrid-job-gmlog" => "GM log file", "nordugrid-job-clientsoftware" => "Client version", "nordugrid-job-rerunable" => "Rerunnable", "nordugrid-job-reqcput" => "Requested time (OBSOLETE)", "nordugrid-job-gridlog" => "Gridlog file (OBSOLETE)", "nordugrid-job-lrmscomment" => "LRMS comment (OBSOLETE)", "nordugrid-authuser-name" => "Name", "nordugrid-authuser-sn" => "Subject Name", "nordugrid-authuser-freecpus" => "Free CPUs", "nordugrid-authuser-diskspace" => "Free disk space (MB)", "nordugrid-authuser-queuelength" => "User's queued jobs", "nordugrid-se-name" => "Name", "nordugrid-se-aliasname" => "Storage element alias", "nordugrid-se-type" => "Storage element type", "nordugrid-se-acl" => "Authorised VOs", "nordugrid-se-freespace" => "Free space (MB)", "nordugrid-se-totalspace" => "Total space (MB)", "nordugrid-se-url" => "Contact URL", "nordugrid-se-baseurl" => "Contact URL (OBSOLETE)", "nordugrid-se-accesscontrol" => "Access control", "nordugrid-se-authuser" => "Authorised user (DN)", "nordugrid-se-location" => "Postal code", "nordugrid-se-owner" => "Owner", "nordugrid-se-middleware" => "Middleware", "nordugrid-se-issuerca" => "Certificate issuer", "nordugrid-se-issuerca-hash" => "Certificate issuer's hash", "nordugrid-se-trustedca" => "Trusted certificate issuers", "nordugrid-se-comment" => "Comment", "nordugrid-rc-name" => "Domain name", "nordugrid-rc-aliasname" => "Replica Catalog alias", "nordugrid-rc-baseurl" => "Contact URL", "nordugrid-rc-authuser" => "Authorised user (DN)", "nordugrid-rc-location" => "Postal 
code", "nordugrid-rc-owner" => "Owner", "nordugrid-rc-issuerca" => "Certificate issuer" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Can not read top-level resource indices", "2" => "None of the local indices returned connection", "3" => " bad configuration or request timed out", "4" => "No Grid jobs found", "5" => "No information found", "6" => "Server unavailable", "7" => " - refresh later", "8" => "No queue information found", "9" => "No entries found", "10" => "No users found", "11" => "Not authorised at host", "12" => "does not answer", "13" => "No recent jobs found for ", // debug messages "101" => " Monitor timeouts for GRIS: ", "102" => " sec on connection and ", "103" => " sec on search", "104" => " sec spent searching", "105" => "Showing resources only in ", "106" => "Polled top-level indices: ", "107" => "Got geographical locations, scanned sites: ", "108" => " sites arranged by geographical location", "109" => "Search for cluster attributes", "110" => "Search for queue attributes", "111" => "No data from ", "112" => " is up in ", "113" => " has no resources to offer", "114" => " Monitor timeouts for GIIS: ", "115" => "Skipping GRIS: ", "116" => "not a ", "117" => "Checking connection: ", "118" => "OK", "119" => "That far, detected resources of kind ", "120" => "LDAP error searching ", "121" => " status at ", "122" => "Blacklisted: ", "123" => "Registrant found for ", "124" => "Search for SE attributes", "125" => "Search for users", "126" => "Search for jobs", "127" => " has job ", "128" => " while not being authorized", "129" => "Can not get object data: error ", // icon titles "301" => "Refresh", "302" => "Print", "303" => "Help", "304" => "Close", "305" => "Red", "306" => "Grey", "307" => "All users", "308" => "Active users", "309" => "Search", "310" => "Storage", "311" => "VOs", "312" => "Flag of ", "313" => " Grid processes and ", "314" => " local processes", // auxilliary strings "401" => "Processes", "402" => "Grid", "403" => "Local", "404" => "World", "405" => "TOTAL", "406" => " sites", "407" => "a lot of", "408" => " GB", "409" => " ALL", "410" => "Cluster", "411" => "Queue", "412" => "Job", "413" => "User", "414" => "Storage", "415" => "Replica Cat.", "416" => "Define attributes to display for the object: ", "417" => "AND of all the expressions will be matched", "418" => "Leave the righmost field empty to show everything", "419" => "Display resources or objects of your choice", "420" => "Distinguished name", "421" => "Can use a total of ", "422" => " sites", "423" => "Resource / object:", "424" => "Nr.of attributes (def. 6):", "425" => "Object", "426" => "Next", "427" => "Select one", "428" => "Reset", "429" => "SHOW" ), // Post code conversion: only for [en]! 
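// The keys below are country codes and similar top-level-domain style
// tokens deduced from the available resource descriptions; the values
// are the display names used to group sites by country on the front page.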
"tlconvert" => array ( "AU" => "Australia", "AT" => "Austria", "AM" => "Armenia", "DZ" => "Algeria", "BE" => "Belgium", "BG" => "Bulgaria", "CA" => "Canada", "CN" => "China", "CZ" => "Czechia", "DK" => "Denmark", "EE" => "Estonia", "FI" => "Finland", "FIN" => "Finland", "SF" => "Finland", "FR" => "France", "GE" => "Georgia", "DE" => "Germany", "D" => "Germany", "GR" => "Greece", "HU" => "Hungary", "IS" => "Iceland", "IR" => "Ireland", "IE" => "Ireland", "IT" => "Italy", "JP" => "Japan", "KEK" => "Japan", "TOKYO" => "Japan", "LV" => "Latvia", "LT" => "Lithuania", "MA" => "Morocco", "NL" => "Netherlands", "NO" => "Norway", "N" => "Norway", "PL" => "Poland", "PT" => "Portugal", "RO" => "Romania", "RU" => "Russia", "LK" => "SriLanka", "SE" => "Sweden", "SK" => "Slovakia", "SI" => "Slovenia", "CH" => "Switzerland", "TR" => "Turkey", "UK" => "UK", "UA" => "Ukraine", "COM" => "USA", "GOV" => "USA", "USA" => "USA", "US" => "USA" ) ); ?> nordugrid-arc-5.4.2/src/services/ws-monitor/lang/PaxHeaders.7502/us.inc0000644000000000000000000000012411506361170023770 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.702728 30 ctime=1513200663.933795598 nordugrid-arc-5.4.2/src/services/ws-monitor/lang/us.inc0000755000175000002070000014225711506361170024053 0ustar00mockbuildmock00000000000000 [<2-letter code>] // -- Translation: // -- Author: oxana.smirnova@hep.lu.se // Some common strings: $clickable = "CLICKABLE"; $str_att = ", human-readable except of some MDS-specific attributes. Click on the attribute name to get the list of the attribute values across the ARC universe."; $str_val = "Attribute value as entered in the Information System."; $str_que = "Typically, different queues correspond to different allowed task duration, or to different groups of users. Click on a queue name to get detailed information about the queue, including running, queued, and finished tasks."; $str_job = "Name of a job as assigned by the job owner.
    N/A indicates that user did not assign any name.
    X indicates that the job has been killed by the owner
    ! indicates that the job failed in the system
    Click on a name to get a detailed description of the job."; $str_nam = "Name of the user, as specified in the personal certificate. Click on a name to get the list of all the resources available for this user and all the jobs by this user which are currently in the system."; $str_sta = "Job status as returned by the Grid Manager (GM) and LRMS. In sequential order, the states are:
    ACCEPTED – job submitted but not yet processed
    PREPARING – input files are being retrieved
    SUBMITTING – interaction with LRMS ongoing
    INLRMS – the job is transferred to the LRMS; internal status is added by the infosystem. Possible states are:
    : Q – job is queued
    : U – job is in a suspended state on a busy node (PBSPro)
    : S – job is in a suspended state (Condor)
    : R, run – job is running
    : E – job is finishing (PBS)
    FINISHING – output files are being transferred by the GM
    FINISHED – job is finished; time stamp is added by the infosystem
    CANCELING – job is being cancelled
    DELETED – job not cleaned upon user request but removed by the GM due to expiration time
    Each of the states can be reported with the PENDING: prefix, meaning the GM is attempting to move the job to the next state"; $str_tim = "CPU time used by the job, minutes."; $str_mem = "Memory consumed by the job, KB"; $str_cpu = "Number of processors used by the job."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Grid Monitor", "help" => "
    This screen displays all the sites registering to the top ARC indexing service, sorted by country and then by host name. Selected site parameters are monitored: cluster alias, total CPU capacity, and the number of running and queued jobs, both Grid and local ones. Use the &quot;Search&quot; utility if you want to compare other cluster, queue, job, etc. characteristics
    Country
    ".$clickable.". Country flag and name as deduced from available resource descriptions. Click to show only this country info.
    Cluster
    ".$clickable.". Cluster alias as assigned by the owner. Maximal displayed length is 22 characters. Click on the alias to get a detailed cluster description.
    CPUs
    Total number of CPUs in a cluster. NB! Only a fraction of those may actually be available to the Grid users.
    Load (processes:Grid+local)
    ".$clickable.". Relative cluster load, corresponding to the occupied CPUs count. Grey bars indicate processors occupied by the localy submitted jobs, while red bars show CPUs occupied by jobs submitted via Grid. Click on the bar to get the detailed list of all the running Grid jobs on the cluster, including amount of processors per job.
    Queueing
    ".$clickable.". Number of all queued jobs on the cluster, shown as number of queueing grid jobs plus number of locally submitted queueing jobs. Click the first number to get the list of queued Grid jobs on the cluster.
    ", "Country" => 30, "Site" => 160, "CPUs" => 10, "Load (processes: Grid+local)" => 210, "Queueing" => 10 ), "clusdes" => array("0" => "Resource Details for", "help" => "
    Attribute
    ".$clickable.". Cluster attribute name".$str_att."
    Value
    ".$str_val."
    Queue
    ".$clickable.". Names of batch queues available for the ARC users, as set by cluster owners. ".$str_que."
    Status
    Queue status. Operating queue typically reports active status.
    CPU (min)
    Time limit for job duration per queue, if set, in CPU-minutes. The first displayed value is the lower limit, the second the upper one. If limits are not set (jobs of any duration are accepted), the N/A tag is shown.
    Running
    Number of jobs running in the queue. Total number of jobs is shown, with number of processors occupied by Grid-submitted jobs displayed in parentheses, e.g. (Grid: 12). NB! For parallel multiprocessor jobs, number in parentheses can be larger than number of jobs.
    Queueing
    Number of jobs awaiting execution in the queue. Total number of jobs is shown, with Grid-submitted jobs displayed in parentheses, e.g. (Grid: 235)
    ", "Queue" => 0, "Status" => 0, "Limits (min)" => 0, "CPUs" => 0, "Running" => 0, "Queueing" => 0 ), "jobstat" => array("0" => "Jobs at:Job ID", "help" => "
    JOB LIST:
    Job name
    ".$clickable.". Name of a job as assigned by the owner. If no name has been assigned, "N/A" is displayed. Click on a name to get a detailed description of the job.
    Owner
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Queue
    ".$clickable.". Name of the batch queue in which the job is being executed. ".$str_que."
    CPUs
    ".$str_cpu."
    JOB DETAILS:
    Attribute
    ".$clickable.". Job attribute name".$str_att."
    Value
    ".$str_val."
    ", "Job name" => 0, "Owner" => 0, "Status" => 0, "CPU (min)" => 0, "Queue" => 0, "CPUs" => 0 ), "volist" => array("0" => "Virtual Organisations", "help" => "
    Virtual Organisation
    ".$clickable.". Group of users, typically sharing common activities and resources, authorised at at least one ARC-enabled site. Click on the name to get the list of group members.
    Members
    Number of group members.
    Served by
    LDAP server that supports the group membership database.
    ", "Virtual Organisation" => 0, "Members" => 0, "Served by" => 0 ), "vousers" => array("0" => "Grid User Base", "help" => "
    Name
    ".$clickable.". ".$str_nam."
    Affiliation
    User's home institute as entered by a VO manager. Can be empty.
    E-mail
    ".$clickable.". Users e-mail as entered by a VO manager. Can be empty. Click the address to send an e-mail to the user.
    ", "#" => 0, "Name" => 0, "Affiliation" => 0, "E-mail" => 0 ), "userlist" => array("0" => "Information for", "help" => "
    Cluster:queue
    ".$clickable.". Names of clusters and respective queues (separated by a column, ":") where a user is authorized to submit jobs. If a user is not authorized, message "Not authorised at host ..." is displayed. Click a cluster name to get a detailed cluster description. Click on a queue name to get a detailed queue description.
    Free CPUs
    Number of free CPUs available in a given queue for the user at this moment of time, optionally appended with the upper time limit value (in minutes). For example, "3" means 3 CPUs available for a job of unlimited running time; "4:360" indicates there are 4 CPUs available for jobs not longer than 6 hours; "10:180 30" means there are 10 CPUs available for jobs not exceeding 3 hours, plus 30 CPUs available for jobs of any length; "0" means there are no CPUs available at the moment, and the jobs will be placed in a waiting queue.
    Queued jobs
    Number of the user's jobs expected to sit ahead of a newly submitted job (for this user) in a waiting queue. A value of &quot;0&quot; means the job is expected to be executed immediately. NB! This is only an estimate, which can be overridden by local policies.
    Free disk (MB)
    Disk space available for the user in a given queue (in Megabytes). NB! This is only an estimate, as most clusters do not provide fixed disk quotas.
    Job name
    ".$clickable.". ".$str_job."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Cluster
    ".$clickable.". Name of the cluster at which the job is being executed. Click on a cluster name to get detailed information about the cluster.
    Queue
    ".$clickable.". Name of the batch queue in which the job is/was executed. ".$str_que."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Job name" => 0, "Status" => 0, "CPU (min)" => 0, "Cluster" => 0, "Queue" => 0, "CPUs" => 0 ), "attlist" => array("0" => "Attribute values", "help" => "
    Object
    ".$clickable.". Name of the object which attributes are displayed. It can be a cluster name, a clusters queue name, a job name, a user name etc. Click on the string to get a detailed decscription of the object.
    Attribute
    For each object, one or more attribute values can be listed. The column title is the human-readable attribute name (except for some MDS-specific attributes), and the column contents are the attribute values per object as entered in the Information System.
    ", "Object" => 0, "Attribute" => 0 ), "quelist" => array("0" => "Details for the queue", "help" => "
    Attribute
    ".$clickable.". Name of a queue attribute".$str_att."
    Value
    ".$str_val."
    Job name
    ".$clickable.". ".$str_job."
    Owner
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Memory (KB)
    ".$str_mem."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Job name" => 0, "Owner" => 0, "Status" => 0, "CPU (min)" => 0, "Memory (KB)" => 0, "CPUs" => 0 ), "sestat" => array("0" => "Storage Elements", "help" => "
    Alias
    Storage element alias as specified in the Information System. Maximal displayed length is 15 characters.
    Tot. space
    Total disk space, GB.
    Free space
    Disk space available at the moment, GB.
    Name
    Storage element name, consisting of a logical name and a host name (separated by a colon, &quot;:&quot;). The logical name is used only for information system purposes, to distinguish between different storage elements hosted by the same machine.
    Base URL
    URL for the storage element, typically using the gsiftp:// protocol. Use this URL as the base to access files.
    Type
    Storage element type. &quot;gridftp-based&quot; indicates a disk storage with a GridFTP interface.
    ", "#" => 0, "Alias" => 0, "Free/tot. space, GB" => 0, "Name" => 0, "Base URL" => 0, "Type" => 0 ), "allusers" => array("0" => "Authorised Grid Users:Active Grid Users", "help" => "
    Name
    ".$clickable.". ".$str_nam."
    Affiliation
    User's affiliation, derived from the personal certificate
    Jobs
    Count of all user jobs in the system (running, pending, finished or deleted)
    Sites
    Shows how many sites authorise this user
    ", "#" => 0, "Name" => 0, "Affiliaton" => 0, "Jobs" => 0, "Sites" => 0 ), "userres" => array("0" => "", "Cluster:queue" => 0, "Free CPUs" => 0, "Queued jobs" => 0, "Free disk (MB)" => 0 ), "ldapdump" => array("0" => "", "Attribute" => 0, "Value" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info valid from (GMT)", "Mds-validto" => "Info valid to (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Front-end domain name", "nordugrid-cluster-aliasname" => "Cluster alias", "nordugrid-cluster-contactstring" => "Contact string", "nordugrid-cluster-interactive-contactstring" => "Interactive contact", "nordugrid-cluster-comment" => "Comment", "nordugrid-cluster-support" => "E-mail contact", "nordugrid-cluster-acl" => "Authorised VOs", "nordugrid-cluster-lrms-type" => "LRMS type", "nordugrid-cluster-lrms-version" => "LRMS version", "nordugrid-cluster-lrms-config" => "LRMS details", "nordugrid-cluster-architecture" => "Architecture", "nordugrid-cluster-opsys" => "Operating system", "nordugrid-cluster-homogeneity" => "Homogeneous cluster", "nordugrid-cluster-nodecpu" => "CPU type (slowest)", "nordugrid-cluster-nodememory" => "Memory (MB, smallest)", "nordugrid-cluster-totalcpus" => "CPUs, total", "nordugrid-cluster-cpudistribution" => "CPU:machines", "nordugrid-cluster-benchmark" => "Benchmark", "nordugrid-cluster-sessiondir-free" => "Disk space, available (MB)", "nordugrid-cluster-sessiondir-total" => "Disk space, total (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Grid session lifetime (min)", "nordugrid-cluster-cache-free" => "Cache size, available (MB)", "nordugrid-cluster-cache-total" => "Cache size, total (MB)", "nordugrid-cluster-runtimeenvironment" => "Runtime environment", "nordugrid-cluster-localse" => "Storage Element, local", "nordugrid-cluster-middleware" => "Grid middleware", "nordugrid-cluster-totaljobs" => "Jobs, total amount", "nordugrid-cluster-usedcpus" => "CPUs, occupied", "nordugrid-cluster-queuedjobs" => "Jobs, queued", "nordugrid-cluster-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-cluster-location" => "Postal code", "nordugrid-cluster-owner" => "Owner", "nordugrid-cluster-issuerca" => "Certificate issuer", "nordugrid-cluster-issuerca-hash" => "Certificate issuer's hash", "nordugrid-cluster-trustedca" => "Trusted certificate issuers", "nordugrid-cluster-nodeaccess" => "Node IP connectivity", "nordugrid-cluster-gridarea" => "Session area (OBSOLETE)", "nordugrid-cluster-gridspace" => "Grid disk space (OBSOLETE)", "nordugrid-cluster-opsysdistribution" => "OS distribution (OBSOLETE)", "nordugrid-cluster-runningjobs" => "Jobs, running (OBSOLETE)", "nordugrid-queue-name" => "Queue name", "nordugrid-queue-comment" => "Comment", "nordugrid-queue-status" => "Queue status", "nordugrid-queue-running" => "Total occupied CPUs", "nordugrid-queue-localqueued" => "Local jobs, queued", "nordugrid-queue-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-queue-queued" => "Jobs, queued (OBSOLETE)", "nordugrid-queue-maxrunning" => "Jobs, running (max)", "nordugrid-queue-maxqueuable" => "Jobs, queueable (max)", "nordugrid-queue-maxuserrun" => "Jobs per Unix user (max)", "nordugrid-queue-maxcputime" => "CPU time, max. (minutes)", "nordugrid-queue-mincputime" => "CPU time, min. (minutes)", "nordugrid-queue-defaultcputime" => "CPU time, default (minutes)", "nordugrid-queue-maxwalltime" => "Walltime, max. (minutes)", "nordugrid-queue-minwalltime" => "Walltime, min. 
(minutes)", "nordugrid-queue-defaultwalltime" => "Walltime, default (minutes)", "nordugrid-queue-schedulingpolicy" => "Scheduling policy", "nordugrid-queue-totalcpus" => "CPUs, total", "nordugrid-queue-nodecpu" => "CPU type", "nordugrid-queue-nodememory" => "Memory (MB)", "nordugrid-queue-architecture" => "Architecture", "nordugrid-queue-opsys" => "Operating system", "nordugrid-queue-homogeneity" => "Homogeneous queue", "nordugrid-queue-gridrunning" => "CPUs occupied by Grid jobs", "nordugrid-queue-gridqueued" => "Grid jobs, queued", "nordugrid-queue-benchmark" => "Benchmark", "nordugrid-queue-assignedcpunumber" => "CPUs per queue (OBSOLETE)", "nordugrid-queue-assignedcputype" => "CPU type (OBSOLETE)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Owner", "nordugrid-job-execcluster" => "Execution cluster", "nordugrid-job-execqueue" => "Execution queue", "nordugrid-job-stdout" => "Standard output file", "nordugrid-job-stderr" => "Standard error file", "nordugrid-job-stdin" => "Standard input file", "nordugrid-job-reqcputime" => "Requested CPU time", "nordugrid-job-reqwalltime" => "Requested wall clock time", "nordugrid-job-status" => "Status", "nordugrid-job-queuerank" => "Position in the queue", "nordugrid-job-comment" => "LRMS comment", "nordugrid-job-submissionui" => "Submission machine", "nordugrid-job-submissiontime" => "Submission time (GMT)", "nordugrid-job-usedcputime" => "Used CPU time", "nordugrid-job-usedwalltime" => "Used wall clock time", "nordugrid-job-completiontime" => "Completion time (GMT)", "nordugrid-job-sessiondirerasetime" => "Erase time (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxy expiration time (GMT)", "nordugrid-job-usedmem" => "Used memory (KB)", "nordugrid-job-errors" => "Errors", "nordugrid-job-exitcode" => "Exit code", "nordugrid-job-jobname" => "Name", "nordugrid-job-runtimeenvironment" => "Runtime environment", "nordugrid-job-cpucount" => "Requested CPUs", "nordugrid-job-executionnodes" => "Execution nodes", "nordugrid-job-gmlog" => "GM log file", "nordugrid-job-clientsoftware" => "Client version", "nordugrid-job-rerunable" => "Rerunnable", "nordugrid-job-reqcput" => "Requested time (OBSOLETE)", "nordugrid-job-gridlog" => "Gridlog file (OBSOLETE)", "nordugrid-job-lrmscomment" => "LRMS comment (OBSOLETE)", "nordugrid-authuser-name" => "Name", "nordugrid-authuser-sn" => "Subject Name", "nordugrid-authuser-freecpus" => "Free CPUs", "nordugrid-authuser-diskspace" => "Free disk space (MB)", "nordugrid-authuser-queuelength" => "User's queued jobs", "nordugrid-se-name" => "Name", "nordugrid-se-aliasname" => "Storage element alias", "nordugrid-se-type" => "Storage element type", "nordugrid-se-acl" => "Authorised VOs", "nordugrid-se-freespace" => "Free space (MB)", "nordugrid-se-totalspace" => "Total space (MB)", "nordugrid-se-url" => "Contact URL", "nordugrid-se-baseurl" => "Contact URL (OBSOLETE)", "nordugrid-se-accesscontrol" => "Access control", "nordugrid-se-authuser" => "Authorised user (DN)", "nordugrid-se-location" => "Postal code", "nordugrid-se-owner" => "Owner", "nordugrid-se-middleware" => "Middleware", "nordugrid-se-issuerca" => "Certificate issuer", "nordugrid-se-issuerca-hash" => "Certificate issuer's hash", "nordugrid-se-trustedca" => "Trusted certificate issuers", "nordugrid-se-comment" => "Comment", "nordugrid-rc-name" => "Domain name", "nordugrid-rc-aliasname" => "Replica Catalog alias", "nordugrid-rc-baseurl" => "Contact URL", "nordugrid-rc-authuser" => "Authorised user (DN)", "nordugrid-rc-location" => "Postal code", 
"nordugrid-rc-owner" => "Owner", "nordugrid-rc-issuerca" => "Certificate issuer" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Can not read top-level resource indices", "2" => "None of the local indices returned connection", "3" => " bad configuration or request timed out", "4" => "No Grid jobs found", "5" => "No information found", "6" => "Server unavailable", "7" => " - refresh later", "8" => "No queue information found", "9" => "No entries found", "10" => "No users found", "11" => "Not authorised at host", "12" => "does not answer", "13" => "No recent jobs found for ", // debug messages "101" => " Monitor timeouts for GRIS: ", "102" => " sec on connection and ", "103" => " sec on search", "104" => " sec spent searching", "105" => "Showing resources only in ", "106" => "Polled top-level indices: ", "107" => "Got geographical locations, scanned sites: ", "108" => " sites arranged by geographical location", "109" => "Search for cluster attributes", "110" => "Search for queue attributes", "111" => "No data from ", "112" => " is up in ", "113" => " has no resources to offer", "114" => " Monitor timeouts for GIIS: ", "115" => "Skipping GRIS: ", "116" => "not a ", "117" => "Checking connection: ", "118" => "OK", "119" => "That far, detected resources of kind ", "120" => "LDAP error searching ", "121" => " status at ", "122" => "Blacklisted: ", "123" => "Registrant found for ", "124" => "Search for SE attributes", "125" => "Search for users", "126" => "Search for jobs", "127" => " has job ", "128" => " while not being authorized", "129" => "Can not get object data: error ", // icon titles "301" => "Refresh", "302" => "Print", "303" => "Help", "304" => "Close", "305" => "Red", "306" => "Grey", "307" => "All users", "308" => "Active users", "309" => "Search", "310" => "Storage", "311" => "VOs", "312" => "Flag of ", "313" => " Grid processes and ", "314" => " local processes", // auxilliary strings "401" => "Processes", "402" => "Grid", "403" => "Local", "404" => "World", "405" => "TOTAL", "406" => " sites", "407" => "a lot of", "408" => " GB", "409" => " ALL", "410" => "Cluster", "411" => "Queue", "412" => "Job", "413" => "User", "414" => "Storage", "415" => "Replica Cat.", "416" => "Define attributes to display for the object: ", "417" => "AND of all the expressions will be matched", "418" => "Leave the righmost field empty to show everything", "419" => "Display resources or objects of your choice", "420" => "Distinguished name", "421" => "Can use a total of ", "422" => " sites", "423" => "Resource / object:", "424" => "Nr.of attributes (def. 
6):", "425" => "Object", "426" => "Next", "427" => "Select one", "428" => "Reset", "429" => "SHOW" ), // Post code conversion "tlconvert" => array ( "Australia" => "Australia", "Austria" => "Austria", "Armenia" => "Armenia", "Algeria" => "Algeria", "Belgium" => "Belgium", "Bulgaria" => "Bulgaria", "Canada" => "Canada", "China" => "China", "Czechia" => "Czechia", "Denmark" => "Denmark", "Estonia" => "Estonia", "Finland" => "Finland", "France" => "France", "Georgia" => "Georgia", "Germany" => "Germany", "Greece" => "Greece", "Hungary" => "Hungary", "Iceland" => "Iceland", "Ireland" => "Ireland", "Ireland" => "Ireland", "Italy" => "Italy", "Japan" => "Japan", "Latvia" => "Latvia", "Lithuania" => "Lithuania", "Morocco" => "Morocco", "Netherlands" => "Netherlands", "Norway" => "Norway", "Poland" => "Poland", "Portugal" => "Portugal", "Romania" => "Romania", "Russia" => "Russia", "SriLanka" => "Sri Lanka", "Sweden" => "Sweden", "Slovakia" => "Slovakia", "Slovenia" => "Slovenia", "Switzerland" => "Switzerland", "Turkey" => "Turkey", "UK" => "UK", "Ukraine" => "Ukraine", "USA" => "USA" ) ); ?> nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/allusers.php0000644000000000000000000000012411506361170024270 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.702728 30 ctime=1513200663.796793922 nordugrid-arc-5.4.2/src/services/ws-monitor/allusers.php0000755000175000002070000001364111506361170024345 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; $isislist = &$toppage->isislist; $cert = &$toppage->cert; $yazyk = &$toppage->language; // Array defining the attributes to be returned $lim = array( "dn", USR_USSN ); /* need only SN per each user */ $jlim = array( "dn", JOB_GOWN ); /* Job owner only is needed */ if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 20; $tout = 20; if( $debug ) dbgmsg("
    :::> ".$errors["114"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); // Header table $titles = explode(":",$toptitle); // two alternative titles, separated by column $gtitle = $titles[0]; if ( $jobnum ) $gtitle = $titles[1]; $toppage->tabletop($gtitle,""); $family = cnvname($ussn); // Search all clusters for users $ts1 = time(); $type = 'DB'; $isis = new $type($cert,$isislist); $isis->connect($debug); $gentries = $isis->get_infos(); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["125"]." (".($ts2-$ts1).$errors["104"].")
    "); $nc = count($gentries); if ( !$nc ) { // NO SITES FOUND! $errno = "1"; echo "
    ".$errors[$errno]."\n"; $toppage->close(); return $errno; } $dsarray = array (); $hostsarray = array (); $hnarray = array (); $sitetag = array (); /* a tag to skip duplicated entries */ // Purging cluster entries $ncc=0; foreach ( $gentries as $vo) { $sitec = count($vo)/2; $ncc += $sitec; for ( $i = 0; $i < $sitec; $i++ ) { array_push($hostsarray,(string)$vo[(string)$vo[$i]]["EPR"]); } } // Get all clusters informations $jiarray = array(); $ts1 = time(); for ( $k = 0; $k < count($hostsarray); $k++ ) { $clhost = $hostsarray[$k]; $clconn = $isis->cluster_info($clhost,$debug); if ( $clconn && !$sitetag[$clhost] ) { array_push($dsarray,$clconn); array_push($hnarray,$clhost); $sitetag[$clhost] = 1; /* filtering tag */ } } $ts2 = time(); if($debug) dbgmsg("
    ".$errors["126"]." (".($ts2-$ts1).$errors["104"].")
    "); $nhosts = count($dsarray); if ( !$nhosts ) { // NO SITES REPLY... $errno = "2"; echo "
    ".$errors[$errno]."\n"; $toppage->close(); return $errno; } // Loop on clusters; building user list $usrlist = array (); for ( $ids = 0; $ids < $nhosts; $ids++ ) { $dst = array (); $dst = $dsarray[$ids]; if ($dst) { $njobs = count($dst->Domains->AdminDomain->Services->ComputingService->ComputingEndpoint->ComputingActivities->ComputingActivity); if ($nusers > 0 || $njobs > 0) { // If there are valid entries, tabulate results $allres = array(); $alljobs = array(); $alljobs = $dst->Domains->AdminDomain->Services->ComputingService->ComputingEndpoint->ComputingActivities->ComputingActivity; $nusers = $results["count"]; // loop on users, filling $usrlist[$ussn]["name"] and counting $usrlist[$ussn]["hosts"] for ($j=0; $j<$nusers; $j++) { $ussn = $results[$j][USR_USSN][0]; $family = cnvname($ussn, 2); if ( $family == "host" || strlen($family) < 2 ) continue; $ussn = trim($ussn); $ussn = addslashes($ussn); // In case $ussn contains escape characters if ( !$usrlist[$ussn] ) { $usrlist[$ussn]["name"] = $family; $usrlist[$ussn]["org"] = getorg($ussn); $usrlist[$ussn]["jobs"] = 0; $usrlist[$ussn]["hosts"] = 0; } $usrlist[$ussn]["hosts"]++; } // loop on jobs, filling $usrlist[$jown]["jobs"] for ($k=0; $k<$njobs; $k++) { $jdn = $alljobs[$k]->{JOB_GOWN}; $jown = $alljobs[$k]->{JOB_GOWN}; $family = cnvname($jown, 2); if ( $family == "host" || strlen($family) < 2 ) continue; $jown = addslashes($jown); // In case $jown contains escape characters if ( !$usrlist[$jown] ) { // Shouldn't be happening, but... $usrlist[$jown]["name"] = $family; $usrlist[$jown]["org"] = getorg($jown); $usrlist[$jown]["jobs"] = 0; if( $debug == 2 ) dbgmsg("$family".$errors["127"]."$jdn".$errors["128"]."
    "); } $usrlist[$jown]["jobs"]++; } } } } uasort($usrlist,"hncmp"); // HTML table initialisation $utable = new LmTableSp($module,$toppage->$module); $urowcont = array(); if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $count = 0; foreach ( $usrlist as $ussn => $data ) { $name = $data["name"]; $org = $data["org"]; $nhosts = 0; $nhosts = $data["hosts"]; $jcount = 0; $jcount = $data["jobs"]; if ( $jcount < $jobnum ) continue; /* In case list only those with jobs */ $count++; $encuname = rawurlencode($ussn); $usrwin = popup("userlist.php?owner=$encuname",700,500,5); $urowcont[] = $count; $urowcont[] = "$name"; $urowcont[] = $org; $urowcont[] = $jcount; $urowcont[] = $nhosts; $utable->addrow($urowcont); $urowcont = array(); } $utable->close(); $toppage->close(); return 0; // Done ?> nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/man0000644000000000000000000000013213214316027022421 xustar000000000000000030 mtime=1513200663.839794448 30 atime=1513200668.717854109 30 ctime=1513200663.839794448 nordugrid-arc-5.4.2/src/services/ws-monitor/man/0000755000175000002070000000000013214316027022544 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ws-monitor/man/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712047045306024540 xustar000000000000000027 mtime=1352420038.089673 30 atime=1513200605.429080062 30 ctime=1513200663.836794411 nordugrid-arc-5.4.2/src/services/ws-monitor/man/Makefile.am0000755000175000002070000000003012047045306024576 0ustar00mockbuildmock00000000000000man_MANS = ws-monitor.7 nordugrid-arc-5.4.2/src/services/ws-monitor/man/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315735024550 xustar000000000000000030 mtime=1513200605.462080466 30 atime=1513200651.676645688 30 ctime=1513200663.837794424 nordugrid-arc-5.4.2/src/services/ws-monitor/man/Makefile.in0000644000175000002070000004661213214315735024627 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/ws-monitor/man DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/ws-monitor.7.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = ws-monitor.7 CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' man7dir = $(mandir)/man7 am__installdirs = "$(DESTDIR)$(man7dir)" NROFF = nroff MANS = $(man_MANS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = 
@ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV 
= @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = 
@pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ man_MANS = ws-monitor.7 all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/ws-monitor/man/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/ws-monitor/man/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): ws-monitor.7: $(top_builddir)/config.status $(srcdir)/ws-monitor.7.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man7: $(man_MANS) @$(NORMAL_INSTALL) test -z "$(man7dir)" || $(MKDIR_P) "$(DESTDIR)$(man7dir)" @list=''; test -n "$(man7dir)" || exit 0; \ { for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.7[a-z]*$$/p'; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^7][0-9a-z]*$$,7,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man7dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man7dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man7dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man7dir)" || exit $$?; }; \ done; } uninstall-man7: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man7dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.7[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^7][0-9a-z]*$$,7,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ test -z 
"$$files" || { \ echo " ( cd '$(DESTDIR)$(man7dir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(man7dir)" && rm -f $$files; } tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @list='$(MANS)'; if test -n "$$list"; then \ list=`for p in $$list; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \ if test -n "$$list" && \ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(MANS) installdirs: for dir in "$(DESTDIR)$(man7dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man7 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-man uninstall-man: uninstall-man7 .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-man7 \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-man uninstall-man7 # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/ws-monitor/man/PaxHeaders.7502/ws-monitor.7.in0000644000000000000000000000012711560313142025312 xustar000000000000000027 mtime=1304532578.373825 30 atime=1513200651.694645908 30 ctime=1513200663.839794448 nordugrid-arc-5.4.2/src/services/ws-monitor/man/ws-monitor.7.in0000755000175000002070000000347311560313142025366 0ustar00mockbuildmock00000000000000.TH ws-monitor 7 "2003-03-03" "NorduGrid @VERSION@" "NorduGrid Toolkit" .SH NAME ws-monitor \- Real-time NorduGrid monitoring tool .SH DESCRIPTION .B "WS Grid Monitor" is a set of .B PHP and .B Java scripts, providing a Web interface to the .B NorduGrid Information System. Should be working for any similar .B WS based service. .SH REQUIREMENTS .IP "GD library" http://www.boutell.com/gd .IP "PHP4 library" http://www.php.net, must be compiled with LDAP and GD extensions .IP "HTTP server" must be compiled with PHP4 .IP "Virtual Organisation" Is optional .SH INSTALLATION Copy all the files in a folder, accessible by the HTTP server. Modify .I settings.inc according to your MDS structure and liking. Run the whole stuff by loading .I loadmon.php into your favorite browser. .SH FILES .I loadmon.php .RS To monitor several servers at once, add hosts and DNs to the .IR $arrhost and, correspondingly, .IR $arrbdn arrays in .I loadmon.php .RE .I isattr.inc .I cnvname.inc .I cnvalias.inc .RS Making output more human-readable: modify .IR isattr.inc, .IR cnvname.inc, .IR cnvalias.inc. Otherwise, these files are not needed. .RE .I blacklist.inc .RS To prevent sites from being polled, modify array entries in .IR blacklist.inc. Otherwise, the file is not needed. .RE .I vo-users.php .RS Not needed when working without a Virtual Organisation. In such a case, remove the corresponding link from .I loadmon.php . 
.RE .I jobstat.php .RS When working without the .B NorduGrid Information System: to make sure that the job status is defined properly, edit .I jobstat.php (look for .B adjustment instructions in the code). .SH AUTHOR Oxana Smirnova .SH "SEE ALSO" .BR ngsub (1), .BR ngstat (1), .BR ngdel (1), .BR ngget (1), .BR ngsync (1), .BR ngcopy (1), .BR ngremove (1) nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315735023775 xustar000000000000000030 mtime=1513200605.317078692 30 atime=1513200651.629645113 30 ctime=1513200663.811794106 nordugrid-arc-5.4.2/src/services/ws-monitor/Makefile.in0000644000175000002070000006447213214315735024060 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/ws-monitor DIST_COMMON = $(dist_monitor_DATA) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in $(srcdir)/README.in \ $(srcdir)/ws-monitor.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = ws-monitor README CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p 
| sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(monitordir)" "$(DESTDIR)$(monitordir)" DATA = $(dist_monitor_DATA) $(monitor_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = 
@CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ 
PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = 
@ws_monitor_prefix@ SUBDIRS = man mon-icons lang includes monitordir = @ws_monitor_prefix@ dist_monitor_DATA = $(srcdir)/*.php $(srcdir)/*.js monitor_DATA = README all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/ws-monitor/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/ws-monitor/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): ws-monitor: $(top_builddir)/config.status $(srcdir)/ws-monitor.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ README: $(top_builddir)/config.status $(srcdir)/README.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_monitorDATA: $(dist_monitor_DATA) @$(NORMAL_INSTALL) test -z "$(monitordir)" || $(MKDIR_P) "$(DESTDIR)$(monitordir)" @list='$(dist_monitor_DATA)'; test -n "$(monitordir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(monitordir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(monitordir)" || exit $$?; \ done uninstall-dist_monitorDATA: @$(NORMAL_UNINSTALL) @list='$(dist_monitor_DATA)'; test -n "$(monitordir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(monitordir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(monitordir)" && rm -f $$files install-monitorDATA: $(monitor_DATA) @$(NORMAL_INSTALL) test -z "$(monitordir)" || $(MKDIR_P) "$(DESTDIR)$(monitordir)" @list='$(monitor_DATA)'; test -n "$(monitordir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(monitordir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(monitordir)" || exit $$?; \ done uninstall-monitorDATA: @$(NORMAL_UNINSTALL) @list='$(monitor_DATA)'; test -n "$(monitordir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(monitordir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(monitordir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. 
# To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(DATA) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(monitordir)" "$(DESTDIR)$(monitordir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-data-local install-dist_monitorDATA \ install-monitorDATA install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-dist_monitorDATA uninstall-monitorDATA .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-data-local install-dist_monitorDATA \ install-dvi install-dvi-am install-exec install-exec-am \ install-html install-html-am install-info install-info-am \ install-man install-monitorDATA install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am uninstall-dist_monitorDATA \ uninstall-monitorDATA install-data-local: $(MKDIR_P) $(DESTDIR)$(monitordir)/cache # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/help.php0000644000000000000000000000012411506361170023366 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.699728 30 ctime=1513200663.801793983 nordugrid-arc-5.4.2/src/services/ws-monitor/help.php0000755000175000002070000000054611506361170023443 0ustar00mockbuildmock00000000000000$module; $helptext = $data["help"]; echo $helptext; // Done $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/volist.php0000644000000000000000000000012411506361170023756 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.694728 30 ctime=1513200663.808794069 nordugrid-arc-5.4.2/src/services/ws-monitor/volist.php0000755000175000002070000001362511506361170024035 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; // Header table $toppage->tabletop("".$toptitle."

    "); // The main function $vos = array ( array ( "name" => "NorduGrid members", "server" => "grid-vo.nordugrid.org", "port" => "389", "dn" => "ou=people,dc=nordugrid,dc=org" ), array ( "name" => "NorduGrid guests", "server" => "https://www.pdc.kth.se/grid/swegrid-vo", "port" => "", "dn" => "vo.ng-guest-vo" ), array ( "name" => "NorduGrid developers", "server" => "http://www.nordugrid.org", "port" => "", "dn" => "", "group" => "developers.dn" ), array ( "name" => "NorduGrid tutorials", "server" => "grid-vo.nordugrid.org", "port" => "389", "dn" => "ou=tutorial,dc=nordugrid,dc=org" ), array ( "name" => "ATLAS test users (SWEGRID)", "server" => "https://www.pdc.kth.se", "port" => "", "dn" => "", "group" => "grid/swegrid-vo/vo.atlas-testusers-vo" ), /* array ( "name" => "NorduGrid services", "server" => "grid-vo.nordugrid.org", "port" => "389", "dn" => "ou=services,dc=nordugrid,dc=org" ), */ array ( "name" => "BaBar", "server" => "babar-vo.gridpp.ac.uk", "port" => "389", "dn" => "ou=babar,dc=gridpp,dc=ac,dc=uk" ), array ( "name" => "EDG ALICE", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=alice,dc=eu-datagrid,dc=org" ), array ( "name" => "EDG ATLAS", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=atlas,dc=eu-datagrid,dc=org" ), array ( "name" => "LCG ATLAS", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=atlas,dc=eu-datagrid,dc=org", "group" => "ou=lcg1" ), array ( "name" => "EDG CMS", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=cms,dc=eu-datagrid,dc=org" ), array ( "name" => "EDG LHC-B", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=lhcb,dc=eu-datagrid,dc=org" ), array ( "name" => "EDG D0", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=dzero,dc=eu-datagrid,dc=org", "group" => "ou=testbed1" ), array ( "name" => "EDG Earth Observation", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=earthob,dc=eu-datagrid,dc=org" ), array ( "name" => "EDG Genomics", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=biomedical,dc=eu-datagrid,dc=org", "group" => "ou=genomics" ), array ( "name" => "EDG Medical Imaging", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=biomedical,dc=eu-datagrid,dc=org", "group" => "ou=medical imaging" ), array ( "name" => "EDG ITeam", "server" => "marianne.in2p3.fr", "port" => "389", "dn" => "o=testbed,dc=eu-datagrid,dc=org", "group" => "ou=ITeam" ), array ( "name" => "EDG TSTG", "server" => "marianne.in2p3.fr", "port" => "389", "dn" => "o=testbed,dc=eu-datagrid,dc=org", "group" => "ou=TSTG" ), array ( "name" => "EDG Tutorials", "server" => "marianne.in2p3.fr", "port" => "389", "dn" => "o=testbed,dc=eu-datagrid,dc=org", "group" => "ou=EDGtutorial" ), array ( "name" => "EDG WP6", "server" => "marianne.in2p3.fr", "port" => "389", "dn" => "o=testbed,dc=eu-datagrid,dc=org", "group" => "ou=wp6" ) ); $votable = new LmTableSp($module,$toppage->$module); $rowcont = array (); foreach ( $vos as $contact ) { $server = $contact["server"]; $port = $contact["port"]; $dn = $contact["dn"]; $group = $contact["group"]; $nusers = ""; if ( $dn ) { // open ldap connection $ds = ldap_connect($server,$port); if ($ds) { if ( $group ) { $newfilter = "(objectclass=*)"; $newdn = $group.",".$dn; $newlim = array("dn","member"); $sr = @ldap_search($ds,$newdn,$newfilter,$newlim,0,0,10,LDAP_DEREF_NEVER); $groupdesc = @ldap_get_entries($ds,$sr); $nusers = $groupdesc[0]["member"]["count"]; } else { $sr = 
@ldap_search($ds,$dn,"(objectclass=organizationalPerson)",array("dn"),0,0,10,LDAP_DEREF_NEVER); if ($sr) $nusers = @ldap_count_entries($ds,$sr); } } $vostring = popup("vo-users.php?host=$server&port=$port&vo=$dn&group=$group",750,300,6); } else { $url = $server."/".$group; $users = file($url); $nusers = count($users); $vostring = popup($url,750,300,6); } $rowcont[] = "".$contact["name"].""; $rowcont[] = $nusers; $rowcont[] = $server; $votable->addrow($rowcont); $rowcont = array (); } $votable->close; $toppage->close; /* group http://www.nbi.dk/~waananen/ngssc2003.txt ### Datagrid VO Groups and their user mappings */ ?>nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/clusdes.php0000644000000000000000000000012411524504547024107 xustar000000000000000027 mtime=1297254759.916296 27 atime=1513200576.664728 30 ctime=1513200663.799793959 nordugrid-arc-5.4.2/src/services/ws-monitor/clusdes.php0000755000175000002070000001152311524504547024161 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; $cert = &$toppage->cert; // Header table $toppage->tabletop("","".$toptitle." $host"); // Array defining the attributes to be returned $qlim = array( QUE_NAME, QUE_QUED, QUE_GQUE, QUE_PQUE, QUE_LQUE, QUE_RUNG, QUE_GRUN, QUE_ASCP, QUE_MAXT, QUE_MINT, QUE_STAT ); $dn = DN_LOCAL; if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 15; $tout = 20; if( $debug ) dbgmsg("
    :::> ".$errors["101"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); // establish connection to the requested LDAP server $chost = $host; if ( $isse ) $chost=substr(strstr($host,":"),1); $type = 'DB'; $isis = new $type($cert,array()); if ($isis) { // If contact OK, search for clusters $ts1 = time(); // for($i=0; $i<30; $i++) $info = $isis->cluster_info($host, $debug); $isis->xml_nice_dump($info->Domains->AdminDomain->Services->ComputingService, "cluster", $debug); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["109"]." (".($ts2-$ts1).$errors["104"].")
    "); echo "
    "; if (true) { // If search returned, check that there are valid entries if ($host) { // If there are valid entries, tabulate results $qentries = $info; $queues = $qentries->Domains->AdminDomain->Services->ComputingService->ComputingShares->ComputingShare; $queues_new = $qentries->Domains->AdminDomain->Services->ComputingService->ComputingShare; $queues = @($queues) ? $queues : $queues_new ; $nqueues = count($queues); // HTML table initialisation $qtable = new LmTableSp($module,$toppage->$module); // loop on the rest of attributes define("CMPKEY",QUE_MAXT); usort($qentries,"quetcmp"); for ($k=0; $k<$nqueues; $k++) { $qname = $queues[$k]->{QUE_NAME}; $qstatus = $queues[$k]->{QUE_STAT}; $qid = $queues[$k]->ID; $queued = @($queues[$k]->{QUE_QUED}) ? ($queues[$k]->{QUE_QUED}) : 0; /* deprecated since 0.5.38 */ $locque = @($queues[$k]->{QUE_LQUE}) ? ($queues[$k]->{QUE_LQUE}) : 0; /* new since 0.5.38 */ $run = @($queues[$k]->{QUE_RUNG}) ? ($queues[$k]->{QUE_RUNG}) : 0; $cpumin = @($queues[$k]->{QUE_MINT}) ? $queues[$k]->{QUE_MINT} : "0"; $cpumax = @($queues[$k]->{QUE_MAXT}) ? $queues[$k]->{QUE_MAXT} : $queues[$k]->MaxCPUTime; //">"; $cpu = @($queues[$k]->{QUE_ASCP}) ? $queues[$k]->{QUE_ASCP} : "N/A"; $gridque = @($queues[$k]->{QUE_GQUE} && $queues[$k]->{QUE_LQUE}) ? ($queues[$k]->{QUE_GQUE} - $queues[$k]->{QUE_LQUE}) : 0; $gmque = @($queues[$k]->{QUE_PQUE}) ? ($queues[$k]->{QUE_PQUE}) : 0; /* new since 0.5.38 */ $gridrun = @($queues[$k]->{QUE_RUNG} && $queues[$k]->LocalRunningJobs) ? ($queues[$k]->{QUE_RUNG} - $queues[$k]->LocalRunningJobs) : 0; $quewin = popup("quelist.php?host=$host&port=$port&qname=$qname&qid=$qid",750,430,6); $gridque = $gridque + $gmque; if ( $queued == 0 ) $queued = $locque + $gridque; // filling the table $qrowcont[] = "$qname"; $qrowcont[] = "$qstatus"; $qrowcont[] = "$cpumin – $cpumax"; $qrowcont[] = "$cpu"; $qrowcont[] = "$run (".$errors["402"].": $gridrun)"; $qrowcont[] = "$queued (".$errors["402"].": $gridque)"; $qtable->addrow($qrowcont); $qrowcont = array (); } $qtable->close(); } else { $errno = 8; echo "
    ".$errors["8"]."\n"; $toppage->close(); return $errno; } } elseif ( !$isse ) { $errno = 5; echo "
    ".$errors["5"]."\n"; $toppage->close(); return $errno; } $toppage->close(); return 0; } else { $errno = 6; echo "
    ".$errors[$errno]."\n"; $toppage->close(); return $errno; } // Done ?> nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/mon-icons0000644000000000000000000000013213214316027023550 xustar000000000000000030 mtime=1513200663.912795341 30 atime=1513200668.717854109 30 ctime=1513200663.912795341 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/0000755000175000002070000000000013214316027023673 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/Russia.png0000644000000000000000000000012411506361170025603 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.667728 30 ctime=1513200663.888795047 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/Russia.png0000755000175000002070000000040011506361170025645 0ustar00mockbuildmock00000000000000PNG  IHDR `.gAMA7tEXtSoftwareAdobe ImageReadyqe<*PLTE$'3S6^\Dl3S6^\Dl$''v\IDATxbEĀ.@ʉXY 0133;`f $@ \Ȁ xx@102 F_CFY9IENDB`nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711634415166025675 xustar000000000000000027 mtime=1316100726.768006 30 atime=1513200605.478080661 30 ctime=1513200663.860794705 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/Makefile.am0000755000175000002070000000023211634415166025737 0ustar00mockbuildmock00000000000000monitoriconsdir = @ws_monitor_prefix@/mon-icons monitoricons_DATA = $(srcdir)/*.png $(srcdir)/*.php $(srcdir)/*.gif EXTRA_DIST = $(monitoricons_DATA) nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/Finland.png0000644000000000000000000000012411506361170025710 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.669728 30 ctime=1513200663.872794852 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/Finland.png0000755000175000002070000000050211506361170025755 0ustar00mockbuildmock00000000000000PNG  IHDR 2IbKGD pHYs  ~tIME?IDATxڕ;r0 DIg"((R Ev}/+>{je q:o™,ȱ#堊1u28hxqO@:1*J1;EO;KZFg_i)=^ nc%es9ņ+d U6U/x C< ~48Qp p#F{2IENDB`nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/Slovakia.png0000644000000000000000000000012411506361170026106 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.668728 30 ctime=1513200663.889795059 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/Slovakia.png0000755000175000002070000000050211506361170026153 0ustar00mockbuildmock00000000000000PNG  IHDR `.gAMA7tEXtSoftwareAdobe ImageReadyqe<`PLTElGd柮B/OSn꯻xOj[CAp`;_;Z'???F?]-Q,Cz_蝬_x_x fhIDATxbDĀ.@`NV&V@17+/3rr @@E`Fkt–\ vLx=J,M4K)~IENDB`nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315735025677 xustar000000000000000030 mtime=1513200605.511081065 30 atime=1513200651.727646311 30 ctime=1513200663.861794717 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/Makefile.in0000644000175000002070000004372713214315735025762 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/ws-monitor/mon-icons DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(monitoriconsdir)" DATA = $(monitoricons_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ 
ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = 
@LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = 
@lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ monitoriconsdir = @ws_monitor_prefix@/mon-icons monitoricons_DATA = $(srcdir)/*.png $(srcdir)/*.php $(srcdir)/*.gif EXTRA_DIST = $(monitoricons_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/ws-monitor/mon-icons/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/ws-monitor/mon-icons/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-monitoriconsDATA: $(monitoricons_DATA) @$(NORMAL_INSTALL) test -z "$(monitoriconsdir)" || $(MKDIR_P) "$(DESTDIR)$(monitoriconsdir)" @list='$(monitoricons_DATA)'; test -n "$(monitoriconsdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(monitoriconsdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(monitoriconsdir)" || exit $$?; \ done uninstall-monitoriconsDATA: @$(NORMAL_UNINSTALL) @list='$(monitoricons_DATA)'; test -n "$(monitoriconsdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(monitoriconsdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(monitoriconsdir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e 
"s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(monitoriconsdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-monitoriconsDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-monitoriconsDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man \ install-monitoriconsDATA install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am uninstall uninstall-am uninstall-monitoriconsDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT:
[unreadable binary image data omitted — tar entries for the mon-icons images and icon scripts: Spain.png, Slovenia.png, Portugal.png, Denmark.png, Latvia.png, USA.png, icon-close.png, Georgia.png, Poland.png, Netherlands.png, Germany.png, Romania.png, Morocco.png, icon_led.php, Switzerland.png, Algeria.png, icon_back.php, Ireland.png, spacer.gif, Sweden.png, icon-folks.png, World.png, Bulgaria.png, Czechia.png, icon-run.png, icon-refresh.png, Armenia.png, Italy.png, Hungary.png, Greece.png, icon-help.png]
pCBr@X / bAb1  3N@heb+q &@!K00Y 3H*@H c8Xa<BH0I B`c@4 pJN S pJj!kq.IENDB`nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/Japan.png0000644000000000000000000000012411506361170025366 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.673728 30 ctime=1513200663.880794949 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/Japan.png0000755000175000002070000000067311506361170025444 0ustar00mockbuildmock00000000000000PNG  IHDR 2IbKGD pHYs  ~tIME' nHIDATx-ΡSAmXAaXDA1<Y,]bPl;3 >RiQY3N붑JiyKcc3Rͥ~*ۭF=i]%zzvtwHĩ9H/y~AUTQ5^tF ^AJSʢWo8 EҬT?{xjH~:E(<~}ԫƬ'gOb%g$\˧C+L\t;HK#"IB!w|5cYV iBE2 )IY S>2Ӥ02 MIENDB`nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/icon_spacer.php0000644000000000000000000000012411506361170026625 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.678728 30 ctime=1513200663.910795316 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/icon_spacer.php0000755000175000002070000000034411506361170026676 0ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/Austria.png0000644000000000000000000000012411506361170025745 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.668728 30 ctime=1513200663.865794766 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/Austria.png0000755000175000002070000000023611506361170026016 0ustar00mockbuildmock00000000000000PNG  IHDR `.gAMA7tEXtSoftwareAdobe ImageReadyqe< PLTE3XIDATxb`B atk K CAhIENDB`nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/Lithuania.png0000644000000000000000000000012411506361170026253 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.676728 30 ctime=1513200663.882794974 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/Lithuania.png0000755000175000002070000000037611506361170026331 0ustar00mockbuildmock00000000000000PNG  IHDR 2IbKGD pHYs  ~tIME IDATxڍ1@ gi 5/y5HKar9!2՜[˫$KYa3iј& X!J` a\kz6{ln>C]g}#׵ޓLKE6F]/RbaIPdIENDB`nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/Norway.png0000644000000000000000000000012411506361170025614 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.676728 30 ctime=1513200663.884794998 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/Norway.png0000755000175000002070000000054411506361170025667 0ustar00mockbuildmock00000000000000PNG  IHDR 2IbKGD pHYs  ~tIME3~NIDATxڕ?JAglt B<4`e <l  &yi|:A#=@BQ:vU6R67 tRNSSOxIDATxb@ ;@@!$PiB`d``EHN B`fccDH ++B pZ@8%$S YfKB$ IT ] )@ (A )@8 pJN  #lIENDB`nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/icon_bar.php0000644000000000000000000000012411506361170026114 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.669728 30 ctime=1513200663.908795292 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/icon_bar.php0000755000175000002070000000241411506361170026165 0ustar00mockbuildmock00000000000000 9) {$x3 = $x1-16;} if (strlen($text) > 5) $x3 = 84; if (strlen($text) > 10) $x3 = 36; $im = @imagecreate(200,$y); $bgcolor = imagecolorallocate($im,204,204,204); $red = imagecolorallocate($im,97,144,0); $grey = imagecolorallocate($im,176,176,176); // $white = imagecolorallocate($im,255,255,255); $white = imagecolorallocate($im,48,48,48); if ( $x1 < $x3 ) $white = imagecolorallocate($im,82,82,82); if ( $x1 ) imagefilledrectangle($im,0,0,$x1,$y,$grey); if ( $xg1 ) imagefilledrectangle($im,0,0,$xg1,$y,$red); imagestring ($im, 3, $x3, 0, $text, $white); imagepng ($im); ImageDestroy($im); } $x = $_GET["x"]; $xg = 
$_GET["xg"]; $y = $_GET["y"]; $text = $_GET["text"]; do_bar($x,$xg,$y,$text); ?>nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/icon_start.php0000644000000000000000000000012411506361170026505 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.675728 30 ctime=1513200663.911795328 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/icon_start.php0000755000175000002070000000071611506361170026561 0ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/UK.png0000644000000000000000000000012411506361170024654 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.669728 30 ctime=1513200663.895795133 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/UK.png0000755000175000002070000000036411506361170024727 0ustar00mockbuildmock00000000000000PNG  IHDR `.gAMA7tEXtSoftwareAdobe ImageReadyqe<PLTEff3f333f3f;\IDATx< 0Bk޲&"?|fZ v tk&Sn ȞV ޯXkxnF6|~ǑT1`? IENDB`nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/Australia.png0000644000000000000000000000012411506361170026262 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.678728 30 ctime=1513200663.864794754 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/Australia.png0000755000175000002070000000054211506361170026333 0ustar00mockbuildmock00000000000000PNG  IHDR `.gAMA7tEXtSoftwareAdobe ImageReadyqe<`PLTElGdSn_xBxo⏠꯻/O_n;Z,Cz脗?]_x4柮֊IDATxbgcbb`吔gE@1  ع9%9d9 h@1@bgb H@D$ a8 @ h %IENDB`nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/China.png0000644000000000000000000000012411506361170025357 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.671728 30 ctime=1513200663.868794803 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/China.png0000755000175000002070000000043711506361170025433 0ustar00mockbuildmock00000000000000PNG  IHDR ޜbKGD pHYs  tIME .IDAT(-@w7-=YA QׄSG! AWB@ p 4aؤdFDZ<ISJrIoX}ۂ.4Aj7[),v*P9t5p]hByy,# _,FwbZ_MC/x14aIENDB`nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/icon-disk.png0000644000000000000000000000012411506361170026215 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.676728 30 ctime=1513200663.899795182 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/icon-disk.png0000755000175000002070000000070111506361170026263 0ustar00mockbuildmock00000000000000PNG  IHDRשgAMA7tEXtSoftwareAdobe ImageReadyqe<NPLTEm1 Մ -=%xaҖI(UJD٥id,%ZṪ0·tRNS"IDATxb@ S pJTBSTX E9E "ade>fVF@A$Ĺ%[,@P 1&v6 6v&1@A%YĸX @ S pJT! /#¹`!(H3@ S pJh3Mf}IENDB`nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/Turkey.png0000644000000000000000000000012411506361170025620 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.678728 30 ctime=1513200663.894795121 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/Turkey.png0000755000175000002070000000035211506361170025670 0ustar00mockbuildmock00000000000000PNG  IHDR 2I pHYs  IDATxcǠ@ `"I5 2PS4׋75D6xiMIz~:yBVB640I D ?)Tgq,_~!> _.vdR0¯'/4ad d fd, `EXYE& ``e`faB9_p@ q~d b@ '@1 ĀC\ lE"). @ 8E<@L S +Ei IENDB`nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/Iceland.png0000644000000000000000000000012411506361170025674 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.681728 30 ctime=1513200663.877794913 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/Iceland.png0000755000175000002070000000021111506361170025736 0ustar00mockbuildmock00000000000000PNG  IHDR ޜPIDAT(cZ YðQ % ^Y"0? 
AR{=,Ah a [sIENDB`nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/Estonia.png0000644000000000000000000000012411506361170025737 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.673728 30 ctime=1513200663.871794839 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/Estonia.png0000755000175000002070000000034411506361170026010 0ustar00mockbuildmock00000000000000PNG  IHDR `.gAMA7tEXtSoftwareAdobe ImageReadyqe<PLTE=];[;Z;[;Z=]@@@*ɋLIDATxb`bbALLȌ 00; 4@ @a3WhIENDB`nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/PaxHeaders.7502/icon-vo.png0000644000000000000000000000012411506361170025707 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.669728 30 ctime=1513200663.906795267 nordugrid-arc-5.4.2/src/services/ws-monitor/mon-icons/icon-vo.png0000755000175000002070000000105511506361170025760 0ustar00mockbuildmock00000000000000PNG  IHDRשgAMA7tEXtSoftwareAdobe ImageReadyqe<]PLTE1 -% JD,%̇ܐ"mҖax*٥ZT;5Մ ߴIUid%=04tRNSv+IDATxb@!$8   @p AfYYYF&i `l\@ Y@$edY `̌ q>q6@`IHv99KHrpII0Iqs¹l Alb tJEܹ`!@$xYXXa @%8A3A bs B|`'"@4ZS !d@1φp @@;x` BĀp6@L S 3B7;\YIENDB`nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/discover.php0000644000000000000000000000012411506361170024254 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.682728 30 ctime=1513200663.800793971 nordugrid-arc-5.4.2/src/services/ws-monitor/discover.php0000755000175000002070000001057411506361170024333 0ustar00mockbuildmock00000000000000title); $strings = &$toppage->strings; $giislist = &$toppage->giislist; $isattr = &$toppage->isattr; $errors = &$toppage->errors; require_once('attlist.inc'); $itself = $_SERVER["PHP_SELF"]; $ifsub = $_POST["submit"] ? TRUE : FALSE ; $ifsel = $_POST["select"] ? TRUE : FALSE ; echo "
    \n"; if ( $ifsub ) { // Call attributes list function for all selected arguments $request = $_POST; $attributes = array (); $signs = array (); $filters = array (); $attributes = $request["attributes"]; $signs = $request["signs"]; $filters = $request["filters"]; $thething = $request["scope"]; if ( $thething == "job" || $thething == "queue" || $thething == "authuser" ) $thething = "cluster"; // $attwin = popup("attlist.php?attribute=$encatt",650,300,7); do_attlist($thething,$attributes,$signs,$filters,$strings,$giislist); echo "
     "; echo " \n
    "; } elseif ( $ifsel ) { // If selection of search object and nr. of attributres is made, display options: $scope = $_POST; $object = $scope["object"]; $nlines = $scope["nlines"]; if ( !$nlines ) $nlines = 6; echo "

    ".$errors["416"].$object."

    \n"; echo "
    ".$errors["417"]."
    \n"; echo "
    ".$errors["418"]."


    \n"; $attwin = popup($itself,650,300,7); echo "
    "; echo "
    font_title."> $value 
    \n"; $rcol = "#ccffff"; for ( $i = 0; $i < $nlines; $i++ ) { echo "\n"; echo "\n"; echo "\n"; } echo "\n"; echo "
    "; echo "

      
    \n"; echo " \n"; } else { echo "

    ".$errors["419"]."

    \n"; echo "
    "; echo "

    ".$errors["423"]." \n"; echo "  ".$errors["424"]." \n"; echo "  \n"; echo "

    \n"; } echo "\n"; $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/vo-users.php0000644000000000000000000000012411506361170024221 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.695728 30 ctime=1513200663.807794057 nordugrid-arc-5.4.2/src/services/ws-monitor/vo-users.php0000755000175000002070000000752311506361170024300 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; // Header table $toppage->tabletop("","".$toptitle.""); // ldap search filter string for jobs $ufilter = "(objectclass=".OBJ_PERS.")"; $ulim = array ( "dn", VO_USSN, VO_USCN, VO_DESC, VO_INST, VO_MAIL ); if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 10; $tout = 15; if( $debug ) dbgmsg("
    :::> ".$errors["114"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); // Establish connection to the requested VO server if( $debug ) dbgmsg($errors["117"].$host.":".$port); $ds = ldap_connect($host,$port); if ($ds) { // If contact OK, search for people $ts1 = time(); $sr = @ldap_search($ds,$vo,$ufilter,$ulim,0,0,$tlim,LDAP_DEREF_NEVER,$tout); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["125"]." (".($ts2-$ts1).$errors["104"].")
    "); if ($sr) { // If search returned, check that there are valid entries $nmatch = @ldap_count_entries($ds,$sr); if ($nmatch > 0) { // If there are valid entries, tabulate results $entries = @ldap_get_entries($ds,$sr); $nusers = $entries["count"]; define("CMPKEY",VO_USSN); usort($entries,"ldap_entry_comp"); // HTML table initialization $utable = new LmTable($module,$toppage->$module); // loop on users $uscnt = 0; for ($i=1; $i<$nusers+1; $i++) { $dn = $entries[$i]["dn"]; if ( $group ) { $newfilter = "(member=$dn)"; $newdn = $group.",".$vo; $newlim = array("dn"); $gcheck = @ldap_search($ds,$newdn,$newfilter,$newlim,0,0,$tlim,LDAP_DEREF_NEVER,$tout); if ( !ldap_count_entries($ds,$gcheck) ) continue; } $usname = $entries[$i][VO_USCN][0]; // $usname = utf2cyr($usname,"n"); // $ussn = strstr($entries[$i][VO_DESC][0],"/"); $ussn = substr(strstr($entries[$i][VO_DESC][0],"subject="),8); $ussn = trim($ussn); $encuname = rawurlencode($ussn); $org = $entries[$i][VO_INST][0]; // $org = utf8_decode($org); $mail = $entries[$i][VO_MAIL][0]; $mailstr = "mailto:".$mail; $usrwin = popup("userlist.php?owner=$encuname",700,500,5); // filling the table $uscnt++; $urowcont[] = $uscnt; $urowcont[] = "$usname"; $urowcont[] = "$org"; $urowcont[] = "$mail"; $utable->addrow($urowcont); $urowcont = array (); } $utable->close(); } else { $errno = 10; echo "
    ".$errors[$errno]."\n"; return $errno; } } else { $errno = 5; echo "
    ".$errors[$errno]."\n"; return $errno; } ldap_free_result($sr); ldap_close($ds); return 0; } else { $errno = 6; echo "
    ".$errors[$errno]."\n"; return $errno; } // Done $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/loadmon.php0000644000000000000000000000012411524504547024076 xustar000000000000000027 mtime=1297254759.916296 27 atime=1513200576.699728 30 ctime=1513200663.802793995 nordugrid-arc-5.4.2/src/services/ws-monitor/loadmon.php0000755000175000002070000004313311524504547024152 0ustar00mockbuildmock00000000000000module; $strings = &$toppage->strings; $errors = &$toppage->errors; $isislist = &$toppage->isislist; $cert = &$toppage->cert; $yazyk = &$toppage->language; // Header table $toptit = date("Y-m-d T H:i:s"); $toppage->tabletop("".EXTRA_TITLE." ".$toppage->title."

    ","$toptit"); //********************** Legend - only needed for this module ********************* echo "
    \n"; echo "".$errors["401"].":\n"; echo "\"".$errors["305"]."\"".$errors["402"]." \"".$errors["306"]."\"".$errors["403"]."\n"; echo ""; $sewin = popup("sestat.php",650,200,8); $discwin = popup("discover.php",700,400,9); $vostring = popup("volist.php",440,330,11); $usstring = popup("allusers.php",650,700,12); $acstring = popup("allusers.php?limit=1",500,600,12); echo "
    \n"; //******** Authorised users echo "\"".$errors["307"]."\" \n"; //******** Active users echo "\"".$errors["308"]."\" \n"; //******** Search if ($for_brussel_demo) echo "\"".$errors["309"]."\" \n"; //******** Storage echo "\"".$errors["310"]."\" \n"; //******** Virtual Organisations if ($for_brussel_demo) echo "\"".$errors["311"]."\"\n"; echo "
    \n"; echo "
    \n"; //****************************** End of legend **************************************** // Some debug output if ( $debug ) { ob_end_flush(); ob_implicit_flush(); dbgmsg("
    ARC ".$toppage->getVersion()."
    "); } $tcont = array(); // array with rows, to be sorted $cachefile = CACHE_LOCATION."/loadmon-".$yazyk; $tcont = get_from_cache($cachefile,120); // If cache exists, skip ldapsearch if ( !$tcont || $debug || $display != "all" ) { // Do LDAP search $tcont = array(); // Setting time limits for ldapsearch $tlim = 20; $tout = 21; if($debug) dbgmsg("
    :::> ".$errors["101"].$tlim.$errors["102"].$tout.$errors["103"]." <:::
    "); // Array defining the attributes to be returned $lim = array( "dn", CLU_ANAM, CLU_ZIPC, CLU_TCPU, CLU_UCPU, CLU_TJOB, CLU_QJOB, CLU_PQUE, QUE_STAT, QUE_GQUE, QUE_QUED, QUE_LQUE, QUE_PQUE, QUE_RUNG, QUE_GRUN ); // Adjusting cluster display filter $showvo = ""; if ( substr($display,0,2) == "vo" ) { $showvo = substr(strrchr($display,"="),1); if ($debug) dbgmsg(" ::: ".$errors["105"]."$showvo"); } // Top ISIS server: get all from the pre-defined list $nisis = count($isislist); //========================= GET CLUSTER LIST ============================ $type = 'DB'; $isis = new $type($cert,$isislist); $ts1 = time(); $isis->connect($debug); $gentries = $isis->get_infos(); //======================================================================= $ts2 = time(); if($debug) dbgmsg("
    ".$errors["106"].$nisis." (".($ts2-$ts1).$errors["104"].")
    "); $nc = count($gentries); if ( !$nc ) { // NO SITES FOUND! $errno = "1"; echo "
    ".$errors[$errno]."\n"; $toppage->close(); return $errno; } $dsarray = array (); $hnarray = array (); $avhnarray = array (); $pnarray = array (); $sitetag = array (); /* a tag to skip duplicated entries */ // Purging cluster entries $ncc=0; foreach ( $gentries as $vo) { $sitec = count($vo)/2; $ncc += $sitec; for ( $i = 0; $i < $sitec; $i++ ) { if (!in_array((string)$vo[(string)$vo[$i]]["EPR"],$hnarray)){ array_push($hnarray,(string)$vo[(string)$vo[$i]]["EPR"]); } } } for ( $k = 0; $k < count($hnarray); $k++ ) { $clport = $gentries[$k]["port"]; $clhost = $hnarray[$k]; $clconn = $isis->cluster_info($clhost,$debug); if ( $clconn && !@$sitetag[$clhost] ) { array_push($dsarray,$clconn); array_push($avhnarray,$clhost); array_push($pnarray,$clport); $sitetag[$clhost] = 1; /* filtering tag */ if ($debug==2) dbgmsg("$k - $clhost:$clport "); } } $nhosts = count($dsarray); //count of avaliables Arex's if( $debug == 2 ) dbgmsg("
    ".$nhosts.$errors["108"]."
    "); if ( !$nhosts ) { // NO SITES REPLY... $errno = "2"; echo "
    ".$errors[$errno]."\n"; $toppage->close(); return $errno; } // Search all clusters and queues $ts1 = time(); $srarray = $isis->cluster_search($dsarray,DN_LOCAL,$filter,$lim,0,0,$tlim,LDAP_DEREF_NEVER); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["109"]." (".($ts2-$ts1).$errors["104"].")
    "); /* * $ts1 = time(); * $qsarray = @ldap_search($dsarray,DN_LOCAL,$qfilstr,$qlim,0,0,$tlim,LDAP_DEREF_NEVER); * // Fall back to a conventional LDAP * // if ( !count($qsrarray)) $qsarray = @ldap_search($dsarray,DN_LOCAL,$qfilstr,$qlim,0,0,$tlim,LDAP_DEREF_NEVER); * $ts2 = time(); if($debug) dbgmsg("
    ".$errors["110"]." (".($ts2-$ts1).$errors["104"].")
    "); */ // Loop on clusters for ( $ids = 0; $ids < $nhosts; $ids++ ) { $entries = array(); $jentries = array(); $gentries = array(); $rowcont = array(); $sr = $srarray[$ids]; $hn = $avhnarray[$ids]; $pn = $pnarray[$ids]; $ds = $dsarray[$ids]; $nr = $isis->count_entries($sr); if ( !$sr || !$ds || !$nr ) { if ( $error == "Success" ) $error = $errors["3"]; if ( $debug ) dbgmsg("".$errors["111"]."$hn ($error)
    "); $sr = FALSE; } if ($ds && $sr) { $entries = $isis->get_entries($ds,$sr); $cluster = $entries->Domains->AdminDomain->Services->ComputingService; $queues = $entries->Domains->AdminDomain->Services->ComputingService->ComputingShares->ComputingShare; $queues_new = $entries->Domains->AdminDomain->Services->ComputingService->ComputingShare; $queues = @($queues) ? $queues : $queues_new ; $nclusters = count($queues); /* Actually, queue blocks, 2+ */ if ( !$nclusters ) { if ( $debug ) dbgmsg("$hn:".$errors["3"]."
    "); continue; } $nclu = 0; $nqueues = 0; $allqrun = 0; $gridjobs = 0; $allqueued = 0; $gridqueued = 0; $lrmsqueued = 0; $prequeued = 0; $totgridq = 0; $toflag2 = FALSE; $stopflag = FALSE; $curdn = $entries["Owner"]; // check if it is a site or a job; count $preflength = strpos($curdn,"-"); //$object = substr($curdn,$preflength+1,strpos($curdn,"-",$preflength+1)-$preflength-1); $object = "cluster"; if ($object=="cluster") { $curname = $hn; $curport = $pn; // Country name massaging $vo = (string)$cluster->{CLU_ZIPC}->Country; if ($debug==2) dbgmsg("$ids: $curname".$errors["112"]."$vo
    "); $vostring = $_SERVER['PHP_SELF']."?display=vo=$vo"; $country = $vo; if ( $yazyk !== "en" ) $country = $strings["tlconvert"][$vo]; $rowcont[] = "\"".$errors["312"]."\" ".$country." "; $computingmanager = $cluster->ComputingManager; $curtotcpu = @($computingmanager->{CLU_TCPU}) ? $computingmanager->{CLU_TCPU} : $computingmanager->TotalSlots; if ( !$curtotcpu && $debug ) dbgmsg("$curname".$errors["113"]."
    "); //for only test $curalias = $entries->Domains->AdminDomain->Services->ComputingService->Name; // Manipulate alias: replace the string if necessary and cut off at 22 characters if (file_exists("cnvalias.inc")) include('cnvalias.inc'); if ( strlen($curalias) > 22 ) $curalias = substr($curalias,0,21) . ">"; $curtotjobs = @($cluster->{CLU_TJOB}) ? $cluster->{CLU_TJOB} : 0; $curusedcpu = @($computingmanager->SlotsUsedByGridJobs && $computingmanager->SlotsUsedByLocalJobs) ? $computingmanager->SlotsUsedByLocalJobs + $computingmanager->SlotsUsedByGridJobs : -1; $totqueued = @($cluster->{CLU_QJOB}) ? $cluster->{CLU_QJOB} : 0; /* deprecated since 0.5.38 */ $gmqueued = @($cluster->{CLU_PQUE}) ? $cluster->{CLU_PQUE} : 0; /* new since 0.5.38 */ $clstring = popup("clusdes.php?host=$curname&port=$curport",700,620,1); $nclu++; } for ($i=0; $i<$nclusters; $i++) { $qstatus = $queues[$i]->{QUE_STAT}; if ( $qstatus != "production" ) $stopflag = TRUE; $allqrun += @($queues[$i]->{QUE_RUNG}) ? ($queues[$i]->{QUE_RUNG}) : 0; $gridjobs += @($queues[$i]->RunningJobs && $queues[$i]->LocalRunningJobs) ? ($queues[$i]->RunningJobs - $queues[$i]->LocalRunningJobs) : 0; $gridqueued += @($queues[$i]->WaitingJobs && $queues[$i]->LocalWaitingJobs) ? ($queues[$i]->WaitingJobs - $queues[$i]->LocalWaitingJobs) : 0; $allqueued += @($queues[$i]->{QUE_QUED}) ? ($queues[$i]->{QUE_QUED}) : 0; /* deprecated since 0.5.38 */ $lrmsqueued += @($queues[$i]->{QUE_LQUE}) ? ($queues[$i]->{QUE_LQUE}) : 0; /* new since 0.5.38 */ $prequeued += @($queues[$i]->{QUE_PQUE}) ? ($queues[$i]->{QUE_PQUE}) : 0; /* new since 0.5.38 */ $nqueues++; } if ( !$nclu && $nqueues ) { if ( $debug ) dbgmsg("$hn:".$errors["3"].": ".$errors["111"].$errors["410"]."
    "); continue; } if ( $nclu > 1 && $debug ) dbgmsg("$hn:".$errors["3"].": $nclu ".$errors["406"]."
    "); if (!$nqueues) $toflag2 = TRUE; if ($debug==2 && $prequeued != $gmqueued) dbgmsg("$curname: cluster-prelrmsqueued != sum(queue-prelrmsqueued)"); $allrun = ($curusedcpu < 0) ? $allqrun : $curusedcpu; if ($gridjobs > $allrun) $gridjobs = $allrun; /* For versions < 0.5.38: * Some Grid jobs are counted towards $totqueued and not towards $allqueued * (those in GM), so $totqueued - $allqueued = $gmqueued, * and $truegridq = $gmqueued + $gridqueued * and $nongridq = $totqueued - $truegridq == $allqueued - $gridqueued * hence $truegridq = $totqueued - $nongridq */ $nongridq = ($totqueued) ? $allqueued - $gridqueued : $lrmsqueued; $truegridq = ($totqueued) ? $totqueued - $nongridq : $gridqueued + $prequeued; // temporary hack: // $truegridq = $gridqueued; // $formtgq = sprintf(" s",$truegridq); $formngq = sprintf("\ \;s",$nongridq); $localrun = $allrun - $gridjobs; $gridload = ($curtotcpu > 0) ? $gridjobs/$curtotcpu : 0; $clusload = ($curtotcpu > 0) ? $allrun/$curtotcpu : 0; $tstring = urlencode("$gridjobs+$localrun"); $jrstring = popup("jobstat.php?host=$curname&port=$curport&status=Running&jobdn=all",600,500,2); $jqstring = popup("jobstat.php?host=$curname&port=$curport&status=Queueing&jobdn=all",600,500,2); if ( $toflag2 ) { $tstring .= " (no queue info)"; // not sure if this is localizeable at all } elseif ( $stopflag ) { $tstring .= " (queue $qstatus)"; // not sure if this is localizeable at all } // Add a cluster row $rowcont[] = " $curalias"; $rowcont[] = "$curtotcpu"; if ( $curtotcpu ) { $rowcont[] = "\"$gridjobs+$localrun\""; } else { $rowcont[] = "\"$gridjobs+$localrun\""; } // $rowcont[] = "$totqueued"; $rowcont[] = "$truegridq+$nongridq"; // Not adding anymore, cache instead // $ctable->addrow($rowcont); $tcont[] = $rowcont; $rowcont = array (); } } // Dump the collected table cache_table($cachefile,$tcont); } // HTML table initialization $ctable = new LmTableSp($module,$toppage->$module); // Sort /** possible ordering keywords: * country - sort by country, default * cpu - sort by advertised CPU number * grun - sort by number of running Grid jobs */ $ostring = "comp_by_".$order; usort($tcont,$ostring); $nrows = count($tcont); $votolink = array(); $affiliation = array(); foreach ( $tcont as $trow ) { $vo = $trow[0]; $vo = substr(stristr($vo,"./mon-icons/"),12); $vo = substr($vo,0,strpos($vo,".")); if ( !in_array($vo,$votolink) ) $votolink[]=$vo; array_push($affiliation,$vo); } //var_dump($affiliation); $affcnt = array_count_values($affiliation); //var_dump($affcnt); $prevvo = "boo"; $sumcpu = 0; $sumgridjobs = 0; $sumlocljobs = 0; $sumclusters = 0; $sumgridqueued = 0; $sumloclqueued = 0; //$sumqueued = 0; // actual loop foreach ( $tcont as $trow ) { $gridjobs = $trow[3]; $gridjobs = substr(stristr($gridjobs,"alt=\""),5); $gridjobs = substr($gridjobs,0,strpos($gridjobs,"+")); $localrun = $trow[3]; $localrun = substr(stristr($localrun,"+"),1); $localrun = substr($localrun,0,strpos($localrun,"\" w")); $truegridq = $trow[4]; $truegridq = substr(stristr($truegridq,""),3); $truegridq = substr($truegridq,0,strpos($truegridq,"")); $nongridq = $trow[4]; $nongridq = substr(stristr($nongridq,"+"),1); $vo = $trow[0]; $vo = substr(stristr($vo,"./mon-icons/"),12); $vo = substr($vo,0,strpos($vo,".")); if ( @$showvo && $showvo != $vo ) continue; $sumcpu += $trow[2]; $sumgridjobs += $gridjobs; $sumlocljobs += $localrun; $sumgridqueued += $truegridq; $sumloclqueued += $nongridq; // $sumqueued += $totqueued; $sumclusters ++; if ( $vo != $prevvo && $order == "country" ) { // start new country 
rowspan $prevvo = $vo; $vostring = $trow[0]; $ctable->addspacer("#000099"); $ctable->rowspan( $affcnt[$vo], $vostring, "#FFF2DF" ); $tcrow = array_shift($trow); $ctable->addrow($trow); } else { if ( $order == "country" ) $tcrow = array_shift($trow); $ctable->addrow($trow); } } $tcont = array(); $ctable->addspacer("#990000"); $rowcont[] = "".$errors["405"].""; $rowcont[] = "$sumclusters".$errors["406"].""; $rowcont[] = "$sumcpu"; $rowcont[] = "$sumgridjobs + $sumlocljobs"; $rowcont[] = "$sumgridqueued + $sumloclqueued"; // $rowcont[] = "$sumqueued"; $ctable->addrow($rowcont, "#ffffff"); $ctable->close(); if ( @$showvo ) { echo "
    \n"; foreach ( $votolink as $volink ) { $vostring = $_SERVER['PHP_SELF']."?debug=$debug&display=vo=$volink"; $voimage = "\"".$errors["312"]."\""; echo "$voimage  "; } echo "".$errors["409"]."
    \n"; echo "
    \n"; } $toppage->close(); return 0; // Done $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/mylo.js0000644000000000000000000000012411506361170023243 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.665728 30 ctime=1513200663.810794093 nordugrid-arc-5.4.2/src/services/ws-monitor/mylo.js0000755000175000002070000000051711506361170023316 0ustar00mockbuildmock00000000000000function mylo (fnam,lnam,dom1,dom2){ if ( lnam == "" ) { var name = fnam; } else { var name = fnam + "." + lnam; } var host = dom1 + "." + dom2; var complete = name + "@" + host; output = "" + complete + ""; document.write(output); return output; } nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/attlist.php0000644000000000000000000000012411506361170024122 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.695728 30 ctime=1513200663.798793946 nordugrid-arc-5.4.2/src/services/ws-monitor/attlist.php0000755000175000002070000000206511506361170024175 0ustar00mockbuildmock00000000000000title); $strings = &$toppage->strings; $giislist = &$toppage->giislist; require_once('attlist.inc'); $object = $_GET["object"]; $attribute = $_GET["attribute"]; $filter = $_GET["filter"]; if ( !$filter ) $filter=""; if ( !$object ) $object="cluster"; $attribute = rawurldecode($attribute); $filter = rawurldecode($filter); if ( $attribute[1]==":") { $attribute = unserialize($attribute); $filter = unserialize($filter); $attributes = $attribute; $filters = $filter; $n = count($attributes); $signs = array_fill(0,$n,"="); } else { $attributes = array ($attribute); $signs = array ("="); $filters = array ($filter); } do_attlist($object,$attributes,$signs,$filters,$strings,$giislist); // Done $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/quelist.php0000644000000000000000000000012411524504547024133 xustar000000000000000027 mtime=1297254759.916296 27 atime=1513200576.699728 30 ctime=1513200663.803794008 nordugrid-arc-5.4.2/src/services/ws-monitor/quelist.php0000755000175000002070000001263511524504547024212 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; $cert = &$toppage->cert; $clstring = popup("clusdes.php?host=$host&port=$port",700,620,1); // Header table $toppage->tabletop("","".$toptitle." ".$qname." (".$host.")"); $lim = array( "dn", JOB_NAME, JOB_GOWN, JOB_SUBM, JOB_STAT, JOB_COMP, JOB_USET, JOB_USEM, JOB_ERRS, JOB_CPUS, JOB_EQUE ); if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 15; $tout = 20; if( $debug ) dbgmsg("
    :::> ".$errors["101"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); // ldapsearch filter strings for cluster and queues $filstr = "(objectclass=".OBJ_AJOB.")"; $dn = DN_LOCAL; $topdn = DN_GLOBL; // Establish connection to the requested information server $type = 'DB'; $isis = new $type($cert,array()); if ($isis) { // If contact OK, search for NorduGrid clusters if ($host) { $ts1 = time(); $entries = $isis->cluster_info($host); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["110"]." (".($ts2-$ts1).$errors["104"].")
    "); if ($entries) { $queues = $entries->Domains->AdminDomain->Services->ComputingService->ComputingShares->ComputingShare; $queues_new = $entries->Domains->AdminDomain->Services->ComputingService->ComputingShare; $queues = @($queues) ? $queues : $queues_new ; $actual_queue = NULL; foreach ($queues as $queue) { if ($queue->ID == $qid) $actual_queue = $queue; } $isis->xml_nice_dump($actual_queue, "queue", $debug); echo "
    "; $jobs = $entries->Domains->AdminDomain->Services->ComputingService->ComputingEndpoint->ComputingActivities->ComputingActivity; $njobs = count($jobs); define("CMPKEY",JOB_SUBM); usort($entries,"ldap_entry_comp"); // HTML table initialisation $ltable = new LmTable($module,$toppage->$module); // loop on jobs $nj = 0; for ($i=0; $i<$njobs; $i++) { $equeue = (string)$jobs[$i]->{JOB_EQUE}; if ( $equeue !== $qname ) { if ( $debug == 2 ) dbgmsg(",".$equeue." != ".$qname.","); continue; } $jobdn = rawurlencode($jobs[$i]->ID); $curstat = $jobs[$i]->{JOB_STAT}; $stahead = substr($curstat,0,12); $ftime = ""; if ($stahead=="FINISHED at:") { $ftime = substr(strrchr($curstat, " "), 1); } elseif ($curstat=="FINISHED") { $ftime = $jobs[$i]->{JOB_COMP}; } if ( $ftime ) { $ftime = cnvtime($ftime); $curstat = "FINISHED at: ".$ftime; } $uname = $jobs[$i]->{JOB_GOWN}; $encuname = rawurlencode($uname); $family = cnvname($uname, 2); $jname = htmlentities($jobs[$i]->{JOB_NAME}); $jobname = ($jobs[$i]->{JOB_NAME}) ? $jname : "N/A"; $time = ($jobs[$i]->{JOB_USET}) ? $jobs[$i]->{JOB_USET} : ""; $memory = ($jobs[$i]->{JOB_USEM}) ? $jobs[$i]->{JOB_USEM} : ""; $ncpus = ($jobs[$i]->{JOB_CPUS}) ? $jobs[$i]->{JOB_CPUS} : ""; $error = ($jobs[$i]->{JOB_ERRS}); if ( $error ) $error = ( preg_match("/user/i",$error) ) ? "X" : "!"; $status = "All"; $newwin = popup("jobstat.php?host=$host&port=$port&status=$status&jobdn=$jobdn",750,430,4); $usrwin = popup("userlist.php?bdn=$topdn&owner=$encuname",700,500,5); // filling the table $nj++; $lrowcont[] = "$nj $error"; $lrowcont[] = "$jobname"; $lrowcont[] = "$family"; $lrowcont[] = "$curstat"; $lrowcont[] = "$time"; $lrowcont[] = "$memory"; $lrowcont[] = "$ncpus"; $ltable->addrow($lrowcont); $lrowcont = array (); } $ltable->close(); } else { $errno = "4"; echo "
    ".$errors[$errno]."\n"; $toppage->close(); return $errno; } } else { $errno = "5"; echo "
    ".$errors[$errno]."\n"; $toppage->close(); return $errno; } $toppage->close(); return 0; } else { $errno = "6"; echo "
    ".$errors[$errno]."\n"; $toppage->close(); return $errno; } // Done $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/README.in0000644000000000000000000000012611506361170023214 xustar000000000000000027 mtime=1293542008.286757 29 atime=1513200651.65964548 30 ctime=1513200663.812794118 nordugrid-arc-5.4.2/src/services/ws-monitor/README.in0000755000175000002070000000515711506361170023272 0ustar00mockbuildmock00000000000000NorduGrid ARC version @VERSION@ Grid Monitor ============ Description ----------- Set of PHP scripts, providing a Web interface to the NorduGrid Information System. Should be working for any similar LDAP-based service, if the schema configuration is done carefuly. The directory contains: cache - directory for front page cache includes - directory with common methods and configuration file (settings.inc) lang - directory with localizations man - directory for the man page mon-icons - directory with icons allusers.php - list grid users attlist.php - show values of selected attributes on the grid clusdes.php - show cluster or storage information discover.php - list attributes specific for an object for consecutive search ws-monitor.in - lynx call for the monitor (template) help.php - print help jobstat.php - show running/other jobs in a queue loadmon.php - main grid monitor script Makefile.am - Makefile template monitor.js - Java script for pop-up screens mylo.js - Java script for mail addresses quelist.php - show queue details and jobs README.in - README file (template) sestat.php - list storage elements userlist.php - show allowed sites and list of jobs of a user volist.php - static list of some VOs vo-users.php - lists users in a VO Requirements ------------ - GD library (http://www.boutell.com/gd/) - LDAP library (e.g., http://www.openldap.org) - PHP4 or PHP5 (http://www.php.net) compiled with LDAP and GD extensions - HTTP server compiled with PHP4 or PHP5 - Working ARC information system instance or a similar LDAP-based service - Optional: running Virtual Organisation LDAP-based service Installation ------------ 1. Copy all the files in a folder, accessible by the HTTP server 2. Verify that this folder contains a directory called "cache" and that it is writeable by the HTTP server. If your server is configured to have write access only to a specific location, such as "../htdata", modify CACHE_LOCATION value in "includes/settings.inc" accordingly 3. Modify "includes/settings.inc" according to your infosystem structure and liking: most likely, you want to modify the $giislist array by removing some GIISes/GRISes and adding other(s) 4. Run the whole stuff by loading "loadmon.php" into your browser Fine tuning ----------- - Making output more human-readable: modify "/lang/*.inc", "includes/cnvname.inc", "includes/cnvalias.inc". - Preventing sites from being polled: modify "includes/blacklist.inc". Otherwise, the file is not needed. 
Contact ------- Oxana Smirnova, oxana.smirnova@hep.lu.se nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/ws-monitor.in0000644000000000000000000000012711506361170024376 xustar000000000000000027 mtime=1293542008.286757 30 atime=1513200651.644645297 30 ctime=1513200663.812794118 nordugrid-arc-5.4.2/src/services/ws-monitor/ws-monitor.in0000755000175000002070000000010411506361170024436 0ustar00mockbuildmock00000000000000#!/bin/sh lynx http://localhost/@monitor_local_prefix@/loadmon.php nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/db.php0000644000000000000000000000012411506361170023023 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.697728 30 ctime=1513200663.800793971 nordugrid-arc-5.4.2/src/services/ws-monitor/db.php0000755000175000002070000003376511506361170023111 0ustar00mockbuildmock00000000000000place_of_cert = $args[0]; for( $i=0, $n=count($args[1]); $i<$n; $i++ ) $this->add($args[1][$i]); $this->type = ""; $this->error = ""; } public function add( /*string*/ $name = null ) { if( isset($name) ) $this->isis_urls[$name] = $name; } function Country($SSPair) { $country; foreach($SSPair as $a) { if ($a->Name = "Country") { $country = $a->Value; break; } } return $country; } public function connect( $debug = null, $type_ = "arex" ) { $this->type = $type_; switch ($this->type) { case "arex": $isisRequest["QueryString"] = "/RegEntry/SrcAdv[Type = 'org.nordugrid.execution.arex']"; break; case "storage": //$isisRequest["QueryString"] = "/RegEntry/SrcAdv[Type = 'org.nordugrid.storage.bartender']"; $isisRequest["QueryString"] = "/RegEntry/SrcAdv[Type = 'org.nordugrid.storage.shepherd']"; break; default: echo "Invalid type: ". $this->type; $this->error = "Invalid type add in the constructor: ". $this->type; return NULL; } ini_set("soap.wsdl_cache_enabled", "1"); $all_response = ""; foreach( $this->isis_urls as $url) { $options = array("location" => $url, "exceptions" => true, ); if (preg_match("/^https:/", $url)) $options["local_cert"] = $this->place_of_cert; $isis_SoapClient = new SoapClient("http://www.nordugrid.org/schemas/isis/2007/06/index.wsdl", $options ); try { $response = @$isis_SoapClient->Query($isisRequest); } catch (SoapFault $fault) { if($debug==2) echo '
    SOAP fault at '.$url.': '. $fault->faultstring.' ('.$fault->faultcode . ")\n"; } catch (Exception $e) { if($debug==2){ //echo 'Caught exception: ', $e->getMessage(), "\n"; echo 'ISIS is not avaliable: ', $url, "\n"; } continue; } $all_response = $all_response.$response->any; if ($this->only_one_isis_checking) break; } $this->entries = simplexml_load_string("".$all_response.""); // Response processing/transformation for ($i=0; $ientries); $i++) { $sid = $this->entries->RegEntry[$i]->MetaSrcAdv->ServiceID; // add all SSPair elements into the array $sspairs = array(); foreach($this->entries->RegEntry[$i]->SrcAdv->SSPair as $item) { $sspairs[(string)$item->Name] = (string)$item->Value; } $sspairs["EPR"] = (string)$this->entries->RegEntry[$i]->SrcAdv->EPR->Address; if (array_key_exists((string)$sid, $this->datas[(string)$sspairs["Country"]])) continue; $this->datas[(string)$sspairs["Country"]][(string)$sid] = $sspairs; $this->datas[(string)$sspairs["Country"]][((int)(count($this->datas[(string)$sspairs["Country"]])/2))] = (string)$sid; } } public function get_infos() { return $this->datas; } public function cluster_info( /*string*/ $url = null, $debug = null, $dump_or_query = true, $auth_type = null ) { if ($this->error != "") { echo $this->error; return NULL; } $ns = "http://www.w3.org/2005/08/addressing"; //Namespace of the WS. if ( $dump_or_query ){ $header = new SOAPHeader( $ns, 'Action','http://docs.oasis-open.org/wsrf/rpw-2/GetResourcePropertyDocument/GetResourcePropertyDocumentRequest'); } else { $header = new SOAPHeader( $ns, 'Action','http://docs.oasis-open.org/wsrf/rpw-2/QueryResourceProperties/QueryResourcePropertiesRequest'); } ini_set("soap.wsdl_cache_enabled", "1"); $options = array("location" => $url, "exceptions" => true, ); if (preg_match("/^https:/", $url)) $options["local_cert"] = $this->place_of_cert; $arex_SoapClient = new SoapClient("http://www.nordugrid.org/schemas/a-rex/arex.wsdl", $options ); $arex_SoapClient->__setSoapHeaders($header); try{ if ( $dump_or_query ){ // Full LIDI information $response = @$arex_SoapClient->GetResourcePropertyDocument(array()); } else { // Part of the LIDI information $query["QueryExpression"]["any"] = "//InfoRoot"; $query["QueryExpression"]["Dialect"] = "http://www.w3.org/TR/1999/REC-xpath-19991116"; $response = @$arex_SoapClient->QueryResourceProperties($query); } //print($response->any); /*martoni*/ $arex_info = simplexml_load_string($response->any); return $arex_info; } catch (SoapFault $fault) { if($debug==2) echo '
    SOAP fault at '.$url.': '. $fault->faultstring.' ('.$fault->faultcode . ")\n"; } catch (Exception $e) { if($debug==2){ //echo 'Caught exception: ', $e->getMessage(), "\n"; switch ($this->type) { case "arex": echo '
    A-rex is not available: ', $url, "\n"; break; case "storage": echo '
    Storage is not available: ', $url, "\n"; break; default: echo "Invalid type: ". $this->type; return NULL; } } return; } } public function cluster_search($infos, /*string*/ $filter) { //TODO: search method is not working now return $infos; } public function count_entries($infos) { //TODO: count method is not working now return count($infos); } public function get_entries($infos) { //TODO: get method is not working now return $infos; } private $result = array(); private function recursive_xml_dump($root) { foreach($root->children() as $children) { if (count($children->children()) == 0) { $this->result[$children->getName()][] = (string) $children; } else { if ($children->getName() != "ComputingActivities" && $children->getName() != "ComputingShares" && $children->getName() != "Contact") $this->recursive_xml_dump($children); } } } public function xml_nice_dump($xml = NULL, $scope = "cluster", $debug = 0) { global $errors, $strings; if (is_object($xml)) { require_once('mylo.inc'); $this->recursive_xml_dump($xml); $dtable = new LmTableSp("ldapdump",$strings["ldapdump"]); $drowcont = array(); if ($debug >= 10) { echo "Debug level: $debug
    \n"; echo "\n"; } $expected_names = array(); //TODO: define visible elements $expected_names["cluster"] = array("URL", "CPUModel", "CPUClockSpeed", "CPUTimeScalingFactor", "CPUVersion", "CPUVendor", "PhysicalCPUs", "TotalLogicalCPUs", "TotalSlots", "Name", /*"ID",*/ "OSFamily", "OSName", "OSVersion", "Platform", "VirtualMachine", "ProductName", "ProductVersion", "TrustedCA", "AppName", "ApplicationEnvironment", "MainMemorySize", "VirtualMemorySize", "NetworkInfo", "Country", "Place", "PostCode", "Address", "Latitude", "Longitude", "OtherInfo", "Benchmark" , "BaseType", "Homogeneous", "SlotsUsedByGridJobs", "SlotsUsedByLocalJobs", "WorkingAreaFree", "WorkingAreaGuaranteed", "WorkingAreaLifeTime", "WorkingAreaShared", "WorkingAreaTotal", "ConnectivityIn", "ConnectivityOut", "PreLRMSWaitingJobs", "RunningJobs", "StagingJobs", "SuspendedJobs", "TotalJobs", "WaitingJobs", "Implementor", "ImplementationName", "ImplementationVersion", "QualityLevel", "ServingState", "Type" ); $expected_names["job"] = array("ID", "Owner", "LocalOwner", "Error", "ComputingShareID", "IDFromEndpoint", "JobDescription", "ProxyExpirationTime", "Queue", "RequestedSlots", "RestartState", "State", "StdErr", "StdIn", "StdOut", "SubmissionHost", "SubmissionTime", "Type", "WorkingAreaEraseTime"); $expected_names["queue"] = array("ID", "ComputingEndpointID", "ExecutionEnvironmentID", "Description", "FreeSlots", "FreeSlotsWithDuration", "LocalRunningJobs", "LocalSuspendedJobs", "LocalWaitingJobs", "Rule", "Scheme", "MappingQueue", "MaxCPUTime", "MaxRunningJobs", "MaxSlotsPerJob", "MaxVirtualMemory", "MaxWallTime", "MinCPUTime", "MinWallTime", "Name", "PreLRMSWaitingJobs", "RequestedSlots", "RunningJobs", "ServingState", "StagingJobs", "SuspendedJobs", "TotalJobs", "UsedSlots", "WaitingJobs"); $app_count = 0; foreach ($expected_names[$scope] as $expected_name) { if (isset($this->result[$expected_name])) { $drowcont[0] = $expected_name; if ( $expected_name == "AppName") { $drowcont[0] = "ApplicationEnvironments"; $drowcont[1] = ""; } else if (count($this->result[$expected_name]) == 1) { $drowcont[1] = $this->result[$expected_name][0]; } else if (count($this->result[$expected_name]) > 1) { $drowcont[1] = ""; // If all elements equal, then show only one of them. if ( $all_elements_equal == true ) $drowcont[1] = $first_value; } else if (count($this->result[$expected_name]) < 1) { $drowcont[1] = " "; } if ($expected_names[$scope][0] == $expected_name) { $drowcont[0] = "".$drowcont[0].""; $dtable->addrow($drowcont, "#cccccc"); } else $dtable->addrow($drowcont); } } $dtable->close(); } } } ?> nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/sestat.php0000644000000000000000000000012311506361170023740 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.664728 29 ctime=1513200663.80479402 nordugrid-arc-5.4.2/src/services/ws-monitor/sestat.php0000755000175000002070000001347611506361170024024 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; $isislist = &$toppage->isislist; $cert = &$toppage->cert; // Header table $toppage->tabletop("".$toptitle."

    ",""); if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 10; $tout = 15; if($debug) dbgmsg("
    :::> ".$errors["101"].$tlim.$errors["102"].$tout.$errors["103"]." <:::
    "); // Arrays defining the attributes to be returned $lim = array( "dn", SEL_NAME, SEL_ANAM, SEL_CURL, SEL_BURL, SEL_TYPE, SEL_FREE, SEL_TOTA ); // Top ISIS server: get all from the pre-defined list $nisis = count($isislist); $type = 'DB'; $isis = new $type($cert,$isislist); $ts1 = time(); $isis->connect($debug, "storage"); $gentries = $isis->get_infos(); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["106"].$nisis." (".($ts2-$ts1).$errors["104"].")
    "); $nc = count($gentries); if ( !$nc ) { // NO SITES FOUND! $errno = "1"; echo "
    ".$errors[$errno]."\n"; $toppage->close(); return $errno; } $dsarray = array (); $hnarray = array (); $sitetag = array (); /* a tag to skip duplicated entries */ $ncc = 0; foreach ( $gentries as $vo) { $sitec = count($vo)/2; $ncc += $sitec; for ( $i = 0; $i < $sitec; $i++ ) { array_push($hnarray,(string)$vo[(string)$vo[$i]]["EPR"]); } } for ( $k = 0; $k < count($hnarray); $k++ ) { $clport = $gentries[$k]["port"]; $clhost = $hnarray[$k]; $clconn = $isis->cluster_info($clhost,$debug); if ( $clconn && !$sitetag[$clhost] ) { array_push($dsarray,$clconn); array_push($hnarray,$clhost); $sitetag[$clhost] = 1; /* filtering tag */ if ($debug==2) dbgmsg("$k - $clhost:$clport "); } } $nhosts = count($dsarray); if ( !$nhosts ) { // NO SITES REPLY... $errno = "2"; echo "
    ".$errors[$errno]."\n"; $toppage->close(); return $errno; } $ctable = new LmTableSp($module,$toppage->$module); // Loop on SEs $senum = 0; $space = 0; $capacity = 0; for ( $ids = 0; $ids < $nhosts; $ids++ ) { $ds = $dsarray[$ids]; $hn = $hnarray[$ids]; /* host name, for debugging */ if ($ds) { $entries = $ds->AdminDomain->Services->StorageService; $nclusters = count($entries); /* May be several SEs! */ if ( !$nclusters ) continue; for ( $i = 0; $i < $nclusters; $i++) { $senum++; $curdn = $entries[$i]["dn"];//TODO: $curname = $entries[$i][SEL_NAME][0];//TODO: $curalias = $hn; $curspace = ( $entries[$i]->StorageServiceCapacity->FreeSize ) ? $entries[$i]->StorageServiceCapacity->FreeSize : 0; $curcapacity = ( $entries[$i]->StorageServiceCapacity->TotalSize ) ? $entries[$i]->StorageServiceCapacity->TotalSize : $curspace; $cururl = ( $entries[$i][SEL_BURL][0] ) ? $entries[$i][SEL_BURL][0] : $hn; $curtype = $entries[$i]->Type; $clstring = popup("clusdes.php?host=$curname&port=$curport&isse=1&debug=$debug",700,620,1); //$curspace = intval($curspace/1000); $occupancy = 1; // by default, all occupied $space += $curspace; // if ( $curcapacity != $errors["407"] ) { if ( $curcapacity != 0 ) { //$curcapacity = intval($curcapacity/1000); $occupancy = ($curcapacity - $curspace)/$curcapacity; $capacity += $curcapacity; } $tstring = $curspace."/".$curcapacity; $tlen = strlen($tstring); if ($tlen<11) { $nspaces = 11 - $tlen; for ( $is = 0; $is < $nspaces; $is++ ) $tstring .= " "; } $tstring = urlencode($tstring); if ($debug==2) dbgmsg("$senum: $curname at $hn
    "); if ( strlen($curalias) > 30 ) $curalias = substr($curalias,8,22) . ">"; $clstring = popup("clusdes.php?host=$curname&port=2135",700,620,1); $rowcont[] = "$senum"; //$rowcont[] = " $curalias"; $rowcont[] = "$curalias"; $rowcont[] = "\"$tstring\""; // $rowcont[] = $curcapacity.$errors["408"]; // $rowcont[] = $curspace.$errors["408"]; $rowcont[] = "$curname"; $rowcont[] = "$cururl"; $rowcont[] = "$curtype"; $ctable->addrow($rowcont); $rowcont = array (); } } $entries = array(); $jentries = array(); $gentries = array(); } $occupancy = ($capacity - $space)/$capacity; $tstring = $space."/".$capacity; $ctable->addspacer("#ffcc33"); $rowcont[] = " "; $rowcont[] = "".$errors["405"].""; $rowcont[] = "\"$tstring\""; //$rowcont[] = "$capacity".$errors["408"].""; //$rowcont[] = "$space".$errors["408"].""; $rowcont[] = " "; $rowcont[] = " "; $rowcont[] = " "; $ctable->addrow($rowcont, "#ffffff"); $ctable->close(); $toppage->close(); return 0; // Done $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/jobstat.php0000644000000000000000000000012411506361170024104 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.665728 30 ctime=1513200663.802793995 nordugrid-arc-5.4.2/src/services/ws-monitor/jobstat.php0000755000175000002070000001516511506361170024164 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; $cert = &$toppage->cert; // Header table $titles = explode(":",$toptitle); // two alternative titles, separated by column if ($jobdn=="all") { $clstring = popup("clusdes.php?host=$host&port=$port",700,620,1); $gtitle = "".$titles[0]." $host"; } else { $jobdn = rawurldecode($jobdn); $jobdn = preg_replace("/\"/","",$jobdn); $jobgid = $jobdn; $gtitle = "".$titles[1].": $jobgid"; } $toppage->tabletop("",$gtitle); // Arrays defining the attributes to be returned $lim = array( "dn", JOB_NAME, JOB_EQUE, JOB_GOWN, JOB_STAT, JOB_USET, JOB_SUBM, JOB_CPUS ); if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 15; $tout = 20; if( $debug ) dbgmsg("
    :::> ".$errors["101"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); // Establish connection to the requested information server $type = 'DB'; $isis = new $type($cert,array()); $bdn = DN_LOCAL; $topdn = DN_GLOBL; if ($isis) { // Single job info dump and quit if ($jobdn != "all") { $basedn = explode("Mds",$jobdn); $basedn = preg_split("/mds/i",$jobdn); $locdn = $basedn[0].$bdn; $info = $isis->cluster_info($host); $jobs = $info->Domains->AdminDomain->Services->ComputingService->ComputingEndpoint->ComputingActivities->ComputingActivity; $actual_job = NULL; foreach ($jobs as $job) { if ($job->ID == $jobdn) $actual_job = $job; } $isis->xml_nice_dump($actual_job, "job", $debug); $toppage->close(); return 0; } // Loop over all the jobs $sr = $info; if ($sr) { // If search returned, check that there are valid entries //$nmatch = ldap_count_entries($ds,$sr); //TODO:: $nmatch = 1; if ($nmatch > 0) { // HTML table initialisation $jtable = new LmTable($module,$toppage->$module); // If there are valid entries, tabulate results $jobs = $sr->Domains->AdminDomain->Services->ComputingService->ComputingEndpoint->ComputingActivities->ComputingActivity; $njobs = count($jobs); define("CMPKEY",JOB_SUBM); // usort($entries,"ldap_entry_comp"); // loop on jobs $jcount = 0; for ($i=0; $i<$njobs; $i++) { $jdn = rawurlencode((string)$jobs[$i]->{JOB_GOWN}); $curstat = $jobs[$i]->{JOB_STAT}; /* * The following flags may need an adjustment, * depending on the Job Status provider */ // Running job: statail == "R" or "run" $statail = substr(strstr($curstat,"INLRMS:"),7); $statail = trim($statail); // Queued job: stahead != "FIN" && statail != "R" and "run" etc $stahead = substr($curstat,0,3); $flagrun = ( $status == "Running" && ( $statail == "R" || /* PBS */ $statail == "S" || /* suspended by Condor */ $statail == "run" ) /* easypdc */ ); $flagque = ( $status != "Running" && $statail != "R" && $statail != "S" && $statail != "run" && $stahead != "FIN" && $stahead != "FAI" && $stahead != "EXE" && $stahead != "KIL" && $stahead != "DEL" ); /* No changes necessary below */ $flagact = ($flagrun || $flagque)?1:0; if ($flagact == 1 || $status == "All" ) { $uname = (string)$jobs[$i]->{JOB_GOWN}->{0}; $encuname = rawurlencode($uname); $uname = addslashes($uname); // In case $uname contains escape characters $family = cnvname($uname, 2); $jname = htmlentities($jobs[$i]->{JOB_NAME}); $jobname = ($jobs[$i]->{JOB_NAME}) ? $jname : "N/A"; $queue = ($jobs[$i]->{JOB_EQUE}) ? $jobs[$i]->{JOB_EQUE} : ""; $time = ($jobs[$i]->{JOB_USET}) ? $jobs[$i]->{JOB_USET} : ""; $ncpus = ($jobs[$i]->{JOB_CPUS}) ? $jobs[$i]->{JOB_CPUS} : ""; $newwin = popup("jobstat.php?host=$host&port=$port&status=$status&jobdn=$jdn",750,430,4); $quewin = popup("quelist.php?host=$host&port=$port&qname=$queue",750,430,6); $usrwin = popup("userlist.php?bdn=$topdn&owner=$encuname",700,500,5); $jcount++; // filling the table $jrowcont[] = "$jcount $jobname"; $jrowcont[] = "$family"; $jrowcont[] = "$curstat"; $jrowcont[] = "$time"; $jrowcont[] = "$queue"; $jrowcont[] = "$ncpus"; $jtable->addrow($jrowcont); $jrowcont = array (); } if ($jcount == 0) { $jtable->adderror("".$errors["4"].": ".$status.""); break; } } $jtable->close(); } else { echo "
    ".$errors["4"]."".$errors["7"]."\n"; } } else { echo "
    ".$errors["4"]."".$errors["7"]."\n"; } $toppage->close(); return 0; } else { $errno = "6"; echo "
    ".$errors[$errno]."\n"; $toppage->close(); return $errno; } // Done $toppage->close(); ?> nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/monitor.js0000644000000000000000000000012411506361170023752 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.667728 30 ctime=1513200663.809794081 nordugrid-arc-5.4.2/src/services/ws-monitor/monitor.js0000755000175000002070000000322711506361170024026 0ustar00mockbuildmock00000000000000function ngurl(link) { var wloc="http://"+document.domain+link; var vtest=link; var prot=vtest.substring(0,4); var vhttp="http"; if (prot == vhttp) { var wloc=link } return wloc; } function monitor(link,x,y,n) { // "n" is needed to keep dedicated windows for each monitor type // function ngurl() adds HTTP contact string, if needed // wloc=ngurl(link); var ua = ' ' + navigator.userAgent.toLowerCase(); var is_opera = ua.indexOf('opera'); var is_lynx = ua.indexOf('lynx'); var is_konqueror = ua.indexOf('konqueror'); wloc = link; browser = navigator.appName; if ( is_opera>0 || is_lynx>0 || is_konqueror>0 ) { window.location = wloc; } else { aaa=open("","win"+n,"innerWidth="+x+",innerHeight="+y+",resizable=1,scrollbars=1,width="+x+",height="+y); aaa.document.encoding = "text/html; charset=utf-8"; aaa.document.clear(); aaa.document.writeln(""); aaa.document.writeln(""); aaa.document.writeln("NorduGrid"); aaa.document.writeln(""); aaa.document.writeln(""); aaa.document.writeln(""); aaa.document.writeln("






    "); aaa.document.writeln("Collecting information..."); aaa.document.writeln("

    "); aaa.document.writeln(""); aaa.document.writeln(""); aaa.document.close(); aaa.document.location.href=wloc; aaa.document.close(); } } nordugrid-arc-5.4.2/src/services/ws-monitor/PaxHeaders.7502/includes0000644000000000000000000000013213214316027023454 xustar000000000000000030 mtime=1513200663.964795977 30 atime=1513200668.717854109 30 ctime=1513200663.964795977 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/0000755000175000002070000000000013214316027023577 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ws-monitor/includes/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711634415166025601 xustar000000000000000027 mtime=1316100726.768006 30 atime=1513200605.333078888 30 ctime=1513200663.948795781 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/Makefile.am0000755000175000002070000000016211634415166025645 0ustar00mockbuildmock00000000000000monitorincdir = @ws_monitor_prefix@/includes monitorinc_DATA = $(srcdir)/*.inc EXTRA_DIST = $(monitorinc_DATA) nordugrid-arc-5.4.2/src/services/ws-monitor/includes/PaxHeaders.7502/comfun.inc0000644000000000000000000000012411506361170025515 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.690728 30 ctime=1513200663.955795867 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/comfun.inc0000755000175000002070000000570011506361170025567 0ustar00mockbuildmock00000000000000 XXX   */ $geo1 = $a[0]; $geo2 = $b[0]; $geo1 = substr(stristr($geo1,""),3); $geo2 = substr(stristr($geo2,""),3); $geo1 = substr($geo1,0,strpos($geo1,"<")); $geo2 = substr($geo2,0,strpos($geo2,"<")); $ali1 = $a[1]; $ali2 = $b[1]; $ali1 = substr(stristr($ali1,""),3); $ali2 = substr(stristr($ali2,""),3); $ali1 = substr($ali1,0,strpos($ali1,"<")); $ali2 = substr($ali2,0,strpos($ali2,"<")); $cmpgeo = strcasecmp ($geo1,$geo2); $cmpali = strcasecmp ($ali1,$ali2); if ( !$cmpgeo ) return $cmpali; return $cmpgeo; } /** * @return int * @param a array * @param b array * @desc Compares by CPU */ function comp_by_cpu ($a, $b) { $cpu1 = $a[2]; $cpu2 = $b[2]; $cmpcpu = $cpu2 - $cpu1; return $cmpcpu; } /** * @return int * @param a array * @param b array * @desc Compares by grid running jobs */ function comp_by_grun ($a, $b) { $sum1 = $a[3]; $sum2 = $b[3]; // echo $sum1." vs ".$sum2."
    "; $sum1 = substr(stristr($sum1,"alt=\""),5); $sum2 = substr(stristr($sum2,"alt=\""),5); $sum1 = substr($sum1,0,strpos($sum1,"+")); $sum2 = substr($sum2,0,strpos($sum2,"+")); $cmpsum = $sum2 - $sum1; return $cmpsum; } ?>nordugrid-arc-5.4.2/src/services/ws-monitor/includes/PaxHeaders.7502/blacklist.inc0000644000000000000000000000012411506361170026176 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.685728 30 ctime=1513200663.950795806 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/blacklist.inc0000755000175000002070000000257311506361170026255 0ustar00mockbuildmock00000000000000"0", "grid.fi.uib.no"=>"0", "dc2.uio.no"=>"0", "dc1.uio.no"=>"0", "dc3.uio.no"=>"0", "dc4.uio.no"=>"0", "fire.ii.uib.no"=>"0", "hydra.ii.uib.no"=>"0", "grid.nbi.dk"=>"0", "lscf.nbi.dk"=>"0", "hepax1.nbi.dk"=>"0", "morpheus.nbi.dk"=>"0", "heppc08.nbi.dk"=>"0", "grid.uni-c.dk"=>"0", "tambohuse.imada.sdu.dk"=>"0", "gridrouter.imada.sdu.dk"=>"0", "tiger.imada.sdu.dk"=>"0", "cbs202.cbs.dtu.dk"=>"0", "gridgate.it.dtu.dk"=>"0", "amigos24.diku.dk"=>"0", "nroot.hip.fi"=>"0", "grid.hip.fi"=>"0", "hirmu.hip.fi"=>"0", "pc19.hip.fi"=>"0", "pc30.hip.helsinki.fi"=>"0", "testbed0.hip.helsinki.fi"=>"0", "pchip04.cern.ch"=>"0", "quark.hep.lu.se"=>"0", "farm.hep.lu.se"=>"0", "hathi.hep.lu.se"=>"0", "grid.tsl.uu.se"=>"0", "grid.scfab.se"=>"0", "bambi.quark.lu.se"=>"0", "nexus.swegrid.se"=>"0", "hagrid.it.uu.se"=>"0", "ingrid.hpc2n.umu.se"=>"0", "sigrid.lunarc.lu.se"=>"0", "bluesmoke.nsc.liu.se"=>"0", "g01n01.pdc.kth.se"=>"0", "ingvar.nsc.liu.se"=>"0", "seth.hpc2n.umu.se"=>"0", "banan.hpc2n.umu.se"=>"0", "jakarta.hpc2n.umu.se"=>"0", "gridum2.cs.umu.se"=>"0", "gridum1.cs.umu.se"=>"0", "sleipner.byggmek.lth.se"=>"0", "grendel.it.uu.se"=>"0", "login-3.monolith.nsc.liu.se"=>"0", "vls.science.upjs.sk"=>"0", "213-35-172-38-dsl.plus.estpak.ee"=>"0", "cm-gw.phys.ualberta.ca"=>"0", "tgrid.icepp.s.u-tokyo.ac.jp"=>"0", "hmx00.kek.jp"=>"0", "dummy"=>"0", "dummy"=>"0"); ?>nordugrid-arc-5.4.2/src/services/ws-monitor/includes/PaxHeaders.7502/locale.inc0000644000000000000000000000012411506361170025465 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.692728 30 ctime=1513200663.960795928 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/locale.inc0000755000175000002070000007037211506361170025546 0ustar00mockbuildmock00000000000000 array ( // Table headers "loadmon" => array( "0" => "Grid Monitor", "Country" => 30, "Site" => 160, "CPUs" => 10, "Load (processes: Grid+local)" => 210, "Queueing" => 10 ), "clusdes" => array("0" => "Cluster Details for", "Queue" => 0, "Status" => 0, "Limits (min)" => 0, "CPUs" => 0, "Running" => 0, "Queueing" => 0 ), "jobstat" => array("0" => "Jobs at:Job ID", "Job name" => 0, "Owner" => 0, "Status" => 0, "CPU (min)" => 0, "Queue" => 0, "CPUs" => 0 ), "volist" => array("0" => "Virtual Organisations", "Virtual Organisation" => 0, "Members" => 0, "Served by" => 0 ), "vousers" => array("0" => "Grid User Base", "#" => 0, "Name" => 0, "Affiliation" => 0, "E-mail" => 0 ), "userlist" => array("0" => "Information for", "" => 0, "Job name" => 0, "Status" => 0, "CPU (min)" => 0, "Cluster" => 0, "Queue" => 0, "CPUs" => 0 ), "userres" => array("0" => "", "Cluster:queue" => 0, "Free CPUs" => 0, "Exp. 
queue length" => 0, "Free disk (MB)" => 0 ), "attlist" => array("0" => "Attribute values", "Resource" => 0, "Current value" => 0 ), "quelist" => array("0" => "Details for the queue", "" => 0, "Job name" => 0, "Owner" => 0, "Status" => 0, "CPU (min)" => 0, "Memory (KB)" => 0, "CPUs" => 0 ), "sestat" => array("0" => "Storage Elements", "#" => 0, "Alias" => 0, "Tot. space" => 0, "Free space" => 0, "Name" => 0, "Base URL" => 0, "Type" => 0 ), "allusers" => array("0" => "Authorised Grid Users:Active Grid Users", "#" => 0, "Name" => 0, "Affiliaton" => 0, "Jobs" => 0, "Sites" => 0 ), "ldapdump" => array("0" => "", "Attribute" => 0, "Value" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info valid from (GMT)", "Mds-validto" => "Info valid to (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Front-end domain name", "nordugrid-cluster-aliasname" => "Cluster alias", "nordugrid-cluster-contactstring" => "Contact string", "nordugrid-cluster-interactive-contactstring" => "Interactive contact", "nordugrid-cluster-comment" => "Comment", "nordugrid-cluster-support" => "E-mail contact", "nordugrid-cluster-lrms-type" => "LRMS type", "nordugrid-cluster-lrms-version" => "LRMS version", "nordugrid-cluster-lrms-config" => "LRMS details", "nordugrid-cluster-architecture" => "Architecture", "nordugrid-cluster-opsys" => "Operating system", "nordugrid-cluster-homogeneity" => "Homogeneous cluster", "nordugrid-cluster-nodecpu" => "CPU type (slowest)", "nordugrid-cluster-nodememory" => "Memory (MB, smallest)", "nordugrid-cluster-totalcpus" => "CPUs, total", "nordugrid-cluster-cpudistribution" => "CPU:machines", "nordugrid-cluster-sessiondir-free" => "Disk space, available (MB)", "nordugrid-cluster-sessiondir-total" => "Disk space, total (MB)", "nordugrid-cluster-cache-free" => "Cache size, available (MB)", "nordugrid-cluster-cache-total" => "Cache size, total (MB)", "nordugrid-cluster-runtimeenvironment" => "Runtime environment", "nordugrid-cluster-localse" => "Storage Element, local", "nordugrid-cluster-middleware" => "Grid middleware", "nordugrid-cluster-totaljobs" => "Jobs, total amount", "nordugrid-cluster-usedcpus" => "CPUs, occupied", "nordugrid-cluster-queuedjobs" => "Jobs, queued", "nordugrid-cluster-location" => "Postal code", "nordugrid-cluster-owner" => "Owner", "nordugrid-cluster-issuerca" => "Certificate issuer", "nordugrid-cluster-nodeaccess" => "Node IP connectivity", "nordugrid-cluster-gridarea" => "Session area (OBSOLETE)", "nordugrid-cluster-gridspace" => "Grid disk space (OBSOLETE)", "nordugrid-cluster-opsysdistribution" => "OS distribution (OBSOLETE)", "nordugrid-cluster-runningjobs" => "Jobs, running (OBSOLETE)", "nordugrid-queue-name" => "Queue name", "nordugrid-queue-status" => "Queue status", "nordugrid-queue-running" => "Jobs, running", "nordugrid-queue-queued" => "Jobs, queued", "nordugrid-queue-maxrunning" => "Jobs, running (max)", "nordugrid-queue-maxqueuable" => "Jobs, queueable (max)", "nordugrid-queue-maxuserrun" => "Jobs per Unix user (max)", "nordugrid-queue-maxcputime" => "CPU time, max. (minutes)", "nordugrid-queue-mincputime" => "CPU time, min. 
(minutes)", "nordugrid-queue-defaultcputime" => "CPU time, default (minutes)", "nordugrid-queue-schedulingpolicy" => "Scheduling policy", "nordugrid-queue-totalcpus" => "CPUs, total", "nordugrid-queue-nodecpu" => "CPU type", "nordugrid-queue-nodememory" => "Memory (MB)", "nordugrid-queue-architecture" => "Architecture", "nordugrid-queue-opsys" => "Operating system", "nordugrid-queue-gridrunning" => "Grid jobs, running", "nordugrid-queue-gridqueued" => "Grid jobs, queued", "nordugrid-queue-assignedcpunumber" => "CPUs per queue (OBSOLETE)", "nordugrid-queue-assignedcputype" => "CPU type (OBSOLETE)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Owner", "nordugrid-job-execcluster" => "Execution cluster", "nordugrid-job-execqueue" => "Execution queue", "nordugrid-job-stdout" => "Standard output file", "nordugrid-job-stderr" => "Standard error file", "nordugrid-job-stdin" => "Standard input file", "nordugrid-job-reqcput" => "Requested CPU time", "nordugrid-job-status" => "Status", "nordugrid-job-queuerank" => "Position in the queue", "nordugrid-job-lrmscomment" => "LRMS comment", "nordugrid-job-submissionui" => "Submission machine", "nordugrid-job-submissiontime" => "Submission time (GMT)", "nordugrid-job-usedcputime" => "Used CPU time", "nordugrid-job-usedwalltime" => "Used wall time", "nordugrid-job-sessiondirerasetime" => "Erase time (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxy expiration time (GMT)", "nordugrid-job-usedmem" => "Used memory (KB)", "nordugrid-job-errors" => "Errors", "nordugrid-job-jobname" => "Name", "nordugrid-job-runtimeenvironment" => "Runtime environment", "nordugrid-job-cpucount" => "Requested CPUs", "nordugrid-job-executionnodes" => "Execution nodes", "nordugrid-job-gmlog" => "GM log file", "nordugrid-job-gridlog" => "Gridlog file (OBSOLETE)", "nordugrid-job-clientsoftware" => "Client version", "nordugrid-authuser-name" => "Name", "nordugrid-authuser-sn" => "Subject Name", "nordugrid-authuser-freecpus" => "Free CPUs", "nordugrid-authuser-diskspace" => "Free disk space (MB)", "nordugrid-authuser-queuelength" => "Experienced queue length", "nordugrid-se-name" => "Domain name", "nordugrid-se-aliasname" => "Storage element alias", "nordugrid-se-type" => "Storage element type", "nordugrid-se-freespace" => "Free space (GB)", "nordugrid-se-totalspace" => "Total space (GB)", "nordugrid-se-baseurl" => "Contact URL", "nordugrid-se-authuser" => "Authorised user (DN)", "nordugrid-se-location" => "Postal code", "nordugrid-se-owner" => "Owner", "nordugrid-se-issuerca" => "Certificate issuer", "nordugrid-se-comment" => "Comment", "nordugrid-rc-name" => "Domain name", "nordugrid-rc-aliasname" => "Replica Catalog alias", "nordugrid-rc-baseurl" => "Contact URL", "nordugrid-rc-authuser" => "Authorised user (DN)", "nordugrid-rc-location" => "Postal code", "nordugrid-rc-owner" => "Owner", "nordugrid-rc-issuerca" => "Certificate issuer" ), // Errors, warnings etc "errors" => array( "1" => "Can not read top-level resource indices", "2" => "None of the local indices returned connection", "3" => " bad configuration or request timed out", "4" => "No Grid jobs found", "5" => "No information found", "6" => "Server unavailable", "7" => " - refresh later", "101" => " Monitor timeouts for GRIS: ", "102" => " sec on connection and ", "103" => " sec on search", "104" => " sec spent searching", "105" => "Showing resources only in ", "106" => "Polled top-level indices: ", "107" => "Got geographical locations, scanned sites: ", "108" => " sites arranged by geographical 
location", "109" => "Search for cluster attributes", "110" => "Search for queue attributes", "111" => "No data from ", "112" => " is up in ", "113" => " has no resources to offer", "114" => " Monitor timeouts for GIIS: ", "115" => "Skipping GRIS: ", "116" => "not a ", "117" => "Checking connection: ", "118" => "OK", "119" => "That far, detected resources of kind ", "120" => "LDAP error searching ", "121" => " status at ", "122" => "Blacklisted: ", "123" => "Registrant found for ", "124" => "Search for SE attributes", "125" => "Search for users", "126" => "Search for jobs", "127" => " has job ", "128" => " while not being authorized", "301" => "Refresh", "302" => "Print", "303" => "Help", "304" => "Close", "305" => "Red", "306" => "Grey", "307" => "All users", "308" => "Active users", "309" => "Search", "310" => "Storage", "311" => "VOs", "312" => "Flag of ", "313" => " Grid processes and ", "314" => " local processes", "401" => "Processes", "402" => "Grid", "403" => "Local", "404" => "World", "405" => "TOTAL", "406" => " sites", "407" => "a lot of", "408" => " GB", "409" => " ALL" ), // Post code conversion: only for [en]! "tlconvert" => array ( "AU" => "Australia", "CA" => "Canada", "CH" => "Switzerland", "DK" => "Denmark", "EE" => "Estonia", "FI" => "Finland", "FIN" => "Finland", "SF" => "Finland", "DE" => "Germany", "JP" => "Japan", "NO" => "Norway", "N" => "Norway", "SE" => "Sweden", "SK" => "Slovakia", "SI" => "Slovenia", "KEK" => "Japan", "TOKYO" => "Japan" ) ), "ru" => array ( // Table headers "loadmon" => array("0" => "Грид-монитор", "Страна" => 0, "Ресурс" => 0, "ЦП" => 0, "Загрузка" => 0, "Ожидают" => 0 ), "clusdes" => array("0" => "Описание кластера", "Очередь" => 0, "Состояние" => 0, "Длительность (мин)" => 0, "ЦП" => 0, "Считаются" => 0, "Ожидают" => 0 ), "jobstat" => array("0" => "Задачи на:Номер задачи", "Имя задачи" => 0, "Хозяин" => 0, "Состояние" => 0, "Время (мин)" => 0, "Очередь" => 0, "ЦП" => 0 ), "volist" => array("0" => "Виртуальные организации", "Виртуальная оргаизация" => 0, "Члены" => 0, "Обслуживается" => 0 ), "vousers" => array("0" => "Пользователи", "#" => 0, "Имя" => 0, "Место работы" => 0, "Электронная почта" => 0 ), "userlist" => array("0" => "Информация для", "" => 0, "Имя задачи" => 0, "Состояние" => 0, "Время (мин)" => 0, "Ресурс" => 0, "Очередь" => 0, "ЦП" => 0 ), "userres" => array("0" => "", "Ресурс:очередь" => 0, "Свободные ЦП" => 0, "Длина очереди" => 0, "Диск, доступно (Мб)" => 0 ), "attlist" => array("0" => "Значения аттрибутов", "Ресурс" => 0, "Значение" => 0 ), "quelist" => array("0" => "Описание очереди", "" => 0, "Имя задачи" => 0, "Хозяин" => 0, "Состояние" => 0, "Время (мин)" => 0, "ОЗУ (КБ)" => 0, "ЦП" => 0 ), "sestat" => array("0" => "Внешние запоминающие устройства", "#" => 0, "Название" => 0, "Весь объём" => 0, "Свободно" => 0, "Имя" => 0, "URL базы" => 0, "Тип" => 0 ), "allusers" => array("0" => "Допущенные пользователи:Активные пользователи", "#" => 0, "Имя" => 0, "Место работы" => 0, "Задачи" => 0, "Ресурсы" => 0 ), "ldapdump" => array("0" => "", "Аттрибут" => 0, "Значение" => 0 ), "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Данные действительны с (GMT)", "Mds-validto" => "Данные действительны по (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Имя головной машины", "nordugrid-cluster-aliasname" => "Название", "nordugrid-cluster-contactstring" => "Контактный адрес", "nordugrid-cluster-interactive-contactstring" => "Интерактивный адрес", "nordugrid-cluster-comment" => "Комментарий", 
"nordugrid-cluster-support" => "Е-почта ответственного", "nordugrid-cluster-lrms-type" => "СУПО, тип", "nordugrid-cluster-lrms-version" => "СУПО, версия", "nordugrid-cluster-lrms-config" => "СУПО, подробности", "nordugrid-cluster-architecture" => "Архитектура", "nordugrid-cluster-opsys" => "Операционная система", "nordugrid-cluster-homogeneity" => "Гомогенность ресурса", "nordugrid-cluster-nodecpu" => "Процессор, тип (худший)", "nordugrid-cluster-nodememory" => "ОЗУ (Мб, наименьшее)", "nordugrid-cluster-totalcpus" => "Процессоры, всего", "nordugrid-cluster-cpudistribution" => "Процессоры:узлы", "nordugrid-cluster-sessiondir-free" => "Диск, доступно (Мб)", "nordugrid-cluster-sessiondir-total" => "Диск, весь объём (Мб)", "nordugrid-cluster-cache-free" => "Дисковый кэш, свободно (Мб)", "nordugrid-cluster-cache-total" => "Дисковый кэш, всего (Мб)", "nordugrid-cluster-runtimeenvironment" => "Рабочая среда", "nordugrid-cluster-localse" => "ВЗУ, локальное", "nordugrid-cluster-middleware" => "Грид-ПО", "nordugrid-cluster-totaljobs" => "Задачи, всего", "nordugrid-cluster-usedcpus" => "Процессоры, занятые", "nordugrid-cluster-queuedjobs" => "Задачи, в очереди", "nordugrid-cluster-location" => "Почтовый индекс", "nordugrid-cluster-owner" => "Владелец", "nordugrid-cluster-issuerca" => "Сертификат выдан", "nordugrid-cluster-nodeaccess" => "IP-соединение узлов", "nordugrid-cluster-gridarea" => "Адрес сессий (СТАРЫЙ)", "nordugrid-cluster-gridspace" => "Грид-диск (СТАРЫЙ)", "nordugrid-cluster-opsysdistribution" => "Дистрибутив ОС (СТАРЫЙ)", "nordugrid-cluster-runningjobs" => "Задачи, в счёте (СТАРЫЙ)", "nordugrid-queue-name" => "Имя очереди", "nordugrid-queue-status" => "Состояние очереди", "nordugrid-queue-running" => "Задачи, в счёте", "nordugrid-queue-queued" => "Задачи, в очереди", "nordugrid-queue-maxrunning" => "Задачи, в счёте (предел)", "nordugrid-queue-maxqueuable" => "Задачи, в очереди (предел)", "nordugrid-queue-maxuserrun" => "Задачи на пользователя (предел)", "nordugrid-queue-maxcputime" => "Длительность, наиб. (мин)", "nordugrid-queue-mincputime" => "Длительность, наим. (мин)", "nordugrid-queue-defaultcputime" => "Длительность, по ум. 
(мин)", "nordugrid-queue-schedulingpolicy" => "Правила планировки", "nordugrid-queue-totalcpus" => "Процессоры, всего", "nordugrid-queue-nodecpu" => "Процессор, тип", "nordugrid-queue-nodememory" => "ОЗУ (Мб)", "nordugrid-queue-architecture" => "Архитектура", "nordugrid-queue-opsys" => "Операционная система", "nordugrid-queue-gridrunning" => "Грид-задачи, в счёте", "nordugrid-queue-gridqueued" => "Грид-задачи, в очереди", "nordugrid-queue-assignedcpunumber" => "Процессоры (СТАРЫЙ)", "nordugrid-queue-assignedcputype" => "Тип процессора (СТАРЫЙ)", "nordugrid-job-globalid" => "Номер", "nordugrid-job-globalowner" => "Хозяин", "nordugrid-job-execcluster" => "Выполняющий кластер", "nordugrid-job-execqueue" => "Выполняющая очередь", "nordugrid-job-stdout" => "Стандартный выход", "nordugrid-job-stderr" => "Стандартная ошибка", "nordugrid-job-stdin" => "Стандартный вход", "nordugrid-job-reqcput" => "Запрошенное время", "nordugrid-job-status" => "Состояние", "nordugrid-job-queuerank" => "Положение в очереди", "nordugrid-job-lrmscomment" => "Комментарий СУПО", "nordugrid-job-submissionui" => "Засылающий клиент", "nordugrid-job-submissiontime" => "Время засылки (GMT)", "nordugrid-job-usedcputime" => "Использованное время ЦП", "nordugrid-job-usedwalltime" => "Использованное время", "nordugrid-job-sessiondirerasetime" => "Срок уничтожения (GMT)", "nordugrid-job-proxyexpirationtime" => "Окончание доверенности (GMT)", "nordugrid-job-usedmem" => "Использование ОЗУ (Кб)", "nordugrid-job-errors" => "Ошибки", "nordugrid-job-jobname" => "Имя", "nordugrid-job-runtimeenvironment" => "Рабочая среда", "nordugrid-job-cpucount" => "Запрошено процессоров", "nordugrid-job-executionnodes" => "Выполняющие узлы", "nordugrid-job-gmlog" => "Журнальная запись ГМ", "nordugrid-job-gridlog" => "Грид-запись (СТАРЫЙ)", "nordugrid-job-clientsoftware" => "Версия клиента", "nordugrid-authuser-name" => "Имя", "nordugrid-authuser-sn" => "Субъект", "nordugrid-authuser-freecpus" => "Свободные ЦП", "nordugrid-authuser-diskspace" => "Диск, доступно (Мб)", "nordugrid-authuser-queuelength" => "Длина очереди", "nordugrid-se-name" => "Доменное имя", "nordugrid-se-aliasname" => "Название", "nordugrid-se-type" => "Тип", "nordugrid-se-freespace" => "Свободный объём (Гб)", "nordugrid-se-totalspace" => "Весь объём (Гб)", "nordugrid-se-baseurl" => "Контактный адрес", "nordugrid-se-authuser" => "Допущенные ползьзователи (DN)", "nordugrid-se-location" => "Почтовый индекс", "nordugrid-se-owner" => "Владелец", "nordugrid-se-issuerca" => "Сертификат выдан", "nordugrid-se-comment" => "Комментарий", "nordugrid-rc-name" => "Доменное имя", "nordugrid-rc-aliasname" => "Название", "nordugrid-rc-baseurl" => "Контактный адрес", "nordugrid-rc-authuser" => "Допущенные пользователи (DN)", "nordugrid-rc-location" => "Почтовый индекс", "nordugrid-rc-owner" => "Владелец", "nordugrid-rc-issuerca" => "Сертификат выдан" ), "errors" => array( "1" => "Невозможно прочесть списки высшего уровня", "2" => "Ни один из местных списков не отзывается", "3" => " неверная конфигурация или истекло время запроса", "4" => "Не обнаружено Грид-задач", "5" => "Нет информации", "6" => "Служба недоступна", "7" => " - попробуйте обновить поззже", "101" => " Время на связь с локальным списком: ", "102" => " с на соединение и ", "103" => " с на поиск", "104" => " с затрачено на поиск", "105" => "Перечисление ресурсов: ", "106" => "Опрошено списков верхнего уровня: ", "107" => "Получены географические координаты, просканировано ресурсов: ", "108" => " ресурсов упорядочено по геополитическому 
признаку", "109" => "Поиск аттрибутов кластера", "110" => "Поиск аттрибутов очереди", "111" => "Нет данных с ", "112" => " фукционирует в стране: ", "113" => " не располагает ресурсами", "114" => " Время на связь с глобальным списком: ", "115" => "Игнорируется ресурс: ", "116" => "не соответствует типу ", "117" => "Проверка связи: ", "118" => "есть", "119" => "На данный момент обнаружено ресурсов типа ", "120" => "Ошибка LDAP при поиске на ", "121" => "-состояние на ", "122" => "Заблокирован: ", "123" => "Обнаружен регистрант ", "124" => "Поиск аттрибутов ВЗУ", "125" => "Поиск пользователей", "126" => "Поиск задач", "127" => " запустил(а) задачу ", "128" => " не будучи допущенным(ой)", "301" => "Перезагрузить", "302" => "Печать", "303" => "Помощь", "304" => "Закрыть", "305" => "Красный", "306" => "Серый", "307" => "Все пользователи", "308" => "Активные пользователи", "309" => "Поиск", "310" => "ВЗУ", "311" => "Виртуальные организации", "312" => "Флаг страны: ", "313" => " Грид-процессов и ", "314" => " местных процессов", "401" => "Процессы", "402" => "Грид", "403" => "местные", "404" => "Мир", "405" => "ВСЕГО", "406" => " ресурс(а)(ов)", "407" => "куча", "408" => " Гб", "409" => " ВСЕ" ), // Country name conversion, no postcode! "tlconvert" => array ( "Australia" => "Австралия", "Canada" => "Канада", "Switzerland" => "Швейцария", "Denmark" => "Дания", "Estonia" => "Эстония", "Finland" => "Финляндия", "Germany" => "Германия", "Japan" => "Япония", "Norway" => "Норвегия", "Sweden" => "Швеция", "Slovakia" => "Словакия", "Slovenia" => "Словения", "World" => "Мир" ) ) ); ?> nordugrid-arc-5.4.2/src/services/ws-monitor/includes/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315735025603 xustar000000000000000030 mtime=1513200605.365079279 30 atime=1513200651.710646104 30 ctime=1513200663.948795781 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/Makefile.in0000644000175000002070000004360513214315735025661 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/ws-monitor/includes DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(monitorincdir)" DATA = $(monitorinc_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = 
@ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = 
@LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ 
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ monitorincdir = @ws_monitor_prefix@/includes monitorinc_DATA = $(srcdir)/*.inc EXTRA_DIST = $(monitorinc_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/ws-monitor/includes/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/ws-monitor/includes/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-monitorincDATA: $(monitorinc_DATA) @$(NORMAL_INSTALL) test -z "$(monitorincdir)" || $(MKDIR_P) "$(DESTDIR)$(monitorincdir)" @list='$(monitorinc_DATA)'; test -n "$(monitorincdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(monitorincdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(monitorincdir)" || exit $$?; \ done uninstall-monitorincDATA: @$(NORMAL_UNINSTALL) @list='$(monitorinc_DATA)'; test -n "$(monitorincdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(monitorincdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(monitorincdir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d 
$$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(monitorincdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-monitorincDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-monitorincDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man \ install-monitorincDATA install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am uninstall uninstall-am uninstall-monitorincDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/ws-monitor/includes/PaxHeaders.7502/attlist.inc0000644000000000000000000000012411506361170025712 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.688728 30 ctime=1513200663.949795793 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/attlist.inc0000755000175000002070000001533311506361170025767 0ustar00mockbuildmock00000000000000".$errors[$errno]."\n"; return $errno; } $dsarray = array (); $hnarray = array (); $pnarray = array (); $sitetag = array (); /* a tag to skip duplicated entries */ for ( $k = 0; $k < $nc; $k++ ) { $clhost = $gentries[$k]["host"]; $clport = $gentries[$k]["port"]; $clconn = ldap_connect($clhost,$clport); if ( $clconn && !$sitetag[$clhost] ) { array_push($dsarray,$clconn); array_push($pnarray,$clport); $sitetag[$clhost] = 1; /* filtering tag */ } } $nhosts = count($dsarray); if ( !$nhosts ) { // NO SITES REPLY... $errno = "2"; echo "
    ".$errors[$errno]."\n"; return $errno; } // Search all clusters $srarray = @ldap_search($dsarray,DN_LOCAL,$filter,$lim,0,0,$tlim,LDAP_DEREF_NEVER); echo "\n"; // HTML table initialisation array_unshift($engatts,$errors["425"]); $jtable = new LmTableFree($engatts); $rowcont = array(); $tabcont = array(); $rc = 0; for ( $ids = 0; $ids < $nhosts; $ids++ ) { $sr = $srarray[$ids]; $dst = $dsarray[$ids]; $pn = $pnarray[$ids]; if ($dst && $sr) { // If search returned, check that there are valid entries $nmatch = @ldap_count_entries($dst,$sr); if ($nmatch > 0) { // If there are valid entries, tabulate results $allentries = ldap_get_entries($dst,$sr); $entries = ldap_purge($allentries); if ( $object == OBJ_AJOB ) { define("CMPKEY",JOB_STAT); //usort($entries,"ldap_entry_comp"); } $nclus = $entries["count"]; for ($i=0; $i<$nclus; $i++) { $cluster = "N/A"; $queue = "N/A"; $job = "N/A"; $currdn = $entries[$i]["dn"]; $currdn = preg_replace("/\"/","",$currdn); $dnparts = ldap_explode_dn($currdn,0); foreach ($dnparts as $part) { $pair = explode("=",$part); switch ( $pair[0] ) { case CLU_NAME: $cluster = $pair[1]; break; case SEL_NAME: $se = $pair[1]; break; case QUE_NAME: $queue = $pair[1]; break; case JOB_GLID: $job = $pair[1]; $encjob = rawurlencode($currdn); break; } } $sort = "cluster"; // 410: cluster; 411: queue; 412: job; 413: user; 414: SE switch ( $object ) { case OBJ_CLUS: $resource = $errors["410"]." $cluster"; $winstring = popup("clusdes.php?host=$cluster&port=$pn",700,620,1); break; case OBJ_QUEU: $resource = $errors["410"]." $cluster, ".$errors["411"]." $queue"; $winstring = popup("quelist.php?host=$cluster&port=$pn&qname=$queue",750,430,6); break; case OBJ_USER: $resource = $errors["410"]." $cluster, ".$errors["411"]." $queue"; $winstring = popup("quelist.php?host=$cluster&port=$pn&qname=$queue",750,430,6); break; case OBJ_AJOB: $resource = $errors["412"]." $job"; $winstring = popup("jobstat.php?host=$cluster&port=$pn&status=&jobdn=$encjob",750,430,4); break; case OBJ_STEL: $resource = $errors["414"]." $se"; $winstring = ""; break; } $rc++; $rowcont[0] = ( $winstring ) ? "$rc $resource" : "$rc $resource"; // determine maximum row count per object $vcount = 0; foreach ( $attributes as $attribute ) { if ( !$attribute ) continue; $ccount = $entries[$i][$attribute]["count"]; $vcount = ( $ccount > $vcount ) ? 
$ccount : $vcount; } if ($vcount == 0) $jtable->adderror($resource); $attrtag = array(); for ( $j = 0; $j < $vcount; $j++ ) { $attval = ""; $attcheck = FALSE; for ( $k = 0; $k < $n ; $k++ ) { $attribute = $attributes[$k]; if ( !$attribute || @in_array($attribute,$attrtag[$j]) ) continue; if ( $entries[$i][$attribute][$j] ) { $attval = $entries[$i][$attribute][$j]; $attcheck = TRUE; } else { $attval = " "; } // Some time-stamp readability adjustment if (substr(strrchr($attribute, "-"), 1) == "sessiondirerasetime" || substr(strrchr($attribute, "-"), 1) == "submissiontime" || substr($attribute,0,9) == "Mds-valid" ) $attval=cnvtime($attval); $rowcont[] = htmlentities($attval); $attrtag[$j][] = $attribute; } if ( $attcheck ) { $tabcont[] = $rowcont; } else { $rc--; } // if ( $attcheck ) $jtable->addrow($rowcont); $rowcont = array(); $rowcont[0] = " "; } } } } @ldap_free_result($sr); } foreach ( $tabcont as $row ) $jtable->addrow($row,""); $jtable->close(); return 0; } ?>nordugrid-arc-5.4.2/src/services/ws-monitor/includes/PaxHeaders.7502/ldap_nice_dump.inc0000644000000000000000000000012411506361170027171 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.684728 30 ctime=1513200663.957795891 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/ldap_nice_dump.inc0000755000175000002070000000720111506361170027241 0ustar00mockbuildmock00000000000000"; // Plain LDAP dump for the DN $filstr = "(objectclass=*)"; $sr = @ldap_search($ds,$dn,$filstr,array("*"),0,0,$tlim,LDAP_DEREF_NEVER); if ($sr) { // If search returned, check that there are valid entries $nmatch = ldap_count_entries($ds,$sr); if ($nmatch > 0) { // If there are valid entries, tabulate results $first = ldap_first_entry($ds,$sr); $entries = ldap_get_attributes($ds,$first); $nfields = $entries["count"]; // get the Distinguished Name $thisdn = ldap_get_dn($ds,$first); // HTML table initialisation $dtable = new LmTableSp("ldapdump",$strings["ldapdump"]); // add the DN entry $drowcont = array("".$errors["420"]."",$thisdn); $dtable->addrow($drowcont, "#cccccc"); $drowcont = array(); // loop on the rest of attributes for ($i=0; $i<$nfields; $i++) { $curatt = $entries[$i]; if ( $exclude && in_array($curatt,$exclude) ) continue; $engatt = ($isattr[$curatt]) ? $isattr[$curatt] : $curatt; $nval = $entries[$curatt]["count"]; $encatt = rawurlencode($curatt); $attwin = popup("attlist.php?attribute=$encatt",650,300,7); $attstring = ( $mdsattr[$curatt] ) ? "$engatt" : "$engatt"; $drowcont[0] = $attstring; $drowcont[1] = " "; if ($nval==0) $dtable->addrow($drowcont); $drowcont[1] = ""; if ( $nval > 4 ) $drowcont[1] = $fhead; for ($k=0; $k<$nval; $k++) { $curval = $entries[$curatt][$k]; // Some time-stamp readability adjustment if ( strlen($curval) == 15 && $curval{14} == "Z" ) $curval=cnvtime($curval); $encval = htmlspecialchars($curval,ENT_QUOTES,"UTF-8"); // E-mail masquerading for short lists (dunno what to do with long lists) if (strpos($curval,"@",1) && $nval<5) { $m = mylo ($curval); if ( $m[0] ) $encval = ""; } if ( $nval > 4 ) { $drowcont[1] .= "$encval"; if ( $k < $nval-1 ) $drowcont[1] .= "\n"; } else { $drowcont[1] .= $encval; if ( $k < $nval-1 ) $drowcont[1] .= "
     "; } } if ( $nval > 4 ) $drowcont[1] .= $ftail; $dtable->addrow($drowcont); } $dtable->close(); ldap_free_result($sr); return $thisdn; } else { $errno = 9; echo "
    ".$errors[$errno]."\n"; return $errno; } } else { $errno = 5; echo "
    ".$errors[$errno]."\n"; return $errno; } } ?> nordugrid-arc-5.4.2/src/services/ws-monitor/includes/PaxHeaders.7502/cnvtime.inc0000644000000000000000000000012411506361170025673 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.685728 30 ctime=1513200663.954795855 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/cnvtime.inc0000755000175000002070000000064311506361170025746 0ustar00mockbuildmock00000000000000 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/PaxHeaders.7502/headfoot.inc0000644000000000000000000000012411506361170026017 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.691728 30 ctime=1513200663.956795879 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/headfoot.inc0000755000175000002070000001760211506361170026075 0ustar00mockbuildmock00000000000000\n"; //echo "\n"; echo "\n"; echo "\n"; echo "\n"; echo "\n"; echo "\n"; echo "\n"; if ( $wintyp ) { $this->module = $wintyp; // Localize $yaccept = @$_SERVER["HTTP_ACCEPT_LANGUAGE"] ; if ( !$yaccept ) $yaccept = "en"; $yazyk = "en"; $yazyki = explode(",",$yaccept); foreach ( $yazyki as $option ) { if ( $yazyk != "en" ) continue; $option = trim($option); $option = substr($option,0,2); // some sniffing // touch("test/".$option); // echo "\n"; $locfile = $option.".inc"; if ( !file_exists("lang/".$locfile) ) continue; $yazyk = $option; } $locfile = $yazyk.".inc"; include $locfile; setlocale(LC_ALL, $yazyk); $this->language = $yazyk; $this->strings = $message; $this->errors = $message["errors"]; $this->countries = $message["tlconvert"]; $this->mdsattr = $message["mdsattr"]; $this->isattr = $message["isattr"]; // Assigns $this->clusdes = $message["clusdes"]; $this->$wintyp = $message[$wintyp]; $toptitle = $message[$wintyp][0]; // Set page parameters require ('settings.inc'); $inpnam = implode("_",array("def",$wintyp)); // Page style definitions (see settings.inc) // Sets top window title $this->title = ( $toptitle ) ? $toptitle : ""; // Refresh rate $this->refresh = (${$inpnam}["refresh"]) ? ${$inpnam}["refresh"] : 0; // Background and link colors $this->bg = (${$inpnam}["bgcolor"]) ? ${$inpnam}["bgcolor"] : "#ffffff"; $this->lc = (${$inpnam}["lcolor"]) ? ${$inpnam}["lcolor"] : "#cc0000"; // Dumps the header HTML code $titles = explode(":",$this->title); // sometimes titles are many echo "".$titles[0]." ".$extratitle."\n"; if ( $this->refresh ) echo "\n"; echo "\n"; // define giislist $this->giislist = $giislist; $this->isislist = $isislist; $this->cert = $cert; } // Finishes HTML header, starts document body echo "\n"; echo "\n"; echo "
    \n"; } /** * @return void * @param errors array * @param title string * @param subtitle string * @desc Makes an opening Monitor header */ function tabletop ( $toptitle="", $subtitle="" ) { // function tabletop() echo "\n"; echo "\n"; echo "\n"; echo "
    ".$toptitle."
    ".$subtitle."\n"; echo " errors["301"]."\" alt=\"".$this->errors["301"]."\">\n"; echo " \n"; echo " errors["302"]."\" alt=\"".$this->errors["302"]."\">\n"; echo " module."',400,300,10);\" onClick=\"javascript:monitor('help.php?module=".$this->module."',400,300,10);\">\n"; echo " errors["303"]."\" alt=\"".$this->errors["303"]."\">\n"; echo " \n"; echo " errors["304"]."\" alt=\"".$this->errors["304"]."\">\n"; echo "
    \n"; } /** * @return string * @desc returns version number from README */ function getVersion () { $v = "N/A"; if ( file_exists("README") ) { $readme = fopen("README","r"); $fline = fgets($readme); $v = substr(stristr($fline,"version "),8); fclose($readme); } $this->version = $v; return $v; } /** * @return void * @desc Closes an HTML document */ function close () { // Closes the HTML document echo "\n
    \n"; echo "\n"; ob_end_flush(); ob_implicit_flush(); } } /** * Below are some generic functions, non-class-specific * * function dbgmsg ( string ) : prints out a message and flushes output; useful for debugging * function popup ( string, int, int, int ) : opens up a new window, depending on the client */ /** * @return void * @param dbgtxt string * @desc Outputs a debug message outside the table */ function dbgmsg( $dbgtxt="Debug" ) { echo "$dbgtxt\n"; flush(); } /** * @return void * @param contact string * @param x int * @param y int * @param n int * @desc Returns a new monitor window URL */ $agent = @$_SERVER["HTTP_USER_AGENT"] ; if ( !defined("USERAGENT") ) define("USERAGENT",$agent); function popup( $contact, $x=400, $y=300, $n=1 ) { ( USERAGENT ) ? $agent = USERAGENT : $agent = "lynx"; if ( preg_match("/opera/i",$agent) || preg_match("/lynx/i",$agent) || preg_match("/konqueror/i",$agent) ) return $contact; // $link = "javascript:monitor('".$contact."',$x,$y,$n)"; $link = $contact."\" target=\"win".$n."\" onClick=\"monitor('".$contact."',$x,$y,$n); return false"; return $link; } ?> nordugrid-arc-5.4.2/src/services/ws-monitor/includes/PaxHeaders.7502/ldap_purge.inc0000644000000000000000000000012411506361170026350 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.688728 30 ctime=1513200663.958795903 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/ldap_purge.inc0000755000175000002070000000176611506361170026432 0ustar00mockbuildmock00000000000000### purged DN:".$curdn."

    \n"; } } $entries["count"] = $storesize; return $entries; } ?> nordugrid-arc-5.4.2/src/services/ws-monitor/includes/PaxHeaders.7502/lmtable.inc0000644000000000000000000000012411506361170025646 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.688728 30 ctime=1513200663.959795916 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/lmtable.inc0000755000175000002070000002117711506361170025726 0ustar00mockbuildmock00000000000000color_header = (${$inpnam}["thcolor"]) ? ${$inpnam}["thcolor"] : "#999999"; $this->color_bg = (${$inpnam}["tbcolor"]) ? ${$inpnam}["tbcolor"] : "#f0f0f0"; $this->font_title = (${$inpnam}["thfont"]) ? ${$inpnam}["thfont"] : "color=\"#ffffff\""; $this->font_main = (${$inpnam}["tbfont"]) ? ${$inpnam}["tbfont"] : "color=\"#000000\""; $this->columns = $locset; $this->ncols = 0; echo "color_bg."\">\n"; echo "color_header."\">\n"; $colnr = 0; foreach ( $locset as $colnam => $colwid) { if ( $colnam == "0" || $colnam == "help" ) continue; $this->ncols ++; $colnr++; $value = $colnam; // Specific sorting links for the front module if ( $wintyp == "loadmon" ) { // Keep old arguments, if any, except of order $allargs = ""; foreach ( $_GET as $argm => $argval ) { if ( $argm == "order" ) continue; $allargs .= $argm."=".$argval."&"; } $str1 = "font_title.">".$value.""; if ( $colnr == 1 ) $value = $str1."country".$str2; elseif ( $colnr == 3 ) $value = $str1."cpu".$str2; elseif ( $colnr == 4 ) $value = $str1."grun".$str2; } $width = ($colwid)?$colwid:"1%"; echo "\n"; } echo "\n"; } /** * @return void * @param contents array * @desc Draws a table row */ function addrow( $contents, $bgcol="" ) { if ( count($contents) != $this->ncols ) { $this->adderror("Incompatible data"); return 1; } $this->contents = $contents; if ($bgcol) { echo "\n"; } else { echo "\n"; } foreach ($contents as $colent) { $value = $colent; echo "\n"; } echo "\n"; } /** * @return void * @param color string * @desc Draws a spanning row containing a spacer */ function addspacer( $color="#000000" ) { echo "\n"; echo ""; echo "\n"; } /** * @return void * @param errtxt string * @desc Draws a spanning row containing error message */ function adderror( $errtxt="Error", $bgcol="" ) { $this->errtxt = $errtxt; echo "\n"; echo ""; echo "\n"; } /** * @return void * @param errtxt string * @param nrows integer * @param color string * @desc Adds a cell spanning $nrows rows */ function rowspan( $nrows, $errtxt=" ", $color="#ffffcc" ) { $this->errtxt = $errtxt; $ncols = $this->ncols - 1; $nrows = $nrows + 1; echo "\n"; echo ""; echo ""; echo "\n"; } /** * @return void * @desc Closes a table */ function close() { echo "
    font_title."> $value 
    font_main."> $value 
    ncols."\" bgcolor=\"$color\" height=\"0\">\"\"
    ncols."\""; if ($bgcol) echo " bgcolor=\"$bgcol\""; echo ">font_main."> $errtxt
     $errtxt\"\"
    \n"; ob_end_flush(); ob_implicit_flush(); } } class LmTableSp extends LmTable { var $spcolor; /** * @return void * @param contents array * @param color string * @desc Draws a table row with a spacer above */ function addrow( $contents, $bgcol="", $color="#ffffff" ) { $ncols = count($contents); $this->contents = $contents; if ($bgcol) { echo "\n"; } else { echo "\n"; } foreach ($contents as $colent) { $value = $colent; echo "font_main."> $value \n"; } echo "\n"; echo "\n"; echo "\"\""; echo "\n"; } /** * @return void * @param errtxt string * @param color string * @desc Draws a spanning row containing error message */ function adderror( $errtxt="Error", $color="#ffffff", $bgcol="" ) { $this->errtxt = $errtxt; $ncols = $this->ncols; $tospan = $this->rowspan; if ( $tospan ) $ncols = $ncols - 1; echo "\n"; echo "\"\""; echo "\n"; echo "\n"; echo "ncols."\""; if ($bgcol) echo " bgcolor=\"$bgcol\""; echo ">font_main."> $errtxt"; echo "\n"; } /** * @return void * @param errtxt string * @param nrows integer * @param color string * @desc Adds a cell spanning $nrows rows */ function rowspan( $nrows, $errtxt=" ", $color="#ffffcc" ) { $this->errtxt = $errtxt; $ncols = $this->ncols - 1; $nrows = (2 * $nrows) + 1; echo "\n"; echo " $errtxt"; echo "\"\""; echo "\n"; } } class LmTableFree extends LmTableSp { /** * @return LmTableFree * @param headers array * @desc Starts an HTML table */ function LmTableFree( $headers ) { ob_implicit_flush(0); ob_start(); $this->color_header = "#666666"; $this->color_bg = "#f0f0f0"; $this->font_title = "color=\"#ffffff\""; $this->font_main = "color=\"#000000\""; $this->columns = count($headers); $this->ncols = 0; echo "color_bg."\">\n"; echo "color_header."\">\n"; foreach ( $headers as $colnam ) { $this->ncols ++; $value = $colnam; $width = "1%"; echo "\n"; } echo "\n"; } } ?>nordugrid-arc-5.4.2/src/services/ws-monitor/includes/PaxHeaders.7502/postcode.inc0000644000000000000000000000012311506361170026045 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.686728 29 ctime=1513200663.96179594 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/postcode.inc0000755000175000002070000000560411506361170026123 0ustar00mockbuildmock00000000000000$tout sec t/o"; if ( !$record ) continue; $nrecords = $record["count"]; /* should be 1 */ for ($m = 0; $m < $nrecords; $m++) { $curcod = $record[$m][CLU_ZIPC][0]; if ( $curcod ) $cllist[$idx]["zvoname"] = cnvvo($curcod,$curnam); } } return($cllist); } /** * @return string * @param curnam string * @desc Guesses geographical location of a cluster */ function guess_country($curnam, $zip) { // Dumb domain name guess by 2 last letters $zvoname = cnvvo("",$curnam); // overwrite the previous decision if country code is set in the postal code if ( $zip ) $zvoname = cnvvo($zip,$curnam); return $zvoname; } ?>nordugrid-arc-5.4.2/src/services/ws-monitor/includes/PaxHeaders.7502/mylo.inc0000644000000000000000000000012411506361170025206 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.688728 30 ctime=1513200663.960795928 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/mylo.inc0000755000175000002070000000124011506361170025253 0ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ws-monitor/includes/PaxHeaders.7502/cnvname.inc0000644000000000000000000000012411506361170025655 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.690728 30 ctime=1513200663.953795842 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/cnvname.inc0000755000175000002070000000401111506361170025721 
0ustar00mockbuildmock00000000000000 1 && $family != "Doe") { /* catch for the tutorials */ $doestr = substr($family,1,1); /* returns "1" if it is a number, or a letter if it's a name */ if ( preg_match("/[0-9]/",$doestr) ) { $number = array_pop($names); $family = end($names); } // $family = substr(strrchr($uname, " "), 1); $name = $cn{0}."."; /* First letter of the name (doesn't work with 8-bit strings) */ if ( $flag == 2 ) $name = $names[0]; eval("\$name = \"$name\";"); $family = $name." ".$family; } else { $family = $cn; } if ( !$family ) return $uname /* Give up */; return $family; } /** * @return string * @param uname string * @desc Takes user DN and attempts to extract her affiliation */ function getorg ( $uname ) { $uname = trim($uname); $pieces = explode("/L=", $uname); if ( count($pieces) == 1 ) $pieces = explode("/DC=", $uname); if ( count($pieces) == 1 ) $pieces = explode("/OU=", $uname); if ( count($pieces) == 1 ) $pieces = explode("/O=", $uname); $org = end($pieces); $tailpos = strpos($org, "/"); if ( $tailpos ) $org = substr($org,0,$tailpos); return $org; } ?>nordugrid-arc-5.4.2/src/services/ws-monitor/includes/PaxHeaders.7502/settings.inc0000644000000000000000000000012411506361170026066 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.683728 30 ctime=1513200663.963795965 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/settings.inc0000755000175000002070000002636611506361170026153 0ustar00mockbuildmock00000000000000 "index1.nordugrid.org", "port" => "2135", "base" => "mds-vo-name=NorduGrid,o=grid", "vo" => "NorduGrid"), array("host" => "index2.nordugrid.org", "port" => "2135", "base" => "mds-vo-name=NorduGrid,o=grid", "vo" => "NorduGrid"), array("host" => "index3.nordugrid.org", "port" => "2135", "base" => "mds-vo-name=NorduGrid,o=grid", "vo" => "NorduGrid"), array("host" => "index4.nordugrid.org", "port" => "2135", "base" => "mds-vo-name=NorduGrid,o=grid", "vo" => "NorduGrid")*/ //*** A country-level GIIS example, use as many as you wish to monitor: //, //array("host" => "f9pc18.ijs.si", // "port" => "2135", // "base" => "mds-vo-name=Slovenia,o=grid", // "vo" => "Slovenia") //*** A single site GRIS example, use as many as you wish to monitor: //, //array("host" => "gridmaster.pzr.uni-rostock.de", // "port" => "2135", // "base" => "nordugrid-cluster-name=gridmaster.pzr.uni-rostock.de,mds-vo-name=local,o=grid", // "vo" => "Germany") ); // list of available ISISs $isislist = array( "https://knowarc2.grid.niif.hu:50000/isis_unavailable", "https://knowarc2.grid.niif.hu:50000/isis" ); // place of the certificate file $cert = "/var/www/ws-monitor/gridmonitor_user.pem"; // base DNs for searches: local (GRIS), global (GIIS), VO if ( !defined("DN_LOCAL") ) define("DN_LOCAL","mds-vo-name=local,o=grid"); if ( !defined("DN_GLOBL") ) define("DN_GLOBL","mds-vo-name=NorduGrid,o=grid"); if ( !defined("DN_VIORG") ) define("DN_VIORG","dc=nordugrid,dc=org"); if ( !defined("DN_PEOPL") ) define("DN_PEOPL","ou=people,dc=nordugrid,dc=org"); if ( !defined("DN_GUEST") ) define("DN_GUEST","ou=guests,dc=nordugrid,dc=org"); if ( !defined("DN_TUTOR") ) define("DN_TUTOR","ou=tutorial,dc=nordugrid,dc=org"); if ( !defined("DN_SERVS") ) define("DN_SERVS","ou=services,dc=nordugrid,dc=org"); if ( !defined("DN_RECAT") ) define("DN_RECAT","rc=NorduGrid,dc=nordugrid,dc=org"); // Information system classes and attributes namespace prefix, "nordugrid" if ( !defined("IS_PREFX") ) define("IS_PREFX","nordugrid"); // Cache location (use ../htdata when installing directly in ./htdocs) if ( 
!defined("CACHE_LOCATION") ) define("CACHE_LOCATION","cache"); // Extra title to be added to "Grid Monitor" (e.g. My Favorite) if ( !defined("EXTRA_TITLE") ) define("EXTRA_TITLE",""); //========================================================================= // =================== no need to change things below ===================== //========================================================================= // objectclasses if ( !defined("OBJ_CLUS") ) define("OBJ_CLUS",IS_PREFX."-cluster"); if ( !defined("OBJ_STEL") ) define("OBJ_STEL",IS_PREFX."-se"); if ( !defined("OBJ_QUEU") ) define("OBJ_QUEU",IS_PREFX."-queue"); if ( !defined("OBJ_AJOB") ) define("OBJ_AJOB",IS_PREFX."-job"); if ( !defined("OBJ_USER") ) define("OBJ_USER",IS_PREFX."-authuser"); if ( !defined("OBJ_PERS") ) define("OBJ_PERS","organizationalPerson"); if ( !defined("OBJ_RCOL") ) define("OBJ_RCOL","GlobusReplicaLogicalCollection"); /* RC Logical Collection object */ if ( !defined("OBJ_RFIL") ) define("OBJ_RFIL","GlobusReplicaLogicalFile"); /* RC Logical File object */ if ( !defined("OBJ_RFIN") ) define("OBJ_RFIN","GlobusReplicaFileInfo"); /* RC File Info object */ if ( !defined("OBJ_RSEL") ) define("OBJ_RSEL","GlobusReplicaInfo"); /* RC Info object */ // attributes if ( !defined("CLU_NAME") ) define("CLU_NAME",IS_PREFX."-cluster-name"); if ( !defined("CLU_ANAM") ) define("CLU_ANAM","Name"); if ( !defined("CLU_ZIPC") ) define("CLU_ZIPC","Location"); if ( !defined("CLU_TCPU") ) define("CLU_TCPU","TotalLogicalCPUs"); if ( !defined("CLU_UCPU") ) define("CLU_UCPU","usedcpus"); if ( !defined("CLU_TJOB") ) define("CLU_TJOB","TotalJobs"); if ( !defined("CLU_QJOB") ) define("CLU_QJOB",IS_PREFX."-cluster-queuedjobs"); /* deprecated since 0.5.38 */ if ( !defined("CLU_OWNR") ) define("CLU_OWNR",IS_PREFX."-cluster-owner"); if ( !defined("CLU_SUPP") ) define("CLU_SUPP",IS_PREFX."-cluster-support"); if ( !defined("CLU_PQUE") ) define("CLU_PQUE","PreLRMSWaitingJobs"); /* new since 0.5.38 */ if ( !defined("SEL_NAME") ) define("SEL_NAME",IS_PREFX."-se-name"); if ( !defined("SEL_BURL") ) define("SEL_BURL",IS_PREFX."-se-baseurl"); /* gone since 0.5.26 */ if ( !defined("SEL_CURL") ) define("SEL_CURL",IS_PREFX."-se-url"); /* in place since 0.5.26 */ if ( !defined("SEL_ANAM") ) define("SEL_ANAM",IS_PREFX."-se-aliasname"); if ( !defined("SEL_TYPE") ) define("SEL_TYPE",IS_PREFX."-se-type"); if ( !defined("SEL_FREE") ) define("SEL_FREE",IS_PREFX."-se-freespace"); if ( !defined("SEL_TOTA") ) define("SEL_TOTA",IS_PREFX."-se-totalspace"); if ( !defined("SEL_USER") ) define("SEL_USER",IS_PREFX."-se-authuser"); if ( !defined("QUE_NAME") ) define("QUE_NAME","Name"); if ( !defined("QUE_STAT") ) define("QUE_STAT","ServingState"); if ( !defined("QUE_RUNG") ) define("QUE_RUNG","RunningJobs"); if ( !defined("QUE_GRUN") ) define("QUE_GRUN","gridrunning"); if ( !defined("QUE_MAXR") ) define("QUE_MAXR",IS_PREFX."-queue-maxrunning"); if ( !defined("QUE_QUED") ) define("QUE_QUED",IS_PREFX."-queue-queued"); /* deprecated since 0.5.38 */ if ( !defined("QUE_LQUE") ) define("QUE_LQUE","LocalWaitingJobs"); /* new since 0.5.38 */ if ( !defined("QUE_PQUE") ) define("QUE_PQUE","PreLRMSWaitingJobs"); /* new since 0.5.38 */ if ( !defined("QUE_GQUE") ) define("QUE_GQUE","WaitingJobs"); if ( !defined("QUE_MAXQ") ) define("QUE_MAXQ",IS_PREFX."-queue-maxqueuable"); if ( !defined("QUE_ASCP") ) define("QUE_ASCP","MaxRunningJobs"); if ( !defined("QUE_MINT") ) define("QUE_MINT","MinCPUTime"); if ( !defined("QUE_MAXT") ) define("QUE_MAXT","MaxTotalCPUTime"); if ( !defined("JOB_GLID") 
) define("JOB_GLID",IS_PREFX."-job-globalid"); if ( !defined("JOB_NAME") ) define("JOB_NAME","Name"); if ( !defined("JOB_STAT") ) define("JOB_STAT","State"); if ( !defined("JOB_EQUE") ) define("JOB_EQUE","Queue"); if ( !defined("JOB_ECLU") ) define("JOB_ECLU",IS_PREFX."-job-execcluster"); if ( !defined("JOB_GOWN") ) define("JOB_GOWN","Owner"); if ( !defined("JOB_USET") ) define("JOB_USET","UsedTotalCPUTime"); if ( !defined("JOB_USEM") ) define("JOB_USEM","UsedMainMemory"); if ( !defined("JOB_SUBM") ) define("JOB_SUBM",IS_PREFX."-job-submissiontime"); if ( !defined("JOB_COMP") ) define("JOB_COMP","EndTime"); if ( !defined("JOB_ERRS") ) define("JOB_ERRS","OtherMessages"); if ( !defined("JOB_CPUS") ) define("JOB_CPUS","RequestedSlots"); if ( !defined("USR_NAME") ) define("USR_NAME",IS_PREFX."-authuser-name"); if ( !defined("USR_USSN") ) define("USR_USSN",IS_PREFX."-authuser-sn"); if ( !defined("USR_CPUS") ) define("USR_CPUS","-authuser-freecpus"); if ( !defined("USR_QUEU") ) define("USR_QUEU",IS_PREFX."-authuser-queuelength"); if ( !defined("USR_DISK") ) define("USR_DISK","diskspace"); if ( !defined("VO_USCN" ) ) define("VO_USCN" ,"cn"); if ( !defined("VO_USSN" ) ) define("VO_USSN" ,"sn"); if ( !defined("VO_DESC" ) ) define("VO_DESC" ,"description"); if ( !defined("VO_MAIL" ) ) define("VO_MAIL" ,"mail"); if ( !defined("VO_INST" ) ) define("VO_INST" ,"o"); //************************************* Grid Monitor top window style ****************************** $def_loadmon = array( "refresh" => 120, "bgcolor" => "#ffffff", "thcolor" => "#005659", "lcolor" => "#005659", "tbcolor" => "#ffecb5", "thfont" => "face=\"sans-serif\" color=#ffffff", "tbfont" => "face=\"sans-serif\"" ); //************************************* Cluster description style ********************************** $def_clusdes = array ( //"title" => $theaders["clusdes"][0], "refresh" => 600, "bgcolor" => "#ffcc33", "thcolor" => "#000099", "lcolor" => "#000099", "tbcolor" => "#ffffcc", "thfont" => "face=\"sans-serif\" color=\"#ffffff\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); //*************************************** Job statistics style ************************************* $def_jobstat = array ( "refresh" => 600, "bgcolor" => "#ffffff", "thcolor" => "#ffcc33", "lcolor" => "#000099", "tbcolor" => "#ffffcc", "thfont" => "face=\"sans-serif\" color=\"#000000\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); //******************************************* VO list style *************************************** $def_volist = array( "refresh" => 0, "bgcolor" => "#ffffff", "thcolor" => "#ffcc33", "lcolor" => "#ffff00", "tbcolor" => "#cc0033", "thfont" => "face=\"sans-serif\" color=\"#993300\"", "tbfont" => "face=\"sans-serif\" color=\"#ffffff\"" ); //***************************************** VO user base style ************************************* $def_vousers = array( "refresh" => 0, "bgcolor" => "#ffffff", "thcolor" => "#ffcc33", "lcolor" => "#ffcccc", "tbcolor" => "#000099", "thfont" => "face=\"sans-serif\" color=\"#000000\"", "tbfont" => "face=\"sans-serif\" color=\"#ffffff\"" ); //***************************************** User job list style ************************************ $def_userlist = array( "refresh" => 0, "bgcolor" => "#ffffcc", "thcolor" => "#ffcc33", "lcolor" => "#000099", "tbcolor" => "#ffffff", "thfont" => "face=\"sans-serif\" color=\"#000000\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); $def_userres = array( "thcolor" => "#000099", "tbcolor" => "#ffffcc", "thfont" => "face=\"sans-serif\" 
color=\"#ffffff\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); //**************************************** Attribute list style ************************************ $def_attlist = array( "refresh" => 0, "bgcolor" => "#ffffff", "thcolor" => "#000099", "lcolor" => "#000099", "tbcolor" => "#ccffff", "thfont" => "face=\"sans-serif\" color=\"#ffffff\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); //****************************************** Queue job list style ********************************** $def_quelist = array( "refresh" => 300, "bgcolor" => "#ffffff", "thcolor" => "#000099", "lcolor" => "#000099", "tbcolor" => "#ffffcc", "thfont" => "face=\"sans-serif\" color=\"#ffffff\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); //******************************************* SE info style *************************************** $def_sestat = array( "refresh" => 300, "bgcolor" => "#ffffff", "thcolor" => "#ffcc33", "lcolor" => "#003300", "tbcolor" => "#CCCC99", "thfont" => "face=\"sans-serif\" color=\"#990000\"", "tbfont" => "face=\"sans-serif\" color=\"#000000\"" ); //******************************************* Users info style *************************************** $def_allusers = array( "refresh" => 0, "bgcolor" => "#ffffff", "thcolor" => "#339966", "lcolor" => "#003300", "tbcolor" => "#ccffcc", "thfont" => "face=\"sans-serif\" color=\"#ffffff\"", "tbfont" => "face=\"sans-serif\" color=\"#000000\"" ); //***************************** LDAP parameters dump style - no need to modify ********************* $def_ldapdump = array( "thcolor" => "#000099", "tbcolor" => "#ffffcc", "thfont" => "face=\"sans-serif\" color=\"#ffffff\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); ?> nordugrid-arc-5.4.2/src/services/ws-monitor/includes/PaxHeaders.7502/toreload.inc0000644000000000000000000000012411506361170026037 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.686728 30 ctime=1513200663.964795977 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/toreload.inc0000755000175000002070000000060611506361170026111 0ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ws-monitor/includes/PaxHeaders.7502/cache.inc0000644000000000000000000000012411506361170025271 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.685728 30 ctime=1513200663.951795818 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/cache.inc0000755000175000002070000000255311506361170025346 0ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ws-monitor/includes/PaxHeaders.7502/cnvalias.inc0000644000000000000000000000012311506361170026025 xustar000000000000000027 mtime=1293542008.286757 27 atime=1513200576.683728 29 ctime=1513200663.95279583 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/cnvalias.inc0000755000175000002070000000302611506361170026077 0ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ws-monitor/includes/PaxHeaders.7502/recursive_giis_info.inc0000644000000000000000000000012412223274207030264 xustar000000000000000027 mtime=1380808839.583104 27 atime=1513200576.691728 30 ctime=1513200663.962795952 nordugrid-arc-5.4.2/src/services/ws-monitor/includes/recursive_giis_info.inc0000755000175000002070000001220012223274207030327 0ustar00mockbuildmock00000000000000 "grid.nbi.dk", * "port" => "2135", * "base" => "mds-vo-name=NorduGrid,o=grid"),...) */ $loopcnt++; $tlim = 2; $tout = 5; if($debug && count($giislist) < 5) dbgmsg("
    :::> ".$errors["114"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); $greg = array(); $gfilter = "(objectclass=mds*)"; $ngiis = count($giislist); $counter = 0; $tag = array(); $dsarray = array(); $dnarray = array(); $hnarray = array(); $entries = array(); $blacklist = array(); if ( file_exists("blacklist.inc") ) include('blacklist.inc'); // Loop on entered sites // If a host is blacklisted, skip // If a host is a cluster (GRIS), keep and skip // If a host is any other local GRIS, skip for ( $ig = 0; $ig < $ngiis; $ig++ ) { $ghost = $giislist[$ig]["host"]; if ( @$blacklist[$ghost] ) continue; $gport = $giislist[$ig]["port"]; $gbase = $giislist[$ig]["base"]; if ( preg_match("/$element/i",$gbase) ) { // Invent a "fake DN" for host tagging and skip duplicated entries $fakedn = "hn=".$ghost.", ".$gbase; if ( @$tag[$fakedn] ) continue; $tag[$fakedn] = 1; array_push($entries,$giislist[$ig]); $counter++; continue; } elseif ( preg_match("/local/i",$gbase) ) { if ( $debug ) dbgmsg("
    ".$errors["115"].$ghost." (".$errors["116"].$element.")"); continue; } if ( $debug == 2 ) dbgmsg("
    ".$errors["117"]."$ghost..."); $gconn = ldap_connect($ghost,$gport); if ( $gconn ) { if ( $debug == 2 ) dbgmsg($errors["118"]); array_push($dsarray,$gconn); array_push($dnarray,$gbase); array_push($hnarray,$ghost); } if ( $debug == 2 ) dbgmsg("
    "); } // Some debugging printout if ( $debug == 2 ) { dbgmsg("

    ".$errors["119"].$element.": ".$counter."
    "); foreach ( $entries as $num=>$val ) dbgmsg($val["host"].":".$val["base"]."
    "); } // Check if there is underlying structure $srarray = @ldap_read($dsarray,$dnarray,$gfilter,$greg,0,0,$tlim,LDAP_DEREF_NEVER); // Fall back to a conventional LDAP // if (!count($srarray)) $srarray = @ldap_read($dsarray,$dnarray,$gfilter,$greg,0,0,$tlim,LDAP_DEREF_NEVER); // Debug: check if something eventualy timeouts or something if ( $debug ) { $nconns = count($dsarray); for ( $ii = 0; $ii < $nconns; $ii++ ) { $ldconn = $dsarray[$ii]; $hnconn = $hnarray[$ii]; if ( ldap_errno($ldconn) != 0x00 ) { $ldaperrmess = ldap_error($ldconn); dbgmsg("".$errors["120"].$hnconn.": ".$ldaperrmess."
    "); } } } $nhosts = count($srarray); // If GIISes are found, loop on contacted GIISes if ( $nhosts ) { $truecount = 0; for( $ids = 0; $ids < $nhosts; $ids++ ) { // suppose N hosts answered (nhosts), each returned M lower registrants (nrecords) // some of lower registrants are the same and have to be purged // and everything should be re-arranged in a new common array $sr = $srarray[$ids]; $ds = $dsarray[$ids]; $base = $dnarray[$ids]; if ($sr) $truecount++; $record = @ldap_get_entries($ds,$sr); $nrecords = $record["count"]; // Per each contacted GIIS, loop on potential lower-level GIISes/clusters for ($i = 0; $i < $nrecords; $i++) { $curdn = $record[$i]["dn"]; $curhost = $record[$i]["mds-service-hn"][0]; $curstat = $record[$i]["mds-reg-status"][0]; $curport = $record[$i]["mds-service-port"][0]; $cursuff = $record[$i]["mds-service-ldap-suffix"][0]; // Introduce "fake" DN for tagging purpose - helps skipping sites registering twice $fakedn = "hn=".$curhost.", ".$cursuff; if ( @$tag[$fakedn] ) continue; if ( $curstat != "VALID" ) { if ( $debug ) dbgmsg("
    $curstat".$errors["121"]."$fakedn
    \n"); continue; } if ( @$blacklist[$curhost] ) { if ( $debug ) dbgmsg("
    ".$errors["122"]."$curhost
    \n"); continue; } // array_push($entries,$record[$i]); $entries[$counter]["host"] = $curhost; $entries[$counter]["port"] = $curport; $entries[$counter]["base"] = $cursuff; if ( $debug == 2 ) dbgmsg("
    ".$errors["123"]."$base: $fakedn
    \n"); $tag[$fakedn] = 1; $counter++; } } // Array $entries contains all possible stuff which registers to a GIIS // Keep recursing if ($truecount && $loopcnt < 10 ) $entries = recursive_giis_info($entries,$element,$errors,$debug,$loopcnt); } return $entries; } ?>nordugrid-arc-5.4.2/src/services/PaxHeaders.7502/wrappers0000644000000000000000000000013013214316030021363 xustar000000000000000029 mtime=1513200664.03779687 30 atime=1513200668.717854109 29 ctime=1513200664.03779687 nordugrid-arc-5.4.2/src/services/wrappers/0000755000175000002070000000000013214316030021510 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/wrappers/PaxHeaders.7502/Makefile.am0000644000000000000000000000012611542111161023501 xustar000000000000000026 mtime=1300796017.30964 30 atime=1513200605.006074889 30 ctime=1513200664.033796821 nordugrid-arc-5.4.2/src/services/wrappers/Makefile.am0000644000175000002070000000031511542111161023543 0ustar00mockbuildmock00000000000000if JAVA_ENABLED JAVA_WRAPPER = java else JAVA_WRAPPER = endif if PYTHON_SERVICE PYTHON_WRAPPER = python else PYTHON_WRAPPER = endif SUBDIRS = $(JAVA_WRAPPER) $(PYTHON_WRAPPER) DIST_SUBDIRS = java python nordugrid-arc-5.4.2/src/services/wrappers/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315735023522 xustar000000000000000030 mtime=1513200605.039075292 30 atime=1513200651.805647266 30 ctime=1513200664.034796833 nordugrid-arc-5.4.2/src/services/wrappers/Makefile.in0000644000175000002070000005600413214315735023575 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/wrappers DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = 
@ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = 
@LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ 
libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @JAVA_ENABLED_FALSE@JAVA_WRAPPER = @JAVA_ENABLED_TRUE@JAVA_WRAPPER = java @PYTHON_SERVICE_FALSE@PYTHON_WRAPPER = @PYTHON_SERVICE_TRUE@PYTHON_WRAPPER = python SUBDIRS = $(JAVA_WRAPPER) $(PYTHON_WRAPPER) DIST_SUBDIRS = java python all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/wrappers/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/wrappers/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/wrappers/PaxHeaders.7502/java0000644000000000000000000000013213214316030022306 xustar000000000000000030 mtime=1513200664.067797237 30 atime=1513200668.717854109 30 ctime=1513200664.067797237 nordugrid-arc-5.4.2/src/services/wrappers/java/0000755000175000002070000000000013214316030022431 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/wrappers/java/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515024433 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200605.055075488 30 ctime=1513200664.061797163 nordugrid-arc-5.4.2/src/services/wrappers/java/Makefile.am0000644000175000002070000000113012052416515024470 0ustar00mockbuildmock00000000000000SUBDIRS = schema pkglib_LTLIBRARIES = libjavaservice.la libjavaservice_la_SOURCES = javawrapper.cpp javawrapper.h libjavaservice_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(JDK_CFLAGS) \ -fno-strict-aliasing $(AM_CXXFLAGS) libjavaservice_la_LDFLAGS = -no-undefined -avoid-version -module libjavaservice_la_LIBADD = \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) nordugrid-arc-5.4.2/src/services/wrappers/java/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315735024443 xustar000000000000000030 mtime=1513200605.103076075 30 atime=1513200651.823647486 30 ctime=1513200664.062797175 nordugrid-arc-5.4.2/src/services/wrappers/java/Makefile.in0000644000175000002070000007472613214315735024531 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/wrappers/java DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libjavaservice_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libjavaservice_la_OBJECTS = libjavaservice_la-javawrapper.lo libjavaservice_la_OBJECTS = $(am_libjavaservice_la_OBJECTS) libjavaservice_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libjavaservice_la_CXXFLAGS) $(CXXFLAGS) \ $(libjavaservice_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) 
$(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libjavaservice_la_SOURCES) DIST_SOURCES = $(libjavaservice_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = 
@AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ 
OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ 
sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = schema pkglib_LTLIBRARIES = libjavaservice.la libjavaservice_la_SOURCES = javawrapper.cpp javawrapper.h libjavaservice_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(JDK_CFLAGS) \ -fno-strict-aliasing $(AM_CXXFLAGS) libjavaservice_la_LDFLAGS = -no-undefined -avoid-version -module libjavaservice_la_LIBADD = \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/wrappers/java/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/wrappers/java/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libjavaservice.la: $(libjavaservice_la_OBJECTS) $(libjavaservice_la_DEPENDENCIES) $(libjavaservice_la_LINK) -rpath $(pkglibdir) $(libjavaservice_la_OBJECTS) 
$(libjavaservice_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libjavaservice_la-javawrapper.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libjavaservice_la-javawrapper.lo: javawrapper.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjavaservice_la_CXXFLAGS) $(CXXFLAGS) -MT libjavaservice_la-javawrapper.lo -MD -MP -MF $(DEPDIR)/libjavaservice_la-javawrapper.Tpo -c -o libjavaservice_la-javawrapper.lo `test -f 'javawrapper.cpp' || echo '$(srcdir)/'`javawrapper.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libjavaservice_la-javawrapper.Tpo $(DEPDIR)/libjavaservice_la-javawrapper.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='javawrapper.cpp' object='libjavaservice_la-javawrapper.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjavaservice_la_CXXFLAGS) $(CXXFLAGS) -c -o libjavaservice_la-javawrapper.lo `test -f 'javawrapper.cpp' || echo '$(srcdir)/'`javawrapper.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-pkglibLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am \ uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/wrappers/java/PaxHeaders.7502/javawrapper.cpp0000644000000000000000000000012412675602216025430 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.608715 30 ctime=1513200664.063797187 nordugrid-arc-5.4.2/src/services/wrappers/java/javawrapper.cpp0000644000175000002070000002024412675602216025477 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "javawrapper.h" static Arc::Plugin* get_service(Arc::PluginArgument* arg) { Arc::ServicePluginArgument* srvarg = arg?dynamic_cast(arg):NULL; if(!srvarg) return NULL; return new Arc::Service_JavaWrapper((Arc::Config*)(*srvarg),arg); } extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "arcservice_javawrapper", "HED:SERVICE", NULL, 0, &get_service }, { NULL, NULL, NULL, 0, NULL } }; namespace Arc { Arc::Logger Service_JavaWrapper::logger(Service::logger, "JavaWrapper"); Service_JavaWrapper::Service_JavaWrapper(Arc::Config *cfg, Arc::PluginArgument* parg) : Service(cfg,parg), libjvm(NULL), jvm(NULL), classPath(NULL) { std::string path = "-Djava.class.path=" + (std::string)((*cfg)["ClassPath"]); std::string class_name = (std::string)(*cfg)["ClassName"]; logger.msg(Arc::VERBOSE, "config: %s, class name: %s", path, class_name); JNIEnv *jenv = NULL; JavaVMInitArgs jvm_args; JavaVMOption options[1]; /* Initiliaze Java engine */ Glib::ModuleFlags flags = Glib::ModuleFlags(0); libjvm = new Glib::Module("libjvm.so", flags); if (!*libjvm) { logger.msg(Arc::ERROR, "libjvm.so not loadable - check your LD_LIBRARY_PATH"); return; } void* myJNI_GetDefaultJavaVMInitArgs; libjvm->get_symbol("JNI_GetDefaultJavaVMInitArgs", myJNI_GetDefaultJavaVMInitArgs); void* myJNI_CreateJavaVM; libjvm->get_symbol("JNI_CreateJavaVM", myJNI_CreateJavaVM); if (myJNI_GetDefaultJavaVMInitArgs == NULL || myJNI_CreateJavaVM == NULL) { logger.msg(Arc::ERROR, "libjvm.so does not contain the expected symbols"); return; } ((jint(*)(void*))myJNI_GetDefaultJavaVMInitArgs)(&jvm_args); jvm_args.version = JNI_VERSION_1_2; jvm_args.nOptions = 1; classPath = strdup(path.c_str()); options[0].optionString = classPath; options[0].extraInfo = NULL; // "-Djava.class.path=.:/home/szferi/arc1/src/services/echo_java/:/home/szferi/arc1/java/arc.jar"; jvm_args.options = options; jvm_args.ignoreUnrecognized = JNI_FALSE; ((jint(*)(JavaVM**, void**, void*))myJNI_CreateJavaVM)(&jvm, (void **)&jenv, &jvm_args); logger.msg(Arc::VERBOSE, "JVM started"); /* Find and construct class */ serviceClass = jenv->FindClass(class_name.c_str()); if (serviceClass == NULL) { logger.msg(Arc::ERROR, "There is no service: %s in your Java class search path", class_name); if (jenv->ExceptionOccurred()) { jenv->ExceptionDescribe(); } return; } jmethodID constructorID = jenv->GetMethodID(serviceClass, "", "()V"); if (constructorID == NULL) { logger.msg(Arc::ERROR, "There is no constructor function"); if (jenv->ExceptionOccurred()) { jenv->ExceptionDescribe(); } return; } serviceObj = jenv->NewObject(serviceClass, constructorID); logger.msg(Arc::VERBOSE, "%s constructed", class_name); } Service_JavaWrapper::~Service_JavaWrapper(void) { logger.msg(Arc::VERBOSE, "Destroy JVM"); if (jvm) jvm->DestroyJavaVM(); if (libjvm) delete libjvm; if (classPath) free(classPath); } Arc::MCC_Status Service_JavaWrapper::make_fault(Arc::Message& outmsg) { Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(Arc::NS(),true); Arc::SOAPFault* fault = outpayload->Fault(); if(fault) { fault->Code(Arc::SOAPFault::Sender); 
fault->Reason("Failed processing request"); }; outmsg.Payload(outpayload); return Arc::MCC_Status(); } Arc::MCC_Status Service_JavaWrapper::java_error(JNIEnv *jenv, const char *str) { std::cerr << str << std::endl; if (jenv->ExceptionOccurred()) { jenv->ExceptionDescribe(); } /* Cleanup */ jvm->DetachCurrentThread(); return Arc::MCC_Status(Arc::GENERIC_ERROR); } Arc::MCC_Status Service_JavaWrapper::process(Arc::Message& inmsg, Arc::Message& outmsg) { JNIEnv *jenv = NULL; /* Attach to the current Java engine thread */ jvm->AttachCurrentThread((void **)&jenv, NULL); /* Get the process function of service */ jmethodID processID = jenv->GetMethodID(serviceClass, "process", "(Lnordugrid/arc/SOAPMessage;Lnordugrid/arc/SOAPMessage;)Lnordugrid/arc/MCC_Status;"); if (processID == NULL) { return java_error(jenv, "Cannot find process method of Java class"); } /* convert inmsg and outmsg to Java objects */ Arc::SOAPMessage *inmsg_ptr = NULL; Arc::SOAPMessage *outmsg_ptr = NULL; try { inmsg_ptr = new Arc::SOAPMessage(inmsg); outmsg_ptr = new Arc::SOAPMessage(outmsg); } catch(std::exception& e) { }; if(!inmsg_ptr) { logger.msg(Arc::ERROR, "input is not SOAP"); return make_fault(outmsg); }; if(!outmsg_ptr) { logger.msg(Arc::ERROR, "output is not SOAP"); return make_fault(outmsg); }; jclass JSOAPMessageClass = jenv->FindClass("nordugrid/arc/SOAPMessage"); if (JSOAPMessageClass == NULL) { return java_error(jenv, "Cannot find SOAPMessage object"); } /* Get the constructor of Java object */ jmethodID constructorID = jenv->GetMethodID(JSOAPMessageClass, "", "(I)V"); if (constructorID == NULL) { return java_error(jenv, "Cannot find constructor function of message"); } /* Convert C++ object to Java objects */ jobject jinmsg = jenv->NewObject(JSOAPMessageClass, constructorID, (jlong)((long int)inmsg_ptr)); if (jinmsg == NULL) { return java_error(jenv, "Cannot convert input message to Java object"); } jobject joutmsg = jenv->NewObject(JSOAPMessageClass, constructorID, (jlong)((long int)outmsg_ptr)); if (jinmsg == NULL) { return java_error(jenv, "Cannot convert output message to Java object"); } /* Create arguments for Java process function */ jvalue args[2]; args[0].l = jinmsg; args[1].l = joutmsg; /* Call the process method of Java object */ jobject jmcc_status = jenv->CallObjectMethodA(serviceObj, processID, args); if (jmcc_status == NULL) { return java_error(jenv, "Error in call process function of Java object"); } /* Get SWIG specific getCPtr function of Message class */ jmethodID msg_getCPtrID = jenv->GetStaticMethodID(JSOAPMessageClass, "getCPtr", "(Lnordugrid/arc/SOAPMessage;)J"); if (msg_getCPtrID == NULL) { return java_error(jenv, "Cannot find getCPtr method of Java Message class"); } /* Get Java MCC_Status class */ jclass JMCC_StatusClass = jenv->FindClass("nordugrid/arc/MCC_Status"); if (JMCC_StatusClass == NULL) { logger.msg(Arc::ERROR, "Cannot find MCC_Status object"); /* Cleanup */ jvm->DetachCurrentThread(); return Arc::MCC_Status(Arc::GENERIC_ERROR); } /* Get SWIG specific getCPtr function of MCC_Status class */ jmethodID mcc_status_getCPtrID = jenv->GetStaticMethodID(JMCC_StatusClass, "getCPtr", "(Lnordugrid/arc/MCC_Status;)J"); if (mcc_status_getCPtrID == NULL) { return java_error(jenv, "Cannot find getCPtr method of Java MCC_Status class"); } /* Convert Java status object to C++ class */ jlong mcc_status_addr = jenv->CallStaticLongMethod(JMCC_StatusClass, mcc_status_getCPtrID, jmcc_status); if (!mcc_status_addr) { logger.msg(ERROR, "Java object returned NULL status"); return 
MCC_Status(GENERIC_ERROR); } Arc::MCC_Status status(*((Arc::MCC_Status *)(long)mcc_status_addr)); /* Convert Java output message object to C++ class */ jlong outmsg_addr = jenv->CallStaticLongMethod(JSOAPMessageClass, msg_getCPtrID, joutmsg); Arc::SOAPMessage *outmsg_ptr2 = (Arc::SOAPMessage *)(long)outmsg_addr; /* std::string xml; outmsg_ptr2->Payload()->GetXML(xml); std::cout << xml << std::endl; */ Arc::PayloadSOAP *pl = new Arc::PayloadSOAP(*(outmsg_ptr2->Payload())); outmsg.Payload((MessagePayload *)pl); // XXX: how to handle error? /* Detach from the Java engine */ jvm->DetachCurrentThread(); return status; } } // namespace Arc nordugrid-arc-5.4.2/src/services/wrappers/java/PaxHeaders.7502/javawrapper.h0000644000000000000000000000012211770355631025074 xustar000000000000000027 mtime=1340201881.630982 27 atime=1513200575.607715 28 ctime=1513200664.0647972 nordugrid-arc-5.4.2/src/services/wrappers/java/javawrapper.h0000644000175000002070000000164511770355631025151 0ustar00mockbuildmock00000000000000#ifndef __ARC_SERVICE_JAVA_WRAPPER_H__ #define __ARC_SERVICE_JAVA_WRAPPER_H__ #ifdef HAVE_JNI_H #include #else #ifdef HAVE_JAVAVM_JNI_H #include #endif #endif #include #include namespace Arc { class Service_JavaWrapper: public Arc::Service { protected: Glib::Module *libjvm; JavaVM *jvm; char* classPath; jclass serviceClass; jobject serviceObj; Arc::MCC_Status make_fault(Arc::Message& outmsg); Arc::MCC_Status java_error(JNIEnv *jenv, const char *str); static Arc::Logger logger; public: Service_JavaWrapper(Arc::Config *cfg, Arc::PluginArgument* parg); virtual ~Service_JavaWrapper(void); /** Service request processing routine */ virtual Arc::MCC_Status process(Arc::Message&, Arc::Message&); }; } // namespace Arc #endif // __ARC_SERVICE_JAVA_WRAPPER_H__ nordugrid-arc-5.4.2/src/services/wrappers/java/PaxHeaders.7502/schema0000644000000000000000000000013213214316030023546 xustar000000000000000030 mtime=1513200664.088797493 30 atime=1513200668.717854109 30 ctime=1513200664.088797493 nordugrid-arc-5.4.2/src/services/wrappers/java/schema/0000755000175000002070000000000013214316030023671 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/wrappers/java/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711255700321025666 xustar000000000000000027 mtime=1253540049.444682 30 atime=1513200605.120076283 30 ctime=1513200664.086797469 nordugrid-arc-5.4.2/src/services/wrappers/java/schema/Makefile.am0000644000175000002070000000014411255700321025727 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = javawrapper.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/services/wrappers/java/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315735025703 xustar000000000000000030 mtime=1513200605.152076674 30 atime=1513200651.837647657 30 ctime=1513200664.087797481 nordugrid-arc-5.4.2/src/services/wrappers/java/schema/Makefile.in0000644000175000002070000004355113214315735025761 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/wrappers/java/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ 
ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = 
@LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = 
@lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = javawrapper.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/wrappers/java/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/wrappers/java/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" 
| \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/wrappers/java/schema/PaxHeaders.7502/javawrapper.xsd0000644000000000000000000000012411255700321026671 xustar000000000000000027 mtime=1253540049.444682 27 atime=1513200575.608715 30 ctime=1513200664.089797506 nordugrid-arc-5.4.2/src/services/wrappers/java/schema/javawrapper.xsd0000644000175000002070000000146011255700321026737 0ustar00mockbuildmock00000000000000 Defines search path of java classes. Defines the name of the class which containes the service implementation. nordugrid-arc-5.4.2/src/services/wrappers/java/PaxHeaders.7502/README0000644000000000000000000000012411001653037023247 xustar000000000000000027 mtime=1208440351.928622 27 atime=1513200575.608715 30 ctime=1513200664.059797139 nordugrid-arc-5.4.2/src/services/wrappers/java/README0000644000175000002070000000005111001653037023310 0ustar00mockbuildmock00000000000000service which wraps java based services. nordugrid-arc-5.4.2/src/services/wrappers/PaxHeaders.7502/README0000644000000000000000000000012411001653037022326 xustar000000000000000027 mtime=1208440351.928622 27 atime=1513200575.566714 30 ctime=1513200664.032796808 nordugrid-arc-5.4.2/src/services/wrappers/README0000644000175000002070000000004011001653037022365 0ustar00mockbuildmock00000000000000collection of language bindings nordugrid-arc-5.4.2/src/services/wrappers/PaxHeaders.7502/python0000644000000000000000000000013013214316030022704 xustar000000000000000029 mtime=1513200664.11879786 30 atime=1513200668.717854109 29 ctime=1513200664.11879786 nordugrid-arc-5.4.2/src/services/wrappers/python/0000755000175000002070000000000013214316030023031 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/wrappers/python/PaxHeaders.7502/pythonwrapper.cpp0000644000000000000000000000012412675602216026430 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.612715 30 ctime=1513200664.114797811 nordugrid-arc-5.4.2/src/services/wrappers/python/pythonwrapper.cpp0000644000175000002070000004454512675602216026511 0ustar00mockbuildmock00000000000000// based on: // http://www.codeproject.com/cpp/embedpython_1.asp // http://coding.derkeiler.com/Archive/Python/comp.lang.python/2006-11/msg01211.html #ifdef HAVE_CONFIG_H #include #endif #include "pythonwrapper.h" #include #include #include #include #include #ifndef WIN32 #include #endif #ifdef __cplusplus extern "C" { #endif /* SWIG Specific object SHOULD BE SYNC WITH generated SWIG CODE */ typedef void *(*swig_converter_func)(void *); typedef struct swig_type_info *(*swig_dycast_func)(void **); typedef struct swig_type_info { const char *name; /* mangled name of this type */ const char *str; /* human readable name of this type */ swig_dycast_func dcast; /* dynamic cast function down a hierarchy */ struct swig_cast_info *cast; /* linked list of types that can cast into this type */ void *clientdata; /* language specific type data */ int owndata; /* flag if the structure owns the clientdata */ } swig_type_info; /* Structure to store a type and conversion function used for casting */ typedef struct swig_cast_info { swig_type_info *type; /* pointer to type that is equivalent to this type */ swig_converter_func converter; /* function to cast the void pointers */ struct swig_cast_info *next; /* pointer to next cast in linked list */ struct swig_cast_info *prev; /* pointer to the previous cast */ } swig_cast_info; typedef struct { PyObject_HEAD void *ptr; swig_type_info *ty; int own; PyObject *next; } PySwigObject; #ifdef __cplusplus } #endif void 
*extract_swig_wrappered_pointer(PyObject *obj) { char this_str[] = "this"; if (!PyObject_HasAttrString(obj, this_str)) { return NULL; } PyObject *thisAttr = PyObject_GetAttrString(obj, this_str); if (thisAttr == NULL) { return NULL; } void* ptr = ((PySwigObject *)thisAttr)->ptr; Py_DECREF(thisAttr); return ptr; } // Thread state of main python interpreter thread static PyThreadState *tstate = NULL; static int python_service_counter = 0; static Glib::Mutex service_lock; Arc::Logger Arc::Service_PythonWrapper::logger(Service::logger, "PythonWrapper"); static Arc::Plugin* get_service(Arc::PluginArgument* arg) { Arc::ServicePluginArgument* srvarg = arg?dynamic_cast(arg):NULL; if(!srvarg) return NULL; Arc::ChainContext* ctx = (Arc::ChainContext*)(*srvarg); #ifndef WIN32 // ((Arc::PluginsFactory*)(*ctx))->load("pythonservice",false,true); // doesn't work, why? ::dlopen(((Arc::PluginsFactory*)(*ctx))->findLocation("pythonservice").c_str(),RTLD_NOW | RTLD_GLOBAL); #endif service_lock.lock(); // Initialize the Python Interpreter if (!Py_IsInitialized()) { Py_InitializeEx(0); // python does not handle signals PyEval_InitThreads(); // Main thread created and lock acquired tstate = PyThreadState_Get(); // Get current thread if(tstate == NULL) { Arc::Logger::getRootLogger().msg(Arc::ERROR, "Failed to initialize main Python thread"); return NULL; } } else { if(tstate == NULL) { Arc::Logger::getRootLogger().msg(Arc::ERROR, "Main Python thread was not initialized"); return NULL; } PyEval_AcquireThread(tstate); } python_service_counter++; Arc::Logger::getRootLogger().msg(Arc::DEBUG, "Loading %u-th Python service", python_service_counter); service_lock.unlock(); Arc::Service* service = new Arc::Service_PythonWrapper((Arc::Config*)(*srvarg),arg); PyEval_ReleaseThread(tstate); // Release current thread Arc::Logger::getRootLogger().msg(Arc::DEBUG, "Initialized %u-th Python service", python_service_counter); return service; } extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "pythonservice", "HED:SERVICE", NULL, 0, &get_service }, { NULL, NULL, NULL, 0, NULL } }; namespace Arc { Service_PythonWrapper::Service_PythonWrapper(Arc::Config *cfg,Arc::PluginArgument* parg):Service(cfg,parg), initialized(false) { PyObject *py_module_name = NULL; PyObject *py_arc_module_name = NULL; PyObject *dict = NULL; PyObject *arc_dict = NULL; PyObject *arc_cfg_klass = NULL; PyObject *arg = NULL; PyObject *py_cfg = NULL; PyObject *klass = NULL; arc_module = NULL; module = NULL; object = NULL; inforeg = NULL; if (tstate == NULL) { logger.msg(Arc::ERROR, "Main Python thread is not initialized"); return; } //PyEval_AcquireThread(tstate); std::string path = (std::string)(*cfg)["ClassName"]; std::size_t p = path.rfind("."); if (p == std::string::npos) { logger.msg(Arc::ERROR, "Invalid class name"); return; } std::string module_name = path.substr(0, p); std::string class_name = path.substr(p+1, path.length()); logger.msg(Arc::VERBOSE, "class name: %s", class_name); logger.msg(Arc::VERBOSE, "module name: %s", module_name); // Convert module name to Python string #if PY_MAJOR_VERSION >= 3 py_module_name = PyUnicode_FromString(module_name.c_str()); #else py_module_name = PyString_FromString(module_name.c_str()); #endif if (py_module_name == NULL) { logger.msg(Arc::ERROR, "Cannot convert module name to Python string"); if (PyErr_Occurred()) PyErr_Print(); return; } // Load module module = PyImport_Import(py_module_name); if (module == NULL) { logger.msg(Arc::ERROR, "Cannot import module"); if (PyErr_Occurred()) PyErr_Print(); 
Py_DECREF(py_module_name); return; } Py_DECREF(py_module_name); // Import ARC python wrapper #if PY_MAJOR_VERSION >= 3 py_arc_module_name = PyUnicode_FromString("arc"); #else py_arc_module_name = PyString_FromString("arc"); #endif if (py_arc_module_name == NULL) { logger.msg(Arc::ERROR, "Cannot convert ARC module name to Python string"); if (PyErr_Occurred()) PyErr_Print(); return; } // Load arc module arc_module = PyImport_Import(py_arc_module_name); if (arc_module == NULL) { logger.msg(Arc::ERROR, "Cannot import ARC module"); if (PyErr_Occurred()) PyErr_Print(); Py_DECREF(py_arc_module_name); return; } Py_DECREF(py_arc_module_name); // arc_dict is a borrowed reference arc_dict = PyModule_GetDict(arc_module); if (arc_dict == NULL) { logger.msg(Arc::ERROR, "Cannot get dictionary of ARC module"); if (PyErr_Occurred()) PyErr_Print(); return; } // Get the arc config class // arc_cfg_klass is a borrowed reference arc_cfg_klass = PyDict_GetItemString(arc_dict, "Config"); if (arc_cfg_klass == NULL) { logger.msg(Arc::ERROR, "Cannot find ARC Config class"); if (PyErr_Occurred()) PyErr_Print(); return; } // check is it really a class if (!PyCallable_Check(arc_cfg_klass)) { logger.msg(Arc::ERROR, "Config class is not an object"); return; } // Get dictionary of module content // dict is a borrowed reference dict = PyModule_GetDict(module); if (dict == NULL) { logger.msg(Arc::ERROR, "Cannot get dictionary of module"); if (PyErr_Occurred()) PyErr_Print(); return; } // Get the class // klass is a borrowed reference klass = PyDict_GetItemString(dict, (char*)class_name.c_str()); if (klass == NULL) { logger.msg(Arc::ERROR, "Cannot find service class"); if (PyErr_Occurred()) PyErr_Print(); return; } // check is it really a class if (PyCallable_Check(klass)) { arg = Py_BuildValue("(l)", (long int)cfg); if (arg == NULL) { logger.msg(Arc::ERROR, "Cannot create config argument"); if (PyErr_Occurred()) PyErr_Print(); return; } py_cfg = PyObject_CallObject(arc_cfg_klass, arg); if (py_cfg == NULL) { logger.msg(Arc::ERROR, "Cannot convert config to Python object"); if (PyErr_Occurred()) PyErr_Print(); Py_DECREF(arg); return; } Py_DECREF(arg); arg = Py_BuildValue("(O)", py_cfg); if (arg == NULL) { logger.msg(Arc::ERROR, "Cannot create argument of the constructor"); if (PyErr_Occurred()) PyErr_Print(); return; } // create instance of class object = PyObject_CallObject(klass, arg); if (object == NULL) { logger.msg(Arc::ERROR, "Cannot create instance of Python class"); if (PyErr_Occurred()) PyErr_Print(); return; } Py_DECREF(arg); } else { logger.msg(Arc::ERROR, "%s is not an object", class_name); return; } // check is it really a class if (!PyCallable_Check(klass)) { logger.msg(Arc::ERROR, "Message class is not an object"); return; } //tstate = PyGILState_GetThisThreadState(); //PyEval_ReleaseThread(tstate); inforeg = new InfoRegisters(*cfg, this); logger.msg(Arc::VERBOSE, "Python Wrapper constructor succeeded"); initialized = true; } Service_PythonWrapper::~Service_PythonWrapper(void) { if(inforeg) delete inforeg; service_lock.lock(); PyEval_AcquireThread(tstate); // Release python objects - it is needed for Python // destructors to be called if(arc_module) { Py_DECREF(arc_module); } if(module) { Py_DECREF(module); } if(object) { Py_DECREF(object); } // Finish the Python Interpreter python_service_counter--; logger.msg(Arc::VERBOSE, "Python Wrapper destructor (%d)", python_service_counter); if (python_service_counter == 0) { Py_Finalize(); } else { PyEval_ReleaseThread(tstate); } service_lock.unlock(); } 
Arc::MCC_Status Service_PythonWrapper::make_fault(Arc::Message& outmsg) { Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(Arc::NS(),true); Arc::SOAPFault* fault = outpayload->Fault(); if(fault) { fault->Code(Arc::SOAPFault::Sender); fault->Reason("Failed processing request"); }; outmsg.Payload(outpayload); return Arc::MCC_Status(); } /* Arc::MCC_Status Service_PythonWrapper::python_error(const char *str) { return Arc::MCC_Status(Arc::GENERIC_ERROR); }*/ class PythonLock { private: PyGILState_STATE gstate_; Arc::Logger& logger_; public: PythonLock(Arc::Logger& logger):logger_(logger) { gstate_ = PyGILState_Ensure(); logger_.msg(Arc::VERBOSE, "Python interpreter locked"); }; ~PythonLock(void) { PyGILState_Release(gstate_); logger_.msg(Arc::VERBOSE, "Python interpreter released"); }; }; class XMLNodeP { private: Arc::XMLNode* obj_; public: XMLNodeP(Arc::XMLNode& node):obj_(NULL) { try { obj_ = new Arc::XMLNode(node); } catch(std::exception& e) { }; }; ~XMLNodeP(void) { if(obj_) delete obj_; }; XMLNode& operator*(void) const { return *obj_; }; XMLNode* operator->(void) const { return obj_; }; operator bool(void) { return (obj_ != NULL); }; bool operator!(void) { return (obj_ == NULL); }; operator long int(void) { return (long int)obj_; }; }; class SOAPMessageP { private: Arc::SOAPMessage* obj_; public: SOAPMessageP(Arc::Message& msg):obj_(NULL) { try { obj_ = new Arc::SOAPMessage(msg); } catch(std::exception& e) { }; }; ~SOAPMessageP(void) { if(obj_) delete obj_; }; SOAPMessage& operator*(void) const { return *obj_; }; SOAPMessage* operator->(void) const { return obj_; }; operator bool(void) { return (obj_ != NULL); }; bool operator!(void) { return (obj_ == NULL); }; operator long int(void) { return (long int)obj_; }; }; class PyObjectP { private: PyObject* obj_; public: PyObjectP(PyObject* obj):obj_(obj) { }; ~PyObjectP(void) { if(obj_) { Py_DECREF(obj_); } }; operator bool(void) { return (obj_ != NULL); }; bool operator!(void) { return (obj_ == NULL); }; operator PyObject*(void) { return obj_; }; }; Arc::MCC_Status Service_PythonWrapper::process(Arc::Message& inmsg, Arc::Message& outmsg) { //PyObject *py_status = NULL; //PyObject *py_inmsg = NULL; //PyObject *py_outmsg = NULL; PyObject *arg = NULL; logger.msg(Arc::VERBOSE, "Python wrapper process called"); if(!initialized) return Arc::MCC_Status(); PythonLock plock(logger); // Convert in message to SOAP message Arc::SOAPMessageP inmsg_ptr(inmsg); if(!inmsg_ptr) { logger.msg(Arc::ERROR, "Failed to create input SOAP container"); return make_fault(outmsg); } if(!inmsg_ptr->Payload()) { logger.msg(Arc::ERROR, "input is not SOAP"); return make_fault(outmsg); } // Convert incoming message to python object arg = Py_BuildValue("(l)", (long int)inmsg_ptr); if (arg == NULL) { logger.msg(Arc::ERROR, "Cannot create inmsg argument"); if (PyErr_Occurred()) PyErr_Print(); return make_fault(outmsg); } // arc_dict is a borrowed reference PyObject *arc_dict = PyModule_GetDict(arc_module); if (arc_dict == NULL) { logger.msg(Arc::ERROR, "Cannot get dictionary of ARC module"); if (PyErr_Occurred()) PyErr_Print(); return make_fault(outmsg); } // arc_msg_klass is a borrowed reference PyObject *arc_msg_klass = PyDict_GetItemString(arc_dict, "SOAPMessage"); if (arc_msg_klass == NULL) { logger.msg(Arc::ERROR, "Cannot find ARC Message class"); if (PyErr_Occurred()) PyErr_Print(); return make_fault(outmsg); } PyObjectP py_inmsg(PyObject_CallObject(arc_msg_klass, arg)); if (!py_inmsg) { logger.msg(Arc::ERROR, "Cannot convert inmsg to Python object"); if 
(PyErr_Occurred()) PyErr_Print(); Py_DECREF(arg); return make_fault(outmsg); } Py_DECREF(arg); Arc::SOAPMessageP outmsg_ptr(outmsg); if(!outmsg_ptr) { logger.msg(Arc::ERROR, "Failed to create SOAP containers"); return make_fault(outmsg); } // Convert incoming and outcoming messages to python objects arg = Py_BuildValue("(l)", (long int)outmsg_ptr); if (arg == NULL) { logger.msg(Arc::ERROR, "Cannot create outmsg argument"); if (PyErr_Occurred()) PyErr_Print(); return make_fault(outmsg); } PyObjectP py_outmsg = PyObject_CallObject(arc_msg_klass, arg); if (!py_outmsg) { logger.msg(Arc::ERROR, "Cannot convert outmsg to Python object"); if (PyErr_Occurred()) PyErr_Print(); Py_DECREF(arg); return make_fault(outmsg); } Py_DECREF(arg); // Call the process method PyObjectP py_status(PyObject_CallMethod(object, (char*)"process", (char*)"(OO)", (PyObject*)py_inmsg, (PyObject*)py_outmsg)); if (!py_status) { if (PyErr_Occurred()) PyErr_Print(); return make_fault(outmsg); } MCC_Status *status_ptr2 = (MCC_Status *)extract_swig_wrappered_pointer(py_status); Arc::MCC_Status status; if(status_ptr2) status=(*status_ptr2); { // std::string str = (std::string)status; // std::cout << "status: " << str << std::endl; }; SOAPMessage *outmsg_ptr2 = (SOAPMessage *)extract_swig_wrappered_pointer(py_outmsg); if(outmsg_ptr2 == NULL) return make_fault(outmsg); SOAPEnvelope *p = outmsg_ptr2->Payload(); if(p == NULL) return make_fault(outmsg); { // std::string xml; // if(p) p->GetXML(xml); // std::cout << "XML: " << xml << std::endl; }; Arc::PayloadSOAP *pl = new Arc::PayloadSOAP(*p); { // std::string xml; // pl->GetXML(xml); // std::cout << "XML: " << xml << std::endl; }; outmsg.Payload(pl); return status; } bool Service_PythonWrapper::RegistrationCollector(Arc::XMLNode& doc) { PyObject *arg = NULL; // logger.msg(Arc::VERBOSE, "Python 'RegistrationCollector' wrapper process called"); if(!initialized) return false; PythonLock plock(logger); // Convert doc to XMLNodeP // logger.msg(Arc::VERBOSE, "Convert doc to XMLNodeP"); Arc::XMLNodeP doc_ptr(doc); if (!doc_ptr) { logger.msg(Arc::ERROR, "Failed to create XMLNode container"); return false; } // Convert doc to Python object // logger.msg(Arc::VERBOSE, "Convert doc to Python object"); // logger.msg(Arc::VERBOSE, "Create Python XMLNode"); // arc_dict is a borrowed reference PyObject *arc_dict = PyModule_GetDict(arc_module); if (arc_dict == NULL) { logger.msg(Arc::ERROR, "Cannot get dictionary of ARC module"); if (PyErr_Occurred()) PyErr_Print(); return false; } // arc_xmlnode_klass is a borrowed reference PyObject *arc_xmlnode_klass = PyDict_GetItemString(arc_dict, "XMLNode"); if (arc_xmlnode_klass == NULL) { logger.msg(Arc::ERROR, "Cannot find ARC XMLNode class"); if (PyErr_Occurred()) PyErr_Print(); return false; } arg = Py_BuildValue("(l)", (long int)doc_ptr); if (arg == NULL) { logger.msg(Arc::ERROR, "Cannot create doc argument"); if (PyErr_Occurred()) PyErr_Print(); return false; } PyObjectP py_doc(PyObject_CallObject(arc_xmlnode_klass, arg)); if (!py_doc) { logger.msg(Arc::ERROR, "Cannot convert doc to Python object"); if (PyErr_Occurred()) PyErr_Print(); Py_DECREF(arg); return false; } Py_DECREF(arg); // Call the RegistrationCollector method // logger.msg(Arc::VERBOSE, "Call the RegistrationCollector method"); PyObjectP py_bool(PyObject_CallMethod(object, (char*)"RegistrationCollector", (char*)"(O)", (PyObject*)py_doc)); if (!py_bool) { if (PyErr_Occurred()) PyErr_Print(); return false; } // Convert the return value of the function back to cpp // 
logger.msg(Arc::VERBOSE, "Convert the return value of the function back to cpp"); bool *ret_val2 = (bool *)extract_swig_wrappered_pointer(py_bool); bool return_value = false; if (ret_val2) return_value = (*ret_val2); XMLNode *doc2 = (XMLNode *)extract_swig_wrappered_pointer(py_doc); if (doc2 == NULL) return false; (*doc2).New(doc); return true; } } // namespace Arc nordugrid-arc-5.4.2/src/services/wrappers/python/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612053177163025036 xustar000000000000000027 mtime=1353514611.374025 29 atime=1513200605.16807687 30 ctime=1513200664.112797787 nordugrid-arc-5.4.2/src/services/wrappers/python/Makefile.am0000644000175000002070000000117112053177163025101 0ustar00mockbuildmock00000000000000SUBDIRS = schema pkglib_LTLIBRARIES = libpythonservice.la libpythonservice_la_SOURCES = pythonwrapper.cpp pythonwrapper.h libpythonservice_la_CXXFLAGS = -I$(top_srcdir)/include \ $(PYTHON_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libpythonservice_la_LDFLAGS = -no-undefined -avoid-version -module libpythonservice_la_LIBADD = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(PYTHON_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(DLOPEN_LIBS) nordugrid-arc-5.4.2/src/services/wrappers/python/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315735025043 xustar000000000000000030 mtime=1513200605.216077457 30 atime=1513200651.851647828 30 ctime=1513200664.113797799 nordugrid-arc-5.4.2/src/services/wrappers/python/Makefile.in0000644000175000002070000007521413214315735025122 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
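[Editor's note] The pythonwrapper.cpp code above implicitly defines the contract a wrapped Python service must satisfy: the class named by the ClassName option ("module.Class") is constructed with a single arc.Config argument, its process(inmsg, outmsg) method receives two arc.SOAPMessage objects and must return an arc.MCC_Status whose out message carries a SOAP payload, and an optional RegistrationCollector(doc) method receives an arc.XMLNode and returns a boolean. The sketch below illustrates that contract only; the module/class names are hypothetical, and arc.NS, arc.PayloadSOAP, arc.STATUS_OK and the NewChild/Set calls are assumed from the ARC Python bindings rather than taken from this file.

    import arc

    class EchoService(object):
        # Hypothetical service; would be configured with ClassName = "echo.EchoService"
        def __init__(self, cfg):
            # cfg is the arc.Config wrapping this service's configuration element,
            # handed over by Service_PythonWrapper's constructor
            self.ns = arc.NS({'echo': 'urn:echo'})   # assumed NS constructor from the bindings

        def process(self, inmsg, outmsg):
            # inmsg/outmsg are arc.SOAPMessage objects created by the wrapper;
            # the wrapper expects outmsg to carry a SOAP payload on return
            outpayload = arc.PayloadSOAP(self.ns)                        # assumed binding class
            outpayload.NewChild('echo:echoResponse').Set('hello')        # assumed XMLNode helpers
            outmsg.Payload(outpayload)
            return arc.MCC_Status(arc.STATUS_OK)                         # assumed status constant

        def RegistrationCollector(self, doc):
            # doc is an arc.XMLNode; a real service would fill in its registration record here
            return True
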
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/wrappers/python DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libpythonservice_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libpythonservice_la_OBJECTS = libpythonservice_la-pythonwrapper.lo libpythonservice_la_OBJECTS = $(am_libpythonservice_la_OBJECTS) libpythonservice_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libpythonservice_la_CXXFLAGS) $(CXXFLAGS) \ $(libpythonservice_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) 
$(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libpythonservice_la_SOURCES) DIST_SOURCES = $(libpythonservice_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ 
ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = 
@NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = 
@sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = schema pkglib_LTLIBRARIES = libpythonservice.la libpythonservice_la_SOURCES = pythonwrapper.cpp pythonwrapper.h libpythonservice_la_CXXFLAGS = -I$(top_srcdir)/include \ $(PYTHON_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libpythonservice_la_LDFLAGS = -no-undefined -avoid-version -module libpythonservice_la_LIBADD = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(PYTHON_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(DLOPEN_LIBS) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/wrappers/python/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/wrappers/python/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libpythonservice.la: $(libpythonservice_la_OBJECTS) 
$(libpythonservice_la_DEPENDENCIES) $(libpythonservice_la_LINK) -rpath $(pkglibdir) $(libpythonservice_la_OBJECTS) $(libpythonservice_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpythonservice_la-pythonwrapper.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libpythonservice_la-pythonwrapper.lo: pythonwrapper.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpythonservice_la_CXXFLAGS) $(CXXFLAGS) -MT libpythonservice_la-pythonwrapper.lo -MD -MP -MF $(DEPDIR)/libpythonservice_la-pythonwrapper.Tpo -c -o libpythonservice_la-pythonwrapper.lo `test -f 'pythonwrapper.cpp' || echo '$(srcdir)/'`pythonwrapper.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libpythonservice_la-pythonwrapper.Tpo $(DEPDIR)/libpythonservice_la-pythonwrapper.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='pythonwrapper.cpp' object='libpythonservice_la-pythonwrapper.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpythonservice_la_CXXFLAGS) $(CXXFLAGS) -c -o libpythonservice_la-pythonwrapper.lo `test -f 'pythonwrapper.cpp' || echo '$(srcdir)/'`pythonwrapper.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
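# A minimal illustration of option (2) above, using an example flag set that is
# an assumption for illustration only (not a project default):
#
#     make CXXFLAGS='-O2 -g'
#
# Variables assigned on the `make' command line this way override the
# assignments above for that invocation only, leaving config.status and the
# generated Makefiles untouched.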
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-pkglibLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am \ uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/wrappers/python/PaxHeaders.7502/schema0000644000000000000000000000013213214316030024146 xustar000000000000000030 mtime=1513200664.140798129 30 atime=1513200668.717854109 30 ctime=1513200664.140798129 nordugrid-arc-5.4.2/src/services/wrappers/python/schema/0000755000175000002070000000000013214316030024271 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/wrappers/python/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711255700321026266 xustar000000000000000027 mtime=1253540049.444682 30 atime=1513200605.232077653 30 ctime=1513200664.137798093 nordugrid-arc-5.4.2/src/services/wrappers/python/schema/Makefile.am0000644000175000002070000000014611255700321026331 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = pythonwrapper.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/services/wrappers/python/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315735026303 xustar000000000000000030 mtime=1513200605.265078056 30 atime=1513200651.865647999 30 ctime=1513200664.138798105 nordugrid-arc-5.4.2/src/services/wrappers/python/schema/Makefile.in0000644000175000002070000004356113214315735026362 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. 
# @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/wrappers/python/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = 
@ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = 
@JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ 
host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = pythonwrapper.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/wrappers/python/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/wrappers/python/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/wrappers/python/schema/PaxHeaders.7502/pythonwrapper.xsd0000644000000000000000000000012411255700321027671 xustar000000000000000027 mtime=1253540049.444682 27 atime=1513200575.612715 30 ctime=1513200664.140798129 nordugrid-arc-5.4.2/src/services/wrappers/python/schema/pythonwrapper.xsd0000644000175000002070000000124311255700321027736 0ustar00mockbuildmock00000000000000 Defines the full module name of the class which containes the service implementation. The full name should follow the rules of python 'import' command. 
nordugrid-arc-5.4.2/src/services/wrappers/python/PaxHeaders.7502/pythonwrapper.h0000644000000000000000000000012412356757245026106 xustar000000000000000027 mtime=1404821157.416181 27 atime=1513200575.614715 30 ctime=1513200664.115797824 nordugrid-arc-5.4.2/src/services/wrappers/python/pythonwrapper.h0000644000175000002070000000157112356757245026157 0ustar00mockbuildmock00000000000000#ifndef __ARC_SERVICE_PYTHON_WRAPPER_H__ #define __ARC_SERVICE_PYTHON_WRAPPER_H__ #include #include #include #include namespace Arc { class Service_PythonWrapper: public Arc::Service { protected: Arc::MCC_Status make_fault(Arc::Message& outmsg); static Arc::Logger logger; PyObject *arc_module; PyObject *module; PyObject *object; InfoRegisters *inforeg; bool initialized; public: Service_PythonWrapper(Arc::Config *cfg, Arc::PluginArgument* parg); virtual ~Service_PythonWrapper(void); /** Service request processing routine */ virtual Arc::MCC_Status process(Arc::Message&, Arc::Message&); bool RegistrationCollector(Arc::XMLNode&); }; } // namespace Arc #endif // __ARC_SERVICE_PYTHON_WRAPPER_H__ nordugrid-arc-5.4.2/src/services/wrappers/python/PaxHeaders.7502/README0000644000000000000000000000012411001653037023647 xustar000000000000000027 mtime=1208440351.928622 27 atime=1513200575.611715 30 ctime=1513200664.111797775 nordugrid-arc-5.4.2/src/services/wrappers/python/README0000644000175000002070000000005211001653037023711 0ustar00mockbuildmock00000000000000service which wraps python based services nordugrid-arc-5.4.2/src/services/PaxHeaders.7502/a-rex0000644000000000000000000000013213214316026020543 xustar000000000000000030 mtime=1513200662.717780725 30 atime=1513200668.717854109 30 ctime=1513200662.717780725 nordugrid-arc-5.4.2/src/services/a-rex/0000755000175000002070000000000013214316026020666 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/PayloadFile.h0000644000000000000000000000012412232316467023173 xustar000000000000000027 mtime=1382653239.499491 27 atime=1513200576.605727 30 ctime=1513200662.705780578 nordugrid-arc-5.4.2/src/services/a-rex/PayloadFile.h0000644000175000002070000000777512232316467023260 0ustar00mockbuildmock00000000000000#ifndef __ARC_PAYLOADFILE_H__ #define __ARC_PAYLOADFILE_H__ #include #include #include #include namespace ARex { /** Implementation of PayloadRawInterface which provides access to ordinary file. Currently only read-only mode is supported. */ class PayloadFile: public Arc::PayloadRawInterface { protected: /* TODO: use system-independent file access */ int handle_; char* addr_; off_t size_; off_t start_; off_t end_; void SetRead(int h,Size_t start,Size_t end); public: /** Creates object associated with file for reading from it. Use end=-1 for full size. */ PayloadFile(const char* filename,Size_t start,Size_t end); PayloadFile(int h,Size_t start,Size_t end); /** Creates object associated with file for writing into it. Use size=-1 for undefined size. 
*/ //PayloadFile(const char* filename,Size_t size); virtual ~PayloadFile(void); virtual char operator[](Size_t pos) const; virtual char* Content(Size_t pos = -1); virtual Size_t Size(void) const; virtual char* Insert(Size_t pos = 0,Size_t size = 0); virtual char* Insert(const char* s,Size_t pos = 0,Size_t size = -1); virtual char* Buffer(unsigned int num); virtual Size_t BufferSize(unsigned int num) const; virtual Size_t BufferPos(unsigned int num) const; virtual bool Truncate(Size_t size); operator bool(void) { return (handle_ != -1); }; bool operator!(void) { return (handle_ == -1); }; }; class PayloadBigFile: public Arc::PayloadStream { private: static Size_t threshold_; off_t limit_; public: /** Creates object associated with file for reading from it */ PayloadBigFile(const char* filename,Size_t start,Size_t end); PayloadBigFile(int h,Size_t start,Size_t end); /** Creates object associated with file for writing into it. Use size=-1 for undefined size. */ //PayloadBigFile(const char* filename,Size_t size); virtual ~PayloadBigFile(void); virtual Size_t Pos(void) const; virtual Size_t Size(void) const; virtual Size_t Limit(void) const; virtual bool Get(char* buf,int& size); operator bool(void) { return (handle_ != -1); }; bool operator!(void) { return (handle_ == -1); }; static Size_t Threshold(void) { return threshold_; }; static void Threshold(Size_t t) { if(t > 0) threshold_=t; }; }; class PayloadFAFile: public Arc::PayloadStreamInterface { protected: Arc::FileAccess* handle_; off_t limit_; public: /** Creates object associated with file for reading from it */ PayloadFAFile(Arc::FileAccess* h,Size_t start,Size_t end); virtual ~PayloadFAFile(void); virtual Size_t Pos(void) const; virtual Size_t Size(void) const; virtual Size_t Limit(void) const; virtual bool Get(char* buf,int& size); virtual bool Get(std::string& buf) { char cbuf[1024]; int size = sizeof(cbuf); if(!Get(cbuf,size)) return false; buf.assign(cbuf,size); return true; }; virtual std::string Get(void) { std::string buf; Get(buf); return buf; }; virtual bool Put(const char* buf,Size_t size) { return false; }; virtual bool Put(const std::string& buf) { return Put(buf.c_str(),buf.length()); }; virtual bool Put(const char* buf) { return Put(buf,buf?strlen(buf):0); }; virtual int Timeout(void) const { return 0; }; virtual void Timeout(int to) { }; operator bool(void) { return (handle_ != NULL); }; bool operator!(void) { return (handle_ == NULL); }; }; // For ranges start is inclusive and end is exclusive Arc::MessagePayload* newFileRead(const char* filename,Arc::PayloadRawInterface::Size_t start = 0,Arc::PayloadRawInterface::Size_t end = (Arc::PayloadRawInterface::Size_t)(-1)); Arc::MessagePayload* newFileRead(int h,Arc::PayloadRawInterface::Size_t start = 0,Arc::PayloadRawInterface::Size_t end = (Arc::PayloadRawInterface::Size_t)(-1)); Arc::MessagePayload* newFileRead(Arc::FileAccess* h,Arc::PayloadRawInterface::Size_t start = 0,Arc::PayloadRawInterface::Size_t end = (Arc::PayloadRawInterface::Size_t)(-1)); } // namespace ARex #endif /* __ARC_PAYLOADFILE_H__ */ nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/Makefile.am0000644000000000000000000000012713065017103022656 xustar000000000000000027 mtime=1490296387.698578 30 atime=1513200601.675034149 30 ctime=1513200662.674780199 nordugrid-arc-5.4.2/src/services/a-rex/Makefile.am0000644000175000002070000000575613065017103022735 0ustar00mockbuildmock00000000000000if JURA_CLIENT_ENABLED JURA_CLIENT = jura else JURA_CLIENT = endif SUBDIRS = delegation grid-manager ldif 
infoproviders $(JURA_CLIENT) lrms schema DIST_SUBDIRS = delegation grid-manager ldif infoproviders jura lrms schema pkglib_LTLIBRARIES = libarex.la noinst_PROGRAMS = test test_cache_check if SYSV_SCRIPTS_ENABLED AREX_SCRIPT = a-rex else AREX_SCRIPT = endif initd_SCRIPTS = $(AREX_SCRIPT) if SYSTEMD_UNITS_ENABLED AREX_UNIT = a-rex.service AREX_UNIT_WRAPPER = a-rex-start else AREX_UNIT = AREX_UNIT_WRAPPER = endif units_DATA = $(AREX_UNIT) pkgdata_SCRIPTS = $(AREX_UNIT_WRAPPER) perferator sbin_SCRIPTS = a-rex-backtrace-collect man_MANS = a-rex-backtrace-collect.8 GRIDMANAGER_LIBS = grid-manager/libgridmanager.la delegation/libdelegation.la libarex_la_SOURCES = arex.cpp job.cpp \ create_activity.cpp get_activity_documents.cpp \ get_activity_statuses.cpp terminate_activities.cpp \ change_activity_status.cpp migrate_activity.cpp \ get_factory_attributes_document.cpp update_credentials.cpp faults.cpp \ get.cpp put.cpp PayloadFile.cpp FileChunks.cpp \ information_collector.cpp cachecheck.cpp tools.cpp \ arex.h job.h PayloadFile.h FileChunks.h tools.h libarex_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) # Needs real cleaning in respect to dependencies libarex_la_LIBADD = \ ldif/libldif.la \ $(GRIDMANAGER_LIBS) \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la libarex_la_LDFLAGS = -no-undefined -avoid-version -module $(DBCXX_LIBS) test_SOURCES = test.cpp test_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_LDADD = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) test_cache_check_SOURCES = test_cache_check.cpp test_cache_check_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_cache_check_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/tools.cpp0000644000000000000000000000012413107553350022471 xustar000000000000000027 mtime=1495193320.739286 27 atime=1513200576.426725 30 ctime=1513200662.702780542 nordugrid-arc-5.4.2/src/services/a-rex/tools.cpp0000644000175000002070000002275413107553350022550 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "tools.h" namespace ARex { void convertActivityStatus(const std::string& gm_state,std::string& bes_state,std::string& arex_state,bool failed,bool pending) { if(gm_state == "ACCEPTED") { bes_state="Pending"; arex_state="Accepted"; } 
else if(gm_state == "PREPARING") { bes_state="Running"; arex_state=(!pending)?"Preparing":"Prepared"; } else if(gm_state == "SUBMIT") { bes_state="Running"; arex_state="Submitting"; } else if(gm_state == "INLRMS") { bes_state="Running"; arex_state=(!pending)?"Executing":"Executed"; } else if(gm_state == "FINISHING") { bes_state="Running"; arex_state="Finishing"; } else if(gm_state == "FINISHED") { if(!failed) { bes_state="Finished"; arex_state="Finished"; } else { bes_state="Failed"; arex_state="Failed"; }; } else if(gm_state == "DELETED") { // AFAIR failed is not avialable anymore. bes_state=(!failed)?"Finished":"Failed"; arex_state="Deleted"; } else if(gm_state == "CANCELING") { bes_state="Running"; arex_state="Killing"; }; } Arc::XMLNode addActivityStatus(Arc::XMLNode pnode,const std::string& gm_state,Arc::XMLNode glue_xml,bool failed,bool pending) { std::string bes_state(""); std::string arex_state(""); std::string glue_state(""); convertActivityStatus(gm_state,bes_state,arex_state,failed,pending); Arc::XMLNode state = pnode.NewChild("bes-factory:ActivityStatus"); state.NewAttribute("state")=bes_state; state.NewChild("a-rex:State")=arex_state; if(pending) state.NewChild("a-rex:State")="Pending"; if((bool)glue_xml) { Arc::XMLNode state_node = glue_xml["State"]; for(;(bool)state_node;++state_node) { std::string state = (std::string)state_node; if(state.empty()) continue; // Look for nordugrid prefix if(::strncmp("nordugrid:",state.c_str(),10) == 0) { // Remove prefix state.erase(0,10); glue_state = state; }; }; }; if(!glue_state.empty()) { std::string::size_type p = glue_state.find(':'); if(p != std::string::npos) { if(glue_state.substr(0,p) == "INLRMS") { // Extrach state of batch system state.NewChild("a-rex:LRMSState")=glue_state.substr(p+1); }; }; state.NewChild("glue:State")=glue_state; }; return state; } // primary: // accepted|preprocessing| // processing|processing-accepting|processing-queued|processing-running| // postprocessing|terminal // attribute: // validating| // server-paused| // client-paused| // client-stagein-possible| // client-stageout-possible| // provisioning| // deprovisioning| // server-stagein| // server-stageout| // batch-suspend| // app-running| // preprocessing-cancel| // processing-cancel| // postprocessing-cancel| // validation-failure| // preprocessing-failure| // processing-failure| // postprocessing-failure| // app-failure| // expired void convertActivityStatusES(const std::string& gm_state,std::string& primary_state,std::list& state_attributes,bool failed,bool pending,const std::string& failedstate,const std::string& failedcause) { bool failed_set = false; bool canceled = (failedcause == "client"); primary_state = ""; if(gm_state == "ACCEPTED") { primary_state="accepted"; state_attributes.push_back("client-stagein-possible"); } else if(gm_state == "PREPARING") { primary_state="preprocessing"; state_attributes.push_back("client-stagein-possible"); state_attributes.push_back("server-stagein"); } else if(gm_state == "SUBMIT") { primary_state="processing-accepting"; } else if(gm_state == "INLRMS") { // Reporting job state as not started executing yet. // Because we have no more detailed information this // is probably safest solution. 
primary_state="processing-queued"; } else if(gm_state == "FINISHING") { primary_state="postprocessing"; state_attributes.push_back("client-stageout-possible"); state_attributes.push_back("server-stageout"); } else if(gm_state == "FINISHED") { primary_state="terminal"; state_attributes.push_back("client-stageout-possible"); } else if(gm_state == "DELETED") { primary_state="terminal"; state_attributes.push_back("expired"); } else if(gm_state == "CANCELING") { primary_state="processing"; }; if(failedstate == "ACCEPTED") { state_attributes.push_back("validation-failure"); failed_set = true; } else if(failedstate == "PREPARING") { state_attributes.push_back(canceled?"preprocessing-cancel":"preprocessing-failure"); failed_set = true; } else if(failedstate == "SUBMIT") { state_attributes.push_back(canceled?"processing-cancel":"processing-failure"); failed_set = true; } else if(failedstate == "INLRMS") { state_attributes.push_back(canceled?"processing-cancel":"processing-failure"); // Or maybe APP-FAILURE failed_set = true; } else if(failedstate == "FINISHING") { state_attributes.push_back(canceled?"postprocessing-cancel":"postprocessing-failure"); failed_set = true; } else if(failedstate == "FINISHED") { } else if(failedstate == "DELETED") { } else if(failedstate == "CANCELING") { }; if(primary_state == "terminal") { if(failed && !failed_set) { // Must put something to mark job failed state_attributes.push_back("app-failure"); }; }; if(!primary_state.empty()) { if(pending) state_attributes.push_back("server-paused"); }; } // ActivityStatus // Status // [accepted|preprocessing| // processing|processing-accepting|processing-queued|processing-running| // postprocessing|terminal] // Attribute 0- // [validating| // server-paused| // client-paused| // client-stagein-possible| // client-stageout-possible| // provisioning| // deprovisioning| // server-stagein| // server-stageout| // batch-suspend| // app-running| // preprocessing-cancel| // processing-cancel| // postprocessing-cancel| // validation-failure| // preprocessing-failure| // processing-failure| // postprocessing-failure| // app-failure| // expired] // Timestamp (dateTime) // Description 0-1 Arc::XMLNode addActivityStatusES(Arc::XMLNode pnode,const std::string& gm_state,Arc::XMLNode glue_xml,bool failed,bool pending,const std::string& failedstate,const std::string& failedcause) { std::string primary_state; std::list state_attributes; std::string glue_state(""); convertActivityStatusES(gm_state,primary_state,state_attributes,failed,pending,failedstate,failedcause); Arc::XMLNode state = pnode.NewChild("estypes:ActivityStatus"); state.NewChild("estypes:Status") = primary_state; for(std::list::iterator st = state_attributes.begin(); st!=state_attributes.end();++st) { state.NewChild("estypes:Attribute") = *st; }; return state; } JobIDGeneratorARC::JobIDGeneratorARC(const std::string& endpoint):endpoint_(endpoint) { } void JobIDGeneratorARC::SetLocalID(const std::string& id) { id_ = id; } Arc::XMLNode JobIDGeneratorARC::GetGlobalID(Arc::XMLNode& pnode) { Arc::XMLNode node; if(!pnode) { Arc::NS ns; ns["bes-factory"]="http://schemas.ggf.org/bes/2006/08/bes-factory"; ns["a-rex"]="http://www.nordugrid.org/schemas/a-rex"; Arc::XMLNode(ns,"bes-factory:ActivityIdentifier").Exchange(pnode); node = pnode; } else { node = pnode.NewChild("bes-factory:ActivityIdentifier"); }; Arc::WSAEndpointReference identifier(node); // Make job's ID identifier.Address(endpoint_); // address of service identifier.ReferenceParameters().NewChild("a-rex:JobID")=id_; 
identifier.ReferenceParameters().NewChild("a-rex:JobSessionDir")=endpoint_+"/"+id_; return node; } std::string JobIDGeneratorARC::GetGlobalID(void) { Arc::XMLNode node; GetGlobalID(node); std::string jobid; node.GetDoc(jobid); std::string::size_type p = 0; // squeeze into 1 line while((p=jobid.find_first_of("\r\n",p)) != std::string::npos) jobid.replace(p,1," "); return jobid; } std::string JobIDGeneratorARC::GetManager(void) { return endpoint_; } std::string JobIDGeneratorARC::GetInterface(void) { return "org.nordugrid.xbes"; } JobIDGeneratorES::JobIDGeneratorES(const std::string& endpoint):endpoint_(endpoint) { } void JobIDGeneratorES::SetLocalID(const std::string& id) { id_ = id; } Arc::XMLNode JobIDGeneratorES::GetGlobalID(Arc::XMLNode& pnode) { Arc::XMLNode node; if(!pnode) { Arc::NS ns; ns["estypes"]="http://www.eu-emi.eu/es/2010/12/types"; Arc::XMLNode(ns,"estypes:ActivityID").Exchange(pnode); node = pnode; } else { node = pnode.NewChild("estypes:ActivityID"); }; node = id_; return node; } std::string JobIDGeneratorES::GetGlobalID(void) { return id_; } std::string JobIDGeneratorES::GetManager(void) { return endpoint_; } std::string JobIDGeneratorES::GetInterface(void) { return "org.ogf.glue.emies.activitycreation"; } } nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/arex.h0000644000000000000000000000012413153453517021742 xustar000000000000000027 mtime=1504597839.377703 27 atime=1513200576.432725 30 ctime=1513200662.703780554 nordugrid-arc-5.4.2/src/services/a-rex/arex.h0000644000175000002070000001717113153453517022016 0ustar00mockbuildmock00000000000000#ifndef __ARC_AREX_H__ #define __ARC_AREX_H__ #include #include #include #include #include #include #include "FileChunks.h" #include "grid-manager/GridManager.h" #include "delegation/DelegationStores.h" #include "grid-manager/conf/GMConfig.h" namespace ARex { class ARexGMConfig; class ARexConfigContext; class CountedResourceLock; class CountedResource { friend class CountedResourceLock; public: CountedResource(int maxconsumers = -1); ~CountedResource(void); void MaxConsumers(int maxconsumers); private: Glib::Cond cond_; Glib::Mutex lock_; int limit_; int count_; void Acquire(void); void Release(void); }; class OptimizedInformationContainer: public Arc::InformationContainer { private: bool parse_xml_; std::string filename_; int handle_; Arc::XMLNode doc_; Glib::Mutex olock_; public: OptimizedInformationContainer(bool parse_xml = true); ~OptimizedInformationContainer(void); int OpenDocument(void); Arc::MessagePayload* Process(Arc::SOAPEnvelope& in); void AssignFile(const std::string& filename); void Assign(const std::string& xml,const std::string filename = ""); }; #define AREXOP(NAME) Arc::MCC_Status NAME(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out) class ARexService: public Arc::Service { private: static void gm_threads_starter(void* arg); void gm_threads_starter(); Arc::MCC_Status cache_get(Arc::Message& outmsg, const std::string& subpath, off_t range_start, off_t range_end, ARexGMConfig& config, bool no_content); protected: Arc::ThreadRegistry thread_count_; Arc::NS ns_; Arc::Logger logger_; DelegationStores delegation_stores_; OptimizedInformationContainer infodoc_; Arc::InfoRegisters* inforeg_; CountedResource infolimit_; CountedResource beslimit_; CountedResource datalimit_; std::string endpoint_; bool publishstaticinfo_; std::string uname_; std::string common_name_; std::string long_description_; std::string lrms_name_; std::string os_name_; std::string gmrun_; unsigned int infoprovider_wakeup_period_; unsigned 
int all_jobs_count_; //Glib::Mutex glue_states_lock_; //std::map glue_states_; FileChunksList files_chunks_; GMConfig config_; GridManager* gm_; ARexConfigContext* get_configuration(Arc::Message& inmsg); // A-REX operations Arc::MCC_Status CreateActivity(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out,const std::string& clientid); AREXOP(GetActivityStatuses); AREXOP(TerminateActivities); AREXOP(GetActivityDocuments); AREXOP(GetFactoryAttributesDocument); AREXOP(StopAcceptingNewActivities); AREXOP(StartAcceptingNewActivities); AREXOP(ChangeActivityStatus); Arc::MCC_Status MigrateActivity(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out,const std::string& clientid); AREXOP(CacheCheck); /** Update credentials for specified job through A-REX own interface */ Arc::MCC_Status UpdateCredentials(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out,const std::string& credentials); // EMI ES operations Arc::MCC_Status ESCreateActivities(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out,const std::string& clientid); AREXOP(ESGetResourceInfo); AREXOP(ESQueryResourceInfo); AREXOP(ESPauseActivity); AREXOP(ESResumeActivity); AREXOP(ESNotifyService); AREXOP(ESCancelActivity); AREXOP(ESWipeActivity); AREXOP(ESRestartActivity); AREXOP(ESListActivities); AREXOP(ESGetActivityStatus); AREXOP(ESGetActivityInfo); // Convenience methods Arc::MCC_Status make_empty_response(Arc::Message& outmsg); Arc::MCC_Status make_fault(Arc::Message& outmsg); Arc::MCC_Status make_http_fault(Arc::Message& outmsg,int code,const char* resp); Arc::MCC_Status make_soap_fault(Arc::Message& outmsg,const char* resp = NULL); // HTTP operations Arc::MCC_Status Get(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string id,std::string subpath); Arc::MCC_Status Head(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string id,std::string subpath); Arc::MCC_Status Put(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string id,std::string subpath); // A-REX faults void GenericFault(Arc::SOAPFault& fault); void NotAuthorizedFault(Arc::XMLNode fault); void NotAuthorizedFault(Arc::SOAPFault& fault); void NotAcceptingNewActivitiesFault(Arc::XMLNode fault); void NotAcceptingNewActivitiesFault(Arc::SOAPFault& fault); void UnsupportedFeatureFault(Arc::XMLNode fault,const std::string& feature); void UnsupportedFeatureFault(Arc::SOAPFault& fault,const std::string& feature); void CantApplyOperationToCurrentStateFault(Arc::XMLNode fault,const std::string& gm_state,bool failed,const std::string& message); void CantApplyOperationToCurrentStateFault(Arc::SOAPFault& fault,const std::string& gm_state,bool failed,const std::string& message); void OperationWillBeAppliedEventuallyFault(Arc::XMLNode fault,const std::string& gm_state,bool failed,const std::string& message); void OperationWillBeAppliedEventuallyFault(Arc::SOAPFault& fault,const std::string& gm_state,bool failed,const std::string& message); void UnknownActivityIdentifierFault(Arc::XMLNode fault,const std::string& message); void UnknownActivityIdentifierFault(Arc::SOAPFault& fault,const std::string& message); void InvalidRequestMessageFault(Arc::XMLNode fault,const std::string& element,const std::string& message); void InvalidRequestMessageFault(Arc::SOAPFault& fault,const std::string& element,const std::string& message); // EMI ES faults #define ES_MSG_FAULT_HEAD(NAME) \ void NAME(Arc::XMLNode fault,const std::string& message,const std::string& desc = ""); \ void NAME(Arc::SOAPFault& fault,const std::string& message,const 
std::string& desc = ""); #define ES_SIMPLE_FAULT_HEAD(NAME) \ void NAME(Arc::XMLNode fault,const std::string& message = "",const std::string& desc = ""); \ void NAME(Arc::SOAPFault& fault,const std::string& message = "",const std::string& desc = ""); ES_MSG_FAULT_HEAD(ESInternalBaseFault) void ESVectorLimitExceededFault(Arc::XMLNode fault,unsigned long limit,const std::string& message = "",const std::string& desc = ""); void ESVectorLimitExceededFault(Arc::SOAPFault& fault,unsigned long limit,const std::string& message = "",const std::string& desc = ""); ES_SIMPLE_FAULT_HEAD(ESAccessControlFault); ES_SIMPLE_FAULT_HEAD(ESUnsupportedCapabilityFault) ES_SIMPLE_FAULT_HEAD(ESInvalidActivityDescriptionSemanticFault) ES_SIMPLE_FAULT_HEAD(ESInvalidActivityDescriptionFault) ES_SIMPLE_FAULT_HEAD(ESNotSupportedQueryDialectFault) ES_SIMPLE_FAULT_HEAD(ESNotValidQueryStatementFault) ES_SIMPLE_FAULT_HEAD(ESUnknownQueryFault) ES_SIMPLE_FAULT_HEAD(ESInternalResourceInfoFault) ES_SIMPLE_FAULT_HEAD(ESResourceInfoNotFoundFault) ES_SIMPLE_FAULT_HEAD(ESUnableToRetrieveStatusFault) ES_SIMPLE_FAULT_HEAD(ESUnknownAttributeFault) ES_SIMPLE_FAULT_HEAD(ESOperationNotAllowedFault) ES_SIMPLE_FAULT_HEAD(ESActivityNotFoundFault) ES_SIMPLE_FAULT_HEAD(ESInternalNotificationFault) ES_SIMPLE_FAULT_HEAD(ESOperationNotPossibleFault) ES_SIMPLE_FAULT_HEAD(ESInvalidActivityStateFault) ES_SIMPLE_FAULT_HEAD(ESInvalidActivityLimitFault) ES_SIMPLE_FAULT_HEAD(ESInvalidParameterFault) public: ARexService(Arc::Config *cfg,Arc::PluginArgument *parg); virtual ~ARexService(void); virtual Arc::MCC_Status process(Arc::Message& inmsg,Arc::Message& outmsg); void InformationCollector(void); virtual bool RegistrationCollector(Arc::XMLNode &doc); virtual std::string getID(); void StopChildThreads(void); }; } // namespace ARex #endif nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/tools.h0000644000000000000000000000012411712244077022141 xustar000000000000000027 mtime=1328105535.039435 27 atime=1513200575.763717 30 ctime=1513200662.707780603 nordugrid-arc-5.4.2/src/services/a-rex/tools.h0000644000175000002070000000447711712244077022222 0ustar00mockbuildmock00000000000000#include #include namespace ARex { void convertActivityStatus(const std::string& gm_state,std::string& bes_state,std::string& arex_state,bool failed = false,bool pending = false); Arc::XMLNode addActivityStatus(Arc::XMLNode pnode,const std::string& gm_state,Arc::XMLNode glue_xml = Arc::XMLNode(),bool failed = false,bool pending = false); void convertActivityStatusES(const std::string& gm_state,std::string& primary_state,std::list& state_attributes,bool failed,bool pending,const std::string& failedstate,const std::string& failedcause); Arc::XMLNode addActivityStatusES(Arc::XMLNode pnode,const std::string& gm_state,Arc::XMLNode glue_xml = Arc::XMLNode(),bool failed = false,bool pending = false,const std::string& failedstate = "",const std::string& failedcause = ""); class JobIDGenerator { public: JobIDGenerator() { }; virtual ~JobIDGenerator() { }; virtual void SetLocalID(const std::string& id) = 0; virtual Arc::XMLNode GetGlobalID(Arc::XMLNode& pnode) = 0; virtual std::string GetGlobalID(void) = 0; virtual std::string GetManager(void) = 0; virtual std::string GetInterface(void) = 0; }; class JobIDGeneratorARC:public JobIDGenerator { public: JobIDGeneratorARC(const std::string& endpoint); virtual ~JobIDGeneratorARC() { }; virtual void SetLocalID(const std::string& id); virtual Arc::XMLNode GetGlobalID(Arc::XMLNode& pnode); virtual std::string GetGlobalID(void); virtual 
std::string GetManager(void); virtual std::string GetInterface(void); private: std::string endpoint_; std::string id_; }; class JobIDGeneratorES:public JobIDGenerator { public: JobIDGeneratorES(const std::string& endpoint); virtual ~JobIDGeneratorES() { }; virtual void SetLocalID(const std::string& id); virtual Arc::XMLNode GetGlobalID(Arc::XMLNode& pnode); virtual std::string GetGlobalID(void); virtual std::string GetManager(void); virtual std::string GetInterface(void); private: std::string endpoint_; std::string id_; }; Arc::XMLNode addJobID(Arc::XMLNode& pnode,const std::string& endpoint,const std::string& id); std::string makeJobID(const std::string& endpoint,const std::string& id); Arc::XMLNode addJobIDES(Arc::XMLNode& pnode,const std::string& endpoint,const std::string& id); std::string makeJobIDES(const std::string& endpoint,const std::string& id); } nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/get.cpp0000644000000000000000000000012412733561667022126 xustar000000000000000027 mtime=1466885047.133664 27 atime=1513200576.451725 30 ctime=1513200662.696780468 nordugrid-arc-5.4.2/src/services/a-rex/get.cpp0000644000175000002070000004577612733561667022216 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include "PayloadFile.h" #include "job.h" #include "arex.h" #define HTTP_ERR_NOT_SUPPORTED (501) #define MAX_CHUNK_SIZE (10*1024*1024) namespace ARex { static Arc::MCC_Status http_get(Arc::Message& outmsg,const std::string& burl,ARexJob& job,std::string hpath,off_t start,off_t end,bool no_content); static Arc::MCC_Status http_get_log(Arc::Message& outmsg,const std::string& burl,ARexJob& job,std::string hpath,off_t start,off_t end,bool no_content); Arc::MCC_Status ARexService::Get(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string id,std::string subpath) { bool force_logs = false; off_t range_start = 0; off_t range_end = (off_t)(-1); { std::string val; val=inmsg.Attributes()->get("HTTP:RANGESTART"); if(!val.empty()) { // Negative ranges not supported if(!Arc::stringto(val,range_start)) { range_start=0; } else { val=inmsg.Attributes()->get("HTTP:RANGEEND"); if(!val.empty()) { if(!Arc::stringto(val,range_end)) { range_end=(off_t)(-1); } else { // Rest of code here treats end of range as exclusive // While HTTP ranges are inclusive ++range_end; }; }; }; }; }; if(id.empty()) { // Make list of jobs std::string html; html="\r\n\r\nARex: Jobs list\r\n\r\n\r\n
      \r\n"; std::list jobs = ARexJob::Jobs(config,logger_); for(std::list::iterator job = jobs.begin();job!=jobs.end();++job) { std::string line = "
    • job "; line+=(*job); line+=""; line+=" logs\r\n"; html+=line; }; html+="
    \r\n"; // Service description access html+="SERVICE DESCRIPTION"; html+="\r\n"; Arc::PayloadRaw* buf = new Arc::PayloadRaw; if(buf) buf->Insert(html.c_str(),0,html.length()); outmsg.Payload(buf); outmsg.Attributes()->set("HTTP:content-type","text/html"); return Arc::MCC_Status(Arc::STATUS_OK); }; if(id == "?info") { if(!subpath.empty()) return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); int h = infodoc_.OpenDocument(); if(h == -1) return Arc::MCC_Status(); Arc::MessagePayload* payload = newFileRead(h); if(!payload) { ::close(h); return Arc::MCC_Status(); }; outmsg.Payload(payload); outmsg.Attributes()->set("HTTP:content-type","text/xml"); return Arc::MCC_Status(Arc::STATUS_OK); }; if(id == "?logs") { force_logs = true; if(subpath.empty()) return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); std::string::size_type p = subpath.find('/'); if(p == 0) { subpath = subpath.substr(1); p = subpath.find('/'); }; if(p == std::string::npos) { id = subpath; subpath = ""; } else { id = subpath.substr(0,p); subpath = subpath.substr(p+1); }; }; if (id == "cache") { return cache_get(outmsg, subpath, range_start, range_end, config, false); } ARexJob job(id,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "Get: there is no job %s - %s", id, job.Failure()); return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); }; Arc::MCC_Status r; if(force_logs) { r=http_get_log(outmsg,config.Endpoint()+"/?logs/"+id,job,subpath,range_start,range_end,false); } else { r=http_get(outmsg,config.Endpoint()+"/"+id,job,subpath,range_start,range_end,false); }; if(!r) { // Can't get file logger.msg(Arc::ERROR, "Get: can't process file %s", subpath); return r; }; return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status ARexService::Head(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string id,std::string subpath) { bool force_logs = false; if(id.empty()) { Arc::PayloadRaw* buf = new Arc::PayloadRaw; if(buf) buf->Truncate(0); outmsg.Payload(buf); outmsg.Attributes()->set("HTTP:content-type","text/html"); return Arc::MCC_Status(Arc::STATUS_OK); } if(id == "?info") { if(!subpath.empty()) return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); int h = infodoc_.OpenDocument(); if(h == -1) return Arc::MCC_Status(); struct stat st; Arc::PayloadRaw* buf = new Arc::PayloadRaw; if(buf && (::fstat(h,&st) == 0)) buf->Truncate(st.st_size); ::close(h); outmsg.Payload(buf); outmsg.Attributes()->set("HTTP:content-type","text/html"); return Arc::MCC_Status(Arc::STATUS_OK); }; if(id == "?logs") { force_logs = true; if(subpath.empty()) return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); std::string::size_type p = subpath.find('/'); if(p == 0) { subpath = subpath.substr(1); p = subpath.find('/'); }; if(p == std::string::npos) { id = subpath; subpath = ""; } else { id = subpath.substr(0,p); subpath = subpath.substr(p+1); }; }; if (id == "cache") { return cache_get(outmsg, subpath, 0, (off_t)(-1), config, true); } ARexJob job(id,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "Head: there is no job %s - %s", id, job.Failure()); return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); }; Arc::MCC_Status r; if(force_logs) { r=http_get_log(outmsg,config.Endpoint()+"/?logs/"+id,job,subpath,0,(off_t)(-1),true); } else { r=http_get(outmsg,config.Endpoint()+"/"+id,job,subpath,0,(off_t)(-1),true); }; if(!r) { // Can't stat file logger.msg(Arc::ERROR, "Head: can't process file %s", subpath); return r; }; return Arc::MCC_Status(Arc::STATUS_OK); } // burl - base URL // bpath - base path // hpath - 
path relative to base path and base URL // start - chunk start // start - chunk end static Arc::MCC_Status http_get(Arc::Message& outmsg,const std::string& burl,ARexJob& job,std::string hpath,off_t start,off_t end,bool no_content) { Arc::Logger::rootLogger.msg(Arc::VERBOSE, "http_get: start=%llu, end=%llu, burl=%s, hpath=%s", (unsigned long long int)start, (unsigned long long int)end, burl, hpath); if(!hpath.empty()) if(hpath[0] == '/') hpath=hpath.substr(1); if(!hpath.empty()) if(hpath[hpath.length()-1] == '/') hpath.resize(hpath.length()-1); std::string joblog = job.LogDir(); if(!joblog.empty()) { if((strncmp(joblog.c_str(),hpath.c_str(),joblog.length()) == 0) && ((hpath[joblog.length()] == '/') || (hpath[joblog.length()] == 0))) { hpath.erase(0,joblog.length()+1); return http_get_log(outmsg,burl+"/"+joblog,job,hpath,start,end,no_content); }; }; Arc::FileAccess* dir = job.OpenDir(hpath); if(dir) { // Directory - html with file list if(!no_content) { std::string file; std::string html; html="\r\n\r\nARex: Job\r\n\r\n\r\n
      \r\n"; std::string furl = burl; if(!hpath.empty()) furl+="/"+hpath; std::string path = job.GetFilePath(hpath); for(;;) { if(!dir->fa_readdir(file)) break; if(file == ".") continue; if(file == "..") continue; std::string fpath = path+"/"+file; struct stat st; if(lstat(fpath.c_str(),&st) == 0) { if(S_ISREG(st.st_mode)) { std::string line = "
    • file "; line+=file; line+=" - "+Arc::tostring(st.st_size)+" bytes"+"\r\n"; html+=line; } else if(S_ISDIR(st.st_mode)) { std::string line = "
    • dir "; line+=file; line+="\r\n"; html+=line; }; } else { std::string line = "
    • unknown "; line+=file; line+="\r\n"; html+=line; }; }; if((hpath.empty()) && (!joblog.empty())) { std::string line = "
    • dir "; line+=joblog; line+=" - log directory\r\n"; html+=line; }; html+="
    \r\n\r\n"; Arc::PayloadRaw* buf = new Arc::PayloadRaw; if(buf) buf->Insert(html.c_str(),0,html.length()); outmsg.Payload(buf); outmsg.Attributes()->set("HTTP:content-type","text/html"); } else { Arc::PayloadRaw* buf = new Arc::PayloadRaw; if(buf) buf->Truncate(0); outmsg.Payload(buf); outmsg.Attributes()->set("HTTP:content-type","text/html"); }; dir->fa_closedir(); Arc::FileAccess::Release(dir); return Arc::MCC_Status(Arc::STATUS_OK); }; Arc::FileAccess* file = job.OpenFile(hpath,true,false); if(file) { // File if(!no_content) { Arc::MessagePayload* h = newFileRead(file,start,end); if(!h) { file->fa_close(); Arc::FileAccess::Release(file); return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); }; outmsg.Payload(h); } else { struct stat st; Arc::PayloadRaw* buf = new Arc::PayloadRaw; if(buf && (file->fa_fstat(st))) buf->Truncate(st.st_size); file->fa_close(); Arc::FileAccess::Release(file); outmsg.Payload(buf); }; outmsg.Attributes()->set("HTTP:content-type","application/octet-stream"); return Arc::MCC_Status(Arc::STATUS_OK); }; // Can't process this path // offset=0; size=0; return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); } static Arc::MCC_Status http_get_log(Arc::Message& outmsg,const std::string& burl,ARexJob& job,std::string hpath,off_t start,off_t end,bool no_content) { if(hpath.empty()) { if(!no_content) { std::list logs = job.LogFiles(); std::string html; html="\r\n\r\nARex: Job Logs\r\n\r\n\r\n
      \r\n"; for(std::list::iterator l = logs.begin();l != logs.end();++l) { if(strncmp(l->c_str(),"proxy",5) == 0) continue; std::string line = "
    • file "; line+=*l; line+=" - log file\r\n"; html+=line; }; html+="
    \r\n\r\n"; Arc::PayloadRaw* buf = new Arc::PayloadRaw; if(buf) buf->Insert(html.c_str(),0,html.length()); outmsg.Payload(buf); outmsg.Attributes()->set("HTTP:content-type","text/html"); } else { Arc::PayloadRaw* buf = new Arc::PayloadRaw; if(buf) buf->Truncate(0); outmsg.Payload(buf); outmsg.Attributes()->set("HTTP:content-type","text/html"); }; return Arc::MCC_Status(Arc::STATUS_OK); } else { int file = job.OpenLogFile(hpath); if(file != -1) { if(!no_content) { Arc::MessagePayload* h = newFileRead(file,start,end); if(!h) { ::close(file); return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); }; outmsg.Payload(h); } else { struct stat st; Arc::PayloadRaw* buf = new Arc::PayloadRaw; if(buf && (::fstat(file,&st) == 0)) buf->Truncate(st.st_size); ::close(file); outmsg.Payload(buf); }; outmsg.Attributes()->set("HTTP:content-type","text/plain"); return Arc::MCC_Status(Arc::STATUS_OK); }; }; return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); } static bool cache_get_allowed(const std::string& url, ARexGMConfig& config, Arc::Logger& logger) { // Extract information from credentials std::string dn; // DN of credential std::string vo; // Assuming only one VO std::list voms; // VOMS attributes for (std::list::const_iterator a = config.beginAuth(); a!=config.endAuth(); ++a) { if (*a) { Arc::SecAttr* sattr = (*a)->get("TLS"); if (!sattr) continue; dn = sattr->get("IDENTITY"); vo = sattr->get("VO"); voms = sattr->getAll("VOMS"); break; } } // At least DN should be found. VOMS info may not be present. if (dn.empty()) { logger.msg(Arc::ERROR, "Failed to extract credential information"); return false; } logger.msg(Arc::DEBUG, "Checking cache permissions: DN: %s", dn); logger.msg(Arc::DEBUG, "Checking cache permissions: VO: %s", vo); for (std::list::const_iterator att = voms.begin(); att != voms.end(); ++att) { logger.msg(Arc::DEBUG, "Checking cache permissions: VOMS attr: %s", *att); } // Cache configuration specifies URL regexps and a certificate attribute and // value. Go through looking for a match. 
for (std::list::const_iterator access = config.GmConfig().CacheParams().getCacheAccess().begin(); access != config.GmConfig().CacheParams().getCacheAccess().end(); ++access) { if (access->regexp.match(url)) { if (Arc::lower(access->cred_type) == "dn") { if (access->cred_value.match(dn)) { logger.msg(Arc::VERBOSE, "Cache access allowed to %s by DN %s", url, dn); return true; } logger.msg(Arc::DEBUG, "DN %s doesn't match %s", dn, access->cred_value.getPattern()); } else if (Arc::lower(access->cred_type) == "voms:vo") { if (access->cred_value.match(vo)) { logger.msg(Arc::VERBOSE, "Cache access allowed to %s by VO %s", url, vo); return true; } logger.msg(Arc::DEBUG, "VO %s doesn't match %s", vo, access->cred_value.getPattern()); } else if (Arc::lower(access->cred_type) == "voms:role") { // Get the configured allowed role std::vector role_parts; Arc::tokenize(access->cred_value.getPattern(), role_parts, ":"); if (role_parts.size() != 2) { logger.msg(Arc::WARNING, "Bad credential value %s in cache access rules", access->cred_value.getPattern()); continue; } std::string cred_vo = role_parts[0]; std::string cred_role = role_parts[1]; std::string allowed_role("/VO="+cred_vo+"/Group="+cred_vo+"/Role="+cred_role); for (std::list::const_iterator attr = voms.begin(); attr != voms.end(); ++attr) { if (*attr == allowed_role) { logger.msg(Arc::DEBUG, "VOMS attr %s matches %s", *attr, allowed_role); logger.msg(Arc::VERBOSE, "Cache access allowed to %s by VO %s and role %s", url, cred_vo, cred_role); return true; } logger.msg(Arc::DEBUG, "VOMS attr %s doesn't match %s", *attr, allowed_role); } } else if (Arc::lower(access->cred_type) == "voms:group") { // Get the configured allowed group std::vector group_parts; Arc::tokenize(access->cred_value.getPattern(), group_parts, ":"); if (group_parts.size() != 2) { logger.msg(Arc::WARNING, "Bad credential value %s in cache access rules", access->cred_value.getPattern()); continue; } std::string cred_vo = group_parts[0]; std::string cred_group = group_parts[1]; std::string allowed_group("/VO="+cred_vo+"/Group="+cred_vo+"/Group="+cred_group); for (std::list::const_iterator attr = voms.begin(); attr != voms.end(); ++attr) { if (*attr == allowed_group) { logger.msg(Arc::DEBUG, "VOMS attr %s matches %s", *attr, allowed_group); logger.msg(Arc::VERBOSE, "Cache access allowed to %s by VO %s and group %s", url, cred_vo, cred_group); return true; } logger.msg(Arc::DEBUG, "VOMS attr %s doesn't match %s", *attr, allowed_group); } } else { logger.msg(Arc::WARNING, "Unknown credential type %s for URL pattern %s", access->cred_type, access->regexp.getPattern()); } } } // If we get to here no match was found logger.msg(Arc::VERBOSE, "No match found in cache access rules for %s", url); return false; } Arc::MCC_Status ARexService::cache_get(Arc::Message& outmsg, const std::string& subpath, off_t range_start, off_t range_end, ARexGMConfig& config, bool no_content) { // subpath contains the URL, which can be encoded. Constructing a URL // object with encoded=true only decodes the path so have to decode first std::string unencoded(Arc::uri_unencode(subpath)); Arc::URL cacheurl(unencoded); logger.msg(Arc::INFO, "Get from cache: Looking in cache for %s", cacheurl.str()); if (!cacheurl) { logger.msg(Arc::ERROR, "Get from cache: Invalid URL %s", subpath); return make_http_fault(outmsg, 400, "Bad request: Invalid URL"); } // Security check. 
The access is configured in arc.conf like // cache_access="srm://srm-atlas.cern.ch/grid/atlas* voms:vo atlas" // then the url is compared to the certificate attribute specified if (!cache_get_allowed(cacheurl.str(), config, logger)) { return make_http_fault(outmsg, 403, "Forbidden"); } Arc::FileCache cache(config.GmConfig().CacheParams().getCacheDirs(), config.GmConfig().CacheParams().getRemoteCacheDirs(), config.GmConfig().CacheParams().getDrainingCacheDirs(), "0", // Jobid is not used config.User().get_uid(), config.User().get_gid()); if (!cache) { logger.msg(Arc::ERROR, "Get from cache: Error in cache configuration"); return make_http_fault(outmsg, 500, "Error in cache configuration"); } // Get the cache file corresponding to the URL std::string cache_file(cache.File(cacheurl.str())); // Check if file exists struct stat st; if (!Arc::FileStat(cache_file, &st, false)) { if (errno == ENOENT) { logger.msg(Arc::INFO, "Get from cache: File not in cache"); return make_http_fault(outmsg, 404, "File not found"); } else { logger.msg(Arc::WARNING, "Get from cache: could not access cached file: %s", Arc::StrError(errno)); return make_http_fault(outmsg, 500, "Error accessing cached file"); } } // Check file size against specified range if (range_start > st.st_size) range_start = st.st_size; if (range_end > st.st_size) range_end = st.st_size; // Check if lockfile exists if (Arc::FileStat(cache_file + Arc::FileLock::getLockSuffix(), &st, false)) { logger.msg(Arc::INFO, "Get from cache: Cached file is locked"); return make_http_fault(outmsg, 409, "Cached file is locked"); } // Read the file and fill the payload if (!no_content) { Arc::MessagePayload* h = newFileRead(cache_file.c_str(), range_start, range_end); outmsg.Payload(h); } else { struct stat st; Arc::PayloadRaw* buf = new Arc::PayloadRaw; if(buf && Arc::FileStat(cache_file, &st, false)) buf->Truncate(st.st_size); outmsg.Payload(buf); } outmsg.Attributes()->set("HTTP:content-type","application/octet-stream"); return Arc::MCC_Status(Arc::STATUS_OK); } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/a-rex-backtrace-collect.in0000644000000000000000000000012712733561506025542 xustar000000000000000027 mtime=1466884934.787409 30 atime=1513200649.800622743 30 ctime=1513200662.678780248 nordugrid-arc-5.4.2/src/services/a-rex/a-rex-backtrace-collect.in0000755000175000002070000000463512733561506025617 0ustar00mockbuildmock00000000000000#!/bin/bash readconfigvar() { fname=$1 if [ ! -r "$fname" ]; then return fi bname="[$2]" vname=$3 value= cat "$fname" | sed -e 's/\r$//' -e 's/^\r//' | grep -e '^\[' -e "^${vname}=" | { while true; do read line if [ ! $? = 0 ] ; then return fi if [ "$line" = "$bname" ] ; then while true ; do read line if [ ! $? = 0 ] ; then return fi lstart=`echo "$line" | head -c 1` if [ "$lstart" = '[' ] ; then return fi vlname=`echo "$line" | sed 's/=.*//;t;s/.*//'` if [ "$vlname" = "$vname" ] ; then val=`echo "$line" | sed 's/[^=]*=//'` eval "echo $val" return fi done fi done } } # ARC_LOCATION ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ "x$ARC_CONFIG" = "x" ]; then if [ -r $ARC_LOCATION/etc/arc.conf ]; then ARC_CONFIG=$ARC_LOCATION/etc/arc.conf elif [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi fi if [ "x$ARC_CONFIG" = "x" ]; then echo "Can't find configuration file." exit 1 fi if [ ! -f "${ARC_CONFIG}" ]; then echo "Can't find configuration file at ${ARC_CONFIG}." exit 1 fi ARCHED="${ARC_LOCATION}/sbin/arched" if [ ! -f "${ARCHED}" ]; then echo "Can't find arched at ${ARCHED}." 
exit 1 fi LOGFILE=`readconfigvar "$ARC_CONFIG" grid-manager logfile` LOGFILE=${LOGFILE:-/var/log/arc/grid-manager.log} COREDIR=`dirname "${LOGFILE}"`/arccore if [ ! -d "${COREDIR}" ]; then echo "Can't find core collection folder at ${COREDIR}." exit 1 fi backtrace_generated=no for corename in "${COREDIR}"/*; do echo "${corename}" | grep '\.backtrace$' if [ ! "$?" = '0' ]; then backtracename="${corename}.backtrace" echo "--- Processing ${corename} - storing into ${backtracename} ---" gdb --batch --core="${corename}" "${ARCHED}" --eval-command='thread apply all bt full' 1>"${backtracename}" 2>&1 backtrace_generated=yes fi done if [ $backtrace_generated = yes ]; then echo "Please send generated backtrace(s) to support@nordugrid.org or report them on http://bugzilla.nordugrid.org" finordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/information_collector.cpp0000644000000000000000000000012413153453672025733 xustar000000000000000027 mtime=1504597946.546683 27 atime=1513200576.488726 30 ctime=1513200662.700780517 nordugrid-arc-5.4.2/src/services/a-rex/information_collector.cpp0000644000175000002070000004515213153453672026007 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ldif/LDIFtoXML.h" #include "grid-manager/files/ControlFileHandling.h" #include "job.h" #include "arex.h" namespace ARex { void ARexService::InformationCollector(void) { thread_count_.RegisterThread(); for(;;) { // Run information provider std::string xml_str; int r = -1; { std::string cmd; cmd=Arc::ArcLocation::GetDataDir()+"/CEinfo.pl --splitjobs --config "+config_.ConfigFile(); std::string stdin_str; std::string stderr_str; Arc::Run run(cmd); run.AssignStdin(stdin_str); run.AssignStdout(xml_str); run.AssignStderr(stderr_str); logger_.msg(Arc::DEBUG,"Resource information provider: %s",cmd); if(!run.Start()) { if(thread_count_.WaitForExit()) break; continue; // try again }; while(!run.Wait()) { logger_.msg(Arc::DEBUG,"Resource information provider failed"); } r = run.Result(); if (r!=0) { logger_.msg(Arc::WARNING,"Resource information provider failed with exit status: %i\n%s",r,stderr_str); } else { logger_.msg(Arc::DEBUG,"Resource information provider log:\n%s",stderr_str); }; }; if (r!=0) { logger_.msg(Arc::WARNING,"No new informational document assigned"); } else { logger_.msg(Arc::VERBOSE,"Obtained XML: %s",xml_str.substr(0,100)); // Following code is suboptimal. Most of it should go away // and functionality to be moved to information providers. if(!xml_str.empty()) { // Currently glue states are lost. Counter of all jobs is lost too. 
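      // The following lines publish the document both as the raw XML string and
      // as the backing file <control dir>/info.xml, then read the non-GLUE2
      // AllJobs counter into all_jobs_count_ and drop that node from the
      // in-memory XML document before releasing it.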
infodoc_.Assign(xml_str,config_.ControlDir()+G_DIR_SEPARATOR_S+"info.xml"); Arc::XMLNode root = infodoc_.Acquire(); Arc::XMLNode all_jobs_count = root["Domains"]["AdminDomain"]["Services"]["ComputingService"]["AllJobs"]; if((bool)all_jobs_count) { Arc::stringto((std::string)all_jobs_count,all_jobs_count_); all_jobs_count.Destroy(); // is not glue2 info }; infodoc_.Release(); } else { logger_.msg(Arc::ERROR,"Informational document is empty"); }; }; if(thread_count_.WaitOrCancel(infoprovider_wakeup_period_*100)) break; }; thread_count_.UnregisterThread(); } bool ARexService::RegistrationCollector(Arc::XMLNode &doc) { logger_.msg(Arc::VERBOSE,"Passing service's information from collector to registrator"); Arc::XMLNode empty(ns_, "RegEntry"); empty.New(doc); doc.NewChild("SrcAdv"); doc.NewChild("MetaSrcAdv"); doc["SrcAdv"].NewChild("Type") = "org.nordugrid.execution.arex"; doc["SrcAdv"].NewChild("EPR").NewChild("Address") = endpoint_; if(publishstaticinfo_){ /** This section uses SSPair to send Static Information of Arex to the ISIS. **/ Arc::XMLNode root = infodoc_.Acquire(); Arc::XMLNode staticInfo = doc["SrcAdv"].NewChild("SSPair"); staticInfo.NewChild("Name") = "HealthState"; staticInfo.NewChild("Value") = (std::string)root["Domains"]["AdminDomain"]["Services"] ["ComputingService"]["ComputingEndpoint"]["HealthState"]; staticInfo = doc["SrcAdv"].NewChild("SSPair"); staticInfo.NewChild("Name") = "Capability"; staticInfo.NewChild("Value") = (std::string)root["Domains"]["AdminDomain"]["Services"] ["ComputingService"]["ComputingEndpoint"]["Capability"]; staticInfo = doc["SrcAdv"].NewChild("SSPair"); staticInfo.NewChild("Name") = "OSFamily"; staticInfo.NewChild("Value") = (std::string)root["Domains"]["AdminDomain"]["Services"] ["ComputingService"]["ComputingManager"] ["ExecutionEnvironments"]["ExecutionEnvironment"]["OSFamily"]; staticInfo = doc["SrcAdv"].NewChild("SSPair"); staticInfo.NewChild("Name") = "Platform"; staticInfo.NewChild("Value") = (std::string)root["Domains"]["AdminDomain"]["Services"] ["ComputingService"]["ComputingManager"] ["ExecutionEnvironments"]["ExecutionEnvironment"]["Platform"]; staticInfo = doc["SrcAdv"].NewChild("SSPair"); staticInfo.NewChild("Name") = "PhysicalCPUs"; staticInfo.NewChild("Value") = (std::string)root["Domains"]["AdminDomain"]["Services"] ["ComputingService"]["ComputingManager"] ["ExecutionEnvironments"]["ExecutionEnvironment"]["PhysicalCPUs"]; staticInfo = doc["SrcAdv"].NewChild("SSPair"); staticInfo.NewChild("Name") = "CPUMultiplicity"; staticInfo.NewChild("Value") = (std::string)root["Domains"]["AdminDomain"]["Services"] ["ComputingService"]["ComputingManager"] ["ExecutionEnvironments"]["ExecutionEnvironment"]["CPUMultiplicity"]; staticInfo = doc["SrcAdv"].NewChild("SSPair"); staticInfo.NewChild("Name") = "CPUModel"; staticInfo.NewChild("Value") = (std::string)root["Domains"]["AdminDomain"]["Services"] ["ComputingService"]["ComputingManager"] ["ExecutionEnvironments"]["ExecutionEnvironment"]["CPUModel"]; std::string path = "Domains/AdminDomain/Services/ComputingService/ComputingManager/ApplicationEnvironments/ApplicationEnvironment"; Arc::XMLNodeList AEs = root.Path(path); for(Arc::XMLNodeList::iterator AE = AEs.begin(); AE!=AEs.end(); AE++){ staticInfo = doc["SrcAdv"].NewChild("SSPair"); staticInfo.NewChild("Name") = (std::string)((*AE)["AppName"])+"-"+(std::string)((*AE)["AppVersion"]); staticInfo.NewChild("Value") = (std::string)((*AE)["ID"]); } logger.msg(Arc::VERBOSE, "Registered static information: \n doc: %s",(std::string)doc); 
infodoc_.Release(); } else logger.msg(Arc::VERBOSE, "Information Registered without static attributes: \n doc: %s",(std::string)doc); return true; // // TODO: filter information here. //Arc::XMLNode regdoc(""); //regdoc.New(doc); //doc.NewChild(root); //infodoc_.Release(); } std::string ARexService::getID() { return "ARC:AREX"; } class PrefixedFilePayload: public Arc::PayloadRawInterface { private: std::string prefix_; std::string postfix_; int handle_; void* addr_; off_t length_; public: PrefixedFilePayload(const std::string& prefix,const std::string& postfix,int handle) { prefix_ = prefix; postfix_ = postfix; handle_ = handle; addr_ = MAP_FAILED; length_ = 0; if(handle != -1) { struct stat st; if(::fstat(handle,&st) == 0) { if(st.st_size > 0) { length_ = st.st_size; addr_ = ::mmap(NULL,st.st_size,PROT_READ,MAP_PRIVATE,handle,0); if(addr_ == MAP_FAILED) length_=0; }; }; }; }; ~PrefixedFilePayload(void) { if(addr_ != MAP_FAILED) ::munmap(addr_,length_); if(handle_ != -1) ::close(handle_); }; virtual char operator[](Size_t pos) const { char* p = ((PrefixedFilePayload*)this)->Content(pos); if(!p) return 0; return *p; }; virtual char* Content(Size_t pos) { if(pos < prefix_.length()) return (char*)(prefix_.c_str() + pos); pos -= prefix_.length(); if(pos < length_) return ((char*)(addr_) + pos); pos -= length_; if(pos < postfix_.length()) return (char*)(postfix_.c_str() + pos); return NULL; }; virtual Size_t Size(void) const { return (prefix_.length() + length_ + postfix_.length()); }; virtual char* Insert(Size_t /* pos */ = 0,Size_t /* size */ = 0) { return NULL; }; virtual char* Insert(const char* /* s */,Size_t /* pos */ = 0,Size_t /* size */ = -1) { return NULL; }; virtual char* Buffer(unsigned int num = 0) { if(num == 0) return (char*)(prefix_.c_str()); if(addr_ != MAP_FAILED) { if(num == 1) return (char*)addr_; } else { ++num; }; if(num == 2) return (char*)(postfix_.c_str()); return NULL; }; virtual Size_t BufferSize(unsigned int num = 0) const { if(num == 0) return prefix_.length(); if(addr_ != MAP_FAILED) { if(num == 1) return length_; } else { ++num; }; if(num == 2) return postfix_.length(); return 0; }; virtual Size_t BufferPos(unsigned int num = 0) const { if(num == 0) return 0; if(addr_ != MAP_FAILED) { if(num == 1) return prefix_.length(); } else { ++num; }; if(num == 2) return (prefix_.length() + length_); return (prefix_.length() + length_ + postfix_.length()); }; virtual bool Truncate(Size_t /* size */) { return false; }; }; OptimizedInformationContainer::OptimizedInformationContainer(bool parse_xml) { handle_=-1; parse_xml_=parse_xml; } OptimizedInformationContainer::~OptimizedInformationContainer(void) { if(handle_ != -1) ::close(handle_); if(!filename_.empty()) ::unlink(filename_.c_str()); } int OptimizedInformationContainer::OpenDocument(void) { int h = -1; olock_.lock(); if(handle_ != -1) h = ::dup(handle_); olock_.unlock(); return h; } Arc::MessagePayload* OptimizedInformationContainer::Process(Arc::SOAPEnvelope& in) { Arc::WSRF& wsrp = Arc::CreateWSRP(in); if(!wsrp) { delete &wsrp; return NULL; }; try { Arc::WSRPGetResourcePropertyDocumentRequest* req = dynamic_cast(&wsrp); if(!req) throw std::exception(); if(!(*req)) throw std::exception(); // Request for whole document std::string fake_str("fake"); Arc::XMLNode xresp(fake_str); Arc::WSRPGetResourcePropertyDocumentResponse resp(xresp); std::string rest_str; resp.SOAP().GetDoc(rest_str); std::string::size_type p = rest_str.find(fake_str); if(p == std::string::npos) throw std::exception(); PrefixedFilePayload* outpayload = 
new PrefixedFilePayload(rest_str.substr(0,p),rest_str.substr(p+fake_str.length()),OpenDocument()); delete &wsrp; return outpayload; } catch(std::exception& e) { }; delete &wsrp; if(!parse_xml_) return NULL; // No XML document available Arc::NS ns; Arc::SOAPEnvelope* out = InformationContainer::Process(in); if(!out) return NULL; Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(ns); out->Swap(*outpayload); delete out; return outpayload; } void OptimizedInformationContainer::AssignFile(const std::string& filename) { olock_.lock(); if(!filename_.empty()) ::unlink(filename_.c_str()); if(handle_ != -1) ::close(handle_); filename_ = filename; handle_ = -1; if(!filename_.empty()) { handle_ = ::open(filename_.c_str(),O_RDONLY); if(parse_xml_) { lock_.lock(); doc_.ReadFromFile(filename_); lock_.unlock(); Arc::InformationContainer::Assign(doc_,false); }; }; olock_.unlock(); } void OptimizedInformationContainer::Assign(const std::string& xml, const std::string filename) { std::string tmpfilename; int h = -1; if(filename.empty()) { h = Glib::file_open_tmp(tmpfilename); } else { tmpfilename = filename; tmpfilename += ".tmpXXXXXX"; h = Glib::mkstemp(tmpfilename); }; if(h == -1) { Arc::Logger::getRootLogger().msg(Arc::ERROR,"OptimizedInformationContainer failed to create temporary file"); return; }; Arc::Logger::getRootLogger().msg(Arc::VERBOSE,"OptimizedInformationContainer created temporary file: %s",tmpfilename); for(std::string::size_type p = 0;p 0) expression = expression.Child(0); std::string xpath = (std::string)expression; if(xpath.empty()) { Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,""); ESNotValidQueryStatementFault(fault,"Could not extract xpath query from request"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); } // WARNING. Suboptimal temporary solution. int h = infodoc_.OpenDocument(); if(h == -1) ESFAULT("Failed to open resource information file"); ::lseek(h,0,SEEK_SET); struct stat st; if((::fstat(h,&st) != 0) || (st.st_size == 0)) { ::close(h); ESFAULT("Failed to stat resource information file"); }; char* buf = (char*)::malloc(st.st_size+1); if(!buf) { ::close(h); ESFAULT("Failed to allocate memory for resoure information"); }; off_t p = 0; for(;pfirst,-1); } return Arc::MCC_Status(Arc::STATUS_OK); } } nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/a-rex-start.in0000644000000000000000000000012712754431715023336 xustar000000000000000027 mtime=1471296461.229277 30 atime=1513200649.780622499 30 ctime=1513200662.679780261 nordugrid-arc-5.4.2/src/services/a-rex/a-rex-start.in0000755000175000002070000006264012754431715023413 0ustar00mockbuildmock00000000000000#!/bin/bash add_library_path() { location="$1" if [ ! "x$location" = "x" ] ; then if [ ! "$location" = "/usr" ] ; then libdir="$location/lib" libdir64="$location/lib64" if [ -d "$libdir64" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir64" else LD_LIBRARY_PATH="$libdir64:$LD_LIBRARY_PATH" fi fi if [ -d "$libdir" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir" else LD_LIBRARY_PATH="$libdir:$LD_LIBRARY_PATH" fi fi fi fi } prog=arched RUN=yes # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/a-rex ]; then . /etc/sysconfig/a-rex elif [ -r /etc/default/a-rex ]; then . /etc/default/a-rex fi # GLOBUS_LOCATION GLOBUS_LOCATION=${GLOBUS_LOCATION:-@DEFAULT_GLOBUS_LOCATION@} if [ -n "$GLOBUS_LOCATION" ]; then if [ ! 
-d "$GLOBUS_LOCATION" ]; then echo "GLOBUS_LOCATION ($GLOBUS_LOCATION) not found" exit 1 fi export GLOBUS_LOCATION fi # ARC_LOCATION ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then echo "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi export ARC_LOCATION testconfigblock() { fname=$1 if [ ! -r "$fname" ]; then return fi bname="[$2]" cat "$fname" | sed -e 's/\r$//' -e 's/^\r//' | grep -e '^\[' | { while true; do read line if [ ! $? = 0 ] ; then return fi if [ "$line" = "$bname" ] ; then echo 'true' return fi done echo 'false' } } readconfigvar() { fname=$1 if [ ! -r "$fname" ]; then return fi bname="[$2]" vname=$3 value= cat "$fname" | sed -e 's/\r$//' -e 's/^\r//' | grep -e '^\[' -e "^${vname}=" | { while true; do read line if [ ! $? = 0 ] ; then return fi if [ "$line" = "$bname" ] ; then while true ; do read line if [ ! $? = 0 ] ; then return fi lstart=`echo "$line" | head -c 1` if [ "$lstart" = '[' ] ; then return fi vlname=`echo "$line" | sed 's/=.*//;t;s/.*//'` if [ "$vlname" = "$vname" ] ; then val=`echo "$line" | sed 's/[^=]*=//'` eval "echo $val" return fi done fi done } } readconfigvars() { fname=$1 if [ ! -r "$fname" ]; then return fi bname="[$2]" vname=$3 value= cat "$fname" | sed -e 's/\r$//' -e 's/^\r//' | grep -e '^\[' -e "^${vname}=" | { while true; do read line if [ ! $? = 0 ] ; then return fi if [ "$line" = "$bname" ] ; then while true ; do read line if [ ! $? = 0 ] ; then return fi lstart=`echo "$line" | head -c 1` if [ "$lstart" = '[' ] ; then return fi vlname=`echo "$line" | sed 's/=.*//;t;s/.*//'` if [ "$vlname" = "$vname" ] ; then val=`echo "$line" | sed 's/[^=]*=//'` echo "$val" fi done fi done } } tokenize() { line="$1" prefix="$2" suffix="$3" while true; do if [ -z "$line" ]; then break; fi token=`echo "$line" | sed 's/^"\([^"]*\)" *.*/\1/;t exit;s/^\([^ ]*\) *.*/\1/;:exit'` line=`echo "$line" | sed 's/^"[^"]*" *\(.*\)/\1/;t exit;s/^[^ ]* *\(.*\)/\1/;t exit;s/.*//;:exit'` echo "${prefix}${token}${suffix}" done } voms_trust_to_xml() { xml="" while true; do read line if [ $? -ne '0' ]; then break; fi if [ -z "$line" ]; then continue; fi xmlchain=`tokenize "$line" "" ""` echo "${xmlchain}" done } # ARC_CONFIG if [ "x$ARC_CONFIG" = "x" ]; then if [ -r $ARC_LOCATION/etc/arc.conf ]; then ARC_CONFIG=$ARC_LOCATION/etc/arc.conf elif [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi fi # PID file PID_FILE=`readconfigvar "$ARC_CONFIG" grid-manager pidfile` if [ `id -u` = 0 ] ; then if [ "x$PID_FILE" = "x" ]; then PID_FILE=/var/run/$prog-arex.pid fi else if [ "x$PID_FILE" = "x" ]; then PID_FILE=$HOME/$prog-arex.pid fi fi prepare() { CMD="$ARC_LOCATION/sbin/$prog" if [ ! -x "$CMD" ]; then echo "Missing executable" exit 1 fi if [ ! 
-r "$ARC_CONFIG" ]; then echo "ARC configuration not found (usually /etc/arc.conf)" exit 1 fi # Creating configuration file of arched # Reading following information from config file: # Log file # Debug level # User name GRIDFTPD_PRESENT=`testconfigblock "$ARC_CONFIG" gridftpd/jobs` LOGFILE=`readconfigvar "$ARC_CONFIG" grid-manager logfile` WSLOGFILE=`readconfigvar "$ARC_CONFIG" grid-manager wslogfile` PERFLOGDIR=`readconfigvar "$ARC_CONFIG" common perflogdir` LOGLEVEL=`readconfigvar "$ARC_CONFIG" grid-manager debug` LOGSIZE=`readconfigvar "$ARC_CONFIG" grid-manager logsize` LOGREOPEN=`readconfigvar "$ARC_CONFIG" grid-manager logreopen` WATCHDOG=`readconfigvar "$ARC_CONFIG" grid-manager watchdog` USERNAME=`readconfigvar "$ARC_CONFIG" grid-manager user` GROUPNAME=`echo "$USERNAME" | sed 's/^[^:]*//;s/^://'` USERNAME=`echo "$USERNAME" | sed 's/:.*//'` X509_USER_CERT=`readconfigvar "$ARC_CONFIG" grid-manager x509_user_cert` X509_USER_KEY=`readconfigvar "$ARC_CONFIG" grid-manager x509_user_key` X509_CERT_DIR=`readconfigvar "$ARC_CONFIG" grid-manager x509_cert_dir` GRIDMAP=`readconfigvar "$ARC_CONFIG" grid-manager gridmap` GLOBUS_TCP_PORT_RANGE=`readconfigvar "$ARC_CONFIG" grid-manager globus_tcp_port_range` GLOBUS_UDP_PORT_RANGE=`readconfigvar "$ARC_CONFIG" grid-manager globus_udp_port_range` VOMS_PROCESSING=`readconfigvar "$ARC_CONFIG" grid-manager voms_processing` VOMS_TRUST_CHAINS=`readconfigvars "$ARC_CONFIG" grid-manager voms_trust_chain | voms_trust_to_xml` MAX_JOB_CONTROL_REQUESTS=`readconfigvar "$ARC_CONFIG" grid-manager max_job_control_requests` MAX_INFOSYS_REQUESTS=`readconfigvar "$ARC_CONFIG" grid-manager max_infosys_requests` MAX_DATA_TRANSFER_REQUESTS=`readconfigvar "$ARC_CONFIG" grid-manager max_data_transfer_requests` if [ "$GRIDFTPD_PRESENT" == 'true' ] ; then ALLOWUNKNOWN=`readconfigvar "$ARC_CONFIG" gridftpd allowunknown` USERMAP_BLOCK='gridftpd' USERAUTH_BLOCK='gridftpd/jobs' else ALLOWUNKNOWN=`readconfigvar "$ARC_CONFIG" grid-manager allowunknown` USERMAP_BLOCK='grid-manager' USERAUTH_BLOCK='grid-manager' fi if [ -z "$X509_USER_CERT" ] ; then X509_USER_CERT=`readconfigvar "$ARC_CONFIG" common x509_user_cert` fi if [ -z "$X509_USER_KEY" ] ; then X509_USER_KEY=`readconfigvar "$ARC_CONFIG" common x509_user_key` fi if [ -z "$X509_CERT_DIR" ] ; then X509_CERT_DIR=`readconfigvar "$ARC_CONFIG" common x509_cert_dir` fi if [ -z "$GRIDMAP" ] ; then GRIDMAP=`readconfigvar "$ARC_CONFIG" common gridmap` fi if [ -z "$GLOBUS_TCP_PORT_RANGE" ] ; then GLOBUS_TCP_PORT_RANGE=`readconfigvar "$ARC_CONFIG" common globus_tcp_port_range` fi if [ -z "$GLOBUS_UDP_PORT_RANGE" ] ; then GLOBUS_UDP_PORT_RANGE=`readconfigvar "$ARC_CONFIG" common globus_udp_port_range` fi if [ -z "$VOMS_PROCESSING" ] ; then VOMS_PROCESSING=`readconfigvar "$ARC_CONFIG" common voms_processing` fi VOMS_TRUST_CHAINS="$VOMS_TRUST_CHAINS"`readconfigvars "$ARC_CONFIG" common voms_trust_chain | voms_trust_to_xml` # Exporting collected variables if [ ! -z "$X509_USER_CERT" ] ; then export X509_USER_CERT ; fi if [ ! -z "$X509_USER_KEY" ] ; then export X509_USER_KEY ; fi if [ ! -z "$X509_CERT_DIR" ] ; then export X509_CERT_DIR ; fi if [ ! -z "$GRIDMAP" ] ; then export GRIDMAP ; fi if [ ! -z "$GLOBUS_TCP_PORT_RANGE" ] ; then export GLOBUS_TCP_PORT_RANGE ; fi if [ ! 
-z "$GLOBUS_UDP_PORT_RANGE" ] ; then export GLOBUS_UDP_PORT_RANGE ; fi # Required defaults if [ -z "$GRIDMAP" ] ; then GRIDMAP=/etc/grid-security/grid-mapfile fi if [ -z "$X509_USER_CERT" ] ; then X509_USER_CERT=/etc/grid-security/hostcert.pem fi if [ -z "$X509_USER_KEY" ] ; then X509_USER_KEY=/etc/grid-security/hostkey.pem fi if [ -z "$X509_CERT_DIR" ] ; then X509_CERT_DIR=/etc/grid-security/certificates fi if [ "$ALLOWUNKNOWN" != "yes" ] ; then ALLOWUNKNOWN="" fi # Web Service configuration arex_endpoint="" arex_mount_point=`readconfigvar "$ARC_CONFIG" grid-manager arex_mount_point` arex_mount_point=${arex_mount_point:-`readconfigvar "$ARC_CONFIG" cluster arex_mount_point`} if [ ! -z "$arex_mount_point" ] ; then arex_proto=`echo "$arex_mount_point" | sed 's/^\([^:]*\):\/\/.*/\1/;t;s/.*//'` arex_host=`echo "$arex_mount_point" | sed 's/^[^:]*:\/\/\([^:\/]*\).*/\1/;t;s/.*//'` arex_port=`echo "$arex_mount_point" | sed 's/^[^:]*:\/\/[^:]*:\([^\/]*\)\(.*\)/\1/;t;s/.*//'` arex_path=`echo "$arex_mount_point" | sed 's/^[^:]*:\/\/[^\/]*\/\(.*\)/\1/;t;s/.*//'` if [ -z "$arex_port" ] ; then if [ "$arex_proto" = "https" ] ; then arex_port="443" else arex_port="80" fi fi arex_endpoint="$arex_mount_point" fi enable_emies_interface=`readconfigvar "$ARC_CONFIG" grid-manager enable_emies_interface` if [ ! -z "$enable_emies_interface" ] ; then enable_emies_interface="$enable_emies_interface" fi emir_registration="" # if [ ! -z "$arex_mount_point" ]; then # emir_urls=`readconfigvar "$ARC_CONFIG" "registration/emir" emirurls` # if [ ! -z "$emir_urls" ]; then # emir_no_xbes=`readconfigvar "$ARC_CONFIG" "registration/emir" disablereg_xbes` # emir_no_emies=`readconfigvar "$ARC_CONFIG" "registration/emir" disablereg_emies` # if [ ! "$emir_no_xbes" = "yes" ]; then # emir_validity=`readconfigvar "$ARC_CONFIG" "registration/emir" validity` # if [ -z "$emir_validity" ]; then emir_validity="600"; fi # emir_period=`readconfigvar "$ARC_CONFIG" "registration/emir" period` # if [ -z "$emir_period" ]; then emir_period="60"; fi # emir_urls=`echo "$emir_urls" | sed ':loop;s/, */<\/URL>EMIREG:/;t loop'` # emir_urls="EMIREG:$emir_urls" # emir_registration="\ # # $arex_mount_point # $emir_validity # $emir_period # # $emir_urls # 10 # $arex_mount_point # $emir_validity # $emir_period # $X509_USER_KEY # $X509_USER_CERT # $X509_CERT_DIR # #" # fi # fi # fi isis_registration="" if [ ! -z "$arex_mount_point" ]; then isis_urls=`readconfigvar "$ARC_CONFIG" "registration/isis" isisurls` if [ ! -z "$isis_urls" ]; then isis_no_xbes=`readconfigvar "$ARC_CONFIG" "registration/isis" disablereg_xbes` isis_no_emies=`readconfigvar "$ARC_CONFIG" "registration/isis" disablereg_emies` if [ ! 
"$isis_no_xbes" = "yes" ]; then isis_validity=`readconfigvar "$ARC_CONFIG" "registration/isis" validity` if [ -z "$isis_validity" ]; then isis_validity="600"; fi isis_period=`readconfigvar "$ARC_CONFIG" "registration/isis" period` if [ -z "$isis_period" ]; then isis_period="60"; fi isis_urls=`echo "$isis_urls" | sed ':loop;s/, */<\/URL>ISIS:/;t loop'` isis_urls="ISIS:$isis_urls" isis_registration="\ $arex_mount_point $isis_validity $isis_period $isis_urls 10 $arex_mount_point $isis_validity $isis_period $X509_USER_KEY $X509_USER_CERT $X509_CERT_DIR " fi fi fi infoproviders_timeout=`readconfigvar "$ARC_CONFIG" infosys infoproviders_timeout` if [ -z "$infoproviders_timeout" ]; then infoproviders_wakeup="\ 600" else infoproviders_wakeup="\ $infoproviders_timeout" fi argus_shc="" argus_plugin="" arguspep_endpoint=`readconfigvar "$ARC_CONFIG" grid-manager arguspep_endpoint` if [ ! -z "$arguspep_endpoint" ]; then argus_plugin="${argus_plugin}arguspepclient" if [ ! -f "$ARC_LOCATION/lib/arc/libarguspepclient.so" ] && [ ! -f "$ARC_LOCATION/lib64/arc/libarguspepclient.so" ]; then echo "Plugin arguspepclient(libarguspepclient.so) not found" echo "You may need to install corresponding package" exit 1 fi arguspep_profile=`readconfigvar "$ARC_CONFIG" grid-manager arguspep_profile` if [ -z "$arguspep_profile" ]; then arguspep_profile="emi"; fi arguspep_usermap=`readconfigvar "$ARC_CONFIG" grid-manager arguspep_usermap` if [ -z "$arguspep_usermap" ]; then arguspep_usermap="false"; fi if [ "$arguspep_usermap" = "yes" ]; then arguspep_usermap="true"; fi if [ "$arguspep_usermap" = "no" ]; then arguspep_usermap="false"; fi argus_shc="${argus_shc} $arguspep_endpoint $arguspep_profile $X509_USER_KEY $X509_USER_CERT $X509_CERT_DIR $arguspep_usermap " fi arguspdp_endpoint=`readconfigvar "$ARC_CONFIG" grid-manager arguspdp_endpoint` if [ ! -z "$arguspdp_endpoint" ]; then argus_plugin="${argus_plugin}arguspdpclient" if [ ! -f "$ARC_LOCATION/lib/arc/libarguspdpclient.so" ] && [ ! 
-f "$ARC_LOCATION/lib64/arc/libarguspdpclient.so" ]; then echo "Plugin arguspdpclient(libarguspdpclient.so) not found" echo "You may need to install corresponding package" exit 1 fi arguspdp_profile=`readconfigvar "$ARC_CONFIG" grid-manager arguspdp_profile` if [ -z "$arguspdp_profile" ]; then arguspdp_profile="emi"; fi arguspdp_usermap=`readconfigvar "$ARC_CONFIG" grid-manager arguspdp_usermap` if [ -z "$arguspdp_usermap" ]; then arguspdp_usermap="false"; fi if [ "$arguspdp_usermap" = "yes" ]; then arguspdp_usermap="true"; fi if [ "$arguspdp_usermap" = "no" ]; then arguspdp_usermap="false"; fi arguspdp_acceptnotapplicable=`readconfigvar "$ARC_CONFIG" grid-manager arguspdp_acceptnotapplicable` if [ -z "$arguspdp_acceptnotapplicable" ]; then arguspdp_acceptnotapplicable="false"; fi if [ "$arguspdp_acceptnotapplicable" = "yes" ]; then arguspdp_acceptnotapplicable="true"; fi if [ "$arguspdp_acceptnotapplicable" = "no" ]; then arguspdp_acceptnotapplicable="false"; fi argus_shc="${argus_shc} $arguspdp_endpoint $arguspdp_profile $X509_USER_KEY $X509_USER_CERT $X509_CERT_DIR $arguspdp_usermap $arguspdp_acceptnotapplicable " fi legacy_shc=" $ARC_CONFIG $USERAUTH_BLOCK $ARC_CONFIG $USERMAP_BLOCK " # cache service cache_service_plexer="" cache_service="" use_cache_service=`readconfigvar "$ARC_CONFIG" grid-manager enable_cache_service` if [ "$use_cache_service" = "yes" ]; then use_dtr=`readconfigvar "$ARC_CONFIG" grid-manager enable_dtr` if [ -z "$arex_mount_point" -o "$use_dtr" = "no" ]; then echo "Both DTR and A-REX WS interface must be turned on to use cache service" exit 1 fi cache_service_plexer="^/cacheservice" cache_service="\ \ $legacy_shc \ $ARC_CONFIG\ true\ \ " fi if [ -z "$ALLOWUNKNOWN" ]; then gridmapmatch="\ " fi AREX_CONFIG=`mktemp -t arex.xml.XXXXXX` if [ -z "$AREX_CONFIG" ] ; then echo "Failed to create temporary file" exit 1 fi CMD="$CMD -c '$AREX_CONFIG'" # VOMS_LOCATION VOMS_LOCATION=${VOMS_LOCATION:-@DEFAULT_VOMS_LOCATION@} add_library_path "$VOMS_LOCATION" add_library_path "$GLOBUS_LOCATION" if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@ else LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@:$LD_LIBRARY_PATH fi export LD_LIBRARY_PATH case "$LOGLEVEL" in 0) LOGLEVEL="FATAL" ;; 1) LOGLEVEL="ERROR" ;; 2) LOGLEVEL="WARNING" ;; 3) LOGLEVEL="INFO" ;; 4) LOGLEVEL="VERBOSE" ;; 5) LOGLEVEL="DEBUG" ;; *) LOGLEVEL="WARNING" ;; esac if [ "$USERNAME" = "root" ] ; then USERNAME="" fi if [ "$GROUPNAME" = "root" ] ; then GROUPNAME="" fi LOGFILE=${LOGFILE:-/var/log/arc/grid-manager.log} if [ ! -d `dirname $LOGFILE` ]; then mkdir -p `dirname $LOGFILE` fi WSLOGFILE=${WSLOGFILE:-/var/log/arc/ws-interface.log} if [ ! -d `dirname $WSLOGFILE` ]; then mkdir -p `dirname $WSLOGFILE` fi PERFLOGDIR=${PERFLOGDIR:-/var/log/arc/perfdata} if [ ! -d $PERFLOGDIR ]; then mkdir -p $PERFLOGDIR fi # Set permissions suitable for logs written under user accounts chmod a+rwx $PERFLOGDIR LOGSIZE=${LOGSIZE:--1 -1} LOGNUM=`echo "$LOGSIZE" | sed 's/^ *[-+0-9]* *//'` LOGSIZE=`echo "$LOGSIZE" | sed 's/^ *\([-+0-9]*\).*/\1/'` LOGREOPEN=${LOGREOPEN:-no} if [ "$LOGREOPEN" = "yes" ] ; then LOGREOPEN="true" else LOGREOPEN="false" fi WATCHDOG=${WATCHDOG:-no} if [ "$WATCHDOG" = "yes" ] ; then WATCHDOG="true" else WATCHDOG="false" fi VOMS_PROCESSING=${VOMS_PROCESSING:-standard} MAX_JOB_CONTROL_REQUESTS=${MAX_JOB_CONTROL_REQUESTS:-100} MAX_INFOSYS_REQUESTS=${MAX_INFOSYS_REQUESTS:-1} MAX_DATA_TRANSFER_REQUESTS=${MAX_DATA_TRANSFER_REQUESTS:-100} if [ ! 
-z "$USERNAME" ] ; then CMD="$CMD -u '$USERNAME'" fi if [ ! -z "$GROUPNAME" ] ; then CMD="$CMD -g '$GROUPNAME'" fi # A-Rex without WS interface AREXCFG="\ $PID_FILE $LOGFILE $LOGLEVEL $LOGNUM $LOGSIZE $LOGREOPEN $WATCHDOG $ARC_LOCATION/@pkglibsubdir@/ arex $ARC_CONFIG $infoproviders_wakeup " # A-Rex with WS interface over HTTPS AREXCFGWSS="\ $PID_FILE $LOGFILE $WSLOGFILE $LOGLEVEL $LOGNUM $LOGSIZE $LOGREOPEN $WATCHDOG $ARC_LOCATION/@pkglibsubdir@/ mcctcp mcctls mcchttp mccsoap arex identitymap arcshc arcshclegacy $argus_plugin $arex_port $X509_USER_KEY $X509_USER_CERT $X509_CERT_DIR $VOMS_TRUST_CHAINS $VOMS_PROCESSING $gridmapmatch $GRIDMAP nobody $ARC_CONFIG POST GET PUT HEAD ^/$arex_path $cache_service_plexer $legacy_shc $argus_shc $arex_endpoint $enable_emies_interface $emir_registration $isis_registration $ARC_CONFIG $MAX_INFOSYS_REQUESTS $infoproviders_wakeup $MAX_JOB_CONTROL_REQUESTS $MAX_DATA_TRANSFER_REQUESTS $cache_service " if [ -z "$arex_proto" ] ; then echo "$AREXCFG" > "$AREX_CONFIG" elif [ "$arex_proto" = "https" ] ; then echo "$AREXCFGWSS" > "$AREX_CONFIG" else echo "Unsupported protocol: $arex_proto" exit 1 fi if [ ! -z "$USERNAME" ] ; then [ -f $AREX_CONFIG ] && chown $USERNAME $AREX_CONFIG fi # prepare to collect crash information COREDIR=`dirname ${LOGFILE}`/arccore mkdir -p ${COREDIR} cd ${COREDIR} ulimit -c unlimited } if [ "$RUN" != "yes" ] ; then echo "a-rex disabled, please adjust the configuration to your needs " echo "and then set RUN to 'yes' in /etc/default/a-rex to enable it." return 0 fi prepare exec "$CMD" nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315731022667 xustar000000000000000030 mtime=1513200601.774035359 30 atime=1513200649.728621863 30 ctime=1513200662.676780224 nordugrid-arc-5.4.2/src/services/a-rex/Makefile.in0000644000175000002070000021006513214315731022741 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ noinst_PROGRAMS = test$(EXEEXT) test_cache_check$(EXEEXT) subdir = src/services/a-rex DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/a-rex-backtrace-collect.8.in \ $(srcdir)/a-rex-backtrace-collect.in $(srcdir)/a-rex-start.in \ $(srcdir)/a-rex.in $(srcdir)/a-rex.service.in \ $(srcdir)/perferator.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = a-rex a-rex.service a-rex-start \ a-rex-backtrace-collect a-rex-backtrace-collect.8 perferator CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(initddir)" \ "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(sbindir)" \ "$(DESTDIR)$(man8dir)" "$(DESTDIR)$(unitsdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) libarex_la_DEPENDENCIES = ldif/libldif.la $(GRIDMANAGER_LIBS) \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libarex_la_OBJECTS = libarex_la-arex.lo 
libarex_la-job.lo \ libarex_la-create_activity.lo \ libarex_la-get_activity_documents.lo \ libarex_la-get_activity_statuses.lo \ libarex_la-terminate_activities.lo \ libarex_la-change_activity_status.lo \ libarex_la-migrate_activity.lo \ libarex_la-get_factory_attributes_document.lo \ libarex_la-update_credentials.lo libarex_la-faults.lo \ libarex_la-get.lo libarex_la-put.lo libarex_la-PayloadFile.lo \ libarex_la-FileChunks.lo libarex_la-information_collector.lo \ libarex_la-cachecheck.lo libarex_la-tools.lo libarex_la_OBJECTS = $(am_libarex_la_OBJECTS) libarex_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libarex_la_CXXFLAGS) \ $(CXXFLAGS) $(libarex_la_LDFLAGS) $(LDFLAGS) -o $@ PROGRAMS = $(noinst_PROGRAMS) am_test_OBJECTS = test-test.$(OBJEXT) test_OBJECTS = $(am_test_OBJECTS) am__DEPENDENCIES_1 = test_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) test_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(test_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_test_cache_check_OBJECTS = \ test_cache_check-test_cache_check.$(OBJEXT) test_cache_check_OBJECTS = $(am_test_cache_check_OBJECTS) test_cache_check_DEPENDENCIES = $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) test_cache_check_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(test_cache_check_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SCRIPTS = $(initd_SCRIPTS) $(pkgdata_SCRIPTS) $(sbin_SCRIPTS) DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarex_la_SOURCES) $(test_SOURCES) \ $(test_cache_check_SOURCES) DIST_SOURCES = $(libarex_la_SOURCES) $(test_SOURCES) \ $(test_cache_check_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive 
install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive man8dir = $(mandir)/man8 NROFF = nroff MANS = $(man_MANS) DATA = $(units_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = 
@GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ 
S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @JURA_CLIENT_ENABLED_FALSE@JURA_CLIENT = @JURA_CLIENT_ENABLED_TRUE@JURA_CLIENT = jura SUBDIRS = delegation grid-manager ldif infoproviders $(JURA_CLIENT) lrms schema DIST_SUBDIRS = delegation grid-manager ldif infoproviders jura lrms schema pkglib_LTLIBRARIES = libarex.la @SYSV_SCRIPTS_ENABLED_FALSE@AREX_SCRIPT = @SYSV_SCRIPTS_ENABLED_TRUE@AREX_SCRIPT = a-rex initd_SCRIPTS = $(AREX_SCRIPT) @SYSTEMD_UNITS_ENABLED_FALSE@AREX_UNIT = @SYSTEMD_UNITS_ENABLED_TRUE@AREX_UNIT = a-rex.service @SYSTEMD_UNITS_ENABLED_FALSE@AREX_UNIT_WRAPPER = 
@SYSTEMD_UNITS_ENABLED_TRUE@AREX_UNIT_WRAPPER = a-rex-start units_DATA = $(AREX_UNIT) pkgdata_SCRIPTS = $(AREX_UNIT_WRAPPER) perferator sbin_SCRIPTS = a-rex-backtrace-collect man_MANS = a-rex-backtrace-collect.8 GRIDMANAGER_LIBS = grid-manager/libgridmanager.la delegation/libdelegation.la libarex_la_SOURCES = arex.cpp job.cpp \ create_activity.cpp get_activity_documents.cpp \ get_activity_statuses.cpp terminate_activities.cpp \ change_activity_status.cpp migrate_activity.cpp \ get_factory_attributes_document.cpp update_credentials.cpp faults.cpp \ get.cpp put.cpp PayloadFile.cpp FileChunks.cpp \ information_collector.cpp cachecheck.cpp tools.cpp \ arex.h job.h PayloadFile.h FileChunks.h tools.h libarex_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) # Needs real cleaning in respect to dependencies libarex_la_LIBADD = \ ldif/libldif.la \ $(GRIDMANAGER_LIBS) \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la libarex_la_LDFLAGS = -no-undefined -avoid-version -module $(DBCXX_LIBS) test_SOURCES = test.cpp test_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_LDADD = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) test_cache_check_SOURCES = test_cache_check.cpp test_cache_check_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_cache_check_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): a-rex: $(top_builddir)/config.status $(srcdir)/a-rex.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ a-rex.service: $(top_builddir)/config.status $(srcdir)/a-rex.service.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ a-rex-start: $(top_builddir)/config.status $(srcdir)/a-rex-start.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ a-rex-backtrace-collect: $(top_builddir)/config.status $(srcdir)/a-rex-backtrace-collect.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ a-rex-backtrace-collect.8: $(top_builddir)/config.status $(srcdir)/a-rex-backtrace-collect.8.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ perferator: $(top_builddir)/config.status $(srcdir)/perferator.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarex.la: $(libarex_la_OBJECTS) $(libarex_la_DEPENDENCIES) $(libarex_la_LINK) -rpath $(pkglibdir) $(libarex_la_OBJECTS) $(libarex_la_LIBADD) $(LIBS) clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list test$(EXEEXT): $(test_OBJECTS) $(test_DEPENDENCIES) @rm -f test$(EXEEXT) $(test_LINK) $(test_OBJECTS) $(test_LDADD) $(LIBS) test_cache_check$(EXEEXT): $(test_cache_check_OBJECTS) $(test_cache_check_DEPENDENCIES) @rm -f test_cache_check$(EXEEXT) $(test_cache_check_LINK) $(test_cache_check_OBJECTS) $(test_cache_check_LDADD) $(LIBS) install-initdSCRIPTS: $(initd_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(initddir)" || 
$(MKDIR_P) "$(DESTDIR)$(initddir)" @list='$(initd_SCRIPTS)'; test -n "$(initddir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(initddir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(initddir)$$dir" || exit $$?; \ } \ ; done uninstall-initdSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(initd_SCRIPTS)'; test -n "$(initddir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(initddir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(initddir)" && rm -f $$files install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files install-sbinSCRIPTS: $(sbin_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(sbindir)" || $(MKDIR_P) "$(DESTDIR)$(sbindir)" @list='$(sbin_SCRIPTS)'; test -n "$(sbindir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) 
$$files '$(DESTDIR)$(sbindir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(sbindir)$$dir" || exit $$?; \ } \ ; done uninstall-sbinSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(sbin_SCRIPTS)'; test -n "$(sbindir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(sbindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(sbindir)" && rm -f $$files mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-FileChunks.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-PayloadFile.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-arex.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-cachecheck.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-change_activity_status.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-create_activity.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-faults.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-get.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-get_activity_documents.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-get_activity_statuses.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-get_factory_attributes_document.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-information_collector.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-job.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-migrate_activity.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-put.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-terminate_activities.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-tools.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-update_credentials.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_cache_check-test_cache_check.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarex_la-arex.lo: arex.cpp @am__fastdepCXX_TRUE@ 
$(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-arex.lo -MD -MP -MF $(DEPDIR)/libarex_la-arex.Tpo -c -o libarex_la-arex.lo `test -f 'arex.cpp' || echo '$(srcdir)/'`arex.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarex_la-arex.Tpo $(DEPDIR)/libarex_la-arex.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arex.cpp' object='libarex_la-arex.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-arex.lo `test -f 'arex.cpp' || echo '$(srcdir)/'`arex.cpp libarex_la-job.lo: job.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-job.lo -MD -MP -MF $(DEPDIR)/libarex_la-job.Tpo -c -o libarex_la-job.lo `test -f 'job.cpp' || echo '$(srcdir)/'`job.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarex_la-job.Tpo $(DEPDIR)/libarex_la-job.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='job.cpp' object='libarex_la-job.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-job.lo `test -f 'job.cpp' || echo '$(srcdir)/'`job.cpp libarex_la-create_activity.lo: create_activity.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-create_activity.lo -MD -MP -MF $(DEPDIR)/libarex_la-create_activity.Tpo -c -o libarex_la-create_activity.lo `test -f 'create_activity.cpp' || echo '$(srcdir)/'`create_activity.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarex_la-create_activity.Tpo $(DEPDIR)/libarex_la-create_activity.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='create_activity.cpp' object='libarex_la-create_activity.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-create_activity.lo `test -f 'create_activity.cpp' || echo '$(srcdir)/'`create_activity.cpp libarex_la-get_activity_documents.lo: get_activity_documents.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-get_activity_documents.lo -MD -MP -MF $(DEPDIR)/libarex_la-get_activity_documents.Tpo -c -o libarex_la-get_activity_documents.lo `test -f 'get_activity_documents.cpp' || echo '$(srcdir)/'`get_activity_documents.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarex_la-get_activity_documents.Tpo $(DEPDIR)/libarex_la-get_activity_documents.Plo 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='get_activity_documents.cpp' object='libarex_la-get_activity_documents.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-get_activity_documents.lo `test -f 'get_activity_documents.cpp' || echo '$(srcdir)/'`get_activity_documents.cpp libarex_la-get_activity_statuses.lo: get_activity_statuses.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-get_activity_statuses.lo -MD -MP -MF $(DEPDIR)/libarex_la-get_activity_statuses.Tpo -c -o libarex_la-get_activity_statuses.lo `test -f 'get_activity_statuses.cpp' || echo '$(srcdir)/'`get_activity_statuses.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarex_la-get_activity_statuses.Tpo $(DEPDIR)/libarex_la-get_activity_statuses.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='get_activity_statuses.cpp' object='libarex_la-get_activity_statuses.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-get_activity_statuses.lo `test -f 'get_activity_statuses.cpp' || echo '$(srcdir)/'`get_activity_statuses.cpp libarex_la-terminate_activities.lo: terminate_activities.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-terminate_activities.lo -MD -MP -MF $(DEPDIR)/libarex_la-terminate_activities.Tpo -c -o libarex_la-terminate_activities.lo `test -f 'terminate_activities.cpp' || echo '$(srcdir)/'`terminate_activities.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarex_la-terminate_activities.Tpo $(DEPDIR)/libarex_la-terminate_activities.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='terminate_activities.cpp' object='libarex_la-terminate_activities.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-terminate_activities.lo `test -f 'terminate_activities.cpp' || echo '$(srcdir)/'`terminate_activities.cpp libarex_la-change_activity_status.lo: change_activity_status.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-change_activity_status.lo -MD -MP -MF $(DEPDIR)/libarex_la-change_activity_status.Tpo -c -o libarex_la-change_activity_status.lo `test -f 'change_activity_status.cpp' || echo '$(srcdir)/'`change_activity_status.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarex_la-change_activity_status.Tpo $(DEPDIR)/libarex_la-change_activity_status.Plo 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='change_activity_status.cpp' object='libarex_la-change_activity_status.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-change_activity_status.lo `test -f 'change_activity_status.cpp' || echo '$(srcdir)/'`change_activity_status.cpp libarex_la-migrate_activity.lo: migrate_activity.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-migrate_activity.lo -MD -MP -MF $(DEPDIR)/libarex_la-migrate_activity.Tpo -c -o libarex_la-migrate_activity.lo `test -f 'migrate_activity.cpp' || echo '$(srcdir)/'`migrate_activity.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarex_la-migrate_activity.Tpo $(DEPDIR)/libarex_la-migrate_activity.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='migrate_activity.cpp' object='libarex_la-migrate_activity.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-migrate_activity.lo `test -f 'migrate_activity.cpp' || echo '$(srcdir)/'`migrate_activity.cpp libarex_la-get_factory_attributes_document.lo: get_factory_attributes_document.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-get_factory_attributes_document.lo -MD -MP -MF $(DEPDIR)/libarex_la-get_factory_attributes_document.Tpo -c -o libarex_la-get_factory_attributes_document.lo `test -f 'get_factory_attributes_document.cpp' || echo '$(srcdir)/'`get_factory_attributes_document.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarex_la-get_factory_attributes_document.Tpo $(DEPDIR)/libarex_la-get_factory_attributes_document.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='get_factory_attributes_document.cpp' object='libarex_la-get_factory_attributes_document.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-get_factory_attributes_document.lo `test -f 'get_factory_attributes_document.cpp' || echo '$(srcdir)/'`get_factory_attributes_document.cpp libarex_la-update_credentials.lo: update_credentials.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-update_credentials.lo -MD -MP -MF $(DEPDIR)/libarex_la-update_credentials.Tpo -c -o libarex_la-update_credentials.lo `test -f 'update_credentials.cpp' || echo '$(srcdir)/'`update_credentials.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarex_la-update_credentials.Tpo 
$(DEPDIR)/libarex_la-update_credentials.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='update_credentials.cpp' object='libarex_la-update_credentials.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-update_credentials.lo `test -f 'update_credentials.cpp' || echo '$(srcdir)/'`update_credentials.cpp libarex_la-faults.lo: faults.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-faults.lo -MD -MP -MF $(DEPDIR)/libarex_la-faults.Tpo -c -o libarex_la-faults.lo `test -f 'faults.cpp' || echo '$(srcdir)/'`faults.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarex_la-faults.Tpo $(DEPDIR)/libarex_la-faults.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='faults.cpp' object='libarex_la-faults.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-faults.lo `test -f 'faults.cpp' || echo '$(srcdir)/'`faults.cpp libarex_la-get.lo: get.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-get.lo -MD -MP -MF $(DEPDIR)/libarex_la-get.Tpo -c -o libarex_la-get.lo `test -f 'get.cpp' || echo '$(srcdir)/'`get.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarex_la-get.Tpo $(DEPDIR)/libarex_la-get.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='get.cpp' object='libarex_la-get.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-get.lo `test -f 'get.cpp' || echo '$(srcdir)/'`get.cpp libarex_la-put.lo: put.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-put.lo -MD -MP -MF $(DEPDIR)/libarex_la-put.Tpo -c -o libarex_la-put.lo `test -f 'put.cpp' || echo '$(srcdir)/'`put.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarex_la-put.Tpo $(DEPDIR)/libarex_la-put.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='put.cpp' object='libarex_la-put.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-put.lo `test -f 'put.cpp' || echo '$(srcdir)/'`put.cpp libarex_la-PayloadFile.lo: PayloadFile.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) 
--mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-PayloadFile.lo -MD -MP -MF $(DEPDIR)/libarex_la-PayloadFile.Tpo -c -o libarex_la-PayloadFile.lo `test -f 'PayloadFile.cpp' || echo '$(srcdir)/'`PayloadFile.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarex_la-PayloadFile.Tpo $(DEPDIR)/libarex_la-PayloadFile.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='PayloadFile.cpp' object='libarex_la-PayloadFile.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-PayloadFile.lo `test -f 'PayloadFile.cpp' || echo '$(srcdir)/'`PayloadFile.cpp libarex_la-FileChunks.lo: FileChunks.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-FileChunks.lo -MD -MP -MF $(DEPDIR)/libarex_la-FileChunks.Tpo -c -o libarex_la-FileChunks.lo `test -f 'FileChunks.cpp' || echo '$(srcdir)/'`FileChunks.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarex_la-FileChunks.Tpo $(DEPDIR)/libarex_la-FileChunks.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FileChunks.cpp' object='libarex_la-FileChunks.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-FileChunks.lo `test -f 'FileChunks.cpp' || echo '$(srcdir)/'`FileChunks.cpp libarex_la-information_collector.lo: information_collector.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-information_collector.lo -MD -MP -MF $(DEPDIR)/libarex_la-information_collector.Tpo -c -o libarex_la-information_collector.lo `test -f 'information_collector.cpp' || echo '$(srcdir)/'`information_collector.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarex_la-information_collector.Tpo $(DEPDIR)/libarex_la-information_collector.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='information_collector.cpp' object='libarex_la-information_collector.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-information_collector.lo `test -f 'information_collector.cpp' || echo '$(srcdir)/'`information_collector.cpp libarex_la-cachecheck.lo: cachecheck.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-cachecheck.lo -MD -MP -MF $(DEPDIR)/libarex_la-cachecheck.Tpo -c -o libarex_la-cachecheck.lo `test -f 'cachecheck.cpp' || echo '$(srcdir)/'`cachecheck.cpp 
@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarex_la-cachecheck.Tpo $(DEPDIR)/libarex_la-cachecheck.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='cachecheck.cpp' object='libarex_la-cachecheck.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-cachecheck.lo `test -f 'cachecheck.cpp' || echo '$(srcdir)/'`cachecheck.cpp libarex_la-tools.lo: tools.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-tools.lo -MD -MP -MF $(DEPDIR)/libarex_la-tools.Tpo -c -o libarex_la-tools.lo `test -f 'tools.cpp' || echo '$(srcdir)/'`tools.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarex_la-tools.Tpo $(DEPDIR)/libarex_la-tools.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='tools.cpp' object='libarex_la-tools.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-tools.lo `test -f 'tools.cpp' || echo '$(srcdir)/'`tools.cpp test-test.o: test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -MT test-test.o -MD -MP -MF $(DEPDIR)/test-test.Tpo -c -o test-test.o `test -f 'test.cpp' || echo '$(srcdir)/'`test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test-test.Tpo $(DEPDIR)/test-test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test.cpp' object='test-test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -c -o test-test.o `test -f 'test.cpp' || echo '$(srcdir)/'`test.cpp test-test.obj: test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -MT test-test.obj -MD -MP -MF $(DEPDIR)/test-test.Tpo -c -o test-test.obj `if test -f 'test.cpp'; then $(CYGPATH_W) 'test.cpp'; else $(CYGPATH_W) '$(srcdir)/test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test-test.Tpo $(DEPDIR)/test-test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test.cpp' object='test-test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -c -o test-test.obj `if test -f 'test.cpp'; then $(CYGPATH_W) 'test.cpp'; else $(CYGPATH_W) '$(srcdir)/test.cpp'; fi` test_cache_check-test_cache_check.o: test_cache_check.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_cache_check_CXXFLAGS) $(CXXFLAGS) -MT test_cache_check-test_cache_check.o -MD -MP -MF $(DEPDIR)/test_cache_check-test_cache_check.Tpo -c -o test_cache_check-test_cache_check.o `test -f 'test_cache_check.cpp' || echo 
'$(srcdir)/'`test_cache_check.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_cache_check-test_cache_check.Tpo $(DEPDIR)/test_cache_check-test_cache_check.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_cache_check.cpp' object='test_cache_check-test_cache_check.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_cache_check_CXXFLAGS) $(CXXFLAGS) -c -o test_cache_check-test_cache_check.o `test -f 'test_cache_check.cpp' || echo '$(srcdir)/'`test_cache_check.cpp test_cache_check-test_cache_check.obj: test_cache_check.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_cache_check_CXXFLAGS) $(CXXFLAGS) -MT test_cache_check-test_cache_check.obj -MD -MP -MF $(DEPDIR)/test_cache_check-test_cache_check.Tpo -c -o test_cache_check-test_cache_check.obj `if test -f 'test_cache_check.cpp'; then $(CYGPATH_W) 'test_cache_check.cpp'; else $(CYGPATH_W) '$(srcdir)/test_cache_check.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_cache_check-test_cache_check.Tpo $(DEPDIR)/test_cache_check-test_cache_check.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_cache_check.cpp' object='test_cache_check-test_cache_check.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_cache_check_CXXFLAGS) $(CXXFLAGS) -c -o test_cache_check-test_cache_check.obj `if test -f 'test_cache_check.cpp'; then $(CYGPATH_W) 'test_cache_check.cpp'; else $(CYGPATH_W) '$(srcdir)/test_cache_check.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man8: $(man_MANS) @$(NORMAL_INSTALL) test -z "$(man8dir)" || $(MKDIR_P) "$(DESTDIR)$(man8dir)" @list=''; test -n "$(man8dir)" || exit 0; \ { for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.8[a-z]*$$/p'; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^8][0-9a-z]*$$,8,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man8dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man8dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man8dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man8dir)" || exit $$?; }; \ done; } uninstall-man8: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man8dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.8[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^8][0-9a-z]*$$,8,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ test -z "$$files" || { \ echo " ( cd '$(DESTDIR)$(man8dir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(man8dir)" && rm -f $$files; } install-unitsDATA: $(units_DATA) @$(NORMAL_INSTALL) test -z "$(unitsdir)" || $(MKDIR_P) "$(DESTDIR)$(unitsdir)" @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else 
d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(unitsdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(unitsdir)" || exit $$?; \ done uninstall-unitsDATA: @$(NORMAL_UNINSTALL) @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(unitsdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(unitsdir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @list='$(MANS)'; if test -n "$$list"; then \ list=`for p in $$list; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \ if test -n "$$list" && \ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) $(SCRIPTS) $(MANS) $(DATA) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(initddir)" "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(sbindir)" "$(DESTDIR)$(man8dir)" "$(DESTDIR)$(unitsdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
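# Usage sketch (not part of the automake output): as the note above the
# recursive targets says, variables should be overridden on the `make' command
# line rather than by editing this generated file. Assuming an already
# configured build tree, typical invocations for this subdirectory might be:
#
#   make CXXFLAGS='-g -O0'                  # rebuild with overridden compiler flags
#   make install DESTDIR=/tmp/arex-staging  # staged install of libarex.la, the
#                                           # init/unit scripts, perferator and
#                                           # the a-rex-backtrace-collect man page
#
# DESTDIR and command-line variable overrides are standard make/automake
# conventions; the staging path above is only illustrative.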
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstPROGRAMS \ clean-pkglibLTLIBRARIES mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-initdSCRIPTS install-man \ install-pkgdataSCRIPTS install-unitsDATA install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-sbinSCRIPTS install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-man8 install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-initdSCRIPTS uninstall-man \ uninstall-pkgdataSCRIPTS uninstall-pkglibLTLIBRARIES \ uninstall-sbinSCRIPTS uninstall-unitsDATA uninstall-man: uninstall-man8 .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-noinstPROGRAMS clean-pkglibLTLIBRARIES ctags \ ctags-recursive distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-initdSCRIPTS install-man install-man8 \ install-pdf install-pdf-am install-pkgdataSCRIPTS \ install-pkglibLTLIBRARIES install-ps install-ps-am \ install-sbinSCRIPTS install-strip install-unitsDATA \ installcheck installcheck-am installdirs installdirs-am \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags tags-recursive uninstall uninstall-am \ uninstall-initdSCRIPTS uninstall-man uninstall-man8 \ uninstall-pkgdataSCRIPTS uninstall-pkglibLTLIBRARIES \ uninstall-sbinSCRIPTS uninstall-unitsDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/migrate_activity.cpp0000644000000000000000000000012412046514361024675 xustar000000000000000027 mtime=1352308977.507652 27 atime=1513200576.434725 30 ctime=1513200662.691780407 nordugrid-arc-5.4.2/src/services/a-rex/migrate_activity.cpp0000644000175000002070000002020712046514361024743 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include // #include "../../hed/acc/ARC1/AREXClient.h" #include "../../hed/libs/compute/JobDescription.h" #include "job.h" #include "arex.h" namespace ARex { Arc::MCC_Status ARexService::MigrateActivity(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out,const std::string& clientid) { /* MigrateActivity ActivityIdentifier (wsa:EndpointReferenceType) ActivityDocument jsdl:JobDefinition ForceMigration MigrateActivityResponse ActivityIdentifier (wsa:EndpointReferenceType) ActivityDocument jsdl:JobDefinition NotAuthorizedFault NotAcceptingNewActivitiesFault UnsupportedFeatureFault InvalidRequestMessageFault */ { std::string s; in.GetXML(s); logger_.msg(Arc::VERBOSE, "MigrateActivity: request = \n%s", s); }; Arc::WSAEndpointReference id(in["ActivityIdentifier"]); if(!(Arc::XMLNode)id) { // Wrong request logger_.msg(Arc::ERROR, "MigrateActivitys: no ActivityIdentifier found"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can't find ActivityIdentifier element in request"); InvalidRequestMessageFault(fault,"jsdl:ActivityIdentifier","Element is missing"); out.Destroy(); return Arc::MCC_Status(); }; std::string migrateid = Arc::WSAEndpointReference(id).Address() + "/" + (std::string)Arc::WSAEndpointReference(id).ReferenceParameters()["a-rex:JobID"]; if(migrateid.empty()) { // EPR is wrongly formated or not an A-REX EPR logger_.msg(Arc::ERROR, "MigrateActivity: EPR contains no JobID"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can't find JobID element in ActivityIdentifier"); InvalidRequestMessageFault(fault,"a-rex:JobID","Element is missing"); out.Destroy(); return Arc::MCC_Status(); }; // HPC Basic Profile 1.0 comply (these fault handlings are defined in the KnowARC standards // conformance roadmap 2nd release) // End of the HPC BP 1.0 fault handling part std::string delegation; Arc::XMLNode delegated_token = in["arcdeleg:DelegatedToken"]; if(delegated_token) { // Client wants to delegate credentials if(!delegation_stores_.DelegatedToken(config.GmConfig().DelegationDir(),delegated_token,config.GridName(),delegation)) { // Failed to accept delegation (report as bad request) logger_.msg(Arc::ERROR, "MigrateActivity: Failed to accept delegation"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Failed to accept delegation"); InvalidRequestMessageFault(fault,"arcdeleg:DelegatedToken","This token does not exist"); out.Destroy(); return Arc::MCC_Status(); }; }; if( !(in["ActivityDocument"]["JobDefinition"])) { /* // First try to get job desc from old cluster logger_.msg(Arc::VERBOSE, "MigrateActivity: no job description found try to get it from old cluster"); Arc::MCCConfig cfg; // TODO: //if (!proxyPath.empty()) cfg.AddProxy(delegation); //if (!certificatePath.empty()) //cfg.AddCertificate(certificatePath); //if (!keyPath.empty()) //cfg.AddPrivateKey(keyPath); //if (!caCertificatesDir.empty()) //cfg.AddCADir(caCertificatesDir); Arc::URL url(migrateid); Arc::PathIterator pi(url.Path(), true); url.ChangePath(*pi); Arc::AREXClient ac(url, cfg); Arc::NS ns; ns["a-rex"] = "http://www.nordugrid.org/schemas/a-rex"; ns["bes-factory"] = 
"http://schemas.ggf.org/bes/2006/08/bes-factory"; ns["wsa"] = "http://www.w3.org/2005/08/addressing"; ns["jsdl"] = "http://schemas.ggf.org/jsdl/2005/11/jsdl"; ns["jsdl-posix"] = "http://schemas.ggf.org/jsdl/2005/11/jsdl-posix"; ns["jsdl-arc"] = "http://www.nordugrid.org/ws/schemas/jsdl-arc"; ns["jsdl-hpcpa"] = "http://schemas.ggf.org/jsdl/2006/07/jsdl-hpcpa"; Arc::XMLNode id(ns, "ActivityIdentifier"); id.NewChild("wsa:Address") = url.str(); id.NewChild("wsa:ReferenceParameters").NewChild("a-rex:JobID") = pi.Rest(); std::string idstr; id.GetXML(idstr); std::string desc_str; if (ac.getdesc(idstr,desc_str)){ Arc::JobDescription desc; desc.setSource(desc_str); if (desc.isValid()) { logger_.msg(Arc::INFO,"Valid job description obtained"); if ( !( in["ActivityDocument"] ) ) in.NewChild("bes-factory:ActivityDocument"); Arc::XMLNode XMLdesc; desc.getXML(XMLdesc); in["ActivityDocument"].NewChild(XMLdesc); } else { // Wrongly formatted job description logger_.msg(Arc::ERROR, "MigrateActivity: job description could not be fetch from old cluster"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can't find JobDefinition element in request"); InvalidRequestMessageFault(fault,"jsdl:JobDefinition","Element is missing"); out.Destroy(); return Arc::MCC_Status(); } } */ //else { // Not able to get job description logger_.msg(Arc::ERROR, "MigrateActivity: no job description found"); //logger_.msg(Arc::ERROR, "MigrateActivity: job description could not be fetch from old cluster"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can't find JobDefinition element in request"); InvalidRequestMessageFault(fault,"jsdl:JobDefinition","Element is missing"); out.Destroy(); return Arc::MCC_Status(); //} }; Arc::XMLNode jsdl = in["ActivityDocument"]["JobDefinition"]; Arc::NS ns; // Creating migration XMLNode Arc::XMLNode migration(ns, "Migration"); migration.NewChild("ActivityIdentifier") = migrateid; if( (bool)in["ForceMigration"]){ migration.NewChild("ForceMigration") = (std::string)in["ForceMigration"]; } else { migration.NewChild("ForceMigration") = "true"; } std::string migrationStr; migration.GetDoc(migrationStr, true); logger_.msg(Arc::INFO, "Migration XML sent to AREXJob: %s", migrationStr); JobIDGeneratorARC idgenerator(config.Endpoint()); ARexJob job(jsdl,config,delegation,clientid,logger_,idgenerator,migration); if(!job) { ARexJobFailure failure_type = job; std::string failure = job.Failure(); switch(failure_type) { case ARexJobDescriptionUnsupportedError: { Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Unsupported feature in job description"); UnsupportedFeatureFault(fault,failure); }; break; case ARexJobDescriptionMissingError: { Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Missing needed element in job description"); UnsupportedFeatureFault(fault,failure); }; break; case ARexJobDescriptionLogicalError: { std::string element; std::string::size_type pos = failure.find(' '); if(pos != std::string::npos) { element=failure.substr(0,pos); failure=failure.substr(pos+1); }; Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Logical error in job description"); InvalidRequestMessageFault(fault,element,failure); }; break; default: { logger_.msg(Arc::ERROR, "MigrateActivity: Failed to migrate new job: %s",failure); // Failed to migrate new job (no corresponding BES fault defined - using generic SOAP error) logger_.msg(Arc::ERROR, "MigrateActivity: Failed to migrate new job"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,("Failed to migrate new activity: 
"+failure).c_str()); GenericFault(fault); }; break; }; out.Destroy(); return Arc::MCC_Status(); }; // Make SOAP response Arc::WSAEndpointReference identifier(out.NewChild("bes-factory:ActivityIdentifier")); // Make job's ID identifier.Address(config.Endpoint()); // address of service identifier.ReferenceParameters().NewChild("a-rex:JobID")=job.ID(); identifier.ReferenceParameters().NewChild("a-rex:JobSessionDir")=config.Endpoint()+"/"+job.ID(); out.NewChild(in["ActivityDocument"]); logger_.msg(Arc::VERBOSE, "MigrateActivity finished successfully"); { std::string s; out.GetXML(s); logger_.msg(Arc::VERBOSE, "MigrateActivity: response = \n%s", s); }; /* Needs to kill old job */ return Arc::MCC_Status(Arc::STATUS_OK); } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/cachecheck.cpp0000644000000000000000000000012312046514361023371 xustar000000000000000027 mtime=1352308977.507652 27 atime=1513200576.629727 29 ctime=1513200662.70178053 nordugrid-arc-5.4.2/src/services/a-rex/cachecheck.cpp0000644000175000002070000000567512046514361023454 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "job.h" #include "arex.h" #define CACHE_CHECK_SESSION_DIR_ID "9999999999999999999999999999999" namespace ARex { Arc::MCC_Status ARexService::CacheCheck(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out) { std::vector caches; // use cache dir(s) from conf file try { CacheConfig cache_config(config.GmConfig().CacheParams()); cache_config.substitute(config.GmConfig(), config.User()); caches = cache_config.getCacheDirs(); } catch (CacheConfigException& e) { logger.msg(Arc::ERROR, "Error with cache configuration: %s", e.what()); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Error with cache configuration"); fault.Detail(true).NewChild("CacheConfigurationFault"); out.Destroy(); return Arc::MCC_Status(); } if (caches.empty()) { Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Cache is disabled"); fault.Detail(true).NewChild("CacheDisabledFault"); out.Destroy(); return Arc::MCC_Status(); } Arc::FileCache cache(caches, CACHE_CHECK_SESSION_DIR_ID ,config.User().get_uid(), config.User().get_gid()); if (!cache) { logger.msg(Arc::ERROR, "Error with cache configuration"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Error with cache configuration"); fault.Detail(true).NewChild("CacheConfigurationFault"); out.Destroy(); return Arc::MCC_Status(); } bool fileexist; Arc::XMLNode resp = out.NewChild("CacheCheckResponse"); Arc::XMLNode results = resp.NewChild("CacheCheckResult"); for(int n = 0;;++n) { Arc::XMLNode id = in["CacheCheck"]["TheseFilesNeedToCheck"]["FileURL"][n]; if (!id) break; std::string fileurl = (std::string)in["CacheCheck"]["TheseFilesNeedToCheck"]["FileURL"][n]; Arc::XMLNode resultelement = results.NewChild("Result"); fileexist = false; std::string file_lfn; Arc::initializeCredentialsType cred_type(Arc::initializeCredentialsType::SkipCredentials); Arc::UserConfig usercfg(cred_type); Arc::URL url(fileurl); Arc::DataHandle d(url, usercfg); logger.msg(Arc::INFO, "Looking up URL %s", d->str()); file_lfn = cache.File(d->str()); logger.msg(Arc::INFO, "Cache file is %s", file_lfn); struct stat fileStat; fileexist = (stat(file_lfn.c_str(), &fileStat) == 0) ? true : false; resultelement.NewChild("FileURL") = fileurl; resultelement.NewChild("ExistInTheCache") = (fileexist ? 
"true": "false"); if (fileexist) resultelement.NewChild("FileSize") = Arc::tostring(fileStat.st_size); else resultelement.NewChild("FileSize") = "0"; } return Arc::MCC_Status(Arc::STATUS_OK); } } // namespace nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/put.cpp0000644000000000000000000000012413213710100022123 xustar000000000000000027 mtime=1513066560.143104 27 atime=1513200575.953719 30 ctime=1513200662.697780481 nordugrid-arc-5.4.2/src/services/a-rex/put.cpp0000644000175000002070000001312513213710100022172 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include "PayloadFile.h" #include "job.h" #include "arex.h" #define MAX_CHUNK_SIZE (10*1024*1024) namespace ARex { static Arc::MCC_Status http_put(ARexJob& job,const std::string& hpath,Arc::Logger& logger,Arc::PayloadStreamInterface& stream,FileChunksList& fchunks); static Arc::MCC_Status http_put(ARexJob& job,const std::string& hpath,Arc::Logger& logger,Arc::PayloadRawInterface& buf,FileChunksList& fchunks); Arc::MCC_Status ARexService::Put(Arc::Message& inmsg,Arc::Message& /*outmsg*/,ARexGMConfig& config,std::string id,std::string subpath) { if(id.empty()) return Arc::MCC_Status(); ARexJob job(id,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "Put: there is no job: %s - %s", id, job.Failure()); // TODO: make proper html message return Arc::MCC_Status(); }; Arc::MessagePayload* payload = inmsg.Payload(); if(!payload) { logger_.msg(Arc::ERROR, "Put: there is no payload for file %s in job: %s", subpath, id); return Arc::MCC_Status(); }; Arc::PayloadStreamInterface* stream = NULL; try { stream = dynamic_cast(payload); } catch(std::exception& e) { }; if(stream) return http_put(job,subpath,logger_,*stream,files_chunks_); Arc::PayloadRawInterface* buf = NULL; try { buf = dynamic_cast(payload); } catch(std::exception& e) { }; if(buf) return http_put(job,subpath,logger_,*buf,files_chunks_); logger_.msg(Arc::ERROR, "Put: unrecognized payload for file %s in job: %s", subpath, id); return Arc::MCC_Status(); } static bool write_file(Arc::FileAccess* h,char* buf,size_t size) { for(;size>0;) { ssize_t l = h->fa_write(buf,size); if(l == -1) return false; size-=l; buf+=l; }; return true; } static Arc::MCC_Status http_put(ARexJob& job,const std::string& hpath,Arc::Logger& logger,Arc::PayloadStreamInterface& stream,FileChunksList& fchunks) { // TODO: Use memory mapped file to minimize number of in memory copies // File const int bufsize = 1024*1024; Arc::FileAccess* h = job.CreateFile(hpath.c_str()); if(h == NULL) { // TODO: report something logger.msg(Arc::ERROR, "Put: failed to create file %s for job %s - %s", hpath, job.ID(), job.Failure()); return Arc::MCC_Status(); }; FileChunksRef fc = fchunks.Get(job.ID()+"/"+hpath); if(!fc->Size()) fc->Size(stream.Size()); off_t pos = stream.Pos(); if(h->fa_lseek(pos,SEEK_SET) != pos) { std::string err = Arc::StrError(); h->fa_close(); Arc::FileAccess::Release(h); logger.msg(Arc::ERROR, "Put: failed to set position of file %s for job %s to %Lu - %s", hpath, job.ID(), (unsigned long long int)pos, err); return Arc::MCC_Status(); }; char* buf = new char[bufsize]; if(!buf) { h->fa_close(); Arc::FileAccess::Release(h); logger.msg(Arc::ERROR, "Put: failed to allocate memory for file %s in job %s", hpath, job.ID()); return Arc::MCC_Status(); }; bool got_something = false; for(;;) { int size = bufsize; if(!stream.Get(buf,size)) break; if(size > 
0) got_something = true; if(!write_file(h,buf,size)) { std::string err = Arc::StrError(); delete[] buf; h->fa_close(); Arc::FileAccess::Release(h); logger.msg(Arc::ERROR, "Put: failed to write to file %s for job %s - %s", hpath, job.ID(), err); return Arc::MCC_Status(); }; if(size) fc->Add(pos,size); pos+=size; }; delete[] buf; h->fa_close(); Arc::FileAccess::Release(h); if(fc->Complete()) { job.ReportFileComplete(hpath); } else { // Due to limitation of PayloadStreamInterface it is not possible to // directly distingush between zero sized file and file with undefined // size. But by applying some dynamic heuristics it possible. // TODO: extend/modify PayloadStreamInterface. if((stream.Size() == 0) && (stream.Pos() == 0) && (!got_something)) { job.ReportFileComplete(hpath); } } return Arc::MCC_Status(Arc::STATUS_OK); } static Arc::MCC_Status http_put(ARexJob& job,const std::string& hpath,Arc::Logger& logger,Arc::PayloadRawInterface& buf,FileChunksList& fchunks) { // File Arc::FileAccess* h = job.CreateFile(hpath.c_str()); if(h == NULL) { // TODO: report something logger.msg(Arc::ERROR, "Put: failed to create file %s for job %s - %s", hpath, job.ID(), job.Failure()); return Arc::MCC_Status(); }; FileChunksRef fc = fchunks.Get(job.ID()+"/"+hpath); bool got_something = false; if(!fc->Size()) fc->Size(buf.Size()); for(int n = 0;;++n) { char* sbuf = buf.Buffer(n); if(sbuf == NULL) break; off_t offset = buf.BufferPos(n); off_t size = buf.BufferSize(n); if(size > 0) { got_something = true; off_t o = h->fa_lseek(offset,SEEK_SET); if(o != offset) { h->fa_close(); Arc::FileAccess::Release(h); return Arc::MCC_Status(); }; if(!write_file(h,sbuf,size)) { h->fa_close(); Arc::FileAccess::Release(h); return Arc::MCC_Status(); }; if(size) fc->Add(offset,size); }; }; h->fa_close(); Arc::FileAccess::Release(h); if(fc->Complete()) { job.ReportFileComplete(hpath); } else { if((buf.Size() == 0) && (!got_something)) { job.ReportFileComplete(hpath); } } return Arc::MCC_Status(Arc::STATUS_OK); } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/test_cache_check.cpp0000644000000000000000000000012412106724427024573 xustar000000000000000027 mtime=1360767255.383414 27 atime=1513200576.412725 30 ctime=1513200662.709780628 nordugrid-arc-5.4.2/src/services/a-rex/test_cache_check.cpp0000644000175000002070000000247112106724427024644 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include int main(void) { Arc::Logger logger(Arc::Logger::rootLogger, "Test"); Arc::LogStream logcerr(std::cerr); Arc::Logger::rootLogger.addDestination(logcerr); logger.msg(Arc::INFO, "Creating client side chain"); std::string id; std::string url("https://localhost/arex"); Arc::NS ns("a-rex", "http://www.nordugrid.org/schemas/a-rex"); Arc::MCCConfig cfg; Arc::UserConfig uc; uc.ApplyToConfig(cfg); Arc::ClientSOAP client(cfg, url, 60); std::string faultstring; Arc::PayloadSOAP request(ns); Arc::XMLNode req = request.NewChild("a-rex:CacheCheck").NewChild("a-rex:TheseFilesNeedToCheck"); req.NewChild("a-rex:FileURL") = "http://example.org/test.txt"; Arc::PayloadSOAP* response; Arc::MCC_Status status = client.process(&request, &response); if (!status) { std::cerr << "Request failed" << std::endl; } std::string str; response->GetDoc(str, true); std::cout << str << std::endl; return 0; } nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/lrms0000644000000000000000000000013213214316027021521 
xustar000000000000000030 mtime=1513200663.312788002 30 atime=1513200668.717854109 30 ctime=1513200663.312788002 nordugrid-arc-5.4.2/src/services/a-rex/lrms/0000755000175000002070000000000013214316027021644 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/lrms/PaxHeaders.7502/submit_common.sh.in0000644000000000000000000000012713213472040025413 xustar000000000000000027 mtime=1512993824.886731 30 atime=1513200650.236628076 30 ctime=1513200663.304787905 nordugrid-arc-5.4.2/src/services/a-rex/lrms/submit_common.sh.in0000644000175000002070000007177513213472040025476 0ustar00mockbuildmock00000000000000###################################################### #Common functions for submit scripts ###################################################### # This script should not be executed directly but is sourced in # from various backend scripts that itself are called from the # grid manager. Its purpose is to prepare the runtime environments, # which is almost the same procedure invariant of the backend # used. # # To test the functionality of this script, you may source # this script from the bourne command shell, i.e. # # . /opt/nordugrid/libexec/submit_common.sh # # and it should give no error. Then call the methods defined in # this script directly. Also, you can test the parse_arg_file # by executing # # /opt/nordugrid/libexec/submit_common.sh # # directly. More tests still require to be added. sourcewithargs () { script=$1 shift . $script } init () { parse_arg_file $1 if [ -z "$pkgdatadir" ]; then echo 'pkgdatadir must be set' 1>&2; exit 1; fi if [ -z "$joboption_lrms" ]; then echo 'joboption_lrms must be set' 1>&2; exit 1; fi # May use joboption_queue (set by parse_arg_file) . ${pkgdatadir}/configure-${joboption_lrms}-env.sh # Where runtime scripts can be found on computing nodes (empty if does not exist) RUNTIME_CONFIG_DIR=$CONFIG_runtimedir export RUNTIME_CONFIG_DIR # Description of (cross-)mounted disc space on cluster RUNTIME_FRONTEND_SEES_NODE=$CONFIG_shared_scratch RUNTIME_NODE_SEES_FRONTEND=$CONFIG_shared_filesystem RUNTIME_LOCAL_SCRATCH_DIR=$CONFIG_scratchdir #default is NFS if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then RUNTIME_NODE_SEES_FRONTEND=yes fi # locally empty means no if [ "${RUNTIME_NODE_SEES_FRONTEND}" = 'no' ] ; then RUNTIME_NODE_SEES_FRONTEND= fi # Only CPU time specified in job limits, rough limit for wall time walltime_ratio='1' # Use specified CPU time as soft limit, allow to run a bit longer before hard limit time_hardlimit_ratio='1/1' # Use specified memory requirement as soft limit, allow a bit more before hard limit memory_hardlimit_ratio='1/1' # Where to store temporary files on gatekeeper TMPDIR=${TMPDIR:-@tmp_dir@} # Where GNU time utility is located on computing nodes (empty if does not exist) GNU_TIME=${CONFIG_gnu_time:-@gnu_time@} # Command to get name of executing node NODENAME=${CONFIG_nodename:-"@nodename@"} } read_arc_conf () { ARC_CONFIG=${ARC_CONFIG:-/etc/arc.conf} . $pkgdatadir/config_parser_compat.sh || exit $? config_parse_file $ARC_CONFIG 1>&2 || exit $? config_import_section "common" config_import_section "cluster" # Also read queue section if [ ! -z "$joboption_queue" ]; then if ! 
config_import_section "queue/$joboption_queue"; then echo "ERROR: Job requested invalid share: $joboption_queue" 1>&2 exit 1 fi fi } usage="usage: `basename $0` (|-h|--help)" parse_arg_file () { arg_file=$1 if [ -z "$arg_file" ] ; then echo "Arguments file should be specified" 1>&2 echo "$usage" 1>&2 exit 1 fi if [ "--help" = "$1" -o "-h" = "$1" ]; then echo "$usage" 1>&2 cat <&2 This script should not be executed directly but it is called from the grid manager. EOHELP exit 1 fi if [ ! -f $arg_file ] ; then echo "No such arguments file at '$arg_file'" 1>&2 echo "$usage" 1>&2 exit 1 fi . $arg_file if [ -z "$joboption_controldir" ] ; then joboption_controldir=`dirname "$arg_file"` if [ "$joboption_controldir" = '.' ] ; then joboption_controldir="$PWD" fi fi if [ -z "$joboption_gridid" ] ; then joboption_gridid=`basename "$arg_file" | sed 's/^job\.\(.*\)\.grami$/\1/'` fi ############################################################## # combine arguments to command - easier to use ############################################################## i=0 joboption_args= eval "var_is_set=\${joboption_arg_$i+yes}" while [ ! -z "${var_is_set}" ] ; do eval "var_value=\${joboption_arg_$i}" # Use -- to avoid echo eating arguments it understands var_value=`echo -- "$var_value" |cut -f2- -d' '| sed 's/\\\\/\\\\\\\\/g' | sed 's/"/\\\"/g'` joboption_args="$joboption_args \"${var_value}\"" i=$(( $i + 1 )) eval "var_is_set=\${joboption_arg_$i+yes}" done } # # Exits with 0 if the argument is all digits # is_number () { /usr/bin/perl -e 'exit 1 if $ARGV[0] !~ m/^\d+$/' "$1" } # # Sets a default memory limit for jobs that don't have one # set_req_mem () { if ! is_number "$joboption_memory"; then echo "---------------------------------------------------------------------" 1>&2 echo "WARNING: The job description contains no explicit memory requirement." 1>&2 if is_number "$CONFIG_defaultmemory"; then joboption_memory=$CONFIG_defaultmemory echo " A default memory limit taken from 'defaultmemory' in " 1>&2 echo " arc.conf will apply. " 1>&2 echo " Limit is: $CONFIG_defaultmemory mb. " 1>&2 elif is_number "$CONFIG_nodememory"; then joboption_memory=$CONFIG_nodememory echo " A default memory limit taken from 'nodememory' in arc.conf " 1>&2 echo " will apply. You may want to set 'defaultmemory' to something" 1>&2 echo " else in arc.conf to better handle jobs with no memory " 1>&2 echo " specified. " 1>&2 echo " Limit is: $CONFIG_nodememory mb. " 1>&2 else joboption_memory=1000 echo " nodememory is not specified in arc.conf. A default " 1>&2 echo " memory limit of 1GB will apply. " 1>&2 fi echo "---------------------------------------------------------------------" 1>&2 fi if [ "$joboption_localtransfer" = 'yes' ] && [ "$joboption_memory" -lt 1000 ]; then echo "---------------------------------------------------------------------" 1>&2 echo "WARNING: localtransfers are enabled and job has less than 1GB of " 1>&2 echo " ram. up- and downloaders take up a lot of ram, this can give" 1>&2 echo " you problems. " 1>&2 echo "---------------------------------------------------------------------" 1>&2 fi } set_count () { if [ -z "$joboption_count" ] || [ "$joboption_count" -le 1 ] ; then joboption_count=1 joboption_countpernode=-1 joboption_numnodes=-1 fi } ############################################################## # Zero stage of runtime environments ############################################################## # should we write failures to job.111.failed like the old pbs? 
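# Illustrative sketch (not invoked by any backend): how the helpers defined
# above behave for a hypothetical grami file; the path and job id used here
# are examples only.
#
#   . ./submit_common.sh                         # sourcing only defines functions
#   parse_arg_file /tmp/control/job.abc123.grami
#   #  -> joboption_controldir=/tmp/control      (when not set by the grami file)
#   #  -> joboption_gridid=abc123
#   #  -> joboption_args holds every joboption_arg_<n>, individually quoted
#   set_req_mem
#   #  -> joboption_memory falls back to CONFIG_defaultmemory, then
#   #     CONFIG_nodememory, then 1000 MB, if the job requested none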
RTE_stage0 () { joboption_num=0 eval "var_is_set=\${joboption_runtime_$joboption_num+yes}" while [ ! -z "${var_is_set}" ] ; do eval "var_value=\${joboption_runtime_$joboption_num}" if [ -r "$RUNTIME_CONFIG_DIR/${var_value}" ] ; then arg_num=1 args_value="0 " eval "arg_is_set=\${joboption_runtime_${joboption_num}_${arg_num}+yes}" while [ ! -z "${arg_is_set}" ] ; do eval "arg_value=\${joboption_runtime_${joboption_num}_${arg_num}}" # Use -- to avoid echo eating arguments it understands arg_value=`echo -- "$arg_value" |cut -f2- -d' '| sed 's/\\\\/\\\\\\\\/g' | sed 's/"/\\\"/g'` args_value="$args_value \"${arg_value}\"" arg_num=$(( $arg_num + 1 )) eval "arg_is_set=\${joboption_runtime_${joboption_num}_${arg_num}+yes}" done #eval ". \"$RUNTIME_CONFIG_DIR/${var_value}\" $args_value" 1>&2 cmdl="$RUNTIME_CONFIG_DIR/${var_value}" sourcewithargs $cmdl $args_value 1>&2 if [ $? -ne '0' ] ; then echo "ERROR: runtime script ${var_value} failed" 1>&2 exit 1 fi else echo "Warning: runtime script ${var_value} is missing" 1>&2 #should we exit here? fi joboption_num=$(( $joboption_num + 1 )) eval "var_is_set=\${joboption_runtime_$joboption_num+yes}" done # joboption_count might have been changed by an RTE. Save it for accounting purposes. if [ -n "$joboption_count" ]; then diagfile="${joboption_controldir}/job.${joboption_gridid}.diag" echo "Processors=$joboption_count" >> "$diagfile" if [ -n "$joboption_numnodes" ]; then echo "Nodecount=$joboption_numnodes" >> "$diagfile" fi fi } ############################################################## # create temp job script ############################################################## mktempscript () { # File name to be used for temporary job script LRMS_JOB_SCRIPT=`mktemp ${TMPDIR}/${joboption_lrms}_job_script.XXXXXX` echo "Created file $LRMS_JOB_SCRIPT" if [ -z "$LRMS_JOB_SCRIPT" ] ; then echo "Creation of temporary file failed" exit 1 fi LRMS_JOB_OUT="${LRMS_JOB_SCRIPT}.out" touch $LRMS_JOB_OUT LRMS_JOB_ERR="${LRMS_JOB_SCRIPT}.err" touch $LRMS_JOB_ERR if [ ! -f "$LRMS_JOB_SCRIPT" ] || [ ! -f "$LRMS_JOB_OUT" ] || [ ! -f "$LRMS_JOB_ERR" ] ; then echo "Something is wrong. Either somebody is deleting files or I cannot write to ${TMPDIR}" rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" exit 1 fi } ############################################################## # Execution times (obtained in seconds) ############################################################## ############################################################## # Add environment variables ############################################################## add_user_env () { echo "# Setting environment variables as specified by user" >> $LRMS_JOB_SCRIPT has_gridglobalid='' i=0 eval "var_is_set=\${joboption_env_$i+yes}" while [ ! 
-z "${var_is_set}" ] ; do eval "var_value=\${joboption_env_$i}" if [ "$var_value" ] && [ -z "${var_value##GRID_GLOBAL_JOBID=*}" ]; then has_gridglobalid=yes fi var_escaped=`echo "$var_value" | sed "s/'/'\\\\\''/g"` echo "export '${var_escaped}'" >> $LRMS_JOB_SCRIPT i=$(( $i + 1 )) eval "var_is_set=\${joboption_env_$i+yes}" done # guess globalid in case not already provided if [ -z "$has_gridglobalid" ]; then hostname=`/usr/bin/perl -MSys::Hostname -we 'print hostname'` hostname=${CONFIG_hostname:-$hostname} gm_port=${CONFIG_gm_port:-2811} gm_mount_point=${CONFIG_gm_mount_point:-/jobs} echo "export GRID_GLOBAL_JOBID='gsiftp://$hostname:$gm_port$gm_mount_point/$joboption_gridid'" >> $LRMS_JOB_SCRIPT fi echo "" >> $LRMS_JOB_SCRIPT } sourcewithargs_jobscript () { echo "# source with arguments for DASH shells" >> $LRMS_JOB_SCRIPT echo "sourcewithargs() {" >> $LRMS_JOB_SCRIPT echo "script=\$1" >> $LRMS_JOB_SCRIPT echo "shift" >> $LRMS_JOB_SCRIPT echo ". \$script" >> $LRMS_JOB_SCRIPT echo "}" >> $LRMS_JOB_SCRIPT } ############################################################## # Runtime configuration ############################################################## RTE_stage1 () { echo "# Running runtime scripts" >> $LRMS_JOB_SCRIPT echo "export RUNTIME_CONFIG_DIR=\${RUNTIME_CONFIG_DIR:-$RUNTIME_CONFIG_DIR}" >> $LRMS_JOB_SCRIPT i=0 eval "var_is_set=\${joboption_runtime_$i+yes}" echo "runtimeenvironments=" >> $LRMS_JOB_SCRIPT while [ ! -z "${var_is_set}" ] ; do if [ "$i" = '0' ] ; then echo "if [ ! -z \"\$RUNTIME_CONFIG_DIR\" ] ; then" >> $LRMS_JOB_SCRIPT fi eval "var_value=\"\${joboption_runtime_$i}\"" echo " if [ -r \"\${RUNTIME_CONFIG_DIR}/${var_value}\" ] ; then" >> $LRMS_JOB_SCRIPT echo " runtimeenvironments=\"\${runtimeenvironments}${var_value};\"" >> $LRMS_JOB_SCRIPT arg_num=1 args_value="1 " eval "arg_is_set=\${joboption_runtime_${i}_${arg_num}+yes}" while [ ! -z "${arg_is_set}" ] ; do eval "arg_value=\${joboption_runtime_${i}_${arg_num}}" # Use -- to avoid echo eating arguments it understands arg_value=`printf "%s" "$arg_value" | sed 's/"/\\\\\\\"/g'` args_value="$args_value \"${arg_value}\"" arg_num=$(( $arg_num + 1 )) eval "arg_is_set=\${joboption_runtime_${i}_${arg_num}+yes}" done #echo " . \${RUNTIME_CONFIG_DIR}/${var_value} $args_value " >> $LRMS_JOB_SCRIPT echo " cmdl=\${RUNTIME_CONFIG_DIR}/${var_value} " >> $LRMS_JOB_SCRIPT printf " sourcewithargs \$cmdl $args_value \n" >> $LRMS_JOB_SCRIPT echo " if [ \$? -ne '0' ] ; then " >> $LRMS_JOB_SCRIPT echo " echo \"Runtime ${var_value} script failed \" 1>&2 " >> $LRMS_JOB_SCRIPT echo " echo \"Runtime ${var_value} script failed \" 1>\"\$RUNTIME_JOB_DIAG\" " >> $LRMS_JOB_SCRIPT echo " exit 1" >> $LRMS_JOB_SCRIPT echo " fi " >> $LRMS_JOB_SCRIPT echo " fi" >> $LRMS_JOB_SCRIPT i=$(( $i + 1 )) eval "var_is_set=\${joboption_runtime_$i+yes}" done if [ ! "$i" = '0' ] ; then echo "fi" >> $LRMS_JOB_SCRIPT fi echo "" >> $LRMS_JOB_SCRIPT } ####################################################################### # copy information useful for transfering files to/from node directly ####################################################################### setup_local_transfer () { RUNTIME_CONTROL_DIR=`mktemp ${joboption_directory}/control.XXXXXX` if [ -z "$RUNTIME_CONTROL_DIR" ] ; then echo 'Failed to choose name for temporary control directory' 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" exit 1 fi rm -f "$RUNTIME_CONTROL_DIR" mkdir "$RUNTIME_CONTROL_DIR" if [ $? 
-ne '0' ] ; then echo 'Failed to create temporary control directory' 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" exit 1 fi chmod go-rwx,u+rwx "${RUNTIME_CONTROL_DIR}" echo '' >"${RUNTIME_CONTROL_DIR}/job.local.proxy" chmod go-rw,u+r,a-x "${RUNTIME_CONTROL_DIR}/job.local.proxy" cat "${joboption_controldir}/job.${joboption_gridid}.proxy" >"${RUNTIME_CONTROL_DIR}/job.local.proxy" cat "${joboption_controldir}/job.${joboption_gridid}.input" >"${RUNTIME_CONTROL_DIR}/job.local.input" cat "${joboption_controldir}/job.${joboption_gridid}.output">"${RUNTIME_CONTROL_DIR}/job.local.output" cat "${joboption_controldir}/job.${joboption_gridid}.local">"${RUNTIME_CONTROL_DIR}/job.local.local" RUNTIME_CONTROL_DIR_REL=`basename "$RUNTIME_CONTROL_DIR"` echo "$RUNTIME_CONTROL_DIR_REL *.*" >>"${RUNTIME_CONTROL_DIR}/job.local.input" echo "$RUNTIME_CONTROL_DIR_REL" >>"${RUNTIME_CONTROL_DIR}/job.local.output" echo "$RUNTIME_CONTROL_DIR_REL" >>"${joboption_controldir}/job.${joboption_gridid}.output" RUNTIME_STDOUT_REL=`echo "${joboption_stdout}" | sed "s#^${joboption_directory}##"` RUNTIME_STDERR_REL=`echo "${joboption_stderr}" | sed "s#^${joboption_directory}##"` echo "$RUNTIME_STDOUT_REL *.*" >>"${RUNTIME_CONTROL_DIR}/job.local.input" echo "$RUNTIME_STDERR_REL *.*" >>"${RUNTIME_CONTROL_DIR}/job.local.input" echo "RUNTIME_CONTROL_DIR=$RUNTIME_CONTROL_DIR" >> $LRMS_JOB_SCRIPT } ####################################################################### #uploading output files ####################################################################### upload_output_files () { if [ "$joboption_localtransfer" = 'yes' ] ; then echo "UPLOADER=\${UPLOADER:-$ARC_LOCATION/@pkglibexecsubdir@/uploader}" >> $LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<'EOSCR' if [ "$RESULT" = '0' ] ; then $UPLOADER -p -c 'local' "$RUNTIME_CONTROL_DIR" "$RUNTIME_JOB_DIR" 2>>${RUNTIME_CONTROL_DIR}/job.local.errors if [ $? -ne '0' ] ; then echo 'ERROR: Uploader failed.' 1>&2 if [ "$RESULT" = '0' ] ; then RESULT=1 ; fi fi fi rm -f "${RUNTIME_CONTROL_DIR}/job.local.proxy" EOSCR fi } ####################################################################### # downloading input files (this might fail for fork) ####################################################################### download_input_files () { if [ "$joboption_localtransfer" = 'yes' ] ; then RUNTIME_ARC_LOCATION="$ARC_LOCATION" RUNTIME_GLOBUS_LOCATION="$GLOBUS_LOCATION" echo "ARC_LOCATION=\${ARC_LOCATION:-$RUNTIME_ARC_LOCATION}" >> $LRMS_JOB_SCRIPT echo "GLOBUS_LOCATION=\${GLOBUS_LOCATION:-$RUNTIME_GLOBUS_LOCATION}" >> $LRMS_JOB_SCRIPT echo "DOWNLOADER=\${DOWNLOADER:-$ARC_LOCATION/@pkglibexecsubdir@/downloader}" >> $LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<'EOSCR' if [ -z "$ARC_LOCATION" ] ; then echo 'Variable ARC_LOCATION is not set' 1>&2 exit 1 fi if [ -z "$GLOBUS_LOCATION" ] ; then echo 'Variable GLOBUS_LOCATION is not set' 1>&2 exit 1 fi export GLOBUS_LOCATION export ARC_LOCATION if [ "x$LD_LIBRARY_PATH" = "x" ]; then export LD_LIBRARY_PATH="$GLOBUS_LOCATION/lib" else export LD_LIBRARY_PATH="$GLOBUS_LOCATION/lib:$LD_LIBRARY_PATH" fi export SASL_PATH="$GLOBUS_LOCATION/lib/sasl" export X509_USER_KEY="${RUNTIME_CONTROL_DIR}/job.local.proxy" export X509_USER_CERT="${RUNTIME_CONTROL_DIR}/job.local.proxy" export X509_USER_PROXY="${RUNTIME_CONTROL_DIR}/job.local.proxy" unset X509_RUN_AS_SERVER $DOWNLOADER -p -c 'local' "$RUNTIME_CONTROL_DIR" "$RUNTIME_JOB_DIR" 2>>${RUNTIME_CONTROL_DIR}/job.local.errors if [ $? -ne '0' ] ; then echo 'ERROR: Downloader failed.' 
1>&2 exit 1 fi EOSCR fi } ############################################################## # Add std... to job arguments ############################################################## include_std_streams () { input_redirect= output_redirect= if [ ! -z "$joboption_stdin" ] ; then input_redirect="<\$RUNTIME_JOB_STDIN" fi if [ ! -z "$joboption_stdout" ] ; then output_redirect="1>\$RUNTIME_JOB_STDOUT" fi if [ ! -z "$joboption_stderr" ] ; then if [ "$joboption_stderr" = "$joboption_stdout" ] ; then output_redirect="$output_redirect 2>&1" else output_redirect="$output_redirect 2>\$RUNTIME_JOB_STDERR" fi fi } ############################################################## # Runtime configuration ############################################################## configure_runtime () { i=0 eval "var_is_set=\${joboption_runtime_$i+yes}" while [ ! -z "${var_is_set}" ] ; do if [ "$i" = '0' ] ; then echo "if [ ! -z \"\$RUNTIME_CONFIG_DIR\" ] ; then" >> $LRMS_JOB_SCRIPT fi eval "var_value=\"\${joboption_runtime_$i}\"" echo " if [ -r \"\${RUNTIME_CONFIG_DIR}/${var_value}\" ] ; then" >> $LRMS_JOB_SCRIPT arg_num=1 args_value="2 " eval "arg_is_set=\${joboption_runtime_${i}_${arg_num}+yes}" while [ ! -z "${arg_is_set}" ] ; do eval "arg_value=\${joboption_runtime_${i}_${arg_num}}" # Use -- to avoid echo eating arguments it understands arg_value=`printf -- "$arg_value" | sed 's/\\\\/\\\\\\\\/g' | sed 's/"/\\\\\\\"/g'` args_value="$args_value \"${arg_value}\"" arg_num=$(( $arg_num + 1 )) eval "arg_is_set=\${joboption_runtime_${i}_${arg_num}+yes}" done echo " cmdl=\${RUNTIME_CONFIG_DIR}/${var_value}" >> $LRMS_JOB_SCRIPT printf " sourcewithargs \$cmdl $args_value \n" >> $LRMS_JOB_SCRIPT echo " fi" >> $LRMS_JOB_SCRIPT i=$(( $i + 1 )) eval "var_is_set=\${joboption_runtime_$i+yes}" done if [ ! "$i" = '0' ] ; then echo "fi" >> $LRMS_JOB_SCRIPT fi echo "" >> $LRMS_JOB_SCRIPT } ############################################################## # move files to node ############################################################## move_files_to_node () { if [ "$joboption_count" -eq 1 ] || [ ! -z "$RUNTIME_ENABLE_MULTICORE_SCRATCH" ] || [ "$joboption_count" -eq "$joboption_countpernode" ]; then echo "RUNTIME_LOCAL_SCRATCH_DIR=\${RUNTIME_LOCAL_SCRATCH_DIR:-$RUNTIME_LOCAL_SCRATCH_DIR}" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_LOCAL_SCRATCH_DIR=\${RUNTIME_LOCAL_SCRATCH_DIR:-}" >> $LRMS_JOB_SCRIPT fi echo "RUNTIME_FRONTEND_SEES_NODE=\${RUNTIME_FRONTEND_SEES_NODE:-$RUNTIME_FRONTEND_SEES_NODE}" >> $LRMS_JOB_SCRIPT echo "RUNTIME_NODE_SEES_FRONTEND=\${RUNTIME_NODE_SEES_FRONTEND:-$RUNTIME_NODE_SEES_FRONTEND}" >> $LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<'EOSCR' if [ ! -z "$RUNTIME_LOCAL_SCRATCH_DIR" ] && [ ! -z "$RUNTIME_NODE_SEES_FRONTEND" ]; then RUNTIME_NODE_JOB_DIR="$RUNTIME_LOCAL_SCRATCH_DIR"/`basename "$RUNTIME_JOB_DIR"` rm -rf "$RUNTIME_NODE_JOB_DIR" mkdir -p "$RUNTIME_NODE_JOB_DIR" # move directory contents for f in "$RUNTIME_JOB_DIR"/.* "$RUNTIME_JOB_DIR"/*; do [ "$f" = "$RUNTIME_JOB_DIR/*" ] && continue # glob failed, no files [ "$f" = "$RUNTIME_JOB_DIR/." ] && continue [ "$f" = "$RUNTIME_JOB_DIR/.." ] && continue [ "$f" = "$RUNTIME_JOB_DIR/.diag" ] && continue [ "$f" = "$RUNTIME_JOB_DIR/.comment" ] && continue if ! mv "$f" "$RUNTIME_NODE_JOB_DIR"; then echo "Failed to move '$f' to '$RUNTIME_NODE_JOB_DIR'" 1>&2 exit 1 fi done if [ ! 
-z "$RUNTIME_FRONTEND_SEES_NODE" ] ; then # creating link for whole directory ln -s "$RUNTIME_FRONTEND_SEES_NODE"/`basename "$RUNTIME_JOB_DIR"` "$RUNTIME_JOB_DIR" else # keep stdout, stderr and control directory on frontend # recreate job directory mkdir -p "$RUNTIME_JOB_DIR" # make those files mkdir -p `dirname "$RUNTIME_JOB_STDOUT"` mkdir -p `dirname "$RUNTIME_JOB_STDERR"` touch "$RUNTIME_JOB_STDOUT" touch "$RUNTIME_JOB_STDERR" RUNTIME_JOB_STDOUT__=`echo "$RUNTIME_JOB_STDOUT" | sed "s#^${RUNTIME_JOB_DIR}#${RUNTIME_NODE_JOB_DIR}#"` RUNTIME_JOB_STDERR__=`echo "$RUNTIME_JOB_STDERR" | sed "s#^${RUNTIME_JOB_DIR}#${RUNTIME_NODE_JOB_DIR}#"` rm "$RUNTIME_JOB_STDOUT__" 2>/dev/null rm "$RUNTIME_JOB_STDERR__" 2>/dev/null if [ ! -z "$RUNTIME_JOB_STDOUT__" ] && [ "$RUNTIME_JOB_STDOUT" != "$RUNTIME_JOB_STDOUT__" ]; then ln -s "$RUNTIME_JOB_STDOUT" "$RUNTIME_JOB_STDOUT__" fi if [ "$RUNTIME_JOB_STDOUT__" != "$RUNTIME_JOB_STDERR__" ] ; then if [ ! -z "$RUNTIME_JOB_STDERR__" ] && [ "$RUNTIME_JOB_STDERR" != "$RUNTIME_JOB_STDERR__" ]; then ln -s "$RUNTIME_JOB_STDERR" "$RUNTIME_JOB_STDERR__" fi fi if [ ! -z "$RUNTIME_CONTROL_DIR" ] ; then # move control directory back to frontend RUNTIME_CONTROL_DIR__=`echo "$RUNTIME_CONTROL_DIR" | sed "s#^${RUNTIME_JOB_DIR}#${RUNTIME_NODE_JOB_DIR}#"` mv "$RUNTIME_CONTROL_DIR__" "$RUNTIME_CONTROL_DIR" fi fi # adjust stdin,stdout & stderr pointers RUNTIME_JOB_STDIN=`echo "$RUNTIME_JOB_STDIN" | sed "s#^${RUNTIME_JOB_DIR}#${RUNTIME_NODE_JOB_DIR}#"` RUNTIME_JOB_STDOUT=`echo "$RUNTIME_JOB_STDOUT" | sed "s#^${RUNTIME_JOB_DIR}#${RUNTIME_NODE_JOB_DIR}#"` RUNTIME_JOB_STDERR=`echo "$RUNTIME_JOB_STDERR" | sed "s#^${RUNTIME_JOB_DIR}#${RUNTIME_NODE_JOB_DIR}#"` RUNTIME_FRONTEND_JOB_DIR="$RUNTIME_JOB_DIR" RUNTIME_JOB_DIR="$RUNTIME_NODE_JOB_DIR" fi if [ -z "$RUNTIME_NODE_SEES_FRONTEND" ] ; then mkdir -p "$RUNTIME_JOB_DIR" fi EOSCR } ############################################################## # move files back to frontend ############################################################## move_files_to_frontend () { cat >> $LRMS_JOB_SCRIPT <<'EOSCR' if [ ! -z "$RUNTIME_LOCAL_SCRATCH_DIR" ] && [ ! -z "$RUNTIME_NODE_SEES_FRONTEND" ]; then if [ ! -z "$RUNTIME_FRONTEND_SEES_NODE" ] ; then # just move it rm -rf "$RUNTIME_FRONTEND_JOB_DIR" destdir=`dirname "$RUNTIME_FRONTEND_JOB_DIR"` if ! mv "$RUNTIME_NODE_JOB_DIR" "$destdir"; then echo "Failed to move '$RUNTIME_NODE_JOB_DIR' to '$destdir'" 1>&2 RESULT=1 fi else # remove links rm -f "$RUNTIME_JOB_STDOUT" 2>/dev/null rm -f "$RUNTIME_JOB_STDERR" 2>/dev/null # move directory contents for f in "$RUNTIME_NODE_JOB_DIR"/.* "$RUNTIME_NODE_JOB_DIR"/*; do [ "$f" = "$RUNTIME_NODE_JOB_DIR/*" ] && continue # glob failed, no files [ "$f" = "$RUNTIME_NODE_JOB_DIR/." ] && continue [ "$f" = "$RUNTIME_NODE_JOB_DIR/.." ] && continue [ "$f" = "$RUNTIME_NODE_JOB_DIR/.diag" ] && continue [ "$f" = "$RUNTIME_NODE_JOB_DIR/.comment" ] && continue if ! 
mv "$f" "$RUNTIME_FRONTEND_JOB_DIR"; then echo "Failed to move '$f' to '$RUNTIME_FRONTEND_JOB_DIR'" 1>&2 RESULT=1 fi done rm -rf "$RUNTIME_NODE_JOB_DIR" fi fi echo "exitcode=$RESULT" >> "$RUNTIME_JOB_DIAG" exit $RESULT EOSCR } ############################################################## # copy runtime settings to jobscript ############################################################## setup_runtime_env () { echo "RUNTIME_JOB_DIR=$joboption_directory" >> $LRMS_JOB_SCRIPT echo "RUNTIME_JOB_STDIN=$joboption_stdin" >> $LRMS_JOB_SCRIPT echo "RUNTIME_JOB_STDOUT=$joboption_stdout" >> $LRMS_JOB_SCRIPT echo "RUNTIME_JOB_STDERR=$joboption_stderr" >> $LRMS_JOB_SCRIPT echo "RUNTIME_JOB_DIAG=${joboption_directory}.diag" >> $LRMS_JOB_SCRIPT # Adjust working directory for tweaky nodes # RUNTIME_GRIDAREA_DIR should be defined by external means on nodes echo "if [ ! -z \"\$RUNTIME_GRIDAREA_DIR\" ] ; then" >> $LRMS_JOB_SCRIPT echo " RUNTIME_JOB_DIR=\$RUNTIME_GRIDAREA_DIR/\`basename \$RUNTIME_JOB_DIR\`" >> $LRMS_JOB_SCRIPT echo " RUNTIME_JOB_STDIN=\`echo \"\$RUNTIME_JOB_STDIN\" | sed \"s#^\$RUNTIME_JOB_DIR#\$RUNTIME_GRIDAREA_DIR#\"\`" >> $LRMS_JOB_SCRIPT echo " RUNTIME_JOB_STDOUT=\`echo \"\$RUNTIME_JOB_STDOUT\" | sed \"s#^\$RUNTIME_JOB_DIR#\$RUNTIME_GRIDAREA_DIR#\"\`" >> $LRMS_JOB_SCRIPT echo " RUNTIME_JOB_STDERR=\`echo \"\$RUNTIME_JOB_STDERR\" | sed \"s#^\$RUNTIME_JOB_DIR#\$RUNTIME_GRIDAREA_DIR#\"\`" >> $LRMS_JOB_SCRIPT echo " RUNTIME_JOB_DIAG=\`echo \"\$RUNTIME_JOB_DIAG\" | sed \"s#^\$RUNTIME_JOB_DIR#\$RUNTIME_GRIDAREA_DIR#\"\`" >> $LRMS_JOB_SCRIPT echo " RUNTIME_CONTROL_DIR=\`echo \"\$RUNTIME_CONTROL_DIR\" | sed \"s#^\$RUNTIME_JOB_DIR#\$RUNTIME_GRIDAREA_DIR#\"\`" >> $LRMS_JOB_SCRIPT echo "fi" >> $LRMS_JOB_SCRIPT } ############################################################## # change to runtime dir and setup timed run ############################################################## cd_and_run () { cat >> $LRMS_JOB_SCRIPT <<'EOSCR' # Changing to session directory HOME=$RUNTIME_JOB_DIR export HOME if ! cd "$RUNTIME_JOB_DIR"; then echo "Failed to switch to '$RUNTIME_JOB_DIR'" 1>&2 RESULT=1 fi if [ ! -z "$RESULT" ] && [ "$RESULT" != 0 ]; then exit $RESULT fi EOSCR if [ ! -z "$NODENAME" ] ; then if [ -z "$NODENAME_WRITTEN" ] ; then echo "nodename=\`$NODENAME\`" >> $LRMS_JOB_SCRIPT echo "echo \"nodename=\$nodename\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT fi fi #TODO this should probably be done on headnode instead echo "echo \"Processors=${joboption_count}\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT # In case the job executable does not exist the error message might be # printed by GNU_TIME, which can be confusing for the user. # This will print more appropriate error message. echo "executable='$joboption_arg_0'" >> $LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<'EOSCR' # Check if executable exists if [ ! -f "$executable" ]; then echo "Path \"$executable\" does not seem to exist" 1>$RUNTIME_JOB_STDOUT 2>$RUNTIME_JOB_STDERR 1>&2 exit 1 fi EOSCR # In case the job executable is written in a scripting language and the # interpreter is not found, the error message printed by GNU_TIME is # misleading. This will print a more appropriate error message. cat >> $LRMS_JOB_SCRIPT <<'EOSCR' # See if executable is a script, and extract the name of the interpreter line1=`dd if="$executable" count=1 2>/dev/null | head -n 1` command=`echo $line1 | sed -n 's/^#! 
*//p'` interpreter=`echo $command | awk '{print $1}'` if [ "$interpreter" = /usr/bin/env ]; then interpreter=`echo $command | awk '{print $2}'`; fi # If it's a script and the interpreter is not found ... [ "x$interpreter" = x ] || type "$interpreter" > /dev/null 2>&1 || { echo "Cannot run $executable: $interpreter: not found" 1>$RUNTIME_JOB_STDOUT 2>$RUNTIME_JOB_STDERR 1>&2 exit 1; } EOSCR # Check that gnu_time works cat >> $LRMS_JOB_SCRIPT </dev/null 2>&1; then echo "WARNING: GNU time not found at: \$GNU_TIME" 2>&1; GNU_TIME= fi if [ -z "\$GNU_TIME" ] ; then $joboption_args $input_redirect $output_redirect else \$GNU_TIME -o "\$RUNTIME_JOB_DIAG" -a -f '\ WallTime=%es\nKernelTime=%Ss\nUserTime=%Us\nCPUUsage=%P\n\ MaxResidentMemory=%MkB\nAverageResidentMemory=%tkB\n\ AverageTotalMemory=%KkB\nAverageUnsharedMemory=%DkB\n\ AverageUnsharedStack=%pkB\nAverageSharedMemory=%XkB\n\ PageSize=%ZB\nMajorPageFaults=%F\nMinorPageFaults=%R\n\ Swaps=%W\nForcedSwitches=%c\nWaitSwitches=%w\n\ Inputs=%I\nOutputs=%O\nSocketReceived=%r\nSocketSent=%s\n\ Signals=%k\n' \ $joboption_args $input_redirect $output_redirect fi RESULT=\$? EOSCR } if [ "submit_common.sh" = `basename $0` ]; then parse_arg_file $* fi nordugrid-arc-5.4.2/src/services/a-rex/lrms/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612257131147023641 xustar000000000000000027 mtime=1388098151.021102 30 atime=1513200602.911049266 29 ctime=1513200663.30278788 nordugrid-arc-5.4.2/src/services/a-rex/lrms/Makefile.am0000644000175000002070000000027012257131147023703 0ustar00mockbuildmock00000000000000dist_pkgdata_DATA = config_parser.sh config_parser_compat.sh cancel_common.sh pkgdata_DATA = submit_common.sh scan_common.sh SUBDIRS = pbs fork sge condor lsf ll slurm dgbridge boinc nordugrid-arc-5.4.2/src/services/a-rex/lrms/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315732023645 xustar000000000000000030 mtime=1513200602.948049718 30 atime=1513200650.219627868 30 ctime=1513200663.303787892 nordugrid-arc-5.4.2/src/services/a-rex/lrms/Makefile.in0000644000175000002070000006447013214315732023726 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms DIST_COMMON = README $(dist_pkgdata_DATA) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in $(srcdir)/scan_common.sh.in \ $(srcdir)/submit_common.sh.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = submit_common.sh scan_common.sh CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" DATA = $(dist_pkgdata_DATA) $(pkgdata_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ 
sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = 
@GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = 
@abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ dist_pkgdata_DATA = config_parser.sh config_parser_compat.sh cancel_common.sh pkgdata_DATA = submit_common.sh scan_common.sh SUBDIRS = pbs fork sge condor lsf ll slurm dgbridge boinc all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): submit_common.sh: $(top_builddir)/config.status $(srcdir)/submit_common.sh.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ scan_common.sh: $(top_builddir)/config.status $(srcdir)/scan_common.sh.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files install-pkgdataDATA: $(pkgdata_DATA) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
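# Example (illustrative only; the staging path below is hypothetical and not
# part of the generated Makefile): a variable can be overridden on the make
# command line for a single run, e.g.
#   make pkgdatadir=/tmp/arc-stage/share/arc install-pkgdataDATA
# and a single LRMS subdirectory from SUBDIRS can be rebuilt on its own, e.g.
#   cd pbs && make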
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(DATA) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dist_pkgdataDATA install-pkgdataDATA install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-dist_pkgdataDATA uninstall-pkgdataDATA .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dist_pkgdataDATA install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkgdataDATA install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am uninstall-dist_pkgdataDATA \ uninstall-pkgdataDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/lrms/PaxHeaders.7502/boinc0000644000000000000000000000013013214316027022611 xustar000000000000000029 mtime=1513200663.51779051 30 atime=1513200668.717854109 29 ctime=1513200663.51779051 nordugrid-arc-5.4.2/src/services/a-rex/lrms/boinc/0000755000175000002070000000000013214316027022736 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/lrms/boinc/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712257131147024734 xustar000000000000000027 mtime=1388098151.021102 30 atime=1513200602.965049926 30 ctime=1513200663.514790473 nordugrid-arc-5.4.2/src/services/a-rex/lrms/boinc/Makefile.am0000644000175000002070000000015612257131147025000 0ustar00mockbuildmock00000000000000dist_pkgdata_DATA = configure-boinc-env.sh pkgdata_SCRIPTS = submit-boinc-job cancel-boinc-job scan-boinc-job nordugrid-arc-5.4.2/src/services/a-rex/lrms/boinc/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315733024740 xustar000000000000000030 mtime=1513200603.001050366 30 atime=1513200650.788634827 30 ctime=1513200663.514790473 nordugrid-arc-5.4.2/src/services/a-rex/lrms/boinc/Makefile.in0000644000175000002070000005007013214315733025010 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/boinc DIST_COMMON = $(dist_pkgdata_DATA) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in $(srcdir)/cancel-boinc-job.in \ $(srcdir)/scan-boinc-job.in $(srcdir)/submit-boinc-job.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = submit-boinc-job scan-boinc-job cancel-boinc-job CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" SCRIPTS = $(pkgdata_SCRIPTS) SOURCES = DIST_SOURCES = DATA = $(dist_pkgdata_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = 
@ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = 
@INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ 
host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ dist_pkgdata_DATA = configure-boinc-env.sh pkgdata_SCRIPTS = submit-boinc-job cancel-boinc-job scan-boinc-job all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/boinc/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/boinc/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): submit-boinc-job: $(top_builddir)/config.status $(srcdir)/submit-boinc-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ scan-boinc-job: $(top_builddir)/config.status $(srcdir)/scan-boinc-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cancel-boinc-job: $(top_builddir)/config.status $(srcdir)/cancel-boinc-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo 
$$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dist_pkgdataDATA install-pkgdataSCRIPTS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dist_pkgdataDATA \ install-dvi install-dvi-am install-exec install-exec-am \ install-html install-html-am install-info install-info-am \ install-man install-pdf install-pdf-am install-pkgdataSCRIPTS \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \ uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS # Tell versions [3.59,3.63) of GNU make to not export all variables. 
# Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/lrms/boinc/PaxHeaders.7502/scan-boinc-job.in0000644000000000000000000000012713213472017026011 xustar000000000000000027 mtime=1512993807.604046 30 atime=1513200650.822635243 30 ctime=1513200663.516790497 nordugrid-arc-5.4.2/src/services/a-rex/lrms/boinc/scan-boinc-job.in0000755000175000002070000001152413213472017026061 0ustar00mockbuildmock00000000000000#!/bin/sh # # Periodically monitor for jobs which have finished or failed but not # reported an exitcode # #set -x id=`id -u` #debug=: debug () { echo -n `date` 1>&2 echo -n ' ' 1>&2 echo $@ 1>&2 } debug "starting" debug "options = $@" # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkgdatadir="$basedir" . "${pkgdatadir}/configure-boinc-env.sh" || exit $? if [ -z "$1" ] ; then echo "Argument missing" 1>&2 ; exit 1 ; fi # Prints the uid of the owner of the file given as argument # Perl is used because it's more portable than using the stat command printuid () { code='my @s = stat($ARGV[0]); print($s[4] || "")' /usr/bin/perl -we "$code" "$1" } # # Attempts to switch to uid passed as the first argument and then runs the # commands passed as the second argument in a shell. The remaining arguments # are passed as arguments to the shell. No warning is given in case switching # uid is not possible. # do_as_uid () { test $# -ge 2 || { log "do_as_uid requires 2 arguments"; return 1; } script='use English; my ($uid, @args) = @ARGV; if ( $UID == 0 && $uid ) { eval { $UID = $uid }; print STDERR "Cannot switch to uid($UID): $@\n" if $UID != $uid; } system("/bin/sh","-c",@args); exit ($?>>8||128+($?&127)); ' /usr/bin/perl -we "$script" "$@" } # Append .comment (containing STDOUT & STDERR of the job wrapper) to .errors save_commentfile () { uid=$1 commentfile=$2 errorsfile=$3 echo '---------- Output of the job wrapper script -----------' >> $errorsfile cat $commentfile 2> /dev/null >> $errorsfile echo '------------------------- End of output -------------------------' >> $errorsfile #do_as_uid "$uid" "$action" } for control_dir in "$@" ; do if [ ! -d "${control_dir}" ]; then echo "No control dir $control_dir" 1>&2 continue fi # Bash specific, but this script will be rewritten in python soon... declare -A finished_jobs appidclause="" if [ ! -z "$CONFIG_boinc_app_id" ]; then appidclause="and appid=$CONFIG_boinc_app_id" fi finished=$(mysql -h $CONFIG_boinc_db_host -P $CONFIG_boinc_db_port -u $CONFIG_boinc_db_user --password=$CONFIG_boinc_db_pass $CONFIG_boinc_db_name -e "select name from workunit where assimilate_state=2 $appidclause") for job in `echo $finished`; do finished_jobs[$job]=1 done # iterate over all jobs known in the control directory find "${control_dir}/processing" -name 'job.*.status' \ | xargs egrep -l "INLRMS|CANCELING" \ | sed -e 's/.*job\.//' -e 's/\.status$//' \ | while read job; do #debug "scanning job = $job" unset joboption_jobid unset joboption_directory # this job was already completed, nothing remains to be done [ -f "${control_dir}/job.${job}.lrms_done" ] && continue # a grami file exists for all jobs that GM thinks are running. # proceed to next job if this file is missing. if [ ! -f "${control_dir}/job.${job}.grami" ]; then continue fi # extract process IDs from the grami file [ ! -f "${control_dir}/job.${job}.grami" ] && continue . 
"${control_dir}/job.${job}.grami" # process IDs could not be learned, proceeding to next [ -z "$joboption_jobid" ] && continue #debug "local jobid = $joboption_jobid" # checking if process is still running if [[ ! ${finished_jobs[$joboption_jobid]} ]]; then #debug "$joboption_jobid is still running, Continueing to next" continue else debug "$joboption_jobid is finished" fi uid=$(printuid "${control_dir}/job.${job}.local") debug "local user id = $uid" diagfile=${joboption_directory}.diag debug "checking $diagfile" exitcode=$(do_as_uid "$uid" "cat '$diagfile'" | sed -n 's/^exitcode=\([0-9]*\).*/\1/p') debug "exitcode = [$exitcode] extracted from $diagfile" exitcode=0 comment="" if [ -z "$joboption_arg_code" ] ; then joboption_arg_code='0' ; fi if [ -z "$exitcode" ]; then echo "Job $job with PID $joboption_jobid died unexpectedly" 1>&2 comment="Job died unexpectedly" 1>&2 exitcode=-1 elif [ "$exitcode" -ne "$joboption_arg_code" ]; then comment="Job finished with wrong exit code - $exitcode != $joboption_arg_code" 1>&2 fi debug "got exitcode=$exitcode" save_commentfile "$uid" "${joboption_directory}.comment" "${control_dir}/job.${job}.errors" echo "$exitcode $comment" > "${control_dir}/job.${job}.lrms_done" done done debug "done, going to sleep" sleep 120 exit 0 nordugrid-arc-5.4.2/src/services/a-rex/lrms/boinc/PaxHeaders.7502/configure-boinc-env.sh0000644000000000000000000000012412366723236027077 xustar000000000000000027 mtime=1406903966.033074 27 atime=1513200576.588727 30 ctime=1513200663.513790461 nordugrid-arc-5.4.2/src/services/a-rex/lrms/boinc/configure-boinc-env.sh0000644000175000002070000000104412366723236027143 0ustar00mockbuildmock00000000000000# # set environment variables for boinc # ############################################################## # Reading configuration from $ARC_CONFIG ############################################################## if [ -z "$pkgdatadir" ]; then echo 'pkgdatadir must be set' 1>&2; exit 1; fi . "$pkgdatadir/config_parser.sh" || exit $? ARC_CONFIG=${ARC_CONFIG:-/etc/arc.conf} config_parse_file $ARC_CONFIG 1>&2 || exit $? config_import_section "common" config_import_section "infosys" config_import_section "grid-manager" # Script returned ok true nordugrid-arc-5.4.2/src/services/a-rex/lrms/boinc/PaxHeaders.7502/submit-boinc-job.in0000644000000000000000000000012613213472002026361 xustar000000000000000027 mtime=1512993794.856585 30 atime=1513200650.805635035 29 ctime=1513200663.51779051 nordugrid-arc-5.4.2/src/services/a-rex/lrms/boinc/submit-boinc-job.in0000755000175000002070000003015413213472002026432 0ustar00mockbuildmock00000000000000#!/bin/bash # set -x # # Input: path to grami file (same as Globus). # This script creates a temporary job script and runs it DEBUG=0 echo "----- starting submit_boinc_job -----" 1>&2 joboption_lrms=boinc # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkgdatadir="$basedir" . ${pkgdatadir}/submit_common.sh || exit $? 
############################################################## # Parse grami file, read arc config ############################################################## init $1 joboption_directory_orig=$joboption_directory joboption_directory='`pwd`' joboption_stdout='`pwd`/'`basename $joboption_stdout` joboption_stderr='`pwd`/'`basename $joboption_stderr` read_arc_conf # make sure session is world-writable chmod 777 $joboption_directory_orig RUNTIME_NODE_SEES_FRONTEND=yes ############################################################## # Zero stage of runtime environments ############################################################## RTE_stage0 echo "project_root=$PROJECT_ROOT" 1>&2 cd $PROJECT_ROOT ############################################################## # create job script ############################################################## mktempscript LRMS_JOB_BOINC="${LRMS_JOB_SCRIPT}.boinc" touch $LRMS_JOB_BOINC chmod u+x ${LRMS_JOB_SCRIPT} ############################################################## # Start job script ############################################################## N=0 x=$joboption_directory_orig while [ "$x" != "/" ] do x=`dirname $x` N=$((N+1)) done echo '#!/bin/sh' > $LRMS_JOB_SCRIPT echo "#job script built by grid-manager and input file for BOINC job" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<"FMARK" set -x export RUNTIME_CONFIG_DIR=`pwd`/ #tar zxvf RTE.tar.gz cp ../shared/ATLAS.root ./ #rename root file #unzip RTE file FMARK echo tar --strip-components=$N -xvf *input.tar.gz >> $LRMS_JOB_SCRIPT ############################################################## # non-parallel jobs ############################################################## set_count sourcewithargs_jobscript ############################################################## # Override umask ############################################################## echo "" >> $LRMS_JOB_SCRIPT echo "# Override umask of execution node (sometimes values are really strange)" >> $LRMS_JOB_SCRIPT echo "umask 077" >> $LRMS_JOB_SCRIPT ############################################################## # Add environment variables ############################################################## add_user_env ############################################################## # Check for existence of executable, ############################################################## if [ -z "${joboption_arg_0}" ] ; then echo 'Executable is not specified' 1>&2 exit 1 fi ####################################################################### # copy information useful for transferring files to/from node directly ####################################################################### if [ "$joboption_localtransfer" = 'yes' ] ; then setup_local_transfer fi setup_runtime_env ############################################################## # Add std... 
to job arguments ############################################################## include_std_streams ############################################################## # Move files to local working directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_node ############################################################## # Runtime configuration ############################################################## echo RUNTIME_JOB_DIAG='`pwd`/'`basename $joboption_directory_orig`.diag >>$LRMS_JOB_SCRIPT RTE_stage1 echo "echo \"runtimeenvironments=\$runtimeenvironments\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT ##################################################### # Go to working dir and start job ##################################################### cd_and_run ## print nodename cat >> $LRMS_JOB_SCRIPT <<"FMARK" sed -i -e '/nodename=/d' $RUNTIME_JOB_DIAG hostname=` grep domain_name init_data.xml |awk -F '>' '{print $2}'|awk -F "<" '{print $1}'|sed -e "s# #_#g"` username=` grep user_name init_data.xml |awk -F '>' '{print $2}'|awk -F "<" '{print $1}'|sed -e "s# #_#g"` nodename=$username@$hostname echo "nodename=$nodename" >> "$RUNTIME_JOB_DIAG" FMARK ##################################################### # Upload output files ##################################################### upload_output_files echo "" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime (post)configuration at computing node ############################################################## configure_runtime ############################################################## # Move files back to session directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## #move_files_to_frontend echo 'echo exitcode=$RESULT >> $RUNTIME_JOB_DIAG' >> $LRMS_JOB_SCRIPT ############################################################# # zip the result files into 1 file zip.tar.gz ############################################################# notnull () { if [ -z $1 ];then echo 0 else echo 1 fi } result_list= i=0 eval opt=\${joboption_outputfile_$i} ret=`notnull $opt` while [ $ret = "1" ] do output_file=$(echo $opt|sed -e "s#^/#./#") output_file=$(echo $output_file|sed -e "s#@##") result_list=$result_list" "$output_file i=$((i+1)) eval opt=\${joboption_outputfile_$i} ret=`notnull $opt` echo "ret="$ret done files=$(echo $result_list|tr " " "\n") cat >> $LRMS_JOB_SCRIPT <<'EOF' echo "zip all output files" flist="*.diag " EOF echo for f in $files >>$LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<'EOF' do if [ -e $f ];then flist=$flist" "$f fi done EOF #echo $flist cat <<'EOF' >>$LRMS_JOB_SCRIPT if [ -f output.list ];then ol=$(awk '{print $1}' output.list) for i in $ol do if [ -f $i ];then flist=$flist" "$i fi done fi EOF echo 'tar czvf result.tar.gz $flist' >>$LRMS_JOB_SCRIPT chmod a+r $LRMS_JOB_SCRIPT ####################################### # Submit the job ####################################### ## generate RTE file on the fly joboption_num=0 rflist=$RUNTIME_CONFIG_DIR/APPS/HEP/ATLAS-SITE eval "var_is_set=\${joboption_runtime_$joboption_num+yes}" while [ ! 
-z "${var_is_set}" ] ; do eval "var_value=\${joboption_runtime_$joboption_num}" if [ -r "$RUNTIME_CONFIG_DIR/${var_value}" ] ; then rflist=$rflist" $RUNTIME_CONFIG_DIR/${var_value}" fi joboption_num=$(( $joboption_num + 1 )) eval "var_is_set=\${joboption_runtime_$joboption_num+yes}" done RN=0 x=$RUNTIME_CONFIG_DIR while [ "$x" != "/" ] do x=`dirname $x` RN=$((RN+1)) done sed -i -e "s/#unzip RTE file/tar --strip-components=$RN -zxvf RTE.tar.gz/g" $LRMS_JOB_SCRIPT echo "job script ${LRMS_JOB_SCRIPT} built" 1>&2 JobId=`basename $joboption_directory_orig` JobInput=$joboption_directory_orig/$JobId"_input.tar.gz" wu=$JobId RTE_FILE="rte_"$wu".tar.gz" RTE_LOCATION="$joboption_directory_orig/RTE.tar.gz" echo "#!/bin/bash" >> $LRMS_JOB_BOINC echo "set -x" >> $LRMS_JOB_BOINC tflist="" Root_basename=() RootFile=() ## RootFile keeps the orginal path of the root files ## Root_basename keeps the basename of the root files, with adding JobID to make them unique i=0 for file in `ls $joboption_directory_orig` do echo $file|grep ".root" > /dev/null ret=$? if [ $ret -eq 0 ]; then echo skip root file $file Root_basename[$i]=$JobId"_"$file RootFile[$i]=$joboption_directory_orig/$file sed -i -e "/#rename root file/a\mv ATLAS.root_$i $file" $LRMS_JOB_SCRIPT let i=$i+1 continue else tflist=$tflist" "$joboption_directory_orig/$file fi done echo tar zhcvf $JobInput $tflist echo "tar zhcvf $JobInput $tflist" >> $LRMS_JOB_BOINC echo tar zhcvf $RTE_LOCATION $rflist echo "tar zhcvf $RTE_LOCATION $rflist" >> $LRMS_JOB_BOINC echo "cd $PROJECT_ROOT " >>$LRMS_JOB_BOINC JobInput_basename=`basename $JobInput` Script_basename=`basename $LRMS_JOB_SCRIPT` echo "cp $JobInput "'`bin/dir_hier_path '$(basename $JobInput)'`' >> $LRMS_JOB_BOINC echo "chmod a+r "'`bin/dir_hier_path '$(basename $JobInput)'`' >> $LRMS_JOB_BOINC echo "cp $LRMS_JOB_SCRIPT " '`bin/dir_hier_path' $(basename $LRMS_JOB_SCRIPT)'`' >>$LRMS_JOB_BOINC echo "chmod a+r " '`bin/dir_hier_path' $(basename $LRMS_JOB_SCRIPT)'`' >>$LRMS_JOB_BOINC echo "cp $RTE_LOCATION " '`bin/dir_hier_path' $RTE_FILE '`' >> $LRMS_JOB_BOINC echo "chmod a+r " '`bin/dir_hier_path' $RTE_FILE '`' >> $LRMS_JOB_BOINC [ -n $PROJECT_DOWNLOAD_ROOT ] && echo "cd $PROJECT_DOWNLOAD_ROOT" >> $LRMS_JOB_BOINC ## process the root files as remote files cd $PROJECT_ROOT echo "current directory is the project_root: "$PWD remote_url=() fsize=() md5=() i=0 while [ $i -lt ${#RootFile[@]} ] do [ -L ${RootFile[$i]} ] && RootFile[$i]=`ls -l ${RootFile[$i]}|awk '{print $11}'` echo "ln ${RootFile[$i]} "'`bin/dir_hier_path' ${Root_basename[$i]} '`' >> $LRMS_JOB_BOINC echo "chmod a+r "'`bin/dir_hier_path ' ${Root_basename[$i]} '`' >> $LRMS_JOB_BOINC download_dir=`bin/dir_hier_path ${Root_basename[$i]} | awk -F/ '{print $(NF-1)}'` remote_url[$i]="${PROJECT_DOWNLOAD_URL}/${download_dir}/${Root_basename[$i]}" fsize[$i]=`stat -c %s ${RootFile[$i]}` md5[$i]=`md5sum ${RootFile[$i]} | awk '{print $1}'` echo "Using remote file ${remote_url[$i]} ${fsize[$i]} ${md5[$i]}" 1>&2 let i=$i+1 done [ -n $PROJECT_DOWNLOAD_ROOT ] && echo "cd $PROJECT_ROOT" >> $LRMS_JOB_BOINC ## generate the input template file let ifileno=3+${#remote_url[@]} i=0 intmp="" while [ $i -lt $ifileno ] do intmp="$intmp $i " let i=$i+1 done intmp="$intmp " i=0 while [ $i -lt ${#remote_url[@]} ] do intmp="$intmp $i shared/ATLAS.root_$i " let i=$i+1 done intmp="$intmp $i shared/input.tar.gz " let i=$i+1 intmp="$intmp $i shared/RTE.tar.gz " let i=$i+1 intmp="$intmp $i shared/start_atlas.sh " intmp_res=$(cat $WU_TEMPLATE) intmp="$intmp $intmp_res " intmp="$intmp 
" WU_TEMPLATE_tmp=$(mktemp /tmp/${BOINC_APP}_XXXXXX) cat << EOF > $WU_TEMPLATE_tmp $intmp EOF ####################################### if [ -z $joboption_memory ];then memreq=2000000000 else memreq=$((joboption_memory*1000000)) fi if [ -z $joboption_cputime ];then maxcputime=$((2*3600*3000000000)) else maxcputime=$((joboption_cputime*3000000000)) fi priority= if [ ! -z "$joboption_priority" ]; then priority="--priority $joboption_priority" fi cmd="bin/create_work \ --appname $BOINC_APP \ --wu_name $wu \ --wu_template $WU_TEMPLATE_tmp \ --result_template $RESULT_TEMPLATE \ --rsc_memory_bound $memreq \ --rsc_fpops_est $maxcputime \ $priority" j=0 while [ $j -lt ${#remote_url[@]} ] do cmd="$cmd \ --remote_file ${remote_url[$j]} ${fsize[$j]} ${md5[$j]}" let j=$j+1 done cmd="$cmd \ $(basename $JobInput) \ $RTE_FILE \ $(basename $LRMS_JOB_SCRIPT)" echo $cmd >> $LRMS_JOB_BOINC echo 'ret=$?' >>$LRMS_JOB_BOINC echo 'exit $ret' >>$LRMS_JOB_BOINC if [ $DEBUG -eq 2 ];then cat $LRMS_JOB_BOINC else sh $LRMS_JOB_BOINC 1>&2 >/tmp/log.boinc fi rc=$? if [ $rc -eq 0 ];then echo "job $wu submitted successfully!" 1>&2 echo "joboption_jobid=$wu" >> $arg_file fi echo "----- removing intermediate files ----" 1>&2 if [ $DEBUG -ne 1 ];then rm -fr $WU_TEMPLATE_tmp rm -fr $LRMS_JOB_BOINC $LRMS_JOB_ERR $LRMS_JOB_OUT $LRMS_JOB_SCRIPT rm -fr $JobInput fi echo "----- exiting submit_boinc_job -----" 1>&2 echo "" 1>&2 exit $rc nordugrid-arc-5.4.2/src/services/a-rex/lrms/boinc/PaxHeaders.7502/cancel-boinc-job.in0000644000000000000000000000012712366723236026324 xustar000000000000000027 mtime=1406903966.033074 30 atime=1513200650.837635426 30 ctime=1513200663.515790485 nordugrid-arc-5.4.2/src/services/a-rex/lrms/boinc/cancel-boinc-job.in0000755000175000002070000000437012366723236026375 0ustar00mockbuildmock00000000000000#!/bin/sh # set -x # # Cancel job running in boinc. # ProjectDir=/home/boinc/projects/ATLAS echo "----- starting cancel_boinc_job -----" 1>&2 trap 'echo "----- exiting cancel_boinc_job -----" 1>&2; echo "" 1>&2' EXIT joboption_lrms=boinc # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkgdatadir="$basedir" . ${pkgdatadir}/cancel_common.sh || exit $? if [ -z "$joboption_controldir" ] ; then joboption_controldir=`dirname "$arg_file"` if [ "$joboption_controldir" = '.' ] ; then joboption_controldir="$PWD" fi fi job_control_dir="$joboption_controldir" if [ -z "$joboption_gridid" ] ; then joboption_gridid=`basename "$arg_file" | sed 's/^job\.\(.*\)\.grami$/\1/'` fi echo "Deleting job $joboption_gridid, local id $joboption_jobid" 1>&2 if [ ! -r "$job_control_dir/job.${joboption_gridid}.local" ]; then echo "Local description of job ${joboption_gridid} not found at '$job_control_dir/job.${joboption_gridid}.local'. Job was not killed, if running at all." 1>&2 exit 1 fi if [ -z "$joboption_jobid" ] ; then joboption_jobid=`cat "$job_control_dir/job.${joboption_gridid}.local" | grep '^localid=' | sed 's/^localid=//'` fi job_control_subdir= if [ -r "$job_control_dir/accepting/job.${joboption_gridid}.status" ]; then job_control_subdir="$job_control_dir/accepting" elif [ -r "$job_control_dir/processing/job.${joboption_gridid}.status" ]; then job_control_subdir="$job_control_dir/processing" elif [ -r "$job_control_dir/finished/job.${joboption_gridid}.status" ]; then job_control_subdir="$job_control_dir/finished" else echo "Status file of job ${joboption_gridid} not found in '$job_control_dir'. 
Job was not killed, if running at all." 1>&2 exit 1 fi case X`cat "$job_control_subdir/job.${joboption_gridid}.status"` in XINLRMS | XCANCELING) if [ -z "$joboption_jobid" ] ; then echo "Can't find local id of job" 1>&2 exit 1 fi cd $ProjectDir;bin/cancel_jobs --name $joboption_gridid ;; XFINISHED | XDELETED) echo "Job already died, won't do anything" 1>&2 ;; *) echo "Job is at unkillable state" 1>&2 ;; esac exit 0 nordugrid-arc-5.4.2/src/services/a-rex/lrms/PaxHeaders.7502/condor0000644000000000000000000000013213214316027023005 xustar000000000000000030 mtime=1513200663.395789018 30 atime=1513200668.717854109 30 ctime=1513200663.395789018 nordugrid-arc-5.4.2/src/services/a-rex/lrms/condor/0000755000175000002070000000000013214316027023130 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/lrms/condor/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712055122610025115 xustar000000000000000027 mtime=1354016136.453887 30 atime=1513200603.018050574 30 ctime=1513200663.391788969 nordugrid-arc-5.4.2/src/services/a-rex/lrms/condor/Makefile.am0000644000175000002070000000016512055122610025161 0ustar00mockbuildmock00000000000000dist_pkgdata_DATA = configure-condor-env.sh pkgdata_SCRIPTS = cancel-condor-job scan-condor-job \ submit-condor-job nordugrid-arc-5.4.2/src/services/a-rex/lrms/condor/PaxHeaders.7502/configure-condor-env.sh0000644000000000000000000000012413066672136027463 xustar000000000000000027 mtime=1490777182.579293 27 atime=1513200576.563727 30 ctime=1513200663.390788956 nordugrid-arc-5.4.2/src/services/a-rex/lrms/condor/configure-condor-env.sh0000644000175000002070000000434613066672136027537 0ustar00mockbuildmock00000000000000 if [ -z "$pkgdatadir" ]; then echo 'pkgdatadir must be set' 1>&2; exit 1; fi . "$pkgdatadir/config_parser_compat.sh" || exit $? ARC_CONFIG=${ARC_CONFIG:-/etc/arc.conf} config_parse_file $ARC_CONFIG || exit $? config_import_section "common" config_import_section "infosys" config_import_section "grid-manager" # performance logging: if perflogdir or perflogfile is set, logging is turned on. So only set them when enable_perflog_reporting is ON unset perflogdir unset perflogfile enable_perflog=${CONFIG_enable_perflog_reporting:-no} if [ "$CONFIG_enable_perflog_reporting" == "expert-debug-on" ]; then perflogdir=${CONFIG_perflogdir:-/var/log/arc/perfdata} perflogfile="${perflogdir}/backends.perflog" fi # Initializes environment variables: CONDOR_BIN_PATH # Valued defines in arc.conf take priority over pre-existing environment # variables. # Condor executables are located using the following cues: # 1. condor_bin_path option in arc.conf # 2. PATH environment variable # Synopsis: # # . config_parser.sh # config_parse_file /etc/arc.conf || exit 1 # config_import_section "common" # . configure-condor-env.sh || exit 1 if [ ! -z "$CONFIG_condor_bin_path" ]; then CONDOR_BIN_PATH=$CONFIG_condor_bin_path; else condor_version=$(type -p condor_version) CONDOR_BIN_PATH=${condor_version%/*} fi; if [ ! -x "$CONDOR_BIN_PATH/condor_version" ]; then echo 'Condor executables not found!'; return 1; fi echo "Using Condor executables from: $CONDOR_BIN_PATH" export CONDOR_BIN_PATH if [ ! -z "$CONFIG_condor_config" ]; then CONDOR_CONFIG=$CONFIG_condor_config; else CONDOR_CONFIG="/etc/condor/condor_config"; fi; if [ ! -e "$CONDOR_CONFIG" ]; then echo 'Condor config file not found!'; return 1; fi echo "Using Condor config file at: $CONDOR_CONFIG" export CONDOR_CONFIG # FIX: Recent versions (8.5+?) 
of HTCondor does not show all jobs when running condor_q, but only own # Solution according Brain Bockelman GGUS 123947 _condor_CONDOR_Q_ONLY_MY_JOBS=false export _condor_CONDOR_Q_ONLY_MY_JOBS _condor_CONDOR_Q_DASH_BATCH_IS_DEFAULT=false export _condor_CONDOR_Q_DASH_BATCH_IS_DEFAULT nordugrid-arc-5.4.2/src/services/a-rex/lrms/condor/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315733025132 xustar000000000000000030 mtime=1513200603.055051027 30 atime=1513200650.269628479 30 ctime=1513200663.392788981 nordugrid-arc-5.4.2/src/services/a-rex/lrms/condor/Makefile.in0000644000175000002070000005013113214315733025200 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/condor DIST_COMMON = README $(dist_pkgdata_DATA) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in $(srcdir)/cancel-condor-job.in \ $(srcdir)/scan-condor-job.in $(srcdir)/submit-condor-job.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = scan-condor-job cancel-condor-job \ submit-condor-job CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; 
s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" SCRIPTS = $(pkgdata_SCRIPTS) SOURCES = DIST_SOURCES = DATA = $(dist_pkgdata_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = 
@GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = 
@abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ dist_pkgdata_DATA = configure-condor-env.sh pkgdata_SCRIPTS = cancel-condor-job scan-condor-job \ submit-condor-job all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/condor/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/condor/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): scan-condor-job: $(top_builddir)/config.status $(srcdir)/scan-condor-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cancel-condor-job: $(top_builddir)/config.status $(srcdir)/cancel-condor-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ submit-condor-job: $(top_builddir)/config.status $(srcdir)/submit-condor-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do 
echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dist_pkgdataDATA install-pkgdataSCRIPTS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dist_pkgdataDATA \ install-dvi install-dvi-am install-exec install-exec-am \ install-html install-html-am install-info install-info-am \ install-man install-pdf install-pdf-am install-pkgdataSCRIPTS \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \ uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS # Tell versions [3.59,3.63) of GNU make to not export all variables. 
# Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/lrms/condor/PaxHeaders.7502/cancel-condor-job.in0000644000000000000000000000012712272453512026701 xustar000000000000000027 mtime=1391089482.348275 30 atime=1513200650.302628883 30 ctime=1513200663.393788993 nordugrid-arc-5.4.2/src/services/a-rex/lrms/condor/cancel-condor-job.in0000755000175000002070000000110312272453512026741 0ustar00mockbuildmock00000000000000#!@posix_shell@ # # Cancel job running in Condor # progname=$(basename "$0") echo "----- starting $progname -----" 1>&2 joboption_lrms=condor # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkgdatadir="$basedir" . ${pkgdatadir}/cancel_common.sh || exit $? echo "$progname: canceling job $joboption_jobid with condor_rm..." 1>&2 $CONDOR_BIN_PATH/condor_rm ${joboption_jobid%.`hostname -f`} 1>&2 echo "----- exiting $progname -----" 1>&2 exit 0 nordugrid-arc-5.4.2/src/services/a-rex/lrms/condor/PaxHeaders.7502/submit-condor-job.in0000644000000000000000000000012713065022075026754 xustar000000000000000027 mtime=1490297917.267218 30 atime=1513200650.317629067 30 ctime=1513200663.395789018 nordugrid-arc-5.4.2/src/services/a-rex/lrms/condor/submit-condor-job.in0000755000175000002070000004424213065022075027027 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -x # # Based on globus submission script for pbs # # Submits job to HTCondor. # Input: path to grami file (same as Globus). # # The temporary job description file is created for the submission and then removed # at the end of this script. echo "----- starting submit_condor_job -----" 1>&2 joboption_lrms=condor # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkgdatadir="$basedir" . ${pkgdatadir}/configure-condor-env.sh || exit $? . ${pkgdatadir}/submit_common.sh || exit $? perflogfilesub="${perflogdir}/submission.perflog" if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi ############################################################## # Parse grami file, read arc config ############################################################## init $1 read_arc_conf failures_file="$joboption_controldir/job.$joboption_gridid.failed" if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then RUNTIME_LOCAL_SCRATCH_DIR="\${_CONDOR_SCRATCH_DIR}" fi if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then if [ -z "${RUNTIME_LOCAL_SCRATCH_DIR}" ] ; then echo "Need to know at which directory to run job: RUNTIME_LOCAL_SCRATCH_DIR must be set if RUNTIME_NODE_SEES_FRONTEND is empty" 1>&2 echo "Submission: Configuration error.">>"$failures_file" exit 1 fi fi ############################################################## # Zero stage of runtime environments ############################################################## RTE_stage0 ############################################################## # create job script ############################################################## mktempscript is_cluster=true ############################################################## # Start job description file ############################################################## CONDOR_SUBMIT='condor_submit' if [ ! 
-z "$CONDOR_BIN_PATH" ] ; then CONDOR_SUBMIT=${CONDOR_BIN_PATH}/${CONDOR_SUBMIT} fi # HTCondor job script and submit description file rm -f "$LRMS_JOB_SCRIPT" LRMS_JOB_SCRIPT="${joboption_directory}/condorjob.sh" LRMS_JOB_DESCRIPT="${joboption_directory}/condorjob.jdl" echo "# HTCondor job description built by grid-manager" > $LRMS_JOB_DESCRIPT echo "Executable = condorjob.sh" >> $LRMS_JOB_DESCRIPT echo "Input = $joboption_stdin" >> $LRMS_JOB_DESCRIPT echo "Log = ${joboption_directory}/log">> $LRMS_JOB_DESCRIPT # write HTCondor output to .comment file if possible, but handle the situation when # jobs are submitted by HTCondor-G < 8.0.5 condor_stdout="${joboption_directory}.comment" condor_stderr="${joboption_directory}.comment" if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then # if [[ $joboption_stdout =~ _condor_stdout$ ]]; then if expr match "$joboption_stdout" '.*_condor_stdout$' > /dev/null; then condor_stdout=$joboption_stdout; condor_stderr=$joboption_stderr; fi fi echo "Output = $condor_stdout">> $LRMS_JOB_DESCRIPT echo "Error = $condor_stderr">> $LRMS_JOB_DESCRIPT # queue if [ ! -z "${joboption_queue}" ] ; then echo "+NordugridQueue = \"$joboption_queue\"" >> $LRMS_JOB_DESCRIPT fi # job name for convenience if [ ! -z "${joboption_jobname}" ] ; then #TODO is this necessary? do parts of the infosys need these limitations? jobname=`echo "$joboption_jobname" | \ sed 's/^\([^[:alpha:]]\)/N\1/' | \ sed 's/[^[:alnum:]]/_/g' | \ sed 's/\(...............\).*/\1/'` echo "Description = $jobname" >> $LRMS_JOB_DESCRIPT else jobname="gridjob" echo "Description = $jobname" >> $LRMS_JOB_DESCRIPT fi # environment echo "GetEnv = True" >> $LRMS_JOB_DESCRIPT # universe echo "Universe = vanilla" >> $LRMS_JOB_DESCRIPT # notification echo "Notification = Never" >> $LRMS_JOB_DESCRIPT # requirements if [ ! -z "$CONFIG_condor_requirements" ] ; then echo "Requirements = $CONFIG_condor_requirements" >> $LRMS_JOB_DESCRIPT fi ##################################################### # priority ##################################################### if [ ! -z "$joboption_priority" ]; then #Condor uses any integer as priority. 0 being default. Only per user basis. #We assume that only grid jobs are relevant. #In that case we can use ARC 0-100 but translated so default is 0. priority=$((joboption_priority-50)) echo "Priority = $priority" >> $LRMS_JOB_DESCRIPT fi # rank if [ ! -z "$CONFIG_condor_rank" ] ; then echo "Rank = $CONFIG_condor_rank" >> $LRMS_JOB_DESCRIPT fi # proxy echo "x509userproxy = $joboption_controldir/job.$joboption_gridid.proxy" >> $LRMS_JOB_DESCRIPT ############################################################## # (non-)parallel jobs ############################################################## set_count if [ ! -z $joboption_count ] && [ $joboption_count -gt 0 ] ; then echo "request_cpus = $joboption_count" >> $LRMS_JOB_DESCRIPT fi if [ "$joboption_exclusivenode" = "true" ]; then echo "+RequiresWholeMachine=True" >> $LRMS_JOB_DESCRIPT fi ############################################################## # Execution times (minutes) ############################################################## REMOVE="FALSE" if [ ! 
-z "$joboption_cputime" ] ; then if [ $joboption_cputime -lt 0 ] ; then echo 'WARNING: Less than 0 cpu time requested: $joboption_cputime' 1>&2 joboption_cputime=0 echo 'WARNING: cpu time set to 0' 1>&2 fi maxcputime=$(( $joboption_cputime / $joboption_count )) echo "+JobCpuLimit = $joboption_cputime" >> $LRMS_JOB_DESCRIPT REMOVE="${REMOVE} || RemoteUserCpu + RemoteSysCpu > JobCpuLimit" fi if [ -z "$joboption_walltime" ] ; then if [ ! -z "$joboption_cputime" ] ; then # Set walltime for backward compatibility or incomplete requests joboption_walltime=$(( $maxcputime * $walltime_ratio )) fi fi if [ ! -z "$joboption_walltime" ] ; then if [ $joboption_walltime -lt 0 ] ; then echo 'WARNING: Less than 0 wall time requested: $joboption_walltime' 1>&2 joboption_walltime=0 echo 'WARNING: wall time set to 0' 1>&2 fi echo "+JobTimeLimit = $joboption_walltime" >> $LRMS_JOB_DESCRIPT REMOVE="${REMOVE} || RemoteWallClockTime > JobTimeLimit" fi ############################################################## # Requested memory (mb) ############################################################## set_req_mem if [ ! -z "$joboption_memory" ] ; then memory_bytes=$(( $joboption_memory * 1024 )) memory_req=$(( $joboption_memory )) # HTCondor needs to know the total memory for the job, not memory per core if [ ! -z $joboption_count ] && [ $joboption_count -gt 0 ] ; then memory_bytes=$(( $joboption_count * $memory_bytes )) memory_req=$(( $joboption_count * $memory_req )) fi echo "request_memory=$memory_req" >> $LRMS_JOB_DESCRIPT echo "+JobMemoryLimit = $memory_bytes" >> $LRMS_JOB_DESCRIPT REMOVE="${REMOVE} || ResidentSetSize > JobMemoryLimit" fi ############################################################## # HTCondor stage in/out ############################################################## if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then ( cd "$joboption_directory" if [ $? 
-ne '0' ] ; then echo "Can't change to session directory: $joboption_directory" 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_DESCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "Submission: Configuration error.">>"$failures_file" exit 1 fi scratch_dir=`dirname "$joboption_directory"` echo "should_transfer_files = YES" >> $LRMS_JOB_DESCRIPT echo "When_to_transfer_output = ON_EXIT_OR_EVICT" >> $LRMS_JOB_DESCRIPT count=0 input_files="Transfer_input_files = " eval "input_file=\$joboption_inputfile_$count" while [ -n "${input_file}" ]; do if [ "$count" -gt "0" ]; then input_files="${input_files}," fi input_files="${input_files} ${joboption_directory}${input_file}" count=$((count+1)) eval "input_file=\$joboption_inputfile_$count" done echo "${input_files}" >> $LRMS_JOB_DESCRIPT ) fi echo "Periodic_remove = ${REMOVE}" >> $LRMS_JOB_DESCRIPT echo "Queue" >> $LRMS_JOB_DESCRIPT echo "#!/bin/bash -l" > $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT echo "# Overide umask of execution node (sometime values are really strange)" >> $LRMS_JOB_SCRIPT echo "umask 077" >> $LRMS_JOB_SCRIPT echo " " >> $LRMS_JOB_SCRIPT # Script must have execute permission chmod 0755 $LRMS_JOB_SCRIPT sourcewithargs_jobscript ############################################################## # Add environment variables ############################################################## add_user_env ############################################################## # Check for existance of executable, # there is no sense to check for executable if files are # downloaded directly to computing node ############################################################## if [ -z "${joboption_arg_0}" ] ; then echo 'Executable is not specified' 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_DESCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "Submission: Job description error.">>"$failures_file" exit 1 fi ####################################################################### # copy information useful for transfering files to/from node directly ####################################################################### if [ "$joboption_localtransfer" = 'yes' ] ; then setup_local_transfer fi ###################################################################### # Adjust working directory for tweaky nodes # RUNTIME_GRIDAREA_DIR should be defined by external means on nodes ###################################################################### if [ ! 
-z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then setup_runtime_env else echo "RUNTIME_JOB_DIR=$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid" >> $LRMS_JOB_SCRIPT echo "RUNTIME_JOB_DIAG=$RUNTIME_LOCAL_SCRATCH_DIR/${joboption_gridid}.diag" >> $LRMS_JOB_SCRIPT RUNTIME_STDIN_REL=`echo "${joboption_stdin}" | sed "s#^${joboption_directory}/*##"` RUNTIME_STDOUT_REL=`echo "${joboption_stdout}" | sed "s#^${joboption_directory}/*##"` RUNTIME_STDERR_REL=`echo "${joboption_stderr}" | sed "s#^${joboption_directory}/*##"` if [ "$RUNTIME_STDIN_REL" = "${joboption_stdin}" ] ; then echo "RUNTIME_JOB_STDIN=\"${joboption_stdin}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDIN=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDIN_REL\"" >> $LRMS_JOB_SCRIPT fi if [ "$RUNTIME_STDOUT_REL" = "${joboption_stdout}" ] ; then echo "RUNTIME_JOB_STDOUT=\"${joboption_stdout}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDOUT=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDOUT_REL\"" >> $LRMS_JOB_SCRIPT fi if [ "$RUNTIME_STDERR_REL" = "${joboption_stderr}" ] ; then echo "RUNTIME_JOB_STDERR=\"${joboption_stderr}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDERR=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDERR_REL\"" >> $LRMS_JOB_SCRIPT fi fi ############################################################## # Add std... to job arguments ############################################################## include_std_streams ############################################################## # Move files to local working directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_node echo "" >> $LRMS_JOB_SCRIPT echo "RESULT=0" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then echo "# move input files to local working directory" >> $LRMS_JOB_SCRIPT count=0 eval "input_file=\$joboption_inputfile_$count" while [ -n "${input_file}" ]; do echo "mv .${input_file} ${joboption_gridid}/." 
>> $LRMS_JOB_SCRIPT count=$((count+1)) eval "input_file=\$joboption_inputfile_$count" done fi ##################################################### # Download input files #################################################### download_input_files ############################################################## # Skip execution if something already failed ############################################################## echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime configuration at computing node ############################################################## RTE_stage1 ############################################################## # Diagnostics ############################################################## echo "echo \"runtimeenvironments=\$runtimeenvironments\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<'EOSCR' EOSCR ############################################################## # Check intermediate result again ############################################################## echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT ############################################################## # Execution ############################################################## cd_and_run ############################################################## # End of RESULT checks ############################################################## echo "fi" >> $LRMS_JOB_SCRIPT echo "fi" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime (post)configuration at computing node ############################################################## configure_runtime ##################################################### # Upload output files #################################################### if [ "$joboption_localtransfer" = 'yes' ] ; then upload_output_files else # There is no sense to keep trash till GM runs uploader echo 'if [ ! -z "$RUNTIME_LOCAL_SCRATCH_DIR" ] ; then' >> $LRMS_JOB_SCRIPT # Delete all files except listed in job.#.output echo ' find ./ -type l -exec rm -f "{}" ";"' >> $LRMS_JOB_SCRIPT echo ' find ./ -type f -exec chmod u+w "{}" ";"' >> $LRMS_JOB_SCRIPT if [ -f "$joboption_controldir/job.$joboption_gridid.output" ] ; then cat "$joboption_controldir/job.$joboption_gridid.output" | \ # remove leading backslashes, if any sed 's/^\/*//' | \ # backslashes and spaces are escaped with a backslash in job.*.output. The # shell built-in read undoes this escaping. while read name rest; do # make it safe for shell by replacing single quotes with '\'' name=`printf "%s" "$name"|sed "s/'/'\\\\\\''/g"`; # protect from deleting output files including those in the dynamic list if [ "${name#@}" != "$name" ]; then # Does $name start with a @ ? dynlist=${name#@} echo " dynlist='$dynlist'" >> $LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<'EOSCR' chmod -R u-w "./$dynlist" 2>/dev/null cat "./$dynlist" | while read name rest; do chmod -R u-w "./$name" 2>/dev/null done EOSCR else echo " chmod -R u-w \"\$RUNTIME_JOB_DIR\"/'$name' 2>/dev/null" >> $LRMS_JOB_SCRIPT if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then echo " mv \"\$RUNTIME_JOB_DIR\"/'$name' ../." 
>> $LRMS_JOB_SCRIPT fi fi done fi echo ' find ./ -type f -perm /200 -exec rm -f "{}" ";"' >> $LRMS_JOB_SCRIPT echo ' find ./ -type f -exec chmod u+w "{}" ";"' >> $LRMS_JOB_SCRIPT echo 'fi' >> $LRMS_JOB_SCRIPT fi echo "" >> $LRMS_JOB_SCRIPT ############################################################## # Move files back to session directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id # !!!!!!!!!!!!!!!!!!! would be better to know the names of files !!!!!!!!!!! ############################################################## move_files_to_frontend if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-condor-job, JobScriptCreation: $t" >> $perflogfilesub fi ####################################### # Submit the job ####################################### echo "HTCondor job script built" 1>&2 # Execute condor_submit command cd "$joboption_directory" echo "HTCondor script follows:" 1>&2 echo "-------------------------------------------------------------------" 1>&2 cat "$LRMS_JOB_SCRIPT" 1>&2 echo "-------------------------------------------------------------------" 1>&2 echo "" 1>&2 CONDOR_RESULT=1 CONDOR_TRIES=0 while [ "$CONDOR_TRIES" -lt '10' ] ; do if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi ${CONDOR_SUBMIT} $LRMS_JOB_DESCRIPT 1>$LRMS_JOB_OUT 2>$LRMS_JOB_ERR CONDOR_RESULT="$?" if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-condor-job, JobSubmission: $t" >> $perflogfilesub fi if [ "$CONDOR_RESULT" -eq '0' ] ; then break ; fi CONDOR_TRIES=$(( $CONDOR_TRIES + 1 )) sleep 2 done if [ $CONDOR_RESULT -eq '0' ] ; then job_out=`cat $LRMS_JOB_OUT` if [ "${job_out}" = "" ]; then echo "job *NOT* submitted successfully!" 1>&2 echo "failed getting the condor jobid for the job!" 1>&2 echo "Submission: Local submission client behaved unexpectedly.">>"$failures_file" elif [ `echo "${job_out}" | grep -Ec "submitted to cluster\s[0-9]+"` != "1" ]; then echo "job *NOT* submitted successfully!" 1>&2 echo "badly formatted condor jobid for the job !" 1>&2 echo "Submission: Local submission client behaved unexpectedly.">>"$failures_file" else job_id=`echo $job_out | grep cluster | awk '{print $8}' | sed 's/[\.]//g'` hostname=`hostname -f` echo "joboption_jobid=${job_id}.${hostname}" >> $arg_file echo "condor_log=${joboption_directory}/log" >> $arg_file echo "job submitted successfully!" 1>&2 echo "local job id: $job_id" 1>&2 # Remove temporary files rm -f $LRMS_JOB_OUT $LRMS_JOB_ERR echo "----- exiting submit_condor_job -----" 1>&2 echo "" 1>&2 exit 0 fi else echo "job *NOT* submitted successfully!" 1>&2 echo "got error code from condor_submit: $CONDOR_RESULT !" 
1>&2 echo "Submission: Local submission client failed.">>"$failures_file" fi echo "Output is:" 1>&2 cat $LRMS_JOB_OUT 1>&2 echo "Error output is:" cat $LRMS_JOB_ERR 1>&2 rm -f "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "----- exiting submit_condor_job -----" 1>&2 echo "" 1>&2 exit 1 nordugrid-arc-5.4.2/src/services/a-rex/lrms/condor/PaxHeaders.7502/scan-condor-job.in0000644000000000000000000000012712721014037026372 xustar000000000000000027 mtime=1464080415.207124 30 atime=1513200650.285628675 30 ctime=1513200663.394789005 nordugrid-arc-5.4.2/src/services/a-rex/lrms/condor/scan-condor-job.in0000755000175000002070000007371612721014037026455 0ustar00mockbuildmock00000000000000#!@posix_shell@ progname=$(basename "$0") LRMS=Condor # for use in log messages # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? libexecdir="${ARC_LOCATION:-@prefix@}/@pkglibexecsubdir@/" pkgdatadir="$basedir" . "${pkgdatadir}/configure-condor-env.sh" || exit $? . "${pkgdatadir}/scan_common.sh" || exit $? # Log system performance if [ ! -z "$perflogdir" ]; then perflog_common "$perflogdir" "$CONFIG_controldir" fi ############################################################################# ########################## LRMS specific functions ########################## ############################################################################# # # Should print the id's of all jobs in the LRMS, one per line. If left # unimplemented then lrms_job_finished must be implemented. If it's # implemented then implementing lrms_job_finished is optional. # lrms_list_jobs() { LIST_IMPLEMENTED= } # # Should return 0 only if the job is not in the LRMS. The job's LRMS id is # stored in the lrmsid variable. It's called for all grid jobs that are in # INLRMS and CANCELING states and whose LRMS id was not listed by # lrms_list_jobs. STDOUT and STDERR are redirected to job.$gridid.error. # lrms_job_finished() { return 0 } # # Should attempt to collect accounting info from LRMS for a job. The job's # LRMS id is stored in the lrmsid variable. This function will be called after # the job has left the LRMS. Diagnostics might not be available right after # the job has finished and therefore a retry mechanism is implemented. If more # time is needed, the function should signal this by returning without setting # the LRMSExitcode variable. In this case it will be called again on the next # run on scan-*-jobs, but not more than $maxwait times for any given job. If # it sets LRMSExitcode, or $maxwait retries have already been done, then # lrms_last_call will be called shortly afterwards and the job declared done. # STDOUT and STDERR are redirected to job.$gridid.errors. The interval between # successive runs of scan-*-jobs is controlled by $wakeupperiod. # Input variables: # * gridid # * lrmsid # * sessiondir # * uid -- numerical unix ID of the user owning the job # The following variables are initialized with values read from # $sessiondir.diag. All except exitcode are expected to be updated by this # function: # * exitcode -- It's the exitcode of the user's executable, as reported by # the job wrapper. Do not change. 
# * nodename -- may contain multiple lines, one execution node per line # * WallTime -- in seconds # * UserTime -- in seconds # * KernelTime -- in seconds # * TotalMemory -- in kB # * ResidentMemory -- in kB # * LRMSStartTime -- in Mds time format, UTC time zone (20091201140049Z) # * LRMSEndTime -- in Mds time format, UTC time zone (20091201140049Z) # Output variables: # * LRMSExitcode -- as reported by the LRMS. It will be saved to the .diag file # * LRMSMessage -- any clues obtained from the LRMS about job failure. It # content will be addedd to .lrms_done in case LRMSExitcode is not 0. # lrms_get_accounting() { ACCT_IMPLEMENTED= LRMSExitcode=${exitcode:--1} } # # Called just before uptading .diag and writing the .lrms_done file. STDOUT and # STDERR are redirected to job.$gridid.error. Can be left as is. # Input/Output variables: # * the same as for lrms_get_accounting # * any variables set in lrms_get_accounting are visible here # lrms_last_call() { [ -n "$LRMSExitcode" ] \ || log "LRMS exit status not available for job $gridid ($LRMS id: $lrmsid)" [ -n "$ACCT_IMPLEMENTED" ] || LRMSExitcode= # Suspect killing due to resource limit exceeded only if exitcode is # missing or is > 128 (as in the case of a shell killed by a signal) if [ -z "$exitcode" ] || [ "$exitcode" -gt 128 ]; then read_grami; autodetect_overlimit fi } ############################################################################# # # scan-*-jobs has STDOUT redirected to /dev/null and STDERR redirected to # job.helper..errors # log () { echo "[`date +%Y-%m-%d\ %T`] $progname: $*" 1>&2; } # # Attempts to switch to uid passed as the first argument and then runs the # commands passed as the second argument in a shell. The remaining arguments # are passed as arguments to the shell. No warning is given in case switching # uid is not possible. # do_as_uid () { test $# -ge 2 || { log "do_as_uid requires 2 arguments"; return 1; } script='use English; my ($uid, @args) = @ARGV; if ( $UID == 0 ) { eval { $UID = $uid }; print STDERR "Cannot switch to uid($UID): $@\n" if $@; } system("@posix_shell@","-c",@args); exit 0 if $? eq 0; exit ($?>>8||128+($?&127)); ' /usr/bin/perl -we "$script" "$@" } # # Reads a line from STDIN and prints integer part on STDOUT. # If not a valid number, prints nothing and returns 1 # to_integer() { /usr/bin/perl -we 'chomp(my $line = <>); exit 0 if $line eq ""; if ( $line =~ m/^(\d*)(?:\.\d+)?$/ ) { print $1 || 0; } else { exit 1; }' } # # Input variables: # * sessiondir # * uid # Output variables: # * diagstring -- the whole contents of .diag # * nodename # * WallTime # * UserTime # * KernelTime # * TotalMemory # * ResidentMemory # * LRMSStartTime # * LRMSEndTime # * exitcode # job_read_diag() { [ -n "$uid" ] && [ -n "$sessiondir" ] \ || { log "job_read_diag requires the following to be set: uid sessiondir"; return 1; } diagfile=$sessiondir.diag; [ -f "$diagfile" ] || { log "diag file not found at: $sessiondir.diag"; return 1; } diagstring=$(do_as_uid $uid "tail -n 1000 '$diagfile'") [ $? 
= 0 ] || { log "cannot read diag file at: $diagfile"; return 1; } nodename=$(echo "$diagstring" | sed -n 's/^nodename=\(..*\)/\1/p') WallTime=$(echo "$diagstring" | sed -n 's/^WallTime=\([0-9.]*\)s/\1/p' | tail -n 1) UserTime=$(echo "$diagstring" | sed -n 's/^UserTime=\([0-9.]*\)s/\1/p' | tail -n 1) KernelTime=$(echo "$diagstring" | sed -n 's/^KernelTime=\([0-9.]*\)s/\1/p' | tail -n 1) TotalMemory=$(echo "$diagstring" | sed -n 's/^AverageTotalMemory=\([0-9.]*\)kB/\1/p' | tail -n 1) ResidentMemory=$(echo "$diagstring" | sed -n 's/^AverageResidentMemory=\([0-9.]*\)kB/\1/p' | tail -n 1) LRMSStartTime=$(echo "$diagstring" | sed -n 's/^LRMSStartTime=\([0-9][0-9]*Z\)/\1/p' | tail -n 1) LRMSEndTime=$(echo "$diagstring" | sed -n 's/^LRMSEndTime=\([0-9][0-9]*Z\)/\1/p' | tail -n 1) exitcode=$(echo "$diagstring" | sed -n 's/^exitcode=\([0-9]*\)/\1/p' | tail -n 1) for key in nodename WallTime UserTime KernelTime AverageTotalMemory AverageResidentMemory \ exitcode LRMSStartTime LRMSEndTime LRMSExitcode LRMSMessage; do diagstring=$(echo "$diagstring" | grep -v "^$key=") done # These are set on the font-end. Not to be overwritten diagstring=$(echo "$diagstring" | grep -v "^frontend_") diagstring=$(echo "$diagstring" | grep -v "^Processors=") } # # Input variables: # * sessiondir # * uid # * LRMSExitcode # * LRMSMessage # + all output variables from job_read_diag # job_write_diag() { [ -n "$uid" ] && [ -n "$sessiondir" ] \ || { log "job_write_diag requires the following to be set: uid sessiondir"; return 1; } diagfile=$sessiondir.diag; { echo "$diagstring" && echo [ -n "$nodename" ] && echo "$nodename" | sed -n 's/^\(..*\)/nodename=\1/p' [ -n "$RequestCpus" ] && echo "Processors=${RequestCpus}" [ -n "$WallTime" ] && echo "WallTime=${WallTime}s" [ -n "$UserTime" ] && echo "UserTime=${UserTime}s" [ -n "$KernelTime" ] && echo "KernelTime=${KernelTime}s" [ -n "$TotalMemory" ] && echo "AverageTotalMemory=${TotalMemory}kB" [ -n "$ResidentMemory" ] && echo "AverageResidentMemory=${ResidentMemory}kB" [ -n "$LRMSStartTime" ] && echo "LRMSStartTime=$LRMSStartTime" [ -n "$LRMSEndTime" ] && echo "LRMSEndTime=$LRMSEndTime" [ -n "$LRMSMessage" ] && echo "LRMSMessage=$LRMSMessage" [ -n "$LRMSExitcode" ] && echo "LRMSExitcode=$LRMSExitcode" [ -n "$exitcode" ] && echo "exitcode=$exitcode" } | do_as_uid $uid "cat > '$diagfile'" [ $? 
= 0 ] || { log "cannot write diag file at: $diagfile"; return 1; } } # Input variables # * gridid # Output variables: # * ReqWallTime # * ReqCPUTime # * ReqTotalMemory read_grami() { gramifile="job.$gridid.grami" [ -f "$gramifile" ] || { log "grami file not found: $PWD/$gramifile"; return 1; } ReqWallTime=$(sed -n "s/^joboption_walltime=//p" "$gramifile" | tail -n 1) ReqCPUTime=$(sed -n "s/^joboption_cputime=//p" "$gramifile" | tail -n 1) ReqTotalMemory=$(sed -n "s/^joboption_memory=//p" "$gramifile" | tail -n 1) ReqWallTime=$(echo $ReqWallTime | to_integer) || log "joboption_walltime not a number" ReqCPUTime=$(echo $ReqCPUTime | to_integer) || log "joboption_cputime not a number" ReqTotalMemory=$(echo $ReqTotalMemory | to_integer) || log "joboption_memory not a number" # convert MB to KB [ -n "$ReqTotalMemory" ] && ReqTotalMemory=$(( $ReqTotalMemory * 1024 )) log "---- Requested resources specified in grami file ----" [ -n "$ReqWallTime" ] && log "Requested walltime: $ReqWallTime seconds" [ -n "$ReqCPUTime" ] && log "Requested cputime: $ReqCPUTime seconds" [ -n "$ReqTotalMemory" ] && log "Requested memory: $(( $ReqTotalMemory / 1024 )) MB" log "-----------------------------------------------------" } # # Can be used from lrms_get_accounting() to guess whether the job was killed due to # an exceeded resource limit and set LRMSMessage accordingly. # Input variables # * gridid # * uid # * ReqWallTime # * ReqCPUTime # * ReqTotalMemory # * WallTime # * UserTime # * KernelTime # * TotalMemory # * ResidentMemory # * exitcode # * LRMSExitcode # * LRMSMessage # Output variables: # * overlimit (if set, then one of memory cputime walltime ) # autodetect_overlimit() { # round to integers wallt=$(echo $WallTime | to_integer) || log "WallTime not a number" usert=$(echo $UserTime | to_integer) || log "UserTime not a number" kernelt=$(echo $KernelTime | to_integer) || log "KernelTime not a number" totalmem=$(echo $TotalMemory | to_integer) || log "TotalMemory not a number" residentmem=$(echo $ResidentMemory | to_integer) || log "ResidentMemory not a number" cput=$(( ${usert:-0} + ${kernelt:-0} )) if [ -n "$cput" ] && [ "$cput" -gt 0 ] \ && [ -n "$ReqCPUTime" ] && [ "$ReqCPUTime" -gt 0 ] \ && [ $(( 100 * $cput / $ReqCPUTime )) -gt 95 ]; then overlimit="cputime" fi if [ -n "$wallt" ] && [ "$wallt" -gt 0 ] \ && [ -n "$ReqWallTime" ] && [ "$ReqWallTime" -gt 0 ] \ && [ $(( 100 * $wallt / $ReqWallTime )) -gt 95 ]; then overlimit="walltime" fi if [ -n "$totalmem" ] && [ "$totalmem" -gt 0 ] \ && [ -n "$ReqTotalMemory" ] && [ "$ReqTotalMemory" -gt 0 ] \ && [ $(( 100 * $totalmem / $ReqTotalMemory )) -gt 95 ]; then overlimit="memory" fi if [ -n "$residentmem" ] && [ "$residentmem" -gt 0 ] \ && [ -n "$ReqTotalMemory" ] && [ "$ReqTotalMemory" -gt 0 ] \ && [ $(( 100 * $residentmem / $ReqTotalMemory )) -gt 95 ]; then overlimit="memory" fi [ -n "$overlimit" ] && log "Job have likely hit $overlimit limit" } # # Returns 0 at most maxwait calls for any given gridid. Returns 1 on # further calls or if an error has occured. # job_canwait() { [ -n "$gridid" ] && [ -n "$maxwait" ] \ || { log "job_canwait requires the following to be set: gridid, maxwait"; return 1; } countfile=job.$gridid.lrms_job if [ ! 
-f "$countfile" ]; then echo "1" > "$countfile" || { log "cannot write count file: $PWD/$countfile"; return 1; } else count=$(head -n 1 "$countfile") || { log "cannot read count file: $PWD/$countfile"; return 1; } [ -z "$count" ] && { log "empty count file: $PWD/$countfile"; return 1; } dummy=$(echo "$count" | grep -v '[0-9]') && { log "not an integer in count file: $PWD/$countfile"; return 1; } [ "$count" -lt "$maxwait" ] || { rm -f "$countfile"; return 1; } echo "$(( $count + 1 ))" > "$countfile" || { log "cannot write count file: $PWD/$countfile"; return 1; } fi return 0 } # # Append .comment (containing STDOUT & STDERR of the job wrapper) to .errors # Input variables: # * uid # * sessiondir job_print_comment() { [ -n "$uid" ] && [ -n "$sessiondir" ] \ || { log "job_print_comment requires the following to be set: uid, sessiondir"; return 1; } commentfile=$sessiondir.comment [ -f "$commentfile" ] && do_as_uid "$uid" " echo '--------- Contents of output stream forwarded by $LRMS ------------' cat '$commentfile' echo '------------------------- End of output -------------------------' " || log "failed reading: $commentfile" } # In case overlimit is set, tweak what will go into .lrms_done set_overlimit_message() { [ -n "$overlimit" ] || return if [ $overlimit = "cputime" ]; then LRMSMessage="job killed: cput" elif [ $overlimit = "walltime" ]; then LRMSMessage="job killed: wall" elif [ $overlimit = "memory" ]; then LRMSMessage="job killed: vmem" else log "invalid value overlimit=$overlimit"; return 1 fi LRMSExitcode=271 } # # Input variables: # * gridid # * basedir # * exitcode # * LRMSExitcode # * LRMSMessage # * overlimit # job_write_donefile() { [ -n "$gridid" ] && [ -n "$basedir" ] && [ -n "$LRMS" ] \ || { log "job_write_donefile requires the following to be set: gridid, basedir, LRMS"; return 1; } set_overlimit_message if [ -n "$LRMSMessage" ] && [ "$LRMSExitcode" != 0 ]; then msg="$LRMSMessage" else if [ "$exitcode" = 0 ]; then if [ -z "$LRMSExitcode" ] || [ "$LRMSExitcode" = 0 ]; then msg= else msg="Job finished properly but $LRMS reported failure" fi elif [ -z "$exitcode" ]; then if [ "$LRMSExitcode" = 0 ]; then LRMSExitcode=-1; fi msg="Job was killed by $LRMS" else if [ "$LRMSExitcode" = 0 ]; then LRMSExitcode=-1; fi msg="Job failed with exit code $exitcode" fi fi log "${msg:-$LRMS job $lrmsid finished normally}" donefile=job.$gridid.lrms_done echo "${LRMSExitcode:--1} $msg" > $donefile || log "failed writing file: $PWD/$donefile" # wake up GM "${libexecdir}"/gm-kick "job.$gridid.local" } # # Should check that the job has exited lrms, and then do whatever post-processing is necesarry. # Called with STDOUT and STDERR redirected to the job.*.errors file. 
# Input variables: # * gridid # * lrmsid # * uid # process_job() { [ -n "$gridid" ] && [ -n "$lrmsid" ] && [ -n "$uid" ] && [ -n "$LRMS" ] \ || { log "process_job requires the following to be set: gridid, lrmsid, uid, LRMS"; return 1; } lrms_job_finished || return log "[$(date +%Y-%m-%d\ %T)] $LRMS job $lrmsid has exited" localfile=job.$gridid.local sessiondir=$(sed -n 's/^sessiondir=//p' "$localfile" | tail -n 1) [ -n "$sessiondir" ] || { log "failed reading sessiondir from: $PWD/$localfile"; return 1; } job_read_diag lrms_get_accounting if [ -z "$LRMSExitcode" ] && job_canwait; then : # Come back again next time else rm -f "$countfile" job_print_comment lrms_last_call job_write_diag job_write_donefile fi } scan_init () { [ -n "$basedir" ] || { log "basedir must be set"; exit 1; } [ -n "$LRMS" ] || { log "LRMS must be set"; exit 1; } LIST_IMPLEMENTED=yes ACCT_IMPLEMENTED=yes maxwait=5 wakeupperiod=60 trap 'sleep $wakeupperiod' EXIT TERM TMPDIR=${TMPDIR:-@tmp_dir@} export TMPDIR # default is shared sessiondirs if [ -z "$CONFIG_shared_filesystem" ]; then CONFIG_shared_filesystem=yes elif [ "$CONFIG_shared_filesystem" = 'no' ]; then CONFIG_shared_filesystem= fi } scan_main() { # Initial working directory myworkdir=$(pwd) || { log "pwd failed"; exit 1; } # Validate control directories supplied on command-line test -n "$1" || { log "control_dir not specified"; exit 1; } for ctr_dir in "$@"; do cd "$myworkdir" || { log "cannot cd to $myworkdir"; exit 1; } cd "$ctr_dir" || { log "erroneous control dir: $ctr_dir"; exit 1; } done for ctr_dir in "$@"; do cd "$myworkdir" || { log "cannot cd to $myworkdir"; exit 1; } cd "$ctr_dir" || { log "erroneous control dir: $ctr_dir"; exit 1; } # This perl script scans the 'processing' sub-directory for grid jobs # in INLRMS and CANCELING states. If not running as the superuser, also # filter out any jobs not belonging to the current user. Finally, # prints to STDOUT onle line for each job containing: # * grid ID, only the digits # * local ID, as in LRMS # * uid of owner of the job.*.local file listscript='use English; exit 1 unless opendir(DIR,"processing"); while (my $fname = readdir DIR) { my ($gridid, $lrmsid, $status); ($gridid) = ($fname =~ m/^job\.(\w+)\.status$/); next unless defined $gridid; next unless open(STATUS,"< processing/$fname"); $status = ; close STATUS; next unless $status and $status =~ m/^INLRMS|CANCELING$/; next unless open(LOCAL,"< job.$gridid.local"); my @stat = stat(LOCAL); { local $/=undef; ($lrmsid) = ( =~ m/^localid=(\d+)/m) }; close LOCAL; next unless $lrmsid; next unless @stat; next unless $EUID == 0 or $EUID == $stat[4]; print "$gridid $lrmsid $stat[4]\n"; } closedir DIR; ' # This perl script filters out from the output of the previous script # jobs whose lrms id is among the arguments passed to the script. filterscript='my $lrmsids = " @ARGV "; while(my $line = ) { chomp $line; my ($gridid,$lrmsid,$uid) = split / /, $line; next if $lrmsids =~ m/\s$lrmsid\s/; print "$gridid $lrmsid $uid\n"; } ' if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi lrmsids=$(lrms_list_jobs) || { log "lrms_list_jobs failed"; continue; } if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-condor-job, condor_q: $t" >> $perflogfile fi if [ -n "$LIST_IMPLEMENTED" ]; then filter_jobs() { /usr/bin/perl -we "$filterscript" $lrmsids; } else filter_jobs() { cat; } # no filtering fi if [ ! 
-z "$perflogdir" ]; then start_ts=`date +%s.%N` fi /usr/bin/perl -we "$listscript" | filter_jobs | while read gridid lrmsid uid; do log () { echo "$progname: $*" 1>&2; } donefile=job.$gridid.lrms_done [ -f "$donefile" ] && continue errorsfile=job.$gridid.errors [ -w "$errorsfile" ] || { log "cannot write to errors file at: $PWD/$errorsfile"; continue; } # run in separate process to make sure shell vars of one job # are not influencing other jobs ( process_job; ) >> "$errorsfile" 2>&1 done if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-condor-job, ControlDirTraversalAndProcessing: $t" >> $perflogfile fi done } ################################### Condor #################################### lrms_list_jobs() { script='my $cmd="$ENV{CONDOR_BIN_PATH}/condor_q"; open Q, "$cmd|" or die "Failed running $cmd : $!\n"; my $out; { local $/; $out = ; }; close Q; exit 0 if $out =~ m/All queues are empty/; die "Non-zero exit status returned by $cmd\n" if $?; my @ids = ($out =~ m/^\s*(\d+)\.\d+\s+/mg); print "$_\n" for @ids; ' /usr/bin/perl -we "$script" } condor_read_history() { # This Perl script reads and prints a per-job condor history file. We need to use a # hash rather than printing the file directly because some attributes appear multiple # times and we need to use the last occurrence. condorscript='use strict; my %data; if (-e $ARGV[0]) { open(FILE, "<$ARGV[0]"); foreach my $line () { if ($line =~ /([\w\+]+)\s=\s(.*)/) { $data{$1} = $2; } } foreach my $key (keys %data) { print $key." = ".$data{$key}."\n"; } } ' hostname=`hostname -f` historydir=`$CONDOR_BIN_PATH/condor_config_val HISTORY` perjobhistorydir=`$CONDOR_BIN_PATH/condor_config_val PER_JOB_HISTORY_DIR` historyfile=$perjobhistorydir/history.$lrmsid.0 if ! expr match "$perjobhistorydir" '.*Not\sdefined.*' > /dev/null; then # per-job history files are being used, so we can immediately find the right file histstring=$( /usr/bin/perl -we "$condorscript" "$historyfile" ) fi if expr match "$perjobhistorydir" '.*Not\sdefined.*' > /dev/null | test ! -f $historyfile; then # find the appropriate history file historyfile=`grep "$hostname#$lrmsid.0" -l $historydir*` # try to get the full job classad if expr match "$historyfile" '.*condor.*' > /dev/null; then { histstring=$( $CONDOR_BIN_PATH/condor_history -l -file $historyfile -match 1 "$lrmsid" ); } 2>&1 else histstring="" fi fi # the awk expression checks that the input is more than 1 line long [ $? 
= 0 ] && echo "$histstring" | awk 'END{if(NR<2){exit 1}}' || return 1 # Extract information from condor_history output __RemoteHost=$(echo "$histstring" | sed -n 's/^LastRemoteHost *= *"\(.*\)"[^"]*$/\1/p') __WallTime=$(echo "$histstring" | sed -n 's/^RemoteWallClockTime *= *\([0-9][0-9]*\).*/\1/p') __KernelTime=$(echo "$histstring" | sed -n 's/^RemoteSysCpu *= *\([0-9][0-9]*\).*/\1/p') __UserTime=$(echo "$histstring" | sed -n 's/^RemoteUserCpu *= *\([0-9][0-9]*\).*/\1/p') __ImageSize=$(echo "$histstring" | sed -n 's/^ImageSize *= *//p') __ExitCode=$(echo "$histstring" | sed -n 's/^ExitCode *= *//p') ExitStatus=$(echo "$histstring" | sed -n 's/^ExitStatus *= *//p') JobStatus=$(echo "$histstring" | sed -n 's/^JobStatus *= *//p') ExitSignal=$(echo "$histstring" | sed -n 's/^ExitSignal *= *//p') RemoveReason=$(echo "$histstring" | sed -n 's/^RemoveReason *= *"\(.*\)"[^"]*$/\1/p') ExitReason=$(echo "$histstring" | sed -n 's/^ExitReason *= *"\(.*\)"[^"]*$/\1/p') JobCurrentStartDate=$(echo "$histstring" | sed -n 's/^JobCurrentStartDate *= *\([0-9][0-9]*\).*/\1/p') EnteredCurrentStatus=$(echo "$histstring" | sed -n 's/^EnteredCurrentStatus *= *\([0-9][0-9]*\).*/\1/p') RequestCpus=$(echo "$histstring" | sed -n 's/^RequestCpus *= *//p') echo "$RemoveReason" | grep -q 'PeriodicRemove .*evaluated to \(TRUE\)' [ $? = 0 ] && PeriodicRemove=TRUE return 0 } seconds() { /usr/bin/perl -e 'my $str = "'"$1"'"; exit unless $str =~ /(\d+) (\d\d):(\d\d):(\d\d)/; printf "%.0f", ( $1 * 24 + $2 ) * 3600 + $3 * 60 + $4; ' } find_in_file() { file=$1; regex=$2; grep "$regex" "$file" | tail -n 1 | sed -n "s/\(.*\)$regex\(.*\)/\2/ip"; } condor_read_log() { # Find the Condor log. gramifile=job.$gridid.grami [ -f "$gramifile" ] || { log "grami file not found: $PWD/$gramifile"; return 1; } condor_log=$(sed -n 's/^condor_log=//p' "$gramifile" | tail -n 1) [ -n "$condor_log" ] || { log "condor_log not set in grami file: $PWD/$gramifile"; return 1; } log "condor log is at: $condor_log" [ -r "$condor_log" ] || { log "Condor log file not readable: $condor_log"; return 1; } # Parse condor log. 
Look for lines like: # (return value 0) # Image size of job updated: 692632 # Usr 0 00:37:09, Sys 0 00:00:04 - Total Remote Usage # Job executing on host: <129.240.86.70:32769> _RemoteHost=$( find_in_file "$condor_log" 'Job executing on host: *<\([^:>]*\)' ) _UserTime=$( find_in_file "$condor_log" 'Usr \([0-9][0-9]* [0-9][0-9]:[0-9][0-9]:[0-9][0-9]\).*Total Remote Usage' ) _KernelTime=$( find_in_file "$condor_log" 'Sys \([0-9][0-9]* [0-9][0-9]:[0-9][0-9]:[0-9][0-9]\).*Total Remote Usage' ) _ImageSize=$(find_in_file "$condor_log" 'Image size of job updated: \([0-9][0-9]*\)' ) _ExitCode=$( find_in_file "$condor_log" '(return value \([0-9][0-9]*\))' ) _UserTime=$(seconds "$_UserTime") _KernelTime=$(seconds "$_KernelTime") } lrms_get_accounting() { condor_read_history || { log "Job has exited but is not yet listed by condor_history"; return 1; } # set LRMSExitcode to signal that no more tries are necessary LRMSExitcode=-1 } lrms_last_call() { condor_read_log && { # override values read from .diag with those from condor log nodename=${_RemoteHost:-$nodename} UserTime=${_UserTime:-$UserTime} KernelTime=${_KernelTime:-$KernelTime} TotalMemory=${_ImageSize:-$TotalMemory} echo "$progname: ----- begin condor log ($condor_log) -----" cat "$condor_log" echo "$progname: ----- end condor log ($condor_log) -----" echo "$progname: ----- Information extracted from Condor log -----" [ -n "$_RemoteHost" ] && echo "$progname: RemoteHost=$_RemoteHost" [ -n "$_UserTime" ] && echo "$progname: UserTime=$_UserTime" [ -n "$_KernelTime" ] && echo "$progname: KernelTime=$_KernelTime" [ -n "$_ImageSize" ] && echo "$progname: ImageSize=$_ImageSize" [ -n "$_ExitCode" ] && echo "$progname: ExitCode=$_ExitCode" echo "$progname: -------------------------------------------------" } if [ -z "$LRMSExitcode" ]; then log "$progname: No condor_history for Condor ID $lrmsid" else # override with values from condor_history nodename=${__RemoteHost:-$nodename} WallTime=${__WallTime:-$WallTime} UserTime=${__UserTime:-$UserTime} KernelTime=${__KernelTime:-$KernelTime} TotalMemory=${__ImageSize:-$TotalMemory} echo "$progname: ----- begin condor history message -----" echo "$histstring" echo "$progname: ----- end condor history message -----" echo "$progname: ----- Information extracted from condor_history -----" [ -n "$__RemoteHost" ] && echo "$progname: LastRemoteHost=$__RemoteHost" [ -n "$__WallTime" ] && echo "$progname: RemoteWallClockTime=$__WallTime" [ -n "$__UserTime" ] && echo "$progname: RemoteUserCpu=$__UserTime" [ -n "$__KernelTime" ] && echo "$progname: RemoteSysCpu=$__KernelTime" [ -n "$__ImageSize" ] && echo "$progname: ImageSize=$__ImageSize" [ -n "$__ExitCode" ] && echo "$progname: ExitCode=$__ExitCode" [ -n "$ExitStatus" ] && echo "$progname: ExitStatus=$ExitStatus" [ -n "$JobStatus" ] && echo "$progname: JobStatus=$JobStatus" [ -n "$ExitSignal" ] && echo "$progname: ExitSignal=$ExitSignal" [ -n "$RemoveReason" ] && echo "$progname: RemoveReason=$RemoveReason" [ -n "$JobCurrentStartDate" ] && echo "$progname: JobCurrentStartDate=$JobCurrentStartDate" [ -n "$EnteredCurrentStatus" ] && echo "$progname: EnteredCurrentStatus=$EnteredCurrentStatus" [ -n "$ExitReason" ] && echo "$progname: ExitReason=$ExitReason" [ -n "$RequestCpus" ] && echo "$progname: RequestCpus=$RequestCpus" echo "$progname: -----------------------------------------------------" if [ -n "$JobCurrentStartDate" ]; then date_seconds_to_utc "$JobCurrentStartDate" seconds_to_mds_date "$return_date_seconds" LRMSStartTime=$return_mds_date echo "$progname: 
LRMSStartTime=$LRMSStartTime" fi if [ -n "$EnteredCurrentStatus" ]; then date_seconds_to_utc "$EnteredCurrentStatus" seconds_to_mds_date "$return_date_seconds" LRMSEndTime=$return_mds_date echo "$progname: LRMSEndTime=$LRMSEndTime" fi fi LRMSExitcode=${__ExitCode:-$_ExitCode} # set LRMSExitcode to signal that no more tries are necessary [ -n "$LRMSExitcode" ] || log "ExitCode not found in condor log and condor_history" # set message in case condor killed the job. LRMSExitcode should not be 0. if [ -n "$PeriodicRemove" ]; then [ "$LRMSExitcode" = 0 ] && LRMSExitcode= LRMSMessage="PeriodicRemove evaluated to TRUE" elif [ -n "$RemoveReason" ] && [ "$RemoveReason" != "None" ]; then [ "$LRMSExitcode" = 0 ] && LRMSExitcode= LRMSMessage="RemoveReason: $RemoveReason" elif [ -n "$ExitReason" ] && [ "$ExitReason" != "None" ]; then [ "$LRMSExitcode" = 0 ] && LRMSExitcode= LRMSMessage="ExitReason: $ExitReason" fi # Check whether the job was killed by Condor. If yes, check for exceeded resources limits if ( [ -n "$RemoveReason" ] && [ "$RemoveReason" != "None" ] ) || [ -n "$PeriodicRemove" ]; then read_grami; autodetect_overlimit fi # Condor does not write a .diag file. exitcode=$LRMSExitcode } scan_init scan_main "$@" exit 0 nordugrid-arc-5.4.2/src/services/a-rex/lrms/condor/PaxHeaders.7502/README0000644000000000000000000000012311016612002023730 xustar000000000000000026 mtime=1211831298.62818 27 atime=1513200576.570727 30 ctime=1513200663.390788956 nordugrid-arc-5.4.2/src/services/a-rex/lrms/condor/README0000644000175000002070000000003011016612002023767 0ustar00mockbuildmock00000000000000Condor control scripts. nordugrid-arc-5.4.2/src/services/a-rex/lrms/PaxHeaders.7502/config_parser.sh0000644000000000000000000000012411542336027024760 xustar000000000000000027 mtime=1300872215.906494 27 atime=1513200576.603727 30 ctime=1513200663.300787856 nordugrid-arc-5.4.2/src/services/a-rex/lrms/config_parser.sh0000644000175000002070000000703611542336027025033 0ustar00mockbuildmock00000000000000############################################## # Configuration parser module. # Requires a POSIX shell and perl ############################################## # # Synopsis: # # . config_parser.sh # # config_parse_file /etc/arc/arex.xml || exit 1 # config_import_section common # config_import_section grid-manager # config_import_section infosys # # set | grep CONFIG_ # # config_match_section queue/short || echo No such queue # # port=$(config_print_option gridftpd port) # # for name in config_subsections queue; do # echo Found section: queue/$name # fi # ############################################## # # Parse the config file given as an argument # config_parse_file() { arex_conf=$1 if [ -z "$arex_conf" ]; then echo 'config_parser: No config file given!' 1>&2 return 1 elif [ ! -r "$arex_conf" ]; then echo "config_parser: Cannot read config file: $arex_conf" 1>&2 return 1 fi if [ -z "$pkgdatadir" ]; then echo "pkgdatadir must be set" 1>&2; return 1; fi config=`/usr/bin/perl -I$pkgdatadir -MConfigCentral -we 'ConfigCentral::printLRMSConfigScript($ARGV[0])' "$arex_conf"` || return $? eval "$config" || return $? unset config return 0 } # # Imports a section of the config file into shell variables. 
# Option names from will be prefixed with CONFIG_ # config_import_section() { block=$1 i=0 if [ -z "$_CONFIG_NUM_BLOCKS" ]; then return 1; fi while [ $i -lt $_CONFIG_NUM_BLOCKS ]; do i=$(($i+1)) eval name="\$_CONFIG_BLOCK${i}_NAME" if [ "x$block" != "x$name" ]; then continue; fi eval num="\$_CONFIG_BLOCK${i}_NUM" if [ -z "$num" ]; then return 1; fi j=0 while [ $j -lt $num ]; do j=$(($j+1)) eval name="\$_CONFIG_BLOCK${i}_OPT${j}_NAME" if [ -z "$name" ]; then return 1; fi eval "CONFIG_$name=\$_CONFIG_BLOCK${i}_OPT${j}_VALUE" done return 0 done return 1 } config_print_option() { block=$1 opt=$2 i=0 if [ -z "$_CONFIG_NUM_BLOCKS" ]; then return 1; fi while [ $i -lt $_CONFIG_NUM_BLOCKS ]; do i=$(($i+1)) eval name="\$_CONFIG_BLOCK${i}_NAME" if [ "x$block" != "x$name" ]; then continue; fi eval num="\$_CONFIG_BLOCK${i}_NUM" if [ -z "$num" ]; then return 1; fi j=0 val= while [ $j -lt $num ]; do j=$(($j+1)) eval name="\$_CONFIG_BLOCK${i}_OPT${j}_NAME" if [ -z "$name" ]; then return 1; fi if [ "x$name" = "x$opt" ]; then eval "val=\$_CONFIG_BLOCK${i}_OPT${j}_VALUE" fi done echo -n "$val" [ -n "$val" ] && return 0 done return 1 } config_match_section() { block=$1 i=0 if [ -z "$_CONFIG_NUM_BLOCKS" ]; then return 1; fi while [ $i -lt $_CONFIG_NUM_BLOCKS ]; do i=$(($i+1)) eval name="\$_CONFIG_BLOCK${i}_NAME" if [ "x$block" = "x$name" ]; then return 0; fi done return 1 } config_subsections() { block=$1 i=0 if [ -z "$_CONFIG_NUM_BLOCKS" ]; then return 1; fi { while [ $i -lt $_CONFIG_NUM_BLOCKS ]; do i=$(($i+1)) eval name="\$_CONFIG_BLOCK${i}_NAME" tail=${name#$block/} if [ "x$name" != "x$tail" ]; then echo ${tail%%/*}; fi done } | sort -u } config_hide_all() { unset `set|cut -f1 -d=|grep '^CONFIG_[A-Za-z0-9_]*$'` } config_reset() { config_hide_all unset `set|cut -f1 -d=|grep '^_CONFIG_[A-Za-z0-9_]*$'` } config_destroy() { config_reset unset config_parse_file unset config_import_section unset config_match_section unset config_subsections unset config_hide_all unset config_reset unset config_destroy } nordugrid-arc-5.4.2/src/services/a-rex/lrms/PaxHeaders.7502/lsf0000644000000000000000000000013213214316027022305 xustar000000000000000030 mtime=1513200663.420789323 30 atime=1513200668.717854109 30 ctime=1513200663.420789323 nordugrid-arc-5.4.2/src/services/a-rex/lrms/lsf/0000755000175000002070000000000013214316027022430 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/lrms/lsf/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712047045306024424 xustar000000000000000027 mtime=1352420038.089673 30 atime=1513200603.234053216 30 ctime=1513200663.415789262 nordugrid-arc-5.4.2/src/services/a-rex/lrms/lsf/Makefile.am0000644000175000002070000000014612047045306024467 0ustar00mockbuildmock00000000000000dist_pkgdata_DATA = configure-lsf-env.sh pkgdata_SCRIPTS = scan-lsf-job submit-lsf-job cancel-lsf-job nordugrid-arc-5.4.2/src/services/a-rex/lrms/lsf/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315733024432 xustar000000000000000030 mtime=1513200603.270053656 30 atime=1513200650.461630828 30 ctime=1513200663.416789274 nordugrid-arc-5.4.2/src/services/a-rex/lrms/lsf/Makefile.in0000644000175000002070000005003113214315733024477 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/lsf DIST_COMMON = README $(dist_pkgdata_DATA) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in $(srcdir)/cancel-lsf-job.in \ $(srcdir)/scan-lsf-job.in $(srcdir)/submit-lsf-job.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = submit-lsf-job cancel-lsf-job scan-lsf-job CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" SCRIPTS = $(pkgdata_SCRIPTS) SOURCES = DIST_SOURCES = DATA = $(dist_pkgdata_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = 
@ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = 
@INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ 
host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ dist_pkgdata_DATA = configure-lsf-env.sh pkgdata_SCRIPTS = scan-lsf-job submit-lsf-job cancel-lsf-job all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/lsf/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/lsf/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): submit-lsf-job: $(top_builddir)/config.status $(srcdir)/submit-lsf-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cancel-lsf-job: $(top_builddir)/config.status $(srcdir)/cancel-lsf-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ scan-lsf-job: $(top_builddir)/config.status $(srcdir)/scan-lsf-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | 
\ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dist_pkgdataDATA install-pkgdataSCRIPTS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dist_pkgdataDATA \ install-dvi install-dvi-am install-exec install-exec-am \ install-html install-html-am install-info install-info-am \ install-man install-pdf install-pdf-am install-pkgdataSCRIPTS \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \ uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS # Tell versions [3.59,3.63) of GNU make to not export all variables. 
# Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/lrms/lsf/PaxHeaders.7502/submit-lsf-job.in0000644000000000000000000000012712733561146025564 xustar000000000000000027 mtime=1466884710.220101 30 atime=1513200650.478631036 30 ctime=1513200663.420789323 nordugrid-arc-5.4.2/src/services/a-rex/lrms/lsf/submit-lsf-job.in0000755000175000002070000003374112733561146025641 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -xv # # Submits job to LSF # Input: path to grami file (same as Globus). # # A temporary job script is created for the submission and then removed # at the end of this script. echo "----- starting submit_lsf_job -----" 1>&2 joboption_lrms=lsf # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkgdatadir="$basedir" ############################################################## # Set LSF specific environment. # relies on $joboption_queue # ############################################################## . ${pkgdatadir}/configure-lsf-env.sh || exit $? . ${pkgdatadir}/submit_common.sh || exit $? ############################################################## # Parse grami file, read arc config ############################################################## perflogfilesub="${perflogdir}/submission.perflog" if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi init $1 read_arc_conf # enforce this #RUNTIME_FRONTEND_SEES_NODE='yes' #RUNTIME_NODE_SEES_FRONTEND='yes' #TODO should this be specified here? joboption_localtransfer='no' ############################################################## # Zero stage of runtime environments ############################################################## RTE_stage0 ############################################################## # create temp job script ############################################################## mktempscript ############################################################## # Start job script ############################################################## LSF_BSUB='bsub' LSF_BPARAMS='bparams' if [ ! -z "$LSF_BIN_PATH" ] ; then LSF_BSUB=${LSF_BIN_PATH}/${LSF_BSUB} LSF_BPARAMS=${LSF_BIN_PATH}/${LSF_BPARAMS} fi echo "#! /bin/bash" > $LRMS_JOB_SCRIPT echo "#LSF batch job script built by grid-manager" >> $LRMS_JOB_SCRIPT echo "#" >> $LRMS_JOB_SCRIPT # Specify the bash shell as default #echo "#BSUB -L /bin/bash" >> $LRMS_JOB_SCRIPT # Write output to comment file: echo "#BSUB -oo ${joboption_directory}.comment" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT # Choose queue(s). if [ ! -z "${joboption_queue}" ] ; then echo "#BSUB -q $joboption_queue" >> $LRMS_JOB_SCRIPT fi if [ ! -z "${joboption_rsl_architecture}" ] ; then queuearch=`echo ${joboption_rsl_architecture}|sed 's/\"//g'` echo "#BSUB -R type=${queuearch}" >> $LRMS_JOB_SCRIPT else if [ ! -z $CONFIG_lsf_architecture ] ; then echo "#BSUB -R type=$CONFIG_lsf_architecture" >> $LRMS_JOB_SCRIPT fi fi # Project name for accounting if [ ! -z "${joboption_rsl_project}" ] ; then echo "#BSUB -P $joboption_rsl_project" >> $LRMS_JOB_SCRIPT fi # job name for convenience if [ ! 
-z "${joboption_jobname}" ] ; then jobname=`echo "$joboption_jobname" | \ sed 's/^\([^[:alpha:]]\)/N\1/' | \ sed 's/[^[:alnum:]]/_/g' | \ sed 's/\(...............\).*/\1/'` echo "#BSUB -J $jobname" >> $LRMS_JOB_SCRIPT fi echo "LSF jobname: $jobname" 1>&2 ############################################################## # (non-)parallel jobs ############################################################## set_count ############################################################## # parallel jobs ############################################################## echo "#BSUB -n $joboption_count" >> $LRMS_JOB_SCRIPT # parallel structure if [ ! -z $joboption_countpernode ] && [ $joboption_countpernode != '-1' ] ; then echo "#BSUB -R span[ptile=$joboption_countpernode]" >> $LRMS_JOB_SCRIPT fi # exclusive execution if [ "$joboption_exclusivenode" = "true" ]; then echo "#BSUB -x" >> $LRMS_JOB_SCRIPT fi ############################################################## # Execution times (obtained in seconds) ############################################################## #OBS: Assuming here that LSB_JOB_CPULIMIT=y or is unset. if [ -n "$joboption_cputime" ] && [ $joboption_cputime -gt 0 ]; then cputime=$(( ${joboption_cputime} / 60 )) echo "#BSUB -c ${cputime}" >> $LRMS_JOB_SCRIPT fi if [ -n "$joboption_walltime" ] && [ $joboption_walltime -gt 0 ] ; then walltime=$(( ${joboption_walltime} / 60 )) echo "#BSUB -W ${walltime}" >> $LRMS_JOB_SCRIPT fi ############################################################## # Requested memory (mb) ############################################################## set_req_mem #-M is memory limit per process in LSF, so no need to modify memory limit based on count. if [ ! -z "$joboption_memory" ]; then memory=$(( ${joboption_memory} * 1024 )) echo "#BSUB -M ${memory}" >> $LRMS_JOB_SCRIPT fi ############################################################## # Start Time ############################################################## if [ -n "$joboption_starttime" ] ; then echo "#BSUB -b ${joboption_starttime}" >> $LRMS_JOB_SCRIPT fi ############################################################## # priority ############################################################## if [ ! -z "$joboption_priority" ]; then #first we must parse the max priority maxprio=`bparams -a| grep MAX_USER_PRIORITY | cut -f 2 -d '=' | cut -f 2 -d ' '` #scale priority LSF: 1 -> MAX_USER_PRIORITY ARC: 0-100 if [ ! 
-z "$maxprio" ]; then if [ "$maxprio" -gt "0" ]; then priority=$((joboption_priority * ($maxprio - 1) / 100 +1)) echo "#BSUB -sp ${priority}" >> $LRMS_JOB_SCRIPT fi fi fi ############################################################## # Override umask ############################################################## echo "# Overide umask of execution node (sometime values are really strange)" >> $LRMS_JOB_SCRIPT echo "umask 077" >> $LRMS_JOB_SCRIPT echo " " >> $LRMS_JOB_SCRIPT sourcewithargs_jobscript ############################################################## # Add environment variables ############################################################## add_user_env ############################################################## # Check for existence of executable, # there is no sense to check for executable if files are # downloaded directly to computing node ############################################################## if [ -z "${joboption_arg_0}" ] ; then echo 'Executable is not specified' 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "Submission: Job description error.">>"$failures_file" exit 1 fi if [ ! "$joboption_localtransfer" = 'yes' ] ; then program_start=`echo ${joboption_arg_0} | head -c 1 2>&1` if [ "$program_start" != '$' ] && [ "$program_start" != '/' ] ; then if [ ! -f $joboption_directory/${joboption_arg_0} ] ; then echo 'Executable does not exist, or permission denied.' 1>&2 echo " Executable $joboption_directory/${joboption_arg_0}" 1>&2 echo " whoami: "`whoami` 1>&2 echo " ls -l $joboption_directory/${joboption_arg_0}: "`ls -l $joboption_directory/${joboption_arg_0}` exit 1 fi if [ ! -x $joboption_directory/${joboption_arg_0} ] ; then echo 'Executable is not executable' 1>&2 exit 1 fi fi fi ####################################################################### # copy information useful for transfering files to/from node directly ####################################################################### if [ "$joboption_localtransfer" = 'yes' ] ; then setup_local_transfer fi ###################################################################### # Adjust working directory for tweaky nodes # RUNTIME_GRIDAREA_DIR should be defined by external means on nodes ###################################################################### if [ ! 
-z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then setup_runtime_env else echo "RUNTIME_JOB_DIR=$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid" >> $LRMS_JOB_SCRIPT echo "RUNTIME_JOB_DIAG=$RUNTIME_LOCAL_SCRATCH_DIR/${joboption_gridid}.diag" >> $LRMS_JOB_SCRIPT RUNTIME_STDIN_REL=`echo "${joboption_stdin}" | sed "s#^${joboption_directory}/*##"` RUNTIME_STDOUT_REL=`echo "${joboption_stdout}" | sed "s#^${joboption_directory}/*##"` RUNTIME_STDERR_REL=`echo "${joboption_stderr}" | sed "s#^${joboption_directory}/*##"` if [ "$RUNTIME_STDIN_REL" = "${joboption_stdin}" ] ; then echo "RUNTIME_JOB_STDIN=\"${joboption_stdin}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDIN=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDIN_REL\"" >> $LRMS_JOB_SCRIPT fi if [ "$RUNTIME_STDOUT_REL" = "${joboption_stdout}" ] ; then echo "RUNTIME_JOB_STDOUT=\"${joboption_stdout}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDOUT=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDOUT_REL\"" >> $LRMS_JOB_SCRIPT fi if [ "$RUNTIME_STDERR_REL" = "${joboption_stderr}" ] ; then echo "RUNTIME_JOB_STDERR=\"${joboption_stderr}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDERR=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDERR_REL\"" >> $LRMS_JOB_SCRIPT fi fi ############################################################## # Add std... to job arguments ############################################################## include_std_streams ############################################################## # Move files to local working directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_node echo "" >> $LRMS_JOB_SCRIPT echo "RESULT=0" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT ##################################################### # Download input files #################################################### download_input_files ##################################################### # Go to working dir and start job #################################################### echo "# Changing to session directory" >> $LRMS_JOB_SCRIPT echo "cd \$RUNTIME_JOB_DIR" >> $LRMS_JOB_SCRIPT echo "export HOME=\$RUNTIME_JOB_DIR" >> $LRMS_JOB_SCRIPT ############################################################## # Skip execution if something already failed ############################################################## echo "" >> $LRMS_JOB_SCRIPT echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT echo "echo \"runtimeenvironments=\$runtimeenvironments\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime configuration at computing node ############################################################## RTE_stage1 #extra checks if [ -z "$RUNTIME_NODE_SEES_FRONTEND" ] ; then echo "Nodes detached from gridarea are not supported when LSF is used. 
Aborting job submit" 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" exit 1 fi gate_host=`uname -n` if [ -z "$gate_host" ] ; then echo "Can't get own hostname" 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" exit 1 fi ############################################################## # Execution ############################################################## cd_and_run ############################################################## # End of RESULT checks ############################################################## echo "fi" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime (post)configuration at computing node ############################################################## configure_runtime ##################################################### # Upload output files #################################################### if [ "$joboption_localtransfer" = 'yes' ] ; then echo "UPLOADER=\${UPLOADER:-$basedir/uploader}" >> $LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<'EOSCR' if [ "$RESULT" = '0' ] ; then $UPLOADER -p -c 'local' "$RUNTIME_CONTROL_DIR" "$RUNTIME_JOB_DIR" 2>>${RUNTIME_CONTROL_DIR}/job.local.errors if [ $? -ne '0' ] ; then echo 'ERROR: Uploader failed.' 1>&2 if [ "$RESULT" = '0' ] ; then RESULT=1 ; fi fi fi rm -f "${RUNTIME_CONTROL_DIR}/job.local.proxy" EOSCR fi ############################################################## # Move files back to session directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_frontend ####################################### # Submit the job ####################################### # Execute bsub command cd "$joboption_directory" #chmod 0755 $LRMS_JOB_SCRIPT # We make the assumption that $joboption_directory is locally available according to the requirements of any arc installation echo "----------------- BEGIN job script -----" 1>&2 cat $LRMS_JOB_SCRIPT 1>&2 echo "----------------- END job script -----" 1>&2 if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-lsf-job, JobScriptCreation: $t" >> $perflogfilesub fi if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi ${LSF_BSUB} < $LRMS_JOB_SCRIPT 1>$LRMS_JOB_OUT 2>$LRMS_JOB_ERR LSF_RESULT="$?" if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-lsf-job, JobSubmission: $t" >> $perflogfilesub fi if [ $LSF_RESULT -eq '0' ] ; then job_id=`cat $LRMS_JOB_OUT | awk '{split($0,field," ");print field[2]}' | sed 's/[<>]//g'` if [ "${job_id}" = "" ] ; then echo "job *NOT* submitted successfully!" 1>&2 echo "failed getting the jobid for the job!" 1>&2 else echo "joboption_jobid=$job_id" >> $arg_file echo "job submitted successfully!" 1>&2 echo "local job id: $job_id" 1>&2 # Remove temporary job script file rm -f $LRMS_JOB_SCRIPT $LRMS_JOB_OUT $LRMS_JOB_ERR echo "----- exiting submit_lsf_job -----" 1>&2 echo "" 1>&2 exit 0 fi else echo "job *NOT* submitted successfully!" 1>&2 echo "got error code from qsub!" 
1>&2 fi echo "Output is:" 1>&2 cat $LRMS_JOB_OUT 1>&2 echo "Error output is:" cat $LRMS_JOB_ERR 1>&2 rm -f $LRMS_JOB_SCRIPT $LRMS_JOB_OUT $LRMS_JOB_ERR echo "----- exiting submit_lsf_job -----" 1>&2 echo "" 1>&2 exit 1 nordugrid-arc-5.4.2/src/services/a-rex/lrms/lsf/PaxHeaders.7502/cancel-lsf-job.in0000644000000000000000000000012711562710611025476 xustar000000000000000027 mtime=1305186697.469298 30 atime=1513200650.495631244 30 ctime=1513200663.418789299 nordugrid-arc-5.4.2/src/services/a-rex/lrms/lsf/cancel-lsf-job.in0000755000175000002070000000120211562710611025536 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -x # # Cancel job running in LSF. # echo "----- starting cancel_lsf_job -----" 1>&2 joboption_lrms=lsf # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkgdatadir="$basedir" . ${pkgdatadir}/cancel_common.sh || exit $? LSF_BKILL='bkill' if [ ! -z "$LSF_BIN_PATH" ] ; then LSF_BKILL="${LSF_BIN_PATH}/${LSF_BKILL} -s 9 " fi echo "executing ${LSF_BKILL} with job id ${joboption_jobid}" 1>&2 $LSF_BKILL $joboption_jobid echo "----- exiting cancel_lsf_job -----" 1>&2 echo "" 1>&2 exit 0 nordugrid-arc-5.4.2/src/services/a-rex/lrms/lsf/PaxHeaders.7502/scan-lsf-job.in0000644000000000000000000000012712721014037025172 xustar000000000000000027 mtime=1464080415.207124 30 atime=1513200650.510631427 30 ctime=1513200663.419789311 nordugrid-arc-5.4.2/src/services/a-rex/lrms/lsf/scan-lsf-job.in0000755000175000002070000001527012721014037025244 0ustar00mockbuildmock00000000000000#!@posix_shell@ # # # Scan for finished LSF jobs, using bjobs # # usage: scan_lsf_job control_dir ... # Set variables: # LSF_BIN_PATH # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? libexecdir="${ARC_LOCATION:-@prefix@}/@pkglibexecsubdir@/" pkgdatadir="$basedir" # Assume that gm-kick is installed in the same directory GMKICK=${libexecdir}/gm-kick . "${pkgdatadir}/configure-lsf-env.sh" || exit $? . "${pkgdatadir}/scan_common.sh" || exit $? umask 022 if [ -z "$1" ] ; then echo "Missing Input Script file as arg1" 1>&2 exit 1 ; fi # Log system performance if [ ! -z "$perflogdir" ]; then perflog_common "$perflogdir" "$CONFIG_controldir" fi # first control_dir is used for storing own files echo `date`" : control_dir=$1" 1>&2 control_dir=$1 control_dirs= while [ $# -gt 0 ] ; do control_dirs="${control_dirs} $1" shift done my_id=`id -u` # Get all running jobs # if [ -z ${LSF_BIN_PATH} ]; then echo "${LSF_BIN_PATH} not set" 1>&2 exit 1 fi if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi lsf_stat=`${LSF_BIN_PATH}/bjobs -a -u all 2>/dev/null` # | grep RUN | grep '^ [:digit:]' if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-lsf-job, bjobs -a -u all: $t" >> $perflogfile fi if [ -z "${lsf_stat}" ] ; then echo "bjobs returned empty result" 1>&2 fi pids=`echo "${lsf_stat}" | egrep 'PSUSP|USUSP|SSUSP|RUN|PEND' | sed -e 's/^\([^ ]*\).*/\1/'` eval "set -- $control_dirs" # Go through directories for ctr_dir in $control_dir ; do if [ ! 
-z "$perflogdir" ]; then start_ts=`date +%s.%N` fi # Obtain ids stored in job.*.local ids=`find ${ctr_dir}/processing -name 'job.*.status' -print0 \ | sed 's/processing\/job\.\([^\.]*\)\.status/job.\1.local/g' \ | xargs -0 grep -h "^localid=" 2>/dev/null | sed 's/^localid=\([0-9]*\).*/\1/'` if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-lsf-job, ControlDirTraversal: $t" >> $perflogfile fi if [ -z "$ids" ] ; then continue ; fi # compare them to running jobs and find missing bids= for id in $ids ; do found=`echo "$pids" | grep "^$id$"` if [ -z "$found" ] ; then bids="$bids $id" fi done if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi done_count=0 total_count=0 # go through missing ids for id in $bids ; do # find grid job corresponding to curent local id jobfile=`find ${ctr_dir}/processing -name 'job.*.status' -print0 \ | sed 's/processing\/job\.\([^\.]*\)\.status/job.\1.local/g' \ | xargs -0 grep -F -l "localid=$id" 2>/dev/null` if [ -z "$jobfile" ] ; then continue ; fi total_count=$(( total_count + 1 )) # extract grid id gridid=`basename "$jobfile" '.local' | sed 's/^job\.//'` donefile="${ctr_dir}/job.${gridid}.lrms_done" if [ -f "$donefile" ] ; then continue ; fi statusfile="${ctr_dir}/processing/job.${gridid}.status" if [ ! -f "$statusfile" ] ; then continue ; fi status=`cat "$statusfile"` if [ "$status" != "INLRMS" ] && [ "$status" != "CANCELING" ] ; then continue ; fi if [ "$my_id" != '0' ] ; then if [ ! -O "$jobfile" ] ; then continue ; fi fi uid=$(get_owner_uid "$jobfile") [ -z "$uid" ] && { log "Failed to stat $jobfile"; continue; } # get session directory of this job sessiondir=`grep -h '^sessiondir=' "$jobfile" | sed 's/^sessiondir=\(.*\)/\1/'` # get job specific output and remove header bjobs_output="`${LSF_BIN_PATH}/bjobs -W -w $id 2>/dev/null | sed -e'1,1d'`" job_status="`echo $bjobs_output | awk '{print $3}'`" # DONE if exit_code is 0, EXIT if non zero if [ "${job_status}" = "DONE" ] || [ "${job_status}" = "EXIT" ]; then job_read_diag starttime="`echo $bjobs_output | awk '{print $14}' | sed 's/-/ /g'`" endtime="`echo $bjobs_output | awk '{print $15}' | sed 's/-/ /g'`" date_to_utc_seconds "$starttime" starttime_seconds="$return_date_seconds" seconds_to_mds_date "$return_date_seconds" LRMSStartTime=$return_mds_date date_to_utc_seconds "$endtime" endtime_seconds="$return_date_seconds" seconds_to_mds_date "$return_date_seconds" LRMSEndTime=$return_mds_date #TODO handle cputime (walltime * count?) etc. walltime=$(( $endtime_seconds - $starttime_seconds)) #cputime=$(( $walltime * $count)) # Values to write to diag. These will override values already written. 
[ -n "$walltime" ] && WallTime=$walltime #[ -n "$cputime" ] && UserTime=$cputime #[ -n "$cputime" ] && KernelTime=0 job_write_diag done_count=$(( done_count + 1 )) fi if [ -n "$sessiondir" ] ; then # have chance to obtain exit code diagfile="${sessiondir}.diag" if [ -n "$sessiondir" ] ; then # have chance to obtain exit code exitcode=$(do_as_uid "$uid" "grep '^exitcode=' '$diagfile'" | sed 's/^exitcode=//') fi if [ -n "$exitcode" ] ; then # job finished and exit code is known save_commentfile "$uid" "${sessiondir}.comment" "${ctr_dir}/job.${gridid}.errors" echo "$exitcode Executable finished with exit code $exitcode" > "$donefile" ${GMKICK} "$jobfile" continue fi fi # job has probaly finished and exit code is not known exitcode='-1' countfile="${ctr_dir}/job.${gridid}.lrms_job" counter=0 if [ -f "$countfile" ] ; then counter=`cat "$countfile"` counter=$(( $counter + 1 )) fi if [ "$counter" -gt 5 ] ; then rm -f "$countfile" save_commentfile "$uid" "${sessiondir}.comment" "${ctr_dir}/job.${gridid}.errors" echo "$exitcode Job was lost with unknown exit code" > "$donefile" ${GMKICK} "$jobfile" else echo "$counter" > "$countfile" fi done if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-lsf-job, JobProcessing, T=$total_count D=$done_count: $t" >> $perflogfile fi # go through existing ids for id in $pids ; do # find grid job corresponding to curent local id jobfile=`find ${ctr_dir} -name 'job.*.local' -print0 | xargs -0 grep -F -l "localid=$id." 2>/dev/null` if [ -z "$jobfile" ] ; then continue ; fi gridid=`basename "$jobfile" '.local' | sed 's/^job\.//'` countfile="${ctr_dir}/job.${gridid}.lrms_job" # reset failure counter rm -f "$countfile" done done sleep 60 exit 0 nordugrid-arc-5.4.2/src/services/a-rex/lrms/lsf/PaxHeaders.7502/configure-lsf-env.sh0000644000000000000000000000012313044140007026241 xustar000000000000000027 mtime=1485881351.109312 27 atime=1513200576.571727 29 ctime=1513200663.41478925 nordugrid-arc-5.4.2/src/services/a-rex/lrms/lsf/configure-lsf-env.sh0000755000175000002070000000236313044140007026316 0ustar00mockbuildmock00000000000000# # set environment variables: # LSF_BIN_PATH # CONFIG_lsf_architecture # ############################################################## # Reading configuration from $ARC_CONFIG ############################################################## if [ -z "$pkgdatadir" ]; then echo 'pkgdatadir must be set' 1>&2; exit 1; fi . "$pkgdatadir/config_parser_compat.sh" || exit $? ARC_CONFIG=${ARC_CONFIG:-/etc/arc.conf} config_parse_file $ARC_CONFIG 1>&2 || exit $? config_import_section "common" config_import_section "infosys" config_import_section "grid-manager" config_import_section "cluster" # Also read queue section if [ ! -z "$joboption_queue" ]; then config_import_section "queue/$joboption_queue" fi # performance logging: if perflogdir or perflogfile is set, logging is turned on. So only set them when enable_perflog_reporting is ON unset perflogdir unset perflogfile enable_perflog=${CONFIG_enable_perflog_reporting:-no} if [ "$CONFIG_enable_perflog_reporting" == "expert-debug-on" ]; then perflogdir=${CONFIG_perflogdir:-/var/log/arc/perfdata} perflogfile="${perflogdir}/backends.perflog" fi # Path to LSF commands LSF_BIN_PATH=$CONFIG_lsf_bin_path if [ ! -d ${LSF_BIN_PATH} ] ; then echo "Could not set LSF_BIN_PATH." 
1>&2 exit 1 fi nordugrid-arc-5.4.2/src/services/a-rex/lrms/lsf/PaxHeaders.7502/README0000644000000000000000000000012311016612002023230 xustar000000000000000026 mtime=1211831298.62818 27 atime=1513200576.570727 30 ctime=1513200663.413789238 nordugrid-arc-5.4.2/src/services/a-rex/lrms/lsf/README0000644000175000002070000000002511016612002023273 0ustar00mockbuildmock00000000000000LSF control scripts. nordugrid-arc-5.4.2/src/services/a-rex/lrms/PaxHeaders.7502/pbs0000644000000000000000000000013213214316027022305 xustar000000000000000030 mtime=1513200663.335788284 30 atime=1513200668.717854109 30 ctime=1513200663.335788284 nordugrid-arc-5.4.2/src/services/a-rex/lrms/pbs/0000755000175000002070000000000013214316027022430 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/lrms/pbs/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712047045306024424 xustar000000000000000027 mtime=1352420038.089673 30 atime=1513200603.287053864 30 ctime=1513200663.331788235 nordugrid-arc-5.4.2/src/services/a-rex/lrms/pbs/Makefile.am0000644000175000002070000000014112047045306024462 0ustar00mockbuildmock00000000000000pkgdata_DATA = configure-pbs-env.sh pkgdata_SCRIPTS = submit-pbs-job cancel-pbs-job scan-pbs-job nordugrid-arc-5.4.2/src/services/a-rex/lrms/pbs/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315733024432 xustar000000000000000030 mtime=1513200603.322054292 30 atime=1513200650.527631635 30 ctime=1513200663.332788247 nordugrid-arc-5.4.2/src/services/a-rex/lrms/pbs/Makefile.in0000644000175000002070000005023213214315733024502 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/pbs DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/cancel-pbs-job.in $(srcdir)/configure-pbs-env.sh.in \ $(srcdir)/scan-pbs-job.in $(srcdir)/submit-pbs-job.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = submit-pbs-job cancel-pbs-job scan-pbs-job \ configure-pbs-env.sh CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" SCRIPTS = $(pkgdata_SCRIPTS) SOURCES = DIST_SOURCES = DATA = $(pkgdata_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = 
@ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ 
LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO 
= @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkgdata_DATA = configure-pbs-env.sh pkgdata_SCRIPTS = submit-pbs-job cancel-pbs-job scan-pbs-job all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/pbs/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/pbs/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): submit-pbs-job: $(top_builddir)/config.status $(srcdir)/submit-pbs-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cancel-pbs-job: $(top_builddir)/config.status $(srcdir)/cancel-pbs-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ scan-pbs-job: $(top_builddir)/config.status $(srcdir)/scan-pbs-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ configure-pbs-env.sh: $(top_builddir)/config.status $(srcdir)/configure-pbs-env.sh.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in 
files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-pkgdataDATA: $(pkgdata_DATA) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-pkgdataDATA install-pkgdataSCRIPTS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkgdataDATA uninstall-pkgdataSCRIPTS .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-pkgdataDATA install-pkgdataSCRIPTS \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \ uninstall-pkgdataDATA uninstall-pkgdataSCRIPTS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/lrms/pbs/PaxHeaders.7502/scan-pbs-job.in0000644000000000000000000000012712721014037025172 xustar000000000000000027 mtime=1464080415.207124 30 atime=1513200650.579632271 30 ctime=1513200663.334788271 nordugrid-arc-5.4.2/src/services/a-rex/lrms/pbs/scan-pbs-job.in0000644000175000002070000003704112721014037025241 0ustar00mockbuildmock00000000000000#!@posix_shell@ # # Periodically read log files of PBS and put mark files # for job, which finished. # If log files are not available scan for finished (absent) jobs # in PBS and put mark files for job, which finished. # # usage: scan_pbs_job control_dir ... # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi if [ -z "$1" ] ; then exit 1 ; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? libexecdir="${ARC_LOCATION:-@prefix@}/@pkglibexecsubdir@/" pkgdatadir="$basedir" . ${pkgdatadir}/configure-pbs-env.sh || exit $? # Assume that gm-kick and scan_common is installed in the same directory GMKICK=${libexecdir}/gm-kick . "${pkgdatadir}/scan_common.sh" || exit $? # Log system performance if [ ! 
-z "$perflogdir" ]; then perflog_common "$perflogdir" "$CONFIG_controldir" fi # Where to store temporary files TMPDIR=${TMPDIR:-@tmp_dir@} # directory containing PBS server logs pbs_log_dir=${CONFIG_pbs_log_path:-@pbs_log_path@} RUNTIME_NODE_SEES_FRONTEND=$CONFIG_shared_filesystem #default is NFS if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then RUNTIME_NODE_SEES_FRONTEND=yes fi # locally empty means no if [ "${RUNTIME_NODE_SEES_FRONTEND}" = 'no' ] ; then RUNTIME_NODE_SEES_FRONTEND= fi # first control_dir is used for storing own files control_dir=$1 control_dirs= while [ $# -gt 0 ] ; do control_dirs="${control_dirs} \"$1\"" shift done my_id=`id -u` state_file=$control_dir/pbs_log_scan.`id -un` lines=`cat "$state_file" 2>/dev/null` ldt=`echo $lines | awk '{split($0,field," ");print field[1]}' ` lines=`echo $lines | awk '{split($0,field," ");print field[2]}'` lines_skip=$(( $lines + 0 )) ldate=$(( $ldt + 0 )) if [ -z "$lines_skip" ] ; then lines_skip='0' ; fi if [ -z "$ldate" ] ; then ldate='0' ; fi whole_line= find_by_local() { eval "set -- $control_dirs" for ctr_dir in "$@"; do find ${ctr_dir}/processing -name 'job.*.status' -print0 \ | sed 's/processing\/job\.\([^\.]*\)\.status/job.\1.local/g' \ | xargs -0 grep -F -l $whole_line "localid=$job_id" 2>/dev/null done \ | head -n 1 } find_by_grami() { eval "set -- $control_dirs" for ctr_dir in "$@"; do find ${ctr_dir}/processing -name 'job.*.status' -print0 \ | sed 's/processing\/job\.\([^\.]*\)\.status/job.\1.grami/g' \ | xargs -0 grep -F -l $whole_line "joboption_jobid=$job_id" 2>/dev/null done \ | sed 's/\.grami$/.local/' \ | head -n 1 } # set_job_vars takes a line from pbs logs and splits it, returning information # in pbs_date, pbs_code, pbs_server, pbs_job, job_id, job_message and rest_line set_job_vars() { pbs_date=$1 pbs_code=$2 pbs_server=$3 pbs_job=$4 job_id=$5 job_message=$6 rest_line=$7 } # # Main function for processing one PBS log. # Extracts log lines with code 0010 (job exited) and 0008 (job killed) # # TODO this should be split into smaller functions process_log_file () { eval "set -- $control_dirs" #we grep for finished jobs, then use sed to remove already processed lines #OBS: deleted jobs have a 0008 message with not much info in it. A 0010 # message may follow (or not) with full usage stats. By this time the # job has already been processed, so this info is ignored! #TODO: make log scanning more intelligent. exited_killed_jobs=`egrep '^[^;]*;0010;[^;]*;Job;|^[^;]*;0008;[^;]*;Job;[^;]*;Exit_status=|^[^;]*;0008;[^;]*;Job;[^;]*;Job deleted' ${lname} | tail -n+$(( $lines_skip + 1 ))` #TODO should we add processed lines before jobs have actually been processed? What if the last job only has half a record? new_lines=`echo "$exited_killed_jobs" | wc -l` # new_lines set to 1 when string is empty, should have been 0 [ "x$exited_killed_jobs" = x ] && continue lines_processed=$(( $lines_skip + $new_lines )) if [ "$lines_processed" -lt '0' ] ; then lines_processed=0; fi echo "$cname $lines_processed"> $state_file exited_killed_jobs=`echo "$exited_killed_jobs" | sort -u` # force word splitting to happen only on newlines old_IFS=$IFS; IFS=' ' for job in $exited_killed_jobs; do # Split line into fields by forcing word splitting to happen on ";" IFS=";" set_job_vars $job IFS=$old_IFS # Try to extract exit code of PBS (note: if executable fails it's code goes to PBS) exit_code=`echo "$job_message" | sed -n 's/^.*Exit_status=\([-0-9]*\).*/\1/p'` # Check if job has suffix echo "$job_id" | grep -q -F . if [ ! $? 
= '0' ] ; then whole_line=-x else job_id=`echo "$job_id" | awk '{split($0,field,".");print field[1]"."field[2]}'` whole_line= fi # look for this id in job.ID.local, then in job.ID.grami name=`find_by_local` if [ -z "$name" ]; then name=`find_by_grami` if [ -z "$name" ]; then continue; fi fi if [ "$my_id" != '0' ] ; then if [ ! -O "$name" ] ; then continue ; fi fi uid=$(get_owner_uid "$name") [ -z "$uid" ] && { log "Failed to stat $name"; continue; } base_name=`echo "$name" 2>/dev/null | sed -n 's/\.local$//p'` if [ -z "${base_name}" ] ; then continue ; fi # check if job already reported if [ -f "${base_name}.lrms_done" ] ; then continue ; fi statusfile=`echo "$name" 2>/dev/null | sed -n 's/job\.\([^\.]*\)\.local$/processing\/job.\1.status/p'` # more protection - check if grid-manager thinks job is still running egrep 'INLRMS|SUBMIT|CANCELING' "$statusfile" >/dev/null 2>&1 if [ ! $? = '0' ] ; then continue ; fi # So far only PBS exit code is available # It would be nice to have exit code of main executable exitcode='' # get session directory of this job sessiondir=`grep -h '^sessiondir=' "${base_name}.local" | sed 's/^sessiondir=\(.*\)/\1/'` diagfile="${sessiondir}.diag" commentfile="${sessiondir}.comment" if [ -z "$sessiondir" ] ; then log "Failed to determine the path of the job's session directory" else # have chance to obtain exit code if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then # In case of non-NFS setup it may take some time till # diagnostics file is delivered. Wait for it max 2 minutes. # OBS: exitcode may never appear in the .diag file if the job was # killed. There will be a 2 minute delay for every such job! diag_tries=0 while [ "$diag_tries" -lt 20 ] ; do job_read_diag # uses $sessiondir, $uid if [ ! -z "$exitcode" ] ; then break ; fi sleep 10 diag_tries=$(( $diag_tries + 1 )) log "no exitcode in diag file $diagfile (try $diag_tries of 20)" done else job_read_diag # uses $sessiondir, $uid fi fi # Try to obtain message from PBS if any pbs_comment=$(do_as_uid "$uid" "tail -n 1 '$commentfile'") save_commentfile "$uid" "$commentfile" "${base_name}.errors" # Extract values from PBS walltime=`echo "$job_message" | sed -n 's/^.*resources_used.walltime=\(\([0-9]*:\)*[0-9][0-9]\).*/\1/p'` cputime=`echo "$job_message" | sed -n 's/^.*resources_used.cput=\(\([0-9]*:\)*[0-9][0-9]\).*/\1/p'` mem=`echo "$job_message" | sed -n 's/^.*resources_used.mem=\([0-9]*\)kb.*/\1/p'` vmem=`echo "$job_message" | sed -n 's/^.*resources_used.vmem=\([0-9]*\)kb.*/\1/p'` # Convert to utc and store as seconds date_to_utc_seconds "$pbs_date" if [ ! -z "$return_date_seconds" ]; then # Convert from seconds to YYYYMMDDHHMMSSZ seconds_to_mds_date "$return_date_seconds" endtime=$return_mds_date # Find out how many seconds the job executed interval_to_seconds "$walltime" if [ ! -z "$return_interval_seconds" ]; then # Convert from seconds to YYYYMMDDHHMMSSZ seconds_to_mds_date $(( $return_date_seconds - $return_interval_seconds )) starttime=$return_mds_date fi fi # Values to write to diag. These will override values already written. 
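# Illustrative example (an assumption about the exact server_logs layout, which varies
# between PBS/Torque versions): an accounting record such as
#   10/05/2017 14:30:00;0010;PBS_Server;Job;1234.server;Exit_status=0 resources_used.cput=00:10:00 resources_used.mem=10240kb resources_used.vmem=20480kb resources_used.walltime=00:12:34
# would be split on ';' by set_job_vars, and the sed expressions above would then yield
# exit_code=0, cputime=00:10:00, walltime=00:12:34, mem=10240 and vmem=20480, which are
# converted and written out as the diag values below.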
interval_to_seconds "$walltime" [ -n "$return_interval_seconds" ] && WallTime=$return_interval_seconds interval_to_seconds "$cputime" [ -n "$return_interval_seconds" ] && UserTime=$return_interval_seconds [ -n "$return_interval_seconds" ] && KernelTime=0 [ -n "$mem" ] && UsedMemory=$mem [ -n "$vmem" ] && TotalMemory=$vmem [ -n "$starttime" ] && LRMSStartTime=$starttime [ -n "$endtime" ] && LRMSEndTime=$endtime [ -n "$pbs_comment" ] && LRMSMessage=$pbs_comment [ -n "$exit_code" ] && LRMSExitcode=$exit_code job_write_diag if [ -z "$exitcode" ] ; then # No exit code of job means job was most probably killed if [ -z "$exit_code" ] ; then exit_code='-1'; fi if [ "$exit_code" = '0' ] ; then echo "Job $job_id failed but PBS have not noticed that" 1>&2 echo "-1 Job failed but PBS reported 0 exit code." > "${base_name}.lrms_done" elif [ -z "$pbs_comment" ] ; then echo "Job $job_id failed with PBS exit code $exit_code" 1>&2 echo "$exit_code Job was killed by PBS." > "${base_name}.lrms_done" else echo "Job $job_id failed with PBS exit code $exit_code" 1>&2 echo "$exit_code $pbs_comment" > "${base_name}.lrms_done" fi else if [ -z "$exit_code" ] ; then exit_code='-1'; fi if [ ! "$exitcode" = 0 ] ; then if [ "$exit_code" = '0' ] ; then exit_code='-1'; fi echo "Job $job_id failed with exit code $exitcode, PBS reported $exit_code." 1>&2 echo "$exit_code Job failed with exit code $exitcode." > "${base_name}.lrms_done" else if [ ! "$exit_code" = '0' ] ; then echo "Job finished properly but PBS reported $exit_code." 1>&2 if [ -z "$pbs_comment" ] ; then echo "$exit_code Job was killed by PBS." > "${base_name}.lrms_done" else echo "$exit_code $pbs_comment" > "${base_name}.lrms_done" fi else # echo "Job finished without errors." 1>&2 echo "0" > "${base_name}.lrms_done" fi fi fi # wake up GM ${GMKICK} "${base_name}.local" done IFS=$old_IFS } readable_logs=no # Check $pbs_log_dir for readable files # if any are found, process them and update relevant information if [ ! -z "${pbs_log_dir}" ] ; then for cname in `ls -1 ${pbs_log_dir}/ 2>/dev/null | grep '^[0-9]*$'` ; do lname="${pbs_log_dir}/$cname" if [ ! -r "$lname" ] ; then continue ; fi readable_logs=yes if [ "$cname" -lt "$ldate" ] ; then continue elif [ "$cname" -gt "$ldate" ] ; then lines_skip=0 fi echo "Date: " $cname last_modified=`stat $lname | grep Modify` process_log_file done fi # main loop, stay here up to 60 seconds if log is still updated while # we are reading it. if [ "$readable_logs" = 'yes' ] ; then time_count=0 while true ; do new_modified=`stat $lname | grep Modify` if [ "$new_modified" != "$last_modified" ] ; then last_modified="$new_modified" lines=`cat "$state_file" 2>/dev/null` ldt=`echo $lines | awk '{split($0,field," ");print field[1]}' ` lines=`echo $lines | awk '{split($0,field," ");print field[2]}'` lines_skip=$(( $lines + 0 )) ldate=$(( $ldt + 0 )) process_log_file fi sleep 10 time_count=$(( $time_count + 1 )) if [ "$time_count" -gt 60 ] ; then break ; fi done exit 0 fi # If no PBS logs found try ordinary 'qstat' eval "set -- $control_dirs" # Get all running jobs pidslist=`mktemp "$TMPDIR/qstat.XXXXXX"` || if [ ! "$?" = '0' ] ; then # FS problems ? # TODO debug output here sleep 60 exit 1 fi ${PBS_BIN_PATH}/qstat -a 2>/dev/null 1>"$pidslist" if [ ! "$?" = '0' ] ; then rm -f "$pidslist" # PBS server down ? sleep 60 exit 1 fi exclude_completed () { awk '$10!="C"{print $0}' } pids=`cat "$pidslist" | grep '^[0-9][0-9]*\.' 
| exclude_completed | sed 's/^\([0-9][0-9]*\).*/\1/'` rm -f "$pidslist" # Go through directories for ctr_dir in "$@" ; do if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi # Obtain ids stored in job.*.local ids=`find ${ctr_dir}/processing -name 'job.*.status' -print0 \ | sed 's/processing\/job\.\([^\.]*\)\.status/job.\1.local/g' \ | xargs -0 grep -h "^localid=" 2>/dev/null | sed 's/^localid=\([0-9]*\).*/\1/'` if [ -z "$ids" ] ; then continue ; fi if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-pbs-job, ControlDirTraversal: $t" >> $perflogfile fi # compare them to running jobs and find missing bids= for id in $ids ; do found=`echo "$pids" | grep "^$id$"` if [ -z "$found" ] ; then bids="$bids $id" fi done if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi # go through missing ids for id in $bids ; do # find grid job corresponding to current local id jobfile=`find ${ctr_dir}/processing -name 'job.*.status' -print0 \ | sed 's/processing\/job\.\([^\.]*\)\.status/job.\1.local/g' \ | xargs -0 grep -F -l "localid=$id." 2>/dev/null` if [ -z "$jobfile" ] ; then continue ; fi if [ "$my_id" != '0' ] ; then if [ ! -O "$jobfile" ] ; then continue ; fi fi uid=$(get_owner_uid "$jobfile") [ -z "$uid" ] && { log "Failed to stat $jobfile"; continue; } # extract grid id gridid=`basename "$jobfile" '.local' | sed 's/^job\.//'` donefile="${ctr_dir}/job.${gridid}.lrms_done" if [ -f "$donefile" ] ; then continue ; fi statusfile="${ctr_dir}/processing/job.${gridid}.status" if [ ! -f "$statusfile" ] ; then continue ; fi status=`cat "$statusfile"` if [ "$status" != "INLRMS" ] && [ "$status" != "CANCELING" ]; then continue ; fi # get session directory of this job session=`grep -h '^sessiondir=' "$jobfile" | sed 's/^sessiondir=\(.*\)/\1/'` if [ ! -z "$session" ] ; then # have chance to obtain exit code diagfile="${session}.diag" if [ ! -z "$session" ] ; then # have chance to obtain exit code exitcode=$(do_as_uid "$uid" "grep '^exitcode=' '$diagfile'" | sed 's/^exitcode=//') fi if [ ! -z "$exitcode" ] ; then # job finished and exit code is known save_commentfile "$uid" "${session}.comment" "${ctr_dir}/job.${gridid}.errors" echo "$exitcode Executable finished with exit code $exitcode" > "$donefile" ${GMKICK} "$jobfile" echo "Job $gridid finished with exit code $exitcode" continue fi fi # job has probably finished and exit code is not known exitcode='-1' countfile="${ctr_dir}/job.${gridid}.lrms_job" counter=0 if [ -f "$countfile" ] ; then counter=`cat "$countfile"` counter=$(( $counter + 1 )) fi if [ "$counter" -gt 5 ] ; then rm -f "$countfile" save_commentfile "$uid" "${session}.comment" "${ctr_dir}/job.${gridid}.errors" echo "$exitcode Job was lost with unknown exit code" > "$donefile" ${GMKICK} "$jobfile" echo "Job $gridid finished with unknown exit code" else echo "$counter" > "$countfile" fi done if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-pbs-job, JobProcessing: $t" >> $perflogfile fi # go through existing ids for id in $pids ; do # find grid job corresponding to current local id jobfile=`find ${ctr_dir} -name 'job.*.local' -print0 | xargs -0 grep -F -l "localid=$id."
2>/dev/null` if [ -z "$jobfile" ] ; then continue ; fi gridid=`basename "$jobfile" '.local' | sed 's/^job\.//'` countfile="${ctr_dir}/job.${gridid}.lrms_job" # reset failure counter rm -f "$countfile" done done sleep 60 exit 0 nordugrid-arc-5.4.2/src/services/a-rex/lrms/pbs/PaxHeaders.7502/configure-pbs-env.sh.in0000644000000000000000000000012713044140007026652 xustar000000000000000027 mtime=1485881351.109312 30 atime=1513200650.594632454 30 ctime=1513200663.333788259 nordugrid-arc-5.4.2/src/services/a-rex/lrms/pbs/configure-pbs-env.sh.in0000644000175000002070000000226013044140007026714 0ustar00mockbuildmock00000000000000# # set environment variables: # PBS_BIN_PATH # ############################################################## # Reading configuration from $ARC_CONFIG ############################################################## if [ -z "$pkgdatadir" ]; then echo 'pkgdatadir must be set' 1>&2; exit 1; fi . "$pkgdatadir/config_parser_compat.sh" || exit $? ARC_CONFIG=${ARC_CONFIG:-/etc/arc.conf} config_parse_file $ARC_CONFIG 1>&2 || exit $? config_import_section "common" config_import_section "infosys" config_import_section "grid-manager" if [ ! -z "$joboption_queue" ]; then config_import_section "queue/$joboption_queue" fi # performance logging: if perflogdir or perflogfile is set, logging is turned on. So only set them when enable_perflog_reporting is ON unset perflogdir unset perflogfile enable_perflog=${CONFIG_enable_perflog_reporting:-no} if [ "$CONFIG_enable_perflog_reporting" == "expert-debug-on" ]; then perflogdir=${CONFIG_perflogdir:-/var/log/arc/perfdata} perflogfile="${perflogdir}/backends.perflog" fi # Path to PBS commands PBS_BIN_PATH=${CONFIG_pbs_bin_path:-@pbs_bin_path@} if [ ! -d ${PBS_BIN_PATH} ] ; then echo "Could not set PBS_BIN_PATH." 1>&2 exit 1 fi nordugrid-arc-5.4.2/src/services/a-rex/lrms/pbs/PaxHeaders.7502/cancel-pbs-job.in0000644000000000000000000000012712100556217025475 xustar000000000000000027 mtime=1359142031.233238 30 atime=1513200650.562632063 30 ctime=1513200663.332788247 nordugrid-arc-5.4.2/src/services/a-rex/lrms/pbs/cancel-pbs-job.in0000644000175000002070000000117212100556217025540 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -x # # Cancel job running in PBS. # echo "----- starting cancel_pbs_job -----" 1>&2 joboption_lrms=pbs # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkgdatadir="$basedir" . ${pkgdatadir}/cancel_common.sh || exit $? PBS_QDEL='qdel' if [ ! -z "$PBS_BIN_PATH" ] ; then PBS_QDEL="${PBS_BIN_PATH}/${PBS_QDEL}" fi echo executing qdel with job id $joboption_jobid 1>&2 "${PBS_QDEL}" "${joboption_jobid}" echo "----- exiting cancel_pbs_job -----" 1>&2 echo "" 1>&2 exit 0 nordugrid-arc-5.4.2/src/services/a-rex/lrms/pbs/PaxHeaders.7502/submit-pbs-job.in0000644000000000000000000000012712733561146025564 xustar000000000000000027 mtime=1466884710.220101 30 atime=1513200650.543631831 30 ctime=1513200663.335788284 nordugrid-arc-5.4.2/src/services/a-rex/lrms/pbs/submit-pbs-job.in0000644000175000002070000004434112733561146025634 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -x # # Based on globus submission script for pbs # # Submits job to PBS. # Input: path to grami file (same as Globus). # # The temporary job script is created for the submission and then removed # at the end of this script. echo "----- starting submit_pbs_job -----" 1>&2 joboption_lrms=pbs # ARC1 passes first the config file. 
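# Illustrative invocation (not taken from the upstream sources; it simply mirrors the
# argument handling directly below):
#   submit-pbs-job --config /etc/arc.conf <path-to-grami-file>
# i.e. an optional "--config <file>" pair followed by the grami file path, which is
# later consumed by "init $1".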
if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkgdatadir="$basedir" . ${pkgdatadir}/configure-pbs-env.sh || exit $? . ${pkgdatadir}/submit_common.sh || exit $? perflogfilesub="${perflogdir}/submission.perflog" if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi ############################################################## # Parse grami file, read arc config ############################################################## init $1 read_arc_conf failures_file="$joboption_controldir/job.$joboption_gridid.failed" if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then if [ -z "${RUNTIME_LOCAL_SCRATCH_DIR}" ] ; then echo "Need to know at which directory to run job: RUNTIME_LOCAL_SCRATCH_DIR must be set if RUNTIME_NODE_SEES_FRONTEND is empty" 1>&2 echo "Submission: Configuration error.">>"$failures_file" exit 1 fi fi ############################################################## # Zero stage of runtime environments ############################################################## RTE_stage0 ############################################################## # create job script ############################################################## mktempscript PBS_QSUB='qsub -r n -S /bin/bash -m n ' if [ ! -z "$PBS_BIN_PATH" ] ; then PBS_QSUB=${PBS_BIN_PATH}/${PBS_QSUB} fi is_cluster=true ############################################################## # Start job script ############################################################## echo "# PBS batch job script built by grid-manager" > $LRMS_JOB_SCRIPT # write PBS output to 'comment' file echo "#PBS -e '${joboption_directory}.comment'" >> $LRMS_JOB_SCRIPT echo "#PBS -j eo">> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT # choose queue if [ ! -z "${joboption_queue}" ] ; then echo "#PBS -q $joboption_queue" >> $LRMS_JOB_SCRIPT fi ############################################################## # priority ############################################################## if [ ! -z "$joboption_priority" ]; then #first we must scale priority. PBS: -1024 -> 1023 ARC: 0-100 priority=$((joboption_priority * (1024+1023) / 100)) priority=$((priority-1024)) echo "#PBS -p ${priority}" >> $LRMS_JOB_SCRIPT fi # project name for accounting if [ ! -z "${joboption_rsl_project}" ] ; then echo "#PBS -A $joboption_rsl_project" >> $LRMS_JOB_SCRIPT fi # job name for convenience if [ ! -z "${joboption_jobname}" ] ; then jobname=`echo "$joboption_jobname" | \ sed 's/^\([^[:alpha:]]\)/N\1/' | \ sed 's/[^[:alnum:]]/_/g' | \ sed 's/\(...............\).*/\1/'` echo "#PBS -N '$jobname'" >> $LRMS_JOB_SCRIPT fi echo "PBS jobname: $jobname" 1>&2 ############################################################## # (non-)parallel jobs ############################################################## set_count if [ "$joboption_count" = "1" ] ; then nodes_string="#PBS -l nodes=1" else if [ ! -z $joboption_numnodes ] ; then nodes_string="#PBS -l nodes=${joboption_numnodes}" else #in case no countpernode is requested in job, numnodes will also not be set, use count instead nodes_string="#PBS -l nodes=${joboption_count}" fi fi if [ ! -z $joboption_countpernode ] && [ $joboption_countpernode -gt 0 ] ; then nodes_string="${nodes_string}:ppn=${joboption_countpernode}" fi if [ ! -z "$CONFIG_queue_node_string" ] ; then nodes_string="${nodes_string}:${CONFIG_queue_node_string}" fi i=0 eval "var_is_set=\${joboption_nodeproperty_$i+yes}" while [ ! 
-z "${var_is_set}" ] ; do eval "var_value=\${joboption_nodeproperty_$i}" nodes_string="${nodes_string}:${var_value}" i=$(( $i + 1 )) eval "var_is_set=\${joboption_nodeproperty_$i+yes}" done echo "$nodes_string" >> $LRMS_JOB_SCRIPT # exclusice execution: # there is no standard way to express this in PBS. # One way would be to request a full nodes memory, # but this is only feasible on a cluster with # homogenous nodes ############################################################## # Execution times (minutes) ############################################################## if [ ! -z "$joboption_cputime" ] ; then # TODO: parallel jobs, add initialization time, make walltime bigger, ... # is cputime for every process ? if [ $joboption_cputime -lt 0 ] ; then echo 'WARNING: Less than 0 cpu time requested: $joboption_cputime' 1>&2 joboption_cputime=0 echo 'WARNING: cpu time set to 0' 1>&2 fi maxcputime="$joboption_cputime" cputime_min=$(( $maxcputime / 60 )) cputime_sec=$(( $maxcputime - $cputime_min * 60 )) echo "#PBS -l cput=${cputime_min}:${cputime_sec}" >> $LRMS_JOB_SCRIPT fi if [ -z "$joboption_walltime" ] ; then if [ ! -z "$joboption_cputime" ] ; then # Set walltime for backward compatibility or incomplete requests joboption_walltime=$(( $joboption_cputime * $walltime_ratio )) fi fi if [ ! -z "$joboption_walltime" ] ; then if [ $joboption_walltime -lt 0 ] ; then echo 'WARNING: Less than 0 wall time requested: $joboption_walltime' 1>&2 joboption_walltime=0 echo 'WARNING: wall time set to 0' 1>&2 fi maxwalltime="$joboption_walltime" walltime_min=$(( $maxwalltime / 60 )) walltime_sec=$(( $maxwalltime - $walltime_min * 60 )) echo "#PBS -l walltime=${walltime_min}:${walltime_sec}" >> $LRMS_JOB_SCRIPT fi ############################################################## # Requested memory (mb) ############################################################## set_req_mem #pmem and pvmem are per process, and enforced by PBS via setting up memory ulimit. #But in case of using threads - single process is used and limited to per-process memory. #To support correct operation of threaded apps in PBS - submit-pbs-job set the general job memory (vmem) #Moreover according to the PBS manuals, setting vmem is supported on the sufficiently #bigger ammount of operating systems then pvmem. memreq="${joboption_memory}" if [ ! -z $joboption_count ] && [ $joboption_count -gt 0 ] ; then memreq=$(( $joboption_count * $memreq )) fi #requested memory is used to simulate exclusive execution if [ "$joboption_exclusivenode" = "true" ]; then # using nodememory as maximum mem if [ -n "${CONFIG_nodememory}" ] ; then tempmem=`expr $CONFIG_nodememory / $joboption_countpernode ` if [ "${tempmem}" -gt "${joboption_memory}" ] ; then memreq="${tempmem}" fi else echo "WARNING: Could not set memory limit to simulate exclusive execution." 1>&2 fi fi if [ ! -z "$joboption_memory" ] ; then echo "#PBS -l vmem=${memreq}mb" >> $LRMS_JOB_SCRIPT fi gate_host=`uname -n` if [ -z "$gate_host" ] ; then echo "Can't get own hostname" 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "Submission: Configuration error.">>"$failures_file" exit 1 fi ############################################################## # PBS stage in/out ############################################################## if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then ( cd "$joboption_directory" if [ $? 
-ne '0' ] ; then echo "Can't change to session directory: $joboption_directory" 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "Submission: Configuration error.">>"$failures_file" exit 1 fi scratch_dir=`dirname "$joboption_directory"` echo "#PBS -W stagein=$RUNTIME_LOCAL_SCRATCH_DIR@$gate_host:$joboption_directory" >> $LRMS_JOB_SCRIPT echo "#PBS -W stageout=$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid@$gate_host:$scratch_dir" >> $LRMS_JOB_SCRIPT echo "#PBS -W stageout=$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid.diag@$gate_host:$joboption_directory.diag" >> $LRMS_JOB_SCRIPT ) fi echo "" >> $LRMS_JOB_SCRIPT echo "# Overide umask of execution node (sometime values are really strange)" >> $LRMS_JOB_SCRIPT echo "umask 077" >> $LRMS_JOB_SCRIPT echo " " >> $LRMS_JOB_SCRIPT sourcewithargs_jobscript ############################################################## # Add environment variables ############################################################## add_user_env ############################################################## # Check for existance of executable, # there is no sense to check for executable if files are # downloaded directly to computing node ############################################################## if [ -z "${joboption_arg_0}" ] ; then echo 'Executable is not specified' 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "Submission: Job description error.">>"$failures_file" exit 1 fi ####################################################################### # copy information useful for transfering files to/from node directly ####################################################################### if [ "$joboption_localtransfer" = 'yes' ] ; then setup_local_transfer fi ###################################################################### # Adjust working directory for tweaky nodes # RUNTIME_GRIDAREA_DIR should be defined by external means on nodes ###################################################################### if [ ! -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then setup_runtime_env else echo "RUNTIME_JOB_DIR=$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid" >> $LRMS_JOB_SCRIPT echo "RUNTIME_JOB_DIAG=$RUNTIME_LOCAL_SCRATCH_DIR/${joboption_gridid}.diag" >> $LRMS_JOB_SCRIPT RUNTIME_STDIN_REL=`echo "${joboption_stdin}" | sed "s#^${joboption_directory}/*##"` RUNTIME_STDOUT_REL=`echo "${joboption_stdout}" | sed "s#^${joboption_directory}/*##"` RUNTIME_STDERR_REL=`echo "${joboption_stderr}" | sed "s#^${joboption_directory}/*##"` if [ "$RUNTIME_STDIN_REL" = "${joboption_stdin}" ] ; then echo "RUNTIME_JOB_STDIN=\"${joboption_stdin}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDIN=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDIN_REL\"" >> $LRMS_JOB_SCRIPT fi if [ "$RUNTIME_STDOUT_REL" = "${joboption_stdout}" ] ; then echo "RUNTIME_JOB_STDOUT=\"${joboption_stdout}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDOUT=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDOUT_REL\"" >> $LRMS_JOB_SCRIPT fi if [ "$RUNTIME_STDERR_REL" = "${joboption_stderr}" ] ; then echo "RUNTIME_JOB_STDERR=\"${joboption_stderr}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDERR=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDERR_REL\"" >> $LRMS_JOB_SCRIPT fi fi ############################################################## # Add std... 
to job arguments ############################################################## include_std_streams ############################################################## # Move files to local working directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_node echo "" >> $LRMS_JOB_SCRIPT echo "RESULT=0" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT ##################################################### # Download input files #################################################### download_input_files ##################################################### # Go to working dir and start job #################################################### echo "" >> $LRMS_JOB_SCRIPT echo "# Changing to session directory" >> $LRMS_JOB_SCRIPT echo "cd \$RUNTIME_JOB_DIR" >> $LRMS_JOB_SCRIPT echo "export HOME=\$RUNTIME_JOB_DIR" >> $LRMS_JOB_SCRIPT ############################################################## # Skip execution if something already failed ############################################################## echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime configuration at computing node ############################################################## RTE_stage1 ############################################################## # Diagnostics ############################################################## echo "echo \"runtimeenvironments=\$runtimeenvironments\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<'EOSCR' if [ ! "X$PBS_NODEFILE" = 'X' ] ; then if [ -r "$PBS_NODEFILE" ] ; then cat "$PBS_NODEFILE" | sed 's/\(.*\)/nodename=\1/' >> "$RUNTIME_JOB_DIAG" NODENAME_WRITTEN="1" else PBS_NODEFILE= fi fi EOSCR ############################################################## # Check intermediate result again ############################################################## echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT ############################################################## # Execution ############################################################## cd_and_run ############################################################## # End of RESULT checks ############################################################## echo "fi" >> $LRMS_JOB_SCRIPT echo "fi" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime (post)configuration at computing node ############################################################## configure_runtime ##################################################### # Upload output files #################################################### if [ "$joboption_localtransfer" = 'yes' ] ; then upload_output_files else # There is no sense to keep trash till GM runs uploader echo 'if [ ! -z "$RUNTIME_LOCAL_SCRATCH_DIR" ] ; then' >> $LRMS_JOB_SCRIPT # Delete all files except listed in job.#.output echo ' find ./ -type l -exec rm -f "{}" ";"' >> $LRMS_JOB_SCRIPT echo ' find ./ -type f -exec chmod u+w "{}" ";"' >> $LRMS_JOB_SCRIPT echo ' find ./ -type d -exec chmod u+w "{}" ";"' >> $LRMS_JOB_SCRIPT if [ -f "$joboption_controldir/job.$joboption_gridid.output" ] ; then cat "$joboption_controldir/job.$joboption_gridid.output" | \ # remove leading backslashes, if any sed 's/^\/*//' | \ # backslashes and spaces are escaped with a backslash in job.*.output. The # shell built-in read undoes this escaping. 
while read name rest; do # make it safe for shell by replacing single quotes with '\'' name=`printf "%s" "$name"|sed "s/'/'\\\\\\''/g"`; # protect from deleting output files including those in the dynamic list if [ "${name#@}" != "$name" ]; then # Does $name start with a @ ? dynlist=${name#@} echo " dynlist='$dynlist'" >> $LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<'EOSCR' chmod -R u-w "./$dynlist" 2>/dev/null cat "./$dynlist" | while read name rest; do chmod -R u-w "./$name" 2>/dev/null done EOSCR else echo " chmod -R u-w \"\$RUNTIME_JOB_DIR\"/'$name' 2>/dev/null" >> $LRMS_JOB_SCRIPT fi done fi echo ' find ./ -type f -perm /200 -exec rm -f "{}" ";"' >> $LRMS_JOB_SCRIPT echo ' find ./ -type f -exec chmod u+w "{}" ";"' >> $LRMS_JOB_SCRIPT echo ' find ./ -type d -exec chmod u+w "{}" ";"' >> $LRMS_JOB_SCRIPT echo 'fi' >> $LRMS_JOB_SCRIPT fi echo "" >> $LRMS_JOB_SCRIPT ############################################################## # Move files back to session directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id # !!!!!!!!!!!!!!!!!!! would be better to know the names of files !!!!!!!!!!! ############################################################## move_files_to_frontend ####################################### # Submit the job ####################################### echo "PBS job script built" 1>&2 # Execute qsub command cd "$joboption_directory" echo "PBS script follows:" 1>&2 echo "-------------------------------------------------------------------" 1>&2 cat "$LRMS_JOB_SCRIPT" 1>&2 echo "-------------------------------------------------------------------" 1>&2 echo "" 1>&2 PBS_RESULT=1 PBS_TRIES=0 if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-pbs-job, JobScriptCreation: $t" >> $perflogfilesub fi while [ "$PBS_TRIES" -lt '10' ] ; do if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi ${PBS_QSUB} < $LRMS_JOB_SCRIPT 1>$LRMS_JOB_OUT 2>$LRMS_JOB_ERR PBS_RESULT="$?" if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-pbs-job, JobSiubmission: $t" >> $perflogfilesub fi if [ "$PBS_RESULT" -eq '0' ] ; then break ; fi if [ "$PBS_RESULT" -eq '198' ] ; then echo "Waiting for queue to decrease" 1>&2 sleep 60 PBS_TRIES=0 continue fi grep 'maximum number of jobs' "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" if [ $? -eq '0' ] ; then echo "Waiting for queue to decrease" 1>&2 sleep 60 PBS_TRIES=0 continue fi PBS_TRIES=$(( $PBS_TRIES + 1 )) sleep 2 done if [ $PBS_RESULT -eq '0' ] ; then job_id=`cat $LRMS_JOB_OUT` # This should be on the format 1414162.$hostname if [ "${job_id}" = "" ]; then echo "job *NOT* submitted successfully!" 1>&2 echo "failed getting the pbs jobid for the job!" 1>&2 echo "Submission: Local submission client behaved unexpectedly.">>"$failures_file" elif [ `echo "${job_id}" | grep -Ec "^[0-9]+"` != "1" ]; then echo "job *NOT* submitted successfully!" 1>&2 echo "badly formatted pbs jobid for the job: $job_id !" 1>&2 echo "Submission: Local submission client behaved unexpectedly.">>"$failures_file" else echo "joboption_jobid=$job_id" >> $arg_file echo "job submitted successfully!" 1>&2 echo "local job id: $job_id" 1>&2 # Remove temporary job script file rm -f $LRMS_JOB_SCRIPT $LRMS_JOB_OUT $LRMS_JOB_ERR echo "----- exiting submit_pbs_job -----" 1>&2 echo "" 1>&2 exit 0 fi else echo "job *NOT* submitted successfully!" 
1>&2 echo "got error code from qsub: $PBS_RESULT !" 1>&2 echo "Submission: Local submission client failed.">>"$failures_file" fi echo "Output is:" 1>&2 cat $LRMS_JOB_OUT 1>&2 echo "Error output is:" cat $LRMS_JOB_ERR 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "----- exiting submit_pbs_job -----" 1>&2 echo "" 1>&2 exit 1 nordugrid-arc-5.4.2/src/services/a-rex/lrms/pbs/PaxHeaders.7502/README0000644000000000000000000000012311016612002023230 xustar000000000000000026 mtime=1211831298.62818 27 atime=1513200576.575727 30 ctime=1513200663.330788223 nordugrid-arc-5.4.2/src/services/a-rex/lrms/pbs/README0000644000175000002070000000003411016612002023273 0ustar00mockbuildmock00000000000000PBS/Torque control scripts. nordugrid-arc-5.4.2/src/services/a-rex/lrms/PaxHeaders.7502/cancel_common.sh0000644000000000000000000000012311542336027024733 xustar000000000000000027 mtime=1300872215.906494 27 atime=1513200576.517726 29 ctime=1513200663.30278788 nordugrid-arc-5.4.2/src/services/a-rex/lrms/cancel_common.sh0000644000175000002070000000132411542336027025001 0ustar00mockbuildmock00000000000000# # Common block for cancel scripts # must be called with the grami file as argument # remember to set $joboption_lrms if [ -z "$joboption_lrms" ]; then echo 'joboption_lrms must be set' 1>&2; exit 1; fi if [ -z "$pkgdatadir" ]; then echo 'pkgdatadir must be set' 1>&2; exit 1; fi . ${pkgdatadir}/configure-${joboption_lrms}-env.sh || exit $? arg_file=$1 ############################################################## # Source the argument file. ############################################################## if [ -z "$arg_file" ] ; then echo "Arguments file should be specified" 1>&2 exit 1 fi if [ ! -f $arg_file ] ; then echo "Missing arguments file expected at '$arg_file'." 1>&2 exit 1 fi . $arg_file : nordugrid-arc-5.4.2/src/services/a-rex/lrms/PaxHeaders.7502/ll0000644000000000000000000000013213214316027022130 xustar000000000000000030 mtime=1513200663.446789641 30 atime=1513200668.718854121 30 ctime=1513200663.446789641 nordugrid-arc-5.4.2/src/services/a-rex/lrms/ll/0000755000175000002070000000000013214316027022253 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/lrms/ll/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612047045306024246 xustar000000000000000027 mtime=1352420038.089673 30 atime=1513200603.181052568 29 ctime=1513200663.44178958 nordugrid-arc-5.4.2/src/services/a-rex/lrms/ll/Makefile.am0000644000175000002070000000014212047045306024306 0ustar00mockbuildmock00000000000000dist_pkgdata_DATA = configure-ll-env.sh pkgdata_SCRIPTS = scan-ll-job submit-ll-job cancel-ll-job nordugrid-arc-5.4.2/src/services/a-rex/lrms/ll/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315733024254 xustar000000000000000030 mtime=1513200603.216052996 29 atime=1513200650.39963007 30 ctime=1513200663.442789592 nordugrid-arc-5.4.2/src/services/a-rex/lrms/ll/Makefile.in0000644000175000002070000005000613214315733024324 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/ll DIST_COMMON = README $(dist_pkgdata_DATA) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in $(srcdir)/cancel-ll-job.in \ $(srcdir)/scan-ll-job.in $(srcdir)/submit-ll-job.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = submit-ll-job cancel-ll-job scan-ll-job CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" SCRIPTS = $(pkgdata_SCRIPTS) SOURCES = DIST_SOURCES = DATA = $(dist_pkgdata_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = 
@ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = 
@LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ 
jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ dist_pkgdata_DATA = configure-ll-env.sh pkgdata_SCRIPTS = scan-ll-job submit-ll-job cancel-ll-job all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/ll/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/ll/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): submit-ll-job: $(top_builddir)/config.status $(srcdir)/submit-ll-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cancel-ll-job: $(top_builddir)/config.status $(srcdir)/cancel-ll-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ scan-ll-job: $(top_builddir)/config.status $(srcdir)/scan-ll-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed 
-e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dist_pkgdataDATA install-pkgdataSCRIPTS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dist_pkgdataDATA \ install-dvi install-dvi-am install-exec install-exec-am \ install-html install-html-am install-info install-info-am \ install-man install-pdf install-pdf-am install-pkgdataSCRIPTS \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \ uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS # Tell versions [3.59,3.63) of GNU make to not export all variables. 
# Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/lrms/ll/PaxHeaders.7502/cancel-ll-job.in0000644000000000000000000000012713124220265025141 xustar000000000000000027 mtime=1498489013.515904 30 atime=1513200650.429630436 30 ctime=1513200663.444789617 nordugrid-arc-5.4.2/src/services/a-rex/lrms/ll/cancel-ll-job.in0000755000175000002070000000120013124220265025177 0ustar00mockbuildmock00000000000000#!@posix_shell@ # # Cancel job running in LoadLeveler. # progname=`basename $0` echo "----- starting $progname -----" 1>&2 joboption_lrms=ll # ARC1 passes the config file first. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkgdatadir="$basedir" . ${pkgdatadir}/cancel_common.sh || exit $? LL_DEL='llcancel' if [ ! -z "$LL_BIN_PATH" ] ; then LL_DEL="${LL_BIN_PATH}/${LL_DEL}" fi echo executing job removal with job id $joboption_jobid 1>&2 "${LL_DEL}" "${joboption_jobid}" echo "----- exiting $progname -----" 1>&2 echo "" 1>&2 exit 0 nordugrid-arc-5.4.2/src/services/a-rex/lrms/ll/PaxHeaders.7502/submit-ll-job.in0000644000000000000000000000012712733561146025232 xustar000000000000000027 mtime=1466884710.220101 30 atime=1513200650.416630277 30 ctime=1513200663.446789641 nordugrid-arc-5.4.2/src/services/a-rex/lrms/ll/submit-ll-job.in0000755000175000002070000003026512733561146025305 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -xv # # Submits job to loadleveler. # # A temporary job script is created for the submission and then removed # at the end of this script. # echo "----- starting submit_ll_job -----" 1>&2 joboption_lrms=ll # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkgdatadir="$basedir" . ${pkgdatadir}/configure-ll-env.sh || exit $? . ${pkgdatadir}/submit_common.sh || exit $? perflogfilesub="${perflogdir}/submission.perflog" if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi ############################################################## # Parse grami file, read arc config ############################################################## init $1 read_arc_conf # GD enforce this for the moment RUNTIME_FRONTEND_SEES_NODE='' RUNTIME_NODE_SEES_FRONTEND='yes' ############################################################## # Zero stage of runtime environments ############################################################## RTE_stage0 LL_SUB='llsubmit' if [ ! -z "$LL_BIN_PATH" ] ; then LL_SUB=${LL_BIN_PATH}/${LL_SUB} fi mktempscript ############################################################## # Start job script ############################################################## echo "# LL batch job script built by grid-manager" > $LRMS_JOB_SCRIPT # job name for convenience if [ ! -z "${joboption_jobname}" ] ; then jobname=`echo "$joboption_jobname" | \ sed 's/^\([^[:alpha:]]\)/N\1/' | \ sed 's/[^[:alnum:]]/_/g' | \ sed 's/\(...............\).*/\1/'` echo "# @ job_name = $jobname" >> $LRMS_JOB_SCRIPT fi echo "LL jobname: $jobname" 1>&2 echo "# @ output = ${joboption_directory}.comment" >> $LRMS_JOB_SCRIPT echo "# @ error = ${joboption_directory}.comment" >> $LRMS_JOB_SCRIPT # Project account number for accounting if [ ! 
-z "${joboption_rsl_project}" ] ; then echo "# @ account_no = $joboption_rsl_project" >> $LRMS_JOB_SCRIPT fi ############################################################## # (non-)parallel jobs ############################################################## set_count if [ $joboption_count -gt 1 ] || [ "$LL_PARALLEL_SINGLE_JOBS" = "yes" ] ; then echo "# @ job_type = parallel" >> $LRMS_JOB_SCRIPT echo "# @ total_tasks = $joboption_count" >> $LRMS_JOB_SCRIPT echo "# @ node = $joboption_numnodes" >> $LRMS_JOB_SCRIPT fi #set node to exclusive if [ "$joboption_exclusivenode" = "true" ]; then echo "# @ node_usage = not_shared " >> $LRMS_JOB_SCRIPT fi ############################################################## # Execution times (obtained in seconds) ############################################################## # cputime/walltime is obtained in seconds via $joboption_cputime and $joboption_walltime if ( [ -n "$joboption_cputime" ] && [ $joboption_cputime -gt 0 ] ) ; then # CPU time must be given per-task for LL cputime_pertask=$(( $joboption_cputime / $joboption_count )) cputime_hard_pertask=$(($(( $cputime_pertask * $time_hardlimit_ratio))+30)) echo "# @ cpu_limit = ${cputime_hard_pertask} , ${cputime_pertask}" >> $LRMS_JOB_SCRIPT fi if [ -n "$joboption_walltime" ] ; then if [ $joboption_walltime -lt 0 ] ; then echo "WARNING: Less than 0 wall time requested: $joboption_walltime" 1>&2 joboption_walltime=0 echo 'WARNING: wall time set to 0' 1>&2 fi joboption_walltime_hard=$(($(( $joboption_walltime * $time_hardlimit_ratio))+30)) echo "# @ wall_clock_limit = ${joboption_walltime_hard} , ${joboption_walltime}" >> $LRMS_JOB_SCRIPT fi ############################################################## # Requested memory (mb) ############################################################## set_req_mem # There are soft and hard limits for virtual memory consumption in LL # The limits are interpreted by LoadLeveler as per process in a # parallel job. There is no need to recalculate the mem limit. if [ -n "$joboption_memory" ] ; then joboption_memory_hard=$(( $joboption_memory * $memory_hardlimit_ratio )) requirements="(Memory > ${joboption_memory_hard})" preferences="(Memory > ${joboption_memory})" if [ "$LL_CONSUMABLE_RESOURCES" != "yes" ]; then echo "# @ requirements = ${requirements}" >> $LRMS_JOB_SCRIPT echo "# @ preferences = ${preferences}" >> $LRMS_JOB_SCRIPT fi fi ############################################################## # Consumable resources # One cpu should be requested per task created. I.e. per count. ############################################################# if [ "$LL_CONSUMABLE_RESOURCES" = "yes" ]; then echo "# @ resources = ConsumableCpus(1) ConsumableMemory(${joboption_memory})" >> $LRMS_JOB_SCRIPT fi ############################################################## # Override umask ############################################################## #echo "umask 077" >> $LRMS_JOB_SCRIPT #echo 'exec > /var/tmp/grid-job-output.$$ 2>&1' >> $LRMS_JOB_SCRIPT ############################################################## # Add environment variables ############################################################## add_user_env ############################################################## # Check for existence of executable ############################################################## if [ -z "${joboption_arg_0}" ] ; then echo 'Executable is not specified' 1>&2 exit 1 fi if [ ! "$joboption_localtransfer" = 'yes' ] ; then program_start=`echo ${joboption_arg_0} | cut -c 1 2>&1` if [ "$program_start" != '$' ] && [ "$program_start" != '/' ] ; then if [ ! -f $joboption_directory/${joboption_arg_0} ] ; then echo 'Executable does not exist, or permission denied.' 1>&2 echo " Executable $joboption_directory/${joboption_arg_0}" 1>&2 echo " whoami: "`whoami` 1>&2 echo " ls -l $joboption_directory/${joboption_arg_0}: "`ls -l $joboption_directory/${joboption_arg_0}` exit 1 fi if [ ! -x $joboption_directory/${joboption_arg_0} ] ; then echo 'Executable is not executable' 1>&2 exit 1 fi fi fi ################################################################## #Read queue from config or figure out which queue to use ################################################################## if [ ! -z "${joboption_queue}" ] ; then class=$joboption_queue else #if queue is not set we must choose one LL_CLASS='llclass -l' if [ ! -z "$LL_BIN_PATH" ] ; then LL_CLASS=${LL_BIN_PATH}/${LL_CLASS} fi queue_names=`${LL_CLASS}|grep Name|awk '{split($0,field," ");print field[2]}'` #default will be shortest queue if [ ! -n "$joboption_walltime" ] ; then joboption_walltime_hard=1 fi queue_time_sel=0 for queue in $queue_names do queue_time=`${LL_CLASS} ${queue}|grep Wall_clock_limit|awk '{split($0,field,"(");print field[2]}'|awk '{split($0,field," ");print field[1]}'` if [ ${joboption_walltime_hard} -lt ${queue_time} ] ; then if [ ${queue_time_sel} -eq 0 ] || [ ${queue_time_sel} -gt ${queue_time} ] ; then class=${queue} queue_time_sel=${queue_time} fi fi done fi echo "# @ class=${class}" >> $LRMS_JOB_SCRIPT ################################################################### #Priority of jobs ################################################################## if [ ! -z "$joboption_priority" ]; then # LL: priority from 0-100. 50 is default # We can just use ARC priority directly echo "# @ user_priority = ${joboption_priority}" >> $LRMS_JOB_SCRIPT fi ################################################################### #Queue job #No mail notification ################################################################## echo "# @ notification = never" >> $LRMS_JOB_SCRIPT echo "# @ queue" >> $LRMS_JOB_SCRIPT echo " " >> $LRMS_JOB_SCRIPT sourcewithargs_jobscript ####################################################################### # copy information useful for transferring files to/from node directly ####################################################################### if [ "$joboption_localtransfer" = 'yes' ] ; then setup_local_transfer fi setup_runtime_env ################################################################### #setup soft limit trap ################################################################## echo "trap \"echo 'exitcode=24'>>\$RUNTIME_JOB_DIAG;exit 24\" SIGXCPU" >> $LRMS_JOB_SCRIPT ############################################################## # Add std... 
to job arguments ############################################################## include_std_streams ############################################################## # Move files to local working directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_node echo "" >> $LRMS_JOB_SCRIPT echo "RESULT=0" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT ##################################################### # Download input files #################################################### download_input_files ############################################################## # Skip execution if something already failed ############################################################## echo "" >> $LRMS_JOB_SCRIPT echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT echo "echo \"runtimeenvironments=\$runtimeenvironments\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime configuration ############################################################## RTE_stage1 if [ -z "$RUNTIME_NODE_SEES_FRONTEND" ] ; then echo "Nodes detached from gridarea are not supported when LL is used. Aborting job submit" 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" exit 1 fi ############################################################## # Execution ############################################################## cd_and_run echo "fi" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime (post)configuration at computing node ############################################################## configure_runtime ##################################################### # Upload output files #################################################### upload_output_files ############################################################## # Move files back to session directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_frontend ####################################### # Submit the job ####################################### echo "ll job script built" 1>&2 #job creation finished if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-ll-job, JobScriptCreation: $t" >> $perflogfilesub fi # Execute sub command cd "$joboption_directory" echo "LL script follows:" 1>&2 cat "$LRMS_JOB_SCRIPT" 1>&2 echo "" 1>&2 if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi ${LL_SUB} $LRMS_JOB_SCRIPT 1>$LRMS_JOB_OUT 2>$LRMS_JOB_ERR LLSUB_RESULT="$?" if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-ll-job, JobSubmission: $t" >> $perflogfilesub fi if [ $LLSUB_RESULT -eq '0' ] ; then echo "LRMS_JOB_OUT is $LRMS_JOB_OUT" job_id=`cat $LRMS_JOB_OUT | awk '{split($0,field,"\"");print field[2]}'`.0 if [ "${job_id}" = "" ] ; then echo "job *NOT* submitted successfully!" 1>&2 echo "failed getting the LL jobid for the job!" 1>&2 else echo "joboption_jobid=$job_id" >> $arg_file echo "job submitted successfully!" 1>&2 echo "local job id: $job_id" 1>&2 # Remove temporary job script file rm -f $LRMS_JOB_SCRIPT $LRMS_JOB_OUT $LRMS_JOB_ERR echo "----- exiting submit_ll_job -----" 1>&2 echo "" 1>&2 exit 0 fi else echo "job *NOT* submitted successfully!" 
1>&2 echo "got error code from llsubmit!" 1>&2 fi echo "Output is:" 1>&2 cat $LRMS_JOB_OUT 1>&2 echo "Error output is:" cat $LRMS_JOB_ERR 1>&2 rm -f $LRMS_JOB_SCRIPT $LRMS_JOB_OUT $LRMS_JOB_ERR echo "----- exiting submit_ll_job -----" 1>&2 echo "" 1>&2 exit 1 nordugrid-arc-5.4.2/src/services/a-rex/lrms/ll/PaxHeaders.7502/configure-ll-env.sh0000644000000000000000000000012413044140007025710 xustar000000000000000027 mtime=1485881351.109312 27 atime=1513200576.526726 30 ctime=1513200663.440789568 nordugrid-arc-5.4.2/src/services/a-rex/lrms/ll/configure-ll-env.sh0000644000175000002070000000302313044140007025753 0ustar00mockbuildmock00000000000000# # set environment variables: # LL_BIN_PATH # ############################################################## # Reading configuration from $ARC_CONFIG ############################################################## if [ -z "$pkgdatadir" ]; then echo 'pkgdatadir must be set' 1>&2; exit 1; fi . "$pkgdatadir/config_parser_compat.sh" || exit $? ARC_CONFIG=${ARC_CONFIG:-/etc/arc.conf} config_parse_file $ARC_CONFIG 1>&2 || exit $? config_import_section "common" config_import_section "infosys" config_import_section "grid-manager" # performance logging: if perflogdir or perflogfile is set, logging is turned on. So only set them when enable_perflog_reporting is ON unset perflogdir unset perflogfile enable_perflog=${CONFIG_enable_perflog_reporting:-no} if [ "$CONFIG_enable_perflog_reporting" == "expert-debug-on" ]; then perflogdir=${CONFIG_perflogdir:-/var/log/arc/perfdata} perflogfile="${perflogdir}/backends.perflog" fi # Also read queue section if [ ! -z "$joboption_queue" ]; then config_import_section "queue/$joboption_queue" fi # Path to ll commands LL_BIN_PATH=$CONFIG_ll_bin_path if [ ! -d ${LL_BIN_PATH} ] ; then echo "Could not set LL_BIN_PATH." 1>&2 exit 1 fi # Consumable resources LL_CONSUMABLE_RESOURCES=${LL_CONSUMABLE_RESOURCES:-$CONFIG_ll_consumable_resources} # Enable parallel single jobs LL_PARALLEL_SINGLE_JOBS=${LL_PARALLEL_SINGLE_JOBS:-$CONFIG_ll_parallel_single_jobs} # Local scratch disk RUNTIME_LOCAL_SCRATCH_DIR=${RUNTIME_LOCAL_SCRATCH_DIR:-$CONFIG_scratchdir} export RUNTIME_LOCAL_SCRATCH_DIR nordugrid-arc-5.4.2/src/services/a-rex/lrms/ll/PaxHeaders.7502/scan-ll-job.in0000644000000000000000000000012612721014037024637 xustar000000000000000027 mtime=1464080415.207124 29 atime=1513200650.44463062 30 ctime=1513200663.445789629 nordugrid-arc-5.4.2/src/services/a-rex/lrms/ll/scan-ll-job.in0000755000175000002070000001625112721014037024712 0ustar00mockbuildmock00000000000000#!/bin/bash # Helper script to flag done LoadLeveler jobs. # The script is called periodically by the grid-manager. # # This function retrieve the jobs status and id in one shot # look for jobs which have a known status but are not completed (!=C) # and save the localid of these jobe in the string variable $outLocalIdsString # The input variable is a string list of localid to check. # Example of usage # get_bunch_jobs_status "$inLocalIdsString" outLocalIdsString="" get_bunch_jobs_status() { #get the string list of jobs loop=`$LL_BIN_PATH/llq -r %st %id $1` if [ $? -eq 0 ]; then for elm in $loop do if [ `echo $elm | grep '^[A-Z]\{1,2\}!.\+$'` ]; then if [ ! `echo $elm | grep '^C!'` ]; then outLocalIdsString=$outLocalIdsString" "`echo $elm | awk -F! '{ print $NF}'` fi fi done fi } ################## # ARC1 passes the config file first. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? 
libexecdir="${ARC_LOCATION:-@prefix@}/@pkglibexecsubdir@/" pkgdatadir="$basedir" # Assume that gm-kick is installed in the same directory GMKICK=${libexecdir}/gm-kick # Does the control directory exist? control_dir="$1" test -d "$control_dir" || exit 1 # Get LoadLeveler environment . "${pkgdatadir}/configure-ll-env.sh" || exit $? . "${pkgdatadir}/scan_common.sh" || exit $? # Log system performance if [ ! -z "$perflogdir" ]; then perflog_common "$perflogdir" "$CONFIG_controldir" fi if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi my_id=`id -u` # mergedlist: array where each element is made of # key:value, where key is the arc jobid and value is the # localid mergedlist=() # inLocalIdsString: in this string we save the localid retrieved from # the arc .local file, separated by spaces inLocalIdsString="" findoutput=$(find "$control_dir/processing" -maxdepth 1 -type f -name 'job.*.status' | sed 's/processing\/job\.\([^\.]*\)\.status$/job.\1.local/') while read i do # Continue if no glob expansion or other problems test -f "$i" || continue jobid=`basename $i .local|sed 's/^job.//'` donefile="${control_dir}/job.${jobid}.lrms_done" statusfile="${control_dir}/processing/job.${jobid}.status" # Continue if the job is already flagged as done? test -f "$donefile" && continue if [ ! -f "$statusfile" ] ; then continue ; fi gmstatus=`cat "$statusfile"` if [ "$gmstatus" != "INLRMS" ] && [ "$gmstatus" != "CANCELING" ] ; then continue ; fi # Get local LRMS id of job by evaluating the line with localid localid=`grep ^localid= $i|head -1` eval $localid # Did we get a local id? test "$localid" = "" && continue # HACK: save the localid to be queried into inLocalIdsString # associate the localid to its jobid and save them in a list inLocalIdsString=$inLocalIdsString" "$localid mergedlist+=("$jobid:$localid") done <<< "$findoutput" if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-ll-job, ControldirTraversal: $t" >> $perflogfile fi if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi # Query the LoadLeveler for jobs # and save the not completed into $outLocalIdsString # Call the function only if there is something in the string if [[ $inLocalIdsString =~ [0-9a-zA-Z] ]]; then get_bunch_jobs_status "$inLocalIdsString" fi if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-ll-job, llq -r %st %id: $t" >> $perflogfile fi if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi numelem=0 # Start the loop based on the elements of the mergedlist for element in ${mergedlist[@]} do # Split the jobid from the localid jobid=`echo $element | awk '{split($0,a,":"); print a[1]}'` localid=`echo $element | awk '{split($0,a,":"); print a[2]}'` # Exclude the not completed jobs stored in $outLocalIdsString if [[ $outLocalIdsString == *$localid* ]] then continue fi numelem=$((numelem+1)) donefile="${control_dir}/job.${jobid}.lrms_done" statusfile="${control_dir}/processing/job.${jobid}.status" jobfile="${control_dir}/job.${jobid}.local" errorsfile="${control_dir}/job.${jobid}.errors" # Continue if the job is already flagged as done? test -f "$donefile" && continue if [ ! 
-f "$statusfile" ] ; then continue ; fi gmstatus=`cat "$statusfile"` exitcode='' # get session directory of this job sessiondir=`grep -h '^sessiondir=' "$control_dir/job.${jobid}.local" | sed 's/^sessiondir=\(.*\)/\1/'` diagfile="${sessiondir}.diag" commentfile="${sessiondir}.comment" if [ "$my_id" != '0' ] ; then if [ ! -O "$jobfile" ] ; then continue ; fi fi uid=$(get_owner_uid "$jobfile") [ -z "$uid" ] && { log "Failed to stat $jobfile"; continue; } if [ ! -z "$sessiondir" ] ; then # have chance to obtain exit code exitcode=$(do_as_uid "$uid" "grep '^exitcode=' '$diagfile'" | sed 's/^exitcode=//') else continue fi if [ ! -z "$exitcode" ] ; then if [ "$exitcode" = "152" -o $exitcode = "24" ] ; then exitcode="24" save_commentfile "$uid" "${sessiondir}.comment" "$errorsfile" echo "$exitcode Job exceeded time limit." > "$donefile" # If job exceeded time, then it will have been killed and no cputime/walltime has been written walltime=`$LL_BIN_PATH/llq -l $localid|sed -n 's/^ *Wall Clk Hard Limit:.*(\([0-9]*\) seconds.*/\1/p'` usertime=`$LL_BIN_PATH/llq -l $localid|sed -n 's/^ *Step Cpu Hard Limit:.*(\([0-9]*\) seconds.*/\1/p'` starttime=`$LL_BIN_PATH/llq -l $localid|sed -n 's/^ *Dispatch Time: \(.*\)/\1/p'` endtime=`$LL_BIN_PATH/llq -l $localid|sed -n 's/^ *Completion Date: \(.*\)/\1/p'` if [ -n "$starttime" ]; then date_to_utc_seconds "$starttime" seconds_to_mds_date "$return_date_seconds" starttime=$return_mds_date fi if [ -n "$endtime" ]; then date_to_utc_seconds "$endtime" seconds_to_mds_date "$return_date_seconds" endtime=$return_mds_date fi job_read_diag [ -n "$walltime" ] && WallTime=${walltime} [ -n "$usertime" ] && UserTime=${usertime} [ -n "$usertime" ] && KernelTime=0 [ -n "$starttime" ] && LRMSStartTime=${starttime} [ -n "$endtime" ] && LRMSEndTime=${endtime} #This needs investigating, might be user program exit code [ -n "$exitcode" ] && LRMSExitcode=$exitcode job_write_diag ${GMKICK} "$jobfile" continue fi # job finished and exit code is known save_commentfile "$uid" "${sessiondir}.comment" "$errorsfile" echo "$exitcode Executable finished with exit code $exitcode" >> "$donefile" ${GMKICK} "$jobfile" continue fi exitcode=-1 save_commentfile "$uid" "${sessiondir}.comment" "$errorsfile" echo "$exitcode Job finished with unknown exit code" >> "$donefile" ${GMKICK} "$jobfile" done if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-ll-job, JobHandling, Handled= $numelem: $t" >> $perflogfile fi sleep 60 exit 0 nordugrid-arc-5.4.2/src/services/a-rex/lrms/ll/PaxHeaders.7502/README0000644000000000000000000000012311016612002023053 xustar000000000000000026 mtime=1211831298.62818 27 atime=1513200576.529726 30 ctime=1513200663.439789556 nordugrid-arc-5.4.2/src/services/a-rex/lrms/ll/README0000644000175000002070000000003511016612002023117 0ustar00mockbuildmock00000000000000Load Leveler control script. 
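The get_bunch_jobs_status helper in scan-ll-job above relies on llq -r %st %id printing one STATUS!step-id token per job: the text after the last '!' is taken as the LoadLeveler step id, and only jobs whose status is not C (completed) are kept. A minimal stand-alone sketch of that parsing convention, using a hypothetical sample string instead of a live llq call (the node and step names below are made up for illustration):
#!/bin/sh
# Sketch only: sample_llq_output is hypothetical data, not output from a real llq invocation.
sample_llq_output="R!node1.cluster.17.0 C!node1.cluster.18.0 I!node2.cluster.19.0"
not_completed=""
for token in $sample_llq_output; do
    status=${token%%!*}    # text before the first '!' (job status)
    stepid=${token##*!}    # text after the last '!' (same field awk -F! '{print $NF}' selects)
    if [ "$status" != "C" ]; then
        not_completed="$not_completed $stepid"
    fi
done
echo "still in LRMS:$not_completed"
# Expected output: still in LRMS: node1.cluster.17.0 node2.cluster.19.0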
nordugrid-arc-5.4.2/src/services/a-rex/lrms/PaxHeaders.7502/sge0000644000000000000000000000013213214316027022277 xustar000000000000000030 mtime=1513200663.376788785 30 atime=1513200668.718854121 30 ctime=1513200663.376788785 nordugrid-arc-5.4.2/src/services/a-rex/lrms/sge/0000755000175000002070000000000013214316027022422 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/lrms/sge/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712047045306024416 xustar000000000000000027 mtime=1352420038.089673 30 atime=1513200603.340054512 30 ctime=1513200663.373788749 nordugrid-arc-5.4.2/src/services/a-rex/lrms/sge/Makefile.am0000644000175000002070000000014612047045306024461 0ustar00mockbuildmock00000000000000dist_pkgdata_DATA = configure-sge-env.sh pkgdata_SCRIPTS = scan-sge-job submit-sge-job cancel-sge-job nordugrid-arc-5.4.2/src/services/a-rex/lrms/sge/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315733024424 xustar000000000000000030 mtime=1513200603.376054952 30 atime=1513200650.606632601 30 ctime=1513200663.374788761 nordugrid-arc-5.4.2/src/services/a-rex/lrms/sge/Makefile.in0000644000175000002070000005003113214315733024471 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/sge DIST_COMMON = README $(dist_pkgdata_DATA) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in $(srcdir)/cancel-sge-job.in \ $(srcdir)/scan-sge-job.in $(srcdir)/submit-sge-job.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = submit-sge-job scan-sge-job cancel-sge-job CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" SCRIPTS = $(pkgdata_SCRIPTS) SOURCES = DIST_SOURCES = DATA = $(dist_pkgdata_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = 
@ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = 
@LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ 
mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ dist_pkgdata_DATA = configure-sge-env.sh pkgdata_SCRIPTS = scan-sge-job submit-sge-job cancel-sge-job all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/sge/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/sge/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): submit-sge-job: $(top_builddir)/config.status $(srcdir)/submit-sge-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ scan-sge-job: $(top_builddir)/config.status $(srcdir)/scan-sge-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cancel-sge-job: $(top_builddir)/config.status $(srcdir)/cancel-sge-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " 
$(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dist_pkgdataDATA install-pkgdataSCRIPTS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dist_pkgdataDATA \ install-dvi install-dvi-am install-exec install-exec-am \ install-html install-html-am install-info install-info-am \ install-man install-pdf install-pdf-am install-pkgdataSCRIPTS \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \ uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/lrms/sge/PaxHeaders.7502/configure-sge-env.sh0000644000000000000000000000012413044140007026226 xustar000000000000000027 mtime=1485881351.109312 27 atime=1513200576.558726 30 ctime=1513200663.372788736 nordugrid-arc-5.4.2/src/services/a-rex/lrms/sge/configure-sge-env.sh0000644000175000002070000000434313044140007026277 0ustar00mockbuildmock00000000000000# set environment variables: # SGE_BIN_PATH # SGE_ROOT # SGE_CELL # SGE_QMASTER_PORT # SGE_EXECD_PORT # ############################################################## # Read ARC config file ############################################################## if [ -z "$pkgdatadir" ]; then echo 'pkgdatadir must be set' 1>&2; exit 1; fi . "$pkgdatadir/config_parser_compat.sh" || exit $? ARC_CONFIG=${ARC_CONFIG:-/etc/arc.conf} config_parse_file $ARC_CONFIG 1>&2 || exit $? config_import_section "common" config_import_section "infosys" config_import_section "grid-manager" # Also read queue section if [ ! -z "$joboption_queue" ]; then config_import_section "queue/$joboption_queue" fi # performance logging: if perflogdir or perflogfile is set, logging is turned on. 
So only set them when enable_perflog_reporting is ON unset perflogdir unset perflogfile enable_perflog=${CONFIG_enable_perflog_reporting:-no} if [ "$CONFIG_enable_perflog_reporting" == "expert-debug-on" ]; then perflogdir=${CONFIG_perflogdir:-/var/log/arc/perfdata} perflogfile="${perflogdir}/backends.perflog" fi ############################################################## # Initialize SGE environment variables ############################################################## SGE_ROOT=${CONFIG_sge_root:-$SGE_ROOT} if [ -z "$SGE_ROOT" ]; then echo 'SGE_ROOT not set' 1>&2 return 1 fi SGE_CELL=${SGE_CELL:-default} SGE_CELL=${CONFIG_sge_cell:-$SGE_CELL} export SGE_ROOT SGE_CELL if [ ! -z "$CONFIG_sge_qmaster_port" ]; then export SGE_QMASTER_PORT=$CONFIG_sge_qmaster_port fi if [ ! -z "$CONFIG_sge_execd_port" ]; then export SGE_EXECD_PORT=$CONFIG_sge_execd_port fi ############################################################## # Find path to SGE executables ############################################################## # 1. use sge_bin_path config option, if set if [ ! -z "$CONFIG_sge_bin_path" ]; then SGE_BIN_PATH=$CONFIG_sge_bin_path; fi # 2. otherwise see if qsub can be found in the path if [ -z "$SGE_BIN_PATH" ]; then qsub=$(type -p qsub) SGE_BIN_PATH=${qsub%/*} unset qsub fi if [ ! -x "$SGE_BIN_PATH/qsub" ]; then echo 'SGE executables not found! Check that sge_bin_path is defined' 1>&2 return 1 fi export SGE_BIN_PATH nordugrid-arc-5.4.2/src/services/a-rex/lrms/sge/PaxHeaders.7502/cancel-sge-job.in0000644000000000000000000000012711562710611025462 xustar000000000000000027 mtime=1305186697.469298 30 atime=1513200650.650633139 30 ctime=1513200663.375788773 nordugrid-arc-5.4.2/src/services/a-rex/lrms/sge/cancel-sge-job.in0000644000175000002070000000105511562710611025525 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -x # # Cancel job running in SGE. # echo "----- starting cancel_sge_job -----" 1>&2 joboption_lrms=sge # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkgdatadir="$basedir" . ${pkgdatadir}/cancel_common.sh || exit $? echo executing $SGE_BIN_PATH/qdel with job id $joboption_jobid 1>&2 $SGE_BIN_PATH/qdel "${joboption_jobid}" echo "----- exiting cancel_sge_job -----" 1>&2 echo "" 1>&2 exit 0 nordugrid-arc-5.4.2/src/services/a-rex/lrms/sge/PaxHeaders.7502/README0000644000000000000000000000012311016612002023222 xustar000000000000000026 mtime=1211831298.62818 27 atime=1513200576.559726 30 ctime=1513200663.372788736 nordugrid-arc-5.4.2/src/services/a-rex/lrms/sge/README0000644000175000002070000000004111016612002023263 0ustar00mockbuildmock00000000000000Sun Grid Engine control scripts. nordugrid-arc-5.4.2/src/services/a-rex/lrms/sge/PaxHeaders.7502/submit-sge-job.in0000644000000000000000000000012712733561146025550 xustar000000000000000027 mtime=1466884710.220101 30 atime=1513200650.618632748 30 ctime=1513200663.376788785 nordugrid-arc-5.4.2/src/services/a-rex/lrms/sge/submit-sge-job.in0000755000175000002070000003417212733561146025624 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -xv # # Submits job to Sun Grid Engine (SGE). # Input: path to grami file (same as Globus). # # A temporary job script is created for the submission and then removed # at the end of this script. # echo "----- starting submit_sge_job -----" 1>&2 joboption_lrms=sge # ARC1 passes first the config file. 
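# Illustration only (the paths below are hypothetical, not taken from this
# package): A-REX typically invokes this wrapper roughly as
#     submit-sge-job --config /etc/arc.conf <controldir>/job.<id>.grami
# The optional "--config <file>" pair is consumed by the test just below,
# leaving the grami file as $1 for the init call further down.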
if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkgdatadir="$basedir" . ${pkgdatadir}/configure-sge-env.sh || exit $? . ${pkgdatadir}/submit_common.sh || exit $? joboption_localtransfer='no' #Log performance perflogfilesub="${perflogdir}/submission.perflog" if [ ! -z "$perflogdir" ]; then #start time stamp start_ts=`date +%s.%N` fi ############################################################## # Parse grami file, read arc config ############################################################## init $1 cat $1 1>&2 read_arc_conf ############################################################## # Zero stage of runtime environments ############################################################## RTE_stage0 # Force shell /bin/sh, other qsub options have been moved to the job script SGE_QSUB='qsub -S @posix_shell@' SGE_QCONF=qconf if [ "$SGE_BIN_PATH" ] ; then SGE_QSUB=${SGE_BIN_PATH}/${SGE_QSUB} SGE_QCONF=${SGE_BIN_PATH}/${SGE_QCONF} fi mktempscript ############################################################## # Start job script ############################################################## echo '#!@posix_shell@' > $LRMS_JOB_SCRIPT echo "# SGE batch job script built by grid-manager" >> $LRMS_JOB_SCRIPT # Job not rerunable: echo "#$ -r n" >> $LRMS_JOB_SCRIPT # Don't send mail when job finishes: echo "#$ -m n" >> $LRMS_JOB_SCRIPT # Mix standard output and standard error: echo "#$ -j y" >> $LRMS_JOB_SCRIPT # Write output to comment file: echo "#$ -o ${joboption_directory}/.comment" >> $LRMS_JOB_SCRIPT ############################################################## # priority ############################################################## if [ ! -z "$joboption_priority" ]; then #first we must scale priority. SGE: -1023 -> 1024 ARC: 0-100 #user can only decrease priority: i.e. -1023 -> 0 (info from gsciacca@lhep.unibe.ch) #Same problem as SLURM. We can only prioritize grid jobs. Locally submitted jobs will get highest priority. priority=$((joboption_priority * 1023 / 100)) priority=$((priority-1023)) echo "#$ -p ${priority}" >> $LRMS_JOB_SCRIPT fi # Choose queue. echo "#$ -q $joboption_queue" >> $LRMS_JOB_SCRIPT # job name for convenience if [ ! -z "${joboption_jobname}" ] ; then jobname=`echo "$joboption_jobname" | \ sed 's/^\([^[:alpha:]]\)/N\1/' | \ sed 's/[^[:alnum:]]/_/g' | \ sed 's/\(...............\).*/\1/'` echo "#$ -N \"$jobname\"" >> $LRMS_JOB_SCRIPT fi echo "SGE jobname: $jobname" 1>&2 ############################################################## # (non-)parallel jobs ############################################################## set_count ############################################################## # parallel jobs ############################################################## # In addition to the number of parallel tasks, also a valid # parallel environment (PE) must be set for SGE. # # The selection of PE is done by Runtime Environment setup script in the zero # stage. The user has to request a proper RE in addition to setting the # "count" -property in the xrsl. The RE script must set the name of the desired # PE to joboption_nodeproperty_# -variable (# is a number starting from zero, # RE should use the lowest previously undefined number). This script then # searches through the joboption_nodeproperty_# variables and compares them to # the PE list obtained from SGE. The first matching PE name is used. 
# if [ -n "$joboption_nodeproperty_0" ]; then i=0 sge_parallel_environment_list=`$SGE_QCONF -spl` while eval jope=\${joboption_nodeproperty_$i} && test "$jope" ; do for ipe in $sge_parallel_environment_list ; do if [ "$jope" = "$ipe" ] ; then break 2 # now $jope contains the requested parallel env fi done i=$(($i + 1)) done if [ -n "$jope" ] ; then echo "#\$ -pe $jope $joboption_count" >> $LRMS_JOB_SCRIPT else echo 'ERROR: Setting parallel environment failed.' 1>&2 fi fi if [ "$joboption_exclusivenode" = "true" ]; then sge_excl_complex=`$SGE_QCONF -sc | grep EXCL | head -n 1` if [ -n "$sge_excl_complex" ]; then sge_excl_complex_name=`echo $sge_excl_complex | awk '{print $1}'` echo "#\$ -l ${sge_excl_complex_name}=true" >> $LRMS_JOB_SCRIPT else echo "WARNING: Exclusive execution support is not configured by this Grid Engine" 1>&2 echo "WARNING: Example configuration: https://wiki.nordugrid.org/index.php/LRMS_Backends/Testbeds" 1>&2 fi fi ############################################################## # Execution times (obtained in seconds) ############################################################## # SGE has soft and hard limits (soft = SIGUSR1, hard = SIGKILL sent to the job), # let's allow time_hardlimit_ratio extra before the hard limit. # cputime/walltime is obtained in seconds via $joboption_cputime and $joboption_walltime # parallel jobs, add initialization time, soft/hard limit configurable... if ( [ -n "$joboption_cputime" ] && [ $joboption_cputime -gt 0 ] ) ; then # SGE enforces job-total cpu time limit, but it expects in h_cpu and s_cpu # per-slot limits. It then scales these with the number of requested slots # before enforcing them. cputime_perslot=$(( $joboption_cputime / $joboption_count )) cputime_hard_perslot=$(( $cputime_perslot * $time_hardlimit_ratio )) s_cpu_requestable=$($SGE_QCONF -sc|awk '($1=="s_cpu" && ( $5=="YES" || $5=="FORCED" )){print $5}') h_cpu_requestable=$($SGE_QCONF -sc|awk '($1=="h_cpu" && ( $5=="YES" || $5=="FORCED" )){print $5}') opt="#$" if [ $s_cpu_requestable ]; then opt="$opt -l s_cpu=::${cputime_perslot}"; fi if [ $h_cpu_requestable ]; then opt="$opt -l h_cpu=::${cputime_hard_perslot}"; fi echo $opt >> $LRMS_JOB_SCRIPT fi if [ -n "$joboption_walltime" ] ; then if [ $joboption_walltime -lt 0 ] ; then echo 'WARNING: Less than 0 wall time requested: $joboption_walltime' 1>&2 joboption_walltime=0 echo 'WARNING: wall time set to 0' 1>&2 fi joboption_walltime_hard=$(( $joboption_walltime * $time_hardlimit_ratio )) s_rt_requestable=$($SGE_QCONF -sc|awk '($1=="s_rt" && ( $5=="YES" || $5=="FORCED" )){print $5}') h_rt_requestable=$($SGE_QCONF -sc|awk '($1=="h_rt" && ( $5=="YES" || $5=="FORCED" )){print $5}') opt="#$" if [ $s_rt_requestable ]; then opt="$opt -l s_rt=::${joboption_walltime}"; fi if [ $h_rt_requestable ]; then opt="$opt -l h_rt=::${joboption_walltime_hard}"; fi echo $opt >> $LRMS_JOB_SCRIPT fi ############################################################## # Requested memory (mb) ############################################################## set_req_mem # There are soft and hard limits for virtual memory consumption in SGE if [ -n "$joboption_memory" ] ; then joboption_memory_hard=$(( $joboption_memory * $memory_hardlimit_ratio )) h_vmem_requestable=$($SGE_QCONF -sc|awk '($1=="h_vmem" && ( $5=="YES" || $5=="FORCED" )){print $5}') s_vmem_requestable=$($SGE_QCONF -sc|awk '($1=="s_vmem" && ( $5=="YES" || $5=="FORCED" )){print $5}') opt="#$" if [ $s_vmem_requestable ]; then opt="$opt -l s_vmem=${joboption_memory}M"; fi if [ $h_vmem_requestable ]; 
then opt="$opt -l h_vmem=${joboption_memory_hard}M"; fi echo $opt >> $LRMS_JOB_SCRIPT fi ############################################################## # Extra job options. This is the last, so that # it can overwrite previously set options. ############################################################## if [ ! -z "$CONFIG_sge_jobopts" ]; then echo "#$ $CONFIG_sge_jobopts" >> $LRMS_JOB_SCRIPT fi ############################################################## # Override umask ############################################################## echo "" >> $LRMS_JOB_SCRIPT echo "# Overide umask of execution node (sometime values are really strange)" >> $LRMS_JOB_SCRIPT echo "umask 077" >> $LRMS_JOB_SCRIPT ############################################################## # By default, use $TMPDIR from SGE to alleviate its cleanup facilities. # It can be overridden with scratchdir though. # Don't do this if "shared_scratch" is defined in arc.conf. ############################################################## if [ "$RUNTIME_LOCAL_SCRATCH_DIR" ] && [ ! "$RUNTIME_FRONTEND_SEES_NODE" ]; then echo "if [ -d \"${CONFIG_scratchdir:-\$TMPDIR}\" ]; then RUNTIME_LOCAL_SCRATCH_DIR=${CONFIG_scratchdir:-\$TMPDIR}; fi" >> $LRMS_JOB_SCRIPT fi sourcewithargs_jobscript ############################################################## # Add environment variables ############################################################## add_user_env ############################################################## # Check for existance of executable, ############################################################## if [ -z "${joboption_arg_0}" ] ; then echo 'Executable is not specified' 1>&2 exit 1 fi if [ ! "$joboption_localtransfer" = 'yes' ] ; then program_start=`echo ${joboption_arg_0} | cut -c 1 2>&1` if [ "$program_start" != '$' ] && [ "$program_start" != '/' ] ; then if [ ! -f $joboption_directory/${joboption_arg_0} ] ; then echo 'Executable does not exist, or permission denied.' 1>&2 echo " Executable $joboption_directory/${joboption_arg_0}" 1>&2 echo " whoami: "`whoami` 1>&2 echo " ls -l $joboption_directory/${joboption_arg_0}: "`ls -l $joboption_directory/${joboption_arg_0}` exit 1 fi if [ ! -x $joboption_directory/${joboption_arg_0} ] ; then echo 'Executable is not executable' 1>&2 exit 1 fi fi fi ####################################################################### # copy information useful for transfering files to/from node directly ####################################################################### if [ "$joboption_localtransfer" = 'yes' ] ; then setup_local_transfer fi setup_runtime_env # Override location of .diag file: put it under the working directory echo 'RUNTIME_JOB_DIAG=$RUNTIME_JOB_DIR/.diag' >> $LRMS_JOB_SCRIPT ############################################################## # Add std... 
to job arguments ############################################################## include_std_streams ############################################################## # Move files to local working directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_node echo "" >> $LRMS_JOB_SCRIPT echo "RESULT=0" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT ##################################################### # Download input files #################################################### download_input_files ############################################################## # Skip execution if something already failed ############################################################## echo "" >> $LRMS_JOB_SCRIPT echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime configuration ############################################################## RTE_stage1 echo "echo \"runtimeenvironments=\$runtimeenvironments\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT if [ -z "$RUNTIME_NODE_SEES_FRONTEND" ] ; then echo "Nodes detached from gridarea are not supported when SGE is used. Aborting job submit" 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" exit 1 fi ############################################################## # Execution ############################################################## cd_and_run echo "fi" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime (post)configuration at computing node ############################################################## configure_runtime ##################################################### # Upload output files #################################################### upload_output_files ############################################################## # Move files back to session directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_frontend if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-sge-job, JobScriptCreation: $t" >> $perflogfilesub fi if [ ! -z "$perflogdir" ]; then #start time stamp start_ts=`date +%s.%N` fi ####################################### # Submit the job ####################################### ( echo "SGE job script built" cd "$joboption_directory" echo "SGE script follows:" cat "$LRMS_JOB_SCRIPT" echo # Execute qsub command ${SGE_QSUB} < $LRMS_JOB_SCRIPT 1>$LRMS_JOB_OUT 2>$LRMS_JOB_ERR # expected SGE output is like: 'Your job 77 ("perftest") has been # submitted', the line below uses only the job number as job id. job_id=$(cat $LRMS_JOB_OUT $LRMS_JOB_ERR \ | awk '/^.our job .* has been submitted/ {split($0,field," ");print field[3]}') # anything else is a sign of problems, which should be logged warnings=$(cat $LRMS_JOB_OUT $LRMS_JOB_ERR \ | grep -v '^.our job .* has been submitted' | grep -v '^Exit') if [ ! -z "$warnings" ]; then echo "WARNING: $warnings"; echo; fi exitcode=0 if [ -z $job_id ] ; then echo "job *NOT* submitted successfully!" exitcode=1 else echo "joboption_jobid=$job_id" >> $arg_file echo "local job id: $job_id" echo "job submitted successfully!" exitcode=0 fi # Remove temporary job script file rm -f $LRMS_JOB_SCRIPT $LRMS_JOB_OUT $LRMS_JOB_ERR echo "----- exiting submit_sge_job -----"; ) 1>&2 if [ ! 
-z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-sge-job, JobSubmission: $t" >> $perflogfilesub fi exit $exitcode nordugrid-arc-5.4.2/src/services/a-rex/lrms/sge/PaxHeaders.7502/scan-sge-job.in0000644000000000000000000000012613024226256025162 xustar000000000000000026 mtime=1481714862.73284 30 atime=1513200650.633632931 30 ctime=1513200663.376788785 nordugrid-arc-5.4.2/src/services/a-rex/lrms/sge/scan-sge-job.in0000755000175000002070000004335213024226256025237 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -xv # # scan-sge-job does not use log-files, it only uses qacct. # # usage: scan_sge_job control_dir ... # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? libexecdir="${ARC_LOCATION:-@prefix@}/@pkglibexecsubdir@/" pkgdatadir="$basedir" # Assume that gm-kick and scan_common are installed in the same directory GMKICK=${libexecdir}/gm-kick . "${pkgdatadir}/scan_common.sh" || exit $? ############################################################## # Set SGE specific environment. ############################################################## . "${pkgdatadir}/configure-sge-env.sh" 1>&2 || exit $? ############################################################## olog () { echo "[`date +%Y-%m-%d\ %T`] scan-sge-job: $*" 1>&2; } umask 022 my_id=`id -u` if [ -z "$1" ] ; then exit 1 ; fi TMPDIR=${TMPDIR:-@tmp_dir@} # Log system performance if [ ! -z "$perflogdir" ]; then perflog_common "$perflogdir" "$CONFIG_controldir" fi # first control_dir is used for storing own files control_dir=$1 control_dirs= while [ $# -gt 0 ] ; do control_dirs="${control_dirs} $1" shift done # Takes diagfile_acct, SGE qacct command, id and extraargs # writes output to diagfile_acct (temporary file for accounting info) write_diagfile_acct () { diagfile_acct=$1 qacct_command=$2 id=$3 extraargs=$4 #Check that qacct is available. if ! `which $qacct_command >dev/null` ; then olog "WARNING: qacct was not found. Accounting data will not be recorded." 
fi $qacct_command -j $id $extraargs 2> /dev/null \ | /usr/bin/perl -e 'while(<>) { $nodename=$1 if /^hostname\s+(\S+)/; $id=$1 if /^jobnumber\s+(\S+)/; $exitcode=$1 if /^exit_status\s+(\S+)/; $failed=$1 if /^failed\s+(.*\S)/; $CPUTime=$1 if /^cpu\s+(\d+)/; $Processors=$1 if /^slots\s+(\d+)/; $KernelTime=$1 if /^ru_stime\s+(\d+)/; $WallTime=$1 if /^ru_wallclock\s+(\d+)/; $UsedMemory=$1 if /^maxvmem\s+(\S+)M/; $UsedMemory=$1*1024 if /^maxvmem\s+(\S+)G/; $start_time=`date -d "$1" +%s` if /^start_time\s+(.+)/; $end_time =`date -d "$1" +%s` if /^end_time\s+(.+)/; } # converts seconds since epoch in local time into Mds time format (UTC) sub mds_date { my ($seconds) = @_; my @t = gmtime $seconds; my ($Y,$M,$D,$h,$m,$s) = (1900+$t[5],1+$t[4],$t[3],$t[2],$t[1],$t[0]); return sprintf "%04i%02i%02i%02i%02i%02iZ",$Y,$M,$D,$h,$m,$s; } END { exit unless $id; print "LRMSStartTime=".mds_date($1)."\n" if $start_time =~ m/^(\d+)$/; print "LRMSEndTime=" .mds_date($1)."\n" if $end_time =~ m/^(\d+)$/; print "nodename=${nodename}\n"; print "CPUTime=${CPUTime}.0s\n"; print "Processors=${Processors}\n"; print "WallTime=${WallTime}.0s\n"; print "KernelTime=${KernelTime}.0s\n"; print "UserTime=".int($CPUTime-$KernelTime).".0s\n"; print "AverageTotalMemory=".int($UsedMemory*1024)."kB\n"; print "failed=$failed\n"; print "\nsgeexitcode=$exitcode\n"; }' \ > "$diagfile_acct" } # Parse GRAMI-file and see what was requested check_exceeded_resources_grami () { gramifile=$1 errorsfile=$2 used_cputime=$3 used_walltime=$4 used_memory=$5 req_walltime=`sed -n "s/^joboption_walltime=//p" "$gramifile" | tail -n 1` req_cputime=`sed -n "s/^joboption_cputime=//p" "$gramifile" | tail -n 1` req_memory=`sed -n "s/^joboption_memory=//p" "$gramifile" | tail -n 1` if [ ! -z "$used_memory" ] && [ ! -z "$req_memory" ] \ && [ "$req_memory" != "" ] && [ "$req_memory" -gt 0 ] \ && [ $(( 100*$used_memory/1024/$req_memory )) -gt 95 ]; then overlimit="memory" fi if [ ! -z "$used_cputime" ] && [ ! -z "$req_cputime" ] \ && [ "$req_cputime" != "" ] && [ "$req_cputime" -gt 0 ] \ && [ $(( 100*$used_cputime/$req_cputime )) -gt 95 ]; then overlimit="cputime" fi if [ ! -z "$used_walltime" ] && [ ! -z "$req_walltime" ] \ && [ "$req_walltime" != "" ] && [ "$req_walltime" -gt 0 ] \ && [ $(( 100*$used_walltime/$req_walltime )) -gt 95 ]; then overlimit="walltime" fi echo ++++++++++++++++++++++++++ >> "$errorsfile" echo Resources: >> "$errorsfile" echo ++++++++++++++++++++++++++ >> "$errorsfile" echo req_memory=$req_memory MB >> "$errorsfile" echo req_cputime=$req_cputime >> "$errorsfile" echo req_walltime=$req_walltime >> "$errorsfile" echo used_memory=$used_memory kB >> "$errorsfile" echo used_cputime=$used_cputime >> "$errorsfile" echo used_walltime=$used_walltime >> "$errorsfile" if [ ! 
-z "$overlimit" ]; then echo overlimit=$overlimit >> "$errorsfile" fi echo ++++++++++++++++++++++++++ >> "$errorsfile" } #Handle failedcode handle_failedcode () { failedcode=$1 donefile=$2 exitcode=$3 sgeexitcode=$4 overlimit=$5 if [ -z "$failedcode" ]; then # Should never happen olog "SGE job $id failed: SGE accouting record is incomplete" echo "-1 SGE accouting record is incomplete" > "$donefile" elif [ "$failedcode" = "0" ]; then if [ -z "$exitcode" ]; then olog "SGE job $id failed with unknown exit code" if [ -z "$sgeexitcode" ] || [ "$sgeexitcode" = 0 ]; then sgeexitcode="-1"; fi echo "$sgeexitcode Job failed with unknown exit code" > "$donefile" elif [ "$exitcode" = "0" ]; then #olog "SGE job $id finished successfully" echo "0" > "$donefile" else #olog "SGE job $id failed with exit code $exitcode" if [ -z "$sgeexitcode" ] || [ "$sgeexitcode" = 0 ]; then sgeexitcode="-1"; fi echo "$sgeexitcode Job failed with exit code $exitcode" > "$donefile" fi else # SGE reports a problem if [ "$failedcode" = "25" ]; then failedreason="SGE error $failedcode: Job will be rescheduled" elif [ "$failedcode" = "24" ]; then failedreason="SGE error $failedcode: Job will be migrated" elif [ "$failedcode" = "100" ]; then # This happens when SGE signals the job, as in the case when a # resource limit is exceeded. We don't know for sure whether # they were enforced or not but if a job is killed by SGE, this # might be the likely cause. if [ -z "$overlimit" ]; then failedreason="SGE error $failedreason" elif [ $overlimit = "memory" ]; then failedreason="job killed: vmem" elif [ $overlimit = "cputime" ]; then failedreason="job killed: cput" elif [ $overlimit = "walltime" ]; then failedreason="job killed: wall" fi else failedreason="SGE error $failedreason" fi if [ ! -z "$eqwmessage" ]; then failedreason="$eqwmessage" fi olog "SGE job $id failed: $failedreason" if [ -z "$sgeexitcode" ] || [ "$sgeexitcode" = 0 ]; then sgeexitcode="-1"; fi echo "271 $failedreason" > "$donefile" fi # failedcode } # Add accounting info to $diagfile add_accounting_to_diag () { diagfile=$1 diagfile_acct=$2 diagfile_tmp=$3 errorsfile=$4 uid=$5 commentfile=$6 ctr_dir=$7 gramifile="${ctr_dir}/job.${gridid}.grami" donefile="${ctr_dir}/job.${gridid}.lrms_done" countfile="${ctr_dir}/job.${gridid}.lrms_job" errorsfile="${ctr_dir}/job.${gridid}.errors" localfile="${ctr_dir}/job.${gridid}.local" used_walltime=`sed -n 's/^WallTime=\(.*\).0s/\1/p' "$diagfile_acct" | tail -n 1` used_cputime=`sed -n 's/^CPUTime=\(.*\).0s/\1/p' "$diagfile_acct" | tail -n 1` used_memory=`sed -n 's/^AverageTotalMemory=\(.*\)kB/\1/p' "$diagfile_acct" | tail -n 1` sgeexitcode=`sed -n 's/^sgeexitcode=//p' "$diagfile_acct" | tail -n 1` failedreason=`sed -n 's/^failed=//p' "$diagfile_acct" | tail -n 1` failedcode=`echo $failedreason | awk '{print $1}'` if [ -s "$diagfile_acct" ]; then # Remove attributes from existing diagfile that we should have # gotten from qacct this time, otherwise we will get duplicates do_as_uid "$uid" "cat '$diagfile'" \ | grep -v "^nodename=" \ | grep -v "^WallTime=" \ | grep -v "^KernelTime=" \ | grep -v "^UserTime=" \ | grep -v "^CPUTime=" \ | grep -v "^Processors=" \ | grep -v "^LRMSStartTime=" \ | grep -v "^LRMSEndTime=" \ | grep -v "^MaxResidentMemory=" \ | grep -v "^AverageTotalMemory=" \ > "$diagfile_tmp" cat "$diagfile_tmp" "$diagfile_acct" \ | grep -v '^sgeexitcode=' \ | do_as_uid "$uid" "cat > '$diagfile'" # Check for exceeded resources limits overlimit= if [ -s "$gramifile" ]; then check_exceeded_resources_grami "$gramifile" 
"$errorsfile" "$used_cputime" "$used_walltime" "$used_memory" fi # grami file save_commentfile "$uid" "$commentfile" "$errorsfile" handle_failedcode "$failedcode" "$donefile" "$exitcode" "$sgeexitcode" "$overlimit" # wake up GM $GMKICK "$localfile" >> "$errorsfile" rm -f "$countfile" rm -f "$diagfile_tmp" "$diagfile_acct" # we're done, go to next job id return 0 fi # accounting info ok rm -f "$diagfile_tmp" "$diagfile_acct" return 1 } # Handle missing accounting info handle_missing_accounting () { countfile=$1 uid=$2 commentfile=$3 errorsfile=$4 donefile=$5 exitcode=$6 localfile=$7 GMKICK=$8 SGE_BIN_PATH=$9 id=$10 if [ -n "$noaccounting" ]; then # Accounting file is not accessible on this host. echo "scan-sge-job: WARNING: SGE's accounting file is not accessible on the Grid frontend node" >> "$errorsfile" echo "scan-sge-job: WARNING: Resource usage reported for this job may be inaccurate or incomplete" >> "$errorsfile" if [ -z "$exitcode" ]; then echo "-1 Job failed with unknown exit code" > "$donefile" elif [ "$exitcode" = "0" ]; then echo "0" > "$donefile" else echo "$exitcode Job failed with exit code $exitcode" > "$donefile" fi $GMKICK "$localfile" >> "$errorsfile" return fi # There is a certain lag between the end of the job # and the time when accouting information becomes available. # We do 5 retries, keeping the count in $countfile counter=0 if [ -f "$countfile" ] ; then counter=`cat "$countfile"` counter=$(( $counter + 1 )) fi if [ "$counter" -gt 5 ]; then # Cannot wait more for accounting info. echo "scan-sge-job: WARNING: No SGE accounting record found for this job" >> "$errorsfile" echo "scan-sge-job: WARNING: Resource usage reported for this job may be inaccurate or incomplete" >> "$errorsfile" save_commentfile "$uid" "$commentfile" "$errorsfile" if [ -z "$exitcode" ]; then olog "No SGE accounting record found for job $id. No exitcode in diag file" echo "-1 Job failed with unknown exit code" > "$donefile" elif [ "$exitcode" = "0" ]; then olog "No SGE accounting record found for job $id. exitcode=$exitcode in diag file" echo "0" > "$donefile" else olog "No SGE accounting record found for job $id. exitcode=$exitcode in diag file" echo "$exitcode Job failed with exit code $exitcode" > "$donefile" fi rm -f "$countfile" # wake up GM $GMKICK "$localfile" >> "$errorsfile" else # test again for job existence, only count if not known ${SGE_BIN_PATH}/qstat -j $id > /dev/null 2>&1 if [ $? -ne 0 ]; then echo "$counter" > "$countfile" else olog "SGE job $id disappeared and then reappeared!" rm -f "$countfile" fi fi } # GD: no attempt to look for SGE Manager logfiles, restrict to job logs. if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi # Get all jobs pids=`${SGE_BIN_PATH}/qstat -u '*' 2>/dev/null | sed -n 's/^ *\([0-9][0-9]*\) .*/\1/p'` if [ $? != 0 ]; then olog "Failed running ${SGE_BIN_PATH}/qstat" sleep 60 exit 1 fi if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-sge-job, qstat -u '*': $t" >> $perflogfile fi # Go through directories for ctr_dir in $control_dirs ; do # Obtain ids of pending/running jobs stored in job.*.local if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi rjobs=`find "$ctr_dir/processing" -name 'job.*.status' 2>/dev/null \ | sed 's/processing\/job\.\([^\.]*\)\.status$/job.\1.local/'` if [ ! 
-z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-sge-job, control dir traversal: $t" >> $perflogfile fi if [ -z "$rjobs" ] ; then continue ; fi ids=`echo $rjobs | xargs grep -h '^localid=' 2>/dev/null | sed 's/^localid=\([^ ]*\)/\1/'` if [ -z "$ids" ] ; then continue ; fi # compare them to running jobs and find missing bids= for id in $ids ; do found=`echo "$pids" | grep "^$id"` if [ -z "$found" ] ; then bids="$bids $id" fi done # go through missing ids for id in $bids ; do # find grid job corresponding to current local id jobfile=`find "$ctr_dir/processing" -name 'job.*.status' 2>/dev/null \ | sed 's/processing\/job\.\([^\.]*\)\.status$/job.\1.local/' \ | xargs grep -l "localid=$id\$" 2>/dev/null` if [ -z "$jobfile" ] ; then continue ; fi # unless running as superuser, skip jobs not belonging to the current user if [ "$my_id" != '0' ] ; then if [ ! -O "$jobfile" ] ; then continue ; fi fi # find who runs this job uid=$(get_owner_uid "$jobfile") [ -z "$uid" ] && { log "Failed to stat $jobfile"; continue; } # extract grid id gridid=`basename "$jobfile" '.local' | sed 's/^job\.//'` donefile="${ctr_dir}/job.${gridid}.lrms_done" countfile="${ctr_dir}/job.${gridid}.lrms_job" failedfile="${ctr_dir}/job.${gridid}.failed" errorsfile="${ctr_dir}/job.${gridid}.errors" localfile="${ctr_dir}/job.${gridid}.local" xmlfile="${ctr_dir}/job.${gridid}.xml" if [ -f "$donefile" ] ; then continue ; fi statusfile="${ctr_dir}/processing/job.${gridid}.status" if [ ! -f "$statusfile" ] ; then continue ; fi status=`cat "$statusfile"` if [ "$status" != "INLRMS" ] && [ "$status" != "CANCELING" ] ; then continue ; fi # get session directory of this job session=`grep -h '^sessiondir=' "$jobfile" | sed 's/^sessiondir=\(.*\)/\1/'` commentfile="${session}.comment" commentfile2="${session}/.comment" do_as_uid "$uid" "cat '$commentfile2' >> '$commentfile' 2> /dev/null; rm -f '$commentfile2'"; if [ -d "$session" ] ; then diagfile="${session}.diag" diagfile2="${session}/.diag" do_as_uid "$uid" "cat '$diagfile2' >> '$diagfile' 2> /dev/null; rm -f '$diagfile2'"; # try to obtain the exit code # $diagfile is owned by the user running the job. Switch user to access it. exitcode=`do_as_uid "$uid" "sed -n 's/^exitcode=//p' '$diagfile'" | tail -n 1` diagfile_tmp=`mktemp "$TMPDIR/diag_tmp.XXXXXX"` || { sleep 60; exit 1; } diagfile_acct=`mktemp "$TMPDIR/diag_acct.XXXXXX"` || { sleep 60; exit 1; } noaccounting= # qacct can take quite long. Here is a workaround. # Find the accounting file, and copy the last 50000 # records to a temp file. acctfile=$SGE_ROOT/$SGE_CELL/common/accounting if [ -f "$acctfile" ]; then briefacct=`mktemp "$TMPDIR/accounting.XXXXXX"` || { sleep 60; exit 1; } tail -n 50000 "$acctfile" > "$briefacct" if [ $? = 0 ]; then extraargs="-f $briefacct"; fi else # Accounting file is not accessible on this host. noaccounting=1 fi eqwmessage=`grep "SGE job state was Eqw" $xmlfile | sed -r 's/<[/]?OtherMessages>//g'` # get accounting info. write diag file write_diagfile_acct "$diagfile_acct" "${SGE_BIN_PATH}/qacct" "$id" "$extraargs" if [ "x$briefacct" != "x" ]; then rm -f "$briefacct"; fi # If the last qacct record is about migration, # we should wait for the next qacct record to appear # Delete file, like there was no accounting present at all! 
if grep -q "^failed=24 *: migrating" "$diagfile_acct" \ || grep -q "^failed=25 *: rescheduling" "$diagfile_acct"; then rm -f "$diagfile_acct" olog "SGE job $id: the last record in qacct is about migration. Waiting for next record to appear" fi # Add accounting info to $diagfile add_accounting_to_diag "$diagfile" "$diagfile_acct" "$diagfile_tmp" "$errorsfile" "$uid" "$commentfile" "$ctr_dir" if [ $? = 0 ]; then continue fi fi # session directory exists # This section is only reached when accounting info is not present handle_missing_accounting "$countfile" "$uid" "$commentfile" "$errorsfile" "$donefile" "$exitcode" "$localfile" "$GMKICK" "$SGE_BIN_PATH" "$id" done # loop over bids # Detect the unlikely situation when a job reappears in the qstat listing # after being absent in the previous run of the scan-*-job (which updated # the counter file) for countfile in `find $ctr_dir -name 'job.*.lrms_job'`; do localfile=${countfile%.lrms_job}.local pid=`sed -n 's/^localid=\([^ ]*\)/\1/p' "$localfile" 2>/dev/null` if [ -z "$pid" ]; then continue; fi if echo "$pids" | grep "^$pid\$" >/dev/null; then olog "SGE job $id disappeared and then reappeared!" rm -f "$countfile" fi done done # loop over control_dirs sleep 60 exit 0 nordugrid-arc-5.4.2/src/services/a-rex/lrms/PaxHeaders.7502/slurm0000644000000000000000000000013213214316027022663 xustar000000000000000030 mtime=1513200663.467789898 30 atime=1513200668.718854121 30 ctime=1513200663.467789898 nordugrid-arc-5.4.2/src/services/a-rex/lrms/slurm/0000755000175000002070000000000013214316027023006 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/lrms/slurm/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612047045306025001 xustar000000000000000027 mtime=1352420038.089673 29 atime=1513200603.39305516 30 ctime=1513200663.463789849 nordugrid-arc-5.4.2/src/services/a-rex/lrms/slurm/Makefile.am0000644000175000002070000000015612047045306025046 0ustar00mockbuildmock00000000000000dist_pkgdata_DATA = configure-SLURM-env.sh pkgdata_SCRIPTS = submit-SLURM-job cancel-SLURM-job scan-SLURM-job nordugrid-arc-5.4.2/src/services/a-rex/lrms/slurm/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315733025010 xustar000000000000000030 mtime=1513200603.429055601 30 atime=1513200650.665633323 30 ctime=1513200663.464789862 nordugrid-arc-5.4.2/src/services/a-rex/lrms/slurm/Makefile.in0000644000175000002070000005007013214315733025060 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/slurm DIST_COMMON = $(dist_pkgdata_DATA) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in $(srcdir)/cancel-SLURM-job.in \ $(srcdir)/scan-SLURM-job.in $(srcdir)/submit-SLURM-job.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = submit-SLURM-job scan-SLURM-job cancel-SLURM-job CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" SCRIPTS = $(pkgdata_SCRIPTS) SOURCES = DIST_SOURCES = DATA = $(dist_pkgdata_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ 
ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = 
@LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = 
@lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ dist_pkgdata_DATA = configure-SLURM-env.sh pkgdata_SCRIPTS = submit-SLURM-job cancel-SLURM-job scan-SLURM-job all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/slurm/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/slurm/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): submit-SLURM-job: $(top_builddir)/config.status $(srcdir)/submit-SLURM-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ scan-SLURM-job: $(top_builddir)/config.status $(srcdir)/scan-SLURM-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cancel-SLURM-job: $(top_builddir)/config.status $(srcdir)/cancel-SLURM-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; 
fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dist_pkgdataDATA install-pkgdataSCRIPTS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dist_pkgdataDATA \ install-dvi install-dvi-am install-exec install-exec-am \ install-html install-html-am install-info install-info-am \ install-man install-pdf install-pdf-am install-pkgdataSCRIPTS \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \ uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/lrms/slurm/PaxHeaders.7502/scan-SLURM-job.in0000644000000000000000000000012712723506061025673 xustar000000000000000027 mtime=1464765489.781788 30 atime=1513200650.696633702 30 ctime=1513200663.466789886 nordugrid-arc-5.4.2/src/services/a-rex/lrms/slurm/scan-SLURM-job.in0000755000175000002070000003235612723506061025751 0ustar00mockbuildmock00000000000000#!/bin/bash # # Periodically check state of grid jobs in SLURM, and put mark files # for finished jobs. # # usage: scan_slurm_job control_dir ... # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? libexecdir="${ARC_LOCATION:-@prefix@}/@pkglibexecsubdir@/" pkgdatadir="$basedir" . "${pkgdatadir}/configure-SLURM-env.sh" || exit $? . "${pkgdatadir}/scan_common.sh" || exit $? # Prevent multiple instances of scan-slurm-job to run concurrently lockfile="${TMPDIR:-@tmp_dir@}/scan-slurm-job.lock" #Check if lockfile exist, if not, create it. (set -C; : > "$lockfile") 2> /dev/null if [ "$?" != "0" ]; then if ps -p $(< "$lockfile") 2>/dev/null;then echo "lockfile exists and PID $(< $lockfile) is running" exit 1 fi echo "old lockfile found, was scan-slurm-job killed?" # sleep, and if no other have removed and recreated the lockfile we remove it. # there are still races possible, but this will have to do sleep $((${RANDOM}%30+10)) if ps -p $(< $lockfile) &>/dev/null;then echo "lockfile exists and $(< $lockfile) is running" exit 1 else echo "still not running, removing lockfile" rm $lockfile exit 1 fi fi echo "$$" > "$lockfile" #If killed, remove lockfile trap 'rm $lockfile' EXIT KILL TERM #Default sleep-time is 30 seconds sleep ${CONFIG_slurm_wakeupperiod:-30} # Log system performance if [ ! 
-z "$perflogdir" ]; then perflog_common "$perflogdir" "$CONFIG_controldir" fi ### use sacct? unset use_sacct if [ ! -z "${CONFIG_slurm_use_sacct}" ]; then if [ "${CONFIG_slurm_use_sacct}" = "yes" ]; then use_sacct="true" fi fi ### If GM sees the session dirs... copied from scan-pbs-jobs RUNTIME_NODE_SEES_FRONTEND=$CONFIG_shared_filesystem #default is NFS if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then RUNTIME_NODE_SEES_FRONTEND=yes fi # locally empty means no if [ "${RUNTIME_NODE_SEES_FRONTEND}" = 'no' ] ; then RUNTIME_NODE_SEES_FRONTEND= fi my_id=`id -u` #Validate control directories supplied on command-line if [ -z "$1" ] ; then echo "no control_dir specified" 1>&2; exit 1 fi for ctr_dir in "$@"; do if [ ! -d "$ctr_dir" ]; then echo "called with erronous control dir: $ctr_dir" exit 1 fi done if [ ! -z "$perflogdir" ]; then #start time stamp start_ts=`date +%s.%N` fi # List of SLURM jobids for grid-jobs with state INLRMS declare -a localids # Array with basenames of grid-job files in ctrl_dir, indexed by localid # example /some/path/job.XXXXX /some/other/parh/job.YYYYY declare -a basenames # Array with states of the jobs in SLURM, indexed by localid declare -a jobstates # Array to store localids of jobs that are determined to have finished, which are sent to gm-kick declare -a kicklist # Array with jobid blocks declare -a lidblocks # Find list of grid jobs with status INLRMS, store localid and # basename for those jobs for ctr_dir in "$@"; do for basename in $(find "$ctr_dir/processing" -name 'job.*.status' -print0 \ | xargs -0 egrep -l "INLRMS|CANCELING" \ | sed 's/processing\/job\.\([^\.]*\)\.status$/job.\1/') do localid=$(grep ^localid= "${basename}.local" | cut -d= -f2) verify_jobid "$localid" || continue localids[${#localids[@]}]="$localid" basenames[$localid]="$basename" done done # No need to continue further if no jobs have status INLRMS if [ ${#localids[@]} -eq 0 ]; then exit 0 fi # Distribute localids into block so that we don't exceed max command line length for jids in `echo "${localids[@]}" | xargs -n 4000 | tr ' ' ,`; do lidblocks[${#lidblocks[@]}]=$jids done if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` # t=`perl -e "printf '%.2f',$stop_ts-$start_ts;"` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-slurm-job, ControldirTraversal: $t" >> $perflogfile fi if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi # Get JobStates from SLURM jobstate_squeue=$(echo "${lidblocks[@]}" | xargs -n 1 $squeue -a -h -o "%i:%T" -t all -j )\ || { echo "squeue failed" 1>&2; exit 1; } for record in $jobstate_squeue; do localid=$(echo "$record"|cut -d: -f1) state=$(echo "$record"|cut -d: -f2) jobstates[$localid]=$state; done unset jobstate_squeue if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-slurm-job, squeue -a -h -o %i:%T -t all -j: $t" >> $perflogfile fi handle_commentfile () { localid=$1 sessiondir=`grep -h '^sessiondir=' $jobfile | sed 's/^sessiondir=\(.*\)/\1/'` if [ "$my_id" != '0' ] ; then if [ ! -O "$jobfile" ] ; then continue ; fi fi uid=$(get_owner_uid "$jobfile") [ -z "$uid" ] && { log "Failed to stat $jobfile"; continue; } save_commentfile "$uid" "${sessiondir}.comment" "${basenames[$localid]}.errors" } # Call scontrol and find the exitcode of a job. Write this, together with a # message to the lrms_done file. This function is used in the loop below. 
function handle_exitcode { localid="$1" tmpexitcode="$2" reason="$3" if [ "$use_sacct" ]; then jobinfostring=$("$sacct" -j $localid.batch -o ExitCode -P | tail -n 1) exitcode1=$(echo $jobinfostring|awk -F':' '{print $1}') exitcode2=$(echo $jobinfostring|awk -F':' '{print $2}') else jobinfostring=$("$scontrol" -o show job $localid) exitcode1=$(echo $jobinfostring|sed -n 's/.*ExitCode=\([0-9]*\):\([0-9]*\).*/\1/p') exitcode2=$(echo $jobinfostring|sed -n 's/.*ExitCode=\([0-9]*\):\([0-9]*\).*/\2/p') fi if [ -z "$exitcode1" ] && [ -z "$exitcode2" ] ; then exitcode=$tmpexitcode elif [ $exitcode2 -ne 0 ]; then exitcode=$(( $exitcode2 + 256 )) elif [ $exitcode1 -ne 0 ]; then exitcode=$exitcode1 else exitcode=0 fi echo "$exitcode $reason" > "${basenames[$localid]}.lrms_done" kicklist=(${kicklist[@]} $localid) } # A special version of the function above, needed to force # exit code to non-zero if the job was cancelled, since # CANCELLED jobs in SLURM can have 0 exit code. # This is a temporary workaround, should later be replaced by # proper fix that determines the reason of failure function handle_exitcode_cancelled { localid="$1" tmpexitcode="$2" reason="$3" if [ "$use_sacct" ]; then jobinfostring=$("$sacct" -j $localid.batch -o ExitCode -P | tail -n 1) exitcode1=$(echo $jobinfostring|awk -F':' '{print $1}') exitcode2=$(echo $jobinfostring|awk -F':' '{print $2}') else jobinfostring=$("$scontrol" -o show job $localid) exitcode1=$(echo $jobinfostring|sed -n 's/.*ExitCode=\([0-9]*\):\([0-9]*\).*/\1/p') exitcode2=$(echo $jobinfostring|sed -n 's/.*ExitCode=\([0-9]*\):\([0-9]*\).*/\2/p') fi if [ -z "$exitcode1" ] && [ -z "$exitcode2" ] ; then exitcode=$tmpexitcode elif [ $exitcode2 -ne 0 ]; then exitcode=$(( $exitcode2 + 256 )) elif [ $exitcode1 -ne 0 ]; then exitcode=$exitcode1 else exitcode=0 fi if [ $exitcode -eq 0 ]; then exitcode=15 reason="Job was cancelled by SLURM" fi echo "$exitcode $reason" > "${basenames[$localid]}.lrms_done" kicklist=(${kicklist[@]} $localid) } #This function filters out WallTime from the .diag-file if present and #replaces it with output from the LRMS, it also adds StartTime and #EndTime for accounting. 
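# Illustrative sketch only (never called by this script): the normalisation
# applied to SLURM StartTime/EndTime values inside handle_diag_file. Both
# formats SLURM is known to emit are turned into strings that
# date_to_utc_seconds can parse:
#   "2010-02-15T15:30:29" -> "2010-02-15 15:30:29"
#   "02/15-15:25:15"      -> "02/15 15:25:15"
normalize_slurm_time_example () {
    echo "$1" | sed 's,\([0-9]\+/[0-9]\+\)-\([0-9:]\+\),\1 \2,g' | sed 's/T/ /g'
}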
function handle_diag_file { localid="$1" ctr_diag="$2" job_read_diag if [ "$use_sacct" ]; then jobinfostring=$("$sacct" -j $localid.batch -o NCPUS,NNODES,CPUTimeRAW,Start,End,ExitCode,State -P | tail -n 1) cpus=$(echo "$jobinfostring" | awk -F'|' '{print $1}') starttime=$(echo "$jobinfostring"|awk -F'|' '{print $4}'| sed 's,\([0-9]\+/[0-9]\+\)-\([0-9:]\+\),\1 \2,g' | sed 's/T/ /g') endtime=$(echo "$jobinfostring"|awk -F'|' '{print $5}'| sed 's,\([0-9]\+/[0-9]\+\)-\([0-9:]\+\),\1 \2,g' | sed 's/T/ /g') cputime=$(echo "$jobinfostring" | awk -F'|' '{print $3}') else jobinfostring=$("$scontrol" -o show job $localid) #Slurm can report StartTime and EndTime in at least these two formats: #2010-02-15T15:30:29 #02/15-15:25:15 #For our code to be able to manage both, the first needs to keep its hyphens, #the second needs them removed starttime=$(echo "$jobinfostring"|sed -n 's/.*StartTime=\([^ ]*\) .*/\1/p' | \ sed 's,\([0-9]\+/[0-9]\+\)-\([0-9:]\+\),\1 \2,g' | sed 's/T/ /g') endtime=$(echo "$jobinfostring"|sed -n 's/.*EndTime=\([^ ]*\) .*/\1/p' | \ sed 's,\([0-9]\+/[0-9]\+\)-\([0-9:]\+\),\1 \2,g' | sed 's/T/ /g') cpus=$(echo "$jobinfostring"|sed -n 's/.*NumCPUs=\([^ ]*\) .*/\1/p') fi date_to_utc_seconds "$starttime" starttime_seconds="$return_date_seconds" seconds_to_mds_date "$return_date_seconds" LRMSStartTime=$return_mds_date date_to_utc_seconds "$endtime" endtime_seconds="$return_date_seconds" seconds_to_mds_date "$return_date_seconds" LRMSEndTime=$return_mds_date #TODO handle cputime, exitcode etc. walltime=$(( $endtime_seconds - $starttime_seconds)) #cputime=$(( $walltime * $count)) # Values to write to diag. These will override values already written. [ -n "$walltime" ] && WallTime=$walltime [ -n "$cpus" ] && Processors=$cpus [ -n "$cputime" ] && UserTime=$cputime #[ -n "$cputime" ] && KernelTime=0 job_write_diag } if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi run=0 done=0 zombie=0 failed=0 # Look at the list of jobstates and determine which jobs that have # finished. Write job.XXXX.lrms_done according to this for localid in ${localids[@]}; do # state=${jobstates[$localid]} # case $state in # Initialize jobfile variable since it's used below jobfile="${basenames[$localid]}.local" case "${jobstates[$localid]}" in "") # Job is missing (no state) from slurm but INLRMS. zombie=$(($zombie + 1)) exitcode='' # get session directory of this job sessiondir=`grep -h '^sessiondir=' $jobfile | sed 's/^sessiondir=\(.*\)/\1/'` diagfile="${sessiondir}.diag" commentfile="${sessiondir}.comment" if [ "$my_id" != '0' ] ; then if [ ! -O "$jobfile" ] ; then continue ; fi fi uid=$(get_owner_uid "$jobfile") [ -z "$uid" ] && { log "Failed to stat $jobfile"; continue; } if [ ! -z "$sessiondir" ] ; then # have chance to obtain exit code if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then # In case of non-NFS setup it may take some time till # diagnostics file is delivered. Wait for it max 2 minutes. diag_tries=20 while [ "$diag_tries" -gt 0 ] ; do if [ -z "$uid" ] ; then exitcode=`grep '^exitcode=' "$diagfile" 2>/dev/null | sed 's/^exitcode=//'` else exitcode=$(do_as_uid "$uid" "grep '^exitcode=' '$diagfile'" | sed 's/^exitcode=//') fi if [ ! 
-z "$exitcode" ] ; then break ; fi sleep 10 diag_tries=$(( $diag_tries - 1 )) done else if [ -z "$uid" ] ; then exitcode=`grep '^exitcode=' "$diagfile" 2>/dev/null | sed 's/^exitcode=//'` else exitcode=$(do_as_uid "$uid" "grep '^exitcode=' '$diagfile'" | sed 's/^exitcode=//') fi fi fi jobstatus="$exitcode Job missing from SLURM, exitcode recovered from session directory" if [ -z $exitcode ];then exitcode="-1" jobstatus="$exitcode Job missing from SLURM" fi save_commentfile "$uid" "$commentfile" "${basenames[$localid]}.errors" echo "$jobstatus" > "${basenames[$localid]}.lrms_done" kicklist=(${kicklist[@]} $localid) ;; PENDING|RUNNING|SUSPENDE|COMPLETING) #Job is running, nothing to do. run=$(($run + 1)) ;; CANCELLED) failed=$(($failed + 1)) handle_commentfile $localid echo "-1 Job was cancelled" > "${basenames[$localid]}.lrms_done" kicklist=(${kicklist[@]} $localid) handle_exitcode_cancelled $localid "-1" "Job was cancelled" handle_diag_file "$localid" "${basenames[$localid]}.diag" ;; COMPLETED) done=$(($done + 1)) handle_commentfile $localid handle_exitcode $localid "0" "" handle_diag_file "$localid" "${basenames[$localid]}.diag" ;; FAILED) failed=$(($failed + 1)) handle_commentfile $localid handle_exitcode $localid "-1" "Job failed" handle_diag_file "$localid" "${basenames[$localid]}.diag" ;; TIMEOUT) failed=$(($failed + 1)) handle_commentfile $localid handle_exitcode $localid "-1" "Job timeout" handle_diag_file "$localid" "${basenames[$localid]}.diag" ;; NODE_FAIL) failed=$(($failed + 1)) handle_commentfile $localid handle_exitcode_cancelled $localid "-1" "Node fail" handle_diag_file "$localid" "${basenames[$localid]}.diag" ;; esac done if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-slurm-job, JobHandling, R= $run, D= $done, Z= $zombie, F= $failed: $t" >> $perflogfile fi # Kick the GM if [ -n "${kicklist[*]}" ];then "${libexecdir}/gm-kick" \ $(for localid in "${kicklist[@]}";do echo "${basenames[$localid]}.local" done | xargs) fi exit 0 nordugrid-arc-5.4.2/src/services/a-rex/lrms/slurm/PaxHeaders.7502/submit-SLURM-job.in0000644000000000000000000000012712733561146026260 xustar000000000000000027 mtime=1466884710.220101 30 atime=1513200650.680633506 30 ctime=1513200663.467789898 nordugrid-arc-5.4.2/src/services/a-rex/lrms/slurm/submit-SLURM-job.in0000755000175000002070000004215512733561146026334 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -x # # Based on globus submission script for pbs # # Submits job to SLURM. # Input: path to grami file (same as Globus). # # The temporary job script is created for the submission and then removed # at the end of this script. echo "----- starting submit_slurm_job -----" 1>&2 joboption_lrms=SLURM # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkgdatadir="$basedir" . ${pkgdatadir}/configure-SLURM-env.sh || exit $? . ${pkgdatadir}/submit_common.sh || exit $? perflogfilesub="${perflogdir}/submission.perflog" if [ ! 
-z "$perflogdir" ]; then start_ts=`date +%s.%N` fi ############################################################## # Parse grami file, read arc config ############################################################## init $1 read_arc_conf failures_file="$joboption_controldir/job.$joboption_gridid.failed" if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then if [ -z "${RUNTIME_LOCAL_SCRATCH_DIR}" ] ; then echo "Need to know at which directory to run job: RUNTIME_LOCAL_SCRATCH_DIR must be set if RUNTIME_NODE_SEES_FRONTEND is empty" 1>&2 echo "Submission: Configuration error.">>"$failures_file" exit 1 fi fi ############################################################## # Zero stage of runtime environments ############################################################## RTE_stage0 ############################################################## # create job script ############################################################## mktempscript is_cluster=true ############################################################## # Start job script ############################################################## echo "#!/bin/bash -l" > $LRMS_JOB_SCRIPT echo "# SLURM batch job script built by grid-manager" >> $LRMS_JOB_SCRIPT # rerun is handled by GM, do not let SLURM requeue jobs itself. echo "#SBATCH --no-requeue" >> $LRMS_JOB_SCRIPT # write SLURM output to 'comment' file echo "#SBATCH -e ${joboption_directory}.comment">> $LRMS_JOB_SCRIPT echo "#SBATCH -o ${joboption_directory}.comment">> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT # choose queue if [ ! -z "${joboption_queue}" ] ; then echo "#SBATCH -p $joboption_queue" >> $LRMS_JOB_SCRIPT fi ############################################################## # priority ############################################################## if [ ! -z "$joboption_priority" ]; then #Slurm priority is -10000 to 10000. Lower is better. #Default is 0, and only superusers can assign priorities #less than 0. #We set the priority as 100 - arc priority. #This will have the desired effect for all grid jobs #Local jobs will unfortunatly have a default priority equal #to ARC priority 100, but there is no way around that. priority=$((100-joboption_priority)) echo "#SBATCH --nice=${priority}" >> $LRMS_JOB_SCRIPT else #If priority is not set we should set it to #50 to match the default in the documentation priority=50 echo "#SBATCH --nice=${priority}" >> $LRMS_JOB_SCRIPT fi # project name for accounting if [ ! -z "${joboption_rsl_project}" ] ; then echo "#SBATCH -U $joboption_rsl_project" >> $LRMS_JOB_SCRIPT fi # job name for convenience if [ ! -z "${joboption_jobname}" ] ; then #TODO is this necessary? do parts of the infosys need these limitations? jobname=`echo "$joboption_jobname" | \ sed 's/^\([^[:alpha:]]\)/N\1/' | \ sed 's/[^[:alnum:]]/_/g' | \ sed 's/\(...............\).*/\1/'` echo "#SBATCH -J '$jobname'" >> $LRMS_JOB_SCRIPT else jobname="gridjob" echo "#SBATCH -J '$jobname'" >> $LRMS_JOB_SCRIPT fi echo "SLURM jobname: $jobname" 1>&2 # Set up the user's environment on the compute node where the script # is executed. echo "#SBATCH --get-user-env=10L" >> $LRMS_JOB_SCRIPT ############################################################## # (non-)parallel jobs ############################################################## set_count nodes_string="#SBATCH -n ${joboption_count}" echo "$nodes_string" >> $LRMS_JOB_SCRIPT if [ ! 
-z $joboption_countpernode ] && [ $joboption_countpernode -gt 0 ] ; then echo "#SBATCH --ntasks-per-node $joboption_countpernode" >> $LRMS_JOB_SCRIPT fi nodes_string="#SBATCH " i=0 eval "var_is_set=\${joboption_nodeproperty_$i+yes}" while [ ! -z "${var_is_set}" ] ; do eval "var_value=\${joboption_nodeproperty_$i}" nodes_string="${nodes_string} ${var_value}" i=$(( $i + 1 )) eval "var_is_set=\${joboption_nodeproperty_$i+yes}" done echo "$nodes_string" >> $LRMS_JOB_SCRIPT if [ "$joboption_exclusivenode" = "true" ]; then echo "#SBATCH --exclusive" >> $LRMS_JOB_SCRIPT fi ############################################################## # Execution times (minutes) ############################################################## if [ ! -z "$joboption_cputime" ] ; then if [ $joboption_cputime -lt 0 ] ; then echo 'WARNING: Less than 0 cpu time requested: $joboption_cputime' 1>&2 joboption_cputime=0 echo 'WARNING: cpu time set to 0' 1>&2 fi # this is actually walltime deduced from cputime ! maxcputime=$(( $joboption_cputime / $joboption_count )) cputime_min=$(( $maxcputime / 60 )) cputime_sec=$(( $maxcputime - $cputime_min * 60 )) echo "#SBATCH -t ${cputime_min}:${cputime_sec}" >> $LRMS_JOB_SCRIPT fi if [ -z "$joboption_walltime" ] ; then if [ ! -z "$joboption_cputime" ] ; then # Set walltime for backward compatibility or incomplete requests joboption_walltime=$(( $maxcputime * $walltime_ratio )) fi fi if [ ! -z "$joboption_walltime" ] ; then if [ $joboption_walltime -lt 0 ] ; then echo 'WARNING: Less than 0 wall time requested: $joboption_walltime' 1>&2 joboption_walltime=0 echo 'WARNING: wall time set to 0' 1>&2 fi maxwalltime="$joboption_walltime" walltime_min=$(( $maxwalltime / 60 )) walltime_sec=$(( $maxwalltime - $walltime_min * 60 )) echo "#SBATCH -t ${walltime_min}:${walltime_sec}" >> $LRMS_JOB_SCRIPT fi ############################################################## # Requested memory (mb) ############################################################## set_req_mem if [ ! -z "$joboption_memory" ] ; then echo "#SBATCH --mem-per-cpu=${joboption_memory}" >> $LRMS_JOB_SCRIPT fi echo "" >> $LRMS_JOB_SCRIPT echo "# Overide umask of execution node (sometime values are really strange)" >> $LRMS_JOB_SCRIPT echo "umask 077" >> $LRMS_JOB_SCRIPT echo " " >> $LRMS_JOB_SCRIPT sourcewithargs_jobscript ############################################################## # Add environment variables ############################################################## add_user_env ############################################################## # Check for existance of executable, # there is no sense to check for executable if files are # downloaded directly to computing node ############################################################## if [ -z "${joboption_arg_0}" ] ; then echo 'Executable is not specified' 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "Submission: Job description error.">>"$failures_file" exit 1 fi ####################################################################### # copy information useful for transfering files to/from node directly ####################################################################### if [ "$joboption_localtransfer" = 'yes' ] ; then setup_local_transfer fi ###################################################################### # Adjust working directory for tweaky nodes # RUNTIME_GRIDAREA_DIR should be defined by external means on nodes ###################################################################### if [ ! 
-z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then setup_runtime_env else echo "RUNTIME_JOB_DIR=$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid" >> $LRMS_JOB_SCRIPT echo "RUNTIME_JOB_DIAG=$RUNTIME_LOCAL_SCRATCH_DIR/${joboption_gridid}.diag" >> $LRMS_JOB_SCRIPT RUNTIME_STDIN_REL=`echo "${joboption_stdin}" | sed "s#^${joboption_directory}/*##"` RUNTIME_STDOUT_REL=`echo "${joboption_stdout}" | sed "s#^${joboption_directory}/*##"` RUNTIME_STDERR_REL=`echo "${joboption_stderr}" | sed "s#^${joboption_directory}/*##"` if [ "$RUNTIME_STDIN_REL" = "${joboption_stdin}" ] ; then echo "RUNTIME_JOB_STDIN=\"${joboption_stdin}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDIN=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDIN_REL\"" >> $LRMS_JOB_SCRIPT fi if [ "$RUNTIME_STDOUT_REL" = "${joboption_stdout}" ] ; then echo "RUNTIME_JOB_STDOUT=\"${joboption_stdout}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDOUT=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDOUT_REL\"" >> $LRMS_JOB_SCRIPT fi if [ "$RUNTIME_STDERR_REL" = "${joboption_stderr}" ] ; then echo "RUNTIME_JOB_STDERR=\"${joboption_stderr}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDERR=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDERR_REL\"" >> $LRMS_JOB_SCRIPT fi fi ############################################################## # Add std... to job arguments ############################################################## include_std_streams ############################################################## # Move files to local working directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_node echo "" >> $LRMS_JOB_SCRIPT echo "RESULT=0" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT ##################################################### # Download input files #################################################### download_input_files ############################################################## # Skip execution if something already failed ############################################################## echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime configuration at computing node ############################################################## RTE_stage1 ############################################################## # Diagnostics ############################################################## echo "echo \"runtimeenvironments=\$runtimeenvironments\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<'EOSCR' if [ ! 
"X$SLURM_NODEFILE" = 'X' ] ; then if [ -r "$SLURM_NODEFILE" ] ; then cat "$SLURM_NODEFILE" | sed 's/\(.*\)/nodename=\1/' >> "$RUNTIME_JOB_DIAG" NODENAME_WRITTEN="1" else SLURM_NODEFILE= fi fi EOSCR ############################################################## # Check intermediate result again ############################################################## echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT ############################################################## # Execution ############################################################## cd_and_run ############################################################## # End of RESULT checks ############################################################## echo "fi" >> $LRMS_JOB_SCRIPT echo "fi" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime (post)configuration at computing node ############################################################## configure_runtime ##################################################### # Upload output files #################################################### if [ "$joboption_localtransfer" = 'yes' ] ; then upload_output_files else # There is no sense to keep trash till GM runs uploader echo 'if [ ! -z "$RUNTIME_LOCAL_SCRATCH_DIR" ] ; then' >> $LRMS_JOB_SCRIPT # Delete all files except listed in job.#.output echo ' find ./ -type l -exec rm -f "{}" ";"' >> $LRMS_JOB_SCRIPT echo ' find ./ -type f -exec chmod u+w "{}" ";"' >> $LRMS_JOB_SCRIPT if [ -f "$joboption_controldir/job.$joboption_gridid.output" ] ; then cat "$joboption_controldir/job.$joboption_gridid.output" | \ # remove leading backslashes, if any sed 's/^\/*//' | \ # backslashes and spaces are escaped with a backslash in job.*.output. The # shell built-in read undoes this escaping. while read name rest; do # make it safe for shell by replacing single quotes with '\'' name=`printf "%s" "$name"|sed "s/'/'\\\\\\''/g"`; # protect from deleting output files including those in the dynamic list if [ "${name#@}" != "$name" ]; then # Does $name start with a @ ? dynlist=${name#@} echo " dynlist='$dynlist'" >> $LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<'EOSCR' chmod -R u-w "./$dynlist" 2>/dev/null cat "./$dynlist" | while read name rest; do chmod -R u-w "./$name" 2>/dev/null done EOSCR else echo " chmod -R u-w \"\$RUNTIME_JOB_DIR\"/'$name' 2>/dev/null" >> $LRMS_JOB_SCRIPT fi done fi echo ' find ./ -type f -perm /200 -exec rm -f "{}" ";"' >> $LRMS_JOB_SCRIPT echo ' find ./ -type f -exec chmod u+w "{}" ";"' >> $LRMS_JOB_SCRIPT echo 'fi' >> $LRMS_JOB_SCRIPT fi echo "" >> $LRMS_JOB_SCRIPT ############################################################## # Move files back to session directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id # !!!!!!!!!!!!!!!!!!! would be better to know the names of files !!!!!!!!!!! ############################################################## move_files_to_frontend ####################################### # Submit the job ####################################### echo "SLURM job script built" 1>&2 # Execute sbatch command cd "$joboption_directory" echo "SLURM script follows:" 1>&2 echo "-------------------------------------------------------------------" 1>&2 cat "$LRMS_JOB_SCRIPT" 1>&2 echo "-------------------------------------------------------------------" 1>&2 echo "" 1>&2 SLURM_RESULT=1 SLURM_TRIES=0 #job creation finished if [ ! 
-z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-slurm-job, JobScriptCreation: $t" >> $perflogfilesub fi while [ "$SLURM_TRIES" -lt '10' ] ; do # Unset all environment variables before calling sbatch. Otherwise # SLURM will forward them to the job and leak information about # the grid-manager. # Only unset lines with assignments. # Risks unsetting variables in sub assignments, but this is likely harmless. # TODO: Maybe we only should unset $ARC_*, $CONFIG_*, $GLOBUS_* etc? if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi (for i in $(env|grep '^[A-Za-z][A-Za-z0-9]*='|grep -v "LRMS_JOB_SCRIPT"|cut -d= -f1);do unset $i;done; \ ${sbatch} $LRMS_JOB_SCRIPT) 1>$LRMS_JOB_OUT 2>$LRMS_JOB_ERR SLURM_RESULT="$?" if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-slurm-job, JobSubmission: $t" >> $perflogfilesub fi if [ "$SLURM_RESULT" -eq '0' ] ; then break ; fi if [ "$SLURM_RESULT" -eq '198' ] ; then echo "Waiting for queue to decrease" 1>&2 sleep 60 SLURM_TRIES=0 continue fi grep 'maximum number of jobs' "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" if [ $? -eq '0' ] ; then echo "Waiting for queue to decrease" 1>&2 sleep 60 SLURM_TRIES=0 continue fi # A rare SLURM error, but may cause chaos in the information/accounting system grep 'unable to accept job' "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" if [ $? -eq '0' ] ; then echo "Waiting for queue to decrease" 1>&2 sleep 60 SLURM_TRIES=0 continue fi SLURM_TRIES=$(( $SLURM_TRIES + 1 )) sleep 2 done if [ $SLURM_RESULT -eq '0' ] ; then #TODO test what happens when the jobqueue is full or when the slurm ctld is not responding # SLURM 1.x and 2.2.x outputs the jobid into STDERR and STDOUT respectively. Concat them, # and let sed sort it out. From the exit code we know that the job was submitted, so this # is safe. Ulf Tigerstedt 1.5.2011 # This is unfortunately not safe. Cray's SLURM sbatch returns 0, when it fails to submit a job. 22.1.2015 job_id=`cat $LRMS_JOB_OUT $LRMS_JOB_ERR |sed -e 's/^\(sbatch: \)\{0,1\}Submitted batch job \([0-9]*\)$/\2/'` if expr match "${job_id}" '[0-9][0-9]*' >/dev/null; then echo "joboption_jobid=$job_id" >> $arg_file echo "job submitted successfully!" 1>&2 echo "local job id: $job_id" 1>&2 # Remove temporary job script file rm -f $LRMS_JOB_SCRIPT $LRMS_JOB_OUT $LRMS_JOB_ERR echo "----- exiting submit_slurm_job -----" 1>&2 echo "" 1>&2 exit 0 else echo "job *NOT* submitted successfully!" 1>&2 echo "failed getting the slurm jobid for the job!" 1>&2 echo "Instead got: ${job_id}" 1>&2 echo "Submission: Local submission client behaved unexpectedly.">>"$failures_file" fi else echo "job *NOT* submitted successfully!" 1>&2 echo "got error code from sbatch: $SLURM_RESULT !" 
1>&2 echo "Submission: Local submission client failed.">>"$failures_file" fi echo "Output is:" 1>&2 cat $LRMS_JOB_OUT 1>&2 echo "Error output is:" cat $LRMS_JOB_ERR 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "----- exiting submit_slurm_job -----" 1>&2 echo "" 1>&2 exit 1 nordugrid-arc-5.4.2/src/services/a-rex/lrms/slurm/PaxHeaders.7502/cancel-SLURM-job.in0000644000000000000000000000012712100556217026171 xustar000000000000000027 mtime=1359142031.233238 30 atime=1513200650.712633898 30 ctime=1513200663.465789874 nordugrid-arc-5.4.2/src/services/a-rex/lrms/slurm/cancel-SLURM-job.in0000755000175000002070000000123412100556217026236 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -x # # Cancel job running in SLURM. # Input: grami file (same as Globus) echo "----- starting cancel_slurm_job -----" 1>&2 joboption_lrms=SLURM # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkgdatadir="$basedir" . ${pkgdatadir}/cancel_common.sh || exit $? verify_jobid $joboption_jobid || exit 1 echo "executing scancel with job id $joboption_jobid" 1>&2 "${scancel}" "${joboption_jobid}" if [ "$?" != "0" ];then echo "scancel failed" 1>&2 fi echo "----- exiting cancel_slurm_job -----" 1>&2 exit 0 nordugrid-arc-5.4.2/src/services/a-rex/lrms/slurm/PaxHeaders.7502/configure-SLURM-env.sh0000644000000000000000000000012413044140007026736 xustar000000000000000027 mtime=1485881351.109312 27 atime=1513200576.495726 30 ctime=1513200663.462789837 nordugrid-arc-5.4.2/src/services/a-rex/lrms/slurm/configure-SLURM-env.sh0000755000175000002070000000363113044140007027011 0ustar00mockbuildmock00000000000000# # set environment variables: # ############################################################## # Reading configuration from $ARC_CONFIG ############################################################## if [ -z "$pkgdatadir" ]; then echo 'pkgdatadir must be set' 1>&2; exit 1; fi . "$pkgdatadir/config_parser_compat.sh" || exit $? ARC_CONFIG=${ARC_CONFIG:-/etc/arc.conf} config_parse_file $ARC_CONFIG 1>&2 || exit $? config_import_section "common" config_import_section "infosys" config_import_section "grid-manager" config_import_section "cluster" if [ ! -z "$joboption_queue" ]; then config_import_section "queue/$joboption_queue" fi # performance logging: if perflogdir or perflogfile is set, logging is turned on. So only set them when enable_perflog_reporting is ON unset perflogdir unset perflogfile enable_perflog=${CONFIG_enable_perflog_reporting:-no} if [ "$CONFIG_enable_perflog_reporting" == "expert-debug-on" ]; then perflogdir=${CONFIG_perflogdir:-/var/log/arc/perfdata} perflogfile="${perflogdir}/backends.perflog" fi # Path to slurm commands SLURM_BIN_PATH=${CONFIG_slurm_bin_path:-/usr/bin} if [ ! -d ${SLURM_BIN_PATH} ] ; then echo "Could not set SLURM_BIN_PATH." 1>&2 exit 1 fi # Paths to SLURM commands squeue="$SLURM_BIN_PATH/squeue" scontrol="$SLURM_BIN_PATH/scontrol" sinfo="$SLURM_BIN_PATH/sinfo" scancel="$SLURM_BIN_PATH/scancel" sbatch="$SLURM_BIN_PATH/sbatch" sacct="$SLURM_BIN_PATH/sacct" # Verifies that a SLURM jobid is set, and is an integer verify_jobid () { joboption_jobid="$1" # Verify that the jobid is somewhat sane. if [ -z ${joboption_jobid} ];then echo "error: joboption_jobid is not set" 1>&2 return 1 fi # jobid in slurm is always an integer, so anything else is an error. 
if [ "x" != "x$(echo ${joboption_jobid} | sed s/[0-9]//g )" ];then echo "error: non-numeric characters in joboption_jobid: ${joboption_jobid}" 1>&2 return 1 fi return 0 } nordugrid-arc-5.4.2/src/services/a-rex/lrms/PaxHeaders.7502/fork0000644000000000000000000000013213214316027022462 xustar000000000000000030 mtime=1513200663.355788528 30 atime=1513200668.718854121 30 ctime=1513200663.355788528 nordugrid-arc-5.4.2/src/services/a-rex/lrms/fork/0000755000175000002070000000000013214316027022605 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/lrms/fork/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712047045306024601 xustar000000000000000027 mtime=1352420038.089673 30 atime=1513200603.129051932 30 ctime=1513200663.353788504 nordugrid-arc-5.4.2/src/services/a-rex/lrms/fork/Makefile.am0000644000175000002070000000015212047045306024641 0ustar00mockbuildmock00000000000000dist_pkgdata_DATA = configure-fork-env.sh pkgdata_SCRIPTS = scan-fork-job submit-fork-job cancel-fork-job nordugrid-arc-5.4.2/src/services/a-rex/lrms/fork/PaxHeaders.7502/submit-fork-job.in0000644000000000000000000000012613165641321026107 xustar000000000000000027 mtime=1507279569.032075 29 atime=1513200650.36862969 30 ctime=1513200663.355788528 nordugrid-arc-5.4.2/src/services/a-rex/lrms/fork/submit-fork-job.in0000644000175000002070000001573713165641321026167 0ustar00mockbuildmock00000000000000#!/bin/bash # set -x # # Input: path to grami file (same as Globus). # This script creates a temporary job script and runs it. echo "----- starting submit_fork_job -----" 1>&2 joboption_lrms=fork # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkglibexecdir="${ARC_LOCATION:-@prefix@}/@pkglibexecsubdir@" pkgdatadir="$basedir" . ${pkgdatadir}/submit_common.sh || exit $? ############################################################## # Parse grami file, read arc config ############################################################## init $1 read_arc_conf RUNTIME_NODE_SEES_FRONTEND=yes ############################################################## # Zero stage of runtime environments ############################################################## RTE_stage0 ############################################################## # create job script ############################################################## mktempscript chmod u+x ${LRMS_JOB_SCRIPT} ############################################################## # Start job script ############################################################## echo '#!/bin/sh' > $LRMS_JOB_SCRIPT echo "# Fork job script built by grid-manager" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT ############################################################## # non-parallel jobs ############################################################## set_count ############################################################## # Execution times (obtained in seconds) ############################################################## if [ ! -z "$joboption_walltime" ] ; then if [ $joboption_walltime -lt 0 ] ; then echo 'WARNING: Less than 0 wall time requested: $joboption_walltime' 1>&2 joboption_walltime=0 echo 'WARNING: wall time set to 0' 1>&2 fi maxwalltime="$joboption_walltime" elif [ ! 
-z "$joboption_cputime" ] ; then if [ $joboption_cputime -lt 0 ] ; then echo 'WARNING: Less than 0 cpu time requested: $joboption_cputime' 1>&2 joboption_cputime=0 echo 'WARNING: cpu time set to 0' 1>&2 fi maxwalltime="$joboption_cputime" fi if [ ! -z "$maxwalltime" ] ; then echo "ulimit -t $maxwalltime" >> $LRMS_JOB_SCRIPT fi sourcewithargs_jobscript ############################################################## # Override umask ############################################################## echo "" >> $LRMS_JOB_SCRIPT echo "# Overide umask of execution node (sometime values are really strange)" >> $LRMS_JOB_SCRIPT echo "umask 077" >> $LRMS_JOB_SCRIPT ############################################################## # Add environment variables ############################################################## add_user_env ############################################################## # Check for existance of executable, ############################################################## if [ -z "${joboption_arg_0}" ] ; then echo 'Executable is not specified' 1>&2 exit 1 fi ####################################################################### # copy information useful for transfering files to/from node directly ####################################################################### if [ "$joboption_localtransfer" = 'yes' ] ; then setup_local_transfer fi setup_runtime_env ############################################################## # Add std... to job arguments ############################################################## include_std_streams ############################################################## # Move files to local working directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_node echo "" >> $LRMS_JOB_SCRIPT echo "RESULT=0" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT ##################################################### # Download input files ##################################################### download_input_files echo "" >> $LRMS_JOB_SCRIPT echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime configuration ############################################################## RTE_stage1 echo "echo \"runtimeenvironments=\$runtimeenvironments\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT ##################################################### # Go to working dir and start job ##################################################### # Set the nice value (20 to -20) based on priority (1 to 100) # Note negative values are normally only settable by superusers priority=$joboption_priority if [ ! 
-z $priority ]; then nicevalue=$[ 20 - ($priority * 2 / 5) ] joboption_args="nice -n $nicevalue $joboption_args" fi cd_and_run echo "fi" >> $LRMS_JOB_SCRIPT ##################################################### # Upload output files ##################################################### upload_output_files echo "" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime (post)configuration at computing node ############################################################## configure_runtime ############################################################## # Move files back to session directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_frontend ####################################### # watcher process ####################################### JOB_ID= cleanup() { [ -n "$JOB_ID" ] && kill -9 $JOB_ID 2>/dev/null # remove temp files rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" } watcher() { "$1" > "$2" 2>&1 & rc=$? JOB_ID=$! export JOB_ID trap cleanup 0 1 2 3 4 5 6 7 8 10 12 15 if [ $rc -ne 0 ]; then echo "FAIL" > "$3" exit 1 else echo "OK" > "$3" wait $JOB_ID fi } ####################################### # Submit the job ####################################### echo "job script ${LRMS_JOB_SCRIPT} built" 1>&2 # simple queuing system: make hard reference to the queue cd "$joboption_directory" 1>&2 || { echo "Could not cd to $joboption_directory, aborting" && exit 1; } # Bash (but not dash) needs the parantheses, otherwise 'trap' has no effect! ( watcher "$LRMS_JOB_SCRIPT" "${joboption_directory}.comment" "$LRMS_JOB_ERR"; ) & job_id=$! result= while [ -z "$result" ]; do sleep 1 result=`cat $LRMS_JOB_ERR` done case "$result" in OK) echo "job submitted successfully!" 1>&2 echo "local job id: $job_id" 1>&2 echo "joboption_jobid=$job_id" >> $arg_file rc=0 ;; *) echo "job *NOT* submitted successfully!" 1>&2 echo "" 1>&2 echo "Output is:" 1>&2 cat $LRMS_JOB_OUT 1>&2 rm -f $LRMS_JOB_SCRIPT $LRMS_JOB_OUT $LRMS_JOB_ERR rc=1 ;; esac rm "$LRMS_JOB_ERR" echo "----- exiting submit_fork_job -----" 1>&2 echo "" 1>&2 exit $rc nordugrid-arc-5.4.2/src/services/a-rex/lrms/fork/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315733024606 xustar000000000000000029 mtime=1513200603.16405236 30 atime=1513200650.336629299 30 ctime=1513200663.353788504 nordugrid-arc-5.4.2/src/services/a-rex/lrms/fork/Makefile.in0000644000175000002070000005005413214315733024661 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/fork DIST_COMMON = README $(dist_pkgdata_DATA) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in $(srcdir)/cancel-fork-job.in \ $(srcdir)/scan-fork-job.in $(srcdir)/submit-fork-job.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = scan-fork-job submit-fork-job cancel-fork-job CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" SCRIPTS = $(pkgdata_SCRIPTS) SOURCES = DIST_SOURCES = DATA = $(dist_pkgdata_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ 
ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = 
@LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = 
@lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ dist_pkgdata_DATA = configure-fork-env.sh pkgdata_SCRIPTS = scan-fork-job submit-fork-job cancel-fork-job all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/fork/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/fork/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): scan-fork-job: $(top_builddir)/config.status $(srcdir)/scan-fork-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ submit-fork-job: $(top_builddir)/config.status $(srcdir)/submit-fork-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cancel-fork-job: $(top_builddir)/config.status $(srcdir)/cancel-fork-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z 
"$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dist_pkgdataDATA install-pkgdataSCRIPTS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dist_pkgdataDATA \ install-dvi install-dvi-am install-exec install-exec-am \ install-html install-html-am install-info install-info-am \ install-man install-pdf install-pdf-am install-pkgdataSCRIPTS \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \ uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/lrms/fork/PaxHeaders.7502/scan-fork-job.in0000644000000000000000000000012711671645500025534 xustar000000000000000027 mtime=1323780928.047317 30 atime=1513200650.353629507 30 ctime=1513200663.355788528 nordugrid-arc-5.4.2/src/services/a-rex/lrms/fork/scan-fork-job.in0000644000175000002070000001035211671645500025577 0ustar00mockbuildmock00000000000000#!@posix_shell@ # # Periodically monitor for jobs which has finished or failed but not # reported an exitcode # id=`id -u` #debug='eval echo >> @tmp_dir@/parse-fork-log.$id' debug=: $debug "run at `date`" $debug "options = $@" # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi if [ -z "$1" ] ; then echo "Argument missing" 1>&2 ; exit 1 ; fi # Prints the uid of the owner of the file given as argument # Perl is used because it's more portable than using the stat command printuid () { code='my @s = stat($ARGV[0]); print($s[4] || "")' /usr/bin/perl -we "$code" "$1" } # # Attempts to switch to uid passed as the first argument and then runs the # commands passed as the second argument in a shell. The remaining arguments # are passed as arguments to the shell. No warning is given in case switching # uid is not possible. 
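#
# Usage sketch (mirrors the calls made further down in this script; the file
# names are illustrative):
#
#   uid=$(printuid "${control_dir}/job.${job}.local")
#   do_as_uid "$uid" "cat '${joboption_directory}.diag'"
#
# The uid switch is only attempted when this script itself runs as root.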
# do_as_uid () { test $# -ge 2 || { log "do_as_uid requires 2 arguments"; return 1; } script='use English; my ($uid, @args) = @ARGV; if ( $UID == 0 && $uid ) { eval { $UID = $uid }; print STDERR "Cannot switch to uid($UID): $@\n" if $UID != $uid; } system("@posix_shell@","-c",@args); exit ($?>>8||128+($?&127)); ' /usr/bin/perl -we "$script" "$@" } # Append .comment (containing STDOUT & STDERR of the job wrapper) to .errors save_commentfile () { uid=$1 commentfile=$2 errorsfile=$3 action=" { echo '---------- Output of the job wrapper script -----------' cat '$commentfile' 2> /dev/null echo '------------------------- End of output -------------------------' } >> '$errorsfile' " do_as_uid "$uid" "$action" } for control_dir in "$@" ; do if [ ! -d "${control_dir}" ]; then echo "No control dir $control_dir" 1>&2 continue fi # iterate over all jobs known in the control directory find "${control_dir}/processing" -name 'job.*.status' \ | xargs egrep -l "INLRMS|CANCELING" \ | sed -e 's/.*job\.//' -e 's/\.status$//' \ | while read job; do $debug "scanning job = $job" unset joboption_jobid unset joboption_directory # this job was already completed, nothing remains to be done [ -f "${control_dir}/job.${job}.lrms_done" ] && continue # a grami file exists for all jobs that GM thinks are running. # proceed to next job if this file is missing. if [ ! -f "${control_dir}/job.${job}.grami" ]; then continue fi # extract process IDs of the grami file [ ! -f "${control_dir}/job.${job}.grami" ] && continue . "${control_dir}/job.${job}.grami" # process IDs could not be learned, proceeding to next [ -z "$joboption_jobid" ] && continue $debug "local jobid = $joboption_jobid" # checking if process is still running if ps -ouser= -p$joboption_jobid > /dev/null; then $debug "ps returned $? - process $joboption_jobid of job $job is still running. Continuing to next" continue else $debug "ps returned $? - process $joboption_jobid of job $job has exited!" fi uid=$(printuid "${control_dir}/job.${job}.local") $debug "local user id = $uid" diagfile=${joboption_directory}.diag $debug "checking $diagfile" exitcode=$(do_as_uid "$uid" "cat '$diagfile'" | sed -n 's/^exitcode=\([0-9]*\).*/\1/p') $debug "exitcode = [$exitcode] extracted from $diagfile" fork_comment="" if [ -z "$joboption_arg_code" ] ; then joboption_arg_code='0' ; fi if [ -z "$exitcode" ]; then echo "Job $job with PID $joboption_jobid died unexpectedly" 1>&2 fork_comment="Job died unexpectedly" 1>&2 exitcode=-1 elif [ "$exitcode" -ne "$joboption_arg_code" ]; then fork_comment="Job finished with wrong exit code - $exitcode != $joboption_arg_code" 1>&2 fi $debug "got exitcode=$exitcode" save_commentfile "$uid" "${joboption_directory}.comment" "${control_dir}/job.${job}.errors" echo "$exitcode $fork_comment" > "${control_dir}/job.${job}.lrms_done" done done $debug "done, going to sleep" sleep 10 exit 0 nordugrid-arc-5.4.2/src/services/a-rex/lrms/fork/PaxHeaders.7502/cancel-fork-job.in0000644000000000000000000000012711562710611026030 xustar000000000000000027 mtime=1305186697.469298 30 atime=1513200650.383629874 30 ctime=1513200663.354788516 nordugrid-arc-5.4.2/src/services/a-rex/lrms/fork/cancel-fork-job.in0000644000175000002070000000435411562710611026100 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -x # # Cancel job running in FORK. # echo "----- starting cancel_fork_job -----" 1>&2 trap 'echo "----- exiting cancel_fork_job -----" 1>&2; echo "" 1>&2' EXIT joboption_lrms=fork # ARC1 passes first the config file. 
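# Invocation sketch (arguments as passed by the grid manager; the paths are
# examples only):
#
#   cancel-fork-job --config /etc/arc.conf \
#       /var/spool/nordugrid/jobstatus/job.<gridid>.grami
#
# cancel_common.sh, sourced below, is expected to set $arg_file and any
# joboption_* variables recorded in the grami file.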
if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkgdatadir="$basedir" . ${pkgdatadir}/cancel_common.sh || exit $? if [ -z "$joboption_controldir" ] ; then joboption_controldir=`dirname "$arg_file"` if [ "$joboption_controldir" = '.' ] ; then joboption_controldir="$PWD" fi fi job_control_dir="$joboption_controldir" if [ -z "$joboption_gridid" ] ; then joboption_gridid=`basename "$arg_file" | sed 's/^job\.\(.*\)\.grami$/\1/'` fi echo "Deleting job $joboption_gridid, local id $joboption_jobid" 1>&2 if [ ! -r "$job_control_dir/job.${joboption_gridid}.local" ]; then echo "Local description of job ${joboption_gridid} not found at '$job_control_dir/job.${joboption_gridid}.local'. Job was not killed, if running at all." 1>&2 exit 1 fi if [ -z "$joboption_jobid" ] ; then joboption_jobid=`cat "$job_control_dir/job.${joboption_gridid}.local" | grep '^localid=' | sed 's/^localid=//'` fi job_control_subdir= if [ -r "$job_control_dir/accepting/job.${joboption_gridid}.status" ]; then job_control_subdir="$job_control_dir/accepting" elif [ -r "$job_control_dir/processing/job.${joboption_gridid}.status" ]; then job_control_subdir="$job_control_dir/processing" elif [ -r "$job_control_dir/finished/job.${joboption_gridid}.status" ]; then job_control_subdir="$job_control_dir/finished" else echo "Status file of job ${joboption_gridid} not found in '$job_control_dir'. Job was not killed, if running at all." 1>&2 exit 1 fi case X`cat "$job_control_subdir/job.${joboption_gridid}.status"` in XINLRMS | XCANCELING) if [ -z "$joboption_jobid" ] ; then echo "Can't find local id of job" 1>&2 exit 1 fi kill -TERM $joboption_jobid sleep 5 kill -KILL $joboption_jobid ;; XFINISHED | XDELETED) echo "Job already died, won't do anything" 1>&2 ;; *) echo "Job is at unkillable state" 1>&2 ;; esac exit 0 nordugrid-arc-5.4.2/src/services/a-rex/lrms/fork/PaxHeaders.7502/configure-fork-env.sh0000644000000000000000000000012411542336027026606 xustar000000000000000027 mtime=1300872215.906494 27 atime=1513200576.492726 30 ctime=1513200663.352788492 nordugrid-arc-5.4.2/src/services/a-rex/lrms/fork/configure-fork-env.sh0000644000175000002070000000104011542336027026646 0ustar00mockbuildmock00000000000000# # set environment fork variables: # ############################################################## # Reading configuration from $ARC_CONFIG ############################################################## if [ -z "$pkgdatadir" ]; then echo 'pkgdatadir must be set' 1>&2; exit 1; fi . "$pkgdatadir/config_parser.sh" || exit $? ARC_CONFIG=${ARC_CONFIG:-/etc/arc.conf} config_parse_file $ARC_CONFIG 1>&2 || exit $? config_import_section "common" config_import_section "infosys" config_import_section "grid-manager" # Script returned ok true nordugrid-arc-5.4.2/src/services/a-rex/lrms/fork/PaxHeaders.7502/README0000644000000000000000000000012311016612002023405 xustar000000000000000026 mtime=1211831298.62818 27 atime=1513200576.492726 30 ctime=1513200663.351788479 nordugrid-arc-5.4.2/src/services/a-rex/lrms/fork/README0000644000175000002070000000002611016612002023451 0ustar00mockbuildmock00000000000000Fork control scripts. 
nordugrid-arc-5.4.2/src/services/a-rex/lrms/PaxHeaders.7502/dgbridge0000644000000000000000000000013213214316027023270 xustar000000000000000030 mtime=1513200663.496790253 30 atime=1513200668.718854121 30 ctime=1513200663.496790253 nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/0000755000175000002070000000000013214316027023413 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/PaxHeaders.7502/dgbridge.xml0000644000000000000000000000012411524061561025641 xustar000000000000000027 mtime=1297113969.097822 27 atime=1513200576.521726 30 ctime=1513200663.496790253 nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/dgbridge.xml0000644000175000002070000002614411524061561025715 0ustar00mockbuildmock00000000000000 /tmp/arex_arched.pid /tmp/arex_arched.log DEBUG /usr/lib64/arc/ mcctcp mcctls mcchttp mccsoap arcshc identitymap arex 0.0.0.0 60000 4 /etc/grid-security/hostkey.pem /etc/grid-security/hostcert.pem /etc/grid-security/certificates ^.*$ POST GET PUT ^/arex TLS AREX file /etc/arc/arex_policy.xml /etc/grid-security/grid-mapfile nobody https://localhost:60000/arex nobody internal Out-of-the-box CE ARC execution service DGBridge LINUX support@cluster.org VERBOSE 1000 100 20 2 4 /etc/grid-security/grid-mapfile /etc/grid-security/certificates /etc/grid-security/hostcert.pem /etc/grid-security/hostkey.pem /tmp/arex-jobs.log . /tmp/jobstatus /tmp/grid /tmp/cache PREPARING /usr/libexec/arc/DGBridgeDataPlugin.py %C %I ACCEPTED /usr/libexec/arc/DGAuthplug.py %S %C /var/spool/nordugrid/runtime %I %U PREPARING /usr/libexec/arc/DGAuthplug.py %S %C /var/spool/nordugrid/runtime %I %U DGBridge DGQueue /var/spool/nordugrid/runtime INFO ORG/NORDUGRID General This cluster is specially designed for XYZ applications: www.xyz.org City, World Street 1 City World 11111 47.51 19.05 User Support mailto:support@cluster.org usersupport nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612047045306025406 xustar000000000000000027 mtime=1352420038.089673 30 atime=1513200603.073051247 29 ctime=1513200663.49079018 nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/Makefile.am0000644000175000002070000000127412047045306025455 0ustar00mockbuildmock00000000000000dist_pkgdata_DATA = configure-DGBridge-env.sh dist_pkgdata_SCRIPTS = DGAuthplug.py DGBridgeDataPlugin.py DGLog2XML.py pkgdata_SCRIPTS = scan-DGBridge-job submit-DGBridge-job cancel-DGBridge-job profiledir = $(pkgdatadir)/profiles profile_DATA = dgbridge.xml install-data-hook: cp -p $(DESTDIR)$(profiledir)/dgbridge.xml $(DESTDIR)$(profiledir)/dgbridge.xml.orig && \ sed -e "s,/usr/libexec/arc,${pkgdatadir},g" -e "s,/usr/lib64/arc,${pkglibdir},g" \ < $(DESTDIR)$(profiledir)/dgbridge.xml.orig > $(DESTDIR)$(profiledir)/dgbridge.xml ;\ rm -f $(DESTDIR)$(profiledir)/dgbridge.xml.orig EXTRA_DIST = README.DGBridge dgbridge_service.ini dgbridge.conf $(profile_DATA) nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/PaxHeaders.7502/scan-DGBridge-job.in0000644000000000000000000000012712100556217027002 xustar000000000000000027 mtime=1359142031.233238 30 atime=1513200650.757634448 30 ctime=1513200663.492790204 nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/scan-DGBridge-job.in0000755000175000002070000002001712100556217027047 0ustar00mockbuildmock00000000000000#!/bin/bash # # Periodically check state of grid jobs in DGBridge, and put mark files # for finished jobs. # # usage: scan_DGBridge_job control_dir ... 
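#
# Concurrency guard used below (a simplified sketch, not the verbatim code):
# the lock file is created with the shell's noclobber option so that the
# existence check and the creation are a single atomic step:
#
#   lockfile="${TMPDIR:-/tmp}/scan-DGBridge-job.lock"
#   if (set -C; : > "$lockfile") 2>/dev/null; then
#       trap 'rm -f "$lockfile"' EXIT        # we own the lock
#   else
#       exit 1                               # another scan is already running
#   fi
#
# The real code below additionally checks whether the PID recorded in an
# existing lock file is still alive before giving up.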
# Load arc.conf and set up environment joboption_lrms=DGBridge # ARC1 passes the config file first. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? basewebdir="/var/www/3GBridge/" libexecdir="${ARC_LOCATION:-@prefix@}/@pkglibexecsubdir@/" pkgdatadir="$basedir" . "${pkgdatadir}/configure-${joboption_lrms}-env.sh" || exit $? . "${pkgdatadir}/scan_common.sh" || exit $? # Prevent multiple instances of scan job to run concurrently lockfile="${TMPDIR:-/tmp}/scan-DGBridge-job.lock" #Check if lockfile exist, if not, create it. (set -C; : > "$lockfile") 2> /dev/null if [ "$?" != "0" ]; then if ps -p $(< "$lockfile") 2>/dev/null;then echo "lockfile exists and PID $(< $lockfile) is running" exit 1 fi echo "old lockfile found, was scan-DGBridge-job killed?" # sleep, and if no other have removed and recreated the lockfile we remove it. # there are still races possible, but this will have to do sleep $((${RANDOM}%30+10)) if ps -p $(< $lockfile) &>/dev/null;then echo "lockfile exists and $(< $lockfile) is running" exit 1 else echo "still not running, removing lockfile" rm $lockfile exit 1 fi fi echo "$$" > "$lockfile" #If killed, remove lockfile trap 'rm $lockfile' EXIT KILL TERM #Default sleep-time is 30 seconds sleep ${CONFIG_scan_wakeupperiod:-30} ## There is no shared file system possible in the DGBridge, instead we must copy output files from upload to session dir #Validate control directories supplied on command-line if [ -z "$1" ] ; then echo "no control_dir specified" 1>&2; exit 1 fi for ctr_dir in "$@"; do if [ ! -d "$ctr_dir" ]; then echo "called with erronous control dir: $ctr_dir" 1>&2 exit 1 fi done # List of DGBridge jobids for grid-jobs with state INLRMS declare -a localids # Array with basenames of grid-job files in ctrl_dir, indexed by localid # example /some/path/job.XXXXX /some/other/parh/job.YYYYY declare -a basenames declare -a gridids declare -a endpoints # Array with states of the jobs in SLURM, indexed by localid declare -a jobstates # Array to store localids of jobs that are determined to have finished, which are sent to gm-kick declare -a kicklist option_ctrdir=$@ # Find list of grid jobs with status INLRMS, store localid and # basename for those jobs #for basename in $(find "$@" -name 'job.*.status' -print0 \ # | xargs -0 egrep -l "INLRMS|CANCELING" \ # | sed 's/.status$//') for basename in $(ls $option_ctrdir/processing|sed -e 's/.status//g' -e "s,^,$option_ctrdir/,") do localid=$(grep ^localid= "${basename}.local" | cut -d= -f2 | tr -d '"') ind=${#localids[@]} localids[$ind]=`echo $localid| awk -F '|' '{ print $2; }'` endpoints[$ind]=`echo $localid| awk -F '|' '{ print $1; }'` temp=${basename##*/} gridids[$ind]=`echo $temp|sed 's/^job.//'` basenames[$ind]="$basename" done # No need to continue further if no jobs have status INLRMS if [ "${#localids[@]}" -eq 0 ];then exit 0 fi # Get JobStates from wsclient #performance: change this to use the file - switch to read multiple jids from stdin numids=$((${#localids[@]}-1)) for ind in `seq 0 $numids` ; do #get endpoint wsendpoint=${endpoints[$ind]} jobid=${localids[$ind]} jobstate=$(wsclient -e "$wsendpoint" -m status -j "$jobid" 2>&1) if [[ $? -ne 0 || ! 
"${jobstate#$jobid }" =~ \ Init|Running|Finished|Unknown|Error|TempFailed ]]; then echo "Failed to get job status from web service: $jobstate" 2>&1 jobstate="$jobid WSError" fi jobstates[$ind]="${jobstate#$jobid }" done function cleanbridge() { #$1 wsendpoint wsep=$1 #3g id lid=$2 #ARC id gridid=$3 # clean local input storage echo "cleaning job: $gridid" # extract gridid # if [ ! "$gridid" = "" ]; then # rm -rf $basewebdir/$gridid/ # fi for ((i=0; i<=$EDGES_3G_RETRIES; i++)); do OUTPUT=$(wsclient -e "$wsep" -m delete -j "$lid" 2>&1) [ $? -eq 0 ] && break (( i < EDGES_3G_RETRIES )) && sleep "$((EDGES_3G_TIMEOUT / EDGES_3G_RETRIES))" done & } #setup edgi monitoring logs dato=`date +%Y-%m-%d` edgilog=$option_ctrdir/3gbridge_logs/$dato dato=`date +%Y-%m-%d_%H:%M:%S` # Look at the list of jobstates and determine which jobs that have # finished. Write job.XXXX.lrms_done according to this numids=$((${#localids[@]}-1)) for ind in `seq 0 $numids` ; do wsendpoint=${endpoints[$ind]} case "${jobstates[$ind]}" in Init) ;; Running) #Job is running, nothing to do. #performance: delete input files in running state, only possible if remote 3gbridge does not use passthrough of data msg="dt=$dato event=job_status job_id=${gridids[$ind]} status=Running" flock -w 2 $edgilog -c "echo $msg >> $edgilog" if [ $? = 1 ]; then echo "Failed to log monitor data to: $edgilog" 1>&2 fi ;; Unknown) #bridge doesn't know job, maybe cancelled echo "-1 Job was cancelled" > "${basenames[$ind]}.lrms_done" kicklist=(${kicklist[@]} $ind) cleanbridge $wsendpoint ${localids[$ind]} ${gridids[$ind]} msg="dt=$dato event=job_status job_id=${gridids[$ind]} status=Failed" flock -w 2 $edgilog -c "echo $msg >> $edgilog" if [ $? = 1 ]; then echo "Failed to log monitor data to: $edgilog" 1>&2 fi ;; Finished) #fetch outputfiles. Maybe this will take too long. #first get list OUTPUT=$(wsclient -e "$wsendpoint" -m output -j "${localids[$ind]}" 2>&1) if [ $? -ne 0 ]; then echo "-1 Job could not get output" > "${basenames[$ind]}.lrms_done" kicklist=(${kicklist[@]} $ind) #clean bridge? cleanbridge $wsendpoint ${localids[$ind]} ${gridids[$ind]} msg="dt=$dato event=job_status job_id=${gridids[$ind]} status=Failed" flock -w 2 $edgilog -c "echo $msg >> $edgilog" if [ $? = 1 ]; then echo "Failed to log monitor data to: $edgilog" 1>&2 fi continue fi #fetch list using wget? yes # parse output session=`grep -h '^sessiondir=' "$option_ctrdir/job.${gridids[$ind]}.local" | sed 's/^sessiondir=\(.*\)/\1/'` output=$(echo "$OUTPUT"|grep http|awk '{print $2}') for line in $output; do wget -P "$session" $line done #uid=`id -u` #gid=`id -g` uid=`sed -n 's!.*\(.*\).*!\1!p' < "$option_ctrdir/job.${gridids[$ind]}.xml"` gid="" chown -Rv $uid:$gid "$session" #clean 3Gbridge cleanbridge $wsendpoint ${localids[$ind]} ${gridids[$ind]} #trigger done echo "0 Job Finished" > "${basenames[$ind]}.lrms_done" #monitor msg="dt=$dato event=job_status job_id=${gridids[$ind]} status=Finished" flock -w 2 $edgilog -c "echo $msg >> $edgilog" if [ $? = 1 ]; then echo "Failed to log monitor data to: $edgilog" 1>&2 fi ;; Error|TempFailed) #job failed echo "-1 Job Failed" > "${basenames[$ind]}.lrms_done" kicklist=(${kicklist[@]} $ind) #clean cleanbridge $wsendpoint ${localids[$ind]} ${gridids[$ind]} #monitor msg="dt=$dato event=job_status job_id=${gridids[$ind]} status=Failed" flock -w 2 $edgilog -c "echo $msg >> $edgilog" if [ $? = 1 ]; then echo "Failed to log monitor data to: $edgilog" 1>&2 fi ;; WSError) #webservice failed - perhaps have a count and then fail job? 
;; *) ;; esac done # Kick the GM if [ -n "${kicklist[*]}" ];then "${libexecdir}/gm-kick" \ $(for ind in "${kicklist[@]}";do echo "${basenames[$ind]}.status" done | xargs) fi exit 0 nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315733025414 xustar000000000000000030 mtime=1513200603.112051724 30 atime=1513200650.725634057 29 ctime=1513200663.49079018 nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/Makefile.in0000644000175000002070000005662013214315733025474 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/dgbridge DIST_COMMON = $(dist_pkgdata_DATA) $(dist_pkgdata_SCRIPTS) \ $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/cancel-DGBridge-job.in \ $(srcdir)/scan-DGBridge-job.in \ $(srcdir)/submit-DGBridge-job.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = submit-DGBridge-job scan-DGBridge-job \ cancel-DGBridge-job CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { 
files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" \ "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(profiledir)" SCRIPTS = $(dist_pkgdata_SCRIPTS) $(pkgdata_SCRIPTS) SOURCES = DIST_SOURCES = DATA = $(dist_pkgdata_DATA) $(profile_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = 
@GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = 
@ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ dist_pkgdata_DATA = configure-DGBridge-env.sh dist_pkgdata_SCRIPTS = DGAuthplug.py DGBridgeDataPlugin.py DGLog2XML.py pkgdata_SCRIPTS = scan-DGBridge-job submit-DGBridge-job cancel-DGBridge-job profiledir = $(pkgdatadir)/profiles profile_DATA = dgbridge.xml EXTRA_DIST = README.DGBridge dgbridge_service.ini dgbridge.conf $(profile_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/dgbridge/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/dgbridge/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): submit-DGBridge-job: $(top_builddir)/config.status $(srcdir)/submit-DGBridge-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ scan-DGBridge-job: $(top_builddir)/config.status $(srcdir)/scan-DGBridge-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cancel-DGBridge-job: $(top_builddir)/config.status $(srcdir)/cancel-DGBridge-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-dist_pkgdataSCRIPTS: $(dist_pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(dist_pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-dist_pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: 
@$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files install-profileDATA: $(profile_DATA) @$(NORMAL_INSTALL) test -z "$(profiledir)" || $(MKDIR_P) "$(DESTDIR)$(profiledir)" @list='$(profile_DATA)'; test -n "$(profiledir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(profiledir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(profiledir)" || exit $$?; \ done uninstall-profileDATA: @$(NORMAL_UNINSTALL) @list='$(profile_DATA)'; test -n "$(profiledir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(profiledir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(profiledir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(profiledir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dist_pkgdataDATA install-dist_pkgdataSCRIPTS \ install-pkgdataSCRIPTS install-profileDATA @$(NORMAL_INSTALL) $(MAKE) $(AM_MAKEFLAGS) install-data-hook install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_pkgdataDATA uninstall-dist_pkgdataSCRIPTS \ uninstall-pkgdataSCRIPTS uninstall-profileDATA .MAKE: install-am install-data-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-data-hook \ install-dist_pkgdataDATA install-dist_pkgdataSCRIPTS \ install-dvi install-dvi-am install-exec install-exec-am \ install-html install-html-am install-info install-info-am \ install-man install-pdf install-pdf-am install-pkgdataSCRIPTS \ install-profileDATA install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \ uninstall-dist_pkgdataDATA uninstall-dist_pkgdataSCRIPTS \ uninstall-pkgdataSCRIPTS uninstall-profileDATA install-data-hook: cp -p $(DESTDIR)$(profiledir)/dgbridge.xml $(DESTDIR)$(profiledir)/dgbridge.xml.orig && \ sed -e "s,/usr/libexec/arc,${pkgdatadir},g" -e "s,/usr/lib64/arc,${pkglibdir},g" \ < $(DESTDIR)$(profiledir)/dgbridge.xml.orig > $(DESTDIR)$(profiledir)/dgbridge.xml ;\ rm -f $(DESTDIR)$(profiledir)/dgbridge.xml.orig # Tell versions [3.59,3.63) of GNU make to not export all variables. 
# Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/PaxHeaders.7502/cancel-DGBridge-job.in0000644000000000000000000000012712100556217027303 xustar000000000000000027 mtime=1359142031.233238 30 atime=1513200650.773634644 30 ctime=1513200663.491790192 nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/cancel-DGBridge-job.in0000755000175000002070000000143712100556217027355 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -x # # Cancel job running in 3GBridge. # Input: grami file (same as Globus) echo "----- starting cancel_DGBridge_job -----" 1>&2 joboption_lrms=DGBridge # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkgdatadir="$basedir" . ${pkgdatadir}/cancel_common.sh || exit $? dgjobid=`echo "$joboption_jobid" |cut -f 2 -d \|` echo "executing wsclient delete with endpoint $joboption_wsendpoint and job id $dgjobid" 1>&2 #remember to generalize endpoint to config file wsclient -e $joboption_wsendpoint -m delete -j $dgjobid if [ "$?" != "0" ];then echo "wsclient delete failed" 1>&2 fi echo "----- exiting cancel_DGBridge_job -----" 1>&2 exit 0 nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/PaxHeaders.7502/submit-DGBridge-job.in0000644000000000000000000000012712101056503027353 xustar000000000000000027 mtime=1359240515.312488 30 atime=1513200650.741634252 30 ctime=1513200663.493790216 nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/submit-DGBridge-job.in0000755000175000002070000002073012101056503027422 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -x # # Based on globus submission script for pbs # # Submits job to the 3GBridge. # Input: path to grami file (same as Globus). # #opposed to other backends there is no jobscript echo "----- starting submit_DGBridge_job -----" 1>&2 joboption_lrms=DGBridge # ARC1 passes the config file first. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkgdatadir="$basedir" . ${pkgdatadir}/submit_common.sh || exit $? ############################################################## # Parse grami file, read arc config ############################################################## #output the grami file init $1 #extract staging info: bdir=$dgbridge_stage_dir bweb=$dgbridge_stage_prepend failures_file="$joboption_controldir/job.$joboption_gridid.failed" if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then if [ -z "${RUNTIME_LOCAL_SCRATCH_DIR}" ] ; then echo "Need to know at which directory to run job: RUNTIME_LOCAL_SCRATCH_DIR must be set if RUNTIME_NODE_SEES_FRONTEND is empty" 1>&2 echo "Submission: Configuration error.">>"$failures_file" exit 1 fi fi ############################################################## # Zero stage of runtime environments # In the dg bridge this should set the # endpoint,grid and app, and VO access # $DG_Endpoint, $DG_Grid, $DG_App, $DG_AllowedVOs # # There must be exactly one runtime environment ############################################################## joboption_num=0 eval "var_is_set=\${joboption_runtime_$joboption_num+yes}" while [ ! -z "${var_is_set}" ] ; do eval "var_value=\${joboption_runtime_$joboption_num}" if [ -r "$RUNTIME_CONFIG_DIR/${var_value}" ] ; then cmdl="$RUNTIME_CONFIG_DIR/${var_value}" sourcewithargs $cmdl 0 1>&2 if [ $? 
-ne '0' ] ; then echo "ERROR: runtime script ${var_value} failed" 1>&2 exit 1 fi else echo "ERROR: runtime script ${var_value} is missing" 1>&2 exit 1 fi joboption_num=$(( $joboption_num + 1 )) eval "var_is_set=\${joboption_runtime_$joboption_num+yes}" done # There must be exactly one runtime environment if [ "$joboption_num" -eq 0 ]; then echo "ERROR: No runtime environment specified." 1>&2 exit 1 fi if [ "$joboption_num" -gt 1 ]; then echo "ERROR: More than one runtime environment specified." 1>&2 exit 1 fi ############################################################## # Start job script #Note: there is no script for DGBridge ############################################################## # choose queue #if [ ! -z "${joboption_queue}" ] ; then # here we must extract and check endpoint data # once the RTE per queue options is implemented #fi ############################################################ # DG: # check that EP,APP and Grid has been set ########################################################### if [ -z "$DG_App" ]; then echo "ERROR: RTE has not set the application id." 1>&2 exit 1 fi if [ -z "$DG_Endpoint" ]; then echo "ERROR: RTE has not set the endpoint address." 1>&2 exit 1 fi if [ -z "$DG_Grid" ]; then echo "ERROR: RTE has not set the target grid." 1>&2 exit 1 fi if [ -z "$DG_AllowedVOs" ]; then echo "ERROR: RTE has not set any allowed VOs." 1>&2 exit 1 fi ############################################################ # DG: # check that the user has correct VO ########################################################### # check is performed in the plugin now. ############################################################## # DGBridge: # calculate md5sums of input files and move input files to # be served from apache (perhaps Attic in the future) ############################################################## #set storage dir from config #stageweb="http://edgi-dev.nbi.dk:9090/3GBridge/in/$joboption_gridid" #stagedir="/var/www/3GBridge/in/$joboption_gridid/" stageweb=$bweb"/in/$joboption_gridid" stagedir=$bdir"/in/$joboption_gridid/" #clean dir and link stageweb=`echo $stageweb|sed -e 's/^[ \t]*//'` stagedir=`echo $stagedir|sed -e 's/^[ \t]*//'` DG_Inputlist="" # read files inputs0=$joboption_directory/* #remove directories inputs="" for inf in $inputs0; do if [ -f $inf ]; then inputs="$inputs$inf " fi done if [ ! -z "$inputs" ]; then mkdir $stagedir chmod a+rx $stagedir for infile in $inputs; do #calc md5 and size mdsum=`md5sum $infile` infmdsum=${mdsum%%\ *} infsize=`stat -c %s $infile` cp $infile $stagedir DG_Inputlist="$DG_Inputlist -i ${infile##*/}=$stageweb/${infile##*/}=$infmdsum=$infsize" done chmod a+rx -R $stagedir fi #get files from .3GData I.e. 
atticlinks dgdatafile=$joboption_controldir/job.$joboption_gridid.3gdata if [ -f $dgdatafile ]; then while read line; do if [ ${#line} -gt 2 ]; then infile=${line%% *} infurl=${line#* } infurl=${infurl%;*} infmd5=${line#*md5=} infmd5=${infmd5%\:*} infsize=${line#*size=} infsize=${infsize%:*} DG_Inputlist="$DG_Inputlist -i $infile=$infurl=$infmd5=$infsize" fi done < $dgdatafile fi ############################################################ # DG: # create input and output list ########################################################### DG_Outputlist="" outfile=$joboption_controldir/job.$joboption_gridid.output outputfs=`cat $outfile` for ofile in $outputfs; do DG_Outputlist="$DG_Outputlist -o ${ofile#/} " done ########################################################### # Get Arguments from job ########################################################### DG_Args="" # remember to set -a flag if there are arguments i=1 joboption_args= eval "var_is_set=\${joboption_arg_$i+yes}" while [ ! -z "${var_is_set}" ] ; do eval "var_value=\${joboption_arg_$i}" # Use -- to avoid echo eating arguments it understands var_value=`echo -- "$var_value" |cut -f2- -d' '| sed 's/\\\\/\\\\\\\\/g' | sed 's/"/\\\"/g'` joboption_args="$joboption_args \"${var_value}\"" i=$(( $i + 1 )) eval "var_is_set=\${joboption_arg_$i+yes}" done if [ ! -z "$joboption_args" ]; then temp_arg=`echo "$joboption_args" | sed 's/^[ \t]*//' | sed 's/^\"//' | sed 's/\"$//'` DG_Args="-a \"$temp_arg\"" fi ########################################################### # Get environment variables from job # remember to set: PROXY_USERDN="/C=HU/..." for xtremweb ########################################################### DN=`grep -i "^subject=" $joboption_controldir/job.$joboption_gridid.local | sed "s/subject=//"` DG_Env="-E PROXY_USERDN=\"$DN\" " echo "# Setting environment variables as specified by user" 1>&2 has_gridglobalid='' i=0 eval "var_is_set=\${joboption_env_$i+yes}" while [ ! -z "${var_is_set}" ] ; do eval "var_value=\${joboption_env_$i}" if [ "$var_value" ] && [ -z "${var_value##GRID_GLOBAL_JOBID=*}" ]; then has_gridglobalid=yes else var_escaped=`echo "$var_value" | sed "s/'/'\\\\\''/g" | sed "s/\"/\\\\\"/g" | sed "s/^\'//" | sed "s/\'$/\"/" | sed "s/=/=\"/"` echo "# var $i: $var_escaped" 1>&2 DG_Env="$DG_Env -E $var_escaped" fi i=$(( $i + 1 )) eval "var_is_set=\${joboption_env_$i+yes}" done ####################################### # Submit the job ####################################### cmdline="wsclient -e $DG_Endpoint -m add -n $DG_App -g $DG_Grid $DG_Env $DG_Inputlist $DG_Outputlist $DG_Args" echo "running command: $cmdline" 1>&2 dato=`date +%Y-%m-%d` edgilog=$joboption_controldir/3gbridge_logs/$dato dato=`date +%Y-%m-%d_%H:%M:%S` OUT=$(eval "$cmdline 2>&1") if [ $? -eq 0 ]; then jobid="$DG_Endpoint|$OUT" #write EDGI monitor log msg="dt=$dato event=job_submission bridge_id=$DG_Endpoint job_id=$joboption_gridid job_id_bridge=\"$jobid\" status=Submitted" flock -w 10 $edgilog -c "echo $msg >> $edgilog" if [ $? = 1 ]; then echo "Failed to log monitor data to: $edgilog" 1>&2 fi # write output to arg file echo "joboption_wsendpoint=$DG_Endpoint" >> $arg_file echo "joboption_jobid=\"$jobid\"" >> $arg_file echo "job submitted successfully!" 
1>&2 echo "local job id: $jobid" 1>&2 echo "----- exiting submit_DGBridge_job -----" 1>&2 echo "" 1>&2 exit 0 fi #write EDGI monitor log msg="dt=$dato event=job_submission bridge_id=$DG_Endpoint job_id=$joboption_gridid status=Failed" echo "monitor: $msg" flock -w 10 $edgilog -c "echo $msg >> $edgilog" if [ $? = 1 ]; then echo "Failed to log monitor data to: $edgilog" 1>&2 fi echo "jobsubmission failed." 1>&2 echo "$OUT" 1>&2 echo "----- exiting submit_DGBridge_job -----" 1>&2 echo "" 1>&2 exit 1 nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/PaxHeaders.7502/dgbridge_service.ini0000644000000000000000000000012411524061561027340 xustar000000000000000027 mtime=1297113969.097822 27 atime=1513200576.519726 30 ctime=1513200663.495790241 nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/dgbridge_service.ini0000644000175000002070000000405711524061561027413 0ustar00mockbuildmock00000000000000# Out-of-the-box MINIMAL configuration template for a secure standalone Computing # Element (CE) with a 3g queue. # Assignments after empty line in a section must be updated to local cluster [common] profile=DGBridge x509_user_key=/etc/grid-security/hostkey.pem x509_user_cert=/etc/grid-security/hostcert.pem x509_cert_dir=/etc/grid-security/certificates voms_conf=/etc/grid-security/voms.xml pidfile=/var/run/arex_arched.pid logfile=/var/log/arex_arched.log port=60000 loglevel=VERBOSE lrms=DGBridge defaultshare=DGQueue gridmap=/etc/grid-security/grid-mapfile #hostname=edgi-dev.nbi.dk hostname= [arex] jobs_tracked=1000 max_jobs_run=2 max_jobs_transfered=100 max_jobs_transfered_additional=2 files_transfered=4 #endpoint=https://edgi-dev.nbi.dk:60000/arex #username=3gbridge #usermap=3gbridge #controldir=/var/spool/nordugrid/jobstatus #sessiondir=/var/spool/nordugrid/session #runtimedir=/var/spool/nordugrid/runtime #cachedir=/var/cache/arc #joblog=/var/log/arex-jobs.log endpoint= username= usermap= controldir= sessiondir= runtimedir= cachedir= joblog= [lrms/DG] #stage_path=/var/www/3GBridge/ #stage_prepend=http://edgi-dev.nbi.dk:9090/3GBridge/ stage_path= stage_prepend= [cluster] operating_system=LINUX osname=adotf osversion=adotf cpuvendor=adotf cpumodel=adotf cpuclockspeed=adotf connectivityout=true connectivityin=false #clustername=EDGI DEV #common_name=out of the box CE #long_description=EDGI DG Bridge test #otherinfo=This is an A-REX cluster used for testing the EDGI DG bridge #admindomain=ORG/NORDUGRID #location=Niels Bohr Institute #address=Blegdamsvej 21 #city=Copenhagen #country=Denmark #postcode=2100 #latitude=55.697 #longitude=12.572 #contact_name=User support #contact_detail=mailto:waananen@nbi.dk #contact_type=usersupport #service_mail=waananen@nbi.dk clustername= common_name= long_description= otherinfo= admindomain= location= address= city= country= postcode= latitude= longitude= contact_name= contact_detail= contact_type= service_mail= [queue/DGqueue] name=DGqueue queue_description=This is a queue for the 3GBridge scheduling_policy=FIFO nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/PaxHeaders.7502/DGBridgeDataPlugin.py0000644000000000000000000000012412041075417027302 xustar000000000000000027 mtime=1350859535.217639 27 atime=1513200576.523726 30 ctime=1513200663.488790155 nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/DGBridgeDataPlugin.py0000755000175000002070000000346012041075417027355 0ustar00mockbuildmock00000000000000#!/usr/bin/python import sys import arc import time import re #get cmdline args if not len(sys.argv)==3: print("Number of arguments must be 3 was: " + 
str(len(sys.argv))) sys.exit(-1) controldir=sys.argv[1] jobid=sys.argv[2] filename=controldir+'/job.'+jobid+'.description' locfile=controldir+'/job.'+jobid+'.3gdata' inpfile=controldir+'/job.'+jobid+'.input' print("starting 3GDataPlugin\n"+filename+"\n"+locfile+"\n\n") #read in file f=open(filename,'r') rawdesc=f.read() f.close() jobdesc=arc.JobDescription() jobdesclist=arc.JobDescriptionList() if not jobdesc.Parse(rawdesc,jobdesclist): sys.exit(-1) #extract staging info dselements=[] while jobdesclist.front().DataStaging.InputFiles.size()>0: dselements.append(jobdesclist.front().DataStaging.InputFiles.pop()) inputfiles="" locelements=[] #filter staging for dsel in dselements: #check for attic or http proto=dsel.Sources[0].Protocol() url=dsel.Sources[0].str() matchlist = re.findall(".*?;md5=.*?:size=.*?$",url) #check whether url has md5 and size set if (proto=='attic' or proto=='http') and len(matchlist)==1 and matchlist[0]==url : #we can put http here also, but we'll need to check for md5 and size locelements.append(dsel) else: jobdesclist.front().DataStaging.InputFiles.append(dsel) if not dsel.Sources[0].Protocol()=='file': inputfiles = inputfiles + dsel.Name + " " + dsel.Sources[0].str() + "\n" else: inputfiles = inputfiles + dsel.Name + "\n" #write modified input f=open(inpfile,'w') f.write(inputfiles) f.close() print(inputfiles) #print(jobdesc.UnParse()+"\n\n") print("attic type files\n") #append attic and http info to .3gdata f=open(locfile,'a') outstr="" for elem in locelements: outstr=outstr + elem.Name+ " " + elem.Sources[0].str() + "\n" print(outstr) f.write(outstr) f.close() sys.exit(0) nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/PaxHeaders.7502/README.DGBridge0000644000000000000000000000012411742037157025644 xustar000000000000000027 mtime=1334328943.821461 27 atime=1513200576.518726 30 ctime=1513200663.494790228 nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/README.DGBridge0000644000175000002070000001137111742037157025714 0ustar00mockbuildmock00000000000000README file for the ARC DGBridge == Files == README.DGBridge - This file == Prerequisites == The NorduGrid ARC CE must be installed. The easiest way to accomplish this is to first setup the NorduGrid repositories. On RHEL5/CentOS5 as root: % wget http://download.nordugrid.org/software/nordugrid-release/releases/1.0/redhat/el5/x86_64/nordugrid-release-1.0-1.el5.noarch.rpm % yum localinstall nordugrid-release-1.0-1.el5.noarch.rpm Also voms-proxy-info from the voms-clients package is needed. == Installation == Installation of the ARC CE including the 3GBridge back-end on RHEL/Centos: % yum groupinstall "ARC Server" On other RPM based platforms the actual name of the RPM is slightly different. === Configuration === Setup a standard ARC CE and make sure the following attributes are set correctly in the [common] section of /etc/arc.conf: dgbridge_stage_dir dgbridge_stage_prepend ==== 3gbridge user creation ==== You must set up a user that will run the ARC services. This can be root but it is not recommended for security reasons. Then set the user config option to that user in arc.conf. ==== Setup directories ==== Now you must decide where ARC will store its data. This is normally: /var/spool/arc/ When you've decided what directory to create, you must create the subdirectories: jobstatus/ runtime/ session/ These directories must be writable by the user chosen above. ==== Setup runtime environments ==== An important part of the setup is creating the RTEs. 
The RTEs will decide which applications and 3GBridges that are available. The RTEs are simple shell scripts that must set four variables: DG_App DG_Grid DG_Endpoint DG_AllowedVOs An example RTE setting up DSP to be run on Asuka at SZTAKI: #!/bin/sh # This is a test RTE for the DG bridge on ASUKA # running the DSP app with ATTIC enabled WS DG_App="dsp" DG_Grid="SZDG" DG_Endpoint="http://asuka.lpds.sztaki.hu:8091" DG_AllowedVOs="all" These files must be places in the runtime directory configured in arc.conf The naming convention requires the file to be named woth only uppercase letters and without a suffix. RTEs pointing to a specific 3G bridge must be placed in a subdirectory named 3GBRIDGE/[bridge identifier], again the subdirectories must be all uppercase letters. The RTE above is stored on the develop infrastructure as: /var/spool/arc/runtime/3GBRIDGE/ASUKA/DSP. ==== Setup logging via cron ==== Add a cron entry to log every 5 minute: echo '0-59/5 * * * * /usr/share/arc/DGLog2XML.py'|crontab -u 3gbridge - * Setup apache You must setup apache or another webserver to serve the datastaging and monitor data. You can use whatever method available. The data can even be served from another machine as long as the backend can access it using the filesystem, e.g. WebDAV mounted using FUSE. The development infrastructure uses an apache server configured as below: Listen 9090 Alias /3GBridge "/var/www/3GBridge" Options Indexes AllowOverride None Order allow,deny Allow from all Remember to also configure the data staging in the arc.conf config file. ==== Certificates ==== To use ARC you must have a valid server certificate. The location must be set in the configuration. ==== Authorization ==== Authorization in the ARC bridge is a two step process. First the user certificate must be authorized. Then the VO membership will be checked against the RTE. The authorized users' DNs must be put in the gridmap file. The DNs must be mapped to the local user. An example: "/O=Grid/O=NorduGrid/OU=nbi.dk/CN=Christian Ulrik Soettrup" 3gbridge Normally it is a bad idea to map the clients to the same user running the daemons, but it can be done on the bridge as long as only 3GBridge RTEs are available. It of course also contingent on the daemons not running as root. == Starting daemons == % service a-rex start == Testing == To test a simple DSP job: % arcproxy --voms % arcsub -c https://arex.example.com:60000/arex -f dsp.xrsl where is the allowed VOMS VO and arex.example.com is the fully qualified hostname of the machine running the 3G bridge. 
A simple dsp.xrsl looks like: $ cat dsp.xrsl &(executable="/3G") (arguments="-f 22 -i 22 -p 723 -n pools.txt") (runtimeenvironment="3GBRIDGE/ASUKA/DSP") (inputfiles= ("pools.txt" "attic://voldemort.cs.cf.ac.uk:7048/dl/meta/pointer/0c19f2b2-589a-4dad-902b-f6d2a3e7ad44;md5=7b7eb86bf50c58cbf92dc12ff5adf7f4:size=9652") ) (outputfiles= ("cost.txt" "" ) ) After submission the job status can be checked with something like: % arcstat where looks like: https://arex.example.com:60000/arex/1234567812345678 nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/PaxHeaders.7502/DGLog2XML.py0000644000000000000000000000012411750232665025327 xustar000000000000000027 mtime=1335965109.164507 27 atime=1513200576.520726 30 ctime=1513200663.489790167 nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/DGLog2XML.py0000755000175000002070000001455611750232665025412 0ustar00mockbuildmock00000000000000#!/usr/bin/python """Usage: DGLog2XML.py [-h] [-d|debug] [-c inifile|--config=inifile] -h,--help Print this information -d,--debug enable debug printing (default: 0) -c inifile,--config=inifile Specify the ARC config (default: /etc/arc/service.ini) """ # Set to 1 to enable debug messages on stdout debug_level = 0 def debug(msg): """Print debug messages. The global variable debug_level controls the behavior. Currently only 0 and 1 is used """ global debug_level if debug_level == 1: print(msg) def readlog(logfile): """Read whole logfile - deprecated""" f = open(logfile) oldlog = f.readlines() f.close newlog = [] for line in oldlog: newlog.append(line.strip()) return newlog def line2dict(line): """Convert a log line to a dictionary It is assumed that lines are of the form attr1=val1 attr2=val2 ... """ import sys fields = line.split(" ") f = {} for i in fields: pair = i.split("=",1) if len(pair) != 2: print("""Error parsing field: "%s" in line:\n%s"""%(i,line)) sys.exit(1) (attr,val) = i.split("=") f[attr] = val return f def line2xml(line): """Convert a log line to XML - ignoring empty lines""" line = line.strip() if line == "": return "" else: return dict2xml(line2dict(line)) def dict2xml(line): """Translation of log between log in dictionary format to xml""" # This dictionary/arrayrepresents valid elements as well as the order order = ['dt', 'event', 'bridge_id', 'job_id', 'job_id_bridge', 'status', 'application', 'input_grid_name', ] emap = { 'dt' : 'dt', 'event' : 'event', 'job_id' : 'job_id', 'job_id_bridge' : 'job_id_bridge', 'status' : 'status', 'application' : 'application', 'input_grid_name' : 'input_grid_name', 'bridge_id' : 'output_grid_name', } # Fix date #pos = line['dt'].find('_') #line['dt'] = line['dt'][0:pos] + ' ' + line['dt'][pos+1:] line['dt'] = line['dt'].replace('_',' ') xml = "" for key in order: if key in line: xml += " <%s>%s\n" %(emap[key],line[key],emap[key]) xml = " \n%s \n" %xml return xml def getstatus(status_file): """Read status file and return tuple of last log file name and index line""" try: f = open(status_file) except: debug("Problem reading %s"%status_file) return log_start for l in f.readlines(): # Valid lines starts with 2 if l[0] == '2': result = l.strip().split(' ') if len(result) == 2: f.close() return result print("Badly formatted line: %s"%l) f.close() return ["",0] def parseini(inifile): """Get required info from the ARC ini file (eg. 
/etc/arc/service.ini)""" import os import ConfigParser config = ConfigParser.RawConfigParser() config.read(inifile) stage_path = config.get('common', 'dgbridge_stage_dir') xmldir = os.path.join(stage_path,"monitor") controldir = config.get('grid-manager', 'controldir') logdir = os.path.join(controldir,"3gbridge_logs") return xmldir,logdir def writexml(timestamp,xmldir,xml): """Write the log report in XML""" import os xmlfile = os.path.join(xmldir,"report_%s.xml"%str(timestamp)) debug("Writing xml to %s"%xmlfile) x = open(xmlfile,"w") x.write(xml) x.close() return def updatestatus(status_file,lastlog,lastline): """Update status file The file is updated with every call even though the content does not change """ debug("Updating status %(lastlog)s %(lastline)s in %(status_file)s" %locals()) f = open(status_file,"w") f.write("%s %s"%(lastlog,lastline)) f.close() return def main(): """Main routine""" import os,time,sys import getopt global debug_level inifile = "/etc/arc/service.ini" try: opts, args = getopt.getopt(sys.argv[1:], "hc:d", ["help","config","debug"]) except getopt.error, msg: print msg print "for help use --help" sys.exit(2) for o, a in opts: if o in ("-h", "--help"): print __doc__ sys.exit(0) if o in ("-c", "--config"): inifile = a if o in ("-d", "--debug"): debug_level=1 xmldir,logdir = parseini(inifile) if not os.path.isdir(xmldir): debug("No such directory: %s"%xmldir) debug("Please create %s such that xml log files can be produced"%xmldir) sys.exit(1) status_name = "status" status_file = os.path.join(logdir,status_name) xml = "" entries = os.listdir(logdir) entries.sort() # Check for status file log_start = ["",0] if status_name in entries: log_start = getstatus(status_file) # Find index of the first logfile in entries first_logfile = 0 if log_start[0] != "": first_logfile = entries.index(log_start[0]) # Find relevant log files for i in range(first_logfile,len(entries)): e = entries[i] # Only accept logs from this millenia if e[0] == '2': f = open(os.path.join(logdir,e)) n = 0 # If this is the first logfile skip lines if log_start[0] == e and log_start[1] > 0: for i in range(int(log_start[1])): line = f.readline() n = int(log_start[1]) line = f.readline() if line != "": xml += line2xml(line) while line: line = f.readline() n += 1 xml += line2xml(line) f.close() lastlog = e lastline = n timestamp = int(time.mktime(time.localtime())) if not xml == "": xml = """\n"""% timestamp+xml+"""""" writexml(timestamp,xmldir,xml) else: xml = """""" writexml(timestamp,xmldir,xml) if 'lastlog' in locals() and 'lastline' in locals(): updatestatus(status_file,lastlog,lastline) if __name__ == "__main__": main() nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/PaxHeaders.7502/DGAuthplug.py0000644000000000000000000000012411742037157025734 xustar000000000000000027 mtime=1334328943.821461 27 atime=1513200576.524726 30 ctime=1513200663.487790143 nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/DGAuthplug.py0000755000175000002070000001363311742037157026012 0ustar00mockbuildmock00000000000000#!/usr/bin/python """Usage: DGAuthplug.py [username] Authplugin for ACCEPTING state in 3G-Bridge Example: authplugin="ACCEPTED timeout=60,onfailure=fail,onsuccess=log /usr/share/arc/DGAuthplug.py %S %C /var/spool/arc/runtime %I" """ def ExitError(msg,code): """Print error message and exit""" from sys import exit print(msg) exit(code) def WriteLog(msg,logfile,username): """Write the logfile""" from fcntl import flock, LOCK_EX, LOCK_UN, LOCK_NB from datetime import datetime import pwd,os # We should have 
logging and locking fd = open(logfile, 'a') # Set correct ownership if this plugin (and thus A-REX) create the file (pw_name,pw_passwd,pw_uid,pw_gid,pw_gecos,pw_dir,pw_shell) = pwd.getpwnam(username) os.chown(logfile,pw_uid,pw_gid) try: flock(fd, LOCK_EX) except IOError: fd.close() ExitError('Failed to lock',1) else: date = datetime.now().strftime("%Y-%m-%d_%H:%M:%S") fd.write("dt=%s %s\n"%(date,msg)) fd.close() def GetVOs(control_dir,jobid): """Return an array of VO TODO: support arcproxy """ from subprocess import Popen,PIPE from os.path import isfile proxy_file = '%s/job.%s.proxy' %(control_dir,jobid) if not isfile(proxy_file): ExitError("No such proxy: %s"%proxy_file,1) cmd1 = ["voms-proxy-info","-all","-file",proxy_file] cmd2 = ["grep", "^VO "] p1 = Popen(cmd1, stdout=PIPE) p2 = Popen(cmd2, stdin=p1.stdout, stdout=PIPE) stdout,stderr = p2.communicate() lines = stdout.split('\n') vos = [] for line in lines: if line: vo = line.split(':')[1].strip() vos.append(vo) return vos def GetPrimaryVO(control_dir,jobid): """Return the first VO (if any) as a simle string""" VOs = GetVOs(control_dir,jobid) if len(VOs) >0: return VOs[0] else: return "" return GetVOs(control_dir,jobid)[0] def GetDescription(control_dir,jobid): from os.path import isfile desc_file = '%s/job.%s.description' %(control_dir,jobid) if not isfile(desc_file): ExitError("No such description file: %s"%desc_file,1) f = open(desc_file) desc = f.read() f.close() return desc def GetRTEs(desc): """Return array with RTEs""" import arc jl = arc.JobDescriptionList() jd = arc.JobDescription() jd.Parse(desc,jl) rtes = [] for i in jl.front().Resources.RunTimeEnvironment.getSoftwareList(): rtes.append(i.getName()) return rtes def GetAllowedVOs(runtime_dir,rte): """Find allowed VOs from site RTE""" from os.path import isfile rte_file = "%s/%s" % (runtime_dir,rte) # RTE should be available if not isfile(rte_file): ExitError("No such RTE: %s"%rte,1) f = open(rte_file) rte_script = f.readlines() f.close() for l in rte_script: # Remove newlines l = l.strip() if l[0:14] == 'DG_AllowedVOs=': l = l.split("=")[1] vos = l[1:-1].split(" ") return vos def GetUniqueRTE(desc): """Return unique RTE from job description""" import sys rtes = GetRTEs(desc) # Make sure there is only 1 RTE specified if len(rtes) != 1: ExitError("Number of RTEs should be 1 - not: %s" % len(rtes),1) return rtes[0] def IsAccepted(control_dir,runtime_dir,jobid): # Find the primary VO vo = GetPrimaryVO(control_dir,jobid) # Find requested runtime environments desc = GetDescription(control_dir,jobid) rte = GetUniqueRTE(desc) allowed_vos = GetAllowedVOs(runtime_dir,rte) if vo not in allowed_vos and 'all' not in allowed_vos: # ExitError("You are not allowed",1) # ExitError("Your vo %s are not among the allowed: %s"%(vo,allowed_vos),1) if not vo: ExitError("You must be assigned to one of the allowed VOs",1) else: ExitError("Your vo <%s> is not among the allowed VOs for %s"%(vo,rte),1) return True def ParseRTE(runtime_dir,rte): """Parse RTE an return as dictionary""" import ConfigParser # import io import StringIO import sys from os.path import isfile # Find VOs rte_file = "%s/%s" % (runtime_dir,rte) # RTE should be available if not isfile(rte_file): ExitError("No such RTE: %s"%rte,1) f = open(rte_file) str = f.read() f.close() str = "[s]\n" + str config = ConfigParser.RawConfigParser() # config.readfp(io.BytesIO(str)) strfp = StringIO.StringIO(str) config.readfp(strfp) d = {} for i in config.items('s'): d[i[0]] = i[1].strip('"') return d def 
DoLog(control_dir,runtime_dir,jobid,username): """Log the state""" import datetime # Logfile is in /3gbridge_logs/YYYY-MM-DD logfile = "%s/3gbridge_logs/%s" % (control_dir,datetime.date.today().isoformat()) desc = GetDescription(control_dir,jobid) rte = GetUniqueRTE(desc) rte_dict = ParseRTE(runtime_dir,rte) application = rte_dict['dg_app'] vo = GetPrimaryVO(control_dir,jobid) WriteLog("event=job_entry job_id=%(jobid)s application=%(application)s input_grid_name=ARC/%(vo)s"%locals(),logfile,username) return True def main(): """Main""" import sys # Parse arguments if len(sys.argv) < 5: ExitError("Too few arguments:\n"+"%s"%sys.argv,1) if len(sys.argv) == 5: (exe, status, control_dir, runtime_dir, jobid) = sys.argv username = 'root' elif len(sys.argv) == 6: (exe, status, control_dir, runtime_dir, jobid, username) = sys.argv else: ExitError("Too many arguments\n"+__doc__,1) if status == "ACCEPTED" and IsAccepted(control_dir,runtime_dir,jobid): sys.exit(0) if status == "PREPARING" and DoLog(control_dir,runtime_dir,jobid,username): sys.exit(0) sys.exit(1) if __name__ == "__main__": main() nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/PaxHeaders.7502/dgbridge.conf0000644000000000000000000000012411524061561025766 xustar000000000000000027 mtime=1297113969.097822 27 atime=1513200576.526726 30 ctime=1513200663.495790241 nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/dgbridge.conf0000644000175000002070000000035111524061561026032 0ustar00mockbuildmock00000000000000Listen 9090 Alias /3GBridge "/var/www/3GBridge" Options Indexes AllowOverride None Order allow,deny Allow from all nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/PaxHeaders.7502/configure-DGBridge-env.sh0000644000000000000000000000012411742037157030066 xustar000000000000000027 mtime=1334328943.821461 27 atime=1513200576.518726 30 ctime=1513200663.485790118 nordugrid-arc-5.4.2/src/services/a-rex/lrms/dgbridge/configure-DGBridge-env.sh0000644000175000002070000000136011742037157030133 0ustar00mockbuildmock00000000000000# # set environment variables for the desktop grid bridge: # ############################################################## # Reading configuration from $ARC_CONFIG ############################################################## if [ -z "$pkgdatadir" ]; then echo 'pkgdatadir must be set' 1>&2; exit 1; fi . "$pkgdatadir/config_parser.sh" || exit $? ARC_CONFIG=${ARC_CONFIG:-/etc/arc.conf} config_parse_file $ARC_CONFIG 1>&2 || exit $? config_import_section "common" config_import_section "infosys" config_import_section "grid-manager" config_import_section "lrms" dgbridge_stage_dir=$CONFIG_dgbridge_stage_dir dgbridge_stage_prepend=$CONFIG_dgbridge_stage_prepend #set 3G values EDGES_3G_TIMEOUT=24 EDGES_3G_RETRIES=3 # Script returned ok true nordugrid-arc-5.4.2/src/services/a-rex/lrms/PaxHeaders.7502/config_parser_compat.sh0000644000000000000000000000012411717461530026325 xustar000000000000000027 mtime=1329488728.134454 27 atime=1513200576.603727 30 ctime=1513200663.301787868 nordugrid-arc-5.4.2/src/services/a-rex/lrms/config_parser_compat.sh0000644000175000002070000001235711717461530026402 0ustar00mockbuildmock00000000000000############################################## # Configuration parser module. # Requires a POSIX shell and perl # # multi-valued options: only the last value is remembered ############################################## # # Synopsis: # # . 
config_parser_compat.sh # # config_parse_file /etc/arc.conf || exit 1 # config_import_section common # config_import_section grid-manager # config_import_section infosys # # set | grep CONFIG_ # # config_match_section queue/short || echo No such queue # # port=$(config_print_option gridftpd port) # # for name in config_subsections queue; do # echo Found section: queue/$name # fi # ############################################## # # Parse the config file given as an argument # config_parse_file() { arc_conf=$1 if [ -z "$arc_conf" ]; then echo 'config_parser: No config file given!' 1>&2 return 1 elif [ ! -r "$arc_conf" ]; then echo "config_parser: Cannot read config file: $arc_conf" 1>&2 return 1 fi script='my ($nb,$bn)=(0,0,""); my %opts=(); while(<>) { chomp; if (/^\s*\[([\w\-\.\/]+)\]\s*$/) { print_section() if $nb; $nb++; $bn=$1; } elsif (/^\+?(\w+)\s*=\s*([\"'\'']?)(.*)(\2)\s*$/) { my ($opt,$val)=($1,$3); $val=~s/'\''/'\''\\'\'''\''/g; $bn .= "/$val" if $bn eq "group" && $opt eq "name"; $bn .= "/$val" if $bn eq "vo" && $opt eq "id"; unshift @{$opts{$opt}}, $val; } elsif (/^\s*#/) { # skip comment line } elsif (/^\s*$/) { # skip empty line } elsif (/^\s*all\s*$/) { # make an exception for "all" command } elsif (/^\s*[-!].*$/) { # skip lines starting with a dash or exclamation mark, not relevant for infosys } elsif (/^\s*\/.*$/) { # skip voms_trust_chain lines } elsif (/^\s*\".*$/) { # skip voms_trust_chain lines } else { print "echo config_parser: Skipping malformed line in section \\\[$bn\\\] at line number $. 1>&2\n"; } } print_section(); print "_CONFIG_NUM_BLOCKS='\''$nb'\'';\n"; sub print_section { my $no=0; while (my ($opt,$val)=each %opts) { $no++; print "_CONFIG_BLOCK${nb}_OPT${no}_NAME='\''$opt'\'';\n"; print "_CONFIG_BLOCK${nb}_OPT${no}_VALUE='\''$val->[@$val-1]'\'';\n"; if (@$val > 1) { for $i (1 .. @$val) { $no++; # multi-valued option print "_CONFIG_BLOCK${nb}_OPT${no}_NAME='\''${opt}_$i'\'';\n"; print "_CONFIG_BLOCK${nb}_OPT${no}_VALUE='\''$val->[$i-1]'\'';\n"; } } }; %opts=(); print "_CONFIG_BLOCK${nb}_NAME='\''$bn'\'';\n"; print "_CONFIG_BLOCK${nb}_NUM='\''$no'\'';\n"; } ' config=`cat $arc_conf | perl -w -e "$script"` || return $? unset script eval "$config" || return $? unset config return 0 } # # Imports a section of the config file into shell variables. 
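# (Added illustrative example, not in the original file; assumes arc.conf
#  defines controldir in its [grid-manager] section, as the other scripts here do:
#    config_import_section grid-manager
#    echo "$CONFIG_controldir"    # the section's controldir option, if set)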
# Option names from will be prefixed with CONFIG_ # config_import_section() { block=$1 i=0 if [ -z "$_CONFIG_NUM_BLOCKS" ]; then return 1; fi while [ $i -lt $_CONFIG_NUM_BLOCKS ]; do i=$(($i+1)) eval name="\$_CONFIG_BLOCK${i}_NAME" if [ "x$block" != "x$name" ]; then continue; fi eval num="\$_CONFIG_BLOCK${i}_NUM" if [ -z "$num" ]; then return 1; fi j=0 while [ $j -lt $num ]; do j=$(($j+1)) eval name="\$_CONFIG_BLOCK${i}_OPT${j}_NAME" if [ -z "$name" ]; then return 1; fi eval "CONFIG_$name=\$_CONFIG_BLOCK${i}_OPT${j}_VALUE" done return 0 done return 1 } config_print_option() { block=$1 opt=$2 i=0 if [ -z "$_CONFIG_NUM_BLOCKS" ]; then return 1; fi while [ $i -lt $_CONFIG_NUM_BLOCKS ]; do i=$(($i+1)) eval name="\$_CONFIG_BLOCK${i}_NAME" if [ "x$block" != "x$name" ]; then continue; fi eval num="\$_CONFIG_BLOCK${i}_NUM" if [ -z "$num" ]; then return 1; fi j=0 val= while [ $j -lt $num ]; do j=$(($j+1)) eval name="\$_CONFIG_BLOCK${i}_OPT${j}_NAME" if [ -z "$name" ]; then return 1; fi if [ "x$name" = "x$opt" ]; then eval "val=\$_CONFIG_BLOCK${i}_OPT${j}_VALUE" fi done echo -n "$val" [ -n "$val" ] && return 0 done return 1 } config_match_section() { block=$1 i=0 if [ -z "$_CONFIG_NUM_BLOCKS" ]; then return 1; fi while [ $i -lt $_CONFIG_NUM_BLOCKS ]; do i=$(($i+1)) eval name="\$_CONFIG_BLOCK${i}_NAME" if [ "x$block" = "x$name" ]; then return 0; fi done return 1 } config_subsections() { block=$1 i=0 if [ -z "$_CONFIG_NUM_BLOCKS" ]; then return 1; fi { while [ $i -lt $_CONFIG_NUM_BLOCKS ]; do i=$(($i+1)) eval name="\$_CONFIG_BLOCK${i}_NAME" tail=${name#$block/} if [ "x$name" != "x$tail" ]; then echo ${tail%%/*}; fi done } | sort -u } config_hide_all() { unset `set|cut -f1 -d=|grep '^CONFIG_[A-Za-z0-9_]*$'` } config_reset() { config_hide_all unset `set|cut -f1 -d=|grep '^_CONFIG_[A-Za-z0-9_]*$'` } config_destroy() { config_reset unset config_parse_file unset config_import_section unset config_match_section unset config_subsections unset config_hide_all unset config_reset unset config_destroy } nordugrid-arc-5.4.2/src/services/a-rex/lrms/PaxHeaders.7502/README0000644000000000000000000000012311016612002022444 xustar000000000000000026 mtime=1211831298.62818 27 atime=1513200576.603727 30 ctime=1513200663.299787843 nordugrid-arc-5.4.2/src/services/a-rex/lrms/README0000644000175000002070000000002611016612002022510 0ustar00mockbuildmock00000000000000LRMS control scripts. nordugrid-arc-5.4.2/src/services/a-rex/lrms/PaxHeaders.7502/scan_common.sh.in0000644000000000000000000000012712777647314025062 xustar000000000000000027 mtime=1476349644.781438 30 atime=1513200650.254628296 30 ctime=1513200663.303787892 nordugrid-arc-5.4.2/src/services/a-rex/lrms/scan_common.sh.in0000644000175000002070000002152312777647314025127 0ustar00mockbuildmock00000000000000# This file contains functions that are used througout the scan-*-job scripts. 
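# (Added note: like submit_common.sh and cancel_common.sh, this file is meant
#  to be sourced by the per-LRMS helper scripts rather than executed directly.)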
progname=$(basename "$0") # # scan-*-jobs has STDOUT redirected to /dev/null and STDERR redirected to # job.helper..errors # log () { echo "[`date +%Y-%m-%d\ %T`] $progname: $*" 1>&2; } perflog_common () { perflog_dname=$1 d=`date +%F` perflog_fname=${perflog_dname}/system${d}.perflog jobstatus_dir=$2 #gather performance information loadavg=`cat /proc/loadavg` memtotal=$(a=`grep MemTotal /proc/meminfo`; echo ${a#MemTotal:}) memfree=$(a=`grep MemFree /proc/meminfo`; echo ${a#MemFree:}) if [ -d "$jobstatus_dir" ]; then jsd_size=`ls -l $jobstatus_dir| wc -l` jsdP_size=`ls -l $jobstatus_dir/processing | wc -l` fi #log the loadavg, stripping the last elemenmt, and the rest of the gathered info echo "[`date +%Y-%m-%d\ %T`] LoadAVG: ${loadavg% *}" >> $perflog_fname; echo "[`date +%Y-%m-%d\ %T`] MemStat: $memtotal $memfree" >> $perflog_fname; echo "[`date +%Y-%m-%d\ %T`] Control dir: $jsd_size $jsdP_size" >> $perflog_fname; # gather gridftp info gftp_pid=`cat /var/run/gridftpd.pid` gsiftp=`top -b -n 1 -p ${gftp_pid} | grep -w ${gftp_pid} | sed -e 's/[[:space:]]*$//'` echo -e "[`date +%Y-%m-%d\ %T`] Gridftpd: $gsiftp" >> $perflog_fname; # gather slapd info slapd_pid=`cat /var/run/arc/bdii/db/slapd.pid` slapd=`top -b -n 1 -p ${slapd_pid} | grep -w ${slapd_pid} | sed -e 's/[[:space:]]*$//'` echo -e "[`date +%Y-%m-%d\ %T`] Slapd: ${slapd}" >> $perflog_fname; # gather a-rex information arex_pid=`cat /var/run/arched-arex.pid` arex=`top -b -n 1 -p ${arex_pid} | grep -w ${arex_pid} | sed -e 's/[[:space:]]*$//'` echo -e "[`date +%Y-%m-%d\ %T`] A-Rex: ${arex}" >> $perflog_fname; unset perflog_dname unset perflog_fname unset jobstatus_dir } # This function takes a time interval formatted as 789:12:34:56 (with days) or # 12:34:56 (without days) and transforms it to seconds. It returns the result in # the return_interval_seconds variable. interval_to_seconds () { _interval_dhms=$1 _interval_size=`echo $_interval_dhms | grep -o : | wc -l` if [ $_interval_size -eq 2 ]; then return_interval_seconds=`echo $_interval_dhms | tr : ' ' | awk '{print $1*60*60+$2*60+$3;}'` elif [ $_interval_size -eq 3 ]; then return_interval_seconds=`echo $_interval_dhms | tr : ' ' | awk '{print $1*24*60*60+$2*60*60+$3*60+$4;}'` else echo "Bad formatting of time interval: $_interval_dhms" >&2 return_interval_seconds= fi unset _interval_dhms _interval_size } # This function takes a date string in the form recognized by the date utility # and transforms it into seconds in UTC time. It returns the result in the # return_date_seconds variable. date_to_utc_seconds () { _date_string=$1 return_date_seconds= [ -z "$_date_string" ] && return _date_seconds=`date -d "$_date_string" +%s` [ ! $? = 0 ] && return date_seconds_to_utc "$_date_seconds" unset _date_string _date_seconds } # This function takes a timestamp as seconds in local time and transforms it into # seconds in UTC time. It returns the result in the return_date_seconds variable. date_seconds_to_utc () { _date_seconds=$1 _offset_hms=`date +"%::z"` _offset_seconds=`echo $_offset_hms | tr ':' ' ' | awk '{ print $1*60*60+$2*60+$3; }'` return_date_seconds=$(( $_date_seconds - ($_offset_seconds) )) unset _date_seconds _offset_hms _offset_seconds } # This function takes a timestamp as seconds and transforms it to Mds date # format (YYYYMMDDHHMMSSZ). It returns the result in the return_mds_date # variable. 
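# (Added illustrative example, not part of the original script; the output
#  shown assumes the local timezone is UTC, since date formats in local time:
#    seconds_to_mds_date 0    # sets return_mds_date=19700101000000Z)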
seconds_to_mds_date () { _date_seconds=$1 return_mds_date=`date -d "1970-01-01 UTC $_date_seconds seconds" +"%Y%m%d%H%M%SZ"` unset _date_seconds } # # gets the numerical uid of the owner of a file # get_owner_uid () { script='my $filename = $ARGV[0]; exit 1 unless $filename; my @stat = stat($ARGV[0]); exit 1 unless defined $stat[4]; print "$stat[4]\n"; ' /usr/bin/perl -we "$script" "$1" } # # If running as root, attempts to switch to the uid passed as the first # argument and then runs the command passed as the second argument in a shell. # The remaining arguments are passed as arguments to the shell. # do_as_uid () { test $# -ge 2 || { log "do_as_uid requires 2 arguments"; return 1; } script='use English; my ($uid, @args) = @ARGV; if ( $UID == 0 ) { my ($name, $pass, $uid, $gid, $quota, $comment, $gcos, $dir, $shell, $expire) = getpwuid($uid); eval { $GID = $gid; $UID = $uid }; print STDERR "Cannot switch to uid($UID): $@\n" if $@; } system("@posix_shell@","-c",@args); exit 0 if $? eq 0; exit ($?>>8||128+($?&127)); ' /usr/bin/perl -we "$script" "$@" } # # Input variables: # * sessiondir # * uid # Output variables: # * diagstring -- the whole contents of .diag # * nodename # * WallTime # * UserTime # * KernelTime # * TotalMemory # * ResidentMemory # * LRMSStartTime # * LRMSEndTime # * exitcode # job_read_diag() { [ -n "$uid" ] && [ -n "$sessiondir" ] \ || { log "job_read_diag requires the following to be set: uid sessiondir"; return 1; } diagfile=$sessiondir.diag; [ -f "$diagfile" ] || { log "diag file not found at: $sessiondir.diag"; return 1; } diagstring=$(do_as_uid $uid "tail -n 1000 '$diagfile'") [ $? = 0 ] || { log "cannot read diag file at: $diagfile"; return 1; } nodename=$(echo "$diagstring" | sed -n 's/^nodename=\(..*\)/\1/p') WallTime=$(echo "$diagstring" | sed -n 's/^WallTime=\([0-9.]*\)s/\1/p' | tail -n 1) UserTime=$(echo "$diagstring" | sed -n 's/^UserTime=\([0-9.]*\)s/\1/p' | tail -n 1) KernelTime=$(echo "$diagstring" | sed -n 's/^KernelTime=\([0-9.]*\)s/\1/p' | tail -n 1) TotalMemory=$(echo "$diagstring" | sed -n 's/^AverageTotalMemory=\([0-9.]*\)kB/\1/p' | tail -n 1) ResidentMemory=$(echo "$diagstring" | sed -n 's/^AverageResidentMemory=\([0-9.]*\)kB/\1/p' | tail -n 1) LRMSStartTime=$(echo "$diagstring" | sed -n 's/^LRMSStartTime=\([0-9][0-9]*Z\)/\1/p' | tail -n 1) LRMSEndTime=$(echo "$diagstring" | sed -n 's/^LRMSEndTime=\([0-9][0-9]*Z\)/\1/p' | tail -n 1) exitcode=$(echo "$diagstring" | sed -n 's/^exitcode=\([0-9]*\)/\1/p' | tail -n 1) for key in nodename WallTime UserTime KernelTime AverageTotalMemory AverageResidentMemory \ exitcode LRMSStartTime LRMSEndTime LRMSExitcode LRMSMessage; do diagstring=$(echo "$diagstring" | grep -v "^$key=") done # These are set on the font-end. Not to be overwritten diagstring=$(echo "$diagstring" | grep -v "^frontend_") diagstring=$(echo "$diagstring" | grep -v "^Processors=") } # # Input variables: # * sessiondir # * uid # * LRMSExitcode # * LRMSMessage # + all output variables from job_read_diag # OBS: nodename should be a multi-line string, one line per node (or is it per cpu used?) 
# OBS: UserTime, KernelTime, Walltime must be given in seconds (without unit at the end) # OBS: TotalMemory, ResidentMemory must be given in kB (without unit at the end) # OBS: LRMSStartTime, LRMSEndTime must be of Mds form YYYYMMDDHHMMSSZ (note: UTC timezone) # job_write_diag() { [ -n "$uid" ] && [ -n "$sessiondir" ] \ || { log "job_write_diag requires the following to be set: uid sessiondir"; return 1; } diagfile=$sessiondir.diag; { echo "$diagstring" && echo [ -n "$nodename" ] && echo "$nodename" | sed -n 's/^\(..*\)/nodename=\1/p' [ -n "$WallTime" ] && echo "WallTime=${WallTime}s" [ -n "$Processors" ] && echo "Processors=${Processors}" [ -n "$UserTime" ] && echo "UserTime=${UserTime}s" [ -n "$KernelTime" ] && echo "KernelTime=${KernelTime}s" [ -n "$TotalMemory" ] && echo "AverageTotalMemory=${TotalMemory}kB" [ -n "$ResidentMemory" ] && echo "AverageResidentMemory=${ResidentMemory}kB" [ -n "$LRMSStartTime" ] && echo "LRMSStartTime=$LRMSStartTime" [ -n "$LRMSEndTime" ] && echo "LRMSEndTime=$LRMSEndTime" [ -n "$LRMSMessage" ] && echo "LRMSMessage=$LRMSMessage" [ -n "$LRMSExitcode" ] && echo "LRMSExitcode=$LRMSExitcode" [ -n "$exitcode" ] && echo "exitcode=$exitcode" } | do_as_uid $uid "cat > '$diagfile'" [ $? = 0 ] || { log "cannot write diag file at: $diagfile"; return 1; } } # Append .comment (containing STDOUT & STDERR of the job wrapper) to .errors # This file can also contain a message from the LRMS (i.e. the reason for killing the job). save_commentfile () { uid=$1 commentfile=$2 errorsfile=$3 action=" { echo '------- Contents of output stream forwarded by the LRMS ---------' cat '$commentfile' 2> /dev/null echo '------------------------- End of output -------------------------' } >> '$errorsfile' " do_as_uid "$uid" "$action" } nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/arex.cpp0000644000000000000000000000012413213710714022265 xustar000000000000000027 mtime=1513066956.143115 27 atime=1513200576.607727 30 ctime=1513200662.683780309 nordugrid-arc-5.4.2/src/services/a-rex/arex.cpp0000644000175000002070000012322513213710714022337 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "job.h" #include "grid-manager/log/JobLog.h" #include "grid-manager/log/JobsMetrics.h" #include "grid-manager/run/RunPlugin.h" #include "grid-manager/jobs/ContinuationPlugins.h" #include "arex.h" namespace ARex { #define DEFAULT_INFOPROVIDER_WAKEUP_PERIOD (600) #define DEFAULT_INFOSYS_MAX_CLIENTS (1) #define DEFAULT_JOBCONTROL_MAX_CLIENTS (100) #define DEFAULT_DATATRANSFER_MAX_CLIENTS (100) static const std::string BES_FACTORY_ACTIONS_BASE_URL("http://schemas.ggf.org/bes/2006/08/bes-factory/BESFactoryPortType/"); static const std::string BES_FACTORY_NPREFIX("bes-factory"); static const std::string BES_FACTORY_NAMESPACE("http://schemas.ggf.org/bes/2006/08/bes-factory"); static const std::string BES_MANAGEMENT_ACTIONS_BASE_URL("http://schemas.ggf.org/bes/2006/08/bes-management/BESManagementPortType/"); static const std::string BES_MANAGEMENT_NPREFIX("bes-management"); static const std::string BES_MANAGEMENT_NAMESPACE("http://schemas.ggf.org/bes/2006/08/bes-management"); static const std::string BES_ARC_NPREFIX("a-rex"); static const std::string BES_ARC_NAMESPACE("http://www.nordugrid.org/schemas/a-rex"); static const std::string DELEG_ARC_NPREFIX("arcdeleg"); static const std::string 
DELEG_ARC_NAMESPACE("http://www.nordugrid.org/schemas/delegation"); static const std::string BES_GLUE2PRE_NPREFIX("glue2pre"); static const std::string BES_GLUE2PRE_NAMESPACE("http://schemas.ogf.org/glue/2008/05/spec_2.0_d41_r01"); static const std::string BES_GLUE2_NPREFIX("glue2"); static const std::string BES_GLUE2_NAMESPACE("http://schemas.ogf.org/glue/2009/03/spec/2/0"); static const std::string BES_GLUE2D_NPREFIX("glue2d"); static const std::string BES_GLUE2D_NAMESPACE("http://schemas.ogf.org/glue/2009/03/spec_2.0_r1"); static const std::string ES_TYPES_NPREFIX("estypes"); static const std::string ES_TYPES_NAMESPACE("http://www.eu-emi.eu/es/2010/12/types"); static const std::string ES_CREATE_NPREFIX("escreate"); static const std::string ES_CREATE_NAMESPACE("http://www.eu-emi.eu/es/2010/12/creation/types"); static const std::string ES_DELEG_NPREFIX("esdeleg"); static const std::string ES_DELEG_NAMESPACE("http://www.eu-emi.eu/es/2010/12/delegation/types"); static const std::string ES_RINFO_NPREFIX("esrinfo"); static const std::string ES_RINFO_NAMESPACE("http://www.eu-emi.eu/es/2010/12/resourceinfo/types"); static const std::string ES_MANAG_NPREFIX("esmanag"); static const std::string ES_MANAG_NAMESPACE("http://www.eu-emi.eu/es/2010/12/activitymanagement/types"); static const std::string ES_AINFO_NPREFIX("esainfo"); static const std::string ES_AINFO_NAMESPACE("http://www.eu-emi.eu/es/2010/12/activity/types"); static const std::string WSRF_NAMESPACE("http://docs.oasis-open.org/wsrf/rp-2"); #define AREX_POLICY_OPERATION_URN "http://www.nordugrid.org/schemas/policy-arc/types/a-rex/operation" #define AREX_POLICY_OPERATION_ADMIN "Admin" #define AREX_POLICY_OPERATION_INFO "Info" // Id: http://www.nordugrid.org/schemas/policy-arc/types/arex/joboperation // Value: // Create - creation of new job // Modify - modification of job paramaeters - change state, write data. // Read - accessing job information - get status information, read data. 
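// (Added note: the ARexSecAttr class defined below tags each incoming SOAP
//  operation or HTTP method with one of the Id/Value pairs listed in these
//  comments; the attribute is then exported for authorization policy evaluation.)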
// Id: http://www.nordugrid.org/schemas/policy-arc/types/arex/operation // Value: // Admin - administrator level operation // Info - information about service class ARexSecAttr: public Arc::SecAttr { public: ARexSecAttr(const std::string& action); ARexSecAttr(const Arc::XMLNode op); virtual ~ARexSecAttr(void); virtual operator bool(void) const; virtual bool Export(Arc::SecAttrFormat format,Arc::XMLNode &val) const; virtual std::string get(const std::string& id) const; void SetResource(const std::string& service, const std::string& job, const std::string& action); protected: std::string action_; std::string id_; std::string service_; std::string job_; std::string file_; virtual bool equal(const Arc::SecAttr &b) const; }; ARexSecAttr::ARexSecAttr(const std::string& action) { id_=JOB_POLICY_OPERATION_URN; action_=action; } ARexSecAttr::ARexSecAttr(const Arc::XMLNode op) { if(MatchXMLNamespace(op,BES_FACTORY_NAMESPACE)) { if(MatchXMLName(op,"CreateActivity")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_CREATE; } else if(MatchXMLName(op,"GetActivityStatuses")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_READ; } else if(MatchXMLName(op,"TerminateActivities")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_MODIFY; } else if(MatchXMLName(op,"GetActivityDocuments")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_READ; } else if(MatchXMLName(op,"GetFactoryAttributesDocument")) { id_=AREX_POLICY_OPERATION_URN; action_=AREX_POLICY_OPERATION_INFO; } } else if(MatchXMLNamespace(op,BES_MANAGEMENT_NAMESPACE)) { if(MatchXMLName(op,"StopAcceptingNewActivities")) { id_=AREX_POLICY_OPERATION_URN; action_=AREX_POLICY_OPERATION_ADMIN; } else if(MatchXMLName(op,"StartAcceptingNewActivities")) { id_=AREX_POLICY_OPERATION_URN; action_=AREX_POLICY_OPERATION_ADMIN; } } else if(MatchXMLNamespace(op,BES_ARC_NAMESPACE)) { if(MatchXMLName(op,"ChangeActivityStatus")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_MODIFY; } else if(MatchXMLName(op,"MigrateActivity")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_MODIFY; } else if(MatchXMLName(op,"CacheCheck")) { id_=AREX_POLICY_OPERATION_URN; action_=AREX_POLICY_OPERATION_INFO; } } else if(MatchXMLNamespace(op,DELEG_ARC_NAMESPACE)) { if(MatchXMLName(op,"DelegateCredentialsInit")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_CREATE; } else if(MatchXMLName(op,"UpdateCredentials")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_MODIFY; } } else if(MatchXMLNamespace(op,WSRF_NAMESPACE)) { id_=AREX_POLICY_OPERATION_URN; action_=AREX_POLICY_OPERATION_INFO; } else if(MatchXMLNamespace(op,ES_CREATE_NAMESPACE)) { if(MatchXMLName(op,"CreateActivity")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_CREATE; } } else if(MatchXMLNamespace(op,ES_DELEG_NAMESPACE)) { if(MatchXMLName(op,"InitDelegation")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_CREATE; } else if(MatchXMLName(op,"PutDelegation")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_MODIFY; } else if(MatchXMLName(op,"GetDelegationInfo")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_READ; } } else if(MatchXMLNamespace(op,ES_RINFO_NAMESPACE)) { if(MatchXMLName(op,"GetResourceInfo")) { id_=AREX_POLICY_OPERATION_URN; action_=AREX_POLICY_OPERATION_INFO; } else if(MatchXMLName(op,"QueryResourceInfo")) { id_=AREX_POLICY_OPERATION_URN; action_=AREX_POLICY_OPERATION_INFO; } } else if(MatchXMLNamespace(op,ES_MANAG_NAMESPACE)) { 
if(MatchXMLName(op,"PauseActivity")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_MODIFY; } else if(MatchXMLName(op,"ResumeActivity")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_MODIFY; } else if(MatchXMLName(op,"ResumeActivity")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_MODIFY; } else if(MatchXMLName(op,"NotifyService")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_MODIFY; } else if(MatchXMLName(op,"CancelActivity")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_MODIFY; } else if(MatchXMLName(op,"WipeActivity")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_MODIFY; } else if(MatchXMLName(op,"RestartActivity")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_MODIFY; } else if(MatchXMLName(op,"GetActivityStatus")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_READ; } else if(MatchXMLName(op,"GetActivityInfo")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_READ; } } else if(MatchXMLNamespace(op,ES_AINFO_NAMESPACE)) { if(MatchXMLName(op,"ListActivities")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_READ; } else if(MatchXMLName(op,"GetActivityStatus")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_READ; } else if(MatchXMLName(op,"GetActivityInfo")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_READ; } } } void ARexSecAttr::SetResource(const std::string& service, const std::string& job, const std::string& action) { service_ = service; job_ = job; action_ = action; } ARexSecAttr::~ARexSecAttr(void) { } ARexSecAttr::operator bool(void) const { return !action_.empty(); } bool ARexSecAttr::equal(const SecAttr &b) const { try { const ARexSecAttr& a = (const ARexSecAttr&)b; return ((id_ == a.id_) && (action_ == a.action_)); } catch(std::exception&) { }; return false; } bool ARexSecAttr::Export(Arc::SecAttrFormat format,Arc::XMLNode &val) const { if(format == UNDEFINED) { } else if(format == ARCAuth) { Arc::NS ns; ns["ra"]="http://www.nordugrid.org/schemas/request-arc"; val.Namespaces(ns); val.Name("ra:Request"); Arc::XMLNode item = val.NewChild("ra:RequestItem"); if(!action_.empty()) { Arc::XMLNode action = item.NewChild("ra:Action"); action=action_; action.NewAttribute("Type")="string"; action.NewAttribute("AttributeId")=id_; }; // TODO: add resource part return true; } else { }; return false; } std::string ARexSecAttr::get(const std::string& id) const { if(id == "ACTION") return action_; if(id == "NAMESPACE") return id_; if(id == "SERVICE") return service_; if(id == "JOB") return job_; if(id == "FILE") return file_; return ""; }; static Arc::XMLNode BESFactoryResponse(Arc::PayloadSOAP& res,const char* opname) { Arc::XMLNode response = res.NewChild(BES_FACTORY_NPREFIX + ":" + opname + "Response"); Arc::WSAHeader(res).Action(BES_FACTORY_ACTIONS_BASE_URL + opname + "Response"); return response; } static Arc::XMLNode BESManagementResponse(Arc::PayloadSOAP& res,const char* opname) { Arc::XMLNode response = res.NewChild(BES_MANAGEMENT_NPREFIX + ":" + opname + "Response"); Arc::WSAHeader(res).Action(BES_MANAGEMENT_ACTIONS_BASE_URL + opname + "Response"); return response; } static Arc::XMLNode BESARCResponse(Arc::PayloadSOAP& res,const char* opname) { Arc::XMLNode response = res.NewChild(BES_ARC_NPREFIX + ":" + opname + "Response"); return response; } static Arc::XMLNode ESCreateResponse(Arc::PayloadSOAP& res,const char* opname) { Arc::XMLNode response = res.NewChild(ES_CREATE_NPREFIX + ":" + opname + "Response"); 
return response; } /* static Arc::XMLNode ESDelegResponse(Arc::PayloadSOAP& res,const char* opname) { Arc::XMLNode response = res.NewChild(ES_DELEG_NPREFIX + ":" + opname + "Response"); return response; } */ static Arc::XMLNode ESRInfoResponse(Arc::PayloadSOAP& res,const char* opname) { Arc::XMLNode response = res.NewChild(ES_RINFO_NPREFIX + ":" + opname + "Response"); return response; } static Arc::XMLNode ESManagResponse(Arc::PayloadSOAP& res,const char* opname) { Arc::XMLNode response = res.NewChild(ES_MANAG_NPREFIX + ":" + opname + "Response"); return response; } static Arc::XMLNode ESAInfoResponse(Arc::PayloadSOAP& res,const char* opname) { Arc::XMLNode response = res.NewChild(ES_AINFO_NPREFIX + ":" + opname + "Response"); return response; } //static Arc::LogStream logcerr(std::cerr); static Arc::Plugin* get_service(Arc::PluginArgument* arg) { Arc::ServicePluginArgument* srvarg = arg?dynamic_cast(arg):NULL; if(!srvarg) return NULL; ARexService* arex = new ARexService((Arc::Config*)(*srvarg),arg); if(!*arex) { delete arex; arex=NULL; }; return arex; } class ARexConfigContext:public Arc::MessageContextElement, public ARexGMConfig { public: ARexConfigContext(GMConfig& config,const std::string& uname,const std::string& grid_name,const std::string& service_endpoint): ARexGMConfig(config,uname,grid_name,service_endpoint) { }; virtual ~ARexConfigContext(void) { }; }; void CountedResource::Acquire(void) { lock_.lock(); while((limit_ >= 0) && (count_ >= limit_)) { cond_.wait(lock_); }; ++count_; lock_.unlock(); } void CountedResource::Release(void) { lock_.lock(); --count_; cond_.signal(); lock_.unlock(); } void CountedResource::MaxConsumers(int maxconsumers) { limit_ = maxconsumers; } CountedResource::CountedResource(int maxconsumers): limit_(maxconsumers),count_(0) { } CountedResource::~CountedResource(void) { } class CountedResourceLock { private: CountedResource& r_; public: CountedResourceLock(CountedResource& resource):r_(resource) { r_.Acquire(); }; ~CountedResourceLock(void) { r_.Release(); }; }; static std::string GetPath(std::string url){ std::string::size_type ds, ps; ds=url.find("//"); if (ds==std::string::npos) { ps=url.find("/"); } else { ps=url.find("/", ds+2); } if (ps==std::string::npos) return ""; return url.substr(ps); } Arc::MCC_Status ARexService::StopAcceptingNewActivities(ARexGMConfig& /*config*/,Arc::XMLNode /*in*/,Arc::XMLNode /*out*/) { return Arc::MCC_Status(); } Arc::MCC_Status ARexService::StartAcceptingNewActivities(ARexGMConfig& /*config*/,Arc::XMLNode /*in*/,Arc::XMLNode /*out*/) { return Arc::MCC_Status(); } Arc::MCC_Status ARexService::make_soap_fault(Arc::Message& outmsg, const char* resp) { Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(ns_,true); Arc::SOAPFault* fault = outpayload?outpayload->Fault():NULL; if(fault) { fault->Code(Arc::SOAPFault::Sender); if(!resp) { fault->Reason("Failed processing request"); } else { fault->Reason(resp); }; }; delete outmsg.Payload(outpayload); return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status ARexService::make_http_fault(Arc::Message& outmsg,int code,const char* resp) { Arc::PayloadRaw* outpayload = new Arc::PayloadRaw(); delete outmsg.Payload(outpayload); outmsg.Attributes()->set("HTTP:CODE",Arc::tostring(code)); if(resp) outmsg.Attributes()->set("HTTP:REASON",resp); return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); } Arc::MCC_Status ARexService::make_fault(Arc::Message& /*outmsg*/) { // That will cause 500 Internal Error in HTTP return Arc::MCC_Status(); } Arc::MCC_Status 
ARexService::make_empty_response(Arc::Message& outmsg) { Arc::PayloadRaw* outpayload = new Arc::PayloadRaw(); delete outmsg.Payload(outpayload); return Arc::MCC_Status(Arc::STATUS_OK); } ARexConfigContext* ARexService::get_configuration(Arc::Message& inmsg) { ARexConfigContext* config = NULL; Arc::MessageContextElement* mcontext = (*inmsg.Context())["arex.gmconfig"]; if(mcontext) { try { config = dynamic_cast(mcontext); logger_.msg(Arc::DEBUG,"Using cached local account '%s'", config->User().Name()); } catch(std::exception& e) { }; }; if(config) return config; // TODO: do configuration detection // TODO: do mapping to local unix name std::string uname; uname=inmsg.Attributes()->get("SEC:LOCALID"); if(uname.empty()) uname=uname_; if(uname.empty()) { if(getuid() == 0) { logger_.msg(Arc::ERROR, "Will not map to 'root' account by default"); return NULL; }; struct passwd pwbuf; char buf[4096]; struct passwd* pw; if(getpwuid_r(getuid(),&pwbuf,buf,sizeof(buf),&pw) == 0) { if(pw && pw->pw_name) { uname = pw->pw_name; }; }; }; if(uname.empty()) { logger_.msg(Arc::ERROR, "No local account name specified"); return NULL; }; logger_.msg(Arc::DEBUG,"Using local account '%s'",uname); std::string grid_name = inmsg.Attributes()->get("TLS:IDENTITYDN"); std::string endpoint = endpoint_; if(endpoint.empty()) { std::string http_endpoint = inmsg.Attributes()->get("HTTP:ENDPOINT"); std::string tcp_endpoint = inmsg.Attributes()->get("TCP:ENDPOINT"); bool https_proto = !grid_name.empty(); endpoint = tcp_endpoint; if(https_proto) { endpoint="https"+endpoint; } else { endpoint="http"+endpoint; }; endpoint+=GetPath(http_endpoint); }; config=new ARexConfigContext(config_,uname,grid_name,endpoint); if(config) { if(*config) { inmsg.Context()->Add("arex.gmconfig",config); } else { delete config; config=NULL; logger_.msg(Arc::ERROR, "Failed to acquire grid-manager's configuration"); }; }; return config; } static std::string GetPath(Arc::Message &inmsg,std::string &base) { base = inmsg.Attributes()->get("HTTP:ENDPOINT"); Arc::AttributeIterator iterator = inmsg.Attributes()->getAll("PLEXER:EXTENSION"); std::string path; if(iterator.hasMore()) { // Service is behind plexer path = *iterator; if(base.length() > path.length()) base.resize(base.length()-path.length()); } else { // Standalone service path=Arc::URL(base).Path(); base.resize(0); }; // Path is encoded in HTTP URLs path = Arc::uri_unencode(path); return path; } #define SOAP_NOT_SUPPORTED { \ logger_.msg(Arc::ERROR, "SOAP operation is not supported: %s", op.Name()); \ delete outpayload; \ return make_soap_fault(outmsg,"Operation not supported"); \ } Arc::MCC_Status ARexService::process(Arc::Message& inmsg,Arc::Message& outmsg) { // Split request path into parts: service, job and file path. 
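// (Added note, inferred from the code below: for a request such as
//  .../arex/<jobid>/<file path>, "id" typically becomes <jobid> and
//  "subpath" the remaining <file path>.)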
// TODO: make it HTTP independent std::string endpoint; std::string method = inmsg.Attributes()->get("HTTP:METHOD"); std::string id = GetPath(inmsg,endpoint); std::string clientid = (inmsg.Attributes()->get("TCP:REMOTEHOST"))+":"+(inmsg.Attributes()->get("TCP:REMOTEPORT")); logger_.msg(Arc::INFO, "Connection from %s: %s", inmsg.Attributes()->get("TCP:REMOTEHOST"), inmsg.Attributes()->get("TLS:IDENTITYDN")); if((inmsg.Attributes()->get("PLEXER:PATTERN").empty()) && id.empty()) id=endpoint; logger_.msg(Arc::VERBOSE, "process: method: %s", method); logger_.msg(Arc::VERBOSE, "process: endpoint: %s", endpoint); while(id[0] == '/') id=id.substr(1); std::string subpath; { std::string::size_type p = id.find('/'); if(p != std::string::npos) { subpath = id.substr(p); id.resize(p); while(subpath[0] == '/') subpath=subpath.substr(1); }; }; logger_.msg(Arc::VERBOSE, "process: id: %s", id); logger_.msg(Arc::VERBOSE, "process: subpath: %s", subpath); // Sort out request Arc::PayloadSOAP* inpayload = NULL; Arc::XMLNode op; ARexSecAttr* sattr = NULL; if(method == "POST") { logger_.msg(Arc::VERBOSE, "process: POST"); // Both input and output are supposed to be SOAP // Extracting payload try { inpayload = dynamic_cast(inmsg.Payload()); } catch(std::exception& e) { }; if(!inpayload) { logger_.msg(Arc::ERROR, "input is not SOAP"); return make_soap_fault(outmsg); }; if(logger_.getThreshold() <= Arc::VERBOSE) { std::string str; inpayload->GetDoc(str, true); logger_.msg(Arc::VERBOSE, "process: request=%s",str); }; // Analyzing request op = inpayload->Child(0); if(!op) { logger_.msg(Arc::ERROR, "input does not define operation"); return make_soap_fault(outmsg); }; logger_.msg(Arc::INFO, "process: operation: %s",op.Name()); // Adding A-REX attributes sattr = new ARexSecAttr(op); } else if(method == "GET") { sattr = new ARexSecAttr(std::string(JOB_POLICY_OPERATION_READ)); } else if(method == "HEAD") { sattr = new ARexSecAttr(std::string(JOB_POLICY_OPERATION_READ)); } else if(method == "PUT") { sattr = new ARexSecAttr(std::string(JOB_POLICY_OPERATION_MODIFY)); } if(sattr) { inmsg.Auth()->set("AREX",sattr); sattr->SetResource(endpoint,id,subpath); } if(!ProcessSecHandlers(inmsg,"incoming")) { logger_.msg(Arc::ERROR, "Security Handlers processing failed"); return make_soap_fault(outmsg, "Not authorized"); }; // Process grid-manager configuration if not done yet ARexConfigContext* config = get_configuration(inmsg); if(!config) { logger_.msg(Arc::ERROR, "Can't obtain configuration"); // Service is not operational return Arc::MCC_Status(); }; config->ClearAuths(); config->AddAuth(inmsg.Auth()); config->AddAuth(inmsg.AuthContext()); // Identify which of served endpoints request is for. 
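// A minimal standalone sketch (simplified, with placeholder namespace URIs) of
// the dispatch scheme used below: POST requests carry SOAP and are routed by
// the XML namespace of the operation element, while GET/HEAD/PUT requests go
// straight to the data-transfer handlers. The handler names are illustrative.
#include <iostream>
#include <map>
#include <string>

int main() {
  std::map<std::string, std::string> soap_handlers;
  soap_handlers["urn:example:bes-factory"]   = "BES factory (create/status/terminate)";
  soap_handlers["urn:example:es-management"] = "EMI-ES activity management";
  soap_handlers["urn:example:wsrf"]          = "information system query";

  std::string method = "POST";                        // would come from HTTP:METHOD
  std::string op_ns  = "urn:example:bes-factory";     // namespace of the SOAP operation
  if(method == "POST") {
    std::map<std::string, std::string>::const_iterator h = soap_handlers.find(op_ns);
    if(h != soap_handlers.end()) std::cout << "dispatch to " << h->second << std::endl;
    else std::cout << "SOAP operation not supported" << std::endl;
  } else {
    std::cout << "dispatch to data transfer (" << method << ")" << std::endl;
  }
  return 0;
}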
// Using simplified algorithm - POST for SOAP messages, // GET and PUT for data transfer if(method == "POST") { // Check if request is for top of tree (BES factory) or particular // job (listing activity) if(id.empty()) { // Factory operations logger_.msg(Arc::VERBOSE, "process: factory endpoint"); Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(ns_); Arc::PayloadSOAP& res = *outpayload; // Preparing known namespaces outpayload->Namespaces(ns_); if(config_.ARCInterfaceEnabled() && MatchXMLNamespace(op,BES_FACTORY_NAMESPACE)) { // Aplying known namespaces inpayload->Namespaces(ns_); if(MatchXMLName(op,"CreateActivity")) { CountedResourceLock cl_lock(beslimit_); CreateActivity(*config,op,BESFactoryResponse(res,"CreateActivity"),clientid); } else if(MatchXMLName(op,"GetActivityStatuses")) { CountedResourceLock cl_lock(beslimit_); GetActivityStatuses(*config,op,BESFactoryResponse(res,"GetActivityStatuses")); } else if(MatchXMLName(op,"TerminateActivities")) { CountedResourceLock cl_lock(beslimit_); TerminateActivities(*config,op,BESFactoryResponse(res,"TerminateActivities")); } else if(MatchXMLName(op,"GetActivityDocuments")) { CountedResourceLock cl_lock(beslimit_); GetActivityDocuments(*config,op,BESFactoryResponse(res,"GetActivityDocuments")); } else if(MatchXMLName(op,"GetFactoryAttributesDocument")) { CountedResourceLock cl_lock(beslimit_); GetFactoryAttributesDocument(*config,op,BESFactoryResponse(res,"GetFactoryAttributesDocument")); } else { SOAP_NOT_SUPPORTED; } } else if(config_.ARCInterfaceEnabled() && MatchXMLNamespace(op,BES_MANAGEMENT_NAMESPACE)) { // Aplying known namespaces inpayload->Namespaces(ns_); if(MatchXMLName(op,"StopAcceptingNewActivities")) { CountedResourceLock cl_lock(beslimit_); StopAcceptingNewActivities(*config,op,BESManagementResponse(res,"StopAcceptingNewActivities")); } else if(MatchXMLName(op,"StartAcceptingNewActivities")) { CountedResourceLock cl_lock(beslimit_); StartAcceptingNewActivities(*config,op,BESManagementResponse(res,"StartAcceptingNewActivities")); } else { SOAP_NOT_SUPPORTED; } } else if(config_.EMIESInterfaceEnabled() && MatchXMLNamespace(op,ES_CREATE_NAMESPACE)) { // Aplying known namespaces inpayload->Namespaces(ns_); if(MatchXMLName(op,"CreateActivity")) { CountedResourceLock cl_lock(beslimit_); ESCreateActivities(*config,op,ESCreateResponse(res,"CreateActivity"),clientid); } else { SOAP_NOT_SUPPORTED; } } else if(config_.EMIESInterfaceEnabled() && MatchXMLNamespace(op,ES_RINFO_NAMESPACE)) { // Aplying known namespaces inpayload->Namespaces(ns_); if(MatchXMLName(op,"GetResourceInfo")) { CountedResourceLock cl_lock(infolimit_); ESGetResourceInfo(*config,op,ESRInfoResponse(res,"GetResourceInfo")); } else if(MatchXMLName(op,"QueryResourceInfo")) { CountedResourceLock cl_lock(infolimit_); ESQueryResourceInfo(*config,op,ESRInfoResponse(res,"QueryResourceInfo")); } else { SOAP_NOT_SUPPORTED; } } else if(config_.EMIESInterfaceEnabled() && MatchXMLNamespace(op,ES_MANAG_NAMESPACE)) { // Aplying known namespaces inpayload->Namespaces(ns_); if(MatchXMLName(op,"PauseActivity")) { CountedResourceLock cl_lock(beslimit_); ESPauseActivity(*config,op,ESManagResponse(res,"PauseActivity")); } else if(MatchXMLName(op,"ResumeActivity")) { CountedResourceLock cl_lock(beslimit_); ESResumeActivity(*config,op,ESManagResponse(res,"ResumeActivity")); } else if(MatchXMLName(op,"NotifyService")) { CountedResourceLock cl_lock(beslimit_); ESNotifyService(*config,op,ESManagResponse(res,"NotifyService")); } else if(MatchXMLName(op,"CancelActivity")) { 
CountedResourceLock cl_lock(beslimit_); ESCancelActivity(*config,op,ESManagResponse(res,"CancelActivity")); } else if(MatchXMLName(op,"WipeActivity")) { CountedResourceLock cl_lock(beslimit_); ESWipeActivity(*config,op,ESManagResponse(res,"WipeActivity")); } else if(MatchXMLName(op,"RestartActivity")) { CountedResourceLock cl_lock(beslimit_); ESRestartActivity(*config,op,ESManagResponse(res,"RestartActivity")); } else if(MatchXMLName(op,"GetActivityStatus")) { CountedResourceLock cl_lock(beslimit_); ESGetActivityStatus(*config,op,ESManagResponse(res,"GetActivityStatus")); } else if(MatchXMLName(op,"GetActivityInfo")) { CountedResourceLock cl_lock(beslimit_); ESGetActivityInfo(*config,op,ESManagResponse(res,"GetActivityInfo")); } else { SOAP_NOT_SUPPORTED; } } else if(config_.EMIESInterfaceEnabled() && MatchXMLNamespace(op,ES_AINFO_NAMESPACE)) { // Aplying known namespaces inpayload->Namespaces(ns_); if(MatchXMLName(op,"ListActivities")) { CountedResourceLock cl_lock(beslimit_); ESListActivities(*config,op,ESAInfoResponse(res,"ListActivities")); } else if(MatchXMLName(op,"GetActivityStatus")) { CountedResourceLock cl_lock(beslimit_); ESGetActivityStatus(*config,op,ESAInfoResponse(res,"GetActivityStatus")); } else if(MatchXMLName(op,"GetActivityInfo")) { CountedResourceLock cl_lock(beslimit_); ESGetActivityInfo(*config,op,ESAInfoResponse(res,"GetActivityInfo")); } else { SOAP_NOT_SUPPORTED; } } else if(config_.ARCInterfaceEnabled() && MatchXMLNamespace(op,BES_ARC_NAMESPACE)) { // Aplying known namespaces inpayload->Namespaces(ns_); if(MatchXMLName(op,"ChangeActivityStatus")) { CountedResourceLock cl_lock(beslimit_); ChangeActivityStatus(*config,op,BESARCResponse(res,"ChangeActivityStatus")); } else if(MatchXMLName(op,"MigrateActivity")) { CountedResourceLock cl_lock(beslimit_); MigrateActivity(*config,op,BESFactoryResponse(res,"MigrateActivity"),clientid); } else if(MatchXMLName(op,"CacheCheck")) { CacheCheck(*config,*inpayload,*outpayload); } else { SOAP_NOT_SUPPORTED; } } else if(delegation_stores_.MatchNamespace(*inpayload)) { // Aplying known namespaces inpayload->Namespaces(ns_); CountedResourceLock cl_lock(beslimit_); std::string credentials; if(!delegation_stores_.Process(config->GmConfig().DelegationDir(), *inpayload,*outpayload,config->GridName(),credentials)) { delete outpayload; return make_soap_fault(outmsg); }; if(!credentials.empty()) { // Credentials obtained as outcome of operation if(MatchXMLNamespace(op,DELEG_ARC_NAMESPACE)) { // ARC delegation is done per job but stored under // own id. So storing must be done outside processing code. UpdateCredentials(*config,op,outpayload->Child(),credentials); } else if(MatchXMLNamespace(op,ES_DELEG_NAMESPACE)) { // ES has delegations assigned their own ids and are // already updated in delegation_stores_.Process() #if 1 // But for compatibility during intermediate period store delegations in // per-job proxy file too. // Obtaining all jobs associated with that delegation id would cause // scanning all job.#.local files for associates delegationid. // To avoid this costly procedure job.#.proxy will be updated // when job state changes (including restart). /* if(op.Name() == "PutDelegation") { // PutDelegation // DelegationID // Credential std::string id = op["DelegationId"]; if(!id.empty()) { delegation_stores_[config->GmConfig().DelegationDir()]. 
}; }; */ #endif }; }; } else if(config_.ARCInterfaceEnabled() && MatchXMLNamespace(op,WSRF_NAMESPACE)) { CountedResourceLock cl_lock(infolimit_); /* Arc::SOAPEnvelope* out_ = infodoc_.Arc::InformationInterface::Process(*inpayload); if(out_) { out_->Swap(*outpayload); delete out_; } else { delete outpayload; return make_soap_fault(outmsg); }; */ delete outpayload; Arc::MessagePayload* mpayload = infodoc_.Process(*inpayload); if(!mpayload) { return make_soap_fault(outmsg); }; try { outpayload = dynamic_cast(mpayload); } catch(std::exception& e) { }; outmsg.Payload(mpayload); if(logger_.getThreshold() <= Arc::VERBOSE) { if(outpayload) { std::string str; outpayload->GetDoc(str, true); logger_.msg(Arc::VERBOSE, "process: response=%s",str); } else { logger_.msg(Arc::VERBOSE, "process: response is not SOAP"); }; }; if(!ProcessSecHandlers(outmsg,"outgoing")) { logger_.msg(Arc::ERROR, "Security Handlers processing failed"); delete outmsg.Payload(NULL); return Arc::MCC_Status(); }; return Arc::MCC_Status(Arc::STATUS_OK); } else { SOAP_NOT_SUPPORTED; }; if(logger_.getThreshold() <= Arc::VERBOSE) { std::string str; outpayload->GetDoc(str, true); logger_.msg(Arc::VERBOSE, "process: response=%s",str); }; outmsg.Payload(outpayload); } else { // Listing operations for session directories // TODO: proper failure like interface is not supported }; if(!ProcessSecHandlers(outmsg,"outgoing")) { logger_.msg(Arc::ERROR, "Security Handlers processing failed"); delete outmsg.Payload(NULL); return Arc::MCC_Status(); }; return Arc::MCC_Status(Arc::STATUS_OK); } else if(method == "GET") { // HTTP plugin either provides buffer or stream logger_.msg(Arc::VERBOSE, "process: GET"); logger_.msg(Arc::INFO, "GET: id %s path %s", id, subpath); CountedResourceLock cl_lock(datalimit_); // TODO: in case of error generate some content Arc::MCC_Status ret = Get(inmsg,outmsg,*config,id,subpath); if(ret) { if(!ProcessSecHandlers(outmsg,"outgoing")) { logger_.msg(Arc::ERROR, "Security Handlers processing failed"); delete outmsg.Payload(NULL); return Arc::MCC_Status(); }; }; return ret; } else if(method == "HEAD") { Arc::MCC_Status ret = Head(inmsg,outmsg,*config,id,subpath); if(ret) { if(!ProcessSecHandlers(outmsg,"outgoing")) { logger_.msg(Arc::ERROR, "Security Handlers processing failed"); delete outmsg.Payload(NULL); return Arc::MCC_Status(); }; }; return ret; } else if(method == "PUT") { logger_.msg(Arc::VERBOSE, "process: PUT"); CountedResourceLock cl_lock(datalimit_); Arc::MCC_Status ret = Put(inmsg,outmsg,*config,id,subpath); if(!ret) return make_fault(outmsg); // Put() does not generate response yet ret=make_empty_response(outmsg); if(ret) { if(!ProcessSecHandlers(outmsg,"outgoing")) { logger_.msg(Arc::ERROR, "Security Handlers processing failed"); delete outmsg.Payload(NULL); return Arc::MCC_Status(); }; }; return ret; } else if(!method.empty()) { logger_.msg(Arc::VERBOSE, "process: method %s is not supported",method); return make_http_fault(outmsg,501,"Not Implemented"); } else { logger_.msg(Arc::VERBOSE, "process: method is not defined"); return Arc::MCC_Status(); }; return Arc::MCC_Status(); } static void information_collector_starter(void* arg) { if(!arg) return; ((ARexService*)arg)->InformationCollector(); } void ARexService::gm_threads_starter(void* arg) { if(!arg) return; ARexService* arex = (ARexService*)arg; arex->gm_threads_starter(); } void ARexService::gm_threads_starter() { if (!endpoint_.empty()) { // no need to do this if no WS interface // Remove the WS log from the log destinations. 
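// A standalone sketch (with hypothetical destination names) of the list
// manipulation performed below: assuming the log destinations are ordered
// [gm, ws, stderr], the second entry (the WS log) is erased so that the GM and
// infoprovider threads started from here keep logging only to the GM log.
#include <iostream>
#include <list>
#include <string>

int main() {
  std::list<std::string> dests;
  dests.push_back("gm.log");
  dests.push_back("ws.log");
  dests.push_back("stderr");
  if(dests.size() > 1) {
    std::list<std::string>::iterator i = dests.begin();
    ++i;                 // second destination - assumed to be the WS log
    dests.erase(i);
  }
  for(std::list<std::string>::const_iterator d = dests.begin(); d != dests.end(); ++d)
    std::cout << *d << std::endl;      // prints gm.log, then stderr
  return 0;
}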
// Here we assume the order is gm, ws, [stderr (arched -f)] std::list dests = Arc::Logger::getRootLogger().getDestinations(); if (dests.size() > 1) { std::list::iterator i = dests.begin(); ++i; dests.erase(i); Arc::Logger::getRootLogger().setThreadContext(); Arc::Logger::getRootLogger().removeDestinations(); Arc::Logger::getRootLogger().addDestinations(dests); } } // Run grid-manager in thread if ((gmrun_.empty()) || (gmrun_ == "internal")) { gm_ = new GridManager(config_); if (!(*gm_)) { logger_.msg(Arc::ERROR, "Failed to run Grid Manager thread"); delete gm_; gm_=NULL; return; } } // Start info collector thread CreateThreadFunction(&information_collector_starter, this); } ARexService::ARexService(Arc::Config *cfg,Arc::PluginArgument *parg):Arc::Service(cfg,parg), logger_(Arc::Logger::rootLogger, "A-REX"), delegation_stores_(), infodoc_(true), inforeg_(NULL), infoprovider_wakeup_period_(0), all_jobs_count_(0), gm_(NULL) { valid = false; config_.SetJobLog(new JobLog()); config_.SetJobsMetrics(new JobsMetrics()); config_.SetJobPerfLog(new Arc::JobPerfLog()); config_.SetContPlugins(new ContinuationPlugins()); config_.SetCredPlugin(new RunPlugin()); // logger_.addDestination(logcerr); // Define supported namespaces ns_[BES_ARC_NPREFIX]=BES_ARC_NAMESPACE; ns_[BES_GLUE2_NPREFIX]=BES_GLUE2_NAMESPACE; ns_[BES_GLUE2PRE_NPREFIX]=BES_GLUE2PRE_NAMESPACE; ns_[BES_GLUE2D_NPREFIX]=BES_GLUE2D_NAMESPACE; ns_[BES_FACTORY_NPREFIX]=BES_FACTORY_NAMESPACE; ns_[BES_MANAGEMENT_NPREFIX]=BES_MANAGEMENT_NAMESPACE; ns_[DELEG_ARC_NPREFIX]=DELEG_ARC_NAMESPACE; ns_[ES_TYPES_NPREFIX]=ES_TYPES_NAMESPACE; ns_[ES_CREATE_NPREFIX]=ES_CREATE_NAMESPACE; ns_[ES_DELEG_NPREFIX]=ES_DELEG_NAMESPACE; ns_[ES_RINFO_NPREFIX]=ES_RINFO_NAMESPACE; ns_[ES_MANAG_NPREFIX]=ES_MANAG_NAMESPACE; ns_[ES_AINFO_NPREFIX]=ES_AINFO_NAMESPACE; ns_["wsa"]="http://www.w3.org/2005/08/addressing"; ns_["jsdl"]="http://schemas.ggf.org/jsdl/2005/11/jsdl"; ns_["wsrf-bf"]="http://docs.oasis-open.org/wsrf/bf-2"; ns_["wsrf-r"]="http://docs.oasis-open.org/wsrf/r-2"; ns_["wsrf-rw"]="http://docs.oasis-open.org/wsrf/rw-2"; // Obtain information from configuration endpoint_=(std::string)((*cfg)["endpoint"]); uname_=(std::string)((*cfg)["usermap"]["defaultLocalName"]); std::string gmconfig=(std::string)((*cfg)["gmconfig"]); if (Arc::lower((std::string)((*cfg)["publishStaticInfo"])) == "yes") { publishstaticinfo_=true; } else { publishstaticinfo_=false; } config_.SetDelegations(&delegation_stores_); if(gmconfig.empty()) { // No external configuration file means configuration is // directly embedded into this configuration node. config_.SetXMLNode(*cfg); // Create temporary file with this node. This is mainly for // external GM processes such as infoproviders and LRMS scripts so that in // the case of multiple A-REXes in one HED they know which one to serve. In // future hopefully this can be replaced by passing the service id to those // scripts instead. The temporary file is deleted in this destructor. 
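// A standalone sketch (plain POSIX, not the Arc::TmpFileCreate API) of the
// idea described above: when the configuration is embedded, it is written to a
// temporary file so external helper processes can read it, and the file is
// removed once it is no longer needed. The path template and XML content here
// are placeholders.
#include <stdlib.h>
#include <unistd.h>
#include <string>

int main() {
  char path[] = "/tmp/arex-gmconfig-XXXXXX";
  int fd = mkstemp(path);                  // creates the file with mode 0600
  if(fd == -1) return 1;
  std::string config = "<ArcConfig/>";     // placeholder for the embedded config node
  ssize_t n = write(fd, config.c_str(), config.size());
  close(fd);
  // ... hand 'path' over to helper scripts here ...
  unlink(path);                            // the real service removes it in its destructor
  return (n == (ssize_t)config.size()) ? 0 : 1;
}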
Arc::TmpFileCreate(gmconfig, "", getuid(), getgid(), S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); logger.msg(Arc::DEBUG, "Storing configuration in temporary file %s", gmconfig); cfg->SaveToFile(gmconfig); config_.SetConfigFile(gmconfig); config_.SetConfigIsTemp(true); if (!config_.Load()) { logger_.msg(Arc::ERROR, "Failed to process service configuration"); return; } } else { // External configuration file config_.SetConfigFile(gmconfig); if (!config_.Load()) { logger_.msg(Arc::ERROR, "Failed to process configuration in %s", gmconfig); return; } } // Check for mandatory commands in configuration if (config_.ControlDir().empty()) { logger.msg(Arc::ERROR, "No control directory set in configuration"); return; } if (config_.SessionRoots().empty()) { logger.msg(Arc::ERROR, "No session directory set in configuration"); return; } if (config_.DefaultLRMS().empty()) { logger.msg(Arc::ERROR, "No LRMS set in configuration"); return; } // create control directory if not yet done if(!config_.CreateControlDirectory()) { logger_.msg(Arc::ERROR, "Failed to create control directory %s", config_.ControlDir()); return; } // Pass information about delegation db type { DelegationStore::DbType deleg_db_type = DelegationStore::DbBerkeley; switch(config_.DelegationDBType()) { case GMConfig::deleg_db_bdb: deleg_db_type = DelegationStore::DbBerkeley; break; case GMConfig::deleg_db_sqlite: deleg_db_type = DelegationStore::DbSQLite; break; }; delegation_stores_.SetDbType(deleg_db_type); }; // Set default queue if none given if(config_.DefaultQueue().empty() && (config_.Queues().size() == 1)) { config_.SetDefaultQueue(config_.Queues().front()); } gmrun_ = (std::string)((*cfg)["gmrun"]); common_name_ = (std::string)((*cfg)["commonName"]); long_description_ = (std::string)((*cfg)["longDescription"]); lrms_name_ = (std::string)((*cfg)["LRMSName"]); // Must be URI. URL may be too restrictive, but is safe. if(!Arc::URL(lrms_name_)) { if (!lrms_name_.empty()) { logger_.msg(Arc::ERROR, "Provided LRMSName is not a valid URL: %s",lrms_name_); } else { logger_.msg(Arc::VERBOSE, "No LRMSName is provided. This is needed if you wish to completely comply with the BES specifications."); }; // Filling something to make it follow BES specs lrms_name_ = "uri:undefined"; }; // TODO: check for enumeration values os_name_ = (std::string)((*cfg)["OperatingSystem"]); std::string debugLevel = (std::string)((*cfg)["debugLevel"]); if(!debugLevel.empty()) { logger_.setThreshold(Arc::istring_to_level(debugLevel)); }; int valuei; if ((!(*cfg)["InfoproviderWakeupPeriod"]) || (!Arc::stringto((std::string)((*cfg)["InfoproviderWakeupPeriod"]),infoprovider_wakeup_period_))) { infoprovider_wakeup_period_ = DEFAULT_INFOPROVIDER_WAKEUP_PERIOD; }; if ((!(*cfg)["InfosysInterfaceMaxClients"]) || (!Arc::stringto((std::string)((*cfg)["InfosysInterfaceMaxClients"]),valuei))) { valuei = DEFAULT_INFOSYS_MAX_CLIENTS; }; infolimit_.MaxConsumers(valuei); if ((!(*cfg)["JobControlInterfaceMaxClients"]) || (!Arc::stringto((std::string)((*cfg)["JobControlInterfaceMaxClients"]),valuei))) { valuei = DEFAULT_JOBCONTROL_MAX_CLIENTS; }; beslimit_.MaxConsumers(valuei); if ((!(*cfg)["DataTransferInterfaceMaxClients"]) || (!Arc::stringto((std::string)((*cfg)["DataTransferInterfaceMaxClients"]),valuei))) { valuei = DEFAULT_DATATRANSFER_MAX_CLIENTS; }; datalimit_.MaxConsumers(valuei); // If WS interface is enabled and multiple log files are configured then here // the log splits between WS interface operations and GM job processing. 
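// A standalone sketch (using the C++11 standard library instead of the Arc
// thread primitives; class names here are illustrative) of the counted-resource
// pattern configured just above with MaxConsumers(): each interface (infosys,
// job control, data transfer) serves at most the configured number of
// concurrent clients, and a scoped lock blocks while that bound is reached.
#include <condition_variable>
#include <mutex>

class Limiter {
public:
  explicit Limiter(int limit): limit_(limit), count_(0) {}
  void Acquire() {
    std::unique_lock<std::mutex> l(m_);
    while(limit_ >= 0 && count_ >= limit_) cv_.wait(l);   // block while the limit is reached
    ++count_;
  }
  void Release() {
    std::lock_guard<std::mutex> l(m_);
    --count_;
    cv_.notify_one();                                     // wake one waiting client
  }
private:
  int limit_;                    // negative value means "no limit"
  int count_;
  std::mutex m_;
  std::condition_variable cv_;
};

// RAII guard analogous to CountedResourceLock
class LimiterLock {
public:
  explicit LimiterLock(Limiter& l): l_(l) { l_.Acquire(); }
  ~LimiterLock() { l_.Release(); }
private:
  Limiter& l_;
};

int main() {
  Limiter joblimit(2);           // e.g. a JobControlInterfaceMaxClients value of 2
  {
    LimiterLock lock(joblimit);  // released automatically at the end of the scope
    // ... handle one job-control request ...
  }
  return 0;
}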
// Start separate thread to start GM and info collector threads so they can // log to GM log after we remove it in this thread Arc::SimpleCounter counter; if (!CreateThreadFunction(&gm_threads_starter, this, &counter)) return; counter.wait(); if ((gmrun_.empty() || gmrun_ == "internal") && !gm_) return; // GM didn't start // If WS is used then remove gm log destination from this thread if (!endpoint_.empty()) { // Assume that gm log is first in list - potentially dangerous std::list dests = logger.getRootLogger().getDestinations(); if (dests.size() > 1) { dests.pop_front(); logger.getRootLogger().removeDestinations(); logger.getRootLogger().addDestinations(dests); } } valid=true; inforeg_ = new Arc::InfoRegisters(*cfg,this); } ARexService::~ARexService(void) { if(inforeg_) delete inforeg_; thread_count_.RequestCancel(); delete gm_; // This should stop all GM-related threads too thread_count_.WaitForExit(); // Here A-REX threads are waited for // There should be no more threads using resources - can proceed if(config_.ConfigIsTemp()) unlink(config_.ConfigFile().c_str()); delete config_.GetContPlugins(); delete config_.GetJobLog(); delete config_.GetJobsMetrics(); } } // namespace ARex extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "a-rex", "HED:SERVICE", NULL, 0, &ARex::get_service }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/job.h0000644000000000000000000000012412754411222021546 xustar000000000000000027 mtime=1471287954.153765 27 atime=1513200576.302723 30 ctime=1513200662.704780566 nordugrid-arc-5.4.2/src/services/a-rex/job.h0000644000175000002070000001472412754411222021623 0ustar00mockbuildmock00000000000000#ifndef __ARC_AREX_JOB_H__ #define __ARC_AREX_JOB_H__ #include #include #include #include #include #include #include "grid-manager/files/ControlFileContent.h" #include "tools.h" namespace ARex { class GMConfig; #define JOB_POLICY_OPERATION_URN "http://www.nordugrid.org/schemas/policy-arc/types/a-rex/joboperation" #define JOB_POLICY_OPERATION_CREATE "Create" #define JOB_POLICY_OPERATION_MODIFY "Modify" #define JOB_POLICY_OPERATION_READ "Read" class ARexGMConfig { private: const GMConfig& config_; Arc::User user_; bool readonly_; std::string grid_name_; // temporary solution std::string service_endpoint_; // temporary solution std::list auths_; // Separate lists outside GMConfig as they can be substituted per user std::vector session_roots_; std::vector session_roots_non_draining_; static Arc::Logger logger; public: ARexGMConfig(const GMConfig& config,const std::string& uname,const std::string& grid_name,const std::string& service_endpoint); operator bool(void) const { return (bool)user_; }; bool operator!(void) const { return !(bool)user_; }; const Arc::User& User(void) const { return user_; }; const GMConfig& GmConfig() const { return config_; }; bool ReadOnly(void) const { return readonly_; }; const std::string& GridName(void) const { return grid_name_; }; const std::string& Endpoint(void) const { return service_endpoint_; }; void AddAuth(Arc::MessageAuth* auth) { auths_.push_back(auth); }; void ClearAuths(void) { auths_.clear(); }; std::list::iterator beginAuth(void) { return auths_.begin(); }; std::list::iterator endAuth(void) { return auths_.end(); }; std::vector SessionRootsNonDraining(void) { return session_roots_non_draining_; }; std::vector SessionRoots(void) { return session_roots_; }; }; typedef enum { ARexJobNoError, ARexJobInternalError, // Failed during some internal operation - like writing some file 
ARexJobConfigurationError, // Problem detected which can be fixed by adjusting configuration of service ARexJobDescriptionUnsupportedError, // Job asks for feature or combination not supported by service ARexJobDescriptionMissingError, // Job is missing optional but needed for this service element ARexJobDescriptionSyntaxError, // Job description is malformed - missing elements, wrong names, etc. ARexJobDescriptionLogicalError // Job request otherwise corect has some values out of scope of service } ARexJobFailure; /** This class represents convenience interface to manage jobs handled by Grid Manager. It works mostly through corresponding classes and functions of Grid Manager. */ class ARexJob { private: std::string id_; std::string failure_; ARexJobFailure failure_type_; bool allowed_to_see_; bool allowed_to_maintain_; Arc::Logger& logger_; /** Returns true if job exists and authorization was checked without errors. Fills information about authorization in this instance. */ bool is_allowed(bool fast = false); ARexGMConfig& config_; JobLocalDescription job_; bool make_job_id(void); bool delete_job_id(void); bool update_credentials(const std::string& credentials); public: /** Create instance which is an interface to existing job */ ARexJob(const std::string& id,ARexGMConfig& config,Arc::Logger& logger,bool fast_auth_check = false); /** Create new job with provided JSDL description */ ARexJob(Arc::XMLNode jsdl,ARexGMConfig& config,const std::string& delegid,const std::string& clientid,Arc::Logger& logger,JobIDGenerator& idgenerator,Arc::XMLNode migration = Arc::XMLNode()); operator bool(void) { return !id_.empty(); }; bool operator!(void) { return id_.empty(); }; /** Returns textual description of failure of last operation */ std::string Failure(void) { std::string r=failure_; failure_=""; failure_type_=ARexJobNoError; return r; }; operator ARexJobFailure(void) { return failure_type_; }; /** Return ID assigned to job */ std::string ID(void) { return id_; }; /** Fills provided jsdl with job description */ bool GetDescription(Arc::XMLNode& jsdl); /** Cancel processing/execution of job */ bool Cancel(void); /** Remove job from local pool */ bool Clean(void); /** Resume execution of job after error */ bool Resume(void); /** Returns current state of job */ std::string State(void); /** Returns current state of job and sets job_pending to true if job is pending due to external limits */ std::string State(bool& job_pending); /** Returns true if job has failed */ bool Failed(void); /** Returns state at which job failed and sets cause to information what caused job failure: "internal" for server initiated and "client" for canceled on client request. */ std::string FailedState(std::string& cause); /** Returns time when job was created. */ Arc::Time Created(void); /** Returns time when job state was last modified. */ Arc::Time Modified(void); /** Returns path to session directory */ std::string SessionDir(void); /** Returns name of virtual log directory */ std::string LogDir(void); /** Return number of jobs associated with this configuration. TODO: total for all user configurations. */ static int TotalJobs(ARexGMConfig& config,Arc::Logger& logger); /** Returns list of user's jobs. Fine-grained ACL is ignored. 
*/ static std::list Jobs(ARexGMConfig& config,Arc::Logger& logger); /** Creates file in job's session directory and returns handler */ Arc::FileAccess* CreateFile(const std::string& filename); /** Opens file in job's session directory and returns handler */ Arc::FileAccess* OpenFile(const std::string& filename,bool for_read,bool for_write); std::string GetFilePath(const std::string& filename); bool ReportFileComplete(const std::string& filename); bool ReportFilesComplete(); /** Opens log file in control directory */ int OpenLogFile(const std::string& name); std::string GetLogFilePath(const std::string& name); /** Opens directory inside session directory */ Arc::FileAccess* OpenDir(const std::string& dirname); /** Returns list of existing log files */ std::list LogFiles(void); /** Updates job credentials */ bool UpdateCredentials(const std::string& credentials); /** Select a session dir to use for this job */ bool ChooseSessionDir(const std::string& jobid, std::string& sessiondir); }; } // namespace ARex #endif nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/terminate_activities.cpp0000644000000000000000000000012311426376255025555 xustar000000000000000026 mtime=1280965805.59036 27 atime=1513200575.961719 30 ctime=1513200662.688780371 nordugrid-arc-5.4.2/src/services/a-rex/terminate_activities.cpp0000644000175000002070000000423111426376255025623 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "job.h" #include "arex.h" namespace ARex { Arc::MCC_Status ARexService::TerminateActivities(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out) { /* TerminateActivities ActivityIdentifier (wsa:EndpointReferenceType, unbounded) TerminateActivitiesResponse Response (unbounded) ActivityIdentifier Terminated (boolean) Fault (soap:Fault) */ { std::string s; in.GetXML(s); logger_.msg(Arc::VERBOSE, "TerminateActivities: request = \n%s", s); }; for(int n = 0;;++n) { Arc::XMLNode id = in["ActivityIdentifier"][n]; if(!id) break; // Create place for response Arc::XMLNode resp = out.NewChild("bes-factory:Response"); resp.NewChild(id); std::string jobid = Arc::WSAEndpointReference(id).ReferenceParameters()["a-rex:JobID"]; if(jobid.empty()) { // EPR is wrongly formated or not an A-REX EPR logger_.msg(Arc::ERROR, "TerminateActivities: non-AREX job requested"); Arc::SOAPFault fault(resp,Arc::SOAPFault::Sender,"Missing a-rex:JobID in ActivityIdentifier"); UnknownActivityIdentifierFault(fault,"Unrecognized EPR in ActivityIdentifier"); continue; }; // Look for obtained ID ARexJob job(jobid,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "TerminateActivities: job %s - %s", jobid, job.Failure()); Arc::SOAPFault fault(resp,Arc::SOAPFault::Sender,"No corresponding activity found"); UnknownActivityIdentifierFault(fault,("No activity "+jobid+" found: "+job.Failure()).c_str()); continue; }; /* // Check permissions on that ID */ // Cancel job (put a mark) bool result = job.Cancel(); if(result) { resp.NewChild("bes-factory:Terminated")="true"; } else { resp.NewChild("bes-factory:Terminated")="false"; // Or should it be a fault? 
}; }; { std::string s; out.GetXML(s); logger_.msg(Arc::VERBOSE, "TerminateActivities: response = \n%s", s); }; return Arc::MCC_Status(Arc::STATUS_OK); } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/a-rex.service.in0000644000000000000000000000012712754431715023642 xustar000000000000000027 mtime=1471296461.229277 30 atime=1513200649.765622315 30 ctime=1513200662.681780285 nordugrid-arc-5.4.2/src/services/a-rex/a-rex.service.in0000644000175000002070000000031512754431715023703 0ustar00mockbuildmock00000000000000[Unit] Description=ARC grid manager After=local_fs.target remote_fs.target [Service] Type=forking PIDFile=/var/run/arched-arex.pid ExecStart=@pkgdatadir@/a-rex-start [Install] WantedBy=multi-user.target nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/perferator.in0000644000000000000000000000012713024226133023323 xustar000000000000000027 mtime=1481714779.277331 30 atime=1513200649.829623098 30 ctime=1513200662.682780297 nordugrid-arc-5.4.2/src/services/a-rex/perferator.in0000644000175000002070000000427413024226133023374 0ustar00mockbuildmock00000000000000#!/bin/bash # script to write static system data and upload to perflog.nordugrid.org # together with performance data taken by data, arex, infosys and backends # runs once a day, run by a-rex when configured through the helper option # in the grid-manager block in arc.conf. # e.g.: # [grid-manager] # helper=". /usr/share/arc/perferator" # Path to arc.conf can be given with --config option, default is /etc/arc.conf # TODO: Upload performance data to perflog.nordugrid.org command_exists () { type "$1" &> /dev/null ; } write_static_system_data () { outfile="$1" echo "=== Timestamp: ===" >> $outfile date >> $outfile echo "" >> $outfile echo "=== ARC version: ===" >> $outfile arched --version >> $outfile echo "" >> $outfile echo "=== fs types: ===" >> $outfile controldir_fstype=`df --output=fstype $CONFIG_controldir | grep -v Type` echo "controldir fstype: $controldir_fstype" >> $outfile sessiondir_fstype=`df --output=fstype $CONFIG_sessiondir | grep -v Type` echo "sessiondir fstype: $sessiondir_fstype" >> $outfile echo "" >> $outfile echo "=== CPU info: ===" >> $outfile echo "no. of CPUs: `getconf _NPROCESSORS_ONLN`" >> $outfile cat /proc/cpuinfo >> $outfile echo "" >> $outfile echo "=== Mem info: ===" >> $outfile cat /proc/meminfo >> $outfile echo "" >> $outfile echo "=== OS info: ===" >> $outfile uname -a >> $outfile cat /etc/*-release >> $outfile cat /proc/version >> $outfile echo "" >> $outfile } # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkgdatadir="$basedir" . "$pkgdatadir/config_parser_compat.sh" || exit $? ARC_CONFIG=${ARC_CONFIG:-/etc/arc.conf} config_parse_file $ARC_CONFIG 1>&2 || exit $? config_import_section "common" config_import_section "grid-manager" HOSTNAME=$CONFIG_hostname PERFDIR=${CONFIG_perflogdir:-/var/log/arc/perfdata} # sleep a bit, waiting for performance data to accumulate sleep 86400 # merge infosys files MERGEDATE=`date +%Y%m%d` write_static_system_data $PERFDIR/sysinfo.perflog # extract infosys data from nytprofd databases $pkgdatadir/PerfData.pl --config=$ARC_CONFIG || exit $? 
nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/FileChunks.cpp0000644000000000000000000000012412574535552023377 xustar000000000000000027 mtime=1441971050.007158 27 atime=1513200576.401725 30 ctime=1513200662.699780505 nordugrid-arc-5.4.2/src/services/a-rex/FileChunks.cpp0000644000175000002070000001114512574535552023446 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "FileChunks.h" namespace ARex { void FileChunks::Print(void) { //int n = 0; lock.lock(); for(chunks_t::iterator c = chunks.begin();c!=chunks.end();++c) { //Hopi::logger.msg(Arc::DEBUG, "Chunk %u: %u - %u",n,c->first,c->second); }; lock.unlock(); } void FileChunks::Size(off_t size) { lock.lock(); if(size > FileChunks::size) FileChunks::size = size; lock.unlock(); } FileChunks::FileChunks(FileChunksList& container): list(container),self(container.files.end()),size(0), last_accessed(time(NULL)),refcount(0) { } FileChunks::FileChunks(const FileChunks& obj): lock(),list(obj.list),self(obj.list.files.end()),chunks(obj.chunks), size(0),last_accessed(time(NULL)),refcount(0) { } FileChunks* FileChunksList::GetStuck(void) { if(((int)(time(NULL)-last_timeout)) < timeout) return NULL; lock.lock(); for(std::map::iterator f = files.begin(); f != files.end();++f) { f->second->lock.lock(); if((f->second->refcount <= 0) && (((int)(time(NULL) - f->second->last_accessed)) >= timeout )) { ++(f->second->refcount); f->second->lock.unlock(); lock.unlock(); return f->second; } f->second->lock.unlock(); } last_timeout=time(NULL); lock.unlock(); return NULL; } void FileChunksList::RemoveStuck(void) { std::list stuck; for(;;) { FileChunks* s = GetStuck(); if(!s) break; stuck.push_back(s); } for(std::list::iterator s = stuck.begin(); s!=stuck.end();++s) { (*s)->Remove(); } } /* FileChunks* FileChunksList::GetFirst(void) { lock.lock(); std::map::iterator f = files.begin(); if(f != files.end()) { f->second.lock.lock(); ++(f->second.refcount); f->second.lock.unlock(); lock.unlock(); return &(f->second); }; lock.unlock(); return NULL; } */ void FileChunks::Remove(void) { list.lock.lock(); lock.lock(); --refcount; if(refcount <= 0) { if(self != list.files.end()) { lock.unlock(); list.files.erase(self); list.lock.unlock(); delete this; return; } } lock.unlock(); list.lock.unlock(); } FileChunks& FileChunksList::Get(std::string path) { lock.lock(); std::map::iterator c = files.find(path); if(c == files.end()) { c=files.insert(std::pair(path,new FileChunks(*this))).first; c->second->lock.lock(); c->second->self=c; } else { c->second->lock.lock(); } ++(c->second->refcount); c->second->lock.unlock(); lock.unlock(); RemoveStuck(); return *(c->second); } void FileChunks::Release(void) { lock.lock(); if(chunks.empty()) { lock.unlock(); Remove(); } else { --refcount; lock.unlock(); } } void FileChunks::Add(off_t start,off_t csize) { off_t end = start+csize; lock.lock(); last_accessed=time(NULL); if(end > size) size=end; for(chunks_t::iterator chunk = chunks.begin();chunk!=chunks.end();++chunk) { if((start >= chunk->first) && (start <= chunk->second)) { // New chunk starts within existing chunk if(end > chunk->second) { // Extend chunk chunk->second=end; // Merge overlapping chunks chunks_t::iterator chunk_ = chunk; ++chunk_; for(;chunk_!=chunks.end();) { if(chunk->second < chunk_->first) break; // Merge two chunks if(chunk_->second > chunk->second) chunk->second=chunk_->second; chunk_=chunks.erase(chunk_); }; }; lock.unlock(); return; } else if((end >= chunk->first) && (end <= chunk->second)) { // New chunk ends within 
existing chunk if(start < chunk->first) { // Extend chunk chunk->first=start; }; lock.unlock(); return; } else if(end < chunk->first) { // New chunk is between existing chunks or first chunk chunks.insert(chunk,std::pair(start,end)); lock.unlock(); return; }; }; // New chunk is last chunk or there are no chunks currently chunks.insert(chunks.end(),std::pair(start,end)); lock.unlock(); } bool FileChunks::Complete(void) { lock.lock(); bool r = ((chunks.size() == 1) && (chunks.begin()->first == 0) && (chunks.begin()->second == size)); lock.unlock(); return r; } FileChunksList::FileChunksList(void):timeout(600), last_timeout(time(NULL)) { } FileChunksList::~FileChunksList(void) { lock.lock(); // Not sure lock.unlock(); } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/test.cpp0000644000000000000000000000012411115022101022266 xustar000000000000000027 mtime=1228153921.476466 27 atime=1513200576.474725 30 ctime=1513200662.708780615 nordugrid-arc-5.4.2/src/services/a-rex/test.cpp0000644000175000002070000003141011115022101022332 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include int main(void) { signal(SIGTTOU,SIG_IGN); signal(SIGTTIN,SIG_IGN); signal(SIGPIPE,SIG_IGN); Arc::Logger logger(Arc::Logger::rootLogger, "Test"); Arc::LogStream logcerr(std::cerr); Arc::Logger::rootLogger.addDestination(logcerr); // Load service chain logger.msg(Arc::INFO, "Creating service side chain"); Arc::Config service_config("service.xml"); if(!service_config) { logger.msg(Arc::ERROR, "Failed to load service configuration"); return -1; }; Arc::MCCLoader service_loader(service_config); logger.msg(Arc::INFO, "Service side MCCs are loaded"); // for(;;) sleep(10); logger.msg(Arc::INFO, "Creating client side chain"); // Create client chain Arc::Config client_config("client.xml"); if(!client_config) { logger.msg(Arc::ERROR, "Failed to load client configuration"); return -1; }; Arc::MCCLoader client_loader(client_config); logger.msg(Arc::INFO, "Client side MCCs are loaded"); Arc::MCC* client_entry = client_loader["soap"]; if(!client_entry) { logger.msg(Arc::ERROR, "Client chain does not have entry point"); return -1; }; Arc::MessageContext context; // ------------------------------------------------------- // Preparing delegation // ------------------------------------------------------- std::string credentials; { std::ifstream ic("./cert.pem"); for(;!ic.eof();) { char buf[256]; ic.get(buf,sizeof(buf),0); if(ic.gcount() <= 0) break; credentials.append(buf,ic.gcount()); }; }; { std::ifstream ic("key.pem"); for(;!ic.eof();) { char buf[256]; ic.get(buf,sizeof(buf),0); if(ic.gcount() <= 0) break; credentials.append(buf,ic.gcount()); }; }; Arc::DelegationProviderSOAP deleg(credentials); if(!credentials.empty()) { logger.msg(Arc::INFO, "Initiating delegation procedure"); if(!deleg.DelegateCredentialsInit(*client_entry,&context)) { logger.msg(Arc::ERROR, "Failed to initiate delegation"); return -1; }; }; // ------------------------------------------------------- // Requesting information about service // ------------------------------------------------------- { Arc::NS ns; Arc::InformationRequest inforeq; Arc::PayloadSOAP req(*(inforeq.SOAP())); Arc::Message reqmsg; Arc::Message repmsg; Arc::MessageAttributes attributes_in; Arc::MessageAttributes attributes_out; reqmsg.Payload(&req); reqmsg.Attributes(&attributes_in); reqmsg.Context(&context); 
repmsg.Attributes(&attributes_out); repmsg.Context(&context); Arc::MCC_Status status = client_entry->process(reqmsg,repmsg); if(!status) { logger.msg(Arc::ERROR, "Request failed"); return -1; }; logger.msg(Arc::INFO, "Request succeed!!!"); if(repmsg.Payload() == NULL) { logger.msg(Arc::ERROR, "There is no response"); return -1; }; Arc::PayloadSOAP* resp = NULL; try { resp = dynamic_cast(repmsg.Payload()); } catch(std::exception&) { }; if(resp == NULL) { logger.msg(Arc::ERROR, "Response is not SOAP"); delete repmsg.Payload(); return -1; }; { std::string str; resp->GetXML(str); std::cout << "Response: " << str << std::endl; }; Arc::InformationResponse inforesp(*resp); if(!inforesp) { logger.msg(Arc::ERROR, "Response is not expected WS-RP"); delete repmsg.Payload(); return -1; }; std::list results = inforesp.Result(); int n = 0; for(std::list::iterator i = results.begin();i!=results.end();++i) { std::string str; i->GetXML(str); std::cout << "Response("<(jsdl_file,jsdl_str,0); act_doc.NewChild(Arc::XMLNode(jsdl_str)); deleg.DelegatedToken(op); req.GetXML(jsdl_str); // Send job request Arc::Message reqmsg; Arc::Message repmsg; Arc::MessageAttributes attributes_in; Arc::MessageAttributes attributes_out; reqmsg.Payload(&req); reqmsg.Attributes(&attributes_in); reqmsg.Context(&context); repmsg.Attributes(&attributes_out); repmsg.Context(&context); Arc::MCC_Status status = client_entry->process(reqmsg,repmsg); if(!status) { logger.msg(Arc::ERROR, "Request failed"); return -1; }; logger.msg(Arc::INFO, "Request succeed!!!"); if(repmsg.Payload() == NULL) { logger.msg(Arc::ERROR, "There is no response"); return -1; }; Arc::PayloadSOAP* resp = NULL; try { resp = dynamic_cast(repmsg.Payload()); } catch(std::exception&) { }; if(resp == NULL) { logger.msg(Arc::ERROR, "Response is not SOAP"); delete repmsg.Payload(); return -1; }; { std::string str; resp->GetXML(str); std::cout << "Response: " << str << std::endl; }; (*resp)["CreateActivityResponse"]["ActivityIdentifier"].New(id); { std::string str; id.GetDoc(str); std::cout << "Job ID: " << std::endl << str << std::endl; }; delete repmsg.Payload(); }; // ------------------------------------------------------- // Requesting job's JSDL from service // ------------------------------------------------------- { std::string str; logger.msg(Arc::INFO, "Creating and sending request"); Arc::PayloadSOAP req(arex_ns); Arc::XMLNode jobref = req.NewChild("bes-factory:GetActivityDocuments").NewChild(id); // Send job request Arc::Message reqmsg; Arc::Message repmsg; Arc::MessageAttributes attributes_in; Arc::MessageAttributes attributes_out; Arc::MessageContext context; reqmsg.Payload(&req); reqmsg.Attributes(&attributes_in); reqmsg.Context(&context); repmsg.Attributes(&attributes_out); repmsg.Context(&context); req.GetXML(str); std::cout << "REQUEST: " << str << std::endl; Arc::MCC_Status status = client_entry->process(reqmsg,repmsg); if(!status) { logger.msg(Arc::ERROR, "Request failed"); return -1; }; logger.msg(Arc::INFO, "Request succeed!!!"); if(repmsg.Payload() == NULL) { logger.msg(Arc::ERROR, "There is no response"); return -1; }; Arc::PayloadSOAP* resp = NULL; try { resp = dynamic_cast(repmsg.Payload()); } catch(std::exception&) { }; if(resp == NULL) { logger.msg(Arc::ERROR, "Response is not SOAP"); delete repmsg.Payload(); return -1; }; resp->GetXML(str); std::cout << "Response: " << str << std::endl; delete resp; }; // ------------------------------------------------------- // Requesting job's status from service // 
------------------------------------------------------- { std::string str; logger.msg(Arc::INFO, "Creating and sending request"); Arc::PayloadSOAP req(arex_ns); Arc::XMLNode jobref = req.NewChild("bes-factory:GetActivityStatuses").NewChild(id); // Send job request Arc::Message reqmsg; Arc::Message repmsg; Arc::MessageAttributes attributes_in; Arc::MessageAttributes attributes_out; Arc::MessageContext context; reqmsg.Payload(&req); reqmsg.Attributes(&attributes_in); reqmsg.Context(&context); repmsg.Attributes(&attributes_out); repmsg.Context(&context); req.GetXML(str); std::cout << "REQUEST: " << str << std::endl; Arc::MCC_Status status = client_entry->process(reqmsg,repmsg); if(!status) { logger.msg(Arc::ERROR, "Request failed"); return -1; }; logger.msg(Arc::INFO, "Request succeed!!!"); if(repmsg.Payload() == NULL) { logger.msg(Arc::ERROR, "There is no response"); return -1; }; Arc::PayloadSOAP* resp = NULL; try { resp = dynamic_cast(repmsg.Payload()); } catch(std::exception&) { }; if(resp == NULL) { logger.msg(Arc::ERROR, "Response is not SOAP"); delete repmsg.Payload(); return -1; }; resp->GetXML(str); std::cout << "Response: " << str << std::endl; delete resp; }; // ------------------------------------------------------- // Requesting job's termination // ------------------------------------------------------- { std::string str; logger.msg(Arc::INFO, "Creating and sending request"); Arc::PayloadSOAP req(arex_ns); Arc::XMLNode jobref = req.NewChild("bes-factory:TerminateActivities").NewChild(id); // Send job request Arc::Message reqmsg; Arc::Message repmsg; Arc::MessageAttributes attributes_in; Arc::MessageAttributes attributes_out; Arc::MessageContext context; reqmsg.Payload(&req); reqmsg.Attributes(&attributes_in); reqmsg.Context(&context); repmsg.Attributes(&attributes_out); repmsg.Context(&context); req.GetXML(str); std::cout << "REQUEST: " << str << std::endl; Arc::MCC_Status status = client_entry->process(reqmsg,repmsg); if(!status) { logger.msg(Arc::ERROR, "Request failed"); return -1; }; logger.msg(Arc::INFO, "Request succeed!!!"); if(repmsg.Payload() == NULL) { logger.msg(Arc::ERROR, "There is no response"); return -1; }; Arc::PayloadSOAP* resp = NULL; try { resp = dynamic_cast(repmsg.Payload()); } catch(std::exception&) { }; if(resp == NULL) { logger.msg(Arc::ERROR, "Response is not SOAP"); delete repmsg.Payload(); return -1; }; resp->GetXML(str); std::cout << "Response: " << str << std::endl; delete resp; }; // ------------------------------------------------------- // Requesting service's attributes // ------------------------------------------------------- { std::string str; logger.msg(Arc::INFO, "Creating and sending request"); Arc::PayloadSOAP req(arex_ns); req.NewChild("bes-factory:GetFactoryAttributesDocument"); // Send job request Arc::Message reqmsg; Arc::Message repmsg; Arc::MessageAttributes attributes_in; Arc::MessageAttributes attributes_out; Arc::MessageContext context; reqmsg.Payload(&req); reqmsg.Attributes(&attributes_in); reqmsg.Context(&context); repmsg.Attributes(&attributes_out); repmsg.Context(&context); req.GetXML(str); std::cout << "REQUEST: " << str << std::endl; Arc::MCC_Status status = client_entry->process(reqmsg,repmsg); if(!status) { logger.msg(Arc::ERROR, "Request failed"); return -1; }; logger.msg(Arc::INFO, "Request succeed!!!"); if(repmsg.Payload() == NULL) { logger.msg(Arc::ERROR, "There is no response"); return -1; }; Arc::PayloadSOAP* resp = NULL; try { resp = dynamic_cast(repmsg.Payload()); } catch(std::exception&) { }; if(resp == NULL) { 
logger.msg(Arc::ERROR, "Response is not SOAP"); delete repmsg.Payload(); return -1; }; resp->GetXML(str); std::cout << "Response: " << str << std::endl; delete resp; }; }; for(;;) sleep(10); return 0; } nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/ldif0000644000000000000000000000013213214316027021462 xustar000000000000000030 mtime=1513200663.096785361 30 atime=1513200668.718854121 30 ctime=1513200663.096785361 nordugrid-arc-5.4.2/src/services/a-rex/ldif/0000755000175000002070000000000013214316027021605 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/ldif/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515023601 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200602.836048348 30 ctime=1513200663.089785275 nordugrid-arc-5.4.2/src/services/a-rex/ldif/Makefile.am0000644000175000002070000000107612052416515023647 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libldif.la noinst_PROGRAMS = test ldif2xml libldif_la_SOURCES = LDIFtoXML.cpp LDIFtoXML.h libldif_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libldif_la_LIBADD = $(top_builddir)/src/hed/libs/common/libarccommon.la ldif2xml_SOURCES = main.cpp ldif2xml_CXXFLAGS = -I$(top_srcdir)/include $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) ldif2xml_LDADD = libldif.la test_SOURCES = test.cpp test_CXXFLAGS = -I$(top_srcdir)/include $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) test_LDADD = libldif.la $(GLIBMM_LIBS) $(LIBXML2_LIBS) nordugrid-arc-5.4.2/src/services/a-rex/ldif/PaxHeaders.7502/Makefile.in0000644000000000000000000000013013214315732023604 xustar000000000000000030 mtime=1513200602.893049045 30 atime=1513200650.203627672 28 ctime=1513200663.0917853 nordugrid-arc-5.4.2/src/services/a-rex/ldif/Makefile.in0000644000175000002070000006615413214315732023670 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ noinst_PROGRAMS = test$(EXEEXT) ldif2xml$(EXEEXT) subdir = src/services/a-rex/ldif DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libldif_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libldif_la_OBJECTS = libldif_la-LDIFtoXML.lo libldif_la_OBJECTS = $(am_libldif_la_OBJECTS) libldif_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libldif_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ PROGRAMS = $(noinst_PROGRAMS) am_ldif2xml_OBJECTS = ldif2xml-main.$(OBJEXT) ldif2xml_OBJECTS = $(am_ldif2xml_OBJECTS) ldif2xml_DEPENDENCIES = libldif.la ldif2xml_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(ldif2xml_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_test_OBJECTS = test-test.$(OBJEXT) test_OBJECTS = $(am_test_OBJECTS) am__DEPENDENCIES_1 = test_DEPENDENCIES = libldif.la $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) test_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(test_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) 
$(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libldif_la_SOURCES) $(ldif2xml_SOURCES) $(test_SOURCES) DIST_SOURCES = $(libldif_la_SOURCES) $(ldif2xml_SOURCES) \ $(test_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ 
GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ 
am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libldif.la libldif_la_SOURCES = LDIFtoXML.cpp LDIFtoXML.h libldif_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libldif_la_LIBADD = $(top_builddir)/src/hed/libs/common/libarccommon.la ldif2xml_SOURCES = main.cpp ldif2xml_CXXFLAGS = -I$(top_srcdir)/include $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) ldif2xml_LDADD = libldif.la test_SOURCES = test.cpp test_CXXFLAGS = -I$(top_srcdir)/include $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) test_LDADD = libldif.la $(GLIBMM_LIBS) $(LIBXML2_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/ldif/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/ldif/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libldif.la: $(libldif_la_OBJECTS) $(libldif_la_DEPENDENCIES) $(libldif_la_LINK) $(libldif_la_OBJECTS) $(libldif_la_LIBADD) $(LIBS) clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list ldif2xml$(EXEEXT): $(ldif2xml_OBJECTS) $(ldif2xml_DEPENDENCIES) @rm -f ldif2xml$(EXEEXT) $(ldif2xml_LINK) $(ldif2xml_OBJECTS) $(ldif2xml_LDADD) $(LIBS) test$(EXEEXT): $(test_OBJECTS) $(test_DEPENDENCIES) @rm -f test$(EXEEXT) $(test_LINK) $(test_OBJECTS) $(test_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ldif2xml-main.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libldif_la-LDIFtoXML.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-test.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libldif_la-LDIFtoXML.lo: LDIFtoXML.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libldif_la_CXXFLAGS) $(CXXFLAGS) -MT libldif_la-LDIFtoXML.lo -MD -MP -MF $(DEPDIR)/libldif_la-LDIFtoXML.Tpo -c -o libldif_la-LDIFtoXML.lo `test -f 'LDIFtoXML.cpp' || echo '$(srcdir)/'`LDIFtoXML.cpp 
@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libldif_la-LDIFtoXML.Tpo $(DEPDIR)/libldif_la-LDIFtoXML.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='LDIFtoXML.cpp' object='libldif_la-LDIFtoXML.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libldif_la_CXXFLAGS) $(CXXFLAGS) -c -o libldif_la-LDIFtoXML.lo `test -f 'LDIFtoXML.cpp' || echo '$(srcdir)/'`LDIFtoXML.cpp ldif2xml-main.o: main.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ldif2xml_CXXFLAGS) $(CXXFLAGS) -MT ldif2xml-main.o -MD -MP -MF $(DEPDIR)/ldif2xml-main.Tpo -c -o ldif2xml-main.o `test -f 'main.cpp' || echo '$(srcdir)/'`main.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ldif2xml-main.Tpo $(DEPDIR)/ldif2xml-main.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='main.cpp' object='ldif2xml-main.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ldif2xml_CXXFLAGS) $(CXXFLAGS) -c -o ldif2xml-main.o `test -f 'main.cpp' || echo '$(srcdir)/'`main.cpp ldif2xml-main.obj: main.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ldif2xml_CXXFLAGS) $(CXXFLAGS) -MT ldif2xml-main.obj -MD -MP -MF $(DEPDIR)/ldif2xml-main.Tpo -c -o ldif2xml-main.obj `if test -f 'main.cpp'; then $(CYGPATH_W) 'main.cpp'; else $(CYGPATH_W) '$(srcdir)/main.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ldif2xml-main.Tpo $(DEPDIR)/ldif2xml-main.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='main.cpp' object='ldif2xml-main.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ldif2xml_CXXFLAGS) $(CXXFLAGS) -c -o ldif2xml-main.obj `if test -f 'main.cpp'; then $(CYGPATH_W) 'main.cpp'; else $(CYGPATH_W) '$(srcdir)/main.cpp'; fi` test-test.o: test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -MT test-test.o -MD -MP -MF $(DEPDIR)/test-test.Tpo -c -o test-test.o `test -f 'test.cpp' || echo '$(srcdir)/'`test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test-test.Tpo $(DEPDIR)/test-test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test.cpp' object='test-test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -c -o test-test.o `test -f 'test.cpp' || echo '$(srcdir)/'`test.cpp test-test.obj: test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -MT test-test.obj -MD -MP -MF $(DEPDIR)/test-test.Tpo -c -o test-test.obj `if test -f 'test.cpp'; then $(CYGPATH_W) 'test.cpp'; else $(CYGPATH_W) '$(srcdir)/test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test-test.Tpo $(DEPDIR)/test-test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test.cpp' object='test-test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ 
DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -c -o test-test.obj `if test -f 'test.cpp'; then $(CYGPATH_W) 'test.cpp'; else $(CYGPATH_W) '$(srcdir)/test.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ clean-noinstPROGRAMS mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES clean-noinstPROGRAMS \ ctags distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/ldif/PaxHeaders.7502/LDIFtoXML.cpp0000644000000000000000000000012411741502232023704 xustar000000000000000027 mtime=1334215834.878809 27 atime=1513200576.409725 30 ctime=1513200663.092785312 nordugrid-arc-5.4.2/src/services/a-rex/ldif/LDIFtoXML.cpp0000644000175000002070000001277711741502232023767 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "LDIFtoXML.h" namespace ARex { static bool get_ldif_string(std::istream& ldif,std::string& str) { while(ldif) { getline(ldif,str); if(str.empty()) continue; if(str[0] == '#') continue; return true; }; return false; } static void strtolower(std::string& str) { std::string::size_type l = str.length(); char* s = (char*)(str.c_str()); for(;l>0;--l,++s) *s=tolower(*s); } static void trim(std::string& str) { std::string::size_type first = str.find_first_not_of(' '); if(first == std::string::npos) { str.resize(0); return; }; std::string::size_type last = str.find_last_not_of(' '); str=str.substr(first,last-first+1); return; } static bool split_ldif_path(const std::string& str,std::list >& path) { std::string::size_type cur = 0; while(true) { std::string::size_type p = str.find('=',cur); if(p == std::string::npos) return true; std::string name = str.substr(cur,p-cur); std::string::size_type e = str.find(',',p); if(e == std::string::npos) e = str.length(); std::string val = str.substr(p+1,e-p-1); trim(name); trim(val); strtolower(name); strtolower(val); path.push_front(std::pair(name,val)); cur=e+1; }; return false; } static bool compare_paths(const std::list >& path1,const std::list >& path2,int size) { std::list >::const_iterator i1 = path1.begin(); std::list >::const_iterator i2 = path2.begin(); for(;size>0;--size) { if((i1 == path1.end()) && (i2 == path2.end())) break; if(i1 == path1.end()) return false; if(i2 == path2.end()) return false; if(i1->first != i2->first) return false; if(i1->second != i2->second) return false; ++i1; ++i2; }; return true; } static Arc::XMLNode path_to_XML(const std::list >& path,Arc::XMLNode node) { Arc::XMLNode cur = node; std::list >::const_iterator i = path.begin(); for(;i!=path.end();++i) { Arc::XMLNode n = cur[i->first]; Arc::XMLNode nn; for(int num = 0;;++num) { nn=n[num]; if(!nn) break; if((std::string)(nn.Attribute("name")) == i->second) break; }; if(!nn) { nn=cur.NewChild(i->first); nn.NewAttribute("name")=i->second; }; cur=nn; }; return cur; } static void reduce_name(std::string& name,Arc::XMLNode x) { std::string::size_type p = std::string::npos; for(;;) { p=name.rfind('-',p); if(p == std::string::npos) break; std::string urn = "urn:"+name.substr(0,p); std::string prefix = x.NamespacePrefix(urn.c_str()); if(!prefix.empty()) { name=prefix+":"+name.substr(p+1); break; }; --p; }; } static void reduce_names(Arc::XMLNode x) { if(x.Size() == 0) return; std::string name = x.Name(); reduce_name(name,x); x.Name(name.c_str()); for(int n = 0;;++n) { Arc::XMLNode x_ = x.Child(n); if(!x_) break; reduce_names(x_); }; } static void reduce_prefix(std::string& prefix) { std::string::size_type p = 0; p=0; for(;p > base; split_ldif_path(ldif_base,base); std::string str; if(!get_ldif_string(ldif,str)) return true; for(;;) { // LDIF processing loop for(;;) { // Looking for dn: if(strncasecmp(str.c_str(),"dn:",3) == 0) break; if(!get_ldif_string(ldif,str)) { reduce_names(xml); return true; }; }; str.replace(0,3,""); std::list > dn; split_ldif_path(str,dn); if(base.size() > dn.size()) continue; // Above base 
if(!compare_paths(base,dn,base.size())) continue; // Wrong base // Removing base for(int n = 0;n #include #include namespace ARex { bool LDIFtoXML(std::istream& ldif,const std::string& ldif_base,Arc::XMLNode xml); } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/ldif/PaxHeaders.7502/test.cpp0000644000000000000000000000012411741502232023221 xustar000000000000000027 mtime=1334215834.878809 27 atime=1513200576.411725 30 ctime=1513200663.096785361 nordugrid-arc-5.4.2/src/services/a-rex/ldif/test.cpp0000644000175000002070000000061311741502232023266 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "LDIFtoXML.h" int main(int /*args*/,char* argv[]) { std::ifstream f(argv[1]); std::string ldif_base = "Mds-Vo-name=local,O=Grid"; Arc::NS ns; Arc::XMLNode xml(ns,"LDIF"); ARex::LDIFtoXML(f,ldif_base,xml); std::string s; xml.GetDoc(s); std::cout< #endif #include #include #include "LDIFtoXML.h" int main(int argc,char* argv[]) { std::string base; std::istream* in = &std::cin; std::istream* fin = NULL; Arc::NS ns; Arc::XMLNode xml(ns,"LDIFTree"); int r = -1; if(argc < 2) { std::cerr<<"Usage: ldif2xml LDIF_base [input_file [output_file]] "<= 3) in = (fin = new std::ifstream(argv[2])); if(*in) { if(ARex::LDIFtoXML(*in,base,xml)) { std::string s; xml.GetDoc(s); if(argc < 4) { std::cout<new(); # my $lrms_info = $collector->get_info($options); # # Arguments: # $options - a hash reference containing options. This module will check it # against $lrms_options_schema and the LRMS plugin's own schema # and then pass it on to the LRMS plugin. # # Returns: # $lrms_info - a hash reference containing all information collected from # the LRMS. This module will check it against # $lrms_info_schema (see below) ############################################################################## # Schemas ############################################################################## # # The usage of these schemas is described in InfoChecker.pm # # $lrms_options_schema - for checking $options hash. This is just a minimal # schema, LRMS plugins may use an extended version # $lrms_info_schema - for checking data returned by LRMS modules my $lrms_options_schema = { 'lrms' => '', # name of the LRMS module 'queues' => { # queue names are keys in this hash '*' => { 'users' => [ '' ] # list of user IDs to query in the LRMS } }, 'jobs' => [ '' ] # list of jobs IDs to query in the LRMS }; my $lrms_info_schema = { 'cluster' => { 'lrms_type' => '', 'lrms_glue_type' => '*', # one of: bqs condor fork loadleveler lsf openpbs sungridengine torque torquemaui ... 
'lrms_version' => '', 'schedpolicy' => '*', 'totalcpus' => '', 'queuedcpus' => '', 'usedcpus' => '', 'queuedjobs' => '', 'runningjobs' => '', 'cpudistribution' => '' }, 'queues' => { '*' => { 'status' => '', 'maxrunning' => '', # the max number of jobs allowed to run in this queue 'maxqueuable' => '*', # the max number of jobs allowed to be queued 'maxuserrun' => '*', # the max number of jobs that a single user can run 'maxcputime' => '*', # units: seconds (per-slot) 'maxtotalcputime' => '*', # units: seconds 'mincputime' => '*', # units: seconds 'defaultcput' => '*', # units: seconds 'maxwalltime' => '*', # units: seconds 'minwalltime' => '*', # units: seconds 'defaultwallt' => '*', # units: seconds 'running' => '', # the number of cpus being occupied by running jobs 'queued' => '', # the number of queued jobs 'suspended' => '*', # the number of suspended jobs 'total' => '*', # the total number of jobs in this queue 'totalcpus' => '', # the number of cpus dedicated to this queue 'preemption' => '*', 'acl_users' => [ '*' ], 'users' => { '*' => { 'freecpus' => { '*' => '' # key: # of cpus, value: time limit in minutes (0 for unlimited) }, 'queuelength' => '' } } } }, 'jobs' => { '*' => { 'status' => '', 'cpus' => '*', 'rank' => '*', 'mem' => '*', # units: kB 'walltime' => '*', # units: seconds 'cputime' => '*', # units: seconds 'reqwalltime' => '*', # units: seconds 'reqcputime' => '*', # units: seconds 'nodes' => [ '*' ], # names of nodes where the job runs 'comment' => [ '*' ] } }, 'nodes' => { '*' => { # key: hostname of the node (as known to the LRMS) 'isavailable' => '', # is available for running jobs 'isfree' => '', # is available and not yet fully used, can accept more jobs 'tags' => [ '*' ], # tags associated to nodes, i.e. node properties in PBS 'vmem' => '*', # virtual memory, units: kb 'pmem' => '*', # physical memory, units: kb 'slots' => '*', # job slots or virtual processors 'lcpus' => '*', # cpus visible to the os 'pcpus' => '*', # number of sockets 'sysname' => '*', # what would uname -s print on the node 'release' => '*', # what would uname -r print on the node 'machine' => '*', # what would uname -m print (if the node would run linux) } } }; our $log = LogUtils->getLogger("LRMSInfo"); sub collect($) { my ($options) = @_; my ($checker, @messages); my ($lrms_name, $share) = split / /, $options->{lrms}; $options->{scheduling_policy} = $options->{SchedulingPolicy} if $options->{SchedulingPolicy}; $log->error('lrms option is missing') unless $lrms_name; load_lrms($lrms_name); # merge schema exported by the LRMS plugin my $schema = { %$lrms_options_schema, %{get_lrms_options_schema()} }; $checker = InfoChecker->new($schema); @messages = $checker->verify($options); $log->warning("config key options->$_") foreach @messages; $log->fatal("Some required options are missing") if @messages; my $result = get_lrms_info($options); use Data::Dumper('Dumper'); my $custom_lrms_schema = customize_info_schema($lrms_info_schema, $options); $checker = InfoChecker->new($custom_lrms_schema); @messages = $checker->verify($result); $log->warning("return value lrmsinfo->$_") foreach @messages; # some backends leave extra spaces -- trim them $result->{cluster}{cpudistribution} =~ s/^\s+//; $result->{cluster}{cpudistribution} =~ s/\s+$//; # make sure nodes are unique for my $job (values %{$result->{jobs}}) { next unless $job->{nodes}; my %nodes; $nodes{$_} = 1 for @{$job->{nodes}}; $job->{nodes} = [ sort keys %nodes ]; } return $result; } # Loads the needed LRMS plugin at runtime # First try to load 
XYZmod.pm (implementing the native ARC1 interface) # otherwise try to load XYZ.pm (ARC0.6 plugin) sub load_lrms($) { my $lrms_name = uc(shift); my $module = $lrms_name."mod"; eval { require "$module.pm" }; if ($@) { $log->debug("require for $module returned: $@"); $log->debug("Using ARC0.6 compatible $lrms_name module"); require ARC0mod; ARC0mod::load_lrms($lrms_name); $module = "ARC0mod"; } import $module qw(get_lrms_info get_lrms_options_schema); } # prepares a custom schema that has individual keys for each queue and each job # which is named in $options sub customize_info_schema($$) { my ($info_schema,$options) = @_; my $new_schema; # make a deep copy $new_schema = Storable::dclone($info_schema); # adjust schema for each job: Replace "*" with actual job id's for my $job (@{$options->{jobs}}) { $new_schema->{jobs}{$job} = $new_schema->{jobs}{"*"}; } delete $new_schema->{jobs}{"*"}; # adjust schema for each queue: Replace "*" with actual queue names for my $queue (keys %{$options->{queues}}) { $new_schema->{queues}{$queue} = $new_schema->{queues}{"*"}; } delete $new_schema->{queues}{"*"}; return $new_schema; } #### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST #### my $opt1 = {lrms => 'fork', sge_root => '/opt/n1ge6', sge_cell => 'cello', sge_bin_path => '/opt/n1ge6/bin/lx24-x86', queues => {'shar' => {users => []}, 'loca' => {users => ['joe','pete'], maxjobs => '4 2'}}, jobs => [qw(7 101 5865)] }; my $opt2 = {lrms => 'sge', sge_root => '/opt/n1ge6', sge_cell => 'cello', sge_bin_path => '/opt/n1ge6/bin/lx24-amd64', queues => {'shar' => {users => []}, 'all.q' => {users => ['joe','pete']}}, jobs => [63, 36006] }; my $opt3 = {lrms => 'pbs', pbs_bin_path => '/opt/torque/bin', pbs_log_path => '/var/spool/torque/server_logs', queues => {'batch' => {users => ['joe','pete']}}, jobs => [63, 453] }; sub test { my $options = shift; LogUtils::level('VERBOSE'); require Data::Dumper; import Data::Dumper qw(Dumper); $log->debug("Options: " . Dumper($options)); my $results = LRMSInfo::collect($options); $log->debug("Results: " . Dumper($results)); } #test($opt3); 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/Makefile.am0000644000000000000000000000012713065020171025546 xustar000000000000000027 mtime=1490296953.800307 30 atime=1513200602.579045205 30 ctime=1513200663.166786217 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/Makefile.am0000644000175000002070000000211713065020171025611 0ustar00mockbuildmock00000000000000pkgdata_SCRIPTS = CEinfo.pl PerfData.pl dist_pkgdata_SCRIPTS = glue-generator.pl \ grid-info-soft-register glite-info-provider-ldap dist_pkgdata_DATA = ARC0mod.pm SGEmod.pm FORKmod.pm PBS.pm DGBridge.pm \ LL.pm LSF.pm Condor.pm condor_env.pm SLURM.pm SLURMmod.pm Boinc.pm \ IniParser.pm LogUtils.pm Sysinfo.pm \ LRMSInfo.pm GMJobsInfo.pm HostInfo.pm RTEInfo.pm \ InfoChecker.pm ConfigCentral.pm \ ARC0ClusterInfo.pm ARC1ClusterInfo.pm \ SGE.pm Fork.pm \ XmlPrinter.pm GLUE2xmlPrinter.pm \ LdifPrinter.pm GLUE2ldifPrinter.pm \ NGldifPrinter.pm InfosysHelper.pm arcldapschemadir = $(pkgdatadir)/ldap-schema dist_arcldapschema_DATA = schema/nordugrid.schema PERL = @PERL@ PERLSCRIPTS = $(dist_pkgdata_DATA) CEinfo.pl TESTS_ENVIRONMENT = $(PERL) -I$(srcdir) -Mstrict -wc TESTS = $(PERLSCRIPTS) check_SCRIPTS = $(PERLSCRIPTS) # Check if BDI module is available, if not exclude Boinc.pm from TESTS variable. 
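# The Boinc.pm rule below performs that check at make time: its $(eval)/$(shell)
# pair runs "$(PERL) -e 'use BDI; exit;'" and, if the probe fails, filters
# Boinc.pm out of TESTS with sed so its syntax check is skipped.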
Boinc.pm: FORCE $(eval TESTS := $(shell if `$(PERL) -e "use BDI; exit;" > /dev/null 2>&1`; then echo "$(TESTS)"; else echo "$(TESTS)" | sed 's/Boinc.pm//'; fi)) FORCE: SUBDIRS = test nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/condor_env.pm0000644000000000000000000000012312144153067026210 xustar000000000000000027 mtime=1368446519.129163 27 atime=1513200575.836718 29 ctime=1513200663.13678585 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/condor_env.pm0000644000175000002070000000240612144153067026260 0ustar00mockbuildmock00000000000000package condor_env; use strict; use warnings; BEGIN { use base 'Exporter'; our @EXPORT = qw( configure_condor_env ); } # Initializes environment variables: CONDOR_BIN_PATH # Values defined in arc.conf take priority over previously set environment # variables. # Condor executables are located using the following cues: # 1. condor_bin_path option in arc.conf # 2. PATH environment variable # Synopsis: # # use IniParser; # use condor_env; # # my $parser = IniParser->new('/etc/arc.conf'); # my %config = $parser->get_section("common"); # configure_condor_env(%config) or die "Condor executables not found"; # Returns 1 if Condor executables were NOT found, 0 otherwise. sub configure_condor_env(%) { my %config = @_; if ($config{condor_bin_path}) { $ENV{CONDOR_BIN_PATH} = $config{condor_bin_path}; } else { for (split ':', $ENV{PATH}) { $ENV{CONDOR_BIN_PATH} = $_ and last if -x "$_/condor_version"; } } return 0 unless -x "$ENV{CONDOR_BIN_PATH}/condor_version"; if ($config{condor_config}) { $ENV{CONDOR_CONFIG} = $config{condor_config}; } else { $ENV{CONDOR_CONFIG} = "/etc/condor/condor_config"; } return 0 unless -e "$ENV{CONDOR_CONFIG}"; return 1; } 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/LdifPrinter.pm0000644000000000000000000000012411531244130026266 xustar000000000000000027 mtime=1298483288.359123 27 atime=1513200575.774717 30 ctime=1513200663.157786107 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/LdifPrinter.pm0000644000175000002070000001236411531244130026341 0ustar00mockbuildmock00000000000000package LdifPrinter; use MIME::Base64; use LogUtils; our $log = LogUtils->getLogger(__PACKAGE__); sub new { my ($this, $handle) = @_; my $class = ref($this) || $this; # This would only affect comment lines, the rest is guaranteed to be ASCII binmode $handle, ':encoding(utf8)' or $log->error("binmode failed: $!"); #print $handle "# extended LDIF\n#\n# LDAPv3\n" # or $log->error("print failed: $!"); my $self = {fh => $handle, dn => undef, nick => undef, attrs => undef}; return bless $self, $class; } sub begin { my ($self, $dnkey, $name) = @_; $self->_flush() if defined $self->{dn}; unshift @{$self->{dn}}, safe_dn("$dnkey=$name"); unshift @{$self->{nick}}, safe_comment("$name"); } sub attribute { my ($self, $attr, $value) = @_; push @{$self->{attrs}}, [$attr, $value]; } sub attributes { my ($self, $data, $prefix, @keys) = @_; my $attrs = $self->{attrs} ||= []; push @$attrs, ["$prefix$_", $data->{$_}] for @keys; } sub end { my ($self) = @_; $self->_flush(); shift @{$self->{dn}}; shift @{$self->{nick}}; } # # Prints an entry with the attributes added so far. # Prints nothing if there are no attributes. 
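# For example, with $printer being an LdifPrinter instance, after
#   $printer->begin(o => "glue");
#   $printer->attribute(objectClass => "organization");
# the next flush (triggered by a later begin() or by end()) writes roughly
# (illustrative values, each entry preceded by a blank line):
#
#   dn: o=glue
#   objectClass: organization
#
# Values with a leading space, comma, colon or '<', or containing line breaks
# or non-ASCII bytes, are base64-encoded by safe_attrval below.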
# sub _flush { my ($self) = @_; my $fh = $self->{fh}; my $attrs = $self->{attrs}; return unless defined $attrs; my $dn = join ",", @{$self->{dn}}; my $nick = join ", ", @{$self->{nick}}; print $fh "\n"; #print $fh "# $nick\n"; print $fh safe_attrval("dn", $dn)."\n" or $log->error("print failed: $!"); for my $pair (@$attrs) { my ($attr, $val) = @$pair; next unless defined $val; if (not ref $val) { print $fh safe_attrval($attr, $val)."\n" or $log->error("print failed: $!"); } elsif (ref $val eq 'ARRAY') { for (@$val) { print $fh safe_attrval($attr, $_)."\n" or $log->error("print failed: $!"); } } else { $log->error("Not an ARRAY reference in: $attr"); } } $self->{attrs} = undef; } # # Make a string safe to use as a Relative Distinguished Name, cf. RFC 2253 # sub safe_dn { my ($rdn) = @_; # Escape with \ the following characters ,;+"\<> Also escape # at the # beginning and space at the beginning and at the end of the string. $rdn =~ s/((?:^[#\s])|[,+"\\<>;]|(?:\s$))/\\$1/g; # Encode CR, LF and NUL characters (necessary except when the string # is further base64 encoded) $rdn =~ s/\x0D/\\0D/g; $rdn =~ s/\x0A/\\0A/g; $rdn =~ s/\x00/\\00/g; return $rdn; } # # Construct an attribute-value string safe to use in LDIF, fc. RFC 2849 # sub safe_attrval { my ($attr, $val) = @_; return "${attr}:: ".encode_base64($val,'') if $val =~ /^[\s,:<]/ or $val =~ /[\x0D\x0A\x00]/ or $val =~ /[^\x00-\x7F]/; return "${attr}: $val"; } # # Leave comments as they are, just encode CR, LF and NUL characters # sub safe_comment { my ($line) = @_; $line =~ s/\x0D/\\0D/g; $line =~ s/\x0A/\\0A/g; $line =~ s/\x00/\\00/g; return $line; } # # Fold long lines and add a final newline. Handles comments specially. # sub fold78 { my ($tail) = @_; my $is_comment = "#" eq substr($tail, 0, 1); my $contchar = $is_comment ? "# " : " "; my $output = ""; while (length $tail > 78) { $output .= substr($tail, 0, 78) . "\n"; $tail = $contchar . substr($tail, 78); } return "$output$tail\n"; } # # Higher level functions for recursive printing # # $collector - a func ref that upon evaluation returns a hash ref ($data) # $idkey - a key in %$data to be used to construct the relative DN component # $prefix - to be prepended to the relative DN # $attributes - a func ref that is meant to print attributes. Called with $data as input. # $subtree - yet another func ref that is meant to descend into the hierachy. Called # with $data as input. Optional. 
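# A hypothetical sketch (the list contents and objectClass below are invented
# for illustration): print one entry per element of a list by handing Entries
# a closure that shifts elements off it:
#
#   my @shares = ( { ID => "share1" }, { ID => "share2" } );
#   $printer->Entries(sub { shift @shares }, "GLUE2", "ID",
#       sub { my ($printer, $data) = @_;
#             $printer->attribute(objectClass => "GLUE2Share"); });
#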
# # Prints a single entry sub Entry { my ($self, $collector, $prefix, $idkey, $attributes, $subtree) = @_; return unless $collector and my $data = &$collector(); $self->begin("$prefix$idkey", $data->{$idkey}); &$attributes($self,$data); &$subtree($self, $data) if $subtree; $self->end(); } # Prints entries for as long as $collector continues to evaluate to non-null sub Entries { my ($self, $collector, $prefix, $idkey, $attributes, $subtree) = @_; while ($collector and my $data = &$collector()) { $self->begin("$prefix$idkey", $data->{$idkey}); &$attributes($self,$data); &$subtree($self, $data) if $subtree; $self->end(); } } #### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST #### sub test { my $data; my $printer = LdifPrinter->new(*STDOUT); $printer->begin(o => "glue"); $data = { objectClass => "organization", o => "glue" }; $printer->attributes("", $data, qw(objectClass o)); $printer->begin(GLUE2GroupID => "grid"); $printer->attribute(objectClass => "GLUE2GroupID"); $data = { GLUE2GroupID => "grid" }; $printer->attributes("GLUE2", $data, qw( GroupID )); $printer->end(); $printer->end(); } #test; 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/Condor.pm0000644000000000000000000000012413024225763025302 xustar000000000000000027 mtime=1481714675.902662 27 atime=1513200575.776717 30 ctime=1513200663.135785838 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/Condor.pm0000644000175000002070000004620713024225763025360 0ustar00mockbuildmock00000000000000package Condor; use strict; use POSIX; our @ISA = ('Exporter'); our @EXPORT_OK = ('cluster_info', 'queue_info', 'jobs_info', 'users_info'); use LogUtils ( 'start_logging', 'error', 'warning', 'debug' ); use condor_env; ########################################## # Saved private variables ########################################## # contains the requirements string for the current queue. # It is used by queue-aware functions my $qdef = ''; my %config = (); my $arcconf = $ENV{ARC_CONFIG} ? $ENV{ARC_CONFIG} : '/etc/arc.conf'; my %lrms_queue; my $lrms_queue_initialized = 0; my @allnodedata = (); my $allnodedata_initialized = 0; my %alljobdata = (); my $alljobdata_initialized = 0; my @queuenodes = (); my $queuenodes_initialized = 0; my @jobids_thisqueue = (); my @jobids_otherqueue = (); ########################################## # Private subs ########################################## # Runs a command. Returns a list of three values: # # [0] String containing stdout. # [1] String containing stderr. # [2] Program exit code ($?) that was returned to the shell. sub condor_run($) { my $command = shift; my $stderr_file = "/tmp/condor_run.$$"; my $stdout = `$ENV{CONDOR_BIN_PATH}/$command 2>$stderr_file`; debug "===condor_run: $command"; my $ret = $? >> 8; local (*ERROR, $/); open ERROR, "<$stderr_file"; my $stderr = ; close ERROR; unlink $stderr_file; return $stdout, $stderr, $ret; } # String containing LRMS version. ('UNKNOWN' in case of errors.) sub type_and_version() { my ($out, $err, $ret) = condor_run('condor_version'); return 'UNKNOWN' if $ret != 0; $out =~ /\$CondorVersion:\s+(\S+)/; my $version = $1 || 'UNKNOWN'; my $type = 'Condor'; return $type, $version; } # # Helper funtion which collects all the information about condor nodes. 
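# Each element pushed onto @allnodedata is a hash reference whose keys are the
# lower-cased classad attributes requested below (name, machine, state, cpus,
# totalcpus, slottype) with surrounding quotes stripped, for example (values
# illustrative only):
#
#   { name => 'slot1@node1', machine => 'node1', state => 'Unclaimed',
#     cpus => '1', totalcpus => '8', slottype => 'Static' }
#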
# sub collect_node_data() { return if $allnodedata_initialized; $allnodedata_initialized = 1; my ($out, $err, $ret) = condor_run('condor_status -format "Name = %V\n" Name -format "Machine = %V\n" Machine -format "State = %V\n" State -format "Cpus = %V\n" Cpus -format "TotalCpus = %V\n" TotalCpus -format "SlotType = %V\n\n" SlotType'); error("Failed collecting node information.") if $ret; for (split /\n\n+/, $out) { my %target = condor_digest_classad($_); next unless defined $target{machine}; push @allnodedata, \%target; } debug "===collect_node_data: " . join " ", (map { $$_{machine} } @allnodedata); } # # Helper funtion which collects all the information about condor jobs. # sub collect_job_data() { return if $alljobdata_initialized; $alljobdata_initialized = 1; $ENV{_condor_CONDOR_Q_ONLY_MY_JOBS}='false'; my ($out, $err, $ret) = condor_run('condor_q -constraint "NiceUser == False" -format "ClusterId = %V\n" ClusterId -format "ProcId = %V\n" ProcId -format "JobStatus = %V\n" JobStatus -format "CurrentHosts = %V\n" CurrentHosts -format "LastRemoteHost = %V\n" LastRemoteHost -format "RemoteHost = %V\n" RemoteHost -format "ImageSize = %V\n" ImageSize -format "RemoteWallClockTime = %V\n" RemoteWallClockTime -format "RemoteUserCpu = %V\n" RemoteUserCpu -format "RemoteSysCpu = %V\n" RemoteSysCpu -format "JobTimeLimit = %V\n" JobTimeLimit -format "JobCpuLimit = %V\n\n" JobCpuLimit'); return if $out =~ m/All queues are empty/; error("Failed collecting job information.") if $ret; for (split /\n\n+/, $out) { my %job = condor_digest_classad($_); next unless defined $job{clusterid}; $job{procid} = "0" unless $job{procid}; my $jobid = "$job{clusterid}.$job{procid}"; $alljobdata{$jobid} = \%job; } debug "===collect_job_data: " . (join " ", keys %alljobdata); } # # Scans grid-manager's controldir for jobs in LRMS state belonging to a # queue. Returns a list of their Condor jobids: clusterid.0 (assumes # procid=0). # sub collect_jobids($$) { my %pairs; my $qname = shift; my $controldir = shift; my $cmd = "find $controldir/processing -maxdepth 1 -name 'job.??????????*.status'"; $cmd .= ' | xargs grep -l INLRMS '; $cmd .= ' | sed \'s/processing\/job\.\([^\.]*\)\.status$/job.\1.local/\' '; $cmd .= ' | xargs grep -H "^queue=\|^localid="'; local *LOCAL; open(LOCAL, "$cmd |"); while () { m#/job\.(\w{10,})\.local:queue=(\S+)# && ($pairs{$1}{queue} = $2); m#/job\.(\w{10,})\.local:localid=(\S+)# && ($pairs{$1}{id} = $2); } close LOCAL; foreach my $pair (values %pairs) { # get rid of .condor from localid. $$pair{id} =~ s/(\d+)\..*/$1.0/; if ( $$pair{queue} eq $qname ) { push @jobids_thisqueue, $$pair{id}; } else { push @jobids_otherqueue, $$pair{id}; } } debug "===collect_jobids: thisqueue: @jobids_thisqueue"; debug "===collect_jobids: otherqueue: @jobids_otherqueue"; } # # Returns a job's rank (place) in the current queue, or False if job is not in # current queue. Highest rank is 1. The rank is deduced from jobid, based on # the assumption that jobs are started sequentially by Condor. # Input: jobid (of the form: clusterid.0) # sub rank($) { my $id = shift; my $rank = 0; # only calculate rank for queued jobs return 0 unless exists $alljobdata{$id}; return 0 unless $alljobdata{$id}{lc 'JobStatus'} == 1; foreach (@jobids_thisqueue) { # only include queued jobs in rank next unless exists $alljobdata{$_}; next unless $alljobdata{$_}{lc 'JobStatus'} == 1; $rank++; last if $id eq $_; } #debug "===rank($id) = $rank"; return $rank; } # # Parses long output from condor_q -l # and condor_status -l into and hash. 
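# For example, a classad block such as (values illustrative)
#   ClusterId = 1234
#   JobStatus = 1
#   RemoteHost = "slot1@node1"
# is returned as
#   ( clusterid => '1234', jobstatus => '1', remotehost => 'slot1@node1' )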
# OBS: Field names are lowercased! # OBS: It removes quotes around strings # sub condor_digest_classad($) { my %classad; for (split /\n+/, shift) { next unless /^(\w+)\s*=\s*(.*\S|)\s*$/; my ($field, $val) = ($1, $2); $val =~ s/"(.*)"/$1/; # remove quotes, if any $classad{lc $field} = $val; } return %classad; } # # Takes an optional constraint description string and returns the names of the # nodes which satisfy this contraint. If no constraint is given, returns all # the nodes in the Condor pool # sub condor_grep_nodes { my $req = shift; my $cmd = 'condor_status -format "%s\n" Machine'; $cmd .= " -constraint '$req'" if $req; my ($out, $err, $ret) = condor_run($cmd); debug "===condor_grep_nodes: ". (join ', ', split /\n/, $out); return () if $ret; return split /\n/, $out; } # # Takes one argument: # 1. The LRMS job id as represented in the GM. (In Condor terms, # it's .) # # Returns the current status of the job by mapping Condor's JobStatus # integer into corresponding one-letter codes used by ARC: # # 1 (Idle) --> Q (job is queuing, waiting for a node, etc.) # 2 (Running) --> R (running on a host controlled by the LRMS) # 2 (Suspended) --> S (an already running job in a suspended state) # 3 (Removed) --> E (finishing in the LRMS) # 4 (Completed) --> E (finishing in the LRMS) # 5 (Held) --> O (other) # 6 (Transfer) --> O (other, almost finished. Transferring output.) # 7 (Suspended) --> S (newer condor version support suspended) # # If the job couldn't be found, E is returned since it is probably finished. # sub condor_get_job_status($) { my $id = shift; my %num2letter = qw(1 Q 2 R 3 E 4 E 5 O 6 O 7 S); return 'E' unless $alljobdata{$id}; my $s = $alljobdata{$id}{jobstatus}; return 'E' if !defined $s; $s = $num2letter{$s}; if ($s eq 'R') { $s = 'S' if condor_job_suspended($id); } debug "===condor_get_job_status $id: $s"; return $s; } # # Returns the list of nodes belonging to the current queue # sub condor_queue_get_nodes() { return @queuenodes if $queuenodes_initialized; $queuenodes_initialized = 1; @queuenodes = condor_grep_nodes($qdef); debug "===condor_queue_get_nodes @queuenodes"; return @queuenodes; } # # Count queued jobs (idle or held) within the current queue. # sub condor_queue_get_queued() { my $gridqueued = 0; my $localqueued = 0; my $qfactor = 0; if(condor_cluster_totalcpus() != 0){ $qfactor = condor_queue_get_nodes() / condor_cluster_totalcpus(); } for (values %alljobdata) { my %job = %$_; # only include jobs which are idle or held next unless $job{jobstatus} == 1 || $job{jobstatus} == 5; my $clusterid = "$job{clusterid}.$job{procid}"; if (grep { $_ eq $clusterid } @jobids_thisqueue) { $gridqueued += 1; } elsif (grep { $_ eq $clusterid } @jobids_otherqueue) { # this is a grid job, but in a different queue } else { $localqueued += 1; } } # for locally queued jobs, we don't know to which queue it belongs # try guessing the odds my $total = $gridqueued + int($localqueued * $qfactor); debug "===condor_queue_get_queued: $total = $gridqueued+int($localqueued*$qfactor)"; return $total; } # # Counts all queued cpus (idle and held) in the cluster. # TODO: this counts jobs, not cpus. sub condor_cluster_get_queued_cpus() { my $sum = 0; do {$sum++ if $$_{jobstatus} == 1 || $$_{jobstatus} == 5} for values %alljobdata; debug "===condor_cluster_get_queued_cpus: $sum"; return $sum; } # # Counts all queued jobs (idle and held) in the cluster. 
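# Currently this gives the same number as condor_cluster_get_queued_cpus()
# above: both walk %alljobdata and count entries whose jobstatus is 1 (idle)
# or 5 (held).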
# sub condor_cluster_get_queued_jobs() { my $sum = 0; do {$sum++ if $$_{jobstatus} == 1 || $$_{jobstatus} == 5} for values %alljobdata; debug "===condor_cluster_get_queued_jobs: $sum"; return $sum; } # # Counts all running jobs in the cluster. # TODO: also counts suspended jobs apparently. # only counts suspended jobs in earlier versions of Condor # Newer versions have separate state (7) for supended jobs sub condor_cluster_get_running_jobs() { my $sum = 0; do {$sum++ if $$_{jobstatus} == 2} for values %alljobdata; debug "===condor_cluster_get_running_jobs: $sum"; return $sum; } # # Counts nodes in the current queue with state other than 'Unclaimed' # Every running job is automatically included, plus nodes used # interactively by their owners # sub condor_queue_get_running() { my $running = 0; my @qnod = condor_queue_get_nodes(); for (@allnodedata) { my %node = %$_; next unless grep { $_ eq $node{machine} } @qnod; $running += $node{cpus} if ($node{slottype} !~ /^Partitionable/i && $node{state} !~ /^Unclaimed/i); } debug "===condor_queue_get_running: $running"; return $running; } # # Same as above, but for the whole cluster # sub condor_cluster_get_usedcpus() { my $used = 0; for (@allnodedata) { $used += $$_{cpus} if ($$_{slottype} !~ /^Partitionable/i && $$_{state} !~ /^Unclaimed/i); } debug "===condor_cluster_get_usedcpus: $used"; return $used; } # # returns the total number of CPUs in the cluster # sub condor_queue_totalcpus() { my @qnod = condor_queue_get_nodes(); # List all machines in the pool. Create a hash specifying the TotalCpus # for each machine. my %machines; $machines{$$_{machine}} = $$_{totalcpus} for @allnodedata; my $totalcpus = 0; for (keys %machines) { my $machine = $_; next unless grep { $machine eq $_ } @qnod; $totalcpus += $machines{$_}; } return $totalcpus; } # # returns the total number of nodes in the cluster # sub condor_cluster_totalcpus() { # List all machines in the pool. Create a hash specifying the TotalCpus # for each machine. my %machines; $machines{$$_{machine}} = $$_{totalcpus} for @allnodedata; my $totalcpus = 0; for (keys %machines) { $totalcpus += $machines{$_}; } return $totalcpus; } # # This function parses the condor log to see if the job has been suspended. # (condor_q reports 'R' for running even when the job is suspended, so we need # to parse the log to be sure that 'R' actually means running.) # # Argument: the condor job id # Returns: true if the job is suspended, and false if it's running. # sub condor_job_suspended($) { my $id = shift; return 0 unless $alljobdata{$id}; my $logfile = $alljobdata{$id}{lc 'UserLog'}; return 0 unless $logfile; local *LOGFILE; open LOGFILE, "<$logfile" or return 0; my $suspended = 0; while (my $line = ) { $suspended = 1 if $line =~ /Job was suspended\.$/; $suspended = 0 if $line =~ /Job was unsuspended\.$/; } close LOGFILE; return $suspended; } # # CPU distribution string (e.g., '1cpu:5 2cpu:1'). # sub cpudistribution { # List all machines in the pool. Create a hash specifying the TotalCpus # for each machine. my %machines; $machines{$$_{machine}} = $$_{totalcpus} for @allnodedata; # Count number of machines with one CPU, number with two, etc. my %dist; for (keys %machines) { $dist{$machines{$_}}++; } # Generate CPU distribution string. 
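    # e.g. %dist = (1 => 5, 2 => 1) yields '1cpu:5 2cpu:1' (keys sorted
    # numerically by cpu count).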
my $diststr = ''; for (sort { $a <=> $b } keys %dist) { $diststr .= ' ' unless $diststr eq ''; $diststr .= "${_}cpu:$dist{$_}"; } return $diststr; } ############################################ # Public subs ############################################# sub cluster_info ($) { my $config = shift; my %lrms_cluster; configure_condor_env(%$config) or die "Condor executables or config file not found\n"; collect_node_data(); collect_job_data(); ( $lrms_cluster{lrms_type}, $lrms_cluster{lrms_version} ) = type_and_version(); # not sure how Condor counts RemoteUserCpu and RemoteSysCpu but it should # not matter anyway since we don't support parallel jobs under Condor $lrms_cluster{has_total_cputime_limit} = 0; # Count used/free CPUs and queued jobs in the cluster # Note: SGE has the concept of "slots", which roughly corresponds to # concept of "cpus" in ARC (PBS) LRMS interface. $lrms_cluster{totalcpus} = condor_cluster_totalcpus(); $lrms_cluster{cpudistribution} = cpudistribution(); $lrms_cluster{usedcpus} = condor_cluster_get_usedcpus(); #NOTE: counts jobs, not cpus. $lrms_cluster{queuedcpus} = condor_cluster_get_queued_cpus(); $lrms_cluster{queuedjobs} = condor_cluster_get_queued_jobs(); $lrms_cluster{runningjobs} = condor_cluster_get_running_jobs(); # List LRMS queues. # This does not seem to be used in cluster.pl! @{$lrms_cluster{queue}} = (); return %lrms_cluster; } sub queue_info ($$) { return %lrms_queue if $lrms_queue_initialized; $lrms_queue_initialized = 1; my $config = shift; my $qname = shift; $qdef = join "", split /\[separator\]/, ($$config{condor_requirements} || ''); warning("Option 'condor_requirements' is not defined for queue $qname") unless $qdef; debug("===Requirements for queue $qname: $qdef"); configure_condor_env(%$config) or die "Condor executables or config file not found\n"; collect_node_data(); collect_job_data(); collect_jobids($qname, $$config{control}{'.'}{controldir}); # Number of available (free) cpus can not be larger that # free cpus in the whole cluster my $totalcpus = condor_queue_totalcpus(); my $usedcpus = condor_queue_get_running(); my $queuedcpus = condor_queue_get_queued(); $lrms_queue{freecpus} = $totalcpus - $usedcpus; $lrms_queue{running} = $usedcpus; $lrms_queue{totalcpus} = $totalcpus; # In theory any job in some circumstances can consume all available slots $lrms_queue{MaxSlotsPerJob} = $totalcpus; $lrms_queue{queued} = $queuedcpus; # reserve negative numbers for error states if ($lrms_queue{freecpus}<0) { warning("lrms_queue{freecpus} = $lrms_queue{freecpus}") } # nordugrid-queue-maxrunning # nordugrid-queue-maxqueuable # nordugrid-queue-maxuserrun # nordugrid-queue-mincputime # nordugrid-queue-defaultcputime $lrms_queue{maxrunning} = $totalcpus; $lrms_queue{maxqueuable} = 2 * $lrms_queue{maxrunning}; $lrms_queue{maxuserrun} = $lrms_queue{maxrunning}; $lrms_queue{maxwalltime} = ''; $lrms_queue{minwalltime} = ''; $lrms_queue{defaultwallt} = ''; $lrms_queue{maxcputime} = ''; $lrms_queue{mincputime} = ''; $lrms_queue{defaultcput} = ''; $lrms_queue{status} = 1; return %lrms_queue; } sub jobs_info ($$@) { my $config = shift; my $qname = shift; my $jids = shift; my %lrms_jobs; queue_info($config, $qname); foreach my $id ( @$jids ) { # submit-condor-job might return identifiers of the form ClusterId.condor # Replace .hostname with .0. It is safe to assume that ProcId is 0 because # we only submit one job at a time. 
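        # e.g. an id like '1234.condor' is mapped to '1234.0' before the
        # lookup in %alljobdata below.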
my $id0 = $id; $id0 =~ s/(\d+)\..*/$1.0/; debug "===jobs_info: Mapping $id to $id0"; if ( $alljobdata{$id0} ) { my %job = %{$alljobdata{$id0}}; $lrms_jobs{$id}{status} = condor_get_job_status($id0); $lrms_jobs{$id}{mem} = $job{lc 'ImageSize'}; $lrms_jobs{$id}{walltime} = floor($job{lc 'RemoteWallClockTime'} / 60); $lrms_jobs{$id}{cputime} = floor(($job{lc 'RemoteUserCpu'} + $job{lc 'RemoteSysCpu'}) / 60); $lrms_jobs{$id}{nodes} = []; $lrms_jobs{$id}{nodes} = [$job{lc 'LastRemoteHost'}] if ($job{lc 'LastRemoteHost'} ne "undefined"); $lrms_jobs{$id}{nodes} = [$job{lc 'RemoteHost'}] if ($job{lc 'RemoteHost'} ne "undefined"); if ($job{lc 'JobTimeLimit'} ne "undefined") { $lrms_jobs{$id}{reqwalltime} = floor($job{lc 'JobTimeLimit'} / 60); # caller knows these better } if ($job{lc 'JobCpuLimit'} ne "undefined") { $lrms_jobs{$id}{reqcputime} = floor($job{lc 'JobCpuLimit'} / 60); # caller knows these better } $lrms_jobs{$id}{rank} = rank($id0) ? rank($id0) : ''; $lrms_jobs{$id}{comment} = []; # TODO $lrms_jobs{$id}{cpus} = $job{lc 'CurrentHosts'}; # For queued jobs, unset meanigless values if ($lrms_jobs{$id}{status} eq 'Q') { $lrms_jobs{$id}{mem} = ''; $lrms_jobs{$id}{walltime} = ''; $lrms_jobs{$id}{cputime} = ''; $lrms_jobs{$id}{nodes} = []; } } else { # Job probably already finished debug("===Condor job $id not found. Probably it has finished"); $lrms_jobs{$id}{status} = ''; $lrms_jobs{$id}{mem} = ''; $lrms_jobs{$id}{walltime} = ''; $lrms_jobs{$id}{cputime} = ''; $lrms_jobs{$id}{reqwalltime} = ''; $lrms_jobs{$id}{reqcputime} = ''; $lrms_jobs{$id}{rank} = ''; $lrms_jobs{$id}{nodes} = []; $lrms_jobs{$id}{comment} = []; } } return %lrms_jobs; } sub users_info($$@) { my $config = shift; my $qname = shift; my $accts = shift; my %lrms_users; queue_info($config, $qname); foreach my $u ( @{$accts} ) { # all users are treated as equals # there is no maximum walltime/cputime limit in Condor $lrms_users{$u}{freecpus} = $lrms_queue{freecpus}; $lrms_users{$u}{queuelength} = "$lrms_queue{queued}"; } return %lrms_users; } 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315732025561 xustar000000000000000030 mtime=1513200602.624045755 30 atime=1513200650.085626229 30 ctime=1513200663.167786229 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/Makefile.in0000644000175000002070000010301713214315732025631 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/infoproviders DIST_COMMON = README $(dist_arcldapschema_DATA) $(dist_pkgdata_DATA) \ $(dist_pkgdata_SCRIPTS) $(srcdir)/CEinfo.pl.in \ $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/PerfData.pl.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CEinfo.pl PerfData.pl CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" \ "$(DESTDIR)$(arcldapschemadir)" "$(DESTDIR)$(pkgdatadir)" SCRIPTS = $(dist_pkgdata_SCRIPTS) $(pkgdata_SCRIPTS) SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive DATA = $(dist_arcldapschema_DATA) $(dist_pkgdata_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags am__tty_colors = \ red=; grn=; lgn=; blu=; std= DIST_SUBDIRS = $(SUBDIRS) 
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ 
GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ 
XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkgdata_SCRIPTS = CEinfo.pl PerfData.pl dist_pkgdata_SCRIPTS = glue-generator.pl \ grid-info-soft-register glite-info-provider-ldap dist_pkgdata_DATA = ARC0mod.pm SGEmod.pm FORKmod.pm PBS.pm DGBridge.pm \ LL.pm LSF.pm Condor.pm condor_env.pm SLURM.pm SLURMmod.pm Boinc.pm \ IniParser.pm LogUtils.pm Sysinfo.pm \ LRMSInfo.pm GMJobsInfo.pm HostInfo.pm RTEInfo.pm \ InfoChecker.pm ConfigCentral.pm \ ARC0ClusterInfo.pm ARC1ClusterInfo.pm \ SGE.pm Fork.pm \ XmlPrinter.pm GLUE2xmlPrinter.pm \ LdifPrinter.pm GLUE2ldifPrinter.pm \ NGldifPrinter.pm InfosysHelper.pm arcldapschemadir = $(pkgdatadir)/ldap-schema dist_arcldapschema_DATA = schema/nordugrid.schema PERLSCRIPTS = $(dist_pkgdata_DATA) CEinfo.pl TESTS_ENVIRONMENT = $(PERL) -I$(srcdir) -Mstrict -wc TESTS = $(PERLSCRIPTS) check_SCRIPTS = $(PERLSCRIPTS) SUBDIRS = test all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then 
exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/infoproviders/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/infoproviders/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): CEinfo.pl: $(top_builddir)/config.status $(srcdir)/CEinfo.pl.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ PerfData.pl: $(top_builddir)/config.status $(srcdir)/PerfData.pl.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-dist_pkgdataSCRIPTS: $(dist_pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(dist_pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-dist_pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " 
$(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_arcldapschemaDATA: $(dist_arcldapschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcldapschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcldapschemadir)" @list='$(dist_arcldapschema_DATA)'; test -n "$(arcldapschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcldapschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcldapschemadir)" || exit $$?; \ done uninstall-dist_arcldapschemaDATA: @$(NORMAL_UNINSTALL) @list='$(dist_arcldapschema_DATA)'; test -n "$(arcldapschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcldapschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcldapschemadir)" && rm -f $$files install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_SCRIPTS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-recursive all-am: Makefile $(SCRIPTS) $(DATA) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(arcldapschemadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dist_arcldapschemaDATA \ install-dist_pkgdataDATA install-dist_pkgdataSCRIPTS \ install-pkgdataSCRIPTS install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-dist_arcldapschemaDATA \ uninstall-dist_pkgdataDATA uninstall-dist_pkgdataSCRIPTS \ uninstall-pkgdataSCRIPTS .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) check-am \ ctags-recursive install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-TESTS check-am clean clean-generic \ clean-libtool ctags ctags-recursive distclean \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dist_arcldapschemaDATA \ install-dist_pkgdataDATA install-dist_pkgdataSCRIPTS \ install-dvi install-dvi-am install-exec install-exec-am \ install-html install-html-am install-info install-info-am \ install-man install-pdf install-pdf-am install-pkgdataSCRIPTS \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am uninstall-dist_arcldapschemaDATA \ uninstall-dist_pkgdataDATA uninstall-dist_pkgdataSCRIPTS \ uninstall-pkgdataSCRIPTS # Check if BDI module is available, if not exclude Boinc.pm from TESTS variable. Boinc.pm: FORCE $(eval TESTS := $(shell if `$(PERL) -e "use BDI; exit;" > /dev/null 2>&1`; then echo "$(TESTS)"; else echo "$(TESTS)" | sed 's/Boinc.pm//'; fi)) FORCE: # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/Fork.pm0000644000000000000000000000012312116113676024756 xustar000000000000000027 mtime=1362663358.222032 27 atime=1513200575.799717 29 ctime=1513200663.15478607 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/Fork.pm0000644000175000002070000002503312116113676025027 0ustar00mockbuildmock00000000000000package Fork; use strict; use POSIX qw(ceil floor); use Sys::Hostname; our @ISA = ('Exporter'); our @EXPORT_OK = ('cluster_info', 'queue_info', 'jobs_info', 'users_info'); use LogUtils ( 'start_logging', 'error', 'warning', 'debug' ); ########################################## # Saved private variables ########################################## our (%lrms_queue); our $running = undef; # total running jobs in a queue # the queue passed in the latest call to queue_info, jobs_info or users_info my $currentqueue = undef; # Resets queue-specific global variables if # the queue has changed since the last call sub init_globals($) { my $qname = shift; if (not defined $currentqueue or $currentqueue ne $qname) { $currentqueue = $qname; %lrms_queue = (); $running = undef; } } ########################################## # Private subs ########################################## sub cpu_threads_cores_sockets { my $nsockets; # total number of physical cpu sockets my $ncores; # total number of cpu cores my $nthreads; # total number of hardware execution threads if (-f "/proc/cpuinfo") { # Linux variant my %sockets; # cpu socket IDs my %cores; # cpu core IDs open (CPUINFO, "</proc/cpuinfo") or warning("Failed opening /proc/cpuinfo: $!"); while ( my $line = <CPUINFO> ) { if ($line=~/^processor\s*:\s+(\d+)$/) { ++$nthreads; } elsif ($line=~/^physical id\s*:\s+(\d+)$/) { ++$sockets{$1}; } elsif ($line=~/^core id\s*:\s+(\d+)$/) { ++$cores{$1}; } } close CPUINFO; # count total cpu cores and sockets $ncores = scalar keys %cores; $nsockets = scalar keys %sockets; if ($nthreads) { # if /proc/cpuinfo does not provide socket and core IDs, # assume every thread represents a separate cpu $ncores = $nthreads unless $ncores; $nsockets = $nthreads unless $nsockets; } else { warning("Failed parsing /proc/cpuinfo"); } } elsif (-x "/usr/sbin/system_profiler") { # OS X my @lines = `system_profiler SPHardwareDataType`; warning("Failed running system_profiler: $!") if $?; for my $line ( @lines ) { if ($line =~ /Number Of Processors:\s*(.+)/) { $nsockets = $1; } elsif ($line =~ /Total Number Of Cores:\s*(.+)/) { $ncores = $1; $nthreads = $1; # Assume 1 execution thread per core } } unless ($nsockets and $ncores) { warning("Failed parsing output of system_profiler"); } } elsif (-x "/usr/bin/kstat" ) { # Solaris my %chips; eval { require Sun::Solaris::Kstat; my $ks = Sun::Solaris::Kstat->new(); my $cpuinfo = $ks->{cpu_info}; die "key not found: cpu_info" unless defined $cpuinfo; for my $id (keys %$cpuinfo) { my $info = $cpuinfo->{$id}{"cpu_info$id"}; die "key not found: cpu_info$id" unless defined $info; $chips{$info->{chip_id}}++; $nthreads++; } }; if ($@) { error("Failed running module Sun::Solaris::Kstat: $@"); } # assume each core is in a separate socket $nsockets = $ncores = scalar keys %chips; } else { warning("Cannot query CPU info: unsupported operating system"); } return ($nthreads,$ncores,$nsockets); } # Produces stats for all processes on the system sub process_info() { my $command = "ps -e -o ppid,pid,vsz,time,etime,user,comm"; my @pslines = `$command`; if ($?
!= 0) { warning("Failed running (non-zero exit status): $command"); return (); } shift @pslines; # drop header line my @procinfo; for my $line (@pslines) { my ($ppid,$pid,$vsize,$cputime,$etime,$user,$comm) = split ' ', $line, 7; # matches time formats like: 21:29.44, 12:21:29, 3-12:21:29 if ($cputime =~ /^(?:(?:(\d+)-)?(\d+):)?(\d+):(\d\d(?:\.\d+)?)$/) { my ($days,$hours,$minutes,$seconds) = (($1||0), ($2||0), $3, $4); $cputime = $seconds + 60*($minutes + 60*($hours + 24*$days)); } else { warning("Invalid cputime: $cputime"); $cputime = 0; } # matches time formats like: 21:29.44, 12:21:29, 3-12:21:29 if ($etime =~ /^(?:(?:(\d+)-)?(\d+):)?(\d+):(\d\d(?:\.\d+)?)$/) { my ($days,$hours,$minutes,$seconds) = (($1||0), ($2||0), $3, $4); $etime = $seconds + 60*($minutes + 60*($hours + 24*$days)); } elsif ($etime eq '-') { $etime = 0; # a zombie ? } else { warning("Invalid etime: $etime"); $etime = 0; } my $pi = { ppid => $ppid, pid => $pid, vsize => $vsize, user => $user, cputime => $cputime, etime => $etime, comm => $comm }; push @procinfo, $pi, } return @procinfo; } ############################################ # Public subs ############################################# sub cluster_info ($) { my ($config) = shift; my (%lrms_cluster); $lrms_cluster{lrms_type} = "fork"; $lrms_cluster{lrms_version} = "1"; # only enforcing per-process cputime limit $lrms_cluster{has_total_cputime_limit} = 0; my ($cputhreads) = cpu_threads_cores_sockets(); $lrms_cluster{totalcpus} = $cputhreads; # Since fork is a single machine backend all there will only be one machine available $lrms_cluster{cpudistribution} = $lrms_cluster{totalcpus}."cpu:1"; # usedcpus on a fork machine is determined from the 1min cpu # loadaverage and cannot be larger than the totalcpus if (`uptime` =~ /load averages?:\s+([.\d]+),?\s+([.\d]+),?\s+([.\d]+)/) { $lrms_cluster{usedcpus} = ($1 <= $lrms_cluster{totalcpus}) ? 
floor(0.5+$1) : $lrms_cluster{totalcpus}; } else { error("Failed getting load averages"); $lrms_cluster{usedcpus} = 0; } #Fork does not support parallel jobs $lrms_cluster{runningjobs} = $lrms_cluster{usedcpus}; # no LRMS queued jobs on a fork machine, fork has no queueing ability $lrms_cluster{queuedcpus} = 0; $lrms_cluster{queuedjobs} = 0; $lrms_cluster{queue} = [ ]; return %lrms_cluster; } sub queue_info ($$) { my ($config) = shift; my ($qname) = shift; init_globals($qname); if (defined $running) { # jobs_info was already called, we know exactly how many grid jobs # are running $lrms_queue{running} = $running; } else { # assuming that the submitted grid jobs are cpu hogs, approximate # the number of running jobs with the number of running processes $lrms_queue{running}= 0; unless (open PSCOMMAND, "ps axr |") { error("error in executing ps axr"); } while(my $line = <PSCOMMAND>) { chomp($line); next if ($line =~ m/PID TTY/); next if ($line =~ m/ps axr/); next if ($line =~ m/cluster-fork/); $lrms_queue{running}++; } close PSCOMMAND; } my ($cputhreads) = cpu_threads_cores_sockets(); $lrms_queue{totalcpus} = $cputhreads; $lrms_queue{status} = $lrms_queue{totalcpus}-$lrms_queue{running}; # reserve negative numbers for error states # Fork is not a real LRMS, and cannot be in an error state if ($lrms_queue{status}<0) { debug("lrms_queue{status} = $lrms_queue{status}"); $lrms_queue{status} = 0; } my $job_limit; $job_limit = 1; if ( $$config{maxjobs} ) { #extract lrms maxjobs from config option my @maxes = split(' ', $$config{maxjobs}); my $len=@maxes; if ($len > 1){ $job_limit = $maxes[1]; if ($job_limit eq "cpunumber") { $job_limit = $lrms_queue{totalcpus}; } } } $lrms_queue{maxrunning} = $job_limit; $lrms_queue{maxuserrun} = $job_limit; $lrms_queue{maxqueuable} = $job_limit; chomp( my $maxcputime = `ulimit "-t"` ); if ($maxcputime =~ /^\d+$/) { $lrms_queue{maxcputime} = $maxcputime; } elsif ($maxcputime eq 'unlimited') { $lrms_queue{maxcputime} = ""; } else { warning("Could not determine max cputime with ulimit -t"); $lrms_queue{maxcputime} = ""; } $lrms_queue{queued} = 0; $lrms_queue{mincputime} = ""; $lrms_queue{defaultcput} = ""; $lrms_queue{minwalltime} = ""; $lrms_queue{defaultwallt} = ""; $lrms_queue{maxwalltime} = $lrms_queue{maxcputime}; return %lrms_queue; } sub jobs_info ($$@) { my ($config) = shift; my ($qname) = shift; my ($jids) = shift; init_globals($qname); my (%lrms_jobs); my @procinfo = process_info(); foreach my $id (@$jids){ $lrms_jobs{$id}{nodes} = [ hostname ]; my ($proc) = grep { $id == $_->{pid} } @procinfo; if ($proc) { # number of running jobs. Will be used in queue_info ++$running; # sum cputime of all child processes my $cputime = $proc->{cputime}; $_->{ppid} == $id and $cputime += $_->{cputime} for @procinfo; $lrms_jobs{$id}{mem} = $proc->{vsize}; $lrms_jobs{$id}{walltime} = ceil $proc->{etime}/60; $lrms_jobs{$id}{cputime} = ceil $cputime/60; $lrms_jobs{$id}{status} = 'R'; $lrms_jobs{$id}{comment} = [ "LRMS: Running under fork" ]; } else { $lrms_jobs{$id}{mem} = ''; $lrms_jobs{$id}{walltime} = ''; $lrms_jobs{$id}{cputime} = ''; $lrms_jobs{$id}{status} = ''; # job is EXECUTED $lrms_jobs{$id}{comment} = [ "LRMS: no longer running" ]; } $lrms_jobs{$id}{reqwalltime} = ""; $lrms_jobs{$id}{reqcputime} = ""; $lrms_jobs{$id}{rank} = "0"; #Fork backend does not support parallel jobs $lrms_jobs{$id}{cpus} = "1"; } return %lrms_jobs; } sub users_info($$@) { my ($config) = shift; my ($qname) = shift; my ($accts) = shift; init_globals($qname); my (%lrms_users); # freecpus # queue length if ( !
exists $lrms_queue{status} ) { %lrms_queue = queue_info( $config, $qname ); } foreach my $u ( @{$accts} ) { $lrms_users{$u}{freecpus} = $lrms_queue{maxuserrun} - $lrms_queue{running}; $lrms_users{$u}{queuelength} = "$lrms_queue{queued}"; } return %lrms_users; } 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/GLUE2ldifPrinter.pm0000644000000000000000000000012412116113676027077 xustar000000000000000027 mtime=1362663358.222032 27 atime=1513200575.771717 30 ctime=1513200663.158786119 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/GLUE2ldifPrinter.pm0000644000175000002070000006513012116113676027151 0ustar00mockbuildmock00000000000000package GLUE2ldifPrinter; use base "LdifPrinter"; sub new { my ($this, $handle, $splitjobs) = @_; my $self = $this->SUPER::new($handle); $self->{splitjobs} = $splitjobs; return $self; } # bools come in lowecase, must be uppercased for LDAP # In the XML schema allowed values are: true, false, undefined # In the LDAP schema allowed values are: TRUE, FALSE sub uc_bools { my ($data, @keys) = @_; for (@keys) { my $val = $data->{$_}; next unless defined $val; $data->{$_} = $val = uc $val; delete $data->{$_} unless $val eq 'FALSE' or $val eq 'TRUE'; } } # # Print attributes # sub beginGroup { my ($self, $name) = @_; $self->begin(GLUE2GroupID => $name); my $data = { objectClass => "GLUE2Group", GLUE2GroupID => $name}; $self->attributes($data, "", qw(objectClass GLUE2GroupID)); } sub EntityAttributes { my ($self, $data) = @_; $self->attributes($data, "GLUE2Entity", qw( CreationTime Validity Name OtherInfo )); } sub LocationAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Location"); $self->attributes($data, "GLUE2Location", qw( ID Address Place Country PostCode Latitude Longitude )); $self->attribute(GLUE2LocationServiceForeignKey => $data->{ServiceForeignKey}); } sub ContactAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Contact"); $self->attributes($data, "GLUE2Contact", qw( ID Detail Type )); $self->attribute(GLUE2ContactServiceForeignKey => $data->{ServiceForeignKey}); } sub DomainAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Domain"); $self->attributes($data, "GLUE2Domain", qw( ID Description WWW )); } sub AdminDomainAttributes { my ($self, $data) = @_; uc_bools($data, qw( Distributed )); $self->DomainAttributes($data); $self->attribute(objectClass => "GLUE2AdminDomain"); $self->attributes($data, "GLUE2AdminDomain", qw( Distributed Owner )); $self->attribute(GLUE2AdminDomainAdminDomainForeignKey => $data->{AdminDomainID}); } sub UserDomainAttributes { my ($self, $data) = @_; $self->DomainAttributes($data); $self->attribute(objectClass => "GLUE2UserDomain"); $self->attributes($data, "GLUE2UserDomain", qw( Level UserManager Member )); $self->attribute(GLUE2UserDomainUserDomainForeignKey => $data->{UserDomainID}); } sub ServiceAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Service"); $self->attributes($data, "GLUE2Service", qw( ID Capability Type QualityLevel StatusInfo Complexity )); $self->attribute(GLUE2ServiceAdminDomainForeignKey => $data->{AdminDomainID}); $self->attribute(GLUE2ServiceServiceForeignKey => $data->{ServiceID}); } sub EndpointAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Endpoint"); $self->attributes($data, "GLUE2Endpoint", qw( ID 
URL Capability Technology InterfaceName InterfaceVersion InterfaceExtension WSDL SupportedProfile Semantics Implementor ImplementationName ImplementationVersion QualityLevel HealthState HealthStateInfo ServingState StartTime IssuerCA TrustedCA DowntimeAnnounce DowntimfeStart DowntimeEnd DowntimeInfo )); $self->attribute(GLUE2EndpointServiceForeignKey => $data->{ServiceID}); } sub ShareAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Share"); $self->attributes($data, "GLUE2Share", qw( ID Description )); $self->attribute(GLUE2ShareServiceForeignKey => $data->{ServiceID}); $self->attribute(GLUE2ShareEndpointForeignKey => $data->{EndpointID}); $self->attribute(GLUE2ShareResourceForeignKey => $data->{ResourceID}); $self->attribute(GLUE2ShareShareForeignKey => $data->{ShareID}); } sub ManagerAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Manager"); $self->attributes($data, "GLUE2Manager", qw( ID ProductName ProductVersion )); $self->attribute(GLUE2ManagerServiceForeignKey => $data->{ServiceID}); } sub ResourceAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Resource"); $self->attributes($data, "GLUE2Resource", qw( ID )); $self->attribute(GLUE2ResourceManagerForeignKey => $data->{ManagerID}); } sub ActivityAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Activity"); $self->attributes($data, "GLUE2Activity", qw( ID )); $self->attribute(GLUE2ActivityUserDomainForeignKey => $data->{UserDomainID}); $self->attribute(GLUE2ActivityEndpointForeignKey => $data->{EndpointID}); $self->attribute(GLUE2ActivityShareForeignKey => $data->{ShareID}); $self->attribute(GLUE2ActivityResourceForeignKey => $data->{ResourceID}); $self->attribute(GLUE2ActivityActivityForeignKey => $data->{ActivityID}); } sub PolicyAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Policy"); $self->attributes($data, "GLUE2Policy", qw( ID Scheme Rule )); $self->attribute(GLUE2PolicyUserDomainForeignKey => $data->{UserDomainID}); } sub AccessPolicyAttributes { my ($self, $data) = @_; $self->PolicyAttributes($data); $self->attribute(objectClass => "GLUE2AccessPolicy"); $self->attribute(GLUE2AccessPolicyEndpointForeignKey => $data->{EndpointID}); } sub MappingPolicyAttributes { my ($self, $data) = @_; $self->PolicyAttributes($data); $self->attribute(objectClass => "GLUE2MappingPolicy"); $self->attribute(GLUE2MappingPolicyShareForeignKey => $data->{ShareID}); } sub ComputingServiceAttributes { my ($self, $data) = @_; $self->ServiceAttributes($data); $self->attribute(objectClass => "GLUE2ComputingService"); $self->attributes($data, "GLUE2ComputingService", qw( TotalJobs RunningJobs WaitingJobs StagingJobs SuspendedJobs PreLRMSWaitingJobs )); } sub ComputingEndpointAttributes { my ($self, $data) = @_; $self->EndpointAttributes($data); # The LDAP schema required both this and GLUE2ComputingEndpointComputingServiceForeignKey ! 
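# (so the ComputingService key is published twice below: once as GLUE2EndpointServiceForeignKey and once as GLUE2ComputingEndpointComputingServiceForeignKey)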
$self->attribute(GLUE2EndpointServiceForeignKey => $data->{ComputingServiceID}); $self->attribute(objectClass => "GLUE2ComputingEndpoint"); $self->attributes($data, "GLUE2ComputingEndpoint", qw( Staging JobDescription TotalJobs RunningJobs WaitingJobs StagingJobs SuspendedJobs PreLRMSWaitingJobs )); $self->attribute(GLUE2ComputingEndpointComputingServiceForeignKey => $data->{ComputingServiceID}); } sub ComputingShareAttributes { my ($self, $data) = @_; uc_bools($data, qw( Preemption )); $self->ShareAttributes($data); $self->attribute(objectClass => "GLUE2ComputingShare"); $self->attributes($data, "GLUE2ComputingShare", qw( MappingQueue MaxWallTime MaxMultiSlotWallTime MinWallTime DefaultWallTime MaxCPUTime MaxTotalCPUTime MinCPUTime DefaultCPUTime MaxTotalJobs MaxRunningJobs MaxWaitingJobs MaxPreLRMSWaitingJobs MaxUserRunningJobs MaxSlotsPerJob MaxStateInStreams MaxStageOutStreams SchedulingPolicy MaxMainMemory GuaranteedMainMemory MaxVirtualMemory GuaranteedVirtualMemory MaxDiskSpace DefaultStorageService Preemption ServingState TotalJobs RunningJobs LocalRunningJobs WaitingJobs LocalWaitingJobs SuspendedJobs LocalSuspendedJobs StagingJobs PreLRMSWaitingJobs EstimatedAverageWaitingTime EstimatedWorstWaitingTime FreeSlots FreeSlotsWithDuration UsedSlots RequestedSlots ReservationPolicy Tag )); $self->attribute(GLUE2ComputingShareComputingServiceForeignKey => $data->{ComputingServiceID}); # Mandatory by schema $self->attribute(GLUE2ComputingShareComputingEndpointForeignKey => $data->{ComputingEndpointID}); $self->attribute(GLUE2ComputingShareExecutionEnvironmentForeignKey => $data->{ExecutionEnvironmentID}); } sub ComputingManagerAttributes { my ($self, $data) = @_; uc_bools($data, qw( Reservation BulkSubmission Homogeneous WorkingAreaShared WorkingAreaGuaranteed )); $self->ManagerAttributes($data); $self->attribute(objectClass => "GLUE2ComputingManager"); $self->attributes($data, "GLUE2ComputingManager", qw( Reservation BulkSubmission TotalPhysicalCPUs TotalLogicalCPUs TotalSlots SlotsUsedByLocalJobs SlotsUsedByGridJobs Homogeneous NetworkInfo LogicalCPUDistribution WorkingAreaShared WorkingAreaGuaranteed WorkingAreaTotal WorkingAreaFree WorkingAreaLifeTime WorkingAreaMultiSlotTotal WorkingAreaMultiSlotFree WorkingAreaMultiSlotLifeTime CacheTotal CacheFree TmpDir ScratchDir ApplicationDir )); $self->attribute(GLUE2ComputingManagerComputingServiceForeignKey => $data->{ComputingServiceID}); } sub BenchmarkAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Benchmark"); $self->attributes($data, "GLUE2Benchmark", qw( ID Type Value )); $self->attribute(GLUE2BenchmarkExecutionEnvironmentForeignKey => $data->{ExecutionEnvironmentID}); $self->attribute(GLUE2BenchmarkComputingManagerForeignKey => $data->{ComputingManagerID}); } sub ExecutionEnvironmentAttributes { my ($self, $data) = @_; uc_bools($data, qw( VirtualMachine ConnectivityIn ConnectivityOut )); $self->ResourceAttributes($data); $self->attribute(objectClass => "GLUE2ExecutionEnvironment"); $self->attributes($data, "GLUE2ExecutionEnvironment", qw( Platform VirtualMachine TotalInstances UsedInstances UnavailableInstances PhysicalCPUs LogicalCPUs CPUMultiplicity CPUVendor CPUModel CPUVersion CPUClockSpeed CPUTimeScalingFactor WallTimeScalingFactor MainMemorySize VirtualMemorySize OSFamily OSName OSVersion ConnectivityIn ConnectivityOut NetworkInfo )); $self->attribute(GLUE2ExecutionEnvironmentComputingManagerForeignKey => $data->{ComputingManagerID}); } sub 
ApplicationEnvironmentAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2ApplicationEnvironment"); $self->attributes($data, "GLUE2ApplicationEnvironment", qw( ID AppName AppVersion State RemovalDate License Description BestBenchmark ParallelSupport MaxSlots MaxJobs MaxUserSeats FreeSlots FreeJobs FreeUserSeats )); $self->attribute(GLUE2ApplicationEnvironmentComputingManagerForeignKey => $data->{ComputingManagerID}); $self->attribute(GLUE2ApplicationEnvironmentExecutionEnvironmentForeignKey => $data->{ExecutionEnvironmentID}); } sub ApplicationHandleAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2ApplicationHandle"); $self->attributes($data, "GLUE2ApplicationHandle", qw( ID Type Value )); $self->attribute(GLUE2ApplicationHandleApplicationEnvironmentForeignKey => $data->{ApplicationEnvironmentID}); } sub ComputingActivityAttributes { my ($self, $data) = @_; $self->ActivityAttributes($data); $self->attribute(objectClass => "GLUE2ComputingActivity"); $self->attributes($data, "GLUE2ComputingActivity", qw( Type IDFromEndpoint LocalIDFromManager JobDescription State RestartState ExitCode ComputingManagerExitCode Error WaitingPosition UserDomain Owner LocalOwner RequestedTotalWallTime RequestedTotalCPUTime RequestedSlots RequestedApplicationEnvironment StdIn StdOut StdErr LogDir ExecutionNode Queue UsedTotalWallTime UsedTotalCPUTime UsedMainMemory SubmissionTime ComputingManagerSubmissionTime StartTime ComputingManagerEndTime EndTime WorkingAreaEraseTime ProxyExpirationTime SubmissionHost SubmissionClientName OtherMessages )); $self->attribute(GLUE2ActivityShareForeignKey => $data->{ComputingShareID}); $self->attribute(GLUE2ActivityResourceForeignKey => $data->{ExecutionEnvironmentID}); $self->attribute(GLUE2ActivityActivityForeignKey => $data->{ActivityID}); } sub ToStorageServiceAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2ToStorageService"); $self->attributes($data, "GLUE2ToStorageService", qw( ID LocalPath RemotePath )); $self->attribute(GLUE2ToStorageServiceComputingServiceForeignKey => $data->{ComputingServiceID}); $self->attribute(GLUE2ToStorageServiceStorageServiceForeignKey => $data->{StorageServiceID}); } # # Follow hierarchy # sub Location { LdifPrinter::Entry(@_, 'GLUE2Location', 'ID', \&LocationAttributes); } sub Contacts { LdifPrinter::Entries(@_, 'GLUE2Contact', 'ID', \&ContactAttributes); } sub AdminDomain { LdifPrinter::Entry(@_, 'GLUE2Domain', 'ID', \&AdminDomainAttributes, sub { my ($self, $data) = @_; $self->Location($data->{Location}); $self->Contacts($data->{Contacts}); #$self->ComputingService($data->{ComputingService}); }); } sub AccessPolicies { LdifPrinter::Entries(@_, 'GLUE2Policy', 'ID', \&AccessPolicyAttributes); } sub MappingPolicies { LdifPrinter::Entries(@_, 'GLUE2Policy', 'ID', \&MappingPolicyAttributes); } sub Services { LdifPrinter::Entries(@_, 'GLUE2Service', 'ID', \&ServiceAttributes, sub { my ($self, $data) = @_; $self->Endpoints($data->{Endpoints}); $self->Location($data->{Location}); $self->Contacts($data->{Contacts}); }); } sub ComputingService { LdifPrinter::Entry(@_, 'GLUE2Service', 'ID', \&ComputingServiceAttributes, sub { my ($self, $data) = @_; $self->ComputingEndpoints($data->{ComputingEndpoints}); $self->ComputingShares($data->{ComputingShares}); $self->ComputingManager($data->{ComputingManager}); $self->ToStorageServices($data->{ToStorageServices}); 
$self->Location($data->{Location}); $self->Contacts($data->{Contacts}); }); } sub Endpoint { LdifPrinter::Entry(@_, 'GLUE2Endpoint', 'ID', \&EndpointAttributes, sub { my ($self, $data) = @_; $self->AccessPolicies($data->{AccessPolicies}); }); } sub Endpoints { LdifPrinter::Entries(@_, 'GLUE2Endpoint', 'ID', \&EndpointAttributes, sub { my ($self, $data) = @_; $self->AccessPolicies($data->{AccessPolicies}); }); } sub ComputingEndpoint { LdifPrinter::Entry(@_, 'GLUE2Endpoint', 'ID', \&ComputingEndpointAttributes, sub { my ($self, $data) = @_; $self->AccessPolicies($data->{AccessPolicies}); }); } sub ComputingEndpoints { LdifPrinter::Entries(@_, 'GLUE2Endpoint', 'ID', \&ComputingEndpointAttributes, sub { my ($self, $data) = @_; if (!($self->{splitjobs}) && $data->{ComputingActivities}) { $self->beginGroup("ComputingActivities"); $self->ComputingActivities($data->{ComputingActivities}); $self->end(); } $self->AccessPolicies($data->{AccessPolicies}); }); } sub ComputingShares { LdifPrinter::Entries(@_, 'GLUE2Share', 'ID', \&ComputingShareAttributes, sub { my ($self, $data) = @_; $self->MappingPolicies($data->{MappingPolicies}); }); } sub ComputingManager { LdifPrinter::Entry(@_, 'GLUE2Manager', 'ID', \&ComputingManagerAttributes, sub { my ($self, $data) = @_; $self->Benchmarks($data->{Benchmarks}); $self->beginGroup("ExecutionEnvironments"); $self->ExecutionEnvironments($data->{ExecutionEnvironments}); $self->end(); $self->beginGroup("ApplicationEnvironments"); $self->ApplicationEnvironments($data->{ApplicationEnvironments}); $self->end(); }); } sub Benchmarks { LdifPrinter::Entries(@_, 'GLUE2Benchmark', 'ID', \&BenchmarkAttributes); } sub ExecutionEnvironments { LdifPrinter::Entries(@_, 'GLUE2Resource', 'ID', \&ExecutionEnvironmentAttributes, sub { my ($self, $data) = @_; $self->Benchmarks($data->{Benchmarks}); }); } sub ApplicationEnvironments { LdifPrinter::Entries(@_, 'GLUE2ApplicationEnvironment', 'ID', \&ApplicationEnvironmentAttributes, sub { my ($self, $data) = @_; $self->ApplicationHandles($data->{ApplicationHandles}); }); } sub ApplicationHandles { LdifPrinter::Entries(@_, 'GLUE2ApplicationHandle', 'ID', \&ApplicationHandleAttributes); } sub ComputingActivities { LdifPrinter::Entries(@_, 'GLUE2Activity', 'ID', \&ComputingActivityAttributes); } sub ToStorageServices { LdifPrinter::Entries(@_, 'GLUE2ToStorageService', 'ID', \&ToStorageServiceAttributes); } sub Top { my ($self, $data ) = @_; $self->begin(o => "glue"); #$self->attribute(objectClass => "top"); #$self->attribute(objectClass => "organization"); #$self->attribute(o => "glue"); # builds the grid subtree, with domain information #$self->beginGroup("grid"); $self->AdminDomain(&$data->{AdminDomain}); #$self->end; $self->beginGroup("services"); $self->Services(&$data->{Services}); $self->ComputingService(&$data->{ComputingService}); $self->end; } 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/ARC0mod.pm0000644000000000000000000000012412256747723025256 xustar000000000000000027 mtime=1388040147.583926 27 atime=1513200575.781717 30 ctime=1513200663.128785752 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/ARC0mod.pm0000644000175000002070000001301712256747723025325 0ustar00mockbuildmock00000000000000package ARC0mod; # # Loads ARC0.6 LRMS modules for use with ARC1 # # To include a new (ARC 0.6) LRMS plugin: # # 1. Each LRMS specific module needs to provide subroutines # cluster_info, queue_info, jobs_info, and users_info. # # 2. 
References to subroutines defined in new LRMS modules are added # to the select_lrms subroutine in this module, and the module reference # itself, naturally. # NB: ARC0 modules use minutes for time units. ARC1 modules use seconds. require Exporter; our @ISA = qw(Exporter); our @EXPORT_OK = qw(get_lrms_info get_lrms_options_schema); use LogUtils; use strict; our $log = LogUtils->getLogger(__PACKAGE__); our %modnames = ( PBS => "PBS", SGE => "SGE", LL => "LL", LSF => "LSF", DGBRIDGE => "DGBridge", CONDOR => "Condor", SLURM => "SLURM", BOINC => "Boinc", FORK => "Fork" ); # Whether the module implements support for listing nodes. our $has_nodes = 1; sub load_lrms($) { my $lrms_name = uc(shift); my $module = $modnames{$lrms_name}; $log->error("No ARC0 module for $lrms_name") unless $module; eval { require "$module.pm" }; $log->error("Failed to load LRMS module $module: $@") if $@; import $module qw(cluster_info queue_info jobs_info users_info); eval { import $module qw(nodes_info) }; if ($@) { $log->debug("LRMS module $module does not export 'nodes_info'"); $has_nodes=0; } $LogUtils::default_logger = LogUtils->getLogger($module); } # Just generic options, cannot assume anything LRMS specific here sub get_lrms_options_schema { return { 'lrms' => '', # name of the LRMS module 'queues' => { # queue names are keys in this hash '*' => { 'users' => [ '' ] # list of user IDs to query in the LRMS } }, 'jobs' => [ '' ] # list of jobs IDs to query in the LRMS } } sub get_lrms_info($) { my $options = shift; my %cluster_config = %$options; delete $cluster_config{queues}; delete $cluster_config{jobs}; my $lrms_info = {cluster => {}, queues => {}, jobs => {}}; my $cluster_info = { cluster_info(\%cluster_config) }; delete $cluster_info->{queue}; $lrms_info->{cluster} = delete_empty($cluster_info); $lrms_info->{nodes} = { nodes_info(\%cluster_config) } if $has_nodes; for my $qname ( keys %{$options->{queues}} ) { my %queue_config = (%cluster_config, %{$options->{queues}{$qname}}); delete $queue_config{users}; my $jids = $options->{jobs}; # TODO: interface change: jobs under each queue my $jobs_info = { jobs_info(\%queue_config, $qname, $jids) }; for my $job ( values %$jobs_info ) { $job->{status} ||= 'EXECUTED'; delete_empty($job); } $lrms_info->{jobs} = { %{$lrms_info->{jobs}}, %$jobs_info }; my $queue_info = { queue_info(\%queue_config, $qname) }; $lrms_info->{queues}{$qname} = delete_empty($queue_info); my $users = $options->{queues}{$qname}{users}; $queue_info->{users} = { users_info(\%queue_config, $qname, $users) }; for my $user ( values %{$queue_info->{users}} ) { my $freecpus = $user->{freecpus}; $user->{freecpus} = split_freecpus($freecpus) if defined $freecpus; delete_empty($user); } $queue_info->{acl_users} = $queue_config{acl_users} if defined $queue_config{acl_users}; } # ARC0 LRMS plugins use minutes. Convert to seconds here. 
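# For example, a queue maxwalltime of 60 reported by an ARC0 module becomes int(60*60) = 3600 seconds below, and a job cputime of 5 becomes 300.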
for my $queue (values %{$lrms_info->{queues}}) { $queue->{minwalltime} = int 60*$queue->{minwalltime} if $queue->{minwalltime}; $queue->{mincputime} = int 60*$queue->{mincputime} if $queue->{mincputime}; $queue->{maxwalltime} = int 60*$queue->{maxwalltime} if $queue->{maxwalltime}; $queue->{maxcputime} = int 60*$queue->{maxcputime} if $queue->{maxcputime}; $queue->{defaultwallt} = int 60*$queue->{defaultwallt} if $queue->{defaultwallt}; $queue->{defaultcput} = int 60*$queue->{defaultcput} if $queue->{defaultcput}; } for my $job (values %{$lrms_info->{jobs}}) { $job->{reqcputime} = int 60*$job->{reqcputime} if $job->{reqcputime}; $job->{reqwalltime} = int 60*$job->{reqwalltime} if $job->{reqwalltime}; $job->{cputime} = int 60*$job->{cputime} if $job->{cputime}; $job->{walltime} = int 60*$job->{walltime} if $job->{walltime}; delete $job->{nodes} unless @{$job->{nodes}}; delete $job->{comment} unless @{$job->{comment}}; } return $lrms_info; } sub delete_empty($) { my $hashref = shift; foreach my $k ( keys %$hashref) { delete $hashref->{$k} if ! defined $hashref->{$k} || $hashref->{$k} eq ''; } return $hashref; } # Convert frecpus string into a hash. # Example: "6 11:2880 23:1440" --> { 6 => 0, 11 => 2880, 23 => 1440 } # OBS: Assuming the function cpu vs. time is monotone, this transformation is safe. sub split_freecpus($) { my $freecpus_string = shift; my $freecpus_hash = {}; for my $countsecs (split ' ', $freecpus_string) { if ($countsecs =~ /^(\d+)(?::(\d+))?$/) { $freecpus_hash->{$1} = $2 || 0; # 0 means unlimited } else { $log->warning("Bad freecpus string: $freecpus_string"); return {}; } } return $freecpus_hash; } 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/Sysinfo.pm0000644000000000000000000000012412122115200025466 xustar000000000000000027 mtime=1363712640.104238 27 atime=1513200575.834718 30 ctime=1513200663.142785923 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/Sysinfo.pm0000644000175000002070000002747012122115200025545 0ustar00mockbuildmock00000000000000package Sysinfo; use strict; use POSIX; use Sys::Hostname; use Exporter; our @ISA = ('Exporter'); # Inherit from Exporter our @EXPORT_OK = qw(cpuinfo meminfo osinfo processid diskinfo diskspaces); use LogUtils; our $log = LogUtils->getLogger(__PACKAGE__); # Return PIDs of named commands owned by the current user # Only one pid is returned per command name sub processid (@) { my @procs = `ps -u $< -o pid,comm 2>/dev/null`; if ( $? 
!= 0 ) { $log->info("Failed running: ps -u $< -o pid,comm"); $log->warning("Failed listing processes"); return {}; } shift @procs; # throw away header line # make hash of comm => pid my %all; /\s*(\d+)\s+(.+)/ and $all{$2} = $1 for @procs; my %pids; foreach my $name ( @_ ) { $pids{$name} = $all{$name} if $all{$name}; } return \%pids; } sub cpuinfo { my $info = {}; my $nsockets; # total number of physical cpu sockets my $ncores; # total number of cpu cores my $nthreads; # total number of hardware execution threads if (-f "/proc/cpuinfo") { # Linux variant my %sockets; # cpu socket IDs my %cores; # cpu core IDs open (CPUINFO, "warning("Failed opening /proc/cpuinfo: $!"); while ( my $line = ) { if ($line=~/^model name\s*:\s+(.*)$/) { $info->{cpumodel} = $1; } elsif ($line=~/^vendor_id\s+:\s+(.*)$/) { $info->{cpuvendor} = $1; } elsif ($line=~/^cpu MHz\s+:\s+(.*)$/) { $info->{cpufreq} = int $1; } elsif ($line=~/^stepping\s+:\s+(.*)$/) { $info->{cpustepping} = int $1; } elsif ($line=~/^processor\s*:\s+(\d+)$/) { ++$nthreads; } elsif ($line=~/^physical id\s*:\s+(\d+)$/) { ++$sockets{$1}; } elsif ($line=~/^core id\s*:\s+(\d+)$/) { ++$cores{$1}; } } close CPUINFO; if ($info->{cpumodel} =~ m/^(.*?)\s+@\s+([.\d]+)GHz$/) { $info->{cpumodel} = $1; $info->{cpufreq} = int($2*1000); } elsif ($info->{cpumodel} =~ m/\s+([.\d]+)GHz$/) { $info->{cpufreq} = int($1*1000); } elsif ($info->{cpumodel} =~ m/\s+([.\d]+)MHz$/) { $info->{cpufreq} = int($1); } # count total cpu cores and sockets $ncores = scalar keys %cores; $nsockets = scalar keys %sockets; if ($nthreads) { # if /proc/cpuinfo does not provide socket and core IDs, # assume every thread represents a separate cpu $ncores = $nthreads unless $ncores; $nsockets = $nthreads unless $nsockets; } } elsif (-x "/usr/sbin/system_profiler") { # OS X my @lines = `/usr/sbin/system_profiler SPHardwareDataType`; $log->warning("Failed running system_profiler: $!") if $?; for my $line ( @lines ) { if ($line =~ /Processor Name:\s*(.*)/) { $info->{cpumodel} = $1; } elsif ($line =~ /Processor Speed:\s*([.\d]+) (\w+)/) { if ($2 eq "MHz") { $info->{cpufreq} = int $1; } elsif ($2 eq "GHz") { $info->{cpufreq} = int 1000*$1; } } elsif ($line =~ /Number Of Processors:\s*(.+)/) { $nsockets = $1; } elsif ($line =~ /Total Number Of Cores:\s*(.+)/) { $ncores = $1; $nthreads = $1; # Assume 1 execution thread per core (Ouch!) 
} } } elsif (-x "/usr/bin/kstat" ) { # Solaris my %chips; eval { require Sun::Solaris::Kstat; my $ks = Sun::Solaris::Kstat->new(); my $cpuinfo = $ks->{cpu_info}; $log->error("kstat: key not found: cpu_info") unless defined $cpuinfo; for my $id (keys %$cpuinfo) { my $info = $cpuinfo->{$id}{"cpu_info$id"}; $log->error("kstat: key not found: cpu_info$id") unless defined $info; $chips{$info->{chip_id}}++; $nthreads++; } my $info = $cpuinfo->{0}{"cpu_info0"}; $log->error("kstat: key not found: cpu_info0") unless defined $info; # $info->{cpumodel} = $info->{cpu_type}; # like sparcv9 $info->{cpumodel} = $info->{implementation}; # like UltraSPARC-III+ $info->{cpufreq} = int $info->{clock_MHz}; }; if ($@) { $log->error("Failed running module Sun::Solaris::Kstat: $@"); } $nsockets = $ncores = scalar keys %chips; } else { $log->warning("Unsupported operating system"); } $info->{cputhreadcount} = $nthreads if $nthreads; $info->{cpucorecount} = $ncores if $ncores; $info->{cpusocketcount} = $nsockets if $nsockets; return $info; } sub meminfo { my ($memtotal, $swaptotal); if (-f "/proc/cpuinfo") { # Linux variant open (MEMINFO, "warning("Failed opening /proc/meminfo: $!"); while ( my $line = ) { if ($line =~ /^MemTotal:\s+(.*) kB$/) { $memtotal = int ($1/1024); } elsif ($line =~ /^SwapTotal:\s+(.*) kB$/) { $swaptotal = int ($1/1024); } } } my $info = {}; $info->{pmem} = $memtotal if $memtotal; $info->{vmem} = $memtotal + $swaptotal if $memtotal and $swaptotal; return $info; } sub osinfo { my $info = {}; my ($sysname, $nodename, $release, $version, $machine) = POSIX::uname(); $info->{machine} = $machine; $info->{sysname} = $sysname; $info->{release} = $release; if ($sysname =~ /linux/i) { my ($id, $descr, $version); if (-x '/usr/bin/lsb_release' or -x '/bin/lsb_release') { if (open RELEASE, 'lsb_release -a |') { while () { $id = lc $1 if m/^Distributor ID:\s+(.*)/; $descr = $1 if m/^Description:\s+(.*)/; $version = $1 if m/^Release:\s+([.\d]+)/; } } close RELEASE; } elsif (open RELEASE, '< /etc/lsb-release') { while () { $id = lc $1 if m/^DISTRIB_ID=(.*)/; $descr = $1 if m/^DISTRIB_DESCRIPTION=(.*)/; $version = $1 if m/^DISTRIB_RELEASE=([.\d]+)/; } close RELEASE; } elsif (open RELEASE, '< /etc/redhat-release') { ($descr, $version) = ($1,$2) if =~ m/(.*) release ([.\d]+)/; close RELEASE; } elsif (open RELEASE, '< /etc/debian_version') { $version = $1 if =~ m/^([.\d]+)$/; $id = 'debian'; close RELEASE; } elsif (open RELEASE, '< /etc/SuSE-release') { while () { $version = $1 if m/^VERSION\s*=\s*([.\d]+)/; } $id = 'suse'; close RELEASE; } elsif (open RELEASE, '< /etc/gentoo-release') { $version = $1 if =~ m/.* version ([.\d]+)/; $id = 'gentoo'; close RELEASE; } # Try to stay within the predefined values for OSName_t from GLUE2 spec (GFD.147). 
if ($descr) { $id = 'centos' if $descr =~ m/^CentOS/i; $id = 'fedoracore' if $descr =~ m/^Fedora/i; $id = 'scientificlinux' if $descr =~ m/^Scientific Linux/i; $id = 'scientificlinuxcern' if $descr =~ m/^Scientific Linux CERN/i; $id = 'redhatenterpriseas' if $descr =~ m/^Red Hat Enterprise/i and not $id; } $info->{osname} = $id if $id; $info->{osversion} = $version if $version; } elsif ($sysname eq 'Darwin') { my $version = `sw_vers -productVersion`; chomp $version; if ($version =~ m/10\.[\d.]+/) { my $name; $info->{osname} = 'panther' if $version =~ m/^10\.3/; $info->{osname} = 'tiger' if $version =~ m/^10\.4/; $info->{osname} = 'leopard' if $version =~ m/^10\.5/; $info->{osname} = 'snowleopard' if $version =~ m/^10\.6/; $info->{osversion} = $version; } } elsif ($sysname eq 'SunOS') { $release =~ s/^5\.//; # SunOS 5.10 == solaris 10 $info->{osname} = 'solaris'; $info->{osversion} = $release; } return $info; } # # Returns disk space (total and free) in MB on a filesystem # sub diskinfo ($) { my $path = shift; my ($diskfree, $disktotal, $mountpoint); if ( -d "$path") { # check if on afs if ($path =~ m#/afs/#) { my @dfstring =`fs listquota $path 2>/dev/null`; if ($? != 0) { $log->warning("Failed running: fs listquota $path"); } elsif ($dfstring[-1] =~ /\s+(\d+)\s+(\d+)\s+\d+%\s+\d+%/) { $disktotal = int $1/1024; $diskfree = int(($1 - $2)/1024); $mountpoint = '/afs'; } else { $log->warning("Failed interpreting output of: fs listquota $path"); } # "ordinary" disk } else { my @dfstring =`df -k $path 2>/dev/null`; if ($? != 0) { $log->warning("Failed running: df -k $path"); # The first column may be printed on a separate line. # The relevant numbers are always on the last line. } elsif ($dfstring[-1] =~ /\s+(\d+)\s+\d+\s+(\d+)\s+\d+%\s+(\/\S*)$/) { $disktotal = int $1/1024; $diskfree = int $2/1024; $mountpoint = $3; } else { $log->warning("Failed interpreting output of: df -k $path"); } } } else { $log->warning("No such directory: $path"); } return undef if not defined $disktotal; return {megstotal => $disktotal, megsfree => $diskfree, mountpoint => $mountpoint}; } # Given a list of paths, it finds the set of mount points of the filesystems # containing the paths. 
It then returns a hash with these keys: # ndisks: number of distinct nount points # freesum: sum of free space on all mount points # freemin: minimum free space of any mount point # freemax: maximum free space of any mount point # totalsum: sum of total space on all mountpoints # totalmin: minimum total space of any mount point # totalmax: maximum total space of any mount point # errors: the number of non-existing paths sub diskspaces { my ($freesum, $freemin, $freemax); my ($totalsum, $totalmin, $totalmax); my $errors = 0; my %mounts; for my $path (@_) { my $di = diskinfo($path); if ($di) { my ($total, $free, $mount) = ($di->{megstotal},$di->{megsfree},$di->{mountpoint}); $mounts{$mount}{free} = $free; $mounts{$mount}{total} = $total; } else { ++$errors; } } for my $stats (values %mounts) { if (defined $freesum) { $freesum += $stats->{free}; $freemin = $stats->{free} if $freemin > $stats->{free}; $freemax = $stats->{free} if $freemax < $stats->{free}; $totalsum += $stats->{total}; $totalmin = $stats->{total} if $totalmin > $stats->{total}; $totalmax = $stats->{total} if $totalmax < $stats->{total}; } else { $freesum = $freemin = $freemax = $stats->{free}; $totalsum = $totalmin = $totalmax = $stats->{total}; } } return ( ndisks => scalar keys %mounts, freesum => $freesum || 0, freemin => $freemin || 0, freemax => $freemax || 0, totalsum => $totalsum || 0, totalmin => $totalmin || 0, totalmax => $totalmax || 0, errors => $errors ); } 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/PBS.pm0000644000000000000000000000012412540535326024503 xustar000000000000000027 mtime=1434630870.745566 27 atime=1513200575.831718 30 ctime=1513200663.132785801 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PBS.pm0000644000175000002070000010627212540535326024560 0ustar00mockbuildmock00000000000000package PBS; use strict; our @ISA = ('Exporter'); # Module implements these subroutines for the LRMS interface our @EXPORT_OK = ('cluster_info', 'queue_info', 'jobs_info', 'users_info', 'nodes_info'); use LogUtils ( 'start_logging', 'error', 'warning', 'debug' ); ########################################## # Saved private variables ########################################## our(%lrms_queue); my (%user_jobs_running, %user_jobs_queued); # the queue passed in the latest call to queue_info, jobs_info or users_info my $currentqueue = undef; # cache info about nodes my $pbsnodes; # PBS type and flavour my $lrms_type = undef; my $lrms_version = undef; # Resets queue-specific global variables if # the queue has changed since the last call sub init_globals($) { my $qname = shift; if (not defined $currentqueue or $currentqueue ne $qname) { $currentqueue = $qname; %lrms_queue = (); %user_jobs_running = (); %user_jobs_queued = (); } } # get PBS type and version sub get_pbs_version ($) { return unless not defined $lrms_type; # path to LRMS commands my ($config) = shift; my ($path) = $$config{pbs_bin_path}; # determine the flavour and version of PBS my $qmgr_string=`$path/qmgr -c "list server"`; if ( $? 
!= 0 ) { warning("Can't run qmgr"); } if ($qmgr_string =~ /pbs_version = \b(\D+)_(\d\S+)\b/) { $lrms_type = $1; $lrms_version = $2; } else { $qmgr_string =~ /pbs_version = \b(\d\S+)\b/; $lrms_type = "torque"; $lrms_version = $1; } } ########################################## # Private subs ########################################## sub read_pbsnodes ($) { return %$pbsnodes if $pbsnodes; #processing the pbsnodes output by using a hash of hashes # %hoh_pbsnodes (referrenced by $hashref) my ( $path ) = shift; my ( %hoh_pbsnodes); my ($nodeid,$node_var,$node_value); unless (open PBSNODESOUT, "$path/pbsnodes -a 2>/dev/null |") { error("error in executing pbsnodes"); } while (my $line= ) { if ($line =~ /^$/) {next}; if ($line =~ /^([\w\-]+)/) { $nodeid= $1 ; next; } if ($line =~ / = /) { ($node_var,$node_value) = split (/ = /, $line); $node_var =~ s/\s+//g; chop $node_value; } $hoh_pbsnodes{$nodeid}{$node_var} = $node_value; } close PBSNODESOUT; $pbsnodes = \%hoh_pbsnodes; return %hoh_pbsnodes; } # Splits up the value of the exec_host string. # Returns a list of node names, one for each used cpu. # Should handle node specs of the form: # (1) node1/0+node0/1+node0/2+node2/1 (torque) # (2) hosta/J1+hostb/J2*P (according to the PBSPro manual) # (3) node1+node1+node2+node2 # (4) altix:ssinodes=2:mem=7974912kb:ncpus=4 (found on the web) # (5) grid-wn0749.desy.de/2 Resource_List.neednodes=1:ppn=8 (reported by Andreas Gellrich from Desy HH) sub split_hostlist { my ($exec_host_string) = @_; my @nodes; my $err; for my $nodespec (split '\+', $exec_host_string) { if ($nodespec =~ m{^([^/:]+)/\d+(?:\*(\d+))?$}) { # cases (1) and (2) my ($nodename, $multiplier) = ($1, $2 || 1); push @nodes, $nodename for 1..$multiplier; } elsif ($nodespec =~ m{^([^/:]+)(?::(.+))?$}) { # cases (3) and (4) my ($nodename, $resc) = ($1, $2 || ''); my $multiplier = get_ncpus($resc); push @nodes, $nodename for 1..$multiplier; } elsif ($nodespec =~ m{^([^/]+)/\d+ Resource_List\.neednodes=(\d+):ppn=(\d+)?$} ){ # case (5) my $nodename = $1; my $numnodes = $2 || 1; my $ppn = $3 || 1; my $multiplier = $ppn; #Not sure if this is the correct multiplier. Is there an entry for each node? or should multiplier be numnodes*ppn? push @nodes, $nodename for 1..$multiplier; } else { $err = $nodespec; } } warning("failed counting nodes in expression: $exec_host_string") if $err; return @nodes; } # Deduces the number of requested cpus from the values of these job properties: # Resource_List.select (PBSPro 8+) # Resource_List.nodes # Resource_List.ncpus sub set_cpucount { my ($job) = (@_); my $select = $job->{"Resource_List.select"}; my $nodes = $job->{"Resource_List.nodes"}; my $ncpus = $job->{"Resource_List.ncpus"}; $job->{cpus} = count_usedcpus($select, $nodes, $ncpus); delete $job->{"Resource_List.select"}; delete $job->{"Resource_List.nodes"}; delete $job->{"Resource_List.ncpus"}; } # Convert time from [DD:[HH:[MM:]]]SS to minutes sub count_time { my $pbs_time = shift; # split and reverse PBS time to start from seconds, then drop seconds my @t = reverse split /:/, $pbs_time; my $minutes = 0; if ( ! 
defined $t[1] ) { # PBS seconds only case $minutes = int( $t[0] / 60 ); } else { # drop seconds shift @t; $minutes = $t[0]; $minutes += $t[1]*60 if defined $t[1]; $minutes += $t[2]*60*24 if defined $t[2]; } return $minutes; } sub count_usedcpus { my ($select, $nodes, $ncpus) = @_; return sum_over_chunks(\&cpus_in_select_chunk, $select) if defined $select; return $ncpus || 1 if not defined $nodes or $nodes eq '1'; return sum_over_chunks(\&cpus_in_nodes_chunk, $nodes) if defined $nodes; return 1; } sub sum_over_chunks { my ($count_func, $string) = @_; my $totalcpus = 0; for my $chunk (split '\+', $string) { my $cpus = &$count_func($chunk); $totalcpus += $cpus; } return $totalcpus; } # counts cpus in chunk definitions of the forms found in Resource_List.nodes property # 4 # 2:ppn=2 # host1 # host1:prop1:prop2 # prop1:prop2:ppn=4 sub cpus_in_nodes_chunk { my ($chunk) = @_; my ($ncpus,$dummy) = split ':', $chunk; $ncpus = 1 if $ncpus =~ m/\D/; # chunk count ommited return $ncpus * get_ppn($chunk); } # counts cpus in chunk definitions of the forms found in Resource_List.select (PBSPro 8+): # 4 # 2:ncpus=1 # 1:ncpus=4:mpiprocs=4:host=hostA sub cpus_in_select_chunk { my ($chunk) = @_; return $1 if $chunk =~ m/^(\d+)$/; if ($chunk =~ m{^(\d+):(.*)$}) { my ($cpus, $resc) = ($1, $2); return $cpus * get_ncpus($resc); } return 0; # not a valid chunk } # extracts the value of ppn from a string like blah:ppn=N:blah sub get_ppn { my ($resc) = @_; for my $res (split ':', $resc) { return $1 if $res =~ m /^ppn=(\d+)$/; } return 1; } # extracts the value of ncpus from a string like blah:ncpus=N:blah sub get_ncpus { my ($resc) = @_; for my $res (split ':', $resc) { return $1 if $res =~ m /^ncpus=(\d+)$/; } return 1; } sub get_variable($$){ my $match = shift; my $string = shift; $string =~ m/(\w\s)*?$match\s?[=:] ((\w|\s|\/|,|.|:|;|\[|\]|\(|\)|-)*?)($| \w+=.*)/ ; my $var = $2; return $var; } sub read_qstat_f ($) { my $path=shift; unless ( open QSTAT_F, "$path/qstat -f 2>/dev/null |") { error("Error in executing qstat"); } my $jobid=""; my %qstat_jobs; my $rs=$/; $/=""; while () { s/\n\t//g; foreach(split(/\n/)) { my $string = $_; if ($string =~ /Job Id/) { $jobid= get_variable("Job Id", $string); } if (!$jobid) { next; } if ($string =~ /Resource_List.nodes/){ $qstat_jobs{$jobid}{"Resource_List.nodes"}= get_variable("Resource_List.nodes",$string); } if ($string =~ /exec_host/){ $qstat_jobs{$jobid}{"exec_host"}= get_variable("exec_host",$string); } if ($string =~ /job_state/){ $qstat_jobs{$jobid}{"job_state"}= get_variable("job_state",$string); } if ($string =~ /Resource_List.select/){ $qstat_jobs{$jobid}{"Resource_List.select"}= get_variable("Resource_List.select",$string); } if ($string =~ /Resource_List.ncpus/){ $qstat_jobs{$jobid}{"Resource_List.ncpus"}= get_variable("Resource_List.ncpus",$string); } } }; $/=$rs; close QSTAT_F; return %qstat_jobs; } # gets information about each destination queue behind a # routing queue and copies it into the routing queue data structure. 
# at the moment it only copies data from the first queue # # input: $queue name of the current queue # $path to pbs binaries # $singledqueue that contains the only queue behind the routing one # %qstat{} for the current queue # output: the %dqueue hash containing info about destination queues # in %lrms_queue fashion sub process_dqueues($$%){ my $qname = shift; my $path = shift; my (%qstat) = %{$_[0]}; my $singledqueue; my %dqueues; # build DQs data structure my @dqnames; if (defined $qstat{'route_destinations'}) { @dqnames=split(",",$qstat{'route_destinations'}); @dqueues{@dqnames}=undef; foreach my $dqname ( keys %dqueues ) { debug("Processing queues behind routing queue. Current queue is $dqname"); my (%dqstat); unless (open QSTATOUTPUT, "$path/qstat -Q -f $dqname 2>/dev/null |") { error("Error in executing qstat: $path/qstat -Q -f $dqname"); } while (my $line= ) { if ($line =~ m/ = /) { chomp($line); my ($dqstat_var,$dqstat_value) = split("=", $line); $dqstat_var =~ s/\s+//g; $dqstat_value =~ s/\s+//g; $dqstat{$dqstat_var}=$dqstat_value; } } close QSTATOUTPUT; $dqueues{$dqname}=\%dqstat; } # debug($dqueues{'verylong'}{'resources_max.walltime'}); } else { error("No route_destinations for routing queue $qname. Please check LRMS configuration."); } # take the first destination queue behind the RQ, copy its data to the RQ # this happens only if the RQ has no data defined on PBS # this should solve bug #859 $singledqueue=shift(@dqnames); debug('Just one queue behind routing queue is currently supported: '.$singledqueue); my @attributes=( 'max_running', 'max_user_run', 'max_queuable', 'resources_max.cput', 'resources_min.cput', 'resources_default.cput', 'resources_max.walltime', 'resources_min.walltime', 'resources_default.walltime', 'state_count' ); foreach my $rkey (@attributes) { # line to check queues under routing queue values. Undefined values generate crap in logs, # so is commented out. # debug('with key '.$rkey.' qstat returns '.$qstat{$rkey}.' and the dest. queue has '.$dqueues{$singledqueue}{$rkey} ); if (!defined $qstat{$rkey}) {${$_[0]}{$rkey}=$dqueues{$singledqueue}{$rkey};}; } return %dqueues; } ############################################ # Public subs ############################################# sub cluster_info ($) { # Path to LRMS commands my ($config) = shift; my ($path) = $$config{pbs_bin_path}; # Return data structure %lrms_cluster{$keyword} # # All values should be defined, empty values "" are ok if field # does not apply to particular LRMS. 
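    # Illustrative sketch of what this routine fills in (field names as used
    # below, example values made up):
    #   %lrms_cluster = ( lrms_type => 'torque', lrms_version => '4.2.10',
    #                     totalcpus => 128, usedcpus => 96, queuedcpus => 12,
    #                     runningjobs => 40, queuedjobs => 7,
    #                     cpudistribution => ' 8cpu:16',
    #                     queue => [ 'batch', 'long' ] );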
my (%lrms_cluster); # flavour and version of PBS get_pbs_version($config); $lrms_cluster{lrms_type} = $lrms_type; $lrms_cluster{lrms_glue_type}=lc($lrms_type); $lrms_cluster{lrms_version} = $lrms_version; if ( $lrms_type eq "torque" and exists $$config{scheduling_policy} and lc($$config{scheduling_policy}) eq "maui") { $lrms_cluster{lrms_glue_type}="torquemaui"; } # PBS treats cputime limit for parallel/multi-cpu jobs as job-total $lrms_cluster{has_total_cputime_limit} = 1; # processing the pbsnodes output by using a hash of hashes %hoh_pbsnodes my ( %hoh_pbsnodes ) = read_pbsnodes( $path ); error("The given flavour of PBS $lrms_cluster{lrms_type} is not supported") unless grep {$_ eq lc($lrms_cluster{lrms_type})} qw(openpbs spbs torque pbspro); $lrms_cluster{totalcpus} = 0; my ($number_of_running_jobs) = 0; $lrms_cluster{cpudistribution} = ""; my (@cpudist) = 0; my %available_nodes = (); # loop over all available nodes foreach my $node (keys %hoh_pbsnodes) { # skip nodes that does not conform dedicated_node_string filter if ( exists $$config{dedicated_node_string} && $$config{dedicated_node_string} ne "") { next unless ( $hoh_pbsnodes{$node}{"properties"} =~ m/^([^,]+,)*$$config{dedicated_node_string}(,[^,]+)*$/); } # add node to available_nodes hash $available_nodes{$node} = 1; # get node state and number of CPUs my ($nodestate) = $hoh_pbsnodes{$node}{"state"}; my $nodecpus; if ($hoh_pbsnodes{$node}{'np'}) { $nodecpus = $hoh_pbsnodes{$node}{'np'}; } elsif ($hoh_pbsnodes{$node}{'resources_available.ncpus'}) { $nodecpus = $hoh_pbsnodes{$node}{'resources_available.ncpus'}; } next if ($nodestate=~/down/ or $nodestate=~/offline/); if ($nodestate=~/(?:,|^)busy/) { $lrms_cluster{totalcpus} += $nodecpus; $cpudist[$nodecpus] +=1; $number_of_running_jobs += $nodecpus; next; } $lrms_cluster{totalcpus} += $nodecpus; $cpudist[$nodecpus] += 1; if ($hoh_pbsnodes{$node}{"jobs"}){ $number_of_running_jobs++; my ( @comma ) = ($hoh_pbsnodes{$node}{"jobs"}=~ /,/g); $number_of_running_jobs += @comma; } } # form LRMS cpudistribution string for (my $i=0; $i<=$#cpudist; $i++) { next unless ($cpudist[$i]); $lrms_cluster{cpudistribution} .= " ".$i."cpu:".$cpudist[$i]; } # read the qstat -n information about all jobs # queued cpus, total number of cpus in all jobs $lrms_cluster{usedcpus} = 0; $lrms_cluster{queuedcpus} = 0; $lrms_cluster{queuedjobs} = 0; $lrms_cluster{runningjobs} = 0; my %qstat_jobs = read_qstat_f($path); for my $key (keys %qstat_jobs) { # usercpus (running jobs) if ( $qstat_jobs{$key}{job_state} =~ /R/) { $lrms_cluster{runningjobs}++; my @nodes = split_hostlist($qstat_jobs{$key}{exec_host}); # filter using dedicated_node_string foreach my $node ( @nodes ) { next unless defined $available_nodes{$node}; $lrms_cluster{usedcpus}++; } } # if ( $qstat_jobs{$key}{job_state} =~ /(W|T|Q)/) { $lrms_cluster{queuedjobs}++; $lrms_cluster{queuedcpus}+=count_usedcpus($qstat_jobs{$key}{"Resource_List.select"}, $qstat_jobs{$key}{"Resource_List.nodes"}, $qstat_jobs{$key}{"Resource_List.ncpus"}); } } # Names of all LRMS queues @{$lrms_cluster{queue}} = (); unless (open QSTATOUTPUT, "$path/qstat -Q 2>/dev/null |") { error("Error in executing qstat"); } while (my $line= ) { if ( $. == 1 or $. 
== 2 ) {next} # Skip header lines my (@a) = split " ", $line; push @{$lrms_cluster{queue}}, $a[0]; } close QSTATOUTPUT; return %lrms_cluster; } sub queue_info ($$) { # Path to LRMS commands my ($config) = shift; my ($path) = $$config{pbs_bin_path}; # Name of the queue to query my ($qname) = shift; init_globals($qname); # The return data structure is %lrms_queue. # In this template it is defined as persistent module data structure, # because it is later used by jobs_info(), and we wish to avoid # re-construction of it. If it were not needed later, it would be defined # only in the scope of this subroutine, as %lrms_cluster previously. # Return data structure %lrms_queue{$keyword} # # All values should be defined, empty values "" are ok if field # does not apply to particular LRMS. # read the queue information for the queue entry from the qstat my (%qstat); unless (open QSTATOUTPUT, "$path/qstat -Q -f $qname 2>/dev/null |") { error("Error in executing qstat: $path/qstat -Q -f $qname"); } while (my $line= ) { if ($line =~ m/ = /) { chomp($line); my ($qstat_var,$qstat_value) = split("=", $line); $qstat_var =~ s/\s+//g; $qstat_value =~ s/\s+//g; $qstat{$qstat_var}=$qstat_value; } } close QSTATOUTPUT; # this script contain a solution for a single queue behind the # routing one, the routing queue will inherit some of its # attributes. # this hash contains qstat records for queues - in this case just one my %dqueues; # this variable contains the single destination queue my $singledqueue; if ($qstat{queue_type} =~ /Route/) { %dqueues = process_dqueues($qname,$path,\%qstat); $singledqueue = ( keys %dqueues )[0]; } else { undef %dqueues; undef $singledqueue; } # publish queue limits parameters # general limits (publish as is) my (%keywords); my (%keywords_all) = ( 'max_running' => 'maxrunning', 'max_user_run' => 'maxuserrun', 'max_queuable' => 'maxqueuable' ); # TODO: MinSlots, etc. my (%keywords_torque) = ( 'resources_max.procct' => 'MaxSlotsPerJob' ); my (%keywords_pbspro) = ( 'resources_max.ncpus' => 'MaxSlotsPerJob' ); get_pbs_version($config); if ( $lrms_type eq lc "torque" ) { %keywords = (%keywords_all, %keywords_torque); } elsif ( $lrms_type eq lc "pbspro" ) { %keywords = (%keywords_all, %keywords_pbspro); } else { %keywords = %keywords_all; } foreach my $k (keys %keywords) { if (defined $qstat{$k} ) { $lrms_queue{$keywords{$k}} = $qstat{$k}; } else { $lrms_queue{$keywords{$k}} = ""; } } # queue time limits (convert to minutes) %keywords = ( 'resources_max.cput' => 'maxcputime', 'resources_min.cput' => 'mincputime', 'resources_default.cput' => 'defaultcput', 'resources_max.walltime' => 'maxwalltime', 'resources_min.walltime' => 'minwalltime', 'resources_default.walltime' => 'defaultwallt' ); foreach my $k (keys %keywords) { if ( defined $qstat{$k} ) { $lrms_queue{$keywords{$k}} = (&count_time($qstat{$k})+($k eq 'resources_min.cput'?1:0)); } else { $lrms_queue{$keywords{$k}} = ""; } } # determine the queue status from the LRMS # Used to be set to 'active' if the queue can accept jobs # Now lists the number of available processors, "0" if no free # cpus. Negative number signals some error state of PBS # (reserved for future use). 
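    # Illustrative example (made-up qstat output) of how the counters below
    # are derived: a queue reporting
    #   state_count = Transit:0 Queued:5 Held:0 Waiting:0 Running:12 Exiting:0
    # gives $lrms_queue{queued} = 5 and $lrms_queue{running} = 12, while
    # $lrms_queue{status} is set to the number of free cpus found via pbsnodes.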
# processing the pbsnodes output by using a hash of hashes %hoh_pbsnodes my ( %hoh_pbsnodes ) = read_pbsnodes( $path ); $lrms_queue{status} = -1; $lrms_queue{running} = 0; $lrms_queue{queued} = 0; $lrms_queue{totalcpus} = 0; if ( ($qstat{"enabled"} =~ /True/) and ($qstat{"started"} =~ /True/)) { unless (open QSTATOUTPUT, "$path/qstat -Q -f $qname 2>/dev/null |") { error("Error in executing qstat: $path/qstat -Q -f $qname"); } my %qstat; while (my $line= ) { if ($line =~ m/ = /) { chomp($line); my ($qstat_var,$qstat_value) = split("=", $line); $qstat_var =~ s/\s+//g; $qstat_value =~ s/\s+//g; $qstat{$qstat_var}=$qstat_value; } } close QSTATOUTPUT; # refresh routing queue records, in case something changed on the # destination queues if ($qstat{queue_type} =~ /Route/) { debug("CPUs calculation pass. Queues are scanned a second time. Current queue is: $qstat{queue_type}"); %dqueues = process_dqueues($qname,$path,\%qstat); # this variable contains the single destination queue $singledqueue = ( keys %dqueues )[0]; } else { undef %dqueues; undef $singledqueue; } # qstat does not return number of cpus, use pbsnodes instead. my ($torque_freecpus,$torque_totalcpus)=(0,0); foreach my $node (keys %hoh_pbsnodes){ # If pbsnodes have properties assigned to them # check if queuename or dedicated_node_string matches. # $singledqueue check has been added for routing queue support, # also the destination queue is checked to calculate totalcpus # also adds correct behaviour for queue_node_string if ( ( ! defined $hoh_pbsnodes{$node}{'properties'} ) || ( ( defined $qname && $hoh_pbsnodes{$node}{'properties'} =~ m/^([^,]+,)*$qname(,[^,]+)*$/ ) || ( defined $$config{dedicated_node_string} && $hoh_pbsnodes{$node}{'properties'} =~ m/^([^,]+,)*$$config{dedicated_node_string}(,[^,]+)*$/ ) || ( defined $$config{queue_node_string} && $hoh_pbsnodes{$node}{'properties'} =~ m/^([^,]+,)*$$config{queue_node_string}(,[^,]+)*$/ ) || ( defined $singledqueue && $hoh_pbsnodes{$node}{'properties'} =~ m/^([^,]+,)*$singledqueue(,[^,]+)*$/ ) ) ) { my $cpus; next if $hoh_pbsnodes{$node}{'state'} =~ m/offline/; next if $hoh_pbsnodes{$node}{'state'} =~ m/down/; if ($hoh_pbsnodes{$node}{'np'}) { $cpus = $hoh_pbsnodes{$node}{'np'}; } elsif ($hoh_pbsnodes{$node}{'resources_available.ncpus'}) { $cpus = $hoh_pbsnodes{$node}{'resources_available.ncpus'}; } $torque_totalcpus+=$cpus; if ($hoh_pbsnodes{$node}{'state'} =~ m/free/){ $torque_freecpus+=$cpus; } } } $lrms_queue{totalcpus} = $torque_totalcpus; debug("Totalcpus for all queues are: $lrms_queue{totalcpus}"); if(defined $$config{totalcpus}){ if ($lrms_queue{totalcpus} eq "" or $$config{totalcpus} < $lrms_queue{totalcpus}) { $lrms_queue{totalcpus}=$$config{totalcpus}; } } $lrms_queue{status} = $torque_freecpus; $lrms_queue{status}=0 if $lrms_queue{status} < 0; if ( $qstat{state_count} =~ m/.*Running:([0-9]*).*/ ){ $lrms_queue{running}=$1; } else { $lrms_queue{running}=0; } # calculate running in case of a routing queue if ( $qstat{queue_type} =~ /Route/ ) { debug($dqueues{$singledqueue}{state_count}); if ( $dqueues{$singledqueue}{state_count} =~ m/.*Running:([0-9]*).*/ ) { $lrms_queue{running}=$1; } } # the above gets the number of nodes not the number of cores in use. If multi core jobs are running, "running" will be underestimated. # Instead use totalcpus - freecpus (This might overrepresent running. because pbsnodes count whole nodes in use.) 
# CUS (2015-02-09) my $runningcores = $torque_totalcpus - $torque_freecpus ; $runningcores = 0 if $runningcores < 0; $lrms_queue{running} = $runningcores if $runningcores > $lrms_queue{running}; if ($lrms_queue{totalcpus} eq 0) { warning("Can't determine number of cpus for queue $qname"); } if ( $qstat{state_count} =~ m/.*Queued:([0-9]*).*/ ){ $lrms_queue{queued}=$1; } else { $lrms_queue{queued}=0; } # fallback for defult values that are required for normal operation $lrms_queue{MaxSlotsPerJob} = $lrms_queue{totalcpus} if $lrms_queue{MaxSlotsPerJob} eq ""; # calculate queued in case of a routing queue # queued jobs is the sum of jobs queued in the routing queue # plus jobs in the destination queue if ( $qstat{queue_type} =~ /Route/ ) { debug($dqueues{$singledqueue}{state_count}); if ( $dqueues{$singledqueue}{state_count} =~ m/.*Queued:([0-9]*).*/ ) { $lrms_queue{queued}=$lrms_queue{queued}+$1; } } } return %lrms_queue; } sub jobs_info ($$@) { # Path to LRMS commands my ($config) = shift; my ($path) = $$config{pbs_bin_path}; # Name of the queue to query my ($qname) = shift; init_globals($qname); # LRMS job IDs from Grid Manager (jobs with "INLRMS" GM status) my ($jids) = shift; # Return data structure %lrms_jobs{$lrms_local_job_id}{$keyword} # # All values should be defined, empty values "" are ok if field # does not apply to particular LRMS. my (%lrms_jobs); # Fill %lrms_jobs here (da implementation) # rank is treated separately as it does not have an entry in # qstat output, comment because it is an array, and mem # because "kB" needs to be stripped from the value my (%skeywords) = ('job_state' => 'status'); my (%tkeywords) = ( 'resources_used.walltime' => 'walltime', 'resources_used.cput' => 'cputime', 'Resource_List.walltime' => 'reqwalltime', 'Resource_List.cputime' => 'reqcputime'); my (%nkeywords) = ( 'Resource_List.select' => 1, 'Resource_List.nodes' => 1, 'Resource_List.ncpus' => 1); my ($alljids) = join ' ', @{$jids}; my ($rank) = 0; my %job_owner; # better rank for maui my %showqrank; if (exists $$config{scheduling_policy} and lc($$config{scheduling_policy}) eq "maui") { my $showq = (defined $$config{maui_bin_path}) ? 
$$config{maui_bin_path}."/showq" : "showq"; unless (open SHOWQOUTPUT, " $showq |"){ error("error in executing $showq "); } my $idle=-1; while(my $line=) { if($line=~/^IDLE.+/) { $idle=0; $line=;$line=; } next if $idle == -1; if ($line=~/^(\d+).+/) { $idle++; $showqrank{$1}=$idle; } } close SHOWQOUTPUT; } my $handle_attr = sub { my ($jid, $k, $v) = @_; if ( defined $skeywords{$k} ) { $lrms_jobs{$jid}{$skeywords{$k}} = $v; if($k eq "job_state") { if( $v eq "U" ) { $lrms_jobs{$jid}{status} = "S"; } elsif ( $v eq "C" ) { $lrms_jobs{$jid}{status} = ""; # No status means job has completed } elsif ( $v ne "R" and $v ne "Q" and $v ne "S" and $v ne "E" ) { $lrms_jobs{$jid}{status} = "O"; } } } elsif ( defined $tkeywords{$k} ) { $lrms_jobs{$jid}{$tkeywords{$k}} = &count_time($v); } elsif ( defined $nkeywords{$k} ) { $lrms_jobs{$jid}{$k} = $v; } elsif ( $k eq 'exec_host' ) { my @nodes = split_hostlist($v); $lrms_jobs{$jid}{nodes} = \@nodes; #$lrms_jobs{$jid}{cpus} = scalar @nodes; } elsif ( $k eq 'comment' ) { $lrms_jobs{$jid}{comment} = [] unless $lrms_jobs{$jid}{comment}; push @{$lrms_jobs{$jid}{comment}}, "LRMS: $v"; } elsif ($k eq 'resources_used.vmem') { $v =~ s/(\d+).*/$1/; $lrms_jobs{$jid}{mem} = $v; } if ( $k eq 'Job_Owner' ) { $v =~ /(\S+)@/; $job_owner{$jid} = $1; } if ( $k eq 'job_state' ) { if ($v eq 'R') { $lrms_jobs{$jid}{rank} = ""; } elsif ($v eq 'C') { $lrms_jobs{$jid}{rank} = ""; } else { $rank++; $lrms_jobs{$jid}{rank} = $rank; $jid=~/^(\d+).+/; if (defined $showqrank{$1}) { $lrms_jobs{$jid}{rank} = $showqrank{$1}; } } if ($v eq 'R' or 'E'){ ++$user_jobs_running{$job_owner{$jid}}; } if ($v eq 'Q'){ ++$user_jobs_queued{$job_owner{$jid}}; } } }; unless (open QSTATOUTPUT, "$path/qstat -f 2>/dev/null |") { error("Error in executing qstat: $path/qstat -f "); } my ($jid, $k, $v) = (); while (my $line = ) { if ($line =~ /^Job Id:\s+(\d+.*)$/) { my $pbsjid = $1; &$handle_attr($jid, $k, $v) if $k and $jid; set_cpucount($lrms_jobs{$jid}) if $jid; ($jid, $k, $v) = (); foreach my $j (@$jids) { if ( $pbsjid =~ /^$j$/ ) { $jid = $j; last; } } next; } next unless defined $jid; # a line starting with a tab is a continuation line if ( $line =~ m/^\t(.+)$/ ) { $v .= $1; next; } if ( $line =~ m/^\s*(\S+) = (.*)$/ ) { my ($newk, $newv) = ($1, $2); &$handle_attr($jid, $k, $v) if $k and $jid; ($k, $v) = ($newk, $newv); } } &$handle_attr($jid, $k, $v) if $k and $jid; set_cpucount($lrms_jobs{$jid}) if $jid; close QSTATOUTPUT; my (@scalarkeywords) = ('status', 'rank', 'mem', 'walltime', 'cputime', 'reqwalltime', 'reqcputime'); foreach $jid ( @$jids ) { foreach my $k ( @scalarkeywords ) { if ( ! defined $lrms_jobs{$jid}{$k} ) { $lrms_jobs{$jid}{$k} = ""; } } $lrms_jobs{$jid}{comment} = [] unless $lrms_jobs{$jid}{comment}; $lrms_jobs{$jid}{nodes} = [] unless $lrms_jobs{$jid}{nodes}; } return %lrms_jobs; } sub users_info($$@) { # Path to LRMS commands my ($config) = shift; my ($path) = $$config{pbs_bin_path}; # Name of the queue to query my ($qname) = shift; init_globals($qname); # Unix user names mapped to grid users my ($accts) = shift; # Return data structure %lrms_users{$unix_local_username}{$keyword} # # All values should be defined, empty values "" are ok if field # does not apply to particular LRMS. 
my (%lrms_users); # Check that users have access to the queue unless (open QSTATOUTPUT, "$path/qstat -f -Q $qname 2>/dev/null |") { error("Error in executing qstat: $path/qstat -f -Q $qname"); } my $acl_user_enable = 0; my @acl_users; my $more_acls = 0; # added for routing queue support my @dqueues; my $singledqueue; my $isrouting; while (my $line= ) { chomp $line; # is this a continuation of the acl line? if ($more_acls) { $line =~ s/^\s*//; # strip leading spaces push @acl_users, split ',', $line; $more_acls = 0 unless $line =~ /,\s*$/; next; } if ( $line =~ /\s*acl_user_enable/ ) { my ( $k ,$v ) = split ' = ', $line; unless ( $v eq 'False' ) { $acl_user_enable = 1; } } if ( $line =~ /\s*acl_users/ ) { my ( $k ,$v ) = split ' = ', $line; unless ( $v eq 'False' ) { # This condition is kept here in case the reason # for it being there in the first place was that some # version or flavour of PBS really has False as an alternative # to usernames to indicate the absence of user access control # A Corrallary: Dont name your users 'False' ... push @acl_users, split ',', $v; $more_acls = 1 if $v =~ /,\s*$/; } } # added to support routing queues if (!$acl_user_enable){ if ($line =~ /\s*route_destinations\s=\s(.*)$/) { @dqueues=split (',',$1); $singledqueue=shift(@dqueues); warning('Routing queue did not have acl information. Local user acl taken from destination queue: '.$singledqueue); $isrouting = 1; } } } close QSTATOUTPUT; # if the acl_user_enable is not defined in the RQ, # it could be defined in the destination queues. # we proceed same way as before but on the first # destination queue to propagate the info to the routing one if ($isrouting){ debug("Getting acl from destination queue $singledqueue"); # Check that users have access to the queue unless (open QSTATOUTPUT, "$path/qstat -f -Q $singledqueue 2>/dev/null |") { error("Error in executing qstat on destination queue: $path/qstat -f -Q $singledqueue"); } $acl_user_enable = 0; $more_acls = 0; while (my $line= ) { chomp $line; # is this a continuation of the acl line? if ($more_acls) { $line =~ s/^\s*//; # strip leading spaces push @acl_users, split ',', $line; $more_acls = 0 unless $line =~ /,\s*$/; next; } if ( $line =~ /\s*acl_user_enable/ ) { my ( $k ,$v ) = split ' = ', $line; unless ( $v eq 'False' ) { $acl_user_enable = 1; } } if ( $line =~ /\s*acl_users/ ) { my ( $k ,$v ) = split ' = ', $line; unless ( $v eq 'False' ) { # This condition is kept here in case the reason # for it being there in the first place was that some # version or flavour of PBS really has False as an alternative # to usernames to indicate the absence of user access control # A Corrallary: Dont name your users 'False' ... push @acl_users, split ',', $v; $more_acls = 1 if $v =~ /,\s*$/; } } } close QSTATOUTPUT; debug(@acl_users); } # acl_users is only in effect when acl_user_enable is true if ($acl_user_enable) { foreach my $a ( @{$accts} ) { if ( grep { $a eq $_ } @acl_users ) { # The acl_users list has to be sent back to the caller. # This trick works because the config hash is passed by # reference. push @{$$config{acl_users}}, $a; } else { warning("Local user $a does not ". "have access in queue $qname."); } } } else { delete $$config{acl_users}; } # Uses saved module data structure %lrms_queue, which # exists if queue_info is called before if ( ! 
exists $lrms_queue{status} ) { %lrms_queue = queue_info( $config, $qname ); } foreach my $u ( @{$accts} ) { if (exists $$config{scheduling_policy} and lc($$config{scheduling_policy}) eq "maui") { my $maui_freecpus; my $showbf = (defined $$config{maui_bin_path}) ? $$config{maui_bin_path}."/showbf" : "showbf"; if (exists $$config{dedicated_node_string}) { $showbf.=" -f ".$$config{dedicated_node_string}; } unless (open SHOWBFOUTPUT, " $showbf -u $u |"){ error("error in executing $showbf -u $u "); } while (my $line= ) { if ($line =~ /^partition/) { last; } if ($line =~ /no procs available/) { $maui_freecpus= " 0"; last; } if ($line =~ /(\d+).+available for\s+([\w:]+)/) { $maui_freecpus .= " ".$1.":".&count_time($2); } if ($line =~ /(\d+).+available with no timelimit/) { $maui_freecpus.= " ".$1; last; } } $lrms_users{$u}{freecpus} = $maui_freecpus; $lrms_users{$u}{queuelength} = $user_jobs_queued{$u} || 0; } else { $user_jobs_running{$u} = 0 unless $user_jobs_running{$u}; if ($lrms_queue{maxuserrun} and ($lrms_queue{maxuserrun} - $user_jobs_running{$u}) < $lrms_queue{status} ) { $lrms_users{$u}{freecpus} = $lrms_queue{maxuserrun} - $user_jobs_running{$u}; } else { $lrms_users{$u}{freecpus} = $lrms_queue{status}; } $lrms_users{$u}{queuelength} = "$lrms_queue{queued}"; if ($lrms_users{$u}{freecpus} < 0) { $lrms_users{$u}{freecpus} = 0; } if ($lrms_queue{maxcputime} and $lrms_users{$u}{freecpus} > 0) { $lrms_users{$u}{freecpus} .= ':'.$lrms_queue{maxcputime}; } } } return %lrms_users; } sub nodes_info($) { my $config = shift; my $path = $config->{pbs_bin_path}; my %hoh_pbsnodes = read_pbsnodes($path); my %nodes; for my $host (keys %hoh_pbsnodes) { my ($isfree, $isavailable) = (0,0); $isfree = 1 if $hoh_pbsnodes{$host}{state} =~ /free/; $isavailable = 1 unless $hoh_pbsnodes{$host}{state} =~ /down|offline|unknown/; $nodes{$host} = {isfree => $isfree, isavailable => $isavailable}; my $props = $hoh_pbsnodes{$host}{properties}; $nodes{$host}{tags} = [ split /,\s*/, $props ] if $props; my $np = $hoh_pbsnodes{$host}{np}; $nodes{$host}{slots} = int $np if $np; my $status = $hoh_pbsnodes{$host}{status}; if ($status) { for my $token (split ',', $status) { my ($opt, $val) = split '=', $token, 2; next unless defined $val; if ($opt eq 'totmem') { $nodes{$host}{vmem} = int($1/1024) if $val =~ m/^(\d+)kb/; } elsif ($opt eq 'physmem') { $nodes{$host}{pmem} = int($1/1024) if $val =~ m/^(\d+)kb/; } elsif ($opt eq 'ncpus') { $nodes{$host}{lcpus} = int $val; } elsif ($opt eq 'uname') { my @uname = split ' ', $val; $nodes{$host}{sysname} = $uname[0]; $nodes{$host}{release} = $uname[2] if @uname > 2; $nodes{$host}{machine} = $uname[-1] if $uname[-1]; } } } } return %nodes; } 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/ConfigCentral.pm0000644000000000000000000000012413153455371026577 xustar000000000000000027 mtime=1504598777.935254 27 atime=1513200575.820717 30 ctime=1513200663.149786009 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/ConfigCentral.pm0000644000175000002070000011024213153455371026644 0ustar00mockbuildmock00000000000000package ConfigCentral; # Builds an intermediate config hash that is used by the A-REX infoprovider and LRMS control scripts # Can read XML and INI use strict; use warnings; use XML::Simple; use Data::Dumper qw(Dumper); use IniParser; use InfoChecker; use LogUtils; # while parsing, loglevel is WARNING (the default) our $log = LogUtils->getLogger(__PACKAGE__); ############################################################### # Internal representation of 
configuration data after parsing # ############################################################### my $lrms_options = { pbs_bin_path => '*', pbs_log_path => '*', dedicated_node_string => '*', maui_bin_path => '*', condor_bin_path => '*', condor_config => '*', condor_rank => '*', sge_bin_path => '*', sge_root => '*', sge_cell => '*', sge_qmaster_port => '*', sge_execd_port => '*', lsf_bin_path => '*', lsf_profile_path => '*', ll_bin_path => '*', slurm_bin_path => '*', slurm_wakeupperiod => '*', dgbridge_stage_dir => '*', dgbridge_stage_prepend => '*', boinc_db_host => '*', boinc_db_port => '*', boinc_db_name => '*', boinc_db_user => '*', boinc_db_pass => '*', boinc_app_id => '*', }; my $lrms_share_options = { queue_node_string => '*', condor_requirements => '*', sge_jobopts => '*', lsf_architecture => '*', ll_consumable_resources => '*', }; my $xenv_options = { Platform => '*', Homogeneous => '*', PhysicalCPUs => '*', LogicalCPUs => '*', CPUVendor => '*', CPUModel => '*', CPUVersion => '*', CPUClockSpeed => '*', CPUTimeScalingFactor => '*', WallTimeScalingFactor => '*', MainMemorySize => '*', VirtualMemorySize => '*', OSFamily => '*', OSName => '*', OSVersion => '*', VirtualMachine => '*', NetworkInfo => '*', ConnectivityIn => '*', ConnectivityOut => '*', Benchmark => [ '*' ], OpSys => [ '*' ], nodecpu => '*', }; my $share_options = { MaxVirtualMemory => '*', MaxSlotsPerJob => '*', SchedulingPolicy => '*', Preemption => '*', totalcpus => '*', defaultmemory => '*', authorizedvo => [ '*' ], }; my $gmuser_options = { controldir => '', sessiondir => [ '' ], cachedir => [ '*' ], cachesize => '*', remotecachedir => [ '*' ], defaultttl => '*', }; my $gmcommon_options = { lrms => '', gmconfig => '*', endpoint => '*', hostname => '*', maxjobs => '*', maxload => '*', maxloadshare => '*', wakeupperiod => '*', gridmap => '*', x509_user_key => '*', x509_user_cert => '*', x509_cert_dir => '*', runtimedir => '*', gnu_time => '*', shared_filesystem => '*', shared_scratch => '*', scratchdir => '*', enable_emies_interface => '*', enable_arc_interface => '*', enable_perflog_reporting => '*', perflogdir => '*' }; my $ldap_infosys_options = { SlapdPort => '*', infosys_ldap_run_dir => '*', bdii_var_dir => '*', bdii_tmp_dir => '*', bdii_run_dir => '*', infosys_nordugrid => '*', infosys_glue12 => '*', infosys_glue2_ldap => '*', bdii_update_pid_file => '*', infosys_glue2_ldap_showactivities => '*', infosys_glue2_service_qualitylevel => '*', infoproviders_timeout => '*', validity_ttl => '*' }; my $gridftpd_options = { GridftpdEnabled => '*', GridftpdPort => '*', GridftpdMountPoint => '*', GridftpdAllowNew => '*', remotegmdirs => [ '*' ], GridftpdPidFile => '*', }; my $admindomain_options = { Name => '*', OtherInfo => [ '*' ], Description => '*', WWW => '*', Distributed => '*', Owner => '*' }; # # # # # # # # # # # # # # my $config_schema = { defaultLocalName => '*', debugLevel => '*', ProviderLog => '*', PublishNordugrid => '*', AdminDomain => '*', ttl => '*', admindomain => { %$admindomain_options }, %$gmcommon_options, %$gridftpd_options, %$ldap_infosys_options, %$lrms_options, %$lrms_share_options, control => { '*' => { %$gmuser_options } }, service => { OtherInfo => [ '*' ], StatusInfo => [ '*' ], Downtime => '*', ClusterName => '*', ClusterAlias => '*', ClusterComment => '*', ClusterOwner => [ '*' ], Middleware => [ '*' ], AuthorizedVO => [ '*' ], LocalSE => [ '*' ], InteractiveContactstring => [ '*' ], %$xenv_options, %$share_options, }, location => { Name => '*', Address => '*', Place => '*', Country => '*', 
PostCode => '*', Latitude => '*', Longitude => '*', }, contacts => [ { Name => '*', OtherInfo => [ '*' ], Detail => '', Type => '', } ], accesspolicies => [ { Rule => [ '' ], UserDomainID => [ '' ], } ], mappingpolicies => [ { ShareName => [ '' ], Rule => [ '' ], UserDomainID => [ '' ], } ], xenvs => { '*' => { OtherInfo => [ '*' ], NodeSelection => { Regex => [ '*' ], Command => [ '*' ], Tag => [ '*' ], }, %$xenv_options, } }, shares => { '*' => { Description => '*', OtherInfo => [ '*' ], MappingQueue => '*', ExecutionEnvironmentName => [ '' ], %$share_options, %$lrms_share_options, } } }; my $allbools = [ qw( PublishNordugrid Homogeneous VirtualMachine ConnectivityIn ConnectivityOut Preemption infosys_nordugrid infosys_glue12 infosys_glue2_ldap infosys_glue2_ldap_showactivities GridftpdEnabled GridftpdAllowNew Distributed enable_arc_interface enable_emies_interface enable_perflog_reporting) ]; ############################ Generic functions ########################### # walks a tree of hashes and arrays while applying a function to each hash. sub hash_tree_apply { my ($ref, $func) = @_; if (not ref($ref)) { return; } elsif (ref($ref) eq 'ARRAY') { map {hash_tree_apply($_,$func)} @$ref; return; } elsif (ref($ref) eq 'HASH') { &$func($ref); map {hash_tree_apply($_,$func)} values %$ref; return; } else { return; } } # Strips namespace prefixes from the keys of the hash passed by reference sub hash_strip_prefixes { my ($h) = @_; my %t; while (my ($k,$v) = each %$h) { next if $k =~ m/^xmlns/; $k =~ s/^\w+://; $t{$k} = $v; } %$h=%t; return; } # Verifies that a key is an HASH reference and returns that reference sub hash_get_hashref { my ($h, $key) = @_; my $r = ($h->{$key} ||= {}); $log->fatal("badly formed '$key' element in XML config") unless ref $r eq 'HASH'; return $r; } # Verifies that a key is an ARRAY reference and returns that reference sub hash_get_arrayref { my ($h, $key) = @_; my $r = ($h->{$key} ||= []); $log->fatal("badly formed '$key' element in XML config") unless ref $r eq 'ARRAY'; return $r; } # Set selected keys to either 'true' or 'false' sub fixbools { my ($h,$bools) = @_; for my $key (@$bools) { next unless exists $h->{$key}; my $val = $h->{$key}; if ($val eq '0' or lc $val eq 'false' or lc $val eq 'no' or lc $val eq 'disable') { $h->{$key} = '0'; } elsif ($val eq '1' or lc $val eq 'true' or lc $val eq 'yes' or lc $val eq 'enable' or lc $val eq 'expert-debug-on') { $h->{$key} = '1'; } else { $log->error("Invalid value for $key"); } } return $h; } sub move_keys { my ($h, $k, $names) = @_; for my $key (@$names) { next unless exists $h->{$key}; $k->{$key} = $h->{$key}; delete $h->{$key}; } } sub rename_keys { my ($h, $k, $names) = @_; for my $key (keys %$names) { next unless exists $h->{$key}; my $newkey = $names->{$key}; $k->{$newkey} = $h->{$key}; delete $h->{$key}; } } ############################ ARC xml config ############################## sub read_arex_config { my ($file) = @_; my %xmlopts = (NSexpand => 0, ForceArray => 1, KeepRoot => 1, KeyAttr => {}); my $xml = XML::Simple->new(%xmlopts); my $data; eval { $data = $xml->XMLin($file) }; $log->error("Failed to parse XML file $file: $@") if $@; hash_tree_apply $data, \&hash_strip_prefixes; my $services; $services = $data->{Service} if ref $data eq 'HASH' and ref $data->{Service} eq 'ARRAY'; $services = $data->{ArcConfig}[0]{Chain}[0]{Service} if not $services and ref $data eq 'HASH' and ref $data->{ArcConfig} eq 'ARRAY' and ref $data->{ArcConfig}[0] eq 'HASH' and ref $data->{ArcConfig}[0]{Chain} eq 'ARRAY' and ref 
$data->{ArcConfig}[0]{Chain}[0] eq 'HASH' and ref $data->{ArcConfig}[0]{Chain}[0]{Service} eq 'ARRAY'; return undef unless $services; for my $srv (@$services) { next unless ref $srv eq 'HASH'; return $srv if $srv->{name} eq 'a-rex'; } return undef; } # # Reads the XML config file passed as the first argument and produces a config # hash conforming to $config_schema. # sub build_config_from_xmlfile { my ($file, $arc_location) = @_; my $arex = read_arex_config($file); $log->fatal("A-REX config not found in $file") unless ref $arex eq 'HASH'; # The structure that will hold all config options my $config = {}; $config->{control} = {}; $config->{service} = {}; $config->{location} = {}; $config->{contacts} = []; $config->{accesspolicies} = []; $config->{mappingpolicies} = []; $config->{xenvs} = {}; $config->{shares} = {}; # Special treatment for xml elements that have empty content. XMLSimple # converts these into an empty hash. Since ForceArray => 1 was used, these # hashes are placed inside an array. Replace these empty hashes with the # empty string. Do not touch keys that normally contain deep structures. my @deepstruct = qw(control dataTransfer Globus cache location remotelocation loadLimits LRMS InfoProvider Location Contact ExecutionEnvironment ComputingShare NodeSelection AccessPolicy MappingPolicy); hash_tree_apply $arex, sub { my $h = shift; while (my ($k,$v) = each %$h) { next unless ref($v) eq 'ARRAY'; next if grep {$k eq $_} @deepstruct; my $nv = [ map {(ref $_ eq 'HASH' && ! scalar %$_) ? '' : $_ } @$v ]; $h->{$k} = $nv; } }; # Collapse unnnecessary arrays created by XMLSimple. All hash keys are # array valued now due to using ForceArray => 1. For keys that corresound # to options which are not multivalued, the arrays should contain only one # element. Replace these arrays with the value of the last element. Keys # corresponding to multivalued options are left untouched. 
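    # Illustrative example (made-up option value) of the collapsing done
    # below: with ForceArray => 1, XML::Simple turns <debugLevel>INFO</debugLevel>
    # into { debugLevel => [ 'INFO' ] }; since debugLevel is not listed in
    # @multival it is collapsed to { debugLevel => 'INFO' }, whereas a
    # multivalued key such as OpSys keeps its array form.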
my @multival = qw(cache location remotelocation control sessionRootDir remotegmdirs OpSys Middleware LocalSE ClusterOwner Benchmark OtherInfo StatusInfo Regex Command Tag ExecutionEnvironmentName AuthorizedVO Contact ExecutionEnvironment ComputingShare InteractiveContactstring AccessPolicy MappingPolicy ShareName Rule UserDomainID); hash_tree_apply $arex, sub { my $h = shift; while (my ($k,$v) = each %$h) { next unless ref($v) eq 'ARRAY'; next if grep {$k eq $_} @multival; $v = pop @$v; $h->{$k} = $v; } }; move_keys $arex, $config, ['endpoint', 'debugLevel']; $config->{ttl} = 2 * $arex->{InfoproviderWakeupPeriod} if $arex->{InfoproviderWakeupPeriod}; my $usermap = hash_get_hashref($arex, 'usermap'); my $username = $usermap->{'defaultLocalName'}; $config->{defaultLocalName} = $username if $username; my $gmconfig = $arex->{gmconfig}; if ($gmconfig) { if (not ref $gmconfig) { $config->{gmconfig} = $gmconfig; } elsif (ref $gmconfig eq 'HASH') { $config->{gmconfig} = $gmconfig->{content} if $gmconfig->{content} and $gmconfig->{type} and $gmconfig->{type} eq 'INI'; } } my $controls = hash_get_arrayref($arex, 'control'); for my $control (@$controls) { $log->fatal("badly formed 'control' element in XML config") unless ref $control eq 'HASH'; my $user = $control->{username} || '.'; my $cconf = {}; my $controldir = $control->{controlDir}; $cconf->{controldir} = $controldir if $controldir; my $sessiondirs = $control->{sessionRootDir}; # an array $cconf->{sessiondir} = $sessiondirs if $sessiondirs; my $ttl = $control->{defaultTTL} || ''; my $ttr = $control->{defaultTTR} || ''; $cconf->{defaultttl} = "$ttl $ttr" if $ttl; my $caches = hash_get_arrayref($control, 'cache'); for my $cache (@$caches) { $log->fatal("badly formed 'cache' element in XML config") unless ref $cache eq 'HASH'; my $locations = hash_get_arrayref($cache, 'location'); for my $location (@$locations) { $log->fatal("badly formed 'location' element in XML config") unless ref $location eq 'HASH'; next unless $location->{path}; push @{$cconf->{cachedir}}, $location->{path}; } my $rlocations = hash_get_arrayref($cache, 'remotelocation'); for my $location (@$rlocations) { $log->fatal("badly formed 'location' element in XML config") unless ref $location eq 'HASH'; next unless $location->{path}; push @{$cconf->{remotecachedir}}, $location->{path}; } my $low = $cache->{lowWatermark} || ''; my $high = $cache->{highWatermark} || ''; $cconf->{cachesize} = "$low $high" if $low; } $config->{control}{$user} = $cconf; } my $globus = hash_get_hashref(hash_get_hashref($arex, 'dataTransfer'), 'Globus'); # these are obsolete now -- kept only for backwards compatibility rename_keys $globus, $config, {certpath => 'x509_user_cert', keypath => 'x509_user_key', cadir => 'x509_cert_dir'}; rename_keys $globus, $config, {CertificatePath => 'x509_user_cert', KeyPath => 'x509_user_key', CACertificatesDir => 'x509_cert_dir', gridmapfile => 'gridmap'}; my $load = hash_get_hashref($arex, 'loadLimits'); my $mj = $load->{maxJobsTracked} || '-1'; my $mjr = $load->{maxJobsRun} || '-1'; # maxJobsTransfered, maxJobsTransferedAddtional and maxFilesTransfered are parsed for backward compatibility. 
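    # Illustrative example (made-up limits) of the strings built below: with
    # maxJobsTracked=1000 and maxJobsRun=100 the result is maxjobs = "1000 100";
    # unset limits default to -1, so an empty loadLimits element yields
    # maxjobs = "-1 -1" and maxload = "-1 -1 -1".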
my $mjt = $load->{maxJobsTransferred} || $load->{maxJobsTransfered} || '-1'; my $mjta = $load->{maxJobsTransferredAdditional} || $load->{maxJobsTransferedAdditional} || '-1'; my $mft = $load->{maxFilesTransferred} || $load->{maxFilesTransfered} || '-1'; $config->{maxjobs} = "$mj $mjr"; $config->{maxload} = "$mjt $mjta $mft"; if ($load->{maxLoadShare} and $load->{loadShareType}) { $config->{maxloadshare} = $load->{maxLoadShare}." ".$load->{loadShareType}; } $config->{wakeupperiod} = $load->{wakeupPeriod} if defined $load->{wakeupPeriod}; my $lrms = hash_get_hashref($arex, 'LRMS'); $config->{lrms} = $lrms->{type} if $lrms->{type}; $config->{lrms} .= " ".$lrms->{defaultShare} if $lrms->{defaultShare}; move_keys $lrms, $config, [keys %$lrms_options, keys %$lrms_share_options]; rename_keys $lrms, $config, {runtimeDir => 'runtimedir', scratchDir => 'scratchdir', sharedScratch => 'shared_scratch', sharedFilesystem => 'shared_filesystem', GNUTimeUtility => 'gnu_time'}; my $ipcfg = hash_get_hashref($arex, 'InfoProvider'); rename_keys $ipcfg, $ipcfg, {Name => 'ClusterName'}; move_keys $ipcfg, $config->{service}, [keys %{$config_schema->{service}}]; move_keys $ipcfg, $config, ['debugLevel', 'ProviderLog', 'PublishNordugrid', 'AdminDomain']; move_keys $ipcfg, $config, [keys %$gridftpd_options]; rename_keys $ipcfg, $config, {Location => 'location', Contact => 'contacts'}; rename_keys $ipcfg, $config, {AccessPolicy => 'accesspolicies', MappingPolicy => 'mappingpolicies'}; my $xenvs = hash_get_arrayref($ipcfg, 'ExecutionEnvironment'); for my $xe (@$xenvs) { $log->fatal("badly formed 'ExecutionEnvironment' element in XML config") unless ref $xe eq 'HASH'; my $name = $xe->{name}; $log->fatal("ExecutionEnvironment without name attribute") unless $name; my $xeconf = $config->{xenvs}{$name} = {}; $xeconf->{NodeSelection} = hash_get_hashref($xe, 'NodeSelection'); move_keys $xe, $xeconf, [keys %{$config_schema->{xenvs}{'*'}}]; } my $shares = hash_get_arrayref($ipcfg, 'ComputingShare'); for my $s (@{$shares}) { $log->fatal("badly formed 'ComputingShare' element in XML config") unless ref $s eq 'HASH'; my $name = $s->{name}; $log->error("ComputingShare without name attribute") unless $name; my $sconf = $config->{shares}{$name} = {}; move_keys $s, $sconf, [keys %{$config_schema->{shares}{'*'}}]; } hash_tree_apply $config, sub { fixbools shift, $allbools }; _substitute($config, $arc_location); #print(Dumper $config); return $config; } sub _substitute { my ($config, $arc_location) = @_; my $control = $config->{control}; my ($lrms, $defqueue) = split " ", $config->{lrms} || ''; die 'Gridmap user list feature is not supported anymore. Please use @filename to specify user list.' if exists $control->{'*'}; # expand user sections whose user name is like @filename my @users = keys %$control; for my $user (@users) { next unless $user =~ m/^\@(.*)$/; my $path = $1; my $fh; # read in user names from file if (open ($fh, "< $path")) { while (my $line = <$fh>) { chomp (my $newuser = $line); next if exists $control->{$newuser}; # Duplicate user!!!! 
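            # Illustrative example (hypothetical file name and users): a control
            # section keyed '@/etc/arc/gm-users' whose file contains the lines
            # "alice" and "bob" is expanded here into $control->{alice} and
            # $control->{bob}, each getting a shallow copy of the options from
            # the '@/etc/arc/gm-users' section.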
$control->{$newuser} = { %{$control->{$user}} }; # shallow copy } close $fh; delete $control->{$user}; } else { die "Failed opening file to read user list from: $path: $!"; } } # substitute per-user options @users = keys %$control; for my $user (@users) { my @pw; my $home; if ($user ne '.') { @pw = getpwnam($user); $log->warning("getpwnam failed for user: $user: $!") unless @pw; $home = $pw[7] if @pw; } else { $home = "/tmp"; } my $opts = $control->{$user}; # Default for controldir, sessiondir if ($opts->{controldir} eq '*') { $opts->{controldir} = $pw[7]."/.jobstatus" if @pw; } $opts->{sessiondir} ||= ['*']; $opts->{sessiondir} = [ map { $_ ne '*' ? $_ : "$home/.jobs" } @{$opts->{sessiondir}} ]; my $controldir = $opts->{controldir}; my @sessiondirs = @{$opts->{sessiondir}}; my $subst_opt = sub { my ($val) = @_; # %R - session root $val =~ s/%R/$sessiondirs[0]/g if $val =~ m/%R/; # %C - control dir $val =~ s/%C/$controldir/g if $val =~ m/%C/; if (@pw) { # %U - username $val =~ s/%U/$user/g if $val =~ m/%U/; # %u - userid # %g - groupid # %H - home dir $val =~ s/%u/$pw[2]/g if $val =~ m/%u/; $val =~ s/%g/$pw[3]/g if $val =~ m/%g/; $val =~ s/%H/$home/g if $val =~ m/%H/; } # %L - default lrms # %Q - default queue $val =~ s/%L/$lrms/g if $val =~ m/%L/; $val =~ s/%Q/$defqueue/g if $val =~ m/%Q/; # %W - installation path $val =~ s/%W/$arc_location/g if $val =~ m/%W/; # %G - globus path my $G = $ENV{GLOBUS_LOCATION} || '/usr'; $val =~ s/%G/$G/g if $val =~ m/%G/; return $val; }; if ($opts->{controldir}) { $opts->{controldir} = &$subst_opt($opts->{controldir}); } if ($opts->{sessiondir}) { $opts->{sessiondir} = [ map {&$subst_opt($_)} @{$opts->{sessiondir}} ]; } if ($opts->{cachedir}) { $opts->{cachedir} = [ map {&$subst_opt($_)} @{$opts->{cachedir}} ]; } if ($opts->{remotecachedir}) { $opts->{remotecachedir} = [ map {&$subst_opt($_)} @{$opts->{remotecachedir}} ]; } } # authplugin, localcred, helper: not substituted return $config; } # # Reads the INI config file passed as the first argument and produces a config # hash conforming to $config_schema. An already existing config hash can be # passed as a second, optional, argument in which case opptions read from the # INI file will be merged into the hash overriding options already present. # sub build_config_from_inifile { my ($inifile, $config) = @_; my $iniparser = SubstitutingIniParser->new($inifile); if (not $iniparser) { $log->error("Failed parsing config file: $inifile\n"); return $config; } $log->error("Not a valid INI configuration file: $inifile") unless $iniparser->list_sections(); # Will add to an already existing config. 
$config ||= {}; $config->{service} ||= {}; $config->{control} ||= {}; $config->{location} ||= {}; $config->{contacts} ||= []; $config->{accesspolicies} ||= []; $config->{mappingpolicies} ||= []; $config->{xenvs} ||= {}; $config->{shares} ||= {}; $config->{admindomain} ||= {}; my $common = { $iniparser->get_section("common") }; my $gm = { $iniparser->get_section("grid-manager") }; rename_keys $common, $config, {providerlog => 'ProviderLog'}; move_keys $common, $config, [keys %$gmcommon_options]; move_keys $common, $config, [keys %$lrms_options, keys %$lrms_share_options]; move_keys $gm, $config, [keys %$gmcommon_options]; rename_keys $gm, $config, {arex_mount_point => 'endpoint'}; $config->{debugLevel} = $common->{debug} if $common->{debug}; move_keys $common, $config, [keys %$ldap_infosys_options]; my $infosys = { $iniparser->get_section("infosys") }; rename_keys $infosys, $config, {providerlog => 'ProviderLog', provider_loglevel => 'debugLevel', port => 'SlapdPort'}; move_keys $infosys, $config, [keys %$ldap_infosys_options]; my @cnames = $iniparser->list_subsections('grid-manager'); for my $name (@cnames) { my $section = { $iniparser->get_section("grid-manager/$name") }; $config->{control}{$name} ||= {}; move_keys $section, $config->{control}{$name}, [keys %$gmuser_options]; } # Cherry-pick some gridftp options if ($iniparser->has_section('gridftpd/jobs')) { my %gconf = $iniparser->get_section('gridftpd'); my %gjconf = $iniparser->get_section('gridftpd/jobs'); $config->{GridftpdEnabled} = 'yes'; $config->{GridftpdPort} = $gconf{port} if $gconf{port}; $config->{GridftpdMountPoint} = $gjconf{path} if $gjconf{path}; $config->{GridftpdAllowNew} = $gjconf{allownew} if defined $gjconf{allownew}; $config->{remotegmdirs} = $gjconf{remotegmdirs} if defined $gjconf{remotegmdirs}; $config->{GridftpdPidFile} = $gconf{pidfile} if defined $gconf{pidfile}; } else { $config->{GridftpdEnabled} = 'no'; } # global AdminDomain configuration if ($iniparser->has_section('infosys/admindomain')) { my $admindomain_options = { $iniparser->get_section('infosys/admindomain') }; rename_keys $admindomain_options, $config->{'admindomain'}, {name => 'Name', otherinfo => 'OtherInfo', description => 'Description', www => 'WWW', distributed => 'Distributed', owner => 'Owner' }; move_keys $admindomain_options, $config->{'admindomain'}, [keys %$admindomain_options]; } else { $log->info('[infosys/admindomain] section missing. 
No site information will be published.'); } ############################ legacy ini config file structure ############################# move_keys $common, $config, ['AdminDomain']; my $cluster = { $iniparser->get_section('cluster') }; if (%$cluster) { # Ignored: cluster_location, lrmsconfig rename_keys $cluster, $config, {arex_mount_point => 'endpoint'}; rename_keys $cluster, $config->{location}, { cluster_location => 'PostCode' }; rename_keys $cluster, $config->{service}, { interactive_contactstring => 'InteractiveContactstring', cluster_owner => 'ClusterOwner', localse => 'LocalSE', authorizedvo => 'AuthorizedVO', homogeneity => 'Homogeneous', architecture => 'Platform', opsys => 'OpSys', benchmark => 'Benchmark', nodememory => 'MaxVirtualMemory', middleware => 'Middleware', cluster_alias => 'ClusterAlias', comment => 'ClusterComment'}; if ($cluster->{clustersupport} and $cluster->{clustersupport} =~ /(.*)@/) { my $contact = {}; push @{$config->{contacts}}, $contact; $contact->{Name} = $1; $contact->{Detail} = "mailto:".$cluster->{clustersupport}; $contact->{Type} = 'usersupport'; } if (defined $cluster->{nodeaccess}) { $config->{service}{ConnectivityIn} = 0; $config->{service}{ConnectivityOut} = 0; for (split '\[separator\]', $cluster->{nodeaccess}) { $config->{service}{ConnectivityIn} = 1 if lc $_ eq 'inbound'; $config->{service}{ConnectivityOut} = 1 if lc $_ eq 'outbound'; } } move_keys $cluster, $config->{service}, [keys %$share_options, keys %$xenv_options]; move_keys $cluster, $config, [keys %$lrms_options, keys %$lrms_share_options]; } my @qnames = $iniparser->list_subsections('queue'); for my $name (@qnames) { my $queue = { $iniparser->get_section("queue/$name") }; my $sconf = $config->{shares}{$name} ||= {}; my $xeconf = $config->{xenvs}{$name} ||= {}; push @{$sconf->{ExecutionEnvironmentName}}, $name; $log->error("MappingQuue option only allowed under ComputingShare section") if $queue->{MappingQuue}; delete $queue->{MappingQueue}; $log->error("ExecutionEnvironmentName option only allowed under ComputingShare section") if $queue->{ExecutionEnvironmentName}; delete $queue->{ExecutionEnvironmentName}; $log->error("NodeSelection option only allowed under ExecutionEnvironment section") if $queue->{NodeSelection}; delete $queue->{NodeSelection}; rename_keys $queue, $sconf, {scheduling_policy => 'SchedulingPolicy', nodememory => 'MaxVirtualMemory', comment => 'Description', maxslotsperjob => 'MaxSlotsPerJob'}; move_keys $queue, $sconf, [keys %$share_options, keys %$lrms_share_options]; rename_keys $queue, $xeconf, {homogeneity => 'Homogeneous', architecture => 'Platform', opsys => 'OpSys', benchmark => 'Benchmark'}; move_keys $queue, $xeconf, [keys %$xenv_options]; $xeconf->{NodeSelection} = {}; } ################################# new ini config file structure ############################## my $provider = { $iniparser->get_section("InfoProvider") }; move_keys $provider, $config, ['debugLevel', 'ProviderLog', 'PublishNordugrid', 'AdminDomain']; move_keys $provider, $config->{service}, [keys %{$config_schema->{service}}]; my @gnames = $iniparser->list_subsections('ExecutionEnvironment'); for my $name (@gnames) { my $xeconf = $config->{xenvs}{$name} ||= {}; my $section = { $iniparser->get_section("ExecutionEnvironment/$name") }; $xeconf->{NodeSelection} ||= {}; $xeconf->{NodeSelection}{Regex} = $section->{NodeSelectionRegex} if $section->{NodeSelectionRegex}; $xeconf->{NodeSelection}{Command} = $section->{NodeSelectionCommand} if $section->{NodeSelectionCommand}; 
$xeconf->{NodeSelection}{Tag} = $section->{NodeSelectionTag} if $section->{NodeSelectionTag}; move_keys $section, $xeconf, [keys %$xenv_options, 'OtherInfo']; } my @snames = $iniparser->list_subsections('ComputingShare'); for my $name (@snames) { my $sconf = $config->{shares}{$name} ||= {}; my $section = { $iniparser->get_section("ComputingShare/$name") }; move_keys $section, $sconf, [keys %{$config_schema->{shares}{'*'}}]; } my $location = { $iniparser->get_section("Location") }; $config->{location} = $location if %$location; my @ctnames = $iniparser->list_subsections('Contact'); for my $name (@ctnames) { my $section = { $iniparser->get_section("Contact/$name") }; push @{$config->{contacts}}, $section; } # Create a list with all multi-valued options based on $config_schema. my @multival = (); hash_tree_apply $config_schema, sub { my $h = shift; for (keys %$h) { next if ref $h->{$_} ne 'ARRAY'; next if ref $h->{$_}[0]; # exclude deep structures push @multival, $_; } }; # Transform multi-valued options into arrays hash_tree_apply $config, sub { my $h = shift; while (my ($k,$v) = each %$h) { next if ref $v; # skip anything other than scalars $h->{$k} = [split '\[separator\]', $v]; unless (grep {$k eq $_} @multival) { $h->{$k} = pop @{$h->{$k}}; # single valued options, remember last defined value only } } }; hash_tree_apply $config, sub { fixbools shift, $allbools }; #print (Dumper $config); return $config; } # # Check whether a file is XML # sub isXML { my $file = shift; $log->fatal("Can't open $file") unless open (CONFIGFILE, "<$file"); my $isxml = 0; while (my $line = ) { chomp $line; next unless $line; if ($line =~ m/^\s*<\?xml/) {$isxml = 1; last}; if ($line =~ m/^\s* 2008-12-12T15:19:03Z ) sub mdstoiso { return "$1-$2-$3T$4:$5:$6Z" if shift =~ /^(\d\d\d\d)(\d\d)(\d\d)(\d\d)(\d\d)(\d\d(?:\.\d+)?)Z$/; return undef; } sub glue2bool { my $bool = shift; return 'undefined' unless defined $bool; return $bool ? "true" : "false"; } # TODO: Stage-in and Stage-out are substates of what? sub bes_state { my ($gm_state,$lrms_state) = @_; my $is_pending = 0; if ($gm_state =~ /^PENDING:/) { $is_pending = 1; $gm_state = substr $gm_state, 7 } if ($gm_state eq "ACCEPTED") { return [ "Pending", "Accepted" ]; } elsif ($gm_state eq "PREPARING") { return [ "Running", "Stage-in" ]; } elsif ($gm_state eq "SUBMIT") { return [ "Running", "Submitting" ]; } elsif ($gm_state eq "INLRMS") { if (not defined $lrms_state) { return [ "Running" ]; } elsif ($lrms_state eq 'Q') { return [ "Running", "Queuing" ]; } elsif ($lrms_state eq 'R') { return [ "Running", "Executing" ]; } elsif ($lrms_state eq 'EXECUTED' or $lrms_state eq '') { return [ "Running", "Executed" ]; } elsif ($lrms_state eq 'S') { return [ "Running", "Suspended" ]; } else { return [ "Running", "LRMSOther" ]; } } elsif ($gm_state eq "FINISHING") { return [ "Running", "Stage-out" ]; } elsif ($gm_state eq "CANCELING") { return [ "Running", "Cancelling" ]; } elsif ($gm_state eq "KILLED") { return [ "Cancelled" ]; } elsif ($gm_state eq "FAILED") { return [ "Failed" ]; } elsif ($gm_state eq "FINISHED") { return [ "Finished" ]; } elsif ($gm_state eq "DELETED") { # Cannot map to BES state return [ ]; } else { return [ ]; } } # TODO: understand emies state mapping. # this sub evaluates also failure states and changes # emies attributes accordingly. 
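# A couple of hedged examples of the mapping implemented below (derived by
# reading the code, not part of the original documentation):
#   emies_state("INLRMS", "R")              -> { State => ["processing-running"],
#                                                Attributes => ["app-running"] }
#   emies_state("FAILED", "Q", "PREPARING") -> { State => ["terminal"],
#                                                Attributes => ["preprocessing-failure"] }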
sub emies_state { # TODO: probably add $failure_state taken from somewhere my ($gm_state,$lrms_state,$failure_state) = @_; my $es_state = { 'State' => '', 'Attributes' => '' }; my $is_pending = 0; if ($gm_state =~ /^PENDING:/) { $is_pending = 1; $gm_state = substr $gm_state, 8 } if ($gm_state eq "ACCEPTED") { $es_state->{State} = [ "accepted" ]; $es_state->{Attributes} = [ "client-stagein-possible" ]; } elsif ($gm_state eq "PREPARING") { $es_state->{State} = [ "preprocessing" ]; $es_state->{Attributes} = [ "client-stagein-possible", "server-stagein" ]; } elsif ($gm_state eq "SUBMIT") { $es_state->{State} = [ "processing-accepting" ]; } elsif ($gm_state eq "INLRMS") { # TODO: check hot to map these! if (not defined $lrms_state) { $es_state->{State} = [ "processing-running" ]; $es_state->{Attributes} = [ "app-running" ]; } elsif ($lrms_state eq 'Q') { $es_state->{State} = [ "processing-queued" ]; $es_state->{Attributes} = ''; } elsif ($lrms_state eq 'R') { $es_state->{State} = [ "processing-running" ]; $es_state->{Attributes} = [ "app-running" ]; } elsif ($lrms_state eq 'EXECUTED' or $lrms_state eq '') { $es_state->{State} = [ "processing-running" ]; $es_state->{Attributes} = ''; } elsif ($lrms_state eq 'S') { $es_state->{State} = [ "processing-running" ]; $es_state->{Attributes} = [ "batch-suspend" ]; } else { $es_state->{State} = [ "processing-running" ]; $es_state->{Attributes} = ''; } } elsif ($gm_state eq "FINISHING") { $es_state->{State} = [ "postprocessing" ]; $es_state->{Attributes} = [ "client-stageout-possible", "server-stageout" ]; } elsif ($gm_state eq "CANCELING") { $es_state->{State} = [ "processing" ]; $es_state->{Attributes} = [ "processing-cancel" ]; } elsif ($gm_state eq "KILLED") { $es_state->{State} = [ "terminal" ]; if (! defined($failure_state)) { $log->warning('EMIES Failure state attribute cannot be determined.'); } else { if ($failure_state eq "ACCEPTED") { $es_state->{Attributes} = ["validation-failure"]; } elsif ($failure_state eq "PREPARING") { $es_state->{Attributes} = ["preprocessing-cancel"]; } elsif ($failure_state eq "SUBMIT") { $es_state->{Attributes} = ["processing-cancel"]; } elsif ($failure_state eq "INLRMS") { $es_state->{Attributes} = ["processing-cancel"]; } elsif ($failure_state eq "FINISHING") { $es_state->{Attributes} = ["postprocessing-cancel"]; } elsif ($failure_state eq "FINISHED") { # TODO: $es_state->{Attributes} = ''; } elsif ($failure_state eq "DELETED") { # TODO: $es_state->{Attributes} = ''; } elsif ($failure_state eq "CANCELING") { # TODO: $es_state->{Attributes} = ''; } else { # Nothing } } } elsif ($gm_state eq "FAILED") { $es_state->{State} = [ "terminal" ]; # introduced for bug #3036 if (! 
defined($failure_state)) { $log->warning('EMIES Failure state attribute cannot be determined.'); } else { if ($failure_state eq "ACCEPTED") { $es_state->{Attributes} = ["validation-failure"]; } elsif ($failure_state eq "PREPARING") { $es_state->{Attributes} = ["preprocessing-failure"]; } elsif ($failure_state eq "SUBMIT") { $es_state->{Attributes} = ["processing-failure"]; } elsif ($failure_state eq "INLRMS") { if ( $lrms_state eq "R" ) { $es_state->{Attributes} = ["processing-failure","app-failure"]; } else { $es_state->{Attributes} = ["processing-failure"]; } } elsif ($failure_state eq "FINISHING") { $es_state->{Attributes} = ["postprocessing-failure"]; } elsif ($failure_state eq "FINISHED") { # TODO:
$es_state->{Attributes} = ''; } elsif ($failure_state eq "DELETED") { # TODO:
$es_state->{Attributes} = ''; } elsif ($failure_state eq "CANCELING") { # TODO:
$es_state->{Attributes} = ''; } else { # Nothing
} } } elsif ($gm_state eq "FINISHED") { $es_state->{State} = [ "terminal" ]; $es_state->{Attributes} = [ "client-stageout-possible" ]; } elsif ($gm_state eq "DELETED") { $es_state->{State} = [ "terminal" ]; $es_state->{Attributes} = [ "expired" ]; } elsif ($gm_state) { # this is the "pending" case
$es_state->{Attributes} = ["server-paused"]; } else { # No idea
} if ( $is_pending ) { push @{$es_state->{Attributes}}, "server-paused"; } return $es_state; }
# input is an array with (state, lrms_state, failure_state)
sub glueState { my @ng_status = @_; return [ "UNDEFINEDVALUE" ] unless $ng_status[0]; my $status = [ "nordugrid:".join(':',@ng_status) ]; my $bes_state = bes_state(@ng_status); push @$status, "bes:".$bes_state->[0] if @$bes_state; my $emies_state = emies_state(@ng_status); push @$status, "emies:".@{$emies_state->{State}}[0] if $emies_state->{State}; if ($emies_state->{Attributes}) { foreach my $attribs (@{$emies_state->{Attributes}}) { push @$status, "emiesattr:".$attribs; } } return $status; }
sub getGMStatus { my ($controldir, $ID) = @_; foreach my $gmjob_status ("$controldir/accepting/job.$ID.status", "$controldir/processing/job.$ID.status", "$controldir/finished/job.$ID.status") { unless (open (GMJOB_STATUS, "<$gmjob_status")) { next; } else { my ($first_line) = <GMJOB_STATUS>; close GMJOB_STATUS; unless ($first_line) { $log->warning("Job $ID: cannot get status from file $gmjob_status : Skipping job"); next; } chomp $first_line; return $first_line; } } return undef; }
# Helper function that assists the GLUE2 XML renderer in handling the 'splitjobs' option
# $config - the config hash
# $jobid - job id from GM
# $gmjob - a job hash ref as returned by GMJobsInfo
# $xmlGenerator - a function ref that returns a string (the job's GLUE2 XML description)
# Returns undef on error, 0 if the XML file was already up to date, 1 if it was written
sub jobXmlFileWriter { my ($config, $jobid, $gmjob, $xmlGenerator) = @_;
# If this is defined, then it's a job managed by local A-REX.
my $gmuser = $gmjob->{gmuser};
# Skip for now jobs managed by remote A-REX.
# These are still published in ldap-infosys. As long as
# distributing jobs to remote grid-managers is only
# implemented by gridftpd, remote jobs are not of interest
# for the WS interface.
return 0 unless defined $gmuser; my $controldir = $config->{control}{$gmuser}{controldir}; my $xml_file = $controldir . "/job." . $jobid .
".xml"; # Here goes simple optimisation - do not write new # XML if status has not changed while in "slow" states my $xml_time = (stat($xml_file))[9]; my $status_time = $gmjob->{statusmodified}; return 0 if defined $xml_time and defined $status_time and $status_time < $xml_time and $gmjob->{status} =~ /ACCEPTED|FINISHED|FAILED|KILLED|DELETED/; my $xmlstring = &$xmlGenerator(); return undef unless defined $xmlstring; # tempfile croaks on error my ($fh, $tmpnam) = File::Temp::tempfile("job.$jobid.xml.XXXXXXX", DIR => $controldir); binmode $fh, ':encoding(utf8)'; print $fh $xmlstring and close $fh or $log->warning("Error writing to temporary file $tmpnam: $!") and close $fh and unlink $tmpnam and return undef; rename $tmpnam, $xml_file or $log->warning("Error moving $tmpnam to $xml_file: $!") and unlink $tmpnam and return undef; # Avoid .xml files created after job is deleted # Check if status file exists if(not defined getGMStatus($controldir,$jobid)) { unlink $xml_file; return undef; } # Set timestamp to the time when the status file was read in. # This is because the status file might have been updated by the time the # XML file gets written. This step ensures that the XML will be updated on # the next run of the infoprovider. my $status_read = $gmjob->{statusread}; return undef unless defined $status_read; utime(time(), $status_read, $xml_file) or $log->warning("Couldn't touch $xml_file: $!") and return undef; # *.xml file was updated return 1; }; # Intersection of two arrays that completes in linear time. The input arrays # are the keys of the two hashes passed as reference. The intersection array # consists of the keys of the returned hash reference. sub intersection { my ($a, $b) = @_; my (%union, %xor, %isec); for (keys %$a) { $union{$_} = 1; $xor{$_} = 1 if exists $b->{$_} } for (keys %$b) { $union{$_} = 1; $xor{$_} = 1 if exists $a->{$_} } for (keys %union) { $isec{$_} = 1 if exists $xor{$_} } return \%isec; } # union of two arrays using hashes. Returns an array. sub union { my (@a, @b) = @_; my %union; foreach (@a) {$union{$_} = 1;} foreach (@b) {$union{$_} = 1;} return keys %union; } # processes NodeSelection options and returns the matching nodes. 
sub selectnodes { my ($nodes, %nscfg) = @_; return undef unless %$nodes and %nscfg; my @allnodes = keys %$nodes; my %selected = (); if ($nscfg{Regex}) { for my $re (@{$nscfg{Regex}}) { map { $selected{$_} = 1 if /$re/ } @allnodes; } } if ($nscfg{Tag}) { for my $tag (@{$nscfg{Tag}}) { for my $node (@allnodes) { my $tags = $nodes->{$node}{tags}; next unless $tags; map { $selected{$node} = 1 if $tag eq $_ } @$tags; } } } if ($nscfg{Command}) { $log->warning("Not implemented: NodeSelection: Command"); } delete $nscfg{Regex}; delete $nscfg{Tag}; delete $nscfg{Command}; $log->warning("Unknown NodeSelection option: @{[keys %nscfg]}") if %nscfg; $selected{$_} = $nodes->{$_} for keys %selected; return \%selected; } # Sums up ExecutionEnvironments attributes from the LRMS plugin sub xestats { my ($xenv, $nodes) = @_; return undef unless %$nodes; my %continuous = (vmem => 'VirtualMemorySize', pmem => 'MainMemorySize'); my %discrete = (lcpus => 'LogicalCPUs', pcpus => 'PhysicalCPUs', sysname => 'OSFamily', machine => 'Platform'); my (%minval, %maxval); my (%minnod, %maxnod); my %distrib; my %stats = (total => 0, free => 0, available => 0); for my $host (keys %$nodes) { my %node = %{$nodes->{$host}}; $stats{total}++; $stats{free}++ if $node{isfree}; $stats{available}++ if $node{isavailable}; # Also agregate values across nodes, check consistency for my $prop (%discrete) { my $val = $node{$prop}; next unless defined $val; push @{$distrib{$prop}{$val}}, $host; } for my $prop (keys %continuous) { my $val = $node{$prop}; next unless defined $val; if (not defined $minval{$prop} or (defined $minval{$prop} and $minval{$prop} > $val)) { $minval{$prop} = $val; $minnod{$prop} = $host; } if (not defined $maxval{$prop} or (defined $maxval{$prop} and $maxval{$prop} < $val)) { $maxval{$prop} = $val; $maxnod{$prop} = $host; } } } my $homogeneous = 1; while (my ($prop, $opt) = each %discrete) { my $values = $distrib{$prop}; next unless $values; if (scalar keys %$values > 1) { my $msg = "ExecutionEnvironment $xenv is inhomogeneous regarding $opt:"; while (my ($val, $hosts) = each %$values) { my $first = pop @$hosts; my $remaining = @$hosts; $val = defined $val ? 
$val : 'undef'; $msg .= " $val($first"; $msg .= "+$remaining more" if $remaining; $msg .= ")"; } $log->info($msg); $homogeneous = 0; } else { my ($val) = keys %$values; $stats{$prop} = $val; } } if ($maxval{pmem}) { my $rdev = 2 * ($maxval{pmem} - $minval{pmem}) / ($maxval{pmem} + $minval{pmem}); if ($rdev > 0.1) { my $msg = "ExecutionEnvironment $xenv has variability larger than 10% regarding MainMemorySize:"; $msg .= " Min=$minval{pmem}($minnod{pmem}),"; $msg .= " Max=$maxval{pmem}($maxnod{pmem})"; $log->info($msg); $homogeneous = 0; } $stats{pmem} = $minval{pmem}; } if ($maxval{vmem}) { my $rdev = 2 * ($maxval{vmem} - $minval{vmem}) / ($maxval{vmem} + $minval{vmem}); if ($rdev > 0.5) { my $msg = "ExecutionEnvironment $xenv has variability larger than 50% regarding VirtualMemorySize:"; $msg .= " Min=$minval{vmem}($minnod{vmem}),"; $msg .= " Max=$maxval{vmem}($maxnod{vmem})"; $log->debug($msg); } $stats{vmem} = $minval{vmem}; } $stats{homogeneous} = $homogeneous; return \%stats; } # Combine info about ExecutionEnvironments from config options and the LRMS plugin sub xeinfos { my ($config, $nodes) = @_; my $infos = {}; my %nodemap = (); my @xenvs = keys %{$config->{xenvs}}; for my $xenv (@xenvs) { my $xecfg = $config->{xenvs}{$xenv}; my $info = $infos->{$xenv} = {}; my $nscfg = $xecfg->{NodeSelection}; if (ref $nodes eq 'HASH') { my $selected; if (not $nscfg) { $log->info("NodeSelection configuration missing for ExecutionEnvironment $xenv, implicitly assigning all nodes into it") unless keys %$nodes == 1 and @xenvs == 1; $selected = $nodes; } else { $selected = selectnodes($nodes, %$nscfg); } $nodemap{$xenv} = $selected; $log->debug("Nodes in ExecutionEnvironment $xenv: ".join ' ', keys %$selected); $log->info("No nodes matching NodeSelection for ExecutionEnvironment $xenv") unless %$selected; my $stats = xestats($xenv, $selected); if ($stats) { $info->{ntotal} = $stats->{total}; $info->{nbusy} = $stats->{available} - $stats->{free}; $info->{nunavailable} = $stats->{total} - $stats->{available}; $info->{pmem} = $stats->{pmem} if $stats->{pmem}; $info->{vmem} = $stats->{vmem} if $stats->{vmem}; $info->{pcpus} = $stats->{pcpus} if $stats->{pcpus}; $info->{lcpus} = $stats->{lcpus} if $stats->{lcpus}; $info->{slots} = $stats->{slots} if $stats->{slots}; $info->{sysname} = $stats->{sysname} if $stats->{sysname}; $info->{machine} = $stats->{machine} if $stats->{machine}; } } else { $log->info("The LRMS plugin has no support for NodeSelection options, ignoring them") if $nscfg; } $info->{pmem} = $xecfg->{MainMemorySize} if $xecfg->{MainMemorySize}; $info->{vmem} = $xecfg->{VirtualMemorySize} if $xecfg->{VirtualMemorySize}; $info->{pcpus} = $xecfg->{PhysicalCPUs} if $xecfg->{PhysicalCPUs}; $info->{lcpus} = $xecfg->{LogicalCPUs} if $xecfg->{LogicalCPUs}; $info->{sysname} = $xecfg->{OSFamily} if $xecfg->{OSFamily}; $info->{machine} = $xecfg->{Platform} if $xecfg->{Platform}; } # Check for overlap of nodes if (ref $nodes eq 'HASH') { for (my $i=0; $i<@xenvs; $i++) { my $nodes1 = $nodemap{$xenvs[$i]}; next unless $nodes1; for (my $j=0; $j<$i; $j++) { my $nodes2 = $nodemap{$xenvs[$j]}; next unless $nodes2; my $overlap = intersection($nodes1, $nodes2); $log->warning("Overlap detected between ExecutionEnvironments $xenvs[$i] and $xenvs[$j]. " ."Use NodeSelection options to select correct nodes") if %$overlap; } } } return $infos; } # For each duration, find the largest available numer of slots of any user # Input: the users hash returned by thr LRMS module. 
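# Hedged example (numbers are invented): freecpus maps "number of free slots"
# to "minutes those slots stay free", so a users hash like
#   { 500 => { freecpus => { 4 => 60 } }, 501 => { freecpus => { 8 => 30 } } }
# yields ( 3600 => 4, 1800 => 8 ), i.e. per duration in seconds the largest
# slot count offered to any single user.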
sub max_userfreeslots { my ($users) = @_; my %timeslots; for my $uid (keys %$users) { my $uinfo = $users->{$uid}; next unless defined $uinfo->{freecpus}; for my $nfree ( keys %{$uinfo->{freecpus}} ) { my $seconds = 60 * $uinfo->{freecpus}{$nfree}; if ($timeslots{$seconds}) { $timeslots{$seconds} = $nfree > $timeslots{$seconds} ? $nfree : $timeslots{$seconds}; } else { $timeslots{$seconds} = $nfree; } } } return %timeslots; } # adds a prefix to a set of strings in an array. # input: the prefix string, an array. sub addprefix { my $prefix = shift @_; my @set = @_; my @prefixedset = @set; @prefixedset = map { $prefix.$_ } @prefixedset; return @prefixedset; } # TODO: add VOs information ############################################################################ # Combine info from all sources to prepare the final representation ############################################################################ sub collect($) { my ($data) = @_; # used for testing # print Dumper($data); my $config = $data->{config}; my $usermap = $data->{usermap}; my $host_info = $data->{host_info}; my $rte_info = $data->{rte_info}; my $gmjobs_info = $data->{gmjobs_info}; my $lrms_info = $data->{lrms_info}; my $nojobs = $data->{nojobs}; my $creation_time = timenow(); my $validity_ttl = $config->{validity_ttl}; my $hostname = $config->{hostname} || $host_info->{hostname}; my @allxenvs = keys %{$config->{xenvs}}; my @allshares = keys %{$config->{shares}}; # GLUE2 shares differ from the configuration one. # the one to one mapping from a share to a queue is too strong. # the following datastructure reshuffles queues into proper # GLUE2 shares based on authorizedvo # This may require rethinking of parsing the configuration... my $GLUE2shares = {}; # If authorizedvo is present in arc.conf defined, # generate one additional share for each VO. 
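# Hedged illustration (queue and VO names are invented): a queue "batch"
# with authorizedvo atlas and cms ends up as three GLUE2 shares:
#   "batch"       - plain copy of the queue, authorizedvo removed
#   "batch_atlas" - MappingQueue = "batch", authorizedvo = "atlas",
#                   MappingPolicies = { BasicMappingPolicy => '' }
#   "batch_cms"   - likewise for cms
# If the queue block carries no authorizedvo, the per-VO shares are derived
# from the [cluster] AuthorizedVO list instead.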
# # TODO: refactorize this to apply to cluster and queue VOs # with a single subroutine, might be handy for glue1 rendering # ## for each share(queue) for my $currentshare (@allshares) { # always add a share with no mapping policy my $share_name = $currentshare; $GLUE2shares->{$share_name} = Storable::dclone($config->{shares}{$currentshare}); # Create as many shares as the number of authorizedvo entries # in the [queue/queuename] block # if there is any VO generate new names if (defined $config->{shares}{$currentshare}{authorizedvo}) { my ($queueauthvos) = $config->{shares}{$currentshare}{authorizedvo}; for my $queueauthvo (@{$queueauthvos}) { # generate an additional share with such authorizedVO my $share_vo = $currentshare.'_'.$queueauthvo; $GLUE2shares->{$share_vo} = Storable::dclone($config->{shares}{$currentshare}); # add the queue from configuration as MappingQueue $GLUE2shares->{$share_vo}{MappingQueue} = $currentshare; # remove VOs from that share, substitute with default VO $GLUE2shares->{$share_vo}{authorizedvo} = $queueauthvo; # Add supported policies $GLUE2shares->{$share_vo}{MappingPolicies} = { 'BasicMappingPolicy' => ''}; } } else { # create as many shares as the authorizedvo in the [cluster] block # iff authorizedvo not defined in queue block if (defined $config->{service}{AuthorizedVO}) { my ($clusterauthvos) = $config->{service}{AuthorizedVO}; for my $clusterauthvo (@{$clusterauthvos}) { # generate an additional share with such authorizedVO my $share_vo = $currentshare.'_'.$clusterauthvo; $GLUE2shares->{$share_vo} = Storable::dclone($config->{shares}{$currentshare}); # add the queue from configuration as MappingQueue $GLUE2shares->{$share_vo}{MappingQueue} = $currentshare; # remove VOs from that share, substitute with default VO $GLUE2shares->{$share_vo}{authorizedvo} = $clusterauthvo; $GLUE2shares->{$share_vo}{MappingPolicies} = { 'BasicMappingPolicy' => '' }; } } } # remove VO array from the datastructure of the share with the same name of the queue delete $GLUE2shares->{$share_name}{authorizedvo}; undef $share_name; } ##replace @allshares with the newly created shares #@allshares = keys %{$GLUE2shares}; my $homogeneous = 1; $homogeneous = 0 if @allxenvs > 1; $homogeneous = 0 if @allshares > 1 and @allxenvs == 0; for my $xeconfig (values %{$config->{xenvs}}) { $homogeneous = 0 if defined $xeconfig->{Homogeneous} and not $xeconfig->{Homogeneous}; } my $xeinfos = xeinfos($config, $lrms_info->{nodes}); # Figure out total number of CPUs my ($totalpcpus, $totallcpus) = (0,0); # First, try to sum up cpus from all ExecutionEnvironments for my $xeinfo (values %$xeinfos) { unless (exists $xeinfo->{ntotal} and $xeinfo->{pcpus}) { $totalpcpus = 0; last } $totalpcpus += $xeinfo->{ntotal} * $xeinfo->{pcpus}; } for my $xeinfo (values %$xeinfos) { unless (exists $xeinfo->{ntotal} and $xeinfo->{lcpus}) { $totallcpus = 0; last } $totallcpus += $xeinfo->{ntotal} * $xeinfo->{lcpus}; } #$log->debug("Cannot determine total number of physical CPUs in all ExecutionEnvironments") unless $totalpcpus; $log->debug("Cannot determine total number of logical CPUs in all ExecutionEnvironments") unless $totallcpus; # Next, use value returned by LRMS in case the the first try failed. # OBS: most LRMSes don't differentiate between Physical and Logical CPUs. 
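# Hedged example (numbers are invented): two ExecutionEnvironments with
# 8 nodes x 2 physical CPUs and 4 nodes x 4 physical CPUs give
# totalpcpus = 8*2 + 4*4 = 32; if any environment lacks ntotal or pcpus the
# partial sum is discarded and the LRMS cluster total below is used instead.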
$totalpcpus ||= $lrms_info->{cluster}{totalcpus}; $totallcpus ||= $lrms_info->{cluster}{totalcpus}; # my @authorizedvos = (); # if ($config->{service}{AuthorizedVO}) { # @authorizedvos = @{$config->{service}{AuthorizedVO}}; # # add VO: suffix to each authorized VO # @authorizedvos = map { "vo:".$_ } @authorizedvos; # } # # # # # # # # # # # # # # # # # # # # # # # # Job statistics # # # # # # # # # # # # # # # # # # # # # # # # # # total jobs in each GM state my %gmtotalcount; # jobs in each GM state, by share my %gmsharecount; # grid jobs in each lrms sub-state (queued, running, suspended), by share my %inlrmsjobs; # grid jobs in each lrms sub-state (queued, running, suspended) my %inlrmsjobstotal; # slots needed by grid jobs in each lrms sub-state (queued, running, suspended), by share my %inlrmsslots; # number of slots needed by all waiting jobs, per share my %requestedslots; # Jobs waiting to be prepared by GM (indexed by share) my %pending; # Jobs waiting to be prepared by GM my $pendingtotal; # Jobs being prepared by GM (indexed by share) my %share_prepping; # Jobs being prepared by GM (indexed by grid owner) my %user_prepping; # $user_prepping{$_} = 0 for keys %$usermap; # jobids divided per interface. This datastructure # is convenience way to fill jobs per endpoint # each endpoint its list of jobids my $jobs_by_endpoint = {}; # fills most of the above hashes for my $jobid (keys %$gmjobs_info) { my $job = $gmjobs_info->{$jobid}; my $gridowner = $gmjobs_info->{$jobid}{subject}; my $share = $job->{share}; # take only the first VO for now. # TODO: problem. A job gets assigned to the default # queue that is not assigned to that VO. How to solve? my $vomsvo = $job->{vomsvo} if defined $job->{vomsvo}; my $sharevomsvo = $share.'_'.$vomsvo if defined $vomsvo; my $gmstatus = $job->{status} || ''; $gmtotalcount{totaljobs}++; $gmsharecount{$share}{totaljobs}++; # add info for VO dedicated shares $gmsharecount{$sharevomsvo}{totaljobs}++ if defined $vomsvo; # count GM states by category my %states = ( 'UNDEFINED' => [0, 'undefined'], 'ACCEPTING' => [1, 'accepted'], 'ACCEPTED' => [1, 'accepted'], 'PENDING:ACCEPTED' => [1, 'accepted'], 'PREPARING' => [2, 'preparing'], 'PENDING:PREPARING'=> [2, 'preparing'], 'SUBMIT' => [2, 'preparing'], 'SUBMITTING' => [2, 'preparing'], 'INLRMS' => [3, 'inlrms'], 'PENDING:INLRMS' => [4, 'finishing'], 'FINISHING' => [4, 'finishing'], 'CANCELING' => [4, 'finishing'], 'FAILED' => [5, 'finished'], 'KILLED' => [5, 'finished'], 'FINISHED' => [5, 'finished'], 'DELETED' => [6, 'deleted'] ); unless ($states{$gmstatus}) { $log->warning("Unexpected job status for job $jobid: $gmstatus"); $gmstatus = $job->{status} = 'UNDEFINED'; } my ($age, $category) = @{$states{$gmstatus}}; $gmtotalcount{$category}++; $gmsharecount{$share}{$category}++; $gmsharecount{$sharevomsvo}{$category}++ if defined $vomsvo; if ($age < 6) { $gmtotalcount{notdeleted}++; $gmsharecount{$share}{notdeleted}++; $gmsharecount{$sharevomsvo}{notdeleted}++ if defined $vomsvo; } if ($age < 5) { $gmtotalcount{notfinished}++; $gmsharecount{$share}{notfinished}++; $gmsharecount{$sharevomsvo}{notfinished}++ if defined $vomsvo; } if ($age < 3) { $gmtotalcount{notsubmitted}++; $gmsharecount{$share}{notsubmitted}++; $gmsharecount{$sharevomsvo}{notsubmitted}++ if defined $vomsvo; $requestedslots{$share} += $job->{count} || 1; $share_prepping{$share}++; if (defined $vomsvo) { $requestedslots{$sharevomsvo} += $job->{count} || 1; $share_prepping{$sharevomsvo}++; } # TODO: is this used anywhere? 
$user_prepping{$gridowner}++ if $gridowner; } if ($age < 2) { $pending{$share}++; $pending{$sharevomsvo}++ if defined $vomsvo; $pendingtotal++; } # count grid jobs running and queued in LRMS for each share if ($gmstatus eq 'INLRMS') { my $lrmsid = $job->{localid} || 'IDNOTFOUND'; my $lrmsjob = $lrms_info->{jobs}{$lrmsid}; my $slots = $job->{count} || 1; if (defined $lrmsjob) { if ($lrmsjob->{status} ne 'EXECUTED') { $inlrmsslots{$share}{running} ||= 0; $inlrmsslots{$share}{suspended} ||= 0; $inlrmsslots{$share}{queued} ||= 0; if (defined $vomsvo) { $inlrmsslots{$sharevomsvo}{running} ||= 0; $inlrmsslots{$sharevomsvo}{suspended} ||= 0; $inlrmsslots{$sharevomsvo}{queued} ||= 0; } if ($lrmsjob->{status} eq 'R') { $inlrmsjobstotal{running}++; $inlrmsjobs{$share}{running}++; $inlrmsslots{$share}{running} += $slots; if (defined $vomsvo) { $inlrmsjobs{$sharevomsvo}{running}++; $inlrmsslots{$sharevomsvo}{running} += $slots; } } elsif ($lrmsjob->{status} eq 'S') { $inlrmsjobstotal{suspended}++; $inlrmsjobs{$share}{suspended}++; $inlrmsslots{$share}{suspended} += $slots; if (defined $vomsvo) { $inlrmsjobs{$sharevomsvo}{suspended}++; $inlrmsslots{$sharevomsvo}{suspended} += $slots; } } else { # Consider other states 'queued' $inlrmsjobstotal{queued}++; $inlrmsjobs{$share}{queued}++; $inlrmsslots{$share}{queued} += $slots; $requestedslots{$share} += $slots; if (defined $vomsvo) { $inlrmsjobs{$sharevomsvo}{queued}++; $inlrmsslots{$sharevomsvo}{queued} += $slots; $requestedslots{$sharevomsvo} += $slots; } } } } else { $log->warning("Info missing about lrms job $lrmsid"); } } # fills efficiently %jobs_by_endpoint, defaults to gridftp my $jobinterface = $job->{interface} || 'org.nordugrid.gridftpjob'; $jobs_by_endpoint->{$jobinterface}{$jobid} = {}; } my $admindomain = $config->{admindomain}{Name}; my $lrmsname = $config->{lrms}; # Calculate endpoint URLs for A-REX and ARIS. # check what is enabled in configuration # also calculates static data that can be triggered by endpoints # such as known capabilities my $csvendpointsnum = 0; my $csvcapabilities = {}; my $epscapabilities = {}; # sets the default slapd port if not defined in config file. # this is not nice, as default port might change and would be hardcoded. # grid-infosys script sets the defaults so there is no smarter way of doing this now. my $ldaphostport = defined($config->{SlapdPort}) ? "ldap://$hostname:$config->{SlapdPort}/" : "ldap://$hostname:2135/"; my $ldapngendpoint = ''; my $ldapglue1endpoint = ''; my $ldapglue2endpoint = ''; my $gridftphostport = ''; # TODO: calculate capabilities in a more efficient way. Maybe set # them here for each endpoint and then copy them later? # here we run the risk of having them not in synch with the # endpoints. # data push/pull capabilities. # TODO: scan datalib patch to searc for .apd $epscapabilities->{'common'} = [ 'data.transfer.cepull.ftp', 'data.transfer.cepull.http', 'data.transfer.cepull.https', 'data.transfer.cepull.httpg', 'data.transfer.cepull.gridftp', 'data.transfer.cepull.srm', 'data.transfer.cepush.ftp', 'data.transfer.cepush.http', 'data.transfer.cepush.https', 'data.transfer.cepush.httpg', 'data.transfer.cepush.gridftp', 'data.transfer.cepush.srm', ]; ## Endpoints initialization. # checks for defined paths and enabled features, sets GLUE2 capabilities. 
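# Hedged example of the endpoint count assembled below (the enabled set is
# invented): with the gridftp job interface (+1), the WS interface (+2, XBES
# and WSRF), EMI-ES (+5 port-types), ldapng (+1) and GLUE2 LDAP (+1) enabled,
# csvendpointsnum comes to 10; the value feeds the ComputingService
# Complexity attribute further down.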
# for org.nordugrid.gridftpjob if ($config->{GridftpdEnabled} == 1) { $gridftphostport = "$hostname:$config->{GridftpdPort}"; $csvendpointsnum++; $epscapabilities->{'org.nordugrid.gridftpjob'} = [ 'executionmanagement.jobexecution', 'executionmanagement.jobmanager', 'executionmanagement.jobdescription' ]; $epscapabilities->{'common'} = [ @{$epscapabilities->{'common'}}, ( 'data.access.sessiondir.gridftp', 'data.access.stageindir.gridftp', 'data.access.stageoutdir.gridftp' ) ]; }; # for org.nordugrid.xbes my $arexhostport = ''; if ($config->{enable_arc_interface}) { $arexhostport = $config->{arexhostport}; $csvendpointsnum = $csvendpointsnum + 2; # xbes and wsrf $epscapabilities->{'org.nordugrid.xbes'} = [ 'executionmanagement.jobexecution', 'executionmanagement.jobmanager', 'executionmanagement.jobdescription', 'security.delegation' ]; $epscapabilities->{'common'} = [ @{$epscapabilities->{'common'}}, ( 'data.access.sessiondir.https', 'data.access.stageindir.https', 'data.access.stageoutdir.https' ) ]; }; # The following are for EMI-ES my $emieshostport = ''; if ($config->{enable_emies_interface}) { $emieshostport = $config->{arexhostport}; $csvendpointsnum = $csvendpointsnum + 5; $epscapabilities->{'org.ogf.glue.emies.activitycreation'} = [ 'executionmanagement.jobcreation', 'executionmanagement.jobdescription' ]; $epscapabilities->{'org.ogf.glue.emies.activitymanagement'} = [ 'executionmanagement.jobmanagement', 'information.lookup.job' ]; $epscapabilities->{'org.ogf.glue.emies.resourceinfo'} = [ 'information.discovery.resource', 'information.query.xpath1' ]; $epscapabilities->{'org.ogf.glue.emies.activityinfo'} = [ 'information.discovery.job', 'information.lookup.job' ]; $epscapabilities->{'org.ogf.glue.emies.delegation'} = [ 'security.delegation' ]; } # The following is for the Stagein interface my $stageinhostport = ''; # ARIS LDAP endpoints # ldapng if ($config->{infosys_nordugrid} || $config->{infosys_glue12}) { $csvendpointsnum++; $ldapngendpoint = $ldaphostport."Mds-Vo-Name=local,o=grid"; $epscapabilities->{'org.nordugrid.ldapng'} = [ 'information.discovery.resource' ]; } # ldapglue1 if ($config->{infosys_glue12}) { $csvendpointsnum++; $ldapglue1endpoint = $ldaphostport."Mds-Vo-Name=resource,o=grid"; $epscapabilities->{'org.nordugrid.ldapglue1'} = [ 'information.discovery.resource' ]; } # ldapglue2 if ($config->{infosys_glue2_ldap}) { $csvendpointsnum++; $ldapglue2endpoint = $ldaphostport."o=glue"; $epscapabilities->{'org.nordugrid.ldapglue2'} = [ 'information.discovery.resource' ]; } # Calculcate service capabilities as a union, using hash power foreach my $key (keys %{$epscapabilities}) { foreach my $capability (@{$epscapabilities->{$key}}) { $csvcapabilities->{$capability} = ''; } } # if all sessiondirs are in drain state, put the endpoints in # drain state too my $servingstate = 'draining'; my ($sessiondirs) = ($config->{control}{'.'}{sessiondir}); foreach my $sd (@$sessiondirs) { my @hasdrain = split(' ',$sd); if ($hasdrain[-1] ne 'drain') { $servingstate = 'production'; } } # TODO: userdomain my $userdomain=''; # Global IDs # ARC choices are as follows: # my $adID = "urn:ad:$admindomain"; # AdminDomain ID my $udID = "urn:ud:$userdomain" ; # UserDomain ID; my $csvID = "urn:ogf:ComputingService:$hostname:arex"; # ComputingService ID my $cmgrID = "urn:ogf:ComputingManager:$hostname:$lrmsname"; # ComputingManager ID # Computing Endpoints IDs my $ARCgftpjobcepID; $ARCgftpjobcepID = 
"urn:ogf:ComputingEndpoint:$hostname:gridftpjob:gsiftp://$gridftphostport".$config->{GridftpdMountPoint}; # ARCGridFTPComputingEndpoint ID my $ARCWScepID; $ARCWScepID = "urn:ogf:ComputingEndpoint:$hostname:xbes:$config->{endpoint}" if $config->{enable_arc_interface}; # ARCWSComputingEndpoint ID my $EMIEScepIDp; $EMIEScepIDp = "urn:ogf:ComputingEndpoint:$hostname:emies:$config->{endpoint}" if $config->{enable_emies_interface}; # EMIESComputingEndpoint ID my $StageincepID = "urn:ogf:ComputingEndpoint:$hostname:gridftp:$stageinhostport"; # StageinComputingEndpoint ID # the following is needed to publish in shares. Must be modified # if we support share-per-endpoint configurations. my @cepIDs = (); push(@cepIDs,$ARCgftpjobcepID) if ($config->{GridftpdEnabled} == 1); push(@cepIDs,$ARCWScepID) if ($config->{enable_arc_interface}); push(@cepIDs,$EMIEScepIDp) if ($config->{enable_emies_interface}); my $cactIDp = "urn:caid:$hostname"; # ComputingActivity ID prefix my $cshaIDp = "urn:ogf:ComputingShare:$hostname"; # ComputingShare ID prefix my $xenvIDp = "urn:ogf:ExecutionEnvironment:$hostname"; # ExecutionEnvironment ID prefix my $aenvIDp = "urn:ogf:ApplicationEnvironment:$hostname:rte"; # ApplicationEnvironment ID prefix # my $ahIDp = "urn:ogf:ApplicationHandle:$hostname:"; # ApplicationHandle ID prefix my $apolIDp = "urn:ogf:AccessPolicy:$hostname"; # AccessPolicy ID prefix my $mpolIDp = "urn:ogf:MappingPolicy:$hostname"; # MappingPolicy ID prefix my %cactIDs; # ComputingActivity IDs my %cshaIDs; # ComputingShare IDs my %aenvIDs; # ApplicationEnvironment IDs my %xenvIDs; # ExecutionEnvironment IDs my $tseID = "urn:ogf:ToStorageElement:$hostname:storageservice"; # ToStorageElement ID prefix # Other Service IDs # my $ARISsvID = "urn:ogf:Service:$hostname:aris"; # ARIS service ID REMOVED my $ARISepIDp = "urn:ogf:Endpoint:$hostname"; # ARIS Endpoint ID kept for uniqueness my $CacheIndexsvID = "urn:ogf:Service:$hostname:cacheindex"; # Cache-Index service ID my $CacheIndexepIDp = "urn:ogf:Endpoint:$hostname:cacheindex"; # Cache-Index Endpoint ID my $HEDControlsvID = "urn:ogf:Service:$hostname:hedcontrol"; # HED-CONTROL service ID my $HEDControlepIDp = "urn:ogf:Endpoint:$hostname:hedcontrol"; # HED-CONTROL Endpoint ID # Generate ComputingShare IDs for my $share (keys %{$GLUE2shares}) { $cshaIDs{$share} = "$cshaIDp:$share"; } # Generate ApplicationEnvironment IDs my $aecount = 0; for my $rte (keys %$rte_info) { $aenvIDs{$rte} = "$aenvIDp:$aecount"; $aecount++; } # Generate ExecutionEnvironment IDs my $envcount = 0; $xenvIDs{$_} = "$xenvIDp:execenv".$envcount++ for @allxenvs; # generate ComputingActivity IDs unless ($nojobs) { for my $jobid (keys %$gmjobs_info) { my $share = $gmjobs_info->{$jobid}{share}; my $interface = $gmjobs_info->{$jobid}{'interface'}; $cactIDs{$share}{$jobid} = "$cactIDp:$interface:$jobid"; } } # TODO: in a first attempt, accesspolicies were expected to be in the XML # config. this is not yet the case, moreover it might not be possible to # do that. So the following is commented out for now. 
# unless (@{$config->{accesspolicies}}) { # $log->warning("No AccessPolicy configured"); # } # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # build information tree # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # my $callcount = 0; ### Authorized VOs: Policy stuff # calculate union of the authorizedvos in shares - a hash is used as a set # and add it to the cluster accepted authorizedvos my @clusterauthorizedvos; if ($config->{service}{AuthorizedVO}) { @clusterauthorizedvos = @{$config->{service}{AuthorizedVO}}; } my $unionauthorizedvos; if (@clusterauthorizedvos) { foreach my $vo (@clusterauthorizedvos) { $unionauthorizedvos->{$vo}=''; } } # add the per-queue authorizedvo if any my $shares = Storable::dclone($GLUE2shares); for my $share ( keys %$shares ) { if ($GLUE2shares->{$share}{authorizedvo}) { my (@tempvos) = $GLUE2shares->{$share}{authorizedvo} if ($GLUE2shares->{$share}{authorizedvo}); foreach my $vo (@tempvos) { $unionauthorizedvos->{$vo}=''; } } } my @unionauthorizedvos; if ($unionauthorizedvos) { @unionauthorizedvos = keys %$unionauthorizedvos ; @unionauthorizedvos = addprefix('vo:',@unionauthorizedvos); undef $unionauthorizedvos; } # AccessPolicies implementation. Can be called for each endpoint. # the basic policy value is taken from the service AuthorizedVO. # The logic is similar to the endpoints: first # all the policies subroutines are created, then stored in $accesspolicies, # then every endpoint passes custom values to the getAccessPolicies sub. my $accesspolicies = {}; # Basic access policy: union of authorizedvos my $getBasicAccessPolicy = sub { my $apol = {}; my ($epID) = @_; $apol->{ID} = "$apolIDp:basic"; $apol->{CreationTime} = $creation_time; $apol->{Validity} = $validity_ttl; $apol->{Scheme} = "basic"; if (@unionauthorizedvos) { $apol->{Rule} = [ @unionauthorizedvos ]; }; # $apol->{UserDomainID} = $apconf->{UserDomainID}; $apol->{EndpointID} = $epID; return $apol; }; $accesspolicies->{BasicAccessPolicy} = $getBasicAccessPolicy if (@unionauthorizedvos); ## more accesspolicies can go here. ## subroutines structure to return accesspolicies my $getAccessPolicies = sub { return undef unless my ($accesspolicy, $sub) = each %$accesspolicies; my ($epID) = @_; return &{$sub}($epID); }; # MappingPolicies implementation. Can be called for each ShareID. # the basic policy value is taken from the service AuthorizedVO. # The logic is similar to the endpoints: first # all the policies subroutines are created, stored in mappingpolicies, # then every endpoint passes custom values to the getMappingPolicies sub. my $mappingpolicies = {}; # Basic mapping policy: it can only contain one vo. my $getBasicMappingPolicy = sub { my ($shareID, $sharename) = @_; my $mpol = {}; $mpol->{CreationTime} = $creation_time; $mpol->{Validity} = $validity_ttl; $mpol->{ID} = "$mpolIDp:basic:$GLUE2shares->{$sharename}{authorizedvo}"; $mpol->{Scheme} = "basic"; $mpol->{Rule} = [ "vo:$GLUE2shares->{$sharename}{authorizedvo}" ]; # $mpol->{UserDomainID} = $apconf->{UserDomainID}; $mpol->{ShareID} = $shareID; return $mpol; }; $mappingpolicies->{'BasicMappingPolicy'} = $getBasicMappingPolicy; ## more accesspolicies can go here. ## subroutines structure to return MappingPolicies # MappingPolicies are processed by using the share name and the # GLUE2shares datastructure that contains the MappingPolicies applied to this # share. 
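# Hedged example (share and VO names are invented): for a share "batch_atlas"
# whose authorizedvo is "atlas", BasicMappingPolicy returns roughly
#   { ID => "urn:ogf:MappingPolicy:<hostname>:basic:atlas", Scheme => "basic",
#     Rule => [ "vo:atlas" ], ShareID => <the ComputingShare ID passed in> }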
my $getMappingPolicies = sub { my ($shareID, $sharename) = @_; return undef unless my ($policy) = each %{$GLUE2shares->{$sharename}{MappingPolicies}}; my $sub = $mappingpolicies->{$policy}; return &{$sub}($shareID, $sharename); }; # TODO: the above policies can be rewritten in an object oriented fashion # one single policy object that can be specialized # it's just about changing few strings # Only makes sense once we have other policies than Basic. # function that generates ComputingService data my $getComputingService = sub { $callcount++; my $csv = {}; $csv->{CreationTime} = $creation_time; $csv->{Validity} = $validity_ttl; $csv->{ID} = $csvID; $csv->{Capability} = [keys %$csvcapabilities]; $csv->{Name} = $config->{service}{ClusterName} if $config->{service}{ClusterName}; # scalar $csv->{OtherInfo} = $config->{service}{OtherInfo} if $config->{service}{OtherInfo}; # array $csv->{Type} = 'org.nordugrid.arex'; # OBS: Service QualityLevel used to state the purpose of the service. Can be set by sysadmins. # One of: development, testing, pre-production, production $csv->{QualityLevel} = $config->{infosys_glue2_service_qualitylevel}; $csv->{StatusInfo} = $config->{service}{StatusInfo} if $config->{service}{StatusInfo}; # array my $nshares = keys %{$GLUE2shares}; $csv->{Complexity} = "endpoint=$csvendpointsnum,share=$nshares,resource=".(scalar @allxenvs); $csv->{AllJobs} = $gmtotalcount{totaljobs} || 0; # OBS: Finished/failed/deleted jobs are not counted $csv->{TotalJobs} = $gmtotalcount{notfinished} || 0; $csv->{RunningJobs} = $inlrmsjobstotal{running} || 0; $csv->{SuspendedJobs} = $inlrmsjobstotal{suspended} || 0; $csv->{WaitingJobs} = $inlrmsjobstotal{queued} || 0; $csv->{StagingJobs} = ( $gmtotalcount{preparing} || 0 ) + ( $gmtotalcount{finishing} || 0 ); $csv->{PreLRMSWaitingJobs} = $pendingtotal || 0; # ComputingActivity sub. Will try to use as a general approach for each endpoint. my $getComputingActivities = sub { my ($interface) = @_; # $log->debug("interface is $interface"); my $joblist = $jobs_by_endpoint->{$interface}; return undef unless my ($jobid) = each %$joblist; my $gmjob = $gmjobs_info->{$jobid}; my $exited = undef; # whether the job has already run; my $cact = {}; $cact->{CreationTime} = $creation_time; $cact->{Validity} = $validity_ttl; my $share = $gmjob->{share}; # TODO: this here is never used! What was here for? #my $gridid = $config->{endpoint}."/$jobid"; $cact->{Type} = 'single'; $cact->{ID} = $cactIDs{$share}{$jobid}; # TODO: check where is this taken $cact->{IDFromEndpoint} = "urn:idfe:$jobid" if $jobid; $cact->{Name} = $gmjob->{jobname} if $gmjob->{jobname}; # TODO: properly set either ogf:jsdl:1.0 or nordugrid:xrsl # Set job specification language based on description if ($gmjob->{description}) { if ($gmjob->{description} eq 'adl') { $cact->{JobDescription} = 'emies:adl'; } elsif ($gmjob->{description} eq 'jsdl') { # TODO: Supported version might be more accurate if needed. $cact->{JobDescription} = 'ogf:jsdl:1.0'; } else { $cact->{JobDescription} = 'nordugrid:xrsl'; } } else { $cact->{JobDescription} = 'UNDEFINEDVALUE'; } # TODO: understand this below $cact->{RestartState} = glueState($gmjob->{failedstate}) if $gmjob->{failedstate}; $cact->{ExitCode} = $gmjob->{exitcode} if defined $gmjob->{exitcode}; # TODO: modify scan-jobs to write it separately to .diag. All backends should do this. 
$cact->{ComputingManagerExitCode} = $gmjob->{lrmsexitcode} if $gmjob->{lrmsexitcode}; $cact->{Error} = [ @{$gmjob->{errors}} ] if $gmjob->{errors}; # TODO: VO info, like ATLAS/Prod; check whether this information is available to A-REX $cact->{Owner} = $gmjob->{subject} if $gmjob->{subject}; $cact->{LocalOwner} = $gmjob->{localowner} if $gmjob->{localowner}; # OBS: Times are in seconds. $cact->{RequestedTotalWallTime} = $gmjob->{reqwalltime} if defined $gmjob->{reqwalltime}; $cact->{RequestedTotalCPUTime} = $gmjob->{reqcputime} if defined $gmjob->{reqcputime}; # OBS: Should include name and version. Exact format not specified $cact->{RequestedApplicationEnvironment} = $gmjob->{runtimeenvironments} if $gmjob->{runtimeenvironments}; $cact->{RequestedSlots} = $gmjob->{count} || 1; $cact->{StdIn} = $gmjob->{stdin} if $gmjob->{stdin}; $cact->{StdOut} = $gmjob->{stdout} if $gmjob->{stdout}; $cact->{StdErr} = $gmjob->{stderr} if $gmjob->{stderr}; $cact->{LogDir} = $gmjob->{gmlog} if $gmjob->{gmlog}; $cact->{ExecutionNode} = $gmjob->{nodenames} if $gmjob->{nodenames}; $cact->{Queue} = $gmjob->{queue} if $gmjob->{queue}; # Times for finished jobs $cact->{UsedTotalWallTime} = $gmjob->{WallTime} * ($gmjob->{count} || 1) if defined $gmjob->{WallTime}; $cact->{UsedTotalCPUTime} = $gmjob->{CpuTime} if defined $gmjob->{CpuTime}; $cact->{UsedMainMemory} = ceil($gmjob->{UsedMem}/1024) if defined $gmjob->{UsedMem}; $cact->{SubmissionTime} = mdstoiso($gmjob->{starttime}) if $gmjob->{starttime}; # TODO: change gm to save LRMSSubmissionTime #$cact->{ComputingManagerSubmissionTime} = 'NotImplemented'; # TODO: this should be queried in scan-job. #$cact->{StartTime} = 'NotImplemented'; # TODO: scan-job has to produce this #$cact->{ComputingManagerEndTime} = 'NotImplemented'; $cact->{EndTime} = mdstoiso($gmjob->{completiontime}) if $gmjob->{completiontime}; $cact->{WorkingAreaEraseTime} = mdstoiso($gmjob->{cleanuptime}) if $gmjob->{cleanuptime}; $cact->{ProxyExpirationTime} = mdstoiso($gmjob->{delegexpiretime}) if $gmjob->{delegexpiretime}; if ($gmjob->{clientname}) { # OBS: address of client as seen by the server is used. my $dnschars = '-.A-Za-z0-9'; # RFC 1034,1035 my ($external_address, $port, $clienthost) = $gmjob->{clientname} =~ /^([$dnschars]+)(?::(\d+))?(?:;(.+))?$/; $cact->{SubmissionHost} = $external_address if $external_address; } # TODO: this in not fetched by GMJobsInfo at all. .local does not contain name. $cact->{SubmissionClientName} = $gmjob->{clientsoftware} if $gmjob->{clientsoftware}; # Added for the client to know what was the original interface the job was submitted $cact->{OtherInfo} = ["SubmittedVia=$interface"]; # Computing Activity Associations # TODO: add link #$cact->{ExecutionEnvironmentID} = ; $cact->{ActivityID} = $gmjob->{activityid} if $gmjob->{activityid}; $cact->{ComputingShareID} = $cshaIDs{$share} || 'UNDEFINEDVALUE'; if ( $gmjob->{status} eq "INLRMS" ) { my $lrmsid = $gmjob->{localid}; if (not $lrmsid) { $log->warning("No local id for job $jobid") if $callcount == 1; next; } $cact->{LocalIDFromManager} = $lrmsid; my $lrmsjob = $lrms_info->{jobs}{$lrmsid}; if (not $lrmsjob) { $log->warning("No local job for $jobid") if $callcount == 1; next; } $cact->{State} = $gmjob->{failedstate} ? 
glueState("INLRMS", $lrmsjob->{status}, $gmjob->{failedstate}) : glueState("INLRMS", $lrmsjob->{status}); $cact->{WaitingPosition} = $lrmsjob->{rank} if defined $lrmsjob->{rank}; $cact->{ExecutionNode} = $lrmsjob->{nodes} if $lrmsjob->{nodes}; unshift @{$cact->{OtherMessages}}, $_ for @{$lrmsjob->{comment}}; # Times for running jobs $cact->{UsedTotalWallTime} = $lrmsjob->{walltime} * ($gmjob->{count} || 1) if defined $lrmsjob->{walltime}; $cact->{UsedTotalCPUTime} = $lrmsjob->{cputime} if defined $lrmsjob->{cputime}; $cact->{UsedMainMemory} = ceil($lrmsjob->{mem}/1024) if defined $lrmsjob->{mem}; } else { $cact->{State} = $gmjob->{failedstate} ? glueState($gmjob->{status},'',$gmjob->{failedstate}) : glueState($gmjob->{status}); } # TODO: UserDomain association, how to calculate it? $cact->{jobXmlFileWriter} = sub { jobXmlFileWriter($config, $jobid, $gmjob, @_) }; return $cact; }; # Computing Endpoints ######## # Here comes a list of endpoints we support. # GridFTPd job execution endpoint - org.nordugrid.gridfptjob # XBES A-REX WSRF job submission endpoint # EMI-ES one endpoint per port-type # LDAP endpoints one per schema # WS-LIDI A-REX WSRF information system endpoint # A-REX datastaging endpoint # these will contain only endpoints with URLs defined # Simple endpoints will be rendered as computingEndpoints # as GLUE2 does not admin simple Endpoints within a ComputingService. my $arexceps = {}; # arex computing endpoints. # my $arexeps = {}; # arex plain endpoints (i.e. former aris endpoints) # A-REX ComputingEndpoints # ARC XBES and WS-LIDI my $getARCWSComputingEndpoint = sub { my $cep = {}; $cep->{CreationTime} = $creation_time; $cep->{Validity} = $validity_ttl; $cep->{ID} = $ARCWScepID; # Name not necessary -- why? added back $cep->{Name} = "ARC CE XBES WSRF submission interface and WSRF LIDI Information System"; # OBS: ideally HED should be asked for the URL $cep->{URL} = $config->{endpoint}; $cep->{Capability} = [ @{$epscapabilities->{'org.nordugrid.xbes'}}, @{$epscapabilities->{'common'}} ]; $cep->{Technology} = 'webservice'; $cep->{InterfaceName} = 'org.ogf.bes'; $cep->{InterfaceVersion} = [ '1.0' ]; $cep->{InterfaceExtension} = [ 'urn:org.nordugrid.xbes' ]; $cep->{WSDL} = [ $config->{endpoint}."/?wsdl" ]; # Wrong type, should be URI $cep->{SupportedProfile} = [ "http://www.ws-i.org/Profiles/BasicProfile-1.0.html", # WS-I 1.0 "http://schemas.ogf.org/hpcp/2007/01/bp" # HPC-BP ]; $cep->{Semantics} = [ "http://www.nordugrid.org/documents/arex.pdf" ]; $cep->{Implementor} = "NorduGrid"; $cep->{ImplementationName} = "nordugrid-arc"; $cep->{ImplementationVersion} = $config->{arcversion}; $cep->{QualityLevel} = "testing"; my %healthissues; if ($config->{x509_user_cert} and $config->{x509_cert_dir}) { if ( $host_info->{hostcert_expired} or $host_info->{issuerca_expired}) { push @{$healthissues{critical}}, "Host credentials expired"; } elsif (not $host_info->{hostcert_enddate} or not $host_info->{issuerca_enddate}) { push @{$healthissues{critical}}, "Host credentials missing"; } elsif ($host_info->{hostcert_enddate} - time < 48*3600 or $host_info->{issuerca_enddate} - time < 48*3600) { push @{$healthissues{warning}}, "Host credentials will expire soon"; } } if ( $host_info->{gm_alive} ne 'all' ) { if ($host_info->{gm_alive} eq 'some') { push @{$healthissues{warning}}, 'One or more grid managers are down'; } else { push @{$healthissues{critical}}, $config->{remotegmdirs} ? 
'All grid managers are down' : 'Grid manager is down'; } } # check if WS interface is actually running # done with netstat but I'd like to be smarter # this only works if the effective user is root # TODO: find a better way to do this. Ask A-REX? # changed by request of aleksandr. Only if root is running arex. if ($> == 0) { my $netstat=`netstat -antup`; if ( $? != 0 ) { # push @{$healthissues{unknown}}, "Checking if ARC WS interface is running: error in executing netstat. Infosys will assume the service is in ok HealthState"; $log->verbose("Checking if ARC WS interface is running: error in executing netstat. Infosys will assume AREX WSRF/XBES running properly"); } else { # searches if arched is listed in netstat output # best way would be ask arched if its service is up...? if( $netstat !~ m/arched/ ) { push @{$healthissues{critical}}, "arched A-REX endpoint not found with netstat" ; } } } else { # push @{$healthissues{unknown}}, "user ".getpwuid($>)." cannot run netstat -p. Infosys will assume the service is in ok HealthState"; $log->verbose("Checking if ARC WS interface is running: user ".getpwuid($>)." cannot run netstat -p. Infosys will assume AREX WSRF/XBES is running properly"); } if (%healthissues) { my @infos; for my $level (qw(critical warning other unknown)) { next unless $healthissues{$level}; $cep->{HealthState} ||= $level; push @infos, @{$healthissues{$level}}; } $cep->{HealthStateInfo} = join "; ", @infos; } else { $cep->{HealthState} = 'ok'; } # OBS: Do 'queueing' and 'closed' states apply for a-rex? # OBS: Is there an allownew option for a-rex? # TODO: check if the option below still applies #if ( $config->{GridftpdAllowNew} == 0 ) { # $cep->{ServingState} = 'draining'; #} else { # $cep->{ServingState} = 'production'; #} $cep->{ServingState} = $servingstate; # TODO: StartTime: get it from hed - maybe look at logs? $cep->{IssuerCA} = $host_info->{issuerca}; # scalar $cep->{TrustedCA} = $host_info->{trustedcas}; # array # TODO: Downtime, is this necessary, and how should it work? $cep->{Staging} = 'staginginout'; $cep->{JobDescription} = [ 'ogf:jsdl:1.0', "nordugrid:xrsl" ]; $cep->{TotalJobs} = $gmtotalcount{notfinished} || 0; $cep->{RunningJobs} = $inlrmsjobstotal{running} || 0; $cep->{SuspendedJobs} = $inlrmsjobstotal{suspended} || 0; $cep->{WaitingJobs} = $inlrmsjobstotal{queued} || 0; $cep->{StagingJobs} = ( $gmtotalcount{preparing} || 0 ) + ( $gmtotalcount{finishing} || 0 ); $cep->{PreLRMSWaitingJobs} = $pendingtotal || 0; # TODO: Adrian's style accesspolicies. Might be handy later. # if ($config->{accesspolicies}) { # my @apconfs = @{$config->{accesspolicies}}; # $cep->{AccessPolicies} = sub { # return undef unless @apconfs; # my $apconf = pop @apconfs; # my $apol = {}; # $apol->{ID} = "$apolIDp:".join(",", @{$apconf->{Rule}}); # $apol->{Scheme} = "basic"; # $apol->{Rule} = $apconf->{Rule}; # $apol->{UserDomainID} = $apconf->{UserDomainID}; # $apol->{EndpointID} = $ARCWScepID; # return $apol; # }; # } # AccessPolicies $cep->{AccessPolicies} = sub { &{$getAccessPolicies}($cep->{ID}) }; $cep->{OtherInfo} = $host_info->{EMIversion} if ($host_info->{EMIversion}); # array # ComputingActivities if ($nojobs) { $cep->{ComputingActivities} = undef; } else { # this complicated thing here creates a specialized getComputingActivities # version of sub with a builtin parameter! 
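# Illustrative sketch (commented out, not executed): the assignment below stores a closure so that
# the shared $getComputingActivities sub is later called with a fixed interface name. In generic
# terms the pattern is (the helper names here are made up for illustration):
#   my $with_iface = sub { my $iface = shift; return sub { &{$getComputingActivities}($iface) } };
#   my $list_xbes_jobs = &$with_iface('org.nordugrid.xbes');   # stored now, called later with no arguments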
$cep->{ComputingActivities} = sub { &{$getComputingActivities}('org.nordugrid.xbes'); }; } # Associations # No computingshareID should be associated to endpoint -- unless we enforce # binding between endpoints and queues $cep->{ComputingShareID} = [ values %cshaIDs ]; $cep->{ComputingServiceID} = $csvID; return $cep; }; # don't publish if arex_mount_point not configured in arc.conf $arexceps->{ARCWSComputingEndpoint} = $getARCWSComputingEndpoint if ($config->{enable_arc_interface}); # ARC GridFTPd job submission interface my $getARCGFTPdComputingEndpoint = sub { # check if gridftpd interface is actually configured return undef unless ( $gridftphostport ne ''); my $cep = {}; $cep->{CreationTime} = $creation_time; $cep->{Validity} = $validity_ttl; # Name not necessary -- why? added back $cep->{Name} = "ARC GridFTP job execution interface"; $cep->{URL} = "gsiftp://$gridftphostport".$config->{GridftpdMountPoint}; $cep->{ID} = $ARCgftpjobcepID; $cep->{Capability} = [ @{$epscapabilities->{'org.nordugrid.gridftpjob'}}, @{$epscapabilities->{'common'}} ]; $cep->{Technology} = 'gridftp'; $cep->{InterfaceName} = 'org.nordugrid.gridftpjob'; $cep->{InterfaceVersion} = [ '1.0' ]; # InterfaceExtension should return the same as BESExtension attribute of BES-Factory. # value is taken from services/a-rex/get_factory_attributes_document.cpp, line 56. $cep->{InterfaceExtension} = [ 'http://www.nordugrid.org/schemas/gridftpd' ]; # Wrong type, should be URI $cep->{Semantics} = [ "http://www.nordugrid.org/documents/gridfptd.pdf" ]; $cep->{Implementor} = "NorduGrid"; $cep->{ImplementationName} = "nordugrid-arc"; $cep->{ImplementationVersion} = $config->{arcversion}; $cep->{QualityLevel} = "production"; my %healthissues; if ($config->{x509_user_cert} and $config->{x509_cert_dir}) { if ( $host_info->{hostcert_expired} or $host_info->{issuerca_expired}) { push @{$healthissues{critical}}, "Host credentials expired"; } elsif (not $host_info->{hostcert_enddate} or not $host_info->{issuerca_enddate}) { push @{$healthissues{critical}}, "Host credentials missing"; } elsif ($host_info->{hostcert_enddate} - time < 48*3600 or $host_info->{issuerca_enddate} - time < 48*3600) { push @{$healthissues{warning}}, "Host credentials will expire soon"; } } if ( $host_info->{gm_alive} ne 'all' ) { if ($host_info->{gm_alive} eq 'some') { push @{$healthissues{warning}}, 'One or more grid managers are down'; } else { push @{$healthissues{critical}}, $config->{remotegmdirs} ? 'All grid managers are down' : 'Grid manager is down'; } } # check if gridftpd is running, by checking pidfile existence push @{$healthissues{critical}}, 'gridfptd pidfile does not exist' unless (-e $config->{GridftpdPidFile}); if (%healthissues) { my @infos; for my $level (qw(critical warning other)) { next unless $healthissues{$level}; $cep->{HealthState} ||= $level; push @infos, @{$healthissues{$level}}; } $cep->{HealthStateInfo} = join "; ", @infos; } else { $cep->{HealthState} = 'ok'; } if ( $config->{GridftpdAllowNew} == 0 ) { $cep->{ServingState} = 'draining'; } else { $cep->{ServingState} = $servingstate; } # StartTime: get it from hed $cep->{IssuerCA} = $host_info->{issuerca}; # scalar $cep->{TrustedCA} = $host_info->{trustedcas}; # array # TODO: Downtime, is this necessary, and how should it work? 
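# Illustrative walk-through of the %healthissues aggregation used above (hypothetical messages):
# with
#   %healthissues = ( warning  => [ 'Host credentials will expire soon' ],
#                     critical => [ 'Grid manager is down' ] );
# the loop over qw(critical warning other) visits 'critical' first, so HealthState ||= 'critical'
# wins, and HealthStateInfo becomes "Grid manager is down; Host credentials will expire soon".
# In other words, the most severe level present decides the state and all messages are joined with "; ".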
$cep->{Staging} = 'staginginout'; $cep->{JobDescription} = [ 'ogf:jsdl:1.0', "nordugrid:xrsl" ]; $cep->{TotalJobs} = $gmtotalcount{notfinished} || 0; $cep->{RunningJobs} = $inlrmsjobstotal{running} || 0; $cep->{SuspendedJobs} = $inlrmsjobstotal{suspended} || 0; $cep->{WaitingJobs} = $inlrmsjobstotal{queued} || 0; $cep->{StagingJobs} = ( $gmtotalcount{preparing} || 0 ) + ( $gmtotalcount{finishing} || 0 ); $cep->{PreLRMSWaitingJobs} = $pendingtotal || 0; $cep->{AccessPolicies} = sub { &{$getAccessPolicies}($cep->{ID}) }; $cep->{OtherInfo} = $host_info->{EMIversion} if ($host_info->{EMIversion}); # array # ComputingActivities if ($nojobs) { $cep->{ComputingActivities} = undef; } else { # this complicated thing here creates a specialized getComputingActivities # version of sub with a builtin parameter! $cep->{ComputingActivities} = sub { &{$getComputingActivities}('org.nordugrid.gridftpjob'); }; } # Associations $cep->{ComputingShareID} = [ values %cshaIDs ]; $cep->{ComputingServiceID} = $csvID; return $cep; }; # Don't publish if there is no endpoint URL $arexceps->{ARCGFRPdComputingEndpoint} = $getARCGFTPdComputingEndpoint if $gridftphostport ne ''; # EMI-ES port types # TODO: understand if it's possible to choose only a set of portTypes to publish # EMIES ActivityCreation my $getEMIESActivityCreationComputingEndpoint = sub { # don't publish if no endpoint URL return undef unless $config->{enable_emies_interface}; my $cep = {}; $cep->{CreationTime} = $creation_time; $cep->{Validity} = $validity_ttl; $cep->{ID} = "$EMIEScepIDp:ac"; # Name not necessary -- why? added back $cep->{Name} = "ARC CE EMI-ES ActivityCreation Port Type"; # OBS: ideally HED should be asked for the URL $cep->{URL} = $config->{endpoint}; # TODO: define a strategy to add data capabilites $cep->{Capability} = $epscapabilities->{'org.ogf.glue.emies.activitycreation'}; $cep->{Technology} = 'webservice'; $cep->{InterfaceName} = 'org.ogf.glue.emies.activitycreation'; $cep->{InterfaceVersion} = [ '1.16' ]; $cep->{WSDL} = [ "https://twiki.cern.ch/twiki/pub/EMI/EmiExecutionService/" ]; # What is profile for EMIES? #$cep->{SupportedProfile} = [ "http://www.ws-i.org/Profiles/BasicProfile-1.0.html", # WS-I 1.0 # "http://schemas.ogf.org/hpcp/2007/01/bp" # HPC-BP # ]; $cep->{Semantics} = [ "https://twiki.cern.ch/twiki/pub/EMI/EmiExecutionService/" ]; $cep->{Implementor} = "NorduGrid"; $cep->{ImplementationName} = "nordugrid-arc"; $cep->{ImplementationVersion} = $config->{arcversion}; $cep->{QualityLevel} = "testing"; my %healthissues; if ($config->{x509_user_cert} and $config->{x509_cert_dir}) { if ( $host_info->{hostcert_expired} or $host_info->{issuerca_expired}) { push @{$healthissues{critical}}, "Host credentials expired"; } elsif (not $host_info->{hostcert_enddate} or not $host_info->{issuerca_enddate}) { push @{$healthissues{critical}}, "Host credentials missing"; } elsif ($host_info->{hostcert_enddate} - time < 48*3600 or $host_info->{issuerca_enddate} - time < 48*3600) { push @{$healthissues{warning}}, "Host credentials will expire soon"; } } if ( $host_info->{gm_alive} ne 'all' ) { if ($host_info->{gm_alive} eq 'some') { push @{$healthissues{warning}}, 'One or more grid managers are down'; } else { push @{$healthissues{critical}}, $config->{remotegmdirs} ? 
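# Note on the Capability lists: most endpoints in this file merge the per-interface capabilities
# with the shared ones by flattening two array refs into a new one, roughly (sketch with a
# hypothetical key name):
#   my $caps = [ @{$epscapabilities->{'some.interface'}}, @{$epscapabilities->{'common'}} ];
# The ActivityCreation port type above currently publishes only its own list.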
'All grid managers are down' : 'Grid manager is down'; } } if (%healthissues) { my @infos; for my $level (qw(critical warning other)) { next unless $healthissues{$level}; $cep->{HealthState} ||= $level; push @infos, @{$healthissues{$level}}; } $cep->{HealthStateInfo} = join "; ", @infos; } else { $cep->{HealthState} = 'ok'; } # OBS: Do 'queueing' and 'closed' states apply for a-rex? # OBS: Is there an allownew option for a-rex? #if ( $config->{GridftpdAllowNew} == 0 ) { # $cep->{ServingState} = 'draining'; #} else { # $cep->{ServingState} = 'production'; #} $cep->{ServingState} = $servingstate; # StartTime: get it from hed $cep->{IssuerCA} = $host_info->{issuerca}; # scalar $cep->{TrustedCA} = $host_info->{trustedcas}; # array # TODO: Downtime, is this necessary, and how should it work? $cep->{Staging} = 'staginginout'; $cep->{JobDescription} = [ 'ogf:jsdl:1.0', 'nordugrid:xrsl', 'emies:adl' ]; $cep->{TotalJobs} = $gmtotalcount{notfinished} || 0; $cep->{RunningJobs} = $inlrmsjobstotal{running} || 0; $cep->{SuspendedJobs} = $inlrmsjobstotal{suspended} || 0; $cep->{WaitingJobs} = $inlrmsjobstotal{queued} || 0; $cep->{StagingJobs} = ( $gmtotalcount{preparing} || 0 ) + ( $gmtotalcount{finishing} || 0 ); $cep->{PreLRMSWaitingJobs} = $pendingtotal || 0; $cep->{AccessPolicies} = sub { &{$getAccessPolicies}($cep->{ID}) }; $cep->{OtherInfo} = $host_info->{EMIversion} if ($host_info->{EMIversion}); # array # ComputingActivities if ($nojobs) { $cep->{ComputingActivities} = undef; } else { # this complicated thing here creates a specialized getComputingActivities # version of sub with a builtin parameter! #TODO: change interfacename for jobs? $cep->{ComputingActivities} = sub { &{$getComputingActivities}('org.ogf.glue.emies.activitycreation'); }; } # Associations $cep->{ComputingShareID} = [ values %cshaIDs ]; $cep->{ComputingServiceID} = $csvID; return $cep; }; # don't publish if no EMIES endpoint configured $arexceps->{EMIESActivityCreationComputingEndpoint} = $getEMIESActivityCreationComputingEndpoint if ($config->{enable_emies_interface}); # EMI-ES ActivityManagement port type my $getEMIESActivityManagementComputingEndpoint = sub { # don't publish if no endpoint URL return undef unless $config->{enable_emies_interface}; my $cep = {}; $cep->{CreationTime} = $creation_time; $cep->{Validity} = $validity_ttl; $cep->{ID} = "$EMIEScepIDp:am"; # Name not necessary -- why? added back $cep->{Name} = "ARC CE EMI-ES ActivityManagement Port Type"; # OBS: ideally HED should be asked for the URL $cep->{URL} = $config->{endpoint}; # TODO: define a strategy to add data capabilites $cep->{Capability} = [ @{$epscapabilities->{'org.ogf.glue.emies.activitymanagement'}}, @{$epscapabilities->{'common'}} ]; $cep->{Technology} = 'webservice'; $cep->{InterfaceName} = 'org.ogf.glue.emies.activitymanagement'; $cep->{InterfaceVersion} = [ '1.16' ]; $cep->{WSDL} = [ "https://twiki.cern.ch/twiki/pub/EMI/EmiExecutionService/" ]; # What is profile for EMIES? 
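# Note on how these port types end up published: each generator sub is stored in %$arexceps under
# a descriptive key and guarded by the configuration option that enables the interface. A sketch
# of the same pattern with made-up names:
#   $arexceps->{MyPortTypeEndpoint} = $getMyPortTypeEndpoint
#       if $config->{enable_my_interface};
# Generators whose guard is false are never registered, so they do not appear in the rendered
# GLUE2 document at all.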
#$cep->{SupportedProfile} = [ "http://www.ws-i.org/Profiles/BasicProfile-1.0.html", # WS-I 1.0 # "http://schemas.ogf.org/hpcp/2007/01/bp" # HPC-BP # ]; $cep->{Semantics} = [ "https://twiki.cern.ch/twiki/pub/EMI/EmiExecutionService/" ]; $cep->{Implementor} = "NorduGrid"; $cep->{ImplementationName} = "nordugrid-arc"; $cep->{ImplementationVersion} = $config->{arcversion}; $cep->{QualityLevel} = "testing"; my %healthissues; if ($config->{x509_user_cert} and $config->{x509_cert_dir}) { if ( $host_info->{hostcert_expired} or $host_info->{issuerca_expired}) { push @{$healthissues{critical}}, "Host credentials expired"; } elsif (not $host_info->{hostcert_enddate} or not $host_info->{issuerca_enddate}) { push @{$healthissues{critical}}, "Host credentials missing"; } elsif ($host_info->{hostcert_enddate} - time < 48*3600 or $host_info->{issuerca_enddate} - time < 48*3600) { push @{$healthissues{warning}}, "Host credentials will expire soon"; } } if ( $host_info->{gm_alive} ne 'all' ) { if ($host_info->{gm_alive} eq 'some') { push @{$healthissues{warning}}, 'One or more grid managers are down'; } else { push @{$healthissues{critical}}, $config->{remotegmdirs} ? 'All grid managers are down' : 'Grid manager is down'; } } if (%healthissues) { my @infos; for my $level (qw(critical warning other)) { next unless $healthissues{$level}; $cep->{HealthState} ||= $level; push @infos, @{$healthissues{$level}}; } $cep->{HealthStateInfo} = join "; ", @infos; } else { $cep->{HealthState} = 'ok'; } # OBS: Do 'queueing' and 'closed' states apply for a-rex? # OBS: Is there an allownew option for a-rex? #if ( $config->{GridftpdAllowNew} == 0 ) { # $cep->{ServingState} = 'draining'; #} else { # $cep->{ServingState} = 'production'; #} $cep->{ServingState} = $servingstate; # StartTime: get it from hed $cep->{IssuerCA} = $host_info->{issuerca}; # scalar $cep->{TrustedCA} = $host_info->{trustedcas}; # array # TODO: Downtime, is this necessary, and how should it work? $cep->{Staging} = 'staginginout'; $cep->{JobDescription} = [ 'ogf:jsdl:1.0', 'nordugrid:xrsl', 'emies:adl' ]; $cep->{TotalJobs} = $gmtotalcount{notfinished} || 0; $cep->{RunningJobs} = $inlrmsjobstotal{running} || 0; $cep->{SuspendedJobs} = $inlrmsjobstotal{suspended} || 0; $cep->{WaitingJobs} = $inlrmsjobstotal{queued} || 0; $cep->{StagingJobs} = ( $gmtotalcount{preparing} || 0 ) + ( $gmtotalcount{finishing} || 0 ); $cep->{PreLRMSWaitingJobs} = $pendingtotal || 0; $cep->{AccessPolicies} = sub { &{$getAccessPolicies}($cep->{ID}) }; $cep->{OtherInfo} = $host_info->{EMIversion} if ($host_info->{EMIversion}); # array # ComputingActivities if ($nojobs) { $cep->{ComputingActivities} = undef; } else { # this complicated thing here creates a specialized getComputingActivities # version of sub with a builtin parameter! #TODO: change interfacename for jobs? $cep->{ComputingActivities} = sub { &{$getComputingActivities}('org.ogf.glue.emies.activitycreation'); }; } # Associations $cep->{ComputingShareID} = [ values %cshaIDs ]; $cep->{ComputingServiceID} = $csvID; return $cep; }; # don't publish if no EMIES endpoint configured $arexceps->{EMIESActivityManagementComputingEndpoint} = $getEMIESActivityManagementComputingEndpoint if ($config->{enable_emies_interface}); # EMI-ES ResourceInfo port type my $getEMIESResourceInfoEndpoint = sub { my $ep = {}; $ep->{CreationTime} = $creation_time; $ep->{Validity} = $validity_ttl; # Name not necessary -- why? plan was to have it configurable. 
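# Sketch of the credential check used by the endpoints in this file: a warning is raised when
# fewer than 48 hours of validity remain on the host or issuer CA certificate (48*3600 = 172800 s):
#   my $seconds_left = $host_info->{hostcert_enddate} - time;
#   push @{$healthissues{warning}}, "Host credentials will expire soon"
#       if $seconds_left < 48*3600;
# Missing or already expired credentials are reported as critical instead.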
$ep->{Name} = "ARC CE EMI-ES ResourceInfo Port Type"; # Configuration parser does not contain ldap port! # must be updated # port hardcoded for tests $ep->{URL} = $config->{endpoint}; # TODO: put only the port here $ep->{ID} = "$EMIEScepIDp:ri"; $ep->{Capability} = $epscapabilities->{'org.ogf.glue.emies.resourceinfo'};; $ep->{Technology} = 'webservice'; $ep->{InterfaceName} = 'org.ogf.glue.emies.resourceinfo'; $ep->{InterfaceVersion} = [ '1.16' ]; # Wrong type, should be URI #$ep->{SupportedProfile} = [ "http://www.ws-i.org/Profiles/BasicProfile-1.0.html", # WS-I 1.0 # "http://schemas.ogf.org/hpcp/2007/01/bp" # HPC-BP # ]; # TODO: put EMIES spec URL here #$ep->{Semantics} = [ "http://www.nordugrid.org/documents/arc_infosys.pdf" ]; $ep->{Implementor} = "NorduGrid"; $ep->{ImplementationName} = "nordugrid-arc"; $ep->{ImplementationVersion} = $config->{arcversion}; $ep->{QualityLevel} = "testing"; # How to calculate health for this interface? # TODO: inherit health infos from arex endpoints my %healthissues; if ($config->{x509_user_cert} and $config->{x509_cert_dir}) { if ( $host_info->{hostcert_expired} or $host_info->{issuerca_expired}) { push @{$healthissues{critical}}, "Host credentials expired"; } elsif (not $host_info->{hostcert_enddate} or not $host_info->{issuerca_enddate}) { push @{$healthissues{critical}}, "Host credentials missing"; } elsif ($host_info->{hostcert_enddate} - time < 48*3600 or $host_info->{issuerca_enddate} - time < 48*3600) { push @{$healthissues{warning}}, "Host credentials will expire soon"; } } # check if WS interface is actually running # done with netstat but I'd like to be smarter # this only works if the effective user is root # TODO: find a better way to do this. Ask A-REX? # changed by request of aleksandr. Only checks if it's root if ($> == 0) { my $netstat=`netstat -antup`; if ( $? != 0 ) { # push @{$healthissues{unknown}}, "Checking if ARC WS interface is running: error in executing netstat. Infosys will assume the service is in ok HealthState"; $log->verbose("Checking if ARC WS interface is running: error in executing netstat. Infosys will assume EMIES is running properly"); } else { # searches if arched is listed in netstat output # best way would be ask arched if its service is up...? if( $netstat !~ m/arched/ ) { push @{$healthissues{critical}}, "arched A-REX endpoint not found with netstat. EMIES cannot be enabled." ; } } } else { # push @{$healthissues{unknown}}, "user ".getpwuid($>)." cannot run netstat -p. Infosys will assume EMIES is in ok HeathState"; $log->verbose("Checking if ARC WS interface is running: user ".getpwuid($>)." cannot run netstat -p. Infosys will assume EMIES is running properly"); } if (%healthissues) { my @infos; for my $level (qw(critical warning other unknown)) { next unless $healthissues{$level}; $ep->{HealthState} ||= $level; push @infos, @{$healthissues{$level}}; } $ep->{HealthStateInfo} = join "; ", @infos; } else { $ep->{HealthState} = 'ok'; } $ep->{IssuerCA} = $host_info->{issuerca}; # scalar $ep->{TrustedCA} = $host_info->{trustedcas}; # array # OBS: Do 'queueing' and 'closed' states apply for aris? # OBS: Is there an allownew option for aris? #if ( $config->{GridftpdAllowNew} == 0 ) { # $ep->{ServingState} = 'draining'; #} else { # $ep->{ServingState} = 'production'; #} $ep->{ServingState} = 'production'; # TODO: StartTime: get it from hed? # TODO: Downtime, is this necessary, and how should it work? 
# AccessPolicies $ep->{AccessPolicies} = sub { &{$getAccessPolicies}($ep->{ID}) }; $ep->{OtherInfo} = $host_info->{EMIversion} if ($host_info->{EMIversion}); # array # Associations $ep->{ComputingServiceID} = $csvID; return $ep; }; $arexceps->{EMIESResourceInfoEndpoint} = $getEMIESResourceInfoEndpoint if ($config->{enable_emies_interface}); # TODO: add EMIES Delegation, ActivityInfo, ActivityManagement # EMI-ES ActivityInfo my $getEMIESActivityInfoComputingEndpoint = sub { # don't publish if no endpoint URL return undef unless $config->{enable_emies_interface}; my $cep = {}; $cep->{CreationTime} = $creation_time; $cep->{Validity} = $validity_ttl; $cep->{ID} = "$EMIEScepIDp:ai"; # Name not necessary -- why? added back $cep->{Name} = "ARC CE EMI-ES ActivityInfo Port Type"; # OBS: ideally HED should be asked for the URL $cep->{URL} = $config->{endpoint}; # TODO: define a strategy to add data capabilities $cep->{Capability} = $epscapabilities->{'org.ogf.glue.emies.activityinfo'}; $cep->{Technology} = 'webservice'; $cep->{InterfaceName} = 'org.ogf.glue.emies.activityinfo'; $cep->{InterfaceVersion} = [ '1.16' ]; $cep->{WSDL} = [ "https://twiki.cern.ch/twiki/pub/EMI/EmiExecutionService/" ]; # What is profile for EMIES? #$cep->{SupportedProfile} = [ "http://www.ws-i.org/Profiles/BasicProfile-1.0.html", # WS-I 1.0 # "http://schemas.ogf.org/hpcp/2007/01/bp" # HPC-BP # ]; $cep->{Semantics} = [ "https://twiki.cern.ch/twiki/pub/EMI/EmiExecutionService/" ]; $cep->{Implementor} = "NorduGrid"; $cep->{ImplementationName} = "nordugrid-arc"; $cep->{ImplementationVersion} = $config->{arcversion}; $cep->{QualityLevel} = "testing"; my %healthissues; if ($config->{x509_user_cert} and $config->{x509_cert_dir}) { if ( $host_info->{hostcert_expired} or $host_info->{issuerca_expired}) { push @{$healthissues{critical}}, "Host credentials expired"; } elsif (not $host_info->{hostcert_enddate} or not $host_info->{issuerca_enddate}) { push @{$healthissues{critical}}, "Host credentials missing"; } elsif ($host_info->{hostcert_enddate} - time < 48*3600 or $host_info->{issuerca_enddate} - time < 48*3600) { push @{$healthissues{warning}}, "Host credentials will expire soon"; } } if ( $host_info->{gm_alive} ne 'all' ) { if ($host_info->{gm_alive} eq 'some') { push @{$healthissues{warning}}, 'One or more grid managers are down'; } else { push @{$healthissues{critical}}, $config->{remotegmdirs} ? 'All grid managers are down' : 'Grid manager is down'; } } if (%healthissues) { my @infos; for my $level (qw(critical warning other)) { next unless $healthissues{$level}; $cep->{HealthState} ||= $level; push @infos, @{$healthissues{$level}}; } $cep->{HealthStateInfo} = join "; ", @infos; } else { $cep->{HealthState} = 'ok'; } # OBS: Do 'queueing' and 'closed' states apply for a-rex? # OBS: Is there an allownew option for a-rex? #if ( $config->{GridftpdAllowNew} == 0 ) { # $cep->{ServingState} = 'draining'; #} else { # $cep->{ServingState} = 'production'; #} $cep->{ServingState} = 'production'; # StartTime: get it from hed $cep->{IssuerCA} = $host_info->{issuerca}; # scalar $cep->{TrustedCA} = $host_info->{trustedcas}; # array # TODO: Downtime, is this necessary, and how should it work?
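# Note: AccessPolicies and ComputingActivities are stored as code references, deferring the
# expensive lookups until the information renderer walks this structure. A hypothetical consumer
# (names made up, how the actual printers do this is not shown here) might resolve them as:
#   my $val = $endpoint->{AccessPolicies};
#   $val = &$val() if ref $val eq 'CODE';   # call the closure only when rendering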
$cep->{Staging} = 'staginginout'; $cep->{JobDescription} = [ 'ogf:jsdl:1.0', 'nordugrid:xrsl', 'emies:adl' ]; $cep->{TotalJobs} = $gmtotalcount{notfinished} || 0; $cep->{RunningJobs} = $inlrmsjobstotal{running} || 0; $cep->{SuspendedJobs} = $inlrmsjobstotal{suspended} || 0; $cep->{WaitingJobs} = $inlrmsjobstotal{queued} || 0; $cep->{StagingJobs} = ( $gmtotalcount{preparing} || 0 ) + ( $gmtotalcount{finishing} || 0 ); $cep->{PreLRMSWaitingJobs} = $pendingtotal || 0; $cep->{AccessPolicies} = sub { &{$getAccessPolicies}($cep->{ID}) }; $cep->{OtherInfo} = $host_info->{EMIversion} if ($host_info->{EMIversion}); # array # ComputingActivities if ($nojobs) { $cep->{ComputingActivities} = undef; } else { # this complicated thing here creates a specialized getComputingActivities # version of sub with a builtin parameter! #TODO: change interfacename for jobs? $cep->{ComputingActivities} = sub { &{$getComputingActivities}('org.ogf.glue.emies.activitycreation'); }; } # Associations $cep->{ComputingShareID} = [ values %cshaIDs ]; $cep->{ComputingServiceID} = $csvID; return $cep; }; # don't publish if no EMIES endpoint configured $arexceps->{EMIESActivityInfoComputingEndpoint} = $getEMIESActivityInfoComputingEndpoint if ($config->{enable_emies_interface}); # EMIES Delegation port type my $getEMIESDelegationEndpoint = sub { my $ep = {}; $ep->{CreationTime} = $creation_time; $ep->{Validity} = $validity_ttl; # Name not necessary -- why? plan was to have it configurable. $ep->{Name} = "ARC CE EMI-ES Delegation Port Type"; $ep->{URL} = $config->{endpoint}; $ep->{ID} = "$EMIEScepIDp:d"; $ep->{Capability} = $epscapabilities->{'org.ogf.glue.emies.delegation'};; $ep->{Technology} = 'webservice'; $ep->{InterfaceName} = 'org.ogf.glue.emies.delegation'; $ep->{InterfaceVersion} = [ '1.16' ]; # Wrong type, should be URI #$ep->{SupportedProfile} = [ "http://www.ws-i.org/Profiles/BasicProfile-1.0.html", # WS-I 1.0 # "http://schemas.ogf.org/hpcp/2007/01/bp" # HPC-BP # ]; # TODO: put EMIES spec URL here #$ep->{Semantics} = [ "http://www.nordugrid.org/documents/arc_infosys.pdf" ]; $ep->{Implementor} = "NorduGrid"; $ep->{ImplementationName} = "nordugrid-arc"; $ep->{ImplementationVersion} = $config->{arcversion}; $ep->{QualityLevel} = "testing"; # How to calculate health for this interface? # TODO: inherit health infos from arex endpoints my %healthissues; if ($config->{x509_user_cert} and $config->{x509_cert_dir}) { if ( $host_info->{hostcert_expired} or $host_info->{issuerca_expired}) { push @{$healthissues{critical}}, "Host credentials expired"; } elsif (not $host_info->{hostcert_enddate} or not $host_info->{issuerca_enddate}) { push @{$healthissues{critical}}, "Host credentials missing"; } elsif ($host_info->{hostcert_enddate} - time < 48*3600 or $host_info->{issuerca_enddate} - time < 48*3600) { push @{$healthissues{warning}}, "Host credentials will expire soon"; } } # check if WS interface is actually running # done with netstat but I'd like to be smarter # this only works if the effective user is root # TODO: find a better way to do this. Ask A-REX? # changed by request of aleksandr. Only checks if it's root if ($> == 0) { my $netstat=`netstat -antup`; if ( $? != 0 ) { # push @{$healthissues{unknown}}, "Checking if ARC WS interface is running: error in executing netstat. Infosys will assume the service is in ok HealthState"; $log->verbose("Checking if ARC WS interface is running: error in executing netstat. 
Infosys will assume EMIES is running properly"); } else { # searches if arched is listed in netstat output # best way would be ask arched if its service is up...? if( $netstat !~ m/arched/ ) { push @{$healthissues{critical}}, "arched A-REX endpoint not found with netstat. EMIES cannot be enabled." ; } } } else { # push @{$healthissues{unknown}}, "user ".getpwuid($>)." cannot run netstat -p. Infosys will assume EMIES is in ok HeathState"; $log->verbose("Checking if ARC WS interface is running: user ".getpwuid($>)." cannot run netstat -p. Infosys will assume EMIES is running properly"); } if (%healthissues) { my @infos; for my $level (qw(critical warning other unknown)) { next unless $healthissues{$level}; $ep->{HealthState} ||= $level; push @infos, @{$healthissues{$level}}; } $ep->{HealthStateInfo} = join "; ", @infos; } else { $ep->{HealthState} = 'ok'; } $ep->{IssuerCA} = $host_info->{issuerca}; # scalar $ep->{TrustedCA} = $host_info->{trustedcas}; # array # OBS: Do 'queueing' and 'closed' states apply for aris? # OBS: Is there an allownew option for aris? #if ( $config->{GridftpdAllowNew} == 0 ) { # $ep->{ServingState} = 'draining'; #} else { # $ep->{ServingState} = 'production'; #} $ep->{ServingState} = 'production'; # TODO: StartTime: get it from hed? # TODO: Downtime, is this necessary, and how should it work? # AccessPolicies $ep->{AccessPolicies} = sub { &{$getAccessPolicies}($ep->{ID}) }; $ep->{OtherInfo} = $host_info->{EMIversion} if ($host_info->{EMIversion}); # array # Associations $ep->{ComputingServiceID} = $csvID; return $ep; }; $arexceps->{EMIESDelegationEndpoint} = $getEMIESDelegationEndpoint if ($config->{enable_emies_interface}); #### Other A-REX ComputingEndpoints. these are currently disabled as I don't know how to handle them. # Placeholder for Stagein interface my $getStageinComputingEndpoint = sub { # don't publish if no Endpoint URL return undef unless $stageinhostport ne ''; my $cep = {}; $cep->{CreationTime} = $creation_time; $cep->{Validity} = $validity_ttl; $cep->{ID} = $StageincepID; # Name not necessary -- why? added back $cep->{Name} = "ARC WSRF XBES submission interface and WSRF LIDI Information System"; # OBS: ideally HED should be asked for the URL #$cep->{URL} = $config->{endpoint}; $cep->{Capability} = [ 'data.management.transfer' ]; $cep->{Technology} = 'webservice'; $cep->{InterfaceName} = 'Stagein'; $cep->{InterfaceVersion} = [ '1.0' ]; #$cep->{InterfaceExtension} = [ '' ]; $cep->{WSDL} = [ $config->{endpoint}."/?wsdl" ]; # Wrong type, should be URI #$cep->{SupportedProfile} = [ "http://www.ws-i.org/Profiles/BasicProfile-1.0.html", # WS-I 1.0 # "http://schemas.ogf.org/hpcp/2007/01/bp" # HPC-BP # ]; #$cep->{Semantics} = [ "http://www.nordugrid.org/documents/arex.pdf" ]; $cep->{Implementor} = "NorduGrid"; $cep->{ImplementationName} = "nordugrid-arc"; $cep->{ImplementationVersion} = $config->{arcversion}; $cep->{QualityLevel} = "development"; # How to calculate health for this interface? 
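# Convention used by all the endpoint generators in this file: returning undef means "do not
# publish this endpoint". A sketch of such a guard with a made-up option name:
#   return undef unless $config->{enable_stagein_interface};
# The guard at the top of this sub checks $stageinhostport instead, since the endpoint is only
# meaningful when a stage-in host:port has been configured.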
my %healthissues; if ($config->{x509_user_cert} and $config->{x509_cert_dir}) { if ( $host_info->{hostcert_expired} or $host_info->{issuerca_expired}) { push @{$healthissues{critical}}, "Host credentials expired"; } elsif (not $host_info->{hostcert_enddate} or not $host_info->{issuerca_enddate}) { push @{$healthissues{critical}}, "Host credentials missing"; } elsif ($host_info->{hostcert_enddate} - time < 48*3600 or $host_info->{issuerca_enddate} - time < 48*3600) { push @{$healthissues{warning}}, "Host credentials will expire soon"; } } if ( $host_info->{gm_alive} ne 'all' ) { if ($host_info->{gm_alive} eq 'some') { push @{$healthissues{warning}}, 'One or more grid managers are down'; } else { push @{$healthissues{critical}}, $config->{remotegmdirs} ? 'All grid managers are down' : 'Grid manager is down'; } } if (%healthissues) { my @infos; for my $level (qw(critical warning other)) { next unless $healthissues{$level}; $cep->{HealthState} ||= $level; push @infos, @{$healthissues{$level}}; } $cep->{HealthStateInfo} = join "; ", @infos; } else { $cep->{HealthState} = 'ok'; } # OBS: Do 'queueing' and 'closed' states apply for a-rex? # OBS: Is there an allownew option for a-rex? #if ( $config->{GridftpdAllowNew} == 0 ) { # $cep->{ServingState} = 'draining'; #} else { # $cep->{ServingState} = 'production'; #} $cep->{ServingState} = $servingstate; # StartTime: get it from hed $cep->{IssuerCA} = $host_info->{issuerca}; # scalar $cep->{TrustedCA} = $host_info->{trustedcas}; # array # TODO: Downtime, is this necessary, and how should it work? $cep->{Staging} = 'staginginout'; $cep->{JobDescription} = [ 'ogf:jsdl:1.0', "nordugrid:xrsl" ]; $cep->{TotalJobs} = $gmtotalcount{notfinished} || 0; $cep->{RunningJobs} = $inlrmsjobstotal{running} || 0; $cep->{SuspendedJobs} = $inlrmsjobstotal{suspended} || 0; $cep->{WaitingJobs} = $inlrmsjobstotal{queued} || 0; $cep->{StagingJobs} = ( $gmtotalcount{preparing} || 0 ) + ( $gmtotalcount{finishing} || 0 ); $cep->{PreLRMSWaitingJobs} = $pendingtotal || 0; $cep->{AccessPolicies} = sub { &{$getAccessPolicies}($cep->{ID}) }; $cep->{OtherInfo} = $host_info->{EMIversion} if ($host_info->{EMIversion}); # array # Associations $cep->{ComputingShareID} = [ values %cshaIDs ]; $cep->{ComputingServiceID} = $csvID; return $cep; }; # don't publish if no Endpoint URL $arexceps->{StageinComputingEndpoint} = $getStageinComputingEndpoint if $stageinhostport ne ''; ### ARIS endpoints are now part of the A-REX service. # TODO: change ComputingService code in printers to scan for Endpoints my $getArisLdapNGEndpoint = sub { my $ep = {}; $ep->{CreationTime} = $creation_time; $ep->{Validity} = $validity_ttl; # Name not necessary -- why? plan was to have it configurable. $ep->{Name} = "ARC CE ARIS LDAP NorduGrid Schema Local Information System"; # Configuration parser does not contain ldap port! 
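# Illustrative values only (hypothetical host, commonly used defaults): with a slapd port of 2135
# the published URL and ID would look something like
#   URL: ldap://ce.example.org:2135/Mds-Vo-name=local,o=grid
#   ID:  $ARISepIDp:ldapng:2135
# The real values come from $ldapngendpoint and $config->{SlapdPort} below.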
# must be updated # port hardcoded for tests $ep->{URL} = $ldapngendpoint; $ep->{ID} = "$ARISepIDp:ldapng:$config->{SlapdPort}"; $ep->{Capability} = $epscapabilities->{'org.nordugrid.ldapng'}; $ep->{Technology} = 'ldap'; $ep->{InterfaceName} = 'org.nordugrid.ldapng'; $ep->{InterfaceVersion} = [ '1.0' ]; # Wrong type, should be URI #$ep->{SupportedProfile} = [ "http://www.ws-i.org/Profiles/BasicProfile-1.0.html", # WS-I 1.0 # "http://schemas.ogf.org/hpcp/2007/01/bp" # HPC-BP # ]; $ep->{Semantics} = [ "http://www.nordugrid.org/documents/arc_infosys.pdf" ]; $ep->{Implementor} = "NorduGrid"; $ep->{ImplementationName} = "nordugrid-arc"; $ep->{ImplementationVersion} = $config->{arcversion}; $ep->{QualityLevel} = "production"; # How to calculate health for this interface? my %healthissues; if (%healthissues) { my @infos; for my $level (qw(critical warning other)) { next unless $healthissues{$level}; $ep->{HealthState} ||= $level; push @infos, @{$healthissues{$level}}; } $ep->{HealthStateInfo} = join "; ", @infos; } else { $ep->{HealthState} = 'ok'; } # OBS: Do 'queueing' and 'closed' states apply for aris? # OBS: Is there an allownew option for aris? #if ( $config->{GridftpdAllowNew} == 0 ) { # $ep->{ServingState} = 'draining'; #} else { # $ep->{ServingState} = 'production'; #} $ep->{ServingState} = 'production'; # TODO: StartTime: get it from hed? # TODO: Downtime, is this necessary, and how should it work? # AccessPolicies $ep->{AccessPolicies} = sub { &{$getAccessPolicies}($ep->{ID}) }; $ep->{OtherInfo} = $host_info->{EMIversion} if ($host_info->{EMIversion}); # array # Associations $ep->{ComputingServiceID} = $csvID; return $ep; }; $arexceps->{LDAPNGEndpoint} = $getArisLdapNGEndpoint if $ldapngendpoint ne ''; my $getArisLdapGlue1Endpoint = sub { my $ep = {}; $ep->{CreationTime} = $creation_time; $ep->{Validity} = $validity_ttl; # Name not necessary -- why? plan was to have it configurable. $ep->{Name} = "ARC CE ARIS LDAP Glue 1.2/1.3 Local Information System"; # Configuration parser does not contain ldap port! # must be updated # port hardcoded for tests $ep->{URL} = $ldapglue1endpoint; $ep->{ID} = "$ARISepIDp:ldapglue1:$config->{SlapdPort}"; $ep->{Capability} = $epscapabilities->{'org.nordugrid.ldapglue1'}; $ep->{Technology} = 'ldap'; $ep->{InterfaceName} = 'org.nordugrid.ldapglue1'; $ep->{InterfaceVersion} = [ '1.0' ]; # Wrong type, should be URI #$ep->{SupportedProfile} = [ "http://www.ws-i.org/Profiles/BasicProfile-1.0.html", # WS-I 1.0 # "http://schemas.ogf.org/hpcp/2007/01/bp" # HPC-BP # ]; $ep->{Semantics} = [ "http://www.nordugrid.org/documents/arc_infosys.pdf" ]; $ep->{Implementor} = "NorduGrid"; $ep->{ImplementationName} = "nordugrid-arc"; $ep->{ImplementationVersion} = $config->{arcversion}; $ep->{QualityLevel} = "production"; # How to calculate health for this interface? my %healthissues; if (%healthissues) { my @infos; for my $level (qw(critical warning other)) { next unless $healthissues{$level}; $ep->{HealthState} ||= $level; push @infos, @{$healthissues{$level}}; } $ep->{HealthStateInfo} = join "; ", @infos; } else { $ep->{HealthState} = 'ok'; } # OBS: Do 'queueing' and 'closed' states apply for aris? # OBS: Is there an allownew option for aris? #if ( $config->{GridftpdAllowNew} == 0 ) { # $ep->{ServingState} = 'draining'; #} else { # $ep->{ServingState} = 'production'; #} $ep->{ServingState} = 'production'; # TODO: StartTime: get it from hed? # TODO: Downtime, is this necessary, and how should it work? 
# AccessPolicies $ep->{AccessPolicies} = sub { &{$getAccessPolicies}($ep->{ID}) }; $ep->{OtherInfo} = $host_info->{EMIversion} if ($host_info->{EMIversion}); # array # Associations $ep->{ComputingServiceID} = $csvID; return $ep; }; $arexceps->{LDAPGLUE1Endpoint} = $getArisLdapGlue1Endpoint if $ldapglue1endpoint ne ''; my $getArisLdapGlue2Endpoint = sub { my $ep = {}; $ep->{CreationTime} = $creation_time; $ep->{Validity} = $validity_ttl; # Name not necessary -- why? plan was to have it configurable. $ep->{Name} = "ARC CE ARIS LDAP GLUE2 Schema Local Information System"; # Configuration parser does not contain ldap port! # must be updated # port hardcoded for tests $ep->{URL} = $ldapglue2endpoint; $ep->{ID} = "$ARISepIDp:ldapglue2:$config->{SlapdPort}"; $ep->{Capability} = $epscapabilities->{'org.nordugrid.ldapglue2'}; $ep->{Technology} = 'ldap'; $ep->{InterfaceName} = 'org.nordugrid.ldapglue2'; $ep->{InterfaceVersion} = [ '1.0' ]; # Wrong type, should be URI #$ep->{SupportedProfile} = [ "http://www.ws-i.org/Profiles/BasicProfile-1.0.html", # WS-I 1.0 # "http://schemas.ogf.org/hpcp/2007/01/bp" # HPC-BP # ]; $ep->{Semantics} = [ "http://www.nordugrid.org/documents/arc_infosys.pdf" ]; $ep->{Implementor} = "NorduGrid"; $ep->{ImplementationName} = "nordugrid-arc"; $ep->{ImplementationVersion} = $config->{arcversion}; $ep->{QualityLevel} = "production"; # How to calculate health for this interface? my %healthissues; if (%healthissues) { my @infos; for my $level (qw(critical warning other)) { next unless $healthissues{$level}; $ep->{HealthState} ||= $level; push @infos, @{$healthissues{$level}}; } $ep->{HealthStateInfo} = join "; ", @infos; } else { $ep->{HealthState} = 'ok'; } # OBS: Do 'queueing' and 'closed' states apply for aris? # OBS: Is there an allownew option for aris? #if ( $config->{GridftpdAllowNew} == 0 ) { # $ep->{ServingState} = 'draining'; #} else { # $ep->{ServingState} = 'production'; #} $ep->{ServingState} = 'production'; # TODO: StartTime: get it from hed? # TODO: Downtime, is this necessary, and how should it work? # AccessPolicies $ep->{AccessPolicies} = sub { &{$getAccessPolicies}($ep->{ID}) }; $ep->{OtherInfo} = $host_info->{EMIversion} if ($host_info->{EMIversion}); # array # Associations $ep->{ComputingServiceID} = $csvID; return $ep; }; $arexceps->{LDAPGLUE2Endpoint} = $getArisLdapGlue2Endpoint if $ldapglue2endpoint ne ''; my $getArisWSRFGlue2Endpoint = sub { my $ep = {}; $ep->{CreationTime} = $creation_time; $ep->{Validity} = $validity_ttl; # Name not necessary -- why? plan was to have it configurable. $ep->{Name} = "ARC CE ARIS WSRF GLUE2 Local Information System"; # Configuration parser does not contain ldap port! # must be updated # port hardcoded for tests $ep->{URL} = $config->{endpoint}; # TODO: put only the port here $ep->{ID} = "$ARISepIDp:wsrfglue2:$config->{endpoint}"; $ep->{Capability} = ['information.discovery.resource']; $ep->{Technology} = 'webservice'; $ep->{InterfaceName} = 'org.nordugrid.wsrfglue2'; $ep->{InterfaceVersion} = [ '1.0' ]; # Wrong type, should be URI #$ep->{SupportedProfile} = [ "http://www.ws-i.org/Profiles/BasicProfile-1.0.html", # WS-I 1.0 # "http://schemas.ogf.org/hpcp/2007/01/bp" # HPC-BP # ]; # TODO: put a relevant document here #$ep->{Semantics} = [ "http://www.nordugrid.org/documents/arc_infosys.pdf" ]; $ep->{Implementor} = "NorduGrid"; $ep->{ImplementationName} = "nordugrid-arc"; $ep->{ImplementationVersion} = $config->{arcversion}; $ep->{QualityLevel} = "production"; # How to calculate health for this interface? 
# TODO: inherit health infos from arex endpoints my %healthissues; if ($config->{x509_user_cert} and $config->{x509_cert_dir}) { if ( $host_info->{hostcert_expired} or $host_info->{issuerca_expired}) { push @{$healthissues{critical}}, "Host credentials expired"; } elsif (not $host_info->{hostcert_enddate} or not $host_info->{issuerca_enddate}) { push @{$healthissues{critical}}, "Host credentials missing"; } elsif ($host_info->{hostcert_enddate} - time < 48*3600 or $host_info->{issuerca_enddate} - time < 48*3600) { push @{$healthissues{warning}}, "Host credentials will expire soon"; } } # check if WS interface is actually running # done with netstat but I'd like to be smarter # this only works if the effective user is root # TODO: find a better way to do this. Ask A-REX? # changed by request of aleksandr. Only if root is running arex. if ($> == 0) { my $netstat=`netstat -antup`; if ( $? != 0 ) { # push @{$healthissues{ok}}, "Checking if ARC WS interface is running: error in executing netstat. Infosys will assume the service is in ok HealthState"; $log->verbose("Checking if ARC WS interface is running: error in executing netstat. Infosys will assume ARIS WSRFGLUE2 is running properly"); } else { # searches if arched is listed in netstat output # best way would be ask arched if its service is up...? if( $netstat !~ m/arched/ ) { push @{$healthissues{critical}}, "arched A-REX endpoint not found with netstat" ; } } } else { # push @{$healthissues{ok}}, "user ".getpwuid($>)." cannot run netstat -p. Infosys will assume the service is in ok HealthState"; $log->verbose("Checking if ARC WS interface is running: user ".getpwuid($>)." cannot run netstat -p. Infosys will assume ARIS WSRFGLUE2 is is running properly"); } if (%healthissues) { my @infos; for my $level (qw(critical warning other unknown)) { next unless $healthissues{$level}; $ep->{HealthState} ||= $level; push @infos, @{$healthissues{$level}}; } $ep->{HealthStateInfo} = join "; ", @infos; } else { $ep->{HealthState} = 'ok'; } $ep->{IssuerCA} = $host_info->{issuerca}; # scalar $ep->{TrustedCA} = $host_info->{trustedcas}; # array # OBS: Do 'queueing' and 'closed' states apply for aris? # OBS: Is there an allownew option for aris? #if ( $config->{GridftpdAllowNew} == 0 ) { # $ep->{ServingState} = 'draining'; #} else { # $ep->{ServingState} = 'production'; #} $ep->{ServingState} = 'production'; # TODO: StartTime: get it from hed? # TODO: Downtime, is this necessary, and how should it work? # AccessPolicies $ep->{AccessPolicies} = sub { &{$getAccessPolicies}($ep->{ID}) }; $ep->{OtherInfo} = $host_info->{EMIversion} if ($host_info->{EMIversion}); # array # Associations $ep->{ComputingServiceID} = $csvID; return $ep; }; $arexceps->{WSRFGLUE2Endpoint} = $getArisWSRFGlue2Endpoint if ($config->{enable_arc_interface}); # Collect endpoints in the datastructure # return ComputingEndpoints in sequence my $getComputingEndpoints = sub { return undef unless my ($cep, $sub) = each %$arexceps; return &$sub; }; $csv->{ComputingEndpoints} = $getComputingEndpoints; # ComputingShares: multiple shares can share the same LRMS queue my @shares = keys %{$GLUE2shares}; my $getComputingShares = sub { return undef unless my ($share, $dummy) = each %{$GLUE2shares}; # Prepare flattened config hash for this share. my $sconfig = { %{$config->{service}}, %{$GLUE2shares->{$share}} }; # List of all shares submitting to the current queue, including the current share. 
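# How the generator stored in ComputingEndpoints above behaves (sketch): each call advances
# Perl's per-hash 'each' iterator and returns the next endpoint, and an empty list from 'each'
# ends the iteration:
#   my %gen = ( a => sub { +{ Name => 'A' } }, b => sub { +{ Name => 'B' } } );
#   while ( my ($key, $sub) = each %gen ) { my $ep = &$sub; }
# Beware that calling keys() or values() on the same hash resets the 'each' iterator; the share
# code below guards against exactly that.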
my $qname = $sconfig->{MappingQueue} || $share; # siblings for the main queue are just itself if ($qname ne $share) { my $siblings = $sconfig->{siblingshares} = []; # Do NOT use 'keys %{$GLUE2shares}' here because it would # reset the iterator of 'each' and cause this function to # always return the same result for my $sn (@shares) { my $s = $GLUE2shares->{$sn}; my $qn = $s->{MappingQueue} || $sn; # main queue should not be among the siblings push @$siblings, $sn if (($qn eq $qname) && ($sn ne $qname)); } } else { my $siblings = $sconfig->{siblingshares} = [$qname]; } # get lrms stats from the actual queues, not share names as they might not match my $qinfo = $lrms_info->{queues}{$qname}; my $csha = {}; $csha->{CreationTime} = $creation_time; $csha->{Validity} = $validity_ttl; $csha->{ID} = $cshaIDs{$share}; $csha->{Name} = $share; $csha->{Description} = $sconfig->{Description} if $sconfig->{Description}; $csha->{MappingQueue} = $qname if $qname; # use limits from LRMS $csha->{MaxCPUTime} = $qinfo->{maxcputime} if defined $qinfo->{maxcputime}; # TODO: implement in backends $csha->{MaxTotalCPUTime} = $qinfo->{maxtotalcputime} if defined $qinfo->{maxtotalcputime}; $csha->{MinCPUTime} = $qinfo->{mincputime} if defined $qinfo->{mincputime}; $csha->{DefaultCPUTime} = $qinfo->{defaultcput} if defined $qinfo->{defaultcput}; $csha->{MaxWallTime} = $qinfo->{maxwalltime} if defined $qinfo->{maxwalltime}; # TODO: MaxMultiSlotWallTime replaces MaxTotalWallTime, but has different meaning. Check that it's used correctly #$csha->{MaxMultiSlotWallTime} = $qinfo->{maxwalltime} if defined $qinfo->{maxwalltime}; $csha->{MinWallTime} = $qinfo->{minwalltime} if defined $qinfo->{minwalltime}; $csha->{DefaultWallTime} = $qinfo->{defaultwallt} if defined $qinfo->{defaultwallt}; my ($maxtotal, $maxlrms) = split ' ', ($config->{maxjobs} || ''); $maxtotal = undef if defined $maxtotal and $maxtotal eq '-1'; $maxlrms = undef if defined $maxlrms and $maxlrms eq '-1'; # MaxWaitingJobs: use the maxjobs config option # OBS: An upper limit is not really enforced by A-REX. # OBS: Currently A-REX only cares about totals, not per share limits! $csha->{MaxTotalJobs} = $maxtotal if defined $maxtotal; # MaxWaitingJobs, MaxRunningJobs: my ($maxrunning, $maxwaiting); # use values from lrms if avaialble if (defined $qinfo->{maxrunning}) { $maxrunning = $qinfo->{maxrunning}; } if (defined $qinfo->{maxqueuable}) { $maxwaiting = $qinfo->{maxqueuable}; } # maxjobs config option sets upper limits if (defined $maxlrms) { $maxrunning = $maxlrms if not defined $maxrunning or $maxrunning > $maxlrms; $maxwaiting = $maxlrms if not defined $maxwaiting or $maxwaiting > $maxlrms; } $csha->{MaxRunningJobs} = $maxrunning if defined $maxrunning; $csha->{MaxWaitingJobs} = $maxwaiting if defined $maxwaiting; # MaxPreLRMSWaitingJobs: use GM's maxjobs option # OBS: Currently A-REX only cares about totals, not per share limits! # OBS: this formula is actually an upper limit on the sum of pre + post # lrms jobs. 
A-REX does not have separate limit for pre lrms jobs $csha->{MaxPreLRMSWaitingJobs} = $maxtotal - $maxlrms if defined $maxtotal and defined $maxlrms; $csha->{MaxUserRunningJobs} = $qinfo->{maxuserrun} if defined $qinfo->{maxuserrun}; # TODO: eventually new return value from LRMS infocollector # not published if not in arc.conf or returned by infocollectors if ($sconfig->{MaxSlotsPerJob} || $qinfo->{MaxSlotsPerJob}) { $csha->{MaxSlotsPerJob} = $sconfig->{MaxSlotsPerJob} || $qinfo->{MaxSlotsPerJob}; } # MaxStageInStreams, MaxStageOutStreams # OBS: A-REX does not have separate limits for up and downloads. # OBS: A-REX only cares about totals, not per share limits! my ($maxloaders, $maxemergency, $maxthreads) = split ' ', ($config->{maxload} || ''); $maxloaders = undef if defined $maxloaders and $maxloaders eq '-1'; $maxthreads = undef if defined $maxthreads and $maxthreads eq '-1'; if ($maxloaders) { # default is 5 (see MAX_DOWNLOADS defined in a-rex/grid-manager/loaders/downloader.cpp) $maxthreads = 5 unless defined $maxthreads; $csha->{MaxStageInStreams} = $maxloaders * $maxthreads; $csha->{MaxStageOutStreams} = $maxloaders * $maxthreads; } # TODO: new return value schedpolicy from LRMS infocollector. my $schedpolicy = $lrms_info->{schedpolicy} || undef; if ($sconfig->{SchedulingPolicy} and not $schedpolicy) { $schedpolicy = 'fifo' if lc($sconfig->{SchedulingPolicy}) eq 'fifo'; $schedpolicy = 'fairshare' if lc($sconfig->{SchedulingPolicy}) eq 'maui'; } $csha->{SchedulingPolicy} = $schedpolicy if $schedpolicy; # GuaranteedVirtualMemory -- all nodes must be able to provide this # much memory per job. Some nodes might be able to afford more per job # (MaxVirtualMemory) # TODO: implement check at job accept time in a-rex # TODO: implement in LRMS plugin maxvmem and maxrss. $csha->{MaxVirtualMemory} = $sconfig->{MaxVirtualMemory} if $sconfig->{MaxVirtualMemory}; # MaxMainMemory -- usage not being tracked by most LRMSs # OBS: new config option (space measured in GB !?) # OBS: Disk usage of jobs is not being enforced. # This limit should correspond with the max local-scratch disk space on # clusters using local disks to run jobs. # TODO: implement check at job accept time in a-rex # TODO: see if any lrms can support this. Implement check in job wrapper $csha->{MaxDiskSpace} = $sconfig->{DiskSpace} if $sconfig->{DiskSpace}; # DefaultStorageService: # OBS: Should be ExtendedBoolean_t (one of 'true', 'false', 'undefined') $csha->{Preemption} = glue2bool($qinfo->{Preemption}) if defined $qinfo->{Preemption}; # ServingState: closed and queuing are not yet supported # OBS: Is there an allownew option for a-rex? #if ( $config->{GridftpdAllowNew} == 0 ) { # $csha->{ServingState} = 'draining'; #} else { # $csha->{ServingState} = 'production'; #} $csha->{ServingState} = 'production'; # Count local jobs my $localrunning = $qinfo->{running}; my $localqueued = $qinfo->{queued}; my $localsuspended = $qinfo->{suspended} || 0; # TODO: [negative] This should avoid taking as local jobs # also those submitted without any VO # local jobs are per queue and not per share. 
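# Worked example of the adjustment below (numbers are made up): if the LRMS reports 10 running
# jobs in this queue and 7 of them were submitted through A-REX
# ($inlrmsjobs{$qname}{running} == 7), the locally submitted share is 10 - 7 = 3. A negative
# intermediate result is clamped to 0 (see the TODO above).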
$localrunning -= $inlrmsjobs{$qname}{running} || 0; if ( $localrunning < 0 ) { $localrunning = 0; } $localqueued -= $inlrmsjobs{$qname}{queued} || 0; if ( $localqueued < 0 ) { $localqueued = 0; } $localsuspended -= $inlrmsjobs{$qname}{suspended} || 0; if ( $localsuspended < 0 ) { $localsuspended = 0; } # OBS: Finished/failed/deleted jobs are not counted my $totaljobs = $gmsharecount{$share}{notfinished} || 0; $totaljobs += $localrunning + $localqueued + $localsuspended; $csha->{TotalJobs} = $totaljobs; $csha->{RunningJobs} = $localrunning + ( $inlrmsjobs{$share}{running} || 0 ); $csha->{WaitingJobs} = $localqueued + ( $inlrmsjobs{$share}{queued} || 0 ); $csha->{SuspendedJobs} = $localsuspended + ( $inlrmsjobs{$share}{suspended} || 0 ); # TODO: backends to count suspended jobs # fix localrunning when displaying the values if negative if ( $localrunning < 0 ) { $localrunning = 0; } $csha->{LocalRunningJobs} = $localrunning; $csha->{LocalWaitingJobs} = $localqueued; $csha->{LocalSuspendedJobs} = $localsuspended; $csha->{StagingJobs} = ( $gmsharecount{$share}{preparing} || 0 ) + ( $gmsharecount{$share}{finishing} || 0 ); $csha->{PreLRMSWaitingJobs} = $gmsharecount{$share}{notsubmitted} || 0; # TODO: investigate if it's possible to get these estimates from maui/torque $csha->{EstimatedAverageWaitingTime} = $qinfo->{averagewaitingtime} if defined $qinfo->{averagewaitingtime}; $csha->{EstimatedWorstWaitingTime} = $qinfo->{worstwaitingtime} if defined $qinfo->{worstwaitingtime}; # TODO: implement $qinfo->{freeslots} in LRMS plugins my $freeslots = 0; if (defined $qinfo->{freeslots}) { $freeslots = $qinfo->{freeslots}; } else { # TODO: to be removed after patch testing. Uncomment to check values # $log->debug("share name: $share, qname: $qname, totalcpus is $qinfo->{totalcpus}, running is $qinfo->{running}, ".Dumper($qinfo)); # TODO: still problems with this one, can be negative! Cpus are not enough. Cores must be counted, or logical cpus $freeslots = $qinfo->{totalcpus} - $qinfo->{running} || 0; } # Local users have individual restrictions # FreeSlots: find the maximum freecpus of any local user mapped in this # share and use that as an upper limit for $freeslots # FreeSlotsWithDuration: for each duration, find the maximum freecpus # of any local user mapped in this share # TODO: is this the correct way to do it? # TODO: currently shows negative numbers, check why my @durations; my %timeslots = max_userfreeslots($qinfo->{users}); if (%timeslots) { # find maximum free slots regardless of duration my $maxuserslots = 0; for my $seconds ( keys %timeslots ) { my $nfree = $timeslots{$seconds}; $maxuserslots = $nfree if $nfree > $maxuserslots; } $freeslots = $maxuserslots < $freeslots ? $maxuserslots : $freeslots; # sort descending by duration, keping 0 first (0 for unlimited) for my $seconds (sort { if ($a == 0) {1} elsif ($b == 0) {-1} else {$b <=> $a} } keys %timeslots) { my $nfree = $timeslots{$seconds} < $freeslots ? $timeslots{$seconds} : $freeslots; unshift @durations, $seconds ? "$nfree:$seconds" : $nfree; } } $freeslots = 0 if $freeslots < 0; # This should be 0 if the queue is full, check the zeroing above? 
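# Worked example for FreeSlotsWithDuration (made-up numbers): with a queue-level $freeslots of 8
# and per-user limits %timeslots = ( 0 => 10, 3600 => 4 ), the loop above produces
# @durations = ( 8, '4:3600' ), which is joined below into the string "8 4:3600". An entry
# without ":seconds" (the 0 key) means those slots are free with no duration limit.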
$csha->{FreeSlots} = $freeslots; my $freeslotswithduration = join(" ", @durations); # TODO: [negative] If more slots than the available are overbooked the number is negative # for example fork with parallel multicore, so we set to 0 $freeslotswithduration = 0 if $freeslotswithduration < 0; $csha->{FreeSlotsWithDuration} = $freeslotswithduration; $csha->{UsedSlots} = $inlrmsslots{$share}{running}; $csha->{RequestedSlots} = $requestedslots{$share} || 0; # TODO: detect reservationpolicy in the lrms $csha->{ReservationPolicy} = $qinfo->{reservationpolicy} if $qinfo->{reservationpolicy}; # Florido's Mapping Policies $csha->{MappingPolicies} = sub { &{$getMappingPolicies}($csha->{ID},$csha->{Name})}; # Tag: skip it for now # Associations my $xenvs = $sconfig->{ExecutionEnvironmentName} || []; push @{$csha->{ExecutionEnvironmentID}}, $xenvIDs{$_} for @$xenvs; ## check this association below. Which endpoint? $csha->{ComputingEndpointID} = \@cepIDs; $csha->{ServiceID} = $csvID; $csha->{ComputingServiceID} = $csvID; return $csha; }; $csv->{ComputingShares} = $getComputingShares; # ComputingManager my $getComputingManager = sub { my $cmgr = {}; $cmgr->{CreationTime} = $creation_time; $cmgr->{Validity} = $validity_ttl; $cmgr->{ID} = $cmgrID; my $cluster_info = $lrms_info->{cluster}; # array # Name not needed $cmgr->{ProductName} = $cluster_info->{lrms_glue_type} || lc $cluster_info->{lrms_type}; $cmgr->{ProductVersion} = $cluster_info->{lrms_version}; # $cmgr->{Reservation} = "undefined"; $cmgr->{BulkSubmission} = "false"; #$cmgr->{TotalPhysicalCPUs} = $totalpcpus if $totalpcpus; $cmgr->{TotalLogicalCPUs} = $totallcpus if $totallcpus; # OBS: Assuming 1 slot per CPU # TODO: slots should be cores? $cmgr->{TotalSlots} = $cluster_info->{totalcpus}; # This number can be more than totalslots in case more # than the published cores can be used -- happens with fork my @queuenames = keys %{$lrms_info->{queues}}; my $gridrunningslots = 0; for my $qname (@queuenames) { $gridrunningslots += $inlrmsslots{$qname}{running}; } my $localrunningslots = $cluster_info->{usedcpus} - $gridrunningslots; $cmgr->{SlotsUsedByLocalJobs} = ($localrunningslots < 0) ? 0 : $localrunningslots; $cmgr->{SlotsUsedByGridJobs} = $gridrunningslots; $cmgr->{Homogeneous} = $homogeneous ? 
"true" : "false"; # NetworkInfo of all ExecutionEnvironments my %netinfo = (); for my $xeconfig (values %{$config->{xenvs}}) { $netinfo{$xeconfig->{NetworkInfo}} = 1 if $xeconfig->{NetworkInfo}; } $cmgr->{NetworkInfo} = [ keys %netinfo ] if %netinfo; # TODO: this could also be cross-checked with info from ExecEnvs my $cpuistribution = $cluster_info->{cpudistribution} || ''; $cpuistribution =~ s/cpu:/:/g; $cmgr->{LogicalCPUDistribution} = $cpuistribution if $cpuistribution; if (defined $host_info->{session_total}) { my $sharedsession = "true"; $sharedsession = "false" if lc($config->{shared_filesystem}) eq "no" or lc($config->{shared_filesystem}) eq "false"; $cmgr->{WorkingAreaShared} = $sharedsession; $cmgr->{WorkingAreaGuaranteed} = "false"; my $gigstotal = ceil($host_info->{session_total} / 1024); my $gigsfree = ceil($host_info->{session_free} / 1024); $cmgr->{WorkingAreaTotal} = $gigstotal; $cmgr->{WorkingAreaFree} = $gigsfree; # OBS: There is no special area for MPI jobs, no need to advertize anything #$cmgr->{WorkingAreaMPIShared} = $sharedsession; #$cmgr->{WorkingAreaMPITotal} = $gigstotal; #$cmgr->{WorkingAreaMPIFree} = $gigsfree; #$cmgr->{WorkingAreaMPILifeTime} = $sessionlifetime; } my ($sessionlifetime) = (split ' ', $config->{control}{'.'}{defaultttl}); $sessionlifetime ||= 7*24*60*60; $cmgr->{WorkingAreaLifeTime} = $sessionlifetime; if (defined $host_info->{cache_total}) { my $gigstotal = ceil($host_info->{cache_total} / 1024); my $gigsfree = ceil($host_info->{cache_free} / 1024); $cmgr->{CacheTotal} = $gigstotal; $cmgr->{CacheFree} = $gigsfree; } if ($config->{service}{Benchmark}) { my @bconfs = @{$config->{service}{Benchmark}}; $cmgr->{Benchmarks} = sub { return undef unless @bconfs; my ($type, $value) = split " ", shift @bconfs; my $bench = {}; $bench->{Type} = $type; $bench->{Value} = $value; $bench->{ID} = "urn:ogf:Benchmark:$hostname:$lrmsname:$type"; return $bench; }; } # Not publishing absolute paths #$cmgr->{TmpDir}; #$cmgr->{ScratchDir}; #$cmgr->{ApplicationDir}; # ExecutionEnvironments my $getExecutionEnvironments = sub { return undef unless my ($xenv, $dummy) = each %{$config->{xenvs}}; my $xeinfo = $xeinfos->{$xenv}; # Prepare flattened config hash for this xenv. my $xeconfig = { %{$config->{service}}, %{$config->{xenvs}{$xenv}} }; my $execenv = {}; my $execenvName = $1 if ( $xenvIDs{$xenv} =~ /(?:.*)\:(.*)$/ ); # $execenv->{Name} = $xenv; $execenv->{Name} = $execenvName; $execenv->{CreationTime} = $creation_time; $execenv->{Validity} = $validity_ttl; $execenv->{ID} = $xenvIDs{$xenv}; my $machine = $xeinfo->{machine}; if ($machine) { $machine =~ s/^x86_64/amd64/; $machine =~ s/^ia64/itanium/; $machine =~ s/^ppc/powerpc/; } my $sysname = $xeinfo->{sysname}; if ($sysname) { $sysname =~ s/^Linux/linux/; $sysname =~ s/^Darwin/macosx/; $sysname =~ s/^SunOS/solaris/; } elsif ($xeconfig->{OpSys}) { $sysname = 'linux' if grep /linux/i, @{$xeconfig->{OpSys}}; } $execenv->{Platform} = $machine ? 
$machine : 'UNDEFINEDVALUE'; # placeholder value $execenv->{TotalInstances} = $xeinfo->{ntotal} if defined $xeinfo->{ntotal}; $execenv->{UsedInstances} = $xeinfo->{nbusy} if defined $xeinfo->{nbusy}; $execenv->{UnavailableInstances} = $xeinfo->{nunavailable} if defined $xeinfo->{nunavailable}; $execenv->{VirtualMachine} = glue2bool($xeconfig->{VirtualMachine}) if defined $xeconfig->{VirtualMachine}; $execenv->{PhysicalCPUs} = $xeinfo->{pcpus} if $xeinfo->{pcpus}; $execenv->{LogicalCPUs} = $xeinfo->{lcpus} if $xeinfo->{lcpus}; if ($xeinfo->{pcpus} and $xeinfo->{lcpus}) { my $cpum = ($xeinfo->{pcpus} > 1) ? 'multicpu' : 'singlecpu'; my $corem = ($xeinfo->{lcpus} > $xeinfo->{pcpus}) ? 'multicore' : 'singlecore'; $execenv->{CPUMultiplicity} = "$cpum-$corem"; } $execenv->{CPUVendor} = $xeconfig->{CPUVendor} if $xeconfig->{CPUVendor}; $execenv->{CPUModel} = $xeconfig->{CPUModel} if $xeconfig->{CPUModel}; $execenv->{CPUVersion} = $xeconfig->{CPUVersion} if $xeconfig->{CPUVersion}; $execenv->{CPUClockSpeed} = $xeconfig->{CPUClockSpeed} if $xeconfig->{CPUClockSpeed}; $execenv->{CPUTimeScalingFactor} = $xeconfig->{CPUTimeScalingFactor} if $xeconfig->{CPUTimeScalingFactor}; $execenv->{WallTimeScalingFactor} = $xeconfig->{WallTimeScalingFactor} if $xeconfig->{WallTimeScalingFactor}; $execenv->{MainMemorySize} = $xeinfo->{pmem} || "999999999"; # placeholder value $execenv->{VirtualMemorySize} = $xeinfo->{vmem} if $xeinfo->{vmem}; $execenv->{OSFamily} = $sysname || 'UNDEFINEDVALUE'; # placeholder value $execenv->{OSName} = $xeconfig->{OSName} if $xeconfig->{OSName}; $execenv->{OSVersion} = $xeconfig->{OSVersion} if $xeconfig->{OSVersion}; # if Connectivity* not specified, assume false. # this has been change due to this value to be mandatory in the LDAP schema. $execenv->{ConnectivityIn} = glue2bool($xeconfig->{ConnectivityIn}) || 'FALSE'; # placeholder value $execenv->{ConnectivityOut} = glue2bool($xeconfig->{ConnectivityOut}) || 'FALSE'; # placeholder value $execenv->{NetworkInfo} = [ $xeconfig->{NetworkInfo} ] if $xeconfig->{NetworkInfo}; if ($callcount == 1) { $log->warning("MainMemorySize not set for ExecutionEnvironment $xenv") unless $xeinfo->{pmem}; $log->warning("OSFamily not set for ExecutionEnvironment $xenv") unless $sysname; $log->warning("ConnectivityIn not set for ExecutionEnvironment $xenv") unless defined $xeconfig->{ConnectivityIn}; $log->warning("ConnectivityOut not set for ExecutionEnvironment $xenv") unless defined $xeconfig->{ConnectivityOut}; my @missing; for (qw(Platform CPUVendor CPUModel CPUClockSpeed OSFamily OSName OSVersion)) { push @missing, $_ unless defined $execenv->{$_}; } $log->info("Missing attributes for ExecutionEnvironment $xenv: ".join ", ", @missing) if @missing; } if ($xeconfig->{Benchmark}) { my @bconfs = @{$xeconfig->{Benchmark}}; $execenv->{Benchmarks} = sub { return undef unless @bconfs; my ($type, $value) = split " ", shift @bconfs; my $bench = {}; $bench->{Type} = $type; $bench->{Value} = $value; $bench->{ID} = "urn:ogf:Benchmark:$hostname:$execenvName:$type"; return $bench; }; } # Associations for my $share (keys %{$GLUE2shares}) { my $sconfig = $GLUE2shares->{$share}; next unless $sconfig->{ExecutionEnvironmentName}; next unless grep { $xenv eq $_ } @{$sconfig->{ExecutionEnvironmentName}}; push @{$execenv->{ComputingShareID}}, $cshaIDs{$share}; } $execenv->{ManagerID} = $cmgrID; $execenv->{ComputingManagerID} = $cmgrID; return $execenv; }; $cmgr->{ExecutionEnvironments} = $getExecutionEnvironments; # ApplicationEnvironments my $getApplicationEnvironments 
= sub { return undef unless my ($rte, $rinfo) = each %$rte_info; my $appenv = {}; # name and version is separated at the first dash (-) which is followed by a digit my ($name,$version) = ($rte, undef); ($name,$version) = ($1, $2) if $rte =~ m{^(.*?)-([0-9].*)$}; $appenv->{AppName} = $name; $appenv->{AppVersion} = $version if defined $version; $appenv->{ID} = $aenvIDs{$rte}; $appenv->{State} = $rinfo->{state} if $rinfo->{state}; $appenv->{Description} = $rinfo->{description} if $rinfo->{description}; #$appenv->{ParallelSupport} = 'none'; # Associations $appenv->{ComputingManagerID} = $cmgrID; return $appenv; }; $cmgr->{ApplicationEnvironments} = $getApplicationEnvironments; # Associations $cmgr->{ServiceID} = $csvID; $cmgr->{ComputingServiceID} = $csvID; return $cmgr; }; $csv->{ComputingManager} = $getComputingManager; # Location and Contacts if (my $lconfig = $config->{location}) { my $count = 1; $csv->{Location} = sub { return undef if $count-- == 0; my $loc = {}; $loc->{ID} = "urn:ogf:Location:$hostname:ComputingService:arex"; for (qw(Name Address Place PostCode Country Latitude Longitude)) { $loc->{$_} = $lconfig->{$_} if defined $lconfig->{$_}; } $loc->{ServiceForeignKey} = $csvID; return $loc; } } if (my $cconfs = $config->{contacts}) { my $i = 0; $csv->{Contacts} = sub { return undef unless $i < scalar(@$cconfs); my $cconfig = $cconfs->[$i]; #my $detail = $cconfig->{Detail}; my $cont = {}; $cont->{ID} = "urn:ogf:Contact:$hostname:ComputingService:arex:con$i"; for (qw(Name Detail Type)) { $cont->{$_} = $cconfig->{$_} if $cconfig->{$_}; } $cont->{ServiceForeignKey} = $csvID; $i++; return $cont; }; } # Associations $csv->{AdminDomainID} = $adID; $csv->{ServiceID} = $csvID; return $csv; }; my $getAdminDomain = sub { my $dom = { ID => $adID, Name => $config->{admindomain}{Name}, OtherInfo => $config->{admindomain}{OtherInfo}, Description => $config->{admindomain}{Description}, WWW => $config->{admindomain}{WWW}, Owner => $config->{admindomain}{Owner}, CreationTime => $creation_time, Validity => $validity_ttl }; $dom->{Distributed} = glue2bool($config->{admindomain}{Distributed}); # TODO: Location and Contact for AdminDomain goes here. # Contacts can be multiple, don't know how to handle this # in configfile. # TODO: remember to sync ForeignKeys # Disabled for now, as it would only cause trouble. # if (my $lconfig = $config->{location}) { # my $count = 1; # $dom->{Location} = sub { # return undef if $count-- == 0; # my $loc = {}; # $loc->{ID} = "urn:ogf:Location:$hostname:AdminDomain:$admindomain"; # for (qw(Name Address Place PostCode Country Latitude Longitude)) { # $loc->{$_} = $lconfig->{$_} if defined $lconfig->{$_}; # } # return $loc; # } # } # if (my $cconfs = $config->{contacts}) { # my $i = 0; # $dom->{Contacts} = sub { # return undef unless $i < scalar(@$cconfs); # my $cconfig = $cconfs->[$i++]; # #my $detail = $cconfig->{Detail}; # my $cont = {}; # $cont->{ID} = "urn:ogf:Contact:$hostname:AdminDomain:$admindomain:$i"; # for (qw(Name Detail Type)) { # $cont->{$_} = $cconfig->{$_} if $cconfig->{$_}; # } # return $cont; # }; # } return $dom; }; # Other Services my $othersv = {}; # ARIS service has been removed from the rendering. 
# other services that follow might end up to be endpoints #$othersv->{ARIS} = $getARISService; # Service:Cache-Index my $getCacheIndexService = sub { my $sv = {}; $sv->{CreationTime} = $creation_time; $sv->{Validity} = $validity_ttl; $sv->{ID} = $CacheIndexsvID; $sv->{Name} = "$config->{service}{ClusterName}:Service:Cache-Index" if $config->{service}{ClusterName}; # scalar $sv->{OtherInfo} = $config->{service}{OtherInfo} if $config->{service}{OtherInfo}; # array $sv->{Capability} = [ 'information.discovery' ]; $sv->{Type} = 'org.nordugrid.information.cache-index'; # OBS: QualityLevel reflects the quality of the sotware # One of: development, testing, pre-production, production $sv->{QualityLevel} = 'testing'; $sv->{StatusInfo} = $config->{service}{StatusInfo} if $config->{service}{StatusInfo}; # array $sv->{Complexity} = "endpoint=1,share=0,resource=0"; #EndPoint here my $getCacheIndexEndpoint = sub { return undef; my $ep = {}; $ep->{CreationTime} = $creation_time; $ep->{Validity} = $validity_ttl; # Name not necessary -- why? added back $ep->{Name} = "ARC Cache Index"; # Configuration parser does not contain ldap port! # must be updated # port hardcoded for tests # $ep->{URL} = "ldap://$hostname:$config->{SlapdPort}/"; # $ep->{ID} = $CacheIndexepIDp.":".$ep->{URL}; $ep->{Capability} = [ 'information.discovery' ]; $ep->{Technology} = 'webservice'; $ep->{InterfaceName} = 'Cache-Index'; $ep->{InterfaceVersion} = [ '1.0' ]; # Wrong type, should be URI #$ep->{SupportedProfile} = [ "http://www.ws-i.org/Profiles/BasicProfile-1.0.html", # WS-I 1.0 # "http://schemas.ogf.org/hpcp/2007/01/bp" # HPC-BP # ]; $ep->{Semantics} = [ "http://www.nordugrid.org/documents/arc_infosys.pdf" ]; $ep->{Implementor} = "NorduGrid"; $ep->{ImplementationName} = "nordugrid-arc"; $ep->{ImplementationVersion} = $config->{arcversion}; $ep->{QualityLevel} = "testing"; # How to calculate health for this interface? my %healthissues; if ($config->{x509_user_cert} and $config->{x509_cert_dir}) { if ( $host_info->{hostcert_expired} or $host_info->{issuerca_expired}) { push @{$healthissues{critical}}, "Host credentials expired"; } elsif (not $host_info->{hostcert_enddate} or not $host_info->{issuerca_enddate}) { push @{$healthissues{critical}}, "Host credentials missing"; } elsif ($host_info->{hostcert_enddate} - time < 48*3600 or $host_info->{issuerca_enddate} - time < 48*3600) { push @{$healthissues{warning}}, "Host credentials will expire soon"; } } if (%healthissues) { my @infos; for my $level (qw(critical warning other)) { next unless $healthissues{$level}; $ep->{HealthState} ||= $level; push @infos, @{$healthissues{$level}}; } $ep->{HealthStateInfo} = join "; ", @infos; } else { $ep->{HealthState} = 'ok'; } # OBS: Do 'queueing' and 'closed' states apply for a-rex? # OBS: Is there an allownew option for a-rex? #if ( $config->{GridftpdAllowNew} == 0 ) { # $ep->{ServingState} = 'draining'; #} else { # $ep->{ServingState} = 'production'; #} $ep->{ServingState} = 'production'; # StartTime: get it from hed $ep->{IssuerCA} = $host_info->{issuerca}; # scalar $ep->{TrustedCA} = $host_info->{trustedcas}; # array # TODO: Downtime, is this necessary, and how should it work? 
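# The AccessPolicies entry below follows the same lazy-iterator convention as
# the other sub-entries: the closure pops one configured accesspolicies block
# per call, rendering a single AccessPolicy object, and returns undef once the
# list is exhausted.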
if ($config->{accesspolicies}) { my @apconfs = @{$config->{accesspolicies}}; $ep->{AccessPolicies} = sub { return undef unless @apconfs; my $apconf = pop @apconfs; my $apol = {}; $apol->{ID} = "$apolIDp:".join(",", @{$apconf->{Rule}}); $apol->{Scheme} = "basic"; $apol->{Rule} = $apconf->{Rule}; $apol->{UserDomainID} = $apconf->{UserDomainID}; $apol->{EndpointID} = $ep->{ID}; return $apol; }; } $ep->{OtherInfo} = $host_info->{EMIversion} if ($host_info->{EMIversion}); # array # Associations $ep->{ServiceID} = $CacheIndexsvID; return $ep; }; $sv->{Endpoint} = $getCacheIndexEndpoint; # Associations $sv->{AdminDomainID} = $adID; return $sv; }; # Disabled until I find a way to know if it's configured or not. # $othersv->{CacheIndex} = $getCacheIndexService; # Service: HED-Control / TODO: should be changed to HEDSHOT my $getHEDControlService = sub { my $sv = {}; $sv->{CreationTime} = $creation_time; $sv->{Validity} = $validity_ttl; $sv->{ID} = $HEDControlsvID; $sv->{Name} = "$config->{service}{ClusterName}:Service:HED-CONTROL" if $config->{service}{ClusterName}; # scalar $sv->{OtherInfo} = $config->{service}{OtherInfo} if $config->{service}{OtherInfo}; # array $sv->{Capability} = [ 'information.discovery' ]; $sv->{Type} = 'org.nordugrid.information.cache-index'; # OBS: QualityLevel reflects the quality of the sotware # One of: development, testing, pre-production, production $sv->{QualityLevel} = 'pre-production'; $sv->{StatusInfo} = $config->{service}{StatusInfo} if $config->{service}{StatusInfo}; # array $sv->{Complexity} = "endpoint=1,share=0,resource=0"; #EndPoint here my $getHEDControlEndpoint = sub { #return undef unless ( -e $config->{bdii_update_pid_file}); my $ep = {}; $ep->{CreationTime} = $creation_time; $ep->{Validity} = $validity_ttl; # Name not necessary -- why? added back $ep->{Name} = "ARC HED WS Control Interface"; # Configuration parser does not contain ldap port! # must be updated # port hardcoded for tests $ep->{URL} = "$arexhostport/mgmt"; $ep->{ID} = $HEDControlepIDp.":".$ep->{URL}; $ep->{Capability} = [ 'containermanagement.control' ]; $ep->{Technology} = 'webservice'; $ep->{InterfaceName} = 'HED-CONTROL'; $ep->{InterfaceVersion} = [ '1.0' ]; # Wrong type, should be URI #$ep->{SupportedProfile} = [ "http://www.ws-i.org/Profiles/BasicProfile-1.0.html", # WS-I 1.0 # "http://schemas.ogf.org/hpcp/2007/01/bp" # HPC-BP # ]; $ep->{Semantics} = [ "http://www.nordugrid.org/documents/" ]; $ep->{Implementor} = "NorduGrid"; $ep->{ImplementationName} = "nordugrid-arc"; $ep->{ImplementationVersion} = $config->{arcversion}; $ep->{QualityLevel} = "pre-production"; # How to calculate health for this interface? my %healthissues; if ($config->{x509_user_cert} and $config->{x509_cert_dir}) { if ( $host_info->{hostcert_expired} or $host_info->{issuerca_expired}) { push @{$healthissues{critical}}, "Host credentials expired"; } elsif (not $host_info->{hostcert_enddate} or not $host_info->{issuerca_enddate}) { push @{$healthissues{critical}}, "Host credentials missing"; } elsif ($host_info->{hostcert_enddate} - time < 48*3600 or $host_info->{issuerca_enddate} - time < 48*3600) { push @{$healthissues{warning}}, "Host credentials will expire soon"; } } if (%healthissues) { my @infos; for my $level (qw(critical warning other)) { next unless $healthissues{$level}; $ep->{HealthState} ||= $level; push @infos, @{$healthissues{$level}}; } $ep->{HealthStateInfo} = join "; ", @infos; } else { $ep->{HealthState} = 'ok'; } # OBS: Do 'queueing' and 'closed' states apply for a-rex? 
# OBS: Is there an allownew option for a-rex? #if ( $config->{GridftpdAllowNew} == 0 ) { # $ep->{ServingState} = 'draining'; #} else { # $ep->{ServingState} = 'production'; #} $ep->{ServingState} = 'production'; # StartTime: get it from hed $ep->{IssuerCA} = $host_info->{issuerca}; # scalar $ep->{TrustedCA} = $host_info->{trustedcas}; # array # TODO: Downtime, is this necessary, and how should it work? if ($config->{accesspolicies}) { my @apconfs = @{$config->{accesspolicies}}; $ep->{AccessPolicies} = sub { return undef unless @apconfs; my $apconf = pop @apconfs; my $apol = {}; $apol->{ID} = "$apolIDp:".join(",", @{$apconf->{Rule}}); $apol->{Scheme} = "basic"; $apol->{Rule} = $apconf->{Rule}; $apol->{UserDomainID} = $apconf->{UserDomainID}; $apol->{EndpointID} = $ep->{ID}; return $apol; }; }; $ep->{OtherInfo} = $host_info->{EMIversion} if ($host_info->{EMIversion}); # array # Associations $ep->{ServiceID} = $HEDControlsvID; return $ep; }; $sv->{Endpoint} = $getHEDControlEndpoint; # Associations $sv->{AdminDomainID} = $adID; return $sv; }; # Disabled until I find a way to know if it's configured or not. # $othersv->{HEDControl} = $getHEDControlService); # aggregates services my $getServices = sub { return undef unless my ($service, $sub) = each %$othersv; # returns the hash for Entries. Odd, must understand this behaviour return &$sub; }; # TODO: UserDomain my $getUserDomain = sub { my $ud = {}; $ud->{CreationTime} = $creation_time; $ud->{Validity} = $validity_ttl; $ud->{ID} = $udID; $ud->{Name} = ""; $ud->{OtherInfo} = $config->{service}{OtherInfo} if $config->{service}{OtherInfo}; # array $ud->{Description} = ''; # Number of hops to reach the root $ud->{Level} = 0; # Endpoint of some service, such as VOMS server $ud->{UserManager} = 'http://voms.nordugrid.org'; # List of users $ud->{Member} = [ 'users here' ]; # TODO: Calculate Policies, ContactID and LocationID # Associations $ud->{UserDomainID} = $udID; return $ud; }; # TODO: ToStorageElement my $getToStorageElement = sub { my $tse = {}; $tse->{CreationTime} = $creation_time; $tse->{Validity} = $validity_ttl; $tse->{ID} = $tseID; $tse->{Name} = ""; $tse->{OtherInfo} = ''; # array # Local path on the machine to access storage, for example a NFS share $tse->{LocalPath} = 'String'; # Remote path in the Storage Service associated with the local path above $tse->{RemotePath} = 'String'; # Associations $tse->{ComputingService} = $csvID; $tse->{StorageService} = ''; }; # returns the two branches for =grid and =services GroupName. # It's not optimal but it doesn't break recursion my $GLUE2InfoTreeRoot = sub { my $treeroot = { AdminDomain => $getAdminDomain, UserDomain => $getUserDomain, ComputingService => $getComputingService, Services => $getServices }; return $treeroot; }; return $GLUE2InfoTreeRoot; } 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/ARC0ClusterInfo.pm0000644000000000000000000000012413153453707026725 xustar000000000000000027 mtime=1504597959.039947 27 atime=1513200575.844718 30 ctime=1513200663.150786021 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/ARC0ClusterInfo.pm0000644000175000002070000005754213153453707027007 0ustar00mockbuildmock00000000000000package ARC0ClusterInfo; # This information collector combines the output of the other information collectors # and prepares info modelled on the classic Nordugrid information schema (arc0). 
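#
# Like the GLUE2 collector earlier in this package, the structure returned by
# collect() is lazy: collect() hands back a $getCluster closure, and nested
# entries (queues, jobs, users) are themselves closures that yield one hash
# per call and return undef when exhausted, which lets the LDIF/XML printers
# further down the archive stream entries instead of holding the whole tree
# in memory.  A minimal consumption sketch, assuming only a $getCluster
# closure as returned by collect() below; the sub name _walk_cluster_example
# is hypothetical and not part of this module's interface:
sub _walk_cluster_example {
    my ($getCluster) = @_;
    my $c = $getCluster->() or return;        # top-level cluster hash
    print "cluster: $c->{name}\n";
    my $getQueues = $c->{queues};             # lazy iterator over queues
    while (defined(my $q = $getQueues->())) {
        print "  queue: $q->{name} ($q->{status})\n";
        my $getJobs = $q->{jobs};             # lazy iterator over grid jobs in this queue
        while (defined(my $j = $getJobs->())) {
            print "    job: $j->{name} status $j->{status}\n";
        }
    }
}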
use POSIX; use Storable; use strict; use LogUtils; our $log = LogUtils->getLogger(__PACKAGE__); sub mds_date { my $seconds = shift; return strftime("%Y%m%d%H%M%SZ", gmtime($seconds)); } ############################################################################ # Combine info from all sources to prepare the final representation ############################################################################ sub collect($) { my ($data) = @_; my $config = $data->{config}; my $usermap = $data->{usermap}; my $host_info = $data->{host_info}; my $rte_info = $data->{rte_info}; my $gmjobs_info = $data->{gmjobs_info}; my $lrms_info = $data->{lrms_info}; my $nojobs = $data->{nojobs}; my @allxenvs = keys %{$config->{xenvs}}; my @allshares = keys %{$config->{shares}}; # homogeneity of the cluster my $homogeneous; if (defined $config->{service}{Homogeneous}) { $homogeneous = $config->{service}{Homogeneous}; } else { # not homogeneous if there are multiple ExecEnvs $homogeneous = @allxenvs > 1 ? 0 : 1; # not homogeneous if one ExecEnv is not homogeneous for my $xeconfig (values %{$config->{xenvs}}) { $homogeneous = 0 if defined $xeconfig->{Homogeneous} and not $xeconfig->{Homogeneous}; } } # config overrides my $hostname = $config->{hostname} || $host_info->{hostname}; # count grid-manager jobs my %gmjobcount = (totaljobs => 0, accepted => 0, preparing => 0, submit => 0, inlrms => 0, canceling => 0, finishing => 0, finished => 0, deleted => 0); for my $job (values %{$gmjobs_info}) { $gmjobcount{totaljobs}++; if ( $job->{status} =~ /ACCEPTED/ ) { $gmjobcount{accepted}++ ; next; } if ( $job->{status} =~ /PREPARING/) { $gmjobcount{preparing}++; next; } if ( $job->{status} =~ /SUBMIT/ ) { $gmjobcount{submit}++ ; next; } if ( $job->{status} =~ /INLRMS/ ) { $gmjobcount{inlrms}++ ; next; } if ( $job->{status} =~ /CANCELING/) { $gmjobcount{canceling}++; next; } if ( $job->{status} =~ /FINISHING/) { $gmjobcount{finishing}++; next; } if ( $job->{status} =~ /FINISHED/ ) { $gmjobcount{finished}++ ; next; } if ( $job->{status} =~ /FAILED/ ) { $gmjobcount{finished}++ ; next; } if ( $job->{status} =~ /KILLED/ ) { $gmjobcount{finished}++ ; next; } if ( $job->{status} =~ /DELETED/ ) { $gmjobcount{deleted}++ ; next; } $log->warning("Unexpected job status: $job->{status}"); } # count grid jobs running and queued in LRMS for each queue my %gridrunning; my %gridqueued; for my $jobid (keys %{$gmjobs_info}) { my $job = $gmjobs_info->{$jobid}; my $share = $job->{share}; if ($job->{status} eq 'INLRMS') { my $lrmsid = $job->{localid}; unless (defined $lrmsid) { $log->warning("localid missing for INLRMS job $jobid"); next; } my $lrmsjob = $lrms_info->{jobs}{$lrmsid}; unless ((defined $lrmsjob) and $lrmsjob->{status}) { $log->warning("LRMS plugin returned no status for job $jobid (lrmsid: $lrmsid)"); next; } if ((defined $lrmsjob) and $lrmsjob->{status} ne 'EXECUTED') { if ($lrmsjob->{status} eq 'R' or $lrmsjob->{status} eq 'S') { $gridrunning{$share} += $lrmsjob->{cpus}; } else { $gridqueued{$share}++; } } } } my %prelrmsqueued; my %pendingprelrms; my %gm_queued; my @gmqueued_states = ("ACCEPTED","PENDING:ACCEPTED","PREPARING","PENDING:PREPARING","SUBMIT"); my @gmpendingprelrms_states =("PENDING:ACCEPTED","PENDING:PREPARING" ); for my $job_gridowner (keys %$usermap) { $gm_queued{$job_gridowner} = 0; } for my $ID (keys %{$gmjobs_info}) { my $share = $gmjobs_info->{$ID}{share}; # set the job_gridowner of the job (read from the job.id.local) # which is used as the key of the %gm_queued my $job_gridowner = $gmjobs_info->{$ID}{subject}; 
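# Note: the two greps below match the job status as an anchored regex against
# the state lists defined above; since those state names contain no regex
# metacharacters, this is effectively an exact membership test.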
# count the gm_queued jobs per grid users (SNs) and the total if ( grep /^$gmjobs_info->{$ID}{status}$/, @gmqueued_states ) { $gm_queued{$job_gridowner}++; $prelrmsqueued{$share}++; } # count the GM PRE-LRMS pending jobs if ( grep /^$gmjobs_info->{$ID}{status}$/, @gmpendingprelrms_states ) { $pendingprelrms{$share}++; } } # Grid Manager job state mappings to Infosys job states my %map_always = ( 'ACCEPTED' => 'ACCEPTING', 'PENDING:ACCEPTED' => 'ACCEPTED', 'PENDING:PREPARING' => 'PREPARED', 'PENDING:INLRMS' => 'EXECUTED', 'CANCELING' => 'KILLING'); my %map_if_gm_up = ( 'SUBMIT' => 'SUBMITTING'); my %map_if_gm_down = ( 'PREPARING' => 'ACCEPTED', 'FINISHING' => 'EXECUTED', 'SUBMIT' => 'PREPARED'); # Infosys is run by A-REX: Always assume GM is up $host_info->{processes}{'grid-manager'} = 1; for my $job (values %$gmjobs_info) { $job->{status} = $map_always{$job->{status}} if grep { $job->{status} eq $_ } keys %map_always; if ($host_info->{processes}{'grid-manager'}) { $job->{status} = $map_if_gm_up{$job->{status}} if grep { $job->{status} eq $_ } keys %map_if_gm_up; } else { $job->{status} = $map_if_gm_down{$job->{status}} if grep { $job->{status} eq $_ } keys %map_if_gm_down; } } my @supportmails; if ($config->{contacts}) { for (@{$config->{contacts}}) { push @supportmails, $1 if $_->{Detail} =~ m/^mailto:(.*)/; } } my @authorizedvos = (); if ($config->{service}{AuthorizedVO}) { @authorizedvos = @{$config->{service}{AuthorizedVO}}; # add VO: suffix to each authorized VO @authorizedvos = map { "VO:".$_ } @authorizedvos; } # Assume no connectivity unles explicitly configured otherwise on each # ExecutionEnvironment my ($inbound, $outbound) = (1,1); for my $xeconfig (values %{$config->{xenvs}}) { $inbound = 0 unless ($xeconfig->{connectivityIn} || 'false') eq 'true'; $outbound = 0 unless ($xeconfig->{connectivityOut} || 'false') eq 'true'; } $inbound = 1 if ($config->{service}{connectivityIn} || 'false') eq 'true'; $outbound = 1 if ($config->{service}{connectivityOut} || 'false') eq 'true'; # the earliest of hostcert and cacert enddates. my $credenddate; if ($host_info->{issuerca_enddate} and $host_info->{hostcert_enddate}) { $credenddate = ( $host_info->{hostcert_enddate} lt $host_info->{issuerca_enddate} ) ? $host_info->{hostcert_enddate} : $host_info->{issuerca_enddate}; } my $callcount = 0; # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # build information tree # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # my $getCluster = sub { $callcount++; my $c = {}; $c->{name} = $hostname; $c->{aliasname} = $config->{service}{ClusterAlias} if $config->{service}{ClusterAlias}; $c->{comment} = $config->{service}{ClusterComment} if $config->{service}{ClusterComment}; # added to help client to match GLUE2 services on the same machine $c->{comment} = $c->{comment} ? 
$c->{comment}."; GLUE2ServiceID=urn:ogf:ComputingService:$hostname:arex" : "GLUE2ServiceID=urn:ogf:ComputingService:$hostname:arex"; # GLUE2ComputingService ID $c->{owner} = $config->{service}{ClusterOwner} if $config->{service}{ClusterOwner}; $c->{acl} = [ @authorizedvos ] if @authorizedvos; $c->{location} = $config->{location}{PostCode} if $config->{location}{PostCode}; $c->{issuerca} = $host_info->{issuerca} if $host_info->{issuerca}; $c->{'issuerca-hash'} = $host_info->{issuerca_hash} if $host_info->{issuerca_hash}; $c->{credentialexpirationtime} = mds_date($credenddate) if $credenddate; $c->{trustedca} = $host_info->{trustedcas} if $host_info->{trustedcas}; $c->{contactstring} = "gsiftp://$hostname:".$config->{GridftpdPort}.$config->{GridftpdMountPoint}; $c->{'interactive-contactstring'} = $config->{service}{InteractiveContactstring} if $config->{service}{InteractiveContactstring}; $c->{support} = [ @supportmails ] if @supportmails; $c->{'lrms-type'} = $lrms_info->{cluster}{lrms_type}; $c->{'lrms-version'} = $lrms_info->{cluster}{lrms_version} if $lrms_info->{cluster}{lrms_version}; $c->{'lrms-config'} = $config->{service}{lrmsconfig} if $config->{service}{lrmsconfig}; # orphan $c->{architecture} = $config->{service}{Platform} if $config->{service}{Platform}; push @{$c->{opsys}}, @{$config->{service}{OpSys}} if $config->{service}{OpSys}; push @{$c->{opsys}}, $config->{service}{OSName}.'-'.$config->{service}{OSVersion} if $config->{service}{OSName} and $config->{service}{OSVersion}; $c->{benchmark} = [ map {join ' @ ', split /\s+/,$_,2 } @{$config->{service}{Benchmark}} ] if $config->{service}{Benchmark}; $c->{nodecpu} = $config->{service}{CPUModel}." @ ".$config->{service}{CPUClockSpeed}." MHz" if $config->{service}{CPUModel} and $config->{service}{CPUClockSpeed}; $c->{homogeneity} = $homogeneous ? 
'TRUE' : 'FALSE'; $c->{nodememory} = $config->{service}{MaxVirtualMemory} if ( $homogeneous && $config->{service}{MaxVirtualMemory} ); $c->{nodeaccess} = 'inbound' if $inbound; $c->{nodeaccess} = 'outbound' if $outbound; if ($config->{service}{totalcpus}) { $c->{totalcpus} = $config->{service}{totalcpus}; } else { $c->{totalcpus} = $lrms_info->{cluster}{totalcpus}; } $c->{usedcpus} = $lrms_info->{cluster}{usedcpus}; $c->{cpudistribution} = $lrms_info->{cluster}{cpudistribution}; $c->{prelrmsqueued} = ($gmjobcount{accepted} + $gmjobcount{preparing} + $gmjobcount{submit}); $c->{totaljobs} = ($gmjobcount{totaljobs} - $gmjobcount{finishing} - $gmjobcount{finished} - $gmjobcount{deleted} + $lrms_info->{cluster}{queuedcpus} + $lrms_info->{cluster}{usedcpus} - $gmjobcount{inlrms}); $c->{localse} = $config->{service}{LocalSE} if $config->{service}{LocalSE}; $c->{'sessiondir-free'} = $host_info->{session_free}; $c->{'sessiondir-total'} = $host_info->{session_total}; if ($config->{control}{'.'}{defaultttl}) { my ($sessionlifetime) = split ' ', $config->{control}{'.'}{defaultttl}; $c->{'sessiondir-lifetime'} = int $sessionlifetime/60 if $sessionlifetime; } $c->{'cache-free'} = $host_info->{cache_free}; $c->{'cache-total'} = $host_info->{cache_total}; $c->{runtimeenvironment} = [ sort keys %$rte_info ]; push @{$c->{middleware}}, "nordugrid-arc-".$config->{arcversion}; push @{$c->{middleware}}, "globus-$host_info->{globusversion}" if $host_info->{globusversion}; push @{$c->{middleware}}, @{$config->{service}{Middleware}} if $config->{service}{Middleware}; # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # my $getQueues = sub { return undef unless my ($share, $dummy) = each %{$config->{shares}}; my $q = {}; my $qinfo = $lrms_info->{queues}{$share}; # merge cluster wide and queue-specific options my $sconfig = { %{$config->{service}}, %{$config->{shares}{$share}} }; $sconfig->{ExecutionEnvironmentName} ||= []; my @nxenvs = @{$sconfig->{ExecutionEnvironmentName}}; if (@nxenvs) { my $xeconfig = $config->{xenvs}{$nxenvs[0]}; $log->info("The Nordugrid InfoSchema is not compatible with multiple ExecutionEnvironments per share") if @nxenvs > 1; $sconfig = { %$sconfig, %$xeconfig }; } $q->{'name'} = $share; if ( defined $config->{GridftpdAllowNew} and $config->{GridftpdAllowNew} == 0 ) { $q->{status} = 'inactive, grid-manager does not accept new jobs'; } elsif ( $host_info->{gm_alive} ne 'all' ) { if ($host_info->{gm_alive} eq 'some') { $q->{status} = 'degraded, one or more grid-managers are down'; } else { $q->{status} = $config->{remotegmdirs} ? 'inactive, all grid managers are down' : 'inactive, grid-manager is down'; } } elsif (not $host_info->{processes}{'gridftpd'}) { $q->{status} = 'inactive, gridftpd is down'; } elsif (not $host_info->{hostcert_enddate} or not $host_info->{issuerca_enddate}) { $q->{status} = 'inactive, host credentials missing'; } elsif ($host_info->{hostcert_expired} or $host_info->{issuerca_expired}) { $q->{status} = 'inactive, host credentials expired'; } elsif ( $qinfo->{status} < 0 ) { $q->{status} = 'inactive, LRMS interface returns negative status'; } else { $q->{status} = 'active'; } $q->{comment}=$sconfig->{Description} if $sconfig->{Description}; if ( defined $sconfig->{OtherInfo}) { my @sotherinfo = @{ $sconfig->{OtherInfo} }; $q->{comment} = "$q->{comment}, OtherInfo: @sotherinfo"; } $q->{schedulingpolicy} = $sconfig->{SchedulingPolicy} if $sconfig->{SchedulingPolicy}; if (defined $sconfig->{Homogeneous}) { $q->{homogeneity} = $sconfig->{Homogeneous} ? 
'TRUE' : 'FALSE'; } else { $q->{homogeneity} = @nxenvs > 1 ? 'FALSE' : 'TRUE'; } $q->{nodecpu} = $sconfig->{CPUModel}." @ ".$sconfig->{CPUClockSpeed}." MHz" if $sconfig->{CPUModel} and $sconfig->{CPUClockSpeed}; $q->{nodememory} = $sconfig->{MaxVirtualMemory} if $sconfig->{MaxVirtualMemory}; $q->{architecture} = $sconfig->{Platform} if $sconfig->{Platform}; push @{$q->{opsys}}, $sconfig->{OSName}.'-'.$sconfig->{OSVersion} if $sconfig->{OSName} and $sconfig->{OSVersion}; push @{$q->{opsys}}, @{$sconfig->{OpSys}} if $sconfig->{OpSys}; $q->{benchmark} = [ map {join ' @ ', split /\s+/,$_,2 } @{$sconfig->{Benchmark}} ] if $sconfig->{Benchmark}; $q->{maxrunning} = $qinfo->{maxrunning} if defined $qinfo->{maxrunning}; $q->{maxqueuable}= $qinfo->{maxqueuable}if defined $qinfo->{maxqueuable}; $q->{maxuserrun} = $qinfo->{maxuserrun} if defined $qinfo->{maxuserrun}; $q->{maxcputime} = int $qinfo->{maxcputime}/60 if defined $qinfo->{maxcputime}; $q->{mincputime} = int $qinfo->{mincputime}/60 if defined $qinfo->{mincputime}; $q->{defaultcputime} = int $qinfo->{defaultcput}/60 if defined $qinfo->{defaultcput}; $q->{maxwalltime} = int $qinfo->{maxwalltime}/60 if defined $qinfo->{maxwalltime}; $q->{minwalltime} = int $qinfo->{minwalltime}/60 if defined $qinfo->{minwalltime}; $q->{defaultwalltime} = int $qinfo->{defaultwallt}/60 if defined $qinfo->{defaultwallt}; $q->{running} = $qinfo->{running} if defined $qinfo->{running}; $q->{gridrunning} = $gridrunning{$share} || 0; $q->{gridqueued} = $gridqueued{$share} || 0; $q->{localqueued} = ($qinfo->{queued} - ( $gridqueued{$share} || 0 )); if ( $q->{localqueued} < 0 ) { $q->{localqueued} = 0; } $q->{prelrmsqueued} = $prelrmsqueued{$share} || 0; if ( $sconfig->{totalcpus} ) { $q->{totalcpus} = $sconfig->{totalcpus}; # orphan } elsif ( $qinfo->{totalcpus} ) { $q->{totalcpus} = $qinfo->{totalcpus}; } keys %$gmjobs_info; # reset iterator of each() # # # # # # # # # # # # # # # # # # # # # # # # # # # # # my $getJobs = sub { # find the next job that belongs to the current share my ($jobid, $gmjob); while (1) { return undef unless ($jobid, $gmjob) = each %$gmjobs_info; last if $gmjob->{share} eq $share; } my $j = {}; $j->{name} = $jobid; $j->{globalid} = $c->{contactstring}."/$jobid"; $j->{globalowner} = $gmjob->{subject} if $gmjob->{subject}; $j->{jobname} = $gmjob->{jobname} if $gmjob->{jobname}; $j->{submissiontime} = $gmjob->{starttime} if $gmjob->{starttime}; $j->{execcluster} = $hostname if $hostname; $j->{execqueue} = [ $share ] if $share; $j->{cpucount} = [ $gmjob->{count} || 1 ]; $j->{sessiondirerasetime} = [ $gmjob->{cleanuptime} ] if $gmjob->{cleanuptime}; $j->{stdin} = [ $gmjob->{stdin} ] if $gmjob->{stdin}; $j->{stdout} = [ $gmjob->{stdout} ] if $gmjob->{stdout}; $j->{stderr} = [ $gmjob->{stderr} ] if $gmjob->{stderr}; $j->{gmlog} = [ $gmjob->{gmlog} ] if $gmjob->{gmlog}; $j->{runtimeenvironment} = $gmjob->{runtimeenvironments} if $gmjob->{runtimeenvironments}; $j->{submissionui} = $gmjob->{clientname} if $gmjob->{clientname}; $j->{clientsoftware} = $gmjob->{clientsoftware} if $gmjob->{clientsoftware}; $j->{proxyexpirationtime} = $gmjob->{delegexpiretime} if $gmjob->{delegexpiretime}; $j->{rerunable} = $gmjob->{failedstate} ? 
$gmjob->{failedstate} : 'none' if $gmjob->{status} eq "FAILED"; $j->{comment} = [ $gmjob->{comment} ] if $gmjob->{comment}; # added to record which was the submission interface if ( $gmjob->{interface} ) { my $submittedstring = 'SubmittedVia='.$gmjob->{interface}; push(@{$j->{comment}}, $submittedstring); }; $j->{reqcputime} = int $gmjob->{reqcputime}/60 if $gmjob->{reqcputime}; $j->{reqwalltime} = int $gmjob->{reqwalltime}/60 if $gmjob->{reqwalltime}; if ($gmjob->{status} eq "INLRMS") { my $localid = $gmjob->{localid} or $log->warning("No local id for job $jobid") and next; my $lrmsjob = $lrms_info->{jobs}{$localid} or $log->warning("No local job for $jobid") and next; $j->{usedmem} = $lrmsjob->{mem} if defined $lrmsjob->{mem}; $j->{usedwalltime}= int $lrmsjob->{walltime}/60 if defined $lrmsjob->{walltime}; $j->{usedcputime} = int $lrmsjob->{cputime}/60 if defined $lrmsjob->{cputime}; $j->{reqwalltime} = int $lrmsjob->{reqwalltime}/60 if defined $lrmsjob->{reqwalltime}; $j->{reqcputime} = int $lrmsjob->{reqcputime}/60 if defined $lrmsjob->{reqcputime}; $j->{executionnodes} = $lrmsjob->{nodes} if $lrmsjob->{nodes}; if ($lrms_info->{cluster}{lrms_type} eq "boinc") { # BOINC allocates a dynamic number of cores to jobs so set here what is actually used # This is abusing the schema a bit since cpucount is really requested slots $j->{cpucount} = int $lrmsjob->{cpus} if defined $lrmsjob->{cpus}; } # LRMS-dependent attributes taken from LRMS when the job # is in state 'INLRMS' #nuj0:status # take care of the GM latency, check if the job is in LRMS # according to both GM and LRMS, GM might think the job # is still in LRMS while the job have already left LRMS if ($lrmsjob->{status} and $lrmsjob->{status} ne 'EXECUTED') { $j->{status} = "INLRMS:$lrmsjob->{status}"; } else { $j->{status} = 'EXECUTED'; } push @{$j->{comment}}, @{$lrmsjob->{comment}} if $lrmsjob->{comment}; $j->{queuerank} = $lrmsjob->{rank} if $lrmsjob->{rank}; } else { # LRMS-dependent attributes taken from GM when # the job has passed the 'INLRMS' state $j->{status} = $gmjob->{status}; $j->{usedwalltime} = int $gmjob->{WallTime}/60 if defined $gmjob->{WallTime}; $j->{usedcputime} = int $gmjob->{CpuTime}/60 if defined $gmjob->{CpuTime}; $j->{executionnodes} = $gmjob->{nodenames} if $gmjob->{nodenames}; $j->{usedmem} = $gmjob->{UsedMem} if $gmjob->{UsedMem}; $j->{completiontime} = $gmjob->{completiontime} if $gmjob->{completiontime}; $j->{errors} = join "; ", @{$gmjob->{errors}} if $gmjob->{errors}; $j->{exitcode} = $gmjob->{exitcode} if defined $gmjob->{exitcode}; } return $j; }; $q->{jobs} = $getJobs; # # # # # # # # # # # # # # # # # # # # # # # # # # # # # my $usernumber = 0; keys %$usermap; # reset iterator of each() my $getUsers = sub { # find the next user that is authorized in this queue my ($sn, $localid, $lrms_user); while (1) { return undef unless ($sn, $localid) = each %$usermap; # skip users whose SNs need to be base64 encoded if ($sn =~ /^[\s,:<]/ or $sn =~ /[\x0D\x0A\x00]/ or $sn =~ /[^\x00-\x7F]/) { $log->warning("While collecting info for queue $q->{'name'}: user with sn $sn will not be published due to characters that require base64 encoding. Skipping"); next; } $lrms_user = $qinfo->{users}{$localid}; last if not exists $qinfo->{acl_users}; last if grep { $_ eq $localid } @{$qinfo->{acl_users}}; } my $u = {}; ++$usernumber; my $space = $host_info->{localusers}{$localid}; #name= CN from the SN + unique number my $cn = ($sn =~ m#/CN=([^/]+)(/Email)?#) ? 
$1 : $sn; $u->{name} = "${cn}...$usernumber"; $u->{sn} = $sn; $u->{diskspace} = $space->{diskfree} if defined $space->{diskfree}; my @freecpus; # sort by decreasing number of cpus for my $ncpu ( sort { $b <=> $a } keys %{$lrms_user->{freecpus}} ) { my $minutes = $lrms_user->{freecpus}{$ncpu}; push @freecpus, $minutes ? "$ncpu:$minutes" : $ncpu; } $u->{freecpus} = join(" ", @freecpus) || 0; $u->{queuelength} = $gm_queued{$sn} + $lrms_user->{queuelength}; return $u; }; $q->{users} = $getUsers; return $q; }; $c->{queues} = $getQueues; return $c; }; return $getCluster; } 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/NGldifPrinter.pm0000644000000000000000000000012412116113676026565 xustar000000000000000027 mtime=1362663358.222032 27 atime=1513200575.763717 30 ctime=1513200663.159786131 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/NGldifPrinter.pm0000644000175000002070000002024412116113676026634 0ustar00mockbuildmock00000000000000package NGldifPrinter; use strict; use base 'LdifPrinter'; use POSIX; sub new { my ($this, $handle, $ttl) = @_; my $self = $this->SUPER::new($handle); my $now = time; $self->{validfrom} = strftime("%Y%m%d%H%M%SZ", gmtime($now)); $self->{validto} = strftime("%Y%m%d%H%M%SZ", gmtime($now + $ttl)); return $self; } # # Print attributes # sub beginGroup { my ($self, $name) = @_; $self->begin('nordugrid-info-group-name' => $name); $self->MdsAttributes(); $self->attribute(objectClass => 'nordugrid-info-group'); $self->attribute('nordugrid-info-group-name' => $name); } sub MdsAttributes { my ($self) = @_; $self->attribute(objectClass => 'Mds'); $self->attribute('Mds-validfrom' => $self->{validfrom}); $self->attribute('Mds-validto' => $self->{validto}); } sub clusterAttributes { my ($self, $data) = @_; $self->MdsAttributes(); $self->attribute(objectClass => 'nordugrid-cluster'); $self->attributes($data, 'nordugrid-cluster-', qw( name aliasname contactstring support lrms-type lrms-version lrms-config architecture opsys homogeneity nodecpu nodememory totalcpus cpudistribution sessiondir-free sessiondir-total cache-free cache-total runtimeenvironment localse middleware totaljobs usedcpus queuedjobs location owner issuerca nodeaccess comment interactive-contactstring benchmark sessiondir-lifetime prelrmsqueued issuerca-hash trustedca acl credentialexpirationtime )); } sub queueAttributes { my ($self, $data) = @_; $self->MdsAttributes(); $self->attribute(objectClass => 'nordugrid-queue'); $self->attributes($data, 'nordugrid-queue-', qw( name status running queued maxrunning maxqueuable maxuserrun maxcputime mincputime defaultcputime schedulingpolicy totalcpus nodecpu nodememory architecture opsys gridrunning gridqueued comment benchmark homogeneity prelrmsqueued localqueued maxwalltime minwalltime defaultwalltime maxtotalcputime )); } sub jobAttributes { my ($self, $data) = @_; $self->MdsAttributes(); $self->attribute(objectClass => 'nordugrid-job'); $self->attributes($data, 'nordugrid-job-', qw( globalid globalowner execcluster execqueue stdout stderr stdin reqcputime status queuerank comment submissionui submissiontime usedcputime usedwalltime sessiondirerasetime usedmem errors jobname runtimeenvironment cpucount executionnodes gmlog clientsoftware proxyexpirationtime completiontime exitcode rerunable reqwalltime )); } sub userAttributes { my ($self, $data) = @_; $self->MdsAttributes(); $self->attribute(objectClass => 'nordugrid-authuser'); $self->attributes($data, 'nordugrid-authuser-', qw( name sn freecpus diskspace queuelength )); } # # Follow 
hierarchy # sub jobs { LdifPrinter::Entries(@_, 'nordugrid-job-', 'globalid', \&jobAttributes); } sub users { LdifPrinter::Entries(@_, 'nordugrid-authuser-', 'name', \&userAttributes); } sub queues { LdifPrinter::Entries(@_, 'nordugrid-queue-', 'name', \&queueAttributes, sub { my ($self, $data) = @_; $self->beginGroup('jobs'); $self->jobs($data->{jobs}); $self->end(); $self->beginGroup('users'); $self->users($data->{users}); $self->end(); }); } sub cluster { LdifPrinter::Entry(@_, 'nordugrid-cluster-', 'name', \&clusterAttributes, sub { my ($self, $data) = @_; $self->queues($data->{queues}); }); } sub Top { my ($self, $data) = @_; $self->begin('o' => "grid"); $self->begin('Mds-Vo-name' => "local"); $self->cluster($data); } 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/InfosysHelper.pm0000644000000000000000000000012413065017774026656 xustar000000000000000027 mtime=1490296828.950435 27 atime=1513200575.833718 30 ctime=1513200663.160786143 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/InfosysHelper.pm0000644000175000002070000002713313065017774026731 0ustar00mockbuildmock00000000000000package InfosysHelper; # Helper functions to be used for communication between the A-REX infoprovider and ldap-infosys # # * for A-REX infoprovider: # - createLdifScript: creates a script that prints the ldif from the infoprovider when executed, # - notifyInfosys: notifies ldap-infosys through a fifo file created by ldap-infosys. # * for ldap-infosys: # - waitForProvider: waits for A-REX infoprovider to give a life sign on the fifo it created # - ldifIsReady: calls waitForProvider and checks that ldif is fresh enough use POSIX; use Fcntl; use English; use File::Basename; use File::Temp qw(tempfile tempdir); use File::Path qw(mkpath); #use Data::Dumper::Concise; ## usage: # print Dumper($datastructure); use LogUtils; our $log = LogUtils->getLogger(__PACKAGE__); LogUtils::level("VERBOSE"); # # Given a pid file, returns the user id of the running process # sub uidFromPidfile { my ($pidfile) = @_; open(my $fh, "<", "$pidfile") or return undef; my @stat = stat $pidfile; my $pid = <$fh>; close $fh; $pid =~ m/^\s*(\d+)\s*$/ or return undef; my $uid = `ps -ouid= $pid`; close $fh; $uid =~ m/^\s*(\d+)\s*$/ or return undef; return $1; } # # stat the file, get the uid, pid # sub uidGidFromFile { my ($file) = @_; my @stat = stat $file; return () unless @stat; return (@stat[4,5]); } # # switch effective user if possible. This is reversible. # It switches back to root if the passed parameter # is 0. # sub switchEffectiveUser { my ($uid) = @_; if ($UID == 0 && $uid != 0) { my ($name, $pass, $uid, $gid) = getpwuid($uid); return unless defined $gid; eval { $EGID = $gid; $EUID = $uid; }; # Switch back to original UID/GID } else { eval { $EGID = $GID; $EUID = $UID; }; }; } # # Waits for a sign from the infoprovider. Implemented using a fifo file. # * creates a fifo (unlinks it first if it already exists) # * opens the fifo -- this blocks until the other end opens the fifo for writing # * returns false in case of error # sub waitForProvider { my ($runtime_dir) = @_; my $fifopath = "$runtime_dir/ldif-provider.fifo"; if (! -d $runtime_dir) { $log->warning("No such directory: $runtime_dir"); return undef; } if (-e $fifopath) { $log->info("Unlinking stale fifo file: $fifopath"); unlink $fifopath; } unless (POSIX::mkfifo $fifopath, 0600) { $log->warning("Mkfifo failed: $fifopath: $!"); return undef; } $log->verbose("New fifo created: $fifopath"); my $handle; # This might be a long wait. 
In case somebody kills us, be nice and clean up. $log->info("Start waiting for notification from A-REX's infoprovider"); my $ret; eval { local $SIG{TERM} = sub { die "terminated\n" }; unless ($ret = sysopen($handle, $fifopath, O_RDONLY)) { $log->warning("Failed to open: $fifopath: $!"); unlink $fifopath; } else { while(<$handle>){}; # not interested in contents } }; close $handle; unlink $fifopath; if ($@) { $log->error("Unexpected: $@") unless $@ eq "terminated\n"; $log->warning("SIGTERM caught while waiting for notification from A-REX's infoprovider"); return undef; } return undef unless $ret; $log->info("Notification received from A-REX's infoprovider"); return 1; } { my $cache = undef; # # Finds infosys' runtime directory and the infosys user's uid, gid # TODO: this is a bit complicated and due to BDII4/BDII5. # Maybe it needs simplification, but requires understanding # of what happens in BDII5 since they changed directory paths. # sub findInfosys { return @$cache if defined $cache; my ($config) = @_; my ($bdii_run_dir) = $config->{bdii_run_dir} || "/var/run/arc/bdii"; # remove trailing slashes $bdii_run_dir =~ s|/\z||; $log->debug("BDII run dir set to: $bdii_run_dir"); # TODO: remove this legacy BDII4 location from here and from grid-infosys my ($bdii_var_dir) = $config->{bdii_var_dir} || "/var/lib/arc/bdii"; # remove trailing slashes $bdii_var_dir =~ s|/\z||; $log->debug("BDII var dir set to: $bdii_var_dir"); my ($bdii_update_pid_file) = $config->{bdii_update_pid_file} || "$bdii_run_dir/bdii-update.pid"; $log->debug("BDII pid guessed location: $bdii_update_pid_file. Will search for it later"); my ($infosys_uid, $infosys_gid); my $infosys_ldap_run_dir = $config->{infosys_ldap_run_dir} || "/var/run/arc/infosys"; # remove trailing slashes $infosys_ldap_run_dir =~ s|/\z||; $log->debug("LDAP subsystem run dir set to $infosys_ldap_run_dir"); # search for bdii pid file: legacy bdii4 locations still here # TODO: remove bdii_var_dir from everywhere (also from grid-infosys) # if not specified with bdii_update_pid_file, it's likely here my $existsPidFile = 0; my $bdii5_pidfile = "$bdii_run_dir/bdii-update.pid"; my $bdii4_pidfile = "$bdii_var_dir/bdii-update.pid"; for my $pidfile ( $bdii_update_pid_file, $bdii5_pidfile, $bdii4_pidfile) { unless ( ($infosys_uid, $infosys_gid) = uidGidFromFile($pidfile) ) { $log->verbose("BDII pidfile not found at: $pidfile"); next; } $existsPidFile = 1; $log->verbose("BDII pidfile found at: $pidfile"); next unless (my $user = getpwuid($infosys_uid)); $log->verbose("BDII pidfile owned by: $user ($infosys_uid)"); last; } unless ($existsPidFile) { $log->warning("BDII pid file not found. Check that nordugrid-arc-bdii is running, or that bdii_run_dir is set"); return @$cache = (); } unless (-d $infosys_ldap_run_dir) { $log->warning("LDAP information system runtime directory does not exist. Check that:\n \t *) The arc.conf parameter infosys_ldap_run_dir is correctly set im manually added. \n \t *) nordugrid-arc-bdii is running"); return @$cache = (); } return @$cache = ($infosys_ldap_run_dir, $infosys_uid, $infosys_gid); } } # # # Notify Infosys that there is a new fresh ldif. Implemented using a fifo file. 
# * finds out whether there is a reader on the other end if the fifo # * opens the file and then closes it (thus waking up the listener on other end) # * returns false on error # sub notifyInfosys { my ($config) = @_; my ($infosys_ldap_run_dir) = findInfosys($config); return undef unless $infosys_ldap_run_dir; my $fifopath = "$infosys_ldap_run_dir/ldif-provider.fifo"; unless (-e $fifopath) { $log->info("LDAP subsystem has not yet created fifo file $fifopath"); return undef; } my $handle; # Open the fifo -- Normally it should't block since the other end is # supposed to be listening. If it blocks nevertheless, something must have # happened to the reader and it's not worth waiting here. Set an alarm and # get out. my $ret; eval { local $SIG{ALRM} = sub { die "alarm\n" }; alarm 5; unless ($ret = sysopen($handle, $fifopath, O_WRONLY)) { $log->warning("Failed to open fifo (as user id $EUID): $fifopath: $!"); } alarm 0; }; if ($@) { $log->error("Unexpected: $@") unless $@ eq "alarm\n"; # timed out -- no reader $log->warning("Fifo file exists but LDAP information system is not listening"); return undef; } return undef unless $ret; close $handle; $log->info("LDAP information system notified on fifo: $fifopath"); return $handle; } # # To be called by the A-REX infoprovider # * Takes the ldif generated by calling &$print_ldif and creates an executable # script that when executed, outputs that ldif. # * If applicable: switches user to that running infosys and then switches back to root # * Returns false on error # sub createLdifScript { my ($config, $print_ldif) = @_; my ($infosys_ldap_run_dir, $infosys_uid, $infosys_gid) = findInfosys($config); return undef unless $infosys_ldap_run_dir; eval { mkpath($infosys_ldap_run_dir); }; if ($@) { $log->warning("Failed creating parent directory $infosys_ldap_run_dir: $@"); return undef; } unless (chown $infosys_uid, $infosys_gid, $infosys_ldap_run_dir) { $log->warning("Chown to uid($infosys_uid) gid($infosys_gid) failed on: $infosys_ldap_run_dir: $!"); return undef; } switchEffectiveUser($infosys_uid); my ($h, $tmpscript); eval { my $template = "ldif-provider.sh.XXXXXXX"; ($h, $tmpscript) = tempfile($template, DIR => $infosys_ldap_run_dir); }; if ($@) { $log->warning("Failed to create temporary file: $@"); switchEffectiveUser($UID); return undef; } # Hopefully this string is not in the ldif my $mark=substr rand(), 2; eval { local $SIG{TERM} = sub { die "terminated\n" }; die "file\n" unless print $h "#!/bin/sh\n\n"; die "file\n" unless print $h "# Autogenerated by A-REX's infoprovider\n\n"; die "file\n" unless print $h "cat <<'EOF_$mark'\n"; &$print_ldif($h); die "file\n" unless print $h "\nEOF_$mark\n"; die "file\n" unless close $h; }; if ($@) { my $msg = "An error occured while creating ldif generator script: $@"; $msg = "An error occured while writing to: $tmpscript: $!" 
if $@ eq "file\n"; $msg = "SIGTERM caught while creating ldif generator script" if $@ eq "terminated\n"; close $h; unlink $tmpscript; $log->warning($msg); $log->verbose("Removing temporary ldif generator script"); switchEffectiveUser($UID); return undef; } unless (chmod 0700, $tmpscript) { $log->warning("Chmod failed: $tmpscript: $!"); unlink $tmpscript; switchEffectiveUser($UID); return undef; } my $finalscript = "$infosys_ldap_run_dir/ldif-provider.sh"; unless (rename $tmpscript, $finalscript) { $log->warning("Failed renaming temporary script to $finalscript: $!"); unlink $tmpscript; switchEffectiveUser($UID); return undef; } $log->verbose("Ldif generator script created: $finalscript"); switchEffectiveUser($UID); return 1; } # # To be called by ldap-infosys # * returns true if/when there is a fresh ldif # sub ldifIsReady { my ($infosys_ldap_run_dir, $max_age) = @_; LogUtils::timestamps(1); # Check if ldif generator script exists and is fresh enough my $scriptpath = "$infosys_ldap_run_dir/ldif-provider.sh"; unless (-e $scriptpath) { $log->info("The ldif generator script was not found ($scriptpath)"); $log->info("This file should have been created by A-REX's infoprovider. Check that A-REX is running."); return undef; } my @stat = stat $scriptpath; $log->error("Cant't stat $scriptpath: $!") unless @stat; if (time() - $stat[9] > $max_age) { $log->info("The ldif generator script is too old ($scriptpath)"); $log->info("This file should have been refreshed by A-REX's infoprovider. Check that A-REX is running."); return undef; } # A-REX has started up... Wait for the next infoprovider cycle waitForProvider($infosys_ldap_run_dir) or $log->warning("Failed to receive notification from A-REX's infoprovider"); $log->verbose("Using ldif generator script: $scriptpath"); return 1; } 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/GLUE2xmlPrinter.pm0000644000000000000000000000012412116113676026761 xustar000000000000000027 mtime=1362663358.222032 27 atime=1513200575.870718 30 ctime=1513200663.156786094 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/GLUE2xmlPrinter.pm0000644000175000002070000005472012116113676027036 0ustar00mockbuildmock00000000000000package GLUE2xmlPrinter; use base "XmlPrinter"; # the line below is useful for debugging. see http://perldoc.perl.org/Data/Dumper.html for usage # use Data::Dumper; sub new { my ($this, $handle, $splitjobs) = @_; my $self = $this->SUPER::new($handle); $self->{splitjobs} = $splitjobs; return $self; } sub beginEntity { my ($self, $data, $name, $basetype) = @_; return undef unless $name; $data->{BaseType} = $basetype; $self->begin($name, $data, qw( BaseType CreationTime Validity )); $self->properties($data, qw( ID Name OtherInfo )); } sub Element { my ($self, $collector, $name, $basetype, $filler) = @_; return unless $collector and my $data = &$collector(); $self->beginEntity($data, $name, $basetype); &$filler($self, $data) if $filler; $self->end($name); } # This function creates an open element. 
# this has been used for problems in the way sub ElementNoClose { my ($self, $collector, $name, $basetype, $filler) = @_; return unless $collector and my $data = &$collector(); $self->beginEntity($data, $name, $basetype); &$filler($self, $data) if $filler; } sub Elements { my ($self, $collector, $name, $basetype, $filler) = @_; while ($collector and my $data = &$collector()) { $self->beginEntity($data, $name, $basetype); &$filler($self, $data) if $filler; $self->end($name); } } sub Location { Element(@_, 'Location', undef, sub { my ($self, $data) = @_; $self->properties($data, qw( Address Place Country PostCode Latitude Longitude )); }); } sub Contacts { Elements(@_, 'Contact', undef, sub { my ($self, $data) = @_; $self->properties($data, qw( Detail Type )); }); } sub AdminDomain { # Warning: this element is NOT closed. # must be closed using the end function. ElementNoClose(@_, 'AdminDomain', 'Domain', sub { my ($self, $data) = @_; $self->properties($data, qw( Description WWW Distributed Owner)); $self->Location($data->{Location}); $self->Contacts($data->{Contacts}); }); } sub AccessPolicies { Elements(@_, 'AccessPolicy', 'Policy', sub { my ($self, $data) = @_; $self->properties($data, qw( Scheme Rule )); if ($data->{UserDomainID}) { $self->begin('Associations'); $self->properties($data, 'UserDomainID'); $self->end('Associations'); } }); } sub MappingPolicies { Elements(@_, 'MappingPolicy', 'Policy', sub { my ($self, $data) = @_; $self->properties($data, qw( Scheme Rule )); if ($data->{UserDomainID}) { $self->begin('Associations'); $self->properties($data, 'UserDomainID'); $self->end('Associations'); } }); } sub Endpoint { Element(@_, 'Endpoint', 'Endpoint', sub { my ($self, $data) = @_; $self->properties($data, qw( URL Capability Technology InterfaceName InterfaceVersion InterfaceExtension WSDL SupportedProfile Semantics Implementor ImplementationName ImplementationVersion QualityLevel HealthState HealthStateInfo ServingState StartTime IssuerCA TrustedCA DowntimeAnnounce DowntimeStart DowntimeEnd DowntimeInfo )); $self->AccessPolicies($data->{AccessPolicies}); #if ($data->{ShareID}) { # $self->begin('Associations'); # $self->properties($data, 'ShareID'); # $self->end('Associations'); #} #if ($data->{Activities}) { # $self->begin('Activities'); # $self->ComputingActivities($data->{Activities}); # $self->end('Activities'); #} }); } sub Endpoints { Elements(@_, 'Endpoint', 'Endpoint', sub { my ($self, $data) = @_; $self->properties($data, qw( URL Capability Technology InterfaceName InterfaceVersion InterfaceExtension WSDL SupportedProfile Semantics Implementor ImplementationName ImplementationVersion QualityLevel HealthState HealthStateInfo ServingState StartTime IssuerCA TrustedCA DowntimeAnnounce DowntimeStart DowntimeEnd DowntimeInfo )); $self->AccessPolicies($data->{AccessPolicies}); #if ($data->{ShareID}) { # $self->begin('Associations'); # $self->properties($data, 'ShareID'); # $self->end('Associations'); #} #if ($data->{Activities}) { # $self->begin('Activities'); # $self->ComputingActivities($data->{Activities}); # $self->end('Activities'); #} }); } sub Services { Elements(@_, 'Service', 'Service', sub { my ($self, $data) = @_; $self->properties($data, qw( Capability Type QualityLevel StatusInfo Complexity )); # XML validation is order-sensitive. 
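# Sub-elements are therefore emitted in the fixed order the schema expects:
# Location, Contacts, Endpoints, ComputingShares, ComputingManager,
# ToStorageServices, then the Associations block.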
$self->Location($data->{Location}); $self->Contacts($data->{Contacts}); $self->Endpoints($data->{Endpoints}); $self->ComputingShares($data->{ComputingShares}); $self->ComputingManager($data->{ComputingManager}); $self->ToStorageServices($data->{ToStorageServices}); if ($data->{ServiceID}) { $self->begin('Associations'); $self->properties($data, 'ServiceID'); $self->end('Associations'); } }); } sub ComputingService { Element(@_, 'ComputingService', 'Service', sub { my ($self, $data) = @_; $self->properties($data, qw( Capability Type QualityLevel StatusInfo Complexity )); $self->Location($data->{Location}); $self->Contacts($data->{Contacts}); $self->properties($data, qw( TotalJobs RunningJobs WaitingJobs StagingJobs SuspendedJobs PreLRMSWaitingJobs )); $self->ComputingEndpoints($data->{ComputingEndpoints}); $self->ComputingShares($data->{ComputingShares}); $self->ComputingManager($data->{ComputingManager}); $self->ToStorageServices($data->{ToStorageServices}); if ($data->{ServiceID}) { $self->begin('Associations'); $self->properties($data, 'ServiceID'); $self->end('Associations'); } }); } sub ComputingEndpoints { Elements(@_, 'ComputingEndpoint', 'Endpoint', sub { my ($self, $data) = @_; $self->properties($data, qw( URL Capability Technology InterfaceName InterfaceVersion InterfaceExtension WSDL SupportedProfile Semantics Implementor ImplementationName ImplementationVersion QualityLevel HealthState HealthStateInfo ServingState StartTime IssuerCA TrustedCA DowntimeAnnounce DowntimeStart DowntimeEnd DowntimeInfo )); $self->AccessPolicies($data->{AccessPolicies}); $self->properties($data, qw( Staging JobDescription TotalJobs RunningJobs WaitingJobs StagingJobs SuspendedJobs PreLRMSWaitingJobs )); if ($data->{ComputingShareID}) { $self->begin('Associations'); $self->properties($data, 'ComputingShareID'); $self->end('Associations'); } if ($data->{ComputingActivities}) { $self->begin('ComputingActivities') unless ($self->{splitjobs}); $self->ComputingActivities($data->{ComputingActivities}); $self->end('ComputingActivities') unless ($self->{splitjobs}); } }); } sub ComputingShares { Elements(@_, 'ComputingShare', 'Share', sub { my ($self, $data) = @_; $self->properties($data, qw( Description )); $self->MappingPolicies($data->{MappingPolicies}); $self->properties($data, qw( MappingQueue MaxWallTime MaxMultiSlotWallTime MinWallTime DefaultWallTime MaxCPUTime MaxTotalCPUTime MinCPUTime DefaultCPUTime MaxTotalJobs MaxRunningJobs MaxWaitingJobs MaxPreLRMSWaitingJobs MaxUserRunningJobs MaxSlotsPerJob MaxStateInStreams MaxStageOutStreams SchedulingPolicy MaxMainMemory GuaranteedMainMemory MaxVirtualMemory GuaranteedVirtualMemory MaxDiskSpace DefaultStorageService Preemption ServingState TotalJobs RunningJobs LocalRunningJobs WaitingJobs LocalWaitingJobs SuspendedJobs LocalSuspendedJobs StagingJobs PreLRMSWaitingJobs EstimatedAverageWaitingTime EstimatedWorstWaitingTime FreeSlots FreeSlotsWithDuration UsedSlots RequestedSlots ReservationPolicy Tag )); $self->begin('Associations'); $self->properties($data, 'ComputingEndpointID'); $self->properties($data, 'ExecutionEnvironmentID'); $self->end('Associations'); }); } sub ComputingManager { Element(@_, 'ComputingManager', 'Manager', sub { my ($self, $data) = @_; $self->properties($data, qw( ProductName ProductVersion Reservation BulkSubmission TotalPhysicalCPUs TotalLogicalCPUs TotalSlots SlotsUsedByLocalJobs SlotsUsedByGridJobs Homogeneous NetworkInfo LogicalCPUDistribution WorkingAreaShared WorkingAreaGuaranteed WorkingAreaTotal WorkingAreaFree 
WorkingAreaLifeTime WorkingAreaMultiSlotTotal WorkingAreaMultiSlotFree WorkingAreaMultiSlotLifeTime CacheTotal CacheFree TmpDir ScratchDir ApplicationDir )); $self->Benchmarks($data->{Benchmarks}); $self->begin('ExecutionEnvironments'); $self->ExecutionEnvironments($data->{ExecutionEnvironments}); $self->end('ExecutionEnvironments'); $self->begin('ApplicationEnvironments'); $self->ApplicationEnvironments($data->{ApplicationEnvironments}); $self->end('ApplicationEnvironments'); }); } sub Benchmarks { Elements(@_, 'Benchmark', undef, sub { my ($self, $data) = @_; $self->properties($data, qw( Type Value )); }); } sub ExecutionEnvironments { Elements(@_, 'ExecutionEnvironment', 'Resource', sub { my ($self, $data) = @_; $self->properties($data, qw( Platform VirtualMachine TotalInstances UsedInstances UnavailableInstances PhysicalCPUs LogicalCPUs CPUMultiplicity CPUVendor CPUModel CPUVersion CPUClockSpeed CPUTimeScalingFactor WallTimeScalingFactor MainMemorySize VirtualMemorySize OSFamily OSName OSVersion ConnectivityIn ConnectivityOut NetworkInfo )); $self->Benchmarks($data->{Benchmarks}); if ($data->{ComputingShareID} or $data->{ComputingActivityID} or $data->{ApplicationEnvironmentID}) { $self->begin('Associations'); $self->properties($data, 'ComputingShareID'); $self->properties($data, 'ComputingActivityID'); $self->properties($data, 'ApplicationEnvironmentID'); $self->end('Associations'); } }); } sub ApplicationEnvironments { Elements(@_, 'ApplicationEnvironment', undef, sub { my ($self, $data) = @_; $self->properties($data, qw( AppName AppVersion State RemovalDate License Description BestBenchmark ParallelSupport MaxSlots MaxJobs MaxUserSeats FreeSlots FreeJobs FreeUserSeats )); $self->ApplicationHandles($data->{ApplicationHandles}); if ($data->{ExecutionEnvironmentID}) { $self->begin('Associations'); $self->properties($data, 'ExecutionEnvironmentID'); $self->end('Associations'); } }); } sub ApplicationHandles { Elements(@_, 'ApplicationHandle', undef, sub { my ($self, $data) = @_; $self->properties($data, qw( Type Value )); }); } sub ComputingActivities { my $filler = sub { my ($self, $data) = @_; $self->properties($data, qw( Type IDFromEndpoint LocalIDFromManager JobDescription State RestartState ExitCode ComputingManagerExitCode Error WaitingPosition UserDomain Owner LocalOwner RequestedTotalWallTime RequestedTotalCPUTime RequestedSlots RequestedApplicationEnvironment StdIn StdOut StdErr LogDir ExecutionNode Queue UsedTotalWallTime UsedTotalCPUTime UsedMainMemory SubmissionTime ComputingManagerSubmissionTime StartTime ComputingManagerEndTime EndTime WorkingAreaEraseTime ProxyExpirationTime SubmissionHost SubmissionClientName OtherMessages )); $self->begin('Associations'); $self->properties($data, 'UserDomainID'); $self->properties($data, 'ComputingEndpointID'); $self->properties($data, 'ComputingShareID'); $self->properties($data, 'ExecutionEnvironmentID'); $self->properties($data, 'ActivityID'); $self->end('Associations'); }; my ($self, $collector) = @_; if (not $self->{splitjobs}) { Elements(@_, 'ComputingActivity', 'Activity', $filler); } else { while (my $data = &$collector()) { # Function that returns a string containing the ComputingActivity's XML my $xmlGenerator = sub { my ($memhandle, $xmlstring); open $memhandle, '>', \$xmlstring; return undef unless defined $memhandle; my $memprinter = XmlPrinter->new($memhandle); $data->{xmlns} = "http://schemas.ogf.org/glue/2009/03/spec_2.0_r1"; # Todo: fix a-rex, client to handle correct namespace $data->{xmlns} = 
"http://schemas.ogf.org/glue/2009/03/spec_2.0_r1"; $data->{BaseType} = "Activity"; $memprinter->begin('ComputingActivity', $data, qw(xmlns BaseType CreationTime Validity )); $memprinter->properties($data, qw(ID Name OtherInfo)); &$filler($memprinter, $data); $memprinter->end('ComputingActivity'); close($memhandle); return $xmlstring; }; my $filewriter = $data->{jobXmlFileWriter}; &$filewriter($xmlGenerator); } } } sub ToStorageServices { Elements(@_, 'ToStorageService', undef, sub { my ($self, $data) = @_; $self->properties($data, qw( LocalPath RemotePath )); $self->begin('Associations'); $self->properties($data, 'StorageServiceID'); $self->end('Associations'); }); } sub Domains { my ($self, $data) = @_; my $attrs = { 'xmlns' => "http://schemas.ogf.org/glue/2009/03/spec_2.0_r1", 'xmlns:xsi' => "http://www.w3.org/2001/XMLSchema-instance", 'xsi:schemaLocation' => "https://raw.github.com/OGF-GLUE/XSD/master/schema/GLUE2.xsd" #might change in the future }; # Todo: fix a-rex, client to handle correct namespace # $attrs->{'xmlns'} = "http://schemas.ogf.org/glue/2008/05/spec_2.0_d41_r01"; $self->begin('Domains', $attrs, qw( xmlns xmlns:xsi xsi:schemaLocation )); $self->AdminDomain(&$data->{AdminDomain}); $self->begin('Services'); $self->Services(&$data->{Services}); $self->ComputingService(&$data->{ComputingService}); $self->end('Services'); $self->end('AdminDomain'); $self->end('Domains'); } 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/LSF.pm0000644000000000000000000000012412116113676024502 xustar000000000000000027 mtime=1362663358.222032 27 atime=1513200575.774717 30 ctime=1513200663.134785825 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/LSF.pm0000644000175000002070000002770412116113676024561 0ustar00mockbuildmock00000000000000package LSF; use strict; our @ISA = ('Exporter'); our @EXPORT_OK = ('cluster_info', 'queue_info', 'jobs_info', 'users_info'); use LogUtils ( 'start_logging', 'error', 'warning', 'debug' ); ########################################## # Saved private variables ########################################## our $lsf_profile_path; our $lsf_profile; our $lshosts_command; our $bhosts_command; our $bqueues_command; our $bqueuesl_command; our $bjobs_command; our $lsid_command; our $total_cpus="0"; ########################################## # Private subs ########################################## sub lsf_env($$){ my ($path)=shift; $lsf_profile_path=shift; $lsf_profile=`source $lsf_profile_path`; $lshosts_command="$path/lshosts -w"; $bhosts_command = "$path/bhosts -w"; $bqueues_command = "$path/bqueues -w"; $bqueuesl_command = "$path/bqueues -l"; $bjobs_command = "$path/bjobs -W -w"; $lsid_command="$path/lsid"; } sub totalcpus { my %lsfnodes; my $ncpus=0; if ( $total_cpus eq "0"){ read_lsfnodes(\%lsfnodes); #calculate totals foreach my $node (keys %lsfnodes){ if( ($lsfnodes{$node}{"node_status"} eq "ok") || ($lsfnodes{$node}{"node_status"} eq "closed_Full") || ($lsfnodes{$node}{"node_status"} eq "closed_Excl") || ($lsfnodes{$node}{"node_status"} eq "closed_Busy") || ($lsfnodes{$node}{"node_status"} eq "closed_Adm") ){ $ncpus += $lsfnodes{$node}{"node_ncpus"}; } } return $ncpus; } return $total_cpus; } sub read_lsfnodes ($){ my ($hashref) =shift; my ($node_count) = 0; my ($cpu_count) = 0; unless (open LSFHOSTSOUTPUT, "$lshosts_command |") { debug("Error in executing lshosts command: $lshosts_command"); die "Error in executing lshosts: $lshosts_command\n"; } while (my $line= ) { if (! 
($line =~ '^HOST_NAME')) { chomp($line); my ($nodeid,$OStype,$model,$cpuf,$ncpus,$maxmem,$maxswp)= split(" ", $line); ${$hashref}{$nodeid}{"node_hostname"} = $nodeid; ${$hashref}{$nodeid}{"node_os_type"} = $OStype; ${$hashref}{$nodeid}{"node_model"} = $model; ${$hashref}{$nodeid}{"node_cpuf"} = $cpuf; ${$hashref}{$nodeid}{"node_maxmem"} = $maxmem; ${$hashref}{$nodeid}{"node_max_swap"} = $maxswp; if($ncpus != "-") { ${$hashref}{$nodeid}{"node_ncpus"} = $ncpus; } else { ${$hashref}{$nodeid}{"node_ncpus"} = 1; } } } close LSFHOSTSOUTPUT; unless (open LSFBHOSTSOUTPUT, "$bhosts_command |") { debug("Error in executing bhosts command: $bhosts_command"); die "Error in executing bhosts: $bhosts_command\n"; } while (my $line= ) { if (! ( ($line =~ '^HOST_NAME') || ($line =~ '^My cluster') || ($line =~ '^My master') ) ) { chomp($line); # HOST_NAME STATUS JL/U MAX NJOBS RUN SSUSP USUSP RSV my ($nodeid,$status,$lju,$max,$njobs,$run,$ssusp,$ususp,$rsv) = split(" ", $line); ${$hashref}{$nodeid}{"node_used_slots"} = $njobs; ${$hashref}{$nodeid}{"node_running"} = $run; ${$hashref}{$nodeid}{"node_suspended"} = $ssusp + $ususp; ${$hashref}{$nodeid}{"node_reserved"} = $rsv; ${$hashref}{$nodeid}{"node_status"} = $status; } } close LSFBHOSTSOUTPUT; } sub type_and_version { my (@lsf_version) = `$lsid_command -V 2>&1`; my ($type) ="LSF"; my ($version)="0.0"; if($lsf_version[0] =~ /^Platform/) { my @s = split(/ +/,$lsf_version[0]); $type=$s[1]; $version=$s[2]; $version=~s/,$//; } my (@result) = [$type,$version]; return ($type,$version); } sub queue_info_user ($$$) { my ($path) = shift; my ($qname) = shift; my ($user) = shift; my (%lrms_queue); #calculate running cpus and queues available unless ($user eq ""){ $user = "-u " . $user; } unless (open BQOUTPUT, "$bqueues_command $user $qname|") { debug("Error in executing bqueues command: $bqueues_command $user $qname"); die "Error in executing bqueues: $bqueues_command \n"; } while (my $line= ) { if (! ($line =~ '^QUEUE')) { chomp($line); my ($q_name,$q_priority,$q_status,$q_mjobs,$q_mslots,$q_mslots_proc,$q_mjob_slots_host,$q_num_jobs,$q_job_pending,$q_job_running,$q_job_suspended) = split(" ", $line); $lrms_queue{totalcpus} = "$q_mjobs"; $lrms_queue{maxrunning} = "$q_mjobs"; $lrms_queue{maxqueuable} = "$q_mjobs"; if ($q_mjobs eq "-") { $lrms_queue{totalcpus} = totalcpus(); $lrms_queue{maxrunning} = $lrms_queue{totalcpus}; $lrms_queue{maxqueuable} = $lrms_queue{totalcpus}; } $lrms_queue{maxuserrun} = "$q_mslots"; if ($q_mslots eq "-"){ $lrms_queue{maxuserrun} = $lrms_queue{totalcpus}; } $lrms_queue{running}= $q_job_running; $lrms_queue{status} = $q_status; $lrms_queue{queued} = $q_job_pending; } } close BQOUTPUT; $lrms_queue{defaultcput} = ""; $lrms_queue{defaultwallt} = ""; $lrms_queue{maxcputime} = ""; $lrms_queue{maxwalltime} = ""; unless (open BQOUTPUT, "$bqueuesl_command $user $qname|") { debug("Error in executing bqueues command: $bqueuesl_command $user $qname"); die "Error in executing bqueues: $bqueuesl_command \n"; } my $lastline =""; while (my $line= ) { if ( ($line =~ '^ CPULIMIT')) { my $line2=; chomp($line2); my (@mcput)= split(" ", $line2); #change from float to int. $mcput[0]=~ s/(\d+).*/$1/; if ($lastline =~ '^DEFAULT'){ $lrms_queue{defaultcput} = "$mcput[0]"; } else { $lrms_queue{maxcputime} = "$mcput[0]"; if ($lrms_queue{maxwalltime} == "") { $lrms_queue{maxwalltime} = "$mcput[0]"; } } } if ( ($line =~ '^ RUNLIMIT')) { my $line2=; chomp($line2); my (@mcput)= split(" ", $line2); #change from float to int. 
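# Hedged example (sample value assumed, not taken from real output): a RUNLIMIT value
# line such as " 720.0 min" gives $mcput[0] = "720.0", which the truncating substitution
# below turns into 720 minutes; the CPULIMIT block above applies the same conversion.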
$mcput[0]=~ s/(\d+).*/$1/; if ($lastline =~ '^DEFAULT'){ $lrms_queue{defaultwallt} = "$mcput[0]"; } else { $lrms_queue{maxwalltime} = "$mcput[0]"; } } $lastline = $line; } close BQOUTPUT; $lrms_queue{mincputime} = "0"; $lrms_queue{minwalltime} = "0"; return %lrms_queue; } sub get_jobinfo($){ my $id = shift; my %job; unless (open BJOUTPUT, "$bjobs_command $id|") { debug("Error in executing bjobs command: $bjobs_command $id"); die "Error in executing bjobs: $bjobs_command \n"; } while (my $line= <BJOUTPUT>) { if (! ($line =~ '^JOBID')) { chomp($line); my ($j_id,$j_user,$j_stat,$j_queue,$j_fromh,$j_exech,$j_name,$j_submittime,$j_projname,$j_cput,$j_mem,$j_swap,$j_pids,$j_start,$j_finish) = split(" ", $line); $job{id} = $j_id; # Report one node per job. Needs improving for multi-CPU jobs. $job{nodes} = [ $j_exech ]; $job{cput} = $j_cput; $job{mem} = $j_mem; $job{start} = $j_start; $job{finish} = $j_finish; if ($j_stat eq "RUN"){ $job{status} = "R"; } if ($j_stat eq "PEND"){ $job{status} = "Q"; } if ($j_stat eq "PSUSP" || $j_stat eq "USUSP" || $j_stat eq "SSUSP"){ $job{status} = "S"; } if ($j_stat eq "DONE" || $j_stat eq "EXIT"){ $job{status} = "E"; } if ($j_stat eq "UNKWN" || $j_stat eq "WAIT" || $j_stat eq "ZOMBI"){ $job{status} = "O"; } } } close BJOUTPUT; return %job; } ############################################ # Public subs ############################################# sub cluster_info ($) { my ($config) = shift; lsf_env($$config{lsf_bin_path},$$config{lsf_profile_path}); #init my %lrms_cluster; my %lsfnodes; $lrms_cluster{totalcpus} = 0; $lrms_cluster{usedcpus} = 0; $lrms_cluster{queuedcpus} = 0; $lrms_cluster{runningjobs} = 0; $lrms_cluster{queuedjobs} = 0; my @cpudist; $lrms_cluster{cpudistribution} = ""; $lrms_cluster{queue} = []; #lookup batch type and version ($lrms_cluster{lrms_type},$lrms_cluster{lrms_version}) = type_and_version(); # cputime limit for parallel/multi-cpu jobs is treated as job-total # OBS: Assuming LSB_JOB_CPULIMIT=y ! $lrms_cluster{has_total_cputime_limit} = 1; #get info on nodes in cluster read_lsfnodes(\%lsfnodes); #calculate totals foreach my $node (keys %lsfnodes){ if( ($lsfnodes{$node}{"node_status"} eq "ok") || ($lsfnodes{$node}{"node_status"} eq "closed_Full") || ($lsfnodes{$node}{"node_status"} eq "closed_Excl") || ($lsfnodes{$node}{"node_status"} eq "closed_Busy") || ($lsfnodes{$node}{"node_status"} eq "closed_Adm") ){ my $ncpus = $lsfnodes{$node}{"node_ncpus"}; # we use lshosts output, maybe we should use bhosts? $lrms_cluster{totalcpus} += $ncpus; $lrms_cluster{usedcpus} += $lsfnodes{$node}{"node_used_slots"}; $cpudist[$ncpus] += 1; } } #write cpu distribution string of the form: 1cpu:15 4cpu:4 for (my $i=0; $i<=$#cpudist; $i++) { next unless ($cpudist[$i]); $lrms_cluster{cpudistribution} .= " " . $i . "cpu:" . $cpudist[$i]; } #calculate queued cpus and queues available unless (open BQOUTPUT, "$bqueues_command|") { debug("Error in executing bqueues command: $bqueues_command "); die "Error in executing bqueues: $bqueues_command \n"; } my @queues; while (my $line= <BQOUTPUT>) { if (! ($line =~ '^QUEUE')) { chomp($line); my ($q_name,$q_priority,$q_status,$q_mjobs,$q_mslots,$q_mslots_proc,$q_mjob_slots_host,$q_num_jobs,$q_job_pending,$q_job_running,$q_job_suspended) = split(" ", $line); #TODO: total number of jobs in queue is not equal to queued cpus.
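# Hedged example of the `bqueues -w` line parsed here (column layout assumed, values invented):
#   normal  30  Open:Active  100  -  -  -  42  10  30  2
# With that input the statements below add NJOBS (42) to queuedcpus, RUN (30) to runningjobs
# and PEND (10) to queuedjobs, and push "normal" onto the queue list.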
$lrms_cluster{queuedcpus}+=$q_num_jobs; $lrms_cluster{runningjobs}+=$q_job_running; $lrms_cluster{queuedjobs}+=$q_job_pending; push @queues, $q_name; } } close BQOUTPUT; @{$lrms_cluster{queue}} = @queues; return %lrms_cluster; } sub queue_info($$){ my ($config) = shift; my ($qname) = shift; lsf_env($$config{lsf_bin_path},$$config{lsf_profile_path}); return queue_info_user($$config{lsf_bin_path},$qname,""); } #LSF time is on format: 000:00:00.28 #output should be an integer in minutes, rounded up. sub translate_time_lsf_to_minutes ($@) { my ($cputime) = shift; my ($days,$hours,$rest) = split(/:/,$cputime); my ($minutes, $seconds)=split(/\./,$rest); if ( $seconds > 0){ $minutes++; } $minutes=$days*24*60+$hours*60+$minutes; return $minutes; } sub jobs_info ($$@) { my ($config) = shift; my ($qname) = shift; my ($jids) = shift; lsf_env($$config{lsf_bin_path},$$config{lsf_profile_path}); my (%lrms_jobs); my (%job); my (@s); foreach my $id (@$jids){ %job = get_jobinfo($id); $lrms_jobs{$id}{status}=$job{status}; $lrms_jobs{$id}{nodes}=$job{nodes}; $lrms_jobs{$id}{mem}=$job{mem}; $lrms_jobs{$id}{cputime}=translate_time_lsf_to_minutes($job{cput}); $lrms_jobs{$id}{walltime}=""; $lrms_jobs{$id}{reqwalltime}=""; $lrms_jobs{$id}{reqcputime}=""; $lrms_jobs{$id}{comment}=["job started: $job{start}"]; $lrms_jobs{$id}{rank}=""; #TODO fix to support parallel jobs $lrms_jobs{$id}{cpus}=1; } return %lrms_jobs; } sub users_info($$@) { my ($config) = shift; my ($qname) = shift; my ($accts) = shift; lsf_env($$config{lsf_bin_path},$$config{lsf_profile_path}); my (%lrms_users); my (%queue); foreach my $u ( @{$accts} ) { %queue = queue_info_user( $$config{lsf_bin_path}, $qname, $u ); $lrms_users{$u}{freecpus} = $queue{maxrunning}-$queue{runnning}; $lrms_users{$u}{queuelength} = "$queue{queued}"; } return %lrms_users; } 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/DGBridge.pm0000644000000000000000000000012412116113676025465 xustar000000000000000027 mtime=1362663358.222032 27 atime=1513200575.809717 30 ctime=1513200663.132785801 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/DGBridge.pm0000644000175000002070000001241012116113676025530 0ustar00mockbuildmock00000000000000package DGBridge; use strict; use POSIX qw(ceil floor); use Sys::Hostname; our @ISA = ('Exporter'); our @EXPORT_OK = ('cluster_info', 'queue_info', 'jobs_info', 'users_info'); use LogUtils ( 'start_logging', 'error', 'warning', 'debug' ); ########################################## # Saved private variables ########################################## our (%lrms_queue); our $running = undef; # total running jobs in a queue # the queue passed in the latest call to queue_info, jobs_info or users_info my $currentqueue = undef; # Resets queue-specific global variables if # the queue has changed since the last call sub init_globals($) { my $qname = shift; if (not defined $currentqueue or $currentqueue ne $qname) { $currentqueue = $qname; %lrms_queue = (); $running = undef; } } ########################################## # Private subs ########################################## ############################################ # Public subs ############################################# sub cluster_info ($) { my ($config) = shift; my (%lrms_cluster); $lrms_cluster{lrms_type} = "DGBridge"; $lrms_cluster{lrms_version} = "1.8.1"; # only enforcing per-process cputime limit $lrms_cluster{has_total_cputime_limit} = 0; $lrms_cluster{totalcpus} = 1000; # Since fork is a single machine backend all there will only be one machine available 
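# With the fixed totalcpus above, the next statement yields the string "1000cpu:1",
# i.e. the bridge is published as a single pseudo-machine carrying 1000 slots.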
$lrms_cluster{cpudistribution} = $lrms_cluster{totalcpus}."cpu:1"; # usedcpus on a fork machine is determined from the 1min cpu # loadaverage and cannot be larger than the totalcpus $lrms_cluster{usedcpus} = 0; $lrms_cluster{runningjobs} = 0; # no LRMS queuing jobs on a fork machine, fork has no queueing ability $lrms_cluster{queuedcpus} = 0; $lrms_cluster{queuedjobs} = 0; $lrms_cluster{queue} = [ ]; return %lrms_cluster; } sub queue_info ($$) { my ($config) = shift; my ($qname) = shift; init_globals($qname); if (defined $running) { # job_info was already called, we know exactly how many grid jobs # are running $lrms_queue{running} = $running; } else { $lrms_queue{running}= 0; } $lrms_queue{totalcpus} =1000; $lrms_queue{status} = $lrms_queue{totalcpus}-$lrms_queue{running}; # reserve negative numbers for error states $lrms_queue{status} = 0; $lrms_queue{maxrunning} = "10000"; $lrms_queue{maxuserrun} = "10000"; $lrms_queue{maxqueuable} = ""; #unlimited $lrms_queue{maxcputime} = ""; $lrms_queue{queued} = 0; $lrms_queue{mincputime} = ""; $lrms_queue{defaultcput} = ""; $lrms_queue{minwalltime} = ""; $lrms_queue{defaultwallt} = ""; $lrms_queue{maxwalltime} = $lrms_queue{maxcputime}; $lrms_queue{MaxSlotsPerJob} = 1; return %lrms_queue; } sub getDGstate($$) { my ($jid) = shift; my ($ep) = shift; my ($state); my ($cmdl) = "wsclient -e $ep -m status -j $jid |"; unless (open DGSTATUSOUT, $cmdl) { error("Error in executing wsclient"); } $state="WSError"; while () { unless (/^$jid/) { next; } chomp; my ($vid,$val) = split' ',$_,2; $state = $val; } close DGSTATUSOUT; return $state; } sub jobs_info ($$@) { my ($config) = shift; my ($qname) = shift; my ($jids) = shift; init_globals($qname); my (%lrms_jobs); foreach my $id (@$jids){ $lrms_jobs{$id}{nodes} = [ ]; # get real endpoint my ($endp, $bid) = split'\|',$id,2; $endp =~ s/^\"//; $bid =~ s/\"$//; my $dgstate = getDGstate($bid, $endp); #states possible #Init #Running #Unknown #Finished #Error #TempFailed $lrms_jobs{$id}{status} = 'O'; # job is ? if ($dgstate eq "Init") { $lrms_jobs{$id}{status} = 'Q'; # job is preparing } if ($dgstate eq "Running") { $lrms_jobs{$id}{status} = 'R'; # job is running ++$running; } if ( ($dgstate eq "Finished") || ($dgstate eq "Error") ) { $lrms_jobs{$id}{status} = 'E'; # job is EXECUTED } if ( ($dgstate eq "TempFailed") || ($dgstate eq "WSError") ) { $lrms_jobs{$id}{status} = 'S'; # job is temporarily failed } if ($dgstate eq "Unknown") { $lrms_jobs{$id}{status} = 'O'; # job is temporarily failed } $lrms_jobs{$id}{comment} = [ "LRMS: $dgstate" ]; $lrms_jobs{$id}{mem} = ''; $lrms_jobs{$id}{walltime} = ''; $lrms_jobs{$id}{cputime} = ''; $lrms_jobs{$id}{reqwalltime} = ""; $lrms_jobs{$id}{reqcputime} = ""; $lrms_jobs{$id}{rank} = -1; #DGBridge backend does not support parallel jobs $lrms_jobs{$id}{cpus} = 1; } return %lrms_jobs; } sub users_info($$@) { my ($config) = shift; my ($qname) = shift; my ($accts) = shift; init_globals($qname); my (%lrms_users); # freecpus # queue length if ( ! 
exists $lrms_queue{status} ) { %lrms_queue = queue_info( $config, $qname ); } foreach my $u ( @{$accts} ) { $lrms_users{$u}{freecpus} = $lrms_queue{maxuserrun} - $lrms_queue{running}; $lrms_users{$u}{queuelength} = "$lrms_queue{queued}"; } return %lrms_users; } 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/CEinfo.pl.in0000644000000000000000000000012713153453707025634 xustar000000000000000027 mtime=1504597959.039947 30 atime=1513200650.103626449 30 ctime=1513200663.165786204 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/CEinfo.pl.in0000755000175000002070000006274113153453707025713 0ustar00mockbuildmock00000000000000#!/usr/bin/perl -w package CEInfo; ######################################################## # Driver for information collection ######################################################## use File::Basename; use Getopt::Long; use Sys::Hostname; use Data::Dumper; use Cwd; ## enable this below to dump datastructures ## Note: Concise is not an ARC dependency! ## must be installed separately. #use Data::Dumper::Concise; ## usage: # print Dumper($datastructure); use strict; # Some optional features that may be unavailable in older Perl versions. # Should work with Perl v5.8.0 and up. BEGIN { # Makes sure the GLUE document is valid UTF8 eval {binmode(STDOUT, ":utf8")}; # Used for reading UTF8 encoded grid-mapfile eval {require Encode; import Encode "decode"}; # Fall back to whole-second precision if not avaiable eval {require Time::HiRes; import Time::HiRes "time"}; } BEGIN { my $pkgdatadir = dirname($0); unshift @INC, $pkgdatadir; } # Attempt to recursively create directory # takes absolute filename (absolute path and file name) sub createdirs { my ($fullpathfilename, $log) = @_; my @paths; for (my $path = dirname $fullpathfilename; length $path > 1; $path = dirname $path) { push @paths, $path; } mkdir $_ for reverse @paths; $log->error("Failed to create log directory $paths[0]") if @paths and not -d $paths[0]; } # minimal set of vars before loading the profiler our $configfile; our $NYTPROF_PRESENT = 0; # Profiler config and code moved here because we need # to get more data before main starts BEGIN { use ConfigCentral; use LogUtils; use Getopt::Long; LogUtils::level('WARNING'); LogUtils::indentoutput(1); my $log = LogUtils->getLogger(__PACKAGE__); my $nojobs; my $splitjobs; my $perffreq = 1800; my $print_help; # Make a copy of @ARGV and restore it after extracting options from it. my @options = @ARGV; GetOptions("config:s" => \$configfile, "nojobs|n" => \$nojobs, "splitjobs|s" => \$splitjobs, "perffreq:i" => \$perffreq, "help|h" => \$print_help ); @ARGV = @options; unless ( $configfile ) { $log->warning("Performance code setup: No 'config' option, skipping performance configuration. See --help "); } else { my $perflogdir; my $perflognytprofdir; my $perflognytproffilepath; my $enable_perflog_reporting = ConfigCentral::getValueOf($configfile,'common','enable_perflog_reporting'); if (defined $enable_perflog_reporting && $enable_perflog_reporting) { # The profiling tool might be missing in some distributions. # Default is to assume is not present. $perflogdir = ConfigCentral::getValueOf($configfile,'common','perflogdir'); $perflogdir ||= ($perflogdir) ? 
$perflogdir : '/var/log/arc/perfdata'; $perflognytprofdir = $perflogdir.'/perl_nytprof/'; # reduce performance reporting depending on interval by checking dir last modification time if ( -e $perflognytprofdir ) { my $t0 = time(); my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,$blksize,$blocks) = stat($perflognytprofdir); my $timediff = $t0 - $mtime; if ($timediff <= $perffreq) { $log->debug("$perflognytprofdir changed $timediff seconds ago, less than $perffreq. Skipping performance data collection" ); undef $log; no ConfigCentral; no LogUtils; return 0; } } # append and create raw folder for NYTProf database files my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(); my $timestamp=POSIX::strftime("%Y%m%d%H%M%S", $sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst); my $perflognytproffilename = 'infosys_'.$timestamp.'.perflog.raw'; $perflognytproffilepath = $perflognytprofdir.$perflognytproffilename; createdirs($perflognytproffilepath,$log); if ( -e $perflogdir ) { $ENV{NYTPROF} = "savesrc=0:start=no:file=".$perflognytproffilepath; $NYTPROF_PRESENT = eval { require Devel::NYTProf; 1; }; if ($NYTPROF_PRESENT) { DB::enable_profile($perflognytproffilepath); $log->info("Performance reporting enabled. The database file will be stored in $perflognytproffilepath"); } else { $log->warning("Devel::NYTProf PERL module not installed. Performance data cannot be collected."); } } else { $log->warning("Cannot access directory $perflogdir. Unable to open performance file $perflognytproffilepath. Check arc.conf perflogdir option, directories and permissions"); } } } undef $log; no ConfigCentral; no LogUtils; } use ConfigCentral; use LogUtils; use HostInfo; use RTEInfo; use GMJobsInfo; use LRMSInfo; use ARC0ClusterInfo; use ARC1ClusterInfo; use GLUE2xmlPrinter; use GLUE2ldifPrinter; use NGldifPrinter; use InfosysHelper; our $nojobs; our $log = LogUtils->getLogger(__PACKAGE__); sub timed { my ($fname, $func) = @_; my $t0 = time(); my $result = &$func(); my $dt = sprintf('%.3f', time() - $t0); $log->verbose("Time spent in $fname: ${dt}s"); return $result; } sub main { LogUtils::level('INFO'); LogUtils::indentoutput(1); # Parse command line options my $splitjobs; my $perffreq = 1800; my $print_help; GetOptions("config:s" => \$configfile, "nojobs|n" => \$nojobs, "splitjobs|s" => \$splitjobs, "perffreq:i" => \$perffreq, "help|h" => \$print_help ); if ($print_help) { print "Usage: $0 --config - location of arc.conf --nojobs|n - don't include information about jobs --splitjobs|s - write job info in a separate XML file for each job in the controldir --perffreq|p - interval between performance collections, in seconds. 
Default is 1200 --help - this help\n"; exit 1; } unless ( $configfile ) { $log->error("a command line argument is missing, see --help "); } # Read ARC configuration our $config = timed 'ConfigCentral', sub { ConfigCentral::parseConfig($configfile) }; # Dump config (uncomment if needed) #print Dumper($config); # Change level for root logger (affects all loggers from now on) LogUtils::level($config->{debugLevel}) if defined $config->{debugLevel}; my $providerlog = $config->{ProviderLog} || "/var/log/arc/infoprovider.log"; $log->info("Redirecting further messages to $providerlog"); LogUtils::indentoutput(0); # Attempt to recursively create directory createdirs($providerlog,$log); open STDERR, ">>", $providerlog or $log->error("Failed to open to $providerlog"); LogUtils::timestamps(1); $log->info("############## A-REX infoprovider started ##############"); fix_config($config); check_config($config); my $data = timed 'all info collectors', sub { CEInfo::collect($config) }; $data->{nojobs} = $nojobs; # Print GLUE2 XML $log->info("Generating GLUE2 XML rendering"); my $glue2data = timed 'ARC1ClusterInfo', sub { ARC1ClusterInfo::collect($data) }; my $xmlPrinter = GLUE2xmlPrinter->new(*STDOUT, $splitjobs); $xmlPrinter->begin('InfoRoot'); timed 'GLUE2xml', sub { $xmlPrinter->Domains($glue2data) }; $xmlPrinter->end('InfoRoot'); # Glue1.2/1.3 cannot be generated without NorduGrid, so we force publishing if (!$config->{infosys_nordugrid} and $config->{infosys_glue12}) { $log->warning("NorduGrid schema disabled in arc.conf. Glue1.2/1.3 cannot be generated without NorduGrid Schema, reenabling"); $config->{infosys_nordugrid} = 1; } # Generate ldif for infosys-ldap $log->info("Generating LDIF renderings"); my $ngdata; $ngdata = timed 'ARC0ClusterInfo', sub { ARC0ClusterInfo::collect($data) } if ($config->{infosys_nordugrid} or $config->{infosys_glue12}); my $print_ldif = sub { my ($fh) = @_; if ($config->{infosys_glue2_ldap}) { # introduced to find GLUE2 LDAP schema version my $glue2schemaversion; unless (open (GLUE2_SCHEMA, "debug("CEinfo.pl didn't find GLUE20.schema in standard location. Will create LDIF in compatibitity mode."); } else { my $linecnt=0; while (my $line = ) { chomp $line; next unless $line; if ($line =~ m/# Schema Version: (\d).*/) { $glue2schemaversion = $1; $log->debug("GLUE2 schema version major identified. It is: $glue2schemaversion"); last } if ($linecnt > 25) { $log->debug("Can't identify GLUE2 schema version. Will create LDIF in compatibility mode"); last } $linecnt++; } close GLUE2_SCHEMA; } # override splitjobs for ldap if option specified in arc.conf if ( $config->{infosys_glue2_ldap_showactivities} ) { $splitjobs = 0 }; $log->info("Generating GLUE2 LDIF rendering"); my $ldifPrinter = GLUE2ldifPrinter->new($fh, $splitjobs, $glue2schemaversion); timed 'GLUE2ldif', sub { $ldifPrinter->Top($glue2data) }; } if ($config->{infosys_nordugrid} or $config->{infosys_glue12}) { $log->info("Generating NorduGrid LDIF rendering"); my $ldifPrinter = NGldifPrinter->new($fh, $config->{validity_ttl}); timed 'NGldif', sub { $ldifPrinter->Top($ngdata) }; } }; # only notifiy ldap infosys if ldap is enabled! if ($config->{infosys_nordugrid} or $config->{infosys_glue12} or $config->{infosys_glue2_ldap}) { # check that fifo is working and notify if so. 
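# Hedged sketch of the hand-off (helper behaviour assumed from the calls and log messages
# below, not from InfosysHelper itself): $print_ldif is a closure that takes a filehandle
# and writes the enabled LDIF renderings to it, e.g.
#   open(my $fh, '>', '/tmp/arc-ldif-example') or die "open failed: $!";   # path invented
#   $print_ldif->($fh);
#   close($fh);
# createLdifScript is expected to wrap that closure in an ldif-generator script for the
# LDAP information system, and notifyInfosys then wakes the LDAP side up through its fifo.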
if (InfosysHelper::createLdifScript($config, $print_ldif)) { if (InfosysHelper::notifyInfosys($config)) { $log->verbose("LDAP information system notified"); } else { $log->warning("Failed to notify LDAP information system"); } } else { $log->warning("Failed to create ldif generator script for LDAP information system"); } } else { $log->verbose("LDAP/ldif information generation is disabled. To enable it, enable one of the options infosys_nordugrid, infosys_glue12, infosys_glue2_ldap."); } $log->info("############## A-REX infoprovider finished ##############"); # close performance file if enabled DB::disable_profile() if ($NYTPROF_PRESENT && (defined $config->{enable_perflog_reporting})); } ################################################## # information collector ################################################## sub collect($) { my ($config) = @_; # uncomment for a dump of the whole config hash # print Dumper($config); # get all local users from grid-map. Sort unique $log->info("Reading grid-mapfiles"); my @localusers; my $usermap = {}; if ($config->{gridmap}) { my %saw = (); $usermap = read_grid_mapfile($config->{gridmap}); @localusers = grep !$saw{$_}++, values %$usermap; } else { $log->info("gridmap not configured"); my $defaultuser = $config->{defaultLocalName}; @localusers = ($defaultuser) if $defaultuser; } $log->warning("Cannot determine local users") unless @localusers; $log->info("Fetching job information from control directory (GMJobsInfo.pm)"); my $gmjobs_info = get_gmjobs_info($config); $log->info("Updating job status information"); # build the list of all jobs in state INLRMS my @jobids; for my $job (values %$gmjobs_info) { next unless $job->{status} and $job->{status} eq 'INLRMS'; next unless defined $job->{localid} and length $job->{localid}; push @jobids, $job->{localid}; } # build hash with all the input necessary for the renderers my $data = {}; $data->{config} = $config; $data->{usermap} = $usermap; $log->info("Updating frontend information (HostInfo.pm)"); $data->{host_info} = get_host_info($config,\@localusers); $log->info("Updating RTE information (RTEInfo.pm)"); $data->{rte_info} = get_rte_info($config); $data->{gmjobs_info} = $gmjobs_info; $log->info("Updating LRMS information (LRMSInfo.pm)"); $data->{lrms_info} = get_lrms_info($config,\@localusers,\@jobids); $log->info("Discovering adotf values"); fix_adotf($config->{service}, $data->{host_info}); fix_adotf($_, $data->{host_info}) for values %{$config->{xenvs}}; return $data; } ################################################## # Calling other information collectors ################################################## sub get_host_info($$) { my ($config,$localusers) = @_; my $host_opts = {}; $host_opts->{localusers} = $localusers; $host_opts->{processes} = ['arched', 'gridftpd']; $host_opts->{x509_user_cert} = $config->{x509_user_cert}; $host_opts->{x509_cert_dir} = $config->{x509_cert_dir}; $host_opts->{wakeupperiod} = $config->{wakeupperiod}; $host_opts->{sessiondir} = $config->{sessiondir}; $host_opts->{control} = $config->{control}; $host_opts->{remotegmdirs} = $config->{remotegmdirs}; return timed 'HostInfo', sub { HostInfo::collect($host_opts) }; } sub get_rte_info($) { my ($config) = @_; my $rte_opts; $rte_opts->{configfile} = $configfile; $rte_opts->{runtimedir} = $config->{runtimedir} if $config->{runtimedir}; $rte_opts->{use_janitor} = $config->{use_janitor} if $config->{use_janitor}; $rte_opts->{pkgdatadir} = dirname($0); return timed 'RTEInfo', sub { RTEInfo::collect($rte_opts) }; } sub get_lrms_info($$$) 
{ my ($config,$localusers,$jobids) = @_; # possibly any options from config are needed, so just clone it all my $lrms_opts = Storable::dclone($config); delete $lrms_opts->{$_} for qw(xenvs shares); $lrms_opts->{jobs} = $jobids; for my $share ( keys %{$config->{shares}} ) { $lrms_opts->{queues}{$share} = $config->{shares}{$share}; $lrms_opts->{queues}{$share}{users} = $localusers; } return timed 'LRMSInfo', sub { LRMSInfo::collect($lrms_opts) }; } sub get_gmjobs_info($) { my $config = shift; my $gmjobs_info = timed 'GMJobsInfo', sub { GMJobsInfo::collect($config->{control}, $config->{remotegmdirs}, $nojobs) }; return fix_jobs($config, $gmjobs_info); } ################################################## # ################################################## # Check validity and fill in missing 'share' and 'queue' attributes of jobs. sub fix_jobs { my ($config, $gmjobs_info) = @_; my ($lrms, $defaultshare) = split /\s+/, $config->{lrms} || ''; for my $jobid (keys %$gmjobs_info) { my $job = $gmjobs_info->{$jobid}; my $share = $job->{share}; # If A-REX has not chosen a share for the job, default to one. if (not $share) { my $msg = "A-REX has not chosen a share for job $jobid"; if ($defaultshare) { $log->info($msg.". Assuming default: ".$defaultshare); $share = $defaultshare; } else { my @shares = keys %{$config->{shares}}; if (@shares == 1) { $log->info($msg.". Assuming: ".$shares[0]); $share = $shares[0]; } else { $log->warning($msg." and no default share is defined."); } } } # Set correct queue if ($share) { my $sconfig = $config->{shares}{$share}; if ($sconfig) { $job->{queue} = $sconfig->{MappingQueue} || $share; } else { $log->warning("Job $jobid belongs to an invalid share '$share'"); $share = undef; } } # Group jobs not belonging to any known share into a catch-all share named '' $job->{share} = $share || ''; } return $gmjobs_info; } # reads grid-mapfile. 
Returns a ref to a DN => uid hash sub read_grid_mapfile($) { my $gridmapfile = shift; my $usermap = {}; unless (open MAPFILE, "<$gridmapfile") { $log->warning("can't open gridmapfile at $gridmapfile"); return; } while (my $line = ) { chomp($line); if ( $line =~ m/\"([^\"]+)\"\s+(\S+)/ ) { my $subject = $1; eval { $subject = decode("utf8", $subject, 1); }; $usermap->{$subject} = $2; } } close MAPFILE; return $usermap; } # Alters the parsed configuration options by: # * adding some defaults # * flattening per-user options # 20140225 - synched some options from GMConfig.cpp sub fix_config { my ($config) = @_; my %config_defaults = ( arcversion => '@VERSION@', infosys_nordugrid => 1, infosys_glue12 => 0, infosys_glue2_ldap => 1, GridftpdEnabled => 0, GridftpdMountPoint => '/jobs', GridftpdPort => 2811, GridftpdAllowNew => 1, GridftpdPidFile => '/var/run/gridftpd.pid', validity_ttl => 10800, SlapdPort => 2135, infosys_glue2_service_qualitylevel => 'production', shared_filesystem => 1, enable_arc_interface => 1, enable_emies_interface => 0, infoproviders_timeout => 10800 ); my %control_defaults = ( defaultttl => '604800 2629744' ); for (keys %config_defaults) { $config->{$_} = $config_defaults{$_} if not defined $config->{$_}; } $config->{control} ||= {}; for (keys %control_defaults) { $config->{control}->{"."}->{$_} = $control_defaults{$_} if not defined $config->{control}->{"."}->{$_}; } $config->{service} ||= {}; $config->{shares} ||= {}; $config->{xenvs} ||= {}; delete $config->{location} unless $config->{location} and %{$config->{location}}; delete $config->{contacts} unless $config->{contacts} and @{$config->{contacts}}; my $hostname = $config->{hostname} || hostname(); { my @dns = split /\./, $hostname; my $shorthost = shift @dns; my $dnsdomain = join ".", @dns; $log->info("AdminDomain config option is missing in XML. Defaulting to arc.conf values") unless $config->{AdminDomain}; $log->info("AdminDomain name in [infosys/admindomain] block is missing or incorrect in arc.conf. Defaulting to UNDEFINEDVALUE") unless $config->{admindomain}{Name}; chomp ($config->{admindomain}{Name} ||= "UNDEFINEDVALUE"); chomp ($config->{AdminDomain} ||= "UNDEFINEDVALUE"); $log->info("ClusterName config option is missing in XML. Trying cluster_alias...") unless $config->{service}{ClusterName}; $log->info("ClusterAlias in XML or cluster-alias in arc.conf missing. Defaulting to $shorthost") unless $config->{service}{ClusterAlias}; chomp ($config->{service}{ClusterName} ||= $config->{service}{ClusterAlias} ||= $shorthost); } # Manage A-REX WS interfaces publication if ($config->{endpoint} and $config->{endpoint} =~ m{^(https?)://([^:/]+)(?::(\d+))?(.*)}) { my ($proto,$host,$port,$mountpoint) = ($1,$2,$3,$4); $port ||= 80 if $proto eq "http"; $port ||= 443 if $proto eq "https"; $config->{arexhostport} = "$host:$port"; # fix endpoint information if not complete $config->{endpoint} = "$proto://$host:$port$mountpoint" } else { # disable all WS interfaces $config->{enable_arc_interface} = 0; $config->{enable_emies_interface} = 0; $log->warning("arex_mount_point not configured. WS interfaces org.nordugrid.xbes and EMI-ES will not be published."); } # fire warning if GLUE2 Service Quality Level is not good # TODO: I'd like this to be done by InfoChecker if ($config->{infosys_glue2_service_qualitylevel}) { my $qualitylevelstring = $config->{infosys_glue2_service_qualitylevel}; my $closedenumeration = {'development' => '1', 'pre-production' => '1', 'production' => '1', 'testing' => '1' }; if (! 
$closedenumeration->{$qualitylevelstring} ) { $log->error("infosys_glue2_service_qualitylevel contains \"$qualitylevelstring\" which is an invalid value. Allowed value is one of: testing production pre-production development"); } } # Cross-check MappingPolicy references and move them to the share wehre they belong for my $s (@{$config->{mappingpolicies}}) { $log->error("MappingPolicy must include a ShareName option") unless $s->{ShareName}; $log->error("MappingPolicy must include a Rule option") unless $s->{Rule}; $log->error("MappingPolicy must include a UserDomainID option") unless $s->{UserDomainID}; for my $name (@{$s->{ShareName}}) { $log->error("MappingPolicy associated with non-existent Share: $name") unless $config->{shares}{$name}; push @{$config->{shares}{$name}{mappingpolicies}}, $s; } } } # Does some consistency checks on the parsed configuration options sub check_config { my ($config) = @_; $log->error("No queue or ComputingShare configured") unless %{$config->{shares}}; $log->error("No ExecutionEnvironment configured") unless %{$config->{xenvs}}; $log->error("No control directory configured") unless %{$config->{control}} or $config->{remotegmdirs}; while (my ($user, $control) = each %{$config->{control}}) { $log->error("No control directory configured for user $user") unless $control->{controldir}; $log->error("No session directory configured for user $user") unless $control->{sessiondir}; } # Cross-check ExecutionEnvironment references for my $s (values %{$config->{shares}}) { next unless $s->{ExecutionEnvironmentName}; for my $group (@{$s->{ExecutionEnvironmentName}}) { $log->error("ComputingShare associated with non-existent ExecutionEnvironment: $group") unless $config->{xenvs}{$group}; } } for my $s (values %{$config->{xenvs}}) { delete $s->{NodeSelection} unless %{$s->{NodeSelection}}; } my ($lrms, $defaultshare) = split /\s+/, $config->{lrms} || ''; $log->error("defaultShare set to nonexistent ComputingShare") if $defaultshare and not $config->{shares}{$defaultshare}; if ($config->{contacts}) { for (@{$config->{contacts}}) { $log->warning("Contact is missing Type") and next unless $_->{Type}; $log->warning("Contact is missing Detail") and next unless $_->{Detail}; $log->warning("Contact Detail is not an URI: ".$_->{Detail}) and next unless $_->{Detail} =~ m/^\w+:/; } } } # Replaces 'adotf' in config options with autodetected values sub fix_adotf { my ($h, $hostinfo) = @_; if ($h->{nodecpu}) { if ($h->{nodecpu} =~ m/(.*?)(?:\s+stepping\s+(\d+))?\s+@\s+([.\d]+)\s*(M|G)Hz$/i) { $h->{CPUModel} ||= $1; $h->{CPUVersion} ||= $2; $h->{CPUClockSpeed} ||= ($4 eq 'G') ? int($3 * 1000) : int($3); } elsif ($h->{nodecpu} eq 'adotf') { $h->{CPUVendor} ||= 'adotf'; $h->{CPUModel} ||= 'adotf'; $h->{CPUClockSpeed} ||= 'adotf'; } else { $log->warning("Invalid value for nodecpu option: ".$h->{nodecpu}); } delete $h->{nodecpu}; } if ($h->{OpSys} and grep {$_ eq 'adotf'} @{$h->{OpSys}}) { $h->{OpSys} = [ grep {$_ ne 'adotf'} @{$h->{OpSys}} ]; unless (defined($hostinfo->{osname})) { $log->warning("Failed to autodetect value for 'OSName'. 
Enter correct value in config file"); $h->{OSName} = 'unknown'; } $h->{OSName} ||= 'adotf'; $h->{OSVersion} ||= 'adotf'; $h->{OSFamily} ||= 'adotf'; } my %hostkey = (Platform => 'machine', PhysicalCPUs => 'cpusocketcount', LogicalCPUs => 'cputhreadcount', CPUVendor => 'cpuvendor', CPUModel => 'cpumodel', CPUClockSpeed => 'cpufreq', MainMemorySize => 'pmem', VirtualMemorySize => 'vmem', OSFamily => 'sysname', OSName => 'osname', OSVersion => 'osversion' ); for my $key (keys %hostkey) { if (exists $h->{$key} and $h->{$key} eq 'adotf') { $log->warning("Failed to autodetect value for '$key'. Enter correct value in config file") unless defined $hostinfo->{$hostkey{$key}}; $h->{$key} = $hostinfo->{$hostkey{$key}}; } } } main(); nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/LL.pm0000644000000000000000000000012412462175634024373 xustar000000000000000027 mtime=1422457756.033347 27 atime=1513200575.819717 30 ctime=1513200663.133785813 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/LL.pm0000644000175000002070000003562312462175634024451 0ustar00mockbuildmock00000000000000package LL; use strict; our @ISA = ('Exporter'); # Module implements these subroutines for the LRMS interface our @EXPORT_OK = ('cluster_info', 'queue_info', 'jobs_info', 'users_info'); use LogUtils ( 'start_logging', 'error', 'warning', 'debug' ); ########################################## # Saved private variables ########################################## our(%lrms_queue); ########################################## # Private subs ########################################## # calculates the total cpus from a string like: " 1 3 5-9 " sub total_from_individual($){ my $str=shift; #trim string $str =~ s/^\s+//; $str =~ s/\s+$//; my @ids = split(' ', $str); my $total = 0; foreach my $id (@ids) { if ( $id =~ /([0-9]+)-([0-9]+)/ ){ $total += $2-$1 +1; }elsif( $id =~ /[0-9]+/ ){ $total++; } } return $total; } sub consumable_distribution ($$) { my ( $path ) = shift; my ( $consumable_type ) = shift; unless (open LLSTATUSOUT, "$path/llstatus -R|") { error("Error in executing llstatus"); } my @cons_dist = (); while () { if ( /[^# ]*(#*) *.*$consumable_type\*?\(([0-9]*),([0-9]*).*/ ) { #if displayed as total cpus # Check if node is down if ( $1 ne "#" ) { my @a = ($3 - $2,$3); push @cons_dist, [ @a ]; } } elsif ( /[^# ]*(#*) *.*$consumable_type<([^>]*)><([^>]*)>.*/ ){ #if displayed as individual cpu numbers if ( $1 ne "#" ) { my $availcpu=total_from_individual($2); my $alcpu=total_from_individual($3); my @a = ($alcpu - $availcpu,$alcpu); push @cons_dist, [ @a ]; } } } return @cons_dist; } sub consumable_total (@) { my @dist = @_; my ($cpus, $used, $max); foreach $cpus (@{dist}) { $used += ${$cpus}[0]; $max += ${$cpus}[1]; } return ($used,$max) } sub cpudist2str (@) { my @dist = @_; my @total_dist = (); my $str = ''; # Collect number of available cores my ($used,$max,$cpus); foreach $cpus (@dist) { ($used, $max) = @{$cpus}; $total_dist[$max]++; } # Turn it into a string my $n; $n = 0; foreach $cpus (@total_dist) { if ($cpus) { if ( $str ne '') { $str .= ' '; } $str .= $n . "cpu:" . 
$cpus; } $n++; } return $str; } sub get_cpu_distribution($) { my ( $path ) = shift; my $single_job_per_box = 1; if ($single_job_per_box == 1) { # Without hyperthreading unless (open LLSTATUSOUT, "$path/llstatus -f %sta|") { error("Error in executing llstatus"); } } else { # Use all available cpus/cores including hyperthreading: unless (open LLSTATUSOUT, "$path/llstatus -r %cpu %sta|") { error("Error in executing llstatus"); } } my %cpudist; while () { chomp; # We only want CPU lines (and there must be at least one) next if !/^[1-9]/; # An empty line denotes end of CPUs last if /^$/; my $cpus; my $startd; if ($single_job_per_box == 1) { ($startd) = split/\!/; $cpus = 1; } else { ($cpus, $startd) = split/\!/; } # Only count those machines which have startd running if ($startd ne "0") { $cpudist{$cpus}++; } } close LLSTATUSOUT; return %cpudist; } sub get_used_cpus($) { my ( $path ) = shift; unless (open LLSTATUSOUT, "$path/llstatus |") { error("Error in executing llstatus"); } my $cpus_used; while () { chomp; # We only want CPU lines (and there must be at least one) next if !/^Total Machines/; tr / //s; my @fields = split; $cpus_used = $fields[6]; last; } close LLSTATUSOUT; return ($cpus_used); } sub get_long_status($) { my ( $path ) = shift; unless (open LLSTATUSOUT, "$path/llstatus -l |") { error("Error in executing llstatus"); } my %cpudist; my $machine_name; my %machines; while () { # Discard trailing information separated by a newline if ( /^$/ ) { next; } chomp; my ($par, $val) = split/\s*=\s*/,$_,2; if ($par eq 'Name') { $machine_name=$val; next; } $machines{$machine_name}{$par} = $val; } close LLSTATUSOUT; return %machines; } sub get_long_queue_info($$) { my ( $path ) = shift; my ( $queue) = shift; unless (open LLCLASSOUT, "$path/llclass -l $queue |") { error("Error in executing llclass"); } my %queue_info; my $queue_name; while () { # Discard trailing information separated by a newline and header if ( /^$/ || /^==========/ ) { next; } # Info ends with a line of dashes last if /^----------/; s/^\s*//; chomp; my ($par, $val) = split/\s*:\s*/,$_,2; if ($par eq 'Name') { $queue_name=$val; next; } $queue_info{$queue_name}{$par} = $val; } close LLCLASSOUT; return %queue_info; } sub get_queues($) { my ( $path ) = shift; unless (open LLCLASSOUT, "$path/llclass |") { error("Error in executing llclass"); } # llclass outputs queues (classes) delimited by ---- markers my @queues; my $queue_sect; while () { # Now reading queues if ( /^----------/ && $queue_sect == 0) { if ($#queues == -1) { $queue_sect = 1; next; } } # Normal ending after reading final queue if ( /^----------/ && $queue_sect == 1) { $queue_sect = 0; return @queues; } if ( $queue_sect == 1 ) { chomp; s/ .*//; push @queues, $_; } } # We only end here if there were no queues return @queues; } sub get_short_job_info($$) { # Path to LRMS commands my ($path) = shift; # Name of the queue to query my ($queue) = shift; if ($queue ne "") { unless (open LLQOUT, "$path/llq -c $queue |") { error("Error in executing llq"); } } else { unless (open LLQOUT, "$path/llq |") { error("Error in executing llq"); } } my %jobstatus; while () { my ($total, $waiting, $pending, $running, $held, $preempted); $total = 0; $waiting = 0; $pending = 0; $running = 0; $held = 0; $preempted = 0; if (/(\d*) .* (\d*) waiting, (\d*) pending, (\d*) running, (\d*) held, (\d*) preempted/) { $total = $1; $waiting = $2; $pending = $3; $running = $4; $held = $5; $preempted = $6; } $jobstatus{total} = $total; $jobstatus{waiting} = $waiting; $jobstatus{pending} = $pending; 
$jobstatus{running} = $running; $jobstatus{held} = $held; $jobstatus{preempted} = $preempted; } close LLQOUT; return %jobstatus; } sub get_long_job_info($$) { # Path to LRMS commands my ($path) = shift; # LRMS job IDs from Grid Manager (jobs with "INLRMS" GM status) my ($lrms_ids) = @_; my %jobinfo; if ( (@{$lrms_ids})==0){ return %jobinfo; } # can the list of ids become too long for the shell? my $lrmsidstr = join(" ", @{$lrms_ids}); unless (open LLQOUT, "$path/llq -l -x $lrmsidstr |") { error("Error in executing llq"); } my $jobid; my $skip=0; while () { # Discard trailing information separated by a newline if (/job step\(s\) in queue, /) { last; } # Discard header lines if (/^===/) { $skip=0; next; } # Skip all lines of extra info if (/^--------------------------------------------------------------------------------/) { $skip=!$skip; next; } if ($skip) { next; } chomp; # Create variables using text before colon, trimming whitespace on both sides and replacing white space with _ my ($par, $val) = split/: */,$_,2; $par =~ s/^\s+//; $par =~ s/\s+$//; $par =~ s/\s/_/g; # Assign variables if ($par eq 'Job_Step_Id') { $jobid = $val; next; } $jobinfo{$jobid}{$par} = $val; } close LLQOUT; return %jobinfo; } ############################################ # Public subs ############################################# sub cluster_info ($) { # Path to LRMS commands my ($config) = shift; my ($path) = $$config{ll_bin_path}; my (%lrms_cluster); # lrms_type $lrms_cluster{lrms_type} = "LoadLeveler"; # lrms_version my $status_string=`$path/llstatus -v`; if ( $? != 0 ) { warning("Can't run llstatus"); } $status_string =~ /^\S+\s+(\S+)/; $lrms_cluster{lrms_version} = $1; # LL tracks total cpu time but the cpu time limit that the user asked is # first scaled up with the number of requested job slots before enforcing # the cpu time limit. 
Effectively the cpu time limit is the maxmum average # per-slot cputime $lrms_cluster{has_total_cputime_limit} = 0; my ($ll_consumable_resources) = $$config{ll_consumable_resources}; if ($ll_consumable_resources ne "yes") { # totalcpus $lrms_cluster{totalcpus} = 0; $lrms_cluster{cpudistribution} = ""; my %cpudist = get_cpu_distribution($path); my $sep = ""; foreach my $key (keys %cpudist) { $lrms_cluster{cpudistribution} .= $sep.$key."cpu:".$cpudist{$key}; if (!$sep) { $sep = " "; } $lrms_cluster{totalcpus} += $key * $cpudist{$key}; } # Simple way to find used cpus (slots/cores) by reading the output of llstatus $lrms_cluster{usedcpus} = get_used_cpus($path); } else { # Find used / max CPUs from cconsumable resources my @dist = consumable_distribution($path,"ConsumableCpus"); my @cpu_total = consumable_total(@dist); $lrms_cluster{cpudistribution} = cpudist2str(@dist); $lrms_cluster{totalcpus} = $cpu_total[1]; $lrms_cluster{usedcpus} = $cpu_total[0]; } my %jobstatus = get_short_job_info($path,""); # Here waiting actually refers to jobsteps $lrms_cluster{queuedcpus} = $jobstatus{waiting}; # TODO: this is wrong, but we are not worse off than earlier # we should count jobs, not cpus $lrms_cluster{runningjobs} = $jobstatus{running}; $lrms_cluster{queuedjobs} = $jobstatus{waiting}; $lrms_cluster{queue} = [ ]; return %lrms_cluster; } sub queue_info ($$) { # Path to LRMS commands my ($config) = shift; my ($path) = $$config{ll_bin_path}; # Name of the queue to query my ($queue) = shift; my %long_queue_info = get_long_queue_info($path,$queue); my %jobstatus = get_short_job_info($path,$queue); # Translate between LoadLeveler and ARC $lrms_queue{status} = $long_queue_info{$queue}{'Free_slots'}; # Max_total_tasks seems to give the right queue limit #$lrms_queue{maxrunning} = $long_queue_info{$queue}{'Max_total_tasks'}; # Maximum_slots is really the right parameter to use for queue limit $lrms_queue{maxrunning} = $long_queue_info{$queue}{'Maximum_slots'}; $lrms_queue{maxqueuable} = ""; $lrms_queue{maxuserrun} = $lrms_queue{maxrunning}; # Note we use Wall Clock! $_ = $long_queue_info{$queue}{'Wall_clock_limit'}; if (/\((.*) seconds,/) { $lrms_queue{maxcputime} = int($1 / 60); } $lrms_queue{maxwalltime} = $lrms_queue{maxcputime}; # There is no lower limit enforced $lrms_queue{mincputime} = 0; $lrms_queue{minwalltime} = 0; # LL v3 has Def_wall... and LL v5 has Default_wall... $_ = $long_queue_info{$queue}{'Def_wall_clock_limit'}; if (! 
defined $_ || $_ eq ""){ $_ = $long_queue_info{$queue}{'Default_wall_clock_limit'}; } if (/\((.*) seconds,/) { $lrms_queue{defaultcput} = int($1 / 60); } $lrms_queue{defaultwallt}= $lrms_queue{defaultcput}; $lrms_queue{running} = $jobstatus{running}; # + $jobstatus{held} + $jobstatus{preempted}; $lrms_queue{queued} = $jobstatus{waiting}; # $lrms_queue{totalcpus} = $long_queue_info{$queue}{'Max_processors'}; $lrms_queue{totalcpus} = $long_queue_info{$queue}{'Maximum_slots'}; return %lrms_queue; } sub jobs_info ($$$) { # Path to LRMS commands my ($config) = shift; my ($path) = $$config{ll_bin_path}; # Name of the queue to query my ($queue) = shift; # LRMS job IDs from Grid Manager (jobs with "INLRMS" GM status) my ($lrms_ids) = @_; my (%lrms_jobs); my %jobinfo = get_long_job_info($path,$lrms_ids); foreach my $id (keys %jobinfo) { $lrms_jobs{$id}{status} = "O"; if ( $jobinfo{$id}{Status} eq "Running" ) { $lrms_jobs{$id}{status} = "R"; } if ( ($jobinfo{$id}{Status} eq "Idle") || ($jobinfo{$id}{Status} eq "Deferred") ) { $lrms_jobs{$id}{status} = "Q"; } if ( ($jobinfo{$id}{Status} eq "Completed") || ($jobinfo{$id}{Status} eq "Canceled") || ($jobinfo{$id}{Status} eq "Removed") || ($jobinfo{$id}{Status} eq "Remove Pending") || ($jobinfo{$id}{Status} eq "Terminated") ) { $lrms_jobs{$id}{status} = "E"; } if ( ($jobinfo{$id}{Status} eq "System Hold") || ($jobinfo{$id}{Status} eq "User Hold") || ($jobinfo{$id}{Status} eq "User and System Hold") ) { $lrms_jobs{$id}{status} = "S"; } if ( ($jobinfo{$id}{Status} eq "Checkpointing") ) { $lrms_jobs{$id}{status} = "O"; } $lrms_jobs{$id}{mem} = -1; my $dispt = `date +%s -d "$jobinfo{$id}{Dispatch_Time}\n"`; chomp $dispt; $lrms_jobs{$id}{walltime} = POSIX::ceil((time() - $dispt) /60); # Setting cputime, should be converted to minutes $lrms_jobs{$id}{cputime} = 0; if (defined $jobinfo{$id}{Step_Total_Time}) { my (@cput) = split(/:/,$jobinfo{$id}{Step_Total_Time}); my (@cpudh) = split(/\+/,$cput[0]); if (@cpudh == 2){ $cput[0]= 24*$cpudh[0] + $cpudh[1]; } $lrms_jobs{$id}{cputime} = int($cput[0]*60 + $cput[1] + $cput[2]/60) if (@cput); } if ($jobinfo{$id}{Wall_Clk_Hard_Limit} =~ / \(([0-9]*) seconds\)/) { $lrms_jobs{$id}{reqwalltime} = int($1 / 60); } $lrms_jobs{$id}{reqcputime} = $lrms_jobs{$id}{reqwalltime}; $lrms_jobs{$id}{comment} = [ "LRMS: $jobinfo{$id}{Status}" ]; if (defined $jobinfo{$id}{Allocated_Host} && $jobinfo{$id}{Allocated_Host} ne "") { $lrms_jobs{$id}{nodes} = ["$jobinfo{$id}{Allocated_Host}"]; } elsif (defined $jobinfo{$id}{Allocated_Hosts} && $jobinfo{$id}{Allocated_Hosts} ne "") { $lrms_jobs{$id}{nodes} = ["$jobinfo{$id}{Allocated_Hosts}"]; } else { $lrms_jobs{$id}{nodes} = []; } $lrms_jobs{$id}{rank} = -1; $lrms_jobs{$id}{cpus} = 0; $lrms_jobs{$id}{cpus} = $jobinfo{$id}{Step_Cpus}; } return %lrms_jobs; } sub users_info($$@) { my ($config) = shift; my ($path) = $$config{ll_bin_path}; my ($qname) = shift; my ($accts) = shift; my (%lrms_users); if ( ! exists $lrms_queue{status} ) { %lrms_queue = queue_info( $path, $qname ); } # Using simple estimate. Fair-share value is only known by Administrator. 
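# Hedged numeric example (values invented): if the queue reports status (Free_slots) = 12
# and queued = 3, every account in the loop below is published with freecpus = 12 and
# queuelength = 3.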
foreach my $u ( @{$accts} ) { $lrms_users{$u}{freecpus} = $lrms_queue{status}; $lrms_users{$u}{queuelength} = "$lrms_queue{queued}"; } return %lrms_users; } 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/glite-info-provider-ldap0000644000000000000000000000012411530702713030241 xustar000000000000000027 mtime=1298367947.260939 27 atime=1513200575.832718 30 ctime=1513200663.164786192 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/glite-info-provider-ldap0000644000175000002070000002312111530702713030305 0ustar00mockbuildmock00000000000000#!/usr/bin/perl -w #Copyright (c) Members of the EGEE Collaboration. 2004. #See http://www.eu-egee.org/partners/ for details on the copyright holders. # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License use strict; use LWP::Simple; use POSIX; use IO::Handle; use Getopt::Std; use File::Basename; use File::Path; use File::stat; use File::Copy; use vars qw( $debug ); # Print the usage message sub usage(){ print STDERR "Usage: $0 -c [-m ] [-v validity]" . " -[d dynamic] [-t timeout] [-s size] [-g site/region]\n"; print STDERR ' This information provider takes a list of LDAP urls from the configuration file and queries the LDAP sources. It then merges the results and modifies the dn to append the mds-vo-name as specified on the command line. The configuration file has lines typically with the following format: entity ldap://host.domain:2170/mds-vo-name=something,o=grid Comments start with "#" and are ignored. Options: -c The configuration file listing all the LDAP URLs. -m The mds-vo-name which should be used. -h Displays this helpful message. -d This option will change the search filter to only retrieve the dynamic information. (currently not supported) -g This option will modify the search endpoint to obtain glue 2.0 information and dynamically modify the dn if value is not "none". -t The timeout of the ldapsearch in seconds. -v The validity of the cache files in seconds. -s Maximum file size in megabytes for a single source. '; exit 1; } # Parse the command line options my %options=(); getopts("v:dg:hm:c:t:s:",\%options) or usage(); # Set configuration file my $config; if ($options{c}){ $config=$options{c}; }else{ usage(); exit 1; } # Set value for mds-vo-name my $name; if ($options{m}){ $name=$options{m}; }elsif ($options{g}){ $name=$options{g}; } if ($options{h}){ usage(); exit 1; } # Select Filter, all Glue entries or only dynamic CE entries my $filter; if ($options{d}) { $filter = "'(|(objectClass=GlueVOView)(objectClass=GlueCE))'" . " GlueCEStateRunningJobs GlueCEStateWaitingJobs" . " GlueCEStateTotalJobs GlueCEStateFreeJobSlots" . " GlueCEStateEstimatedResponseTime GlueCEStateWorstResponseTime"; }else{ $filter = "'(|(objectClass=GlueTop)(objectClass=MDS)" . "(objectClass=GlueClusterTop)(objectClass=GlueCETop)" . "(objectClass=GlueGeneralTop)(objectClass=GlueSETop)" . "(objectClass=GlueSATop))'"; } # Set ttl for cache or set ttl to 0 if using dynamic option my $ttl; if ($options{v}){ if ( ! 
int($options{v})){ usage(); }; $ttl=int($options{v}); if ($options{d}){ print STDERR "Error: Can not use ttl option with dynamic option\n"; exit 1; } }else{ $ttl=600; } # Set timeout for the ldapsearch my $timeout=30; if ($options{t}){ if ( ! int($options{t})){ usage(); }; $timeout=int($options{t}); } # Set the maximum file size. my $max_file_size=10; if ($options{s}){ if ( ! int($options{s})){ usage(); }; $max_file_size=int($options{s}); } # Figure out the location of the glite var directory my $var_dir; if (exists($ENV{GLITE_LOCATION_VAR})) { $var_dir = "$ENV{GLITE_LOCATION_VAR}"; } elsif (exists($ENV{GLITE_LOCATION})) { $var_dir = "$ENV{GLITE_LOCATION}/var"; } else { $var_dir = "/opt/glite/var"; } # Figure out the location of the glite etc directory my $etc_dir; if (exists($ENV{GLITE_LOCATION})) { $etc_dir = "$ENV{GLITE_LOCATION}/etc"; } else { $etc_dir = "/opt/glite/etc"; } my @ldif; # Get the ldap urls from the configuration file. my @urls; open (CONFIG, "$config" ) || die "Couldn't open config file $config $!\n"; while () { s/\#.*//; if (m/\s+ldap:\/\//){ push @urls, $_; } } close (CONFIG); # Prepare the temporary directory my $file_name=fileparse($config); if ($options{g}){ $file_name=$file_name . '-glue2' } my $tmp_dir = "$var_dir/tmp/gip/$file_name"; my $cache_dir = "$var_dir/cache/gip/$file_name"; if( -d $tmp_dir ){ my @files=glob("$tmp_dir/*.ldif"); foreach my $file (@files) { unlink($file); } }else{ mkpath($tmp_dir); } if( ! -d $cache_dir ){ mkpath($cache_dir); } my $date = `date +\%Y-\%m-\%d`; chomp $date; my $log_dir = "$var_dir/tmp/gip/log/$file_name"; my $log_file = "$log_dir/$file_name-$date.log"; if( ! -d $log_dir ){ mkpath($log_dir); } # Remove old log files my $log_file_retention=60; # in number of days my @log_files=glob("$log_dir/$file_name*.log"); my $current_time=time; my $file_info; my $file_time; foreach(@log_files){ $file_info = stat($_); if($file_info){ $file_time=$file_info->mtime; if ( $current_time > ($file_time + ($log_file_retention * 24 * 3600)) ){ unlink($_); } } } open(LOGFILE, ">> $log_file") or die "$0: cannot open '$log_file': $!\n"; my $fh = select(LOGFILE); $| = 1; select($fh); my $start_time = `date '+\%Y-\%m-\%d \%T'`; chomp $start_time; my $t0 = time; print LOGFILE "=== $start_time START\n"; my @pid; # The pids of the forked processes. my $pid; # A pid for one process. my $region; # A region name for the ldif source. my $bind; # The bind point for the search. my $command; # A command that will return ldif. my $bunch=10; my $delay=5; my $n=0; #Loop through for each ldif source foreach(@urls){ if ($n++ >= $bunch) { my $child = 0; eval { local $SIG{ALRM} = sub { die "timeout" }; alarm($delay); $child = wait(); alarm(0); }; if (!$child) { # The active commands may all be hanging, # so launch another bunch... $n = 0; } else { $n--; } } # Split the information from the url. if (m|^([-.\w]+)\s+ldap://(.+):([0-9]+)/(.+)|) { $region = $1; if ($options{g}){ my $site = $4; $site =~s/,o=grid$//; $site =~s/^.*=//; if ( $site eq 'resource'){ $bind = 'GLUE2GroupID=resource,o=glue'; }else{ $bind = "GLUE2DomainId=$site,o=glue"; } $filter = '' }else{ $bind = $4; } $command = "ldapsearch -x -LLL -h $2 -p $3 -b $bind " . $filter . "> $tmp_dir/$region.ldif 2>$tmp_dir/$region.err"; } else { chomp; print LOGFILE "ignoring badly formatted line: '$_'\n"; next; } # Fork the search. if (!defined($pid=fork)) { print LOGFILE "cannot fork: $!\n"; next; } unless ($pid) { # Set our process group to a distinct value. 
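# Hedged sketch of the pattern used below (the variable names are the real
# ones from this script, the flow is condensed): the child makes itself a
# process-group leader, runs ldapsearch under an ALRM-guarded eval, and on
# timeout kills its whole process group so no ldapsearch is left hanging:
#
#   setpgrp();                            # become group leader
#   eval {
#       local $SIG{ALRM} = sub { die "GOT TIRED OF WAITING" };
#       alarm($timeout);
#       system($command);                 # ldapsearch ... > $tmp_dir/$region.ldif
#       alarm(0);
#   };
#   kill(-SIGKILL(), getpgrp()) if $@;    # timed out: wipe out the group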
setpgrp(); my $msg = "GOT TIRED OF WAITING"; # Eval will kill the process if it times out. eval { local $SIG{ALRM} = sub { die "$msg" }; alarm ($timeout); #Will call alarm after the timeout. if (system("$command") != 0) { unlink("$tmp_dir/$region.ldif"); }else{ unlink("$tmp_dir/$region.err"); } alarm(0); # Cancel the pending alarm if responds. }; # This section is executed if the process times out. if ($@ =~ /$msg/) { unlink("$tmp_dir/$region.ldif"); system("echo Search timed out >> $tmp_dir/$region.err"); my $PGRP=getpgrp(); kill (-SIGKILL(), $PGRP); exit 1; } exit 0; } push @pid, $pid; } foreach(@pid){ waitpid($_, 0); } my $end_time = `date '+\%Y-\%m-\%d \%T'`; chomp $end_time; my $dt = sprintf "%4d", time - $t0; print LOGFILE "=== $end_time END - elapsed: $dt\n"; for (<$tmp_dir/*.err>){ $region=$_; $region=~s/.*\///; $region=~s/\.err$//; print LOGFILE "--> $region:\n"; system("cat $_ >> $log_file"); print LOGFILE "\n"; unlink($_); } my $file_size; for (<$tmp_dir/*.ldif>){ $file_info = stat($_); if($file_info){ $region=$_; $region=~s/.*\///; $region=~s/\.ldif$//; $file_size = $file_info->size; # Protection for too much data coming from a site. if ( $file_size > ($max_file_size * 1000000) ){ print LOGFILE "ERROR: $region is producing too much data!\n"; unlink($_); }else{ move($_, "$cache_dir/$region.ldif"); } } } # Delete old files from the cache for (<$cache_dir/*.ldif>){ $file_info = stat($_); $file_time=$file_info->mtime; if ( $current_time > ($file_time + $ttl) ){ $region=$_; $region=~s/.*\///; $region=~s/\.ldif$//; print LOGFILE "NOTICE: deleting stale cache file for $region.\n"; unlink($_); } } my $rs = $/; $/ = ""; for (<$cache_dir/*.ldif>) { open(LDIF, $_) || warn "Cannot open '$_': $!\n"; while () { s/\n //g; # Remove line wrapping next if /^\s*$/; unless (/^dn:/) { chomp; s/(.{900}).*/$1\[...]/s; print LOGFILE "ERROR: Skipping junk:\n" . ('-' x 70) . "\n$_\n" . ('-' x 70) . 
"\n"; next; } my $dn = $_; $dn =~ s/\n.*//s; $dn =~ s/,\s+/,/g; if ($options{m}){ if ($dn =~ m/mds-vo-name=$name,/i) { print LOGFILE "ERROR: Skipping recursive entry '$dn'\n"; next; } $dn =~ s/mds-vo-name=(local|resource),o=grid\s*$/o=grid/i; $dn =~ s/o=grid\s*$/mds-vo-name=$name,o=grid/i; } if ($options{g}){ if ($dn =~ m/GLUE2DomainId=$name,/i) { print LOGFILE "ERROR: Skipping recursive entry '$dn'\n"; next; } if ( $name ne "none"){ $dn =~ s/o=glue/GLUE2DomainId=$name,o=glue/i; } } s/[^\n]*/$dn/; if ($name ne "local" ){ s/mds-vo-name:[^\n]*\n/mds-vo-name: $name\n/i; } print; } close LDIF; } $/ = $rs; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/HostInfo.pm0000644000000000000000000000012313153455074025611 xustar000000000000000027 mtime=1504598588.406979 27 atime=1513200575.832718 29 ctime=1513200663.14578596 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/HostInfo.pm0000644000175000002070000003261713153455074025670 0ustar00mockbuildmock00000000000000package HostInfo; use POSIX; use Sys::Hostname; use Time::Local; use strict; BEGIN { eval {require Time::HiRes; import Time::HiRes "time"}; } use Sysinfo; use LogUtils; use InfoChecker; our $host_options_schema = { x509_user_cert => '*', x509_cert_dir => '*', wakeupperiod => '*', processes => [ '' ], localusers => [ '' ], control => { '*' => { sessiondir => [ '' ], cachedir => [ '*' ], remotecachedir => [ '*' ], cachesize => '*' } }, remotegmdirs => [ '*' ] }; our $host_info_schema = { hostname => '', osname => '*', # see OSName_t, GFD.147 osversion => '*', # see OSName_t, GFD.147 sysname => '', # uname -s release => '', # uname -r machine => '', # uname -m (what would it be on a linux machine) cpuvendor => '*', cpumodel => '*', cpufreq => '*', # unit: MHz cpustepping => '*', pmem => '*', # unit: MB vmem => '*', # unit: MB cputhreadcount=> '*', cpucorecount => '*', cpusocketcount=> '*', issuerca => '', issuerca_hash => '', issuerca_enddate => '*', issuerca_expired => '*', hostcert_enddate => '*', hostcert_expired => '*', trustedcas => [ '' ], session_free => '', # unit: MB session_total => '', # unit: MB cache_free => '', # unit: MB cache_total => '', # unit: MB globusversion => '*', processes => { '*' => '' }, gm_alive => '', localusers => { '*' => { gridareas => [ '' ], diskfree => '' # unit: MB } }, EMIversion => [ '' ] # taken from /etc/emi-version if exists }; our $log = LogUtils->getLogger(__PACKAGE__); { my ($t0, $descr); sub timer_start($) { $descr = shift; $t0 = time(); } sub timer_stop() { my $dt = sprintf("%.3f", time() - $t0); $log->debug("Time spent $descr: ${dt}s"); } } sub collect($) { my ($options) = @_; my ($checker, @messages); $checker = InfoChecker->new($host_options_schema); @messages = $checker->verify($options); $log->warning("config key options->$_") foreach @messages; $log->fatal("Some required options are missing") if @messages; my $result = get_host_info($options); $checker = InfoChecker->new($host_info_schema); @messages = $checker->verify($result); $log->debug("SelfCheck: result key hostinfo->$_") foreach @messages; return $result; } # private subroutines # Obtain the end date of a certificate (in seconds since the epoch) sub enddate { my ($openssl, $certfile) = @_; # assuming here that the file exists and is a well-formed certificate. chomp (my $stdout=`$openssl x509 -noout -enddate -in '$certfile'`); return undef if $?; my %mon = (Jan=>0,Feb=>1,Mar=>2,Apr=>3,May=>4,Jun=>5,Jul=>6,Aug=>7,Sep=>8,Oct=>9,Nov=>10,Dec=>11); if ($stdout =~ m/notAfter=(\w{3}) ?(\d\d?) 
(\d\d):(\d\d):(\d\d) (\d{4}) GMT/ and exists $mon{$1}) { return timegm($5,$4,$3,$2,$mon{$1},$6); } else { $log->warning("Unexpected -enddate from openssl for $certfile"); return undef; } } # Hostcert, issuer CA, trustedca, issuercahash, enddate ... sub get_cert_info { my ($options, $globusloc) = @_; my $host_info = {}; if (not $options->{x509_user_cert}) { $log->info("x509_user_cert not configured"); return $host_info; } # find an openssl my $openssl = ''; for my $path (split ':', "$ENV{PATH}:$globusloc/bin") { $openssl = "$path/openssl" and last if -x "$path/openssl"; } $log->error("Could not find openssl command") unless $openssl; # Inspect host certificate my $hostcert = $options->{x509_user_cert}; chomp (my $issuerca = `$openssl x509 -noout -issuer -nameopt oneline -in '$hostcert'`); if ($?) { $log->warning("Failed processing host certificate file: $hostcert") if $?; } else { $issuerca =~ s/, /\//g; $issuerca =~ s/ = /=/g; $issuerca =~ s/^[^=]*= */\//; $host_info->{issuerca} = $issuerca; $host_info->{hostcert_enddate} = enddate($openssl, $hostcert); system("$openssl x509 -noout -checkend 3600 -in '$hostcert'"); $host_info->{hostcert_expired} = $? ? 1 : 0; $log->warning("Host certificate is expired in file: $hostcert") if $?; } if (not $options->{x509_cert_dir}) { $log->info("x509_cert_dir not configured"); return $host_info; } # List certs and elliminate duplication in case 2 soft links point to the same file. my %certfiles; my $certdir = $options->{x509_cert_dir}; opendir(CERTDIR, $certdir) or $log->error("Failed listing certificates directory $certdir: $!"); for (readdir CERTDIR) { next unless m/\.\d$/; my $file = $certdir."/".$_; my $link = -l $file ? readlink $file : $_; $certfiles{$link} = $file; } closedir CERTDIR; my %trustedca; foreach my $cert ( sort values %certfiles ) { chomp (my $ca_sn = `$openssl x509 -checkend 3600 -noout -subject -nameopt oneline -in '$cert'`); my $is_expired = $?; $ca_sn = (split(/\n/, $ca_sn))[0]; $ca_sn =~ s/, /\//g; $ca_sn =~ s/ = /=/g; $ca_sn =~ s/^[^=]*= */\//; if ($ca_sn eq $issuerca) { chomp (my $issuerca_hash = `$openssl x509 -noout -hash -in '$cert'`); if ($?) { $log->warning("Failed processing issuer CA certificate file: $cert"); } else { $host_info->{issuerca_hash} = $issuerca_hash || undef; $host_info->{issuerca_enddate} = enddate($openssl, $cert); $host_info->{issuerca_expired} = $is_expired ? 1 : 0; $log->warning("Issuer CA certificate is expired in file: $cert") if $is_expired; } } $log->warning("Certificate is expired for CA: $ca_sn") if $is_expired; $trustedca{$ca_sn} = 1 unless $is_expired; } $host_info->{trustedcas} = [ sort keys %trustedca ]; $log->warning("Issuer CA certificate file not found") unless exists $host_info->{issuerca_hash}; return $host_info; } # Returns 'all' if all grid-managers are up # 'some' if one or more grid-managers are down # 'none' if all grid-managers are down sub gm_alive { my ($timeout, @controldirs) = @_; my $up = 0; my $down = 0; for my $dir (@controldirs) { my @stat = stat("$dir/gm-heartbeat"); if (@stat and time() - $stat[9] < $timeout) { $up++; } else { $down++; } } return 'none' if not $up; return $down ? 
'some' : 'all'; } sub get_host_info { my $options = shift; my $host_info = {}; $host_info->{hostname} = hostname(); my $osinfo = Sysinfo::osinfo() || {}; my $cpuinfo = Sysinfo::cpuinfo() || {}; my $meminfo = Sysinfo::meminfo() || {}; $log->error("Failed querying CPU info") unless %$cpuinfo; $log->error("Failed querying OS info") unless %$osinfo; # Globus location my $globusloc = $ENV{GLOBUS_LOCATION} || "/usr"; if ($ENV{GLOBUS_LOCATION}) { if ($ENV{LD_LIBRARY_PATH}) { $ENV{LD_LIBRARY_PATH} .= ":$ENV{GLOBUS_LOCATION}/lib"; } else { $ENV{LD_LIBRARY_PATH} = "$ENV{GLOBUS_LOCATION}/lib"; } } timer_start("collecting certificates info"); my $certinfo = get_cert_info($options, $globusloc); timer_stop(); $host_info = {%$host_info, %$osinfo, %$cpuinfo, %$meminfo, %$certinfo}; my @controldirs; my $control = $options->{control}; push @controldirs, $_->{controldir} for values %$control; # Considering only common session disk space (not including per-user session directoires) my (%commongridareas, $commonfree); if ($control->{'.'}) { $commongridareas{$_} = 1 for map { my ($path, $drain) = split /\s+/, $_; $path; } @{$control->{'.'}{sessiondir}}; } # Also include remote session directoires. if (my $remotes = $options->{remotegmdirs}) { for my $remote (@$remotes) { my ($ctrldir, @sessions) = split ' ', $remote; $commongridareas{$_} = 1 for grep { $_ ne 'drain' } @sessions; push @controldirs, $ctrldir; } } if (%commongridareas) { my %res = Sysinfo::diskspaces(keys %commongridareas); if ($res{errors}) { $log->warning("Failed checking disk space available in session directories"); } else { $host_info->{session_free} = $commonfree = $res{freesum}; $host_info->{session_total} = $res{totalsum}; } } # calculate free space on the sessionsirs of each local user. my $user = $host_info->{localusers} = {}; foreach my $u (@{$options->{localusers}}) { # Are there grid-manager settings applying for this local user? if ($control->{$u}) { my $sessiondirs = [ map { my ($path, $drain) = split /\s+/, $_; $path; } @{$control->{$u}{sessiondir}} ]; my %res = Sysinfo::diskspaces(@$sessiondirs); if ($res{errors}) { $log->warning("Failed checking disk space available in session directories of user $u") } else { $user->{$u}{gridareas} = $sessiondirs; $user->{$u}{diskfree} = $res{freesum}; } } elsif (defined $commonfree) { # default for other users $user->{$u}{gridareas} = [ keys %commongridareas ]; $user->{$u}{diskfree} = $commonfree; } } # Considering only common cache disk space (not including per-user caches) if ($control->{'.'}) { my $cachedirs = $control->{'.'}{cachedir} || []; my $remotecachedirs = $control->{'.'}{remotecachedir} || []; my @paths = map { my @pair = split " ", $_; $pair[0] } @$cachedirs, @$remotecachedirs; if (@paths) { my %res = Sysinfo::diskspaces(@paths); if ($res{errors}) { $log->warning("Failed checking disk space available in common cache directories") } else { # What to publish as CacheFree if there are multiple cache disks? # Should be highWatermark factored in? # Opting to publish the least free space on any of the cache # disks -- at least this has a simple meaning and is useful to # diagnose if a disk gets full. $host_info->{cache_free} = $res{freemin}; # Only accurate if caches are on filesystems of their own $host_info->{cache_total} = $res{totalsum}; } } } my $gm_timeout = $options->{wakeupperiod} ? 
$options->{wakeupperiod} * 10 : 1800; $host_info->{gm_alive} = gm_alive($gm_timeout, @controldirs); #Globus Toolkit version #globuslocation/share/doc/VERSION my $globusversion; if (-r "$globusloc/share/doc/VERSION" ) { chomp ( $globusversion = `cat $globusloc/share/doc/VERSION 2>/dev/null`); if ($?) { $log->warning("Failed reading the globus version file")} } #globuslocation/bin/globus-version elsif (-x "$globusloc/bin/globus-version" ) { chomp ( $globusversion = `$globusloc/bin/globus-version 2>/dev/null`); if ($?) { $log->warning("Failed running $globusloc/bin/globus-version command")} } $host_info->{globusversion} = $globusversion if $globusversion; $host_info->{processes} = Sysinfo::processid(@{$options->{processes}}); # gets EMI version from /etc/emi-version if any. my $EMIversion; if (-r "/etc/emi-version") { chomp ( $EMIversion = `cat /etc/emi-version 2>/dev/null`); if ($?) { $log->warning("Failed reading EMI version file. Assuming non-EMI deployment. Install emi-version package if you're running EMI version of ARC")} } $host_info->{EMIversion} = [ 'MiddlewareName=EMI' , "MiddlewareVersion=$EMIversion" ] if ($EMIversion); return $host_info; } #### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST #### sub test { my $options = { x509_user_cert => '/etc/grid-security/hostcert.pem', x509_cert_dir => '/etc/grid-security/certificates', control => { '.' => { sessiondir => [ '/home', '/boot' ], cachedir => [ '/home' ], remotecachedir => [ '/boot' ], cachesize => '60 80', }, 'daemon' => { sessiondir => [ '/home', '/tmp' ], } }, remotegmdirs => [ '/dummy/control /home', '/dummy/control /boot' ], libexecdir => '/usr/libexec/arc', runtimedir => '/home/grid/runtime', processes => [ qw(bash ps init grid-manager bogous) ], localusers => [ qw(root bin daemon) ] }; require Data::Dumper; import Data::Dumper qw(Dumper); LogUtils::level('DEBUG'); $log->debug("Options:\n" . Dumper($options)); my $results = HostInfo::collect($options); $log->debug("Results:\n" . Dumper($results)); } #test; 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/RTEInfo.pm0000644000000000000000000000012412574532370025330 xustar000000000000000027 mtime=1441969400.372727 27 atime=1513200575.809717 30 ctime=1513200663.147785984 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/RTEInfo.pm0000755000175000002070000000311512574532370025400 0ustar00mockbuildmock00000000000000package RTEInfo; use warnings; use strict; use LogUtils; our $rte_options_schema = { pgkdatadir => '', configfile => '', runtimedir => '*', }; our $rte_info_schema = { '*' => { state => '', description => '*', } }; our $log = LogUtils->getLogger(__PACKAGE__); sub collect($) { my ($options) = @_; return {} unless $options->{runtimedir}; my $rtes = {}; add_static_rtes($options->{runtimedir}, $rtes); return $rtes; } sub add_static_rtes { my ($runtimedir, $rtes) = @_; unless (opendir DIR, $runtimedir) { $log->warning("Can't access runtimedir: $runtimedir: $!"); return; } closedir DIR; my $cmd = "find '$runtimedir' -type f ! -name '.*' ! 
-name '*~'"; unless (open RTE, "$cmd |") { $log->warning("Failed to run: $cmd"); return; } while (my $dir = ) { chomp $dir; $dir =~ s#$runtimedir/*##; $rtes->{$dir} = { state => 'installednotverified' }; } } #### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST #### sub test { my $options = { pkgdatadir => '/scratch/adrianta/arc1/share/arc', runtimedir => '/data/export/SOFTWARE/runtime', configfile => '/etc/arc.conf', }; require Data::Dumper; import Data::Dumper qw(Dumper); LogUtils::level('VERBOSE'); $log->debug("Options:\n" . Dumper($options)); my $results = RTEInfo::collect($options); $log->debug("Results:\n" . Dumper($results)); } #test; 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/PerfData.pl.in0000644000000000000000000000012713065017531026150 xustar000000000000000027 mtime=1490296665.191819 30 atime=1513200650.120626657 30 ctime=1513200663.168786241 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PerfData.pl.in0000755000175000002070000004534513065017531026230 0ustar00mockbuildmock00000000000000#!/usr/bin/perl -w package PerfData; ######################################################## # Script for performance collection # Requires the NYTProf profiler # Requires files generated after the execution of # CEinfo.pl # Generates output as requested by the NorduGrid # collaboration at # https://wiki.nordugrid.org/wiki/Logging_of_CE_performance_numbers # # # 20160414 metrics: # - lrmscalltiming, lrmstype, lrms full command, durationinseconds # - controldirreadtiming, all, directoryname, durationinseconds # - controldirreadtiming, file, filename, durationinseconds # - controldirreadtiming, job, jobid, durationinseconds # this can be derived by above numbers # # Author: Florido Paganelli florido.paganelli@hp.lu.se, NorduGrid Collaboration # ######################################################## use File::Basename; use Getopt::Long; use Sys::Hostname; use Data::Dumper::Concise; use Cwd; #use Devel::NYTProf::Data; #use Symbol; # The profiling tool might be missing in some distributions. # Default is to assume is not present. my $NYTPROF_PRESENT = 0; $ENV{NYTPROF} = "start=no:file=/tmp/arcnytproftmp"; $NYTPROF_PRESENT = eval { require Devel::NYTProf; 1; }; if ($NYTPROF_PRESENT) { # temporary workaround to reduce useless files written by # the profiler. Might be removed in the future # NOTE: comment lines out to profile/debug this script or profiling will # stop after the following line. DB::disable_profile(); DB::finish_profile(); unlink '/tmp/arcnytproftmp'; }; use strict; BEGIN { my $pkgdatadir = dirname($0); unshift @INC, $pkgdatadir; } # used for code parsing my $pkgdatadir = dirname($0); use ConfigCentral; use LogUtils; our $log = LogUtils->getLogger(__PACKAGE__); my $debuglevel; # default is to delete parsed nytprof files my $keepnytproffiles = 0; our $configfile; my $controldirpath = ''; # default performance log filename as agreed in the NG wiki # https://wiki.nordugrid.org/wiki/Logging_of_CE_performance_numbers#details.2C_agreements my $perflogfilename = 'infosys.perflog'; ############################################################# # Datastructures to hold metrics to collect # To add new metrics is enough to fill such datastructures # with the needed information. ############################################################# # TODO: # - controldirreadtiming, job, jobid, durationinseconds # candidate fine grained: raw file date, job, N/A: can NYTPROF get this runtime info? , sum of the above for each jobid? 
# There is no way to obtain JOB IDS from NYTProf. # This can be achieved only by adding additional info in the code. # candidate coarse grained: raw file date, job, any , sum of the above for just one call or NYTPROF block: foreach my $ID (@gridmanager_jobs) { # This can be derived by other numbers. But it requires summing times of all lines inside the for loop above, which is quite time consuming # and it will not give a much better number than the time to run getgmjobs / number of calls for each file ## GMJobsInfo.pm ####################################### # subs for gmjobs # Implements: # - controldirreadtiming, all, directoryname, durationinseconds # candidate: raw file date, all, $controlsubdir? , sub get_gmjobs # Can't get specific controlsubdir. Just showing aggregated data for now. # # NYTProf datastructure: GMJobsInfo::get_gmjobs => [ ?, ?, ?, external time, internaltime, pointer ] # we always take externaltime # my $gmjobssubs = { 'GMJobsInfo::get_gmjobs' => "controldirreadtiming,all,controldir" }; # code patterns to get line info for gmjobsinfo my $gmjobsinfopatterns = { '.local' => 'my @local_allines = ;', '.status' => 'my \(\$first_line\) = ;', '.failed' => 'read GMJOB_FAILED, \$chars, 1024;', '.grami' => 'while \(my \$line = \) {', '.description' => 'while \(my \$line = \) {', '.diag' => 'unless \( open \(GMJOB_DIAG, "<\$gmjob_diag"\) \) {' }; # mapping between info sources and output strings # # Implements: # - controldirreadtiming, file, filename, durationinseconds, number of calls # filename is missing as it is currently impossible to get from the profiler. # my $gmjobsinfometrics = { 'subs' => { %$gmjobssubs }, 'codepatterns' => { 'subprefix' => 'controldirreadtiming,file', 'patterns' => { %$gmjobsinfopatterns }, 'lines' => {}, # will contain calculated line numbers in source code for the above patterns 'params' => 'incl' } }; # Coarse grained LRMSInfo information # Implements: # not very interesting: candidate coarse grained: raw file date, LRMSInfo.pm, nofullcommand, # timing of line my $result = get_lrms_info($options); <-- same info in infoprovider.log, but maybe good for comparison? my $lrmsinfosubs = { 'LRMSInfo::collect' => 'lrmscalltiming,LRMSInfo.pm,collect', }; my $lrmsinfometrics = { 'subs' => { %$lrmsinfosubs }, }; # Module stuff for each LRMS, to be loaded depending on config # Implements: # - lrmscalltiming, lrmstype, lrms full command, durationinseconds # candidate fine grained: raw file date, lrmsmodulename, lrms full command?, sum of queueinfo and jobsinfo timing # # TODO: add lrms full command, must be extracted from code. 
Exact values will be missing ## fork my $forkmodsubs = { 'FORKmod::queue_info' => 'lrmscalltiming,fork,queue_info', 'FORKmod::jobs_info' => 'lrmscalltiming,fork,jobs_info' }; #my $forkmodpatterns = { # '.local' => 'my @local_allines = ;', #}; my $forkmodmetrics = { 'subs' => { %$forkmodsubs }, # 'codepatterns' => { # 'subprefix' => 'controldirreadtiming,file', # 'patterns' => { %$forkmodpatterns }, # 'lines' => {'*'}, # will contain calculated line numbers in source code for the above patterns # 'params' => 'incl' # } }; ## slurm my $slurmmodsubs = { 'SLURMmod::queue_info' => 'lrmscalltiming,slurm,queue_info', 'SLURMmod::jobs_info' => 'lrmscalltiming,slurm,jobs_info' }; #my $slurmmodpatterns = { # '.local' => 'my @local_allines = ;', #}; my $slurmmodmetrics = { 'subs' => { %$slurmmodsubs }, # 'codepatterns' => { # 'subprefix' => 'lrmscalltiming,slurm,command', # 'patterns' => { %$slurmmodpatterns }, # 'lines' => {'*'}, # will contain calculated line numbers in source code for the above patterns # 'params' => 'incl' # } }; ## Condor my $condorsubs = { 'Condor::queue_info' => 'lrmscalltiming,condor,queue_info', 'Condor::jobs_info' => 'lrmscalltiming,condor,jobs_info' }; #my $condorpatterns = { # '.local' => 'my @local_allines = ;', #}; my $condormetrics = { 'subs' => { %$condorsubs }, # 'codepatterns' => { # 'subprefix' => 'lrmscalltiming,condor,command', # 'patterns' => { %$condorpatterns }, # 'lines' => {'*'}, # will contain calculated line numbers in source code for the above patterns # 'params' => 'incl' # } }; ## PBS my $pbsmodsubs = { 'PBSmod::queue_info' => 'lrmscalltiming,pbs,queue_info', 'PBSmod::jobs_info' => 'lrmscalltiming,pbs,jobs_info' }; #my $pbspatterns = { # '.local' => 'my @local_allines = ;', #}; my $pbsmodmetrics = { 'subs' => { %$pbsmodsubs }, # 'codepatterns' => { # 'subprefix' => 'lrmscalltiming,pbs,command', # 'patterns' => { %$pbsmodpatterns }, # 'lines' => {'*'}, # will contain calculated line numbers in source code for the above patterns # 'params' => 'incl' # } }; ## SGE my $sgemodsubs = { 'SGEmod::queue_info' => 'lrmscalltiming,sge,queue_info', 'SGEmod::jobs_info' => 'lrmscalltiming,sge,jobs_info' }; #my $sgemodpatterns = { # '.local' => 'my @local_allines = ;', #}; my $sgemodmetrics = { 'subs' => { %$sgemodsubs }, # 'codepatterns' => { # 'subprefix' => 'lrmscalltiming,sge,command', # 'patterns' => { %$sgemodpatterns }, # 'lines' => {'*'}, # will contain calculated line numbers in source code for the above patterns # 'params' => 'incl' # } }; ## Metrics list ############################################ my $metrics = { 'modules' => { 'GMJobsInfo.pm' => { %$gmjobsinfometrics }, 'LRMSInfo.pm' => { %$lrmsinfometrics }, # These are loaded depending on config # 'FORKmod.pm' => { %$forkmodmetrics }, # 'SLURMmod.pm' => { %$slurmmodmetrics }, # 'Condor.pm' => { %$condormetrics }, # 'PBSmod.pm' => { %$pbsmodmetrics }, # 'SGEmod.pm' => { %$sgemodmetrics }, }, }; ############################################################ # Subroutines ############################################################ # Scans the modules datastucture and writes out NYTProf data sub getdatabymodule { my ($arcmodulefilename,$prefixstring,$profile) = @_; my $arcmoduledata = $metrics->{'modules'}{$arcmodulefilename}; # print Data::Dumper::Dumper($arcmoduledata); # TODO: check that profile contains relevant data # get performance data for given modules if (defined $arcmoduledata->{subs}) { my $modulesubstimes = {}; $modulesubstimes = getsubroutinedata($modulesubstimes,$arcmoduledata->{subs},$profile); 
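    # The loop below emits one CSV-style line per instrumented subroutine:
    # the prefix built in main() (timestamp, hostname, ARC version, "infosys"),
    # the metric string from the datastructures at the top of this script, and
    # the inclusive time NYTProf measured for that subroutine. Illustrative
    # lines only, with a made-up hostname and timing values:
    #
    #   2016-04-14T10:23:05Z,ce.example.org,5.4.2,infosys,lrmscalltiming,slurm,queue_info,0.42
    #   2016-04-14T10:23:05Z,ce.example.org,5.4.2,infosys,controldirreadtiming,all,controldir,1.8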
# output strings for my $subr (keys %{$arcmoduledata->{subs}}) { # print Data::Dumper::Dumper($arcmoduledata->{subs}); writeperf("$prefixstring,".$arcmoduledata->{subs}{$subr}.','.$modulesubstimes->{$subr}) if defined $modulesubstimes; } } # print lines information #print Data::Dumper::Dumper("lines before stats is: ".$arcmoduledata->{codepatterns}{lines}); if (defined $arcmoduledata->{codepatterns}) { printfiledata($arcmoduledata->{codepatterns}{lines},$arcmodulefilename,$profile,"$prefixstring,$arcmoduledata->{codepatterns}{subprefix}"); } } # adds to the input hash subroutine times sub getsubroutinedata { my ($modulesubstimes,$arcmodulesubs,$profile) = @_; my $subinfo; # get subroutine data for my $subroutine (keys %$arcmodulesubs) { # TODO: protect from missing data $subinfo = $profile->subinfo_of($subroutine); if (defined $subinfo) { # Suggested by Tim Bunce: $modulesubstimes->{$subroutine} = $subinfo->incl_time; } else { $modulesubstimes->{$subroutine} = '0,0'; } } return $modulesubstimes; } # get stats for selected lines of code. Prints file data per line # Fills the lines hash in the datastructure sub printfiledata { my ($linenumbers,$perlmodulefilename,$profile,$prefixstring) = @_; # get file data my $fileinfo = $profile->fileinfo_of($perlmodulefilename); my $linesinfo = $fileinfo->line_time_data; # [0] is the time for a call, [1] the number of calls for my $filetype (keys %{$linenumbers}) { my $line = $linenumbers->{$filetype}; if (defined @{$linesinfo}[$line]) { my $line_time = @{@{$linesinfo}[$line]}[0]; my $line_calls = @{@{$linesinfo}[$line]}[1]; writeperf("$prefixstring,$filetype,$line_time,$line_calls"); } } } # get code line numbers # taking them from the datastructure at the top sub getlinenumbers { # calculate line numbers for each pattern # this could be static, but allows the code to change independently foreach my $module (keys %{$metrics->{'modules'}}) { my $modulehash = $metrics->{'modules'}{$module}; if (defined $modulehash->{codepatterns}) { open ( SF , "$pkgdatadir/$module" ) or $log->error("$!"); while ( my $fileline = ) { for my $filetype (keys %{$modulehash->{codepatterns}{patterns}}) { my $pattern = $modulehash->{codepatterns}{patterns}{$filetype}; if ($fileline =~ /$pattern/ ) { $modulehash->{codepatterns}{lines}{$filetype} = $.; } } # TODO: cycle through lines to check that values have been defined, # otherwise there might be an error in the patterns #$log->warning("Pattern $pattern for module $module not found. Please recheck codepatterns datastructure in PerfData.pl") unless (defined $modulehash->{codepatterns}{lines}{$filetype}); }; close (SF); #print Data::Dumper::Dumper($metrics->{modules}{$module}{'codepatterns'}); }; }; } # Writes to file the performance information. # structure is not checked here, currently it must be enforced by the # various functions creating the output message. sub writeperf { my ($msg) = @_; open ( my $filehandle, ">>", $perflogfilename) || $log->error("Cannot write to $perflogfilename, exiting"); print $filehandle "$msg\n"; close $filehandle; } # used to remove files in the nytprof db folder sub deletefiles { my ($dbfilefullpath) = @_; unless ($keepnytproffiles) { $log->verbose("deleting file $dbfilefullpath"); $log->warning("Cannot delete file $dbfilefullpath: $!") unless unlink $dbfilefullpath; } } ############################################################ # Main ############################################################ sub main { $log->error('Devel::NYTProf not present. Perfomance files generaton cannot continue. 
') unless ($NYTPROF_PRESENT); # Parse command line options my $print_help; my $testfilename; GetOptions("config:s" => \$configfile, "testfilename|test|t:s" => \$testfilename, "debuglevel|d:s" => \$debuglevel, "keepnytproffiles|k!" => \$keepnytproffiles, "help|h" => \$print_help ); if ($print_help) { print " This script loads a set of NYTProf databases and extracts relevant data for ARC information system as specified in https://wiki.nordugrid.org/wiki/Logging_of_CE_performance_numbers . Usage: $0 Options: --config - full path to arc.conf --testfilename|test|t - filename to use for testing. If not specified all files in the performance folder will be scanned (default) --debuglevel|d - debug level as one of ARC {FATAL|ERROR|WARNING|INFO|VERBOSE|DEBUG}. Default is INFO --keepnytproffiles|k - if enabled, the script will not delete nytprof files in perfdata/perl_nytprof. Default is to wipe out the processed ones to save space. --help - this help\n"; exit 1; } $log->error("--config argument is missing, see --help ") unless ( $configfile ); # Read ARC configuration my $perflogdir = ConfigCentral::getValueOf($configfile,'common','perflogdir'); $perflogdir ||= ($perflogdir) ? $perflogdir : '/var/log/arc/perfdata'; my $hostname = ConfigCentral::getValueOf($configfile,'common','hostname'); my $arcversion = '@VERSION@'; $controldirpath = ConfigCentral::getValueOf($configfile,'control','controldir'); # get lrms info and add relevant metrics to datastructure my $lrms = ConfigCentral::getValueOf($configfile,'common','lrms'); if ($lrms eq 'fork') { $metrics->{'modules'}{'FORKmod.pm'} = { %$forkmodmetrics }; }; if ($lrms =~ /slurm/i) { $metrics->{'modules'}{'SLURMmod.pm'} = { %$slurmmodmetrics }; }; if ($lrms =~ /pbs/i) { $metrics->{'modules'}{'PBSmod.pm'} = { %$pbsmodmetrics }; }; if ($lrms =~ /condor/i) { $metrics->{'modules'}{'Condor.pm'} = { %$condormetrics }; }; $debuglevel ? 
LogUtils::level($debuglevel) : LogUtils::level('INFO'); LogUtils::timestamps(1); $log->verbose('--keepnytproffiles option detected, db files will not be deleted') if ($keepnytproffiles); # calculate line numbers for each pattern # this could be static, but allows the code to change independently getlinenumbers(); #print Data::Dumper::Dumper($config); $log->info("Performance folder: ".$perflogdir); # set performance outputfile # timestamp not needed anymore but I will keep the code for now #my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(); #my $timestamp=POSIX::strftime("%Y%m%d%H%M%S", $sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst); #$perflogfilename = 'infosys_'.$timestamp.'_'.$perflogfilename; $perflogfilename = $perflogdir.'/'.$perflogfilename; $log->info("Performance file will be created: ".$perflogfilename); # open nytprof database files in the folder and save their names my $nytprofperflogdir = $perflogdir.'/perl_nytprof'; $log->info("NYTProf databases folder: $nytprofperflogdir"); unless (opendir PERFDIR, $nytprofperflogdir ) { $log->error("Can't access the nytprof perfdata directory: $nytprofperflogdir"); } my @dbfiles = (); if (defined $testfilename) { push @dbfiles,$testfilename; } else { @dbfiles = grep /infosys\_\d{14}.perflog\.raw/, readdir PERFDIR; closedir PERFDIR; # remove last file as it is usually incomplete @dbfiles = sort @dbfiles; my $lastfile = pop @dbfiles; $log->debug("Skipping $nytprofperflogdir/$lastfile as it might be open by CEInfo.pl/NYTProf"); } # get some files to scan stats my $totalfilestoscan = @dbfiles; $log->info("Files to scan: $totalfilestoscan"); my $processedfiles = 0; # for each file extract relevant calls. foreach my $dbfile (@dbfiles) { #my $dbfile = 'infosys_20160704182917.perflog.raw'; $processedfiles++; my $dbfilefullpath = $nytprofperflogdir.'/'.$dbfile; $log->verbose("Processing: $dbfilefullpath , $processedfiles of $totalfilestoscan"); # Hack to solve NYTProf memory overflow. A circular reference in the # $profile datastructure prevents the garbage collector to cleanup. # in this way each file is processed in a child process that # forces the garbage collector to cleanup on exit. my $pid = fork(); log->error('Cannot fork NYTProf scanning, exiting...') unless (defined $pid); ## child code if( $pid == 0 ){ $log->debug("Starting helper process for $dbfilefullpath"); use Devel::NYTProf::Data; my $profile = Devel::NYTProf::Data->new( { filename => $dbfilefullpath, quiet => 1 } ); #$profile->dump_profile_data(); # Prefix for performance strings. my $prefixstring = ''; my $rawtimestamp =''; if ( $dbfile =~ /infosys\_(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})\.perflog\.raw/ ) { # Format POSIX style $rawtimestamp = "$1-$2-$3".'T'."$4:$5:$6".'Z'; }; $prefixstring = "$rawtimestamp,$hostname,$arcversion,infosys"; # cycle throught datastructure for my $module (keys %{$metrics->{modules}}) { getdatabymodule($module,$prefixstring,$profile); } exit 0; # children exits here } # parent code # waits for child to exit waitpid ($pid, 0); # delete processed file deletefiles($dbfilefullpath); } $log->info("$processedfiles of $totalfilestoscan processed. 
Results (if any) written to: $perflogfilename"); exit; } main; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/IniParser.pm0000644000000000000000000000012412116113676025752 xustar000000000000000027 mtime=1362663358.222032 27 atime=1513200575.836718 30 ctime=1513200663.140785899 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/IniParser.pm0000644000175000002070000002562412116113676026030 0ustar00mockbuildmock00000000000000package IniParser; use strict; use warnings; # Configuration parser classes for arc.conf ###### IniParser # # Synopsis: # # use IniParser; # # my $parser = IniParser->new("/etc/arc.conf") # or die 'Cannot parse config file'; # # my %common = $parser->get_section('common'); # get hash with all options in a section # my %queue = $parser->get_section('queue/atlas'); # # print $parser->list_subsections('gridftpd'); # list all subsections of 'gridftpd', but not # # the 'gridftpd' section itself # # my %gmopts = $parser->get_section('grid-manager'); # gm options which are not user-specific # my %useropts = $parser->get_section('grid-manager/.'); # gm options for the default user (this # # section is instantiated automatically # # if the controldir command was used) # # The [grid-manager] section is treated specially. Options that are # user-specific are put in separate pseudo-sections [grid-manager/]. # reffers to the user that is initiated by a 'control' command. The # 'controldir' command initiates user '.'. Each pseudo-section has it's own # 'controldir' option. Other user-specific options are: 'sessiondir', 'cachedir', # 'remotecachedir', 'cachesize', 'cachelifetime', 'norootpower', 'maxrerun', # 'maxtransferfiles' and 'defaultttl'. No substituions are made and user names # '.' and '*' are not handled specially. # ###### SubstitutingIniParser # # Synopsis: # # use IniParser; # # my $parser = SubstitutingIniParser->new("/etc/arc.conf") # or die 'Cannot parse config file'; # # This class is just like IniParser, but substitutions are made and sections # for user names like @filename are expanded into separate sections for each # individual user. # sub new($$) { my ($this,$arcconf) = @_; my $class = ref($this) || $this; open(my $fh, "< $arcconf") || return undef; my $self = { config => {} }; bless $self, $class; $self->{config} = _parse($fh); close($fh); return $self; } # Expects the filename of the arc.conf file. # Returns false if it cannot open the file. sub _parse($) { my ($fh) = @_; my $config = {}; # current section my $section = Section->new('common'); while (my $line =<$fh>) { # handle runaway LF in CRLF and LFCR $line =~ s/^\r//; $line =~ s/\r$//; # skip comments and empty lines next if $line =~/^\s*;/; next if $line =~/^\s*#/; next if $line =~/^\s*$/; # new section starts here if ($line =~ /^\s*\[([\w\-\.\/]+)\]\s*$/) { my $sname = $1; $section->register($config); if ($sname =~ m/^vo/) { $section = SelfNamingSection->new($sname,'id'); } elsif ($sname =~ m/^group/) { $section = SelfNamingSection->new($sname,'name'); } elsif ($sname =~ m/^queue/) { $section = SelfNamingSection->new($sname,'name'); } elsif ($sname eq 'grid-manager') { $section = GMSection->new($sname); } else { $section = Section->new($sname); } # single or double quotes can be used. 
Quotes are removed from the values } elsif ($line =~ /^(\w+)\s*=\s*(["']?)(.*)(\2)\s*$/) { my ($opt,$val) = ($1,$3); $section->add($opt,$val); # bad line, ignore it for now } else { } } $section->register($config); delete $config->{common} unless %{$config->{common}}; return $config; } # Returns a hash with all options defined in a section. If the section does not # exist, it returns an empty hash sub get_section($$) { my ($self,$sname) = @_; return $self->{config}{$sname} ? %{$self->{config}{$sname}} : (); } # Returns the list of all sections sub list_sections($) { my ($self) = @_; return keys %{$self->{config}}; } sub has_section($$) { my ($self,$sname) = @_; return defined $self->{config}{$sname}; } # list all subsections of a section, but not the section section itself sub list_subsections($$) { my ($self,$sname) = @_; my %ssnames = (); for (keys %{$self->{config}}) { $ssnames{$1}='' if m|^$sname/(.+)|; } return keys %ssnames; } 1; ######################################################## package SubstitutingIniParser; our @ISA = ('IniParser'); sub new($$$) { my ($this,$arcconf,$arc_location) = @_; my $self = $this->SUPER::new($arcconf); return undef unless $self; _substitute($self, $arc_location); return $self; } sub _substitute { my ($self, $arc_location) = @_; my $config = $self->{config}; my $lrmsstring = $config->{'grid-manager'}{lrms} || $config->{common}{lrms}; my ($lrms, $defqueue) = split " ", $lrmsstring || ''; die 'Gridmap user list feature is not supported anymore. Please use @filename to specify user list.' if $config->{'grid-manager/*'}; # expand user sections whose user name is like @filename my @users = $self->list_subsections('grid-manager'); for my $user (@users) { my $section = "grid-manager/$user"; next unless $user =~ m/^\@(.*)$/; my $path = $1; my $fh; # read in user names from file if (open ($fh, "< $path")) { while (my $line = <$fh>) { chomp (my $newsection = "grid-manager/$line"); next if exists $config->{$newsection}; # Duplicate user!!!! $config->{$newsection} = { %{$config->{$section}} }; # shallow copy } close $fh; delete $config->{$section}; } else { die "Failed opening file to read user list from: $path: $!"; } } # substitute per-user options @users = $self->list_subsections('grid-manager'); for my $user (@users) { my @pw; my $home; if ($user ne '.') { @pw = getpwnam($user); die "getpwnam failed for user: $user: $!" 
unless @pw; $home = $pw[7]; } else { $home = "/tmp"; } my $opts = $config->{"grid-manager/$user"}; # Default for controldir, sessiondir if ($opts->{controldir} eq '*') { $opts->{controldir} = $pw[7]."/.jobstatus" if @pw; } if (not $opts->{sessiondir} or $opts->{sessiondir} eq '*') { $opts->{sessiondir} = "$home/.jobs"; } my $controldir = $opts->{controldir}; my @sessiondirs = split /\[separator\]/, $opts->{sessiondir}; my $substitute_opt = sub { my ($key) = @_; my $val = $opts->{$key}; return unless defined $val; # %R - session root $val =~ s/%R/$sessiondirs[0]/g if $val =~ m/%R/; # %C - control dir $val =~ s/%C/$controldir/g if $val =~ m/%C/; if (@pw) { # %U - username $val =~ s/%U/$user/g if $val =~ m/%U/; # %u - userid # %g - groupid # %H - home dir $val =~ s/%u/$pw[2]/g if $val =~ m/%u/; $val =~ s/%g/$pw[3]/g if $val =~ m/%g/; $val =~ s/%H/$home/g if $val =~ m/%H/; } # %L - default lrms # %Q - default queue $val =~ s/%L/$lrms/g if $val =~ m/%L/; $val =~ s/%Q/$defqueue/g if $val =~ m/%Q/; # %W - installation path $val =~ s/%W/$arc_location/g if $val =~ m/%W/; # %G - globus path my $G = $ENV{GLOBUS_LOCATION} || '/usr'; $val =~ s/%G/$G/g if $val =~ m/%G/; $opts->{$key} = $val; }; &$substitute_opt('controldir'); &$substitute_opt('sessiondir'); &$substitute_opt('cachedir'); &$substitute_opt('remotecachedir'); } # authplugin, localcred, helper: not substituted } 1; ######################################################## package Section; sub new($$) { my ($this,$name) = @_; my $class = ref($this) || $this; my $self = { name => $name, data => {} }; bless $self, $class; return $self; } sub add($$$) { my ($self,$opt,$val) = @_; my $data = $self->{data}; my $old = $data->{$opt}; $data->{$opt} = $old ? $old."[separator]".$val : $val; } sub register($$) { my ($self,$config) = @_; my $name = $self->{name}; my $orig = $config->{$name} || {}; my $new = $self->{data}; $config->{$name} = { %$orig, %$new }; } 1; ######################################################## package SelfNamingSection; use base "Section"; sub new($$$) { my ($this,$name,$nameopt) = @_; my $self = $this->SUPER::new($name); $self->{nameopt} = $nameopt; return $self; } sub add($$$) { my ($self,$opt,$val) = @_; if ($opt eq $self->{nameopt}) { $self->{name} =~ s|(/[^/]+)?$|/$val|; } else { $self->SUPER::add($opt,$val); } } 1; ######################################################## package GMSection; use base "Section"; sub new($) { my ($this) = @_; my $self = $this->SUPER::new('grid-manager'); # OBS sessiondir is not treated $self->{muopts} = [qw(sessiondir cachedir remotecachedir)]; $self->{suopts} = [qw(cachesize cachelifetime norootpower maxrerun maxtransferfiles defaultttl)]; $self->{thisuser} = {}; $self->{allusers} = {}; $self->{controldir} = undef; return $self; } sub add($$$) { my ($self,$opt,$val) = @_; my $thisuser = $self->{thisuser}; if ($opt eq 'controldir') { $self->{controldir} = $val; } elsif ($opt eq 'control') { my ($dir, @usernames) = split /\s+/, $val; $thisuser->{controldir} = $dir; $self->{allusers}{$_} = $thisuser for @usernames; $thisuser = $self->{thisuser} = {%$thisuser}; # make copy delete $thisuser->{$_} for @{$self->{muopts}}; } elsif (grep {$opt eq $_} @{$self->{muopts}}) { my $old = $thisuser->{$opt}; $thisuser->{$opt} = $old ? 
$old."[separator]".$val : $val; } elsif (grep {$opt eq $_} @{$self->{suopts}}) { $thisuser->{$opt} = $val; } else { $self->SUPER::add($opt,$val); } } sub register($$) { my ($self,$config) = @_; my $dir = $self->{controldir}; if ($dir) { my $thisuser = $self->{thisuser}; $thisuser->{controldir} = $dir; $self->{allusers}{'.'} = $thisuser; } my $allusers = $self->{allusers}; $config->{"grid-manager/$_"} = $allusers->{$_} for keys %$allusers; $self->SUPER::register($config); } sub test { require Data::Dumper; import Data::Dumper qw(Dumper); my $parser = SubstitutingIniParser->new('/tmp/arc.conf','/usr') or die; print Dumper($parser); print "@{[$parser->list_subsections('gridftpd')]}\n"; print "@{[$parser->list_subsections('group')]}\n"; } #test(); 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/GMJobsInfo.pm0000644000000000000000000000012413065017774026021 xustar000000000000000027 mtime=1490296828.950435 27 atime=1513200575.837718 30 ctime=1513200663.144785948 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/GMJobsInfo.pm0000644000175000002070000003603713065017774026077 0ustar00mockbuildmock00000000000000package GMJobsInfo; use POSIX qw(ceil); use English; use LogUtils; use strict; # The returned hash looks something like this: our $j = { 'jobID' => { gmuser => '*', # from .local lrms => '*', queue => '', localid => '*', subject => '', starttime => '*', # MDS time format jobname => '*', gmlog => '*', cleanuptime => '*', # MDS time format delegexpiretime => '*', # MDS time format clientname => '', clientsoftware => '*', activityid => [ '*' ], sessiondir => '', diskspace => '', failedstate => '*', fullaccess => '*', lifetime => '*', # seconds jobreport => '*', interface => '*', # added for GLUE2, the interface the job was submitted. If missing, gridftp voms => [ '*' ], #ref array, contains all the voms attributes in the user certificate that A-REX authorization accepted vomsvo => '*', # string, the first VO in the voms array without the slashes # from .description description => '', # rsl or xml # from .grami -- not kept when the job is deleted stdin => '*', stdout => '*', stderr => '*', count => '*', reqwalltime => '*', # units: s reqcputime => '*', # units: s runtimeenvironments=> [ '*' ], # from .status status => '', statusmodified => '', # seconds since epoch statusread => '', # seconds since epoch completiontime => '*', # MDS time format localowner => '', # from .failed errors => [ '*' ], lrmsexitcode => '*', # from .diag exitcode => '*', nodenames => [ '*' ], UsedMem => '*', # units: kB; summed over all execution threads CpuTime => '*', # units: s; summed over all execution threads WallTime => '*' # units: s; real-world time elapsed } }; our $log = LogUtils->getLogger(__PACKAGE__); # # switch effective user if possible. This is reversible. # sub switchEffectiveUser { my ($user) = @_; return unless $UID == 0; my ($name, $pass, $uid, $gid); if ($user eq '.') { ($uid, $gid) = (0, 0); } else { ($name, $pass, $uid, $gid) = getpwnam($user); return unless defined $gid; } eval { $EGID = $gid; $EUID = $uid; }; } sub collect { my ($controls, $remotegmdirs, $nojobs) = @_; my $gmjobs = {}; while (my ($user, $control) = each %$controls) { switchEffectiveUser($user); my $controldir = $control->{controldir}; my $newjobs = get_gmjobs($controldir, $nojobs); $newjobs->{$_}{gmuser} = $user for keys %$newjobs; $gmjobs->{$_} = $newjobs->{$_} for keys %$newjobs; } # switch to root or to gridftpd user? 
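    # For reference, a hedged caller-side sketch of the arguments handled in
    # collect() (paths and the user name are hypothetical): $controls maps a
    # local user to its control directory, while each remotegmdirs entry is a
    # space-separated "controldir sessiondir" pair, e.g.
    #
    #   my $controls     = { '.'    => { controldir => '/var/spool/arc/jobstatus' },
    #                        'grid' => { controldir => '/home/grid/.jobstatus' } };
    #   my $remotegmdirs = [ '/remote/control /remote/session' ];
    #   my $gmjobs       = GMJobsInfo::collect($controls, $remotegmdirs);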
switchEffectiveUser('.'); if ($remotegmdirs) { for my $pair (@$remotegmdirs) { my ($controldir, $sessiondir) = split ' ', $pair; my $newjobs = get_gmjobs($controldir, $nojobs); $gmjobs->{$_} = $newjobs->{$_} for keys %$newjobs; } } # switch back to root switchEffectiveUser('.'); return $gmjobs; } sub get_gmjobs { my ($controldir, $nojobs) = @_; my %gmjobs; my $jobstoscan = 0; my $jobsskipped = 0; # read the list of jobs from the jobdir and create the @gridmanager_jobs # the @gridmanager_jobs contains the IDs from the job.ID.status foreach my $controlsubdir ("$controldir/accepting", "$controldir/processing", "$controldir/finished") { unless (opendir JOBDIR, $controlsubdir ) { $log->warning("Can't access the job control directory: $controlsubdir") and return {}; } my @allfiles = grep /\.status/, readdir JOBDIR; closedir JOBDIR; my @gridmanager_jobs = map {$_=~m/job\.(.+)\.status/; $_=$1;} @allfiles; # count job IDs to scan $jobstoscan = $jobstoscan + @gridmanager_jobs; $log->verbose("Found ". scalar @gridmanager_jobs. " jobs in $controlsubdir"); foreach my $ID (@gridmanager_jobs) { my $job = $gmjobs{$ID} = {}; my $gmjob_local = $controldir."/job.".$ID.".local"; my $gmjob_status = $controlsubdir."/job.".$ID.".status"; my $gmjob_failed = $controldir."/job.".$ID.".failed"; my $gmjob_description = $controldir."/job.".$ID.".description"; my $gmjob_grami = $controldir."/job.".$ID.".grami"; my $gmjob_diag = $controldir."/job.".$ID.".diag"; unless ( open (GMJOB_LOCAL, "<$gmjob_local") ) { $log->warning( "Job $ID: Can't read jobfile $gmjob_local, skipping job" ); delete $gmjobs{$ID}; $jobsskipped++; next; } my @local_allines = ; $job->{activityid} = []; # parse the content of the job.ID.local into the %gmjobs hash foreach my $line (@local_allines) { if ($line=~m/^(\w+)=(.+)$/) { # TODO: multiple activityid support. # is this still used? if not, remove the code. # looking at trunk it doesn't seem to exist anymore. if ($1 eq "activityid") { push @{$job->{activityid}}, $2; } else { # a job can belong to a user that has multiple voms roles # for completeness all added to the datastructure # in an array if ($1 eq "voms") { push @{$job->{voms}}, $2; # vomsvo to hold the selected vo, I assume is the first in the list. # will be used to calculate vo statistics # must match authorizedvo (i.e. slashes are removed) unless (defined $job->{vomsvo}) { my $vostring = $2; if ($vostring =~ /^\/+(\w+)/) { $vostring = $1; }; $job->{vomsvo} = $vostring; } } else { $job->{$1}=$2; } } } } close GMJOB_LOCAL; # Extrasct jobID uri if ($job->{globalid}) { $job->{globalid} =~ s/.*JobSessionDir>([^<]+)<.*/$1/; } else { $log->debug("Job $ID: 'globalid' missing from .local file"); } # Rename queue -> share if (exists $job->{queue}) { $job->{share} = $job->{queue}; delete $job->{queue}; } else { $log->warning("Job $ID: 'queue' missing from .local file"); } # check for interface field if (! 
$job->{interface}) { $log->warning("Job $ID: 'interface' missing from .local file, reverting to org.nordugrid.gridftpjob"); $job->{interface} = 'org.nordugrid.gridftpjob'; } # read the job.ID.status into "status" unless (open (GMJOB_STATUS, "<$gmjob_status")) { $log->warning("Job $ID: Can't open status file $gmjob_status, skipping job"); delete $gmjobs{$ID}; $jobsskipped++; next; } else { my @file_stat = stat GMJOB_STATUS; my ($first_line) = ; close GMJOB_STATUS; unless ($first_line) { $log->warning("Job $ID: Failed to read status from file $gmjob_status, skipping job"); delete $gmjobs{$ID}; $jobsskipped++; next; } chomp ($first_line); $job->{status} = $first_line; if (@file_stat) { # localowner my $uid = $file_stat[4]; my $user = (getpwuid($uid))[0]; if ($user) { $job->{localowner} = $user; } else { $log->warning("Job $ID: Cannot determine user name for owner (uid $uid)"); } $job->{"statusmodified"} = $file_stat[9]; $job->{"statusread"} = time(); # completiontime if ($job->{"status"} eq "FINISHED") { my ($s,$m,$h,$D,$M,$Y) = gmtime($file_stat[9]); my $ts = sprintf("%4d%02d%02d%02d%02d%02d%1s",$Y+1900,$M+1,$D,$h,$m,$s,"Z"); $job->{"completiontime"} = $ts; } } else { $log->warning("Job $ID: Cannot stat status file: $!"); } } # check for localid if (! $job->{localid}) { if ($job->{status} eq 'INLRMS') { $log->warning("Job $ID: has no local ID but is in INLRMS state, this should not happen"); } $job->{localid} = 'UNDEFINEDVALUE'; } # Comes the splitting of the terminal job state # check for job failure, (job.ID.failed ) "errors" if (-e $gmjob_failed) { unless (open (GMJOB_FAILED, "<$gmjob_failed")) { $log->warning("Job $ID: Can't open $gmjob_failed"); } else { my $chars; read GMJOB_FAILED, $chars, 1024; my @allines = split "\n", $chars; close GMJOB_FAILED; $job->{errors} = \@allines; } } if ($job->{"status"} eq "FINISHED") { #terminal job state mapping if ( $job->{errors} ) { if (grep /User requested to cancel the job/, @{$job->{errors}}) { $job->{status} = "KILLED"; } elsif ( defined $job->{errors} ) { $job->{status} = "FAILED"; } } } # if jobs are not printed, it's sufficient to have jobid, status, # subject, queue and share. Can skip the rest. next if $nojobs; # read the job.ID.grami file unless ( open (GMJOB_GRAMI, "<$gmjob_grami") ) { # this file is is kept by A-REX during the hole existence of the # job. grid-manager from arc0, however, deletes it after the job # has finished. 
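                # Illustrative only: .grami holds shell-style "joboption_*"
                # assignments written by A-REX. Quoted options (stdin/stdout/stderr,
                # with the job's session directory prefix stripped, and runtime_*
                # entries) and unquoted ones (count, walltime, cputime, starttime)
                # are handled by the two regexps further below, e.g.
                #
                #   joboption_stdout='/path/to/sessiondir/stdout'  ->  $job->{stdout} = 'stdout'
                #   joboption_runtime_0='ENV/PROXY'                ->  push @{$job->{runtimeenvironments}}, 'ENV/PROXY'
                #   joboption_count=4                              ->  $job->{count} = 4
                #   joboption_walltime=3600                        ->  $job->{reqwalltime} = 3600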
$log->debug("Job $ID: Can't open $gmjob_grami"); } else { my $sessiondir = $job->{sessiondir} || ''; while (my $line = ) { if ($line =~ m/^joboption_(\w+)='(.*)'$/) { my ($param, $value) = ($1, $2); $param =~ s/'\\''/'/g; # unescape quotes # These parameters are quoted by A-REX if ($param eq "stdin") { $job->{stdin} = $value; $job->{stdin} =~ s/^\Q$sessiondir\E\/*//; } elsif ($param eq "stdout") { $job->{stdout} = $value; $job->{stdout} =~ s/^\Q$sessiondir\E\/*//; } elsif ($param eq "stderr") { $job->{stderr} = $value; $job->{stderr} =~ s/^\Q$sessiondir\E\/*//; } elsif ($param =~ m/^runtime_/) { push @{$job->{runtimeenvironments}}, $value; } } elsif ($line =~ m/^joboption_(\w+)=(\w+)$/) { my ($param, $value) = ($1, $2); # These parameters are not quoted by A-REX if ($param eq "count") { $job->{count} = int($value); } elsif ($param eq "walltime") { $job->{reqwalltime} = int($value); } elsif ($param eq "cputime") { $job->{reqcputime} = int($value); } elsif ($param eq "starttime") { $job->{starttime} = $value; } } } close GMJOB_GRAMI; } #read the job.ID.description file unless ( open (GMJOB_DESCRIPTION, "<$gmjob_description") ) { $log->warning("Job $ID: Can't open $gmjob_description"); } else { while (my $line = ) { chomp $line; next unless $line; if ($line =~ m/^\s*[&+|(]/) { $job->{description} = 'rsl'; last } if ($line =~ m/http\:\/\/www.eu-emi.eu\/es\/2010\/12\/adl/) { $job->{description} = 'adl'; last } if ($line =~ m/http\:\/\/schemas.ggf.org\/jsdl\/2005\/11\/jsdl/) { $job->{description} = 'jsdl'; last } my $nextline = ; if ($nextline =~ m/http\:\/\/www.eu-emi.eu\/es\/2010\/12\/adl/) { $job->{description} = 'adl'; last } if ($nextline =~ m/http\:\/\/schemas.ggf.org\/jsdl\/2005\/11\/jsdl/) { $job->{description} = 'jsdl'; last } $log->warning("Job $ID: Can't identify job description language"); last; } close GMJOB_DESCRIPTION; } #read the job.ID.diag file if (-s $gmjob_diag) { unless ( open (GMJOB_DIAG, "<$gmjob_diag") ) { $log->warning("Job $ID: Can't open $gmjob_diag"); } else { my %nodenames; my ($kerneltime, $usertime); while (my $line = ) { $line=~m/^nodename=(\S+)/ and $nodenames{$1} = 1; $line=~m/^WallTime=(\d+)(\.\d*)?/ and $job->{WallTime} = ceil($1); $line=~m/^exitcode=(\d+)/ and $job->{exitcode} = $1; $line=~m/^AverageTotalMemory=(\d+)kB/ and $job->{UsedMem} = ceil($1); $line=~m/^KernelTime=(\d+)(\.\d*)?/ and $kerneltime=$1; $line=~m/^UserTime=(\d+)(\.\d*)?/ and $usertime=$1; } close GMJOB_DIAG; $job->{nodenames} = [ sort keys %nodenames ] if %nodenames; $job->{CpuTime}= ceil($kerneltime + $usertime) if defined $kerneltime and defined $usertime; } } } # job ID loop } # controlsubdir loop $log->verbose("Number of jobs to scan: $jobstoscan ; Number of jobs skipped: $jobsskipped"); return \%gmjobs; } #### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST #### sub test() { require Data::Dumper; LogUtils::level('VERBOSE'); my $controldirs = {'*' => { 'controldir' => '/tmp/jobstatus' } }; my $results = GMJobsInfo::collect(\%$controldirs); print Data::Dumper::Dumper($results); } # test; 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/glue-generator.pl0000644000000000000000000000012412145442507026775 xustar000000000000000027 mtime=1368802631.077294 27 atime=1513200575.825718 30 ctime=1513200663.161786156 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/glue-generator.pl0000644000175000002070000005656112145442507027057 0ustar00mockbuildmock00000000000000#!/usr/bin/perl -w # Queries NDGF site information and translates it from ARC to GLUE 
schema # Prototype by L.Field (2006) # Corrections and Performance enhancements by M.Flechl (2006-08-17) # Merged with adjusted bdii-update (David Groep, L.Field, M.Litmaath) by L.Field and M.Flechl (2006-08-21) # ARC GIIS traversal and some sanity checks for failing GRISes by Mattias Wadenstein (2007-07-03) # Updated to work at a resource bdii-level by Daniel (2009-2010) # Now we can generate the CEUniqueID correctly with the queue name by Pekka Kaitaniemi (2012-04-19) # # $Id: glite-info-provider-ndgf,v 1.1 2006/08/30 12:13:15 lfield Exp $ use strict; use LWP::Simple; use POSIX; use IO::Handle; sub translator; sub build_glueCEUniqueID { my $cluster_name = shift; my $cluster_lrms_type = shift; my $queue_name = shift; return $cluster_name . ":" . "2811" . "/nordugrid-". $cluster_lrms_type . "-" . $queue_name; } sub build_glueServiceUniqueID { my $cluster_name = shift; return $cluster_name . "_" . "org.nordugrid.arex"; } # Global variables for translator use vars qw($DEFAULT); $DEFAULT = 0; use vars qw($outbIP $inbIP $glueSubClusterUniqueID $norduBenchmark $norduOpsys $norduNodecput $norduNodecpu); use vars qw($glueHostMainMemoryRAMSize $glueHostArchitecturePlatformType $glueSubClusterUniqueID $GlueHostBenchmarkSI00 $GlueHostBenchmarkSF00); use vars qw($glueSubClusterName $glueSubClusterPhysicalCPUs $glueSubClusterLogicalCPUs $glueClusterUniqueID $processorOtherDesc $smpSize); use vars qw($AssignedSlots $mappedStatus $waitingJobs $totalJobs $waitingJobs $freeSlots $estRespTime $worstRespTime $vo $ce_unique_id); use vars qw(@envs @vos); #Create nordugrid ldif #TODO read this from optarg my $ldif_input=`$LDIF_GENERATOR_FILE_NG`; # These values will be read from NorduGrid MDS my %cluster_attributes=( 'nordugrid-cluster-location' => '', 'nordugrid-cluster-support' => '', 'nordugrid-cluster-name' => '', 'nordugrid-cluster-runtimeenvironment' => '', 'nordugrid-cluster-contactstring' => '', 'nordugrid-cluster-aliasname' => '', 'nordugrid-cluster-lrms-type' => '', 'nordugrid-cluster-lrms-version' => '', 'nordugrid-cluster-totalcpus' => '', 'nordugrid-cluster-opsys' => '', 'nordugrid-cluster-benchmark' => '', 'nordugrid-cluster-nodecpu' => '', 'nordugrid-cluster-nodememory' => '', 'nordugrid-cluster-nodeaccess' => '', 'nordugrid-cluster-architecture' => '', 'nordugrid-cluster-acl' => '', 'nordugrid-cluster-homogeneity' => '', 'nordugrid-cluster-comment' => '', 'nordugrid-cluster-owner' => '', 'nordugrid-cluster-localse' => '', ); #all these values will be checked if they are numeric only: my @cluster_attributes_num = ('totalcpus','nodememory'); for (my $i=0; $i<=$#cluster_attributes_num; $i++){ $cluster_attributes_num[$i] = 'nordugrid-cluster-'.$cluster_attributes_num[$i]; } # Queue attributes read from NorduGrid MDS my %queue_attributes=( 'nordugrid-queue-name' => '', 'nordugrid-queue-running' => '', 'nordugrid-queue-maxrunning' => '', 'nordugrid-queue-maxcputime' => '', 'nordugrid-queue-maxqueuable' => '', 'nordugrid-queue-totalcpus' => '', 'nordugrid-queue-opsys' => '', 'nordugrid-queue-benchmark' => '', 'nordugrid-queue-nodecpu' => '', 'nordugrid-queue-nodememory' => '', 'nordugrid-queue-architecture' => '', 'nordugrid-queue-status' => '', 'nordugrid-queue-gridqueued' => '', 'nordugrid-queue-localqueued' => '', 'nordugrid-queue-prelrmsqueued' => '', ); #all these values will be checked if they are numeric only: my @queue_attributes_num = ('running','maxrunning','maxcputime','maxqueuable','totalcpus','nodememory','gridqueued','localqueued','prelrmsqueued'); for (my $i=0; 
$i<=$#queue_attributes_num; $i++){ $queue_attributes_num[$i] = 'nordugrid-queue-'.$queue_attributes_num[$i]; } #Translate and print glue ldif #TODO this should perhaps be able to write do different rootdn:s, not just mds-vo-name=resource,o=grid. translator($ldif_input); exit; #translator takes an ldif-output in a scalar variable as input and prints the output to stdout sub translator(){ my $temp=$_[0]; #$DEFAULT = -1; # Store ldif here my @ldif; #Remove blank space at the start of the line $temp=~s/\n //gm; @ldif=split "\n", $temp; push @ldif, "EOF"; #my $hostname=hostname(); #set the attributes from the ldif for my $key ( keys %cluster_attributes ) { $cluster_attributes{$key} = join (" ", grep { /^$key/ } @ldif); chomp $cluster_attributes{$key}; if ($key eq "nordugrid-cluster-opsys" or $key eq "nordugrid-cluster-owner" or $key eq "nordugrid-cluster-benchmark") { $cluster_attributes{$key}=~s/ ?$key//g; } else { $cluster_attributes{$key}=~s/$key: //g; } if ($cluster_attributes{$key}=~/^$/) { $cluster_attributes{$key}="$DEFAULT" } } my $glue_site_unique_id="$GLUESITEUNIQUEID"; #my $glue_site_unique_id=$cluster_attributes{'nordugrid-cluster-aliasname'}; @envs = split / /, $cluster_attributes{'nordugrid-cluster-runtimeenvironment'}; my @storages = split / /, $cluster_attributes{'nordugrid-cluster-localse'}; $outbIP = "FALSE"; $inbIP = "FALSE"; if ($cluster_attributes{'nordugrid-cluster-nodeaccess'} eq "outbound"){ $outbIP = "TRUE"; } elsif ($cluster_attributes{'nordugrid-cluster-nodeaccess'} eq "inbound"){ $inbIP = "TRUE"; } if ($cluster_attributes{'nordugrid-cluster-acl'} eq "$DEFAULT") { $cluster_attributes{'nordugrid-cluster-acl'}="VO:ops"; } my @owner = split /: /, $cluster_attributes{'nordugrid-cluster-owner'}; my $nclocation=$cluster_attributes{'nordugrid-cluster-location'}; my $loc = $LOC; my $lat = $LAT; my $long = $LONG; my $provide_glue_site_info = "$PROVIDE_GLUE_SITE_INFO"; $processorOtherDesc = "$PROCESSOROTHERDESCRIPTION"; #set numeric values to $DEFAULT if they are on the list and not numeric for (my $i=0; $i<=$#cluster_attributes_num; $i++){ if (! 
($cluster_attributes{$cluster_attributes_num[$i]} =~ /^\d+$/) ){ $cluster_attributes{$cluster_attributes_num[$i]} = $DEFAULT; } } # Write Site Entries if($provide_glue_site_info =~ /true/i) { write_site_entries($glue_site_unique_id, $cluster_attributes{'nordugrid-cluster-comment'}, $cluster_attributes{'nordugrid-cluster-support'}, $loc,$lat,$long,"$GLUESITEWEB",\@owner); } if ($cluster_attributes{'nordugrid-cluster-homogeneity'} =~ /true/i){ $glueSubClusterUniqueID=$cluster_attributes{'nordugrid-cluster-name'}; $norduOpsys=$cluster_attributes{'nordugrid-cluster-opsys'}; $norduBenchmark=$cluster_attributes{'nordugrid-cluster-benchmark'}; $norduNodecpu=$cluster_attributes{'nordugrid-cluster-nodecpu'}; $glueHostMainMemoryRAMSize=$cluster_attributes{'nordugrid-cluster-nodememory'}; $glueHostArchitecturePlatformType=$cluster_attributes{'nordugrid-cluster-architecture'}; $glueSubClusterUniqueID=$cluster_attributes{'nordugrid-cluster-name'}; $glueSubClusterName=$glue_site_unique_id; if ( $processorOtherDesc =~ m/Cores=(\d+)/ ){ $smpSize=$1; $glueSubClusterPhysicalCPUs=int($cluster_attributes{'nordugrid-cluster-totalcpus'}/$smpSize); } else { $smpSize=1; $glueSubClusterPhysicalCPUs=$cluster_attributes{'nordugrid-cluster-totalcpus'}; } $glueSubClusterLogicalCPUs=$cluster_attributes{'nordugrid-cluster-totalcpus'}; $glueClusterUniqueID=$cluster_attributes{'nordugrid-cluster-name'}; WriteSubCluster(); } #Do GlueCE entry for each nordugrid queue write_gluece_entries(\@ldif); # Write Cluster Entries write_cluster_entries($cluster_attributes{'nordugrid-cluster-name'},$glue_site_unique_id); #write CE-SE Binding Entries if ( $cluster_attributes{'nordugrid-cluster-localse'} ) { write_ce_se_binding_entries($cluster_attributes{'nordugrid-cluster-localse'},\@storages); } # Service information. This is an hack to mimic Site-BDII service information. my $glueServiceUniqueID = build_glueServiceUniqueID($cluster_attributes{'nordugrid-cluster-name'}); my $glueservicename = $glue_site_unique_id."-arc"; my $glueservicestatusinfo=`/etc/init.d/a-rex status`; chomp $glueservicestatusinfo; my $glueservicestatus; if ($? == 0) { $glueservicestatus="OK"; } else { $glueservicestatus="Warning"; } my $serviceendpoint = "gsiftp://". $cluster_attributes{'nordugrid-cluster-name'} . ":" . "2811" . 
"/jobs"; my $serviceversion = "3.0.0"; my $servicetype = "ARC-CE"; write_service_information ($glueServiceUniqueID,$glueservicename,$glueservicestatus,$glueservicestatusinfo,$serviceendpoint,$serviceversion,$servicetype,'_UNDEF_'); } # Write SubCluster Entries sub WriteSubCluster { #dn: GlueSubClusterUniqueID=$glueSubClusterUniqueID,GlueClusterUniqueID=$cluster_attributes{'nordugrid-cluster-name'},mds-vo-name=resource,o=grid print " dn: GlueSubClusterUniqueID=$glueSubClusterUniqueID,GlueClusterUniqueID=$cluster_attributes{'nordugrid-cluster-name'},mds-vo-name=resource,o=grid objectClass: GlueClusterTop objectClass: GlueSubCluster objectClass: GlueSchemaVersion objectClass: GlueHostApplicationSoftware objectClass: GlueHostArchitecture objectClass: GlueHostBenchmark objectClass: GlueHostMainMemory objectClass: GlueHostNetworkAdapter objectClass: GlueHostOperatingSystem objectClass: GlueHostProcessor objectClass: GlueInformationService objectClass: GlueKey "; foreach (@envs){ chomp; print "GlueHostApplicationSoftwareRunTimeEnvironment: $_\n" } print "GlueHostArchitectureSMPSize: $smpSize GlueHostNetworkAdapterInboundIP: $inbIP GlueHostNetworkAdapterOutboundIP: $outbIP"; my @opsys = split /: /, $norduOpsys; for (my $i=0; $i<=3; $i++){ if ($i > $#opsys) { $opsys[$i]=$DEFAULT; } chomp($opsys[$i]); $opsys[$i]=~s/\s+$//; } $GlueHostBenchmarkSI00 = $DEFAULT; $GlueHostBenchmarkSF00 = $DEFAULT; my $benchmark; if($norduBenchmark) { foreach $benchmark (split /: /, $norduBenchmark) { if($benchmark =~ /SPECINT2000 @ (\d+)/) { $GlueHostBenchmarkSI00 = $1; } if($benchmark =~ /SPECFP2000 @ (\d+)/) { $GlueHostBenchmarkSF00 = $1; } } } my @nodecpu = split / /, $norduNodecpu; my $clockSpeed=$DEFAULT; my $cpuVer=$DEFAULT; for (my $i=0; $i<=$#nodecpu; $i++){ if ($i >= 2){ if ($nodecpu[$i -2] =~ /@/) { $clockSpeed = $nodecpu[$i -1]." ".$nodecpu[$i]; } } if ($nodecpu[$i] =~ /I$/) { $cpuVer = $nodecpu[$i]; } } for (my $i=0; $i<=1; $i++){ if ($i > $#nodecpu) { $nodecpu[$i]=$DEFAULT; } } $clockSpeed=~s/\.[0-9]*//; $clockSpeed=~s/MHz//i; $clockSpeed=~s/GHz/000/i; $clockSpeed=~s/[^0-9]//g; if (! 
$clockSpeed=~m/[0-9]/) { $clockSpeed=$DEFAULT; } print " GlueHostOperatingSystemName: $opsys[1] GlueHostOperatingSystemRelease: $opsys[2] GlueHostOperatingSystemVersion: $opsys[3] GlueHostProcessorVendor: $nodecpu[0] GlueHostProcessorModel: $nodecpu[1] GlueHostProcessorVersion: $cpuVer GlueHostProcessorClockSpeed: $clockSpeed GlueHostProcessorOtherDescription: $processorOtherDesc GlueHostMainMemoryRAMSize: $glueHostMainMemoryRAMSize GlueHostArchitecturePlatformType: $glueHostArchitecturePlatformType GlueHostBenchmarkSI00: $GlueHostBenchmarkSI00 GlueHostBenchmarkSF00: $GlueHostBenchmarkSF00 GlueSubClusterUniqueID: $glueSubClusterUniqueID GlueSubClusterName: $glueSubClusterName GlueSubClusterPhysicalCPUs: $glueSubClusterPhysicalCPUs GlueSubClusterLogicalCPUs: $glueSubClusterLogicalCPUs GlueChunkKey: GlueClusterUniqueID=$glueClusterUniqueID GlueSchemaVersionMajor: 1 GlueSchemaVersionMinor: 2 "; } sub write_site_entries($$){ my $site_id=shift; my $cluster_comment=shift; my $cluster_support=shift; my $loc=shift; my $lat=shift; my $long=shift; my $siteweb=shift; my $s_owner=shift; my @owner=@{$s_owner}; print " dn: GlueSiteUniqueID=$site_id,mds-vo-name=resource,o=grid objectClass: GlueTop objectClass: GlueSite objectClass: GlueKey objectClass: GlueSchemaVersion GlueSiteUniqueID: $site_id GlueSiteName: $site_id GlueSiteDescription: ARC-$cluster_comment GlueSiteUserSupportContact: mailto: $cluster_support GlueSiteSysAdminContact: mailto: $cluster_support GlueSiteSecurityContact: mailto: $cluster_support GlueSiteLocation: $loc GlueSiteLatitude: $lat GlueSiteLongitude: $long GlueSiteWeb: $siteweb"; for (my $i=1; $i<=$#owner; $i++){ print "\nGlueSiteSponsor: $owner[$i]"; } print " GlueSiteOtherInfo: Middleware=ARC GlueForeignKey: None GlueSchemaVersionMajor: 1 GlueSchemaVersionMinor: 2 "; } sub write_gluece_entries(){ my $s_ldif=shift; my @ldif = @{$s_ldif}; my $is_queue="false"; my @tmp_queue; foreach(@ldif){ push @tmp_queue, $_; if(m/^dn:\s+nordugrid-queue-name/){ $is_queue="true"; undef @tmp_queue; } if( ( (m/^\s*$/) || (m/^EOF/) ) && ( $is_queue eq "true" ) ){ $is_queue = "false"; #Set the queue attributes from the ldif for my $key ( keys %queue_attributes ) { $queue_attributes{$key} = join (" ", grep { /^$key/ } @tmp_queue); chomp $queue_attributes{$key}; if ($key eq "nordugrid-queue-opsys"){ $queue_attributes{$key}=~s/$key//g; } else { $queue_attributes{$key}=~s/$key: //g; } if ($queue_attributes{$key}=~/^$/) { $queue_attributes{$key}="$DEFAULT" } } #set non-numeric values to $DEFAULT if they are on the list for (my $i=0; $i<=$#queue_attributes_num; $i++){ if (! 
($queue_attributes{$queue_attributes_num[$i]} =~ /^\d+$/) ){ #print "XXX Change $queue{$queue_num[$i]} to $DEFAULT\n"; $queue_attributes{$queue_attributes_num[$i]} = $DEFAULT; } } if ($cluster_attributes{'nordugrid-cluster-homogeneity'} =~ /false/i){ $glueSubClusterUniqueID=$queue_attributes{'nordugrid-queue-name'}; ##XX $norduOpsys=$queue_attributes{'nordugrid-queue-opsys'}; ##XX $norduBenchmark=$queue_attributes{'nordugrid-queue-benchmark'}; ##XX $norduNodecpu=$queue_attributes{'nordugrid-queue-nodecpu'}; ##XX $glueHostMainMemoryRAMSize=$queue_attributes{'nordugrid-queue-nodememory'}; ##XX $glueHostArchitecturePlatformType=$queue_attributes{'nordugrid-queue-architecture'}; ##XX $glueSubClusterUniqueID=$queue_attributes{'nordugrid-queue-name'}; ##XX $glueSubClusterName=$queue_attributes{'nordugrid-queue-name'}; ##XX if ( $processorOtherDesc =~ m/Cores=(\d+)/ ){ $smpSize=$1; $glueSubClusterPhysicalCPUs=int($queue_attributes{'nordugrid-queue-totalcpus'}/$smpSize); } else { $smpSize=1; $glueSubClusterPhysicalCPUs=$queue_attributes{'nordugrid-queue-totalcpus'}; } $glueSubClusterLogicalCPUs=$queue_attributes{'nordugrid-queue-totalcpus'}; ##XX $glueClusterUniqueID=$cluster_attributes{'nordugrid-cluster-name'}; ##XX WriteSubCluster(); } $AssignedSlots = 0; if ($queue_attributes{'nordugrid-queue-totalcpus'} ne $DEFAULT){ $AssignedSlots = $queue_attributes{'nordugrid-queue-totalcpus'}; } elsif ($queue_attributes{'nordugrid-queue-maxrunning'} ne $DEFAULT) { $AssignedSlots = $queue_attributes{'nordugrid-queue-maxrunning'}; } elsif ($cluster_attributes{'nordugrid-cluster-totalcpus'} ne $DEFAULT) { $AssignedSlots = $cluster_attributes{'nordugrid-cluster-totalcpus'}; } if ($queue_attributes{'nordugrid-queue-totalcpus'} eq $DEFAULT){ $queue_attributes{'nordugrid-queue-totalcpus'} = $cluster_attributes{'nordugrid-cluster-totalcpus'}; } $mappedStatus=""; if ($queue_attributes{'nordugrid-queue-status'} eq "active"){ $mappedStatus = "Production"; } else{ $mappedStatus = "Closed"; } $waitingJobs = 0; if ($queue_attributes{'nordugrid-queue-gridqueued'} ne $DEFAULT) {$waitingJobs += $queue_attributes{'nordugrid-queue-gridqueued'};} if ($queue_attributes{'nordugrid-queue-localqueued'} ne $DEFAULT) {$waitingJobs += $queue_attributes{'nordugrid-queue-localqueued'};} if ($queue_attributes{'nordugrid-queue-prelrmsqueued'} ne $DEFAULT) {$waitingJobs += $queue_attributes{'nordugrid-queue-prelrmsqueued'};} $totalJobs = $waitingJobs; if ($queue_attributes{'nordugrid-queue-running'} ne $DEFAULT) { $totalJobs += $queue_attributes{'nordugrid-queue-running'}; } $freeSlots=$DEFAULT; if ( ($queue_attributes{'nordugrid-queue-totalcpus'} ne $DEFAULT) && ($queue_attributes{'nordugrid-queue-running'} ne $DEFAULT) ){ $freeSlots = $queue_attributes{'nordugrid-queue-totalcpus'} - $queue_attributes{'nordugrid-queue-running'}; } # Get an arbitrary approximate of how long a job may # expect to wait at this site, it depends on jobs that are # currently running and jobs that are waiting. 
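# Worked example with made-up numbers (not taken from any real site): for a
# queue reporting 50 running jobs, maxrunning=100 and 20 waiting jobs, the
# formula below yields int(600 + (50/100)*3600 + (20/100)*600) = 2520
# seconds as the estimated response time, and the worst-case response time
# adds a further 2000 seconds on top of that, giving 4520.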
Formula # aquired from Kalle Happonen and the "NDGF BDII" for LHC # T1 services if ( $queue_attributes{'nordugrid-queue-maxrunning'} ne $DEFAULT ) { $estRespTime = int(600 + ($queue_attributes{'nordugrid-queue-running'} /$queue_attributes{'nordugrid-queue-maxrunning'}) *3600 + ($waitingJobs /$queue_attributes{'nordugrid-queue-maxrunning'}) * 600 ); } else { $estRespTime = $DEFAULT; } $worstRespTime = $estRespTime + 2000; my @vos= split / /, $cluster_attributes{'nordugrid-cluster-acl'}; my $ce_unique_id=build_glueCEUniqueID($cluster_attributes{'nordugrid-cluster-name'}, $cluster_attributes{'nordugrid-cluster-lrms-type'}, $queue_attributes{'nordugrid-queue-name'}); # Write CE Entries print " dn: GlueCEUniqueID=$ce_unique_id,mds-vo-name=resource,o=grid objectClass: GlueCETop objectClass: GlueCE objectClass: GlueSchemaVersion objectClass: GlueCEAccessControlBase objectClass: GlueCEInfo objectClass: GlueCEPolicy objectClass: GlueCEState objectClass: GlueInformationService objectClass: GlueKey GlueCEUniqueID: $ce_unique_id GlueCEHostingCluster: $cluster_attributes{'nordugrid-cluster-name'} GlueCEName: $queue_attributes{'nordugrid-queue-name'} GlueCEInfoGatekeeperPort: 2811 GlueCEInfoHostName: $cluster_attributes{'nordugrid-cluster-name'} GlueCEInfoLRMSType: $cluster_attributes{'nordugrid-cluster-lrms-type'} GlueCEInfoLRMSVersion: $cluster_attributes{'nordugrid-cluster-lrms-version'} GlueCEInfoGRAMVersion: $DEFAULT GlueCEInfoTotalCPUs: $queue_attributes{'nordugrid-queue-totalcpus'} GlueCECapability: CPUScalingReferenceSI00=$CPUSCALINGREFERENCESI00 GlueCEInfoJobManager: arc GlueCEInfoContactString: $cluster_attributes{'nordugrid-cluster-contactstring'}?queue=$queue_attributes{'nordugrid-queue-name'} GlueInformationServiceURL: ldap://$cluster_attributes{'nordugrid-cluster-name'}:$BDIIPORT/mds-vo-name=resource,o=grid GlueCEStateEstimatedResponseTime: $estRespTime GlueCEStateRunningJobs: $queue_attributes{'nordugrid-queue-running'} GlueCEStateStatus: $mappedStatus GlueCEStateTotalJobs: $totalJobs GlueCEStateWaitingJobs: $waitingJobs GlueCEStateWorstResponseTime: $worstRespTime GlueCEStateFreeJobSlots: $freeSlots GlueCEPolicyMaxCPUTime: $queue_attributes{'nordugrid-queue-maxcputime'} GlueCEPolicyMaxRunningJobs: $queue_attributes{'nordugrid-queue-maxrunning'} GlueCEPolicyMaxTotalJobs: $queue_attributes{'nordugrid-queue-maxqueuable'} GlueCEPolicyMaxWallClockTime: $queue_attributes{'nordugrid-queue-maxcputime'} GlueCEPolicyPriority: 1 GlueCEPolicyAssignedJobSlots: $AssignedSlots\n"; foreach (@vos){ chomp; print "GlueCEAccessControlBaseRule: $_\n"; } print "GlueForeignKey: GlueClusterUniqueID=$cluster_attributes{'nordugrid-cluster-name'} GlueSchemaVersionMajor: 1 GlueSchemaVersionMinor: 2 GlueCEImplementationName: ARC-CE "; foreach (@vos){ chomp; $vo = $_; $vo =~ s/VO:// ; print " dn: GlueVOViewLocalID=$vo,GlueCEUniqueID=$ce_unique_id,Mds-Vo-name=resource,o=grid objectClass: GlueCETop objectClass: GlueVOView objectClass: GlueCEInfo objectClass: GlueCEState objectClass: GlueCEAccessControlBase objectClass: GlueCEPolicy objectClass: GlueKey objectClass: GlueSchemaVersion GlueSchemaVersionMajor: 1 GlueSchemaVersionMinor: 2 GlueCEInfoDefaultSE: $cluster_attributes{'nordugrid-cluster-localse'} GlueCEStateTotalJobs: $totalJobs GlueCEInfoDataDir: unset GlueCEAccessControlBaseRule: VO:$vo GlueCEStateRunningJobs: $queue_attributes{'nordugrid-queue-running'} GlueChunkKey: GlueCEUniqueID=$ce_unique_id GlueVOViewLocalID: $vo GlueCEInfoApplicationDir: unset GlueCEStateWaitingJobs: $waitingJobs 
GlueCEStateEstimatedResponseTime: $estRespTime GlueCEStateWorstResponseTime: $worstRespTime GlueCEStateFreeJobSlots: $freeSlots "; } } } } sub write_cluster_entries(){ my $cluster_name=shift; my $site_unique_id=shift; my $ce_unique_id=build_glueCEUniqueID($cluster_attributes{'nordugrid-cluster-name'}, $cluster_attributes{'nordugrid-cluster-lrms-type'}, $queue_attributes{'nordugrid-queue-name'}); print " dn: GlueClusterUniqueID=$cluster_name,mds-vo-name=resource,o=grid objectClass: GlueClusterTop objectClass: GlueCluster objectClass: GlueSchemaVersion objectClass: GlueInformationService objectClass: GlueKey GlueClusterName: $site_unique_id GlueClusterService: $cluster_name GlueClusterUniqueID: $cluster_name GlueForeignKey: GlueCEUniqueID=$ce_unique_id GlueForeignKey: GlueSiteUniqueID=$site_unique_id GlueSchemaVersionMajor: 1 GlueSchemaVersionMinor: 2 "; } sub write_ce_se_binding_entries(){ my $localse=shift; my $ce_unique_id=build_glueCEUniqueID($cluster_attributes{'nordugrid-cluster-name'}, $cluster_attributes{'nordugrid-cluster-lrms-type'}, $queue_attributes{'nordugrid-queue-name'}); my $s_storages=shift; my @storages=@{$s_storages}; if($localse ne '') { print " dn: GlueCESEBindGroupCEUniqueID=$ce_unique_id,mds-vo-name=resource,o=grid objectClass: GlueGeneralTop objectClass: GlueCESEBindGroup objectClass: GlueSchemaVersion GlueCESEBindGroupCEUniqueID: $ce_unique_id "; foreach (@storages){ chomp; print "GlueCESEBindGroupSEUniqueID: $_\n" } print "GlueSchemaVersionMajor: 1 GlueSchemaVersionMinor: 2 "; foreach (@storages){ chomp; print " dn: GlueCESEBindSEUniqueID=$_,GlueCESEBindGroupCEUniqueID=$ce_unique_id,mds-vo-name=resource,o=grid objectClass: GlueGeneralTop objectClass: GlueCESEBind objectClass: GlueSchemaVersion GlueCESEBindSEUniqueID: $_ GlueCESEBindCEUniqueID: $ce_unique_id GlueCESEBindMountInfo: none GlueCESEBindWeight: 0 GlueSchemaVersionMajor: 1 GlueSchemaVersionMinor: 2 "; } } } sub write_service_information () { my $serviceuniqueid = shift; my $servicename = shift; my $glueservicestatus = shift; my $glueservicestatusinfo = shift; my $serviceendpoint = shift; my $serviceversion = shift; my $servicetype = shift; my $servicewsdl = shift; my $s_acb=shift; my $s_acr=shift; my @accesscontrolbase = @{$s_acb} if defined $s_acb; my @accesscontrolrule = @{$s_acr} if defined $s_acr; print " dn: GlueServiceUniqueID=$serviceuniqueid,mds-vo-name=resource,o=grid "; foreach (@accesscontrolbase) { chomp; print " GlueServiceAccessControlBaseRule: $_ "; } print " GlueServiceStatus: $glueservicestatus GlueServiceStatusInfo: $glueservicestatusinfo objectClass: GlueTop objectClass: GlueService objectClass: GlueKey objectClass: GlueSchemaVersion GlueServiceUniqueID: $serviceuniqueid "; foreach (@accesscontrolrule) { chomp; print " GlueServiceAccessControlRule: $_ "; } print " GlueServiceEndpoint: $serviceendpoint GlueServiceVersion: $serviceversion GlueSchemaVersionMajor: 1 GlueSchemaVersionMinor: 3 GlueServiceName: $servicename GlueServiceType: $servicetype GlueServiceWSDL: $servicewsdl "; } #EOF nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/Boinc.pm0000644000000000000000000000012413213445307025106 xustar000000000000000027 mtime=1512983239.050663 27 atime=1513200575.796717 30 ctime=1513200663.139785886 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/Boinc.pm0000644000175000002070000001767013213445307025166 0ustar00mockbuildmock00000000000000package Boinc; use strict; use DBI; use POSIX qw(ceil floor); use Sys::Hostname; our @ISA = ('Exporter'); our @EXPORT_OK = 
('cluster_info', 'queue_info', 'jobs_info', 'users_info'); use LogUtils ( 'start_logging', 'error', 'warning', 'debug' ); ########################################## # Saved private variables ########################################## our (%lrms_queue); our $running = undef; # total running jobs in a queue # the queue passed in the latest call to queue_info, jobs_info or users_info my $currentqueue = undef; # Resets queue-specific global variables if # the queue has changed since the last call sub init_globals($) { my $qname = shift; if (not defined $currentqueue or $currentqueue ne $qname) { $currentqueue = $qname; %lrms_queue = (); $running = undef; } } ########################################## # Private subs ########################################## sub db_conn($){ my $config=shift; my $DB_HOST=$$config{boinc_db_host}; my $DB_PORT=$$config{boinc_db_port}; my $DB_NAME=$$config{boinc_db_name}; my $DB_USER=$$config{boinc_db_user}; my $DB_PASS=$$config{boinc_db_pass}; my $dbh = DBI->connect("DBI:mysql:$DB_NAME;host=$DB_HOST:$DB_PORT","$DB_USER","$DB_PASS",{RaiseError=>1}); return $dbh; } sub get_total_cpus($){ # Total number of hosts that finished jobs recently my $config=shift; my $dbh = db_conn($config); my $where = ""; if (defined($$config{boinc_app_id})) { $where = " and appid=$$config{boinc_app_id}"; } my $sth = $dbh->prepare('select count(distinct hostid) from result where server_state=4'.$where); $sth->execute(); my $result = $sth->fetchrow_array(); if(defined($result)){ # Return a reasonable number to allow bootstrapping projects if($result < 100) {$result = 100;} return $result; } return 0; } sub get_max_cpus($){ # Total number of hosts with running and finished jobs my $config=shift; my $dbh = db_conn($config); my $where = ""; if (defined($$config{boinc_app_id})) { $where = " and appid=$$config{boinc_app_id}"; } my $sth = $dbh->prepare('select count(distinct hostid) from result where server_state=4'.$where); $sth->execute(); my $result = $sth->fetchrow_array(); if(defined($result)){return $result;} else{ return 0;} } sub get_jobs_in_que($){ # Unsent jobs on BOINC server my $config=shift; my $dbh = db_conn($config); my $where = ""; if (defined($$config{boinc_app_id})) { $where = " and appid=$$config{boinc_app_id}"; } my $sth = $dbh->prepare('select count(*) from result where server_state in (1,2)'.$where); $sth->execute(); my $result = $sth->fetchrow_array(); if(defined($result)){return $result;} else{ return 0;} } sub get_jobs_in_run($){ # Jobs in progress my $config=shift; my $dbh = db_conn($config); my $where = ""; if (defined($$config{boinc_app_id})) { $where = " and appid=$$config{boinc_app_id}"; } my $sth = $dbh->prepare('select count(*) from result where server_state=4'.$where); $sth->execute(); my $result = $sth->fetchrow_array(); if(defined($result)){return $result;} else{ return 0;} } sub get_jobs_status($){ # Convert BOINC status into ARC LRMS state for each job my $config=shift; my $dbh = db_conn($config); my $where = ""; if (defined($$config{boinc_app_id})) { $where = " and appid=$$config{boinc_app_id}"; } my $sth = $dbh->prepare('select server_state,name,opaque from result where server_state in (1,2,4)'.$where); $sth->execute(); my (%jobs_status, @result,$job_status, $job_state, $job_name, $core_count); while(($job_state, $job_name, $core_count) = $sth->fetchrow_array()) { $job_status="Q"; my @tmp=split(/_/,$job_name); $job_name=$tmp[0]; if($job_state==4){$job_status="R";} $jobs_status{$job_name}=[$job_status, $core_count]; } return \%jobs_status; } 
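#### EXAMPLE #### EXAMPLE #### EXAMPLE #### EXAMPLE #### EXAMPLE ####

# Illustrative sketch only, in the spirit of the test() subs found in the
# other infoprovider modules: it drives the private DB helpers above with a
# hand-written config hash. All boinc_db_* values below are placeholders,
# not a real deployment; the keys are exactly the ones db_conn() reads, and
# boinc_app_id is the optional per-application filter used in the queries.
sub example {
    my %config = (
        boinc_db_host => 'localhost',  # placeholder DB host
        boinc_db_port => 3306,         # placeholder DB port
        boinc_db_name => 'boinc',      # placeholder database name
        boinc_db_user => 'boinc_ro',   # placeholder read-only user
        boinc_db_pass => 'secret',     # placeholder password
        # boinc_app_id => 1,           # optional: restrict counts to one app
    );
    # Unsent work (server_state 1,2) versus work in progress (server_state 4)
    my $queued  = get_jobs_in_que(\%config);
    my $running = get_jobs_in_run(\%config);
    # Per-job LRMS-style states ("Q"/"R"), keyed on the part of the BOINC
    # result name before the first '_'
    my $status  = get_jobs_status(\%config);
    print "queued=$queued running=$running tracked=" . scalar(keys %$status) . "\n";
}
# example;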
############################################ # Public subs ############################################# sub cluster_info ($) { my ($config) = shift; my (%lrms_cluster); $lrms_cluster{lrms_type} = "boinc"; $lrms_cluster{lrms_version} = "1"; # only enforcing per-process cputime limit $lrms_cluster{has_total_cputime_limit} = 0; my ($total_cpus) = get_total_cpus($config); my ($max_cpus) = get_max_cpus($config); $lrms_cluster{totalcpus} = $total_cpus; $lrms_cluster{cpudistribution} = $lrms_cluster{totalcpus}."cpu:1"; my $que_jobs = get_jobs_in_que($config); my $run_jobs = get_jobs_in_run($config); $lrms_cluster{usedcpus} = $run_jobs; $lrms_cluster{runningjobs} = $lrms_cluster{usedcpus}; $lrms_cluster{queuedcpus} = $max_cpus-$total_cpus; $lrms_cluster{queuedjobs} = $que_jobs; $lrms_cluster{queue} = [ ]; return %lrms_cluster; } sub queue_info ($$) { my ($config) = shift; my ($qname) = shift; init_globals($qname); my ($total_cpus) = get_total_cpus($config); my ($max_cpus) = get_max_cpus($config); my $que_jobs = get_jobs_in_que($config); my $running = get_jobs_in_run($config); if (defined $running) { # job_info was already called, we know exactly how many grid jobs # are running $lrms_queue{running} = $running; } else { # assuming that the submitted grid jobs are cpu hogs, approximate # the number of running jobs with the number of running processes $lrms_queue{running}= 0; } $lrms_queue{totalcpus} = $total_cpus; $lrms_queue{status} = $lrms_queue{totalcpus}-$lrms_queue{running}; # reserve negative numbers for error states # Fork is not real LRMS, and cannot be in error state if ($lrms_queue{status}<0) { debug("lrms_queue{status} = $lrms_queue{status}"); $lrms_queue{status} = 0; } my $job_limit; $job_limit = 1000; $lrms_queue{maxrunning} = $job_limit; $lrms_queue{maxuserrun} = $job_limit; $lrms_queue{maxqueuable} = $job_limit; $lrms_queue{maxcputime} = ""; $lrms_queue{queued} = $que_jobs; $lrms_queue{mincputime} = ""; $lrms_queue{defaultcput} = ""; $lrms_queue{minwalltime} = ""; $lrms_queue{defaultwallt} = ""; $lrms_queue{maxwalltime} = $lrms_queue{maxcputime}; return %lrms_queue; } sub jobs_info ($$@) { my ($config) = shift; my ($qname) = shift; my ($jids) = shift; init_globals($qname); my (%lrms_jobs,$jstatus); $jstatus=get_jobs_status($config); foreach my $id (@$jids){ $lrms_jobs{$id}{nodes} = [ hostname ]; $lrms_jobs{$id}{mem} = 2000000000; $lrms_jobs{$id}{walltime} = ""; $lrms_jobs{$id}{cputime} = ""; $lrms_jobs{$id}{comment} = [ "LRMS: Running under boinc" ]; $lrms_jobs{$id}{reqwalltime} = ""; $lrms_jobs{$id}{reqcputime} = ""; $lrms_jobs{$id}{rank} = "0"; # Fix cores to 1 since volunteers download 1 task per core $lrms_jobs{$id}{cpus} = "1"; #if (! exists $$jstatus{$id} || $$jstatus{$id}[1] == 0) { # $lrms_jobs{$id}{cpus} = "1"; #} else { # $lrms_jobs{$id}{cpus} = "$$jstatus{$id}[1]"; #} if(! exists $$jstatus{$id}) { $lrms_jobs{$id}{status} = "O"; } elsif($$jstatus{$id}[0] eq "R") { $lrms_jobs{$id}{status} = "R"; } elsif($$jstatus{$id}[0] eq "Q") { $lrms_jobs{$id}{status} = "Q"; } else { $lrms_jobs{$id}{status} = "O"; } } return %lrms_jobs; } sub users_info($$@) { my ($config) = shift; my ($qname) = shift; my ($accts) = shift; init_globals($qname); my (%lrms_users); # freecpus # queue length if ( ! 
exists $lrms_queue{status} ) { %lrms_queue = queue_info( $config, $qname ); } foreach my $u ( @{$accts} ) { $lrms_users{$u}{freecpus} = $lrms_queue{maxuserrun} - $lrms_queue{running}; $lrms_users{$u}{queuelength} = "$lrms_queue{queued}"; } return %lrms_users; } 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/grid-info-soft-register0000644000000000000000000000012311665712012030110 xustar000000000000000026 mtime=1322750986.06032 27 atime=1513200575.839718 30 ctime=1513200663.162786168 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/grid-info-soft-register0000755000175000002070000004111311665712012030161 0ustar00mockbuildmock00000000000000#! /bin/sh ldapbindir=/usr/bin tmpdir=/var/run/arc/infosys # no more than three times per minute MIN_SLAVE_REG_TIME=20 # no less than 5 second cache TTL MIN_DAEMON_CACHETIME=5 # get everything by default DEFAULT_DAEMON_SIZELIMIT=0 # set default network timeout to 30 seconds DEFAULT_NETWORK_TIMEOUT="" short_usage_error () { if test "$#" -gt 0 then echo grid-info-soft-register: "$@" 1>&2 ldap_shell_log_error "$@" else ldap_shell_log_error "unknown problem" fi cat 1>&2 <] -f[ile] \\ -- [arg...] or: grid-info-soft-register [-log ] -r[egister] \\ -d[aemon] or: grid-info-soft-register -h[elp] EOF } usage_error () { short_usage_error cat 1>&2 < -p[ort] \\ -period -dn daemon-clause: -t[ype] ldap -h[ost] -p[ort] \\ -ttl -r[oot] \\ {-T|-timeout} \\ {-b|-bindmethod} \\ [{-z|-sizelimit} ] \\ [-m[ode] cachedump -period ] The first form (with configuration file) is normally used explicitly, while the second form is used internally. The first form starts the daemon with its arguments and registers the daemon service using the information in the conf file for as long as the daemon runs. If given the TERM signal, the program will try to kill the daemon before exiting. The second form performs the requested registration directly until killed by any signal, without starting any daemon. EOF } ldap_shell_log_event () { if test ! "X${GRID_INFO_SYSTEM_LOGFILE}" = "X" then echo `date` "${THIS_COMMAND_BASE} [$$]: log:" "$@" \ >> "${GRID_INFO_SYSTEM_LOGFILE}" fi } ldap_shell_log_error () { if test ! "X${GRID_INFO_SYSTEM_ERRORFILE}" = "X" then echo `date` "${THIS_COMMAND_BASE} [$$]: error:" "$@" \ >> "${GRID_INFO_SYSTEM_ERRORFILE}" fi } ########################################################################### # process command-line INVOCATION_MODE= if test ! "$#" -gt 0 then short_usage_error exit 1 fi arg_check () { value=`eval echo '$'{$1}` if test -z "$value" then short_usage_error "missing $2" exit 1 fi } expand_path () { path_in="$1" while test ! 
"${path_in}" = `eval echo ${path_in}` do path_in=`eval echo ${path_in}` done echo "${path_in}" } clear_slave_params () { SLAVE_REG_HOST= SLAVE_REG_PORT= SLAVE_REG_TYPE= SLAVE_REG_TIME= SLAVE_REG_DN= SLAVE_DAEMON_HOST= SLAVE_DAEMON_PORT= SLAVE_DAEMON_TYPE= SLAVE_DAEMON_TIME= SLAVE_DAEMON_TIMEOUT= SLAVE_DAEMON_BINDMETHOD= SLAVE_DAEMON_CACHETIME= SLAVE_DAEMON_DN= SLAVE_DAEMON_MODE=direct SLAVE_DAEMON_SIZELIMIT=${DEFAULT_DAEMON_SIZELIMIT} } while test "$#" -gt 0 do case "$1" in -h | -help) usage_error exit 0 ;; -log) if test "$#" -eq 1 then short_usage_error "invalid last argument \"$1\"" exit 1 fi GRID_INFO_SYSTEM_LOGFILE="$2" GRID_INFO_SYSTEM_ERRORFILE="$2" shift 2 ;; -f | -file) if test "$#" -eq 1 then short_usage_error "invalid last argument \"$1\"" exit 1 fi INVOCATION_MODE=master MASTER_CONF_FILE="$2" shift 2 ;; -p | -pid) if test "$#" -eq 1 then short_usage_error "invalid last argument \"$1\"" exit 1 fi MASTER_PID="$2" shift 2 ;; --) if test ! "$#" -ge 2 || \ test ! "X${INVOCATION_MODE}" = "Xmaster" then short_usage_error "missing required arguments" exit 1 fi MASTER_DAEMON_PATH="$2" # leave daemon args in positional parameters shift 2 if test ! -r "${MASTER_CONF_FILE}" then short_usage_error "cannot read file \"${MASTER_CONF_FILE}\"" exit 2 fi if test ! -x "${MASTER_DAEMON_PATH}" then short_usage_error "cannot execute \"${MASTER_DAEMON_PATH}\"" exit 2 fi break ;; -r | -register) INVOCATION_MODE=slave SLAVE_CLAUSE=register clear_slave_params shift 1 while test "$#" -gt 0 do if test "$#" -eq 1 then short_usage_error "invalid last argument \"$1\"" exit 1 fi if test "$1" = "-d" || \ test "$1" = "-daemon" then SLAVE_CLAUSE=daemon shift 1 fi case "${SLAVE_CLAUSE}:$1" in register:-t | register:-type) SLAVE_REG_TYPE="$2" ;; register:-h | register:-host) SLAVE_REG_HOST="$2" ;; register:-p | register:-port) SLAVE_REG_PORT="$2" ;; register:-period) SLAVE_REG_TIME="$2" if test "${SLAVE_REG_TIME}" -lt ${MIN_SLAVE_REG_TIME} then ldap_shell_log_event "clamping period to minimum ${MIN_SLAVE_REG_TIME}" echo "clamping period to minimum ${MIN_SLAVE_REG_TIME}" 1>&2 fi ;; register:-dn) SLAVE_REG_DN="$2" ;; daemon:-t | daemon:-type) SLAVE_DAEMON_TYPE="$2" ;; daemon:-h | daemon:-host) SLAVE_DAEMON_HOST="$2" ;; daemon:-p | daemon:-port) SLAVE_DAEMON_PORT="$2" ;; daemon:-ttl) SLAVE_DAEMON_TIME="$2" ;; daemon:-T | daemon:-timeout) SLAVE_DAEMON_TIMEOUT="$2" ;; daemon:-b | daemon:-bindmethod) SLAVE_DAEMON_BINDMETHOD="$2" ;; daemon:-z | daemon:-sizelimit) SLAVE_DAEMON_SIZELIMIT="$2" ;; daemon:-r | daemon:-root) SLAVE_DAEMON_DN="$2" ;; daemon:-m | daemon:-mode) SLAVE_DAEMON_MODE="$2" ;; daemon:-period) SLAVE_DAEMON_CACHETIME="$2" if test "${SLAVE_DAEMON_CACHETIME}" -lt "${MIN_DAEMON_CACHETIME}" then ldap_shell_log_event "clamping cache period to minimum ${MIN_DAEMON_CACHETIME}" echo "clamping period to minimum ${MIN_DAEMON_CACHETIME}" 1>&2 fi ;; *) short_usage_error "unexpected argument \"$1\"" ;; esac shift 2 done arg_check SLAVE_REG_HOST "registration host" arg_check SLAVE_REG_PORT "registration port" arg_check SLAVE_REG_TYPE "registration type" arg_check SLAVE_REG_TIME "registration period" if test "${SLAVE_REG_TYPE}" = "mdsreg" then arg_check SLAVE_REG_DN "registration DN" elif test "${SLAVE_REG_TYPE}" = "mdsreg2" then arg_check SLAVE_REG_DN "registration DN" else short_usage_error "unrecognized registration" \ "type \"${SLAVE_REG_TYPE}\"" fi arg_check SLAVE_DAEMON_HOST "daemon host" arg_check SLAVE_DAEMON_PORT "daemon port" arg_check SLAVE_DAEMON_TIME "daemon registration TTL" arg_check SLAVE_DAEMON_TYPE 
"daemon type" arg_check SLAVE_DAEMON_BINDMETHOD "daemon bind method" if test "${SLAVE_DAEMON_TYPE}" = "ldap" then arg_check SLAVE_DAEMON_DN "LDAP daemon root DN" arg_check SLAVE_DAEMON_TIMEOUT "LDAP daemon query timeout" fi if test "${SLAVE_DAEMON_MODE}" = "cachedump" then arg_check SLAVE_DAEMON_CACHETIME "daemon cache period" fi ;; *) short_usage_error "missing -- separator" exit 2 ;; esac done # end argument processing ############################################################## # do real work SLAVE_PIDS_FILE=${tmpdir}/grid-info-soft-register.pids.$$ SLAVE_PIDS= DAEMON_PID= cleanup () { case "${INVOCATION_MODE}" in master) if test -r ${SLAVE_PIDS_FILE} then SLAVE_PIDS="${SLAVE_PIDS} "`cat ${SLAVE_PIDS_FILE}` rm -f ${SLAVE_PIDS_FILE} fi if test ! -z "${SLAVE_PIDS}" then kill -15 ${SLAVE_PIDS} 2> /dev/null SLAVE_PIDS= fi if test ! -z "${DAEMON_PID}" then kill -15 ${DAEMON_PID} 2> /dev/null sleep 2 kill -9 ${DAEMON_PID} 2> /dev/null DAEMON_PID= fi ;; slave) : ;; esac } soft_sleep () { SLEEPER_PID= sleep $* & SLEEPER_PID="$!" wait $SLEEPER_PID } kill_wrap () { _max_time=$1 shift "$@" 2> /dev/null > /dev/null & KILL_PID="$!" soft_sleep $1 kill -TERM "$KILL_PID" kill -9 "$KILL_PID" } trap cleanup 0 mdsreg_ldapadd () { if test -x ${ldapbindir}/ldapadd then tmpfile=`mktemp` while read line ; do echo $line >> $tmpfile ; done if [ "X$SLAVE_DAEMON_BINDMETHOD" = "XAUTHC-ONLY" ] then ${ldapbindir}/ldapadd -h "${SLAVE_REG_HOST}" \ -p "${SLAVE_REG_PORT}" $DEFAULT_NETWORK_TIMEOUT \ < $tmpfile || \ ${ldapbindir}/ldapadd -h "${SLAVE_REG_HOST}" \ -p "${SLAVE_REG_PORT}" $DEFAULT_NETWORK_TIMEOUT \ -D "${SLAVE_REG_DN}" -w "dummy" \ < $tmpfile else ${ldapbindir}/ldapadd -x -h "${SLAVE_REG_HOST}" \ -p "${SLAVE_REG_PORT}" $DEFAULT_NETWORK_TIMEOUT \ < $tmpfile || \ ${ldapbindir}/ldapadd -x -h "${SLAVE_REG_HOST}" \ -p "${SLAVE_REG_PORT}" $DEFAULT_NETWORK_TIMEOUT \ -D "${SLAVE_REG_DN}" -w "dummy" \ < $tmpfile fi unlink $tmpfile else ldap_shell_log_error "cannot execute \"${ldapbindir}/ldapadd\"" fi } mdsreg_register () { # follow original MDS 2.0 registration "schema" if test ! -z "${SLAVE_DAEMON_CACHETIME}" then cacheline="cachettl: ${SLAVE_DAEMON_CACHETIME}" fi mdsreg_ldapadd < /dev/null 2> /dev/null & DAEMON_PID="$!" ldap_shell_log_event "started daemon" \ "PID=${DAEMON_PID} \"${MASTER_DAEMON_PATH}\"" fi # start slave registration processes clear_slave_params # initialize an empty SLAVE_PIDS_FILE cat /dev/null > ${SLAVE_PIDS_FILE} # force a newline at EOF { cat "${MASTER_CONF_FILE}" ; echo "" ; } | \ while read name value do if test -z "${name}" then if test ! -z "${SLAVE_REG_HOST}" && \ test ! -z "${SLAVE_REG_TYPE}" && \ test ! -z "${SLAVE_REG_PORT}" && \ test ! -z "${SLAVE_REG_TIME}" && \ test ! -z "${SLAVE_REG_DN}" && \ test ! -z "${SLAVE_DAEMON_TYPE}" && \ test ! -z "${SLAVE_DAEMON_HOST}" && \ test ! -z "${SLAVE_DAEMON_PORT}" && \ test ! -z "${SLAVE_DAEMON_TIME}" && \ test ! -z "${SLAVE_DAEMON_TIMEOUT}" && \ test ! -z "${SLAVE_DAEMON_BINDMETHOD}" && \ test ! -z "${SLAVE_DAEMON_SIZELIMIT}" && \ test ! -z "${SLAVE_DAEMON_MODE}" && \ test ! -z "${SLAVE_DAEMON_DN}" then # launch a slave if test ! 
-z "${GRID_INFO_SYSTEM_ERRORFILE}" then logclause="-log ${GRID_INFO_SYSTEM_ERRORFILE}" else logclause= fi $0 $logclause -register -t "${SLAVE_REG_TYPE}" -h "${SLAVE_REG_HOST}" -p "${SLAVE_REG_PORT}" -period "${SLAVE_REG_TIME}" -dn "${SLAVE_REG_DN}" -daemon -t "${SLAVE_DAEMON_TYPE}" -h "${SLAVE_DAEMON_HOST}" -p "${SLAVE_DAEMON_PORT}" -ttl "${SLAVE_DAEMON_TIME}" -r "${SLAVE_DAEMON_DN}" -T "${SLAVE_DAEMON_TIMEOUT}" -b ${SLAVE_DAEMON_BINDMETHOD} -z "${SLAVE_DAEMON_SIZELIMIT}" -m "${SLAVE_DAEMON_MODE}" -period "${SLAVE_DAEMON_CACHETIME}" 2> /dev/null > /dev/null & slave_pid="$!" echo " ${slave_pid}" >> ${SLAVE_PIDS_FILE} clear_slave_params fi else case "${name}" in dn:) SLAVE_REG_DN="${value}" ;; regtype:) SLAVE_REG_TYPE="${value}" ;; reghn:) SLAVE_REG_HOST="${value}" ;; regport:) SLAVE_REG_PORT="${value}" ;; regperiod:) SLAVE_REG_TIME="${value}" ;; hn:) SLAVE_DAEMON_HOST="${value}" ;; port:) SLAVE_DAEMON_PORT="${value}" ;; rootdn:) SLAVE_DAEMON_DN="${value}" ;; ttl:) SLAVE_DAEMON_TIME="${value}" ;; cachettl:) SLAVE_DAEMON_CACHETIME="${value}" ;; timeout: | timelimit:) SLAVE_DAEMON_TIMEOUT="${value}" ;; bindmethod:) SLAVE_DAEMON_BINDMETHOD="${value}" ;; sizelimit:) SLAVE_DAEMON_SIZELIMIT="${value}" ;; mode:) SLAVE_DAEMON_MODE="${value}" ;; type:) SLAVE_DAEMON_TYPE="${value}" ;; \#*) # discard comment fields : ;; esac fi done SLAVE_PIDS=`cat ${SLAVE_PIDS_FILE}` if test -z "${SLAVE_PIDS}" then ldap_shell_log_event "zero registration records" else ldap_shell_log_event "started slave PIDs" ${SLAVE_PIDS} fi # wait for daemon while ( ps -p ${DAEMON_PID} > /dev/null) ; do soft_sleep 30 done ldap_shell_log_event "daemon PID=${DAEMON_PID} terminated, exiting" DAEMON_PID= # kill slave registration processes cleanup ;; slave) # reregister every SLAVE_REG_TIME seconds, until killed ldap_shell_log_event "slave running on ${SLAVE_REG_TIME} interval" # give companion GIIS a chance to start before # first registration soft_sleep 5 while true do case "${SLAVE_REG_TYPE}" in mdsreg) mdsreg_register ;; mdsreg2) mdsreg_register2 ;; *) ldap_shell_log_error "unrecognized registration type \"${SLAVE_REG_TYPE}\"" exit 1 ;; esac soft_sleep "${SLAVE_REG_TIME}" done ;; esac nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/XmlPrinter.pm0000644000000000000000000000012411513650776026171 xustar000000000000000027 mtime=1294946814.211188 27 atime=1513200575.809717 30 ctime=1513200663.155786082 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/XmlPrinter.pm0000644000175000002070000000466611513650776026252 0ustar00mockbuildmock00000000000000package XmlPrinter; sub new { my ($this, $handle) = @_; my $class = ref($this) || $this; binmode $handle, ':encoding(utf8)'; #print $handle ''."\n"; my $self = {fh => $handle, indent => ''}; return bless $self, $class; } sub escape { my ($chars) = @_; $chars =~ s/&/&/g; $chars =~ s/>/>/g; $chars =~ s/{fh}; if (not @attributes) { print $fh $self->{indent}."<$name>\n"; } else { die "$name: Not a HASH reference" unless ref $data eq 'HASH'; print $fh $self->{indent}."<$name"; for my $attr (@attributes) { my $val = $data->{$attr}; print $fh " $attr=\"$val\"" if defined $val; } print $fh ">\n"; } $self->{indent} .= ' '; } sub end { my ($self, $name) = @_; chop $self->{indent}; chop $self->{indent}; my $fh = $self->{fh}; print $fh $self->{indent}."\n"; } sub property { my ($self, $prop, $val) = @_; my $indent = $self->{indent}; my $fh = $self->{fh}; return unless defined $val; if (not ref $val) { print $fh "$indent<$prop>".escape($val)."\n"; } elsif (ref $val eq 'ARRAY') { 
print $fh "$indent<$prop>".escape($_)."\n" for @$val; } else { die "$prop: Not an ARRAY reference"; } } sub properties { my ($self, $data, @props) = @_; my $indent = $self->{indent}; my $fh = $self->{fh}; for my $prop (@props) { my $val = $data->{$prop}; next unless defined $val; if (not ref $val) { print $fh "$indent<$prop>".escape($val)."\n"; } elsif (ref $val eq 'ARRAY') { print $fh "$indent<$prop>".escape($_)."\n" for @$val; } else { die "$prop: Not an ARRAY reference"; } } } #### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST #### sub test { my $printer = XmlPrinter->new(*STDOUT); my $data = { xmlns => "blah/blah", date => "today" }; $printer->header(); $printer->begin("Persons", $data, qw( date )); $data = { id => "1", name => "James", nick => "Jimmy" }; $printer->begin("Person", $data, "id"); $printer->properties($data, qw( name nick )); $printer->end("Person"); $printer->end("Persons"); } #test; 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/InfoChecker.pm0000644000000000000000000000012412116113676026236 xustar000000000000000027 mtime=1362663358.222032 27 atime=1513200575.828718 30 ctime=1513200663.148785997 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/InfoChecker.pm0000644000175000002070000001434012116113676026305 0ustar00mockbuildmock00000000000000package InfoChecker; use base 'Exporter'; use strict; # Class to check that a data structure conforms to a schema. Data and schema # are both nested perl structures consisting of hashes and arrays nested to any # depth. This function will check that data and schema have the same nesting # structure. For hashes, all required keys in the schema must also be defined # in the data. A "*" value in the schema marks that key optional. A "*" key in # the schema matches all unmatched keys in the data (if any). Arrays in the # schema should have exactly one element, and this element will be matched # against all elements in the corresponding array in the data. # Constructor # # Arguments: # $schema - reference to the schema structure sub new($$) { my ($this,$schema) = @_; my $class = ref($this) || $this; die "Schema not a reference" unless ref($schema); my $self = {schema => $schema}; bless $self, $class; return $self; } # # Arguments: # $data - reference to a data structure that should be checked # $strict - (optional) if true, extra hash keys in data will be reported. # Otherwise only missing keys are reported. # # Returns: # @errors - list of error messages, one for each mismatch found during # checking sub verify($$;$) { my ($self,$data,$strict) = @_; $self->{strict} = $strict; $self->{errors} = []; $self->_verify_part("",$data,$self->{schema}); return @{$self->{errors}}; } sub _verify_part($$$$); # prototype it, because it's a recursive function sub _verify_part($$$$) { my ($self,$subject,$data,$schema) = @_; unless (defined $data) { push @{$self->{errors}}, "$subject is undefined"; return 1; # tell caller this entry can be deleted } unless ( ref($data) eq ref($schema) ) { my $type = ref($schema) ? 
ref($schema) : "SCALAR"; push @{$self->{errors}}, "$subject has wrong type, $type expected"; return 1; # tell caller this entry can be deleted } # process a hash reference if ( ref($schema) eq "HASH" ) { # deal with hash keys other than '*' my @templkeys = grep { $_ ne "*" } keys %$schema; for my $key ( sort @templkeys ) { my $subj = $subject."{$key}"; if ( defined $data->{$key} ) { # check that existing key value is valid my $can_delete = $self->_verify_part($subj, $data->{$key}, $schema->{$key}); # delete it if it's not valid if ($can_delete and $self->{strict}) { push @{$self->{errors}}, "$subj deleting it"; delete $data->{$key}; } } elsif ($schema->{$key} eq "*") { # do nothing: # this missing key is optional } elsif (ref($schema->{$key}) eq "ARRAY" and $schema->{$key}[0] eq "*") { # do nothing: # this missing key is optional, it points to optional array } elsif (ref($schema->{$key}) eq "HASH" and keys(%{$schema->{$key}}) == 1 and exists $schema->{$key}{'*'} ) { # do nothing: # this missing key is optional, it points to optional hash } else { push @{$self->{errors}}, "$subj is missing"; } } # deal with '*' hash key in schema if ( grep { $_ eq "*" } keys %$schema ) { for my $datakey ( sort keys %$data ) { # skip keys that have been checked already next if grep { $datakey eq $_ } @templkeys; my $subj = $subject."{$datakey}"; # check that the key's value is valid my $can_delete = $self->_verify_part($subj, $data->{$datakey}, $schema->{"*"}); # delete it if it's not valid if ($can_delete and $self->{strict}) { push @{$self->{errors}}, "$subj deleting it"; delete $data->{$datakey}; } } # no '*' key in schema, reverse checking may be performed } elsif ($self->{strict}) { for my $datakey ( sort keys %$data) { my $subj = $subject."{$datakey}"; unless (exists $schema->{$datakey}) { push @{$self->{errors}}, "$subj is not recognized"; push @{$self->{errors}}, "$subj deleting it"; delete $data->{$datakey}; } } } # process an array reference } elsif ( ref($schema) eq "ARRAY" ) { for ( my $i=0; $i < @$data; $i++ ) { my $subj = $subject."[$i]"; # check that data element is valid my $can_delete = $self->_verify_part($subj, $data->[$i], $schema->[0]); # delete it if it's not valid if ($can_delete and $self->{strict}) { push @{$self->{errors}}, "$subj deleting it"; splice @$data, $i, 1; --$i; } } # process a scalar: nothing to do here } elsif ( not ref($data)) { # nothing else than scalars and HASH and ARRAY references are allowed in # the schema } else { my $type = ref($schema); push @{$self->{errors}}, "$subject bad value in schema, ref($type) not allowed"; } return 0; } #### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST #### sub test() { my $schema = { totalcpus => '', freecpus => '', jobs => { '*' => { owner => '' } }, users => [ { dn => '' } ] }; my $data = { freecpus => undef, jobs => { id1 => { owner => 'val' }, id2 => 'something else' }, users => [{dn => 'joe', extra => 'dummy'}, 'bad user', { }] }; require Data::Dumper; import Data::Dumper; print "Before: ",Dumper($data); print "Checker: options->$_\n" foreach InfoChecker->new($schema)->verify($data,1); print "After: ",Dumper($data); } #test; 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/schema0000644000000000000000000000013213214316027024675 xustar000000000000000030 mtime=1513200663.128785752 30 atime=1513200668.718854121 30 ctime=1513200663.128785752 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/schema/0000755000175000002070000000000013214316027025020 
5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/schema/PaxHeaders.7502/nordugrid.schema0000644000000000000000000000012412046417550030137 xustar000000000000000027 mtime=1352277864.008593 27 atime=1513200575.780717 30 ctime=1513200663.128785752 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/schema/nordugrid.schema0000644000175000002070000013235312046417550030213 0ustar00mockbuildmock00000000000000#--------------------------------------------------------- # These classes and attributes are imported from globus mds # only slightly modified attributetype ( 1.3.6.1.4.1.11604.2.1.8.1.4.1.0.1 NAME 'Mds-Vo-Op-name' DESC 'Locally unique Op name' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) objectclass ( 1.3.6.1.4.1.11604.2.1.8.1.4.1 NAME 'MdsVoOp' SUP 'Mds' STRUCTURAL MUST Mds-Vo-Op-name ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.0.1 NAME 'Mds-Service-type' DESC 'Service protocol' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.0.2 NAME 'Mds-Service-protocol' DESC 'Service protocol OID' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.0.3 NAME 'Mds-Service-port' DESC 'Service TCP port' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.0.4 NAME 'Mds-Service-hn' DESC 'Service FQDN hostname' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.0.5 NAME 'Mds-Service-url' DESC 'Service URL' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) objectclass ( 1.3.6.1.4.1.11604.2.1.8.2.7.1 NAME 'MdsService' SUP 'Mds' MUST ( Mds-Service-type $ Mds-Service-port $ Mds-Service-hn ) MAY ( Mds-Service-protocol $ Mds-Service-url ) ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.1.0.1 NAME 'Mds-Service-Ldap-suffix' DESC 'DN suffix of service' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.1.0.2 NAME 'Mds-Service-Ldap-timeout' DESC 'suggested timeout' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.1.0.3 NAME 'Mds-Service-Ldap-sizelimit' DESC 'suggested sizelimit' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.1.0.4 NAME 'Mds-Service-Ldap-cachettl' DESC 'suggested cacheability' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.1.0.5 NAME 'Mds-Service-Ldap-ttl' DESC 'suggested ttl' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 
1.3.6.1.4.1.11604.2.1.8.2.7.1.1.0.10 NAME 'Mds-Reg-status' DESC 'VALID/INVALID/PURGED' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.1.0.11 NAME 'Mds-Bind-Method-servers' DESC 'AUTHC-ONLY/AUTHC-FIRST/ANONYM-ONLY' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) objectclass ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.1 NAME 'MdsServiceLdap' SUP 'MdsService' MUST Mds-Service-Ldap-suffix MAY ( Mds-Service-Ldap-timeout $ Mds-Service-Ldap-sizelimit $ Mds-Service-Ldap-cachettl $ Mds-Service-Ldap-ttl $ Mds-Reg-status $ Mds-Bind-Method-servers ) ) # attributes for the nordugrid-cluster objectclass # attributetype ( 1.3.6.1.4.1.11604.2.1.1.1 NAME 'nordugrid-cluster-name' DESC 'The name of the cluster specified as the domain name of the frontend' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.2 NAME 'nordugrid-cluster-aliasname' DESC 'The alias name of the cluster' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE) attributetype ( 1.3.6.1.4.1.11604.2.1.1.3 NAME 'nordugrid-cluster-contactstring' DESC 'The URL of the job submission service running on the cluster frontend' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.4 NAME 'nordugrid-cluster-support' DESC 'RFC822 email address of support' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{256}) attributetype ( 1.3.6.1.4.1.11604.2.1.1.5 NAME 'nordugrid-cluster-lrms-type' DESC 'The type of the Local Resource Management System' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.6 NAME 'nordugrid-cluster-lrms-version' DESC 'The version of the Local Resource Management System' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.7 NAME 'nordugrid-cluster-lrms-config' DESC 'Additional remark on the LRMS config' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.8 NAME 'nordugrid-cluster-architecture' DESC 'The architecture of the cluster nodes' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.9 NAME 'nordugrid-cluster-opsys' DESC 'The operating system of the machines of the cluster' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15) attributetype ( 1.3.6.1.4.1.11604.2.1.1.10 NAME 'nordugrid-cluster-homogeneity' DESC 'A logical flag indicating the homogeneity of the cluster nodes' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.11 NAME 'nordugrid-cluster-nodecpu' DESC 'The cpu type of the nodes expressed in a fixed form (model name + MHz)' EQUALITY 
caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.12 NAME 'nordugrid-cluster-nodememory' DESC 'The amount of memory which can be guaranteed to be available on the node in MB' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.13 NAME 'nordugrid-cluster-totalcpus' DESC 'The total number of cpus in the cluster' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.14 NAME 'nordugrid-cluster-cpudistribution' DESC 'The cpu distribution of the nodes given in the form of 1cpu:3 2cpu:4 4cpu:1' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.15 NAME 'nordugrid-cluster-sessiondir-free' DESC 'Free diskspace in MB of the sessiondirectory on the cluster' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.16 NAME 'nordugrid-cluster-sessiondir-total' DESC 'Total diskspace in MB of the sessiondirectory on the cluster' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.17 NAME 'nordugrid-cluster-cache-free' DESC 'Free diskspace in MB of the cache area on the cluster' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.18 NAME 'nordugrid-cluster-cache-total' DESC 'Total diskspace in MB of the cache area on the cluster' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.19 NAME 'nordugrid-cluster-runtimeenvironment' DESC 'preinstalled software packages of the cluster' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.20 NAME 'nordugrid-cluster-localse' DESC 'The URL of a storage element considered to be local to the cluster' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.21 NAME 'nordugrid-cluster-middleware' DESC 'The middleware packages on the cluster' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.22 NAME 'nordugrid-cluster-totaljobs' DESC 'The total number of jobs (Grid + non-Grid) in the cluster' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.23 NAME 'nordugrid-cluster-usedcpus' DESC 'The total number of occupied cpus in the cluster' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.24 NAME 'nordugrid-cluster-queuedjobs' DESC 'The total number of jobs (grid and 
non-grid) not-yet running: preparing or waiting to run on the cluster, either in the grid-manager or in the LRMS. The attribute is TO BE DEPRECATED' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.25 NAME 'nordugrid-cluster-location' DESC 'The geographical location of the cluster expressed in terms of a Postal ZIP code' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.26 NAME 'nordugrid-cluster-owner' DESC 'The owner of the resource' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.27 NAME 'nordugrid-cluster-issuerca' DESC 'The DN of the Certificate Authority which issued the certificate of the cluster' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.28 NAME 'nordugrid-cluster-nodeaccess' DESC 'The inbound/outbound network accessibility of the nodes' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.29 NAME 'nordugrid-cluster-comment' DESC 'Free form comment' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.30 NAME 'nordugrid-cluster-interactive-contactstring' DESC 'The URL for interactive login' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.31 NAME 'nordugrid-cluster-benchmark' DESC '@ separated benchmark_name, benchmark_value pair characterizing the cluster nodes' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.32 NAME 'nordugrid-cluster-sessiondir-lifetime' DESC 'The lifetime of the sessiondir after the job has completed (in minutes)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.33 NAME 'nordugrid-cluster-prelrmsqueued' DESC 'The total number of grid jobs not-yet reached the LRMS: preparing or queuing in the grid-layer' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.34 NAME 'nordugrid-cluster-issuerca-hash' DESC 'The HASH of the Certificate Authority which issued the certificate for the cluster' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.35 NAME 'nordugrid-cluster-trustedca' DESC 'The DN of a Certificate Authority trusted by the cluster' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.36 NAME 'nordugrid-cluster-acl' DESC 'Cluster authorization information' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 
1.3.6.1.4.1.1466.115.121.1.15 ) attributetype (1.3.6.1.4.1.11604.2.1.1.37 NAME 'nordugrid-cluster-credentialexpirationtime' DESC 'The expiration date of the shortest living credential affecting the cluster\27s x509 environment in GMT' EQUALITY generalizedTimeMatch ORDERING generalizedTimeOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE ) objectclass ( 1.3.6.1.4.1.11604.2.1.1 NAME 'nordugrid-cluster' DESC 'Description of a Nordugrid cluster' SUP 'Mds' STRUCTURAL MUST ( nordugrid-cluster-name $ nordugrid-cluster-contactstring ) MAY ( nordugrid-cluster-aliasname $ nordugrid-cluster-support $ nordugrid-cluster-lrms-type $ nordugrid-cluster-lrms-version $ nordugrid-cluster-lrms-config $ nordugrid-cluster-architecture $ nordugrid-cluster-opsys $ nordugrid-cluster-homogeneity $ nordugrid-cluster-nodecpu $ nordugrid-cluster-nodememory $ nordugrid-cluster-totalcpus $ nordugrid-cluster-cpudistribution $ nordugrid-cluster-sessiondir-free $ nordugrid-cluster-sessiondir-total $ nordugrid-cluster-cache-free $ nordugrid-cluster-cache-total $ nordugrid-cluster-runtimeenvironment $ nordugrid-cluster-localse $ nordugrid-cluster-middleware $ nordugrid-cluster-totaljobs $ nordugrid-cluster-usedcpus $ nordugrid-cluster-queuedjobs $ nordugrid-cluster-location $ nordugrid-cluster-owner $ nordugrid-cluster-issuerca $ nordugrid-cluster-nodeaccess $ nordugrid-cluster-comment $ nordugrid-cluster-interactive-contactstring $ nordugrid-cluster-benchmark $ nordugrid-cluster-sessiondir-lifetime $ nordugrid-cluster-prelrmsqueued $ nordugrid-cluster-issuerca-hash $ nordugrid-cluster-trustedca $ nordugrid-cluster-acl $ nordugrid-cluster-credentialexpirationtime )) #----------------------------------------------------------------- # attributes for the nordugrid-info-group objectclass # attributetype ( 1.3.6.1.4.1.11604.2.1.2.1 NAME 'nordugrid-info-group-name' DESC 'Locally unique info group name' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE) objectclass ( 1.3.6.1.4.1.11604.2.1.2 NAME 'nordugrid-info-group' DESC 'A general entry for grouping together MDS entries' SUP 'Mds' STRUCTURAL MUST ( nordugrid-info-group-name )) #----------------------------------------------------------------- # attributes for the nordugrid-queue objectclass # attributetype ( 1.3.6.1.4.1.11604.2.1.3.1 NAME 'nordugrid-queue-name' DESC 'The queue name' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.2 NAME 'nordugrid-queue-status' DESC 'The queue status' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.3 NAME 'nordugrid-queue-running' DESC 'Number of running jobs (Grid + non-Grid) in the queue with multi-node jobs multiciplity' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.4 NAME 'nordugrid-queue-queued' DESC 'The number of jobs (Grid + non-Grid) waiting in the queue. 
The attribute is TO BE DEPRECATED' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.5 NAME 'nordugrid-queue-maxrunning' DESC 'The maximum number of jobs allowed to run from this queue' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.6 NAME 'nordugrid-queue-maxqueuable' DESC 'The maximum number of jobs allowed to reside in the queue' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.7 NAME 'nordugrid-queue-maxuserrun' DESC 'Maximum number of jobs a user can run at the same time' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.8 NAME 'nordugrid-queue-maxcputime' DESC 'The maximum cputime allowed in this queue (in minutes)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.9 NAME 'nordugrid-queue-mincputime' DESC 'The minimum possible cputime of this queue (in minutes)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.10 NAME 'nordugrid-queue-defaultcputime' DESC 'The default cputime assigned to this queue (in minutes)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.11 NAME 'nordugrid-queue-schedulingpolicy' DESC 'The scheduling policy of the queue (i.e. 
FIFO)' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.12 NAME 'nordugrid-queue-totalcpus' DESC 'Total number of cpus assigned to the queue' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.13 NAME 'nordugrid-queue-nodecpu' DESC 'The cpu type of the nodes assigned to the queue (model name + MHz)' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.14 NAME 'nordugrid-queue-nodememory' DESC 'The installed memory of a node assigned to the queue in MB' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.15 NAME 'nordugrid-queue-architecture' DESC 'The architecture of the machines in the queue' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.16 NAME 'nordugrid-queue-opsys' DESC 'The operating system of the nodes in the queue' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15) attributetype ( 1.3.6.1.4.1.11604.2.1.3.17 NAME 'nordugrid-queue-gridrunning' DESC 'Number of running Grid jobs in the queue with multi-node jobs multiciplity' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.18 NAME 'nordugrid-queue-gridqueued' DESC 'The number of Grid jobs waiting in the LRMS queue' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.19 NAME 'nordugrid-queue-comment' DESC 'Free form comment' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.20 NAME 'nordugrid-queue-benchmark' DESC 'Colon separated benchmark_name, benchmark_value pair characterizing the queue' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.21 NAME 'nordugrid-queue-homogeneity' DESC 'A logical flag indicating the homogeneity of the queue nodes' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.22 NAME 'nordugrid-queue-prelrmsqueued' DESC 'The number of Grid jobs belonging to this queue being processed or waiting in the Grid-layer before the LRMS submission.' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.23 NAME 'nordugrid-queue-localqueued' DESC 'The number of non-Grid jobs waiting in the LRMS queue.' 
EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.24 NAME 'nordugrid-queue-maxwalltime' DESC 'The maximum walltime allowed in this queue (in minutes)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.25 NAME 'nordugrid-queue-minwalltime' DESC 'The minimum possible walltime of this queue (in minutes)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.26 NAME 'nordugrid-queue-defaultwalltime' DESC 'The default walltime assigned to this queue (in minutes)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.27 NAME 'nordugrid-queue-maxtotalcputime' DESC 'The maximum total cputime allowed in this queue (in minutes)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) objectclass ( 1.3.6.1.4.1.11604.2.1.3 NAME 'nordugrid-queue' DESC 'An LRMS queue' SUP 'Mds' STRUCTURAL MUST ( nordugrid-queue-name $ nordugrid-queue-status) MAY ( nordugrid-queue-running $ nordugrid-queue-queued $ nordugrid-queue-maxrunning $ nordugrid-queue-maxqueuable$ nordugrid-queue-maxuserrun $ nordugrid-queue-maxcputime $ nordugrid-queue-mincputime $ nordugrid-queue-defaultcputime $ nordugrid-queue-schedulingpolicy $ nordugrid-queue-totalcpus $ nordugrid-queue-nodecpu $ nordugrid-queue-nodememory $ nordugrid-queue-opsys $ nordugrid-queue-architecture $ nordugrid-queue-gridrunning $ nordugrid-queue-gridqueued $ nordugrid-queue-comment $ nordugrid-queue-benchmark $ nordugrid-queue-homogeneity $ nordugrid-queue-prelrmsqueued $ nordugrid-queue-localqueued $ nordugrid-queue-maxwalltime $ nordugrid-queue-minwalltime $ nordugrid-queue-defaultwalltime $ nordugrid-queue-maxtotalcputime )) #----------------------------------------------------------------- #attributes for the nordugrid-job objectclass # attributetype ( 1.3.6.1.4.1.11604.2.1.4.1 NAME 'nordugrid-job-globalid' DESC 'The global job identifier string' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.2 NAME 'nordugrid-job-globalowner' DESC 'The SubjectName of the job owner' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.3 NAME 'nordugrid-job-execcluster' DESC 'The name of the execution cluster' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE) attributetype ( 1.3.6.1.4.1.11604.2.1.4.4 NAME 'nordugrid-job-execqueue' DESC 'The name of the execution queue' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.5 NAME 'nordugrid-job-stdout' DESC 'The name of the file which contains the stdout' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 
1.3.6.1.4.1.11604.2.1.4.6 NAME 'nordugrid-job-stderr' DESC 'The name of the file which contains the stderr' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.7 NAME 'nordugrid-job-stdin' DESC 'The name of the file which contains the stdin' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.8 NAME 'nordugrid-job-reqcputime' DESC 'The cputime request by the job in minutes' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.9 NAME 'nordugrid-job-status' DESC 'The status of the grid job' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.10 NAME 'nordugrid-job-queuerank' DESC 'The queue position of the job' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.11 NAME 'nordugrid-job-comment' DESC 'Free form comment about the job' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.12 NAME 'nordugrid-job-submissionui' DESC 'The name of the UI from where the job was submitted' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.13 NAME 'nordugrid-job-submissiontime' DESC 'The submission time of the job in GMT' EQUALITY generalizedTimeMatch ORDERING generalizedTimeOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.14 NAME 'nordugrid-job-usedcputime' DESC 'The consumed cputime of the job in minutes' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.15 NAME 'nordugrid-job-usedwalltime' DESC 'The consumed walltime of the job in minutes' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.16 NAME 'nordugrid-job-sessiondirerasetime' DESC 'The date when the session dir will be deleted in GMT' EQUALITY generalizedTimeMatch ORDERING generalizedTimeOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.17 NAME 'nordugrid-job-usedmem' DESC 'The memory usage of the job (in KB)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.18 NAME 'nordugrid-job-errors' DESC 'Error mesages from the cluster' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.19 NAME 'nordugrid-job-jobname' DESC 'The jobname specified by the user with the jobname RSL attribute' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR 
caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.20 NAME 'nordugrid-job-runtimeenvironment' DESC 'The runtimeenvironment requested by the job' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.21 NAME 'nordugrid-job-cpucount' DESC 'The number of CPUs requested by the job' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.22 NAME 'nordugrid-job-executionnodes' DESC 'The list of nodenames where the job is running' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.23 NAME 'nordugrid-job-gmlog' DESC 'The name of the directory which contains the grid session related logs' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.24 NAME 'nordugrid-job-clientsoftware' DESC 'The client software which submitted the job' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15) attributetype ( 1.3.6.1.4.1.11604.2.1.4.25 NAME 'nordugrid-job-proxyexpirationtime' DESC 'The expiration time of the proxy of the job in GMT' EQUALITY generalizedTimeMatch ORDERING generalizedTimeOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.26 NAME 'nordugrid-job-completiontime' DESC 'The completion time of the grid job in GMT' EQUALITY generalizedTimeMatch ORDERING generalizedTimeOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.27 NAME 'nordugrid-job-exitcode' DESC 'The exit code of the executable of the job obtained from the LRMS' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.28 NAME 'nordugrid-job-rerunable' DESC 'Rerunability of the FAILED grid jobs' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.29 NAME 'nordugrid-job-reqwalltime' DESC 'The request wallclock time of the job in minutes' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) objectclass ( 1.3.6.1.4.1.11604.2.1.4 NAME 'nordugrid-job' DESC 'A Grid job' SUP 'Mds' STRUCTURAL MUST ( nordugrid-job-globalid $ nordugrid-job-globalowner $ nordugrid-job-status ) MAY ( nordugrid-job-queuerank $ nordugrid-job-submissionui $ nordugrid-job-submissiontime $ nordugrid-job-usedcputime $ nordugrid-job-usedwalltime $ nordugrid-job-usedmem $ nordugrid-job-comment $ nordugrid-job-execcluster $ nordugrid-job-execqueue $ nordugrid-job-stdout $ nordugrid-job-stderr $ nordugrid-job-stdin $ nordugrid-job-sessiondirerasetime $ nordugrid-job-reqcputime $ nordugrid-job-errors $ nordugrid-job-jobname $ nordugrid-job-runtimeenvironment $ nordugrid-job-cpucount $ nordugrid-job-executionnodes $ nordugrid-job-gmlog $ 
nordugrid-job-clientsoftware $ nordugrid-job-proxyexpirationtime $ nordugrid-job-completiontime $ nordugrid-job-exitcode $ nordugrid-job-rerunable $ nordugrid-job-reqwalltime)) #---------------------------------------------------------------- # attributes for the nordugrid-authuser objectclass # attributetype ( 1.3.6.1.4.1.11604.2.1.5.1 NAME 'nordugrid-authuser-name' DESC 'The Common Name of the authorized user plus a local unique number' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.5.2 NAME 'nordugrid-authuser-sn' DESC 'The SubjectName of the authorized user' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.5.3 NAME 'nordugrid-authuser-freecpus' DESC 'The number of freely available cpus with their timelimits in minutes for a user in the queue. Given in the form ncpus:min, min is optional (example: 2 4:25 5:180)' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.5.4 NAME 'nordugrid-authuser-diskspace' DESC 'The free diskspace available for the job (in MB)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.5.5 NAME 'nordugrid-authuser-queuelength' DESC 'The number of queuing jobs of a particular user, both queuing in the LRMS and in the Grid-layer' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) objectclass ( 1.3.6.1.4.1.11604.2.1.5 NAME 'nordugrid-authuser' DESC 'An authorised user of a NorduGrid cluster' SUP 'Mds' STRUCTURAL MUST ( nordugrid-authuser-name $ nordugrid-authuser-sn ) MAY ( nordugrid-authuser-queuelength $ nordugrid-authuser-diskspace $ nordugrid-authuser-freecpus )) #---------------------------------------------------------------- # # nordugrid-se attributetype ( 1.3.6.1.4.1.11604.2.1.6.1 NAME 'nordugrid-se-name' DESC 'The name of the Storage Element' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.2 NAME 'nordugrid-se-aliasname' DESC 'The alias name of the SE' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.3 NAME 'nordugrid-se-type' DESC 'The type of the SE' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.4 NAME 'nordugrid-se-freespace' DESC 'The free space available in the SE (in MB)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.5 NAME 'nordugrid-se-url' DESC 'The URL to contact the Storage Element' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.6 NAME 'nordugrid-se-authuser' DESC 'The DN of an authorized user' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 
1.3.6.1.4.1.11604.2.1.6.7 NAME 'nordugrid-se-location' DESC 'The geographical location of the SE expressed in terms of a Postal ZIP code: SE-22363' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.8 NAME 'nordugrid-se-owner' DESC 'The owner of the resource' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.9 NAME 'nordugrid-se-issuerca' DESC 'The DN of the Certificate Authority which issued the certificate of the SE' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.10 NAME 'nordugrid-se-totalspace' DESC 'The total capacity of the SE (in MB)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.11 NAME 'nordugrid-se-middleware' DESC 'The middleware packages on the SE' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.12 NAME 'nordugrid-se-comment' DESC 'Free form comment' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.13 NAME 'nordugrid-se-accesscontrol' DESC 'The access control framework of the SE' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.14 NAME 'nordugrid-se-issuerca-hash' DESC 'The HASH of the Certificate Authority which issued the certificate for the SE' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.15 NAME 'nordugrid-se-trustedca' DESC 'The DN of a Certificate Authority trusted by the SE' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.16 NAME 'nordugrid-se-acl' DESC 'Storage Element authorization information' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) objectclass ( 1.3.6.1.4.1.11604.2.1.6 NAME 'nordugrid-se' DESC 'A storage element in the Nordugrid' SUP 'Mds' STRUCTURAL MUST ( nordugrid-se-name $ nordugrid-se-url) MAY ( nordugrid-se-aliasname $ nordugrid-se-type $ nordugrid-se-freespace $ nordugrid-se-authuser $ nordugrid-se-location $ nordugrid-se-owner $ nordugrid-se-issuerca $ nordugrid-se-totalspace $ nordugrid-se-middleware $ nordugrid-se-comment $ nordugrid-se-accesscontrol $ nordugrid-se-issuerca-hash $ nordugrid-se-trustedca $ nordugrid-se-acl )) #-------------------------------------------------------------------- # nordugrid-rc # attributetype ( 1.3.6.1.4.1.11604.2.1.7.1 NAME 'nordugrid-rc-name' DESC 'The domain name of the machine hosting the Replica Catalog' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.7.2 NAME 'nordugrid-rc-aliasname' DESC 
'The alias name of the rc' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.7.3 NAME 'nordugrid-rc-baseurl' DESC 'The URL of the Replica Catalog' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.7.4 NAME 'nordugrid-rc-authuser' DESC 'An authorized user of the replica catalog' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.7.5 NAME 'nordugrid-rc-location' DESC 'The geographical location of the RC expressed in terms of a Postal ZIP code' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.7.6 NAME 'nordugrid-rc-owner' DESC 'The owner of the resource' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.7.7 NAME 'nordugrid-rc-issuerca' DESC 'The DN of the Certificate Authority which issued the certificate of the RC' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) objectclass ( 1.3.6.1.4.1.11604.2.1.7 NAME 'nordugrid-rc' DESC 'A replica catalogue in the Nordugrid' SUP 'Mds' STRUCTURAL MUST ( nordugrid-rc-name $ nordugrid-rc-baseurl ) MAY ( nordugrid-rc-aliasname $ nordugrid-rc-authuser $ nordugrid-rc-location $ nordugrid-rc-owner $ nordugrid-rc-issuerca )) nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/LogUtils.pm0000644000000000000000000000012412733561425025624 xustar000000000000000027 mtime=1466884885.457809 27 atime=1513200575.844718 30 ctime=1513200663.141785911 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/LogUtils.pm0000644000175000002070000001053212733561425025672 0ustar00mockbuildmock00000000000000package LogUtils; # Object-oriented usage example: # # LogUtils::level("VERBOSE"); # $log = LogUtils->getLogger("MyProg.MyClass"); # $log->warning("Oops!"); # $log->error("Can't go on!"); # Procedural usage example: # # start_logging('INFO'); # warning("Oops!"); # error("Can't go on!"); use strict; use warnings; use POSIX; use FileHandle; use File::Basename; use Exporter; our @ISA = ('Exporter'); # Inherit from Exporter our @EXPORT_OK = qw(start_logging error warning info verbose debug); our %names = (FATAL => 0, ERROR => 1, WARNING => 2, INFO => 3, VERBOSE => 4, DEBUG => 5); our $loglevel = 2; # default level is WARNING our $ts_enabled = 0; # by default do not print timestamps our $indented = ""; # do not indent by default our $default_logger = LogUtils->getLogger(basename($0)); # redirect perl warnings to ARC logging format, # and attempt to limit number of warnings if not verbose my %WARNS if ($loglevel < 4); $SIG{__WARN__} = sub { my $message = shift; chomp($message); if ( $loglevel < 4 ) { if (exists($WARNS{$message})) { if ($WARNS{$message} == 1) { $default_logger->warning("\'PERL: $message\' repeated more than once, skipping... 
set loglevel to VERBOSE to see all messages."); $WARNS{$message} = 2; return; } } else { $default_logger->warning("PERL: $message"); $WARNS{$message} = 1; return; } } else { $default_logger->warning("PERL: $message"); return; } }; # For backwards compatibility sub start_logging($) { level(shift); } # set loglevel for all loggers sub level { return $loglevel unless @_; my $level = shift; if (defined $names{$level}) { $loglevel = $names{$level}; } elsif ($level =~ m/^\d+$/ and $level < keys %names) { $loglevel = $level; } else { fatal("No such loglevel '$level'"); } return $loglevel; } # enable/disable printing of timestamps sub timestamps { return $ts_enabled unless @_; return $ts_enabled = shift() ? 1 : 0; } sub indentoutput { my ($indent) = @_; if ($indent) { $indented = "\t"; } else { $indented = ""; } } # constructor sub getLogger { my $class = shift; my $self = {name => (shift || '')}; bless $self, $class; return $self; } sub debug { return unless $loglevel > 4; unshift(@_, $default_logger) unless ref($_[0]) eq __PACKAGE__; my ($self, $msg) = @_; $self->_log('DEBUG',$msg); } sub verbose { return unless $loglevel > 3; unshift(@_, $default_logger) unless ref($_[0]) eq __PACKAGE__; my ($self, $msg) = @_; $self->_log('VERBOSE',$msg); } sub info { return unless $loglevel > 2; unshift(@_, $default_logger) unless ref($_[0]) eq __PACKAGE__; my ($self, $msg) = @_; $self->_log('INFO',$msg); } sub warning { return unless $loglevel > 1; unshift(@_, $default_logger) unless ref($_[0]) eq __PACKAGE__; my ($self, $msg) = @_; $self->_log('WARNING',$msg); } # Causes program termination sub error { unshift(@_, $default_logger) unless ref($_[0]) eq __PACKAGE__; my ($self, $msg) = @_; $self->_log('ERROR',$msg); exit 1; } # Causes program termination sub fatal { unshift(@_, $default_logger) unless ref($_[0]) eq __PACKAGE__; my ($self, $msg) = @_; $self->_log('FATAL',$msg); exit 2; } sub _log { my ($self,$severity,$msg) = @_; my $name = $self->{name}; $name = $name ? "$name" : ""; print STDERR $indented.($ts_enabled ? "[".timestamp()."] " : "")."[$name] [$severity] [$$] $msg\n"; } sub timestamp { my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(); return POSIX::strftime("%Y-%m-%d %H:%M:%S", $sec,$min,$hour,$mday, $mon,$year,$wday,$yday,$isdst); } sub test { LogUtils::level('INFO'); LogUtils::timestamps(1); my $log = LogUtils->getLogger(); $log->warning("Hi"); $log = LogUtils->getLogger("main"); $log->warning("Hi"); $log = LogUtils->getLogger("main.sub"); $log->warning("Hi"); $log = LogUtils->getLogger("main.sub.one"); $log->warning("Hi"); LogUtils->getLogger("main.sub.too")->info("Boo"); LogUtils->getLogger("main.sub.too")->debug("Hoo"); } sub test2 { start_logging('VERBOSE'); debug('geee'); info('beee'); warning('meee'); error('mooo'); } #test(); #test2(); 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/README0000644000000000000000000000012312101775730024376 xustar000000000000000027 mtime=1359477720.593605 27 atime=1513200575.773717 29 ctime=1513200663.12778574 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/README0000644000175000002070000001106612101775730024450 0ustar00mockbuildmock00000000000000Information providers for A-REX. Currenly all infomration collection for A-REX is done by these scripts. Collected data is presented in 2 formats (classic NG schema and GLUE2 schema) rendered in XML. CEinfo.pl - driver for information collection. It calls all other infomation collectors and prints results in XML. 
InfoChecker.pm - used to validate options and results against a simple 'schema' (not XML Schema) GMJobsInfo.pm - collects information about jobs from grid manager status files HostInfo.pm - collects other information that can be collected on the front end (hostname, software version, disk space for users, installed certificates, Runtime environments ...) LRMSInfo.pm - collects information that is LRMS specific (queues, jobs, local user limits ...) XXXXmod.pm - plugins for LRMSInfo implementing the new LRMS module interface, such as: FORKmod.pm - Fork information module SGEmod.pm - SGE information module SLURMmod.pm - SLURM information module ARC0mod.pm - special module that loads ARC0 information modules and does the conversion between the old interface and the new one. PBS.pm, LL.pm, LSF.pm, Condor.pm, SLURM.pm - ARC0 information modules ARC0ClusterInfo.pm - combines all information about A-REX and produces information structured according to the classic NG schema. ARC1ClusterInfo.pm - combines all information about A-REX and produces information structured according to the GLUE2 schema. Extended information: CEinfo.pl uses *Info.pm to read all information. LRMSInfo in turn uses ${LRMS}mod.pm and ${LRMS}.pm to get information. That information is aggregated by CEinfo.pl. CEinfo.pl then gives that data to ARC*ClusterInfo.pm which transforms the output to the appropriate format. Lastly, CEinfo.pl uses *Printer.pm to generate ldif/xml. Loading structure: CEinfo.pl loads: ARC*ClusterInfo.pm, GLUE2*Printer.pm, NGldifPrinter.pm, {Host,RTE,GMJobsInfo,LRMS}Info.pm, InfosysHelper.pm, LRMSInfo loads: ${LRMS}mod.pm, ARC0mod.pm ARC0mod.pm: loads ${LRMS}.pm Call-chain: CEinfo.pl read config ( $config = ConfigCentral::parseConfig($configfile) ) CEinfo.pl fix_config($config); CEinfo.pl check_config($config); CEinfo.pl get information from collectors ( $data = CEInfo::collect($config) ) CEinfo.pl $usermap = read_grid_mapfile($config->{gridmap}); CEinfo.pl $gmjobs_info = get_gmjobs_info($config); CEinfo.pl #set config-variables specific to gmjobs CEinfo.pl LRMSInfo::collect($lrms_opts) GMJobsInfo.pm # for each user, get all jobs associated by going through controldirs once for each user, return in gmjobs_info CEinfo.pl # build the list of all jobs in state INLRMS CEinfo.pl # build hash with all the input necessary for the renderers CEinfo.pl $data->{config} = $config; CEinfo.pl $data->{usermap} = $usermap; CEinfo.pl $data->{host_info} = get_host_info($config,\@localusers); HostInfo.pm # get os,cpu,mem,disk per user (from session dir) CEinfo.pl $data->{rte_info} = get_rte_info($config); RTEInfo.pm add_static_rtes($options->{runtimedir}, $rtes); RTEInfo.pm add_janitor_res($options, $rtes); CEinfo.pl $data->{gmjobs_info} = $gmjobs_info; CEinfo.pl $data->{lrms_info} = get_lrms_info($config,\@localusers,\@jobids); LRMSInfo.pm # Load ARC0 or ARC1 LRMS-backend LRMSInfo.pm load_lrms($lrms_name); LRMSInfo.pm $result = get_lrms_info($options); LRMSInfo.pm # Verify that LRMS output is correct LRMSInfo.pm @messages = $checker->verify($result); LRMSInfo.pm # Fix various errors/differences like multiple nodes, trim spaces CEinfo.pl # print Glue2 ( $glue2data = ARC1ClusterInfo::collect($data) ) CEinfo.pl $xmlPrinter = GLUE2xmlPrinter->new(*STDOUT, $splitjobs); # splitjobs comes from commandline GLUE2xmlPrinter.pm CEinfo.pl $xmlPrinter->begin('InfoRoot'); CEinfo.pl $xmlPrinter->Domains($glue2data) CEinfo.pl $xmlPrinter->end('InfoRoot'); CEinfo.pl # Generate ldif for infosys-ldap -- but only if infosys expects it 
CEinfo.pl $ngdata = ARC0ClusterInfo::collect($data) ARC0ClusterInfo.pm # combine other sources and prepare info modeled on nordugrid-mds schema. CEinfo.pl if glue2ldap { CEinfo.pl $ldifPrinter = GLUE2ldifPrinter->new($fh) CEinfo.pl $ldifPrinter->Top($glue2data) CEinfo.pl } CEinfo.pl if mdsldap or glue12ldap { CEinfo.pl $ldifPrinter = NGldifPrinter->new($fh, $config->{ttl}) CEinfo.pl $ldifPrinter->Top($ngdata) CEinfo.pl } CEinfo.pl InfosysHelper::createLdifScript($config, $print_ldif) InfosysHelper.pm nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/SLURM.pm0000644000000000000000000000012412574536575024777 xustar000000000000000027 mtime=1441971581.361892 27 atime=1513200575.799717 30 ctime=1513200663.137785862 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/SLURM.pm0000644000175000002070000004330312574536575025047 0ustar00mockbuildmock00000000000000package SLURM; use strict; use POSIX qw(ceil floor); our @ISA = ('Exporter'); # Module implements these subroutines for the LRMS interface our @EXPORT_OK = ('cluster_info', 'queue_info', 'jobs_info', 'users_info', 'nodes_info'); use LogUtils ( 'start_logging', 'error', 'warning', 'debug' ); ########################################## # Saved private variables ########################################## #our(%lrms_queue,%lrms_users); our(%scont_config, %scont_part, %scont_jobs, %scont_nodes, %sinfo_cpuinfo); ########################################## # Private subs ########################################## sub slurm_read_config($){ my ($config) = shift; my ($path) = ($$config{slurm_bin_path} or $$config{SLURM_bin_path} or "/usr/bin"); # get SLURM config, store dictionary in scont_config my %scont_config; open (SCPIPE,"$path/scontrol show config| grep -Ev \"primary|Configuration|^\$\"|"); while(){ chomp; my @mrr = split(" = ", $_, 2); $mrr[0]=~s/\s+$//; $scont_config{$mrr[0]} = $mrr[1]; } close(SCPIPE); return %scont_config; } sub get_variable($$){ my $match = shift; my $string = shift; $string =~ m/(\w\s)*?$match=((\w|\s|\/|,|.|:|;|\[|\]|\(|\)|-)*?)($| \w+=.*)/ ; my $var = $2; return $var; } sub slurm_read_jobs($){ my ($config) = shift; my ($path) = ($$config{slurm_bin_path} or $$config{SLURM_bin_path} or "/usr/bin"); # get SLURM jobs, store dictionary in scont_jobs my %scont_jobs; open (SCPIPE,"$path/squeue -a -h -t all -o \"JobId=%i TimeUsed=%M Partition=%P JobState=%T ReqNodes=%D ReqCPUs=%C TimeLimit=%l Name=%j NodeList=%N\"|"); while(){ my %job; my $string = $_; #Fetching of data from squeue output my $JobId = get_variable("JobId",$string); $job{JobId} = get_variable("JobId",$string); $job{TimeUsed} = get_variable("TimeUsed",$string); $job{Partition} = get_variable("Partition",$string); $job{JobState} = get_variable("JobState",$string); $job{ReqNodes} = get_variable("ReqNodes",$string); $job{ReqCPUs} = get_variable("ReqCPUs",$string); $job{TimeLimit} = get_variable("TimeLimit",$string); $job{Name} = get_variable("Name",$string); $job{NodeList} = get_variable("NodeList",$string); #Translation of data $job{TimeUsed} = slurm_to_arc_time($job{TimeUsed}); $job{TimeLimit} = slurm_to_arc_time($job{TimeLimit}); $scont_jobs{$JobId} = \%job; } close(SCPIPE); return %scont_jobs; } sub slurm_read_partitions($){ my ($config) = shift; my ($path) = ($$config{slurm_bin_path} or $$config{SLURM_bin_path} or "/usr/bin"); # get SLURM partitions, store dictionary in scont_part my %scont_part; open (SCPIPE,"$path/sinfo -a -h -o \"PartitionName=%P TotalCPUs=%C TotalNodes=%D MaxTime=%l DefTime=%L\"|"); while(){ my %part; my $string = $_; my 
$PartitionName = get_variable("PartitionName",$string); $PartitionName =~ s/\*$//; #Fetch data from sinfo $part{PartitionName} = $PartitionName; my $totalcpus = get_variable("TotalCPUs",$string); $part{TotalNodes} = get_variable("TotalNodes",$string); $part{MaxTime} = get_variable("MaxTime",$string); $part{DefTime} = get_variable("DefTime",$string); #Translation of data $part{MaxTime} = slurm_to_arc_time($part{MaxTime}); $part{DefTime} = slurm_to_arc_time($part{DefTime}); # Format of "%C" is: Number of CPUs by state in the format "allocated/idle/other/total" # We only care about total: ###### ($part{AllocatedCPUs},$part{IdleCPUs},$part{OtherCPUs},$part{TotalCPUs}) = split('/',$totalcpus); # Neither of these fields probably need this in SLURM 1.3, but it doesn't hurt. $part{AllocatedCPUs} = slurm_parse_number($part{AllocatedCPUs}); $part{IdleCPUs} = slurm_parse_number($part{IdleCPUs}); $part{OtherCPUs} = slurm_parse_number($part{OtherCPUs}); $part{TotalCPUs} = slurm_parse_number($part{TotalCPUs}); $part{TotalNodes} = slurm_parse_number($part{TotalNodes}); $scont_part{$PartitionName} = \%part; } close(SCPIPE); return %scont_part; } sub slurm_read_cpuinfo($){ my ($config) = shift; my ($path) = ($$config{slurm_bin_path} or $$config{SLURM_bin_path} or "/usr/bin"); # get SLURM partitions, store dictionary in scont_part my %sinfo_cpuinfo; my $cpuinfo; open (SCPIPE,"$path/sinfo -a -h -o \"cpuinfo=%C\"|"); while(){ my $string = $_; $cpuinfo = get_variable("cpuinfo",$string); } close(SCPIPE); ($sinfo_cpuinfo{AllocatedCPUs},$sinfo_cpuinfo{IdleCPUs},$sinfo_cpuinfo{OtherCPUs},$sinfo_cpuinfo{TotalCPUs}) = split('/',$cpuinfo); $sinfo_cpuinfo{AllocatedCPUs} = slurm_parse_number($sinfo_cpuinfo{AllocatedCPUs}); $sinfo_cpuinfo{IdleCPUs} = slurm_parse_number($sinfo_cpuinfo{IdleCPUs}); $sinfo_cpuinfo{OtherCPUs} = slurm_parse_number($sinfo_cpuinfo{OtherCPUs}); $sinfo_cpuinfo{TotalCPUs} = slurm_parse_number($sinfo_cpuinfo{TotalCPUs}); return %sinfo_cpuinfo; } sub slurm_read_nodes($){ my ($config) = shift; my ($path) = ($$config{slurm_bin_path} or $$config{SLURM_bin_path} or "/usr/bin"); # get SLURM nodes, store dictionary in scont_nodes my %scont_nodes; open (SCPIPE,"$path/scontrol show node --oneliner|"); while(){ my %record; my $string = $_; my $node = get_variable("NodeName",$string); # We have to keep CPUs key name for not to break other # functions that use this key $record{CPUs} = get_variable("CPUTot",$string); $record{RealMemory} = get_variable("RealMemory",$string); my $StateName = get_variable("State",$string); # Node status can be followed by different symbols # according to it being unresponsive, powersaving, etc. 
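    # Editor's note (added example; the suffix meanings are an assumption about
    # the local SLURM build, not taken from this source): "IDLE*" -> "IDLE",
    # "MIXED~" -> "MIXED", "DOWN+" -> "DOWN". The substitution below strips a
    # single trailing *, ~, # or + and leaves the base state name unchanged.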
# Get rid of them $StateName =~ s/[\*~#\+]$//; $record{State} = $StateName; $record{Sockets} = get_variable("Sockets",$string); $record{SysName} = get_variable("OS",$string); $record{Arch} = get_variable("Arch",$string); $scont_nodes{$node} = \%record; } close(SCPIPE); return %scont_nodes; } #Function for retrieving used and queued cpus from slurm sub slurm_get_jobs { my $queue = shift; my $queuedjobs=0, my $usedcpus=0, my $nocpus=0, my $jqueue=0; my $runningjobs=0; foreach my $i (keys %scont_jobs){ $jqueue = $scont_jobs{$i}{"Partition"}; next if (defined($queue) && !($jqueue =~ /$queue/)); if ($scont_jobs{$i}{"JobState"} =~ /^PENDING$/){ $queuedjobs++; } if (($scont_jobs{$i}{"JobState"} =~ /^RUNNING$/) || ($scont_jobs{$i}{"JobState"} =~ /^COMPLETING$/)){ $runningjobs++; } } return ($queuedjobs, $runningjobs); } sub slurm_get_data($){ my $config = shift; %scont_config = slurm_read_config($config); %scont_part = slurm_read_partitions($config); %scont_jobs = slurm_read_jobs($config); %scont_nodes = slurm_read_nodes($config); %sinfo_cpuinfo = slurm_read_cpuinfo($config); } sub slurm_to_arc_time($){ my $timeslurm = shift; my $timearc = 0; # $timeslurm can be "infinite" or "UNLIMITED" if (($timeslurm =~ "UNLIMITED") or ($timeslurm =~ "infinite")) { #Max number allowed by ldap $timearc = 2**31-1; } # days-hours:minutes:seconds elsif ( $timeslurm =~ /(\d+)-(\d+):(\d+):(\d+)/ ) { $timearc = $1*24*60*60 + $2*60*60 + $3*60 + $4; } # hours:minutes:seconds elsif ( $timeslurm =~ /(\d+):(\d+):(\d+)/ ) { $timearc = $1*60*60 + $2*60 + $3; } # minutes:seconds elsif ( $timeslurm =~ /(\d+):(\d+)/ ) { $timearc = $1*60 + $2; } # ARC infosys uses minutes as the smallest allowed value. $timearc = floor($timearc/60); return $timearc; } # SLURM outputs some values as 12.3K where K is 1024. Include T, G, M # as well in case they become necessary in the future. sub slurm_parse_number($){ my $value = shift; if ( $value =~ /(\d+\.?\d*)K$/ ){ $value = floor($1 * 1024); } if ( $value =~ /(\d+\.?\d*)M$/ ){ $value = floor($1 * 1024 * 1024); } if ( $value =~ /(\d+\.?\d*)G$/ ){ $value = floor($1 * 1024 * 1024 * 1024); } if ( $value =~ /(\d+\.?\d*)T$/ ){ $value = floor($1 * 1024 * 1024 * 1024 * 1024); } return $value; } sub slurm_get_first_node($){ my $nodes = shift; my @enodes = split(",", slurm_expand_nodes($nodes)); return " NoNode " if ! @enodes; return $enodes[0]; } #translates a list like n[1-2,5],n23,n[54-55] to n1,n2,n5,n23,n54,n55 sub slurm_expand_nodes($){ my $nodes = shift; my $enodes = ""; $nodes =~ s/,([a-zA-Z])/ $1/g; foreach my $node (split(" ",$nodes)){ if( $node =~ m/([a-zA-Z0-9-_]*)\[([0-9\-,]*)\]/ ){ my $name = $1; my $list = $2; foreach my $element (split(",",$list)){ if($element =~ /(\d*)-(\d*)/){ my $start=$1; my $end=$2; my $l = length($start); for (my $i=$start;$i<=$end;$i++){ # Preserve leading zeroes in sequence, if needed $enodes .= sprintf("%s%0*d,", $name, $l, $i); } } else { $enodes .= $name.$element.","; } } } else { $enodes .= $node . ","; } } chop $enodes; return $enodes; } ############################################ # Public subs ############################################# sub cluster_info ($) { # config array my ($config) = shift; my ($path) = ($$config{slurm_bin_path} or $$config{SLURM_bin_path} or "/usr/bin"); # Get Data needed by this function, stored in the global variables # scont_nodes, scont_part, scont_jobs slurm_get_data($config); # Return data structure %lrms_cluster{$keyword} # # lrms_type LRMS type (eg. 
LoadLeveler) # lrms_version LRMS version # totalcpus Total number of cpus in the system # queuedjobs Number of queueing jobs in LRMS # runningjobs Number of running jobs in LRMS # usedcpus Used cpus in the system # cpudistribution CPU distribution string # # All values should be defined, empty values "" are ok if field # does not apply to particular LRMS. my (%lrms_cluster); #determine the version of SLURM $lrms_cluster{lrms_type} = "SLURM"; $lrms_cluster{lrms_version} = $scont_config{"SLURM_VERSION"}; # SLURM has no cputime limit at all $lrms_cluster{has_total_cputime_limit} = 0; #determine number of processors my $totalcpus=0; foreach my $i (keys %scont_nodes){ $totalcpus += $scont_nodes{$i}{"CPUs"}; } $lrms_cluster{totalcpus} = $totalcpus; $lrms_cluster{usedcpus} = $sinfo_cpuinfo{AllocatedCPUs}; # TODO: investigate if this can be calculated for SLURM # this is a quick and dirty fix for a warning, might be fixed somewhere else $lrms_cluster{queuedcpus} = 0; ($lrms_cluster{queuedjobs}, $lrms_cluster{runningjobs}) = slurm_get_jobs(); #NOTE: should be on the form "8cpu:800 2cpu:40" my @queue=(); foreach my $i (keys %scont_part){ unshift (@queue,$i); } my %cpudistribution; $lrms_cluster{cpudistribution} = ""; foreach my $key (keys %scont_nodes){ if(exists $cpudistribution{$scont_nodes{$key}{CPUs}}){ $cpudistribution{$scont_nodes{$key}{CPUs}} +=1; } else{ $cpudistribution{$scont_nodes{$key}{CPUs}} = 1; } } foreach my $key (keys %cpudistribution){ $lrms_cluster{cpudistribution}.= $key ."cpu:" . $cpudistribution{$key} . " "; } $lrms_cluster{queue} = [@queue]; return %lrms_cluster; } sub queue_info ($$) { # config array my ($config) = shift; my ($path) = ($$config{slurm_bin_path} or $$config{SLURM_bin_path} or "/usr/bin"); # Name of the queue to query my ($queue) = shift; # Get data needed by this function, stored in global variables # scont_nodes, scont_part, scont_jobs slurm_get_data($config); # The return data structure is %lrms_queue. my (%lrms_queue); # Return data structure %lrms_queue{$keyword} # # status available slots in the queue, negative number signals # some kind of LRMS error state for the queue # maxrunning queue limit for number of running jobs # maxqueuable queue limit for number of queueing jobs # maxuserrun queue limit for number of running jobs per user # maxcputime queue limit for max cpu time for a job # mincputime queue limit for min cpu time for a job # defaultcput queue default for cputime # maxwalltime queue limit for max wall time for a job # minwalltime queue limit for min wall time for a job # defaultwalltime queue default for walltime # running number of procs used by running jobs in the queue # queued number of procs requested by queueing jobs in the queue # totalcpus number of procs in the queue # # All values should be defined, empty values "" are ok if field # does not apply to particular LRMS. #TODO available slots, not max jobs. 
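    # Editor's note: a minimal illustration (all values invented) of the
    # %lrms_queue hash documented above, for a hypothetical partition with
    # MaxTime=3-00:00:00 (4320 minutes) and DefTime equal to MaxTime:
    #   %lrms_queue = ( status => 10000, maxrunning => 10000, maxqueuable => 10000,
    #                   maxuserrun => 10000, maxcputime => 4320, mincputime => 0,
    #                   defaultcput => 4320, maxwalltime => 4320, minwalltime => 0,
    #                   defaultwallt => 4320, running => 12, queued => 3,
    #                   totalcpus => 64 );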
$lrms_queue{status} = $scont_config{"MaxJobCount"}; $lrms_queue{maxrunning} = $scont_config{"MaxJobCount"}; $lrms_queue{maxqueuable} = $scont_config{"MaxJobCount"}; $lrms_queue{maxuserrun} = $scont_config{"MaxJobCount"}; my $maxtime = $scont_part{$queue}{"MaxTime"}; my $deftime = $scont_part{$queue}{"DefTime"}; $lrms_queue{maxcputime} = $maxtime; $lrms_queue{mincputime} = 0; $lrms_queue{defaultcput} = $deftime; $lrms_queue{maxwalltime} = $maxtime; $lrms_queue{minwalltime} = 0; $lrms_queue{defaultwallt} = $deftime; ($lrms_queue{queued}, $lrms_queue{running}) = slurm_get_jobs($queue); $lrms_queue{totalcpus} = $scont_part{$queue}{TotalCPUs}; return %lrms_queue; } sub jobs_info ($$$) { # config array my ($config) = shift; my ($path) = ($$config{slurm_bin_path} or $$config{SLURM_bin_path} or "/usr/bin"); # Name of the queue to query my ($queue) = shift; # LRMS job IDs from Grid Manager (jobs with "INLRMS" GM status) my ($jids) = shift; #Get data needed by this function, stored in global variables # scont_nodes, scont_part, scont_jobs slurm_get_data($config); # status Status of the job: Running 'R', Queued'Q', # Suspended 'S', Exiting 'E', Other 'O' # rank Position in the queue # mem Used (virtual) memory # walltime Used walltime # cputime Used cpu-time # reqwalltime Walltime requested from LRMS # reqcputime Cpu-time requested from LRMS # nodes List of execution hosts. # comment Comment about the job in LRMS, if any # cpus Number of cpus requested/used by the job my (%lrms_jobs); #$queue is not used to keep jobs from different queues separate #jobs can't have overlapping job-ids between queues in SLURM foreach my $jid (@{$jids}){ if ($scont_jobs{$jid}{"JobState"} eq "RUNNING") { $lrms_jobs{$jid}{status} = "R"; } elsif ($scont_jobs{$jid}{"JobState"} eq "COMPLETED") { $lrms_jobs{$jid}{status} = "E"; } elsif ($scont_jobs{$jid}{"JobState"} eq "CANCELLED") { $lrms_jobs{$jid}{status} = "O"; } elsif ($scont_jobs{$jid}{"JobState"} eq "FAILED") { $lrms_jobs{$jid}{status} = "O"; } elsif ($scont_jobs{$jid}{"JobState"} eq "PENDING") { $lrms_jobs{$jid}{status} = "Q"; } elsif ($scont_jobs{$jid}{"JobState"} eq "TIMEOUT") { $lrms_jobs{$jid}{status} = "O"; } else { $lrms_jobs{$jid}{status} = "O"; } #TODO: calculate rank? Probably not possible. $lrms_jobs{$jid}{rank} = 0; #TODO: This gets the memory from the first node in a job #allocation which will not be correct on a heterogenous #cluster my $node = slurm_get_first_node($scont_jobs{$jid}{"NodeList"}); $lrms_jobs{$jid}{mem} = $scont_nodes{$node}{"RealMemory"}; my $walltime = $scont_jobs{$jid}{"TimeUsed"}; my $count = $scont_jobs{$jid}{ReqCPUs}; $lrms_jobs{$jid}{walltime} = $walltime; # TODO: multiply walltime by number of cores to get cputime? $lrms_jobs{$jid}{cputime} = $walltime*$count; $lrms_jobs{$jid}{reqwalltime} = $scont_jobs{$jid}{"TimeLimit"}; # TODO: cputime/walltime confusion again... 
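    # Editor's note (illustrative arithmetic, not taken from this source): for
    # a job with TimeUsed=00:30:00 and ReqCPUs=4, walltime is reported as 30
    # minutes and cputime as 30*4 = 120 minutes; reqcputime below is scaled
    # from TimeLimit in the same way, so it can overestimate jobs that do not
    # keep every requested core busy.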
$lrms_jobs{$jid}{reqcputime} = $scont_jobs{$jid}{"TimeLimit"}*$count; $lrms_jobs{$jid}{nodes} = [ split(",",slurm_expand_nodes($scont_jobs{$jid}{"NodeList"}) ) ]; $lrms_jobs{$jid}{comment} = [$scont_jobs{$jid}{"Name"}]; $lrms_jobs{$jid}{cpus} = $scont_jobs{$jid}{ReqCPUs}; } return %lrms_jobs; } sub users_info($$@) { # config array my ($config) = shift; my ($path) = ($$config{slurm_bin_path} or $$config{SLURM_bin_path} or "/usr/bin"); # name of queue to query my ($queue) = shift; # user accounts my ($accts) = shift; #Get data needed by this function, stored in global variables # scont_nodes, scont_part, scont_jobs slurm_get_data($config); my (%lrms_users); # freecpus for given account # queue length for given account # foreach my $u ( @{$accts} ) { $lrms_users{$u}{freecpus} = $sinfo_cpuinfo{IdleCPUs}; $lrms_users{$u}{queuelength} = 0; } return %lrms_users; } sub nodes_info($) { my $config = shift; my %hoh_slurmnodes = slurm_read_nodes($config); my %nodes; for my $host (keys %hoh_slurmnodes) { my ($isfree, $isavailable) = (0,0); $isavailable = 1 unless $hoh_slurmnodes{$host}{State} =~ /DOWN|DRAIN|FAIL|MAINT|UNK/; $isfree = 1 if $hoh_slurmnodes{$host}{State} =~ /IDLE|MIXED/; $nodes{$host} = {isfree => $isfree, isavailable => $isavailable}; my $np = $hoh_slurmnodes{$host}{CPUs}; my $nsock = $hoh_slurmnodes{$host}{Sockets}; my $rmem = $hoh_slurmnodes{$host}{RealMemory}; $nodes{$host}{lcpus} = int $np if $np; $nodes{$host}{slots} = int $np if $np; $nodes{$host}{pmem} = int $rmem if $rmem; $nodes{$host}{pcpus} = int $nsock if $nsock; $nodes{$host}{sysname} = $hoh_slurmnodes{$host}{SysName}; $nodes{$host}{machine} = $hoh_slurmnodes{$host}{Arch}; } return %nodes; } 1; nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/PaxHeaders.7502/SLURMmod.pm0000644000000000000000000000012412574536575025477 xustar000000000000000027 mtime=1441971581.361892 27 atime=1513200575.841718 30 ctime=1513200663.138785874 nordugrid-arc-5.4.2/src/services/a-rex/infoproviders/SLURMmod.pm0000644000175000002070000003634012574536575025552 0ustar00mockbuildmock00000000000000package SLURMmod; use strict; use POSIX qw(ceil floor); our @ISA = ('Exporter'); # Module implements these subroutines for the LRMS interface our @EXPORT_OK = ('get_lrms_info', 'get_lrms_options_schema'); use LogUtils ( 'start_logging', 'error', 'warning', 'debug' ); ########################################## # Public variables ########################################## our $options; our $lrms_info; ########################################## # Saved private variables ########################################## our $path; our(%scont_config, %scont_part, %scont_jobs, %scont_nodes, %sinfo_cpuinfo); our $log = LogUtils->getLogger("SLURMmod"); sub get_lrms_options_schema { return { 'slurm_bin_path' => '*', 'queues' => { '*' => { 'users' => [ '' ], } }, 'jobs' => [ '' ] }; } sub get_lrms_info($) { $options = shift; $path = ($options->{slurm_bin_path} or "/usr/bin"); # slurm_init_check(); slurm_get_data(); cluster_info(); my %qconf = %{$options->{queues}}; for my $qname ( keys %qconf ) { queue_info($qname); } my $jids = $options->{jobs}; jobs_info($jids); for my $qname ( keys %qconf ) { my $users = $qconf{$qname}{users}; users_info($qname,$users); } nodes_info(); return $lrms_info; } ########################################## # Private subs ########################################## #sub slurm_init_check() { # #$log->info("Verifying slurm commands..."); # #my @slurm_commands = ('scontrol','squeue','sinfo'); # #foreach my $slurmcmd (@slurm_commands) { # unless (-e 
"$path/$slurmcmd") {$log->error("$slurmcmd command not found. Exiting...")}; # } #} sub nodes_info() { my $lrms_nodes = {}; # add this cluster to the info tree $lrms_info->{nodes} = $lrms_nodes; for my $host (keys %scont_nodes) { my ($isfree, $isavailable) = (0,0); $isavailable = 1 unless $scont_nodes{$host}{State} =~ /DOWN|DRAIN|FAIL|MAINT|UNK/; $isfree = 1 if $scont_nodes{$host}{State} =~ /IDLE|MIXED/; $lrms_nodes->{$host} = {isfree => $isfree, isavailable => $isavailable}; my $np = $scont_nodes{$host}{CPUTot}; my $nsock = $scont_nodes{$host}{Sockets}; my $rmem = $scont_nodes{$host}{RealMemory}; $lrms_nodes->{$host}{lcpus} = int $np if $np; $lrms_nodes->{$host}{slots} = int $np if $np; $lrms_nodes->{$host}{pmem} = int $rmem if $rmem; $lrms_nodes->{$host}{pcpus} = int $nsock if $nsock; $lrms_nodes->{$host}{sysname} = $scont_nodes{$host}{SysName}; $lrms_nodes->{$host}{machine} = $scont_nodes{$host}{Arch}; } } sub users_info($@) { # name of queue to query my ($queue) = shift; # user accounts my ($accts) = shift; my $lrms_users = {}; # add users to the info tree my $lrms_queue = $lrms_info->{queues}{$queue}; $lrms_queue->{users} = $lrms_users; # freecpus for given account # queue length for given account # foreach my $u ( @{$accts} ) { $lrms_users->{$u}{freecpus} = { $sinfo_cpuinfo{IdleCPUs} => 0 }; $lrms_users->{$u}{queuelength} = 0; } } sub slurm_get_first_node($){ my $nodes = shift; my @enodes = split(",", slurm_expand_nodes($nodes)); return " NoNode " if ! @enodes; return $enodes[0]; } #translates a list like n[1-2,5],n23,n[54-55] to n1,n2,n5,n23,n54,n55 sub slurm_expand_nodes($){ my $nodes = shift; my $enodes = ""; $nodes =~ s/,([a-zA-Z])/ $1/g; foreach my $node (split(" ",$nodes)){ if( $node =~ m/([a-zA-Z0-9-_]*)\[([0-9\-,]*)\]/ ){ my $name = $1; my $list = $2; foreach my $element (split(",",$list)){ if($element =~ /(\d*)-(\d*)/){ my $start=$1; my $end=$2; my $l = length($start); for (my $i=$start;$i<=$end;$i++){ # Preserve leading zeroes in sequence, if needed $enodes .= sprintf("%s%0*d,", $name, $l, $i); } } else { $enodes .= $name.$element.","; } } } else { $enodes .= $node . ","; } } chop $enodes; return $enodes; } sub jobs_info ($) { my $jids = shift; my $lrms_jobs = {}; # add jobs to the info tree $lrms_info->{jobs} = $lrms_jobs; #jobs can't have overlapping job-ids between queues in SLURM foreach my $jid (@{$jids}){ if ($scont_jobs{$jid}{"JobState"} eq "RUNNING") { $lrms_jobs->{$jid}{status} = "R"; } elsif ($scont_jobs{$jid}{"JobState"} eq "COMPLETED") { $lrms_jobs->{$jid}{status} = "E"; } elsif ($scont_jobs{$jid}{"JobState"} eq "CANCELLED") { $lrms_jobs->{$jid}{status} = "O"; } elsif ($scont_jobs{$jid}{"JobState"} eq "FAILED") { $lrms_jobs->{$jid}{status} = "O"; } elsif ($scont_jobs{$jid}{"JobState"} eq "PENDING") { $lrms_jobs->{$jid}{status} = "Q"; } elsif ($scont_jobs{$jid}{"JobState"} eq "TIMEOUT") { $lrms_jobs->{$jid}{status} = "O"; } else { $lrms_jobs->{$jid}{status} = "O"; } #TODO: calculate rank? Probably not possible. 
$lrms_jobs->{$jid}{rank} = 0; $lrms_jobs->{$jid}{cpus} = $scont_jobs{$jid}{ReqCPUs}; #TODO: This gets the memory from the first node in a job #allocation which will not be correct on a heterogenous #cluster my $node = slurm_get_first_node($scont_jobs{$jid}{"NodeList"}); # Only jobs that got the nodes can report the memory of # their nodes if($node ne " NoNode "){ $lrms_jobs->{$jid}{mem} = $scont_nodes{$node}{"RealMemory"}; } my $walltime = $scont_jobs{$jid}{"TimeUsed"}; my $count = $scont_jobs{$jid}{ReqCPUs}; $lrms_jobs->{$jid}{walltime} = $walltime; # TODO: multiply walltime by number of cores to get cputime? $lrms_jobs->{$jid}{cputime} = $walltime*$count; $lrms_jobs->{$jid}{reqwalltime} = $scont_jobs{$jid}{"TimeLimit"}; # TODO: cputime/walltime confusion again... $lrms_jobs->{$jid}{reqcputime} = $scont_jobs{$jid}{"TimeLimit"}*$count; $lrms_jobs->{$jid}{nodes} = [ split(",",slurm_expand_nodes($scont_jobs{$jid}{"NodeList"}) ) ]; $lrms_jobs->{$jid}{comment} = [$scont_jobs{$jid}{"Name"}]; } } sub queue_info ($) { # Name of the queue to query my ($queue) = shift; my $lrms_queue = {}; # add this queue to the info tree $lrms_info->{queues}{$queue} = $lrms_queue; $lrms_queue->{status} = $scont_config{"MaxJobCount"}; $lrms_queue->{maxrunning} = $scont_config{"MaxJobCount"}; $lrms_queue->{maxqueuable} = $scont_config{"MaxJobCount"}; $lrms_queue->{maxuserrun} = $scont_config{"MaxJobCount"}; my $maxtime = $scont_part{$queue}{"MaxTime"}; my $deftime = $scont_part{$queue}{"DefTime"}; $lrms_queue->{maxcputime} = $maxtime; $lrms_queue->{mincputime} = 0; $lrms_queue->{defaultcput} = $deftime; $lrms_queue->{maxwalltime} = $maxtime; $lrms_queue->{minwalltime} = 0; $lrms_queue->{defaultwallt} = $deftime; ($lrms_queue->{queued}, $lrms_queue->{running}) = slurm_get_jobs($queue); $lrms_queue->{totalcpus} = $scont_part{$queue}{TotalCPUs}; $lrms_queue->{freeslots} = $scont_part{$queue}{IdleCPUs}; } #Function for retrieving running and queued jobs from slurm sub slurm_get_jobs { my $queue = shift; my $queuedjobs=0, my $usedcpus=0, my $nocpus=0, my $jqueue=0; my $runningjobs=0; foreach my $i (keys %scont_jobs){ $jqueue = $scont_jobs{$i}{"Partition"}; next if (defined($queue) && !($jqueue =~ /$queue/)); if ($scont_jobs{$i}{"JobState"} =~ /^PENDING$/){ $queuedjobs++; } if (($scont_jobs{$i}{"JobState"} =~ /^RUNNING$/) || ($scont_jobs{$i}{"JobState"} =~ /^COMPLETING$/)){ $runningjobs++; } } return ($queuedjobs, $runningjobs); } sub cluster_info () { my $lrms_cluster = {}; # add this cluster to the info tree $lrms_info->{cluster} = $lrms_cluster; #determine the version of SLURM $lrms_cluster->{lrms_type} = "SLURM"; $lrms_cluster->{lrms_version} = $scont_config{"SLURM_VERSION"}; #determine number of processors my $totalcpus=0; foreach my $i (keys %scont_nodes){ $totalcpus += $scont_nodes{$i}{CPUTot}; } $lrms_cluster->{totalcpus} = $totalcpus; # TODO: investigate if this can be calculated for SLURM # this is a quick and dirty fix for a warning, might be fixed somewhere else $lrms_cluster->{queuedcpus} = 0; $lrms_cluster->{usedcpus} = $sinfo_cpuinfo{AllocatedCPUs}; ($lrms_cluster->{queuedjobs}, $lrms_cluster->{runningjobs}) = slurm_get_jobs(); #NOTE: should be on the form "8cpu:800 2cpu:40" my %cpudistribution; $lrms_cluster->{cpudistribution} = ""; foreach my $key (keys %scont_nodes){ if(exists $cpudistribution{$scont_nodes{$key}{CPUTot}}){ $cpudistribution{$scont_nodes{$key}{CPUTot}} +=1; } else{ $cpudistribution{$scont_nodes{$key}{CPUTot}} = 1; } } foreach my $key (keys %cpudistribution){ 
$lrms_cluster->{cpudistribution}.= $key ."cpu:" . $cpudistribution{$key} . " "; } } sub slurm_get_data(){ %scont_config = slurm_read_config(); %scont_part = slurm_read_partitions(); %scont_jobs = slurm_read_jobs(); %scont_nodes = slurm_read_nodes(); %sinfo_cpuinfo = slurm_read_cpuinfo(); } sub slurm_read_config(){ # get SLURM config, store dictionary in scont_config my %scont_config; open (SCPIPE,"$path/scontrol show config| grep -Ev \"primary|Configuration|^\$\"|"); while(){ chomp; my @mrr = split(" = ", $_, 2); $mrr[0]=~s/\s+$//; $scont_config{$mrr[0]} = $mrr[1]; } close(SCPIPE); return %scont_config; } sub get_variable($$){ my $match = shift; my $string = shift; $string =~ m/(\w\s)*?$match=((\w|\s|\/|,|.|:|;|\[|\]|\(|\)|-)*?)($| \w+=.*)/ ; my $var = $2; return $var; } sub slurm_to_arc_time($){ my $timeslurm = shift; my $timearc = 0; # $timeslurm can be "infinite" or "UNLIMITED" if (($timeslurm =~ "UNLIMITED") or ($timeslurm =~ "infinite")) { #Max number allowed by ldap $timearc = 2**31-1; } # days-hours:minutes:seconds elsif ( $timeslurm =~ /(\d+)-(\d+):(\d+):(\d+)/ ) { $timearc = $1*24*60*60 + $2*60*60 + $3*60 + $4; } # hours:minutes:seconds elsif ( $timeslurm =~ /(\d+):(\d+):(\d+)/ ) { $timearc = $1*60*60 + $2*60 + $3; } # minutes:seconds elsif ( $timeslurm =~ /(\d+):(\d+)/ ) { $timearc = $1*60 + $2; } return $timearc; } # SLURM outputs some values as 12.3K where K is 1024. Include T, G, M # as well in case they become necessary in the future. sub slurm_parse_number($){ my $value = shift; if ( $value =~ /(\d+\.?\d*)K$/ ){ $value = floor($1 * 1024); } if ( $value =~ /(\d+\.?\d*)M$/ ){ $value = floor($1 * 1024 * 1024); } if ( $value =~ /(\d+\.?\d*)G$/ ){ $value = floor($1 * 1024 * 1024 * 1024); } if ( $value =~ /(\d+\.?\d*)T$/ ){ $value = floor($1 * 1024 * 1024 * 1024 * 1024); } return $value; } sub slurm_read_partitions(){ # get SLURM partitions, store dictionary in scont_part my %scont_part; open (SCPIPE,"$path/sinfo -a -h -o \"PartitionName=%P TotalCPUs=%C TotalNodes=%D MaxTime=%l DefTime=%L\"|"); while(){ my %part; my $string = $_; my $PartitionName = get_variable("PartitionName",$string); $PartitionName =~ s/\*$//; #Fetch data from sinfo $part{PartitionName} = $PartitionName; my $totalcpus = get_variable("TotalCPUs",$string); $part{TotalNodes} = get_variable("TotalNodes",$string); $part{MaxTime} = get_variable("MaxTime",$string); $part{DefTime} = get_variable("DefTime",$string); #Translation of data $part{MaxTime} = slurm_to_arc_time($part{MaxTime}); $part{DefTime} = slurm_to_arc_time($part{DefTime}); # Format of "%C" is: Number of CPUs by state in the format "allocated/idle/other/total" # We only care about total: ###### ($part{AllocatedCPUs},$part{IdleCPUs},$part{OtherCPUs},$part{TotalCPUs}) = split('/',$totalcpus); # Neither of these fields probably need this in SLURM 1.3, but it doesn't hurt. 
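# Worked examples (illustrative): a "%C" value of "12/4/0/16" splits into
# AllocatedCPUs=12, IdleCPUs=4, OtherCPUs=0 and TotalCPUs=16, and
# slurm_parse_number() turns a suffixed value such as "1.5K" into
# floor(1.5*1024) = 1536.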
$part{AllocatedCPUs} = slurm_parse_number($part{AllocatedCPUs}); $part{IdleCPUs} = slurm_parse_number($part{IdleCPUs}); $part{OtherCPUs} = slurm_parse_number($part{OtherCPUs}); $part{TotalCPUs} = slurm_parse_number($part{TotalCPUs}); $part{TotalNodes} = slurm_parse_number($part{TotalNodes}); $scont_part{$PartitionName} = \%part; } close(SCPIPE); return %scont_part; } sub slurm_read_jobs($){ # get SLURM jobs, store dictionary in scont_jobs my %scont_jobs; open (SCPIPE,"$path/squeue -a -h -t all -o \"JobId=%i TimeUsed=%M Partition=%P JobState=%T ReqNodes=%D ReqCPUs=%C TimeLimit=%l Name=%j NodeList=%N\"|"); while(){ my %job; my $string = $_; #Fetching of data from squeue output my $JobId = get_variable("JobId",$string); $job{JobId} = get_variable("JobId",$string); $job{TimeUsed} = get_variable("TimeUsed",$string); $job{Partition} = get_variable("Partition",$string); $job{JobState} = get_variable("JobState",$string); $job{ReqNodes} = get_variable("ReqNodes",$string); $job{ReqCPUs} = get_variable("ReqCPUs",$string); $job{TimeLimit} = get_variable("TimeLimit",$string); $job{Name} = get_variable("Name",$string); $job{NodeList} = get_variable("NodeList",$string); #Translation of data $job{TimeUsed} = slurm_to_arc_time($job{TimeUsed}); $job{TimeLimit} = slurm_to_arc_time($job{TimeLimit}); $scont_jobs{$JobId} = \%job; } close(SCPIPE); return %scont_jobs; } sub slurm_read_nodes($){ # get SLURM nodes, store dictionary in scont_nodes my %scont_nodes; open (SCPIPE,"$path/scontrol show node --oneliner|"); while(){ my %record; my $string = $_; my $node = get_variable("NodeName",$string); # We have to keep CPUs key name for not to break other # functions that use this key $record{CPUTot} = get_variable("CPUTot",$string); $record{RealMemory} = get_variable("RealMemory",$string); my $StateName = get_variable("State",$string); # Node status can be followed by different symbols # according to it being unresponsive, powersaving, etc. 
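# (e.g. scontrol may report State=IDLE* or State=DOWN~; only the plain
# state name is kept after the substitution below)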
# Get rid of them $StateName =~ s/[\*~#\+]$//; $record{State} = $StateName; $record{Sockets} = get_variable("Sockets",$string); $record{SysName} = get_variable("OS",$string); $record{Arch} = get_variable("Arch",$string); $scont_nodes{$node} = \%record; } close(SCPIPE); return %scont_nodes; } sub slurm_read_cpuinfo($){ my %sinfo_cpuinfo; my $cpuinfo; open (SCPIPE,"$path/sinfo -a -h -o \"cpuinfo=%C\"|"); while(){ my $string = $_; $cpuinfo = get_variable("cpuinfo",$string); } close(SCPIPE); ($sinfo_cpuinfo{AllocatedCPUs},$sinfo_cpuinfo{IdleCPUs},$sinfo_cpuinfo{OtherCPUs},$sinfo_cpuinfo{TotalCPUs}) = split('/',$cpuinfo); $sinfo_cpuinfo{AllocatedCPUs} = slurm_parse_number($sinfo_cpuinfo{AllocatedCPUs}); $sinfo_cpuinfo{IdleCPUs} = slurm_parse_number($sinfo_cpuinfo{IdleCPUs}); $sinfo_cpuinfo{OtherCPUs} = slurm_parse_number($sinfo_cpuinfo{OtherCPUs}); $sinfo_cpuinfo{TotalCPUs} = slurm_parse_number($sinfo_cpuinfo{TotalCPUs}); return %sinfo_cpuinfo; } 1; nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/FileChunks.h0000644000000000000000000000012313166234757023044 xustar000000000000000026 mtime=1507408367.78546 27 atime=1513200576.628727 30 ctime=1513200662.706780591 nordugrid-arc-5.4.2/src/services/a-rex/FileChunks.h0000644000175000002070000000515713166234757023122 0ustar00mockbuildmock00000000000000#include #include #include #include namespace ARex { class FileChunksList; /// Representation of delivered file chunks class FileChunks { friend class FileChunksList; private: Glib::Mutex lock; FileChunksList& list; std::map::iterator self; typedef std::list > chunks_t; chunks_t chunks; off_t size; time_t last_accessed; int refcount; FileChunks(FileChunksList& container); public: FileChunks(const FileChunks& obj); /// Returns assigned file path (id of file) std::string Path(void) { return self->first; }; /// Assign file size void Size(off_t size); /// Returns assigned file size off_t Size(void) { return size; }; /// Report one more delivered chunk void Add(off_t start,off_t csize); /// Returns true if all chunks were delivered. bool Complete(void); /// Prints chunks delivered so far. For debuging purposes. void Print(void); /// Release reference obtained through FileChunksList::Get() method. /// This operation may lead to destruction of FileChunk instance /// hence previously obtained refrence must not be used. void Release(void); /// Relases reference obtained through Get() method and destroys its instance. /// Normally this method to be called instead of Release() after whole /// file is delivered in order to free resources associated with /// FileChunks instance. void Remove(void); }; /// Container for FileChunks instances class FileChunksList { friend class FileChunks; private: Glib::Mutex lock; typedef std::map files_t; files_t files; int timeout; time_t last_timeout; /// Returns pointer to first stuck file. /// File is considred stuck if its Add method was last called more /// timeout seconds ago. FileChunks* GetStuck(void); void RemoveStuck(void); public: FileChunksList(void); ~FileChunksList(void); /// Returns previously created FileChunks object with associated path. /// If such instance does not exist new one is created. /// Obtained reference may be used for other operations. /// Obtained reference must be Release()ed after it is not longer needed. FileChunks& Get(std::string path); /// Assign timeout value (seconds) for file transfers void Timeout(int t) { timeout=t; }; /// Returns pointer to first in a list created FileChunks instance. 
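// Illustrative usage sketch of FileChunksList/FileChunks above (the names
// total_size/offset/chunk_size are placeholders, not part of this header):
//   FileChunksList chunks_list;
//   chunks_list.Timeout(600);
//   FileChunks& fc = chunks_list.Get("/job/dir/file");
//   fc.Size(total_size);
//   fc.Add(offset, chunk_size);       // called once per delivered chunk
//   if (fc.Complete()) fc.Remove();   // whole file delivered; free resources
//   else fc.Release();                // otherwise just drop the reference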
//FileChunks* GetFirst(void); }; class FileChunksRef { private: FileChunks& obj; public: FileChunksRef(FileChunks& o):obj(o) { }; ~FileChunksRef(void) { obj.Release(); }; FileChunks* operator->(void) { return &obj; }; }; } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/change_activity_status.cpp0000644000000000000000000000012413065020527026073 xustar000000000000000027 mtime=1490297175.117528 27 atime=1513200576.432725 30 ctime=1513200662.690780395 nordugrid-arc-5.4.2/src/services/a-rex/change_activity_status.cpp0000644000175000002070000004075013065020527026146 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "job.h" #include "arex.h" namespace ARex { Arc::MCC_Status ARexService::ChangeActivityStatus(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out) { /* ChangeActivityStatus ActivityIdentifier (wsa:EndpointReferenceType) OldStatus (a-rex,optional) attribute = state (bes-factory:ActivityStateEnumeration) NewStatus (a-rex) attribute = state (bes-factory:ActivityStateEnumeration) ChangeActivityStatusResponse NewStatus (a-rex) attribute = state (bes-factory:ActivityStateEnumeration) NotAuthorizedFault InvalidActivityIdentifierFault CantApplyOperationToCurrentStateFault */ { std::string s; in.GetXML(s); logger_.msg(Arc::VERBOSE, "ChangeActivityStatus: request = \n%s", s); }; Arc::WSAEndpointReference id(in["ActivityIdentifier"]); if(!(Arc::XMLNode)id) { // Wrong request logger_.msg(Arc::ERROR, "ChangeActivityStatus: no ActivityIdentifier found"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can't find ActivityIdentifier element in request"); InvalidRequestMessageFault(fault,"jsdl:ActivityIdentifier","Element is missing"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; std::string jobid = Arc::WSAEndpointReference(id).ReferenceParameters()["a-rex:JobID"]; if(jobid.empty()) { // EPR is wrongly formated or not an A-REX EPR logger_.msg(Arc::ERROR, "ChangeActivityStatus: EPR contains no JobID"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can't find JobID element in ActivityIdentifier"); InvalidRequestMessageFault(fault,"a-rex:JobID","Element is missing"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; ARexJob job(jobid,config,logger_); if(!job) { // There is no such job std::string failure = job.Failure(); logger_.msg(Arc::ERROR, "ChangeActivityStatus: no job found: %s",failure); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can't find requested Activity"); UnknownActivityIdentifierFault(fault,"No corresponding Activity found"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; // Old State Arc::XMLNode old_state = in["OldStatus"]; std::string old_bes_state = old_state.Attribute("state"); std::string old_arex_state = old_state["a-rex:state"]; // New state Arc::XMLNode new_state = in["NewStatus"]; if(!new_state) { // Wrong request logger_.msg(Arc::ERROR, "ChangeActivityStatus: missing NewStatus element"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Missing NewStatus element in request"); InvalidRequestMessageFault(fault,"a-rex:NewStatus","Element is missing"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; std::string new_bes_state = new_state.Attribute("state"); std::string new_arex_state = new_state["a-rex:state"]; // Take renewed proxy if supplied std::string delegation; Arc::XMLNode delegated_token = new_state["arcdeleg:DelegatedToken"]; if(delegated_token) { 
if(!delegation_stores_.DelegatedToken(config.GmConfig().DelegationDir(),delegated_token,config.GridName(),delegation)) { // Failed to accept delegation (report as bad request) logger_.msg(Arc::ERROR, "ChangeActivityStatus: Failed to accept delegation"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Failed to accept delegation"); InvalidRequestMessageFault(fault,"arcdeleg:DelegatedToken","This token does not exist"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; }; bool pending = false; std::string gm_state = job.State(pending); bool failed = job.Failed(); std::string bes_state(""); std::string arex_state(""); convertActivityStatus(gm_state,bes_state,arex_state,failed,pending); // Old state in request must be checked against current one if((!old_bes_state.empty()) && (old_bes_state != bes_state)) { logger_.msg(Arc::ERROR, "ChangeActivityStatus: old BES state does not match"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"OldStatus is not same as current status"); CantApplyOperationToCurrentStateFault(fault,gm_state,failed,"OldStatus does not match"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; if((!old_arex_state.empty()) && (old_arex_state != arex_state)) { logger_.msg(Arc::ERROR, "ChangeActivityStatus: old A-REX state does not match"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"OldStatus is not same as current status"); CantApplyOperationToCurrentStateFault(fault,gm_state,failed,"OldStatus does not match"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; // Check for allowed combinations if((new_bes_state == "Finished") && ((new_arex_state.empty()) || (new_arex_state == "Killing"))) { // Request to cancel job if((gm_state != "FINISHED") && (gm_state != "CANCELING") && (gm_state != "DELETED")) job.Cancel(); } else if((new_bes_state == "Finished") && (new_arex_state == "Deleted")) { // Request to clean job if((gm_state != "FINISHED") && (gm_state != "CANCELING") && (gm_state != "DELETED")) job.Cancel(); job.Clean(); } else if((new_bes_state == "Running") && (new_arex_state.empty())) { // Not supporting resume into user-defined state // Request to resume job if(!job.UpdateCredentials(delegation)) { logger_.msg(Arc::ERROR, "ChangeActivityStatus: Failed to update credentials"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Internal error: Failed to update credentials"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; if(!job.Resume()) { logger_.msg(Arc::ERROR, "ChangeActivityStatus: Failed to resume job"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Internal error: Failed to resume activity"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; } else { logger_.msg(Arc::ERROR, "ChangeActivityStatus: State change not allowed: from %s/%s to %s/%s", bes_state.c_str(),arex_state.c_str(),new_bes_state.c_str(),new_arex_state.c_str()); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Requested status transition is not supported"); CantApplyOperationToCurrentStateFault(fault,gm_state,failed,"Requested status transition is not supported"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; // Make response // TODO: // Updating currenst job state gm_state=job.State(pending); failed=job.Failed(); convertActivityStatus(gm_state,bes_state,arex_state,failed,pending); Arc::XMLNode state = out.NewChild("a-rex:NewStatus"); state.NewAttribute("bes-factory:state")=bes_state; state.NewChild("a-rex:state")=arex_state; { std::string s; out.GetXML(s); logger_.msg(Arc::VERBOSE, 
"ChangeActivityStatus: response = \n%s", s); }; return Arc::MCC_Status(Arc::STATUS_OK); } #define MAX_ACTIVITIES (10000) Arc::MCC_Status ARexService::ESPauseActivity(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out) { /* PauseActivity estypes:ActivityID 1- PauseActivityResponse PauseActivityResponseItem 1- estypes:ActivityID . EstimatedTime 0-1 estypes:InternalBaseFault OperationNotPossibleFault OperationNotAllowedFault ActivityNotFoundFault AccessControlFault estypes:VectorLimitExceededFault estypes:AccessControlFault estypes:InternalBaseFault */ Arc::XMLNode id = in["ActivityID"]; unsigned int n = 0; for(;(bool)id;++id) { if((++n) > MAX_ACTIVITIES) { Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,""); ESVectorLimitExceededFault(fault,MAX_ACTIVITIES,"Too many ActivityID"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; }; id = in["ActivityID"]; for(;(bool)id;++id) { std::string jobid = id; Arc::XMLNode item = out.NewChild("esmanag:PauseActivityResponseItem"); item.NewChild("estypes:ActivityID") = jobid; ARexJob job(jobid,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "EMIES:PauseActivity: job %s - %s", jobid, job.Failure()); ESActivityNotFoundFault(item.NewChild("dummy"),job.Failure()); } else { // Pause not implemented logger_.msg(Arc::ERROR, "EMIES:PauseActivity: job %s - %s", jobid, "pause not implemented"); ESOperationNotPossibleFault(item.NewChild("dummy"),"pause not implemented yet"); }; }; return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status ARexService::ESResumeActivity(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out) { /* ResumeActivity estypes:ActivityID 1- ResumeActivityResponse ResumeActivityResponseItem 1- estypes:ActivityID . EstimatedTime 0-1 estypes:InternalBaseFault OperationNotPossibleFault OperationNotAllowedFault ActivityNotFoundFault AccessControlFault estypes:VectorLimitExceededFault estypes:AccessControlFault estypes:InternalBaseFault */ Arc::XMLNode id = in["ActivityID"]; unsigned int n = 0; for(;(bool)id;++id) { if((++n) > MAX_ACTIVITIES) { Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,""); ESVectorLimitExceededFault(fault,MAX_ACTIVITIES,"Too many ActivityID"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; }; id = in["ActivityID"]; for(;(bool)id;++id) { std::string jobid = id; Arc::XMLNode item = out.NewChild("esmanag:ResumeActivityResponseItem"); item.NewChild("estypes:ActivityID") = jobid; ARexJob job(jobid,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "EMIES:ResumeActivity: job %s - %s", jobid, job.Failure()); ESActivityNotFoundFault(item.NewChild("dummy"),job.Failure()); } else { // Pause not implemented hence job can't be resumed too logger_.msg(Arc::ERROR, "EMIES:ResumeActivity: job %s - %s", jobid, "pause not implemented"); ESOperationNotAllowedFault(item.NewChild("dummy"),"pause not implemented"); }; }; return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status ARexService::ESCancelActivity(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out) { /* CancelActivity estypes:ActivityID 1- CancelActivityResponse CancelActivityResponseItem 1- estypes:ActivityID . 
EstimatedTime 0-1 estypes:InternalBaseFault OperationNotPossibleFault OperationNotAllowedFault ActivityNotFoundFault AccessControlFault estypes:VectorLimitExceededFault estypes:AccessControlFault estypes:InternalBaseFault */ Arc::XMLNode id = in["ActivityID"]; unsigned int n = 0; for(;(bool)id;++id) { if((++n) > MAX_ACTIVITIES) { Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,""); ESVectorLimitExceededFault(fault,MAX_ACTIVITIES,"Too many ActivityID"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; }; id = in["ActivityID"]; for(;(bool)id;++id) { std::string jobid = id; Arc::XMLNode item = out.NewChild("esmanag:CancelActivityResponseItem"); item.NewChild("estypes:ActivityID") = jobid; ARexJob job(jobid,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "EMIES:CancelActivity: job %s - %s", jobid, job.Failure()); ESActivityNotFoundFault(item.NewChild("dummy"),job.Failure()); } else { if(!job.Cancel()) { // Probably wrong current state logger_.msg(Arc::ERROR, "EMIES:CancelActivity: job %s - %s", jobid, job.Failure()); // TODO: check for real reason ESOperationNotAllowedFault(item.NewChild("dummy"),job.Failure()); } else { // It may take "wakeup period" for cancel mark to be detected. // And same time till result of cancel script is processed. // Currently it is not possible to estimate how long canceling // would happen. logger_.msg(Arc::INFO, "job %s cancelled successfully", jobid); item.NewChild("esmanag:EstimatedTime") = Arc::tostring(config.GmConfig().WakeupPeriod()*2); }; }; }; return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status ARexService::ESWipeActivity(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out) { /* esmanag:WipeActivity estypes:ActivityID esmanag:WipeActivityResponse esmanag:WipeActivityResponseItem estypes:ActivityID . esmanag:EstimatedTime (xsd:unsignedLong) estypes:InternalBaseFault OperationNotPossibleFault OperationNotAllowedFault ActivityNotFoundFault AccessControlFault estypes:VectorLimitExceededFault estypes:AccessControlFault estypes:InternalBaseFault */ Arc::XMLNode id = in["ActivityID"]; unsigned int n = 0; for(;(bool)id;++id) { if((++n) > MAX_ACTIVITIES) { Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,""); ESVectorLimitExceededFault(fault,MAX_ACTIVITIES,"Too many ActivityID"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; }; id = in["ActivityID"]; for(;(bool)id;++id) { std::string jobid = id; Arc::XMLNode item = out.NewChild("esmanag:WipeActivityResponseItem"); item.NewChild("estypes:ActivityID") = jobid; ARexJob job(jobid,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "EMIES:WipeActivity: job %s - %s", jobid, job.Failure()); ESActivityNotFoundFault(item.NewChild("dummy"),job.Failure()); } else { /* Despite it is against EMI-ES specs we allow job cleaning request to be accepted even if job is not in terminal state for user convenience. 
if((job.State() != "FINISHED") && (job.State() != "DELETED")) { logger_.msg(Arc::ERROR, "EMIES:WipeActivity: job %s - state is %s, not terminal", jobid, job.State()); ESOperationNotAllowedFault(item.NewChild("dummy"),"Not in terminal state"); } else */ if(!job.Clean()) { // Probably wrong current state logger_.msg(Arc::ERROR, "EMIES:WipeActivity: job %s - %s", jobid, job.Failure()); // TODO: check for real reason ESOperationNotAllowedFault(item.NewChild("dummy"),job.Failure()); } else { logger_.msg(Arc::INFO, "job %s (will be) cleaned successfully", jobid); item.NewChild("esmanag:EstimatedTime") = Arc::tostring(config.GmConfig().WakeupPeriod()); }; }; }; return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status ARexService::ESRestartActivity(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out) { /* esmanag:RestartActivity estypes:ActivityID esmanag:RestartActivityResponse esmanag:RestartActivityResponseItem estypes:ActivityID . esmanag:EstimatedTime (xsd:unsignedLong) estypes:InternalBaseFault OperationNotPossibleFault OperationNotAllowedFault ActivityNotFoundFault AccessControlFault estypes:VectorLimitExceededFault estypes:AccessControlFault estypes:InternalBaseFault */ Arc::XMLNode id = in["ActivityID"]; unsigned int n = 0; for(;(bool)id;++id) { if((++n) > MAX_ACTIVITIES) { Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,""); ESVectorLimitExceededFault(fault,MAX_ACTIVITIES,"Too many ActivityID"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; }; id = in["ActivityID"]; for(;(bool)id;++id) { std::string jobid = id; Arc::XMLNode item = out.NewChild("esmanag:RestartActivityResponseItem"); item.NewChild("estypes:ActivityID") = jobid; ARexJob job(jobid,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "EMIES:RestartActivity: job %s - %s", jobid, job.Failure()); ESActivityNotFoundFault(item.NewChild("dummy"),job.Failure()); } else { if(!job.Resume()) { // Probably wrong current state logger_.msg(Arc::ERROR, "EMIES:RestartActivity: job %s - %s", jobid, job.Failure()); // TODO: check for real reason ESOperationNotAllowedFault(item.NewChild("dummy"),job.Failure()); } else { logger_.msg(Arc::INFO, "job %s restarted successfully", jobid); item.NewChild("esmanag:EstimatedTime") = Arc::tostring(config.GmConfig().WakeupPeriod()); }; }; }; return Arc::MCC_Status(Arc::STATUS_OK); } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/update_credentials.cpp0000644000000000000000000000012411712244077025173 xustar000000000000000027 mtime=1328105535.039435 27 atime=1513200576.412725 30 ctime=1513200662.693780432 nordugrid-arc-5.4.2/src/services/a-rex/update_credentials.cpp0000644000175000002070000000632711712244077025250 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "job.h" #include "arex.h" namespace ARex { Arc::MCC_Status ARexService::UpdateCredentials(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out,const std::string& credentials) { /* UpdateCredentials (deleg) DelegatedToken Reference (multiple) UpdateCredentialsResponse (deleg) NotAuthorizedFault InvalidRequestMessageFault InvalidActivityIdentifierFault */ { std::string s; in.GetXML(s); logger_.msg(Arc::VERBOSE, "UpdateCredentials: request = \n%s", s); }; // Extract job id from references Arc::XMLNode refnode = in["DelegatedToken"]["Reference"]; if(!refnode) { // Must refer to job logger_.msg(Arc::ERROR, "UpdateCredentials: missing Reference"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Must have Activity 
specified in Reference"); InvalidRequestMessageFault(fault,"arcdeleg:Reference","Wrong multiplicity"); out.Destroy(); return Arc::MCC_Status(); } if((bool)(refnode[1])) { // Only one job can be updated per operation (profile) logger_.msg(Arc::ERROR, "UpdateCredentials: wrong number of Reference"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can update credentials only for single Activity"); InvalidRequestMessageFault(fault,"arcdeleg:Reference","Wrong multiplicity"); out.Destroy(); return Arc::MCC_Status(); }; if(refnode.Size() != 1) { // Expecting single job EPR in Reference logger_.msg(Arc::ERROR, "UpdateCredentials: wrong number of elements inside Reference"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can update credentials only for single Activity"); InvalidRequestMessageFault(fault,"arcdeleg:Reference","Wrong content"); out.Destroy(); return Arc::MCC_Status(); } std::string jobid = Arc::WSAEndpointReference(refnode.Child()).ReferenceParameters()["a-rex:JobID"]; if(jobid.empty()) { // EPR is wrongly formated or not an A-REX EPR logger_.msg(Arc::ERROR, "UpdateCredentials: EPR contains no JobID"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can't find JobID element in ActivityIdentifier"); InvalidRequestMessageFault(fault,"arcdeleg:Reference","Wrong content"); out.Destroy(); return Arc::MCC_Status(); }; ARexJob job(jobid,config,logger_); if(!job) { // There is no such job std::string failure = job.Failure(); logger_.msg(Arc::ERROR, "UpdateCredentials: no job found: %s",failure); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can't find requested Activity"); UnknownActivityIdentifierFault(fault,"No corresponding Activity found"); out.Destroy(); return Arc::MCC_Status(); }; if(!job.UpdateCredentials(credentials)) { logger_.msg(Arc::ERROR, "UpdateCredentials: failed to update credentials"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Internal error: Failed to update credentials"); out.Destroy(); return Arc::MCC_Status(); }; { std::string s; out.GetXML(s); logger_.msg(Arc::VERBOSE, "UpdateCredentials: response = \n%s", s); }; return Arc::MCC_Status(Arc::STATUS_OK); } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/grid-manager0000644000000000000000000000013213214316026023100 xustar000000000000000030 mtime=1513200662.807781826 30 atime=1513200668.718854121 30 ctime=1513200662.807781826 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/0000755000175000002070000000000013214316026023223 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/arc-config-check.1.in0000644000000000000000000000012712123705613026735 xustar000000000000000027 mtime=1364167563.653962 30 atime=1513200650.070626046 30 ctime=1513200662.785781557 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/arc-config-check.1.in0000644000175000002070000000305712123705613027004 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARC-CONFIG-CHECK 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arc-config-check \- checks the arc.conf for inconsistencies, known problems or (in a future development) just general bad taste. .SH SYNOPSIS arc-config-check --printall .SH DESCRIPTION The motivation behind this little script was to have a repository for automated tests on issues that came up on the NorduGrid developers mailing list. 
As such this script indicates directories that are not present, checks host certificates, CA certificates and CRLs, validates the sanity of ARC configuration and tests for clock skew. BECAUSE EVERY INSTALLATION OF ARC IS DIFFERENT THIS UTILITY ONLY SUGGESTS WHAT COULD BE WRONG. SOMETIMES IT IS OVERRESTRICTIVE. AND SOMETIMES IT CAN MISS SOME MISCONFIGURATION. NEVER TREAT RESULTS PRODUCED BY IT AS ULTIMATE TRUTH. .SH OPTIONS .TP .B \-\-config Specifies the location of the config file, by default it is /etc/arc.conf .TP .B \-\-printall Lists all variable names of the config file together with their values. .TP .B \-\-timeserver Allows the specification of a server against which to test the local system's time. .TP .B \-\-skip-warnings Do not show warnings. .TP .B \-\-help Quick summary of options .TP .B \-\-man Detailed man page. .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/arc_vomsac_check.cpp0000644000000000000000000000012412123101751027127 xustar000000000000000027 mtime=1363969001.473645 27 atime=1513200576.240723 30 ctime=1513200662.793781655 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/arc_vomsac_check.cpp0000644000175000002070000001711412123101751027200 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #define AC_POLICY_PARAM_NAME "ac_policy" static Arc::Logger logger(Arc::Logger::rootLogger, "arc-vomsac-check"); static void usage(char *pname) { logger.msg(Arc::ERROR,"Usage: %s [-N] -P -L [-c ] [-d ]",pname); } /* ARC classes use '/VO=voname/Group=groupname/subgroupname/Role=role' notation * VOMS classes use '/voname/groupname/subgroupname/Role=role/Capability=NULL' notation * For configuration compatibility reasons normalization to common format used * either for values provided in config or retreived from proxy certificate */ std::string normalize_fqan(std::string fqan) { // first trim possible spaces and quotes std::string nfqan = Arc::trim(fqan," \""); // remove 'VO=' and 'Group=' if any std::size_t pos = nfqan.find("VO="); if(pos != std::string::npos) nfqan.erase(pos, 3); pos = nfqan.find("Group="); if(pos != std::string::npos) nfqan.erase(pos, 6); // remove NULL values pos = nfqan.find("/Role=NULL"); if(pos != std::string::npos) nfqan.erase(pos, 10); pos = nfqan.find("/Capability=NULL"); if(pos != std::string::npos) nfqan.erase(pos, 16); // return normalized fqan return nfqan; } int main(int argc, char *argv[]) { int opt; int no_ac_success = 0; const char *user_proxy_f = NULL; const char *job_local_f = NULL; const char *config_f = NULL; // log Arc::LogStream logcerr(std::cerr); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::ERROR); // parse options while ((opt = getopt(argc, argv, "NP:L:c:d:")) != -1) { switch (opt) { case 'N': no_ac_success = 1; break; case 'P': user_proxy_f = optarg; break; case 'L': job_local_f = optarg; break; case 'c': config_f = optarg; break; case 'd': Arc::Logger::getRootLogger().setThreshold( Arc::old_level_to_level(atoi(optarg)) ); break; default: usage(argv[0]); return EXIT_FAILURE; } } // check required if ( !user_proxy_f ) { logger.msg(Arc::ERROR,"User proxy file is required but is 
not specified"); usage(argv[0]); return EXIT_FAILURE; } if ( !job_local_f ) { logger.msg(Arc::ERROR,"Local job status file is required"); usage(argv[0]); return EXIT_FAILURE; } if ( !config_f ) { config_f = "/etc/arc.conf"; } // read information about the job used by the A-REX // and determine selected queue std::string queue; std::ifstream job_local; job_local.open(job_local_f, std::ios::in); if ( job_local.is_open() ) { std::string line; while ( job_local.good() ){ getline(job_local,line); if ( ! line.compare(0,6,"queue=") ) { queue = line.substr(6); logger.msg(Arc::INFO,"Making the decision for the queue %s",queue); break; } } } else { logger.msg(Arc::ERROR,"Can not read information from the local job status file"); return EXIT_FAILURE; } job_local.close(); // Parse INI configuraion file Arc::IniConfig cfg(config_f); if ( ! cfg ) { logger.msg(Arc::ERROR,"Can not parse the configuration file %s",config_f); return EXIT_FAILURE; } // get queue ac_policy // first try [queue/name] block directly, then search for 'id' or 'name' field in [queue] blocks std::string qqueue = '"' + queue + '"'; Arc::XMLNode qparams = cfg["queue/" + queue]; if ( ! (bool)qparams ) { for(Arc::XMLNode qnode = cfg["queue"];(bool)qnode;++qnode) { if ( (std::string)qnode["id"] == qqueue || (std::string)qnode["name"] == qqueue ) { qparams = qnode; break; } } } if ( ! (bool)qparams) { logger.msg(Arc::ERROR,"Can not find queue '%s' in the configuration file",queue); return EXIT_FAILURE; } // create match regexes from ac_policy provided std::vector< std::pair > access_policies; for ( Arc::XMLNode pnode = qparams[AC_POLICY_PARAM_NAME];(bool)pnode;++pnode) { std::string acp = (std::string)pnode; std::size_t pos = acp.find("VOMS:"); if ( pos != std::string::npos ) { // determine positive/negative match bool pmatch = true; if ( pos ) { char pnflag = acp[pos-1]; if ( pnflag == '-' || pnflag == '!' ) pmatch = false; } // normalize rest part of the string std::string regex = ".*" + normalize_fqan(acp.substr(pos + 5)) + ".*"; // save (regex,pmatch) pairs to access_policies vector std::pair match_regex(Arc::RegularExpression(regex),pmatch); access_policies.push_back(match_regex); } } if ( access_policies.empty() ) { logger.msg(Arc::INFO,"No access policy to check, returning success"); return EXIT_SUCCESS; } struct stat statFInfo; // CA Cert directory required to work with proxy std::string ca_dir = (std::string)cfg["common"]["x509_cert_dir"]; if (ca_dir.empty()) { ca_dir = "/etc/grid-security/certificates"; } else { ca_dir = Arc::trim(ca_dir,"\""); } if ( stat(ca_dir.c_str(),&statFInfo) ) { logger.msg(Arc::ERROR,"CA certificates directory %s does not exist", ca_dir); return EXIT_FAILURE; } // VOMS directory required to verify VOMS ACs std::string voms_dir = (std::string)cfg["common"]["x509_voms_dir"]; if (voms_dir.empty()) { voms_dir = "/etc/grid-security/vomsdir"; } else { voms_dir = Arc::trim(voms_dir,"\""); } // Or maybe not _required_ //if ( stat(voms_dir.c_str(),&statFInfo) ) { // logger.msg(Arc::ERROR,"VOMS directory %s does not exist", voms_dir); // return EXIT_FAILURE; //} // construct ARC credentials object Arc::Credential holder(user_proxy_f, "", ca_dir, ""); if (! holder.GetVerification()) { logger.msg(Arc::ERROR,"User proxy certificate is not valid"); return EXIT_FAILURE; } // get VOMS AC from proxy certificate logger.msg(Arc::DEBUG,"Getting VOMS AC for: %s", holder.GetDN()); std::vector voms_attributes; Arc::VOMSTrustList vomscert_trust_dn; vomscert_trust_dn.AddRegex(".*"); if ( ! 
Arc::parseVOMSAC(holder, ca_dir, "", voms_dir, vomscert_trust_dn, voms_attributes, true, true) ) { // logger.msg(Arc::WARNING,"Error parsing VOMS AC"); if ( no_ac_success ) return EXIT_SUCCESS; return EXIT_FAILURE; } // loop over access_policies for (std::vector >::iterator iP = access_policies.begin(); iP != access_policies.end(); iP++) { logger.msg(Arc::VERBOSE,"Checking a match for '%s'",(iP->first).getPattern()); // for every VOMS AC provided for (std::vector::iterator iAC = voms_attributes.begin(); iAC != voms_attributes.end(); iAC++) { // check every attribure specified to match specified policy for (int acnt = 0; acnt < iAC->attributes.size(); acnt++ ) { std::string fqan = normalize_fqan(iAC->attributes[acnt]); if ( (iP->first).match(fqan) ) { logger.msg(Arc::DEBUG,"FQAN '%s' IS a match to '%s'",fqan,(iP->first).getPattern()); // if positive match return success if ( iP->second ) return EXIT_SUCCESS; // prohibit execution on negative match logger.msg(Arc::ERROR,"Queue '%s' usage is prohibited to FQAN '%s' by the site access policy", queue, fqan); return EXIT_FAILURE; } else { logger.msg(Arc::DEBUG,"FQAN '%s' IS NOT a match to '%s'",fqan,(iP->first).getPattern()); } } } } logger.msg(Arc::ERROR,"Queue '%s' usage with provided FQANs is prohibited by the site access policy",queue); return EXIT_FAILURE; } nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/gm_jobs.cpp0000644000000000000000000000012413065020425025301 xustar000000000000000027 mtime=1490297109.452371 27 atime=1513200575.968719 30 ctime=1513200662.795781679 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/gm_jobs.cpp0000644000175000002070000004071613065020425025356 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "conf/GMConfig.h" #include "conf/StagingConfig.h" #include "files/ControlFileHandling.h" #include "jobs/CommFIFO.h" #include "jobs/JobsList.h" #include "../delegation/DelegationStore.h" using namespace ARex; /** Fill maps with shares taken from data staging states log */ static void get_data_staging_shares(const GMConfig& config, std::map& share_preparing, std::map& share_preparing_pending, std::map& share_finishing, std::map& share_finishing_pending) { // get DTR configuration StagingConfig staging_conf(config); if (!staging_conf) { std::cout<<"Could not read data staging configuration from "< data; if (!Arc::FileRead(dtr_log, data)) { std::cout<<"Can't read transfer states from "<::iterator line = data.begin(); line != data.end(); ++line) { std::vector entries; Arc::tokenize(*line, entries, " "); if (entries.size() < 4 || entries.size() > 6) continue; std::string state = entries[1]; std::string share = entries[3]; bool preparing = (share.find("-download") == share.size()-9); if (state == "TRANSFERRING") { preparing ? share_preparing[share]++ : share_finishing[share]++; } else { preparing ? 
share_preparing_pending[share]++ : share_finishing_pending[share]++; } } } class counters_t { public: unsigned int jobs_num[JOB_STATE_NUM]; const static unsigned int jobs_pending; unsigned int& operator[](int n) { return jobs_num[n]; }; }; const unsigned int counters_t::jobs_pending = 0; static bool match_list(const std::string& arg, std::list& args, bool erase = false) { for(std::list::const_iterator a = args.begin(); a != args.end(); ++a) { if(*a == arg) { //if(erase) args.erase(a); return true; } } return false; } /** * Print info to stdout on users' jobs */ int main(int argc, char* argv[]) { // stderr destination for error messages Arc::LogStream logcerr(std::cerr); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::DEBUG); Arc::OptionParser options(" ", istring("gm-jobs displays information on " "current jobs in the system.")); bool long_list = false; options.AddOption('l', "longlist", istring("display more information on each job"), long_list); std::string conf_file; options.AddOption('c', "conffile", istring("use specified configuration file"), istring("file"), conf_file); std::string control_dir; options.AddOption('d', "controldir", istring("read information from specified control directory"), istring("dir"), control_dir); bool show_share = false; options.AddOption('s', "showshares", istring("print summary of jobs in each transfer share"), show_share); bool notshow_jobs = false; options.AddOption('J', "notshowjobs", istring("do not print list of jobs"), notshow_jobs); bool notshow_states = false; options.AddOption('S', "notshowstates", istring("do not print number of jobs in each state"), notshow_states); bool show_service = false; options.AddOption('w', "showservice", istring("print state of the service"), show_service); std::list filter_users; options.AddOption('f', "filteruser", istring("show only jobs of user(s) with specified subject name(s)"), istring("dn"), filter_users); std::list cancel_jobs; options.AddOption('k', "killjob", istring("request to cancel job(s) with specified ID(s)"), istring("id"), cancel_jobs); std::list cancel_users; options.AddOption('K', "killuser", istring("request to cancel jobs belonging to user(s) with specified subject name(s)"), istring("dn"), cancel_users); std::list clean_jobs; options.AddOption('r', "remjob", istring("request to clean job(s) with specified ID(s)"), istring("id"), clean_jobs); std::list clean_users; options.AddOption('R', "remuser", istring("request to clean jobs belonging to user(s) with specified subject name(s)"), istring("dn"), clean_users); std::list filter_jobs; options.AddOption('j', "filterjob", istring("show only jobs with specified ID(s)"), istring("id"), filter_jobs); bool show_delegs = false; options.AddOption('e', "listdelegs", istring("print list of available delegation IDs"), show_delegs); std::list show_deleg_ids; options.AddOption('E', "showdeleg", istring("print delegation token of specified ID(s)"), istring("id"), show_deleg_ids); std::list show_deleg_jobs; options.AddOption('D', "showdelegjob", istring("print main delegation token of specified Job ID(s)"), istring("job id"), show_deleg_jobs); std::string output_file; options.AddOption('o', "output", istring("output requested elements (jobs list, delegation ids and tokens) to file"), istring("file name"), output_file); std::list params = options.Parse(argc, argv); if(show_share) { // Why? 
notshow_jobs=true; notshow_states=true; } GMConfig config; if (!conf_file.empty()) config.SetConfigFile(conf_file); std::cout << "Using configuration at " << config.ConfigFile() << std::endl; if(!config.Load()) exit(1); if (!control_dir.empty()) config.SetControlDir(control_dir); config.Print(); DelegationStore::DbType deleg_db_type = DelegationStore::DbBerkeley; switch(config.DelegationDBType()) { case GMConfig::deleg_db_bdb: deleg_db_type = DelegationStore::DbBerkeley; break; case GMConfig::deleg_db_sqlite: deleg_db_type = DelegationStore::DbSQLite; break; }; std::ostream* outs = &std::cout; std::ofstream outf; if(!output_file.empty()) { outf.open(output_file.c_str()); if(!outf.is_open()) { std::cerr<<"Failed to open output file '"< 0) || (clean_users.size() > 0) || (cancel_jobs.size() > 0) || (clean_jobs.size() > 0)) { std::cout << "Looking for current jobs" << std::endl; } bool service_alive = false; counters_t counters; counters_t counters_pending; for(int i=0; i cancel_jobs_list; std::list clean_jobs_list; if((!notshow_jobs) || (!notshow_states) || (cancel_users.size() > 0) || (clean_users.size() > 0) || (cancel_jobs.size() > 0) || (clean_jobs.size() > 0)) { if(filter_jobs.size() > 0) { for(std::list::iterator id = filter_jobs.begin(); id != filter_jobs.end(); ++id) { jobs.AddJob(*id); } } else { jobs.ScanAllJobs(); } for (JobsList::iterator i=jobs.begin(); i!=jobs.end(); ++i) { // Collecting information bool pending; job_state_t new_state = job_state_read_file(i->get_id(), config, pending); if (new_state == JOB_STATE_UNDEFINED) { std::cout<<"Job: "<get_id()<<" : ERROR : Unrecognizable state"<get_id(), config)); jobs_total++; counters[new_state]++; if (pending) counters_pending[new_state]++; JobLocalDescription& job_desc = *(i->GetLocalDescription(config)); if (&job_desc == NULL) { std::cout<<"Job: "<get_id()<<" : ERROR : No local information."<get_id(),cancel_jobs)) { cancel_jobs_list.push_back(&(*i)); } if(match_list(job_desc.DN,clean_users)) { clean_jobs_list.push_back(&(*i)); } if(match_list(i->get_id(),clean_jobs)) { clean_jobs_list.push_back(&(*i)); } // Printing information if((filter_users.size() > 0) && (!match_list(job_desc.DN,filter_users))) continue; if((!show_share) && (!notshow_jobs)) std::cout << "Job: "<get_id(); if(!notshow_jobs) { if (!long_list) { *outs<<" : "< share_preparing; std::map share_preparing_pending; std::map share_finishing; std::map share_finishing_pending; get_data_staging_shares(config, share_preparing, share_preparing_pending, share_finishing, share_finishing_pending); *outs<<"\n Preparing/Pending files\tTransfer share"<::iterator i = share_preparing.begin(); i != share_preparing.end(); i++) { *outs<<" "<second<<"/"<first]<<"\t\t\t"<first<::iterator i = share_preparing_pending.begin(); i != share_preparing_pending.end(); i++) { if (share_preparing[i->first] == 0) *outs<<" 0/"<first]<<"\t\t\t"<first<::iterator i = share_finishing.begin(); i != share_finishing.end(); i++) { *outs<<" "<second<<"/"<first]<<"\t\t\t"<first<::iterator i = share_finishing_pending.begin(); i != share_finishing_pending.end(); i++) { if (share_finishing[i->first] == 0) *outs<<" 0/"<first]<<"\t\t\t"<first<(i))<<": "< 0)) { ARex::DelegationStore dstore(config.DelegationDir(), deleg_db_type, false); if(dstore) { std::list > creds = dstore.ListCredIDs(); for(std::list >::iterator cred = creds.begin(); cred != creds.end(); ++cred) { if((filter_users.size() > 0) && (!match_list(cred->second,filter_users))) continue; if(show_delegs) { *outs<<"Delegation: "<first<second< lock_ids; 
if(dstore.GetLocks(cred->first, cred->second, lock_ids)) { for(std::list::iterator lock = lock_ids.begin(); lock != lock_ids.end(); ++lock) { *outs<<"\tJob: "<<*lock< 0) { // TODO: optimize to avoid full scanning. if(match_list(cred->first,show_deleg_ids)) { std::string tokenpath = dstore.FindCred(cred->first,cred->second); if(!tokenpath.empty()) { std::string token; if(Arc::FileRead(tokenpath,token) && (!token.empty())) { *outs<<"Delegation: "<first<<", "<second< 0) { ARex::DelegationStore dstore(config.DelegationDir(), deleg_db_type, false); for(std::list::iterator jobid = show_deleg_jobs.begin(); jobid != show_deleg_jobs.end(); ++jobid) { // Read job's local file to extract delegation id JobLocalDescription job_desc; if(job_local_read_file(*jobid,config,job_desc)) { std::string token; if(!job_desc.delegationid.empty()) { std::string tokenpath = dstore.FindCred(job_desc.delegationid,job_desc.DN); if(!tokenpath.empty()) { (void)Arc::FileRead(tokenpath,token); } } if(token.empty()) { // fall back to public only part (void)job_proxy_read_file(*jobid,config,token); job_desc.delegationid = "public"; } if(!token.empty()) { *outs<<"Job: "<<*jobid< 0) { for(std::list::iterator job = cancel_jobs_list.begin(); job != cancel_jobs_list.end(); ++job) { if(!job_cancel_mark_put(**job, config)) { std::cout<<"Job: "<<(*job)->get_id()<<" : ERROR : Failed to put cancel mark"<get_id()<<" : Cancel request put"< 0) { for(std::list::iterator job = clean_jobs_list.begin(); job != clean_jobs_list.end(); ++job) { // Do not clean job directly because it may have delegations locked. // Instead put clean mark and let A-REX do cleaning properly. if(!job_clean_mark_put(**job, config)) { std::cout<<"Job: "<<(*job)->get_id()<<" : ERROR : Failed to put clean mark"<get_id()<<" : Clean request put"< #include #include #include #include #include #include "GMConfig.h" namespace ARex { class DTRGenerator; /// Represents configuration of DTR data staging class StagingConfig { friend class DTRGenerator; public: /// Load config from [data-staging] or section of ini or xml /// configuration file. 
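/// Typical use (as in gm_jobs.cpp above): construct from a loaded GMConfig
/// and check validity before reading any limits, e.g.
///   StagingConfig staging_conf(config);
///   if (!staging_conf) { /* report the error and bail out */ }
///   int max_delivery = staging_conf.get_max_delivery();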
StagingConfig(const GMConfig& config); operator bool() const { return valid; }; bool operator!() const { return !valid; }; int get_max_delivery() const { return max_delivery; }; int get_max_processor() const { return max_processor; }; int get_max_emergency() const { return max_emergency; }; int get_max_prepared() const { return max_prepared; }; unsigned long long int get_min_speed() const { return min_speed; }; time_t get_min_speed_time() const { return min_speed_time; }; unsigned long long int get_min_average_speed() const { return min_average_speed; }; time_t get_max_inactivity_time() const { return max_inactivity_time; }; int get_max_retries() const { return max_retries; }; bool get_passive() const { return passive; }; bool get_secure() const { return secure; }; bool get_local_transfer() const { return local_transfer; }; bool get_httpgetpartial() const { return httpgetpartial; }; std::string get_preferred_pattern() const { return preferred_pattern; }; std::vector get_delivery_services() const { return delivery_services; }; unsigned long long int get_remote_size_limit() const { return remote_size_limit; }; std::string get_share_type() const { return share_type; }; std::map get_defined_shares() const { return defined_shares; }; bool get_use_host_cert_for_remote_delivery() const { return use_host_cert_for_remote_delivery; }; Arc::LogLevel get_log_level() const { return log_level; }; std::string get_dtr_log() const { return dtr_log; }; std::string get_dtr_central_log() const { return dtr_central_log; }; std::string get_acix_endpoint() const { return acix_endpoint; }; private: /// Max transfers in delivery int max_delivery; /// Max number of pre- and post-processor slots per state int max_processor; /// Max number of emergency slots int max_emergency; /// Number of files per share to keep prepared int max_prepared; // TODO: the next 8 members are already defined in in xml. // Need to move them to instead. 
For now they are only processed for // ini-style config with [data-staging] /// Minimum speed for transfer over min_speed_time seconds unsigned long long int min_speed; /// Time over which to calculate min_speed time_t min_speed_time; /// Minimum average speed for entire transfer unsigned long long int min_average_speed; /// Maximum time with no transfer activity time_t max_inactivity_time; /// Max retries for failed transfers that can be retried int max_retries; /// Whether or not to use passive transfer (off by default) bool passive; /// Whether or not to use secure transfer (off by default) bool secure; /// Whether or not to use local transfer on worker node (off by default) bool local_transfer; /// Whether to use partial HTTP GET transfers (on by default) bool httpgetpartial; /// Pattern for choosing preferred replicas std::string preferred_pattern; /// Endpoints of delivery services std::vector delivery_services; /// File size limit (in bytes) below which local transfer should be used unsigned long long int remote_size_limit; /// Criterion on which to split transfers into shares std::string share_type; /// The list of shares with defined priorities std::map defined_shares; /// Whether to use the host certificate for remote delivery bool use_host_cert_for_remote_delivery; /// Log level for DTR transfer log in job.id.errors file Arc::LogLevel log_level; /// where to log DTR state information std::string dtr_log; /// Log for performance metrics Arc::JobPerfLog perf_log; /// Central log file for all DTR messages std::string dtr_central_log; /// ACIX endpoint from which to find locations of cached files std::string acix_endpoint; /// Validity of configuration bool valid; /// Logger object static Arc::Logger logger; /// Read in params from XML config bool readStagingConf(const Arc::XMLNode& cfg); /// Read in params from ini config bool readStagingConf(Arc::ConfigFile& cfile); /// Convert parameter to integer with mimimum value of -1 bool paramToInt(const std::string& param, int& value); StagingConfig(); }; } // namespace ARex #endif /* GM_CONF_STAGING_H_ */ nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315732026152 xustar000000000000000030 mtime=1513200602.007038209 30 atime=1513200649.937624419 30 ctime=1513200662.901782976 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/Makefile.in0000644000175000002070000006540413214315732026231 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/grid-manager/conf DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libconf_la_LIBADD = am_libconf_la_OBJECTS = libconf_la-GMConfig.lo \ libconf_la-CoreConfig.lo libconf_la-UrlMapConfig.lo \ libconf_la-CacheConfig.lo libconf_la-StagingConfig.lo libconf_la_OBJECTS = $(am_libconf_la_OBJECTS) libconf_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libconf_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libconf_la_SOURCES) DIST_SOURCES = $(libconf_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = 
@AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = 
@JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ 
htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libconf.la libconf_la_SOURCES = \ GMConfig.h GMConfig.cpp \ CoreConfig.cpp CoreConfig.h \ UrlMapConfig.cpp UrlMapConfig.h \ CacheConfig.cpp CacheConfig.h \ StagingConfig.cpp StagingConfig.h libconf_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/conf/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/conf/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libconf.la: $(libconf_la_OBJECTS) $(libconf_la_DEPENDENCIES) $(libconf_la_LINK) $(libconf_la_OBJECTS) $(libconf_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libconf_la-CacheConfig.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libconf_la-CoreConfig.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libconf_la-GMConfig.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libconf_la-StagingConfig.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libconf_la-UrlMapConfig.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libconf_la-GMConfig.lo: GMConfig.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -MT libconf_la-GMConfig.lo -MD -MP -MF $(DEPDIR)/libconf_la-GMConfig.Tpo -c -o libconf_la-GMConfig.lo `test -f 'GMConfig.cpp' || echo '$(srcdir)/'`GMConfig.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libconf_la-GMConfig.Tpo $(DEPDIR)/libconf_la-GMConfig.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='GMConfig.cpp' object='libconf_la-GMConfig.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX 
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -c -o libconf_la-GMConfig.lo `test -f 'GMConfig.cpp' || echo '$(srcdir)/'`GMConfig.cpp libconf_la-CoreConfig.lo: CoreConfig.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -MT libconf_la-CoreConfig.lo -MD -MP -MF $(DEPDIR)/libconf_la-CoreConfig.Tpo -c -o libconf_la-CoreConfig.lo `test -f 'CoreConfig.cpp' || echo '$(srcdir)/'`CoreConfig.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libconf_la-CoreConfig.Tpo $(DEPDIR)/libconf_la-CoreConfig.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='CoreConfig.cpp' object='libconf_la-CoreConfig.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -c -o libconf_la-CoreConfig.lo `test -f 'CoreConfig.cpp' || echo '$(srcdir)/'`CoreConfig.cpp libconf_la-UrlMapConfig.lo: UrlMapConfig.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -MT libconf_la-UrlMapConfig.lo -MD -MP -MF $(DEPDIR)/libconf_la-UrlMapConfig.Tpo -c -o libconf_la-UrlMapConfig.lo `test -f 'UrlMapConfig.cpp' || echo '$(srcdir)/'`UrlMapConfig.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libconf_la-UrlMapConfig.Tpo $(DEPDIR)/libconf_la-UrlMapConfig.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='UrlMapConfig.cpp' object='libconf_la-UrlMapConfig.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -c -o libconf_la-UrlMapConfig.lo `test -f 'UrlMapConfig.cpp' || echo '$(srcdir)/'`UrlMapConfig.cpp libconf_la-CacheConfig.lo: CacheConfig.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -MT libconf_la-CacheConfig.lo -MD -MP -MF $(DEPDIR)/libconf_la-CacheConfig.Tpo -c -o libconf_la-CacheConfig.lo `test -f 'CacheConfig.cpp' || echo '$(srcdir)/'`CacheConfig.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libconf_la-CacheConfig.Tpo $(DEPDIR)/libconf_la-CacheConfig.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='CacheConfig.cpp' object='libconf_la-CacheConfig.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -c -o libconf_la-CacheConfig.lo `test -f 'CacheConfig.cpp' || echo '$(srcdir)/'`CacheConfig.cpp libconf_la-StagingConfig.lo: StagingConfig.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -MT libconf_la-StagingConfig.lo -MD -MP -MF $(DEPDIR)/libconf_la-StagingConfig.Tpo -c -o libconf_la-StagingConfig.lo `test -f 'StagingConfig.cpp' || echo '$(srcdir)/'`StagingConfig.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libconf_la-StagingConfig.Tpo $(DEPDIR)/libconf_la-StagingConfig.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='StagingConfig.cpp' object='libconf_la-StagingConfig.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -c -o libconf_la-StagingConfig.lo `test -f 'StagingConfig.cpp' || echo '$(srcdir)/'`StagingConfig.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/PaxHeaders.7502/CacheConfig.cpp0000644000000000000000000000012412771224333026746 xustar000000000000000027 mtime=1474635995.909107 27 atime=1513200576.106721 30 ctime=1513200662.908783061 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/CacheConfig.cpp0000644000175000002070000003515212771224333027021 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "GMConfig.h" #include "CacheConfig.h" namespace ARex { CacheConfig::CacheConfig(const GMConfig& config): _cache_max(100), _cache_min(100), _log_file("/var/log/arc/cache-clean.log"), _log_level("INFO") , _lifetime("0"), _cache_shared(false), _clean_timeout(0) { // Load conf file Arc::ConfigFile cfile; if(!cfile.open(config.ConfigFile())) throw CacheConfigException("Can't open configuration file"); /* detect type of file */ switch(cfile.detect()) { case Arc::ConfigFile::file_XML: { Arc::XMLNode cfg; if(!cfg.ReadFromStream(cfile)) { cfile.close(); throw CacheConfigException("Can't interpret configuration file as XML"); }; cfile.close(); try { parseXMLConf(cfg); } catch (CacheConfigException& e) { cfile.close(); throw; } }; break; case Arc::ConfigFile::file_INI: { Arc::ConfigIni cf(cfile); try { parseINIConf(cf); } catch (CacheConfigException& e) { cfile.close(); throw; } }; break; default: { cfile.close(); throw CacheConfigException("Can't recognize type of configuration file"); }; break; }; cfile.close(); } CacheConfig::CacheConfig(const Arc::XMLNode& cfg): _cache_max(100), _cache_min(100), _log_file("/var/log/arc/cache-clean.log"), _log_level("INFO") , _lifetime("0"), _cache_shared(false), _clean_timeout(0) { parseXMLConf(cfg); } void CacheConfig::parseINIConf(Arc::ConfigIni& cf) { cf.AddSection("common"); cf.AddSection("grid-manager"); for(;;) { std::string rest; std::string command; cf.ReadNext(command,rest); if(command.length() == 0) break; // EOF else if(command == "remotecachedir") { std::string cache_dir = Arc::ConfigIni::NextArg(rest); if(cache_dir.length() == 0) continue; // cache is disabled std::string cache_link_dir = Arc::ConfigIni::NextArg(rest); // take off leading slashes if (cache_dir.rfind("/") == cache_dir.length()-1) cache_dir = cache_dir.substr(0, cache_dir.length()-1); // add this cache to our list std::string cache = cache_dir; // check if the cache dir needs to be drained bool isDrainingCache = false; if (cache_link_dir == "drain") { cache = cache_dir.substr(0, cache_dir.find(" ")); cache_link_dir = ""; isDrainingCache = true; } if (!cache_link_dir.empty()) cache += " "+cache_link_dir; if(isDrainingCache) _draining_cache_dirs.push_back(cache); else _remote_cache_dirs.push_back(cache); } else if(command == "cachedir") { std::string cache_dir = Arc::ConfigIni::NextArg(rest); if(cache_dir.length() == 0) continue; // cache is disabled std::string cache_link_dir = Arc::ConfigIni::NextArg(rest); // validation of paths while (cache_dir.length() > 1 && cache_dir.rfind("/") == cache_dir.length()-1) cache_dir = cache_dir.substr(0, cache_dir.length()-1); if (cache_dir[0] != '/') throw CacheConfigException("Cache path must start with '/'"); if (cache_dir.find("..") != std::string::npos) throw CacheConfigException("Cache path cannot contain '..'"); if (!cache_link_dir.empty() && cache_link_dir != "." 
&& cache_link_dir != "drain") { while (cache_link_dir.rfind("/") == cache_link_dir.length()-1) cache_link_dir = cache_link_dir.substr(0, cache_link_dir.length()-1); if (cache_link_dir[0] != '/') throw CacheConfigException("Cache link path must start with '/'"); if (cache_link_dir.find("..") != std::string::npos) throw CacheConfigException("Cache link path cannot contain '..'"); } // add this cache to our list std::string cache = cache_dir; bool isDrainingCache = false; // check if the cache dir needs to be drained if (cache_link_dir == "drain") { cache = cache_dir.substr(0, cache_dir.find(' ')); cache_link_dir = ""; isDrainingCache = true; } if (!cache_link_dir.empty()) cache += " "+cache_link_dir; if (isDrainingCache) _draining_cache_dirs.push_back(cache); else _cache_dirs.push_back(cache); } else if(command == "cachesize") { std::string max_s = Arc::ConfigIni::NextArg(rest); if(max_s.length() == 0) continue; std::string min_s = Arc::ConfigIni::NextArg(rest); if(min_s.length() == 0) throw CacheConfigException("Not enough parameters in cachesize parameter"); off_t max_i; if(!Arc::stringto(max_s,max_i)) throw CacheConfigException("bad number in cachesize parameter"); if (max_i > 100 || max_i < 0) throw CacheConfigException("max cache size must be between 0 and 100"); _cache_max = max_i; off_t min_i; if(!Arc::stringto(min_s,min_i)) throw CacheConfigException("bad number in cachesize parameter"); if (min_i > 100 || min_i < 0) throw CacheConfigException("min cache size must be between 0 and 100"); if (min_i >= max_i) throw CacheConfigException("max cache size must be greater than min size"); _cache_min = min_i; } else if(command == "cachelogfile") { std::string logfile = Arc::ConfigIni::NextArg(rest); if (logfile.length() < 2 || logfile[0] != '/' || logfile[logfile.length()-1] == '/') throw CacheConfigException("Bad filename in cachelogfile parameter"); _log_file = logfile; } else if(command == "cacheloglevel") { std::string log_level = Arc::ConfigIni::NextArg(rest); if(log_level.length() == 0) throw CacheConfigException("No value specified in cacheloglevel"); off_t level_i; if(!Arc::stringto(log_level, level_i)) throw CacheConfigException("bad number in cacheloglevel parameter"); // manual conversion from int to log level switch (level_i) { case 0: { _log_level = "FATAL"; }; break; case 1: { _log_level = "ERROR"; }; break; case 2: { _log_level = "WARNING"; }; break; case 3: { _log_level = "INFO"; }; break; case 4: { _log_level = "VERBOSE"; }; break; case 5: { _log_level = "DEBUG"; }; break; default: { _log_level = "INFO"; }; break; } } else if(command == "cachelifetime") { std::string lifetime = Arc::ConfigIni::NextArg(rest); if(lifetime.length() != 0) { _lifetime = lifetime; } } else if(command == "cacheshared") { std::string cache_shared = Arc::ConfigIni::NextArg(rest); if (cache_shared == "yes") { _cache_shared = true; } else if (cache_shared != "no") { throw CacheConfigException("Bad value in cacheshared parameter"); } } else if (command == "cachespacetool") { _cache_space_tool = rest; } else if (command == "cachecleantimeout") { std::string timeout = Arc::ConfigIni::NextArg(rest); if(timeout.length() == 0) continue; if(!Arc::stringto(timeout, _clean_timeout)) throw CacheConfigException("bad number in cachecleantimeout parameter"); } else if (command == "cacheaccess") { Arc::RegularExpression regexp(Arc::ConfigIni::NextArg(rest)); if (!regexp.isOk()) throw CacheConfigException("Bad regexp " + regexp.getPattern() + " in cacheaccess"); std::string cred_type(Arc::ConfigIni::NextArg(rest)); 
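// Illustrative note, not part of the original source: as parsed here, a
// cacheaccess option carries three fields - a URL regular expression, a
// credential attribute type and a regular expression for the attribute value.
// A hypothetical ini line could look like
//   cacheaccess=gsiftp://se\.example\.org/.* voms:vo myvo
// meaning credentials whose attribute of the named type matches the last
// pattern may read cached files whose URL matches the first pattern.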
if (cred_type.empty()) throw CacheConfigException("Missing credential type in cacheaccess"); Arc::RegularExpression cred_value(rest); if (!cred_value.isOk()) throw CacheConfigException("Missing credential value in cacheaccess"); struct CacheAccess ca; ca.regexp = regexp; ca.cred_type = cred_type; ca.cred_value = cred_value; _cache_access.push_back(ca); } } } void CacheConfig::parseXMLConf(const Arc::XMLNode& cfg) { /* control controlDir sessionRootDir cache location path link remotelocation path link highWatermark lowWatermark cacheLifetime cacheLogFile cacheLogLevel cacheCleanTimeout cacheShared cacheSpaceTool defaultTTL defaultTTR maxReruns noRootPower */ Arc::XMLNode control_node = cfg; if(control_node.Name() != "control") { control_node = cfg["control"]; } if (!control_node) throw CacheConfigException("No control element found in configuration"); Arc::XMLNode cache_node = control_node["cache"]; if (!cache_node) return; // no cache configured Arc::XMLNode location_node = cache_node["location"]; for(;location_node;++location_node) { std::string cache_dir = location_node["path"]; std::string cache_link_dir = location_node["link"]; if(cache_dir.length() == 0) throw CacheConfigException("Missing path in cache location element"); // validation of paths while (cache_dir.length() > 1 && cache_dir.rfind("/") == cache_dir.length()-1) cache_dir = cache_dir.substr(0, cache_dir.length()-1); if (cache_dir[0] != '/') throw CacheConfigException("Cache path must start with '/'"); if (cache_dir.find("..") != std::string::npos) throw CacheConfigException("Cache path cannot contain '..'"); if (!cache_link_dir.empty() && cache_link_dir != "." && cache_link_dir != "drain") { while (cache_link_dir.rfind("/") == cache_link_dir.length()-1) cache_link_dir = cache_link_dir.substr(0, cache_link_dir.length()-1); if (cache_link_dir[0] != '/') throw CacheConfigException("Cache link path must start with '/'"); if (cache_link_dir.find("..") != std::string::npos) throw CacheConfigException("Cache link path cannot contain '..'"); } // add this cache to our list std::string cache = cache_dir; bool isDrainingCache = false; // check if the cache dir needs to be drained if (cache_link_dir == "drain") { cache = cache_dir.substr(0, cache_dir.find (" ")); cache_link_dir = ""; isDrainingCache = true; } if (!cache_link_dir.empty()) cache += " "+cache_link_dir; // TODO: handle paths with spaces if(isDrainingCache) _draining_cache_dirs.push_back(cache); else _cache_dirs.push_back(cache); } Arc::XMLNode high_node = cache_node["highWatermark"]; Arc::XMLNode low_node = cache_node["lowWatermark"]; if (high_node && !low_node) { throw CacheConfigException("missing lowWatermark parameter"); } else if (low_node && !high_node) { throw CacheConfigException("missing highWatermark parameter"); } else if (low_node && high_node) { off_t max_i; if(!Arc::stringto((std::string)high_node,max_i)) throw CacheConfigException("bad number in highWatermark parameter"); if (max_i > 100) throw CacheConfigException("number is too high in highWatermark parameter"); _cache_max = max_i; off_t min_i; if(!Arc::stringto((std::string)low_node,min_i)) throw CacheConfigException("bad number in lowWatermark parameter"); if (min_i > 100) throw CacheConfigException("number is too high in lowWatermark parameter"); if (min_i >= max_i) throw CacheConfigException("highWatermark must be greater than lowWatermark"); _cache_min = min_i; } std::string cache_log_file = cache_node["cacheLogFile"]; if (!cache_log_file.empty()) { if (cache_log_file.length() < 2 || 
cache_log_file[0] != '/' || cache_log_file[cache_log_file.length()-1] == '/') throw CacheConfigException("Bad filename in cachelogfile parameter"); _log_file = cache_log_file; } std::string cache_log_level = cache_node["cacheLogLevel"]; if (!cache_log_level.empty()) _log_level = cache_log_level; std::string cache_lifetime = cache_node["cacheLifetime"]; if (!cache_lifetime.empty()) _lifetime = cache_lifetime; std::string cache_shared = cache_node["cacheShared"]; if (cache_shared == "yes") { _cache_shared = true; } std::string cache_space_tool = cache_node["cacheSpaceTool"]; if (!cache_space_tool.empty()) { _cache_space_tool = cache_space_tool; } std::string clean_timeout = cache_node["cacheCleanTimeout"]; if (!clean_timeout.empty()) { if(!Arc::stringto(clean_timeout, _clean_timeout)) throw CacheConfigException("bad number in cacheCleanTimeout parameter"); } Arc::XMLNode remote_location_node = cache_node["remotelocation"]; for(;remote_location_node;++remote_location_node) { std::string cache_dir = remote_location_node["path"]; std::string cache_link_dir = remote_location_node["link"]; if(cache_dir.length() == 0) throw CacheConfigException("Missing path in remote cache location element"); // validation of paths while (cache_dir.length() > 1 && cache_dir.rfind("/") == cache_dir.length()-1) cache_dir = cache_dir.substr(0, cache_dir.length()-1); if (cache_dir[0] != '/') throw CacheConfigException("Remote cache path must start with '/'"); if (cache_dir.find("..") != std::string::npos) throw CacheConfigException("Remote cache path cannot contain '..'"); if (!cache_link_dir.empty() && cache_link_dir != "." && cache_link_dir != "drain" && cache_link_dir != "replicate") { while (cache_link_dir.rfind("/") == cache_link_dir.length()-1) cache_link_dir = cache_link_dir.substr(0, cache_link_dir.length()-1); if (cache_link_dir[0] != '/') throw CacheConfigException("Remote cache link path must start with '/'"); if (cache_link_dir.find("..") != std::string::npos) throw CacheConfigException("Remote cache link path cannot contain '..'"); } // add this cache to our list std::string cache = cache_dir; bool isDrainingCache = false; // check if the cache dir needs to be drained if (cache_link_dir == "drain") { cache = cache_dir.substr(0, cache_dir.find (" ")); cache_link_dir = ""; isDrainingCache = true; } if (!cache_link_dir.empty()) cache += " "+cache_link_dir; // TODO: handle paths with spaces if(isDrainingCache) _draining_cache_dirs.push_back(cache); else _remote_cache_dirs.push_back(cache); } } void CacheConfig::substitute(const GMConfig& config, const Arc::User& user) { for (std::vector::iterator i = _cache_dirs.begin(); i != _cache_dirs.end(); ++i) { config.Substitute(*i, user); } for (std::vector::iterator i = _remote_cache_dirs.begin(); i != _remote_cache_dirs.end(); ++i) { config.Substitute(*i, user); } for (std::vector::iterator i = _draining_cache_dirs.begin(); i != _draining_cache_dirs.end(); ++i) { config.Substitute(*i, user); } } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/PaxHeaders.7502/GMConfig.h0000644000000000000000000000012413065016642025712 xustar000000000000000027 mtime=1490296226.623926 27 atime=1513200576.114721 30 ctime=1513200662.902782988 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/GMConfig.h0000644000175000002070000003322013065016642025757 0ustar00mockbuildmock00000000000000#ifndef GMCONFIG_H_ #define GMCONFIG_H_ #include #include #include #include #include #include #include "CacheConfig.h" namespace ARex { // Forward declarations for 
classes for which this is just a container class JobLog; class JobsMetrics; class ContinuationPlugins; class RunPlugin; class DelegationStores; /// Configuration information related to the grid manager part of A-REX. /** * This class contains all configuration variables related to grid-manager. It * also acts as a container for objects which are used in different parts of * A-REX. Therefore, since this class contains pointers to complex objects, it * cannot be copied and hence the copy constructor and assignment operator are * private. Those pointers should be managed outside this class. GMConfig * should be instantiated once when the grid-manager is initialised and only * destroyed when the GM has finished. Ideally this would be a singleton but * that would prevent running multiple A-REXes in the same container. * * Substitutions are not done while parsing the configuration, as * substitution variables can change depending on the job. Therefore paths * are stored in their raw format, unsubstituted. The exception is the * control directory which cannot change and is substituted during parsing, * and helper options. Substitution of other variables should be done as * necessary using Substitute(). */ class GMConfig { // Configuration parser which sets values for members of this class friend class CoreConfig; // Parser of data-staging configuration which uses this class' values as default friend class StagingConfig; public: /// Different options for fixing directories enum fixdir_t { fixdir_always, fixdir_missing, fixdir_never }; enum deleg_db_t { deleg_db_bdb, deleg_db_sqlite }; /// Use given (or guessed if not given) configuration file. /** * Guessing uses $ARC_CONFIG, $ARC_LOCATION/etc/arc.conf or the default * location /etc/arc.conf. Load() should then be used to parse the * configuration and fill member variables. * @param conffile Path to configuration file, will be guessed if empty */ GMConfig(const std::string& conffile=""); /// Use the given XMLNode as a source of information instead of a file. GMConfig(const Arc::XMLNode& node); /// Load configuration from file or XML node into members of this object. /// Returns false if errors are found during parsing. bool Load(); /// Print a summary of configuration to stderr void Print() const; /// Get path to configuration file const std::string& ConfigFile() const { return conffile; } /// Set path to configuration file void SetConfigFile(const std::string& file) { conffile = file; } /// Returns true if configuration file is temporary bool ConfigIsTemp() const { return conffile_is_temp; } /// Sets whether configuration file is temporary void SetConfigIsTemp(bool temp) { conffile_is_temp = temp; } /// Set XML node with configuration (corresponding to ) void SetXMLNode(const Arc::XMLNode& node) { xml_cfg = node; } /// Create control structure with permissions depending on fixdir_t value. /// Typically called at A-REX service creation. bool CreateControlDirectory() const; /// Create session directory with correct permissions. Typically called when /// a new job is created and after all substitutions have been done. Creates /// session root if it does not already exist. bool CreateSessionDirectory(const std::string& dir, const Arc::User& user) const; /// Start/restart all helper processes bool RunHelpers(); /// Substitute characters in param specified by % with real values. An /// optional User can be specified for the user-related substitutions. 
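/// (Illustrative sketch, not part of the original header: since parsed paths
/// are kept unsubstituted, a typical caller expands them per job, e.g.
///   std::string dir = config.SessionRoot(job_id);  // raw path, may hold % placeholders
///   config.Substitute(dir, job_user);              // expand user-dependent values
/// where job_id and job_user stand for a hypothetical job identifier and the
/// Arc::User the job is mapped to.)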
bool Substitute(std::string& param, const Arc::User& user=Arc::User()) const; /// Send signals to helpers to shut them down cleanly void PrepareToDestroy(); /// Set control directory void SetControlDir(const std::string &dir); /// Set session root dir void SetSessionRoot(const std::string &dir); /// Set multiple session root dirs void SetSessionRoot(const std::vector<std::string> &dirs); /// Set uid and gids used by other processes sharing information with A-REX void SetShareID(const Arc::User& share_user); /// Set default queue void SetDefaultQueue(const std::string& queue) { default_queue = queue; } /// Certificates directory location const std::string& CertDir() const { return cert_dir; } /// VOMS lsc files root directory location const std::string& VomsDir() const { return voms_dir; } /// Location of RTE setup scripts const std::string& RTEDir() const { return rte_dir; } /// Directory storing delegations std::string DelegationDir() const; /// Database type to use for delegation storage deleg_db_t DelegationDBType() const; /// Helper(s) log file path const std::string& HelperLog() const { return helper_log; } /// email address of person responsible for this ARC installation const std::string& SupportMailAddress() const { return support_email_address; } /// Set JobLog object void SetJobLog(JobLog* log) { job_log = log; } /// Set JobPerfLog object void SetJobPerfLog(Arc::JobPerfLog* log) { job_perf_log = log; } /// Set JobsMetrics object void SetJobsMetrics(JobsMetrics* metrics) { jobs_metrics = metrics; } /// Set ContinuationPlugins (plugins run at state transitions) void SetContPlugins(ContinuationPlugins* plugins) { cont_plugins = plugins; } /// Set RunPlugin (plugin used to acquire local credentials) void SetCredPlugin(RunPlugin* plugin) { cred_plugin = plugin; } /// Set DelegationStores object void SetDelegations(ARex::DelegationStores* stores) { delegations = stores; } /// JobLog object JobLog* GetJobLog() const { return job_log; } /// JobsMetrics object JobsMetrics* GetJobsMetrics() const { return jobs_metrics; } /// JobPerfLog object Arc::JobPerfLog* GetJobPerfLog() const { return job_perf_log; } /// Plugins run at state transitions ContinuationPlugins* GetContPlugins() const { return cont_plugins; } /// Plugin used to acquire local credentials RunPlugin* CredPlugin() const { return cred_plugin; } /// DelegationStores object ARex::DelegationStores* GetDelegations() const { return delegations; } /// Control directory const std::string & ControlDir() const { return control_dir; } /// Session root directory corresponding to given job ID. If the session /// dir corresponding to job_id is not found an empty string is returned. 
std::string SessionRoot(const std::string& job_id) const; /// Session directories const std::vector<std::string> & SessionRoots() const { return session_roots; } /// Session directories that can be used for new jobs const std::vector<std::string> & SessionRootsNonDraining() const { return session_roots_non_draining; } /// Base scratch directory for job execution on node const std::string & ScratchDir() const { return scratch_dir; } /// Whether access to session dir must be performed under mapped uid bool StrictSession() const { return strict_session; } /// Cache configuration const CacheConfig & CacheParams() const { return cache_params; } /// URL of cluster's headnode const std::string & HeadNode() const { return headnode; } /// Whether ARC (BES) WS-interface is enabled bool ARCInterfaceEnabled() const { return enable_arc_interface; } /// Whether EMI-ES interface is enabled bool EMIESInterfaceEnabled() const { return enable_emies_interface; } /// GridFTP job interface endpoint const std::string & GridFTPEndpoint() const { return gridftp_endpoint; } /// A-REX WS-interface job submission endpoint const std::string & AREXEndpoint() const { return arex_endpoint; } /// Default LRMS const std::string & DefaultLRMS() const { return default_lrms; } /// Default queue const std::string & DefaultQueue() const { return default_queue; } /// All configured queues const std::list<std::string>& Queues() const { return queues; } /// Username of user running A-REX const std::string & UnixName() const { return gm_user.Name(); } /// Groups allowed to submit when general job submission is disabled const std::string & AllowSubmit() const { return allow_submit; } /// Length of time to keep session dir after job finishes time_t KeepFinished() const { return keep_finished; } /// Length of time to keep control information after job finishes time_t KeepDeleted() const { return keep_deleted; } /// Maximum number of job re-runs allowed int Reruns() const { return reruns; } /// Strategy for fixing directories fixdir_t FixDirectories() const { return fixdir; } /// Maximum time for A-REX to wait between job processing loops unsigned int WakeupPeriod() const { return wakeup_period; } /// Max jobs being processed (from PREPARING to FINISHING) int MaxJobs() const { return max_jobs; }; /// Max jobs in the LRMS int MaxRunning() const { return max_jobs_running; } /// Max jobs being processed per-DN int MaxPerDN() const { return max_jobs_per_dn; } /// Max total jobs in the system int MaxTotal() const { return max_jobs_total; } /// Max submit/cancel scripts int MaxScripts() const { return max_scripts; } /// Returns true if the shared uid matches the given uid bool MatchShareUid(uid_t suid) const { return ((share_uid==0) || (share_uid==suid)); }; /// Returns true if any of the shared gids matches the given gid bool MatchShareGid(gid_t sgid) const; /// Returns forced VOMS attributes for users which have none const std::string & ForcedVOMS(const char * queue = "") const; /// Returns list of authorized VOs for specified queue const std::list<std::string> & AuthorizedVOs(const char * queue) const; private: /// Class to run external processes (helper) class ExternalHelper { private: /// Command being run std::string command; /// Object representing running process Arc::Run *proc; public: ExternalHelper(const std::string &cmd); ~ExternalHelper(); /// Start process if it is not running yet bool run(const GMConfig& config); /// Stop process if it is running void stop(); }; /// Configuration file std::string conffile; /// Whether configuration file is temporary bool conffile_is_temp; /// 
Configuration passed as an XMLNode Arc::XMLNode xml_cfg; /// For logging job information to external logging service JobLog* job_log; /// For reporting job metrics to ganglia JobsMetrics* jobs_metrics; /// For logging performance/profiling information Arc::JobPerfLog* job_perf_log; /// Plugins run at certain state changes ContinuationPlugins* cont_plugins; /// Plugin for acquiring local credentials RunPlugin* cred_plugin; /// Delegated credentials stored by A-REX // TODO: this should go away after proper locking in DelegationStore is implemented ARex::DelegationStores* delegations; /// Certificates directory std::string cert_dir; /// VOMS LSC files directory std::string voms_dir; /// RTE directory std::string rte_dir; /// email address for support std::string support_email_address; /// helper(s) log path std::string helper_log; /// Scratch directory std::string scratch_dir; /// Directory where files explaining jobs are stored std::string control_dir; /// Directories where directories used to run jobs are created std::vector<std::string> session_roots; /// Session directories allowed for new jobs (i.e. not draining) std::vector<std::string> session_roots_non_draining; /// Cache information CacheConfig cache_params; /// URL of the cluster's headnode std::string headnode; /// Default LRMS and queue to use std::string default_lrms; std::string default_queue; /// All configured queues std::list<std::string> queues; /// User running A-REX Arc::User gm_user; /// uid and gid(s) running other ARC processes that share files with A-REX uid_t share_uid; std::list<gid_t> share_gids; /// How long jobs are kept after job finished time_t keep_finished; time_t keep_deleted; /// Whether session must always be accessed under mapped user's uid bool strict_session; /// Strategy for fixing directories fixdir_t fixdir; /// Maximum number of times a job is allowed to be rerun int reruns; /// Maximum time for A-REX to wait between each loop processing jobs unsigned int wakeup_period; /// Groups allowed to submit while job submission is disabled std::string allow_submit; /// List of associated external processes std::list helpers; /// Maximum number of jobs running in the LRMS int max_jobs_running; /// Maximum total jobs in the system, including FINISHED and DELETED int max_jobs_total; /// Maximum jobs being processed (between PREPARING and FINISHING) int max_jobs; /// Maximum jobs running per DN int max_jobs_per_dn; /// Maximum submit/cancel scripts running int max_scripts; /// Whether WS-interface is enabled bool enable_arc_interface; /// Whether EMI-ES interface is enabled bool enable_emies_interface; /// GridFTP job endpoint std::string gridftp_endpoint; /// WS-interface endpoint std::string arex_endpoint; /// Delegation db type deleg_db_t deleg_db; /// Forced VOMS attribute for non-VOMS credentials per queue std::map<std::string, std::string> forced_voms; /// VOs authorized per queue std::map<std::string, std::list<std::string> > authorized_vos; /// Logger object static Arc::Logger logger; /// Set defaults for all configuration parameters. Called by constructors. void SetDefaults(); /// Assignment operator and copy constructor are private to prevent copying. 
GMConfig& operator=(const GMConfig& conf); GMConfig(const GMConfig& conf); }; } // namespace ARex #endif /* GMCONFIG_H_ */ nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/PaxHeaders.7502/CacheConfig.h0000644000000000000000000000012412771224333026413 xustar000000000000000027 mtime=1474635995.909107 27 atime=1513200576.111721 30 ctime=1513200662.909783073 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/CacheConfig.h0000644000175000002070000000713712771224333026470 0ustar00mockbuildmock00000000000000#ifndef __GM_CONFIG_CACHE_H__ #define __GM_CONFIG_CACHE_H__ #include #include #include #include namespace ARex { class GMConfig; /** * Exception thrown by constructor caused by bad cache params in conf file */ class CacheConfigException : public std::exception { private: std::string _desc; public: CacheConfigException(std::string desc = ""): _desc(desc) {}; ~CacheConfigException() throw() {}; virtual const char* what() const throw() {return _desc.c_str();}; }; /** * Reads conf file and provides methods to obtain cache info from it. * Methods of this class may throw CacheConfigException. */ class CacheConfig { public: /// A struct defining a URL pattern and credentials which can access it struct CacheAccess { Arc::RegularExpression regexp; std::string cred_type; Arc::RegularExpression cred_value; }; private: /** * List of (cache dir [link dir]) */ std::vector _cache_dirs; /** * List of (cache dir [link dir]) for remote caches */ std::vector _remote_cache_dirs; int _cache_max; int _cache_min; /** * Cache directories that are needed to be drained **/ std::vector _draining_cache_dirs; /** * Logfile for cache cleaning messages */ std::string _log_file; /** * cache-clean log level */ std::string _log_level; /** * Lifetime of files in cache */ std::string _lifetime; /** * Whether the cache is shared with other data on the file system */ bool _cache_shared; /** * User-specified tool for getting space information for cleaning tool */ std::string _cache_space_tool; /** * Timeout for cleaning process */ int _clean_timeout; /** * List of CacheAccess structs describing who can access what URLs in cache */ std::list _cache_access; /** * Parsers for the two different conf styles */ void parseINIConf(Arc::ConfigIni& cf); void parseXMLConf(const Arc::XMLNode& cfg); public: /** * Create a new CacheConfig instance. Read the config file and fill in * private member variables with cache parameters. */ CacheConfig(const GMConfig& config); /** * Create a new CacheConfig instance. Read the XML config tree and fill in * private member variables with cache parameters. 
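* (Illustrative sketch, not part of the original header: however it is
* constructed, the object is typically queried along the lines of
*   CacheConfig cache(cfg);                               // throws CacheConfigException on bad parameters
*   std::vector<std::string> dirs = cache.getCacheDirs(); // "cache_dir [link_dir]" entries
*   bool clean = cache.cleanCache();                      // true when the high watermark is between 1 and 99
* where cfg is the XML configuration node mentioned above.)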
*/ CacheConfig(const Arc::XMLNode& cfg); /** * Empty CacheConfig */ CacheConfig(): _cache_max(0), _cache_min(0), _cache_shared(false), _clean_timeout(0) {}; std::vector getCacheDirs() const { return _cache_dirs; }; std::vector getRemoteCacheDirs() const { return _remote_cache_dirs; }; std::vector getDrainingCacheDirs() const { return _draining_cache_dirs; }; /// Substitute all cache paths, with information given in user if necessary void substitute(const GMConfig& config, const Arc::User& user); int getCacheMax() const { return _cache_max; }; int getCacheMin() const { return _cache_min; }; bool cleanCache() const { return (_cache_max > 0 && _cache_max < 100); }; std::string getLogFile() const { return _log_file; }; std::string getLogLevel() const { return _log_level; }; std::string getLifeTime() const { return _lifetime; }; bool getCacheShared() const { return _cache_shared; }; std::string getCacheSpaceTool() const { return _cache_space_tool; }; int getCleanTimeout() const { return _clean_timeout; }; const std::list& getCacheAccess() const { return _cache_access; }; }; } // namespace ARex #endif /*__GM_CONFIG_CACHE_H__*/ nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/PaxHeaders.7502/StagingConfig.cpp0000644000000000000000000000012413131637327027341 xustar000000000000000027 mtime=1499938519.516258 27 atime=1513200576.115721 30 ctime=1513200662.910783086 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/StagingConfig.cpp0000644000175000002070000002645213131637327027417 0ustar00mockbuildmock00000000000000#include #include #include #include "StagingConfig.h" namespace ARex { Arc::Logger StagingConfig::logger(Arc::Logger::getRootLogger(), "StagingConfig"); static bool elementtoboollogged(Arc::XMLNode pnode,const char* ename,bool& val,Arc::Logger& logger) { if(Arc::Config::elementtobool(pnode, ename, val)) return true; logger.msg(Arc::ERROR,"wrong boolean in %s",ename); return false; } template static bool elementtointlogged(Arc::XMLNode pnode,const char* ename,T& val,Arc::Logger& logger) { if(Arc::Config::elementtoint(pnode, ename, val)) return true; logger.msg(Arc::ERROR,"wrong number in %s",ename); return false; } StagingConfig::StagingConfig(const GMConfig& config): max_delivery(10), max_processor(10), max_emergency(1), max_prepared(200), min_speed(0), min_speed_time(300), min_average_speed(0), max_inactivity_time(300), max_retries(10), passive(false), secure(false), local_transfer(false), httpgetpartial(true), remote_size_limit(0), use_host_cert_for_remote_delivery(false), log_level(Arc::Logger::getRootLogger().getThreshold()), valid(true) { perf_log.SetOutput("/var/log/arc/perfdata/data.perflog"); // For ini-style, use [data-staging] section, for xml use node Arc::ConfigFile cfile; if (!cfile.open(config.ConfigFile())) { logger.msg(Arc::ERROR, "Can't read configuration file"); valid = false; return; } // detect type of file switch(cfile.detect()) { case Arc::ConfigFile::file_XML: { Arc::XMLNode cfg; if (!cfg.ReadFromStream(cfile)) { logger.msg(Arc::ERROR, "Can't interpret configuration file as XML"); valid = false; } else if (!readStagingConf(cfg)) { logger.msg(Arc::ERROR, "Configuration error"); valid = false; } } break; case Arc::ConfigFile::file_INI: { if (!readStagingConf(cfile)) { logger.msg(Arc::ERROR, "Configuration error"); valid = false; } } break; default: { logger.msg(Arc::ERROR, "Can't recognize type of configuration file"); valid = false; } break; } cfile.close(); } bool StagingConfig::readStagingConf(Arc::ConfigFile& cfile) { Arc::ConfigIni cf(cfile); 
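// Illustrative sketch, not part of the original source: the loop below maps
// ini options from the [data-staging] and [common] sections onto the members
// initialised in the constructor. A hypothetical fragment such as
//   [data-staging]
//   maxdelivery=10
//   maxprocessor=10
//   speedcontrol=0 300 100 300
//   deliveryservice=https://host.example.org:443/datadeliveryservice
//   definedshare=myshare 80
// would set the delivery/processor slot counts, the minimum-speed and
// inactivity limits, one remote delivery service URL and one prioritised
// transfer share; options that are absent keep the defaults set in the
// constructor.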
cf.AddSection("data-staging"); cf.AddSection("common"); for(;;) { std::string rest; std::string command; cf.ReadNext(command,rest); if (command.empty()) break; // eof if (command == "maxdelivery") { if (!paramToInt(Arc::ConfigIni::NextArg(rest), max_delivery)) { logger.msg(Arc::ERROR, "Bad number in maxdelivery"); return false; } } else if (command == "maxemergency") { if (!paramToInt(Arc::ConfigIni::NextArg(rest), max_emergency)) { logger.msg(Arc::ERROR, "Bad number in maxemergency"); return false; } } else if (command == "maxprocessor") { if (!paramToInt(Arc::ConfigIni::NextArg(rest), max_processor)) { logger.msg(Arc::ERROR, "Bad number in maxprocessor"); return false; } } else if (command == "maxprepared") { if (!paramToInt(Arc::ConfigIni::NextArg(rest), max_prepared) || max_prepared <= 0) { logger.msg(Arc::ERROR, "Bad number in maxprepared"); return false; } } else if (command == "maxtransfertries") { if (!paramToInt(Arc::ConfigIni::NextArg(rest), max_retries)) { logger.msg(Arc::ERROR, "Bad number in maxtransfertries"); return false; } } else if (command == "speedcontrol") { if (!Arc::stringto(Arc::ConfigIni::NextArg(rest), min_speed) || !Arc::stringto(Arc::ConfigIni::NextArg(rest), min_speed_time) || !Arc::stringto(Arc::ConfigIni::NextArg(rest), min_average_speed) || !Arc::stringto(Arc::ConfigIni::NextArg(rest), max_inactivity_time)) { logger.msg(Arc::ERROR, "Bad number in speedcontrol"); return false; } } else if (command == "sharetype") { share_type = Arc::ConfigIni::NextArg(rest); } else if (command == "definedshare") { std::string share = Arc::ConfigIni::NextArg(rest); int priority = 0; if (!paramToInt(Arc::ConfigIni::NextArg(rest), priority) || priority <= 0) { logger.msg(Arc::ERROR, "Bad number in definedshare %s", share); return false; } defined_shares[share] = priority; } else if (command == "deliveryservice") { std::string url = Arc::ConfigIni::NextArg(rest); Arc::URL u(url); if (!u) { logger.msg(Arc::ERROR, "Bad URL in deliveryservice: %s", url); return false; } delivery_services.push_back(u); } else if (command == "localdelivery") { std::string use_local = Arc::ConfigIni::NextArg(rest); if (use_local == "yes") delivery_services.push_back(Arc::URL("file:/local")); } else if (command == "remotesizelimit") { if (!Arc::stringto(Arc::ConfigIni::NextArg(rest), remote_size_limit)) { logger.msg(Arc::ERROR, "Bad number in remotesizelimit"); return false; } } else if (command == "passivetransfer") { std::string pasv = Arc::ConfigIni::NextArg(rest); if (pasv == "yes") passive = true; } else if (command == "securetransfer") { std::string sec = Arc::ConfigIni::NextArg(rest); if (sec == "yes") secure = true; } else if (command == "localtransfer") { std::string sec = Arc::ConfigIni::NextArg(rest); if (sec == "yes") local_transfer = true; } else if (command == "httpgetpartial") { std::string partial = Arc::ConfigIni::NextArg(rest); if (partial == "no") httpgetpartial = false; } else if (command == "preferredpattern") { preferred_pattern = Arc::ConfigIni::NextArg(rest); } else if (command == "usehostcert") { std::string use_host_cert = Arc::ConfigIni::NextArg(rest); if (use_host_cert == "yes") use_host_cert_for_remote_delivery = true; } else if (command == "debug") { unsigned int level; if (!Arc::strtoint(Arc::ConfigIni::NextArg(rest), level)) { logger.msg(Arc::ERROR, "Bad value for debug"); return false; } log_level = Arc::old_level_to_level(level); } else if (command == "dtrlog") { dtr_log = Arc::ConfigIni::NextArg(rest); } else if (command == "central_logfile") { dtr_central_log = 
Arc::ConfigIni::NextArg(rest); } else if (command == "acix_endpoint") { std::string endpoint(Arc::ConfigIni::NextArg(rest)); if (!Arc::URL(endpoint) || endpoint.find("://") == std::string::npos) { logger.msg(Arc::ERROR, "Bad URL in acix_endpoint"); return false; } endpoint.replace(0, endpoint.find("://"), "acix"); acix_endpoint = endpoint; } else if (command == "perflogdir") { perf_log.SetOutput(Arc::ConfigIni::NextArg(rest) + "/data.perflog"); } else if (command == "enable_perflog_reporting") { std::string enableperflog = Arc::ConfigIni::NextArg(rest); if (enableperflog == "yes") perf_log.SetEnabled(true); } } return true; } bool StagingConfig::readStagingConf(const Arc::XMLNode& cfg) { /* DTR maxDelivery maxProcessor maxEmergency maxPrepared shareType definedShare name priority deliveryService localDelivery useHostCert logLevel dtrLog centralDTRLog */ Arc::XMLNode tmp_node = cfg["dataTransfer"]["DTR"]; if (tmp_node) { if (!elementtointlogged(tmp_node, "maxDelivery", max_delivery, logger)) return false; if (!elementtointlogged(tmp_node, "maxProcessor", max_processor, logger)) return false; if (!elementtointlogged(tmp_node, "maxEmergency", max_emergency, logger)) return false; if (!elementtointlogged(tmp_node, "maxPrepared", max_prepared, logger)) return false; if (tmp_node["shareType"]) share_type = (std::string)tmp_node["shareType"]; Arc::XMLNode defined_share = tmp_node["definedShare"]; for(;defined_share;++defined_share) { std::string share_name = defined_share["name"]; int share_priority = -1; if (elementtointlogged(defined_share, "priority", share_priority, logger) && (share_priority > 0) && !share_name.empty()) { defined_shares[share_name] = share_priority; } } Arc::XMLNode delivery_service = tmp_node["deliveryService"]; for(;delivery_service;++delivery_service) { Arc::URL u((std::string)delivery_service); if (!u) { logger.msg(Arc::ERROR, "Bad URL in deliveryService: %s", std::string(delivery_service)); return false; } delivery_services.push_back(u); } bool use_local_delivery = false; if (!elementtoboollogged(tmp_node,"localDelivery",use_local_delivery,logger)) return false; if (use_local_delivery) delivery_services.push_back(Arc::URL("file:/local")); if (tmp_node["remoteSizeLimit"]) { if (!Arc::stringto((std::string)tmp_node["remoteSizeLimit"], remote_size_limit)) return false; } if (!elementtoboollogged(tmp_node, "localDelivery", use_host_cert_for_remote_delivery, logger)) return false; if (tmp_node["logLevel"]) { if (!Arc::istring_to_level((std::string)tmp_node["logLevel"], log_level)) { logger.msg(Arc::ERROR, "Bad value for logLevel"); return false; } } if (tmp_node["dtrLog"]) dtr_log = (std::string)tmp_node["dtrLog"]; if (tmp_node["centralDTRLog"]) dtr_central_log = (std::string)tmp_node["centraDTRLog"]; } /* dataTransfer secureTransfer passiveTransfer localTransfer httpGetPartial preferredPattern acixEndpoint timeouts minSpeed minSpeedTime minAverageSpeed maxInactivityTime maxRetries mapURL (link) from to */ tmp_node = cfg["dataTransfer"]; if (tmp_node) { Arc::XMLNode to_node = tmp_node["timeouts"]; if (to_node) { if (!elementtointlogged(tmp_node, "minSpeed", min_speed, logger)) return false; if (!elementtointlogged(tmp_node, "minAverageSpeed", min_average_speed, logger)) return false; if (!elementtointlogged(tmp_node, "minSpeedTime", min_speed_time, logger)) return false; if (!elementtointlogged(tmp_node, "maxInactivityTime", max_inactivity_time, logger)) return false; } if (!elementtoboollogged(tmp_node, "passiveTransfer", passive, logger)) return false; if 
(!elementtoboollogged(tmp_node, "secureTransfer", secure, logger)) return false; if (!elementtoboollogged(tmp_node, "localTransfer", local_transfer, logger)) return false; if (!elementtoboollogged(tmp_node, "httpGetPartial", httpgetpartial, logger)) return false; if (!elementtointlogged(tmp_node, "maxRetries", max_retries, logger)) return false; if (tmp_node["preferredPattern"]) preferred_pattern = (std::string)(tmp_node["preferredPattern"]); if (tmp_node["acixEndpoint"]) { std::string endpoint((std::string)(tmp_node["acixEndPoint"])); if (!Arc::URL(endpoint) || endpoint.find("://") == std::string::npos) { logger.msg(Arc::ERROR, "Bad URL in acix_endpoint"); return false; } endpoint.replace(0, endpoint.find("://"), "acix"); acix_endpoint = endpoint; } } return true; } bool StagingConfig::paramToInt(const std::string& param, int& value) { int i; if (!Arc::stringto(param, i)) return false; if (i < 0) i=-1; value = i; return true; } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/PaxHeaders.7502/UrlMapConfig.h0000644000000000000000000000012412046704323026605 xustar000000000000000027 mtime=1352370387.670578 27 atime=1513200576.111721 30 ctime=1513200662.907783049 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/UrlMapConfig.h0000644000175000002070000000065612046704323026661 0ustar00mockbuildmock00000000000000#ifndef __GM_CONFIG_MAP_H__ #define __GM_CONFIG_MAP_H__ #include #include "GMConfig.h" namespace ARex { /* Look URLMap.h for functionality. This object automatically reads configuration file and fills list of mapping for UrlMap. */ class UrlMapConfig: public Arc::URLMap { public: UrlMapConfig(const GMConfig& config); ~UrlMapConfig(void); }; } // namespace ARex #endif // __GM_CONFIG_MAP_H__ nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/PaxHeaders.7502/CoreConfig.h0000644000000000000000000000012412771224333026300 xustar000000000000000027 mtime=1474635995.909107 27 atime=1513200576.161722 30 ctime=1513200662.906783037 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/CoreConfig.h0000644000175000002070000000167112771224333026352 0ustar00mockbuildmock00000000000000#ifndef __GM_CORE_CONFIG_H__ #define __GM_CORE_CONFIG_H__ #include #include namespace Arc { class XMLNode; } namespace ARex { class GMConfig; /// Parses configuration and fills GMConfig with information class CoreConfig { public: /// Parse config, either ini-style or XML static bool ParseConf(GMConfig& config); private: /// Parse ini-style config from stream cfile static bool ParseConfINI(GMConfig& config, Arc::ConfigFile& cfile); /// Parse config from XML node static bool ParseConfXML(GMConfig& config, const Arc::XMLNode& cfg); /// Function to check that LRMS scripts are available static void CheckLRMSBackends(const std::string& default_lrms); /// Function handle yes/no config commands static bool CheckYesNoCommand(bool& config_param, const std::string& name, std::string& rest); /// Logger static Arc::Logger logger; }; } // namespace ARex #endif // __GM_CORE_CONFIG_H__ nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/PaxHeaders.7502/UrlMapConfig.cpp0000644000000000000000000000012412771224333027143 xustar000000000000000027 mtime=1474635995.909107 27 atime=1513200576.087721 30 ctime=1513200662.906783037 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp0000644000175000002070000000661312771224333027216 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "UrlMapConfig.h" 
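// Illustrative sketch only: the INI branch below reads "copyurl" and "linkurl"
// commands from the [common] or [data-staging] sections. copyurl takes
// (from, to); linkurl takes (from, to [, at]) and "at" falls back to "to" when
// omitted. The URLs and paths here are hypothetical examples:
//
//   copyurl="gsiftp://se.example.org/data /mnt/se/data"
//   linkurl="gsiftp://se.example.org/soft /opt/soft /opt/soft"
//
// The XML branch expresses the same mapping with <mapURL><from>/<to>/<at>
// elements and an optional <link> boolean.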
namespace ARex { static Arc::Logger& glogger = Arc::Logger::getRootLogger(); UrlMapConfig::UrlMapConfig(const GMConfig& config) { Arc::ConfigFile cfile; Arc::ConfigIni* cf = NULL; //if(nordugrid_config_loc().empty()) read_env_vars(true); if(!cfile.open(config.ConfigFile())) { glogger.msg(Arc::ERROR,"Can't open configuration file"); return; }; switch(cfile.detect()) { case Arc::ConfigFile::file_XML: { Arc::XMLNode cfg; if(!cfg.ReadFromStream(cfile)) { glogger.msg(Arc::ERROR,"Can't interpret configuration file as XML"); } else { /* dataTransfer mapURL (link) from to at */ Arc::XMLNode datanode = cfg["dataTransfer"]; if(datanode) { Arc::XMLNode mapnode = datanode["mapURL"]; for(;mapnode;++mapnode) { bool is_link = false; if(!Arc::Config::elementtobool(mapnode,"link",is_link)) { glogger.msg(Arc::ERROR,"Value for 'link' element in mapURL is incorrect"); continue; }; std::string initial = mapnode["from"]; std::string replacement = mapnode["to"]; if(initial.empty()) { glogger.msg(Arc::ERROR,"Missing 'from' element in mapURL"); continue; }; if(replacement.empty()) { glogger.msg(Arc::ERROR,"Missing 'to' element in mapURL"); continue; }; if(is_link) { std::string access = mapnode["at"]; if(access.empty()) access = replacement; add(initial,replacement,access); } else { add(initial,replacement); }; }; }; }; }; break; case Arc::ConfigFile::file_INI: { cf=new Arc::ConfigIni(cfile); cf->AddSection("common"); cf->AddSection("data-staging"); for(;;) { std::string rest; std::string command; cf->ReadNext(command,rest); if(command.length() == 0) break; else if(command == "copyurl") { std::string initial = Arc::ConfigIni::NextArg(rest); std::string replacement = Arc::ConfigIni::NextArg(rest); if((initial.length() == 0) || (replacement.length() == 0)) { glogger.msg(Arc::ERROR,"Not enough parameters in copyurl"); continue; }; add(initial,replacement); } else if(command == "linkurl") { std::string initial = Arc::ConfigIni::NextArg(rest); std::string replacement = Arc::ConfigIni::NextArg(rest); if((initial.length() == 0) || (replacement.length() == 0)) { glogger.msg(Arc::ERROR,"Not enough parameters in linkurl"); continue; }; std::string access = Arc::ConfigIni::NextArg(rest); if(access.length() == 0) access = replacement; add(initial,replacement,access); }; }; }; break; default: { glogger.msg(Arc::ERROR,"Can't recognize type of configuration file"); }; break; }; cfile.close(); if(cf) delete cf; } UrlMapConfig::~UrlMapConfig(void) { } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/PaxHeaders.7502/GMConfig.cpp0000644000000000000000000000012013107553420026236 xustar000000000000000026 mtime=1495193360.27982 27 atime=1513200576.107721 27 ctime=1513200662.903783 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/GMConfig.cpp0000644000175000002070000003471513107553420026321 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "CoreConfig.h" #include "../run/RunParallel.h" #include "GMConfig.h" namespace ARex { // Defaults // default job ttl after finished - 1 week #define DEFAULT_KEEP_FINISHED (7*24*60*60) // default job ttr after deleted - 1 month #define DEFAULT_KEEP_DELETED (30*24*60*60) // default maximal allowed amount of reruns #define DEFAULT_JOB_RERUNS (5) // default wake up period for main job loop #define DEFAULT_WAKE_UP (120) Arc::Logger GMConfig::logger(Arc::Logger::getRootLogger(), "GMConfig"); static std::string empty_string(""); static std::list 
empty_string_list; GMConfig::GMConfig(const std::string& conf): conffile(conf) { SetDefaults(); // If no config file was given, guess it. The order to try is // $ARC_CONFIG, $ARC_LOCATION/etc/arc.conf, /etc/arc.conf struct stat st; if (conffile.empty()) { std::string file = Arc::GetEnv("ARC_CONFIG"); if (Arc::FileStat(file, &st, true)) { conffile = file; return; } file = Arc::ArcLocation::Get() + "/etc/arc.conf"; if (Arc::FileStat(file, &st, true)) { conffile = file; return; } file = "/etc/arc.conf"; if (Arc::FileStat(file, &st, true)) { conffile = file; return; } } } GMConfig::GMConfig(const Arc::XMLNode& node): xml_cfg(node) { SetDefaults(); } void GMConfig::SetDefaults() { conffile_is_temp = false; job_log = NULL; jobs_metrics = NULL; job_perf_log = NULL; cont_plugins = NULL; cred_plugin = NULL; delegations = NULL; share_uid = 0; keep_finished = DEFAULT_KEEP_FINISHED; keep_deleted = DEFAULT_KEEP_DELETED; strict_session = false; fixdir = fixdir_always; reruns = DEFAULT_JOB_RERUNS; wakeup_period = DEFAULT_WAKE_UP; max_jobs_running = -1; max_jobs_total = -1; max_jobs = -1; max_jobs_per_dn = -1; max_scripts = -1; deleg_db = deleg_db_bdb; enable_arc_interface = true; enable_emies_interface = false; cert_dir = Arc::GetEnv("X509_CERT_DIR"); voms_dir = Arc::GetEnv("X509_VOMS_DIR"); } bool GMConfig::Load() { // Call CoreConfig (CoreConfig.h) to fill values in this object return CoreConfig::ParseConf(*this); } void GMConfig::Print() const { for(std::vector::const_iterator i = session_roots.begin(); i != session_roots.end(); ++i) logger.msg(Arc::INFO, "\tSession root dir : %s", *i); logger.msg(Arc::INFO, "\tControl dir : %s", control_dir); logger.msg(Arc::INFO, "\tdefault LRMS : %s", default_lrms); logger.msg(Arc::INFO, "\tdefault queue : %s", default_queue); logger.msg(Arc::INFO, "\tdefault ttl : %u", keep_finished); std::vector conf_caches = cache_params.getCacheDirs(); std::vector remote_conf_caches = cache_params.getRemoteCacheDirs(); if(conf_caches.empty()) { logger.msg(Arc::INFO,"No valid caches found in configuration, caching is disabled"); return; } // list each cache for (std::vector::iterator i = conf_caches.begin(); i != conf_caches.end(); i++) { logger.msg(Arc::INFO, "\tCache : %s", (*i).substr(0, (*i).find(" "))); if ((*i).find(" ") != std::string::npos) logger.msg(Arc::INFO, "\tCache link dir : %s", (*i).substr((*i).find_last_of(" ")+1, (*i).length()-(*i).find_last_of(" ")+1)); } // list each remote cache for (std::vector::iterator i = remote_conf_caches.begin(); i != remote_conf_caches.end(); i++) { logger.msg(Arc::INFO, "\tRemote cache : %s", (*i).substr(0, (*i).find(" "))); if ((*i).find(" ") != std::string::npos) logger.msg(Arc::INFO, "\tRemote cache link: %s", (*i).substr((*i).find_last_of(" ")+1, (*i).length()-(*i).find_last_of(" ")+1)); } if (cache_params.cleanCache()) logger.msg(Arc::INFO, "\tCache cleaning enabled"); else logger.msg(Arc::INFO, "\tCache cleaning disabled"); } void GMConfig::SetControlDir(const std::string &dir) { if (dir.empty()) control_dir = gm_user.Home() + "/.jobstatus"; else control_dir = dir; } void GMConfig::SetSessionRoot(const std::string &dir) { session_roots.clear(); if (dir.empty() || dir == "*") session_roots.push_back(gm_user.Home() + "/.jobs"); else session_roots.push_back(dir); } void GMConfig::SetSessionRoot(const std::vector &dirs) { session_roots.clear(); if (dirs.empty()) { std::string dir; SetSessionRoot(dir); } else { for (std::vector::const_iterator i = dirs.begin(); i != dirs.end(); i++) { if (*i == "*") 
session_roots.push_back(gm_user.Home() + "/.jobs"); else session_roots.push_back(*i); } } } std::string GMConfig::SessionRoot(const std::string& job_id) const { if (session_roots.empty()) return empty_string; if (session_roots.size() == 1 || job_id.empty()) return session_roots[0]; // search for this jobid's session dir struct stat st; for (std::vector::const_iterator i = session_roots.begin(); i != session_roots.end(); i++) { std::string sessiondir(*i + '/' + job_id); if (stat(sessiondir.c_str(), &st) == 0 && S_ISDIR(st.st_mode)) return *i; } return empty_string; // not found } static bool fix_directory(const std::string& path, GMConfig::fixdir_t fixmode, mode_t mode, uid_t uid, gid_t gid) { if (fixmode == GMConfig::fixdir_never) { struct stat st; if (!Arc::FileStat(path, &st, true)) return false; if (!S_ISDIR(st.st_mode)) return false; return true; } else if(fixmode == GMConfig::fixdir_missing) { struct stat st; if (Arc::FileStat(path, &st, true)) { if (!S_ISDIR(st.st_mode)) return false; return true; } } // GMConfig::fixdir_always if (!Arc::DirCreate(path, mode, true)) return false; // Only can switch owner if running as root if (getuid() == 0) if (chown(path.c_str(), uid, gid) != 0) return false; if (chmod(path.c_str(), mode) != 0) return false; return true; } bool GMConfig::CreateControlDirectory() const { bool res = true; if (!control_dir.empty()) { mode_t mode = 0; if (gm_user.get_uid() == 0) { // This control dir serves multiple users and running // as root (hence really can serve multiple users) mode = S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH; } else { mode = S_IRWXU; } if (!fix_directory(control_dir, fixdir, mode, gm_user.get_uid(), gm_user.get_gid())) res = false; // Structure inside control dir is important - *always* create it // Directories containing logs and job states may need access from // information system, etc. So allowing them to be more open. // Delegation is only accessed by service itself. 
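    // For example, when running as root the calls below lay out roughly:
    //
    //   <control_dir>/logs        0755
    //   <control_dir>/accepting   0755
    //   <control_dir>/restarting  0755
    //   <control_dir>/processing  0755
    //   <control_dir>/finished    0755
    //   DelegationDir()           0700   (S_IRWXU, service-only access)
    //
    // For a non-root A-REX the shared mode computed above is S_IRWXU instead.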
if (!fix_directory(control_dir+"/logs", fixdir_always, mode, gm_user.get_uid(), gm_user.get_gid())) res = false; if (!fix_directory(control_dir+"/accepting", fixdir_always, mode, gm_user.get_uid(), gm_user.get_gid())) res = false; if (!fix_directory(control_dir+"/restarting", fixdir_always, mode, gm_user.get_uid(), gm_user.get_gid())) res = false; if (!fix_directory(control_dir+"/processing", fixdir_always, mode, gm_user.get_uid(), gm_user.get_gid())) res = false; if (!fix_directory(control_dir+"/finished", fixdir_always, mode, gm_user.get_uid(), gm_user.get_gid())) res = false; std::string deleg_dir = DelegationDir(); if (!fix_directory(deleg_dir, fixdir_always, S_IRWXU, gm_user.get_uid(), gm_user.get_gid())) res = false; } return res; } bool GMConfig::CreateSessionDirectory(const std::string& dir, const Arc::User& user) const { // First just try to create per-job dir, assuming session root already exists if (gm_user.get_uid() != 0) { if (Arc::DirCreate(dir, S_IRWXU, false)) return true; } else if (strict_session) { if (Arc::DirCreate(dir, user.get_uid(), user.get_gid(), S_IRWXU, false)) return true; } else { if (Arc::DirCreate(dir, S_IRWXU, false)) return (chown(dir.c_str(), user.get_uid(), user.get_gid()) == 0); } // Creation failed so try to create session root and try again std::string session_root(dir.substr(0, dir.rfind('/'))); if (session_root.empty()) return false; mode_t mode = 0; if (gm_user.get_uid() == 0) { if (strict_session) { // For multiple users creating immediate subdirs using own account // dangerous permissions, but there is no other option mode = S_IRWXU | S_IRWXG | S_IRWXO | S_ISVTX; } else { // For multiple users not creating immediate subdirs using own account mode = S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH; } } else { // For single user mode = S_IRWXU; } if (!fix_directory(session_root, fixdir, mode, gm_user.get_uid(), gm_user.get_gid())) return false; // Try per-job dir again if (gm_user.get_uid() != 0) { return Arc::DirCreate(dir, S_IRWXU, false); } else if (strict_session) { return Arc::DirCreate(dir, user.get_uid(), user.get_gid(), S_IRWXU, false); } else { if (!Arc::DirCreate(dir, S_IRWXU, false)) return false; return (chown(dir.c_str(), user.get_uid(), user.get_gid()) == 0); } } std::string GMConfig::DelegationDir() const { std::string deleg_dir = control_dir+"/delegations"; uid_t u = gm_user.get_uid(); if (u == 0) return deleg_dir; struct passwd pwbuf; char buf[4096]; struct passwd* pw; if (::getpwuid_r(u, &pwbuf, buf, sizeof(buf), &pw) == 0) { if (pw && pw->pw_name) { deleg_dir+="."; deleg_dir+=pw->pw_name; } } return deleg_dir; } GMConfig::deleg_db_t GMConfig::DelegationDBType() const { return deleg_db; } const std::string & GMConfig::ForcedVOMS(const char * queue) const { std::map::const_iterator pos = forced_voms.find(queue); return (pos == forced_voms.end()) ? empty_string : pos->second; } const std::list & GMConfig::AuthorizedVOs(const char * queue) const { std::map >::const_iterator pos = authorized_vos.find(queue); return (pos == authorized_vos.end()) ? 
empty_string_list : pos->second; } bool GMConfig::Substitute(std::string& param, const Arc::User& user) const { std::string::size_type curpos = 0; for (;;) { if (curpos >= param.length()) break; std::string::size_type pos = param.find('%', curpos); if (pos == std::string::npos) break; pos++; if (pos >= param.length()) break; if (param[pos] == '%') { curpos=pos+1; continue; }; std::string to_put; switch (param[pos]) { case 'R': to_put = SessionRoot(""); break; // First session dir will be used if there are multiple case 'C': to_put = ControlDir(); break; case 'U': to_put = user.Name(); break; case 'H': to_put = user.Home(); break; case 'Q': to_put = DefaultQueue(); break; case 'L': to_put = DefaultLRMS(); break; case 'u': to_put = Arc::tostring(user.get_uid()); break; case 'g': to_put = Arc::tostring(user.get_gid()); break; case 'W': to_put = Arc::ArcLocation::Get(); break; case 'F': to_put = conffile; break; case 'G': logger.msg(Arc::ERROR, "Globus location variable substitution is not supported anymore. Please specify path directly."); break; default: to_put = param.substr(pos-1, 2); break; } curpos = pos+1+(to_put.length() - 2); param.replace(pos-1, 2, to_put); } return true; } void GMConfig::SetShareID(const Arc::User& share_user) { share_uid = share_user.get_uid(); share_gids.clear(); if (share_uid <= 0) return; struct passwd pwd_buf; struct passwd* pwd = NULL; #ifdef _SC_GETPW_R_SIZE_MAX int buflen = sysconf(_SC_GETPW_R_SIZE_MAX); if (buflen <= 0) buflen = 16384; #else int buflen = 16384; #endif char* buf = (char*)malloc(buflen); if (!buf) return; if (getpwuid_r(share_uid, &pwd_buf, buf, buflen, &pwd) == 0) { if (pwd) { #ifdef HAVE_GETGROUPLIST #ifdef _MACOSX int groups[100]; #else gid_t groups[100]; #endif int ngroups = 100; if (getgrouplist(pwd->pw_name, pwd->pw_gid, groups, &ngroups) >= 0) { for (int n = 0; npw_gid); } } free(buf); } bool GMConfig::MatchShareGid(gid_t sgid) const { for (std::list::const_iterator i = share_gids.begin(); i != share_gids.end(); ++i) { if (sgid == *i) return true; } return false; } bool GMConfig::RunHelpers() { bool started = true; for (std::list::iterator i = helpers.begin(); i != helpers.end(); ++i) { started &= i->run(*this); } return started; } void GMConfig::PrepareToDestroy() { for (std::list::iterator i = helpers.begin(); i != helpers.end(); ++i) { i->stop(); } } GMConfig::ExternalHelper::ExternalHelper(const std::string &cmd) { command = cmd; proc = NULL; } GMConfig::ExternalHelper::~ExternalHelper() { if(proc != NULL) { delete proc; proc=NULL; } } static void ExternalHelperInitializer(void* arg) { const char* logpath = reinterpret_cast(arg); // set up stdin,stdout and stderr int h; h = ::open("/dev/null",O_RDONLY); if(h != 0) { if(dup2(h,0) != 0) { sleep(10); _exit(1); }; close(h); }; h = ::open("/dev/null",O_WRONLY); if(h != 1) { if(dup2(h,1) != 1) { sleep(10); _exit(1); }; close(h); }; if(logpath && logpath[0]) { h = ::open(logpath,O_WRONLY | O_CREAT | O_APPEND,S_IRUSR | S_IWUSR); if(h==-1) { h = ::open("/dev/null",O_WRONLY); }; } else { h = ::open("/dev/null",O_WRONLY); }; if(h != 2) { if(dup2(h,2) != 2) { sleep(10); exit(1); }; close(h); }; } bool GMConfig::ExternalHelper::run(const GMConfig& config) { if (proc != NULL) { if (proc->Running()) { return true; // it is already/still running } delete proc; proc = NULL; } // start/restart if (command.empty()) return true; // has anything to run ? 
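  // Illustrative example only (data dir path and LRMS name are hypothetical):
  // after CoreConfig has done its substitutions, "command" typically looks like
  //   /usr/share/arc/scan-fork-job --config /etc/arc.conf /var/spool/arc/jobstatus
  // i.e. the scan-<lrms>-job poller appended by CoreConfig plus any explicitly
  // configured helper lines. It is (re)started below whenever it is found not
  // to be running.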
logger.msg(Arc::VERBOSE, "Starting helper process: %s", command); proc = new Arc::Run(command); proc->KeepStdin(true); proc->KeepStdout(true); proc->KeepStderr(true); proc->AssignInitializer(&ExternalHelperInitializer, const_cast(config.HelperLog().c_str())); if (proc->Start()) return true; delete proc; proc = NULL; logger.msg(Arc::ERROR, "Helper process start failed: %s", command); // start failed, doing nothing - maybe in the future return false; } void GMConfig::ExternalHelper::stop() { if (proc && proc->Running()) { logger.msg(Arc::VERBOSE, "Stopping helper process %s", command); proc->Kill(1); } } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/PaxHeaders.7502/README0000644000000000000000000000012311016612002024751 xustar000000000000000026 mtime=1211831298.62818 27 atime=1513200576.111721 30 ctime=1513200662.898782939 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/README0000644000175000002070000000003211016612002025012 0ustar00mockbuildmock00000000000000configuration processing. nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/PaxHeaders.7502/CoreConfig.cpp0000644000000000000000000000012413065016642026632 xustar000000000000000027 mtime=1490296226.623926 27 atime=1513200576.114721 30 ctime=1513200662.904783012 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/conf/CoreConfig.cpp0000644000175000002070000007763213065016642026716 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "../jobs/ContinuationPlugins.h" #include "../run/RunPlugin.h" #include "../log/JobLog.h" #include "../log/JobsMetrics.h" #include "../jobs/JobsList.h" #include "CacheConfig.h" #include "GMConfig.h" #include "CoreConfig.h" namespace ARex { Arc::Logger CoreConfig::logger(Arc::Logger::getRootLogger(), "CoreConfig"); #define REPORTER_PERIOD "3600"; void CoreConfig::CheckLRMSBackends(const std::string& default_lrms) { std::string tool_path; tool_path=Arc::ArcLocation::GetDataDir()+"/cancel-"+default_lrms+"-job"; if(!Glib::file_test(tool_path,Glib::FILE_TEST_IS_REGULAR)) { logger.msg(Arc::WARNING,"Missing cancel-%s-job - job cancellation may not work",default_lrms); } tool_path=Arc::ArcLocation::GetDataDir()+"/submit-"+default_lrms+"-job"; if(!Glib::file_test(tool_path,Glib::FILE_TEST_IS_REGULAR)) { logger.msg(Arc::WARNING,"Missing submit-%s-job - job submission to LRMS may not work",default_lrms); } tool_path=Arc::ArcLocation::GetDataDir()+"/scan-"+default_lrms+"-job"; if(!Glib::file_test(tool_path,Glib::FILE_TEST_IS_REGULAR)) { logger.msg(Arc::WARNING,"Missing scan-%s-job - may miss when job finished executing",default_lrms); } } bool CoreConfig::CheckYesNoCommand(bool& config_param, const std::string& name, std::string& rest) { std::string s = Arc::ConfigIni::NextArg(rest); if (s == "yes" || s == "expert-debug-on") { config_param = true; } else if(s == "no") { config_param = false; } else { logger.msg(Arc::ERROR, "Wrong option in %s", name); return false; } return true; } bool CoreConfig::ParseConf(GMConfig& config) { if (config.xml_cfg) { return ParseConfXML(config, config.xml_cfg); } if (!config.conffile.empty()) { Arc::ConfigFile cfile; if (!cfile.open(config.conffile)) { logger.msg(Arc::ERROR, "Can't read configuration file at %s", config.conffile); return false; } // detect type of file Arc::ConfigFile::file_type type = cfile.detect(); if (type == Arc::ConfigFile::file_XML) { Arc::XMLNode xml_cfg; if (!xml_cfg.ReadFromStream(cfile)) { cfile.close(); 
logger.msg(Arc::ERROR, "Can't interpret configuration file %s as XML", config.conffile); return false; } cfile.close(); // Pick out the A-REX service node Arc::XMLNode arex; Arc::Config cfg(xml_cfg); if (!cfg) return false; if (cfg.Name() == "Service") { if (cfg.Attribute("name") == "a-rex") { cfg.New(arex); return ParseConfXML(config, arex); } return false; // not a-rex service } if (cfg.Name() == "ArcConfig") { // In the case of multiple A-REX services defined, we parse the first one for (int i=0;; i++) { Arc::XMLNode node = cfg["Chain"]; node = node["Service"][i]; if (!node) return false; // no a-rex node found if (node.Attribute("name") == "a-rex") { node.New(arex); break; } } if (!arex) return false; return ParseConfXML(config, arex); } // malformed xml return false; } if (type == Arc::ConfigFile::file_INI) { bool result = ParseConfINI(config, cfile); cfile.close(); return result; } logger.msg(Arc::ERROR, "Can't recognize type of configuration file at %s", config.conffile); return false; } logger.msg(Arc::ERROR, "Could not determine configuration type or configuration is empty"); return false; } bool CoreConfig::ParseConfINI(GMConfig& config, Arc::ConfigFile& cfile) { // List of helper commands that will be substituted after all configuration is read std::list helpers; std::string jobreport_publisher; bool helper_log_is_set = false; Arc::ConfigIni cf(cfile); cf.AddSection("common"); // 0 cf.AddSection("grid-manager"); // 1 cf.AddSection("infosys"); // 2 cf.AddSection("queue"); // 3 cf.AddSection("cluster"); // 4 if (config.job_perf_log) { config.job_perf_log->SetEnabled(false); config.job_perf_log->SetOutput("/var/log/arc/perfdata/data.perflog"); } // process configuration information here for(;;) { std::string rest; std::string command; cf.ReadNext(command, rest); if (command.empty()) { // EOF break; } if (cf.SectionNum() == 2) { // infosys - looking for user name to get share uid if (command == "user") { config.SetShareID(Arc::User(rest)); } continue; } if (cf.SectionNum() == 0) { // common - infosys user may be in common too if (command == "user") { config.SetShareID(Arc::User(rest)); } else if(command == "x509_cert_dir") { config.cert_dir = rest; } else if(command == "x509_voms_dir") { config.voms_dir = rest; } // no continue since some options can be in common or grid-manager } if (cf.SectionNum() == 3) { // queue if (cf.SectionNew()) { std::string name = cf.SubSection(); if (name.empty()) { logger.msg(Arc::ERROR, "No queue name given in queue block name"); return false; } config.queues.push_back(name); } if (command == "forcedefaultvoms") { std::string str = Arc::ConfigIni::NextArg(rest); if (str.empty()) { logger.msg(Arc::ERROR, "forcedefaultvoms parameter is empty"); return false; } if (!config.queues.empty()) { std::string queue_name = *(--config.queues.end()); config.forced_voms[queue_name] = str; } } else if (command == "authorizedvo") { std::string str = Arc::ConfigIni::NextArg(rest); if (str.empty()) { logger.msg(Arc::ERROR, "authorizedvo parameter is empty"); return false; } if (!config.queues.empty()) { std::string queue_name = *(--config.queues.end()); config.authorized_vos[queue_name].push_back(str); } } continue; } if (cf.SectionNum() == 4) { // cluster if (command == "authorizedvo") { std::string str = Arc::ConfigIni::NextArg(rest); if (str.empty()) { logger.msg(Arc::ERROR, "authorizedvo parameter is empty"); return false; } config.authorized_vos[""].push_back(str); } continue; } if(command == "arex_mount_point") { config.arex_endpoint = 
Arc::ConfigIni::NextArg(rest); } else if (command == "runtimedir") { config.rte_dir = rest; } else if (command == "joblog") { // where to write job information if (!config.job_log) continue; std::string fname = Arc::ConfigIni::NextArg(rest); // empty is allowed too config.job_log->SetOutput(fname.c_str()); } else if (command == "enable_ganglia") { if (!config.jobs_metrics) continue; bool enable = false; if (!CheckYesNoCommand(enable, command, rest)) return false; config.jobs_metrics->SetEnabled(enable); } else if (command == "ganglialocation") { if (!config.jobs_metrics) continue; std::string fname = Arc::ConfigIni::NextArg(rest); // empty is allowed too config.jobs_metrics->SetPath(fname.c_str()); } else if (command == "gangliaconfig") { if (!config.jobs_metrics) continue; std::string fname = Arc::ConfigIni::NextArg(rest); // empty is allowed too config.jobs_metrics->SetPath(fname.c_str()); } else if (command == "enable_perflog_reporting") { // if (!config.job_perf_log) continue; bool enable = false; if (!CheckYesNoCommand(enable, command, rest)) return false; config.job_perf_log->SetEnabled(enable); } else if (command == "perflogdir") { // if (!config.job_perf_log) continue; std::string fname = Arc::ConfigIni::NextArg(rest); // empty is allowed too if(!fname.empty()) fname += "/arex.perflog"; config.job_perf_log->SetOutput(fname.c_str()); } else if (command == "jobreport") { // service to report information to if (!config.job_log) continue; for(;;) { std::string url = Arc::ConfigIni::NextArg(rest); if (url.empty()) break; unsigned int i; if (Arc::stringto(url, i)) { config.job_log->SetExpiration(i); continue; } config.job_log->SetReporter(url.c_str()); } } else if (command == "jobreport_vo_filters") { // which VO will be send to the server if (!config.job_log) continue; for(;;) { std::string voFilters = Arc::ConfigIni::NextArg(rest); if (voFilters.empty()) break; config.job_log->SetVoFilters(voFilters.c_str()); } } else if (command == "jobreport_publisher") { // Name of the publisher: e.g. jura if (!config.job_log) continue; jobreport_publisher = Arc::ConfigIni::NextArg(rest); } else if (command == "jobreport_period") { // Period of running in seconds: e.g. 3600 if (!config.job_log) continue; std::string period_s = Arc::ConfigIni::NextArg(rest); if (period_s.empty()) { period_s = REPORTER_PERIOD; } int period; if (!Arc::stringto(period_s, period)) { logger.msg(Arc::ERROR, "Wrong number in jobreport_period: %s", period_s); return false; } if (!config.job_log->SetPeriod(period)) { std::string default_value = REPORTER_PERIOD; logger.msg(Arc::ERROR, "Wrong number in jobreport_period: %d, minimal value: %s", period, default_value); return false; } } else if (command == "jobreport_credentials") { if (!config.job_log) continue; std::string jobreport_key = Arc::ConfigIni::NextArg(rest); std::string jobreport_cert = Arc::ConfigIni::NextArg(rest); std::string jobreport_cadir = Arc::ConfigIni::NextArg(rest); config.job_log->SetCredentials(jobreport_key, jobreport_cert, jobreport_cadir); } else if (command == "jobreport_options") { // e.g. 
for SGAS, interpreted by usage reporter if (!config.job_log) continue; std::string accounting_options = Arc::ConfigIni::NextArg(rest); config.job_log->SetOptions(accounting_options); } else if (command == "jobreport_logfile") { if (!config.job_log) continue; std::string logfile = Arc::ConfigIni::NextArg(rest); if (logfile.empty()) { logger.msg(Arc::ERROR, "Missing file name in jobreport_logfile"); return false; } config.job_log->SetLogFile(logfile.c_str()); } else if (command == "scratchdir") { std::string scratch = Arc::ConfigIni::NextArg(rest); // don't set if already set by shared_scratch if (config.scratch_dir.empty()) config.scratch_dir = scratch; } else if (command == "shared_scratch") { std::string scratch = Arc::ConfigIni::NextArg(rest); config.scratch_dir = scratch; } else if (command == "maxjobs") { // maximum number of the jobs to support std::string max_jobs_s = Arc::ConfigIni::NextArg(rest); if (max_jobs_s.empty()) continue; if (!Arc::stringto(max_jobs_s, config.max_jobs)) { logger.msg(Arc::ERROR, "Wrong number in maxjobs: %s", max_jobs_s); return false; } if (config.max_jobs < 0) config.max_jobs = -1; max_jobs_s = Arc::ConfigIni::NextArg(rest); if (max_jobs_s.empty()) continue; if (!Arc::stringto(max_jobs_s, config.max_jobs_running)) { logger.msg(Arc::ERROR, "Wrong number in maxjobs: %s", max_jobs_s); return false; } if (config.max_jobs_running < 0) config.max_jobs_running = -1; max_jobs_s = Arc::ConfigIni::NextArg(rest); if (max_jobs_s.empty()) continue; if (!Arc::stringto(max_jobs_s, config.max_jobs_per_dn)) { logger.msg(Arc::ERROR, "Wrong number in maxjobs: %s", max_jobs_s); return false; } if (config.max_jobs_per_dn < 0) config.max_jobs_per_dn = -1; max_jobs_s = Arc::ConfigIni::NextArg(rest); if (max_jobs_s.empty()) continue; if (!Arc::stringto(max_jobs_s, config.max_jobs_total)) { logger.msg(Arc::ERROR, "Wrong number in maxjobs: %s", max_jobs_s); return false; } if (config.max_jobs_total < 0) config.max_jobs_total = -1; max_jobs_s = Arc::ConfigIni::NextArg(rest); if (max_jobs_s.empty()) continue; if (!Arc::stringto(max_jobs_s, config.max_scripts)) { logger.msg(Arc::ERROR, "Wrong number in maxjobs: %s", max_jobs_s); return false; } if (config.max_scripts < 0) config.max_scripts = -1; } else if (command == "wakeupperiod") { std::string wakeup_s = Arc::ConfigIni::NextArg(rest); if (!Arc::stringto(wakeup_s, config.wakeup_period)) { logger.msg(Arc::ERROR,"Wrong number in wakeupperiod: %s",wakeup_s); return false; } } else if(command == "norootpower") { if (!CheckYesNoCommand(config.strict_session, command, rest)) return false; } else if (command == "mail") { // internal address from which to send mail config.support_email_address = Arc::ConfigIni::NextArg(rest); if (config.support_email_address.empty()) { logger.msg(Arc::ERROR, "mail parameter is empty"); return false; } } else if (command == "defaultttl") { // time to keep job after finished std::string default_ttl_s = Arc::ConfigIni::NextArg(rest); if (!Arc::stringto(default_ttl_s, config.keep_finished)) { logger.msg(Arc::ERROR, "Wrong number in defaultttl command"); return false; } default_ttl_s = Arc::ConfigIni::NextArg(rest); if (!default_ttl_s.empty() && !Arc::stringto(default_ttl_s, config.keep_deleted)) { logger.msg(Arc::ERROR, "Wrong number in defaultttl command"); return false; } } else if (command == "maxrerun") { // number of retries allowed std::string default_reruns_s = Arc::ConfigIni::NextArg(rest); if (!Arc::stringto(default_reruns_s, config.reruns)) { logger.msg(Arc::ERROR, "Wrong number in maxrerun 
command"); return false; } } else if (command == "lrms") { // default lrms type and queue (optional) std::string default_lrms = Arc::ConfigIni::NextArg(rest); if (default_lrms.empty()) { logger.msg(Arc::ERROR, "defaultlrms is empty"); return false; } if (default_lrms.compare("slurm") == 0) { // allow lower case slurm in config default_lrms = "SLURM"; } config.default_lrms = default_lrms; std::string default_queue = Arc::ConfigIni::NextArg(rest); if (!default_queue.empty()) { config.default_queue = default_queue; } CheckLRMSBackends(default_lrms); } else if (command == "authplugin") { // set plugin to be called on state changes if (!config.cont_plugins) continue; std::string state_name = Arc::ConfigIni::NextArg(rest); if (state_name.empty()) { logger.msg(Arc::ERROR, "State name for plugin is missing"); return false; } std::string options_s = Arc::ConfigIni::NextArg(rest); if (options_s.empty()) { logger.msg(Arc::ERROR, "Options for plugin are missing"); return false; } if (!config.cont_plugins->add(state_name.c_str(), options_s.c_str(), rest.c_str())) { logger.msg(Arc::ERROR, "Failed to register plugin for state %s", state_name); return false; } } else if (command == "localcred") { if (!config.cred_plugin) continue; std::string timeout_s = Arc::ConfigIni::NextArg(rest); int timeout; if (!Arc::stringto(timeout_s, timeout)){ logger.msg(Arc::ERROR, "Wrong number for timeout in plugin command"); return false; } config.cred_plugin->timeout(timeout); } else if (command == "fixdirectories") { std::string s = Arc::ConfigIni::NextArg(rest); if (s == "yes") { config.fixdir = GMConfig::fixdir_always; } else if (s == "missing") { config.fixdir = GMConfig::fixdir_missing; } else if (s == "no") { config.fixdir = GMConfig::fixdir_never; } else { logger.msg(Arc::ERROR, "Wrong option in fixdirectories"); return false; } } else if (command == "delegationdb") { std::string s = Arc::ConfigIni::NextArg(rest); if (s == "bdb") { config.deleg_db = GMConfig::deleg_db_bdb; } else if (s == "sqlite") { config.deleg_db = GMConfig::deleg_db_sqlite; } else { logger.msg(Arc::ERROR, "Wrong option in delegationdb"); return false; } } else if (command == "allowsubmit") { // Note: not available in xml config.allow_submit += " " + Arc::ConfigIni::NextArg(rest); } else if (command == "enable_arc_interface") { if (!CheckYesNoCommand(config.enable_arc_interface, command, rest)) return false; } else if (command == "enable_emies_interface") { if (!CheckYesNoCommand(config.enable_emies_interface, command, rest)) return false; } else if (command == "sessiondir") { // set session root directory std::string session_root = Arc::ConfigIni::NextArg(rest); if (session_root.empty()) { logger.msg(Arc::ERROR, "Session root directory is missing"); return false; } if (rest.length() != 0 && rest != "drain") { logger.msg(Arc::ERROR, "Junk in sessiondir command"); return false; } if (session_root == "*") { // special value which uses each user's home area session_root = "%H/.jobs"; } config.session_roots.push_back(session_root); if (rest != "drain") config.session_roots_non_draining.push_back(session_root); } else if (command == "controldir") { std::string control_dir = Arc::ConfigIni::NextArg(rest); if (control_dir.empty()) { logger.msg(Arc::ERROR, "Missing directory in control command"); return false; } config.control_dir = control_dir; } else if (command == "control") { logger.msg(Arc::WARNING, "'control' configuration option is no longer supported, please use 'controldir' instead"); } else if (command == "helper") { std::string helper_user = 
Arc::ConfigIni::NextArg(rest); if (helper_user.empty()) { logger.msg(Arc::ERROR, "User for helper program is missing"); return false; } if (helper_user != ".") { logger.msg(Arc::ERROR, "Only user '.' for helper program is supported"); return false; } if (rest.empty()) { logger.msg(Arc::ERROR, "Helper program is missing"); return false; } helpers.push_back(rest); } else if (command == "helperlog") { config.helper_log = Arc::ConfigIni::NextArg(rest); // empty is allowed helper_log_is_set = true; } else if (command == "forcedefaultvoms") { std::string str = Arc::ConfigIni::NextArg(rest); if (str.empty()) { logger.msg(Arc::ERROR, "forcedefaultvoms parameter is empty"); return false; } config.forced_voms[""] = str; } } // End of parsing conf commands if (jobreport_publisher.empty()) { jobreport_publisher = "jura"; } if(config.job_log) { config.job_log->SetLogger(jobreport_publisher.c_str()); } if(!helper_log_is_set) { // Assgn default backward compatible value config.helper_log = config.control_dir + "/job.helper.errors"; } // Do substitution of control dir and helpers here now we have all the // configuration. These are special because they do not change per-user config.Substitute(config.control_dir); for (std::list::iterator helper = helpers.begin(); helper != helpers.end(); ++helper) { config.Substitute(*helper); config.helpers.push_back(*helper); } // Add helper to poll for finished LRMS jobs if (!config.default_lrms.empty() && !config.control_dir.empty()) { std::string cmd = Arc::ArcLocation::GetDataDir() + "/scan-"+config.default_lrms+"-job"; cmd = Arc::escape_chars(cmd, " \\", '\\', false); if (!config.conffile.empty()) cmd += " --config " + config.conffile; cmd += " " + config.control_dir; config.helpers.push_back(cmd); } // Get cache parameters try { CacheConfig cache_config = CacheConfig(config); config.cache_params = cache_config; } catch (CacheConfigException& e) { logger.msg(Arc::ERROR, "Error with cache configuration: %s", e.what()); return false; } return true; } bool CoreConfig::ParseConfXML(GMConfig& config, const Arc::XMLNode& cfg) { // Currently we have everything running inside same arched. // So we do not need any special treatment for infosys. 
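  // Illustrative sketch only: a minimal fragment of the A-REX service element
  // using names read below; values and paths are hypothetical examples, not
  // defaults.
  //
  //   <loadLimits>
  //     <maxJobsTracked>1000</maxJobsTracked>
  //     <maxJobsRun>100</maxJobsRun>
  //     <wakeupPeriod>120</wakeupPeriod>
  //   </loadLimits>
  //   <LRMS>
  //     <type>SLURM</type>
  //     <defaultShare>main</defaultShare>
  //   </LRMS>
  //   <control>
  //     <controlDir>/var/spool/arc/jobstatus</controlDir>
  //     <sessionRootDir>/var/spool/arc/session</sessionRootDir>
  //   </control>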
// std::string infosys_user(""); bool helper_log_is_set = false; Arc::XMLNode tmp_node = cfg["endpoint"]; if (tmp_node) config.headnode = (std::string)tmp_node; /* jobLogPath jobReport destination expiration type parameters KeyPath CertificatePath CACertificatesDir logfile */ tmp_node = cfg["enableARCInterface"]; if (tmp_node) { if (Arc::lower((std::string)tmp_node) == "yes") { config.enable_arc_interface = true; } else { config.enable_arc_interface = false; } } tmp_node = cfg["enableEMIESInterface"]; if (tmp_node) { if (Arc::lower((std::string)tmp_node) == "yes") { config.enable_emies_interface = true; } else { config.enable_emies_interface = false; } } tmp_node = cfg["jobLogPath"]; if (tmp_node && config.job_log) { std::string fname = tmp_node; config.job_log->SetOutput(fname.c_str()); } tmp_node = cfg["jobReport"]; if (tmp_node && config.job_log) { std::string url = tmp_node["destination"]; if (!url.empty()) { // destination is required config.job_log->SetReporter(url.c_str()); std::string publisher = tmp_node["publisher"]; if (publisher.empty()) publisher = "jura"; config.job_log->SetLogger(publisher.c_str()); unsigned int i; if (Arc::stringto(tmp_node["expiration"], i)) config.job_log->SetExpiration(i); std::string period = tmp_node["Period"]; if (period.empty()) period = REPORTER_PERIOD; unsigned int p; if (!Arc::stringto(period, p)) { logger.msg(Arc::ERROR, "Wrong number in jobreport_period: %s", period); return false; } if (!config.job_log->SetPeriod(p)) { std::string default_value = REPORTER_PERIOD; logger.msg(Arc::ERROR, "Wrong number in jobreport_period: %d, minimal value: %s", p, default_value); return false; } std::string parameters = tmp_node["parameters"]; if (!parameters.empty()) config.job_log->SetOptions(parameters); std::string jobreport_key = tmp_node["KeyPath"]; std::string jobreport_cert = tmp_node["CertificatePath"]; std::string jobreport_cadir = tmp_node["CACertificatesDir"]; config.job_log->SetCredentials(jobreport_key, jobreport_cert, jobreport_cadir); std::string logfile = tmp_node["logfile"]; if (!logfile.empty()) config.job_log->SetLogFile(logfile.c_str()); } } /* loadLimits maxJobsTracked maxJobsRun maxJobsTotal maxJobsPerDN wakeupPeriod */ tmp_node = cfg["loadLimits"]; if (tmp_node) { if (!Arc::Config::elementtoint(tmp_node, "maxJobsTracked", config.max_jobs)) { logger.msg(Arc::ERROR, "Value for maxJobsTracked is incorrect number"); return false; }; if (!Arc::Config::elementtoint(tmp_node, "maxJobsRun", config.max_jobs_running)) { logger.msg(Arc::ERROR, "Value for maxJobsRun is incorrect number"); return false; }; if (!Arc::Config::elementtoint(tmp_node, "maxJobsTotal", config.max_jobs_total)) { logger.msg(Arc::ERROR, "Value for maxJobsTotal is incorrect number"); return false; }; if (!Arc::Config::elementtoint(tmp_node, "maxJobsPerDN", config.max_jobs_per_dn)) { logger.msg(Arc::ERROR, "Value for maxJobsPerDN is incorrect number"); return false; }; if (!Arc::Config::elementtoint(tmp_node, "wakeupPeriod", config.wakeup_period)) { logger.msg(Arc::ERROR, "Value for wakeupPeriod is incorrect number"); return false; }; if (!Arc::Config::elementtoint(tmp_node, "maxScripts", config.max_scripts)) { logger.msg(Arc::ERROR, "Value for maxScripts is incorrect number"); return false; }; } /* serviceMail */ tmp_node = cfg["serviceMail"]; if(tmp_node) { config.support_email_address = (std::string)tmp_node; if (config.support_email_address.empty()) { logger.msg(Arc::ERROR, "serviceMail is empty"); return false; } } /* LRMS type defaultShare */ tmp_node = cfg["LRMS"]; if 
(tmp_node) { config.default_lrms = (std::string)(tmp_node["type"]); if(config.default_lrms.empty()) { logger.msg(Arc::ERROR,"Type in LRMS is missing"); return false; } config.default_queue = (std::string)(tmp_node["defaultShare"]); CheckLRMSBackends(config.default_lrms); config.rte_dir = (std::string)(tmp_node["runtimeDir"]); // We only want the scratch path as seen on the front-end if (tmp_node["sharedScratch"]) { config.scratch_dir = (std::string)(tmp_node["sharedScratch"]); } else if (tmp_node["scratchDir"]) { config.scratch_dir = (std::string)(tmp_node["scratchDir"]); } } else { logger.msg(Arc::ERROR, "LRMS is missing"); return false; } /* authPlugin (timeout,onSuccess=PASS,FAIL,LOG,onFailure=FAIL,PASS,LOG,onTimeout=FAIL,PASS,LOG) state command */ tmp_node = cfg["authPlugin"]; if (config.cont_plugins) for (; tmp_node; ++tmp_node) { std::string state_name = tmp_node["state"]; if (state_name.empty()) { logger.msg(Arc::ERROR, "State name for authPlugin is missing"); return false; } std::string command = tmp_node["command"]; if (state_name.empty()) { logger.msg(Arc::ERROR, "Command for authPlugin is missing"); return false; } std::string options; Arc::XMLNode onode; onode = tmp_node.Attribute("timeout"); if (onode) options += "timeout="+(std::string)onode+','; onode = tmp_node.Attribute("onSuccess"); if (onode) options += "onsuccess="+Arc::lower((std::string)onode)+','; onode = tmp_node.Attribute("onFailure"); if (onode) options += "onfailure="+Arc::lower((std::string)onode)+','; onode = tmp_node.Attribute("onTimeout"); if (onode) options += "ontimeout="+Arc::lower((std::string)onode)+','; if (!options.empty()) options = options.substr(0, options.length()-1); logger.msg(Arc::DEBUG, "Registering plugin for state %s; options: %s; command: %s", state_name, options, command); if (!config.cont_plugins->add(state_name.c_str(), options.c_str(), command.c_str())) { logger.msg(Arc::ERROR, "Failed to register plugin for state %s", state_name); return false; } } /* localCred (timeout) command */ tmp_node = cfg["localCred"]; if (tmp_node && config.cred_plugin) { std::string command = tmp_node["command"]; if (command.empty()) { logger.msg(Arc::ERROR, "Command for localCred is missing"); return false; } Arc::XMLNode onode; onode = tmp_node.Attribute("timeout"); if (!onode) { logger.msg(Arc::ERROR, "Timeout for localCred is missing"); return false; } int to; if (!Arc::Config::elementtoint(onode, NULL, to)) { logger.msg(Arc::ERROR, "Timeout for localCred is incorrect number"); return false; } *(config.cred_plugin) = command; config.cred_plugin->timeout(to); } /* control username <- not used any more controlDir sessionRootDir cache location path link highWatermark lowWatermark defaultTTL defaultTTR maxReruns noRootPower fixDirectories diskSpace <- not used any more */ tmp_node = cfg["control"]; if (!tmp_node) { logger.msg (Arc::ERROR, "Control element must be present"); return false; } config.control_dir = (std::string)(tmp_node["controlDir"]); if (config.control_dir.empty()) { logger.msg(Arc::ERROR, "controlDir is missing"); return false; } Arc::XMLNode session_node = tmp_node["sessionRootDir"]; for (;session_node; ++session_node) { std::string session_root = std::string(session_node); if (session_root.empty()) { logger.msg(Arc::ERROR,"sessionRootDir is missing"); return false; } if (session_root == "*") { // special value which uses each user's home area session_root = "%H/.jobs"; } config.session_roots.push_back(session_root); bool session_drain = false; 
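    // Illustrative example only (path hypothetical): a session root may carry a
    // drain attribute, e.g.
    //   <sessionRootDir drain="yes">/var/spool/arc/session-old</sessionRootDir>
    // Such a directory stays in session_roots but is left out of
    // session_roots_non_draining below, mirroring the "drain" keyword of the
    // INI sessiondir command.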
if(!Arc::Config::elementtobool(session_node.Attribute("drain"), NULL, session_drain)) { logger.msg(Arc::ERROR, "Attribute drain for sessionRootDir is incorrect boolean"); return false; }; if(!session_drain) config.session_roots_non_draining.push_back(session_root); } GMConfig::fixdir_t fixdir = GMConfig::fixdir_always; const char* fixdir_opts[] = { "yes", "missing", "no", NULL }; int n; if (!Arc::Config::elementtoenum(tmp_node, "fixDirectories", n=(int)fixdir, fixdir_opts)) { logger.msg(Arc::ERROR, "The fixDirectories element is incorrect value"); return false; }; config.fixdir = (GMConfig::fixdir_t)n; GMConfig::deleg_db_t deleg_db = GMConfig::deleg_db_bdb; const char* deleg_db_opts[] = { "bdb", "sqlite", NULL }; if (!Arc::Config::elementtoenum(tmp_node, "delegationDB", n=(int)deleg_db, deleg_db_opts)) { logger.msg(Arc::ERROR, "The delegationDB element is incorrect value"); return false; }; config.deleg_db = (GMConfig::deleg_db_t)n; if (!Arc::Config::elementtoint(tmp_node, "maxReruns", config.reruns)) { logger.msg(Arc::ERROR, "The maxReruns element is incorrect number"); return false; }; if (!Arc::Config::elementtobool(tmp_node, "noRootPower", config.strict_session)) { logger.msg(Arc::ERROR, "The noRootPower element is incorrect number"); return false; }; if (!Arc::Config::elementtoint(tmp_node, "defaultTTL", config.keep_finished)) { logger.msg(Arc::ERROR, "The defaultTTL element is incorrect number"); return false; }; if (!Arc::Config::elementtoint(tmp_node, "defaultTTR", config.keep_deleted)) { logger.msg(Arc::ERROR, "The defaultTTR element is incorrect number"); return false; }; // Get cache parameters try { CacheConfig cache_config(tmp_node); config.cache_params = cache_config; } catch (CacheConfigException& e) { logger.msg(Arc::ERROR, "Error with cache configuration: %s", e.what()); return false; } /* helperLog */ tmp_node = cfg["helperLog"]; if(tmp_node) { config.helper_log = (std::string)tmp_node; helper_log_is_set = true; }; /* helperUtility username command */ std::list helpers; tmp_node = cfg["helperUtility"]; for(; tmp_node; ++tmp_node) { std::string command = tmp_node["command"]; if (command.empty()) { logger.msg(Arc::ERROR, "Command in helperUtility is missing"); return false; } std::string username = tmp_node["username"]; if (username.empty()) { logger.msg(Arc::ERROR, "Username in helperUtility is empty"); return false; } if (username != ".") { logger.msg(Arc::ERROR, "Only user '.' for helper program is supported"); return false; } helpers.push_back(command); } // End of parsing XML node if(!helper_log_is_set) { // Set default config.helper_log = config.control_dir + "/job.helper.errors"; } // Do substitution of control dir and helpers here now we have all the // configuration. 
These are special because they do not change per-user config.Substitute(config.control_dir); for (std::list::iterator helper = helpers.begin(); helper != helpers.end(); ++helper) { config.Substitute(*helper); config.helpers.push_back(*helper); } // Add helper to poll for finished LRMS jobs std::string cmd = Arc::ArcLocation::GetDataDir() + "/scan-"+config.default_lrms+"-job"; cmd = Arc::escape_chars(cmd, " \\", '\\', false); if (!config.conffile.empty()) cmd += " --config " + config.conffile; cmd += " " + config.control_dir; config.helpers.push_back(cmd); return true; } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315731025223 xustar000000000000000030 mtime=1513200601.939037377 30 atime=1513200649.919624199 29 ctime=1513200662.78278152 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/Makefile.in0000644000175000002070000016175613214315731025312 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ pkglibexec_PROGRAMS = gm-kick$(EXEEXT) gm-jobs$(EXEEXT) \ inputcheck$(EXEEXT) arc-vomsac-check$(EXEEXT) \ arc-blahp-logger$(EXEEXT) $(am__EXEEXT_1) subdir = src/services/a-rex/grid-manager DIST_COMMON = README $(dist_pkglibexec_SCRIPTS) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in $(srcdir)/arc-blahp-logger.8.in \ $(srcdir)/arc-config-check.1.in \ $(srcdir)/arc-vomsac-check.8.in \ $(srcdir)/gm-delegations-converter.8.in $(srcdir)/gm-jobs.8.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = arc-vomsac-check.8 arc-blahp-logger.8 gm-jobs.8 \ gm-delegations-converter.8 arc-config-check.1 
CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) am__DEPENDENCIES_1 = libgridmanager_la_DEPENDENCIES = jobs/libjobs.la conf/libconf.la \ log/liblog.la files/libfiles.la run/librun.la misc/libmisc.la \ mail/libmail.la $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) am_libgridmanager_la_OBJECTS = libgridmanager_la-GridManager.lo libgridmanager_la_OBJECTS = $(am_libgridmanager_la_OBJECTS) libgridmanager_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libgridmanager_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ @SQLITE_ENABLED_TRUE@am__EXEEXT_1 = gm-delegations-converter$(EXEEXT) am__installdirs = "$(DESTDIR)$(pkglibexecdir)" \ "$(DESTDIR)$(pkglibexecdir)" "$(DESTDIR)$(man1dir)" \ "$(DESTDIR)$(man8dir)" PROGRAMS = $(pkglibexec_PROGRAMS) am_arc_blahp_logger_OBJECTS = \ arc_blahp_logger-arc_blahp_logger.$(OBJEXT) arc_blahp_logger_OBJECTS = $(am_arc_blahp_logger_OBJECTS) arc_blahp_logger_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) arc_blahp_logger_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(arc_blahp_logger_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_arc_vomsac_check_OBJECTS = \ arc_vomsac_check-arc_vomsac_check.$(OBJEXT) arc_vomsac_check_OBJECTS = $(am_arc_vomsac_check_OBJECTS) arc_vomsac_check_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) arc_vomsac_check_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(arc_vomsac_check_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_gm_delegations_converter_OBJECTS = \ gm_delegations_converter-gm_delegations_converter.$(OBJEXT) gm_delegations_converter_OBJECTS = \ $(am_gm_delegations_converter_OBJECTS) gm_delegations_converter_DEPENDENCIES = libgridmanager.la \ ../delegation/libdelegation.la gm_delegations_converter_LINK = $(LIBTOOL) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(gm_delegations_converter_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_gm_jobs_OBJECTS = gm_jobs-gm_jobs.$(OBJEXT) gm_jobs_OBJECTS = $(am_gm_jobs_OBJECTS) gm_jobs_DEPENDENCIES = libgridmanager.la \ ../delegation/libdelegation.la gm_jobs_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(gm_jobs_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_gm_kick_OBJECTS = gm_kick-gm_kick.$(OBJEXT) gm_kick_OBJECTS = $(am_gm_kick_OBJECTS) gm_kick_DEPENDENCIES = libgridmanager.la gm_kick_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(gm_kick_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_inputcheck_OBJECTS = inputcheck-inputcheck.$(OBJEXT) inputcheck_OBJECTS = $(am_inputcheck_OBJECTS) inputcheck_DEPENDENCIES = libgridmanager.la \ ../delegation/libdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la inputcheck_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(inputcheck_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; 
am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' SCRIPTS = $(dist_pkglibexec_SCRIPTS) DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libgridmanager_la_SOURCES) $(arc_blahp_logger_SOURCES) \ $(arc_vomsac_check_SOURCES) \ $(gm_delegations_converter_SOURCES) $(gm_jobs_SOURCES) \ $(gm_kick_SOURCES) $(inputcheck_SOURCES) DIST_SOURCES = $(libgridmanager_la_SOURCES) \ $(arc_blahp_logger_SOURCES) $(arc_vomsac_check_SOURCES) \ $(gm_delegations_converter_SOURCES) $(gm_jobs_SOURCES) \ $(gm_kick_SOURCES) $(inputcheck_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive man1dir = $(mandir)/man1 man8dir = $(mandir)/man8 NROFF = nroff MANS = $(man_MANS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = jobs run conf misc log mail files loaders jobplugin DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ 
dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = 
@GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = 
@bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @GRIDFTPD_SERVICE_ENABLED_FALSE@JOBPLUGIN_DIR = @GRIDFTPD_SERVICE_ENABLED_TRUE@JOBPLUGIN_DIR = jobplugin SUBDIRS = jobs run conf misc log mail files loaders $(JOBPLUGIN_DIR) @SQLITE_ENABLED_FALSE@GM_DELEGATIONS_CONVERTER = @SQLITE_ENABLED_TRUE@GM_DELEGATIONS_CONVERTER = gm-delegations-converter @SQLITE_ENABLED_FALSE@GM_DELEGATIONS_CONVERTER_MAN = @SQLITE_ENABLED_TRUE@GM_DELEGATIONS_CONVERTER_MAN = gm-delegations-converter.8 noinst_LTLIBRARIES = libgridmanager.la dist_pkglibexec_SCRIPTS = arc-config-check man_MANS = arc-vomsac-check.8 arc-config-check.1 arc-blahp-logger.8 gm-jobs.8 $(GM_DELEGATIONS_CONVERTER_MAN) libgridmanager_la_SOURCES = GridManager.cpp GridManager.h libgridmanager_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) libgridmanager_la_LIBADD = \ jobs/libjobs.la conf/libconf.la log/liblog.la files/libfiles.la \ run/librun.la misc/libmisc.la mail/libmail.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(DBCXX_LIBS) -lpthread gm_kick_SOURCES = gm_kick.cpp gm_kick_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) gm_kick_LDADD = libgridmanager.la gm_jobs_SOURCES = gm_jobs.cpp gm_jobs_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) gm_jobs_LDADD = libgridmanager.la ../delegation/libdelegation.la gm_delegations_converter_SOURCES = gm_delegations_converter.cpp gm_delegations_converter_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) gm_delegations_converter_LDADD = libgridmanager.la 
../delegation/libdelegation.la inputcheck_SOURCES = inputcheck.cpp inputcheck_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) inputcheck_LDADD = libgridmanager.la ../delegation/libdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la arc_vomsac_check_SOURCES = arc_vomsac_check.cpp arc_vomsac_check_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arc_vomsac_check_LDADD = $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) arc_blahp_logger_SOURCES = arc_blahp_logger.cpp arc_blahp_logger_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arc_blahp_logger_LDADD = $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): arc-vomsac-check.8: $(top_builddir)/config.status $(srcdir)/arc-vomsac-check.8.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arc-blahp-logger.8: $(top_builddir)/config.status $(srcdir)/arc-blahp-logger.8.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ gm-jobs.8: $(top_builddir)/config.status $(srcdir)/gm-jobs.8.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ gm-delegations-converter.8: $(top_builddir)/config.status $(srcdir)/gm-delegations-converter.8.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arc-config-check.1: $(top_builddir)/config.status $(srcdir)/arc-config-check.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libgridmanager.la: $(libgridmanager_la_OBJECTS) $(libgridmanager_la_DEPENDENCIES) $(libgridmanager_la_LINK) $(libgridmanager_la_OBJECTS) $(libgridmanager_la_LIBADD) $(LIBS) install-pkglibexecPROGRAMS: $(pkglibexec_PROGRAMS) @$(NORMAL_INSTALL) test -z "$(pkglibexecdir)" || 
$(MKDIR_P) "$(DESTDIR)$(pkglibexecdir)" @list='$(pkglibexec_PROGRAMS)'; test -n "$(pkglibexecdir)" || list=; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p || test -f $$p1; \ then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(pkglibexecdir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(pkglibexecdir)$$dir" || exit $$?; \ } \ ; done uninstall-pkglibexecPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(pkglibexec_PROGRAMS)'; test -n "$(pkglibexecdir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkglibexecdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkglibexecdir)" && rm -f $$files clean-pkglibexecPROGRAMS: @list='$(pkglibexec_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list arc-blahp-logger$(EXEEXT): $(arc_blahp_logger_OBJECTS) $(arc_blahp_logger_DEPENDENCIES) @rm -f arc-blahp-logger$(EXEEXT) $(arc_blahp_logger_LINK) $(arc_blahp_logger_OBJECTS) $(arc_blahp_logger_LDADD) $(LIBS) arc-vomsac-check$(EXEEXT): $(arc_vomsac_check_OBJECTS) $(arc_vomsac_check_DEPENDENCIES) @rm -f arc-vomsac-check$(EXEEXT) $(arc_vomsac_check_LINK) $(arc_vomsac_check_OBJECTS) $(arc_vomsac_check_LDADD) $(LIBS) gm-delegations-converter$(EXEEXT): $(gm_delegations_converter_OBJECTS) $(gm_delegations_converter_DEPENDENCIES) @rm -f gm-delegations-converter$(EXEEXT) $(gm_delegations_converter_LINK) $(gm_delegations_converter_OBJECTS) $(gm_delegations_converter_LDADD) $(LIBS) gm-jobs$(EXEEXT): $(gm_jobs_OBJECTS) $(gm_jobs_DEPENDENCIES) @rm -f gm-jobs$(EXEEXT) $(gm_jobs_LINK) $(gm_jobs_OBJECTS) $(gm_jobs_LDADD) $(LIBS) gm-kick$(EXEEXT): $(gm_kick_OBJECTS) $(gm_kick_DEPENDENCIES) @rm -f gm-kick$(EXEEXT) $(gm_kick_LINK) $(gm_kick_OBJECTS) $(gm_kick_LDADD) $(LIBS) inputcheck$(EXEEXT): $(inputcheck_OBJECTS) $(inputcheck_DEPENDENCIES) @rm -f inputcheck$(EXEEXT) $(inputcheck_LINK) $(inputcheck_OBJECTS) $(inputcheck_LDADD) $(LIBS) install-dist_pkglibexecSCRIPTS: $(dist_pkglibexec_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkglibexecdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibexecdir)" @list='$(dist_pkglibexec_SCRIPTS)'; test -n "$(pkglibexecdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if 
(++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkglibexecdir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkglibexecdir)$$dir" || exit $$?; \ } \ ; done uninstall-dist_pkglibexecSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(dist_pkglibexec_SCRIPTS)'; test -n "$(pkglibexecdir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkglibexecdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkglibexecdir)" && rm -f $$files mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arc_blahp_logger-arc_blahp_logger.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arc_vomsac_check-arc_vomsac_check.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/gm_delegations_converter-gm_delegations_converter.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/gm_jobs-gm_jobs.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/gm_kick-gm_kick.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/inputcheck-inputcheck.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libgridmanager_la-GridManager.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libgridmanager_la-GridManager.lo: GridManager.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libgridmanager_la_CXXFLAGS) $(CXXFLAGS) -MT libgridmanager_la-GridManager.lo -MD -MP -MF $(DEPDIR)/libgridmanager_la-GridManager.Tpo -c -o libgridmanager_la-GridManager.lo `test -f 'GridManager.cpp' || echo '$(srcdir)/'`GridManager.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libgridmanager_la-GridManager.Tpo $(DEPDIR)/libgridmanager_la-GridManager.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='GridManager.cpp' object='libgridmanager_la-GridManager.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ 
$(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libgridmanager_la_CXXFLAGS) $(CXXFLAGS) -c -o libgridmanager_la-GridManager.lo `test -f 'GridManager.cpp' || echo '$(srcdir)/'`GridManager.cpp arc_blahp_logger-arc_blahp_logger.o: arc_blahp_logger.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_blahp_logger_CXXFLAGS) $(CXXFLAGS) -MT arc_blahp_logger-arc_blahp_logger.o -MD -MP -MF $(DEPDIR)/arc_blahp_logger-arc_blahp_logger.Tpo -c -o arc_blahp_logger-arc_blahp_logger.o `test -f 'arc_blahp_logger.cpp' || echo '$(srcdir)/'`arc_blahp_logger.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arc_blahp_logger-arc_blahp_logger.Tpo $(DEPDIR)/arc_blahp_logger-arc_blahp_logger.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arc_blahp_logger.cpp' object='arc_blahp_logger-arc_blahp_logger.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_blahp_logger_CXXFLAGS) $(CXXFLAGS) -c -o arc_blahp_logger-arc_blahp_logger.o `test -f 'arc_blahp_logger.cpp' || echo '$(srcdir)/'`arc_blahp_logger.cpp arc_blahp_logger-arc_blahp_logger.obj: arc_blahp_logger.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_blahp_logger_CXXFLAGS) $(CXXFLAGS) -MT arc_blahp_logger-arc_blahp_logger.obj -MD -MP -MF $(DEPDIR)/arc_blahp_logger-arc_blahp_logger.Tpo -c -o arc_blahp_logger-arc_blahp_logger.obj `if test -f 'arc_blahp_logger.cpp'; then $(CYGPATH_W) 'arc_blahp_logger.cpp'; else $(CYGPATH_W) '$(srcdir)/arc_blahp_logger.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arc_blahp_logger-arc_blahp_logger.Tpo $(DEPDIR)/arc_blahp_logger-arc_blahp_logger.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arc_blahp_logger.cpp' object='arc_blahp_logger-arc_blahp_logger.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_blahp_logger_CXXFLAGS) $(CXXFLAGS) -c -o arc_blahp_logger-arc_blahp_logger.obj `if test -f 'arc_blahp_logger.cpp'; then $(CYGPATH_W) 'arc_blahp_logger.cpp'; else $(CYGPATH_W) '$(srcdir)/arc_blahp_logger.cpp'; fi` arc_vomsac_check-arc_vomsac_check.o: arc_vomsac_check.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_vomsac_check_CXXFLAGS) $(CXXFLAGS) -MT arc_vomsac_check-arc_vomsac_check.o -MD -MP -MF $(DEPDIR)/arc_vomsac_check-arc_vomsac_check.Tpo -c -o arc_vomsac_check-arc_vomsac_check.o `test -f 'arc_vomsac_check.cpp' || echo '$(srcdir)/'`arc_vomsac_check.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arc_vomsac_check-arc_vomsac_check.Tpo $(DEPDIR)/arc_vomsac_check-arc_vomsac_check.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arc_vomsac_check.cpp' object='arc_vomsac_check-arc_vomsac_check.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_vomsac_check_CXXFLAGS) $(CXXFLAGS) -c -o arc_vomsac_check-arc_vomsac_check.o `test -f 'arc_vomsac_check.cpp' || echo '$(srcdir)/'`arc_vomsac_check.cpp arc_vomsac_check-arc_vomsac_check.obj: arc_vomsac_check.cpp 
@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_vomsac_check_CXXFLAGS) $(CXXFLAGS) -MT arc_vomsac_check-arc_vomsac_check.obj -MD -MP -MF $(DEPDIR)/arc_vomsac_check-arc_vomsac_check.Tpo -c -o arc_vomsac_check-arc_vomsac_check.obj `if test -f 'arc_vomsac_check.cpp'; then $(CYGPATH_W) 'arc_vomsac_check.cpp'; else $(CYGPATH_W) '$(srcdir)/arc_vomsac_check.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arc_vomsac_check-arc_vomsac_check.Tpo $(DEPDIR)/arc_vomsac_check-arc_vomsac_check.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arc_vomsac_check.cpp' object='arc_vomsac_check-arc_vomsac_check.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_vomsac_check_CXXFLAGS) $(CXXFLAGS) -c -o arc_vomsac_check-arc_vomsac_check.obj `if test -f 'arc_vomsac_check.cpp'; then $(CYGPATH_W) 'arc_vomsac_check.cpp'; else $(CYGPATH_W) '$(srcdir)/arc_vomsac_check.cpp'; fi` gm_delegations_converter-gm_delegations_converter.o: gm_delegations_converter.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gm_delegations_converter_CXXFLAGS) $(CXXFLAGS) -MT gm_delegations_converter-gm_delegations_converter.o -MD -MP -MF $(DEPDIR)/gm_delegations_converter-gm_delegations_converter.Tpo -c -o gm_delegations_converter-gm_delegations_converter.o `test -f 'gm_delegations_converter.cpp' || echo '$(srcdir)/'`gm_delegations_converter.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gm_delegations_converter-gm_delegations_converter.Tpo $(DEPDIR)/gm_delegations_converter-gm_delegations_converter.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='gm_delegations_converter.cpp' object='gm_delegations_converter-gm_delegations_converter.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gm_delegations_converter_CXXFLAGS) $(CXXFLAGS) -c -o gm_delegations_converter-gm_delegations_converter.o `test -f 'gm_delegations_converter.cpp' || echo '$(srcdir)/'`gm_delegations_converter.cpp gm_delegations_converter-gm_delegations_converter.obj: gm_delegations_converter.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gm_delegations_converter_CXXFLAGS) $(CXXFLAGS) -MT gm_delegations_converter-gm_delegations_converter.obj -MD -MP -MF $(DEPDIR)/gm_delegations_converter-gm_delegations_converter.Tpo -c -o gm_delegations_converter-gm_delegations_converter.obj `if test -f 'gm_delegations_converter.cpp'; then $(CYGPATH_W) 'gm_delegations_converter.cpp'; else $(CYGPATH_W) '$(srcdir)/gm_delegations_converter.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gm_delegations_converter-gm_delegations_converter.Tpo $(DEPDIR)/gm_delegations_converter-gm_delegations_converter.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='gm_delegations_converter.cpp' object='gm_delegations_converter-gm_delegations_converter.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gm_delegations_converter_CXXFLAGS) $(CXXFLAGS) -c -o gm_delegations_converter-gm_delegations_converter.obj `if test -f 
'gm_delegations_converter.cpp'; then $(CYGPATH_W) 'gm_delegations_converter.cpp'; else $(CYGPATH_W) '$(srcdir)/gm_delegations_converter.cpp'; fi` gm_jobs-gm_jobs.o: gm_jobs.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gm_jobs_CXXFLAGS) $(CXXFLAGS) -MT gm_jobs-gm_jobs.o -MD -MP -MF $(DEPDIR)/gm_jobs-gm_jobs.Tpo -c -o gm_jobs-gm_jobs.o `test -f 'gm_jobs.cpp' || echo '$(srcdir)/'`gm_jobs.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gm_jobs-gm_jobs.Tpo $(DEPDIR)/gm_jobs-gm_jobs.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='gm_jobs.cpp' object='gm_jobs-gm_jobs.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gm_jobs_CXXFLAGS) $(CXXFLAGS) -c -o gm_jobs-gm_jobs.o `test -f 'gm_jobs.cpp' || echo '$(srcdir)/'`gm_jobs.cpp gm_jobs-gm_jobs.obj: gm_jobs.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gm_jobs_CXXFLAGS) $(CXXFLAGS) -MT gm_jobs-gm_jobs.obj -MD -MP -MF $(DEPDIR)/gm_jobs-gm_jobs.Tpo -c -o gm_jobs-gm_jobs.obj `if test -f 'gm_jobs.cpp'; then $(CYGPATH_W) 'gm_jobs.cpp'; else $(CYGPATH_W) '$(srcdir)/gm_jobs.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gm_jobs-gm_jobs.Tpo $(DEPDIR)/gm_jobs-gm_jobs.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='gm_jobs.cpp' object='gm_jobs-gm_jobs.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gm_jobs_CXXFLAGS) $(CXXFLAGS) -c -o gm_jobs-gm_jobs.obj `if test -f 'gm_jobs.cpp'; then $(CYGPATH_W) 'gm_jobs.cpp'; else $(CYGPATH_W) '$(srcdir)/gm_jobs.cpp'; fi` gm_kick-gm_kick.o: gm_kick.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gm_kick_CXXFLAGS) $(CXXFLAGS) -MT gm_kick-gm_kick.o -MD -MP -MF $(DEPDIR)/gm_kick-gm_kick.Tpo -c -o gm_kick-gm_kick.o `test -f 'gm_kick.cpp' || echo '$(srcdir)/'`gm_kick.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gm_kick-gm_kick.Tpo $(DEPDIR)/gm_kick-gm_kick.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='gm_kick.cpp' object='gm_kick-gm_kick.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gm_kick_CXXFLAGS) $(CXXFLAGS) -c -o gm_kick-gm_kick.o `test -f 'gm_kick.cpp' || echo '$(srcdir)/'`gm_kick.cpp gm_kick-gm_kick.obj: gm_kick.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gm_kick_CXXFLAGS) $(CXXFLAGS) -MT gm_kick-gm_kick.obj -MD -MP -MF $(DEPDIR)/gm_kick-gm_kick.Tpo -c -o gm_kick-gm_kick.obj `if test -f 'gm_kick.cpp'; then $(CYGPATH_W) 'gm_kick.cpp'; else $(CYGPATH_W) '$(srcdir)/gm_kick.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gm_kick-gm_kick.Tpo $(DEPDIR)/gm_kick-gm_kick.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='gm_kick.cpp' object='gm_kick-gm_kick.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gm_kick_CXXFLAGS) $(CXXFLAGS) -c -o gm_kick-gm_kick.obj `if test -f 'gm_kick.cpp'; then $(CYGPATH_W) 'gm_kick.cpp'; 
else $(CYGPATH_W) '$(srcdir)/gm_kick.cpp'; fi` inputcheck-inputcheck.o: inputcheck.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(inputcheck_CXXFLAGS) $(CXXFLAGS) -MT inputcheck-inputcheck.o -MD -MP -MF $(DEPDIR)/inputcheck-inputcheck.Tpo -c -o inputcheck-inputcheck.o `test -f 'inputcheck.cpp' || echo '$(srcdir)/'`inputcheck.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/inputcheck-inputcheck.Tpo $(DEPDIR)/inputcheck-inputcheck.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='inputcheck.cpp' object='inputcheck-inputcheck.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(inputcheck_CXXFLAGS) $(CXXFLAGS) -c -o inputcheck-inputcheck.o `test -f 'inputcheck.cpp' || echo '$(srcdir)/'`inputcheck.cpp inputcheck-inputcheck.obj: inputcheck.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(inputcheck_CXXFLAGS) $(CXXFLAGS) -MT inputcheck-inputcheck.obj -MD -MP -MF $(DEPDIR)/inputcheck-inputcheck.Tpo -c -o inputcheck-inputcheck.obj `if test -f 'inputcheck.cpp'; then $(CYGPATH_W) 'inputcheck.cpp'; else $(CYGPATH_W) '$(srcdir)/inputcheck.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/inputcheck-inputcheck.Tpo $(DEPDIR)/inputcheck-inputcheck.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='inputcheck.cpp' object='inputcheck-inputcheck.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(inputcheck_CXXFLAGS) $(CXXFLAGS) -c -o inputcheck-inputcheck.obj `if test -f 'inputcheck.cpp'; then $(CYGPATH_W) 'inputcheck.cpp'; else $(CYGPATH_W) '$(srcdir)/inputcheck.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man1: $(man_MANS) @$(NORMAL_INSTALL) test -z "$(man1dir)" || $(MKDIR_P) "$(DESTDIR)$(man1dir)" @list=''; test -n "$(man1dir)" || exit 0; \ { for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \ done; } uninstall-man1: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man1dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ test -z "$$files" || { \ echo " ( cd '$(DESTDIR)$(man1dir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(man1dir)" && rm -f $$files; } install-man8: $(man_MANS) @$(NORMAL_INSTALL) test -z "$(man8dir)" || $(MKDIR_P) 
"$(DESTDIR)$(man8dir)" @list=''; test -n "$(man8dir)" || exit 0; \ { for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.8[a-z]*$$/p'; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^8][0-9a-z]*$$,8,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man8dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man8dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man8dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man8dir)" || exit $$?; }; \ done; } uninstall-man8: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man8dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.8[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^8][0-9a-z]*$$,8,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ test -z "$$files" || { \ echo " ( cd '$(DESTDIR)$(man8dir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(man8dir)" && rm -f $$files; } # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . 
|| ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @list='$(MANS)'; if test -n "$$list"; then \ list=`for p in $$list; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \ if test -n "$$list" && \ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) $(SCRIPTS) $(MANS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibexecdir)" "$(DESTDIR)$(pkglibexecdir)" "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man8dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ clean-pkglibexecPROGRAMS mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-man install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-dist_pkglibexecSCRIPTS \ install-pkglibexecPROGRAMS install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-man1 install-man8 install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-dist_pkglibexecSCRIPTS uninstall-man \ uninstall-pkglibexecPROGRAMS uninstall-man: uninstall-man1 uninstall-man8 .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-noinstLTLIBRARIES clean-pkglibexecPROGRAMS ctags \ ctags-recursive distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dist_pkglibexecSCRIPTS install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-man1 install-man8 install-pdf install-pdf-am \ install-pkglibexecPROGRAMS install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am uninstall-dist_pkglibexecSCRIPTS \ uninstall-man uninstall-man1 uninstall-man8 \ uninstall-pkglibexecPROGRAMS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/arc-config-check0000644000000000000000000000012412733560740026175 xustar000000000000000027 mtime=1466884576.105098 27 atime=1513200576.085721 30 ctime=1513200662.779781484 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/arc-config-check0000755000175000002070000005513412733560740026255 0ustar00mockbuildmock00000000000000#!/usr/bin/perl -w =head1 NAME arc-config-check - checks the arc.conf for inconsistencies, known problems or (in a future development) just general bad taste. =head1 SYNOPSIS arc-config-check --printall =head1 DESCRIPTION The motivation behind this little script was to have a repository for automated tests on issues that came up on the NorduGrid developers mailing list. As such this script indicates directories that are not present, checks host certificates, CA certificates and CRLs, validates the sanity of ARC configuration and tests for clock skew. BECAUSE EVERY INSTALLATION OF ARC IS DIFFERENT THIS UTILITY ONLY SUGGESTS WHAT COULD BE WRONG. 
SOMETIMES IT IS OVERRESTRICTIVE. AND SOMETIMES IT CAN MISS SOME MISCONFIGURATION. NEVER TREAT RESULTS PRODUCED BY IT AS ULTIMATE TRUTH. =head1 OPTIONS =over 4 =item --config Specifies the location of the config file, by default it is /etc/arc.conf =item --printall Lists all variable names of the config file together with their values. =item --timeserver Allows the specification of a server against which to test the local system's time. =item --skip-warnings Do not show warnings. =item --help Quick summary of options, =item --man Detailed man page. =item --openssl-path Path to openssl binary, determined with 'which openssl' if not set. =back =cut #################### P R E A M B E L and options parsing #################### use strict; use warnings; use Getopt::Long; my ($conffile,$printall,$skip_warnings,$help,$man)=("/etc/arc.conf",0,0,0,0); #Please make sure this reference server is not one you administer yourself.\n"; my $timeserver="europe.pool.ntp.org"; my $globusloc = $ENV{"GLOBUS_LOCATION"}; my $arcloc = undef; if (defined $ENV{"ARC_LOCATION"}) { $arcloc = $ENV{"ARC_LOCATION"}; } else { $arcloc = "/usr"; } my $OS = `uname`; chomp $OS; my $usercert; my $hostcert; my $CApath; my $opensslpath; my $verbose=0; my $debug=0; GetOptions( "config:s" => \$conffile, "printall" => \$printall, "skip-warnings" => \$skip_warnings, "timeserver:s" => \$timeserver, "openssl-path:s" => \$opensslpath, "verbose" => \$verbose, "debug" => \$debug, "help" => \$help, "man" => \$man ) or die "Could not parse options.\n"; if ( $man or $help ) { # Load Pod::Usage only if needed. require "Pod/Usage.pm"; import Pod::Usage; pod2usage(1) if $help; pod2usage(VERBOSE => 2) if $man; } # key counters my $warnings=0; my $errors=0; $verbose=1 if $debug; ########## S E E D A S S U M P T I O N S F O R R U L E S ############ # Some bits of the configuration are dynamic and may cross-reference other # bits of the configuration. This hash shall keep track of these. my %introducedSections = ( "group" => [], "gridftpd" => [], "queue" => [], "infosys/cluster/registration" => [], "infosys/index" => [], "infosys/se" => [], "infosys/site" => [], "janitor" => [], "queue" => [], "se" => [] ); print STDERR "The following sections have configurable subgroups: " . join(", ",keys %introducedSections)."\n" if $verbose; ################ U T I L R O U T I N E S ################################# $opensslpath=`which openssl | tr -d '\n'` unless defined($opensslpath); # prints and counts a warning sub w($) { my $s = shift; if (!$skip_warnings) { print STDERR "W: $s"; $warnings++; } } # prints and counts an error sub e($) { my $s = shift; print STDERR "E: $s"; $errors++; } sub v($) { return unless $verbose or $debug; my $s = shift; print STDERR "$s"; } ####################### C H E C K S ####################################### =head1 PERFORMED TESTS =over 4 =item timecheck The current time is compared with an external time server. A clock shift higher than a maximally allowed maxtimediff results in an error. 
=cut sub timecheck($$$) { my ($timeserver, $maxtimediff, $maxtimediffwarn) = @_; my $timeoffset = undef; my $ntpdate = "/usr/sbin/ntpdate"; unless ( -x $ntpdate ) { w("Could not find location of 'ntpdate'.\n"); return; } unless (open(NTPDATE, "$ntpdate -q $timeserver |")) { w("Could not properly invoke 'ntpdate'.\n"); return; } while () { next unless m/^server/; if (m/offset *[-+]?([0-9]*\.[0-9]*)/) { $timeoffset = $1; } } close NTPDATE; if (defined $timeoffset) { if (abs($timeoffset)>=$maxtimediff) { e("Timecheck: Your time differs by more than " . "$maxtimediff seconds ($timeoffset seconds) from the " . "public time server '$timeserver'\n"); } elsif (abs($timeoffset)>=$maxtimediffwarn) { w("Timecheck: Your time differs slightly " . "($timeoffset seconds) from the public time " . "server '$timeserver'.\n"); } } else { w("Timecheck: Can't check the time\n"); } } =item check of permissions The permission to access several different directories are checked. The first argument is the file to be checked. The second is the permission that is should have. The third is a binary mask that selects the bits that are to be inspected. =cut sub permcheck($$$) { my ($filename, $p, $mask) = @_; my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size, $atime,$mtime,$ctime,$blksize,$blocks) = stat($filename); $mode &= 07777; printf "$filename: mode %04o to be compared with %04o at mask %04o\n",$mode,$p,$mask if $debug; $mode &= $mask; return ($p == $mode); } # this performs simple stateless checks of configuration entries sub confchecktripel($$$) { my ($block, $name, $value) = @_; printf STDOUT "Checking in block %s, name %s, value %s\n",$block,$name,$value if ($verbose); # check the certificate if ($block eq "common" and $name eq "x509_user_cert") { if (! -e $value) { e("The host certificate '$value' does not exist or is unreadable.\n"); } elsif (! -O $value) { e("The host certificate '$value' is not owned by this user.\n"); } elsif (!permcheck($value,0644,0777) and !permcheck($value,0444,0777)) { e("Permission of '$value' must be 'rw-r--r--' or 'r--r--r--'\n"); } else { $hostcert=$value; } } # check the key elsif ($block eq "common" and $name eq "x509_user_key") { if (! -e $value) { e("The host key '$value' does not exist or is unreadable.\n"); } elsif (! -O $value) { e("The host key '$value' is not owned by this user.\n"); } elsif (!permcheck($value,0400,0777)) { e("Permission of '$value' must be 'r--------'\n"); } } # check the certificate direcotry elsif ($block eq "common" and $name eq "x509_cert_dir") { if (! -d $value) { e("$name: The certificate directory does not exist.\n"); } else { my @r0s=glob($value."/*.r0"); if ($#r0s == -1) { w("$name: There are no certificate revocation lists.\n") unless $skip_warnings; } else { require File::stat; my $t=time(); my $maxdiffsecs=60*60*24*2; # two days foreach my $r0 (@r0s) { my ($dev,$ino,$mode,$nlink, $uid,$gid,$rdev,$size, $atime,$mtime,$ctime, $blksize,$blocks) = stat($r0); if ($t < $mtime ) { e("$r0: mtime in future\n"); } elsif ($t > $mtime + $maxdiffsecs) { w("$r0: Older than $maxdiffsecs seconds. rerun fetch-crl\n"); } } } $CApath=$value; } } # check the cache directory elsif ($block eq "grid-manager" and $name eq "cachedir") { my @dirs=split(/\s+/,$value); # If available, second dir is on workernode my $d = $dirs[0]; if (! -d $d) { e("cachedir: not existing at '$d'\n"); } } # check the session directory elsif ($block eq "grid-manager" and $name eq "sessiondir") { my @dirs=split(/\s+/,$value); my $dpath = $dirs[0]; if (! 
-d $dpath) { e("sessiondir: not existing at '$dpath'\n"); } else { v("$name exists."); } if (scalar @dirs > 1) { # If multivalued second should be the drain instruction my $drain = $dirs[1]; if ($drain ne "drain") { e("Sessiondir unknown instruction: '$drain'\n"); } else { w("Sessiondir $dpath is set to drain. Jobs will not be accepted if this is the only sessiondir.\n"); } } } # check the controldir elsif ($block eq "grid-manager" and $name eq "controldir") { if (! -d $value) { e("$value: control directory ($name) does not exist\n"); } elsif (!permcheck($value,0755,0777)) { e("$value: directory ($name) should be 755\n"); } } # check all remaining directory entries of the grid-manager block for existence elsif ($block eq "grid-manager" and $name =~ m/dir$/) { if (! -d $value) { e("$value: directory ($name) does not exist\n"); } else { v("$name exists."); } if ($name =~ /^(tmpdir)$/) { if (! permcheck($value,0777,0777)) { e("The tmpdir directory must be writable and have the sticky bit set (chmod +t \"$name\")\n"); } } } # Check for logrotate paths if ($block =~ /common|grid-manager|gridftp/ and $name eq 'logfile') {check_logrotate($block,$name,$value)}; # check for any mount dirs if ($name eq "mount") { if (! -d "$value") { e("$value: directory ($name) does not exist\n"); } else { v("$name exists."); } } # check few infosys commands if ($block eq "infosys") { if ($name eq "slapd_loglevel" and $value gt 0) { w("slapd_loglevel > 0 should only be used for development. DO NOT USE for production and testing.\nConfigure bdii_debug_level=\"DEBUG\" instead.\n"); } if ($name ne "bdii_debug_level" and $name =~ /^bdii/ ) { e("Starting from release 13.02, BDII configuration commands are strongly deprecated. Use only for Development.\nThe only allowed command is bdii_debug_level\n"); } # check logs locations for logrotation if ($name =~ /providerlog|registrationlog/) {check_logrotate($block,$name,$value)}; } # check compliance of AdminDomain name as a LDAP DNs # TODO: add to startup script the capability of escaping special # chars. if ($block eq "infosys/admindomain") { if ($name eq "name") { w("Warning: $block $name not configured. Default AdminDomain name is UNDEFINEDVALUE.\n") if $value eq ''; # bad symbols from RFC 4514 my @syntaxerrors; push (@syntaxerrors, ("\n\tThe following unicode chars are not allowed:", $&)) while ( $value =~ /\x{0022}|\x{002B}|\x{002C}|\x{003B}|\x{003C}|\x{003E}|\x{005C}/g ); push (@syntaxerrors, ("\n\tBlank space or \"#\" symbol at the beginning of name are not allowed")) if ( $value =~ /^#|^\s/ ); push (@syntaxerrors, ("\n\tBlank space at the end of name is not allowed")) if ( $value =~ /\s$/ ); push (@syntaxerrors, ("\n\tNull character unicode (U+0000) not allowed")) if ( $value =~ /\x{0000}/ ); e("$block option $name contains the following errors: @syntaxerrors \n\tLDAP Infosys will not start. 
\n\tPlease change AdminDomain name accordingly *\n") if @syntaxerrors; } } } # Checks log locations for standard location sub check_logrotate($$$$) { my ($block, $name, $value, $defaultlocation) = @_; my $defaultpath = '/var/log/arc/'; my $defaultlocations = { 'commonlogfile' => $defaultpath.'gridftpd.log', 'grid-managerlogfile' => $defaultpath.'grid-manager.log', 'gridftpdlogfile' => $defaultpath.'gridftpd.log', 'infosysproviderlog' => $defaultpath.'infoprovider.log', 'infosysregistrationlog' => $defaultpath.'inforegistration.log' }; $defaultlocation = $defaultlocations->{$block.$name} || $defaultpath; if ($value ne $defaultlocation ) { w("[$block] $name: $value is not in the default location ($defaultlocation). Logrotation will not work, check your system logrotation configuration files\n"); if ( $block =~ /common|grid-manager|gridftp/ ) { w("[$block] supports independent logrotation. Check ARC CE System Administrator manual to configure. Disable system logrotate configuration for it if you do this!\n"); } } } # Get all valid configuration parameters and blocks from arc.conf.reference. # Dynamic blocks which match those in %introducedSections are not included. sub validconfparams() { my @validparams; my @validblocks; unless (open (ARCREF, "<$arcloc/share/arc/examples/arc.conf.reference")) { w("Could not open arc.conf.reference. No arc.conf parameter checking will be done\n"); return ( \@validparams, \@validblocks ); } while (my $line = ) { if ($line =~ m/^#(\w*)\s*=(.*)$/) { # match #parameter="value" my $name = $1; # take out spaces and quotes $name =~ s/^\s*//; $name =~ s/\s*$//; $name =~ s/^"(.*)"$/$1/; $name =~ s/^'(.*)'$/$1/; if (!grep(/^$name$/, @validparams)) { push(@validparams, $name); } } elsif ($line =~ m|^#\[(.*)\]$|) { # match #[section] my $blockname = $1; my $dynamic = 0; foreach my $k (keys %introducedSections) { if ($blockname =~ m/^$k.*/) { $dynamic = 1; } } push(@validblocks, $blockname) if !$dynamic; } } return ( \@validparams, \@validblocks ); } =item configuration check General checks on the sensibility of the arc.conf =cut sub confcheck($) { my ($arcconf) = @_; my $config = {}; unless (open (CONFIGFILE, "<$conffile")) { e("Could not open '$arcconf' for reading.\n"); return; } # parameters which are allowed more than once # taken from arc.conf.reference - somehow this needs to be kept in sync my @multiple = ("voms_trust_chain", "source", "filter", "subject", "file", "voms", "sessiondir", "cachedir", "remotecachedir", "jobreport", "share_limit", "authplugin", "allowsubmit", "copyurl", "linkurl", "definedshare", "deliveryservice", "unixmap", "unixgroup", "unixvo", "dir", "remotegmdirs", "allowreg", "cluster_owner", "authorizedvo", "clustersupport", "opsys", "benchmark", "nodeaccess", "localse", "cacheserver", "cacheaccess"); # Blocks which are allowed to be empty my @emptyblocks = ("gangliarc"); # valid parameter names and blocks my ($validparams, $validblocks) = validconfparams(); my $blockname = undef; my $blockcontents = 0; my $c = 0; my $vo_counter = 0; while (my $line = ) { if ($line =~ m/^<\?xml version=.*\?>$/) { w("XML configuration can not yet be validated\n"); return; } $c++; next if $line =~ m/^#/; next if $line =~ m/^\s*$/; # a new block? if ($line =~ m/^\s*\[(.+)\]\s*$/) { if (defined $blockname and $blockcontents == 0 and !grep(/^$blockname$/, @emptyblocks)) { e("$arcconf: Block \"$blockname\" is empty\n"); } $blockname = $1; $blockcontents = 0; # blocknames must be unique # but there is a special case of vo-blocks... 
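# A [vo] block may legitimately occur several times, so each occurrence is
# stored under an internal key "vo|<counter>"; the numeric suffix is stripped
# again when --printall echoes the configuration back.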
if ($blockname eq "vo") { $blockname .= "|" . ++$vo_counter; } if (defined $config->{$blockname}) { if (!$skip_warnings) { w("$arcconf:$c: Block '" . $blockname ."' is defined multiple times\n"); } $warnings++; } $config->{$blockname}{">]|found|[<"} = $c; next; } my $name; my $value; # look out for crap unless ($line =~ m/^([^=]*)=(.*)$/) { e("$arcconf: $c: Line is erroneous!\n"); next; } $name = $1; $value = $2; $name =~ s/^\s*//; $name =~ s/\s*$//; $name =~ s/^"(.*)"$/$1/; $name =~ s/^'(.*)'$/$1/; $value =~ s/^\s*//; $value =~ s/\s*$//; $value =~ s/^"(.*)"$/$1/; $value =~ s/^'(.*)'$/$1/; if ($name =~ m/^"/ and $name !~ m/"$/ or $name =~ m/^'/ and $name !~ m/'$/ or $name !~ m/^"/ and $name =~ m/"$/ or $name !~ m/^'/ and $name =~ m/'$/) { w("$arcconf: $c: badly quoted attribute name?\n"); } if ($value =~ m/^"/ and $value !~ m/"$/ or $value =~ m/^'/ and $value !~ m/'$/ or $value !~ m/^"/ and $value =~ m/"$/ or $value !~ m/^'/ and $value =~ m/'$/) { w("$arcconf: $c: badly quoted value?\n"); $warnings++; } # are we within a block? unless (defined $blockname) { e("$arcconf:$c: found value=name pair which is " . "not part of a block\n"); next; } # check we have a valid parameter name if (@$validparams != 0 and !grep(/^$name$/, @$validparams)) { w("$arcconf:$c: found unknown parameter $name\n"); next; } # check if we know more about this kind of tripel confchecktripel($blockname, $name, $value); #count $blockcontents++; if ($config->{$blockname}{$name}) { if (grep(/^$name$/, @multiple)) { $config->{$blockname}{$name} .= ">]|sep|[<" . $value; } else { w("$arcconf:$c: duplicate parameter: ".$name."\n"); } } else { $config->{$blockname}{$name} = $value; } } close CONFIGFILE; check_completeness($config); if ($printall) { foreach my $key (sort { $config->{$a}{">]|found|[<"} <=> $config->{$b}{">]|found|[<"} } keys %$config) { printf "\n# line: %d\n", $config->{$key}{">]|found|[<"}; if ($key =~ m/^(.*)\|[0-9]+$/) { printf "[%s]\n", $1; } else { printf "[%s]\n", $key; } my $x = $config->{$key}; foreach my $item (sort keys %$x) { next if $item eq ">]|found|[<"; foreach my $val (split />\]\|sep\|\[{$key}{$item}) { printf "%s=\"%s\"\n", $item, $val; } } } } } =item check for completeness Return error if the presence of one value implies one of another =cut sub check_completeness() { my $config=shift; my @required=("common", "grid-manager", "infosys", "cluster"); my ($validparams, $validblocks) = validconfparams(); # testing for unknown foreach my $k (keys %$config) { next if grep(/^$k$/,@$validblocks); my @secs; map { push(@secs,$_) if $k=~/^$_/;} keys %introducedSections; print STDERR "$k -> " . join(":",@secs)."\n" if $debug; if (0 == @secs) { unless ($skip_warnings) { w("Unknown group identifier '$k'\n"); } next; } elsif (1 < @secs) { die "Programming error: found multiple sections " .join(",",@secs)." to be prefixes of secion '$k'.\n"; } my $secs1 = $secs[0]; unless (exists($introducedSections{$secs1})) { die "Programming error: section '$secs1' not amongs keys of hash %introducedSections: " . 
join(",",keys %introducedSections)."\n"; } my $listref=$introducedSections{$secs1}; push @$listref,$k; } # testing for the missing foreach my $k (@required) { unless (exists($config->{$k})) { e("Missing group identifier '$k'\n"); } } # hostname my $hn=`hostname -f`; chomp($hn); my $hn_conf; if (exists($config->{common}{hostname})) { $hn_conf = $config->{common}{hostname}; } elsif (exists($config->{cluster}{hostname})) { $hn_conf = $config->{cluster}{hostname}; } if ($hn_conf) { if ($hn_conf ne "$hn") { w("The entry of the full hostname (".$hn_conf . ") is better\n" ." equal to `hostname -f` ($hn).\n" ." Also test reverse lookup of the hostname.\n"); } } else { e("The entry hostname must not be missed.\n"); } # lrms if (!exists($config->{common}{lrms}) && !exists($config->{"grid-manager"}{lrms})) { e("The entry lrms must not be missed.\n"); } } =item check of applications Some applications are required for the server to be functional. =cut sub check_applications() { unless ( -x "/usr/bin/time" ) { e("Could not find GNU time utility (or a substitute) at /usr/bin/time."); } } =item check of libraries uses ldd to check if all libraries are installed =cut sub check_libraries() { my $prob=0; if (! defined($globusloc)) { v("check_libraries: interpreting undefined GLOBUS_LOCATION as /usr\n"); $globusloc="/usr"; } unless ( -e $globusloc) { e("Cannot find Globus at $globusloc: no such file or directory\n"); $prob++; } if (defined($arcloc)) { unless (-e $arcloc) { e("Cannot find ARC at $arcloc: no such file or directory\n"); if (defined $ENV{"ARC_LOCATION"}) { w("The location was retrieved from the environment variable 'ARC_LOCATION'. Maybe this needs an update."); } $prob++; } } return if ($prob); my @globmes = ( $arcloc . "/bin/arc*", $arcloc . "/libexec/arc/gm*", $arcloc . "/sbin/arc*", $arcloc . "/sbin/grid*", ); push(@globmes, $arcloc . "/lib/*") if "/usr" ne "$arcloc"; # takes too long, little gain expected my @to_check; foreach my $d (@globmes) { @to_check = ( @to_check , glob $d ); } if ($verbose) { print "Checking the following files for their dependencies:\n"; print join("\n",@to_check); print "\n"; } my %missing; foreach my $file ( @to_check ) { next unless -f $file; next if $file =~ m/\.a$/; next if $file =~ m/\.la$/; my $command = "LC_ALL=C "; $command .= "LD_LIBRARY_PATH="; $command .= "$arcloc/lib:" if "/usr" ne "$arcloc" and "/usr/" ne "$arcloc"; $command .= "\$LD_LIBRARY_PATH "; $command .= "ldd $file 2>/dev/null |"; my %libs; if (open LDD, $command) { while () { chomp; my $lib = $_; if ($lib =~ m/^\s*([^\s]+)\.so\.([^\s]+)\s*=>/) { my $a=$1; my $b=$2; my $index=$a; $index =~ s/_(gcc)(16|32|64|128|256)(dbg)?(pthr)?\././; if (defined $libs{$index}) { e("$file: uses multiple versions of lib " . "$a: ".$libs{$index}." and $b. This might not work\n"); } else { $libs{$index} = $b; } } next unless /not found/; $lib =~ m/^\s*([^\s]+)\s*=>/; my $missing = $1; unless (defined $missing{$missing}) { $missing{$missing} = 1; e("$file: needs $missing. Not found.\n"); } } close LDD; } else { if (!$skip_warnings) { w("Cannot check used libraries of $file.\n"); } } } } sub check_certificates() { # check if CAdir is present if (!defined($CApath)) { e("The x509_cert_dir was not set.\n"); $CApath="/etc/grid-security/certificates"; if ( ! -d $CApath) { return; } } elsif ( ! 
-d "$CApath") { e("CApath does not exist at '$CApath"); return; } unless (defined($opensslpath) and "" ne "$opensslpath") { w("openssl application not in path and not specified.\n"); return; } unless ( -x "$opensslpath") { e("Cannot execute openssl application at '$opensslpath'"); } # check of host certificate if (!defined($hostcert)) { $hostcert="/etc/grid-security/hostcert.pem"; } if ( -f $hostcert) { if (system("$opensslpath verify -CApath $CApath $hostcert | grep 'OK'")) { e("Verification of host cert at $hostcert with $opensslpath failed.\n"); } if (!system("$opensslpath verify -CApath $CApath $hostcert | grep 'expired'")) { e("Host certificate $hostcert has expired.\n"); } } else { if (!$skip_warnings) { w("Not verifying host cert which is not present at $hostcert (should already be reported).\n"); } } } timecheck($timeserver, 0.2, 0.01); confcheck($conffile); check_applications(); check_libraries(); check_certificates(); if (0 == $errors) { print "Found no apparent failures.\n"; } else { printf "Found %d failure%s.\n", $errors, ($errors > 1) ? "s" : ""; } if ($warnings) { printf "Found %d non-critical issue%s%s.\n", $warnings, ($warnings > 1) ? "s" : "", ($skip_warnings?" (not shown)":""); } exit $errors; =back =head1 SEE ALSO http://www.nordugrid.org and our mailing lists. =cut # EOF nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/GridManager.h0000644000000000000000000000012212046742570025516 xustar000000000000000027 mtime=1352385912.462662 26 atime=1513200576.01172 29 ctime=1513200662.79178163 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/GridManager.h0000644000175000002070000000122012046742570025560 0ustar00mockbuildmock00000000000000#ifndef GRID_MANAGER_H #define GRID_MANAGER_H #include namespace ARex { class DTRGenerator; class CommFIFO; class GMConfig; class sleep_st; class GridManager { private: Arc::SimpleCounter active_; bool tostop_; Arc::SimpleCondition* sleep_cond_; CommFIFO* wakeup_interface_; GMConfig& config_; sleep_st* wakeup_; DTRGenerator* dtr_generator_; GridManager(); GridManager(const GridManager&); static void grid_manager(void* arg); bool thread(void); public: GridManager(GMConfig& config); ~GridManager(void); operator bool(void) { return (active_.get()>0); }; }; } // namespace ARex #endif // GRID_MANAGER_H nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/mail0000644000000000000000000000013013214316026024020 xustar000000000000000029 mtime=1513200662.98878404 30 atime=1513200668.718854121 29 ctime=1513200662.98878404 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/mail/0000755000175000002070000000000013214316026024145 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/mail/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515026142 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200602.368042624 30 ctime=1513200662.983783979 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/mail/Makefile.am0000644000175000002070000000051112052416515026201 0ustar00mockbuildmock00000000000000pkglibexec_PROGRAMS = smtp-send dist_pkglibexec_SCRIPTS = smtp-send.sh noinst_LTLIBRARIES = libmail.la smtp_send_SOURCES = smtp-send.c smtp_send_LDADD = $(LIBRESOLV) $(SOCKET_LIBS) libmail_la_SOURCES = send_mail.cpp send_mail.h libmail_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/mail/PaxHeaders.7502/send_mail.cpp0000644000000000000000000000012412702005750026537 
xustar000000000000000027 mtime=1460145128.602165 27 atime=1513200576.084721 30 ctime=1513200662.985784003 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/mail/send_mail.cpp0000644000175000002070000000554512702005750026615 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "../files/ControlFileContent.h" #include "../files/ControlFileHandling.h" #include "../run/RunParallel.h" #include "../conf/GMConfig.h" #include "send_mail.h" namespace ARex { static Arc::Logger& logger = Arc::Logger::getRootLogger(); /* check if have to send mail and initiate sending */ bool send_mail(GMJob &job,const GMConfig& config) { char flag = GMJob::get_state_mail_flag(job.get_state()); if(flag == ' ') return true; std::string notify = ""; std::string jobname = ""; JobLocalDescription *job_desc = job.GetLocalDescription(config); if(job_desc != NULL) { jobname=job_desc->jobname; notify=job_desc->notify; } else { logger.msg(Arc::ERROR,"Failed reading local information"); }; // job_local_read_notify(job.get_id(),user,notify); if(notify.length() == 0) return true; /* save some time */ Arc::Run* child = NULL; std::string failure_reason=job.GetFailure(config); if(job_failed_mark_check(job.get_id(),config)) { if(failure_reason.length() == 0) failure_reason=""; }; for(std::string::size_type n=0;;) { n=failure_reason.find('\n',n); if(n == std::string::npos) break; failure_reason[n]='.'; }; failure_reason = '"' + failure_reason + '"'; std::string cmd(Arc::ArcLocation::GetToolsDir()+"/smtp-send.sh"); cmd += " " + std::string(job.get_state_name()); cmd += " " + job.get_id(); cmd += " " + config.ControlDir(); cmd += " " + config.SupportMailAddress(); cmd += " \"" + jobname + "\""; cmd += " " + failure_reason; /* go through mail addresses and flags */ std::string::size_type pos=0; std::string::size_type pos_s=0; /* max 3 mail addresses */ std::string mails[3]; int mail_n=0; bool right_flag = false; /* by default mail is sent when job enters states PREPARING and FINISHED */ if((flag == 'b') || (flag == 'e')) right_flag=true; for(;;) { if(pos_s >= notify.length()) break; if((pos = notify.find(' ',pos_s)) == std::string::npos) pos=notify.length(); if(pos==pos_s) { pos++; pos_s++; continue; }; std::string word(notify.substr(pos_s,pos-pos_s)); if(word.find('@') == std::string::npos) { /* flags */ if(word.find(flag) == std::string::npos) { right_flag=false; } else { right_flag=true; }; pos_s=pos+1; continue; }; if(right_flag) { mails[mail_n]=word; mail_n++; }; if(mail_n >= 3) break; pos_s=pos+1; }; if(mail_n == 0) return true; /* not sending to anyone */ for(mail_n--;mail_n>=0;mail_n--) { cmd += " " + mails[mail_n]; }; logger.msg(Arc::DEBUG, "Running mailer command (%s)", cmd); if(!RunParallel::run(config,job,cmd,&child)) { logger.msg(Arc::ERROR,"Failed running mailer"); return false; }; child->Abandon(); delete child; return true; } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/mail/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315732026147 xustar000000000000000030 mtime=1513200602.425043321 30 atime=1513200650.023625471 30 ctime=1513200662.984783991 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/mail/Makefile.in0000644000175000002070000007141613214315732026226 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ pkglibexec_PROGRAMS = smtp-send$(EXEEXT) subdir = src/services/a-rex/grid-manager/mail DIST_COMMON = README $(dist_pkglibexec_SCRIPTS) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libmail_la_LIBADD = am_libmail_la_OBJECTS = libmail_la-send_mail.lo libmail_la_OBJECTS = $(am_libmail_la_OBJECTS) libmail_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libmail_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am__installdirs = "$(DESTDIR)$(pkglibexecdir)" \ "$(DESTDIR)$(pkglibexecdir)" PROGRAMS = $(pkglibexec_PROGRAMS) am_smtp_send_OBJECTS = smtp-send.$(OBJEXT) smtp_send_OBJECTS = $(am_smtp_send_OBJECTS) am__DEPENDENCIES_1 = smtp_send_DEPENDENCIES = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' SCRIPTS = $(dist_pkglibexec_SCRIPTS) 
DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libmail_la_SOURCES) $(smtp_send_SOURCES) DIST_SOURCES = $(libmail_la_SOURCES) $(smtp_send_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = 
@GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ 
S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ dist_pkglibexec_SCRIPTS = smtp-send.sh noinst_LTLIBRARIES = libmail.la smtp_send_SOURCES = smtp-send.c smtp_send_LDADD = $(LIBRESOLV) $(SOCKET_LIBS) libmail_la_SOURCES = send_mail.cpp send_mail.h libmail_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) all: all-am .SUFFIXES: .SUFFIXES: .c .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 
1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/mail/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/mail/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libmail.la: $(libmail_la_OBJECTS) $(libmail_la_DEPENDENCIES) $(libmail_la_LINK) $(libmail_la_OBJECTS) $(libmail_la_LIBADD) $(LIBS) install-pkglibexecPROGRAMS: $(pkglibexec_PROGRAMS) @$(NORMAL_INSTALL) test -z "$(pkglibexecdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibexecdir)" @list='$(pkglibexec_PROGRAMS)'; test -n "$(pkglibexecdir)" || list=; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p || test -f $$p1; \ then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(pkglibexecdir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(pkglibexecdir)$$dir" || exit $$?; \ } \ ; done uninstall-pkglibexecPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(pkglibexec_PROGRAMS)'; test -n "$(pkglibexecdir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkglibexecdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkglibexecdir)" && rm -f $$files clean-pkglibexecPROGRAMS: @list='$(pkglibexec_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list smtp-send$(EXEEXT): $(smtp_send_OBJECTS) $(smtp_send_DEPENDENCIES) @rm -f smtp-send$(EXEEXT) $(LINK) $(smtp_send_OBJECTS) $(smtp_send_LDADD) $(LIBS) install-dist_pkglibexecSCRIPTS: $(dist_pkglibexec_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkglibexecdir)" || $(MKDIR_P) 
"$(DESTDIR)$(pkglibexecdir)" @list='$(dist_pkglibexec_SCRIPTS)'; test -n "$(pkglibexecdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkglibexecdir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkglibexecdir)$$dir" || exit $$?; \ } \ ; done uninstall-dist_pkglibexecSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(dist_pkglibexec_SCRIPTS)'; test -n "$(pkglibexecdir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkglibexecdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkglibexecdir)" && rm -f $$files mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmail_la-send_mail.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/smtp-send.Po@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(COMPILE) -c $< .c.obj: @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` .c.lo: @am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ 
$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libmail_la-send_mail.lo: send_mail.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmail_la_CXXFLAGS) $(CXXFLAGS) -MT libmail_la-send_mail.lo -MD -MP -MF $(DEPDIR)/libmail_la-send_mail.Tpo -c -o libmail_la-send_mail.lo `test -f 'send_mail.cpp' || echo '$(srcdir)/'`send_mail.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmail_la-send_mail.Tpo $(DEPDIR)/libmail_la-send_mail.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='send_mail.cpp' object='libmail_la-send_mail.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmail_la_CXXFLAGS) $(CXXFLAGS) -c -o libmail_la-send_mail.lo `test -f 'send_mail.cpp' || echo '$(srcdir)/'`send_mail.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file 
|| test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) $(SCRIPTS) installdirs: for dir in "$(DESTDIR)$(pkglibexecdir)" "$(DESTDIR)$(pkglibexecdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ clean-pkglibexecPROGRAMS mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-dist_pkglibexecSCRIPTS \ install-pkglibexecPROGRAMS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_pkglibexecSCRIPTS \ uninstall-pkglibexecPROGRAMS .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES clean-pkglibexecPROGRAMS \ ctags distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dist_pkglibexecSCRIPTS install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibexecPROGRAMS \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-dist_pkglibexecSCRIPTS \ uninstall-pkglibexecPROGRAMS # Tell 
versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/mail/PaxHeaders.7502/send_mail.h0000644000000000000000000000012412702005750026204 xustar000000000000000027 mtime=1460145128.602165 27 atime=1513200576.084721 30 ctime=1513200662.987784027 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/mail/send_mail.h0000644000175000002070000000040612702005750026251 0ustar00mockbuildmock00000000000000#ifndef __ARC_GM_SEND_MAIL_H__ #define __ARC_GM_SEND_MAIL_H__ namespace ARex { /* Starts external process smtp-send.sh to send mail to user about changes in job's status. */ bool send_mail(GMJob &job, const GMConfig& config); } // namespace ARex #endif nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/mail/PaxHeaders.7502/smtp-send.c0000644000000000000000000000012312303060417026153 xustar000000000000000027 mtime=1393320207.952724 27 atime=1513200576.083721 29 ctime=1513200662.98878404 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/mail/smtp-send.c0000644000175000002070000001326512303060417026230 0ustar00mockbuildmock00000000000000/* Simple program to mail information piped to stdin to address 'to' from address 'from'. It tries to connect directly to SMTP server responsible for destination address. */ #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #define SMTP_PORT 25 typedef union { HEADER hdr; unsigned char buf[8192]; } answer_t; void usage(void) { fprintf(stdout,"smtp-send from to\n"); exit(1); } int send_mail(char* mail_server ,char* mail_from,char* mail_to) { char buf[256]; int s,i; FILE* S; int err_code; char my_hostname[256]; struct addrinfo *res = NULL; struct addrinfo *r = NULL; memset(my_hostname,0,256); gethostname(my_hostname,255); if(getaddrinfo(mail_server,NULL,NULL,&res) != 0) return 2; if(res == NULL) return 2; for(r=res;r;r=r->ai_next) { if(r->ai_addr == NULL) continue; if(r->ai_socktype != SOCK_STREAM) continue; if(r->ai_protocol != IPPROTO_TCP) continue; if(r->ai_family == AF_INET) { ((struct sockaddr_in*)(r->ai_addr))->sin_port=htons(SMTP_PORT); break; }; if(r->ai_family == AF_INET6) { ((struct sockaddr_in6*)(r->ai_addr))->sin6_port=htons(SMTP_PORT); break; }; }; if(!r) { freeaddrinfo(res); return 2; }; s=socket(r->ai_family,r->ai_socktype,r->ai_protocol); if(s==-1) { freeaddrinfo(res); return 2; }; if(connect(s,r->ai_addr,r->ai_addrlen)==-1) { freeaddrinfo(res); close(s); return 2; }; freeaddrinfo(res); if((S=fdopen(s,"r+")) == NULL) { close(s); return 2; }; if(fscanf(S,"%i%*[^\n]",&err_code) != 1) { fclose(S); return 2; }; fgetc(S); if( err_code != 220 ) { fclose(S); return 2; }; fprintf(S,"HELO %s\r\n",my_hostname); fflush(S); if(fscanf(S,"%i%*[^\n]",&err_code) != 1) { fclose(S); return 2; }; fgetc(S); if( err_code != 250 ) { fclose(S); return 2; }; fprintf(S,"MAIL FROM: <%s>\r\n",mail_from); fflush(S); if(fscanf(S,"%i%*[^\n]",&err_code) != 1) { fclose(S); return 2; }; fgetc(S); if( err_code != 250 ) { fclose(S); return 2; }; fprintf(S,"RCPT TO: <%s>\r\n",mail_to); fflush(S); if(fscanf(S,"%i%*[^\n]",&err_code) != 1) { fclose(S); return 2; }; fgetc(S); if( err_code != 250 ) { fclose(S); return 2; }; fprintf(S,"DATA\r\n"); fflush(S); if(fscanf(S,"%i%*[^\n]",&err_code) != 1) { fclose(S); return 2; }; fgetc(S); if( err_code != 354 ) { fclose(S); return 2; }; /* read from stdin and send to socket */ for(;;) { buf[0]=0; if((i=fscanf(stdin,"%255[^\n]",buf)) == EOF) 
break; if(fscanf(stdin,"%*[^\n]") > 0) {}; fgetc(stdin); if(!strcmp(".",buf)) { fputc(' ',S); }; fprintf(S,"%s\r\n",buf); fflush(S); }; fprintf(S,".\r\n"); fflush(S); if(fscanf(S,"%i%*[^\n]",&err_code) != 1) { fclose(S); return 1; }; fgetc(S); if( err_code != 250 ) { fclose(S); return 1; }; fprintf(S,"QUIT\r\n"); fflush(S); fclose(S); return 0; } int connect_mail(char* domain,char* mail_from,char* mail_to) { char mxbuf[1024]; unsigned short mxtype; unsigned short mxpref; answer_t answer; int l,na,nq; unsigned char *sp; unsigned char *cp; unsigned char *ep; HEADER *hp; int err_code = 2; fprintf(stdout,"Searching for domain %s\n",domain); if((l=res_search(domain,C_IN,T_MX,answer.buf,sizeof(answer))) == -1) { fprintf(stderr,"Query failed\n"); return 2; }; hp = &(answer.hdr); sp = answer.buf; cp = answer.buf + HFIXEDSZ; ep = answer.buf + l; nq=ntohs(hp->qdcount); for(;nq>0;nq--) { if((l=dn_skipname(cp,ep)) == -1) { fprintf(stderr,"skipname failed\n"); return 2; }; cp+=l+QFIXEDSZ; }; na=ntohs(hp->ancount); for(;(na>0) && (cp&2 exit 1 fi # arguments status=$1 shift job_id=$1 shift control_dir=$1 shift local_mail=$1 shift job_name=$1 shift failure_reason=$1 shift if [ -z "$local_mail" ] ; then echo "Empty local mail address" 1>&2 exit 1 fi #host_name=`hostname -f` cur_time=`date -R` cluster_name=`hostname --fqdn` while true ; do if [ $# -lt 1 ] ; then break ; fi mail_addr=$1 if [ -z "$mail_addr" ] ; then break; fi ( # job_name=`cat $control_dir/job.$job_id.local 2>/dev/null | \ # sed --quiet 's/^jobname=\(.*\)/\1/;t print;s/.*//;t;:print;p'` # if [ -z "$job_name" ] ; then # job_name='' # fi echo "From: $local_mail" echo "To: $mail_addr" if [ -z "$job_name" ] ; then echo "Subject: Message from job $job_id" else echo "Subject: Message from job $job_name ($job_id)" fi echo "Date: $cur_time" echo if [ ! -z "$job_name" ] ; then job_name="\"$job_name\" " fi job_name="${job_name}(${job_id})" if [ ! -z "$cluster_name" ] ; then job_name="${job_name} at ${cluster_name}" fi if [ ! -z "$failure_reason" ] ; then echo "Job $job_name state is $status. Job FAILED with reason:" echo "$failure_reason" if [ "$status" = FINISHED ] ; then if [ -r "$control_dir/job.$job_id.diag" ] ; then grep -i '^WallTime' "$control_dir/job.$job_id.diag" 2>/dev/null grep -i '^KernelTime' "$control_dir/job.$job_id.diag" 2>/dev/null grep -i '^UserTime' "$control_dir/job.$job_id.diag" 2>/dev/null grep -i '^MaxResidentMemory' "$control_dir/job.$job_id.diag" 2>/dev/null fi # Oxana requested more information. Race conditions are possible here if [ -r "$control_dir/job.$job_id.local" ] ; then grep -i '^queue' "$control_dir/job.$job_id.local" 2>/dev/null grep -i '^starttime' "$control_dir/job.$job_id.local" 2>/dev/null grep -i '^cleanuptime' "$control_dir/job.$job_id.local" 2>/dev/null fi fi if [ -f "$control_dir/job.$job_id.errors" ] ; then echo echo 'Following is the log of job processing:' echo '-------------------------------------------------' cat "$control_dir/job.$job_id.errors" 2>/dev/null echo '-------------------------------------------------' echo fi else echo "Job $job_name current state is $status." 
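# For jobs reaching FINISHED, append the resource usage recorded in the job's
# .diag file and the queue/start/cleanup entries from its .local file in the
# control directory (mirroring the failure branch above).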
if [ "$status" = FINISHED ] ; then if [ -r "$control_dir/job.$job_id.diag" ] ; then grep -i '^WallTime' "$control_dir/job.$job_id.diag" 2>/dev/null grep -i '^KernelTime' "$control_dir/job.$job_id.diag" 2>/dev/null grep -i '^UserTime' "$control_dir/job.$job_id.diag" 2>/dev/null grep -i '^MaxResidentMemory' "$control_dir/job.$job_id.diag" 2>/dev/null fi if [ -r "$control_dir/job.$job_id.local" ] ; then grep -i '^queue' "$control_dir/job.$job_id.local" 2>/dev/null grep -i '^starttime' "$control_dir/job.$job_id.local" 2>/dev/null grep -i '^cleanuptime' "$control_dir/job.$job_id.local" 2>/dev/null fi fi fi ) | \ $basedir/smtp-send "$local_mail" "$mail_addr" shift done nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/mail/PaxHeaders.7502/README0000644000000000000000000000012311016612002024746 xustar000000000000000026 mtime=1211831298.62818 27 atime=1513200576.084721 30 ctime=1513200662.981783954 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/mail/README0000644000175000002070000000004611016612002025014 0ustar00mockbuildmock00000000000000Utility and function to send E-mails. nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/gm-delegations-converter.8.in0000644000000000000000000000012412754430337030572 xustar000000000000000025 mtime=1471295711.6341 29 atime=1513200649.88862382 30 ctime=1513200662.787781581 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/gm-delegations-converter.8.in0000644000175000002070000000227712754430337030647 0ustar00mockbuildmock00000000000000.TH gm-delegations-converter 8 "2016-08-01" "NorduGrid @VERSION@" "NorduGrid Toolkit" .SH NAME gm-delegations-converter \- converts delegations database between supported formats .SH DESCRIPTION .B gm-delegations-converter converts delegations backend database storing information about accepted delegated credentials between supported formats. So far supported formats are Berkeley DB and SQLite. Converted database is stored in same location as the original one. Hence original database is overwritten and ready to be used immediately. .SH SYNOPSIS gm-jobs [OPTION...] .SH OPTIONS .IP "\fB-h, --help\fR" Show help for available options .IP "\fB-c, --conffile=file\fR" use specified configuration file .IP "\fB-d, --controldir=dir\fR" read information from specified control directory .IP "\fB-i, --input=database format\fR" specifies format of original database. By default the value from configuration file is used. The possible values are bdb (Berkeley DB) and sqlite (SQLite). .IP "\fB-o, --output=database format\fR" specifies format to convert database into. By default it is opposite to one specified in configuration file. 
.SH AUTHOR Aleksandr Konstantinov nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/gm_delegations_converter.cpp0000644000000000000000000000012312754430354030742 xustar000000000000000026 mtime=1471295724.32842 27 atime=1513200576.239723 30 ctime=1513200662.794781667 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/gm_delegations_converter.cpp0000644000175000002070000002304012754430354031007 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include "conf/GMConfig.h" #include "../delegation/DelegationStore.h" #include "../delegation/FileRecordBDB.h" #include "../delegation/FileRecordSQLite.h" using namespace ARex; int main(int argc, char* argv[]) { // stderr destination for error messages Arc::LogStream logcerr(std::cerr); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::DEBUG); Arc::OptionParser options(" ", istring("gm-delegations-converter changes " "format of delegation database.")); std::string conf_file; options.AddOption('c', "conffile", istring("use specified configuration file"), istring("file"), conf_file); std::string control_dir; options.AddOption('d', "controldir", istring("read information from specified control directory"), istring("dir"), control_dir); std::string input_format; options.AddOption('i', "input", istring("convert from specified input database format [bdb|sqlite]"), istring("database format"), input_format); std::string output_format; options.AddOption('o', "output", istring("convert into specified output database format [bdb|sqlite]"), istring("database format"), output_format); std::list params = options.Parse(argc, argv); GMConfig config; if (!conf_file.empty()) config.SetConfigFile(conf_file); std::cout << "Using configuration at " << config.ConfigFile() << std::endl; if(!config.Load()) exit(1); if (!control_dir.empty()) config.SetControlDir(control_dir); config.Print(); bool deleg_db_type_from_configuration = false; DelegationStore::DbType deleg_db_type = DelegationStore::DbBerkeley; DelegationStore::DbType deleg_db_type_out = DelegationStore::DbSQLite; switch(config.DelegationDBType()) { case GMConfig::deleg_db_bdb: deleg_db_type_from_configuration = true; deleg_db_type = DelegationStore::DbBerkeley; deleg_db_type_out = DelegationStore::DbSQLite; break; case GMConfig::deleg_db_sqlite: deleg_db_type_from_configuration = true; deleg_db_type = DelegationStore::DbSQLite; deleg_db_type_out = DelegationStore::DbBerkeley; break; }; if(!input_format.empty()) { if(input_format == "bdb") { deleg_db_type_from_configuration = false; deleg_db_type = DelegationStore::DbBerkeley; } else if(input_format == "sqlite") { deleg_db_type_from_configuration = false; deleg_db_type = DelegationStore::DbSQLite; } else { std::cerr << "Unknown input database type requested - " << input_format << std::endl; exit(-1); }; }; if(!output_format.empty()) { if(output_format == "bdb") { deleg_db_type_out = DelegationStore::DbBerkeley; } else if(output_format == "sqlite") { deleg_db_type_out = DelegationStore::DbSQLite; } else { std::cerr << "Unknown output database type requested - " << output_format << std::endl; exit(-1); }; }; switch(deleg_db_type) { case DelegationStore::DbBerkeley: std::cout << "Using input database type - Berkeley DB" << std::endl; break; case DelegationStore::DbSQLite: std::cout << "Using input database type - SQLite" << std::endl; break; default: std::cerr << "Failed 
identifying input database type" << std::endl; exit(-1); }; switch(deleg_db_type_out) { case DelegationStore::DbBerkeley: std::cout << "Using output database type - Berkeley DB" << std::endl; break; case DelegationStore::DbSQLite: std::cout << "Using output database type - SQLite" << std::endl; break; default: std::cerr << "Failed identifying output database type" << std::endl; exit(-1); }; FileRecord* source_db = NULL; std::string delegation_dir = config.DelegationDir(); switch(deleg_db_type) { case DelegationStore::DbBerkeley: source_db = new FileRecordBDB(delegation_dir, false); break; case DelegationStore::DbSQLite: source_db = new FileRecordSQLite(delegation_dir, false); break; }; if((!source_db) || (!*source_db)) { std::cerr << "Failed opening source database" << std::endl; exit(-1); }; FileRecord* output_db = NULL; std::string delegation_dir_output; if(!Arc::TmpDirCreate(delegation_dir_output)) { std::cerr << "Failed to create temporary directory for new database" << std::endl; exit(-1); }; switch(deleg_db_type_out) { case DelegationStore::DbBerkeley: output_db = new FileRecordBDB(delegation_dir_output, true); break; case DelegationStore::DbSQLite: output_db = new FileRecordSQLite(delegation_dir_output, true); break; }; if((!output_db) || (!*output_db)) { std::cerr << "Failed creating output database" << std::endl; exit(-1); }; unsigned int rec_num = 0; // Copy database content record by record FileRecord::Iterator* prec = source_db->NewIterator(); if(!prec) { std::cerr << "Failed opening iterator for source database" << std::endl; exit(-1); }; bool copy_success = true; for(;*prec;++(*prec)) { std::string uid = prec->uid(); std::string id = prec->id(); std::string owner = prec->owner(); std::list meta = prec->meta(); if(!output_db->Add(uid, id, owner, meta)) { copy_success = false; std::cerr << "Failed copying record " << id << ", " << owner << " - " << output_db->Error() << std::endl; break; }; std::list locks; if(!source_db->ListLocks(id, owner, locks)) { copy_success = false; std::cerr << "Failed obtaining locks for " << id << ", " << owner << " - " << source_db->Error() << std::endl; break; }; for(std::list::iterator lock = locks.begin(); lock != locks.end(); ++lock) { std::list ids; ids.push_back(id); if(!output_db->AddLock(*lock, ids, owner)) { copy_success = false; std::cerr << "Failed adding lock " << *lock << " for " << id << ", " << owner << " - " << source_db->Error() << std::endl; break; }; }; if(!copy_success) break; ++rec_num; }; delete prec; delete source_db; delete output_db; if(!copy_success) { Arc::DirDelete(delegation_dir_output, true); exit(-1); } else { std::cout << "Copied " << rec_num << " credentials entries" << std::endl; std::cout << "New database created in " << delegation_dir_output << std::endl; } // Move generated database (first delete old one) try { Glib::Dir dir(delegation_dir); std::string name; while ((name = dir.read_name()) != "") { std::string fullpath(delegation_dir); fullpath += G_DIR_SEPARATOR_S + name; struct stat st; if (::lstat(fullpath.c_str(), &st) == 0) { if(!S_ISDIR(st.st_mode)) { if(!Arc::FileDelete(fullpath.c_str())) { std::cerr << "Failed deleting source database - file " << fullpath << std::endl; std::cerr << "You MUST manually copy content of " << delegation_dir_output << " into " << delegation_dir << std::endl; exit(-1); }; }; }; }; } catch(Glib::FileError& e) { std::cerr << "Failed deleting source database" << std::endl; std::cerr << "You MUST manually copy content of " << delegation_dir_output << " into " << delegation_dir << 
std::endl; if(deleg_db_type_from_configuration) { std::cerr << "After that do NOT forget to change database type in confguration file." << std::endl; std::cerr << "Otherwise it will be destoyed next time a-rex starts." << std::endl; } exit(-1); }; try { Glib::Dir dir(delegation_dir_output); std::string name; while ((name = dir.read_name()) != "") { std::string fullpath_source(delegation_dir_output); fullpath_source += G_DIR_SEPARATOR_S + name; std::string fullpath_dest(delegation_dir); fullpath_dest += G_DIR_SEPARATOR_S + name; if(!Arc::FileCopy(fullpath_source, fullpath_dest)) { std::cerr << "Failed copying new database - file " << fullpath_source << std::endl; std::cerr << "You MUST manually copy content of " << delegation_dir_output << " into " << delegation_dir << std::endl; if(deleg_db_type_from_configuration) { std::cerr << "After that do NOT forget to change database type in confguration file." << std::endl; std::cerr << "Otherwise it will be destoyed next time a-rex starts." << std::endl; } exit(-1); }; }; } catch(Glib::FileError& e) { std::cerr << "Failed copying new database" << std::endl; std::cerr << "You MUST manually copy content of " << delegation_dir_output << " into " << delegation_dir << std::endl; exit(-1); }; Arc::DirDelete(delegation_dir_output, true); std::cout << "New database moved into " << delegation_dir << std::endl; if(deleg_db_type_from_configuration) { std::cout << "Do NOT forget to change database type in confguration file." << std::endl; std::cout << "Otherwise it will be destoyed next time a-rex starts." << std::endl; } return 0; } nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/misc0000644000000000000000000000013213214316026024033 xustar000000000000000030 mtime=1513200662.935783391 30 atime=1513200668.718854121 30 ctime=1513200662.935783391 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/misc/0000755000175000002070000000000013214316026024156 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/misc/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712771224333026156 xustar000000000000000027 mtime=1474635995.909107 30 atime=1513200602.442043529 30 ctime=1513200662.932783355 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/misc/Makefile.am0000644000175000002070000000060412771224333026220 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libmisc.la libmisc_la_SOURCES = \ proxy.cpp \ proxy.h libmisc_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libmisc_la_LIBADD = \ $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/misc/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315732026160 xustar000000000000000030 mtime=1513200602.491044129 30 atime=1513200650.039625666 30 ctime=1513200662.933783367 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/misc/Makefile.in0000644000175000002070000005462713214315732026244 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/grid-manager/misc DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libmisc_la_DEPENDENCIES = $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libmisc_la_OBJECTS = libmisc_la-proxy.lo libmisc_la_OBJECTS = $(am_libmisc_la_OBJECTS) libmisc_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libmisc_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libmisc_la_SOURCES) DIST_SOURCES = $(libmisc_la_SOURCES) ETAGS 
= etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = 
@GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = 
@build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libmisc.la libmisc_la_SOURCES = \ proxy.cpp \ proxy.h libmisc_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libmisc_la_LIBADD = \ $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/misc/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/misc/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libmisc.la: $(libmisc_la_OBJECTS) $(libmisc_la_DEPENDENCIES) $(libmisc_la_LINK) $(libmisc_la_OBJECTS) $(libmisc_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmisc_la-proxy.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libmisc_la-proxy.lo: proxy.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmisc_la_CXXFLAGS) $(CXXFLAGS) -MT libmisc_la-proxy.lo -MD -MP -MF $(DEPDIR)/libmisc_la-proxy.Tpo -c -o libmisc_la-proxy.lo `test -f 'proxy.cpp' || echo '$(srcdir)/'`proxy.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmisc_la-proxy.Tpo $(DEPDIR)/libmisc_la-proxy.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='proxy.cpp' object='libmisc_la-proxy.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmisc_la_CXXFLAGS) $(CXXFLAGS) -c -o libmisc_la-proxy.lo `test -f 'proxy.cpp' || echo '$(srcdir)/'`proxy.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) 
$(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/misc/PaxHeaders.7502/proxy.h0000644000000000000000000000012412046514361025447 xustar000000000000000027 mtime=1352308977.507652 27 atime=1513200576.244723 30 ctime=1513200662.935783391 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/misc/proxy.h0000644000175000002070000000052212046514361025513 0ustar00mockbuildmock00000000000000#ifndef __ARC_GM_PROXY_H__ #define __ARC_GM_PROXY_H__ namespace ARex { int prepare_proxy(void); int remove_proxy(void); int renew_proxy(const char* old_proxy,const char* new_proxy); bool myproxy_renew(const char* old_proxy_file,const char* new_proxy_file,const char* myproxy_server); } // namespace ARex #endif // __ARC_GM_PROXY_H__ nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/misc/PaxHeaders.7502/proxy.cpp0000644000000000000000000000012412046514361026002 xustar000000000000000027 mtime=1352308977.507652 27 atime=1513200576.243723 30 ctime=1513200662.934783379 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/misc/proxy.cpp0000644000175000002070000001227612046514361026057 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "proxy.h" #include #include #include #include #include #include namespace ARex { int prepare_proxy(void) { int h = -1; off_t len; char* buf = NULL; off_t l,ll; int res=-1; if(getuid() == 0) { /* create temporary proxy */ std::string proxy_file=Arc::GetEnv("X509_USER_PROXY"); if(proxy_file.empty()) goto exit; h=::open(proxy_file.c_str(),O_RDONLY); if(h==-1) goto exit; if((len=lseek(h,0,SEEK_END))==-1) goto exit; if(lseek(h,0,SEEK_SET) != 0) goto exit; buf=(char*)malloc(len); if(buf==NULL) goto exit; for(l=0;l storeopt; std::map::const_iterator m; m=url.Options().find("username"); 
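  // The MyProxy store options below are filled from the URL options:
  // "username" defaults to the identity of the expiring proxy when not given,
  // "credname" and "password" are passed through when present, and the
  // requested lifetime is fixed at 12 hours (60*60*12 seconds).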
if(m != url.Options().end()) { storeopt["username"]=m->second; } else { Arc::Credential proxy(std::string(old_proxy_file),"","",""); storeopt["username"]=proxy.GetIdentityName(); }; m=url.Options().find("credname"); if(m != url.Options().end()) { storeopt["credname"]=m->second; }; storeopt["lifetime"] = Arc::tostring(60*60*12); m=url.Options().find("password"); if(m != url.Options().end()) { storeopt["password"] = m->second; }; /* Get new proxy */ std::string new_proxy_str; if(!cstore.Retrieve(storeopt,new_proxy_str)) { fprintf(stderr, "Failed to retrieve a proxy from MyProxy server %s\n", myproxy_server); return false; }; std::ofstream h(new_proxy_file,std::ios::trunc); h< -U -P -L [-c ] [-p ] [-d ] .SH OPTIONS .IP "\fB-I\fR \fIjobID\fR" A-REX job identifier .IP "\fB-U\fR \fIuser\fR" local account that own the job files and processes .IP "\fB-P\fR \fIuser proxy\fR" path to user proxy certificate file to get VOMS membership info .IP "\fB-L\fR \fIjob status file\fR" A-REX jobstatus .local file containint more info about job (like LRMS id) .IP "\fB-c\fR \fIceid prefix\fR" prefix to generate CE ID in accordance to GLUE1.3 publishing .IP "\fB-p\fR \fIlog prefix\fR" logs location and filename prefix. Default is \fB/var/log/arc/accounting/blahp.log\fR .IP "\fB-d\fR \fIloglevel\fR" logging level from 0(ERROR) to 5(DEBUG) .SH ENABLING PLUGIN IN A-REX CONFIGURATION You need to add BLAH logger plugin as a handler for FINISHED state, e.g.: .B authplugin="FINISHED timeout=10,onfailure=pass /usr/libexec/arc/arc-blahp-logger -I %I -U %u -L %C/job.%I.local -P %C/job.%I.proxy" .SH CONFIGURATION There are no particular plugin configuration except passing correct options. By default BLAH log is written to .B /var/log/arc/accounting/blahp.log-YYYYMMDD. The log prefix (without \fB-YYYYMMDD\fR) can be redefined with optional \fB-p\fR option. CE ID is generated automatically and has a format .B host.fqdn:2811/nordugrid-torque-queue in accordance to GLUE1.3 publishing. Queue is added in runtime anyway, but prefix can be redefined with \fB-c\fR option. This option should be added in case of LRMS. To debug logger execution you can add \fB-d 5\fR option and see all ARC logger output from all subsystems used. 
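.SH EXAMPLE
A standalone test invocation might look as follows (placeholders in angle
brackets, including the control directory path, are illustrative and depend on
the local A-REX setup):
.PP
.B arc-blahp-logger -I <jobID> -U <localuser> -L <controldir>/job.<jobID>.local -P <controldir>/job.<jobID>.proxy -d 5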
.SH AUTHOR Andrii Salnikov nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/GridManager.cpp0000644000000000000000000000012413107551157026051 xustar000000000000000027 mtime=1495192175.336282 27 atime=1513200575.967719 30 ctime=1513200662.790781618 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/GridManager.cpp0000644000175000002070000003204313107551157026120 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "jobs/JobsList.h" #include "jobs/CommFIFO.h" #include "log/JobLog.h" #include "log/JobsMetrics.h" #include "run/RunRedirected.h" #include "run/RunParallel.h" #include "jobs/DTRGenerator.h" #include "files/ControlFileHandling.h" #include "../delegation/DelegationStore.h" #include "../delegation/DelegationStores.h" #include "GridManager.h" namespace ARex { /* do job cleaning every 2 hours */ #define HARD_JOB_PERIOD 7200 /* cache cleaning every 5 minutes */ #define CACHE_CLEAN_PERIOD 300 /* cache cleaning default timeout */ #define CACHE_CLEAN_TIMEOUT 3600 static Arc::Logger logger(Arc::Logger::getRootLogger(),"A-REX"); class cache_st { public: Arc::SimpleCounter counter; Arc::SimpleCondition to_exit; const GMConfig* config; cache_st(GMConfig* config_):config(config_) { }; ~cache_st(void) { to_exit.signal(); counter.wait(); }; }; static void cache_func(void* arg) { const GMConfig* config = ((cache_st*)arg)->config; Arc::SimpleCondition& to_exit = ((cache_st*)arg)->to_exit; CacheConfig cache_info(config->CacheParams()); if (!cache_info.cleanCache()) return; // Note: per-user substitutions do not work here. If they are used // cache-clean must be run manually eg via cron cache_info.substitute(*config, Arc::User()); // get the cache dirs std::vector cache_info_dirs = cache_info.getCacheDirs(); if (cache_info_dirs.empty()) return; std::string maxusedspace = Arc::tostring(cache_info.getCacheMax()); std::string minusedspace = Arc::tostring(cache_info.getCacheMin()); std::string cachelifetime = cache_info.getLifeTime(); std::string logfile = cache_info.getLogFile(); bool cacheshared = cache_info.getCacheShared(); std::string cachespacetool = cache_info.getCacheSpaceTool(); // do cache-clean -h for explanation of options std::string cmd = Arc::ArcLocation::GetToolsDir() + "/cache-clean"; cmd += " -m " + minusedspace; cmd += " -M " + maxusedspace; if (!cachelifetime.empty()) cmd += " -E " + cachelifetime; if (cacheshared) cmd += " -S "; if (!cachespacetool.empty()) cmd += " -f \"" + cachespacetool + "\" "; cmd += " -D " + cache_info.getLogLevel(); for (std::vector::iterator i = cache_info_dirs.begin(); i != cache_info_dirs.end(); i++) { cmd += " " + (i->substr(0, i->find(" "))); } // use large timeout, as disk scan can take a long time // blocks until command finishes or timeout int clean_timeout = cache_info.getCleanTimeout(); if (clean_timeout == 0) clean_timeout = CACHE_CLEAN_TIMEOUT; // run cache cleaning periodically forever for(;;) { int h = open(logfile.c_str(), O_WRONLY | O_APPEND); if (h == -1) { std::string dirname(logfile.substr(0, logfile.rfind('/'))); if (!dirname.empty() && !Arc::DirCreate(dirname, S_IRWXU | S_IRGRP | S_IROTH | S_IXGRP | S_IXOTH, true)) { logger.msg(Arc::WARNING, "Cannot create directories for log file %s." " Messages will be logged to this log", logfile); } else { h = open(logfile.c_str(), O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); if (h == -1) { logger.msg(Arc::WARNING, "Cannot open cache log file %s: %s. 
Cache cleaning" " messages will be logged to this log", logfile, Arc::StrError(errno)); } } } logger.msg(Arc::DEBUG, "Running command %s", cmd); int result = RunRedirected::run(Arc::User(), "cache-clean", -1, h, h, cmd.c_str(), clean_timeout); if(h != -1) close(h); if (result != 0) { if (result == -1) logger.msg(Arc::ERROR, "Failed to start cache clean script"); else logger.msg(Arc::ERROR, "Cache cleaning script failed"); } if (to_exit.wait(CACHE_CLEAN_PERIOD*1000)) { break; } } } class sleep_st { public: Arc::SimpleCondition* sleep_cond; CommFIFO* timeout; std::string control_dir; bool to_exit; // tells thread to exit bool exited; // set by thread while exiting sleep_st(const std::string& control):sleep_cond(NULL),timeout(NULL),control_dir(control),to_exit(false),exited(false) { }; ~sleep_st(void) { to_exit = true; SignalFIFO(control_dir); while(!exited) sleep(1); }; }; static void wakeup_func(void* arg) { sleep_st* s = (sleep_st*)arg; for(;;) { if(s->to_exit) break; s->timeout->wait(); if(s->to_exit) break; s->sleep_cond->signal(); if(s->to_exit) break; }; s->exited = true; return; } static void kick_func(void* arg) { sleep_st* s = (sleep_st*)arg; s->sleep_cond->signal(); } typedef struct { int argc; char** argv; } args_st; void GridManager::grid_manager(void* arg) { GridManager* gm = (GridManager*)arg; if(!arg) { ::kill(::getpid(),SIGTERM); return; } if(!gm->thread()) { // thread exited because of internal error // that means whole server must be stopped ::kill(::getpid(),SIGTERM); } } bool GridManager::thread() { logger.msg(Arc::INFO,"Starting jobs processing thread"); logger.msg(Arc::INFO,"Used configuration file %s",config_.ConfigFile()); config_.Print(); // Preparing various structures, dirs, etc. wakeup_interface_ = new CommFIFO; time_t hard_job_time; CommFIFO::add_result r = wakeup_interface_->add(config_.ControlDir()); if(r != CommFIFO::add_success) { if(r == CommFIFO::add_busy) { logger.msg(Arc::FATAL,"Error adding communication interface in %s. " "Maybe another instance of A-REX is already running.",config_.ControlDir()); } else { logger.msg(Arc::FATAL,"Error adding communication interface in %s. " "Maybe permissions are not suitable.",config_.ControlDir()); }; return false; }; ARex::DelegationStores* delegs = config_.GetDelegations(); if(delegs) { ARex::DelegationStore& deleg = (*delegs)[config_.DelegationDir()]; if(!deleg) { logger.msg(Arc::FATAL,"Error initiating delegation database in %s. " "Maybe permissions are not suitable. 
Returned error is: %s.", config_.DelegationDir(),deleg.Error()); return false; }; }; wakeup_interface_->timeout(config_.WakeupPeriod()); /* start timer thread - wake up every 2 minutes */ wakeup_ = new sleep_st(config_.ControlDir()); wakeup_->sleep_cond=sleep_cond_; wakeup_->timeout=wakeup_interface_; if(!Arc::CreateThreadFunction(wakeup_func,wakeup_)) { logger.msg(Arc::ERROR,"Failed to start new thread"); wakeup_->exited = true; return false; }; RunParallel::kicker(&kick_func,wakeup_); /* if(clean_first_level) { bool clean_finished = false; bool clean_active = false; bool clean_junk = false; if(clean_first_level >= 1) { clean_finished=true; if(clean_first_level >= 2) { clean_active=true; if(clean_first_level >= 3) { clean_junk=true; }; }; }; for(;;) { bool cleaned_all=true; for(JobUsers::iterator user = users_->begin();user != users_->end();++user) { size_t njobs = user->get_jobs()->size(); user->get_jobs()->ScanNewJobs(); if(user->get_jobs()->size() == njobs) break; cleaned_all=false; if(!(user->get_jobs()->DestroyJobs(clean_finished,clean_active))) { logger.msg(Arc::WARNING,"Not all jobs are cleaned yet"); sleep(10); logger.msg(Arc::WARNING,"Trying again"); }; kill(getpid(),SIGCHLD); // make sure no child is missed }; if(cleaned_all) { if(clean_junk && clean_active && clean_finished) { // at the moment cleaning junk means cleaning all the files in // session and control directories for(JobUsers::iterator user=users_->begin();user!=users_->end();++user) { std::list flist; for(std::vector::const_iterator i = user->SessionRoots().begin(); i != user->SessionRoots().end(); i++) { logger.msg(Arc::INFO,"Cleaning all files in directory %s", *i); delete_all_files(*i,flist,true); } logger.msg(Arc::INFO,"Cleaning all files in directory %s", user->ControlDir()); delete_all_files(user->ControlDir(),flist,true); }; }; break; }; }; logger.msg(Arc::INFO,"Jobs cleaned"); }; */ // check if cache cleaning is enabled, if so activate cleaning thread cache_st cache_h(&config_); if (!config_.CacheParams().getCacheDirs().empty() && config_.CacheParams().cleanCache()) { if(!Arc::CreateThreadFunction(cache_func,&cache_h,&cache_h.counter)) { logger.msg(Arc::INFO,"Failed to start new thread: cache won't be cleaned"); } } // Start new job list JobsList jobs(config_); logger.msg(Arc::INFO,"Picking up left jobs"); jobs.RestartJobs(); hard_job_time = time(NULL) + HARD_JOB_PERIOD; logger.msg(Arc::INFO, "Starting data staging threads"); DTRGenerator* dtr_generator = new DTRGenerator(config_, &kick_func, wakeup_); if (!(*dtr_generator)) { delete dtr_generator; logger.msg(Arc::ERROR, "Failed to start data staging threads, exiting Grid Manager thread"); return false; } dtr_generator_ = dtr_generator; jobs.SetDataGenerator(dtr_generator); bool scan_old = false; std::string heartbeat_file("gm-heartbeat"); Arc::WatchdogChannel wd(config_.WakeupPeriod()*3+300); /* main loop - forever */ logger.msg(Arc::INFO,"Starting jobs' monitoring"); for(;;) { if(tostop_) break; config_.RunHelpers(); JobLog* joblog = config_.GetJobLog(); if(joblog) joblog->RunReporter(config_); JobsMetrics* metrics = config_.GetJobsMetrics(); if(metrics) metrics->Sync(); bool hard_job = ((int)(time(NULL) - hard_job_time)) > 0; // touch heartbeat file std::string gm_heartbeat(std::string(config_.ControlDir() + "/" + heartbeat_file)); int r = ::open(gm_heartbeat.c_str(), O_WRONLY|O_CREAT|O_TRUNC, S_IRUSR|S_IWUSR); if (r == -1) { logger.msg(Arc::WARNING, "Failed to open heartbeat file %s", gm_heartbeat); } else { close(r); r = -1; }; // touch temporary 
configuration so /tmp cleaner does not erase it if(config_.ConfigIsTemp()) ::utimes(config_.ConfigFile().c_str(), NULL); wd.Kick(); /* check for new marks and activate related jobs */ jobs.ScanNewMarks(); /* look for new jobs */ jobs.ScanNewJobs(); /* slowly scan through old jobs for deleting them in time */ if(hard_job || scan_old) { scan_old = jobs.ScanOldJobs(config_.WakeupPeriod()/2,config_.MaxJobs()); }; /* process known jobs */ jobs.ActJobs(); // Clean old delegations ARex::DelegationStores* delegs = config_.GetDelegations(); if(delegs) { ARex::DelegationStore& deleg = (*delegs)[config_.DelegationDir()]; deleg.Expiration(24*60*60); deleg.CheckTimeout(60); // During this time delegation database will be locked. So it must not be too long. deleg.PeriodicCheckConsumers(); if(hard_job) { // once in a while check for delegations which are locked by non-exiting jobs std::list lock_ids; if(deleg.GetLocks(lock_ids)) { for(std::list::iterator lock_id = lock_ids.begin(); lock_id != lock_ids.end(); ++lock_id) { time_t t = job_state_time(*lock_id,config_); // Returns zero if file is not present if(t == 0) { logger.msg(Arc::ERROR,"Orphan delegation lock detected (%s) - cleaning", *lock_id); deleg.ReleaseCred(*lock_id); // not forcing credential removal - PeriodicCheckConsumers will do it with time control }; }; } else { logger.msg(Arc::ERROR,"Failed to obtain delegation locks for cleaning orphaned locks"); }; }; }; if(hard_job) hard_job_time = time(NULL) + HARD_JOB_PERIOD; sleep_cond_->wait(); logger.msg(Arc::DEBUG,"Waking up"); }; // Waiting for children to finish logger.msg(Arc::INFO,"Stopping jobs processing thread"); config_.PrepareToDestroy(); jobs.PrepareToDestroy(); logger.msg(Arc::INFO,"Exiting jobs processing thread"); return true; } GridManager::GridManager(GMConfig& config):tostop_(false), config_(config) { sleep_cond_ = new Arc::SimpleCondition; wakeup_interface_ = NULL; wakeup_ = NULL; dtr_generator_ = NULL; if(!Arc::CreateThreadFunction(&grid_manager,(void*)this,&active_)) { }; } GridManager::~GridManager(void) { logger.msg(Arc::INFO, "Shutting down job processing"); // Tell main thread to stop tostop_ = true; // Stop data staging if (dtr_generator_) { logger.msg(Arc::INFO, "Shutting down data staging threads"); delete dtr_generator_; } // Wait for main thread while(true) { sleep_cond_->signal(); if(active_.wait(1000)) break; } // wakeup_ is used by users through RunParallel and by // dtr_generator. Hence it must be deleted almost last. if(wakeup_) delete wakeup_; // wakeup_interface_ and sleep_cond_ are used by wakeup_ if(wakeup_interface_) delete wakeup_interface_; delete sleep_cond_; } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/gm_kick.cpp0000644000000000000000000000012412046735464025303 xustar000000000000000027 mtime=1352383284.841662 27 atime=1513200576.086721 30 ctime=1513200662.796781692 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/gm_kick.cpp0000644000175000002070000000306712046735464025356 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "conf/GMConfig.h" #include "jobs/CommFIFO.h" int main(int argc,char* argv[]) { Arc::OptionParser options("[control_file]", istring("gm-kick wakes up the A-REX corresponding to the given " "control file. 
If no file is given it uses the control directory " "found in the configuration file.")); std::string conf_file; options.AddOption('c', "conffile", istring("use specified configuration file"), istring("file"), conf_file); std::list params = options.Parse(argc, argv); std::string control_dir; if (params.empty()) { // Read from config ARex::GMConfig config(conf_file); if (!config.Load()) { std::cerr << "Could not load configuration from " << config.ConfigFile() << std::endl; return 1; } if (config.ControlDir().empty()) { std::cerr << "No control dir found in configuration file " << config.ConfigFile() << std::endl; return 1; } control_dir = config.ControlDir(); } else { control_dir = params.front(); if (control_dir[0] != '/') { char buf[1024]; if (getcwd(buf, 1024) != NULL) control_dir = std::string(buf) + "/" + control_dir; } control_dir = control_dir.substr(0, control_dir.rfind('/')); } ARex::SignalFIFO(control_dir); return 0; } nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/jobs0000644000000000000000000000013213214316026024035 xustar000000000000000030 mtime=1513200662.845782291 30 atime=1513200668.718854121 30 ctime=1513200662.845782291 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/0000755000175000002070000000000013214316026024160 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/PaxHeaders.7502/JobsList.cpp0000644000000000000000000000012413065033673026360 xustar000000000000000027 mtime=1490302907.897917 27 atime=1513200576.301723 30 ctime=1513200662.834782156 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/JobsList.cpp0000644000175000002070000016554513065033673026445 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "../files/ControlFileHandling.h" #include "../run/RunParallel.h" #include "../mail/send_mail.h" #include "../log/JobLog.h" #include "../log/JobsMetrics.h" #include "../misc/proxy.h" #include "../../delegation/DelegationStores.h" #include "../../delegation/DelegationStore.h" #include "../conf/GMConfig.h" #include "ContinuationPlugins.h" #include "DTRGenerator.h" #include "JobsList.h" namespace ARex { /* max time to run submit-*-job/cancel-*-job before to start looking for alternative way to detect result. Only for protecting against lost child. */ #define CHILD_RUN_TIME_SUSPICIOUS (10*60) /* max time to run submit-*-job/cancel-*-job before to decide that it is gone. Only for protecting against lost child. 
*/ #define CHILD_RUN_TIME_TOO_LONG (60*60) static Arc::Logger& logger = Arc::Logger::getRootLogger(); JobsList::JobsList(const GMConfig& gmconfig) : config(gmconfig), staging_config(gmconfig), old_dir(NULL), dtr_generator(NULL), job_desc_handler(config), jobs_pending(0) { for(int n = 0;nlocal->delegationid; if(!delegation_id.empty()) { ARex::DelegationStores* delegs = config.GetDelegations(); if(delegs) { std::string cred; if((*delegs)[config.DelegationDir()].GetCred(delegation_id,i->local->DN,cred)) { (void)job_proxy_write_file(*i,config,cred); }; }; }; }; } void JobsList::SetJobState(JobsList::iterator &i, job_state_t new_state, const char* reason) { if(i->job_state != new_state) { JobsMetrics* metrics = config.GetJobsMetrics(); if(metrics) metrics->ReportJobStateChange(i->job_id, new_state, i->job_state); std::string msg = Arc::Time().str(Arc::UTCTime); msg += " Job state change "; msg += i->get_state_name(); msg += " -> "; msg += GMJob::get_state_name(new_state); if(reason) { msg += " Reason: "; msg += reason; }; msg += "\n"; i->job_state = new_state; job_errors_mark_add(*i,config,msg); // During intermediate period job.proxy file must contain full delegated proxy. // To ensure its content is up to date even if proxy was updated in store here // we update content of that file on every job state change. UpdateJobCredentials(i); }; } bool JobsList::AddJobNoCheck(const JobId &id,JobsList::iterator &i,uid_t uid,gid_t gid){ i=jobs.insert(jobs.end(),GMJob(id, Arc::User(uid))); i->keep_finished=config.KeepFinished(); i->keep_deleted=config.KeepDeleted(); if (!GetLocalDescription(i)) { // safest thing to do is add failure and move to FINISHED i->AddFailure("Internal error"); SetJobState(i, JOB_STATE_FINISHED, "Internal failure"); FailedJob(i, false); if(!job_state_write_file(*i,config,i->job_state)) { logger.msg(Arc::ERROR, "%s: Failed reading .local and changing state, job and " "A-REX may be left in an inconsistent state", id); } return false; } i->session_dir = i->local->sessiondir; if (i->session_dir.empty()) i->session_dir = config.SessionRoot(id)+'/'+id; return true; } int JobsList::AcceptedJobs() const { return jobs_num[JOB_STATE_ACCEPTED] + jobs_num[JOB_STATE_PREPARING] + jobs_num[JOB_STATE_SUBMITTING] + jobs_num[JOB_STATE_INLRMS] + jobs_num[JOB_STATE_FINISHING] + jobs_pending; } int JobsList::RunningJobs() const { return jobs_num[JOB_STATE_SUBMITTING] + jobs_num[JOB_STATE_INLRMS]; } int JobsList::ProcessingJobs() const { return jobs_num[JOB_STATE_PREPARING] + jobs_num[JOB_STATE_FINISHING]; } int JobsList::PreparingJobs() const { return jobs_num[JOB_STATE_PREPARING]; } int JobsList::FinishingJobs() const { return jobs_num[JOB_STATE_FINISHING]; } void JobsList::PrepareToDestroy(void) { for(iterator i=jobs.begin();i!=jobs.end();++i) { i->PrepareToDestroy(); } } bool JobsList::ActJobs(void) { bool res = true; bool once_more = false; // first pass for(iterator i=jobs.begin();i!=jobs.end();) { if(i->job_state == JOB_STATE_UNDEFINED) { once_more=true; } res &= ActJob(i); } // second pass - process new jobs again if(once_more) for(iterator i=jobs.begin();i!=jobs.end();) { res &= ActJob(i); } // debug info on jobs per DN logger.msg(Arc::VERBOSE, "Current jobs in system (PREPARING to FINISHING) per-DN (%i entries)", jobs_dn.size()); for (std::map::iterator it = jobs_dn.begin(); it != jobs_dn.end(); ++it) logger.msg(Arc::VERBOSE, "%s: %i", it->first, (unsigned int)(it->second)); return res; } bool JobsList::DestroyJob(JobsList::iterator &i,bool finished,bool active) { logger.msg(Arc::INFO,"%s: 
Destroying",i->job_id); job_state_t new_state=i->job_state; if(new_state == JOB_STATE_UNDEFINED) { // Try to obtain real job state if((new_state=job_state_read_file(i->job_id,config))==JOB_STATE_UNDEFINED) { logger.msg(Arc::ERROR,"%s: Can't read state - no comments, just cleaning",i->job_id); UnlockDelegation(i); job_clean_final(*i,config); i=jobs.erase(i); return true; } i->job_state = new_state; } if((new_state == JOB_STATE_FINISHED) && (!finished)) { ++i; return true; } if(!active) { ++i; return true; } if((new_state != JOB_STATE_INLRMS) || (job_lrms_mark_check(i->job_id,config))) { logger.msg(Arc::INFO,"%s: Cleaning control and session directories",i->job_id); UnlockDelegation(i); job_clean_final(*i,config); i=jobs.erase(i); return true; } logger.msg(Arc::INFO,"%s: This job may be still running - canceling",i->job_id); bool state_changed = false; if(!state_submitting(i,state_changed,true)) { logger.msg(Arc::WARNING,"%s: Cancellation failed (probably job finished) - cleaning anyway",i->job_id); UnlockDelegation(i); job_clean_final(*i,config); i=jobs.erase(i); return true; } if(!state_changed) { ++i; return false; } // child still running logger.msg(Arc::INFO,"%s: Cancellation probably succeeded - cleaning",i->job_id); UnlockDelegation(i); job_clean_final(*i,config); i=jobs.erase(i); return true; } bool JobsList::FailedJob(const JobsList::iterator &i,bool cancel) { bool r = true; // add failure mark if(job_failed_mark_add(*i,config,i->failure_reason)) { i->failure_reason = ""; } else { r = false; } if(GetLocalDescription(i)) { i->local->uploads=0; } else { r=false; } // If the job failed during FINISHING then DTR deals with .output if (i->get_state() == JOB_STATE_FINISHING) { if (i->local) job_local_write_file(*i,config,*(i->local)); return r; } // adjust output files to failure state // Not good looking code JobLocalDescription job_desc; if(job_desc_handler.parse_job_req(i->get_id(),job_desc) != JobReqSuccess) { r = false; } // Convert delegation ids to credential paths. std::string default_cred = config.ControlDir() + "/job." + i->get_id() + ".proxy"; for(std::list::iterator f = job_desc.outputdata.begin(); f != job_desc.outputdata.end(); ++f) { if(f->has_lfn()) { if(f->cred.empty()) { f->cred = default_cred; } else { std::string path; ARex::DelegationStores* delegs = config.GetDelegations(); if(delegs && i->local) path = (*delegs)[config.DelegationDir()].FindCred(f->cred,i->local->DN); f->cred = path; } if(i->local) ++(i->local->uploads); } } // Add user-uploaded input files so that they are not deleted during // FINISHING and so resume will work. Credentials are not necessary for // these files. The real output list will be recreated from the job // description if the job is restarted. 
if (!cancel && job_desc.reruns > 0) { for(std::list::iterator f = job_desc.inputdata.begin(); f != job_desc.inputdata.end(); ++f) { if (f->lfn.find(':') == std::string::npos) { FileData fd(f->pfn, ""); fd.iffailure = true; // make sure to keep file job_desc.outputdata.push_back(fd); } } } if(!job_output_write_file(*i,config,job_desc.outputdata,cancel?job_output_cancel:job_output_failure)) { r=false; logger.msg(Arc::ERROR,"%s: Failed writing list of output files: %s",i->job_id,Arc::StrError(errno)); } if(i->local) job_local_write_file(*i,config,*(i->local)); return r; } bool JobsList::GetLocalDescription(const JobsList::iterator &i) { if(!i->GetLocalDescription(config)) { logger.msg(Arc::ERROR,"%s: Failed reading local information",i->job_id); return false; } return true; } void JobsList::CleanChildProcess(const JobsList::iterator i) { delete i->child; i->child=NULL; if((i->job_state == JOB_STATE_SUBMITTING) || (i->job_state == JOB_STATE_CANCELING)) --jobs_scripts; } bool JobsList::state_submitting(const JobsList::iterator &i,bool &state_changed,bool cancel) { if(i->child == NULL) { // no child was running yet, or recovering from fault if((config.MaxScripts()!=-1) && (jobs_scripts>=config.MaxScripts())) { //logger.msg(Arc::WARNING,"%s: Too many LRMS scripts running - limit is %u", // i->job_id,config.MaxScripts()); // returning true but not advancing to next state should cause retry return true; } // write grami file for submit-X-job // TODO: read existing grami file to check if job is already submitted if(!(i->GetLocalDescription(config))) { logger.msg(Arc::ERROR,"%s: Failed reading local information",i->job_id); if(!cancel) i->AddFailure("Internal error: can't read local file"); return false; }; JobLocalDescription* job_desc = i->local; if(!cancel) { // in case of cancel all preparations are already done const char *local_transfer_s = NULL; if(staging_config.get_local_transfer()) { local_transfer_s="joboption_localtransfer=yes"; } if(!job_desc_handler.write_grami(*i,local_transfer_s)) { logger.msg(Arc::ERROR,"%s: Failed creating grami file",i->job_id); return false; } if(!job_desc_handler.set_execs(*i)) { logger.msg(Arc::ERROR,"%s: Failed setting executable permissions",i->job_id); return false; } // precreate file to store diagnostics from lrms job_diagnostics_mark_put(*i,config); job_lrmsoutput_mark_put(*i,config); } // submit/cancel job to LRMS using submit/cancel-X-job std::string cmd; if(cancel) { cmd=Arc::ArcLocation::GetDataDir()+"/cancel-"+job_desc->lrms+"-job"; } else { cmd=Arc::ArcLocation::GetDataDir()+"/submit-"+job_desc->lrms+"-job"; } if(!cancel) { logger.msg(Arc::INFO,"%s: state SUBMIT: starting child: %s",i->job_id,cmd); } else { if(!job_lrms_mark_check(i->job_id,config)) { logger.msg(Arc::INFO,"%s: state CANCELING: starting child: %s",i->job_id,cmd); } else { logger.msg(Arc::INFO,"%s: Job has completed already. 
No action taken to cancel",i->job_id); state_changed=true; return true; } } std::string grami = config.ControlDir()+"/job."+(*i).job_id+".grami"; cmd += " --config " + config.ConfigFile() + " " + grami; job_errors_mark_put(*i,config); if(!RunParallel::run(config,*i,cmd,&(i->child))) { if(!cancel) { i->AddFailure("Failed initiating job submission to LRMS"); logger.msg(Arc::ERROR,"%s: Failed running submission process",i->job_id); } else { logger.msg(Arc::ERROR,"%s: Failed running cancellation process",i->job_id); } return false; } ++jobs_scripts; if((config.MaxScripts()!=-1) && (jobs_scripts>=config.MaxScripts())) { logger.msg(Arc::WARNING,"%s: LRMS scripts limit of %u is reached - suspending submit/cancel", i->job_id,config.MaxScripts()); } return true; } // child was run - check if exited and then exit code bool simulate_success = false; if(i->child->Running()) { // child is running - come later // Due to unknown reason sometimes child exit event is lost. // As workaround check if child is running for too long. If // it does then check in grami file for generated local id // or in case of cancel just assume child exited. if((Arc::Time() - i->child->RunTime()) > Arc::Period(CHILD_RUN_TIME_SUSPICIOUS)) { if(!cancel) { // Check if local id is already obtained std::string local_id=job_desc_handler.get_local_id(i->job_id); if(local_id.length() > 0) { simulate_success = true; logger.msg(Arc::ERROR,"%s: Job submission to LRMS takes too long, but ID is already obtained. Pretending submission is done.",i->job_id); } } else { // Check if diagnostics collection is done if(job_lrms_mark_check(i->job_id,config)) { simulate_success = true; logger.msg(Arc::ERROR,"%s: Job cancellation takes too long, but diagnostic collection seems to be done. Pretending cancellation succeeded.",i->job_id); } } } if((!simulate_success) && (Arc::Time() - i->child->RunTime()) > Arc::Period(CHILD_RUN_TIME_TOO_LONG)) { // In any case it is way too long. Job must fail. Otherwise it will hang forever. CleanChildProcess(i); if(!cancel) { logger.msg(Arc::ERROR,"%s: Job submission to LRMS takes too long. Failing.",i->job_id); JobFailStateRemember(i,JOB_STATE_SUBMITTING); i->AddFailure("Job submission to LRMS failed"); // It would be nice to cancel if job finally submits. But we do not know id. return false; } else { logger.msg(Arc::ERROR,"%s: Job cancellation takes too long. Failing.",i->job_id); CleanChildProcess(i); return false; } } if(!simulate_success) return true; } if(!simulate_success) { // real processing if(!cancel) { logger.msg(Arc::INFO,"%s: state SUBMIT: child exited with code %i",i->job_id,i->child->Result()); } else { if((i->child->ExitTime() != Arc::Time::UNDEFINED) && ((Arc::Time() - i->child->ExitTime()) < (config.WakeupPeriod()*2))) { // not ideal solution logger.msg(Arc::INFO,"%s: state CANCELING: child exited with code %i",i->job_id,i->child->Result()); } } // Another workaround in Run class may also detect lost child. // It then sets exit code to -1. This value is also set in // case child was killed. So it is worth to check grami anyway. 
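// Illustrative summary (added comment, not in the upstream source) of how
// the child exit code is interpreted by the check that follows:
//   Result() ==  0  -> the submit/cancel script finished normally
//   Result() == -1  -> ambiguous: the child may have been lost or killed,
//                      so fall through and rely on the grami/local id (or
//                      the lrms mark in the cancel case) instead
//   anything else   -> treated as a real submission/cancellation failure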
if((i->child->Result() != 0) && (i->child->Result() != -1)) { if(!cancel) { logger.msg(Arc::ERROR,"%s: Job submission to LRMS failed",i->job_id); JobFailStateRemember(i,JOB_STATE_SUBMITTING); } else { logger.msg(Arc::ERROR,"%s: Failed to cancel running job",i->job_id); } CleanChildProcess(i); if(!cancel) i->AddFailure("Job submission to LRMS failed"); return false; } } else { // Just pretend everything is alright } if(!cancel) { CleanChildProcess(i); // success code - get LRMS job id std::string local_id=job_desc_handler.get_local_id(i->job_id); if(local_id.length() == 0) { logger.msg(Arc::ERROR,"%s: Failed obtaining lrms id",i->job_id); i->AddFailure("Failed extracting LRMS ID due to some internal error"); JobFailStateRemember(i,JOB_STATE_SUBMITTING); return false; } // put id into local information file if(!GetLocalDescription(i)) { i->AddFailure("Internal error"); return false; } i->local->localid=local_id; if(!job_local_write_file(*i,config,*(i->local))) { i->AddFailure("Internal error"); logger.msg(Arc::ERROR,"%s: Failed writing local information: %s",i->job_id,Arc::StrError(errno)); return false; } } else { // job diagnostics collection done in background (scan-*-job script) if(!job_lrms_mark_check(i->job_id,config)) { // job diag not yet collected - come later if((i->child->ExitTime() != Arc::Time::UNDEFINED) && ((Arc::Time() - i->child->ExitTime()) > Arc::Period(Arc::Time::HOUR))) { // it takes too long logger.msg(Arc::ERROR,"%s: state CANCELING: timeout waiting for cancellation",i->job_id); CleanChildProcess(i); return false; } return true; } else { logger.msg(Arc::INFO,"%s: state CANCELING: job diagnostics collected",i->job_id); CleanChildProcess(i); job_diagnostics_mark_move(*i,config); } } // move to next state state_changed=true; return true; } bool JobsList::state_loading(const JobsList::iterator &i,bool &state_changed,bool up) { if (staging_config.get_local_transfer()) { // just check user-uploaded files for PREPARING jobs if (up) { state_changed = true; return true; } int res = dtr_generator->checkUploadedFiles(*i); if (res == 2) { // still going return true; } if (res == 0) { // finished successfully state_changed=true; return true; } // error return false; } // first check if job is already in the system if (!dtr_generator->hasJob(*i)) { dtr_generator->receiveJob(*i); return true; } // if job has already failed then do not set failed state again if DTR failed bool already_failed = i->CheckFailure(config); // queryJobFinished() calls i->AddFailure() if any DTR failed if (dtr_generator->queryJobFinished(*i)) { bool done = true; bool result = true; // check for failure if (i->CheckFailure(config)) { if (!already_failed) JobFailStateRemember(i, (up ? JOB_STATE_FINISHING : JOB_STATE_PREPARING)); result = false; } else if (!up) { // check for user-uploadable files if downloading int res = dtr_generator->checkUploadedFiles(*i); if (res == 2) { // still going done = false; } else if (res == 0) { // finished successfully state_changed=true; } else { // error result = false; } } else { // if uploading we are done state_changed = true; } if (done) dtr_generator->removeJob(*i); return result; } else { // not finished yet logger.msg(Arc::VERBOSE, "%s: State: %s: still in data staging", i->job_id, (up ? 
"FINISHING" : "PREPARING")); return true; } } bool JobsList::JobPending(JobsList::iterator &i) { if(i->job_pending) return true; i->job_pending=true; return job_state_write_file(*i,config,i->job_state,true); } job_state_t JobsList::JobFailStateGet(const JobsList::iterator &i) { if(!GetLocalDescription(i)) { return JOB_STATE_UNDEFINED; } if(i->local->failedstate.empty()) { return JOB_STATE_UNDEFINED; } job_state_t state = GMJob::get_state(i->local->failedstate.c_str()); if(state != JOB_STATE_UNDEFINED) { if(i->local->reruns <= 0) { logger.msg(Arc::ERROR,"%s: Job is not allowed to be rerun anymore",i->job_id); job_local_write_file(*i,config,*(i->local)); return JOB_STATE_UNDEFINED; } i->local->failedstate=""; i->local->failedcause=""; i->local->reruns--; job_local_write_file(*i,config,*(i->local)); return state; } logger.msg(Arc::ERROR,"%s: Job failed in unknown state. Won't rerun.",i->job_id); i->local->failedstate=""; i->local->failedcause=""; job_local_write_file(*i,config,*(i->local)); return JOB_STATE_UNDEFINED; } bool JobsList::RecreateTransferLists(const JobsList::iterator &i) { // Recreate list of output and input files, excluding those already // transferred. For input files this is done by looking at the session dir, // for output files by excluding files in .output_status std::list output_files; std::list output_files_done; std::list input_files; // keep local info if(!GetLocalDescription(i)) return false; // get output files already done job_output_status_read_file(i->job_id,config,output_files_done); // recreate lists by reprocessing job description JobLocalDescription job_desc; // placeholder if(!job_desc_handler.process_job_req(*i,job_desc)) { logger.msg(Arc::ERROR,"%s: Reprocessing job description failed",i->job_id); return false; } // Restore 'local' if(!job_local_write_file(*i,config,*(i->local))) return false; // Read new lists if(!job_output_read_file(i->job_id,config,output_files)) { logger.msg(Arc::ERROR,"%s: Failed to read reprocessed list of output files",i->job_id); return false; } if(!job_input_read_file(i->job_id,config,input_files)) { logger.msg(Arc::ERROR,"%s: Failed to read reprocessed list of input files",i->job_id); return false; } // remove already uploaded files i->local->uploads=0; for(std::list::iterator i_new = output_files.begin(); i_new!=output_files.end();) { if(!(i_new->has_lfn())) { // user file - keep ++i_new; continue; } std::list::iterator i_done = output_files_done.begin(); for(;i_done!=output_files_done.end();++i_done) { if((i_new->pfn == i_done->pfn) && (i_new->lfn == i_done->lfn)) break; } if(i_done == output_files_done.end()) { ++i_new; i->local->uploads++; continue; } i_new=output_files.erase(i_new); } if(!job_output_write_file(*i,config,output_files)) return false; // remove already downloaded files i->local->downloads=0; for(std::list::iterator i_new = input_files.begin(); i_new!=input_files.end();) { std::string path = i->session_dir+"/"+i_new->pfn; struct stat st; if(::stat(path.c_str(),&st) == -1) { ++i_new; i->local->downloads++; } else { i_new=input_files.erase(i_new); } } if(!job_input_write_file(*i,config,input_files)) return false; return true; } bool JobsList::JobFailStateRemember(const JobsList::iterator &i,job_state_t state,bool internal) { if(!(i->GetLocalDescription(config))) { logger.msg(Arc::ERROR,"%s: Failed reading local information",i->job_id); return false; } if(state == JOB_STATE_UNDEFINED) { i->local->failedstate=""; i->local->failedcause=internal?"internal":"client"; return job_local_write_file(*i,config,*(i->local)); 
} if(i->local->failedstate.empty()) { i->local->failedstate=GMJob::get_state_name(state); i->local->failedcause=internal?"internal":"client"; return job_local_write_file(*i,config,*(i->local)); } return true; } time_t JobsList::PrepareCleanupTime(JobsList::iterator &i,time_t& keep_finished) { JobLocalDescription job_desc; time_t t = -1; // read lifetime - if empty it wont be overwritten job_local_read_file(i->job_id,config,job_desc); if(!Arc::stringto(job_desc.lifetime,t)) t = keep_finished; if(t > keep_finished) t = keep_finished; time_t last_changed=job_state_time(i->job_id,config); t=last_changed+t; job_desc.cleanuptime=t; job_local_write_file(*i,config,job_desc); return t; } void JobsList::UnlockDelegation(JobsList::iterator &i) { ARex::DelegationStores* delegs = config.GetDelegations(); if(delegs) (*delegs)[config.DelegationDir()].ReleaseCred(i->job_id,true,false); } void JobsList::ActJobUndefined(JobsList::iterator &i, bool& once_more,bool& /*delete_job*/, bool& job_error,bool& state_changed) { // new job - read its status from status file, but first check if it is // under the limit of maximum jobs allowed in the system if((AcceptedJobs() < config.MaxJobs()) || (config.MaxJobs() == -1)) { job_state_t new_state=job_state_read_file(i->job_id,config); if(new_state == JOB_STATE_UNDEFINED) { // something failed logger.msg(Arc::ERROR,"%s: Reading status of new job failed",i->job_id); job_error=true; i->AddFailure("Failed reading status of the job"); return; } // By keeping once_more==false job does not cycle here but // goes out and registers its state in counters. This allows // to maintain limits properly after restart. Except FINISHED // jobs because they are not kept in memory and should be // processed immediately. SetJobState(i, new_state, "(Re)Accepting new job"); // this can be any state, after A-REX restart if(new_state == JOB_STATE_ACCEPTED) { state_changed = true; // to trigger email notification, etc. // first phase of job - just accepted - parse request logger.msg(Arc::INFO,"%s: State: ACCEPTED: parsing job description",i->job_id); if(!job_desc_handler.process_job_req(*i,*i->local)) { logger.msg(Arc::ERROR,"%s: Processing job description failed",i->job_id); job_error=true; i->AddFailure("Could not process job description"); return; // go to next job } job_state_write_file(*i,config,i->job_state); // prepare information for logger // This call is not needed here because at higher level make_file() // is called for every state change // if(config.GetJobLog()) config.GetJobLog()->make_file(*i,config); } else if(new_state == JOB_STATE_FINISHED) { once_more=true; job_state_write_file(*i,config,i->job_state); } else if(new_state == JOB_STATE_DELETED) { once_more=true; job_state_write_file(*i,config,i->job_state); } else { logger.msg(Arc::INFO,"%s: %s: New job belongs to %i/%i",i->job_id.c_str(), GMJob::get_state_name(new_state),i->get_user().get_uid(),i->get_user().get_gid()); // Make it clean state after restart job_state_write_file(*i,config,i->job_state); i->Start(); // add to DN map // here we don't enforce the per-DN limit since the jobs are // already in the system if (i->local->DN.empty()) { logger.msg(Arc::WARNING, "Failed to get DN information from .local file for job %s", i->job_id); } jobs_dn[i->local->DN]++; } } // Not doing JobPending here because that job kind of does not exist. 
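// Illustrative note (added comment, not in the upstream source) on the
// per-DN accounting touched above: jobs_dn[DN] is incremented here without
// enforcing any limit, because such jobs are already in the system after a
// restart. The actual gate is applied later, in ActJobAccepted(), roughly:
//   if (config.MaxPerDN() > 0 && jobs_dn[DN] >= config.MaxPerDN())
//     JobPending(i);   // keep the job in ACCEPTED until a slot frees up
// so only the transition towards PREPARING is throttled per DN.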
return; } void JobsList::ActJobAccepted(JobsList::iterator &i, bool& once_more,bool& /*delete_job*/, bool& job_error,bool& state_changed) { // accepted state - job was just accepted by A-REX and we already // know that it is accepted - now we are analyzing/parsing request, // or it can also happen we are waiting for user specified time logger.msg(Arc::VERBOSE,"%s: State: ACCEPTED",i->job_id); if(!GetLocalDescription(i)) { job_error=true; i->AddFailure("Internal error"); return; // go to next job } if(i->local->dryrun) { logger.msg(Arc::INFO,"%s: State: ACCEPTED: dryrun",i->job_id); i->AddFailure("User requested dryrun. Job skipped."); job_error=true; return; // go to next job } // check per-DN limit on processing jobs if (config.MaxPerDN() > 0 && jobs_dn[i->local->DN] >= config.MaxPerDN()) { JobPending(i); return; } // check for user specified time if(i->local->processtime != -1 && (i->local->processtime) > time(NULL)) { logger.msg(Arc::INFO,"%s: State: ACCEPTED: has process time %s",i->job_id.c_str(), i->local->processtime.str(Arc::UserTime)); return; } // job can progress to PREPARING - add to per-DN job list jobs_dn[i->local->DN]++; logger.msg(Arc::INFO,"%s: State: ACCEPTED: moving to PREPARING",i->job_id); state_changed=true; once_more=true; SetJobState(i, JOB_STATE_PREPARING, "Starting job processing"); i->Start(); // gather some frontend specific information for user, do it only once // Runs user-supplied executable placed at "frontend-info-collector" std::string cmd = Arc::ArcLocation::GetToolsDir()+"/frontend-info-collector"; char const * const args[2] = { cmd.c_str(), NULL }; job_controldiag_mark_put(*i,config,args); } void JobsList::ActJobPreparing(JobsList::iterator &i, bool& once_more,bool& /*delete_job*/, bool& job_error,bool& state_changed) { // preparing state - job is in data staging system, so check if it has // finished and whether all user uploadable files have been uploaded. logger.msg(Arc::VERBOSE,"%s: State: PREPARING",i->job_id); if(i->job_pending || state_loading(i,state_changed,false)) { if(i->job_pending || state_changed) { if(!GetLocalDescription(i)) { logger.msg(Arc::ERROR,"%s: Failed obtaining local job information.",i->job_id); i->AddFailure("Internal error"); job_error=true; return; } // For jobs with free stage in check if user reported complete stage in. bool stagein_complete = true; if(i->local->freestagein) { stagein_complete = false; std::list ifiles; if(job_input_status_read_file(i->job_id,config,ifiles)) { for(std::list::iterator ifile = ifiles.begin(); ifile != ifiles.end(); ++ifile) { if(*ifile == "/") { stagein_complete = true; break; } } } } // Here we have branch. 
Either job is ordinary one and goes to SUBMIT // or it has no executable and hence goes to FINISHING if(!stagein_complete) { state_changed=false; JobPending(i); } else if(i->local->exec.size() > 0) { if((config.MaxRunning()==-1) || (RunningJobs()CheckFailure(config)) { i->AddFailure("Data download failed"); } job_error=true; } } void JobsList::ActJobSubmitting(JobsList::iterator &i, bool& once_more,bool& /*delete_job*/, bool& job_error,bool& state_changed) { // everything is ready for submission to batch system or currently submitting logger.msg(Arc::VERBOSE,"%s: State: SUBMIT",i->job_id); if(state_submitting(i,state_changed)) { if(state_changed) { SetJobState(i, JOB_STATE_INLRMS, "Job is passed to LRMS"); once_more=true; } } else { job_error=true; } } void JobsList::ActJobCanceling(JobsList::iterator &i, bool& once_more,bool& /*delete_job*/, bool& job_error,bool& state_changed) { // This state is like submitting, only -cancel instead of -submit logger.msg(Arc::VERBOSE,"%s: State: CANCELING",i->job_id); if(state_submitting(i,state_changed,true)) { if(state_changed) { SetJobState(i, JOB_STATE_FINISHING, "Job cancelation succeeded"); once_more=true; } } else { job_error=true; } } void JobsList::ActJobInlrms(JobsList::iterator &i, bool& once_more,bool& /*delete_job*/, bool& job_error,bool& state_changed) { // Job is currently running in LRMS, check if it has finished logger.msg(Arc::VERBOSE,"%s: State: INLRMS",i->job_id); if(!GetLocalDescription(i)) { i->AddFailure("Failed reading local job information"); job_error=true; return; // go to next job } if(i->job_pending || job_lrms_mark_check(i->job_id,config)) { if(!i->job_pending) { logger.msg(Arc::INFO,"%s: Job finished",i->job_id); job_diagnostics_mark_move(*i,config); LRMSResult ec = job_lrms_mark_read(i->job_id,config); if(ec.code() != i->local->exec.successcode) { logger.msg(Arc::INFO,"%s: State: INLRMS: exit message is %i %s",i->job_id,ec.code(),ec.description()); i->AddFailure("LRMS error: ("+ Arc::tostring(ec.code())+") "+ec.description()); job_error=true; JobFailStateRemember(i,JOB_STATE_INLRMS); // This does not require any special postprocessing and // can go to next state directly state_changed=true; once_more=true; return; } } state_changed=true; once_more=true; SetJobState(i, JOB_STATE_FINISHING, "Job finished executing in LRMS"); } } void JobsList::ActJobFinishing(JobsList::iterator &i, bool& once_more,bool& /*delete_job*/, bool& job_error,bool& state_changed) { // Batch job has finished and now ready to upload output files, or // upload is already on-going logger.msg(Arc::VERBOSE,"%s: State: FINISHING",i->job_id); if(state_loading(i,state_changed,true)) { if(state_changed) { SetJobState(i, JOB_STATE_FINISHED, "Stage-out finished."); if(GetLocalDescription(i)) { if (--(jobs_dn[i->local->DN]) <= 0) jobs_dn.erase(i->local->DN); } once_more=true; } else { return; // still in data staging } } else { state_changed=true; // to send mail once_more=true; if(!i->CheckFailure(config)) { i->AddFailure("Data upload failed"); } job_error=true; } } void JobsList::ActJobFinished(JobsList::iterator &i, bool& /*once_more*/,bool& /*delete_job*/, bool& /*job_error*/,bool& state_changed) { // Job has completely finished, check for user requests to restart or // clean up job, and if it is time to move to DELETED if(job_clean_mark_check(i->job_id,config)) { // request to clean job logger.msg(Arc::INFO,"%s: Job is requested to clean - deleting",i->job_id); UnlockDelegation(i); // delete everything job_clean_final(*i,config); return; } 
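// Illustrative summary (added comment, not in the upstream source) of the
// restart mapping implemented just below. Provided RecreateTransferLists()
// succeeds, the job is put back according to the state it failed in:
//   failed in PREPARING            -> back to ACCEPTED
//   failed in SUBMITTING or INLRMS -> ACCEPTED if input files are missing
//                                     (downloads > 0), otherwise PREPARING
//   failed in FINISHING            -> back to INLRMS
//   failed state unknown/undefined -> the rerun request is refused
// In each successful case the failure mark is removed and the job is marked
// pending so the new state is acted upon immediately.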
if(job_restart_mark_check(i->job_id,config)) { job_restart_mark_remove(i->job_id,config); // request to rerun job - check if we can // Get information about failed state and forget it job_state_t state_ = JobFailStateGet(i); if(state_ == JOB_STATE_PREPARING) { if(RecreateTransferLists(i)) { job_failed_mark_remove(i->job_id,config); SetJobState(i, JOB_STATE_ACCEPTED, "Request to restart failed job"); JobPending(i); // make it go to end of state immediately return; } } else if((state_ == JOB_STATE_SUBMITTING) || (state_ == JOB_STATE_INLRMS)) { if(RecreateTransferLists(i)) { job_failed_mark_remove(i->job_id,config); if(i->local->downloads > 0) { // missing input files has to be re-downloaded SetJobState(i, JOB_STATE_ACCEPTED, "Request to restart failed job (some input files are missing)"); } else { SetJobState(i, JOB_STATE_PREPARING, "Request to restart failed job (no input files are missing)"); } JobPending(i); // make it go to end of state immediately return; } } else if(state_ == JOB_STATE_FINISHING) { if(RecreateTransferLists(i)) { job_failed_mark_remove(i->job_id,config); SetJobState(i, JOB_STATE_INLRMS, "Request to restart failed job"); JobPending(i); // make it go to end of state immediately return; } } else if(state_ == JOB_STATE_UNDEFINED) { logger.msg(Arc::ERROR,"%s: Can't rerun on request",i->job_id); } else { logger.msg(Arc::ERROR,"%s: Can't rerun on request - not a suitable state",i->job_id); } } time_t t = -1; if(!job_local_read_cleanuptime(i->job_id,config,t)) { // must be first time - create cleanuptime t=PrepareCleanupTime(i,i->keep_finished); } // check if it is time to move job to DELETED if(((int)(time(NULL)-t)) >= 0) { logger.msg(Arc::INFO,"%s: Job is too old - deleting",i->job_id); UnlockDelegation(i); if(i->keep_deleted) { // here we have to get the cache per-job dirs to be deleted std::list cache_per_job_dirs; CacheConfig cache_config(config.CacheParams()); cache_config.substitute(config, i->user); std::vector conf_caches = cache_config.getCacheDirs(); // add each dir to our list for (std::vector::iterator it = conf_caches.begin(); it != conf_caches.end(); it++) { cache_per_job_dirs.push_back(it->substr(0, it->find(" "))+"/joblinks"); } // add remote caches std::vector remote_caches = cache_config.getRemoteCacheDirs(); for (std::vector::iterator it = remote_caches.begin(); it != remote_caches.end(); it++) { cache_per_job_dirs.push_back(it->substr(0, it->find(" "))+"/joblinks"); } // add draining caches std::vector draining_caches = cache_config.getDrainingCacheDirs(); for (std::vector::iterator it = draining_caches.begin(); it != draining_caches.end(); it++) { cache_per_job_dirs.push_back(it->substr(0, it->find(" "))+"/joblinks"); } job_clean_deleted(*i,config,cache_per_job_dirs); SetJobState(i, JOB_STATE_DELETED, "Job stayed unattended too long"); state_changed=true; } else { // delete everything job_clean_final(*i,config); } } } void JobsList::ActJobDeleted(JobsList::iterator &i, bool& /*once_more*/,bool& /*delete_job*/, bool& /*job_error*/,bool& /*state_changed*/) { // Job only has a few control files left, check if is it time to // remove all traces time_t t = -1; if(!job_local_read_cleanuptime(i->job_id,config,t) || ((time(NULL)-(t+i->keep_deleted)) >= 0)) { logger.msg(Arc::INFO,"%s: Job is ancient - delete rest of information",i->job_id); UnlockDelegation(i); // not needed here but in case someting went wrong previously // delete everything job_clean_final(*i,config); } } bool JobsList::ActJob(JobsList::iterator &i) { Arc::JobPerfRecord 
perfrecord(*config.GetJobPerfLog(), i->job_id); job_state_t perflog_start_state = i->job_state; bool once_more = true; bool delete_job = false; bool job_error = false; bool state_changed = false; job_state_t old_state = i->job_state; job_state_t old_reported_state = i->job_state; bool old_pending = i->job_pending; while(once_more) { once_more = false; delete_job = false; job_error = false; state_changed = false; // some states can not be canceled (or there is no sense to do that) if((i->job_state != JOB_STATE_CANCELING) && (i->job_state != JOB_STATE_FINISHED) && (i->job_state != JOB_STATE_DELETED) && (i->job_state != JOB_STATE_SUBMITTING)) { if(job_cancel_mark_check(i->job_id,config)) { logger.msg(Arc::INFO,"%s: Canceling job because of user request",i->job_id); if (i->job_state == JOB_STATE_PREPARING || i->job_state == JOB_STATE_FINISHING) { dtr_generator->cancelJob(*i); } // kill running child if(i->child) { i->child->Kill(0); CleanChildProcess(i); } // put some explanation i->AddFailure("User requested to cancel the job"); JobFailStateRemember(i,i->job_state,false); // behave like if job failed if(!FailedJob(i,true)) { // DO NOT KNOW WHAT TO DO HERE !!!!!!!!!! } // special processing for INLRMS case if(i->job_state == JOB_STATE_INLRMS) { SetJobState(i, JOB_STATE_CANCELING, "Request to cancel job"); } // if FINISHING we wait to get back all DTRs else if (i->job_state != JOB_STATE_PREPARING) { SetJobState(i, JOB_STATE_FINISHING, "Request to cancel job"); } job_cancel_mark_remove(i->job_id,config); state_changed=true; once_more=true; } } if(!state_changed) switch(i->job_state) { case JOB_STATE_UNDEFINED: { ActJobUndefined(i,once_more,delete_job,job_error,state_changed); } break; case JOB_STATE_ACCEPTED: { ActJobAccepted(i,once_more,delete_job,job_error,state_changed); } break; case JOB_STATE_PREPARING: { ActJobPreparing(i,once_more,delete_job,job_error,state_changed); } break; case JOB_STATE_SUBMITTING: { ActJobSubmitting(i,once_more,delete_job,job_error,state_changed); } break; case JOB_STATE_CANCELING: { ActJobCanceling(i,once_more,delete_job,job_error,state_changed); } break; case JOB_STATE_INLRMS: { ActJobInlrms(i,once_more,delete_job,job_error,state_changed); } break; case JOB_STATE_FINISHING: { ActJobFinishing(i,once_more,delete_job,job_error,state_changed); } break; case JOB_STATE_FINISHED: { ActJobFinished(i,once_more,delete_job,job_error,state_changed); } break; case JOB_STATE_DELETED: { ActJobDeleted(i,once_more,delete_job,job_error,state_changed); } break; default: { // should destroy job with unknown state ?! 
} break; } do { // Process errors which happened during processing this job if(job_error) { job_error=false; // always cause rerun - in order not to lose state change // Failed job - move it to proper state logger.msg(Arc::ERROR,"%s: Job failure detected",i->job_id); if(!FailedJob(i,false)) { // something is really wrong i->AddFailure("Failed during processing failure"); delete_job=true; } else { // just move job to proper state if((i->job_state == JOB_STATE_FINISHED) || (i->job_state == JOB_STATE_DELETED)) { // Normally these stages should not generate errors // so ignore them } else if(i->job_state == JOB_STATE_FINISHING) { // No matter if FINISHING fails - it still goes to FINISHED SetJobState(i, JOB_STATE_FINISHED, "Job processing error"); if(GetLocalDescription(i)) { if (--(jobs_dn[i->local->DN]) <= 0) jobs_dn.erase(i->local->DN); } state_changed=true; once_more=true; } else { SetJobState(i, JOB_STATE_FINISHING, "Job processing error"); state_changed=true; once_more=true; } i->job_pending=false; } } // Process state changes, also those generated by error processing if(old_reported_state != i->job_state) { if(old_reported_state != JOB_STATE_UNDEFINED) { // Report state change into log logger.msg(Arc::INFO,"%s: State: %s from %s", i->job_id.c_str(),GMJob::get_state_name(i->job_state), GMJob::get_state_name(old_reported_state)); } old_reported_state=i->job_state; } if(state_changed) { state_changed=false; i->job_pending=false; if(!job_state_write_file(*i,config,i->job_state)) { i->AddFailure("Failed writing job status: "+Arc::StrError(errno)); job_error=true; } else { // Talk to external plugin to ask if we can proceed // Jobs with ACCEPTED state or UNDEFINED previous state // could be ignored here. But there is tiny possibility // that service failed while processing ContinuationPlugins. // Hence here we have duplicate call for ACCEPTED state. // TODO: maybe introducing job state prefix VALIDATING: // could be used to resolve this situation. 
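// Illustrative summary (added comment, not in the upstream source) of how
// the plugin results gathered below are mapped onto job processing:
//   act_fail  -> the job is failed, with the plugin response as the reason
//   act_log   -> the response is logged as a warning and processing goes on
//   act_pass  -> processing continues silently
//   any other -> treated as a failure to execute the plugin; job is failed
// (Such plugins are typically attached to state changes through the
// grid-manager configuration; the exact option syntax is not shown here.)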
if(config.GetContPlugins()) { std::list results; config.GetContPlugins()->run(*i,config,results); std::list::iterator result = results.begin(); while(result != results.end()) { // analyze results if(result->action == ContinuationPlugins::act_fail) { logger.msg(Arc::ERROR,"%s: Plugin at state %s : %s", i->job_id.c_str(),i->get_state_name(), result->response); i->AddFailure(std::string("Plugin at state ")+ i->get_state_name()+" failed: "+(result->response)); job_error=true; } else if(result->action == ContinuationPlugins::act_log) { // Scream but go ahead logger.msg(Arc::WARNING,"%s: Plugin at state %s : %s", i->job_id.c_str(),i->get_state_name(), result->response); } else if(result->action == ContinuationPlugins::act_pass) { // Just continue quietly } else { logger.msg(Arc::ERROR,"%s: Plugin execution failed",i->job_id); i->AddFailure(std::string("Failed running plugin at state ")+ i->get_state_name()); job_error=true; } ++result; } } // Processing to be done on state changes if(config.GetJobLog()) config.GetJobLog()->make_file(*i,config); if(i->job_state == JOB_STATE_FINISHED) { job_clean_finished(i->job_id,config); if(config.GetJobLog()) config.GetJobLog()->finish_info(*i,config); PrepareCleanupTime(i,i->keep_finished); } else if(i->job_state == JOB_STATE_PREPARING) { if(config.GetJobLog()) config.GetJobLog()->start_info(*i,config); } } // send mail after error and change are processed // do not send if something really wrong happened to avoid email DoS if(!delete_job) send_mail(*i,config); } // Keep repeating till error goes out } while(job_error); if(delete_job) { logger.msg(Arc::ERROR,"%s: Delete request due to internal problems",i->job_id); // Move to finished in order to remove from list SetJobState(i, JOB_STATE_FINISHED, "Preparing to delete job due to internal error"); if(i->GetLocalDescription(config)) { if (--(jobs_dn[i->local->DN]) == 0) jobs_dn.erase(i->local->DN); } i->job_pending=false; job_state_write_file(*i,config,i->job_state); i->AddFailure("Serious troubles (problems during processing problems)"); FailedJob(i,false); // put some marks job_clean_finished(i->job_id,config); // clean status files once_more=true; // to process some things in local } } if(perfrecord.Started()) { job_state_t perflog_end_state = i->job_state; std::string name(GMJob::get_state_name(perflog_start_state)); name += "-"; name += GMJob::get_state_name(perflog_end_state); perfrecord.End(name); }; // FINISHED+DELETED jobs are not kept in list - only in files // if job managed to get here with state UNDEFINED - // means we are overloaded with jobs - do not keep them in list if((i->job_state == JOB_STATE_FINISHED) || (i->job_state == JOB_STATE_DELETED) || (i->job_state == JOB_STATE_UNDEFINED)) { // this is the ONLY place where jobs are removed from memory // update counters if(!old_pending) { jobs_num[old_state]--; } else { jobs_pending--; } i=jobs.erase(i); } else { // update counters if(!old_pending) { jobs_num[old_state]--; } else { jobs_pending--; } if(!i->job_pending) { jobs_num[i->job_state]++; } else { jobs_pending++; } ++i; } return true; } class JobFDesc { public: JobId id; uid_t uid; gid_t gid; time_t t; JobFDesc(const std::string& s):id(s),uid(0),gid(0),t(-1) { } bool operator<(const JobFDesc &right) const { return (t < right.t); } }; bool JobsList::RestartJobs(const std::string& cdir,const std::string& odir) { bool res = true; try { Glib::Dir dir(cdir); for(;;) { std::string file=dir.read_name(); if(file.empty()) break; int l=file.length(); // job id contains at least 1 character if(l>(4+7) 
&& file.substr(0,4) == "job." && file.substr(l-7) == ".status") { uid_t uid; gid_t gid; time_t t; std::string fname=cdir+'/'+file.c_str(); std::string oname=odir+'/'+file.c_str(); if(check_file_owner(fname,uid,gid,t)) { if(::rename(fname.c_str(),oname.c_str()) != 0) { logger.msg(Arc::ERROR,"Failed to move file %s to %s",fname,oname); res=false; } } } } dir.close(); } catch(Glib::FileError& e) { logger.msg(Arc::ERROR,"Failed reading control directory: %s",cdir); return false; } return res; } // This code is run at service restart bool JobsList::RestartJobs(void) { std::string cdir=config.ControlDir(); // Jobs from old version bool res1 = RestartJobs(cdir,cdir+"/"+subdir_rew); // Jobs after service restart bool res2 = RestartJobs(cdir+"/"+subdir_cur,cdir+"/"+subdir_rew); return res1 && res2; } bool JobsList::ScanJobs(const std::string& cdir,std::list& ids) { Arc::JobPerfRecord perfrecord(*config.GetJobPerfLog(), "*"); try { Glib::Dir dir(cdir); for(;;) { std::string file=dir.read_name(); if(file.empty()) break; int l=file.length(); // job id contains at least 1 character if(l>(4+7) && file.substr(0,4) == "job." && file.substr(l-7) == ".status") { JobFDesc id(file.substr(4,l-7-4)); if(FindJob(id.id) == jobs.end()) { std::string fname=cdir+'/'+file.c_str(); uid_t uid; gid_t gid; time_t t; if(check_file_owner(fname,uid,gid,t)) { // add it to the list id.uid=uid; id.gid=gid; id.t=t; ids.push_back(id); } } } } } catch(Glib::FileError& e) { logger.msg(Arc::ERROR,"Failed reading control directory: %s: %s",config.ControlDir(), e.what()); return false; } perfrecord.End("SCAN-JOBS"); return true; } bool JobsList::ScanMarks(const std::string& cdir,const std::list& suffices,std::list& ids) { Arc::JobPerfRecord perfrecord(*config.GetJobPerfLog(), "*"); try { Glib::Dir dir(cdir); for(;;) { std::string file=dir.read_name(); if(file.empty()) break; int l=file.length(); // job id contains at least 1 character if(l>(4+7) && file.substr(0,4) == "job.") { for(std::list::const_iterator sfx = suffices.begin(); sfx != suffices.end();++sfx) { int ll = sfx->length(); if(l > (ll+4) && file.substr(l-ll) == *sfx) { JobFDesc id(file.substr(4,l-ll-4)); if(FindJob(id.id) == jobs.end()) { std::string fname=cdir+'/'+file.c_str(); uid_t uid; gid_t gid; time_t t; if(check_file_owner(fname,uid,gid,t)) { // add it to the list id.uid=uid; id.gid=gid; id.t=t; ids.push_back(id); } } break; } } } } } catch(Glib::FileError& e) { logger.msg(Arc::ERROR,"Failed reading control directory: %s",config.ControlDir()); return false; } perfrecord.End("SCAN-MARKS"); return true; } // find new jobs - sort by date to implement FIFO bool JobsList::ScanNewJobs(void) { Arc::JobPerfRecord perfrecord(*config.GetJobPerfLog(), "*"); // New jobs will be accepted only if number of jobs being processed // does not exceed allowed. So avoid scanning if no jobs will be allowed. 
if((AcceptedJobs() < config.MaxJobs()) || (config.MaxJobs() == -1)) { std::string cdir=config.ControlDir(); std::list ids; // For picking up jobs after service restart std::string odir=cdir+"/"+subdir_rew; if(!ScanJobs(odir,ids)) return false; // sorting by date ids.sort(); for(std::list::iterator id=ids.begin();id!=ids.end();++id) { iterator i; AddJobNoCheck(id->id,i,id->uid,id->gid); }; ids.clear(); // For new jobs std::string ndir=cdir+"/"+subdir_new; if(!ScanJobs(ndir,ids)) return false; // sorting by date ids.sort(); for(std::list::iterator id=ids.begin();id!=ids.end();++id) { iterator i; // adding job with file's uid/gid AddJobNoCheck(id->id,i,id->uid,id->gid); }; }; perfrecord.End("SCAN-JOBS-NEW"); return true; } bool JobsList::ScanOldJobs(unsigned int max_scan_time,int max_scan_jobs) { Arc::JobPerfRecord perfrecord(*config.GetJobPerfLog(), "*"); // We are going to scan a dir with a lot of files here. So we scan it in // parts and limit scanning time. A finished job is added to the job list // and acted on straight away. If it remains finished or is deleted then it // will be removed again from the list. If it is restarted it will be kept in // the list and processed as normal in the next processing loop. Restarts // are normally processed in ScanNewMarks but can also happen here. time_t start = time(NULL); if(max_scan_time < 10) max_scan_time=10; // some sane number - 10s std::string cdir=config.ControlDir()+"/"+subdir_old; try { if(!old_dir) { old_dir = new Glib::Dir(cdir); } for(;;) { std::string file=old_dir->read_name(); if(file.empty()) { old_dir->close(); delete old_dir; old_dir=NULL; return false; } int l=file.length(); // job id must contain at least one character if(l>(4+7) && file.substr(0,4) == "job." && file.substr(l-7) == ".status") { JobFDesc id(file.substr(4, l-7-4)); if(FindJob(id.id) == jobs.end()) { std::string fname=cdir+'/'+file; uid_t uid; gid_t gid; time_t t; if(check_file_owner(fname,uid,gid,t)) { job_state_t st = job_state_read_file(id.id,config); if(st == JOB_STATE_FINISHED || st == JOB_STATE_DELETED) { JobsList::iterator i; AddJobNoCheck(id.id, i, uid, gid); ActJob(i); if(max_scan_jobs > 0) --max_scan_jobs; } } } } if(((unsigned int)(time(NULL)-start)) >= max_scan_time) break; if(max_scan_jobs == 0) break; } } catch(Glib::FileError& e) { logger.msg(Arc::ERROR,"Failed reading control directory: %s",cdir); if(old_dir) { old_dir->close(); delete old_dir; old_dir=NULL; } return false; } perfrecord.End("SCAN-JOBS-OLD"); return true; } bool JobsList::ScanNewMarks(void) { Arc::JobPerfRecord perfrecord(*config.GetJobPerfLog(), "*"); std::string cdir=config.ControlDir(); std::string ndir=cdir+"/"+subdir_new; std::list ids; std::list sfx; sfx.push_back(sfx_clean); sfx.push_back(sfx_restart); sfx.push_back(sfx_cancel); if(!ScanMarks(ndir,sfx,ids)) return false; ids.sort(); std::string last_id; for(std::list::iterator id=ids.begin();id!=ids.end();++id) { if(id->id == last_id) continue; // already processed last_id = id->id; job_state_t st = job_state_read_file(id->id,config); if((st == JOB_STATE_UNDEFINED) || (st == JOB_STATE_DELETED)) { // Job probably does not exist anymore job_clean_mark_remove(id->id,config); job_restart_mark_remove(id->id,config); job_cancel_mark_remove(id->id,config); } // Check if such job finished and add it to list. if(st == JOB_STATE_FINISHED) { iterator i; AddJobNoCheck(id->id,i,id->uid,id->gid); // That will activate its processing at least for one step. 
i->job_state = st; } } perfrecord.End("SCAN-MARKS-NEW"); return true; } // For simply collecting all jobs. bool JobsList::ScanAllJobs(void) { Arc::JobPerfRecord perfrecord(*config.GetJobPerfLog(), "*"); std::list subdirs; subdirs.push_back(std::string("/")+subdir_rew); // For picking up jobs after service restart subdirs.push_back(std::string("/")+subdir_new); // For new jobs subdirs.push_back(std::string("/")+subdir_cur); // For active jobs subdirs.push_back(std::string("/")+subdir_old); // For done jobs for(std::list::iterator subdir = subdirs.begin(); subdir != subdirs.end();++subdir) { std::string cdir=config.ControlDir(); std::list ids; std::string odir=cdir+(*subdir); if(!ScanJobs(odir,ids)) return false; // sorting by date ids.sort(); for(std::list::iterator id=ids.begin();id!=ids.end();++id) { iterator i; AddJobNoCheck(id->id,i,id->uid,id->gid); } } perfrecord.End("SCAN-JOBS-ALL"); return true; } bool JobsList::AddJob(const JobId& id) { if(FindJob(id) != jobs.end()) return true; std::list subdirs; subdirs.push_back(std::string("/")+subdir_rew); // For picking up jobs after service restart subdirs.push_back(std::string("/")+subdir_new); // For new jobs subdirs.push_back(std::string("/")+subdir_cur); // For active jobs subdirs.push_back(std::string("/")+subdir_old); // For done jobs for(std::list::iterator subdir = subdirs.begin(); subdir != subdirs.end();++subdir) { std::string cdir=config.ControlDir(); std::string odir=cdir+(*subdir); std::string fname=odir+'/'+"job."+id+".status"; uid_t uid; gid_t gid; time_t t; if(check_file_owner(fname,uid,gid,t)) { // add it to the list AddJobNoCheck(id,uid,gid); return true; } } return false; } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/PaxHeaders.7502/Makefile.am0000644000000000000000000000012713065017103026150 xustar000000000000000027 mtime=1490296387.698578 30 atime=1513200602.161040093 30 ctime=1513200662.830782107 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/Makefile.am0000644000175000002070000000115013065017103026207 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libjobs.la libjobs_la_SOURCES = \ CommFIFO.cpp JobsList.cpp GMJob.cpp JobDescriptionHandler.cpp \ ContinuationPlugins.cpp DTRGenerator.cpp \ CommFIFO.h JobsList.h GMJob.h JobDescriptionHandler.h \ ContinuationPlugins.h DTRGenerator.h libjobs_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) libjobs_la_LIBADD = \ $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(DBCXX_LIBS) nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/PaxHeaders.7502/Makefile.in0000644000000000000000000000013013214315732026160 xustar000000000000000029 mtime=1513200602.21804079 30 atime=1513200649.968624798 29 ctime=1513200662.83178212 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/Makefile.in0000644000175000002070000007102513214315732026235 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/grid-manager/jobs DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) am__DEPENDENCIES_1 = libjobs_la_DEPENDENCIES = \ $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) am_libjobs_la_OBJECTS = libjobs_la-CommFIFO.lo libjobs_la-JobsList.lo \ libjobs_la-GMJob.lo libjobs_la-JobDescriptionHandler.lo \ libjobs_la-ContinuationPlugins.lo libjobs_la-DTRGenerator.lo libjobs_la_OBJECTS = $(am_libjobs_la_OBJECTS) libjobs_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libjobs_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libjobs_la_SOURCES) DIST_SOURCES = $(libjobs_la_SOURCES) ETAGS = etags CTAGS = 
ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = 
@GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = 
@build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libjobs.la libjobs_la_SOURCES = \ CommFIFO.cpp JobsList.cpp GMJob.cpp JobDescriptionHandler.cpp \ ContinuationPlugins.cpp DTRGenerator.cpp \ CommFIFO.h JobsList.h GMJob.h JobDescriptionHandler.h \ ContinuationPlugins.h DTRGenerator.h libjobs_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) libjobs_la_LIBADD = \ $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(DBCXX_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/jobs/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/jobs/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libjobs.la: $(libjobs_la_OBJECTS) $(libjobs_la_DEPENDENCIES) $(libjobs_la_LINK) $(libjobs_la_OBJECTS) $(libjobs_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libjobs_la-CommFIFO.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libjobs_la-ContinuationPlugins.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libjobs_la-DTRGenerator.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libjobs_la-GMJob.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libjobs_la-JobDescriptionHandler.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libjobs_la-JobsList.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libjobs_la-CommFIFO.lo: CommFIFO.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -MT libjobs_la-CommFIFO.lo -MD -MP -MF $(DEPDIR)/libjobs_la-CommFIFO.Tpo -c -o libjobs_la-CommFIFO.lo `test -f 'CommFIFO.cpp' || echo '$(srcdir)/'`CommFIFO.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libjobs_la-CommFIFO.Tpo $(DEPDIR)/libjobs_la-CommFIFO.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='CommFIFO.cpp' object='libjobs_la-CommFIFO.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) 
$(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -c -o libjobs_la-CommFIFO.lo `test -f 'CommFIFO.cpp' || echo '$(srcdir)/'`CommFIFO.cpp libjobs_la-JobsList.lo: JobsList.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -MT libjobs_la-JobsList.lo -MD -MP -MF $(DEPDIR)/libjobs_la-JobsList.Tpo -c -o libjobs_la-JobsList.lo `test -f 'JobsList.cpp' || echo '$(srcdir)/'`JobsList.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libjobs_la-JobsList.Tpo $(DEPDIR)/libjobs_la-JobsList.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobsList.cpp' object='libjobs_la-JobsList.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -c -o libjobs_la-JobsList.lo `test -f 'JobsList.cpp' || echo '$(srcdir)/'`JobsList.cpp libjobs_la-GMJob.lo: GMJob.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -MT libjobs_la-GMJob.lo -MD -MP -MF $(DEPDIR)/libjobs_la-GMJob.Tpo -c -o libjobs_la-GMJob.lo `test -f 'GMJob.cpp' || echo '$(srcdir)/'`GMJob.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libjobs_la-GMJob.Tpo $(DEPDIR)/libjobs_la-GMJob.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='GMJob.cpp' object='libjobs_la-GMJob.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -c -o libjobs_la-GMJob.lo `test -f 'GMJob.cpp' || echo '$(srcdir)/'`GMJob.cpp libjobs_la-JobDescriptionHandler.lo: JobDescriptionHandler.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -MT libjobs_la-JobDescriptionHandler.lo -MD -MP -MF $(DEPDIR)/libjobs_la-JobDescriptionHandler.Tpo -c -o libjobs_la-JobDescriptionHandler.lo `test -f 'JobDescriptionHandler.cpp' || echo '$(srcdir)/'`JobDescriptionHandler.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libjobs_la-JobDescriptionHandler.Tpo $(DEPDIR)/libjobs_la-JobDescriptionHandler.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobDescriptionHandler.cpp' object='libjobs_la-JobDescriptionHandler.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -c -o libjobs_la-JobDescriptionHandler.lo `test -f 'JobDescriptionHandler.cpp' || echo '$(srcdir)/'`JobDescriptionHandler.cpp libjobs_la-ContinuationPlugins.lo: ContinuationPlugins.cpp 
@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -MT libjobs_la-ContinuationPlugins.lo -MD -MP -MF $(DEPDIR)/libjobs_la-ContinuationPlugins.Tpo -c -o libjobs_la-ContinuationPlugins.lo `test -f 'ContinuationPlugins.cpp' || echo '$(srcdir)/'`ContinuationPlugins.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libjobs_la-ContinuationPlugins.Tpo $(DEPDIR)/libjobs_la-ContinuationPlugins.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ContinuationPlugins.cpp' object='libjobs_la-ContinuationPlugins.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -c -o libjobs_la-ContinuationPlugins.lo `test -f 'ContinuationPlugins.cpp' || echo '$(srcdir)/'`ContinuationPlugins.cpp libjobs_la-DTRGenerator.lo: DTRGenerator.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -MT libjobs_la-DTRGenerator.lo -MD -MP -MF $(DEPDIR)/libjobs_la-DTRGenerator.Tpo -c -o libjobs_la-DTRGenerator.lo `test -f 'DTRGenerator.cpp' || echo '$(srcdir)/'`DTRGenerator.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libjobs_la-DTRGenerator.Tpo $(DEPDIR)/libjobs_la-DTRGenerator.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DTRGenerator.cpp' object='libjobs_la-DTRGenerator.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -c -o libjobs_la-DTRGenerator.lo `test -f 'DTRGenerator.cpp' || echo '$(srcdir)/'`DTRGenerator.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test 
-z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/PaxHeaders.7502/JobDescriptionHandler.h0000644000000000000000000000012412702005750030500 xustar000000000000000027 mtime=1460145128.602165 27 atime=1513200576.264723 30 ctime=1513200662.842782254 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.h0000644000175000002070000001020712702005750030545 0ustar00mockbuildmock00000000000000#ifndef __ARC_GM_JOB_REQUEST_H__ #define __ARC_GM_JOB_REQUEST_H__ #include #include #include "GMJob.h" namespace ARex { /// Return code of parsing operation enum JobReqResultType { JobReqSuccess, JobReqInternalFailure, JobReqSyntaxFailure, JobReqMissingFailure, JobReqUnsupportedFailure, JobReqLogicalFailure }; /// Return value of parsing operation class JobReqResult { public: JobReqResultType result_type; std::string acl; std::string failure; JobReqResult(JobReqResultType type, const std::string& acl="", const std::string& failure="") :result_type(type), acl(acl), failure(failure) {} bool operator==(const JobReqResultType& result) const { return result == result_type; } bool operator!=(const JobReqResultType& result) const { return result != result_type; } }; /// Deals with parsing and converting job descriptions between Arc::JobDescription /// and JobLocalDescription. Also deals with reading and writing .grami file. class JobDescriptionHandler { public: /// Create a new job description handler JobDescriptionHandler(const GMConfig& config): config(config) {} /// Parse the job description at the given file into job_desc and /// arc_job_desc. 
Optionally check acl file and put result into /// returned object JobReqResult parse_job_req(JobLocalDescription &job_desc,Arc::JobDescription& arc_job_desc,const std::string &fname,bool check_acl=false) const; /// Parse the job description for job_id into job_desc. Optionally check /// acl file and put result into returned object JobReqResult parse_job_req(const JobId &job_id,JobLocalDescription &job_desc,bool check_acl=false) const; /// Parse the job description for job_id into job_desc and arc_job_desc. /// Optionally check acl file and put result into returned object JobReqResult parse_job_req(const JobId &job_id,JobLocalDescription &job_desc,Arc::JobDescription& arc_job_desc,bool check_acl=false) const; /// Parse job description into job_desc and write .local, .input and .output files bool process_job_req(const GMJob &job,JobLocalDescription &job_desc) const; /// Write .grami file after parsing job description file bool write_grami(GMJob &job,const char *opt_add = NULL) const; /// Write .grami from information in arc_job_desc and job bool write_grami(const Arc::JobDescription& arc_job_desc, GMJob& job, const char* opt_add) const; /// Get the local LRMS job id corresponding to A-REX job_id std::string get_local_id(const JobId &job_id) const; /// Set executable bits on appropriate files for the given job bool set_execs(const GMJob &job) const; private: /// Read and parse job description from file and update the job description reference. /** @param fname filename of the job description file. * @param desc a reference to a Arc::JobDescription which is filled on success, * if the job description format is unknown the reference is not touched. * @return false if job description could not be read or parsed, true on success. */ Arc::JobDescriptionResult get_arc_job_description(const std::string& fname, Arc::JobDescription& desc) const; /// Read ACLs from .acl file JobReqResult get_acl(const Arc::JobDescription& arc_job_desc) const; /// Write info to .grami for job executable bool write_grami_executable(std::ofstream& f, const std::string& name, const Arc::ExecutableType& exec) const; /// Class for handling escapes and quotes when writing to .grami class value_for_shell { friend std::ostream& operator<<(std::ostream&,const value_for_shell&); private: const char* str; bool quote; public: value_for_shell(const char *str_,bool quote_):str(str_),quote(quote_) { }; value_for_shell(const std::string &str_,bool quote_):str(str_.c_str()),quote(quote_) { }; }; friend std::ostream& operator<<(std::ostream&,const value_for_shell&); const GMConfig& config; static Arc::Logger logger; static const std::string NG_RSL_DEFAULT_STDIN; static const std::string NG_RSL_DEFAULT_STDOUT; static const std::string NG_RSL_DEFAULT_STDERR; }; std::ostream& operator<<(std::ostream&,const JobDescriptionHandler::value_for_shell&); } // namespace ARex #endif nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/PaxHeaders.7502/GMJob.cpp0000644000000000000000000000012412702005750025555 xustar000000000000000027 mtime=1460145128.602165 27 atime=1513200576.254723 30 ctime=1513200662.835782168 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/GMJob.cpp0000644000175000002070000000761712702005750025635 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "../files/ControlFileContent.h" #include "../files/ControlFileHandling.h" #include "GMJob.h" namespace ARex { GMJob::job_state_rec_t const GMJob::states_all[JOB_STATE_NUM] = { { "ACCEPTED", ' ' }, // JOB_STATE_ACCEPTED { 
"PREPARING", 'b' }, // JOB_STATE_PREPARING { "SUBMIT", ' ' }, // JOB_STATE_SUBMITING { "INLRMS", 'q' }, // JOB_STATE_INLRMS, { "FINISHING", 'f' }, // JOB_STATE_FINISHING { "FINISHED", 'e' }, // JOB_STATE_FINISHED { "DELETED", 'd' }, // JOB_STATE_DELETED { "CANCELING", 'c' }, // JOB_STATE_CANCELING { "UNDEFINED", ' ' } // JOB_STATE_UNDEFINED }; const char* GMJob::get_state_name() const { if((job_state<0) || (job_state>=JOB_STATE_NUM)) return states_all[JOB_STATE_UNDEFINED].name; return states_all[job_state].name; } char GMJob::get_state_mail_flag() const { if((job_state<0) || (job_state>=JOB_STATE_NUM)) return states_all[JOB_STATE_UNDEFINED].mail_flag; return states_all[job_state].mail_flag; } const char* GMJob::get_state_name(job_state_t st) { if((st<0) || (st>=JOB_STATE_NUM)) return states_all[JOB_STATE_UNDEFINED].name; return states_all[st].name; } char GMJob::get_state_mail_flag(job_state_t st) { if((st<0) || (st>=JOB_STATE_NUM)) return states_all[JOB_STATE_UNDEFINED].mail_flag; return states_all[st].mail_flag; } job_state_t GMJob::get_state(const char* state) { for(int i = 0;iWait(); delete child; child=NULL; } delete local; } JobLocalDescription* GMJob::GetLocalDescription(const GMConfig& config) { if(local) return local; JobLocalDescription* job_desc; job_desc=new JobLocalDescription; if(!job_local_read_file(job_id,config,*job_desc)) { delete job_desc; return NULL; }; local=job_desc; return local; } JobLocalDescription* GMJob::GetLocalDescription(void) const { return local; } std::string GMJob::GetFailure(const GMConfig& config) const { std::string reason = job_failed_mark_read(job_id,config); if(!failure_reason.empty()) { reason+=failure_reason; reason+="\n"; }; return reason; } bool GMJob::CheckFailure(const GMConfig& config) const { if(!failure_reason.empty()) return true; return job_failed_mark_check(job_id,config); } void GMJob::PrepareToDestroy(void) { // We could send signals to downloaders and uploaders. // But currently those do not implement safe shutdown. // So we will simply wait for them to finish in destructor. } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/PaxHeaders.7502/GMJob.h0000644000000000000000000000012412702005750025222 xustar000000000000000027 mtime=1460145128.602165 27 atime=1513200576.265723 30 ctime=1513200662.841782242 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/GMJob.h0000644000175000002070000000757512702005750025305 0ustar00mockbuildmock00000000000000#ifndef GRID_MANAGER_JOB_H #define GRID_MANAGER_JOB_H #include #include #include #include namespace ARex { class JobsList; class JobLocalDescription; class GMConfig; /// Possible job states enum job_state_t { JOB_STATE_ACCEPTED = 0, JOB_STATE_PREPARING = 1, JOB_STATE_SUBMITTING = 2, JOB_STATE_INLRMS = 3, JOB_STATE_FINISHING = 4, JOB_STATE_FINISHED = 5, JOB_STATE_DELETED = 6, JOB_STATE_CANCELING = 7, JOB_STATE_UNDEFINED = 8 }; /// Number of job states #define JOB_STATE_NUM (JOB_STATE_UNDEFINED+1) /// Jobs identifier. Stored as string. Normally is a random string of /// numbers and letters. typedef std::string JobId; /// Represents a job in memory as it passes through the JobsList state machine. class GMJob { friend class JobsList; private: // State of the job (state machine) job_state_t job_state; // Flag to indicate job stays at this stage due to limits imposed. 
// Such jobs are not counted in counters bool job_pending; // Job identifier JobId job_id; // Directory to run job in std::string session_dir; // Explanation of job's failure std::string failure_reason; // How long job is kept on cluster after it finished time_t keep_finished; time_t keep_deleted; // Pointer to object containing most important parameters of job, // loaded when needed. JobLocalDescription* local; // Job's owner Arc::User user; // Used to determine data transfer share (eg DN, VOMS VO) std::string transfer_share; // Start time of job i.e. when it first moves to PREPARING time_t start_time; struct job_state_rec_t { const char* name; char mail_flag; }; /// Maps job state to state name and flag for email at that state static job_state_rec_t const states_all[JOB_STATE_NUM]; public: // external utility being run to perform tasks like stage-in/out, // submit/cancel. (todo - move to private) Arc::Run* child; // Constructors and destructor. // Accepts: // job_id - identifier // user - owner of job // dir - session_dir of job // state - initial state of job GMJob(const JobId &job_id,const Arc::User& user,const std::string &dir = "",job_state_t state = JOB_STATE_UNDEFINED); GMJob(void); GMJob(const GMJob &job); GMJob& operator=(const GMJob &job); ~GMJob(void); job_state_t get_state() const { return job_state; }; const char* get_state_name() const; char get_state_mail_flag() const; static const char* get_state_name(job_state_t st); static char get_state_mail_flag(job_state_t st); static job_state_t get_state(const char* state); const JobId& get_id() const { return job_id; }; std::string SessionDir(void) const { return session_dir; }; void AddFailure(const std::string &reason) { failure_reason+=reason; failure_reason+="\n"; }; /// Retrieve current failure reason (both in memory and stored in control dir). /// For non-failed jobs returned string is empty. std::string GetFailure(const GMConfig& config) const; /// Check if job is marked as failed (slightly faster than GetFailure). /// For failed job returns true, non-failed - false. 
bool CheckFailure(const GMConfig& config) const; bool operator==(const GMJob& job) const { return (job_id == job.job_id); }; bool operator==(const JobId &id) const { return (job_id == id); }; bool operator!=(const JobId &id) const { return (job_id != id); }; void set_user(const Arc::User& u) { user = u; } const Arc::User& get_user() const { return user;} void set_share(std::string share); // Force 'local' to be created and read from file if not already available JobLocalDescription* GetLocalDescription(const GMConfig& config); // Use only preloaded local JobLocalDescription* GetLocalDescription() const; void Start() { start_time = time(NULL); }; time_t GetStartTime() const { return start_time; }; void PrepareToDestroy(void); }; } // namespace ARex #endif nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/PaxHeaders.7502/JobDescriptionHandler.cpp0000644000000000000000000000012413124220473031033 xustar000000000000000027 mtime=1498489147.822352 27 atime=1513200576.253723 30 ctime=1513200662.837782193 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp0000644000175000002070000004324413124220473031107 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include "../files/ControlFileHandling.h" #include "../conf/GMConfig.h" #include "../../delegation/DelegationStore.h" #include "../../delegation/DelegationStores.h" #include "JobDescriptionHandler.h" // TODO: move to using process_job_req as much as possible namespace ARex { Arc::Logger JobDescriptionHandler::logger(Arc::Logger::getRootLogger(), "JobDescriptionHandler"); const std::string JobDescriptionHandler::NG_RSL_DEFAULT_STDIN("/dev/null"); const std::string JobDescriptionHandler::NG_RSL_DEFAULT_STDOUT("/dev/null"); const std::string JobDescriptionHandler::NG_RSL_DEFAULT_STDERR("/dev/null"); bool JobDescriptionHandler::process_job_req(const GMJob &job,JobLocalDescription &job_desc) const { /* read local first to get some additional info pushed here by script */ job_local_read_file(job.get_id(),config,job_desc); /* some default values */ job_desc.lrms=config.DefaultLRMS(); job_desc.queue=config.DefaultQueue(); job_desc.lifetime=Arc::tostring(config.KeepFinished()); if(parse_job_req(job.get_id(),job_desc) != JobReqSuccess) return false; if(job_desc.reruns>config.Reruns()) job_desc.reruns=config.Reruns(); if(!job_local_write_file(job,config,job_desc)) return false; // Convert delegation ids to credential paths. // Add default credentials for file which have no own assigned. 
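
The two comment lines that end just above summarize the credential-resolution step carried out by the code that follows: each staged input/output file either carries its own delegation id, which is resolved to a credential path through the delegation store, or falls back to the job's default credential (the job-level delegation if one is registered, otherwise the job proxy file). Below is a minimal, self-contained sketch of that fallback logic only; the std::map stands in for the real delegation store lookup (DelegationStore::FindCred in the surrounding code) and all names and paths are illustrative, not the actual A-REX API.

#include <map>
#include <string>

// Sketch only: a map plays the role of the delegation store's
// id -> credential-path lookup used by the surrounding code.
typedef std::map<std::string, std::string> FakeDelegationStore;

// Resolve the credential to use for one staged file.
//   file_deleg_id  - delegation id attached to this file ("" if none)
//   job_deleg_id   - delegation id of the whole job ("" if none)
//   job_proxy_path - path of the job proxy, the last-resort default
std::string resolve_cred(const FakeDelegationStore& store,
                         const std::string& file_deleg_id,
                         const std::string& job_deleg_id,
                         const std::string& job_proxy_path) {
  // Job-level default: the delegated credential if registered, else the proxy.
  std::string default_cred = job_proxy_path;
  FakeDelegationStore::const_iterator it = store.find(job_deleg_id);
  if (!job_deleg_id.empty() && it != store.end()) default_cred = it->second;
  // A per-file delegation id overrides the default when present.
  if (file_deleg_id.empty()) return default_cred;
  it = store.find(file_deleg_id);
  return (it != store.end()) ? it->second : std::string();
}

// Example: a file without its own delegation id falls back to the proxy, e.g.
//   resolve_cred(store, "", "", "/tmp/job.X.proxy") returns "/tmp/job.X.proxy"
// (the path is a made-up example, not an A-REX default location).
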
ARex::DelegationStores* delegs = config.GetDelegations(); std::string default_cred = job_proxy_filename(job.get_id(), config); // TODO: drop job.proxy as source of delegation if(!job_desc.delegationid.empty()) { if(delegs) { DelegationStore& deleg = delegs->operator[](config.DelegationDir()); std::string fname = deleg.FindCred(job_desc.delegationid, job_desc.DN); if(!fname.empty()) { default_cred = fname; }; }; }; for(std::list::iterator f = job_desc.inputdata.begin(); f != job_desc.inputdata.end(); ++f) { if(f->has_lfn()) { if(f->cred.empty()) { f->cred = default_cred; } else { std::string path; if(delegs) path = (*delegs)[config.DelegationDir()].FindCred(f->cred,job_desc.DN); f->cred = path; }; }; }; for(std::list::iterator f = job_desc.outputdata.begin(); f != job_desc.outputdata.end(); ++f) { if(f->has_lfn()) { if(f->cred.empty()) { f->cred = default_cred; } else { std::string path; ARex::DelegationStores* delegs = config.GetDelegations(); if(delegs) path = (*delegs)[config.DelegationDir()].FindCred(f->cred,job_desc.DN); f->cred = path; }; }; }; if(!job_input_write_file(job,config,job_desc.inputdata)) return false; if(!job_output_write_file(job,config,job_desc.outputdata,job_output_success)) return false; return true; } JobReqResult JobDescriptionHandler::parse_job_req(JobLocalDescription &job_desc,Arc::JobDescription& arc_job_desc,const std::string &fname,bool check_acl) const { Arc::JobDescriptionResult arc_job_res = get_arc_job_description(fname, arc_job_desc); if (!arc_job_res) { std::string failure = arc_job_res.str(); if(failure.empty()) failure = "Unable to read or parse job description."; return JobReqResult(JobReqInternalFailure, "", failure); } if (!arc_job_desc.Resources.RunTimeEnvironment.isResolved()) { return JobReqResult(JobReqInternalFailure, "", "Runtime environments have not been resolved."); } job_desc = arc_job_desc; // Additional queue processing // TODO: Temporary solution. // Check for special WLCG queues made out of "queue name_VO name". for(std::list::const_iterator q = config.Queues().begin(); q != config.Queues().end();++q) { if(*q == job_desc.queue) break; const std::list & vos = config.AuthorizedVOs(q->c_str()); // per queue const std::list & cvos = config.AuthorizedVOs(""); // per cluster bool vo_found = false; if(!vos.empty()) { for(std::list::const_iterator vo = vos.begin();vo != vos.end(); ++vo) { std::string synthetic_queue = *q; synthetic_queue += "_"; synthetic_queue += *vo; if(synthetic_queue == job_desc.queue) { vo_found = true; break; }; }; } else { for(std::list::const_iterator vo = cvos.begin();vo != cvos.end(); ++vo) { std::string synthetic_queue = *q; synthetic_queue += "_"; synthetic_queue += *vo; if(synthetic_queue == job_desc.queue) { vo_found = true; break; }; }; }; if(vo_found) { logger.msg(Arc::WARNING, "Replacing queue '%s' with '%s'", job_desc.queue, *q); job_desc.queue = *q; break; }; }; if (check_acl) return get_acl(arc_job_desc); return JobReqSuccess; } JobReqResult JobDescriptionHandler::parse_job_req(const JobId &job_id,JobLocalDescription &job_desc,bool check_acl) const { Arc::JobDescription arc_job_desc; return parse_job_req(job_id,job_desc,arc_job_desc,check_acl); } JobReqResult JobDescriptionHandler::parse_job_req(const JobId &job_id,JobLocalDescription &job_desc,Arc::JobDescription& arc_job_desc,bool check_acl) const { std::string fname = config.ControlDir() + "/job." 
+ job_id + ".description"; return parse_job_req(job_desc,arc_job_desc,fname,check_acl); } std::string JobDescriptionHandler::get_local_id(const JobId &job_id) const { std::string id; std::string joboption("joboption_jobid="); std::string fgrami(config.ControlDir() + "/job." + job_id + ".grami"); std::list grami_data; if (Arc::FileRead(fgrami, grami_data)) { for (std::list::iterator line = grami_data.begin(); line != grami_data.end(); ++line) { if (line->find(joboption) == 0) { id = line->substr(joboption.length()); id = Arc::trim(id, "'"); break; } } } return id; } bool JobDescriptionHandler::write_grami_executable(std::ofstream& f, const std::string& name, const Arc::ExecutableType& exec) const { std::string executable = Arc::trim(exec.Path); if (executable[0] != '/' && executable[0] != '$' && !(executable[0] == '.' && executable[1] == '/')) executable = "./"+executable; f<<"joboption_"<::const_iterator it = exec.Argument.begin(); it != exec.Argument.end(); it++, i++) { f<<"joboption_"<c_str(),true)<::const_iterator e = arc_job_desc.Application.PreExecutable.begin(); e != arc_job_desc.Application.PreExecutable.end(); ++e) { if(!write_grami_executable(f,"pre_"+Arc::tostring(n),*e)) return false; ++n; } for(std::list::const_iterator e = arc_job_desc.Application.PostExecutable.begin(); e != arc_job_desc.Application.PostExecutable.end(); ++e) { if(!write_grami_executable(f,"post_"+Arc::tostring(n),*e)) return false; } f<<"joboption_stdin="< >::const_iterator it = arc_job_desc.Application.Environment.begin(); it != arc_job_desc.Application.Environment.end(); it++, i++) { f<<"joboption_env_"<first+"="+it->second,true)<::const_iterator itSW = arc_job_desc.Resources.RunTimeEnvironment.getSoftwareList().begin(); itSW != arc_job_desc.Resources.RunTimeEnvironment.getSoftwareList().end(); itSW++) { if (itSW->empty()) continue; std::string rte = Arc::upper(*itSW); if (!Arc::CanonicalDir(rte)) { logger.msg(Arc::ERROR, "Bad name for runtime environment: %s", (std::string)*itSW); return false; } f<<"joboption_runtime_"<& opts = itSW->getOptions(); int n = 1; for(std::list::const_iterator opt = opts.begin(); opt != opts.end();++opt) { f<<"joboption_runtime_"< descs; Arc::JobDescriptionResult r = Arc::JobDescription::Parse(job_desc_str, descs, "", "GRIDMANAGER"); if (r) { if(descs.size() == 1) { desc = descs.front(); } else { r = Arc::JobDescriptionResult(false,"Multiple job descriptions not supported"); } } return r; } JobReqResult JobDescriptionHandler::get_acl(const Arc::JobDescription& arc_job_desc) const { if( !arc_job_desc.Application.AccessControl ) return JobReqSuccess; Arc::XMLNode typeNode = arc_job_desc.Application.AccessControl["Type"]; Arc::XMLNode contentNode = arc_job_desc.Application.AccessControl["Content"]; if( !contentNode ) { std::string failure = "acl element wrongly formated - missing Content element"; logger.msg(Arc::ERROR, failure); return JobReqResult(JobReqMissingFailure, "", failure); }; if( (!typeNode) || ( ( (std::string) typeNode ) == "GACL" ) || ( ( (std::string) typeNode ) == "ARC" ) ) { std::string str_content; if(contentNode.Size() > 0) { Arc::XMLNode acl_doc; contentNode.Child().New(acl_doc); acl_doc.GetDoc(str_content); } else { str_content = (std::string)contentNode; } return JobReqResult(JobReqSuccess, str_content); } std::string failure = "ARC: unsupported ACL type specified: " + (std::string)typeNode; logger.msg(Arc::ERROR, "%s", failure); return JobReqResult(JobReqUnsupportedFailure, "", failure); } /* parse job description and set specified file permissions to 
executable */ bool JobDescriptionHandler::set_execs(const GMJob &job) const { std::string fname = config.ControlDir() + "/job." + job.get_id() + ".description"; Arc::JobDescription desc; if (!get_arc_job_description(fname, desc)) return false; std::string session_dir = job.SessionDir(); if (desc.Application.Executable.Path[0] != '/' && desc.Application.Executable.Path[0] != '$') { std::string executable = desc.Application.Executable.Path; if(!Arc::CanonicalDir(executable)) { logger.msg(Arc::ERROR, "Bad name for executable: ", executable); return false; } fix_file_permissions_in_session(session_dir+"/"+executable,job,config,true); } // TOOD: Support for PreExecutable and PostExecutable for(std::list::const_iterator it = desc.DataStaging.InputFiles.begin(); it!=desc.DataStaging.InputFiles.end();it++) { if(it->IsExecutable) { std::string executable = it->Name; if (executable[0] != '/' && executable[0] != '.' && executable[1] != '/') executable = "./"+executable; if(!Arc::CanonicalDir(executable)) { logger.msg(Arc::ERROR, "Bad name for executable: %s", executable); return false; } fix_file_permissions_in_session(session_dir+"/"+executable,job,config,true); } } return true; } std::ostream& operator<<(std::ostream& o, const JobDescriptionHandler::value_for_shell& s) { if(s.str == NULL) return o; if(s.quote) o<<"'"; const char* p = s.str; for(;;) { const char* pp = strchr(p,'\''); if(pp == NULL) { o< #endif #include #include #include #include #include #include "../files/Delete.h" #include "../conf/UrlMapConfig.h" #include "../files/ControlFileHandling.h" #include "../conf/StagingConfig.h" #include "../../delegation/DelegationStore.h" #include "../../delegation/DelegationStores.h" #include "GMJob.h" #include "DTRGenerator.h" namespace ARex { Arc::Logger DTRInfo::logger(Arc::Logger::getRootLogger(), "DTRInfo"); DTRInfo::DTRInfo(const GMConfig& config): config(config) { } void DTRInfo::receiveDTR(DataStaging::DTR_ptr dtr) { // write state info to job.id.input for example } Arc::Logger DTRGenerator::logger(Arc::Logger::getRootLogger(), "Generator"); bool compare_job_description(GMJob const& first, GMJob const& second) { int priority_first = first.GetLocalDescription() ? first.GetLocalDescription()->priority : JobLocalDescription::prioritydefault; int priority_second = first.GetLocalDescription() ? second.GetLocalDescription()->priority : JobLocalDescription::prioritydefault; return priority_first > priority_second; } void DTRGenerator::main_thread(void* arg) { ((DTRGenerator*)arg)->thread(); } void DTRGenerator::thread() { // set up logging - to avoid logging DTR logs to the main A-REX log // we disconnect the root logger while submitting to the Scheduler Arc::Logger::getRootLogger().setThreadContext(); while (generator_state != DataStaging::TO_STOP) { // look at event queue and deal with any events. 
// This method of iteration should be thread-safe because events // are always added to the end of the list // take cancelled jobs first so we can ignore other DTRs in those jobs event_lock.lock(); std::list::iterator it_cancel = jobs_cancelled.begin(); while (it_cancel != jobs_cancelled.end()) { // check if it is still in received queue std::list::iterator it_jobs = jobs_received.begin(); for(; it_jobs != jobs_received.end(); ++it_jobs) { if(*it_jobs == *it_cancel) break; }; if(it_jobs != jobs_received.end()) { jobs_received.erase(it_jobs); continue; }; // job must be in scheduler already event_lock.unlock(); processCancelledJob(*it_cancel); event_lock.lock(); it_cancel = jobs_cancelled.erase(it_cancel); } // next DTRs sent back from the Scheduler std::list::iterator it_dtrs = dtrs_received.begin(); while (it_dtrs != dtrs_received.end()) { event_lock.unlock(); processReceivedDTR(*it_dtrs); event_lock.lock(); // delete DTR LogDestinations (*it_dtrs)->clean_log_destinations(central_dtr_log); it_dtrs = dtrs_received.erase(it_dtrs); } // finally new jobs std::list::iterator it_jobs = jobs_received.begin(); // it can happen that the list grows faster than the jobs are processed // so here we only process for a small time to avoid blocking other // jobs finishing Arc::Time limit(Arc::Time() + Arc::Period(30)); // sort the list by job priority jobs_received.sort(compare_job_description); while (it_jobs != jobs_received.end() && Arc::Time() < limit) { event_lock.unlock(); processReceivedJob(*it_jobs); event_lock.lock(); it_jobs = jobs_received.erase(it_jobs); } event_lock.unlock(); Glib::usleep(50000); } // stop scheduler - cancels all DTRs and waits for them to complete scheduler->stop(); // Handle all the DTRs returned by the scheduler, in case there are completed // DTRs to process before exiting and thus avoiding redoing those transfers // when A-REX restarts. // Lock is not necessary here because scheduler has finished and A-REX is // waiting for this thread to exit. 
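
The comments in this thread function describe one recurring pattern: events are appended to plain std::list queues under event_lock by receiveDTR()/receiveJob(), and the consumer releases the lock around each potentially slow process*() call so producers are never blocked while a single event is handled; only during this final shutdown drain, after the scheduler has stopped, is the lock unnecessary. The following is a minimal self-contained sketch of that drain-with-unlock pattern, using std::mutex and std::string in place of the lock and event types used in the source, and copying the event out under the lock as a slightly more defensive variant; all names here are illustrative.

#include <iostream>
#include <list>
#include <mutex>
#include <string>

std::mutex queue_lock;               // plays the role of event_lock
std::list<std::string> received;     // plays the role of dtrs_received

// Potentially slow per-event work (stand-in for processReceivedDTR()).
void process(const std::string& event) { std::cout << event << '\n'; }

// Drain the queue without holding the lock while an event is processed,
// so producers appending to 'received' are never blocked for long.
// std::list iterators stay valid across concurrent push_back calls.
void drain() {
  queue_lock.lock();
  std::list<std::string>::iterator it = received.begin();
  while (it != received.end()) {
    std::string event = *it;         // copy out under the lock
    queue_lock.unlock();
    process(event);                  // slow part runs unlocked
    queue_lock.lock();
    it = received.erase(it);         // erase returns the next element
  }
  queue_lock.unlock();
}
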
std::list::iterator it_dtrs = dtrs_received.begin(); while (it_dtrs != dtrs_received.end()) { processReceivedDTR(*it_dtrs); // delete DTR LogDestinations (*it_dtrs)->clean_log_destinations(central_dtr_log); it_dtrs = dtrs_received.erase(it_dtrs); } run_condition.signal(); logger.msg(Arc::INFO, "Exiting Generator thread"); } DTRGenerator::DTRGenerator(const GMConfig& config, void (*kicker_func)(void*), void* kicker_arg) : generator_state(DataStaging::INITIATED), config(config), central_dtr_log(NULL), staging_conf(config), info(config), kicker_func(kicker_func), kicker_arg(kicker_arg) { if (!staging_conf) return; // Set log level for DTR in job.id.errors files DataStaging::DTR::LOG_LEVEL = staging_conf.log_level; scheduler = DataStaging::Scheduler::getInstance(); // Convert A-REX configuration values to DTR configuration // If not configured, set the DTR dump file to the first control dir registered std::string dtr_log(staging_conf.dtr_log); if (dtr_log.empty()) dtr_log = config.ControlDir()+"/dtrstate.log"; scheduler->SetDumpLocation(dtr_log); // Read DTR state from previous dump to find any transfers stopped half-way // If those destinations appear again, add overwrite=yes readDTRState(dtr_log); // Central DTR log if configured if (!staging_conf.get_dtr_central_log().empty()) { central_dtr_log = new Arc::LogFile(staging_conf.get_dtr_central_log()); } // Processing limits scheduler->SetSlots(staging_conf.max_processor, staging_conf.max_processor, staging_conf.max_delivery, staging_conf.max_emergency, staging_conf.max_prepared); // Transfer shares DataStaging::TransferSharesConf share_conf(staging_conf.share_type, staging_conf.defined_shares); scheduler->SetTransferSharesConf(share_conf); // Transfer limits DataStaging::TransferParameters transfer_limits; transfer_limits.min_current_bandwidth = staging_conf.min_speed; transfer_limits.averaging_time = staging_conf.min_speed_time; transfer_limits.min_average_bandwidth = staging_conf.min_average_speed; transfer_limits.max_inactivity_time = staging_conf.max_inactivity_time; scheduler->SetTransferParameters(transfer_limits); // URL mappings UrlMapConfig url_map(config); scheduler->SetURLMapping(url_map); // Preferred pattern scheduler->SetPreferredPattern(staging_conf.preferred_pattern); // Delivery services scheduler->SetDeliveryServices(staging_conf.delivery_services); // Limit on remote delivery size scheduler->SetRemoteSizeLimit(staging_conf.remote_size_limit); // Set performance metrics logging scheduler->SetJobPerfLog(staging_conf.perf_log); // End of configuration - start Scheduler thread scheduler->start(); generator_state = DataStaging::RUNNING; Arc::CreateThreadFunction(&main_thread, this); } DTRGenerator::~DTRGenerator() { if (generator_state != DataStaging::RUNNING) return; generator_state = DataStaging::TO_STOP; run_condition.wait(); generator_state = DataStaging::STOPPED; } void DTRGenerator::receiveDTR(DataStaging::DTR_ptr dtr) { if (generator_state == DataStaging::INITIATED || generator_state == DataStaging::STOPPED) { logger.msg(Arc::ERROR, "DTRGenerator is not running!"); return; } else if (generator_state == DataStaging::TO_STOP) { logger.msg(Arc::VERBOSE, "Received DTR %s during Generator shutdown - may not be processed", dtr->get_id()); // still a chance to process this DTR so don't return } event_lock.lock(); dtrs_received.push_back(dtr); event_lock.unlock(); } void DTRGenerator::receiveJob(const GMJob& job) { if (generator_state != DataStaging::RUNNING) { logger.msg(Arc::WARNING, "DTRGenerator is not running!"); } // Add to 
jobs list even if Generator is stopped, so that A-REX doesn't // think that staging has finished. event_lock.lock(); jobs_received.push_back(job); event_lock.unlock(); } void DTRGenerator::cancelJob(const GMJob& job) { if (generator_state != DataStaging::RUNNING) { logger.msg(Arc::WARNING, "DTRGenerator is not running!"); } event_lock.lock(); jobs_cancelled.push_back(job.get_id()); event_lock.unlock(); } bool DTRGenerator::queryJobFinished(GMJob& job) { // Data staging is finished if the job is in finished_jobs and // not in active_dtrs or jobs_received. // check if this job is still in the received jobs queue event_lock.lock(); for (std::list::iterator i = jobs_received.begin(); i != jobs_received.end(); ++i) { if (*i == job) { event_lock.unlock(); return false; } } event_lock.unlock(); // check if any DTRs in this job are still active lock.lock(); if (active_dtrs.find(job.get_id()) != active_dtrs.end()) { lock.unlock(); return false; } std::map::iterator i = finished_jobs.find(job.get_id()); if (i != finished_jobs.end() && !i->second.empty()) { // add failure to job if any DTR failed job.AddFailure(i->second); finished_jobs[job.get_id()] = ""; } lock.unlock(); return true; } bool DTRGenerator::hasJob(const GMJob& job) { // check if this job is still in the received jobs queue event_lock.lock(); for (std::list::iterator i = jobs_received.begin(); i != jobs_received.end(); ++i) { if (*i == job) { event_lock.unlock(); return true; } } event_lock.unlock(); // check if any DTRs in this job are still active lock.lock(); if (active_dtrs.find(job.get_id()) != active_dtrs.end()) { lock.unlock(); return true; } // finally check finished jobs std::map::iterator i = finished_jobs.find(job.get_id()); if (i != finished_jobs.end()) { lock.unlock(); return true; } lock.unlock(); // not found return false; } void DTRGenerator::removeJob(const GMJob& job) { // check if this job is still in the received jobs queue event_lock.lock(); for (std::list::iterator i = jobs_received.begin(); i != jobs_received.end(); ++i) { if (*i == job) { event_lock.unlock(); logger.msg(Arc::WARNING, "%s: Trying to remove job from data staging which is still active", job.get_id()); return; } } event_lock.unlock(); // check if any DTRs in this job are still active lock.lock(); if (active_dtrs.find(job.get_id()) != active_dtrs.end()) { lock.unlock(); logger.msg(Arc::WARNING, "%s: Trying to remove job from data staging which is still active", job.get_id()); return; } // finally check finished jobs std::map::iterator i = finished_jobs.find(job.get_id()); if (i == finished_jobs.end()) { // warn if not in finished lock.unlock(); logger.msg(Arc::WARNING, "%s: Trying remove job from data staging which does not exist", job.get_id()); return; } finished_jobs.erase(i); lock.unlock(); } bool DTRGenerator::processReceivedDTR(DataStaging::DTR_ptr dtr) { std::string jobid(dtr->get_parent_job_id()); if (!(*dtr)) { logger.msg(Arc::ERROR, "%s: Invalid DTR", jobid); if (dtr->get_status() != DataStaging::DTRStatus::CANCELLED) { scheduler->cancelDTRs(jobid); lock.lock(); finished_jobs[jobid] = std::string("Invalid Data Transfer Request"); active_dtrs.erase(jobid); lock.unlock(); } return false; } logger.msg(Arc::DEBUG, "%s: Received DTR %s to copy file %s in state %s", jobid, dtr->get_id(), dtr->get_source()->str(), dtr->get_status().str()); uid_t job_uid = config.StrictSession() ? dtr->get_local_user().get_uid() : 0; uid_t job_gid = config.StrictSession() ? 
dtr->get_local_user().get_gid() : 0; // Get session dir from .local if possible std::string session_dir; JobLocalDescription job_desc; if (job_local_read_file(jobid, config, job_desc) && !job_desc.sessiondir.empty()) { session_dir = job_desc.sessiondir; } else { logger.msg(Arc::WARNING, "%s: Failed reading local information", jobid); session_dir = config.SessionRoot(jobid) + '/' + jobid; } // create JobDescription object to pass to job_..write_file methods GMJob job(jobid, dtr->get_local_user(), session_dir); std::string dtr_transfer_statistics; if (dtr->error() && dtr->is_mandatory() && dtr->get_status() != DataStaging::DTRStatus::CANCELLED) { // for uploads, report error but let other transfers continue // for downloads, cancel all other transfers logger.msg(Arc::ERROR, "%s: DTR %s to copy file %s failed", jobid, dtr->get_id(), dtr->get_source()->str()); lock.lock(); if (!dtr->get_source()->Local() && finished_jobs.find(jobid) == finished_jobs.end()) { // download // cancel other DTRs and erase from our list unless error was already reported logger.msg(Arc::INFO, "%s: Cancelling other DTRs", jobid); scheduler->cancelDTRs(jobid); } // add error to finished jobs finished_jobs[jobid] += std::string("Failed in data staging: " + dtr->get_error_status().GetDesc() + '\n'); lock.unlock(); } else if (dtr->get_status() != DataStaging::DTRStatus::CANCELLED) { // remove from job.id.input/output files on success // find out if download or upload by checking which is remote file if (dtr->error() && !dtr->is_mandatory()) { dtr->get_logger()->msg(Arc::INFO, "%s: DTR %s to copy to %s failed but is not mandatory", jobid, dtr->get_id(), dtr->get_destination_str()); } std::list files; if (dtr->get_source()->Local()) { // output files dtr_transfer_statistics = "outputfile:url=" + dtr->get_destination()->str() + ','; if (!job_output_read_file(jobid, config, files)) { logger.msg(Arc::WARNING, "%s: Failed to read list of output files", jobid); } else { FileData uploaded_file; // go through list and take out this file for (std::list::iterator i = files.begin(); i != files.end();) { // compare 'standard' URLs Arc::URL file_lfn(i->lfn); Arc::URL dtr_lfn(dtr->get_destination()->str()); // check if it is in a dynamic list - if so remove from it if (i->pfn.size() > 1 && i->pfn[1] == '@') { std::string dynamic_output(session_dir+'/'+i->pfn.substr(2)); std::list dynamic_files; if (!job_Xput_read_file(dynamic_output, dynamic_files, job_uid, job_gid)) { logger.msg(Arc::WARNING, "%s: Failed to read dynamic output files in %s", jobid, dynamic_output); } else { logger.msg(Arc::DEBUG, "%s: Going through files in list %s", jobid, dynamic_output); for (std::list::iterator dynamic_file = dynamic_files.begin(); dynamic_file != dynamic_files.end(); ++dynamic_file) { if (Arc::URL(dynamic_file->lfn).str() == dtr_lfn.str()) { logger.msg(Arc::DEBUG, "%s: Removing %s from dynamic output file %s", jobid, dtr_lfn.str(), dynamic_output); uploaded_file = *dynamic_file; dynamic_files.erase(dynamic_file); if (!job_Xput_write_file(dynamic_output, dynamic_files, job_output_all, job_uid, job_gid)) logger.msg(Arc::WARNING, "%s: Failed to write back dynamic output files in %s", jobid, dynamic_output); break; } } } } if (file_lfn.str() == dtr_lfn.str()) { uploaded_file = *i; i = files.erase(i); } else { ++i; } } // write back .output file if (!job_output_write_file(job, config, files)) { logger.msg(Arc::WARNING, "%s: Failed to write list of output files", jobid); } if(!uploaded_file.pfn.empty()) { if(!job_output_status_add_file(job, config, 
uploaded_file)) { logger.msg(Arc::WARNING, "%s: Failed to write list of output status files", jobid); } } } if (dtr->get_source()->CheckSize()) dtr_transfer_statistics += "size=" + Arc::tostring(dtr->get_source()->GetSize()) + ','; dtr_transfer_statistics += "starttime=" + dtr->get_creation_time().str(Arc::UTCTime) + ','; dtr_transfer_statistics += "endtime=" + Arc::Time().str(Arc::UTCTime); } else if (dtr->get_destination()->Local()) { // input files dtr_transfer_statistics = "inputfile:url=" + dtr->get_source()->str() + ','; if (!job_input_read_file(jobid, config, files)) { logger.msg(Arc::WARNING,"%s: Failed to read list of input files", jobid); } else { // go through list and take out this file for (std::list::iterator i = files.begin(); i != files.end();) { // compare 'standard' URLs Arc::URL file_lfn(i->lfn); Arc::URL dtr_lfn(dtr->get_source()->str()); if (file_lfn.str() == dtr_lfn.str()) { struct stat st; Arc::FileStat(job.SessionDir() + i->pfn, &st, job_uid, job_gid, true); dtr_transfer_statistics += "size=" + Arc::tostring(st.st_size) + ','; i = files.erase(i); break; } else { ++i; } } // write back .input file if (!job_input_write_file(job, config, files)) { logger.msg(Arc::WARNING, "%s: Failed to write list of input files", jobid); } } dtr_transfer_statistics += "starttime=" + dtr->get_creation_time().str(Arc::UTCTime) + ','; dtr_transfer_statistics += "endtime=" + Arc::Time().str(Arc::UTCTime) + ','; if (dtr->get_cache_state() == DataStaging::CACHE_ALREADY_PRESENT) dtr_transfer_statistics += "fromcache=yes"; else dtr_transfer_statistics += "fromcache=no"; } else { // transfer between two remote endpoints, shouldn't happen... logger.msg(Arc::WARNING, "%s: Received DTR with two remote endpoints!"); } } // get DTRs for this job id lock.lock(); std::pair::iterator, std::multimap::iterator> dtr_iterator = active_dtrs.equal_range(jobid); if (dtr_iterator.first == dtr_iterator.second) { lock.unlock(); logger.msg(Arc::WARNING, "No active job id %s", jobid); return true; } // Print transfer statistics std::string fname = config.ControlDir() + "/job." 
+ job.get_id() + ".statistics"; std::ofstream f(fname.c_str(),std::ios::out | std::ios::app); if(f.is_open() ) { f << dtr_transfer_statistics << std::endl; } f.close(); // remove this DTR from list for (std::multimap::iterator i = dtr_iterator.first; i != dtr_iterator.second; ++i) { if (i->second == dtr->get_id()) { active_dtrs.erase(i); break; } } // check if any DTRs left from this job, if so return if (active_dtrs.find(jobid) != active_dtrs.end()) { lock.unlock(); return true; } // No DTRs left, clean up session dir if upload or failed download // But first add the DTR back to the active list to avoid race condition // caused by calling hasJob() between removing from active and adding to // finished, which results in job being submitted to DTR again active_dtrs.insert(std::pair(jobid, dtr->get_id())); bool finished_with_error = ((finished_jobs.find(jobid) != finished_jobs.end() && !finished_jobs[jobid].empty()) || dtr->get_status() == DataStaging::DTRStatus::CANCELLED); lock.unlock(); if (dtr->get_source()->Local()) { // list of files to keep in session dir std::list files; if (!job_output_read_file(jobid, config, files)) logger.msg(Arc::WARNING, "%s: Failed to read list of output files, can't clean up session dir", jobid); else { if (finished_with_error) { // if error with uploads, don't remove dynamic output files so that resume will work for (std::list::iterator i = files.begin(); i != files.end(); ++i) { if (i->pfn.size() > 1 && i->pfn[1] == '@') { std::string dynamic_output(session_dir+'/'+i->pfn.substr(2)); FileData fd(std::string(i->pfn.erase(1,1)), ""); files.push_back(fd); // also add files left inside dynamic output file std::list dynamic_files; if (!job_Xput_read_file(dynamic_output, dynamic_files, job_uid, job_gid)) { logger.msg(Arc::WARNING, "%s: Failed to read dynamic output files in %s", jobid, dynamic_output); } else { for (std::list::iterator dynamic_file = dynamic_files.begin(); dynamic_file != dynamic_files.end(); ++dynamic_file) { FileData f(dynamic_file->pfn, ""); files.push_back(f); } } } } } if (delete_all_files(job.SessionDir(), files, true, job_uid, job_gid) == 2) logger.msg(Arc::WARNING, "%s: Failed to clean up session dir", jobid); } // clean up cache joblinks CleanCacheJobLinks(config, job); } else if (finished_with_error) { // clean all files still in input list which could be half-downloaded std::list files; if (!job_input_read_file(jobid, config, files)) logger.msg(Arc::WARNING, "%s: Failed to read list of input files, can't clean up session dir", jobid); else if (delete_all_files(job.SessionDir(), files, false, job_uid, job_gid) == 2) logger.msg(Arc::WARNING, "%s: Failed to clean up session dir", jobid); } // add to finished jobs (without overwriting existing error) and finally // remove from active lock.lock(); active_dtrs.erase(jobid); finished_jobs[jobid] += ""; // log summary to DTR log and A-REX log if (finished_jobs[jobid].empty()) dtr->get_logger()->msg(Arc::INFO, "%s: All %s %s successfully", jobid, dtr->get_source()->Local() ? "uploads":"downloads", (dtr->get_status() == DataStaging::DTRStatus::CANCELLED) ? "cancelled":"finished"); else dtr->get_logger()->msg(Arc::INFO, "%s: Some %s failed", jobid, dtr->get_source()->Local() ? "uploads":"downloads"); lock.unlock(); // wake up GM thread if (kicker_func) (*kicker_func)(kicker_arg); return true; } bool DTRGenerator::processReceivedJob(GMJob& job) { JobId jobid(job.get_id()); logger.msg(Arc::VERBOSE, "%s: Received data staging request to %s files", jobid, (job.get_state() == JOB_STATE_PREPARING ? 
"download" : "upload")); uid_t job_uid = config.StrictSession() ? job.get_user().get_uid() : 0; uid_t job_gid = config.StrictSession() ? job.get_user().get_gid() : 0; // Default credentials to be used by transfering files if not specified per file std::string default_cred = job_proxy_filename(jobid, config); // TODO: drop job.proxy as source of delegation JobLocalDescription job_desc; if(job_local_read_file(jobid, config, job_desc)) { if(!job_desc.delegationid.empty()) { ARex::DelegationStores* delegs = config.GetDelegations(); if(delegs) { DelegationStore& deleg = delegs->operator[](config.DelegationDir()); std::string fname = deleg.FindCred(job_desc.delegationid, job_desc.DN); if(!fname.empty()) { default_cred = fname; } } } } // Collect credential info for DTRs DataStaging::DTRCredentialInfo cred_info(job_desc.DN, job_desc.expiretime, job_desc.voms); // Create a file for the transfer statistics and fix its permissions std::string fname = config.ControlDir() + "/job." + jobid + ".statistics"; std::ofstream f(fname.c_str(),std::ios::out | std::ios::app); f.close(); fix_file_permissions(fname); // read in input/output files std::list files; bool replication = false; // output files need to be read whether PREPARING or FINISHING std::list output_files; if (!job_output_read_file(jobid, config, output_files)) { logger.msg(Arc::ERROR, "%s: Failed to read list of output files", jobid); lock.lock(); finished_jobs[jobid] = std::string("Failed to read list of output files"); lock.unlock(); if (job.get_state() == JOB_STATE_FINISHING) CleanCacheJobLinks(config, job); if (kicker_func) (*kicker_func)(kicker_arg); return false; } if (job.get_state() == JOB_STATE_PREPARING) { if (!job_input_read_file(jobid, config, files)) { logger.msg(Arc::ERROR, "%s: Failed to read list of input files", jobid); lock.lock(); finished_jobs[jobid] = std::string("Failed to read list of input files"); lock.unlock(); if (kicker_func) (*kicker_func)(kicker_arg); return false; } // check for duplicates (see bug 1285) for (std::list::iterator i = files.begin(); i != files.end(); i++) { for (std::list::iterator j = files.begin(); j != files.end(); j++) { if (i != j && j->pfn == i->pfn) { logger.msg(Arc::ERROR, "%s: Duplicate file in list of input files: %s", jobid, i->pfn); lock.lock(); finished_jobs[jobid] = std::string("Duplicate file in list of input files: " + i->pfn); lock.unlock(); if (kicker_func) (*kicker_func)(kicker_arg); return false; } } } // check if any input files are also output files (bug 1387 and 2793) for (std::list::iterator j = output_files.begin(); j != output_files.end(); j++) { for (std::list::iterator i = files.begin(); i != files.end(); i++) { if (i->pfn == j->pfn && i->lfn.find(':') != std::string::npos) { Arc::URL u(i->lfn); std::string opt = u.Option("cache"); // don't add copy option if exists or current option is "no" or "renew" if (opt.empty() || !(opt == "no" || opt == "renew" || opt == "copy")) { u.AddOption("cache", "copy", true); i->lfn = u.fullstr(); } } } } // pre-clean session dir before downloading if (delete_all_files(job.SessionDir(), files, false, job_uid, job_gid) == 2) { logger.msg(Arc::ERROR, "%s: Failed to clean up session dir", jobid); lock.lock(); finished_jobs[jobid] = std::string("Failed to clean up session dir before downloading inputs"); lock.unlock(); if (kicker_func) (*kicker_func)(kicker_arg); return false; } } else if (job.get_state() == JOB_STATE_FINISHING) { files = output_files; std::list::iterator it; // add any output files dynamically added by the user during the 
job and // resolve directories for (it = files.begin(); it != files.end() ;) { if (it->pfn.find("@") == 1) { // GM puts a slash on the front of the local file // Following is always empty currently. But it will start working as soon as // there is a way to pass credentials for dynamic files. But so far default_cred // is always picked up. std::string cred(it->cred); if(cred.empty()) cred = default_cred; std::list files_; std::string outputfilelist = job.SessionDir() + std::string("/") + it->pfn.substr(2); logger.msg(Arc::INFO, "%s: Reading output files from user generated list in %s", jobid, outputfilelist); if (!job_Xput_read_file(outputfilelist, files_, job_uid, job_gid)) { logger.msg(Arc::ERROR, "%s: Error reading user generated output file list in %s", jobid, outputfilelist); lock.lock(); // Only write this failure if no previous failure if (!job.CheckFailure(config)) { finished_jobs[jobid] = std::string("Error reading user generated output file list"); } else { finished_jobs[jobid] = ""; } lock.unlock(); CleanCacheJobLinks(config, job); if (kicker_func) (*kicker_func)(kicker_arg); return false; } // Attach dynamic files and assign credentials to them unless already available for(std::list::iterator it_ = files_.begin(); it_ != files_.end(); ++it_) { if(it_->cred.empty()) it_->cred = cred; files.push_back(*it_); } it->pfn.erase(1, 1); ++it; continue; } if (it->pfn.rfind('/') == it->pfn.length()-1) { if (it->lfn.find(':') != std::string::npos) { std::string cred(it->cred); std::string dir(job.SessionDir() + it->pfn); std::list entries; if (!Arc::DirList(dir, entries, true, job_uid, job_gid)) { logger.msg(Arc::ERROR, "%s: Failed to list output directory %s: %s", jobid, dir, Arc::StrError(errno)); lock.lock(); // Only write this failure if no previous failure if (!job.CheckFailure(config)) { finished_jobs[jobid] = std::string("Failed to list output directory"); } else { finished_jobs[jobid] = ""; } lock.unlock(); CleanCacheJobLinks(config, job); if (kicker_func) (*kicker_func)(kicker_arg); return false; } // add entries which are not directories or links to output file list struct stat st; for (std::list::iterator i = entries.begin(); i != entries.end(); ++i) { if (Arc::FileStat(*i, &st, job_uid, job_gid, false) && S_ISREG(st.st_mode)) { std::string lfn(it->lfn + '/' + i->substr(job.SessionDir().length()+it->pfn.length())); std::string pfn(i->substr(job.SessionDir().length())); logger.msg(Arc::DEBUG, "%s: Adding new output file %s: %s", jobid, pfn, lfn); FileData fd(pfn, lfn); fd.cred = cred; files.push_back(fd); } } it = files.erase(it); continue; } // Remove trailing slashes otherwise it will be cleaned in delete_all_files std::string::size_type pos = it->pfn.find_last_not_of('/'); it->pfn.resize((pos == std::string::npos)?1:(pos+1)); } ++it; } // check if any files share the same LFN, if so allow overwriting existing LFN for (it = files.begin(); it != files.end(); it++) { bool done = false; for (std::list::iterator it2 = files.begin(); it2 != files.end(); it2++) { if (it != it2 && !it->lfn.empty() && !it2->lfn.empty()) { // error if lfns (including locations) are identical if (it->lfn == it2->lfn) { logger.msg(Arc::ERROR, "%s: Two identical output destinations: %s", jobid, it->lfn); lock.lock(); finished_jobs[jobid] = std::string("Two identical output destinations: " + it->lfn); lock.unlock(); CleanCacheJobLinks(config, job); if (kicker_func) (*kicker_func)(kicker_arg); return false; } Arc::URL u_it(it->lfn); Arc::URL u_it2(it2->lfn); if (u_it == u_it2) { // error if pfns are different 
if (it->pfn != it2->pfn) { logger.msg(Arc::ERROR, "%s: Cannot upload two different files %s and %s to same LFN: %s", jobid, it->pfn, it2->pfn, it->lfn); lock.lock(); finished_jobs[jobid] = std::string("Cannot upload two different files to same LFN: " + it->lfn); lock.unlock(); CleanCacheJobLinks(config, job); if (kicker_func) (*kicker_func)(kicker_arg); return false; } replication = true; done = true; break; } } } if (done) break; } // pre-clean session dir before uploading if (delete_all_files(job.SessionDir(), files, true, job_uid, job_gid) == 2) { logger.msg(Arc::ERROR, "%s: Failed to clean up session dir", jobid); lock.lock(); finished_jobs[jobid] = std::string("Failed to clean up session dir before uploading outputs"); lock.unlock(); CleanCacheJobLinks(config, job); if (kicker_func) (*kicker_func)(kicker_arg); return false; } } else { // bad state logger.msg(Arc::ERROR, "%s: Received job in a bad state: %s", jobid, job.get_state_name()); lock.lock(); finished_jobs[jobid] = std::string("Logic error: DTR Generator received job in a bad state"); lock.unlock(); if (kicker_func) (*kicker_func)(kicker_arg); return false; } Arc::initializeCredentialsType cred_type(Arc::initializeCredentialsType::SkipCredentials); Arc::UserConfig usercfg(cred_type); usercfg.UtilsDirPath(config.ControlDir()); usercfg.CACertificatesDirectory(config.CertDir()); if (config.StrictSession()) usercfg.SetUser(job.get_user()); // TODO: chelonia bartenders // create job.id.errors file with correct permissions to add to Logger job_errors_mark_put(job, config); if (files.empty()) { // if job is FINISHING then clean up cache joblinks if (job.get_state() == JOB_STATE_FINISHING) CleanCacheJobLinks(config, job); // nothing else to do so wake up GM thread and return lock.lock(); finished_jobs[jobid] = ""; lock.unlock(); if (kicker_func) (*kicker_func)(kicker_arg); return true; } // flag to say whether at least one file needs to be staged bool staging = false; for (std::list::iterator i = files.begin(); i != files.end(); ++i) { if (i->lfn.find(":") == std::string::npos) continue; // user down/uploadable file staging = true; std::string source; std::string original_source; std::string destination; if (job.get_state() == JOB_STATE_PREPARING) { // If ACIX should be used, use it as source and add original URL as // location after DTR is created // If cache=check then don't use remote caches as the copy must be up to date Arc::URL original_url(i->lfn); if (!staging_conf.get_acix_endpoint().empty() && original_url.Option("cache") != "check") { original_source = i->lfn; // Encode the original url so it is not parsed as part of the acix url Arc::URL acix_source(staging_conf.get_acix_endpoint() + "?url=" + Arc::uri_encode(original_source, false)); // Add URL options to ACIX URL for (std::map::const_iterator opt = original_url.Options().begin(); opt != original_url.Options().end(); ++opt) { acix_source.AddOption(opt->first, opt->second); } source = acix_source.fullstr(); } else { source = i->lfn; } destination = "file:" + job.SessionDir() + i->pfn; } else { source = "file:" + job.SessionDir() + i->pfn; // Upload to dest ending in '/': append filename to lfn // Note: won't work for nested URLs used for index services if (i->lfn.rfind('/') == i->lfn.length()-1) { destination = i->lfn + i->pfn.substr(i->pfn.rfind('/')+1); } else { destination = i->lfn; } } // Check if this file was recovered from a crash, if so add overwrite option for (std::list::iterator file = recovered_files.begin(); file != recovered_files.end();) { if (*file == 
destination) { logger.msg(Arc::WARNING, "%s: Destination file %s was possibly left unfinished" " from previous A-REX run, will overwrite", jobid, destination); Arc::URL u(destination); if (u) { u.AddOption("overwrite=yes", true); destination = u.fullstr(); } file = recovered_files.erase(file); } else { ++file; } } // Add common purpose URL options from configuration { Arc::URL u(source); if (u) { u.AddOption("httpgetpartial", staging_conf.get_httpgetpartial()?"yes":"no", false); // Consider adding passive and secure here source = u.fullstr(); } } std::string proxy_cred; if(!i->cred.empty()) { usercfg.ProxyPath(i->cred); if (Arc::FileRead(i->cred, proxy_cred)) usercfg.CredentialString(proxy_cred); } else { usercfg.ProxyPath(default_cred); if (Arc::FileRead(default_cred, proxy_cred)) usercfg.CredentialString(proxy_cred); } // logger for these DTRs. LogDestinations should be deleted when DTR is received back DataStaging::DTRLogger dtr_log(new Arc::Logger(Arc::Logger::getRootLogger(), "DataStaging.DTR")); Arc::LogFile * dest = new Arc::LogFile(job_errors_filename(jobid, config)); dest->setReopen(true); dest->setFormat(Arc::MediumFormat); dtr_log->addDestination(*dest); if (central_dtr_log) { dtr_log->addDestination(*central_dtr_log); } // create DTR and send to Scheduler DataStaging::DTR_ptr dtr(new DataStaging::DTR(source, destination, usercfg, jobid, job.get_user().get_uid(), dtr_log)); // set retry count (tmp errors only) dtr->set_tries_left(staging_conf.max_retries); // allow the same file to be uploaded to multiple locations with same LFN dtr->set_force_registration(replication); // set sub-share for download or upload dtr->set_sub_share((job.get_state() == JOB_STATE_PREPARING) ? "download" : "upload"); // set priority as given in job description if (job.GetLocalDescription(config)) dtr->set_priority(job.GetLocalDescription(config)->priority); // set whether to use A-REX host certificate for remote delivery services dtr->host_cert_for_remote_delivery(staging_conf.use_host_cert_for_remote_delivery); // set real location if ACIX is used if (!original_source.empty()) { dtr->get_source()->AddLocation(Arc::URL(original_source), Arc::URL(original_source).ConnectionURL()); dtr->set_use_acix(true); } dtr->get_job_perf_log().SetOutput(staging_conf.perf_log.GetOutput()); dtr->get_job_perf_log().SetEnabled(staging_conf.perf_log.GetEnabled()); DataStaging::DTRCacheParameters cache_parameters; CacheConfig cache_params(config.CacheParams()); // Substitute cache paths cache_params.substitute(config, job.get_user()); cache_parameters.cache_dirs = cache_params.getCacheDirs(); cache_parameters.remote_cache_dirs = cache_params.getRemoteCacheDirs(); dtr->set_cache_parameters(cache_parameters); dtr->registerCallback(this,DataStaging::GENERATOR); dtr->registerCallback(scheduler, DataStaging::SCHEDULER); // callbacks for info dtr->registerCallback(&info, DataStaging::SCHEDULER); dtr->set_credential_info(cred_info); lock.lock(); active_dtrs.insert(std::pair(jobid, dtr->get_id())); lock.unlock(); // send to Scheduler DataStaging::DTR::push(dtr, DataStaging::SCHEDULER); // update .local with transfer share JobLocalDescription *job_desc = new JobLocalDescription; if (!job_local_read_file(jobid, config, *job_desc)) { logger.msg(Arc::ERROR, "%s: Failed reading local information", jobid); delete job_desc; continue; } job_desc->transfershare = dtr->get_transfer_share(); if (!job_local_write_file(job, config, *job_desc)) { logger.msg(Arc::ERROR, "%s: Failed writing local information", jobid); } delete job_desc; } if 
(!staging) { // nothing needed staged so mark as finished // if job is FINISHING then clean up cache joblinks if (job.get_state() == JOB_STATE_FINISHING) CleanCacheJobLinks(config, job); lock.lock(); finished_jobs[jobid] = ""; lock.unlock(); } return true; } bool DTRGenerator::processCancelledJob(const std::string& jobid) { // cancel DTRs in Scheduler logger.msg(Arc::INFO, "%s: Cancelling active DTRs", jobid); scheduler->cancelDTRs(jobid); return true; } int DTRGenerator::checkUploadedFiles(GMJob& job) { JobId jobid(job.get_id()); uid_t job_uid = config.StrictSession() ? job.get_user().get_uid() : 0; uid_t job_gid = config.StrictSession() ? job.get_user().get_gid() : 0; std::string session_dir; if (job.GetLocalDescription(config) && !job.GetLocalDescription(config)->sessiondir.empty()) session_dir = job.GetLocalDescription(config)->sessiondir; else session_dir = config.SessionRoot(jobid) + '/' + jobid; // get input files list std::list uploaded_files; std::list* uploaded_files_ = NULL; std::list input_files; std::list input_files_ = input_files; if (!job_input_read_file(jobid, config, input_files)) { job.AddFailure("Error reading list of input files"); logger.msg(Arc::ERROR, "%s: Can't read list of input files", jobid); return 1; } if (job_input_status_read_file(jobid, config, uploaded_files)) { uploaded_files_ = &uploaded_files; } int res = 0; // loop through each file and check for (FileData::iterator i = input_files.begin(); i != input_files.end();) { // all remote files should have been downloaded by this point if (i->lfn.find(":") != std::string::npos) { ++i; continue; } logger.msg(Arc::VERBOSE, "%s: Checking user uploadable file: %s", jobid, i->pfn); std::string error; int err = user_file_exists(*i, session_dir, jobid, error, job_uid, job_gid, uploaded_files_); if (err == 0) { // file is uploaded logger.msg(Arc::VERBOSE, "%s: User has uploaded file %s", jobid, i->pfn); // remove from input list i = input_files.erase(i); input_files_.clear(); for (FileData::iterator it = input_files.begin(); it != input_files.end(); ++it) input_files_.push_back(*it); if (!job_input_write_file(job, config, input_files_)) { logger.msg(Arc::WARNING, "%s: Failed writing changed input file.", jobid); } } else if (err == 1) { // critical failure logger.msg(Arc::ERROR, "%s: Critical error for uploadable file %s", jobid, i->pfn); job.AddFailure("User file: "+i->pfn+" - "+error); res = 1; break; } else { // still waiting res = 2; ++i; } } // check for timeout if (res == 2 && ((time(NULL) - job.GetStartTime()) > 600)) { // hard-coded timeout for (FileData::iterator i = input_files.begin(); i != input_files.end(); ++i) { if (i->lfn.find(":") == std::string::npos) { job.AddFailure("User file: "+i->pfn+" - Timeout waiting"); } } logger.msg(Arc::ERROR, "%s: Uploadable files timed out", jobid); res = 1; } return res; } bool match_list(const std::list& slist, const std::string& str) { for(std::list::const_iterator s = slist.begin(); s != slist.end(); ++s) { if(*s == str) return true; } return false; } int DTRGenerator::user_file_exists(FileData &dt, const std::string& session_dir, const std::string& jobid, std::string& error, uid_t uid, gid_t gid, const std::list* uploaded_files) { struct stat st; std::string file_info(dt.lfn); if (file_info == "*.*") return 0; // do not wait for this file std::string fname = session_dir + '/' + dt.pfn; // check if file exists at all if (!Arc::FileStat(fname.c_str(), &st, uid, gid, false)) return 2; // if no size/checksum was supplied, return success if (file_info.empty()) { // but 
check status first if available if (uploaded_files) { if (!match_list(*uploaded_files, dt.pfn)) return 2; } return 0; } if (S_ISDIR(st.st_mode)) { error = "Expected file. Directory found."; return 1; } if (!S_ISREG(st.st_mode)) { error = "Expected ordinary file. Special object found."; return 1; } long long int fsize; long long int fsum; bool have_size = false; bool have_checksum = false; // parse format [size][.checksum] if (file_info[0] == '.') { // checksum only if (!Arc::stringto(file_info.substr(1), fsum)) { logger.msg(Arc::ERROR, "%s: Can't convert checksum %s to int for %s", jobid, file_info.substr(1), dt.pfn); error = "Invalid checksum information"; return 1; } have_checksum = true; } else if (file_info.find('.') == std::string::npos) { // size only if (!Arc::stringto(file_info, fsize)) { logger.msg(Arc::ERROR, "%s: Can't convert filesize %s to int for %s", jobid, file_info, dt.pfn); error = "Invalid file size information"; return 1; } have_size = true; } else { // size and checksum std::vector<std::string> file_attrs; Arc::tokenize(dt.lfn, file_attrs, "."); if (file_attrs.size() != 2) { logger.msg(Arc::ERROR, "%s: Invalid size/checksum information (%s) for %s", jobid, file_info, dt.pfn); error = "Invalid size/checksum information"; return 1; } if (!Arc::stringto(file_attrs[0], fsize)) { logger.msg(Arc::ERROR, "%s: Can't convert filesize %s to int for %s", jobid, file_attrs[0], dt.pfn); error = "Invalid file size information"; return 1; } if (!Arc::stringto(file_attrs[1], fsum)) { logger.msg(Arc::ERROR, "%s: Can't convert checksum %s to int for %s", jobid, file_attrs[1], dt.pfn); error = "Invalid checksum information"; return 1; } have_size = true; have_checksum = true; } // now check if proper size if (have_size) { if (st.st_size < fsize) return 2; if (st.st_size > fsize) { logger.msg(Arc::ERROR, "%s: Invalid file: %s is too big.", jobid, dt.pfn); error = "Delivered file is bigger than specified."; return 1; } } if (uploaded_files) { if (!match_list(*uploaded_files, dt.pfn)) return 2; } else if (have_checksum) { // calculate checksum (if no better way) int h = -1; Arc::FileAccess* fa = NULL; if ((uid && uid != getuid()) || (gid && gid != getgid())) { fa = new Arc::FileAccess(); if (!fa->fa_setuid(uid, gid)) { delete fa; logger.msg(Arc::ERROR, "%s: Failed to switch user ID to %d/%d to read file %s", jobid, (unsigned int)uid, (unsigned int)gid, dt.pfn); error = "Could not switch user id to read file"; return 1; } if(!fa->fa_open(fname, O_RDONLY, 0)) { delete fa; logger.msg(Arc::ERROR, "%s: Failed to open file %s for reading", jobid, dt.pfn); error = "Failed to open file for reading"; return 1; } } else { h = ::open(fname.c_str(), O_RDONLY); if (h == -1) { // if we can't read the file, the job won't be able to either logger.msg(Arc::ERROR, "%s: Error accessing file %s", jobid, dt.pfn); error = "Delivered file is unreadable."; return 1; } } Arc::CRC32Sum crc; char buffer[1024]; ssize_t l; for(;;) { if (fa) l = fa->fa_read(buffer, 1024); else l = read(h, buffer, 1024); if (l == -1) { logger.msg(Arc::ERROR, "%s: Error reading file %s", jobid, dt.pfn); error = "Could not read file to compute checksum."; delete fa; return 1; } if (l == 0) break; crc.add(buffer, l); } if (h != -1) close(h); if (fa) fa->fa_close(); delete fa; crc.end(); if (fsum != crc.crc()) { if (have_size) { // size was checked and is ok logger.msg(Arc::ERROR, "%s: File %s has wrong checksum: %llu.
Expected %lli", jobid, dt.pfn, crc.crc(), fsum); error = "Delivered file has wrong checksum."; return 1; } return 2; // not uploaded yet } logger.msg(Arc::VERBOSE, "%s: Checksum %llu verified for %s", jobid, crc.crc(), dt.pfn); } return 0; // all checks passed - file is ok } void DTRGenerator::readDTRState(const std::string& dtr_log) { std::list lines; // file may not exist if this is the first use of DTR if (!Arc::FileRead(dtr_log, lines)) return; if (!lines.empty()) { logger.msg(Arc::WARNING, "Found unfinished DTR transfers. It is possible the " "previous A-REX process did not shut down normally"); } for (std::list::iterator line = lines.begin(); line != lines.end(); ++line) { std::vector fields; Arc::tokenize(*line, fields); if ((fields.size() == 5 || fields.size() == 6) && (fields.at(1) == "TRANSFERRING" || fields.at(1) == "TRANSFER")) { logger.msg(Arc::VERBOSE, "Found DTR %s for file %s left in transferring state from previous run", fields.at(0), fields.at(4)); recovered_files.push_back(fields.at(4)); } } } void DTRGenerator::CleanCacheJobLinks(const GMConfig& config, const GMJob& job) const { CacheConfig cache_config(config.CacheParams()); cache_config.substitute(config, job.get_user()); // there is no uid switch during Release so uid/gid is not so important Arc::FileCache cache(cache_config.getCacheDirs(), cache_config.getRemoteCacheDirs(), cache_config.getDrainingCacheDirs(), job.get_id(), job.get_user().get_uid(), job.get_user().get_gid()); cache.Release(); } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/PaxHeaders.7502/JobsList.h0000644000000000000000000000012313065016642026021 xustar000000000000000027 mtime=1490296226.623926 27 atime=1513200576.258723 29 ctime=1513200662.84078223 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/JobsList.h0000644000175000002070000002120013065016642026062 0ustar00mockbuildmock00000000000000#ifndef GRID_MANAGER_STATES_H #define GRID_MANAGER_STATES_H #include #include #include "../conf/StagingConfig.h" #include "GMJob.h" #include "JobDescriptionHandler.h" namespace ARex { class JobFDesc; class DTRGenerator; class GMConfig; /// ZeroUInt is a wrapper around unsigned int. It provides a consistent default /// value, as int type variables have no predefined value assigned upon /// creation. It also protects from potential counter underflow, to stop /// counter jumping to MAX_INT. TODO: move to common lib? class ZeroUInt { private: unsigned int value_; public: ZeroUInt(void):value_(0) { }; ZeroUInt(unsigned int v):value_(v) { }; ZeroUInt(const ZeroUInt& v):value_(v.value_) { }; ZeroUInt& operator=(unsigned int v) { value_=v; return *this; }; ZeroUInt& operator=(const ZeroUInt& v) { value_=v.value_; return *this; }; ZeroUInt& operator++(void) { ++value_; return *this; }; ZeroUInt operator++(int) { ZeroUInt temp(value_); ++value_; return temp; }; ZeroUInt& operator--(void) { if(value_) --value_; return *this; }; ZeroUInt operator--(int) { ZeroUInt temp(value_); if(value_) --value_; return temp; }; operator unsigned int(void) const { return value_; }; }; /// List of jobs. This class contains the main job management logic which moves /// jobs through the state machine. New jobs found through Scan methods are /// held in memory until reaching FINISHED state. 
class JobsList { public: typedef std::list::iterator iterator; private: // List of jobs currently tracked in memory std::list jobs; // GM configuration const GMConfig& config; // Staging configuration StagingConfig staging_config; // Dir containing finished/deleted jobs which is scanned in ScanOldJobs. // Since this can happen over multiple calls a pointer is kept as a member // variable so scanning picks up where it finished last time. Glib::Dir* old_dir; // Generator for handling data staging DTRGenerator* dtr_generator; // Job description handler JobDescriptionHandler job_desc_handler; // number of jobs for every state int jobs_num[JOB_STATE_NUM]; int jobs_scripts; // map of number of active jobs for each DN std::map jobs_dn; // number of jobs currently in pending state int jobs_pending; // Add job into list without checking if it is already there. // 'i' will be set to iterator pointing at new job bool AddJobNoCheck(const JobId &id,iterator &i,uid_t uid,gid_t gid); // Add job into list without checking if it is already there bool AddJobNoCheck(const JobId &id,uid_t uid,gid_t gid); // Perform all actions necessary in case of job failure bool FailedJob(const iterator &i,bool cancel); // Cleaning reference to running child process void CleanChildProcess(const iterator i); // Remove Job from list. All corresponding files are deleted and pointer is // advanced. If finished is false - job is not destroyed if it is FINISHED // If active is false - job is not destroyed if it is not UNDEFINED. Returns // false if external process is still running. bool DestroyJob(iterator &i,bool finished=true,bool active=true); // Perform actions necessary in case job goes to/is in SUBMITTING/CANCELING state bool state_submitting(const iterator &i,bool &state_changed,bool cancel=false); // Same for PREPARING/FINISHING bool state_loading(const iterator &i,bool &state_changed,bool up); // Returns true if job is waiting on some condition or limit before // progressing to the next state bool JobPending(JobsList::iterator &i); // Get the state in which the job failed from .local file job_state_t JobFailStateGet(const iterator &i); // Write the state in which the job failed to .local file bool JobFailStateRemember(const iterator &i,job_state_t state,bool internal = true); // In case of job restart, recreates lists of input and output files taking // into account what was already transferred bool RecreateTransferLists(const JobsList::iterator &i); // Read into ids all jobs in the given dir bool ScanJobs(const std::string& cdir,std::list& ids); // Read into ids all jobs in the given dir with marks given by suffices // (corresponding to file suffixes) bool ScanMarks(const std::string& cdir,const std::list& suffices,std::list& ids); // Called after service restart to move jobs that were processing to a // restarting state bool RestartJobs(const std::string& cdir,const std::string& odir); // Release delegation after job finishes void UnlockDelegation(JobsList::iterator &i); // Calculate job expiration time from last state change and configured lifetime time_t PrepareCleanupTime(JobsList::iterator &i, time_t& keep_finished); // Read in information from .local file bool GetLocalDescription(const JobsList::iterator &i); // Modify job state, log that change and optionally log modification reson void SetJobState(JobsList::iterator &i, job_state_t new_state, const char* reason = NULL); // Update content of job proxy file with one stored in delegations store void UpdateJobCredentials(JobsList::iterator &i); // Main job 
processing method. Analyze current state of job, perform // necessary actions and advance state or remove job if needed. Iterator 'i' // is advanced or erased inside this function. bool ActJob(iterator &i); // ActJob() calls one of these methods depending on the state of the job. // Parameters: // once_more - if true then ActJob should process this job again // delete_job - if true then ActJob should delete the job // job_error - if true then an error happened in this processing state // state_changed - if true then the job state was changed void ActJobUndefined(iterator &i,bool& once_more,bool& delete_job,bool& job_error,bool& state_changed); void ActJobAccepted(iterator &i,bool& once_more,bool& delete_job,bool& job_error,bool& state_changed); void ActJobPreparing(iterator &i,bool& once_more,bool& delete_job,bool& job_error,bool& state_changed); void ActJobSubmitting(iterator &i,bool& once_more,bool& delete_job,bool& job_error,bool& state_changed); void ActJobCanceling(iterator &i,bool& once_more,bool& delete_job,bool& job_error,bool& state_changed); void ActJobInlrms(iterator &i,bool& once_more,bool& delete_job,bool& job_error,bool& state_changed); void ActJobFinishing(iterator &i,bool& once_more,bool& delete_job,bool& job_error,bool& state_changed); void ActJobFinished(iterator &i,bool& once_more,bool& delete_job,bool& job_error,bool& state_changed); void ActJobDeleted(iterator &i,bool& once_more,bool& delete_job,bool& job_error,bool& state_changed); public: // Constructor. JobsList(const GMConfig& gmconfig); // std::list methods for using JobsList like a regular list iterator begin(void) { return jobs.begin(); }; iterator end(void) { return jobs.end(); }; size_t size(void) const { return jobs.size(); }; iterator erase(iterator& i) { return jobs.erase(i); }; // Return iterator to object matching given id or jobs.end() if not found iterator FindJob(const JobId &id); // Information about jobs for external utilities // No of jobs in all active states from ACCEPTED and FINISHING int AcceptedJobs() const; // No of jobs in batch system or in process of submission to batch system int RunningJobs() const; // No of jobs in data staging int ProcessingJobs() const; // No of jobs staging in data before job execution int PreparingJobs() const; // No of jobs staging out data after job execution int FinishingJobs() const; // Set DTR Generator for data staging void SetDataGenerator(DTRGenerator* generator) { dtr_generator = generator; }; // Call ActJob for all current jobs bool ActJobs(void); // Look for new or restarted jobs. Jobs are added to list with state UNDEFINED bool ScanNewJobs(void); // Collect all jobs in all states bool ScanAllJobs(void); // Pick jobs which have been marked for restarting, cancelling or cleaning bool ScanNewMarks(void); // Look for finished or deleted jobs and process them. Jobs which are // restarted will be added back into the main processing loop. This method // can be limited in the time it can run for and number of jobs it can scan. // For jobs number -1 is also accepted as no limit. // Time is always limited. // It returns false if failed or scanning finished. bool ScanOldJobs(unsigned int max_scan_time,int max_scan_jobs); // Add job with specified id. // Returns true if job was found and added. 
bool AddJob(const JobId& id); // Rearrange status files on service restart bool RestartJobs(void); // Send signals to external processes to shut down nicely (not implemented) void PrepareToDestroy(void); }; } // namespace ARex #endif nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/PaxHeaders.7502/CommFIFO.cpp0000644000000000000000000000012413107551112026154 xustar000000000000000027 mtime=1495192138.929876 27 atime=1513200576.258723 30 ctime=1513200662.833782144 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/CommFIFO.cpp0000644000175000002070000001210513107551112026220 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "CommFIFO.h" namespace ARex { #ifndef WIN32 static const std::string fifo_file("/gm.fifo"); bool CommFIFO::make_pipe(void) { bool res = false; lock.lock(); if (kick_in != -1) { close(kick_in); kick_in = -1; }; if (kick_out != -1) { close(kick_out); kick_out = -1; }; int filedes[2]; if(pipe(filedes) == 0) { kick_in=filedes[1]; kick_out=filedes[0]; long arg; arg=fcntl(kick_in,F_GETFL); if(arg != -1) { arg|=O_NONBLOCK; fcntl(kick_in,F_SETFL,&arg); }; arg=fcntl(kick_out,F_GETFL); if(arg != -1) { arg|=O_NONBLOCK; fcntl(kick_out,F_SETFL,&arg); }; res = (kick_in != -1); }; lock.unlock(); return res; } CommFIFO::CommFIFO(void) { timeout_=-1; kick_in=-1; kick_out=-1; make_pipe(); } CommFIFO::~CommFIFO(void) { } void CommFIFO::wait(int timeout) { time_t start_time = time(NULL); time_t end_time = start_time + timeout; for(;;) { fd_set fin,fout,fexc; FD_ZERO(&fin); FD_ZERO(&fout); FD_ZERO(&fexc); int maxfd=-1; if(kick_out == -1) make_pipe(); if(kick_out != -1) { maxfd=kick_out; FD_SET(kick_out,&fin); }; lock.lock(); for(std::list::iterator i = fds.begin();i!=fds.end();++i) { if(i->fd < 0) continue; if(i->fd>maxfd) maxfd=i->fd; FD_SET(i->fd,&fin); }; lock.unlock(); int err; maxfd++; if(timeout >= 0) { struct timeval t; if(((int)(end_time-start_time)) < 0) return; t.tv_sec=end_time-start_time; t.tv_usec=0; if(maxfd > 0) { err = select(maxfd,&fin,&fout,&fexc,&t); } else { sleep(t.tv_sec); err = 0; }; start_time = time(NULL); } else { if(maxfd > 0) { err = select(maxfd,&fin,&fout,&fexc,NULL); } else { err = 0; }; }; if(err == 0) return; if(err == -1) { if(errno == EBADF) { // One of fifos must be broken. Let read() find that out. } else if(errno == EINTR) { // interrupted by signal, retry continue; }; // No idea how this could happen and how to deal with it. // Lets try to escape and start from beginning return; }; if(kick_out >= 0) { if((err < 0) || FD_ISSET(kick_out,&fin)) { char buf[256]; if(read(kick_out,buf,256) == -1) { make_pipe(); }; continue; }; }; lock.lock(); for(std::list::iterator i = fds.begin();i!=fds.end();++i) { if(i->fd < 0) continue; if((err < 0) || FD_ISSET(i->fd,&fin)) { lock.unlock(); char buf[256]; ssize_t l = read(i->fd,buf,sizeof(buf)); if(l < 0) { if((errno == EBADF) || (errno == EINVAL) || (errno == EIO)) { close(i->fd); close(i->fd_keep); i->fd = -1; i->fd_keep = -1; }; } else if(l > 0) { // 0 means kick, 1 - ping, rest undefined yet if(memchr(buf,0,l)) return; }; }; }; lock.unlock(); }; } CommFIFO::add_result CommFIFO::add(const std::string& dir_path) { std::string path = dir_path + fifo_file; if(mkfifo(path.c_str(),S_IRUSR | S_IWUSR) != 0) { if(errno != EEXIST) { return add_error; }; }; (void)chmod(path.c_str(),S_IRUSR | S_IWUSR); int fd = -1; // This must fail. If not then there is another a-rex hanging around. 
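// A minimal standalone sketch of this busy-probe, assuming a hypothetical control
// directory "/var/spool/arc/jobstatus" (the path is an illustration only): POSIX
// specifies that opening a FIFO with O_WRONLY|O_NONBLOCK fails with errno == ENXIO
// when no process holds the read end open, so a probe that *succeeds* means another
// reader (another A-REX) is already attached to this control directory:
//
//   int probe = open("/var/spool/arc/jobstatus/gm.fifo", O_WRONLY | O_NONBLOCK);
//   if(probe != -1) { close(probe); /* busy: somebody is already listening */ }
//   else if(errno == ENXIO) { /* free: no reader on the FIFO yet */ }
//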
fd = open(path.c_str(),O_WRONLY | O_NONBLOCK); if(fd != -1) { close(fd); return add_busy; }; // (errno != ENXIO)) { fd = open(path.c_str(),O_RDONLY | O_NONBLOCK); if(fd == -1) return add_error; int fd_keep = open(path.c_str(),O_WRONLY | O_NONBLOCK); if(fd_keep == -1) { close(fd); return add_error; }; elem_t el; el.fd=fd; el.fd_keep=fd_keep; lock.lock(); fds.push_back(el); if(kick_in != -1) { char c = 0; write(kick_in,&c,1); }; lock.unlock(); return add_success; } static int OpenFIFO(const std::string& path) { // Here O_NONBLOCK ensures open() will fail if nothing listens int fd = open(path.c_str(),O_WRONLY | O_NONBLOCK); // If fd == -1 here there is no FIFO or nothing is listening on another end return fd; } bool SignalFIFO(const std::string& dir_path) { std::string path = dir_path + fifo_file; int fd = OpenFIFO(path); if(fd == -1) return false; char c = 0; if(write(fd,&c,1) != 1) { close(fd); return false; }; close(fd); return true; } bool PingFIFO(const std::string& dir_path) { std::string path = dir_path + fifo_file; int fd = OpenFIFO(path); // If nothing is listening open() will fail // so there is no need to send anything. if(fd == -1) return false; close(fd); return true; } #else CommFIFO::CommFIFO(void) { } CommFIFO::~CommFIFO(void) { } void CommFIFO::wait(int timeout) { return NULL; } CommFIFO::add_result CommFIFO::add(const std::string& dir_path) { return add_error; } bool SignalFIFO(const std::string& dir_path) { return false; } bool PingFIFO(const std::string& dir_path) { return false; } #endif } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/PaxHeaders.7502/ContinuationPlugins.h0000644000000000000000000000012412046735464030315 xustar000000000000000027 mtime=1352383284.841662 27 atime=1513200576.264723 30 ctime=1513200662.844782278 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/ContinuationPlugins.h0000644000175000002070000000232012046735464030357 0ustar00mockbuildmock00000000000000#ifndef __ARC_GM_PLUGINS_H__ #define __ARC_GM_PLUGINS_H__ #include #include "GMJob.h" namespace ARex { class ContinuationPlugins { public: typedef enum { act_fail, act_pass, act_log, act_undefined } action_t; class result_t { public: action_t action; int result; std::string response; result_t(action_t act,int res,const std::string& resp): action(act),result(res),response(resp) { }; result_t(action_t act): action(act),result(0) { }; }; private: class command_t { public: std::string cmd; unsigned int to; action_t onsuccess; action_t onfailure; action_t ontimeout; }; std::list commands[JOB_STATE_NUM]; public: ContinuationPlugins(void); ~ContinuationPlugins(void); bool add(job_state_t state,unsigned int timeout,const char* command); bool add(const char* state,unsigned int timeout,const char* command); bool add(job_state_t state,const char* options,const char* command); bool add(const char* state,const char* options,const char* command); void run(const GMJob &job,const GMConfig& config,std::list& results); }; } // namespace ARex #endif nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/PaxHeaders.7502/DTRGenerator.h0000644000000000000000000000012313065020363026563 xustar000000000000000026 mtime=1490297075.15937 27 atime=1513200576.278723 30 ctime=1513200662.845782291 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/DTRGenerator.h0000644000175000002070000001511013065020363026627 0ustar00mockbuildmock00000000000000#ifndef DTR_GENERATOR_H_ #define DTR_GENERATOR_H_ #include #include #include "../conf/StagingConfig.h" namespace ARex { class GMConfig; class 
FileData; class GMJob; /** * DTRInfo passes state information from data staging to A-REX * via the defined callback, called when the DTR passes to the * certain processes. It could for example write to files in the * control directory, and this information can be picked up and * published by the info system. */ class DTRInfo: public DataStaging::DTRCallback { private: const GMConfig& config; static Arc::Logger logger; public: DTRInfo(const GMConfig& config); virtual void receiveDTR(DataStaging::DTR_ptr dtr); }; /** * A-REX implementation of DTR Generator. Note that job migration functionality * present in the down/uploaders has not been implemented here. */ class DTRGenerator: public DataStaging::DTRCallback { private: /** Active DTRs. Map of job id to DTR id. */ std::multimap active_dtrs; /** Jobs where all DTRs are finished. Map of job id to failure reason (empty if success) */ std::map finished_jobs; /** Lock for lists */ Arc::SimpleCondition lock; // Event lists /** DTRs received */ std::list dtrs_received; /** Jobs received */ std::list jobs_received; /** Jobs cancelled. List of Job IDs. */ std::list jobs_cancelled; /** Lock for events */ Arc::SimpleCondition event_lock; /** Condition to wait on when stopping Generator */ Arc::SimpleCondition run_condition; /** State of Generator */ DataStaging::ProcessState generator_state; /** Grid manager configuration */ const GMConfig& config; /** A list of files left mid-transfer from a previous process */ std::list recovered_files; /** logger to a-rex log */ static Arc::Logger logger; /** Central DTR LogDestination */ Arc::LogDestination* central_dtr_log; /** Associated scheduler */ DataStaging::Scheduler* scheduler; /** Staging configuration */ StagingConfig staging_conf; /** Info object for passing DTR info back to A-REX */ DTRInfo info; //static DTRGeneratorCallback receive_dtr; /** Function and arguments for callback when all DTRs for a job have finished */ void (*kicker_func)(void*); void* kicker_arg; /** Private constructors */ DTRGenerator(const DTRGenerator& generator); /** run main thread */ static void main_thread(void* arg); void thread(void); /** Process a received DTR */ bool processReceivedDTR(DataStaging::DTR_ptr dtr); /** Process a received job */ bool processReceivedJob(GMJob& job); /** Process a cancelled job */ bool processCancelledJob(const std::string& jobid); /** Read in state left from previous process and fill recovered_files */ void readDTRState(const std::string& dtr_log); /** Clean up joblinks dir in caches for given job (called at the end of upload) */ void CleanCacheJobLinks(const GMConfig& config, const GMJob& job) const; /** Check that user-uploadable file exists. * Returns 0 - if file exists * 1 - it is not proper file or other error * 2 - not there yet * @param dt Filename and size/checksum information * @param session_dir Directory in which to find uploaded file * @param jobid Job ID, used in log messages * @param error Errors are reported in this string * @param uid uid under which to access session dir * @param gid gid under which to access session dir */ static int user_file_exists(FileData &dt, const std::string& session_dir, const std::string& jobid, std::string& error, uid_t uid, gid_t gid, const std::list* uploaded_files); public: /** * Start up Generator. * @param user Grid manager configuration. 
* @param kicker_func Function to call on completion of all DTRs for a job * @param kicker_arg Argument to kicker function */ DTRGenerator(const GMConfig& config, void (*kicker_func)(void*) = NULL, void* kicker_arg = NULL); /** * Stop Generator */ ~DTRGenerator(); operator bool(void) { return (generator_state == DataStaging::RUNNING); }; bool operator!(void) { return (generator_state != DataStaging::RUNNING); }; /** * Callback called when DTR is finished. This DTR is marked done in the * DTR list and if all DTRs for the job have completed, the job is marked * as done. * @param dtr DTR object sent back from the Scheduler */ virtual void receiveDTR(DataStaging::DTR_ptr dtr); /** * A-REX sends data transfer requests to the data staging system through * this method. It reads the job.id.input/output files, forms DTRs and * sends them to the Scheduler. * @param job Job description object. */ void receiveJob(const GMJob& job); /** * This method is used by A-REX to cancel on-going DTRs. A cancel request * is made for each DTR in the job and the method returns. The Scheduler * asychronously deals with cancelling the DTRs. * @param job The job which is being cancelled */ void cancelJob(const GMJob& job); /** * Query status of DTRs in job. If all DTRs are finished, returns true, * otherwise returns false. If true is returned, the JobDescription should * be checked for whether the staging was successful or not by checking * CheckFailure() or GetFailure(). * @param job Description of job to query. Can be modified to add a failure * reason. * @return True if all DTRs in the job are finished, false otherwise. */ bool queryJobFinished(GMJob& job); /** * Query whether the Generator has a record of this job. * @param job Job to query. * @return True if the job is active or finished. */ bool hasJob(const GMJob& job); /** * Remove the job from the Generator. Only finished jobs will be removed, * and a warning will be logged if the job still has active DTRs. This * method should be called after A-REX has finished PREPARING or FINISHING. * @param job The job to remove. */ void removeJob(const GMJob& job); /** * Utility method to check that all files the user was supposed to * upload with the job are ready. * @param job Job description, failures will be reported directly in * this object. 
* @return 0 if file exists, 1 if it is not a proper file or other error, * 2 if the file not there yet */ int checkUploadedFiles(GMJob& job); }; } // namespace ARex #endif /* DTR_GENERATOR_H_ */ nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/PaxHeaders.7502/ContinuationPlugins.cpp0000644000000000000000000000012412675602216030644 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200576.251723 30 ctime=1513200662.838782205 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/ContinuationPlugins.cpp0000644000175000002070000001453412675602216030720 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "../jobs/GMJob.h" #include "../jobs/JobsList.h" #include "../conf/GMConfig.h" #include "ContinuationPlugins.h" namespace ARex { /* Substitution: %I - job id */ ContinuationPlugins::ContinuationPlugins(void) { } ContinuationPlugins::~ContinuationPlugins(void) { } bool ContinuationPlugins::add(job_state_t state,unsigned int timeout,const char* command) { if((state == JOB_STATE_ACCEPTED) || (state == JOB_STATE_PREPARING) || (state == JOB_STATE_SUBMITTING) || (state == JOB_STATE_FINISHING) || (state == JOB_STATE_FINISHED) || (state == JOB_STATE_DELETED)) { command_t cmd; cmd.cmd=command; cmd.to=timeout; cmd.onsuccess=act_pass; cmd.onfailure=act_fail; cmd.ontimeout=act_fail; commands[state].push_back(cmd); } else { return false; }; return true; } bool ContinuationPlugins::add(const char* state,unsigned int timeout,const char* command) { job_state_t i = GMJob::get_state(state); if(i != JOB_STATE_UNDEFINED) { return add(i,timeout,command); }; return false; } static ContinuationPlugins::action_t get_action(const char *s,unsigned int l) { if((l == 4) && (strncasecmp(s,"fail",4) == 0)) return ContinuationPlugins::act_fail; if((l == 4) && (strncasecmp(s,"pass",4) == 0)) return ContinuationPlugins::act_pass; if((l == 3) && (strncasecmp(s,"log",3) == 0)) return ContinuationPlugins::act_log; return ContinuationPlugins::act_undefined; } #define RES_ONSUCCESS 0 #define RES_ONFAILURE 1 #define RES_ONTIMEOUT 2 #define RES_TIMEOUT 3 #define RES_UNDEFINED -1 static int get_result(const char *s,unsigned int l) { if((l == 9) && (strncasecmp(s,"onsuccess",9) == 0)) return RES_ONSUCCESS; if((l == 9) && (strncasecmp(s,"onfailure",9) == 0)) return RES_ONFAILURE; if((l == 9) && (strncasecmp(s,"ontimeout",9) == 0)) return RES_ONTIMEOUT; if((l == 7) && (strncasecmp(s,"timeout",7) == 0)) return RES_TIMEOUT; return RES_UNDEFINED; } bool ContinuationPlugins::add(job_state_t state,const char* options,const char* command) { if((state == JOB_STATE_ACCEPTED) || (state == JOB_STATE_PREPARING) || (state == JOB_STATE_SUBMITTING) || (state == JOB_STATE_FINISHING) || (state == JOB_STATE_FINISHED) || (state == JOB_STATE_DELETED)) { } else { return false; }; // go through options separated by ',' action_t onsuccess = act_pass; action_t onfailure = act_fail; action_t ontimeout = act_fail; unsigned int to = 0; const char *opt_p = options; for(;*opt_p;) { const char *next_opt_p = strchr(opt_p,','); if(next_opt_p == NULL) next_opt_p=opt_p+strlen(opt_p); const char *val_p = strchr(opt_p,'='); unsigned int name_len; unsigned int val_len; if((val_p == NULL) || (val_p >= next_opt_p)) { name_len = next_opt_p-opt_p; val_p=next_opt_p; val_len=0; } else { name_len = val_p-opt_p; val_p++; val_len=next_opt_p-val_p; }; action_t act = act_undefined; int res = get_result(opt_p,name_len); if(res == RES_UNDEFINED) { // can be timeout if(val_len != 0) return false; res=RES_TIMEOUT; 
val_p=opt_p; val_len=next_opt_p-val_p; }; if(res != RES_TIMEOUT) { act=get_action(val_p,val_len); if(act == act_undefined) return false; }; switch(res) { case RES_ONSUCCESS: onsuccess=act; break; case RES_ONFAILURE: onfailure=act; break; case RES_ONTIMEOUT: ontimeout=act; break; case RES_TIMEOUT: { if(val_len > 0) { char* e; to=strtoul(val_p,&e,0); if(e != next_opt_p) return false; } else { to=0; }; }; break; default: return false; }; opt_p=next_opt_p; if(!(*opt_p)) break; opt_p++; }; command_t cmd; cmd.cmd=command; cmd.to=to; cmd.onsuccess=onsuccess; cmd.onfailure=onfailure; cmd.ontimeout=ontimeout; commands[state].push_back(cmd); return true; } bool ContinuationPlugins::add(const char* state,const char* options,const char* command) { job_state_t i = GMJob::get_state(state); if(i != JOB_STATE_UNDEFINED) { return add(i,options,command); }; return false; } void ContinuationPlugins::run(const GMJob &job,const GMConfig& config,std::list& results) { job_state_t state = job.get_state(); for(std::list::iterator command = commands[state].begin(); command != commands[state].end();++command) { action_t act = act_pass; if(command->cmd.length() == 0) { results.push_back(result_t(act_pass)); continue; }; std::string cmd = command->cmd; for(std::string::size_type p = 0;;) { p=cmd.find('%',p); if(p==std::string::npos) break; if(cmd[p+1]=='I') { cmd.replace(p,2,job.get_id().c_str()); p+=job.get_id().length(); } else if(cmd[p+1]=='S') { cmd.replace(p,2,job.get_state_name()); p+=strlen(job.get_state_name()); } else if(cmd[p+1]=='R') { // Get correct session root (without job subdir) for this job std::string sessionroot(job.SessionDir().substr(0, job.SessionDir().rfind('/'))); cmd.replace(p,2,sessionroot); p+=sessionroot.length(); } else { p+=2; }; }; if(!config.Substitute(cmd, job.get_user())) { results.push_back(result_t(act_undefined)); continue; // or break ? }; std::string res_out(""); std::string res_err(""); int to = command->to; int result = -1; Arc::Run re(cmd); re.AssignStdout(res_out); re.AssignStderr(res_err); re.KeepStdin(); std::string response; if(re.Start()) { bool r = to?re.Wait(to):re.Wait(); if(!r) { response="TIMEOUT"; act=command->ontimeout; } else { result=re.Result(); if(result == 0) { act=command->onsuccess; } else { response="FAILED"; act=command->onfailure; }; }; } else { response="FAILED to start plugin"; // act=command->onfailure; ?? 
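// Illustration of the plugin mechanism as a whole (hypothetical configuration, the
// command path is an assumption): a plugin registered with
//   add("FINISHED", "timeout=60,onfailure=log", "/usr/local/bin/cleanup %I")
// is parsed by add() above into to=60, onsuccess=act_pass (default), onfailure=act_log,
// ontimeout=act_fail (default); a bare option without '=' is taken as the timeout value.
// When run() reaches that state it substitutes %I with the job id (%S state name,
// %R session root), waits up to 60 seconds for the command, maps its exit status or
// timeout to the configured action, and appends stdout/stderr to the recorded response.
//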
act=act_undefined; }; if(!res_out.empty()) { if(!response.empty()) response+=" : "; response+=res_out; }; if(!res_err.empty()) { if(!response.empty()) response+=" : "; response+=res_err; }; results.push_back(result_t(act,result,response)); if(act == act_fail) break; }; } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/PaxHeaders.7502/CommFIFO.h0000644000000000000000000000012312046735464025637 xustar000000000000000027 mtime=1352383284.841662 27 atime=1513200576.278723 29 ctime=1513200662.84078223 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/CommFIFO.h0000644000175000002070000000137612046735464025714 0ustar00mockbuildmock00000000000000#ifndef GM_COMMFIFO_H #define GM_COMMFIFO_H #include #include namespace ARex { class CommFIFO { private: class elem_t { public: elem_t(void):fd(-1),fd_keep(-1) { }; int fd; int fd_keep; }; std::list fds; int kick_in; int kick_out; Glib::Mutex lock; int timeout_; bool make_pipe(void); public: typedef enum { add_success, add_busy, add_error } add_result; CommFIFO(void); ~CommFIFO(void); add_result add(const std::string& dir_path); void wait(int timeout); void wait(void) { wait(timeout_); }; void timeout(int t) { timeout_=t; }; }; bool SignalFIFO(const std::string& dir_path); bool PingFIFO(const std::string& dir_path); } // namespace ARex #endif // GM_COMMFIFO_H nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/PaxHeaders.7502/README0000644000000000000000000000012311016612002024761 xustar000000000000000026 mtime=1211831298.62818 27 atime=1513200576.254723 30 ctime=1513200662.828782083 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobs/README0000644000175000002070000000003211016612002025022 0ustar00mockbuildmock00000000000000Main job management code. nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/loaders0000644000000000000000000000013213214316027024532 xustar000000000000000030 mtime=1513200663.044784725 30 atime=1513200668.718854121 30 ctime=1513200663.044784725 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/loaders/0000755000175000002070000000000013214316027024655 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/loaders/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515026651 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200602.235040998 30 ctime=1513200663.041784688 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/loaders/Makefile.am0000644000175000002070000000151012052416515026710 0ustar00mockbuildmock00000000000000pkglibexec_PROGRAMS = downloader uploader downloader_SOURCES = downloader.cpp downloader_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) downloader_LDADD = \ ../conf/libconf.la ../jobs/libjobs.la ../log/liblog.la \ ../files/libfiles.la ../run/librun.la ../misc/libmisc.la \ ../mail/libmail.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la uploader_SOURCES = uploader.cpp uploader_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) uploader_LDADD = \ ../conf/libconf.la ../jobs/libjobs.la ../log/liblog.la \ ../files/libfiles.la ../run/librun.la ../misc/libmisc.la \ ../mail/libmail.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/loaders/PaxHeaders.7502/Makefile.in0000644000000000000000000000013013214315732026654 xustar000000000000000030 
mtime=1513200602.286041621 30 atime=1513200649.995625128 28 ctime=1513200663.0427847 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/loaders/Makefile.in0000644000175000002070000006750113214315732026735 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ pkglibexec_PROGRAMS = downloader$(EXEEXT) uploader$(EXEEXT) subdir = src/services/a-rex/grid-manager/loaders DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__installdirs = "$(DESTDIR)$(pkglibexecdir)" PROGRAMS = $(pkglibexec_PROGRAMS) am_downloader_OBJECTS = downloader-downloader.$(OBJEXT) downloader_OBJECTS = $(am_downloader_OBJECTS) downloader_DEPENDENCIES = ../conf/libconf.la ../jobs/libjobs.la \ ../log/liblog.la ../files/libfiles.la ../run/librun.la \ ../misc/libmisc.la ../mail/libmail.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la downloader_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(downloader_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_uploader_OBJECTS = uploader-uploader.$(OBJEXT) uploader_OBJECTS = $(am_uploader_OBJECTS) uploader_DEPENDENCIES = ../conf/libconf.la ../jobs/libjobs.la \ ../log/liblog.la ../files/libfiles.la ../run/librun.la \ ../misc/libmisc.la ../mail/libmail.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la uploader_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(uploader_CXXFLAGS) \ 
$(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(downloader_SOURCES) $(uploader_SOURCES) DIST_SOURCES = $(downloader_SOURCES) $(uploader_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = 
@GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = 
@XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ downloader_SOURCES = downloader.cpp downloader_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) downloader_LDADD = \ ../conf/libconf.la ../jobs/libjobs.la ../log/liblog.la \ ../files/libfiles.la ../run/librun.la ../misc/libmisc.la \ ../mail/libmail.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la uploader_SOURCES = uploader.cpp uploader_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) uploader_LDADD = \ ../conf/libconf.la ../jobs/libjobs.la ../log/liblog.la \ ../files/libfiles.la ../run/librun.la ../misc/libmisc.la \ ../mail/libmail.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) 
$(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/loaders/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/loaders/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibexecPROGRAMS: $(pkglibexec_PROGRAMS) @$(NORMAL_INSTALL) test -z "$(pkglibexecdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibexecdir)" @list='$(pkglibexec_PROGRAMS)'; test -n "$(pkglibexecdir)" || list=; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p || test -f $$p1; \ then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(pkglibexecdir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(pkglibexecdir)$$dir" || exit $$?; \ } \ ; done uninstall-pkglibexecPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(pkglibexec_PROGRAMS)'; test -n "$(pkglibexecdir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkglibexecdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkglibexecdir)" && rm -f $$files clean-pkglibexecPROGRAMS: @list='$(pkglibexec_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list downloader$(EXEEXT): $(downloader_OBJECTS) $(downloader_DEPENDENCIES) @rm -f downloader$(EXEEXT) $(downloader_LINK) $(downloader_OBJECTS) $(downloader_LDADD) $(LIBS) uploader$(EXEEXT): $(uploader_OBJECTS) $(uploader_DEPENDENCIES) @rm -f uploader$(EXEEXT) $(uploader_LINK) $(uploader_OBJECTS) $(uploader_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/downloader-downloader.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/uploader-uploader.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF 
$(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< downloader-downloader.o: downloader.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(downloader_CXXFLAGS) $(CXXFLAGS) -MT downloader-downloader.o -MD -MP -MF $(DEPDIR)/downloader-downloader.Tpo -c -o downloader-downloader.o `test -f 'downloader.cpp' || echo '$(srcdir)/'`downloader.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/downloader-downloader.Tpo $(DEPDIR)/downloader-downloader.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='downloader.cpp' object='downloader-downloader.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(downloader_CXXFLAGS) $(CXXFLAGS) -c -o downloader-downloader.o `test -f 'downloader.cpp' || echo '$(srcdir)/'`downloader.cpp downloader-downloader.obj: downloader.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(downloader_CXXFLAGS) $(CXXFLAGS) -MT downloader-downloader.obj -MD -MP -MF $(DEPDIR)/downloader-downloader.Tpo -c -o downloader-downloader.obj `if test -f 'downloader.cpp'; then $(CYGPATH_W) 'downloader.cpp'; else $(CYGPATH_W) '$(srcdir)/downloader.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/downloader-downloader.Tpo $(DEPDIR)/downloader-downloader.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='downloader.cpp' object='downloader-downloader.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(downloader_CXXFLAGS) $(CXXFLAGS) -c -o downloader-downloader.obj `if test -f 'downloader.cpp'; then $(CYGPATH_W) 'downloader.cpp'; else $(CYGPATH_W) '$(srcdir)/downloader.cpp'; fi` uploader-uploader.o: uploader.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(uploader_CXXFLAGS) $(CXXFLAGS) -MT uploader-uploader.o -MD -MP -MF $(DEPDIR)/uploader-uploader.Tpo -c -o uploader-uploader.o `test -f 'uploader.cpp' || echo '$(srcdir)/'`uploader.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/uploader-uploader.Tpo $(DEPDIR)/uploader-uploader.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='uploader.cpp' object='uploader-uploader.o' libtool=no @AMDEPBACKSLASH@ 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(uploader_CXXFLAGS) $(CXXFLAGS) -c -o uploader-uploader.o `test -f 'uploader.cpp' || echo '$(srcdir)/'`uploader.cpp uploader-uploader.obj: uploader.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(uploader_CXXFLAGS) $(CXXFLAGS) -MT uploader-uploader.obj -MD -MP -MF $(DEPDIR)/uploader-uploader.Tpo -c -o uploader-uploader.obj `if test -f 'uploader.cpp'; then $(CYGPATH_W) 'uploader.cpp'; else $(CYGPATH_W) '$(srcdir)/uploader.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/uploader-uploader.Tpo $(DEPDIR)/uploader-uploader.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='uploader.cpp' object='uploader-uploader.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(uploader_CXXFLAGS) $(CXXFLAGS) -c -o uploader-uploader.obj `if test -f 'uploader.cpp'; then $(CYGPATH_W) 'uploader.cpp'; else $(CYGPATH_W) '$(srcdir)/uploader.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 
's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) installdirs: for dir in "$(DESTDIR)$(pkglibexecdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibexecPROGRAMS \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibexecPROGRAMS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibexecPROGRAMS .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibexecPROGRAMS ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibexecPROGRAMS \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibexecPROGRAMS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/loaders/PaxHeaders.7502/uploader.cpp0000644000000000000000000000012412701730101027120 xustar000000000000000027 mtime=1460121665.487171 27 atime=1513200576.169722 30 ctime=1513200663.044784725 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/loaders/uploader.cpp0000644000175000002070000005633012701730101027174 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif /* Upload files specified in job.ID.output. result: 0 - ok, 1 - unrecoverable error, 2 - potentially recoverable, 3 - certificate error, 4 - should retry. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "../jobs/GMJob.h" #include "../files/ControlFileContent.h" #include "../files/ControlFileHandling.h" #include "../files/Delete.h" #include "../conf/GMConfig.h" #include "../misc/proxy.h" #include "../conf/UrlMapConfig.h" #include "../conf/CacheConfig.h" using namespace ARex; static Arc::Logger logger(Arc::Logger::getRootLogger(), "Uploader"); /* maximum number of retries (for every source/destination) */ #define MAX_RETRIES 5 /* maximum number simultaneous uploads */ #define MAX_UPLOADS 5 class PointPair; static void CollectCredentials(std::string& proxy,std::string& cert,std::string& key,std::string& cadir) { proxy=Arc::GetEnv("X509_USER_PROXY"); if(proxy.empty()) { cert=Arc::GetEnv("X509_USER_CERT"); key=Arc::GetEnv("X509_USER_KEY"); }; if(proxy.empty() && cert.empty()) { proxy="/tmp/x509_up"+Arc::tostring(getuid()); }; cadir=Arc::GetEnv("X509_CERT_DIR"); if(cadir.empty()) cadir="/etc/grid-security/certificates"; } class FileDataEx : public FileData { public: typedef std::list::iterator iterator; Arc::DataStatus res; PointPair* pair; /* Times are string to eliminate the need to convert * string to time_t while reading from local file */ std::string starttime; /* time of transfer started */ std::string endtime; /* time of trnasfer finished */ /* the status of file in cache: * - "yes" -- was before; * - "linked" -- downloaded and got in cache right now; * - "no" -- downloaded but didn't get in cache */ std::string incache; FileDataEx(const FileData& f) : FileData(f), res(Arc::DataStatus::Success), pair(NULL) {} FileDataEx(const FileData& f, Arc::DataStatus r) : FileData(f), res(r), pair(NULL) {} }; static std::list job_files_; static std::list job_files; static std::list processed_files; static std::list failed_files; static Arc::SimpleCondition pair_condition; static int pairs_initiated = 0; class SimpleConditionLock { private: Arc::SimpleCondition& cond_; public: SimpleConditionLock(Arc::SimpleCondition& cond):cond_(cond) { cond_.lock(); }; ~SimpleConditionLock(void) { cond_.unlock(); }; }; int clean_files(std::list &job_files,char* session_dir) { std::string session(session_dir); if(delete_all_files(session,job_files,true,0,0) != 2) return 0; return 1; } class PointPair { public: Arc::DataHandle source; Arc::DataHandle destination; PointPair(const std::string& source_str, const std::string& destination_str, const Arc::UserConfig& usercfg) : source(source_str, usercfg), destination(destination_str, usercfg) {}; ~PointPair(void) {}; static void callback(Arc::DataMover*,Arc::DataStatus res,void* arg) { FileDataEx::iterator &it = *((FileDataEx::iterator*)arg); pair_condition.lock(); if(!res.Passed()) { it->res=res; logger.msg(Arc::ERROR, "Failed uploading file %s - %s", it->lfn, std::string(res)); if((it->pair->source->GetTries() <= 0) || 
(it->pair->destination->GetTries() <= 0)) { delete it->pair; it->pair=NULL; failed_files.push_back(*it); } else { job_files.push_back(*it); logger.msg(Arc::ERROR, "Retrying"); }; } else { logger.msg(Arc::INFO, "Uploaded file %s", it->lfn); delete it->pair; it->pair=NULL; it->endtime=Arc::Time().str(Arc::UTCTime); processed_files.push_back(*it); }; job_files.erase(it); --pairs_initiated; pair_condition.signal_nonblock(); pair_condition.unlock(); delete ⁢ }; }; void expand_files(std::list &job_files,char* session_dir) { for(FileData::iterator i = job_files.begin();i!=job_files.end();) { std::string url = i->lfn; // Only ftp and gsiftp can be expanded to directories so far if(strncasecmp(url.c_str(),"ftp://",6) && strncasecmp(url.c_str(),"gsiftp://",9)) { ++i; continue; }; // user must ask explicitly if(url[url.length()-1] != '/') { ++i; continue; }; std::string path(session_dir); path+="/"; path+=i->pfn; int l = strlen(session_dir) + 1; try { Glib::Dir dir(path); std::string file; for(;;) { file=dir.read_name(); if(file.empty()) break; if(file == ".") continue; if(file == "..") continue; std::string path_ = path; path_+="/"; path+=file; struct stat st; if(lstat(path_.c_str(),&st) != 0) continue; // do not follow symlinks if(S_ISREG(st.st_mode)) { std::string lfn = url+file; job_files.push_back(FileData(path_.c_str()+l,lfn.c_str())); } else if(S_ISDIR(st.st_mode)) { std::string lfn = url+file+"/"; // cause recursive search job_files.push_back(FileData(path_.c_str()+l,lfn.c_str())); }; }; i=job_files.erase(i); } catch(Glib::FileError& e) { ++i; }; }; } int main(int argc,char** argv) { Arc::LogStream logcerr(std::cerr); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::VERBOSE); int res=0; int n_threads = 1; int n_files = MAX_UPLOADS; /* used to find caches used by this user */ Arc::User user; bool use_conf_cache = false; unsigned long long int min_speed = 0; time_t min_speed_time = 300; unsigned long long int min_average_speed = 0; time_t max_inactivity_time = 300; bool secure = true; bool userfiles_only = false; bool passive = false; std::string failure_reason(""); std::string x509_proxy, x509_cert, x509_key, x509_cadir; GMConfig config; // process optional arguments for(;;) { opterr=0; int optc=getopt(argc,argv,"+hclpfC:n:t:u:U:s:S:a:i:d:r:"); if(optc == -1) break; switch(optc) { case 'h': { std::cerr<<"Usage: uploader [-hclpf] [-C conf_file] [-n files] [-t threads] [-U uid]"< 10) { logger.msg(Arc::WARNING, "Won't use more than 10 threads"); n_threads=10; }; UrlMapConfig url_map(config); logger.msg(Arc::INFO, "Uploader started"); Arc::initializeCredentialsType cred_type(Arc::initializeCredentialsType::SkipCredentials); Arc::UserConfig usercfg(cred_type); usercfg.UtilsDirPath(control_dir); usercfg.SetUser(user); usercfg.CACertificatesDirectory(x509_cadir); Arc::DataMover mover; mover.retry(true); mover.secure(secure); mover.passive(passive); mover.verbose(true); // statistics will be shown if logging is higher than VERBOSE if(min_speed != 0) mover.set_default_min_speed(min_speed,min_speed_time); if(min_average_speed != 0) mover.set_default_min_average_speed(min_average_speed); if(max_inactivity_time != 0) mover.set_default_max_inactivity_time(max_inactivity_time); bool transferred = true; bool credentials_expired = false; std::list::iterator it = job_files_.begin(); std::list::iterator it2 = job_files_.begin(); // this map will be used to write back dynamic output file lists in case of error std::map > dynamic_outputs; // get the list of 
output files if(!job_output_read_file(job.get_id(),config,job_files_)) { failure_reason+="Internal error in uploader\n"; logger.msg(Arc::ERROR, "Can't read list of output files"); res=1; goto exit; } // add any output files dynamically added by the user during the job for(it = job_files_.begin(); it != job_files_.end() ; ++it) { if(it->pfn.find("@") == 1) { // GM puts a slash on the front of the local file std::string outputfilelist = session_dir + std::string("/") + it->pfn.substr(2); std::list dynamic_files; logger.msg(Arc::INFO, "Reading output files from user generated list in %s", outputfilelist); if (!job_Xput_read_file(outputfilelist, dynamic_files)) { logger.msg(Arc::ERROR, "Error reading user generated output file list in %s", outputfilelist); res=1; goto exit; } dynamic_outputs[it->pfn] = dynamic_files; job_files_.insert(job_files_.end(), dynamic_files.begin(), dynamic_files.end()); } } // check if any files share the same LFN, if so allow overwriting existing LFN for (it = job_files_.begin(); it != job_files_.end(); it++) { bool done = false; for (it2 = job_files_.begin(); it2 != job_files_.end(); it2++) { if (it != it2 && !it->lfn.empty() && !it2->lfn.empty()) { // error if lfns (including locations) are identical if (it->lfn == it2->lfn) { logger.msg(Arc::ERROR, "Two identical output destinations: %s", it->lfn); res = 1; goto exit; } Arc::URL u_it(it->lfn); Arc::URL u_it2(it2->lfn); if (u_it == u_it2) { // error if pfns are different if (it->pfn != it2->pfn) { logger.msg(Arc::ERROR, "Cannot upload two different files %s and %s to same LFN: %s", it->pfn, it2->pfn, it->lfn); res = 1; goto exit; } mover.force_to_meta(true); done = true; break; } } } if (done) break; } // remove bad files if(clean_files(job_files_,session_dir) != 0) { failure_reason+="Internal error in uploader\n"; logger.msg(Arc::ERROR, "Can't remove junk files"); res=1; goto exit; }; // remove dynamic output file lists from the files to upload it = job_files_.begin(); while (it != job_files_.end()) { if(it->pfn.find("@") == 1) it = job_files_.erase(it); else it++; } expand_files(job_files_,session_dir); for(std::list::iterator i = job_files_.begin();i!=job_files_.end();++i) { job_files.push_back(*i); }; if(!job.GetLocalDescription(config)) { logger.msg(Arc::ERROR, "Can't read job local description"); res=1; goto exit; }; // initialize structures to handle upload /* TODO: add threads=# to all urls if n_threads!=1 */ // Main upload cycle if(!userfiles_only) for(;;) { // Initiate transfers int n = 0; SimpleConditionLock local_lock(pair_condition); for(FileDataEx::iterator i=job_files.begin();i!=job_files.end();) { if(i->lfn.find(":") != std::string::npos) { /* is it lfn ? 
*/ ++n; if(n <= pairs_initiated) { ++i; continue; }; // skip files being processed if(n > n_files) break; // quit if not allowed to process more /* have source and file to upload */ std::string source; std::string destination = i->lfn; if(i->pair == NULL) { /* define place to store */ std::string stdlog; JobLocalDescription* local = job.GetLocalDescription(); if(local) stdlog=local->stdlog; if(stdlog.length() > 0) stdlog="/"+stdlog+"/"; if((stdlog.length() > 0) && (strncmp(stdlog.c_str(),i->pfn.c_str(),stdlog.length()) == 0)) { stdlog=i->pfn.c_str()+stdlog.length(); source=std::string("file://")+control_dir+"/job."+id+"."+stdlog; } else { source=std::string("file://")+session_dir+i->pfn; }; if(strncasecmp(destination.c_str(),"file:/",6) == 0) { failure_reason+=std::string("User requested to store output locally ")+destination.c_str()+"\n"; logger.msg(Arc::ERROR, "Local destination for uploader %s", destination); res=1; i->res = Arc::DataStatus::WriteAcquireError; failed_files.push_back(*i); i = job_files.erase(i); continue; }; if(i->cred.empty()) { usercfg.ProxyPath(x509_proxy); usercfg.CertificatePath(x509_cert); usercfg.KeyPath(x509_key); } else { usercfg.ProxyPath(i->cred); usercfg.CertificatePath(""); usercfg.KeyPath(""); }; PointPair* pair = new PointPair(source,destination,usercfg); if(!(pair->source)) { failure_reason+=std::string("Can't accept URL ")+source.c_str()+"\n"; logger.msg(Arc::ERROR, "Can't accept URL: %s", source); delete pair; res=1; i->res = Arc::DataStatus::ReadAcquireError; failed_files.push_back(*i); i = job_files.erase(i); continue; }; if(!(pair->destination)) { failure_reason+=std::string("Can't accept URL ")+destination.c_str()+"\n"; logger.msg(Arc::ERROR, "Can't accept URL: %s", destination); delete pair; res=1; i->res = Arc::DataStatus::WriteAcquireError; failed_files.push_back(*i); i = job_files.erase(i); continue; }; i->pair=pair; }; FileDataEx::iterator* it = new FileDataEx::iterator(i); std::string prefix = i->pfn; if (prefix.find('/') != std::string::npos) prefix.erase(0, prefix.find('/')+1); i->starttime=Arc::Time().str(Arc::UTCTime); Arc::DataStatus dres = mover.Transfer(*(i->pair->source), *(i->pair->destination), *cache, url_map, min_speed, min_speed_time, min_average_speed, max_inactivity_time, &PointPair::callback, it, prefix.c_str()); if (!dres.Passed()) { failure_reason+=std::string("Failed to initiate file transfer: ")+source.c_str()+" - "+std::string(dres)+"\n"; logger.msg(Arc::ERROR, "Failed to initiate file transfer: %s - %s", source, std::string(dres)); delete it; res=1; i->res = dres; failed_files.push_back(*i); i = job_files.erase(i); continue; }; ++pairs_initiated; }; ++i; }; if(pairs_initiated <= 0) break; // Looks like no more files to process // Processing initiated - now wait for event pair_condition.wait_nonblock(); }; // Print upload summary and update transfer accounting information { std::list transfer_stats; transfer_stats.clear(); // paranoid initialization std::string transfer_parameters; std::list job_files_uploaded; for(FileDataEx::iterator i=processed_files.begin();i!=processed_files.end();++i) { logger.msg(Arc::INFO, "Uploaded %s", i->lfn); struct stat st; Arc::FileStat(job.SessionDir() + i->pfn, &st, true); transfer_parameters = "outputfile:"; transfer_parameters += "url=" + i->lfn + ','; transfer_parameters += "size=" + Arc::tostring(st.st_size) + ','; transfer_parameters += "starttime=" + i->starttime + ','; transfer_parameters += "endtime=" + i->endtime; transfer_stats.push_back(transfer_parameters); 
job_files_uploaded.push_back(*i); }; std::string fname = config.ControlDir() + "/job." + job.get_id() + ".statistics"; std::ofstream f(fname.c_str(),std::ios::out | std::ios::app); if(f.is_open() ) { for (std::list::iterator it=transfer_stats.begin(); it != transfer_stats.end(); ++it) { f << *it << std::endl; }; f.close(); fix_file_owner(fname,job); }; if(!job_output_status_write_file(job,config,job_files_uploaded)) { logger.msg(Arc::WARNING, "Failed writing output status file"); }; } for(FileDataEx::iterator i=failed_files.begin();i!=failed_files.end();++i) { if(i->res.Retryable()) { job_files.push_back(*i); logger.msg(Arc::ERROR,"Failed to upload (but may be retried) %s",i->lfn); res = 4; continue; } logger.msg(Arc::ERROR, "Failed to upload %s", i->lfn); failure_reason+="Output file: "+i->lfn+" - "+(std::string)(i->res)+"\n"; i->lfn=""; job_files.push_back(*i); if(i->res == Arc::DataStatus::CredentialsExpiredError) credentials_expired=true; transferred=false; }; // Check if all files have been properly uploaded if(!transferred) { logger.msg(Arc::INFO, "Some uploads failed"); res=2; if(credentials_expired) res=3; // recreate dynamic lists if necessary for (std::map >::iterator dyn_out = dynamic_outputs.begin(); dyn_out != dynamic_outputs.end(); ++dyn_out) { std::list failed_outputs; for (std::list::iterator dyn_file = dyn_out->second.begin(); dyn_file != dyn_out->second.end(); ++dyn_file) { for (FileDataEx::iterator i=failed_files.begin();i!=failed_files.end();++i) { if (*i == *dyn_file) { failed_outputs.push_back(*dyn_file); } } } if (!failed_outputs.empty()) { std::string outputfilelist = session_dir + std::string("/") + dyn_out->first.substr(2); logger.msg(Arc::DEBUG, "Writing back dynamic output file %s", outputfilelist); if (!job_Xput_write_file(outputfilelist, failed_outputs)) { logger.msg(Arc::WARNING, "Failed to rewrite output file list %s. Job resuming may not work", dyn_out->first); } // @output FileData at_output_file(dyn_out->first, ""); job_files.push_back(at_output_file); // output FileData output_file(std::string('/' + dyn_out->first.substr(2)), ""); job_files.push_back(output_file); } } } else if(res == 4) { logger.msg(Arc::INFO,"Some uploads failed, but (some) may be retried"); } if(!userfiles_only) { job_files_.clear(); for(FileDataEx::iterator i = job_files.begin();i!=job_files.end();++i) job_files_.push_back(*i); if(!job_output_write_file(job,config,job_files_)) { logger.msg(Arc::WARNING, "Failed writing changed output file"); }; // clean uploaded files here clean_files(job_files_,session_dir); }; exit: // release input files used for this job cache->Release(); delete cache; if(res != 0 && res != 4) { job_failed_mark_add(job,config,failure_reason); }; logger.msg(Arc::INFO, "Leaving uploader (%i)", res); return res; } nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/loaders/PaxHeaders.7502/README0000644000000000000000000000012311016612002025455 xustar000000000000000026 mtime=1211831298.62818 27 atime=1513200576.169722 30 ctime=1513200663.041784688 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/loaders/README0000644000175000002070000000002611016612002025521 0ustar00mockbuildmock00000000000000File staging modules. 
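The header comment of uploader.cpp above (mirrored in downloader.cpp below) defines the staging exit-code convention: 0 - ok, 1 - unrecoverable error, 2 - potentially recoverable, 3 - certificate error, 4 - should retry. The following is a minimal, hedged sketch of how a supervising process could map those codes to follow-up actions; the enum and function names are illustrative assumptions only and are not part of the A-REX sources.

// Illustrative sketch: maps the documented uploader/downloader exit codes
// (0 ok, 1 unrecoverable, 2 potentially recoverable, 3 certificate error,
// 4 should retry) onto a follow-up action. StagingAction and
// classify_staging_result() are hypothetical names, not A-REX code.
#include <iostream>

enum class StagingAction { Continue, FailJob, FailJobRetryable, WaitForNewProxy, RetryTransfer };

static StagingAction classify_staging_result(int exit_code) {
  switch (exit_code) {
    case 0:  return StagingAction::Continue;          // all files staged
    case 2:  return StagingAction::FailJobRetryable;  // potentially recoverable error
    case 3:  return StagingAction::WaitForNewProxy;   // credentials expired
    case 4:  return StagingAction::RetryTransfer;     // some transfers may be retried
    case 1:                                           // unrecoverable error
    default: return StagingAction::FailJob;
  }
}

int main() {
  for (int code : {0, 1, 2, 3, 4})
    std::cout << "exit " << code << " -> action "
              << static_cast<int>(classify_staging_result(code)) << std::endl;
  return 0;
}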
nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/loaders/PaxHeaders.7502/downloader.cpp0000644000000000000000000000012412701730101027443 xustar000000000000000027 mtime=1460121665.487171 27 atime=1513200576.235723 30 ctime=1513200663.043784712 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/loaders/downloader.cpp0000644000175000002070000006601212701730101027515 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif /* Download files specified in job.ID.input and check if user uploaded files. Additionally check if this is a migrated job and if so kill the job on old cluster. result: 0 - ok, 1 - unrecoverable error, 2 - potentially recoverable, 3 - certificate error, 4 - should retry. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "../jobs/GMJob.h" #include "../files/ControlFileContent.h" #include "../files/ControlFileHandling.h" #include "../files/Delete.h" #include "../misc/proxy.h" #include "../conf/UrlMapConfig.h" #include "../conf/CacheConfig.h" #include "../conf/GMConfig.h" using namespace ARex; static Arc::Logger logger(Arc::Logger::getRootLogger(), "Downloader"); /* check for user uploaded files every 60 seconds */ #define CHECK_PERIOD 60 /* check for user uploaded files every 5 seconds if no checksum is involved */ #define CHECK_PERIOD_FAST 5 /* maximum number of retries (for every source/destination) */ #define MAX_RETRIES 5 /* maximum number simultaneous downloads */ #define MAX_DOWNLOADS 5 /* maximum time for user files to upload (per file) */ #define MAX_USER_TIME 600 class PointPair; static void CollectCredentials(std::string& proxy,std::string& cert,std::string& key,std::string& cadir) { proxy=Arc::GetEnv("X509_USER_PROXY"); if(proxy.empty()) { cert=Arc::GetEnv("X509_USER_CERT"); key=Arc::GetEnv("X509_USER_KEY"); }; if(proxy.empty() && cert.empty()) { proxy="/tmp/x509_up"+Arc::tostring(getuid()); }; cadir=Arc::GetEnv("X509_CERT_DIR"); if(cadir.empty()) cadir="/etc/grid-security/certificates"; } class FileDataEx : public FileData { public: typedef std::list::iterator iterator; Arc::DataStatus res; PointPair* pair; /* Times are string to eliminate the need to convert * string to time_t while reading from local file */ std::string starttime; /* time of transfer started */ std::string endtime; /* time of transfer finished */ /* if the file was retrieved from cache: * - "yes"; * - "no"; */ std::string fromcache; time_t mtime; FileDataEx(const FileData& f) : FileData(f), res(Arc::DataStatus::Success), pair(NULL), mtime((time_t)(-1)) {} FileDataEx(const FileData& f, Arc::DataStatus r) : FileData(f), res(r), pair(NULL), mtime((time_t)(-1)) {} }; static std::list job_files_; static std::list job_files; static std::list processed_files; static std::list failed_files; static Arc::SimpleCondition pair_condition; static int pairs_initiated = 0; class SimpleConditionLock { private: Arc::SimpleCondition& cond_; public: SimpleConditionLock(Arc::SimpleCondition& cond):cond_(cond) { cond_.lock(); }; ~SimpleConditionLock(void) { cond_.unlock(); }; }; static int clean_files(std::list &job_files,char* session_dir) { std::string session(session_dir); /* delete only downloadable files, let user manage his/hers files */ if(delete_all_files(session,job_files,false,0,0) != 2) return 0; return 1; } /* Check for existence of user uploadable file returns 0 if file exists 1 - it is not proper file or other error 2 - not here yet */ static int 
user_file_exists(FileDataEx &dt,char* session_dir,std::list* have_files,std::string* error = NULL) { struct stat st; const char *str = dt.lfn.c_str(); if(strcmp(str,"*.*") == 0) return 0; /* do not wait for this file */ std::string fname=std::string(session_dir) + '/' + dt.pfn; /* check if file does exist at all */ // TODO: FileAccess? if(lstat(fname.c_str(),&st) != 0) return 2; dt.mtime = st.st_mtime; /* check for misconfiguration */ /* parse files information */ char *str_; unsigned long long int fsize; unsigned long long int fsum = (unsigned long long int)(-1); bool have_size = false; bool have_checksum = false; errno = 0; fsize = strtoull(str,&str_,10); if((*str_) == '.') { if(str_ != str) have_size=true; str=str_+1; fsum = strtoull(str,&str_,10); if((*str_) != 0) { logger.msg(Arc::ERROR, "Invalid checksum in %s for %s", dt.lfn, dt.pfn); if(error) (*error)="Bad information about file: checksum can't be parsed."; return 1; }; have_checksum=true; } else { if(str_ != str) have_size=true; if((*str_) != 0) { logger.msg(Arc::ERROR, "Invalid file size in %s for %s ", dt.lfn, dt.pfn); if(error) (*error)="Bad information about file: size can't be parsed."; return 1; }; }; if(S_ISDIR(st.st_mode)) { if(have_size || have_checksum) { if(error) (*error)="Expected file. Directory found."; return 1; }; return 0; }; if(!S_ISREG(st.st_mode)) { if(error) (*error)="Expected ordinary file. Special object found."; return 1; }; /* now check if proper size */ if(have_size) { if(st.st_size < fsize) return 2; if(st.st_size > fsize) { logger.msg(Arc::ERROR, "Invalid file: %s is too big.", dt.pfn); if(error) (*error)="Delivered file is bigger than specified."; return 1; /* too big file */ }; }; if(have_files) { std::list::iterator f = have_files->begin(); for(;f!=have_files->end();++f) { if(dt.pfn == *f) break; }; if(f == have_files->end()) return 2; } else if(have_checksum) { int h=::open(fname.c_str(),O_RDONLY); if(h==-1) { /* if we can't read that file job won't too */ logger.msg(Arc::ERROR, "Error accessing file %s", dt.pfn); if(error) (*error)="Delivered file is unreadable."; return 1; }; Arc::CRC32Sum crc; char buffer[1024]; ssize_t l; for(;;) { if((l=read(h,buffer,1024)) == -1) { logger.msg(Arc::ERROR, "Error reading file %s", dt.pfn); if(error) (*error)="Could not read file to compute checksum."; return 1; }; if(l==0) break; crc.add(buffer,l); }; close(h); crc.end(); if(fsum != crc.crc()) { if(have_size) { /* size was checked - it is an error to have wrong crc */ logger.msg(Arc::ERROR, "File %s has wrong CRC.", dt.pfn); if(error) (*error)="Delivered file has wrong checksum."; return 1; }; return 2; /* just not uploaded yet */ }; }; return 0; /* all checks passed - file is ok */ } class PointPair { public: Arc::DataHandle source; Arc::DataHandle destination; PointPair(const std::string& source_str, const std::string& destination_str, const Arc::UserConfig& usercfg) : source(source_str, usercfg), destination(destination_str, usercfg) {}; ~PointPair(void) {}; static void callback(Arc::DataMover*,Arc::DataStatus res,void* arg) { FileDataEx::iterator &it = *((FileDataEx::iterator*)arg); pair_condition.lock(); if(!res.Passed()) { it->res=res; logger.msg(Arc::ERROR, "Failed downloading file %s - %s", it->lfn, std::string(res)); if((it->pair->source->GetTries() <= 0) || (it->pair->destination->GetTries() <= 0)) { delete it->pair; it->pair=NULL; failed_files.push_back(*it); } else { job_files.push_back(*it); logger.msg(Arc::ERROR, "Retrying"); }; } else { logger.msg(Arc::INFO, "Downloaded file %s", it->lfn); 
delete it->pair; it->pair=NULL; it->endtime=Arc::Time().str(Arc::UTCTime); if (res == Arc::DataStatus::SuccessCached) it->fromcache="yes"; else it->fromcache="no"; processed_files.push_back(*it); }; job_files.erase(it); --pairs_initiated; pair_condition.signal_nonblock(); pair_condition.unlock(); delete ⁢ }; }; int main(int argc,char** argv) { Arc::LogStream logcerr(std::cerr); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::VERBOSE); int res=0; bool not_uploaded; time_t files_changed = 0; bool first_loop = true; int n_threads = 1; int n_files = MAX_DOWNLOADS; // Final owner of downloaded files. Modified by -U or -u options Arc::User user; bool use_conf_cache=false; unsigned long long int min_speed = 0; time_t min_speed_time = 300; unsigned long long int min_average_speed = 0; time_t max_inactivity_time = 300; bool secure = true; bool userfiles_only = false; bool passive = false; std::string preferred_pattern(""); std::string failure_reason(""); std::string x509_proxy, x509_cert, x509_key, x509_cadir; srand(time(NULL) + getpid()); GMConfig config; // process optional arguments for(;;) { opterr=0; int optc=getopt(argc,argv,"+hclpfC:n:t:n:u:U:s:S:a:i:d:r:"); if(optc == -1) break; switch(optc) { case 'h': { std::cerr<<"Usage: downloader [-hclpf] [-C conf_file] [-n files] [-t threads] [-U uid]"< 10) { logger.msg(Arc::WARNING, "Won't use more than 10 threads"); n_threads=10; }; /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Add this to DataMove !!!!!!!!!!!! */ UrlMapConfig url_map(config); logger.msg(Arc::INFO, "Downloader started"); Arc::initializeCredentialsType cred_type(Arc::initializeCredentialsType::SkipCredentials); Arc::UserConfig usercfg(cred_type); usercfg.UtilsDirPath(control_dir); usercfg.SetUser(user); usercfg.CACertificatesDirectory(x509_cadir); Arc::DataMover mover; mover.retry(true); mover.secure(secure); mover.passive(passive); mover.verbose(true); // statistics will be shown if logging is higher than VERBOSE mover.set_preferred_pattern(preferred_pattern); if(min_speed != 0) mover.set_default_min_speed(min_speed,min_speed_time); if(min_average_speed != 0) mover.set_default_min_average_speed(min_average_speed); if(max_inactivity_time != 0) mover.set_default_max_inactivity_time(max_inactivity_time); bool transferred = true; bool credentials_expired = false; std::list output_files; if(!job_input_read_file(job.get_id(),config,job_files_)) { failure_reason+="Internal error in downloader\n"; logger.msg(Arc::ERROR, "Can't read list of input files"); res=1; goto exit; }; // check for duplicates (see bug 1285) for (std::list::iterator i = job_files_.begin(); i != job_files_.end(); i++) { for (std::list::iterator j = job_files_.begin(); j != job_files_.end(); j++) { if (i != j && j->pfn == i->pfn) { failure_reason+="Duplicate input files\n"; logger.msg(Arc::ERROR, "Error: duplicate file in list of input files: %s", i->pfn); res=1; goto exit; } } } // check if any input files are also output files (bug 1387 and 2793) if(job_output_read_file(job.get_id(),config,output_files)) { for (std::list::iterator j = output_files.begin(); j != output_files.end(); j++) { for (std::list::iterator i = job_files_.begin(); i != job_files_.end(); i++) { if (i->pfn == j->pfn && i->lfn.find(':') != std::string::npos) { Arc::URL u(i->lfn); std::string opt = u.Option("cache"); // don't add copy option if exists or current option is "no" or "renew" if (opt.empty() || !(opt == "no" || opt == "renew" || opt == "copy")) { u.AddOption("cache", "copy", true); i->lfn = 
u.fullstr(); } } } } } else logger.msg(Arc::WARNING, "Can't read list of output files"); // remove bad files if(clean_files(job_files_,session_dir) != 0) { failure_reason+="Internal error in downloader\n"; logger.msg(Arc::ERROR, "Can't remove junk files"); res=1; goto exit; }; for(std::list::iterator i = job_files_.begin();i!=job_files_.end();++i) { job_files.push_back(*i); }; if(!job.GetLocalDescription(config)) { failure_reason+="Internal error in downloader\n"; logger.msg(Arc::ERROR, "Can't read job local description"); res=1; goto exit; }; // initialize structures to handle download /* TODO: add threads=# to all urls if n_threads!=1 */ // Main download cycle if(!userfiles_only) for(;;) { // Initiate transfers int n = 0; SimpleConditionLock local_lock(pair_condition); for(FileDataEx::iterator i=job_files.begin();i!=job_files.end();++i) { if(i->lfn.find(":") != std::string::npos) { /* is it lfn ? */ ++n; if(n <= pairs_initiated) continue; // skip files being processed if(n > n_files) break; // quit if not allowed to process more /* have place and file to download */ std::string destination=std::string("file://") + session_dir + i->pfn; std::string source = i->lfn; if(i->pair == NULL) { /* define place to store */ if(strncasecmp(source.c_str(),"file:/",6) == 0) { failure_reason+=std::string("User requested local input file ")+source.c_str()+"\n"; logger.msg(Arc::ERROR, "Local source for download: %s", source); res=1; goto exit; }; if(i->cred.empty()) { usercfg.ProxyPath(x509_proxy); usercfg.CertificatePath(x509_cert); usercfg.KeyPath(x509_key); } else { usercfg.ProxyPath(i->cred); usercfg.CertificatePath(""); usercfg.KeyPath(""); }; PointPair* pair = new PointPair(source,destination,usercfg); if(!(pair->source)) { failure_reason+=std::string("Can't accept URL ")+source.c_str()+"\n"; logger.msg(Arc::ERROR, "Can't accept URL: %s", source); delete pair; res=1; goto exit; }; if(!(pair->destination)) { failure_reason+=std::string("Can't accept URL ")+destination.c_str()+"\n"; logger.msg(Arc::ERROR, "Can't accept URL: %s", destination); delete pair; res=1; goto exit; }; i->pair=pair; }; FileDataEx::iterator* it = new FileDataEx::iterator(i); std::string prefix = i->pfn; if (prefix.find('/') != std::string::npos) prefix.erase(0, prefix.find('/')+1); i->starttime=Arc::Time().str(Arc::UTCTime); Arc::DataStatus dres = mover.Transfer(*(i->pair->source), *(i->pair->destination), *cache, url_map, min_speed, min_speed_time, min_average_speed, max_inactivity_time, &PointPair::callback, it, prefix.c_str()); if (!dres.Passed()) { failure_reason+=std::string("Failed to initiate file transfer: ")+source.c_str()+" - "+std::string(dres)+"\n"; logger.msg(Arc::ERROR, "Failed to initiate file transfer: %s - %s", source, std::string(dres)); delete it; res=1; goto exit; }; ++pairs_initiated; }; }; if(pairs_initiated <= 0) break; // Looks like no more files to process // Processing initiated - now wait for event pair_condition.wait_nonblock(); }; { std::list transfer_stats; transfer_stats.clear(); // paranoid initialization std::string transfer_parameters; // Print download summary and transfer accounting information for(FileDataEx::iterator i=processed_files.begin();i!=processed_files.end();++i) { logger.msg(Arc::INFO, "Downloaded %s", i->lfn); struct stat st; Arc::FileStat(job.SessionDir() + i->pfn, &st, true); transfer_parameters = "inputfile:"; transfer_parameters += "url=" + i->lfn + ','; transfer_parameters += "size=" + Arc::tostring(st.st_size) + ','; transfer_parameters += "starttime=" + i->starttime + ','; 
transfer_parameters += "endtime=" + i->endtime + ','; transfer_parameters += "fromcache=" + i->fromcache; transfer_stats.push_back(transfer_parameters); if(Arc::URL(i->lfn).Option("exec") == "yes") { fix_file_permissions(session_dir+i->pfn,true); }; }; std::string fname = config.ControlDir() + "/job." + job.get_id() + ".statistics"; std::ofstream f(fname.c_str(),std::ios::out | std::ios::app); if(f.is_open() ) { for (std::list::iterator it=transfer_stats.begin(); it != transfer_stats.end(); ++it) f << *it << std::endl; f.close(); } } for(FileDataEx::iterator i=failed_files.begin();i!=failed_files.end();++i) { if (i->res.Retryable() || i->res == Arc::DataStatus::TransferError) { logger.msg(Arc::ERROR, "Failed to download (but may be retried) %s",i->lfn); job_files.push_back(*i); res = 4; continue; } logger.msg(Arc::ERROR, "Failed to download %s", i->lfn); failure_reason+="Input file: "+i->lfn+" - "+(std::string)(i->res)+"\n"; if(i->res == Arc::DataStatus::CredentialsExpiredError) credentials_expired=true; transferred=false; }; files_changed = ::time(NULL); // Check if all files have been properly downloaded if(!transferred) { logger.msg(Arc::INFO, "Some downloads failed"); res=2; if(credentials_expired) res=3; goto exit; }; if(res == 4) logger.msg(Arc::INFO, "Some downloads failed, but may be retried"); job_files_.clear(); for(FileDataEx::iterator i = job_files.begin();i!=job_files.end();++i) job_files_.push_back(*i); if(!job_input_write_file(job,config,job_files_)) { logger.msg(Arc::WARNING, "Failed writing changed input file"); }; // check for user uploadable files // run cycle waiting for uploaded files for(;;) { not_uploaded=false; std::list uploaded_files; std::list* uploaded_files_ = NULL; if(job_input_status_read_file(job.get_id(),config,uploaded_files)) uploaded_files_=&uploaded_files; for(FileDataEx::iterator i=job_files.begin();i!=job_files.end();) { if(i->lfn.find(":") == std::string::npos) { /* is it lfn ? 
*/ /* process user uploadable file */ if(first_loop) logger.msg(Arc::INFO, "Checking user uploadable file: %s", i->pfn); std::string error; time_t mtime = i->mtime; int err=user_file_exists(*i,session_dir,uploaded_files_,&error); if(mtime != i->mtime) files_changed = ::time(NULL); if(err == 0) { /* file is uploaded */ logger.msg(Arc::INFO, "User has uploaded file %s", i->pfn); i=job_files.erase(i); job_files_.clear(); for(FileDataEx::iterator i = job_files.begin();i!=job_files.end();++i) job_files_.push_back(*i); if(!job_input_write_file(job,config,job_files_)) { logger.msg(Arc::WARNING, "Failed writing changed input file."); }; } else if(err == 1) { /* critical failure */ logger.msg(Arc::ERROR, "Critical error for uploadable file %s", i->pfn); failure_reason+="User file: "+i->pfn+" - "+error+"\n"; res=1; goto exit; } else { not_uploaded=true; ++i; }; } else { ++i; }; }; first_loop = false; if(!not_uploaded) break; // check for timeout unsigned int time_passed = (unsigned int)(time(NULL) - files_changed); if(time_passed > max_inactivity_time) { logger.msg(Arc::INFO, "No changes in uploadable files for %u seconds",time_passed); logger.msg(Arc::ERROR, "Uploadable files timed out"); res=2; break; }; if(uploaded_files_) { sleep(CHECK_PERIOD_FAST); } else { sleep(CHECK_PERIOD); }; }; job_files_.clear(); for(FileDataEx::iterator i = job_files.begin();i!=job_files.end();++i) job_files_.push_back(*i); if(!job_input_write_file(job,config,job_files_)) { logger.msg(Arc::WARNING, "Failed writing changed input file."); }; // Job migration functionality if (res == 0) { if(job.GetLocalDescription()->migrateactivityid != "") { // Complete the migration. const size_t found = job.GetLocalDescription()->migrateactivityid.rfind("/"); if (found != std::string::npos) { Arc::Job arc_job; arc_job.JobID = job.GetLocalDescription()->migrateactivityid; arc_job.JobManagementURL = Arc::URL(job.GetLocalDescription()->migrateactivityid.substr(0, found)); std::list jobs(1, &arc_job); Arc::UserConfig usercfg(arc_job.JobManagementURL.Protocol() == "https" ? Arc::initializeCredentialsType() : Arc::initializeCredentialsType(Arc::initializeCredentialsType::SkipCredentials)); if (arc_job.JobManagementURL.Protocol() != "https" || (arc_job.JobManagementURL.Protocol() == "https" && usercfg.CredentialsFound())) { Arc::JobControllerPluginLoader loader; Arc::JobControllerPlugin *jobctrl = loader.load("ARC1", usercfg); if (jobctrl) { jobctrl->UpdateJobs(jobs); if ((arc_job.State != Arc::JobState::QUEUING || !jobctrl->CancelJobs(jobs)) && !job.GetLocalDescription()->forcemigration) { res = 1; failure_reason = "FATAL ERROR: Migration failed attempting to kill old job \"" + job.GetLocalDescription()->migrateactivityid + "\"."; } } else { res = 1; failure_reason = "FATAL ERROR: Migration failed, could not locate ARC1 JobControllerPlugin plugin. 
Maybe it is not installed?"; } } else { res = 1; failure_reason = "FATAL ERROR: Migration failed, unable to find credentials."; } } } } exit: // clean unfinished files here job_files_.clear(); for(FileDataEx::iterator i = job_files.begin();i!=job_files.end();++i) job_files_.push_back(*i); clean_files(job_files_,session_dir); // release cache just in case if(res != 0 && res != 4) { cache->Release(); }; delete cache; if(res != 0 && res != 4) { job_failed_mark_add(job,config,failure_reason); }; logger.msg(Arc::INFO, "Leaving downloader (%i)", res); return res; } nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/gm-jobs.8.in0000644000000000000000000000012712377640012025221 xustar000000000000000027 mtime=1409237002.720427 30 atime=1513200649.873623636 30 ctime=1513200662.788781594 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/gm-jobs.8.in0000644000175000002070000000405012377640012025262 0ustar00mockbuildmock00000000000000.TH gm-jobs 8 "2013-01-30" "NorduGrid @VERSION@" "NorduGrid Toolkit" .SH NAME gm-jobs \- displays information and manages current jobs handled by ARC middleware .SH DESCRIPTION .B gm-jobs displays information related to jobs handled by locally running ARC middleware service A-REX. Different kind of information may be selected by using various options. This utility also can perform simple management operations - currently cancel processing of specific jobs and remove them. Default behavior is to print minimal information about all jobs currently handled by A-REX and some statistics. .SH SYNOPSIS gm-jobs [OPTION...] .SH OPTIONS .IP "\fB-h, --help\fR" Show help for available options .IP "\fB-l, --longlist\fR" display more information about each job .IP "\fB-c, --conffile=file\fR" use specified configuration file .IP "\fB-d, --controldir=dir\fR" read information from specified control directory .IP "\fB-s, --showshares\fR" print summary of jobs in each transfer share. 
Shows for input (preparing) and output (finishing) files the number of files being copied and the number queued per transfer share .IP "\fB-J, --notshowjobs\fR" do not print list of jobs (printed by default) .IP "\fB-S, --notshowstates\fR" do not print number of jobs in each state (printed by default) .IP "\fB-w, --showservice\fR" print state of the service .IP "\fB-f, --filteruser=dn\fR" show only jobs of user(s) with specified subject name(s) .IP "\fB-k, --killjob=id\fR" request to cancel job(s) with specified ID(s) .IP "\fB-K, --killuser=dn\fR" request to cancel jobs belonging to user(s) with specified subject name(s) .IP "\fB-r, --remjob=id\fR" request to clean job(s) with specified ID(s) .IP "\fB-R, --remuser=dn\fR" request to clean jobs belonging to user(s) with specified subject name(s) .IP "\fB-j, --filterjob=id\fR" show only jobs with specified ID(s) .IP "\fB-e, --listdelegs\fR" print list of available delegation IDs .IP "\fB-E, --showdeleg=id\fR" print delegation token of specified ID(s) .SH AUTHOR Aleksandr Konstantinov nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/jobplugin0000644000000000000000000000013213214316027025072 xustar000000000000000030 mtime=1513200663.067785006 30 atime=1513200668.718854121 30 ctime=1513200663.067785006 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobplugin/0000755000175000002070000000000013214316027025215 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobplugin/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712715412562027215 xustar000000000000000027 mtime=1463162226.429695 30 atime=1513200602.090039224 30 ctime=1513200663.062784945 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobplugin/Makefile.am0000644000175000002070000000221412715412562027256 0ustar00mockbuildmock00000000000000GLOBUS_JOBPLUGIN_LIBS = $(GLOBUS_RSL_LIBS) $(GLOBUS_IO_LIBS) \ $(GLOBUS_GSSAPI_GSI_LIBS) $(GLOBUS_GSI_CREDENTIAL_LIBS) \ $(GLOBUS_GSI_CERT_UTILS_LIBS) $(GLOBUS_COMMON_LIBS) pkglib_LTLIBRARIES = jobplugin.la jobplugin_la_SOURCES = jobplugin.cpp jobplugin_acl.cpp jobplugin.h init.cpp jobplugin_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(DBCXX_CPPFLAGS) \ $(GLOBUS_FTP_CLIENT_CFLAGS) $(AM_CXXFLAGS) jobplugin_la_LDFLAGS = -no-undefined -avoid-version -module jobplugin_la_LIBADD = \ ../../../gridftpd/fileplugin/fileplugin_la-fileplugin.lo \ ../../../gridftpd/libgridftpd_la-userspec.lo \ ../../../gridftpd/libgridftpd_la-names.lo \ ../../../gridftpd/libgridftpd_la-misc.lo \ ../../../gridftpd/auth/libmap.la \ ../conf/libconf.la ../misc/libmisc.la \ ../jobs/libjobs.la ../mail/libmail.la \ ../log/liblog.la ../files/libfiles.la ../run/librun.la \ ../../delegation/libdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(DBCXX_LIBS) $(SQLITE_LIBS) $(GLOBUS_JOBPLUGIN_LIBS) -lpthread nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobplugin/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315732027216 xustar000000000000000030 mtime=1513200602.144039885 30 atime=1513200649.983624982 30 ctime=1513200663.063784957 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobplugin/Makefile.in0000644000175000002070000007046213214315732027275 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. 
# @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/grid-manager/jobplugin DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = am__DEPENDENCIES_2 = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) jobplugin_la_DEPENDENCIES = \ ../../../gridftpd/fileplugin/fileplugin_la-fileplugin.lo \ ../../../gridftpd/libgridftpd_la-userspec.lo \ ../../../gridftpd/libgridftpd_la-names.lo \ ../../../gridftpd/libgridftpd_la-misc.lo \ ../../../gridftpd/auth/libmap.la ../conf/libconf.la \ ../misc/libmisc.la 
../jobs/libjobs.la ../mail/libmail.la \ ../log/liblog.la ../files/libfiles.la ../run/librun.la \ ../../delegation/libdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_2) am_jobplugin_la_OBJECTS = jobplugin_la-jobplugin.lo \ jobplugin_la-jobplugin_acl.lo jobplugin_la-init.lo jobplugin_la_OBJECTS = $(am_jobplugin_la_OBJECTS) jobplugin_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(jobplugin_la_CXXFLAGS) \ $(CXXFLAGS) $(jobplugin_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(jobplugin_la_SOURCES) DIST_SOURCES = $(jobplugin_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = 
@CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = 
@PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ 
unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ GLOBUS_JOBPLUGIN_LIBS = $(GLOBUS_RSL_LIBS) $(GLOBUS_IO_LIBS) \ $(GLOBUS_GSSAPI_GSI_LIBS) $(GLOBUS_GSI_CREDENTIAL_LIBS) \ $(GLOBUS_GSI_CERT_UTILS_LIBS) $(GLOBUS_COMMON_LIBS) pkglib_LTLIBRARIES = jobplugin.la jobplugin_la_SOURCES = jobplugin.cpp jobplugin_acl.cpp jobplugin.h init.cpp jobplugin_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(DBCXX_CPPFLAGS) \ $(GLOBUS_FTP_CLIENT_CFLAGS) $(AM_CXXFLAGS) jobplugin_la_LDFLAGS = -no-undefined -avoid-version -module jobplugin_la_LIBADD = \ ../../../gridftpd/fileplugin/fileplugin_la-fileplugin.lo \ ../../../gridftpd/libgridftpd_la-userspec.lo \ ../../../gridftpd/libgridftpd_la-names.lo \ ../../../gridftpd/libgridftpd_la-misc.lo \ ../../../gridftpd/auth/libmap.la \ ../conf/libconf.la ../misc/libmisc.la \ ../jobs/libjobs.la ../mail/libmail.la \ ../log/liblog.la ../files/libfiles.la ../run/librun.la \ ../../delegation/libdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(DBCXX_LIBS) $(SQLITE_LIBS) $(GLOBUS_JOBPLUGIN_LIBS) -lpthread all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/jobplugin/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/jobplugin/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done jobplugin.la: $(jobplugin_la_OBJECTS) $(jobplugin_la_DEPENDENCIES) $(jobplugin_la_LINK) -rpath $(pkglibdir) $(jobplugin_la_OBJECTS) $(jobplugin_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobplugin_la-init.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobplugin_la-jobplugin.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobplugin_la-jobplugin_acl.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ 
@am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< jobplugin_la-jobplugin.lo: jobplugin.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jobplugin_la_CXXFLAGS) $(CXXFLAGS) -MT jobplugin_la-jobplugin.lo -MD -MP -MF $(DEPDIR)/jobplugin_la-jobplugin.Tpo -c -o jobplugin_la-jobplugin.lo `test -f 'jobplugin.cpp' || echo '$(srcdir)/'`jobplugin.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jobplugin_la-jobplugin.Tpo $(DEPDIR)/jobplugin_la-jobplugin.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='jobplugin.cpp' object='jobplugin_la-jobplugin.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jobplugin_la_CXXFLAGS) $(CXXFLAGS) -c -o jobplugin_la-jobplugin.lo `test -f 'jobplugin.cpp' || echo '$(srcdir)/'`jobplugin.cpp jobplugin_la-jobplugin_acl.lo: jobplugin_acl.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jobplugin_la_CXXFLAGS) $(CXXFLAGS) -MT jobplugin_la-jobplugin_acl.lo -MD -MP -MF $(DEPDIR)/jobplugin_la-jobplugin_acl.Tpo -c -o jobplugin_la-jobplugin_acl.lo `test -f 'jobplugin_acl.cpp' || echo '$(srcdir)/'`jobplugin_acl.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jobplugin_la-jobplugin_acl.Tpo $(DEPDIR)/jobplugin_la-jobplugin_acl.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='jobplugin_acl.cpp' object='jobplugin_la-jobplugin_acl.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jobplugin_la_CXXFLAGS) $(CXXFLAGS) -c -o jobplugin_la-jobplugin_acl.lo `test -f 'jobplugin_acl.cpp' || echo '$(srcdir)/'`jobplugin_acl.cpp jobplugin_la-init.lo: init.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jobplugin_la_CXXFLAGS) $(CXXFLAGS) -MT jobplugin_la-init.lo -MD -MP -MF $(DEPDIR)/jobplugin_la-init.Tpo -c -o jobplugin_la-init.lo `test -f 'init.cpp' || echo '$(srcdir)/'`init.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jobplugin_la-init.Tpo $(DEPDIR)/jobplugin_la-init.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='init.cpp' object='jobplugin_la-init.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jobplugin_la_CXXFLAGS) $(CXXFLAGS) -c -o jobplugin_la-init.lo `test -f 'init.cpp' || echo '$(srcdir)/'`init.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: 
$(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobplugin/PaxHeaders.7502/jobplugin.cpp0000644000000000000000000000012413107553503027650 xustar000000000000000027 mtime=1495193411.765536 27 atime=1513200576.236723 30 ctime=1513200663.064784969 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobplugin/jobplugin.cpp0000644000175000002070000016716513107553503027735 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #ifdef HAVE_SETFSUID #include #endif #ifdef HAVE_SSTREAM #include #else #include #endif #define GRIDFTP_PLUGIN #include #include #include #include #include #include #include #include #include #include #include #include #include #include "../jobs/GMJob.h" #include "../jobs/CommFIFO.h" #include "../jobs/ContinuationPlugins.h" #include "../files/ControlFileContent.h" #include "../files/ControlFileHandling.h" #include "../jobs/JobDescriptionHandler.h" #include "../misc/proxy.h" #include "../run/RunParallel.h" #include "../../../gridftpd/userspec.h" #include "../../../gridftpd/names.h" #include "../../../gridftpd/misc.h" #include "../../../gridftpd/fileplugin/fileplugin.h" #include "jobplugin.h" using namespace ARex; static Arc::Logger logger(Arc::Logger::getRootLogger(),"JobPlugin"); typedef struct { const GMConfig* config; const Arc::User* user; const std::string* job; const char* reason; } job_subst_t; #ifdef HAVE_SETFSUID // Non-portable solution. Needed as long as Linux // does not support proper setuid in threads #define SET_USER_UID { ::setfsuid(user->get_uid()); ::setfsgid(user->get_gid()); } #define RESET_USER_UID { ::setfsuid(getuid()); ::setfsgid(getgid()); } #else // Not sure how this will affect other threads. 
Most probably // not in a best way. Anyway this option is not for linux. #define SET_USER_UID { setegid(user.get_gid()); seteuid(user.get_uid()); } #define RESET_USER_UID { seteuid(getuid()); setegid(getgid()); } #endif static void job_subst(std::string& str,void* arg) { job_subst_t* subs = (job_subst_t*)arg; if(subs->job) for(std::string::size_type p = 0;;) { p=str.find('%',p); if(p==std::string::npos) break; if(str[p+1]=='I') { str.replace(p,2,subs->job->c_str()); p+=subs->job->length(); } else if(str[p+1]=='S') { str.replace(p,2,"UNKNOWN"); // WRONG p+=7; } else if(str[p+1]=='O') { str.replace(p,2,subs->reason); p+=strlen(subs->reason); } else { p+=2; }; }; if(subs->user && subs->config) subs->config->Substitute(str, *(subs->user)); } // run external plugin to acquire non-unix local credentials // U - user, C - config, J - job, O - reason #define ApplyLocalCred(U,C,J,O) { \ if(cred_plugin && (*cred_plugin)) { \ job_subst_t subst_arg; \ subst_arg.user=&U; \ subst_arg.config=&C; \ subst_arg.job=J; \ subst_arg.reason=O; \ if(!cred_plugin->run(job_subst,&subst_arg)) { \ logger.msg(Arc::ERROR, "Failed to run plugin"); \ return 1; \ }; \ if(cred_plugin->result() != 0) { \ logger.msg(Arc::ERROR, "Plugin failed: %s", cred_plugin->result()); \ return 1; \ }; \ }; \ } JobPlugin::JobPlugin(std::istream &cfile,userspec_t &user_s,FileNode& node): cont_plugins(new ContinuationPlugins), cred_plugin(new RunPlugin), user_a(user_s.user), job_map(user_s.user), matched_vo(NULL), matched_voms(NULL) { // Because this plugin can load own plugins it's symbols need to // become available in order to avoid duplicate resolution. phandle = dlopen(node.get_plugin_path().c_str(),RTLD_NOW | RTLD_GLOBAL); initialized=true; rsl_opened=false; job_rsl_max_size = DEFAULT_JOB_RSL_MAX_SIZE; direct_fs=NULL; proxy_fname=""; proxy_is_deleg=false; std::string configfile = user_s.get_config_file(); readonly=false; chosenFilePlugin=NULL; matched_vo=user_a.default_group_vo(); matched_voms=user_a.default_group_voms(); srand(time(NULL) + rand()); for(;;) { std::string rest = Arc::ConfigFile::read_line(cfile); std::string command = Arc::ConfigIni::NextArg(rest); if(command.length() == 0) { break; } /* end of file - should not be here */ else if(command == "configfile") { configfile = Arc::ConfigIni::NextArg(rest); } else if(command == "allownew") { std::string value = Arc::ConfigIni::NextArg(rest); if(strcasecmp(value.c_str(),"no") == 0) { readonly=true; } else if(strcasecmp(value.c_str(),"yes") == 0) { readonly=false; }; } else if(command == "unixmap") { /* map to local unix user */ if(!job_map) job_map.mapname(rest.c_str()); } else if(command == "unixgroup") { /* map to local unix user */ if(!job_map) job_map.mapgroup(rest.c_str()); } else if(command == "unixvo") { /* map to local unix user */ if(!job_map) job_map.mapvo(rest.c_str()); } else if(command == "remotegmdirs") { std::string remotedir = Arc::ConfigIni::NextArg(rest); if(remotedir.length() == 0) { logger.msg(Arc::ERROR, "empty argument to remotegmdirs"); initialized=false; }; struct gm_dirs_ dirs; dirs.control_dir = remotedir; remotedir = Arc::ConfigIni::NextArg(rest); if(remotedir.length() == 0) { logger.msg(Arc::ERROR, "bad arguments to remotegmdirs"); initialized=false; }; dirs.session_dir = remotedir; gm_dirs_info.push_back(dirs); std::string drain = Arc::ConfigIni::NextArg(rest); if (drain.empty() || drain != "drain") gm_dirs_non_draining.push_back(dirs); } else if(command == "maxjobdesc") { if(rest.empty()) { job_rsl_max_size = 0; } else 
if(sscanf(rest.c_str(),"%u",&job_rsl_max_size) != 1) { logger.msg(Arc::ERROR, "Wrong number in maxjobdesc"); initialized=false; }; } else if(command == "endpoint") { endpoint = Arc::ConfigIni::NextArg(rest); } else if(command == "end") { break; /* end of section */ } else { logger.msg(Arc::WARNING, "Unsupported configuration command: %s", command); }; }; if(configfile.length()) config.SetConfigFile(configfile); config.SetCredPlugin(cred_plugin); config.SetContPlugins(cont_plugins); std::string uname = user_s.get_uname(); std::string ugroup = user_s.get_gname(); if((bool)job_map) { uname=job_map.unix_name(); ugroup=job_map.unix_group(); }; user = Arc::User(uname, ugroup); if (!user) { logger.msg(Arc::ERROR, "Mapped user:group (%s:%s) not found", uname, ugroup); initialized = false; } else if(!config.Load()) { // read configuration logger.msg(Arc::ERROR, "Failed processing grid-manager configuration"); initialized=false; } else if (gm_dirs_info.size() > 0 && config.SessionRoots().size() > 1) { logger.msg(Arc::ERROR, "Cannot use multiple session directories and remotegmdirs at the same time"); initialized=false; } else { avail_queues = config.Queues(); // set default queue if not given explicitly if(config.DefaultQueue().empty() && (avail_queues.size() == 1)) { config.SetDefaultQueue(*(avail_queues.begin())); } // do substitutions in session dirs based on mapped user session_dirs = config.SessionRoots(); for (std::vector::iterator session = session_dirs.begin(); session != session_dirs.end(); ++session) { config.Substitute(*session, user); } session_dirs_non_draining = config.SessionRootsNonDraining(); for (std::vector::iterator session = session_dirs_non_draining.begin(); session != session_dirs_non_draining.end(); ++session) { config.Substitute(*session, user); } for(std::string allowsubmit = config.AllowSubmit(); !allowsubmit.empty();) { std::string group = Arc::ConfigIni::NextArg(allowsubmit); if(user_a.check_group(group)) { readonly=false; break; }; }; if(readonly) logger.msg(Arc::WARNING, "This user is denied to submit new jobs."); if (!config.ControlDir().empty() && !session_dirs.empty()) { struct gm_dirs_ dirs; dirs.control_dir = config.ControlDir(); dirs.session_dir = session_dirs.front(); gm_dirs_info.push_back(dirs); if (!session_dirs_non_draining.empty()) { gm_dirs_non_draining.push_back(dirs); } } if (gm_dirs_info.empty()) { logger.msg(Arc::ERROR, "No control or remote control directories defined in configuration"); initialized = false; } /* link to the class for direct file access - creating one object per set of GM dirs */ // choose whether to use multiple session dirs or remote GM dirs if (session_dirs.size() > 1) { for (std::vector::iterator i = session_dirs.begin(); i != session_dirs.end(); i++) { std::string direct_config = ""; direct_config += "mount "+(*i)+"\n"; direct_config+="dir / nouser read cd dirlist delete append overwrite"; direct_config+=" create "+ Arc::tostring(user.get_uid())+":"+Arc::tostring(user.get_gid())+ " 600:600"; direct_config+=" mkdir "+ Arc::tostring(user.get_uid())+":"+Arc::tostring(user.get_gid())+ " 700:700\n"; direct_config+="end\n"; #ifdef HAVE_SSTREAM std::stringstream fake_cfile(direct_config); #else std::strstream fake_cfile; fake_cfile<::iterator i = gm_dirs_info.begin(); i != gm_dirs_info.end(); i++) { std::string direct_config = ""; direct_config += "mount "+(*i).session_dir+"\n"; direct_config+="dir / nouser read cd dirlist delete append overwrite"; direct_config+=" create "+ 
Arc::tostring(user.get_uid())+":"+Arc::tostring(user.get_gid())+ " 600:600"; direct_config+=" mkdir "+ Arc::tostring(user.get_uid())+":"+Arc::tostring(user.get_gid())+ " 700:700\n"; direct_config+="end\n"; #ifdef HAVE_SSTREAM std::stringstream fake_cfile(direct_config); #else std::strstream fake_cfile; fake_cfile<StrictSession()) { // Changing unix user. That means control directory must be // writable for every serviced user. user->SwitchUser(true); }; }; #endif } JobPlugin::~JobPlugin(void) { delete_job_id(); if(!proxy_fname.empty()) { remove(proxy_fname.c_str()); }; if(cont_plugins) delete cont_plugins; if(cred_plugin) delete cred_plugin; for (unsigned int i = 0; i < file_plugins.size(); i++) { if (file_plugins.at(i)) delete file_plugins.at(i); } if(phandle) dlclose(phandle); } std::string JobPlugin::get_error_description() const { if (!error_description.empty()) return error_description; if (!chosenFilePlugin) return std::string(""); return chosenFilePlugin->error_description; } int JobPlugin::makedir(std::string &dname) { if(!initialized) return 1; std::string id; bool spec_dir; if((dname == "new") || (dname == "info")) return 0; if(!is_allowed(dname.c_str(),IS_ALLOWED_WRITE,true,&spec_dir,&id)) return 1; if(spec_dir) { error_description="Can't create subdirectory in a special directory."; return 1; }; ApplyLocalCred(user,config,&id,"write"); DirectFilePlugin * fp = selectFilePlugin(id); int r; if((getuid()==0) && config.StrictSession()) { SET_USER_UID; r=fp->makedir(dname); RESET_USER_UID; } else { r=fp->makedir(dname); } if (r != 0) error_description = fp->get_error_description(); return r; } int JobPlugin::removefile(std::string &name) { if(!initialized) return 1; if(name.find('/') == std::string::npos) { /* request to cancel the job */ if((name == "new") || (name == "info")) { error_description="Special directory can't be mangled."; return 1; }; if(!is_allowed(name.c_str(),IS_ALLOWED_WRITE)) return 1; /* owner of the job */ JobId id(name); GMJob job(id, user); std::string controldir = getControlDir(id); if (controldir.empty()) { error_description="No control information found for this job."; return 1; } config.SetControlDir(controldir); logger.msg(Arc::INFO, "Cancelling job %s", id); if(job_cancel_mark_put(job,config)) return 0; }; const char* logname; std::string id; bool spec_dir; if(!is_allowed(name.c_str(),IS_ALLOWED_WRITE,false,&spec_dir,&id,&logname)) return 1; if(logname) { if((*logname) != 0) return 0; /* pretend status file is deleted */ }; if(spec_dir) { error_description="Special directory can't be mangled."; return 1; /* can delete status directory */ }; ApplyLocalCred(user,config,&id,"write"); DirectFilePlugin * fp = selectFilePlugin(id); int r; if((getuid()==0) && config.StrictSession()) { SET_USER_UID; r=fp->removefile(name); RESET_USER_UID; } else { r=fp->removefile(name); } if (r != 0) error_description = fp->get_error_description(); return r; } int JobPlugin::removedir(std::string &dname) { if(!initialized) return 1; if(dname.find('/') == std::string::npos) { /* request to clean the job */ if((dname == "new") || (dname == "info")) { error_description="Special directory can't be mangled."; return 1; }; if(!is_allowed(dname.c_str(), IS_ALLOWED_WRITE)) return 1; /* owner of the job */ /* check the status */ JobId id(dname); std::string controldir = getControlDir(id); if (controldir.empty()) { error_description="No control information found for this job."; return 1; } config.SetControlDir(controldir); std::string sessiondir = getSessionDir(id); if 
(sessiondir.empty()) { // session dir could have already been cleaned, so set to first in list sessiondir = config.SessionRoots().at(0); } config.SetSessionRoot(sessiondir); job_state_t status=job_state_read_file(id,config); logger.msg(Arc::INFO, "Cleaning job %s", id); /* put marks because cleaning job may also involve removing locks */ { GMJob job(id,user); bool res = job_cancel_mark_put(job,config); res &= job_clean_mark_put(job,config); if(res) return 0; }; error_description="Failed to clean job."; return 1; }; std::string id; bool spec_dir; if(!is_allowed(dname.c_str(),IS_ALLOWED_WRITE,false,&spec_dir,&id)) return 1; if(spec_dir) { error_description="Special directory can't be mangled."; return 1; }; ApplyLocalCred(user,config,&id,"write"); DirectFilePlugin * fp = selectFilePlugin(id); int r; if((getuid()==0) && config.StrictSession()) { SET_USER_UID; r=fp->removedir(dname); RESET_USER_UID; } else { r=fp->removedir(dname); } if (r != 0) error_description = fp->get_error_description(); return r; } int JobPlugin::open(const char* name,open_modes mode,unsigned long long int size) { if(!initialized) return 1; if(rsl_opened) { /* unfinished request - cancel */ logger.msg(Arc::ERROR, "Request to open file with storing in progress"); rsl_opened=false; delete_job_id(); error_description="Job submission is still in progress."; return 1; }; /* check if acl request */ if((strncmp(name,".gacl-",6) == 0) && (strchr(name,'/') == NULL)) { std::string newname(name+6); newname="info/"+newname+"/acl"; return open(newname.c_str(),mode,size); }; if( mode == GRIDFTP_OPEN_RETRIEVE ) { /* open for reading */ std::string fname; const char* logname; /* check if reading status files */ bool spec_dir; if(!is_allowed(name,IS_ALLOWED_READ,false,&spec_dir,&fname,&logname)) return 1; std::string controldir = getControlDir(fname); if (controldir.empty()) { error_description="No control information found for this job."; return 1; } config.SetControlDir(controldir); chosenFilePlugin = selectFilePlugin(fname); if(logname) { if((*logname) != 0) { if(strncmp(logname,"proxy",5) == 0) { error_description="Not allowed for this file."; chosenFilePlugin = NULL; return 1; }; fname=config.ControlDir()+"/job."+fname+"."+logname; logger.msg(Arc::INFO, "Retrieving file %s", fname); return chosenFilePlugin->open_direct(fname.c_str(),mode); }; }; if(spec_dir) { error_description="Special directory can't be mangled."; return 1; }; ApplyLocalCred(user,config,&fname,"read"); if((getuid()==0) && config.StrictSession()) { SET_USER_UID; int r=chosenFilePlugin->open(name,mode); RESET_USER_UID; return r; }; return chosenFilePlugin->open(name,mode); } else if( mode == GRIDFTP_OPEN_STORE ) { std::string name_f(name); std::string::size_type n = name_f.find('/'); if((n != std::string::npos) && (n != 0)) { if(((n==3) && (strncmp(name_f.c_str(),"new",n) == 0) ) || ((n==job_id.length()) && (strncmp(name_f.c_str(),job_id.c_str(),n) == 0) )) { if(name_f.find('/',n+1) != std::string::npos) { // should not contain subdirs error_description="Can't create subdirectory here."; return 1; }; // new job so select control and session dirs std::string controldir, sessiondir; if (!chooseControlAndSessionDir(job_id, controldir, sessiondir)) { error_description="No control and/or session directory available."; return 1; }; config.SetControlDir(controldir); config.SetSessionRoot(sessiondir); if(job_id.length() == 0) { //if(readonly) { // error_description="You are not allowed to submit new jobs to this service."; // logger.msg(Arc::ERROR, "%s", 
error_description); // return 1; //}; if(!make_job_id()) { error_description="Failed to allocate ID for job."; logger.msg(Arc::ERROR, "%s", error_description); return 1; }; }; logger.msg(Arc::INFO, "Accepting submission of new job or modification request: %s", job_id); rsl_opened=true; chosenFilePlugin = selectFilePlugin(job_id); return 0; }; }; std::string id; bool spec_dir; const char* logname; if(!is_allowed(name,IS_ALLOWED_WRITE,true,&spec_dir,&id,&logname)) return 1; std::string controldir = getControlDir(id); if (controldir.empty()) { std::string sessiondir; if (!chooseControlAndSessionDir(job_id, controldir, sessiondir)) { error_description="No control and/or session directory available."; return 1; } config.SetSessionRoot(sessiondir); } config.SetControlDir(controldir); chosenFilePlugin = selectFilePlugin(id); logger.msg(Arc::INFO, "Storing file %s", name); if(spec_dir) { // It is allowed to modify ACL if(logname) { if(strcmp(logname,"acl") == 0) { std::string fname=config.ControlDir()+"/job."+id+"."+logname; return chosenFilePlugin->open_direct(fname.c_str(),mode); }; }; error_description="Special directory can't be mangled."; chosenFilePlugin = NULL; return 1; }; ApplyLocalCred(user,config,&id,"write"); if((getuid()==0) && config.StrictSession()) { SET_USER_UID; int r=chosenFilePlugin->open(name,mode,size); RESET_USER_UID; return r; }; return chosenFilePlugin->open(name,mode,size); } logger.msg(Arc::ERROR, "Unknown open mode %i", mode); error_description="Unknown/unsupported request."; return 1; } int JobPlugin::close(bool eof) { if(!initialized || chosenFilePlugin == NULL) return 1; if(!rsl_opened) { // file transfer finished if((getuid()==0) && config.StrictSession()) { SET_USER_UID; int r=chosenFilePlugin->close(eof); RESET_USER_UID; return r; }; return chosenFilePlugin->close(eof); }; // Job description/action request transfer finished rsl_opened=false; if(job_id.length() == 0) { error_description="There is no job ID defined."; logger.msg(Arc::ERROR, "%s", error_description); return 1; }; if(!eof) { delete_job_id(); return 0; }; /* download was canceled */ /* ************************************************* * store RSL (description) * ************************************************* */ std::string rsl_fname=config.ControlDir()+"/job."+job_id+".description"; /* analyze rsl (checking, substituting, etc)*/ JobDescriptionHandler job_desc_handler(config); JobLocalDescription job_desc; // Initial parsing of job/action request JobReqResult parse_result = job_desc_handler.parse_job_req(job_id,job_desc,true); if (parse_result != JobReqSuccess) { error_description="Failed to parse job/action description."; logger.msg(Arc::ERROR, "%s: %s", error_description, parse_result.failure); delete_job_id(); return 1; }; if(job_desc.action.length() == 0) job_desc.action="request"; if(job_desc.action == "cancel") { // Request to cancel existing job delete_job_id(); if(job_desc.jobid.length() == 0) { error_description="Missing ID in request to cancel job."; logger.msg(Arc::ERROR, "%s", error_description); return 1; }; // fall back to RESTful interface return removefile(job_desc.jobid); }; if(job_desc.action == "clean") { // Request to remove existing job delete_job_id(); if(job_desc.jobid.length() == 0) { error_description="Missing ID in request to clean job."; logger.msg(Arc::ERROR, "%s", error_description); return 1; }; // fall back to RESTful interface return removedir(job_desc.jobid); }; if(job_desc.action == "renew") { // Request to renew delegated credentials delete_job_id(); 
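// Credential renewal falls back to checkdir() on the job ID supplied in the request,
// mirroring how the "cancel" and "clean" actions above fall back to removefile()
// and removedir() respectively.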
if(job_desc.jobid.length() == 0) { error_description="Missing ID in request to renew credentials."; logger.msg(Arc::ERROR, "%s", error_description); return 1; }; // fall back to RESTful interface return checkdir(job_desc.jobid); }; if(job_desc.action == "restart") { // Request to restart failed job delete_job_id(); if(job_desc.jobid.length() == 0) { error_description="Missing ID in request to restart job."; logger.msg(Arc::ERROR, "%s", error_description); return 1; }; const char* logname; std::string id; if(!is_allowed(job_desc.jobid.c_str(),IS_ALLOWED_LIST,false,NULL,&id,&logname)) return 1; if(job_desc.jobid != id) { error_description="Wrong ID specified."; logger.msg(Arc::ERROR, "%s", error_description); return 1; }; JobLocalDescription job_desc; if(!job_local_read_file(id,config,job_desc)) { error_description="Job is probably corrupted: can't read internal information."; logger.msg(Arc::ERROR, "%s", error_description); return 1; }; if(job_desc.failedstate.empty()) { error_description="Job can't be restarted."; logger.msg(Arc::ERROR, "%s", error_description); return 1; }; if(job_desc.reruns <= 0) { error_description="Job run out number of allowed retries."; logger.msg(Arc::ERROR, "%s", error_description); return 1; }; if(!job_restart_mark_put(GMJob(id,user),config)) { error_description="Failed to report restart request."; logger.msg(Arc::ERROR, "%s", error_description); return 1; }; return 0; }; if(job_desc.action != "request") { delete_job_id(); error_description="Wrong action in job RSL description."; logger.msg(Arc::ERROR, "%s", error_description); logger.msg(Arc::ERROR, "action(%s) != request", job_desc.action); return 1; }; // Request for creating new job if(readonly) { delete_job_id(); error_description="You are not allowed to submit new jobs to this service."; logger.msg(Arc::ERROR, "%s", error_description); return 1; }; if((job_desc.jobid.length() != 0) && (job_desc.jobid != job_id)) { // Client may specify it's own job ID int h_old=::open(rsl_fname.c_str(),O_RDONLY); delete_job_id(); if(readonly) { ::close(h_old); remove(rsl_fname.c_str()); error_description="New jobs are not allowed."; logger.msg(Arc::ERROR, "%s", error_description); return 1; }; if(!make_job_id(job_desc.jobid)) { ::close(h_old); remove(rsl_fname.c_str()); error_description="Failed to allocate requested job ID: "+job_desc.jobid; logger.msg(Arc::ERROR, "%s", error_description); return 1; }; rsl_fname=config.ControlDir()+"/job."+job_id+".description"; { int l = -1; if(h_old != -1) { int h=::open(rsl_fname.c_str(),O_WRONLY,0600); if(h != -1) { for(;;) { char buf[256]; l=::read(h_old,buf,sizeof(buf)); if(l <= 0) break; const char* s = buf; for(;l;) { ssize_t ll=::write(h,s,l); if(ll <= 0) { l=-1; break; }; l-=ll; s+=ll; }; if(l < 0) break; }; ::close(h); }; ::close(h_old); }; if(l == -1) { logger.msg(Arc::ERROR, "Failed writing job description"); remove(rsl_fname.c_str()); error_description="Failed to store job description."; delete_job_id(); return 1; }; }; }; // Check for proper LRMS name in request. If there is no LRMS name // in user configuration that means service is opaque frontend and // accepts any LRMS in request. if((!job_desc.lrms.empty()) && (!config.DefaultLRMS().empty())) { if(job_desc.lrms != config.DefaultLRMS()) { error_description="Request for LRMS "+job_desc.lrms+" is not allowed."; logger.msg(Arc::ERROR, "%s", error_description); delete_job_id(); return 1; }; }; if(job_desc.lrms.empty()) job_desc.lrms=config.DefaultLRMS(); // Check for proper queue in request. 
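// Queue resolution: a request without a queue inherits the configured default queue;
// if no queue can be determined the request is rejected; and when a queue list is
// configured the requested queue must match one of its entries (an empty list means
// any queue name is accepted).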
if(job_desc.queue.empty()) job_desc.queue=config.DefaultQueue(); if(job_desc.queue.empty()) { error_description="Request has no queue defined."; logger.msg(Arc::ERROR, "%s", error_description); delete_job_id(); return 1; }; if(!avail_queues.empty()) { // If no queues configured - service takes any for(std::list::iterator q = avail_queues.begin();;++q) { if(q == avail_queues.end()) { error_description="Requested queue "+job_desc.queue+" does not match any of available queues."; logger.msg(Arc::ERROR, "%s", error_description); delete_job_id(); return 1; }; if(*q == job_desc.queue) break; }; }; /* *********************************************** * Collect delegation identifiers * *********************************************** */ std::list deleg_ids; for(std::list::iterator f = job_desc.inputdata.begin(); f != job_desc.inputdata.end();++f) { if(!f->cred.empty()) deleg_ids.push_back(f->cred); }; for(std::list::iterator f = job_desc.outputdata.begin(); f != job_desc.outputdata.end();++f) { if(!f->cred.empty()) deleg_ids.push_back(f->cred); }; /* **************************************** * Preprocess job request * **************************************** */ std::string session_dir(config.SessionRoot(job_id) + '/' + job_id); GMJob job(job_id,user,session_dir,JOB_STATE_ACCEPTED); if(!job_desc_handler.process_job_req(job, job_desc)) { error_description="Failed to preprocess job description."; logger.msg(Arc::ERROR, "%s", error_description); delete_job_id(); return 1; }; // Also pick up global delegation id if any if(!job_desc.delegationid.empty()) { deleg_ids.push_back(job_desc.delegationid); }; /* **************************************** * Start local file * **************************************** */ /* !!!!! some parameters are unchecked here - rerun,diskspace !!!!! 
*/ job_desc.jobid=job_id; job_desc.starttime=time(NULL); job_desc.DN=subject; job_desc.sessiondir=config.SessionRoot(job_id)+'/'+job_id; if(port != 0) { job_desc.clientname= Arc::tostring(host[0])+"."+Arc::tostring(host[1])+"."+ Arc::tostring(host[2])+"."+Arc::tostring(host[3])+":"+ Arc::tostring(port); }; std::string globalid = endpoint; if(!globalid.empty()) { if(globalid[globalid.length()-1] != '/') globalid+="/"; globalid+=job_id; job_desc.globalid=globalid; }; job_desc.headnode=endpoint; job_desc.interface="org.nordugrid.gridftpjob"; if(matched_vo != NULL) { job_desc.localvo.push_back(matched_vo); }; if(matched_voms != NULL) { for(std::vector::const_iterator f = matched_voms->fqans.begin(); f != matched_voms->fqans.end(); ++f) { std::string fqan; f->str(fqan); job_desc.voms.push_back(fqan); }; }; // If no authorized VOMS was identified just report those from credentials if(job_desc.voms.empty()) { const std::vector& all_voms = user_a.voms(); for(std::vector::const_iterator v = all_voms.begin(); v != all_voms.end(); ++v) { for(std::vector::const_iterator f = v->fqans.begin(); f != v->fqans.end(); ++f) { std::string fqan; f->str(fqan); job_desc.voms.push_back(fqan); }; }; }; // If still no VOMS information is available take forced one from configuration if(job_desc.voms.empty()) { std::string forced_voms = config.ForcedVOMS(job_desc.queue.c_str()); if(forced_voms.empty()) forced_voms = config.ForcedVOMS(); if(!forced_voms.empty()) job_desc.voms.push_back(forced_voms); }; /* *********************************************** * Try to create delegation and proxy file * *********************************************** */ if(!proxy_fname.empty()) { Arc::Credential cred(proxy_fname, proxy_fname, config.CertDir(), ""); using namespace ArcCredential; // needed for macro expansion if (!CERT_IS_RFC_PROXY(cred.GetType())) { error_description="Non-RFC proxies are not supported."; logger.msg(Arc::ERROR, "%s", error_description); delete_job_id(); return 1; } std::string proxy_data; (void)Arc::FileRead(proxy_fname, proxy_data); if(!proxy_data.empty()) { if(proxy_is_deleg && job_desc.delegationid.empty()) { // If we have gridftp delegation and no other generic delegation provided - store it ARex::DelegationStore deleg(config.DelegationDir(), deleg_db_type, false); std::string deleg_id; if(!deleg.AddCred(deleg_id, subject, proxy_data)) { error_description="Failed to store delegation."; logger.msg(Arc::ERROR, "%s", error_description); delete_job_id(); return 1; }; job_desc.delegationid = deleg_id; deleg_ids.push_back(deleg_id); // one more delegation id }; #if 0 // And store public credentials into proxy file try { Arc::Credential ci(proxy_fname, proxy_fname, config.CertDir(), ""); job_desc.expiretime = ci.GetEndTime(); std::string user_cert; ci.OutputCertificate(user_cert); ci.OutputCertificateChain(user_cert); if(!job_proxy_write_file(job,config,user_cert)) { error_description="Failed to store user credentials."; logger.msg(Arc::ERROR, "%s", error_description); delete_job_id(); return 1; }; } catch (std::exception&) { job_desc.expiretime = time(NULL); }; #else // For backward compatibility during conversion time // store full proxy into proxy file if(!job_proxy_write_file(job,config,proxy_data)) { error_description="Failed to store user credentials."; logger.msg(Arc::ERROR, "%s", error_description); delete_job_id(); return 1; }; job_desc.expiretime = time(NULL); if(proxy_is_deleg) { try { Arc::Credential ci(proxy_data, "", config.CertDir(), "", "", false); job_desc.expiretime = ci.GetEndTime(); } catch 
(std::exception&) { }; }; #endif }; } /* ****************************************** * Write local file * ****************************************** */ if(!job_local_write_file(job,config,job_desc)) { logger.msg(Arc::ERROR, "Failed writing local description"); delete_job_id(); error_description="Failed to create job description."; return 1; }; /* ****************************************** * Write access policy * ****************************************** */ if(!parse_result.acl.empty()) { if(!job_acl_write_file(job_id,config,parse_result.acl)) { logger.msg(Arc::ERROR, "Failed writing ACL"); delete_job_id(); error_description="Failed to process/store job ACL."; return 1; }; }; /* *********************************************** * Call authentication/authorization plugin/exec * *********************************************** */ /* talk to external plugin to ask if we can proceed */ std::list results; if(cont_plugins) cont_plugins->run(job,config,results); // analyze results std::list::iterator result = results.begin(); for(;result != results.end();++result) { if(result->action == ContinuationPlugins::act_fail) { logger.msg(Arc::ERROR, "Failed to run external plugin: %s", result->response); delete_job_id(); error_description="Job is not allowed by external plugin: "+ result->response; return 1; } else if(result->action == ContinuationPlugins::act_log) { // Scream but go ahead logger.msg(Arc::ERROR, "Failed to run external plugin: %s", result->response); } else if(result->action == ContinuationPlugins::act_pass) { // Just continue if(result->response.length()) logger.msg(Arc::INFO, "Plugin response: %s", result->response); } else { logger.msg(Arc::ERROR, "Failed to run external plugin"); delete_job_id(); error_description="Failed to pass external plugin."; return 1; }; }; /* ************************************************************ * From here code accesses filesystem on behalf of local user * ************************************************************ */ if(cred_plugin && (*cred_plugin)) { job_subst_t subst_arg; subst_arg.user=&user; subst_arg.job=&job_id; subst_arg.reason="new"; // run external plugin to acquire non-unix local credentials if(!cred_plugin->run(job_subst,&subst_arg)) { logger.msg(Arc::ERROR, "Failed to run plugin"); delete_job_id(); error_description="Failed to obtain external credentials."; return 1; }; if(cred_plugin->result() != 0) { logger.msg(Arc::ERROR, "Plugin failed: %s", cred_plugin->result()); delete_job_id(); error_description="Failed to obtain external credentials."; return 1; }; }; /* ******************************************* * Create session directory * ******************************************* */ if (!config.CreateSessionDirectory(job.SessionDir(), job.get_user())) { logger.msg(Arc::ERROR, "Failed to create session directory %s", job.SessionDir()); delete_job_id(); error_description="Failed to create session directory."; return 1; } /* ********************************************************** * Create status file (do it last so GM picks job up here) * ********************************************************** */ if(!job_state_write_file(job,config,JOB_STATE_ACCEPTED)) { logger.msg(Arc::ERROR, "Failed writing status"); delete_job_id(); error_description="Failed registering job in grid-manager."; return 1; }; // Put lock on delegated credentials // Can do that after creating status file because delegations are // fresh and hence won't be deleted while locking. 
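// Note: deleg_ids was filled once for every input/output file that named a
// credential, so the same delegation id can appear several times; it is
// sorted and de-duplicated below before a single LockCred() request is made.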
if(!deleg_ids.empty()) { deleg_ids.sort(); deleg_ids.unique(); ARex::DelegationStore store(config.DelegationDir(),deleg_db_type,false); if(!store.LockCred(job_id,deleg_ids,subject)) { logger.msg(Arc::ERROR, "Failed to lock delegated credentials: %s", store.GetFailure()); delete_job_id(); error_description="Failed to lock delegated credentials."; return 1; }; } SignalFIFO(config.ControlDir()); job_id.resize(0); chosenFilePlugin = NULL; return 0; } int JobPlugin::read(unsigned char *buf,unsigned long long int offset,unsigned long long int *size) { if(!initialized || chosenFilePlugin == NULL) { error_description="Transfer is not initialised."; return 1; }; error_description="Failed to read from disc."; if((getuid()==0) && config.StrictSession()) { SET_USER_UID; int r=chosenFilePlugin->read(buf,offset,size); RESET_USER_UID; return r; }; return chosenFilePlugin->read(buf,offset,size); } int JobPlugin::write(unsigned char *buf,unsigned long long int offset,unsigned long long int size) { if(!initialized || chosenFilePlugin == NULL) { error_description="Transfer is not initialised."; return 1; }; error_description="Failed to write to disc."; if(!rsl_opened) { if((getuid()==0) && config.StrictSession()) { SET_USER_UID; int r=chosenFilePlugin->write(buf,offset,size); RESET_USER_UID; return r; }; return chosenFilePlugin->write(buf,offset,size); }; /* write to rsl */ if(job_id.length() == 0) { error_description="No job ID defined."; return 1; }; if((job_rsl_max_size > 0) && ((offset+size) >= job_rsl_max_size)) { error_description="Job description is too big."; return 1; }; std::string rsl_fname=config.ControlDir()+"/job."+job_id+".description"; int h=::open(rsl_fname.c_str(),O_WRONLY|O_CREAT,0600); if(h == -1) { error_description="Failed to open job description file " + rsl_fname; return 1; }; if(::lseek(h,offset,SEEK_SET) != offset) { ::close(h); error_description="Failed to seek in job description file " + rsl_fname; return 1; }; for(;size;) { ssize_t l = ::write(h,buf,size); if(l <= 0) { ::close(h); error_description="Failed to write job description file " + rsl_fname; return 1; }; size-=l; buf+=l; }; fix_file_owner(rsl_fname,user); ::close(h); // remove desc file used to claim job id, if different from this one if (config.ControlDir() != gm_dirs_info.at(0).control_dir) { rsl_fname=gm_dirs_info.at(0).control_dir+"/job."+job_id+".description"; remove(rsl_fname.c_str()); } return 0; } int JobPlugin::readdir(const char* name,std::list &dir_list,DirEntry::object_info_level mode) { if(!initialized) { error_description="Plugin is not initialised."; return 1; }; if((name[0] == 0) || (strcmp("info",name) == 0)) { /* root jobs directory or jobs' info */ if(name[0] == 0) { DirEntry dent_new(false,"new"); DirEntry dent_info(false,"info"); dent_new.may_dirlist=true; dent_info.may_dirlist=true; dir_list.push_back(dent_new); dir_list.push_back(dent_info); }; // loop through all control dirs for (std::vector::iterator i = gm_dirs_info.begin(); i != gm_dirs_info.end(); i++) { std::string cdir=(*i).control_dir; Glib::Dir *dir=new Glib::Dir(cdir); if(dir != NULL) { std::string file_name; while ((file_name = dir->read_name()) != "") { std::vector tokens; Arc::tokenize(file_name, tokens, "."); // look for job.id.local if (tokens.size() == 3 && tokens[0] == "job" && tokens[2] == "local") { JobLocalDescription job_desc; std::string fname=cdir+'/'+file_name; if(job_local_read_file(fname,job_desc)) { if(job_desc.DN == subject) { JobId id(tokens[1]); dir_list.push_back(DirEntry(false,id)); }; }; }; }; dir->close(); delete 
dir; }; }; return 0; }; if(strcmp(name,"new") == 0) { /* directory /new is always empty */ return 0; }; /* check for allowed job directory */ const char* logname; std::string id; std::string log; if(!is_allowed(name,IS_ALLOWED_LIST,false,NULL,&id,&logname,&log)) return 1; if(logname) { std::string controldir = getControlDir(id); if (controldir.empty()) { error_description="No control information found for this job."; return 1; } config.SetControlDir(controldir); if((*logname) != 0) { if(strchr(logname,'/') != NULL) return 1; /* no subdirs */ if(strncmp(logname,"proxy",5) == 0) return 1; id=config.ControlDir()+"/job."+id+"."+logname; struct stat st; if(::stat(id.c_str(),&st) != 0) return 1; if(!S_ISREG(st.st_mode)) return 1; DirEntry dent(true,logname); if(strncmp(logname,"proxy",5) != 0) dent.may_read=true; dir_list.push_back(dent); return -1; }; Glib::Dir* d=new Glib::Dir(config.ControlDir()); if(d == NULL) { return 1; }; /* maybe return ? */ id="job."+id+"."; std::string file_name; while ((file_name = d->read_name()) != "") { if(file_name.substr(0, id.length()) != id) continue; if(file_name.substr(file_name.length() - 5) == "proxy") continue; DirEntry dent(true, file_name.substr(id.length())); dir_list.push_back(dent); }; d->close(); delete d; return 0; }; if(log.length() > 0) { const char* s = strchr(name,'/'); if((s == NULL) || (s[1] == 0)) { DirEntry dent(false,log.c_str()); dent.may_dirlist=true; dir_list.push_back(dent); }; }; /* allowed - pass to file system */ ApplyLocalCred(user,config,&id,"read"); chosenFilePlugin = selectFilePlugin(id); if((getuid()==0) && config.StrictSession()) { SET_USER_UID; int r=chosenFilePlugin->readdir(name,dir_list,mode); RESET_USER_UID; return r; }; return chosenFilePlugin->readdir(name,dir_list,mode); } int JobPlugin::checkdir(std::string &dirname) { if(!initialized) return 1; /* chdir to /new will create new job */ if(dirname.length() == 0) return 0; /* root */ if(dirname == "new") { /* new job */ if(readonly) { error_description="New jobs are not allowed."; logger.msg(Arc::ERROR, "%s", error_description); return 1; }; if(!make_job_id()) { error_description="Failed to allocate ID for job."; logger.msg(Arc::ERROR, "%s", error_description); return 1; }; dirname=job_id; return 0; }; if(dirname == "info") { /* always allowed */ return 0; }; const char* logname; std::string id; if(!is_allowed(dirname.c_str(),IS_ALLOWED_LIST,false,NULL,&id,&logname)) return 1; std::string controldir = getControlDir(id); if (controldir.empty()) { error_description="No control information found for this job."; return 1; } config.SetControlDir(controldir); if(logname) { if((*logname) != 0) { error_description="There is no such special subdirectory."; return 1; /* log directory has no subdirs */ }; return 0; }; if((dirname == id) && (!proxy_fname.empty()) && proxy_is_deleg) { /* cd to session directory - renew proxy request */ JobLocalDescription job_desc; if(!job_local_read_file(id,config,job_desc)) { error_description="Job is probably corrupted: can't read internal information."; logger.msg(Arc::ERROR, "%s", error_description); return 1; }; /* check if new proxy is better than old one */ std::string old_proxy_fname=config.ControlDir()+"/job."+id+".proxy"; Arc::Time new_proxy_expires; Arc::Time old_proxy_expires; std::string proxy_data; std::string user_cert; try { (void)Arc::FileRead(proxy_fname, proxy_data); if(proxy_data.empty()) { error_description="Failed to obtain delegation content."; logger.msg(Arc::ERROR, "%s", error_description); return 1; }; Arc::Credential 
new_ci(proxy_data, proxy_data, config.CertDir(), "", "", false); new_proxy_expires = new_ci.GetEndTime(); new_ci.OutputCertificate(user_cert); new_ci.OutputCertificateChain(user_cert); } catch (std::exception&) { }; try { Arc::Credential old_ci(old_proxy_fname, "", config.CertDir(), ""); old_proxy_expires = old_ci.GetEndTime(); } catch (std::exception&) { }; if(new_proxy_expires > old_proxy_expires) { /* try to renew proxy */ logger.msg(Arc::INFO, "Renewing proxy for job %s", id); ARex::DelegationStore deleg(config.DelegationDir(),deleg_db_type,false); if((!job_desc.delegationid.empty()) && deleg.PutCred(job_desc.delegationid, subject, proxy_data)) { // Also store public content into job.#.proxy // Ignore error because main store is already updated GMJob job(id, user, "", JOB_STATE_ACCEPTED); #if 0 (void)job_proxy_write_file(job, config, user_cert); #else // For backward compatibility during transitional period store whole proxy (void)job_proxy_write_file(job, config, proxy_data); #endif logger.msg(Arc::INFO, "New proxy expires at %s", Arc::TimeStamp(Arc::Time(new_proxy_expires), Arc::UserTime)); job_desc.expiretime=new_proxy_expires; if(!job_local_write_file(job,config,job_desc)) { logger.msg(Arc::ERROR, "Failed to write 'local' information"); }; error_description="Applying external credentials locally failed."; ApplyLocalCred(user,config,&id,"renew"); error_description=""; } else { logger.msg(Arc::ERROR, "Failed to renew proxy"); }; } else { logger.msg(Arc::WARNING, "New proxy expiry time is not later than old proxy, not renewing proxy"); }; }; ApplyLocalCred(user,config,&id,"read"); chosenFilePlugin = selectFilePlugin(id); if((getuid()==0) && config.StrictSession()) { SET_USER_UID; int r=chosenFilePlugin->checkdir(dirname); RESET_USER_UID; return r; }; return chosenFilePlugin->checkdir(dirname); } int JobPlugin::checkfile(std::string &name,DirEntry &info,DirEntry::object_info_level mode) { if(!initialized) return 1; if(name.length() == 0) { info.name=""; info.is_file=false; return 0; }; if((name == "new") || (name == "info")) { info.name=""; info.is_file=false; return 0; }; const char* logname; std::string id; if(!is_allowed(name.c_str(),IS_ALLOWED_LIST,false,NULL,&id,&logname)) return 1; std::string controldir = getControlDir(id); if (controldir.empty()) { error_description="No control information found for this job."; return 1; } config.SetControlDir(controldir); if(logname) { if((*logname) == 0) { /* directory itself */ info.is_file=false; info.name=""; info.may_dirlist=true; } else { if(strncmp(logname,"proxy",5) == 0) { error_description="There is no such special file."; return 1; }; id=config.ControlDir()+"/job."+id+"."+logname; logger.msg(Arc::INFO, "Checking file %s", id); struct stat st; if(::stat(id.c_str(),&st) != 0) { error_description="There is no such special file."; return 1; }; if(!S_ISREG(st.st_mode)) { error_description="There is no such special file."; return 1; }; info.is_file=true; info.name=""; info.may_read=true; info.size=st.st_size; }; return 0; }; ApplyLocalCred(user,config,&id,"read"); chosenFilePlugin = selectFilePlugin(id); if((getuid()==0) && config.StrictSession()) { SET_USER_UID; int r=chosenFilePlugin->checkfile(name,info,mode); RESET_USER_UID; return r; }; return chosenFilePlugin->checkfile(name,info,mode); } bool JobPlugin::delete_job_id(void) { if(job_id.length() != 0) { std::string controldir = getControlDir(job_id); if (controldir.empty()) { error_description="No control information found for this job."; return false; } 
config.SetControlDir(controldir); std::string sessiondir = getSessionDir(job_id); if (sessiondir.empty()) { // session dir could have already been cleaned, so set to first in list sessiondir = config.SessionRoots().at(0); } config.SetSessionRoot(sessiondir); job_clean_final(GMJob(job_id,user,sessiondir+"/"+job_id),config); job_id=""; }; return true; } bool JobPlugin::make_job_id(const std::string &id) { if((id.find('/') != std::string::npos) || (id.find('\n') != std::string::npos)) { logger.msg(Arc::ERROR, "ID contains forbidden characters"); return false; }; if((id == "new") || (id == "info")) return false; // claim id by creating empty description file // So far assume control directory is on local fs. // TODO: add locks or links for NFS // check the new ID is not used in any control dir std::vector::iterator it = gm_dirs_info.begin(); std::string fname=it->control_dir+"/job."+id+".description"; int h = ::open(fname.c_str(),O_RDWR | O_CREAT | O_EXCL,0600); if(h == -1) return false; it++; for (; it != gm_dirs_info.end(); it++) { std::string desc_fname=it->control_dir+"/job."+id+".description"; struct stat st; if(stat(desc_fname.c_str(),&st) == 0) { close(h); remove(fname.c_str()); return false; } } fix_file_owner(fname,user); close(h); delete_job_id(); job_id=id; return true; } bool JobPlugin::make_job_id(void) { bool found = false; delete_job_id(); for(int i=0;i<100;i++) { //std::string id=Arc::tostring((unsigned int)getpid())+ // Arc::tostring((unsigned int)time(NULL))+ // Arc::tostring(rand(),1); std::string id; Arc::GUID(id); // create job.id.description file then loop through all control dirs to find if it already exists std::vector::iterator it = gm_dirs_info.begin(); std::string fname=it->control_dir+"/job."+id+".description"; int h = ::open(fname.c_str(),O_RDWR | O_CREAT | O_EXCL,0600); // So far assume control directory is on local fs. // TODO: add locks or links for NFS if(h == -1) { if(errno == EEXIST) continue; logger.msg(Arc::ERROR, "Failed to create file in %s", it->control_dir); return false; }; it++; for (; it != gm_dirs_info.end(); it++) { std::string desc_fname=it->control_dir+"/job."+id+".description"; struct stat st; if(stat(desc_fname.c_str(),&st) == 0) { found = true; break; } } if (found) { found = false; close(h); remove(fname.c_str()); continue; } // safe to use this id job_id = id; fix_file_owner(fname,user); close(h); break; }; if(job_id.empty()) { logger.msg(Arc::ERROR, "Out of tries while allocating new job ID"); return false; }; return true; } /* name - name of file to access perm - permission to check locked - true if job already running jobid - returns id extracted from name logname - name of log file (errors, status, etc.) log - stdlog of job spec_dir - if file belogs to virtual directory returns true if access rights include the specified permission. For special files true is returned and spec_dir is set to true. Distinction between files is processed at higher levels. In case of error, error_description is set. 
*/ bool JobPlugin::is_allowed(const char* name,int perm,bool /* locked */,bool* spec_dir,std::string* jobid,char const ** logname,std::string* log) { if(logname) (*logname) = NULL; if(log) (*log)=""; if(spec_dir) (*spec_dir)=false; JobId id(name); if(id == "info") { // directory which contains list of jobs-directories if(spec_dir) (*spec_dir)=false; if((perm & (IS_ALLOWED_READ | IS_ALLOWED_LIST)) == perm) return true; error_description = "Not allowed for this job: permission denied"; return false; }; if(strncmp(id.c_str(),"info/",5) == 0) { if(spec_dir) (*spec_dir)=true; name+=5; id=name; std::string::size_type n=id.find('/'); if(n != std::string::npos) id.erase(n); if(jobid) (*jobid)=id; if(id.length() == 0) { error_description = "No job id found"; return false; } const char* l_name = name+id.length(); if(l_name[0] == '/') l_name++; if(logname) { (*logname)=l_name; }; JobLocalDescription job_desc; std::string controldir = getControlDir(id); if (controldir.empty()) { error_description="No control information found for this job."; return false; } config.SetControlDir(controldir); if(!job_local_read_file(id,config,job_desc)) { error_description = "Not allowed for this job: "+Arc::StrError(errno); return false; } if(job_desc.DN != subject) { // Not an owner. Check acl. std::string acl_file = config.ControlDir()+"/job."+id+".acl"; struct stat st; if(stat(acl_file.c_str(),&st) == 0) { if(S_ISREG(st.st_mode)) { int res = 0; res |= check_acl(acl_file.c_str(), true, id); if ( (res & perm) == perm) return true; error_description = "Not allowed for this job: permission denied"; }; }; return false; }; //if(strncmp(l_name,"proxy",5) == 0) return (IS_ALLOWED_LIST); //if(strncmp(l_name,"acl",3) != 0) return (IS_ALLOWED_READ | IS_ALLOWED_LIST);; return true; }; std::string::size_type n=id.find('/'); if(n != std::string::npos) id.erase(n); if(jobid) (*jobid)=id; JobLocalDescription job_desc; std::string controldir = getControlDir(id); if (controldir.empty()) { error_description="No control information found for this job."; return false; } config.SetControlDir(controldir); if(!job_local_read_file(id,config,job_desc)) { logger.msg(Arc::ERROR, "Failed to read job's local description for job %s from %s", id, config.ControlDir()); if (errno == ENOENT) error_description="No such job"; else error_description=Arc::StrError(errno); return false; } int res = 0; bool spec = false; //bool proxy = false; //bool acl = false; if(log) (*log)=job_desc.stdlog; if(n != std::string::npos) { int l = job_desc.stdlog.length(); if(l != 0) { if(strncmp(name+n+1,job_desc.stdlog.c_str(),l) == 0) { if(name[n+1+l] == 0) { if(spec_dir) (*spec_dir)=true; if(logname) (*logname)=name+n+1+l; spec=true; } else if(name[n+1+l] == '/') { if(spec_dir) (*spec_dir)=true; if(logname) (*logname)=name+n+1+l+1; spec=true; //if(strncmp(name+n+1+l+1,"proxy",5) == 0) proxy=true; //if(strncmp(name+n+1+l+1,"acl",3) == 0) acl=true; }; }; }; }; if(job_desc.DN == subject) { res|=(IS_ALLOWED_READ | IS_ALLOWED_WRITE | IS_ALLOWED_LIST); } else { // Not an owner. Check acl. 
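// A non-owner only receives whatever rights the job's ".acl" file grants;
// those bits are computed by check_acl() (see jobplugin_acl.cpp below).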
std::string acl_file = config.ControlDir()+"/job."+id+".acl"; struct stat st; if(stat(acl_file.c_str(),&st) == 0) { if(S_ISREG(st.st_mode)) { res |= check_acl(acl_file.c_str(), spec, id); }; }; }; if ((res & perm) == perm) return true; error_description="Not allowed for this job: permission denied"; return false; } /* * Methods to deal with multple control and session dirs */ DirectFilePlugin * JobPlugin::selectFilePlugin(std::string id) { if (file_plugins.size() == 1) return file_plugins.at(0); // get session dir std::string sd = getSessionDir(id); if (sd.empty()) return file_plugins.at(0); // match to our list if (session_dirs.size() > 1) { // find this id's session dir from the list of session dirs for (unsigned int i = 0; i < session_dirs.size(); i++) { if (session_dirs.at(i) == sd) { return file_plugins.at(i); } } } else { // find this id's session dir from the gm_dirs info for (unsigned int i = 0; i < gm_dirs_info.size(); i++) { if (gm_dirs_info.at(i).session_dir == sd) { return file_plugins.at(i); } } } // error - shouldn't be possible but return first in list return file_plugins.at(0); } bool JobPlugin::chooseControlAndSessionDir(std::string /* job_id */, std::string& controldir, std::string& sessiondir) { if (gm_dirs_non_draining.empty()) { // no active control or session dirs available logger.msg(Arc::ERROR, "No non-draining control or session directories available"); return false; } // if multiple session dirs are defined, don't use remote dirs if (session_dirs.size() > 1) { // the 'main' control dir is last in the list controldir = gm_dirs_info.at(gm_dirs_info.size()-1).control_dir; // choose randomly from non-draining session dirs sessiondir = session_dirs_non_draining.at(rand() % session_dirs_non_draining.size()); } else { // choose randomly from non-draining gm_dirs_info unsigned int i = rand() % gm_dirs_non_draining.size(); controldir = gm_dirs_non_draining.at(i).control_dir; sessiondir = gm_dirs_non_draining.at(i).session_dir; } logger.msg(Arc::INFO, "Using control directory %s", controldir); logger.msg(Arc::INFO, "Using session directory %s", sessiondir); return true; } std::string JobPlugin::getControlDir(std::string id) { // if multiple session dirs are defined we only have one control dir if (session_dirs.size() > 1 || gm_dirs_info.size() == 1) { // the 'main' control dir is last in the list return gm_dirs_info.at(gm_dirs_info.size()-1).control_dir; } // check for existence of job.id.description file (the first created) for (unsigned int i = 0; i < gm_dirs_info.size(); i++) { config.SetControlDir(gm_dirs_info.at(i).control_dir); JobId jobid(id); std::string rsl; if (job_description_read_file(jobid, config, rsl)) return gm_dirs_info.at(i).control_dir; } // no control info found std::string empty(""); return empty; } std::string JobPlugin::getSessionDir(std::string id) { // if multiple session dirs are defined, don't use remote dirs if (session_dirs.size() > 1) { // look for this id's session dir struct stat st; for (unsigned int i = 0; i < session_dirs.size(); i++) { std::string sessiondir(session_dirs.at(i) + '/' + id); if (stat(sessiondir.c_str(), &st) == 0 && S_ISDIR(st.st_mode)) { return session_dirs.at(i); } } } else { struct stat st; for (unsigned int i = 0; i < gm_dirs_info.size(); i++) { std::string sessiondir(gm_dirs_info.at(i).session_dir + '/' + id); if (stat(sessiondir.c_str(), &st) == 0 && S_ISDIR(st.st_mode)) { return gm_dirs_info.at(i).session_dir; } } } // no session dir found std::string empty(""); return empty; } 
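The make_job_id() methods above claim a job identifier by atomically creating an empty job.<id>.description marker in the control directory. The following standalone sketch is not part of the ARC sources; the claim_job_id() helper and the /tmp paths are invented for illustration. It shows the same O_CREAT|O_EXCL technique in isolation, under the assumption the code itself states (a local filesystem, no NFS locking):

#include <fcntl.h>
#include <unistd.h>
#include <cstdio>
#include <string>

// Try to claim an id by creating its marker file exclusively. Only the first
// caller succeeds; a concurrent caller gets EEXIST and must pick another id,
// which is how JobPlugin::make_job_id() retries with a fresh GUID.
static bool claim_job_id(const std::string& control_dir, const std::string& id) {
  std::string fname = control_dir + "/job." + id + ".description";
  int h = ::open(fname.c_str(), O_RDWR | O_CREAT | O_EXCL, 0600);
  if (h == -1) return false;
  ::close(h);
  return true;
}

int main() {
  const char* dir = "/tmp"; // stand-in for a configured control directory
  std::printf("first claim:  %s\n", claim_job_id(dir, "example-0001") ? "ok" : "already taken");
  std::printf("second claim: %s\n", claim_job_id(dir, "example-0001") ? "ok" : "already taken");
  ::unlink("/tmp/job.example-0001.description"); // remove the sketch's marker
  return 0;
}

Checking for the file first and creating it afterwards would leave a race window between the two steps; O_EXCL folds the existence test and the creation into one atomic operation on a local filesystem, which is why the code above carries a TODO about adding explicit locks or links for NFS.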
nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobplugin/PaxHeaders.7502/jobplugin_acl.cpp0000644000000000000000000000012413213445240030463 xustar000000000000000027 mtime=1512983200.815191 27 atime=1513200576.236723 30 ctime=1513200663.065784982 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobplugin/jobplugin_acl.cpp0000644000175000002070000001114713213445240030534 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #ifdef HAVE_GACL #include "../../../gridftpd/auth/gacl_auth.h" #include "../../../gridftpd/auth/permission_gacl.h" #else #include #include #include #endif #include "jobplugin.h" static Arc::Logger logger(Arc::Logger::getRootLogger(),"JobPlugin"); #ifdef HAVE_GACL int JobPlugin::check_acl(const char* acl_file,bool spec,const std::string& id) { int res = 0; GACLacl* acl = GACLloadAcl((char*)(acl_file)); if(!acl) { logger.msg(Arc::ERROR, "Failed to read job's ACL for job %s from %s", id, config.ControlDir()); return res; }; GACLperm perm = AuthUserGACLTest(acl,user_a); if(spec) { if(GACLhasList(perm)) res|=IS_ALLOWED_LIST; if(GACLhasRead(perm) || GACLhasWrite(perm)) res|=(IS_ALLOWED_READ | IS_ALLOWED_LIST); if(GACLhasAdmin(perm)) res|=(IS_ALLOWED_READ | IS_ALLOWED_WRITE | IS_ALLOWED_LIST); } else { if(GACLhasList(perm)) res|=IS_ALLOWED_LIST; if(GACLhasRead(perm)) res|=IS_ALLOWED_READ; if(GACLhasWrite(perm)) res|=IS_ALLOWED_WRITE; if(GACLhasAdmin(perm)) res|=(IS_ALLOWED_READ | IS_ALLOWED_WRITE | IS_ALLOWED_LIST); }; return res; } #else // HAVE_GACL #define EVALUATE_ACTION(request,allowed_to,action_name) {\ for(Arc::XMLNode entry = request["entry"];(bool)entry;++entry) {\ entry["allow"].Destroy();\ entry.NewChild("allow").NewChild(action_name);\ };\ ArcSec::Response *resp = eval->evaluate(request,policy.Ptr());\ if(resp) {\ ArcSec::ResponseList& rlist = resp->getResponseItems();\ for(int n = 0; nres != ArcSec::DECISION_PERMIT) continue;\ allowed_to=true; break;\ };\ };\ } int JobPlugin::check_acl(const char* acl_file,bool spec,const std::string& id) { int res = 0; // TODO: this code is not complete yet // Identify and parse policy ArcSec::EvaluatorLoader eval_loader; Arc::AutoPointer policy(eval_loader.getPolicy(ArcSec::SourceFile(acl_file))); if(!policy) { logger.msg(Arc::ERROR, "Failed to parse user policy for job %s", id); return res; }; Arc::AutoPointer eval(eval_loader.getEvaluator(policy.Ptr())); if(!eval) { logger.msg(Arc::VERBOSE, "Failed to load policy evaluator for policy of job %s", id); return res; }; std::string policyname = policy->getName(); if((policyname.length() > 7) && (policyname.substr(policyname.length()-7) == ".policy")) { policyname.resize(policyname.length()-7); }; if(policyname == "arc") { // TODO } else if(policyname == "gacl") { // Creating request - directly with XML Arc::NS ns; Arc::XMLNode request(ns,"gacl"); bool allowed_to_list = false; bool allowed_to_read = false; bool allowed_to_write = false; bool allowed_to_admin = false; // Collect all security attributes { std::string user_identity = user_a.DN(); const std::vector& user_voms = user_a.voms(); Arc::XMLNode entry = request.NewChild("entry"); if(!user_identity.empty()) entry.NewChild("person").NewChild("dn") = user_identity; Arc::XMLNode voms; for(std::vector::const_iterator v = user_voms.begin(); v != user_voms.end();++v) { for(std::vector::const_iterator a = v->fqans.begin(); a != v->fqans.end();++a) { if(!voms) voms = entry.NewChild("voms"); std::string val; a->str(val); voms.NewChild("fqan") = val; }; voms = Arc::XMLNode(); // ?? 
}; }; // Evaluate every action separately EVALUATE_ACTION(request,allowed_to_list,"list"); EVALUATE_ACTION(request,allowed_to_read,"read"); EVALUATE_ACTION(request,allowed_to_write,"write"); EVALUATE_ACTION(request,allowed_to_admin,"admin"); if(spec) { if(allowed_to_list) res|=IS_ALLOWED_LIST; if(allowed_to_read || allowed_to_write) res|=(IS_ALLOWED_READ | IS_ALLOWED_LIST); if(allowed_to_admin) res|=(IS_ALLOWED_READ | IS_ALLOWED_WRITE | IS_ALLOWED_LIST); } else { if(allowed_to_list) res|=IS_ALLOWED_LIST; if(allowed_to_read) res|=IS_ALLOWED_READ; if(allowed_to_write) res|=IS_ALLOWED_WRITE; if(allowed_to_admin) res|=(IS_ALLOWED_READ | IS_ALLOWED_WRITE | IS_ALLOWED_LIST); }; } else { logger.msg(Arc::VERBOSE, "Unknown ACL policy %s for job %s", policyname, id); }; return res; } #endif // HAVE_GACL nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobplugin/PaxHeaders.7502/init.cpp0000644000000000000000000000012412053413155026617 xustar000000000000000027 mtime=1353586285.377456 27 atime=1513200576.239723 30 ctime=1513200663.067785006 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobplugin/init.cpp0000644000175000002070000000062112053413155026663 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #define GRIDFTP_PLUGIN #include "jobplugin.h" static FilePlugin* init_cpp(std::istream &cfile,userspec_t &user,FileNode &node) { JobPlugin* arg = new JobPlugin(cfile,user,node); return arg; } extern "C" { FilePlugin* init(std::istream &cfile,userspec_t &user,FileNode &node) { return init_cpp(cfile,user,node); } } nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobplugin/PaxHeaders.7502/README0000644000000000000000000000012311412417142026025 xustar000000000000000026 mtime=1277828706.49151 27 atime=1513200576.238723 30 ctime=1513200663.061784933 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobplugin/README0000644000175000002070000000013611412417142026073 0ustar00mockbuildmock00000000000000Jobplugin for ARC GridFTP server. Allows submission of grid jobs through a GridFTP interface. 
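The init.cpp above exports the plugin factory through a plain extern "C" symbol so the gridftpd server can look it up by name when the module is loaded. As a generic illustration only — this is not the actual gridftpd loader, and the library path plus the simplified signature are assumptions — resolving such a factory with dlopen/dlsym looks like this:

#include <dlfcn.h>
#include <cstdio>

// Simplified stand-in for the real init(std::istream&, userspec_t&, FileNode&) signature.
typedef void* (*plugin_init_t)(void*, void*, void*);

int main() {
  void* handle = dlopen("./libjobplugin.so", RTLD_NOW); // hypothetical module path
  if (!handle) { std::fprintf(stderr, "dlopen: %s\n", dlerror()); return 1; }
  // The unmangled name "init" is only visible because init.cpp wraps it in extern "C".
  plugin_init_t init = (plugin_init_t)dlsym(handle, "init");
  if (!init) { std::fprintf(stderr, "dlsym: %s\n", dlerror()); dlclose(handle); return 1; }
  std::printf("plugin factory symbol 'init' resolved\n");
  dlclose(handle);
  return 0;
}

Because the symbol is looked up by its literal name, the extern "C" wrapper in init.cpp is what keeps C++ name mangling from hiding it.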
nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobplugin/PaxHeaders.7502/jobplugin.h0000644000000000000000000000012412715412562027320 xustar000000000000000027 mtime=1463162226.429695 27 atime=1513200576.238723 30 ctime=1513200663.066784994 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/jobplugin/jobplugin.h0000644000175000002070000000640412715412562027371 0ustar00mockbuildmock00000000000000#ifndef GRID_SERVER_JOB_PLUGIN_H #define GRID_SERVER_JOB_PLUGIN_H #include #include #include #include #include "../../../gridftpd/fileroot.h" #include "../../../gridftpd/userspec.h" #include "../conf/GMConfig.h" #include "../../delegation/DelegationStore.h" using namespace ARex; class DirectFilePlugin; /* * Store per-GM information */ struct gm_dirs_ { std::string control_dir; std::string session_dir; }; #define DEFAULT_JOB_RSL_MAX_SIZE (5*1024*1024) /* this class is used to communicate with network layer - must be derived from FilePlugin */ class JobPlugin: public FilePlugin { private: enum { IS_ALLOWED_READ = 1, IS_ALLOWED_WRITE = 2, IS_ALLOWED_LIST = 4, IS_ALLOWED_RW = 3, IS_ALLOWED_ALL = 7 }; bool make_job_id(const std::string &); bool make_job_id(void); bool delete_job_id(void); int check_acl(const char* acl_file,bool spec,const std::string& id); bool is_allowed(const char* name,int perm,bool locked = false,bool* spec_dir = NULL,std::string* id = NULL,char const ** logname = NULL,std::string* log = NULL); DirectFilePlugin * selectFilePlugin(std::string id); /** Find the control dir used by this job id */ std::string getControlDir(std::string id); /** Find the session dir used by this job id */ std::string getSessionDir(std::string id); /** Pick new control and session dirs according to algorithm */ bool chooseControlAndSessionDir(std::string job_id, std::string& controldir, std::string& sessiondir); void* phandle; ContinuationPlugins* cont_plugins; RunPlugin* cred_plugin; Arc::User user; GMConfig config; DelegationStore::DbType deleg_db_type; AuthUser& user_a; UnixMap job_map; std::list avail_queues; std::string subject; unsigned short int port; // port client used for data channel int host[4]; // client host std::string proxy_fname; /* name of proxy file passed by client */ bool proxy_is_deleg; std::string job_id; unsigned int job_rsl_max_size; //!! 
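// Upper limit, in bytes, on an uploaded job description: JobPlugin::write()
// rejects descriptions reaching this size, and a value of 0 disables the
// check (cf. DEFAULT_JOB_RSL_MAX_SIZE above).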
char job_rsl[1024*1024+5]; bool initialized; bool rsl_opened; DirectFilePlugin* direct_fs; bool readonly; const char* matched_vo; const voms_t* matched_voms; std::vector gm_dirs_info; std::vector gm_dirs_non_draining; std::vector session_dirs; std::vector session_dirs_non_draining; std::vector file_plugins; DirectFilePlugin * chosenFilePlugin; public: JobPlugin(std::istream &cfile,userspec_t &user,FileNode &node); ~JobPlugin(void); virtual std::string get_error_description() const; virtual int open(const char* name,open_modes mode,unsigned long long int size = 0); virtual int close(bool eof); virtual int read(unsigned char *buf,unsigned long long int offset,unsigned long long int *size); virtual int write(unsigned char *buf,unsigned long long int offset,unsigned long long int size); virtual int readdir(const char* name,std::list &dir_list,DirEntry::object_info_level mode); virtual int checkdir(std::string &dirname); virtual int checkfile(std::string &name,DirEntry &file,DirEntry::object_info_level mode); virtual int makedir(std::string &dirname); virtual int removefile(std::string &name); virtual int removedir(std::string &dirname); }; #endif nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/run0000644000000000000000000000013213214316026023704 xustar000000000000000030 mtime=1513200662.874782645 30 atime=1513200668.718854121 30 ctime=1513200662.874782645 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/run/0000755000175000002070000000000013214316026024027 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/run/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515026024 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200602.509044349 30 ctime=1513200662.866782548 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/run/Makefile.am0000644000175000002070000000043012052416515026063 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = librun.la librun_la_SOURCES = RunParallel.cpp RunParallel.h \ RunPlugin.cpp RunPlugin.h \ RunRedirected.cpp RunRedirected.h librun_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) librun_la_LIBADD = $(DLOPEN_LIBS) nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/run/PaxHeaders.7502/Makefile.in0000644000000000000000000000013013214315732026027 xustar000000000000000030 mtime=1513200602.562044997 29 atime=1513200650.05462585 29 ctime=1513200662.86778256 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/run/Makefile.in0000644000175000002070000006072513214315732026111 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/grid-manager/run DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) am__DEPENDENCIES_1 = librun_la_DEPENDENCIES = $(am__DEPENDENCIES_1) am_librun_la_OBJECTS = librun_la-RunParallel.lo librun_la-RunPlugin.lo \ librun_la-RunRedirected.lo librun_la_OBJECTS = $(am_librun_la_OBJECTS) librun_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(librun_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(librun_la_SOURCES) DIST_SOURCES = $(librun_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS 
= @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ 
JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ 
includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = librun.la librun_la_SOURCES = RunParallel.cpp RunParallel.h \ RunPlugin.cpp RunPlugin.h \ RunRedirected.cpp RunRedirected.h librun_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) librun_la_LIBADD = $(DLOPEN_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/run/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/run/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done librun.la: $(librun_la_OBJECTS) $(librun_la_DEPENDENCIES) $(librun_la_LINK) $(librun_la_OBJECTS) $(librun_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librun_la-RunParallel.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librun_la-RunPlugin.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librun_la-RunRedirected.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< librun_la-RunParallel.lo: RunParallel.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librun_la_CXXFLAGS) $(CXXFLAGS) -MT librun_la-RunParallel.lo -MD -MP -MF $(DEPDIR)/librun_la-RunParallel.Tpo -c -o librun_la-RunParallel.lo `test -f 'RunParallel.cpp' || echo '$(srcdir)/'`RunParallel.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/librun_la-RunParallel.Tpo $(DEPDIR)/librun_la-RunParallel.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='RunParallel.cpp' object='librun_la-RunParallel.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librun_la_CXXFLAGS) $(CXXFLAGS) -c -o 
librun_la-RunParallel.lo `test -f 'RunParallel.cpp' || echo '$(srcdir)/'`RunParallel.cpp librun_la-RunPlugin.lo: RunPlugin.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librun_la_CXXFLAGS) $(CXXFLAGS) -MT librun_la-RunPlugin.lo -MD -MP -MF $(DEPDIR)/librun_la-RunPlugin.Tpo -c -o librun_la-RunPlugin.lo `test -f 'RunPlugin.cpp' || echo '$(srcdir)/'`RunPlugin.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/librun_la-RunPlugin.Tpo $(DEPDIR)/librun_la-RunPlugin.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='RunPlugin.cpp' object='librun_la-RunPlugin.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librun_la_CXXFLAGS) $(CXXFLAGS) -c -o librun_la-RunPlugin.lo `test -f 'RunPlugin.cpp' || echo '$(srcdir)/'`RunPlugin.cpp librun_la-RunRedirected.lo: RunRedirected.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librun_la_CXXFLAGS) $(CXXFLAGS) -MT librun_la-RunRedirected.lo -MD -MP -MF $(DEPDIR)/librun_la-RunRedirected.Tpo -c -o librun_la-RunRedirected.lo `test -f 'RunRedirected.cpp' || echo '$(srcdir)/'`RunRedirected.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/librun_la-RunRedirected.Tpo $(DEPDIR)/librun_la-RunRedirected.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='RunRedirected.cpp' object='librun_la-RunRedirected.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librun_la_CXXFLAGS) $(CXXFLAGS) -c -o librun_la-RunRedirected.lo `test -f 'RunRedirected.cpp' || echo '$(srcdir)/'`RunRedirected.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ 
test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/run/PaxHeaders.7502/RunRedirected.h0000644000000000000000000000012312735423445026704 xustar000000000000000027 mtime=1467361061.148401 26 atime=1513200575.99372 30 ctime=1513200662.874782645 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/run/RunRedirected.h0000644000175000002070000000154712735423445026761 0ustar00mockbuildmock00000000000000#ifndef GRID_MANAGER_RUN_REDIRECTED_H #define GRID_MANAGER_RUN_REDIRECTED_H #include #include namespace ARex { /// Run child process with stdin, stdout and stderr redirected to specified handles class RunRedirected { private: RunRedirected(const char* cmdname,int in,int out,int err):cmdname_(cmdname?cmdname:""),stdin_(in),stdout_(out),stderr_(err) { }; ~RunRedirected(void) { }; std::string cmdname_; int stdin_; int stdout_; int stderr_; static void initializer(void* arg); public: operator bool(void) { return true; }; bool operator!(void) { return false; }; static int run(const Arc::User& user,const char* cmdname,int in,int out,int err,char *const args[],int timeout); static int run(const Arc::User& user,const char* cmdname,int in,int out,int err,const char* cmd,int timeoutd); }; } // namespace ARex #endif nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/run/PaxHeaders.7502/RunParallel.h0000644000000000000000000000012312735423626026367 xustar000000000000000027 mtime=1467361174.984574 26 atime=1513200575.99472 30 ctime=1513200662.870782597 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/run/RunParallel.h0000644000175000002070000000301712735423626026436 0ustar00mockbuildmock00000000000000#ifndef GRID_MANAGER_RUN_PARALLEL_H #define GRID_MANAGER_RUN_PARALLEL_H #include #include "../jobs/JobsList.h" #include "RunPlugin.h" namespace ARex { /// Run child process in parallel with stderr 
redirected to job.jobid.errors class RunParallel { private: RunParallel(const char* procid, const char* errlog, RunPlugin* cred, RunPlugin::substitute_t subst, void* subst_arg) :procid_(procid?procid:""), errlog_(errlog?errlog:""), cred_(cred), subst_(subst), subst_arg_(subst_arg) { }; ~RunParallel(void) { }; std::string procid_; std::string errlog_; RunPlugin* cred_; RunPlugin::substitute_t subst_; void* subst_arg_; static void initializer(void* arg); // TODO: no static variables static void (*kicker_func_)(void*); static void* kicker_arg_; operator bool(void) { return true; }; bool operator!(void) { return false; }; static bool run(const GMConfig& config, const Arc::User& user, const char* procid, const char* errlog, const std::string& args, Arc::Run**, const char* job_proxy, bool su = true, RunPlugin* cred = NULL, RunPlugin::substitute_t subst = NULL, void* subst_arg = NULL); public: static bool run(const GMConfig& config, const GMJob& job, const std::string& args, Arc::Run**, bool su = true); static void kicker(void (*kicker_func)(void*),void* kicker_arg) { kicker_arg_=kicker_arg; kicker_func_=kicker_func; }; }; } // namespace ARex #endif nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/run/PaxHeaders.7502/RunRedirected.cpp0000644000000000000000000000012312735423445027237 xustar000000000000000027 mtime=1467361061.148401 26 atime=1513200575.99572 30 ctime=1513200662.873782633 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/run/RunRedirected.cpp0000644000175000002070000000537712735423445027321 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "RunRedirected.h" namespace ARex { static Arc::Logger& logger = Arc::Logger::getRootLogger(); int RunRedirected::run(const Arc::User& user,const char* cmdname,int in,int out,int err,char *const args[],int timeout) { std::list args_; for(int n = 0;args[n];++n) args_.push_back(std::string(args[n])); Arc::Run re(args_); if(!re) { logger.msg(Arc::ERROR,"%s: Failure creating slot for child process",cmdname?cmdname:""); return -1; }; RunRedirected* rr = new RunRedirected(cmdname,in,out,err); if((!rr) || (!(*rr))) { if(rr) delete rr; logger.msg(Arc::ERROR,"%s: Failure creating data storage for child process",cmdname?cmdname:""); return -1; }; re.AssignInitializer(&initializer,rr); re.AssignUserId(user.get_uid()); re.AssignGroupId(user.get_gid()); re.KeepStdin(true); re.KeepStdout(true); re.KeepStderr(true); if(!re.Start()) { delete rr; logger.msg(Arc::ERROR,"%s: Failure starting child process",cmdname?cmdname:""); return -1; }; delete rr; if(!re.Wait(timeout)) { logger.msg(Arc::ERROR,"%s: Failure waiting for child process to finish",cmdname?cmdname:""); return -1; }; return re.Result(); } int RunRedirected::run(const Arc::User& user,const char* cmdname,int in,int out,int err,const char* cmd,int timeout) { Arc::Run re(cmd); if(!re) { logger.msg(Arc::ERROR,"%s: Failure creating slot for child process",cmdname?cmdname:""); return -1; }; RunRedirected* rr = new RunRedirected(cmdname,in,out,err); if((!rr) || (!(*rr))) { if(rr) delete rr; logger.msg(Arc::ERROR,"%s: Failure creating data storage for child process",cmdname?cmdname:""); return -1; }; re.AssignInitializer(&initializer,rr); re.AssignUserId(user.get_uid()); re.AssignGroupId(user.get_gid()); re.KeepStdin(true); re.KeepStdout(true); re.KeepStderr(true); if(!re.Start()) { delete rr; logger.msg(Arc::ERROR,"%s: Failure starting child process",cmdname?cmdname:""); return -1; }; delete rr; if(!re.Wait(timeout)) { 
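// Wait() did not report completion within the given timeout - log the problem and return failure to the caller.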
logger.msg(Arc::ERROR,"%s: Failure waiting for child process to finish",cmdname?cmdname:""); return -1; }; return re.Result(); } void RunRedirected::initializer(void* arg) { #ifdef WIN32 #error This functionality is not available in Windows environment #else // There must be only async-safe calls here! // child RunRedirected* it = (RunRedirected*)arg; // set up stdin,stdout and stderr if(it->stdin_ != -1) dup2(it->stdin_,0); if(it->stdout_ != -1) dup2(it->stdout_,1); if(it->stderr_ != -1) dup2(it->stderr_,2); #endif } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/run/PaxHeaders.7502/RunPlugin.cpp0000644000000000000000000000012312771224333026416 xustar000000000000000027 mtime=1474635995.909107 26 atime=1513200575.99372 30 ctime=1513200662.871782609 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/run/RunPlugin.cpp0000644000175000002070000001561412771224333026473 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #ifndef WIN32 #include #endif #include #include #include "RunPlugin.h" namespace ARex { static void free_args(char** args) { if(args == NULL) return; for(int i=0;args[i];i++) free(args[i]); free(args); } static char** string_to_args(const std::string& command) { if(command.length() == 0) return NULL; int n = 100; char** args = (char**)malloc(n*sizeof(char**)); int i; for(i=0;i::iterator i = args_.begin(); i!=args_.end();++i,++n) args[n]=(char*)(i->c_str()); args[n]=NULL; if(lib.length() == 0) { bool r = false; Arc::Run re(args_); re.AssignStdin(stdin_); re.AssignStdout(stdout_); re.AssignStderr(stderr_); if(re.Start()) { if(re.Wait(timeout_)) { result_=re.Result(); r=true; } else { re.Kill(0); }; }; if(!r) { free(args); return false; }; } else { #ifndef WIN32 void* lib_h = dlopen(lib.c_str(),RTLD_NOW); if(lib_h == NULL) { free(args); return false; }; lib_plugin_t f; f.v = dlsym(lib_h,args[0]); if(f.v == NULL) { dlclose(lib_h); free(args); return false; }; result_ = (*f.f)(args[1],args[2],args[3],args[4],args[5], args[6],args[7],args[8],args[9],args[10], args[11],args[12],args[13],args[14],args[15], args[16],args[17],args[18],args[19],args[20], args[21],args[22],args[23],args[24],args[25], args[26],args[27],args[28],args[29],args[30], args[31],args[32],args[33],args[34],args[35], args[36],args[37],args[38],args[39],args[40], args[41],args[42],args[43],args[44],args[45], args[56],args[57],args[58],args[59],args[60], args[61],args[62],args[63],args[64],args[65], args[66],args[67],args[68],args[69],args[70], args[71],args[72],args[73],args[74],args[75], args[76],args[77],args[78],args[79],args[80], args[81],args[82],args[83],args[84],args[85], args[86],args[87],args[88],args[89],args[90], args[91],args[92],args[93],args[94],args[95], args[96],args[97],args[98],args[99],args[100]); dlclose(lib_h); #else #warning Implement calling function from library for Windows result=-1; #endif }; free(args); return true; } bool RunPlugin::run(substitute_t subst,void* arg) { result_=0; stdout_=""; stderr_=""; if(subst == NULL) return run(); if(args_.empty()) return true; char** args = (char**)malloc(sizeof(char*)*(args_.size()+1)); if(args == NULL) return false; std::list args__; for(std::list::iterator i = args_.begin();i!=args_.end();++i) { args__.push_back(*i); }; for(std::list::iterator i = args__.begin();i!=args__.end();++i) { (*subst)(*i,arg); }; int n = 0; for(std::list::iterator i = args__.begin(); i!=args__.end();++i,++n) args[n]=(char*)(i->c_str()); args[n]=NULL; if(lib.length() == 0) { bool r = false; Arc::Run re(args__); 
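// No plugin library configured: execute the argument list as an external process,
// feed it the configured stdin channel, collect stdout/stderr into the corresponding
// string members, wait up to timeout_, and store the exit code in result_;
// a child that does not finish in time is killed.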
re.AssignStdin(stdin_); re.AssignStdout(stdout_); re.AssignStderr(stderr_); if(re.Start()) { if(re.Wait(timeout_)) { result_=re.Result(); r=true; } else { re.Kill(0); }; }; if(!r) { free(args); return false; }; } else { #ifndef WIN32 void* lib_h = dlopen(lib.c_str(),RTLD_NOW); if(lib_h == NULL) { free(args); return false; }; lib_plugin_t f; f.v = dlsym(lib_h,args[0]); if(f.v == NULL) { dlclose(lib_h); free(args); return false; }; result_ = (*f.f)(args[1],args[2],args[3],args[4],args[5], args[6],args[7],args[8],args[9],args[10], args[11],args[12],args[13],args[14],args[15], args[16],args[17],args[18],args[19],args[20], args[21],args[22],args[23],args[24],args[25], args[26],args[27],args[28],args[29],args[30], args[31],args[32],args[33],args[34],args[35], args[36],args[37],args[38],args[39],args[40], args[41],args[42],args[43],args[44],args[45], args[56],args[57],args[58],args[59],args[60], args[61],args[62],args[63],args[64],args[65], args[66],args[67],args[68],args[69],args[70], args[71],args[72],args[73],args[74],args[75], args[76],args[77],args[78],args[79],args[80], args[81],args[82],args[83],args[84],args[85], args[86],args[87],args[88],args[89],args[90], args[91],args[92],args[93],args[94],args[95], args[96],args[97],args[98],args[99],args[100]); dlclose(lib_h); #else #warning Implement calling function from library for Windows result=-1; #endif }; free(args); return true; } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/run/PaxHeaders.7502/RunPlugin.h0000644000000000000000000000012312046737100026057 xustar000000000000000027 mtime=1352384064.987698 26 atime=1513200575.99472 30 ctime=1513200662.872782621 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/run/RunPlugin.h0000644000175000002070000000264612046737100026135 0ustar00mockbuildmock00000000000000#ifndef GRID_MANAGER_RUN_PLUGIN_H #define GRID_MANAGER_RUN_PLUGIN_H #include #include #include #include #include namespace ARex { /// Run external process for acquiring local credentials. 
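/// The plugin is described either by a single command line or by a pre-split argument list;
/// it is executed with a configurable timeout (10 seconds by default) and its exit code is
/// available through result(). Argument strings may be rewritten by a caller-supplied
/// substitute_t callback before the command is started. Illustrative sketch (the plugin
/// path below is an assumption, not part of the source):
///   RunPlugin cred("/opt/example/acquire_creds /etc/grid-security");
///   cred.timeout(30);
///   if(cred.run() && (cred.result() == 0)) { /* plugin succeeded */ }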
class RunPlugin { private: std::list args_; std::string lib; std::string stdin_; std::string stdout_; std::string stderr_; int timeout_; int result_; void set(const std::string& cmd); void set(char const * const * args); public: typedef void (*substitute_t)(std::string& str,void* arg); union lib_plugin_t { int (*f)(...); void* v; }; RunPlugin(void):timeout_(10),result_(0) { }; RunPlugin(const std::string& cmd):timeout_(10),result_(0) { set(cmd); }; RunPlugin(char const * const * args):timeout_(10),result_(0) { set(args); }; RunPlugin& operator=(const std::string& cmd) { set(cmd); return *this; }; RunPlugin& operator=(char const * const * args) { set(args); return *this; }; bool run(void); bool run(substitute_t subst,void* arg); int result(void) const { return result_; }; void timeout(int t) { timeout_=t; }; void stdin_channel(const std::string& s) { stdin_=s; }; const std::string& stdout_channel(void) const { return stdout_; }; const std::string& stderr_channel(void) const { return stderr_; }; operator bool(void) const { return !args_.empty(); }; }; } // namespace ARex #endif // GRID_MANAGER_RUN_PLUGIN_H nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/run/PaxHeaders.7502/RunParallel.cpp0000644000000000000000000000012313213512677026717 xustar000000000000000027 mtime=1513002431.186075 26 atime=1513200575.99572 30 ctime=1513200662.868782572 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/run/RunParallel.cpp0000644000175000002070000001222613213512677026770 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "../conf/GMConfig.h" #include "RunParallel.h" namespace ARex { typedef struct { const GMConfig* config; const GMJob* job; const char* reason; } job_subst_t; static Arc::Logger& logger = Arc::Logger::getRootLogger(); void (*RunParallel::kicker_func_)(void*) = NULL; void* RunParallel::kicker_arg_ = NULL; static void job_subst(std::string& str,void* arg) { job_subst_t* subs = (job_subst_t*)arg; for(std::string::size_type p = 0;;) { p=str.find('%',p); if(p==std::string::npos) break; if(str[p+1]=='I') { str.replace(p,2,subs->job->get_id().c_str()); p+=subs->job->get_id().length(); } else if(str[p+1]=='S') { str.replace(p,2,subs->job->get_state_name()); p+=strlen(subs->job->get_state_name()); } else if(str[p+1]=='O') { str.replace(p,2,subs->reason); p+=strlen(subs->reason); } else { p+=2; }; }; subs->config->Substitute(str, subs->job->get_user()); } bool RunParallel::run(const GMConfig& config,const GMJob& job,const std::string& args,Arc::Run** ere,bool su) { RunPlugin* cred = config.CredPlugin(); job_subst_t subs; subs.config=&config; subs.job=&job; subs.reason="external"; std::string errlog = config.ControlDir()+"/job."+job.get_id()+".errors"; std::string proxy = config.ControlDir() + "/job." 
+ job.get_id() + ".proxy"; return run(config, job.get_user(), job.get_id().c_str(), errlog.c_str(), args, ere, proxy.c_str(), su, NULL, &job_subst, &subs); } /* fork & execute child process with stderr redirected to job.ID.errors, stdin and stdout to /dev/null */ bool RunParallel::run(const GMConfig& config, const Arc::User& user, const char* procid, const char* errlog, const std::string& args, Arc::Run** ere, const char* jobproxy, bool su, RunPlugin* cred, RunPlugin::substitute_t subst, void* subst_arg) { *ere=NULL; Arc::Run* re = new Arc::Run(args); if((!re) || (!(*re))) { if(re) delete re; logger.msg(Arc::ERROR,"%s: Failure creating slot for child process",procid?procid:""); return false; }; if(kicker_func_) re->AssignKicker(kicker_func_,kicker_arg_); RunParallel* rp = new RunParallel(procid,errlog,cred,subst,subst_arg); if((!rp) || (!(*rp))) { if(rp) delete rp; delete re; logger.msg(Arc::ERROR,"%s: Failure creating data storage for child process",procid?procid:""); return false; }; re->AssignInitializer(&initializer,rp); if(su) { // change user re->AssignUserId(user.get_uid()); re->AssignGroupId(user.get_gid()); }; // setting environment - TODO - better environment if(jobproxy && jobproxy[0]) { re->RemoveEnvironment("X509_RUN_AS_SERVER"); re->AddEnvironment("X509_USER_PROXY",jobproxy); // for Globus 2.2 set fake cert and key, or else it takes // those from host in case of root user. // 2.4 needs names and 2.2 will work too. // 3.x requires fake ones again. #if GLOBUS_IO_VERSION>=5 re->AddEnvironment("X509_USER_KEY",(std::string("fake"))); re->AddEnvironment("X509_USER_CERT",(std::string("fake"))); #else re->AddEnvironment("X509_USER_KEY",jobproxy); re->AddEnvironment("X509_USER_CERT",jobproxy); #endif std::string cert_dir = config.CertDir(); if(!cert_dir.empty()) { re->AddEnvironment("X509_CERT_DIR",cert_dir); } else { re->RemoveEnvironment("X509_CERT_DIR"); }; std::string voms_dir = config.VomsDir(); if(!voms_dir.empty()) { re->AddEnvironment("X509_VOMS_DIR",voms_dir); } else { re->RemoveEnvironment("X509_VOMS_DIR"); }; }; re->KeepStdin(true); re->KeepStdout(true); re->KeepStderr(true); if(!re->Start()) { delete rp; delete re; logger.msg(Arc::ERROR,"%s: Failure starting child process",procid?procid:""); return false; }; delete rp; *ere=re; return true; } void RunParallel::initializer(void* arg) { #ifdef WIN32 #error This functionality is not available in Windows environment #else // child RunParallel* it = (RunParallel*)arg; if(it->cred_) { // run external plugin to acquire non-unix local credentials if(!it->cred_->run(it->subst_,it->subst_arg_)) { logger.msg(Arc::ERROR,"%s: Failed to run plugin",it->procid_); sleep(10); _exit(1); }; if(it->cred_->result() != 0) { logger.msg(Arc::ERROR,"%s: Plugin failed",it->procid_); sleep(10); _exit(1); }; }; int h; // set up stdin,stdout and stderr h=::open("/dev/null",O_RDONLY); if(h != 0) { if(dup2(h,0) != 0) { sleep(10); exit(1); }; close(h); }; h=::open("/dev/null",O_WRONLY); if(h != 1) { if(dup2(h,1) != 1) { sleep(10); exit(1); }; close(h); }; std::string errlog; if(!(it->errlog_.empty())) { h=::open(it->errlog_.c_str(),O_WRONLY | O_CREAT | O_APPEND,S_IRUSR | S_IWUSR); if(h==-1) { h=::open("/dev/null",O_WRONLY); }; } else { h=::open("/dev/null",O_WRONLY); }; if(h != 2) { if(dup2(h,2) != 2) { sleep(10); exit(1); }; close(h); }; #endif } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/run/PaxHeaders.7502/README0000644000000000000000000000012211016612002024627 xustar000000000000000026 mtime=1211831298.62818 26 
atime=1513200575.99472 30 ctime=1513200662.865782535 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/run/README0000644000175000002070000000004511016612002024675 0ustar00mockbuildmock00000000000000Classes to run external executables. nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/files0000644000000000000000000000013213214316027024203 xustar000000000000000030 mtime=1513200663.019784419 30 atime=1513200668.718854121 30 ctime=1513200663.019784419 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/0000755000175000002070000000000013214316027024326 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515026322 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200602.023038405 30 ctime=1513200663.010784309 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/Makefile.am0000644000175000002070000000046212052416515026366 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libfiles.la libfiles_la_SOURCES = \ Delete.cpp ControlFileHandling.cpp ControlFileContent.cpp JobLogFile.cpp \ Delete.h ControlFileHandling.h ControlFileContent.h JobLogFile.h libfiles_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/PaxHeaders.7502/JobLogFile.h0000644000000000000000000000012412123054727026412 xustar000000000000000027 mtime=1363958231.436192 27 atime=1513200575.977719 30 ctime=1513200663.019784419 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/JobLogFile.h0000644000175000002070000000062212123054727026457 0ustar00mockbuildmock00000000000000#ifndef GRID_MANAGER_INFO_LOG_H #define GRID_MANAGER_INFO_LOG_H #include #include namespace ARex { class GMJob; class GMConfig; /// Extract job information from control files and write job summary file used by reporting tools bool job_log_make_file(const GMJob &job,const GMConfig& config,const std::string &url,std::list &report_config); } // namespace ARex #endif nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315732026327 xustar000000000000000030 mtime=1513200602.074039029 30 atime=1513200649.952624602 30 ctime=1513200663.011784321 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/Makefile.in0000644000175000002070000006351613214315732026410 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
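# The variables and rules that follow are regenerated by automake from the
# Makefile.am shown above (the libfiles.la convenience library built from
# Delete, ControlFileHandling, ControlFileContent and JobLogFile); changes
# belong in Makefile.am, not in this generated file.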
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/grid-manager/files DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libfiles_la_LIBADD = am_libfiles_la_OBJECTS = libfiles_la-Delete.lo \ libfiles_la-ControlFileHandling.lo \ libfiles_la-ControlFileContent.lo libfiles_la-JobLogFile.lo libfiles_la_OBJECTS = $(am_libfiles_la_OBJECTS) libfiles_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libfiles_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libfiles_la_SOURCES) DIST_SOURCES = $(libfiles_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = 
@AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = 
@JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ 
htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libfiles.la libfiles_la_SOURCES = \ Delete.cpp ControlFileHandling.cpp ControlFileContent.cpp JobLogFile.cpp \ Delete.h ControlFileHandling.h ControlFileContent.h JobLogFile.h libfiles_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/files/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/files/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libfiles.la: $(libfiles_la_OBJECTS) $(libfiles_la_DEPENDENCIES) $(libfiles_la_LINK) $(libfiles_la_OBJECTS) $(libfiles_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libfiles_la-ControlFileContent.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libfiles_la-ControlFileHandling.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libfiles_la-Delete.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libfiles_la-JobLogFile.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libfiles_la-Delete.lo: Delete.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libfiles_la_CXXFLAGS) $(CXXFLAGS) -MT libfiles_la-Delete.lo -MD -MP -MF $(DEPDIR)/libfiles_la-Delete.Tpo -c -o libfiles_la-Delete.lo `test -f 'Delete.cpp' || echo '$(srcdir)/'`Delete.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libfiles_la-Delete.Tpo $(DEPDIR)/libfiles_la-Delete.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Delete.cpp' object='libfiles_la-Delete.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libfiles_la_CXXFLAGS) $(CXXFLAGS) -c -o libfiles_la-Delete.lo `test -f 'Delete.cpp' || echo '$(srcdir)/'`Delete.cpp libfiles_la-ControlFileHandling.lo: ControlFileHandling.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libfiles_la_CXXFLAGS) $(CXXFLAGS) -MT libfiles_la-ControlFileHandling.lo -MD -MP -MF $(DEPDIR)/libfiles_la-ControlFileHandling.Tpo -c -o libfiles_la-ControlFileHandling.lo `test -f 'ControlFileHandling.cpp' || echo '$(srcdir)/'`ControlFileHandling.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libfiles_la-ControlFileHandling.Tpo $(DEPDIR)/libfiles_la-ControlFileHandling.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ControlFileHandling.cpp' object='libfiles_la-ControlFileHandling.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libfiles_la_CXXFLAGS) $(CXXFLAGS) -c -o libfiles_la-ControlFileHandling.lo `test -f 'ControlFileHandling.cpp' || echo '$(srcdir)/'`ControlFileHandling.cpp libfiles_la-ControlFileContent.lo: ControlFileContent.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libfiles_la_CXXFLAGS) $(CXXFLAGS) -MT libfiles_la-ControlFileContent.lo -MD -MP -MF $(DEPDIR)/libfiles_la-ControlFileContent.Tpo -c -o libfiles_la-ControlFileContent.lo `test -f 'ControlFileContent.cpp' || echo '$(srcdir)/'`ControlFileContent.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libfiles_la-ControlFileContent.Tpo $(DEPDIR)/libfiles_la-ControlFileContent.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ControlFileContent.cpp' object='libfiles_la-ControlFileContent.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libfiles_la_CXXFLAGS) $(CXXFLAGS) -c -o libfiles_la-ControlFileContent.lo `test -f 'ControlFileContent.cpp' || echo '$(srcdir)/'`ControlFileContent.cpp libfiles_la-JobLogFile.lo: JobLogFile.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libfiles_la_CXXFLAGS) $(CXXFLAGS) -MT libfiles_la-JobLogFile.lo -MD -MP -MF $(DEPDIR)/libfiles_la-JobLogFile.Tpo -c -o libfiles_la-JobLogFile.lo `test -f 'JobLogFile.cpp' || echo '$(srcdir)/'`JobLogFile.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libfiles_la-JobLogFile.Tpo $(DEPDIR)/libfiles_la-JobLogFile.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobLogFile.cpp' object='libfiles_la-JobLogFile.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libfiles_la_CXXFLAGS) $(CXXFLAGS) -c -o libfiles_la-JobLogFile.lo `test -f 'JobLogFile.cpp' || echo '$(srcdir)/'`JobLogFile.cpp mostlyclean-libtool: -rm -f *.lo 
clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/PaxHeaders.7502/Delete.h0000644000000000000000000000012412046717105025640 xustar000000000000000027 mtime=1352375877.880678 27 atime=1513200575.971719 30 ctime=1513200663.016784382 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/Delete.h0000644000175000002070000000164312046717105025711 0ustar00mockbuildmock00000000000000#ifndef __ARC_GM_DELETE_H__ #define __ARC_GM_DELETE_H__ #include #include #include "../files/ControlFileContent.h" namespace ARex { /** Delete all files and subdirectories in 'dir_base' which are or are not present in 'files' list. Accepts: dir_base - path to directory. files - list of files to delete/keep. Paths are relative to 'dir_base'. excl - if set to true all files excluding those in 'files' will be deleted. Otherwise - only files in 'files' which have LFN information will be deleted. If some of 'files' correspond to directories - whole directory will be deleted. 
uid - uid under which to perform file system operations gid - gid under which to perform file system operations */ int delete_all_files(const std::string &dir_base, const std::list &files, bool excl, uid_t uid = 0, gid_t gid = 0); } // namespace ARex #endif nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/PaxHeaders.7502/JobLogFile.cpp0000644000000000000000000000012312675602216026750 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.975719 29 ctime=1513200663.01578437 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/JobLogFile.cpp0000644000175000002070000002373712675602216027032 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include "../jobs/GMJob.h" #include "../conf/GMConfig.h" #include "ControlFileHandling.h" #include "JobLogFile.h" namespace ARex { const char * const sfx_desc = ".description"; const char * const sfx_diag = ".diag"; const char * const sfx_statistics = ".statistics"; static void extract_integer(std::string& s,std::string::size_type n = 0) { for(;n &report_config) { // contents of the job record std::string job_data; // URL to send info to if (!url.empty()) job_data += "loggerurl=" + url + '\n'; // Configuration options for usage reporter tool for (std::list::iterator sp = report_config.begin(); sp != report_config.end(); ++sp) { job_data += *sp + '\n'; } // Copy job description std::string fname_src(config.ControlDir() + "/job." + job.get_id() + sfx_desc); std::string desc; if (!Arc::FileRead(fname_src, desc)) return false; // Remove line breaks std::replace( desc.begin(), desc.end(), '\r', ' '); std::replace( desc.begin(), desc.end(), '\n', ' '); job_data += "description=" + desc + '\n'; // Find local owner of job from owner of desc file struct stat st; if (Arc::FileStat(fname_src, &st, false)) { struct passwd pw_; struct passwd *pw; char buf[BUFSIZ]; getpwuid_r(st.st_uid,&pw_,buf,BUFSIZ,&pw); if (pw != NULL && pw->pw_name) { job_data += "localuser=" + std::string(pw->pw_name) + '\n'; } } // Start time and identifier time_t t = job_mark_time(fname_src); job_data += "submissiontime=" + Arc::Time(t).str(Arc::MDSTime) + '\n'; job_data += "ngjobid=" + job.get_id() + '\n'; // Analyze job.ID.local and store relevant information JobLocalDescription local; if (!job_local_read_file(job.get_id(), config, local)) return false; if (!local.DN.empty()) job_data += "usersn=" + local.DN + '\n'; if (!local.headnode.empty()) job_data += "headnode=" + local.headnode + '\n'; if (!local.lrms.empty()) job_data += "lrms=" + local.lrms + '\n'; if (!local.queue.empty()) job_data += "queue=" + local.queue + '\n'; if (!local.localid.empty()) job_data += "localid=" + local.localid + '\n'; if (!local.jobname.empty()) job_data += "jobname=" + local.jobname + '\n'; if (!local.globalid.empty()) job_data += "globalid=" + local.globalid + '\n'; for (std::list::const_iterator projectname = local.projectnames.begin(); projectname != local.projectnames.end(); ++projectname) { job_data += "projectname=" + *projectname + '\n'; } if (!local.clientname.empty()) job_data += "clienthost=" + local.clientname + '\n'; // Copy public part of user certificate chain incl. 
proxy fname_src = job_proxy_filename(job.get_id(), config); std::list proxy_data; if (Arc::FileRead(fname_src, proxy_data)) { std::string user_cert; bool in_private=false; // TODO: remove private key filtering in a future because job.proxy file will soon contain only public part for (std::list::iterator line = proxy_data.begin(); line != proxy_data.end(); ++line) { if (in_private) { // Skip private key if (line->find("-----END") != std::string::npos && line->find("PRIVATE KEY-----") != std::string::npos) { // can be RSA, DSA etc. in_private=false; } } else { if (line->find("-----BEGIN") != std::string::npos && line->find("PRIVATE KEY-----") != std::string::npos) { // can be RSA, DSA etc. in_private=true; } else { user_cert += *line + '\\'; } } } if (!user_cert.empty()) job_data += "usercert=" + user_cert + '\n'; } // Extract requested resources Arc::JobDescription arc_job_desc; std::list arc_job_desc_list; if (!Arc::JobDescription::Parse(desc, arc_job_desc_list, "", "GRIDMANAGER") || arc_job_desc_list.size() != 1) return false; arc_job_desc = arc_job_desc_list.front(); if (arc_job_desc.Resources.IndividualPhysicalMemory.max>=0) { job_data += "requestedmemory=" + Arc::tostring(arc_job_desc.Resources.IndividualPhysicalMemory.max) + '\n'; } if (arc_job_desc.Resources.TotalCPUTime.range.max>=0) { job_data += "requestedcputime=" + Arc::tostring(arc_job_desc.Resources.TotalCPUTime.range.max) + '\n'; } if (arc_job_desc.Resources.TotalWallTime.range.max>=0) { job_data += "requestedwalltime=" + Arc::tostring(arc_job_desc.Resources.TotalWallTime.range.max) + '\n'; } if (arc_job_desc.Resources.DiskSpaceRequirement.DiskSpace.max>=0) { job_data += "requesteddisk=" + Arc::tostring((arc_job_desc.Resources.DiskSpaceRequirement.DiskSpace.max*1024*1024)) + '\n'; } if (arc_job_desc.Resources.RunTimeEnvironment.getSoftwareList().size()>0) { std::string rteStr; for (std::list::const_iterator itSW = arc_job_desc.Resources.RunTimeEnvironment.getSoftwareList().begin(); itSW != arc_job_desc.Resources.RunTimeEnvironment.getSoftwareList().end(); itSW++) { if (!itSW->empty() && !itSW->getVersion().empty()) { if (!rteStr.empty()) rteStr += " "; rteStr += *itSW; } } if (!rteStr.empty()) job_data += "runtimeenvironment=" + rteStr + '\n'; } // Analyze diagnostics and store relevant information fname_src = config.ControlDir() + "/job." 
+ job.get_id() + sfx_diag; std::list diag_data; if (Arc::FileRead(fname_src, diag_data)) { std::string nodenames; int nodecount = 0; float cputime = 0; for (std::list::iterator line = diag_data.begin(); line != diag_data.end(); ++line) { std::string::size_type p = line->find('='); if (p == std::string::npos) continue; std::string key(Arc::lower(line->substr(0, p))); std::string value(line->substr(p+1)); if (key.empty()) continue; if (key == "nodename") { if (nodecount) nodenames+=":"; nodenames+=value; nodecount++; } else if(key == "processors") { job_data += "processors=" + value + '\n'; } else if(key == "walltime") { float f; if (string_to_number(value,f)) { job_data += "usedwalltime=" + Arc::tostring((unsigned int)f) + '\n'; } } else if(key == "kerneltime") { float f; if(string_to_number(value,f)) { job_data += "usedkernelcputime=" + Arc::tostring((unsigned int)f) + '\n'; cputime += f; } } else if(key == "usertime") { float f; if(string_to_number(value,f)) { job_data += "usedusercputime=" + Arc::tostring((unsigned int)f) + '\n'; cputime += f; } } else if(key == "averagetotalmemory") { float f; if(string_to_number(value,f)) { job_data += "usedmemory=" + Arc::tostring((unsigned int)f) + '\n'; } } else if(key == "averageresidentmemory") { float f; if(string_to_number(value,f)) { job_data += "usedaverageresident=" + Arc::tostring((unsigned int)f) + '\n'; } } else if(key == "maxresidentmemory") { float f; if(string_to_number(value,f)) { job_data += "usedmaxresident=" + Arc::tostring((unsigned int)f) + '\n'; } } else if(key == "exitcode") { int n; if(Arc::stringto(value,n)) job_data += "exitcode=" + Arc::tostring(n) + '\n'; } } if(nodecount) { job_data += "nodename=" + nodenames + '\n'; job_data += "nodecount=" + Arc::tostring(nodecount) + '\n'; } job_data += "usedcputime=" + Arc::tostring((unsigned int)cputime) + '\n'; } // Endtime and failure reason std::string status; if (job.get_state() == JOB_STATE_FINISHED) { status="completed"; t = job_state_time(job.get_id(),config); if (t == 0) t=::time(NULL); job_data += "endtime=" + Arc::Time(t).str(Arc::MDSTime) + '\n'; if (job_failed_mark_check(job.get_id(),config)) { std::string failure = job_failed_mark_read(job.get_id(),config); job_data += "failurestring=" + failure + '\n'; status="failed"; } } if(!status.empty()) job_data += "status=" + status + '\n'; // Read and store statistics information fname_src = config.ControlDir() + "/job." 
+ job.get_id() + sfx_statistics; std::list statistics_data; if (Arc::FileRead(fname_src, statistics_data)) { for (std::list::iterator line = statistics_data.begin(); line != statistics_data.end(); ++line) { // statistics file has : as first delimiter, replace with = // todo: change DTRGenerator to use = as first delim for next major release std::string::size_type p = line->find(':'); if (p != std::string::npos) line->replace(p, 1, "="); job_data += *line+"\n"; } } // Store in file std::string out_file(config.ControlDir()+"/logs/"+job.get_id()+".XXXXXX"); if (!Arc::TmpFileCreate(out_file, job_data)) return false; fix_file_owner(out_file, job); return true; } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/PaxHeaders.7502/Delete.cpp0000644000000000000000000000012412046717105026173 xustar000000000000000027 mtime=1352375877.880678 27 atime=1513200575.978719 30 ctime=1513200663.012784333 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/Delete.cpp0000644000175000002070000001127012046717105026241 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "../files/ControlFileContent.h" #include "Delete.h" namespace ARex { struct FL_p { const char* s; FL_p* next; FL_p* prev; }; /* return values: 0 - empty, 1 - has files, 2 - failed */ static int delete_all_recur(const std::string &dir_base, const std::string &dir_cur, FL_p **fl_list,bool excl, uid_t uid,gid_t gid) { /* take corresponding members of fl_list */ FL_p* fl_new = NULL; /* new list */ FL_p* fl_cur = (*fl_list); /* pointer in old list */ int l = dir_cur.length(); /* extract suitable files from list */ for(;;) { if(fl_cur == NULL) break; FL_p* tmp = fl_cur->next; if(!strncmp(fl_cur->s,dir_cur.c_str(),l)) { if(fl_cur->s[l] == '/') { /* remove from list */ if(fl_cur->prev == NULL) { (*fl_list)=fl_cur->next; } else { fl_cur->prev->next=fl_cur->next; }; if(fl_cur->next == NULL) { } else { fl_cur->next->prev=fl_cur->prev; }; /* add to list */ fl_cur->prev=NULL; fl_cur->next=fl_new; if(fl_new == NULL) { fl_new=fl_cur; } else { fl_new->prev = fl_cur; fl_new=fl_cur; }; }; }; fl_cur=tmp; }; /* go through directory and remove files */ std::string file; std::string dir_s = dir_base+dir_cur; int files = 0; try { Glib::Dir dir(dir_s); for(;;) { file=dir.read_name(); if(file.empty()) break; if(file == ".") continue; if(file == "..") continue; fl_cur = fl_new; for(;;) { if(fl_cur == NULL) break; if(!strcmp(file.c_str(),(fl_cur->s)+(l+1))) { /* do not delete or delete */ break; }; fl_cur=fl_cur->next; }; if(excl) { if(fl_cur == NULL) { /* delete */ struct stat f_st; std::string fname=dir_s+'/'+file; if(!Arc::FileStat(fname.c_str(),&f_st,uid,gid,false)) { files++; } else if(S_ISDIR(f_st.st_mode)) { if(delete_all_recur(dir_base, dir_cur+'/'+file,&fl_new,excl,uid,gid) != 0) { files++; } else { if(!Arc::DirDelete(fname, false, uid, gid)) { files++; }; }; } else { if(!Arc::FileDelete(fname, uid, gid)) { files++; }; }; } else { files++; }; } else { struct stat f_st; std::string fname=dir_s+'/'+file; if(!Arc::FileStat(fname.c_str(),&f_st,uid,gid,false)) { files++; } else if(S_ISDIR(f_st.st_mode)) { if(fl_cur != NULL) { /* MUST delete it */ if(!Arc::DirDelete(fname, true, uid, gid)) { files++; }; } else { /* CAN delete if empty, and maybe files inside */ if(delete_all_recur(dir_base, dir_cur+'/'+file,&fl_new,excl,uid,gid) != 0) { files++; } else { if(!Arc::DirDelete(fname, false, uid, gid)) { files++; }; }; }; } else { 
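// Non-exclusive mode, plain file: only entries that were matched in the
// supplied list (fl_cur != NULL) are deleted; anything else is merely counted
// so that the enclosing directory is reported as non-empty and kept.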
if(fl_cur != NULL) { /* MUST delete this file */ if(!Arc::FileDelete(fname, uid, gid)) { files++; }; } else { files++; }; }; }; }; } catch(Glib::FileError& e) { return 2; }; if(files) return 1; return 0; } /* filenames should start from / and not to have / at end */ int delete_all_files(const std::string &dir_base,const std::list &files, bool excl,uid_t uid, gid_t gid) { int n = files.size(); FL_p* fl_list = NULL; if(n != 0) { if((fl_list=(FL_p*)malloc(sizeof(FL_p)*n)) == NULL) { return 2; }; std::list::const_iterator file=files.begin(); // fl_list[0].s=file->pfn.c_str(); int i; for(i=0;ilfn.find(':') != std::string::npos) { if(excl) { if(file->pfn == "/") { /* keep all requested */ free(fl_list); return 0; }; }; fl_list[i].s=file->pfn.c_str(); if(i) { fl_list[i].prev=fl_list+(i-1); fl_list[i-1].next=fl_list+i; } else { fl_list[i].prev=NULL; }; fl_list[i].next=NULL; i++; }; ++file; if(file == files.end()) break; }; if(i==0) { free(fl_list); fl_list=NULL; }; }; std::string dir_cur(""); FL_p* fl_list_tmp = fl_list; int res=delete_all_recur(dir_base,dir_cur,&fl_list_tmp,excl,uid,gid); if(fl_list) free(fl_list); return res; } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/PaxHeaders.7502/ControlFileContent.cpp0000644000000000000000000000012413107551126030542 xustar000000000000000027 mtime=1495192150.605522 27 atime=1513200575.974719 30 ctime=1513200663.014784358 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/ControlFileContent.cpp0000644000175000002070000006215313107551126030616 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include "ControlFileContent.h" namespace ARex { static Glib::Mutex local_lock; static Arc::Logger& logger = Arc::Logger::getRootLogger(); class KeyValueFile { public: enum OpenMode { Fetch, Create }; KeyValueFile(std::string const& fname, OpenMode mode); ~KeyValueFile(void); operator bool(void) { return handle_ != -1; }; bool operator!(void) { return handle_ == -1; }; bool Write(std::string const& name, std::string const& value); bool Read(std::string& name, std::string& value); private: int handle_; char* read_buf_; int read_buf_pos_; int read_buf_avail_; static int const read_buf_size_ = 256; // normally should fit full line static int const data_max_ = 1024*1024; // sanity protection }; KeyValueFile::KeyValueFile(std::string const& fname, OpenMode mode): handle_(-1),read_buf_(NULL),read_buf_pos_(0),read_buf_avail_(0) { if(mode == Create) { handle_ = ::open(fname.c_str(),O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR); if(handle_==-1) return; struct flock lock; lock.l_type=F_WRLCK; lock.l_whence=SEEK_SET; lock.l_start=0; lock.l_len=0; for(;;) { if(::fcntl(handle_,F_SETLKW,&lock) != -1) break; if(errno == EINTR) continue; ::close(handle_); handle_ = -1; return; }; if((::ftruncate(handle_,0) != 0) || (::lseek(handle_,0,SEEK_SET) != 0)) { close(handle_); handle_ = -1; return; }; } else { handle_ = ::open(fname.c_str(),O_RDONLY); if(handle_ == -1) return; struct flock lock; lock.l_type=F_RDLCK; lock.l_whence=SEEK_SET; lock.l_start=0; lock.l_len=0; for(;;) { if(::fcntl(handle_,F_SETLKW,&lock) != -1) break; // success if(errno == EINTR) continue; // retry close(handle_); handle_ = -1; // failure return; }; read_buf_ = new char[read_buf_size_]; if(!read_buf_) { close(handle_); handle_ = -1; return; }; }; } KeyValueFile::~KeyValueFile(void) { if(handle_ != -1) ::close(handle_); if(read_buf_) delete[] read_buf_; } 
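// Sketch of how KeyValueFile is used by JobLocalDescription::write()/read()
// further down in this file (values are illustrative only):
//
//   KeyValueFile f(fname, KeyValueFile::Create);
//   if (f) f.Write("queue", "gridlong");   // writes the record "queue=gridlong\n"
//
// Create truncates the file under an exclusive fcntl lock held for the
// lifetime of the object; Fetch opens it read-only under a shared lock and
// Read() returns one name/value record per call, yielding an empty pair at EOF.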
static inline bool write_str(int f,const char* buf, std::string::size_type len) { for(;len > 0;) { ssize_t l = write(f,buf,len); if(l < 0) { if(errno != EINTR) return false; } else { len -= l; buf += l; }; }; return true; } bool KeyValueFile::Write(std::string const& name, std::string const& value) { if(handle_ == -1) return false; if(read_buf_) return false; if(name.empty()) return false; if(name.length() > data_max_) return false; if(value.length() > data_max_) return false; if(!write_str(handle_, name.c_str(), name.length())) return false; if(!write_str(handle_, "=", 1)) return false; if(!write_str(handle_, value.c_str(), value.length())) return false; if(!write_str(handle_, "\n", 1)) return false; return true; } bool KeyValueFile::Read(std::string& name, std::string& value) { if(handle_ == -1) return false; if(!read_buf_) return false; name.clear(); value.clear(); char c; bool key_done = false; for(;;) { if(read_buf_pos_ >= read_buf_avail_) { read_buf_pos_ = 0; read_buf_avail_ = 0; ssize_t l = ::read(handle_, read_buf_, read_buf_size_); if(l < 0) { if(errno == EINTR) continue; return false; }; if(l == 0) break; // EOF - not error read_buf_avail_ = l; }; c = read_buf_[read_buf_pos_++]; if(c == '\n') break; // EOL if(!key_done) { if(c == '=') { key_done = true; } else { name += c; if(name.length() > data_max_) return false; }; } else { value += c; if(value.length() > data_max_) return false; }; }; return true; } std::ostream &operator<< (std::ostream &o,const FileData &fd) { // TODO: switch to HEX encoding and drop dependency on ConfigIni in major release std::string escaped_pfn(Arc::escape_chars(fd.pfn, " \\\r\n", '\\', false)); if(!escaped_pfn.empty()) { o.write(escaped_pfn.c_str(), escaped_pfn.size()); std::string escaped_lfn(Arc::escape_chars(fd.lfn, " \\\r\n", '\\', false)); if(!escaped_lfn.empty()) { o.put(' '); o.write(escaped_lfn.c_str(), escaped_lfn.size()); std::string escaped_cred(Arc::escape_chars(fd.cred, " \\\r\n", '\\', false)); if(!escaped_cred.empty()) { o.put(' '); o.write(escaped_cred.c_str(), escaped_cred.size()); }; }; }; return o; } std::istream &operator>> (std::istream &i,FileData &fd) { std::string buf; std::getline(i,buf); Arc::trim(buf," \t\r\n"); fd.pfn.resize(0); fd.lfn.resize(0); fd.cred.resize(0); fd.pfn = Arc::ConfigIni::NextArg(buf,' ','\0'); fd.lfn = Arc::ConfigIni::NextArg(buf,' ','\0'); fd.cred = Arc::ConfigIni::NextArg(buf,' ','\0'); if((fd.pfn.length() == 0) && (fd.lfn.length() == 0)) return i; /* empty st */ if(!Arc::CanonicalDir(fd.pfn,true,true)) { logger.msg(Arc::ERROR,"Wrong directory in %s",buf); fd.pfn.resize(0); fd.lfn.resize(0); }; return i; } FileData::FileData(void) { ifsuccess = true; ifcancel = false; iffailure = false; } FileData::FileData(const std::string& pfn_s,const std::string& lfn_s) { ifsuccess = true; ifcancel = false; iffailure = false; if(!pfn_s.empty()) { pfn=pfn_s; } else { pfn.resize(0); }; if(!lfn_s.empty()) { lfn=lfn_s; } else { lfn.resize(0); }; } //FileData& FileData::operator= (const char *str) { // pfn.resize(0); lfn.resize(0); // int n=input_escaped_string(str,pfn); // input_escaped_string(str+n,lfn); // return *this; //} bool FileData::operator== (const FileData& data) { // pfn may contain leading slash. It must be striped // before comparison. 
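// Only the pfn is compared; lfn and cred are ignored. For example,
// "/output/result.dat" and "output/result.dat" compare equal.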
const char* pfn_ = pfn.c_str(); if(pfn_[0] == '/') ++pfn_; const char* dpfn_ = data.pfn.c_str(); if(dpfn_[0] == '/') ++dpfn_; return (strcmp(pfn_,dpfn_) == 0); // return (pfn == data.pfn); } bool FileData::operator== (const char *name) { if(name == NULL) return false; if(name[0] == '/') ++name; const char* pfn_ = pfn.c_str(); if(pfn_[0] == '/') ++pfn_; return (strcmp(pfn_,name) == 0); } bool FileData::has_lfn(void) { return (lfn.find(':') != std::string::npos); } static char StateToShortcut(const std::string& state) { if(state == "ACCEPTED") return 'a'; // not supported if(state == "PREPARING") return 'b'; if(state == "SUBMIT") return 's'; // not supported if(state == "INLRMS") return 'q'; if(state == "FINISHING") return 'f'; if(state == "FINISHED") return 'e'; if(state == "DELETED") return 'd'; if(state == "CANCELING") return 'c'; return ' '; } Exec& Exec::operator=(const Arc::ExecutableType& src) { Exec& dest = *this; // Order of the following calls matters! dest.clear(); dest.successcode = 0; dest = src.Argument; dest.push_front(src.Path); if(src.SuccessExitCode.first) dest.successcode = src.SuccessExitCode.second; return dest; } JobLocalDescription& JobLocalDescription::operator=(const Arc::JobDescription& arc_job_desc) { // TODO: handle errors action = "request"; std::map::const_iterator act_i = arc_job_desc.OtherAttributes.find("nordugrid:xrsl;action"); if(act_i != arc_job_desc.OtherAttributes.end()) action = act_i->second; std::map::const_iterator jid_i = arc_job_desc.OtherAttributes.find("nordugrid:xrsl;jobid"); if(jid_i != arc_job_desc.OtherAttributes.end()) jobid = jid_i->second; dryrun = arc_job_desc.Application.DryRun; projectnames.clear(); std::map::const_iterator jr_i = arc_job_desc.OtherAttributes.find("nordugrid:jsdl;Identification/JobProject"); if (jr_i != arc_job_desc.OtherAttributes.end()) projectnames.push_back(jr_i->second); jobname = arc_job_desc.Identification.JobName; downloads = 0; uploads = 0; freestagein = false; outputdata.clear(); inputdata.clear(); rte.clear(); transfershare="_default"; const std::list& sw = arc_job_desc.Resources.RunTimeEnvironment.getSoftwareList(); for (std::list::const_iterator itSW = sw.begin(); itSW != sw.end(); ++itSW) rte.push_back(std::string(*itSW)); for (std::list::const_iterator file = arc_job_desc.DataStaging.InputFiles.begin(); file != arc_job_desc.DataStaging.InputFiles.end(); ++file) { std::string fname = file->Name; if(fname[0] != '/') fname = "/"+fname; // Just for safety inputdata.push_back(FileData(fname, "")); if(!file->Sources.empty()) { // Only one source per file is used if (file->Sources.front() && file->Sources.front().Protocol() != "file") { inputdata.back().lfn = file->Sources.front().fullstr(); // It is not possible to extract credentials path here. // So temporarily storing id here. inputdata.back().cred = file->Sources.front().DelegationID; } } if(fname == "/") { // Unnamed file is used to mark request for free stage in freestagein = true; } if (inputdata.back().has_lfn()) { ++downloads; Arc::URL u(inputdata.back().lfn); if (file->IsExecutable || file->Name == arc_job_desc.Application.Executable.Path) { u.AddOption("exec", "yes", true); } inputdata.back().lfn = u.fullstr(); } else if (file->FileSize != -1) { inputdata.back().lfn = Arc::tostring(file->FileSize); if (!file->Checksum.empty()) { // Only set checksum if FileSize is also set. 
inputdata.back().lfn += "."+file->Checksum; } } } for (std::list::const_iterator file = arc_job_desc.DataStaging.OutputFiles.begin(); file != arc_job_desc.DataStaging.OutputFiles.end(); ++file) { std::string fname = file->Name; if(fname[0] != '/') fname = "/"+fname; // Just for safety bool ifsuccess = false; bool ifcancel = false; bool iffailure = false; if (!file->Targets.empty()) { // output file for(std::list::const_iterator target = file->Targets.begin(); target != file->Targets.end(); ++target) { FileData fdata(fname, target->fullstr()); fdata.ifsuccess = target->UseIfSuccess; fdata.ifcancel = target->UseIfCancel; fdata.iffailure = target->UseIfFailure; outputdata.push_back(fdata); if (outputdata.back().has_lfn()) { ++uploads; Arc::URL u(outputdata.back().lfn); // really needed? if(u.Option("preserve","no") == "yes") { outputdata.back().ifcancel = true; outputdata.back().iffailure = true; }; switch(target->CreationFlag) { case Arc::TargetType::CFE_OVERWRITE: u.AddOption("overwrite","yes",true); break; case Arc::TargetType::CFE_DONTOVERWRITE: u.AddOption("overwrite","no",true); break; // Rest is not supported in URLs yet. default: break; }; u.RemoveOption("preserve"); u.RemoveOption("mandatory"); // TODO: implement outputdata.back().lfn = u.fullstr(); // It is not possible to extract credentials path here. // So temporarily storing id here. outputdata.back().cred = target->DelegationID; } if(outputdata.back().ifsuccess) ifsuccess = true; if(outputdata.back().ifcancel) ifcancel = true; if(outputdata.back().iffailure) iffailure = true; } if(ifsuccess && ifcancel && iffailure) { // All possible results are covered } else { // For not covered cases file is treated as user downloadable FileData fdata(fname, ""); fdata.ifsuccess = !ifsuccess; fdata.ifcancel = !ifcancel; fdata.iffailure = !iffailure; outputdata.push_back(fdata); } } else { // user downloadable file FileData fdata(fname, ""); // user decides either to use file fdata.ifsuccess = true; fdata.ifcancel = true; fdata.iffailure = true; outputdata.push_back(fdata); } } // Pick up per job delegation if(!arc_job_desc.DataStaging.DelegationID.empty()) { delegationid = arc_job_desc.DataStaging.DelegationID; }; exec = arc_job_desc.Application.Executable; for(std::list::const_iterator e = arc_job_desc.Application.PreExecutable.begin(); e != arc_job_desc.Application.PreExecutable.end(); ++e) { Exec pe = *e; preexecs.push_back(pe); } for(std::list::const_iterator e = arc_job_desc.Application.PostExecutable.begin(); e != arc_job_desc.Application.PostExecutable.end(); ++e) { Exec pe = *e; postexecs.push_back(pe); } stdin_ = arc_job_desc.Application.Input; stdout_ = arc_job_desc.Application.Output; stderr_ = arc_job_desc.Application.Error; if (arc_job_desc.Resources.DiskSpaceRequirement.DiskSpace > -1) diskspace = (unsigned long long int)(arc_job_desc.Resources.DiskSpaceRequirement.DiskSpace*1024*1024); processtime = arc_job_desc.Application.ProcessingStartTime; const int lifetimeTemp = (int)arc_job_desc.Resources.SessionLifeTime.GetPeriod(); if (lifetimeTemp > 0) lifetime = lifetimeTemp; activityid = arc_job_desc.Identification.ActivityOldID; stdlog = arc_job_desc.Application.LogDir; jobreport.clear(); for (std::list::const_iterator it = arc_job_desc.Application.RemoteLogging.begin(); it != arc_job_desc.Application.RemoteLogging.end(); ++it) { // TODO: Support optional requirement. // TODO: Support other types than SGAS. 
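// Only RemoteLogging entries whose ServiceType is "SGAS" are copied into
// jobreport; entries for any other logger type are dropped here.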
if (it->ServiceType == "SGAS") { jobreport.push_back(it->Location.str()); } } notify.clear(); { int n = 0; for (std::list::const_iterator it = arc_job_desc.Application.Notification.begin(); it != arc_job_desc.Application.Notification.end(); it++) { if (n >= 3) break; // Only 3 instances are allowed. std::string states; for (std::list::const_iterator s = it->States.begin(); s != it->States.end(); ++s) { char state = StateToShortcut(*s); if(state == ' ') continue; states+=state; } if(states.empty()) continue; if(it->Email.empty()) continue; if (!notify.empty()) notify += " "; notify += states + " " + it->Email; ++n; } } if (!arc_job_desc.Resources.QueueName.empty()) { queue = arc_job_desc.Resources.QueueName; } if (!arc_job_desc.Application.CredentialService.empty() && arc_job_desc.Application.CredentialService.front()) credentialserver = arc_job_desc.Application.CredentialService.front().fullstr(); if (arc_job_desc.Application.Rerun > -1) reruns = arc_job_desc.Application.Rerun; if ( arc_job_desc.Application.Priority <= 100 && arc_job_desc.Application.Priority > 0 ) priority = arc_job_desc.Application.Priority; return *this; } const char* const JobLocalDescription::transfersharedefault = "_default"; int const JobLocalDescription::prioritydefault = 50; bool LRMSResult::set(const char* s) { // 1. Empty string = exit code 0 if(s == NULL) s=""; for(;*s;++s) { if(!isspace(*s)) break; }; if(!*s) { code_=0; description_=""; }; // Try to read first word as number char* e; code_=strtol(s,&e,0); if((!*e) || (isspace(*e))) { for(;*e;++e) { if(!isspace(*e)) break; }; description_=e; return true; }; // If there is no number that means some "uncoded" failure code_=-1; description_=s; return true; } std::istream& operator>>(std::istream& i,LRMSResult &r) { std::string buf; if(i.eof() || i.fail()) { } else { std::getline(i,buf); }; r=buf; return i; } std::ostream& operator<<(std::ostream& o,const LRMSResult &r) { o<& value) { for(std::list::const_iterator v = value.begin(); v != value.end(); ++v) { if(!write_pair(f,name,*v)) return false; } return true; } static inline bool parse_boolean(const std::string& buf) { if(strncasecmp("yes",buf.c_str(),3) == 0) return true; if(strncasecmp("true",buf.c_str(),4) == 0) return true; if(strncmp("1",buf.c_str(),1) == 0) return true; return false; } bool JobLocalDescription::write(const std::string& fname) const { Glib::Mutex::Lock lock_(local_lock); // *.local file is accessed concurently. To avoid improper readings lock is acquired. 
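// Besides the in-process mutex above, KeyValueFile::Create takes an fcntl
// write lock on the file itself, so writers in other processes are serialised
// as well. Every attribute becomes one key=value line; multi-valued attributes
// (jobreport, projectname, localvo, voms, activityid) are written as repeated keys.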
KeyValueFile f(fname,KeyValueFile::Create); if(!f) return false; for (std::list::const_iterator it=jobreport.begin(); it!=jobreport.end(); it++) { if(!write_pair(f,"jobreport",*it)) return false; }; if(!write_pair(f,"globalid",globalid)) return false; if(!write_pair(f,"headnode",headnode)) return false; if(!write_pair(f,"interface",interface)) return false; if(!write_pair(f,"lrms",lrms)) return false; if(!write_pair(f,"queue",queue)) return false; if(!write_pair(f,"localid",localid)) return false; if(!write_pair(f,"args",exec)) return false; if(!write_pair(f,"pre",preexecs)) return false; if(!write_pair(f,"post",postexecs)) return false; if(!write_pair(f,"subject",DN)) return false; if(!write_pair(f,"starttime",starttime)) return false; if(!write_pair(f,"lifetime",lifetime)) return false; if(!write_pair(f,"notify",notify)) return false; if(!write_pair(f,"processtime",processtime)) return false; if(!write_pair(f,"exectime",exectime)) return false; if(!write_pair(f,"rerun",Arc::tostring(reruns))) return false; if(downloads>=0) if(!write_pair(f,"downloads",Arc::tostring(downloads))) return false; if(uploads>=0) if(!write_pair(f,"uploads",Arc::tostring(uploads))) return false; if(!write_pair(f,"jobname",jobname)) return false; for (std::list::const_iterator ppn=projectnames.begin(); ppn!=projectnames.end(); ++ppn) { if(!write_pair(f,"projectname",*ppn)) return false; }; if(!write_pair(f,"gmlog",stdlog)) return false; if(!write_pair(f,"cleanuptime",cleanuptime)) return false; if(!write_pair(f,"delegexpiretime",expiretime)) return false; if(!write_pair(f,"clientname",clientname)) return false; if(!write_pair(f,"clientsoftware",clientsoftware)) return false; if(!write_pair(f,"delegationid",delegationid)) return false; if(!write_pair(f,"sessiondir",sessiondir)) return false; if(!write_pair(f,"diskspace",Arc::tostring(diskspace))) return false; if(!write_pair(f,"failedstate",failedstate)) return false; if(!write_pair(f,"failedcause",failedcause)) return false; if(!write_pair(f,"credentialserver",credentialserver)) return false; if(!write_pair(f,"freestagein",freestagein)) return false; for(std::list::const_iterator lv=localvo.begin(); lv != localvo.end(); ++lv) { if(!write_pair(f,"localvo",(*lv))) return false; }; for(std::list::const_iterator vf=voms.begin(); vf != voms.end(); ++vf) { if(!write_pair(f,"voms",(*vf))) return false; }; for(std::list::const_iterator act_id=activityid.begin(); act_id != activityid.end(); ++act_id) { if(!write_pair(f,"activityid",(*act_id))) return false; }; if (migrateactivityid != "") { if(!write_pair(f,"migrateactivityid",migrateactivityid)) return false; if(!write_pair(f,"forcemigration",forcemigration)) return false; } if(!write_pair(f,"transfershare",transfershare)) return false; if(!write_pair(f,"priority",Arc::tostring(priority))) return false; return true; } bool JobLocalDescription::read(const std::string& fname) { Glib::Mutex::Lock lock_(local_lock); // *.local file is accessed concurently. To avoid improper readings lock is acquired. 
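// Fetch mode opens the file under a shared fcntl read lock. The lists
// activityid, localvo and voms are cleared before parsing; unknown keys fall
// through the chain below and are ignored, while a numeric field that fails
// Arc::stringto() makes the whole read fail.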
KeyValueFile f(fname,KeyValueFile::Fetch); if(!f) return false; activityid.clear(); localvo.clear(); voms.clear(); for(;;) { std::string name; std::string buf; if(!f.Read(name,buf)) return false; if(name.empty() && buf.empty()) break; // EOF if(name.empty()) continue; if(buf.empty()) continue; if(name == "lrms") { lrms = buf; } else if(name == "headnode") { headnode = buf; } else if(name == "interface") { interface = buf; } else if(name == "queue") { queue = buf; } else if(name == "localid") { localid = buf; } else if(name == "subject") { DN = buf; } else if(name == "starttime") { starttime = buf; } // else if(name == "UI") { UI = buf; } else if(name == "lifetime") { lifetime = buf; } else if(name == "notify") { notify = buf; } else if(name == "processtime") { processtime = buf; } else if(name == "exectime") { exectime = buf; } else if(name == "jobreport") { jobreport.push_back(std::string(buf)); } else if(name == "globalid") { globalid = buf; } else if(name == "jobname") { jobname = buf; } else if(name == "projectname") { projectnames.push_back(std::string(buf)); } else if(name == "gmlog") { stdlog = buf; } else if(name == "rerun") { int n; if(!Arc::stringto(buf,n)) return false; reruns = n; } else if(name == "downloads") { int n; if(!Arc::stringto(buf,n)) return false; downloads = n; } else if(name == "uploads") { int n; if(!Arc::stringto(buf,n)) return false; uploads = n; } else if(name == "args") { exec.clear(); exec.successcode = 0; while(!buf.empty()) { std::string arg; arg = Arc::ConfigIni::NextArg(buf,' ','\0'); exec.push_back(arg); }; } else if(name == "argscode") { int n; if(!Arc::stringto(buf,n)) return false; exec.successcode = n; } else if(name == "pre") { Exec pe; while(!buf.empty()) { std::string arg; arg = Arc::ConfigIni::NextArg(buf); pe.push_back(arg); }; preexecs.push_back(pe); } else if(name == "precode") { if(preexecs.empty()) return false; int n; if(!Arc::stringto(buf,n)) return false; preexecs.back().successcode = n; } else if(name == "post") { Exec pe; while(!buf.empty()) { std::string arg; arg = Arc::ConfigIni::NextArg(buf); pe.push_back(arg); }; postexecs.push_back(pe); } else if(name == "postcode") { if(postexecs.empty()) return false; int n; if(!Arc::stringto(buf,n)) return false; postexecs.back().successcode = n; } else if(name == "cleanuptime") { cleanuptime = buf; } else if(name == "delegexpiretime") { expiretime = buf; } else if(name == "clientname") { clientname = buf; } else if(name == "clientsoftware") { clientsoftware = buf; } else if(name == "delegationid") { delegationid = buf; } else if(name == "sessiondir") { sessiondir = buf; } else if(name == "failedstate") { failedstate = buf; } else if(name == "failedcause") { failedcause = buf; } else if(name == "credentialserver") { credentialserver = buf; } else if(name == "freestagein") { freestagein = parse_boolean(buf); } else if(name == "localvo") { localvo.push_back(buf); } else if(name == "voms") { voms.push_back(buf); } else if(name == "diskspace") { unsigned long long int n; if(!Arc::stringto(buf,n)) return false; diskspace = n; } else if(name == "activityid") { activityid.push_back(buf); } else if(name == "migrateactivityid") { migrateactivityid = buf; } else if(name == "forcemigration") { forcemigration = parse_boolean(buf); } else if(name == "transfershare") { transfershare = buf; } else if(name == "priority") { int n; if(!Arc::stringto(buf,n)) return false; priority = n; } } return true; } bool JobLocalDescription::read_var(const std::string &fname,const std::string &vnam,std::string &value) { 
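// Scans the records sequentially and returns the value of the first one whose
// key equals vnam; returns false if the key is absent or the file cannot be
// read or locked.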
Glib::Mutex::Lock lock_(local_lock); // *.local file is accessed concurently. To avoid improper readings lock is acquired. KeyValueFile f(fname,KeyValueFile::Fetch); if(!f) return false; // using iostream for handling file content bool found = false; for(;;) { std::string buf; std::string name; if(!f.Read(name, buf)) return false; if(name.empty() && buf.empty()) break; // EOF if(name.empty()) continue; if(buf.empty()) continue; if(name == vnam) { value = buf; found=true; break; }; }; return found; } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/PaxHeaders.7502/ControlFileContent.h0000644000000000000000000000012312772752650030222 xustar000000000000000026 mtime=1475073448.37431 27 atime=1513200575.977719 30 ctime=1513200663.018784407 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/ControlFileContent.h0000644000175000002070000001746612772752650030306 0ustar00mockbuildmock00000000000000#ifndef GRID_MANAGER_INFO_TYPES_H #define GRID_MANAGER_INFO_TYPES_H #include #include #include #include #include namespace ARex { /* Defines few data types used by grid-manager to store information about jobs. */ /* Pair of values containing file's path (pfn - physical file name) and it's source or destination on the net (lfn - logical file name) */ class FileData { public: typedef std::list::iterator iterator; FileData(void); FileData(const std::string& pfn_s,const std::string& lfn_s); std::string pfn; // path relative to session dir std::string lfn; // input/output url or size.checksum std::string cred; // path to file containing credentials bool ifsuccess; bool ifcancel; bool iffailure; FileData& operator= (const char* str); bool operator== (const char* name); bool operator== (const FileData& data); bool has_lfn(void); }; std::istream &operator>> (std::istream &i,FileData &fd); std::ostream &operator<< (std::ostream &o,const FileData &fd); class Exec: public std::list { public: Exec(void):successcode(0) {}; Exec(const std::list& src):std::list(src),successcode(0) {}; Exec(const Arc::ExecutableType& src):successcode(0) { operator=(src); }; Exec& operator=(const std::list& src) { std::list::operator=(src); return *this; }; Exec& operator=(const Arc::ExecutableType& src); int successcode; }; std::istream &operator>> (std::istream &i,Exec &fd); std::ostream &operator<< (std::ostream &o,const Exec &fd); /* Most important information about job extracted from different sources (mostly from job description) and stored in separate file for relatively quick and simple access. 
*/ class JobLocalDescription { /* all values are public, this class is just for convenience */ public: JobLocalDescription(void):jobid(""),globalid(""),headnode(""),lrms(""),queue(""),localid(""), DN(""),starttime((time_t)(-1)),lifetime(""), notify(""),processtime((time_t)(-1)),exectime((time_t)(-1)), clientname(""),clientsoftware(""),delegationid(""), reruns(0),priority(prioritydefault),downloads(-1),uploads(-1), jobname(""),jobreport(), cleanuptime((time_t)(-1)),expiretime((time_t)(-1)), failedstate(""),failedcause(""), credentialserver(""),freestagein(false),gsiftpthreads(1), dryrun(false),diskspace(0), migrateactivityid(""), forcemigration(false), transfershare(JobLocalDescription::transfersharedefault) {} JobLocalDescription& operator=(const Arc::JobDescription& arc_job_desc); bool read(const std::string& fname); bool write(const std::string& fname) const; static bool read_var(const std::string &fname,const std::string &vnam,std::string &value); // All non-static members are safe to copy std::string jobid; /* job's unique identifier */ /* attributes stored in job.ID.local */ std::string globalid; /* job id as seen from outside */ std::string headnode; /* URL of the cluster's headnode */ std::string interface; /* interface type used to submit job */ std::string lrms; /* lrms type to use - pbs */ std::string queue; /* queue name - default */ std::string localid; /* job's id in lrms */ std::list preexecs; /* executable + arguments */ Exec exec; /* executable + arguments */ std::list postexecs; /* executable + arguments */ std::string DN; /* user's distinguished name aka subject name */ Arc::Time starttime; /* job submission time */ std::string lifetime; /* time to live for submission directory */ std::string notify; /* notification flags used and email address */ Arc::Time processtime; /* time to start job processing (downloading) */ Arc::Time exectime; /* time to start execution */ std::string clientname; /* IP+port of user interface + info given by ui */ std::string clientsoftware; /* Client's version */ std::string delegationid; /* id of deleation assigned to this job (not per file) */ int reruns; /* number of allowed reruns left */ int priority; /* priority the job has */ int downloads; /* number of downloadable files requested */ int uploads; /* number of uploadable files requested */ std::string jobname; /* name of job given by user */ std::list projectnames; /* project names, i.e. 
"ACIDs" */ std::list jobreport; /* URLs of user's/VO's loggers */ Arc::Time cleanuptime; /* time to remove job completely */ Arc::Time expiretime; /* when main delegation expires */ std::string stdlog; /* dirname to which log messages will be put after job finishes */ std::string sessiondir; /* job's session directory */ std::string failedstate; /* state at which job failed, used for rerun */ std::string failedcause; /* reason for job failure, client or internal error */ std::string credentialserver; /* URL of server used to renew credentials - MyProxy */ bool freestagein; /* if true, client is allowed to stage in any files */ std::list localvo; /* VO names to which user belongs according to local configuration*/ std::list voms; /* VOMS FQANs which we matched during authorization process */ /* attributes stored in other files */ std::list inputdata; /* input files */ std::list outputdata; /* output files */ /* attributes taken from job description */ std::list rte; /* runtime environments */ std::string action; /* what to do - must be 'request' */ std::string rc; /* url to contact replica collection */ std::string stdin_; /* file name for stdin handle */ std::string stdout_; /* file name for stdout handle */ std::string stderr_; /* file name for stderr handle */ std::string cache; /* cache default, yes/no */ int gsiftpthreads; /* number of parallel connections to use during gsiftp down/uploads */ bool dryrun; /* if true, this is test job */ unsigned long long int diskspace; /* anount of requested space on disk (unit bytes) */ std::list activityid; /* ID of activity */ std::string migrateactivityid; /* ID of activity that is being migrated*/ bool forcemigration; /* Ignore if killing of old job fails */ std::string transfershare; /* share assigned to job for transfer fair share */ // Default values which are not zero static int const prioritydefault; /* default priority for the job if not specified */ static const char* const transfersharedefault; /* default value for transfer share */ }; /* Information stored in job.#.lrms_done file */ class LRMSResult { private: int code_; std::string description_; bool set(const char* s); public: LRMSResult(void):code_(-1),description_("") { }; LRMSResult(const std::string& s) { set(s.c_str()); }; LRMSResult(int c):code_(c),description_("") { }; LRMSResult(const char* s) { set(s); }; LRMSResult& operator=(const std::string& s) { set(s.c_str()); return *this; }; LRMSResult& operator=(const char* s) { set(s); return *this; }; int code(void) const { return code_; }; const std::string& description(void) const { return description_; }; }; std::istream& operator>>(std::istream& i,LRMSResult &r); std::ostream& operator<<(std::ostream& i,const LRMSResult &r); } // namespace ARex #endif nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/PaxHeaders.7502/ControlFileHandling.cpp0000644000000000000000000000012313213445240030650 xustar000000000000000027 mtime=1512983200.815191 26 atime=1513200575.99272 30 ctime=1513200663.013784345 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/ControlFileHandling.cpp0000644000175000002070000007312413213445240030725 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "../run/RunRedirected.h" #include "../conf/GMConfig.h" #include "../jobs/GMJob.h" #include "ControlFileHandling.h" namespace ARex { // Files in control dir, job.id.sfx const char * const sfx_failed = ".failed"; // Description of failure const char * const sfx_cancel = 
".cancel"; // Mark to tell A-REX to cancel job const char * const sfx_restart = ".restart"; // Mark to tell A-REX to restart job const char * const sfx_clean = ".clean"; // Mark to tell A-REX to clean job const char * const sfx_status = ".status"; // Current job status const char * const sfx_local = ".local"; // Local information about job const char * const sfx_errors = ".errors"; // Log of data staging and job submission const char * const sfx_desc = ".description"; // Job description sent by user const char * const sfx_diag = ".diag"; // Diagnostic info about finished job const char * const sfx_lrmsoutput = ".comment"; // Additional information from LRMS const char * const sfx_acl = ".acl"; // ACL information for job const char * const sfx_proxy = ".proxy"; // Delegated proxy const char * const sfx_xml = ".xml"; // XML description of job const char * const sfx_input = ".input"; // Input files required by job const char * const sfx_output = ".output"; // Output files written by job const char * const sfx_inputstatus = ".input_status"; // Input files staged by client const char * const sfx_outputstatus = ".output_status";// Output files already staged out const char * const sfx_statistics = ".statistics"; // Statistical information on data staging // Sub-directories for different jobs states const char * const subdir_new = "accepting"; // Submitted but not yet picked up by A-REX const char * const subdir_cur = "processing"; // Being processed by A-REX const char * const subdir_old = "finished"; // Finished or deleted jobs const char * const subdir_rew = "restarting"; // Jobs waiting to restart static Arc::Logger& logger = Arc::Logger::getRootLogger(); static job_state_t job_state_read_file(const std::string &fname,bool &pending); static bool job_state_write_file(const std::string &fname,job_state_t state,bool pending = false); static bool job_mark_put(Arc::FileAccess& fa, const std::string &fname); static bool job_mark_remove(Arc::FileAccess& fa,const std::string &fname); bool fix_file_permissions(const std::string &fname,bool executable) { mode_t mode = S_IRUSR | S_IWUSR; if(executable) { mode |= S_IXUSR; }; return (chmod(fname.c_str(),mode) == 0); } static bool fix_file_permissions(Arc::FileAccess& fa,const std::string &fname,bool executable = false) { mode_t mode = S_IRUSR | S_IWUSR; if(executable) { mode |= S_IXUSR; }; return fa.fa_chmod(fname.c_str(),mode); } bool fix_file_permissions(const std::string &fname,const GMJob &job,const GMConfig& config) { mode_t mode = S_IRUSR | S_IWUSR; uid_t uid = job.get_user().get_uid(); gid_t gid = job.get_user().get_gid(); if(!config.MatchShareUid(uid)) { mode |= S_IRGRP; if(!config.MatchShareGid(gid)) { mode |= S_IROTH; }; }; return (chmod(fname.c_str(),mode) == 0); } bool fix_file_permissions_in_session(const std::string &fname,const GMJob &job,const GMConfig &config,bool executable) { mode_t mode = S_IRUSR | S_IWUSR; if(executable) { mode |= S_IXUSR; }; if(config.StrictSession()) { uid_t uid = getuid()==0?job.get_user().get_uid():getuid(); uid_t gid = getgid()==0?job.get_user().get_gid():getgid(); Arc::FileAccess fa; if(!fa.fa_setuid(uid,gid)) return false; return fa.fa_chmod(fname,mode); }; return (chmod(fname.c_str(),mode) == 0); } bool fix_file_owner(const std::string &fname,const GMJob& job) { return fix_file_owner(fname, job.get_user()); } bool fix_file_owner(const std::string &fname,const Arc::User& user) { if(getuid() == 0) { if(lchown(fname.c_str(),user.get_uid(),user.get_gid()) == -1) { logger.msg(Arc::ERROR,"Failed setting file owner: 
%s",fname); return false; }; }; return true; } bool check_file_owner(const std::string &fname) { uid_t uid; gid_t gid; time_t t; return check_file_owner(fname,uid,gid,t); } bool check_file_owner(const std::string &fname,uid_t &uid,gid_t &gid) { time_t t; return check_file_owner(fname,uid,gid,t); } bool check_file_owner(const std::string &fname,uid_t &uid,gid_t &gid,time_t &t) { struct stat st; if(lstat(fname.c_str(),&st) != 0) return false; if(!S_ISREG(st.st_mode)) return false; uid=st.st_uid; gid=st.st_gid; t=st.st_ctime; /* superuser can't run jobs */ if(uid == 0) return false; /* accept any file if superuser */ if(getuid() != 0) { if(uid != getuid()) return false; }; return true; } bool job_lrms_mark_check(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/job." + id + ".lrms_done"; return job_mark_check(fname); } bool job_lrms_mark_remove(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/job." + id + ".lrms_done"; return job_mark_remove(fname); } LRMSResult job_lrms_mark_read(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/job." + id + ".lrms_done"; LRMSResult r("-1 Internal error"); std::ifstream f(fname.c_str()); if(! f.is_open() ) return r; f>>r; return r; } bool job_cancel_mark_put(const GMJob &job,const GMConfig &config) { std::string fname = config.ControlDir() + "/" + subdir_new + "/job." + job.get_id() + sfx_cancel; return job_mark_put(fname) && fix_file_owner(fname,job) && fix_file_permissions(fname); } bool job_cancel_mark_check(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/" + subdir_new + "/job." + id + sfx_cancel; return job_mark_check(fname); } bool job_cancel_mark_remove(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/" + subdir_new + "/job." + id + sfx_cancel; return job_mark_remove(fname); } bool job_restart_mark_put(const GMJob &job,const GMConfig &config) { std::string fname = config.ControlDir() + "/" + subdir_new + "/job." + job.get_id() + sfx_restart; return job_mark_put(fname) && fix_file_owner(fname,job) && fix_file_permissions(fname); } bool job_restart_mark_check(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/" + subdir_new + "/job." + id + sfx_restart; return job_mark_check(fname); } bool job_restart_mark_remove(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/" + subdir_new + "/job." + id + sfx_restart; return job_mark_remove(fname); } bool job_clean_mark_put(const GMJob &job,const GMConfig &config) { std::string fname = config.ControlDir() + "/" + subdir_new + "/job." + job.get_id() + sfx_clean; return job_mark_put(fname) && fix_file_owner(fname,job) && fix_file_permissions(fname); } bool job_clean_mark_check(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/" + subdir_new + "/job." + id + sfx_clean; return job_mark_check(fname); } bool job_clean_mark_remove(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/" + subdir_new + "/job." + id + sfx_clean; return job_mark_remove(fname); } bool job_failed_mark_put(const GMJob &job,const GMConfig &config,const std::string &content) { std::string fname = config.ControlDir() + "/job." 
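// The first recorded failure reason wins: if job.ID.failed already has
// content, job_failed_mark_put() returns success without overwriting it.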
+ job.get_id() + sfx_failed; if(job_mark_size(fname) > 0) return true; return job_mark_write(fname,content) && fix_file_owner(fname,job) && fix_file_permissions(fname,job,config); } bool job_failed_mark_add(const GMJob &job,const GMConfig &config,const std::string &content) { std::string fname = config.ControlDir() + "/job." + job.get_id() + sfx_failed; return job_mark_add(fname,content) && fix_file_owner(fname,job) && fix_file_permissions(fname,job,config); } bool job_failed_mark_check(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/job." + id + sfx_failed; return job_mark_check(fname); } bool job_failed_mark_remove(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/job." + id + sfx_failed; return job_mark_remove(fname); } std::string job_failed_mark_read(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/job." + id + sfx_failed; return job_mark_read(fname); } bool job_controldiag_mark_put(const GMJob &job,const GMConfig &config,char const * const args[]) { std::string fname = config.ControlDir() + "/job." + job.get_id() + sfx_diag; if(!job_mark_put(fname)) return false; if(!fix_file_owner(fname,job)) return false; if(!fix_file_permissions(fname)) return false; if(args == NULL) return true; struct stat st; if(args[0] && stat(args[0], &st) != 0) return true; int h = open(fname.c_str(),O_WRONLY); if(h == -1) return false; int r; int t = 10; r=RunRedirected::run(job.get_user(),"job_controldiag_mark_put",-1,h,-1,(char**)args,t); close(h); if(r != 0) return false; return true; } bool job_diagnostics_mark_put(const GMJob &job,const GMConfig &config) { std::string fname = job.SessionDir() + sfx_diag; if(config.StrictSession()) { Arc::FileAccess fa; if(!fa.fa_setuid(job.get_user().get_uid(),job.get_user().get_gid())) return false; return job_mark_put(fa,fname) && fix_file_permissions(fa,fname); }; return job_mark_put(fname) && fix_file_owner(fname,job) && fix_file_permissions(fname); } bool job_diagnostics_mark_remove(const GMJob &job,const GMConfig &config) { std::string fname = config.ControlDir() + "/job." + job.get_id() + sfx_diag; bool res1 = job_mark_remove(fname); fname = job.SessionDir() + sfx_diag; if(config.StrictSession()) { Arc::FileAccess fa; if(!fa.fa_setuid(job.get_user().get_uid(),job.get_user().get_gid())) return res1; return (res1 | job_mark_remove(fa,fname)); }; return (res1 | job_mark_remove(fname)); } bool job_diagnostics_mark_move(const GMJob &job,const GMConfig &config) { std::string fname1; if (job.GetLocalDescription() && !job.GetLocalDescription()->sessiondir.empty()) fname1 = job.GetLocalDescription()->sessiondir + sfx_diag; else fname1 = job.SessionDir() + sfx_diag; std::string fname2 = config.ControlDir() + "/job." 
+ job.get_id() + sfx_diag; std::string data; if(config.StrictSession()) { Arc::FileRead(fname1, data, job.get_user().get_uid(), job.get_user().get_gid()); Arc::FileDelete(fname1, job.get_user().get_uid(), job.get_user().get_gid()); } else { Arc::FileRead(fname1, data); Arc::FileDelete(fname1); } // behaviour is to create file in control dir even if reading fails return Arc::FileCreate(fname2, data) && fix_file_owner(fname2,job) && fix_file_permissions(fname2,job,config); } bool job_lrmsoutput_mark_put(const GMJob &job,const GMConfig &config) { std::string fname = job.SessionDir() + sfx_lrmsoutput; if(config.StrictSession()) { Arc::FileAccess fa; if(!fa.fa_setuid(job.get_user().get_uid(),job.get_user().get_gid())) return false; return job_mark_put(fa,fname) && fix_file_permissions(fa,fname); }; return job_mark_put(fname) && fix_file_owner(fname,job) && fix_file_permissions(fname); } bool job_lrmsoutput_mark_remove(const GMJob &job,const GMConfig &config) { std::string fname = job.SessionDir() + sfx_lrmsoutput; if(config.StrictSession()) { Arc::FileAccess fa; if(!fa.fa_setuid(job.get_user().get_uid(),job.get_user().get_gid())) return false; return job_mark_remove(fa,fname); }; return job_mark_remove(fname); } std::string job_mark_read(const std::string &fname) { std::string s(""); Arc::FileRead(fname, s); return s; } bool job_mark_write(const std::string &fname,const std::string &content) { return Arc::FileCreate(fname, content); } bool job_mark_add(const std::string &fname,const std::string &content) { int h=open(fname.c_str(),O_WRONLY | O_CREAT | O_APPEND,S_IRUSR | S_IWUSR); if(h==-1) return false; write(h,(const void *)content.c_str(),content.length()); close(h); return true; } bool job_mark_put(const std::string &fname) { int h=open(fname.c_str(),O_WRONLY | O_CREAT,S_IRUSR | S_IWUSR); if(h==-1) return false; close(h); return true; } static bool job_mark_put(Arc::FileAccess& fa, const std::string &fname) { if(!fa.fa_open(fname,O_WRONLY | O_CREAT,S_IRUSR | S_IWUSR)) return false; fa.fa_close(); return true; } bool job_mark_check(const std::string &fname) { struct stat st; if(lstat(fname.c_str(),&st) != 0) return false; if(!S_ISREG(st.st_mode)) return false; return true; } bool job_mark_remove(const std::string &fname) { if(unlink(fname.c_str()) != 0) { if(errno != ENOENT) return false; }; return true; } static bool job_mark_remove(Arc::FileAccess& fa,const std::string &fname) { if(!fa.fa_unlink(fname)) { if(fa.geterrno() != ENOENT) return false; }; return true; } time_t job_mark_time(const std::string &fname) { struct stat st; if(lstat(fname.c_str(),&st) != 0) return 0; if(st.st_mtime == 0) st.st_mtime = 1; // doomsday protection return st.st_mtime; } long int job_mark_size(const std::string &fname) { struct stat st; if(lstat(fname.c_str(),&st) != 0) return 0; if(!S_ISREG(st.st_mode)) return 0; return st.st_size; } bool job_errors_mark_put(const GMJob &job,const GMConfig &config) { std::string fname = config.ControlDir() + "/job." + job.get_id() + sfx_errors; return job_mark_put(fname) && fix_file_owner(fname,job) && fix_file_permissions(fname); } bool job_errors_mark_add(const GMJob &job,const GMConfig &config,const std::string &msg) { std::string fname = config.ControlDir() + "/job." + job.get_id() + sfx_errors; return job_mark_add(fname,msg) && fix_file_owner(fname,job) && fix_file_permissions(fname); } std::string job_errors_filename(const JobId &id, const GMConfig &config) { return config.ControlDir() + "/job." 
+ id + sfx_errors; } time_t job_state_time(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/job." + id + sfx_status; time_t t = job_mark_time(fname); if(t != 0) return t; fname = config.ControlDir() + "/" + subdir_cur + "/job." + id + sfx_status; t = job_mark_time(fname); if(t != 0) return t; fname = config.ControlDir() + "/" + subdir_new + "/job." + id + sfx_status; t = job_mark_time(fname); if(t != 0) return t; fname = config.ControlDir() + "/" + subdir_rew + "/job." + id + sfx_status; t = job_mark_time(fname); if(t != 0) return t; fname = config.ControlDir() + "/" + subdir_old + "/job." + id + sfx_status; return job_mark_time(fname); } job_state_t job_state_read_file(const JobId &id,const GMConfig &config) { bool pending; return job_state_read_file(id, config, pending); } job_state_t job_state_read_file(const JobId &id,const GMConfig &config,bool& pending) { std::string fname = config.ControlDir() + "/job." + id + sfx_status; job_state_t st = job_state_read_file(fname,pending); if(st != JOB_STATE_DELETED) return st; fname = config.ControlDir() + "/" + subdir_cur + "/job." + id + sfx_status; st = job_state_read_file(fname,pending); if(st != JOB_STATE_DELETED) return st; fname = config.ControlDir() + "/" + subdir_new + "/job." + id + sfx_status; st = job_state_read_file(fname,pending); if(st != JOB_STATE_DELETED) return st; fname = config.ControlDir() + "/" + subdir_rew + "/job." + id + sfx_status; st = job_state_read_file(fname,pending); if(st != JOB_STATE_DELETED) return st; fname = config.ControlDir() + "/" + subdir_old + "/job." + id + sfx_status; return job_state_read_file(fname,pending); } bool job_state_write_file(const GMJob &job,const GMConfig &config,job_state_t state,bool pending) { std::string fname; if(state == JOB_STATE_ACCEPTED) { fname = config.ControlDir() + "/" + subdir_old + "/job." + job.get_id() + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/" + subdir_cur + "/job." + job.get_id() + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/" + subdir_rew + "/job." + job.get_id() + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/job." + job.get_id() + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/" + subdir_new + "/job." + job.get_id() + sfx_status; } else if((state == JOB_STATE_FINISHED) || (state == JOB_STATE_DELETED)) { fname = config.ControlDir() + "/" + subdir_new + "/job." + job.get_id() + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/" + subdir_cur + "/job." + job.get_id() + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/" + subdir_rew + "/job." + job.get_id() + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/job." + job.get_id() + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/" + subdir_old + "/job." + job.get_id() + sfx_status; } else { fname = config.ControlDir() + "/" + subdir_new + "/job." + job.get_id() + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/" + subdir_old + "/job." + job.get_id() + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/" + subdir_rew + "/job." + job.get_id() + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/job." + job.get_id() + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/" + subdir_cur + "/job." 
+ job.get_id() + sfx_status; }; return job_state_write_file(fname,state,pending) && fix_file_owner(fname,job) && fix_file_permissions(fname,job,config); } static job_state_t job_state_read_file(const std::string &fname,bool &pending) { std::string data; if(!Arc::FileRead(fname, data)) { if(!job_mark_check(fname)) return JOB_STATE_DELETED; /* job does not exist */ return JOB_STATE_UNDEFINED; /* can't open file */ }; data = data.substr(0, data.find('\n')); /* interpret information */ if(data.substr(0, 8) == "PENDING:") { data = data.substr(8); pending=true; } else { pending=false; }; return GMJob::get_state(data.c_str()); } static bool job_state_write_file(const std::string &fname,job_state_t state,bool pending) { std::string data; if (pending) data += "PENDING:"; data += GMJob::get_state_name(state); return Arc::FileCreate(fname, data); } time_t job_description_time(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/job." + id + sfx_desc; return job_mark_time(fname); } bool job_description_read_file(const JobId &id,const GMConfig &config,std::string &desc) { std::string fname = config.ControlDir() + "/job." + id + sfx_desc; return job_description_read_file(fname,desc); } bool job_description_read_file(const std::string &fname,std::string &desc) { if (!Arc::FileRead(fname, desc)) return false; while (desc.find('\n') != std::string::npos) desc.erase(desc.find('\n'), 1); return true; } bool job_description_write_file(const GMJob &job,const GMConfig &config,const std::string &desc) { std::string fname = config.ControlDir() + "/job." + job.get_id() + sfx_desc; return Arc::FileCreate(fname, desc) && fix_file_owner(fname,job) && fix_file_permissions(fname,job,config); } bool job_acl_read_file(const JobId &id,const GMConfig &config,std::string &acl) { std::string fname = config.ControlDir() + "/job." + id + sfx_acl; return job_description_read_file(fname,acl); } bool job_acl_write_file(const JobId &id,const GMConfig &config,const std::string &acl) { std::string fname = config.ControlDir() + "/job." + id + sfx_acl; return Arc::FileCreate(fname, acl); } bool job_xml_read_file(const JobId &id,const GMConfig &config,std::string &xml) { std::string fname = config.ControlDir() + "/job." + id + sfx_xml; return job_description_read_file(fname,xml); } bool job_xml_write_file(const JobId &id,const GMConfig &config,const std::string &xml) { std::string fname = config.ControlDir() + "/job." + id + sfx_xml; return Arc::FileCreate(fname, xml); } bool job_local_write_file(const GMJob &job,const GMConfig &config,const JobLocalDescription &job_desc) { std::string fname = config.ControlDir() + "/job." + job.get_id() + sfx_local; return job_local_write_file(fname,job_desc) && fix_file_owner(fname,job) && fix_file_permissions(fname,job,config); } bool job_local_write_file(const std::string &fname,const JobLocalDescription &job_desc) { return job_desc.write(fname); } bool job_local_read_file(const JobId &id,const GMConfig &config,JobLocalDescription &job_desc) { std::string fname = config.ControlDir() + "/job." + id + sfx_local; return job_local_read_file(fname,job_desc); } bool job_local_read_file(const std::string &fname,JobLocalDescription &job_desc) { return job_desc.read(fname); } bool job_local_read_var(const std::string &fname,const std::string &vnam,std::string &value) { return JobLocalDescription::read_var(fname,vnam,value); } bool job_local_read_cleanuptime(const JobId &id,const GMConfig &config,time_t &cleanuptime) { std::string fname = config.ControlDir() + "/job." 
+ id + sfx_local; std::string str; if(!job_local_read_var(fname,"cleanuptime",str)) return false; cleanuptime=Arc::Time(str).GetTime(); return true; } bool job_local_read_failed(const JobId &id,const GMConfig &config,std::string &state,std::string &cause) { state = ""; cause = ""; std::string fname = config.ControlDir() + "/job." + id + sfx_local; job_local_read_var(fname,"failedstate",state); job_local_read_var(fname,"failedcause",cause); return true; } /* job.ID.input functions */ bool job_input_write_file(const GMJob &job,const GMConfig &config,std::list &files) { std::string fname = config.ControlDir() + "/job." + job.get_id() + sfx_input; return job_Xput_write_file(fname,files) && fix_file_owner(fname,job) && fix_file_permissions(fname); } bool job_input_read_file(const JobId &id,const GMConfig &config,std::list &files) { std::string fname = config.ControlDir() + "/job." + id + sfx_input; return job_Xput_read_file(fname,files); } bool job_input_status_add_file(const GMJob &job,const GMConfig &config,const std::string& file) { // 1. lock // 2. add // 3. unlock std::string fname = config.ControlDir() + "/job." + job.get_id() + sfx_inputstatus; Arc::FileLock lock(fname); for (int i = 10; !lock.acquire() && i >= 0; --i) { if (i == 0) return false; sleep(1); } std::string data; if (!Arc::FileRead(fname, data) && errno != ENOENT) { lock.release(); return false; } std::ostringstream line; line<& files) { std::string fname = config.ControlDir() + "/job." + id + sfx_inputstatus; Arc::FileLock lock(fname); for (int i = 10; !lock.acquire() && i >= 0; --i) { if (i == 0) return false; sleep(1); } bool r = Arc::FileRead(fname, files); lock.release(); return r; } /* job.ID.output functions */ bool job_output_write_file(const GMJob &job,const GMConfig &config,std::list &files,job_output_mode mode) { std::string fname = config.ControlDir() + "/job." + job.get_id() + sfx_output; return job_Xput_write_file(fname,files,mode) && fix_file_owner(fname,job) && fix_file_permissions(fname); } bool job_output_read_file(const JobId &id,const GMConfig &config,std::list &files) { std::string fname = config.ControlDir() + "/job." + id + sfx_output; return job_Xput_read_file(fname,files); } bool job_output_status_add_file(const GMJob &job,const GMConfig &config,const FileData& file) { // Not using lock here because concurrent read/write is not expected std::string fname = config.ControlDir() + "/job." + job.get_id() + sfx_outputstatus; std::string data; if (!Arc::FileRead(fname, data) && errno != ENOENT) return false; std::ostringstream line; line< &files) { std::string fname = config.ControlDir() + "/job." + job.get_id() + sfx_outputstatus; return job_Xput_write_file(fname,files) && fix_file_owner(fname,job) && fix_file_permissions(fname); } bool job_output_status_read_file(const JobId &id,const GMConfig &config,std::list &files) { std::string fname = config.ControlDir() + "/job." 
+ id + sfx_outputstatus; return job_Xput_read_file(fname,files); } /* common functions */ bool job_Xput_write_file(const std::string &fname,std::list &files,job_output_mode mode, uid_t uid, gid_t gid) { std::ostringstream s; for(FileData::iterator i=files.begin();i!=files.end(); ++i) { if(mode == job_output_all) { s << (*i) << std::endl; } else if(mode == job_output_success) { if(i->ifsuccess) { s << (*i) << std::endl; } else { // This case is handled at higher level }; } else if(mode == job_output_cancel) { if(i->ifcancel) { s << (*i) << std::endl; } else { // This case is handled at higher level }; } else if(mode == job_output_failure) { if(i->iffailure) { s << (*i) << std::endl; } else { // This case is handled at higher level }; }; }; if (!Arc::FileCreate(fname, s.str(), uid, gid)) return false; return true; } bool job_Xput_read_file(const std::string &fname,std::list &files, uid_t uid, gid_t gid) { std::list file_content; if (!Arc::FileRead(fname, file_content, uid, gid)) return false; for(std::list::iterator i = file_content.begin(); i != file_content.end(); ++i) { FileData fd; std::istringstream s(*i); s >> fd; if(!fd.pfn.empty()) files.push_back(fd); }; return true; } std::string job_proxy_filename(const JobId &id, const GMConfig &config){ return config.ControlDir() + "/job." + id + sfx_proxy; } bool job_proxy_write_file(const GMJob &job,const GMConfig &config,const std::string &cred) { std::string fname = config.ControlDir() + "/job." + job.get_id() + sfx_proxy; return Arc::FileCreate(fname, cred, 0, 0, S_IRUSR | S_IWUSR) && fix_file_owner(fname,job); } bool job_proxy_read_file(const JobId &id,const GMConfig &config,std::string &cred) { std::string fname = config.ControlDir() + "/job." + id + sfx_proxy; return Arc::FileRead(fname, cred, 0, 0); } bool job_clean_finished(const JobId &id,const GMConfig &config) { std::string fname; fname = config.ControlDir()+"/job."+id+".proxy.tmp"; remove(fname.c_str()); fname = config.ControlDir()+"/job."+id+".lrms_done"; remove(fname.c_str()); return true; } bool job_clean_deleted(const GMJob &job,const GMConfig &config,std::list cache_per_job_dirs) { std::string id = job.get_id(); job_clean_finished(id,config); std::string session; if(job.GetLocalDescription() && !job.GetLocalDescription()->sessiondir.empty()) session = job.GetLocalDescription()->sessiondir; else session = job.SessionDir(); std::string fname; fname = config.ControlDir()+"/job."+id+sfx_proxy; remove(fname.c_str()); fname = config.ControlDir()+"/"+subdir_new+"/job."+id+sfx_restart; remove(fname.c_str()); fname = config.ControlDir()+"/job."+id+sfx_errors; remove(fname.c_str()); fname = config.ControlDir()+"/"+subdir_new+"/job."+id+sfx_cancel; remove(fname.c_str()); fname = config.ControlDir()+"/"+subdir_new+"/job."+id+sfx_clean; remove(fname.c_str()); fname = config.ControlDir()+"/job."+id+sfx_output; remove(fname.c_str()); fname = config.ControlDir()+"/job."+id+sfx_input; remove(fname.c_str()); fname = config.ControlDir()+"/job."+id+".grami_log"; remove(fname.c_str()); fname = session+sfx_lrmsoutput; remove(fname.c_str()); fname = config.ControlDir()+"/job."+id+sfx_outputstatus; remove(fname.c_str()); fname = config.ControlDir()+"/job."+id+sfx_inputstatus; remove(fname.c_str()); fname = config.ControlDir()+"/job."+id+sfx_statistics; remove(fname.c_str()); /* remove session directory */ if(config.StrictSession()) { Arc::DirDelete(session, true, job.get_user().get_uid(), job.get_user().get_gid()); } else { Arc::DirDelete(session); } // remove cache per-job links, in case this failed 
earlier for (std::list::iterator i = cache_per_job_dirs.begin(); i != cache_per_job_dirs.end(); i++) { Arc::DirDelete((*i) + "/" + id); } return true; } bool job_clean_final(const GMJob &job,const GMConfig &config) { std::string id = job.get_id(); job_clean_finished(id,config); job_clean_deleted(job,config); std::string fname; fname = config.ControlDir()+"/job."+id+sfx_local; remove(fname.c_str()); fname = config.ControlDir()+"/job."+id+".grami"; remove(fname.c_str()); fname = config.ControlDir()+"/job."+id+sfx_failed; remove(fname.c_str()); job_diagnostics_mark_remove(job,config); job_lrmsoutput_mark_remove(job,config); fname = config.ControlDir()+"/job."+id+sfx_status; remove(fname.c_str()); fname = config.ControlDir()+"/"+subdir_new+"/job."+id+sfx_status; remove(fname.c_str()); fname = config.ControlDir()+"/"+subdir_cur+"/job."+id+sfx_status; remove(fname.c_str()); fname = config.ControlDir()+"/"+subdir_old+"/job."+id+sfx_status; remove(fname.c_str()); fname = config.ControlDir()+"/"+subdir_rew+"/job."+id+sfx_status; remove(fname.c_str()); fname = config.ControlDir()+"/job."+id+sfx_desc; remove(fname.c_str()); fname = config.ControlDir()+"/job."+id+sfx_xml; remove(fname.c_str()); return true; } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/PaxHeaders.7502/ControlFileHandling.h0000644000000000000000000000012412675602216030327 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.972719 30 ctime=1513200663.017784394 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/ControlFileHandling.h0000644000175000002070000002356312675602216030405 0ustar00mockbuildmock00000000000000#ifndef GRID_MANAGER_INFO_FILES_H #define GRID_MANAGER_INFO_FILES_H #include #include #include "../jobs/GMJob.h" #include "ControlFileContent.h" namespace ARex { class GMConfig; class GMJob; /* Definition of functions used to manipulate files used to stored information about jobs. Most used arguments: job - description of job. Mostly used to obtain job identifier and directories associated with job. config - GM configuration. Used to get control dir information. id - job identifier. Used to derive names of files. */ extern const char * const sfx_cancel; extern const char * const sfx_restart; extern const char * const sfx_clean; extern const char * const subdir_new; extern const char * const subdir_cur; extern const char * const subdir_old; extern const char * const subdir_rew; enum job_output_mode { job_output_all, job_output_success, job_output_cancel, job_output_failure }; // Set permissions of file 'fname' to -rw------- or if 'executable' is set // to - -rwx--------- . bool fix_file_permissions(const std::string &fname,bool executable = false); // Set permissions taking into account share uid/gid in GMConfig bool fix_file_permissions(const std::string &fname,const GMJob &job,const GMConfig &config); bool fix_file_permissions_in_session(const std::string &fname,const GMJob &job,const GMConfig &config,bool executable); // Set owner of file 'fname' to one specified in 'job' bool fix_file_owner(const std::string &fname,const GMJob &job); // Set owner to user bool fix_file_owner(const std::string &fname,const Arc::User& user); // Check if file is owned by current user. If user is equivalent to root any // file is accepted. // Returns: // true - belongs // false - does not belong or error. // If file exists 'uid', 'gid' and 't' are set to uid, gid and creation // of that file. 
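// Illustrative usage sketch (not part of this header; the local names below are
// hypothetical): a caller that needs the owner and age of a control file before
// trusting it could, for example, do
//
//   uid_t uid; gid_t gid; time_t created;
//   if (check_file_owner(fname, uid, gid, created)) {
//     // file exists and is owned by the current (or root) user
//   }
//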
bool check_file_owner(const std::string &fname); bool check_file_owner(const std::string &fname,uid_t &uid,gid_t &gid); bool check_file_owner(const std::string &fname,uid_t &uid,gid_t &gid,time_t &t); // Check existence, remove and read content of file used to mark // job finish in LRMS. This file is created by external script/executable // after it detects job exited and contains exit code of that job. bool job_lrms_mark_check(const JobId &id,const GMConfig &config); bool job_lrms_mark_remove(const JobId &id,const GMConfig &config); LRMSResult job_lrms_mark_read(const JobId &id,const GMConfig &config); // Create, check existence and remove file used to mark cancellation // request for specified job. The content of file is not important. bool job_cancel_mark_put(const GMJob &job,const GMConfig &config); bool job_cancel_mark_check(const JobId &id,const GMConfig &config); bool job_cancel_mark_remove(const JobId &id,const GMConfig &config); // Create, check existence and remove file used to mark request to // restart job. The content of file is not important. bool job_restart_mark_put(const GMJob &job,const GMConfig &config); bool job_restart_mark_check(const JobId &id,const GMConfig &config); bool job_restart_mark_remove(const JobId &id,const GMConfig &config); // Same for file, which marks job cleaning/removal request bool job_clean_mark_put(const GMJob &job,const GMConfig &config); bool job_clean_mark_check(const JobId &id,const GMConfig &config); bool job_clean_mark_remove(const JobId &id,const GMConfig &config); // Create (with given content), add to content, check for existence, delete // and read content of file used to mark failure of the job. // Content describes reason of failure (usually 1-2 strings). bool job_failed_mark_put(const GMJob &job,const GMConfig &config,const std::string &content = ""); bool job_failed_mark_add(const GMJob &job,const GMConfig &config,const std::string &content); bool job_failed_mark_check(const JobId &id,const GMConfig &config); bool job_failed_mark_remove(const JobId &id,const GMConfig &config); std::string job_failed_mark_read(const JobId &id,const GMConfig &config); // Create, add content, delete and move from session to control directory // file holding information about resources used by job while running. // Content is normally produced by "time" utility. bool job_controldiag_mark_put(const GMJob &job,const GMConfig &config,char const * const args[]); bool job_diagnostics_mark_put(const GMJob &job,const GMConfig &config); bool job_diagnostics_mark_remove(const GMJob &job,const GMConfig &config); bool job_diagnostics_mark_move(const GMJob &job,const GMConfig &config); // Same for file containing messages from LRMS, which could give additional // information about reason of job failure. bool job_lrmsoutput_mark_put(const GMJob &job,const GMConfig &config); // Common purpose functions, used by previous functions. std::string job_mark_read(const std::string &fname); bool job_mark_write(const std::string &fname,const std::string &content); bool job_mark_add(const std::string &fname,const std::string &content); bool job_mark_put(const std::string &fname); bool job_mark_check(const std::string &fname); bool job_mark_remove(const std::string &fname); time_t job_mark_time(const std::string &fname); long int job_mark_size(const std::string &fname); // Create file to store stderr of external utilities used to stage-in/out // data, submit/cancel job in LRMS. 
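// Illustrative sketch only: a staging or LRMS wrapper would typically create
// the errors file once and then append diagnostic lines to it, e.g.
//
//   job_errors_mark_put(job, config);
//   job_errors_mark_add(job, config, "submission to LRMS failed");
//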
bool job_errors_mark_put(const GMJob &job,const GMConfig &config); bool job_errors_mark_add(const GMJob &job,const GMConfig &config,const std::string &msg); std::string job_errors_filename(const JobId &id, const GMConfig &config); // Get modification time of file used to store state of the job. time_t job_state_time(const JobId &id,const GMConfig &config); // Read and write file storing state of the job. job_state_t job_state_read_file(const JobId &id,const GMConfig &config); job_state_t job_state_read_file(const JobId &id,const GMConfig &config,bool &pending); bool job_state_write_file(const GMJob &job,const GMConfig &config,job_state_t state,bool pending = false); // Get modification time of file used to store description of the job. time_t job_description_time(const JobId &id,const GMConfig &config); // Read and write file used to store description of job. bool job_description_read_file(const JobId &id,const GMConfig &config,std::string &desc); bool job_description_read_file(const std::string &fname,std::string &desc); bool job_description_write_file(const GMJob &job,const GMConfig &config,const std::string &desc); // Read and write file used to store ACL of job. bool job_acl_read_file(const JobId &id,const GMConfig &config,std::string &acl); bool job_acl_write_file(const JobId &id,const GMConfig &config,const std::string &acl); // Read and write xml file containing job description. bool job_xml_read_file(const JobId &id,const GMConfig &config,std::string &xml); bool job_xml_write_file(const JobId &id,const GMConfig &config,const std::string &xml); // Write and read file, containing most important/needed job parameters. // Information is passed to/from file through 'job' object. bool job_local_write_file(const GMJob &job,const GMConfig &config,const JobLocalDescription &job_desc); bool job_local_write_file(const std::string &fname,const JobLocalDescription &job_desc); bool job_local_read_file(const JobId &id,const GMConfig &config,JobLocalDescription &job_desc); bool job_local_read_file(const std::string &fname,JobLocalDescription &job_desc); // Read only some attributes from previously mentioned file. bool job_local_read_cleanuptime(const JobId &id,const GMConfig &config,time_t &cleanuptime); bool job_local_read_failed(const JobId &id,const GMConfig &config,std::string &state,std::string &cause); // Write and read file containing list of input files. Each line of file // contains name of input file relative to session directory and optionally // source, from which it should be transferred. bool job_input_write_file(const GMJob &job,const GMConfig &config,std::list &files); bool job_input_read_file(const JobId &id,const GMConfig &config,std::list &files); bool job_input_status_add_file(const GMJob &job,const GMConfig &config,const std::string& file = ""); bool job_input_status_read_file(const JobId &id,const GMConfig &config,std::list& files); // Write and read file containing list of output files. Each line of file // contains name of output file relative to session directory and optionally // destination, to which it should be transferred. 
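// For illustration only (hypothetical values; the exact quoting/escaping is
// defined by FileData's stream operators), a line in the output list might
// look like
//
//   output.dat gsiftp://se.example.org/data/output.dat
//
// where the destination part is optional.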
bool job_output_write_file(const GMJob &job,const GMConfig &config,std::list &files,job_output_mode mode = job_output_all); bool job_output_read_file(const JobId &id,const GMConfig &config,std::list &files); bool job_output_status_add_file(const GMJob &job,const GMConfig &config,const FileData& file); bool job_output_status_write_file(const GMJob &job,const GMConfig &config,std::list& files); bool job_output_status_read_file(const JobId &id,const GMConfig &config,std::list& files); // Common functions for input/output files. bool job_Xput_read_file(const std::string &fname,std::list &files, uid_t uid = 0, gid_t gid = 0); bool job_Xput_write_file(const std::string &fname,std::list &files, job_output_mode mode = job_output_all, uid_t uid = 0, gid_t gid = 0); // Return filename storing job's proxy. std::string job_proxy_filename(const JobId &id, const GMConfig &config); bool job_proxy_write_file(const GMJob &job,const GMConfig &config,const std::string &cred); bool job_proxy_read_file(const JobId &id,const GMConfig &config,std::string &cred); // Remove all files, which should be removed after job's state becomes FINISHED bool job_clean_finished(const JobId &id,const GMConfig &config); // Remove all files, which should be removed after job's state becomes DELETED bool job_clean_deleted(const GMJob &job,const GMConfig &config, std::list cache_per_job_dirs=std::list()); // Remove all job's files. bool job_clean_final(const GMJob &job,const GMConfig &config); } // namespace ARex #endif nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/PaxHeaders.7502/README0000644000000000000000000000012311016612002025126 xustar000000000000000026 mtime=1211831298.62818 27 atime=1513200575.975719 30 ctime=1513200663.009784297 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/files/README0000644000175000002070000000004211016612002025170 0ustar00mockbuildmock00000000000000control and other files handling. nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/log0000644000000000000000000000013213214316026023661 xustar000000000000000030 mtime=1513200662.960783697 30 atime=1513200668.718854121 30 ctime=1513200662.960783697 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/log/0000755000175000002070000000000013214316026024004 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/log/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712771224701026003 xustar000000000000000027 mtime=1474636225.243157 30 atime=1513200602.303041829 30 ctime=1513200662.954783624 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/log/Makefile.am0000644000175000002070000000041712771224701026047 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = liblog.la liblog_la_SOURCES = JobLog.cpp JobLog.h JobsMetrics.cpp JobsMetrics.h liblog_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) liblog_la_LIBADD = $(top_builddir)/src/hed/libs/common/libarccommon.la nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/log/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315732026006 xustar000000000000000030 mtime=1513200602.351042416 30 atime=1513200650.008625287 30 ctime=1513200662.956783648 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/log/Makefile.in0000644000175000002070000005641213214315732026064 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. 
# @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/grid-manager/log DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) liblog_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_liblog_la_OBJECTS = liblog_la-JobLog.lo liblog_la-JobsMetrics.lo liblog_la_OBJECTS = $(am_liblog_la_OBJECTS) liblog_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(liblog_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = 
$(liblog_la_SOURCES) DIST_SOURCES = $(liblog_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ 
GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ 
bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = liblog.la liblog_la_SOURCES = JobLog.cpp JobLog.h JobsMetrics.cpp JobsMetrics.h liblog_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) liblog_la_LIBADD = $(top_builddir)/src/hed/libs/common/libarccommon.la all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/log/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/log/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done liblog.la: $(liblog_la_OBJECTS) $(liblog_la_DEPENDENCIES) $(liblog_la_LINK) $(liblog_la_OBJECTS) $(liblog_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblog_la-JobLog.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblog_la-JobsMetrics.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< liblog_la-JobLog.lo: JobLog.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(liblog_la_CXXFLAGS) $(CXXFLAGS) -MT liblog_la-JobLog.lo -MD -MP -MF $(DEPDIR)/liblog_la-JobLog.Tpo -c -o liblog_la-JobLog.lo `test -f 'JobLog.cpp' || echo '$(srcdir)/'`JobLog.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/liblog_la-JobLog.Tpo $(DEPDIR)/liblog_la-JobLog.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobLog.cpp' object='liblog_la-JobLog.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(liblog_la_CXXFLAGS) $(CXXFLAGS) -c -o liblog_la-JobLog.lo `test -f 'JobLog.cpp' || echo '$(srcdir)/'`JobLog.cpp liblog_la-JobsMetrics.lo: JobsMetrics.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) 
--tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(liblog_la_CXXFLAGS) $(CXXFLAGS) -MT liblog_la-JobsMetrics.lo -MD -MP -MF $(DEPDIR)/liblog_la-JobsMetrics.Tpo -c -o liblog_la-JobsMetrics.lo `test -f 'JobsMetrics.cpp' || echo '$(srcdir)/'`JobsMetrics.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/liblog_la-JobsMetrics.Tpo $(DEPDIR)/liblog_la-JobsMetrics.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobsMetrics.cpp' object='liblog_la-JobsMetrics.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(liblog_la_CXXFLAGS) $(CXXFLAGS) -c -o liblog_la-JobsMetrics.lo `test -f 'JobsMetrics.cpp' || echo '$(srcdir)/'`JobsMetrics.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/log/PaxHeaders.7502/JobsMetrics.cpp0000644000000000000000000000012313065020502026661 xustar000000000000000027 mtime=1490297154.020889 26 atime=1513200576.05972 30 ctime=1513200662.959783685 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/log/JobsMetrics.cpp0000644000175000002070000001721613065020502026736 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "JobsMetrics.h" namespace ARex { static Arc::Logger& logger = Arc::Logger::getRootLogger(); JobsMetrics::JobsMetrics():enabled(false),proc(NULL) { std::memset(jobs_processed, 0, sizeof(jobs_processed)); std::memset(jobs_in_state, 0, sizeof(jobs_in_state)); std::memset(jobs_processed_changed, 0, sizeof(jobs_processed_changed)); std::memset(jobs_in_state_changed, 0, sizeof(jobs_in_state_changed)); std::memset(jobs_state_old_new, 0, sizeof(jobs_state_old_new)); std::memset(jobs_state_old_new_changed, 0, sizeof(jobs_state_old_new_changed)); std::memset(jobs_rate, 0, sizeof(jobs_rate)); std::memset(jobs_rate_changed, 0, sizeof(jobs_rate_changed)); std::memset(jobs_state_accum, 0, sizeof(jobs_state_accum)); std::memset(jobs_state_accum_last, 0, sizeof(jobs_state_accum_last)); time_lastupdate = time(NULL); } JobsMetrics::~JobsMetrics() { } void JobsMetrics::SetEnabled(bool val) { enabled = val; } void JobsMetrics::SetConfig(const char* fname) { config_filename = fname; } void JobsMetrics::SetPath(const char* path) { tool_path = path; } static const char* gmetric_tool = "gmetric"; void JobsMetrics::ReportJobStateChange(std::string job_id, job_state_t new_state, job_state_t old_state) { Glib::RecMutex::Lock lock_(lock); //actual states if(old_state < JOB_STATE_UNDEFINED) { ++(jobs_processed[old_state]); jobs_processed_changed[old_state] = true; --(jobs_in_state[old_state]); jobs_in_state_changed[old_state] = true; }; if(new_state < JOB_STATE_UNDEFINED) { ++(jobs_in_state[new_state]); jobs_in_state_changed[new_state] = true; }; //transitions and rates if((old_state <= JOB_STATE_UNDEFINED) && (new_state < JOB_STATE_UNDEFINED)){ job_state_t last_old = JOB_STATE_UNDEFINED; job_state_t last_new = JOB_STATE_UNDEFINED; //find this jobs old and new state from last iteration if(jobs_state_old_map.find(job_id) != jobs_state_old_map.end()){ last_old = jobs_state_old_map.find(job_id)->second; } if(jobs_state_new_map.find(job_id) != jobs_state_new_map.end()){ last_new = jobs_state_new_map.find(job_id)->second; } if( (last_old <= JOB_STATE_UNDEFINED) && (last_new < JOB_STATE_UNDEFINED) ){ --jobs_state_old_new[last_old][last_new]; jobs_state_old_new_changed[last_old][last_new] = true; ++jobs_state_old_new[old_state][new_state]; jobs_state_old_new_changed[old_state][new_state] = true; //update the old and new state jobid maps for next iteration std::map::iterator it; it = jobs_state_old_map.find(job_id); if (it != jobs_state_old_map.end()){ it->second = old_state; } it = jobs_state_new_map.find(job_id); if (it != jobs_state_new_map.end()){ it->second = new_state; } } //for each statechange, increase number of jobs in the state and calculate rates, at defined periods: update accum-array and histograms ++jobs_state_accum[new_state]; time_now = time(NULL); time_delta = time_now - time_lastupdate; //loop over all states and caluclate rate, double rate = 0.; for (int state = 0; state < JOB_STATE_UNDEFINED; ++state){ if(time_delta != 0){ rate = (static_cast(jobs_state_accum[state]) - static_cast(jobs_state_accum_last[state]))/time_delta; 
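// Descriptive note: rate is the number of jobs that entered this state since
// the last histogram update, divided by the elapsed wall-clock seconds, i.e.
// a jobs-per-second figure later reported via gmetric with unit "jobs/s".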
jobs_rate[state] = rate; } //only update histograms and values if time since last update is larger or equal defined interval if(time_delta >= GMETRIC_STATERATE_UPDATE_INTERVAL){ time_lastupdate = time_now; jobs_state_accum_last[state] = jobs_state_accum[state]; jobs_rate_changed[state] = true; } } } Sync(); } bool JobsMetrics::CheckRunMetrics(void) { if(!proc) return true; if(proc->Running()) return false; int run_result = proc->Result(); if(run_result != 0) { logger.msg(Arc::ERROR,": Metrics tool returned error code %i: %s",run_result,proc_stderr); }; proc = NULL; return true; } void JobsMetrics::Sync(void) { if(!enabled) return; // not configured Glib::RecMutex::Lock lock_(lock); if(!CheckRunMetrics()) return; // Run gmetric to report one change at a time //since only one process can be started from Sync(), only 1 histogram can be sent at a time, therefore return for each call; //Sync is therefore called multiple times until there are not more histograms that have changed for(int state = 0; state < JOB_STATE_UNDEFINED; ++state) { if(jobs_processed_changed[state]) { if(RunMetrics( std::string("AREX-JOBS-PROCESSED-") + Arc::tostring(state) + "-" + GMJob::get_state_name(static_cast(state)), Arc::tostring(jobs_processed[state]), "int32", "jobs" )) { jobs_processed_changed[state] = false; return; }; }; if(jobs_in_state_changed[state]) { if(RunMetrics( std::string("AREX-JOBS-IN_STATE-") + Arc::tostring(state) + "-" + GMJob::get_state_name(static_cast(state)), Arc::tostring(jobs_in_state[state]), "int32", "jobs" )) { jobs_in_state_changed[state] = false; return; }; }; if(jobs_rate_changed[state]) { if(RunMetrics( std::string("AREX-JOBS-RATE-") + Arc::tostring(state) + "-" + GMJob::get_state_name(static_cast(state)), Arc::tostring(jobs_rate[state]), "double", "jobs/s" )) { jobs_rate_changed[state] = false; return; }; }; }; for(int state_old = 0; state_old <= JOB_STATE_UNDEFINED; ++state_old){ for(int state_new = 1; state_new < JOB_STATE_UNDEFINED; ++state_new){ if(jobs_state_old_new_changed[state_old][state_new]){ std::string histname = std::string("AREX-JOBS-FROM-") + Arc::tostring(state_old) + "-" + GMJob::get_state_name(static_cast(state_old)) + "-TO-" + Arc::tostring(state_new) + "-" + GMJob::get_state_name(static_cast(state_new)); if(RunMetrics(histname, Arc::tostring(jobs_state_old_new[state_old][state_new]), "int32", "jobs")){ jobs_state_old_new_changed[state_old][state_new] = false; return; }; }; }; }; } bool JobsMetrics::RunMetrics(const std::string name, const std::string& value, const std::string unit_type, const std::string unit) { if(proc) return false; std::list cmd; if(tool_path.empty()) { cmd.push_back(gmetric_tool); } else { cmd.push_back(tool_path+G_DIR_SEPARATOR_S+gmetric_tool); }; if(!config_filename.empty()) { cmd.push_back("-c"); cmd.push_back(config_filename); }; cmd.push_back("-n"); cmd.push_back(name); cmd.push_back("-v"); cmd.push_back(value); cmd.push_back("-t");//unit-type cmd.push_back(unit_type); cmd.push_back("-u");//unit cmd.push_back(unit); proc = new Arc::Run(cmd); proc->AssignStderr(proc_stderr); proc->AssignKicker(&RunMetricsKicker, this); if(!(proc->Start())) { delete proc; proc = NULL; return false; }; return true; } void JobsMetrics::SyncAsync(void* arg) { JobsMetrics& it = *reinterpret_cast(arg); if(&it) { Glib::RecMutex::Lock lock_(it.lock); if(it.proc) { // Continue only if no failure in previous call. // Otherwise it can cause storm of failed calls. 
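// Descriptive note: proc->Result() is the exit code of the previously started
// metrics-tool process (gmetric by default); a non-zero code makes this
// kicker-driven path skip calling Sync() again, so a failing gmetric does not
// trigger a cascade of retries.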
if(it.proc->Result() == 0) { it.Sync(); }; }; }; } void JobsMetrics::RunMetricsKicker(void* arg) { // Currently it is not allowed to start new external process // from inside process licker (todo: redesign). // So do it asynchronously from another thread. Arc::CreateThreadFunction(&SyncAsync, arg); } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/log/PaxHeaders.7502/JobsMetrics.h0000644000000000000000000000012313065020502026326 xustar000000000000000027 mtime=1490297154.020889 26 atime=1513200576.06172 30 ctime=1513200662.960783697 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/log/JobsMetrics.h0000644000175000002070000000366113065020502026402 0ustar00mockbuildmock00000000000000/* write essential inforamtion about job started/finished */ #ifndef __GM_JOBS_METRICS_H__ #define __GM_JOBS_METRICS_H__ #include #include #include #include #include #include "../jobs/GMJob.h" #define GMETRIC_STATERATE_UPDATE_INTERVAL 5//to-fix this value could be set in arc.conf to be tailored to site namespace ARex { class JobsMetrics { private: Glib::RecMutex lock; bool enabled; std::string config_filename; std::string tool_path; time_t time_now; time_t time_lastupdate; time_t time_delta; unsigned long long int jobs_processed[JOB_STATE_UNDEFINED]; unsigned long long int jobs_in_state[JOB_STATE_UNDEFINED]; unsigned long long int jobs_state_old_new[JOB_STATE_UNDEFINED+1][JOB_STATE_UNDEFINED]; unsigned long long int jobs_state_accum[JOB_STATE_UNDEFINED+1]; unsigned long long int jobs_state_accum_last[JOB_STATE_UNDEFINED+1]; double jobs_rate[JOB_STATE_UNDEFINED]; bool jobs_processed_changed[JOB_STATE_UNDEFINED]; bool jobs_in_state_changed[JOB_STATE_UNDEFINED]; bool jobs_state_old_new_changed[JOB_STATE_UNDEFINED+1][JOB_STATE_UNDEFINED]; bool jobs_rate_changed[JOB_STATE_UNDEFINED]; std::map jobs_state_old_map; std::map jobs_state_new_map; Arc::Run *proc; std::string proc_stderr; bool RunMetrics(const std::string name, const std::string& value, const std::string unit_type, const std::string unit); bool CheckRunMetrics(void); static void RunMetricsKicker(void* arg); static void SyncAsync(void* arg); public: JobsMetrics(void); ~JobsMetrics(void); void SetEnabled(bool val); /* chose name of configuration file */ void SetConfig(const char* fname); /* chose name of configuration file */ void SetPath(const char* path); void ReportJobStateChange(std::string job_id, job_state_t new_state, job_state_t old_state); void Sync(void); }; } // namespace ARex #endif nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/log/PaxHeaders.7502/JobLog.cpp0000644000000000000000000000012213124220531025610 xustar000000000000000026 mtime=1498489177.92374 26 atime=1513200576.06172 30 ctime=1513200662.957783661 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/log/JobLog.cpp0000644000175000002070000001536713124220531025673 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif /* write essential information about job started/finished */ #include #include #include #include #include #include #include #include #include #include #include #include "../files/ControlFileContent.h" #include "../files/JobLogFile.h" #include "../conf/GMConfig.h" #include "JobLog.h" namespace ARex { static Arc::Logger& logger = Arc::Logger::getRootLogger(); JobLog::JobLog(void):filename(""),proc(NULL),last_run(0),period(3600),ex_period(0) { } //JobLog::JobLog(const char* fname):proc(NULL),last_run(0),ex_period(0) { // filename=fname; //} void JobLog::SetOutput(const char* fname) { filename=fname; } void 
JobLog::SetExpiration(time_t period) { ex_period=period; } bool JobLog::SetPeriod(int new_period) { if ( new_period < 3600 ) return false; period=new_period; return true; } bool JobLog::open_stream(std::ofstream &o) { o.open(filename.c_str(),std::ofstream::app); if(!o.is_open()) return false; o<<(Arc::Time().str()); o<<" "; return true; } bool JobLog::start_info(GMJob &job,const GMConfig &config) { if(filename.length()==0) return true; std::ofstream o; if(!open_stream(o)) return false; o<<"Started - job id: "<jobname; tmps = Arc::escape_chars(tmps, "\"\\", '\\', false); o<<"name: \""<DN; tmps = Arc::escape_chars(tmps, "\"\\", '\\', false); o<<"owner: \""<lrms<<", queue: "<queue; }; o<jobname; tmps = Arc::escape_chars(tmps, "\"\\", '\\', false); o<<"name: \""<DN; tmps = Arc::escape_chars(tmps, "\"\\", '\\', false); o<<"owner: \""<lrms<<", queue: "<queue; if(job_desc->localid.length() >0) o<<", lrmsid: "<localid; }; tmps = job.GetFailure(config); if(tmps.length()) { for(std::string::size_type i=0;;) { i=tmps.find('\n',i); if(i==std::string::npos) break; tmps[i]='.'; }; tmps = Arc::escape_chars(tmps, "\"\\", '\\', false); o<<", failure: \""<Running()) return true; /* running */ delete proc; proc=NULL; }; if(time(NULL) < (last_run+period)) return true; // default: once per hour last_run=time(NULL); if (logger_name.empty()) { logger.msg(Arc::ERROR,": Logger name is not specified"); return false; } std::string cmd = Arc::ArcLocation::GetToolsDir()+"/"+logger_name; cmd += " -L"; // for long format of logging if(ex_period) cmd += " -E " + Arc::tostring(ex_period); if(!vo_filters.empty()) cmd += " -F " + vo_filters; cmd += " " + config.ControlDir(); proc = new Arc::Run(cmd); if((!proc) || (!(*proc))) { delete proc; proc = NULL; logger.msg(Arc::ERROR,": Failure creating slot for reporter child process"); return false; }; std::string errlog = config.ControlDir() + "/job.logger.errors"; // backward compatibility JobLog* joblog = config.GetJobLog(); if(joblog) { if(!joblog->logfile.empty()) errlog = joblog->logfile; }; proc->AssignInitializer(&initializer,(void*)errlog.c_str()); logger.msg(Arc::DEBUG, "Running command %s", cmd); if(!proc->Start()) { delete proc; proc = NULL; logger.msg(Arc::ERROR,": Failure starting reporter child process"); return false; }; return true; } bool JobLog::SetLogger(const char* fname) { if(fname) logger_name = (std::string(fname)); return true; } bool JobLog::SetLogFile(const char* fname) { if(fname) logfile = (std::string(fname)); return true; } bool JobLog::SetVoFilters(const char* filters) { if(filters) vo_filters = (std::string(filters)); return true; } bool JobLog::SetReporter(const char* destination) { if(destination) urls.push_back(std::string(destination)); return true; } bool JobLog::make_file(GMJob &job, const GMConfig& config) { if((job.get_state() != JOB_STATE_ACCEPTED) && (job.get_state() != JOB_STATE_FINISHED)) return true; bool result = true; // for configured loggers for(std::list::iterator u = urls.begin();u!=urls.end();u++) { if(u->length()) result = job_log_make_file(job,config,*u,report_config) && result; }; // for user's logger JobLocalDescription* local; if(!job.GetLocalDescription(config)) { result=false; } else if((local=job.GetLocalDescription(config)) == NULL) { result=false; } else { if(!(local->jobreport.empty())) { for (std::list::iterator v = local->jobreport.begin(); v!=local->jobreport.end(); v++) { result = job_log_make_file(job,config,*v,report_config) && result; } }; }; return result; } void JobLog::SetCredentials(std::string 
&key_path,std::string &certificate_path,std::string &ca_certificates_dir) { if (!key_path.empty()) report_config.push_back(std::string("key_path=")+key_path); if (!certificate_path.empty()) report_config.push_back(std::string("certificate_path=")+certificate_path); if (!ca_certificates_dir.empty()) report_config.push_back(std::string("ca_certificates_dir=")+ca_certificates_dir); } JobLog::~JobLog(void) { if(proc != NULL) { if(proc->Running()) proc->Kill(0); delete proc; proc=NULL; }; } void JobLog::initializer(void* arg) { char const * errlog = (char const *)arg; int h; // set up stdin,stdout and stderr h=::open("/dev/null",O_RDONLY); if(h != 0) { if(dup2(h,0) != 0) { sleep(10); exit(1); }; close(h); }; h=::open("/dev/null",O_WRONLY); if(h != 1) { if(dup2(h,1) != 1) { sleep(10); exit(1); }; close(h); }; h=errlog ? ::open(errlog,O_WRONLY | O_CREAT | O_APPEND,S_IRUSR | S_IWUSR) : -1; if(h==-1) { h=::open("/dev/null",O_WRONLY); }; if(h != 2) { if(dup2(h,2) != 2) { sleep(10); exit(1); }; close(h); }; } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/log/PaxHeaders.7502/JobLog.h0000644000000000000000000000012312453512571025272 xustar000000000000000027 mtime=1420727673.764329 26 atime=1513200576.05972 30 ctime=1513200662.958783673 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/log/JobLog.h0000644000175000002070000000452212453512571025343 0ustar00mockbuildmock00000000000000/* write essential inforamtion about job started/finished */ #ifndef __GM_JOB_LOG_H__ #define __GM_JOB_LOG_H__ #include #include #include #include #include "../jobs/GMJob.h" namespace ARex { class GMConfig; class JobLocalDescription; /// Put short information into log when every job starts/finishes. /// And store more detailed information for Reporter. class JobLog { private: std::string filename; std::list urls; std::list report_config; // additional configuration for usage reporter std::string vo_filters; std::string certificate_path; std::string ca_certificates_dir; std::string logger_name; std::string logfile; Arc::Run *proc; time_t last_run; int period; time_t ex_period; bool open_stream(std::ofstream &o); static void initializer(void* arg); public: JobLog(void); //JobLog(const char* fname); ~JobLog(void); /* chose name of log file */ void SetOutput(const char* fname); /* log job start information */ bool start_info(GMJob &job,const GMConfig &config); /* log job finish iformation */ bool finish_info(GMJob &job,const GMConfig& config); /* Run external utility to report gathered information to logger service */ bool RunReporter(const GMConfig& config); /* Set period of running */ bool SetPeriod(int period); /* Set name of the accounting reporter */ bool SetLogger(const char* fname); /* Set name of the log file for accounting reporter */ bool SetLogFile(const char* fname); /* Set filters of VO that allow to report to accounting service */ bool SetVoFilters(const char* filters); /* Set url of service and local name to use */ // bool SetReporter(const char* destination,const char* name = NULL); bool SetReporter(const char* destination); /* Set after which too old logger information is removed */ void SetExpiration(time_t period = 0); /* Create data file for Reporter */ bool make_file(GMJob &job,const GMConfig &config); /* Set credential file names for accessing logging service */ void SetCredentials(std::string &key_path,std::string &certificate_path,std::string &ca_certificates_dir); /* Set accounting options (e.g. 
batch size for SGAS LUTS) */ void SetOptions(std::string &options) { report_config.push_back(std::string("accounting_options=")+options); } }; } // namespace ARex #endif nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/log/PaxHeaders.7502/README0000644000000000000000000000012211016612002024604 xustar000000000000000026 mtime=1211831298.62818 26 atime=1513200576.06272 30 ctime=1513200662.953783612 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/log/README0000644000175000002070000000003711016612002024653 0ustar00mockbuildmock00000000000000Local and remote log handling. nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/arc_blahp_logger.cpp0000644000000000000000000000012412574532370027146 xustar000000000000000027 mtime=1441969400.372727 27 atime=1513200576.240723 30 ctime=1513200662.792781642 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/arc_blahp_logger.cpp0000644000175000002070000002222512574532370027216 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include static Arc::Logger logger(Arc::Logger::rootLogger, "arc-blahp-logger"); static void usage(char *pname) { logger.msg(Arc::ERROR,"Usage: %s -I -U -P -L [-c ] [-p ] [-d ]", pname); } int main(int argc, char *argv[]) { int opt; const char *user_proxy_f = NULL; const char *job_local_f = NULL; const char *jobid_s = NULL; const char *user_s = NULL; const char *ceid_s = NULL; std::string logprefix = "/var/log/arc/accounting/blahp.log"; // log Arc::LogLevel debuglevel = Arc::ERROR; Arc::LogStream logcerr(std::cerr); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(debuglevel); // Parse command line options while ((opt = getopt(argc, argv, "I:U:P:L:c:p:d:")) != -1) { switch (opt) { case 'I': jobid_s = optarg; break; case 'U': user_s = optarg; break; case 'P': user_proxy_f = optarg; break; case 'L': job_local_f = optarg; break; case 'c': ceid_s = optarg; break; case 'p': logprefix = std::string(optarg); break; case 'd': debuglevel = Arc::old_level_to_level(atoi(optarg)); Arc::Logger::getRootLogger().setThreshold(debuglevel); break; default: usage(argv[0]); return EXIT_FAILURE; } } if ( !jobid_s ) { logger.msg(Arc::ERROR,"Job ID argument is required."); usage(argv[0]); return EXIT_FAILURE; } if ( !user_proxy_f ) { logger.msg(Arc::ERROR,"Path to user's proxy file should be specified."); usage(argv[0]); return EXIT_FAILURE; } if ( !user_s ) { logger.msg(Arc::ERROR,"User name should be specified."); usage(argv[0]); return EXIT_FAILURE; } if ( !job_local_f ) { logger.msg(Arc::ERROR,"Path to .local job status file is required."); usage(argv[0]); return EXIT_FAILURE; } // Get or generate ceID prefix std::string ceid; if ( !ceid_s ) { logger.msg(Arc::DEBUG,"Generating ceID prefix from hostname automatically"); char host[256]; if (gethostname(host, sizeof(host)) != 0) { logger.msg(Arc::ERROR, "Cannot determine hostname from gethostname() to generate ceID automatically."); return EXIT_FAILURE; } else { host[sizeof(host)-1] = 0; ceid = std::string(host) + ":2811/nordugrid-torque"; } } else { ceid = std::string(ceid_s); } logger.msg(Arc::DEBUG,"ceID prefix is set to %s", ceid); // Get the current timestamp for log and logsuffix Arc::SetEnv("TZ","UTC"); tzset(); Arc::Time exectime; std::string timestamp = exectime.str(Arc::UserTime); std::string logsuffix = exectime.str(Arc::MDSTime).substr(0,8); logger.msg(Arc::DEBUG,"Getting currect timestamp for BLAH 
parser log: %s", timestamp); // Parse .local file to get required information std::string globalid; std::string localid; std::string queue; std::string subject; std::string interface; std::string headnode; logger.msg(Arc::DEBUG,"Parsing .local file to obtain job-specific identifiers and info"); std::ifstream job_local; job_local.open(job_local_f, std::ios::in); if ( job_local.is_open() ) { std::string line; while ( job_local.good() ) { getline(job_local,line); if ( ! line.compare(0,9,"globalid=") ) { globalid = line.substr(9); logger.msg(Arc::DEBUG,"globalid is set to %s", globalid); } else if ( ! line.compare(0,9,"headnode=") ) { headnode = line.substr(9); logger.msg(Arc::DEBUG,"headnode is set to %s", headnode); } else if ( ! line.compare(0,10,"interface=") ) { interface = line.substr(10); logger.msg(Arc::DEBUG,"interface is set to %s", interface); } else if ( ! line.compare(0,8,"localid=") ) { localid = line.substr(8); if ( localid.empty() ) { logger.msg(Arc::ERROR,"There is no local LRMS ID. Message will not be written to BLAH log."); return EXIT_FAILURE; } logger.msg(Arc::DEBUG,"localid is set to %s", localid); } else if ( ! line.compare(0,6,"queue=") ) { queue = line.substr(6); logger.msg(Arc::DEBUG,"queue name is set to %s", queue); } else if ( ! line.compare(0,8,"subject=") ) { subject = line.substr(8); logger.msg(Arc::DEBUG,"owner subject is set to %s", subject); } else if ( ! line.compare(0,12,"failedstate=") ) { logger.msg(Arc::ERROR,"Job did not finished successfully. Message will not be written to BLAH log."); return EXIT_FAILURE; } } } else { logger.msg(Arc::ERROR,"Can not read information from the local job status file"); return EXIT_FAILURE; } // Just in case subject escape subject = Arc::escape_chars(subject, "\"\\", '\\', false); // Construct clientID depend on submission interface std::string clientid; if ( interface == "org.nordugrid.gridftpjob" ) { clientid = globalid; } else if ( interface == "org.ogf.glue.emies.activitycreation" ) { clientid = headnode + "/" + globalid; } else if ( interface == "org.nordugrid.xbes" ) { clientid = headnode + "/" + std::string(jobid_s); } else { logger.msg(Arc::ERROR,"Unsupported submission interface %s. Seems arc-blahp-logger need to be updated accordingly :-) Please submit the bug to bugzilla."); return EXIT_FAILURE; } // Get FQANs information from user's proxy // P.S. validity check is not enforced, proxy can be even expired long time before job finished Arc::Credential holder(user_proxy_f, "", "", ""); std::vector voms_attributes; Arc::VOMSTrustList vomscert_trust_dn; logger.msg(Arc::DEBUG, "Parsing VOMS AC to get FQANs information"); // suppress expired 'ERROR' from Arc.Credential output if ( debuglevel == Arc::ERROR ) Arc::Logger::getRootLogger().setThreshold(Arc::FATAL); Arc::parseVOMSAC(holder, "", "", "", vomscert_trust_dn, voms_attributes, false, true); Arc::Logger::getRootLogger().setThreshold(debuglevel); std::string fqans_logentry; std::string fqan; std::size_t pos; if(voms_attributes.size() > 0) { for (std::vector::iterator iAC = voms_attributes.begin(); iAC != voms_attributes.end(); iAC++) { for (int acnt = 1; acnt < iAC->attributes.size(); acnt++ ) { fqan = iAC->attributes[acnt]; pos = fqan.find("/Role="); if ( pos == std::string::npos ) fqan = fqan + "/Role=NULL"; fqans_logentry += "\"userFQAN=" + Arc::trim(Arc::escape_chars(fqan, "\"\\", '\\', false)) + "\" "; } } } else { logger.msg(Arc::DEBUG, "No FQAN found. 
Using NULL as userFQAN value"); fqans_logentry = "\"userFQAN=/None/Role=NULL\" "; } // Assemble BLAH logentry std::string logentry = "\"timestamp=" + timestamp + "\" \"userDN=" + Arc::trim(subject) + "\" " + fqans_logentry + "\"ceID=" + ceid + "-" + queue + "\" \"jobID=" + std::string(jobid_s) + "\" \"lrmsID=" + Arc::trim(localid) + "\" \"localUser=" + std::string(user_s) + "\" \"clientID=" + clientid + "\""; logger.msg(Arc::DEBUG, "Assembling BLAH parser log entry: %s", logentry); // Write entry to BLAH log with locking to exclude possible simultaneous writes when several jobs are finished std::string fname = logprefix + "-" + logsuffix; Arc::FileLock lock(fname); logger.msg(Arc::DEBUG,"Writing the info the the BLAH parser log: %s", fname); for (int i = 10; !lock.acquire() && i >= 0; --i) { if (i == 0) return false; sleep(1); } std::ofstream logfile; logfile.open(fname.c_str(),std::ofstream::app); if(!logfile.is_open()) { logger.msg(Arc::ERROR,"Cannot open BLAH log file '%s'", fname); lock.release(); return EXIT_FAILURE; } logfile << logentry << std::endl; logfile.close(); lock.release(); return EXIT_SUCCESS; } nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/arc-vomsac-check.8.in0000644000000000000000000000012711727735070027000 xustar000000000000000027 mtime=1331673656.000075 30 atime=1513200649.843623269 30 ctime=1513200662.786781569 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/arc-vomsac-check.8.in0000644000175000002070000000612611727735070027047 0ustar00mockbuildmock00000000000000.TH arc-vomsac-check 8 "2011-11-17" "NorduGrid @VERSION@" "NorduGrid Toolkit" .SH NAME arc-vomsac-check \- ARC VOMS AC-based queue policy enforcing plugin .SH DESCRIPTION .B ARC VOMS AC-based queue policy enforcing plugin perfors per-queue authorization based on information stored in .B VOMS AC. .SH SYNOPSIS arc-vomsac-check [-N] -P -L [-c ] [-d ] .SH OPTIONS .IP "\fB-N\fR" treat absence of VOMS AC as allowed access (deny by default) .IP "\fB-P\fR \fIuser proxy\fR" path to user proxy certificate file to get VOMS AC from .IP "\fB-L\fR \fIA-REX local\fR" A-REX jobstatus .local file (used to determine submission queue) .IP "\fB-c\fR \fIconfigfile\fR" plugin configuration file (/etc/arc.conf will be used by default) .IP "\fB-d\fR \fIloglevel\fR" logging level from 0(ERROR) to 5(DEBUG) .SH GETTING A-REX TO WORK WITH PLUGIN You must attach plugin as handler for ACCEPTED state: .B authplugin="ACCEPTED 60 /opt/arc/libexec/arc/arc-vomsac-check -L %C/job.%I.local -P %C/job.%I.proxy" .SH CONFIGURATION Queue policies need to be written into plain text configuration file of the same format as arc.conf. The plugin expects several configuration blocks for every queue identified by [queue] or [queue/name] section. The attribute value pairs identified by 'ac_policy' keyword within a queue configuration block represent rules for allowing or denying users to utilize queue. These rules are processed in order of specification. The first rule that matches the VOMS AC presented by a user stops further processing of remaining rules in the block. If no one rule mathes VOMS AC, access is denied. If no 'ac_policy' rules supplied in the queue block, access is granted. Matching rules has the following format: ac_policy="[+/-]VOMS: " Prepending '+' indicate positive match (users with FQAN match are allowed). Prepending '-' or '!' indicate negative match (users with FQAN match are prohibited). Without any prefix character, rule is treated as positive match. 
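For illustration only (a hypothetical queue block, not taken from a real configuration), rule ordering works as follows:

ac_policy="-VOMS: /students/Role=admin"
ac_policy="VOMS: /students"

Members of VO "students" holding Role "admin" match the first, negative rule and are denied before the second rule is consulted; all other "students" members are allowed by the second rule.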
FQAN format can be specified either in ARC format or general VOMS format: \&'/VO=students/Group=physics/Role=production' is the same as '/students/physics/Role=production' or '/students/Group=physics/Role=production/Capability=NULL' or any other combinations. Regalar expressions syntax can be used in FQAN specification. .SH EXAMPLE CONFIGURATION [queue/general] ac_policy="-VOMS: /students/Role=production" ac_policy="-VOMS: /students/Group=nosubmission" ac_policy="VOMS: /VO=students" [queue] name="production" ac_policy="VOMS: /students/Role=production" ac_policy="-VOMS: /badvo" ac_policy="VOMS: /.*/Role=production" In the example configuration, queue "general" can NOT be used by VO "students" users with Role "production" and VO "students" "nosubmission" Group. It CAN be used by any other members of VO "students". Queue "production" allow access to VO "students" users with Role "production", prohibit some VO "badvo" and allow any VO users with Role "production". First rule may be omitted due to common regex. .SH AUTHOR Andrii Salnikov nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/README0000644000000000000000000000012311016612002024024 xustar000000000000000026 mtime=1211831298.62818 27 atime=1513200576.251723 30 ctime=1513200662.778781471 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/README0000644000175000002070000000001611016612002024067 0ustar00mockbuildmock00000000000000Grid Manager. nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/PaxHeaders.7502/inputcheck.cpp0000644000000000000000000000012312301125744026017 xustar000000000000000027 mtime=1392815076.546455 26 atime=1513200576.01272 30 ctime=1513200662.797781704 nordugrid-arc-5.4.2/src/services/a-rex/grid-manager/inputcheck.cpp0000644000175000002070000000760512301125744026075 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include "conf/GMConfig.h" #include "files/ControlFileContent.h" #include "jobs/JobDescriptionHandler.h" #include "misc/proxy.h" static Arc::SimpleCondition cond; static Arc::Logger logger(Arc::Logger::rootLogger, "inputcheck"); class lfn_t { public: std::string lfn; bool done; bool failed; lfn_t(const std::string& l):lfn(l),done(false),failed(false) { }; }; void check_url(void *arg) { lfn_t* lfn = (lfn_t*)arg; logger.msg(Arc::INFO,"%s",lfn->lfn); Arc::UserConfig usercfg; Arc::DataHandle source(Arc::URL(lfn->lfn),usercfg); source->SetSecure(false); if(!source) { logger.msg(Arc::ERROR,"Failed to acquire source: %s",lfn->lfn); lfn->failed=true; lfn->done=true; cond.signal(); return; }; if(!source->Resolve(true).Passed()) { logger.msg(Arc::ERROR,"Failed to resolve %s",lfn->lfn); lfn->failed=true; lfn->done=true; cond.signal(); return; }; source->SetTries(1); // TODO. Run every URL in separate thread. // TODO. 
Do only connection (optionally) bool check_passed = false; if(source->HaveLocations()) { do { if(source->CurrentLocationHandle()->Check(false).Passed()) { check_passed=true; break; } } while (source->NextLocation()); }; if(!check_passed) { logger.msg(Arc::ERROR,"Failed to check %s",lfn->lfn); lfn->failed=true; lfn->done=true; cond.signal(); return; }; lfn->done=true; cond.signal(); return; } int main(int argc,char* argv[]) { Arc::LogStream logcerr(std::cerr); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::OptionParser options(istring("job_description_file [proxy_file]"), istring("inputcheck checks that input files specified " "in the job description are available and accessible " "using the credentials in the given proxy file.")); std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); std::list params = options.Parse(argc, argv); if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug)); if (params.size() != 1 && params.size() != 2) { logger.msg(Arc::ERROR, "Wrong number of arguments given"); return -1; } std::string desc = params.front(); std::string proxy; if (params.size() == 2) proxy = params.back(); // TODO It would be better to use Arc::JobDescription::Parse(desc) ARex::GMConfig config; ARex::JobDescriptionHandler job_desc_handler(config); ARex::JobLocalDescription job; Arc::JobDescription arc_job_desc; if(job_desc_handler.parse_job_req(job,arc_job_desc,desc) != ARex::JobReqSuccess) return 1; if(!proxy.empty()) { Arc::SetEnv("X509_USER_PROXY",proxy,true); Arc::SetEnv("X509_USER_CERT",proxy,true); Arc::SetEnv("X509_USER_KEY",proxy,true); }; ARex::prepare_proxy(); std::list::iterator file; bool has_lfns = false; std::list lfns; for(file=job.inputdata.begin();file!=job.inputdata.end();++file) { if(file->has_lfn()) { lfn_t* lfn = new lfn_t(file->lfn); lfns.push_back(lfn); Arc::CreateThreadFunction(&check_url,lfn); has_lfns=true; }; }; for(;has_lfns;) { cond.wait(); has_lfns=false; for(std::list::iterator l = lfns.begin();l!=lfns.end();++l) { if((*l)->done) { if((*l)->failed) { ARex::remove_proxy(); exit(1); }; } else { has_lfns=true; }; }; }; ARex::remove_proxy(); exit(0); } nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/get_factory_attributes_document.cpp0000644000000000000000000000012211530412700027770 xustar000000000000000026 mtime=1298273728.97221 27 atime=1513200576.441725 29 ctime=1513200662.69278042 nordugrid-arc-5.4.2/src/services/a-rex/get_factory_attributes_document.cpp0000644000175000002070000000527511530412700030050 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "job.h" #include "arex.h" namespace ARex { Arc::MCC_Status ARexService::GetFactoryAttributesDocument(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out) { /* GetFactoryAttributesDocument GetFactoryAttributesDocumentResponse FactoryResourceAttributesDocument BasicResourceAttributesDocument (optional) IsAcceptingNewActivities (boolean) CommonName (optional,string) LongDescription (optional,string) TotalNumberOfActivities (long) ActivityReference (wsa:EndpointReferenceType,unbounded) TotalNumberOfContainedResources (long) ContainedResource (anyType,unbounded) NamingProfile (anyURI,unbounded) BESExtension (anyURI,unbounded) LocalResourceManagerType (anyURI) OperatingSystem (optional,string) */ { std::string s; in.GetXML(s); logger.msg(Arc::VERBOSE, 
"GetFactoryAttributesDocument: request = \n%s", s); }; Arc::XMLNode doc = out.NewChild("bes-factory:FactoryResourceAttributesDocument"); //doc.NewChild("bes-factory:BasicResourceAttributesDocument"); doc.NewChild("bes-factory:IsAcceptingNewActivities")="true"; if(!common_name_.empty()) doc.NewChild("bes-factory:CommonName")=common_name_; if(!long_description_.empty()) doc.NewChild("bes-factory:LongDescription")=long_description_; //std::list jobs = ARexJob::Jobs(config,logger_); //doc.NewChild("bes-factory:TotalNumberOfActivities")=Arc::tostring(jobs.size()); //for(std::list::iterator j = jobs.begin();j!=jobs.end();++j) { // Arc::WSAEndpointReference identifier(doc.NewChild("bes-factory:ActivityReference")); // // Make job's ID // identifier.Address(config.Endpoint()); // address of service // identifier.ReferenceParameters().NewChild("a-rex:JobID")=(*j); // identifier.ReferenceParameters().NewChild("a-rex:JobSessionDir")=config.Endpoint()+"/"+(*j); //}; doc.NewChild("bes-factory:TotalNumberOfActivities")=Arc::tostring(ARexJob::TotalJobs(config,logger_)); doc.NewChild("bes-factory:TotalNumberOfContainedResources")=Arc::tostring(0); doc.NewChild("bes-factory:NamingProfile")="http://schemas.ggf.org/bes/2006/08/bes/naming/BasicWSAddressing"; doc.NewChild("bes-factory:BESExtension")="http://www.nordugrid.org/schemas/a-rex"; doc.NewChild("bes-factory:LocalResourceManagerType")=lrms_name_; doc.NewChild("bes-factory:OperatingSystem")=os_name_; { std::string s; out.GetXML(s); logger.msg(Arc::VERBOSE, "GetFactoryAttributesDocument: response = \n%s", s); }; return Arc::MCC_Status(Arc::STATUS_OK); } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/get_activity_statuses.cpp0000644000000000000000000000012413024225705025755 xustar000000000000000027 mtime=1481714629.345783 27 atime=1513200576.475725 30 ctime=1513200662.687780358 nordugrid-arc-5.4.2/src/services/a-rex/get_activity_statuses.cpp0000644000175000002070000005513713024225705026035 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "delegation/DelegationStores.h" #include "delegation/DelegationStore.h" #include "job.h" #include "grid-manager/files/ControlFileHandling.h" #include "arex.h" namespace ARex { Arc::MCC_Status ARexService::GetActivityStatuses(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out) { /* GetActivityStatuses ActivityIdentifier (wsa:EndpointReferenceType, unbounded) ActivityStatusVerbosity GetActivityStatusesResponse Response (unbounded) ActivityIdentifier ActivityStatus attribute = state (bes-factory:ActivityStateEnumeration) Pending,Running,Cancelled,Failed,Finished Fault (soap:Fault) UnknownActivityIdentifierFault */ { std::string s; in.GetXML(s); logger.msg(Arc::VERBOSE, "GetActivityStatuses: request = \n%s", s); }; typedef enum { VerbBES, VerbBasic, VerbFull } StatusVerbosity; StatusVerbosity status_verbosity = VerbBasic; Arc::XMLNode verb = in["ActivityStatusVerbosity"]; if((bool)verb) { std::string verb_s = (std::string)verb; if(verb_s == "BES") status_verbosity = VerbBES; else if(verb_s == "Basic") status_verbosity = VerbBasic; else if(verb_s == "Full") status_verbosity = VerbFull; else { logger.msg(Arc::WARNING, "GetActivityStatuses: unknown verbosity level requested: %s", verb_s); }; }; for(int n = 0;;++n) { Arc::XMLNode id = in["ActivityIdentifier"][n]; if(!id) break; // Create place for response Arc::XMLNode resp = out.NewChild("bes-factory:Response"); resp.NewChild(id); std::string jobid = 
Arc::WSAEndpointReference(id).ReferenceParameters()["a-rex:JobID"]; if(jobid.empty()) { // EPR is wrongly formated or not an A-REX EPR logger_.msg(Arc::ERROR, "GetActivityStatuses: job %s - can't understand EPR", jobid); Arc::SOAPFault fault(resp,Arc::SOAPFault::Sender,"Missing a-rex:JobID in ActivityIdentifier"); UnknownActivityIdentifierFault(fault,"Unrecognized EPR in ActivityIdentifier"); continue; }; // Look for obtained ID ARexJob job(jobid,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "GetActivityStatuses: job %s - %s", jobid, job.Failure()); Arc::SOAPFault fault(resp,Arc::SOAPFault::Sender,"No corresponding activity found"); UnknownActivityIdentifierFault(fault,("No activity "+jobid+" found: "+job.Failure()).c_str()); continue; }; /* // TODO: Check permissions on that ID */ bool job_pending = false; std::string gm_state = job.State(job_pending); Arc::XMLNode glue_xml; if(status_verbosity != VerbBES) { std::string glue_s; if(job_xml_read_file(jobid,config.GmConfig(),glue_s)) { Arc::XMLNode glue_xml_tmp(glue_s); glue_xml.Exchange(glue_xml_tmp); }; }; // glue_states_lock_.lock(); Arc::XMLNode st = addActivityStatus(resp,gm_state,glue_xml,job.Failed(),job_pending); // glue_states_lock_.unlock(); if(status_verbosity == VerbFull) { std::string glue_s; if(job_xml_read_file(jobid,config.GmConfig(),glue_s)) { Arc::XMLNode glue_xml(glue_s); if((bool)glue_xml) { st.NewChild(glue_xml); }; }; }; }; { std::string s; out.GetXML(s); logger.msg(Arc::VERBOSE, "GetActivityStatuses: response = \n%s", s); }; return Arc::MCC_Status(Arc::STATUS_OK); } static bool match_lists(const std::list& list1, const std::list& list2) { for(std::list::const_iterator item1=list1.begin();item1!=list1.end();++item1) { for(std::list::const_iterator item2=list2.begin();item2!=list2.end();++item2) { if(*item1 == *item2) return true; }; }; return false; } static bool match_list(const std::string& item1, const std::list& list2) { for(std::list::const_iterator item2=list2.begin();item2!=list2.end();++item2) { if(item1 == *item2) return true; }; return false; } #define MAX_ACTIVITIES (10000) static bool match(const std::pair >& status,const std::string& es_status, const std::list& es_attributes) { // Specs do not define how exactly to match status. Assuming // exact match is needed. // TODO: algorithm below will not work in case of duplicate attributes. 
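// A duplicate-tolerant variant (illustrative sketch only, not used below)
// could compare the two attribute lists as sorted copies, e.g.:
//   std::list<std::string> a(status.second), b(es_attributes);
//   a.sort(); b.sort();
//   bool equal = (status.first == es_status) && (a == b);
// The code below keeps the element-by-element lookup, which is correct
// only while attributes are unique within each list.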
if(status.first != es_status) return false; if(status.second.size() != es_attributes.size()) return false; for(std::list::const_iterator a = status.second.begin(); a != status.second.end();++a) { std::list::const_iterator es_a = es_attributes.begin(); for(;es_a != es_attributes.end();++es_a) { if((*a) == (*es_a)) break; }; if(es_a == es_attributes.end()) return false; }; return true; } static bool match(const std::list< std::pair > >& statuses, const std::string& es_status, const std::list& es_attributes) { for(std::list< std::pair > >::const_iterator s = statuses.begin();s != statuses.end();++s) { if(match(*s,es_status,es_attributes)) return true; }; return false; } Arc::MCC_Status ARexService::ESListActivities(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out) { /* ListActivities FromDate (xsd:dateTime) 0-1 ToDate (xsd:dateTime) 0-1 Limit 0-1 ActivityStatus 0- Status Attribute 0- ListActivitiesResponse ActivityID 0- truncated (attribute) - false InvalidParameterFault estypes:AccessControlFault estypes:InternalBaseFault */ Arc::Time from((time_t)(-1)); Arc::Time to((time_t)(-1)); Arc::XMLNode node; unsigned int limit = MAX_ACTIVITIES; std::list< std::pair > > statuses; bool filter_status = false; bool filter_time = false; // TODO: Adjust to end of day if((bool)(node = in["FromDate"])) { from = (std::string)node; if(from.GetTime() == (time_t)(-1)) { Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,""); ESInvalidParameterFault(fault,"failed to parse FromDate: "+(std::string)node); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; filter_time = true; }; if((bool)(node = in["ToDate"])) { to = (std::string)node; if(to.GetTime() == (time_t)(-1)) { Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,""); ESInvalidParameterFault(fault,"failed to parse ToDate: "+(std::string)node); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; filter_time = true; }; if((bool)(node = in["Limit"])) { if(!Arc::stringto((std::string)node,limit)) { Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,""); ESInternalBaseFault(fault,"failed to parse Limit: "+(std::string)node); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; if(limit > MAX_ACTIVITIES) limit = MAX_ACTIVITIES; }; for(node=in["ActivityStatus"];(bool)node;++node) { std::pair > status; status.first = (std::string)(node["Status"]); if(status.first.empty()) { Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,""); ESInvalidParameterFault(fault,"Status in ActivityStatus is missing"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; for(Arc::XMLNode anode=node["Attribute"];(bool)anode;++anode) { status.second.push_back((std::string)anode); }; statuses.push_back(status); filter_status = true; }; std::list job_ids = ARexJob::Jobs(config,logger); unsigned int count = 0; for(std::list::iterator id = job_ids.begin();id!=job_ids.end();++id) { if(count >= limit) { out.NewAttribute("truncated") = "true"; break; }; if(filter_time || filter_status) { ARexJob job(*id,config,logger_); if(!job) continue; if(filter_time) { Arc::Time t = job.Created(); if(from.GetTime() != (time_t)(-1)) { if(from > t) continue; }; if(to.GetTime() != (time_t)(-1)) { if(to < t) continue; }; }; if(filter_status) { bool job_pending = false; std::string gm_state = job.State(job_pending); bool job_failed = job.Failed(); std::string failed_cause; std::string failed_state = job.FailedState(failed_cause); std::string es_status; std::list es_attributes; 
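// Convert the grid-manager job state into an EMI-ES status plus status
// attributes and keep this job only if that combination matches one of the
// requested (Status, Attribute*) filters.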
convertActivityStatusES(gm_state,es_status,es_attributes,job_failed,job_pending,failed_state,failed_cause); if(!match(statuses,es_status,es_attributes)) continue; }; }; out.NewChild("estypes:ActivityID") = *id; ++count; }; return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status ARexService::ESGetActivityStatus(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out) { /* GetActivityStatus estypes:ActivityID 1- GetActivityStatusResponse ActivityStatusItem 1- estypes:ActivityID . estypes:ActivityStatus Status Attribute 0- Timestamp (dateTime) Description 0-1 estypes:InternalBaseFault estypes:AccessControlFault UnknownActivityIDFault UnableToRetrieveStatusFault OperationNotPossibleFault OperationNotAllowedFault estypes:VectorLimitExceededFault estypes:AccessControlFault estypes:InternalBaseFault */ Arc::XMLNode id = in["ActivityID"]; unsigned int n = 0; for(;(bool)id;++id) { if((++n) > MAX_ACTIVITIES) { Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,""); ESVectorLimitExceededFault(fault,MAX_ACTIVITIES,"Too many ActivityID"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; }; id = in["ActivityID"]; for(;(bool)id;++id) { std::string jobid = id; Arc::XMLNode item = out.NewChild("esainfo:ActivityStatusItem"); item.NewChild("estypes:ActivityID") = jobid; ARexJob job(jobid,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "EMIES:GetActivityStatus: job %s - %s", jobid, job.Failure()); ESActivityNotFoundFault(item.NewChild("dummy"),job.Failure()); } else { bool job_pending = false; std::string gm_state = job.State(job_pending); bool job_failed = job.Failed(); std::string failed_cause; std::string failed_state = job.FailedState(failed_cause); Arc::XMLNode status = addActivityStatusES(item,gm_state,Arc::XMLNode(),job_failed,job_pending,failed_state,failed_cause); status.NewChild("estypes:Timestamp") = job.Modified().str(Arc::ISOTime); // no definition of meaning in specs //status.NewChild("estypes:Description); TODO }; }; return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status ARexService::ESGetActivityInfo(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out) { /* GetActivityInfo estypes:ActivityID 1- AttributeName (xsd:QName) 0- GetActivityInfoResponse ActivityInfoItem 1- ActivityID . 
ActivityInfoDocument (glue:ComputingActivity_t) StageInDirectory StageOutDirectory SessionDirectory ComputingActivityHistory 0-1 estypes:ActivityStatus 0- - another ActivityStatus defined in ActivityInfo Operation 0- RequestedOperation Timestamp Success AttributeInfoItem 1- AttributeName AttributeValue InternalBaseFault AccessControlFault ActivityNotFoundFault UnknownAttributeFault UnableToRetrieveStatusFault OperationNotPossibleFault OperationNotAllowedFault estypes:VectorLimitExceededFault UnknownAttributeFault estypes:AccessControlFault estypes:InternalBaseFault */ static const char* job_xml_template = "\ \n\ \n\ SubmittedVia=org.ogf.glue.emies.activitycreation\n\ single\n\ \n\ emies:adl\n\ \n\ \n\ \n\ \n\ \n\ "; Arc::XMLNode id = in["ActivityID"]; unsigned int n = 0; for(;(bool)id;++id) { if((++n) > MAX_ACTIVITIES) { Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,""); ESVectorLimitExceededFault(fault,MAX_ACTIVITIES,"Too many ActivityID"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; }; std::list attributes; for(Arc::XMLNode anode = in["AttributeName"];(bool)anode;++anode) { attributes.push_back((std::string)anode); }; //if(!attributes.empty()) { // Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,""); // ESUnknownAttributeFault(fault,"Selection by AttributeName is not implemented yet"); // out.Destroy(); // return Arc::MCC_Status(Arc::STATUS_OK); //}; id = in["ActivityID"]; for(;(bool)id;++id) { std::string jobid = id; Arc::XMLNode item = out.NewChild("esainfo:ActivityInfoItem"); item.NewChild("estypes:ActivityID") = jobid; ARexJob job(jobid,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "EMIES:GetActivityStatus: job %s - %s", jobid, job.Failure()); ESActivityNotFoundFault(item.NewChild("dummy"),job.Failure()); } else { // ActivityInfoDocument (glue:ComputingActivity_t) // StageInDirectory 0- // StageOutDirectory 0- // SessionDirectory 0- // ComputingActivityHistory 0-1 std::string glue_s; bool response_generated = false; Arc::XMLNode glue_xml(job_xml_read_file(jobid,config.GmConfig(),glue_s)?glue_s:""); if(!glue_xml) { // TODO: if xml information is not ready yet create something minimal Arc::XMLNode(job_xml_template).New(glue_xml); Arc::URL headnode(config.GmConfig().HeadNode()); glue_xml["ID"] = std::string("urn:caid:")+headnode.Host()+":org.ogf.glue.emies.activitycreation:"+jobid; glue_xml["IDFromEndpoint"] = "urn:idfe:"+jobid; { // Collecting job state bool job_pending = false; std::string gm_state = job.State(job_pending); bool job_failed = job.Failed(); std::string failed_cause; std::string failed_state = job.FailedState(failed_cause); std::string primary_state; std::list state_attributes; convertActivityStatusES(gm_state,primary_state,state_attributes, job_failed,job_pending,failed_state,failed_cause); glue_xml["State"] = "emies:"+primary_state;; std::string prefix = glue_xml["State"].Prefix(); for(std::list::iterator attr = state_attributes.begin(); attr != state_attributes.end(); ++attr) { glue_xml.NewChild(prefix+":State") = "emiesattr:"+(*attr); }; }; glue_xml["Owner"] = config.GridName(); glue_xml.Attribute("CreationTime") = job.Created().str(Arc::ISOTime); }; if((bool)glue_xml) { if(attributes.empty()) { Arc::XMLNode info; std::string glue2_namespace = glue_xml.Namespace(); (info = item.NewChild(glue_xml)).Name("esainfo:ActivityInfoDocument"); info.Namespaces(ns_); std::string glue2_prefix = info.NamespacePrefix(glue2_namespace.c_str()); /* // Collecting job state bool job_pending = false; std::string gm_state = 
job.State(job_pending); bool job_failed = job.Failed(); std::string failed_cause; std::string failed_state = job.FailedState(failed_cause); // Adding EMI ES state along with Glue state. // TODO: check if not already in infosys generated xml Arc::XMLNode status = info.NewChild(glue2_prefix+":State",0,false); { std::string primary_state; std::list state_attributes; convertActivityStatusES(gm_state,primary_state,state_attributes, job_failed,job_pending,failed_state,failed_cause); status = std::string("emies:")+primary_state; }; */ // TODO: all additional ellements can be stored into xml generated by infoprovider // Extensions store delegation id(s) std::list delegation_ids; DelegationStores* deleg = config.GmConfig().GetDelegations(); if(deleg) { DelegationStore& dstore = (*deleg)[config.GmConfig().DelegationDir()]; delegation_ids = dstore.ListLockedCredIDs(jobid, config.GridName()); }; if(!delegation_ids.empty()) { Arc::XMLNode extensions = info.NewChild(glue2_prefix+":Extensions"); int n = 0; for(std::list::iterator id = delegation_ids.begin(); id != delegation_ids.end();++id) { Arc::XMLNode extension = extensions.NewChild(glue2_prefix+":Extension"); extension.NewChild(glue2_prefix+":LocalID") = "urn:delegid:nordugrid.org"; extension.NewChild(glue2_prefix+":Key") = Arc::tostring(n); extension.NewChild(glue2_prefix+":Value") = *id; // TODO: add source and destination later }; }; // Additional elements info.NewChild("esainfo:StageInDirectory") = config.Endpoint()+"/"+job.ID(); info.NewChild("esainfo:StageOutDirectory") = config.Endpoint()+"/"+job.ID(); info.NewChild("esainfo:SessionDirectory") = config.Endpoint()+"/"+job.ID(); // info.NewChild("esainfo:ComputingActivityHistory") response_generated = true; } else { // Attributes request // AttributeInfoItem 1- // AttributeName // AttributeValue // UnknownAttributeFault bool attribute_added = false; for(std::list::iterator attr = attributes.begin(); attr != attributes.end(); ++attr) { Arc::XMLNode axml = glue_xml[*attr]; for(;axml;++axml) { Arc::XMLNode aitem = item.NewChild("esainfo:AttributeInfoItem"); aitem.NewChild("esainfo:AttributeName") = *attr; aitem.NewChild("esainfo:AttributeValue") = (std::string)axml; attribute_added = true; }; if((*attr == "StageInDirectory") || (*attr == "StageOutDirectory") || (*attr == "SessionDirectory")) { Arc::XMLNode aitem = item.NewChild("esainfo:AttributeInfoItem"); aitem.NewChild("esainfo:AttributeName") = *attr; aitem.NewChild("esainfo:AttributeValue") = config.Endpoint()+"/"+job.ID(); attribute_added = true; }; }; // It is not clear if UnknownAttributeFault must be // used if any or all attributes not found. Probably // it is more useful to do that only if nothing was // found. if(!attribute_added) { ESUnknownAttributeFault(item.NewChild("dummy"),"None of specified attributes is available"); }; response_generated = true; }; }; if(!response_generated) { logger_.msg(Arc::ERROR, "EMIES:GetActivityInfo: job %s - failed to retrieve GLUE2 information", jobid); ESInternalBaseFault(item.NewChild("dummy"),"Failed to retrieve GLUE2 information"); // It would be good to have something like UnableToRetrieveStatusFault here }; }; }; return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status ARexService::ESNotifyService(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out) { /* NotifyService NotifyRequestItem 1- estypes:ActivityID NotifyMessage [client-datapull-done|client-datapush-done] NotifyServiceResponse NotifyResponseItem 1- estypes:ActivityID . 
Acknowledgement OperationNotPossibleFault OperationNotAllowedFault InternalNotificationFault ActivityNotFoundFault AccessControlFault InternalBaseFault estypes:VectorLimitExceededFault estypes:AccessControlFault estypes:InternalBaseFault */ Arc::XMLNode item = in["NotifyRequestItem"]; unsigned int n = 0; for(;(bool)item;++item) { if((++n) > MAX_ACTIVITIES) { Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,""); ESVectorLimitExceededFault(fault,MAX_ACTIVITIES,"Too many NotifyRequestItem"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; }; item = in["NotifyRequestItem"]; for(;(bool)item;++item) { std::string jobid = (std::string)(item["ActivityID"]); std::string msg = (std::string)(item["NotifyMessage"]); Arc::XMLNode ritem = out.NewChild("esmanag:NotifyResponseItem"); ritem.NewChild("estypes:ActivityID") = jobid; ARexJob job(jobid,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "EMIES:NotifyService: job %s - %s", jobid, job.Failure()); ESActivityNotFoundFault(ritem.NewChild("dummy"),job.Failure()); } else { if(msg == "client-datapull-done") { // Client is done with job. Same as wipe request. Or should job go to deleted? if(!job.Clean()) { // Failure is not fatal here logger_.msg(Arc::ERROR, "EMIES:NotifyService: job %s - %s", jobid, job.Failure()); }; ritem.NewChild("esmanag:Acknowledgement"); } else if(msg == "client-datapush-done") { if(!job.ReportFilesComplete()) { ESInternalBaseFault(ritem.NewChild("dummy"),job.Failure()); } else { ritem.NewChild("esmanag:Acknowledgement"); }; } else { // Wrong request ESInternalNotificationFault(ritem.NewChild("dummy"),"Unsupported notification type "+msg); // Or maybe OperationNotPossibleFault? }; }; }; return Arc::MCC_Status(Arc::STATUS_OK); } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/create_activity.cpp0000644000000000000000000000012412754411222024506 xustar000000000000000027 mtime=1471287954.153765 27 atime=1513200576.474725 30 ctime=1513200662.685780334 nordugrid-arc-5.4.2/src/services/a-rex/create_activity.cpp0000644000175000002070000002240212754411222024553 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "job.h" #include "arex.h" namespace ARex { // TODO: configurable #define MAX_ACTIVITIES (10) Arc::MCC_Status ARexService::CreateActivity(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out,const std::string& clientid) { /* CreateActivity ActivityDocument jsdl:JobDefinition CreateActivityResponse ActivityIdentifier (wsa:EndpointReferenceType) ActivityDocument jsdl:JobDefinition NotAuthorizedFault NotAcceptingNewActivitiesFault UnsupportedFeatureFault InvalidRequestMessageFault */ if(Arc::VERBOSE >= logger_.getThreshold()) { std::string s; in.GetXML(s); logger_.msg(Arc::VERBOSE, "CreateActivity: request = \n%s", s); }; Arc::XMLNode jsdl = in["ActivityDocument"]["JobDefinition"]; if(!jsdl) { // Wrongly formated request logger_.msg(Arc::ERROR, "CreateActivity: no job description found"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can't find JobDefinition element in request"); InvalidRequestMessageFault(fault,"jsdl:JobDefinition","Element is missing"); out.Destroy(); return Arc::MCC_Status(); }; if(config.GmConfig().MaxTotal() > 0 && all_jobs_count_ >= config.GmConfig().MaxTotal()) { logger_.msg(Arc::ERROR, "CreateActivity: max jobs total limit reached"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Reached limit of total allowed jobs"); GenericFault(fault); out.Destroy(); return 
Arc::MCC_Status(); }; // HPC Basic Profile 1.0 comply (these fault handlings are defined in the KnowARC standards // conformance roadmap 2nd release) // End of the HPC BP 1.0 fault handling part std::string delegationid; Arc::XMLNode delegated_token = in["arcdeleg:DelegatedToken"]; if(delegated_token) { // Client wants to delegate credentials std::string delegation; if(!delegation_stores_.DelegatedToken(config.GmConfig().DelegationDir(),delegated_token,config.GridName(),delegation)) { // Failed to accept delegation (report as bad request) logger_.msg(Arc::ERROR, "CreateActivity: Failed to accept delegation"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Failed to accept delegation"); InvalidRequestMessageFault(fault,"arcdeleg:DelegatedToken","This token does not exist"); out.Destroy(); return Arc::MCC_Status(); }; delegationid = (std::string)(delegated_token["Id"]); }; JobIDGeneratorARC idgenerator(config.Endpoint()); ARexJob job(jsdl,config,delegationid,clientid,logger_,idgenerator); if(!job) { ARexJobFailure failure_type = job; std::string failure = job.Failure(); switch(failure_type) { case ARexJobDescriptionUnsupportedError: { Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Unsupported feature in job description"); UnsupportedFeatureFault(fault,failure); }; break; case ARexJobDescriptionMissingError: { Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Missing needed element in job description"); UnsupportedFeatureFault(fault,failure); }; break; case ARexJobDescriptionLogicalError: { std::string element; std::string::size_type pos = failure.find(' '); if(pos != std::string::npos) { element=failure.substr(0,pos); failure=failure.substr(pos+1); }; Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Logical error in job description"); InvalidRequestMessageFault(fault,element,failure); }; break; default: { logger_.msg(Arc::ERROR, "CreateActivity: Failed to create new job: %s",failure); // Failed to create new job (no corresponding BES fault defined - using generic SOAP error) logger_.msg(Arc::ERROR, "CreateActivity: Failed to create new job"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,("Failed to create new activity: "+failure).c_str()); GenericFault(fault); }; break; }; out.Destroy(); return Arc::MCC_Status(); }; // Make SOAP response Arc::WSAEndpointReference identifier(out.NewChild("bes-factory:ActivityIdentifier")); // Make job's ID identifier.Address(config.Endpoint()); // address of service identifier.ReferenceParameters().NewChild("a-rex:JobID")=job.ID(); identifier.ReferenceParameters().NewChild("a-rex:JobSessionDir")=config.Endpoint()+"/"+job.ID(); out.NewChild(in["ActivityDocument"]); logger_.msg(Arc::VERBOSE, "CreateActivity finished successfully"); if(Arc::VERBOSE >= logger_.getThreshold()) { std::string s; out.GetXML(s); logger_.msg(Arc::VERBOSE, "CreateActivity: response = \n%s", s); }; return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status ARexService::ESCreateActivities(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out,const std::string& clientid) { /* CreateActivity adl:ActivityDescription - http://www.eu-emi.eu/es/2010/12/adl 1-unbounded CreateActivityResponse ActivityCreationResponse 1- types:ActivityID types:ActivityMgmtEndpointURL (anyURI) types:ResourceInfoEndpointURL (anyURI) types:ActivityStatus ETNSC (dateTime) 0-1 StageInDirectory 0-1 URL 1- SessionDirectory 0-1 URL 1- StageOutDirectory 0-1 URL 1- - or - types:InternalBaseFault types:AccessControlFault InvalidActivityDescriptionFault 
InvalidActivityDescriptionSemanticFault UnsupportedCapabilityFault types:VectorLimitExceededFault types:InternalBaseFault types:AccessControlFault */ if(Arc::VERBOSE >= logger_.getThreshold()) { std::string s; in.GetXML(s); logger_.msg(Arc::VERBOSE, "EMIES:CreateActivity: request = \n%s", s); }; Arc::XMLNode adl = in["ActivityDescription"]; unsigned int n = 0; for(;(bool)adl;++adl) { if((++n) > MAX_ACTIVITIES) { logger_.msg(Arc::ERROR, "EMIES:CreateActivity: too many activity descriptions"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Too many activity descriptions"); ESVectorLimitExceededFault(fault,MAX_ACTIVITIES,"Too many activity descriptions"); out.Destroy(); return Arc::MCC_Status(Arc::STATUS_OK); }; }; adl = in["ActivityDescription"]; if(!adl) { // Wrongly formated request logger_.msg(Arc::ERROR, "EMIES:CreateActivity: no job description found"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"ActivityDescription element is missing"); ESInternalBaseFault(fault,"ActivityDescription element is missing"); out.Destroy(); return Arc::MCC_Status(); }; if(config.GmConfig().MaxTotal() > 0 && all_jobs_count_ >= config.GmConfig().MaxTotal()) { logger_.msg(Arc::ERROR, "EMIES:CreateActivity: max jobs total limit reached"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Reached limit of total allowed jobs"); ESInternalBaseFault(fault,"Reached limit of total allowed jobs"); out.Destroy(); return Arc::MCC_Status(); }; for(;(bool)adl;++adl) { JobIDGeneratorES idgenerator(config.Endpoint()); ARexJob job(adl,config,"",clientid,logger_,idgenerator); // Make SOAP response Arc::XMLNode resp = out.NewChild("escreate:ActivityCreationResponse"); if(!job) { Arc::XMLNode fault = resp.NewChild("dummy"); ARexJobFailure failure_type = job; std::string failure = job.Failure(); switch(failure_type) { case ARexJobDescriptionUnsupportedError: { ESUnsupportedCapabilityFault(fault,failure); }; break; case ARexJobDescriptionMissingError: { ESInvalidActivityDescriptionSemanticFault(fault,failure); }; break; case ARexJobDescriptionLogicalError: { ESInvalidActivityDescriptionFault(fault,failure); }; break; default: { logger_.msg(Arc::ERROR, "ES:CreateActivity: Failed to create new job: %s",failure); ESInternalBaseFault(fault,"Failed to create new activity. 
"+failure); }; break; }; } else { resp.NewChild("estypes:ActivityID")=job.ID(); resp.NewChild("estypes:ActivityMgmtEndpointURL")=config.Endpoint(); resp.NewChild("estypes:ResourceInfoEndpointURL")=config.Endpoint(); Arc::XMLNode rstatus = addActivityStatusES(resp,"ACCEPTED",Arc::XMLNode(),false,false); //resp.NewChild("escreate:ETNSC"); resp.NewChild("escreate:StageInDirectory").NewChild("escreate:URL")=config.Endpoint()+"/"+job.ID(); resp.NewChild("escreate:SessionDirectory").NewChild("escreate:URL")=config.Endpoint()+"/"+job.ID(); resp.NewChild("escreate:StageOutDirectory").NewChild("escreate:URL")=config.Endpoint()+"/"+job.ID(); // TODO: move into addActivityStatusES() rstatus.NewChild("estypes:Timestamp")=Arc::Time().str(Arc::ISOTime); //rstatus.NewChild("estypes:Description")=; logger_.msg(Arc::VERBOSE, "EMIES:CreateActivity finished successfully"); logger_.msg(Arc::INFO, "New job accepted with id %s", job.ID()); if(Arc::VERBOSE >= logger_.getThreshold()) { std::string s; out.GetXML(s); logger_.msg(Arc::VERBOSE, "EMIES:CreateActivity: response = \n%s", s); }; }; }; return Arc::MCC_Status(Arc::STATUS_OK); } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/job.cpp0000644000000000000000000000012413111110540022064 xustar000000000000000027 mtime=1495568736.818608 27 atime=1513200576.439725 30 ctime=1513200662.684780322 nordugrid-arc-5.4.2/src/services/a-rex/job.cpp0000644000175000002070000011067213111110540022140 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include // NOTE: On Solaris errno is not working properly if cerrno is included first #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "grid-manager/conf/GMConfig.h" #include "grid-manager/jobs/GMJob.h" #include "grid-manager/jobs/ContinuationPlugins.h" #include "grid-manager/jobs/JobDescriptionHandler.h" #include "grid-manager/jobs/CommFIFO.h" #include "grid-manager/jobs/JobsList.h" #include "grid-manager/run/RunPlugin.h" #include "grid-manager/files/ControlFileHandling.h" #include "delegation/DelegationStores.h" #include "delegation/DelegationStore.h" #include "job.h" using namespace ARex; Arc::Logger ARexGMConfig::logger(Arc::Logger::getRootLogger(), "ARexGMConfig"); ARexGMConfig::ARexGMConfig(const GMConfig& config,const std::string& uname,const std::string& grid_name,const std::string& service_endpoint): config_(config),user_(uname),readonly_(false),grid_name_(grid_name),service_endpoint_(service_endpoint) { //if(!InitEnvironment(configfile)) return; // const char* uname = user_s.get_uname(); //if((bool)job_map) uname=job_map.unix_name(); if(!user_) { logger.msg(Arc::WARNING, "Cannot handle local user %s", uname); return; } // Do substitutions on session dirs session_roots_ = config_.SessionRoots(); for (std::vector::iterator session = session_roots_.begin(); session != session_roots_.end(); ++session) { config_.Substitute(*session, user_); } session_roots_non_draining_ = config_.SessionRootsNonDraining(); for (std::vector::iterator session = session_roots_non_draining_.begin(); session != session_roots_non_draining_.end(); ++session) { config_.Substitute(*session, user_); } if(!config_.AREXEndpoint().empty()) service_endpoint_ = config_.AREXEndpoint(); } static ARexJobFailure setfail(JobReqResult res) { switch(res.result_type) { case JobReqSuccess: return ARexJobNoError; case JobReqInternalFailure: return ARexJobInternalError; case 
JobReqSyntaxFailure: return ARexJobDescriptionSyntaxError; case JobReqUnsupportedFailure: return ARexJobDescriptionUnsupportedError; case JobReqMissingFailure: return ARexJobDescriptionMissingError; case JobReqLogicalFailure: return ARexJobDescriptionLogicalError; }; return ARexJobInternalError; } bool ARexJob::is_allowed(bool fast) { allowed_to_see_=false; allowed_to_maintain_=false; // Checking user's grid name against owner if(config_.GridName() == job_.DN) { allowed_to_see_=true; allowed_to_maintain_=true; return true; }; if(fast) return true; // Do fine-grained authorization requested by job's owner if(config_.beginAuth() == config_.endAuth()) return true; std::string acl; if(!job_acl_read_file(id_,config_.GmConfig(),acl)) return true; // safe to ignore if(acl.empty()) return true; // No policy defiled - only owner allowed // Identify and parse policy ArcSec::EvaluatorLoader eval_loader; Arc::AutoPointer policy(eval_loader.getPolicy(ArcSec::Source(acl))); if(!policy) { logger_.msg(Arc::VERBOSE, "%s: Failed to parse user policy", id_); return true; }; Arc::AutoPointer eval(eval_loader.getEvaluator(policy.Ptr())); if(!eval) { logger_.msg(Arc::VERBOSE, "%s: Failed to load evaluator for user policy ", id_); return true; }; std::string policyname = policy->getName(); if((policyname.length() > 7) && (policyname.substr(policyname.length()-7) == ".policy")) { policyname.resize(policyname.length()-7); }; if(policyname == "arc") { // Creating request - directly with XML // Creating top of request document Arc::NS ns; ns["ra"]="http://www.nordugrid.org/schemas/request-arc"; Arc::XMLNode request(ns,"ra:Request"); // Collect all security attributes for(std::list::iterator a = config_.beginAuth();a!=config_.endAuth();++a) { if(*a) (*a)->Export(Arc::SecAttr::ARCAuth,request); }; // Leave only client identities for(Arc::XMLNode item = request["RequestItem"];(bool)item;++item) { for(Arc::XMLNode a = item["Action"];(bool)a;a=item["Action"]) a.Destroy(); for(Arc::XMLNode r = item["Resource"];(bool)r;r=item["Resource"]) r.Destroy(); }; // Fix namespace request.Namespaces(ns); // Create A-Rex specific action // TODO: make helper classes for such operations Arc::XMLNode item = request["ra:RequestItem"]; if(!item) item=request.NewChild("ra:RequestItem"); // Possible operations are Modify and Read Arc::XMLNode action; action=item.NewChild("ra:Action"); action=JOB_POLICY_OPERATION_READ; action.NewAttribute("Type")="string"; action.NewAttribute("AttributeId")=JOB_POLICY_OPERATION_URN; action=item.NewChild("ra:Action"); action=JOB_POLICY_OPERATION_MODIFY; action.NewAttribute("Type")="string"; action.NewAttribute("AttributeId")=JOB_POLICY_OPERATION_URN; // Evaluating policy ArcSec::Response *resp = eval->evaluate(request,policy.Ptr()); // Analyzing response in order to understand which operations are allowed if(!resp) return true; // Not authorized // Following should be somehow made easier ArcSec::ResponseList& rlist = resp->getResponseItems(); for(int n = 0; nres != ArcSec::DECISION_PERMIT) continue; if(!(ritem->reqtp)) continue; for(ArcSec::Action::iterator a = ritem->reqtp->act.begin();a!=ritem->reqtp->act.end();++a) { ArcSec::RequestAttribute* attr = *a; if(!attr) continue; ArcSec::AttributeValue* value = attr->getAttributeValue(); if(!value) continue; std::string action = value->encode(); if(action == "Read") allowed_to_see_=true; if(action == "Modify") allowed_to_maintain_=true; }; }; } else if(policyname == "gacl") { // Creating request - directly with XML Arc::NS ns; Arc::XMLNode request(ns,"gacl"); // 
Collect all security attributes for(std::list::iterator a = config_.beginAuth();a!=config_.endAuth();++a) { if(*a) (*a)->Export(Arc::SecAttr::GACL,request); }; // Leave only client identities int entries = 0; for(Arc::XMLNode entry = request["entry"];(bool)entry;++entry) { for(Arc::XMLNode a = entry["allow"];(bool)a;a=entry["allow"]) a.Destroy(); for(Arc::XMLNode a = entry["deny"];(bool)a;a=entry["deny"]) a.Destroy(); ++entries; }; if(!entries) request.NewChild("entry"); // Evaluate every action separately for(Arc::XMLNode entry = request["entry"];(bool)entry;++entry) { entry.NewChild("allow").NewChild("read"); }; ArcSec::Response *resp; resp=eval->evaluate(request,policy.Ptr()); if(resp) { ArcSec::ResponseList& rlist = resp->getResponseItems(); for(int n = 0; nres != ArcSec::DECISION_PERMIT) continue; allowed_to_see_=true; break; }; }; for(Arc::XMLNode entry = request["entry"];(bool)entry;++entry) { entry["allow"].Destroy(); entry.NewChild("allow").NewChild("write"); }; resp=eval->evaluate(request,policy.Ptr()); if(resp) { ArcSec::ResponseList& rlist = resp->getResponseItems(); for(int n = 0; nres != ArcSec::DECISION_PERMIT) continue; allowed_to_maintain_=true; break; }; }; // TODO: , } else { logger_.msg(Arc::VERBOSE, "%s: Unknown user policy '%s'", id_, policyname); }; return true; } ARexJob::ARexJob(const std::string& id,ARexGMConfig& config,Arc::Logger& logger,bool fast_auth_check):id_(id),logger_(logger),config_(config) { if(id_.empty()) return; if(!config_) { id_.clear(); return; }; // Reading essential information about job if(!job_local_read_file(id_,config_.GmConfig(),job_)) { id_.clear(); return; }; // Checking if user is allowed to do anything with that job if(!is_allowed(fast_auth_check)) { id_.clear(); return; }; if(!(allowed_to_see_ || allowed_to_maintain_)) { id_.clear(); return; }; } ARexJob::ARexJob(Arc::XMLNode jsdl,ARexGMConfig& config,const std::string& delegid,const std::string& clientid, Arc::Logger& logger, JobIDGenerator& idgenerator, Arc::XMLNode migration):id_(""),logger_(logger),config_(config) { if(!config_) return; DelegationStores* delegs = config_.GmConfig().GetDelegations(); if(!delegs) { failure_="Failed to find delegation store"; failure_type_=ARexJobInternalError; return; } DelegationStore& deleg = delegs->operator[](config_.GmConfig().DelegationDir()); // New job is created here // First get and acquire new id if(!make_job_id()) return; // Turn job description into text std::string job_desc_str; // Make full XML doc out of subtree { Arc::XMLNode jsdldoc; jsdl.New(jsdldoc); jsdldoc.GetDoc(job_desc_str); }; // Choose session directory std::string sessiondir; if (!ChooseSessionDir(id_, sessiondir)) { delete_job_id(); failure_="Failed to find valid session directory"; failure_type_=ARexJobInternalError; return; }; job_.sessiondir = sessiondir+"/"+id_; GMJob job(id_,Arc::User(config_.User().get_uid()),job_.sessiondir,JOB_STATE_ACCEPTED); // Store description if(!job_description_write_file(job,config_.GmConfig(),job_desc_str)) { delete_job_id(); failure_="Failed to store job description"; failure_type_=ARexJobInternalError; return; }; // Analyze job description (checking, substituting, etc) JobDescriptionHandler job_desc_handler(config.GmConfig()); Arc::JobDescription desc; JobReqResult parse_result = job_desc_handler.parse_job_req(id_,job_,desc,true); if((failure_type_=setfail(parse_result)) != ARexJobNoError) { failure_ = parse_result.failure; if(failure_.empty()) { failure_="Failed to parse job description"; failure_type_=ARexJobInternalError; }; 
delete_job_id(); return; }; std::string acl(parse_result.acl); if((!job_.action.empty()) && (job_.action != "request")) { failure_="Wrong action in job request: "+job_.action; failure_type_=ARexJobInternalError; delete_job_id(); return; }; // Check for proper LRMS name in request. If there is no LRMS name // in user configuration that means service is opaque frontend and // accepts any LRMS in request. if((!job_.lrms.empty()) && (!config_.GmConfig().DefaultLRMS().empty())) { if(job_.lrms != config_.GmConfig().DefaultLRMS()) { failure_="Requested LRMS is not supported by this service"; failure_type_=ARexJobInternalError; //failure_type_=ARexJobDescriptionLogicalError; delete_job_id(); return; }; }; if(job_.lrms.empty()) job_.lrms=config_.GmConfig().DefaultLRMS(); // Check for proper queue in request. if(job_.queue.empty()) job_.queue=config_.GmConfig().DefaultQueue(); if(job_.queue.empty()) { failure_="Request has no queue defined"; failure_type_=ARexJobDescriptionMissingError; delete_job_id(); return; }; if(!config_.GmConfig().Queues().empty()) { // If no queues configured - service takes any for(std::list::const_iterator q = config_.GmConfig().Queues().begin();;++q) { if(q == config_.GmConfig().Queues().end()) { failure_="Requested queue "+job_.queue+" does not match any of available queues"; //failure_type_=ARexJobDescriptionLogicalError; failure_type_=ARexJobInternalError; delete_job_id(); return; }; if(*q == job_.queue) break; }; }; // Check for various unsupported features if(!job_.preexecs.empty()) { failure_="Pre-executables are not supported by this service"; failure_type_=ARexJobDescriptionUnsupportedError; delete_job_id(); return; }; if(!job_.postexecs.empty()) { failure_="Post-executables are not supported by this service"; failure_type_=ARexJobDescriptionUnsupportedError; delete_job_id(); return; }; for(std::list::iterator f = desc.DataStaging.OutputFiles.begin();f != desc.DataStaging.OutputFiles.end();++f) { for(std::list::iterator t = f->Targets.begin();t != f->Targets.end();++t) { switch(t->CreationFlag) { case Arc::TargetType::CFE_DEFAULT: case Arc::TargetType::CFE_OVERWRITE: case Arc::TargetType::CFE_DONTOVERWRITE: break; default: failure_="Unsupported creation mode for Target"; failure_type_=ARexJobDescriptionUnsupportedError; delete_job_id(); return; }; }; }; // TODO: Rerun; // TODO: ExpiryTime; // TODO: ProcessingStartTime; // TODO: Priority; // TODO: Notification; // TODO: CredentialService; // TODO: AccessControl; // TODO: DryRun; // TODO: RemoteLogging // TODO: OperatingSystem; // TODO: Platform; // TODO: NetworkInfo; // TODO: IndividualPhysicalMemory; // TODO: IndividualVirtualMemory; // TODO: DiskSpaceRequirement; // TODO: SessionLifeTime; // TODO: SessionDirectoryAccess; // TODO: IndividualCPUTime; // TODO: TotalCPUTime; // TODO: IndividualWallTime; // TODO: TotalWallTime; // TODO: NodeAccess; // TODO: CEType; // Check that the SlotRequirements make sense. // I.e. 
that SlotsPerHost do not exceed total Slots // and that SlotsPerHost is a divisor of total Slots if((desc.Resources.SlotRequirement.SlotsPerHost > desc.Resources.SlotRequirement.NumberOfSlots) || (desc.Resources.SlotRequirement.NumberOfSlots % desc.Resources.SlotRequirement.SlotsPerHost != 0)) { failure_="SlotsPerHost exceeding NumberOfSlots is not supported"; failure_type_=ARexJobDescriptionUnsupportedError; delete_job_id(); return; }; if(!desc.Resources.Coprocessor.v.empty()) { failure_="Coprocessor is not supported yet."; failure_type_=ARexJobDescriptionUnsupportedError; delete_job_id(); return; }; // There may be 3 sources of delegated credentials: // 1. If job comes through EMI-ES it has delegations assigned only per file // through source and target. But ARC has extension to pass global // delegation for whole DataStaging // 2. In ARC BES extension credentils delegated as part of job creation request. // Those are provided in credentials variable // 3. If neither works and special dynamic output files @list which // have no targets and no delegations are present then any of // per file delegations is used bool need_delegation = false; // not for sure, but most probably needed std::list deleg_ids; // collection of all delegations if(!desc.DataStaging.DelegationID.empty()) { job_.delegationid = desc.DataStaging.DelegationID; // remember that special delegation deleg_ids.push_back(desc.DataStaging.DelegationID); // and store in list of all delegations } else if(!delegid.empty()) { // Have per job credentials - remember and refer by id later job_.delegationid = delegid; // remember that ad-hoc delegation deleg_ids.push_back(delegid); // and store in list of all delegations } else { // No per job delegation provided. // Check if generic delegation is needed at all. for(std::list::iterator f = desc.DataStaging.OutputFiles.begin(); f != desc.DataStaging.OutputFiles.end();++f) { if(f->Name[0] == '@') { // Dynamic file - possibly we need delegation. But we can't know till job finished. // Try to use any of provided delegations. need_delegation = true; break; }; }; }; // Collect other delegations // Delegation ids can be found in parsed job description for(std::list::iterator f = desc.DataStaging.InputFiles.begin();f != desc.DataStaging.InputFiles.end();++f) { for(std::list::iterator s = f->Sources.begin();s != f->Sources.end();++s) { if(!s->DelegationID.empty()) deleg_ids.push_back(s->DelegationID); }; }; for(std::list::iterator f = desc.DataStaging.OutputFiles.begin();f != desc.DataStaging.OutputFiles.end();++f) { for(std::list::iterator t = f->Targets.begin();t != f->Targets.end();++t) { if(!t->DelegationID.empty()) deleg_ids.push_back(t->DelegationID); }; }; if(need_delegation && job_.delegationid.empty()) { // Still need generic per job delegation if(deleg_ids.size() > 0) { // Pick up first delegation as generic one job_.delegationid = *deleg_ids.begin(); } else { // Missing most probably required delegation - play safely failure_="Dynamic output files and no delegation assigned to job are incompatible."; failure_type_=ARexJobDescriptionUnsupportedError; delete_job_id(); return; }; }; // Start local file (some local attributes are already defined at this point) /* !!!!! some parameters are unchecked here - rerun,diskspace !!!!! 
*/ job_.jobid=id_; job_.starttime=Arc::Time(); job_.DN=config_.GridName(); job_.clientname=clientid; job_.migrateactivityid=(std::string)migration["ActivityIdentifier"]; job_.forcemigration=(migration["ForceMigration"]=="true"); // BES ActivityIdentifier is global job ID idgenerator.SetLocalID(id_); job_.globalid = idgenerator.GetGlobalID(); job_.headnode = idgenerator.GetManager(); job_.interface = idgenerator.GetInterface(); std::string certificates; job_.expiretime = time(NULL); #if 1 // For compatibility reasons during transitional period store full proxy if possible if(!job_.delegationid.empty()) { (void)deleg.GetCred(job_.delegationid, config_.GridName(), certificates); } if(!certificates.empty()) { if(!job_proxy_write_file(job,config_.GmConfig(),certificates)) { delete_job_id(); failure_="Failed to write job proxy file"; failure_type_=ARexJobInternalError; return; }; try { Arc::Credential cred(certificates,"","","","",false); job_.expiretime = cred.GetEndTime(); logger.msg(Arc::VERBOSE, "Credential expires at %s", job_.expiretime.str()); } catch(std::exception const& e) { logger.msg(Arc::WARNING, "Credential handling exception: %s", e.what()); }; } else #endif // Create user credentials (former "proxy") { for(std::list::iterator a = config_.beginAuth();a!=config_.endAuth();++a) { if(*a) { Arc::SecAttr* sattr = (*a)->get("TLS"); if(sattr) { certificates = sattr->get("CERTIFICATE"); if(!certificates.empty()) { certificates += sattr->get("CERTIFICATECHAIN"); if(!job_proxy_write_file(job,config_.GmConfig(),certificates)) { delete_job_id(); failure_="Failed to write job proxy file"; failure_type_=ARexJobInternalError; return; }; try { Arc::Credential cred(certificates,"","","","",false); job_.expiretime = cred.GetEndTime(); logger_.msg(Arc::VERBOSE, "Credential expires at %s", job_.expiretime.str()); } catch(std::exception const& e) { logger_.msg(Arc::WARNING, "Credential handling exception: %s", e.what()); }; break; }; }; }; }; }; // Collect authorized VOMS/VO - so far only source is ARCLEGACYPDP { for(std::list::iterator a = config_.beginAuth();a!=config_.endAuth();++a) { if(*a) { Arc::SecAttr* sattr = (*a)->get("ARCLEGACYPDP"); if(sattr) { std::list voms = sattr->getAll("VOMS"); job_.voms.insert(job_.voms.end(),voms.begin(),voms.end()); std::list vo = sattr->getAll("VO"); job_.localvo.insert(job_.localvo.end(),vo.begin(),vo.end()); }; }; }; }; // If no authorized VOMS was identified just report those from credentials (TLS source) if(job_.voms.empty()) { for(std::list::iterator a = config_.beginAuth();a!=config_.endAuth();++a) { if(*a) { Arc::SecAttr* sattr = (*a)->get("TLS"); if(sattr) { std::list voms = sattr->getAll("VOMS"); // These attributes are in different format and need to be converted // into ordinary VOMS FQANs. 
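// The loop that follows turns full VOMS attribute strings from the TLS security
// attribute into plain FQANs. A minimal stand-alone sketch of the same pattern,
// relying only on what the surrounding code already uses: Arc::VOMSFQANFromFull()
// returning an empty string when an attribute cannot be converted. The example
// FQAN in the comment is hypothetical.
#if 0 // illustrative sketch only, not part of the original file
std::list<std::string> to_fqans(const std::list<std::string>& full_voms) {
  std::list<std::string> fqans;
  for(std::list<std::string>::const_iterator v = full_voms.begin(); v != full_voms.end(); ++v) {
    std::string fqan = Arc::VOMSFQANFromFull(*v); // e.g. "/atlas/Role=production"
    if(!fqan.empty()) fqans.push_back(fqan);      // drop attributes that do not convert
  };
  return fqans;
}
#endif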
for(std::list::iterator v = voms.begin();v!=voms.end();++v) { std::string fqan = Arc::VOMSFQANFromFull(*v); if(!fqan.empty()) { job_.voms.insert(job_.voms.end(),fqan); }; }; }; }; }; }; // If still no VOMS information is available take forced one from configuration if(job_.voms.empty()) { std::string forced_voms = config_.GmConfig().ForcedVOMS(job_.queue.c_str()); if(forced_voms.empty()) forced_voms = config_.GmConfig().ForcedVOMS(); if(!forced_voms.empty()) { job_.voms.push_back(forced_voms); }; }; // Write local file if(!job_local_write_file(job,config_.GmConfig(),job_)) { delete_job_id(); failure_="Failed to store internal job description"; failure_type_=ARexJobInternalError; return; }; // Write grami file if(!job_desc_handler.write_grami(desc,job,NULL)) { delete_job_id(); failure_="Failed to create grami file"; failure_type_=ARexJobInternalError; return; }; // Write ACL file if(!acl.empty()) { if(!job_acl_write_file(id_,config.GmConfig(),acl)) { delete_job_id(); failure_="Failed to process/store job ACL"; failure_type_=ARexJobInternalError; return; }; }; // Call authentication/authorization plugin/exec { // talk to external plugin to ask if we can proceed std::list results; ContinuationPlugins* plugins = config_.GmConfig().GetContPlugins(); if(plugins) plugins->run(job,config_.GmConfig(),results); std::list::iterator result = results.begin(); while(result != results.end()) { // analyze results if(result->action == ContinuationPlugins::act_fail) { delete_job_id(); failure_="Job is not allowed by external plugin: "+result->response; failure_type_=ARexJobInternalError; return; } else if(result->action == ContinuationPlugins::act_log) { // Scream but go ahead logger_.msg(Arc::WARNING, "Failed to run external plugin: %s", result->response); } else if(result->action == ContinuationPlugins::act_pass) { // Just continue if(result->response.length()) { logger_.msg(Arc::INFO, "Plugin response: %s", result->response); }; } else { delete_job_id(); failure_="Failed to pass external plugin: "+result->response; failure_type_=ARexJobInternalError; return; }; ++result; }; }; /*@ // Make access to filesystem on behalf of local user if(cred_plugin && (*cred_plugin)) { job_subst_t subst_arg; subst_arg.user=user; subst_arg.job=&job_id; subst_arg.reason="new"; // run external plugin to acquire non-unix local credentials if(!cred_plugin->run(job_subst,&subst_arg)) { olog << "Failed to run plugin" << std::endl; delete_job_id(); failure_type_=ARexJobInternalError; error_description="Failed to obtain external credentials"; return 1; }; if(cred_plugin->result() != 0) { olog << "Plugin failed: " << cred_plugin->result() << std::endl; delete_job_id(); error_description="Failed to obtain external credentials"; failure_type_=ARexJobInternalError; return 1; }; }; */ // Create session directory if(!config_.GmConfig().CreateSessionDirectory(job.SessionDir(), job.get_user())) { delete_job_id(); failure_="Failed to create session directory"; failure_type_=ARexJobInternalError; return; }; // Create input status file to tell downloader we // are handling input in clever way. job_input_status_add_file(job,config_.GmConfig()); // Create status file (do it last so GM picks job up here) if(!job_state_write_file(job,config_.GmConfig(),JOB_STATE_ACCEPTED)) { delete_job_id(); failure_="Failed registering job in grid-manager"; failure_type_=ARexJobInternalError; return; }; // Put lock on all delegated credentials of this job. 
// Because same delegation id can be used multiple times remove // duplicates to avoid adding multiple identical locking records. deleg_ids.sort(); deleg_ids.unique(); deleg.LockCred(id_,deleg_ids,config_.GridName()); SignalFIFO(config_.GmConfig().ControlDir()); return; } bool ARexJob::GetDescription(Arc::XMLNode& jsdl) { if(id_.empty()) return false; std::string sdesc; if(!job_description_read_file(id_,config_.GmConfig(),sdesc)) return false; Arc::XMLNode xdesc(sdesc); if(!xdesc) return false; jsdl.Replace(xdesc); return true; } bool ARexJob::Cancel(void) { if(id_.empty()) return false; GMJob job(id_,Arc::User(config_.User().get_uid())); if(!job_cancel_mark_put(job,config_.GmConfig())) return false; return true; } bool ARexJob::Clean(void) { if(id_.empty()) return false; GMJob job(id_,Arc::User(config_.User().get_uid())); if(!job_clean_mark_put(job,config_.GmConfig())) return false; return true; } bool ARexJob::Resume(void) { if(id_.empty()) return false; if(job_.failedstate.length() == 0) { // Job can't be restarted. return false; }; if(job_.reruns <= 0) { // Job run out of number of allowed retries. return false; }; if(!job_restart_mark_put(GMJob(id_,Arc::User(config_.User().get_uid())),config_.GmConfig())) { // Failed to report restart request. return false; }; return true; } std::string ARexJob::State(void) { bool job_pending; return State(job_pending); } std::string ARexJob::State(bool& job_pending) { if(id_.empty()) return ""; job_state_t state = job_state_read_file(id_,config_.GmConfig(),job_pending); return GMJob::get_state_name(state); } bool ARexJob::Failed(void) { if(id_.empty()) return false; return job_failed_mark_check(id_,config_.GmConfig()); } std::string ARexJob::FailedState(std::string& cause) { std::string state; job_local_read_failed(id_,config_.GmConfig(),state,cause); return state; } Arc::Time ARexJob::Created(void) { time_t t = job_description_time(id_,config_.GmConfig()); if(t == 0) return Arc::Time(); // ??? return Arc::Time(t); } Arc::Time ARexJob::Modified(void) { time_t t = job_state_time(id_,config_.GmConfig()); if(t == 0) return Arc::Time(); // ??? return Arc::Time(t); } bool ARexJob::UpdateCredentials(const std::string& credentials) { if(id_.empty()) return false; if(!update_credentials(credentials)) return false; GMJob job(id_,Arc::User(config_.User().get_uid()), config_.GmConfig().SessionRoot(id_)+"/"+id_,JOB_STATE_ACCEPTED); if(!job_local_write_file(job,config_.GmConfig(),job_)) return false; return true; } bool ARexJob::update_credentials(const std::string& credentials) { if(credentials.empty()) return true; // Per job credentials update - renew generic credentials assigned to this job if(job_.delegationid.empty()) return false; DelegationStores* delegs = config_.GmConfig().GetDelegations(); if(!delegs) return false; DelegationStore& deleg = delegs->operator[](config_.GmConfig().DelegationDir()); if(!deleg.PutCred(job_.delegationid, config_.GridName(), credentials)) return false; Arc::Credential cred(credentials,"","","","",false); job_.expiretime = cred.GetEndTime(); GMJob job(id_,Arc::User(config_.User().get_uid()), config_.GmConfig().SessionRoot(id_)+"/"+id_,JOB_STATE_ACCEPTED); #if 0 std::string cred_public; cred.OutputCertificate(cred_public); cred.OutputCertificateChain(cred_public); (void)job_proxy_write_file(job,config_.GmConfig(),cred_public); #else // For compatibility reasons during transitional period store full proxy if possible (void)job_proxy_write_file(job,config_.GmConfig(),credentials); #endif // TODO: should job.#.proxy be updated too? 
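// update_credentials() above stores the renewed delegation and re-derives the job
// expiry time from it. A minimal sketch of that last step in isolation; the
// Arc::Credential constructor arguments are copied from the calls above and the
// try/catch mirrors the equivalent code earlier in this file (hedged sketch, not
// part of the original source).
#if 0 // illustrative sketch only, not part of the original file
static Arc::Time credential_end_time(const std::string& pem, Arc::Logger& logger) {
  try {
    Arc::Credential cred(pem, "", "", "", "", false); // parse the PEM blob only, no signing setup
    return cred.GetEndTime();
  } catch(std::exception const& e) {
    logger.msg(Arc::WARNING, "Credential handling exception: %s", e.what());
    return Arc::Time(); // fall back to 'now' so the credential is treated as already expiring
  }
}
#endif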
return true; } bool ARexJob::make_job_id(void) { if(!config_) return false; int i; //@ delete_job_id(); for(i=0;i<100;i++) { //id_=Arc::tostring((unsigned int)getpid())+ // Arc::tostring((unsigned int)time(NULL))+ // Arc::tostring(rand(),1); Arc::GUID(id_); std::string fname=config_.GmConfig().ControlDir()+"/job."+id_+".description"; struct stat st; if(stat(fname.c_str(),&st) == 0) continue; int h = ::open(fname.c_str(),O_RDWR | O_CREAT | O_EXCL,0600); // So far assume control directory is on local fs. // TODO: add locks or links for NFS int err = errno; if(h == -1) { if(err == EEXIST) continue; logger_.msg(Arc::ERROR, "Failed to create file in %s", config_.GmConfig().ControlDir()); id_=""; return false; }; fix_file_owner(fname,config_.User()); close(h); return true; }; logger_.msg(Arc::ERROR, "Out of tries while allocating new job ID in %s", config_.GmConfig().ControlDir()); id_=""; return false; } bool ARexJob::delete_job_id(void) { if(!config_) return true; if(!id_.empty()) { job_clean_final(GMJob(id_,Arc::User(config_.User().get_uid()), config_.GmConfig().SessionRoot(id_)+"/"+id_),config_.GmConfig()); id_=""; }; return true; } int ARexJob::TotalJobs(ARexGMConfig& config,Arc::Logger& /* logger */) { ContinuationPlugins plugins; JobsList jobs(config.GmConfig()); jobs.ScanAllJobs(); return jobs.size(); } std::list ARexJob::Jobs(ARexGMConfig& config,Arc::Logger& logger) { std::list jlist; ContinuationPlugins plugins; JobsList jobs(config.GmConfig()); jobs.ScanAllJobs(); JobsList::iterator i = jobs.begin(); for(;i!=jobs.end();++i) { ARexJob job(i->get_id(),config,logger,true); if(job) jlist.push_back(i->get_id()); }; return jlist; } std::string ARexJob::SessionDir(void) { if(id_.empty()) return ""; return config_.GmConfig().SessionRoot(id_)+"/"+id_; } std::string ARexJob::LogDir(void) { return job_.stdlog; } static bool normalize_filename(std::string& filename) { std::string::size_type p = 0; if(filename[0] != G_DIR_SEPARATOR) filename.insert(0,G_DIR_SEPARATOR_S); for(;p != std::string::npos;) { if((filename[p+1] == '.') && (filename[p+2] == '.') && ((filename[p+3] == 0) || (filename[p+3] == G_DIR_SEPARATOR)) ) { std::string::size_type pr = std::string::npos; if(p > 0) pr = filename.rfind(G_DIR_SEPARATOR,p-1); if(pr == std::string::npos) return false; filename.erase(pr,p-pr+3); p=pr; } else if((filename[p+1] == '.') && (filename[p+2] == G_DIR_SEPARATOR)) { filename.erase(p,2); } else if(filename[p+1] == G_DIR_SEPARATOR) { filename.erase(p,1); }; p = filename.find(G_DIR_SEPARATOR,p+1); }; if(!filename.empty()) filename.erase(0,1); // removing leading separator return true; } Arc::FileAccess* ARexJob::CreateFile(const std::string& filename) { if(id_.empty()) return NULL; std::string fname = filename; if((!normalize_filename(fname)) || (fname.empty())) { failure_="File name is not acceptable"; failure_type_=ARexJobInternalError; return NULL; }; int lname = fname.length(); fname = config_.GmConfig().SessionRoot(id_)+"/"+id_+"/"+fname; // First try to create/open file Arc::FileAccess* fa = Arc::FileAccess::Acquire(); if(!*fa) { delete fa; return NULL; }; if(!fa->fa_setuid(config_.User().get_uid(),config_.User().get_gid())) { Arc::FileAccess::Release(fa); return NULL; }; if(!fa->fa_open(fname,O_WRONLY | O_CREAT,S_IRUSR | S_IWUSR)) { if(fa->geterrno() != ENOENT) { Arc::FileAccess::Release(fa); return NULL; }; std::string::size_type n = fname.rfind('/'); if((n == std::string::npos) || (n < (fname.length()-lname))) { Arc::FileAccess::Release(fa); return NULL; }; 
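// Hand-traced examples of the normalize_filename() helper defined above, which
// resolves '.', '..' and duplicate separators and rejects paths that would escape
// the session directory. Worked through by hand from the code - a hedged
// illustration, not part of the original file.
#if 0 // illustrative sketch only, not part of the original file
std::string a = "dir/./sub//file";
bool oka = normalize_filename(a);   // oka == true,  a == "dir/sub/file"
std::string b = "dir/../file";
bool okb = normalize_filename(b);   // okb == true,  b == "file"
std::string c = "../escape";
bool okc = normalize_filename(c);   // okc == false, '..' would climb above the root
#endif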
if(!fa->fa_mkdirp(fname.substr(0,n),S_IRUSR | S_IWUSR | S_IXUSR)) { if(fa->geterrno() != EEXIST) { Arc::FileAccess::Release(fa); return NULL; }; }; if(!fa->fa_open(fname,O_WRONLY | O_CREAT,S_IRUSR | S_IWUSR)) { Arc::FileAccess::Release(fa); return NULL; }; }; return fa; } Arc::FileAccess* ARexJob::OpenFile(const std::string& filename,bool for_read,bool for_write) { if(id_.empty()) return NULL; std::string fname = filename; if((!normalize_filename(fname)) || (fname.empty())) { failure_="File name is not acceptable"; failure_type_=ARexJobInternalError; return NULL; }; fname = config_.GmConfig().SessionRoot(id_)+"/"+id_+"/"+fname; int flags = 0; if(for_read && for_write) { flags=O_RDWR; } else if(for_read) { flags=O_RDONLY; } else if(for_write) { flags=O_WRONLY; } //return Arc::FileOpen(fname,flags,config_.User().get_uid(),config_.User().get_gid(),0); Arc::FileAccess* fa = Arc::FileAccess::Acquire(); if(*fa) { if(fa->fa_setuid(config_.User().get_uid(),config_.User().get_gid())) { if(fa->fa_open(fname,flags,0)) { return fa; }; }; }; failure_="Failed opening file - "+Arc::StrError(fa->geterrno()); failure_type_=ARexJobInternalError; Arc::FileAccess::Release(fa); return NULL; } Arc::FileAccess* ARexJob::OpenDir(const std::string& dirname) { if(id_.empty()) return NULL; std::string dname = dirname; if(!normalize_filename(dname)) { failure_="Directory name is not acceptable"; failure_type_=ARexJobInternalError; return NULL; }; //if(dname.empty()) return NULL; dname = config_.GmConfig().SessionRoot(id_)+"/"+id_+"/"+dname; Arc::FileAccess* fa = Arc::FileAccess::Acquire(); if(*fa) { if(fa->fa_setuid(config_.User().get_uid(),config_.User().get_gid())) { if(fa->fa_opendir(dname)) { return fa; }; }; }; failure_="Failed opening directory - "+Arc::StrError(fa->geterrno()); failure_type_=ARexJobInternalError; Arc::FileAccess::Release(fa); return NULL; } int ARexJob::OpenLogFile(const std::string& name) { if(id_.empty()) return -1; if(strchr(name.c_str(),'/')) return -1; std::string fname = config_.GmConfig().ControlDir() + "/job." + id_ + "." + name; return ::open(fname.c_str(),O_RDONLY); } std::list ARexJob::LogFiles(void) { std::list logs; if(id_.empty()) return logs; std::string dname = config_.GmConfig().ControlDir(); std::string prefix = "job." + id_ + "."; // TODO: scanning is performance bottleneck. Use matching instead. Glib::Dir* dir = new Glib::Dir(dname); if(!dir) return logs; for(;;) { std::string name = dir->read_name(); if(name.empty()) break; if(strncmp(prefix.c_str(),name.c_str(),prefix.length()) != 0) continue; logs.push_back(name.substr(prefix.length())); }; delete dir; return logs; } std::string ARexJob::GetFilePath(const std::string& filename) { if(id_.empty()) return ""; std::string fname = filename; if(!normalize_filename(fname)) return ""; if(fname.empty()) config_.GmConfig().SessionRoot(id_)+"/"+id_; return config_.GmConfig().SessionRoot(id_)+"/"+id_+"/"+fname; } bool ARexJob::ReportFileComplete(const std::string& filename) { if(id_.empty()) return ""; std::string fname = filename; if(!normalize_filename(fname)) return false; return job_input_status_add_file(GMJob(id_,Arc::User(config_.User().get_uid())),config_.GmConfig(),"/"+fname); } bool ARexJob::ReportFilesComplete(void) { if(id_.empty()) return ""; return job_input_status_add_file(GMJob(id_,Arc::User(config_.User().get_uid())),config_.GmConfig(),"/"); } std::string ARexJob::GetLogFilePath(const std::string& name) { if(id_.empty()) return ""; return config_.GmConfig().ControlDir() + "/job." + id_ + "." 
+ name; } bool ARexJob::ChooseSessionDir(const std::string& /* jobid */, std::string& sessiondir) { if (config_.SessionRootsNonDraining().size() == 0) { // no active session dirs available logger_.msg(Arc::ERROR, "No non-draining session dirs available"); return false; } // choose randomly from non-draining session dirs sessiondir = config_.SessionRootsNonDraining().at(rand() % config_.SessionRootsNonDraining().size()); return true; } nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/a-rex.in0000644000000000000000000000012713065017701022172 xustar000000000000000027 mtime=1490296769.679784 30 atime=1513200649.748622107 30 ctime=1513200662.680780273 nordugrid-arc-5.4.2/src/services/a-rex/a-rex.in0000755000175000002070000007502313065017701022246 0ustar00mockbuildmock00000000000000#!/bin/bash # # Init file for the A-REX service # # chkconfig: 2345 75 25 # description: NorduGrid grid-manager # # config: /etc/sysconfig/globus # config: /etc/sysconfig/nordugrid # config: @prefix@/etc/arc.conf # config: /etc/arc.conf # # This startup script takes ARC0 configuration file as # its input and generates ARC1 arched configuration file # which contains commands to start A-REX service. Service # is either run isolated or with WS interface enabled. # To enable WS interface ARC0 configuration file must # contain undocumented option in [grid-manager] section: # # arex_mount_point="a_rex_url" ### BEGIN INIT INFO # Provides: a-rex # Required-Start: $local_fs $remote_fs # Required-Stop: $local_fs $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: ARC grid manager # Description: The unit of the NorduGrid's ARC middleware to # accept and control jobs. ### END INIT INFO # source function library if [ -f /etc/init.d/functions ]; then . /etc/init.d/functions log_success_msg() { echo -n "$@" success "$@" echo } log_warning_msg() { echo -n "$@" warning "$@" echo } log_failure_msg() { echo -n "$@" failure "$@" echo } elif [ -f /lib/lsb/init-functions ]; then . /lib/lsb/init-functions else echo "Error: Cannot source neither init.d nor lsb functions" exit 1 fi add_library_path() { location="$1" if [ ! "x$location" = "x" ] ; then if [ ! "$location" = "/usr" ] ; then libdir="$location/lib" libdir64="$location/lib64" if [ -d "$libdir64" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir64" else LD_LIBRARY_PATH="$libdir64:$LD_LIBRARY_PATH" fi fi if [ -d "$libdir" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir" else LD_LIBRARY_PATH="$libdir:$LD_LIBRARY_PATH" fi fi fi fi } prog=arched RUN=yes # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/a-rex ]; then . /etc/sysconfig/a-rex elif [ -r /etc/default/a-rex ]; then . /etc/default/a-rex fi # GLOBUS_LOCATION GLOBUS_LOCATION=${GLOBUS_LOCATION:-@DEFAULT_GLOBUS_LOCATION@} if [ -n "$GLOBUS_LOCATION" ]; then if [ ! -d "$GLOBUS_LOCATION" ]; then log_failure_msg "GLOBUS_LOCATION ($GLOBUS_LOCATION) not found" exit 1 fi export GLOBUS_LOCATION fi # ARC_LOCATION ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then log_failure_msg "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi export ARC_LOCATION testconfigblock() { fname=$1 if [ ! -r "$fname" ]; then return fi bname="[$2]" cat "$fname" | sed -e 's/\r$//' -e 's/^\r//' | grep -e '^\[' | { while true; do read line if [ ! $? 
= 0 ] ; then return fi if [ "$line" = "$bname" ] ; then echo 'true' return fi done echo 'false' } } readconfigvar() { fname=$1 if [ ! -r "$fname" ]; then return fi bname="[$2]" vname=$3 value= cat "$fname" | sed -e 's/\r$//' -e 's/^\r//' | grep -e '^\[' -e "^${vname}=" | { while true; do read line if [ ! $? = 0 ] ; then return fi if [ "$line" = "$bname" ] ; then while true ; do read line if [ ! $? = 0 ] ; then return fi lstart=`echo "$line" | head -c 1` if [ "$lstart" = '[' ] ; then return fi vlname=`echo "$line" | sed 's/=.*//;t;s/.*//'` if [ "$vlname" = "$vname" ] ; then val=`echo "$line" | sed 's/[^=]*=//'` eval "echo $val" return fi done fi done } } readconfigvars() { fname=$1 if [ ! -r "$fname" ]; then return fi bname="[$2]" vname=$3 value= cat "$fname" | sed -e 's/\r$//' -e 's/^\r//' | grep -e '^\[' -e "^${vname}=" | { while true; do read line if [ ! $? = 0 ] ; then return fi if [ "$line" = "$bname" ] ; then while true ; do read line if [ ! $? = 0 ] ; then return fi lstart=`echo "$line" | head -c 1` if [ "$lstart" = '[' ] ; then return fi vlname=`echo "$line" | sed 's/=.*//;t;s/.*//'` if [ "$vlname" = "$vname" ] ; then val=`echo "$line" | sed 's/[^=]*=//'` echo "$val" fi done fi done } } tokenize() { line="$1" prefix="$2" suffix="$3" while true; do if [ -z "$line" ]; then break; fi token=`echo "$line" | sed 's/^"\([^"]*\)" *.*/\1/;t exit;s/^\([^ ]*\) *.*/\1/;:exit'` line=`echo "$line" | sed 's/^"[^"]*" *\(.*\)/\1/;t exit;s/^[^ ]* *\(.*\)/\1/;t exit;s/.*//;:exit'` echo "${prefix}${token}${suffix}" done } voms_trust_to_xml() { xml="" while true; do read line if [ $? -ne '0' ]; then break; fi if [ -z "$line" ]; then continue; fi xmlchain=`tokenize "$line" "" ""` echo "${xmlchain}" done } # ARC_CONFIG if [ "x$ARC_CONFIG" = "x" ]; then if [ -r $ARC_LOCATION/etc/arc.conf ]; then ARC_CONFIG=$ARC_LOCATION/etc/arc.conf elif [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi fi # PID and lock file PID_FILE=`readconfigvar "$ARC_CONFIG" grid-manager pidfile` if [ `id -u` = 0 ] ; then # Debian does not have /var/lock/subsys if [ -d /var/lock/subsys ]; then LOCKFILE=/var/lock/subsys/$prog-arex else LOCKFILE=/var/lock/$prog-arex fi if [ "x$PID_FILE" = "x" ]; then PID_FILE=/var/run/$prog-arex.pid fi else LOCKFILE=$HOME/$prog-arex.lock if [ "x$PID_FILE" = "x" ]; then PID_FILE=$HOME/$prog-arex.pid fi fi prepare() { CMD="$ARC_LOCATION/sbin/$prog" if [ ! -x "$CMD" ]; then log_failure_msg "Missing executable" exit 1 fi if [ ! 
-r "$ARC_CONFIG" ]; then log_failure_msg "ARC configuration not found (usually /etc/arc.conf)" exit 1 fi # Creating configuration file of arched # Reading following information from config file: # Log file # Debug level # User name GRIDFTPD_PRESENT=`testconfigblock "$ARC_CONFIG" gridftpd/jobs` LOGFILE=`readconfigvar "$ARC_CONFIG" grid-manager logfile` WSLOGFILE=`readconfigvar "$ARC_CONFIG" grid-manager wslogfile` PERFLOGDIR=`readconfigvar "$ARC_CONFIG" common perflogdir` ENABLEPERFLOG=`readconfigvar "$ARC_CONFIG" common enable_perflog_reporting` LOGLEVEL=`readconfigvar "$ARC_CONFIG" grid-manager debug` LOGSIZE=`readconfigvar "$ARC_CONFIG" grid-manager logsize` LOGREOPEN=`readconfigvar "$ARC_CONFIG" grid-manager logreopen` WATCHDOG=`readconfigvar "$ARC_CONFIG" grid-manager watchdog` USERNAME=`readconfigvar "$ARC_CONFIG" grid-manager user` GROUPNAME=`echo "$USERNAME" | sed 's/^[^:]*//;s/^://'` USERNAME=`echo "$USERNAME" | sed 's/:.*//'` X509_USER_CERT=`readconfigvar "$ARC_CONFIG" grid-manager x509_user_cert` X509_USER_KEY=`readconfigvar "$ARC_CONFIG" grid-manager x509_user_key` X509_CERT_DIR=`readconfigvar "$ARC_CONFIG" grid-manager x509_cert_dir` GRIDMAP=`readconfigvar "$ARC_CONFIG" grid-manager gridmap` GLOBUS_TCP_PORT_RANGE=`readconfigvar "$ARC_CONFIG" grid-manager globus_tcp_port_range` GLOBUS_UDP_PORT_RANGE=`readconfigvar "$ARC_CONFIG" grid-manager globus_udp_port_range` VOMS_PROCESSING=`readconfigvar "$ARC_CONFIG" grid-manager voms_processing` VOMS_TRUST_CHAINS=`readconfigvars "$ARC_CONFIG" grid-manager voms_trust_chain | voms_trust_to_xml` MAX_JOB_CONTROL_REQUESTS=`readconfigvar "$ARC_CONFIG" grid-manager max_job_control_requests` MAX_INFOSYS_REQUESTS=`readconfigvar "$ARC_CONFIG" grid-manager max_infosys_requests` MAX_DATA_TRANSFER_REQUESTS=`readconfigvar "$ARC_CONFIG" grid-manager max_data_transfer_requests` if [ "$GRIDFTPD_PRESENT" == 'true' ] ; then ALLOWUNKNOWN=`readconfigvar "$ARC_CONFIG" gridftpd allowunknown` USERMAP_BLOCK='gridftpd' USERAUTH_BLOCK='gridftpd/jobs' else ALLOWUNKNOWN=`readconfigvar "$ARC_CONFIG" grid-manager allowunknown` USERMAP_BLOCK='grid-manager' USERAUTH_BLOCK='grid-manager' fi if [ -z "$X509_USER_CERT" ] ; then X509_USER_CERT=`readconfigvar "$ARC_CONFIG" common x509_user_cert` fi if [ -z "$X509_USER_KEY" ] ; then X509_USER_KEY=`readconfigvar "$ARC_CONFIG" common x509_user_key` fi if [ -z "$X509_CERT_DIR" ] ; then X509_CERT_DIR=`readconfigvar "$ARC_CONFIG" common x509_cert_dir` fi if [ -z "$GRIDMAP" ] ; then GRIDMAP=`readconfigvar "$ARC_CONFIG" common gridmap` fi if [ -z "$GLOBUS_TCP_PORT_RANGE" ] ; then GLOBUS_TCP_PORT_RANGE=`readconfigvar "$ARC_CONFIG" common globus_tcp_port_range` fi if [ -z "$GLOBUS_UDP_PORT_RANGE" ] ; then GLOBUS_UDP_PORT_RANGE=`readconfigvar "$ARC_CONFIG" common globus_udp_port_range` fi if [ -z "$VOMS_PROCESSING" ] ; then VOMS_PROCESSING=`readconfigvar "$ARC_CONFIG" common voms_processing` fi VOMS_TRUST_CHAINS="$VOMS_TRUST_CHAINS"`readconfigvars "$ARC_CONFIG" common voms_trust_chain | voms_trust_to_xml` # Exporting collected variables if [ ! -z "$X509_USER_CERT" ] ; then export X509_USER_CERT ; fi if [ ! -z "$X509_USER_KEY" ] ; then export X509_USER_KEY ; fi if [ ! -z "$X509_CERT_DIR" ] ; then export X509_CERT_DIR ; fi if [ ! -z "$GRIDMAP" ] ; then export GRIDMAP ; fi if [ ! -z "$GLOBUS_TCP_PORT_RANGE" ] ; then export GLOBUS_TCP_PORT_RANGE ; fi if [ ! 
-z "$GLOBUS_UDP_PORT_RANGE" ] ; then export GLOBUS_UDP_PORT_RANGE ; fi # Required defaults if [ -z "$GRIDMAP" ] ; then GRIDMAP=/etc/grid-security/grid-mapfile fi if [ -z "$X509_USER_CERT" ] ; then X509_USER_CERT=/etc/grid-security/hostcert.pem fi if [ -z "$X509_USER_KEY" ] ; then X509_USER_KEY=/etc/grid-security/hostkey.pem fi if [ -z "$X509_CERT_DIR" ] ; then X509_CERT_DIR=/etc/grid-security/certificates fi if [ "$ALLOWUNKNOWN" != "yes" ] ; then ALLOWUNKNOWN="" fi # Web Service configuration arex_endpoint="" arex_mount_point=`readconfigvar "$ARC_CONFIG" grid-manager arex_mount_point` arex_mount_point=${arex_mount_point:-`readconfigvar "$ARC_CONFIG" cluster arex_mount_point`} if [ ! -z "$arex_mount_point" ] ; then arex_proto=`echo "$arex_mount_point" | sed 's/^\([^:]*\):\/\/.*/\1/;t;s/.*//'` arex_host=`echo "$arex_mount_point" | sed 's/^[^:]*:\/\/\([^:\/]*\).*/\1/;t;s/.*//'` arex_port=`echo "$arex_mount_point" | sed 's/^[^:]*:\/\/[^:]*:\([^\/]*\)\(.*\)/\1/;t;s/.*//'` arex_path=`echo "$arex_mount_point" | sed 's/^[^:]*:\/\/[^\/]*\/\(.*\)/\1/;t;s/.*//'` if [ -z "$arex_port" ] ; then if [ "$arex_proto" = "https" ] ; then arex_port="443" else arex_port="80" fi fi arex_endpoint="$arex_mount_point" fi enable_emies_interface=`readconfigvar "$ARC_CONFIG" grid-manager enable_emies_interface` if [ ! -z "$enable_emies_interface" ] ; then enable_emies_interface="$enable_emies_interface" fi emir_registration="" # if [ ! -z "$arex_mount_point" ]; then # emir_urls=`readconfigvar "$ARC_CONFIG" "registration/emir" emirurls` # if [ ! -z "$emir_urls" ]; then # emir_no_xbes=`readconfigvar "$ARC_CONFIG" "registration/emir" disablereg_xbes` # emir_no_emies=`readconfigvar "$ARC_CONFIG" "registration/emir" disablereg_emies` # if [ ! "$emir_no_xbes" = "yes" ]; then # emir_validity=`readconfigvar "$ARC_CONFIG" "registration/emir" validity` # if [ -z "$emir_validity" ]; then emir_validity="600"; fi # emir_period=`readconfigvar "$ARC_CONFIG" "registration/emir" period` # if [ -z "$emir_period" ]; then emir_period="60"; fi # emir_urls=`echo "$emir_urls" | sed ':loop;s/, */<\/URL>EMIREG:/;t loop'` # emir_urls="EMIREG:$emir_urls" # emir_registration="\ # # $arex_mount_point # $emir_validity # $emir_period # # $emir_urls # 10 # $arex_mount_point # $emir_validity # $emir_period # $X509_USER_KEY # $X509_USER_CERT # $X509_CERT_DIR # #" # fi # fi # fi isis_registration="" if [ ! -z "$arex_mount_point" ]; then isis_urls=`readconfigvar "$ARC_CONFIG" "registration/isis" isisurls` if [ ! -z "$isis_urls" ]; then isis_no_xbes=`readconfigvar "$ARC_CONFIG" "registration/isis" disablereg_xbes` isis_no_emies=`readconfigvar "$ARC_CONFIG" "registration/isis" disablereg_emies` if [ ! 
"$isis_no_xbes" = "yes" ]; then isis_validity=`readconfigvar "$ARC_CONFIG" "registration/isis" validity` if [ -z "$isis_validity" ]; then isis_validity="600"; fi isis_period=`readconfigvar "$ARC_CONFIG" "registration/isis" period` if [ -z "$isis_period" ]; then isis_period="60"; fi isis_urls=`echo "$isis_urls" | sed ':loop;s/, */<\/URL>ISIS:/;t loop'` isis_urls="ISIS:$isis_urls" isis_registration="\ $arex_mount_point $isis_validity $isis_period $isis_urls 10 $arex_mount_point $isis_validity $isis_period $X509_USER_KEY $X509_USER_CERT $X509_CERT_DIR " fi fi fi infoproviders_timeout=`readconfigvar "$ARC_CONFIG" infosys infoproviders_timeout` if [ -z "$infoproviders_timeout" ]; then infoproviders_wakeup="\ 600" else infoproviders_wakeup="\ $infoproviders_timeout" fi argus_shc="" argus_plugin="" arguspep_endpoint=`readconfigvar "$ARC_CONFIG" grid-manager arguspep_endpoint` if [ ! -z "$arguspep_endpoint" ]; then argus_plugin="${argus_plugin}arguspepclient" if [ ! -f "$ARC_LOCATION/lib/arc/libarguspepclient.so" ] && [ ! -f "$ARC_LOCATION/lib64/arc/libarguspepclient.so" ]; then log_failure_msg "Plugin arguspepclient(libarguspepclient.so) not found" log_failure_msg "You may need to install corresponding package" exit 1 fi arguspep_profile=`readconfigvar "$ARC_CONFIG" grid-manager arguspep_profile` if [ -z "$arguspep_profile" ]; then arguspep_profile="emi"; fi arguspep_usermap=`readconfigvar "$ARC_CONFIG" grid-manager arguspep_usermap` if [ -z "$arguspep_usermap" ]; then arguspep_usermap="false"; fi if [ "$arguspep_usermap" = "yes" ]; then arguspep_usermap="true"; fi if [ "$arguspep_usermap" = "no" ]; then arguspep_usermap="false"; fi argus_shc="${argus_shc} $arguspep_endpoint $arguspep_profile $X509_USER_KEY $X509_USER_CERT $X509_CERT_DIR $arguspep_usermap " fi arguspdp_endpoint=`readconfigvar "$ARC_CONFIG" grid-manager arguspdp_endpoint` if [ ! -z "$arguspdp_endpoint" ]; then argus_plugin="${argus_plugin}arguspdpclient" if [ ! -f "$ARC_LOCATION/lib/arc/libarguspdpclient.so" ] && [ ! 
-f "$ARC_LOCATION/lib64/arc/libarguspdpclient.so" ]; then log_failure_msg "Plugin arguspdpclient(libarguspdpclient.so) not found" log_failure_msg "You may need to install corresponding package" exit 1 fi arguspdp_profile=`readconfigvar "$ARC_CONFIG" grid-manager arguspdp_profile` if [ -z "$arguspdp_profile" ]; then arguspdp_profile="emi"; fi arguspdp_usermap=`readconfigvar "$ARC_CONFIG" grid-manager arguspdp_usermap` if [ -z "$arguspdp_usermap" ]; then arguspdp_usermap="false"; fi if [ "$arguspdp_usermap" = "yes" ]; then arguspdp_usermap="true"; fi if [ "$arguspdp_usermap" = "no" ]; then arguspdp_usermap="false"; fi arguspdp_acceptnotapplicable=`readconfigvar "$ARC_CONFIG" grid-manager arguspdp_acceptnotapplicable` if [ -z "$arguspdp_acceptnotapplicable" ]; then arguspdp_acceptnotapplicable="false"; fi if [ "$arguspdp_acceptnotapplicable" = "yes" ]; then arguspdp_acceptnotapplicable="true"; fi if [ "$arguspdp_acceptnotapplicable" = "no" ]; then arguspdp_acceptnotapplicable="false"; fi argus_shc="${argus_shc} $arguspdp_endpoint $arguspdp_profile $X509_USER_KEY $X509_USER_CERT $X509_CERT_DIR $arguspdp_usermap $arguspdp_acceptnotapplicable " fi legacy_shc=" $ARC_CONFIG $USERAUTH_BLOCK $ARC_CONFIG $USERMAP_BLOCK " # cache service cache_service_plexer="" cache_service="" use_cache_service=`readconfigvar "$ARC_CONFIG" grid-manager enable_cache_service` if [ "$use_cache_service" = "yes" ]; then use_dtr=`readconfigvar "$ARC_CONFIG" grid-manager enable_dtr` if [ -z "$arex_mount_point" -o "$use_dtr" = "no" ]; then log_failure_msg "Both DTR and A-REX WS interface must be turned on to use cache service" exit 1 fi cache_service_plexer="^/cacheservice" cache_service="\ \ $legacy_shc \ $ARC_CONFIG\ true\ \ " fi if [ -z "$ALLOWUNKNOWN" ]; then gridmapmatch="\ " fi AREX_CONFIG=`mktemp -t arex.xml.XXXXXX` if [ -z "$AREX_CONFIG" ] ; then log_failure_msg "Failed to create temporary file" exit 1 fi CMD="$CMD -c '$AREX_CONFIG'" # VOMS_LOCATION VOMS_LOCATION=${VOMS_LOCATION:-@DEFAULT_VOMS_LOCATION@} add_library_path "$VOMS_LOCATION" add_library_path "$GLOBUS_LOCATION" if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@ else LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@:$LD_LIBRARY_PATH fi export LD_LIBRARY_PATH case "$LOGLEVEL" in 0) LOGLEVEL="FATAL" ;; 1) LOGLEVEL="ERROR" ;; 2) LOGLEVEL="WARNING" ;; 3) LOGLEVEL="INFO" ;; 4) LOGLEVEL="VERBOSE" ;; 5) LOGLEVEL="DEBUG" ;; *) LOGLEVEL="WARNING" ;; esac if [ "$USERNAME" = "root" ] ; then USERNAME="" fi if [ "$GROUPNAME" = "root" ] ; then GROUPNAME="" fi LOGFILE=${LOGFILE:-/var/log/arc/grid-manager.log} if [ ! -d `dirname $LOGFILE` ]; then mkdir -p `dirname $LOGFILE` fi WSLOGFILE=${WSLOGFILE:-/var/log/arc/ws-interface.log} if [ ! -d `dirname $WSLOGFILE` ]; then mkdir -p `dirname $WSLOGFILE` fi PERFLOGDIR=${PERFLOGDIR:-/var/log/arc/perfdata} if [ -d $PERFLOGDIR ]; then chmod 700 $PERFLOGDIR fi if [ "$ENABLEPERFLOG" = "expert-debug-on" ]; then if [ ! 
-d $PERFLOGDIR ]; then mkdir -p $PERFLOGDIR fi # Set permissions suitable for logs written under user accounts chmod a+rwx $PERFLOGDIR fi LOGSIZE=${LOGSIZE:--1 -1} LOGNUM=`echo "$LOGSIZE" | sed 's/^ *[-+0-9]* *//'` LOGSIZE=`echo "$LOGSIZE" | sed 's/^ *\([-+0-9]*\).*/\1/'` LOGREOPEN=${LOGREOPEN:-no} if [ "$LOGREOPEN" = "yes" ] ; then LOGREOPEN="true" else LOGREOPEN="false" fi WATCHDOG=${WATCHDOG:-no} if [ "$WATCHDOG" = "yes" ] ; then WATCHDOG="true" else WATCHDOG="false" fi VOMS_PROCESSING=${VOMS_PROCESSING:-standard} MAX_JOB_CONTROL_REQUESTS=${MAX_JOB_CONTROL_REQUESTS:-100} MAX_INFOSYS_REQUESTS=${MAX_INFOSYS_REQUESTS:-1} MAX_DATA_TRANSFER_REQUESTS=${MAX_DATA_TRANSFER_REQUESTS:-100} if [ ! -z "$USERNAME" ] ; then CMD="$CMD -u '$USERNAME'" fi if [ ! -z "$GROUPNAME" ] ; then CMD="$CMD -g '$GROUPNAME'" fi # A-Rex without WS interface AREXCFG="\ $PID_FILE $LOGFILE $LOGLEVEL $LOGNUM $LOGSIZE $LOGREOPEN $WATCHDOG $ARC_LOCATION/@pkglibsubdir@/ arex $ARC_CONFIG $infoproviders_wakeup " # A-Rex with WS interface over HTTPS AREXCFGWSS="\ $PID_FILE $LOGFILE $WSLOGFILE $LOGLEVEL $LOGNUM $LOGSIZE $LOGREOPEN $WATCHDOG $ARC_LOCATION/@pkglibsubdir@/ mcctcp mcctls mcchttp mccsoap arex identitymap arcshc arcshclegacy $argus_plugin $arex_port $X509_USER_KEY $X509_USER_CERT $X509_CERT_DIR $VOMS_TRUST_CHAINS $VOMS_PROCESSING $gridmapmatch $GRIDMAP nobody $ARC_CONFIG POST GET PUT HEAD ^/$arex_path $cache_service_plexer $legacy_shc $argus_shc $arex_endpoint $enable_emies_interface $emir_registration $isis_registration $ARC_CONFIG $MAX_INFOSYS_REQUESTS $infoproviders_wakeup $MAX_JOB_CONTROL_REQUESTS $MAX_DATA_TRANSFER_REQUESTS $cache_service " if [ -z "$arex_proto" ] ; then echo "$AREXCFG" > "$AREX_CONFIG" elif [ "$arex_proto" = "https" ] ; then echo "$AREXCFGWSS" > "$AREX_CONFIG" else log_failure_msg "Unsupported protocol: $arex_proto" exit 1 fi if [ ! -z "$USERNAME" ] ; then [ -f $AREX_CONFIG ] && chown $USERNAME $AREX_CONFIG fi # prepare to collect crash information COREDIR=`dirname ${LOGFILE}`/arccore mkdir -p ${COREDIR} cd ${COREDIR} ulimit -c unlimited } validate() { CHECK_CMD=$ARC_LOCATION/@pkglibexecsubdir@/arc-config-check if [ ! -x $CHECK_CMD ]; then log_failure_msg "Could not find or execute arc-config-check tool" return 1 fi eval "$CHECK_CMD --config $ARC_CONFIG" RETVAL=$? return $RETVAL } start() { if [ "$RUN" != "yes" ] ; then echo "a-rex disabled, please adjust the configuration to your needs " echo "and then set RUN to 'yes' in /etc/default/a-rex to enable it." return 0 fi echo -n "Starting $prog: " # Check if we are already running if [ -f $PID_FILE ]; then read pid < $PID_FILE if [ "x$pid" != "x" ]; then ps -p "$pid" -o comm 2>/dev/null | grep "^$prog$" 1>/dev/null 2>/dev/null if [ $? -eq 0 ] ; then log_success_msg "already running (pid $pid)" return 0 fi fi rm -f "$PID_FILE" "$LOCKFILE" fi prepare eval "$CMD" RETVAL=$? rm -f "$AREX_CONFIG" if [ $RETVAL -eq 0 ]; then touch $LOCKFILE log_success_msg else log_failure_msg fi return $RETVAL } stop() { echo -n "Stopping $prog: " if [ -f "$PID_FILE" ]; then read pid < "$PID_FILE" if [ ! -z "$pid" ] ; then if [ "x$1" != "x" ]; then # kill whole process group on force-kill kill -TERM "-$pid" else kill "$pid" fi RETVAL=$? 
if [ $RETVAL -eq 0 ]; then log_success_msg else log_failure_msg fi timeout=180; # for stopping nicely if [ "x$1" != "x" ]; then timeout=1 # 1 second for force-kill fi while ( ps -p "$pid" -o comm 2>/dev/null | grep "^$prog$" 1>/dev/null 2>/dev/null ) && [ $timeout -ge 1 ] ; do sleep 1 timeout=$(($timeout - 1)) done [ $timeout -lt 1 ] && kill -9 "$pid" 1>/dev/null 2>&1 rm -f "$PID_FILE" "$LOCKFILE" else RETVAL=1 log_failure_msg "$prog shutdown - pidfile is empty" fi else RETVAL=0 log_success_msg "$prog shutdown - already stopped" fi return $RETVAL } status() { if [ -f "$PID_FILE" ]; then read pid < "$PID_FILE" if [ "$pid" != "" ]; then if ps -p "$pid" > /dev/null; then echo "$1 (pid $pid) is running..." return 0 fi echo "$1 stopped but pid file exists" return 1 fi fi if [ -f $LOCKFILE ]; then echo "$1 stopped but lockfile exists" return 2 fi echo "$1 is stopped" return 3 } restart() { stop start } case "$1" in start) start ;; stop) stop ;; status) status $prog ;; restart | force-reload) restart ;; reload) ;; condrestart | try-restart) [ -f $LOCKFILE ] && restart || : ;; force-kill) stop 1 ;; validate) validate ;; *) echo "Usage: $0 {start|stop|status|restart|force-reload|reload|condrestart|try-restart|force-kill|validate}" exit 1 ;; esac exit $? nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/a-rex-backtrace-collect.8.in0000644000000000000000000000012712755024064025705 xustar000000000000000027 mtime=1471424564.776441 30 atime=1513200649.815622927 30 ctime=1513200662.677780236 nordugrid-arc-5.4.2/src/services/a-rex/a-rex-backtrace-collect.8.in0000644000175000002070000000133412755024064025750 0ustar00mockbuildmock00000000000000.TH a-rex-backtrace-collect 8 "2016-08-16" "NorduGrid @VERSION@" "NorduGrid Toolkit" .SH NAME a-rex-backtrace-collect \- processes core file(s) generated by arched and produces backtrace(s). .SH DESCRIPTION .B a-rex-backtrace-collect processes core file(s) collected in the ARC_LOGS_DIR/arccore folder and produces their backtraces. The backtrace(s) are stored in files .backtrace. The ARC installation location can be adjusted using the ARC_LOCATION environment variable. The location of the configuration file can be specified using the ARC_CONFIG environment variable. .SH SYNOPSIS a-rex-backtrace-collect .SH OPTIONS No options are supported. 
.SH AUTHOR Aleksandr Konstantinov nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/faults.cpp0000644000000000000000000000012412037007621022623 xustar000000000000000027 mtime=1350307729.170894 27 atime=1513200576.605727 30 ctime=1513200662.694780444 nordugrid-arc-5.4.2/src/services/a-rex/faults.cpp0000644000175000002070000001714112037007621022674 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "arex.h" #include "tools.h" /* NotAuthorizedFault NotAcceptingNewActivitiesFault UnsupportedFeatureFault Feature (string, unbounded) CantApplyOperationToCurrentStateFault ActivityStatus (ActivityStatusType) Message (xsd:string) OperationWillBeAppliedEventuallyFault ActivityStatus (ActivityStatusType) Message (xsd:string) UnknownActivityIdentifierFault Message (string) InvalidRequestMessageFaultType InvalidElement (string,unbounded) Message (string) */ namespace ARex { // A-REX faults static const std::string BES_FACTORY_FAULT_URL("http://schemas.ggf.org/bes/2006/08/bes-factory/BESFactoryPortType/Fault"); static void SetFaultResponse(Arc::SOAPFault& fault) { // Fetch top element of SOAP message - should be better way Arc::XMLNode fault_node = fault; Arc::SOAPEnvelope res(fault_node.Parent().Parent()); // Fault->Body->Envelope Arc::WSAHeader(res).Action(BES_FACTORY_FAULT_URL); } void ARexService::GenericFault(Arc::SOAPFault& fault) { Arc::XMLNode fault_node = fault; Arc::SOAPEnvelope res(fault_node.Parent().Parent()); // Fault->Body->Envelope Arc::WSAHeader(res).Action(""); } void ARexService::NotAuthorizedFault(Arc::XMLNode fault) { fault.Name("bes-factory:NotAuthorizedFault"); } void ARexService::NotAuthorizedFault(Arc::SOAPFault& fault) { NotAuthorizedFault(fault.Detail(true).NewChild("dummy")); SetFaultResponse(fault); } void ARexService::NotAcceptingNewActivitiesFault(Arc::XMLNode fault) { fault.Name("bes-factory:NotAcceptingNewActivitiesFault"); return; } void ARexService::NotAcceptingNewActivitiesFault(Arc::SOAPFault& fault) { NotAcceptingNewActivitiesFault(fault.Detail(true).NewChild("dummy")); SetFaultResponse(fault); } void ARexService::UnsupportedFeatureFault(Arc::XMLNode fault,const std::string& feature) { fault.Name("bes-factory:UnsupportedFeatureFault"); if(!feature.empty()) fault.NewChild("bes-factory:Feature")=feature; return; } void ARexService::UnsupportedFeatureFault(Arc::SOAPFault& fault,const std::string& feature) { UnsupportedFeatureFault(fault.Detail(true).NewChild("dummy"),feature); SetFaultResponse(fault); } void ARexService::CantApplyOperationToCurrentStateFault(Arc::XMLNode fault,const std::string& gm_state,bool failed,const std::string& message) { fault.Name("bes-factory:CantApplyOperationToCurrentStateFault"); addActivityStatus(fault,gm_state,"",failed); fault.NewChild("bes-factory:Message")=message; return; } void ARexService::CantApplyOperationToCurrentStateFault(Arc::SOAPFault& fault,const std::string& gm_state,bool failed,const std::string& message) { CantApplyOperationToCurrentStateFault(fault.Detail(true).NewChild("dummy"),gm_state,failed,message); SetFaultResponse(fault); } void ARexService::OperationWillBeAppliedEventuallyFault(Arc::XMLNode fault,const std::string& gm_state,bool failed,const std::string& message) { fault.Name("bes-factory:OperationWillBeAppliedEventuallyFault"); addActivityStatus(fault,gm_state,"",failed); fault.NewChild("bes-factory:Message")=message; return; } void ARexService::OperationWillBeAppliedEventuallyFault(Arc::SOAPFault& fault,const std::string& gm_state,bool failed,const std::string& 
message) { OperationWillBeAppliedEventuallyFault(fault.Detail(true).NewChild("dummy"),gm_state,failed,message); SetFaultResponse(fault); } void ARexService::UnknownActivityIdentifierFault(Arc::XMLNode fault,const std::string& message) { fault.Name("bes-factory:UnknownActivityIdentifierFault"); fault.NewChild("bes-factory:Message")=message; return; } void ARexService::UnknownActivityIdentifierFault(Arc::SOAPFault& fault,const std::string& message) { UnknownActivityIdentifierFault(fault.Detail(true).NewChild("dummy"),message); SetFaultResponse(fault); } void ARexService::InvalidRequestMessageFault(Arc::XMLNode fault,const std::string& element,const std::string& message) { fault.Name("bes-factory:InvalidRequestMessageFaultType"); if(!element.empty()) fault.NewChild("bes-factory:InvalidElement")=element; fault.NewChild("bes-factory:Message")=message; return; } void ARexService::InvalidRequestMessageFault(Arc::SOAPFault& fault,const std::string& element,const std::string& message) { InvalidRequestMessageFault(fault.Detail(true).NewChild("dummy"),element,message); SetFaultResponse(fault); } // EMI ES faults // InternalBaseFault // Message // Timestamp (dateTime) 0-1 // Description 0-1 // FailureCode (int) 0-1 void ARexService::ESInternalBaseFault(Arc::XMLNode fault,const std::string& message,const std::string& desc) { fault.Name("estypes:InternalBaseFault"); fault.NewChild("estypes:Message") = message; fault.NewChild("estypes:Timestamp") = Arc::Time().str(Arc::ISOTime); if(!desc.empty()) fault.NewChild("estypes:Description") = desc; //fault.NewChild("estypes:FailureCode") = "0"; } void ARexService::ESInternalBaseFault(Arc::SOAPFault& fault,const std::string& message,const std::string& desc) { ESInternalBaseFault(fault.Detail(true).NewChild("dummy"),message,desc); } void ARexService::ESVectorLimitExceededFault(Arc::XMLNode fault,unsigned long limit,const std::string& message,const std::string& desc) { ESInternalBaseFault(fault,message.empty()?"Limit of parallel requests exceeded":message,desc); fault.NewChild("estypes:ServerLimit") = Arc::tostring(limit); fault.Name("estypes:VectorLimitExceededFault"); } void ARexService::ESVectorLimitExceededFault(Arc::SOAPFault& fault,unsigned long limit,const std::string& message,const std::string& desc) { ESVectorLimitExceededFault(fault.Detail(true).NewChild("dummy"),limit,message,desc); } #define ES_SIMPLE_FAULT(FAULTNAME,NAMESPACE,MESSAGE) \ void ARexService::ES##FAULTNAME(Arc::XMLNode fault, \ const std::string& message,const std::string& desc) { \ ESInternalBaseFault(fault,message.empty()?(MESSAGE):message,desc); \ fault.Name(#NAMESPACE ":" #FAULTNAME); \ } \ \ void ARexService::ES##FAULTNAME(Arc::SOAPFault& fault, \ const std::string& message,const std::string& desc) { \ ES##FAULTNAME(fault.Detail(true).NewChild("dummy"),message,desc); \ } ES_SIMPLE_FAULT(AccessControlFault,estypes,"Access denied") ES_SIMPLE_FAULT(UnsupportedCapabilityFault,escreate,"Unsupported capability") ES_SIMPLE_FAULT(InvalidActivityDescriptionSemanticFault,escreate,"Invalid activity description semantics") ES_SIMPLE_FAULT(InvalidActivityDescriptionFault,escreate,"Invalid activity description") ES_SIMPLE_FAULT(NotSupportedQueryDialectFault,esrinfo,"Query language not supported") ES_SIMPLE_FAULT(NotValidQueryStatementFault,esrinfo,"Query is not valid for specified language") ES_SIMPLE_FAULT(UnknownQueryFault,esrinfo,"Query is not recognized") ES_SIMPLE_FAULT(InternalResourceInfoFault,esrinfo,"Internal failure retrieving resource information") 
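// Each ES_SIMPLE_FAULT() invocation above and below expands into the pair of
// overloads defined by the macro earlier in this file. For reference, the
// expansion of ES_SIMPLE_FAULT(AccessControlFault,estypes,"Access denied") is
// roughly the following (whitespace added; derived mechanically from the macro
// definition above).
#if 0 // illustrative expansion only, not part of the original file
void ARexService::ESAccessControlFault(Arc::XMLNode fault,
                                       const std::string& message, const std::string& desc) {
  ESInternalBaseFault(fault, message.empty() ? ("Access denied") : message, desc);
  fault.Name("estypes:AccessControlFault"); // #NAMESPACE ":" #FAULTNAME
}
void ARexService::ESAccessControlFault(Arc::SOAPFault& fault,
                                       const std::string& message, const std::string& desc) {
  ESAccessControlFault(fault.Detail(true).NewChild("dummy"), message, desc);
}
#endif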
ES_SIMPLE_FAULT(ResourceInfoNotFoundFault,esrinfo,"Resource has no requested information") ES_SIMPLE_FAULT(UnableToRetrieveStatusFault,esainfo,"Activity status is missing") ES_SIMPLE_FAULT(UnknownAttributeFault,esainfo,"Activity has no such attribute") ES_SIMPLE_FAULT(OperationNotAllowedFault,esainfo,"Requested operation not allowed") ES_SIMPLE_FAULT(ActivityNotFoundFault,esainfo,"Activity with specified ID not found") ES_SIMPLE_FAULT(InternalNotificationFault,esainfo,"Notofication fault") ES_SIMPLE_FAULT(OperationNotPossibleFault,esainfo,"Can't perform this operation") ES_SIMPLE_FAULT(InvalidActivityStateFault,esainfo,"Invalid activity state") ES_SIMPLE_FAULT(InvalidActivityLimitFault,esainfo,"Invalid activity limit") ES_SIMPLE_FAULT(InvalidParameterFault,esainfo,"Invalid parameter") } nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/jura0000644000000000000000000000013213214316027021505 xustar000000000000000030 mtime=1513200663.250787244 30 atime=1513200668.718854121 30 ctime=1513200663.250787244 nordugrid-arc-5.4.2/src/services/a-rex/jura/0000755000175000002070000000000013214316027021630 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/Makefile.am0000644000000000000000000000012613065017103023616 xustar000000000000000027 mtime=1490296387.698578 29 atime=1513200602.68904655 30 ctime=1513200663.224786926 nordugrid-arc-5.4.2/src/services/a-rex/jura/Makefile.am0000644000175000002070000000164113065017103023663 0ustar00mockbuildmock00000000000000pkglibexec_PROGRAMS = jura if PYTHON_ENABLED APEL_CLIENT = ssm else APEL_CLIENT = endif SUBDIRS = $(APEL_CLIENT) DIST_SUBDIRS = ssm jura_SOURCES = jura.cpp jura.h UsageReporter.cpp UsageReporter.h \ Reporter.h ReReporter.cpp ReReporter.h \ JobLogFile.cpp JobLogFile.h Destinations.cpp Destinations.h \ Destination.cpp Destination.h LutsDestination.cpp LutsDestination.h \ ApelDestination.cpp ApelDestination.h CARDestination.cpp CARDestination.h \ CARAggregation.cpp CARAggregation.h jura_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) jura_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) man_MANS = jura.1 nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/JobLogFile.h0000644000000000000000000000012412623101234023703 xustar000000000000000027 mtime=1447854748.072969 27 atime=1513200576.394724 30 ctime=1513200663.236787073 nordugrid-arc-5.4.2/src/services/a-rex/jura/JobLogFile.h0000644000175000002070000000400712623101234023751 0ustar00mockbuildmock00000000000000#ifndef JOBLOGFILE_H #define JOBLOGFILE_H #include #include #include #include #include #include #include namespace Arc { class JobLogFile: public std::map /** * Class to represent a job log file created by A-REX, * and to create OGF Job Usage Records from them. */ { std::string filename; bool allow_remove; std::vector inputfiles; std::vector outputfiles; std::string getArchivingPath(bool car=false); void parseInputOutputFiles(Arc::XMLNode &node, std::vector &list, std::string type="input"); public: /** Constructor. Loads and parses A-REX job log. */ JobLogFile(const std::string& _filename):allow_remove(true) { parse(_filename); } /** Reloads and parses A-REX job log. 
*/ int parse(const std::string& _filename); /** Creates an OGF Job Usage Record from parsed log files. * - Missing UR properties: * -# ProcessID: Local PID(s) of job. Extraction is LRMS-specific and \n * may not always be possible * -# Charge: Amount of money or abstract credits charged for the job. * -# Some differentiated properties e.g. network, disk etc. */ void createUsageRecord(Arc::XMLNode &usagerecord, const char *recordid_prefix="ur-"); /** Creates an OGF 2.0 (CAR) Job Usage Record from parsed log files. */ void createCARUsageRecord(Arc::XMLNode &usagerecord, const char *recordid_prefix="ur-"); /** Returns original full path to log file */ std::string getFilename() { return filename; } /** Enables/disables file removal from disk */ void allowRemove(bool a) { allow_remove=a; } /** Checks if file exists on the disk */ bool exists(); /** Checks if file was modified earlier than 'age' seconds ago */ bool olderThan(time_t age); /** Deletes file from the disk */ void remove(); }; } #endif nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/UsageReporter.h0000644000000000000000000000012412534012676024532 xustar000000000000000027 mtime=1433408958.605316 27 atime=1513200576.349724 30 ctime=1513200663.231787012 nordugrid-arc-5.4.2/src/services/a-rex/jura/UsageReporter.h0000644000175000002070000000252112534012676024577 0ustar00mockbuildmock00000000000000#ifndef _USAGEREPORTER_H #define _USAGEREPORTER_H #include #include #include #include #ifdef WIN32 #include #endif #include "Reporter.h" #include "Destinations.h" namespace Arc { /** The class for main JURA functionality. Traverses the 'logs' dir * of the given control directory, and reports usage data extracted from * job log files within. */ class UsageReporter:public Reporter { private: Arc::Logger logger; Arc::Destinations *dests; /** Directory where A-REX puts job logs */ std::string job_log_dir; time_t expiration_time; std::vector urls; std::vector topics; std::string vo_filters; std::string out_dir; public: /** Constructor. Gets the job log dir and the expiration time in seconds. * Default expiration time is infinity (represented by zero value). */ UsageReporter(std::string job_log_dir_, time_t expiration_time_=0, std::vector urls_=std::vector(), std::vector topics_=std::vector(), std::string vo_filters_="", std::string out_dir_=""); /** Processes job log files in '/logs'. */ int report(); ~UsageReporter(); }; } #endif nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315732023631 xustar000000000000000030 mtime=1513200602.763047455 30 atime=1513200650.153627061 30 ctime=1513200663.225786938 nordugrid-arc-5.4.2/src/services/a-rex/jura/Makefile.in0000644000175000002070000014664213214315732023714 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ pkglibexec_PROGRAMS = jura$(EXEEXT) subdir = src/services/a-rex/jura DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/jura.1.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = jura.1 CONFIG_CLEAN_VPATH_FILES = am__installdirs = "$(DESTDIR)$(pkglibexecdir)" "$(DESTDIR)$(man1dir)" PROGRAMS = $(pkglibexec_PROGRAMS) am_jura_OBJECTS = jura-jura.$(OBJEXT) jura-UsageReporter.$(OBJEXT) \ jura-ReReporter.$(OBJEXT) jura-JobLogFile.$(OBJEXT) \ jura-Destinations.$(OBJEXT) jura-Destination.$(OBJEXT) \ jura-LutsDestination.$(OBJEXT) jura-ApelDestination.$(OBJEXT) \ jura-CARDestination.$(OBJEXT) jura-CARAggregation.$(OBJEXT) jura_OBJECTS = $(am_jura_OBJECTS) am__DEPENDENCIES_1 = jura_DEPENDENCIES = $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) jura_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(jura_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = 
$(jura_SOURCES) DIST_SOURCES = $(jura_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' man1dir = $(mandir)/man1 NROFF = nroff MANS = $(man_MANS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ 
ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = 
@LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = 
@pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @PYTHON_ENABLED_FALSE@APEL_CLIENT = @PYTHON_ENABLED_TRUE@APEL_CLIENT = ssm SUBDIRS = $(APEL_CLIENT) DIST_SUBDIRS = ssm jura_SOURCES = jura.cpp jura.h UsageReporter.cpp UsageReporter.h \ Reporter.h ReReporter.cpp ReReporter.h \ JobLogFile.cpp JobLogFile.h Destinations.cpp Destinations.h \ Destination.cpp Destination.h LutsDestination.cpp LutsDestination.h \ ApelDestination.cpp ApelDestination.h CARDestination.cpp CARDestination.h \ CARAggregation.cpp CARAggregation.h jura_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) jura_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) man_MANS = jura.1 all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/jura/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/jura/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): jura.1: $(top_builddir)/config.status $(srcdir)/jura.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkglibexecPROGRAMS: $(pkglibexec_PROGRAMS) @$(NORMAL_INSTALL) test -z "$(pkglibexecdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibexecdir)" @list='$(pkglibexec_PROGRAMS)'; test -n "$(pkglibexecdir)" || list=; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p || test -f $$p1; \ then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(pkglibexecdir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(pkglibexecdir)$$dir" || exit $$?; \ } \ ; done uninstall-pkglibexecPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(pkglibexec_PROGRAMS)'; test -n "$(pkglibexecdir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkglibexecdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkglibexecdir)" && rm -f $$files clean-pkglibexecPROGRAMS: @list='$(pkglibexec_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list jura$(EXEEXT): $(jura_OBJECTS) $(jura_DEPENDENCIES) @rm -f jura$(EXEEXT) $(jura_LINK) $(jura_OBJECTS) $(jura_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jura-ApelDestination.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jura-CARAggregation.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jura-CARDestination.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jura-Destination.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jura-Destinations.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jura-JobLogFile.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jura-LutsDestination.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jura-ReReporter.Po@am__quote@ @AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/jura-UsageReporter.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jura-jura.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< jura-jura.o: jura.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-jura.o -MD -MP -MF $(DEPDIR)/jura-jura.Tpo -c -o jura-jura.o `test -f 'jura.cpp' || echo '$(srcdir)/'`jura.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jura-jura.Tpo $(DEPDIR)/jura-jura.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='jura.cpp' object='jura-jura.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-jura.o `test -f 'jura.cpp' || echo '$(srcdir)/'`jura.cpp jura-jura.obj: jura.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-jura.obj -MD -MP -MF $(DEPDIR)/jura-jura.Tpo -c -o jura-jura.obj `if test -f 'jura.cpp'; then $(CYGPATH_W) 'jura.cpp'; else $(CYGPATH_W) '$(srcdir)/jura.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jura-jura.Tpo $(DEPDIR)/jura-jura.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='jura.cpp' object='jura-jura.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-jura.obj `if test -f 'jura.cpp'; then $(CYGPATH_W) 'jura.cpp'; else $(CYGPATH_W) '$(srcdir)/jura.cpp'; fi` jura-UsageReporter.o: UsageReporter.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-UsageReporter.o -MD -MP -MF $(DEPDIR)/jura-UsageReporter.Tpo -c -o jura-UsageReporter.o `test -f 'UsageReporter.cpp' || echo '$(srcdir)/'`UsageReporter.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jura-UsageReporter.Tpo $(DEPDIR)/jura-UsageReporter.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='UsageReporter.cpp' object='jura-UsageReporter.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ 
@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-UsageReporter.o `test -f 'UsageReporter.cpp' || echo '$(srcdir)/'`UsageReporter.cpp jura-UsageReporter.obj: UsageReporter.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-UsageReporter.obj -MD -MP -MF $(DEPDIR)/jura-UsageReporter.Tpo -c -o jura-UsageReporter.obj `if test -f 'UsageReporter.cpp'; then $(CYGPATH_W) 'UsageReporter.cpp'; else $(CYGPATH_W) '$(srcdir)/UsageReporter.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jura-UsageReporter.Tpo $(DEPDIR)/jura-UsageReporter.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='UsageReporter.cpp' object='jura-UsageReporter.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-UsageReporter.obj `if test -f 'UsageReporter.cpp'; then $(CYGPATH_W) 'UsageReporter.cpp'; else $(CYGPATH_W) '$(srcdir)/UsageReporter.cpp'; fi` jura-ReReporter.o: ReReporter.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-ReReporter.o -MD -MP -MF $(DEPDIR)/jura-ReReporter.Tpo -c -o jura-ReReporter.o `test -f 'ReReporter.cpp' || echo '$(srcdir)/'`ReReporter.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jura-ReReporter.Tpo $(DEPDIR)/jura-ReReporter.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ReReporter.cpp' object='jura-ReReporter.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-ReReporter.o `test -f 'ReReporter.cpp' || echo '$(srcdir)/'`ReReporter.cpp jura-ReReporter.obj: ReReporter.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-ReReporter.obj -MD -MP -MF $(DEPDIR)/jura-ReReporter.Tpo -c -o jura-ReReporter.obj `if test -f 'ReReporter.cpp'; then $(CYGPATH_W) 'ReReporter.cpp'; else $(CYGPATH_W) '$(srcdir)/ReReporter.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jura-ReReporter.Tpo $(DEPDIR)/jura-ReReporter.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ReReporter.cpp' object='jura-ReReporter.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-ReReporter.obj `if test -f 'ReReporter.cpp'; then $(CYGPATH_W) 'ReReporter.cpp'; else $(CYGPATH_W) '$(srcdir)/ReReporter.cpp'; fi` jura-JobLogFile.o: JobLogFile.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-JobLogFile.o -MD -MP -MF $(DEPDIR)/jura-JobLogFile.Tpo -c -o jura-JobLogFile.o `test -f 'JobLogFile.cpp' || echo '$(srcdir)/'`JobLogFile.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jura-JobLogFile.Tpo $(DEPDIR)/jura-JobLogFile.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobLogFile.cpp' object='jura-JobLogFile.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) 
$(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-JobLogFile.o `test -f 'JobLogFile.cpp' || echo '$(srcdir)/'`JobLogFile.cpp jura-JobLogFile.obj: JobLogFile.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-JobLogFile.obj -MD -MP -MF $(DEPDIR)/jura-JobLogFile.Tpo -c -o jura-JobLogFile.obj `if test -f 'JobLogFile.cpp'; then $(CYGPATH_W) 'JobLogFile.cpp'; else $(CYGPATH_W) '$(srcdir)/JobLogFile.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jura-JobLogFile.Tpo $(DEPDIR)/jura-JobLogFile.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobLogFile.cpp' object='jura-JobLogFile.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-JobLogFile.obj `if test -f 'JobLogFile.cpp'; then $(CYGPATH_W) 'JobLogFile.cpp'; else $(CYGPATH_W) '$(srcdir)/JobLogFile.cpp'; fi` jura-Destinations.o: Destinations.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-Destinations.o -MD -MP -MF $(DEPDIR)/jura-Destinations.Tpo -c -o jura-Destinations.o `test -f 'Destinations.cpp' || echo '$(srcdir)/'`Destinations.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jura-Destinations.Tpo $(DEPDIR)/jura-Destinations.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Destinations.cpp' object='jura-Destinations.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-Destinations.o `test -f 'Destinations.cpp' || echo '$(srcdir)/'`Destinations.cpp jura-Destinations.obj: Destinations.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-Destinations.obj -MD -MP -MF $(DEPDIR)/jura-Destinations.Tpo -c -o jura-Destinations.obj `if test -f 'Destinations.cpp'; then $(CYGPATH_W) 'Destinations.cpp'; else $(CYGPATH_W) '$(srcdir)/Destinations.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jura-Destinations.Tpo $(DEPDIR)/jura-Destinations.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Destinations.cpp' object='jura-Destinations.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-Destinations.obj `if test -f 'Destinations.cpp'; then $(CYGPATH_W) 'Destinations.cpp'; else $(CYGPATH_W) '$(srcdir)/Destinations.cpp'; fi` jura-Destination.o: Destination.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-Destination.o -MD -MP -MF $(DEPDIR)/jura-Destination.Tpo -c -o jura-Destination.o `test -f 'Destination.cpp' || echo '$(srcdir)/'`Destination.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jura-Destination.Tpo $(DEPDIR)/jura-Destination.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Destination.cpp' object='jura-Destination.o' libtool=no @AMDEPBACKSLASH@ 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-Destination.o `test -f 'Destination.cpp' || echo '$(srcdir)/'`Destination.cpp jura-Destination.obj: Destination.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-Destination.obj -MD -MP -MF $(DEPDIR)/jura-Destination.Tpo -c -o jura-Destination.obj `if test -f 'Destination.cpp'; then $(CYGPATH_W) 'Destination.cpp'; else $(CYGPATH_W) '$(srcdir)/Destination.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jura-Destination.Tpo $(DEPDIR)/jura-Destination.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Destination.cpp' object='jura-Destination.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-Destination.obj `if test -f 'Destination.cpp'; then $(CYGPATH_W) 'Destination.cpp'; else $(CYGPATH_W) '$(srcdir)/Destination.cpp'; fi` jura-LutsDestination.o: LutsDestination.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-LutsDestination.o -MD -MP -MF $(DEPDIR)/jura-LutsDestination.Tpo -c -o jura-LutsDestination.o `test -f 'LutsDestination.cpp' || echo '$(srcdir)/'`LutsDestination.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jura-LutsDestination.Tpo $(DEPDIR)/jura-LutsDestination.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='LutsDestination.cpp' object='jura-LutsDestination.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-LutsDestination.o `test -f 'LutsDestination.cpp' || echo '$(srcdir)/'`LutsDestination.cpp jura-LutsDestination.obj: LutsDestination.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-LutsDestination.obj -MD -MP -MF $(DEPDIR)/jura-LutsDestination.Tpo -c -o jura-LutsDestination.obj `if test -f 'LutsDestination.cpp'; then $(CYGPATH_W) 'LutsDestination.cpp'; else $(CYGPATH_W) '$(srcdir)/LutsDestination.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jura-LutsDestination.Tpo $(DEPDIR)/jura-LutsDestination.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='LutsDestination.cpp' object='jura-LutsDestination.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-LutsDestination.obj `if test -f 'LutsDestination.cpp'; then $(CYGPATH_W) 'LutsDestination.cpp'; else $(CYGPATH_W) '$(srcdir)/LutsDestination.cpp'; fi` jura-ApelDestination.o: ApelDestination.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-ApelDestination.o -MD -MP -MF $(DEPDIR)/jura-ApelDestination.Tpo -c -o jura-ApelDestination.o `test -f 'ApelDestination.cpp' || echo '$(srcdir)/'`ApelDestination.cpp @am__fastdepCXX_TRUE@ $(am__mv) 
$(DEPDIR)/jura-ApelDestination.Tpo $(DEPDIR)/jura-ApelDestination.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ApelDestination.cpp' object='jura-ApelDestination.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-ApelDestination.o `test -f 'ApelDestination.cpp' || echo '$(srcdir)/'`ApelDestination.cpp jura-ApelDestination.obj: ApelDestination.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-ApelDestination.obj -MD -MP -MF $(DEPDIR)/jura-ApelDestination.Tpo -c -o jura-ApelDestination.obj `if test -f 'ApelDestination.cpp'; then $(CYGPATH_W) 'ApelDestination.cpp'; else $(CYGPATH_W) '$(srcdir)/ApelDestination.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jura-ApelDestination.Tpo $(DEPDIR)/jura-ApelDestination.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ApelDestination.cpp' object='jura-ApelDestination.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-ApelDestination.obj `if test -f 'ApelDestination.cpp'; then $(CYGPATH_W) 'ApelDestination.cpp'; else $(CYGPATH_W) '$(srcdir)/ApelDestination.cpp'; fi` jura-CARDestination.o: CARDestination.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-CARDestination.o -MD -MP -MF $(DEPDIR)/jura-CARDestination.Tpo -c -o jura-CARDestination.o `test -f 'CARDestination.cpp' || echo '$(srcdir)/'`CARDestination.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jura-CARDestination.Tpo $(DEPDIR)/jura-CARDestination.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='CARDestination.cpp' object='jura-CARDestination.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-CARDestination.o `test -f 'CARDestination.cpp' || echo '$(srcdir)/'`CARDestination.cpp jura-CARDestination.obj: CARDestination.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-CARDestination.obj -MD -MP -MF $(DEPDIR)/jura-CARDestination.Tpo -c -o jura-CARDestination.obj `if test -f 'CARDestination.cpp'; then $(CYGPATH_W) 'CARDestination.cpp'; else $(CYGPATH_W) '$(srcdir)/CARDestination.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jura-CARDestination.Tpo $(DEPDIR)/jura-CARDestination.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='CARDestination.cpp' object='jura-CARDestination.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-CARDestination.obj `if test -f 'CARDestination.cpp'; then $(CYGPATH_W) 'CARDestination.cpp'; else $(CYGPATH_W) '$(srcdir)/CARDestination.cpp'; fi` jura-CARAggregation.o: CARAggregation.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-CARAggregation.o -MD -MP -MF $(DEPDIR)/jura-CARAggregation.Tpo -c -o jura-CARAggregation.o `test -f 'CARAggregation.cpp' || echo '$(srcdir)/'`CARAggregation.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jura-CARAggregation.Tpo $(DEPDIR)/jura-CARAggregation.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='CARAggregation.cpp' object='jura-CARAggregation.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-CARAggregation.o `test -f 'CARAggregation.cpp' || echo '$(srcdir)/'`CARAggregation.cpp jura-CARAggregation.obj: CARAggregation.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -MT jura-CARAggregation.obj -MD -MP -MF $(DEPDIR)/jura-CARAggregation.Tpo -c -o jura-CARAggregation.obj `if test -f 'CARAggregation.cpp'; then $(CYGPATH_W) 'CARAggregation.cpp'; else $(CYGPATH_W) '$(srcdir)/CARAggregation.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/jura-CARAggregation.Tpo $(DEPDIR)/jura-CARAggregation.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='CARAggregation.cpp' object='jura-CARAggregation.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jura_CXXFLAGS) $(CXXFLAGS) -c -o jura-CARAggregation.obj `if test -f 'CARAggregation.cpp'; then $(CYGPATH_W) 'CARAggregation.cpp'; else $(CYGPATH_W) '$(srcdir)/CARAggregation.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man1: $(man_MANS) @$(NORMAL_INSTALL) test -z "$(man1dir)" || $(MKDIR_P) "$(DESTDIR)$(man1dir)" @list=''; test -n "$(man1dir)" || exit 0; \ { for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \ done; } uninstall-man1: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man1dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ test -z "$$files" || { \ echo " ( cd '$(DESTDIR)$(man1dir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(man1dir)" && rm -f $$files; } # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. 
# To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @list='$(MANS)'; if test -n "$$list"; then \ list=`for p in $$list; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \ if test -n "$$list" && \ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(PROGRAMS) $(MANS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibexecdir)" "$(DESTDIR)$(man1dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-pkglibexecPROGRAMS \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-man install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkglibexecPROGRAMS install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-man1 install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-man uninstall-pkglibexecPROGRAMS uninstall-man: uninstall-man1 .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-pkglibexecPROGRAMS ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-man1 install-pdf install-pdf-am \ install-pkglibexecPROGRAMS install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am uninstall-man uninstall-man1 \ uninstall-pkglibexecPROGRAMS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/CARDestination.cpp0000644000000000000000000000012412110655773025106 xustar000000000000000027 mtime=1361271803.874235 27 atime=1513200576.352724 30 ctime=1513200663.245787183 nordugrid-arc-5.4.2/src/services/a-rex/jura/CARDestination.cpp0000644000175000002070000000762112110655773025161 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "CARDestination.h" #include "jura.h" #include #include #include namespace Arc { CARDestination::CARDestination(JobLogFile& joblog): logger(Arc::Logger::rootLogger, "JURA.CARDestination"), urn(0), usagerecordset(Arc::NS("","http://eu-emi.eu/namespaces/2012/11/computerecord"), "UsageRecords") { //Get service URL, cert, key, CA path from job log file std::string serviceurl=joblog["loggerurl"].substr(4); std::string certfile=joblog["certificate_path"]; std::string keyfile=joblog["key_path"]; std::string cadir=joblog["ca_certificates_dir"]; output_dir=joblog["outputdir"]; // ...or get them from environment if (certfile.empty()) certfile=Arc::GetEnv("X509_USER_CERT"); if (keyfile.empty()) keyfile=Arc::GetEnv("X509_USER_KEY"); if (cadir.empty()) cadir=Arc::GetEnv("X509_CERT_DIR"); // ...or by default, use host cert, key, CA path if (certfile.empty()) certfile=JURA_DEFAULT_CERT_FILE; if (keyfile.empty()) keyfile=JURA_DEFAULT_KEY_FILE; if (cadir.empty()) cadir=JURA_DEFAULT_CA_DIR; cfg.AddCertificate(certfile); cfg.AddPrivateKey(keyfile); cfg.AddCADir(cadir); // Tokenize service URL std::string host, port, endpoint; if (serviceurl.empty()) { logger.msg(Arc::ERROR, "ServiceURL missing"); } else { Arc::URL url(serviceurl); if (url.Protocol()!="https") { logger.msg(Arc::ERROR, "Protocol is %s, should be https", url.Protocol()); } host=url.Host(); std::ostringstream os; os<>max_ur_set_size; } } void CARDestination::report(Arc::JobLogFile &joblog) { //if (joblog.exists()) { //Store copy of job log joblogs.push_back(joblog); //Create UR if can Arc::XMLNode usagerecord(Arc::NS(), ""); joblog.createCARUsageRecord(usagerecord); if (usagerecord) { usagerecordset.NewChild(usagerecord); ++urn; } else { logger.msg(Arc::INFO,"Ignoring incomplete log file \"%s\"", joblog.getFilename().c_str()); joblog.remove(); } } if (urn==max_ur_set_size) // Batch is full. Submit and delete job log files. submit_batch(); } void CARDestination::finish() { if (urn>0) // Send the remaining URs and delete job log files. submit_batch(); } int CARDestination::submit_batch() { std::string urstr; usagerecordset.GetDoc(urstr,false); logger.msg(Arc::INFO, "Logging UR set of %d URs.", urn); logger.msg(Arc::DEBUG, "UR set dump: %s", urstr.c_str()); return OutputFileGeneration("CAR", service_url, output_dir, urstr, logger); } Arc::MCC_Status CARDestination::send_request(const std::string &urset) { //TODO: Not implemented yet. 
return Arc::MCC_Status(Arc::STATUS_OK, "carclient", "CAR message sent."); } void CARDestination::clear() { urn=0; joblogs.clear(); usagerecordset.Replace( Arc::XMLNode(Arc::NS("", "http://eu-emi.eu/namespaces/2012/11/computerecord" ), "UsageRecords") ); } CARDestination::~CARDestination() { finish(); } } nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/ssm0000644000000000000000000000013213214316027022307 xustar000000000000000030 mtime=1513200663.281787623 30 atime=1513200668.718854121 30 ctime=1513200663.281787623 nordugrid-arc-5.4.2/src/services/a-rex/jura/ssm/0000755000175000002070000000000013214316027022432 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/jura/ssm/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612101527505024422 xustar000000000000000026 mtime=1359392581.52781 30 atime=1513200602.781047675 30 ctime=1513200663.272787513 nordugrid-arc-5.4.2/src/services/a-rex/jura/ssm/Makefile.am0000644000175000002070000000055012101527505024465 0ustar00mockbuildmock00000000000000ssmdir= $(pkgdatadir)/ssm pkglibexec_SCRIPTS = ssmsend ssm_DATA = ssm2.py crypto.py brokers.py sender.cfg __init__.py EXTRA_DIST = $(ssm_DATA) $(pkglibexec_SCRIPTS) README.ssm install-exec-hook: cd $(DESTDIR)$(pkglibexecdir) && \ cp ssmsend ssmsend.orig && \ sed "s,SSMDIR,\"$(ssmdir)\"," < ssmsend.orig > ssmsend && \ rm -f ssmsend.orig nordugrid-arc-5.4.2/src/services/a-rex/jura/ssm/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315732024433 xustar000000000000000030 mtime=1513200602.818048128 30 atime=1513200650.186627464 30 ctime=1513200663.273787525 nordugrid-arc-5.4.2/src/services/a-rex/jura/ssm/Makefile.in0000644000175000002070000004740313214315732024511 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
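CARDestination.cpp above implements an accumulate-and-flush pattern: report() appends each complete usage record to the in-memory UsageRecords set and bumps the counter urn, submit_batch() writes the set out once urn reaches max_ur_set_size, and finish() (invoked from the destructor) flushes whatever remains. The self-contained sketch below restates that pattern with illustrative names (RecordBatcher, plain strings instead of Arc::XMLNode, a stdout "submit"); it is not the project's code, only the shape of the logic.

// Standalone sketch of the accumulate-and-flush pattern used by CARDestination.
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

class RecordBatcher {
public:
  explicit RecordBatcher(std::size_t max_set_size) : max_set_size_(max_set_size) {}
  ~RecordBatcher() { finish(); }              // destructor flushes, like ~CARDestination()

  void report(const std::string& record) {    // analogous to CARDestination::report()
    batch_.push_back(record);
    if (batch_.size() == max_set_size_) submit_batch();
  }

  void finish() {                             // analogous to CARDestination::finish()
    if (!batch_.empty()) submit_batch();
  }

private:
  void submit_batch() {                       // stands in for the real output/submission step
    std::cout << "Submitting set of " << batch_.size() << " records" << std::endl;
    batch_.clear();                           // analogous to CARDestination::clear()
  }

  std::size_t max_set_size_;
  std::vector<std::string> batch_;
};

int main() {
  RecordBatcher batcher(3);                   // max_ur_set_size is read from the job log in JURA
  for (int i = 0; i < 7; ++i) batcher.report("usage-record");
  return 0;                                   // the remaining record is flushed by the destructor
}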
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/jura/ssm DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibexecdir)" "$(DESTDIR)$(ssmdir)" SCRIPTS = $(pkglibexec_SCRIPTS) SOURCES = DIST_SOURCES = DATA = $(ssm_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ 
ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS 
= @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ 
pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ ssmdir = $(pkgdatadir)/ssm pkglibexec_SCRIPTS = ssmsend ssm_DATA = ssm2.py crypto.py brokers.py sender.cfg __init__.py EXTRA_DIST = $(ssm_DATA) $(pkglibexec_SCRIPTS) README.ssm all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/jura/ssm/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/jura/ssm/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibexecSCRIPTS: $(pkglibexec_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkglibexecdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibexecdir)" @list='$(pkglibexec_SCRIPTS)'; test -n "$(pkglibexecdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkglibexecdir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkglibexecdir)$$dir" || exit $$?; \ } \ ; done uninstall-pkglibexecSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkglibexec_SCRIPTS)'; test -n "$(pkglibexecdir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkglibexecdir)' && rm -f" $$files ")"; \ cd 
"$(DESTDIR)$(pkglibexecdir)" && rm -f $$files mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-ssmDATA: $(ssm_DATA) @$(NORMAL_INSTALL) test -z "$(ssmdir)" || $(MKDIR_P) "$(DESTDIR)$(ssmdir)" @list='$(ssm_DATA)'; test -n "$(ssmdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(ssmdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(ssmdir)" || exit $$?; \ done uninstall-ssmDATA: @$(NORMAL_UNINSTALL) @list='$(ssm_DATA)'; test -n "$(ssmdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(ssmdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(ssmdir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkglibexecdir)" "$(DESTDIR)$(ssmdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-ssmDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibexecSCRIPTS @$(NORMAL_INSTALL) $(MAKE) $(AM_MAKEFLAGS) install-exec-hook install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibexecSCRIPTS uninstall-ssmDATA .MAKE: install-am install-exec-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-exec-hook install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibexecSCRIPTS \ install-ps install-ps-am install-ssmDATA install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \ uninstall-pkglibexecSCRIPTS uninstall-ssmDATA install-exec-hook: cd $(DESTDIR)$(pkglibexecdir) && \ cp ssmsend ssmsend.orig && \ sed "s,SSMDIR,\"$(ssmdir)\"," < ssmsend.orig > ssmsend && \ rm -f ssmsend.orig # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/jura/ssm/PaxHeaders.7502/__init__.py0000644000000000000000000000012412457657517024522 xustar000000000000000027 mtime=1421827919.377719 27 atime=1513200576.387724 30 ctime=1513200663.279787599 nordugrid-arc-5.4.2/src/services/a-rex/jura/ssm/__init__.py0000644000175000002070000000270112457657517024567 0ustar00mockbuildmock00000000000000''' Copyright (C) 2012 STFC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. @author: Will Rogers ''' import logging import sys __version__ = (2, 1, 5) LOG_BREAK = '========================================' def set_up_logging(logfile, level, console): ''' Programmatically initialise logging system. 
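'level' should be one of DEBUG, INFO, WARN, ERROR or CRITICAL; if 'console' is true, log records are echoed to stdout as well as written to 'logfile'. The bundled ssmsend script falls back to this function when no separate logging configuration file is available.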
''' levels = {'DEBUG': logging.DEBUG, 'INFO': logging.INFO, 'WARN': logging.WARN, 'ERROR': logging.ERROR, 'CRITICAL': logging.CRITICAL} fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' formatter = logging.Formatter(fmt) log = logging.getLogger() log.setLevel(levels[level]) if logfile is not None: fh = logging.FileHandler(logfile) fh.setFormatter(formatter) log.addHandler(fh) if console: ch = logging.StreamHandler(sys.stdout) ch.setFormatter(formatter) log.addHandler(ch) nordugrid-arc-5.4.2/src/services/a-rex/jura/ssm/PaxHeaders.7502/ssm2.py0000644000000000000000000000012312457657517023646 xustar000000000000000027 mtime=1421827919.377719 27 atime=1513200576.389724 29 ctime=1513200663.27578755 nordugrid-arc-5.4.2/src/services/a-rex/jura/ssm/ssm2.py0000644000175000002070000004263712457657517023730 0ustar00mockbuildmock00000000000000''' Copyright (C) 2012 STFC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. @author: Will Rogers ''' # It's possible for SSM to be used without SSL, and the ssl module isn't in the # standard library until 2.6, so this makes it safe for earlier Python versions. try: import ssl except ImportError: # ImportError is raised later on if SSL is actually requested. ssl = None import crypto from dirq.QueueSimple import QueueSimple from dirq.queue import Queue import stomp # Exception changed name between stomppy versions try: from stomp.exception import ConnectFailedException except ImportError: from stomp.exception import ReconnectFailedException \ as ConnectFailedException import os import socket import time import logging # Set up logging log = logging.getLogger(__name__) class Ssm2Exception(Exception): ''' Exception for use by SSM2. ''' pass class Ssm2(stomp.ConnectionListener): ''' Minimal SSM implementation. ''' # Schema for the dirq message queue. QSCHEMA = {'body': 'string', 'signer':'string', 'empaid':'string?'} REJECT_SCHEMA = {'body': 'string', 'signer':'string?', 'empaid':'string?', 'error':'string'} CONNECTION_TIMEOUT = 10 def __init__(self, hosts_and_ports, qpath, cert, key, dest=None, listen=None, capath=None, check_crls=False, use_ssl=False, username=None, password=None, enc_cert=None, verify_enc_cert=True, pidfile=None): ''' Creates an SSM2 object. If a listen value is supplied, this SSM2 will be a receiver. 
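A sender is typically constructed along these lines (the broker hostname, queue name and paths below are illustrative placeholders, mirroring how the bundled ssmsend script calls this constructor): Ssm2([('broker.example.org', 6163)], '/var/spool/apel/outgoing', '/etc/grid-security/hostcert.pem', '/etc/grid-security/hostkey.pem', dest='/queue/global.accounting.test', capath='/etc/grid-security/certificates', use_ssl=True)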
''' self._conn = None self._last_msg = None self._brokers = hosts_and_ports self._cert = cert self._key = key self._enc_cert = enc_cert self._capath = capath self._check_crls = check_crls self._user = username self._pwd = password self._use_ssl = use_ssl # use pwd auth if we're supplied both user and pwd self._use_pwd = username is not None and password is not None self.connected = False self._listen = listen self._dest = dest self._valid_dns = [] self._pidfile = pidfile # create the filesystem queues for accepted and rejected messages if dest is not None and listen is None: self._outq = QueueSimple(qpath) elif listen is not None: inqpath = os.path.join(qpath, 'incoming') rejectqpath = os.path.join(qpath, 'reject') self._inq = Queue(inqpath, schema=Ssm2.QSCHEMA) self._rejectq = Queue(rejectqpath, schema=Ssm2.REJECT_SCHEMA) else: raise Ssm2Exception('SSM must be either producer or consumer.') # check that the cert and key match if not crypto.check_cert_key(self._cert, self._key): raise Ssm2Exception('Cert and key don\'t match.') # check the server certificate provided if enc_cert is not None: log.info('Messages will be encrypted using %s', enc_cert) if not os.path.isfile(self._enc_cert): raise Ssm2Exception('Specified certificate file does not exist: %s.' % self._enc_cert) if verify_enc_cert: if not crypto.verify_cert_path(self._enc_cert, self._capath, self._check_crls): raise Ssm2Exception('Failed to verify server certificate %s against CA path %s.' % (self._enc_cert, self._capath)) def set_dns(self, dn_list): ''' Set the list of DNs which are allowed to sign incoming messages. ''' self._valid_dns = dn_list ########################################################################## # Methods called by stomppy ########################################################################## def on_send(self, headers, unused_body): ''' Called by stomppy when a message is sent. ''' log.debug('Sent message: ' + headers['empa-id']) def on_message(self, headers, body): ''' Called by stomppy when a message is received. Handle the message according to its content and headers. ''' try: empaid = headers['empa-id'] if empaid == 'ping': # ignore ping message log.info('Received ping message.') return except KeyError: empaid = 'noid' log.info('Received message: ' + empaid) raw_msg, signer = self._handle_msg(body) try: if raw_msg is None: # the message has been rejected log.warn('Message rejected.') if signer is None: # crypto failed err_msg = 'Could not extract message.' log.warn(err_msg) signer = 'Not available.' else: # crypto ok but signer not verified err_msg = 'Signer not in valid DNs list.' log.warn(err_msg) self._rejectq.add({'body': body, 'signer': signer, 'empaid': empaid, 'error': err_msg}) else: # message verified ok self._inq.add({'body': raw_msg, 'signer':signer, 'empaid': headers['empa-id']}) except OSError, e: log.error('Failed to read or write file: %s', e) def on_error(self, unused_headers, body): ''' Called by stomppy when an error frame is received. ''' log.warn('Error message received: %s', body) raise Ssm2Exception() def on_connected(self, unused_headers, unused_body): ''' Called by stomppy when a connection is established. Track the connection. ''' self.connected = True log.info('Connected.') def on_disconnected(self): ''' Called by stomppy when disconnected from the broker. ''' log.info('Disconnected from broker.') self.connected = False def on_receipt(self, headers, unused_body): ''' Called by stomppy when the broker acknowledges receipt of a message. 
''' log.info('Broker received message: ' + headers['receipt-id']) self._last_msg = headers['receipt-id'] ########################################################################## # Message handling methods ########################################################################## def _handle_msg(self, text): ''' Deal with the raw message contents appropriately: - decrypt if necessary - verify signature Return plain-text message and signer's DN. ''' if text is None or text == '': return None, None # if not text.startswith('MIME-Version: 1.0'): # raise Ssm2Exception('Not a valid message.') # encrypted - this could be nicer if 'application/pkcs7-mime' in text or 'application/x-pkcs7-mime' in text: try: text = crypto.decrypt(text, self._cert, self._key) except crypto.CryptoException, e: log.error('Failed to decrypt message: %s', e) return None, None # always signed try: message, signer = crypto.verify(text, self._capath, self._check_crls) except crypto.CryptoException, e: log.error('Failed to verify message: %s', e) return None, None if signer not in self._valid_dns: log.error('Message signer not in the valid DNs list: %s', signer) return None, signer else: log.info('Valid signer: %s', signer) return message, signer def _send_msg(self, message, msgid): ''' Send one message using stomppy. The message will be signed using the host cert and key. If an encryption certificate has been supplied, the message will also be encrypted. ''' log.info('Sending message: ' + msgid) headers = {'destination': self._dest, 'receipt': msgid, 'empa-id': msgid} if message is not None: to_send = crypto.sign(message, self._cert, self._key) if self._enc_cert is not None: to_send = crypto.encrypt(to_send, self._enc_cert) else: to_send = '' self._conn.send(to_send, headers=headers) def send_ping(self): ''' If a STOMP connection is left open with no activity for an hour or so, it stops responding. Stomppy 3.1.3 has two ways of handling this, but stomppy 3.0.3 (EPEL 5 and 6) has neither. To get around this, we begin and then abort a STOMP transaction to keep the connection active. ''' # Use time as transaction id to ensure uniqueness within each connection transaction_id = str(time.time()) self._conn.begin({'transaction': transaction_id}) self._conn.abort({'transaction': transaction_id}) def has_msgs(self): ''' Return True if there are any messages in the outgoing queue. ''' return self._outq.count() > 0 def send_all(self): ''' Send all the messages in the outgoing queue. ''' log.info('Found %s messages.', self._outq.count()) for msgid in self._outq: if not self._outq.lock(msgid): log.warn('Message was locked. %s will not be sent.', msgid) continue text = self._outq.get(msgid) self._send_msg(text, msgid) log.info('Waiting for broker to accept message.') while self._last_msg is None: if not self.connected: raise Ssm2Exception('Lost connection.') time.sleep(0.1) self._last_msg = None self._outq.remove(msgid) log.info('Tidying message directory.') try: # Remove empty dirs and unlock msgs older than 5 min (default) self._outq.purge() except OSError, e: log.warn('OSError raised while purging message queue: %s', e) ############################################################################ # Connection handling methods ############################################################################ def _initialise_connection(self, host, port): ''' Create the self._connection object with the appropriate properties, but don't try to start the connection. 
''' if self._use_ssl: if ssl is None: raise ImportError("SSL connection requested but the ssl module " "wasn't found.") log.info('Connecting using SSL...') try: # Compatible with stomp.py >= 3.0.4 self._conn = stomp.Connection([(host, port)], use_ssl=self._use_ssl, user=self._user, passcode=self._pwd, ssl_key_file=self._key, ssl_cert_file=self._cert, ssl_version=ssl.PROTOCOL_SSLv23) except TypeError: # For stomp.py <= 3.0.3, override ssl.PROTOCOL_SSLv3 and then # try to set up the connection again below. ssl.PROTOCOL_SSLv3 = ssl.PROTOCOL_SSLv23 if self._conn is None: # If _conn is None then either SSL wasn't requested or trying to # set ssl_version failed. self._conn = stomp.Connection([(host, port)], use_ssl=self._use_ssl, user=self._user, passcode=self._pwd, ssl_key_file=self._key, ssl_cert_file=self._cert) # You can set this in the constructor but only for stomppy version 3. # This works for stomppy 3 but doesn't break stomppy 2. self._conn.__reconnect_attempts_max = 1 self._conn.__timeout = Ssm2.CONNECTION_TIMEOUT self._conn.set_listener('SSM', self) def handle_connect(self): ''' Assuming that the SSM has retrieved the details of the broker or brokers it wants to connect to, connect to one. If more than one is in the list self._network_brokers, try to connect to each in turn until successful. ''' for host, port in self._brokers: self._initialise_connection(host, port) try: self.start_connection() break except ConnectFailedException, e: # ConnectFailedException doesn't provide a message. log.warn('Failed to connect to %s:%s.', host, port) except Ssm2Exception, e: log.warn('Failed to connect to %s:%s: %s', host, port, e) if not self.connected: raise Ssm2Exception('Attempts to start the SSM failed. The system will exit.') def handle_disconnect(self): ''' When disconnected, attempt to reconnect using the same method as used when starting up. ''' self.connected = False # Shut down properly self.close_connection() # Sometimes the SSM will reconnect to the broker before it's properly # shut down! This prevents that. time.sleep(2) # Try again according to the same logic as the initial startup try: self.handle_connect() except Ssm2Exception: self.connected = False # If reconnection fails, admit defeat. if not self.connected: err_msg = 'Reconnection attempts failed and have been abandoned.' raise Ssm2Exception(err_msg) def start_connection(self): ''' Once self._connection exists, attempt to start it and subscribe to the relevant topics. If the timeout is reached without receiving confirmation of connection, raise an exception. ''' if self._conn is None: raise Ssm2Exception('Called start_connection() before a \ connection object was initialised.') self._conn.start() self._conn.connect(wait = True) if self._dest is not None: log.info('Will send messages to: %s', self._dest) if self._listen is not None: self._conn.subscribe(destination=self._listen, ack='auto') log.info('Subscribing to: %s', self._listen) i = 0 while not self.connected: time.sleep(0.1) if i > Ssm2.CONNECTION_TIMEOUT * 10: err = 'Timed out while waiting for connection. ' err += 'Check the connection details.' raise Ssm2Exception(err) i += 1 def close_connection(self): ''' Close the connection. This is important because it runs in a separate thread, so it can outlive the main process if it is not ended. 
''' try: self._conn.stop() # Same as diconnect() but waits for thread exit except (stomp.exception.NotConnectedException, socket.error): self._conn = None except AttributeError: # AttributeError if self._connection is None already pass log.info('SSM connection ended.') def startup(self): ''' Create the pidfile then start the connection. ''' if self._pidfile is not None: try: f = open(self._pidfile, 'w') f.write(str(os.getpid())) f.write('\n') f.close() except IOError, e: log.warn('Failed to create pidfile %s: %s', self._pidfile, e) self.handle_connect() def shutdown(self): ''' Close the connection then remove the pidfile. ''' self.close_connection() if self._pidfile is not None: try: if os.path.exists(self._pidfile): os.remove(self._pidfile) else: log.warn('pidfile %s not found.', self._pidfile) except IOError, e: log.warn('Failed to remove pidfile %s: %e', self._pidfile, e) log.warn('SSM may not start again until it is removed.') nordugrid-arc-5.4.2/src/services/a-rex/jura/ssm/PaxHeaders.7502/brokers.py0000644000000000000000000000012412457657517024432 xustar000000000000000027 mtime=1421827919.377719 27 atime=1513200576.392724 30 ctime=1513200663.277787574 nordugrid-arc-5.4.2/src/services/a-rex/jura/ssm/brokers.py0000644000175000002070000001250012457657517024475 0ustar00mockbuildmock00000000000000''' Copyright (C) 2012 STFC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. @author: Will Rogers Class to interact with a BDII LDAP server to retrieve information about the stomp brokers specified in a network. ''' import ldap import logging log = logging.getLogger(__name__) # Constants used for specific LDAP queries STOMP_SERVICE = 'msg.broker.stomp' STOMP_SSL_SERVICE = 'msg.broker.stomp-ssl' STOMP_PREFIX = 'stomp' STOMP_SSL_PREFIX = 'stomp+ssl' class StompBrokerGetter(object): ''' Given the URL of a BDII, searches for all the STOMP brokers listed that are part of the specified network. ''' def __init__(self, bdii_url): ''' Set up the LDAP connection and strings which are re-used. ''' # Set up the LDAP connection log.debug('Connecting to %s...', bdii_url) self._ldap_conn = ldap.initialize(bdii_url) self._base_dn = 'o=grid' self._service_id_key = 'GlueServiceUniqueID' self._endpoint_key = 'GlueServiceEndpoint' self._service_data_value_key = 'GlueServiceDataValue' def get_broker_urls(self, service_type, network): ''' Gets the list of all the stomp brokers in the BDII, then checks them to see if they are part of the network. The network is supplied as a string. Returns a list of URLs. ''' prod_broker_urls = [] broker_details = self._get_broker_details(service_type) for broker_id, broker_url in broker_details: if self._broker_in_network(broker_id, network): prod_broker_urls.append(broker_url) return prod_broker_urls def get_broker_hosts_and_ports(self, service_type, network): ''' Gets the list of all the stomp brokers in the BDII, then checks them to see if they are part of the network. The network is supplied as a string. Returns a list of (host, port) tuples. 
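For example, a query for the 'msg.broker.stomp-ssl' service type on the 'PROD' network might return [('broker1.example.org', 6162), ('broker2.example.org', 6162)]; the hostnames here are purely illustrative.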
''' urls = self.get_broker_urls(service_type, network) hosts_and_ports = [] for url in urls: hosts_and_ports.append(parse_stomp_url(url)) return hosts_and_ports def _get_broker_details(self, service_type): ''' Searches the BDII for all STOMP message brokers. Returns a list of tuples: (GlueServiceUniqueID, GlueServiceEndpoint). ''' broker_details = [] ldap_filter = '(&(objectClass=GlueService)(GlueServiceType=%s))' % service_type attrs = [self._service_id_key, self._endpoint_key] brokers = self._ldap_conn.search_s(self._base_dn, ldap.SCOPE_SUBTREE, ldap_filter, attrs) for unused_dn, attrs in brokers: details = attrs[self._service_id_key][0], attrs[self._endpoint_key][0] broker_details.append(details) return broker_details def _broker_in_network(self, broker_id, network): ''' Given a GlueServiceUniqueID for a message broker, check that it is part of the specified network. ''' ldap_filter = '(&(GlueServiceDataKey=cluster)(GlueChunkKey=GlueServiceUniqueID=%s))' \ % broker_id attrs = [self._service_data_value_key] results = self._ldap_conn.search_s(self._base_dn, ldap.SCOPE_SUBTREE, ldap_filter, attrs) try: unused_dn, attrs2 = results[0] return network in attrs2[self._service_data_value_key] except IndexError: # no results from the query return False def parse_stomp_url(stomp_url): ''' Given a URL of the form stomp://stomp.cern.ch:6262/, return a tuple containing (stomp.cern.ch, 6262). ''' parts = stomp_url.split(':') protocols = [STOMP_PREFIX, STOMP_SSL_PREFIX] if not parts[0].lower() in protocols: raise ValueError("URL %s does not begin 'stomp:'." % stomp_url) host = parts[1].strip('/') port = parts[2].strip('/') if not port.isdigit(): raise ValueError('URL %s does not have an integer as its third part.' % stomp_url) return host, int(port) if __name__ == '__main__': # BDII URL BDII = 'ldap://lcg-bdii.cern.ch:2170' BG = StompBrokerGetter(BDII) def print_brokers(text, service, network): brokers = BG.get_broker_hosts_and_ports(service, network) # Print section heading print '==', text, '==' # Print brokers in form 'host:port' for broker in brokers: print '%s:%i' % (broker[0], broker[1]) # Leave space between sections print print_brokers('SSL production brokers', STOMP_SSL_SERVICE, 'PROD') print_brokers('Production brokers', STOMP_SERVICE, 'PROD') print_brokers('SSL test brokers', STOMP_SSL_SERVICE, 'TEST-NWOB') print_brokers('Test brokers', STOMP_SERVICE, 'TEST-NWOB') nordugrid-arc-5.4.2/src/services/a-rex/jura/ssm/PaxHeaders.7502/README.ssm0000644000000000000000000000012412457657517024072 xustar000000000000000027 mtime=1421827919.377719 27 atime=1513200576.391724 30 ctime=1513200663.281787623 nordugrid-arc-5.4.2/src/services/a-rex/jura/ssm/README.ssm0000644000175000002070000000341712457657517024134 0ustar00mockbuildmock00000000000000 This modified Python code is part of the SSM developed by APEL. 
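The sender script is invoked with eight positional arguments (the names below are descriptive placeholders; see the __main__ block of ssmsend):

  python ssmsend <host> <port> <topic> <key> <certificate> <cadir> <messages path> <use_ssl>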
Changelog: *Mon Jan 12 2015 Gabor Szigeti -update to ssm2 2.1.5 *Fri Aug 8 2014 Gabor Szigeti -update to ssm2 2.1.3 *Fri Jan 3 2014 Gabor Szigeti -update to ssm2 2.1.1 *Thu May 2 2013 Gabor Szigeti -update to ssm2 2.1.0 *Mon Jan 29 2013 Gabor Szigeti -update to ssm2 2.0.0 *Mon Jan 28 2013 Gabor Szigeti -update to ssm2 0.0.2 *Wed Sep 19 2012 Gabor Szigeti -update to ssm 1.2-2 *Fri Mar 23 2012 Gabor Szigeti -update to ssm 0.8 -fixed temporary location of the log and the messages -better packaging structure *Thu Mar 6 2012 Gabor Szigeti -add license information *Mon Dec 19 2012 Gabor Szigeti -SSM 0.7 integration What kind of modifications happened: -used sources and logs: ssm_master.py SecureStompMessenger.py EncryptUtils.py message_db.py ssm.cfg ssm.log.cfg -SecureStompMessenger.py: *commented out two lines (don't send ping messages, never reset the certificate) *add a stop method for disconnecting from the server -ssm_master.py: *extend the python paths with one new path *the following attributes are coming as command line arguments: *send every message once Installation requirements: *python 2.4.3 or later *stomppy 2.0.2 or later (recommended 3.0.3) *python-dirq *python-ldap (optional) Package names on different platforms: *Scientific Linux, Fedora: python, stomppy, python-ldap, python-dirq *Debian 6: python, python-stompy, python-ldap nordugrid-arc-5.4.2/src/services/a-rex/jura/ssm/PaxHeaders.7502/crypto.py0000644000000000000000000000012413153455235024266 xustar000000000000000027 mtime=1504598685.225093 27 atime=1513200576.387724 30 ctime=1513200663.276787562 nordugrid-arc-5.4.2/src/services/a-rex/jura/ssm/crypto.py0000644000175000002070000002302013153455235024330 0ustar00mockbuildmock00000000000000''' Copyright (C) 2012 STFC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. @author: Kevin Haines, Will Rogers The crypto module calls openssl command line directly, using subprocess. We investigated python's crypto libraries (all openssl bindings) and found that none were mature enough to implement the SMIME crypto we had decided on. ''' from subprocess import Popen, PIPE import quopri import base64 import logging # logging configuration log = logging.getLogger(__name__) # Valid ciphers CIPHERS = ['aes128', 'aes192', 'aes256'] class CryptoException(Exception): ''' Exception for use by the crypto module. ''' pass def _from_file(filename): ''' Convenience function to read entire file into string. ''' f = open(filename, 'r') s = f.read() f.close() return s def check_cert_key(certpath, keypath): ''' Check that a certificate and a key match, using openssl directly to fetch the modulus of each, which must be the same. ''' try: cert = _from_file(certpath) key = _from_file(keypath) except IOError, e: log.error('Could not find cert or key file: %s', e) return False # Two things the same have the same modulus. 
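    # (so if the caller passes the same PEM file as both cert and key, the moduli
    #  would trivially match; reject that case outright before comparing them)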
if cert == key: return False p1 = Popen(['openssl', 'x509', '-noout', '-modulus'], stdin=PIPE, stdout=PIPE, stderr=PIPE) modulus1, error = p1.communicate(cert) if error != '': log.error(error) return False p2 = Popen(['openssl', 'rsa', '-noout', '-modulus'], stdin=PIPE, stdout=PIPE, stderr=PIPE) modulus2, error = p2.communicate(key) if error != '': log.error(error) return False return modulus1.strip() == modulus2.strip() def sign(text, certpath, keypath): ''' Sign the specified message using the certificate and key in the files specified. Returns the signed message as an SMIME string, suitable for transmission. ''' try: p1 = Popen(['openssl', 'smime', '-sign', '-inkey', keypath, '-signer', certpath, '-text'], stdin=PIPE, stdout=PIPE, stderr=PIPE) signed_msg, error = p1.communicate(text) if (error != ''): log.error(error) return signed_msg except OSError, e: log.error('Failed to sign message: %s', e) raise CryptoException('Message signing failed. Check cert and key permissions.') def encrypt(text, certpath, cipher='aes128'): ''' Encrypt the specified message using the certificate string. Returns the encrypted SMIME text suitable for transmission ''' if cipher not in CIPHERS: raise CryptoException('Invalid cipher %s.' % cipher) cipher = '-' + cipher # encrypt p1 = Popen(['openssl', 'smime', '-encrypt', cipher, certpath], stdin=PIPE, stdout=PIPE, stderr=PIPE) enc_txt, error = p1.communicate(text) if (error != ''): log.error(error) return enc_txt def verify(signed_text, capath, check_crl): ''' Verify the signed message has been signed by the certificate (attached to the supplied SMIME message) it claims to have, by one of the accepted CAs in capath. Returns a tuple including the signer's certificate and the plain-text of the message if it has been verified. If the content transfer encoding is specified as 'quoted-printable' or 'base64', decode the message body accordingly. ''' if signed_text is None or capath is None: raise CryptoException('Invalid None argument to verify().') # This ensures that openssl knows that the string is finished. # It makes no difference if the signed message is correct, but # prevents it from hanging in the case of an empty string. signed_text += '\n\n' signer = get_signer_cert(signed_text) if not verify_cert(signer, capath, check_crl): raise CryptoException('Unverified signer') # The -noverify flag removes the certificate verification. The certificate # is verified above; this check would also check that the certificate # is allowed to sign with SMIME, which host certificates sometimes aren't. p1 = Popen(['openssl', 'smime', '-verify', '-CApath', capath, '-noverify'], stdin=PIPE, stdout=PIPE, stderr=PIPE) message, error = p1.communicate(signed_text) # SMIME header and message body are separated by a blank line lines = message.strip().splitlines() blankline = lines.index('') headers = '\n'.join(lines[:blankline]) body = '\n'.join(lines[blankline + 1:]) # two possible encodings if 'quoted-printable' in headers: body = quopri.decodestring(body) elif 'base64' in headers: body = base64.decodestring(body) # otherwise, plain text # Interesting problem here - we get a message 'Verification successful' # to standard error. We don't want to log this as an error each time, # but we do want to see if there's a genuine error... log.info(str(error).strip()) subj = get_certificate_subject(signer) return body, subj def decrypt(encrypted_text, certpath, keypath): ''' Decrypt the specified message using the certificate and key contained in the named PEM files. 
The capath should point to a directory holding all the CAs that we accept This decryption function can be used whether or not OpenSSL is used to encrypt the data ''' # This ensures that openssl knows that the string is finished. # It makes no difference if the signed message is correct, but # prevents it from hanging in the case of an empty string. encrypted_text += '\n\n' log.info('Decrypting message.') p1 = Popen(['openssl', 'smime', '-decrypt', '-recip', certpath, '-inkey', keypath], stdin=PIPE, stdout=PIPE, stderr=PIPE) enc_txt, error = p1.communicate(encrypted_text) if (error != ''): log.error(error) return enc_txt def verify_cert(certstring, capath, check_crls=True): ''' Verify that the certificate is signed by a CA whose certificate is stored in capath. Note that I've had to compare strings in the output of openssl to check for verification, which may make this brittle. Returns True if the certificate is verified ''' if certstring is None or capath is None: raise CryptoException('Invalid None argument to verify_cert().') args = ['openssl', 'verify', '-CApath', capath] if check_crls: args.append('-crl_check_all') p1 = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE) message, error = p1.communicate(certstring) # I think this is unlikely ever to happen if (error != ''): log.error(error) # There was a tricky problem here. # 'openssl verify' returns 0 whatever happens, so we can't # use the return code to determine whether the verification was # successful. # If it is successful, openssl prints 'OK' # If it fails, openssl prints 'error' # So: log.info('Certificate verification: ' + str(message).strip()) return ('OK' in message and 'error' not in message) def verify_cert_path(certpath, capath, check_crls=True): ''' Verify certificate, but using the certificate filepath rather than the certificate string as in verify_cert. ''' certstring = _from_file(certpath) return verify_cert(certstring, capath, check_crls) def get_certificate_subject(certstring): ''' Return the certificate subject's DN, in legacy openssl format. ''' p1 = Popen(['openssl', 'x509', '-noout', '-subject', '-nameopt', 'oneline'], stdin=PIPE, stdout=PIPE, stderr=PIPE) subject, error = p1.communicate(certstring) if (error != ''): log.error(error) raise CryptoException('Failed to get subject: %s' % error) subject = subject.strip()[8:] # remove 'subject=' from the front subject = subject.lstrip() # even if there is space after subject= subject = subject.split(', ') for idx in range(len(subject)): el = subject[idx].split(' = ') el = '='.join(el) subject.pop(idx) subject.insert(idx,el) subject = '/' + '/'.join(subject) return subject def get_signer_cert(signed_text): ''' Read the signer's certificate from the signed specified message, and return the certificate string. ''' # This ensures that openssl knows that the string is finished. # It makes no difference if the signed message is correct, but # prevents it from hanging in the case of an empty string. 
signed_text += '\n\n' p1 = Popen(['openssl', 'smime', '-pk7out'], stdin=PIPE, stdout=PIPE, stderr=PIPE) p2 = Popen(['openssl', 'pkcs7', '-print_certs'], stdin=p1.stdout, stdout=PIPE, stderr=PIPE) p1.stdin.write(signed_text) certstring, error = p2.communicate() if (error != ''): log.error(error) return certstring nordugrid-arc-5.4.2/src/services/a-rex/jura/ssm/PaxHeaders.7502/sender.cfg0000644000000000000000000000012312140445755024335 xustar000000000000000026 mtime=1367493613.61892 27 atime=1513200576.393724 30 ctime=1513200663.278787587 nordugrid-arc-5.4.2/src/services/a-rex/jura/ssm/sender.cfg0000644000175000002070000000262512140445755024410 0ustar00mockbuildmock00000000000000################################################################################ # Required: broker configuration options # [broker] # The SSM will query a BDII to find brokers available. These details are for the # EGI production broker network #bdii: ldap://lcg-bdii.cern.ch:2170 #network: PROD # OR (these details will only be used if the broker network settings aren't used) #host: test-msg02.afroditi.hellasgrid.gr #port: 6163 # broker authentication. If use_ssl is set, the certificates configured # in the mandatory [certificates] section will be used. use_ssl: false ################################################################################ # Required: Certificate configuration [certificates] certificate: /etc/grid-security/hostcert.pem key: /etc/grid-security/hostkey.pem capath: /etc/grid-security/certificates # If this is supplied, outgoing messages will be encrypted # using this certificate #server: /etc/grid-security/APELservercert.pem ################################################################################ # Messaging configuration. # [messaging] # Queue to which SSM will send messages destination: /queue/global.accounting.cputest.CENTRAL # Outgoing messages will be read and removed from this directory. path: /var/spool/apel/outgoing [logging] logfile: /var/spool/arc/ssm/ssmsend.log # Available logging levels: # DEBUG, INFO, WARN, ERROR, CRITICAL level: INFO console: true nordugrid-arc-5.4.2/src/services/a-rex/jura/ssm/PaxHeaders.7502/ssmsend0000644000000000000000000000012412457657517024010 xustar000000000000000027 mtime=1421827919.377719 27 atime=1513200576.391724 30 ctime=1513200663.280787611 nordugrid-arc-5.4.2/src/services/a-rex/jura/ssm/ssmsend0000755000175000002070000001353512457657517024067 0ustar00mockbuildmock00000000000000#!/usr/bin/env python # Copyright (C) 2012 STFC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ''' Script to run a sending SSM. 
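Expects eight positional command-line arguments: host, port, topic, key path, certificate path, CA directory, outgoing message directory and a use_ssl flag (see the __main__ block below; the names here are descriptive only).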
@author: Will Rogers ''' import sys # Prepend module path sys.path.insert(0,SSMDIR) from __init__ import __version__, set_up_logging, LOG_BREAK from ssm2 import Ssm2, Ssm2Exception from crypto import CryptoException from brokers import StompBrokerGetter, STOMP_SERVICE, STOMP_SSL_SERVICE import logging.config import ldap import os from optparse import OptionParser import ConfigParser def main(_host, _port, _topic, _key, _cert, _cadir, _message_path, _use_ssl): ''' Set up connection, send all messages and quit. ''' ver = "SSM %s.%s.%s" % __version__ op = OptionParser(description=__doc__, version=ver) op.add_option('-c', '--config', help='location of config file', default="%s/sender.cfg" % SSMDIR) op.add_option('-l', '--log_config', help='location of logging config file (optional)', default='/etc/apel/logging.cfg') (options, unused_args) = op.parse_args() cp = ConfigParser.ConfigParser() cp.read(options.config) # set up logging try: if os.path.exists(options.log_config): logging.config.fileConfig(options.log_config) else: set_up_logging(cp.get('logging', 'logfile'), cp.get('logging', 'level'), cp.getboolean('logging', 'console')) except (ConfigParser.Error, ValueError, IOError), err: print 'Error configuring logging: %s' % str(err) print 'The system will exit.' sys.exit(1) log = logging.getLogger('ssmsend') log.info(LOG_BREAK) log.info('Starting sending SSM version %s.%s.%s.', *__version__) # If we can't get a broker to connect to, we have to give up. try: bdii_url = cp.get('broker','bdii') log.info('Retrieving broker details from %s ...', bdii_url) bg = StompBrokerGetter(bdii_url) use_ssl = _use_ssl if use_ssl: service = STOMP_SSL_SERVICE else: service = STOMP_SERVICE brokers = bg.get_broker_hosts_and_ports(service, cp.get('broker','network')) log.info('Found %s brokers.', len(brokers)) except ConfigParser.NoOptionError, e: try: host = _host port = _port brokers = [(host, int(port))] except ConfigParser.NoOptionError: log.error('Options incorrectly supplied for either single broker or \ broker network. Please check configuration') log.error('System will exit.') log.info(LOG_BREAK) print 'SSM failed to start. See log file for details.' sys.exit(1) except ldap.LDAPError, e: log.error('Could not connect to LDAP server: %s', e) log.error('System will exit.') log.info(LOG_BREAK) print 'SSM failed to start. See log file for details.' sys.exit(1) if len(brokers) == 0: log.error('No brokers available.') log.error('System will exit.') log.info(LOG_BREAK) sys.exit(1) try: server_cert = None verify_server_cert = True try: server_cert = cp.get('certificates','server_cert') try: verify_server_cert = cp.getboolean('certificates', 'verify_server_cert') except ConfigParser.NoOptionError: pass except ConfigParser.NoOptionError: log.info('No server certificate supplied. Will not encrypt messages.') try: destination = cp.get('messaging', 'destination') if destination == '': raise Ssm2Exception('No destination queue is configured.') except ConfigParser.NoOptionError, e: raise Ssm2Exception(e) sender = Ssm2(brokers, _message_path, cert=_cert, key=_key, dest=_topic, use_ssl=_use_ssl, capath=_cadir, enc_cert=server_cert, verify_enc_cert=verify_server_cert) if sender.has_msgs(): sender.handle_connect() sender.send_all() log.info('SSM run has finished.') else: log.info('No messages found to send.') except (Ssm2Exception, CryptoException), e: print 'SSM failed to complete successfully. See log file for details.' 
log.error('SSM failed to complete successfully: %s', e) except Exception, e: print 'SSM failed to complete successfully. See log file for details.' log.error('Unexpected exception in SSM: %s', e) log.error('Exception type: %s', e.__class__) try: sender.close_connection() except UnboundLocalError: # SSM not set up. pass log.info('SSM has shut down.') log.info(LOG_BREAK) if __name__ == '__main__': if (len(sys.argv) != 9): print "Usage: python ssmsend " sys.exit(1) main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6], sys.argv[7], sys.argv[8]) sys.exit(0) nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/Destinations.cpp0000644000000000000000000000012411741502232024731 xustar000000000000000027 mtime=1334215834.878809 27 atime=1513200576.351724 30 ctime=1513200663.237787085 nordugrid-arc-5.4.2/src/services/a-rex/jura/Destinations.cpp0000644000175000002070000000174311741502232025003 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "Destinations.h" namespace Arc { void Destinations::report(Arc::JobLogFile &joblog) { std::string dest_id=joblog["loggerurl"]; //TODO same service URL with different reporting parameters? if (find(dest_id)==end()) //New destination { // Create the appropriate adapter Destination *dest = Destination::createDestination(joblog); if (dest) (*this)[dest_id] = dest; else { Arc::Logger logger(Arc::Logger::rootLogger, "JURA.Destinations"); logger.msg(Arc::ERROR, "Unable to create adapter for the specific reporting destination type"); return; } } (*this)[dest_id]->report(joblog); } Destinations::~Destinations() { for (Destinations::iterator it=begin(); it!=end(); ++it) { delete (*it).second; } } } nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/CARDestination.h0000644000000000000000000000012412042216423024540 xustar000000000000000027 mtime=1351163155.111915 27 atime=1513200576.352724 30 ctime=1513200663.246787195 nordugrid-arc-5.4.2/src/services/a-rex/jura/CARDestination.h0000644000175000002070000000261212042216423024606 0ustar00mockbuildmock00000000000000#ifndef CARDESTINATION_H #define CARDESTINATION_H #include "Destination.h" #include "JobLogFile.h" #include #include #include #include #include #include #include #include #include namespace Arc { /** Reporting destination adapter for EMI. */ class CARDestination:public Destination { private: Arc::Logger logger; Arc::MCCConfig cfg; Arc::URL service_url; std::string output_dir; /** Max number of URs to put in a set before submitting it */ int max_ur_set_size; /** Actual number of usage records in set */ int urn; /** List of copies of job logs */ std::list joblogs; /** Usage Record set XML */ Arc::XMLNode usagerecordset; int submit_batch(); Arc::MCC_Status send_request(const std::string &urset); void clear(); public: /** Constructor. Service URL and CAR-related parameters (e.g. UR * batch size) are extracted from the given job log file. */ CARDestination(JobLogFile& joblog); /** Generates record from job log file content, collects it into the * UR batch, and if batch is full, submits it to the service. 
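* (One CARDestination is created for each distinct reporting URL by Destination::createDestination(), cf. Destinations.cpp above; this method is then invoked once per parsed job log file.)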
*/ void report(JobLogFile& joblog); void finish(); ~CARDestination(); }; } #endif nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/CARAggregation.h0000644000000000000000000000012412331670145024514 xustar000000000000000027 mtime=1399287909.710558 27 atime=1513200576.325724 30 ctime=1513200663.249787232 nordugrid-arc-5.4.2/src/services/a-rex/jura/CARAggregation.h0000644000175000002070000000473112331670145024566 0ustar00mockbuildmock00000000000000#ifndef CARAGGREGATION_H #define CARAGGREGATION_H #include #include #include #include namespace Arc { /** Aggregation record collecting and reporting for APEL. */ class CARAggregation { private: Arc::Logger logger; Arc::MCCConfig cfg; std::string host; std::string port; std::string topic; /** Require to set to true this option by production message broker */ std::string use_ssl; /** File name extension */ int sequence; /** location of Aggregation Records */ std::string aggr_record_location; bool aggr_record_update_need; bool synch_message; /** Aggregation Record set XML */ Arc::XMLNode aggregationrecordset; Arc::NS ns; Arc::NS ns_query; void init(std::string _host, std::string _port, std::string _topic); /** Send records to the accounting server. */ Arc::MCC_Status send_records(const std::string &urset); /** Update all records sending dates */ void UpdateLastSendingDate(); /** Update records sending dates that contains in the list */ void UpdateLastSendingDate(Arc::XMLNodeList& records); /** Remove all local information from the XMLNode */ void NodeCleaning(Arc::XMLNode node); std::string Current_Time( time_t parameter_time = time(NULL) ); /** APEL Synch record generation from the CAR aggregation record */ std::string SynchMessage(Arc::XMLNode records); /** Update the "name" Child of the "node" with the time value. * It will create only 10 element with same name and update always * the latest one.*/ void UpdateElement(Arc::XMLNode& node, std::string name, std::string time); public: /** * Constructor for information collection. */ CARAggregation(std::string _host); /** * Constructor for record reporting. */ CARAggregation(std::string _host, std::string _port, std::string _topic, bool synch); ~CARAggregation(); /** Generated record from CAR record, collects it into the * CAR aggregation. */ void UpdateAggregationRecord(Arc::XMLNode& ur); /** Save the records to the disc. */ int save_records(); /** Reporting a required record to the accounting server. */ bool Reporting_records(std::string year, std::string month=""); /** Reporting all records to the accounting server. 
*/ bool Reporting_records(bool force_resend=false); }; } #endif nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/Destination.cpp0000644000000000000000000000012412536031137024552 xustar000000000000000027 mtime=1433940575.569596 27 atime=1513200576.393724 30 ctime=1513200663.240787122 nordugrid-arc-5.4.2/src/services/a-rex/jura/Destination.cpp0000644000175000002070000001054212536031137024621 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "Destination.h" #include "LutsDestination.h" #include "ApelDestination.h" #include "CARDestination.h" namespace Arc { Destination* Destination::createDestination(Arc::JobLogFile &joblog) { std::string url=joblog["loggerurl"]; if (url.substr(0,3) == "CAR") { return new CARDestination(joblog); } //TODO distinguish if ( !joblog["topic"].empty() || url.substr(0,4) == "APEL"){ return new ApelDestination(joblog); }else{ return new LutsDestination(joblog); } } // Current time calculation and convert to the UTC time format. std::string Destination::Current_Time( time_t parameter_time ){ time_t rawtime; if ( parameter_time == time(NULL) ){ time ( &rawtime ); //current time } else { rawtime = parameter_time; } tm * ptm; ptm = gmtime ( &rawtime ); std::string mon_prefix = (ptm->tm_mon+1 < 10)?"0":""; std::string day_prefix = (ptm->tm_mday < 10)?"0":""; std::string hour_prefix = (ptm->tm_hour < 10)?"0":""; std::string min_prefix = (ptm->tm_min < 10)?"0":""; std::string sec_prefix = (ptm->tm_sec < 10)?"0":""; std::stringstream out; if ( parameter_time == time(NULL) ){ out << ptm->tm_year+1900<<"-"<tm_mon+1<<"-"<tm_mday<<"T"<tm_hour<<":"<tm_min<<":"<tm_sec<<"+0000"; } else { out << ptm->tm_year+1900<tm_mon+1<tm_mday<<"."<tm_hour<tm_min<tm_sec; } return out.str(); } Arc::MCC_Status Destination::OutputFileGeneration(std::string prefix, Arc::URL url, std::string output_dir, std::string message, Arc::Logger& logger){ //Filename generation int sequence = 0; std::string output_filename = prefix+"_records_" +url.Host() + "_" + Current_Time(); logger.msg(Arc::DEBUG, "UR set dump: %s", output_dir); if (!output_dir.empty()) { // local copy creation std::string output_path; output_path = output_dir; if (output_dir[output_dir.size()-1] != '/'){ output_path = output_dir + "/"; } output_path += output_filename; std::ifstream ifile(output_path.c_str()); if (ifile) { // The file exists, and create new filename sequence++; std::stringstream ss; ss << sequence; output_path += ss.str(); output_filename += ss.str(); } else { sequence=0; } ifile.close(); //Save all records into the output file. const char* filename(output_path.c_str()); std::ofstream outputfile; outputfile.open (filename); if (outputfile.is_open()) { outputfile << message; outputfile.close(); logger.msg(Arc::DEBUG, "Backup file (%s) created.", output_filename); } else { return Arc::MCC_Status(Arc::PARSING_ERROR, prefix + "client", std::string( "Error opening file: " )+ filename ); } } return Arc::MCC_Status(STATUS_OK); } void Destination::log_sent_ids(Arc::XMLNode usagerecordset, int nr_of_records, Arc::Logger &logger, std::string type) { Arc::NS ns_query; std::string query = ""; if ( type == "" ) { ns_query["urf"] = "http://schema.ogf.org/urf/2003/09/urf"; query = "//JobUsageRecord/RecordIdentity"; } else if ( type == "APEL" ) { ns_query["urf"] = "http://eu-emi.eu/namespaces/2012/11/computerecord"; query = "//UsageRecord/RecordIdentity"; } Arc::XMLNodeList list = usagerecordset.XPathLookup(query,ns_query); logger.msg(Arc::DEBUG, "Sent jobIDs: (nr. 
of job(s) %d)", nr_of_records); for (std::list::iterator it = list.begin(); it != list.end(); it++) { std::string id = (*it).Attribute("urf:recordId"); //std::size_t found = id.find_last_of("-"); logger.msg(Arc::DEBUG, id); } } } nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/JobLogFile.cpp0000644000000000000000000000012412623101644024243 xustar000000000000000027 mtime=1447855012.232902 27 atime=1513200576.362724 30 ctime=1513200663.235787061 nordugrid-arc-5.4.2/src/services/a-rex/jura/JobLogFile.cpp0000644000175000002070000012571212623101644024320 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "JobLogFile.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include // Needed to redefine mkdir on mingw #ifdef WIN32 #include #endif namespace Arc { std::string replaceChar(std::string str, char ch1, char ch2) { for (int i = 0; i < (int)str.length(); ++i) { if (str[i] == ch1) str[i] = ch2; } return str; } std::vector ParseVOAttr(std::string cert_str) { cert_str=replaceChar(cert_str,'\\', '\n'); Arc::Credential holder(cert_str, "", "", "", "", false); std::string ca_dir = Arc::GetEnv("X509_CERT_DIR"); std::string voms_dir = Arc::GetEnv("X509_VOMS_DIR"); Arc::VOMSTrustList voms_trust_dn; voms_trust_dn.AddRegex(".*"); std::vector voms_attributes; parseVOMSAC(holder, ca_dir, "", voms_dir, voms_trust_dn, voms_attributes, true, true); //parseVOMSAC(cert_str, ca_dir, "", voms_dir, voms_trust_dn, voms_attributes, true, true); return voms_attributes; } static std::string vo_filters_previous=""; static std::multimap log_vo_map; bool VOFilterCheck(std::string vo_filters, std::string voname, std::string loggerurl) { bool retval=true; std::multimap local_vo_map; if (vo_filters == "") { // Accept all messages return true; } if (vo_filters == vo_filters_previous) { // tokenized it before local_vo_map=log_vo_map; } else { //tokenize vo_filters string to map char * pairs; char delimiter = ','; pairs = strtok (strdup(vo_filters.c_str()), &delimiter); while (pairs != NULL) { std::string pairStr = (std::string)pairs; std::size_t found = pairStr.find_first_of(" "); // left trim std::size_t firstChar = pairStr.find_first_not_of(' '); if ( firstChar > found ) { pairStr.erase (0,firstChar); } // right trim pairStr.erase(pairStr.find_last_not_of(" \n\r\t")+1); found = pairStr.find_first_of(" "); std::string vo = pairStr.substr(0,found); std::string url = pairStr.substr(found+1); Arc::Logger::rootLogger.msg(Arc::DEBUG, "Insert filter element: <%s,%s>",url, vo); local_vo_map.insert ( std::pair(url,vo) ); pairs = strtok (NULL, &delimiter); } vo_filters_previous = vo_filters; log_vo_map=local_vo_map; } if ( local_vo_map.count(loggerurl) == 0 ) { // No filter set for this logger URL // Accept the record Arc::Logger::rootLogger.msg(Arc::VERBOSE, "Not set filter for this URL (%s).", loggerurl); } else { // Contains VO filter option for this logger URL bool acceptVO=false; std::map::iterator it; Arc::Logger::rootLogger.msg(Arc::DEBUG, "Current job's VO name: %s", voname); for (it=local_vo_map.equal_range(loggerurl).first; it!=local_vo_map.equal_range(loggerurl).second; ++it) { Arc::Logger::rootLogger.msg(Arc::DEBUG, "VO filter for host: %s", it->second); if ( it->second == voname ) { acceptVO=true; break; } } retval = acceptVO; } return retval; } int JobLogFile::parse(const std::string& _filename) { int count=0; //number of parsed values clear(); filename=_filename; if (!exists()) return -1; 
std::ifstream logfile(filename.c_str(),std::ios::in); std::string line; while (logfile.good()) { std::getline(logfile,line); size_type e=line.find('='); if (e!=std::string::npos) { count++; std::string key=line.substr(0, e), value=line.substr(e+1, std::string::npos); (*this)[key]=value; if ( key == "inputfile") { inputfiles.push_back(value); } if ( key == "outputfile") { outputfiles.push_back(value); } } } logfile.close(); //Parse jobreport_options string! std::string jobreport_opts=(*this)["accounting_options"]; std::string option; size_type pcomma=jobreport_opts.find(','); size_type pcolon; while (pcomma!=std::string::npos) { option=jobreport_opts.substr(0,pcomma); // separate opt_name:value pair pcolon=option.find(':'); if (pcolon!=std::string::npos) { std::string key=option.substr(0, pcolon), value=option.substr(pcolon+1, std::string::npos); (*this)[std::string("jobreport_option_")+key]=value; } //next: jobreport_opts=jobreport_opts.substr(pcomma+1, std::string::npos); pcomma=jobreport_opts.find(','); } option=jobreport_opts; pcolon=option.find(':'); if (pcolon!=std::string::npos) { std::string key=option.substr(0, pcolon), value=option.substr(pcolon+1, std::string::npos); (*this)[std::string("jobreport_option_")+key]=value; } return count; } void JobLogFile::createUsageRecord(Arc::XMLNode &usagerecord, const char *recordid_prefix) { //*** //If archiving is enabled: first try to load archived UR std::string archive_fn=getArchivingPath(); if (!archive_fn.empty()) { errno=0; if (usagerecord.ReadFromFile(archive_fn)) { Arc::Logger::rootLogger.msg(Arc::VERBOSE, "Read archive file %s", archive_fn.c_str()); return; } Arc::Logger::rootLogger.msg(Arc::VERBOSE, "Could not read archive file %s for job log file %s (%s), generating new Usage Record", archive_fn.c_str(), filename.c_str(), StrError(errno)); } //Otherwise go on and create new UR //*** Arc::NS ns_ur; //Namespaces defined by OGF ns_ur[""]="http://schema.ogf.org/urf/2003/09/urf"; ns_ur["urf"]="http://schema.ogf.org/urf/2003/09/urf"; ns_ur["xsd"]="http://www.w3.org/2001/XMLSchema"; ns_ur["xsi"]="http://www.w3.org/2001/XMLSchema-instance"; ns_ur["ds"]="http://www.w3.org/2000/09/xmldsig#"; ns_ur["arc"]="http://www.nordugrid.org/ws/schemas/ur-arc"; ns_ur["vo"]="http://www.sgas.se/namespaces/2009/05/ur/vo"; ns_ur["tr"]="http://www.sgas.se/namespaces/2010/10/filetransfer"; //Get node names std::list nodenames; std::string mainnode; std::string nodestr=(*this)["nodename"]; size_type pcolon=nodestr.find(':'); while (pcolon!=std::string::npos) { nodenames.push_back(nodestr.substr(0,pcolon)); nodestr=nodestr.substr(pcolon+1,std::string::npos); pcolon=nodestr.find(':'); } if (!nodestr.empty()) nodenames.push_back(nodestr); if (!nodenames.empty()) mainnode=*(nodenames.begin()); //Get runtime environments std::list rtes; std::string rtestr=(*this)["runtimeenvironment"]; size_type pspace=rtestr.find(" "); while (pspace!=std::string::npos) { std::string rte=rtestr.substr(0,pspace); if (!rte.empty()) rtes.push_back(rte); rtestr=rtestr.substr(pspace+1,std::string::npos); pspace=rtestr.find(" "); } if (!rtestr.empty()) rtes.push_back(rtestr); //Fill this Usage Record Arc::XMLNode ur(ns_ur,"JobUsageRecord"); //RecordIdentity, GlobalJobId, LocalJobId if (find("ngjobid")!=end()) { // Timestamp for record, required std::string nowstamp=Arc::Time().str(Arc::UTCTime); Arc::XMLNode rid=ur.NewChild("RecordIdentity"); rid.NewAttribute("urf:createTime")=nowstamp; //NOTE! Current LUTS also sets a "creationTime"[sic!] 
for each record // ID for record if (find("headnode")!=end()) { Arc::URL headnode((*this)["headnode"]); rid.NewAttribute("urf:recordId")= std::string(recordid_prefix) + headnode.Host() + '-' + (*this)["ngjobid"]; } else if (!mainnode.empty()) rid.NewAttribute("urf:recordId")= std::string(recordid_prefix) + mainnode + '-' + (*this)["ngjobid"]; else rid.NewAttribute("urf:recordId")= std::string(recordid_prefix) + (*this)["ngjobid"]; ur.NewChild("JobIdentity").NewChild("GlobalJobId")= (*this)["globalid"]; if (find("localid")!=end()) { std::string prefix = ""; if (find("jobreport_option_localid_prefix")!=end()) prefix = (*this)["jobreport_option_localid_prefix"] +"-"; ur["JobIdentity"].NewChild("LocalJobId")=prefix + (*this)["localid"]; } } else { //TODO what if not valid? Arc::Logger::rootLogger.msg(Arc::VERBOSE, "Missing required Usage Record element \"RecordIdentity\", in job log file %s", filename.c_str()); usagerecord.Destroy(); return; } //ProcessId? //GlobalUser[Nn]ame, LocalUserId //TODO clarify case //NOTE! original JARM used "GlobalUserId" if (find("usersn")!=end()) { ur.NewChild("UserIdentity"); if (find("localuser")!=end()) ur["UserIdentity"].NewChild("LocalUserId")= (*this)["localuser"]; ur["UserIdentity"].NewChild("GlobalUserName")= (*this)["usersn"]; } //VO Attributes if (find("usercert")!=end()) { std::vector voms_attributes = ParseVOAttr((*this)["usercert"]); std::string loggerurl=(*this)["loggerurl"]; bool needToSend = false; for(int n = 0; n<(int)voms_attributes.size(); ++n) { if(voms_attributes[n].attributes.size() > 0) { if ( !ur["UserIdentity"] ) { ur.NewChild("UserIdentity"); } Arc::XMLNode vo=ur["UserIdentity"].NewChild("vo:VO"); vo.NewAttribute("vo:type")="voms"; std::string voname=voms_attributes[n].voname; vo.NewChild("vo:Name")=voname; vo.NewChild("vo:Issuer")=voms_attributes[n].issuer; if ( find("vo_filters")!=end() ) { if ( !needToSend ) { needToSend = VOFilterCheck((*this)["vo_filters"], voname, loggerurl); if ( !needToSend ) { Arc::Logger::rootLogger.msg(Arc::DEBUG, "VO (%s) not set for this (%s) SGAS server by VO filter.", voname, loggerurl); } } } else { needToSend = true; } for(int i = 0; i < (int)voms_attributes[n].attributes.size(); i++) { std::string attr = voms_attributes[n].attributes[i]; std::string::size_type pos; if((pos = attr.find("hostname=")) != std::string::npos) { continue; } Arc::XMLNode vo_attr=vo.NewChild("vo:Attribute"); if(attr.find("Role=") == std::string::npos || attr.find("Capability=") == std::string::npos) { vo_attr.NewChild("vo:Group")=attr; } if((pos = attr.find("Role=")) != std::string::npos) { std::string str = attr.substr(pos+5); vo_attr.NewChild("vo:Role")=str; } if((pos = attr.find("Capability=")) != std::string::npos) { std::string str = attr.substr(pos+11); vo_attr.NewChild("vo:Capability")=str; } } } } if ( !needToSend && (int)voms_attributes.size() > 0) { Arc::Logger::rootLogger.msg(Arc::INFO, "[VO filter] Job log will be not send. 
%s.", filename.c_str()); usagerecord.Destroy(); return; } } if (find("jobreport_option_vomsless_vo")!=end() && !ur["UserIdentity"]["vo:VO"]) { if ( !ur["UserIdentity"] ) { ur.NewChild("UserIdentity"); } Arc::XMLNode vo=ur["UserIdentity"].NewChild("vo:VO"); vo.NewAttribute("vo:type")="voms"; std::string option=(*this)["jobreport_option_vomsless_vo"]; std::string name=option; std::string issuer=option; // separate opt_vo_name#opt_vo_issuer pair pcolon=option.find('#'); if (pcolon!=std::string::npos) { name=option.substr(0, pcolon), issuer=option.substr(pcolon+1, std::string::npos); } vo.NewChild("vo:Name")=name; vo.NewChild("vo:Issuer")=issuer; vo.NewChild("vo:Attribute").NewChild("vo:Group")="/"+name; } if (find("jobreport_option_vo_group")!=end()) { bool find = false; if ( !ur["UserIdentity"] ) { ur.NewChild("UserIdentity"); } Arc::XMLNode useridentity = ur["UserIdentity"]; Arc::XMLNode node = useridentity["vo:VO"]["vo:Attribute"]["vo:Group"]; while ( bool(node) ){ if (std::string(node) == (*this)["jobreport_option_vo_group"]) { find = true; break; } ++node; } if ( !find ) { if ( !useridentity["vo:VO"] ) { Arc::XMLNode vo = useridentity.NewChild("vo:VO"); vo.NewAttribute("vo:type")="voms"; } if ( !useridentity["vo:VO"]["vo:Attribute"] ) { useridentity["vo:VO"].NewChild("vo:Attribute"); } useridentity["vo:VO"]["vo:Attribute"].NewChild("vo:Group")=(*this)["jobreport_option_vo_group"].substr(1); } } //JobName if (find("jobname")!=end()) { ur.NewChild("JobName")=(*this)["jobname"]; } //Charge? //Status if (find("status")!=end()) { ur.NewChild("Status")=(*this)["status"]; //TODO convert? } else { //TODO what if not valid? Arc::Logger::rootLogger.msg(Arc::VERBOSE, "Missing required element \"Status\" in job log file %s", filename.c_str()); usagerecord.Destroy(); return; } //--- //Network? //Disk? //Memory if (find("usedmemory")!=end() && (*this)["usedmemory"]!="0") { Arc::XMLNode memn=ur.NewChild("Memory")=(*this)["usedmemory"]; memn.NewAttribute("urf:storageUnit")="KB"; memn.NewAttribute("urf:metric")="average"; memn.NewAttribute("urf:type")="virtual"; } if (find("usedmaxresident")!=end() && (*this)["usedmaxresident"]!="0") { Arc::XMLNode memn=ur.NewChild("Memory")=(*this)["usedmaxresident"]; memn.NewAttribute("urf:storageUnit")="KB"; memn.NewAttribute("urf:metric")="max"; memn.NewAttribute("urf:type")="physical"; } if (find("usedaverageresident")!=end() && (*this)["usedaverageresident"]!="0") { Arc::XMLNode memn=ur.NewChild("Memory")=(*this)["usedaverageresident"]; memn.NewAttribute("urf:storageUnit")="KB"; memn.NewAttribute("urf:metric")="average"; memn.NewAttribute("urf:type")="physical"; } //Swap? //TimeDuration, TimeInstant, ServiceLevel? 
//--- //WallDuration if (find("usedwalltime")!=end()) { Arc::Period walldur((*this)["usedwalltime"],Arc::PeriodSeconds); std::string walld = (std::string)walldur; if (walld == "P"){ walld = "PT0S"; } ur.NewChild("WallDuration")=walld; } //CpuDuration if (find("usedusercputime")!=end() && find("usedkernelcputime")!=end()) { Arc::Period udur((*this)["usedusercputime"],Arc::PeriodSeconds); Arc::Period kdur((*this)["usedkernelcputime"],Arc::PeriodSeconds); std::string udurs = (std::string)udur; if (udurs == "P"){ udurs = "PT0S"; } Arc::XMLNode udurn=ur.NewChild("CpuDuration")=udurs; udurn.NewAttribute("urf:usageType")="user"; std::string kdurs = (std::string)kdur; if (kdurs == "P"){ kdurs = "PT0S"; } Arc::XMLNode kdurn=ur.NewChild("CpuDuration")=kdurs; kdurn.NewAttribute("urf:usageType")="system"; } else if (find("usedcputime")!=end()) { Arc::Period cpudur((*this)["usedcputime"],Arc::PeriodSeconds); std::string cpudurs = (std::string)cpudur; if (cpudurs == "P"){ cpudurs = "PT0S"; } ur.NewChild("CpuDuration")=cpudurs; } //StartTime if (find("submissiontime")!=end()) { Arc::Time starttime((*this)["submissiontime"]); ur.NewChild("StartTime")=starttime.str(Arc::UTCTime); } //EndTime if (find("endtime")!=end()) { Arc::Time endtime((*this)["endtime"]); ur.NewChild("EndTime")=endtime.str(Arc::UTCTime); } //MachineName if (find("headnode")!=end()) { std::string machinename=(*this)["headnode"]; size_t first = machinename.find(":"); size_t last = machinename.find(":", first+1); if (last == std::string::npos) { last=machinename.length(); } ur.NewChild("MachineName")=machinename.substr(first+3,last-first-3); } //Host if (!mainnode.empty()) { Arc::XMLNode primary_node=ur.NewChild("Host"); primary_node=mainnode; primary_node.NewAttribute("urf:primary")="true"; std::list::iterator it=nodenames.begin(); ++it; while (it!=nodenames.end()) { ur.NewChild("Host")=*it; ++it; } } //SubmitHost if (find("clienthost")!=end()) { // Chop port no. 
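// e.g. a clienthost value such as "ce.example.org:2811" (hypothetical) yields
// SubmitHost "ce.example.org"; a value without a port is used unchanged.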
std::string hostport=(*this)["clienthost"], host; size_type clnp=hostport.find(":"); if (clnp==std::string::npos) host=hostport; else host=hostport.substr(0,clnp); ur.NewChild("SubmitHost")=host; } //Headnode /* Unhandled UR element by SGAS if (find("headnode")!=end()) { ur.NewChild("Headnode")=(*this)["headnode"]; }*/ //Queue if (find("queue")!=end()) { ur.NewChild("Queue")=(*this)["queue"]; } //ProjectName if (find("projectname")!=end()) { ur.NewChild("ProjectName")=(*this)["projectname"]; } //NodeCount if (find("nodecount")!=end()) { ur.NewChild("NodeCount")=(*this)["nodecount"]; } //Processors if (find("processors")!=end()) { ur.NewChild("Processors")=(*this)["processors"]; } //Transfer statistics if (find("inputfile")!=end()) { parseInputOutputFiles(ur, inputfiles); } if (find("outputfile")!=end()) { parseInputOutputFiles(ur, outputfiles, "output"); } //Extra: //RuntimeEnvironment for(std::list::iterator jt=rtes.begin(); jt!=rtes.end(); ++jt) { ur.NewChild("arc:RuntimeEnvironment")=*jt; } //TODO user id info //*** //Archiving if enabled: if (!archive_fn.empty()) { struct stat st; std::string dir_name=(*this)["jobreport_option_archiving"]; if (stat(dir_name.c_str(),&st)!=0) { Arc::Logger::rootLogger.msg(Arc::VERBOSE, "Creating directory %s", dir_name.c_str()); errno=0; if (mkdir(dir_name.c_str(),S_IRWXU)!=0) { Arc::Logger::rootLogger.msg(Arc::ERROR, "Failed to create archive directory %s: %s", dir_name.c_str(), StrError(errno)); } } Arc::Logger::rootLogger.msg(Arc::VERBOSE, "Archiving Usage Record to file %s", archive_fn.c_str()); errno=0; if (!ur.SaveToFile(archive_fn.c_str())) { Arc::Logger::rootLogger.msg(Arc::ERROR, "Failed to write file %s: %s", archive_fn.c_str(), StrError(errno)); } } //*** usagerecord.Replace(ur); } void JobLogFile::createCARUsageRecord(Arc::XMLNode &usagerecord, const char *recordid_prefix) { //*** //If archiving is enabled: first try to load archived UR std::string archive_fn=getArchivingPath(true); if (!archive_fn.empty()) { errno=0; if (usagerecord.ReadFromFile(archive_fn)) { Arc::Logger::rootLogger.msg(Arc::VERBOSE, "Read archive file %s", archive_fn.c_str()); return; } Arc::Logger::rootLogger.msg(Arc::VERBOSE, "Could not read archive file %s for job log file %s (%s), generating new Usage Record", archive_fn.c_str(), filename.c_str(), StrError(errno)); } //Otherwise go on and create new UR //*** Arc::NS ns_ur; //Namespaces defined by OGF 2.0 (CAR 1.2) ns_ur[""]="http://eu-emi.eu/namespaces/2012/11/computerecord"; ns_ur["urf"]="http://eu-emi.eu/namespaces/2012/11/computerecord"; ns_ur["xsd"]="http://www.w3.org/2001/XMLSchema"; ns_ur["xsi"]="http://www.w3.org/2001/XMLSchema-instance"; //Get node names std::list nodenames; std::string mainnode; std::string nodestr=(*this)["nodename"]; size_type pcolon=nodestr.find(':'); while (pcolon!=std::string::npos) { nodenames.push_back(nodestr.substr(0,pcolon)); nodestr=nodestr.substr(pcolon+1,std::string::npos); pcolon=nodestr.find(':'); } if (!nodestr.empty()) nodenames.push_back(nodestr); if (!nodenames.empty()) mainnode=*(nodenames.begin()); //Get runtime environments std::list rtes; std::string rtestr=(*this)["runtimeenvironment"]; size_type pspace=rtestr.find(" "); while (pspace!=std::string::npos) { std::string rte=rtestr.substr(0,pspace); if (!rte.empty()) rtes.push_back(rte); rtestr=rtestr.substr(pspace+1,std::string::npos); pspace=rtestr.find(" "); } if (!rtestr.empty()) rtes.push_back(rtestr); /**BASE Record properties * * UsageRecord * RecordIdentity * JobIdentity * UserIdentity * JobName * Charge * Status * 
ExitStatus * WallDuration * CpuDuration * EndTime * StartTime * MachineName * Host * SubmitHost * Site * Infrastructure */ /** Differentiated Record Properties * * Queue * Middleware * Memory * Swap * NodeCount * Processors * TimeInstant * ServiceLevel */ //Fill this Usage Record //UsageRecord Arc::XMLNode ur(ns_ur,"UsageRecord"); //RecordIdentity, JobIdentity GlobalJobId, LocalJobId if (find("ngjobid")!=end()) { // Timestamp for record, required std::string nowstamp=Arc::Time().str(Arc::UTCTime); Arc::XMLNode rid=ur.NewChild("RecordIdentity"); rid.NewAttribute("urf:createTime")=nowstamp; // ID for record if (find("headnode")!=end()){ Arc::URL headnode((*this)["headnode"]); rid.NewAttribute("urf:recordId")= std::string(recordid_prefix) + headnode.Host() + '-' + (*this)["ngjobid"]; } else if (!mainnode.empty()) rid.NewAttribute("urf:recordId")= std::string(recordid_prefix) + mainnode + '-' + (*this)["ngjobid"]; else rid.NewAttribute("urf:recordId")= std::string(recordid_prefix) + (*this)["ngjobid"]; //GlobalJobId if (find("globalid")!=end()) ur.NewChild("JobIdentity").NewChild("GlobalJobId")= (*this)["globalid"]; //LocalJobId if (!ur["JobIdentity"]) ur.NewChild("JobIdentity"); if (find("localid")!=end()) { ur["JobIdentity"].NewChild("LocalJobId")= (*this)["localid"]; } else { ur["JobIdentity"].NewChild("LocalJobId")= (*this)["ngjobid"]; } } else { Arc::Logger::rootLogger.msg(Arc::VERBOSE, "Missing required Usage Record element \"RecordIdentity\", in job log file %s", filename.c_str()); usagerecord.Destroy(); return; } //ProcessId if (find("???")!=end()) //TODO: not present in the AREX accounting log { if (!ur["JobIdentity"]) ur.NewChild("JobIdentity"); Arc::XMLNode pid = ur["JobIdentity"].NewChild("ProcessId")= (*this)["???"]; pid.NewAttribute("urf:Host")=(*this)["nodename"]; } //UserIdentity Arc::XMLNode useridentity = ur.NewChild("UserIdentity"); //GlobalUserName if (find("usersn")!=end()) { Arc::XMLNode globalusername = useridentity.NewChild("GlobalUserName")= (*this)["usersn"]; globalusername.NewAttribute("urf:type")="opensslCompat"; } //Group //GroupAttribute if (find("usercert")!=end()) { std::vector voms_attributes = ParseVOAttr((*this)["usercert"]); for(int n = 0; n<(int)voms_attributes.size(); ++n) { if(voms_attributes[n].attributes.size() > 0) { if ( !useridentity["Group"] ) { useridentity.NewChild("Group")=voms_attributes[n].voname; } for(int i = 0; i < (int)voms_attributes[n].attributes.size(); i++) { std::string attr = voms_attributes[n].attributes[i]; std::string::size_type pos; if((pos = attr.find("hostname=")) != std::string::npos) { continue; } else { pos = attr.find("Role="); std::string group = attr.substr(0,pos-1); Arc::XMLNode vog=useridentity.NewChild("GroupAttribute")=group; vog.NewAttribute("urf:type")="vo-group"; if( pos != std::string::npos) { std::string role = attr.substr(pos); Arc::XMLNode vo=useridentity.NewChild("GroupAttribute")=role; vo.NewAttribute("urf:type")="vo-role"; } } } } } } if (find("jobreport_option_vomsless_vo")!=end() && !useridentity["Group"]) { if ( !useridentity["Group"] ) { useridentity.NewChild("Group"); } Arc::XMLNode vo=useridentity["Group"]; std::string option=(*this)["jobreport_option_vomsless_vo"]; std::string name=option; std::string issuer=option; // separate opt_vo_name#opt_vo_issuer pair pcolon=option.find('#'); if (pcolon!=std::string::npos) { name=option.substr(0, pcolon), issuer=option.substr(pcolon+1, std::string::npos); } vo=name; Arc::XMLNode gattr=useridentity.NewChild("GroupAttribute")="/"+name; 
gattr.NewAttribute("urf:type")="vo-role"; } if (find("jobreport_option_vo_group")!=end()) { bool find = false; Arc::XMLNode node = useridentity["GroupAttribute"]; while ( bool(node) ){ if (node.Attribute("urf:type") == "vo-group" && std::string(node) == (*this)["jobreport_option_vo_group"]) { find = true; break; } ++node; } if ( !find ) { if ( !useridentity["Group"] ) { useridentity.NewChild("Group")=(*this)["jobreport_option_vo_group"].substr(1); } Arc::XMLNode vog=useridentity.NewChild("GroupAttribute")=(*this)["jobreport_option_vo_group"]; vog.NewAttribute("urf:type")="vo-group"; } } if (find("projectname")!=end()) { Arc::XMLNode project=useridentity.NewChild("GroupAttribute")=(*this)["projectname"]; project.NewAttribute("urf:type")="ProjectName"; } //LocalUserId if (find("localuser")!=end()) { useridentity.NewChild("LocalUserId")= (*this)["localuser"]; } //LocalGroup //JobName if (find("jobname")!=end()) { ur.NewChild("JobName")=(*this)["jobname"]; } //Charge +unit,formula //Status if (find("status")!=end()) { /* * The available status values are: * aborted, completed, failed, held, queued, started, suspended */ ur.NewChild("Status")=(*this)["status"]; //TODO convert? } else { //TODO what if not valid? Arc::Logger::rootLogger.msg(Arc::VERBOSE, "Missing required element \"Status\" in job log file %s", filename.c_str()); usagerecord.Destroy(); return; } // ExitStatus if (find("exitcode")!=end()) { ur.NewChild("ExitStatus")=(*this)["exitcode"]; } //Infrastructure Arc::XMLNode infran = ur.NewChild("Infrastructure"); std::string type = "local"; if (find("headnode")!=end() && (*this)["lrms"] != "fork" ){ type = "grid"; std::string lrms = (std::string)(*this)["lrms"]; std::transform(lrms.begin(), lrms.end(), lrms.begin(), ::toupper); infran.NewAttribute("urf:description")="JURA-ARC-"+lrms; //Prefix required to the APEL } infran.NewAttribute("urf:type")=type; //Middleware Arc::XMLNode middleware = ur.NewChild("Middleware")=""; middleware.NewAttribute("urf:name")="arc"; middleware.NewAttribute("urf:version")=VERSION; middleware.NewAttribute("urf:description")=PACKAGE_STRING; // WallDuration (EndTime-StartTime) ur.NewChild("WallDuration")="PT0S"; if (find("usedwalltime")!=end()) { Arc::Period walldur((*this)["usedwalltime"],Arc::PeriodSeconds); std::string walld = (std::string)walldur; if (walld == "P"){ walld = "PT0S"; } ur["WallDuration"]=walld; } // CpuDuration /* TODO: This element contains the CPU time consumed. If the job ran * on many cores/processors/nodes/sites, all separate consumptions * shall be aggregated in this value. This as an impact on MPI jobs, * where the consumption of all the 'nodes' of the MPI job get * aggregated into this CPU consumption. This is the way LRMS * accounting work on the batch systems underlying the avaialble * CE implementations. 
*/ if (find("usedusercputime")!=end() && find("usedkernelcputime")!=end()) { Arc::Period udur((*this)["usedusercputime"],Arc::PeriodSeconds); Arc::Period kdur((*this)["usedkernelcputime"],Arc::PeriodSeconds); std::string udurs = (std::string)udur; if (udurs == "P"){ udurs = "PT0S"; } Arc::XMLNode udurn=ur.NewChild("CpuDuration")=udurs; udurn.NewAttribute("urf:usageType")="user"; std::string kdurs = (std::string)kdur; if (kdurs == "P"){ kdurs = "PT0S"; } Arc::XMLNode kdurn=ur.NewChild("CpuDuration")=kdurs; kdurn.NewAttribute("urf:usageType")="system"; Arc::Period all(udur.GetPeriod() + kdur.GetPeriod()); std::string alls = (std::string)all; if (alls == "P"){ alls = "PT0S"; } Arc::XMLNode alln=ur.NewChild("CpuDuration")=alls; alln.NewAttribute("urf:usageType")="all"; } else if (find("usedcputime")!=end()) { Arc::Period cpudur((*this)["usedcputime"],Arc::PeriodSeconds); std::string cpudurs = (std::string)cpudur; if (cpudurs == "P"){ cpudurs = "PT0S"; } Arc::XMLNode alln = ur.NewChild("CpuDuration")=cpudurs; alln.NewAttribute("urf:usageType")="all"; } else { Arc::Logger::rootLogger.msg(Arc::VERBOSE, "Missing required element \"CpuDuration\" in job log file %s", filename.c_str()); usagerecord.Destroy(); return; } //ServiceLevel std::string benchmark_type = "Si2K"; std::string benchmark_value = "1.0"; std::string benchmark_description = ""; if (find("jobreport_option_benchmark_type")!=end()) { std::string type = (*this)["jobreport_option_benchmark_type"]; std::vector accepted_types; accepted_types.push_back("Si2k"); accepted_types.push_back("Sf2k"); accepted_types.push_back("HEPSPEC"); if (std::find(accepted_types.begin(), accepted_types.end(), type) == accepted_types.end()) { Arc::Logger::rootLogger.msg(Arc::WARNING, "Set non standard bechmark type: %s", type); } benchmark_type = type; } if (find("jobreport_option_benchmark_value")!=end()) { std::string value = (*this)["jobreport_option_benchmark_value"]; float float_value = stringtof(value); if (float_value==0) { Arc::Logger::rootLogger.msg(Arc::WARNING, "Ignored incoming benchmark value: %s, Use float value!", value); } else { std::ostringstream ss; ss << float_value; benchmark_value = ss.str(); if (benchmark_value.find(".")==std::string::npos) { benchmark_value +=".0"; } } } if (find("jobreport_option_benchmark_description")!=end()) { benchmark_description = (*this)["jobreport_option_benchmark_description"]; } Arc::XMLNode sleveln = ur.NewChild("ServiceLevel")=benchmark_value; sleveln.NewAttribute("urf:type")=benchmark_type; sleveln.NewAttribute("urf:description")=benchmark_description; //Memory if (find("usedmemory")!=end() && (*this)["usedmemory"] != "0") { Arc::XMLNode memn=ur.NewChild("Memory")=(*this)["usedmemory"]; memn.NewAttribute("urf:type")="Shared"; memn.NewAttribute("urf:metric")="average"; memn.NewAttribute("urf:storageUnit")="KB"; } if (find("usedmaxresident")!=end() && (*this)["usedmaxresident"] != "0") { Arc::XMLNode memn=ur.NewChild("Memory")=(*this)["usedmaxresident"]; memn.NewAttribute("urf:type")="Physical"; memn.NewAttribute("urf:metric")="max"; memn.NewAttribute("urf:storageUnit")="KB"; } if (find("usedaverageresident")!=end() && (*this)["usedaverageresident"] != "0") { Arc::XMLNode memn=ur.NewChild("Memory")=(*this)["usedaverageresident"]; memn.NewAttribute("urf:type")="Physical"; memn.NewAttribute("urf:metric")="average"; memn.NewAttribute("urf:storageUnit")="KB"; } //Swap //TimeInstant //NodeCount if (find("nodecount")!=end()) { ur.NewChild("NodeCount")=(*this)["nodecount"]; } //Processors if 
(find("processors")!=end()) { ur.NewChild("Processors")=(*this)["processors"]; } //EndTime if (find("endtime")!=end()) { Arc::Time endtime((*this)["endtime"]); ur.NewChild("EndTime")=endtime.str(Arc::UTCTime); } //StartTime if (find("submissiontime")!=end()) { Arc::Time starttime((*this)["submissiontime"]); ur.NewChild("StartTime")=starttime.str(Arc::UTCTime); } //MachineName if (find("headnode")!=end()) { Arc::URL machineName((*this)["headnode"]); ur.NewChild("MachineName")=machineName.Host(); } //SubmitHost if (find("headnode")!=end()) { Arc::XMLNode submitn = ur.NewChild("SubmitHost")=(*this)["headnode"]; } //Queue if (find("queue")!=end()) { Arc::XMLNode queue = ur.NewChild("Queue")=(*this)["queue"]; queue.NewAttribute("urf:description")="execution"; } //Site if (find("headnode")!=end()) { Arc::URL machineName((*this)["headnode"]); std::string site = machineName.Host(); // repcale "." to "-" int position = site.find( "." ); // find first space while ( position != (int)std::string::npos ){ site.replace( position, 1, "-" ); position = site.find( ".", position + 1 ); } // to upper case std::locale loc; for (size_t i=0; i::iterator it=nodenames.begin(); ++it; while (it!=nodenames.end()) { ur.NewChild("Host")=*it; ++it; } } /*Aggregated USAGE RECORD * * SummaryRecord * SummaryRecords * Site * Month * Year * UserIdentity * MachineName * SubmitHost * Host * Queue * Infrastructure * Middleware * EarliestEndTime * LatestEndTime * WallDuration * CpuDuration * ServiceLevel * NumberOfJobs * Memory * Swap * NodeCoutn * Processors */ //*** //Archiving if enabled: if (!archive_fn.empty()) { struct stat st; std::string dir_name=(*this)["jobreport_option_archiving"]; if (stat(dir_name.c_str(),&st)!=0) { Arc::Logger::rootLogger.msg(Arc::VERBOSE, "Creating directory %s", dir_name.c_str()); errno=0; if (mkdir(dir_name.c_str(),S_IRWXU)!=0) { Arc::Logger::rootLogger.msg(Arc::ERROR, "Failed to create archive directory %s: %s", dir_name.c_str(), StrError(errno)); } } Arc::Logger::rootLogger.msg(Arc::VERBOSE, "Archiving Usage Record to file %s", archive_fn.c_str()); errno=0; if (!ur.SaveToFile(archive_fn.c_str())) { Arc::Logger::rootLogger.msg(Arc::ERROR, "Failed to write file %s: %s", archive_fn.c_str(), StrError(errno)); } } //*** usagerecord.Replace(ur); } bool JobLogFile::exists() { //TODO cross-platform? 
struct stat s; return (0==stat(filename.c_str(),&s)); } bool JobLogFile::olderThan(time_t age) { struct stat s; return ( ( 0==stat(filename.c_str(),&s) ) && ( (time(NULL)-s.st_mtime) > age ) ); } void JobLogFile::remove() { if (!allow_remove) return; errno=0; int e = ::remove(filename.c_str()); if (e) Arc::Logger::rootLogger.msg(Arc::ERROR,"Failed to delete file %s:%s", filename.c_str(), StrError(errno)); } std::string JobLogFile::getArchivingPath(bool car) { //no archiving dir set if ((*this)["jobreport_option_archiving"].empty()) return std::string(); //if set, archive file name corresponds to original job log file std::string base_fn; size_type seppos=filename.rfind('/'); if (seppos==std::string::npos) base_fn=filename; else base_fn=filename.substr(seppos+1,std::string::npos); if (car) { return (*this)["jobreport_option_archiving"]+"/usagerecordCAR."+base_fn; } return (*this)["jobreport_option_archiving"]+"/usagerecord."+base_fn; } void JobLogFile::parseInputOutputFiles(Arc::XMLNode &node, std::vector &filelist, std::string type) { if ( !node["tr:FileTransfers"] ) { node.NewChild("tr:FileTransfers"); } for (int i=0; i<(int)filelist.size(); i++) { std::string nodeName = "FileDownload"; if ( type == "output") { nodeName = "FileUpload"; } Arc::XMLNode dl = node["tr:FileTransfers"].NewChild("tr:" + nodeName); std::string option=filelist[i]; size_type pcolon=option.find(','); while (pcolon!=std::string::npos) { std::string pair=option.substr(0, pcolon); size_type peqv=pair.find('='); std::string key=pair.substr(0, peqv); std::string value=pair.substr(peqv+1, std::string::npos); if ( key == "url" ) key = "URL"; if ( key == "size" ) key = "Size"; if ( key == "starttime" ) key = "StartTime"; if ( key == "endtime" ) key = "EndTime"; if ( type == "input" ) { if ( key == "bypasscache" ) key = "BypassCache"; if ( key == "fromcache" ) key = "RetrievedFromCache"; if ( value == "yes" ) value = "true"; if ( value == "no" ) value = "false"; } dl.NewChild("tr:"+key)=value; //next: if ( pcolon != option.length() ) { option=option.substr(pcolon+1, std::string::npos); } else { option=""; } pcolon=option.find(','); if ( option.length() > 0 && pcolon==std::string::npos) { pcolon=option.length(); } } } } } // namespace nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/Reporter.h0000644000000000000000000000012412534012676023545 xustar000000000000000027 mtime=1433408958.605316 27 atime=1513200576.352724 30 ctime=1513200663.232787024 nordugrid-arc-5.4.2/src/services/a-rex/jura/Reporter.h0000644000175000002070000000105112534012676023607 0ustar00mockbuildmock00000000000000#ifndef _REPORTER_H #define _REPORTER_H #include #include #include #include #ifdef WIN32 #include #endif #include "Destinations.h" namespace Arc { /** The class for main JURA functionality. Traverses the 'logs' dir * of the given control directory, and reports usage data extracted from * job log files within. */ class Reporter { public: /** Processes job log files in '/logs'. 
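 * Implementations are expected to return 0 on success; ReReporter::report,
 * for instance, returns -1/-2 on directory errors.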
*/ virtual int report()=0; virtual ~Reporter() {}; }; } #endif nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/CARAggregation.cpp0000644000000000000000000000012412462174345025055 xustar000000000000000027 mtime=1422457061.507669 27 atime=1513200576.375724 30 ctime=1513200663.247787207 nordugrid-arc-5.4.2/src/services/a-rex/jura/CARAggregation.cpp0000644000175000002070000004450312462174345025130 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "CARAggregation.h" #include #include "jura.h" #include "Destination.h" #include #include namespace Arc { CARAggregation::CARAggregation(std::string _host): logger(Arc::Logger::rootLogger, "JURA.CARAggregation"), aggr_record_update_need(false), synch_message(false), aggregationrecordset(Arc::NS("","http://eu-emi.eu/namespaces/2012/11/aggregatedcomputerecord"), "SummaryRecords") { init(_host, "", ""); } CARAggregation::CARAggregation(std::string _host, std::string _port, std::string _topic, bool synch): logger(Arc::Logger::rootLogger, "JURA.CARAggregation"), use_ssl("false"), sequence(0), aggr_record_update_need(false), synch_message(false), aggregationrecordset(Arc::NS("","http://eu-emi.eu/namespaces/2012/11/aggregatedcomputerecord"), "SummaryRecords") { synch_message = synch; init(_host, _port, _topic); } void CARAggregation::init(std::string _host, std::string _port, std::string _topic) { ns[""] = "http://eu-emi.eu/namespaces/2012/11/aggregatedcomputerecord"; ns["urf"] = "http://eu-emi.eu/namespaces/2012/11/computerecord"; ns_query["car"] = "http://eu-emi.eu/namespaces/2012/11/aggregatedcomputerecord"; ns_query["urf"] = "http://eu-emi.eu/namespaces/2012/11/computerecord"; // Get cert, key, CA path from environment std::string certfile=Arc::GetEnv("X509_USER_CERT"); std::string keyfile=Arc::GetEnv("X509_USER_KEY"); std::string cadir=Arc::GetEnv("X509_CERT_DIR"); // ...or by default, use host cert, key, CA path if (certfile.empty()) certfile=JURA_DEFAULT_CERT_FILE; if (keyfile.empty()) keyfile=JURA_DEFAULT_KEY_FILE; if (cadir.empty()) cadir=JURA_DEFAULT_CA_DIR; cfg.AddCertificate(certfile); cfg.AddPrivateKey(keyfile); cfg.AddCADir(cadir); host = _host; port = _port; topic = _topic; //read the previous aggregation records std::string default_path = (std::string)JURA_DEFAULT_DIR_PREFIX + "/urs/"; aggr_record_location = default_path + host + "_aggregation_records.xml"; if (!aggregationrecordset.ReadFromFile(aggr_record_location)) { logger.msg(Arc::INFO, "Aggregation record (%s) does not exist, initializing it...", aggr_record_location); if (aggregationrecordset.SaveToFile(aggr_record_location)) { logger.msg(Arc::INFO, "Aggregation record (%s) initialized successfully.", aggr_record_location); } else { logger.msg(Arc::ERROR, "An error occurred while initializing the aggregation record (%s).", aggr_record_location); } } else { logger.msg(Arc::DEBUG, "Aggregation record (%s) read from file successfully.", aggr_record_location); } } int CARAggregation::save_records() { // Save the stored aggregation records if (aggr_record_update_need) { if (aggregationrecordset.SaveToFile(aggr_record_location)) { aggr_record_update_need = false; logger.msg(Arc::INFO, "Aggregation record (%s) stored successfully.", aggr_record_location); } else { logger.msg(Arc::ERROR, "An error occurred while storing the aggregation record (%s).", aggr_record_location); return 1; } } return 0; } Arc::MCC_Status CARAggregation::send_records(const std::string &urset) { //Filename generation std::string output_filename = Current_Time(); char
chars[] = "-+T:"; for (unsigned int i = 0; i < strlen(chars); ++i) { output_filename.erase ( std::remove(output_filename.begin(), output_filename.end(), chars[i]), output_filename.end()); } output_filename=output_filename.substr(0,14); //Save all records into the default folder. std::string default_path = (std::string)JURA_DEFAULT_DIR_PREFIX + "/ssm/"; struct stat st; //directory check if (stat(default_path.c_str(), &st) != 0) { mkdir(default_path.c_str(), S_IRWXU); } //directory check (for host) std::string subdir = default_path + host + "/"; if (stat(subdir.c_str(), &st) != 0) { mkdir(subdir.c_str(), S_IRWXU); } //directory check subdir = subdir + "outgoing/"; default_path = subdir.substr(0,subdir.length()-1); if (stat(subdir.c_str(), &st) != 0) { mkdir(subdir.c_str(), S_IRWXU); } subdir = subdir + "00000000/"; if (stat(subdir.c_str(), &st) != 0) { mkdir(subdir.c_str(), S_IRWXU); } // create message file to the APEL client subdir += output_filename; const char* filename(subdir.c_str()); std::ofstream outputfile; outputfile.open (filename); if (outputfile.is_open()) { outputfile << urset; outputfile.close(); logger.msg(Arc::DEBUG, "APEL aggregation message file (%s) created.", output_filename); } else { return Arc::MCC_Status(Arc::PARSING_ERROR, "apelclient", std::string( "Error opening file: " )+ filename ); } int retval; //ssmsend " std::string command; std::vector ssm_pathes; std::string exec_cmd = "ssmsend"; //RedHat: /usr/libexec/arc/ssm_master ssm_pathes.push_back("/usr/libexec/arc/"+exec_cmd); ssm_pathes.push_back("/usr/local/libexec/arc/"+exec_cmd); // Ubuntu/Debian: /usr/lib/arc/ssm_master ssm_pathes.push_back("/usr/lib/arc/"+exec_cmd); ssm_pathes.push_back("/usr/local/lib/arc/"+exec_cmd); // If you don't use non-standard prefix for a compilation you will // use this extra location. 
std::ostringstream prefix; prefix << INSTPREFIX << "/" << PKGLIBEXECSUBDIR << "/"; ssm_pathes.push_back(prefix.str()+exec_cmd); // Find the location of the ssm_master std::string ssm_command = "./ssm/"+exec_cmd; for (int i=0; i<(int)ssm_pathes.size(); i++) { std::ifstream ssmfile(ssm_pathes[i].c_str()); if (ssmfile) { // The file exists, ssm_command = ssm_pathes[i]; ssmfile.close(); break; } } command = ssm_command; command += " " + host; //host command += " " + port; //port command += " " + topic; //topic command += " " + cfg.key; //certificate key command += " " + cfg.cert; //certificate command += " " + cfg.cadir; //cadir command += " " + default_path; //messages path command += " " + use_ssl; //use_ssl command += ""; retval = system(command.c_str()); logger.msg(Arc::DEBUG, "system retval: %d", retval); if (retval == 0) { return Arc::MCC_Status(Arc::STATUS_OK, "apelclient", "APEL message sent."); } else { return Arc::MCC_Status(Arc::GENERIC_ERROR, "apelclient", "Some error has during the APEL message sending."); } } void CARAggregation::UpdateAggregationRecord(Arc::XMLNode& ur) { std::string endtime = ur["EndTime"]; std::string year = endtime.substr(0,4); std::string month = endtime.substr(5,2); std::string queue = ur["Queue"]; logger.msg(Arc::DEBUG, "year: %s", year); logger.msg(Arc::DEBUG, "moth: %s", month); logger.msg(Arc::DEBUG, "queue: %s", queue); std::string query("//car:SummaryRecords/car:SummaryRecord[car:Year='"); query += year; query += "' and car:Month='" + month; std::string queryPrefix(query); query += "' and Queue='" + queue; query += "']"; logger.msg(Arc::DEBUG, "query: %s", query); Arc::XMLNodeList list = aggregationrecordset.XPathLookup(query,ns_query); logger.msg(Arc::DEBUG, "list size: %d", (int)list.size()); if ( list.size() == 0 ) { // When read XML from file create a namespace for the Queue element. 
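// Records reloaded from disk expose the Queue element with the car: prefix, so if
// the first lookup finds nothing the query is retried with car:Queue.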
query = queryPrefix + "' and car:Queue='" + queue + "']"; list = aggregationrecordset.XPathLookup(query,ns_query); } /** * CAR aggregation record elements: * Site* * Month* * Year* * UserIdentity * SubmitHost** * Host * Queue * Infrastructure* * Middleware * EarliestEndTime * LatestEndTime * WallDuration* * CpuDuration* * ServiceLevel* * NumberOfJobs* * Memory * Swap * NodeCount * Processors * * notes: * mandatory for CAR * ** mandatory for APEL synch * */ if ( list.empty()) { // Not exist Aggregation record for this month Arc::XMLNode new_node(ns,"SummaryRecord"); new_node.NewChild(ur["Site"]); new_node.NewChild("Year") = year; new_node.NewChild("Month") = month; new_node.NewChild(ur["UserIdentity"]); new_node["UserIdentity"]["LocalUserId"].Destroy(); new_node.NewChild(ur["SubmitHost"]); new_node.NewChild(ur["Host"]); new_node.NewChild(ur["Queue"]); new_node.NewChild(ur["Infrastructure"]); new_node.NewChild(ur["Middleware"]); new_node.NewChild("EarliestEndTime") = endtime; new_node.NewChild("LatestEndTime") = endtime; new_node.NewChild(ur["WallDuration"]); new_node.NewChild(ur["CpuDuration"]); new_node.NewChild(ur["ServiceLevel"]); new_node.NewChild("NumberOfJobs") = "1"; //new_node.NewChild("Memory"); //new_node.NewChild("Swap"); new_node.NewChild(ur["NodeCount"]); //new_node.NewChild("Processors"); // Local informations // LastModification new_node.NewChild("LastModification") = Current_Time(); // LastSending // Add new node to the aggregation record collection aggregationrecordset.NewChild(new_node); } else { // Aggregation record exist for this month, comparison needed Arc::XMLNode node = list.front(); // EarliestEndTime std::string endtime = ur["EndTime"]; if ( endtime.compare(node["EarliestEndTime"]) < 0 ) { node["EarliestEndTime"] = endtime; } // LatestEndTime if ( endtime.compare(node["LatestEndTime"]) > 0 ) { node["LatestEndTime"] = endtime; } // WallDuration Arc::Period walldur((std::string)node["WallDuration"]); Arc::Period new_walldur((std::string)ur["WallDuration"]); walldur+=new_walldur; std::string walld = (std::string)walldur; if (walld == "P"){ walld = "PT0S"; } node["WallDuration"] = walld; // CpuDuration Arc::Period cpudur((std::string)node["CpuDuration"]); Arc::Period new_cpudur((std::string)ur["CpuDuration"]); cpudur+=new_cpudur; std::string cpud = (std::string)cpudur; if (cpud == "P"){ cpud = "PT0S"; } node["CpuDuration"] = cpud; // NumberOfJobs std::ostringstream nrofjobs; nrofjobs << stringtoi(((std::string)node["NumberOfJobs"]))+1; node["NumberOfJobs"] = nrofjobs.str(); //node.NewChild("Memory"); //node.NewChild("Swap"); //node.NewChild("NodeCount"); //node.NewChild("Processors"); // Local informations // LastModification node["LastModification"] = Current_Time(); } aggr_record_update_need = true; { //only DEBUG information std::string ss; aggregationrecordset.GetXML(ss,true); logger.msg(Arc::DEBUG, "XML: %s", ss); } logger.msg(Arc::DEBUG, "UPDATE Aggregation Record called."); } CARAggregation::~CARAggregation() { //save_records(); } // Current time calculation and convert to the UTC time format. 
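// Produces "YYYY-MM-DDThh:mm:ss+0000" when called for the current time and the
// more compact "YYYYMMDD.hhmmss" form when an explicit timestamp is passed.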
std::string CARAggregation::Current_Time( time_t parameter_time ){ time_t rawtime; if ( parameter_time == time(NULL) ){ time ( &rawtime ); //current time } else { rawtime = parameter_time; } tm * ptm; ptm = gmtime ( &rawtime ); std::string mon_prefix = (ptm->tm_mon+1 < 10)?"0":""; std::string day_prefix = (ptm->tm_mday < 10)?"0":""; std::string hour_prefix = (ptm->tm_hour < 10)?"0":""; std::string min_prefix = (ptm->tm_min < 10)?"0":""; std::string sec_prefix = (ptm->tm_sec < 10)?"0":""; std::stringstream out; if ( parameter_time == time(NULL) ){ out << ptm->tm_year+1900<<"-"<tm_mon+1<<"-"<tm_mday<<"T"<tm_hour<<":"<tm_min<<":"<tm_sec<<"+0000"; } else { out << ptm->tm_year+1900<tm_mon+1<tm_mday<<"."<tm_hour<tm_min<tm_sec; } return out.str(); } bool CARAggregation::Reporting_records(std::string year, std::string month) { // get the required records std::string query("//car:SummaryRecords/car:SummaryRecord[car:Year='"); query += year; if(!month.empty()){ query += "' and car:Month='" + month; } query += "']"; logger.msg(Arc::DEBUG, "query: %s", query); Arc::XMLNodeList list = aggregationrecordset.XPathLookup(query,ns_query); Arc::XMLNode sendingXMLrecords(Arc::NS("","http://eu-emi.eu/namespaces/2012/11/aggregatedcomputerecord"), "SummaryRecords"); for(Arc::XMLNodeList::iterator liter = list.begin(); liter != list.end(); ++liter) { NodeCleaning(sendingXMLrecords.NewChild(*liter)); } if ( sendingXMLrecords.Size() == 0 ) { logger.msg(Arc::INFO, "Does not sending empty aggregation/synch message."); return true; } // send the required records std::string records; if (synch_message) { //Synch record need to be send records = SynchMessage(sendingXMLrecords); } else { //Aggregation record need to be send sendingXMLrecords.GetXML(records,true); } Arc::MCC_Status status = send_records(records); if ( status != Arc::STATUS_OK ) { return false; } // Update the last sending dates UpdateLastSendingDate(list); // Store the records return save_records(); } bool CARAggregation::Reporting_records(bool force_resend) { Arc::XMLNode sendingXMLrecords(Arc::NS("","http://eu-emi.eu/namespaces/2012/11/aggregatedcomputerecord"), "SummaryRecords"); Arc::XMLNode node = aggregationrecordset["SummaryRecord"]; while (node) { if ( force_resend ) { // force resend all records NodeCleaning(sendingXMLrecords.NewChild(node)); ++node; continue; } Arc::XMLNode lastsendingnode = node["LastSending"]; std::string lastsending= ""; // find the latest LastSending element while ( lastsendingnode ) { lastsending = (std::string)lastsendingnode; ++lastsendingnode; } // collect all modified records if ( lastsending < (std::string)node["LastModification"] ) { NodeCleaning(sendingXMLrecords.NewChild(node)); } ++node; } if ( sendingXMLrecords.Size() == 0 ) { logger.msg(Arc::INFO, "Does not sending empty aggregation/synch message."); return true; } // send all records std::string all_records; if (synch_message) { //Synch record need to be send all_records = SynchMessage(sendingXMLrecords); } else { //Aggregation record need to be send sendingXMLrecords.GetXML(all_records,true); } Arc::MCC_Status status = send_records(all_records); if ( status != Arc::STATUS_OK ) { return false; } // Update the last sending dates UpdateLastSendingDate(); // Store the records return save_records(); } void CARAggregation::UpdateLastSendingDate() { std::string query("//car:SummaryRecords/car:SummaryRecord"); Arc::XMLNodeList list = aggregationrecordset.XPathLookup(query,ns_query); UpdateLastSendingDate(list); } void 
CARAggregation::UpdateLastSendingDate(Arc::XMLNodeList& records) { std::string current_time = Current_Time(); for(Arc::XMLNodeList::iterator liter = records.begin(); liter != records.end(); ++liter) { UpdateElement( *liter, "LastSending", current_time); } if(records.size() > 0){ aggr_record_update_need = true; } } void CARAggregation::UpdateElement(Arc::XMLNode& node, std::string name, std::string time) { Arc::XMLNode mod_node = node[name]; //find latest date Arc::XMLNode cnode = node[name]; for (int i=0; i<10; i++) { if (cnode) { if ( std::string(cnode) < std::string(mod_node)) { mod_node = cnode; break; } } else { // create a new child mod_node = node.NewChild(name); break; } ++cnode; } //update the element mod_node = time; } std::string CARAggregation::SynchMessage(Arc::XMLNode records) { std::string result; //header result = "APEL-sync-message: v0.1\n"; Arc::XMLNode node = records["SummaryRecord"]; while (node) { //Site result += "Site: " + (std::string)node["Site"] + "\n"; //SubmitHost result += "SubmitHost: " + (std::string)node["SubmitHost"] + "/" + (std::string)node["Queue"] + "\n"; //NumberOfJobs result += "NumberOfJobs: " + (std::string)node["NumberOfJobs"] + "\n"; //Month result += "Month: " + (std::string)node["Month"] + "\n"; //Year result += "Year: " + (std::string)node["Year"] + "\n"; result += "%%\n"; ++node; } logger.msg(Arc::DEBUG, "synch message: %s", result); return result; } void CARAggregation::NodeCleaning(Arc::XMLNode node) { /** Remove the local information from the sending record. * These attributes are not CAR related values. */ node["LastModification"].Destroy(); Arc::XMLNode next = (Arc::XMLNode)node["LastSending"]; while (next) { Arc::XMLNode prev = next; ++next; prev.Destroy(); } } } nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/Destinations.h0000644000000000000000000000012311201323305024366 xustar000000000000000027 mtime=1241884357.358858 27 atime=1513200576.352724 29 ctime=1513200663.23978711 nordugrid-arc-5.4.2/src/services/a-rex/jura/Destinations.h0000644000175000002070000000104711201323305024436 0ustar00mockbuildmock00000000000000#ifndef DESTINATIONS_H #define DESTINATIONS_H #include "Destination.h" #include "JobLogFile.h" #include namespace Arc { /** Class to handle a set of reporting destinations. */ class Destinations:public std::map { public: /** Reports the given job log file to a destination. If an adapter * object for the specific destination already exists in the set, * it uses that, otherwise creates a new one. */ void report(Arc::JobLogFile &joblog); ~Destinations(); }; } #endif nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/ApelDestination.h0000644000000000000000000000012412534012676025026 xustar000000000000000027 mtime=1433408958.605316 27 atime=1513200576.347724 30 ctime=1513200663.244787171 nordugrid-arc-5.4.2/src/services/a-rex/jura/ApelDestination.h0000644000175000002070000000361512534012676025100 0ustar00mockbuildmock00000000000000#ifndef APELDESTINATION_H #define APELDESTINATION_H #include "CARAggregation.h" #include "Destination.h" #include "JobLogFile.h" #include #include #include #include #include #include #include #include #include #include namespace Arc { /** Reporting destination adapter for APEL. 
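 * Usage records are spooled as SSM message files for the external ssmsend
 * helper, while CARAggregation maintains the matching monthly summary and
 * synch records.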
*/ class ApelDestination:public Destination { private: Arc::Logger logger; Arc::MCCConfig cfg; Arc::URL service_url; std::string topic; std::string output_dir; /** Max number of URs to put in a set before submitting it */ int max_ur_set_size; bool rereport; /** Must be set to "true" when talking to a production message broker */ std::string use_ssl; /** Actual number of usage records in set */ int urn; /** File name extension */ int sequence; /** List of copies of job logs */ std::list joblogs; /** Usage Record set XML */ Arc::XMLNode usagerecordset; CARAggregation* aggregationManager; void init(std::string serviceurl_, std::string topic_, std::string outputdir_, std::string cert_, std::string key_, std::string ca_); int submit_batch(); Arc::MCC_Status send_request(const std::string &urset); void clear(); public: /** Constructor. Service URL and APEL-related parameters (e.g. UR * batch size) are extracted from the given job log file. */ ApelDestination(JobLogFile& joblog); ApelDestination(std::string url_, std::string topic_); /** Generates a record from the job log file content, collects it into the * UR batch, and if the batch is full, submits it to the service. */ void report(JobLogFile& joblog); void report(std::string& joblog); void finish(); ~ApelDestination(); }; } #endif nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/ReReporter.cpp0000644000000000000000000000012412534013375024364 xustar000000000000000027 mtime=1433409277.187664 27 atime=1513200576.347724 30 ctime=1513200663.233787036 nordugrid-arc-5.4.2/src/services/a-rex/jura/ReReporter.cpp0000644000175000002070000001350412534013375024434 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "jura.h" #include "ReReporter.h" #include "ApelDestination.h" #include "LutsDestination.h" //TODO cross-platform #include #include #include #include #include #include #include #include #include namespace Arc { std::vector &split(const std::string &s, char delim, std::vector &elems) { std::stringstream ss(s); std::string item; while (std::getline(ss, item, delim)) { elems.push_back(item); } return elems; } std::vector split(const std::string &s, char delim) { std::vector elems; split(s, delim, elems); return elems; } /** Constructor. Pass the directory containing archived job log files, * an optional time range, the VO filter string, and the destination URLs * and topics used in interactive mode.
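 * The time range has the form "YYYY.MM.DD-YYYY.MM.DD" (trailing fields may be
 * omitted); only archived records whose modification times fall inside it are
 * re-reported.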
*/ ReReporter::ReReporter(std::string archivedjob_log_dir_, std::string time_range_, std::vector urls_, std::vector topics_, std::string vo_filters_): logger(Arc::Logger::rootLogger, "JURA.ReReporter"), dest(NULL), archivedjob_log_dir(archivedjob_log_dir_), urls(urls_), topics(topics_), vo_filters(vo_filters_) { logger.msg(Arc::INFO, "Initialised, archived job log dir: %s", archivedjob_log_dir.c_str()); time_t currentTime = time(NULL); struct tm *aTime = localtime(¤tTime); start = new tm(); start->tm_year = aTime->tm_year + 1900; start->tm_mon = aTime->tm_mon + 1; start->tm_mday = 1; start->tm_hour = 0; start->tm_min = 0; end = new tm(); end->tm_year = start->tm_year; end->tm_mon = start->tm_mon; end->tm_mday = 31; end->tm_hour = 23; end->tm_min = 59; if (!time_range_.empty()) { logger.msg(Arc::VERBOSE, "Incoming time range: %s", time_range_); std::vector start_stop = split(time_range_, '-'); for (unsigned i=0; i date = split(element, '.'); switch(i) { case 0: if ( date.size() > 2 ) { start->tm_mday = atoi(date[2].c_str()); } if ( date.size() > 1 ) { start->tm_mon = atoi(date[1].c_str()); } if ( date.size() > 0 ) { start->tm_year = atoi(date[0].c_str()); } break; case 1: if ( date.size() > 2 ) { end->tm_mday = atoi(date[2].c_str()); } if ( date.size() > 1 ) { end->tm_mon = atoi(date[1].c_str()); } if ( date.size() > 0 ) { end->tm_year = atoi(date[0].c_str()); } break; } } } logger.msg(Arc::VERBOSE, "Requested time range: %d.%d.%d. 0:00 - %d.%d.%d. %d:%d ", start->tm_year, start->tm_mon, start->tm_mday, end->tm_year, end->tm_mon, end->tm_mday, end->tm_hour, end->tm_min); //Collection of logging destinations: if (!urls.empty()) { logger.msg(Arc::VERBOSE, "Interactive mode."); std::string url=urls.at(0); std::string topic=topics.at(0); if ( !topic.empty() || url.substr(0,4) == "APEL"){ //dest = new ApelReReporter(url, topic); dest = new ApelDestination(url, topic); regexp = "usagerecordCAR.[0-9A-Za-z]+\\.[^.]+$"; }else{ dest = new LutsDestination(url, vo_filters_); regexp = "usagerecord.[0-9A-Za-z]+\\.[^.]+$"; } } } /** * Parse usage data and publish it via the appropriate destination adapter. */ int ReReporter::report() { //Collect job log file names from job log dir //(to know where to get usage data from) DIR *dirp = NULL; dirent *entp = NULL; errno=0; if ( (dirp=opendir(archivedjob_log_dir.c_str()))==NULL ) { logger.msg(Arc::ERROR, "Could not open log directory \"%s\": %s", archivedjob_log_dir.c_str(), StrError(errno) ); return -1; } // Seek "usagerecord[CAR].." files. Arc::RegularExpression logfilepattern(regexp); errno = 0; while ((entp = readdir(dirp)) != NULL) { if (logfilepattern.match(entp->d_name)) { //TODO handle DOS-style path separator! 
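// The archive file's modification time stands in for the job record's date:
// only files with mtime inside the requested [start,end] range are re-reported.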
std::string fname=archivedjob_log_dir+"/"+entp->d_name; struct tm* clock; // create a time structure struct stat attrib; // create a file attribute structure stat(fname.c_str(), &attrib); // get the attributes of file clock = gmtime(&(attrib.st_mtime)); // Get the last modified time and put it into the time structure clock->tm_year+=1900; clock->tm_mon+=1; // checking it is in the given range if ( mktime(start) <= mktime(clock) && mktime(clock) <= mktime(end)) { if ( vo_filters != "") { // TODO: if we want } dest->report(fname); } } errno = 0; } closedir(dirp); if (errno!=0) { logger.msg(Arc::ERROR, "Error reading log directory \"%s\": %s", archivedjob_log_dir.c_str(), StrError(errno) ); return -2; } return 0; } ReReporter::~ReReporter() { delete dest; logger.msg(Arc::INFO, "Finished, job log dir: %s", archivedjob_log_dir.c_str()); } } nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/ApelDestination.cpp0000644000000000000000000000012412536031203025346 xustar000000000000000027 mtime=1433940611.169376 27 atime=1513200576.326724 30 ctime=1513200663.243787159 nordugrid-arc-5.4.2/src/services/a-rex/jura/ApelDestination.cpp0000644000175000002070000002714412536031203025423 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "ApelDestination.h" #include #include "jura.h" #include #include #include #include namespace Arc { ApelDestination::ApelDestination(std::string url_, std::string topic_): logger(Arc::Logger::rootLogger, "JURA.ApelReReporter"), rereport(true), use_ssl("true"), urn(0), sequence(0), usagerecordset(Arc::NS("","http://eu-emi.eu/namespaces/2012/11/computerecord"), "UsageRecords") { init(url_.substr(5), topic_, "", "", "", ""); } ApelDestination::ApelDestination(JobLogFile& joblog): logger(Arc::Logger::rootLogger, "JURA.ApelDestination"), rereport(false), use_ssl("false"), urn(0), sequence(0), usagerecordset(Arc::NS("","http://eu-emi.eu/namespaces/2012/11/computerecord"), "UsageRecords") { init(joblog["loggerurl"].substr(5), joblog["topic"], joblog["outputdir"], joblog["certificate_path"], joblog["key_path"], joblog["ca_certificates_dir"]); //From jobreport_options: std::string urbatch=joblog["jobreport_option_urbatch"]; if (!urbatch.empty()) { std::istringstream is(urbatch); is>>max_ur_set_size; } std::string o_use_ssl=joblog["jobreport_option_use_ssl"]; std::transform(o_use_ssl.begin(), o_use_ssl.end(), o_use_ssl.begin(), tolower); if (o_use_ssl == "true") { use_ssl="true"; } } void ApelDestination::init(std::string serviceurl_,std::string topic_, std::string outputdir_, std::string cert_, std::string key_, std::string ca_) { //Get service URL, cert, key, CA path from job log file std::string serviceurl=serviceurl_; topic=topic_; output_dir=outputdir_; std::string certfile=cert_; std::string keyfile=key_; std::string cadir=ca_; // ...or get them from environment if (certfile.empty()) certfile=Arc::GetEnv("X509_USER_CERT"); if (keyfile.empty()) keyfile=Arc::GetEnv("X509_USER_KEY"); if (cadir.empty()) cadir=Arc::GetEnv("X509_CERT_DIR"); // ...or by default, use host cert, key, CA path if (certfile.empty()) certfile=JURA_DEFAULT_CERT_FILE; if (keyfile.empty()) keyfile=JURA_DEFAULT_KEY_FILE; if (cadir.empty()) cadir=JURA_DEFAULT_CA_DIR; cfg.AddCertificate(certfile); cfg.AddPrivateKey(keyfile); cfg.AddCADir(cadir); // Tokenize service URL std::string host, port, endpoint; if (serviceurl.empty()) { logger.msg(Arc::ERROR, "ServiceURL missing"); } else { Arc::URL url(serviceurl); service_url = url; if (url.Protocol()!="https") { 
logger.msg(Arc::ERROR, "Protocol is %s, should be https", url.Protocol()); } host=url.Host(); std::ostringstream os; os<UpdateAggregationRecord(usagerecord); } else { logger.msg(Arc::INFO,"Ignoring incomplete log file \"%s\"", joblog.getFilename().c_str()); joblog.remove(); } } if (urn==max_ur_set_size) // Batch is full. Submit and delete job log files. submit_batch(); } void ApelDestination::report(std::string &joblog) { //Create UR if can //Arc::XMLNode usagerecord(Arc::NS(), ""); Arc::XMLNode usagerecord; usagerecord.ReadFromFile(joblog); if (usagerecord) { usagerecordset.NewChild(usagerecord); ++urn; //aggregationManager->UpdateAggregationRecord(usagerecord); } else { logger.msg(Arc::INFO,"Ignoring incomplete log file \"%s\"", joblog.c_str()); } if (urn==max_ur_set_size) // Batch is full. Submit and delete job log files. submit_batch(); } void ApelDestination::finish() { if (urn>0) // Send the remaining URs and delete job log files. submit_batch(); if (!rereport) delete aggregationManager; } int ApelDestination::submit_batch() { std::string urstr; usagerecordset.GetDoc(urstr,false); logger.msg(Arc::INFO, "Logging UR set of %d URs.", urn); logger.msg(Arc::DEBUG, "UR set dump: %s", urstr.c_str()); // Communication with Apel server Arc::MCC_Status status=send_request(urstr); if (status.isOk()) { log_sent_ids(usagerecordset, urn, logger, "APEL"); if (!rereport) { // Save the modified aggregation records aggregationManager->save_records(); // Reported the new synch record aggregationManager->Reporting_records(); } // Delete log files for (std::list::iterator jp=joblogs.begin(); jp!=joblogs.end(); jp++ ) { (*jp).remove(); } clear(); return 0; } else // status.isnotOk { logger.msg(Arc::ERROR, "%s: %s", status.getOrigin().c_str(), status.getExplanation().c_str() ); clear(); return -1; } } Arc::MCC_Status ApelDestination::send_request(const std::string &urset) { //Filename generation std::string output_filename = Current_Time(); char chars[] = ".-+T:"; for (unsigned int i = 0; i < strlen(chars); ++i) { output_filename.erase ( std::remove(output_filename.begin(), output_filename.end(), chars[i]), output_filename.end()); } output_filename=output_filename.substr(0,14); if (!output_dir.empty()) { // local copy creation std::string output_path; output_path = output_dir; if (output_dir[output_dir.size()-1] != '/'){ output_path = output_dir + "/"; } output_path += output_filename; std::ifstream ifile(output_path.c_str()); if (ifile) { // The file exists, and create new filename sequence++; std::stringstream ss; ss << sequence; output_path += ss.str(); output_filename += ss.str(); } else { sequence=0; } ifile.close(); //Save all records into the output file. const char* filename(output_path.c_str()); std::ofstream outputfile; outputfile.open (filename); if (outputfile.is_open()) { outputfile << urset; outputfile.close(); logger.msg(Arc::DEBUG, "Backup file (%s) created.", output_filename); } else { return Arc::MCC_Status(Arc::PARSING_ERROR, "apelclient", std::string( "Error opening file: " )+ filename ); } } //Save all records into the default folder. 
std::string default_path = (std::string)JURA_DEFAULT_DIR_PREFIX + "/ssm/"; struct stat st; //directory check if (stat(default_path.c_str(), &st) != 0) { mkdir(default_path.c_str(), S_IRWXU); } //directory check (for host) std::string subdir = default_path + service_url.Host() + "/"; if (stat(subdir.c_str(), &st) != 0) { mkdir(subdir.c_str(), S_IRWXU); } //directory check subdir = subdir + "outgoing/"; default_path = subdir.substr(0,subdir.length()-1); if (stat(subdir.c_str(), &st) != 0) { mkdir(subdir.c_str(), S_IRWXU); } subdir = subdir + "00000000/"; if (stat(subdir.c_str(), &st) != 0) { mkdir(subdir.c_str(), S_IRWXU); } // create message file to the APEL client subdir += output_filename; const char* filename(subdir.c_str()); std::ofstream outputfile; outputfile.open (filename); if (outputfile.is_open()) { outputfile << urset; outputfile.close(); logger.msg(Arc::DEBUG, "APEL message file (%s) created.", output_filename); } else { return Arc::MCC_Status(Arc::PARSING_ERROR, "apelclient", std::string( "Error opening file: " )+ filename ); } int retval; //ssmsend " std::string command; std::vector ssm_pathes; std::string exec_cmd = "ssmsend"; //RedHat: /usr/libexec/arc/ssm_master ssm_pathes.push_back("/usr/libexec/arc/"+exec_cmd); ssm_pathes.push_back("/usr/local/libexec/arc/"+exec_cmd); // Ubuntu/Debian: /usr/lib/arc/ssm_master ssm_pathes.push_back("/usr/lib/arc/"+exec_cmd); ssm_pathes.push_back("/usr/local/lib/arc/"+exec_cmd); // If you don't use non-standard prefix for a compilation you will // use this extra location. std::ostringstream prefix; prefix << INSTPREFIX << "/" << PKGLIBEXECSUBDIR << "/"; ssm_pathes.push_back(prefix.str()+exec_cmd); // Find the location of the ssm_master std::string ssm_command = "./ssm/"+exec_cmd; for (int i=0; i<(int)ssm_pathes.size(); i++) { std::ifstream ssmfile(ssm_pathes[i].c_str()); if (ssmfile) { // The file exists, ssm_command = ssm_pathes[i]; ssmfile.close(); break; } } command = ssm_command; command += " " + service_url.Host(); //host std::stringstream port; port << service_url.Port(); command += " " + port.str(); //port command += " " + topic; //topic command += " " + cfg.key; //certificate key command += " " + cfg.cert; //certificate command += " " + cfg.cadir; //cadir command += " " + default_path; //messages path command += " " + use_ssl; //use_ssl command += ""; retval = system(command.c_str()); logger.msg(Arc::DEBUG, "system retval: %d", retval); if (retval == 0) { return Arc::MCC_Status(Arc::STATUS_OK, "apelclient", "APEL message sent."); } else { return Arc::MCC_Status(Arc::GENERIC_ERROR, "apelclient", "Some error has during the APEL message sending. 
\ See SSM log (/var/spool/arc/ssm/ssmsend.log) for more details."); } } void ApelDestination::clear() { urn=0; joblogs.clear(); usagerecordset.Replace( Arc::XMLNode(Arc::NS("", "http://eu-emi.eu/namespaces/2012/11/computerecord" ), "UsageRecords") ); } ApelDestination::~ApelDestination() { finish(); } } nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/LutsDestination.h0000644000000000000000000000012412534012676025074 xustar000000000000000027 mtime=1433408958.605316 27 atime=1513200576.349724 30 ctime=1513200663.242787146 nordugrid-arc-5.4.2/src/services/a-rex/jura/LutsDestination.h0000644000175000002070000000307512534012676025146 0ustar00mockbuildmock00000000000000#ifndef LUTSDESTINATION_H #define LUTSDESTINATION_H #include "Destination.h" #include "JobLogFile.h" #include #include #include #include #include #include #include #include #include namespace Arc { /** Reporting destination adapter for SGAS LUTS. */ class LutsDestination:public Destination { private: Arc::Logger logger; Arc::MCCConfig cfg; Arc::URL service_url; /** Max number of URs to put in a set before submitting it */ int max_ur_set_size; /** Actual number of usage records in set */ int urn; /** List of copies of job logs */ std::list joblogs; /** Usage Record set XML */ Arc::XMLNode usagerecordset; void init(std::string serviceurl_, std::string cert_, std::string key_, std::string ca_); int submit_batch(); Arc::MCC_Status send_request(const std::string &urset); void clear(); public: /** Constructor. Service URL and LUTS-related parameters (e.g. UR * batch size) are extracted from the given job log file. */ LutsDestination(JobLogFile& joblog); LutsDestination(std::string url_, std::string vo_filter_); /** Generates record from job log file content, collects it into the * UR batch, and if batch is full, submits it to the service. */ void report(JobLogFile& joblog); void report(std::string& joblog); void finish(); ~LutsDestination(); }; } #endif nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/Destination.h0000644000000000000000000000012412536031137024217 xustar000000000000000027 mtime=1433940575.569596 27 atime=1513200576.339724 30 ctime=1513200663.241787134 nordugrid-arc-5.4.2/src/services/a-rex/jura/Destination.h0000644000175000002070000000234612536031137024271 0ustar00mockbuildmock00000000000000#ifndef DESTINATION_H #define DESTINATION_H #include "JobLogFile.h" #include #include namespace Arc { /** Abstract class to represent a reporting destination. * Specific destination types are represented by inherited classes. */ class Destination { public: /** Reports the job log file content to the destination. */ virtual void report(Arc::JobLogFile &joblog)=0; /** Reports the archived job log file content to the destination. */ virtual void report(std::string &joblog) {}; /** Finishes pending submission of records. */ virtual void finish() {}; virtual ~Destination() {} /** Creates an instance of the inherited class corresponding to * the destination for the given job log file. 
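 * Added note: the concrete type is presumably chosen from the job log file's
 * "loggerurl" value (an APEL-style URL giving an ApelDestination, anything else
 * a LutsDestination), mirroring the selection visible in ReReporter.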
*/ static Destination* createDestination(Arc::JobLogFile &joblog); std::string Current_Time( time_t parameter_time = time(NULL) ); Arc::MCC_Status OutputFileGeneration(std::string prefix, Arc::URL url, std::string output_dir, std::string message,Arc::Logger& logger); /** Logged the sent jobIds */ void log_sent_ids(Arc::XMLNode usagerecordset, int nr_of_records, Arc::Logger &logger,std::string type=""); }; } #endif nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/UsageReporter.cpp0000644000000000000000000000012112245341577025066 xustar000000000000000027 mtime=1385546623.754218 27 atime=1513200576.339724 27 ctime=1513200663.230787 nordugrid-arc-5.4.2/src/services/a-rex/jura/UsageReporter.cpp0000644000175000002070000001522212245341577025140 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "jura.h" #include "UsageReporter.h" #include "JobLogFile.h" //TODO cross-platform #include #include #include #include #include #include #include namespace Arc { /** Constructor. Pass name of directory containing job log files, * expiration time of files in seconds, list of URLs in case of * interactive mode. */ UsageReporter::UsageReporter(std::string job_log_dir_, time_t expiration_time_, std::vector urls_, std::vector topics_, std::string vo_filters_, std::string out_dir_): logger(Arc::Logger::rootLogger, "JURA.UsageReporter"), dests(NULL), job_log_dir(job_log_dir_), expiration_time(expiration_time_), urls(urls_), topics(topics_), vo_filters(vo_filters_), out_dir(out_dir_) { logger.msg(Arc::INFO, "Initialised, job log dir: %s", job_log_dir.c_str()); logger.msg(Arc::VERBOSE, "Expiration time: %d seconds", expiration_time); if (!urls.empty()) { logger.msg(Arc::VERBOSE, "Interactive mode.", expiration_time); } //Collection of logging destinations: dests=new Arc::Destinations(); } /** * Parse usage data and publish it via the appropriate destination adapter. */ int UsageReporter::report() { //ngjobid->url mapping to keep track of which loggerurl is replaced //by the '-u' options std::map dest_to_duplicate; //Collect job log file names from job log dir //(to know where to get usage data from) DIR *dirp = NULL; dirent *entp = NULL; errno=0; if ( (dirp=opendir(job_log_dir.c_str()))==NULL ) { logger.msg(Arc::ERROR, "Could not open log directory \"%s\": %s", job_log_dir.c_str(), StrError(errno) ); return -1; } DIR *odirp = NULL; errno=0; if ( !out_dir.empty() && (odirp=opendir(out_dir.c_str()))==NULL ) { logger.msg(Arc::ERROR, "Could not open output directory \"%s\": %s", out_dir.c_str(), StrError(errno) ); closedir(dirp); return -1; } if (odirp != NULL) { closedir(odirp); } // Seek "." files. Arc::RegularExpression logfilepattern("^[0-9A-Za-z]+\\.[^.]+$"); errno = 0; while ((entp = readdir(dirp)) != NULL) { if (logfilepattern.match(entp->d_name)) { //Parse log file Arc::JobLogFile *logfile; //TODO handle DOS-style path separator! std::string fname=job_log_dir+"/"+entp->d_name; logfile=new Arc::JobLogFile(fname); if (!out_dir.empty()) { if ((*logfile)["jobreport_option_archiving"] == "") { (*logfile)["jobreport_option_archiving"] = out_dir; } else { (*logfile)["outputdir"] = out_dir; } } if ( vo_filters != "") { (*logfile)["vo_filters"] = vo_filters; } //A. Non-interactive mode: each jlf is parsed, and if valid, // submitted to the destination given by "loggerurl=..." 
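      // Added note: in this mode each file's age is first checked against the
      // configured expiration time and outdated job log files are simply removed;
      // the remaining ones (with the APEL topic option filled in where needed)
      // are handed to the Destinations container for routing by "loggerurl".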
if (urls.empty()) { // Check creation time and remove it if really too old if( expiration_time>0 && logfile->olderThan(expiration_time) ) { logger.msg(Arc::INFO, "Removing outdated job log file %s", logfile->getFilename().c_str() ); logfile->remove(); } else { // Set topic option if it is needed if ( (*logfile)["loggerurl"].substr(0,4) == "APEL" ) { (*logfile)["topic"] = (*logfile)["jobreport_option_topic"]; } //Pass job log file content to the appropriate //logging destination dests->report(*logfile); //(deep copy performed) } } //B. Interactive mode: submit only to services specified by // command line option '-u'. Avoid repetition if several jlfs // are created with same content and different destination. // Keep all jlfs on disk. else { if ( dest_to_duplicate.find( (*logfile)["ngjobid"] ) == dest_to_duplicate.end() ) { dest_to_duplicate[ (*logfile)["ngjobid"] ]= (*logfile)["loggerurl"]; } //submit only 1x to each! if ( dest_to_duplicate[ (*logfile)["ngjobid"] ] == (*logfile)["loggerurl"] ) { //Duplicate content of log file, overwriting URL with //each '-u' command line option, disabling file deletion Arc::JobLogFile *dupl_logfile= new Arc::JobLogFile(*logfile); dupl_logfile->allowRemove(false); for (int it=0; it<(int)urls.size(); it++) { (*dupl_logfile)["loggerurl"] = urls[it]; if (!topics[it].empty()) { (*dupl_logfile)["topic"] = topics[it]; } //Pass duplicated job log content to the appropriate //logging destination dests->report(*dupl_logfile); //(deep copy performed) } delete dupl_logfile; } } delete logfile; } errno = 0; } closedir(dirp); if (errno!=0) { logger.msg(Arc::ERROR, "Error reading log directory \"%s\": %s", job_log_dir.c_str(), StrError(errno) ); return -2; } return 0; } UsageReporter::~UsageReporter() { delete dests; logger.msg(Arc::INFO, "Finished, job log dir: %s", job_log_dir.c_str()); } } nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/jura.cpp0000644000000000000000000000012412540540374023235 xustar000000000000000027 mtime=1434632444.510699 27 atime=1513200576.350724 30 ctime=1513200663.227786963 nordugrid-arc-5.4.2/src/services/a-rex/jura/jura.cpp0000644000175000002070000001072412540540374023306 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "jura.h" #include //TODO cross-platform #include #include #include //TODO cross-platform #include #include #include #include #include #ifdef WIN32 #include #endif #include "Reporter.h" #include "UsageReporter.h" #include "ReReporter.h" #include "CARAggregation.h" int main(int argc, char **argv) { signal(SIGTTOU,SIG_IGN); signal(SIGTTIN,SIG_IGN); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::rootLogger.addDestination(logcerr); opterr=0; time_t ex_period = 0; std::vector urls; std::vector topics; std::string output_dir; bool aggregation = false; bool sync = false; bool force_resend = false; bool ur_resend = false; std::string resend_range = ""; std::string year = ""; std::string month = ""; std::string vo_filters=""; int n; while((n=getopt(argc,argv,":E:u:t:o:y:F:m:r:afsvL")) != -1) { switch(n) { case ':': { std::cerr<<"Missing argument\n"; return 1; } case '?': { std::cerr<<"Unrecognized option\n"; return 1; } case 'E': { char* p; int i = strtol(optarg,&p,10); if(((*p) != 0) || (i<=0)) { std::cerr<<"Improper expiration period '"<Reporting_records(year, month); } else { aggr->Reporting_records(force_resend); } delete aggr; } return 0; } // The essence: int argind; Arc::Reporter *usagereporter; for (argind=optind ; argindreport(); delete 
usagereporter; } return 0; } nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/jura.1.in0000644000000000000000000000012712540540533023220 xustar000000000000000027 mtime=1434632539.044554 30 atime=1513200650.171627281 30 ctime=1513200663.226786951 nordugrid-arc-5.4.2/src/services/a-rex/jura/jura.1.in0000644000175000002070000000731412540540533023267 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH JURA 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME jura \- Job Usage Reporter of ARC .SH DESCRIPTION The .B jura application is a resource-side accounting (usage logging) client. .SH SYNOPSIS jura [\fB-E\fR \fIexp_time\fR] [\fB-L\fR] [\fB-u\fR \fIurl\fR [\fB-u\fR \fIurl\fR \fB-t\fR \fItopic\fR \fB-u\fR \fIurl\fR ...] ] [\fB-r\fR \fItime_range\fR] \fIctrl_dir\fR [\fIctrl_dir\fR ...] .SH OPTIONS .IP "\fB-E\fR" expiration time (validity length) of job log files in days .IP "\fB-u\fR" logging destination URL .IP "\fB-t\fR" topic for a destination URL .IP "\fB-o\fR" archived accounting records directory .IP "\fB-L\fR" turns on timestamps and logging level in messages printed during execution .IP "\fB-r\fR" time range of archived messages that will be resend .IP "\fB-v\fR" show version of Jura (ARC) .LP .SH ARGUMENTS .IP "\fBctrl_dir\fR" The A-REX control directory for a mapped local UNIX user .LP .SH EXTENDED DESCRIPTION Normally, \fBjura\fR is invoked by A-REX periodically with the appropriate command line arguments, but it can also be executed interactively. Job log files, i.e. files produced by A-REX for each job and stored under \fI/logs\fR contain various usage data and configuration options for accounting (see NORDUGRID-TECH-24 for details of their content). These files are parsed by \fBjura\fR, converted into UR format, and submitted in batches to reporting destinations. A destination is a resource usage logging service; currently SGAS LUTS and CAR 1.2 message propagation are supported. Job log files older than the expiration time specified with the \fB-E\fR option are deleted without parsing. If the \fB-u\fR option is absent, URs are submitted to the destination URLs specified in the job log files. If submitting succeeds, the corresponding job log files are deleted. This is the default, automatic mode of operation. If destination URLs are specified with the \fB-u\fR option, URs are submitted only to these destinations (one UR per job to each destination). In this case, job log files are preserved even if submitting succeeds. If the \fB-r\fR option is absent, \fI\fR parameter be using as location of archived files, URs from the given directory are submitted to the destination URLs specified with \fB-u\fR option in the CLI. .SH ENVIRONMENT VARIABLES .TP .B X509_USER_CERT The location of the certificate of the reporting entity, used only if not specified in job log files. The default is the standard host certificate location, \fI/etc/grid-security/hostcert.pem\fR. .TP .B X509_USER_KEY The location of the private key of the reporting entity, used only if not specified in job log files. The default is the standard host key location, \fI/etc/grid-security/hostkey.pem\fR. .TP .B X509_CERT_DIR The path to the certificates of trusted CAs, used only if not specified in job log files. The default is the standard certificates directory, \fI/etc/grid-security/certificates\fR. .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. 
If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/LutsDestination.cpp0000644000000000000000000000012412536031137025422 xustar000000000000000027 mtime=1433940575.569596 27 atime=1513200576.347724 30 ctime=1513200663.241787134 nordugrid-arc-5.4.2/src/services/a-rex/jura/LutsDestination.cpp0000644000175000002070000001621112536031137025470 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "LutsDestination.h" #include "jura.h" #include #include #include namespace Arc { LutsDestination::LutsDestination(std::string url_, std::string vo_filter_): logger(Arc::Logger::rootLogger, "JURA.LutsReReporter"), urn(0), usagerecordset(Arc::NS("","http://schema.ogf.org/urf/2003/09/urf"), "UsageRecords") { init(url_,"","",""); } LutsDestination::LutsDestination(JobLogFile& joblog): logger(Arc::Logger::rootLogger, "JURA.LutsDestination"), urn(0), usagerecordset(Arc::NS("","http://schema.ogf.org/urf/2003/09/urf"), "UsageRecords") { init(joblog["loggerurl"], joblog["certificate_path"], joblog["key_path"], joblog["ca_certificates_dir"]); //From jobreport_options: std::string urbatch=joblog["jobreport_option_urbatch"]; if (!urbatch.empty()) { std::istringstream is(urbatch); is>>max_ur_set_size; } } void LutsDestination::init(std::string serviceurl_, std::string cert_, std::string key_, std::string ca_) { //Get service URL, cert, key, CA path from job log file std::string serviceurl=serviceurl_; std::string certfile=cert_; std::string keyfile=key_; std::string cadir=ca_; // ...or get them from environment if (certfile.empty()) certfile=Arc::GetEnv("X509_USER_CERT"); if (keyfile.empty()) keyfile=Arc::GetEnv("X509_USER_KEY"); if (cadir.empty()) cadir=Arc::GetEnv("X509_CERT_DIR"); // ...or by default, use host cert, key, CA path if (certfile.empty()) certfile=JURA_DEFAULT_CERT_FILE; if (keyfile.empty()) keyfile=JURA_DEFAULT_KEY_FILE; if (cadir.empty()) cadir=JURA_DEFAULT_CA_DIR; cfg.AddCertificate(certfile); cfg.AddPrivateKey(keyfile); cfg.AddCADir(cadir); // Tokenize service URL std::string host, port, endpoint; if (serviceurl.empty()) { logger.msg(Arc::ERROR, "ServiceURL missing"); } else { Arc::URL url(serviceurl); // URL path checking if (url.Path().length() > 3 && url.Path().substr(url.Path().length()-3) != "/ur"){ if (url.Path().substr(url.Path().length()-1) == "/"){ url.ChangePath(url.Path()+"ur"); } else { url.ChangePath(url.Path()+"/ur"); } } service_url = url; if (url.Protocol()!="https") { logger.msg(Arc::ERROR, "Protocol is %s, should be https", url.Protocol()); } host=url.Host(); std::ostringstream os; os<0) // Send the remaining URs and delete job log files. 
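    // Added note: finish() flushes any partially filled batch so records are not
    // lost when fewer than max_ur_set_size job log files were collected.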
submit_batch(); } int LutsDestination::submit_batch() { std::string urstr; usagerecordset.GetDoc(urstr,false); logger.msg(Arc::INFO, "Logging UR set of %d URs.", urn); logger.msg(Arc::DEBUG, "UR set dump: %s", urstr.c_str()); // Communication with LUTS server Arc::MCC_Status status=send_request(urstr); if (status.isOk()) { log_sent_ids(usagerecordset, urn, logger); // Delete log files for (std::list::iterator jp=joblogs.begin(); jp!=joblogs.end(); jp++ ) { (*jp).remove(); } clear(); return 0; } else // status.isnotOk { logger.msg(Arc::ERROR, "%s: %s", status.getOrigin().c_str(), status.getExplanation().c_str() ); clear(); return -1; } } Arc::MCC_Status LutsDestination::send_request(const std::string &urset) { ClientHTTP httpclient(cfg, service_url); //TODO: Absolute or relative url was in the configuration? httpclient.RelativeURI(true); PayloadRaw http_request; PayloadRawInterface *http_response = NULL; HTTPClientInfo http_info; std::multimap http_attributes; Arc::MCC_Status status; //Add the message into the request http_request.Insert(urset.c_str()); try { //Send status=httpclient.process("POST", http_attributes, &http_request, &http_info, &http_response); if (status){ // When Chain(s) configuration not failed logger.msg(Arc::DEBUG, "UsageRecords registration response: %s", http_response->Content()); } } catch (std::exception&) {} if (http_response==NULL) { //Unintelligible non-HTTP response return Arc::MCC_Status(Arc::PROTOCOL_RECOGNIZED_ERROR, "lutsclient", "Response not HTTP"); } if (status && ((std::string)http_response->Content()).substr(0,1) != "{" ) { // Status OK, but some error std::string httpfault; httpfault = http_response->Content(); delete http_response; return Arc::MCC_Status(Arc::PARSING_ERROR, "lutsclient", std::string( "Response from the server: " )+ httpfault ); } delete http_response; return status; } void LutsDestination::clear() { urn=0; joblogs.clear(); usagerecordset.Replace( Arc::XMLNode(Arc::NS("", "http://schema.ogf.org/urf/2003/09/urf" ), "UsageRecords") ); } LutsDestination::~LutsDestination() { finish(); } } nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/jura.h0000644000000000000000000000012412213610456022676 xustar000000000000000027 mtime=1378816302.521766 27 atime=1513200576.351724 30 ctime=1513200663.229786987 nordugrid-arc-5.4.2/src/services/a-rex/jura/jura.h0000644000175000002070000000103312213610456022740 0ustar00mockbuildmock00000000000000#ifndef _JURA_H #define _JURA_H //Default values for configuration: #define JURA_DEFAULT_JOBLOG_DIR "/tmp/jobstatus/logs" #define JURA_DEFAULT_DIR_PREFIX "/var/spool/arc" #define JURA_DEFAULT_MAX_UR_SET_SIZE 50 //just like in original JARM #define JURA_DEFAULT_MAX_APEL_UR_SET_SIZE 1000 #define JURA_DEFAULT_KEY_FILE "/etc/grid-security/hostkey.pem" #define JURA_DEFAULT_CERT_FILE "/etc/grid-security/hostcert.pem" #define JURA_DEFAULT_CA_DIR "/etc/grid-security/certificates" #endif nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/ReReporter.h0000644000000000000000000000012412534012676024034 xustar000000000000000027 mtime=1433408958.605316 27 atime=1513200576.349724 30 ctime=1513200663.234787049 nordugrid-arc-5.4.2/src/services/a-rex/jura/ReReporter.h0000644000175000002070000000247312534012676024107 0ustar00mockbuildmock00000000000000#ifndef _REREPORTER_H #define _REREPORTER_H #include #include #include #include #ifdef WIN32 #include #endif #include "Reporter.h" #include "Destination.h" namespace Arc { /** The class for main JURA functionality. 
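 * (Re-reporting variant: it scans a directory of already archived usage records
 * rather than live A-REX job log files and republishes them on request.)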
Traverses the 'logs' dir * of the given control directory, and reports usage data extracted from * job log files within. */ class ReReporter:public Reporter { private: Arc::Logger logger; Arc::Destination *dest; /** Directory where A-REX puts archived job logs */ std::string archivedjob_log_dir; struct tm* start; struct tm* end; std::vector urls; std::vector topics; std::string vo_filters; std::string regexp; public: /** Constructor. Gets the job log dir and the expiration time in seconds. * Default expiration time is infinity (represented by zero value). */ ReReporter(std::string archivedjob_log_dir_, std::string time_range_="", std::vector urls_=std::vector(), std::vector topics_=std::vector(), std::string vo_filters_=""); /** Processes job log files in '/logs'. */ int report(); ~ReReporter(); }; } #endif nordugrid-arc-5.4.2/src/services/a-rex/jura/PaxHeaders.7502/README0000644000000000000000000000012412101513125022433 xustar000000000000000027 mtime=1359386197.658718 27 atime=1513200576.375724 30 ctime=1513200663.224786926 nordugrid-arc-5.4.2/src/services/a-rex/jura/README0000644000175000002070000000014712101513125022502 0ustar00mockbuildmock00000000000000JURA: A-REX compatible accounting agent for SGAS-LUTS and/or APEL and able to generate CAR 1.2 output. nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/get_activity_documents.cpp0000644000000000000000000000012411425654673026120 xustar000000000000000027 mtime=1280793019.369276 27 atime=1513200576.302723 30 ctime=1513200662.686780346 nordugrid-arc-5.4.2/src/services/a-rex/get_activity_documents.cpp0000644000175000002070000000502611425654673026170 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "job.h" #include "arex.h" namespace ARex { Arc::MCC_Status ARexService::GetActivityDocuments(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out) { /* GetActivityDocuments ActivityIdentifier (wsa:EndpointReferenceType, unbounded) GetActivityDocumentsResponse Response (unbounded) ActivityIdentifier JobDefinition (jsdl:JobDefinition) Fault (soap:Fault) UnknownActivityIdentifierFault */ { std::string s; in.GetXML(s); logger.msg(Arc::VERBOSE, "GetActivityDocuments: request = \n%s", s); }; for(int n = 0;;++n) { Arc::XMLNode id = in["ActivityIdentifier"][n]; if(!id) break; // Create place for response Arc::XMLNode resp = out.NewChild("bes-factory:Response"); resp.NewChild(id); std::string jobid = Arc::WSAEndpointReference(id).ReferenceParameters()["a-rex:JobID"]; if(jobid.empty()) { // EPR is wrongly formated or not an A-REX EPR logger_.msg(Arc::ERROR, "GetActivityDocuments: non-AREX job requested"); Arc::SOAPFault fault(resp,Arc::SOAPFault::Sender,"Missing a-rex:JobID in ActivityIdentifier"); UnknownActivityIdentifierFault(fault,"Unrecognized EPR in ActivityIdentifier"); continue; }; // Look for obtained ID ARexJob job(jobid,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "GetActivityDocuments: job %s - %s", jobid, job.Failure()); Arc::SOAPFault fault(resp,Arc::SOAPFault::Sender,"No corresponding activity found"); UnknownActivityIdentifierFault(fault,("No activity "+jobid+" found: "+job.Failure()).c_str()); continue; }; /* // TODO: Check permissions on that ID */ // Read JSDL of job Arc::XMLNode jsdl = resp.NewChild("bes-factory:JobDefinition"); if(!job.GetDescription(jsdl)) { logger_.msg(Arc::ERROR, "GetActivityDocuments: job %s - %s", jobid, job.Failure()); // Processing failure jsdl.Destroy(); Arc::SOAPFault fault(resp,Arc::SOAPFault::Sender,"Failed processing 
activity"); UnknownActivityIdentifierFault(fault,("Failed processing activity "+jobid+": "+job.Failure()).c_str()); continue; }; jsdl.Name("bes-factory:JobDefinition"); // Recovering namespace of element }; { std::string s; out.GetXML(s); logger_.msg(Arc::VERBOSE, "GetActivityDocuments: response = \n%s", s); }; return Arc::MCC_Status(Arc::STATUS_OK); } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/schema0000644000000000000000000000013213214316027022004 xustar000000000000000030 mtime=1513200663.540790791 30 atime=1513200668.718854121 30 ctime=1513200663.540790791 nordugrid-arc-5.4.2/src/services/a-rex/schema/0000755000175000002070000000000013214316027022127 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012611255712574024132 xustar000000000000000027 mtime=1253545340.990206 30 atime=1513200603.446055809 29 ctime=1513200663.53579073 nordugrid-arc-5.4.2/src/services/a-rex/schema/Makefile.am0000644000175000002070000000020511255712574024172 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = a-rex.xsd a-rex_infoprovider.xsd a-rex_lrms.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/services/a-rex/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315733024131 xustar000000000000000030 mtime=1513200603.481056237 30 atime=1513200650.853635622 30 ctime=1513200663.536790742 nordugrid-arc-5.4.2/src/services/a-rex/schema/Makefile.in0000644000175000002070000004356213214315733024211 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = 
@ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = 
@LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ 
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = a-rex.xsd a-rex_infoprovider.xsd a-rex_lrms.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d 
$$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/schema/PaxHeaders.7502/a-rex_lrms.xsd0000644000000000000000000000012412144153067024657 xustar000000000000000027 mtime=1368446519.129163 27 atime=1513200576.303723 30 ctime=1513200663.540790791 nordugrid-arc-5.4.2/src/services/a-rex/schema/a-rex_lrms.xsd0000644000175000002070000001206012144153067024723 0ustar00mockbuildmock00000000000000 The path to qstat, pbsnodes, qmgr etc PBS binaries. No need to set unless PBS is used. The path of the PBS server logfiles which are used by the GM to determine whether a PBS job is completed. If not specified, GM will use qstat for that. nordugrid-arc-5.4.2/src/services/a-rex/schema/PaxHeaders.7502/a-rex_infoprovider.xsd0000644000000000000000000000012411523070764026412 xustar000000000000000027 mtime=1296855540.978124 27 atime=1513200576.325724 30 ctime=1513200663.539790779 nordugrid-arc-5.4.2/src/services/a-rex/schema/a-rex_infoprovider.xsd0000644000175000002070000002423011523070764026460 0ustar00mockbuildmock00000000000000 This element can be used to specify benchmark results on the ExecutionEnvironment level. It should contain the name of the benchmark and the benchmark score separated by a space. Please use one of standard benchmark names given below if applicable: bogomips - BogoMips cfp2006 - SPEC CFP 2006 floating point benchmark cint2006 - SPEC CINT 2006 integer benchmark linpack - LINPACK benchmark specfp2000 - SPECfp2000 floating point benchmark specint2000 - SPECint2000 integer benchmark This element is used by the infoprovider to determine which nodes are included in an ExecutionEnvironment. This element represents a group of identical compute nodes in the cluster. nordugrid-arc-5.4.2/src/services/a-rex/schema/PaxHeaders.7502/a-rex.xsd0000644000000000000000000000012412453512571023624 xustar000000000000000027 mtime=1420727673.764329 27 atime=1513200576.304723 30 ctime=1513200663.537790754 nordugrid-arc-5.4.2/src/services/a-rex/schema/a-rex.xsd0000644000175000002070000007305512453512571023703 0ustar00mockbuildmock00000000000000 This element defines URL of A-REX service as seen from outside. This element defines path to arc0 Grid Manager configuration file. If present values in that file will overwrite those defined as siblings of this element. If string is empty its value is /etc/arc.conf. This element defines how grid-manager part of A-REX is run. * internal - as a thread inside service container. * none - no grid-manager is run. * external - as a separate executable (not supported anymore). Default is 'internal'. Defines parameters for mapping Grid user identity to local account. Currently only default account name can be specified. Defines parameters for controlling LRMS specific and LRMS related functionality. 
* type - name of LRMS/batch system * defaultShare - optional name of default ComputingShare * sharedFilesystem - either session directory is shared with computing node * sharedScratch - the path where the frontend can access cross-mounted scratch directories of nodes, if applicable * GNUTimeUtility - location and name of GNU time executable * any accommodates numerous LRMS configuration parameters * pbs_bin_path="/usr/bin" * pbs_log_path="/var/spool/pbs/server_logs" * condor_bin_path="/opt/condor/bin" * condor_config="/opt/condor/etc/condor_config" * condor_rank="(1-LoadAvg/2)*(1-LoadAvg/2)*Memory/1000*KFlops/1000000" * slurm_bin_path="/usr/bin" * sge_bin_path="/opt/n1ge6/bin/lx24-x86" * sge_root="/opt/n1ge6" * sge_cell="default" * sge_execd_port="537" * lsf_bin_path="/usr/local/lsf/bin/" * lsf_profile_path="/usr/share/lsf/conf" * ll_bin_path="/opt/ibmll/LoadL/full/bin" * ll_consumable_resources="yes" Defines parameters influencing load imposed on gateway computer. Unless specified missing element means do not limit. * maxJobsTracked - jobs which are not in FINISHED state (jobs tracked in RAM) * maxJobsRun - jobs being run (SUBMITTING, INLRMS states) * maxJobsTotal - jobs in any state * maxJobsPerDN - maximum jobs in the system per user DN * maxJobsTransferred - jobs being processed on frontend (PREPARING, FINISHING states) * maxJobsTransferredAdditional - additional reserved number of jobs being processed on frontend * maxFilesTransferred - number of files being transferred simultaneously by jobs in PREPARING and FINISHING states. Value is per job. * maxLoadShare - sharing mechanism for data transfer - the maximum number of processes that can run per transfer share * loadShareType - sharing mechanism for data transfer - the scheme used to assign jobs to transfer shares. Possible values are "dn", "voms:vo", "voms:role" and "voms:group" * shareLimit - specifies a transfer share that has a limit different from the default value in maxLoadShare * name - the name of the share. Examples for different sharing mechanisms: - dn : /O=Grid/O=NorduGrid/OU=domainname.com/CN=Jane Doe - voms:vo : voname - voms:role : voname:rolename - voms:group : /voname/groupname * limit - the maximum number of processes that can run for this particular share * wakeupPeriod - specifies how often cheks for new jobs arrived, job state change requests, etc are done. That is resposivity of the service. The period is in seconds. Default is 3 minutes. Usually this element is not needed. Parameters related to cache functionality. Multiple caches may be specified. Cached data will be distributed evenly over the caches. If none such element is present caching is disabled. * location - path to a directory to store cached data. Multiple cache directories may be specified by specifying multiple location elements. Cached data will be distributed evenly over the caches. * remotelocation - path to a cache which is managed by another grid-manager. * link - optional path at which the location is accessible on computing nodes, if it is different from the path on the service host. * highWatermark, lowWatermark - specify high and low watermarks for space used by cache, as a percentage of the space on the file system on which the cache directory is located. When the max is exceeded, files will be deleted to bring the used space down to the min level. It is a good idea to have the cache on its own separate file system. To turn off this feature those elements must be absent. 
* cacheLogFile - the file where messages from cache cleaning are logged. * cacheLogLevel - the log level used by the cache cleaning script. * cacheLifetime - the lifetime of cache files Parameters for new data staging framework: * maxDelivery: maximum number of files in physical transfer * maxProcessor: maximum number of files in each pre or post transfer stage * maxEmergency: maximum number of files which can use emergency slots when regular slots are full * maxPrepared: maximum number of files in prepared state * shareType: transfer shares type * definedShare: share with a defined priority * name: share name * priority: share priority * deliveryService: remote data delivery service endpoint * localDelivery: whether to use local delivery as well as remote * remoteSizeLimit: Lower limit on file size (in bytes) under which transfers always use local delivery * useHostCert: whether to use host certificate for communication with remote delivery services * dtrLog: path to location where DTR state is periodically dumped Specifies how service prepares its control and session directories at startup. yes - directories are created and they ownership and permissions adjusted missing - directories are created and only for those which are created ownership and permission are adjusted no - nothing is created and adjusted This optional parameter can be used to enable publishing of additional information to ISIS. The default is not to publish ("no"). The information, which is considered in some degree to be static, includes HealthState, OSFamily, Platform, PhysicalCPUs, CPUMultiplicity, CPUModel and ApplicationEnvironment. This optional parameter can be used to disable ARC (BES based) job management interface. By default it is enabled. This optional parameter can be used to enable EMI ES job management interface. By default it is disabled. CommonName attribute of bes-factory. LongDescription attribute of bes-factory. Name of Local Resource Management System. Name of Operating System. The values are based on the OSType field of the CIM_OperatingSystem model: http://www.dmtf.org/standards/cim/cim_schema_v29 Some examples of valid choices: LINUX, MACOS, Solaris, Windows 2000 The GLUE2 infoprovider wake up period time in second The information interface (LIDI) max number of simultaneous clients. Default is 10. The max number of simultaneous clients performing job management operations (extended BES). Default is 100. The max number of simultaneous clients performing HTTP PUT and GET operations. Default is 100. 
Options for the A-REX information provider nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/delegation0000644000000000000000000000013213214316026022656 xustar000000000000000030 mtime=1513200662.752781153 30 atime=1513200668.718854121 30 ctime=1513200662.752781153 nordugrid-arc-5.4.2/src/services/a-rex/delegation/0000755000175000002070000000000013214316026023001 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/a-rex/delegation/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712715412562025002 xustar000000000000000027 mtime=1463162226.429695 30 atime=1513200601.791035567 30 ctime=1513200662.740781007 nordugrid-arc-5.4.2/src/services/a-rex/delegation/Makefile.am0000644000175000002070000000114512715412562025045 0ustar00mockbuildmock00000000000000if SQLITE_ENABLED FRSQLITE_SOURCES = FileRecordSQLite.cpp FileRecordSQLite.h else FRSQLITE_SOURCES = endif noinst_LTLIBRARIES = libdelegation.la libdelegation_la_SOURCES = \ uid.cpp FileRecord.cpp FileRecordBDB.cpp DelegationStore.cpp DelegationStores.cpp \ uid.h FileRecord.h FileRecordBDB.h DelegationStore.h DelegationStores.h \ $(FRSQLITE_SOURCES) libdelegation_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(DBCXX_CPPFLAGS) $(SQLITE_CFLAGS) $(AM_CXXFLAGS) libdelegation_la_LIBADD = $(top_builddir)/src/hed/libs/common/libarccommon.la $(DBCXX_LIBS) $(SQLITE_LIBS) nordugrid-arc-5.4.2/src/services/a-rex/delegation/PaxHeaders.7502/FileRecordBDB.cpp0000644000000000000000000000012312754411443025774 xustar000000000000000026 mtime=1471288099.46513 27 atime=1513200576.481726 30 ctime=1513200662.744781056 nordugrid-arc-5.4.2/src/services/a-rex/delegation/FileRecordBDB.cpp0000644000175000002070000004673312754411443026057 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include "uid.h" #include "FileRecordBDB.h" namespace ARex { #define FR_DB_NAME "list" static void db_env_clean(const std::string& base) { try { Glib::Dir dir(base); std::string name; while ((name = dir.read_name()) != "") { std::string fullpath(base); fullpath += G_DIR_SEPARATOR_S + name; struct stat st; if (::lstat(fullpath.c_str(), &st) == 0) { if(!S_ISDIR(st.st_mode)) { if(name != FR_DB_NAME) { Arc::FileDelete(fullpath.c_str()); }; }; }; }; } catch(Glib::FileError& e) { }; } bool FileRecordBDB::dberr(const char* s, int err) { if(err == 0) return true; error_num_ = err; error_str_ = std::string(s)+": "+DbEnv::strerror(err); return false; } FileRecordBDB::FileRecordBDB(const std::string& base, bool create): FileRecord(base, create), db_rec_(NULL), db_lock_(NULL), db_locked_(NULL), db_link_(NULL) { valid_ = open(create); } bool FileRecordBDB::verify(void) { // Performing various kinds of verifications std::string dbpath = basepath_ + G_DIR_SEPARATOR_S + FR_DB_NAME; { Db db_test(NULL,DB_CXX_NO_EXCEPTIONS); if(!dberr("Error verifying databases", db_test.verify(dbpath.c_str(),NULL,NULL,DB_NOORDERCHK))) { if(error_num_ != ENOENT) return false; }; }; { Db db_test(NULL,DB_CXX_NO_EXCEPTIONS); if(!dberr("Error verifying database 'meta'", db_test.verify(dbpath.c_str(),"meta",NULL,DB_ORDERCHKONLY))) { if(error_num_ != ENOENT) return false; }; }; // Skip 'link' - it is not of btree kind // Skip 'lock' - for unknown reason it returns DB_NOTFOUND // Skip 'locked' - for unknown reason it returns DB_NOTFOUND return true; } FileRecordBDB::~FileRecordBDB(void) { close(); } bool FileRecordBDB::open(bool create) { int oflags 
= 0; int eflags = DB_INIT_CDB | DB_INIT_MPOOL; if(create) { oflags |= DB_CREATE; eflags |= DB_CREATE; }; int mode = S_IRUSR|S_IWUSR; db_env_ = new DbEnv(DB_CXX_NO_EXCEPTIONS); if(!dberr("Error setting database environment flags", db_env_->set_flags(DB_CDB_ALLDB,1))) { delete db_env_; db_env_ = NULL; return false; } // If process is master (create = true) we should check by caling // failchk() and discard environment in case something is wrong. // But sicne we are allowed to discard environment we can do // that just in case. if(create) { db_env_clean(basepath_); }; if(!dberr("Error opening database environment", db_env_->open(basepath_.c_str(),eflags,mode))) { delete db_env_; db_env_ = NULL; return false; }; std::string dbpath = FR_DB_NAME; if(create) { // If creation is allowed that means we are master and can try verifying if(!verify()) return false; }; // db_link // |---db_lock // \---db_locked db_rec_ = new Db(db_env_,DB_CXX_NO_EXCEPTIONS); db_lock_ = new Db(db_env_,DB_CXX_NO_EXCEPTIONS); db_locked_ = new Db(db_env_,DB_CXX_NO_EXCEPTIONS); db_link_ = new Db(db_env_,DB_CXX_NO_EXCEPTIONS); if(!dberr("Error setting flag DB_DUPSORT",db_lock_->set_flags(DB_DUPSORT))) return false; if(!dberr("Error setting flag DB_DUPSORT",db_locked_->set_flags(DB_DUPSORT))) return false; if(!dberr("Error associating databases",db_link_->associate(NULL,db_lock_,&lock_callback,0))) return false; if(!dberr("Error associating databases",db_link_->associate(NULL,db_locked_,&locked_callback,0))) return false; if(!dberr("Error opening database 'meta'", db_rec_->open(NULL,dbpath.c_str(), "meta", DB_BTREE,oflags,mode))) return false; if(!dberr("Error opening database 'link'", db_link_->open(NULL,dbpath.c_str(), "link", DB_RECNO,oflags,mode))) return false; if(!dberr("Error opening database 'lock'", db_lock_->open(NULL,dbpath.c_str(), "lock", DB_BTREE,oflags,mode))) return false; if(!dberr("Error opening database 'locked'", db_locked_->open(NULL,dbpath.c_str(),"locked",DB_BTREE,oflags,mode))) return false; return true; } void FileRecordBDB::close(void) { valid_ = false; if(db_locked_) db_locked_->close(0); if(db_lock_) db_lock_->close(0); if(db_link_) db_link_->close(0); if(db_rec_) db_rec_->close(0); if(db_env_) db_env_->close(0); delete db_locked_; db_locked_ = NULL; delete db_lock_; db_lock_ = NULL; delete db_link_; db_link_ = NULL; delete db_env_; db_env_ = NULL; } static void* store_string(const std::string& str, void* buf) { uint32_t l = str.length(); unsigned char* p = (unsigned char*)buf; *p = (unsigned char)l; l >>= 8; ++p; *p = (unsigned char)l; l >>= 8; ++p; *p = (unsigned char)l; l >>= 8; ++p; *p = (unsigned char)l; l >>= 8; ++p; ::memcpy(p,str.c_str(),str.length()); p += str.length(); return (void*)p; } static void* parse_string(std::string& str, const void* buf, uint32_t& size) { uint32_t l = 0; const unsigned char* p = (unsigned char*)buf; if(size < 4) { p += size; size = 0; return (void*)p; }; l |= ((uint32_t)(*p)) << 0; ++p; --size; l |= ((uint32_t)(*p)) << 8; ++p; --size; l |= ((uint32_t)(*p)) << 16; ++p; --size; l |= ((uint32_t)(*p)) << 24; ++p; --size; if(l > size) l = size; // TODO: sanity check str.assign((const char*)p,l); p += l; size -= l; return (void*)p; } static void make_string(const std::string& str, Dbt& rec) { rec.set_data(NULL); rec.set_size(0); uint32_t l = 4 + str.length(); void* d = (void*)::malloc(l); if(!d) return; rec.set_data(d); rec.set_size(l); d = store_string(str,d); } static void make_link(const std::string& lock_id,const std::string& id, const std::string& owner, Dbt& rec) 
{ rec.set_data(NULL); rec.set_size(0); uint32_t l = 4 + lock_id.length() + 4 + id.length() + 4 + owner.length(); void* d = (void*)::malloc(l); if(!d) return; rec.set_data(d); rec.set_size(l); d = store_string(lock_id,d); d = store_string(id,d); d = store_string(owner,d); } static void make_key(const std::string& id, const std::string& owner, Dbt& key) { key.set_data(NULL); key.set_size(0); uint32_t l = 4 + id.length() + 4 + owner.length(); void* d = (void*)::malloc(l); if(!d) return; key.set_data(d); key.set_size(l); d = store_string(id,d); d = store_string(owner,d); } static void make_record(const std::string& uid, const std::string& id, const std::string& owner, const std::list& meta, Dbt& key, Dbt& data) { key.set_data(NULL); key.set_size(0); data.set_data(NULL); data.set_size(0); uint32_t l = 4 + uid.length(); for(std::list::const_iterator m = meta.begin(); m != meta.end(); ++m) { l += 4 + m->length(); }; make_key(id,owner,key); void* d = (void*)::malloc(l); if(!d) { ::free(key.get_data()); key.set_data(NULL); key.set_size(0); return; }; data.set_data(d); data.set_size(l); d = store_string(uid,d); for(std::list::const_iterator m = meta.begin(); m != meta.end(); ++m) { d = store_string(*m,d); }; } static void parse_record(std::string& uid, std::string& id, std::string& owner, std::list& meta, const Dbt& key, const Dbt& data) { uint32_t size = 0; void* d = NULL; d = (void*)key.get_data(); size = (uint32_t)key.get_size(); d = parse_string(id,d,size); d = parse_string(owner,d,size); d = (void*)data.get_data(); size = (uint32_t)data.get_size(); d = parse_string(uid,d,size); for(;size;) { std::string s; d = parse_string(s,d,size); meta.push_back(s); }; } int FileRecordBDB::locked_callback(Db * secondary, const Dbt * key, const Dbt * data, Dbt * result) { const void* p = data->get_data(); uint32_t size = data->get_size(); std::string str; p = parse_string(str,p,size); result->set_data((void*)p); result->set_size(size); return 0; } bool FileRecordBDB::Recover(void) { Glib::Mutex::Lock lock(lock_); // Real recovery not implemented yet. 
close(); error_num_ = -1; error_str_ = "Recovery not implemented yet."; return false; } int FileRecordBDB::lock_callback(Db * secondary, const Dbt * key, const Dbt * data, Dbt * result) { const void* p = data->get_data(); uint32_t size = data->get_size(); uint32_t rest = size; std::string str; parse_string(str,p,rest); result->set_data((void*)p); result->set_size(size-rest); return 0; } std::string FileRecordBDB::Add(std::string& id, const std::string& owner, const std::list& meta) { if(!valid_) return ""; int uidtries = 10; // some sane number std::string uid; while(true) { if(!(uidtries--)) return ""; Glib::Mutex::Lock lock(lock_); Dbt key; Dbt data; uid = rand_uid64().substr(4); make_record(uid,(id.empty())?uid:id,owner,meta,key,data); void* pkey = key.get_data(); void* pdata = data.get_data(); int dbres = db_rec_->put(NULL,&key,&data,DB_NOOVERWRITE); if(dbres == DB_KEYEXIST) { ::free(pkey); ::free(pdata); uid.resize(0); continue; }; if(!dberr("Failed to add record to database",dbres)) { ::free(pkey); ::free(pdata); return ""; }; db_rec_->sync(0); ::free(pkey); ::free(pdata); break; }; if(id.empty()) id = uid; make_file(uid); return uid_to_path(uid); } bool FileRecordBDB::Add(const std::string& uid, const std::string& id, const std::string& owner, const std::list& meta) { if(!valid_) return false; Glib::Mutex::Lock lock(lock_); Dbt key; Dbt data; make_record(uid,(id.empty())?uid:id,owner,meta,key,data); void* pkey = key.get_data(); void* pdata = data.get_data(); int dbres = db_rec_->put(NULL,&key,&data,DB_NOOVERWRITE); if(!dberr("Failed to add record to database",dbres)) { ::free(pkey); ::free(pdata); return false; }; db_rec_->sync(0); ::free(pkey); ::free(pdata); return true; } std::string FileRecordBDB::Find(const std::string& id, const std::string& owner, std::list& meta) { if(!valid_) return ""; Glib::Mutex::Lock lock(lock_); Dbt key; Dbt data; make_key(id,owner,key); void* pkey = key.get_data(); if(!dberr("Failed to retrieve record from database",db_rec_->get(NULL,&key,&data,0))) { ::free(pkey); return ""; }; std::string uid; std::string id_tmp; std::string owner_tmp; parse_record(uid,id_tmp,owner_tmp,meta,key,data); ::free(pkey); return uid_to_path(uid); } bool FileRecordBDB::Modify(const std::string& id, const std::string& owner, const std::list& meta) { if(!valid_) return false; Glib::Mutex::Lock lock(lock_); Dbt key; Dbt data; make_key(id,owner,key); void* pkey = key.get_data(); if(!dberr("Failed to retrieve record from database",db_rec_->get(NULL,&key,&data,0))) { ::free(pkey); return false; }; std::string uid; std::string id_tmp; std::string owner_tmp; std::list meta_tmp; parse_record(uid,id_tmp,owner_tmp,meta_tmp,key,data); ::free(pkey); make_record(uid,id,owner,meta,key,data); if(!dberr("Failed to store record to database",db_rec_->put(NULL,&key,&data,0))) { ::free(key.get_data()); ::free(data.get_data()); return false; }; db_rec_->sync(0); ::free(key.get_data()); ::free(data.get_data()); return true; } bool FileRecordBDB::Remove(const std::string& id, const std::string& owner) { if(!valid_) return false; Glib::Mutex::Lock lock(lock_); Dbt key; Dbt data; make_key(id,owner,key); void* pkey = key.get_data(); if(dberr("",db_locked_->get(NULL,&key,&data,0))) { ::free(pkey); error_str_ = "Record has active locks"; return false; // have locks }; if(!dberr("Failed to retrieve record from database",db_rec_->get(NULL,&key,&data,0))) { ::free(pkey); return false; // No such record? 
}; std::string uid; std::string id_tmp; std::string owner_tmp; std::list meta; parse_record(uid,id_tmp,owner_tmp,meta,key,data); if(!dberr("Failed to delete record from database",db_rec_->del(NULL,&key,0))) { // TODO: handle error ::free(pkey); return false; }; db_rec_->sync(0); ::free(pkey); remove_file(uid); return true; } bool FileRecordBDB::AddLock(const std::string& lock_id, const std::list& ids, const std::string& owner) { if(!valid_) return false; Glib::Mutex::Lock lock(lock_); Dbt key; Dbt data; for(std::list::const_iterator id = ids.begin(); id != ids.end(); ++id) { make_link(lock_id,*id,owner,data); void* pdata = data.get_data(); if(!dberr("addlock:put",db_link_->put(NULL,&key,&data,DB_APPEND))) { ::free(pdata); return false; }; ::free(pdata); }; db_link_->sync(0); return true; } bool FileRecordBDB::RemoveLock(const std::string& lock_id) { std::list > ids; return RemoveLock(lock_id,ids); } bool FileRecordBDB::RemoveLock(const std::string& lock_id, std::list >& ids) { if(!valid_) return false; Glib::Mutex::Lock lock(lock_); Dbc* cur = NULL; if(!dberr("removelock:cursor",db_lock_->cursor(NULL,&cur,DB_WRITECURSOR))) return false; Dbt key; Dbt data; make_string(lock_id,key); void* pkey = key.get_data(); if(!dberr("removelock:get1",cur->get(&key,&data,DB_SET))) { // TODO: handle errors ::free(pkey); cur->close(); return false; }; for(;;) { std::string id; std::string owner; uint32_t size = data.get_size(); void* buf = data.get_data(); buf = parse_string(id,buf,size); // lock_id - skip buf = parse_string(id,buf,size); buf = parse_string(owner,buf,size); ids.push_back(std::pair(id,owner)); if(!dberr("removelock:del",cur->del(0))) { ::free(pkey); cur->close(); return false; }; if(!dberr("removelock:get2",cur->get(&key,&data,DB_NEXT_DUP))) break; }; db_lock_->sync(0); ::free(pkey); cur->close(); return true; } bool FileRecordBDB::ListLocked(const std::string& lock_id, std::list >& ids) { if(!valid_) return false; Glib::Mutex::Lock lock(lock_); Dbc* cur = NULL; if(!dberr("listlocked:cursor",db_lock_->cursor(NULL,&cur,0))) return false; Dbt key; Dbt data; make_string(lock_id,key); void* pkey = key.get_data(); if(!dberr("listlocked:get1",cur->get(&key,&data,DB_SET))) { // TODO: handle errors ::free(pkey); cur->close(); return false; }; for(;;) { std::string id; std::string owner; uint32_t size = data.get_size(); void* buf = data.get_data(); buf = parse_string(id,buf,size); // lock_id - skip buf = parse_string(id,buf,size); buf = parse_string(owner,buf,size); ids.push_back(std::pair(id,owner)); if(cur->get(&key,&data,DB_NEXT_DUP) != 0) break; }; ::free(pkey); cur->close(); return true; } bool FileRecordBDB::ListLocks(std::list& locks) { if(!valid_) return false; Glib::Mutex::Lock lock(lock_); Dbc* cur = NULL; if(db_lock_->cursor(NULL,&cur,0)) return false; for(;;) { Dbt key; Dbt data; if(cur->get(&key,&data,DB_NEXT_NODUP) != 0) break; // TODO: handle errors std::string str; uint32_t size = key.get_size(); parse_string(str,key.get_data(),size); locks.push_back(str); }; cur->close(); return true; } bool FileRecordBDB::ListLocks(const std::string& id, const std::string& owner, std::list& locks) { if(!valid_) return false; Glib::Mutex::Lock lock(lock_); Dbc* cur = NULL; if(db_lock_->cursor(NULL,&cur,0)) return false; for(;;) { Dbt key; Dbt data; if(cur->get(&key,&data,DB_NEXT_NODUP) != 0) break; // TODO: handle errors std::string str; uint32_t size = key.get_size(); parse_string(str,key.get_data(),size); { std::string id_tmp; std::string owner_tmp; uint32_t size = data.get_size(); void* buf = 
data.get_data(); buf = parse_string(id_tmp,buf,size); // lock_id - skip buf = parse_string(id_tmp,buf,size); buf = parse_string(owner_tmp,buf,size); if((id_tmp != id) || (owner_tmp != owner)) continue; }; locks.push_back(str); }; cur->close(); return true; } FileRecordBDB::Iterator::Iterator(FileRecordBDB& frec):FileRecord::Iterator(frec),cur_(NULL) { Glib::Mutex::Lock lock(frec.lock_); if(!frec.dberr("Iterator:cursor",frec.db_rec_->cursor(NULL,&cur_,0))) { if(cur_) { cur_->close(); cur_=NULL; }; return; }; Dbt key; Dbt data; if(!frec.dberr("Iterator:first",cur_->get(&key,&data,DB_FIRST))) { cur_->close(); cur_=NULL; return; }; parse_record(uid_,id_,owner_,meta_,key,data); } FileRecordBDB::Iterator::~Iterator(void) { FileRecordBDB& frec((FileRecordBDB&)frec_); Glib::Mutex::Lock lock(frec.lock_); if(cur_) { cur_->close(); cur_=NULL; }; } FileRecordBDB::Iterator& FileRecordBDB::Iterator::operator++(void) { if(!cur_) return *this; FileRecordBDB& frec((FileRecordBDB&)frec_); Glib::Mutex::Lock lock(frec.lock_); Dbt key; Dbt data; if(!frec.dberr("Iterator:first",cur_->get(&key,&data,DB_NEXT))) { cur_->close(); cur_=NULL; return *this; }; parse_record(uid_,id_,owner_,meta_,key,data); return *this; } FileRecordBDB::Iterator& FileRecordBDB::Iterator::operator--(void) { if(!cur_) return *this; FileRecordBDB& frec((FileRecordBDB&)frec_); Glib::Mutex::Lock lock(frec.lock_); Dbt key; Dbt data; if(!frec.dberr("Iterator:first",cur_->get(&key,&data,DB_PREV))) { cur_->close(); cur_=NULL; return *this; }; parse_record(uid_,id_,owner_,meta_,key,data); return *this; } void FileRecordBDB::Iterator::suspend(void) { FileRecordBDB& frec((FileRecordBDB&)frec_); Glib::Mutex::Lock lock(frec.lock_); if(cur_) { cur_->close(); cur_=NULL; } } bool FileRecordBDB::Iterator::resume(void) { FileRecordBDB& frec((FileRecordBDB&)frec_); Glib::Mutex::Lock lock(frec.lock_); if(!cur_) { if(id_.empty()) return false; if(!frec.dberr("Iterator:cursor",frec.db_rec_->cursor(NULL,&cur_,0))) { if(cur_) { cur_->close(); cur_=NULL; }; return false; }; Dbt key; Dbt data; make_key(id_,owner_,key); void* pkey = key.get_data(); if(!frec.dberr("Iterator:first",cur_->get(&key,&data,DB_SET))) { ::free(pkey); cur_->close(); cur_=NULL; return false; }; parse_record(uid_,id_,owner_,meta_,key,data); ::free(pkey); }; return true; } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/delegation/PaxHeaders.7502/FileRecord.cpp0000644000000000000000000000012412754411343025464 xustar000000000000000027 mtime=1471288035.077654 27 atime=1513200576.486726 30 ctime=1513200662.743781043 nordugrid-arc-5.4.2/src/services/a-rex/delegation/FileRecord.cpp0000644000175000002070000000246712754411343025542 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "FileRecord.h" namespace ARex { std::string FileRecord::uid_to_path(const std::string& uid) { std::string path = basepath_; std::string::size_type p = 0; for(;uid.length() > (p+4);) { path = path + G_DIR_SEPARATOR_S + uid.substr(p,3); p += 3; }; return path + G_DIR_SEPARATOR_S + uid.substr(p); } bool FileRecord::make_file(const std::string& uid) { std::string path = uid_to_path(uid); std::string::size_type p = path.rfind(G_DIR_SEPARATOR_S); if((p != std::string::npos) && (p != 0)) { (void)Arc::DirCreate(path.substr(0,p),0,0,S_IXUSR|S_IRUSR|S_IWUSR,true); } return Arc::FileCreate(uid_to_path(uid),"",0,0,S_IRUSR|S_IWUSR); } bool FileRecord::remove_file(const std::string& uid) { std::string path = uid_to_path(uid); 
if(Arc::FileDelete(path)) { while(true) { std::string::size_type p = path.rfind(G_DIR_SEPARATOR_S); if((p == std::string::npos) || (p == 0)) break; if(p <= basepath_.length()) break; path.resize(p); if(!Arc::DirDelete(path,false)) break; }; return true; }; return false; } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/delegation/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315731025002 xustar000000000000000030 mtime=1513200601.847036252 30 atime=1513200649.903624003 30 ctime=1513200662.741781019 nordugrid-arc-5.4.2/src/services/a-rex/delegation/Makefile.in0000644000175000002070000007236513214315731025065 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/delegation DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) am__DEPENDENCIES_1 = libdelegation_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am__libdelegation_la_SOURCES_DIST = uid.cpp FileRecord.cpp \ FileRecordBDB.cpp DelegationStore.cpp DelegationStores.cpp \ uid.h FileRecord.h FileRecordBDB.h DelegationStore.h \ DelegationStores.h FileRecordSQLite.cpp FileRecordSQLite.h @SQLITE_ENABLED_TRUE@am__objects_1 = \ @SQLITE_ENABLED_TRUE@ libdelegation_la-FileRecordSQLite.lo am_libdelegation_la_OBJECTS = libdelegation_la-uid.lo \ libdelegation_la-FileRecord.lo \ libdelegation_la-FileRecordBDB.lo \ libdelegation_la-DelegationStore.lo \ libdelegation_la-DelegationStores.lo 
$(am__objects_1) libdelegation_la_OBJECTS = $(am_libdelegation_la_OBJECTS) libdelegation_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libdelegation_la_SOURCES) DIST_SOURCES = $(am__libdelegation_la_SOURCES_DIST) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = 
@DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = 
@PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @SQLITE_ENABLED_FALSE@FRSQLITE_SOURCES = @SQLITE_ENABLED_TRUE@FRSQLITE_SOURCES = FileRecordSQLite.cpp FileRecordSQLite.h noinst_LTLIBRARIES = libdelegation.la libdelegation_la_SOURCES = \ uid.cpp FileRecord.cpp FileRecordBDB.cpp DelegationStore.cpp DelegationStores.cpp \ uid.h FileRecord.h FileRecordBDB.h DelegationStore.h DelegationStores.h \ $(FRSQLITE_SOURCES) 
libdelegation_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(DBCXX_CPPFLAGS) $(SQLITE_CFLAGS) $(AM_CXXFLAGS) libdelegation_la_LIBADD = $(top_builddir)/src/hed/libs/common/libarccommon.la $(DBCXX_LIBS) $(SQLITE_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/delegation/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/delegation/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libdelegation.la: $(libdelegation_la_OBJECTS) $(libdelegation_la_DEPENDENCIES) $(libdelegation_la_LINK) $(libdelegation_la_OBJECTS) $(libdelegation_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdelegation_la-DelegationStore.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdelegation_la-DelegationStores.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdelegation_la-FileRecord.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdelegation_la-FileRecordBDB.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdelegation_la-FileRecordSQLite.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdelegation_la-uid.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo 
$(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libdelegation_la-uid.lo: uid.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -MT libdelegation_la-uid.lo -MD -MP -MF $(DEPDIR)/libdelegation_la-uid.Tpo -c -o libdelegation_la-uid.lo `test -f 'uid.cpp' || echo '$(srcdir)/'`uid.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdelegation_la-uid.Tpo $(DEPDIR)/libdelegation_la-uid.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='uid.cpp' object='libdelegation_la-uid.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -c -o libdelegation_la-uid.lo `test -f 'uid.cpp' || echo '$(srcdir)/'`uid.cpp libdelegation_la-FileRecord.lo: FileRecord.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -MT libdelegation_la-FileRecord.lo -MD -MP -MF $(DEPDIR)/libdelegation_la-FileRecord.Tpo -c -o libdelegation_la-FileRecord.lo `test -f 'FileRecord.cpp' || echo '$(srcdir)/'`FileRecord.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdelegation_la-FileRecord.Tpo $(DEPDIR)/libdelegation_la-FileRecord.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FileRecord.cpp' object='libdelegation_la-FileRecord.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -c -o libdelegation_la-FileRecord.lo `test -f 'FileRecord.cpp' || echo '$(srcdir)/'`FileRecord.cpp libdelegation_la-FileRecordBDB.lo: FileRecordBDB.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -MT libdelegation_la-FileRecordBDB.lo -MD -MP -MF $(DEPDIR)/libdelegation_la-FileRecordBDB.Tpo -c -o libdelegation_la-FileRecordBDB.lo `test -f 'FileRecordBDB.cpp' || echo '$(srcdir)/'`FileRecordBDB.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdelegation_la-FileRecordBDB.Tpo $(DEPDIR)/libdelegation_la-FileRecordBDB.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FileRecordBDB.cpp' object='libdelegation_la-FileRecordBDB.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -c -o libdelegation_la-FileRecordBDB.lo `test -f 'FileRecordBDB.cpp' || echo '$(srcdir)/'`FileRecordBDB.cpp libdelegation_la-DelegationStore.lo: DelegationStore.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) 
--tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -MT libdelegation_la-DelegationStore.lo -MD -MP -MF $(DEPDIR)/libdelegation_la-DelegationStore.Tpo -c -o libdelegation_la-DelegationStore.lo `test -f 'DelegationStore.cpp' || echo '$(srcdir)/'`DelegationStore.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdelegation_la-DelegationStore.Tpo $(DEPDIR)/libdelegation_la-DelegationStore.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DelegationStore.cpp' object='libdelegation_la-DelegationStore.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -c -o libdelegation_la-DelegationStore.lo `test -f 'DelegationStore.cpp' || echo '$(srcdir)/'`DelegationStore.cpp libdelegation_la-DelegationStores.lo: DelegationStores.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -MT libdelegation_la-DelegationStores.lo -MD -MP -MF $(DEPDIR)/libdelegation_la-DelegationStores.Tpo -c -o libdelegation_la-DelegationStores.lo `test -f 'DelegationStores.cpp' || echo '$(srcdir)/'`DelegationStores.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdelegation_la-DelegationStores.Tpo $(DEPDIR)/libdelegation_la-DelegationStores.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DelegationStores.cpp' object='libdelegation_la-DelegationStores.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -c -o libdelegation_la-DelegationStores.lo `test -f 'DelegationStores.cpp' || echo '$(srcdir)/'`DelegationStores.cpp libdelegation_la-FileRecordSQLite.lo: FileRecordSQLite.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -MT libdelegation_la-FileRecordSQLite.lo -MD -MP -MF $(DEPDIR)/libdelegation_la-FileRecordSQLite.Tpo -c -o libdelegation_la-FileRecordSQLite.lo `test -f 'FileRecordSQLite.cpp' || echo '$(srcdir)/'`FileRecordSQLite.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdelegation_la-FileRecordSQLite.Tpo $(DEPDIR)/libdelegation_la-FileRecordSQLite.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FileRecordSQLite.cpp' object='libdelegation_la-FileRecordSQLite.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -c -o libdelegation_la-FileRecordSQLite.lo `test -f 'FileRecordSQLite.cpp' || echo '$(srcdir)/'`FileRecordSQLite.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) 
$(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/a-rex/delegation/PaxHeaders.7502/uid.cpp0000644000000000000000000000012412754411373024232 xustar000000000000000027 mtime=1471288059.065732 27 atime=1513200576.480726 30 ctime=1513200662.742781031 nordugrid-arc-5.4.2/src/services/a-rex/delegation/uid.cpp0000644000175000002070000000110312754411373024272 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #ifdef HAVE_STDINT_H #include #endif #include #include "uid.h" namespace ARex { std::string rand_uid64(void) { static unsigned int cnt; struct timeval t; gettimeofday(&t,NULL); uint64_t id = (((uint64_t)((cnt++) & 0xffff)) << 48) | (((uint64_t)(t.tv_sec & 0xffff)) << 32) | (((uint64_t)(t.tv_usec & 0xffff)) << 16) | (((uint64_t)(rand() & 0xffff)) << 0); return Arc::inttostr(id,16,16); } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/delegation/PaxHeaders.7502/FileRecordSQLite.h0000644000000000000000000000012312754411443026213 xustar000000000000000026 mtime=1471288099.46513 27 atime=1513200576.481726 30 ctime=1513200662.752781153 nordugrid-arc-5.4.2/src/services/a-rex/delegation/FileRecordSQLite.h0000644000175000002070000000534012754411443026263 0ustar00mockbuildmock00000000000000#ifndef __ARC_DELEGATION_FILERECORDSQLITE_H__ #define __ARC_DELEGATION_FILERECORDSQLITE_H__ #include #include #include #include #include "FileRecord.h" namespace ARex { class FileRecordSQLite: public FileRecord { private: Glib::Mutex lock_; // TODO: use DB locking sqlite3* db_; bool dberr(const char* s, int err); bool open(bool create); void close(void); bool verify(void); public: class Iterator: public FileRecord::Iterator { friend class FileRecordSQLite; private: Iterator(const Iterator&); // disabled constructor Iterator(FileRecordSQLite& frec); sqlite3_int64 rowid_; 
public: ~Iterator(void); virtual Iterator& operator++(void); virtual Iterator& operator--(void); virtual void suspend(void); virtual bool resume(void); virtual operator bool(void) { return (rowid_ != -1); }; virtual bool operator!(void) { return (rowid_ == -1); }; }; friend class FileRecordSQLite::Iterator; FileRecordSQLite(const std::string& base, bool create = true); virtual ~FileRecordSQLite(void); virtual Iterator* NewIterator(void) { return new Iterator(*this); }; virtual bool Recover(void); virtual std::string Add(std::string& id, const std::string& owner, const std::list& meta); virtual bool Add(const std::string& uid, const std::string& id, const std::string& owner, const std::list& meta); virtual std::string Find(const std::string& id, const std::string& owner, std::list& meta); virtual bool Modify(const std::string& id, const std::string& owner, const std::list& meta); virtual bool Remove(const std::string& id, const std::string& owner); // Assign specified credential ids specified lock lock_id virtual bool AddLock(const std::string& lock_id, const std::list& ids, const std::string& owner); // Reomove lock lock_id from all associated credentials virtual bool RemoveLock(const std::string& lock_id); // Reomove lock lock_id from all associated credentials and store // identifiers of associated credentials into ids virtual bool RemoveLock(const std::string& lock_id, std::list >& ids); // Fills locks with all known lock ids. virtual bool ListLocks(std::list& locks); // Fills locks with all lock ids associated with specified credential id virtual bool ListLocks(const std::string& id, const std::string& owner, std::list& locks); // Fills ids with identifiers of credentials locked by specified lock_id lock virtual bool ListLocked(const std::string& lock_id, std::list >& ids); }; } // namespace ARex #endif // __ARC_DELEGATION_FiLERECORDSQLITE_H__ nordugrid-arc-5.4.2/src/services/a-rex/delegation/PaxHeaders.7502/DelegationStores.cpp0000644000000000000000000000012312715412562026721 xustar000000000000000027 mtime=1463162226.429695 27 atime=1513200576.484726 29 ctime=1513200662.74678108 nordugrid-arc-5.4.2/src/services/a-rex/delegation/DelegationStores.cpp0000644000175000002070000000275112715412562026774 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "DelegationStore.h" #include "DelegationStores.h" namespace ARex { DelegationStores::DelegationStores(DelegationStore::DbType db_type):db_type_(db_type) { } DelegationStores::~DelegationStores(void) { Glib::Mutex::Lock lock(lock_); for(std::map::iterator i = stores_.begin(); i != stores_.end(); ++i) { delete i->second; } } DelegationStore& DelegationStores::operator[](const std::string& path) { Glib::Mutex::Lock lock(lock_); std::map::iterator i = stores_.find(path); if(i != stores_.end()) return *(i->second); DelegationStore* store = new DelegationStore(path,db_type_); stores_.insert(std::pair(path,store)); return *store; } bool DelegationStores::MatchNamespace(const Arc::SOAPEnvelope& in) { return Arc::DelegationContainerSOAP().MatchNamespace(in); } bool DelegationStores::Process(const std::string& path,const Arc::SOAPEnvelope& in,Arc::SOAPEnvelope& out,const std::string& client,std::string& credentials) { return operator[](path).Process(credentials,in,out,client); } bool DelegationStores::DelegatedToken(const std::string& path,Arc::XMLNode token,const std::string& client,std::string& credentials) { return operator[](path).DelegatedToken(credentials,token,client); } } // namespace ARex 
nordugrid-arc-5.4.2/src/services/a-rex/delegation/PaxHeaders.7502/DelegationStores.h0000644000000000000000000000012412715412562026367 xustar000000000000000027 mtime=1463162226.429695 27 atime=1513200576.477725 30 ctime=1513200662.751781141 nordugrid-arc-5.4.2/src/services/a-rex/delegation/DelegationStores.h0000644000175000002070000000340512715412562026436 0ustar00mockbuildmock00000000000000#ifndef __ARC_DELEGATION_STORES_H__ #define __ARC_DELEGATION_STORES_H__ #include #include #include #include #include #include "DelegationStore.h" namespace ARex { /// Set of service storing delegated credentials class DelegationStores { private: Glib::Mutex lock_; std::map stores_; DelegationStore::DbType db_type_; DelegationStores(const DelegationStores&) { }; public: DelegationStores(DelegationStore::DbType db_type = DelegationStore::DbBerkeley); ~DelegationStores(void); void SetDbType(DelegationStore::DbType db_type) { db_type_ = db_type; }; /// Returns or creates delegation storage associated with 'path'. DelegationStore& operator[](const std::string& path); /// Check if SOAP request 'in' can be handled by this implementation. bool MatchNamespace(const Arc::SOAPEnvelope& in); /// Processes SOAP request 'in' using delegation storage associated with 'path'. /// Response is filled into 'out'. The 'client' is identifier of requestor /// used by service internally to recognize owner of stored credentials. /// If operation produces credentials token it is returned in 'credentials'. /// If operation is successful returns true. bool Process(const std::string& path,const Arc::SOAPEnvelope& in,Arc::SOAPEnvelope& out,const std::string& client,std::string& credentials); /// Stores delegated credentials token defined by 'token' into storage 'path'. /// Extracted token is also returned in 'credentials'. /// If operation is successful returns true. 
bool DelegatedToken(const std::string& path,Arc::XMLNode token,const std::string& client,std::string& credentials); }; } // namespace ARex #endif // __ARC_DELEGATION_STORE_H__ nordugrid-arc-5.4.2/src/services/a-rex/delegation/PaxHeaders.7502/DelegationStore.h0000644000000000000000000000012412733562016026204 xustar000000000000000027 mtime=1466885134.934031 27 atime=1513200576.486726 30 ctime=1513200662.750781129 nordugrid-arc-5.4.2/src/services/a-rex/delegation/DelegationStore.h0000644000175000002070000001137112733562016026254 0ustar00mockbuildmock00000000000000#ifndef __ARC_DELEGATION_STORE_H__ #define __ARC_DELEGATION_STORE_H__ #include #include #include #include #include #include "FileRecord.h" namespace ARex { class DelegationStore: public Arc::DelegationContainerSOAP { private: class Consumer { public: std::string id; std::string client; std::string path; Consumer(const std::string& id_, const std::string& client_, const std::string& path_): id(id_),client(client_),path(path_) { }; }; Glib::Mutex lock_; Glib::Mutex check_lock_; FileRecord* fstore_; std::map acquired_; unsigned int expiration_; unsigned int maxrecords_; unsigned int mtimeout_; FileRecord::Iterator* mrec_; Arc::Logger logger_; public: enum DbType { DbBerkeley, DbSQLite }; DelegationStore(const std::string& base, DbType db, bool allow_recover = true); ~DelegationStore(void); operator bool(void) { return ((bool)fstore_ && (bool)*fstore_); }; bool operator!(void) { return !((bool)fstore_ && (bool)*fstore_); }; /** Returns description of last error */ std::string Error(void) { return fstore_?fstore_->Error():std::string(""); }; /** Sets expiration time for unlocked credentials */ void Expiration(unsigned int v = 0) { expiration_ = v; }; /** Sets max number of credentials to store */ void MaxRecords(unsigned int v = 0) { maxrecords_ = v; }; void CheckTimeout(unsigned int v = 0) { mtimeout_ = v; }; /** Create a slot for credential storing and return associated delegation consumer. The consumer object must be release with ReleaseConsumer/RemoveConsumer */ virtual Arc::DelegationConsumerSOAP* AddConsumer(std::string& id,const std::string& client); /** Find existing delegation slot and create delegation consumer for it. 
The consumer object must be release with ReleaseConsumer/RemoveConsumer */ virtual Arc::DelegationConsumerSOAP* FindConsumer(const std::string& id,const std::string& client); /** Store credentials into slot associated with specified consumer object */ virtual bool TouchConsumer(Arc::DelegationConsumerSOAP* c,const std::string& credentials); /** Read credentials stored in slot associated with specified consumer object */ virtual bool QueryConsumer(Arc::DelegationConsumerSOAP* c,std::string& credentials); /** Release consumer object but keep credentials store slot */ virtual void ReleaseConsumer(Arc::DelegationConsumerSOAP* c); /** Release consumer object and delete associated credentials store slot */ virtual void RemoveConsumer(Arc::DelegationConsumerSOAP* c); virtual void CheckConsumers(void); void PeriodicCheckConsumers(void); /** Store new credentials associated with client and assign id to it */ bool AddCred(std::string& id, const std::string& client, const std::string& credentials); /** Store/update credentials with specified id and associated with client */ bool PutCred(const std::string& id, const std::string& client, const std::string& credentials); /** Returns path to file containing credential with specied id and client */ std::string FindCred(const std::string& id,const std::string& client); /** Retrieves credentials with specified id and associated with client */ bool GetCred(const std::string& id, const std::string& client, std::string& credentials); /** Retrieves locks associated with specified id and client */ bool GetLocks(const std::string& id, const std::string& client, std::list& lock_ids); /** Retrieves all locks known */ bool GetLocks(std::list& lock_ids); /** Returns credentials ids associated with specific client */ std::list ListCredIDs(const std::string& client); /** Returns all credentials ids (1st) along with their client ids (2nd) */ std::list > ListCredIDs(void); /** Locks credentials also associating it with specific lock identifier */ bool LockCred(const std::string& lock_id, const std::list& ids,const std::string& client); /** Release lock set by previous call to LockCred by associated lock id. Optionally it can update credentials usage timestamp and force removal credentials from storage if it is not locked anymore. 
*/ bool ReleaseCred(const std::string& lock_id, bool touch = false, bool remove = false); /** Returns credential ids locked by specific lock id and associated with specified client */ std::list ListLockedCredIDs(const std::string& lock_id, const std::string& client); /** Returns credential ids locked by specific lock id */ std::list > ListLockedCredIDs(const std::string& lock_id); }; } // namespace ARex #endif // __ARC_DELEGATION_STORE_H__ nordugrid-arc-5.4.2/src/services/a-rex/delegation/PaxHeaders.7502/uid.h0000644000000000000000000000012411660562253023676 xustar000000000000000027 mtime=1321395371.817946 27 atime=1513200576.483726 30 ctime=1513200662.747781092 nordugrid-arc-5.4.2/src/services/a-rex/delegation/uid.h0000644000175000002070000000013111660562253023736 0ustar00mockbuildmock00000000000000#include namespace ARex { std::string rand_uid64(void); } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/delegation/PaxHeaders.7502/FileRecord.h0000644000000000000000000000012312754411443025131 xustar000000000000000026 mtime=1471288099.46513 27 atime=1513200576.481726 30 ctime=1513200662.748781104 nordugrid-arc-5.4.2/src/services/a-rex/delegation/FileRecord.h0000644000175000002070000000737212754411443025210 0ustar00mockbuildmock00000000000000#ifndef __ARC_DELEGATION_FILERECORD_H__ #define __ARC_DELEGATION_FILERECORD_H__ #include #include namespace ARex { class FileRecord { protected: std::string basepath_; int error_num_; std::string error_str_; bool valid_; std::string uid_to_path(const std::string& uid); bool make_file(const std::string& uid); bool remove_file(const std::string& uid); public: class Iterator { private: Iterator(const Iterator&); // disabled copy constructor protected: Iterator(FileRecord& frec):frec_(frec) {}; FileRecord& frec_; std::string uid_; std::string id_; std::string owner_; std::list meta_; public: virtual ~Iterator(void) {}; virtual Iterator& operator++(void) = 0; virtual Iterator& operator--(void) = 0; virtual void suspend(void) = 0; virtual bool resume(void) = 0; virtual operator bool(void) = 0; virtual bool operator!(void) = 0; const std::string& uid(void) const { return uid_; }; const std::string& id(void) const { return id_; }; const std::string& owner(void) const { return owner_; }; const std::list& meta(void) const { return meta_; }; const std::string path(void) const { return frec_.uid_to_path(uid_); }; }; friend class FileRecord::Iterator; FileRecord(const std::string& base, bool create = true): basepath_(base), error_num_(0), valid_(false) {}; virtual ~FileRecord(void) {}; operator bool(void) { return valid_; }; bool operator!(void) { return !valid_; }; /// Returns textual description of last error. std::string Error(void) { return error_str_; }; /// Obtain an iterator for walking through existing credentials slots. virtual Iterator* NewIterator(void) = 0; virtual bool Recover(void) = 0; /// Adds new slot for storing credentials including generation of uid, /// assignment of id (if empty) and creation of file for storing credentials. virtual std::string Add(std::string& id, const std::string& owner, const std::list& meta) = 0; /// Adds only record in database (to be used for database management only). virtual bool Add(const std::string& uid, const std::string& id, const std::string& owner, const std::list& meta) = 0; /// Obtains path to stored credentials. virtual std::string Find(const std::string& id, const std::string& owner, std::list& meta) = 0; /// Modifies existing entry in database with new meta values. 
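The DelegationStore declared above layers credential management (AddCred/PutCred/GetCred plus lock bookkeeping) on top of the abstract FileRecord backend whose interface continues below. A hedged sketch of that lifecycle follows; the client DN, lock identifier and proxy content are invented, and error handling is reduced to early returns.

#include <list>
#include <string>

#include "DelegationStore.h"

// Illustrative only; exercises the public API declared above.
static void delegation_lifecycle(ARex::DelegationStore& store) {
  const std::string client = "/O=Grid/CN=example user";   // assumed client identity
  std::string id;                                         // assigned by AddCred when left empty
  const std::string proxy_pem = "...PEM encoded proxy..."; // placeholder credentials
  if(!store.AddCred(id, client, proxy_pem)) return;        // create slot and store proxy
  std::list<std::string> ids;
  ids.push_back(id);
  store.LockCred("job-000001", ids, client);               // pin the credential to a job (assumed lock id)
  std::string fetched;
  if(store.GetCred(id, client, fetched)) {
    // 'fetched' now contains the stored proxy.
  }
  // touch=true refreshes the usage timestamp so PeriodicCheckConsumers() does not
  // expire the slot right away; remove=true would delete it once it is unlocked.
  store.ReleaseCred("job-000001", true, false);
}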
virtual bool Modify(const std::string& id, const std::string& owner, const std::list& meta) = 0; /// Fully removes credentials slot including file which stores credentials. virtual bool Remove(const std::string& id, const std::string& owner) = 0; // Assign specified credential ids specified lock lock_id virtual bool AddLock(const std::string& lock_id, const std::list& ids, const std::string& owner) = 0; // Remove lock lock_id from all associated credentials virtual bool RemoveLock(const std::string& lock_id) = 0; // Reomve lock lock_id from all associated credentials and store // identifiers of associated credentials into ids virtual bool RemoveLock(const std::string& lock_id, std::list >& ids) = 0; // Fills locks with all known lock ids. virtual bool ListLocks(std::list& locks) = 0; // Fills locks with all lock ids associated with specified credential id virtual bool ListLocks(const std::string& id, const std::string& owner, std::list& locks) = 0; // Fills ids with identifiers of credentials locked by specified lock_id lock virtual bool ListLocked(const std::string& lock_id, std::list >& ids) = 0; }; } // namespace ARex #endif // __ARC_DELEGATION_FiLERECORD_H__ nordugrid-arc-5.4.2/src/services/a-rex/delegation/PaxHeaders.7502/DelegationStore.cpp0000644000000000000000000000012412754411412026533 xustar000000000000000027 mtime=1471288074.221792 27 atime=1513200576.483726 30 ctime=1513200662.745781068 nordugrid-arc-5.4.2/src/services/a-rex/delegation/DelegationStore.cpp0000644000175000002070000003467612754411412026620 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #define DELEGATION_USES_SQLITE 1 #ifdef HAVE_SQLITE #include "FileRecordSQLite.h" #endif #include "FileRecordBDB.h" #include "DelegationStore.h" namespace ARex { DelegationStore::DelegationStore(const std::string& base, DbType db, bool allow_recover): logger_(Arc::Logger::rootLogger, "Delegation Storage") { fstore_ = NULL; expiration_ = 0; maxrecords_ = 0; mtimeout_ = 0; mrec_ = NULL; switch(db) { case DbBerkeley: fstore_ = new FileRecordBDB(base, allow_recover); break; #ifdef HAVE_SQLITE case DbSQLite: fstore_ = new FileRecordSQLite(base, allow_recover); break; #endif default: failure_ = "Unsupported database type requested for delegation storage."; logger_.msg(Arc::ERROR,"%s",failure_); return; }; if(!*fstore_) { failure_ = "Failed to initialize storage. " + fstore_->Error(); logger_.msg(Arc::WARNING,"%s",failure_); if(allow_recover) { // Database creation failed. Try recovery. if(!fstore_->Recover()) { failure_ = "Failed to recover storage. " + fstore_->Error(); logger_.msg(Arc::WARNING,"%s",failure_); logger_.msg(Arc::WARNING,"Wiping and re-creating whole storage"); delete fstore_; fstore_ = NULL; // Full recreation of database. Delete everything. Glib::Dir dir(base); std::string name; while ((name = dir.read_name()) != "") { std::string fullpath(base); fullpath += G_DIR_SEPARATOR_S + name; struct stat st; if (::lstat(fullpath.c_str(), &st) == 0) { if(S_ISDIR(st.st_mode)) { Arc::DirDelete(fullpath.c_str()); } else { Arc::FileDelete(fullpath.c_str()); }; }; }; switch(db) { case DbBerkeley: fstore_ = new FileRecordBDB(base); break; #ifdef HAVE_SQLITE case DbSQLite: fstore_ = new FileRecordSQLite(base); break; #endif default: // Must not happen - already sorted out above. return; }; if(!*fstore_) { // Failure failure_ = "Failed to re-create storage. 
" + fstore_->Error(); logger_.msg(Arc::WARNING,"%s",failure_); } else { // Database recreated. }; }; } else { logger_.msg(Arc::ERROR,"%s",failure_); }; }; // TODO: Do some cleaning on startup } DelegationStore::~DelegationStore(void) { // BDB objects must be destroyed because // somehow BDB does not understand that process // already died and keeps locks forewer. delete mrec_; delete fstore_; /* Following code is not executed because there must be no active consumers when store being destroyed. It is probably safer to leave hanging consumers than to destroy them. Anyway by design this destructor is supposed to be called only when applications exits. while(acquired_.size() > 0) { std::map::iterator i = acquired_.begin(); delete i->first; acquired_.erase(i); }; */ } Arc::DelegationConsumerSOAP* DelegationStore::AddConsumer(std::string& id,const std::string& client) { std::string path = fstore_->Add(id,client,std::list()); if(path.empty()) { failure_ = "Local error - failed to create slot for delegation. "+fstore_->Error(); return NULL; } Arc::DelegationConsumerSOAP* cs = new Arc::DelegationConsumerSOAP(); std::string key; cs->Backup(key); if(!key.empty()) { if(!Arc::FileCreate(path,key,0,0,S_IRUSR|S_IWUSR)) { fstore_->Remove(id,client); delete cs; cs = NULL; failure_ = "Local error - failed to store credentials"; return NULL; }; }; Glib::Mutex::Lock lock(lock_); acquired_.insert(std::pair(cs,Consumer(id,client,path))); return cs; } static const char* key_start_tag("-----BEGIN RSA PRIVATE KEY-----"); static const char* key_end_tag("-----END RSA PRIVATE KEY-----"); static std::string extract_key(const std::string& proxy) { std::string key; std::string::size_type start = proxy.find(key_start_tag); if(start != std::string::npos) { std::string::size_type end = proxy.find(key_end_tag,start+strlen(key_start_tag)); if(end != std::string::npos) { return proxy.substr(start,end-start+strlen(key_end_tag)); }; }; return ""; } static bool compare_no_newline(const std::string& str1, const std::string& str2) { std::string::size_type p1 = 0; std::string::size_type p2 = 0; for(;;) { if((p1 < str1.length()) && ((str1[p1] == '\r') || (str1[p1] == '\n'))) { ++p1; continue; }; if((p2 < str2.length()) && ((str2[p2] == '\r') || (str2[p2] == '\n'))) { ++p2; continue; }; if(p1 >= str1.length()) break; if(p2 >= str2.length()) break; if(str1[p1] != str2[p2]) break; ++p1; ++p2; }; return ((p1 >= str1.length()) && (p2 >= str2.length())); } Arc::DelegationConsumerSOAP* DelegationStore::FindConsumer(const std::string& id,const std::string& client) { std::list meta; std::string path = fstore_->Find(id,client,meta); if(path.empty()) { failure_ = "Identifier not found for client. 
"+fstore_->Error(); return NULL; }; std::string content; if(!Arc::FileRead(path,content)) { failure_ = "Local error - failed to read credentials"; return NULL; }; Arc::DelegationConsumerSOAP* cs = new Arc::DelegationConsumerSOAP(); if(!content.empty()) { std::string key = extract_key(content); if(!key.empty()) { cs->Restore(key); }; }; Glib::Mutex::Lock lock(lock_); acquired_.insert(std::pair(cs,Consumer(id,client,path))); return cs; } bool DelegationStore::TouchConsumer(Arc::DelegationConsumerSOAP* c,const std::string& credentials) { if(!c) return false; Glib::Mutex::Lock lock(lock_); std::map::iterator i = acquired_.find(c); if(i == acquired_.end()) { failure_ = "Delegation not found"; return false; }; if(!credentials.empty()) { if(!Arc::FileCreate(i->second.path,credentials,0,0,S_IRUSR|S_IWUSR)) { failure_ = "Local error - failed to create storage for delegation"; logger_.msg(Arc::WARNING,"DelegationStore: TouchConsumer failed to create file %s",i->second.path); return false; }; }; return true; } bool DelegationStore::QueryConsumer(Arc::DelegationConsumerSOAP* c,std::string& credentials) { if(!c) return false; Glib::Mutex::Lock lock(lock_); std::map::iterator i = acquired_.find(c); if(i == acquired_.end()) { failure_ = "Delegation not found"; return false; }; Arc::FileRead(i->second.path,credentials); return true; } void DelegationStore::ReleaseConsumer(Arc::DelegationConsumerSOAP* c) { if(!c) return; Glib::Mutex::Lock lock(lock_); std::map::iterator i = acquired_.find(c); if(i == acquired_.end()) return; // ???? // Check if key changed. If yes then store only key. // TODO: optimize std::string newkey; i->first->Backup(newkey); if(!newkey.empty()) { std::string oldkey; std::string content; Arc::FileRead(i->second.path,content); if(!content.empty()) oldkey = extract_key(content); if(!compare_no_newline(newkey,oldkey)) { Arc::FileCreate(i->second.path,newkey,0,0,S_IRUSR|S_IWUSR); }; }; delete i->first; acquired_.erase(i); } void DelegationStore::RemoveConsumer(Arc::DelegationConsumerSOAP* c) { if(!c) return; Glib::Mutex::Lock lock(lock_); std::map::iterator i = acquired_.find(c); if(i == acquired_.end()) return; // ???? fstore_->Remove(i->second.id,i->second.client); // TODO: Handle failure delete i->first; acquired_.erase(i); } void DelegationStore::CheckConsumers(void) { // Not doing any cleaning ocasionally to avoid delegation response delay. // Instead PeriodicCheckConsumers() is called to do periodic cleaning. } void DelegationStore::PeriodicCheckConsumers(void) { // Go through stored credentials // Remove outdated records (those with locks won't be removed) if(expiration_) { time_t start = ::time(NULL); Glib::Mutex::Lock check_lock(lock_); if(mrec_ != NULL) { if(!mrec_->resume()) { logger_.msg(Arc::WARNING,"DelegationStore: PeriodicCheckConsumers failed to resume iterator"); delete mrec_; mrec_ = NULL; }; }; if(mrec_ == NULL) { mrec_ = fstore_->NewIterator(); }; for(;(bool)(*mrec_);++(*mrec_)) { if(mtimeout_ && (((unsigned int)(::time(NULL) - start)) > mtimeout_)) { mrec_->suspend(); return; } struct stat st; if(::stat(mrec_->path().c_str(),&st) == 0) { if(((unsigned int)(::time(NULL) - st.st_mtime)) > expiration_) { if(fstore_->Remove(mrec_->id(),mrec_->owner())) { } else { // It is ok to fail here because Remove checks for delegation locks. // So reporting only for debuging purposes. 
logger_.msg(Arc::DEBUG,"DelegationStore: PeriodicCheckConsumers failed to remove old delegation %s - %s", mrec_->uid(), fstore_->Error()); }; }; }; }; delete mrec_; mrec_ = NULL; }; // TODO: Remove records over threshold return; } bool DelegationStore::AddCred(std::string& id, const std::string& client, const std::string& credentials) { std::string path = fstore_->Add(id,client,std::list()); if(path.empty()) { failure_ = "Local error - failed to create slot for delegation. "+fstore_->Error(); return false; } if(!Arc::FileCreate(path,credentials,0,0,S_IRUSR|S_IWUSR)) { fstore_->Remove(id,client); failure_ = "Local error - failed to create storage for delegation"; logger_.msg(Arc::WARNING,"DelegationStore: TouchConsumer failed to create file %s",path); return false; }; return true; } bool DelegationStore::PutCred(const std::string& id, const std::string& client, const std::string& credentials) { std::list meta; std::string path = fstore_->Find(id,client,meta); if(path.empty()) { failure_ = "Local error - failed to find specified credentials. "+fstore_->Error(); return false; } if(!Arc::FileCreate(path,credentials,0,0,S_IRUSR|S_IWUSR)) { failure_ = "Local error - failed to store delegation"; return false; }; return true; } std::string DelegationStore::FindCred(const std::string& id,const std::string& client) { std::list meta; return fstore_->Find(id,client,meta); } bool DelegationStore::GetCred(const std::string& id, const std::string& client, std::string& credentials) { std::list meta; std::string path = fstore_->Find(id,client,meta); if(path.empty()) { failure_ = "Local error - failed to find specified credentials. "+fstore_->Error(); return false; } std::string content; if(!Arc::FileRead(path,credentials)) { failure_ = "Local error - failed to read credentials"; return false; }; return true; } bool DelegationStore::GetLocks(const std::string& id, const std::string& client, std::list& lock_ids) { return fstore_->ListLocks(id, client, lock_ids); } bool DelegationStore::GetLocks(std::list& lock_ids) { return fstore_->ListLocks(lock_ids); } std::list DelegationStore::ListCredIDs(const std::string& client) { std::list res; FileRecord::Iterator& rec = *(fstore_->NewIterator()); for(;(bool)rec;++rec) { if(rec.owner() == client) res.push_back(rec.id()); }; delete &rec; return res; } std::list > DelegationStore::ListLockedCredIDs(const std::string& lock_id) { std::list > ids; (void)fstore_->ListLocked(lock_id, ids); return ids; } std::list DelegationStore::ListLockedCredIDs(const std::string& lock_id, const std::string& client) { std::list res; std::list > ids; if(!fstore_->ListLocked(lock_id, ids)) return res; for(std::list >::iterator id = ids.begin(); id != ids.end();++id) { if(id->second == client) res.push_back(id->first); } return res; } std::list > DelegationStore::ListCredIDs(void) { std::list > res; FileRecord::Iterator& rec = *(fstore_->NewIterator()); for(;(bool)rec;++rec) { res.push_back(std::pair(rec.id(),rec.owner())); }; delete &rec; return res; } bool DelegationStore::LockCred(const std::string& lock_id, const std::list& ids,const std::string& client) { if(!fstore_->AddLock(lock_id,ids,client)) { failure_ = "Local error - failed set lock for delegation. 
"+fstore_->Error(); return false; }; return true; } bool DelegationStore::ReleaseCred(const std::string& lock_id, bool touch, bool remove) { if((!touch) && (!remove)) return fstore_->RemoveLock(lock_id); std::list > ids; if(!fstore_->RemoveLock(lock_id,ids)) return false; for(std::list >::iterator i = ids.begin(); i != ids.end(); ++i) { if(touch) { std::list meta; std::string path = fstore_->Find(i->first,i->second,meta); // TODO: in a future use meta for storing times if(!path.empty()) ::utime(path.c_str(),NULL); }; if(remove) fstore_->Remove(i->first,i->second); }; return true; } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/delegation/PaxHeaders.7502/FileRecordSQLite.cpp0000644000000000000000000000012413107553466026554 xustar000000000000000027 mtime=1495193398.312771 27 atime=1513200576.478725 30 ctime=1513200662.751781141 nordugrid-arc-5.4.2/src/services/a-rex/delegation/FileRecordSQLite.cpp0000644000175000002070000004755413107553466026640 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "uid.h" #include "FileRecordSQLite.h" namespace ARex { #define FR_DB_NAME "list" bool FileRecordSQLite::dberr(const char* s, int err) { if(err == SQLITE_OK) return true; error_num_ = err; #ifdef HAVE_SQLITE3_ERRSTR error_str_ = std::string(s)+": "+sqlite3_errstr(err); #else error_str_ = std::string(s)+": error code "+Arc::tostring(err); #endif return false; } int sqlite3_exec_nobusy(sqlite3* db, const char *sql, int (*callback)(void*,int,char**,char**), void *arg, char **errmsg) { int err; while((err = sqlite3_exec(db, sql, callback, arg, errmsg)) == SQLITE_BUSY) { // Access to database is designed in such way that it should not block for long time. // So it should be safe to simply wait for lock to be released without any timeout. 
struct timespec delay = { 0, 10000000 }; // 0.01s - should be enough for most cases (void)::nanosleep(&delay, NULL); }; return err; } FileRecordSQLite::FileRecordSQLite(const std::string& base, bool create): FileRecord(base, create), db_(NULL) { valid_ = open(create); } bool FileRecordSQLite::verify(void) { // Not implemented and probably not needed return true; } FileRecordSQLite::~FileRecordSQLite(void) { close(); } bool FileRecordSQLite::open(bool create) { std::string dbpath = basepath_ + G_DIR_SEPARATOR_S + FR_DB_NAME; if(db_ != NULL) return true; // already open int flags = SQLITE_OPEN_READWRITE; // it will open read-only if access is protected if(create) { flags |= SQLITE_OPEN_CREATE; }; int err; while((err = sqlite3_open_v2(dbpath.c_str(), &db_, flags, NULL)) == SQLITE_BUSY) { // In case something prevents databasre from open right now - retry if(db_) (void)sqlite3_close(db_); db_ = NULL; struct timespec delay = { 0, 10000000 }; // 0.01s - should be enough for most cases (void)::nanosleep(&delay, NULL); }; if(!dberr("Error opening database", err)) { if(db_) (void)sqlite3_close(db_); db_ = NULL; return false; }; if(create) { if(!dberr("Error creating table rec", sqlite3_exec_nobusy(db_, "CREATE TABLE IF NOT EXISTS rec(id, owner, uid, meta, UNIQUE(id, owner), UNIQUE(uid))", NULL, NULL, NULL))) { (void)sqlite3_close(db_); // todo: handle error db_ = NULL; return false; }; if(!dberr("Error creating table lock", sqlite3_exec_nobusy(db_, "CREATE TABLE IF NOT EXISTS lock(lockid, uid)", NULL, NULL, NULL))) { (void)sqlite3_close(db_); // todo: handle error db_ = NULL; return false; }; if(!dberr("Error creating index lockid", sqlite3_exec_nobusy(db_, "CREATE INDEX IF NOT EXISTS lockid ON lock (lockid)", NULL, NULL, NULL))) { (void)sqlite3_close(db_); // todo: handle error db_ = NULL; return false; }; if(!dberr("Error creating index uid", sqlite3_exec_nobusy(db_, "CREATE INDEX IF NOT EXISTS uid ON lock (uid)", NULL, NULL, NULL))) { (void)sqlite3_close(db_); // todo: handle error db_ = NULL; return false; }; } else { // SQLite opens database in lazy way. But we still want to know if it is good database. if(!dberr("Error checking database", sqlite3_exec_nobusy(db_, "PRAGMA schema_version;", NULL, NULL, NULL))) { (void)sqlite3_close(db_); // todo: handle error db_ = NULL; return false; }; }; return true; } void FileRecordSQLite::close(void) { valid_ = false; if(db_) { (void)sqlite3_close(db_); // todo: handle error db_ = NULL; }; } static const std::string sql_special_chars("'#\r\n\b\0",6); static const char sql_escape_char('%'); static const Arc::escape_type sql_escape_type(Arc::escape_hex); inline static std::string sql_escape(const std::string& str) { return Arc::escape_chars(str, sql_special_chars, sql_escape_char, false, sql_escape_type); } inline static std::string sql_unescape(const std::string& str) { return Arc::unescape_chars(str, sql_escape_char,sql_escape_type); } void store_strings(const std::list& strs, std::string& buf) { if(!strs.empty()) { for(std::list::const_iterator str = strs.begin(); ; ++str) { buf += sql_escape(*str); if (str == strs.end()) break; buf += '#'; }; }; } static void parse_strings(std::list& strs, const char* buf) { if(!buf || (*buf == '\0')) return; const char* sep = std::strchr(buf, '#'); while(sep) { strs.push_back(sql_unescape(std::string(buf,sep-buf))); buf = sep+1; sep = std::strchr(buf, '#'); }; } bool FileRecordSQLite::Recover(void) { Glib::Mutex::Lock lock(lock_); // Real recovery not implemented yet. 
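// Closing the handle and reporting the failure is enough here: the caller
// (the DelegationStore constructor) reacts to a failed Recover() by wiping
// the storage directory and re-creating the database from scratch.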
close(); error_num_ = -1; error_str_ = "Recovery not implemented yet."; return false; } struct FindCallbackRecArg { sqlite3_int64 rowid; std::string id; std::string owner; std::string uid; std::list meta; FindCallbackRecArg(): rowid(-1) {}; }; static int FindCallbackRec(void* arg, int colnum, char** texts, char** names) { for(int n = 0; n < colnum; ++n) { if(names[n] && texts[n]) { if((strcmp(names[n], "rowid") == 0) || (strcmp(names[n], "_rowid_") == 0)) { (void)Arc::stringto(texts[n], ((FindCallbackRecArg*)arg)->rowid); } else if(strcmp(names[n], "uid") == 0) { ((FindCallbackRecArg*)arg)->uid = texts[n]; } else if(strcmp(names[n], "id") == 0) { ((FindCallbackRecArg*)arg)->id = sql_unescape(texts[n]); } else if(strcmp(names[n], "owner") == 0) { ((FindCallbackRecArg*)arg)->owner = sql_unescape(texts[n]); } else if(strcmp(names[n], "meta") == 0) { parse_strings(((FindCallbackRecArg*)arg)->meta, texts[n]); }; }; }; return 0; } struct FindCallbackUidMetaArg { std::string& uid; std::list& meta; FindCallbackUidMetaArg(std::string& uid, std::list& meta): uid(uid), meta(meta) {}; }; static int FindCallbackUidMeta(void* arg, int colnum, char** texts, char** names) { for(int n = 0; n < colnum; ++n) { if(names[n] && texts[n]) { if(strcmp(names[n], "uid") == 0) { ((FindCallbackUidMetaArg*)arg)->uid = texts[n]; } else if(strcmp(names[n], "meta") == 0) { parse_strings(((FindCallbackUidMetaArg*)arg)->meta, texts[n]); }; }; }; return 0; } struct FindCallbackUidArg { std::string& uid; FindCallbackUidArg(std::string& uid): uid(uid) {}; }; static int FindCallbackUid(void* arg, int colnum, char** texts, char** names) { for(int n = 0; n < colnum; ++n) { if(names[n] && texts[n]) { if(strcmp(names[n], "uid") == 0) { ((FindCallbackUidMetaArg*)arg)->uid = texts[n]; }; }; }; return 0; } struct FindCallbackCountArg { int count; FindCallbackCountArg():count(0) {}; }; static int FindCallbackCount(void* arg, int colnum, char** texts, char** names) { ((FindCallbackCountArg*)arg)->count += 1; return 0; } struct FindCallbackIdOwnerArg { std::list< std::pair >& records; FindCallbackIdOwnerArg(std::list< std::pair >& recs): records(recs) {}; }; static int FindCallbackIdOwner(void* arg, int colnum, char** texts, char** names) { std::pair rec; for(int n = 0; n < colnum; ++n) { if(names[n] && texts[n]) { if(strcmp(names[n], "id") == 0) { rec.first = sql_unescape(texts[n]); } else if(strcmp(names[n], "owner") == 0) { rec.second = sql_unescape(texts[n]); }; }; }; if(!rec.first.empty()) ((FindCallbackIdOwnerArg*)arg)->records.push_back(rec); return 0; } struct FindCallbackLockArg { std::list< std::string >& records; FindCallbackLockArg(std::list< std::string >& recs): records(recs) {}; }; static int FindCallbackLock(void* arg, int colnum, char** texts, char** names) { for(int n = 0; n < colnum; ++n) { if(names[n] && texts[n]) { if(strcmp(names[n], "lockid") == 0) { std::string rec = sql_unescape(texts[n]); if(!rec.empty()) ((FindCallbackLockArg*)arg)->records.push_back(rec); }; }; }; return 0; } std::string FileRecordSQLite::Add(std::string& id, const std::string& owner, const std::list& meta) { if(!valid_) return ""; int uidtries = 10; // some sane number std::string uid; while(true) { if(!(uidtries--)) { error_str_ = "Out of tries adding record to database"; return ""; }; Glib::Mutex::Lock lock(lock_); uid = rand_uid64().substr(4); std::string metas; store_strings(meta, metas); std::string sqlcmd = "INSERT INTO rec(id, owner, uid, meta) VALUES ('"+ sql_escape(id.empty()?uid:id)+"', '"+ sql_escape(owner)+"', '"+uid+"', 
'"+metas+"')"; int dbres = sqlite3_exec_nobusy(db_, sqlcmd.c_str(), NULL, NULL, NULL); if(dbres == SQLITE_CONSTRAINT) { // retry due to non-unique id uid.resize(0); continue; }; if(!dberr("Failed to add record to database", dbres)) { return ""; }; if(sqlite3_changes(db_) != 1) { error_str_ = "Failed to add record to database"; return ""; }; break; }; if(id.empty()) id = uid; make_file(uid); return uid_to_path(uid); } bool FileRecordSQLite::Add(const std::string& uid, const std::string& id, const std::string& owner, const std::list& meta) { if(!valid_) return false; Glib::Mutex::Lock lock(lock_); std::string metas; store_strings(meta, metas); std::string sqlcmd = "INSERT INTO rec(id, owner, uid, meta) VALUES ('"+ sql_escape(id.empty()?uid:id)+"', '"+ sql_escape(owner)+"', '"+uid+"', '"+metas+"')"; int dbres = sqlite3_exec_nobusy(db_, sqlcmd.c_str(), NULL, NULL, NULL); if(!dberr("Failed to add record to database", dbres)) { return false; }; if(sqlite3_changes(db_) != 1) { error_str_ = "Failed to add record to database"; return false; }; return true; } std::string FileRecordSQLite::Find(const std::string& id, const std::string& owner, std::list& meta) { if(!valid_) return ""; Glib::Mutex::Lock lock(lock_); std::string sqlcmd = "SELECT uid, meta FROM rec WHERE ((id = '"+sql_escape(id)+"') AND (owner = '"+sql_escape(owner)+"'))"; std::string uid; FindCallbackUidMetaArg arg(uid, meta); if(!dberr("Failed to retrieve record from database",sqlite3_exec_nobusy(db_, sqlcmd.c_str(), &FindCallbackUidMeta, &arg, NULL))) { return ""; }; if(uid.empty()) { error_str_ = "Failed to retrieve record from database"; return ""; }; return uid_to_path(uid); } bool FileRecordSQLite::Modify(const std::string& id, const std::string& owner, const std::list& meta) { if(!valid_) return false; Glib::Mutex::Lock lock(lock_); std::string metas; store_strings(meta, metas); std::string sqlcmd = "UPDATE rec SET meta = '"+metas+"' WHERE ((id = '"+sql_escape(id)+"') AND (owner = '"+sql_escape(owner)+"'))"; if(!dberr("Failed to update record in database",sqlite3_exec_nobusy(db_, sqlcmd.c_str(), NULL, NULL, NULL))) { return false; }; if(sqlite3_changes(db_) < 1) { error_str_ = "Failed to find record in database"; return false; }; return true; } bool FileRecordSQLite::Remove(const std::string& id, const std::string& owner) { if(!valid_) return false; Glib::Mutex::Lock lock(lock_); std::string uid; { std::string sqlcmd = "SELECT uid FROM rec WHERE ((id = '"+sql_escape(id)+"') AND (owner = '"+sql_escape(owner)+"'))"; FindCallbackUidArg arg(uid); if(!dberr("Failed to retrieve record from database",sqlite3_exec_nobusy(db_, sqlcmd.c_str(), &FindCallbackUid, &arg, NULL))) { return false; // No such record? 
}; }; if(uid.empty()) { error_str_ = "Record not found"; return false; // No such record }; { std::string sqlcmd = "SELECT uid FROM lock WHERE (uid = '"+uid+"')"; FindCallbackCountArg arg; if(!dberr("Failed to find locks in database",sqlite3_exec_nobusy(db_, sqlcmd.c_str(), &FindCallbackCount, &arg, NULL))) { return false; }; if(arg.count > 0) { error_str_ = "Record has active locks"; return false; // have locks }; }; { std::string sqlcmd = "DELETE FROM rec WHERE (uid = '"+uid+"')"; if(!dberr("Failed to delete record in database",sqlite3_exec_nobusy(db_, sqlcmd.c_str(), NULL, NULL, NULL))) { return false; }; if(sqlite3_changes(db_) < 1) { error_str_ = "Failed to delete record in database"; return false; // no such record }; }; remove_file(uid); return true; } bool FileRecordSQLite::AddLock(const std::string& lock_id, const std::list& ids, const std::string& owner) { if(!valid_) return false; Glib::Mutex::Lock lock(lock_); for(std::list::const_iterator id = ids.begin(); id != ids.end(); ++id) { std::string uid; { std::string sqlcmd = "SELECT uid FROM rec WHERE ((id = '"+sql_escape(*id)+"') AND (owner = '"+sql_escape(owner)+"'))"; FindCallbackUidArg arg(uid); if(!dberr("Failed to retrieve record from database",sqlite3_exec_nobusy(db_, sqlcmd.c_str(), &FindCallbackUid, &arg, NULL))) { return false; // No such record? }; }; if(uid.empty()) { // No such record continue; }; std::string sqlcmd = "INSERT INTO lock(lockid, uid) VALUES ('"+sql_escape(lock_id)+"','"+uid+"')"; if(!dberr("addlock:put",sqlite3_exec_nobusy(db_, sqlcmd.c_str(), NULL, NULL, NULL))) { return false; }; }; return true; } bool FileRecordSQLite::RemoveLock(const std::string& lock_id) { if(!valid_) return false; Glib::Mutex::Lock lock(lock_); // map lock to id,owner { std::string sqlcmd = "DELETE FROM lock WHERE (lockid = '"+sql_escape(lock_id)+"')"; if(!dberr("removelock:del",sqlite3_exec_nobusy(db_, sqlcmd.c_str(), NULL, NULL, NULL))) { return false; }; if(sqlite3_changes(db_) < 1) { error_str_ = ""; return false; }; }; return true; } bool FileRecordSQLite::RemoveLock(const std::string& lock_id, std::list >& ids) { if(!valid_) return false; Glib::Mutex::Lock lock(lock_); // map lock to id,owner { std::string sqlcmd = "SELECT id,owner FROM rec WHERE uid IN SELECT uid FROM lock WHERE (lockid = '"+sql_escape(lock_id)+"')"; FindCallbackIdOwnerArg arg(ids); if(!dberr("removelock:get",sqlite3_exec_nobusy(db_, sqlcmd.c_str(), &FindCallbackIdOwner, &arg, NULL))) { //return false; }; }; { std::string sqlcmd = "DELETE FROM lock WHERE (lockid = '"+sql_escape(lock_id)+"')"; if(!dberr("removelock:del",sqlite3_exec_nobusy(db_, sqlcmd.c_str(), NULL, NULL, NULL))) { return false; }; if(sqlite3_changes(db_) < 1) { error_str_ = ""; return false; }; }; return true; } bool FileRecordSQLite::ListLocked(const std::string& lock_id, std::list >& ids) { if(!valid_) return false; Glib::Mutex::Lock lock(lock_); // map lock to id,owner { std::string sqlcmd = "SELECT id,owner FROM rec WHERE uid IN SELECT uid FROM lock WHERE (lockid = '"+sql_escape(lock_id)+"')"; FindCallbackIdOwnerArg arg(ids); if(!dberr("listlocked:get",sqlite3_exec_nobusy(db_, sqlcmd.c_str(), &FindCallbackIdOwner, &arg, NULL))) { return false; }; }; //if(ids.empty()) return false; return true; } bool FileRecordSQLite::ListLocks(std::list& locks) { if(!valid_) return false; Glib::Mutex::Lock lock(lock_); { std::string sqlcmd = "SELECT lockid FROM lock"; FindCallbackLockArg arg(locks); if(!dberr("listlocks:get",sqlite3_exec_nobusy(db_, sqlcmd.c_str(), &FindCallbackLock, &arg, NULL))) { 
return false; }; }; return true; } bool FileRecordSQLite::ListLocks(const std::string& id, const std::string& owner, std::list& locks) { if(!valid_) return false; Glib::Mutex::Lock lock(lock_); std::string uid; { std::string sqlcmd = "SELECT uid FROM rec WHERE ((id = '"+sql_escape(id)+"') AND (owner = '"+sql_escape(owner)+"'))"; FindCallbackUidArg arg(uid); if(!dberr("Failed to retrieve record from database",sqlite3_exec_nobusy(db_, sqlcmd.c_str(), &FindCallbackUid, &arg, NULL))) { return false; // No such record? }; }; if(uid.empty()) { error_str_ = "Record not found"; return false; // No such record }; { std::string sqlcmd = "SELECT lockid FROM lock WHERE (uid = '"+uid+"')"; FindCallbackLockArg arg(locks); if(!dberr("listlocks:get",sqlite3_exec_nobusy(db_, sqlcmd.c_str(), &FindCallbackLock, &arg, NULL))) { return false; }; }; return true; } FileRecordSQLite::Iterator::Iterator(FileRecordSQLite& frec):FileRecord::Iterator(frec) { rowid_ = -1; Glib::Mutex::Lock lock(frec.lock_); { std::string sqlcmd = "SELECT _rowid_,id,owner,uid,meta FROM rec ORDER BY _rowid_ LIMIT 1"; FindCallbackRecArg arg; if(!frec.dberr("listlocks:get",sqlite3_exec_nobusy(frec.db_, sqlcmd.c_str(), &FindCallbackRec, &arg, NULL))) { return; }; if(arg.uid.empty()) { return; }; uid_ = arg.uid; id_ = arg.id; owner_ = arg.owner; meta_ = arg.meta; rowid_ = arg.rowid; }; } FileRecordSQLite::Iterator::~Iterator(void) { } FileRecordSQLite::Iterator& FileRecordSQLite::Iterator::operator++(void) { if(rowid_ == -1) return *this; FileRecordSQLite& frec((FileRecordSQLite&)frec_); Glib::Mutex::Lock lock(frec.lock_); { std::string sqlcmd = "SELECT _rowid_,id,owner,uid,meta FROM rec WHERE (_rowid_ > " + Arc::tostring(rowid_) + ") ORDER BY _rowid_ ASC LIMIT 1"; FindCallbackRecArg arg; if(!frec.dberr("listlocks:get",sqlite3_exec_nobusy(frec.db_, sqlcmd.c_str(), &FindCallbackRec, &arg, NULL))) { rowid_ = -1; return *this; }; if(arg.uid.empty()) { rowid_ = -1; return *this; }; uid_ = arg.uid; id_ = arg.id; owner_ = arg.owner; meta_ = arg.meta; rowid_ = arg.rowid; }; return *this; } FileRecordSQLite::Iterator& FileRecordSQLite::Iterator::operator--(void) { if(rowid_ == -1) return *this; FileRecordSQLite& frec((FileRecordSQLite&)frec_); Glib::Mutex::Lock lock(frec.lock_); { std::string sqlcmd = "SELECT _rowid_,id,owner,uid,meta FROM rec WHERE (_rowid_ < " + Arc::tostring(rowid_) + ") ORDER BY _rowid_ DESC LIMIT 1"; FindCallbackRecArg arg; if(!frec.dberr("listlocks:get",sqlite3_exec_nobusy(frec.db_, sqlcmd.c_str(), &FindCallbackRec, &arg, NULL))) { rowid_ = -1; return *this; }; if(arg.uid.empty()) { rowid_ = -1; return *this; }; uid_ = arg.uid; id_ = arg.id; owner_ = arg.owner; meta_ = arg.meta; rowid_ = arg.rowid; }; return *this; } void FileRecordSQLite::Iterator::suspend(void) { } bool FileRecordSQLite::Iterator::resume(void) { return true; } } // namespace ARex nordugrid-arc-5.4.2/src/services/a-rex/delegation/PaxHeaders.7502/FileRecordBDB.h0000644000000000000000000000012312754411443025441 xustar000000000000000026 mtime=1471288099.46513 27 atime=1513200576.486726 30 ctime=1513200662.749781117 nordugrid-arc-5.4.2/src/services/a-rex/delegation/FileRecordBDB.h0000644000175000002070000000563212754411443025515 0ustar00mockbuildmock00000000000000#ifndef __ARC_DELEGATION_FILERECORDBDB_H__ #define __ARC_DELEGATION_FILERECORDBDB_H__ #include #include #include #include #include "FileRecord.h" namespace ARex { class FileRecordBDB: public FileRecord { private: Glib::Mutex lock_; // TODO: use DB locking DbEnv* db_env_; Db* db_rec_; Db* db_lock_; Db* 
db_locked_; Db* db_link_; static int locked_callback(Db *, const Dbt *, const Dbt *, Dbt * result); static int lock_callback(Db *, const Dbt *, const Dbt *, Dbt * result); bool dberr(const char* s, int err); bool open(bool create); void close(void); bool verify(void); public: class Iterator: public FileRecord::Iterator { friend class FileRecordBDB; private: Dbc* cur_; Iterator(const Iterator&); // disabled constructor Iterator(FileRecordBDB& frec); public: ~Iterator(void); virtual Iterator& operator++(void); virtual Iterator& operator--(void); virtual void suspend(void); virtual bool resume(void); virtual operator bool(void) { return (cur_!=NULL); }; virtual bool operator!(void) { return (cur_==NULL); }; }; friend class FileRecordBDB::Iterator; FileRecordBDB(const std::string& base, bool create = true); virtual ~FileRecordBDB(void); virtual Iterator* NewIterator(void) { return new Iterator(*this); }; virtual bool Recover(void); virtual std::string Add(std::string& id, const std::string& owner, const std::list& meta); virtual bool Add(const std::string& uid, const std::string& id, const std::string& owner, const std::list& meta); virtual std::string Find(const std::string& id, const std::string& owner, std::list& meta); virtual bool Modify(const std::string& id, const std::string& owner, const std::list& meta); virtual bool Remove(const std::string& id, const std::string& owner); // Assign specified credential ids specified lock lock_id virtual bool AddLock(const std::string& lock_id, const std::list& ids, const std::string& owner); // Reomove lock lock_id from all associated credentials virtual bool RemoveLock(const std::string& lock_id); // Reomove lock lock_id from all associated credentials and store // identifiers of associated credentials into ids virtual bool RemoveLock(const std::string& lock_id, std::list >& ids); // Fills locks with all known lock ids. virtual bool ListLocks(std::list& locks); // Fills locks with all lock ids associated with specified credential id virtual bool ListLocks(const std::string& id, const std::string& owner, std::list& locks); // Fills ids with identifiers of credentials locked by specified lock_id lock virtual bool ListLocked(const std::string& lock_id, std::list >& ids); }; } // namespace ARex #endif // __ARC_DELEGATION_FiLERECORDBDB_H__ nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/README0000644000000000000000000000012411037472457021515 xustar000000000000000027 mtime=1216247087.350292 27 atime=1513200576.302723 30 ctime=1513200662.673780187 nordugrid-arc-5.4.2/src/services/a-rex/README0000644000175000002070000000030011037472457021553 0ustar00mockbuildmock00000000000000BES/JSDL based Grid job execution service - A-Rex. 
For details, see "The ARC Computational Job Management Module - A-REX", A.Konstantinov [NORDUGRID-TECH-14], available via www.nordugrid.org nordugrid-arc-5.4.2/src/services/a-rex/PaxHeaders.7502/PayloadFile.cpp0000644000000000000000000000012413153453672023531 xustar000000000000000027 mtime=1504597946.546683 27 atime=1513200576.401725 30 ctime=1513200662.698780493 nordugrid-arc-5.4.2/src/services/a-rex/PayloadFile.cpp0000644000175000002070000001461013153453672023600 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #ifndef WIN32 #include #endif #include #include #include "PayloadFile.h" namespace ARex { PayloadBigFile::Size_t PayloadBigFile::threshold_ = 1024*1024*10; // 10MB by default PayloadFile::PayloadFile(const char* filename,Size_t start,Size_t end) { handle_=::open(filename,O_RDONLY); SetRead(handle_,start,end); } PayloadFile::PayloadFile(int h,Size_t start,Size_t end) { SetRead(h,start,end); } void PayloadFile::SetRead(int h,Size_t start,Size_t end) { handle_=h; start_=start; end_=end; addr_=(char*)MAP_FAILED; size_=0; if(handle_ == -1) return; struct stat st; if(fstat(handle_,&st) != 0) goto error; size_=st.st_size; if((end_ == ((off_t)-1)) || (end_ > size_)) { end_=size_; } if(start_ >= size_) { start_=size_; end_=start_; return; } #ifndef WIN32 if(size_ > 0) { addr_=(char*)mmap(NULL,size_,PROT_READ,MAP_SHARED,handle_,0); if(addr_ == (char*)MAP_FAILED) goto error; } #else goto error; #endif return; error: perror("PayloadFile"); if(handle_ != -1) ::close(handle_); handle_=-1; size_=0; addr_=(char*)MAP_FAILED; return; } PayloadFile::~PayloadFile(void) { #ifndef WIN32 if(addr_ != (char*)MAP_FAILED) munmap(addr_,size_); #endif if(handle_ != -1) ::close(handle_); handle_=-1; size_=0; addr_=(char*)MAP_FAILED; return; } char* PayloadFile::Content(Size_t pos) { if(handle_ == -1) return NULL; if(addr_ == (char*)MAP_FAILED) return NULL; if(pos >= end_) return NULL; if(pos < start_) return NULL; return (addr_+pos); } char PayloadFile::operator[](Size_t pos) const { if(handle_ == -1) return 0; if(addr_ == (char*)MAP_FAILED) return 0; if(pos >= end_) return 0; if(pos < start_) return 0; return addr_[pos]; } PayloadFile::Size_t PayloadFile::Size(void) const { return size_; } char* PayloadFile::Insert(Size_t /*pos*/,Size_t /*size*/) { // Not supported return NULL; } char* PayloadFile::Insert(const char*,Size_t /*pos*/,Size_t /*size*/) { // Not supported return NULL; } char* PayloadFile::Buffer(unsigned int num) { if(handle_ == -1) return NULL; if(num>0) return NULL; if(addr_ == (char*)MAP_FAILED) return NULL; return addr_+start_; } PayloadFile::Size_t PayloadFile::BufferSize(unsigned int num) const { if(handle_ == -1) return 0; if(num>0) return 0; return (end_-start_); } PayloadFile::Size_t PayloadFile::BufferPos(unsigned int num) const { if(num == 0) return start_; return end_; } bool PayloadFile::Truncate(Size_t /*size*/) { // Not supported return false; } static int open_file_read(const char* filename) { return ::open(filename,O_RDONLY); } //static int open_file_write(const char* filename) { // return ::open(filename,O_WRONLY | O_CREAT,S_IRUSR | S_IWUSR); //} PayloadBigFile::PayloadBigFile(int h,Size_t start,Size_t end): PayloadStream(h) { seekable_ = false; if(handle_ == -1) return; ::lseek(handle_,start,SEEK_SET); limit_ = end; } PayloadBigFile::PayloadBigFile(const char* filename,Size_t start,Size_t end): PayloadStream(open_file_read(filename)) { seekable_ = false; if(handle_ == -1) return; ::lseek(handle_,start,SEEK_SET); limit_ = 
end; } //PayloadBigFile::PayloadBigFile(const char* filename,Size_t size): // PayloadStream(open_file_write(filename)){ // seekable_ = false; //} PayloadBigFile::~PayloadBigFile(void) { if(handle_ != -1) ::close(handle_); } Arc::PayloadStream::Size_t PayloadBigFile::Pos(void) const { if(handle_ == -1) return 0; return ::lseek(handle_,0,SEEK_CUR); } Arc::PayloadStream::Size_t PayloadBigFile::Size(void) const { if(handle_ == -1) return 0; struct stat st; if(fstat(handle_,&st) != 0) return 0; return st.st_size; } Arc::PayloadStream::Size_t PayloadBigFile::Limit(void) const { Size_t s = Size(); if((limit_ == (off_t)(-1)) || (limit_ > s)) return s; return limit_; } bool PayloadBigFile::Get(char* buf,int& size) { if(handle_ == -1) return false; if(limit_ == (off_t)(-1)) return PayloadStream::Get(buf,size); Size_t cpos = Pos(); if(cpos >= limit_) { size=0; return false; } if((cpos+size) > limit_) size=limit_-cpos; return PayloadStream::Get(buf,size); } PayloadFAFile::PayloadFAFile(Arc::FileAccess* h,Size_t start,Size_t end) { handle_ = h; if(handle_ == NULL) return; handle_->fa_lseek(start,SEEK_SET); limit_ = end; } PayloadFAFile::~PayloadFAFile(void) { if(handle_ != NULL) { handle_->fa_close(); Arc::FileAccess::Release(handle_); }; } Arc::PayloadStream::Size_t PayloadFAFile::Pos(void) const { if(handle_ == NULL) return 0; return handle_->fa_lseek(0,SEEK_CUR); } Arc::PayloadStream::Size_t PayloadFAFile::Size(void) const { if(handle_ == NULL) return 0; struct stat st; if(!handle_->fa_fstat(st)) return 0; return st.st_size; } Arc::PayloadStream::Size_t PayloadFAFile::Limit(void) const { Size_t s = Size(); if((limit_ == (off_t)(-1)) || (limit_ > s)) return s; return limit_; } bool PayloadFAFile::Get(char* buf,int& size) { if(handle_ == NULL) return false; if(limit_ != (off_t)(-1)) { Size_t cpos = Pos(); if(cpos >= limit_) { size=0; return false; } if((cpos+size) > limit_) size=limit_-cpos; }; ssize_t l = handle_->fa_read(buf,size); if(l <= 0) { size=0; return false; } size = (int)l; return true; } Arc::MessagePayload* newFileRead(const char* filename,Arc::PayloadRawInterface::Size_t start,Arc::PayloadRawInterface::Size_t end) { int h = open_file_read(filename); return newFileRead(h,start,end); } Arc::MessagePayload* newFileRead(int h,Arc::PayloadRawInterface::Size_t start,Arc::PayloadRawInterface::Size_t end) { #ifndef WIN32 struct stat st; if(fstat(h,&st) != 0) return NULL; if(st.st_size > PayloadBigFile::Threshold()) { PayloadBigFile* f = new PayloadBigFile(h,start,end); if(!*f) { delete f; return NULL; }; return f; } PayloadFile* f = new PayloadFile(h,start,end); if(!*f) { delete f; return NULL; }; return f; #else PayloadBigFile* f = new PayloadBigFile(h,start,end); if(!*f) { delete f; return NULL; }; return f; #endif } Arc::MessagePayload* newFileRead(Arc::FileAccess* h,Arc::PayloadRawInterface::Size_t start,Arc::PayloadRawInterface::Size_t end) { PayloadFAFile* f = new PayloadFAFile(h,start,end); return f; } } // namespace ARex nordugrid-arc-5.4.2/src/services/PaxHeaders.7502/gridftpd0000644000000000000000000000013213214316026021332 xustar000000000000000030 mtime=1513200662.491777961 30 atime=1513200668.718854121 30 ctime=1513200662.491777961 nordugrid-arc-5.4.2/src/services/gridftpd/0000755000175000002070000000000013214316026021455 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712754431715023462 xustar000000000000000027 mtime=1471296461.229277 30 atime=1513200604.179064774 30 ctime=1513200662.465777643 
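The newFileRead() helpers in PayloadFile.cpp above choose between a memory-mapped PayloadFile and a streaming PayloadBigFile depending on PayloadBigFile::Threshold() (10 MB by default). A hedged usage sketch, assuming the helpers are declared in PayloadFile.h and using an invented file name:

#include "PayloadFile.h"

// Hypothetical caller; "/tmp/job.output" is an invented path.
static Arc::MessagePayload* open_job_output(void) {
  // start = 0 and end = -1 request the whole file (an end beyond EOF is clipped).
  Arc::PayloadRawInterface::Size_t start = 0;
  Arc::PayloadRawInterface::Size_t end = (Arc::PayloadRawInterface::Size_t)(-1);
  // Files above PayloadBigFile::Threshold() are served through a streaming
  // payload instead of being mmap()ed in one piece; NULL is returned on failure.
  return ARex::newFileRead("/tmp/job.output", start, end);
}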
nordugrid-arc-5.4.2/src/services/gridftpd/Makefile.am0000644000175000002070000000327012754431715023526 0ustar00mockbuildmock00000000000000sbin_PROGRAMS = gridftpd noinst_LTLIBRARIES = libgridftpd.la if SYSV_SCRIPTS_ENABLED GRIDFTPD_SCRIPT = gridftpd.init else GRIDFTPD_SCRIPT = endif if SYSV_SCRIPTS_ENABLED # Not using initd_SCRIPTS due to rename. install-data-local: $(MKDIR_P) "$(DESTDIR)$(initddir)" $(INSTALL_SCRIPT) gridftpd.init $(DESTDIR)$(initddir)/gridftpd uninstall-local: rm -f $(DESTDIR)$(initddir)/gridftpd endif if SYSTEMD_UNITS_ENABLED GRIDFTPD_UNIT = gridftpd.service GRIDFTPD_UNIT_WRAPPER = gridftpd-start else GRIDFTPD_UNIT = GRIDFTPD_UNIT_WRAPPER = endif units_DATA = $(GRIDFTPD_UNIT) pkgdata_SCRIPTS = $(GRIDFTPD_UNIT_WRAPPER) libgridftpd_la_SOURCES = userspec.cpp names.cpp misc.cpp libgridftpd_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLOBUS_FTP_CLIENT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) gridftpd_SOURCES = commands.cpp config.cpp fileroot.cpp listener.cpp \ dataread.cpp datawrite.cpp datalist.cpp fileroot_config.cpp \ commands.h conf.h fileroot.h misc.h names.h userspec.h gridftpd_CXXFLAGS = -I$(top_srcdir)/include \ $(GLOBUS_FTP_CLIENT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) gridftpd_LDADD = libgridftpd.la conf/libconf.la run/librun.la \ misc/libmisc.la auth/libauth.la auth/libmap.la \ $(top_builddir)/src/hed/libs/globusutils/libarcglobusutils.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLOBUS_FTP_CONTROL_LIBS) $(GLOBUS_GSS_ASSIST_LIBS) \ $(GLOBUS_GSSAPI_GSI_LIBS) $(GLOBUS_IO_LIBS) \ $(GLOBUS_GSI_CERT_UTILS_LIBS) $(GLOBUS_GSI_CREDENTIAL_LIBS) \ $(GLOBUS_OPENSSL_MODULE_LIBS) $(GLOBUS_COMMON_LIBS) gridftpd_LDFLAGS = -rdynamic SUBDIRS = misc conf run auth . fileplugin DIST_SUBDIRS = misc conf run auth . 
fileplugin man_MANS = gridftpd.8 nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/dataread.cpp0000644000000000000000000000012411741502232023660 xustar000000000000000027 mtime=1334215834.878809 27 atime=1513200575.526714 30 ctime=1513200662.478777802 nordugrid-arc-5.4.2/src/services/gridftpd/dataread.cpp0000644000175000002070000001600511741502232023727 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "fileroot.h" #include "names.h" #include "commands.h" static Arc::Logger logger(Arc::Logger::getRootLogger(),"GridFTP_Commands"); /* file retrieve callbacks */ void GridFTP_Commands::data_connect_retrieve_callback(void* arg,globus_ftp_control_handle_t*,unsigned int /* stripendx */,globus_bool_t /* reused */,globus_object_t *error) { GridFTP_Commands *it = (GridFTP_Commands*)arg; logger.msg(Arc::VERBOSE, "data_connect_retrieve_callback"); globus_thread_blocking_will_block(); globus_mutex_lock(&(it->data_lock)); it->time_spent_disc=0; it->time_spent_network=0; it->last_action_time=time(NULL); logger.msg(Arc::VERBOSE, "Data channel connected (retrieve)"); if(it->check_abort(error)) { it->froot.close(false); globus_mutex_unlock(&(it->data_lock)); return; }; it->data_eof = false; /* make buffers */ logger.msg(Arc::VERBOSE, "data_connect_retrieve_callback: allocate_data_buffer"); it->compute_data_buffer(); if(!(it->allocate_data_buffer())) { logger.msg(Arc::ERROR, "data_connect_retrieve_callback: allocate_data_buffer failed"); it->froot.close(false); it->force_abort(); globus_mutex_unlock(&(it->data_lock)); return; }; /* fill and register all available buffers */ it->data_callbacks=0; it->data_offset=0; for(unsigned int i = 0;idata_buffer_num;i++) { logger.msg(Arc::VERBOSE, "data_connect_retrieve_callback: check for buffer %u", i); if(!((it->data_buffer)[i].data)) continue; /* read data from file */ unsigned long long size = it->data_buffer_size; if(it->virt_restrict) { if((it->data_offset + size) > it->virt_size) size=it->virt_size-it->data_offset; }; struct timezone tz; gettimeofday(&(it->data_buffer[i].time_last),&tz); int fres=it->froot.read(it->data_buffer[i].data, (it->virt_offset)+(it->data_offset),&size); if(fres != 0) { logger.msg(Arc::ERROR, "Closing channel (retrieve) due to local read error :%s", it->froot.error); it->force_abort(); it->free_data_buffer();it->froot.close(false); globus_mutex_unlock(&(it->data_lock)); return; }; if(size == 0) it->data_eof=GLOBUS_TRUE; /* register buffer */ globus_result_t res; res=globus_ftp_control_data_write(&(it->handle), (globus_byte_t*)(it->data_buffer[i].data), size,it->data_offset,it->data_eof, &data_retrieve_callback,it); it->data_offset+=size; if(res != GLOBUS_SUCCESS) { logger.msg(Arc::ERROR, "Buffer registration failed"); logger.msg(Arc::ERROR, "Globus error: %s", Arc::GlobusResult(res).str()); it->force_abort(); if(it->data_callbacks==0){it->free_data_buffer();it->froot.close(false);}; globus_mutex_unlock(&(it->data_lock)); return; }; it->data_callbacks++; if(it->data_eof == GLOBUS_TRUE) break; }; globus_mutex_unlock(&(it->data_lock)); return; } void GridFTP_Commands::data_retrieve_callback(void* arg,globus_ftp_control_handle_t*,globus_object_t *error,globus_byte_t *buffer,globus_size_t length,globus_off_t offset,globus_bool_t eof) { logger.msg(Arc::VERBOSE, "data_retrieve_callback"); globus_thread_blocking_will_block(); GridFTP_Commands *it = (GridFTP_Commands*)arg; struct timezone tz; struct timeval tv; gettimeofday(&tv,&tz); 
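/* 'tv' is sampled before data_lock is taken: below it is compared with this
   buffer's time_last stamp to account how long the buffer spent in the
   network layer (accumulated in time_spent_network). */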
globus_mutex_lock(&(it->data_lock)); it->last_action_time=time(NULL); logger.msg(Arc::VERBOSE, "Data channel (retrieve) %i %i %i", (int)offset, (int)length, (int)eof); it->data_callbacks--; if(it->check_abort(error)) { if(it->data_callbacks==0){it->free_data_buffer();it->froot.close(false);}; globus_mutex_unlock(&(it->data_lock)); return; }; if(it->data_eof) { if(it->data_callbacks==0) { logger.msg(Arc::VERBOSE, "Closing channel (retrieve)"); it->free_data_buffer(); it->virt_offset=0; it->virt_restrict=false; it->transfer_mode=false; it->froot.close(); logger.msg(Arc::VERBOSE, "Time spent waiting for network: %.3f ms", (float)(it->time_spent_network/1000.0)); logger.msg(Arc::VERBOSE, "Time spent waiting for disc: %.3f ms", (float)(it->time_spent_disc/1000.0)); it->send_response("226 Requested file transfer completed\r\n"); }; globus_mutex_unlock(&(it->data_lock)); return; }; /* find this buffer */ unsigned int i; for(i = 0;idata_buffer_num;i++) { if((it->data_buffer)[i].data == (unsigned char*)buffer) break; }; if(i >= it->data_buffer_num) { /* lost buffer - probably memory corruption */ logger.msg(Arc::ERROR, "data_retrieve_callback: lost buffer"); it->force_abort(); if(it->data_callbacks==0){it->free_data_buffer();it->froot.close(false);}; globus_mutex_unlock(&(it->data_lock)); return; }; unsigned long long int time_diff = (tv.tv_sec-(it->data_buffer[i].time_last.tv_sec))*1000000+ (tv.tv_usec-(it->data_buffer[i].time_last.tv_usec)); it->time_spent_network+=time_diff; /* read data from file */ unsigned long long size = it->data_buffer_size; if(it->virt_restrict) { if((it->data_offset + size) > it->virt_size) size=it->virt_size-it->data_offset; }; #ifdef __USE_PARALLEL_FILE_ACCESS__ it->data_callbacks++; /* Unlock while reading file, so to allow others to read in parallel. This can speed up read if on striped device/filesystem. 
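(This branch is compiled only when __USE_PARALLEL_FILE_ACCESS__ is defined at build time; the gridftpd Makefile.am above passes no such define, so by default the lock stays held across froot.read, as the NOTE below explains.)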
*/ globus_mutex_unlock(&(it->data_lock)); #endif /* NOTE: it->data_lock is not unlocked here because it->froot.write is not thread safe */ struct timeval tv_last; gettimeofday(&tv_last,&tz); int fres=it->froot.read(it->data_buffer[i].data, (it->virt_offset)+(it->data_offset),&size); #ifdef __USE_PARALLEL_FILE_ACCESS__ globus_mutex_lock(&(it->data_lock)); it->data_callbacks--; #endif gettimeofday(&tv,&tz); time_diff=(tv.tv_sec-tv_last.tv_sec)*1000000+(tv.tv_usec-tv_last.tv_usec); it->time_spent_disc+=time_diff; if((fres != 0) || (!it->transfer_mode) || (it->transfer_abort)) { logger.msg(Arc::ERROR, "Closing channel (retrieve) due to local read error: %s", it->froot.error); it->force_abort(); if(it->data_callbacks==0){it->free_data_buffer();it->froot.close(false);}; globus_mutex_unlock(&(it->data_lock)); return; }; if(size == 0) it->data_eof=true; /* register buffer */ globus_result_t res; res=globus_ftp_control_data_write(&(it->handle), (globus_byte_t*)(it->data_buffer[i].data), size,it->data_offset,it->data_eof, &data_retrieve_callback,it); it->data_offset+=size; if(res != GLOBUS_SUCCESS) { logger.msg(Arc::ERROR, "Buffer registration failed"); logger.msg(Arc::ERROR, "Globus error: %s", Arc::GlobusResult(res).str()); it->force_abort(); if(it->data_callbacks==0){it->free_data_buffer();it->froot.close(false);}; globus_mutex_unlock(&(it->data_lock)); return; }; it->data_callbacks++; globus_mutex_unlock(&(it->data_lock)); return; } nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/auth0000644000000000000000000000013213214316026022273 xustar000000000000000030 mtime=1513200662.623779575 30 atime=1513200668.718854121 30 ctime=1513200662.623779575 nordugrid-arc-5.4.2/src/services/gridftpd/auth/0000755000175000002070000000000013214316026022416 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612442324223024407 xustar000000000000000027 mtime=1418307731.508846 30 atime=1513200604.273065923 29 ctime=1513200662.59877927 nordugrid-arc-5.4.2/src/services/gridftpd/auth/Makefile.am0000644000175000002070000000231312442324223024451 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libauth.la libaccess.la libmap.la libauth_la_SOURCES = auth.h auth.cpp auth_subject.cpp auth_file.cpp \ auth_ldap.cpp auth_voms.cpp auth_lcas.cpp auth_plugin.cpp libauth_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(GLOBUS_IO_CFLAGS) \ $(LCAS_CFLAGS) $(LCMAPS_CFLAGS) $(AM_CXXFLAGS) libauth_la_LIBADD = ../misc/libmisc.la ../run/librun.la ../conf/libconf.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la libaccess_la_SOURCES = identity.cpp identity.h \ identity_dn.cpp identity_dn.h identity_voms.cpp identity_voms.h \ permission.cpp permission.h \ object_access.cpp object_access.h libaccess_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(GLOBUS_IO_CFLAGS) \ $(LCAS_CFLAGS) $(LCMAPS_CFLAGS) $(AM_CXXFLAGS) libaccess_la_LIBADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(XML_LIBS) libmap_la_SOURCES = unixmap.h unixmap.cpp unixmap_lcmaps.cpp \ simplemap.h simplemap.cpp libmap_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(GLOBUS_IO_CFLAGS) \ $(LCAS_CFLAGS) $(LCMAPS_CFLAGS) $(AM_CXXFLAGS) libmap_la_LIBADD = libauth.la $(top_builddir)/src/hed/libs/common/libarccommon.la nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/simplemap.h0000644000000000000000000000012411611503620024506 xustar000000000000000027 
mtime=1311147920.204773 27 atime=1513200575.542714 30 ctime=1513200662.622779563 nordugrid-arc-5.4.2/src/services/gridftpd/auth/simplemap.h0000644000175000002070000000060411611503620024553 0ustar00mockbuildmock00000000000000#include #define SELFUNMAP_TIME (10*24*60*60) class SimpleMap { private: std::string dir_; int pool_handle_; public: SimpleMap(const char* dir); ~SimpleMap(void); std::string map(const char* subject); bool unmap(const char* subject); operator bool(void) const { return (pool_handle_ != -1); }; bool operator!(void) const { return (pool_handle_ == -1); }; }; nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/identity_dn.cpp0000644000000000000000000000012411741502232025366 xustar000000000000000027 mtime=1334215834.878809 27 atime=1513200575.549714 30 ctime=1513200662.603779331 nordugrid-arc-5.4.2/src/services/gridftpd/auth/identity_dn.cpp0000644000175000002070000000132111741502232025430 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "identity_dn.h" IdentityItemDN::IdentityItemDN(const char* dn):dn_(dn) { type_="dn"; } IdentityItemDN::~IdentityItemDN(void) { } Identity::Item* IdentityItemDN::duplicate(void) const { return new IdentityItemDN(dn_.c_str()); } const std::string& IdentityItemDN::name(unsigned int n) { if(n>0) return empty_; return type_; } const std::string& IdentityItemDN::value(unsigned int n) { if(n>0) return empty_; return dn_; } const std::string& IdentityItemDN::value(const char* name,unsigned int /* n */) { std::string name_s = name; if(name_s != "dn") return empty_; return dn_; } std::string IdentityItemDN::str(void) { return dn_; } nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315734024422 xustar000000000000000030 mtime=1513200604.349066853 30 atime=1513200651.226640184 30 ctime=1513200662.599779282 nordugrid-arc-5.4.2/src/services/gridftpd/auth/Makefile.in0000644000175000002070000011762013214315734024477 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
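# The variables and targets below were generated by automake from the
# Makefile.am shown above; they build the convenience libraries libauth.la,
# libaccess.la and libmap.la declared there and are not meant to be edited
# by hand.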
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/gridftpd/auth DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libaccess_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libaccess_la_OBJECTS = libaccess_la-identity.lo \ libaccess_la-identity_dn.lo libaccess_la-identity_voms.lo \ libaccess_la-permission.lo libaccess_la-object_access.lo libaccess_la_OBJECTS = $(am_libaccess_la_OBJECTS) libaccess_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libaccess_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ libauth_la_DEPENDENCIES = ../misc/libmisc.la ../run/librun.la \ ../conf/libconf.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libauth_la_OBJECTS = libauth_la-auth.lo libauth_la-auth_subject.lo \ libauth_la-auth_file.lo libauth_la-auth_ldap.lo \ libauth_la-auth_voms.lo libauth_la-auth_lcas.lo \ libauth_la-auth_plugin.lo libauth_la_OBJECTS = $(am_libauth_la_OBJECTS) libauth_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libauth_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ libmap_la_DEPENDENCIES = libauth.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libmap_la_OBJECTS = libmap_la-unixmap.lo \ libmap_la-unixmap_lcmaps.lo libmap_la-simplemap.lo libmap_la_OBJECTS = $(am_libmap_la_OBJECTS) libmap_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libmap_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ 
COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libaccess_la_SOURCES) $(libauth_la_SOURCES) \ $(libmap_la_SOURCES) DIST_SOURCES = $(libaccess_la_SOURCES) $(libauth_la_SOURCES) \ $(libmap_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ 
GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = 
@XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libauth.la libaccess.la libmap.la libauth_la_SOURCES = auth.h auth.cpp auth_subject.cpp auth_file.cpp \ auth_ldap.cpp auth_voms.cpp auth_lcas.cpp auth_plugin.cpp libauth_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(GLOBUS_IO_CFLAGS) \ $(LCAS_CFLAGS) $(LCMAPS_CFLAGS) $(AM_CXXFLAGS) libauth_la_LIBADD = ../misc/libmisc.la ../run/librun.la ../conf/libconf.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la libaccess_la_SOURCES = identity.cpp identity.h \ identity_dn.cpp identity_dn.h identity_voms.cpp identity_voms.h \ permission.cpp permission.h \ object_access.cpp object_access.h libaccess_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(GLOBUS_IO_CFLAGS) \ $(LCAS_CFLAGS) $(LCMAPS_CFLAGS) $(AM_CXXFLAGS) libaccess_la_LIBADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(XML_LIBS) libmap_la_SOURCES = unixmap.h unixmap.cpp unixmap_lcmaps.cpp \ simplemap.h simplemap.cpp libmap_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(GLOBUS_IO_CFLAGS) \ $(LCAS_CFLAGS) $(LCMAPS_CFLAGS) $(AM_CXXFLAGS) libmap_la_LIBADD = 
libauth.la $(top_builddir)/src/hed/libs/common/libarccommon.la all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/gridftpd/auth/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/gridftpd/auth/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libaccess.la: $(libaccess_la_OBJECTS) $(libaccess_la_DEPENDENCIES) $(libaccess_la_LINK) $(libaccess_la_OBJECTS) $(libaccess_la_LIBADD) $(LIBS) libauth.la: $(libauth_la_OBJECTS) $(libauth_la_DEPENDENCIES) $(libauth_la_LINK) $(libauth_la_OBJECTS) $(libauth_la_LIBADD) $(LIBS) libmap.la: $(libmap_la_OBJECTS) $(libmap_la_DEPENDENCIES) $(libmap_la_LINK) $(libmap_la_OBJECTS) $(libmap_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccess_la-identity.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccess_la-identity_dn.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccess_la-identity_voms.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccess_la-object_access.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccess_la-permission.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libauth_la-auth.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libauth_la-auth_file.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libauth_la-auth_lcas.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libauth_la-auth_ldap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libauth_la-auth_plugin.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libauth_la-auth_subject.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libauth_la-auth_voms.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmap_la-simplemap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmap_la-unixmap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmap_la-unixmap_lcmaps.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libaccess_la-identity.lo: identity.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccess_la_CXXFLAGS) $(CXXFLAGS) -MT libaccess_la-identity.lo -MD -MP -MF $(DEPDIR)/libaccess_la-identity.Tpo -c -o libaccess_la-identity.lo `test -f 'identity.cpp' || echo '$(srcdir)/'`identity.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccess_la-identity.Tpo $(DEPDIR)/libaccess_la-identity.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='identity.cpp' object='libaccess_la-identity.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccess_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccess_la-identity.lo `test -f 'identity.cpp' || echo '$(srcdir)/'`identity.cpp libaccess_la-identity_dn.lo: identity_dn.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccess_la_CXXFLAGS) $(CXXFLAGS) -MT libaccess_la-identity_dn.lo -MD -MP -MF $(DEPDIR)/libaccess_la-identity_dn.Tpo -c -o libaccess_la-identity_dn.lo `test -f 'identity_dn.cpp' || echo '$(srcdir)/'`identity_dn.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccess_la-identity_dn.Tpo $(DEPDIR)/libaccess_la-identity_dn.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='identity_dn.cpp' object='libaccess_la-identity_dn.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccess_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccess_la-identity_dn.lo `test -f 'identity_dn.cpp' || echo '$(srcdir)/'`identity_dn.cpp libaccess_la-identity_voms.lo: identity_voms.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccess_la_CXXFLAGS) $(CXXFLAGS) -MT libaccess_la-identity_voms.lo -MD -MP -MF $(DEPDIR)/libaccess_la-identity_voms.Tpo -c -o libaccess_la-identity_voms.lo `test -f 'identity_voms.cpp' || echo 
'$(srcdir)/'`identity_voms.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccess_la-identity_voms.Tpo $(DEPDIR)/libaccess_la-identity_voms.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='identity_voms.cpp' object='libaccess_la-identity_voms.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccess_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccess_la-identity_voms.lo `test -f 'identity_voms.cpp' || echo '$(srcdir)/'`identity_voms.cpp libaccess_la-permission.lo: permission.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccess_la_CXXFLAGS) $(CXXFLAGS) -MT libaccess_la-permission.lo -MD -MP -MF $(DEPDIR)/libaccess_la-permission.Tpo -c -o libaccess_la-permission.lo `test -f 'permission.cpp' || echo '$(srcdir)/'`permission.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccess_la-permission.Tpo $(DEPDIR)/libaccess_la-permission.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='permission.cpp' object='libaccess_la-permission.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccess_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccess_la-permission.lo `test -f 'permission.cpp' || echo '$(srcdir)/'`permission.cpp libaccess_la-object_access.lo: object_access.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccess_la_CXXFLAGS) $(CXXFLAGS) -MT libaccess_la-object_access.lo -MD -MP -MF $(DEPDIR)/libaccess_la-object_access.Tpo -c -o libaccess_la-object_access.lo `test -f 'object_access.cpp' || echo '$(srcdir)/'`object_access.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccess_la-object_access.Tpo $(DEPDIR)/libaccess_la-object_access.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='object_access.cpp' object='libaccess_la-object_access.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccess_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccess_la-object_access.lo `test -f 'object_access.cpp' || echo '$(srcdir)/'`object_access.cpp libauth_la-auth.lo: auth.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libauth_la_CXXFLAGS) $(CXXFLAGS) -MT libauth_la-auth.lo -MD -MP -MF $(DEPDIR)/libauth_la-auth.Tpo -c -o libauth_la-auth.lo `test -f 'auth.cpp' || echo '$(srcdir)/'`auth.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libauth_la-auth.Tpo $(DEPDIR)/libauth_la-auth.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='auth.cpp' object='libauth_la-auth.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) 
$(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libauth_la_CXXFLAGS) $(CXXFLAGS) -c -o libauth_la-auth.lo `test -f 'auth.cpp' || echo '$(srcdir)/'`auth.cpp libauth_la-auth_subject.lo: auth_subject.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libauth_la_CXXFLAGS) $(CXXFLAGS) -MT libauth_la-auth_subject.lo -MD -MP -MF $(DEPDIR)/libauth_la-auth_subject.Tpo -c -o libauth_la-auth_subject.lo `test -f 'auth_subject.cpp' || echo '$(srcdir)/'`auth_subject.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libauth_la-auth_subject.Tpo $(DEPDIR)/libauth_la-auth_subject.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='auth_subject.cpp' object='libauth_la-auth_subject.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libauth_la_CXXFLAGS) $(CXXFLAGS) -c -o libauth_la-auth_subject.lo `test -f 'auth_subject.cpp' || echo '$(srcdir)/'`auth_subject.cpp libauth_la-auth_file.lo: auth_file.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libauth_la_CXXFLAGS) $(CXXFLAGS) -MT libauth_la-auth_file.lo -MD -MP -MF $(DEPDIR)/libauth_la-auth_file.Tpo -c -o libauth_la-auth_file.lo `test -f 'auth_file.cpp' || echo '$(srcdir)/'`auth_file.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libauth_la-auth_file.Tpo $(DEPDIR)/libauth_la-auth_file.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='auth_file.cpp' object='libauth_la-auth_file.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libauth_la_CXXFLAGS) $(CXXFLAGS) -c -o libauth_la-auth_file.lo `test -f 'auth_file.cpp' || echo '$(srcdir)/'`auth_file.cpp libauth_la-auth_ldap.lo: auth_ldap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libauth_la_CXXFLAGS) $(CXXFLAGS) -MT libauth_la-auth_ldap.lo -MD -MP -MF $(DEPDIR)/libauth_la-auth_ldap.Tpo -c -o libauth_la-auth_ldap.lo `test -f 'auth_ldap.cpp' || echo '$(srcdir)/'`auth_ldap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libauth_la-auth_ldap.Tpo $(DEPDIR)/libauth_la-auth_ldap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='auth_ldap.cpp' object='libauth_la-auth_ldap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libauth_la_CXXFLAGS) $(CXXFLAGS) -c -o libauth_la-auth_ldap.lo `test -f 'auth_ldap.cpp' || echo '$(srcdir)/'`auth_ldap.cpp libauth_la-auth_voms.lo: auth_voms.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libauth_la_CXXFLAGS) $(CXXFLAGS) -MT 
libauth_la-auth_voms.lo -MD -MP -MF $(DEPDIR)/libauth_la-auth_voms.Tpo -c -o libauth_la-auth_voms.lo `test -f 'auth_voms.cpp' || echo '$(srcdir)/'`auth_voms.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libauth_la-auth_voms.Tpo $(DEPDIR)/libauth_la-auth_voms.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='auth_voms.cpp' object='libauth_la-auth_voms.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libauth_la_CXXFLAGS) $(CXXFLAGS) -c -o libauth_la-auth_voms.lo `test -f 'auth_voms.cpp' || echo '$(srcdir)/'`auth_voms.cpp libauth_la-auth_lcas.lo: auth_lcas.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libauth_la_CXXFLAGS) $(CXXFLAGS) -MT libauth_la-auth_lcas.lo -MD -MP -MF $(DEPDIR)/libauth_la-auth_lcas.Tpo -c -o libauth_la-auth_lcas.lo `test -f 'auth_lcas.cpp' || echo '$(srcdir)/'`auth_lcas.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libauth_la-auth_lcas.Tpo $(DEPDIR)/libauth_la-auth_lcas.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='auth_lcas.cpp' object='libauth_la-auth_lcas.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libauth_la_CXXFLAGS) $(CXXFLAGS) -c -o libauth_la-auth_lcas.lo `test -f 'auth_lcas.cpp' || echo '$(srcdir)/'`auth_lcas.cpp libauth_la-auth_plugin.lo: auth_plugin.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libauth_la_CXXFLAGS) $(CXXFLAGS) -MT libauth_la-auth_plugin.lo -MD -MP -MF $(DEPDIR)/libauth_la-auth_plugin.Tpo -c -o libauth_la-auth_plugin.lo `test -f 'auth_plugin.cpp' || echo '$(srcdir)/'`auth_plugin.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libauth_la-auth_plugin.Tpo $(DEPDIR)/libauth_la-auth_plugin.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='auth_plugin.cpp' object='libauth_la-auth_plugin.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libauth_la_CXXFLAGS) $(CXXFLAGS) -c -o libauth_la-auth_plugin.lo `test -f 'auth_plugin.cpp' || echo '$(srcdir)/'`auth_plugin.cpp libmap_la-unixmap.lo: unixmap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmap_la_CXXFLAGS) $(CXXFLAGS) -MT libmap_la-unixmap.lo -MD -MP -MF $(DEPDIR)/libmap_la-unixmap.Tpo -c -o libmap_la-unixmap.lo `test -f 'unixmap.cpp' || echo '$(srcdir)/'`unixmap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmap_la-unixmap.Tpo $(DEPDIR)/libmap_la-unixmap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='unixmap.cpp' object='libmap_la-unixmap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) 
--tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmap_la_CXXFLAGS) $(CXXFLAGS) -c -o libmap_la-unixmap.lo `test -f 'unixmap.cpp' || echo '$(srcdir)/'`unixmap.cpp libmap_la-unixmap_lcmaps.lo: unixmap_lcmaps.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmap_la_CXXFLAGS) $(CXXFLAGS) -MT libmap_la-unixmap_lcmaps.lo -MD -MP -MF $(DEPDIR)/libmap_la-unixmap_lcmaps.Tpo -c -o libmap_la-unixmap_lcmaps.lo `test -f 'unixmap_lcmaps.cpp' || echo '$(srcdir)/'`unixmap_lcmaps.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmap_la-unixmap_lcmaps.Tpo $(DEPDIR)/libmap_la-unixmap_lcmaps.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='unixmap_lcmaps.cpp' object='libmap_la-unixmap_lcmaps.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmap_la_CXXFLAGS) $(CXXFLAGS) -c -o libmap_la-unixmap_lcmaps.lo `test -f 'unixmap_lcmaps.cpp' || echo '$(srcdir)/'`unixmap_lcmaps.cpp libmap_la-simplemap.lo: simplemap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmap_la_CXXFLAGS) $(CXXFLAGS) -MT libmap_la-simplemap.lo -MD -MP -MF $(DEPDIR)/libmap_la-simplemap.Tpo -c -o libmap_la-simplemap.lo `test -f 'simplemap.cpp' || echo '$(srcdir)/'`simplemap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmap_la-simplemap.Tpo $(DEPDIR)/libmap_la-simplemap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='simplemap.cpp' object='libmap_la-simplemap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmap_la_CXXFLAGS) $(CXXFLAGS) -c -o libmap_la-simplemap.lo `test -f 'simplemap.cpp' || echo '$(srcdir)/'`simplemap.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ 
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/object_access.cpp0000644000000000000000000000012411741502232025643 xustar000000000000000027 mtime=1334215834.878809 27 atime=1513200575.541714 30 ctime=1513200662.609779404 nordugrid-arc-5.4.2/src/services/gridftpd/auth/object_access.cpp0000644000175000002070000000347111741502232025715 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "object_access.h" ObjectAccess::Item ObjectAccess::empty_(NULL,NULL); ObjectAccess::ObjectAccess(void) { } ObjectAccess::ObjectAccess(const ObjectAccess& o) { for(std::list::const_iterator i = o.items_.begin();i!=o.items_.end();++i) { Item* item = (Item*)(&(*i)); Identity* id = item->id(); Permission* perm = item->permission(); if(id && perm) { id = id->duplicate(); perm = perm->duplicate(); if(id && perm) { items_.insert(items_.end(),Item(id,perm)); } else { if(id) delete id; if(perm) delete perm; }; }; }; } ObjectAccess::~ObjectAccess(void) { for(std::list::iterator i = items_.begin();i!=items_.end();++i) { if(i->id()) delete i->id(); if(i->permission()) delete i->permission(); }; } ObjectAccess::Item* ObjectAccess::use(Identity* id,Permission* perm) { if(!id) return NULL; if(!perm) return NULL; return &(*items_.insert(items_.end(),Item(id,perm))); } ObjectAccess::Item* ObjectAccess::add(Identity* id,Permission* perm) { if(!id) return NULL; if(!perm) return NULL; Identity* id_ = id->duplicate(); Permission* perm_ = perm->duplicate(); return use(id_,perm_); } ObjectAccess::Item* ObjectAccess::operator[](unsigned int n) { if(n >= items_.size()) return NULL; std::list::iterator i = items_.begin(); for(;n && (i!=items_.end());--n,++i){}; if(i == items_.end()) return NULL; return &(*i); } ObjectAccess::Item* ObjectAccess::find(Identity* id) { if(id == 
NULL) return NULL; std::list::iterator i = items_.begin(); for(;i!=items_.end();++i) { Identity* id_ = i->id(); if(id_ == NULL) continue; if((*id_) == (*id)) return &(*i); }; return NULL; } int ObjectAccess::size(void) { return items_.size(); } nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/identity_voms.h0000644000000000000000000000012211412417142025414 xustar000000000000000026 mtime=1277828706.49151 27 atime=1513200575.546714 29 ctime=1513200662.60777938 nordugrid-arc-5.4.2/src/services/gridftpd/auth/identity_voms.h0000644000175000002070000000140711412417142025465 0ustar00mockbuildmock00000000000000#include #include "identity.h" class IdentityItemVOMS: public Identity::Item { std::string vo_; std::string voms_; std::string group_; std::string role_; std::string cap_; static std::string vo_name_; static std::string voms_name_; static std::string group_name_; static std::string role_name_; static std::string cap_name_; public: IdentityItemVOMS(const char* vo,const char* voms,const char* group,const char* role,const char* cap); IdentityItemVOMS(const IdentityItemVOMS& v); virtual ~IdentityItemVOMS(void); virtual Identity::Item* duplicate(void) const; virtual const std::string& name(unsigned int n); virtual const std::string& value(unsigned int n); virtual const std::string& value(const char* name,unsigned int n); }; nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/auth.h0000644000000000000000000000012412675602216023474 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.547714 30 ctime=1513200662.611779429 nordugrid-arc-5.4.2/src/services/gridftpd/auth/auth.h0000644000175000002070000001661112675602216023546 0ustar00mockbuildmock00000000000000#ifndef __GM_AUTH_H__ #define __GM_AUTH_H__ #include #include #include #include #include enum AuthResult { AAA_POSITIVE_MATCH = 1, AAA_NEGATIVE_MATCH = -1, AAA_NO_MATCH = 0, AAA_FAILURE = 2 }; class AuthVO; /** VOMS FQAN split into elements */ struct voms_fqan_t { std::string group; // including root group which is always same as VO std::string role; // role associated to group - for each role there is one voms_fqan_t std::string capability; // deprecated but must keep itt void str(std::string& str) const; // convert to string (minimal variation) }; /** VOMS data */ struct voms_t { std::string server; /*!< The VOMS server hostname */ std::string voname; /*!< The name of the VO to which the VOMS belongs */ std::vector fqans; /*!< Processed FQANs of user */ }; class AuthUser { private: typedef AuthResult (AuthUser:: * match_func_t)(const char* line); typedef struct { const char* cmd; match_func_t func; } source_t; class group_t { public: std::string name; // const char* vo; // VO name matched when authorizing this group struct voms_t voms; // VOMS attributes matched when authorizing this group group_t(const char* name_, const char* vo_, const struct voms_t& voms_): name(name_?name_:""),vo(vo_?vo_:""),voms(voms_) { }; }; // VOMS attributes which matched last athorization rule. Also affected by matching group. struct voms_t default_voms_; // Last matched VO name from those defined in [vo]. const char* default_vo_; // Last matched group including groupcfg processing. 
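  // (default_voms_, default_vo_ and default_group_ are reset by the 'all'
  // rule and updated by the 'group' and 'vo' rules on a positive match -
  // see match_all()/match_group()/match_vo() in auth.cpp.)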
const char* default_group_; std::string subject; // SN of certificate std::string from; // Remote hostname std::string filename; // Delegated proxy stored in this file bool proxy_file_was_created; // If proxy file was created by this object bool has_delegation; // If proxy contains delegation static source_t sources[]; // Supported evaluation sources AuthResult match_all(const char* line); AuthResult match_group(const char* line); AuthResult match_subject(const char* line); AuthResult match_file(const char* line); AuthResult match_ldap(const char* line); AuthResult match_voms(const char* line); AuthResult match_vo(const char* line); AuthResult match_lcas(const char *); AuthResult match_plugin(const char* line); AuthResult process_voms(void); std::vector voms_data; // VOMS information extracted from proxy bool voms_extracted; std::list groups; // Groups which user matched (internal names) std::list vos; // VOs to which user belongs (external names) bool valid; const group_t* find_group(const char* grp) const { if(grp == NULL) return NULL; for(std::list::const_iterator i=groups.begin();i!=groups.end();++i) { if(i->name == grp) return &(*i); }; return NULL; }; const group_t* find_group(const std::string& grp) const { return find_group(grp.c_str());}; public: AuthUser(const AuthUser&); // Constructor // subject - subject/DN of user // filename - file with (delegated) credentials AuthUser(const char* subject = NULL,const char* filename = NULL); ~AuthUser(void); AuthUser& operator=(const AuthUser&); bool operator!(void) { return !valid; }; operator bool(void) { return valid; }; void set(const char* subject,const char* hostname = NULL); void set(const char* subject,gss_ctx_id_t ctx,gss_cred_id_t cred,const char* hostname = NULL); void set(const char* s,STACK_OF(X509)* cred,const char* hostname = NULL); // Evaluate authentication rules AuthResult evaluate(const char* line); const char* DN(void) const { return subject.c_str(); }; const char* proxy(void) const { return filename.c_str(); }; bool is_proxy(void) const { return has_delegation; }; const char* hostname(void) const { return from.c_str(); }; // Remember this user belongs to group 'grp' void add_group(const char* grp) { groups.push_back(group_t(grp,default_vo_,default_voms_)); }; void add_group(const std::string& grp) { add_group(grp.c_str()); }; // Mark this user as belonging to no groups void clear_groups(void) { groups.clear(); default_group_=NULL; }; // Returns true if user belongs to specified group 'grp' bool check_group(const char* grp) const { if(grp == NULL) return false; for(std::list::const_iterator i=groups.begin();i!=groups.end();++i) { if(i->name == grp) return true; }; return false; }; bool check_group(const std::string& grp) const { return check_group(grp.c_str());}; bool select_group(const char* grp) { default_group_ = NULL; if(grp == NULL) return false; for(std::list::const_iterator i=groups.begin();i!=groups.end();++i) { if(i->name == grp) { default_group_ = i->name.c_str(); return true; }; }; return false; } bool select_group(const std::string& grp) { return select_group(grp.c_str());}; void add_vo(const char* vo) { vos.push_back(std::string(vo)); }; void add_vo(const std::string& vo) { vos.push_back(vo); }; bool add_vo(const char* vo,const char* filename); bool add_vo(const std::string& vo,const std::string& filename); bool add_vo(const AuthVO& vo); bool add_vo(const std::list& vos); void clear_vos(void) { vos.clear(); }; bool check_vo(const char* vo) const { for(std::list::const_iterator i=vos.begin();i!=vos.end();++i) 
{ if(strcmp(i->c_str(),vo) == 0) return true; }; return false; }; bool check_vo(const std::string& vo) const { return check_vo(vo.c_str());}; const struct voms_t& default_voms(void) const { return default_voms_; }; const char* default_vo(void) const { return default_vo_; }; const char* default_group(void) const { return default_group_; }; const struct voms_t* default_group_voms(void) const { const group_t* group = find_group(default_group_); return (group == NULL)?NULL:&(group->voms); }; const char* default_group_vo(void) const { const group_t* group = find_group(default_group_); return (group == NULL)?NULL:group->vo; }; const char* default_subject(void) const { return subject.c_str(); }; // Returns all VOMS attributes associated with user const std::vector& voms(void); // Returns all internal (locally configured) VOs associated with user const std::list& VOs(void) const; // convert ARC VOMS attribute list into voms structure static struct voms_t arc_to_voms(const std::string& vo,const std::vector& attributes); static std::string err_to_string(int err); }; class AuthEvaluator { private: std::list l; std::string name; public: AuthEvaluator(void); AuthEvaluator(const char* name); ~AuthEvaluator(void); void add(const char*); AuthResult evaluate(AuthUser &) const; bool operator==(const char* s) { return (strcmp(name.c_str(),s)==0); }; bool operator==(const std::string& s) const { return (name == s); }; const char* get_name() const { return name.c_str(); }; }; void AuthUserSubst(std::string& str,AuthUser& it); class AuthVO { friend class AuthUser; private: std::string name; std::string file; public: AuthVO(const char* vo,const char* filename):name(vo),file(filename) { }; AuthVO(const std::string& vo,const std::string& filename):name(vo.c_str()),file(filename.c_str()) { }; ~AuthVO(void) { }; }; #endif nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/unixmap.h0000644000000000000000000000012412675602216024214 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.539714 30 ctime=1513200662.618779514 nordugrid-arc-5.4.2/src/services/gridftpd/auth/unixmap.h0000644000175000002070000000333312675602216024263 0ustar00mockbuildmock00000000000000#ifndef __GM_UNIXMAP_H__ #define __GM_UNIXMAP_H__ #include #include "auth.h" class UnixMap { private: class unix_user_t { public: std::string name; std::string group; unix_user_t(void) { }; }; typedef AuthResult (UnixMap:: * map_func_t)(const AuthUser& user,unix_user_t& unix_user,const char* line); typedef struct { const char* cmd; map_func_t map; } source_t; static source_t sources[]; // Supported evaluation sources // Unix user obtained after mapping unix_user_t unix_user_; // Associated user AuthUser& user_; // Identity of mapping request. 
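  // (a label passed to the constructor and returned by id(); the actual
  // account is chosen by one of the map_* methods below, one per supported
  // mapping source.)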
std::string map_id_; // Mapping was done bool mapped_; AuthResult map_mapfile(const AuthUser& user,unix_user_t& unix_user,const char* line); AuthResult map_simplepool(const AuthUser& user,unix_user_t& unix_user,const char* line); AuthResult map_unixuser(const AuthUser& user,unix_user_t& unix_user,const char* line); AuthResult map_lcmaps(const AuthUser& user,unix_user_t& unix_user,const char* line); AuthResult map_mapplugin(const AuthUser& user,unix_user_t& unix_user,const char* line); public: // Constructor - links to grid user UnixMap(AuthUser& user,const std::string& id = ""); ~UnixMap(void); // Properties const char* id(void) const { return map_id_.c_str(); }; operator bool(void) const { return mapped_; }; bool operator!(void) const { return !mapped_; }; const char* unix_name(void) const { return unix_user_.name.c_str(); }; const char* unix_group(void) const { return unix_user_.group.c_str(); }; AuthUser& user(void) { return user_; }; // Map AuthResult mapname(const char* line); AuthResult mapgroup(const char* line); AuthResult mapvo(const char* line); }; #endif // __GM_UNIXMAP_H__ nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/auth.cpp0000644000000000000000000000012413003654365024025 xustar000000000000000027 mtime=1477400821.008311 27 atime=1513200575.545714 30 ctime=1513200662.612779441 nordugrid-arc-5.4.2/src/services/gridftpd/auth/auth.cpp0000644000175000002070000002525413003654365024102 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "../misc/proxy.h" #include "auth.h" static Arc::Logger logger(Arc::Logger::getRootLogger(),"AuthUser"); void voms_fqan_t::str(std::string& str) const { str = group; if(!role.empty()) str += "/Role="+role; if(!capability.empty()) str += "/Capability="+capability; } AuthResult AuthUser::match_all(const char* /* line */) { default_voms_=voms_t(); default_vo_=NULL; default_group_=NULL; return AAA_POSITIVE_MATCH; } AuthResult AuthUser::match_group(const char* line) { for(;;) { std::string s(""); int n = Arc::ConfigIni::NextArg(line,s); if(n == 0) break; line+=n; for(std::list::iterator i = groups.begin();i!=groups.end();++i) { if(s == i->name) { default_voms_=i->voms; default_vo_=i->vo; default_group_=i->name.c_str(); return AAA_POSITIVE_MATCH; }; }; }; return AAA_NO_MATCH; } AuthResult AuthUser::match_vo(const char* line) { for(;;) { std::string s(""); int n = Arc::ConfigIni::NextArg(line,s); if(n == 0) break; line+=n; for(std::list::iterator i = vos.begin();i!=vos.end();++i) { if(s == *i) { default_voms_=voms_t(); default_vo_=i->c_str(); default_group_=NULL; return AAA_POSITIVE_MATCH; }; }; }; return AAA_NO_MATCH; } AuthUser::source_t AuthUser::sources[] = { { "all", &AuthUser::match_all }, { "group", &AuthUser::match_group }, { "subject", &AuthUser::match_subject }, { "file", &AuthUser::match_file }, { "remote", &AuthUser::match_ldap }, { "voms", &AuthUser::match_voms }, { "vo", &AuthUser::match_vo }, { "lcas", &AuthUser::match_lcas }, { "plugin", &AuthUser::match_plugin }, { NULL, NULL } }; AuthUser::AuthUser(const char* s,const char* f):subject(""),filename("") { valid = true; if(s) { Arc::ConfigIni::NextArg(s,subject,'\0','\0'); }; struct stat fileStat; if(f && stat(f, &fileStat) == 0) filename=f; proxy_file_was_created=false; voms_extracted=false; has_delegation=false; // ???? 
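  // reset the matched VO/VOMS/group state before process_voms() is called;
  // the object is marked invalid if that call fails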
default_voms_=voms_t(); default_vo_=NULL; default_group_=NULL; if(process_voms() == AAA_FAILURE) valid=false; } AuthUser::AuthUser(const AuthUser& a) { valid=a.valid; subject=a.subject; filename=a.filename; has_delegation=a.has_delegation; proxy_file_was_created=false; voms_extracted=false; default_voms_=voms_t(); default_vo_=NULL; default_group_=NULL; if(process_voms() == AAA_FAILURE) valid=false; } AuthUser& AuthUser::operator=(const AuthUser& a) { valid=a.valid; subject=a.subject; filename=a.filename; has_delegation=a.has_delegation; voms_data.clear(); voms_extracted=false; proxy_file_was_created=false; default_voms_=voms_t(); default_vo_=NULL; default_group_=NULL; if(process_voms() == AAA_FAILURE) valid=false; return *this; } void AuthUser::set(const char* s,gss_ctx_id_t ctx,gss_cred_id_t cred,const char* hostname) { valid=true; if(hostname) from=hostname; voms_data.clear(); voms_extracted=false; proxy_file_was_created=false; filename=""; has_delegation=false; filename=""; subject=""; char* p = gridftpd::write_proxy(cred); if(p) { filename=p; free(p); has_delegation=true; proxy_file_was_created=true; } else { p=gridftpd::write_cert_chain(ctx); if(p) { filename=p; free(p); proxy_file_was_created=true; }; }; if(s == NULL) { // Obtain subject from credentials or context if(filename.length()) { globus_gsi_cred_handle_t h; if(globus_gsi_cred_handle_init(&h,GLOBUS_NULL) == GLOBUS_SUCCESS) { if(globus_gsi_cred_read_proxy(h,(char*)(filename.c_str())) == GLOBUS_SUCCESS) { char* sname = NULL; if(globus_gsi_cred_get_subject_name(h,&sname) == GLOBUS_SUCCESS) { Arc::ConfigIni::NextArg(sname,subject,'\0','\0'); free(sname); }; }; globus_gsi_cred_handle_destroy(h); }; }; } else { subject=s; }; if(process_voms() == AAA_FAILURE) valid=false; } void AuthUser::set(const char* s,STACK_OF(X509)* cred,const char* hostname) { valid=true; if(hostname) from=hostname; voms_data.clear(); voms_extracted=false; proxy_file_was_created=false; filename=""; has_delegation=false; int chain_size = 0; if(cred) chain_size=sk_X509_num(cred); if((s == NULL) && (chain_size <= 0)) return; if(s == NULL) { X509* cert=sk_X509_value(cred,0); if(cert) { X509_NAME *name = X509_get_subject_name(cert); if(name) { if(globus_gsi_cert_utils_get_base_name(name,cred) == GLOBUS_SUCCESS) { char* buf = X509_NAME_oneline(X509_get_subject_name(cert),NULL,0); if(buf) { subject=buf; OPENSSL_free(buf); }; }; }; }; if(subject.length() == 0) return; } else { subject=s; }; if(chain_size > 0) { std::string tempname = Glib::build_filename(Glib::get_tmp_dir(), "x509.XXXXXX"); if(!Arc::TmpFileCreate(tempname, "")) return; filename = tempname; BIO* bio; if((bio=BIO_new_file(filename.c_str(), "w")) == NULL) return; for(int chain_index = 0;chain_index& attributes) { struct voms_t voms_item; voms_item.voname = vo; // Collect fqans with parsed groups, roles and capabilties. 
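
The loop that follows splits every FQAN of the form /vo/group/subgroup/Role=...[/Capability=...] with Arc::tokenize and sorts the pieces into voms_fqan_t fields. As a standalone illustration of that split (standard library only; the helper name parse_fqan is hypothetical, and the VO-root and hostname checks done by the real loop are omitted):

#include <iostream>
#include <sstream>
#include <string>

struct fqan_parts { std::string group, role, capability; };

// Split one FQAN like "/atlas/higgs/Role=production" into its parts.
static fqan_parts parse_fqan(const std::string& fqan) {
  fqan_parts out;
  std::istringstream in(fqan);
  std::string element;
  while(std::getline(in, element, '/')) {
    if(element.empty()) continue;                 // skip the leading '/'
    std::string::size_type eq = element.find('=');
    if(eq == std::string::npos) {                 // plain path element -> part of the group
      out.group += "/" + element;
    } else if(element.compare(0, eq, "Role") == 0) {
      out.role = element.substr(eq + 1);
    } else if(element.compare(0, eq, "Capability") == 0) {
      out.capability = element.substr(eq + 1);
    }
  }
  return out;
}

int main() {
  fqan_parts p = parse_fqan("/atlas/higgs/Role=production");
  std::cout << "group=" << p.group << " role=" << p.role << "\n"; // group=/atlas/higgs role=production
  return 0;
}
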
for(std::vector::const_iterator v = attributes.begin(); v != attributes.end(); ++v) { std::list elements; Arc::tokenize(*v, elements, "/"); // /rootgroup(=VO)/mygroup/mysubgroup/Role=myrole std::list::iterator i = elements.begin(); // Check root group agains VO and skip wrong ones if (i == elements.end()) continue; // too short if (*i != voms_item.voname) { // Check if that is VO to hostname association (special ARC FQAN) if(*i == (std::string("voname=")+voms_item.voname)) { ++i; if (*i != voms_item.voname) { std::vector keyvalue; Arc::tokenize(*i, keyvalue, "="); if (keyvalue.size() == 2) { if (keyvalue[0] == "hostname") { voms_item.server = keyvalue[1]; }; }; }; }; continue; // ignore attribute with wrong root group }; voms_fqan_t fqan; fqan.group = "/"+(*i); ++i; for (; i != elements.end(); ++i) { std::vector keyvalue; Arc::tokenize(*i, keyvalue, "="); if (keyvalue.size() == 1) { // part of group fqan.group += "/"+(*i); } else if (keyvalue.size() == 2) { if (keyvalue[0] == "Role") { fqan.role = keyvalue[1]; } else if (keyvalue[0] == "Capability") { fqan.capability = keyvalue[1]; } } } voms_item.fqans.push_back(fqan); } return voms_item; } AuthUser::~AuthUser(void) { if(proxy_file_was_created && filename.length()) unlink(filename.c_str()); } AuthResult AuthUser::evaluate(const char* line) { if(!valid) return AAA_FAILURE; bool invert = false; bool no_match = false; const char* command = "subject"; size_t command_len = 7; if(subject.length()==0) return AAA_NO_MATCH; // ?? if(!line) return AAA_NO_MATCH; for(;*line;line++) if(!isspace(*line)) break; if(*line == 0) return AAA_NO_MATCH; if(*line == '#') return AAA_NO_MATCH; if(*line == '-') { line++; invert=true; } else if(*line == '+') { line++; }; if(*line == '!') { no_match=true; line++; }; if((*line != '/') && (*line != '"')) { command=line; for(;*line;line++) if(isspace(*line)) break; command_len=line-command; for(;*line;line++) if(!isspace(*line)) break; }; for(source_t* s = sources;s->cmd;s++) { if((strncmp(s->cmd,command,command_len) == 0) && (strlen(s->cmd) == command_len)) { AuthResult res=(this->*(s->func))(line); if(res == AAA_FAILURE) return res; if(no_match) { if(res==AAA_NO_MATCH) { res=AAA_POSITIVE_MATCH; } else { res=AAA_NO_MATCH; }; }; if(invert) { switch(res) { case AAA_POSITIVE_MATCH: res = AAA_NEGATIVE_MATCH; break; case AAA_NEGATIVE_MATCH: res = AAA_POSITIVE_MATCH; break; }; }; return res; }; }; logger.msg(Arc::ERROR, "Unknown authorization command %s", command); return AAA_FAILURE; } const std::vector& AuthUser::voms(void) { if(!voms_extracted) { const char* line = "* * * *"; match_voms(line); }; return voms_data; } const std::list& AuthUser::VOs(void) const { return vos; } bool AuthUser::add_vo(const char* vo,const char* filename) { if((!filename) || (!filename[0])) { logger.msg(Arc::WARNING,"The [vo] section labeled '%s' has no file associated and can't be used for matching", vo); return false; } if(match_file(filename) == AAA_POSITIVE_MATCH) { add_vo(vo); return true; }; return false; } bool AuthUser::add_vo(const std::string& vo,const std::string& filename) { return add_vo(vo.c_str(),filename.c_str()); } bool AuthUser::add_vo(const AuthVO& vo) { return add_vo(vo.name,vo.file); } bool AuthUser::add_vo(const std::list& vos) { bool r = true; for(std::list::const_iterator vo = vos.begin();vo!=vos.end();++vo) { r&=add_vo(*vo); }; return r; } std::string AuthUser::err_to_string(int err) { if(err == AAA_POSITIVE_MATCH) return "positive"; if(err == AAA_NEGATIVE_MATCH) return "negative"; if(err == AAA_NO_MATCH) return "no 
match"; if(err == AAA_FAILURE) return "failure"; return ""; } AuthEvaluator::AuthEvaluator(void):name("") { } AuthEvaluator::AuthEvaluator(const char* s):name(s) { } AuthEvaluator::~AuthEvaluator(void) { } void AuthEvaluator::add(const char* line) { l.push_back(line); } AuthResult AuthEvaluator::evaluate(AuthUser &u) const { for(std::list::const_iterator i = l.begin();i!=l.end();++i) { AuthResult r = u.evaluate(i->c_str()); if(r != AAA_NO_MATCH) return r; }; return AAA_NO_MATCH; } nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/auth_subject.cpp0000644000000000000000000000012412771223666025553 xustar000000000000000027 mtime=1474635702.672235 27 atime=1513200575.539714 30 ctime=1513200662.613779453 nordugrid-arc-5.4.2/src/services/gridftpd/auth/auth_subject.cpp0000644000175000002070000000063612771223666025625 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "auth.h" AuthResult AuthUser::match_subject(const char* line) { for(;;) { std::string s(""); int n = Arc::ConfigIni::NextArg(line,s,' ','"'); if(n == 0) break; line+=n; if(strcmp(subject.c_str(),s.c_str()) == 0) { return AAA_POSITIVE_MATCH; }; }; return AAA_NO_MATCH; } nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/auth_ldap.cpp0000644000000000000000000000012412771223666025034 xustar000000000000000027 mtime=1474635702.672235 27 atime=1513200575.545714 30 ctime=1513200662.615779478 nordugrid-arc-5.4.2/src/services/gridftpd/auth/auth_ldap.cpp0000644000175000002070000000455712771223666025114 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #ifdef HAVE_LDAP #include "../misc/ldapquery.h" #endif #include "auth.h" #define LDAP_CONNECT_TIMEOUT 10 #define LDAP_QUERY_TIMEOUT 20 #define LDAP_RESULT_TIMEOUT 60 static Arc::Logger logger(Arc::Logger::getRootLogger(),"AuthUserLDAP"); class result_t { public: std::string subject; AuthResult decision; result_t(const char* s):subject(s),decision(AAA_NO_MATCH) {}; }; static void result_callback(const std::string & attr,const std::string & value,void * ref) { result_t* r = (result_t*)ref; if(r->decision != AAA_NO_MATCH) return; if(attr == "description") { if(strncmp("subject=",value.c_str(),8) == 0) { const char* s = value.c_str()+8; for(;*s;s++) if(*s != ' ') break; if(strcmp(s,r->subject.c_str()) == 0) { r->decision=AAA_POSITIVE_MATCH; }; }; }; } AuthResult AuthUser::match_ldap(const char* line) { #ifdef HAVE_LDAP for(;;) { std::string u(""); int n = Arc::ConfigIni::NextArg(line,u,' ','"'); if(n == 0) break; line+=n; try { Arc::URL url(u.c_str()); if(url.Protocol() != "ldap") return AAA_FAILURE; std::string usersn(""); gridftpd::LdapQuery ldap(url.Host(), url.Port(), false, usersn); logger.msg(Arc::INFO, "Connecting to %s:%i", url.Host(), url.Port()); logger.msg(Arc::INFO, "Querying at %s", url.Path()); std::vector attrs; attrs.push_back("description"); try { ldap.Query(url.Path(),"",attrs,gridftpd::LdapQuery::onelevel); } catch (gridftpd::LdapQueryError& e) { logger.msg(Arc::ERROR, "Failed to query LDAP server %s", u); return AAA_FAILURE; }; result_t r(subject.c_str()); try { ldap.Result(&result_callback,&r) ; } catch (gridftpd::LdapQueryError& e) { logger.msg(Arc::ERROR, "Failed to get results from LDAP server %s", u); return AAA_FAILURE; }; if(r.decision==AAA_POSITIVE_MATCH) { // just a placeholder default_voms_=voms_t(); default_vo_=NULL; }; return r.decision; } catch (std::exception& e) { return AAA_FAILURE; }; }; return AAA_NO_MATCH; #else 
logger.msg(Arc::ERROR, "LDAP authorization is not supported"); return AAA_FAILURE; #endif } nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/auth_voms.cpp0000644000000000000000000000012312771223666025077 xustar000000000000000027 mtime=1474635702.672235 27 atime=1513200575.547714 29 ctime=1513200662.61677949 nordugrid-arc-5.4.2/src/services/gridftpd/auth/auth_voms.cpp0000644000175000002070000001361712771223666025155 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "auth.h" static Arc::Logger logger(Arc::Logger::getRootLogger(),"AuthUserVOMS"); static AuthResult process_vomsproxy(const char* filename,std::vector &data,bool auto_cert = false); AuthResult AuthUser::process_voms(void) { if(!voms_extracted) { if(filename.length() > 0) { AuthResult err = process_vomsproxy(filename.c_str(),voms_data); voms_extracted=true; logger.msg(Arc::DEBUG, "VOMS proxy processing returns: %i - %s", err, err_to_string(err)); if(err != AAA_POSITIVE_MATCH) return err; }; }; return AAA_POSITIVE_MATCH; } AuthResult AuthUser::match_voms(const char* line) { // parse line std::string vo(""); std::string group(""); std::string role(""); std::string capabilities(""); std::string auto_c(""); int n; n=Arc::ConfigIni::NextArg(line,vo,' ','"'); if(n == 0) { logger.msg(Arc::ERROR, "Missing VO in configuration"); return AAA_FAILURE; }; line+=n; n=Arc::ConfigIni::NextArg(line,group,' ','"'); if(n == 0) { logger.msg(Arc::ERROR, "Missing group in configuration"); return AAA_FAILURE; }; line+=n; n=Arc::ConfigIni::NextArg(line,role,' ','"'); if(n == 0) { logger.msg(Arc::ERROR, "Missing role in configuration"); return AAA_FAILURE; }; line+=n; n=Arc::ConfigIni::NextArg(line,capabilities,' ','"'); if(n == 0) { logger.msg(Arc::ERROR, "Missing capabilities in configuration"); return AAA_FAILURE; }; n=Arc::ConfigIni::NextArg(line,auto_c,' ','"'); logger.msg(Arc::VERBOSE, "Rule: vo: %s", vo); logger.msg(Arc::VERBOSE, "Rule: group: %s", group); logger.msg(Arc::VERBOSE, "Rule: role: %s", role); logger.msg(Arc::VERBOSE, "Rule: capabilities: %s", capabilities); // extract info from voms proxy // if(voms_data->size() == 0) { if(process_voms() != AAA_POSITIVE_MATCH) return AAA_FAILURE; if(voms_data.empty()) return AAA_NO_MATCH; // analyse permissions for(std::vector::iterator v = voms_data.begin();v!=voms_data.end();++v) { logger.msg(Arc::DEBUG, "Match vo: %s", v->voname); if((vo == "*") || (vo == v->voname)) { bool matched = false; for(std::vector::iterator f = v->fqans.begin(); f != v->fqans.end(); ++f) { if(((group == "*") || (group == f->group)) && ((role == "*") || (role == f->role)) && ((capabilities == "*") || (capabilities == f->capability))) { if(!matched) { default_voms_ = voms_t(); default_voms_.voname = v->voname; default_voms_.server = v->server; matched = true; }; default_voms_.fqans.push_back(*f); }; }; if(matched) { return AAA_POSITIVE_MATCH; }; }; }; logger.msg(Arc::VERBOSE, "Matched nothing"); return AAA_NO_MATCH; } static AuthResult process_vomsproxy(const char* filename,std::vector &data,bool /* auto_cert */) { std::string voms_dir = "/etc/grid-security/vomsdir"; std::string cert_dir = "/etc/grid-security/certificates"; { std::string v; if(!(v = Arc::GetEnv("X509_VOMS_DIR")).empty()) voms_dir = v; if(!(v = Arc::GetEnv("X509_CERT_DIR")).empty()) cert_dir = v; }; std::string voms_processing = Arc::GetEnv("VOMS_PROCESSING"); Arc::Credential c(filename, filename, cert_dir, ""); std::vector output; 
std::string emptystring = ""; /* Arc::VOMSTrustList emptylist; emptylist.AddRegex(".*"); */ std::string voms_trust_chains = Arc::GetEnv("VOMS_TRUST_CHAINS"); logger.msg(Arc::VERBOSE, "VOMS trust chains: %s", voms_trust_chains); std::vector vomstrustlist; std::vector vomstrustchains; Arc::tokenize(voms_trust_chains, vomstrustchains, "\n"); for(size_t i=0; i vomstrust_dns; std::string trust_chain = vomstrustchains[i]; std::string::size_type p1, p2=0; while(1) { p1 = trust_chain.find("\"", p2); if(p1!=std::string::npos) { p2 = trust_chain.find("\"", p1+1); if(p2!=std::string::npos) { std::string str = trust_chain.substr(p1+1, p2-p1-1); vomstrust_dns.push_back(str); p2++; if(trust_chain[p2] == '\n') break; } } if((p1==std::string::npos) || (p2==std::string::npos)) break; } if(!vomstrust_dns.empty()) { if(vomstrustlist.empty()) vomstrustlist.insert(vomstrustlist.begin(), vomstrust_dns.begin(), vomstrust_dns.end()); else { vomstrustlist.push_back("----NEXT CHAIN---"); vomstrustlist.insert(vomstrustlist.end(), vomstrust_dns.begin(), vomstrust_dns.end()); } } } Arc::VOMSTrustList voms_trust_list(vomstrustlist); parseVOMSAC(c, cert_dir, emptystring, voms_dir, voms_trust_list, output, true, true); for(size_t n=0;n #endif #include "identity_voms.h" std::string IdentityItemVOMS::vo_name_("vo"); std::string IdentityItemVOMS::voms_name_("voms"); std::string IdentityItemVOMS::group_name_("group"); std::string IdentityItemVOMS::role_name_("role"); std::string IdentityItemVOMS::cap_name_("capability"); IdentityItemVOMS::IdentityItemVOMS(const IdentityItemVOMS& v) { vo_=v.vo_; voms_=v.voms_; group_=v.group_; role_=v.role_; cap_=v.cap_; } IdentityItemVOMS::~IdentityItemVOMS(void) { } Identity::Item* IdentityItemVOMS::duplicate(void) const { return new IdentityItemVOMS(*this); } const std::string& IdentityItemVOMS::name(unsigned int n) { switch(n) { case 0: return vo_name_; case 1: return voms_name_; case 2: return group_name_; case 3: return role_name_; case 4: return cap_name_; }; return empty_; } const std::string& IdentityItemVOMS::value(unsigned int n) { switch(n) { case 0: return vo_; case 1: return voms_; case 2: return group_; case 3: return role_; case 4: return cap_; }; return empty_; } const std::string& IdentityItemVOMS::value(const char* name,unsigned int /* n */) { if(vo_name_ == name) return vo_; if(voms_name_ == name) return voms_; if(group_name_ == name) return group_; if(role_name_ == name) return role_; if(cap_name_ == name) return cap_; return empty_; } IdentityItemVOMS::IdentityItemVOMS(const char* vo,const char* voms,const char* group,const char* role,const char* cap) { vo_=vo; voms_=voms; group_=group; role_=role; cap_=cap; } nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/permission.h0000644000000000000000000000012311412417142024710 xustar000000000000000026 mtime=1277828706.49151 27 atime=1513200575.544714 30 ctime=1513200662.609779404 nordugrid-arc-5.4.2/src/services/gridftpd/auth/permission.h0000644000175000002070000000136711412417142024765 0ustar00mockbuildmock00000000000000#ifndef __ARC_PERMISSION_H__ #define __ARC_PERMISSION_H__ class Permission { public: typedef enum { object = 0, metadata = 1, permissions = 2 } Object; typedef enum { create = 0, read = 1, write = 2, extend = 3, reduce = 4, remove = 5, info = 6 } Action; typedef enum { undefined = 0, allow = 1, deny = 2 } Perm; private: Perm perms_[3][7]; public: Permission(void); Permission(const Permission& p); virtual ~Permission(void); bool set(Object o,Action a,Perm p); bool set_conditional(Object o,Action 
a,Perm p); bool get(Object o,Action a,Perm p); bool get_conditional(Object o,Action a,Perm p); virtual Permission* duplicate(void) const; }; #endif // __ARC_PERMISSION_H__ nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/auth_plugin.cpp0000644000000000000000000000012412675602216025405 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.537714 30 ctime=1513200662.617779502 nordugrid-arc-5.4.2/src/services/gridftpd/auth/auth_plugin.cpp0000644000175000002070000000366612675602216025465 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "../run/run_plugin.h" #include "auth.h" static Arc::Logger logger(Arc::Logger::getRootLogger(),"AuthUserPlugin"); void AuthUserSubst(std::string& str,AuthUser& it) { int l = str.length(); for(int i=0;i #endif #include "identity.h" // ------ Identity --------------- Identity::Item Identity::empty_; Identity::Identity(void) { } Identity::Identity(const Identity& t) { for(std::list::const_iterator i = t.items_.begin();i!=t.items_.end();++i) { add(*i); }; } Identity::~Identity(void) { for(std::list::iterator i = items_.begin();i!=items_.end();++i) { if(*i) delete *i; }; } Identity* Identity::duplicate(void) const { return new Identity(*this); } Identity::Item* Identity::add(const Identity::Item* t) { if(!t) return NULL; return *(items_.insert(items_.end(),t->duplicate())); } Identity::Item* Identity::use(Identity::Item* t) { if(!t) return NULL; return *(items_.insert(items_.end(),t)); } Identity::Item* Identity::operator[](unsigned int n) { if(n>=items_.size()) return NULL; std::list::iterator i = items_.begin(); for(;n && (i!=items_.end());--n,++i){}; if(i==items_.end()) return NULL; return *i; } bool Identity::operator==(Identity& id) { if(&id == NULL) return false; for(std::list::iterator i = items_.begin(); i!=items_.end();++i) { if(*i == NULL) continue; for(std::list::iterator i_ = id.items_.begin(); i_!=id.items_.end();++i_) { if(*i_ == NULL) continue; if(((*i)->str()) == ((*i_)->str())) return true; }; }; return false; } // ------ Identity::Item --------------- std::string Identity::Item::empty_(""); Identity::Item::Item(void):type_("") { } Identity::Item::~Item(void) { } Identity::Item* Identity::Item::duplicate(void) const { return new Identity::Item; } const std::string& Identity::Item::name(unsigned int /* n */) { return empty_; } const std::string& Identity::Item::value(unsigned int /* n */) { return empty_; } const std::string& Identity::Item::value(const char* /* name */,unsigned int /* n */) { return empty_; } std::string Identity::Item::str(void) { std::string v; for(int n = 0;;n++) { const std::string& s = name(n); if(s.length() == 0) break; v+="/"+s+"="+value(n); }; return v; } nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/unixmap_lcmaps.cpp0000644000000000000000000000012412771223666026113 xustar000000000000000027 mtime=1474635702.672235 27 atime=1513200575.537714 30 ctime=1513200662.621779551 nordugrid-arc-5.4.2/src/services/gridftpd/auth/unixmap_lcmaps.cpp0000644000175000002070000000155112771223666026162 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "../misc/proxy.h" #include "../run/run_plugin.h" #include "unixmap.h" static Arc::Logger logger(Arc::Logger::getRootLogger(),"UnixMap"); AuthResult UnixMap::map_lcmaps(const AuthUser& user,unix_user_t& unix_user,const char* line) { // TODO: escape // TODO: hardcoded 300s timeout for lcmaps std::string 
lcmaps_plugin = "300 \""+ Arc::ArcLocation::Get()+G_DIR_SEPARATOR_S+PKGLIBEXECSUBDIR+ G_DIR_SEPARATOR_S+"arc-lcmaps\" "; lcmaps_plugin+=std::string("\"")+user_.DN()+"\" "; lcmaps_plugin+=std::string("\"")+user_.proxy()+"\" "; lcmaps_plugin+=line; AuthResult res = map_mapplugin(user,unix_user,lcmaps_plugin.c_str()); return res; } nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/identity.h0000644000000000000000000000012411611503620024350 xustar000000000000000027 mtime=1311147920.204773 27 atime=1513200575.544714 30 ctime=1513200662.602779319 nordugrid-arc-5.4.2/src/services/gridftpd/auth/identity.h0000644000175000002070000000162411611503620024420 0ustar00mockbuildmock00000000000000#ifndef __ARC_IDENTITY_H__ #define __ARC_IDENTITY_H__ #include #include class Identity { public: class Item { protected: std::string type_; static std::string empty_; public: Item(void); virtual ~Item(void); const std::string& type(void) const { return type_; }; virtual Item* duplicate(void) const; virtual const std::string& name(unsigned int n); virtual const std::string& value(unsigned int n); virtual const std::string& value(const char* name,unsigned int n); virtual std::string str(void); }; protected: std::list items_; static Item empty_; public: Identity(void); Identity(const Identity&); virtual ~Identity(void); Item* add(const Item* t); Item* use(Item* t); Item* operator[](unsigned int n); virtual Identity* duplicate(void) const; virtual bool operator==(Identity& id); }; #endif // __ARC_IDENTITY_H__ nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/permission.cpp0000644000000000000000000000012411741502232025244 xustar000000000000000027 mtime=1334215834.878809 27 atime=1513200575.541714 30 ctime=1513200662.608779392 nordugrid-arc-5.4.2/src/services/gridftpd/auth/permission.cpp0000644000175000002070000000354311741502232025316 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "permission.h" Permission::Permission(void) { for(int a = 0;a<7;++a) { for(int o = 0;o<3;++o) { perms_[o][a]=undefined; }; }; } Permission::Permission(const Permission& p) { for(int a = 0;a<7;++a) { for(int o = 0;o<3;++o) { perms_[o][a]=p.perms_[o][a]; }; }; } Permission::~Permission(void) { } Permission* Permission::duplicate(void) const { return new Permission(*this); } bool Permission::set(Object o,Action a,Perm p) { if((o<0) || (o>=3)) return false; if((a<0) || (a>=7)) return false; perms_[o][a]=p; return true; } bool Permission::set_conditional(Object o,Action a,Perm p) { if((o<0) || (o>=3)) return false; if((a<0) || (a>=7)) return false; if((perms_[permissions][info] == allow) && (perms_[o][a] == p)) return true; switch(p) { case undefined: { if((perms_[permissions][reduce] == allow) || (perms_[permissions][write] == allow)) { perms_[o][a]=p; return true; }; }; break; case allow: { if(((perms_[permissions][extend] == allow) && (perms_[o][a] == undefined)) || (perms_[permissions][write] == allow)) { perms_[o][a]=p; return true; }; }; break; case deny: { if(((perms_[permissions][extend] == allow) && (perms_[o][a] == undefined)) || (perms_[permissions][write] == allow)) { perms_[o][a]=p; return true; }; }; break; }; return false; } bool Permission::get(Object o,Action a,Perm p) { if((o<0) || (o>=3)) return false; if((a<0) || (a>=7)) return false; if(perms_[permissions][info] != allow) return false; return (perms_[o][a] == p); } bool Permission::get_conditional(Object o,Action a,Perm p) { if((o<0) || (o>=3)) return false; if((a<0) || (a>=7)) return false; return (perms_[o][a] == 
p); } nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/simplemap.cpp0000644000000000000000000000012412123101751025037 xustar000000000000000027 mtime=1363969001.473645 27 atime=1513200575.540714 30 ctime=1513200662.623779575 nordugrid-arc-5.4.2/src/services/gridftpd/auth/simplemap.cpp0000644000175000002070000001242712123101751025112 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "simplemap.h" static Arc::Logger logger(Arc::Logger::getRootLogger(),"SimpleMap"); class FileLock { private: int h_; struct flock l_; public: FileLock(int h):h_(h) { if(h_ == -1) return; l_.l_type=F_WRLCK; l_.l_whence=SEEK_SET; l_.l_start=0; l_.l_len=0; for(;;) { if(fcntl(h_,F_SETLKW,&l_) == 0) break; if(errno != EINTR) { h_=-1; return; }; }; }; ~FileLock(void) { if(h_ == -1) return; l_.l_type=F_UNLCK; fcntl(h_,F_SETLKW,&l_); }; operator bool(void) const { return (h_ != -1); }; bool operator!(void) const { return (h_ == -1); }; }; SimpleMap::SimpleMap(const char* dir):dir_(dir) { if((dir_.length() == 0) || (dir_[dir_.length()-1] != '/')) dir_+="/"; if(dir_[0] != '/') dir_=Glib::get_current_dir()+"/"+dir_; pool_handle_=open((dir_+"pool").c_str(),O_RDWR); } SimpleMap::~SimpleMap(void) { if(pool_handle_ != -1) close(pool_handle_); pool_handle_=-1; } #define failure(S) { \ logger.msg(Arc::ERROR, "SimpleMap: %s", (S)); \ return ""; \ } #define info(S) { \ logger.msg(Arc::INFO, "SimpleMap: %s", (S)); \ } std::string SimpleMap::map(const char* subject) { if(pool_handle_ == -1) failure("not initialized"); if(!subject) failure("missing subject"); std::string filename(subject); for(std::string::size_type i = filename.find('/');i!=std::string::npos; i=filename.find('/',i+1)) filename[i]='_'; filename=dir_+filename; FileLock lock(pool_handle_); if(!lock) failure("failed to lock pool file"); // Check for existing mapping struct stat st; if(stat(filename.c_str(),&st) == 0) { if(!S_ISREG(st.st_mode)) failure("mapping is not a regular file"); std::ifstream f(filename.c_str()); if(!f.is_open()) failure("can't open mapping file"); std::string buf; getline(f,buf); utime(filename.c_str(),NULL); return buf; }; // Look for unused names // Get full list first. std::list names; { std::ifstream f((dir_+"pool").c_str()); if(!f.is_open()) failure("can't open pool file") std::string buf; while(getline(f,buf)) { if(buf.empty()) continue; names.push_back(buf); }; }; if(names.empty()) failure("pool is empty"); // Remove all used names from list. Also find oldest maping. 
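
The FileLock helper above serializes access to the pool file with a whole-file POSIX advisory write lock taken via fcntl(F_SETLKW). The sketch below shows the same RAII idiom in isolation (POSIX only; ScopedFlock is an illustrative name, and unlike SimpleMap the descriptor here is opened and closed around a single use):

#include <fcntl.h>
#include <unistd.h>
#include <cerrno>
#include <cstdio>

class ScopedFlock {
private:
  int fd_;
  struct flock l_;
  bool locked_;
public:
  explicit ScopedFlock(int fd):fd_(fd),locked_(false) {
    l_.l_type = F_WRLCK;    // exclusive (write) lock
    l_.l_whence = SEEK_SET;
    l_.l_start = 0;         // from the beginning ...
    l_.l_len = 0;           // ... to the end of the file
    while(fcntl(fd_, F_SETLKW, &l_) != 0) {
      if(errno != EINTR) return;   // retry on signals, give up on real errors
    }
    locked_ = true;
  }
  ~ScopedFlock() {
    if(!locked_) return;
    l_.l_type = F_UNLCK;    // release on scope exit
    fcntl(fd_, F_SETLKW, &l_);
  }
  operator bool() const { return locked_; }
};

int main() {
  int fd = open("pool", O_RDWR);
  if(fd == -1) { perror("open"); return 1; }
  {
    ScopedFlock lock(fd);
    if(!lock) { close(fd); return 1; }
    // ... read and update the shared file here ...
  } // lock released here
  close(fd);
  return 0;
}
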
time_t oldmap_time = 0; std::string oldmap_name; std::string oldmap_subject; { struct dirent file_; struct dirent *file; DIR *dir=opendir(dir_.c_str()); if(dir == NULL) failure("can't list pool directory"); for(;;) { readdir_r(dir,&file_,&file); if(file == NULL) break; if(std::string(file->d_name) == ".") continue; if(std::string(file->d_name) == "..") continue; if(std::string(file->d_name) == "pool") continue; std::string filename = dir_+file->d_name; struct stat st; if(stat(filename.c_str(),&st) != 0) continue; if(!S_ISREG(st.st_mode)) continue; std::ifstream f(filename.c_str()); if(!f.is_open()) { // trash in directory closedir(dir); failure("can't open one of mapping files"); }; std::string buf; getline(f,buf); // find this name in list std::list::iterator i = names.begin(); for(;i!=names.end();++i) if(*i == buf) break; if(i == names.end()) { // Always try to destroy old mappings without corresponding // entry in the pool file if(((unsigned int)(time(NULL) - st.st_mtime)) >= SELFUNMAP_TIME) { unlink(filename.c_str()); }; } else { names.erase(i); if( (oldmap_name.length() == 0) || (((int)(oldmap_time - st.st_mtime)) > 0) ) { oldmap_name=buf; oldmap_subject=file->d_name; oldmap_time=st.st_mtime; }; }; }; closedir(dir); }; if(!names.empty()) { // Claim one of unused names std::ofstream f(filename.c_str()); if(!f.is_open()) failure("can't create mapping file"); f<<*(names.begin())< #include "identity.h" class IdentityItemDN: public Identity::Item { std::string dn_; public: IdentityItemDN(const char* dn); virtual ~IdentityItemDN(void); virtual Identity::Item* duplicate(void) const; virtual const std::string& name(unsigned int n); virtual const std::string& value(unsigned int n); virtual const std::string& value(const char* name,unsigned int n); virtual std::string str(void); }; nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/object_access.h0000644000000000000000000000012311412417142025307 xustar000000000000000026 mtime=1277828706.49151 27 atime=1513200575.542714 30 ctime=1513200662.610779417 nordugrid-arc-5.4.2/src/services/gridftpd/auth/object_access.h0000644000175000002070000000156311412417142025362 0ustar00mockbuildmock00000000000000#ifndef __ARC_OBJECT_ACCESS_H__ #define __ARC_OBJECT_ACCESS_H__ #include #include "identity.h" #include "permission.h" class ObjectAccess { public: class Item: public Identity::Item { protected: Identity* id_; Permission* perm_; public: Item(Identity* id,Permission* perm):id_(id),perm_(perm) { }; //~Item(void) { if(id_) delete id_; if(perm_) delete perm_; }; ~Item(void) { }; Identity* id(void) { return id_; }; Permission* permission(void) { return perm_; }; }; protected: static Item empty_; std::list items_; public: ObjectAccess(void); ObjectAccess(const ObjectAccess& o); virtual ~ObjectAccess(void); Item* use(Identity* id,Permission* perm); Item* add(Identity* id,Permission* perm); Item* operator[](unsigned int n); Item* find(Identity* id); int size(void); }; #endif // __ARC_OBJECT_ACCESS_H__ nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/auth_lcas.cpp0000644000000000000000000000012412771223666025036 xustar000000000000000027 mtime=1474635702.672235 27 atime=1513200575.546714 30 ctime=1513200662.617779502 nordugrid-arc-5.4.2/src/services/gridftpd/auth/auth_lcas.cpp0000644000175000002070000000135612771223666025110 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "../misc/proxy.h" #include "auth.h" static Arc::Logger 
logger(Arc::Logger::getRootLogger(),"AuthUserLCAS"); AuthResult AuthUser::match_lcas(const char* line) { // TODO: escape // TODO: hardcoded 300s timeout std::string lcas_plugin = "300 \""+ Arc::ArcLocation::Get()+G_DIR_SEPARATOR_S+PKGLIBEXECSUBDIR+ G_DIR_SEPARATOR_S+"arc-lcas\" "; lcas_plugin+=std::string("\"")+DN()+"\" "; lcas_plugin+=std::string("\"")+proxy()+"\" "; lcas_plugin+=line; AuthResult res = match_plugin(lcas_plugin.c_str()); return res; } nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/auth_file.cpp0000644000000000000000000000012412771223666025033 xustar000000000000000027 mtime=1474635702.672235 27 atime=1513200575.545714 30 ctime=1513200662.614779465 nordugrid-arc-5.4.2/src/services/gridftpd/auth/auth_file.cpp0000644000175000002070000000146512771223666025106 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "auth.h" static Arc::Logger logger(Arc::Logger::getRootLogger(),"AuthUserFile"); AuthResult AuthUser::match_file(const char* line) { for(;;) { std::string s(""); int n = Arc::ConfigIni::NextArg(line,s,' ','"'); if(n == 0) break; line+=n; std::ifstream f(s.c_str()); if(!f.is_open()) { logger.msg(Arc::ERROR, "Failed to read file %s", s); return AAA_FAILURE; }; for(;f.good();) { std::string buf; getline(f,buf); AuthResult res = evaluate(buf.c_str()); if(res != AAA_NO_MATCH) { f.close(); return res; }; }; f.close(); }; return AAA_NO_MATCH; } nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/unixmap.cpp0000644000000000000000000000012412771223666024554 xustar000000000000000027 mtime=1474635702.672235 27 atime=1513200575.539714 30 ctime=1513200662.619779527 nordugrid-arc-5.4.2/src/services/gridftpd/auth/unixmap.cpp0000644000175000002070000002405412771223666024626 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "../run/run_plugin.h" #include "simplemap.h" #include "unixmap.h" static Arc::Logger logger(Arc::Logger::getRootLogger(),"UnixMap"); UnixMap::source_t UnixMap::sources[] = { { "mapfile", &UnixMap::map_mapfile }, { "simplepool", &UnixMap::map_simplepool }, { "unixuser", &UnixMap::map_unixuser }, { "lcmaps", &UnixMap::map_lcmaps }, { "mapplugin", &UnixMap::map_mapplugin }, { NULL, NULL } }; UnixMap::UnixMap(AuthUser& user,const std::string& id): user_(user),map_id_(id),mapped_(false) { } UnixMap::~UnixMap(void) { } void split_unixname(std::string& unixname,std::string& unixgroup) { std::string::size_type p = unixname.find(':'); if(p != std::string::npos) { unixgroup=unixname.c_str()+p+1; unixname.resize(p); }; if(unixname[0] == '*') unixname.resize(0); if(unixgroup[0] == '*') unixgroup.resize(0); } AuthResult UnixMap::mapgroup(const char* line) { mapped_=false; if(!line) { logger.msg(Arc::ERROR,"User name mapping command is empty"); return AAA_FAILURE; }; for(;*line;line++) if(!isspace(*line)) break; if(*line == 0) { logger.msg(Arc::ERROR,"User name mapping command is empty"); return AAA_FAILURE; }; const char* groupname = line; for(;*line;line++) if(isspace(*line)) break; int groupname_len = line-groupname; if(groupname_len == 0) { logger.msg(Arc::ERROR,"User name mapping has empty group: %s", groupname); return AAA_FAILURE; }; if(!user_.check_group(std::string(groupname,groupname_len))) return AAA_NO_MATCH; unix_user_.name.resize(0); unix_user_.group.resize(0); for(;*line;line++) if(!isspace(*line)) break; const char* command = line; for(;*line;line++) if(isspace(*line)) break; size_t command_len = 
line-command; if(command_len == 0) { logger.msg(Arc::ERROR,"User name mapping has empty command: %s", command); return AAA_FAILURE; }; for(;*line;line++) if(!isspace(*line)) break; for(source_t* s = sources;s->cmd;s++) { if((strncmp(s->cmd,command,command_len) == 0) && (strlen(s->cmd) == command_len)) { AuthResult res=(this->*(s->map))(user_,unix_user_,line); if(res == AAA_POSITIVE_MATCH) { mapped_=true; return AAA_POSITIVE_MATCH; }; if(res == AAA_FAILURE) { // Processing failure cause immediate error return AAA_FAILURE; }; // Paranoid about negative match return AAA_NO_MATCH; }; }; return AAA_FAILURE; } AuthResult UnixMap::mapvo(const char* line) { mapped_=false; if(!line) { logger.msg(Arc::ERROR,"User name mapping command is empty"); return AAA_FAILURE; }; for(;*line;line++) if(!isspace(*line)) break; if(*line == 0) { logger.msg(Arc::ERROR,"User name mapping command is empty"); return AAA_FAILURE; }; const char* voname = line; for(;*line;line++) if(isspace(*line)) break; int voname_len = line-voname; if(voname_len == 0) { logger.msg(Arc::ERROR,"User name mapping has empty VO: %s", voname); return AAA_FAILURE; }; if(!user_.check_vo(std::string(voname,voname_len))) return AAA_NO_MATCH; unix_user_.name.resize(0); unix_user_.group.resize(0); for(;*line;line++) if(!isspace(*line)) break; const char* command = line; for(;*line;line++) if(isspace(*line)) break; size_t command_len = line-command; if(command_len == 0) { logger.msg(Arc::ERROR,"User name mapping has empty command: %s", command); return AAA_FAILURE; }; for(;*line;line++) if(!isspace(*line)) break; for(source_t* s = sources;s->cmd;s++) { if((strncmp(s->cmd,command,command_len) == 0) && (strlen(s->cmd) == command_len)) { AuthResult res=(this->*(s->map))(user_,unix_user_,line); if(res == AAA_POSITIVE_MATCH) { mapped_=true; return AAA_POSITIVE_MATCH; }; if(res == AAA_FAILURE) { // Processing failure cause immediate error return AAA_FAILURE; }; // Paranoid about negative match return AAA_NO_MATCH; }; }; return AAA_FAILURE; } AuthResult UnixMap::mapname(const char* line) { mapped_=false; if(!line) { logger.msg(Arc::ERROR,"User name mapping command is empty"); return AAA_FAILURE; }; for(;*line;line++) if(!isspace(*line)) break; if(*line == 0) { logger.msg(Arc::ERROR,"User name mapping command is empty"); return AAA_FAILURE; }; const char* unixname = line; for(;*line;line++) if(isspace(*line)) break; int unixname_len = line-unixname; if(unixname_len == 0) { logger.msg(Arc::ERROR,"User name mapping has empty name: %s", unixname); return AAA_FAILURE; }; unix_user_.name.assign(unixname,unixname_len); split_unixname(unix_user_.name,unix_user_.group); for(;*line;line++) if(!isspace(*line)) break; const char* command = line; for(;*line;line++) if(isspace(*line)) break; size_t command_len = line-command; if(command_len == 0) { logger.msg(Arc::ERROR,"User name mapping has empty command: %s", command); return AAA_FAILURE; } for(;*line;line++) if(!isspace(*line)) break; for(source_t* s = sources;s->cmd;s++) { if((strncmp(s->cmd,command,command_len) == 0) && (strlen(s->cmd) == command_len)) { AuthResult res=(this->*(s->map))(user_,unix_user_,line); if(res == AAA_POSITIVE_MATCH) { mapped_=true; return AAA_POSITIVE_MATCH; }; if(res == AAA_FAILURE) { // Processing failure cause immediate error return AAA_FAILURE; }; // Paranoid about negative match return AAA_NO_MATCH; }; }; if(unix_user_.name.length() != 0) { // Try authorization rules if username is predefined AuthResult decision = user_.evaluate(command); if(decision == AAA_POSITIVE_MATCH) { 
mapped_=true; return AAA_POSITIVE_MATCH; }; return decision; // propagate failure information }; // If user name is not defined then it was supposed to be // mapping rule. And if not then we failed. return AAA_FAILURE; } // ----------------------------------------------------------- static void subst_arg(std::string& str,void* arg) { AuthUser* it = (AuthUser*)arg; if(!it) return; AuthUserSubst(str,*it); } AuthResult UnixMap::map_mapplugin(const AuthUser& /* user */ ,unix_user_t& unix_user,const char* line) { // timeout path arg ... if(!line) { logger.msg(Arc::ERROR,"Plugin (user mapping) command is empty"); return AAA_FAILURE; }; for(;*line;line++) if(!isspace(*line)) break; if(*line == 0) { logger.msg(Arc::ERROR,"Plugin (user mapping) command is empty"); return AAA_FAILURE; }; char* p; long int to = strtol(line,&p,0); if(p == line) { logger.msg(Arc::ERROR,"Plugin (user mapping) timeout is not a number: %s", line); return AAA_FAILURE; }; if(to < 0) { logger.msg(Arc::ERROR,"Plugin (user mapping) timeout is wrong number: %s", line); return AAA_FAILURE; }; line=p; for(;*line;line++) if(!isspace(*line)) break; if(*line == 0) { logger.msg(Arc::ERROR,"Plugin (user mapping) command is empty"); return AAA_FAILURE; }; std::string s = line; gridftpd::RunPlugin run(line); run.timeout(to); if(run.run(subst_arg,&user_)) { if(run.result() == 0) { if(run.stdout_channel().length() <= 512) { // sane name // Plugin should print user[:group] at stdout or nothing if no suitable mapping found unix_user.name = run.stdout_channel(); split_unixname(unix_user.name,unix_user.group); if(unix_user.name.empty()) return AAA_NO_MATCH; // success but no match return AAA_POSITIVE_MATCH; } else { logger.msg(Arc::ERROR,"Plugin %s returned too much: %s",run.cmd(),run.stdout_channel()); }; } else { logger.msg(Arc::ERROR,"Plugin %s returned: %u",run.cmd(),(unsigned int)run.result()); }; } else { logger.msg(Arc::ERROR,"Plugin %s failed to run",run.cmd()); }; logger.msg(Arc::INFO,"Plugin %s printed: %u",run.cmd(),run.stdout_channel()); logger.msg(Arc::ERROR,"Plugin %s error: %u",run.cmd(),run.stderr_channel()); return AAA_FAILURE; } AuthResult UnixMap::map_mapfile(const AuthUser& user,unix_user_t& unix_user,const char* line) { // This is just grid-mapfile std::ifstream f(line); if(user.DN()[0] == 0) return AAA_FAILURE; if(!f.is_open() ) { logger.msg(Arc::ERROR, "Mapfile at %s can't be opened.", line); return AAA_FAILURE; }; for(;f.good();) { std::string buf; //char buf[512]; // must be enough for DN + name getline(f,buf); char* p = &buf[0]; for(;*p;p++) if(((*p) != ' ') && ((*p) != '\t')) break; if((*p) == '#') continue; if((*p) == 0) continue; std::string val; int n = Arc::ConfigIni::NextArg(p,val); if(strcmp(val.c_str(),user.DN()) != 0) continue; p+=n; Arc::ConfigIni::NextArg(p,unix_user.name); f.close(); return AAA_POSITIVE_MATCH; }; f.close(); return AAA_NO_MATCH; } AuthResult UnixMap::map_simplepool(const AuthUser& user,unix_user_t& unix_user,const char* line) { if(user.DN()[0] == 0) { logger.msg(Arc::ERROR, "User pool call is missing user subject."); return AAA_NO_MATCH; }; SimpleMap pool(line); if(!pool) { logger.msg(Arc::ERROR, "User pool at %s can't be opened.", line); return AAA_FAILURE; }; unix_user.name=pool.map(user.DN()); if(unix_user.name.empty()) { logger.msg(Arc::ERROR, "User pool at %s failed to perform user mapping.", line); return AAA_FAILURE; }; split_unixname(unix_user.name,unix_user.group); return AAA_POSITIVE_MATCH; } AuthResult UnixMap::map_unixuser(const AuthUser& /* user */,unix_user_t& unix_user,const 
char* line) { // Maping is always positive - just fill specified username std::string unixname(line); std::string unixgroup; std::string::size_type p = unixname.find(':'); if(p != std::string::npos) { unixgroup=unixname.c_str()+p+1; unixname.resize(p); }; if(unixname.empty()) { logger.msg(Arc::ERROR, "User name direct mapping is missing user name: %s.", line); return AAA_FAILURE; }; unix_user.name=unixname; unix_user.group=unixgroup; return AAA_POSITIVE_MATCH; } nordugrid-arc-5.4.2/src/services/gridftpd/auth/PaxHeaders.7502/README0000644000000000000000000000012311412417142023227 xustar000000000000000026 mtime=1277828706.49151 27 atime=1513200575.549714 30 ctime=1513200662.597779258 nordugrid-arc-5.4.2/src/services/gridftpd/auth/README0000644000175000002070000000005611412417142023276 0ustar00mockbuildmock00000000000000Authorisation handling for the GridFTP server nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/conf0000644000000000000000000000013213214316026022257 xustar000000000000000030 mtime=1513200662.548778658 30 atime=1513200668.718854121 30 ctime=1513200662.548778658 nordugrid-arc-5.4.2/src/services/gridftpd/conf/0000755000175000002070000000000013214316026022402 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/gridftpd/conf/PaxHeaders.7502/conf_vo.cpp0000644000000000000000000000012412771223666024510 xustar000000000000000027 mtime=1474635702.672235 27 atime=1513200575.524714 30 ctime=1513200662.540778561 nordugrid-arc-5.4.2/src/services/gridftpd/conf/conf_vo.cpp0000644000175000002070000000633112771223666024560 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "environment.h" #include "conf_vo.h" namespace gridftpd { int config_vo(AuthUser& user,Arc::ConfigIni& sect,std::string& cmd,std::string& rest,Arc::Logger* logger) { if(strcmp(sect.SectionMatch(),"vo") != 0) return 1; if(cmd.length() == 0) return 1; std::string voname = sect.SubSection(); std::string vofile; for(;;) { if((cmd == "name") || (cmd == "vo")) { voname=rest; } else if(cmd == "file") { vofile=rest; }; sect.ReadNext(cmd,rest); if(sect.SectionNew() || (cmd.length() == 0)) { if(voname.empty()) { logger->msg(Arc::WARNING, "Configuration section [vo] is missing name. Check for presence of name= or vo= option."); } else { user.add_vo(voname,vofile); }; if(cmd.length() == 0) return 1; if(strcmp(sect.SectionMatch(),"vo") != 0) return 1; voname=""; vofile=""; }; }; return 0; } int config_vo(std::list& vos,Arc::ConfigIni& sect,std::string& cmd,std::string& rest,Arc::Logger* logger) { if(strcmp(sect.SectionMatch(),"vo") != 0) return 1; if(cmd.length() == 0) return 1; std::string voname = sect.SubSection(); std::string vofile; for(;;) { if((cmd == "name") || (cmd == "vo")) { voname=rest; } else if(cmd == "file") { vofile=rest; }; sect.ReadNext(cmd,rest); if(sect.SectionNew() || (cmd.length() == 0)) { if(voname.empty()) { logger->msg(Arc::WARNING, "Configuration section [vo] is missing name. Check for presence of name= or vo= option."); } else { vos.push_back(AuthVO(voname,vofile)); }; if(cmd.length() == 0) return 1; if(strcmp(sect.SectionMatch(),"vo") != 0) return 1; voname=""; vofile=""; }; }; return 0; } // vo name filename etc. int config_vo(AuthUser& user,const std::string& cmd,std::string& rest,Arc::Logger* logger) { if(cmd != "vo") return 1; std::string voname = Arc::ConfigIni::NextArg(rest); std::string vofile = Arc::ConfigIni::NextArg(rest); if(voname.empty()) { logger->msg(Arc::WARNING, "Configuration section [vo] is missing name. 
Check for presence of name= or vo= option."); return -1; }; user.add_vo(voname,vofile); return 0; } int config_vo(std::list& vos,const std::string& cmd,std::string& rest,Arc::Logger* logger) { if(cmd != "vo") return 1; std::string voname = Arc::ConfigIni::NextArg(rest); std::string vofile = Arc::ConfigIni::NextArg(rest); if(voname.empty()) { logger->msg(Arc::WARNING, "Configuration section [vo] is missing name. Check for presence of name= or vo= option."); return -1; }; vos.push_back(AuthVO(voname,vofile)); return 0; } int config_vo(AuthUser& user,const char* cmd,const char* rest,Arc::Logger* logger) { std::string cmd_(cmd); std::string rest_(rest); return config_vo(user,cmd_,rest_,logger); } int config_vo(std::list& vos,const char* cmd,const char* rest,Arc::Logger* logger) { std::string cmd_(cmd); std::string rest_(rest); return config_vo(vos,cmd_,rest_,logger); } } // namespace gridftpd nordugrid-arc-5.4.2/src/services/gridftpd/conf/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712771223666024412 xustar000000000000000027 mtime=1474635702.672235 30 atime=1513200604.365067049 30 ctime=1513200662.538778536 nordugrid-arc-5.4.2/src/services/gridftpd/conf/Makefile.am0000644000175000002070000000043512771223666024456 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libconf.la libconf_la_SOURCES = \ conf_vo.cpp daemon.cpp environment.cpp \ gridmap.cpp \ conf_vo.h daemon.h environment.h \ gridmap.h libconf_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(GLOBUS_IO_CFLAGS) $(AM_CXXFLAGS) nordugrid-arc-5.4.2/src/services/gridftpd/conf/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315734024406 xustar000000000000000030 mtime=1513200604.416067672 30 atime=1513200651.243640392 30 ctime=1513200662.539778548 nordugrid-arc-5.4.2/src/services/gridftpd/conf/Makefile.in0000644000175000002070000006256713214315734024474 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
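
The config_vo() helpers in conf_vo.cpp above pull VO name/file pairs out of [vo] sections (keys name= or vo=, plus file=) through Arc::ConfigIni. As a rough standalone illustration of that section handling only (standard library; it ignores ConfigIni's quoting and subsection rules, and the sample values are made up):

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

struct vo_entry { std::string name, file; };

// Walk an ini-style stream and remember name=/vo= and file= for every [vo] block.
static std::vector<vo_entry> read_vos(std::istream& in) {
  std::vector<vo_entry> vos;
  std::string line;
  bool in_vo = false;
  vo_entry current;
  while(std::getline(in, line)) {
    if(!line.empty() && line[0] == '[') {          // a new section starts
      if(in_vo && !current.name.empty()) vos.push_back(current);
      current = vo_entry();
      in_vo = (line.compare(0, 4, "[vo]") == 0);
      continue;
    }
    if(!in_vo) continue;
    std::string::size_type eq = line.find('=');
    if(eq == std::string::npos) continue;
    std::string key = line.substr(0, eq);
    std::string value = line.substr(eq + 1);
    if(key == "name" || key == "vo") current.name = value;
    else if(key == "file") current.file = value;
  }
  if(in_vo && !current.name.empty()) vos.push_back(current);
  return vos;
}

int main() {
  std::istringstream conf("[vo]\nname=atlas\nfile=/etc/grid-security/atlas.vo\n");
  std::vector<vo_entry> vos = read_vos(conf);
  for(size_t i = 0; i < vos.size(); ++i)
    std::cout << vos[i].name << " -> " << vos[i].file << "\n";
  return 0;
}
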
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/gridftpd/conf DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libconf_la_LIBADD = am_libconf_la_OBJECTS = libconf_la-conf_vo.lo libconf_la-daemon.lo \ libconf_la-environment.lo libconf_la-gridmap.lo libconf_la_OBJECTS = $(am_libconf_la_OBJECTS) libconf_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libconf_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libconf_la_SOURCES) DIST_SOURCES = $(libconf_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ 
ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = 
@JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = 
@infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libconf.la libconf_la_SOURCES = \ conf_vo.cpp daemon.cpp environment.cpp \ gridmap.cpp \ conf_vo.h daemon.h environment.h \ gridmap.h libconf_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(GLOBUS_IO_CFLAGS) $(AM_CXXFLAGS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/gridftpd/conf/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/gridftpd/conf/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libconf.la: $(libconf_la_OBJECTS) $(libconf_la_DEPENDENCIES) $(libconf_la_LINK) $(libconf_la_OBJECTS) $(libconf_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libconf_la-conf_vo.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libconf_la-daemon.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libconf_la-environment.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libconf_la-gridmap.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libconf_la-conf_vo.lo: conf_vo.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -MT libconf_la-conf_vo.lo -MD -MP -MF $(DEPDIR)/libconf_la-conf_vo.Tpo -c -o libconf_la-conf_vo.lo `test -f 'conf_vo.cpp' || echo '$(srcdir)/'`conf_vo.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libconf_la-conf_vo.Tpo $(DEPDIR)/libconf_la-conf_vo.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='conf_vo.cpp' object='libconf_la-conf_vo.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(libconf_la_CXXFLAGS) $(CXXFLAGS) -c -o libconf_la-conf_vo.lo `test -f 'conf_vo.cpp' || echo '$(srcdir)/'`conf_vo.cpp libconf_la-daemon.lo: daemon.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -MT libconf_la-daemon.lo -MD -MP -MF $(DEPDIR)/libconf_la-daemon.Tpo -c -o libconf_la-daemon.lo `test -f 'daemon.cpp' || echo '$(srcdir)/'`daemon.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libconf_la-daemon.Tpo $(DEPDIR)/libconf_la-daemon.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='daemon.cpp' object='libconf_la-daemon.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -c -o libconf_la-daemon.lo `test -f 'daemon.cpp' || echo '$(srcdir)/'`daemon.cpp libconf_la-environment.lo: environment.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -MT libconf_la-environment.lo -MD -MP -MF $(DEPDIR)/libconf_la-environment.Tpo -c -o libconf_la-environment.lo `test -f 'environment.cpp' || echo '$(srcdir)/'`environment.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libconf_la-environment.Tpo $(DEPDIR)/libconf_la-environment.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='environment.cpp' object='libconf_la-environment.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -c -o libconf_la-environment.lo `test -f 'environment.cpp' || echo '$(srcdir)/'`environment.cpp libconf_la-gridmap.lo: gridmap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -MT libconf_la-gridmap.lo -MD -MP -MF $(DEPDIR)/libconf_la-gridmap.Tpo -c -o libconf_la-gridmap.lo `test -f 'gridmap.cpp' || echo '$(srcdir)/'`gridmap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libconf_la-gridmap.Tpo $(DEPDIR)/libconf_la-gridmap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='gridmap.cpp' object='libconf_la-gridmap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -c -o libconf_la-gridmap.lo `test -f 'gridmap.cpp' || echo '$(srcdir)/'`gridmap.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) 
$(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/gridftpd/conf/PaxHeaders.7502/environment.h0000644000000000000000000000012412035300101025035 xustar000000000000000027 mtime=1349877825.304932 27 atime=1513200575.525714 30 ctime=1513200662.547778646 nordugrid-arc-5.4.2/src/services/gridftpd/conf/environment.h0000644000175000002070000000157412035300101025111 0ustar00mockbuildmock00000000000000#ifndef __GRIDFTPD_ENVIRONMENT_H__ #define __GRIDFTPD_ENVIRONMENT_H__ #include namespace gridftpd { class GMEnvironment { bool valid_; public: GMEnvironment(bool guess = false); operator bool(void) const { return valid_; }; bool operator!(void) const { return !valid_; }; /// ARC configuration file /// /etc/arc.conf /// $ARC_LOCATION/etc/arc.conf std::string nordugrid_config_loc(void) const; void nordugrid_config_loc(const std::string&); // Certificates directory location std::string cert_dir_loc() const; /// Email address of person responsible for this ARC installation /// grid.manager@hostname, it can also be set from configuration file std::string support_mail_address(void) const; void support_mail_address(const std::string&); }; } // namespace gridftpd #endif // __GRIDFTPD_ENVIRONMENT_H__ nordugrid-arc-5.4.2/src/services/gridftpd/conf/PaxHeaders.7502/daemon.cpp0000644000000000000000000000012313124220112024271 xustar000000000000000026 mtime=1498488906.04108 27 atime=1513200575.505714 30 ctime=1513200662.541778573 nordugrid-arc-5.4.2/src/services/gridftpd/conf/daemon.cpp0000644000175000002070000002553713124220112024353 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include "environment.h" #include "daemon.h" namespace gridftpd { static Arc::Logger 
logger(Arc::Logger::getRootLogger(),"Daemon"); static Arc::LogFile* sighup_dest = NULL; static void sighup_handler(int) { if(!sighup_dest) return; sighup_dest->setReopen(true); sighup_dest->setReopen(false); } Daemon::Daemon(void):logfile_(""),logsize_(0),lognum_(5),logreopen_(false),uid_((uid_t)(-1)),gid_((gid_t)(-1)),daemon_(true),pidfile_(""),debug_(-1) { } Daemon::~Daemon(void) { } int Daemon::arg(char c) { switch(c) { case 'F': { daemon_=false; }; break; case 'L': { logfile_=optarg; }; break; case 'U': { std::string username(optarg); std::string groupname(""); std::string::size_type n = username.find(':'); if(n != std::string::npos) { groupname=optarg+n+1; username.resize(n); }; if(username.length() == 0) { uid_=0; gid_=0; } else { struct passwd pw_; struct passwd *pw; char buf[BUFSIZ]; getpwnam_r(username.c_str(),&pw_,buf,BUFSIZ,&pw); if(pw == NULL) { logger.msg(Arc::ERROR, "No such user: %s", username); uid_=0; gid_=0; return -1; }; uid_=pw->pw_uid; gid_=pw->pw_gid; }; if(groupname.length() != 0) { struct group gr_; struct group *gr; char buf[BUFSIZ]; getgrnam_r(groupname.c_str(),&gr_,buf,BUFSIZ,&gr); if(gr == NULL) { logger.msg(Arc::ERROR, "No such group: %s", groupname); gid_=0; return -1; }; gid_=gr->gr_gid; }; }; break; case 'P': { pidfile_=optarg; }; break; case 'd': { char* p; debug_ = strtol(optarg,&p,10); if(((*p) != 0) || (debug_<0)) { logger.msg(Arc::ERROR, "Improper debug level '%s'", optarg); return 1; }; }; break; default: return 1; }; return 0; } int Daemon::config(const std::string& cmd,std::string& rest) { if(cmd == "gridmap") { Arc::SetEnv("GRIDMAP",rest.c_str()); return 0; } else if(cmd == "hostname") { Arc::SetEnv("GLOBUS_HOSTNAME",rest.c_str()); return 0; } else if(cmd == "globus_tcp_port_range") { Arc::SetEnv("GLOBUS_TCP_PORT_RANGE",rest.c_str()); return 0; } else if(cmd == "globus_udp_port_range") { Arc::SetEnv("GLOBUS_UDP_PORT_RANGE",rest.c_str()); return 0; } else if(cmd == "x509_user_key") { Arc::SetEnv("X509_USER_KEY",rest.c_str()); return 0; } else if(cmd == "x509_user_cert") { Arc::SetEnv("X509_USER_CERT",rest.c_str()); return 0; } else if(cmd == "x509_cert_dir") { Arc::SetEnv("X509_CERT_DIR",rest.c_str()); return 0; } else if(cmd == "http_proxy") { Arc::SetEnv("ARC_HTTP_PROXY",rest.c_str()); return 0; } else if(cmd == "x509_voms_dir") { Arc::SetEnv("X509_VOMS_DIR",rest.c_str()); return 0; } else if(cmd == "voms_processing") { Arc::SetEnv("VOMS_PROCESSING",rest.c_str()); return 0; } else if(cmd == "voms_trust_chain") { // There could be multiple "voms_trust_chain" for multiple voms servers std::string voms_trust_chains = Arc::GetEnv("VOMS_TRUST_CHAINS"); if(!voms_trust_chains.empty()) voms_trust_chains.append("\n").append(rest); else voms_trust_chains = rest; Arc::SetEnv("VOMS_TRUST_CHAINS",voms_trust_chains.c_str()); return 0; }; if(cmd == "daemon") { if(daemon_) { std::string arg = Arc::ConfigIni::NextArg(rest); if(arg=="") { logger.msg(Arc::ERROR, "Missing option for command daemon"); return -1; }; if(strcasecmp("yes",arg.c_str()) == 0) { daemon_=true; } else if(strcasecmp("no",arg.c_str()) == 0) { daemon_=false; } else { logger.msg(Arc::ERROR, "Wrong option in daemon"); return -1; }; }; } else if(cmd == "logfile") { if(logfile_.length() == 0) logfile_=Arc::ConfigIni::NextArg(rest); } else if(cmd == "logsize") { if(logsize_ == 0) { char* p; logsize_ = strtol(rest.c_str(),&p,10); if(logsize_ < 0) { logsize_=0; logger.msg(Arc::ERROR, "Improper size of log '%s'", rest); return -1; }; if((*p) == ' ') { for(;*p;++p) if((*p) != ' ') break; if(*p) { lognum_ = 
strtol(p,&p,10); if(lognum_ < 0) { logsize_=0; lognum_=0; logger.msg(Arc::ERROR, "Improper number of logs '%s'", rest); return -1; }; }; } else if((*p) != 0) { logsize_=0; lognum_=0; logger.msg(Arc::ERROR, "Improper argument for logsize '%s'", rest); return -1; }; }; } else if(cmd == "logreopen") { std::string arg = Arc::ConfigIni::NextArg(rest); if(arg=="") { logger.msg(Arc::ERROR, "Missing option for command logreopen"); return -1; }; if(strcasecmp("yes",arg.c_str()) == 0) { logreopen_=true; } else if(strcasecmp("no",arg.c_str()) == 0) { logreopen_=false; } else { logger.msg(Arc::ERROR, "Wrong option in logreopen"); return -1; }; } else if(cmd == "user") { if(uid_ == (uid_t)(-1)) { std::string username = Arc::ConfigIni::NextArg(rest); std::string groupname(""); std::string::size_type n = username.find(':'); if(n != std::string::npos) { groupname=username.c_str()+n+1; username.resize(n); }; if(username.length() == 0) { uid_=0; gid_=0; } else { struct passwd pw_; struct passwd *pw; char buf[BUFSIZ]; getpwnam_r(username.c_str(),&pw_,buf,BUFSIZ,&pw); if(pw == NULL) { logger.msg(Arc::ERROR, "No such user: %s", username); uid_=0; gid_=0; return -1; }; uid_=pw->pw_uid; gid_=pw->pw_gid; }; if(groupname.length() != 0) { struct group gr_; struct group *gr; char buf[BUFSIZ]; getgrnam_r(groupname.c_str(),&gr_,buf,BUFSIZ,&gr); if(gr == NULL) { logger.msg(Arc::ERROR, "No such group: %s", groupname); gid_=0; return -1; }; gid_=gr->gr_gid; }; }; } else if(cmd == "pidfile") { if(pidfile_.length() == 0) pidfile_=Arc::ConfigIni::NextArg(rest); } else if(cmd == "debug") { if(debug_ == -1) { char* p; debug_ = strtol(rest.c_str(),&p,10); if(((*p) != 0) || (debug_<0)) { logger.msg(Arc::ERROR, "Improper debug level '%s'", rest); return -1; }; }; } else { return 1; }; return 0; } int Daemon::skip_config(const std::string& cmd) { if(cmd == "debug") return 0; if(cmd == "daemon") return 0; if(cmd == "logfile") return 0; if(cmd == "logsize") return 0; if(cmd == "user") return 0; if(cmd == "pidfile") return 0; return 1; } int Daemon::getopt(int argc, char * const argv[],const char *optstring) { int n; std::string opts(optstring); opts+=DAEMON_OPTS; while((n=::getopt(argc,argv,opts.c_str())) != -1) { switch(n) { case 'F': case 'L': case 'U': case 'P': case 'd': { if(arg(n) != 0) return '.'; }; break; default: return n; }; }; return -1; } int Daemon::daemon(bool close_fds) { // set up logging // this must be a pointer which is not deleted because log destinations // are added by reference... 
Arc::LogFile* logger_file = new Arc::LogFile(logfile_); if (!logger_file || !(*logger_file)) { logger.msg(Arc::ERROR, "Failed to open log file %s", logfile_); return 1; } if (logsize_ > 0) logger_file->setMaxSize(logsize_); if (lognum_ > 0) logger_file->setBackups(lognum_); logger_file->setReopen(logreopen_); if (debug_ >= 0) { Arc::Logger::getRootLogger().setThreshold(Arc::old_level_to_level((unsigned int)debug_)); }; Arc::Logger::getRootLogger().removeDestinations(); Arc::Logger::getRootLogger().addDestination(*logger_file); if(!logreopen_) { sighup_dest = logger_file; signal(SIGHUP,&sighup_handler); }; if(close_fds) { struct rlimit lim; unsigned long long int max_files; if(getrlimit(RLIMIT_NOFILE,&lim) == 0) { max_files=lim.rlim_cur; } else { max_files=4096; }; if(max_files == RLIM_INFINITY) max_files=4096; for(int i=3;i #endif #include #include #include #include #include #include #include #include #include "environment.h" namespace gridftpd { static Arc::Logger logger(Arc::Logger::getRootLogger(),"GMEnvironment"); static bool read_env_vars(bool guess); GMEnvironment::GMEnvironment(bool guess) { valid_=read_env_vars(guess); } class prstring { private: Glib::Mutex lock_; std::string val_; public: prstring(void); prstring(const char*); prstring(const prstring&); prstring& operator=(const char*); prstring& operator=(const std::string&); prstring& operator=(const prstring&); void operator+=(const char*); void operator+=(const std::string&); std::string operator+(const char*) const; std::string operator+(const std::string&) const; operator std::string(void) const; std::string str(void) const; bool empty() const; }; std::string operator+(const char*,const prstring&); std::string operator+(const std::string&,const prstring&); prstring::prstring(void) { } prstring::prstring(const char* val):lock_(), val_(val) { } prstring::prstring(const prstring& val):lock_(), val_(val.str()) { } prstring& prstring::operator=(const char* val) { Glib::Mutex::Lock lock(lock_); val_=val; return *this; } prstring& prstring::operator=(const std::string& val) { Glib::Mutex::Lock lock(lock_); val_=val; return *this; } prstring& prstring::operator=(const prstring& val) { if(&val == this) return *this; Glib::Mutex::Lock lock(lock_); val_=val.str(); return *this; } void prstring::operator+=(const char* val) { Glib::Mutex::Lock lock(lock_); val_+=val; } void prstring::operator+=(const std::string& val) { Glib::Mutex::Lock lock(lock_); val_+=val; } std::string prstring::operator+(const char* val) const { const_cast(lock_).lock(); std::string r = val_ + val; const_cast(lock_).unlock(); return r; } std::string prstring::operator+(const std::string& val) const { const_cast(lock_).lock(); std::string r = val_ + val; const_cast(lock_).unlock(); return r; } prstring::operator std::string(void) const { const_cast(lock_).lock(); std::string r = val_; const_cast(lock_).unlock(); return r; } bool prstring::empty() const { const_cast(lock_).lock(); bool r = val_.empty(); const_cast(lock_).unlock(); return r; } std::string prstring::str(void) const { return operator std::string(); } std::string operator+(const char* val1,const prstring& val2) { return (val1 + val2.str()); } std::string operator+(const std::string& val1,const prstring& val2) { return (val1 + val2.str()); } // ARC configuration file static prstring nordugrid_config_loc_; // Certificates directory static prstring cert_dir_loc_; // RTE setup scripts static prstring runtime_config_dir_; // Email address of person responsible for this ARC installation static prstring 
support_mail_address_; std::string GMEnvironment::nordugrid_config_loc(void) const { return nordugrid_config_loc_.str(); } std::string GMEnvironment::cert_dir_loc(void) const { return cert_dir_loc_.str(); } void GMEnvironment::nordugrid_config_loc(const std::string& val) { nordugrid_config_loc_=val; } std::string GMEnvironment::support_mail_address(void) const { return support_mail_address_.str(); } void GMEnvironment::support_mail_address(const std::string& val) { support_mail_address_=val; } static bool file_exists(const char* name) { struct stat st; if(stat(name,&st) != 0) return false; if(!S_ISREG(st.st_mode)) return false; return true; } static bool read_env_vars(bool guess) { if(nordugrid_config_loc_.empty()) { std::string tmp = Arc::GetEnv("ARC_CONFIG"); if(tmp.empty()) { tmp=Arc::GetEnv("NORDUGRID_CONFIG"); if(tmp.empty() && guess) { tmp="/etc/arc.conf"; nordugrid_config_loc_=tmp; if(!file_exists(tmp.c_str())) { logger.msg(Arc::ERROR, "Central configuration file is missing at guessed location:\n" " /etc/arc.conf\n" "Use ARC_CONFIG variable for non-standard location"); return false; }; }; }; if(!tmp.empty()) nordugrid_config_loc_=tmp; }; if(cert_dir_loc_.empty()) { cert_dir_loc_=Arc::GetEnv("X509_CERT_DIR"); }; // Set all environment variables for other tools Arc::SetEnv("ARC_CONFIG",nordugrid_config_loc_); if(support_mail_address_.empty()) { char hn[100]; support_mail_address_="grid.manager@"; if(gethostname(hn,99) == 0) { support_mail_address_+=hn; } else { support_mail_address_+="localhost"; }; }; return true; } } // namespace gridftpd nordugrid-arc-5.4.2/src/services/gridftpd/conf/PaxHeaders.7502/conf_vo.h0000644000000000000000000000012412771223666024155 xustar000000000000000027 mtime=1474635702.672235 27 atime=1513200575.526714 30 ctime=1513200662.544778609 nordugrid-arc-5.4.2/src/services/gridftpd/conf/conf_vo.h0000644000175000002070000000157112771223666024226 0ustar00mockbuildmock00000000000000#ifndef __GRIDFTPD_CONFIG_VO_H__ #define __GRIDFTPD_CONFIG_VO_H__ #include #include #include #include "../auth/auth.h" namespace gridftpd { int config_vo(AuthUser& user,Arc::ConfigIni& sect,std::string& cmd,std::string& rest,Arc::Logger* logger = NULL); int config_vo(AuthUser& user,const std::string& cmd,std::string& rest,Arc::Logger* logger = NULL); int config_vo(AuthUser& user,const char* cmd,const char* rest,Arc::Logger* logger = NULL); int config_vo(std::list& vos,Arc::ConfigIni& sect,std::string& cmd,std::string& rest,Arc::Logger* logger = NULL); int config_vo(std::list& vos,const std::string& cmd,std::string& rest,Arc::Logger* logger = NULL); int config_vo(std::list& vos,const char* cmd,const char* rest,Arc::Logger* logger = NULL); } // namespace gridftpd #endif // __GRIDFTPD_CONFIG_VO_H__ nordugrid-arc-5.4.2/src/services/gridftpd/conf/PaxHeaders.7502/daemon.h0000644000000000000000000000012411553274171023761 xustar000000000000000027 mtime=1303214201.729212 27 atime=1513200575.523714 30 ctime=1513200662.545778622 nordugrid-arc-5.4.2/src/services/gridftpd/conf/daemon.h0000644000175000002070000000143511553274171024031 0ustar00mockbuildmock00000000000000#ifndef __GRIDFTPD_DAEMON_H__ #define __GRIDFTPD_DAEMON_H__ #include #define DAEMON_OPTS "ZzFL:U:P:d:" namespace gridftpd { class Daemon { private: std::string logfile_; int logsize_; int lognum_; bool logreopen_; uid_t uid_; gid_t gid_; bool daemon_; std::string pidfile_; int debug_; public: Daemon(void); ~Daemon(void); int arg(char c); int config(const std::string& cmd,std::string& rest); static int skip_config(const std::string& 
cmd); int getopt(int argc, char * const argv[],const char *optstring); int daemon(bool close_fds = false); const char* short_help(void); void logfile(const char* path); void pidfile(const char* path); }; } // namespace gridftpd #endif // __GRIDFTPD_DAEMON_H__ nordugrid-arc-5.4.2/src/services/gridftpd/conf/PaxHeaders.7502/gridmap.h0000644000000000000000000000012311412417142024127 xustar000000000000000026 mtime=1277828706.49151 27 atime=1513200575.524714 30 ctime=1513200662.548778658 nordugrid-arc-5.4.2/src/services/gridftpd/conf/gridmap.h0000644000175000002070000000111411412417142024172 0ustar00mockbuildmock00000000000000#ifndef __GRIDFTPD_GRIDMAP_H__ #define __GRIDFTPD_GRIDMAP_H__ #include <string> #include <list> namespace gridftpd { /* Read the file specified by the path argument. Returns: true - success false - error (most probably the file is missing) 'ulist' contains unix usernames found in the file - one per line or in gridmap-like format - separated by blank spaces. */ bool file_user_list(const std::string& path,std::string &ulist); bool file_user_list(const std::string& path,std::list<std::string> &ulist); } // namespace gridftpd #endif // __GRIDFTPD_GRIDMAP_H__ nordugrid-arc-5.4.2/src/services/gridftpd/conf/PaxHeaders.7502/README0000644000000000000000000000012311412417142023213 xustar000000000000000026 mtime=1277828706.49151 27 atime=1513200575.525714 30 ctime=1513200662.537778524 nordugrid-arc-5.4.2/src/services/gridftpd/conf/README0000644000175000002070000000003211412417142023254 0ustar00mockbuildmock00000000000000configuration processing. nordugrid-arc-5.4.2/src/services/gridftpd/conf/PaxHeaders.7502/gridmap.cpp0000644000000000000000000000012412771223666024502 xustar000000000000000027 mtime=1474635702.672235 27 atime=1513200575.505714 30 ctime=1513200662.543778597 nordugrid-arc-5.4.2/src/services/gridftpd/conf/gridmap.cpp0000644000175000002070000000327512771223666024552 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "environment.h" #include "gridmap.h" namespace gridftpd { bool file_user_list(const std::string& path,std::string &ulist) { std::ifstream f(path.c_str()); if(! f.is_open() ) return false; for(;f.good();) { std::string rest; std::getline(f,rest); Arc::trim(rest," \t\r\n"); std::string name = ""; for(;rest.length() != 0;) { name=Arc::ConfigIni::NextArg(rest); }; if(name.length() == 0) continue; std::string::size_type pos; if((pos=ulist.find(name)) != std::string::npos) { if(pos!=0) if(ulist[pos-1] != ' ') { ulist+=" "+name; continue; }; pos+=name.length(); if(pos < ulist.length()) if(ulist[pos] != ' ') { ulist+=" "+name; continue; }; } else { ulist+=" "+name; }; }; f.close(); return true; } bool file_user_list(const std::string& path,std::list<std::string> &ulist) { std::ifstream f(path.c_str()); if(!
f.is_open() ) return false; for(;f.good();) { std::string rest; std::getline(f,rest); Arc::trim(rest," \t\r\n"); std::string name = ""; for(;rest.length() != 0;) { name=Arc::ConfigIni::NextArg(rest); }; if(name.length() == 0) continue; for(std::list::iterator u = ulist.begin(); u != ulist.end(); ++u) { if(name == *u) { name.resize(0); break; }; }; if(name.length() == 0) continue; ulist.push_back(name); }; f.close(); return true; } } // namespace gridftpd nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315734023461 xustar000000000000000030 mtime=1513200604.257065728 30 atime=1513200651.143639169 30 ctime=1513200662.466777655 nordugrid-arc-5.4.2/src/services/gridftpd/Makefile.in0000644000175000002070000015776413214315734023553 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ sbin_PROGRAMS = gridftpd$(EXEEXT) subdir = src/services/gridftpd DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/gridftpd-start.in $(srcdir)/gridftpd.8.in \ $(srcdir)/gridftpd.init.in $(srcdir)/gridftpd.service.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = gridftpd.init gridftpd.service gridftpd-start \ gridftpd.8 CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libgridftpd_la_LIBADD = am_libgridftpd_la_OBJECTS = libgridftpd_la-userspec.lo \ libgridftpd_la-names.lo libgridftpd_la-misc.lo libgridftpd_la_OBJECTS = $(am_libgridftpd_la_OBJECTS) libgridftpd_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libgridftpd_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ 
$(LDFLAGS) -o $@ am__installdirs = "$(DESTDIR)$(sbindir)" "$(DESTDIR)$(pkgdatadir)" \ "$(DESTDIR)$(man8dir)" "$(DESTDIR)$(unitsdir)" PROGRAMS = $(sbin_PROGRAMS) am_gridftpd_OBJECTS = gridftpd-commands.$(OBJEXT) \ gridftpd-config.$(OBJEXT) gridftpd-fileroot.$(OBJEXT) \ gridftpd-listener.$(OBJEXT) gridftpd-dataread.$(OBJEXT) \ gridftpd-datawrite.$(OBJEXT) gridftpd-datalist.$(OBJEXT) \ gridftpd-fileroot_config.$(OBJEXT) gridftpd_OBJECTS = $(am_gridftpd_OBJECTS) am__DEPENDENCIES_1 = gridftpd_DEPENDENCIES = libgridftpd.la conf/libconf.la run/librun.la \ misc/libmisc.la auth/libauth.la auth/libmap.la \ $(top_builddir)/src/hed/libs/globusutils/libarcglobusutils.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) gridftpd_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(gridftpd_CXXFLAGS) \ $(CXXFLAGS) $(gridftpd_LDFLAGS) $(LDFLAGS) -o $@ am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' SCRIPTS = $(pkgdata_SCRIPTS) DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libgridftpd_la_SOURCES) $(gridftpd_SOURCES) DIST_SOURCES = $(libgridftpd_la_SOURCES) $(gridftpd_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive 
uninstall-recursive man8dir = $(mandir)/man8 NROFF = nroff MANS = $(man_MANS) DATA = $(units_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS 
= @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = 
@SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libgridftpd.la @SYSV_SCRIPTS_ENABLED_FALSE@GRIDFTPD_SCRIPT = @SYSV_SCRIPTS_ENABLED_TRUE@GRIDFTPD_SCRIPT = gridftpd.init @SYSTEMD_UNITS_ENABLED_FALSE@GRIDFTPD_UNIT = @SYSTEMD_UNITS_ENABLED_TRUE@GRIDFTPD_UNIT = gridftpd.service @SYSTEMD_UNITS_ENABLED_FALSE@GRIDFTPD_UNIT_WRAPPER = @SYSTEMD_UNITS_ENABLED_TRUE@GRIDFTPD_UNIT_WRAPPER = gridftpd-start units_DATA = $(GRIDFTPD_UNIT) pkgdata_SCRIPTS = $(GRIDFTPD_UNIT_WRAPPER) libgridftpd_la_SOURCES = userspec.cpp names.cpp misc.cpp libgridftpd_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLOBUS_FTP_CLIENT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) gridftpd_SOURCES = commands.cpp config.cpp fileroot.cpp listener.cpp \ dataread.cpp datawrite.cpp datalist.cpp 
fileroot_config.cpp \ commands.h conf.h fileroot.h misc.h names.h userspec.h gridftpd_CXXFLAGS = -I$(top_srcdir)/include \ $(GLOBUS_FTP_CLIENT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) gridftpd_LDADD = libgridftpd.la conf/libconf.la run/librun.la \ misc/libmisc.la auth/libauth.la auth/libmap.la \ $(top_builddir)/src/hed/libs/globusutils/libarcglobusutils.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLOBUS_FTP_CONTROL_LIBS) $(GLOBUS_GSS_ASSIST_LIBS) \ $(GLOBUS_GSSAPI_GSI_LIBS) $(GLOBUS_IO_LIBS) \ $(GLOBUS_GSI_CERT_UTILS_LIBS) $(GLOBUS_GSI_CREDENTIAL_LIBS) \ $(GLOBUS_OPENSSL_MODULE_LIBS) $(GLOBUS_COMMON_LIBS) gridftpd_LDFLAGS = -rdynamic SUBDIRS = misc conf run auth . fileplugin DIST_SUBDIRS = misc conf run auth . fileplugin man_MANS = gridftpd.8 all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/gridftpd/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/gridftpd/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): gridftpd.init: $(top_builddir)/config.status $(srcdir)/gridftpd.init.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ gridftpd.service: $(top_builddir)/config.status $(srcdir)/gridftpd.service.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ gridftpd-start: $(top_builddir)/config.status $(srcdir)/gridftpd-start.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ gridftpd.8: $(top_builddir)/config.status $(srcdir)/gridftpd.8.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libgridftpd.la: $(libgridftpd_la_OBJECTS) $(libgridftpd_la_DEPENDENCIES) $(libgridftpd_la_LINK) $(libgridftpd_la_OBJECTS) $(libgridftpd_la_LIBADD) $(LIBS) install-sbinPROGRAMS: $(sbin_PROGRAMS) @$(NORMAL_INSTALL) test -z "$(sbindir)" || $(MKDIR_P) "$(DESTDIR)$(sbindir)" @list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p || test -f $$p1; \ then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if 
(dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(sbindir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(sbindir)$$dir" || exit $$?; \ } \ ; done uninstall-sbinPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(sbindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(sbindir)" && rm -f $$files clean-sbinPROGRAMS: @list='$(sbin_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list gridftpd$(EXEEXT): $(gridftpd_OBJECTS) $(gridftpd_DEPENDENCIES) @rm -f gridftpd$(EXEEXT) $(gridftpd_LINK) $(gridftpd_OBJECTS) $(gridftpd_LDADD) $(LIBS) install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/gridftpd-commands.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/gridftpd-config.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/gridftpd-datalist.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/gridftpd-dataread.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/gridftpd-datawrite.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/gridftpd-fileroot.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/gridftpd-fileroot_config.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/gridftpd-listener.Po@am__quote@ @AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/libgridftpd_la-misc.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libgridftpd_la-names.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libgridftpd_la-userspec.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libgridftpd_la-userspec.lo: userspec.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libgridftpd_la_CXXFLAGS) $(CXXFLAGS) -MT libgridftpd_la-userspec.lo -MD -MP -MF $(DEPDIR)/libgridftpd_la-userspec.Tpo -c -o libgridftpd_la-userspec.lo `test -f 'userspec.cpp' || echo '$(srcdir)/'`userspec.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libgridftpd_la-userspec.Tpo $(DEPDIR)/libgridftpd_la-userspec.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='userspec.cpp' object='libgridftpd_la-userspec.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libgridftpd_la_CXXFLAGS) $(CXXFLAGS) -c -o libgridftpd_la-userspec.lo `test -f 'userspec.cpp' || echo '$(srcdir)/'`userspec.cpp libgridftpd_la-names.lo: names.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libgridftpd_la_CXXFLAGS) $(CXXFLAGS) -MT libgridftpd_la-names.lo -MD -MP -MF $(DEPDIR)/libgridftpd_la-names.Tpo -c -o libgridftpd_la-names.lo `test -f 'names.cpp' || echo '$(srcdir)/'`names.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libgridftpd_la-names.Tpo $(DEPDIR)/libgridftpd_la-names.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='names.cpp' object='libgridftpd_la-names.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libgridftpd_la_CXXFLAGS) $(CXXFLAGS) -c -o libgridftpd_la-names.lo `test -f 'names.cpp' || echo '$(srcdir)/'`names.cpp libgridftpd_la-misc.lo: misc.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX 
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libgridftpd_la_CXXFLAGS) $(CXXFLAGS) -MT libgridftpd_la-misc.lo -MD -MP -MF $(DEPDIR)/libgridftpd_la-misc.Tpo -c -o libgridftpd_la-misc.lo `test -f 'misc.cpp' || echo '$(srcdir)/'`misc.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libgridftpd_la-misc.Tpo $(DEPDIR)/libgridftpd_la-misc.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='misc.cpp' object='libgridftpd_la-misc.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libgridftpd_la_CXXFLAGS) $(CXXFLAGS) -c -o libgridftpd_la-misc.lo `test -f 'misc.cpp' || echo '$(srcdir)/'`misc.cpp gridftpd-commands.o: commands.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -MT gridftpd-commands.o -MD -MP -MF $(DEPDIR)/gridftpd-commands.Tpo -c -o gridftpd-commands.o `test -f 'commands.cpp' || echo '$(srcdir)/'`commands.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gridftpd-commands.Tpo $(DEPDIR)/gridftpd-commands.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='commands.cpp' object='gridftpd-commands.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -c -o gridftpd-commands.o `test -f 'commands.cpp' || echo '$(srcdir)/'`commands.cpp gridftpd-commands.obj: commands.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -MT gridftpd-commands.obj -MD -MP -MF $(DEPDIR)/gridftpd-commands.Tpo -c -o gridftpd-commands.obj `if test -f 'commands.cpp'; then $(CYGPATH_W) 'commands.cpp'; else $(CYGPATH_W) '$(srcdir)/commands.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gridftpd-commands.Tpo $(DEPDIR)/gridftpd-commands.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='commands.cpp' object='gridftpd-commands.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -c -o gridftpd-commands.obj `if test -f 'commands.cpp'; then $(CYGPATH_W) 'commands.cpp'; else $(CYGPATH_W) '$(srcdir)/commands.cpp'; fi` gridftpd-config.o: config.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -MT gridftpd-config.o -MD -MP -MF $(DEPDIR)/gridftpd-config.Tpo -c -o gridftpd-config.o `test -f 'config.cpp' || echo '$(srcdir)/'`config.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gridftpd-config.Tpo $(DEPDIR)/gridftpd-config.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='config.cpp' object='gridftpd-config.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -c -o gridftpd-config.o `test -f 'config.cpp' || echo '$(srcdir)/'`config.cpp gridftpd-config.obj: config.cpp @am__fastdepCXX_TRUE@ $(CXX) 
$(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -MT gridftpd-config.obj -MD -MP -MF $(DEPDIR)/gridftpd-config.Tpo -c -o gridftpd-config.obj `if test -f 'config.cpp'; then $(CYGPATH_W) 'config.cpp'; else $(CYGPATH_W) '$(srcdir)/config.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gridftpd-config.Tpo $(DEPDIR)/gridftpd-config.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='config.cpp' object='gridftpd-config.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -c -o gridftpd-config.obj `if test -f 'config.cpp'; then $(CYGPATH_W) 'config.cpp'; else $(CYGPATH_W) '$(srcdir)/config.cpp'; fi` gridftpd-fileroot.o: fileroot.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -MT gridftpd-fileroot.o -MD -MP -MF $(DEPDIR)/gridftpd-fileroot.Tpo -c -o gridftpd-fileroot.o `test -f 'fileroot.cpp' || echo '$(srcdir)/'`fileroot.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gridftpd-fileroot.Tpo $(DEPDIR)/gridftpd-fileroot.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='fileroot.cpp' object='gridftpd-fileroot.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -c -o gridftpd-fileroot.o `test -f 'fileroot.cpp' || echo '$(srcdir)/'`fileroot.cpp gridftpd-fileroot.obj: fileroot.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -MT gridftpd-fileroot.obj -MD -MP -MF $(DEPDIR)/gridftpd-fileroot.Tpo -c -o gridftpd-fileroot.obj `if test -f 'fileroot.cpp'; then $(CYGPATH_W) 'fileroot.cpp'; else $(CYGPATH_W) '$(srcdir)/fileroot.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gridftpd-fileroot.Tpo $(DEPDIR)/gridftpd-fileroot.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='fileroot.cpp' object='gridftpd-fileroot.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -c -o gridftpd-fileroot.obj `if test -f 'fileroot.cpp'; then $(CYGPATH_W) 'fileroot.cpp'; else $(CYGPATH_W) '$(srcdir)/fileroot.cpp'; fi` gridftpd-listener.o: listener.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -MT gridftpd-listener.o -MD -MP -MF $(DEPDIR)/gridftpd-listener.Tpo -c -o gridftpd-listener.o `test -f 'listener.cpp' || echo '$(srcdir)/'`listener.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gridftpd-listener.Tpo $(DEPDIR)/gridftpd-listener.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='listener.cpp' object='gridftpd-listener.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -c -o gridftpd-listener.o `test -f 'listener.cpp' || echo '$(srcdir)/'`listener.cpp gridftpd-listener.obj: listener.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -MT gridftpd-listener.obj -MD -MP -MF $(DEPDIR)/gridftpd-listener.Tpo -c -o gridftpd-listener.obj `if test -f 'listener.cpp'; then $(CYGPATH_W) 'listener.cpp'; else $(CYGPATH_W) '$(srcdir)/listener.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gridftpd-listener.Tpo $(DEPDIR)/gridftpd-listener.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='listener.cpp' object='gridftpd-listener.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -c -o gridftpd-listener.obj `if test -f 'listener.cpp'; then $(CYGPATH_W) 'listener.cpp'; else $(CYGPATH_W) '$(srcdir)/listener.cpp'; fi` gridftpd-dataread.o: dataread.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -MT gridftpd-dataread.o -MD -MP -MF $(DEPDIR)/gridftpd-dataread.Tpo -c -o gridftpd-dataread.o `test -f 'dataread.cpp' || echo '$(srcdir)/'`dataread.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gridftpd-dataread.Tpo $(DEPDIR)/gridftpd-dataread.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='dataread.cpp' object='gridftpd-dataread.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -c -o gridftpd-dataread.o `test -f 'dataread.cpp' || echo '$(srcdir)/'`dataread.cpp gridftpd-dataread.obj: dataread.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -MT gridftpd-dataread.obj -MD -MP -MF $(DEPDIR)/gridftpd-dataread.Tpo -c -o gridftpd-dataread.obj `if test -f 'dataread.cpp'; then $(CYGPATH_W) 'dataread.cpp'; else $(CYGPATH_W) '$(srcdir)/dataread.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gridftpd-dataread.Tpo $(DEPDIR)/gridftpd-dataread.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='dataread.cpp' object='gridftpd-dataread.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -c -o gridftpd-dataread.obj `if test -f 'dataread.cpp'; then $(CYGPATH_W) 'dataread.cpp'; else $(CYGPATH_W) '$(srcdir)/dataread.cpp'; fi` gridftpd-datawrite.o: datawrite.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -MT gridftpd-datawrite.o -MD -MP -MF $(DEPDIR)/gridftpd-datawrite.Tpo -c -o gridftpd-datawrite.o `test -f 'datawrite.cpp' || echo '$(srcdir)/'`datawrite.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gridftpd-datawrite.Tpo $(DEPDIR)/gridftpd-datawrite.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='datawrite.cpp' object='gridftpd-datawrite.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -c -o gridftpd-datawrite.o `test -f 'datawrite.cpp' || echo '$(srcdir)/'`datawrite.cpp gridftpd-datawrite.obj: datawrite.cpp 
@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -MT gridftpd-datawrite.obj -MD -MP -MF $(DEPDIR)/gridftpd-datawrite.Tpo -c -o gridftpd-datawrite.obj `if test -f 'datawrite.cpp'; then $(CYGPATH_W) 'datawrite.cpp'; else $(CYGPATH_W) '$(srcdir)/datawrite.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gridftpd-datawrite.Tpo $(DEPDIR)/gridftpd-datawrite.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='datawrite.cpp' object='gridftpd-datawrite.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -c -o gridftpd-datawrite.obj `if test -f 'datawrite.cpp'; then $(CYGPATH_W) 'datawrite.cpp'; else $(CYGPATH_W) '$(srcdir)/datawrite.cpp'; fi` gridftpd-datalist.o: datalist.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -MT gridftpd-datalist.o -MD -MP -MF $(DEPDIR)/gridftpd-datalist.Tpo -c -o gridftpd-datalist.o `test -f 'datalist.cpp' || echo '$(srcdir)/'`datalist.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gridftpd-datalist.Tpo $(DEPDIR)/gridftpd-datalist.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='datalist.cpp' object='gridftpd-datalist.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -c -o gridftpd-datalist.o `test -f 'datalist.cpp' || echo '$(srcdir)/'`datalist.cpp gridftpd-datalist.obj: datalist.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -MT gridftpd-datalist.obj -MD -MP -MF $(DEPDIR)/gridftpd-datalist.Tpo -c -o gridftpd-datalist.obj `if test -f 'datalist.cpp'; then $(CYGPATH_W) 'datalist.cpp'; else $(CYGPATH_W) '$(srcdir)/datalist.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gridftpd-datalist.Tpo $(DEPDIR)/gridftpd-datalist.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='datalist.cpp' object='gridftpd-datalist.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -c -o gridftpd-datalist.obj `if test -f 'datalist.cpp'; then $(CYGPATH_W) 'datalist.cpp'; else $(CYGPATH_W) '$(srcdir)/datalist.cpp'; fi` gridftpd-fileroot_config.o: fileroot_config.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -MT gridftpd-fileroot_config.o -MD -MP -MF $(DEPDIR)/gridftpd-fileroot_config.Tpo -c -o gridftpd-fileroot_config.o `test -f 'fileroot_config.cpp' || echo '$(srcdir)/'`fileroot_config.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gridftpd-fileroot_config.Tpo $(DEPDIR)/gridftpd-fileroot_config.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='fileroot_config.cpp' object='gridftpd-fileroot_config.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -c -o 
gridftpd-fileroot_config.o `test -f 'fileroot_config.cpp' || echo '$(srcdir)/'`fileroot_config.cpp gridftpd-fileroot_config.obj: fileroot_config.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -MT gridftpd-fileroot_config.obj -MD -MP -MF $(DEPDIR)/gridftpd-fileroot_config.Tpo -c -o gridftpd-fileroot_config.obj `if test -f 'fileroot_config.cpp'; then $(CYGPATH_W) 'fileroot_config.cpp'; else $(CYGPATH_W) '$(srcdir)/fileroot_config.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/gridftpd-fileroot_config.Tpo $(DEPDIR)/gridftpd-fileroot_config.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='fileroot_config.cpp' object='gridftpd-fileroot_config.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gridftpd_CXXFLAGS) $(CXXFLAGS) -c -o gridftpd-fileroot_config.obj `if test -f 'fileroot_config.cpp'; then $(CYGPATH_W) 'fileroot_config.cpp'; else $(CYGPATH_W) '$(srcdir)/fileroot_config.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man8: $(man_MANS) @$(NORMAL_INSTALL) test -z "$(man8dir)" || $(MKDIR_P) "$(DESTDIR)$(man8dir)" @list=''; test -n "$(man8dir)" || exit 0; \ { for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.8[a-z]*$$/p'; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^8][0-9a-z]*$$,8,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man8dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man8dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man8dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man8dir)" || exit $$?; }; \ done; } uninstall-man8: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man8dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.8[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^8][0-9a-z]*$$,8,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ test -z "$$files" || { \ echo " ( cd '$(DESTDIR)$(man8dir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(man8dir)" && rm -f $$files; } install-unitsDATA: $(units_DATA) @$(NORMAL_INSTALL) test -z "$(unitsdir)" || $(MKDIR_P) "$(DESTDIR)$(unitsdir)" @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(unitsdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(unitsdir)" || exit $$?; \ done uninstall-unitsDATA: @$(NORMAL_UNINSTALL) @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(unitsdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(unitsdir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this 
Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @list='$(MANS)'; if test -n "$$list"; then \ list=`for p in $$list; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \ if test -n "$$list" && \ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) $(SCRIPTS) $(MANS) $(DATA) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(sbindir)" "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(man8dir)" "$(DESTDIR)$(unitsdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
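# Illustrative usage (a sketch, not generated by automake): as noted in the
# comment above about changing `make' variables, values are normally passed on
# the `make' command line rather than edited into this file.  A staged install
# of this directory could then look like the following, where the optimisation
# flags and the /tmp/arc-stage path are assumptions chosen only for
# illustration:
#
#   make CXXFLAGS="-O2 -g"
#   make install DESTDIR=/tmp/arc-stage
#
# Both CXXFLAGS and DESTDIR are already honoured by the compile and install
# rules in this Makefile.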
@SYSV_SCRIPTS_ENABLED_FALSE@uninstall-local: @SYSV_SCRIPTS_ENABLED_FALSE@install-data-local: clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ clean-sbinPROGRAMS mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-data-local install-man install-pkgdataSCRIPTS \ install-unitsDATA install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-sbinPROGRAMS install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-man8 install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-local uninstall-man uninstall-pkgdataSCRIPTS \ uninstall-sbinPROGRAMS uninstall-unitsDATA uninstall-man: uninstall-man8 .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-noinstLTLIBRARIES clean-sbinPROGRAMS ctags \ ctags-recursive distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-data-local install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-man8 \ install-pdf install-pdf-am install-pkgdataSCRIPTS install-ps \ install-ps-am install-sbinPROGRAMS install-strip \ install-unitsDATA installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am uninstall-local uninstall-man \ uninstall-man8 uninstall-pkgdataSCRIPTS uninstall-sbinPROGRAMS \ uninstall-unitsDATA # Not using initd_SCRIPTS due to rename. @SYSV_SCRIPTS_ENABLED_TRUE@install-data-local: @SYSV_SCRIPTS_ENABLED_TRUE@ $(MKDIR_P) "$(DESTDIR)$(initddir)" @SYSV_SCRIPTS_ENABLED_TRUE@ $(INSTALL_SCRIPT) gridftpd.init $(DESTDIR)$(initddir)/gridftpd @SYSV_SCRIPTS_ENABLED_TRUE@uninstall-local: @SYSV_SCRIPTS_ENABLED_TRUE@ rm -f $(DESTDIR)$(initddir)/gridftpd # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/names.h0000644000000000000000000000012411655756553022710 xustar000000000000000027 mtime=1320672619.279056 27 atime=1513200575.528714 30 ctime=1513200662.485777888 nordugrid-arc-5.4.2/src/services/gridftpd/names.h0000644000175000002070000000041111655756553022751 0ustar00mockbuildmock00000000000000#include bool remove_last_name(std::string &name); bool keep_last_name(std::string &name); char* remove_head_dir_c(const char* name,int dir_len); std::string remove_head_dir_s(std::string &name,int dir_len); const char* get_last_name(const char* name); nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/fileplugin0000644000000000000000000000013213214316026023470 xustar000000000000000030 mtime=1513200662.646779857 30 atime=1513200668.718854121 30 ctime=1513200662.646779857 nordugrid-arc-5.4.2/src/services/gridftpd/fileplugin/0000755000175000002070000000000013214316026023613 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/gridftpd/fileplugin/PaxHeaders.7502/fileplugin.cpp0000644000000000000000000000012413213445240026410 xustar000000000000000027 mtime=1512983200.815191 27 atime=1513200575.534714 30 ctime=1513200662.645779845 nordugrid-arc-5.4.2/src/services/gridftpd/fileplugin/fileplugin.cpp0000644000175000002070000006360413213445240026466 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #define GRIDFTP_PLUGIN #include #if HAVE_SYS_STATFS_H #include #endif #if HAVE_SYS_MOUNT_H #include #endif #if HAVE_SYS_VFS_H #include #endif #include #include #include "fileplugin.h" #include "../userspec.h" #include "../names.h" #include "../misc.h" #ifndef HAVE_STRERROR_R int strerror_r (int errnum, char * buf, size_t buflen) { char * estring = strerror (errnum); strncpy (buf, estring, buflen); buf[buflen-1] = '\0'; return 0; } #endif static Arc::Logger logger(Arc::Logger::getRootLogger(),"DirectFilePlugin"); static bool parse_id(std::string s,int &id,int base = 10) { if((s.length()==1) && (s[0] == '*')) { id=-1; return true; } else { char* end; id=strtoul(s.c_str(),&end,base); if(*end) { return false; }; }; return true; } static bool parse_owner_rights(std::string &rest,int &uid,int &gid,int &orbits,int &andbits) { struct passwd pw_; struct group gr_; struct passwd *pw; struct group *gr; char buf[BUFSIZ]; std::string owner = Arc::ConfigIni::NextArg(rest); std::string acc_rights = Arc::ConfigIni::NextArg(rest); if(acc_rights.length() == 0) { logger.msg(Arc::WARNING, "Can't parse access rights in configuration line"); return false; }; std::string::size_type n; n=owner.find(':'); if(n == std::string::npos) { logger.msg(Arc::WARNING, "Can't parse user:group in configuration line"); return false; }; if(!parse_id(owner.substr(0,n),uid)) { /* not number, must be name */ getpwnam_r(owner.substr(0,n).c_str(),&pw_,buf,BUFSIZ,&pw); if(pw == NULL) { logger.msg(Arc::WARNING, "Can't recognize user in configuration line"); return false; }; uid=pw->pw_uid; }; if(!parse_id(owner.substr(n+1),gid)) { /* not number, must be name */ getgrnam_r(owner.substr(n+1).c_str(),&gr_,buf,BUFSIZ,&gr); if(gr == NULL) { logger.msg(Arc::WARNING, "Can't recognize group in configuration line"); return false; }; gid=gr->gr_gid; }; n=acc_rights.find(':'); if(n == std::string::npos) { logger.msg(Arc::WARNING, "Can't parse or:and in configuration line"); return false; }; if((!parse_id(acc_rights.substr(0,n),orbits,8)) || (!parse_id(acc_rights.substr(0,n),andbits,8))) { logger.msg(Arc::WARNING, "Can't parse or:and in configuration line"); 
return false; }; return true; } DirectFilePlugin::DirectFilePlugin(std::istream &cfile,userspec_t &user) { data_file=-1; uid=user.get_uid(); gid=user.get_gid(); /* read configuration */ for(;;) { std::string rest=Arc::ConfigFile::read_line(cfile); std::string command=Arc::ConfigIni::NextArg(rest); if(command.length() == 0) break; /* end of file - should not be here */ if(command == "dir") { DirectAccess::diraccess_t laccess; /* filling default access */ laccess.read=false; laccess.dirlist=false; laccess.cd=false; laccess.creat=false; laccess.overwrite=false; laccess.append=false; laccess.del=false; laccess.mkdir=false; laccess.access=DirectAccess::local_unix_access; bool parsed_line = false; rest=subst_user_spec(rest,&user); std::string dir = Arc::ConfigIni::NextArg(rest); if(dir.length() == 0) { logger.msg(Arc::WARNING, "Can't parse configuration line"); continue; }; if(!Arc::CanonicalDir(dir,false)) { logger.msg(Arc::WARNING, "Bad directory name: %s", dir); continue; }; for(;;) { std::string subcommand = Arc::ConfigIni::NextArg(rest); if(subcommand.length() == 0) { parsed_line=true; break; }; if(subcommand == "read") { laccess.read=true; } else if(subcommand == "delete") { laccess.del=true; } else if(subcommand == "append") { laccess.append=true; } else if(subcommand == "overwrite") { laccess.overwrite=true; } else if(subcommand == "cd") { laccess.cd=true; } else if(subcommand == "dirlist") { laccess.dirlist=true; } else if(subcommand == "create") { laccess.creat=true; if(!parse_owner_rights(rest, laccess.creat_uid,laccess.creat_gid, laccess.creat_perm_or,laccess.creat_perm_and)) { logger.msg(Arc::WARNING, "Can't parse create arguments in configuration line"); break; }; } else if(subcommand == "mkdir") { laccess.mkdir=true; if(!parse_owner_rights(rest, laccess.mkdir_uid,laccess.mkdir_gid, laccess.mkdir_perm_or,laccess.mkdir_perm_and)) { logger.msg(Arc::WARNING, "Can't parse mkdir arguments in configuration line"); break; }; } else if(subcommand == "owner") { laccess.access=DirectAccess::local_user_access; } else if(subcommand == "group") { laccess.access=DirectAccess::local_group_access; } else if(subcommand == "other") { laccess.access=DirectAccess::local_other_access; } else if(subcommand == "nouser") { laccess.access=DirectAccess::local_none_access; } else { logger.msg(Arc::WARNING, "Bad subcommand in configuration line: %s", subcommand); continue; }; }; if(parsed_line) { access.push_back(DirectAccess(dir,laccess)); }; } else if(command == "mount") { rest=subst_user_spec(rest,&user); mount=Arc::ConfigIni::NextArg(rest); if((mount.length() == 0) || (!Arc::CanonicalDir(mount,false))) { logger.msg(Arc::WARNING, "Bad mount directory specified"); }; logger.msg(Arc::INFO, "Mount point %s", mount); } else if(command == "endpoint") { endpoint=Arc::ConfigIni::NextArg(rest); } else if(command == "end") { break; /* end of section */ } else { logger.msg(Arc::WARNING, "Unsupported configuration command: %s", command); }; }; access.sort(DirectAccess::comp); file_mode=file_access_none; } /* name must be absolute path */ /* make directories out of scope of mount dir */ int makedirs(std::string &name) { /* to make it faster - just check if it exists */ struct stat st; if(stat(name.c_str(),&st) == 0) { if(S_ISDIR(st.st_mode)) return 0; return 1; }; std::string::size_type n=1; for(;;) { if(n >= name.length()) break; n=name.find('/',n); if(n==std::string::npos) n=name.length(); std::string dname=name.substr(0,n); n++; if(stat(dname.c_str(),&st) == 0) { /* have intermediate object */ 
if(S_ISDIR(st.st_mode)) continue; /* already have - ok */ return 1; /* can't make directory based on file - not in unix */ }; /* no such object - create */ if(mkdir(dname.c_str(),S_IRWXU | S_IRWXG | S_IRWXO) == 0) continue; char errmgsbuf[256] = ""; (void)strerror_r(errno,errmgsbuf,sizeof(errmgsbuf)); logger.msg(Arc::ERROR, "mkdir failed: %s", errmgsbuf); return 1; /* directory creation failed */ }; return 0; } /* make all directories */ int DirectFilePlugin::makedir(std::string &dname) { /* first check for mount point */ std::string mname='/'+mount; if(makedirs(mname) != 0) { /* can't make mount point */ logger.msg(Arc::WARNING, "Warning: mount point %s creation failed.", mname); return 1; }; /* now go through rest of directories */ std::string::size_type n = 0; std::string pdname(""); std::list::iterator i=control_dir(pdname,false); if(i==access.end()) return 1; /* no root ? - strange */ pdname=real_name(pdname); int ur=i->unix_rights(pdname,uid,gid); if(ur & S_IFREG) return 1; if(!(ur & S_IFDIR)) return 1; for(;;) { if(n >= dname.length()) break; n=dname.find('/',n); if(n==std::string::npos) n=dname.length(); std::string fdname=dname.substr(0,n); n++; /* remember if parrent directory allows mkdir */ bool allow_mkdir = i->access.mkdir; i=control_dir(fdname,false); if(i==access.end()) return 1; /* first check maybe it already exists */ fdname=real_name(fdname); int pur = ur; ur=i->unix_rights(fdname,uid,gid); if(ur & S_IFDIR) continue; /* already exists */ if(ur & S_IFREG) return 1; /* can't make directory with same name as file */ /* check if parrent directory allows mkdir */ if(!allow_mkdir) return -1; if(!(pur & S_IWUSR)) return 1; /* create directory with proper rights */ if(i->unix_set(uid,gid) == 0) { if(::mkdir(fdname.c_str(), i->access.mkdir_perm_or & i->access.mkdir_perm_and) == 0) { chmod(fdname.c_str(), i->access.mkdir_perm_or & i->access.mkdir_perm_and); i->unix_reset(); uid_t u = i->access.mkdir_uid; gid_t g = i->access.mkdir_gid; if(u == ((uid_t)(-1))) u=uid; if(g == ((gid_t)(-1))) g=gid; if(chown(fdname.c_str(),u,g) != 0) {} continue; } else { i->unix_reset(); }; }; char errmgsbuf[256] = ""; (void)strerror_r(errno,errmgsbuf,sizeof(errmgsbuf)); logger.msg(Arc::ERROR, "mkdir failed: %s", errmgsbuf); return 1; /* directory creation failed */ }; return 0; } int DirectFilePlugin::removefile(std::string &name) { std::list::iterator i=control_dir(name,true); if(i==access.end()) return 1; if(!(i->access.del)) return 1; std::string fname=real_name(name); int ur=i->unix_rights(fname,uid,gid); if(ur == 0 && errno > 0) { // stat failed error_description = Arc::StrError(errno); return 1; } if(ur & S_IFDIR) { error_description = "Is a directory"; return 1; } if(!(ur & S_IFREG)) return 1; if(i->unix_set(uid,gid) != 0) return 1; if(::remove(fname.c_str()) != 0) { error_description = Arc::StrError(errno); i->unix_reset(); return 1; }; i->unix_reset(); return 0; } int DirectFilePlugin::removedir(std::string &dname) { std::list::iterator i=control_dir(dname,true); if(i==access.end()) return 1; if(!(i->access.del)) return 1; std::string fdname=real_name(dname); int ur=i->unix_rights(fdname,uid,gid); if(ur == 0 && errno > 0) { // stat failed error_description = Arc::StrError(errno); return 1; } if(!(ur & S_IFDIR)) { error_description = "Not a directory"; return 1; } if(i->unix_set(uid,gid) != 0) return 1; if(::remove(fdname.c_str()) != 0) { error_description = Arc::StrError(errno); i->unix_reset(); return 1; }; i->unix_reset(); return 0; } int DirectFilePlugin::open(const char* name,open_modes 
mode,unsigned long long int size) { logger.msg(Arc::VERBOSE, "plugin: open: %s", name); std::string fname = real_name(name); if( mode == GRIDFTP_OPEN_RETRIEVE ) { /* open for reading */ std::list::iterator i=control_dir(name,true); if(i==access.end()) return 1; /* error ? */ if(i->access.read) { int ur=(*i).unix_rights(fname,uid,gid); if(ur == 0 && errno > 0) { // stat failed error_description = Arc::StrError(errno); return 1; } if((ur & S_IFREG) && (ur & S_IRUSR)) { /* so open it */ if(i->unix_set(uid,gid) != 0) return 1; logger.msg(Arc::INFO, "Retrieving file %s", fname); data_file=::open(fname.c_str(),O_RDONLY); i->unix_reset(); if(data_file == -1) return 1; file_mode=file_access_read; file_name=fname; return 0; }; }; return 1; } else if( mode == GRIDFTP_OPEN_STORE ) { /* open for writing - overwrite */ std::string dname=name; if(!remove_last_name(dname)) { return 1; }; std::list::iterator i=control_dir(name,true); if(i==access.end()) return 1; /* first check if file exists */ int ur=i->unix_rights(fname,uid,gid); if(ur & S_IFREG) { if(i->access.overwrite) { /* can overwrite */ if(ur & S_IWUSR) { /* really can ? */ if(size > 0) { struct statfs dst; #ifndef sun if(statfs((char*)(fname.c_str()),&dst) == 0) { #else if(statfs((char*)(fname.c_str()),&dst,0,0) == 0) { #endif uid_t uid_; gid_t gid_; unsigned long long size_ = 0; time_t changed_,modified_; bool is_file_; i->unix_info(fname,uid_,gid_,size_,changed_,modified_,is_file_); if(size > ((dst.f_bfree*dst.f_bsize) + size_)) { logger.msg(Arc::ERROR, "Not enough space to store file"); return 1; }; }; }; if(i->unix_set(uid,gid) != 0) return 1; logger.msg(Arc::INFO, "Storing file %s", fname); data_file=::open(fname.c_str(),O_WRONLY); i->unix_reset(); if(data_file == -1) return 1; file_mode=file_access_overwrite; file_name=fname; if(truncate(file_name.c_str(),0) != 0) {} return 0; }; }; error_description="File exists, overwrite not allowed"; return 1; } else if(ur & S_IFDIR) { /* it's a directory */ return 1; } else { /* no such object in filesystem */ if(i->access.creat) { /* allowed to create new file */ std::string fdname = real_name(dname); /* make sure we have directory to store file */ if(makedir(dname) != 0) return 1; /* problems with underlaying dir */ int ur=i->unix_rights(fdname,uid,gid); if((ur & S_IWUSR) && (ur & S_IFDIR)) { if(size > 0) { struct statfs dst; #ifndef sun if(statfs((char*)(fname.c_str()),&dst) == 0) { #else if(statfs((char*)(fname.c_str()),&dst,0,0) == 0) { #endif if(size > (dst.f_bfree*dst.f_bsize)) { logger.msg(Arc::ERROR, "Not enough space to store file"); return 1; }; }; }; if(i->unix_set(uid,gid) != 0) return 1; logger.msg(Arc::INFO, "Storing file %s", fname); data_file=::open(fname.c_str(),O_WRONLY | O_CREAT | O_EXCL, i->access.creat_perm_or & i->access.creat_perm_and); i->unix_reset(); if(data_file == -1) return 1; uid_t u = i->access.creat_uid; gid_t g = i->access.creat_gid; if(u == ((uid_t)(-1))) u=uid; if(g == ((gid_t)(-1))) g=gid; logger.msg(Arc::VERBOSE, "open: changing owner for %s, %i, %i", fname, u, gid); if(chown(fname.c_str(),u,g) != 0) {} /* adjust permissions because open uses umask */ chmod(fname.c_str(), i->access.creat_perm_or & i->access.creat_perm_and); struct stat st; stat(fname.c_str(),&st); logger.msg(Arc::VERBOSE, "open: owner: %i %i", st.st_uid, st.st_gid); file_mode=file_access_create; file_name=fname; return 0; }; }; }; return 1; } logger.msg(Arc::WARNING, "Unknown open mode %s", mode); return 1; } int DirectFilePlugin::close(bool eof) { logger.msg(Arc::VERBOSE, "plugin: close"); 
if(data_file != -1) { if(eof) { ::close(data_file); } else { /* file was not transferred properly */ if((file_mode==file_access_create) || (file_mode==file_access_overwrite)) { /* destroy file */ ::close(data_file); ::unlink(file_name.c_str()); }; }; }; return 0; } int DirectFilePlugin::open_direct(const char* name,open_modes mode) { std::string fname = name; if( mode == GRIDFTP_OPEN_RETRIEVE ) { /* open for reading */ data_file=::open(fname.c_str(),O_RDONLY); if(data_file == -1) return 1; file_mode=file_access_read; file_name=fname; return 0; } else if( mode == GRIDFTP_OPEN_STORE ) { /* open for writing - overwrite */ data_file=::open(fname.c_str(),O_WRONLY | O_CREAT,S_IRUSR | S_IWUSR); if(data_file == -1) return 1; file_mode=file_access_create; file_name=fname; if(truncate(file_name.c_str(),0) != 0) {} if(chown(fname.c_str(),uid,gid) != 0) {} chmod(fname.c_str(),S_IRUSR | S_IWUSR); return 0; } logger.msg(Arc::WARNING, "Unknown open mode %s", mode); return 1; } int DirectFilePlugin::read(unsigned char *buf,unsigned long long int offset,unsigned long long int *size) { ssize_t l; logger.msg(Arc::VERBOSE, "plugin: read"); if(data_file == -1) return 1; if(lseek(data_file,offset,SEEK_SET) != offset) { (*size)=0; return 0; /* can't read anymore */ }; if((l=::read(data_file,buf,(*size))) == -1) { logger.msg(Arc::WARNING, "Error while reading file"); (*size)=0; return 1; }; (*size)=l; return 0; } int DirectFilePlugin::write(unsigned char *buf,unsigned long long int offset,unsigned long long int size) { ssize_t l; size_t ll; logger.msg(Arc::VERBOSE, "plugin: write"); if(data_file == -1) return 1; if(lseek(data_file,offset,SEEK_SET) != offset) { perror("lseek"); return 1; /* can't write at that position */ }; for(ll=0;ll::iterator DirectFilePlugin::control_dir(const std::string &name,bool indir) { return control_dir(name.c_str(),indir); } std::list::iterator DirectFilePlugin::control_dir(const char* name,bool indir) { std::list::iterator i; for(i=access.begin();i!=access.end();++i) { if(i->belongs(name,indir)) break; }; return i; } std::string DirectFilePlugin::real_name(char* name) { return real_name(std::string(name)); } std::string DirectFilePlugin::real_name(std::string name) { std::string fname = ""; if(mount.length() != 0) { fname+='/'+mount; }; if(name.length() != 0) { fname+='/'+name; }; return fname; } bool DirectFilePlugin::fill_object_info(DirEntry &dent,std::string dirname,int ur, std::list::iterator i, DirEntry::object_info_level mode) { bool is_manageable = true; if(mode != DirEntry::minimal_object_info) { std::string ffname = dirname; if(dent.name.length() != 0) ffname+="/"+dent.name; if(i->unix_set(uid,gid) != 0) { is_manageable=false; } else { if(i->unix_info(ffname, dent.uid,dent.gid,dent.size, dent.changed,dent.modified,dent.is_file) != 0) { is_manageable=false; }; i->unix_reset(); }; if(is_manageable) { if(mode != DirEntry::basic_object_info) { int fur=i->unix_rights(ffname,uid,gid); if(S_IFDIR & fur) { dent.is_file=false; } else if(S_IFREG & fur) { dent.is_file=true; } else { is_manageable=false; }; // TODO: treat special files (not regular) properly. (how?) if(is_manageable) { if(dent.is_file) { if(i->access.del && (ur & S_IWUSR)) dent.may_delete=true; if(i->access.overwrite &&(fur & S_IWUSR)) dent.may_write=true; if(i->access.append && (fur & S_IWUSR)) dent.may_append=true; if(i->access.read && (fur & S_IRUSR)) dent.may_read=true; } else { // TODO: this directory can have different rules than i !!!!!! 
if(i->access.del && (ur & S_IWUSR)) dent.may_delete=true; if(i->access.creat && (fur & S_IWUSR)) dent.may_create=true; if(i->access.mkdir && (fur & S_IWUSR)) dent.may_mkdir=true; if(i->access.cd && (fur & S_IXUSR)) dent.may_chdir=true; if(i->access.dirlist &&(fur & S_IRUSR)) dent.may_dirlist=true; if(i->access.del && (fur & S_IWUSR)) dent.may_purge=true; }; }; }; }; }; return is_manageable; } int DirectFilePlugin::readdir(const char* name,std::list &dir_list,DirEntry::object_info_level mode) { /* first check if allowed to read this directory */ std::list::iterator i=control_dir(name,false); if(i==access.end()) return 1; /* error ? */ std::string fname = real_name(name); if(i->access.dirlist) { int ur=i->unix_rights(fname,uid,gid); if(ur == 0 && errno > 0) { // stat failed error_description = Arc::StrError(errno); return 1; } if((ur & S_IFDIR) && (ur & S_IRUSR) && (ur & S_IXUSR)) { /* allowed to list in configuration and by unix rights */ /* following Linux semantics - need r-x for dirlist */ /* now get real listing */ if(i->unix_set(uid,gid) != 0) return 1; DIR* d=::opendir(fname.c_str()); if(d == NULL) { return 1; }; /* maybe return ? */ struct dirent *de; for(;;) { de=::readdir(d); if(de == NULL) break; if((!strcmp(de->d_name,".")) || (!strcmp(de->d_name,".."))) continue; DirEntry dent(true,de->d_name); // treat it as file by default i->unix_reset(); bool is_manageable = fill_object_info(dent,fname,ur,i,mode); i->unix_set(uid,gid); if(is_manageable) { dir_list.push_back(dent); }; }; ::closedir(d); i->unix_reset(); return 0; } else if(ur & S_IFREG) { DirEntry dent(true,""); bool is_manageable = fill_object_info(dent,fname,ur,i,mode); if(is_manageable) { dir_list.push_back(dent); return -1; }; }; } return 1; } /* checkdir is allowed to change dirname to show actual target of cd */ int DirectFilePlugin::checkdir(std::string &dirname) { logger.msg(Arc::VERBOSE, "plugin: checkdir: %s", dirname); std::list::iterator i=control_dir(dirname,false); if(i==access.end()) return 0; /* error ? */ logger.msg(Arc::VERBOSE, "plugin: checkdir: access: %s", (*i).name); std::string fname = real_name(dirname); if(i->access.cd) { int ur=(*i).unix_rights(fname,uid,gid); if(ur == 0 && errno > 0) { // stat failed error_description = Arc::StrError(errno); return 1; } if((ur & S_IXUSR) && (ur & S_IFDIR)) { logger.msg(Arc::VERBOSE, "plugin: checkdir: access: allowed: %s", fname); return 0; }; }; return 1; } int DirectFilePlugin::checkfile(std::string &name,DirEntry &info,DirEntry::object_info_level mode) { std::list::iterator i=control_dir(name,true); if(i==access.end()) return 1; /* error ? */ /* TODO check permissions of higher level directory */ std::string dname=name; if(!remove_last_name(dname)) { /* information about top directory was requested. Since this directory is declared it should exist. 
At least virtually */ info.uid=getuid(); info.gid=getgid(); info.is_file=false; info.name=""; return 0; }; if(!(i->access.dirlist)) { return 1; }; std::string fdname = real_name(dname); int ur=i->unix_rights(fdname,uid,gid); if(ur == 0 && errno > 0) { // stat failed error_description = Arc::StrError(errno); return 1; } if(!((ur & S_IXUSR) && (ur & S_IFDIR))) { return 1; }; std::string fname = real_name(name); DirEntry dent(true,get_last_name(fname.c_str())); bool is_manageable = fill_object_info(dent,fdname,ur,i,mode); if(!is_manageable) { if (errno > 0) error_description = Arc::StrError(errno); return 1; }; info=dent; return 0; } bool DirectAccess::belongs(std::string &name,bool indir) { return belongs(name.c_str(),indir); } bool DirectAccess::belongs(const char* name,bool indir) { int pl=this->name.length(); if(pl == 0) return true; /* root dir */ int l=strlen(name); if (pl > l) return false; if(strncmp(this->name.c_str(),name,pl)) return false; if(!indir) if(pl == l) return true; if(name[pl] == '/') return true; return false; } nordugrid-arc-5.4.2/src/services/gridftpd/fileplugin/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515025610 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200604.432067868 30 ctime=1513200662.642779808 nordugrid-arc-5.4.2/src/services/gridftpd/fileplugin/Makefile.am0000644000175000002070000000132212052416515025650 0ustar00mockbuildmock00000000000000GLOBUS_FILEPLUGIN_LIBS = $(GLOBUS_RSL_LIBS) $(GLOBUS_IO_LIBS) \ $(GLOBUS_GSI_CREDENTIAL_LIBS) $(GLOBUS_GSI_CERT_UTILS_LIBS) \ $(GLOBUS_GSSAPI_GSI_LIBS) $(GLOBUS_COMMON_LIBS) pkglib_LTLIBRARIES = fileplugin.la fileplugin_la_SOURCES = init.cpp fileplugin.cpp fileplugin.h fileplugin_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLOBUS_IO_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) fileplugin_la_LDFLAGS = -no-undefined -avoid-version -module fileplugin_la_LIBADD = \ ../libgridftpd_la-misc.lo ../libgridftpd_la-userspec.lo \ ../libgridftpd_la-names.lo ../auth/libmap.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(GLOBUS_FILEPLUGIN_LIBS) nordugrid-arc-5.4.2/src/services/gridftpd/fileplugin/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315734025616 xustar000000000000000030 mtime=1513200604.481068467 30 atime=1513200651.291640979 29 ctime=1513200662.64377982 nordugrid-arc-5.4.2/src/services/gridftpd/fileplugin/Makefile.in0000644000175000002070000006447513214315734025705 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/gridftpd/fileplugin DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = am__DEPENDENCIES_2 = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) fileplugin_la_DEPENDENCIES = ../libgridftpd_la-misc.lo \ ../libgridftpd_la-userspec.lo ../libgridftpd_la-names.lo \ ../auth/libmap.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(am__DEPENDENCIES_2) am_fileplugin_la_OBJECTS = fileplugin_la-init.lo \ fileplugin_la-fileplugin.lo fileplugin_la_OBJECTS = $(am_fileplugin_la_OBJECTS) fileplugin_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(fileplugin_la_CXXFLAGS) \ $(CXXFLAGS) $(fileplugin_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) 
$(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(fileplugin_la_SOURCES) DIST_SOURCES = $(fileplugin_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = 
@GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ 
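# Every @NAME@ token in this generated Makefile.in is a placeholder that
# config.status replaces with the value detected by ./configure: the tool
# and library flags above (GLOBUS_*, GLIBMM_*, LIBXML2_* and friends) and
# the installation directories listed below.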
VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ GLOBUS_FILEPLUGIN_LIBS = $(GLOBUS_RSL_LIBS) $(GLOBUS_IO_LIBS) \ $(GLOBUS_GSI_CREDENTIAL_LIBS) $(GLOBUS_GSI_CERT_UTILS_LIBS) \ $(GLOBUS_GSSAPI_GSI_LIBS) $(GLOBUS_COMMON_LIBS) pkglib_LTLIBRARIES = fileplugin.la fileplugin_la_SOURCES = init.cpp fileplugin.cpp fileplugin.h fileplugin_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLOBUS_IO_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) fileplugin_la_LDFLAGS = -no-undefined -avoid-version -module fileplugin_la_LIBADD = \ ../libgridftpd_la-misc.lo ../libgridftpd_la-userspec.lo \ ../libgridftpd_la-names.lo ../auth/libmap.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(GLOBUS_FILEPLUGIN_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) 
am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/gridftpd/fileplugin/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/gridftpd/fileplugin/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done fileplugin.la: $(fileplugin_la_OBJECTS) $(fileplugin_la_DEPENDENCIES) $(fileplugin_la_LINK) -rpath $(pkglibdir) $(fileplugin_la_OBJECTS) $(fileplugin_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fileplugin_la-fileplugin.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fileplugin_la-init.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) 
-MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< fileplugin_la-init.lo: init.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(fileplugin_la_CXXFLAGS) $(CXXFLAGS) -MT fileplugin_la-init.lo -MD -MP -MF $(DEPDIR)/fileplugin_la-init.Tpo -c -o fileplugin_la-init.lo `test -f 'init.cpp' || echo '$(srcdir)/'`init.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/fileplugin_la-init.Tpo $(DEPDIR)/fileplugin_la-init.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='init.cpp' object='fileplugin_la-init.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(fileplugin_la_CXXFLAGS) $(CXXFLAGS) -c -o fileplugin_la-init.lo `test -f 'init.cpp' || echo '$(srcdir)/'`init.cpp fileplugin_la-fileplugin.lo: fileplugin.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(fileplugin_la_CXXFLAGS) $(CXXFLAGS) -MT fileplugin_la-fileplugin.lo -MD -MP -MF $(DEPDIR)/fileplugin_la-fileplugin.Tpo -c -o fileplugin_la-fileplugin.lo `test -f 'fileplugin.cpp' || echo '$(srcdir)/'`fileplugin.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/fileplugin_la-fileplugin.Tpo $(DEPDIR)/fileplugin_la-fileplugin.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='fileplugin.cpp' object='fileplugin_la-fileplugin.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(fileplugin_la_CXXFLAGS) $(CXXFLAGS) -c -o fileplugin_la-fileplugin.lo `test -f 'fileplugin.cpp' || echo '$(srcdir)/'`fileplugin.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) 
$(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
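# The install/clean/tags boilerplate above is generated by automake; the
# hand-maintained input is essentially just the fileplugin_la_* definitions
# near the top of this file, i.e. a Makefile.am fragment along these lines
# (sketch only, mirroring those variables):
#
#   pkglib_LTLIBRARIES = fileplugin.la
#   fileplugin_la_SOURCES = init.cpp fileplugin.cpp fileplugin.h
#   fileplugin_la_LDFLAGS = -no-undefined -avoid-version -module
#
# The "-module -avoid-version" libtool flags build fileplugin.la as an
# unversioned, dlopen()able plugin that is installed into $(pkglibdir).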
.NOEXPORT: nordugrid-arc-5.4.2/src/services/gridftpd/fileplugin/PaxHeaders.7502/fileplugin.h0000644000000000000000000000012311412417142026053 xustar000000000000000026 mtime=1277828706.49151 27 atime=1513200575.537714 30 ctime=1513200662.646779857 nordugrid-arc-5.4.2/src/services/gridftpd/fileplugin/fileplugin.h0000644000175000002070000000742711412417142026133 0ustar00mockbuildmock00000000000000#ifndef GRID_SERVER_FILE_PLUGIN_H #define GRID_SERVER_FILE_PLUGIN_H #include #include #include #include "../fileroot.h" #include "../userspec.h" /* DirectAccess is used to store information about access control */ class DirectAccess { public: typedef enum { local_none_access, local_user_access, local_group_access, local_other_access, local_unix_access } local_access_t; typedef struct { bool read; bool creat; int creat_uid; int creat_gid; int creat_perm_or; int creat_perm_and; bool overwrite; bool append; bool del; bool mkdir; int mkdir_uid; int mkdir_gid; int mkdir_perm_or; int mkdir_perm_and; local_access_t access; bool cd; bool dirlist; } diraccess_t; diraccess_t access; std::string name; DirectAccess(void) { /* dumb constructor, object is for copying to only */ name=""; access.read=true; access.dirlist=true; access.cd=true; access.creat=false; access.overwrite=false; access.append=false; access.del=false; access.mkdir=false; access.access=local_unix_access; }; DirectAccess(const DirectAccess &dir) { /* copy constructor */ name=dir.name; access=dir.access; }; DirectAccess& operator= (const DirectAccess &dir) { name=dir.name; access=dir.access; return (*this); }; DirectAccess(std::string &dirname,diraccess_t &diraccess) { /* real constructor */ name=dirname; access=diraccess; }; static bool comp(DirectAccess &left,DirectAccess &right) { return (left.name.length() > right.name.length()); }; bool belongs(std::string &name,bool indir = false); bool belongs(const char* name,bool indir = false); bool can_read(std::string &name); bool can_write(std::string &name); bool can_append(std::string &name); bool can_mkdir(std::string &name); int unix_rights(std::string &name,int uid,int gid); int unix_info(std::string &name,uid_t &uid,gid_t &gid,unsigned long long &size,time_t &created,time_t &modified,bool &is_file); int unix_set(int uid,int gid); void unix_reset(void); }; /* this class is used to communicate with network layer - must be derived from FilePlugin */ class DirectFilePlugin: public FilePlugin { private: typedef enum { file_access_none, file_access_read, file_access_create, file_access_overwrite } file_access_mode_t; file_access_mode_t file_mode; std::string file_name; bool fill_object_info(DirEntry &dent,std::string dirname,int ur, std::list::iterator i, DirEntry::object_info_level mode); std::string real_name(std::string name); std::string real_name(char* name); std::list::iterator control_dir(const std::string &name,bool indir=false); std::list::iterator control_dir(const char* name,bool indir=false); public: int uid; int gid; std::list access; int data_file; std::string mount; DirectFilePlugin(std::istream &cfile,userspec_t &user); ~DirectFilePlugin(void) { }; virtual int open(const char* name,open_modes mode,unsigned long long int size = 0); int open_direct(const char* name,open_modes mode); virtual int close(bool eof = true); virtual int read(unsigned char *buf,unsigned long long int offset,unsigned long long int *size); virtual int write(unsigned char *buf,unsigned long long int offset,unsigned long long int size); virtual int readdir(const char* name,std::list 
&dir_list,DirEntry::object_info_level mode); virtual int checkdir(std::string &dirname); virtual int checkfile(std::string &name,DirEntry &file,DirEntry::object_info_level mode); virtual int makedir(std::string &dirname); virtual int removefile(std::string &name); virtual int removedir(std::string &dirname); }; #endif nordugrid-arc-5.4.2/src/services/gridftpd/fileplugin/PaxHeaders.7502/init.cpp0000644000000000000000000000012412053412741025216 xustar000000000000000027 mtime=1353586145.017438 27 atime=1513200575.537714 30 ctime=1513200662.644779832 nordugrid-arc-5.4.2/src/services/gridftpd/fileplugin/init.cpp0000644000175000002070000000060712053412741025266 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #define GRIDFTP_PLUGIN #include "fileplugin.h" static FilePlugin* init_cpp(std::istream &cfile,userspec_t &user) { DirectFilePlugin* arg = new DirectFilePlugin(cfile,user); return arg; } extern "C" { FilePlugin* init(std::istream &cfile,userspec_t &user,FileNode &node) { return init_cpp(cfile,user); } } nordugrid-arc-5.4.2/src/services/gridftpd/fileplugin/PaxHeaders.7502/README0000644000000000000000000000012311412417142024424 xustar000000000000000026 mtime=1277828706.49151 27 atime=1513200575.534714 30 ctime=1513200662.641779796 nordugrid-arc-5.4.2/src/services/gridftpd/fileplugin/README0000644000175000002070000000015111412417142024467 0ustar00mockbuildmock00000000000000fileplugin for GridFTP server. Allows exposure of a file-system hierarchy through the GridFTP interface. nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/misc.cpp0000644000000000000000000000012411741502232023046 xustar000000000000000027 mtime=1334215834.878809 27 atime=1513200575.501714 30 ctime=1513200662.473777741 nordugrid-arc-5.4.2/src/services/gridftpd/misc.cpp0000644000175000002070000000125711741502232023120 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "misc.h" std::string timetostring(time_t t) { int l; char buf[32]; buf[0]=0; ctime_r(&t,buf); l=strlen(buf); if(l > 0) buf[l-1]=0; return std::string(buf); } std::string dirstring(bool dir,long long unsigned int s,time_t t,const char *name) { std::string str; if(dir) { str="d--------- 1 user group " + timetostring(t) + \ " " + Arc::tostring(s,16) + " " + std::string(name)+"\r\n"; } else { str="---------- 1 user group " + timetostring(t) + \ " " + Arc::tostring(s,16) + " " + std::string(name)+"\r\n"; }; return str; } nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/listener.cpp0000644000000000000000000000012313124220410023727 xustar000000000000000027 mtime=1498489096.378221 27 atime=1513200575.553714 29 ctime=1513200662.47777779 nordugrid-arc-5.4.2/src/services/gridftpd/listener.cpp0000644000175000002070000004172413124220410024005 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fileroot.h" #include "commands.h" #include "conf.h" #define DEFAULT_MAX_BUFFER_SIZE (10*65536) #define DEFAULT_BUFFER_SIZE (65536) #define DEFAULT_MAX_CONECTIONS (100) #define DEFAULT_GRIDFTP_PORT 2811 #define DEFAULT_LOG_FILE "/var/log/arc/gridftpd.log" #define DEFAULT_PID_FILE "/var/run/gridftpd.pid" GridFTP_Commands *client; static int max_connections = 0; static volatile int started_connections = 0; static volatile int finished_connections = 0; unsigned long long int max_data_buffer_size = 0; unsigned long long int default_data_buffer_size = 0; unsigned 
int firewall_interface[4] = { 0, 0, 0, 0 }; static Arc::Logger logger(Arc::Logger::getRootLogger(), "gridftpd"); #define PROTO_NAME(ADDR) ((ADDR->ai_family==AF_INET6)?"IPv6":"IPv4") /* new connection */ #ifndef __DONT_USE_FORK__ void new_conn_callback(int sock) { /* initiate random number generator */ srand(getpid() + getppid() + time(NULL)); #ifdef HAVE_GLOBUS_THREAD_SET_MODEL globus_thread_set_model("pthread"); #endif if((globus_module_activate(GLOBUS_COMMON_MODULE) != GLOBUS_SUCCESS) || (globus_module_activate(GLOBUS_FTP_CONTROL_MODULE) != GLOBUS_SUCCESS) || (globus_module_activate(GLOBUS_GSI_CREDENTIAL_MODULE) != GLOBUS_SUCCESS) || (globus_module_activate(GLOBUS_GSI_GSS_ASSIST_MODULE) != GLOBUS_SUCCESS) || (globus_module_activate(GLOBUS_OPENSSL_MODULE) != GLOBUS_SUCCESS)) { logger.msg(Arc::ERROR, "Activation failed"); globus_module_deactivate_all(); close(sock); exit(1); }; client = new GridFTP_Commands(getpid(),firewall_interface); client->new_connection_callback((void*)client,sock); close(sock); logger.msg(Arc::INFO, "Child exited"); _exit(0); globus_module_deactivate(GLOBUS_OPENSSL_MODULE); globus_module_deactivate(GLOBUS_GSI_GSS_ASSIST_MODULE); globus_module_deactivate(GLOBUS_GSI_CREDENTIAL_MODULE); globus_module_deactivate(GLOBUS_FTP_CONTROL_MODULE); globus_module_deactivate(GLOBUS_COMMON_MODULE); exit(0); } #else void new_conn_callback(void* arg,globus_ftp_control_server_t *handle,globus_object_t *error) { if(error != GLOBUS_SUCCESS) { logger.msg(Arc::ERROR, "Globus connection error"); return; }; logger.msg(Arc::INFO, "New connection"); client = new GridFTP_Commands(cur_connections,firewall_interface); client->new_connection_callback((void*)client,handle,error); } #endif void serv_stop_callback(void* /* arg */,globus_ftp_control_server_t* /* handle */,globus_object_t* /* error */) { logger.msg(Arc::INFO, "Server stopped"); } static volatile int server_done = 0; static void (*sig_old_chld)(int) = SIG_ERR; void sig_chld(int /* signum */) { int old_errno = errno; int status; for(;;) { int id=waitpid(-1,&status,WNOHANG); if((id == 0) || (id == -1)) break; ++finished_connections; }; errno = old_errno; } #ifdef __USE_RESURECTION__ void sig_term(int signum) { int old_errno = errno; if(chid == -1) return; if(chid == 0) { server_done = 1; globus_cond_signal(&server_cond); if(sig_old_term == SIG_ERR) return; if(sig_old_term == SIG_IGN) return; if(sig_old_term == SIG_DFL) return; (*sig_old_term)(signum); } else { kill(chid,SIGTERM); }; errno = old_errno; }; int main_internal(int argc,char** argv); int main(int argc,char** argv) { char const * log_time_format = ::getenv("ARC_LOGGER_TIME_FORMAT"); if(log_time_format) { if(strcmp(log_time_format,"USER") == 0) { Arc::Time::SetFormat(Arc::UserTime); } else if(strcmp(log_time_format,"USEREXT") == 0) { Arc::Time::SetFormat(Arc::UserExtTime); } else if(strcmp(log_time_format,"ELASTIC") == 0) { Arc::Time::SetFormat(Arc::ElasticTime); } else if(strcmp(log_time_format,"MDS") == 0) { Arc::Time::SetFormat(Arc::MDSTime); } else if(strcmp(log_time_format,"ASC") == 0) { Arc::Time::SetFormat(Arc::ASCTime); } else if(strcmp(log_time_format,"ISO") == 0) { Arc::Time::SetFormat(Arc::ISOTime); } else if(strcmp(log_time_format,"UTC") == 0) { Arc::Time::SetFormat(Arc::UTCTime); } else if(strcmp(log_time_format,"RFC1123") == 0) { Arc::Time::SetFormat(Arc::RFC1123Time); } else if(strcmp(log_time_format,"EPOCH") == 0) { Arc::Time::SetFormat(Arc::EpochTime); }; }; // temporary stderr destination until configuration is read and used in daemon.daemon() Arc::LogStream 
logcerr(std::cerr); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::INFO); globus_module_deactivate_all(); setpgid(0,0); sig_old_term=signal(SIGTERM,&sig_term); if(sig_old_term == SIG_ERR) { perror(""); logger.msg(Arc::ERROR, "Error: failed to set handler for SIGTERM"); return -1; }; for(;;) { logger.msg(Arc::INFO, "Starting controlled process"); if((chid=fork()) != 0) { if(chid == -1) { logger.msg(Arc::ERROR, "fork failed"); return -1; }; int status; if(wait(&status) == -1) { logger.msg(Arc::ERROR, "wait failed - killing child"); kill(chid,SIGKILL); return -1; }; logger.msg(Arc::INFO, "Child exited"); if(WIFSIGNALED(status)) { logger.msg(Arc::INFO, "Killed with signal: "<<(int)(WTERMSIG(status))); if(WTERMSIG(status) == SIGSEGV) { logger.msg(Arc::INFO, "Restarting after segmentation violation."); logger.msg(Arc::INFO, "Waiting 1 minute"); sleep(60); continue; }; }; return WEXITSTATUS(status); }; break; }; return main_internal(argc,argv); } int main_internal(int argc,char** argv) { #else void sig_term_fork(int /* signum */) { int old_errno = errno; int static passed = 0; if(passed) _exit(-1); server_done=1; passed=1; kill(0,SIGTERM); errno = old_errno; } int main(int argc,char** argv) { #ifndef __DONT_USE_FORK__ globus_module_deactivate_all(); #endif setpgid(0,0); #endif char const * log_time_format = ::getenv("ARC_LOGGER_TIME_FORMAT"); if(log_time_format) { if(strcmp(log_time_format,"USER") == 0) { Arc::Time::SetFormat(Arc::UserTime); } else if(strcmp(log_time_format,"USEREXT") == 0) { Arc::Time::SetFormat(Arc::UserExtTime); } else if(strcmp(log_time_format,"ELASTIC") == 0) { Arc::Time::SetFormat(Arc::ElasticTime); } else if(strcmp(log_time_format,"MDS") == 0) { Arc::Time::SetFormat(Arc::MDSTime); } else if(strcmp(log_time_format,"ASC") == 0) { Arc::Time::SetFormat(Arc::ASCTime); } else if(strcmp(log_time_format,"ISO") == 0) { Arc::Time::SetFormat(Arc::ISOTime); } else if(strcmp(log_time_format,"UTC") == 0) { Arc::Time::SetFormat(Arc::UTCTime); } else if(strcmp(log_time_format,"RFC1123") == 0) { Arc::Time::SetFormat(Arc::RFC1123Time); } else if(strcmp(log_time_format,"EPOCH") == 0) { Arc::Time::SetFormat(Arc::EpochTime); }; }; // temporary stderr destination until configuration is read and used in daemon.daemon() Arc::LogStream logcerr(std::cerr); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::INFO); #ifndef __DONT_USE_FORK__ signal(SIGTERM,&sig_term_fork); sig_old_chld=signal(SIGCHLD,&sig_chld); if(sig_old_chld == SIG_ERR) { perror(""); logger.msg(Arc::ERROR, "Error: failed to set handler for SIGCHLD"); return -1; }; std::list handles; #else globus_ftp_control_server_t handle; /* initiate random number generator */ srand(getpid() + getppid() + time(NULL)); #endif unsigned short server_port=0; int n; gridftpd::Daemon daemon; while((n=daemon.getopt(argc,argv,"hp:c:n:b:B:")) != -1) { switch(n) { case '.': { return 1; }; case ':': { logger.msg(Arc::ERROR, "Missing argument"); return 1; }; case '?': { logger.msg(Arc::ERROR, "Unknown option"); return 1; }; case 'h': { fprintf(stdout,"gridftpd [-p port_to_listen] [-c config_file] [-n maximal_connections] [-b default_buffer_size] [-B maximal_buffer_size] %s.\n",daemon.short_help()); return 0; }; case 'p': { if(sscanf(optarg,"%hu",&server_port) != 1) { logger.msg(Arc::ERROR, "Wrong port number"); return 1; }; }; break; case 'c': { config_file=optarg; }; break; case 'n': { if((sscanf(optarg,"%i",&max_connections) != 1) || (max_connections < 
0)) { logger.msg(Arc::ERROR, "Wrong number of connections"); return 1; }; }; break; case 'b': { if((sscanf(optarg,"%Lu",&default_data_buffer_size) != 1) || (default_data_buffer_size < 1)) { logger.msg(Arc::ERROR, "Wrong buffer size"); return 1; }; }; break; case 'B': { if((sscanf(optarg,"%Lu",&max_data_buffer_size) != 1) || (max_data_buffer_size < 1)) { logger.msg(Arc::ERROR, "Wrong maximal buffer size"); return 1; }; }; break; default: break; }; }; //if(config_file) nordugrid_config_loc=config_file; // Read configuration (for daemon commands and port) FileRoot::ServerParams params; if(FileRoot::config(daemon,¶ms) != 0) { logger.msg(Arc::ERROR, "Failed reading configuration"); return 1; }; if(server_port == 0) server_port=params.port; if(server_port == 0) server_port=DEFAULT_GRIDFTP_PORT; if(max_connections == 0) max_connections=params.max_connections; if(max_connections == 0) max_connections=DEFAULT_MAX_CONECTIONS; if(max_data_buffer_size == 0) max_data_buffer_size=params.max_buffer; if(max_data_buffer_size == 0) max_data_buffer_size=DEFAULT_MAX_BUFFER_SIZE; if(default_data_buffer_size == 0) default_data_buffer_size=params.default_buffer; if(default_data_buffer_size == 0) default_data_buffer_size=DEFAULT_BUFFER_SIZE; firewall_interface[0]=params.firewall[0]; firewall_interface[1]=params.firewall[1]; firewall_interface[2]=params.firewall[2]; firewall_interface[3]=params.firewall[3]; #ifndef __DONT_USE_FORK__ unsigned int addrs_num = 0; { struct addrinfo hint; struct addrinfo *info = NULL; memset(&hint, 0, sizeof(hint)); hint.ai_socktype = SOCK_STREAM; hint.ai_protocol = IPPROTO_TCP; // ? hint.ai_flags = AI_PASSIVE; // looking for bind'able adresses // hint.ai_family = AF_INET; // hint.ai_family = AF_INET6; int ret = getaddrinfo(NULL, Arc::tostring(server_port).c_str(), &hint, &info); if (ret != 0) { std::string err_str = gai_strerror(ret); logger.msg(Arc::ERROR, "Failed to obtain local address: %s",err_str); exit(-1); }; for(struct addrinfo *info_ = info;info_;info_=info_->ai_next) { ++addrs_num; int s = socket(info_->ai_family,info_->ai_socktype,info_->ai_protocol); if(s == -1) { std::string e = Arc::StrError(errno); logger.msg(Arc::WARNING, "Failed to create socket(%s): %s",PROTO_NAME(info_),e); }; { int on = 1; setsockopt(s,SOL_SOCKET,SO_REUSEADDR,(void*)(&on),sizeof(on)); }; #ifdef IPV6_V6ONLY if(info_->ai_family == AF_INET6) { int v = 1; // Some systems (Linux for example) make v6 support v4 too // by default. Some don't. Make it same for everyone - // separate sockets for v4 and v6. if(setsockopt(s,IPPROTO_IPV6,IPV6_V6ONLY,&v,sizeof(v)) != 0) { std::string e = Arc::StrError(errno); logger.msg(Arc::WARNING, "Failed to limit socket to IPv6: %s",e); close(s); continue; }; }; #endif if(bind(s,info_->ai_addr,info_->ai_addrlen) == -1) { std::string e = Arc::StrError(errno); logger.msg(Arc::WARNING, "Failed to bind socket(%s): %s",PROTO_NAME(info_),e); close(s); continue; }; if(listen(s,128) == -1) { std::string e = Arc::StrError(errno); logger.msg(Arc::WARNING, "Failed to listen on socket(%s): %s",PROTO_NAME(info_),e); close(s); continue; }; handles.push_back(s); }; }; if(handles.empty()) { logger.msg(Arc::ERROR, "Not listening to anything"); exit(-1); }; if(handles.size() < addrs_num) { logger.msg(Arc::WARNING, "Some addresses failed. 
Listening on %u of %u.",(unsigned int)handles.size(),addrs_num); }; daemon.logfile(DEFAULT_LOG_FILE); daemon.pidfile(DEFAULT_PID_FILE); if(daemon.daemon(false) != 0) { perror("daemonization failed"); return 1; }; logger.msg(Arc::INFO, "Listen started"); for(;;) { fd_set ifds; fd_set efds; FD_ZERO(&ifds); FD_ZERO(&efds); int maxfd = -1; for(std::list::iterator handle = handles.begin();handle != handles.end();++handle) { FD_SET(*handle,&ifds); FD_SET(*handle,&efds); if(*handle > maxfd) maxfd = *handle; }; if(maxfd < 0) { if(!server_done) logger.msg(Arc::ERROR, "No valid handles left for listening"); break; }; int r = select(maxfd+1,&ifds,NULL,&efds,NULL); if(r == -1) { if(errno == EINTR) continue; if(!server_done) logger.msg(Arc::ERROR, "Select failed: %s", Arc::StrError(errno)); break; }; std::list::iterator handle = handles.begin(); for(;handle != handles.end();++handle) { if(FD_ISSET(*handle,&ifds) || FD_ISSET(*handle,&efds)) break; }; if(handle == handles.end()) { // ??? continue; }; struct sockaddr_in addr; socklen_t addrlen = sizeof(addr); int sock = accept(*handle,(sockaddr*)&addr,&addrlen); if(sock == -1) { if(!server_done) logger.msg(Arc::ERROR, "Accept failed: %s", Arc::StrError(errno)); if(errno == EBADF) { // handle becomes bad close(*handle); handles.erase(handle); }; }; int curr_connections = started_connections - finished_connections; logger.msg(Arc::INFO, "Have connections: %i, max: %i", curr_connections, max_connections); if((curr_connections < max_connections) || (max_connections == 0)) { logger.msg(Arc::INFO, "New connection"); switch (fork()) { case -1: { logger.msg(Arc::ERROR, "Fork failed: %s", Arc::StrError(errno)); }; break; case 0: { /* child */ for(std::list::iterator handle = handles.begin();handle != handles.end();++handle) { close(*handle); }; handles.clear(); Arc::Run::AfterFork(); new_conn_callback(sock); }; break; default: { /* parent */ ++started_connections; }; break; }; } else { /* it is probaly better to close connection immediately */ logger.msg(Arc::ERROR, "Refusing connection: Connection limit exceeded"); }; close(sock); }; for(std::list::iterator handle = handles.begin();handle != handles.end();++handle) { close(*handle); }; handles.clear(); #else if(daemon.daemon() != 0) { perror("daemonization failed"); return 1; }; #ifdef HAVE_GLOBUS_THREAD_SET_MODEL globus_thread_set_model("pthread"); #endif if((globus_module_activate(GLOBUS_COMMON_MODULE) != GLOBUS_SUCCESS) || (globus_module_activate(GLOBUS_FTP_CONTROL_MODULE) != GLOBUS_SUCCESS) || (globus_module_activate(GLOBUS_GSI_CREDENTIAL_MODULE) != GLOBUS_SUCCESS) || (globus_module_activate(GLOBUS_GSI_GSS_ASSIST_MODULE) != GLOBUS_SUCCESS) || (globus_module_activate(GLOBUS_OPENSSL_MODULE) != GLOBUS_SUCCESS)) { logger.msg(Arc::ERROR, "Activation failed"); globus_module_deactivate_all(); goto exit; }; if(globus_ftp_control_server_handle_init(&handle) != GLOBUS_SUCCESS) { logger.msg(Arc::ERROR, "Init failed"); goto exit_active; }; if(globus_ftp_control_server_listen(&handle,&server_port,&new_conn_callback,NULL) != GLOBUS_SUCCESS) { logger.msg(Arc::ERROR, "Listen failed"); goto exit_inited; }; logger.msg(Arc::INFO, "Listen started"); globus_mutex_init(&server_lock,GLOBUS_NULL); globus_cond_init(&server_cond,GLOBUS_NULL); server_done=0; globus_mutex_lock(&(server_lock)); while(!(server_done)) { globus_cond_wait(&(server_cond),&(server_lock)); }; globus_mutex_unlock(&(server_lock)); logger.msg(Arc::INFO, "Listen finished"); globus_mutex_destroy(&server_lock); globus_cond_destroy(&server_cond); logger.msg(Arc::INFO, 
"Stopping server"); globus_ftp_control_server_stop(&handle,&serv_stop_callback,NULL); exit_inited: logger.msg(Arc::INFO, "Destroying handle"); globus_ftp_control_server_handle_destroy(&handle); exit_active: logger.msg(Arc::INFO, "Deactivating modules"); globus_module_deactivate(GLOBUS_OPENSSL_MODULE); globus_module_deactivate(GLOBUS_GSI_GSS_ASSIST_MODULE); globus_module_deactivate(GLOBUS_GSI_CREDENTIAL_MODULE); globus_module_deactivate(GLOBUS_FTP_CONTROL_MODULE); globus_module_deactivate(GLOBUS_COMMON_MODULE); exit: #endif logger.msg(Arc::INFO, "Exiting"); return 0; } nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/commands.cpp0000644000000000000000000000012413065020314023711 xustar000000000000000027 mtime=1490297036.543658 27 atime=1513200575.502714 30 ctime=1513200662.474777753 nordugrid-arc-5.4.2/src/services/gridftpd/commands.cpp0000644000175000002070000015066313065020314023771 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "fileroot.h" #include "names.h" #include "commands.h" #include "misc/proxy.h" /* timeout if nothing happened during 10 minutes */ #define FTP_TIMEOUT 600 static Arc::Logger logger(Arc::Logger::getRootLogger(),"GridFTP_Commands"); extern unsigned long long int max_data_buffer_size; extern unsigned long long int default_data_buffer_size; #ifndef __DONT_USE_FORK__ static int fork_done; static globus_mutex_t fork_lock; static globus_cond_t fork_cond; #endif GridFTP_Commands_timeout* timeouter = NULL; extern int make_list_string(const DirEntry &entr,GridFTP_Commands::list_mode_t mode,unsigned char* buf,int size,const char *prefix); int GridFTP_Commands::send_response(const char* response) { globus_result_t res; response_done=0; { std::string s = response; for(std::string::size_type n=0;;) if((n=s.find('\r'))==std::string::npos) {break;} else {s[n]='\\';}; for(std::string::size_type n=0;;) if((n=s.find('\n'))==std::string::npos) {break;} else {s[n]='\\';}; logger.msg(Arc::VERBOSE, "response: %s", s); }; res = globus_ftp_control_send_response(&handle,response,&response_callback,this); if(res != GLOBUS_SUCCESS) { logger.msg(Arc::ERROR, "Send response failed: %s", Arc::GlobusResult(res).str()); globus_mutex_lock(&response_lock); response_done=2; globus_cond_signal(&response_cond); globus_mutex_unlock(&response_lock); return 1; }; return 0; } int GridFTP_Commands::wait_response(void) { int res = 0; // What is the right deafault here? 
globus_abstime_t timeout; last_action_time=time(NULL); GlobusTimeAbstimeSet(timeout,0,100000); globus_mutex_lock(&response_lock); while(!response_done) { globus_cond_timedwait(&response_cond,&response_lock,&timeout); res=(response_done != 1); last_action_time=time(NULL); }; response_done=0; globus_mutex_unlock(&response_lock); return res; } void GridFTP_Commands::response_callback(void* arg,globus_ftp_control_handle_t*,globus_object_t *error) { GridFTP_Commands *it = (GridFTP_Commands*)arg; globus_mutex_lock(&(it->response_lock)); if(error != GLOBUS_SUCCESS) { logger.msg(Arc::ERROR, "Response sending error"); it->response_done=2; } else { it->response_done=1; }; globus_cond_signal(&(it->response_cond)); globus_mutex_unlock(&(it->response_lock)); } void GridFTP_Commands::close_callback(void *arg,globus_ftp_control_handle_t*,globus_object_t* /* error */, globus_ftp_control_response_t* /* ftp_response */) { GridFTP_Commands *it = (GridFTP_Commands*)arg; if(it) { logger.msg(Arc::INFO, "Closed connection"); delete it; }; // GridFTP_Commands::response_callback(arg,handle,error); } #if GLOBUS_IO_VERSION>=5 static void io_close_cb(void* /* callback_arg */,globus_io_handle_t*, globus_result_t /* result */) { } #endif #ifndef __DONT_USE_FORK__ GridFTP_Commands::close_semaphor_t::close_semaphor_t(void) { } GridFTP_Commands::close_semaphor_t::~close_semaphor_t(void) { globus_mutex_lock(&fork_lock); fork_done=1; globus_cond_signal(&fork_cond); globus_mutex_unlock(&fork_lock); } int GridFTP_Commands::new_connection_callback(void* arg,int sock) { GridFTP_Commands *it = (GridFTP_Commands*)arg; globus_mutex_init(&fork_lock,GLOBUS_NULL); globus_cond_init(&fork_cond,GLOBUS_NULL); // Convert the socket to a globus IO structure. globus_result_t res; globus_io_attr_t attr; globus_io_tcpattr_init(&attr); globus_io_attr_set_socket_oobinline(&attr, GLOBUS_TRUE); globus_io_attr_set_tcp_nodelay(&attr, GLOBUS_TRUE); res = globus_io_tcp_posix_convert(sock,&attr,&(it->handle.cc_handle.io_handle)); if(res != GLOBUS_SUCCESS) { logger.msg(Arc::ERROR, "Socket conversion failed: %s", Arc::GlobusResult(res).str()); return -1; }; it->handle.cc_handle.cc_state=GLOBUS_FTP_CONTROL_CONNECTED; fork_done = 0; int count = 0; res = globus_io_tcp_get_local_address_ex(&(it->handle.cc_handle.io_handle), it->local_host,&count,&(it->local_port)); if(res != GLOBUS_SUCCESS) { logger.msg(Arc::ERROR, "Failed to obtain own address: %s", Arc::GlobusResult(res).str()); return -1; }; if(count == sizeof(in_addr)) { it->local_is_ipv6 = false; } else if(count == sizeof(in6_addr)) { it->local_is_ipv6 = true; } else { logger.msg(Arc::ERROR, "Failed to recognize own address type (IPv4 ir IPv6) - %u",count); return -1; }; if(it->local_is_ipv6) { char str[8*5]; snprintf(str,sizeof(str),"%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x", it->local_host[0]<<8 | it->local_host[1], it->local_host[2]<<8 | it->local_host[3], it->local_host[4]<<8 | it->local_host[5], it->local_host[6]<<8 | it->local_host[7], it->local_host[8]<<8 | it->local_host[9], it->local_host[10]<<8 | it->local_host[11], it->local_host[12]<<8 | it->local_host[13],it->local_host[14]<<8 | it->local_host[15]); logger.msg(Arc::INFO, "Accepted connection on [%s]:%u",str,it->local_port); } else { logger.msg(Arc::INFO, "Accepted connection on %u.%u.%u.%u:%u",it->local_host[0],it->local_host[1],it->local_host[2],it->local_host[3],it->local_port); }; globus_ftp_control_local_prot(&(it->handle),GLOBUS_FTP_CONTROL_PROTECTION_PRIVATE); // 
globus_ftp_control_local_prot(&(it->handle),GLOBUS_FTP_CONTROL_PROTECTION_CLEAR); it->data_dcau.mode=GLOBUS_FTP_CONTROL_DCAU_SELF; it->data_dcau.subject.subject=NULL; globus_ftp_control_local_dcau(&(it->handle),&(it->data_dcau),GSS_C_NO_CREDENTIAL); globus_ftp_control_local_mode(&(it->handle),GLOBUS_FTP_CONTROL_MODE_STREAM); globus_ftp_control_local_type(&(it->handle),GLOBUS_FTP_CONTROL_TYPE_IMAGE,0); // Call accept callback as if Globus called it accepted_callback(it, &(it->handle), GLOBUS_SUCCESS); globus_mutex_lock(&fork_lock); while(!fork_done) { globus_cond_wait(&fork_cond,&fork_lock); }; globus_mutex_unlock(&fork_lock); globus_cond_destroy(&fork_cond); globus_mutex_destroy(&fork_lock); return 0; } #else void GridFTP_Commands::new_connection_callback(void* arg,globus_ftp_control_server_t *server_handle,globus_object_t *error) { GridFTP_Commands *it = (GridFTP_Commands*)arg; globus_ftp_control_local_prot(&(it->handle),GLOBUS_FTP_CONTROL_PROTECTION_PRIVATE); // globus_ftp_control_local_prot(&(it->handle),GLOBUS_FTP_CONTROL_PROTECTION_CLEAR); it->data_dcau.mode=GLOBUS_FTP_CONTROL_DCAU_SELF; it->data_dcau.subject.subject=NULL; globus_ftp_control_local_dcau(&(it->handle),&(it->data_dcau),GSS_C_NO_CREDENTIAL); globus_ftp_control_local_mode(&(it->handle),GLOBUS_FTP_CONTROL_MODE_STREAM); globus_ftp_control_local_type(&(it->handle),GLOBUS_FTP_CONTROL_TYPE_IMAGE); if(globus_ftp_control_server_accept(server_handle,&(it->handle),&accepted_callback,it) != GLOBUS_SUCCESS) { logger.msg(Arc::ERROR, "Accept failed"); }; } #endif void GridFTP_Commands::accepted_callback(void* arg,globus_ftp_control_handle_t *handle,globus_object_t *error) { GridFTP_Commands *it = (GridFTP_Commands*)arg; if(error != GLOBUS_SUCCESS) { logger.msg(Arc::ERROR, "Accept failed: %s", Arc::globus_object_to_string(error)); delete it; return; }; int remote_host[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; unsigned short remote_port = 0; int count = 0; globus_io_tcp_get_remote_address_ex(&(handle->cc_handle.io_handle),remote_host,&count,&remote_port); if(count == sizeof(in6_addr)) { char str[8*5]; snprintf(str,sizeof(str),"%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x", remote_host[0]<<8 | remote_host[1], remote_host[2]<<8 | remote_host[3], remote_host[4]<<8 | remote_host[5], remote_host[6]<<8 | remote_host[7], remote_host[8]<<8 | remote_host[9], remote_host[10]<<8 | remote_host[11], remote_host[12]<<8 | remote_host[13],remote_host[14]<<8 | remote_host[15]); logger.msg(Arc::INFO, "Accepted connection from [%s]:%u",str,remote_port); } else { logger.msg(Arc::INFO, "Accepted connection from %u.%u.%u.%u:%u", (unsigned int)(remote_host[0]), (unsigned int)(remote_host[1]), (unsigned int)(remote_host[2]), (unsigned int)(remote_host[3]), remote_port); }; it->send_response("220 Server ready\r\n"); if(globus_ftp_control_server_authenticate(&(it->handle),GLOBUS_FTP_CONTROL_AUTH_REQ_GSSAPI,&authenticate_callback,it) != GLOBUS_SUCCESS) { logger.msg(Arc::ERROR, "Authenticate in commands failed"); delete it; return; }; } void GridFTP_Commands::authenticate_callback(void* arg,globus_ftp_control_handle_t *handle,globus_object_t *error,globus_ftp_control_auth_info_t *result) { GridFTP_Commands *it = (GridFTP_Commands*)arg; if((result == GLOBUS_NULL) || (error != GLOBUS_SUCCESS)) { logger.msg(Arc::ERROR, "Authentication failure"); logger.msg(Arc::ERROR, Arc::globus_object_to_string(error)); if(it->send_response("535 Authentication failed\r\n") == 0) { it->wait_response(); }; delete it; return; }; logger.msg(Arc::INFO, "User subject: %s", 
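// After a successful GSSAPI handshake this callback records the client
// subject and the delegated credential, then lets froot.config() build the
// per-user virtual directory tree from the authorised plugins; a failed or
// empty configuration is refused with a 535 response, otherwise 235 is sent
// and the command-reading loop is started.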
result->auth_gssapi_subject); logger.msg(Arc::INFO, "Encrypted: %s", (result->encrypt ? "true" : "false")); it->delegated_cred=result->delegated_credential_handle; // //const char* fname = write_cert_chain(result->auth_gssapi_context); // if(it->froot.config(result,handle) != 0) { logger.msg(Arc::ERROR, "User has no proper configuration associated"); if(it->send_response("535 Not allowed\r\n") == 0) { it->wait_response(); }; delete it; return; }; if(it->froot.nodes.size() == 0) { logger.msg(Arc::ERROR, "User has empty virtual directory tree.\nEither user has no authorised plugins or there are no plugins configured at all."); if(it->send_response("535 Nothing to serve\r\n") == 0) { it->wait_response(); }; delete it; return; }; it->send_response("235 Authentication successful\r\n"); /* Set defaults */ // globus_ftp_control_local_prot(&(it->handle),GLOBUS_FTP_CONTROL_PROTECTION_PRIVATE); // globus_ftp_control_local_prot(&(it->handle),GLOBUS_FTP_CONTROL_PROTECTION_CLEAR); it->data_dcau.mode=GLOBUS_FTP_CONTROL_DCAU_SELF; it->data_dcau.subject.subject=NULL; globus_ftp_control_local_dcau(&(it->handle),&(it->data_dcau),it->delegated_cred); globus_ftp_control_local_mode(&(it->handle),GLOBUS_FTP_CONTROL_MODE_STREAM); globus_ftp_control_local_type(&(it->handle),GLOBUS_FTP_CONTROL_TYPE_IMAGE,0); if(globus_ftp_control_read_commands(&(it->handle),&commands_callback,it) != GLOBUS_SUCCESS) { logger.msg(Arc::ERROR, "Read commands in authenticate failed"); delete it; return; }; } static int parse_integers(char* string,int args[],int margs) { char* cp = string; char* np; int n=0; if((*cp)==0) return n; for(;;) { np=cp; cp=strchr(np,','); if(cp!=NULL) { (*cp)=0; cp++; }; if(ntransfer_mode) { \ it->send_response("421 Service not available\r\n"); break; \ }; \ /* Globus data handle may still be in CLOSING state because \ it takes some time to close socket through globus_io. \ Check for such situation and give Globus 10 sec. to recover. 
*/ \ time_t s_time = time(NULL); \ while(true) { \ if(handle->dc_handle.state != GLOBUS_FTP_DATA_STATE_CLOSING) break; \ if(((unsigned int)(time(NULL)-s_time)) > 10) break; \ sleep(1); \ }; \ if(handle->dc_handle.state == GLOBUS_FTP_DATA_STATE_CLOSING) { \ it->send_response("421 Timeout waiting for service to become available\r\n"); break; \ }; \ } static int parse_eport(char* str, globus_ftp_control_host_port_t* host_port) { memset(host_port,0,sizeof(globus_ftp_control_host_port_t)); for(;isblank(*str);++str) { }; if((*str < 33) || (*str > 126)) return -1; // bad delimiter char delim = *str; const char* protocol_s = ++str; for(;*str != delim;++str) { if(!*str) return -1; }; // missing delimiter *str = 0; const char* addr_s = ++str; for(;*str != delim;++str) { if(!*str) return -1; }; // missing delimiter *str = 0; const char* port_s = ++str; for(;*str != delim;++str) { if(!*str) return -1; }; // missing delimiter *str = 0; char* port_e = NULL; unsigned short port = strtoul(port_s,&port_e,10); if(!port) return -1; // wrong port number host_port->port = port; char* protocol_e = NULL; unsigned short protocol = strtoul(protocol_s,&protocol_e,10); if(protocol == 1) { // IPv4 struct in_addr addr; if(inet_pton(AF_INET,addr_s,&addr) != 1) return -1; // wrong address if(sizeof(addr) > sizeof(host_port->host)) return -1; memcpy(&(host_port->host),&addr,sizeof(addr)); host_port->hostlen = sizeof(addr); } else if(protocol == 2) { // IPv6 struct in6_addr addr; if(inet_pton(AF_INET6,addr_s,&addr) != 1) return -1; // wrong address if(sizeof(addr) > sizeof(host_port->host)) return -1; memcpy(&(host_port->host),&addr,sizeof(addr)); host_port->hostlen = sizeof(addr); } else { return -1; // wrong protocol }; return 0; } /* main procedure */ void GridFTP_Commands::commands_callback(void* arg,globus_ftp_control_handle_t *handle,globus_object_t *error,union globus_ftp_control_command_u *command) { GridFTP_Commands *it = (GridFTP_Commands*)arg; it->last_action_time=time(NULL); if(command == GLOBUS_NULL) { logger.msg(Arc::INFO, "Control connection (probably) closed"); if(error) { logger.msg(Arc::ERROR, Arc::globus_object_to_string(error)); }; it->make_abort(); delete it; return; } #ifndef HAVE_FTP_COMMAND_MLSD #define GLOBUS_FTP_CONTROL_COMMAND_MLSD \ ((globus_ftp_control_command_code_t)(GLOBUS_FTP_CONTROL_COMMAND_UNKNOWN+1)) #define GLOBUS_FTP_CONTROL_COMMAND_MLST \ ((globus_ftp_control_command_code_t)(GLOBUS_FTP_CONTROL_COMMAND_UNKNOWN+2)) #define GLOBUS_FTP_CONTROL_COMMAND_EPRT \ ((globus_ftp_control_command_code_t)(GLOBUS_FTP_CONTROL_COMMAND_UNKNOWN+3)) #define GLOBUS_FTP_CONTROL_COMMAND_EPSV \ ((globus_ftp_control_command_code_t)(GLOBUS_FTP_CONTROL_COMMAND_UNKNOWN+4)) if(command->code == GLOBUS_FTP_CONTROL_COMMAND_UNKNOWN) { if(!strncasecmp("MLSD",command->base.raw_command,4)) { command->code=GLOBUS_FTP_CONTROL_COMMAND_MLSD; const char* arg = get_arg(command->base.raw_command); if(arg == NULL) { arg=""; }; command->list.string_arg=(char*)arg; } else if(!strncasecmp("MLST",command->base.raw_command,4)) { command->code=GLOBUS_FTP_CONTROL_COMMAND_MLST; const char* arg = get_arg(command->base.raw_command); if(arg == NULL) { arg=""; }; command->list.string_arg=(char*)arg; } else if(!strncasecmp("EPRT",command->base.raw_command,4)) { command->code=GLOBUS_FTP_CONTROL_COMMAND_EPRT; char* arg = get_arg(command->base.raw_command); if(parse_eport(arg,&(command->port.host_port)) != 0) { logger.msg(Arc::VERBOSE, "Command EPRT"); logger.msg(Arc::ERROR, "Failed to parse remote addres %s",arg); it->send_response("553 Failed 
to parse port for data transfer\r\n"); return; } } else if(!strncasecmp("EPSV",command->base.raw_command,4)) { command->code=GLOBUS_FTP_CONTROL_COMMAND_EPSV; const char* arg = get_arg(command->base.raw_command); if(arg == NULL) { arg=""; }; command->pasv.string_arg=(char*)arg; }; }; #endif switch((int)command->code) { case GLOBUS_FTP_CONTROL_COMMAND_AUTH: { it->send_response("534 Reauthentication is not supported\r\n"); }; break; case GLOBUS_FTP_CONTROL_COMMAND_FEAT: { it->send_response("211- Features supported\r\n FEAT\r\n AUTH\r\n ERET\r\n SBUF\r\n DCAU\r\n SPAS\r\n SPOR\r\n SIZE\r\n MDTM\r\n MLSD\r\n MLST\r\nEPRT\r\nEPSV\r\n211 End\r\n"); }; break; case GLOBUS_FTP_CONTROL_COMMAND_USER: { fix_string_arg(command->user.string_arg); logger.msg(Arc::VERBOSE, "Command USER %s", command->user.string_arg); it->send_response("230 No need for username\r\n"); }; break; case GLOBUS_FTP_CONTROL_COMMAND_PASS: { it->send_response("230 No need for password\r\n"); }; break; case GLOBUS_FTP_CONTROL_COMMAND_CDUP: { logger.msg(Arc::VERBOSE, "Command CDUP"); command->code=GLOBUS_FTP_CONTROL_COMMAND_CWD; command->cwd.string_arg=(char*)".."; }; case GLOBUS_FTP_CONTROL_COMMAND_CWD: { fix_string_arg(command->cwd.string_arg); logger.msg(Arc::VERBOSE, "Command CWD %s", command->cwd.string_arg); std::string pwd = command->cwd.string_arg; if(it->froot.cwd(pwd) == 0) { pwd = "250 \""+pwd+"\" is current directory\r\n"; it->send_response(pwd.c_str()); } else { if(it->froot.error.length()) { it->send_response("550 "+it->froot.error+"\r\n"); } else { it->send_response("550 Can't change to this directory.\r\n"); }; }; }; break; case GLOBUS_FTP_CONTROL_COMMAND_MKD: { fix_string_arg(command->mkd.string_arg); logger.msg(Arc::VERBOSE, "Command MKD %s", command->mkd.string_arg); std::string pwd = command->mkd.string_arg; if(it->froot.mkd(pwd) == 0) { it->send_response("250 MKD command ok.\r\n"); } else { if(it->froot.error.length()) { it->send_response("550 "+it->froot.error+"\r\n"); } else { it->send_response("550 Can't make this directory.\r\n"); }; }; }; break; case GLOBUS_FTP_CONTROL_COMMAND_PWD: { std::string pwd; pwd = "257 \""+(it->froot.cwd())+"\" is current directory\r\n"; it->send_response(pwd.c_str()); }; break; case GLOBUS_FTP_CONTROL_COMMAND_SIZE: { fix_string_arg(command->size.string_arg); logger.msg(Arc::VERBOSE, "Command SIZE %s", command->size.string_arg); unsigned long long size; if(it->froot.size(command->size.string_arg,&size) != 0) { if(it->froot.error.length()) { it->send_response("550 "+it->froot.error+"\r\n"); } else { it->send_response("550 Size for object not available.\r\n"); }; break; }; char buf[200]; sprintf(buf,"213 %llu\r\n",size); it->send_response(buf); }; break; case GLOBUS_FTP_CONTROL_COMMAND_SBUF: { CHECK_TRANSFER; logger.msg(Arc::VERBOSE, "Command SBUF: %i", command->sbuf.buffer_size); // Because Globus wants SBUF to apply for all following data // connections, there is no way to reset to system defaults. 
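// The handling below therefore adds a small convention of its own: "SBUF 0"
// switches back to the system default TCP buffer, any positive value (for
// example "SBUF 1048576") selects a fixed buffer of that many bytes for the
// following data connections, and negative values are rejected with 501.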
// Let's make a little extension globus_ftp_control_tcpbuffer_t tcpbuf; if(command->sbuf.buffer_size == 0) { tcpbuf.mode=GLOBUS_FTP_CONTROL_TCPBUFFER_DEFAULT; } else if(command->sbuf.buffer_size < 0) { it->send_response("501 Wrong argument for SBUF\r\n"); break; } else { tcpbuf.mode=GLOBUS_FTP_CONTROL_TCPBUFFER_FIXED; tcpbuf.fixed.size=command->sbuf.buffer_size; }; if(globus_ftp_control_local_tcp_buffer(&(it->handle),&tcpbuf) != GLOBUS_SUCCESS) { it->send_response("501 SBUF argument can't be accepted\r\n"); break; }; it->send_response("200 Accepted TCP buffer size\r\n"); }; break; case GLOBUS_FTP_CONTROL_COMMAND_MLST: { fix_string_arg(command->list.string_arg); logger.msg(Arc::VERBOSE, "Command MLST %s", command->list.string_arg); DirEntry info; if(it->froot.checkfile(command->list.string_arg,info,DirEntry::full_object_info) != 0) { if(it->froot.error.length()) { it->send_response("550 "+it->froot.error+"\r\n"); } else { it->send_response("550 Information for object not available\r\n"); }; break; }; char buf[1024]; const char* str1="250-Information follows\r\n "; const char* str2="250 Information finished\r\n"; int str1l=strlen(str1); int str2l=strlen(str2); strcpy(buf,str1); make_list_string(info,list_mlsd_mode,(unsigned char*)(buf+str1l), 1024-str1l-str2l,""); strcat(buf,str2); it->send_response(buf); }; break; case GLOBUS_FTP_CONTROL_COMMAND_DELE: { fix_string_arg(command->dele.string_arg); logger.msg(Arc::VERBOSE, "Command DELE %s", command->dele.string_arg); std::string file = command->dele.string_arg; if(it->froot.rm(file) == 0) { it->send_response("250 File deleted.\r\n"); } else { if(it->froot.error.length()) { it->send_response("550 "+it->froot.error+"\r\n"); } else { it->send_response("550 Can't delete this file.\r\n"); }; }; }; break; case GLOBUS_FTP_CONTROL_COMMAND_RMD: { fix_string_arg(command->rmd.string_arg); logger.msg(Arc::VERBOSE, "Command RMD %s", command->rmd.string_arg); std::string dfile = command->rmd.string_arg; if(it->froot.rmd(dfile) == 0) { it->send_response("250 Directory deleted.\r\n"); } else { if(it->froot.error.length()) { it->send_response("550 "+it->froot.error+"\r\n"); } else { it->send_response("550 Can't delete this directory.\r\n"); }; }; }; break; case GLOBUS_FTP_CONTROL_COMMAND_TYPE: { logger.msg(Arc::VERBOSE, "Command TYPE %c", (char)(command->type.type)); CHECK_TRANSFER; if(command->type.type==GLOBUS_FTP_CONTROL_TYPE_NONE) { it->send_response("504 Unsupported type\r\n"); } else { globus_ftp_control_local_type(&(it->handle),command->type.type,0); it->send_response("200 Type accepted\r\n"); }; }; break; case GLOBUS_FTP_CONTROL_COMMAND_MODE: { logger.msg(Arc::VERBOSE, "Command MODE %c", (char)(command->mode.mode)); CHECK_TRANSFER; if((command->mode.mode!=GLOBUS_FTP_CONTROL_MODE_STREAM) && (command->mode.mode!=GLOBUS_FTP_CONTROL_MODE_EXTENDED_BLOCK)) { it->send_response("504 Unsupported mode\r\n"); } else { globus_ftp_control_local_mode(&(it->handle),command->mode.mode); it->send_response("200 Mode accepted\r\n"); }; }; break; case GLOBUS_FTP_CONTROL_COMMAND_ABOR: { logger.msg(Arc::VERBOSE, "Command ABOR"); globus_mutex_lock(&(it->abort_lock)); if(!(it->transfer_mode)) { globus_mutex_unlock(&(it->abort_lock)); it->send_response("226 Abort not needed\r\n"); } else { globus_mutex_unlock(&(it->abort_lock)); it->make_abort(); }; }; break; case GLOBUS_FTP_CONTROL_COMMAND_REST: { /* for the beginning stream mode only */ fix_string_arg(command->rest.string_arg); logger.msg(Arc::VERBOSE, "Command REST %s", command->rest.string_arg); CHECK_TRANSFER; 
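/* In stream mode REST carries a single decimal byte offset; the handler
   below stores it in virt_offset and answers 501 if the argument does not
   scan as an unsigned number. A minimal sketch of that check (helper name
   and sample values are illustrative only; kept under #if 0 so it is not
   compiled): */
#if 0
static bool parse_rest_offset_sketch(const char* arg, unsigned long long& offset) {
  offset = 0;
  /* mirrors the sscanf("%llu") test used below: succeeds when the argument
     begins with a decimal number */
  return (arg != NULL) && (sscanf(arg, "%llu", &offset) == 1);
}
/* parse_rest_offset_sketch("1048576", off) -> true,  off == 1048576
   parse_rest_offset_sketch("abc", off)     -> false (handler replies 501) */
#endif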
it->virt_restrict=false; if(sscanf(command->rest.string_arg,"%llu",&(it->virt_offset)) != 1) { it->virt_offset=0; it->send_response("501 Wrong parameter\r\n"); break; }; it->send_response("350 Restore pointer accepted\r\n"); }; break; case GLOBUS_FTP_CONTROL_COMMAND_EPSV: case GLOBUS_FTP_CONTROL_COMMAND_SPAS: case GLOBUS_FTP_CONTROL_COMMAND_PASV: { if(command->code == GLOBUS_FTP_CONTROL_COMMAND_EPSV) { logger.msg(Arc::VERBOSE, "Command EPSV %s",command->pasv.string_arg); } else if(command->code == GLOBUS_FTP_CONTROL_COMMAND_SPAS) { logger.msg(Arc::VERBOSE, "Command SPAS"); } else { logger.msg(Arc::VERBOSE, "Command PASV"); }; CHECK_TRANSFER; globus_ftp_control_host_port_t node; memset(&node,0,sizeof(node)); char buf[200]; if((command->code == GLOBUS_FTP_CONTROL_COMMAND_EPSV) && it->local_is_ipv6) { // EPSV requires data and control to be of same interface // Hopefully globus opens port both for IPv4 and IPv6 globus_ftp_control_ipv6_allow(&(it->handle),GLOBUS_TRUE); // But it does not. It also ignores address passed to it // in 'node'. Looks like only option to control which // socket is created is to directly set attribute in // internal member. globus_io_attr_set_tcp_interface(&(it->handle.dc_handle.io_attr),"0:0:0:0:0:0:0:0"); } if((command->code == GLOBUS_FTP_CONTROL_COMMAND_PASV) || (command->code == GLOBUS_FTP_CONTROL_COMMAND_EPSV)) { globus_result_t res_tmp; if((res_tmp=globus_ftp_control_local_pasv(&(it->handle),&node)) !=GLOBUS_SUCCESS){ logger.msg(Arc::ERROR, "local_pasv failed"); logger.msg(Arc::ERROR, Arc::GlobusResult(res_tmp).str()); it->send_response("553 Failed to allocate port for data transfer\r\n"); break; }; if(it->firewall[0] && (node.hostlen == 4)) { // replace address // todo: we need separate firewall for IPv4 and IPv6 node.host[0]=it->firewall[0]; node.host[1]=it->firewall[1]; node.host[2]=it->firewall[2]; node.host[3]=it->firewall[3]; }; if(command->code == GLOBUS_FTP_CONTROL_COMMAND_PASV) { sprintf(buf,"227 Entering Passive Mode (%i,%i,%i,%i,%i,%i)\r\n", node.host[0], node.host[1], node.host[2], node.host[3], (node.port & 0x0FF00) >> 8, node.port & 0x000FF); } else { // EPSV sprintf(buf,"229 Entering Extended Passive Mode (|||%u|)\r\n",node.port); }; } else { // SPAS globus_result_t res_tmp; if((res_tmp=globus_ftp_control_local_spas(&(it->handle),&node,1)) !=GLOBUS_SUCCESS){ logger.msg(Arc::ERROR, "local_spas failed"); logger.msg(Arc::ERROR, Arc::GlobusResult(res_tmp).str()); it->send_response("553 Failed to allocate port for data transfer\r\n"); break; }; if(it->firewall[0]) { // replace address node.host[0]=it->firewall[0]; node.host[1]=it->firewall[1]; node.host[2]=it->firewall[2]; node.host[3]=it->firewall[3]; }; sprintf(buf,"229-Entering Passive Mode\r\n %i,%i,%i,%i,%i,%i\r\n229 End\r\n", node.host[0], node.host[1], node.host[2], node.host[3], (node.port & 0x0FF00) >> 8, node.port & 0x000FF); }; it->data_conn_type=GRIDFTP_CONNECT_PASV; it->send_response(buf); }; break; case GLOBUS_FTP_CONTROL_COMMAND_EPRT: case GLOBUS_FTP_CONTROL_COMMAND_PORT: { if(command->code == GLOBUS_FTP_CONTROL_COMMAND_EPRT) { logger.msg(Arc::VERBOSE, "Command EPRT"); } else { logger.msg(Arc::VERBOSE, "Command PORT"); }; if(!it->froot.active_data) { logger.msg(Arc::ERROR, "active_data is disabled"); it->send_response("553 Active data transfer is disabled\r\n"); break; } CHECK_TRANSFER; globus_ftp_control_host_port_t node; node=command->port.host_port; globus_result_t res_tmp = globus_ftp_control_local_port(&(it->handle),&node); if(res_tmp != GLOBUS_SUCCESS) { logger.msg(Arc::ERROR, 
"local_port failed"); logger.msg(Arc::ERROR, Arc::GlobusResult(res_tmp).str()); it->send_response("553 Failed to accept port for data transfer\r\n"); break; }; it->data_conn_type=GRIDFTP_CONNECT_PORT; it->send_response("200 Node accepted\r\n"); }; break; case GLOBUS_FTP_CONTROL_COMMAND_MLSD: case GLOBUS_FTP_CONTROL_COMMAND_NLST: case GLOBUS_FTP_CONTROL_COMMAND_LIST: { fix_string_arg(command->list.string_arg); if(command->code == GLOBUS_FTP_CONTROL_COMMAND_MLSD) { logger.msg(Arc::VERBOSE, "Command MLSD %s", command->list.string_arg); } else if(command->code == GLOBUS_FTP_CONTROL_COMMAND_NLST) { logger.msg(Arc::VERBOSE, "Command NLST %s", command->list.string_arg); } else { logger.msg(Arc::VERBOSE, "Command LIST %s", command->list.string_arg); }; CHECK_TRANSFER; DirEntry::object_info_level mode; if(command->code == GLOBUS_FTP_CONTROL_COMMAND_LIST) { it->list_mode=list_list_mode; mode=DirEntry::basic_object_info; } else if(command->code == GLOBUS_FTP_CONTROL_COMMAND_NLST) { it->list_mode=list_nlst_mode; mode=DirEntry::minimal_object_info; } else { it->list_mode=list_mlsd_mode; mode=DirEntry::full_object_info; }; it->dir_list.clear(); it->list_name_prefix=""; int res = it->froot.readdir(command->list.string_arg,it->dir_list,mode); // 1 - error if(res == 1) { // error, most probably no such dir if(it->froot.error.length()) { it->send_response("450 "+it->froot.error+"\r\n"); break; } else { it->send_response("450 Object unavailable.\r\n"); break; }; }; // -1 - file if((res == -1) && ( (it->list_mode == list_mlsd_mode) || (it->list_mode == list_nlst_mode) ) ) { // MLSD and NLST are for directories only it->send_response("501 Object is not a directory.\r\n"); break; }; // 0 - directory if(it->data_conn_type == GRIDFTP_CONNECT_PORT) { it->send_response("150 Opening connection for list.\r\n"); it->transfer_mode=true; it->transfer_abort=false; globus_ftp_control_data_connect_write(&(it->handle),&list_connect_retrieve_callback,it); } else if(it->data_conn_type == GRIDFTP_CONNECT_PASV) { it->send_response("150 Opening connection for list.\r\n"); it->transfer_mode=true; it->transfer_abort=false; globus_ftp_control_data_connect_write(&(it->handle),&list_connect_retrieve_callback,it); } else { it->send_response("501 PORT or PASV command needed\r\n"); }; }; break; case GLOBUS_FTP_CONTROL_COMMAND_ERET: { fix_string_arg(command->eret.string_arg); logger.msg(Arc::VERBOSE, "Command ERET %s", command->eret.string_arg); char* args[4]; if(parse_args(command->eret.raw_command,args,4)<4) { it->send_response("500 parsing failed\r\n"); break; }; if(strcmp(args[0],"P")) { it->send_response("500 mark parsing failed\r\n"); break; }; char* ep; it->virt_restrict=true; it->virt_offset=strtoull(args[1],&ep,10); if((*ep) != 0) { it->send_response("500 offset parsing failed\r\n"); break; }; it->virt_size=strtoull(args[2],&ep,10); if((*ep) != 0) { it->send_response("500 size parsing failed\r\n"); break; }; if(it->froot.open(args[3],GRIDFTP_OPEN_RETRIEVE)!=0) { /* failed to open */ if(it->froot.error.length()) { it->send_response("550 "+it->froot.error+"\r\n"); } else { it->send_response("550 File unavailable.\r\n"); }; break; }; }; case GLOBUS_FTP_CONTROL_COMMAND_RETR: { if(command->code == GLOBUS_FTP_CONTROL_COMMAND_RETR) { fix_string_arg(command->retr.string_arg); logger.msg(Arc::VERBOSE, "Command RETR %s", command->retr.string_arg); CHECK_TRANSFER; /* try to open file */ if(it->froot.open(command->retr.string_arg,GRIDFTP_OPEN_RETRIEVE)!=0) { /* failed to open */ if(it->froot.error.length()) { it->send_response("550 
"+it->froot.error+"\r\n"); break; } else { it->send_response("550 File unavailable.\r\n"); break; }; break; }; it->virt_restrict=false; }; if(it->data_conn_type == GRIDFTP_CONNECT_PORT) { it->send_response("150 Opening connection.\r\n"); it->transfer_abort=false; it->transfer_mode=true; globus_ftp_control_data_connect_write(&(it->handle),&data_connect_retrieve_callback,it); } else if(it->data_conn_type == GRIDFTP_CONNECT_PASV) { it->send_response("150 Opening connection.\r\n"); it->transfer_abort=false; it->transfer_mode=true; globus_ftp_control_data_connect_write(&(it->handle),&data_connect_retrieve_callback,it); } else { it->send_response("502 PORT or PASV command needed\r\n"); }; }; break; case GLOBUS_FTP_CONTROL_COMMAND_STOR: { fix_string_arg(command->stor.string_arg); logger.msg(Arc::VERBOSE, "Command STOR %s", command->stor.string_arg); CHECK_TRANSFER; /* try to open file */ if(it->froot.open(command->stor.string_arg,GRIDFTP_OPEN_STORE) != 0) { it->file_size=0; /* failed to open */ if(it->froot.error.length()) { it->send_response("553 "+it->froot.error+"\r\n"); } else { it->send_response("553 File not allowed.\r\n"); }; break; }; it->file_size=0; if(it->data_conn_type == GRIDFTP_CONNECT_PORT) { it->send_response("150 Opening connection.\r\n"); it->transfer_abort=false; it->transfer_mode=true; globus_ftp_control_data_connect_read(&(it->handle),&data_connect_store_callback,it); } else if(it->data_conn_type == GRIDFTP_CONNECT_PASV) { it->send_response("150 Opening connection.\r\n"); it->transfer_abort=false; it->transfer_mode=true; globus_ftp_control_data_connect_read(&(it->handle),&data_connect_store_callback,it); } else { it->send_response("501 PORT or PASV command needed\r\n"); }; }; break; case GLOBUS_FTP_CONTROL_COMMAND_ALLO: { logger.msg(Arc::VERBOSE, "Command ALLO %i", command->allo.size); it->file_size=0; char* args[4]; int n = parse_args(command->allo.raw_command,args,4); if( (n==0) || (n==4) || (n==2) || ((n==3) && (strcmp(args[1],"R"))) ) { it->send_response("500 parsing failed\r\n"); break; }; char *e; it->file_size=strtoull(args[0],&e,10); if((*e) != 0) { it->file_size=0; it->send_response("500 parsing failed\r\n"); break; }; if(n == 3) { it->file_size=strtoull(args[2],&e,10); if((*e) != 0) { it->file_size=0; it->send_response("500 parsing failed\r\n"); break; }; }; it->send_response("200 Size accepted\r\n"); }; break; case GLOBUS_FTP_CONTROL_COMMAND_OPTS: { logger.msg(Arc::VERBOSE, "Command OPTS"); CHECK_TRANSFER; if(!strncasecmp(command->opts.cmd_name,"RETR",4)) { logger.msg(Arc::VERBOSE, "Command OPTS RETR"); char* args[3]; char* val; int v; int i; int n=parse_semicolon(command->opts.cmd_opts,args,3); if(n>3) n=3; globus_ftp_control_parallelism_t dp; dp.mode=GLOBUS_FTP_CONTROL_PARALLELISM_NONE; for(i=0;isend_response("500 Syntax failure\r\n"); break; }; (*val++)=0; if(!strcasecmp(args[i],"parallelism")) { int argn[3]; if((v=parse_integers(val,argn,3)) != 3) { it->send_response("500 parsing failed\r\n"); break; }; if(argn[0]<=0) { it->send_response("500 bad value\r\n"); break; }; if(argn[0]>50) { it->send_response("500 too big value\r\n"); break; }; dp.mode=GLOBUS_FTP_CONTROL_PARALLELISM_FIXED; dp.fixed.size=argn[0]; continue; } else { it->send_response("501 Sorry, option not supported\r\n"); break; }; }; if(ihandle),&dp); /* it->data_buffer_num=dp.fixed.size*2+1; it->data_buffer_size=default_data_buffer_size; if(it->data_buffer_num > 41) it->data_buffer_num=41; if(it->data_buffer_num < 3) it->data_buffer_num=3; if((it->data_buffer_num * 
it->data_buffer_size)>max_data_buffer_size) { it->data_buffer_size=max_data_buffer_size/it->data_buffer_num; }; if(it->data_buffer_size=0) { it->data_buffer_size=1; }; */ it->send_response("200 New options are valid\r\n"); } else { it->send_response("501 OPTS for command is not supported\r\n"); break; }; }; break; case GLOBUS_FTP_CONTROL_COMMAND_NOOP: { logger.msg(Arc::VERBOSE, "Command NOOP"); it->send_response("200 Doing nothing.\r\n"); }; break; case GLOBUS_FTP_CONTROL_COMMAND_QUIT: { logger.msg(Arc::VERBOSE, "Command QUIT"); it->make_abort(); if(it->send_response("221 Quitting.\r\n") == 0) { it->wait_response(); }; //globus_ftp_control_force_close(&(it->handle),&close_callback,it); //// delete it; logger.msg(Arc::INFO, "Closing connection"); if(globus_ftp_control_force_close(&(it->handle),&close_callback,it) != GLOBUS_SUCCESS) { logger.msg(Arc::WARNING, "Failed to close, deleting client"); delete it; //} else { // it->wait_response(); }; //delete it; return; }; break; case GLOBUS_FTP_CONTROL_COMMAND_UNKNOWN: default: { fix_string_arg(command->base.raw_command); if(!strncasecmp("DCAU",command->base.raw_command,4)) { char* args[2]; int n = parse_args(command->base.raw_command,args,2); logger.msg(Arc::VERBOSE, "Command DCAU: %i '%s'", n, args[0]); if((n < 1) || (n > 2) || (strlen(args[0]) != 1)) { it->send_response("500 Wrong number of arguments\r\n"); break; }; if(args[0][0] == 'T') args[0][0]='A'; if((args[0][0] == GLOBUS_FTP_CONTROL_DCAU_NONE) || (args[0][0] == GLOBUS_FTP_CONTROL_DCAU_SELF)) { if(n != 1) { it->send_response("500 Do not need a subject\r\n"); break; } } else if(args[0][0] == GLOBUS_FTP_CONTROL_DCAU_SUBJECT) { if(n != 2) { it->send_response("500 Need an argument\r\n"); break; } } else { it->send_response("504 Unsupported authentication type\r\n"); break; }; it->data_dcau.mode=(globus_ftp_control_dcau_mode_t)(args[0][0]); if(n>1) { if(it->data_dcau.subject.subject) free(it->data_dcau.subject.subject); it->data_dcau.subject.subject = strdup(args[1]); }; globus_ftp_control_local_dcau(&(it->handle),&(it->data_dcau),it->delegated_cred); it->send_response("200 Authentication type accepted\r\n"); } else if(!strncasecmp("PBSZ",command->base.raw_command,4)) { CHECK_TRANSFER; char* args[1]; int n = parse_args(command->base.raw_command,args,1); logger.msg(Arc::VERBOSE, "Command PBZS: %s", args[0]); if(n > 1) { it->send_response("501 Need only one argument\r\n"); break; }; unsigned long pbsz; unsigned long npbsz; pbsz=atoi(args[0]); if((n <= 0) || (n>1000000)) { /* let's not support TOO BIG buffers */ it->send_response("501 Wrong number\r\n"); break; }; logger.msg(Arc::VERBOSE, "Setting pbsz to %lu", pbsz); globus_ftp_control_local_pbsz(&(it->handle),pbsz); globus_ftp_control_get_pbsz(&(it->handle),&npbsz); if(pbsz == npbsz) { it->send_response("200 Accepted buffer size\r\n"); } else { char buf[200]; sprintf(buf,"200 PBSZ=%lu\r\n",npbsz); it->send_response(buf); }; /* it->data_buffer_size=npbsz; */ } else if(!strncasecmp("PROT",command->base.raw_command,4)) { CHECK_TRANSFER; char* args[1]; int n = parse_args(command->base.raw_command,args,1); logger.msg(Arc::VERBOSE, "Command PROT: %s", args[0]); if(n > 1) { it->send_response("501 Need only one argument\r\n"); break; }; if(strlen(args[0]) != 1) { it->send_response("504 Protection level is not supported\r\n"); break; }; bool allow_protection = true; switch(args[0][0]) { case GLOBUS_FTP_CONTROL_PROTECTION_PRIVATE: if(!it->froot.heavy_encryption) allow_protection=false; case GLOBUS_FTP_CONTROL_PROTECTION_SAFE: case 
GLOBUS_FTP_CONTROL_PROTECTION_CONFIDENTIAL: case GLOBUS_FTP_CONTROL_PROTECTION_CLEAR: { if(allow_protection) { it->send_response("200 Protection mode accepted\r\n"); globus_ftp_control_local_prot(&(it->handle), (globus_ftp_control_protection_t)args[0][0]); } else { it->send_response("504 Protection level is not allowed\r\n"); }; }; break; default: { it->send_response("504 Protection level is not supported\r\n"); }; }; } else if(!strncasecmp("MDTM",command->base.raw_command,4)) { char* arg = get_arg(command->base.raw_command); logger.msg(Arc::VERBOSE, "Command MDTM %s", (arg?arg:"")); if(arg == NULL) { it->send_response("501 Need name\r\n"); break; }; time_t t; struct tm tt; struct tm *tp; if(it->froot.time(arg,&t) != 0) { if(it->froot.error.length()) { it->send_response("550 "+it->froot.error+"\r\n"); } else { it->send_response("550 Time for object not available.\r\n"); }; break; }; tp=gmtime_r(&t,&tt); if(tp==NULL) { it->send_response("550 Time for object not available\r\n"); break; }; char buf[200]; sprintf(buf,"213 %04u%02u%02u%02u%02u%02u\r\n",tp->tm_year+1900,tp->tm_mon+1,tp->tm_mday,tp->tm_hour,tp->tm_min,tp->tm_sec); it->send_response(buf); } else { logger.msg(Arc::VERBOSE, "Raw command: %s", command->base.raw_command); it->send_response("500 Do not understand\r\n"); }; } break; }; } void GridFTP_Commands::free_data_buffer(void) { if(data_buffer == NULL) return; for(unsigned int i = 0;i 41) data_buffer_num=41; if(data_buffer_num < 3) data_buffer_num=3; if((data_buffer_num * data_buffer_size) > max_data_buffer_size) { data_buffer_size=max_data_buffer_size/data_buffer_num; }; if(data_buffer_size==0) { data_buffer_size=1; }; return; } bool GridFTP_Commands::allocate_data_buffer(void) { free_data_buffer(); data_buffer=(data_buffer_t*)malloc(sizeof(data_buffer_t)*data_buffer_num); if(data_buffer == NULL) return false; unsigned int i; for(i = 0;iabort_lock)); if(error != GLOBUS_SUCCESS) { logger.msg(Arc::ERROR, "abort_callback: Globus error: %s", Arc::globus_object_to_string(error)); }; /* check for flag just in case */ if(it->transfer_abort) { it->send_response("226 Abort finished\r\n"); }; it->transfer_abort=false; it->transfer_mode=false; globus_cond_broadcast(&(it->abort_cond)); /* many threads can be waiting */ globus_mutex_unlock(&(it->abort_lock)); } /* perform data transfer abort */ void GridFTP_Commands::make_abort(bool already_locked,bool wait_abort) { logger.msg(Arc::VERBOSE, "make_abort: start"); if(!already_locked) globus_mutex_lock(&abort_lock); if(!transfer_mode) { /* leave if not transfering */ globus_mutex_unlock(&abort_lock); return; }; bool t = transfer_mode; if(!transfer_abort) { /* not aborting yet */ if(globus_ftp_control_data_force_close(&handle,abort_callback,this) == GLOBUS_SUCCESS) { transfer_abort=true; } else { logger.msg(Arc::ERROR, "Failed to abort data connection - ignoring and recovering"); globus_mutex_unlock(&abort_lock); abort_callback(this,&handle,GLOBUS_SUCCESS); globus_mutex_lock(&abort_lock); }; }; last_action_time=time(NULL); if(wait_abort) while(transfer_abort) { logger.msg(Arc::INFO, "make_abort: wait for abort flag to be reset"); globus_cond_wait(&abort_cond,&abort_lock); }; if(t) { /* transfer_mode=false; */ /* close (if) opened files */ froot.close(false); virt_offset=0; virt_restrict=false; }; logger.msg(Arc::VERBOSE, "make_abort: leaving"); globus_mutex_unlock(&abort_lock); } /* check for globus error, print it and abort connection if necessary */ /* This function should always be called from data transfer callbacks with data_lock locked */ 
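/* A sketch of the calling convention described in the two comments above.
   The callback name and its arguments are illustrative; in the real code the
   data callbacks are members of GridFTP_Commands with direct access to
   data_lock. Kept under #if 0 so it is not compiled: */
#if 0
void some_data_callback_sketch(GridFTP_Commands* it, globus_object_t* error) {
  globus_mutex_lock(&(it->data_lock));   /* data_lock must already be held */
  if(it->check_abort(error)) {           /* check_abort may briefly drop and
                                            retake data_lock around its call
                                            to make_abort()                 */
    globus_mutex_unlock(&(it->data_lock));
    return;                              /* stop registering further buffers */
  };
  /* ... register the next data buffer here ... */
  globus_mutex_unlock(&(it->data_lock));
}
#endif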
bool GridFTP_Commands::check_abort(globus_object_t *error) { globus_mutex_lock(&abort_lock); if(transfer_abort || (!transfer_mode)) { /* abort in progress or not transfering anymore */ globus_mutex_unlock(&abort_lock); return true; /* just leave telling to stop registering buffers */ }; if((error != GLOBUS_SUCCESS)) { logger.msg(Arc::ERROR, "check_abort: have Globus error"); logger.msg(Arc::ERROR, "Abort request caused by transfer error"); logger.msg(Arc::ERROR, "Globus error: %s", Arc::globus_object_to_string(error)); /* TODO !!!!!!!!!!! should be only one 426 !!!!!!!!!! */ logger.msg(Arc::INFO, "check_abort: sending 426"); send_response("426 Transfer terminated.\r\n"); globus_mutex_unlock(&data_lock); /* release other waiting threads */ make_abort(true,false); globus_mutex_lock(&data_lock); return true; }; globus_mutex_unlock(&abort_lock); return false; } /* same as make_abort, but is called from data transfer callbacks */ /* This function should always be called from data transfer callbacks with data_lock locked */ void GridFTP_Commands::force_abort(void) { globus_mutex_lock(&abort_lock); if(transfer_abort || (!transfer_mode)) { /* abort in progress or not transfering anymore*/ globus_mutex_unlock(&abort_lock); return; }; logger.msg(Arc::INFO, "Abort request caused by error in transfer function"); /* TODO !!!!!!!!!!! should be only one 426 !!!!!!!!!! */ if(froot.error.length()) { send_response("426 "+froot.error+"\r\n"); } else { send_response("426 Transfer terminated.\r\n"); }; globus_mutex_unlock(&data_lock); /* release other waiting threads */ make_abort(true,false); globus_mutex_lock(&data_lock); return; } GridFTP_Commands::GridFTP_Commands(int n,unsigned int* f) { log_id=n; firewall[0]=0; firewall[1]=0; firewall[2]=0; firewall[3]=0; if(f) memcpy(firewall,f,sizeof(firewall)); globus_mutex_init(&response_lock,GLOBUS_NULL); globus_cond_init(&response_cond,GLOBUS_NULL); response_done=0; globus_mutex_init(&abort_lock,GLOBUS_NULL); globus_cond_init(&abort_cond,GLOBUS_NULL); data_done=0; globus_mutex_init(&data_lock,GLOBUS_NULL); data_buffer=NULL; data_buffer_size=default_data_buffer_size; data_buffer_num=3; data_buf_count=0; data_callbacks=0; data_offset=0; globus_ftp_control_handle_init(&handle); data_dcau.mode=GLOBUS_FTP_CONTROL_DCAU_DEFAULT; data_dcau.subject.subject=NULL; data_conn_type=GRIDFTP_CONNECT_NONE; virt_offset=0; virt_size=0; virt_restrict=false; time_spent_disc=0; time_spent_network=0; transfer_mode=false; transfer_abort=false; data_eof=false; delegated_cred=NULL; file_size=0; last_action_time=time(NULL); list_offset=0; list_mode=list_mlsd_mode; /* harmless race condition here */ if(!timeouter) { GridFTP_Commands_timeout* t = new GridFTP_Commands_timeout; if(!timeouter) { timeouter=t; } else { delete t; }; }; timeouter->add(*this); } GridFTP_Commands::~GridFTP_Commands(void) { /* here all connections should be closed and all callbacks unregistered */ globus_mutex_destroy(&response_lock); globus_cond_destroy(&response_cond); globus_mutex_destroy(&abort_lock); globus_cond_destroy(&abort_cond); globus_ftp_control_handle_destroy(&handle); timeouter->remove(*this); #ifndef __DONT_USE_FORK__ delete timeouter; /* globus_mutex_lock(&fork_lock); fork_done=1; globus_cond_signal(&fork_cond); globus_mutex_unlock(&fork_lock); */ #endif } GridFTP_Commands_timeout::GridFTP_Commands_timeout(void) { exit_cond_flag=false; cond_flag=false; globus_mutex_init(&lock,GLOBUS_NULL); globus_cond_init(&cond,GLOBUS_NULL); globus_cond_init(&exit_cond,GLOBUS_NULL); 
if(globus_thread_create(&timer_thread,NULL,&timer_func,(void*)this)!=0){ logger.msg(Arc::ERROR, "Failed to start timer thread - timeout won't work"); globus_mutex_destroy(&lock); globus_cond_destroy(&cond); globus_cond_destroy(&exit_cond); exit_cond_flag=true; }; } GridFTP_Commands_timeout::~GridFTP_Commands_timeout(void) { if(exit_cond_flag) return; cond_flag=true; globus_mutex_lock(&lock); globus_cond_signal(&cond); while(!exit_cond_flag) { globus_cond_wait(&exit_cond,&lock); }; globus_mutex_unlock(&lock); globus_mutex_destroy(&lock); globus_cond_destroy(&cond); globus_cond_destroy(&exit_cond); } void GridFTP_Commands_timeout::remove(const GridFTP_Commands& cmd) { if(exit_cond_flag) return; globus_mutex_lock(&lock); for(std::list::iterator i=cmds.begin();i!=cmds.end();) { if(&cmd == (*i)) { i=cmds.erase(i); } else { ++i; }; }; globus_mutex_unlock(&lock); } void GridFTP_Commands_timeout::add(GridFTP_Commands& cmd) { if(exit_cond_flag) return; globus_mutex_lock(&lock); cmds.push_back(&cmd); globus_mutex_unlock(&lock); } void* GridFTP_Commands_timeout::timer_func(void* arg) { GridFTP_Commands_timeout* it = (GridFTP_Commands_timeout*)arg; globus_mutex_lock(&(it->lock)); for(;;) { if(it->cond_flag) { /* destructor */ break; }; time_t curr_time = time(NULL); time_t next_wakeup = curr_time + FTP_TIMEOUT; for(std::list::iterator i=it->cmds.begin(); i!=it->cmds.end();++i) { if((*i)->last_action_time != (time_t)(-1)) { time_t time_passed = curr_time - (*i)->last_action_time; if(time_passed >= FTP_TIMEOUT) { /* cancel connection */ logger.msg(Arc::ERROR, "Killing connection due to timeout"); #if GLOBUS_IO_VERSION<5 shutdown((*i)->handle.cc_handle.io_handle.fd,2); #else globus_io_register_close(&((*i)->handle.cc_handle.io_handle),&io_close_cb,NULL); #endif (*i)->last_action_time=(time_t)(-1); } else { time_passed = curr_time + (FTP_TIMEOUT - time_passed); if(time_passed < next_wakeup) next_wakeup = time_passed; }; }; }; curr_time = time(NULL); if(next_wakeup < curr_time) { next_wakeup=curr_time; }; globus_abstime_t timeout; GlobusTimeAbstimeSet(timeout,next_wakeup-curr_time,0); globus_cond_timedwait(&(it->cond),&(it->lock),&timeout); }; it->exit_cond_flag=true; globus_cond_signal(&(it->exit_cond)); globus_mutex_unlock(&(it->lock)); return NULL; } nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/datawrite.cpp0000644000000000000000000000012411741502232024077 xustar000000000000000027 mtime=1334215834.878809 27 atime=1513200575.552714 30 ctime=1513200662.479777814 nordugrid-arc-5.4.2/src/services/gridftpd/datawrite.cpp0000644000175000002070000001323511741502232024150 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "fileroot.h" #include "names.h" #include "commands.h" static Arc::Logger logger(Arc::Logger::getRootLogger(),"GridFTP_Commands"); /* file store callbacks */ void GridFTP_Commands::data_connect_store_callback(void* arg,globus_ftp_control_handle_t*,unsigned int /* stripendx */,globus_bool_t /* reused */,globus_object_t *error) { GridFTP_Commands *it = (GridFTP_Commands*)arg; logger.msg(Arc::VERBOSE, "data_connect_store_callback"); globus_thread_blocking_will_block(); globus_mutex_lock(&(it->data_lock)); it->time_spent_disc=0; it->time_spent_network=0; it->last_action_time=time(NULL); logger.msg(Arc::VERBOSE, "Data channel connected (store)"); if(it->check_abort(error)) { it->froot.close(false); globus_mutex_unlock(&(it->data_lock)); return; }; it->data_eof = false; /* make buffers */ it->compute_data_buffer(); 
if(!(it->allocate_data_buffer())) { it->froot.close(false); it->force_abort(); globus_mutex_unlock(&(it->data_lock)); return; }; /* register all available buffers */ it->data_callbacks=0; globus_result_t res = GLOBUS_SUCCESS; for(unsigned int i = 0;idata_buffer_num;i++) { if(!((it->data_buffer)[i].data)) continue; struct timezone tz; gettimeofday(&(it->data_buffer[i].time_last),&tz); res=globus_ftp_control_data_read(&(it->handle), (globus_byte_t*)(it->data_buffer[i].data), it->data_buffer_size, &data_store_callback,it); if(res==GLOBUS_SUCCESS) { it->data_callbacks++; } else { break; }; }; if(it->data_callbacks==0) { logger.msg(Arc::ERROR, "Failed to register any buffer"); if(res != GLOBUS_SUCCESS) { logger.msg(Arc::ERROR, "Globus error: %s", Arc::GlobusResult(res).str()); }; it->froot.close(false); it->free_data_buffer(); it->force_abort(); globus_mutex_unlock(&(it->data_lock)); return; }; globus_mutex_unlock(&(it->data_lock)); return; } void GridFTP_Commands::data_store_callback(void* arg,globus_ftp_control_handle_t*,globus_object_t *error,globus_byte_t *buffer,globus_size_t length,globus_off_t offset,globus_bool_t eof) { globus_thread_blocking_will_block(); GridFTP_Commands *it = (GridFTP_Commands*)arg; struct timezone tz; struct timeval tv; gettimeofday(&tv,&tz); globus_mutex_lock(&(it->data_lock)); it->last_action_time=time(NULL); logger.msg(Arc::VERBOSE, "Data channel (store) %i %i %i", (int)offset, (int)length, (int)eof); it->data_callbacks--; if(it->check_abort(error)) { if(it->data_callbacks==0){it->free_data_buffer();it->froot.close(false);}; globus_mutex_unlock(&(it->data_lock)); return; }; if(eof) it->data_eof=true; /* find this buffer */ unsigned int i; for(i = 0;idata_buffer_num;i++) { if((it->data_buffer)[i].data == (unsigned char*)buffer) break; }; if(i >= it->data_buffer_num) { /* lost buffer - probably memory corruption */ logger.msg(Arc::ERROR, "data_store_callback: lost buffer"); it->force_abort(); if(it->data_callbacks==0){it->free_data_buffer();it->froot.close(false);}; globus_mutex_unlock(&(it->data_lock)); return; }; unsigned long long int time_diff = (tv.tv_sec-(it->data_buffer[i].time_last.tv_sec))*1000000+ (tv.tv_usec-(it->data_buffer[i].time_last.tv_usec)); it->time_spent_network+=time_diff; /* write data to file NOTE: it->data_lock is not unlocked here because it->froot.write is not thread safe */ struct timeval tv_last; gettimeofday(&tv_last,&tz); if(it->froot.write(it->data_buffer[i].data, (it->virt_offset)+offset,length) != 0) { logger.msg(Arc::ERROR, "Closing channel (store) due to error: %s", it->froot.error); it->force_abort(); if(it->data_callbacks==0){it->free_data_buffer();it->froot.close(false);}; globus_mutex_unlock(&(it->data_lock)); return; }; gettimeofday(&tv,&tz); time_diff=(tv.tv_sec-tv_last.tv_sec)*1000000+(tv.tv_usec-tv_last.tv_usec); it->time_spent_disc+=time_diff; if(it->data_eof) { if(it->data_callbacks==0) { logger.msg(Arc::VERBOSE, "Closing channel (store)"); it->free_data_buffer(); it->virt_offset=0; it->virt_restrict=false; it->transfer_mode=false; if(it->froot.close() != 0) { if(it->froot.error.length()) { it->send_response("451 "+it->froot.error+"\r\n"); } else { it->send_response("451 Local error\r\n"); }; } else { logger.msg(Arc::VERBOSE, "Time spent waiting for network: %.3f ms", (float)(it->time_spent_network/1000.0)); logger.msg(Arc::VERBOSE, "Time spent waiting for disc: %.3f ms", (float)(it->time_spent_disc/1000.0)); it->send_response("226 Requested file transfer completed\r\n"); }; }; globus_mutex_unlock(&(it->data_lock)); 
return; }; /* register buffer */ globus_result_t res; gettimeofday(&(it->data_buffer[i].time_last),&tz); res=globus_ftp_control_data_read(&(it->handle), (globus_byte_t*)(it->data_buffer[i].data), it->data_buffer_size, &data_store_callback,it); if(res != GLOBUS_SUCCESS) { /* Because this error can be caused by EOF, abort should not be called unless this is last buffer */ if(it->data_callbacks==0) { logger.msg(Arc::ERROR, "Globus error: %s", Arc::GlobusResult(res).str()); it->force_abort(); it->free_data_buffer();it->froot.close(false); }; globus_mutex_unlock(&(it->data_lock)); return; }; it->data_callbacks++; globus_mutex_unlock(&(it->data_lock)); return; } nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/names.cpp0000644000000000000000000000012411741502232023216 xustar000000000000000027 mtime=1334215834.878809 27 atime=1513200575.505714 30 ctime=1513200662.472777729 nordugrid-arc-5.4.2/src/services/gridftpd/names.cpp0000644000175000002070000000155011741502232023264 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include bool remove_last_name(std::string &name) { int n=name.rfind('/'); if(n==-1) { if(name.length() == 0) return false; name=""; return true; }; name=name.substr(0,n); return true; } bool keep_last_name(std::string &name) { int n=name.rfind('/'); if(n==-1) return false; name=name.substr(n+1); return true; } /* only good names can come here - not checking */ char* remove_head_dir_c(const char* name,int dir_len) { char* s = (char*)name+dir_len; if((*s) == '/') s++; return s; } std::string remove_head_dir_s(std::string &name,int dir_len) { if(name[dir_len]=='/') dir_len++; return name.substr(dir_len); } const char* get_last_name(const char* name) { const char* p = strrchr(name,'/'); if(p==NULL) { p=name; } else { p++; }; return p; } nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/misc0000644000000000000000000000013213214316026022265 xustar000000000000000030 mtime=1513200662.518778291 30 atime=1513200668.719854133 30 ctime=1513200662.518778291 nordugrid-arc-5.4.2/src/services/gridftpd/misc/0000755000175000002070000000000013214316026022410 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/gridftpd/misc/PaxHeaders.7502/ldapquery.cpp0000644000000000000000000000012411611503620025052 xustar000000000000000027 mtime=1311147920.204773 27 atime=1513200575.530714 30 ctime=1513200662.517778279 nordugrid-arc-5.4.2/src/services/gridftpd/misc/ldapquery.cpp0000644000175000002070000004351211611503620025124 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include "config.h" #endif #include #ifdef HAVE_SASL_H #include #include #endif #ifdef HAVE_SASL_SASL_H #include #include #endif #include #include #include #include #include #include #include #include #include "ldapquery.h" #ifdef HAVE_LIBINTL_H #include #define _(A) dgettext("arclib", (A)) #else #define _(A) (A) #endif namespace gridftpd { static Arc::Logger logger(Arc::Logger::getRootLogger(),"LdapQuery"); class sigpipe_ingore { public: sigpipe_ingore(); }; struct ldap_bind_arg { LDAP *connection; Arc::SimpleCondition cond; bool anonymous; std::string usersn; bool valid; }; static void* ldap_bind_with_timeout(void* arg_); #if defined(HAVE_SASL_H) || defined(HAVE_SASL_SASL_H) class sasl_defaults { public: sasl_defaults (ldap *ld, const std::string & mech, const std::string & realm, const std::string & authcid, const std::string & authzid, const std::string & passwd); ~sasl_defaults() {}; private: std::string p_mech; std::string p_realm; std::string p_authcid; std::string 
p_authzid; std::string p_passwd; friend int my_sasl_interact(ldap *ld, unsigned int flags, void * defaults_, void * interact_); }; static sigpipe_ingore sigpipe_ingore; sigpipe_ingore::sigpipe_ingore() { signal(SIGPIPE,SIG_IGN); } sasl_defaults::sasl_defaults (ldap *ld, const std::string & mech, const std::string & realm, const std::string & authcid, const std::string & authzid, const std::string & passwd) : p_mech (mech), p_realm (realm), p_authcid (authcid), p_authzid (authzid), p_passwd (passwd) { if (p_mech.empty()) { char * temp; ldap_get_option (ld, LDAP_OPT_X_SASL_MECH, &temp); if (temp) { p_mech = temp; free (temp); } } if (p_realm.empty()) { char * temp; ldap_get_option (ld, LDAP_OPT_X_SASL_REALM, &temp); if (temp) { p_realm = temp; free (temp); } } if (p_authcid.empty()) { char * temp; ldap_get_option (ld, LDAP_OPT_X_SASL_AUTHCID, &temp); if (temp) { p_authcid = temp; free (temp); } } if (p_authzid.empty()) { char * temp; ldap_get_option (ld, LDAP_OPT_X_SASL_AUTHZID, &temp); if (temp) { p_authzid = temp; free (temp); } } } int my_sasl_interact(ldap* /* ld */, unsigned int flags, void * defaults_, void * interact_) { sasl_interact_t * interact = (sasl_interact_t *) interact_; sasl_defaults * defaults = (sasl_defaults *) defaults_; if (flags == LDAP_SASL_INTERACTIVE) { logger.msg(Arc::VERBOSE, _("SASL Interaction")); } while (interact->id != SASL_CB_LIST_END) { bool noecho = false; bool challenge = false; bool use_default = false; switch (interact->id) { case SASL_CB_GETREALM: if (defaults && !defaults->p_realm.empty()) interact->defresult = strdup (defaults->p_realm.c_str()); break; case SASL_CB_AUTHNAME: if (defaults && !defaults->p_authcid.empty()) interact->defresult = strdup (defaults->p_authcid.c_str()); break; case SASL_CB_USER: if (defaults && !defaults->p_authzid.empty()) interact->defresult = strdup (defaults->p_authzid.c_str()); break; case SASL_CB_PASS: if (defaults && !defaults->p_passwd.empty()) interact->defresult = strdup (defaults->p_passwd.c_str()); noecho = true; break; case SASL_CB_NOECHOPROMPT: noecho = true; challenge = true; break; case SASL_CB_ECHOPROMPT: challenge = true; break; } if (flags != LDAP_SASL_INTERACTIVE && (interact->defresult || interact->id == SASL_CB_USER)) { use_default = true; } else { if (flags == LDAP_SASL_QUIET) return 1; if (challenge && interact->challenge) logger.msg(Arc::VERBOSE, "%s: %s", _("Challenge"), interact->challenge); if (interact->defresult) logger.msg(Arc::VERBOSE, "%s: %s", _("Default"), interact->defresult); std::string prompt; std::string input; prompt = interact->prompt ? std::string (interact->prompt) + ": " : "Interact: "; if (noecho) { input = getpass (prompt.c_str()); } else { std::cout << prompt; std::cin >> input; } if (input.empty()) use_default = true; else { interact->result = strdup (input.c_str()); interact->len = input.length(); } } if (use_default) { interact->result = strdup (interact->defresult ? 
interact->defresult : ""); interact->len = strlen ((char *) interact->result); } if (defaults && interact->id == SASL_CB_PASS) { // clear default password after first use defaults->p_passwd = ""; } interact++; } return 0; } #endif LdapQuery::LdapQuery(const std::string& ldaphost, int ldapport, bool anonymous, const std::string& usersn, int timeout) : host(ldaphost), port(ldapport), anonymous(anonymous), usersn(usersn), timeout(timeout), connection(NULL), messageid(0) {} LdapQuery::~LdapQuery() { if (connection) { ldap_unbind_ext (connection, NULL, NULL); connection = NULL; } } void LdapQuery::Connect() { const int version = LDAP_VERSION3; logger.msg(Arc::VERBOSE, "%s: %s:%i", _("LdapQuery: Initializing connection to"), host, port); if (connection) throw LdapQueryError( _("Ldap connection already open to") + (" " + host)); ldap_initialize(&connection, ("ldap://" + host + ':' + Arc::tostring(port)).c_str()); if (!connection) throw LdapQueryError( _("Could not open ldap connection to") + (" " + host)); try { SetConnectionOptions(version); } catch (LdapQueryError& e) { // Clean up and re-throw exception ldap_unbind_ext (connection, NULL, NULL); connection = NULL; throw; } ldap_bind_arg arg; arg.connection = connection; arg.anonymous = anonymous; arg.usersn = usersn; arg.valid = false; pthread_t thr; if (pthread_create(&thr, NULL, &ldap_bind_with_timeout, &arg) != 0) { ldap_unbind_ext (connection, NULL, NULL); connection = NULL; throw LdapQueryError( _("Failed to create ldap bind thread") + (" (" + host + ")")); } if (!arg.cond.wait(1000 * (timeout + 1))) { pthread_cancel (thr); pthread_detach (thr); // if bind fails unbind will fail too - so don't call it connection = NULL; throw LdapQueryError( _("Ldap bind timeout") + (" (" + host + ")")); } pthread_join (thr, NULL); if (!arg.valid) { ldap_unbind_ext (connection, NULL, NULL); connection = NULL; throw LdapQueryError( _("Failed to bind to ldap server") + (" (" + host + ")")); }; } void LdapQuery::SetConnectionOptions(int version) { timeval tout; tout.tv_sec = timeout; tout.tv_usec = 0; if (ldap_set_option (connection, LDAP_OPT_NETWORK_TIMEOUT, &tout) != LDAP_OPT_SUCCESS) throw LdapQueryError( _("Could not set ldap network timeout") + (" (" + host + ")")); if (ldap_set_option (connection, LDAP_OPT_TIMELIMIT, &timeout) != LDAP_OPT_SUCCESS) throw LdapQueryError( _("Could not set ldap timelimit") + (" (" + host + ")")); if (ldap_set_option (connection, LDAP_OPT_PROTOCOL_VERSION, &version) != LDAP_OPT_SUCCESS) throw LdapQueryError( _("Could not set ldap protocol version") + (" (" + host + ")")); } static void* ldap_bind_with_timeout(void* arg_) { ldap_bind_arg* arg = (ldap_bind_arg* ) arg_; int ldresult = 0; if (arg->anonymous) { BerValue cred = { 0, (char*)"" }; ldresult = ldap_sasl_bind_s (arg->connection, NULL, LDAP_SASL_SIMPLE, &cred, NULL, NULL, NULL); } else { #if defined(HAVE_SASL_H) || defined(HAVE_SASL_SASL_H) int ldapflag = LDAP_SASL_QUIET; if (logger.getThreshold() <= Arc::VERBOSE) ldapflag = LDAP_SASL_AUTOMATIC; sasl_defaults defaults = sasl_defaults (arg->connection, SASLMECH, "", "", arg->usersn, ""); ldresult = ldap_sasl_interactive_bind_s (arg->connection, NULL, SASLMECH, NULL, NULL, ldapflag, my_sasl_interact, &defaults); #else BerValue cred = { 0, (char*)"" }; ldresult = ldap_sasl_bind_s (arg->connection, NULL, LDAP_SASL_SIMPLE, &cred, NULL, NULL, NULL); #endif } if (ldresult != LDAP_SUCCESS) { arg->valid = false; arg->cond.signal(); } else { arg->valid = true; arg->cond.signal(); } return NULL; } void LdapQuery::Query(const 
std::string& base, const std::string& filter, const std::vector & attributes, Scope scope) throw (LdapQueryError) { Connect(); logger.msg(Arc::VERBOSE, "%s %s", _("LdapQuery: Querying"), host); logger.msg(Arc::VERBOSE, "%s: %s", _("base dn"), base); if (!filter.empty()) logger.msg(Arc::VERBOSE, " %s: %s", _("filter"), filter); if (!attributes.empty()) { logger.msg(Arc::VERBOSE, " %s:", _("attributes")); for (std::vector::const_iterator vs = attributes.begin(); vs != attributes.end(); vs++) logger.msg(Arc::VERBOSE, " %s", *vs); } timeval tout; tout.tv_sec = timeout; tout.tv_usec = 0; char *filt = (char *) filter.c_str(); char ** attrs; if (attributes.empty()) attrs = NULL; else { attrs = new char * [attributes.size() + 1]; int i = 0; for (std::vector::const_iterator vs = attributes.begin(); vs != attributes.end(); vs++, i++) attrs [i] = (char *) vs->c_str(); attrs [i] = NULL; } int ldresult = ldap_search_ext (connection, base.c_str(), scope, filt, attrs, 0, NULL, NULL, &tout, 0, &messageid); if (attrs) delete[] attrs; if (ldresult != LDAP_SUCCESS) { std::string error_msg(ldap_err2string (ldresult)); error_msg += " (" + host + ")"; ldap_unbind_ext (connection, NULL, NULL); connection = NULL; throw LdapQueryError(error_msg); } } void LdapQuery::Result(ldap_callback callback, void* ref) throw(LdapQueryError) { try { HandleResult(callback, ref); } catch (LdapQueryError& e) { // Clean up and re-throw exception ldap_unbind_ext (connection, NULL, NULL); connection = NULL; messageid = 0; throw; } // Since C++ doesnt have finally(), here we are again ldap_unbind_ext (connection, NULL, NULL); connection = NULL; messageid = 0; return; } void LdapQuery::HandleResult(ldap_callback callback, void* ref) { logger.msg(Arc::VERBOSE, "%s %s", _("LdapQuery: Getting results from"), host); if (!messageid) throw LdapQueryError( _("Error: no ldap query started to") + (" " + host)); timeval tout; tout.tv_sec = timeout; tout.tv_usec = 0; bool done = false; int ldresult = 0; LDAPMessage * res = NULL; while (!done && (ldresult = ldap_result (connection, messageid, LDAP_MSG_ONE, &tout, &res)) > 0) { for (LDAPMessage * msg = ldap_first_message (connection, res); msg; msg = ldap_next_message (connection, msg)) { switch (ldap_msgtype(msg)) { case LDAP_RES_SEARCH_ENTRY: HandleSearchEntry(msg, callback, ref); break; case LDAP_RES_SEARCH_RESULT: done = true; break; } // switch } // for ldap_msgfree (res); } if (ldresult == 0) throw LdapQueryError(_("Ldap query timed out") + (": " + host)); if (ldresult == -1) { std::string error_msg(ldap_err2string (ldresult)); error_msg += " (" + host + ")"; throw LdapQueryError(error_msg); } return; } void LdapQuery::HandleSearchEntry(LDAPMessage* msg, ldap_callback callback, void* ref) { char *dn = ldap_get_dn(connection, msg); callback("dn", dn, ref); if (dn) ldap_memfree(dn); BerElement *ber = NULL; for (char *attr = ldap_first_attribute (connection, msg, &ber); attr; attr = ldap_next_attribute (connection, msg, ber)) { BerValue **bval; if ((bval = ldap_get_values_len (connection, msg, attr))) { for (int i = 0; bval[i]; i++) { callback (attr, (bval[i]->bv_val ? 
bval[i]->bv_val : ""), ref); } ber_bvecfree(bval); } ldap_memfree(attr); } if (ber) ber_free(ber, 0); } std::string LdapQuery::Host() { return host; } ParallelLdapQueries::ParallelLdapQueries(std::list clusters, std::string filter, std::vector attrs, ldap_callback callback, void* object, LdapQuery::Scope scope, const std::string& usersn, bool anonymous, int timeout) : clusters(clusters), filter(filter), attrs(attrs), callback(callback), object(object), scope(scope), usersn(usersn), anonymous(anonymous), timeout(timeout) { urlit = this->clusters.begin(); pthread_mutex_init(&lock, NULL); } ParallelLdapQueries::~ParallelLdapQueries() { pthread_mutex_destroy(&lock); } void ParallelLdapQueries::Query() { const int numqueries = clusters.size(); pthread_t* threads = new pthread_t[numqueries]; int res; for (unsigned int i = 0; ilock); Arc::URL qurl = *(plq->urlit); plq->urlit++; pthread_mutex_unlock(&plq->lock); LdapQuery ldapq(qurl.Host(), qurl.Port(), plq->anonymous, plq->usersn, plq->timeout); try { pthread_mutex_lock(&plq->lock); ldapq.Query(qurl.Path(), plq->filter, plq->attrs, plq->scope); /* is Path() correct here to replace BaseDN() ?? */ pthread_mutex_unlock(&plq->lock); } catch (LdapQueryError& e) { pthread_mutex_unlock(&plq->lock); logger.msg(Arc::VERBOSE, "%s: %s", _("Warning"), e.what()); pthread_exit(NULL); } pthread_mutex_lock(&plq->lock); try { ldapq.Result(plq->callback, plq->object); } catch (LdapQueryError& e) { logger.msg(Arc::VERBOSE, "%s: %s", _("Warning"), e.what()); } pthread_mutex_unlock(&plq->lock); pthread_exit(NULL); } } // namespace gridftpd nordugrid-arc-5.4.2/src/services/gridftpd/misc/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712771223666024420 xustar000000000000000027 mtime=1474635702.672235 30 atime=1513200604.497068663 30 ctime=1513200662.512778218 nordugrid-arc-5.4.2/src/services/gridftpd/misc/Makefile.am0000644000175000002070000000103712771223666024463 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libmisc.la if LDAP_ENABLED libmisc_la_SOURCES = \ ldapquery.cpp proxy.cpp \ ldapquery.h proxy.h else libmisc_la_SOURCES = \ proxy.cpp \ proxy.h endif libmisc_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(GLOBUS_GSSAPI_GSI_CFLAGS) $(AM_CXXFLAGS) libmisc_la_LIBADD = \ $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LDAP_LIBS) -lpthread nordugrid-arc-5.4.2/src/services/gridftpd/misc/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315734024413 xustar000000000000000030 mtime=1513200604.548069287 30 atime=1513200651.258640575 29 ctime=1513200662.51377823 nordugrid-arc-5.4.2/src/services/gridftpd/misc/Makefile.in0000644000175000002070000005770213214315734024475 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/gridftpd/misc DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) am__DEPENDENCIES_1 = libmisc_la_DEPENDENCIES = $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) am__libmisc_la_SOURCES_DIST = proxy.cpp proxy.h ldapquery.cpp \ ldapquery.h @LDAP_ENABLED_FALSE@am_libmisc_la_OBJECTS = libmisc_la-proxy.lo @LDAP_ENABLED_TRUE@am_libmisc_la_OBJECTS = libmisc_la-ldapquery.lo \ @LDAP_ENABLED_TRUE@ libmisc_la-proxy.lo libmisc_la_OBJECTS = $(am_libmisc_la_OBJECTS) libmisc_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libmisc_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libmisc_la_SOURCES) DIST_SOURCES = $(am__libmisc_la_SOURCES_DIST) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = 
@pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = 
@GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ 
datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libmisc.la @LDAP_ENABLED_FALSE@libmisc_la_SOURCES = \ @LDAP_ENABLED_FALSE@ proxy.cpp \ @LDAP_ENABLED_FALSE@ proxy.h @LDAP_ENABLED_TRUE@libmisc_la_SOURCES = \ @LDAP_ENABLED_TRUE@ ldapquery.cpp proxy.cpp \ @LDAP_ENABLED_TRUE@ ldapquery.h proxy.h libmisc_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(GLOBUS_GSSAPI_GSI_CFLAGS) $(AM_CXXFLAGS) libmisc_la_LIBADD = \ $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LDAP_LIBS) -lpthread all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/gridftpd/misc/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/gridftpd/misc/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libmisc.la: $(libmisc_la_OBJECTS) $(libmisc_la_DEPENDENCIES) $(libmisc_la_LINK) $(libmisc_la_OBJECTS) $(libmisc_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmisc_la-ldapquery.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmisc_la-proxy.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libmisc_la-proxy.lo: proxy.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmisc_la_CXXFLAGS) $(CXXFLAGS) -MT libmisc_la-proxy.lo -MD -MP -MF $(DEPDIR)/libmisc_la-proxy.Tpo -c -o libmisc_la-proxy.lo `test -f 'proxy.cpp' || echo '$(srcdir)/'`proxy.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmisc_la-proxy.Tpo $(DEPDIR)/libmisc_la-proxy.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='proxy.cpp' object='libmisc_la-proxy.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmisc_la_CXXFLAGS) $(CXXFLAGS) -c -o libmisc_la-proxy.lo `test -f 'proxy.cpp' || echo '$(srcdir)/'`proxy.cpp libmisc_la-ldapquery.lo: ldapquery.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) 
--tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmisc_la_CXXFLAGS) $(CXXFLAGS) -MT libmisc_la-ldapquery.lo -MD -MP -MF $(DEPDIR)/libmisc_la-ldapquery.Tpo -c -o libmisc_la-ldapquery.lo `test -f 'ldapquery.cpp' || echo '$(srcdir)/'`ldapquery.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmisc_la-ldapquery.Tpo $(DEPDIR)/libmisc_la-ldapquery.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ldapquery.cpp' object='libmisc_la-ldapquery.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmisc_la_CXXFLAGS) $(CXXFLAGS) -c -o libmisc_la-ldapquery.lo `test -f 'ldapquery.cpp' || echo '$(srcdir)/'`ldapquery.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/gridftpd/misc/PaxHeaders.7502/proxy.h0000644000000000000000000000012412675602216023706 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.530714 30 ctime=1513200662.515778255 nordugrid-arc-5.4.2/src/services/gridftpd/misc/proxy.h0000644000175000002070000000035012675602216023751 0ustar00mockbuildmock00000000000000#ifndef GRID_SERVER_PROXY_H #define GRID_SERVER_PROXY_H #include namespace gridftpd { char* write_proxy(gss_cred_id_t cred); char* write_cert_chain(const gss_ctx_id_t gss_context); } // namespace gridftpd #endif nordugrid-arc-5.4.2/src/services/gridftpd/misc/PaxHeaders.7502/ldapquery.h0000644000000000000000000000012311412417142024520 xustar000000000000000026 mtime=1277828706.49151 27 atime=1513200575.532714 30 ctime=1513200662.518778291 nordugrid-arc-5.4.2/src/services/gridftpd/misc/ldapquery.h0000644000175000002070000000740711412417142024576 0ustar00mockbuildmock00000000000000/* * Interface for querying ldap servers. Should support GSI-GSSAPI for * SASL if your environment is set up correctly; however, so far it * isn't necessary in ARC. */ #ifndef ARCLIB_LDAPQUERY #define ARCLIB_LDAPQUERY #include #include #include #include #include #define SASLMECH "GSI-GSSAPI" namespace gridftpd { /** LdapQuery exception. Gets thrown when an error occurs in a query. */ class LdapQueryError : public std::exception { public: /** Standard exception class constructor. */ LdapQueryError(std::string message): message(message) {} ~LdapQueryError() throw() {} virtual const char* what() const throw() { return message.c_str(); } private: std::string message; }; /** * LDAP callback type. Your ldap callbacks should have this same signature. */ typedef void (*ldap_callback)(const std::string& attr, const std::string& value, void *ref); /** * LdapQuery class; querying of LDAP servers. */ class LdapQuery { public: /** * Constructs a new LdapQuery object and sets connection options. * The connection is first established when calling Query. */ LdapQuery(const std::string& ldaphost, int ldapport, bool anonymous = true, const std::string& usersn = "", int timeout = 20); /** * Destructor. Will disconnect from the ldap server if still connected. */ ~LdapQuery(); /** * Scope for LDAP queries. Use when querying. */ enum Scope { base, onelevel, subtree }; /** * Queries the ldap server. */ void Query(const std::string& base, const std::string& filter = "(objectclass=*)", const std::vector & attributes = std::vector(), Scope scope = subtree) throw(LdapQueryError); /** * Retrieves the result of the query from the ldap-server. */ void Result(ldap_callback callback, void *ref) throw(LdapQueryError); /** * Returns the hostname of the ldap-server. 
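 *
 * Illustrative usage of this class (a minimal sketch, not from the original
 * API documentation: the host, port, base DN and callback name are invented
 * examples, and the attribute-vector element type is assumed to be
 * std::string since it is elided in this copy of the header):
 *
 *   // print_attr must match the ldap_callback typedef declared above
 *   void print_attr(const std::string& attr, const std::string& value, void* ref);
 *
 *   gridftpd::LdapQuery q("ldap.example.org", 2135);
 *   std::vector<std::string> attrs;            // empty list => all attributes
 *   q.Query("o=grid", "(objectclass=*)", attrs, gridftpd::LdapQuery::subtree);
 *   q.Result(&print_attr, NULL);               // both calls may throw LdapQueryError
 *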
*/ std::string Host(); private: void Connect(); void SetConnectionOptions(int version); void HandleResult(ldap_callback callback, void *ref); void HandleSearchEntry(LDAPMessage *msg, ldap_callback callback, void *ref); std::string host; int port; bool anonymous; std::string usersn; int timeout; ldap *connection; int messageid; }; /** General method to perform parallel ldap-queries to a set of clusters */ class ParallelLdapQueries { public: ParallelLdapQueries(std::list clusters, std::string filter, std::vector attrs, ldap_callback callback, void* object, LdapQuery::Scope scope = LdapQuery::subtree, const std::string& usersn = "", bool anonymous = true, int timeout = 20); ~ParallelLdapQueries(); void Query(); static void* DoLdapQuery(void* arg); private: std::list clusters; std::string filter; std::vector attrs; ldap_callback callback; void* object; LdapQuery::Scope scope; std::string usersn; bool anonymous; int timeout; std::list::iterator urlit; pthread_mutex_t lock; }; } // namespace gridftpd #endif // ARCLIB_LDAPQUERY nordugrid-arc-5.4.2/src/services/gridftpd/misc/PaxHeaders.7502/proxy.cpp0000644000000000000000000000012413065017103024226 xustar000000000000000027 mtime=1490296387.698578 27 atime=1513200575.532714 30 ctime=1513200662.514778243 nordugrid-arc-5.4.2/src/services/gridftpd/misc/proxy.cpp0000644000175000002070000000710213065017103024273 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "proxy.h" #include #include #include #include #include #include namespace gridftpd { char* write_proxy(gss_cred_id_t cred) { char* proxy_fname = NULL; OM_uint32 major_status = 0; OM_uint32 minor_status = 0; gss_buffer_desc deleg_proxy_filename; if(cred == GSS_C_NO_CREDENTIAL) return NULL; major_status = gss_export_cred(&minor_status, cred, NULL, 1, &deleg_proxy_filename); if (major_status == GSS_S_COMPLETE) { char * cp; cp = strchr((char *)deleg_proxy_filename.value, '='); if(cp != NULL) { cp++; proxy_fname=strdup(cp); }; free(deleg_proxy_filename.value); }; return proxy_fname; } char* write_cert_chain(const gss_ctx_id_t gss_context) { /* Globus OID for the remote parties certificate chain */ gss_OID_desc cert_chain_oid = {11, (void*)"\x2b\x06\x01\x04\x01\x9b\x50\x01\x01\x01\x08"}; gss_buffer_set_t client_cert_chain = NULL; OM_uint32 major_status; OM_uint32 minor_status; int certs_num = 0; int n,n_; STACK_OF(X509) *cert_chain = NULL; BIO* bio = NULL; char* fname = NULL; major_status = gss_inquire_sec_context_by_oid(&minor_status, gss_context, &cert_chain_oid, &client_cert_chain); if(major_status != GSS_S_COMPLETE) { return NULL; }; certs_num = client_cert_chain->count; if(certs_num <= 0) goto err_exit; if((cert_chain = sk_X509_new_null()) == NULL) goto err_exit; for(n=0,n_=0;nelements[n].value; int length = (int)client_cert_chain->elements[n].length; X509* cert = d2i_X509(NULL,&value,length); if(cert) { if(cert) sk_X509_insert(cert_chain,cert,n_++); /* { X509_NAME *name = X509_get_subject_name(cert); char buf[256]; buf[0]=0; if(name) { X509_NAME_oneline(name,buf,sizeof(buf)); fprintf(stderr,"Name: %s\n",buf); } else { fprintf(stderr,"Name: none\n"); }; }; */ } else { /* fprintf(stderr,"No cert\n"); */ }; }; /* TODO: do not store in file - pass directly to calling function */ /* Make temporary file */ { std::string tempname = Glib::build_filename(Glib::get_tmp_dir(), "x509.XXXXXX"); if(!Arc::TmpFileCreate(tempname, "")) goto err_exit; fname = strdup(tempname.c_str()); if((bio=BIO_new_file(fname,"w")) == 
NULL) goto err_exit; }; for(n=0;n #endif #include #include #include #include #include #include #include #include #include "userspec.h" #include "conf.h" static Arc::Logger logger(Arc::Logger::getRootLogger(),"userspec_t"); void userspec_t::free(void) const { // Keep authentication info to preserve proxy (just in case) } //userspec_t::userspec_t(void):user(),map(user),default_map(user),name(NULL),group(NULL),home(NULL),gridmap(false) { userspec_t::userspec_t(void):user(),uid(-1),gid(-1),port(0),map(user),default_map(user),gridmap(false) { host[0] = 0; } userspec_t::~userspec_t(void) { userspec_t::free(); } bool check_gridmap(const char* dn,char** user,const char* mapfile) { std::string globus_gridmap; if(mapfile) { globus_gridmap=mapfile; } else { char* tmp=getenv("GRIDMAP"); if((tmp == NULL) || (tmp[0] == 0)) { globus_gridmap="/etc/grid-security/grid-mapfile"; } else { globus_gridmap=tmp; }; }; std::ifstream f(globus_gridmap.c_str()); if(!f.is_open() ) { logger.msg(Arc::ERROR, "Mapfile is missing at %s", globus_gridmap); return false; }; for(;f.good();) { std::string buf;//char buf[512]; // must be enough for DN + name getline(f,buf); //buf[511]=0; char* p = &buf[0]; for(;*p;p++) if(((*p) != ' ') && ((*p) != '\t')) break; if((*p) == '#') continue; if((*p) == 0) continue; std::string val; int n = Arc::ConfigIni::NextArg(p,val); if(strcmp(val.c_str(),dn) != 0) continue; p+=n; if(user) { n=Arc::ConfigIni::NextArg(p,val); *user=strdup(val.c_str()); }; f.close(); return true; }; f.close(); return false; } bool userspec_t::fill(globus_ftp_control_auth_info_t *auth,globus_ftp_control_handle_t *handle, const char* cfg) { struct passwd pw_; struct group gr_; struct passwd* pw=NULL; struct group* gr=NULL; char bufp[BUFSIZ]; char bufg[BUFSIZ]; if(cfg) config_file = cfg; if(auth == NULL) return false; if(auth->auth_gssapi_subject == NULL) return false; std::string subject; Arc::ConfigIni::NextArg(auth->auth_gssapi_subject,subject,'\0','\0'); char* name=NULL; char* gname=NULL; if(!check_gridmap(subject.c_str(),&name)) { logger.msg(Arc::WARNING, "There is no local mapping for user"); } else { if((name == NULL) || (name[0] == 0)) { logger.msg(Arc::WARNING, "There is no local name for user"); if(name) { std::free(name); name=NULL; }; } else { gridmap=true; gname = strchr(name,':'); if(gname) { *gname = 0; ++gname; if(gname[0] == 0) gname = NULL; }; }; }; // fill host info if(handle) { //int host[4] = {0,0,0,0}; //unsigned short port = 0; if(globus_io_tcp_get_remote_address(&(handle->cc_handle.io_handle), host,&(port)) != GLOBUS_SUCCESS) { port=0; user.set(auth->auth_gssapi_subject,auth->auth_gssapi_context, auth->delegated_credential_handle); } else { char abuf[1024]; abuf[sizeof(abuf)-1]=0; struct hostent he; struct hostent* he_p; struct in_addr a; snprintf(abuf,sizeof(abuf)-1,"%u.%u.%u.%u", (unsigned int)host[0],(unsigned int)host[1],(unsigned int)host[2],(unsigned int)host[3]); if(inet_aton(abuf,&a) != 0) { int h_errnop; char buf[1024]; he_p=globus_libc_gethostbyaddr_r((char*)&a,strlen(abuf),AF_INET, &he,buf,sizeof(buf),&h_errnop); if(he_p) { if(strcmp(he_p->h_name,"localhost") == 0) { abuf[sizeof(abuf)-1]=0; if(globus_libc_gethostname(abuf,sizeof(abuf)-1) != 0) { strcpy(abuf,"localhost"); }; }; }; }; user.set(auth->auth_gssapi_subject,auth->auth_gssapi_context, auth->delegated_credential_handle,abuf); }; } else { user.set(auth->auth_gssapi_subject,auth->auth_gssapi_context, auth->delegated_credential_handle); }; if((!user.is_proxy()) || (user.proxy() == NULL) || (user.proxy()[0] == 0)) { 
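/* No usable delegated proxy/credential accompanies this connection; note it and continue, since the local account mapping below does not depend on one. */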
logger.msg(Arc::INFO, "No proxy provided"); } else { logger.msg(Arc::VERBOSE, "Proxy/credentials stored at %s", user.proxy()); }; if((getuid() == 0) && name) { logger.msg(Arc::INFO, "Initially mapped to local user: %s", name); getpwnam_r(name,&pw_,bufp,BUFSIZ,&pw); if(pw == NULL) { logger.msg(Arc::ERROR, "Local user %s does not exist",name); std::free(name); name=NULL; return false; }; if(gname) { logger.msg(Arc::INFO, "Initially mapped to local group: %s", gname); getgrnam_r(gname,&gr_,bufg,BUFSIZ,&gr); if(gr == NULL) { logger.msg(Arc::ERROR, "Local group %s does not exist",gname); std::free(name); name=NULL; return false; }; }; } else { if(name) std::free(name); name=NULL; gname=NULL; getpwuid_r(getuid(),&pw_,bufp,BUFSIZ,&pw); if(pw == NULL) { logger.msg(Arc::WARNING, "Running user has no name"); } else { name=strdup(pw->pw_name); logger.msg(Arc::INFO, "Mapped to running user: %s", name); }; }; if(pw) { uid=pw->pw_uid; if(gr) { gid=gr->gr_gid; } else { gid=pw->pw_gid; }; logger.msg(Arc::INFO, "Mapped to local id: %i", uid); home=pw->pw_dir; if(!gr) { getgrgid_r(gid,&gr_,bufg,BUFSIZ,&gr); if(gr == NULL) { logger.msg(Arc::ERROR, "No group %i for mapped user", gid); }; }; std::string mapstr; if(name) mapstr+=name; mapstr+=":"; if(gr) mapstr+=gr->gr_name; mapstr+=" all"; default_map.mapname(mapstr.c_str()); logger.msg(Arc::INFO, "Mapped to local group id: %i", gid); if(gr) logger.msg(Arc::INFO, "Mapped to local group name: %s", gr->gr_name); logger.msg(Arc::VERBOSE, "Mapped user's home: %s", home); }; if(name) std::free(name); if(!user) return false; return true; } bool userspec_t::fill(AuthUser& u, const char* cfg) { struct passwd pw_; struct group gr_; struct passwd *pw; struct group *gr; char bufp[BUFSIZ]; char bufg[BUFSIZ]; std::string subject = u.DN(); char* name=NULL; char* gname=NULL; if(cfg) config_file = cfg; if(!check_gridmap(subject.c_str(),&name)) { logger.msg(Arc::WARNING, "There is no local mapping for user"); name=NULL; } else { if((name == NULL) || (name[0] == 0)) { logger.msg(Arc::WARNING, "There is no local name for user"); if(name) { std::free(name); name=NULL; }; } else { gridmap=true; gname = strchr(name,':'); if(gname) { *gname = 0; ++gname; if(gname[0] == 0) gname = NULL; }; }; }; user=u; if((!user.is_proxy()) || (user.proxy() == NULL) || (user.proxy()[0] == 0)) { logger.msg(Arc::INFO, "No proxy provided"); } else { logger.msg(Arc::INFO, "Proxy stored at %s", user.proxy()); }; if((getuid() == 0) && name) { logger.msg(Arc::INFO, "Initially mapped to local user: %s", name); getpwnam_r(name,&pw_,bufp,BUFSIZ,&pw); if(pw == NULL) { logger.msg(Arc::ERROR, "Local user does not exist"); std::free(name); name=NULL; return false; }; if(gname) { logger.msg(Arc::INFO, "Initially mapped to local group: %s", gname); getgrnam_r(gname,&gr_,bufg,BUFSIZ,&gr); if(gr == NULL) { logger.msg(Arc::ERROR, "Local group %s does not exist",gname); std::free(name); name=NULL; return false; }; }; } else { if(name) std::free(name); name=NULL; gname=NULL; getpwuid_r(getuid(),&pw_,bufp,BUFSIZ,&pw); if(pw == NULL) { logger.msg(Arc::WARNING, "Running user has no name"); } else { name=strdup(pw->pw_name); logger.msg(Arc::INFO, "Mapped to running user: %s", name); }; }; if(pw) { uid=pw->pw_uid; if(gr) { gid=gr->gr_gid; } else { gid=pw->pw_gid; }; logger.msg(Arc::INFO, "Mapped to local id: %i", uid); home=pw->pw_dir; if(!gr) { getgrgid_r(gid,&gr_,bufg,BUFSIZ,&gr); if(gr == NULL) { logger.msg(Arc::INFO, "No group %i for mapped user", gid); }; }; std::string mapstr; if(name) mapstr+=name; mapstr+=":"; if(gr) 
mapstr+=gr->gr_name; mapstr+=" all"; default_map.mapname(mapstr.c_str()); logger.msg(Arc::INFO, "Mapped to local group id: %i", pw->pw_gid); if(gr) logger.msg(Arc::INFO, "Mapped to local group name: %s", gr->gr_name); logger.msg(Arc::INFO, "Mapped user's home: %s", home); }; if(name) std::free(name); return true; } std::string subst_user_spec(std::string &in,userspec_t *spec) { std::string out = ""; unsigned int i; unsigned int last; last=0; for(i=0;ilast) out+=in.substr(last,i-last); i++; if(i>=in.length()) { }; switch(in[i]) { case 'u': { char buf[10]; snprintf(buf,9,"%i",spec->uid); out+=buf; last=i+1; }; break; case 'U': { out+=spec->get_uname(); last=i+1; }; break; case 'g': { char buf[10]; snprintf(buf,9,"%i",spec->gid); out+=buf; last=i+1; }; break; case 'G': { out+=spec->get_gname(); last=i+1; }; break; case 'D': { out+=spec->user.DN(); last=i+1; }; break; case 'H': { out+=spec->home; last=i+1; }; break; case '%': { out+='%'; last=i+1; }; break; default: { logger.msg(Arc::WARNING, "Undefined control sequence: %%%s", in[i]); }; break; }; }; }; if(i>last) out+=in.substr(last); return out; } bool userspec_t::refresh(void) { if(!map) return false; home=""; uid=-1; gid=-1; const char* name = map.unix_name(); const char* group = map.unix_group(); if(name == NULL) return false; if(name[0] == 0) return false; struct passwd pw_; struct group gr_; struct passwd *pw; struct group *gr; char buf[BUFSIZ]; getpwnam_r(name,&pw_,buf,BUFSIZ,&pw); if(pw == NULL) { logger.msg(Arc::ERROR, "Local user %s does not exist", name); return false; }; uid=pw->pw_uid; home=pw->pw_dir; gid=pw->pw_gid; if(group && group[0]) { getgrnam_r(group,&gr_,buf,BUFSIZ,&gr); if(gr == NULL) { logger.msg(Arc::WARNING, "Local group %s does not exist", group); } else { gid=gr->gr_gid; }; }; logger.msg(Arc::INFO, "Remapped to local user: %s", name); logger.msg(Arc::INFO, "Remapped to local id: %i", uid); logger.msg(Arc::INFO, "Remapped to local group id: %i", gid); if(group && group[0]) logger.msg(Arc::INFO, "Remapped to local group name: %s", group); logger.msg(Arc::INFO, "Remapped user's home: %s", home); return true; } AuthResult userspec_t::mapname(const char* line) { AuthResult res = map.mapname(line); if(res == AAA_POSITIVE_MATCH) refresh(); return res; } AuthResult userspec_t::mapgroup(const char* line) { AuthResult res = map.mapgroup(line); if(res == AAA_POSITIVE_MATCH) refresh(); return res; } AuthResult userspec_t::mapvo(const char* line) { AuthResult res = map.mapvo(line); if(res == AAA_POSITIVE_MATCH) refresh(); return res; } const char* userspec_t::get_uname(void) { const char* name = NULL; if((bool)map) { name=map.unix_name(); } else if((bool)default_map) { name=default_map.unix_name(); }; if(!name) name=""; return name; } const char* userspec_t::get_gname(void) { const char* group = NULL; if((bool)map) { group=map.unix_group(); } else if((bool)default_map) { group=default_map.unix_group(); }; if(!group) group=""; return group; } nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/config.cpp0000644000000000000000000000012412771223666023377 xustar000000000000000027 mtime=1474635702.672235 27 atime=1513200575.526714 30 ctime=1513200662.475777765 nordugrid-arc-5.4.2/src/services/gridftpd/config.cpp0000644000175000002070000000152412771223666023446 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "conf.h" static const char* default_central_config_file = DEFAULT_CENTRAL_CONFIG_FILE; static const char* default_central_config_file2 = DEFAULT_CENTRAL_CONFIG_FILE2; const char* 
config_file = NULL; std::string config_open_gridftp(Arc::ConfigFile &cfile) { std::string config_name; gridftpd::GMEnvironment env; if(!env) exit(1); if(config_file) { config_name=config_file; } else { struct stat st; if(stat(default_central_config_file,&st) == 0) { config_name=default_central_config_file; } else { config_name=default_central_config_file2; }; }; if(cfile.open(config_name)) return config_name; return ""; } void config_strip(std::string &rest) { int n=rest.find_first_not_of(" \t",0); if(n>0) rest.erase(0,n); } nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/misc.h0000644000000000000000000000012311412417142022512 xustar000000000000000026 mtime=1277828706.49151 27 atime=1513200575.501714 30 ctime=1513200662.485777888 nordugrid-arc-5.4.2/src/services/gridftpd/misc.h0000644000175000002070000000036711412417142022566 0ustar00mockbuildmock00000000000000#ifdef __USE_POSIX #include #else #define __USE_POSIX #include #undef __USE_POSIX #endif #include std::string timetostring(time_t t); std::string dirstring(bool dir,long long unsigned int s,time_t t,const char *name); nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/gridftpd.8.in0000644000000000000000000000012611721150100023701 xustar000000000000000027 mtime=1329909824.702493 30 atime=1513200651.210639988 29 ctime=1513200662.46877768 nordugrid-arc-5.4.2/src/services/gridftpd/gridftpd.8.in0000644000175000002070000000245311721150100023750 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH GRIDFTPD 8 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid System Managers Manual" .SH NAME gridftpd \- ARC GridFTP Server .SH SYNOPSIS .B gridftpd [\fIOPTION\fR]... .SH DESCRIPTION .\" Add any additional description here .PP gridftpd is the daemon running the ARC GridFTP server. This server has plugins to allow job submission from Grid clients and to expose a local filesystem as a Grid Storage Element. For more information see "The ARC Computing Element System Administrator Guide" (NORDUGRID-MANUAL-20). 
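.\" Illustrative invocation (a minimal sketch, not part of the original manual;
.\" the options are documented under OPTIONS below and the paths are the
.\" defaults named in FILES and in the bundled startup script):
.\"   gridftpd -c /etc/arc.conf -L /var/log/arc/gridftpd.log -d 3
.\" In normal deployments the daemon is started via its init/startup script
.\" rather than run by hand.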
.SH OPTIONS .TP \fB\-h\fR display help text .TP \fB\-p\fR \fInumber\fR port on which to listen .TP \fB\-c\fR \fIpath\fR full path to config file .TP \fB\-n\fR \fInumber\fR maximum number of connections allowed .TP \fB\-b\fR \fInumber\fR default buffer size .TP \fB\-B\fR \fInumber\fR maximum buffer size .TP \fB\-F\fR run daemon in foreground .TP \fB\-U\fR \fIuser[:group]\fR user (and group) name to switch to after starting .TP \fB\-L\fR \fIpath\fR path to log file .TP \fB\-P\fR \fIpath\fR path to pid file .TP \fB\-d\fR \fInumber\fR debug level, from 0 (lowest verbosity) to 5 (highest verbosity) .PP .SH REPORTING BUGS Report bugs to http://bugzilla.nordugrid.org/ .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH FILES .BR /etc/arc.conf .SH AUTHOR David Cameron nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/datalist.cpp0000644000000000000000000000012412044527530023725 xustar000000000000000027 mtime=1351790424.395655 27 atime=1513200575.526714 30 ctime=1513200662.480777827 nordugrid-arc-5.4.2/src/services/gridftpd/datalist.cpp0000644000175000002070000001277012044527530024001 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "fileroot.h" #include "names.h" #include "commands.h" #include "misc.h" static Arc::Logger logger(Arc::Logger::getRootLogger(),"GridFTP_Commands"); static std::string timetostring_rfc3659(time_t t) { struct tm tt; struct tm *tp; tp=gmtime_r(&t,&tt); if(tp == NULL) return ""; char buf[16]; snprintf(buf,sizeof(buf),"%04u%02u%02u%02u%02u%02u", tp->tm_year+1900,tp->tm_mon+1,tp->tm_mday, tp->tm_hour,tp->tm_min,tp->tm_sec); buf[sizeof(buf)-1]=0; return std::string(buf); } int make_list_string(const DirEntry &entr,GridFTP_Commands::list_mode_t mode, unsigned char* buf,int size,const char *prefix) { std::string str; switch(mode) { case GridFTP_Commands::list_mlsd_mode: { if(entr.is_file) { str+="type=file;"; } else { str+="type=dir;"; }; str+="size="+Arc::tostring(entr.size)+";"; str+="modify="+timetostring_rfc3659(entr.modified)+";"; str+="perm="; if(entr.is_file) { if(entr.may_append) str+="a"; if(entr.may_delete) str+="d"; if(entr.may_rename) str+="f"; if(entr.may_read) str+="r"; if(entr.may_write) str+="w"; } else { if(entr.may_create) str+="c"; if(entr.may_delete) str+="d"; if(entr.may_chdir) str+="e"; if(entr.may_rename) str+="f"; if(entr.may_dirlist)str+="l"; if(entr.may_purge) str+="p"; }; str+="; "; str+=prefix+entr.name; }; break; case GridFTP_Commands::list_list_mode: { if(entr.is_file) { str="------- 1 user group "+timetostring(entr.modified)+" "+ Arc::tostring(entr.size,16)+" "+prefix+entr.name; } else { str="d------ 1 user group "+timetostring(entr.modified)+" "+ Arc::tostring(entr.size,16)+" "+prefix+entr.name; }; }; break; case GridFTP_Commands::list_nlst_mode: { str=prefix+entr.name; }; break; default: { }; break; }; int len = str.length(); if(len > (size-3)) { str.resize(size-6); str+="..."; len=size-3; }; strcpy((char*)buf,str.c_str()); buf[len]='\r'; len++; buf[len]='\n'; len++; buf[len]=0; return len; } /* *** list transfer callbacks *** */ void GridFTP_Commands::list_retrieve_callback(void* arg,globus_ftp_control_handle_t*,globus_object_t *error,globus_byte_t* /* buffer */,globus_size_t /* length */,globus_off_t /* offset */,globus_bool_t /* eof */) { GridFTP_Commands *it = (GridFTP_Commands*)arg; globus_mutex_lock(&(it->data_lock)); it->last_action_time=time(NULL); if(it->check_abort(error)) { it->free_data_buffer(); globus_mutex_unlock(&(it->data_lock)); return; }; 
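/* Each pass through this callback ships one formatted directory entry over the data channel: when the cached listing is exhausted the buffer is freed and "226 Transfer completed." is sent on the control channel; otherwise the next entry is rendered with make_list_string() and written with this function re-registered as the write callback. */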
globus_bool_t eodf; globus_size_t size; if(it->dir_list_pointer == it->dir_list.end()) { it->virt_offset=0; it->transfer_mode=false; it->free_data_buffer(); logger.msg(Arc::VERBOSE, "Closing channel (list)"); it->send_response("226 Transfer completed.\r\n"); globus_mutex_unlock(&(it->data_lock)); return; }; globus_ftp_control_local_send_eof(&(it->handle),GLOBUS_TRUE); ++(it->dir_list_pointer); if(it->dir_list_pointer == it->dir_list.end()) { size=0; eodf=GLOBUS_TRUE; } else { size=make_list_string(*(it->dir_list_pointer),it->list_mode, it->data_buffer[0].data,it->data_buffer_size, it->list_name_prefix.c_str()); eodf=GLOBUS_FALSE; }; globus_result_t res; res=globus_ftp_control_data_write(&(it->handle), (globus_byte_t*)(it->data_buffer[0].data), size,it->list_offset,eodf, &list_retrieve_callback,it); if(res != GLOBUS_SUCCESS) { it->free_data_buffer(); it->force_abort(); globus_mutex_unlock(&(it->data_lock)); return; }; globus_mutex_unlock(&(it->data_lock)); } void GridFTP_Commands::list_connect_retrieve_callback(void* arg,globus_ftp_control_handle_t*,unsigned int /* stripendx */,globus_bool_t /* reused */,globus_object_t *error) { GridFTP_Commands *it = (GridFTP_Commands*)arg; globus_mutex_lock(&(it->data_lock)); it->last_action_time=time(NULL); if(it->check_abort(error)) { globus_mutex_unlock(&(it->data_lock)); return; }; it->data_buffer_size=4096; it->data_buffer_num=1; if(!it->allocate_data_buffer()) { it->force_abort(); globus_mutex_unlock(&(it->data_lock)); return; }; it->dir_list_pointer=it->dir_list.begin(); globus_size_t size; globus_bool_t eodf; if(it->dir_list_pointer == it->dir_list.end()) { size=0; eodf=GLOBUS_TRUE; } else { size=make_list_string(*(it->dir_list_pointer),it->list_mode, it->data_buffer[0].data,it->data_buffer_size, it->list_name_prefix.c_str()); eodf=GLOBUS_FALSE; }; it->list_offset = 0; logger.msg(Arc::VERBOSE, "Data channel connected (list)"); globus_ftp_control_local_send_eof(&(it->handle),GLOBUS_TRUE); globus_result_t res; res=globus_ftp_control_data_write(&(it->handle), (globus_byte_t*)(it->data_buffer[0].data), size,it->list_offset,eodf, &list_retrieve_callback,it); if(res != GLOBUS_SUCCESS) { it->free_data_buffer(); it->force_abort(); globus_mutex_unlock(&(it->data_lock)); return; }; it->list_offset+=size; globus_mutex_unlock(&(it->data_lock)); } nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/gridftpd-start.in0000644000000000000000000000012712754431715024714 xustar000000000000000027 mtime=1471296461.229277 30 atime=1513200651.194639793 30 ctime=1513200662.467777668 nordugrid-arc-5.4.2/src/services/gridftpd/gridftpd-start.in0000755000175000002070000001032712754431715024764 0ustar00mockbuildmock00000000000000#!/bin/bash add_library_path() { location="$1" if [ ! "x$location" = "x" ] ; then if [ ! "$location" = "/usr" ] ; then libdir="$location/lib" libdir64="$location/lib64" if [ -d "$libdir64" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir64" else LD_LIBRARY_PATH="$libdir64:$LD_LIBRARY_PATH" fi fi if [ -d "$libdir" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir" else LD_LIBRARY_PATH="$libdir:$LD_LIBRARY_PATH" fi fi fi fi } prog=gridftpd RUN=yes # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/${prog} ]; then . /etc/sysconfig/${prog} elif [ -r /etc/default/${prog} ]; then . 
/etc/default/${prog} fi if [ "$RUN" != "yes" ] ; then echo "$prog disabled, please adjust the configuration to your" echo "needs and then set RUN to 'yes' in /etc/default/$prog to enable it." exit 0 fi # GLOBUS_LOCATION GLOBUS_LOCATION=${GLOBUS_LOCATION:-@DEFAULT_GLOBUS_LOCATION@} if [ ! -d "$GLOBUS_LOCATION" ]; then echo "GLOBUS_LOCATION ($GLOBUS_LOCATION) not found" exit 1 fi export GLOBUS_LOCATION # ARC_LOCATION ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then echo "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi export ARC_LOCATION readconfigvar() { fname=$1 if [ ! -r "$fname" ]; then return fi bname="[$2]" vname=$3 value= cat "$fname" | grep -e '^\[' -e "^${vname}=" | { while true; do read line if [ ! $? = 0 ] ; then return fi if [ "$line" = "$bname" ] ; then while true ; do read line if [ ! $? = 0 ] ; then return fi lstart=`echo "$line" | head -c 1` if [ "$lstart" = '[' ] ; then return fi vlname=`echo "$line" | sed 's/=.*//;t;s/.*//'` if [ "$vlname" = "$vname" ] ; then val=`echo "$line" | sed 's/[^=]*=//'` eval "echo $val" return fi done fi done } } check_cert() { X509_USER_CERT=`readconfigvar "$ARC_CONFIG" gridftpd x509_user_cert` X509_USER_KEY=`readconfigvar "$ARC_CONFIG" gridftpd x509_user_key` if [ -z "$X509_USER_CERT" ] ; then X509_USER_CERT=`readconfigvar "$ARC_CONFIG" common x509_user_cert` fi if [ -z "$X509_USER_KEY" ] ; then X509_USER_KEY=`readconfigvar "$ARC_CONFIG" common x509_user_key` fi if [ -z "$X509_USER_CERT" ] ; then X509_USER_CERT=/etc/grid-security/hostcert.pem fi if [ -z "$X509_USER_KEY" ] ; then X509_USER_KEY=/etc/grid-security/hostkey.pem fi if [ ! -f "$X509_USER_CERT" ] ; then echo "Host certificate not found" exit 1 fi if [ ! -f "$X509_USER_KEY" ] ; then echo "Host key not found" exit 1 fi # check permissions on key perms=`stat -L -c %a "$X509_USER_KEY"` if [ "$perms" != "600" ] && [ "$perms" != "400" ] ; then echo "Host key must be readable only by user" exit 1 fi } CMD="$ARC_LOCATION/sbin/$prog" if [ ! -x "$CMD" ]; then echo "Missing executable" exit 1 fi # ARC_CONFIG if [ "x$ARC_CONFIG" = "x" ]; then if [ -r $ARC_LOCATION/etc/arc.conf ]; then ARC_CONFIG=$ARC_LOCATION/etc/arc.conf elif [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi fi if [ ! -r "$ARC_CONFIG" ]; then echo "ARC configuration not found (usually /etc/arc.conf)" exit 1 fi CMD="$CMD -c '$ARC_CONFIG'" # VOMS_LOCATION VOMS_LOCATION=${VOMS_LOCATION:-@DEFAULT_VOMS_LOCATION@} add_library_path "$VOMS_LOCATION" add_library_path "$GLOBUS_LOCATION" if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@ else LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@:$LD_LIBRARY_PATH fi export LD_LIBRARY_PATH PID_FILE=`readconfigvar "$ARC_CONFIG" gridftpd pidfile` if [ `id -u` = 0 ] ; then if [ "x$PID_FILE" = "x" ]; then PID_FILE=/var/run/$prog.pid fi else if [ "x$PID_FILE" = "x" ]; then PID_FILE=$HOME/$prog.pid fi fi logfile=`readconfigvar "$ARC_CONFIG" gridftpd logfile` if [ "x$logfile" = "x" ]; then logfile=/var/log/arc/gridftpd.log fi if [ ! 
-d `dirname $logfile` ]; then mkdir -p `dirname $logfile` fi CMD="$CMD -P '$PID_FILE'" check_cert exec "$CMD" nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/fileroot_config.cpp0000644000000000000000000000012413213445240025264 xustar000000000000000027 mtime=1512983200.815191 27 atime=1513200575.528714 30 ctime=1513200662.481777839 nordugrid-arc-5.4.2/src/services/gridftpd/fileroot_config.cpp0000644000175000002070000005266313213445240025345 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #ifdef HAVE_SSTREAM #include #else #include #endif #include #include #include #include "conf.h" #include "names.h" #include "misc.h" #include "auth/auth.h" #include "conf/conf_vo.h" #include "conf/environment.h" #include "fileroot.h" static Arc::Logger logger(Arc::Logger::getRootLogger(),"FileRoot"); int FileRoot::config(gridftpd::Daemon &daemon,ServerParams* params) { /* open and read configuration file */ Arc::ConfigFile cfile; Arc::ConfigIni* cf = NULL; config_open_gridftp(cfile); if(!cfile.is_open()) { logger.msg(Arc::ERROR, "configuration file not found"); return 1; }; cf=new Arc::ConfigIni(cfile); cf->AddSection("common"); cf->AddSection("gridftpd"); for(;;) { std::string rest; std::string command; cf->ReadNext(command,rest); if(command.length() == 0) break; /* EOF */ if(cf->SubSection()[0] != 0) continue; int r; r=daemon.config(command,rest); if(r == 0) continue; if(r == -1) { cfile.close(); delete cf; return 1; }; if(command == "port") { if(params) { if(sscanf(rest.c_str(),"%u",&(params->port)) != 1) { logger.msg(Arc::ERROR, "Wrong port number in configuration"); cfile.close(); delete cf; return 1; }; }; } else if(command == "maxconnections") { if(params) { if(sscanf(rest.c_str(),"%u",&(params->max_connections)) != 1) { logger.msg(Arc::ERROR, "Wrong maxconnections number in configuration"); cfile.close(); delete cf; return 1; }; }; } else if(command == "defaultbuffer") { if(params) { if(sscanf(rest.c_str(),"%u",&(params->default_buffer)) != 1) { logger.msg(Arc::ERROR, "Wrong defaultbuffer number in configuration"); cfile.close(); delete cf; return 1; }; }; } else if(command == "maxbuffer") { if(params) { if(sscanf(rest.c_str(),"%u",&(params->max_buffer)) != 1) { logger.msg(Arc::ERROR, "Wrong maxbuffer number in configuration"); cfile.close(); delete cf; return 1; }; }; } else if(command == "firewall") { if(params) { std::string value=Arc::ConfigIni::NextArg(rest); int errcode; struct hostent* host; struct hostent hostbuf; #ifdef _MACOSX char buf[BUFSIZ]; if((host=gethostbyname2(value.c_str(),AF_INET)) == NULL) { //TODO: Deal with IPv6 #else #ifndef _AIX #ifndef sun char buf[BUFSIZ]; if(gethostbyname_r(value.c_str(), &hostbuf,buf,sizeof(buf),&host,&errcode)) { #else char buf[BUFSIZ]; if((host=gethostbyname_r(value.c_str(), &hostbuf,buf,sizeof(buf),&errcode)) == NULL) { #endif #else struct hostent_data buf[BUFSIZ]; if((errcode=gethostbyname_r(value.c_str(), (host=&hostbuf),buf))) { #endif #endif logger.msg(Arc::ERROR, "Can't resolve host %s", value); cfile.close(); delete cf; return 1; }; if( (host == NULL) || (host->h_length < sizeof(struct in_addr)) || (host->h_addr_list[0] == NULL) ) { logger.msg(Arc::ERROR, "Can't resolve host %s", value); cfile.close(); delete cf; return 1; }; unsigned char* addr = (unsigned char*)(&(((struct in_addr*)(host->h_addr_list[0]))->s_addr)); params->firewall[0]=addr[0]; params->firewall[1]=addr[1]; params->firewall[2]=addr[2]; params->firewall[3]=addr[3]; }; }; }; cfile.close(); delete cf; return 0; } int FileRoot::config(std::ifstream 
&cfile,std::string &pluginpath) { bool right_group = true; user.user.select_group(NULL); for(;;) { std::string rest=Arc::ConfigFile::read_line(cfile); std::string command=Arc::ConfigIni::NextArg(rest); if(command.length() == 0) break; /* EOF */ if(gridftpd::Daemon::skip_config(command) == 0) { } else if(command == "include") { /* include content of another file */ std::string name=Arc::ConfigIni::NextArg(rest); std::ifstream cfile_; cfile_.open(name.c_str(),std::ifstream::in); if(!cfile_.is_open()) { logger.msg(Arc::ERROR, "couldn't open file %s", name); return 1; }; config(cfile_,pluginpath); cfile_.close(); } else if(command == "encryption") { /* is encryption allowed ? */ std::string value=Arc::ConfigIni::NextArg(rest); if(value == "yes") { heavy_encryption=true; } else if(value == "no") { heavy_encryption=false; } else { user.user.clear_groups(); nodes.clear(); logger.msg(Arc::ERROR, "improper attribute for encryption command: %s", value); return 1; }; } else if(command == "allowunknown") { /* should user be present in grid-mapfile ? */ std::string value=Arc::ConfigIni::NextArg(rest); if(value == "yes") { user.gridmap=true; } else if(value == "no") { if(!user.gridmap) { logger.msg(Arc::ERROR, "unknown (non-gridmap) user is not allowed"); return 1; }; } else { user.user.clear_groups(); nodes.clear(); logger.msg(Arc::ERROR, "improper attribute for encryption command: %s", value); return 1; }; } else if(command == "group") { /* definition of group of users based on DN */ if(!right_group) { for(;;) { rest=Arc::ConfigFile::read_line(cfile); command=Arc::ConfigIni::NextArg(rest); if(command.length() == 0) break; /* eof - bad */ if(command == "end") break; }; continue; }; std::string group_name=Arc::ConfigIni::NextArg(rest); int decision = AAA_NO_MATCH; for(;;) { std::string rest=Arc::ConfigFile::read_line(cfile); if(rest.length() == 0) break; /* eof - bad */ if(rest == "end") break; if(decision == AAA_NO_MATCH) { decision = user.user.evaluate(rest.c_str()); if(decision == AAA_FAILURE) { //decision=AAA_NO_MATCH; logger.msg(Arc::ERROR, "Failed processing authorization group %s",group_name); return 1; }; }; }; if(decision == AAA_POSITIVE_MATCH) user.user.add_group(group_name); } else if(command == "vo") { if(gridftpd::config_vo(user.user,command,rest,&logger) != 0) { logger.msg(Arc::ERROR, "couldn't process VO configuration"); return 1; }; } else if(command == "unixmap") { /* map to local unix user */ if(!user.mapped()) { if(user.mapname(rest.c_str()) == AAA_FAILURE) { logger.msg(Arc::ERROR, "failed while processing configuration command: %s %s", command, rest); return 1; }; }; } else if(command == "unixgroup") { /* map to local unix user */ if(!user.mapped()) { if(user.mapgroup(rest.c_str()) == AAA_FAILURE) { logger.msg(Arc::ERROR, "failed while processing configuration command: %s %s", command, rest); return 1; }; }; } else if(command == "unixvo") { /* map to local unix user */ if(!user.mapped()) { if(user.mapvo(rest.c_str()) == AAA_FAILURE) { logger.msg(Arc::ERROR, "failed while processing configuration command: %s %s", command, rest); return 1; }; }; } else if(command == "groupcfg") { /* next commands only for these groups */ user.user.select_group(NULL); if(rest.find_first_not_of(" \t") == std::string::npos) { right_group=true; continue; }; right_group=false; for(;;) { std::string group_name=Arc::ConfigIni::NextArg(rest); if(group_name.length() == 0) break; right_group=user.user.select_group(group_name); if(right_group) { break; }; }; } else if(command == "pluginpath") { if(!right_group) 
continue; pluginpath=Arc::ConfigIni::NextArg(rest); if(pluginpath.length() == 0) pluginpath="/"; } else if(command == "port") { } else if(command == "plugin") { if(!right_group) { for(;;) { rest=Arc::ConfigFile::read_line(cfile); command=Arc::ConfigIni::NextArg(rest); if(command.length() == 0) break; /* eof - bad */ if(command == "end") break; }; continue; }; std::string dir = Arc::ConfigIni::NextArg(rest); std::string plugin = Arc::ConfigIni::NextArg(rest); if(plugin.length() == 0) { logger.msg(Arc::WARNING, "can't parse configuration line: %s %s %s %s", command, dir, plugin, rest); continue; }; dir=subst_user_spec(dir,&user); if(!Arc::CanonicalDir(dir,false)) { logger.msg(Arc::WARNING, "bad directory in plugin command: %s", dir); continue; }; /* look if path is not already registered */ bool already_have_this_path=false; for(std::list::iterator i=nodes.begin();i!=nodes.end();++i) { if((*i) == dir) { already_have_this_path=true; break; }; }; if(already_have_this_path) { logger.msg(Arc::WARNING, "Already have directory: %s", dir); for(;;) { rest=Arc::ConfigFile::read_line(cfile); command=Arc::ConfigIni::NextArg(rest); if(command.length() == 0) break; /* eof - bad */ if(command == "end") break; }; } else { logger.msg(Arc::INFO, "Registering directory: %s with plugin: %s", dir, plugin); plugin=pluginpath+'/'+plugin; FileNode node((char*)(dir.c_str()),(char*)(plugin.c_str)(),cfile,user); if(node.has_plugin()) { nodes.push_back(node); } else { logger.msg(Arc::ERROR, "file node creation failed: %s", dir); for(;;) { rest=Arc::ConfigFile::read_line(cfile); command=Arc::ConfigIni::NextArg(rest); if(command.length() == 0) break; /* eof - bad */ if(command == "end") break; }; }; }; } else if(command == "allowactivedata") { std::string value=Arc::ConfigIni::NextArg(rest); if(value == "yes") { active_data=true; } else if(value == "no") { active_data=false; } else { user.user.clear_groups(); nodes.clear(); logger.msg(Arc::ERROR, "improper attribute for allowactvedata command: %s", value); return 1; }; } else { logger.msg(Arc::WARNING, "unsupported configuration command: %s", command); }; }; return 0; } int FileRoot::config(Arc::ConfigIni &cf,std::string &pluginpath) { typedef enum { conf_state_single, conf_state_group, conf_state_plugin } config_state_t; config_state_t st = conf_state_single; bool right_group = true; user.user.select_group(NULL); std::string group_name; // =config_next_arg(rest); int group_decision = AAA_NO_MATCH; std::string plugin_config; std::string plugin_name; std::string plugin_path; // for telling plugin its own endpoint // hostname can be overridden in configuration std::string hostname; char hostn[256]; if (gethostname(hostn, sizeof(hostn)) != 0) logger.msg(Arc::WARNING, "Could not determine hostname from gethostname()"); else hostname = hostn; std::string port = "2811"; // Hard-coded, but it is standard default for(;;) { std::string rest; std::string command; cf.ReadNext(command,rest); if(!right_group) { if(!cf.SectionNew()) continue; }; int r = gridftpd::config_vo(user.user,cf,command,rest,&logger); if(r==0) continue; if(cf.SectionNew()) { if(right_group) switch(st) { case conf_state_group: { if(group_name.length() == 0) { logger.msg(Arc::ERROR, "unnamed group"); return 1; }; if(group_decision == AAA_POSITIVE_MATCH) { user.user.add_group(group_name); }; }; break; case conf_state_plugin: { if(plugin_name.length() == 0) { logger.msg(Arc::WARNING, "undefined plugin"); break; }; if(plugin_path.length() == 0) { logger.msg(Arc::WARNING, "undefined virtual plugin path"); break; }; 
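/* Register the plugin whose configuration section has just been read completely: canonicalise the virtual mount path, skip paths that are already registered, then load the plugin from the plugin directory and hand it the collected section configuration together with its own endpoint URL (gsiftp://host:port/path). */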
plugin_path=subst_user_spec(plugin_path,&user); if(!Arc::CanonicalDir(plugin_path,false)) { logger.msg(Arc::WARNING, "bad directory for plugin: %s", plugin_path); break; }; /* look if path is not already registered */ bool already_have_this_path=false; for(std::list::iterator i=nodes.begin();i!=nodes.end();++i) { if((*i) == plugin_path) { already_have_this_path=true; break; }; }; if(already_have_this_path) { logger.msg(Arc::WARNING, "Already have directory: %s", plugin_path); break; }; logger.msg(Arc::INFO, "Registering directory: %s with plugin: %s", plugin_path, plugin_name); plugin_name=pluginpath+'/'+plugin_name; plugin_config+="endpoint gsiftp://"+hostname+":"+port+"/"+plugin_path+"\n"; plugin_config+="end\n"; #ifdef HAVE_SSTREAM std::stringstream fake_cfile(plugin_config); #else std::strstream fake_cfile; fake_cfile<AddSection("common"); cf->AddSection("group"); cf->AddSection("gridftpd"); cf->AddSection("vo"); /* keep information about user */ if(!user.fill(auth,handle,config_file.c_str())) { logger.msg(Arc::ERROR, "failed to process client identification"); delete cf; return 1; }; std::string pluginpath = "/"; std::list pluginpaths = Arc::ArcLocation::GetPlugins(); if(!pluginpaths.empty()) pluginpath=pluginpaths.front(); int r; r = config(*cf,pluginpath); cfile.close(); delete cf; cf = NULL; if(r != 0) return r; if(!user.gridmap) { logger.msg(Arc::ERROR, "unknown (non-gridmap) user is not allowed"); return 1; }; /* must be sorted to make sure we can always find right directory */ nodes.sort(FileNode::compare); /* create dummy directories */ int nn=nodes.size(); for(std::list::iterator i=nodes.begin();i!=nodes.end();++i) { if(nn==0) break; std::string name = i->point; for(;remove_last_name(name);) { if(name.length() == 0) break; bool found = false; std::list::iterator ii=i; for(;;) { ++ii; if(ii == nodes.end()) break; if(name == ii->point) { found=true; break; }; }; if(!found) { /* add dummy dir */ logger.msg(Arc::ERROR, "Registering dummy directory: %s", name); nodes.push_back(FileNode(name.c_str())); } else { break; }; }; nn--; }; opened_node=nodes.end(); user.free(); return 0; } nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/fileroot.h0000644000000000000000000000012413065020314023400 xustar000000000000000027 mtime=1490297036.543658 27 atime=1513200575.566714 30 ctime=1513200662.484777876 nordugrid-arc-5.4.2/src/services/gridftpd/fileroot.h0000644000175000002070000001671613065020314023460 0ustar00mockbuildmock00000000000000#ifndef GRID_SERVER_FILEROOT_H #define GRID_SERVER_FILEROOT_H #include #include #include #include #include #include #include #include "userspec.h" #include "conf.h" #include "conf/daemon.h" typedef enum { GRIDFTP_OPEN_RETRIEVE = 1, GRIDFTP_OPEN_STORE = 2 } open_modes; class DirEntry { public: typedef enum { minimal_object_info = 0, basic_object_info = 1, full_object_info = 2 } object_info_level; std::string name; bool is_file; time_t changed; time_t modified; unsigned long long size; uid_t uid; gid_t gid; bool may_rename; // bool may_delete; // bool may_create; // for dirs bool may_chdir; // for dirs bool may_dirlist; // for dirs bool may_mkdir; // for dirs bool may_purge; // for dirs bool may_read; // for files bool may_append; // for files bool may_write; // for files DirEntry(bool is_file_ = false,std::string name_ = ""): name(name_),is_file(is_file_), changed(0),modified(0),size(0),uid(0),gid(0), may_rename(false),may_delete(false), may_create(false),may_chdir(false),may_dirlist(false), may_mkdir(false),may_purge(false), 
may_read(false),may_append(false),may_write(false) { }; void reset(void) { name=""; is_file=false; changed=0; modified=0; size=0; uid=0; gid=0; may_rename=false; may_delete=false; may_create=false; may_chdir=false; may_dirlist=false; may_mkdir=false; may_purge=false; may_read=false; may_append=false; may_write=false; }; }; class FilePlugin { /* this is the base class for plugins */ public: std::string error_description; virtual std::string get_error_description() const { return error_description; }; /* virtual functions are not defined in base class */ virtual int open(const char*,open_modes,unsigned long long int /* size */ = 0) { return 1; }; virtual int close(bool /* eof */ = true) { return 1; }; virtual int read(unsigned char *,unsigned long long int /* offset */,unsigned long long int* /* size */) { return 1; }; virtual int write(unsigned char *,unsigned long long int /* offset */,unsigned long long int /* size */) { return 1; }; virtual int readdir(const char* /* name */,std::list& /* dir_list */,DirEntry::object_info_level /* mode */ = DirEntry::basic_object_info) { return 1; }; virtual int checkdir(std::string& /* dirname */) { return 1; }; virtual int checkfile(std::string& /* name */,DirEntry& /* info */,DirEntry::object_info_level /* mode */) { return 1; }; virtual int makedir(std::string& /* dirname */) { return 1; }; virtual int removefile(std::string& /* name */) { return 1; }; virtual int removedir(std::string& /* dirname */) { return 1; }; int count; FilePlugin(void) { count=0; /* after creation acquire MUST be called */ }; int acquire(void) { count++; return count; }; int release(void); virtual ~FilePlugin(void) { /* dlclose is done externally - yet */ }; protected: std::string endpoint; // endpoint (URL) corresponding to plugin }; class FileNode; /* this is the only exported C function from plugin */ typedef FilePlugin* (*plugin_init_t)(std::istream &cfile,userspec_t &user,FileNode &node); class FileNode { public: std::string point; private: FilePlugin *plug; std::string plugname; void* handle; plugin_init_t init; public: static const std::string no_error; FileNode(void) { /* empty uninitialized - can be used only to copy to it later */ point=""; plugname=""; handle=NULL; init=NULL; plug=NULL; }; /* following two constructors should be used only for copying in list */ FileNode(const FileNode &node) { point=node.point; plugname=node.plugname; plug=node.plug; handle=node.handle; init=NULL; if(plug) plug->acquire(); }; FileNode& operator= (const FileNode &node); FileNode(const char* dirname) { plug=NULL; init=NULL; point=std::string(dirname); handle=NULL; return; }; /* this constructor is for real load of plugin - it should be used to create really new FileNode */ FileNode(char* dirname,char* plugin,std::istream &cfile,userspec_t &user); ~FileNode(void); bool has_plugin(void) const { return (plug != NULL); }; FilePlugin* get_plugin(void) const { return plug; }; const std::string& get_plugin_path(void) const { return plugname; }; static bool compare(const FileNode &left,const FileNode &right) { return (left.point.length() > right.point.length()); }; bool operator> (const FileNode &right) const { return (point.length() > right.point.length()); }; bool operator< (const FileNode &right) const { return (point.length() < right.point.length()); }; bool operator> (char* right) const { return (point.length() > strlen(right)); }; bool operator< (char* right) const { return (point.length() < strlen(right)); }; bool operator== (std::string right) const { return (point == right); }; bool 
belongs(const char* name); bool is_in_dir(const char* name); std::string last_name(void); int open(const char* name,open_modes mode,unsigned long long int size = 0); int close(bool eof = true); int write(unsigned char *buf,unsigned long long int offset,unsigned long long int size); int read(unsigned char *buf,unsigned long long int offset,unsigned long long int *size); int readdir(const char* name,std::list &dir_list,DirEntry::object_info_level mode = DirEntry::basic_object_info); int checkdir(std::string &dirname); int checkfile(std::string &name,DirEntry &info,DirEntry::object_info_level mode); int makedir(std::string &dirname); int removedir(std::string &dirname); int removefile(std::string &name); std::string error(void) const { if(plug) return plug->get_error_description(); return no_error; }; }; class GridFTP_Commands; class FileRoot { friend class GridFTP_Commands; private: bool heavy_encryption; bool active_data; //bool unix_mapped; std::string error; public: class ServerParams { public: unsigned int port; unsigned int firewall[4]; unsigned int max_connections; unsigned int default_buffer; unsigned int max_buffer; ServerParams(void):port(0),max_connections(0),default_buffer(0),max_buffer(0) { firewall[0]=0; firewall[1]=0; firewall[2]=0; firewall[3]=0; }; }; std::list nodes; std::string cur_dir; userspec_t user; FileRoot(void); ~FileRoot(void) { }; std::list::iterator opened_node; int open(const char* name,open_modes mode,unsigned long long int size = 0); int close(bool eof = true); int write(unsigned char *buf,unsigned long long int offset,unsigned long long int size); int read(unsigned char *buf,unsigned long long int offset,unsigned long long int *size); int readdir(const char* name,std::list &dir_list,DirEntry::object_info_level mode); std::string cwd() const { return "/"+cur_dir; }; int cwd(std::string &name); int mkd(std::string &name); int rmd(std::string &name); int rm(std::string &name); int size(const char* name,unsigned long long int *size); int time(const char* name,time_t *time); int checkfile(const char* name,DirEntry &obj,DirEntry::object_info_level mode); int config(globus_ftp_control_auth_info_t* auth,globus_ftp_control_handle_t *handle); int config(std::ifstream &cfile,std::string &pluginpath); int config(Arc::ConfigIni &cf,std::string &pluginpath); static int config(gridftpd::Daemon &daemon,ServerParams* params); }; #endif nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/conf.h0000644000000000000000000000012412771223666022524 xustar000000000000000027 mtime=1474635702.672235 27 atime=1513200575.501714 30 ctime=1513200662.483777863 nordugrid-arc-5.4.2/src/services/gridftpd/conf.h0000644000175000002070000000076512771223666022601 0ustar00mockbuildmock00000000000000#ifndef __GFS_CONF_H__ #define __GFS_CONF_H__ #include #include #include #include #include #include "conf/environment.h" #define DEFAULT_CONFIG_FILE "/etc/gridftpd.conf" #define DEFAULT_CENTRAL_CONFIG_FILE "/etc/arc.conf" #define DEFAULT_CENTRAL_CONFIG_FILE2 "/etc/nordugrid.conf" extern const char* config_file; std::string config_open_gridftp(Arc::ConfigFile &cfile); void config_strip(std::string &rest); #endif // __GFS_CONF_H__ nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/fileroot.cpp0000644000000000000000000000012413065020314023733 xustar000000000000000027 mtime=1490297036.543658 27 atime=1513200575.551714 30 ctime=1513200662.476777778 nordugrid-arc-5.4.2/src/services/gridftpd/fileroot.cpp0000644000175000002070000003052113065020314024001 0ustar00mockbuildmock00000000000000#ifdef 
HAVE_CONFIG_H #include #endif #include #include "fileroot.h" #include "names.h" #include "misc.h" static Arc::Logger logger(Arc::Logger::getRootLogger(),"FilePlugin"); const std::string FileNode::no_error(""); #define NO_PLUGIN(PATH) { logger.msg(Arc::ERROR, "No plugin is configured or authorised for requested path %s", PATH); } int FilePlugin::release(void) { count--; if(count < 0) { logger.msg(Arc::WARNING, "FilePlugin: more unload than load"); count=0; }; return count; } FileNode::FileNode(char* dirname,char* plugin,std::istream &cfile,userspec_t &user) { plug=NULL; init=NULL; point=std::string(dirname); plugname=std::string(plugin); // handle=dlopen(plugin,RTLD_LAZY); handle=dlopen(plugin,RTLD_NOW); if(!handle) { logger.msg(Arc::ERROR, dlerror()); logger.msg(Arc::ERROR, "Can't load plugin %s for access point %s", plugin, dirname); return; }; init=(plugin_init_t)dlsym(handle,"init"); if(init == NULL) { logger.msg(Arc::ERROR, "Plugin %s for access point %s is broken.", plugin, dirname); dlclose(handle); handle=NULL; return; }; if((plug=init(cfile,user,*this)) == NULL) { logger.msg(Arc::ERROR, "Plugin %s for access point %s is broken.", plugin, dirname); dlclose(handle); handle=NULL; init=NULL; return; }; if(plug->acquire() != 1) { logger.msg(Arc::ERROR, "Plugin %s for access point %s acquire failed (should never happen).", plugin, dirname); delete plug; dlclose(handle); handle=NULL; init=NULL; plug=NULL; return; }; } FileNode::~FileNode(void) { if(plug) if(plug->release() == 0) { logger.msg(Arc::VERBOSE, "Destructor with dlclose (%s)", point); delete plug; dlclose(handle); handle=NULL; init=NULL; plug=NULL; }; } std::string FileNode::last_name(void) { int pl=point.rfind('/'); if(pl == -1) return point; return point.substr(pl+1); } bool FileNode::belongs(const char* name) { int pl=point.length(); if(pl == 0) return true; int l=strlen(name); if (pl > l) return false; if(strncmp(point.c_str(),name,pl)) return false; if(pl == l) return true; if(name[pl] == '/') return true; return false; } FileNode& FileNode::operator= (const FileNode &node) { logger.msg(Arc::VERBOSE, "FileNode: operator= (%s <- %s) %lu <- %lu", point, node.point, (unsigned long int)this, (unsigned long int)(&node)); if(plug) if(plug->release() == 0) { logger.msg(Arc::VERBOSE, "Copying with dlclose"); delete plug; dlclose(handle); handle=NULL; init=NULL; plug=NULL; }; point=node.point; plugname=node.plugname; plug=node.plug; handle=node.handle; return *this; } /* bool FileNode::is_in_dir(const char* name) { int l=strlen(name); if(point.length() <= l) return false; if(point[l] != '/') return false; if(strncmp(point.c_str(),name,l)) return false; return true; } */ /* should only last directory be shown ? 
*/ bool FileNode::is_in_dir(const char* name) { int l=strlen(name); int pl=point.rfind('/'); /* returns -1 if not found */ if(pl == -1) { if(l == 0) { return true; }; }; if(l != pl) return false; if(strncmp(point.c_str(),name,l)) return false; return true; } int FileRoot::size(const char* name,unsigned long long int *size) { std::string new_name; if(name[0] != '/') { new_name=cur_dir+'/'+name; } else { new_name=name; }; error=FileNode::no_error; if(!Arc::CanonicalDir(new_name,false)) return 1; if(new_name.empty()) { (*size)=0; return 0; } for(std::list::iterator i=nodes.begin();i!=nodes.end();++i) { if(i->belongs(new_name.c_str())) { DirEntry info; if(i->checkfile(new_name,info,DirEntry::basic_object_info) != 0) { error=i->error(); return 1; }; (*size)=info.size; return 0; }; }; NO_PLUGIN(name); return 1; } int FileRoot::time(const char* name,time_t *time) { std::string new_name; if(name[0] != '/') { new_name=cur_dir+'/'+name; } else { new_name=name; }; error=FileNode::no_error; if(!Arc::CanonicalDir(new_name,false)) return 1; if(new_name.empty()) { (*time)=0; return 0; }; for(std::list::iterator i=nodes.begin();i!=nodes.end();++i) { if(i->belongs(new_name.c_str())) { DirEntry info; if(i->checkfile(new_name,info,DirEntry::basic_object_info) != 0) { error=i->error(); return 1; }; (*time)=info.modified; return 0; }; }; NO_PLUGIN(name); return 1; } int FileRoot::checkfile(const char* name,DirEntry &info,DirEntry::object_info_level mode) { std::string new_name; if(name[0] != '/') { new_name=cur_dir+'/'+name; } else { new_name=name; }; error=FileNode::no_error; if(!Arc::CanonicalDir(new_name,false)) return 1; if(new_name.empty()) { info.reset(); info.name="/"; info.is_file=false; return 0; }; for(std::list::iterator i=nodes.begin();i!=nodes.end();++i) { if(i->belongs(new_name.c_str())) { if(i->checkfile(new_name,info,mode) != 0) { error=i->error(); return 1; }; info.name="/"+new_name; return 0; }; }; NO_PLUGIN(name); return 1; } int FileRoot::mkd(std::string& name) { std::string new_dir; if(name[0] != '/') { new_dir=cur_dir+'/'+name; } else { new_dir=name; }; error=FileNode::no_error; if(Arc::CanonicalDir(new_dir,false)) { for(std::list::iterator i=nodes.begin();i!=nodes.end();++i) { if((*i) == new_dir) { /* already exists, at least virtually */ name=new_dir; return 0; }; if((*i).belongs(new_dir.c_str())) { if((*i).makedir(new_dir) == 0) { name=new_dir; return 0; }; error=i->error(); name=cur_dir; return 1; }; }; NO_PLUGIN(name); }; name=cur_dir; return 1; } int FileRoot::rmd(std::string& name) { std::string new_dir; if(name[0] != '/') { new_dir=cur_dir+'/'+name; } else { new_dir=name; }; error=FileNode::no_error; if(Arc::CanonicalDir(new_dir,false)) { for(std::list::iterator i=nodes.begin();i!=nodes.end();++i) { if((*i) == new_dir) { /* virtual - not removable */ return 1; }; if(i->belongs(new_dir.c_str())) { int res = i->removedir(new_dir); error=i->error(); return res; }; }; NO_PLUGIN(name); }; return 1; } int FileRoot::rm(std::string& name) { std::string new_dir; if(name[0] != '/') { new_dir=cur_dir+'/'+name; } else { new_dir=name; }; error=FileNode::no_error; if(Arc::CanonicalDir(new_dir,false)) { for(std::list::iterator i=nodes.begin();i!=nodes.end();++i) { if((*i) == new_dir) { /* virtual dir - not removable */ return 1; }; if(i->belongs(new_dir.c_str())) { int res = i->removefile(new_dir); error=i->error(); return res; }; }; NO_PLUGIN(name); }; return 1; } int FileRoot::cwd(std::string& name) { std::string new_dir; if(name[0] != '/') { new_dir=cur_dir+'/'+name; } else { new_dir=name; }; 
error=FileNode::no_error; if(Arc::CanonicalDir(new_dir,false)) { if(new_dir.length() == 0) { /* always can go to root ? */ cur_dir=new_dir; name=cur_dir; return 0; }; /* check if can cd */ for(std::list::iterator i=nodes.begin();i!=nodes.end();++i) { if((*i) == new_dir) { cur_dir=new_dir; name=cur_dir; return 0; }; if((*i).belongs(new_dir.c_str())) { if((*i).checkdir(new_dir) == 0) { cur_dir=new_dir; name=cur_dir; return 0; }; error=i->error(); name=cur_dir; return 1; }; }; NO_PLUGIN(name); }; name="/"+cur_dir; return 1; } int FileRoot::open(const char* name,open_modes mode,unsigned long long int size) { std::string new_name; if(name[0] != '/') { new_name=cur_dir+'/'+name; } else { new_name=name; }; error=FileNode::no_error; if(!Arc::CanonicalDir(new_name,false)) { return 1; }; for(std::list::iterator i=nodes.begin();i!=nodes.end();++i) { if(i->belongs(new_name.c_str())) { if(i->open(new_name.c_str(),mode,size) == 0) { opened_node=i; return 0; }; error=i->error(); return 1; }; }; NO_PLUGIN(name); return 1; } int FileRoot::close(bool eof) { error=FileNode::no_error; if(opened_node != nodes.end()) { int i=(*opened_node).close(eof); error=opened_node->error(); opened_node=nodes.end(); return i; }; return 1; } int FileRoot::read(unsigned char* buf,unsigned long long int offset,unsigned long long *size) { error=FileNode::no_error; if(opened_node != nodes.end()) { int res = (*opened_node).read(buf,offset,size); error=opened_node->error(); return res; }; return 1; } int FileRoot::write(unsigned char *buf,unsigned long long int offset,unsigned long long size) { error=FileNode::no_error; if(opened_node != nodes.end()) { int res = (*opened_node).write(buf,offset,size); error=opened_node->error(); return res; }; return 1; } /* 0 - ok , 1 - failure, -1 - this is a file */ int FileRoot::readdir(const char* name,std::list &dir_list,DirEntry::object_info_level mode) { std::string fullname; if(name[0] != '/') { fullname=cur_dir+'/'+name; } else { fullname=name; }; error=FileNode::no_error; if(!Arc::CanonicalDir(fullname,false)) return 1; int res = 1; for(std::list::iterator i=nodes.begin();i!=nodes.end();++i) { if(i->belongs(fullname.c_str())) { res=i->readdir(fullname.c_str(),dir_list,mode); error=i->error(); break; }; }; if(res == -1) { /* means this is a file */ std::list::iterator di = dir_list.end(); --di; di->name="/"+fullname; return -1; }; for(std::list::iterator i=nodes.begin();i!=nodes.end();++i) { if(i->is_in_dir(fullname.c_str())) { DirEntry de; de.name=i->last_name(); de.is_file=false; // if(i->checkfile(i->point,de,mode) == 0) { // if(de.is_file) { // de.reset(); de.name=i->last_name(); de.is_file=false; /* TODO: fill other attributes */ // // }; // }; dir_list.push_front(de); res=0; }; }; return res; } FileRoot::FileRoot(void):error(FileNode::no_error) { cur_dir=""; opened_node=nodes.end(); heavy_encryption=true; active_data=true; //unix_mapped=false; } int FileNode::readdir(const char* name,std::list &dir_list,DirEntry::object_info_level mode) { if(plug) { plug->error_description=""; return plug->readdir(remove_head_dir_c(name,point.length()),dir_list,mode); }; return 0; } int FileNode::checkfile(std::string &name,DirEntry &info,DirEntry::object_info_level mode) { if(plug) { plug->error_description=""; std::string dname=remove_head_dir_s(name,point.length()); return plug->checkfile(dname,info,mode); }; return 1; } int FileNode::checkdir(std::string &dirname) { if(plug) { plug->error_description=""; std::string dname=remove_head_dir_s(dirname,point.length()); if(plug->checkdir(dname) == 0) 
{ dirname=point+'/'+dname; return 0; }; }; return 1; } int FileNode::makedir(std::string &dirname) { if(plug) { plug->error_description=""; std::string dname=remove_head_dir_s(dirname,point.length()); return plug->makedir(dname); }; return 1; } int FileNode::removedir(std::string &dirname) { if(plug) { plug->error_description=""; std::string dname=remove_head_dir_s(dirname,point.length()); return plug->removedir(dname); }; return 1; } int FileNode::removefile(std::string &name) { if(plug) { plug->error_description=""; std::string dname=remove_head_dir_s(name,point.length()); return plug->removefile(dname); }; return 1; } int FileNode::open(const char* name,open_modes mode,unsigned long long int size) { if(plug) { plug->error_description=""; return plug->open(remove_head_dir_c(name,point.length()),mode,size); }; return 1; } int FileNode::close(bool eof) { if(plug) { plug->error_description=""; return plug->close(eof); }; return 1; } int FileNode::read(unsigned char *buf,unsigned long long int offset,unsigned long long *size) { if(plug) { plug->error_description=""; return plug->read(buf,offset,size); }; return 1; } int FileNode::write(unsigned char *buf,unsigned long long int offset,unsigned long long size) { if(plug) { plug->error_description=""; return plug->write(buf,offset,size); }; return 1; } nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/run0000644000000000000000000000013013214316026022134 xustar000000000000000029 mtime=1513200662.57177894 30 atime=1513200668.719854133 29 ctime=1513200662.57177894 nordugrid-arc-5.4.2/src/services/gridftpd/run/0000755000175000002070000000000013214316026022261 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/gridftpd/run/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515024256 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200604.565069495 30 ctime=1513200662.568778903 nordugrid-arc-5.4.2/src/services/gridftpd/run/Makefile.am0000644000175000002070000000033612052416515024322 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = librun.la librun_la_SOURCES = run_plugin.cpp run_plugin.h librun_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) librun_la_LIBADD = $(DLOPEN_LIBS) -lpthread nordugrid-arc-5.4.2/src/services/gridftpd/run/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315734024265 xustar000000000000000030 mtime=1513200604.612070069 30 atime=1513200651.274640771 30 ctime=1513200662.569778915 nordugrid-arc-5.4.2/src/services/gridftpd/run/Makefile.in0000644000175000002070000005416313214315734024344 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
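[Editorial sketch] The FilePlugin base class declared in fileroot.h above, together with the dlopen()/dlsym("init") lookup performed by the FileNode constructor in fileroot.cpp, defines the contract a gridftpd access plugin has to fulfil. Below is a minimal illustrative sketch of such a plugin written against that interface; the class name EmptyDirPlugin and its behaviour are hypothetical and are not part of this distribution (which ships jobplugin and fileplugin instead). It assumes fileroot.h (FilePlugin, FileNode, DirEntry, userspec_t) is available on the include path.

// Illustrative sketch only: a plugin exposing an empty, read-only mount point.
#include <istream>
#include <list>
#include <string>

#include "fileroot.h"

class EmptyDirPlugin : public FilePlugin {
public:
  explicit EmptyDirPlugin(std::istream& /*cfile*/) {}
  // Allow "cd" into paths under the mount point (0 means success).
  virtual int checkdir(std::string& /*dirname*/) { return 0; }
  // Listing succeeds but returns no entries.
  virtual int readdir(const char* /*name*/, std::list<DirEntry>& /*dir_list*/,
                      DirEntry::object_info_level /*mode*/) { return 0; }
  // All other operations keep the base-class behaviour of returning 1 (failure),
  // so nothing under this mount point can be read, written, created or deleted.
};

// "init" is the only symbol gridftpd resolves with dlsym() (see plugin_init_t),
// so it needs C linkage for the lookup by name to succeed.
extern "C" FilePlugin* init(std::istream& cfile, userspec_t& /*user*/, FileNode& /*node*/) {
  return new EmptyDirPlugin(cfile);
}

After init() returns, the FileNode constructor calls acquire() on the returned object and expects the reference count to become 1, so the plugin itself should not call acquire().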
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/gridftpd/run DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) am__DEPENDENCIES_1 = librun_la_DEPENDENCIES = $(am__DEPENDENCIES_1) am_librun_la_OBJECTS = librun_la-run_plugin.lo librun_la_OBJECTS = $(am_librun_la_OBJECTS) librun_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(librun_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(librun_la_SOURCES) DIST_SOURCES = $(librun_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = 
@ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ 
JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ 
initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = librun.la librun_la_SOURCES = run_plugin.cpp run_plugin.h librun_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) librun_la_LIBADD = $(DLOPEN_LIBS) -lpthread all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/gridftpd/run/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/gridftpd/run/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done librun.la: $(librun_la_OBJECTS) $(librun_la_DEPENDENCIES) $(librun_la_LINK) $(librun_la_OBJECTS) $(librun_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librun_la-run_plugin.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< librun_la-run_plugin.lo: run_plugin.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librun_la_CXXFLAGS) $(CXXFLAGS) -MT librun_la-run_plugin.lo -MD -MP -MF $(DEPDIR)/librun_la-run_plugin.Tpo -c -o librun_la-run_plugin.lo `test -f 'run_plugin.cpp' || echo '$(srcdir)/'`run_plugin.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/librun_la-run_plugin.Tpo $(DEPDIR)/librun_la-run_plugin.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='run_plugin.cpp' object='librun_la-run_plugin.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librun_la_CXXFLAGS) $(CXXFLAGS) -c -o librun_la-run_plugin.lo `test -f 'run_plugin.cpp' || echo '$(srcdir)/'`run_plugin.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) 
$(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/gridftpd/run/PaxHeaders.7502/run_plugin.cpp0000644000000000000000000000012412771223666025120 xustar000000000000000027 mtime=1474635702.672235 27 atime=1513200575.549714 30 ctime=1513200662.570778927 nordugrid-arc-5.4.2/src/services/gridftpd/run/run_plugin.cpp0000644000175000002070000001647112771223666025176 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #ifndef WIN32 #include #endif #include #include #include "../conf/environment.h" #include "run_plugin.h" namespace gridftpd { void free_args(char** args) { if(args == NULL) return; for(int i=0;args[i];i++) free(args[i]); free(args); } char** string_to_args(const std::string& command) { if(command.length() == 0) return NULL; int n = 100; char** args = (char**)malloc(n*sizeof(char**)); int i; for(i=0;i::iterator i = args_.begin(); i!=args_.end();++i,++n) args[n]=(char*)(i->c_str()); args[n]=NULL; if(lib.length() == 0) { bool r = false; Arc::Run re(args_); re.AssignStdin(stdin_); re.AssignStdout(stdout_); re.AssignStderr(stderr_); if(re.Start()) { if(re.Wait(timeout_)) { result_=re.Result(); r=true; } else { re.Kill(0); }; }; if(!r) { free(args); return false; }; } else { #ifndef WIN32 void* lib_h = dlopen(lib.c_str(),RTLD_NOW); if(lib_h == NULL) { free(args); return false; }; lib_plugin_t f; f.v = dlsym(lib_h,args[0]); if(f.v == NULL) { dlclose(lib_h); free(args); return false; }; result_ = (*f.f)(args[1],args[2],args[3],args[4],args[5], args[6],args[7],args[8],args[9],args[10], args[11],args[12],args[13],args[14],args[15], args[16],args[17],args[18],args[19],args[20], args[21],args[22],args[23],args[24],args[25], args[26],args[27],args[28],args[29],args[30], args[31],args[32],args[33],args[34],args[35], args[36],args[37],args[38],args[39],args[40], args[41],args[42],args[43],args[44],args[45], args[56],args[57],args[58],args[59],args[60], args[61],args[62],args[63],args[64],args[65], args[66],args[67],args[68],args[69],args[70], args[71],args[72],args[73],args[74],args[75], args[76],args[77],args[78],args[79],args[80], args[81],args[82],args[83],args[84],args[85], args[86],args[87],args[88],args[89],args[90], args[91],args[92],args[93],args[94],args[95], args[96],args[97],args[98],args[99],args[100]); dlclose(lib_h); #else #warning Implement calling function from library for Windows result=-1; #endif }; free(args); return true; } bool RunPlugin::run(substitute_t subst,void* arg) { result_=0; stdout_=""; stderr_=""; if(subst == NULL) return run(); if(args_.empty()) return true; char** args = (char**)malloc(sizeof(char*)*(args_.size()+1)); if(args == NULL) return false; std::list args__; for(std::list::iterator i = args_.begin();i!=args_.end();++i) { args__.push_back(*i); }; for(std::list::iterator i = args__.begin();i!=args__.end();++i) { (*subst)(*i,arg); }; int n = 0; for(std::list::iterator i = args__.begin(); i!=args__.end();++i,++n) args[n]=(char*)(i->c_str()); args[n]=NULL; if(lib.length() == 0) { bool r = false; Arc::Run re(args__); re.AssignStdin(stdin_); re.AssignStdout(stdout_); re.AssignStderr(stderr_); if(re.Start()) { if(re.Wait(timeout_)) { result_=re.Result(); r=true; } else { re.Kill(0); }; }; if(!r) { free(args); return false; }; } else { #ifndef WIN32 void* lib_h = dlopen(lib.c_str(),RTLD_NOW); if(lib_h == NULL) { free(args); return false; }; lib_plugin_t f; f.v = dlsym(lib_h,args[0]); if(f.v == NULL) { dlclose(lib_h); free(args); return false; }; result_ = (*f.f)(args[1],args[2],args[3],args[4],args[5], args[6],args[7],args[8],args[9],args[10], 
args[11],args[12],args[13],args[14],args[15], args[16],args[17],args[18],args[19],args[20], args[21],args[22],args[23],args[24],args[25], args[26],args[27],args[28],args[29],args[30], args[31],args[32],args[33],args[34],args[35], args[36],args[37],args[38],args[39],args[40], args[41],args[42],args[43],args[44],args[45], args[56],args[57],args[58],args[59],args[60], args[61],args[62],args[63],args[64],args[65], args[66],args[67],args[68],args[69],args[70], args[71],args[72],args[73],args[74],args[75], args[76],args[77],args[78],args[79],args[80], args[81],args[82],args[83],args[84],args[85], args[86],args[87],args[88],args[89],args[90], args[91],args[92],args[93],args[94],args[95], args[96],args[97],args[98],args[99],args[100]); dlclose(lib_h); #else #warning Implement calling function from library for Windows result=-1; #endif }; free(args); return true; } } // namespace gridftpd nordugrid-arc-5.4.2/src/services/gridftpd/run/PaxHeaders.7502/run_plugin.h0000644000000000000000000000012311725640232024552 xustar000000000000000027 mtime=1331118234.649862 27 atime=1513200575.551714 29 ctime=1513200662.57177894 nordugrid-arc-5.4.2/src/services/gridftpd/run/run_plugin.h0000644000175000002070000000313511725640232024622 0ustar00mockbuildmock00000000000000#ifndef ARC_GRIDFTPD_RUN_PLUGIN_H #define ARC_GRIDFTPD_RUN_PLUGIN_H #include #include #include #include #include namespace gridftpd { class RunPlugin { private: std::list args_; std::string lib; std::string stdin_; std::string stdout_; std::string stderr_; int timeout_; int result_; void set(const std::string& cmd); void set(char const * const * args); public: typedef void (*substitute_t)(std::string& str,void* arg); union lib_plugin_t { int (*f)(...); void* v; }; RunPlugin(void):timeout_(10),result_(0) { }; RunPlugin(const std::string& cmd):timeout_(10),result_(0) { set(cmd); }; RunPlugin(char const * const * args):timeout_(10),result_(0) { set(args); }; RunPlugin& operator=(const std::string& cmd) { set(cmd); return *this; }; RunPlugin& operator=(char const * const * args) { set(args); return *this; }; bool run(void); bool run(substitute_t subst,void* arg); int result(void) const { return result_; }; void timeout(int t) { timeout_=t; }; void stdin_channel(const std::string& s) { stdin_=s; }; const std::string& stdout_channel(void) const { return stdout_; }; const std::string& stderr_channel(void) const { return stderr_; }; operator bool(void) const { return !args_.empty(); }; std::string cmd(void) { return (args_.empty())?std::string(""):args_.front(); }; }; void free_args(char** args); char** string_to_args(const std::string& command); } // namespace gridftpd #endif // ARC_GRIDFTPD_RUN_PLUGIN_H nordugrid-arc-5.4.2/src/services/gridftpd/run/PaxHeaders.7502/README0000644000000000000000000000012311412417142023072 xustar000000000000000026 mtime=1277828706.49151 27 atime=1513200575.551714 30 ctime=1513200662.566778879 nordugrid-arc-5.4.2/src/services/gridftpd/run/README0000644000175000002070000000004511412417142023137 0ustar00mockbuildmock00000000000000Classes to run external executables. 
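[Editorial sketch] The run/README above ("Classes to run external executables.") and run_plugin.h describe gridftpd::RunPlugin, which either spawns a child process through Arc::Run or calls a function from a dlopen()ed library. The following is a minimal, hypothetical usage sketch; the command string and timeout value are arbitrary examples and not taken from the gridftpd sources.

#include <iostream>
#include <string>

#include "run_plugin.h"

int main() {
  // Construct from a command line; the string is parsed into an argument list.
  gridftpd::RunPlugin rp(std::string("/bin/echo hello"));
  rp.timeout(5);                       // passed on to Arc::Run::Wait()
  if (rp.run()) {                      // runs via Arc::Run (no library configured)
    std::cout << "exit code: " << rp.result() << std::endl;
    std::cout << "stdout: " << rp.stdout_channel() << std::endl;
  } else {
    std::cout << "failed to run or timed out" << std::endl;
  }
  return 0;
}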
nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/gridftpd.service.in0000644000000000000000000000012712754431715025220 xustar000000000000000027 mtime=1471296461.229277 30 atime=1513200651.179639609 30 ctime=1513200662.470777704 nordugrid-arc-5.4.2/src/services/gridftpd/gridftpd.service.in0000644000175000002070000000034712754431715025266 0ustar00mockbuildmock00000000000000[Unit] Description=ARC gridftpd Documentation=man:gridftpd(8) After=local_fs.target remote_fs.target [Service] Type=forking PIDFile=/var/run/gridftpd.pid ExecStart=@pkgdatadir@/gridftpd-start [Install] WantedBy=multi-user.target nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/commands.h0000644000000000000000000000012412220651407023363 xustar000000000000000027 mtime=1380143879.429208 27 atime=1513200575.528714 30 ctime=1513200662.482777851 nordugrid-arc-5.4.2/src/services/gridftpd/commands.h0000644000175000002070000001300112220651407023423 0ustar00mockbuildmock00000000000000#include #include class GridFTP_Commands_timeout; class GridFTP_Commands { typedef enum data_connect_type_e { GRIDFTP_CONNECT_NONE, GRIDFTP_CONNECT_PORT, GRIDFTP_CONNECT_PASV } data_connect_type_t; friend class GridFTP_Commands_timeout; private: #ifndef __DONT_USE_FORK__ class close_semaphor_t { public: close_semaphor_t(void); ~close_semaphor_t(void); }; close_semaphor_t close_semaphor; #endif int log_id; unsigned int firewall[4]; int local_host[16]; // enough for IPv6 unsigned short local_port; bool local_is_ipv6; time_t last_action_time; globus_ftp_control_handle_t handle; globus_mutex_t response_lock; globus_cond_t response_cond; int response_done; globus_mutex_t abort_lock; globus_cond_t abort_cond; int data_done; data_connect_type_t data_conn_type; globus_ftp_control_dcau_t data_dcau; globus_ftp_control_tcpbuffer_t tcp_buffer; gss_cred_id_t delegated_cred; unsigned long long int file_size; FileRoot froot; /* flag to mark server is transfering data right now */ bool transfer_mode; /* flag to mark transfer abort was requested by client side */ bool transfer_abort; /* lock used in data transfer callbacks */ globus_mutex_t data_lock; /* flag to mark eof was set during data transfer(receive) or any other reason to stop registering new buffers */ bool data_eof; /* number of buffers registered so far for data transfer */ unsigned int data_buf_count; /* store array of data buffers here */ typedef struct { unsigned char* data; unsigned long long int used; struct timeval time_last; } data_buffer_t; data_buffer_t* data_buffer; /* size of every buffer - should it be equal to PBSZ ? 
*/ unsigned long long int data_buffer_size; unsigned int data_buffer_num; unsigned int data_callbacks; /* keeps offset in file for reading */ unsigned long long data_offset; unsigned long long virt_offset; unsigned long long virt_size; bool virt_restrict; /* statistics */ unsigned long long int time_spent_disc; unsigned long long int time_spent_network; void compute_data_buffer(void); bool allocate_data_buffer(void); void free_data_buffer(void); int send_response(const std::string& response) { return send_response(response.c_str()); }; int send_response(const char* response); int wait_response(void); static void close_callback(void *arg,globus_ftp_control_handle_t *handle,globus_object_t *error, globus_ftp_control_response_t *ftp_response); static void response_callback(void* arg,globus_ftp_control_handle_t *handle,globus_object_t *error); static void abort_callback(void* arg,globus_ftp_control_handle_t *handle,globus_object_t *error); bool check_abort(globus_object_t *error); void make_abort(bool already_locked = false,bool wait_abort = true); void force_abort(void); static void accepted_callback(void* arg,globus_ftp_control_handle_t *handle,globus_object_t *error); static void commands_callback(void* arg,globus_ftp_control_handle_t *handle,globus_object_t *error,union globus_ftp_control_command_u *command); static void authenticate_callback(void* arg,globus_ftp_control_handle_t *handle,globus_object_t *error,globus_ftp_control_auth_info_t *result); time_t last_action(void) const { return last_action_time; }; public: GridFTP_Commands(int n = 0,unsigned int* firewall = NULL); ~GridFTP_Commands(void); #ifndef __DONT_USE_FORK__ static int new_connection_callback(void* arg,int server_handle); #else static void new_connection_callback(void* arg,globus_ftp_control_server_t *server_handle,globus_object_t *error); #endif static void data_connect_retrieve_callback(void* arg,globus_ftp_control_handle_t *handle,unsigned int stripendx,globus_bool_t reused,globus_object_t *error); static void data_retrieve_callback(void* arg,globus_ftp_control_handle_t *handle,globus_object_t *error,globus_byte_t *buffer,globus_size_t length,globus_off_t offset,globus_bool_t eof); static void data_connect_store_callback(void* arg,globus_ftp_control_handle_t *handle,unsigned int stripendx,globus_bool_t reused,globus_object_t *error); static void data_store_callback(void* arg,globus_ftp_control_handle_t *handle,globus_object_t *error,globus_byte_t *buffer,globus_size_t length,globus_off_t offset,globus_bool_t eof); static void list_connect_retrieve_callback(void* arg,globus_ftp_control_handle_t *handle,unsigned int stripendx,globus_bool_t reused,globus_object_t *error); static void list_retrieve_callback(void* arg,globus_ftp_control_handle_t *handle,globus_object_t *error,globus_byte_t *buffer,globus_size_t length,globus_off_t offset,globus_bool_t eof); std::list dir_list; std::list::iterator dir_list_pointer; std::string list_name_prefix; globus_off_t list_offset; typedef enum { list_list_mode, list_nlst_mode, list_mlsd_mode } list_mode_t; list_mode_t list_mode; }; class GridFTP_Commands_timeout { private: globus_thread_t timer_thread; std::list cmds; globus_mutex_t lock; globus_cond_t cond; globus_cond_t exit_cond; bool cond_flag; bool exit_cond_flag; static void* timer_func(void* arg); public: GridFTP_Commands_timeout(void); ~GridFTP_Commands_timeout(void); void add(GridFTP_Commands& cmd); void remove(const GridFTP_Commands& cmd); }; 
nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/README0000644000000000000000000000012412442324223022270 xustar000000000000000027 mtime=1418307731.508846 27 atime=1513200575.533714 30 ctime=1513200662.464777631 nordugrid-arc-5.4.2/src/services/gridftpd/README0000644000175000002070000000102112442324223022327 0ustar00mockbuildmock00000000000000GridFTP server ported from arc0 svn tree. This service can act as a secure gateway to grid resources, providing authentication and authorisation through several mechanisms. 3 plugins are provided by ARC: - jobplugin (in a-rex/grid-manager/jobplugin) allows job submission, monitoring and control through the GridFTP interface - fileplugin exposes a file-system hierarchy through the GridFTP interface The plugins can be enabled through configuration described in the manual "The NorduGrid Grid Manager and GridFTP Server" nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/userspec.h0000644000000000000000000000012212675602216023421 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.501714 28 ctime=1513200662.4867779 nordugrid-arc-5.4.2/src/services/gridftpd/userspec.h0000644000175000002070000000264312675602216023475 0ustar00mockbuildmock00000000000000#ifndef GRID_SERVER_USERSPEC_H #define GRID_SERVER_USERSPEC_H #include #include #include "auth/auth.h" #include "auth/unixmap.h" class userspec_t { friend std::string subst_user_spec(std::string &in,userspec_t *spec); public: AuthUser user; private: int uid; int gid; std::string home; int host[4]; short unsigned int port; std::string config_file; bool refresh(void); UnixMap map; UnixMap default_map; public: bool gridmap; void free(void) const; userspec_t(void); ~userspec_t(void); // Initial setup bool fill(globus_ftp_control_auth_info_t *auth,globus_ftp_control_handle_t *handle,const char* cfg = NULL); bool fill(AuthUser& user,const char* cfg = NULL); int get_uid(void) const { return uid; }; int get_gid(void) const { return gid; }; const char* get_uname(void); const char* get_gname(void); const std::string& get_config_file(void) { return config_file; } short unsigned int get_port(void) const { return port; }; const int* get_host(void) const { return host; }; const AuthUser& get_user(void) const { return user; }; AuthResult mapname(const char* line); AuthResult mapgroup(const char* line); AuthResult mapvo(const char* line); bool mapped(void) const { return (bool)map; }; }; std::string subst_user_spec(std::string &in,userspec_t *spec); bool check_gridmap(const char* dn,char** user,const char* mapfile = NULL); #endif nordugrid-arc-5.4.2/src/services/gridftpd/PaxHeaders.7502/gridftpd.init.in0000644000000000000000000000012712442324223024510 xustar000000000000000027 mtime=1418307731.508846 30 atime=1513200651.162639401 30 ctime=1513200662.469777692 nordugrid-arc-5.4.2/src/services/gridftpd/gridftpd.init.in0000755000175000002070000001623312442324223024562 0ustar00mockbuildmock00000000000000#!/bin/bash # # Init file for the ARC gridftp server # # chkconfig: 2345 75 25 # description: ARC gridftpd # # config: /etc/sysconfig/nordugrid # config: /etc/sysconfig/gridftpd # config: @prefix@/etc/arc.conf # config: /etc/arc.conf ### BEGIN INIT INFO # Provides: gridftpd # Required-Start: $local_fs $remote_fs # Required-Stop: $local_fs $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: ARC gridftpd # Description: ARC gridftp server ### END INIT INFO # source function library if [ -f /etc/init.d/functions ]; then . 
/etc/init.d/functions log_success_msg() { echo -n "$@" success "$@" echo } log_warning_msg() { echo -n "$@" warning "$@" echo } log_failure_msg() { echo -n "$@" failure "$@" echo } elif [ -f /lib/lsb/init-functions ]; then . /lib/lsb/init-functions else echo "Error: Cannot source neither init.d nor lsb functions" exit 1 fi add_library_path() { location="$1" if [ ! "x$location" = "x" ] ; then if [ ! "$location" = "/usr" ] ; then libdir="$location/lib" libdir64="$location/lib64" if [ -d "$libdir64" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir64" else LD_LIBRARY_PATH="$libdir64:$LD_LIBRARY_PATH" fi fi if [ -d "$libdir" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir" else LD_LIBRARY_PATH="$libdir:$LD_LIBRARY_PATH" fi fi fi fi } prog=gridftpd RUN=yes # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/${prog} ]; then . /etc/sysconfig/${prog} elif [ -r /etc/default/${prog} ]; then . /etc/default/${prog} fi if [ "$RUN" != "yes" ] ; then log_warning_msg "$prog disabled, please adjust the configuration to your" log_warning_msg "needs and then set RUN to 'yes' in /etc/default/$prog to enable it." exit 0 fi # GLOBUS_LOCATION GLOBUS_LOCATION=${GLOBUS_LOCATION:-@DEFAULT_GLOBUS_LOCATION@} if [ ! -d "$GLOBUS_LOCATION" ]; then log_failure_msg "GLOBUS_LOCATION ($GLOBUS_LOCATION) not found" exit 1 fi export GLOBUS_LOCATION # ARC_LOCATION ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then log_failure_msg "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi export ARC_LOCATION readconfigvar() { fname=$1 if [ ! -r "$fname" ]; then return fi bname="[$2]" vname=$3 value= cat "$fname" | grep -e '^\[' -e "^${vname}=" | { while true; do read line if [ ! $? = 0 ] ; then return fi if [ "$line" = "$bname" ] ; then while true ; do read line if [ ! $? = 0 ] ; then return fi lstart=`echo "$line" | head -c 1` if [ "$lstart" = '[' ] ; then return fi vlname=`echo "$line" | sed 's/=.*//;t;s/.*//'` if [ "$vlname" = "$vname" ] ; then val=`echo "$line" | sed 's/[^=]*=//'` eval "echo $val" return fi done fi done } } check_cert() { X509_USER_CERT=`readconfigvar "$ARC_CONFIG" gridftpd x509_user_cert` X509_USER_KEY=`readconfigvar "$ARC_CONFIG" gridftpd x509_user_key` if [ -z "$X509_USER_CERT" ] ; then X509_USER_CERT=`readconfigvar "$ARC_CONFIG" common x509_user_cert` fi if [ -z "$X509_USER_KEY" ] ; then X509_USER_KEY=`readconfigvar "$ARC_CONFIG" common x509_user_key` fi if [ -z "$X509_USER_CERT" ] ; then X509_USER_CERT=/etc/grid-security/hostcert.pem fi if [ -z "$X509_USER_KEY" ] ; then X509_USER_KEY=/etc/grid-security/hostkey.pem fi if [ ! -f "$X509_USER_CERT" ] ; then log_failure_msg "Host certificate not found" exit 1 fi if [ ! -f "$X509_USER_KEY" ] ; then log_failure_msg "Host key not found" exit 1 fi # check permissions on key perms=`stat -L -c %a "$X509_USER_KEY"` if [ "$perms" != "600" ] && [ "$perms" != "400" ] ; then log_failure_msg "Host key must be readable only by user" exit 1 fi } CMD="$ARC_LOCATION/sbin/$prog" if [ ! -x "$CMD" ]; then log_failure_msg "Missing executable" exit 1 fi # ARC_CONFIG if [ "x$ARC_CONFIG" = "x" ]; then if [ -r $ARC_LOCATION/etc/arc.conf ]; then ARC_CONFIG=$ARC_LOCATION/etc/arc.conf elif [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi fi if [ ! 
-r "$ARC_CONFIG" ]; then log_failure_msg "ARC configuration not found (usually /etc/arc.conf)" exit 1 fi CMD="$CMD -c '$ARC_CONFIG'" # VOMS_LOCATION VOMS_LOCATION=${VOMS_LOCATION:-@DEFAULT_VOMS_LOCATION@} add_library_path "$VOMS_LOCATION" add_library_path "$GLOBUS_LOCATION" if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@ else LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@:$LD_LIBRARY_PATH fi export LD_LIBRARY_PATH PID_FILE=`readconfigvar "$ARC_CONFIG" gridftpd pidfile` if [ `id -u` = 0 ] ; then # Debian does not have /var/lock/subsys if [ -d /var/lock/subsys ]; then LOCKFILE=/var/lock/subsys/$prog else LOCKFILE=/var/lock/$prog fi if [ "x$PID_FILE" = "x" ]; then PID_FILE=/var/run/$prog.pid fi else LOCKFILE=$HOME/$prog.lock if [ "x$PID_FILE" = "x" ]; then PID_FILE=$HOME/$prog.pid fi fi logfile=`readconfigvar "$ARC_CONFIG" gridftpd logfile` if [ "x$logfile" = "x" ]; then logfile=/var/log/arc/gridftpd.log fi if [ ! -d `dirname $logfile` ]; then mkdir -p `dirname $logfile` fi CMD="$CMD -P '$PID_FILE'" start() { check_cert echo -n "Starting $prog: " # Check if we are already running if [ -f $PID_FILE ]; then read pid < $PID_FILE if [ "x$pid" != "x" ]; then ps -p "$pid" -o comm 2>/dev/null | grep "^$prog$" 1>/dev/null 2>/dev/null if [ $? -eq 0 ] ; then log_success_msg "already running (pid $pid)" return 0 fi fi rm -f "$PID_FILE" "$LOCKFILE" fi eval "$CMD" RETVAL=$? if [ $RETVAL -eq 0 ]; then touch $LOCKFILE log_success_msg else log_failure_msg fi return $RETVAL } stop() { echo -n "Stopping $prog: " if [ -f "$PID_FILE" ]; then read pid < "$PID_FILE" if [ ! -z "$pid" ] ; then kill "$pid" RETVAL=$? if [ $RETVAL -eq 0 ]; then log_success_msg else log_failure_msg fi sleep 1 kill -9 "$pid" 1>/dev/null 2>&1 rm -f "$PID_FILE" "$LOCKFILE" else RETVAL=1 log_failure_msg "$prog shutdown - pidfile is empty" fi else RETVAL=0 log_success_msg "$prog shutdown - already stopped" fi return $RETVAL } status() { if [ -f "$PID_FILE" ]; then read pid < "$PID_FILE" if [ "$pid" != "" ]; then if ps -p "$pid" > /dev/null; then echo "$1 (pid $pid) is running..." return 0 fi echo "$1 stopped but pid file exists" return 1 fi fi if [ -f $LOCKFILE ]; then echo "$1 stopped but lockfile exists" return 2 fi echo "$1 is stopped" return 3 } restart() { stop start } case "$1" in start) start ;; stop) stop ;; status) status $prog ;; restart | force-reload) restart ;; reload) ;; condrestart | try-restart) [ -f $LOCKFILE ] && restart || : ;; *) echo "Usage: $0 {start|stop|status|restart|force-reload|reload|condrestart|try-restart}" exit 1 ;; esac exit $? 
nordugrid-arc-5.4.2/src/services/PaxHeaders.7502/examples0000644000000000000000000000013213214316030021340 xustar000000000000000030 mtime=1513200664.162798398 30 atime=1513200668.719854133 30 ctime=1513200664.162798398 nordugrid-arc-5.4.2/src/services/examples/0000755000175000002070000000000013214316030021463 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/examples/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712440333267023470 xustar000000000000000027 mtime=1417787063.331373 30 atime=1513200604.030062952 30 ctime=1513200664.159798362 nordugrid-arc-5.4.2/src/services/examples/Makefile.am0000644000175000002070000000034512440333267023534 0ustar00mockbuildmock00000000000000if PYTHON_SERVICE PYTHON_SERVICE = echo_python else PYTHON_SERVICE = endif #if JAVA_ENABLED #JAVA_SERVICE = echo_java #else JAVA_SERVICE = #endif SUBDIRS = $(PYTHON_SERVICE) $(JAVA_SERVICE) DIST_SUBDIRS = echo_python echo_java nordugrid-arc-5.4.2/src/services/examples/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315734023474 xustar000000000000000030 mtime=1513200604.064063367 30 atime=1513200651.758646691 30 ctime=1513200664.160798374 nordugrid-arc-5.4.2/src/services/examples/Makefile.in0000644000175000002070000005601213214315734023546 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/examples DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ 
ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ 
LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = 
@libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @PYTHON_SERVICE_FALSE@PYTHON_SERVICE = @PYTHON_SERVICE_TRUE@PYTHON_SERVICE = echo_python #if JAVA_ENABLED #JAVA_SERVICE = echo_java #else JAVA_SERVICE = #endif SUBDIRS = $(PYTHON_SERVICE) $(JAVA_SERVICE) DIST_SUBDIRS = echo_python echo_java all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/examples/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/examples/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/examples/PaxHeaders.7502/echo_python0000644000000000000000000000013213214316030023657 xustar000000000000000030 mtime=1513200664.189798729 30 atime=1513200668.719854133 30 ctime=1513200664.189798729 nordugrid-arc-5.4.2/src/services/examples/echo_python/0000755000175000002070000000000013214316030024002 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/examples/echo_python/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712244643123026004 xustar000000000000000027 mtime=1385383507.950275 30 atime=1513200604.129064162 30 ctime=1513200664.186798692 nordugrid-arc-5.4.2/src/services/examples/echo_python/Makefile.am0000644000175000002070000000022612244643123026046 0ustar00mockbuildmock00000000000000exampledir = $(pkgdatadir)/examples/echo_python example_DATA = README EchoService.py __init__.py schema/echo_python.xsd EXTRA_DIST = $(example_DATA) nordugrid-arc-5.4.2/src/services/examples/echo_python/PaxHeaders.7502/EchoService.py0000644000000000000000000000012412074623675026531 xustar000000000000000027 mtime=1358112701.620939 27 atime=1513200576.661728 30 ctime=1513200664.188798716 nordugrid-arc-5.4.2/src/services/examples/echo_python/EchoService.py0000644000175000002070000002207112074623675026600 0ustar00mockbuildmock00000000000000import arc import time logger = arc.Logger(arc.Logger_getRootLogger(), 'EchoService.py') wsrf_rp_ns = "http://docs.oasis-open.org/wsrf/rp-2" echo_ns = "http://www.nordugrid.org/schemas/echo" import threading class EchoService: def __init__(self, cfg): logger.msg(arc.INFO, "EchoService (python) constructor called") # get the response-prefix from the config XML self.prefix = str(cfg.Get('prefix')) # get the response-suffix from the config XML self.suffix = str(cfg.Get('suffix')) logger.msg(arc.DEBUG, "EchoService (python) has prefix %(prefix)s and suffix %(suffix)s" % {'prefix': self.prefix, 'suffix': self.suffix}) self.ssl_config = self.parse_ssl_config(cfg) thread_test = str(cfg.Get('ThreadTest')) if thread_test: threading.Thread(target = self.infinite, args=[thread_test]).start() def __del__(self): logger.msg(arc.INFO, "EchoService (python) destructor called") def parse_ssl_config(self, cfg): try: client_ssl_node = cfg.Get('ClientSSLConfig') fromFile = str(client_ssl_node.Attribute('FromFile')) if fromFile: try: xml_string = file(fromFile).read() client_ssl_node = arc.XMLNode(xml_string) except: log.msg() pass if client_ssl_node.Size() == 0: return {} ssl_config = {} ssl_config['key_file'] = str(client_ssl_node.Get('KeyPath')) ssl_config['cert_file'] = str(client_ssl_node.Get('CertificatePath')) ca_file = str(client_ssl_node.Get('CACertificatePath')) if ca_file: ssl_config['ca_file'] = ca_file else: ssl_config['ca_dir'] = str(client_ssl_node.Get('CACertificatesDir')) return ssl_config except: import traceback logger.msg(arc.ERROR, traceback.format_exc()) return {} def infinite(self, url): logger.msg(arc.INFO, "EchoService (python) thread test starting") i = 0 while True: try: i += 1 cfg = arc.MCCConfig() s = arc.ClientSOAP(cfg, arc.URL(url)) ns = arc.NS('echo', echo_ns) outpayload = arc.PayloadSOAP(ns) outpayload.NewChild('echo:echo').NewChild('echo:say').Set('hi!') resp, status = s.process(outpayload) logger.msg(arc.INFO, "EchoService (python) thread test, iteration %(iteration)s %(status)s" % {'iteration': i, 'status': status}) time.sleep(3) except Exception, e: import traceback logger.msg(arc.DEBUG, traceback.format_exc()) def RegistrationCollector(self, doc): regentry = arc.XMLNode('') SrcAdv = 
regentry.NewChild('SrcAdv') SrcAdv.NewChild('Type').Set('org.nordugrid.tests.echo_python') #Place the document into the doc attribute doc.Replace(regentry) return True def GetLocalInformation(self): ns = arc.NS({'':'http://schemas.ogf.org/glue/2008/05/spec_2.0_d41_r01'}) info = arc.XMLNode(ns,'Domains') service_node = info.NewChild('AdminDomain').NewChild('Services').NewChild('Service') service_node.NewChild('Type').Set('org.nordugrid.tests.echo_python') endpoint_node = service_node.NewChild('Endpoint') endpoint_node.NewChild('HealthState').Set('ok') endpoint_node.NewChild('ServingState').Set('production') return info def process(self, inmsg, outmsg): logger.msg(arc.DEBUG, "EchoService (python) 'Process' called") # time.sleep(10) # get the payload from the message inpayload = inmsg.Payload() logger.msg(arc.VERBOSE, 'inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %s' % inmsg.Auth().Export(arc.SecAttr.ARCAuth).GetXML()) logger.msg(arc.VERBOSE, 'inmsg.Attributes().getAll() = %s ' % inmsg.Attributes().getAll()) logger.msg(arc.INFO, "EchoService (python) got: %s " % inpayload.GetXML()) # the first child of the payload should be the name of the request request_node = inpayload.Child() # get the namespace request_namespace = request_node.Namespace() logger.msg(arc.DEBUG, "EchoService (python) request_namespace: %s" % request_namespace) if request_namespace != echo_ns: if request_namespace == wsrf_rp_ns: outpayload = arc.PayloadSOAP(arc.NS({'wsrf-rp':wsrf_rp_ns})) outpayload.NewChild('wsrf-rp:GetResourcePropertyDocumentResponse').NewChild(self.GetLocalInformation()) outmsg.Payload(outpayload) logger.msg(arc.DEBUG, "outpayload %s" % outpayload.GetXML()) return arc.MCC_Status(arc.STATUS_OK) raise Exception, 'wrong namespace. expected: %s' % echo_ns # get the name of the request without the namespace prefix # this is the name of the Body node's first child request_name = request_node.Name() # create an answer payload ns = arc.NS({'echo': echo_ns}) outpayload = arc.PayloadSOAP(ns) # here we defined that 'echo' prefix will be the namespace prefix of 'http://www.nordugrid.org/schemas/echo' # get the message say = str(request_node.Get('say')) # put it between the response-prefix and the response-suffix hear = self.prefix + say + self.suffix if request_name == 'double': # if the name of the request is 'double' # we create a new echo message which we send to http://localhost:60000/Echo using the ClientSOAP object cfg = arc.MCCConfig() ssl = False if self.ssl_config: cfg.AddCertificate(self.ssl_config.get('cert_file', None)) cfg.AddPrivateKey(self.ssl_config.get('key_file', None)) if self.ssl_config.has_key('ca_file'): cfg.AddCAFile(self.ssl_config.get('ca_file', None)) else: cfg.AddCADir(self.ssl_config.get('ca_dir', None)) ssl = True if ssl: url = arc.URL('https://localhost:60000/Echo') logger.msg(arc.DEBUG, 'Calling https://localhost:60000/Echo using ClientSOAP') else: url = arc.URL('http://localhost:60000/Echo') logger.msg(arc.DEBUG, 'Calling http://localhost:60000/Echo using ClientSOAP') # creating the ClientSOAP object s = arc.ClientSOAP(cfg, url) new_payload = arc.PayloadSOAP(ns) # creating the message new_payload.NewChild('echo:echo').NewChild('echo:say').Set(hear) logger.msg(arc.DEBUG, 'new_payload %s' % new_payload.GetXML()) # sending the message resp, status = s.process(new_payload) # get the response hear = str(resp.Get('echoResponse').Get('hear')) elif request_name == 'httplib': # if the name of the request is 'httplib' # we create a new echo message which we send to http://localhost:60000/echo 
using python's built-in http client import httplib logger.msg(arc.DEBUG, 'Calling http://localhost:60000/Echo using httplib') # create the connection h = httplib.HTTPConnection('localhost', 60000) new_payload = arc.PayloadSOAP(ns) # create the message new_payload.NewChild('echo:echo').NewChild('echo:say').Set(hear) logger.msg(arc.DEBUG, 'new_payload %s' % new_payload.GetXML()) # send the message h.request('POST', '/Echo', new_payload.GetXML()) r = h.getresponse() response = r.read() logger.msg(arc.DEBUG, response) resp = arc.XMLNode(response) # get the response hear = str(resp.Child().Get('echoResponse').Get('hear')) elif request_name == 'wait': logger.msg(arc.DEBUG, 'Start waiting 10 sec...') time.sleep(10) logger.msg(arc.DEBUG, 'Waiting ends.') # we create a node at '/echo:echoResponse/echo:hear' and put the string in it outpayload.NewChild('echo:echoResponse').NewChild('echo:hear').Set(hear) outmsg.Payload(outpayload) logger.msg(arc.DEBUG, "outpayload %s" % outpayload.GetXML()) # return with STATUS_OK return arc.MCC_Status(arc.STATUS_OK) # you can easily test this with this shellscript: """ MESSAGE='HELLO' echo Request: echo $MESSAGE echo echo Response: curl -d "$MESSAGE" http://localhost:60000/Echo echo """ # nordugrid-arc-5.4.2/src/services/examples/echo_python/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315734026013 xustar000000000000000030 mtime=1513200604.162064566 30 atime=1513200651.790647082 30 ctime=1513200664.187798704 nordugrid-arc-5.4.2/src/services/examples/echo_python/Makefile.in0000644000175000002070000004357113214315734026073 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/examples/echo_python DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(exampledir)" DATA = $(example_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS 
= @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = 
@LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ 
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ exampledir = $(pkgdatadir)/examples/echo_python example_DATA = README EchoService.py __init__.py schema/echo_python.xsd EXTRA_DIST = $(example_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/examples/echo_python/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/examples/echo_python/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-exampleDATA: $(example_DATA) @$(NORMAL_INSTALL) test -z "$(exampledir)" || $(MKDIR_P) "$(DESTDIR)$(exampledir)" @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(exampledir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(exampledir)" || exit $$?; \ done uninstall-exampleDATA: @$(NORMAL_UNINSTALL) @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(exampledir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(exampledir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d 
$$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(exampledir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-exampleDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-exampleDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exampleDATA install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-exampleDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/examples/echo_python/PaxHeaders.7502/__init__.py0000644000000000000000000000012410670336505026062 xustar000000000000000027 mtime=1189199173.632646 27 atime=1513200576.662728 30 ctime=1513200664.189798729 nordugrid-arc-5.4.2/src/services/examples/echo_python/__init__.py0000644000175000002070000000000010670336505026115 0ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/examples/echo_python/PaxHeaders.7502/schema0000644000000000000000000000013213214316030025117 xustar000000000000000030 mtime=1513200664.191798753 30 atime=1513200668.719854133 30 ctime=1513200664.191798753 nordugrid-arc-5.4.2/src/services/examples/echo_python/schema/0000755000175000002070000000000013214316030025242 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/examples/echo_python/schema/PaxHeaders.7502/echo_python.xsd0000644000000000000000000000012411255700321030237 xustar000000000000000027 mtime=1253540049.444682 27 atime=1513200576.661728 30 ctime=1513200664.191798753 nordugrid-arc-5.4.2/src/services/examples/echo_python/schema/echo_python.xsd0000644000175000002070000000463511255700321030314 0ustar00mockbuildmock00000000000000 Prefix of the response string. Suffix of the response string. Defines the path of the XML file which contains TLS related clint configuration. If this attribute defined than the elements inside will be ignored. Container of TLS related client configuration Path of certificate private key Path of certificate Directory location of CA certificates nordugrid-arc-5.4.2/src/services/examples/echo_python/PaxHeaders.7502/README0000644000000000000000000000012411037472457024636 xustar000000000000000027 mtime=1216247087.350292 27 atime=1513200576.660728 30 ctime=1513200664.184798668 nordugrid-arc-5.4.2/src/services/examples/echo_python/README0000644000175000002070000000011211037472457024675 0ustar00mockbuildmock00000000000000Simple test service to demonstrate how Python based services should work. nordugrid-arc-5.4.2/src/services/examples/PaxHeaders.7502/echo_java0000644000000000000000000000013213214316030023257 xustar000000000000000030 mtime=1513200664.210798985 30 atime=1513200668.719854133 30 ctime=1513200664.210798985 nordugrid-arc-5.4.2/src/services/examples/echo_java/0000755000175000002070000000000013214316030023402 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/examples/echo_java/PaxHeaders.7502/Makefile.am0000644000000000000000000000012611542111161025373 xustar000000000000000026 mtime=1300796017.30964 30 atime=1513200604.080063563 30 ctime=1513200664.208798961 nordugrid-arc-5.4.2/src/services/examples/echo_java/Makefile.am0000644000175000002070000000034111542111161025434 0ustar00mockbuildmock00000000000000javalibdir = $(libdir)/java javasharedir = $(prefix)/share/java CLEANFILES = *.class javashare_DATA = EchoService.class EXTRA_DIST = EchoService.java compile.sh EchoService.class: EchoService.java compile.sh ./compile.sh nordugrid-arc-5.4.2/src/services/examples/echo_java/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315734025413 xustar000000000000000030 mtime=1513200604.113063966 30 atime=1513200651.775646899 30 ctime=1513200664.209798973 nordugrid-arc-5.4.2/src/services/examples/echo_java/Makefile.in0000644000175000002070000004402613214315734025467 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. 
# @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/examples/echo_java DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(javasharedir)" DATA = $(javashare_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = 
@ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = 
@JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ 
host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ javalibdir = $(libdir)/java javasharedir = $(prefix)/share/java CLEANFILES = *.class javashare_DATA = EchoService.class EXTRA_DIST = EchoService.java compile.sh all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/examples/echo_java/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/examples/echo_java/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-javashareDATA: $(javashare_DATA) @$(NORMAL_INSTALL) test -z "$(javasharedir)" || $(MKDIR_P) "$(DESTDIR)$(javasharedir)" @list='$(javashare_DATA)'; test -n "$(javasharedir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(javasharedir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(javasharedir)" || exit $$?; \ done uninstall-javashareDATA: @$(NORMAL_UNINSTALL) @list='$(javashare_DATA)'; test -n "$(javasharedir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(javasharedir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(javasharedir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(javasharedir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-javashareDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-javashareDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-javashareDATA install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-javashareDATA EchoService.class: EchoService.java compile.sh ./compile.sh # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/examples/echo_java/PaxHeaders.7502/compile.sh0000644000000000000000000000012410633266125025334 xustar000000000000000027 mtime=1181576277.080903 27 atime=1513200576.659728 30 ctime=1513200664.210798985 nordugrid-arc-5.4.2/src/services/examples/echo_java/compile.sh0000755000175000002070000000010110633266125025374 0ustar00mockbuildmock00000000000000javac -verbose -classpath ../../../java/arc.jar EchoService.java nordugrid-arc-5.4.2/src/services/examples/echo_java/PaxHeaders.7502/EchoService.java0000644000000000000000000000012311337246323026411 xustar000000000000000026 mtime=1266502867.55712 27 atime=1513200576.659728 30 ctime=1513200664.210798985 nordugrid-arc-5.4.2/src/services/examples/echo_java/EchoService.java0000644000175000002070000000255711337246323026470 0ustar00mockbuildmock00000000000000import nordugrid.arc.*; public class EchoService { // private NS ns_; static { // load libjarc.so System.loadLibrary("jarc"); } public EchoService() { System.out.println("EchoService constructor called"); // NS ns_ = new NS(); // ns_.set("echo", "http://www.nordugrid.org/schemas/echo"); } /* public int process() { System.out.println("EchoService process called"); return 10; } */ public MCC_Status process(SOAPMessage inmsg, SOAPMessage outmsg) { System.out.println("EchoService process with messages called"); // XXX: error handling SOAPEnvelope in_payload = inmsg.Payload(); if (in_payload == null) { return new MCC_Status(StatusKind.GENERIC_ERROR); } XMLNode echo = in_payload.Get("echo"); XMLNode echo_op = new XMLNode(echo); String say = new String(echo_op.Get("say").toString()); System.out.println("Java got: " + say); String hear = new String("[ " + say + " ]"); NS ns_ = new NS(); ns_.set("echo", "http://www.nordugrid.org/schemas/echo"); PayloadSOAP outpayload = new PayloadSOAP(ns_); outpayload.NewChild("echo:echoResponse").NewChild("echo:hear").Set(hear); outmsg.Payload(outpayload); return new MCC_Status(StatusKind.STATUS_OK); } } nordugrid-arc-5.4.2/src/services/examples/echo_java/PaxHeaders.7502/README0000644000000000000000000000012411001653037024220 xustar000000000000000027 mtime=1208440351.928622 27 atime=1513200576.659728 30 ctime=1513200664.207798949 nordugrid-arc-5.4.2/src/services/examples/echo_java/README0000644000175000002070000000011011001653037024255 0ustar00mockbuildmock00000000000000simple test service to demonstarte how JAVA based services should work. 
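The EchoService.java above reads the say element from an echo request (in_payload.Get("echo") followed by Get("say")) and answers with echo:echoResponse/echo:hear under the namespace http://www.nordugrid.org/schemas/echo. For orientation, here is a minimal client-side sketch of how such a request payload could be assembled with the C++ HED classes used elsewhere in this tree; the header paths and the standalone main() are assumptions for illustration, not part of the shipped example.

#include <iostream>
#include <string>
// Header paths are an assumption; the original #include lines are not
// preserved in this dump.
#include <arc/XMLNode.h>
#include <arc/message/PayloadSOAP.h>

int main() {
  // Same prefix/URI pair that EchoService.java registers via NS.set().
  Arc::NS ns;
  ns["echo"] = "http://www.nordugrid.org/schemas/echo";

  // Build <echo:echo><echo:say>HELLO</echo:say></echo:echo> inside a SOAP
  // body, which is what process() expects when it calls Get("echo") and
  // Get("say").
  Arc::PayloadSOAP request(ns);
  request.NewChild("echo:echo").NewChild("echo:say") = "HELLO";

  // Print the SOAP document a client would POST; per the Java code above the
  // service replies with <echo:echoResponse><echo:hear>[ HELLO ]</echo:hear>.
  std::string doc;
  request.GetDoc(doc, true);
  std::cout << doc << std::endl;
  return 0;
}

The sketch only shows the message shape the Java service parses; actually sending the payload over HTTP/SOAP (and linking against the ARC libraries) is left out.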
nordugrid-arc-5.4.2/src/services/PaxHeaders.7502/cache_service0000644000000000000000000000013213214316027022313 xustar000000000000000030 mtime=1513200663.992796319 30 atime=1513200668.719854133 30 ctime=1513200663.992796319 nordugrid-arc-5.4.2/src/services/cache_service/0000755000175000002070000000000013214316027022436 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/cache_service/PaxHeaders.7502/CacheService.cpp0000644000000000000000000000012413153455410025423 xustar000000000000000027 mtime=1504598792.949892 27 atime=1513200575.456713 30 ctime=1513200663.990796295 nordugrid-arc-5.4.2/src/services/cache_service/CacheService.cpp0000644000175000002070000005502213153455410025474 0ustar00mockbuildmock00000000000000#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "CacheService.h" namespace Cache { static Arc::Plugin *get_service(Arc::PluginArgument* arg) { Arc::ServicePluginArgument* srvarg = arg?dynamic_cast(arg):NULL; if(!srvarg) return NULL; CacheService* s = new CacheService((Arc::Config*)(*srvarg),arg); if (*s) return s; delete s; return NULL; } Arc::Logger CacheService::logger(Arc::Logger::rootLogger, "CacheService"); CacheService::CacheService(Arc::Config *cfg, Arc::PluginArgument* parg) : RegisteredService(cfg,parg), dtr_generator(NULL) { valid = false; // read configuration information /* cacheservice config specifies A-REX conf file /etc/arc.conf */ ns["cacheservice"] = "urn:cacheservice_config"; if (!(*cfg)["cache"] || !(*cfg)["cache"]["config"]) { // error - no config defined logger.msg(Arc::ERROR, "No A-REX config file found in cache service configuration"); return; } std::string arex_config = (std::string)(*cfg)["cache"]["config"]; logger.msg(Arc::INFO, "Using A-REX config file %s", arex_config); config.SetConfigFile(arex_config); if (!config.Load()) { logger.msg(Arc::ERROR, "Failed to process A-REX configuration in %s", arex_config); return; } config.Print(); if (config.CacheParams().getCacheDirs().empty()) { // do we care about remote caches? logger.msg(Arc::ERROR, "No caches defined in configuration"); return; } // check if we are running along with A-REX or standalone bool with_arex = false; if ((*cfg)["cache"]["witharex"] && (std::string)(*cfg)["cache"]["witharex"] == "true") with_arex = true; // start Generator for data staging dtr_generator = new CacheServiceGenerator(config, with_arex); valid = true; } CacheService::~CacheService(void) { if (dtr_generator) { delete dtr_generator; dtr_generator = NULL; } } Arc::MCC_Status CacheService::CacheCheck(Arc::XMLNode in, Arc::XMLNode out, const Arc::User& mapped_user) { /* Accepts: url ... Returns url true 1234 ... 
*/ // substitute cache paths according to mapped user ARex::CacheConfig cache_params(config.CacheParams()); cache_params.substitute(config, mapped_user); Arc::FileCache cache(cache_params.getCacheDirs(), "0", mapped_user.get_uid(), mapped_user.get_gid()); if (!cache) { logger.msg(Arc::ERROR, "Error creating cache"); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheCheck", "Server error with cache"); } Arc::XMLNode resp = out.NewChild("CacheCheckResponse"); Arc::XMLNode results = resp.NewChild("CacheCheckResult"); for(int n = 0;;++n) { Arc::XMLNode id = in["CacheCheck"]["TheseFilesNeedToCheck"]["FileURL"][n]; if (!id) break; std::string fileurl = (std::string)in["CacheCheck"]["TheseFilesNeedToCheck"]["FileURL"][n]; Arc::XMLNode resultelement = results.NewChild("Result"); bool fileexist = false; std::string file_lfn; Arc::initializeCredentialsType cred_type(Arc::initializeCredentialsType::SkipCredentials); Arc::UserConfig usercfg(cred_type); Arc::URL url(fileurl); Arc::DataHandle d(url, usercfg); logger.msg(Arc::INFO, "Looking up URL %s", d->str()); file_lfn = cache.File(d->str()); if (file_lfn.empty()) { logger.msg(Arc::ERROR, "Empty filename returned from FileCache"); resultelement.NewChild("ExistInTheCache") = "false"; resultelement.NewChild("FileSize") = "0"; continue; } logger.msg(Arc::INFO, "Cache file is %s", file_lfn); struct stat fileStat; if (Arc::FileStat(file_lfn, &fileStat, false)) fileexist = true; else if (errno != ENOENT) logger.msg(Arc::ERROR, "Problem accessing cache file %s: %s", file_lfn, Arc::StrError(errno)); resultelement.NewChild("FileURL") = fileurl; resultelement.NewChild("ExistInTheCache") = (fileexist ? "true": "false"); if (fileexist) resultelement.NewChild("FileSize") = Arc::tostring(fileStat.st_size); else resultelement.NewChild("FileSize") = "0"; } return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status CacheService::CacheLink(Arc::XMLNode in, Arc::XMLNode out, const Arc::User& mapped_user) { /* Accepts: url // remote file name // local file on session dir ... uname 123456789 90 false Returns: url 0 success ... */ // read in inputs bool dostage = false; if (in["CacheLink"]["Stage"]) dostage = ((std::string)in["CacheLink"]["Stage"] == "true") ? 
true : false; Arc::XMLNode jobidnode = in["CacheLink"]["JobID"]; if (!jobidnode) { logger.msg(Arc::ERROR, "No job ID supplied"); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLink", "Bad input (no JobID specified)"); } std::string jobid = (std::string)jobidnode; int priority = 50; Arc::XMLNode prioritynode = in["CacheLink"]["Priority"]; if (prioritynode) { if (!Arc::stringto((std::string)prioritynode, priority)) { logger.msg(Arc::ERROR, "Bad number in priority element: %s", (std::string)prioritynode); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLink", "Bad input (bad number in Priority)"); } if (priority <= 0) priority = 1; if (priority > 100) priority = 100; } Arc::XMLNode uname = in["CacheLink"]["Username"]; if (!uname) { logger.msg(Arc::ERROR, "No username supplied"); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLink", "Bad input (no Username specified)"); } std::string username = (std::string)uname; // TODO: try to force mapping to supplied user if (username != mapped_user.Name()) { logger.msg(Arc::ERROR, "Supplied username %s does not match mapped username %s", username, mapped_user.Name()); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLink", "Supplied username does not match mapped user"); } // check job id and session dir are ok // substitute session dirs and use tmp configuration to find the one for this job std::vector sessions = config.SessionRoots(); for (std::vector::iterator session = sessions.begin(); session != sessions.end(); ++session) { config.Substitute(*session, mapped_user); } ARex::GMConfig tmp_config; tmp_config.SetSessionRoot(sessions); std::string session_root = tmp_config.SessionRoot(jobid); if (session_root.empty()) { logger.msg(Arc::ERROR, "No session directory found"); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLink", "No session directory found for supplied Job ID"); } std::string session_dir = session_root + '/' + jobid; logger.msg(Arc::INFO, "Using session dir %s", session_dir); struct stat fileStat; if (!Arc::FileStat(session_dir, &fileStat, true)) { logger.msg(Arc::ERROR, "Failed to stat session dir %s", session_dir); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLink", "Failed to access session dir"); } // check permissions - owner must be same as mapped user if (fileStat.st_uid != mapped_user.get_uid()) { logger.msg(Arc::ERROR, "Session dir %s is owned by %i, but current mapped user is %i", session_dir, fileStat.st_uid, mapped_user.get_uid()); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLink", "Failed to access session dir"); } // get delegated proxy info to check permission on cached files // TODO: use credentials of caller of this service. For now ask the // delegation store for the proxy of the job. 
ARex::DelegationStore::DbType deleg_db_type = ARex::DelegationStore::DbBerkeley; switch (config.DelegationDBType()) { case ARex::GMConfig::deleg_db_bdb: deleg_db_type = ARex::DelegationStore::DbBerkeley; break; case ARex::GMConfig::deleg_db_sqlite: deleg_db_type = ARex::DelegationStore::DbSQLite; break; } ARex::DelegationStore dstore(config.DelegationDir(), deleg_db_type, false); std::string proxy_path; // Read job's local file to extract delegation id ARex::JobLocalDescription job_desc; if (job_local_read_file(jobid, config, job_desc) && !job_desc.delegationid.empty()) { proxy_path = dstore.FindCred(job_desc.delegationid, job_desc.DN); } if (proxy_path.empty() || !Arc::FileStat(proxy_path, &fileStat, true)) { logger.msg(Arc::ERROR, "Failed to access proxy of given job id %s at %s", jobid, proxy_path); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLink", "Failed to access proxy"); } Arc::UserConfig usercfg; usercfg.UtilsDirPath(config.ControlDir()); usercfg.ProxyPath(proxy_path); usercfg.InitializeCredentials(Arc::initializeCredentialsType::NotTryCredentials); std::string dn; Arc::Time exp_time; try { Arc::Credential ci(usercfg.ProxyPath(), usercfg.ProxyPath(), usercfg.CACertificatesDirectory(), ""); dn = ci.GetIdentityName(); exp_time = ci.GetEndTime(); } catch (Arc::CredentialError& e) { logger.msg(Arc::ERROR, "Couldn't handle certificate: %s", e.what()); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLink", std::string("Error with proxy at "+proxy_path)); } logger.msg(Arc::INFO, "DN is %s", dn); // create cache // substitute cache paths according to mapped user ARex::CacheConfig cache_params(config.CacheParams()); cache_params.substitute(config, mapped_user); Arc::FileCache cache(cache_params.getCacheDirs(), jobid, mapped_user.get_uid(), mapped_user.get_gid()); if (!cache) { logger.msg(Arc::ERROR, "Error with cache configuration"); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheCheck", "Server error with cache"); } // set up response structure Arc::XMLNode resp = out.NewChild("CacheLinkResponse"); Arc::XMLNode results = resp.NewChild("CacheLinkResult"); std::map to_download; // files not in cache (remote, local) bool error_happened = false; // if true then don't bother with downloads at the end // loop through all files for (int n = 0;;++n) { Arc::XMLNode id = in["CacheLink"]["TheseFilesNeedToLink"]["File"][n]; if (!id) break; Arc::XMLNode f_url = id["FileURL"]; if (!f_url) break; Arc::XMLNode f_name = id["FileName"]; if (!f_name) break; std::string fileurl = (std::string)f_url; std::string filename = (std::string)f_name; std::string session_file = session_dir + '/' + filename; logger.msg(Arc::INFO, "Looking up URL %s", fileurl); Arc::URL u(fileurl); Arc::DataHandle d(u, usercfg); d->SetSecure(false); // the actual url used with the cache std::string url = d->str(); bool available = false; bool is_locked = false; if (!cache.Start(url, available, is_locked, true)) { Arc::XMLNode resultelement = results.NewChild("Result"); resultelement.NewChild("FileURL") = fileurl; if (is_locked) { resultelement.NewChild("ReturnCode") = Arc::tostring(CacheService::Locked); resultelement.NewChild("ReturnCodeExplanation") = "File is locked"; } else { resultelement.NewChild("ReturnCode") = Arc::tostring(CacheService::CacheError); resultelement.NewChild("ReturnCodeExplanation") = "Error starting cache"; } error_happened = true; continue; } if (!available) { cache.Stop(url); // file not in cache - the result status for these files will be set later to_download[fileurl] = session_file; continue; } 
Arc::XMLNode resultelement = results.NewChild("Result"); resultelement.NewChild("FileURL") = fileurl; // file is in cache - check permissions if (!cache.CheckDN(url, dn)) { Arc::DataStatus res = d->Check(false); if (!res.Passed()) { logger.msg(Arc::ERROR, "Permission checking failed: %s", url); resultelement.NewChild("ReturnCode") = Arc::tostring(CacheService::PermissionError); resultelement.NewChild("ReturnCodeExplanation") = "Permission denied"; error_happened = true; continue; } cache.AddDN(url, dn, exp_time); logger.msg(Arc::VERBOSE, "Permission checking passed for url %s", url); } // link file bool try_again = false; // TODO add executable and copy flags to request if (!cache.Link(session_file, url, false, false, false, try_again)) { // If locked, send to DTR and let it deal with the retry strategy if (try_again) { to_download[fileurl] = session_file; continue; } // failed to link - report as if not there resultelement.NewChild("ReturnCode") = Arc::tostring(CacheService::LinkError); resultelement.NewChild("ReturnCodeExplanation") = "Failed to link to session dir"; error_happened = true; continue; } // Successfully linked to session - move to scratch if necessary if (!config.ScratchDir().empty()) { std::string scratch_file(config.ScratchDir()+'/'+jobid+'/'+filename); // Access session and scratch under mapped uid Arc::FileAccess fa; if (!fa.fa_setuid(mapped_user.get_uid(), mapped_user.get_gid()) || !fa.fa_rename(session_file, scratch_file)) { logger.msg(Arc::ERROR, "Failed to move %s to %s: %s", session_file, scratch_file, Arc::StrError(errno)); resultelement.NewChild("ReturnCode") = Arc::tostring(CacheService::LinkError); resultelement.NewChild("ReturnCodeExplanation") = "Failed to link to move file from session dir to scratch"; error_happened = true; continue; } } // everything went ok so report success resultelement.NewChild("ReturnCode") = Arc::tostring(CacheService::Success); resultelement.NewChild("ReturnCodeExplanation") = "Success"; } // check for any downloads to perform, only if requested and there were no previous errors if (to_download.empty() || error_happened || !dostage) { for (std::map::iterator i = to_download.begin(); i != to_download.end(); ++i) { Arc::XMLNode resultelement = results.NewChild("Result"); resultelement.NewChild("FileURL") = i->first; resultelement.NewChild("ReturnCode") = Arc::tostring(CacheService::NotAvailable); resultelement.NewChild("ReturnCodeExplanation") = "File not available"; } return Arc::MCC_Status(Arc::STATUS_OK); } bool stage_start_error = false; // Loop through files to download and start a DTR for each one for (std::map::iterator i = to_download.begin(); i != to_download.end(); ++i) { Arc::XMLNode resultelement = results.NewChild("Result"); resultelement.NewChild("FileURL") = i->first; // if one DTR failed to start then don't start any more // TODO cancel others already started if (stage_start_error) { resultelement.NewChild("ReturnCode") = Arc::tostring(CacheService::DownloadError); resultelement.NewChild("ReturnCodeExplanation") = "Failed to start data staging"; continue; } logger.msg(Arc::VERBOSE, "Starting new DTR for %s", i->first); if (!dtr_generator->addNewRequest(mapped_user, i->first, i->second, usercfg, jobid, priority)) { logger.msg(Arc::ERROR, "Failed to start new DTR for %s", i->first); resultelement.NewChild("ReturnCode") = Arc::tostring(CacheService::DownloadError); resultelement.NewChild("ReturnCodeExplanation") = "Failed to start data staging"; stage_start_error = true; } else { resultelement.NewChild("ReturnCode") = 
Arc::tostring(CacheService::Staging); resultelement.NewChild("ReturnCodeExplanation") = "Staging started"; } } return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status CacheService::CacheLinkQuery(Arc::XMLNode in, Arc::XMLNode out) { /* Accepts: 123456789 Returns: 0 success */ Arc::XMLNode jobidnode = in["CacheLinkQuery"]["JobID"]; if (!jobidnode) { logger.msg(Arc::ERROR, "No job ID supplied"); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLinkQuery", "Bad input (no JobID specified)"); } std::string jobid = (std::string)jobidnode; // set up response structure Arc::XMLNode resp = out.NewChild("CacheLinkQueryResponse"); Arc::XMLNode results = resp.NewChild("CacheLinkQueryResult"); Arc::XMLNode resultelement = results.NewChild("Result"); std::string error; // query Generator for DTR status if (dtr_generator->queryRequestsFinished(jobid, error)) { if (error.empty()) { logger.msg(Arc::INFO, "Job %s: all files downloaded successfully", jobid); resultelement.NewChild("ReturnCode") = Arc::tostring(CacheService::Success); resultelement.NewChild("ReturnCodeExplanation") = "Success"; } else if (error == "Job not found") { resultelement.NewChild("ReturnCode") = Arc::tostring(CacheService::CacheError); resultelement.NewChild("ReturnCodeExplanation") = "No such job"; } else { logger.msg(Arc::INFO, "Job %s: Some downloads failed", jobid); resultelement.NewChild("ReturnCode") = Arc::tostring(CacheService::DownloadError); resultelement.NewChild("ReturnCodeExplanation") = "Download failed: " + error; } } else { logger.msg(Arc::VERBOSE, "Job %s: files still downloading", jobid); resultelement.NewChild("ReturnCode") = Arc::tostring(CacheService::Staging); resultelement.NewChild("ReturnCodeExplanation") = "Still staging"; } return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status CacheService::process(Arc::Message &inmsg, Arc::Message &outmsg) { // Check authorization if(!ProcessSecHandlers(inmsg, "incoming")) { logger.msg(Arc::ERROR, "CacheService: Unauthorized"); return make_soap_fault(outmsg, "Authorization failed"); } std::string method = inmsg.Attributes()->get("HTTP:METHOD"); // find local user std::string mapped_username = inmsg.Attributes()->get("SEC:LOCALID"); if (mapped_username.empty()) { logger.msg(Arc::ERROR, "No local user mapping found"); return make_soap_fault(outmsg, "No local user mapping found"); } Arc::User mapped_user(mapped_username); if(method == "POST") { logger.msg(Arc::VERBOSE, "process: POST"); logger.msg(Arc::INFO, "Identity is %s", inmsg.Attributes()->get("TLS:PEERDN")); // Both input and output are supposed to be SOAP // Extracting payload Arc::PayloadSOAP* inpayload = NULL; try { inpayload = dynamic_cast(inmsg.Payload()); } catch(std::exception& e) { }; if(!inpayload) { logger.msg(Arc::ERROR, "input is not SOAP"); return make_soap_fault(outmsg); } // Applying known namespaces inpayload->Namespaces(ns); if(logger.getThreshold() <= Arc::VERBOSE) { std::string str; inpayload->GetDoc(str, true); logger.msg(Arc::VERBOSE, "process: request=%s",str); } // Analyzing request Arc::XMLNode op = inpayload->Child(0); if(!op) { logger.msg(Arc::ERROR, "input does not define operation"); return make_soap_fault(outmsg); } logger.msg(Arc::VERBOSE, "process: operation: %s",op.Name()); Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(ns); outpayload->Namespaces(ns); Arc::MCC_Status result(Arc::STATUS_OK); // choose operation if (MatchXMLName(op,"CacheCheck")) { result = CacheCheck(*inpayload, *outpayload, mapped_user); } else if (MatchXMLName(op, "CacheLink")) { result = 
CacheLink(*inpayload, *outpayload, mapped_user); } else if (MatchXMLName(op, "CacheLinkQuery")) { result = CacheLinkQuery(*inpayload, *outpayload); } else { // unknown operation logger.msg(Arc::ERROR, "SOAP operation is not supported: %s", op.Name()); delete outpayload; return make_soap_fault(outmsg); } if (!result) return make_soap_fault(outmsg, result.getExplanation()); if (logger.getThreshold() <= Arc::VERBOSE) { std::string str; outpayload->GetDoc(str, true); logger.msg(Arc::VERBOSE, "process: response=%s", str); } outmsg.Payload(outpayload); if (!ProcessSecHandlers(outmsg,"outgoing")) { logger.msg(Arc::ERROR, "Security Handlers processing failed"); delete outmsg.Payload(NULL); return Arc::MCC_Status(); } } else { // only POST supported logger.msg(Arc::ERROR, "Only POST is supported in CacheService"); return Arc::MCC_Status(); } return Arc::MCC_Status(Arc::STATUS_OK); } bool CacheService::RegistrationCollector(Arc::XMLNode &doc) { Arc::NS isis_ns; isis_ns["isis"] = "http://www.nordugrid.org/schemas/isis/2008/08"; Arc::XMLNode regentry(isis_ns, "RegEntry"); regentry.NewChild("SrcAdv").NewChild("Type") = "org.nordugrid.execution.cacheservice"; regentry.New(doc); return true; } Arc::MCC_Status CacheService::make_soap_fault(Arc::Message& outmsg, const std::string& reason) { Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(ns,true); Arc::SOAPFault* fault = outpayload?outpayload->Fault():NULL; if(fault) { fault->Code(Arc::SOAPFault::Sender); if (reason.empty()) fault->Reason("Failed processing request"); else fault->Reason("Failed processing request: "+reason); } outmsg.Payload(outpayload); return Arc::MCC_Status(Arc::STATUS_OK); } } // namespace Cache extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "cacheservice", "HED:SERVICE", NULL, 0, &Cache::get_service }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/services/cache_service/PaxHeaders.7502/Makefile.am0000644000000000000000000000012713153455410024432 xustar000000000000000027 mtime=1504598792.949892 30 atime=1513200603.890061239 30 ctime=1513200663.984796221 nordugrid-arc-5.4.2/src/services/cache_service/Makefile.am0000644000175000002070000000223313153455410024474 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libcacheservice.la if SYSV_SCRIPTS_ENABLED CACHE_SCRIPT = arc-cache-service else CACHE_SCRIPT = endif initd_SCRIPTS = $(CACHE_SCRIPT) if SYSTEMD_UNITS_ENABLED CACHE_UNIT = arc-cache-service.service CACHE_UNIT_WRAPPER = arc-cache-service-start else CACHE_UNIT = CACHE_UNIT_WRAPPER = endif units_DATA = $(CACHE_UNIT) pkgdata_SCRIPTS = $(CACHE_UNIT_WRAPPER) libcacheservice_la_SOURCES = CacheService.h CacheService.cpp \ CacheServiceGenerator.h CacheServiceGenerator.cpp libcacheservice_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libcacheservice_la_LIBADD = \ ../a-rex/grid-manager/libgridmanager.la \ ../a-rex/delegation/libdelegation.la \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) libcacheservice_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/services/cache_service/PaxHeaders.7502/CacheServiceGenerator.cpp0000644000000000000000000000012412754411156027277 xustar000000000000000027 mtime=1471287918.139575 27 
atime=1513200575.477713 30 ctime=1513200663.992796319 nordugrid-arc-5.4.2/src/services/cache_service/CacheServiceGenerator.cpp0000644000175000002070000002153512754411156027352 0ustar00mockbuildmock00000000000000#include #include #include "../a-rex/grid-manager/conf/UrlMapConfig.h" #include "CacheServiceGenerator.h" namespace Cache { Arc::Logger CacheServiceGenerator::logger(Arc::Logger::rootLogger, "CacheServiceGenerator"); CacheServiceGenerator::CacheServiceGenerator(const ARex::GMConfig& conf, bool with_arex) : generator_state(DataStaging::INITIATED), use_host_cert(false), scratch_dir(conf.ScratchDir()), run_with_arex(with_arex), config(conf), staging_conf(config) { scheduler = DataStaging::Scheduler::getInstance(); if (run_with_arex) { // A-REX sets DTR configuration generator_state = DataStaging::RUNNING; return; } if (!staging_conf) return; // Convert A-REX configuration values to DTR configuration // TODO find location for DTR state log, should be different from A-REX's // Log level for DTR DataStaging::DTR::LOG_LEVEL = staging_conf.get_log_level(); // Processing limits scheduler->SetSlots(staging_conf.get_max_processor(), staging_conf.get_max_processor(), staging_conf.get_max_delivery(), staging_conf.get_max_emergency(), staging_conf.get_max_prepared()); // Transfer shares DataStaging::TransferSharesConf share_conf(staging_conf.get_share_type(), staging_conf.get_defined_shares()); scheduler->SetTransferSharesConf(share_conf); // Transfer limits DataStaging::TransferParameters transfer_limits; transfer_limits.min_current_bandwidth = staging_conf.get_min_speed(); transfer_limits.averaging_time = staging_conf.get_min_speed_time(); transfer_limits.min_average_bandwidth = staging_conf.get_min_average_speed(); transfer_limits.max_inactivity_time = staging_conf.get_max_inactivity_time(); scheduler->SetTransferParameters(transfer_limits); // URL mappings ARex::UrlMapConfig url_map(config); scheduler->SetURLMapping(url_map); // Preferred pattern scheduler->SetPreferredPattern(staging_conf.get_preferred_pattern()); // Delivery services scheduler->SetDeliveryServices(staging_conf.get_delivery_services()); // Limit on remote delivery size scheduler->SetRemoteSizeLimit(staging_conf.get_remote_size_limit()); // Set whether to use host cert for remote delivery use_host_cert = staging_conf.get_use_host_cert_for_remote_delivery(); // End of configuration - start Scheduler thread scheduler->start(); generator_state = DataStaging::RUNNING; } CacheServiceGenerator::~CacheServiceGenerator() { generator_state = DataStaging::STOPPED; if (!run_with_arex) scheduler->stop(); // delete scheduler? it is possible another thread is using the static instance } void CacheServiceGenerator::receiveDTR(DataStaging::DTR_ptr dtr) { // Take DTR out of processing map and add to finished jobs logger.msg(Arc::INFO, "DTR %s finished with state %s", dtr->get_id(), dtr->get_status().str()); std::string jobid (dtr->get_parent_job_id()); // Delete LogStreams and LogDestinations dtr->clean_log_destinations(); // Add to finished jobs std::string error_msg; if (dtr->error()) error_msg = dtr->get_error_status().GetDesc() + ". 
"; finished_lock.lock(); finished_jobs[jobid] += error_msg; finished_lock.unlock(); // remove from processing jobs processing_lock.lock(); std::pair::iterator, std::multimap::iterator> dtr_iterator = processing_dtrs.equal_range(jobid); if (dtr_iterator.first == dtr_iterator.second) { processing_lock.unlock(); logger.msg(Arc::WARNING, "No active job id %s", jobid); return; } // remove this DTR from the processing list for (std::multimap::iterator i = dtr_iterator.first; i != dtr_iterator.second; ++i) { if (i->second->get_id() == dtr->get_id()) { processing_dtrs.erase(i); break; } } processing_lock.unlock(); // Move to scratch if necessary if (!dtr->error() && !scratch_dir.empty()) { // Get filename relative to session dir std::string session_file = dtr->get_destination()->GetURL().Path(); std::string::size_type pos = session_file.find(jobid); if (pos == std::string::npos) { logger.msg(Arc::ERROR, "Could not determine session directory from filename %s", session_file); finished_lock.lock(); finished_jobs[jobid] += "Could not determine session directory from filename for during move to scratch. "; finished_lock.unlock(); return; } std::string scratch_file(scratch_dir+'/'+session_file.substr(pos)); // Access session and scratch under mapped uid Arc::FileAccess fa; if (!fa.fa_setuid(dtr->get_local_user().get_uid(), dtr->get_local_user().get_gid()) || !fa.fa_rename(session_file, scratch_file)) { logger.msg(Arc::ERROR, "Failed to move %s to %s: %s", session_file, scratch_file, Arc::StrError(errno)); finished_lock.lock(); finished_jobs[jobid] += "Failed to move file from session dir to scratch. "; finished_lock.unlock(); } } } bool CacheServiceGenerator::addNewRequest(const Arc::User& user, const std::string& source, const std::string& destination, const Arc::UserConfig& usercfg, const std::string& jobid, int priority) { if (generator_state != DataStaging::RUNNING) return false; // Logger for this DTR. Uses a string stream to keep log in memory rather // than a file. LogStream keeps a reference to the stream so we have to use // a pointer. The LogDestinations are deleted when the DTR is received back. 
// TODO: provide access to this log somehow std::stringstream * stream = new std::stringstream(); Arc::LogDestination * output = new Arc::LogStream(*stream); DataStaging::DTRLogger log(new Arc::Logger(Arc::Logger::getRootLogger(), "DataStaging")); log->removeDestinations(); log->addDestination(*output); DataStaging::DTR_ptr dtr(new DataStaging::DTR(source, destination, usercfg, jobid, user.get_uid(), log)); if (!(*dtr)) { logger.msg(Arc::ERROR, "Invalid DTR for source %s, destination %s", source, destination); log->deleteDestinations(); return false; } // set retry count (tmp errors only) dtr->set_tries_left(staging_conf.get_max_retries()); // set priority dtr->set_priority(priority); // set whether to use A-REX host certificate for remote delivery services dtr->host_cert_for_remote_delivery(use_host_cert); // use a separate share from A-REX downloads dtr->set_sub_share("cache-service-download"); // substitute cache paths based on user ARex::CacheConfig cache_params(config.CacheParams()); cache_params.substitute(config, user); DataStaging::DTRCacheParameters cache_parameters; cache_parameters.cache_dirs = cache_params.getCacheDirs(); // we are definitely going to download so remote caches are not useful here dtr->set_cache_parameters(cache_parameters); dtr->registerCallback(this, DataStaging::GENERATOR); dtr->registerCallback(scheduler, DataStaging::SCHEDULER); processing_lock.lock(); processing_dtrs.insert(std::pair(jobid, dtr)); processing_lock.unlock(); // Avoid logging when possible during scheduler submission because it gets // blocked by LFC calls locking the environment Arc::LogLevel log_level = Arc::Logger::getRootLogger().getThreshold(); Arc::Logger::getRootLogger().setThreshold(Arc::ERROR); DataStaging::DTR::push(dtr, DataStaging::SCHEDULER); Arc::Logger::getRootLogger().setThreshold(log_level); return true; } bool CacheServiceGenerator::queryRequestsFinished(const std::string& jobid, std::string& error) { // First check currently processing DTRs processing_lock.lock(); if (processing_dtrs.find(jobid) != processing_dtrs.end()) { logger.msg(Arc::VERBOSE, "DTRs still running for job %s", jobid); processing_lock.unlock(); return false; } processing_lock.unlock(); // Now check finished jobs finished_lock.lock(); if (finished_jobs.find(jobid) != finished_jobs.end()) { logger.msg(Arc::VERBOSE, "All DTRs finished for job %s", jobid); error = finished_jobs[jobid]; finished_lock.unlock(); return true; } // Job not running or finished - report error logger.msg(Arc::WARNING, "Job %s not found", jobid); error = "Job not found"; return true; } } // namespace Cache nordugrid-arc-5.4.2/src/services/cache_service/PaxHeaders.7502/arc-cache-service.service.in0000644000000000000000000000012612754431715027640 xustar000000000000000027 mtime=1471296461.229277 30 atime=1513200651.043637946 29 ctime=1513200663.98879627 nordugrid-arc-5.4.2/src/services/cache_service/arc-cache-service.service.in0000644000175000002070000000034512754431715027705 0ustar00mockbuildmock00000000000000[Unit] Description=A-REX cache service After=local_fs.target remote_fs.target [Service] Type=forking PIDFile=/var/run/arched-cache-service.pid ExecStart=@pkgdatadir@/arc-cache-service-start [Install] WantedBy=multi-user.target nordugrid-arc-5.4.2/src/services/cache_service/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315733024437 xustar000000000000000030 mtime=1513200603.946061924 29 atime=1513200651.00963753 30 ctime=1513200663.985796234 
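The CacheService in CacheService.cpp above exposes three SOAP operations whose element names can be read directly off the code: CacheCheck (CacheCheck/TheseFilesNeedToCheck/FileURL, answered by CacheCheckResponse/CacheCheckResult/Result with FileURL, ExistInTheCache and FileSize), CacheLink (CacheLink/TheseFilesNeedToLink/File with FileURL and FileName, plus Username, JobID, Priority and Stage, answered per file with ReturnCode and ReturnCodeExplanation) and CacheLinkQuery (JobID). A minimal sketch of how matching request payloads could be built with the same HED classes the service itself uses is given below; the header paths, the standalone main(), and the example URLs, username and job id are assumptions for illustration only.

#include <iostream>
#include <string>
// Header paths are an assumption; the original #include lines are not
// preserved in this dump.
#include <arc/XMLNode.h>
#include <arc/message/PayloadSOAP.h>

int main() {
  Arc::NS ns; // the service matches operation elements by name, so no extra prefix is set here

  // CacheCheck: ask whether a set of URLs is already present in the cache.
  Arc::PayloadSOAP check(ns);
  Arc::XMLNode files = check.NewChild("CacheCheck").NewChild("TheseFilesNeedToCheck");
  files.NewChild("FileURL") = "gsiftp://example.org/data/file1";  // example URL (assumption)
  files.NewChild("FileURL") = "http://example.org/data/file2";    // example URL (assumption)

  // CacheLink: link cached copies into the job's session directory.
  Arc::PayloadSOAP link(ns);
  Arc::XMLNode op = link.NewChild("CacheLink");
  Arc::XMLNode f = op.NewChild("TheseFilesNeedToLink").NewChild("File");
  f.NewChild("FileURL") = "gsiftp://example.org/data/file1";  // remote file name
  f.NewChild("FileName") = "file1";                           // name inside the session dir
  op.NewChild("Username") = "gridmapped01";  // placeholder; must equal the mapped local user
  op.NewChild("JobID") = "123456789";        // job id, as in the source comment
  op.NewChild("Priority") = "90";            // clamped to 1..100 by the service
  op.NewChild("Stage") = "false";            // "true" lets the service start DTR downloads

  // Print the two SOAP documents a client would POST to the service.
  std::string doc;
  check.GetDoc(doc, true);
  std::cout << doc << std::endl;
  link.GetDoc(doc, true);
  std::cout << doc << std::endl;
  return 0;
}

Per-file outcomes come back as Result elements; a CacheLinkQuery request carrying the same JobID can then be used to poll whether any staging started by CacheLink has finished, as handled by CacheLinkQuery() above.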
nordugrid-arc-5.4.2/src/services/cache_service/Makefile.in0000644000175000002070000010020713214315733024506 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/cache_service DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/arc-cache-service-start.in \ $(srcdir)/arc-cache-service.in \ $(srcdir)/arc-cache-service.service.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = arc-cache-service arc-cache-service.service \ arc-cache-service-start CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(initddir)" \ "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(unitsdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) 
am__DEPENDENCIES_1 = libcacheservice_la_DEPENDENCIES = \ ../a-rex/grid-manager/libgridmanager.la \ ../a-rex/delegation/libdelegation.la \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) am_libcacheservice_la_OBJECTS = libcacheservice_la-CacheService.lo \ libcacheservice_la-CacheServiceGenerator.lo libcacheservice_la_OBJECTS = $(am_libcacheservice_la_OBJECTS) libcacheservice_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libcacheservice_la_CXXFLAGS) $(CXXFLAGS) \ $(libcacheservice_la_LDFLAGS) $(LDFLAGS) -o $@ SCRIPTS = $(initd_SCRIPTS) $(pkgdata_SCRIPTS) DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libcacheservice_la_SOURCES) DIST_SOURCES = $(libcacheservice_la_SOURCES) DATA = $(units_DATA) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ 
AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = 
@OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = 
@sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libcacheservice.la @SYSV_SCRIPTS_ENABLED_FALSE@CACHE_SCRIPT = @SYSV_SCRIPTS_ENABLED_TRUE@CACHE_SCRIPT = arc-cache-service initd_SCRIPTS = $(CACHE_SCRIPT) @SYSTEMD_UNITS_ENABLED_FALSE@CACHE_UNIT = @SYSTEMD_UNITS_ENABLED_TRUE@CACHE_UNIT = arc-cache-service.service @SYSTEMD_UNITS_ENABLED_FALSE@CACHE_UNIT_WRAPPER = @SYSTEMD_UNITS_ENABLED_TRUE@CACHE_UNIT_WRAPPER = arc-cache-service-start units_DATA = $(CACHE_UNIT) pkgdata_SCRIPTS = $(CACHE_UNIT_WRAPPER) libcacheservice_la_SOURCES = CacheService.h CacheService.cpp \ CacheServiceGenerator.h CacheServiceGenerator.cpp libcacheservice_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libcacheservice_la_LIBADD = \ ../a-rex/grid-manager/libgridmanager.la \ ../a-rex/delegation/libdelegation.la \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) libcacheservice_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/cache_service/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/cache_service/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): arc-cache-service: $(top_builddir)/config.status $(srcdir)/arc-cache-service.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arc-cache-service.service: $(top_builddir)/config.status $(srcdir)/arc-cache-service.service.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arc-cache-service-start: $(top_builddir)/config.status $(srcdir)/arc-cache-service-start.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libcacheservice.la: $(libcacheservice_la_OBJECTS) $(libcacheservice_la_DEPENDENCIES) $(libcacheservice_la_LINK) -rpath $(pkglibdir) $(libcacheservice_la_OBJECTS) $(libcacheservice_la_LIBADD) $(LIBS) install-initdSCRIPTS: $(initd_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(initddir)" || $(MKDIR_P) "$(DESTDIR)$(initddir)" @list='$(initd_SCRIPTS)'; test -n "$(initddir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(initddir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(initddir)$$dir" 
|| exit $$?; \ } \ ; done uninstall-initdSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(initd_SCRIPTS)'; test -n "$(initddir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(initddir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(initddir)" && rm -f $$files install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcacheservice_la-CacheService.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcacheservice_la-CacheServiceGenerator.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libcacheservice_la-CacheService.lo: CacheService.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcacheservice_la_CXXFLAGS) $(CXXFLAGS) -MT libcacheservice_la-CacheService.lo -MD -MP -MF 
$(DEPDIR)/libcacheservice_la-CacheService.Tpo -c -o libcacheservice_la-CacheService.lo `test -f 'CacheService.cpp' || echo '$(srcdir)/'`CacheService.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libcacheservice_la-CacheService.Tpo $(DEPDIR)/libcacheservice_la-CacheService.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='CacheService.cpp' object='libcacheservice_la-CacheService.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcacheservice_la_CXXFLAGS) $(CXXFLAGS) -c -o libcacheservice_la-CacheService.lo `test -f 'CacheService.cpp' || echo '$(srcdir)/'`CacheService.cpp libcacheservice_la-CacheServiceGenerator.lo: CacheServiceGenerator.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcacheservice_la_CXXFLAGS) $(CXXFLAGS) -MT libcacheservice_la-CacheServiceGenerator.lo -MD -MP -MF $(DEPDIR)/libcacheservice_la-CacheServiceGenerator.Tpo -c -o libcacheservice_la-CacheServiceGenerator.lo `test -f 'CacheServiceGenerator.cpp' || echo '$(srcdir)/'`CacheServiceGenerator.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libcacheservice_la-CacheServiceGenerator.Tpo $(DEPDIR)/libcacheservice_la-CacheServiceGenerator.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='CacheServiceGenerator.cpp' object='libcacheservice_la-CacheServiceGenerator.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcacheservice_la_CXXFLAGS) $(CXXFLAGS) -c -o libcacheservice_la-CacheServiceGenerator.lo `test -f 'CacheServiceGenerator.cpp' || echo '$(srcdir)/'`CacheServiceGenerator.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-unitsDATA: $(units_DATA) @$(NORMAL_INSTALL) test -z "$(unitsdir)" || $(MKDIR_P) "$(DESTDIR)$(unitsdir)" @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(unitsdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(unitsdir)" || exit $$?; \ done uninstall-unitsDATA: @$(NORMAL_UNINSTALL) @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(unitsdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(unitsdir)" && rm -f $$files ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for 
(i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(initddir)" "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(unitsdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
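# Illustrative note, not part of the generated rules: the install-* targets
# above are normally driven from the top-level build. A staged install of just
# this service module could look like the following (the -C path and DESTDIR
# value are examples, not defaults):
#
#   make -C src/services/cache_service all
#   make -C src/services/cache_service install DESTDIR=/tmp/arc-stage
#
# This places the libcacheservice module under $(pkglibdir), the SysV init
# script (when enabled) under $(initddir), the systemd unit under $(unitsdir)
# and the start wrapper under $(pkgdatadir), matching the
# install-pkglibLTLIBRARIES, install-initdSCRIPTS, install-unitsDATA and
# install-pkgdataSCRIPTS rules above.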
clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-initdSCRIPTS install-pkgdataSCRIPTS \ install-unitsDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-initdSCRIPTS uninstall-pkgdataSCRIPTS \ uninstall-pkglibLTLIBRARIES uninstall-unitsDATA .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am \ install-initdSCRIPTS install-man install-pdf install-pdf-am \ install-pkgdataSCRIPTS install-pkglibLTLIBRARIES install-ps \ install-ps-am install-strip install-unitsDATA installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-initdSCRIPTS \ uninstall-pkgdataSCRIPTS uninstall-pkglibLTLIBRARIES \ uninstall-unitsDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/services/cache_service/PaxHeaders.7502/CacheService.h0000644000000000000000000000012412733561314025074 xustar000000000000000027 mtime=1466884812.935694 27 atime=1513200575.476713 30 ctime=1513200663.989796283 nordugrid-arc-5.4.2/src/services/cache_service/CacheService.h0000644000175000002070000001057412733561314025150 0ustar00mockbuildmock00000000000000#ifndef CACHESERVICE_H_ #define CACHESERVICE_H_ #include #include #include #include #include // A-REX includes for GM configuration and delegation #include "../a-rex/grid-manager/conf/GMConfig.h" #include "../a-rex/grid-manager/files/ControlFileContent.h" #include "../a-rex/grid-manager/files/ControlFileHandling.h" #include "../a-rex/delegation/DelegationStore.h" #include "CacheServiceGenerator.h" namespace Cache { /** * CacheService provides functionality for A-REX cache operations that can be * performed by remote clients. It currently consists of three operations: * CacheCheck - allows querying of the cache for the presence of files. * CacheLink - enables a running job to dynamically request cache files to * be linked to its working (session) directory. * CacheLinkQuery - query the status of a transfer initiated by CacheLink. * This service is especially useful in the case of pilot job workflows where * job submission does not follow the usual ARC workflow. 
In order for input * files to be available to jobs, the pilot job can call the cache service to * prepare them. If requested files are not present in the cache, they can be * downloaded by the cache service if requested, using the DTR data staging * framework. */ class CacheService: public Arc::RegisteredService { private: /** Return codes of cache link */ enum CacheLinkReturnCode { Success, // everything went ok Staging, // files are still in the middle of downloading NotAvailable, // cache file doesn't exist and dostage is false Locked, // cache file is locked (being downloaded by other process) CacheError, // error with cache (configuration, filesystem etc) PermissionError, // user doesn't have permission on original source LinkError, // error while linking to session dir DownloadError, // error downloading cache file }; /** Construct a SOAP error message with optional extra reason string */ Arc::MCC_Status make_soap_fault(Arc::Message& outmsg, const std::string& reason = ""); /** CacheService namespace */ Arc::NS ns; /** A-REX configuration */ ARex::GMConfig config; /** Generator to handle data staging */ CacheServiceGenerator* dtr_generator; /** Logger object */ static Arc::Logger logger; protected: /* Cache operations */ /** * Check whether the URLs supplied in the input are present in any cache. * Returns in the out message for each file true or false, and if true, * the size of the file on cache disk. * @param mapped_user The local user to which the client DN was mapped */ Arc::MCC_Status CacheCheck(Arc::XMLNode in, Arc::XMLNode out, const Arc::User& mapped_user); /** * This method is used to link cache files to the session dir. A list of * URLs is supplied and if they are present in the cache and the user * calling the service has permission to access them, then they are linked * to the given session directory. If the user requests that missing files * be staged, then data staging requests are entered. The user should then * use CacheLinkQuery to poll the status of the requests. * @param mapped_user The local user to which the client DN was mapped */ Arc::MCC_Status CacheLink(Arc::XMLNode in, Arc::XMLNode out, const Arc::User& mapped_user); /** * Query the status of data staging for a given job ID. */ Arc::MCC_Status CacheLinkQuery(Arc::XMLNode in, Arc::XMLNode out); public: /** * Make a new CacheService. Reads the configuration and determines * the validity of the service. */ CacheService(Arc::Config *cfg, Arc::PluginArgument* parg); /** * Destroy the CacheService */ virtual ~CacheService(void); /** * Main method called by HED when CacheService is invoked. Directs call * to appropriate CacheService method. */ virtual Arc::MCC_Status process(Arc::Message &inmsg, Arc::Message &outmsg); /** * Supplies information on the service for use in the information system. */ bool RegistrationCollector(Arc::XMLNode &doc); /** Returns true if the CacheService is valid. */ operator bool() { return valid; }; /** Returns true if the CacheService is not valid. 
*/ bool operator!() { return !valid; }; }; } // namespace Cache #endif /* CACHESERVICE_H_ */ nordugrid-arc-5.4.2/src/services/cache_service/PaxHeaders.7502/arc-cache-service-start.in0000644000000000000000000000012712754431715027335 xustar000000000000000027 mtime=1471296461.229277 30 atime=1513200651.059638142 30 ctime=1513200663.986796246 nordugrid-arc-5.4.2/src/services/cache_service/arc-cache-service-start.in0000644000175000002070000002174712754431715027412 0ustar00mockbuildmock00000000000000#!/bin/bash add_library_path() { location="$1" if [ ! "x$location" = "x" ] ; then if [ ! "$location" = "/usr" ] ; then libdir="$location/lib" libdir64="$location/lib64" if [ -d "$libdir64" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir64" else LD_LIBRARY_PATH="$libdir64:$LD_LIBRARY_PATH" fi fi if [ -d "$libdir" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir" else LD_LIBRARY_PATH="$libdir:$LD_LIBRARY_PATH" fi fi fi fi } prog=arched RUN=yes # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/arc-cache-service ]; then . /etc/sysconfig/arc-cache-service elif [ -r /etc/default/arc-cache-service ]; then . /etc/default/arc-cache-service fi # GLOBUS_LOCATION GLOBUS_LOCATION=${GLOBUS_LOCATION:-@DEFAULT_GLOBUS_LOCATION@} if [ ! -d "$GLOBUS_LOCATION" ]; then echo "GLOBUS_LOCATION ($GLOBUS_LOCATION) not found" exit 1 fi export GLOBUS_LOCATION # ARC_LOCATION ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then echo "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi export ARC_LOCATION readconfigvar() { fname=$1 if [ ! -r "$fname" ]; then return fi bname="[$2]" vname=$3 value= cat "$fname" | grep -e '^\[' -e "^${vname}=" | { while true; do read line if [ ! $? = 0 ] ; then return fi if [ "$line" = "$bname" ] ; then while true ; do read line if [ ! $? = 0 ] ; then return fi lstart=`echo "$line" | head -c 1` if [ "$lstart" = '[' ] ; then return fi vlname=`echo "$line" | sed 's/=.*//;t;s/.*//'` if [ "$vlname" = "$vname" ] ; then val=`echo "$line" | sed 's/[^=]*=//'` eval "echo $val" return fi done fi done } } # ARC_CONFIG if [ "x$ARC_CONFIG" = "x" ]; then if [ -r $ARC_LOCATION/etc/arc.conf ]; then ARC_CONFIG=$ARC_LOCATION/etc/arc.conf elif [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi fi if [ `id -u` = 0 ] ; then if [ "x$PID_FILE" = "x" ]; then PID_FILE=/var/run/$prog-cache-service.pid fi else if [ "x$PID_FILE" = "x" ]; then PID_FILE=$HOME/$prog-cache-service.pid fi fi prepare() { CMD="$ARC_LOCATION/sbin/$prog" if [ ! -x "$CMD" ]; then echo "Missing executable" exit 1 fi if [ ! 
-r "$ARC_CONFIG" ]; then echo "ARC configuration not found (usually /etc/arc.conf)" exit 1 fi # Creating configuration file of arched # Reading following information from config file: # Debug level # User name # Log file is currently hard-coded #LOGFILE=`readconfigvar "$ARC_CONFIG" grid-manager logfile` LOGLEVEL=`readconfigvar "$ARC_CONFIG" grid-manager debug` LOGSIZE=`readconfigvar "$ARC_CONFIG" grid-manager logsize` USERNAME=`readconfigvar "$ARC_CONFIG" grid-manager user` X509_USER_CERT=`readconfigvar "$ARC_CONFIG" grid-manager x509_user_cert` X509_USER_KEY=`readconfigvar "$ARC_CONFIG" grid-manager x509_user_key` X509_CERT_DIR=`readconfigvar "$ARC_CONFIG" grid-manager x509_cert_dir` GRIDMAP=`readconfigvar "$ARC_CONFIG" grid-manager gridmap` GLOBUS_TCP_PORT_RANGE=`readconfigvar "$ARC_CONFIG" grid-manager globus_tcp_port_range` GLOBUS_UDP_PORT_RANGE=`readconfigvar "$ARC_CONFIG" grid-manager globus_udp_port_range` if [ -z "$X509_USER_CERT" ] ; then X509_USER_CERT=`readconfigvar "$ARC_CONFIG" common x509_user_cert` fi if [ -z "$X509_USER_KEY" ] ; then X509_USER_KEY=`readconfigvar "$ARC_CONFIG" common x509_user_key` fi if [ -z "$X509_CERT_DIR" ] ; then X509_CERT_DIR=`readconfigvar "$ARC_CONFIG" common x509_cert_dir` fi if [ -z "$GRIDMAP" ] ; then GRIDMAP=`readconfigvar "$ARC_CONFIG" common gridmap` fi if [ -z "$GLOBUS_TCP_PORT_RANGE" ] ; then GLOBUS_TCP_PORT_RANGE=`readconfigvar "$ARC_CONFIG" common globus_tcp_port_range` fi if [ -z "$GLOBUS_UDP_PORT_RANGE" ] ; then GLOBUS_UDP_PORT_RANGE=`readconfigvar "$ARC_CONFIG" common globus_udp_port_range` fi # Exporting collected variables if [ ! -z "$X509_USER_CERT" ] ; then export X509_USER_CERT ; fi if [ ! -z "$X509_USER_KEY" ] ; then export X509_USER_KEY ; fi if [ ! -z "$X509_CERT_DIR" ] ; then export X509_CERT_DIR ; fi if [ ! -z "$GRIDMAP" ] ; then export GRIDMAP ; fi if [ ! -z "$GLOBUS_TCP_PORT_RANGE" ] ; then export GLOBUS_TCP_PORT_RANGE ; fi if [ ! -z "$GLOBUS_UDP_PORT_RANGE" ] ; then export GLOBUS_UDP_PORT_RANGE ; fi # Required defaults if [ -z "$GRIDMAP" ] ; then GRIDMAP=/etc/grid-security/grid-mapfile fi if [ -z "$X509_USER_CERT" ] ; then X509_USER_CERT=/etc/grid-security/hostcert.pem fi if [ -z "$X509_USER_KEY" ] ; then X509_USER_KEY=/etc/grid-security/hostkey.pem fi if [ -z "$X509_CERT_DIR" ] ; then X509_CERT_DIR=/etc/grid-security/certificates fi # Web Service configuration host=`readconfigvar "$ARC_CONFIG" common hostname` cache_endpoint=https://$host:60001/cacheservice CACHE_CONFIG=`mktemp -t arc-cache-service.xml.XXXXXX` if [ -z "$CACHE_CONFIG" ] ; then echo "Failed to create temporary file" exit 1 fi CMD="$CMD -c '$CACHE_CONFIG'" # VOMS_LOCATION VOMS_LOCATION=${VOMS_LOCATION:-@DEFAULT_VOMS_LOCATION@} add_library_path "$VOMS_LOCATION" add_library_path "$GLOBUS_LOCATION" if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@ else LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@:$LD_LIBRARY_PATH fi export LD_LIBRARY_PATH case "$LOGLEVEL" in 0) LOGLEVEL="FATAL" ;; 1) LOGLEVEL="ERROR" ;; 2) LOGLEVEL="WARNING" ;; 3) LOGLEVEL="INFO" ;; 4) LOGLEVEL="VERBOSE" ;; 5) LOGLEVEL="DEBUG" ;; *) LOGLEVEL="WARNING" ;; esac if [ "$USERNAME" = "root" ] ; then USERNAME="" fi LOGFILE=${LOGFILE:-/var/log/arc/cache-service.log} if [ ! -d `dirname $LOGFILE` ]; then mkdir -p `dirname $LOGFILE` fi LOGSIZE=${LOGSIZE:--1 -1} LOGNUM=`echo "$LOGSIZE" | sed 's/^ *[-+0-9]* *//'` LOGSIZE=`echo "$LOGSIZE" | sed 's/^ *\([-+0-9]*\).*/\1/'` if [ ! 
-z "$USERNAME" ] ; then CMD="$CMD -u $USERNAME" fi # Cache service XML config CACHECFG="\ \ \ \ $PID_FILE\ \ $LOGFILE\ $LOGLEVEL\ $LOGNUM\ $LOGSIZE\ \ \ \ $ARC_LOCATION/@pkglibsubdir@/\ \ mcctcp\ mcctls\ mcchttp\ mccsoap\ identitymap\ arcshc\ cacheservice\ \ \ \ 60001\ \ \ \ $X509_USER_KEY\ $X509_USER_CERT\ $X509_CERT_DIR\ .* \ $GRIDMAP\ nobody\ \ \ \ POST\ GET\ PUT\ \ \ \ \ \ ^/cacheservice\ \ \ \ \ \ \ \ $ARC_CONFIG\ false\ \ \ \ " echo "$CACHECFG" > "$CACHE_CONFIG" if [ ! -z "$USERNAME" ] ; then [ -f $CACHE_CONFIG ] && chown $USERNAME $CACHE_CONFIG fi } if [ "$RUN" != "yes" ] ; then echo "arc-cache-service disabled in configuration" return 0 fi prepare exec "$CMD" rm -f "$CACHE_CONFIG" nordugrid-arc-5.4.2/src/services/cache_service/PaxHeaders.7502/CacheServiceGenerator.h0000644000000000000000000000012412046704323026737 xustar000000000000000027 mtime=1352370387.670578 27 atime=1513200575.455713 30 ctime=1513200663.991796307 nordugrid-arc-5.4.2/src/services/cache_service/CacheServiceGenerator.h0000644000175000002070000000574512046704323027017 0ustar00mockbuildmock00000000000000#ifndef CACHESERVICEGENERATOR_H_ #define CACHESERVICEGENERATOR_H_ #include #include #include "../a-rex/grid-manager/conf/StagingConfig.h" namespace Cache { /// DTR Generator for the cache service. class CacheServiceGenerator : public DataStaging::DTRCallback { private: /// Scheduler object to process DTRs. DataStaging::Scheduler* scheduler; /// Generator state DataStaging::ProcessState generator_state; /// Whether to use the host certificate when communicating with remote delivery bool use_host_cert; /// Scratch directory used by job std::string scratch_dir; /// Whether we are running with A-REX or we manage the Scheduler ourselves bool run_with_arex; /// A-REX configuration const ARex::GMConfig& config; /// Staging configuration ARex::StagingConfig staging_conf; /// Map of job id to DTRs std::multimap processing_dtrs; /// Lock for DTR map Arc::SimpleCondition processing_lock; /// Map of job id to error message, if any std::map finished_jobs; /// Lock for finished job map Arc::SimpleCondition finished_lock; /// Logger static Arc::Logger logger; public: /// Start Generator and get Scheduler instance. /** * If with_arex is true then it is assumed that A-REX takes care of * configuring, starting and stopping the DTR Scheduler. If cache service * is run outside of A-REX then it starts an independent DTR instance, * using parameters given in arc.conf. * @param config A-REX configuration * @param with_arex If true then we assume A-REX starts the scheduler, if * false then we start and stop it. */ CacheServiceGenerator(const ARex::GMConfig& config, bool with_arex); /// Stop Scheduler if we are not running with A-REX ~CacheServiceGenerator(); /// Callback method to receive completed DTRs void receiveDTR(DataStaging::DTR_ptr dtr); /// Add a new request. /** * @param user User for this transfer * @param source Source file * @param destination Destination file * @param usercfg UserConfig with proxy information * @param jobid Job identifier * @param priority DTR priority */ bool addNewRequest(const Arc::User& user, const std::string& source, const std::string& destination, const Arc::UserConfig& usercfg, const std::string& jobid, int priority); /// Query requests for given job id. /** * @param jobid Job ID to query * @param error If any DTR finished with an error, the description is put * in error. 
* @return True if all requests for the job have finished, false otherwise */ bool queryRequestsFinished(const std::string& jobid, std::string& error); }; } // namespace Cache #endif /* CACHESERVICEGENERATOR_H_ */ nordugrid-arc-5.4.2/src/services/cache_service/PaxHeaders.7502/arc-cache-service.in0000644000000000000000000000012712442324223026167 xustar000000000000000027 mtime=1418307731.508846 30 atime=1513200651.026637738 30 ctime=1513200663.987796258 nordugrid-arc-5.4.2/src/services/cache_service/arc-cache-service.in0000644000175000002070000003001412442324223026227 0ustar00mockbuildmock00000000000000#!/bin/bash # # Init file for the A-REX cache service # # This startup script takes ARC0 configuration file as # its input and generates ARC1 arched configuration file # which contains commands to start A-REX cache service. # chkconfig: 2345 87 13 # description: ARC cache service # processname: arched ### BEGIN INIT INFO # Provides: arc-cache-service # Required-Start: $local_fs $remote_fs # Required-Stop: $local_fs $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: ARC cache service # Description: ARC cache service ### END INIT INFO # source function library if [ -f /etc/init.d/functions ]; then . /etc/init.d/functions log_success_msg() { echo -n "$@" success "$@" echo } log_warning_msg() { echo -n "$@" warning "$@" echo } log_failure_msg() { echo -n "$@" failure "$@" echo } elif [ -f /lib/lsb/init-functions ]; then . /lib/lsb/init-functions else echo "Error: Cannot source neither init.d nor lsb functions" exit 1 fi add_library_path() { location="$1" if [ ! "x$location" = "x" ] ; then if [ ! "$location" = "/usr" ] ; then libdir="$location/lib" libdir64="$location/lib64" if [ -d "$libdir64" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir64" else LD_LIBRARY_PATH="$libdir64:$LD_LIBRARY_PATH" fi fi if [ -d "$libdir" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir" else LD_LIBRARY_PATH="$libdir:$LD_LIBRARY_PATH" fi fi fi fi } prog=arched RUN=yes # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/arc-cache-service ]; then . /etc/sysconfig/arc-cache-service elif [ -r /etc/default/arc-cache-service ]; then . /etc/default/arc-cache-service fi # GLOBUS_LOCATION GLOBUS_LOCATION=${GLOBUS_LOCATION:-@DEFAULT_GLOBUS_LOCATION@} if [ ! -d "$GLOBUS_LOCATION" ]; then log_failure_msg "GLOBUS_LOCATION ($GLOBUS_LOCATION) not found" exit 1 fi export GLOBUS_LOCATION # ARC_LOCATION ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then log_failure_msg "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi export ARC_LOCATION readconfigvar() { fname=$1 if [ ! -r "$fname" ]; then return fi bname="[$2]" vname=$3 value= cat "$fname" | grep -e '^\[' -e "^${vname}=" | { while true; do read line if [ ! $? = 0 ] ; then return fi if [ "$line" = "$bname" ] ; then while true ; do read line if [ ! $? 
= 0 ] ; then return fi lstart=`echo "$line" | head -c 1` if [ "$lstart" = '[' ] ; then return fi vlname=`echo "$line" | sed 's/=.*//;t;s/.*//'` if [ "$vlname" = "$vname" ] ; then val=`echo "$line" | sed 's/[^=]*=//'` eval "echo $val" return fi done fi done } } # ARC_CONFIG if [ "x$ARC_CONFIG" = "x" ]; then if [ -r $ARC_LOCATION/etc/arc.conf ]; then ARC_CONFIG=$ARC_LOCATION/etc/arc.conf elif [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi fi PID_FILE= if [ `id -u` = 0 ] ; then # Debian does not have /var/lock/subsys if [ -d /var/lock/subsys ]; then LOCKFILE=/var/lock/subsys/$prog-cache-service else LOCKFILE=/var/lock/$prog-cache-service fi if [ "x$PID_FILE" = "x" ]; then PID_FILE=/var/run/$prog-cache-service.pid fi else LOCKFILE=$HOME/$prog-cache-service.lock if [ "x$PID_FILE" = "x" ]; then PID_FILE=$HOME/$prog-cache-service.pid fi fi prepare() { CMD="$ARC_LOCATION/sbin/$prog" if [ ! -x "$CMD" ]; then log_failure_msg "Missing executable" exit 1 fi if [ ! -r "$ARC_CONFIG" ]; then log_failure_msg "ARC configuration not found (usually /etc/arc.conf)" exit 1 fi # Creating configuration file of arched # Reading following information from config file: # Debug level # User name # Log file is currently hard-coded #LOGFILE=`readconfigvar "$ARC_CONFIG" grid-manager logfile` LOGLEVEL=`readconfigvar "$ARC_CONFIG" grid-manager debug` LOGSIZE=`readconfigvar "$ARC_CONFIG" grid-manager logsize` USERNAME=`readconfigvar "$ARC_CONFIG" grid-manager user` X509_USER_CERT=`readconfigvar "$ARC_CONFIG" grid-manager x509_user_cert` X509_USER_KEY=`readconfigvar "$ARC_CONFIG" grid-manager x509_user_key` X509_CERT_DIR=`readconfigvar "$ARC_CONFIG" grid-manager x509_cert_dir` GRIDMAP=`readconfigvar "$ARC_CONFIG" grid-manager gridmap` GLOBUS_TCP_PORT_RANGE=`readconfigvar "$ARC_CONFIG" grid-manager globus_tcp_port_range` GLOBUS_UDP_PORT_RANGE=`readconfigvar "$ARC_CONFIG" grid-manager globus_udp_port_range` if [ -z "$X509_USER_CERT" ] ; then X509_USER_CERT=`readconfigvar "$ARC_CONFIG" common x509_user_cert` fi if [ -z "$X509_USER_KEY" ] ; then X509_USER_KEY=`readconfigvar "$ARC_CONFIG" common x509_user_key` fi if [ -z "$X509_CERT_DIR" ] ; then X509_CERT_DIR=`readconfigvar "$ARC_CONFIG" common x509_cert_dir` fi if [ -z "$GRIDMAP" ] ; then GRIDMAP=`readconfigvar "$ARC_CONFIG" common gridmap` fi if [ -z "$GLOBUS_TCP_PORT_RANGE" ] ; then GLOBUS_TCP_PORT_RANGE=`readconfigvar "$ARC_CONFIG" common globus_tcp_port_range` fi if [ -z "$GLOBUS_UDP_PORT_RANGE" ] ; then GLOBUS_UDP_PORT_RANGE=`readconfigvar "$ARC_CONFIG" common globus_udp_port_range` fi # Exporting collected variables if [ ! -z "$X509_USER_CERT" ] ; then export X509_USER_CERT ; fi if [ ! -z "$X509_USER_KEY" ] ; then export X509_USER_KEY ; fi if [ ! -z "$X509_CERT_DIR" ] ; then export X509_CERT_DIR ; fi if [ ! -z "$GRIDMAP" ] ; then export GRIDMAP ; fi if [ ! -z "$GLOBUS_TCP_PORT_RANGE" ] ; then export GLOBUS_TCP_PORT_RANGE ; fi if [ ! 
-z "$GLOBUS_UDP_PORT_RANGE" ] ; then export GLOBUS_UDP_PORT_RANGE ; fi # Required defaults if [ -z "$GRIDMAP" ] ; then GRIDMAP=/etc/grid-security/grid-mapfile fi if [ -z "$X509_USER_CERT" ] ; then X509_USER_CERT=/etc/grid-security/hostcert.pem fi if [ -z "$X509_USER_KEY" ] ; then X509_USER_KEY=/etc/grid-security/hostkey.pem fi if [ -z "$X509_CERT_DIR" ] ; then X509_CERT_DIR=/etc/grid-security/certificates fi # Web Service configuration host=`readconfigvar "$ARC_CONFIG" common hostname` cache_endpoint=https://$host:60001/cacheservice CACHE_CONFIG=`mktemp -t arc-cache-service.xml.XXXXXX` if [ -z "$CACHE_CONFIG" ] ; then log_failure_msg "Failed to create temporary file" exit 1 fi CMD="$CMD -c '$CACHE_CONFIG'" # VOMS_LOCATION VOMS_LOCATION=${VOMS_LOCATION:-@DEFAULT_VOMS_LOCATION@} add_library_path "$VOMS_LOCATION" add_library_path "$GLOBUS_LOCATION" if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@ else LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@:$LD_LIBRARY_PATH fi export LD_LIBRARY_PATH case "$LOGLEVEL" in 0) LOGLEVEL="FATAL" ;; 1) LOGLEVEL="ERROR" ;; 2) LOGLEVEL="WARNING" ;; 3) LOGLEVEL="INFO" ;; 4) LOGLEVEL="VERBOSE" ;; 5) LOGLEVEL="DEBUG" ;; *) LOGLEVEL="WARNING" ;; esac if [ "$USERNAME" = "root" ] ; then USERNAME="" fi LOGFILE=${LOGFILE:-/var/log/arc/cache-service.log} if [ ! -d `dirname $LOGFILE` ]; then mkdir -p `dirname $LOGFILE` fi LOGSIZE=${LOGSIZE:--1 -1} LOGNUM=`echo "$LOGSIZE" | sed 's/^ *[-+0-9]* *//'` LOGSIZE=`echo "$LOGSIZE" | sed 's/^ *\([-+0-9]*\).*/\1/'` if [ ! -z "$USERNAME" ] ; then CMD="$CMD -u $USERNAME" fi # Cache service XML config CACHECFG="\ \ \ \ $PID_FILE\ \ $LOGFILE\ $LOGLEVEL\ $LOGNUM\ $LOGSIZE\ \ \ \ $ARC_LOCATION/@pkglibsubdir@/\ \ mcctcp\ mcctls\ mcchttp\ mccsoap\ identitymap\ arcshc\ cacheservice\ \ \ \ 60001\ \ \ \ $X509_USER_KEY\ $X509_USER_CERT\ $X509_CERT_DIR\ .* \ $GRIDMAP\ nobody\ \ \ \ POST\ GET\ PUT\ \ \ \ \ \ ^/cacheservice\ \ \ \ \ \ \ \ $ARC_CONFIG\ false\ \ \ \ " echo "$CACHECFG" > "$CACHE_CONFIG" if [ ! -z "$USERNAME" ] ; then [ -f $CACHE_CONFIG ] && chown $USERNAME $CACHE_CONFIG fi } start() { if [ "$RUN" != "yes" ] ; then echo "arc-cache-service disabled in configuration" return 0 fi echo -n "Starting $prog: " # Check if we are already running if [ -f $PID_FILE ]; then read pid < $PID_FILE if [ "x$pid" != "x" ]; then ps -p "$pid" -o comm 2>/dev/null | grep "^$prog$" 1>/dev/null 2>/dev/null if [ $? -eq 0 ] ; then log_success_msg "already running (pid $pid)" return 0 fi fi rm -f "$PID_FILE" "$LOCKFILE" fi prepare eval "$CMD" RETVAL=$? rm -f "$CACHE_CONFIG" if [ $RETVAL -eq 0 ]; then touch $LOCKFILE log_success_msg else log_failure_msg fi return $RETVAL } stop() { echo -n "Stopping $prog: " if [ -f "$PID_FILE" ]; then read pid < "$PID_FILE" if [ ! -z "$pid" ] ; then kill "$pid" RETVAL=$? if [ $RETVAL -eq 0 ]; then log_success_msg else log_failure_msg fi sleep 1 kill -9 "$pid" 1>/dev/null 2>&1 rm -f "$PID_FILE" "$LOCKFILE" else RETVAL=1 log_failure_msg "$prog shutdown - pidfile is empty" fi else RETVAL=0 log_success_msg "$prog shutdown - already stopped" fi return $RETVAL } status() { if [ -f "$PID_FILE" ]; then read pid < "$PID_FILE" if [ "$pid" != "" ]; then if ps -p "$pid" > /dev/null; then echo "$1 (pid $pid) is running..." 
return 0 fi echo "$1 stopped but pid file exists" return 1 fi fi if [ -f $LOCKFILE ]; then echo "$1 stopped but lockfile exists" return 2 fi echo "$1 is stopped" return 3 } restart() { stop start } case "$1" in start) start ;; stop) stop ;; status) status $prog ;; restart | force-reload) restart ;; reload) ;; condrestart | try-restart) [ -f $LOCKFILE ] && restart || : ;; *) echo "Usage: $0 {start|stop|status|restart|force-reload|reload|condrestart|try-restart}" exit 1 ;; esac exit $? nordugrid-arc-5.4.2/src/services/cache_service/PaxHeaders.7502/README0000644000000000000000000000012411443432673023261 xustar000000000000000027 mtime=1284388283.358678 27 atime=1513200575.480713 30 ctime=1513200663.983796209 nordugrid-arc-5.4.2/src/services/cache_service/README0000644000175000002070000000020611443432673023324 0ustar00mockbuildmock00000000000000The cache service is a service inside HED which exposes some operations on the A-REX cache to remote clients through a WS interface. nordugrid-arc-5.4.2/src/services/PaxHeaders.7502/README0000644000000000000000000000012412441413350020464 xustar000000000000000027 mtime=1418073832.580408 27 atime=1513200576.629727 30 ctime=1513200662.426777166 nordugrid-arc-5.4.2/src/services/README0000644000175000002070000000115212441413350020530 0ustar00mockbuildmock00000000000000ARC services ------------ a-rex - ARC REsource-coupled EXecution Service/Execution Capability cache_service - Service to expose some A-REX cache operations data-staging - Service to execute data transfer requests echo_java - Demonstration services: how to write a Java service echo_python - Simple test service to demonstrate how Python based services should work gridftpd - GridFTP server ldap-infosys - LDAP based grid information system ldap-monitor - LDAP based grid monitor wrappers - Collection of language bindings ws-monitor - Webservice based grid monitor nordugrid-arc-5.4.2/src/services/PaxHeaders.7502/ldap-infosys0000644000000000000000000000013213214316027022140 xustar000000000000000030 mtime=1513200663.578791256 30 atime=1513200668.719854133 30 ctime=1513200663.578791256 nordugrid-arc-5.4.2/src/services/ldap-infosys/0000755000175000002070000000000013214316027022263 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ldap-infosys/PaxHeaders.7502/nordugrid-arc-egiis.in0000644000000000000000000000012713024226423026406 xustar000000000000000027 mtime=1481714963.501999 30 atime=1513200651.384642117 30 ctime=1513200663.567791121 nordugrid-arc-5.4.2/src/services/ldap-infosys/nordugrid-arc-egiis.in0000644000175000002070000001245613024226423026460 0ustar00mockbuildmock00000000000000#!/bin/bash # # Init file for the NorduGrid/ARC Grid Information Index Server (EGIIS) # # chkconfig: 2345 76 24 # description: NorduGrid/ARC Grid Information Index Server # # config: /etc/sysconfig/nordugrid # config: /etc/sysconfig/nordugrid-arc-egiis # config: /etc/arc.conf # ###################################################################### ### BEGIN INIT INFO # Provides: nordugrid-arc-egiis # Required-Start: $remote_fs $syslog # Required-Stop: $remote_fs $syslog # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: NorduGrid/ARC Grid Information Index Server # Description: Init script for the NorduGrid/ARC Grid Information # Index Server (EGIIS) ### END INIT INFO # Helper functions if [ -r /etc/init.d/functions ]; then .
/etc/init.d/functions log_success_msg() { echo -n "$@" success "$@" echo } log_warning_msg() { echo -n "$@" warning "$@" echo } log_failure_msg() { echo -n "$@" failure "$@" echo } elif [ -r /lib/lsb/init-functions ]; then . /lib/lsb/init-functions else echo "Error: Cannot source neither init.d nor lsb functions" exit 1 fi RETVAL=0 prog=nordugrid-arc-egiis RUN=yes # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/$prog ]; then . /etc/sysconfig/$prog elif [ -r /etc/default/$prog ]; then . /etc/default/$prog fi if [ "x$RUN" != "xyes" ]; then log_warning_msg "$prog disabled, please adjust the configuration to your" log_warning_msg "needs and then set RUN to 'yes' in /etc/default/$prog to enable it." exit 0 fi [ -n "$ARC_LOCATION" ] && export ARC_LOCATION [ -n "$ARC_CONFIG" ] && export ARC_CONFIG ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then log_failure_msg "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi # Source the config parsing routines if [ -r "$ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh" ]; then . $ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh || exit $? else log_failure_msg "Could not find $ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh" exit 1 fi ARC_CONFIG=${ARC_CONFIG:-/etc/arc.conf} if [ ! -r "$ARC_CONFIG" ]; then log_failure_msg "ARC configuration file ($ARC_CONFIG) not found" log_failure_msg "If this file is in a non-standard place it can be set" log_failure_msg " with the ARC_CONFIG environment variable" exit 1 fi # Read arc.conf config_parse_file $ARC_CONFIG || exit $? # Check for infosys-block if ! config_match_section infosys; then log_warning_msg "Missing infosys configuration block" exit 0 fi config_hide_all config_import_section common # These options need to come from the infosys-block, not from common unset CONFIG_logfile unset CONFIG_user unset CONFIG_port config_import_section infosys bdii_user=$CONFIG_user if [ -z "$bdii_user" ]; then # Get ldap user from passwd bdii_user=`getent passwd ldap openldap | sed 's/:.*//;q'` if [ -z "$bdii_user" ]; then log_warning_msg "Warning, could not find ldap or openldap user" log_warning_msg "resorting to using the root user" bdii_user=root fi fi # Use this command to change user if [ -x /sbin/runuser ]; then RUNUSER=runuser else RUNUSER=su fi # If missing, we have a problem USERSHELL=${USERSHELL:-"/bin/sh"} if [ ! -x ${USERSHELL} ]; then log_failure_msg "Could not find ${USERSHELL}" exit 1 fi giis_location=${CONFIG_giis_location:-$ARC_LOCATION} if [ `id -u` = 0 ]; then # Debian does not have /var/lock/subsys if [ -d /var/lock/subsys ]; then lockfile=/var/lock/subsys/$prog else lockfile=/var/lock/$prog fi else lockfile=$HOME/$prog fi infosys_ldap_run_dir=${CONFIG_infosys_ldap_run_dir:-/var/run/arc/infosys} mkdir -p ${infosys_ldap_run_dir} chown ${bdii_user}: ${infosys_ldap_run_dir} giis_fifo=${CONFIG_giis_fifo:-${infosys_ldap_run_dir}/giis-fifo} start () { if [ -r "${lockfile}" ]; then log_success_msg "$prog already started" RETVAL=0 return ${RETVAL} fi if [ ! "`config_subsections infosys/index`" ]; then log_failure_msg "No $prog defined in $ARC_CONFIG" exit 1 fi if [ `id -u` = 0 ]; then $RUNUSER -s "$USERSHELL" -c "${giis_location}/sbin/arc-infoindex-server -c $ARC_CONFIG -f $giis_fifo" ${bdii_user} else ${giis_location}/sbin/arc-infoindex-server -c $ARC_CONFIG -f $giis_fifo fi touch ${lockfile} log_success_msg "$prog started" } stop () { if [ ! 
-r "${lockfile}" ]; then log_success_msg "$prog already stopped" RETVAL=0 return ${RETVAL} fi [ -p $giis_fifo ] && echo STOP > $giis_fifo rm -f ${lockfile} log_success_msg "$prog stopped" } status () { if [ ! -r "${lockfile}" ]; then log_success_msg "$prog is stopped" RETVAL=3 return ${RETVAL} fi log_success_msg "$prog is running" RETVAL=0 return ${RETVAL} } case "$1" in start) start ;; stop) stop ;; restart | force-reload) stop # avoid race sleep 3 start ;; reload) ;; status) status ;; condrestart | try-restart) if [ -r ${lockfile} ]; then stop # avoid race sleep 3 start fi ;; *) echo "Usage: $0 {start|stop|restart|force-reload|reload|condrestart|try-restart|status}" exit 1 ;; esac exit $RETVAL nordugrid-arc-5.4.2/src/services/ldap-infosys/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712754431715024267 xustar000000000000000027 mtime=1471296461.229277 30 atime=1513200604.628070265 30 ctime=1513200663.560791036 nordugrid-arc-5.4.2/src/services/ldap-infosys/Makefile.am0000644000175000002070000000147212754431715024335 0ustar00mockbuildmock00000000000000if GIIS_SERVICE_ENABLED GIIS_SERVICE = giis else GIIS_SERVICE = endif if SYSV_SCRIPTS_ENABLED GRID_INFOSYS_SCRIPT = nordugrid-arc-bdii nordugrid-arc-egiis nordugrid-arc-inforeg nordugrid-arc-slapd nordugrid-arc-ldap-infosys else GRID_INFOSYS_SCRIPT = endif initd_SCRIPTS = $(GRID_INFOSYS_SCRIPT) if SYSTEMD_UNITS_ENABLED GRID_INFOSYS_UNIT = nordugrid-arc-bdii.service nordugrid-arc-egiis.service nordugrid-arc-inforeg.service nordugrid-arc-slapd.service else GRID_INFOSYS_UNIT = endif units_DATA = $(GRID_INFOSYS_UNIT) pkgdata_DATA = ConfigParser.pm pkgdata_SCRIPTS = create-bdii-config create-inforeg-config create-slapd-config SUBDIRS = $(GIIS_SERVICE) DIST_SUBDIRS = giis EXTRA_DIST = ConfigParser.pm nordugrid-arc-bdii.service nordugrid-arc-egiis.service nordugrid-arc-inforeg.service nordugrid-arc-slapd.service nordugrid-arc-5.4.2/src/services/ldap-infosys/PaxHeaders.7502/nordugrid-arc-bdii.in0000644000000000000000000000012713024226423026215 xustar000000000000000027 mtime=1481714963.501999 30 atime=1513200651.367641909 30 ctime=1513200663.566791109 nordugrid-arc-5.4.2/src/services/ldap-infosys/nordugrid-arc-bdii.in0000644000175000002070000002044313024226423026262 0ustar00mockbuildmock00000000000000#!/bin/bash # # Init file for the NorduGrid/ARC LDAP based local resource information system # # chkconfig: 2345 76 24 # description: NorduGrid/ARC local resource information system # # config: /etc/sysconfig/nordugrid # config: /etc/sysconfig/nordugrid-arc-bdii # config: /etc/arc.conf # ###################################################################### ### BEGIN INIT INFO # Provides: nordugrid-arc-bdii # Required-Start: $remote_fs $syslog nordugrid-arc-slapd # Required-Stop: $remote_fs $syslog nordugrid-arc-slapd # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: NorduGrid/ARC local resource information system # Description: NorduGrid/ARC LDAP based local resource information system ### END INIT INFO # Helper functions if [ -r /etc/init.d/functions ]; then . /etc/init.d/functions log_success_msg() { echo -n "$@" success "$@" echo } log_warning_msg() { echo -n "$@" warning "$@" echo } log_failure_msg() { echo -n "$@" failure "$@" echo } elif [ -r /lib/lsb/init-functions ]; then . /lib/lsb/init-functions else echo "Error: Cannot source neither init.d nor lsb functions" exit 1 fi RETVAL=0 prog=nordugrid-arc-bdii RUN=yes # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . 
/etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/$prog ]; then . /etc/sysconfig/$prog elif [ -r /etc/default/$prog ]; then . /etc/default/$prog fi if [ "x$RUN" != "xyes" ]; then log_warning_msg "$prog disabled, please adjust the configuration to your" log_warning_msg "needs and then set RUN to 'yes' in /etc/default/$prog to enable it." exit 0 fi [ -n "$ARC_LOCATION" ] && export ARC_LOCATION [ -n "$ARC_CONFIG" ] && export ARC_CONFIG ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then log_failure_msg "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi # Source the config parsing routines if [ -r "$ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh" ]; then . $ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh || exit $? else log_failure_msg "Could not find $ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh" exit 1 fi ARC_CONFIG=${ARC_CONFIG:-/etc/arc.conf} if [ ! -r "$ARC_CONFIG" ]; then log_failure_msg "ARC configuration file ($ARC_CONFIG) not found" log_failure_msg "If this file is in a non-standard place it can be set" log_failure_msg " with the ARC_CONFIG environment variable" exit 1 fi # Read arc.conf config_parse_file $ARC_CONFIG || exit $? # Check for infosys-block if ! config_match_section infosys; then log_failure_msg "Missing infosys configuration block" exit 1 fi config_hide_all config_import_section common # These options need to come from the infosys-block, not from common unset CONFIG_logfile unset CONFIG_user unset CONFIG_port config_import_section infosys debug=${CONFIG_debug:-$debug} bdii_user=$CONFIG_user if [ -z "$bdii_user" ]; then # Get ldap user from passwd bdii_user=`getent passwd ldap openldap | sed 's/:.*//;q'` if [ -z "$bdii_user" ]; then log_warning_msg "Warning, could not find ldap or openldap user" log_warning_msg "resorting to using the root user" bdii_user=root fi fi # These values may be set in arc.conf, otherwise use sensible defaults hostname_f=$(hostname -f) hostname=${CONFIG_hostname:-$hostname_f} # Use this command to change user if [ -x /sbin/runuser ]; then RUNUSER=runuser else RUNUSER=su fi # If missing, we have a problem USERSHELL=${USERSHELL:-"/bin/sh"} if [ ! -x ${USERSHELL} ]; then log_failure_msg "Could not find ${USERSHELL}" exit 1 fi bdii_location=${CONFIG_bdii_location:-/usr} giis_location=${CONFIG_giis_location:-$ARC_LOCATION} bdii_update_cmd=${CONFIG_bdii_update_cmd:-${bdii_location}/sbin/bdii-update} if [ ! -e $bdii_update_cmd ]; then log_failure_msg "Can not find bdii-update command at: $bdii_update_cmd." log_failure_msg "Please set bdii_update_cmd in arc.conf" exit 1 fi bdii_debug_level=${CONFIG_bdii_debug_level:-ERROR} bdii_tmp_dir=${CONFIG_bdii_tmp_dir:-/var/tmp/arc/bdii} if grep -q BDII_PID_FILE $bdii_update_cmd ; then bdii_var_dir=${CONFIG_bdii_var_dir:-/var/lib/arc/bdii} bdii_run_dir=${CONFIG_bdii_run_dir:-/var/run/arc/bdii} else bdii_var_dir=${CONFIG_bdii_var_dir:-/var/run/arc/bdii} bdii_run_dir=${bdii_var_dir} fi bdii_log_dir=${CONFIG_bdii_log_dir:-/var/log/arc/bdii} bdii_log_file=${bdii_log_dir}/bdii-update.log mkdir -p $bdii_log_dir bdii_default_ldif_ng=${bdii_tmp_dir}/provider/arc-default.ldif.pl bdii_ldif_dir=${bdii_tmp_dir}/ldif bdii_provider_dir=${bdii_tmp_dir}/provider bdii_plugin_dir=${bdii_tmp_dir}/plugin # Using uppercase characters in bdii_bind will break infosys. 
bdii_bind="o=grid" infosys_nordugrid=${CONFIG_infosys_nordugrid:-"enable"} infosys_glue12=${CONFIG_infosys_glue12:-"disable"} infosys_glue2_ldap=${CONFIG_infosys_glue2_ldap:-"disable"} update_pid_file=${CONFIG_bdii_update_pid_file:-$bdii_run_dir/bdii-update.pid} if [ `id -u` = 0 ]; then # Debian does not have /var/lock/subsys if [ -d /var/lock/subsys ]; then update_lock_file=${update_lock_file:-/var/lock/subsys/$prog} else update_lock_file=${update_lock_file:-/var/lock/$prog} fi else update_lock_file=$HOME/$prog fi chown ${bdii_user}: ${bdii_log_dir} infosys_ldap_run_dir=${CONFIG_infosys_ldap_run_dir:-/var/run/arc/infosys} BDII_CONF=${CONFIG_bdii_conf:-${infosys_ldap_run_dir}/bdii.conf} start () { if [ -r "${update_lock_file}" ]; then log_success_msg "$prog already started" RETVAL=0 return ${RETVAL} fi ${ARC_LOCATION}/@pkgdatasubdir@/create-bdii-config if [ ! $? = 0 ]; then log_failure_msg "Failed to create configuration for $prog" exit 1 fi if [ `id -u` = 0 ]; then $RUNUSER -s "$USERSHELL" -c "${bdii_update_cmd} -c ${BDII_CONF} -d" ${bdii_user} else ${bdii_update_cmd} -c ${BDII_CONF} -d fi touch ${update_lock_file} iterlimit=30 while [ $iterlimit -ge 0 ] && ! [ -r ${update_pid_file} ]; do sleep 1 iterlimit=$(expr $iterlimit - 1) done if [ ! -r ${update_pid_file} ]; then log_failure_msg "$prog failed to start" rm -f ${update_lock_file} RETVAL=1 return ${RETVAL} fi log_success_msg "$prog started" } stop () { if [ ! -r "${update_lock_file}" ]; then log_success_msg "$prog already stopped" RETVAL=0 return ${RETVAL} fi if [ -r "${update_pid_file}" ]; then update_pid=$(cat ${update_pid_file}) ps ${update_pid} >/dev/null 2>&1 if [ ! $? = 0 ]; then log_failure_msg "$prog pid file exists but the process died" RETVAL=1 return ${RETVAL} fi else log_failure_msg "$prog process has no pid file" RETVAL=1 return ${RETVAL} fi killall -u ${bdii_user} -15 arc-nordugrid-bdii-ldif 2>/dev/null if [ -n "${update_pid}" ]; then kill -15 ${update_pid} 2>/dev/null ps ${update_pid} >/dev/null 2>&1 if [ $? = 0 ]; then sleep 2 ps ${update_pid} >/dev/null 2>&1 if [ $? = 0 ]; then kill -9 ${update_pid} 2>/dev/null sleep 2 ps ${update_pid} >/dev/null 2>&1 if [ $? = 0 ]; then RETVAL=1 fi fi fi fi # Clean up rm -f ${infosys_ldap_run_dir}/arc-glue-bdii-ldif if [ ${RETVAL} = 0 ]; then rm -f ${update_pid_file} rm -f ${update_lock_file} log_success_msg "$prog stopped" else log_failure_msg "Could not kill $prog with pid ${update_pid}" fi return ${RETVAL} } status () { if [ ! -f "${update_lock_file}" ]; then log_success_msg "$prog is stopped" RETVAL=3 return ${RETVAL} fi if [ -r ${update_pid_file} ]; then ps $(cat ${update_pid_file}) >/dev/null 2>&1 if [ ! $? 
= 0 ]; then log_failure_msg "$prog pid file exists but the process died" RETVAL=1 return ${RETVAL} fi else log_failure_msg "$prog process has no pid file" RETVAL=2 return ${RETVAL} fi log_success_msg "$prog is running" RETVAL=0 return ${RETVAL} } case "$1" in start) start ;; stop) stop ;; restart | force-reload) stop # avoid race sleep 3 start ;; reload) ;; status) status ;; condrestart | try-restart) if [ -r ${update_lock_file} ]; then stop # avoid race sleep 3 start fi ;; *) echo "Usage: $0 {start|stop|restart|force-reload|reload|condrestart|try-restart|status}" exit 1 ;; esac exit $RETVAL nordugrid-arc-5.4.2/src/services/ldap-infosys/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315734024265 xustar000000000000000030 mtime=1513200604.669070767 30 atime=1513200651.306641163 29 ctime=1513200663.56279106 nordugrid-arc-5.4.2/src/services/ldap-infosys/Makefile.in0000644000175000002070000007647713214315734024361 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/ldap-infosys DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/create-bdii-config.in \ $(srcdir)/create-inforeg-config.in \ $(srcdir)/create-slapd-config.in \ $(srcdir)/nordugrid-arc-bdii.in \ $(srcdir)/nordugrid-arc-egiis.in \ $(srcdir)/nordugrid-arc-inforeg.in \ $(srcdir)/nordugrid-arc-ldap-infosys.in \ $(srcdir)/nordugrid-arc-slapd.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = create-bdii-config create-inforeg-config \ create-slapd-config nordugrid-arc-bdii nordugrid-arc-egiis \ nordugrid-arc-inforeg nordugrid-arc-ldap-infosys \ nordugrid-arc-slapd 
CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(initddir)" "$(DESTDIR)$(pkgdatadir)" \ "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(unitsdir)" SCRIPTS = $(initd_SCRIPTS) $(pkgdata_SCRIPTS) SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive DATA = $(pkgdata_DATA) $(units_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = 
@ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = 
@LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ 
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @GIIS_SERVICE_ENABLED_FALSE@GIIS_SERVICE = @GIIS_SERVICE_ENABLED_TRUE@GIIS_SERVICE = giis @SYSV_SCRIPTS_ENABLED_FALSE@GRID_INFOSYS_SCRIPT = @SYSV_SCRIPTS_ENABLED_TRUE@GRID_INFOSYS_SCRIPT = nordugrid-arc-bdii nordugrid-arc-egiis nordugrid-arc-inforeg nordugrid-arc-slapd nordugrid-arc-ldap-infosys initd_SCRIPTS = $(GRID_INFOSYS_SCRIPT) @SYSTEMD_UNITS_ENABLED_FALSE@GRID_INFOSYS_UNIT = @SYSTEMD_UNITS_ENABLED_TRUE@GRID_INFOSYS_UNIT = nordugrid-arc-bdii.service nordugrid-arc-egiis.service nordugrid-arc-inforeg.service nordugrid-arc-slapd.service units_DATA = $(GRID_INFOSYS_UNIT) pkgdata_DATA = ConfigParser.pm pkgdata_SCRIPTS = create-bdii-config create-inforeg-config create-slapd-config SUBDIRS = $(GIIS_SERVICE) DIST_SUBDIRS = giis EXTRA_DIST = ConfigParser.pm nordugrid-arc-bdii.service nordugrid-arc-egiis.service nordugrid-arc-inforeg.service nordugrid-arc-slapd.service all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/ldap-infosys/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/ldap-infosys/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): create-bdii-config: $(top_builddir)/config.status $(srcdir)/create-bdii-config.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ create-inforeg-config: $(top_builddir)/config.status $(srcdir)/create-inforeg-config.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ create-slapd-config: $(top_builddir)/config.status $(srcdir)/create-slapd-config.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ nordugrid-arc-bdii: $(top_builddir)/config.status $(srcdir)/nordugrid-arc-bdii.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ nordugrid-arc-egiis: $(top_builddir)/config.status $(srcdir)/nordugrid-arc-egiis.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ nordugrid-arc-inforeg: $(top_builddir)/config.status $(srcdir)/nordugrid-arc-inforeg.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ nordugrid-arc-ldap-infosys: $(top_builddir)/config.status $(srcdir)/nordugrid-arc-ldap-infosys.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ nordugrid-arc-slapd: $(top_builddir)/config.status $(srcdir)/nordugrid-arc-slapd.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-initdSCRIPTS: $(initd_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(initddir)" || $(MKDIR_P) "$(DESTDIR)$(initddir)" @list='$(initd_SCRIPTS)'; test -n "$(initddir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(initddir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(initddir)$$dir" || exit $$?; \ } \ ; done uninstall-initdSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(initd_SCRIPTS)'; test -n "$(initddir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(initddir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(initddir)" && rm -f $$files install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 
'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-pkgdataDATA: $(pkgdata_DATA) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files install-unitsDATA: $(units_DATA) @$(NORMAL_INSTALL) test -z "$(unitsdir)" || $(MKDIR_P) "$(DESTDIR)$(unitsdir)" @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(unitsdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(unitsdir)" || exit $$?; \ done uninstall-unitsDATA: @$(NORMAL_UNINSTALL) @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(unitsdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(unitsdir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
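# Example of (2), purely illustrative: 'make initddir=/etc/init.d install-initdSCRIPTS' overrides the initddir # value for that single run, without editing this file or config.status.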
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(SCRIPTS) $(DATA) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(initddir)" "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(unitsdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-initdSCRIPTS install-pkgdataDATA \ install-pkgdataSCRIPTS install-unitsDATA install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-initdSCRIPTS uninstall-pkgdataDATA \ uninstall-pkgdataSCRIPTS uninstall-unitsDATA .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-initdSCRIPTS install-man install-pdf \ install-pdf-am install-pkgdataDATA install-pkgdataSCRIPTS \ install-ps install-ps-am install-strip install-unitsDATA \ installcheck installcheck-am installdirs installdirs-am \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am \ uninstall-initdSCRIPTS uninstall-pkgdataDATA \ uninstall-pkgdataSCRIPTS uninstall-unitsDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/ldap-infosys/PaxHeaders.7502/nordugrid-arc-bdii.service0000644000000000000000000000012412233420743027246 xustar000000000000000027 mtime=1382949347.799513 27 atime=1513200575.759717 30 ctime=1513200663.572791182 nordugrid-arc-5.4.2/src/services/ldap-infosys/nordugrid-arc-bdii.service0000644000175000002070000000062412233420743027315 0ustar00mockbuildmock00000000000000[Unit] Description=NorduGrid/ARC local resource information system Requires=nordugrid-arc-slapd.service After=nordugrid-arc-slapd.service BindsTo=nordugrid-arc-slapd.service [Service] Type=forking PIDFile=/var/run/arc/bdii/bdii-update.pid User=ldap ExecStartPre=/usr/share/arc/create-bdii-config ExecStart=/usr/sbin/bdii-update -c /var/run/arc/infosys/bdii.conf -d [Install] WantedBy=multi-user.target nordugrid-arc-5.4.2/src/services/ldap-infosys/PaxHeaders.7502/nordugrid-arc-ldap-infosys.in0000644000000000000000000000012712534011266027720 xustar000000000000000027 mtime=1433408182.001174 30 atime=1513200651.418642532 30 ctime=1513200663.569791146 nordugrid-arc-5.4.2/src/services/ldap-infosys/nordugrid-arc-ldap-infosys.in0000644000175000002070000000233312534011266027763 0ustar00mockbuildmock00000000000000#!/bin/bash # # Init file for the NorduGrid/ARC LDAP based Information System # # This script offers an alternative to the per-service init scripts # It offers a convenient way to start all the infosys services in # a signle command # # chkconfig: - 80 20 # description: NorduGrid/ARC Information system # ###################################################################### ### BEGIN INIT INFO # Provides: nordugrid-arc-ldap-infosys # Required-Start: $remote_fs $syslog # Required-Stop: $remote_fs $syslog # Default-Stop: 0 1 2 3 4 5 6 # Short-Description: NorduGrid/ARC Information LDAP Information System # Description: Init file for the NorduGrid/ARC LDAP Information System ### END INIT INFO prefix=${ARC_LOCATION:-@prefix@} case "$1" in start | stop | restart | force-reload | reload | status | condrestart | try-restart) [ -x @initddir@/nordugrid-arc-slapd ] && \ @initddir@/nordugrid-arc-slapd "$1" [ -x @initddir@/nordugrid-arc-bdii ] && \ @initddir@/nordugrid-arc-bdii "$1" [ -x @initddir@/nordugrid-arc-egiis ] && \ @initddir@/nordugrid-arc-egiis "$1" exit 0 ;; *) echo "Usage: $0 {start|stop|restart|force-reload|reload|condrestart|try-restart|status}" exit 1 ;; esac nordugrid-arc-5.4.2/src/services/ldap-infosys/PaxHeaders.7502/nordugrid-arc-inforeg.service0000644000000000000000000000012412233420743027770 xustar000000000000000027 mtime=1382949347.799513 27 atime=1513200575.737716 30 ctime=1513200663.575791219 nordugrid-arc-5.4.2/src/services/ldap-infosys/nordugrid-arc-inforeg.service0000644000175000002070000000042412233420743030035 0ustar00mockbuildmock00000000000000[Unit] Description=NorduGrid/ARC Information Endpoint Registration [Service] Type=forking PIDFile=/var/run/nordugrid-arc-inforeg.pid ExecStartPre=/usr/share/arc/create-inforeg-config ExecStart=/usr/share/arc/grid-info-soft-register-wrap [Install] WantedBy=multi-user.target nordugrid-arc-5.4.2/src/services/ldap-infosys/PaxHeaders.7502/ConfigParser.pm0000644000000000000000000000012311530765363025147 xustar000000000000000027 mtime=1298393843.859243 27 atime=1513200575.755717 29 ctime=1513200663.57179117 nordugrid-arc-5.4.2/src/services/ldap-infosys/ConfigParser.pm0000644000175000002070000002551711530765363025227 0ustar00mockbuildmock00000000000000package ConfigParser; use strict; use warnings; # Configuration parser classes for 
arc.conf ###### ConfigParser # # Synopsis: # # use ConfigParser; # # my $parser = ConfigParser->new("/etc/arc.conf") # or die 'Cannot parse config file'; # # my %common = $parser->get_section('common'); # get hash with all options in a section # my %queue = $parser->get_section('queue/atlas'); # # print $parser->list_subsections('gridftpd'); # list all subsections of 'gridftpd', but not # # the 'gridftpd' section itself # # my %gmopts = $parser->get_section('grid-manager'); # gm options which are not user-specific # my %useropts = $parser->get_section('grid-manager/.'); # gm options for the default user (this # # section is instantiated automatically # # if the controldir command was used) # # The [grid-manager] section is treated specially. Options that are # user-specific are put in separate pseudo-sections [grid-manager/]. # reffers to the user that is initiated by a 'control' command. The # 'controldir' command initiates user '.'. Each pseudo-section has it's own # 'controldir' option. Other user-specific options are: 'sessiondir', 'cachedir', # 'remotecachedir', 'cachesize', 'cachelifetime', 'norootpower', 'maxrerun', # 'maxtransferfiles' and 'defaultttl'. No substituions are made and user names # '.' and '*' are not handled specially. # ###### SubstitutingConfigParser # # Synopsis: # # use ConfigParser; # # my $parser = SubstitutingConfigParser->new("/etc/arc.conf") # or die 'Cannot parse config file'; # # This class is just like ConfigParser, but substitutions are made and sections # for user names like @filename are expanded into separate sections for each # individual user. # sub new($$) { my ($this,$arcconf) = @_; my $class = ref($this) || $this; open(my $fh, "< $arcconf") || return undef; my $self = { config => {} }; bless $self, $class; $self->{config} = _parse($fh); close($fh); return $self; } # Expects the filename of the arc.conf file. # Returns false if it cannot open the file. sub _parse($) { my ($fh) = @_; my $config = {}; # current section my $section = Section->new('common'); while (my $line =<$fh>) { # skip comments and empty lines next if $line =~/^\s*;/; next if $line =~/^\s*#/; next if $line =~/^\s*$/; # new section starts here if ($line =~ /^\s*\[([\w\-\.\/]+)\]\s*$/) { my $sname = $1; $section->register($config); if ($sname =~ m/^vo/) { $section = SelfNamingSection->new($sname,'id'); } elsif ($sname =~ m/^group/) { $section = SelfNamingSection->new($sname,'name'); } elsif ($sname =~ m/^queue/) { $section = SelfNamingSection->new($sname,'name'); } elsif ($sname eq 'grid-manager') { $section = GMSection->new($sname); } else { $section = Section->new($sname); } # single or double quotes can be used. Quotes are removed from the values } elsif ($line =~ /^(\w+)\s*=\s*(["']?)(.*)(\2)\s*$/) { my ($opt,$val) = ($1,$3); $section->add($opt,$val); # bad line, ignore it for now } else { } } $section->register($config); delete $config->{common} unless %{$config->{common}}; return $config; } # Returns a hash with all options defined in a section. If the section does not # exist, it returns an empty hash sub get_section($$) { my ($self,$sname) = @_; return $self->{config}{$sname} ? 
%{$self->{config}{$sname}} : (); } # Returns the list of all sections sub list_sections($) { my ($self) = @_; return keys %{$self->{config}}; } sub has_section($$) { my ($self,$sname) = @_; return defined $self->{config}{$sname}; } # list all subsections of a section, but not the section section itself sub list_subsections($$) { my ($self,$sname) = @_; my %ssnames = (); for (keys %{$self->{config}}) { $ssnames{$1}='' if m|^$sname/(.+)|; } return keys %ssnames; } 1; ######################################################## package SubstitutingConfigParser; use base "ConfigParser"; sub new($$$) { my ($this,$arcconf,$arc_location) = @_; my $self = $this->SUPER::new($arcconf); return undef unless $self; _substitute($self, $arc_location); return $self; } sub _substitute { my ($self, $arc_location) = @_; my $config = $self->{config}; my $lrmsstring = $config->{'grid-manager'}{lrms} || $config->{common}{lrms}; my ($lrms, $defqueue) = split " ", $lrmsstring || ''; die 'Gridmap user list feature is not supported anymore. Please use @filename to specify user list.' if $config->{'grid-manager/*'}; # expand user sections whose user name is like @filename my @users = $self->list_subsections('grid-manager'); for my $user (@users) { my $section = "grid-manager/$user"; next unless $user =~ m/^\@(.*)$/; my $path = $1; my $fh; # read in user names from file if (open ($fh, "< $path")) { while (my $line = <$fh>) { chomp (my $newsection = "grid-manager/$line"); next if exists $config->{$newsection}; # Duplicate user!!!! $config->{$newsection} = { %{$config->{$section}} }; # shallow copy } close $fh; delete $config->{$section}; } else { die "Failed opening file to read user list from: $path: $!"; } } # substitute per-user options @users = $self->list_subsections('grid-manager'); for my $user (@users) { my @pw; my $home; if ($user ne '.') { @pw = getpwnam($user); die "getpwnam failed for user: $user: $!" 
unless @pw; $home = $pw[7]; } else { $home = "/tmp"; } my $opts = $config->{"grid-manager/$user"}; # Default for controldir, sessiondir if ($opts->{controldir} eq '*') { $opts->{controldir} = $pw[7]."/.jobstatus" if @pw; } if (not $opts->{sessiondir} or $opts->{sessiondir} eq '*') { $opts->{sessiondir} = "$home/.jobs"; } my $controldir = $opts->{controldir}; my @sessiondirs = split /\[separator\]/, $opts->{sessiondir}; my $substitute_opt = sub { my ($key) = @_; my $val = $opts->{$key}; return unless defined $val; # %R - session root $val =~ s/%R/$sessiondirs[0]/g if $val =~ m/%R/; # %C - control dir $val =~ s/%C/$controldir/g if $val =~ m/%C/; if (@pw) { # %U - username $val =~ s/%U/$user/g if $val =~ m/%U/; # %u - userid # %g - groupid # %H - home dir $val =~ s/%u/$pw[2]/g if $val =~ m/%u/; $val =~ s/%g/$pw[3]/g if $val =~ m/%g/; $val =~ s/%H/$home/g if $val =~ m/%H/; } # %L - default lrms # %Q - default queue $val =~ s/%L/$lrms/g if $val =~ m/%L/; $val =~ s/%Q/$defqueue/g if $val =~ m/%Q/; # %W - installation path $val =~ s/%W/$arc_location/g if $val =~ m/%W/; # %G - globus path my $G = $ENV{GLOBUS_LOCATION} || '/usr'; $val =~ s/%G/$G/g if $val =~ m/%G/; $opts->{$key} = $val; }; &$substitute_opt('controldir'); &$substitute_opt('sessiondir'); &$substitute_opt('cachedir'); &$substitute_opt('remotecachedir'); } # authplugin, localcred, helper: not substituted } 1; ######################################################## package Section; sub new($$) { my ($this,$name) = @_; my $class = ref($this) || $this; my $self = { name => $name, data => {} }; bless $self, $class; return $self; } sub add($$$) { my ($self,$opt,$val) = @_; my $data = $self->{data}; my $old = $data->{$opt}; $data->{$opt} = $old ? $old."[separator]".$val : $val; } sub register($$) { my ($self,$config) = @_; my $name = $self->{name}; my $orig = $config->{$name} || {}; my $new = $self->{data}; $config->{$name} = { %$orig, %$new }; } 1; ######################################################## package SelfNamingSection; use base "Section"; sub new($$$) { my ($this,$name,$nameopt) = @_; my $self = $this->SUPER::new($name); $self->{nameopt} = $nameopt; return $self; } sub add($$$) { my ($self,$opt,$val) = @_; if ($opt eq $self->{nameopt}) { $self->{name} =~ s|(/[^/]+)?$|/$val|; } else { $self->SUPER::add($opt,$val); } } 1; ######################################################## package GMSection; use base "Section"; sub new($) { my ($this) = @_; my $self = $this->SUPER::new('grid-manager'); # OBS sessiondir is not treated $self->{muopts} = [qw(sessiondir cachedir remotecachedir)]; $self->{suopts} = [qw(cachesize cachelifetime norootpower maxrerun maxtransferfiles defaultttl)]; $self->{thisuser} = {}; $self->{allusers} = {}; $self->{controldir} = undef; return $self; } sub add($$$) { my ($self,$opt,$val) = @_; my $thisuser = $self->{thisuser}; if ($opt eq 'controldir') { $self->{controldir} = $val; } elsif ($opt eq 'control') { my ($dir, @usernames) = split /\s+/, $val; $thisuser->{controldir} = $dir; $self->{allusers}{$_} = $thisuser for @usernames; $thisuser = $self->{thisuser} = {%$thisuser}; # make copy delete $thisuser->{$_} for @{$self->{muopts}}; } elsif (grep {$opt eq $_} @{$self->{muopts}}) { my $old = $thisuser->{$opt}; $thisuser->{$opt} = $old ? 
$old."[separator]".$val : $val; } elsif (grep {$opt eq $_} @{$self->{suopts}}) { $thisuser->{$opt} = $val; } else { $self->SUPER::add($opt,$val); } } sub register($$) { my ($self,$config) = @_; my $dir = $self->{controldir}; if ($dir) { my $thisuser = $self->{thisuser}; $thisuser->{controldir} = $dir; $self->{allusers}{'.'} = $thisuser; } my $allusers = $self->{allusers}; $config->{"grid-manager/$_"} = $allusers->{$_} for keys %$allusers; $self->SUPER::register($config); } sub test { require Data::Dumper; import Data::Dumper qw(Dumper); my $parser = SubstitutingConfigParser->new('/tmp/arc.conf','/usr') or die; print Dumper($parser); print "@{[$parser->list_subsections('gridftpd')]}\n"; print "@{[$parser->list_subsections('group')]}\n"; } #test(); 1; nordugrid-arc-5.4.2/src/services/ldap-infosys/PaxHeaders.7502/nordugrid-arc-egiis.service0000644000000000000000000000012412233420743027437 xustar000000000000000027 mtime=1382949347.799513 27 atime=1513200575.756717 30 ctime=1513200663.574791207 nordugrid-arc-5.4.2/src/services/ldap-infosys/nordugrid-arc-egiis.service0000644000175000002070000000063412233420743027507 0ustar00mockbuildmock00000000000000[Unit] Description=NorduGrid/ARC Grid Information Index Server (EGIIS) Requires=nordugrid-arc-slapd.service After=nordugrid-arc-slapd.service [Service] Type=forking User=ldap ExecStart=/usr/sbin/arc-infoindex-server -c /etc/arc.conf -f /var/run/arc/infosys/giis-fifo ExecStop=/bin/sh -c '[ -p /var/run/arc/infosys/giis-fifo ] && echo STOP > /var/run/arc/infosys/giis-fifo' [Install] WantedBy=multi-user.target nordugrid-arc-5.4.2/src/services/ldap-infosys/PaxHeaders.7502/create-bdii-config.in0000644000000000000000000000012713065020220026153 xustar000000000000000027 mtime=1490296976.424864 30 atime=1513200651.321641346 30 ctime=1513200663.563791072 nordugrid-arc-5.4.2/src/services/ldap-infosys/create-bdii-config.in0000644000175000002070000003030013065020220026211 0ustar00mockbuildmock00000000000000#!/bin/bash # Create bdii config for the NorduGrid/ARC information system ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then echo "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi # Source the config parsing routines if [ -r "$ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh" ]; then . $ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh || exit $? else echo "Could not find $ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh" exit 1 fi ARC_CONFIG=${ARC_CONFIG:-/etc/arc.conf} if [ ! -r "$ARC_CONFIG" ]; then echo "ARC configuration file ($ARC_CONFIG) not found" echo "If this file is in a non-standard place it can be set with the" echo " ARC_CONFIG environment variable" exit 1 fi # Read arc.conf config_parse_file $ARC_CONFIG || exit $? # Check for infosys-block if ! 
config_match_section infosys; then echo "Missing infosys configuration block" exit 1 fi config_hide_all config_import_section common # These options need to come from the infosys-block, not from common unset CONFIG_logfile unset CONFIG_user unset CONFIG_port config_import_section infosys bdii_user=$CONFIG_user if [ -z "$bdii_user" ]; then # Get ldap user from passwd bdii_user=`getent passwd ldap openldap | sed 's/:.*//;q'` if [ -z "$bdii_user" ]; then echo "Warning, could not find ldap or openldap user" echo "resorting to using the root user" bdii_user=root fi fi # These values may be set in arc.conf, otherwise use sensible defaults hostname_f=$(hostname -f) hostname=${CONFIG_hostname:-$hostname_f} providerlog=${CONFIG_providerlog:-/var/log/arc/infoprovider.log} bdii_location=${CONFIG_bdii_location:-/usr} bdii_update_cmd=${CONFIG_bdii_update_cmd:-${bdii_location}/sbin/bdii-update} if [ ! -e $bdii_update_cmd ]; then echo "Can not find bdii-update command at: $bdii_update_cmd." echo "Please set bdii_update_cmd in arc.conf" exit 1 fi infosys_ldap_run_dir=${CONFIG_infosys_ldap_run_dir:-/var/run/arc/infosys} mkdir -p ${infosys_ldap_run_dir} chown ${bdii_user}: ${infosys_ldap_run_dir} bdii_debug_level=${CONFIG_bdii_debug_level:-ERROR} bdii_tmp_dir=${CONFIG_bdii_tmp_dir:-/var/tmp/arc/bdii} if grep -q BDII_PID_FILE $bdii_update_cmd ; then bdii_var_dir=${CONFIG_bdii_var_dir:-/var/lib/arc/bdii} bdii_run_dir=${CONFIG_bdii_run_dir:-/var/run/arc/bdii} else bdii_var_dir=${CONFIG_bdii_var_dir:-/var/run/arc/bdii} bdii_run_dir=${bdii_var_dir} fi bdii_log_dir=${CONFIG_bdii_log_dir:-/var/log/arc/bdii} bdii_log_file="${bdii_log_dir}/bdii-update.log" bdii_slapd_conf=${infosys_ldap_run_dir}/bdii-slapd.conf bdii_default_ldif=${bdii_tmp_dir}/provider/arc-default.ldif.pl bdii_ldif_dir=${bdii_tmp_dir}/ldif bdii_provider_dir=${bdii_tmp_dir}/provider bdii_plugin_dir=${bdii_tmp_dir}/plugin bdii_port=${CONFIG_port:-2135} # Using uppercase characters in bdii_bind will break infosys. bdii_bind="o=grid" infosys_nordugrid=${CONFIG_infosys_nordugrid:-"enable"} infosys_glue12=${CONFIG_infosys_glue12:-"disable"} infosys_glue2_ldap=${CONFIG_infosys_glue2_ldap:-"disable"} # $provider_timeout refers to the time bdii waits for the provider output to complete. provider_timeout=${CONFIG_provider_timeout:-10800} # $infoproviders_timeout is a-rex's infoproviders timeout. infoproviders_timeout=${CONFIG_infoproviders_timeout:-10800} # gm_wakeupperiod is the time a-rex waits before running infoproviders again. gm_wakeupperiod=$(config_print_option grid-manager wakeupperiod) gm_wakeupperiod=${gm_wakeupperiod:-120} bdii_archive_size=${CONFIG_bdii_archive_size:-0} # The infoprovider does the waiting, no need for BDII to do it too. Use # some small timeout to protect the system in case there is a problem with # the provier # OBS: I think this might not be used by BDII5 anymore. 
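# For reference (assuming none of the defaults above are overridden in arc.conf): max_cycle below evaluates to # 10800 + 10800 + 120 = 21720 seconds, i.e. roughly six hours, which is also the default BDII_READ_TIMEOUT # written into the generated bdii.conf.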
bdii_breathe_time=${CONFIG_bdii_breathe_time:-10} # max_cycle is the time bdii will trust the content of any provider to be fresh enough max_cycle=$(( $provider_timeout + $infoproviders_timeout + $gm_wakeupperiod )) bdii_read_timeout=${CONFIG_bdii_read_timeout:-$max_cycle} bdii_delete_delay=${CONFIG_bdii_delete_delay:-0} update_pid_file=${CONFIG_bdii_update_pid_file:-$bdii_run_dir/bdii-update.pid} # Debian does not have /var/lock/subsys if [ -d /var/lock/subsys ]; then update_lock_file=${update_lock_file:-/var/lock/subsys/arc-bdii-update} else update_lock_file=${update_lock_file:-/var/lock/arc-bdii-update} fi # Check directories and permissions mkdir -p `dirname $providerlog` touch ${providerlog} chown ${bdii_user}: ${providerlog} mkdir -p $bdii_log_dir chown -R ${bdii_user}: ${bdii_log_dir} # If the new code path is selected... if [ "x$infosys_nordugrid" = "xenable" ] || \ [ "x$infosys_glue12" = "xenable" ] || \ [ "x$infosys_glue2_ldap" = "xenable" ]; then if [ ! -f "$ARC_LOCATION/@pkgdatasubdir@/InfosysHelper.pm" ]; then echo "InfosysHelper.pm not found. Is A-REX installed?" echo "For operation without A-REX, disable publishing of cluster information" echo "(infosys_nordugrid, infosys_glue12 and infosys_glue2_ldap)" exit 1 fi fi BDII_CONF=${CONFIG_bdii_conf:-${infosys_ldap_run_dir}/bdii.conf} resource_location="" resource_latitude="" resource_longitude="" cpuscalingreferencesi00="" processorotherdescription="" gluesiteweb="" gluesiteuniqueid="" provide_glue_site_info="true" if [ "x$infosys_glue12" = "xenable" ]; then if ! config_match_section infosys/glue12 ; then echo "infosys_glue12 is set to enable, but infosys/glue12 block is missing" exit 1 fi config_import_section infosys/glue12 resource_location=${CONFIG_resource_location} resource_latitude=${CONFIG_resource_latitude} resource_longitude=${CONFIG_resource_longitude} cpuscalingreferencesi00=${CONFIG_cpu_scaling_reference_si00} processorotherdescription=${CONFIG_processor_other_description} gluesiteweb=${CONFIG_glue_site_web} gluesiteuniqueid=${CONFIG_glue_site_unique_id} provide_glue_site_info=${CONFIG_provide_glue_site_info:-$provide_glue_site_info} if [ "x$resource_location" = "x" ]; then echo "If infosys_glue12 is enabled, then resource_location must be set." echo "It should be set to a free-form string describing the location," echo "for example: 'Kastrup, Denmark'" exit 1 fi if [[ "x$resource_location" =~ "/" ]]; then echo "WARNING: wrong location format. Please do NOT use slashes / ." echo "It should be set to a free-form string describing the location," echo "for example: 'Kastrup, Denmark'" exit 1 fi if [ "x$resource_latitude" = "x" ]; then echo "If infosys_glue12 is enabled, then resource_latitude must be set." echo "It should be set to the latitude for the location," echo "for example: '55.75000'" exit 1 fi if [ "x$resource_longitude" = "x" ]; then echo "If infosys_glue12 is enabled, then resource_longitude must be set." echo "It should be set to the longitude for the location," echo "for example: '12.41670'" exit 1 fi if [ "x$cpuscalingreferencesi00" = "x" ]; then echo "If infosys_glue12 is enabled, then cpu_scaling_reference_si00 must be set." echo "It should be set to the SI00 value," echo "for example: '2400'" exit 1 fi if [ "x$processorotherdescription" = "x" ]; then echo "If infosys_glue12 is enabled, then processor_other_description must be set." 
echo "It should be set to a value like in the example,where cores is the average number" echo "of cores in the machine" echo "for example: 'Cores=3,Benchmark=9.8-HEP-SPEC06'" exit 1 fi if [ "x$gluesiteweb" = "x" ]; then echo "If infosys_glue12 is enabled, then glue_site_web must be set." echo "It should be set to a url for the website belonging to the institution holding the resource," echo "for example: 'http://www.ndgf.org'" exit 1 fi if [ "x$gluesiteuniqueid" = "x" ]; then echo "If infosys_glue12 is enabled, then glue_site_unique_id must be set." echo "It should be set to a unique id to the resource, this should be entered into the GocDb" echo "for example: 'NDGF-T1'" exit 1 fi fi # Create directories for storing temporary scripts and check permissions etc mkdir -p $bdii_var_dir mkdir -p $bdii_run_dir mkdir -p $bdii_tmp_dir mkdir -p $bdii_tmp_dir/ldif mkdir -p $bdii_tmp_dir/provider mkdir -p $bdii_tmp_dir/plugin # change permissions if user is not root chown -R ${bdii_user}: ${bdii_var_dir} chown -R ${bdii_user}: ${bdii_run_dir} chown -R ${bdii_user}: ${bdii_tmp_dir} # Generate bdii configuration rm -f ${BDII_CONF} cat <<-EOF >> ${BDII_CONF} # This file was automatically generated by $0 # Do not modify BDII_LOG_FILE=$bdii_log_file BDII_PID_FILE=$update_pid_file BDII_LOG_LEVEL=$bdii_debug_level BDII_LDIF_DIR=$bdii_ldif_dir BDII_PROVIDER_DIR=$bdii_provider_dir BDII_PLUGIN_DIR=$bdii_plugin_dir BDII_PORT=$bdii_port BDII_BREATHE_TIME=$bdii_breathe_time BDII_READ_TIMEOUT=$bdii_read_timeout BDII_ARCHIVE_SIZE=$bdii_archive_size BDII_DELETE_DELAY=$bdii_delete_delay BDII_USER=$bdii_user BDII_VAR_DIR=$bdii_var_dir BDII_RUN_DIR=$bdii_run_dir BDII_BIND=$bdii_bind SLAPD_CONF=$bdii_slapd_conf EOF # Generate default ldif cat <<-EOF > $bdii_default_ldif #!/usr/bin/perl # This file was automatically generated by $0 # Do not modify use POSIX; print "\n"; print "dn: o=grid\n"; print "objectClass: organization\n"; print "o: grid\n"; print "\n"; print "dn: Mds-Vo-name=local,o=grid\n"; print "objectClass: Mds\n"; print "Mds-Vo-name: local\n"; print "Mds-validfrom: " . strftime("%Y%m%d%H%M%SZ\n", gmtime()); print "Mds-validto: " . strftime("%Y%m%d%H%M%SZ\n", gmtime(time() + 3600)); print "\n"; print "dn: Mds-Vo-name=resource,o=grid\n"; print "objectClass: Mds\n"; print "Mds-Vo-name: resource\n"; print "Mds-validfrom: " . strftime("%Y%m%d%H%M%SZ\n", gmtime()); print "Mds-validto: " . 
strftime("%Y%m%d%H%M%SZ\n", gmtime(time() + 3600)); print "\n"; print "dn: o=glue\n"; print "objectClass: organization\n"; print "o: glue\n"; EOF chmod +x $bdii_default_ldif # Create ARC ldif generator file ldif_generator_file=${bdii_tmp_dir}/provider/arc-nordugrid-bdii-ldif rm -f ${ldif_generator_file} touch ${ldif_generator_file} ldif_glue12_generator=${infosys_ldap_run_dir}/arc-glue-bdii-ldif ldif_script=${infosys_ldap_run_dir}/ldif-provider.sh cat <<-EOF > ${ldif_generator_file} #!/usr/bin/perl # This file was automatically generated by the $0 # Do not modify EOF # NG and GLUE2 come directly from a-rex infoprovider cat <<-EOF >> ${ldif_generator_file} BEGIN { unshift @INC, '$ARC_LOCATION/@pkgdatasubdir@'; } use InfosysHelper; exit 1 unless InfosysHelper::ldifIsReady('$infosys_ldap_run_dir', '$max_cycle'); EOF if [ "x$infosys_nordugrid" = "xenable" ] || \ [ "x$infosys_glue2_ldap" = "xenable" ]; then echo "system('$ldif_script');" >> ${ldif_generator_file} fi if [ "x$infosys_glue12" = "xenable" ]; then ldif_generator_file_ng=${bdii_tmp_dir}/provider/arc-nordugrid-bdii-ldif ldif_generator_file_glue=${bdii_tmp_dir}/provider/arc-glue-bdii-ldif rm -f ${ldif_generator_file_glue} touch ${ldif_generator_file_glue} # We use , instead of / here to allow for / in path # resource_location though, can contain commas.. sed "s,\$LDIF_GENERATOR_FILE_NG,$ldif_generator_file_ng,g; s/\$LOC/\"$resource_location\"/g; s/\$LAT/$resource_latitude/g; s/\$LONG/$resource_longitude/g; s/\$CPUSCALINGREFERENCESI00/$cpuscalingreferencesi00/g; s/\$PROCESSOROTHERDESCRIPTION/$processorotherdescription/g; s,\$GLUESITEWEB,$gluesiteweb,g; s,\$BDIIPORT,$bdii_port,g; s,\$GLUESITEUNIQUEID,$gluesiteuniqueid,g; s,\$PROVIDE_GLUE_SITE_INFO,$provide_glue_site_info,g; " $ARC_LOCATION/@pkgdatasubdir@/glue-generator.pl > ${ldif_generator_file_glue} chmod +x ${ldif_generator_file_glue} echo "system('$ldif_glue12_generator');" >> ${ldif_generator_file} fi chmod +x ${ldif_generator_file} # Site BDII for site_bdii in `config_subsections infosys/site`; do ( config_import_section infosys/site/$site_bdii unique_id=${CONFIG_unique_id:-$site_bdii} site_config="${bdii_tmp_dir}/${site_bdii}.conf" site_provider="$bdii_provider_dir/site_${site_bdii}.sh" url=${CONFIG_url} echo "$unique_id $url" > "$site_config" # Create script and make glite provider use arc directories cat <<-EOF > $site_provider #!/bin/sh export GLITE_LOCATION_VAR=${bdii_tmp_dir} $ARC_LOCATION/@pkgdatasubdir@/glite-info-provider-ldap -m "$site_bdii" -c $site_config EOF chmod +x $site_provider ) done nordugrid-arc-5.4.2/src/services/ldap-infosys/PaxHeaders.7502/nordugrid-arc-slapd.in0000644000000000000000000000012712457660467026434 xustar000000000000000027 mtime=1421828407.015915 30 atime=1513200651.434642728 30 ctime=1513200663.570791158 nordugrid-arc-5.4.2/src/services/ldap-infosys/nordugrid-arc-slapd.in0000644000175000002070000001430312457660467026477 0ustar00mockbuildmock00000000000000#!/bin/bash # # Init file for the NorduGrid/ARC LDAP based Information System # # chkconfig: 2345 75 25 # description: NorduGrid/ARC Information system # # config: /etc/sysconfig/nordugrid # config: /etc/sysconfig/nordugrid-arc-slapd # config: /etc/arc.conf # ###################################################################### ### BEGIN INIT INFO # Provides: nordugrid-arc-slapd # Required-Start: $remote_fs $syslog # Required-Stop: $remote_fs $syslog # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: NorduGrid/ARC Information system # Description: NorduGrid/ARC LDAP based 
Information System ### END INIT INFO # Helper functions if [ -r /etc/init.d/functions ]; then . /etc/init.d/functions log_success_msg() { echo -n "$@" success "$@" echo } log_warning_msg() { echo -n "$@" warning "$@" echo } log_failure_msg() { echo -n "$@" failure "$@" echo } elif [ -r /lib/lsb/init-functions ]; then . /lib/lsb/init-functions else echo "Error: Cannot source neither init.d nor lsb functions" exit 1 fi RETVAL=0 prog=nordugrid-arc-slapd RUN=yes # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/$prog ]; then . /etc/sysconfig/$prog elif [ -r /etc/default/$prog ]; then . /etc/default/$prog fi if [ "x$RUN" != "xyes" ]; then log_warning_msg "$prog disabled, please adjust the configuration to your" log_warning_msg "needs and then set RUN to 'yes' in /etc/default/$prog to enable it." exit 0 fi [ -n "$ARC_LOCATION" ] && export ARC_LOCATION [ -n "$ARC_CONFIG" ] && export ARC_CONFIG ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then log_failure_msg "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi # Source the config parsing routines if [ -r "$ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh" ]; then . $ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh || exit $? else log_failure_msg "Could not find $ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh" exit 1 fi ARC_CONFIG=${ARC_CONFIG:-/etc/arc.conf} if [ ! -r "$ARC_CONFIG" ]; then log_failure_msg "ARC configuration file ($ARC_CONFIG) not found" log_failure_msg "If this file is in a non-standard place it can be set" log_failure_msg " with the ARC_CONFIG environment variable" exit 1 fi # Read arc.conf config_parse_file $ARC_CONFIG || exit $? # Check for infosys-block if ! config_match_section infosys; then log_failure_msg "Missing infosys configuration block" exit 1 fi config_hide_all config_import_section common config_import_section infosys bdii_location=${CONFIG_bdii_location:-/usr} bdii_update_cmd=${CONFIG_bdii_update_cmd:-${bdii_location}/sbin/bdii-update} if [ ! -x $bdii_update_cmd ] || grep -q BDII_PID_FILE $bdii_update_cmd; then bdii_var_dir=${CONFIG_bdii_var_dir:-/var/lib/arc/bdii} bdii_run_dir=${CONFIG_bdii_run_dir:-/var/run/arc/bdii} else bdii_var_dir=${CONFIG_bdii_var_dir:-/var/run/arc/bdii} bdii_run_dir=${bdii_var_dir} fi slapd_pid_file=${bdii_run_dir}/db/slapd.pid infosys_ldap_run_dir=${CONFIG_infosys_ldap_run_dir:-/var/run/arc/infosys} if [ `id -u` = 0 ]; then # Debian does not have /var/lock/subsys if [ -d /var/lock/subsys ]; then slapd_lock_file=${slapd_lock_file:-/var/lock/subsys/$prog} else slapd_lock_file=${slapd_lock_file:-/var/lock/$prog} fi else slapd_lock_file=$HOME/$prog fi start () { if [ -r "${slapd_lock_file}" ]; then log_success_msg "$prog already started" RETVAL=0 return ${RETVAL} fi ${ARC_LOCATION}/@pkgdatasubdir@/create-slapd-config if [ ! $? = 0 ]; then log_failure_msg "Failed to create configuration for $prog" exit 1 fi /bin/sh ${infosys_ldap_run_dir}/bdii-slapd.cmd touch ${slapd_lock_file} iterlimit=30 while [ $iterlimit -ge 0 ] && ! [ -r ${slapd_pid_file} ]; do sleep 1 iterlimit=$(expr $iterlimit - 1) done if ! [ -r "${slapd_pid_file}" ]; then log_failure_msg "$prog failed to start" rm -f ${slapd_lock_file} RETVAL=1 return ${RETVAL} fi log_success_msg "$prog started" prefix=${ARC_LOCATION:-@prefix@} if [ -x @initddir@/nordugrid-arc-bdii ] ; then @initddir@/nordugrid-arc-bdii condrestart fi } stop () { if [ ! 
-r "${slapd_lock_file}" ]; then log_success_msg "$prog already stopped" RETVAL=0 return ${RETVAL} fi if [ -r "${slapd_pid_file}" ]; then slapd_pid=$(cat ${slapd_pid_file}) ps ${slapd_pid} >/dev/null 2>&1 if [ ! $? = 0 ]; then log_failure_msg "$prog pid file exists but the process died" RETVAL=1 return ${RETVAL} fi else log_failure_msg "$prog process has no pid file" RETVAL=1 return ${RETVAL} fi if [ -n "${slapd_pid}" ]; then kill -15 ${slapd_pid} 2>/dev/null ps ${slapd_pid} >/dev/null 2>&1 if [ $? = 0 ]; then sleep 2 ps ${slapd_pid} >/dev/null 2>&1 if [ $? = 0 ]; then kill -9 ${slapd_pid} 2>/dev/null sleep 2 ps ${slapd_pid} >/dev/null 2>&1 if [ $? = 0 ]; then RETVAL=1 fi fi fi fi if [ ${RETVAL} = 0 ]; then rm -f ${slapd_pid_file} rm -f ${slapd_lock_file} log_success_msg "$prog stopped" else log_failure_msg "Could not stop $prog with pid: $slapd_pid" fi return ${RETVAL} } status () { if [ ! -r "${slapd_lock_file}" ]; then log_success_msg "$prog is stopped" RETVAL=3 return ${RETVAL} fi if [ -r ${slapd_pid_file} ]; then ps $(cat ${slapd_pid_file}) >/dev/null 2>&1 if [ ! $? = 0 ]; then log_failure_msg "$prog pid file exists but the process died" RETVAL=1 return ${RETVAL} fi else log_failure_msg "$prog process has no pid file" RETVAL=2 return ${RETVAL} fi log_success_msg "$prog is running" RETVAL=0 return ${RETVAL} } case "$1" in start) start ;; stop) stop ;; restart | force-reload) stop # avoid race sleep 3 start ;; reload) ;; status) status ;; condrestart | try-restart) if [ -r ${slapd_lock_file} ]; then stop # avoid race sleep 3 start fi ;; *) echo "Usage: $0 {start|stop|restart|force-reload|reload|condrestart|try-restart|status}" exit 1 ;; esac exit $RETVAL nordugrid-arc-5.4.2/src/services/ldap-infosys/PaxHeaders.7502/nordugrid-arc-slapd.service0000644000000000000000000000012412233420743027442 xustar000000000000000027 mtime=1382949347.799513 27 atime=1513200575.756717 30 ctime=1513200663.576791231 nordugrid-arc-5.4.2/src/services/ldap-infosys/nordugrid-arc-slapd.service0000644000175000002070000000044212233420743027507 0ustar00mockbuildmock00000000000000[Unit] Description=NorduGrid/ARC Information system After=syslog.target network.target [Service] Type=forking PIDFile=/var/run/arc/bdii/db/slapd.pid ExecStartPre=/usr/share/arc/create-slapd-config ExecStart=/bin/sh /var/run/arc/infosys/bdii-slapd.cmd [Install] WantedBy=multi-user.target nordugrid-arc-5.4.2/src/services/ldap-infosys/PaxHeaders.7502/nordugrid-arc-inforeg.in0000644000000000000000000000012712457660467026762 xustar000000000000000027 mtime=1421828407.015915 30 atime=1513200651.400642312 30 ctime=1513200663.568791133 nordugrid-arc-5.4.2/src/services/ldap-infosys/nordugrid-arc-inforeg.in0000644000175000002070000001424612457660467027033 0ustar00mockbuildmock00000000000000#!/bin/bash # # Init file for the NorduGrid/ARC Information Endpoint Registration # # chkconfig: 2345 75 25 # description: NorduGrid/ARC Information Endpoint Registration # # config: /etc/sysconfig/nordugrid # config: /etc/sysconfig/nordugrid-arc-infosys # config: /etc/arc.conf # ###################################################################### ### BEGIN INIT INFO # Provides: nordugrid-arc-inforeg # Required-Start: $remote_fs $syslog # Required-Stop: $remote_fs $syslog # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: NorduGrid/ARC Information Endpoint Registration # Description: Init file for the NorduGrid/ARC Information Endpoint # Registration ### END INIT INFO # Helper functions if [ -r /etc/init.d/functions ]; then . 
/etc/init.d/functions log_success_msg() { echo -n "$@" success "$@" echo } log_warning_msg() { echo -n "$@" warning "$@" echo } log_failure_msg() { echo -n "$@" failure "$@" echo } elif [ -r /lib/lsb/init-functions ]; then . /lib/lsb/init-functions else echo "Error: Cannot source either init.d or lsb functions" exit 1 fi prog=nordugrid-arc-inforeg RETVAL=0 RUN=yes # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/$prog ]; then . /etc/sysconfig/$prog elif [ -r /etc/default/$prog ]; then . /etc/default/$prog fi if [ "x$RUN" != "xyes" ]; then log_warning_msg "$prog disabled, please adjust the configuration to your" log_warning_msg "needs and then set RUN to 'yes' in /etc/default/$prog to enable it." exit 0 fi [ -n "$ARC_LOCATION" ] && export ARC_LOCATION [ -n "$ARC_CONFIG" ] && export ARC_CONFIG ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then log_failure_msg "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi # Source the config parsing routines if [ -r "$ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh" ]; then . $ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh || exit $? else log_failure_msg "Could not find $ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh" exit 1 fi ARC_CONFIG=${ARC_CONFIG:-/etc/arc.conf} if [ ! -r "$ARC_CONFIG" ]; then log_failure_msg "ARC configuration file ($ARC_CONFIG) not found" log_failure_msg "If this file is in a non-standard place it can be set" log_failure_msg " with the ARC_CONFIG environment variable" exit 1 fi # Read arc.conf config_parse_file $ARC_CONFIG || exit $? # Check for infosys-block if ! config_match_section infosys; then log_failure_msg "Missing infosys configuration block" exit 1 fi config_hide_all config_import_section common config_import_section infosys if [ `id -u` = 0 ]; then # Debian does not have /var/lock/subsys if [ -d /var/lock/subsys ]; then lockfile=/var/lock/subsys/$prog else lockfile=/var/lock/$prog fi else lockfile=$HOME/$prog fi registrationlog=${CONFIG_registrationlog:-/var/log/arc/inforegistration.log} infosys_ldap_run_dir=${CONFIG_infosys_ldap_run_dir:-/var/run/arc/infosys} registrationconf=${infosys_ldap_run_dir}/grid-info-resource-register.conf pid_file=${CONFIG_registration_pid_file:-/var/run/nordugrid-arc-inforeg.pid} start () { echo -n "Starting $prog: " # Check if we are already running if [ -f "$pid_file" ]; then read pid < "$pid_file" if [ "x$pid" != "x" ]; then ps -p "$pid" -o comm 2>/dev/null | grep "^sleep$" 1>/dev/null 2>/dev/null if [ $? -eq 0 ] ; then log_success_msg "already running (pid $pid)" return 0 fi fi rm -f "$pid_file" "$lockfile" fi rm -f $registrationconf $ARC_LOCATION/@pkgdatasubdir@/create-inforeg-config if grep -q ^regtype: $registrationconf ; then # We should sleep forever really... sleep 10000d >/dev/null 2>&1 & pid=$! echo $pid > $pid_file $ARC_LOCATION/@pkgdatasubdir@/grid-info-soft-register \ -log $registrationlog -f $registrationconf \ -p $pid >/dev/null 2>&1 & if [ $? -eq 0 ]; then touch $lockfile log_success_msg else log_failure_msg fi fi } stop () { echo -n "Stopping $prog: " RETVAL=0 if [ -f "$pid_file" ]; then read pid < "$pid_file" if [ -n "${pid}" ]; then kill -15 ${pid} 2>/dev/null ps ${pid} >/dev/null 2>&1 if [ $? -eq 0 ]; then sleep 2 ps ${pid} >/dev/null 2>&1 if [ $? -eq 0 ]; then kill -9 ${pid} 2>/dev/null sleep 2 ps ${pid} >/dev/null 2>&1 if [ $? 
-eq 0 ]; then RETVAL=1 fi fi fi fi if [ ${RETVAL} -eq 0 ]; then rm -f "$pid_file" "$lockfile" log_success_msg else log_failure_msg "Could not kill $prog" fi else log_success_msg "already stopped" fi return ${RETVAL} } status () { if [ -f "$pid_file" ]; then read pid < "$pid_file" if [ "$pid" != "" ]; then if ps -p "$pid" > /dev/null; then echo "$1 (pid $pid) is running..." return 0 fi echo "$1 stopped but pid file exists" return 1 fi fi if [ -f $lockfile ]; then echo "$1 stopped but lockfile exists" return 2 fi echo "$1 is stopped" return 3 } case "$1" in start) start ;; stop) stop ;; restart | force-reload) stop # avoid race sleep 3 start ;; reload) ;; status) status $prog ;; condrestart | try-restart) if [ -f $lockfile ]; then stop # avoid race sleep 3 start fi ;; *) echo "Usage: $0 {start|stop|restart|force-reload|reload|condrestart|try-restart|status}" exit 1 ;; esac exit $? nordugrid-arc-5.4.2/src/services/ldap-infosys/PaxHeaders.7502/giis0000644000000000000000000000013213214316027023073 xustar000000000000000030 mtime=1513200663.610791647 30 atime=1513200668.719854133 30 ctime=1513200663.610791647 nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/0000755000175000002070000000000013214316027023216 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711624233044025210 xustar000000000000000027 mtime=1313945124.845808 30 atime=1513200604.686070975 30 ctime=1513200663.600791525 nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/Makefile.am0000644000175000002070000000117311624233044025254 0ustar00mockbuildmock00000000000000sbin_PROGRAMS = arc-infoindex-relay \ arc-infoindex-server pkglib_LTLIBRARIES = arc-infoindex-slapd-wrapper.la arc_infoindex_relay_SOURCES = Relay.c arc_infoindex_server_SOURCES = main.cpp \ Entry.cpp Index.cpp Policy.cpp Server.cpp \ Entry.h Index.h Policy.h Server.h arc_infoindex_server_LDADD = -lpthread arc_infoindex_slapd_wrapper_la_SOURCES = SlapdWrapper.cpp arc_infoindex_slapd_wrapper_la_LDFLAGS = -no-undefined -avoid-version -module arc_infoindex_slapd_wrapper_la_LIBADD = -ldl man8_MANS = arc-infoindex-relay.8 arc-infoindex-server.8 nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/PaxHeaders.7502/Policy.cpp0000644000000000000000000000012411741502232025112 xustar000000000000000027 mtime=1334215834.878809 27 atime=1513200575.743716 30 ctime=1513200663.606791598 nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/Policy.cpp0000644000175000002070000000545011741502232025163 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "Policy.h" Policy::Policy(const std::string &pstr) : port(-1) { std::string::size_type colon = pstr.find(':'); std::string::size_type slash = pstr.find('/'); if(slash == std::string::npos) { if(colon == std::string::npos) { host = pstr; } else { host = pstr.substr(0, colon); port = atoi(pstr.substr(colon + 1).c_str()); } } else { if(colon == std::string::npos || colon > slash) { host = pstr.substr(0, slash); suffix = pstr.substr(slash + 1); } else { host = pstr.substr(0, colon); port = atoi(pstr.substr(colon + 1, slash - colon - 1).c_str()); suffix = pstr.substr(slash + 1); } } RegComp(); } Policy::Policy(const Policy& p) : host(p.host), port(p.port), suffix(p.suffix) { RegComp(); } Policy& Policy::operator=(const Policy& p){ if(!host.empty()) regfree(&host_rx); if(!suffix.empty()) regfree(&suffix_rx); host = p.host; port = p.port; suffix = p.suffix; RegComp(); return *this; } Policy::~Policy() { if(!host.empty()) regfree(&host_rx); 
if(!suffix.empty()) regfree(&suffix_rx); } bool Policy::Check(const std::string &h, int p, const std::string &d) const { return ((host.empty() || regexec(&host_rx, h.c_str(), 0, NULL, 0) == 0) && (port == -1 || port == p) && (suffix.empty() || regexec(&suffix_rx, d.c_str(), 0, NULL, 0) == 0)); } void Policy::RegComp() { if(!host.empty()) { std::string reg = '^' + host + '$'; std::string::size_type pos = 0; while((pos = reg.find('.', pos)) != std::string::npos) { reg.insert(pos, "\\"); pos += 2; } pos = 0; while ((pos = reg.find('*', pos)) != std::string::npos) { reg.insert(pos, "."); pos += 2; } pos = 0; while ((pos = reg.find('?', pos)) != std::string::npos) { reg.replace(pos, 1, "."); pos++; } regcomp(&host_rx, reg.c_str(), REG_ICASE); } if(!suffix.empty()) { std::string reg = '^' + suffix + '$'; std::string::size_type pos = 0; while((pos = reg.find('.', pos)) != std::string::npos) { reg.insert(pos, "\\"); pos += 2; } pos = 0; while ((pos = reg.find('*', pos)) != std::string::npos) { reg.insert(pos, "."); pos += 2; } pos = 0; while ((pos = reg.find('?', pos)) != std::string::npos) { reg.replace(pos, 1, "."); pos++; } pos = 0; while ((pos = reg.find(',', pos)) != std::string::npos) { std::string::size_type pos1 = reg.find_last_not_of(' ', pos - 1); std::string::size_type pos2 = reg.find_first_not_of(' ', pos + 1); reg.replace(pos1 + 1, pos2 - pos1 - 1, " *, *"); pos = pos1 + 6; } regcomp(&suffix_rx, reg.c_str(), REG_ICASE); } } nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/PaxHeaders.7502/Index.cpp0000644000000000000000000000012411741502232024722 xustar000000000000000027 mtime=1334215834.878809 27 atime=1513200575.741716 30 ctime=1513200663.605791586 nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/Index.cpp0000644000175000002070000000261311741502232024771 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "Index.h" #include Index::Index(const std::string& name) : name(name) { pthread_mutex_init(&lock, NULL); } Index::Index(const Index& ix) : name(ix.name), policies(ix.policies), entries(ix.entries) { pthread_mutex_init(&lock, NULL); } Index::~Index() { pthread_mutex_destroy(&lock); } const std::string& Index::Name() const { return name; } void Index::ListEntries(FILE *f) { pthread_mutex_lock(&lock); for(std::list::const_iterator e = entries.begin(); e != entries.end(); e++) fprintf(f, "%s", e->SearchEntry().c_str()); pthread_mutex_unlock(&lock); } bool Index::AddEntry(const Entry& entry) { bool accept = false; for(std::list::const_iterator p = policies.begin(); p != policies.end(); p++) if(p->Check(entry.Host(), entry.Port(), entry.Suffix())) { accept = true; break; } if(accept) { bool found = false; pthread_mutex_lock(&lock); for(std::list::iterator e = entries.begin(); e != entries.end(); e++) { if(e->Host() == entry.Host() && e->Port() == entry.Port() && e->Suffix() == entry.Suffix()) { *e = entry; found = true; break; } } if(!found) entries.push_back(entry); pthread_mutex_unlock(&lock); } return accept; } void Index::AllowReg(const std::string& areg) { policies.push_back(areg); } nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315734025220 xustar000000000000000029 mtime=1513200604.74207166 30 atime=1513200651.450642924 30 ctime=1513200663.600791525 nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/Makefile.in0000644000175000002070000007620413214315734025300 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. 
# @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ sbin_PROGRAMS = arc-infoindex-relay$(EXEEXT) \ arc-infoindex-server$(EXEEXT) subdir = src/services/ldap-infosys/giis DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/arc-infoindex-relay.8.in \ $(srcdir)/arc-infoindex-server.8.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = arc-infoindex-relay.8 arc-infoindex-server.8 CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(sbindir)" \ "$(DESTDIR)$(man8dir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) arc_infoindex_slapd_wrapper_la_DEPENDENCIES = am_arc_infoindex_slapd_wrapper_la_OBJECTS = SlapdWrapper.lo arc_infoindex_slapd_wrapper_la_OBJECTS = \ $(am_arc_infoindex_slapd_wrapper_la_OBJECTS) arc_infoindex_slapd_wrapper_la_LINK = 
$(LIBTOOL) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(AM_CXXFLAGS) $(CXXFLAGS) \ $(arc_infoindex_slapd_wrapper_la_LDFLAGS) $(LDFLAGS) -o $@ PROGRAMS = $(sbin_PROGRAMS) am_arc_infoindex_relay_OBJECTS = Relay.$(OBJEXT) arc_infoindex_relay_OBJECTS = $(am_arc_infoindex_relay_OBJECTS) arc_infoindex_relay_LDADD = $(LDADD) am_arc_infoindex_server_OBJECTS = main.$(OBJEXT) Entry.$(OBJEXT) \ Index.$(OBJEXT) Policy.$(OBJEXT) Server.$(OBJEXT) arc_infoindex_server_OBJECTS = $(am_arc_infoindex_server_OBJECTS) arc_infoindex_server_DEPENDENCIES = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(arc_infoindex_slapd_wrapper_la_SOURCES) \ $(arc_infoindex_relay_SOURCES) $(arc_infoindex_server_SOURCES) DIST_SOURCES = $(arc_infoindex_slapd_wrapper_la_SOURCES) \ $(arc_infoindex_relay_SOURCES) $(arc_infoindex_server_SOURCES) man8dir = $(mandir)/man8 NROFF = nroff MANS = $(man8_MANS) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ 
CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ 
OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = 
@top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = arc-infoindex-slapd-wrapper.la arc_infoindex_relay_SOURCES = Relay.c arc_infoindex_server_SOURCES = main.cpp \ Entry.cpp Index.cpp Policy.cpp Server.cpp \ Entry.h Index.h Policy.h Server.h arc_infoindex_server_LDADD = -lpthread arc_infoindex_slapd_wrapper_la_SOURCES = SlapdWrapper.cpp arc_infoindex_slapd_wrapper_la_LDFLAGS = -no-undefined -avoid-version -module arc_infoindex_slapd_wrapper_la_LIBADD = -ldl man8_MANS = arc-infoindex-relay.8 arc-infoindex-server.8 all: all-am .SUFFIXES: .SUFFIXES: .c .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/ldap-infosys/giis/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/ldap-infosys/giis/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): arc-infoindex-relay.8: $(top_builddir)/config.status $(srcdir)/arc-infoindex-relay.8.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arc-infoindex-server.8: $(top_builddir)/config.status $(srcdir)/arc-infoindex-server.8.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done arc-infoindex-slapd-wrapper.la: $(arc_infoindex_slapd_wrapper_la_OBJECTS) $(arc_infoindex_slapd_wrapper_la_DEPENDENCIES) 
$(arc_infoindex_slapd_wrapper_la_LINK) -rpath $(pkglibdir) $(arc_infoindex_slapd_wrapper_la_OBJECTS) $(arc_infoindex_slapd_wrapper_la_LIBADD) $(LIBS) install-sbinPROGRAMS: $(sbin_PROGRAMS) @$(NORMAL_INSTALL) test -z "$(sbindir)" || $(MKDIR_P) "$(DESTDIR)$(sbindir)" @list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p || test -f $$p1; \ then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(sbindir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(sbindir)$$dir" || exit $$?; \ } \ ; done uninstall-sbinPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(sbindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(sbindir)" && rm -f $$files clean-sbinPROGRAMS: @list='$(sbin_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list arc-infoindex-relay$(EXEEXT): $(arc_infoindex_relay_OBJECTS) $(arc_infoindex_relay_DEPENDENCIES) @rm -f arc-infoindex-relay$(EXEEXT) $(LINK) $(arc_infoindex_relay_OBJECTS) $(arc_infoindex_relay_LDADD) $(LIBS) arc-infoindex-server$(EXEEXT): $(arc_infoindex_server_OBJECTS) $(arc_infoindex_server_DEPENDENCIES) @rm -f arc-infoindex-server$(EXEEXT) $(CXXLINK) $(arc_infoindex_server_OBJECTS) $(arc_infoindex_server_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Entry.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Index.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Policy.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Relay.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Server.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SlapdWrapper.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/main.Po@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(COMPILE) -c $< .c.obj: @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no 
@AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` .c.lo: @am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man8: $(man8_MANS) @$(NORMAL_INSTALL) test -z "$(man8dir)" || $(MKDIR_P) "$(DESTDIR)$(man8dir)" @list='$(man8_MANS)'; test -n "$(man8dir)" || exit 0; \ { for i in $$list; do echo "$$i"; done; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^8][0-9a-z]*$$,8,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man8dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man8dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man8dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man8dir)" || exit $$?; }; \ done; } uninstall-man8: @$(NORMAL_UNINSTALL) @list='$(man8_MANS)'; test -n "$(man8dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^8][0-9a-z]*$$,8,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ test -z "$$files" || { \ echo " ( cd '$(DESTDIR)$(man8dir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(man8dir)" && rm -f $$files; } ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; 
\ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @list='$(MANS)'; if test -n "$$list"; then \ list=`for p in $$list; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \ if test -n "$$list" && \ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) $(MANS) installdirs: for dir in "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(sbindir)" "$(DESTDIR)$(man8dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ clean-sbinPROGRAMS mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-sbinPROGRAMS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man8 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-man uninstall-pkglibLTLIBRARIES \ uninstall-sbinPROGRAMS uninstall-man: uninstall-man8 .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES clean-sbinPROGRAMS ctags \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-man8 install-pdf \ install-pdf-am install-pkglibLTLIBRARIES install-ps \ install-ps-am install-sbinPROGRAMS install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-man uninstall-man8 \ uninstall-pkglibLTLIBRARIES uninstall-sbinPROGRAMS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/PaxHeaders.7502/Server.cpp0000644000000000000000000000012412334503270025123 xustar000000000000000027 mtime=1400014520.804983 27 atime=1513200575.741716 30 ctime=1513200663.607791611 nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/Server.cpp0000644000175000002070000001336212334503270025175 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "Server.h" #include "Index.h" static void Error(const std::string& file, int code, const std::string& info) { int fd = open(file.c_str(), O_WRONLY); if(fd != -1) { FILE *f = fdopen(fd, "w"); fprintf(f, "RESULT\n"); fprintf(f, "info:%s\n", info.c_str()); fprintf(f, "code:%d\n", code); fclose(f); } } struct process_arg_t { Server *server; std::string file; std::list query; }; void* process(void *a) { process_arg_t *arg = (process_arg_t *)a; if(arg->query.empty()) Error(arg->file, 53, "Empty query"); else if(*arg->query.begin() == "BIND") arg->server->Bind(arg->file); else if(*arg->query.begin() == "ADD") arg->server->Add(arg->file, arg->query); else if(*arg->query.begin() == "SEARCH") arg->server->Search(arg->file, arg->query); else Error(arg->file, 53, "Unimplemented query type: " + *arg->query.begin()); delete arg; return NULL; } Server::Server(const std::string& fifo, const std::string& conf) : fifo(fifo) { mkfifo(fifo.c_str(), S_IRUSR | S_IWUSR); std::ifstream cfg(conf.c_str()); std::string line; while(getline(cfg, line)) { if(line[0] == '[' && line[line.size() - 1] == ']') { while(!line.empty() && line.substr(1, 14) == "infosys/index/" && line.find('/', 15) == std::string::npos) { Index ix(line.substr(15, line.size() - 16)); while(getline(cfg, line)) { if(line[0] == '[' && line[line.size() - 1] == ']') break; if(line.substr(0, 9) == "allowreg=") { if(line[9] == '"' && line[line.size() - 1] == '"') ix.AllowReg(line.substr(10, line.size() - 11)); else ix.AllowReg(line.substr(9)); } } indices.push_back(ix); } } } } Server::~Server() { unlink(fifo.c_str()); } void Server::Start() { bool running = true; int sfd = open(fifo.c_str(), O_RDONLY | O_NONBLOCK); FILE *sf = fdopen(sfd, "r"); while(running) { fd_set fs; FD_ZERO(&fs); FD_SET(sfd, &fs); if(select(sfd + 1, &fs, NULL, NULL, NULL) > 0) { char buf[2048]; if (!fgets(buf, 2048, sf)) { fclose(sf); sfd = open(fifo.c_str(), O_RDONLY | O_NONBLOCK); sf = fdopen(sfd, "r"); continue; } std::string file(buf, strlen(buf) - 1); if(file == "STOP") running = false; else { process_arg_t *arg = new process_arg_t; arg->server = this; arg->file = file; std::string line; while(fgets(buf, 2048, sf)) { if(buf[0] == ' ') line.append(&buf[1], strlen(buf) - 2); else { if(!line.empty()) arg->query.push_back(line); line = std::string(buf, strlen(buf) - 1); } } fclose(sf); sfd = open(fifo.c_str(), O_RDONLY | O_NONBLOCK); sf = fdopen(sfd, "r"); if(!line.empty()) arg->query.push_back(line); pthread_t pid; pthread_create(&pid, NULL, process, arg); pthread_detach(pid); } } } fclose(sf); } void Server::Bind(const std::string& file) { Error(file, 0, "Binding"); } void Server::Add(const std::string& file, const std::list& query) { std::string name; for(std::list::const_iterator it = query.begin(); it != query.end(); it++) if(it->substr(0, 8) == "suffix: ") { std::string::size_type pos1 = it->find("Mds-Vo-name=", 8); if(pos1 != std::string::npos) { pos1 += 12; std::string::size_type pos2 = it->find(',', pos1); if(pos2 != std::string::npos) name = 
it->substr(pos1, pos2 - pos1); else name = it->substr(pos1); break; } } if(name.empty()) { Error(file, 53, "MDS VO name not found in LDAP suffix"); return; } std::list::iterator ix; for(ix = indices.begin(); ix != indices.end(); ix++) if(strcasecmp(ix->Name().c_str(), name.c_str()) == 0) break; if(ix == indices.end()) { Error(file, 53, "No such index (" + name + ")"); return; } Entry e(query); if(!e) { Error(file, 53, "Incomplete query"); return; } if(!ix->AddEntry(e)) { Error(file, 53, "Not authorized to add - check policies"); return; } Error(file, 0, "Registration succeded"); } void Server::Search(const std::string& file, const std::list& query) { std::string name; int scope = 0; std::string attrs; for(std::list::const_iterator it = query.begin(); it != query.end(); it++) if(it->substr(0, 8) == "suffix: ") { std::string::size_type pos1 = it->find("Mds-Vo-name=", 8); if(pos1 != std::string::npos) { pos1 += 12; std::string::size_type pos2 = it->find(',', pos1); if(pos2 != std::string::npos) name = it->substr(pos1, pos2 - pos1); else name = it->substr(pos1); } } else if(it->substr(0, 7) == "scope: ") scope = atoi(it->substr(7).c_str()); else if(it->substr(0, 7) == "attrs: ") attrs = it->substr(7); if(name.empty()) { Error(file, 53, "MDS VO name not found in LDAP suffix"); return; } if(scope != 0) { Error(file, 53, "Search scope not supported"); return; } // if(attrs != "giisregistrationstatus") { // Error(file, 53, "Unsupported query attribute (" + attrs + +")"); // return; // } std::list::iterator ix; for(ix = indices.begin(); ix != indices.end(); ix++) if(strcasecmp(ix->Name().c_str(), name.c_str()) == 0) break; if(ix == indices.end()) { Error(file, 53, "No such index (" + name + ")"); return; } int fd = open(file.c_str(), O_WRONLY); if(fd != -1) { FILE *f = fdopen(fd, "w"); ix->ListEntries(f); fprintf(f, "RESULT\n"); fprintf(f, "info:Successful query\n"); fprintf(f, "code:0\n"); fclose(f); } } nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/PaxHeaders.7502/arc-infoindex-relay.8.in0000644000000000000000000000012612574532370027522 xustar000000000000000027 mtime=1441969400.372727 29 atime=1513200651.46664312 30 ctime=1513200663.601791537 nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/arc-infoindex-relay.8.in0000644000175000002070000000345512574532370027574 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARC-INFOINDEX-RELAY 8 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arc-infoindex-relay \- ARC Information Index Relay Helper .SH DESCRIPTION The .B arc-infoindex-relay command is used internally by the ARC Information System to relay queries and responses between the slapd server and the arc-infoindex-server .SH SYNOPSIS .B arc-infoindex-relay .I server-fifo .SH ARGUMENTS .IP "\fBserver-fifo\fR" The FIFO on which the arc-infoindex-server accepts queries .LP .SH EXTENDED DESCRIPTION \fBarc-infoindex-relay\fR acts as a bridge between the slapd server and the arc-infoindex-server in the NorduGrid ARC Information system. For each index served by the arc-infoindex-server a shell backend database should be configured in the slapd configuration. 
Here is a sample configuration: .nf database shell suffix "Mds-Vo-name=Sweden,o=Grid" bind /usr/sbin/arc-infoindex-relay /var/run/arc/infosys/giis-fifo add /usr/sbin/arc-infoindex-relay /var/run/arc/infosys/giis-fifo search /usr/sbin/arc-infoindex-relay /var/run/arc/infosys/giis-fifo access to * by * write .fi When a query for the configured database is received by the slapd server, the server will call out to the arc-infoindex-relay. The arc-infoindex-relay will then open a unique return FIFO for this query and then forward the query and the name of the return FIFO to the arc-infoindex-server through the configured server FIFO. When the arc-infoindex-server has processed the query it will return the result to the arc-infoindex-relay using the return FIFO. The arc-infoindex-relay then closes the return FIFO and forwards the response to the slapd server. .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR Mattias Ellert .SH SEE ALSO .BR arc-infoindex-server (8) nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/PaxHeaders.7502/Entry.cpp0000644000000000000000000000012411741502232024754 xustar000000000000000027 mtime=1334215834.878809 27 atime=1513200575.738716 30 ctime=1513200663.605791586 nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/Entry.cpp0000644000175000002070000000610011741502232025016 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "Entry.h" #include #include #include Entry::Entry(const std::list& query) : port(-1), sizelimit(0), timelimit(30), cachetime(0) { for(std::list::const_iterator it = query.begin(); it != query.end(); it++) { if(it->substr(0, 4) == "dn: ") dn = it->substr(4); if(it->substr(0, 16) == "Mds-Service-hn: ") hn = it->substr(16); if(it->substr(0, 18) == "Mds-Service-port: ") port = atoi(it->substr(18).c_str()); if(it->substr(0, 25) == "Mds-Service-Ldap-suffix: ") suffix = it->substr(25); if(it->substr(0, 27) == "Mds-Service-Ldap-cachettl: ") cachetime = atoi(it->substr(27).c_str()); if(it->substr(0, 26) == "Mds-Service-Ldap-timeout: ") timelimit = atoi(it->substr(26).c_str()); if(it->substr(0, 28) == "Mds-Service-Ldap-sizelimit: ") sizelimit = atoi(it->substr(28).c_str()); if(it->substr(0, 15) == "Mds-validfrom: ") validfrom = it->substr(15); if(it->substr(0, 13) == "Mds-validto: ") validto = it->substr(13); if(it->substr(0, 12) == "Mds-keepto: ") keepto = it->substr(12); } std::string::size_type spos = suffix.find(','); std::string::size_type dpos = dn.find(','); if(spos != std::string::npos && dpos != std::string::npos) dn = suffix.substr(0, spos) + dn.substr(dpos); else dn.clear(); } Entry::~Entry() {} const std::string& Entry::Host() const { return hn; } int Entry::Port() const { return port; } const std::string& Entry::Suffix() const { return suffix; } std::string Entry::SearchEntry() const { std::ostringstream ss; ss << "dn: " << dn << std::endl; ss << "objectClass: Mds" << std::endl; ss << "objectClass: MdsVoOp" << std::endl; ss << "objectClass: MdsService" << std::endl; ss << "objectClass: MdsServiceLdap" << std::endl; ss << "Mds-Vo-Op-name: register" << std::endl; ss << "Mds-Service-type: ldap" << std::endl; ss << "Mds-Service-protocol: 0.1" << std::endl; ss << "Mds-Service-hn: " << hn << std::endl; ss << "Mds-Service-port: " << port << std::endl; ss << "Mds-Service-Ldap-suffix: " << suffix << std::endl; ss << "Mds-Service-Ldap-sizelimit: " << sizelimit << std::endl; ss << "Mds-Service-Ldap-timeout: " << timelimit << std::endl; ss << "Mds-Service-Ldap-cachettl: " << cachetime << std::endl; ss << 
"Mds-Bind-Method-servers: ANONYM-ONLY" << std::endl; time_t t1 = time(NULL); struct tm t2; char buf[16]; strftime (buf, 16, "%Y%m%d%H%M%SZ", gmtime_r(&t1, &t2)); if(keepto < buf) ss << "Mds-Reg-status: PURGED" << std::endl; else if(validto < buf) ss << "Mds-Reg-status: INVALID" << std::endl; else ss << "Mds-Reg-status: VALID" << std::endl; ss << "Mds-validfrom: " << validfrom << std::endl; ss << "Mds-validto: " << validto << std::endl; if (!keepto.empty()) ss << "Mds-keepto: " << keepto << std::endl; ss << std::endl; return ss.str(); } bool Entry::operator!() const { return (dn.empty() || hn.empty() || (port == -1) || suffix.empty()); } nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/PaxHeaders.7502/Relay.c0000644000000000000000000000012411623446175024403 xustar000000000000000027 mtime=1313754237.458826 27 atime=1513200575.743716 30 ctime=1513200663.603791562 nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/Relay.c0000644000175000002070000001004111623446175024444 0ustar00mockbuildmock00000000000000#include #include #include #include #include #include #include #include #include #include #include static int Error(int code, const char *info) { printf("RESULT\n"); printf("info:%s\n", info); printf("code:%d\n", code); return code; } int main(int argv, char** argc) { struct stat st; int rfd, sfd, stdinfd; FILE *rf, *sf, *stdinf; fd_set fs; char buf[2048]; char replyfile[] = "/tmp/giis-fifo.XXXXXX"; char stdinfile[] = "/tmp/stdinfile.XXXXXX"; struct timeval timelimit = { 5, 0 }; const char* infile; long flags; int counter; signal(SIGPIPE, SIG_IGN); if(argv != 2) return Error(53, "Wrong number of arguments"); infile = argc[1]; if(stat(infile, &st) != 0) return Error(53, "Could not stat server FIFO"); if(!S_ISFIFO(st.st_mode)) return Error(53, "Server FIFO is not a FIFO"); stdinfd = mkstemp(stdinfile); if (stdinfd == -1) return Error(53, "Could not create temporary file for stdin"); stdinf = fdopen(stdinfd, "w+"); if (!stdinf) { close(stdinfd); unlink(stdinfile); return Error(53, "Could not open temporary file for stdin"); } flags = fcntl(fileno(stdin), F_GETFL); fcntl(fileno(stdin), F_SETFL, flags | O_NONBLOCK); counter = 0; do { if(fgets(buf, 2048, stdin)) { fputs(buf, stdinf); counter = 0; } else if(errno == EAGAIN) { usleep(1000); counter = counter + 1; if (counter >= 1000) { fclose(stdinf); unlink(stdinfile); return Error(53, "Timeout while reading stdin"); } continue; } else if(!feof(stdin)) { fclose(stdinf); unlink(stdinfile); return Error(53, "I/O Error while reading stdin"); } } while(!feof(stdin)); rewind(stdinf); if(*mktemp(replyfile) == '\0') { fclose(stdinf); unlink(stdinfile); return Error(53, "Could not create reply FIFO filename"); } if(mkfifo(replyfile, S_IRUSR | S_IWUSR) == -1) { fclose(stdinf); unlink(stdinfile); return Error(53, "Could not create reply FIFO"); } sfd = open(infile, O_WRONLY | O_NONBLOCK); if (sfd == -1) { fclose(stdinf); unlink(stdinfile); unlink(replyfile); return Error(53, "Could not open server FIFO"); } if(lockf(sfd, F_LOCK, 0) != 0) { fclose(stdinf); unlink(stdinfile); close(sfd); unlink(replyfile); return Error(53, "Could not lock server FIFO"); } sf = fdopen(sfd, "w"); if (!sf) { fclose(stdinf); unlink(stdinfile); if(lockf(sfd, F_ULOCK, 0) != 0) {} close(sfd); unlink(replyfile); return Error(53, "Could not open server FIFO"); } rfd = open(replyfile, O_RDONLY | O_NONBLOCK); if (rfd == -1) { fclose(stdinf); unlink(stdinfile); if(lockf(sfd, F_ULOCK, 0) != 0) {} fclose(sf); unlink(replyfile); return Error(53, "Could not open reply FIFO"); } rf = 
fdopen(rfd, "r"); if (!rf) { fclose(stdinf); unlink(stdinfile); if(lockf(sfd, F_ULOCK, 0) != 0) {} fclose(sf); close(rfd); unlink(replyfile); return Error(53, "Could not open reply FIFO"); } fprintf(sf, "%s\n", replyfile); while(fgets(buf, 2048, stdinf)) { fputs(buf, sf); if(strncasecmp(buf, "timelimit:", 10) == 0) { timelimit.tv_sec = atoi(&buf[10]); if(timelimit.tv_sec == 0 || timelimit.tv_sec > 20) timelimit.tv_sec = 20; } } fclose(stdinf); unlink(stdinfile); fflush(sf); if(lockf(sfd, F_ULOCK, 0) != 0) {} fclose(sf); FD_ZERO(&fs); FD_SET(rfd, &fs); if(select(rfd + 1, &fs, NULL, NULL, &timelimit) <= 0) { fclose(rf); unlink(replyfile); return Error(3, "Time out waiting for reply"); } do { if(fgets(buf, 2048, rf)) fputs(buf, stdout); else if(errno == EAGAIN) { usleep(1000); continue; } else if(!feof(rf)) { fclose(rf); unlink(replyfile); return Error(53, "I/O Error while waiting for reply"); } } while(!feof(rf)); fclose(rf); unlink(replyfile); return 0; } nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/PaxHeaders.7502/SlapdWrapper.cpp0000644000000000000000000000012412334503270026261 xustar000000000000000027 mtime=1400014520.804983 27 atime=1513200575.739716 30 ctime=1513200663.603791562 nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/SlapdWrapper.cpp0000644000175000002070000001424212334503270026331 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif /* Quite an ugly hack to return all attributes from slapd-shell even if a attribute name is given. Needed in order to be compatible with clients expecting the non-standard behaviour of the Globus GIIS backend. */ #include #include #include #include #include #include extern "C" { typedef void** (*backend_info_t)(const char*); /* These are simplified versions of some structs and unions in the openldap internal header ldap.h Most pointers to structs have been replaced with void pointers, since only the size of the pointers is important. */ typedef struct req_search_s { int rs_scope; int rs_deref; int rs_slimit; int rs_tlimit; void *rs_limit; /* struct slap_limits_set* */ int rs_attrsonly; void *rs_attrs; /* AttributeName* */ void *rs_filter; /* Filter* */ struct berval rs_filterstr; } req_search_s; #if LDAP_VENDOR_VERSION >= 20300 /* openldap 2.3 and later */ typedef union OpRequest { req_search_s oq_search; /* There are more fields in this union, but they are not relevant for this hack. */ } OpRequest; typedef struct Operation { void *o_hdr; /* Opheader* */ ber_tag_t o_tag; time_t o_time; int o_tincr; void *o_bd; /* BackendDB* */ struct berval o_req_dn; struct berval o_req_ndn; OpRequest o_request; /* There are more fields in this struct, but they are not relevant for this hack. */ } Operation; typedef struct SlapReply SlapReply; #else /* openldap 2.2 */ typedef struct slap_op { unsigned long o_opid; unsigned long o_connid; void *o_conn; /* struct slap_conn* */ void *o_bd; /* BackendDB* */ ber_int_t o_msgid; ber_int_t o_protocol; ber_tag_t o_tag; time_t o_time; struct berval o_req_dn; struct berval o_req_ndn; union o_req_u { req_search_s oq_search; /* There are more fields in this union, but they are not relevant for this hack. */ } o_request; /* There are more fields in this struct, but they are not relevant for this hack. 
*/ } Operation; typedef struct slap_rep SlapReply; #endif typedef int (*BI_bi_func)(void**); typedef int (*BI_op_func)(Operation*, SlapReply*); static int shell_back_search_wrapper(Operation *op, SlapReply *rs) { static BI_op_func shell_back_search = NULL; if (!shell_back_search) { shell_back_search = (BI_op_func) dlsym(RTLD_DEFAULT, "shell_back_search"); if (!shell_back_search) { void *shell_handle = NULL; const char *arc_ldaplib_shell = getenv("ARC_LDAPLIB_SHELL"); if(!arc_ldaplib_shell) arc_ldaplib_shell = "/usr/lib/ldap/back_shell.so"; shell_handle = dlopen(arc_ldaplib_shell, RTLD_LAZY); if(!shell_handle) { std::cerr << "Error: Unable to dlopen " << arc_ldaplib_shell << std::endl; exit(1); } shell_back_search = (BI_op_func) dlsym(shell_handle, "shell_back_search"); } } if (!shell_back_search) { std::cerr << "Can not find shell_back_search" << std::endl; exit(1); } void* save = op->o_request.oq_search.rs_attrs; op->o_request.oq_search.rs_attrs = NULL; int res = shell_back_search(op, rs); op->o_request.oq_search.rs_attrs = save; return res; } static int shell_back_initialize_wrapper(void **bi) { static BI_bi_func shell_back_initialize = NULL; if (!shell_back_initialize) { shell_back_initialize = (BI_bi_func) dlsym(RTLD_DEFAULT, "shell_back_initialize"); } if (!shell_back_initialize) { std::cerr << "Can not find shell_back_initialize" << std::endl; exit(1); } int res = shell_back_initialize(bi); static BI_op_func shell_back_search = NULL; if (!shell_back_search) { shell_back_search = (BI_op_func) dlsym(RTLD_DEFAULT, "shell_back_search"); } if (!shell_back_search) { std::cerr << "Can not find shell_back_search" << std::endl; exit(1); } for (int i = 0; i < 100; i++) { if (bi[i] == (void*)shell_back_search) { bi[i] = (void*)&shell_back_search_wrapper; break; } } return res; } int init_module(int, char**) { backend_info_t backend_info = (backend_info_t) dlsym(RTLD_DEFAULT, "backend_info"); if (!backend_info) { std::cerr << "Can not find backend_info" << std::endl; exit(1); } void **bi = backend_info("shell"); if (bi) { BI_op_func shell_back_search = (BI_op_func) dlsym(RTLD_DEFAULT, "shell_back_search"); if (!shell_back_search) { void *shell_handle = NULL; const char *arc_ldaplib_shell = getenv("ARC_LDAPLIB_SHELL"); if(!arc_ldaplib_shell) arc_ldaplib_shell = "/usr/lib/ldap/back_shell.so"; shell_handle = dlopen(arc_ldaplib_shell, RTLD_LAZY); if(!shell_handle) { std::cerr << "Error: Unable to dlopen " << arc_ldaplib_shell << std::endl; exit(1); } shell_back_search = (BI_op_func) dlsym(shell_handle, "shell_back_search"); } if (!shell_back_search) { std::cerr << "Can not find shell_back_search" << std::endl; exit(1); } if (shell_back_search) { for (int i = 0; i < 100; i++) { if (bi[i] == (void*)shell_back_search) { bi[i] = (void*)&shell_back_search_wrapper; break; } } } } return 0; } } /* extern "C" */ class SlapdWrapper { public: SlapdWrapper(); ~SlapdWrapper() {} }; SlapdWrapper::SlapdWrapper() { BI_op_func shell_back_initialize = (BI_op_func) dlsym(RTLD_DEFAULT, "shell_back_initialize"); if (shell_back_initialize) { // Look for shell_back_initialize in backend info for static backends void** slap_binfo = (void**) dlsym(RTLD_DEFAULT, "slap_binfo"); if (slap_binfo) { for (int i = 0; i < 2000; i++) { if (slap_binfo[i] == (void*)shell_back_initialize) { slap_binfo[i] = (void*)&shell_back_initialize_wrapper; break; } } } } else { std::cerr << "The shell_back_initialize symbol does not exist in default scope." << std::endl; std::cerr << "Try adding the slapd wrapper as a module instead." 
<< std::endl; } } SlapdWrapper slapdwrapper; nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/PaxHeaders.7502/arc-infoindex-server.8.in0000644000000000000000000000012711641102672027705 xustar000000000000000027 mtime=1317307834.180105 30 atime=1513200651.480643291 30 ctime=1513200663.602791549 nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/arc-infoindex-server.8.in0000644000175000002070000000361311641102672027752 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARC-INFOINDEX-SERVER 8 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arc-infoindex-server \- ARC Information Index Server .SH DESCRIPTION The .B arc-infoindex-server accepts registrations about local information systems and other arc-infoindex-servers, creating a hierarchical information system for discovering Grid resources .SH SYNOPSIS .B arc-infoindex-server -c .I conffile -f .I fifo .SH OPTIONS .IP "\fB-c\fR conffile" The configuration file, usually /etc/arc.conf .LP .IP "\fB-f\fR fifo" The FIFO on which the server will accept queries .LP .SH EXTENDED DESCRIPTION \fBarc-infoindex-server\fR accepts registrations with information about local information systems and other arc-infoindex-servers. It also responds to status queries, returning the list of registered resources and their registration status (valid, invalid or purged). Queries are accepted on a FIFO and are normally forwarded from a slapd server through an arc-infoindex-relay process. The first line of a query should contain the name of a FIFO on which the response should be returned. The remaining part of the query should be a Globus MDS query in LDIF format. To stop arc-infoindex-server, write the special query "STOP" to the server FIFO. .SH FILES .IP "\fB/etc/arc.conf\fR" Several resource indices can be kept in the same server. Each of them should have an [infosys/index] block in the configuration file. The set of resources that are allowed to register to each index can be limited using allowreg keys.
Here is an example configuration: .nf [infosys/index/Sweden] allowreg="*.uu.se:2135" allowreg="*.lu.se:2135" allowreg="*.liu.se:2135" allowreg="*.umu.se:2135" allowreg="*.chalmers.se:2135" allowreg="*.kth.se:2135" .fi .LP .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR Mattias Ellert .SH SEE ALSO .BR arc-infoindex-relay (8), .BR arc.conf (5) nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/PaxHeaders.7502/Entry.h0000644000000000000000000000012411455353323024430 xustar000000000000000027 mtime=1286985427.270721 27 atime=1513200575.740716 30 ctime=1513200663.608791623 nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/Entry.h0000644000175000002070000000101511455353323024472 0ustar00mockbuildmock00000000000000#ifndef ENTRY_H #define ENTRY_H #include #include class Entry { public: Entry(const std::list& query); ~Entry(); const std::string& Host() const; int Port() const; const std::string& Suffix() const; std::string SearchEntry() const; bool operator!() const; private: std::string dn; std::string hn; int port; std::string suffix; int sizelimit; int timelimit; int cachetime; std::string validfrom; std::string validto; std::string keepto; }; #endif // ENTRY_H nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/PaxHeaders.7502/Index.h0000644000000000000000000000012412334503270024371 xustar000000000000000027 mtime=1400014520.804983 27 atime=1513200575.742716 30 ctime=1513200663.609791635 nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/Index.h0000644000175000002070000000077312334503270024445 0ustar00mockbuildmock00000000000000#ifndef INDEX_H #define INDEX_H #include #include #include #include "Policy.h" #include "Entry.h" class Index { public: Index(const std::string& name); Index(const Index& ix); ~Index(); const std::string& Name() const; void ListEntries(FILE *f); bool AddEntry(const Entry& entry); void AllowReg(const std::string& areg); private: const std::string name; std::list policies; std::list entries; pthread_mutex_t lock; }; #endif // INDEX_H nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/PaxHeaders.7502/Policy.h0000644000000000000000000000012411413413444024561 xustar000000000000000027 mtime=1278088996.053921 27 atime=1513200575.738716 30 ctime=1513200663.609791635 nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/Policy.h0000644000175000002070000000067311413413444024634 0ustar00mockbuildmock00000000000000#ifndef POLICY_H #define POLICY_H #include #include class Policy { public: Policy(const std::string &pstr); Policy(const Policy& p); Policy& operator=(const Policy& p); ~Policy(); bool Check(const std::string &host, int port, const std::string &suffix) const; private: void RegComp(); std::string host; int port; std::string suffix; regex_t host_rx; regex_t suffix_rx; }; #endif // POLICY_H nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/PaxHeaders.7502/Server.h0000644000000000000000000000012411413413444024570 xustar000000000000000027 mtime=1278088996.053921 27 atime=1513200575.742716 30 ctime=1513200663.610791647 nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/Server.h0000644000175000002070000000073111413413444024636 0ustar00mockbuildmock00000000000000#ifndef SERVER_H #define SERVER_H #include #include class Index; class Server { public: Server(const std::string& fifo, const std::string& conf); ~Server(); void Start(); void Bind(const std::string& file); void Add(const std::string& file, const std::list& query); void Search(const std::string& file, const std::list& query); private: const std::string fifo; std::list indices; }; #endif // SERVER_H 
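The query protocol described in arc-infoindex-server(8) and implemented by Server::Start above (the first line written to the server FIFO names a reply FIFO, the remaining lines carry the query, and the answer comes back on the reply FIFO terminated by a RESULT/info/code trailer) can also be exercised directly. The following stand-alone C sketch illustrates it; the FIFO paths and the SEARCH query are examples only, and the locking, non-blocking I/O and timeout handling performed by the real relay (Relay.c) are omitted.

/*
 * Minimal sketch of a client speaking the FIFO protocol described in
 * arc-infoindex-server(8): the first line written to the server FIFO
 * names a reply FIFO, the remaining lines form the query, and the
 * response arrives on the reply FIFO ending with a RESULT/info/code
 * trailer.  Paths and the query below are illustrative only.
 */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

int main(void) {
  /* Server FIFO as used in the sample slapd configuration above;
     adjust to whatever is passed to arc-infoindex-server with -f. */
  const char *serverfifo = "/var/run/arc/infosys/giis-fifo";
  /* A real client must generate a unique name here (Relay.c uses mktemp). */
  const char *replyfifo = "/tmp/giis-reply-example";
  char buf[2048];
  FILE *sf, *rf;

  unlink(replyfifo); /* remove a stale FIFO from a previous run, if any */
  if (mkfifo(replyfifo, S_IRUSR | S_IWUSR) == -1) {
    perror("mkfifo");
    return 1;
  }

  /* Send the query: reply FIFO name first, then the query lines.
     Opening for writing blocks until the server has the FIFO open. */
  sf = fopen(serverfifo, "w");
  if (!sf) {
    perror("open server fifo");
    unlink(replyfifo);
    return 1;
  }
  fprintf(sf, "%s\n", replyfifo);
  fprintf(sf, "SEARCH\n");
  fprintf(sf, "suffix: Mds-Vo-name=Sweden,o=Grid\n");
  fprintf(sf, "scope: 0\n");
  fclose(sf); /* closing marks the end of the query */

  /* Copy the reply to stdout until EOF. */
  rf = fopen(replyfifo, "r");
  if (rf) {
    while (fgets(buf, sizeof(buf), rf))
      fputs(buf, stdout);
    fclose(rf);
  }
  unlink(replyfifo);
  return 0;
}

Compiled and run against a running arc-infoindex-server that uses the same server FIFO, this should print the entries registered to the Sweden index followed by the RESULT trailer, i.e. the same response the relay forwards back to slapd.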
nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/PaxHeaders.7502/main.cpp0000644000000000000000000000012411741502232024577 xustar000000000000000027 mtime=1334215834.878809 27 atime=1513200575.740716 30 ctime=1513200663.604791574 nordugrid-arc-5.4.2/src/services/ldap-infosys/giis/main.cpp0000644000175000002070000000240711741502232024647 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "Server.h" #include #include #include #include #include #include #include int main(int argv, char **argc) { std::string conffile; std::string fifo; int c; char* name = strrchr(argc[0], '/'); name = name ? name + 1 : argc[0]; while((c = getopt(argv, argc, "c:f:")) != -1) { if(c == 'c') conffile = optarg; else if(c == 'f') fifo = optarg; else { std::cout << "Usage: " << name << " -c conffile -f fifo" << std::endl; return 1; } } if(optind < argv) { std::cout << "Usage: " << name << " -c conffile -f fifo" << std::endl; return 1; } if(conffile.empty()) { std::cout << "No configuration file" << std::endl; return 1; } if(fifo.empty()) { std::cout << "No FIFO filename" << std::endl; return 1; } Server s(fifo, conffile); int i = fork(); if(i < 0) exit(1); if(i > 0) exit(0); setsid(); if(freopen("/dev/null", "r", stdin) == NULL) fclose(stdin); if(freopen("/dev/null", "a", stdout) == NULL) fclose(stdout); if(freopen("/dev/null", "a", stderr) == NULL) fclose(stderr); signal(SIGPIPE, SIG_IGN); s.Start(); return 0; } nordugrid-arc-5.4.2/src/services/ldap-infosys/PaxHeaders.7502/create-slapd-config.in0000644000000000000000000000012713024226423026357 xustar000000000000000027 mtime=1481714963.501999 30 atime=1513200651.351641713 30 ctime=1513200663.565791097 nordugrid-arc-5.4.2/src/services/ldap-infosys/create-slapd-config.in0000644000175000002070000003606013024226423026426 0ustar00mockbuildmock00000000000000#!/bin/bash # Create slapd config for the NorduGrid/ARC information system ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then echo "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi # Source the config parsing routines if [ -r "$ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh" ]; then . $ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh || exit $? else echo "Could not find $ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh" exit 1 fi ARC_CONFIG=${ARC_CONFIG:-/etc/arc.conf} if [ ! -r "$ARC_CONFIG" ]; then echo "ARC configuration file ($ARC_CONFIG) not found" echo "If this file is in a non-standard place it can be set with the" echo " ARC_CONFIG environment variable" exit 1 fi # Read arc.conf config_parse_file $ARC_CONFIG || exit $? # Check for infosys-block if ! 
config_match_section infosys; then echo "Missing infosys configuration block" exit 1 fi config_hide_all config_import_section common # These options need to come from the infosys-block, not from common unset CONFIG_logfile unset CONFIG_user unset CONFIG_port config_import_section infosys bdii_user=$CONFIG_user if [ -z "$bdii_user" ]; then # Get ldap user from passwd bdii_user=`getent passwd ldap openldap | sed 's/:.*//;q'` if [ -z "$bdii_user" ]; then echo "Warning, could not find ldap or openldap user" echo "resorting to using the root user" bdii_user=root fi fi # These values may be set in arc.conf, otherwise use sensible defaults slapd_loglevel=${CONFIG_slapd_loglevel:-0} slapd_hostnamebind=${CONFIG_slapd_hostnamebind:-"*"} slapd_port=${CONFIG_port:-2135} ldap_schema_dir=${CONFIG_ldap_schema_dir} threads=${CONFIG_threads:-32} timelimit=${CONFIG_timelimit:-2400} bdii_location=${CONFIG_bdii_location:-/usr} bdii_update_cmd=${CONFIG_bdii_update_cmd:-${bdii_location}/sbin/bdii-update} infosys_ldap_run_dir=${CONFIG_infosys_ldap_run_dir:-/var/run/arc/infosys} mkdir -p ${infosys_ldap_run_dir} chown ${bdii_user}: ${infosys_ldap_run_dir} giis_location=${CONFIG_giis_location:-$ARC_LOCATION} giis_fifo=${CONFIG_giis_fifo:-${infosys_ldap_run_dir}/giis-fifo} bdii_tmp_dir=${CONFIG_bdii_tmp_dir:-/var/tmp/arc/bdii} if [ ! -x $bdii_update_cmd ] || grep -q BDII_PID_FILE $bdii_update_cmd; then bdii_var_dir=${CONFIG_bdii_var_dir:-/var/lib/arc/bdii} bdii_run_dir=${CONFIG_bdii_run_dir:-/var/run/arc/bdii} else bdii_var_dir=${CONFIG_bdii_var_dir:-/var/run/arc/bdii} bdii_run_dir=${bdii_var_dir} fi bdii_db_config=${CONFIG_bdii_db_config:-"/etc/bdii/DB_CONFIG"} bdii_database=${CONFIG_bdii_database:-"hdb"} # Check for existance of core ldap schema coreschema=$(find /etc/openldap /etc/ldap ${ldap_schema_dir} -name core.schema \ -printf "%h/%f\n" 2>/dev/null) if [ "x" = "x$coreschema" ]; then echo "Could not find ldap core schema file" exit 1 fi # Check for existance of Glue schemas. glueschemadir=$(find /etc/openldap /etc/ldap ${ldap_schema_dir} -name Glue-CORE.schema \ -printf "%h\n" 2>/dev/null) if [ "x" = "x$glueschemadir" ]; then echo "Error, could not find glue schema directory under /etc" exit 1 fi # Check for existence of a system ldap, this command will be used by bdii slapd_cmd= slapd_env= if [ "x" = "x$CONFIG_slapd" ]; then O_IFS=$IFS IFS=: # slapd2.4 search added to overcome SL5 problems # can be removed once redhat5 is not supported anymore for dir in $PATH; do if [ -x "$dir/slapd2.4" ]; then slapd_cmd="$dir/slapd2.4" break fi done if [ -z "$slapd_cmd" ]; then for dir in $PATH; do if [ -x "$dir/slapd" ]; then slapd_cmd="$dir/slapd" break fi done fi IFS=$O_IFS else slapd_cmd=$CONFIG_slapd fi if [ -z "$slapd_cmd" ] || [ ! 
-x "$slapd_cmd" ]; then echo "Could not find ldap server binary, usually /usr/sbin/slapd" exit 1 fi find_ldap_database_module() { # First try to find a separate module # openldap2.4 search added to overcome SL5 problems # can be removed once redhat5 is not supported anymore if [[ "$slapd_cmd" =~ '2.4' ]]; then ldapdir=$(find /usr/lib64/openldap2.4 /usr/lib/openldap2.4 \ -name "back_${database}.la" -printf ":%h/" 2>/dev/null) else ldapdir=$(find /usr/lib64/openldap /usr/lib/openldap /usr/lib64/ldap \ /usr/lib/ldap -name "back_${database}.la" -printf ":%h/" 2>/dev/null) fi if [ -n "$ldapdir" ]; then # Separate module found ldapmodule="moduleload back_${database}" grep -E -q "(^|:)${ldapdir}(:|$)" <<< ${ldapdirs} || \ ldapdirs=${ldapdirs}${ldapdir} else # Separate module not found - check for preloaded module ldapmodule= if [ $(grep -Ec "${database}_db_init|${database}_back_db_init" "$slapd_cmd") -eq 0 ]; then # Module not found database= fi fi } find_ldap_overlay_module() { # Try to find a separate module # openldap2.4 search added to overcome SL5 problems # can be removed once redhat5 is not supported anymore if [[ "$slapd_cmd" =~ '2.4' ]]; then ldapdir=$(find /usr/lib64/openldap2.4 /usr/lib/openldap2.4 \ -name "${overlay}.la" -printf ":%h/" 2>/dev/null) else ldapdir=$(find /usr/lib64/openldap /usr/lib/openldap /usr/lib64/ldap \ /usr/lib/ldap -name "${overlay}.la" -printf ":%h/" 2>/dev/null) fi if [ -n "$ldapdir" ]; then # Separate module found ldapmodule="moduleload ${overlay}" grep -E -q "(^|:)${ldapdir}(:|$)" <<< ${ldapdirs} || \ ldapdirs=${ldapdirs}${ldapdir} else # Module not found ldapmodule= overlay= fi } ldapdirs= database=${bdii_database} find_ldap_database_module if [ -z "${database}" ]; then echo "Could not find ldap ${bdii_database} database module" exit 1 fi moduleload_bdii="${ldapmodule}" database=relay find_ldap_database_module if [ -z "${database}" ]; then echo "Could not find ldap relay database module, top-bdii integration is disabled." fi moduleload_relay="${ldapmodule}" overlay=rwm find_ldap_overlay_module if [ -z "$overlay" ]; then echo "Could not find ldap rwm overlay module, top-bdii integration is disabled." fi moduleload_rwm="${ldapmodule}" if [ "$(config_subsections infosys/index)" ]; then database=shell find_ldap_database_module if [ -z "${database}" ]; then echo "Could not find ldap shell database module" exit 1 fi moduleload_shell="${ldapmodule}" if [ -n "${ldapmodule}" ]; then ldapdir=`sed 's/^://;s,/:.*,,' <<< "$ldapdir"` dlname=`( . 
${ldapdir}/back_shell.la ; echo ${dlname} )` slapd_env="ARC_LDAPLIB_SHELL=${ldapdir}/${dlname}" fi if [ -r ${giis_location}/lib64/arc/arc-infoindex-slapd-wrapper.so ]; then pkglibdir=${giis_location}/lib64/arc elif [ -r ${giis_location}/lib/arc/arc-infoindex-slapd-wrapper.so ]; then pkglibdir=${giis_location}/lib/arc else echo "Error, could not find infoindex slapd wrapper, please install the package nordugrid-arc-egiis to run an index server" exit 1 fi ldapdirs=${ldapdirs}:${pkglibdir} moduleload_index="moduleload arc-infoindex-slapd-wrapper" if [ -z "${slapd_env}" ] ; then slapd_env="LD_PRELOAD=${pkglibdir}/arc-infoindex-slapd-wrapper.so" fi else moduleload_shell= moduleload_index= fi ldapdirs=`sed 's/^://' <<< $ldapdirs` #ldapdirs=`sed 's/:$//' <<< $ldapdirs` if [ -n "$ldapdirs" ]; then modulepath="modulepath $ldapdirs" else modulepath= fi for i in "/etc/bdii/BDII.schema" "${bdii_location}/etc/BDII.schema"; do if [ -r $i ]; then bdii_schema="include $i" break fi done bdii_slapd_conf=${infosys_ldap_run_dir}/bdii-slapd.conf rm -f $bdii_slapd_conf bdii_slapd_cmd=${infosys_ldap_run_dir}/bdii-slapd.cmd rm -f $bdii_slapd_cmd # Ensure the configuration file is not world-readable, # as it contains the slapd database password (umask 077; > $bdii_slapd_conf) pass=`/usr/bin/mkpasswd -s 0 2> /dev/null` || pass=$RANDOM$RANDOM cat <<-EOF >> $bdii_slapd_conf # This file was automatically generated by $0." # Do not modify. include ${coreschema} ${bdii_schema} #glue schemas include ${glueschemadir}/Glue-CORE.schema include ${glueschemadir}/Glue-CE.schema include ${glueschemadir}/Glue-CESEBind.schema include ${glueschemadir}/Glue-MDS.schema #glue2 schema include ${glueschemadir}/GLUE20.schema #nordugrid specific schemas include ${ARC_LOCATION}/@pkgdatasubdir@/ldap-schema/nordugrid.schema $modulepath $moduleload_bdii $moduleload_relay $moduleload_rwm $moduleload_shell $moduleload_index allow bind_v2 pidfile $bdii_run_dir/db/slapd.pid argsfile $bdii_run_dir/db/slapd.args loglevel $slapd_loglevel threads $threads idletimeout 120 sizelimit unlimited timelimit $timelimit EOF for vo in `config_subsections infosys/index`; do ( CONFIG_name= config_import_section infosys/index/$vo indexname=${CONFIG_name:-$vo} cat <<-EOF >> $bdii_slapd_conf # Index Service: $vo database shell suffix "Mds-Vo-name=$indexname,o=grid" bind $giis_location/sbin/arc-infoindex-relay $giis_fifo add $giis_location/sbin/arc-infoindex-relay $giis_fifo search $giis_location/sbin/arc-infoindex-relay $giis_fifo access to * by * write EOF ) done if [ -n "${moduleload_rwm}" ]; then ( admindomain="UNDEFINEDVALUE" CONFIG_name= config_import_section infosys/admindomain admindomain="urn:ad:${CONFIG_name:-$admindomain}" cat <<-EOF >> $bdii_slapd_conf # Relay to allow top-bdii to parse info as the CE was a site-bdii database relay suffix "GLUE2GroupID=resource,o=glue" overlay rwm rwm-rewriteEngine on rwm-rewriteContext default rwm-rewriteRule "GLUE2GroupID=resource,o=glue" "GLUE2GroupID=services,o=glue" ":" rwm-rewriteContext searchFilter rwm-rewriteContext searchEntryDN rwm-rewriteContext searchAttrDN rwm-rewriteContext matchedDN database relay suffix "GLUE2GroupID=resource,GLUE2DomainID=$admindomain,o=glue" overlay rwm rwm-rewriteEngine on rwm-rewriteContext default rwm-rewriteRule "GLUE2GroupID=resource,GLUE2DomainID=$admindomain,o=glue" "GLUE2GroupID=services,o=glue" ":" rwm-rewriteContext searchFilter rwm-rewriteContext searchEntryDN rwm-rewriteRule "(.*[^ ],)?[ ]?GLUE2GroupID=services,o=glue" 
"\$1GLUE2GroupID=services,GLUE2DomainID=$admindomain,o=glue" ":" rwm-rewriteContext searchAttrDN rwm-rewriteContext matchedDN database relay suffix "GLUE2GroupID=services,GLUE2DomainID=$admindomain,o=glue" overlay rwm suffixmassage "GLUE2GroupID=services,o=glue" EOF ) fi cat <<-EOF >> $bdii_slapd_conf # ${bdii_database} database definitions for o=grid database ${bdii_database} cachesize 150000 dbnosync suffix "o=grid" checkpoint 131072 60 rootdn "o=grid" rootpw $pass directory $bdii_var_dir/db/arc # ${bdii_database} database definitions for o=glue database ${bdii_database} cachesize 150000 dbnosync suffix "o=glue" checkpoint 131072 60 rootdn "o=glue" rootpw $pass directory $bdii_var_dir/db/glue2 # ${bdii_database} database definitions for o=infosys database ${bdii_database} cachesize 60 dbnosync suffix "o=infosys" checkpoint 131072 60 rootdn "o=infosys" rootpw $pass directory $bdii_var_dir/db/stats EOF chown $bdii_user: $bdii_slapd_conf [ -x /sbin/restorecon ] && /sbin/restorecon $bdii_slapd_conf if [ "x$slapd_hostnamebind" = "x*" ]; then echo ${slapd_env} ${slapd_cmd} -f ${bdii_slapd_conf} -h \"ldap://${slapd_hostnamebind}:${slapd_port}\" -u ${bdii_user} > ${bdii_slapd_cmd} else echo ${slapd_env} ${slapd_cmd} -f ${bdii_slapd_conf} -h \"ldap://localhost:${slapd_port} ldap://${slapd_hostnamebind}:${slapd_port}\" -u ${bdii_user} > ${bdii_slapd_cmd} fi chmod +x ${bdii_slapd_cmd} # Initialize the database directories mkdir -p $bdii_run_dir/db mkdir -p $bdii_run_dir/archive chown $bdii_user: $bdii_run_dir chown $bdii_user: $bdii_run_dir/db chown $bdii_user: $bdii_run_dir/archive [ -x /sbin/restorecon ] && /sbin/restorecon -R $bdii_run_dir/db [ -x /sbin/restorecon ] && /sbin/restorecon -R $bdii_run_dir/archive mkdir -p $bdii_var_dir/archive mkdir -p $bdii_var_dir/db/arc mkdir -p $bdii_var_dir/db/glue2 mkdir -p $bdii_var_dir/db/stats rm -f $bdii_var_dir/db/arc/* 2>/dev/null rm -f $bdii_var_dir/db/glue2/* 2>/dev/null rm -f $bdii_var_dir/db/stats/* 2>/dev/null chown $bdii_user: $bdii_var_dir/db chown $bdii_user: $bdii_var_dir/archive chown $bdii_user: $bdii_var_dir/db/arc chown $bdii_user: $bdii_var_dir/db/glue2 chown $bdii_user: $bdii_var_dir/db/stats [ -x /sbin/restorecon ] && /sbin/restorecon -R $bdii_var_dir/db [ -x /sbin/restorecon ] && /sbin/restorecon -R $bdii_var_dir/archive # Workaround for BDII DB_CONFIG cachesize bigger than actual memory set_cachesize_line=`egrep '^[[:space:]]*'set_cachesize ${bdii_db_config}` if [ -n "${set_cachesize_line}" ]; then if [ -e /proc/meminfo ]; then memsize=$(grep MemFree /proc/meminfo | awk '{printf "%.0f", $2 * 1024}') default_set_cachesize=$(echo ${set_cachesize_line} | awk '{print $2 * 1073741824 + $3}') half_memsize=$(( ${memsize} / 2 )) if [ $default_set_cachesize -ge $half_memsize ]; then echo "The system does not fulfill BDII optimal memory requirements" echo "ARC will try to fix it anyway..." new_set_cachesize=$(( $memsize / 16 )) TEMPBDIIDBCONFIG=`mktemp -q /tmp/DB_CONFIG.XXXXXX` chmod 644 $TEMPBDIIDBCONFIG sed "s/^set_cachesize.*$/set_cachesize 0 $new_set_cachesize 1/" ${bdii_db_config} > $TEMPBDIIDBCONFIG bdii_db_config=${TEMPBDIIDBCONFIG} echo "DB_CONFIG set_cachesize is now: 0 $new_set_cachesize 1" fi else echo "/proc/meminfo does not exist. 
Cannot apply BDII memory workaround" echo "slapd might fail to start" fi fi # End of BDII set_cachesize workaround # copy BDII DB_CONFIG in ARC locations cp ${bdii_db_config} ${bdii_var_dir}/db/arc/DB_CONFIG cp ${bdii_db_config} ${bdii_var_dir}/db/glue2/DB_CONFIG cp ${bdii_db_config} ${bdii_var_dir}/db/stats/DB_CONFIG chown $bdii_user: ${bdii_var_dir}/db/arc/DB_CONFIG chown $bdii_user: ${bdii_var_dir}/db/glue2/DB_CONFIG chown $bdii_user: ${bdii_var_dir}/db/stats/DB_CONFIG # if the BDII low memory workaround has been applied, remove the temp file if [ -r $TEMPBDIIDBCONFIG ]; then rm -f $TEMPBDIIDBCONFIG fi # Create cron configuration for slapd checkpointing cron db_archive=${CONFIG_db_archive:-/usr/sbin/slapd_db_archive} db_checkpoint=${CONFIG_db_checkpoint:-/usr/sbin/slapd_db_checkpoint} slapd_cron_checkpoint=${CONFIG_slapd_cron_checkpoint:-disable} if [ "x${slapd_cron_checkpoint}" = "xenable" ]; then if [ -n $db_checkpoint ] && [ -x $db_checkpoint ] && \ [ -n $db_archive ] && [ -x $db_archive ] && \ [ -d "/etc/cron.d" ]; then cat <<-EOF > /etc/cron.d/nordugrid-arc-aris 0-59/5 * * * * $bdii_user $db_checkpoint -1 -h $bdii_var_dir/db/arc && $db_archive -a -h $bdii_var_dir/db/arc|xargs rm -f EOF else echo "You have enabled slapd cron checkpointing." echo "However, it could not be configured." echo "Please check db_checkpoint and db_archive in ${ARC_CONFIG}." exit 1 fi fi nordugrid-arc-5.4.2/src/services/ldap-infosys/PaxHeaders.7502/create-inforeg-config.in0000644000000000000000000000012713024226423026705 xustar000000000000000027 mtime=1481714963.501999 30 atime=1513200651.337641542 30 ctime=1513200663.564791084 nordugrid-arc-5.4.2/src/services/ldap-infosys/create-inforeg-config.in0000644000175000002070000001252413024226423026753 0ustar00mockbuildmock00000000000000#!/bin/bash # Create info registration config for the NorduGrid/ARC information system ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then echo "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi # Source the config parsing routines if [ -r "$ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh" ]; then . $ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh || exit $? else echo "Could not find $ARC_LOCATION/@pkgdatasubdir@/config_parser_compat.sh" exit 1 fi ARC_CONFIG=${ARC_CONFIG:-/etc/arc.conf} if [ ! -r "$ARC_CONFIG" ]; then echo "ARC configuration file ($ARC_CONFIG) not found" echo "If this file is in a non-standard place it can be set with the" echo " ARC_CONFIG environment variable" exit 1 fi # Read arc.conf config_parse_file $ARC_CONFIG || exit $? # Check for infosys-block if ! 
config_match_section infosys; then echo "Missing infosys configuration block" exit 1 fi config_hide_all config_import_section common config_import_section cluster # These options need to come from the infosys-block, not from common unset CONFIG_logfile unset CONFIG_user unset CONFIG_port config_import_section infosys bdii_user=$CONFIG_user if [ -z "$bdii_user" ]; then # Get ldap user from passwd bdii_user=`getent passwd ldap openldap | sed 's/:.*//;q'` if [ -z "$bdii_user" ]; then echo "Warning, could not find ldap or openldap user" echo "resorting to using the root user" bdii_user=root fi fi # directories registrationlog=${CONFIG_registrationlog:-/var/log/arc/inforegistration.log} mkdir -p `dirname $registrationlog` touch ${registrationlog} chown ${bdii_user}: ${registrationlog} infosys_ldap_run_dir=${CONFIG_infosys_ldap_run_dir:-/var/run/arc/infosys} mkdir -p ${infosys_ldap_run_dir} chown ${bdii_user}: ${infosys_ldap_run_dir} registrationconf=${infosys_ldap_run_dir}/grid-info-resource-register.conf printregldif () { cat <<-EOF # Registration "$rootdn" -> "$targetsuffix" dn: Mds-Vo-Op-name=register, $targetsuffix regtype: mdsreg2 reghn: $reghn regport: $regport regperiod: $regperiod type: ldap hn: $hn port: $port rootdn: $rootdn ttl: $ttl timeout: $timeout mode: cachedump cachettl: $cachettl bindmethod: $bindmethod EOF } echo "# This file was automatically generated by $0." > $registrationconf echo "# Do not modify." >> $registrationconf echo >> $registrationconf ( config_hide_all config_import_section common config_import_section cluster config_import_section infosys # Cluster registration blocks for p in `config_subsections infosys/cluster/registration`; do ( config_import_section infosys/cluster/registration/$p targetsuffix=${CONFIG_targetsuffix:-"Mds-Vo-name=$p,o=grid"} reghn=${CONFIG_targethostname:-"targethostname.not.set"} regport=${CONFIG_targetport:-2135} hn=${CONFIG_registranthostname:-$CONFIG_hostname} port=${CONFIG_registrantport:-${CONFIG_port:-2135}} rootdn=${CONFIG_registrantsuffix:-"nordugrid-cluster-name=$hn,Mds-Vo-name=local,o=grid"} regperiod=${CONFIG_regperiod:-120} ttl=${CONFIG_ttl:-$(( $regperiod * 2 ))} timeout=${CONFIG_timeout:-45} cachettl=${CONFIG_cachettl:-0} sizelimit=${CONFIG_sizelimit:-0} bindmethod=${CONFIG_bindmethod:-ANONYM-ONLY} printregldif >> $registrationconf ) done # SE registration blocks for seentry in `config_subsections infosys/se`; do ( CONFIG_name= config_import_section se/$seentry sename=${CONFIG_name:-$seentry} config_import_section infosys/se/$seentry for p in `config_subsections infosys/se/$seentry/registration`; do ( config_import_section infosys/se/$seentry/registration/$p targetsuffix=${CONFIG_targetsuffix:-"Mds-Vo-name=$p,o=grid"} reghn=${CONFIG_targethostname:-"targethostname.not.set"} regport=${CONFIG_targetport:-2135} hn=${CONFIG_registranthostname:-$CONFIG_hostname} port=${CONFIG_registrantport:-${CONFIG_port:-2135}} rootdn=${CONFIG_registrantsuffix:-"nordugrid-se-name=$sename:$hn,Mds-Vo-name=local,o=grid"} regperiod=${CONFIG_regperiod:-120} ttl=${CONFIG_ttl:-$(( $regperiod * 2 ))} timeout=${CONFIG_timeout:-45} cachettl=${CONFIG_cachettl:-0} sizelimit=${CONFIG_sizelimit:-0} bindmethod=${CONFIG_bindmethod:-ANONYM-ONLY} printregldif >> $registrationconf ) done ) done # Registrations of the Index Services for vo in `config_subsections infosys/index`; do ( CONFIG_name= config_import_section infosys/index/$vo indexname=${CONFIG_name:-$vo} for r in `config_subsections infosys/index/$vo/registration`; do ( config_import_section 
infosys/index/$vo/registration/$r targetsuffix=${CONFIG_targetsuffix:-"Mds-Vo-name=$r,o=grid"} reghn=${CONFIG_targethostname:-"targethostname.not.set"} regport=${CONFIG_targetport:-2135} hn=${CONFIG_registranthostname:-$CONFIG_hostname} port=${CONFIG_registrantport:-${CONFIG_port:-2135}} rootdn=${CONFIG_registrantsuffix:-"Mds-Vo-name=$indexname,o=grid"} regperiod=${CONFIG_regperiod:-120} ttl=${CONFIG_ttl:-$(( $regperiod * 2 ))} timeout=${CONFIG_timeout:-120} cachettl=${CONFIG_cachettl:-0} sizelimit=${CONFIG_sizelimit:-0} bindmethod=${CONFIG_bindmethod:-ANONYM-ONLY} printregldif >> $registrationconf ) done ) done ) exit 0 nordugrid-arc-5.4.2/src/PaxHeaders.7502/external0000644000000000000000000000013213214316022017522 xustar000000000000000030 mtime=1513200658.679731338 30 atime=1513200668.719854133 30 ctime=1513200658.679731338 nordugrid-arc-5.4.2/src/external/0000755000175000002070000000000013214316022017645 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/external/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712267746371021664 xustar000000000000000027 mtime=1390398713.380593 30 atime=1513200593.579935142 30 ctime=1513200658.676731302 nordugrid-arc-5.4.2/src/external/Makefile.am0000644000175000002070000000004512267746371021725 0ustar00mockbuildmock00000000000000SUBDIRS = cJSON DIST_SUBDIRS = cJSON nordugrid-arc-5.4.2/src/external/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315721021651 xustar000000000000000030 mtime=1513200593.610935521 30 atime=1513200647.751597683 30 ctime=1513200658.677731314 nordugrid-arc-5.4.2/src/external/Makefile.in0000644000175000002070000005544713214315721021736 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/external DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ 
ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ 
LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = 
@libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = cJSON DIST_SUBDIRS = cJSON all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/external/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/external/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/external/PaxHeaders.7502/cJSON0000644000000000000000000000013213214316022020436 xustar000000000000000030 mtime=1513200658.707731681 30 atime=1513200668.719854133 30 ctime=1513200658.707731681 nordugrid-arc-5.4.2/src/external/cJSON/0000755000175000002070000000000013214316022020561 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/external/cJSON/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712230572722022563 xustar000000000000000027 mtime=1382217170.911343 30 atime=1513200593.625935705 30 ctime=1513200658.704731644 nordugrid-arc-5.4.2/src/external/cJSON/Makefile.am0000644000175000002070000000014112230572722022621 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libcjson.la libcjson_la_SOURCES = cJSON.c cJSON.h libcjson_la_LIBADD = -lm nordugrid-arc-5.4.2/src/external/cJSON/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315721022565 xustar000000000000000030 mtime=1513200593.664936182 30 atime=1513200647.766597866 30 ctime=1513200658.705731656 nordugrid-arc-5.4.2/src/external/cJSON/Makefile.in0000644000175000002070000005035413214315721022642 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/external/cJSON DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libcjson_la_DEPENDENCIES = am_libcjson_la_OBJECTS = cJSON.lo libcjson_la_OBJECTS = $(am_libcjson_la_OBJECTS) DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libcjson_la_SOURCES) DIST_SOURCES = $(libcjson_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ 
ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ 
LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ 
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libcjson.la libcjson_la_SOURCES = cJSON.c cJSON.h libcjson_la_LIBADD = -lm all: all-am .SUFFIXES: .SUFFIXES: .c .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/external/cJSON/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/external/cJSON/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libcjson.la: $(libcjson_la_OBJECTS) $(libcjson_la_DEPENDENCIES) $(LINK) $(libcjson_la_OBJECTS) $(libcjson_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cJSON.Plo@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(COMPILE) -c $< .c.obj: @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` .c.lo: @am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo 
@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/external/cJSON/PaxHeaders.7502/cJSON.c0000644000000000000000000000012412230004771021576 xustar000000000000000027 mtime=1382025721.834744 27 atime=1513200575.392712 30 ctime=1513200658.706731669 nordugrid-arc-5.4.2/src/external/cJSON/cJSON.c0000644000175000002070000005517112230004771021654 0ustar00mockbuildmock00000000000000/* Copyright (c) 2009 Dave Gamble Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* cJSON */ /* JSON parser in C. */ #include #include #include #include #include #include #include #include "cJSON.h" static const char *ep; const char *cJSON_GetErrorPtr(void) {return ep;} static int cJSON_strcasecmp(const char *s1,const char *s2) { if (!s1) return (s1==s2)?0:1;if (!s2) return 1; for(; tolower(*s1) == tolower(*s2); ++s1, ++s2) if(*s1 == 0) return 0; return tolower(*(const unsigned char *)s1) - tolower(*(const unsigned char *)s2); } static void *(*cJSON_malloc)(size_t sz) = malloc; static void (*cJSON_free)(void *ptr) = free; static char* cJSON_strdup(const char* str) { size_t len; char* copy; len = strlen(str) + 1; if (!(copy = (char*)cJSON_malloc(len))) return 0; memcpy(copy,str,len); return copy; } void cJSON_InitHooks(cJSON_Hooks* hooks) { if (!hooks) { /* Reset hooks */ cJSON_malloc = malloc; cJSON_free = free; return; } cJSON_malloc = (hooks->malloc_fn)?hooks->malloc_fn:malloc; cJSON_free = (hooks->free_fn)?hooks->free_fn:free; } /* Internal constructor. */ static cJSON *cJSON_New_Item(void) { cJSON* node = (cJSON*)cJSON_malloc(sizeof(cJSON)); if (node) memset(node,0,sizeof(cJSON)); return node; } /* Delete a cJSON structure. */ void cJSON_Delete(cJSON *c) { cJSON *next; while (c) { next=c->next; if (!(c->type&cJSON_IsReference) && c->child) cJSON_Delete(c->child); if (!(c->type&cJSON_IsReference) && c->valuestring) cJSON_free(c->valuestring); if (c->string) cJSON_free(c->string); cJSON_free(c); c=next; } } /* Parse the input text to generate a number, and populate the result into item. */ static const char *parse_number(cJSON *item,const char *num) { double n=0,sign=1,scale=0;int subscale=0,signsubscale=1; if (*num=='-') sign=-1,num++; /* Has sign? */ if (*num=='0') num++; /* is zero */ if (*num>='1' && *num<='9') do n=(n*10.0)+(*num++ -'0'); while (*num>='0' && *num<='9'); /* Number? */ if (*num=='.' && num[1]>='0' && num[1]<='9') {num++; do n=(n*10.0)+(*num++ -'0'),scale--; while (*num>='0' && *num<='9');} /* Fractional part? */ if (*num=='e' || *num=='E') /* Exponent? */ { num++;if (*num=='+') num++; else if (*num=='-') signsubscale=-1,num++; /* With sign? 
*/ while (*num>='0' && *num<='9') subscale=(subscale*10)+(*num++ - '0'); /* Number? */ } n=sign*n*pow(10.0,(scale+subscale*signsubscale)); /* number = +/- number.fraction * 10^+/- exponent */ item->valuedouble=n; item->valueint=(int)n; item->type=cJSON_Number; return num; } /* Render the number nicely from the given item into a string. */ static char *print_number(cJSON *item) { char *str; double d=item->valuedouble; if (fabs(((double)item->valueint)-d)<=DBL_EPSILON && d<=INT_MAX && d>=INT_MIN) { str=(char*)cJSON_malloc(21); /* 2^64+1 can be represented in 21 chars. */ if (str) sprintf(str,"%d",item->valueint); } else { str=(char*)cJSON_malloc(64); /* This is a nice tradeoff. */ if (str) { if (fabs(floor(d)-d)<=DBL_EPSILON && fabs(d)<1.0e60)sprintf(str,"%.0f",d); else if (fabs(d)<1.0e-6 || fabs(d)>1.0e9) sprintf(str,"%e",d); else sprintf(str,"%f",d); } } return str; } static unsigned parse_hex4(const char *str) { unsigned h=0; if (*str>='0' && *str<='9') h+=(*str)-'0'; else if (*str>='A' && *str<='F') h+=10+(*str)-'A'; else if (*str>='a' && *str<='f') h+=10+(*str)-'a'; else return 0; h=h<<4;str++; if (*str>='0' && *str<='9') h+=(*str)-'0'; else if (*str>='A' && *str<='F') h+=10+(*str)-'A'; else if (*str>='a' && *str<='f') h+=10+(*str)-'a'; else return 0; h=h<<4;str++; if (*str>='0' && *str<='9') h+=(*str)-'0'; else if (*str>='A' && *str<='F') h+=10+(*str)-'A'; else if (*str>='a' && *str<='f') h+=10+(*str)-'a'; else return 0; h=h<<4;str++; if (*str>='0' && *str<='9') h+=(*str)-'0'; else if (*str>='A' && *str<='F') h+=10+(*str)-'A'; else if (*str>='a' && *str<='f') h+=10+(*str)-'a'; else return 0; return h; } /* Parse the input text into an unescaped cstring, and populate item. */ static const unsigned char firstByteMark[7] = { 0x00, 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC }; static const char *parse_string(cJSON *item,const char *str) { const char *ptr=str+1;char *ptr2;char *out;int len=0;unsigned uc,uc2; if (*str!='\"') {ep=str;return 0;} /* not a string! */ while (*ptr!='\"' && *ptr && ++len) if (*ptr++ == '\\') ptr++; /* Skip escaped quotes. */ out=(char*)cJSON_malloc(len+1); /* This is how long we need for the string, roughly. */ if (!out) return 0; ptr=str+1;ptr2=out; while (*ptr!='\"' && *ptr) { if (*ptr!='\\') *ptr2++=*ptr++; else { ptr++; switch (*ptr) { case 'b': *ptr2++='\b'; break; case 'f': *ptr2++='\f'; break; case 'n': *ptr2++='\n'; break; case 'r': *ptr2++='\r'; break; case 't': *ptr2++='\t'; break; case 'u': /* transcode utf16 to utf8. */ uc=parse_hex4(ptr+1);ptr+=4; /* get the unicode char. */ if ((uc>=0xDC00 && uc<=0xDFFF) || uc==0) break; /* check for invalid. */ if (uc>=0xD800 && uc<=0xDBFF) /* UTF16 surrogate pairs. */ { if (ptr[1]!='\\' || ptr[2]!='u') break; /* missing second-half of surrogate. */ uc2=parse_hex4(ptr+3);ptr+=6; if (uc2<0xDC00 || uc2>0xDFFF) break; /* invalid second-half of surrogate. */ uc=0x10000 + (((uc&0x3FF)<<10) | (uc2&0x3FF)); } len=4;if (uc<0x80) len=1;else if (uc<0x800) len=2;else if (uc<0x10000) len=3; ptr2+=len; switch (len) { case 4: *--ptr2 =((uc | 0x80) & 0xBF); uc >>= 6; case 3: *--ptr2 =((uc | 0x80) & 0xBF); uc >>= 6; case 2: *--ptr2 =((uc | 0x80) & 0xBF); uc >>= 6; case 1: *--ptr2 =(uc | firstByteMark[len]); } ptr2+=len; break; default: *ptr2++=*ptr; break; } ptr++; } } *ptr2=0; if (*ptr=='\"') ptr++; item->valuestring=out; item->type=cJSON_String; return ptr; } /* Render the cstring provided to an escaped version that can be printed. 
*/ static char *print_string_ptr(const char *str) { const char *ptr;char *ptr2,*out;int len=0;unsigned char token; if (!str) return cJSON_strdup(""); ptr=str;while ((token=*ptr) && ++len) {if (strchr("\"\\\b\f\n\r\t",token)) len++; else if (token<32) len+=5;ptr++;} out=(char*)cJSON_malloc(len+3); if (!out) return 0; ptr2=out;ptr=str; *ptr2++='\"'; while (*ptr) { if ((unsigned char)*ptr>31 && *ptr!='\"' && *ptr!='\\') *ptr2++=*ptr++; else { *ptr2++='\\'; switch (token=*ptr++) { case '\\': *ptr2++='\\'; break; case '\"': *ptr2++='\"'; break; case '\b': *ptr2++='b'; break; case '\f': *ptr2++='f'; break; case '\n': *ptr2++='n'; break; case '\r': *ptr2++='r'; break; case '\t': *ptr2++='t'; break; default: sprintf(ptr2,"u%04x",token);ptr2+=5; break; /* escape and print */ } } } *ptr2++='\"';*ptr2++=0; return out; } /* Invote print_string_ptr (which is useful) on an item. */ static char *print_string(cJSON *item) {return print_string_ptr(item->valuestring);} /* Predeclare these prototypes. */ static const char *parse_value(cJSON *item,const char *value); static char *print_value(cJSON *item,int depth,int fmt); static const char *parse_array(cJSON *item,const char *value); static char *print_array(cJSON *item,int depth,int fmt); static const char *parse_object(cJSON *item,const char *value); static char *print_object(cJSON *item,int depth,int fmt); /* Utility to jump whitespace and cr/lf */ static const char *skip(const char *in) {while (in && *in && (unsigned char)*in<=32) in++; return in;} /* Parse an object - create a new root, and populate. */ cJSON *cJSON_ParseWithOpts(const char *value,const char **return_parse_end,int require_null_terminated) { const char *end=0; cJSON *c=cJSON_New_Item(); ep=0; if (!c) return 0; /* memory fail */ end=parse_value(c,skip(value)); if (!end) {cJSON_Delete(c);return 0;} /* parse failure. ep is set. */ /* if we require null-terminated JSON without appended garbage, skip and then check for a null terminator */ if (require_null_terminated) {end=skip(end);if (*end) {cJSON_Delete(c);ep=end;return 0;}} if (return_parse_end) *return_parse_end=end; return c; } /* Default options for cJSON_Parse */ cJSON *cJSON_Parse(const char *value) {return cJSON_ParseWithOpts(value,0,0);} /* Render a cJSON item/entity/structure to text. */ char *cJSON_Print(cJSON *item) {return print_value(item,0,1);} char *cJSON_PrintUnformatted(cJSON *item) {return print_value(item,0,0);} /* Parser core - when encountering text, process appropriately. */ static const char *parse_value(cJSON *item,const char *value) { if (!value) return 0; /* Fail on null. */ if (!strncmp(value,"null",4)) { item->type=cJSON_NULL; return value+4; } if (!strncmp(value,"false",5)) { item->type=cJSON_False; return value+5; } if (!strncmp(value,"true",4)) { item->type=cJSON_True; item->valueint=1; return value+4; } if (*value=='\"') { return parse_string(item,value); } if (*value=='-' || (*value>='0' && *value<='9')) { return parse_number(item,value); } if (*value=='[') { return parse_array(item,value); } if (*value=='{') { return parse_object(item,value); } ep=value;return 0; /* failure. */ } /* Render a value to text. 
*/ static char *print_value(cJSON *item,int depth,int fmt) { char *out=0; if (!item) return 0; switch ((item->type)&255) { case cJSON_NULL: out=cJSON_strdup("null"); break; case cJSON_False: out=cJSON_strdup("false");break; case cJSON_True: out=cJSON_strdup("true"); break; case cJSON_Number: out=print_number(item);break; case cJSON_String: out=print_string(item);break; case cJSON_Array: out=print_array(item,depth,fmt);break; case cJSON_Object: out=print_object(item,depth,fmt);break; } return out; } /* Build an array from input text. */ static const char *parse_array(cJSON *item,const char *value) { cJSON *child; if (*value!='[') {ep=value;return 0;} /* not an array! */ item->type=cJSON_Array; value=skip(value+1); if (*value==']') return value+1; /* empty array. */ item->child=child=cJSON_New_Item(); if (!item->child) return 0; /* memory fail */ value=skip(parse_value(child,skip(value))); /* skip any spacing, get the value. */ if (!value) return 0; while (*value==',') { cJSON *new_item; if (!(new_item=cJSON_New_Item())) return 0; /* memory fail */ child->next=new_item;new_item->prev=child;child=new_item; value=skip(parse_value(child,skip(value+1))); if (!value) return 0; /* memory fail */ } if (*value==']') return value+1; /* end of array */ ep=value;return 0; /* malformed. */ } /* Render an array to text */ static char *print_array(cJSON *item,int depth,int fmt) { char **entries; char *out=0,*ptr,*ret;int len=5; cJSON *child=item->child; int numentries=0,i=0,fail=0; /* How many entries in the array? */ while (child) numentries++,child=child->next; /* Explicitly handle numentries==0 */ if (!numentries) { out=(char*)cJSON_malloc(3); if (out) strcpy(out,"[]"); return out; } /* Allocate an array to hold the values for each */ entries=(char**)cJSON_malloc(numentries*sizeof(char*)); if (!entries) return 0; memset(entries,0,numentries*sizeof(char*)); /* Retrieve all the results: */ child=item->child; while (child && !fail) { ret=print_value(child,depth+1,fmt); entries[i++]=ret; if (ret) len+=strlen(ret)+2+(fmt?1:0); else fail=1; child=child->next; } /* If we didn't fail, try to malloc the output string */ if (!fail) out=(char*)cJSON_malloc(len); /* If that fails, we fail. */ if (!out) fail=1; /* Handle failure. */ if (fail) { for (i=0;itype=cJSON_Object; value=skip(value+1); if (*value=='}') return value+1; /* empty array. */ item->child=child=cJSON_New_Item(); if (!item->child) return 0; value=skip(parse_string(child,skip(value))); if (!value) return 0; child->string=child->valuestring;child->valuestring=0; if (*value!=':') {ep=value;return 0;} /* fail! */ value=skip(parse_value(child,skip(value+1))); /* skip any spacing, get the value. */ if (!value) return 0; while (*value==',') { cJSON *new_item; if (!(new_item=cJSON_New_Item())) return 0; /* memory fail */ child->next=new_item;new_item->prev=child;child=new_item; value=skip(parse_string(child,skip(value+1))); if (!value) return 0; child->string=child->valuestring;child->valuestring=0; if (*value!=':') {ep=value;return 0;} /* fail! */ value=skip(parse_value(child,skip(value+1))); /* skip any spacing, get the value. */ if (!value) return 0; } if (*value=='}') return value+1; /* end of array */ ep=value;return 0; /* malformed. */ } /* Render an object to text. */ static char *print_object(cJSON *item,int depth,int fmt) { char **entries=0,**names=0; char *out=0,*ptr,*ret,*str;int len=7,i=0,j; cJSON *child=item->child; int numentries=0,fail=0; /* Count the number of entries. 
*/ while (child) numentries++,child=child->next; /* Explicitly handle empty object case */ if (!numentries) { out=(char*)cJSON_malloc(fmt?depth+4:3); if (!out) return 0; ptr=out;*ptr++='{'; if (fmt) {*ptr++='\n';for (i=0;ichild;depth++;if (fmt) len+=depth; while (child) { names[i]=str=print_string_ptr(child->string); entries[i++]=ret=print_value(child,depth,fmt); if (str && ret) len+=strlen(ret)+strlen(str)+2+(fmt?2+depth:0); else fail=1; child=child->next; } /* Try to allocate the output string */ if (!fail) out=(char*)cJSON_malloc(len); if (!out) fail=1; /* Handle failure */ if (fail) { for (i=0;ichild;int i=0;while(c)i++,c=c->next;return i;} cJSON *cJSON_GetArrayItem(cJSON *array,int item) {cJSON *c=array->child; while (c && item>0) item--,c=c->next; return c;} cJSON *cJSON_GetObjectItem(cJSON *object,const char *string) {cJSON *c=object->child; while (c && cJSON_strcasecmp(c->string,string)) c=c->next; return c;} /* Utility for array list handling. */ static void suffix_object(cJSON *prev,cJSON *item) {prev->next=item;item->prev=prev;} /* Utility for handling references. */ static cJSON *create_reference(cJSON *item) {cJSON *ref=cJSON_New_Item();if (!ref) return 0;memcpy(ref,item,sizeof(cJSON));ref->string=0;ref->type|=cJSON_IsReference;ref->next=ref->prev=0;return ref;} /* Add item to array/object. */ void cJSON_AddItemToArray(cJSON *array, cJSON *item) {cJSON *c=array->child;if (!item) return; if (!c) {array->child=item;} else {while (c && c->next) c=c->next; suffix_object(c,item);}} void cJSON_AddItemToObject(cJSON *object,const char *string,cJSON *item) {if (!item) return; if (item->string) cJSON_free(item->string);item->string=cJSON_strdup(string);cJSON_AddItemToArray(object,item);} void cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item) {cJSON_AddItemToArray(array,create_reference(item));} void cJSON_AddItemReferenceToObject(cJSON *object,const char *string,cJSON *item) {cJSON_AddItemToObject(object,string,create_reference(item));} cJSON *cJSON_DetachItemFromArray(cJSON *array,int which) {cJSON *c=array->child;while (c && which>0) c=c->next,which--;if (!c) return 0; if (c->prev) c->prev->next=c->next;if (c->next) c->next->prev=c->prev;if (c==array->child) array->child=c->next;c->prev=c->next=0;return c;} void cJSON_DeleteItemFromArray(cJSON *array,int which) {cJSON_Delete(cJSON_DetachItemFromArray(array,which));} cJSON *cJSON_DetachItemFromObject(cJSON *object,const char *string) {int i=0;cJSON *c=object->child;while (c && cJSON_strcasecmp(c->string,string)) i++,c=c->next;if (c) return cJSON_DetachItemFromArray(object,i);return 0;} void cJSON_DeleteItemFromObject(cJSON *object,const char *string) {cJSON_Delete(cJSON_DetachItemFromObject(object,string));} /* Replace array/object items with new ones. 
*/ void cJSON_ReplaceItemInArray(cJSON *array,int which,cJSON *newitem) {cJSON *c=array->child;while (c && which>0) c=c->next,which--;if (!c) return; newitem->next=c->next;newitem->prev=c->prev;if (newitem->next) newitem->next->prev=newitem; if (c==array->child) array->child=newitem; else newitem->prev->next=newitem;c->next=c->prev=0;cJSON_Delete(c);} void cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem){int i=0;cJSON *c=object->child;while(c && cJSON_strcasecmp(c->string,string))i++,c=c->next;if(c){newitem->string=cJSON_strdup(string);cJSON_ReplaceItemInArray(object,i,newitem);}} /* Create basic types: */ cJSON *cJSON_CreateNull(void) {cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_NULL;return item;} cJSON *cJSON_CreateTrue(void) {cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_True;return item;} cJSON *cJSON_CreateFalse(void) {cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_False;return item;} cJSON *cJSON_CreateBool(int b) {cJSON *item=cJSON_New_Item();if(item)item->type=b?cJSON_True:cJSON_False;return item;} cJSON *cJSON_CreateNumber(double num) {cJSON *item=cJSON_New_Item();if(item){item->type=cJSON_Number;item->valuedouble=num;item->valueint=(int)num;}return item;} cJSON *cJSON_CreateString(const char *string) {cJSON *item=cJSON_New_Item();if(item){item->type=cJSON_String;item->valuestring=cJSON_strdup(string);}return item;} cJSON *cJSON_CreateArray(void) {cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_Array;return item;} cJSON *cJSON_CreateObject(void) {cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_Object;return item;} /* Create Arrays: */ cJSON *cJSON_CreateIntArray(const int *numbers,int count) {int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && ichild=n;else suffix_object(p,n);p=n;}return a;} cJSON *cJSON_CreateFloatArray(const float *numbers,int count) {int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && ichild=n;else suffix_object(p,n);p=n;}return a;} cJSON *cJSON_CreateDoubleArray(const double *numbers,int count) {int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && ichild=n;else suffix_object(p,n);p=n;}return a;} cJSON *cJSON_CreateStringArray(const char **strings,int count) {int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && ichild=n;else suffix_object(p,n);p=n;}return a;} /* Duplication */ cJSON *cJSON_Duplicate(cJSON *item,int recurse) { cJSON *newitem,*cptr,*nptr=0,*newchild; /* Bail on bad ptr */ if (!item) return 0; /* Create new item */ newitem=cJSON_New_Item(); if (!newitem) return 0; /* Copy over all vars */ newitem->type=item->type&(~cJSON_IsReference),newitem->valueint=item->valueint,newitem->valuedouble=item->valuedouble; if (item->valuestring) {newitem->valuestring=cJSON_strdup(item->valuestring); if (!newitem->valuestring) {cJSON_Delete(newitem);return 0;}} if (item->string) {newitem->string=cJSON_strdup(item->string); if (!newitem->string) {cJSON_Delete(newitem);return 0;}} /* If non-recursive, then we're done! */ if (!recurse) return newitem; /* Walk the ->next chain for the child. 
*/ cptr=item->child; while (cptr) { newchild=cJSON_Duplicate(cptr,1); /* Duplicate (with recurse) each item in the ->next chain */ if (!newchild) {cJSON_Delete(newitem);return 0;} if (nptr) {nptr->next=newchild,newchild->prev=nptr;nptr=newchild;} /* If newitem->child already set, then crosswire ->prev and ->next and move on */ else {newitem->child=newchild;nptr=newchild;} /* Set newitem->child and move to it */ cptr=cptr->next; } return newitem; } void cJSON_Minify(char *json) { char *into=json; while (*json) { if (*json==' ') json++; else if (*json=='\t') json++; // Whitespace characters. else if (*json=='\r') json++; else if (*json=='\n') json++; else if (*json=='/' && json[1]=='/') while (*json && *json!='\n') json++; // double-slash comments, to end of line. else if (*json=='/' && json[1]=='*') {while (*json && !(*json=='*' && json[1]=='/')) json++;json+=2;} // multiline comments. else if (*json=='\"'){*into++=*json++;while (*json && *json!='\"'){if (*json=='\\') *into++=*json++;*into++=*json++;}*into++=*json++;} // string literals, which are \" sensitive. else *into++=*json++; // All other characters. } *into=0; // and null-terminate. }nordugrid-arc-5.4.2/src/external/cJSON/PaxHeaders.7502/cJSON.h0000644000000000000000000000012412230004771021603 xustar000000000000000027 mtime=1382025721.834744 27 atime=1513200575.390712 30 ctime=1513200658.707731681 nordugrid-arc-5.4.2/src/external/cJSON/cJSON.h0000644000175000002070000001522612230004771021656 0ustar00mockbuildmock00000000000000/* Copyright (c) 2009 Dave Gamble Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef cJSON__h #define cJSON__h #ifdef __cplusplus extern "C" { #endif /* cJSON Types: */ #define cJSON_False 0 #define cJSON_True 1 #define cJSON_NULL 2 #define cJSON_Number 3 #define cJSON_String 4 #define cJSON_Array 5 #define cJSON_Object 6 #define cJSON_IsReference 256 /* The cJSON structure: */ typedef struct cJSON { struct cJSON *next,*prev; /* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */ struct cJSON *child; /* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */ int type; /* The type of the item, as above. */ char *valuestring; /* The item's string, if type==cJSON_String */ int valueint; /* The item's number, if type==cJSON_Number */ double valuedouble; /* The item's number, if type==cJSON_Number */ char *string; /* The item's name string, if this item is the child of, or is in the list of subitems of an object. 
*/ } cJSON; typedef struct cJSON_Hooks { void *(*malloc_fn)(size_t sz); void (*free_fn)(void *ptr); } cJSON_Hooks; /* Supply malloc, realloc and free functions to cJSON */ extern void cJSON_InitHooks(cJSON_Hooks* hooks); /* Supply a block of JSON, and this returns a cJSON object you can interrogate. Call cJSON_Delete when finished. */ extern cJSON *cJSON_Parse(const char *value); /* Render a cJSON entity to text for transfer/storage. Free the char* when finished. */ extern char *cJSON_Print(cJSON *item); /* Render a cJSON entity to text for transfer/storage without any formatting. Free the char* when finished. */ extern char *cJSON_PrintUnformatted(cJSON *item); /* Delete a cJSON entity and all subentities. */ extern void cJSON_Delete(cJSON *c); /* Returns the number of items in an array (or object). */ extern int cJSON_GetArraySize(cJSON *array); /* Retrieve item number "item" from array "array". Returns NULL if unsuccessful. */ extern cJSON *cJSON_GetArrayItem(cJSON *array,int item); /* Get item "string" from object. Case insensitive. */ extern cJSON *cJSON_GetObjectItem(cJSON *object,const char *string); /* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */ extern const char *cJSON_GetErrorPtr(void); /* These calls create a cJSON item of the appropriate type. */ extern cJSON *cJSON_CreateNull(void); extern cJSON *cJSON_CreateTrue(void); extern cJSON *cJSON_CreateFalse(void); extern cJSON *cJSON_CreateBool(int b); extern cJSON *cJSON_CreateNumber(double num); extern cJSON *cJSON_CreateString(const char *string); extern cJSON *cJSON_CreateArray(void); extern cJSON *cJSON_CreateObject(void); /* These utilities create an Array of count items. */ extern cJSON *cJSON_CreateIntArray(const int *numbers,int count); extern cJSON *cJSON_CreateFloatArray(const float *numbers,int count); extern cJSON *cJSON_CreateDoubleArray(const double *numbers,int count); extern cJSON *cJSON_CreateStringArray(const char **strings,int count); /* Append item to the specified array/object. */ extern void cJSON_AddItemToArray(cJSON *array, cJSON *item); extern void cJSON_AddItemToObject(cJSON *object,const char *string,cJSON *item); /* Append reference to item to the specified array/object. Use this when you want to add an existing cJSON to a new cJSON, but don't want to corrupt your existing cJSON. */ extern void cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item); extern void cJSON_AddItemReferenceToObject(cJSON *object,const char *string,cJSON *item); /* Remove/Detatch items from Arrays/Objects. */ extern cJSON *cJSON_DetachItemFromArray(cJSON *array,int which); extern void cJSON_DeleteItemFromArray(cJSON *array,int which); extern cJSON *cJSON_DetachItemFromObject(cJSON *object,const char *string); extern void cJSON_DeleteItemFromObject(cJSON *object,const char *string); /* Update array items. */ extern void cJSON_ReplaceItemInArray(cJSON *array,int which,cJSON *newitem); extern void cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem); /* Duplicate a cJSON item */ extern cJSON *cJSON_Duplicate(cJSON *item,int recurse); /* Duplicate will create a new, identical cJSON item to the one you pass, in new memory that will need to be released. With recurse!=0, it will duplicate any children connected to the item. The item->next and ->prev pointers are always zero on return from Duplicate. 
*/ /* ParseWithOpts allows you to require (and check) that the JSON is null terminated, and to retrieve the pointer to the final byte parsed. */ extern cJSON *cJSON_ParseWithOpts(const char *value,const char **return_parse_end,int require_null_terminated); extern void cJSON_Minify(char *json); /* Macros for creating things quickly. */ #define cJSON_AddNullToObject(object,name) cJSON_AddItemToObject(object, name, cJSON_CreateNull()) #define cJSON_AddTrueToObject(object,name) cJSON_AddItemToObject(object, name, cJSON_CreateTrue()) #define cJSON_AddFalseToObject(object,name) cJSON_AddItemToObject(object, name, cJSON_CreateFalse()) #define cJSON_AddBoolToObject(object,name,b) cJSON_AddItemToObject(object, name, cJSON_CreateBool(b)) #define cJSON_AddNumberToObject(object,name,n) cJSON_AddItemToObject(object, name, cJSON_CreateNumber(n)) #define cJSON_AddStringToObject(object,name,s) cJSON_AddItemToObject(object, name, cJSON_CreateString(s)) /* When assigning an integer value, it needs to be propagated to valuedouble too. */ #define cJSON_SetIntValue(object,val) ((object)?(object)->valueint=(object)->valuedouble=(val):(val)) #ifdef __cplusplus } #endif #endif nordugrid-arc-5.4.2/src/external/cJSON/PaxHeaders.7502/README0000644000000000000000000000012412230004771021376 xustar000000000000000027 mtime=1382025721.834744 27 atime=1513200575.392712 30 ctime=1513200658.703731632 nordugrid-arc-5.4.2/src/external/cJSON/README0000644000175000002070000002105712230004771021450 0ustar00mockbuildmock00000000000000/* Copyright (c) 2009 Dave Gamble Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ Welcome to cJSON. cJSON aims to be the dumbest possible parser that you can get your job done with. It's a single file of C, and a single header file. JSON is described best here: http://www.json.org/ It's like XML, but fat-free. You use it to move data around, store things, or just generally represent your program's state. First up, how do I build? Add cJSON.c to your project, and put cJSON.h somewhere in the header search path. For example, to build the test app: gcc cJSON.c test.c -o test -lm ./test As a library, cJSON exists to take away as much legwork as it can, but not get in your way. As a point of pragmatism (i.e. ignoring the truth), I'm going to say that you can use it in one of two modes: Auto and Manual. Let's have a quick run-through. I lifted some JSON from this page: http://www.json.org/fatfree.html That page inspired me to write cJSON, which is a parser that tries to share the same philosophy as JSON itself. Simple, dumb, out of the way. 
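If you want something that compiles and runs before digging into the bundled test.c, here is a stand-in program (a minimal sketch of mine, not the bundled test app) that only uses calls and macros declared in cJSON.h:

    #include <stdio.h>
    #include <stdlib.h>
    #include "cJSON.h"

    /* Build a tiny object with the helper macros, render it, and clean up. */
    int main(void)
    {
        cJSON *root;
        char *rendered;

        root = cJSON_CreateObject();
        if (!root) return 1;                        /* allocation can fail */
        cJSON_AddStringToObject(root, "name", "Jack (\"Bee\") Nimble");
        cJSON_AddNumberToObject(root, "width", 1920);
        cJSON_AddFalseToObject(root, "interlace");

        rendered = cJSON_Print(root);               /* caller frees this string */
        if (rendered) { printf("%s\n", rendered); free(rendered); }
        cJSON_Delete(root);                         /* frees root and all children */
        return 0;
    }

Compile it the same way as the test app, e.g. gcc cJSON.c standin.c -o standin -lm (standin.c is just a name picked for this sketch).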
Some JSON:

    {
        "name": "Jack (\"Bee\") Nimble",
        "format": {
            "type":       "rect",
            "width":      1920,
            "height":     1080,
            "interlace":  false,
            "frame rate": 24
        }
    }

Assume that you got this from a file, a webserver, or magic JSON elves, whatever; you have a char * to it. Everything is a cJSON struct. Get it parsed:

    cJSON *root = cJSON_Parse(my_json_string);

This is an object. We're in C. We don't have objects. But we do have structs. What's the framerate?

    cJSON *format = cJSON_GetObjectItem(root,"format");
    int framerate = cJSON_GetObjectItem(format,"frame rate")->valueint;

Want to change the framerate?

    cJSON_GetObjectItem(format,"frame rate")->valueint=25;

(Numbers are rendered from valuedouble, so if you poke valueint directly you should set valuedouble to the same value; the cJSON_SetIntValue macro in cJSON.h does both for you.)

Back to disk?

    char *rendered=cJSON_Print(root);

Finished? Delete the root (this takes care of everything else).

    cJSON_Delete(root);

That's AUTO mode. If you're going to use Auto mode, you really ought to check pointers before you dereference them.

Want to see how you'd build this struct in code?

    cJSON *root,*fmt;
    root=cJSON_CreateObject();
    cJSON_AddItemToObject(root, "name", cJSON_CreateString("Jack (\"Bee\") Nimble"));
    cJSON_AddItemToObject(root, "format", fmt=cJSON_CreateObject());
    cJSON_AddStringToObject(fmt,"type", "rect");
    cJSON_AddNumberToObject(fmt,"width", 1920);
    cJSON_AddNumberToObject(fmt,"height", 1080);
    cJSON_AddFalseToObject(fmt,"interlace");
    cJSON_AddNumberToObject(fmt,"frame rate", 24);

Hopefully we can agree that's not a lot of code? There's no overhead, no unnecessary setup. Look at test.c for a bunch of nice examples, mostly all ripped off the json.org site, and a few from elsewhere.

What about manual mode? First up you need some detail. Let's cover how the cJSON objects represent the JSON data. cJSON doesn't distinguish arrays from objects in handling; just type. Each cJSON has, potentially, a child, siblings, a value and a name.

The root object has: Object Type and a Child
The Child has name "name", with value "Jack ("Bee") Nimble", and a sibling:
Sibling has type Object, name "format", and a child.
That child has type String, name "type", value "rect", and a sibling:
Sibling has type Number, name "width", value 1920, and a sibling:
Sibling has type Number, name "height", value 1080, and a sibling:
Sibling has type False, name "interlace", and a sibling:
Sibling has type Number, name "frame rate", value 24

Here's the structure:

    typedef struct cJSON {
        struct cJSON *next,*prev;
        struct cJSON *child;
        int type;
        char *valuestring;
        int valueint;
        double valuedouble;
        char *string;
    } cJSON;

By default all values are 0 unless set by virtue of being meaningful.

next/prev is a doubly linked list of siblings. next takes you to your sibling, prev takes you back from your sibling to you. Only objects and arrays have a "child", and it's the head of the doubly linked list. A "child" entry will have prev==0, but next potentially points on. The last sibling has next=0.

The type expresses Null/True/False/Number/String/Array/Object, all of which are #defined in cJSON.h.

A Number has valueint and valuedouble. If you're expecting an int, read valueint; if not, read valuedouble.

Any entry which is in the linked list which is the child of an object will have a "string" which is the "name" of the entry. When I said "name" in the above example, that's "string". "string" is the JSON name for the 'variable name', if you will.

Now you can trivially walk the lists, recursively, and parse as you please.
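One more note of my own before the parsing examples: the AUTO-mode advice above about checking pointers deserves a concrete shape. A more defensive version of the frame-rate lookup might look like this (my sketch; the helper name frame_rate_or is invented, and it only uses calls declared in cJSON.h):

    #include <stdio.h>
    #include "cJSON.h"

    /* Returns the frame rate from the sample JSON shown earlier,
       or 'fallback' if the text doesn't parse or the fields are missing. */
    static int frame_rate_or(const char *json_text, int fallback)
    {
        cJSON *root, *format, *fr;
        int rate = fallback;
        root = cJSON_Parse(json_text);
        if (!root) {
            printf("parse error near: %s\n", cJSON_GetErrorPtr());
            return fallback;
        }
        format = cJSON_GetObjectItem(root, "format");
        fr = format ? cJSON_GetObjectItem(format, "frame rate") : NULL;
        if (fr) rate = fr->valueint;
        cJSON_Delete(root);   /* this also frees format and fr */
        return rate;
    }

Every lookup that can return NULL is checked before it is dereferenced, and deleting the root is still the only cleanup needed.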
You can invoke cJSON_Parse to get cJSON to parse for you, and then you can take the root object, and traverse the structure (which is, formally, an N-tree), and tokenise as you please. If you wanted to build a callback-style parser, this is how you'd do it (just an example, since these things are very specific):

    void parse_and_callback(cJSON *item,const char *prefix)
    {
        while (item)
        {
            char *newprefix=malloc(strlen(prefix)+strlen(item->string)+2);
            sprintf(newprefix,"%s/%s",prefix,item->string);
            int dorecurse=callback(newprefix, item->type, item);
            if (item->child && dorecurse) parse_and_callback(item->child,newprefix);
            item=item->next;
            free(newprefix);
        }
    }

The prefix process builds you a '/'-separated name for each item, to simplify your callback handling. The 'dorecurse' flag would let the callback decide to handle sub-arrays on its own, or let you invoke it per-item. For the item above, your callback might look like this:

    int callback(const char *name,int type,cJSON *item)
    {
        if (!strcmp(name,"name"))                   { /* populate name */ }
        else if (!strcmp(name,"format/type"))       { /* handle "rect" */ }
        else if (!strcmp(name,"format/width"))      { /* 1920 */ }
        else if (!strcmp(name,"format/height"))     { /* 1080 */ }
        else if (!strcmp(name,"format/interlace"))  { /* false */ }
        else if (!strcmp(name,"format/frame rate")) { /* 24 */ }
        return 1;
    }

Alternatively, you might like to parse iteratively. You'd use:

    void parse_object(cJSON *item)
    {
        cJSON *subitem=item->child;
        while (subitem)
        {
            /* handle subitem */
            if (subitem->child) parse_object(subitem->child);
            subitem=subitem->next;
        }
    }

Of course, this should look familiar, since this is just a stripped-down version of the callback-parser.

This should cover most uses you'll find for parsing. The rest should be possible to infer... and if in doubt, read the source! There's not a lot of it! ;)

In terms of constructing JSON data, the example code above is the right way to do it. You can, of course, hand your sub-objects to other functions to populate. Also, if you find a use for it, you can manually build the objects. For instance, suppose you wanted to build an array of objects?

    cJSON *objects[24];

    cJSON *Create_array_of_anything(cJSON **items,int num)
    {
        int i; cJSON *prev, *root=cJSON_CreateArray();
        for (i=0;i<num;i++)
        {
            if (!i) root->child=items[i];
            else prev->next=items[i], items[i]->prev=prev;
            prev=items[i];
        }
        return root;
    }

and simply: Create_array_of_anything(objects,24);

cJSON doesn't make any assumptions about what order you create things in. You can attach the objects, as above, and later add children to each of those objects. As soon as you call cJSON_Print, it renders the structure to text.

The test.c code shows how to handle a bunch of typical cases. If you uncomment the code, it'll load, parse and print a bunch of test files, also from json.org, which are more complex than I'd care to try and stash into a const char array[].

Enjoy cJSON!

- Dave Gamble, Aug 2009
nordugrid-arc-5.4.2/src/external/PaxHeaders.7502/README0000644000000000000000000000012412267746601020501 xustar000000000000000027 mtime=1390398849.863325 27 atime=1513200575.395712 30 ctime=1513200658.675731289 nordugrid-arc-5.4.2/src/external/README0000644000175000002070000000014012267746601020541 0ustar00mockbuildmock00000000000000External libraries small enough to include source rather than depending on an external package.
nordugrid-arc-5.4.2/src/PaxHeaders.7502/clients0000644000000000000000000000013213214316030017340 xustar000000000000000030 mtime=1513200664.386801138 30 atime=1513200668.719854133 30 ctime=1513200664.386801138 nordugrid-arc-5.4.2/src/clients/0000755000175000002070000000000013214316030017463 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/clients/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712240515025021460 xustar000000000000000027 mtime=1384290837.339254 30 atime=1513200592.967927657 30 ctime=1513200664.381801077 nordugrid-arc-5.4.2/src/clients/Makefile.am0000644000175000002070000000142612240515025021525 0ustar00mockbuildmock00000000000000if DATA_CLIENT_ENABLED DATA_CLIENT = data else DATA_CLIENT = endif if ECHO_CLIENT_ENABLED ECHO_CLIENT = echo else ECHO_CLIENT = endif if CREDENTIALS_CLIENT_ENABLED CREDENTIALS_CLIENT = credentials else CREDENTIALS_CLIENT = endif if COMPUTE_CLIENT_ENABLED COMPUTE_CLIENT = compute else COMPUTE_CLIENT = endif if SAML_CLIENT_ENABLED SAML_CLIENT = saml else SAML_CLIENT = endif if WSRF_CLIENT_ENABLED WSRF_CLIENT = wsrf else WSRF_CLIENT = endif SUBDIRS = $(DATA_CLIENT) $(ECHO_CLIENT) $(CREDENTIALS_CLIENT) $(COMPUTE_CLIENT) $(SAML_CLIENT) $(WSRF_CLIENT) DIST_SUBDIRS = data echo credentials compute saml wsrf arcsysconfdir = $(sysconfdir)/arc arcsysconf_DATA = client.conf exampledir = $(pkgdatadir)/examples example_DATA = client.conf EXTRA_DIST = $(example_DATA) $(arcsysconf_DATA) nordugrid-arc-5.4.2/src/clients/PaxHeaders.7502/data0000644000000000000000000000013213214316030020251 xustar000000000000000030 mtime=1513200664.412801456 30 atime=1513200668.719854133 30 ctime=1513200664.412801456 nordugrid-arc-5.4.2/src/clients/data/0000755000175000002070000000000013214316030020374 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/clients/data/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515022376 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200593.232930898 30 ctime=1513200664.403801346 nordugrid-arc-5.4.2/src/clients/data/Makefile.am0000644000175000002070000000250212052416515022437 0ustar00mockbuildmock00000000000000bin_PROGRAMS = arccp arcls arcrm arcmkdir arcrename man_MANS = arccp.1 arcls.1 arcrm.1 arcmkdir.1 arcrename.1 CLILIBS = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la arccp_SOURCES = arccp.cpp arccp_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arccp_LDADD = $(CLILIBS) $(GLIBMM_LIBS) arcls_SOURCES = arcls.cpp arcls_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcls_LDADD = $(CLILIBS) $(GLIBMM_LIBS) arcrm_SOURCES = arcrm.cpp arcrm_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcrm_LDADD = $(CLILIBS) $(GLIBMM_LIBS) arcmkdir_SOURCES = arcmkdir.cpp arcmkdir_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcmkdir_LDADD = $(CLILIBS) $(GLIBMM_LIBS) arcrename_SOURCES = arcrename.cpp arcrename_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcrename_LDADD = $(CLILIBS) $(GLIBMM_LIBS) 
nordugrid-arc-5.4.2/src/clients/data/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315721022401 xustar000000000000000030 mtime=1513200593.289931595 30 atime=1513200651.896648379 30 ctime=1513200664.404801358 nordugrid-arc-5.4.2/src/clients/data/Makefile.in0000644000175000002070000011414113214315721022451 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ bin_PROGRAMS = arccp$(EXEEXT) arcls$(EXEEXT) arcrm$(EXEEXT) \ arcmkdir$(EXEEXT) arcrename$(EXEEXT) subdir = src/clients/data DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/arccp.1.in $(srcdir)/arcls.1.in \ $(srcdir)/arcmkdir.1.in $(srcdir)/arcrename.1.in \ $(srcdir)/arcrm.1.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = arccp.1 arcls.1 arcrm.1 arcmkdir.1 arcrename.1 CONFIG_CLEAN_VPATH_FILES = am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)" PROGRAMS = $(bin_PROGRAMS) am_arccp_OBJECTS = arccp-arccp.$(OBJEXT) arccp_OBJECTS = $(am_arccp_OBJECTS) am__DEPENDENCIES_1 = arccp_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) arccp_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(arccp_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcls_OBJECTS = arcls-arcls.$(OBJEXT) arcls_OBJECTS = $(am_arcls_OBJECTS) arcls_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) arcls_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(arcls_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcmkdir_OBJECTS = arcmkdir-arcmkdir.$(OBJEXT) arcmkdir_OBJECTS = $(am_arcmkdir_OBJECTS) arcmkdir_DEPENDENCIES = 
$(CLILIBS) $(am__DEPENDENCIES_1) arcmkdir_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcmkdir_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcrename_OBJECTS = arcrename-arcrename.$(OBJEXT) arcrename_OBJECTS = $(am_arcrename_OBJECTS) arcrename_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) arcrename_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcrename_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcrm_OBJECTS = arcrm-arcrm.$(OBJEXT) arcrm_OBJECTS = $(am_arcrm_OBJECTS) arcrm_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) arcrm_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(arcrm_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(arccp_SOURCES) $(arcls_SOURCES) $(arcmkdir_SOURCES) \ $(arcrename_SOURCES) $(arcrm_SOURCES) DIST_SOURCES = $(arccp_SOURCES) $(arcls_SOURCES) $(arcmkdir_SOURCES) \ $(arcrename_SOURCES) $(arcrm_SOURCES) am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' man1dir = $(mandir)/man1 NROFF = nroff MANS = $(man_MANS) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS 
= @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = 
@LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = 
@pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ man_MANS = arccp.1 arcls.1 arcrm.1 arcmkdir.1 arcrename.1 CLILIBS = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la arccp_SOURCES = arccp.cpp arccp_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arccp_LDADD = $(CLILIBS) $(GLIBMM_LIBS) arcls_SOURCES = arcls.cpp arcls_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcls_LDADD = $(CLILIBS) $(GLIBMM_LIBS) arcrm_SOURCES = arcrm.cpp arcrm_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcrm_LDADD = $(CLILIBS) $(GLIBMM_LIBS) arcmkdir_SOURCES = arcmkdir.cpp arcmkdir_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcmkdir_LDADD = $(CLILIBS) $(GLIBMM_LIBS) arcrename_SOURCES = arcrename.cpp arcrename_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcrename_LDADD = $(CLILIBS) $(GLIBMM_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/clients/data/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/clients/data/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): arccp.1: $(top_builddir)/config.status $(srcdir)/arccp.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arcls.1: $(top_builddir)/config.status $(srcdir)/arcls.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arcrm.1: $(top_builddir)/config.status $(srcdir)/arcrm.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arcmkdir.1: $(top_builddir)/config.status $(srcdir)/arcmkdir.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arcrename.1: $(top_builddir)/config.status $(srcdir)/arcrename.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-binPROGRAMS: $(bin_PROGRAMS) @$(NORMAL_INSTALL) test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)" @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p || test -f $$p1; \ then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-binPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(bindir)" && rm -f $$files clean-binPROGRAMS: @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list arccp$(EXEEXT): $(arccp_OBJECTS) $(arccp_DEPENDENCIES) @rm -f arccp$(EXEEXT) $(arccp_LINK) $(arccp_OBJECTS) $(arccp_LDADD) $(LIBS) arcls$(EXEEXT): $(arcls_OBJECTS) $(arcls_DEPENDENCIES) @rm -f arcls$(EXEEXT) $(arcls_LINK) $(arcls_OBJECTS) $(arcls_LDADD) $(LIBS) arcmkdir$(EXEEXT): $(arcmkdir_OBJECTS) $(arcmkdir_DEPENDENCIES) @rm -f arcmkdir$(EXEEXT) $(arcmkdir_LINK) $(arcmkdir_OBJECTS) $(arcmkdir_LDADD) $(LIBS) arcrename$(EXEEXT): $(arcrename_OBJECTS) $(arcrename_DEPENDENCIES) @rm -f arcrename$(EXEEXT) 
$(arcrename_LINK) $(arcrename_OBJECTS) $(arcrename_LDADD) $(LIBS) arcrm$(EXEEXT): $(arcrm_OBJECTS) $(arcrm_DEPENDENCIES) @rm -f arcrm$(EXEEXT) $(arcrm_LINK) $(arcrm_OBJECTS) $(arcrm_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arccp-arccp.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcls-arcls.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcmkdir-arcmkdir.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcrename-arcrename.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcrm-arcrm.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< arccp-arccp.o: arccp.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccp_CXXFLAGS) $(CXXFLAGS) -MT arccp-arccp.o -MD -MP -MF $(DEPDIR)/arccp-arccp.Tpo -c -o arccp-arccp.o `test -f 'arccp.cpp' || echo '$(srcdir)/'`arccp.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arccp-arccp.Tpo $(DEPDIR)/arccp-arccp.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arccp.cpp' object='arccp-arccp.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccp_CXXFLAGS) $(CXXFLAGS) -c -o arccp-arccp.o `test -f 'arccp.cpp' || echo '$(srcdir)/'`arccp.cpp arccp-arccp.obj: arccp.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccp_CXXFLAGS) $(CXXFLAGS) -MT arccp-arccp.obj -MD -MP -MF $(DEPDIR)/arccp-arccp.Tpo -c -o arccp-arccp.obj `if test -f 'arccp.cpp'; then $(CYGPATH_W) 'arccp.cpp'; else $(CYGPATH_W) '$(srcdir)/arccp.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arccp-arccp.Tpo $(DEPDIR)/arccp-arccp.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arccp.cpp' object='arccp-arccp.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccp_CXXFLAGS) $(CXXFLAGS) -c -o arccp-arccp.obj `if test -f 'arccp.cpp'; then $(CYGPATH_W) 'arccp.cpp'; else $(CYGPATH_W) '$(srcdir)/arccp.cpp'; fi` arcls-arcls.o: arcls.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcls_CXXFLAGS) $(CXXFLAGS) -MT arcls-arcls.o -MD -MP -MF $(DEPDIR)/arcls-arcls.Tpo -c -o arcls-arcls.o `test -f 'arcls.cpp' || echo '$(srcdir)/'`arcls.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcls-arcls.Tpo $(DEPDIR)/arcls-arcls.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcls.cpp' object='arcls-arcls.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcls_CXXFLAGS) $(CXXFLAGS) -c -o arcls-arcls.o `test -f 'arcls.cpp' || echo '$(srcdir)/'`arcls.cpp arcls-arcls.obj: arcls.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcls_CXXFLAGS) $(CXXFLAGS) -MT arcls-arcls.obj -MD -MP -MF $(DEPDIR)/arcls-arcls.Tpo -c -o arcls-arcls.obj `if test -f 'arcls.cpp'; then $(CYGPATH_W) 'arcls.cpp'; else $(CYGPATH_W) '$(srcdir)/arcls.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcls-arcls.Tpo $(DEPDIR)/arcls-arcls.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcls.cpp' object='arcls-arcls.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcls_CXXFLAGS) $(CXXFLAGS) -c -o arcls-arcls.obj `if test -f 'arcls.cpp'; then $(CYGPATH_W) 'arcls.cpp'; else $(CYGPATH_W) '$(srcdir)/arcls.cpp'; fi` arcmkdir-arcmkdir.o: arcmkdir.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcmkdir_CXXFLAGS) $(CXXFLAGS) -MT arcmkdir-arcmkdir.o -MD -MP -MF $(DEPDIR)/arcmkdir-arcmkdir.Tpo -c -o arcmkdir-arcmkdir.o `test -f 'arcmkdir.cpp' || echo '$(srcdir)/'`arcmkdir.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcmkdir-arcmkdir.Tpo $(DEPDIR)/arcmkdir-arcmkdir.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcmkdir.cpp' object='arcmkdir-arcmkdir.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcmkdir_CXXFLAGS) $(CXXFLAGS) -c -o arcmkdir-arcmkdir.o `test -f 'arcmkdir.cpp' || echo '$(srcdir)/'`arcmkdir.cpp arcmkdir-arcmkdir.obj: arcmkdir.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcmkdir_CXXFLAGS) $(CXXFLAGS) -MT arcmkdir-arcmkdir.obj -MD -MP -MF $(DEPDIR)/arcmkdir-arcmkdir.Tpo -c -o arcmkdir-arcmkdir.obj `if test -f 'arcmkdir.cpp'; then $(CYGPATH_W) 'arcmkdir.cpp'; else $(CYGPATH_W) '$(srcdir)/arcmkdir.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcmkdir-arcmkdir.Tpo $(DEPDIR)/arcmkdir-arcmkdir.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcmkdir.cpp' object='arcmkdir-arcmkdir.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcmkdir_CXXFLAGS) $(CXXFLAGS) -c -o arcmkdir-arcmkdir.obj `if test -f 'arcmkdir.cpp'; then $(CYGPATH_W) 'arcmkdir.cpp'; else $(CYGPATH_W) '$(srcdir)/arcmkdir.cpp'; fi` arcrename-arcrename.o: arcrename.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrename_CXXFLAGS) $(CXXFLAGS) -MT arcrename-arcrename.o -MD -MP -MF 
$(DEPDIR)/arcrename-arcrename.Tpo -c -o arcrename-arcrename.o `test -f 'arcrename.cpp' || echo '$(srcdir)/'`arcrename.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcrename-arcrename.Tpo $(DEPDIR)/arcrename-arcrename.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcrename.cpp' object='arcrename-arcrename.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrename_CXXFLAGS) $(CXXFLAGS) -c -o arcrename-arcrename.o `test -f 'arcrename.cpp' || echo '$(srcdir)/'`arcrename.cpp arcrename-arcrename.obj: arcrename.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrename_CXXFLAGS) $(CXXFLAGS) -MT arcrename-arcrename.obj -MD -MP -MF $(DEPDIR)/arcrename-arcrename.Tpo -c -o arcrename-arcrename.obj `if test -f 'arcrename.cpp'; then $(CYGPATH_W) 'arcrename.cpp'; else $(CYGPATH_W) '$(srcdir)/arcrename.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcrename-arcrename.Tpo $(DEPDIR)/arcrename-arcrename.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcrename.cpp' object='arcrename-arcrename.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrename_CXXFLAGS) $(CXXFLAGS) -c -o arcrename-arcrename.obj `if test -f 'arcrename.cpp'; then $(CYGPATH_W) 'arcrename.cpp'; else $(CYGPATH_W) '$(srcdir)/arcrename.cpp'; fi` arcrm-arcrm.o: arcrm.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrm_CXXFLAGS) $(CXXFLAGS) -MT arcrm-arcrm.o -MD -MP -MF $(DEPDIR)/arcrm-arcrm.Tpo -c -o arcrm-arcrm.o `test -f 'arcrm.cpp' || echo '$(srcdir)/'`arcrm.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcrm-arcrm.Tpo $(DEPDIR)/arcrm-arcrm.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcrm.cpp' object='arcrm-arcrm.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrm_CXXFLAGS) $(CXXFLAGS) -c -o arcrm-arcrm.o `test -f 'arcrm.cpp' || echo '$(srcdir)/'`arcrm.cpp arcrm-arcrm.obj: arcrm.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrm_CXXFLAGS) $(CXXFLAGS) -MT arcrm-arcrm.obj -MD -MP -MF $(DEPDIR)/arcrm-arcrm.Tpo -c -o arcrm-arcrm.obj `if test -f 'arcrm.cpp'; then $(CYGPATH_W) 'arcrm.cpp'; else $(CYGPATH_W) '$(srcdir)/arcrm.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcrm-arcrm.Tpo $(DEPDIR)/arcrm-arcrm.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcrm.cpp' object='arcrm-arcrm.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrm_CXXFLAGS) $(CXXFLAGS) -c -o arcrm-arcrm.obj `if test -f 'arcrm.cpp'; then $(CYGPATH_W) 'arcrm.cpp'; else $(CYGPATH_W) '$(srcdir)/arcrm.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man1: $(man_MANS) @$(NORMAL_INSTALL) test -z "$(man1dir)" || $(MKDIR_P) "$(DESTDIR)$(man1dir)" @list=''; test -n "$(man1dir)" || exit 0; \ { for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo 
"$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \ done; } uninstall-man1: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man1dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ test -z "$$files" || { \ echo " ( cd '$(DESTDIR)$(man1dir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(man1dir)" && rm -f $$files; } ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @list='$(MANS)'; if test -n "$$list"; then \ list=`for p in $$list; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \ if test -n "$$list" && \ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 
's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) $(MANS) installdirs: for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-binPROGRAMS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man1 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-binPROGRAMS uninstall-man uninstall-man: uninstall-man1 .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \ clean-generic clean-libtool ctags distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-binPROGRAMS install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-man1 install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags uninstall \ uninstall-am uninstall-binPROGRAMS uninstall-man \ uninstall-man1 # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/clients/data/PaxHeaders.7502/arcmkdir.cpp0000644000000000000000000000012413213442363022637 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200575.324711 30 ctime=1513200664.411801444 nordugrid-arc-5.4.2/src/clients/data/arcmkdir.cpp0000644000175000002070000001340313213442363022705 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include static Arc::Logger logger(Arc::Logger::getRootLogger(), "arcmkdir"); bool arcmkdir(const Arc::URL& file_url, Arc::UserConfig& usercfg, bool with_parents) { if (!file_url) { logger.msg(Arc::ERROR, "Invalid URL: %s", file_url.str()); return false; } if (file_url.Protocol() == "urllist") { std::list files = Arc::ReadURLList(file_url); if (files.empty()) { logger.msg(Arc::ERROR, "Can't read list of locations from file %s", file_url.Path()); return false; } bool r = true; for (std::list::iterator file = files.begin(); file != files.end(); ++file) { if (!arcmkdir(*file, usercfg, with_parents)) r = false; } return r; } Arc::DataHandle url(file_url, usercfg); if (!url) { logger.msg(Arc::ERROR, "Unsupported URL given"); return false; } if (url->RequiresCredentials()) { if (!usercfg.InitializeCredentials(Arc::initializeCredentialsType::RequireCredentials)) { logger.msg(Arc::ERROR, "Unable to create directory %s", file_url.str()); logger.msg(Arc::ERROR, "Invalid credentials, please check proxy and/or CA certificates"); return false; } Arc::Credential holder(usercfg); if (!holder.IsValid()) { if (holder.GetEndTime() < Arc::Time()) { logger.msg(Arc::ERROR, "Proxy expired"); } logger.msg(Arc::ERROR, "Unable to create directory %s", file_url.str()); logger.msg(Arc::ERROR, "Invalid credentials, please check proxy and/or CA certificates"); return false; } } url->SetSecure(false); Arc::DataStatus res = url->CreateDirectory(with_parents); if (!res.Passed()) { logger.msg(Arc::ERROR, std::string(res)); if (res.Retryable()) logger.msg(Arc::ERROR, "This seems like a temporary error, please try again later"); return false; } return true; } static int runmain(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); Arc::OptionParser options(istring("url"), istring("The arcmkdir command creates directories " "on grid storage elements and catalogs.")); bool with_parents = false; options.AddOption('p', "parents", istring("make parent directories as needed"), with_parents); bool show_plugins = false; options.AddOption('P', "listplugins", istring("list the available plugins (protocols supported)"), show_plugins); int timeout = 20; options.AddOption('t', "timeout", istring("timeout in seconds (default 20)"), istring("seconds"), timeout); std::string conffile; options.AddOption('z', "conffile", istring("configuration file (default ~/.arc/client.conf)"), istring("filename"), conffile); std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); bool version = false; options.AddOption('v', "version", istring("print version information"), version); std::list params = options.Parse(argc, argv); if (version) { std::cout << Arc::IString("%s version %s", "arcrm", VERSION) << std::endl; return 0; } // If debug is 
specified as argument, it should be set before loading the configuration. if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug)); logger.msg(Arc::VERBOSE, "Running command: %s", options.GetCommandWithArguments()); if (show_plugins) { std::list modules; Arc::PluginsFactory pf(Arc::BaseConfig().MakeConfig(Arc::Config()).Parent()); pf.scan(Arc::FinderLoader::GetLibrariesList(), modules); Arc::PluginsFactory::FilterByKind("HED:DMC", modules); std::cout << Arc::IString("Protocol plugins available:") << std::endl; for (std::list::iterator itMod = modules.begin(); itMod != modules.end(); ++itMod) { for (std::list::iterator itPlug = itMod->plugins.begin(); itPlug != itMod->plugins.end(); ++itPlug) { std::cout << " " << itPlug->name << " - " << itPlug->description << std::endl; } } return 0; } // credentials will be initialised later if necessary Arc::UserConfig usercfg(conffile, Arc::initializeCredentialsType::TryCredentials); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } usercfg.UtilsDirPath(Arc::UserConfig::ARCUSERDIRECTORY); usercfg.Timeout(timeout); if (debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); if (params.size() != 1) { logger.msg(Arc::ERROR, "Wrong number of parameters specified"); return 1; } // add a slash to the end if not present std::string url = params.front(); if (url[url.length()-1] != '/') url += '/'; if (!arcmkdir(url, usercfg, with_parents)) return 1; return 0; } int main(int argc, char **argv) { int xr = runmain(argc,argv); _exit(xr); return 0; } nordugrid-arc-5.4.2/src/clients/data/PaxHeaders.7502/arcrename.cpp0000644000000000000000000000012413213442363023000 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200575.325711 30 ctime=1513200664.412801456 nordugrid-arc-5.4.2/src/clients/data/arcrename.cpp0000644000175000002070000001406313213442363023051 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include static Arc::Logger logger(Arc::Logger::getRootLogger(), "arcrename"); bool arcrename(const Arc::URL& old_url, const Arc::URL& new_url, Arc::UserConfig& usercfg, int timeout) { if (!old_url) { logger.msg(Arc::ERROR, "Invalid URL: %s", old_url.str()); return false; } if (!new_url) { logger.msg(Arc::ERROR, "Invalid URL: %s", new_url.str()); return false; } // Check URLs if (old_url.Protocol() != new_url.Protocol() || old_url.Host() != new_url.Host() || old_url.Port() != new_url.Port()) { logger.msg(Arc::ERROR, "Both URLs must have the same protocol, host and port"); return false; } std::string old_path(old_url.Path()); std::string new_path(new_url.Path()); Arc::CanonicalDir(old_path, true); Arc::CanonicalDir(new_path, true); // LFC URLs can be specified by guid metadata option if ((old_path.find_first_not_of('/') == std::string::npos && old_url.MetaDataOptions().empty()) || new_path.find_first_not_of('/') == std::string::npos) { logger.msg(Arc::ERROR, "Cannot rename to or from root directory"); return false; } if (old_path == new_path && old_url.FullPath() == new_url.FullPath()) { logger.msg(Arc::ERROR, "Cannot rename to the same URL"); return false; } Arc::DataHandle url(old_url, usercfg); if (!url) { logger.msg(Arc::ERROR, "Unsupported URL given"); return false; } if (url->RequiresCredentials()) { if 
(!usercfg.InitializeCredentials(Arc::initializeCredentialsType::RequireCredentials)) { logger.msg(Arc::ERROR, "Unable to rename %s", old_url.str()); logger.msg(Arc::ERROR, "Invalid credentials, please check proxy and/or CA certificates"); return false; } Arc::Credential holder(usercfg); if (!holder.IsValid()) { if (holder.GetEndTime() < Arc::Time()) { logger.msg(Arc::ERROR, "Proxy expired"); } logger.msg(Arc::ERROR, "Unable to rename %s", old_url.str()); logger.msg(Arc::ERROR, "Invalid credentials, please check proxy and/or CA certificates"); return false; } } // Insecure by default url->SetSecure(false); // Do the renaming Arc::DataStatus res = url->Rename(new_url); if (!res.Passed()) { logger.msg(Arc::ERROR, std::string(res)); if (res.Retryable()) logger.msg(Arc::ERROR, "This seems like a temporary error, please try again later"); return false; } return true; } static int runmain(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); Arc::OptionParser options(istring("old_url new_url"), istring("The arcrename command renames files on " "grid storage elements.")); bool show_plugins = false; options.AddOption('P', "listplugins", istring("list the available plugins (protocols supported)"), show_plugins); int timeout = 20; options.AddOption('t', "timeout", istring("timeout in seconds (default 20)"), istring("seconds"), timeout); std::string conffile; options.AddOption('z', "conffile", istring("configuration file (default ~/.arc/client.conf)"), istring("filename"), conffile); std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); bool version = false; options.AddOption('v', "version", istring("print version information"), version); std::list params = options.Parse(argc, argv); if (version) { std::cout << Arc::IString("%s version %s", "arcrename", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. 
if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug)); logger.msg(Arc::VERBOSE, "Running command: %s", options.GetCommandWithArguments()); if (show_plugins) { std::list modules; Arc::PluginsFactory pf(Arc::BaseConfig().MakeConfig(Arc::Config()).Parent()); pf.scan(Arc::FinderLoader::GetLibrariesList(), modules); Arc::PluginsFactory::FilterByKind("HED:DMC", modules); std::cout << Arc::IString("Protocol plugins available:") << std::endl; for (std::list::iterator itMod = modules.begin(); itMod != modules.end(); ++itMod) { for (std::list::iterator itPlug = itMod->plugins.begin(); itPlug != itMod->plugins.end(); ++itPlug) { std::cout << " " << itPlug->name << " - " << itPlug->description << std::endl; } } return 0; } // credentials will be initialised later if necessary Arc::UserConfig usercfg(conffile, Arc::initializeCredentialsType::TryCredentials); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } usercfg.UtilsDirPath(Arc::UserConfig::ARCUSERDIRECTORY); if (debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); if (params.size() != 2) { logger.msg(Arc::ERROR, "Wrong number of parameters specified"); return 1; } std::string oldurl(params.front()); std::string newurl(params.back()); if (!arcrename(oldurl, newurl, usercfg, timeout)) return 1; return 0; } int main(int argc, char **argv) { int xr = runmain(argc,argv); _exit(xr); return 0; } nordugrid-arc-5.4.2/src/clients/data/PaxHeaders.7502/arcls.cpp0000644000000000000000000000012413213442363022147 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200575.320711 30 ctime=1513200664.410801431 nordugrid-arc-5.4.2/src/clients/data/arcls.cpp0000644000175000002070000003415713213442363022226 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static Arc::Logger logger(Arc::Logger::getRootLogger(), "arcls"); void print_urls(const Arc::FileInfo& file) { for (std::list::const_iterator u = file.GetURLs().begin(); u != file.GetURLs().end(); ++u) std::cout << "\t" << *u << std::endl; } void print_meta(const Arc::FileInfo& file) { std::map md = file.GetMetaData(); for (std::map::iterator mi = md.begin(); mi != md.end(); ++mi) std::cout<first<<":"<second<& files, bool show_urls, bool show_meta) { if (files.empty()) return; unsigned int namewidth = 0; unsigned int sizewidth = 0; unsigned int csumwidth = 0; // find longest length of each field to align the output for (std::list::const_iterator i = files.begin(); i != files.end(); ++i) { if (i->GetName().length() > namewidth) namewidth = i->GetName().length(); if (i->CheckSize() && i->GetSize() > 0 && // log(0) not good! 
(unsigned int)(log10(i->GetSize()))+1 > sizewidth) sizewidth = (unsigned int)(log10(i->GetSize()))+1; if (i->CheckCheckSum() && i->GetCheckSum().length() > csumwidth) csumwidth = i->GetCheckSum().length(); } std::cout << std::setw(namewidth) << std::left << " "; std::cout << " "; std::cout << std::setw(sizewidth + 4) << std::left << " "; std::cout << " "; std::cout << " "; std::cout << std::setw(csumwidth) << std::right << ""; std::cout << std::endl; // set minimum widths to accommodate headers if (namewidth < 7) namewidth = 7; if (sizewidth < 7) sizewidth = 7; if (csumwidth < 8) csumwidth = 8; for (std::list::const_iterator i = files.begin(); i != files.end(); ++i) { std::cout << std::setw(namewidth) << std::left << i->GetName(); switch (i->GetType()) { case Arc::FileInfo::file_type_file: std::cout << " file"; break; case Arc::FileInfo::file_type_dir: std::cout << " dir"; break; default: std::cout << " (n/a)"; break; } if (i->CheckSize()) { std::cout << " " << std::setw(sizewidth) << std::right << Arc::tostring(i->GetSize()); } else { std::cout << " " << std::setw(sizewidth) << std::right << " (n/a)"; } if (i->CheckModified()) { std::cout << " " << i->GetModified(); } else { std::cout << " (n/a) "; } if (i->CheckCheckSum()) { std::cout << " " << std::setw(csumwidth) << std::left << i->GetCheckSum(); } else { std::cout << " " << std::setw(csumwidth) << std::left << " (n/a)"; } if (i->CheckLatency()) { std::cout << " " << i->GetLatency(); } else { std::cout << " (n/a)"; } std::cout << std::endl; if (show_urls) print_urls(*i); if (show_meta) print_meta(*i); } } static bool arcls(const Arc::URL& dir_url, Arc::UserConfig& usercfg, bool show_details, // longlist bool show_urls, // locations bool show_meta, // metadata bool no_list, // don't list dirs bool force_list, // force dir list bool check_access, // checkaccess int recursion, // recursion int timeout) { // timeout if (!dir_url) { logger.msg(Arc::ERROR, "Invalid URL: %s", dir_url.fullstr()); return false; } if (dir_url.Protocol() == "urllist") { std::list dirs = Arc::ReadURLList(dir_url); if (dirs.empty()) { logger.msg(Arc::ERROR, "Can't read list of locations from file %s", dir_url.Path()); return false; } bool r = true; for (std::list::iterator dir = dirs.begin(); dir != dirs.end(); ++dir) { if(!arcls(*dir, usercfg, show_details, show_urls, show_meta, no_list, force_list, check_access, recursion, timeout)) r = false; } return r; } Arc::DataHandle url(dir_url, usercfg); if (!url) { logger.msg(Arc::ERROR, "Unsupported URL given"); return false; } if (url->RequiresCredentials()) { if (!usercfg.InitializeCredentials(Arc::initializeCredentialsType::RequireCredentials)) { logger.msg(Arc::ERROR, "Unable to list content of %s", dir_url.str()); logger.msg(Arc::ERROR, "Invalid credentials, please check proxy and/or CA certificates"); return false; } Arc::Credential holder(usercfg); if (!holder.IsValid()) { if (holder.GetEndTime() < Arc::Time()) { logger.msg(Arc::ERROR, "Proxy expired"); } logger.msg(Arc::ERROR, "Unable to list content of %s", dir_url.str()); logger.msg(Arc::ERROR, "Invalid credentials, please check proxy and/or CA certificates"); return false; } } url->SetSecure(false); if(check_access) { std::cout << dir_url << " - "; if(url->Check(false)) { std::cout << "passed" << std::endl; return true; } else { std::cout << "failed" << std::endl; return false; } } Arc::DataPoint::DataPointInfoType verb = (Arc::DataPoint::DataPointInfoType) (Arc::DataPoint::INFO_TYPE_MINIMAL | Arc::DataPoint::INFO_TYPE_NAME); if(show_urls) verb = 
(Arc::DataPoint::DataPointInfoType) (verb | Arc::DataPoint::INFO_TYPE_STRUCT); if(show_meta) verb = (Arc::DataPoint::DataPointInfoType) (verb | Arc::DataPoint::INFO_TYPE_ALL); if(show_details) verb = (Arc::DataPoint::DataPointInfoType) (verb | Arc::DataPoint::INFO_TYPE_TYPE | Arc::DataPoint::INFO_TYPE_TIMES | Arc::DataPoint::INFO_TYPE_CONTENT | Arc::DataPoint::INFO_TYPE_ACCESS); if(recursion > 0) verb = (Arc::DataPoint::DataPointInfoType) (verb | Arc::DataPoint::INFO_TYPE_TYPE); Arc::DataStatus res; Arc::FileInfo file; std::list files; if(no_list) { // only requested object is queried res = url->Stat(file, verb); if(res) files.push_back(file); } else if(force_list) { // assume it is directory, fail otherwise res = url->List(files, verb); } else { // try to guess what to do res = url->Stat(file, (Arc::DataPoint::DataPointInfoType)(verb | Arc::DataPoint::INFO_TYPE_TYPE)); if(res && (file.GetType() == Arc::FileInfo::file_type_file)) { // If it is file and we are sure, then just report it. files.push_back(file); } else { // If it is dir then we must list it. But if stat failed or // if type is undefined there is still chance it is directory. Arc::DataStatus res_ = url->List(files, verb); if(!res_) { // If listing failed maybe simply report previous result if any. if(res) { files.push_back(file); } } else { res = res_; } } } if (!res) { if (files.empty()) { logger.msg(Arc::ERROR, std::string(res)); if (res.Retryable()) logger.msg(Arc::ERROR, "This seems like a temporary error, please try again later"); return false; } logger.msg(Arc::INFO, "Warning: " "Failed listing files but some information is obtained"); } files.sort(); // Sort alphabetically by name if (show_details) { print_details(files, show_urls, show_meta); } else { for (std::list::iterator i = files.begin(); i != files.end(); ++i) { std::cout << i->GetName() << std::endl; if (show_urls) print_urls(*i); if (show_meta) print_meta(*i); } } // Do recursion. Recursion has no sense if listing is forbidden. 
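// For every entry reported as a directory, a sub-URL is built by appending the
// entry name (with a path separator when needed) to the current URL, and arcls
// is invoked again with the depth counter decreased by one, so --depth N
// descends at most N levels below the starting point.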
if ((recursion > 0) && (!no_list)) { for (std::list::iterator i = files.begin(); i != files.end(); ++i) { if (i->GetType() == Arc::FileInfo::file_type_dir) { Arc::URL suburl = dir_url; if(suburl.Protocol() != "file") { if (suburl.Path()[suburl.Path().length() - 1] != '/') suburl.ChangePath(suburl.Path() + "/" + i->GetName()); else suburl.ChangePath(suburl.Path() + i->GetName()); } else { if (suburl.Path()[suburl.Path().length() - 1] != G_DIR_SEPARATOR) suburl.ChangePath(suburl.Path() + G_DIR_SEPARATOR_S + i->GetName()); else suburl.ChangePath(suburl.Path() + i->GetName()); } std::cout << std::endl; std::cout << suburl.str() << ":" << std::endl; arcls(suburl, usercfg, show_details, show_urls, show_meta, no_list, force_list, check_access, recursion - 1, timeout); std::cout << std::endl; } } } return true; } static int runmain(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); Arc::OptionParser options(istring("url"), istring("The arcls command is used for listing " "files in grid storage elements " "and file\nindex catalogues.")); bool longlist = false; options.AddOption('l', "long", istring("long format (more information)"), longlist); bool locations = false; options.AddOption('L', "locations", istring("show URLs of file locations"), locations); bool metadata = false; options.AddOption('m', "metadata", istring("display all available metadata"), metadata); bool infinite_recursion = false; options.AddOption('r', "recursive", istring("operate recursively"), infinite_recursion); int recursion = 0; options.AddOption('D', "depth", istring("operate recursively up to specified level"), istring("level"), recursion); bool nolist = false; options.AddOption('n', "nolist", istring("show only description of requested object, do not list content of directories"), nolist); bool forcelist = false; options.AddOption('f', "forcelist", istring("treat requested object as directory and always try to list content"), forcelist); bool checkaccess = false; options.AddOption('c', "checkaccess", istring("check readability of object, does not show any information about object"), checkaccess); bool show_plugins = false; options.AddOption('P', "listplugins", istring("list the available plugins (protocols supported)"), show_plugins); int timeout = 20; options.AddOption('t', "timeout", istring("timeout in seconds (default 20)"), istring("seconds"), timeout); std::string conffile; options.AddOption('z', "conffile", istring("configuration file (default ~/.arc/client.conf)"), istring("filename"), conffile); std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); bool version = false; options.AddOption('v', "version", istring("print version information"), version); std::list params = options.Parse(argc, argv); if (version) { std::cout << Arc::IString("%s version %s", "arcls", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. 
if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug)); logger.msg(Arc::VERBOSE, "Running command: %s", options.GetCommandWithArguments()); if (show_plugins) { std::list modules; Arc::PluginsFactory pf(Arc::BaseConfig().MakeConfig(Arc::Config()).Parent()); pf.scan(Arc::FinderLoader::GetLibrariesList(), modules); Arc::PluginsFactory::FilterByKind("HED:DMC", modules); std::cout << Arc::IString("Protocol plugins available:") << std::endl; for (std::list::iterator itMod = modules.begin(); itMod != modules.end(); ++itMod) { for (std::list::iterator itPlug = itMod->plugins.begin(); itPlug != itMod->plugins.end(); ++itPlug) { std::cout << " " << itPlug->name << " - " << itPlug->description << std::endl; } } return 0; } // credentials will be initialised later if necessary Arc::UserConfig usercfg(conffile, Arc::initializeCredentialsType::TryCredentials); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } usercfg.UtilsDirPath(Arc::UserConfig::ARCUSERDIRECTORY); if (debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); // Analyze options if (params.size() != 1) { logger.msg(Arc::ERROR, "Wrong number of parameters specified"); return 1; } if(forcelist && nolist) { logger.msg(Arc::ERROR, "Incompatible options --nolist and --forcelist requested"); return 1; } if(recursion && nolist) { logger.msg(Arc::ERROR, "Requesting recursion and --nolist has no sense"); return 1; } if(infinite_recursion) recursion = INT_MAX; std::list::iterator it = params.begin(); if(!arcls(*it, usercfg, longlist, locations, metadata, nolist, forcelist, checkaccess, recursion, timeout)) return 1; return 0; } int main(int argc, char **argv) { int xr = runmain(argc,argv); _exit(xr); return 0; } nordugrid-arc-5.4.2/src/clients/data/PaxHeaders.7502/arcrm.1.in0000644000000000000000000000012712441125533022134 xustar000000000000000027 mtime=1417980763.391041 30 atime=1513200651.934648843 30 ctime=1513200664.409801419 nordugrid-arc-5.4.2/src/clients/data/arcrm.1.in0000644000175000002070000000620012441125533022174 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARCRM 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arcrm \- delete files .SH DESCRIPTION The .B arcrm command deletes files on grid storage elements and indexing services. .SH SYNOPSIS .B arcrm [options] url [url ...] .SH OPTIONS .IP "\fB-f\fR, \fB--force\fR" remove logical file name registration even if not all physical instances were removed .IP "\fB-P\fR, \fB--listplugins\fR" list the available plugins (protocols supported) .IP "\fB-t\fR, \fB--timeout\fR=\fIseconds\fR" timeout in seconds (default 20) .IP "\fB-z\fR, \fB--conffile\fR=\fIfilename\fR" configuration file (default ~/.arc/client.conf) .IP "\fB-d\fR, \fB--debug\fR=\fIdebuglevel\fR" FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG .IP "\fB-v\fR, \fB--version\fR" print version information .IP "\fB-?\fR, \fB--help\fR" print help .SH ARGUMENTS .IP "\fBurl [url ...]\fR" the location(s) to delete .LP .SH EXTENDED DESCRIPTION The .B arcrm command deletes files on grid storage elements and indexing services. In the case of an indexing service url all physical instances of the file corresponding to the given locations are deleted and unregistered. If an indexing service url is given without locations, all physical instances and all meta-information about file are deleted. 
For more information on ARC URL syntax please read "Protocols, Uniform Resource Locators (URL) and Extensions Supported in ARC" [NORDUGRID-TECH-7] If .B url starts with '@', the remaining argument is assumed to be a path to a local file containing a list of URLs, one per line. In this case .B arcrm behaves as if it were called with all those URLs as arguments. Depending on the installed libraries (check with .B -P ), the following protocols may be used: file (file:// prefix may be omitted), http, https, httpg, ftp, gsiftp, srm, root. .SH FILES .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .SH ENVIRONMENT VARIABLES .TP .B X509_USER_PROXY The location of the user's Grid proxy file. Shouldn't be set unless the proxy is in a non-standard location. .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). .SH EXAMPLE arcrm gsiftp://example.com/grid/file.dat .SH NOTES Lack of recursion is a feature. .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org .SH SEE ALSO .BR arccp (1), .BR arcls (1), .BR arcmkdir (1), .BR arcrename (1), nordugrid-arc-5.4.2/src/clients/data/PaxHeaders.7502/arcrename.1.in0000644000000000000000000000012712441125533022765 xustar000000000000000027 mtime=1417980763.391041 30 atime=1513200651.959649149 30 ctime=1513200664.408801407 nordugrid-arc-5.4.2/src/clients/data/arcrename.1.in0000644000175000002070000000621012441125533023026 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARCRENAME 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arcrename \- rename file or directory .SH DESCRIPTION The .B arcrename command renames files or directories on grid storage elements and indexing services. .SH SYNOPSIS .B arcrename [options] oldurl newurl .SH OPTIONS .IP "\fB-P\fR, \fB--listplugins\fR" list the available plugins (protocols supported) .IP "\fB-t\fR, \fB--timeout\fR=\fIseconds\fR" timeout in seconds (default 20) .IP "\fB-z\fR, \fB--conffile\fR=\fIfilename\fR" configuration file (default ~/.arc/client.conf) .IP "\fB-d\fR, \fB--debug\fR=\fIdebuglevel\fR" FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG .IP "\fB-v\fR, \fB--version\fR" print version information .IP "\fB-?\fR, \fB--help\fR" print help .SH ARGUMENTS .IP "\fBoldurl\fR" current name of file or directory .IP "\fBnewurl\fR" new name for file or directory .LP .SH EXTENDED DESCRIPTION The .B arcrename command renames files or directories on grid storage elements and indexing services. The path component of .B oldurl and .B newurl must differ and it must be the only component of both URLs which is different. .B arcrename will exit with an error if the paths are equivalent or other components of the URLs are different.
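For illustration, with the same example host and paths as in the EXAMPLE section below: renaming gsiftp://example.com/grid/file.dat to gsiftp://example.com/grid/new.file.dat changes only the path and is accepted, whereas renaming it to gsiftp://other.example.com/grid/file.dat would also change the host and therefore exits with an error.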
Renaming a URL to an existing URL will either fail or overwrite the existing URL, depending on the protocol. .B arcrename works purely at the namespace level and does not perform data transfer. For more information on ARC URL syntax please read "Protocols, Uniform Resource Locators (URL) and Extensions Supported in ARC" [NORDUGRID-TECH-7] Depending on the installed libraries (check with .B -P ), the following protocols may be used: file (file:// prefix may be omitted), http, https, httpg, ftp, gsiftp, srm, root. However renaming is not supported or implemented for some of those protocols. .SH FILES .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .SH ENVIRONMENT VARIABLES .TP .B X509_USER_PROXY The location of the user's Grid proxy file. Shouldn't be set unless the proxy is in a non-standard location. .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). .SH EXAMPLE arcrename gsiftp://example.com/grid/file.dat gsiftp://example.com/grid/new.file.dat .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org .SH SEE ALSO .BR arccp (1), .BR arcls (1), .BR arcmkdir(1), .BR arcrm(1), nordugrid-arc-5.4.2/src/clients/data/PaxHeaders.7502/arccp.cpp0000644000000000000000000000012413213442363022133 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200575.324711 30 ctime=1513200664.410801431 nordugrid-arc-5.4.2/src/clients/data/arccp.cpp0000644000175000002070000006350113213442363022205 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static Arc::Logger logger(Arc::Logger::getRootLogger(), "arccp"); static Arc::SimpleCondition cond; static bool cancelled = false; static void sig_cancel(int) { if (cancelled) _exit(0); cancelled = true; cond.broadcast(); } static void progress(FILE *o, const char*, unsigned int, unsigned long long int all, unsigned long long int max, double, double) { static int rs = 0; const char rs_[4] = { '|', '/', '-', '\\' }; if (max) { fprintf(o, "\r|"); unsigned int l = (74 * all + 37) / max; if (l > 74) l = 74; unsigned int i = 0; for (; i < l; i++) fprintf(o, "="); fprintf(o, "%c", rs_[rs++]); if (rs > 3) rs = 0; for (; i < 74; i++) fprintf(o, " "); fprintf(o, "|\r"); fflush(o); return; } fprintf(o, "\r%llu kB \r", all / 1024); } static void transfer_cb(unsigned long long int bytes_transferred) { fprintf (stderr, "\r%llu kB \r", bytes_transferred / 1024); } static void mover_callback(Arc::DataMover* mover, Arc::DataStatus status, void* arg) { Arc::DataStatus* res = (Arc::DataStatus*)arg; *res = status; if (!res->Passed()) { 
logger.msg(Arc::ERROR, "Current transfer FAILED: %s", std::string(*res)); if (res->Retryable()) { logger.msg(Arc::ERROR, "This seems like a temporary error, please try again later"); } } cond.broadcast(); } static bool checkProxy(Arc::UserConfig& usercfg, const Arc::URL& src_file) { if (!usercfg.InitializeCredentials(Arc::initializeCredentialsType::RequireCredentials)) { logger.msg(Arc::ERROR, "Unable to copy %s", src_file.str()); logger.msg(Arc::ERROR, "Invalid credentials, please check proxy and/or CA certificates"); return false; } Arc::Credential holder(usercfg); if (!holder.IsValid()) { if (holder.GetEndTime() < Arc::Time()) { logger.msg(Arc::ERROR, "Proxy expired"); } logger.msg(Arc::ERROR, "Unable to copy %s", src_file.str()); logger.msg(Arc::ERROR, "Invalid credentials, please check proxy and/or CA certificates"); return false; } return true; } bool arctransfer(const Arc::URL& source_url, const Arc::URL& destination_url, const std::list& locations, Arc::UserConfig& usercfg, bool secure, bool passive, bool verbose, int timeout) { if (!source_url) { logger.msg(Arc::ERROR, "Invalid URL: %s", source_url.str()); return false; } if (!destination_url) { logger.msg(Arc::ERROR, "Invalid URL: %s", destination_url.str()); return false; } // Credentials are always required for 3rd party transfer if (!checkProxy(usercfg, source_url)) return false; if (timeout > 0) usercfg.Timeout(timeout); Arc::DataStatus res = Arc::DataPoint::Transfer3rdParty(source_url, destination_url, usercfg, verbose ? &transfer_cb : NULL); if (verbose) std::cerr< sources = Arc::ReadURLList(source_url); std::list destinations = Arc::ReadURLList(destination_url); if (sources.empty()) { logger.msg(Arc::ERROR, "Can't read list of sources from file %s", source_url.Path()); return false; } if (destinations.empty()) { logger.msg(Arc::ERROR, "Can't read list of destinations from file %s", destination_url.Path()); return false; } if (sources.size() != destinations.size()) { logger.msg(Arc::ERROR, "Numbers of sources and destinations do not match"); return false; } bool r = true; for (std::list::iterator source = sources.begin(), destination = destinations.begin(); (source != sources.end()) && (destination != destinations.end()); ++source, ++destination) { if (!arcregister(*source, *destination, usercfg, force_meta)) r = false; if (cancelled) return true; } return r; } if (source_url.Protocol() == "urllist") { std::list sources = Arc::ReadURLList(source_url); if (sources.empty()) { logger.msg(Arc::ERROR, "Can't read list of sources from file %s", source_url.Path()); return false; } bool r = true; for (std::list::iterator source = sources.begin(); source != sources.end(); ++source) { if (!arcregister(*source, destination_url, usercfg, force_meta)) r = false; if (cancelled) return true; } return r; } if (destination_url.Protocol() == "urllist") { std::list destinations = Arc::ReadURLList(destination_url); if (destinations.empty()) { logger.msg(Arc::ERROR, "Can't read list of destinations from file %s", destination_url.Path()); return false; } bool r = true; for (std::list::iterator destination = destinations.begin(); destination != destinations.end(); ++destination) { if (!arcregister(source_url, *destination, usercfg, force_meta)) r = false; if (cancelled) return true; } return r; } if (destination_url.Path()[destination_url.Path().length() - 1] == '/') { logger.msg(Arc::ERROR, "Fileset registration is not supported yet"); return false; } Arc::DataHandle source(source_url, usercfg); Arc::DataHandle destination(destination_url, 
usercfg); if (!source) { logger.msg(Arc::ERROR, "Unsupported source url: %s", source_url.str()); return false; } if (!destination) { logger.msg(Arc::ERROR, "Unsupported destination url: %s", destination_url.str()); return false; } if ((source->RequiresCredentials() || destination->RequiresCredentials()) && !checkProxy(usercfg, source_url)) return false; if (source->IsIndex() || !destination->IsIndex()) { logger.msg(Arc::ERROR, "For registration source must be ordinary URL" " and destination must be indexing service"); return false; } // Obtain meta-information about source Arc::FileInfo fileinfo; Arc::DataPoint::DataPointInfoType verb = (Arc::DataPoint::DataPointInfoType)Arc::DataPoint::INFO_TYPE_CONTENT; Arc::DataStatus res = source->Stat(fileinfo, verb); if (!res) { logger.msg(Arc::ERROR, "Could not obtain information about source: %s", std::string(res)); return false; } // Check if destination is already registered if (destination->Resolve(true)) { // Check meta-info matches source if (!destination->CompareMeta(*source) && !force_meta) { logger.msg(Arc::ERROR, "Metadata of source does not match existing " "destination. Use the --force option to override this."); return false; } // Remove existing locations destination->ClearLocations(); } bool replication = destination->Registered(); destination->SetMeta(*source); // pass metadata // Add new location std::string metaname = source_url.ConnectionURL(); if (!destination->AddLocation(source_url, metaname)) { logger.msg(Arc::ERROR, "Failed to accept new file/destination"); return false; } destination->SetTries(1); res = destination->PreRegister(replication, force_meta); if (!res) { logger.msg(Arc::ERROR, "Failed to register new file/destination: %s", std::string(res)); return false; } res = destination->PostRegister(replication); if (!res) { destination->PreUnregister(replication); logger.msg(Arc::ERROR, "Failed to register new file/destination: %s", std::string(res)); return false; } return true; } static Arc::DataStatus do_mover(const Arc::URL& s_url, const Arc::URL& d_url, const std::list& locations, const std::string& cache_dir, Arc::UserConfig& usercfg, bool secure, bool passive, bool force_meta, int tries, bool verbose, int timeout) { Arc::DataHandle source(s_url, usercfg); Arc::DataHandle destination(d_url, usercfg); if (!source) { logger.msg(Arc::ERROR, "Unsupported source url: %s", s_url.str()); return Arc::DataStatus::ReadAcquireError; } if (!destination) { logger.msg(Arc::ERROR, "Unsupported destination url: %s", d_url.str()); return Arc::DataStatus::WriteAcquireError; } if ((source->RequiresCredentials() || destination->RequiresCredentials()) && !checkProxy(usercfg, s_url)) return Arc::DataStatus::CredentialsExpiredError; if (!locations.empty()) { std::string meta(destination->GetURL().Protocol()+"://"+destination->GetURL().Host()); for (std::list::const_iterator i = locations.begin(); i != locations.end(); ++i) { destination->AddLocation(*i, meta); } } Arc::DataMover mover; mover.secure(secure); mover.passive(passive); mover.verbose(verbose); mover.force_to_meta(force_meta); if (tries) { mover.retry(true); // go through all locations source->SetTries(tries); // try all locations "tries" times destination->SetTries(tries); } Arc::User cache_user; Arc::FileCache cache; if (!cache_dir.empty()) cache = Arc::FileCache(cache_dir+" .", "", cache_user.get_uid(), cache_user.get_gid()); if (verbose) mover.set_progress_indicator(&progress); Arc::DataStatus callback_res; Arc::DataStatus res = mover.Transfer(*source, *destination, cache, 
Arc::URLMap(), 0, 0, 0, timeout, &mover_callback, &callback_res); if (!res.Passed()) { logger.msg(Arc::ERROR, "Current transfer FAILED: %s", std::string(res)); if (res.Retryable()) { logger.msg(Arc::ERROR, "This seems like a temporary error, please try again later"); } return res; } cond.wait(); // wait for mover_callback if (verbose) std::cerr<& locations, const std::string& cache_dir, Arc::UserConfig& usercfg, bool secure, bool passive, bool force_meta, int recursion, int tries, bool verbose, int timeout) { Arc::URL source_url(source_url_); if (!source_url) { logger.msg(Arc::ERROR, "Invalid URL: %s", source_url.str()); return false; } Arc::URL destination_url(destination_url_); if (!destination_url) { logger.msg(Arc::ERROR, "Invalid URL: %s", destination_url.str()); return false; } if (timeout <= 0) timeout = 300; // 5 minute default if (tries < 0) tries = 0; if (source_url.Protocol() == "urllist" && destination_url.Protocol() == "urllist") { std::list sources = Arc::ReadURLList(source_url); std::list destinations = Arc::ReadURLList(destination_url); if (sources.empty()) { logger.msg(Arc::ERROR, "Can't read list of sources from file %s", source_url.Path()); return false; } if (destinations.empty()) { logger.msg(Arc::ERROR, "Can't read list of destinations from file %s", destination_url.Path()); return false; } if (sources.size() != destinations.size()) { logger.msg(Arc::ERROR, "Numbers of sources and destinations do not match"); return false; } bool r = true; for (std::list::iterator source = sources.begin(), destination = destinations.begin(); (source != sources.end()) && (destination != destinations.end()); ++source, ++destination) { if (!arccp(*source, *destination, locations, cache_dir, usercfg, secure, passive, force_meta, recursion, tries, verbose, timeout)) r = false; if (cancelled) return true; } return r; } if (source_url.Protocol() == "urllist") { std::list sources = Arc::ReadURLList(source_url); if (sources.empty()) { logger.msg(Arc::ERROR, "Can't read list of sources from file %s", source_url.Path()); return false; } bool r = true; for (std::list::iterator source = sources.begin(); source != sources.end(); ++source) { if (!arccp(*source, destination_url, locations, cache_dir, usercfg, secure, passive, force_meta, recursion, tries, verbose, timeout)) r = false; if (cancelled) return true; } return r; } if (destination_url.Protocol() == "urllist") { std::list destinations = Arc::ReadURLList(destination_url); if (destinations.empty()) { logger.msg(Arc::ERROR, "Can't read list of destinations from file %s", destination_url.Path()); return false; } bool r = true; for (std::list::iterator destination = destinations.begin(); destination != destinations.end(); ++destination) { if (!arccp(source_url, *destination, locations, cache_dir, usercfg, secure, passive, force_meta, recursion, tries, verbose, timeout)) r = false; if (cancelled) return true; } return r; } if (destination_url.Path()[destination_url.Path().length() - 1] != '/') { if (source_url.Path()[source_url.Path().length() - 1] == '/' && source_url.MetaDataOption("guid").empty()) { // files specified by guid may have path '/' logger.msg(Arc::ERROR, "Fileset copy to single object is not supported yet"); return false; } } else { // Copy TO fileset/directory if (source_url.Path()[source_url.Path().length() - 1] != '/') { // Copy FROM single object std::string::size_type p = source_url.Path().rfind('/'); if (p == std::string::npos) { logger.msg(Arc::ERROR, "Can't extract object's name from source url"); return false; } 
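// The destination ends with '/' but the source is a single object: append the
// part of the source path after the last '/' (the file name) to the destination
// path before starting the transfer.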
destination_url.ChangePath(destination_url.Path() + source_url.Path().substr(p + 1)); } else { // Fileset copy Arc::DataHandle source(source_url, usercfg); if (!source) { logger.msg(Arc::ERROR, "Unsupported source url: %s", source_url.str()); return false; } if (source->RequiresCredentials() && !checkProxy(usercfg, source_url)) return false; std::list files; Arc::DataStatus result = source->List(files, (Arc::DataPoint::DataPointInfoType) (Arc::DataPoint::INFO_TYPE_NAME | Arc::DataPoint::INFO_TYPE_TYPE)); if (!result.Passed()) { logger.msg(Arc::ERROR, "%s. Cannot copy fileset", std::string(result)); return false; } bool failures = false; // Handle transfer of files first (treat unknown like files) for (std::list::iterator i = files.begin(); i != files.end(); ++i) { if ((i->GetType() != Arc::FileInfo::file_type_unknown) && (i->GetType() != Arc::FileInfo::file_type_file)) continue; logger.msg(Arc::INFO, "Name: %s", i->GetName()); Arc::URL s_url(std::string(source_url.str() + i->GetName())); Arc::URL d_url(std::string(destination_url.str() + i->GetName())); logger.msg(Arc::INFO, "Source: %s", s_url.str()); logger.msg(Arc::INFO, "Destination: %s", d_url.str()); Arc::DataStatus res = do_mover(s_url, d_url, locations, cache_dir, usercfg, secure, passive, force_meta, tries, verbose, timeout); if (cancelled) return true; if (!res.Passed()) failures = true; else logger.msg(Arc::INFO, "Current transfer complete"); } if (failures) { logger.msg(Arc::ERROR, "Some transfers failed"); return false; } // Go deeper if allowed bool r = true; if (recursion > 0) // Handle directories recursively for (std::list::iterator i = files.begin(); i != files.end(); ++i) { if (i->GetType() != Arc::FileInfo::file_type_dir) continue; if (verbose) logger.msg(Arc::INFO, "Directory: %s", i->GetName()); std::string s_url(source_url.str()); std::string d_url(destination_url.str()); s_url += i->GetName(); d_url += i->GetName(); s_url += "/"; d_url += "/"; if (!arccp(s_url, d_url, locations, cache_dir, usercfg, secure, passive, force_meta, recursion - 1, tries, verbose, timeout)) r = false; if (cancelled) return true; } return r; } } Arc::DataStatus res = do_mover(source_url, destination_url, locations, cache_dir, usercfg, secure, passive, force_meta, tries, verbose, timeout); if (cancelled) return true; if (!res.Passed()) return false; logger.msg(Arc::INFO, "Transfer complete"); return true; } static int runmain(int argc, char **argv) { setlocale(LC_ALL, ""); // set signal handlers for safe cancellation signal(SIGTERM, sig_cancel); signal(SIGINT, sig_cancel); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); Arc::OptionParser options(istring("source destination"), istring("The arccp command copies files to, from " "and between grid storage elements.")); bool passive = false; options.AddOption('p', "passive", istring("use passive transfer (off by default if secure " "is on, on by default if secure is not requested)"), passive); bool notpassive = false; options.AddOption('n', "nopassive", istring("do not try to force passive transfer"), notpassive); bool force = false; options.AddOption('f', "force", istring("if the destination is an indexing service " "and not the same as the source and the " "destination is already registered, then " "the copy is normally not done. 
However, if " "this option is specified the source is " "assumed to be a replica of the destination " "created in an uncontrolled way and the " "copy is done like in case of replication. " "Using this option also skips validation of " "completed transfers."), force); bool verbose = false; options.AddOption('i', "indicate", istring("show progress indicator"), verbose); bool nocopy = false; options.AddOption('T', "notransfer", istring("do not transfer, but register source into " "destination. destination must be a meta-url."), nocopy); bool secure = false; options.AddOption('u', "secure", istring("use secure transfer (insecure by default)"), secure); std::string cache_path; options.AddOption('y', "cache", istring("path to local cache (use to put file into cache)"), istring("path"), cache_path); bool infinite_recursion = false; options.AddOption('r', "recursive", istring("operate recursively"), infinite_recursion); int recursion = 0; options.AddOption('D', "depth", istring("operate recursively up to specified level"), istring("level"), recursion); int retries = 0; options.AddOption('R', "retries", istring("number of retries before failing file transfer"), istring("number"), retries); std::list locations; options.AddOption('L', "location", istring("physical location to write to when destination is an indexing service." " Must be specified for indexing services which do not automatically" " generate physical locations. Can be specified multiple times -" " locations will be tried in order until one succeeds."), istring("URL"), locations); bool thirdparty = false; options.AddOption('3', "thirdparty", istring("perform third party transfer, where the destination pulls" " from the source (only available with GFAL plugin)"), thirdparty); bool show_plugins = false; options.AddOption('P', "listplugins", istring("list the available plugins (protocols supported)"), show_plugins); int timeout = 20; options.AddOption('t', "timeout", istring("timeout in seconds (default 20)"), istring("seconds"), timeout); std::string conffile; options.AddOption('z', "conffile", istring("configuration file (default ~/.arc/client.conf)"), istring("filename"), conffile); std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); bool version = false; options.AddOption('v', "version", istring("print version information"), version); std::list params = options.Parse(argc, argv); if (version) { std::cout << Arc::IString("%s version %s", "arccp", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug)); logger.msg(Arc::VERBOSE, "Running command: %s", options.GetCommandWithArguments()); if (show_plugins) { std::list modules; Arc::PluginsFactory pf(Arc::BaseConfig().MakeConfig(Arc::Config()).Parent()); pf.scan(Arc::FinderLoader::GetLibrariesList(), modules); Arc::PluginsFactory::FilterByKind("HED:DMC", modules); std::cout << Arc::IString("Protocol plugins available:") << std::endl; for (std::list::iterator itMod = modules.begin(); itMod != modules.end(); ++itMod) { for (std::list::iterator itPlug = itMod->plugins.begin(); itPlug != itMod->plugins.end(); ++itPlug) { std::cout << " " << itPlug->name << " - " << itPlug->description << std::endl; } } return 0; } // Attempt to acquire credentials. Whether they are required will be // determined later depending on the protocol. 
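// TryCredentials attempts to locate credentials but does not fail when none are
// present; checkProxy() enforces initializeCredentialsType::RequireCredentials
// later, once the protocols involved in the transfer are known.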
Arc::UserConfig usercfg(conffile, Arc::initializeCredentialsType::TryCredentials); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } usercfg.UtilsDirPath(Arc::UserConfig::ARCUSERDIRECTORY); if (debug.empty() && !usercfg.Verbosity().empty()) { Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); } if (params.size() != 2) { logger.msg(Arc::ERROR, "Wrong number of parameters specified"); return 1; } if (passive && notpassive) { logger.msg(Arc::ERROR, "Options 'p' and 'n' can't be used simultaneously"); return 1; } if ((!secure) && (!notpassive)) passive = true; if (infinite_recursion) recursion = INT_MAX; std::list::iterator it = params.begin(); std::string source = *it; ++it; std::string destination = *it; if (source == "-") source = "stdio:///stdin"; if (destination == "-") destination = "stdio:///stdout"; if (thirdparty) { if (!arctransfer(source, destination, locations, usercfg, secure, passive, verbose, timeout)) return 1; } else if (nocopy) { if (!arcregister(source, destination, usercfg, force)) return 1; } else { if (!arccp(source, destination, locations, cache_path, usercfg, secure, passive, force, recursion, retries + 1, verbose, timeout)) return 1; } return 0; } int main(int argc, char **argv) { int xr = runmain(argc,argv); _exit(xr); return 0; } nordugrid-arc-5.4.2/src/clients/data/PaxHeaders.7502/arcrm.cpp0000644000000000000000000000012413213442363022147 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200575.320711 30 ctime=1513200664.412801456 nordugrid-arc-5.4.2/src/clients/data/arcrm.cpp0000644000175000002070000001535713213442363022227 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include static Arc::Logger logger(Arc::Logger::getRootLogger(), "arcrm"); /// Returns number of files that failed to be deleted int arcrm(const std::list& urls, Arc::UserConfig& usercfg, bool errcont) { Arc::DataHandle* handle = NULL; Arc::DataMover mover; unsigned int failed = 0; for (std::list::const_iterator url = urls.begin(); url != urls.end(); ++url) { if (!(*url)) { logger.msg(Arc::ERROR, "Invalid URL: %s", url->str()); failed++; continue; } if (url->Protocol() == "urllist") { std::list url_files = Arc::ReadURLList(*url); if (url_files.empty()) { logger.msg(Arc::ERROR, "Can't read list of locations from file %s", url->Path()); failed += 1; } else { failed += arcrm(url_files, usercfg, errcont); } continue; } // Depending on protocol SetURL() may allow reusing connections and hence // the same DataHandle object to delete multiple files. If it is not // supported SetURL() returns false and a new DataHandle must be created. 
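// When reuse is not possible the old handle is deleted and a new one is created
// for the current URL; the handle is also reset to NULL after credential
// failures so that the next URL starts from a clean state.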
if (!handle || !(*handle)->SetURL(*url)) { delete handle; handle = new Arc::DataHandle(*url, usercfg); if (!(*handle)) { logger.msg(Arc::ERROR, "Unsupported URL given: %s", url->str()); failed++; delete handle; handle = NULL; continue; } if ((*handle)->RequiresCredentials()) { if (!usercfg.InitializeCredentials(Arc::initializeCredentialsType::RequireCredentials)) { logger.msg(Arc::ERROR, "Unable to remove file %s", url->str()); logger.msg(Arc::ERROR, "Invalid credentials, please check proxy and/or CA certificates"); failed++; delete handle; handle = NULL; continue; } Arc::Credential holder(usercfg); if (!holder.IsValid()) { if (holder.GetEndTime() < Arc::Time()) { logger.msg(Arc::ERROR, "Proxy expired"); } logger.msg(Arc::ERROR, "Unable to remove file %s", url->str()); logger.msg(Arc::ERROR, "Invalid credentials, please check proxy and/or CA certificates"); failed++; delete handle; handle = NULL; continue; } } } // only one try (*handle)->SetTries(1); Arc::DataStatus res = mover.Delete(**handle, errcont); if (!res.Passed()) { logger.msg(Arc::ERROR, std::string(res)); if (res.Retryable()) { logger.msg(Arc::ERROR, "This seems like a temporary error, please try again later"); } failed++; } } delete handle; return failed; } static int runmain(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); Arc::OptionParser options(istring("url [url ...]"), istring("The arcrm command deletes files and on " "grid storage elements.")); bool force = false; options.AddOption('f', "force", istring("remove logical file name registration even " "if not all physical instances were removed"), force); bool show_plugins = false; options.AddOption('P', "listplugins", istring("list the available plugins (protocols supported)"), show_plugins); int timeout = 20; options.AddOption('t', "timeout", istring("timeout in seconds (default 20)"), istring("seconds"), timeout); std::string conffile; options.AddOption('z', "conffile", istring("configuration file (default ~/.arc/client.conf)"), istring("filename"), conffile); std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); bool version = false; options.AddOption('v', "version", istring("print version information"), version); std::list params = options.Parse(argc, argv); if (version) { std::cout << Arc::IString("%s version %s", "arcrm", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. 
if (!debug.empty()) { Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug)); } logger.msg(Arc::VERBOSE, "Running command: %s", options.GetCommandWithArguments()); if (show_plugins) { std::list modules; Arc::PluginsFactory pf(Arc::BaseConfig().MakeConfig(Arc::Config()).Parent()); pf.scan(Arc::FinderLoader::GetLibrariesList(), modules); Arc::PluginsFactory::FilterByKind("HED:DMC", modules); std::cout << Arc::IString("Protocol plugins available:") << std::endl; for (std::list::iterator itMod = modules.begin(); itMod != modules.end(); ++itMod) { for (std::list::iterator itPlug = itMod->plugins.begin(); itPlug != itMod->plugins.end(); ++itPlug) { std::cout << " " << itPlug->name << " - " << itPlug->description << std::endl; } } return 0; } // credentials will be initialised later if necessary Arc::UserConfig usercfg(conffile, Arc::initializeCredentialsType::TryCredentials); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } usercfg.UtilsDirPath(Arc::UserConfig::ARCUSERDIRECTORY); usercfg.Timeout(timeout); if (debug.empty() && !usercfg.Verbosity().empty()) { Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); } if (params.empty()) { logger.msg(Arc::ERROR, "Wrong number of parameters specified"); return 1; } std::list urls; for (std::list::const_iterator i = params.begin(); i != params.end(); ++i) { urls.push_back(*i); } unsigned int failed = arcrm(urls, usercfg, force); if (failed != 0) { if (params.size() != 1 || failed > 1) std::cout</dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test 
-d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(DATA) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(arcsysconfdir)" "$(DESTDIR)$(exampledir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-arcsysconfDATA install-exampleDATA install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-arcsysconfDATA uninstall-exampleDATA .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-arcsysconfDATA \ install-data install-data-am install-dvi install-dvi-am \ install-exampleDATA install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-recursive uninstall uninstall-am \ uninstall-arcsysconfDATA uninstall-exampleDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/clients/PaxHeaders.7502/wsrf0000644000000000000000000000013213214316030020321 xustar000000000000000030 mtime=1513200664.578803486 30 atime=1513200668.719854133 30 ctime=1513200664.578803486 nordugrid-arc-5.4.2/src/clients/wsrf/0000755000175000002070000000000013214316030020444 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/clients/wsrf/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612350510254022441 xustar000000000000000026 mtime=1403162796.25655 30 atime=1513200593.422933222 30 ctime=1513200664.574803437 nordugrid-arc-5.4.2/src/clients/wsrf/Makefile.am0000644000175000002070000000075112350510254022507 0ustar00mockbuildmock00000000000000bin_PROGRAMS = arcwsrf man_MANS = arcwsrf.1 arcwsrf_SOURCES = arcwsrf.cpp arcwsrf_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) arcwsrf_LDADD = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) sysconf_DATA = EXTRA_DIST = $(sysconf_DATA) nordugrid-arc-5.4.2/src/clients/wsrf/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315721022450 xustar000000000000000030 mtime=1513200593.467933772 30 atime=1513200652.286653148 29 ctime=1513200664.57580345 nordugrid-arc-5.4.2/src/clients/wsrf/Makefile.in0000644000175000002070000007201013214315721022517 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ bin_PROGRAMS = arcwsrf$(EXEEXT) subdir = src/clients/wsrf DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/arcwsrf.1.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = arcwsrf.1 CONFIG_CLEAN_VPATH_FILES = am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)" \ "$(DESTDIR)$(sysconfdir)" PROGRAMS = $(bin_PROGRAMS) am_arcwsrf_OBJECTS = arcwsrf-arcwsrf.$(OBJEXT) arcwsrf_OBJECTS = $(am_arcwsrf_OBJECTS) am__DEPENDENCIES_1 = arcwsrf_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) arcwsrf_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(arcwsrf_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(arcwsrf_SOURCES) DIST_SOURCES = $(arcwsrf_SOURCES) am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if 
(++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' man1dir = $(mandir)/man1 NROFF = nroff MANS = $(man_MANS) DATA = $(sysconf_DATA) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ 
GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ 
ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ man_MANS = arcwsrf.1 arcwsrf_SOURCES = arcwsrf.cpp arcwsrf_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) arcwsrf_LDADD = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) sysconf_DATA = EXTRA_DIST = $(sysconf_DATA) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/clients/wsrf/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/clients/wsrf/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): arcwsrf.1: $(top_builddir)/config.status $(srcdir)/arcwsrf.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-binPROGRAMS: $(bin_PROGRAMS) @$(NORMAL_INSTALL) test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)" @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p || test -f $$p1; \ then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-binPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(bindir)" && rm -f $$files clean-binPROGRAMS: @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list arcwsrf$(EXEEXT): $(arcwsrf_OBJECTS) $(arcwsrf_DEPENDENCIES) @rm -f arcwsrf$(EXEEXT) $(arcwsrf_LINK) $(arcwsrf_OBJECTS) $(arcwsrf_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcwsrf-arcwsrf.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) 
$(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< arcwsrf-arcwsrf.o: arcwsrf.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcwsrf_CXXFLAGS) $(CXXFLAGS) -MT arcwsrf-arcwsrf.o -MD -MP -MF $(DEPDIR)/arcwsrf-arcwsrf.Tpo -c -o arcwsrf-arcwsrf.o `test -f 'arcwsrf.cpp' || echo '$(srcdir)/'`arcwsrf.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcwsrf-arcwsrf.Tpo $(DEPDIR)/arcwsrf-arcwsrf.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcwsrf.cpp' object='arcwsrf-arcwsrf.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcwsrf_CXXFLAGS) $(CXXFLAGS) -c -o arcwsrf-arcwsrf.o `test -f 'arcwsrf.cpp' || echo '$(srcdir)/'`arcwsrf.cpp arcwsrf-arcwsrf.obj: arcwsrf.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcwsrf_CXXFLAGS) $(CXXFLAGS) -MT arcwsrf-arcwsrf.obj -MD -MP -MF $(DEPDIR)/arcwsrf-arcwsrf.Tpo -c -o arcwsrf-arcwsrf.obj `if test -f 'arcwsrf.cpp'; then $(CYGPATH_W) 'arcwsrf.cpp'; else $(CYGPATH_W) '$(srcdir)/arcwsrf.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcwsrf-arcwsrf.Tpo $(DEPDIR)/arcwsrf-arcwsrf.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcwsrf.cpp' object='arcwsrf-arcwsrf.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcwsrf_CXXFLAGS) $(CXXFLAGS) -c -o arcwsrf-arcwsrf.obj `if test -f 'arcwsrf.cpp'; then $(CYGPATH_W) 'arcwsrf.cpp'; else $(CYGPATH_W) '$(srcdir)/arcwsrf.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man1: $(man_MANS) @$(NORMAL_INSTALL) test -z "$(man1dir)" || $(MKDIR_P) "$(DESTDIR)$(man1dir)" @list=''; test -n "$(man1dir)" || exit 0; \ { for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \ done; } uninstall-man1: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man1dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | sed -e 
's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ test -z "$$files" || { \ echo " ( cd '$(DESTDIR)$(man1dir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(man1dir)" && rm -f $$files; } install-sysconfDATA: $(sysconf_DATA) @$(NORMAL_INSTALL) test -z "$(sysconfdir)" || $(MKDIR_P) "$(DESTDIR)$(sysconfdir)" @list='$(sysconf_DATA)'; test -n "$(sysconfdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(sysconfdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(sysconfdir)" || exit $$?; \ done uninstall-sysconfDATA: @$(NORMAL_UNINSTALL) @list='$(sysconf_DATA)'; test -n "$(sysconfdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(sysconfdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(sysconfdir)" && rm -f $$files ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @list='$(MANS)'; if test -n "$$list"; then \ list=`for p in $$list; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \ if test -n "$$list" && \ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e 
"s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) $(MANS) $(DATA) installdirs: for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(sysconfdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-binPROGRAMS install-sysconfDATA install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man1 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-binPROGRAMS uninstall-man \ uninstall-sysconfDATA uninstall-man: uninstall-man1 .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \ clean-generic clean-libtool ctags distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-binPROGRAMS install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-man1 install-pdf install-pdf-am install-ps \ install-ps-am install-strip install-sysconfDATA installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-binPROGRAMS \ uninstall-man uninstall-man1 uninstall-sysconfDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/clients/wsrf/PaxHeaders.7502/arcwsrf.cpp0000644000000000000000000000012412301125744022560 xustar000000000000000027 mtime=1392815076.546455 27 atime=1513200575.302711 30 ctime=1513200664.578803486 nordugrid-arc-5.4.2/src/clients/wsrf/arcwsrf.cpp0000644000175000002070000001113612301125744022627 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include using namespace Arc; #ifdef TEST #define RUN(X) test_arcwsrf_##X #else #define RUN(X) X #endif int RUN(main)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arcwsrf"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); //Arc::ArcLocation::Init(argv[0]); Arc::OptionParser options(istring("URL [query]"), istring("The arcwsrf command is used for " "obtaining the WS-ResourceProperties of\n" "services.")); std::list paths; options.AddOption('p', "property", istring("Request for specific Resource Property"), istring("[-]name"), paths); int timeout = -1; options.AddOption('t', "timeout", istring("timeout in seconds (default 20)"), istring("seconds"), timeout); std::string conffile; options.AddOption('z', "conffile", istring("configuration file (default ~/.arc/client.conf)"), istring("filename"), conffile); std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); bool version = false; options.AddOption('v', "version", istring("print version information"), version); std::string url; std::string query; { std::list args = options.Parse(argc, argv); if (version) { std::cout << Arc::IString("%s version %s", "arcwsrf", VERSION) << std::endl; return 0; } if(args.size() < 1) { logger.msg(Arc::ERROR, "Missing URL"); return 1; } if(url.size() > 2) { logger.msg(Arc::ERROR, "Too many parameters"); return 1; } std::list::iterator arg = args.begin(); url = *arg; ++arg; if(arg != args.end()) query = *arg; } // If debug is specified as argument, it should be set before loading the configuration. 
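The template arguments in the bundled arcwsrf.cpp were lost when this archive was rendered as text (everything between '<' and '>' is missing, including the #include names, which are left as-is). Based on how the variables are used in the code that follows, the query-path handling presumably reads like the sketch below; the container types and the element type of CountedPointer are inferred from usage, not copied from the original file, and the ARC header names remain unknown.

// Illustrative sketch only: template arguments restored from how the stripped
// declarations are used in arcwsrf.cpp further down; ARC headers are assumed
// (the original #include names were stripped from this dump).
#include <list>
#include <string>

static void build_wsrf_request(const std::string& query,
                               const std::list<std::string>& paths,
                               Arc::CountedPointer<Arc::InformationRequest>& request) {
  if (!query.empty()) {
    // The whole query supplied as an XML document.
    Arc::XMLNode q(query);
    if (q) request = new Arc::InformationRequest(q);
  } else {
    // Each "-p name" option becomes a one-element property path.
    std::list<std::list<std::string> > qpaths;
    for (std::list<std::string>::const_iterator path = paths.begin();
         path != paths.end(); ++path) {
      std::list<std::string> qpath;
      qpath.push_back(*path);
      qpaths.push_back(qpath);
    }
    request = new Arc::InformationRequest(qpaths);
  }
}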
if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug)); Arc::UserConfig usercfg(conffile); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (timeout > 0) usercfg.Timeout(timeout); if (debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); //if (timeout > 0) { // usercfg.SetTimeout(timeout); //} // Proxy check //if (!usercfg.CheckProxy()) // return 1; CountedPointer request(NULL); if(!query.empty()) { XMLNode q(query); if(!q) { logger.msg(Arc::ERROR, "Query is not a valid XML"); return 1; } request = new InformationRequest(q); } else { std::list > qpaths; for(std::list::iterator path = paths.begin(); path != paths.end();++path) { std::list qpath; qpath.push_back(*path); qpaths.push_back(qpath); } request = new InformationRequest(qpaths); } if(!(*request)) { logger.msg(Arc::ERROR, "Failed to create WSRP request"); return 1; } //SOAPEnvelope* SOAP(void); URL u(url); if(!u) { logger.msg(Arc::ERROR, "Specified URL is not valid"); return 1; } MCCConfig cfg; usercfg.ApplyToConfig(cfg); ClientSOAP client(cfg, u, usercfg.Timeout()); PayloadSOAP req(*(request->SOAP())); PayloadSOAP* resp_ = NULL; MCC_Status r = client.process(&req,&resp_); CountedPointer resp(resp_); if(!r) { logger.msg(Arc::ERROR, "Failed to send request"); return 1; } if(!resp) { logger.msg(Arc::ERROR, "Failed to obtain SOAP response"); return 1; } std::string o; resp->Body().Child().GetXML(o); if(resp->IsFault()) { logger.msg(Arc::ERROR, "SOAP fault received"); std::cerr</dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) $(MANS) installdirs: for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-binPROGRAMS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man1 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-binPROGRAMS uninstall-man uninstall-man: uninstall-man1 .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \ clean-generic clean-libtool ctags distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-binPROGRAMS install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-man1 install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags uninstall \ uninstall-am uninstall-binPROGRAMS uninstall-man \ uninstall-man1 # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/clients/echo/PaxHeaders.7502/arcecho.cpp0000644000000000000000000000012412301125744022452 xustar000000000000000027 mtime=1392815076.546455 27 atime=1513200575.388712 30 ctime=1513200664.434801725 nordugrid-arc-5.4.2/src/clients/echo/arcecho.cpp0000644000175000002070000000764012301125744022526 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include int main(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arcecho"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::rootLogger.setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); Arc::OptionParser options(istring("service message"), istring("The arcecho command is a client for " "the ARC echo service."), istring("The service argument is a URL to an ARC " "echo service.\n" "The message argument is the message the " "service should return.")); int timeout = -1; options.AddOption('t', "timeout", istring("timeout in seconds (default 20)"), istring("seconds"), timeout); std::string conffile; options.AddOption('z', "conffile", istring("configuration file (default ~/.arc/client.conf)"), istring("filename"), conffile); std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); bool version = false; options.AddOption('v', "version", istring("print version information"), version); std::list args = options.Parse(argc, argv); if (version) { std::cout << Arc::IString("%s version %s", "arcecho", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. 
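For orientation, a minimal sketch (not part of the original file) of the SOAP message the echo client below constructs: the element names and the namespace URI are taken from the code itself, while the exact envelope layout produced by Arc::PayloadSOAP is an assumption and is only indicated roughly in the comments.

// Sketch of the request arcecho builds; ARC headers (Arc::NS, Arc::PayloadSOAP)
// are omitted here because the original #include names were stripped from this dump.
#include <string>

std::string sketch_echo_request(const std::string& message) {
  Arc::NS ns("echo", "http://www.nordugrid.org/schemas/echo");
  Arc::PayloadSOAP request(ns);
  request.NewChild("echo:echo").NewChild("echo:say") = message;
  // Roughly, the serialized body looks like:
  //   <echo:echo xmlns:echo="http://www.nordugrid.org/schemas/echo">
  //     <echo:say>message</echo:say>
  //   </echo:echo>
  // wrapped in a SOAP envelope; the service is expected to answer with
  // <echo:echoResponse><echo:hear>message</echo:hear></echo:echoResponse>,
  // which the client reads as (*response)["echoResponse"]["hear"].
  std::string xml;
  request.GetXML(xml, true);
  return xml;
}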
if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug)); Arc::UserConfig usercfg(conffile); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); if (timeout > 0) { usercfg.Timeout(timeout); } if (args.size() != 2) { logger.msg(Arc::ERROR, "Wrong number of arguments!"); return 1; } std::list::iterator it = args.begin(); Arc::URL service = *it++; std::string message = *it++; Arc::MCCConfig cfg; usercfg.ApplyToConfig(cfg); Arc::ClientSOAP client(cfg, service, usercfg.Timeout()); std::string xml; Arc::NS ns("echo", "http://www.nordugrid.org/schemas/echo"); Arc::PayloadSOAP request(ns); request.NewChild("echo:echo").NewChild("echo:say") = message; request.GetXML(xml, true); logger.msg(Arc::INFO, "Request:\n%s", xml); Arc::PayloadSOAP *response = NULL; Arc::MCC_Status status = client.process(&request, &response); if (!status) { logger.msg(Arc::ERROR, (std::string)status); if (response) delete response; return 1; } if (!response) { logger.msg(Arc::ERROR, "No SOAP response"); return 1; } response->GetXML(xml, true); logger.msg(Arc::INFO, "Response:\n%s", xml); if (response->IsFault()) { Arc::SOAPFault& fault = *response->Fault(); std::string err("SOAP fault: %s", fault.Code()); for (int n = 0;;++n) { if (fault.Reason(n).empty()) break; err += ": " + fault.Reason(n); } logger.msg(Arc::ERROR, err); return 1; } std::string answer = (std::string)((*response)["echoResponse"]["hear"]); delete response; std::cout << answer << std::endl; return 0; } nordugrid-arc-5.4.2/src/clients/echo/PaxHeaders.7502/arcecho.1.in0000644000000000000000000000012712123705613022441 xustar000000000000000027 mtime=1364167563.653962 30 atime=1513200651.984649455 30 ctime=1513200664.434801725 nordugrid-arc-5.4.2/src/clients/echo/arcecho.1.in0000644000175000002070000000373212123705613022510 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARCECHO 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arcecho \- ARC echo client .SH DESCRIPTION The .B arcecho command is a client for the ARC echo service. .SH SYNOPSIS .B arcecho [options] service message .SH OPTIONS .IP "\fB-z\fR, \fB--conffile\fR=\fIfilename\fR" configuration file (default ~/.arc/client.conf) .IP "\fB-d\fR, \fB--debug\fR=\fIdebuglevel\fR" FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG .IP "\fB-v\fR, \fB--version\fR" print version information .IP "\fB-?\fR, \fB--help\fR" print help .LP .SH ARGUMENTS .IP "\fBservice\fR" A URL to an ARC echo service .IP "\fBmessage\fR" The message the service should return .LP .SH EXTENDED DESCRIPTION .B arcecho is a simple test tool: we can send a message to the echo service and it will respond with the same message. .SH FILES .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .SH ENVIRONMENT VARIABLES .TP .B X509_USER_PROXY The location of the user's Grid proxy file. Shouldn't be set unless the proxy is in a non-standard location. .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. 
.TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org nordugrid-arc-5.4.2/src/clients/echo/PaxHeaders.7502/README0000644000000000000000000000012311106323734021222 xustar000000000000000026 mtime=1226418140.52583 27 atime=1513200575.388712 30 ctime=1513200664.431801688 nordugrid-arc-5.4.2/src/clients/echo/README0000644000175000002070000000003011106323734021261 0ustar00mockbuildmock00000000000000client for echo service nordugrid-arc-5.4.2/src/clients/PaxHeaders.7502/credentials0000644000000000000000000000013213214316030021635 xustar000000000000000030 mtime=1513200664.470802166 30 atime=1513200668.719854133 30 ctime=1513200664.470802166 nordugrid-arc-5.4.2/src/clients/credentials/0000755000175000002070000000000013214316030021760 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/clients/credentials/PaxHeaders.7502/arcproxy_proxy.cpp0000644000000000000000000000012413066751223025543 xustar000000000000000027 mtime=1490801299.996866 27 atime=1513200575.375712 30 ctime=1513200664.465802104 nordugrid-arc-5.4.2/src/clients/credentials/arcproxy_proxy.cpp0000644000175000002070000001133213066751223025610 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_NSS #include #endif using namespace ArcCredential; void create_tmp_proxy(std::string& proxy, Arc::Credential& signer) { int keybits = 1024; Arc::Time now; Arc::Period period = 3600 * 12 + 300; std::string req_str; Arc::Credential tmp_proxyreq(now-Arc::Period(300), period, keybits); tmp_proxyreq.GenerateRequest(req_str); std::string proxy_private_key; std::string signing_cert; std::string signing_cert_chain; tmp_proxyreq.OutputPrivatekey(proxy_private_key); signer.OutputCertificate(signing_cert); signer.OutputCertificateChain(signing_cert_chain); if (!signer.SignRequest(&tmp_proxyreq, proxy)) throw std::runtime_error("Failed to sign proxy"); proxy.append(proxy_private_key).append(signing_cert).append(signing_cert_chain); } void create_proxy(std::string& proxy, Arc::Credential& signer, const std::string& proxy_policy, const Arc::Time& proxy_start, const Arc::Period& proxy_period, const std::string& vomsacseq, int keybits, const std::string& signing_algorithm) { std::string private_key, signing_cert, signing_cert_chain; std::string req_str; if(keybits < 0) keybits = signer.GetKeybits(); Arc::Credential cred_request(proxy_start, proxy_period, keybits); cred_request.SetSigningAlgorithm(signer.GetSigningAlgorithm()); if(!signing_algorithm.empty() && signing_algorithm != "inherit") { if(signing_algorithm == "sha1") { cred_request.SetSigningAlgorithm(Arc::SIGN_SHA1); } else if(signing_algorithm == "sha2") { cred_request.SetSigningAlgorithm(Arc::SIGN_SHA256); } else if(signing_algorithm == "sha224") { cred_request.SetSigningAlgorithm(Arc::SIGN_SHA224); } else if(signing_algorithm == "sha256") { cred_request.SetSigningAlgorithm(Arc::SIGN_SHA256); } else if(signing_algorithm == "sha384") { 
cred_request.SetSigningAlgorithm(Arc::SIGN_SHA384); } else if(signing_algorithm == "sha512") { cred_request.SetSigningAlgorithm(Arc::SIGN_SHA512); } else { throw std::runtime_error("Unknown signing algorithm specified: "+signing_algorithm); } } cred_request.GenerateRequest(req_str); cred_request.OutputPrivatekey(private_key); signer.OutputCertificate(signing_cert); signer.OutputCertificateChain(signing_cert_chain); //Put the voms attribute certificate into proxy certificate if (!vomsacseq.empty()) { bool r = cred_request.AddExtension("acseq", (char**)(vomsacseq.c_str())); if (!r) std::cout << Arc::IString("Failed to add VOMS AC extension. Your proxy may be incomplete.") << std::endl; } if(!proxy_policy.empty()) { cred_request.SetProxyPolicy("rfc", "anylanguage", proxy_policy, -1); } else if(CERT_IS_LIMITED_PROXY(signer.GetType())) { // Gross hack for globus. If Globus marks own proxy as limited // it expects every derived proxy to be limited or at least // independent. Independent proxies has little sense in Grid // world. So here we make our proxy globus-limited to allow // it to be used with globus code. cred_request.SetProxyPolicy("rfc", "limited", proxy_policy, -1); } else { cred_request.SetProxyPolicy("rfc", "inheritAll", proxy_policy, -1); } if (!signer.SignRequest(&cred_request, proxy)) throw std::runtime_error("Failed to sign proxy"); proxy.append(private_key).append(signing_cert).append(signing_cert_chain); } void write_proxy_file(const std::string& path, const std::string& content) { std::string::size_type off = 0; if((!Arc::FileDelete(path)) && (errno != ENOENT)) { throw std::runtime_error("Failed to remove proxy file " + path); } if(!Arc::FileCreate(path, content, 0, 0, S_IRUSR | S_IWUSR)) { throw std::runtime_error("Failed to create proxy file " + path); } } void remove_proxy_file(const std::string& path) { if((!Arc::FileDelete(path)) && (errno != ENOENT)) { throw std::runtime_error("Failed to remove proxy file " + path); } } void remove_cert_file(const std::string& path) { if((!Arc::FileDelete(path)) && (errno != ENOENT)) { throw std::runtime_error("Failed to remove certificate file " + path); } } nordugrid-arc-5.4.2/src/clients/credentials/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712574532670023774 xustar000000000000000027 mtime=1441969592.119697 30 atime=1513200593.149929883 30 ctime=1513200664.459802031 nordugrid-arc-5.4.2/src/clients/credentials/Makefile.am0000644000175000002070000000634412574532670024045 0ustar00mockbuildmock00000000000000if CANLXX_ENABLED ARCPROXYALT_PROGRAM = arcproxyalt else ARCPROXYALT_PROGRAM = endif bin_PROGRAMS = arcproxy $(ARCPROXYALT_PROGRAM) noinst_PROGRAMS = test2vomsserver test2myproxyserver_put test2myproxyserver_get arcproxy_SOURCES = arcproxy.cpp arcproxy_voms.cpp arcproxy_myproxy.cpp arcproxy_proxy.cpp arcproxy.h arcproxy_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcproxy_LDADD = \ $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) if CANLXX_ENABLED arcproxyalt_SOURCES = arcproxyalt.cpp arcproxyalt_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) 
$(CANLXX_CFLAGS) \ $(AM_CXXFLAGS) arcproxyalt_LDADD = \ $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(CANLXX_LIBS) endif test2vomsserver_SOURCES = test2vomsserver.cpp test2vomsserver_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test2vomsserver_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) test2myproxyserver_put_SOURCES = test2myproxyserver_put.cpp test2myproxyserver_put_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test2myproxyserver_put_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) test2myproxyserver_get_SOURCES = test2myproxyserver_get.cpp test2myproxyserver_get_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test2myproxyserver_get_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) man_MANS = arcproxy.1 nordugrid-arc-5.4.2/src/clients/credentials/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315721023765 xustar000000000000000030 mtime=1513200593.216930703 30 atime=1513200651.996649602 30 ctime=1513200664.460802043 nordugrid-arc-5.4.2/src/clients/credentials/Makefile.in0000644000175000002070000014767213214315721024054 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
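A hedged sketch (not taken from the archive) of how the helpers defined in arcproxy_proxy.cpp above fit together: sign a short-lived proxy with an existing credential and store it with owner-only permissions. How the Arc::Credential signer itself is constructed lies outside this excerpt, so it is left as a parameter here; the function name is illustrative.

// Uses only the helpers whose definitions appear in arcproxy_proxy.cpp above.
#include <stdexcept>
#include <string>

// Declarations matching the helpers defined in arcproxy_proxy.cpp.
void create_tmp_proxy(std::string& proxy, Arc::Credential& signer);
void write_proxy_file(const std::string& path, const std::string& content);

void make_and_store_tmp_proxy(Arc::Credential& signer, const std::string& path) {
  std::string proxy;
  // Generates a roughly 12-hour proxy request, signs it with 'signer' and
  // appends the private key and signing chain; throws std::runtime_error on failure.
  create_tmp_proxy(proxy, signer);
  // Removes any existing file and writes the new proxy with mode 0600.
  write_proxy_file(path, proxy);
}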
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ bin_PROGRAMS = arcproxy$(EXEEXT) $(am__EXEEXT_1) noinst_PROGRAMS = test2vomsserver$(EXEEXT) \ test2myproxyserver_put$(EXEEXT) \ test2myproxyserver_get$(EXEEXT) subdir = src/clients/credentials DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/arcproxy.1.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = arcproxy.1 CONFIG_CLEAN_VPATH_FILES = @CANLXX_ENABLED_TRUE@am__EXEEXT_1 = arcproxyalt$(EXEEXT) am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)" PROGRAMS = $(bin_PROGRAMS) $(noinst_PROGRAMS) am_arcproxy_OBJECTS = arcproxy-arcproxy.$(OBJEXT) \ arcproxy-arcproxy_voms.$(OBJEXT) \ arcproxy-arcproxy_myproxy.$(OBJEXT) \ arcproxy-arcproxy_proxy.$(OBJEXT) arcproxy_OBJECTS = $(am_arcproxy_OBJECTS) am__DEPENDENCIES_1 = arcproxy_DEPENDENCIES = $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) arcproxy_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcproxy_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am__arcproxyalt_SOURCES_DIST = arcproxyalt.cpp @CANLXX_ENABLED_TRUE@am_arcproxyalt_OBJECTS = \ @CANLXX_ENABLED_TRUE@ arcproxyalt-arcproxyalt.$(OBJEXT) arcproxyalt_OBJECTS = $(am_arcproxyalt_OBJECTS) @CANLXX_ENABLED_TRUE@arcproxyalt_DEPENDENCIES = $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ @CANLXX_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ @CANLXX_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ @CANLXX_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ @CANLXX_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ @CANLXX_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/common/libarccommon.la \ @CANLXX_ENABLED_TRUE@ $(am__DEPENDENCIES_1) \ @CANLXX_ENABLED_TRUE@ $(am__DEPENDENCIES_1) \ @CANLXX_ENABLED_TRUE@ 
$(am__DEPENDENCIES_1) \ @CANLXX_ENABLED_TRUE@ $(am__DEPENDENCIES_1) arcproxyalt_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcproxyalt_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_test2myproxyserver_get_OBJECTS = \ test2myproxyserver_get-test2myproxyserver_get.$(OBJEXT) test2myproxyserver_get_OBJECTS = $(am_test2myproxyserver_get_OBJECTS) test2myproxyserver_get_DEPENDENCIES = $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) test2myproxyserver_get_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(test2myproxyserver_get_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_test2myproxyserver_put_OBJECTS = \ test2myproxyserver_put-test2myproxyserver_put.$(OBJEXT) test2myproxyserver_put_OBJECTS = $(am_test2myproxyserver_put_OBJECTS) test2myproxyserver_put_DEPENDENCIES = $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) test2myproxyserver_put_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(test2myproxyserver_put_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_test2vomsserver_OBJECTS = \ test2vomsserver-test2vomsserver.$(OBJEXT) test2vomsserver_OBJECTS = $(am_test2vomsserver_OBJECTS) test2vomsserver_DEPENDENCIES = $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) test2vomsserver_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(test2vomsserver_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ 
SOURCES = $(arcproxy_SOURCES) $(arcproxyalt_SOURCES) \ $(test2myproxyserver_get_SOURCES) \ $(test2myproxyserver_put_SOURCES) $(test2vomsserver_SOURCES) DIST_SOURCES = $(arcproxy_SOURCES) $(am__arcproxyalt_SOURCES_DIST) \ $(test2myproxyserver_get_SOURCES) \ $(test2myproxyserver_put_SOURCES) $(test2vomsserver_SOURCES) am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' man1dir = $(mandir)/man1 NROFF = nroff MANS = $(man_MANS) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = 
@ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI 
= @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @CANLXX_ENABLED_FALSE@ARCPROXYALT_PROGRAM = @CANLXX_ENABLED_TRUE@ARCPROXYALT_PROGRAM = arcproxyalt arcproxy_SOURCES = arcproxy.cpp arcproxy_voms.cpp arcproxy_myproxy.cpp arcproxy_proxy.cpp arcproxy.h arcproxy_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcproxy_LDADD = \ $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ 
$(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) @CANLXX_ENABLED_TRUE@arcproxyalt_SOURCES = arcproxyalt.cpp @CANLXX_ENABLED_TRUE@arcproxyalt_CXXFLAGS = -I$(top_srcdir)/include \ @CANLXX_ENABLED_TRUE@ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(CANLXX_CFLAGS) \ @CANLXX_ENABLED_TRUE@ $(AM_CXXFLAGS) @CANLXX_ENABLED_TRUE@arcproxyalt_LDADD = \ @CANLXX_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ @CANLXX_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ @CANLXX_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ @CANLXX_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ @CANLXX_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ @CANLXX_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/common/libarccommon.la \ @CANLXX_ENABLED_TRUE@ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(CANLXX_LIBS) test2vomsserver_SOURCES = test2vomsserver.cpp test2vomsserver_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test2vomsserver_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) test2myproxyserver_put_SOURCES = test2myproxyserver_put.cpp test2myproxyserver_put_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test2myproxyserver_put_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) test2myproxyserver_get_SOURCES = test2myproxyserver_get.cpp test2myproxyserver_get_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test2myproxyserver_get_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) man_MANS = arcproxy.1 all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/clients/credentials/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/clients/credentials/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): arcproxy.1: $(top_builddir)/config.status $(srcdir)/arcproxy.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-binPROGRAMS: $(bin_PROGRAMS) @$(NORMAL_INSTALL) test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)" @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p || test -f $$p1; \ then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-binPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(bindir)" && rm -f $$files clean-binPROGRAMS: @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list arcproxy$(EXEEXT): $(arcproxy_OBJECTS) $(arcproxy_DEPENDENCIES) @rm -f arcproxy$(EXEEXT) $(arcproxy_LINK) $(arcproxy_OBJECTS) $(arcproxy_LDADD) $(LIBS) arcproxyalt$(EXEEXT): $(arcproxyalt_OBJECTS) $(arcproxyalt_DEPENDENCIES) @rm -f arcproxyalt$(EXEEXT) $(arcproxyalt_LINK) $(arcproxyalt_OBJECTS) $(arcproxyalt_LDADD) $(LIBS) test2myproxyserver_get$(EXEEXT): $(test2myproxyserver_get_OBJECTS) $(test2myproxyserver_get_DEPENDENCIES) @rm -f test2myproxyserver_get$(EXEEXT) $(test2myproxyserver_get_LINK) $(test2myproxyserver_get_OBJECTS) $(test2myproxyserver_get_LDADD) $(LIBS) test2myproxyserver_put$(EXEEXT): $(test2myproxyserver_put_OBJECTS) $(test2myproxyserver_put_DEPENDENCIES) @rm -f test2myproxyserver_put$(EXEEXT) 
$(test2myproxyserver_put_LINK) $(test2myproxyserver_put_OBJECTS) $(test2myproxyserver_put_LDADD) $(LIBS) test2vomsserver$(EXEEXT): $(test2vomsserver_OBJECTS) $(test2vomsserver_DEPENDENCIES) @rm -f test2vomsserver$(EXEEXT) $(test2vomsserver_LINK) $(test2vomsserver_OBJECTS) $(test2vomsserver_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcproxy-arcproxy.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcproxy-arcproxy_myproxy.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcproxy-arcproxy_proxy.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcproxy-arcproxy_voms.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcproxyalt-arcproxyalt.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test2myproxyserver_get-test2myproxyserver_get.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test2myproxyserver_put-test2myproxyserver_put.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test2vomsserver-test2vomsserver.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< arcproxy-arcproxy.o: arcproxy.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -MT arcproxy-arcproxy.o -MD -MP -MF $(DEPDIR)/arcproxy-arcproxy.Tpo -c -o arcproxy-arcproxy.o `test -f 'arcproxy.cpp' || echo '$(srcdir)/'`arcproxy.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcproxy-arcproxy.Tpo $(DEPDIR)/arcproxy-arcproxy.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcproxy.cpp' object='arcproxy-arcproxy.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -c -o arcproxy-arcproxy.o `test -f 'arcproxy.cpp' || echo '$(srcdir)/'`arcproxy.cpp arcproxy-arcproxy.obj: arcproxy.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -MT arcproxy-arcproxy.obj -MD -MP -MF $(DEPDIR)/arcproxy-arcproxy.Tpo -c -o arcproxy-arcproxy.obj `if test -f 'arcproxy.cpp'; then $(CYGPATH_W) 'arcproxy.cpp'; else $(CYGPATH_W) '$(srcdir)/arcproxy.cpp'; fi` @am__fastdepCXX_TRUE@ 
$(am__mv) $(DEPDIR)/arcproxy-arcproxy.Tpo $(DEPDIR)/arcproxy-arcproxy.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcproxy.cpp' object='arcproxy-arcproxy.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -c -o arcproxy-arcproxy.obj `if test -f 'arcproxy.cpp'; then $(CYGPATH_W) 'arcproxy.cpp'; else $(CYGPATH_W) '$(srcdir)/arcproxy.cpp'; fi` arcproxy-arcproxy_voms.o: arcproxy_voms.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -MT arcproxy-arcproxy_voms.o -MD -MP -MF $(DEPDIR)/arcproxy-arcproxy_voms.Tpo -c -o arcproxy-arcproxy_voms.o `test -f 'arcproxy_voms.cpp' || echo '$(srcdir)/'`arcproxy_voms.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcproxy-arcproxy_voms.Tpo $(DEPDIR)/arcproxy-arcproxy_voms.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcproxy_voms.cpp' object='arcproxy-arcproxy_voms.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -c -o arcproxy-arcproxy_voms.o `test -f 'arcproxy_voms.cpp' || echo '$(srcdir)/'`arcproxy_voms.cpp arcproxy-arcproxy_voms.obj: arcproxy_voms.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -MT arcproxy-arcproxy_voms.obj -MD -MP -MF $(DEPDIR)/arcproxy-arcproxy_voms.Tpo -c -o arcproxy-arcproxy_voms.obj `if test -f 'arcproxy_voms.cpp'; then $(CYGPATH_W) 'arcproxy_voms.cpp'; else $(CYGPATH_W) '$(srcdir)/arcproxy_voms.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcproxy-arcproxy_voms.Tpo $(DEPDIR)/arcproxy-arcproxy_voms.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcproxy_voms.cpp' object='arcproxy-arcproxy_voms.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -c -o arcproxy-arcproxy_voms.obj `if test -f 'arcproxy_voms.cpp'; then $(CYGPATH_W) 'arcproxy_voms.cpp'; else $(CYGPATH_W) '$(srcdir)/arcproxy_voms.cpp'; fi` arcproxy-arcproxy_myproxy.o: arcproxy_myproxy.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -MT arcproxy-arcproxy_myproxy.o -MD -MP -MF $(DEPDIR)/arcproxy-arcproxy_myproxy.Tpo -c -o arcproxy-arcproxy_myproxy.o `test -f 'arcproxy_myproxy.cpp' || echo '$(srcdir)/'`arcproxy_myproxy.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcproxy-arcproxy_myproxy.Tpo $(DEPDIR)/arcproxy-arcproxy_myproxy.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcproxy_myproxy.cpp' object='arcproxy-arcproxy_myproxy.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -c -o arcproxy-arcproxy_myproxy.o `test -f 'arcproxy_myproxy.cpp' || echo '$(srcdir)/'`arcproxy_myproxy.cpp arcproxy-arcproxy_myproxy.obj: arcproxy_myproxy.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -MT arcproxy-arcproxy_myproxy.obj -MD -MP -MF $(DEPDIR)/arcproxy-arcproxy_myproxy.Tpo -c -o arcproxy-arcproxy_myproxy.obj `if test -f 'arcproxy_myproxy.cpp'; then $(CYGPATH_W) 'arcproxy_myproxy.cpp'; else $(CYGPATH_W) '$(srcdir)/arcproxy_myproxy.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcproxy-arcproxy_myproxy.Tpo $(DEPDIR)/arcproxy-arcproxy_myproxy.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcproxy_myproxy.cpp' object='arcproxy-arcproxy_myproxy.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -c -o arcproxy-arcproxy_myproxy.obj `if test -f 'arcproxy_myproxy.cpp'; then $(CYGPATH_W) 'arcproxy_myproxy.cpp'; else $(CYGPATH_W) '$(srcdir)/arcproxy_myproxy.cpp'; fi` arcproxy-arcproxy_proxy.o: arcproxy_proxy.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -MT arcproxy-arcproxy_proxy.o -MD -MP -MF $(DEPDIR)/arcproxy-arcproxy_proxy.Tpo -c -o arcproxy-arcproxy_proxy.o `test -f 'arcproxy_proxy.cpp' || echo '$(srcdir)/'`arcproxy_proxy.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcproxy-arcproxy_proxy.Tpo $(DEPDIR)/arcproxy-arcproxy_proxy.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcproxy_proxy.cpp' object='arcproxy-arcproxy_proxy.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -c -o arcproxy-arcproxy_proxy.o `test -f 'arcproxy_proxy.cpp' || echo '$(srcdir)/'`arcproxy_proxy.cpp arcproxy-arcproxy_proxy.obj: arcproxy_proxy.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -MT arcproxy-arcproxy_proxy.obj -MD -MP -MF $(DEPDIR)/arcproxy-arcproxy_proxy.Tpo -c -o arcproxy-arcproxy_proxy.obj `if test -f 'arcproxy_proxy.cpp'; then $(CYGPATH_W) 'arcproxy_proxy.cpp'; else $(CYGPATH_W) '$(srcdir)/arcproxy_proxy.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcproxy-arcproxy_proxy.Tpo $(DEPDIR)/arcproxy-arcproxy_proxy.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcproxy_proxy.cpp' object='arcproxy-arcproxy_proxy.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -c -o arcproxy-arcproxy_proxy.obj `if test -f 'arcproxy_proxy.cpp'; then $(CYGPATH_W) 'arcproxy_proxy.cpp'; else $(CYGPATH_W) '$(srcdir)/arcproxy_proxy.cpp'; fi` arcproxyalt-arcproxyalt.o: arcproxyalt.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxyalt_CXXFLAGS) $(CXXFLAGS) -MT arcproxyalt-arcproxyalt.o -MD -MP -MF $(DEPDIR)/arcproxyalt-arcproxyalt.Tpo -c -o arcproxyalt-arcproxyalt.o `test -f 'arcproxyalt.cpp' || echo '$(srcdir)/'`arcproxyalt.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcproxyalt-arcproxyalt.Tpo $(DEPDIR)/arcproxyalt-arcproxyalt.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcproxyalt.cpp' object='arcproxyalt-arcproxyalt.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) 
$(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxyalt_CXXFLAGS) $(CXXFLAGS) -c -o arcproxyalt-arcproxyalt.o `test -f 'arcproxyalt.cpp' || echo '$(srcdir)/'`arcproxyalt.cpp arcproxyalt-arcproxyalt.obj: arcproxyalt.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxyalt_CXXFLAGS) $(CXXFLAGS) -MT arcproxyalt-arcproxyalt.obj -MD -MP -MF $(DEPDIR)/arcproxyalt-arcproxyalt.Tpo -c -o arcproxyalt-arcproxyalt.obj `if test -f 'arcproxyalt.cpp'; then $(CYGPATH_W) 'arcproxyalt.cpp'; else $(CYGPATH_W) '$(srcdir)/arcproxyalt.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcproxyalt-arcproxyalt.Tpo $(DEPDIR)/arcproxyalt-arcproxyalt.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcproxyalt.cpp' object='arcproxyalt-arcproxyalt.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxyalt_CXXFLAGS) $(CXXFLAGS) -c -o arcproxyalt-arcproxyalt.obj `if test -f 'arcproxyalt.cpp'; then $(CYGPATH_W) 'arcproxyalt.cpp'; else $(CYGPATH_W) '$(srcdir)/arcproxyalt.cpp'; fi` test2myproxyserver_get-test2myproxyserver_get.o: test2myproxyserver_get.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test2myproxyserver_get_CXXFLAGS) $(CXXFLAGS) -MT test2myproxyserver_get-test2myproxyserver_get.o -MD -MP -MF $(DEPDIR)/test2myproxyserver_get-test2myproxyserver_get.Tpo -c -o test2myproxyserver_get-test2myproxyserver_get.o `test -f 'test2myproxyserver_get.cpp' || echo '$(srcdir)/'`test2myproxyserver_get.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test2myproxyserver_get-test2myproxyserver_get.Tpo $(DEPDIR)/test2myproxyserver_get-test2myproxyserver_get.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test2myproxyserver_get.cpp' object='test2myproxyserver_get-test2myproxyserver_get.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test2myproxyserver_get_CXXFLAGS) $(CXXFLAGS) -c -o test2myproxyserver_get-test2myproxyserver_get.o `test -f 'test2myproxyserver_get.cpp' || echo '$(srcdir)/'`test2myproxyserver_get.cpp test2myproxyserver_get-test2myproxyserver_get.obj: test2myproxyserver_get.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test2myproxyserver_get_CXXFLAGS) $(CXXFLAGS) -MT test2myproxyserver_get-test2myproxyserver_get.obj -MD -MP -MF $(DEPDIR)/test2myproxyserver_get-test2myproxyserver_get.Tpo -c -o test2myproxyserver_get-test2myproxyserver_get.obj `if test -f 'test2myproxyserver_get.cpp'; then $(CYGPATH_W) 'test2myproxyserver_get.cpp'; else $(CYGPATH_W) '$(srcdir)/test2myproxyserver_get.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test2myproxyserver_get-test2myproxyserver_get.Tpo $(DEPDIR)/test2myproxyserver_get-test2myproxyserver_get.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test2myproxyserver_get.cpp' object='test2myproxyserver_get-test2myproxyserver_get.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test2myproxyserver_get_CXXFLAGS) 
$(CXXFLAGS) -c -o test2myproxyserver_get-test2myproxyserver_get.obj `if test -f 'test2myproxyserver_get.cpp'; then $(CYGPATH_W) 'test2myproxyserver_get.cpp'; else $(CYGPATH_W) '$(srcdir)/test2myproxyserver_get.cpp'; fi` test2myproxyserver_put-test2myproxyserver_put.o: test2myproxyserver_put.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test2myproxyserver_put_CXXFLAGS) $(CXXFLAGS) -MT test2myproxyserver_put-test2myproxyserver_put.o -MD -MP -MF $(DEPDIR)/test2myproxyserver_put-test2myproxyserver_put.Tpo -c -o test2myproxyserver_put-test2myproxyserver_put.o `test -f 'test2myproxyserver_put.cpp' || echo '$(srcdir)/'`test2myproxyserver_put.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test2myproxyserver_put-test2myproxyserver_put.Tpo $(DEPDIR)/test2myproxyserver_put-test2myproxyserver_put.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test2myproxyserver_put.cpp' object='test2myproxyserver_put-test2myproxyserver_put.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test2myproxyserver_put_CXXFLAGS) $(CXXFLAGS) -c -o test2myproxyserver_put-test2myproxyserver_put.o `test -f 'test2myproxyserver_put.cpp' || echo '$(srcdir)/'`test2myproxyserver_put.cpp test2myproxyserver_put-test2myproxyserver_put.obj: test2myproxyserver_put.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test2myproxyserver_put_CXXFLAGS) $(CXXFLAGS) -MT test2myproxyserver_put-test2myproxyserver_put.obj -MD -MP -MF $(DEPDIR)/test2myproxyserver_put-test2myproxyserver_put.Tpo -c -o test2myproxyserver_put-test2myproxyserver_put.obj `if test -f 'test2myproxyserver_put.cpp'; then $(CYGPATH_W) 'test2myproxyserver_put.cpp'; else $(CYGPATH_W) '$(srcdir)/test2myproxyserver_put.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test2myproxyserver_put-test2myproxyserver_put.Tpo $(DEPDIR)/test2myproxyserver_put-test2myproxyserver_put.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test2myproxyserver_put.cpp' object='test2myproxyserver_put-test2myproxyserver_put.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test2myproxyserver_put_CXXFLAGS) $(CXXFLAGS) -c -o test2myproxyserver_put-test2myproxyserver_put.obj `if test -f 'test2myproxyserver_put.cpp'; then $(CYGPATH_W) 'test2myproxyserver_put.cpp'; else $(CYGPATH_W) '$(srcdir)/test2myproxyserver_put.cpp'; fi` test2vomsserver-test2vomsserver.o: test2vomsserver.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test2vomsserver_CXXFLAGS) $(CXXFLAGS) -MT test2vomsserver-test2vomsserver.o -MD -MP -MF $(DEPDIR)/test2vomsserver-test2vomsserver.Tpo -c -o test2vomsserver-test2vomsserver.o `test -f 'test2vomsserver.cpp' || echo '$(srcdir)/'`test2vomsserver.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test2vomsserver-test2vomsserver.Tpo $(DEPDIR)/test2vomsserver-test2vomsserver.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test2vomsserver.cpp' object='test2vomsserver-test2vomsserver.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(test2vomsserver_CXXFLAGS) $(CXXFLAGS) -c -o test2vomsserver-test2vomsserver.o `test -f 'test2vomsserver.cpp' || echo '$(srcdir)/'`test2vomsserver.cpp test2vomsserver-test2vomsserver.obj: test2vomsserver.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test2vomsserver_CXXFLAGS) $(CXXFLAGS) -MT test2vomsserver-test2vomsserver.obj -MD -MP -MF $(DEPDIR)/test2vomsserver-test2vomsserver.Tpo -c -o test2vomsserver-test2vomsserver.obj `if test -f 'test2vomsserver.cpp'; then $(CYGPATH_W) 'test2vomsserver.cpp'; else $(CYGPATH_W) '$(srcdir)/test2vomsserver.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test2vomsserver-test2vomsserver.Tpo $(DEPDIR)/test2vomsserver-test2vomsserver.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test2vomsserver.cpp' object='test2vomsserver-test2vomsserver.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test2vomsserver_CXXFLAGS) $(CXXFLAGS) -c -o test2vomsserver-test2vomsserver.obj `if test -f 'test2vomsserver.cpp'; then $(CYGPATH_W) 'test2vomsserver.cpp'; else $(CYGPATH_W) '$(srcdir)/test2vomsserver.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man1: $(man_MANS) @$(NORMAL_INSTALL) test -z "$(man1dir)" || $(MKDIR_P) "$(DESTDIR)$(man1dir)" @list=''; test -n "$(man1dir)" || exit 0; \ { for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \ done; } uninstall-man1: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man1dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ test -z "$$files" || { \ echo " ( cd '$(DESTDIR)$(man1dir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(man1dir)" && rm -f $$files; } ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; 
then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @list='$(MANS)'; if test -n "$$list"; then \ list=`for p in $$list; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \ if test -n "$$list" && \ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) $(MANS) installdirs: for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-binPROGRAMS clean-generic clean-libtool \ clean-noinstPROGRAMS mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-binPROGRAMS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man1 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-binPROGRAMS uninstall-man uninstall-man: uninstall-man1 .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \ clean-generic clean-libtool clean-noinstPROGRAMS ctags \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-binPROGRAMS \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-man1 \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am \ uninstall-binPROGRAMS uninstall-man uninstall-man1 # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/clients/credentials/PaxHeaders.7502/arcproxy.h0000644000000000000000000000012413066751223023747 xustar000000000000000027 mtime=1490801299.996866 27 atime=1513200575.379712 30 ctime=1513200664.466802116 nordugrid-arc-5.4.2/src/clients/credentials/arcproxy.h0000644000175000002070000000325013066751223024014 0ustar00mockbuildmock00000000000000 // Functions in arcproxy_proxy.cpp // Create simple temporary proxy void create_tmp_proxy(std::string& proxy, Arc::Credential& signer); // Create proxy with all bells and whistles as specified in arguments void create_proxy(std::string& proxy, Arc::Credential& signer, const std::string& proxy_policy, const Arc::Time& proxy_start, const Arc::Period& proxy_period, const std::string& vomsacseq, int keybits, const std::string& signing_algorithm); // Store content of proxy void write_proxy_file(const std::string& path, const std::string& content); // Delete proxy file void remove_proxy_file(const std::string& path); // Delete certificate file void remove_cert_file(const std::string& path); // Functions in arcproxy_voms.cpp // Create simple temporary proxy // Collect VOMS AC from configured Voms servers bool contact_voms_servers(std::map >& vomscmdlist, std::list& orderlist, std::string& vomses_path, bool use_gsi_comm, bool use_http_comm, const std::string& voms_period, Arc::UserConfig& usercfg, Arc::Logger& logger, const std::string& tmp_proxy_path, std::string& vomsacseq); // Functions in arcproxy_myproxy.cpp // Communicate with MyProxy server bool contact_myproxy_server(const std::string& myproxy_server, const std::string& myproxy_command, const std::string& myproxy_user_name, bool use_empty_passphrase, const std::string& myproxy_period, const std::string& retrievable_by_cert, Arc::Time& proxy_start, Arc::Period& proxy_period, std::list& vomslist, std::string& vomses_path, const std::string& proxy_path, Arc::UserConfig& usercfg, Arc::Logger& logger); nordugrid-arc-5.4.2/src/clients/credentials/PaxHeaders.7502/arcproxy.cpp0000644000000000000000000000012413213442363024276 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200575.385712 30 ctime=1513200664.462802068 nordugrid-arc-5.4.2/src/clients/credentials/arcproxy.cpp0000644000175000002070000016621113213442363024352 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_NSS #include #endif #include "arcproxy.h" using namespace ArcCredential; #ifdef HAVE_NSS static void get_default_nssdb_path(std::vector& nss_paths) { const Arc::User user; // The profiles.ini could exist under firefox, seamonkey and thunderbird std::vector profiles_homes; #ifndef WIN32 std::string home_path = user.Home(); #else std::string home_path = Glib::get_home_dir(); #endif std::string profiles_home; #if defined(_MACOSX) profiles_home = home_path + G_DIR_SEPARATOR_S "Library" G_DIR_SEPARATOR_S "Application Support" G_DIR_SEPARATOR_S "Firefox"; profiles_homes.push_back(profiles_home); profiles_home = home_path + G_DIR_SEPARATOR_S "Library" G_DIR_SEPARATOR_S "Application Support" G_DIR_SEPARATOR_S "SeaMonkey"; profiles_homes.push_back(profiles_home); profiles_home = home_path + G_DIR_SEPARATOR_S "Library" G_DIR_SEPARATOR_S "Thunderbird"; profiles_homes.push_back(profiles_home); #elif defined(WIN32) //Windows Vista and Win7 profiles_home = home_path + G_DIR_SEPARATOR_S 
"AppData" G_DIR_SEPARATOR_S "Roaming" G_DIR_SEPARATOR_S "Mozilla" G_DIR_SEPARATOR_S "Firefox"; profiles_homes.push_back(profiles_home); profiles_home = home_path + G_DIR_SEPARATOR_S "AppData" G_DIR_SEPARATOR_S "Roaming" G_DIR_SEPARATOR_S "Mozilla" G_DIR_SEPARATOR_S "SeaMonkey"; profiles_homes.push_back(profiles_home); profiles_home = home_path + G_DIR_SEPARATOR_S "AppData" G_DIR_SEPARATOR_S "Roaming" G_DIR_SEPARATOR_S "Thunderbird"; profiles_homes.push_back(profiles_home); //WinXP and Win2000 profiles_home = home_path + G_DIR_SEPARATOR_S "Application Data" G_DIR_SEPARATOR_S "Mozilla" G_DIR_SEPARATOR_S "Firefox"; profiles_homes.push_back(profiles_home); profiles_home = home_path + G_DIR_SEPARATOR_S "Application Data" G_DIR_SEPARATOR_S "Mozilla" G_DIR_SEPARATOR_S "SeaMonkey"; profiles_homes.push_back(profiles_home); profiles_home = home_path + G_DIR_SEPARATOR_S "Application Data" G_DIR_SEPARATOR_S "Thunderbird"; profiles_homes.push_back(profiles_home); #else //Linux profiles_home = home_path + G_DIR_SEPARATOR_S ".mozilla" G_DIR_SEPARATOR_S "firefox"; profiles_homes.push_back(profiles_home); profiles_home = home_path + G_DIR_SEPARATOR_S ".mozilla" G_DIR_SEPARATOR_S "seamonkey"; profiles_homes.push_back(profiles_home); profiles_home = home_path + G_DIR_SEPARATOR_S ".thunderbird"; profiles_homes.push_back(profiles_home); #endif std::vector pf_homes; // Remove the unreachable directories for(int i=0; i ini_home; // Remove the unreachable "profiles.ini" files for(int i=0; i::iterator it; for(it = ini_home.begin(); it != ini_home.end(); ++it) { std::string pf_ini = (*it).first; std::string pf_home = (*it).second; std::string profiles; std::ifstream in_f(pf_ini.c_str()); std::getline(in_f, profiles, '\0'); std::list lines; Arc::tokenize(profiles, lines, "\n"); // Parse each [Profile] for (std::list::iterator i = lines.begin(); i != lines.end(); ++i) { std::vector inivalue; Arc::tokenize(*i, inivalue, "="); if((inivalue[0].find("Profile") != std::string::npos) && (inivalue[0].find("StartWithLast") == std::string::npos)) { bool is_relative = false; std::string path; std::advance(i, 1); for(; i != lines.end();) { inivalue.clear(); Arc::tokenize(*i, inivalue, "="); if (inivalue.size() == 2) { if (inivalue[0] == "IsRelative") { if(inivalue[1] == "1") is_relative = true; else is_relative = false; } if (inivalue[0] == "Path") path = inivalue[1]; } if(inivalue[0].find("Profile") != std::string::npos) { --i; break; } std::advance(i, 1); } std::string nss_path; if(is_relative) nss_path = pf_home + G_DIR_SEPARATOR_S + path; else nss_path = path; struct stat st; if((::stat(nss_path.c_str(),&st) == 0) && (S_ISDIR(st.st_mode)) && (user.get_uid() == st.st_uid)) nss_paths.push_back(nss_path); if(i == lines.end()) break; } } } return; } static void get_nss_certname(std::string& certname, Arc::Logger& logger) { std::list certInfolist; ArcAuthNSS::nssListUserCertificatesInfo(certInfolist); if(certInfolist.size()) { std::cout<::iterator it; for(it = certInfolist.begin(); it != certInfolist.end(); ++it) { ArcAuthNSS::certInfo cert_info = (*it); std::string sub_dn = cert_info.subject_dn; std::string cn_name; std::string::size_type pos1, pos2; pos1 = sub_dn.find("CN="); if(pos1 != std::string::npos) { pos2 = sub_dn.find(",", pos1); if(pos2 != std::string::npos) cn_name = " ("+sub_dn.substr(pos1+3, pos2-pos1-3) + ")"; } std::cout< cert_info.end) msg = "(expired)"; else if((now + 300) > cert_info.end) msg = "(will be expired in 5 min)"; else if((now + 3600*24) > cert_info.end) { Arc::Period left(cert_info.end - now); msg = 
std::string("(will be expired in ") + std::string(left) + ")"; } std::cout<1)) { char c = getchar(); int num = c - '0'; if((num<=certInfolist.size()) && (num>=1)) { it = certInfolist.begin(); std::advance(it, num-1); certname = (*it).certname; break; } } } #endif static std::string signTypeToString(Arc::Signalgorithm alg) { switch(alg) { case Arc::SIGN_SHA1: return "sha1"; case Arc::SIGN_SHA224: return "sha224"; case Arc::SIGN_SHA256: return "sha256"; case Arc::SIGN_SHA384: return "sha384"; case Arc::SIGN_SHA512: return "sha512"; default: break; } return "unknown"; } typedef enum { pass_all, pass_private_key, pass_myproxy, pass_myproxy_new, pass_nss } pass_destination_type; std::map passsources; class PasswordSourceFile: public Arc::PasswordSource { private: std::ifstream file_; public: PasswordSourceFile(const std::string& filename):file_(filename.c_str()) { }; virtual Result Get(std::string& password, int minsize, int maxsize) { if(!file_) return Arc::PasswordSource::NO_PASSWORD; std::getline(file_, password); return Arc::PasswordSource::PASSWORD; }; }; static int runmain(int argc, char *argv[]) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arcproxy"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); Arc::OptionParser options(" ", istring("The arcproxy command creates a proxy from a key/certificate pair which can\n" "then be used to access grid resources."), istring("Supported constraints are:\n" " validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start from now)\n" " validityEnd=time\n" " validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod and validityEnd\n" " not specified, the default is 12 hours for local proxy, and 168 hours for delegated\n" " proxy on myproxy server)\n" " vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, the default\n" " is the minimum value of 12 hours and validityPeriod)\n" " myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy server,\n" " e.g. 43200 or 12h or 12H; if not specified, the default is the minimum value of\n" " 12 hours and validityPeriod (which is lifetime of the delegated proxy on myproxy server))\n" " proxyPolicy=policy content\n" " proxyPolicyFile=policy file\n" " keybits=number - length of the key to generate. Default is 1024 bits.\n" " Special value 'inherit' is to use key length of signing certificate.\n" " signingAlgorithm=name - signing algorithm to use for signing public key of proxy.\n" " Possible values are sha1, sha2 (alias for sha256), sha224, sha256, sha384, sha512\n" " and inherit (use algorithm of signing certificate). 
Default is inherit.\n" " With old systems, only sha1 is acceptable.\n" "\n" "Supported information item names are:\n" " subject - subject name of proxy certificate.\n" " identity - identity subject name of proxy certificate.\n" " issuer - issuer subject name of proxy certificate.\n" " ca - subject name of CA ehich issued initial certificate\n" " path - file system path to file containing proxy.\n" " type - type of proxy certificate.\n" " validityStart - timestamp when proxy validity starts.\n" " validityEnd - timestamp when proxy validity ends.\n" " validityPeriod - duration of proxy validity in seconds.\n" " validityLeft - duration of proxy validity left in seconds.\n" " vomsVO - VO name represented by VOMS attribute\n" " vomsSubject - subject of certificate for which VOMS attribute is issued\n" " vomsIssuer - subject of service which issued VOMS certificate\n" " vomsACvalidityStart - timestamp when VOMS attribute validity starts.\n" " vomsACvalidityEnd - timestamp when VOMS attribute validity ends.\n" " vomsACvalidityPeriod - duration of VOMS attribute validity in seconds.\n" " vomsACvalidityLeft - duration of VOMS attribute validity left in seconds.\n" " proxyPolicy\n" " keybits - size of proxy certificate key in bits.\n" " signingAlgorithm - algorithm used to sign proxy certificate.\n" "Items are printed in requested order and are separated by newline.\n" "If item has multiple values they are printed in same line separated by |.\n" "\n" "Supported password destinations are:\n" " key - for reading private key\n" " myproxy - for accessing credentials at MyProxy service\n" " myproxynew - for creating credentials at MyProxy service\n" " all - for any purspose.\n" "\n" "Supported password sources are:\n" " quoted string (\"password\") - explicitly specified password\n" " int - interactively request password from console\n" " stdin - read password from standard input delimited by newline\n" " file:filename - read password from file named filename\n" " stream:# - read password from input stream number #.\n" " Currently only 0 (standard input) is supported.\n" )); std::string proxy_path; options.AddOption('P', "proxy", istring("path to the proxy file"), istring("path"), proxy_path); std::string cert_path; options.AddOption('C', "cert", istring("path to the certificate file, it can be either PEM, DER, or PKCS12 formated"), istring("path"), cert_path); std::string key_path; options.AddOption('K', "key", istring("path to the private key file, if the certificate is in PKCS12 format, then no need to give private key"), istring("path"), key_path); std::string ca_dir; options.AddOption('T', "cadir", istring("path to the trusted certificate directory, only needed for the VOMS client functionality"), istring("path"), ca_dir); std::string voms_dir; options.AddOption('s', "vomsdir", istring("path to the top directory of VOMS *.lsc files, only needed for the VOMS client functionality"), istring("path"), voms_dir); std::string vomses_path; options.AddOption('V', "vomses", istring("path to the VOMS server configuration file"), istring("path"), vomses_path); std::list vomslist; options.AddOption('S', "voms", istring("voms<:command>. Specify VOMS server (More than one VOMS server \n" " can be specified like this: --voms VOa:command1 --voms VOb:command2). 
\n" " :command is optional, and is used to ask for specific attributes(e.g: roles)\n" " command options are:\n" " all --- put all of this DN's attributes into AC; \n" " list ---list all of the DN's attribute, will not create AC extension; \n" " /Role=yourRole --- specify the role, if this DN \n" " has such a role, the role will be put into AC \n" " /voname/groupname/Role=yourRole --- specify the VO, group and role; if this DN \n" " has such a role, the role will be put into AC \n" ), istring("string"), vomslist); std::list orderlist; options.AddOption('o', "order", istring("group<:role>. Specify ordering of attributes \n" " Example: --order /knowarc.eu/coredev:Developer,/knowarc.eu/testers:Tester \n" " or: --order /knowarc.eu/coredev:Developer --order /knowarc.eu/testers:Tester \n" " Note that it does not make sense to specify the order if you have two or more different VOMS servers specified"), istring("string"), orderlist); bool use_gsi_comm = false; options.AddOption('G', "gsicom", istring("use GSI communication protocol for contacting VOMS services"), use_gsi_comm); bool use_http_comm = false; options.AddOption('H', "httpcom", istring("use HTTP communication protocol for contacting VOMS services that provide RESTful access \n" " Note for RESTful access, \'list\' command and multiple VOMS server are not supported\n" ), use_http_comm); bool use_gsi_proxy = false; options.AddOption('O', "old", istring("this option is not functional (old GSI proxies are not supported anymore)"), use_gsi_proxy); bool info = false; options.AddOption('I', "info", istring("print all information about this proxy."), info); std::list infoitemlist; options.AddOption('i', "infoitem", istring("print selected information about this proxy."), istring("string"), infoitemlist); bool remove_proxy = false; options.AddOption('r', "remove", istring("remove proxy"), remove_proxy); std::string user_name; //user name to MyProxy server options.AddOption('U', "user", istring("username to MyProxy server (if missing subject of user certificate is used)"), istring("string"), user_name); bool use_empty_passphrase = false; //if use empty passphrase to myproxy server options.AddOption('N', "nopassphrase", istring( "don't prompt for a credential passphrase, when retrieve a \n" " credential from on MyProxy server. \n" " The precondition of this choice is the credential is PUT onto\n" " the MyProxy server without a passphrase by using -R (--retrievable_by_cert) \n" " option when being PUTing onto Myproxy server. \n" " This option is specific for the GET command when contacting Myproxy server." ), use_empty_passphrase); std::string retrievable_by_cert; //if use empty passphrase to myproxy server options.AddOption('R', "retrievable_by_cert", istring( "Allow specified entity to retrieve credential without passphrase.\n" " This option is specific for the PUT command when contacting Myproxy server." ), istring("string"), retrievable_by_cert); std::string myproxy_server; //url of MyProxy server options.AddOption('L', "myproxysrv", istring("hostname[:port] of MyProxy server"), istring("string"), myproxy_server); std::string myproxy_command; //command to myproxy server options.AddOption('M', "myproxycmd", istring( "command to MyProxy server. 
The command can be PUT, GET, INFO, NEWPASS or DESTROY.\n" " PUT -- put a delegated credentials to the MyProxy server; \n" " GET -- get a delegated credentials from the MyProxy server; \n" " INFO -- get and present information about credentials stored at the MyProxy server; \n" " NEWPASS -- change password protecting credentials stored at the MyProxy server; \n" " DESTROY -- wipe off credentials stored at the MyProxy server; \n" " Local credentials (certificate and key) are not necessary except in case of PUT. \n" " MyProxy functionality can be used together with VOMS functionality.\n" " --voms and --vomses can be used for Get command if VOMS attributes\n" " is required to be included in the proxy.\n" ), istring("string"), myproxy_command); bool use_nssdb = false; #ifdef HAVE_NSS options.AddOption('F', "nssdb", istring("use NSS credential database in default Mozilla profiles, \n" " including Firefox, Seamonkey and Thunderbird.\n"), use_nssdb); #endif std::list constraintlist; options.AddOption('c', "constraint", istring("proxy constraints"), istring("string"), constraintlist); std::list passsourcelist; options.AddOption('p', "passwordsource", istring("password destination=password source"), istring("string"), passsourcelist); int timeout = -1; options.AddOption('t', "timeout", istring("timeout in seconds (default 20)"), istring("seconds"), timeout); std::string conffile; options.AddOption('z', "conffile", istring("configuration file (default ~/.arc/client.conf)"), istring("filename"), conffile); std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); bool version = false; options.AddOption('v', "version", istring("print version information"), version); std::list params = options.Parse(argc, argv); if (version) { std::cout << Arc::IString("%s version %s", "arcproxy", VERSION) << std::endl; return EXIT_SUCCESS; } // If debug is specified as argument, it should be set before loading the configuration. if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug)); logger.msg(Arc::VERBOSE, "Running command: %s", options.GetCommandWithArguments()); // This ensure command line args overwrite all other options if(!cert_path.empty())Arc::SetEnv("X509_USER_CERT", cert_path); if(!key_path.empty())Arc::SetEnv("X509_USER_KEY", key_path); if(!proxy_path.empty())Arc::SetEnv("X509_USER_PROXY", proxy_path); if(!ca_dir.empty())Arc::SetEnv("X509_CERT_DIR", ca_dir); // Set default, predefined or guessed credentials. Also check if they exist. #ifdef HAVE_NSS Arc::UserConfig usercfg(conffile, Arc::initializeCredentialsType(Arc::initializeCredentialsType::TryCredentials)); #else Arc::UserConfig usercfg(conffile, Arc::initializeCredentialsType(Arc::initializeCredentialsType::TryCredentials)); #endif if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization."); return EXIT_FAILURE; } if(use_nssdb) { usercfg.CertificatePath("");; usercfg.KeyPath("");; } // Check for needed credentials objects // Can proxy be used for? Could not find it in documentation. 
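  // The check below requires a certificate/key pair only when it will actually
  // be used: it is skipped for the MyProxy GET command, when the NSS database
  // is selected, and when the invocation merely prints information about or
  // removes an existing proxy.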
// Key and certificate not needed if only printing proxy information if ( (!(Arc::lower(myproxy_command) == "get")) && (!use_nssdb) ) { if((usercfg.CertificatePath().empty() || ( usercfg.KeyPath().empty() && (usercfg.CertificatePath().find(".p12") == std::string::npos) ) ) && !(info || (infoitemlist.size() > 0) || remove_proxy)) { logger.msg(Arc::ERROR, "Failed to find certificate and/or private key or files have improper permissions or ownership."); logger.msg(Arc::ERROR, "You may try to increase verbosity to get more information."); return EXIT_FAILURE; } } if(!vomslist.empty() || !myproxy_command.empty()) { // For external communication CAs are needed if(usercfg.CACertificatesDirectory().empty()) { logger.msg(Arc::ERROR, "Failed to find CA certificates"); logger.msg(Arc::ERROR, "Cannot find the CA certificates directory path, " "please set environment variable X509_CERT_DIR, " "or cacertificatesdirectory in a configuration file."); logger.msg(Arc::ERROR, "You may try to increase verbosity to get more information."); logger.msg(Arc::ERROR, "The CA certificates directory is required for " "contacting VOMS and MyProxy servers."); return EXIT_FAILURE; } } // Convert list of voms+command into more convenient structure std::map > vomscmdlist; if (!vomslist.empty()) { if (vomses_path.empty()) vomses_path = usercfg.VOMSESPath(); if (vomses_path.empty()) { logger.msg(Arc::ERROR, "$X509_VOMS_FILE, and $X509_VOMSES are not set;\n" "User has not specified the location for vomses information;\n" "There is also not vomses location information in user's configuration file;\n" "Can not find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses,\n" "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/vomses,\n" "/etc/vomses, /etc/grid-security/vomses, and the location at the corresponding sub-directory"); return false; } for(std::list::iterator v = vomslist.begin(); v != vomslist.end(); ++v) { std::string::size_type p = v->find(':'); if(p == std::string::npos) { vomscmdlist[*v].push_back(""); } else { vomscmdlist[v->substr(0,p)].push_back(v->substr(p+1)); *v = v->substr(0,p); } } // Remove duplicates vomslist.sort(); vomslist.unique(); } // Proxy is special case. We either need default or predefined path. // No guessing or testing is needed. // By running credentials initialization once more all set values // won't change. But proxy will get default value if not set. 
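  // The temporary UserConfig below is created with NotTryCredentials purely to
  // resolve the default proxy location; the path obtained from it is then
  // stored back into the main usercfg.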
{ Arc::UserConfig tmpcfg(conffile, Arc::initializeCredentialsType(Arc::initializeCredentialsType::NotTryCredentials)); if(proxy_path.empty()) proxy_path = tmpcfg.ProxyPath(); usercfg.ProxyPath(proxy_path); } // Get back all paths if(key_path.empty()) key_path = usercfg.KeyPath(); if(cert_path.empty()) cert_path = usercfg.CertificatePath(); if(ca_dir.empty()) ca_dir = usercfg.CACertificatesDirectory(); if(voms_dir.empty()) voms_dir = Arc::GetEnv("X509_VOMS_DIR"); if (debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); if (timeout > 0) usercfg.Timeout(timeout); Arc::User user; if (!params.empty()) { logger.msg(Arc::ERROR, "Wrong number of arguments!"); return EXIT_FAILURE; } const Arc::Time now; if (remove_proxy) { if (proxy_path.empty()) { logger.msg(Arc::ERROR, "Cannot find the path of the proxy file, " "please setup environment X509_USER_PROXY, " "or proxypath in a configuration file"); return EXIT_FAILURE; } if(!Arc::FileDelete(proxy_path)) { if(errno != ENOENT) { logger.msg(Arc::ERROR, "Cannot remove proxy file at %s", proxy_path); } else { logger.msg(Arc::ERROR, "Cannot remove proxy file at %s, because it's not there", proxy_path); } return EXIT_FAILURE; } return EXIT_SUCCESS; } if (info) { std::vector voms_attributes; bool res = false; if (proxy_path.empty()) { logger.msg(Arc::ERROR, "Cannot find the path of the proxy file, " "please setup environment X509_USER_PROXY, " "or proxypath in a configuration file"); return EXIT_FAILURE; } else if (!(Glib::file_test(proxy_path, Glib::FILE_TEST_EXISTS))) { logger.msg(Arc::ERROR, "Cannot find file at %s for getting the proxy. " "Please make sure this file exists.", proxy_path); return EXIT_FAILURE; } Arc::Credential holder(proxy_path, "", "", ""); std::cout << Arc::IString("Subject: %s", holder.GetDN()) << std::endl; std::cout << Arc::IString("Issuer: %s", holder.GetIssuerName()) << std::endl; std::cout << Arc::IString("Identity: %s", holder.GetIdentityName()) << std::endl; if (holder.GetEndTime() < now) std::cout << Arc::IString("Time left for proxy: Proxy expired") << std::endl; else if (now < holder.GetStartTime()) std::cout << Arc::IString("Time left for proxy: Proxy not valid yet") << std::endl; else std::cout << Arc::IString("Time left for proxy: %s", (holder.GetEndTime() - now).istr()) << std::endl; std::cout << Arc::IString("Proxy path: %s", proxy_path) << std::endl; std::cout << Arc::IString("Proxy type: %s", certTypeToString(holder.GetType())) << std::endl; std::cout << Arc::IString("Proxy key length: %i", holder.GetKeybits()) << std::endl; std::cout << Arc::IString("Proxy signature: %s", signTypeToString(holder.GetSigningAlgorithm())) << std::endl; Arc::VOMSTrustList voms_trust_dn; voms_trust_dn.AddRegex(".*"); res = parseVOMSAC(holder, ca_dir, "", voms_dir, voms_trust_dn, voms_attributes, true, true); // Not printing error message because parseVOMSAC will print everything itself //if (!res) logger.msg(Arc::ERROR, "VOMS attribute parsing failed"); for(int n = 0; n 0) { std::cout<<"====== "< voms_attributes[n].till) { std::cout << Arc::IString("Time left for AC: AC has expired")< 0) { std::vector voms_attributes; Arc::Credential holder(proxy_path, "", "", ""); Arc::VOMSTrustList voms_trust_dn; voms_trust_dn.AddRegex(".*"); parseVOMSAC(holder, ca_dir, "", voms_dir, voms_trust_dn, voms_attributes, true, true); for(std::list::iterator ii = infoitemlist.begin(); ii != infoitemlist.end(); ++ii) { if(*ii == "subject") { std::cout << holder.GetDN() << 
std::endl; } else if(*ii == "identity") { std::cout << holder.GetIdentityName() << std::endl; } else if(*ii == "issuer") { std::cout << holder.GetIssuerName() << std::endl; } else if(*ii == "ca") { std::cout << holder.GetCAName() << std::endl; } else if(*ii == "path") { std::cout << proxy_path << std::endl; } else if(*ii == "type") { std::cout << certTypeToString(holder.GetType()) << std::endl; // todo:less human readable } else if(*ii == "validityStart") { std::cout << holder.GetStartTime().GetTime() << std::endl; } else if(*ii == "validityEnd") { std::cout << holder.GetEndTime().GetTime() << std::endl; } else if(*ii == "validityPeriod") { std::cout << (holder.GetEndTime() - holder.GetStartTime()).GetPeriod() << std::endl; } else if(*ii == "validityLeft") { std::cout << ((nownow)?(voms_attributes[n].till-now):Arc::Period(0)).GetPeriod(); } std::cout << std::endl; } else if(*ii == "proxyPolicy") { std::cout << holder.GetProxyPolicy() << std::endl; } else if(*ii == "keybits") { std::cout << holder.GetKeybits() << std::endl; } else if(*ii == "signingAlgorithm") { std::cout << signTypeToString(holder.GetSigningAlgorithm()) << std::endl; } else { logger.msg(Arc::ERROR, "Information item '%s' is not known",*ii); } } return EXIT_SUCCESS; } if ((cert_path.empty() || key_path.empty()) && (Arc::lower(myproxy_command) == "put")) { if (cert_path.empty()) logger.msg(Arc::ERROR, "Cannot find the user certificate path, " "please setup environment X509_USER_CERT, " "or certificatepath in a configuration file"); if (key_path.empty()) logger.msg(Arc::ERROR, "Cannot find the user private key path, " "please setup environment X509_USER_KEY, " "or keypath in a configuration file"); return EXIT_FAILURE; } std::map constraints; for (std::list::iterator it = constraintlist.begin(); it != constraintlist.end(); ++it) { std::string::size_type pos = it->find('='); if (pos != std::string::npos) constraints[it->substr(0, pos)] = it->substr(pos + 1); else constraints[*it] = ""; } std::map > passprompts; passprompts[pass_private_key] = std::pair("private key",false); passprompts[pass_myproxy] = std::pair("MyProxy server",false); passprompts[pass_myproxy_new] = std::pair("MyProxy server (new)",true); for (std::list::iterator it = passsourcelist.begin(); it != passsourcelist.end(); ++it) { std::string::size_type pos = it->find('='); if (pos == std::string::npos) { logger.msg(Arc::ERROR, "Cannot parse password source expression %s " "it must be of type=source format", *it); return EXIT_FAILURE; } std::string dest = it->substr(0, pos); pass_destination_type pass_dest; if(dest == "key") { pass_dest = pass_private_key; } else if(dest == "myproxy") { pass_dest = pass_myproxy; } else if(dest == "myproxynew") { pass_dest = pass_myproxy_new; } else if(dest == "nss") { pass_dest = pass_nss; } else if(dest == "all") { pass_dest = pass_all; } else { logger.msg(Arc::ERROR, "Cannot parse password type %s. 
" "Currently supported values are 'key','myproxy','myproxynew' and 'all'.", dest); return EXIT_FAILURE; } Arc::PasswordSource* pass_source; std::string pass = it->substr(pos + 1); if((pass[0] == '"') && (pass[pass.length()-1] == '"')) { pass_source = new Arc::PasswordSourceString(pass.substr(1,pass.length()-2)); } else if(pass == "int") { pass_source = new Arc::PasswordSourceInteractive(passprompts[pass_private_key].first,passprompts[pass_private_key].second); } else if(pass == "stdin") { pass_source = new Arc::PasswordSourceStream(&std::cin); } else { pos = pass.find(':'); if(pos == std::string::npos) { logger.msg(Arc::ERROR, "Cannot parse password source %s " "it must be of source_type or source_type:data format. " "Supported source types are int,stdin,stream,file.", pass); return EXIT_FAILURE; } std::string data = pass.substr(pos + 1); pass.resize(pos); if(pass == "file") { pass_source = new PasswordSourceFile(data); // TODO: combine same files } else if(pass == "stream") { if(data == "0") { pass_source = new Arc::PasswordSourceStream(&std::cin); } else { logger.msg(Arc::ERROR, "Only standard input is currently supported " "for password source."); return EXIT_FAILURE; } } else { logger.msg(Arc::ERROR, "Cannot parse password source type %s. " "Supported source types are int,stdin,stream,file.", pass); return EXIT_FAILURE; } } if(pass_source) { if(pass_dest != pass_all) { passsources[pass_dest] = pass_source; } else { passsources[pass_private_key] = pass_source; passsources[pass_myproxy] = pass_source; passsources[pass_myproxy_new] = pass_source; passsources[pass_nss] = pass_source; } } } for(std::map >::iterator p = passprompts.begin(); p != passprompts.end();++p) { if(passsources.find(p->first) == passsources.end()) { passsources[p->first] = new Arc::PasswordSourceInteractive(p->second.first,p->second.second); } } //proxy validity period //Set the default proxy validity lifetime to 12 hours if there is //no validity lifetime provided by command caller // Set default values first // TODO: Is default validityPeriod since now or since validityStart? Arc::Time validityStart = now; // now by default Arc::Period validityPeriod(12*60*60); if (Arc::lower(myproxy_command) == "put") { //For myproxy PUT operation, the proxy should be 7 days according to the default //definition in myproxy implementation. validityPeriod = 7*24*60*60; } // Acquire constraints. Check for valid values and conflicts. 
if((!constraints["validityStart"].empty()) && (!constraints["validityEnd"].empty()) && (!constraints["validityPeriod"].empty())) { std::cerr << Arc::IString("The start, end and period can't be set simultaneously") << std::endl; return EXIT_FAILURE; } if(!constraints["validityStart"].empty()) { validityStart = Arc::Time(constraints["validityStart"]); if (validityStart == Arc::Time(Arc::Time::UNDEFINED)) { std::cerr << Arc::IString("The start time that you set: %s can't be recognized.", (std::string)constraints["validityStart"]) << std::endl; return EXIT_FAILURE; } } if(!constraints["validityPeriod"].empty()) { validityPeriod = Arc::Period(constraints["validityPeriod"]); if (validityPeriod.GetPeriod() <= 0) { std::cerr << Arc::IString("The period that you set: %s can't be recognized.", (std::string)constraints["validityPeriod"]) << std::endl; return EXIT_FAILURE; } } if(!constraints["validityEnd"].empty()) { Arc::Time validityEnd = Arc::Time(constraints["validityEnd"]); if (validityEnd == Arc::Time(Arc::Time::UNDEFINED)) { std::cerr << Arc::IString("The end time that you set: %s can't be recognized.", (std::string)constraints["validityEnd"]) << std::endl; return EXIT_FAILURE; } if(!constraints["validityPeriod"].empty()) { // If period is explicitly set then start is derived from end and period validityStart = validityEnd - validityPeriod; } else { // otherwise start - optionally - and end are set, period is derived if(validityEnd < validityStart) { std::cerr << Arc::IString("The end time that you set: %s is before start time:%s.", (std::string)validityEnd,(std::string)validityStart) << std::endl; // error return EXIT_FAILURE; } validityPeriod = validityEnd - validityStart; } } // Here we have validityStart and validityPeriod defined Arc::Time validityEnd = validityStart + validityPeriod; // Warn user about strange times but do not prevent user from doing anything legal if(validityStart < now) { std::cout << Arc::IString("WARNING: The start time that you set: %s is before current time: %s", (std::string)validityStart, (std::string)now) << std::endl; } if(validityEnd < now) { std::cout << Arc::IString("WARNING: The end time that you set: %s is before current time: %s", (std::string)validityEnd, (std::string)now) << std::endl; } //voms AC valitity period //Set the default voms AC validity lifetime to 12 hours if there is //no validity lifetime provided by command caller Arc::Period vomsACvalidityPeriod(12*60*60); if(!constraints["vomsACvalidityPeriod"].empty()) { vomsACvalidityPeriod = Arc::Period(constraints["vomsACvalidityPeriod"]); if (vomsACvalidityPeriod.GetPeriod() == 0) { std::cerr << Arc::IString("The VOMS AC period that you set: %s can't be recognized.", (std::string)constraints["vomsACvalidityPeriod"]) << std::endl; return EXIT_FAILURE; } } else { if(validityPeriod < vomsACvalidityPeriod) vomsACvalidityPeriod = validityPeriod; // It is strange that VOMS AC may be valid less than proxy itself. // Maybe it would be more correct to have it valid by default from // now till validityEnd. } std::string voms_period = Arc::tostring(vomsACvalidityPeriod.GetPeriod()); //myproxy validity period. 
//Set the default myproxy validity lifetime to 12 hours if there is //no validity lifetime provided by command caller Arc::Period myproxyvalidityPeriod(12*60*60); if(!constraints["myproxyvalidityPeriod"].empty()) { myproxyvalidityPeriod = Arc::Period(constraints["myproxyvalidityPeriod"]); if (myproxyvalidityPeriod.GetPeriod() == 0) { std::cerr << Arc::IString("The MyProxy period that you set: %s can't be recognized.", (std::string)constraints["myproxyvalidityPeriod"]) << std::endl; return EXIT_FAILURE; } } else { if(validityPeriod < myproxyvalidityPeriod) myproxyvalidityPeriod = validityPeriod; // see vomsACvalidityPeriod } std::string myproxy_period = Arc::tostring(myproxyvalidityPeriod.GetPeriod()); std::string signing_algorithm = constraints["signingAlgorithm"]; int keybits = 0; if(!constraints["keybits"].empty()) { if(constraints["keybits"] == "inherit") { keybits = -1; } else if((!Arc::stringto(constraints["keybits"],keybits)) || (keybits <= 0)) { std::cerr << Arc::IString("The keybits constraint is wrong: %s.", (std::string)constraints["keybits"]) << std::endl; return EXIT_FAILURE; } } #ifdef HAVE_NSS // TODO: move to spearate file //Using nss db dominate other option if(use_nssdb) { // Get the nss db paths from firefox's profile.ini file std::vector nssdb_paths; get_default_nssdb_path(nssdb_paths); if(nssdb_paths.empty()) { std::cout << Arc::IString("The NSS database can not be detected in the Firefox profile") << std::endl; return EXIT_FAILURE; } // Let user to choose which profile to use // if multiple profiles exist bool res; std::string configdir; if(nssdb_paths.size() > 1) { std::cout<=1)) { configdir = nssdb_paths[num-1]; break; } } } else { configdir = nssdb_paths[0]; } res = ArcAuthNSS::nssInit(configdir); std::cout<< Arc::IString("NSS database to be accessed: %s\n", configdir.c_str()); //The nss db under firefox profile seems to not be protected by any passphrase by default bool ascii = true; const char* trusts = "u,u,u"; // Generate CSR std::string proxy_csrfile = "proxy.csr"; std::string proxy_keyname = "proxykey"; std::string proxy_privk_str; res = ArcAuthNSS::nssGenerateCSR(proxy_keyname, "CN=Test,OU=ARC,O=EMI", *passsources[pass_nss], proxy_csrfile, proxy_privk_str, ascii); if(!res) return EXIT_FAILURE; // Create a temporary proxy and contact voms server std::string issuername; std::string vomsacseq; if (!vomslist.empty()) { std::string tmp_proxy_path; if(!Arc::TmpFileCreate(tmp_proxy_path,"")) return EXIT_FAILURE; get_nss_certname(issuername, logger); // Create tmp proxy cert int duration = 12; res = ArcAuthNSS::nssCreateCert(proxy_csrfile, issuername, NULL, duration, "", tmp_proxy_path, ascii); if(!res) { remove_proxy_file(tmp_proxy_path); return EXIT_FAILURE; } // TODO: Use FileUtils std::string tmp_proxy_cred_str; std::ifstream tmp_proxy_cert_s(tmp_proxy_path.c_str()); std::getline(tmp_proxy_cert_s, tmp_proxy_cred_str,'\0'); tmp_proxy_cert_s.close(); // Export EEC std::string cert_file; if(!Arc::TmpFileCreate(cert_file,"")) { remove_proxy_file(tmp_proxy_path); return EXIT_FAILURE; } res = ArcAuthNSS::nssExportCertificate(issuername, cert_file); if(!res) { remove_cert_file(cert_file); remove_proxy_file(tmp_proxy_path); return EXIT_FAILURE; } std::string eec_cert_str; std::ifstream eec_s(cert_file.c_str()); std::getline(eec_s, eec_cert_str,'\0'); eec_s.close(); remove_cert_file(cert_file); // Compose tmp proxy file tmp_proxy_cred_str.append(proxy_privk_str).append(eec_cert_str); write_proxy_file(tmp_proxy_path, tmp_proxy_cred_str); 
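      // tmp_proxy_path now holds the freshly signed temporary proxy certificate
      // followed by its private key and the EEC certificate exported from the
      // NSS database; this bundle is what is presented to the VOMS server below.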
if(!contact_voms_servers(vomscmdlist, orderlist, vomses_path, use_gsi_comm, use_http_comm, voms_period, usercfg, logger, tmp_proxy_path, vomsacseq)) { remove_proxy_file(tmp_proxy_path); return EXIT_FAILURE; } remove_proxy_file(tmp_proxy_path); } // Create proxy with VOMS AC std::string proxy_certfile = "myproxy.pem"; // Let user to choose which credential to use if(issuername.empty()) get_nss_certname(issuername, logger); std::cout< voms_attributes; bool r = parseVOMSAC(holder, ca_dir, "", voms_dir, voms_trust_dn, voms_attributes, true, true); if (!r) logger.msg(Arc::ERROR, "VOMS attribute parsing failed"); if(voms_attributes.size() == 0) { logger.msg(Arc::INFO, "Myproxy server did not return proxy with VOMS AC included"); std::string vomsacseq; contact_voms_servers(vomscmdlist, orderlist, vomses_path, use_gsi_comm, use_http_comm, voms_period, usercfg, logger, proxy_path, vomsacseq); if(!vomsacseq.empty()) { Arc::Credential signer(proxy_path, proxy_path, "", ""); std::string proxy_cert; create_proxy(proxy_cert, signer, policy, proxy_start, proxy_period, vomsacseq, keybits, signing_algorithm); write_proxy_file(proxy_path, proxy_cert); } } return EXIT_SUCCESS; } else return EXIT_FAILURE; } //Create proxy or voms proxy try { Arc::Credential signer(cert_path, key_path, "", "", *passsources[pass_private_key]); if (signer.GetIdentityName().empty()) { std::cerr << Arc::IString("Proxy generation failed: No valid certificate found.") << std::endl; return EXIT_FAILURE; } #ifndef WIN32 EVP_PKEY* pkey = signer.GetPrivKey(); if(!pkey) { std::cerr << Arc::IString("Proxy generation failed: No valid private key found.") << std::endl; return EXIT_FAILURE; } if(pkey) EVP_PKEY_free(pkey); #endif std::cout << Arc::IString("Your identity: %s", signer.GetIdentityName()) << std::endl; if (now > signer.GetEndTime()) { std::cerr << Arc::IString("Proxy generation failed: Certificate has expired.") << std::endl; return EXIT_FAILURE; } else if (now < signer.GetStartTime()) { std::cerr << Arc::IString("Proxy generation failed: Certificate is not valid yet.") << std::endl; return EXIT_FAILURE; } std::string vomsacseq; if (!vomslist.empty()) { //Generate a temporary self-signed proxy certificate //to contact the voms server std::string tmp_proxy_path; std::string tmp_proxy; if(!Arc::TmpFileCreate(tmp_proxy_path,"")) { std::cerr << Arc::IString("Proxy generation failed: Failed to create temporary file.") << std::endl; return EXIT_FAILURE; } create_tmp_proxy(tmp_proxy, signer); write_proxy_file(tmp_proxy_path, tmp_proxy); if(!contact_voms_servers(vomscmdlist, orderlist, vomses_path, use_gsi_comm, use_http_comm, voms_period, usercfg, logger, tmp_proxy_path, vomsacseq)) { remove_proxy_file(tmp_proxy_path); std::cerr << Arc::IString("Proxy generation failed: Failed to retrieve VOMS information.") << std::endl; return EXIT_FAILURE; } remove_proxy_file(tmp_proxy_path); } std::string proxy_cert; create_proxy(proxy_cert, signer, policy, proxy_start, proxy_period, vomsacseq, keybits, signing_algorithm); //If myproxy command is "Put", then the proxy path is set to /tmp/myproxy-proxy.uid.pid if (Arc::lower(myproxy_command) == "put") proxy_path = Glib::build_filename(Glib::get_tmp_dir(), "myproxy-proxy." 
+ Arc::tostring(user.get_uid()) + Arc::tostring((int)(getpid()))); write_proxy_file(proxy_path,proxy_cert); Arc::Credential proxy_cred(proxy_path, proxy_path, "", ""); Arc::Time left = proxy_cred.GetEndTime(); std::cout << Arc::IString("Proxy generation succeeded") << std::endl; std::cout << Arc::IString("Your proxy is valid until: %s", left.str(Arc::UserTime)) << std::endl; //return EXIT_SUCCESS; } catch (std::exception& err) { logger.msg(Arc::ERROR, err.what()); return EXIT_FAILURE; } //Delegate the former self-delegated credential to //myproxy server if (Arc::lower(myproxy_command) == "put") { bool res = contact_myproxy_server( myproxy_server, myproxy_command, user_name, use_empty_passphrase, myproxy_period, retrievable_by_cert, proxy_start, proxy_period, vomslist, vomses_path, proxy_path, usercfg, logger); if(res) return EXIT_SUCCESS; else return EXIT_FAILURE; } return EXIT_SUCCESS; } int main(int argc, char **argv) { int xr = runmain(argc,argv); _exit(xr); return 0; } nordugrid-arc-5.4.2/src/clients/credentials/PaxHeaders.7502/test2myproxyserver_get.cpp0000644000000000000000000000012412042216423027221 xustar000000000000000027 mtime=1351163155.111915 27 atime=1513200575.381712 30 ctime=1513200664.468802141 nordugrid-arc-5.4.2/src/clients/credentials/test2myproxyserver_get.cpp0000644000175000002070000001424412042216423027273 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include int main(void) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "Test2MyProxyServer"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::DEBUG); std::string cert("../../tests/echo/usercert.pem"); std::string key("../../tests/echo/userkey.pem"); std::string cadir("../../tests/echo/certificates/"); Arc::Credential signer(cert, key, cadir, ""); //Generate a temporary self-signed proxy certificate //to contact the myproxy server std::string private_key, signing_cert, signing_cert_chain; std::string out_file("./tmpproxy.pem"); Arc::Time t; int keybits = 1024; std::string req_str; Arc::Credential cred_request(t, Arc::Period(12 * 3600), keybits, "rfc", "inheritAll", "", -1); cred_request.GenerateRequest(req_str); Arc::Credential proxy; proxy.InquireRequest(req_str); signer.SignRequest(&proxy, out_file.c_str()); cred_request.OutputPrivatekey(private_key); signer.OutputCertificate(signing_cert); signer.OutputCertificateChain(signing_cert_chain); std::ofstream out_f(out_file.c_str(), std::ofstream::app); out_f.write(private_key.c_str(), private_key.size()); out_f.write(signing_cert.c_str(), signing_cert.size()); out_f.write(signing_cert_chain.c_str(), signing_cert_chain.size()); out_f.close(); //Contact the myproxy server to get a delegated certificate from that server // The message which will be sent to myproxy server //"GET" command std::string send_msg("VERSION=MYPROXYv2\n COMMAND=0\n USERNAME=mytest\n PASSPHRASE=123456\n LIFETIME=43200\n"); std::cout << "Send message to peer end through GSS communication: " << send_msg << " Size: " << send_msg.length() << std::endl; //For "GET" command, client authentication is optional Arc::MCCConfig cfg; cfg.AddProxy(out_file); cfg.AddCADir("$HOME/.globus/certificates/"); //Arc::ClientTCP client(cfg, "127.0.0.1", 7512, Arc::GSISec, 60); Arc::ClientTCP client(cfg, "example.org", 7512, Arc::GSISec, 60); 
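  // The exchange that follows implements the client side of the MyProxy GET
  // protocol: the plain-text VERSION/COMMAND/USERNAME/PASSPHRASE/LIFETIME
  // request is sent first, then a locally generated certificate request, and
  // the signed proxy chain is read back. "example.org" above is a placeholder
  // host name for a test MyProxy endpoint (7512 is the usual MyProxy port).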
Arc::PayloadRaw request; request.Insert(send_msg.c_str(), 0, send_msg.length()); //Arc::PayloadRawInterface& buffer = dynamic_cast(request); //std::cout<<"Message in PayloadRaw: "<<((Arc::PayloadRawInterface&)buffer).Content()<Get(&ret_buf[0], len); ret_str.append(ret_buf, len); memset(ret_buf, 0, 1024); } while (len == 1024); logger.msg(Arc::INFO, "Returned msg from myproxy server: %s %d", ret_str.c_str(), ret_str.length()); if (response) { delete response; response = NULL; } //Generate a certificate request, //and send it to myproxy server std::string x509_req_str; Arc::Time start; Arc::Credential x509_request(start, Arc::Period(), 1024); x509_request.GenerateRequest(x509_req_str, true); std::string proxy_key_str; x509_request.OutputPrivatekey(proxy_key_str); Arc::PayloadRaw request1; request1.Insert(x509_req_str.c_str(), 0, x509_req_str.length()); status = client.process(&request1, &response, true); if (!status) { logger.msg(Arc::ERROR, (std::string)status); if (response) delete response; return 1; } if (!response) { logger.msg(Arc::ERROR, "No stream response"); return 1; } std::string ret_str1; memset(ret_buf, 0, 1024); do { len = 1024; response->Get(&ret_buf[0], len); ret_str1.append(ret_buf, len); memset(ret_buf, 0, 1024); } while (len == 1024); logger.msg(Arc::INFO, "Returned msg from myproxy server: %s %d", ret_str1.c_str(), ret_str1.length()); BIO *bio = BIO_new(BIO_s_mem()); BIO_write(bio, (unsigned char*)(ret_str1.c_str()), ret_str1.length()); unsigned char number_of_certs; BIO_read(bio, &number_of_certs, sizeof(number_of_certs)); logger.msg(Arc::INFO, "There are %d certificates in the returned msg", number_of_certs); std::string proxy_cert_str; for (;;) { char s[256]; int l = BIO_read(bio, s, sizeof(s)); if (l <= 0) break; proxy_cert_str.append(s, l); } BIO_free_all(bio); //Output the PEM formated proxy certificate std::string tmpcert_file("tmpcert.pem"); std::ofstream tmpcert_f(tmpcert_file.c_str()); std::string tmpkey_file("tmpkey.pem"); std::ofstream tmpkey_f(tmpkey_file.c_str()); tmpcert_f.write(proxy_cert_str.c_str(), proxy_cert_str.size()); tmpkey_f.write(proxy_key_str.c_str(), proxy_key_str.size()); tmpcert_f.close(); tmpkey_f.close(); Arc::Credential proxy_cred(tmpcert_file, tmpkey_file, cadir, ""); std::string proxy_cred_str_pem; std::string proxy_cred_file("proxy_cred.pem"); std::ofstream proxy_cred_f(proxy_cred_file.c_str()); proxy_cred.OutputCertificate(proxy_cred_str_pem); proxy_cred.OutputPrivatekey(proxy_cred_str_pem); proxy_cred.OutputCertificateChain(proxy_cred_str_pem); proxy_cred_f.write(proxy_cred_str_pem.c_str(), proxy_cred_str_pem.size()); proxy_cred_f.close(); //Myproxy server will then return a standard response message std::string ret_str2; memset(ret_buf, 0, 1024); do { len = 1024; response->Get(&ret_buf[0], len); ret_str2.append(ret_buf, len); memset(ret_buf, 0, 1024); } while (len == 1024); logger.msg(Arc::INFO, "Returned msg from myproxy server: %s %d", ret_str2.c_str(), ret_str2.length()); if (response) { delete response; response = NULL; } return 0; } nordugrid-arc-5.4.2/src/clients/credentials/PaxHeaders.7502/arcproxy_myproxy.cpp0000644000000000000000000000012412402140323026073 xustar000000000000000027 mtime=1409859795.055799 27 atime=1513200575.381712 30 ctime=1513200664.464802092 nordugrid-arc-5.4.2/src/clients/credentials/arcproxy_myproxy.cpp0000644000175000002070000003043312402140323026143 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include 
#include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_NSS #include #endif #include "arcproxy.h" using namespace ArcCredential; typedef enum { pass_all, pass_private_key, pass_myproxy, pass_myproxy_new, pass_nss } pass_destination_type; extern std::map passsources; static std::string get_cert_dn(const std::string& cert_file) { std::string dn_str; Arc::Credential cert(cert_file, "", "", ""); dn_str = cert.GetIdentityName(); return dn_str; } bool contact_myproxy_server(const std::string& myproxy_server, const std::string& myproxy_command, const std::string& myproxy_user_name, bool use_empty_passphrase, const std::string& myproxy_period, const std::string& retrievable_by_cert, Arc::Time& proxy_start, Arc::Period& proxy_period, std::list& vomslist, std::string& vomses_path, const std::string& proxy_path, Arc::UserConfig& usercfg, Arc::Logger& logger) { std::string user_name = myproxy_user_name; std::string key_path, cert_path, ca_dir; key_path = usercfg.KeyPath(); cert_path = usercfg.CertificatePath(); ca_dir = usercfg.CACertificatesDirectory(); if(user_name.empty()) { user_name = get_cert_dn(proxy_path); } if(user_name.empty() && !cert_path.empty()) { user_name = get_cert_dn(cert_path); } //If the "INFO" myproxy command is given, try to get the //information about the existence of stored credentials //on the myproxy server. try { if (Arc::lower(myproxy_command) == "info") { if (myproxy_server.empty()) throw std::invalid_argument("URL of MyProxy server is missing"); if (user_name.empty()) throw std::invalid_argument("Username to MyProxy server is missing"); std::string respinfo; //if(usercfg.CertificatePath().empty()) usercfg.CertificatePath(cert_path); //if(usercfg.KeyPath().empty()) usercfg.KeyPath(key_path); if(usercfg.ProxyPath().empty() && !proxy_path.empty()) usercfg.ProxyPath(proxy_path); else { if(usercfg.CertificatePath().empty() && !cert_path.empty()) usercfg.CertificatePath(cert_path); if(usercfg.KeyPath().empty() && !key_path.empty()) usercfg.KeyPath(key_path); } if(usercfg.CACertificatesDirectory().empty()) usercfg.CACertificatesDirectory(ca_dir); Arc::CredentialStore cstore(usercfg,Arc::URL("myproxy://"+myproxy_server)); std::map myproxyopt; myproxyopt["username"] = user_name; if(!cstore.Info(myproxyopt,respinfo)) throw std::invalid_argument("Failed to get info from MyProxy service"); std::cout << Arc::IString("Succeeded to get info from MyProxy server") << std::endl; std::cout << respinfo << std::endl; return true; } } catch (std::exception& err) { logger.msg(Arc::ERROR, err.what()); return false; } //If the "NEWPASS" myproxy command is given, try to get the //information about the existence of stored credentials //on the myproxy server. 
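  // (The block below actually changes the passphrase protecting the credential
  // stored on the MyProxy server, via CredentialStore::ChangePassword, rather
  // than querying credential information.)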
try { if (Arc::lower(myproxy_command) == "newpass") { if (myproxy_server.empty()) throw std::invalid_argument("URL of MyProxy server is missing"); if (user_name.empty()) throw std::invalid_argument("Username to MyProxy server is missing"); std::string passphrase; if(passsources[pass_myproxy]->Get(passphrase, 4, 256) != Arc::PasswordSource::PASSWORD) throw std::invalid_argument("Error entering passphrase"); std::string newpassphrase; if(passsources[pass_myproxy_new]->Get(newpassphrase, 4, 256) != Arc::PasswordSource::PASSWORD) throw std::invalid_argument("Error entering passphrase"); if(usercfg.ProxyPath().empty() && !proxy_path.empty()) usercfg.ProxyPath(proxy_path); else { if(usercfg.CertificatePath().empty() && !cert_path.empty()) usercfg.CertificatePath(cert_path); if(usercfg.KeyPath().empty() && !key_path.empty()) usercfg.KeyPath(key_path); } if(usercfg.CACertificatesDirectory().empty()) usercfg.CACertificatesDirectory(ca_dir); Arc::CredentialStore cstore(usercfg,Arc::URL("myproxy://"+myproxy_server)); std::map myproxyopt; myproxyopt["username"] = user_name; myproxyopt["password"] = passphrase; myproxyopt["newpassword"] = newpassphrase; if(!cstore.ChangePassword(myproxyopt)) throw std::invalid_argument("Failed to change password MyProxy service"); std::cout << Arc::IString("Succeeded to change password on MyProxy server") << std::endl; return true; } } catch (std::exception& err) { logger.msg(Arc::ERROR, err.what()); return false; } //If the "DESTROY" myproxy command is given, try to get the //information about the existence of stored credentials //on the myproxy server. try { if (Arc::lower(myproxy_command) == "destroy") { if (myproxy_server.empty()) throw std::invalid_argument("URL of MyProxy server is missing"); if (user_name.empty()) throw std::invalid_argument("Username to MyProxy server is missing"); std::string passphrase; if(passsources[pass_myproxy]->Get(passphrase, 4, 256) != Arc::PasswordSource::PASSWORD) throw std::invalid_argument("Error entering passphrase"); std::string respinfo; if(usercfg.ProxyPath().empty() && !proxy_path.empty()) usercfg.ProxyPath(proxy_path); else { if(usercfg.CertificatePath().empty() && !cert_path.empty()) usercfg.CertificatePath(cert_path); if(usercfg.KeyPath().empty() && !key_path.empty()) usercfg.KeyPath(key_path); } if(usercfg.CACertificatesDirectory().empty()) usercfg.CACertificatesDirectory(ca_dir); Arc::CredentialStore cstore(usercfg,Arc::URL("myproxy://"+myproxy_server)); std::map myproxyopt; myproxyopt["username"] = user_name; myproxyopt["password"] = passphrase; if(!cstore.Destroy(myproxyopt)) throw std::invalid_argument("Failed to destroy credential on MyProxy service"); std::cout << Arc::IString("Succeeded to destroy credential on MyProxy server") << std::endl; return true; } } catch (std::exception& err) { logger.msg(Arc::ERROR, err.what()); return false; } //If the "GET" myproxy command is given, try to get a delegated //certificate from the myproxy server. 
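  // Illustrative (hypothetical) invocation handled here:
  //   arcproxy -L myproxy.example.org -M get -U myname
  // optionally combined with --voms so that the MyProxy server includes VOMS
  // attributes in the returned proxy (see the VONAME/VOMSES handling below).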
//For "GET" command, certificate and key are not needed, and //anonymous GSSAPI is used (GSS_C_ANON_FLAG) try { if (Arc::lower(myproxy_command) == "get") { if (myproxy_server.empty()) throw std::invalid_argument("URL of MyProxy server is missing"); if (user_name.empty()) throw std::invalid_argument("Username to MyProxy server is missing"); std::string passphrase; if(!use_empty_passphrase) { if(passsources[pass_myproxy]->Get(passphrase, 4, 256) != Arc::PasswordSource::PASSWORD) throw std::invalid_argument("Error entering passphrase"); } std::string proxy_cred_str_pem; Arc::initializeCredentialsType cred_type(Arc::initializeCredentialsType::SkipCredentials); Arc::UserConfig usercfg_tmp(cred_type); usercfg_tmp.CACertificatesDirectory(usercfg.CACertificatesDirectory()); Arc::CredentialStore cstore(usercfg_tmp,Arc::URL("myproxy://"+myproxy_server)); std::map myproxyopt; myproxyopt["username"] = user_name; myproxyopt["password"] = passphrase; myproxyopt["lifetime"] = myproxy_period; // According to the protocol of myproxy, the "Get" command can // include the information about vo name, so that myproxy server // can contact voms server to retrieve AC for myproxy client // See 2.4 of http://grid.ncsa.illinois.edu/myproxy/protocol/ // "When VONAME appears in the message, the server will generate VOMS // proxy certificate using VONAME and VOMSES, or the server's VOMS server information." class vomses_match: public Arc::VOMSConfig::filter { private: int seq_; const std::list& vomses_; std::map& opts_; public: bool match(const Arc::VOMSConfigLine& line) { for(std::list::const_iterator voms = vomses_.begin(); voms != vomses_.end(); ++voms) { if((line.Name() == *voms) || (line.Alias() == *voms)) { opts_["vomsname"+Arc::tostring(seq_)] = *voms; opts_["vomses"+Arc::tostring(seq_)] = line.Str(); ++seq_; break; }; }; // Because rsult is stored imeediately there is no sense to keep matched lines in // VOMSConfig object. return false; }; vomses_match(const std::list& vomses, std::map opts): seq_(0),vomses_(vomses),opts_(opts) { }; }; Arc::VOMSConfig voms_config(vomses_path, vomses_match(vomslist,myproxyopt)); if(!cstore.Retrieve(myproxyopt,proxy_cred_str_pem)) throw std::invalid_argument("Failed to retrieve proxy from MyProxy service"); write_proxy_file(proxy_path,proxy_cred_str_pem); //Assign proxy_path to cert_path and key_path, //so the later voms functionality can use the proxy_path //to create proxy with voms AC extension. In this //case, "--cert" and "--key" is not needed. 
cert_path = proxy_path; key_path = proxy_path; std::cout << Arc::IString("Succeeded to get a proxy in %s from MyProxy server %s", proxy_path, myproxy_server) << std::endl; return true; } } catch (std::exception& err) { logger.msg(Arc::ERROR, err.what()); return false; } //Delegate the former self-delegated credential to //myproxy server try { if (Arc::lower(myproxy_command) == "put") { if (myproxy_server.empty()) throw std::invalid_argument("URL of MyProxy server is missing"); if (user_name.empty()) throw std::invalid_argument("Username to MyProxy server is missing"); std::string prompt1 = "MyProxy server"; std::string passphrase; if(retrievable_by_cert.empty()) { if(passsources[pass_myproxy_new]->Get(passphrase, 4, 256) != Arc::PasswordSource::PASSWORD) throw std::invalid_argument("Error entering passphrase"); } std::string proxy_cred_str_pem; std::ifstream proxy_cred_file(proxy_path.c_str()); if(!proxy_cred_file) throw std::invalid_argument("Failed to read proxy file "+proxy_path); std::getline(proxy_cred_file,proxy_cred_str_pem,'\0'); if(proxy_cred_str_pem.empty()) throw std::invalid_argument("Failed to read proxy file "+proxy_path); proxy_cred_file.close(); usercfg.ProxyPath(proxy_path); if(usercfg.CACertificatesDirectory().empty()) { usercfg.CACertificatesDirectory(ca_dir); } Arc::CredentialStore cstore(usercfg,Arc::URL("myproxy://"+myproxy_server)); std::map myproxyopt; myproxyopt["username"] = user_name; myproxyopt["password"] = passphrase; myproxyopt["lifetime"] = myproxy_period; if(!retrievable_by_cert.empty()) { myproxyopt["retriever_trusted"] = retrievable_by_cert; } if(!cstore.Store(myproxyopt,proxy_cred_str_pem,true,proxy_start,proxy_period)) throw std::invalid_argument("Failed to delegate proxy to MyProxy service"); remove_proxy_file(proxy_path); std::cout << Arc::IString("Succeeded to put a proxy onto MyProxy server") << std::endl; return true; } } catch (std::exception& err) { logger.msg(Arc::ERROR, err.what()); remove_proxy_file(proxy_path); return false; } return true; } nordugrid-arc-5.4.2/src/clients/credentials/PaxHeaders.7502/test2vomsserver.cpp0000644000000000000000000000012412042216423025617 xustar000000000000000027 mtime=1351163155.111915 27 atime=1513200575.380712 30 ctime=1513200664.470802166 nordugrid-arc-5.4.2/src/clients/credentials/test2vomsserver.cpp0000644000175000002070000001207612042216423025672 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include int main(void) { setlocale(LC_ALL, ""); Arc::OpenSSLInit(); Arc::Logger logger(Arc::Logger::getRootLogger(), "Test2VOMSServer"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::DEBUG); std::string cert("../../tests/echo/usercert.pem"); std::string key("../../tests/echo/userkey.pem"); std::string cadir("../../tests/echo/certificates/"); Arc::Credential signer(cert, key, cadir, ""); //Generate a temporary self-signed proxy certificate //to contact the voms server std::string private_key, signing_cert, signing_cert_chain; std::string out_file_ac("./out_withac.pem"); Arc::Time t; int keybits = 1024; std::string req_str; Arc::Credential cred_request(t, Arc::Period(12 * 3600), keybits, "rfc", "inheritAll", "", -1); cred_request.GenerateRequest(req_str); Arc::Credential proxy; proxy.InquireRequest(req_str); 
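  // InquireRequest() imports the PEM certificate request generated above, so
  // that SignRequest() on the next line can sign it and write the temporary
  // proxy certificate to out_file_ac.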
signer.SignRequest(&proxy, out_file_ac.c_str()); cred_request.OutputPrivatekey(private_key); signer.OutputCertificate(signing_cert); signer.OutputCertificateChain(signing_cert_chain); std::ofstream out_f(out_file_ac.c_str(), std::ofstream::app); out_f.write(private_key.c_str(), private_key.size()); out_f.write(signing_cert.c_str(), signing_cert.size()); out_f.write(signing_cert_chain.c_str(), signing_cert_chain.size()); out_f.close(); //Contact the voms server to retrieve attribute certificate // The message which will be sent to voms server //std::string send_msg("G/playground.knowarc.eu43200"); std::string send_msg("G/knowarc.eu43200"); std::cout << "Send message to peer end through GSS communication: " << send_msg << " Size: " << send_msg.length() << std::endl; Arc::MCCConfig cfg; cfg.AddProxy(out_file_ac); //cfg.AddProxy("/tmp/x509up_u1001"); //cfg.AddCertificate("/home/wzqiang/arc-0.9/src/tests/echo/usercert.pem"); //cfg.AddPrivateKey("/home/wzqiang/arc-0.9/src/tests/echo/userkey-nopass.pem"); cfg.AddCADir("../../tests/echo/certificates/"); //Arc::ClientTCP client(cfg, "arthur.hep.lu.se", 15001, Arc::GSISec, 60); Arc::ClientTCP client(cfg, "arthur.hep.lu.se", 15001, Arc::SSL3Sec, 60); //Arc::ClientTCP client(cfg, "squark.uio.no", 15011, Arc::GSISec, 60); Arc::PayloadRaw request; request.Insert(send_msg.c_str(), 0, send_msg.length()); //Arc::PayloadRawInterface& buffer = dynamic_cast(request); //std::cout<<"Message in PayloadRaw: "<<((Arc::PayloadRawInterface&)buffer).Content()<Get(&ret_buf[0], len); ret_str.append(ret_buf, len); memset(ret_buf, 0, 1024); } while (len == 1024); logger.msg(Arc::INFO, "Returned msg from voms server: %s ", ret_str.c_str()); //Put the return attribute certificate into proxy certificate as the extension part Arc::XMLNode node(ret_str); std::string codedac; codedac = (std::string)(node["ac"]); std::cout << "Coded AC: " << codedac << std::endl; std::string decodedac; int size; char *dec = NULL; dec = Arc::VOMSDecode((char*)(codedac.c_str()), codedac.length(), &size); decodedac.append(dec, size); if (dec != NULL) { free(dec); dec = NULL; } //std::cout<<"Decoded AC: "< #endif #include #include #include #include #include #include #include #include #include #include #include #include int main(void) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "Test2MyProxyServer"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::DEBUG); std::string cert("../../tests/echo/usercert.pem"); std::string key("../../tests/echo/userkey.pem"); std::string cadir("../../tests/echo/certificates/"); Arc::Credential signer(cert, key, cadir, ""); //Generate a temporary self-signed proxy certificate //to contact the myproxy server std::string private_key, signing_cert, signing_cert_chain; std::string out_file("./tmpproxy.pem"); Arc::Time t; int keybits = 1024; std::string req_str; Arc::Credential cred_request(t, Arc::Period(12 * 3600), keybits, "rfc", "inheritAll", "", -1); cred_request.GenerateRequest(req_str); Arc::Credential proxy; proxy.InquireRequest(req_str); signer.SignRequest(&proxy, out_file.c_str()); cred_request.OutputPrivatekey(private_key); signer.OutputCertificate(signing_cert); signer.OutputCertificateChain(signing_cert_chain); std::ofstream out_f(out_file.c_str(), std::ofstream::app); out_f.write(private_key.c_str(), private_key.size()); out_f.write(signing_cert.c_str(), signing_cert.size()); 
out_f.write(signing_cert_chain.c_str(), signing_cert_chain.size()); out_f.close(); //Contact the myproxy server to delegate a certificate into that server // The message which will be sent to myproxy server //"PUT" command std::string send_msg("VERSION=MYPROXYv2\n COMMAND=1\n USERNAME=mytest\n PASSPHRASE=123456\n LIFETIME=43200\n"); std::cout << "Send message to peer end through GSS communication: " << send_msg << " Size: " << send_msg.length() << std::endl; Arc::MCCConfig cfg; cfg.AddProxy(out_file); cfg.AddCADir("$HOME/.globus/certificates/"); //Arc::ClientTCP client(cfg, "127.0.0.1", 7512, Arc::GSISec, 60); Arc::ClientTCP client(cfg, "example.org", 7512, Arc::GSISec, 60); Arc::PayloadRaw request; request.Insert(send_msg.c_str(), 0, send_msg.length()); //Arc::PayloadRawInterface& buffer = dynamic_cast(request); //std::cout<<"Message in PayloadRaw: "<<((Arc::PayloadRawInterface&)buffer).Content()<Get(&ret_buf[0], len); ret_str.append(ret_buf, len); memset(ret_buf, 0, 1024); } while (len == 1024); logger.msg(Arc::INFO, "Returned msg from myproxy server: %s %d", ret_str.c_str(), ret_str.length()); //Myproxy server will send back another message which includes //the certificate request in DER format std::string x509ret_str; memset(ret_buf, 0, 1024); do { len = 1024; response->Get(&ret_buf[0], len); x509ret_str.append(ret_buf, len); memset(ret_buf, 0, 1024); } while (len == 1024); logger.msg(Arc::INFO, "Returned msg from myproxy server: %s %d", x509ret_str.c_str(), x509ret_str.length()); if (response) { delete response; response = NULL; } std::string tmp_req_file("myproxy_req.pem"); std::ofstream tmp_req_out(tmp_req_file.c_str()); tmp_req_out.write(x509ret_str.c_str(), x509ret_str.size()); tmp_req_out.close(); Arc::Credential signer1(out_file, "", cadir, ""); Arc::Credential proxy1; std::string signedcert1, signing_cert1, signing_cert_chain1; proxy1.InquireRequest(x509ret_str, false, true); if (!(signer1.SignRequest(&proxy1, signedcert1, true))) { logger.msg(Arc::ERROR, "Delegate proxy failed"); return 1; } signer1.OutputCertificate(signing_cert1, true); signer1.OutputCertificateChain(signing_cert_chain1, true); signedcert1.append(signing_cert1).append(signing_cert_chain1); //std::cout<<"Signing cert: "<Get(&ret_buf[0], len); ret_str1.append(ret_buf, len); memset(ret_buf, 0, 1024); } while (len == 1024); logger.msg(Arc::INFO, "Returned msg from myproxy server: %s %d", ret_str1.c_str(), ret_str1.length()); if (response) { delete response; response = NULL; } return 0; } nordugrid-arc-5.4.2/src/clients/credentials/PaxHeaders.7502/arcproxyalt.cpp0000644000000000000000000000012412675602216025005 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.375712 30 ctime=1513200664.467802129 nordugrid-arc-5.4.2/src/clients/credentials/arcproxyalt.cpp0000644000175000002070000022055012675602216025056 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include //#include #include #include #include #include #include #ifdef HAVE_CANLXX #include #endif #ifdef HAVE_NSS #include #endif using namespace ArcCredential; static bool contact_voms_servers(std::list& vomslist, std::list& orderlist, std::string& vomses_path, bool use_gsi_comm, bool use_http_comm, const std::string& voms_period, Arc::UserConfig& usercfg, Arc::Logger& 
logger, AuthN::Credentials& signer, AuthN::ProxyCredentialsRequest& proxyreq); static bool contact_myproxy_server(const std::string& myproxy_server, const std::string& myproxy_command, const std::string& myproxy_user_name, bool use_empty_passphrase, const std::string& myproxy_period, const std::string& retrievable_by_cert, Arc::Time& proxy_start, Arc::Period& proxy_period, std::list& vomslist, std::string& vomses_path, const std::string& proxy_path, Arc::UserConfig& usercfg, Arc::Logger& logger); static std::string get_proxypolicy(const std::string& policy_source); static int create_proxy_file(const std::string& path) { int f = -1; if((::unlink(path.c_str()) != 0) && (errno != ENOENT)) { throw std::runtime_error("Failed to remove proxy file " + path); } f = ::open(path.c_str(), O_WRONLY | O_CREAT | O_EXCL | O_TRUNC, S_IRUSR | S_IWUSR); if (f == -1) { throw std::runtime_error("Failed to create proxy file " + path); } if(::chmod(path.c_str(), S_IRUSR | S_IWUSR) != 0) { ::unlink(path.c_str()); ::close(f); throw std::runtime_error("Failed to change permissions of proxy file " + path); } return f; } static void write_proxy_file(const std::string& path, const std::string& content) { std::string::size_type off = 0; int f = create_proxy_file(path); while(off < content.length()) { ssize_t l = ::write(f, content.c_str(), content.length()-off); if(l < 0) { ::unlink(path.c_str()); ::close(f); throw std::runtime_error("Failed to write into proxy file " + path); } off += (std::string::size_type)l; } ::close(f); } static void remove_proxy_file(const std::string& path) { if((::unlink(path.c_str()) != 0) && (errno != ENOENT)) { throw std::runtime_error("Failed to remove proxy file " + path); } } static void tls_process_error(Arc::Logger& logger) { unsigned long err; err = ERR_get_error(); if (err != 0) { logger.msg(Arc::ERROR, "OpenSSL error -- %s", ERR_error_string(err, NULL)); logger.msg(Arc::ERROR, "Library : %s", ERR_lib_error_string(err)); logger.msg(Arc::ERROR, "Function : %s", ERR_func_error_string(err)); logger.msg(Arc::ERROR, "Reason : %s", ERR_reason_error_string(err)); } return; } #define PASS_MIN_LENGTH (4) static int input_password(char *password, int passwdsz, bool verify, const std::string& prompt_info, const std::string& prompt_verify_info, Arc::Logger& logger) { UI *ui = NULL; int res = 0; ui = UI_new(); if (ui) { int ok = 0; char* buf = new char[passwdsz]; memset(buf, 0, passwdsz); int ui_flags = 0; char *prompt1 = NULL; char *prompt2 = NULL; prompt1 = UI_construct_prompt(ui, "passphrase", prompt_info.c_str()); ui_flags |= UI_INPUT_FLAG_DEFAULT_PWD; UI_ctrl(ui, UI_CTRL_PRINT_ERRORS, 1, 0, 0); ok = UI_add_input_string(ui, prompt1, ui_flags, password, 0, passwdsz - 1); if (ok >= 0) { do { ok = UI_process(ui); } while (ok < 0 && UI_ctrl(ui, UI_CTRL_IS_REDOABLE, 0, 0, 0)); } if (ok >= 0) res = strlen(password); if (ok >= 0 && verify) { UI_free(ui); ui = UI_new(); if(!ui) { ok = -1; } else { // TODO: use some generic password strength evaluation if(res < PASS_MIN_LENGTH) { UI_add_info_string(ui, "WARNING: Your password is too weak (too short)!\n" "Make sure this is really what You wanted to enter.\n"); } prompt2 = UI_construct_prompt(ui, "passphrase", prompt_verify_info.c_str()); ok = UI_add_verify_string(ui, prompt2, ui_flags, buf, 0, passwdsz - 1, password); if (ok >= 0) { do { ok = UI_process(ui); } while (ok < 0 && UI_ctrl(ui, UI_CTRL_IS_REDOABLE, 0, 0, 0)); } } } if (ok == -1) { logger.msg(Arc::ERROR, "User interface error"); tls_process_error(logger); memset(password, 0, (unsigned 
int)passwdsz); res = 0; } if (ok == -2) { logger.msg(Arc::ERROR, "Aborted!"); memset(password, 0, (unsigned int)passwdsz); res = 0; } if(ui) UI_free(ui); delete[] buf; if(prompt1) OPENSSL_free(prompt1); if(prompt2) OPENSSL_free(prompt2); } return res; } static bool is_file(std::string path) { if (Glib::file_test(path, Glib::FILE_TEST_IS_REGULAR)) return true; return false; } static bool is_dir(std::string path) { if (Glib::file_test(path, Glib::FILE_TEST_IS_DIR)) return true; return false; } static std::vector search_vomses(std::string path) { std::vector vomses_files; if(is_file(path)) vomses_files.push_back(path); else if(is_dir(path)) { //if the path 'vomses' is a directory, search all of the files under this directory, //i.e., 'vomses/voA' 'vomses/voB' std::string path_header = path; std::string fullpath; Glib::Dir dir(path); for(Glib::Dir::iterator i = dir.begin(); i != dir.end(); i++ ) { fullpath = path_header + G_DIR_SEPARATOR_S + *i; if(is_file(fullpath)) vomses_files.push_back(fullpath); else if(is_dir(fullpath)) { std::string sub_path = fullpath; //if the path is a directory, search the all of the files under this directory, //i.e., 'vomses/extra/myprivatevo' Glib::Dir subdir(sub_path); for(Glib::Dir::iterator j = subdir.begin(); j != subdir.end(); j++ ) { fullpath = sub_path + G_DIR_SEPARATOR_S + *j; if(is_file(fullpath)) vomses_files.push_back(fullpath); //else if(is_dir(fullpath)) { //if it is again a directory, the files under it will be ignored } } } } } return vomses_files; } #define VOMS_LINE_NICKNAME (0) #define VOMS_LINE_HOST (1) #define VOMS_LINE_PORT (2) #define VOMS_LINE_SN (3) #define VOMS_LINE_NAME (4) #define VOMS_LINE_NUM (5) static bool find_matched_vomses(std::map > > &matched_voms_line /*output*/, std::multimap& server_command_map /*output*/, std::list& vomses /*output*/, std::list& vomslist, std::string& vomses_path, Arc::UserConfig& usercfg, Arc::Logger& logger); static std::string tokens_to_string(std::vector tokens) { std::string s; for(int n = 0; n(in_f, profile_ini, '\0'); std::list lines; Arc::tokenize(profile_ini, lines, "\n"); for (std::list::iterator i = lines.begin(); i != lines.end(); ++i) { std::vector inivalue; Arc::tokenize(*i, inivalue, "="); if (inivalue.size() == 2) { if (inivalue[0] == "IsRelative") { if(inivalue[1] == "1") is_relative = true; else is_relative = false; } if (inivalue[0] == "Path") path = inivalue[1]; } } if(is_relative) nss_path = ff_home + G_DIR_SEPARATOR_S + path; else nss_path = path; if(::stat(nss_path.c_str(),&st) != 0) return std::string(); if(!S_ISDIR(st.st_mode)) return std::string(); if(user.get_uid() != st.st_uid) return std::string(); return nss_path; } #endif int main(int argc, char *argv[]) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arcproxy"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); Arc::OptionParser options(" ", istring("The arcproxy command creates a proxy from a key/certificate pair which can\n" "then be used to access grid resources."), istring("Supported constraints are:\n" " validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start from now)\n" " validityEnd=time\n" " validityPeriod=time (e.g. 
43200 or 12h or 12H; if both validityPeriod and validityEnd\n" " not specified, the default is 12 hours for local proxy, and 168 hours for delegated\n" " proxy on myproxy server)\n" " vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, the default\n" " is the minimum value of 12 hours and validityPeriod)\n" " myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy server,\n" " e.g. 43200 or 12h or 12H; if not specified, the default is the minimum value of\n" " 12 hours and validityPeriod (which is lifetime of the delegated proxy on myproxy server))\n" " proxyPolicy=policy content\n" " proxyPolicyFile=policy file")); std::string proxy_path; options.AddOption('P', "proxy", istring("path to the proxy file"), istring("path"), proxy_path); std::string cert_path; options.AddOption('C', "cert", istring("path to the certificate file, it can be either PEM, DER, or PKCS12 formated"), istring("path"), cert_path); std::string key_path; options.AddOption('K', "key", istring("path to the private key file, if the certificate is in PKCS12 format, then no need to give private key"), istring("path"), key_path); std::string ca_dir; options.AddOption('T', "cadir", istring("path to the trusted certificate directory, only needed for the VOMS client functionality"), istring("path"), ca_dir); std::string voms_dir; options.AddOption('s', "vomsdir", istring("path to the top directory of VOMS *.lsc files, only needed for the VOMS client functionality"), istring("path"), voms_dir); std::string vomses_path; options.AddOption('V', "vomses", istring("path to the VOMS server configuration file"), istring("path"), vomses_path); std::list vomslist; options.AddOption('S', "voms", istring("voms<:command>. Specify VOMS server (More than one VOMS server \n" " can be specified like this: --voms VOa:command1 --voms VOb:command2). \n" " :command is optional, and is used to ask for specific attributes(e.g: roles)\n" " command options are:\n" " all --- put all of this DN's attributes into AC; \n" " list ---list all of the DN's attribute, will not create AC extension; \n" " /Role=yourRole --- specify the role, if this DN \n" " has such a role, the role will be put into AC \n" " /voname/groupname/Role=yourRole --- specify the VO, group and role; if this DN \n" " has such a role, the role will be put into AC \n" ), istring("string"), vomslist); std::list orderlist; options.AddOption('o', "order", istring("group<:role>. Specify ordering of attributes \n" " Example: --order /knowarc.eu/coredev:Developer,/knowarc.eu/testers:Tester \n" " or: --order /knowarc.eu/coredev:Developer --order /knowarc.eu/testers:Tester \n" " Note that it does not make sense to specify the order if you have two or more different VOMS servers specified"), istring("string"), orderlist); bool use_gsi_comm = false; options.AddOption('G', "gsicom", istring("use GSI communication protocol for contacting VOMS services"), use_gsi_comm); bool use_http_comm = false; options.AddOption('H', "httpcom", istring("use HTTP communication protocol for contacting VOMS services that provide RESTful access \n" " Note for RESTful access, \'list\' command and multiple VOMS server are not supported\n" ), use_http_comm); bool info = false; options.AddOption('I', "info", istring("print all information about this proxy. \n" " In order to show the Identity (DN without CN as suffix for proxy) \n" " of the certificate, the 'trusted certdir' is needed." 
), info); bool remove_proxy = false; options.AddOption('r', "remove", istring("remove proxy"), remove_proxy); std::string user_name; //user name to MyProxy server options.AddOption('U', "user", istring("username to MyProxy server"), istring("string"), user_name); bool use_empty_passphrase = false; //if use empty passphrase to myproxy server options.AddOption('N', "nopassphrase", istring("don't prompt for a credential passphrase, when retrieve a \n" " credential from on MyProxy server. \n" " The precondition of this choice is the credential is PUT onto\n" " the MyProxy server without a passphrase by using -R (--retrievable_by_cert) \n" " option when being PUTing onto Myproxy server. \n" " This option is specific for the GET command when contacting Myproxy server." ), use_empty_passphrase); std::string retrievable_by_cert; //if use empty passphrase to myproxy server options.AddOption('R', "retrievable_by_cert", istring("Allow specified entity to retrieve credential without passphrase.\n" " This option is specific for the PUT command when contacting Myproxy server." ), istring("string"), retrievable_by_cert); std::string myproxy_server; //url of MyProxy server options.AddOption('L', "myproxysrv", istring("hostname[:port] of MyProxy server"), istring("string"), myproxy_server); std::string myproxy_command; //command to myproxy server options.AddOption('M', "myproxycmd", istring("command to MyProxy server. The command can be PUT or GET.\n" " PUT/put/Put -- put a delegated credential to the MyProxy server; \n" " GET/get/Get -- get a delegated credential from the MyProxy server, \n" " credential (certificate and key) is not needed in this case. \n" " MyProxy functionality can be used together with VOMS\n" " functionality.\n" ), istring("string"), myproxy_command); std::list constraintlist; options.AddOption('c', "constraint", istring("proxy constraints"), istring("string"), constraintlist); #ifdef HAVE_NSS bool use_nssdb = false; options.AddOption('F', "nssdb", istring("use NSS credential database in the Firefox profile"), use_nssdb); #endif int timeout = -1; options.AddOption('t', "timeout", istring("timeout in seconds (default 20)"), istring("seconds"), timeout); std::string conffile; options.AddOption('z', "conffile", istring("configuration file (default ~/.arc/client.conf)"), istring("filename"), conffile); std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); bool version = false; options.AddOption('v', "version", istring("print version information"), version); std::list params = options.Parse(argc, argv); if (version) { std::cout << Arc::IString("%s version %s", "arcproxy", VERSION) << std::endl; return EXIT_SUCCESS; } // If debug is specified as argument, it should be set before loading the configuration. 
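// Illustrative, self-contained sketch (hypothetical helper name, not part of this
// file) of how each "--voms VO[:command]" value accepted above is split at the
// first ':' into a server name and an optional command and collected per server,
// mirroring the splitting done in find_matched_vomses() further below.
#include <list>
#include <map>
#include <string>
#include <utility>

static std::multimap<std::string, std::string>
split_voms_options(const std::list<std::string>& vomslist) {
  std::multimap<std::string, std::string> server_command_map;
  for (std::list<std::string>::const_iterator it = vomslist.begin();
       it != vomslist.end(); ++it) {
    std::string::size_type p = it->find(':');
    std::string voms_server = (p == std::string::npos) ? *it : it->substr(0, p);
    std::string command     = (p == std::string::npos) ? "" : it->substr(p + 1);
    // e.g. "knowarc.eu:/knowarc.eu/coredev/Role=Developer"
    //   -> ("knowarc.eu", "/knowarc.eu/coredev/Role=Developer")
    server_command_map.insert(std::make_pair(voms_server, command));
  }
  return server_command_map;
}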
if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug)); logger.msg(Arc::VERBOSE, "Running command: %s", options.GetCommandWithArguments()); #ifdef HAVE_NSS //Using nss db dominate other option if(use_nssdb) { std::string nssdb_path = get_nssdb_path(); if(nssdb_path.empty()) { std::cout << Arc::IString("The NSS database can not be detected in the Firefox profile") << std::endl; return EXIT_FAILURE; } bool res; std::string configdir = nssdb_path; res = ArcAuthNSS::nssInit(configdir); std::cout<< Arc::IString("NSS database to be accessed: %s\n", configdir.c_str()); char* slotpw = NULL; //"secretpw"; //TODO: Input passphrase to nss db //The nss db under firefox profile seems to not be protected by any passphrase by default bool ascii = true; const char* trusts = "u,u,u"; std::string proxy_csrfile = "proxy.csr"; std::string proxy_keyname = "proxykey"; std::string proxy_privk_str; res = ArcAuthNSS::nssGenerateCSR(proxy_keyname, "CN=Test,OU=ARC,O=EMI", slotpw, proxy_csrfile, proxy_privk_str, ascii); if(!res) return EXIT_FAILURE; std::string proxy_certfile = "myproxy.pem"; std::string issuername = "Imported Certificate"; //The name of the certificate imported in firefox is //normally "Imported Certificate" by default, if name is not specified int duration = 12; std::string vomsacseq; res = ArcAuthNSS::nssCreateCert(proxy_csrfile, issuername, "", duration, vomsacseq, proxy_certfile, ascii); if(!res) return EXIT_FAILURE; const char* proxy_certname = "proxycert"; res = ArcAuthNSS::nssImportCert(slotpw, proxy_certfile, proxy_certname, trusts, ascii); if(!res) return EXIT_FAILURE; //Compose the proxy certificate if(!proxy_path.empty())Arc::SetEnv("X509_USER_PROXY", proxy_path); Arc::UserConfig usercfg(conffile, Arc::initializeCredentialsType(Arc::initializeCredentialsType::NotTryCredentials)); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization."); return EXIT_FAILURE; } if(proxy_path.empty()) proxy_path = usercfg.ProxyPath(); usercfg.ProxyPath(proxy_path); std::string cert_file = "cert.pem"; res = ArcAuthNSS::nssExportCertificate(issuername, cert_file); if(!res) return EXIT_FAILURE; std::string proxy_cred_str; std::ifstream proxy_s(proxy_certfile.c_str()); std::getline(proxy_s, proxy_cred_str,'\0'); proxy_s.close(); std::string eec_cert_str; std::ifstream eec_s(cert_file.c_str()); std::getline(eec_s, eec_cert_str,'\0'); eec_s.close(); proxy_cred_str.append(proxy_privk_str).append(eec_cert_str); write_proxy_file(proxy_path, proxy_cred_str); AuthN::Context ctx(AuthN::Context::EmptyContext); ctx.SetCredentials(proxy_path, ""); AuthN::Credentials proxy_cred(ctx); Arc::Time left = proxy_cred.GetValidTill(); std::cout << Arc::IString("Proxy generation succeeded") << std::endl; std::cout << Arc::IString("Your proxy is valid until: %s", left.str(Arc::UserTime)) << std::endl; return EXIT_SUCCESS; } #endif // This ensure command line args overwrite all other options if(!cert_path.empty())Arc::SetEnv("X509_USER_CERT", cert_path); if(!key_path.empty())Arc::SetEnv("X509_USER_KEY", key_path); if(!proxy_path.empty())Arc::SetEnv("X509_USER_PROXY", proxy_path); if(!ca_dir.empty())Arc::SetEnv("X509_CERT_DIR", ca_dir); // Set default, predefined or guessed credentials. Also check if they exist. 
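// Standalone sketch (hypothetical helper names) of the two file idioms used in
// the NSS branch above: slurping a whole PEM file into a string with
// std::getline(..., '\0'), as done for the proxy and EEC certificate files
// before they are concatenated into one bundle, and a short-write-safe output
// loop in the spirit of write_proxy_file() earlier in this file, here advancing
// the source pointer together with the offset on partial writes.
#include <fstream>
#include <stdexcept>
#include <string>
#include <unistd.h>

static std::string slurp_file(const std::string& path) {
  std::ifstream in(path.c_str());
  if (!in) throw std::runtime_error("Failed to open " + path);
  std::string content;
  std::getline(in, content, '\0');   // reads up to EOF; PEM data contains no '\0'
  return content;
}

static void write_all(int fd, const std::string& content) {
  std::string::size_type off = 0;
  while (off < content.length()) {
    ssize_t l = ::write(fd, content.c_str() + off, content.length() - off);
    if (l < 0) throw std::runtime_error("Failed to write credential data");
    off += (std::string::size_type)l;
  }
}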
Arc::UserConfig usercfg(conffile, Arc::initializeCredentialsType(Arc::initializeCredentialsType::TryCredentials)); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization."); return EXIT_FAILURE; } // Check for needed credentials objects // Can proxy be used for? Could not find it in documentation. // Key and certificate not needed if only printing proxy information if((usercfg.CertificatePath().empty() || (usercfg.KeyPath().empty() && (usercfg.CertificatePath().find(".p12") == std::string::npos))) && !info) { logger.msg(Arc::ERROR, "Failed to find certificate and/or private key or files have improper permissions or ownership."); logger.msg(Arc::ERROR, "You may try to increase verbosity to get more information."); return EXIT_FAILURE; } if(!vomslist.empty() || !myproxy_command.empty()) { // For external communication CAs are needed if(usercfg.CACertificatesDirectory().empty()) { logger.msg(Arc::ERROR, "Failed to find CA certificates"); logger.msg(Arc::ERROR, "Cannot find the CA certificates directory path, " "please set environment variable X509_CERT_DIR, " "or cacertificatesdirectory in a configuration file."); logger.msg(Arc::ERROR, "You may try to increase verbosity to get more information."); logger.msg(Arc::ERROR, "The CA certificates directory is required for " "contacting VOMS and MyProxy servers."); return EXIT_FAILURE; } } // Proxy is special case. We either need default or predefined path. // No guessing or testing is needed. // By running credentials initialization once more all set values // won't change. But proxy will get default value if not set. { Arc::UserConfig tmpcfg(conffile, Arc::initializeCredentialsType(Arc::initializeCredentialsType::NotTryCredentials)); if(proxy_path.empty()) proxy_path = tmpcfg.ProxyPath(); usercfg.ProxyPath(proxy_path); } // Get back all paths if(key_path.empty()) key_path = usercfg.KeyPath(); if(cert_path.empty()) cert_path = usercfg.CertificatePath(); if(ca_dir.empty()) ca_dir = usercfg.CACertificatesDirectory(); if(voms_dir.empty()) voms_dir = Arc::GetEnv("X509_VOMS_DIR"); if (debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); if (timeout > 0) usercfg.Timeout(timeout); Arc::User user; if (!params.empty()) { logger.msg(Arc::ERROR, "Wrong number of arguments!"); return EXIT_FAILURE; } const Arc::Time now; if (remove_proxy) { if (proxy_path.empty()) { logger.msg(Arc::ERROR, "Cannot find the path of the proxy file, " "please setup environment X509_USER_PROXY, " "or proxypath in a configuration file"); return EXIT_FAILURE; } else if (!(Glib::file_test(proxy_path, Glib::FILE_TEST_EXISTS))) { logger.msg(Arc::ERROR, "Cannot remove proxy file at %s, because it's not there", proxy_path); return EXIT_FAILURE; } if((unlink(proxy_path.c_str()) != 0) && (errno != ENOENT)) { logger.msg(Arc::ERROR, "Cannot remove proxy file at %s", proxy_path); return EXIT_FAILURE; } return EXIT_SUCCESS; } if (info) { std::vector voms_attributes; bool res = false; if (proxy_path.empty()) { logger.msg(Arc::ERROR, "Cannot find the path of the proxy file, " "please setup environment X509_USER_PROXY, " "or proxypath in a configuration file"); return EXIT_FAILURE; } else if (!(Glib::file_test(proxy_path, Glib::FILE_TEST_EXISTS))) { logger.msg(Arc::ERROR, "Cannot find file at %s for getting the proxy. " "Please make sure this file exists.", proxy_path); return EXIT_FAILURE; } // EmptyContext is used , and then cert/key and CA paths // are set manually. 
AuthN::Context will only not use // cert/key, if both cert/key and proxy paths are set AuthN::Context ctx(AuthN::Context::EmptyContext); AuthN::Context full_ctx(AuthN::Context::ClientFullContext); ctx.SetCredentials(full_ctx.GetCertPath(), full_ctx.GetKeyPath()); ctx.SetCAPath(full_ctx.GetCAPath()); AuthN::Credentials holder(ctx); std::cout << Arc::IString("Subject: %s", holder.GetSubjectName()) << std::endl; std::cout << Arc::IString("Issuer: %s", holder.GetIssuerName()) << std::endl; std::cout << Arc::IString("Identity: %s", holder.GetIdentityName()) << std::endl; Arc::Time end(holder.GetValidTill()); Arc::Time start(holder.GetValidFrom()); if (end < now) std::cout << Arc::IString("Time left for proxy: Proxy expired") << std::endl; else if (now < start) std::cout << Arc::IString("Time left for proxy: Proxy not valid yet") << std::endl; else std::cout << Arc::IString("Time left for proxy: %s", (end - now).istr()) << std::endl; if(!ctx.GetCertPath().empty()) std::cout << Arc::IString("Proxy path: %s", ctx.GetCertPath()) << std::endl; // std::cout << Arc::IString("Proxy type: %s", certTypeToString(holder.GetType())) << std::endl; Arc::VOMSTrustList voms_trust_dn; voms_trust_dn.AddRegex(".*"); std::string cert_str; holder.GetCertificate(cert_str); res = parseVOMSAC(cert_str, ca_dir, "", voms_dir, voms_trust_dn, voms_attributes, true, true); // Not printing error message because parseVOMSAC will print everything itself //if (!res) logger.msg(Arc::ERROR, "VOMS attribute parsing failed"); for(int n = 0; n 0) { std::cout<<"====== "< voms_attributes[n].till) { std::cout << Arc::IString("Time left for AC: AC has expired")< constraints; for (std::list::iterator it = constraintlist.begin(); it != constraintlist.end(); it++) { std::string::size_type pos = it->find('='); if (pos != std::string::npos) constraints[it->substr(0, pos)] = it->substr(pos + 1); else constraints[*it] = ""; } //proxy validity period //Set the default proxy validity lifetime to 12 hours if there is //no validity lifetime provided by command caller // Set default values first // TODO: Is default validityPeriod since now or since validityStart? Arc::Time validityStart = now; // now by default Arc::Period validityPeriod(12*60*60); if (myproxy_command == "put" || myproxy_command == "PUT" || myproxy_command == "Put") { //For myproxy PUT operation, the proxy should be 7 days according to the default //definition in myproxy implementation. validityPeriod = 7*24*60*60; } // Acquire constraints. Check for valid values and conflicts. 
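// Illustrative, self-contained recap (hypothetical helper names) of how the
// "-c key=value" constraints given on the command line end up in the string
// map used above, and of the default lifetimes chosen when no validity
// constraint is supplied: 12 hours for a local proxy, 7 days when the proxy
// is being PUT onto a MyProxy server.
#include <list>
#include <map>
#include <string>

static std::map<std::string, std::string>
parse_constraints(const std::list<std::string>& constraintlist) {
  std::map<std::string, std::string> constraints;
  for (std::list<std::string>::const_iterator it = constraintlist.begin();
       it != constraintlist.end(); ++it) {
    std::string::size_type pos = it->find('=');
    if (pos != std::string::npos)
      constraints[it->substr(0, pos)] = it->substr(pos + 1); // e.g. "validityPeriod=12h"
    else
      constraints[*it] = "";                                 // bare key, empty value
  }
  return constraints;
}

// Default validity period in seconds when the caller supplied none.
static long default_validity_seconds(bool myproxy_put) {
  return myproxy_put ? 7 * 24 * 60 * 60   // proxy delegated to MyProxy: 7 days
                     : 12 * 60 * 60;      // local proxy: 12 hours
}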
if((!constraints["validityStart"].empty()) && (!constraints["validityEnd"].empty()) && (!constraints["validityPeriod"].empty())) { std::cerr << Arc::IString("The start, end and period can't be set simultaneously") << std::endl; return EXIT_FAILURE; } if(!constraints["validityStart"].empty()) { validityStart = Arc::Time(constraints["validityStart"]); if (validityStart == Arc::Time(Arc::Time::UNDEFINED)) { std::cerr << Arc::IString("The start time that you set: %s can't be recognized.", (std::string)constraints["validityStart"]) << std::endl; return EXIT_FAILURE; } } if(!constraints["validityPeriod"].empty()) { validityPeriod = Arc::Period(constraints["validityPeriod"]); if (validityPeriod.GetPeriod() <= 0) { std::cerr << Arc::IString("The period that you set: %s can't be recognized.", (std::string)constraints["validityPeriod"]) << std::endl; return EXIT_FAILURE; } } if(!constraints["validityEnd"].empty()) { Arc::Time validityEnd = Arc::Time(constraints["validityEnd"]); if (validityEnd == Arc::Time(Arc::Time::UNDEFINED)) { std::cerr << Arc::IString("The end time that you set: %s can't be recognized.", (std::string)constraints["validityEnd"]) << std::endl; return EXIT_FAILURE; } if(!constraints["validityPeriod"].empty()) { // If period is explicitly set then start is derived from end and period validityStart = validityEnd - validityPeriod; } else { // otherwise start - optionally - and end are set, period is derived if(validityEnd < validityStart) { std::cerr << Arc::IString("The end time that you set: %s is before start time:%s.", (std::string)validityEnd,(std::string)validityStart) << std::endl; // error return EXIT_FAILURE; } validityPeriod = validityEnd - validityStart; } } // Here we have validityStart and validityPeriod defined Arc::Time validityEnd = validityStart + validityPeriod; // Warn user about strange times but do not prevent user from doing anything legal if(validityStart < now) { std::cout << Arc::IString("WARNING: The start time that you set: %s is before current time: %s", (std::string)validityStart, (std::string)now) << std::endl; } if(validityEnd < now) { std::cout << Arc::IString("WARNING: The end time that you set: %s is before current time: %s", (std::string)validityEnd, (std::string)now) << std::endl; } //voms AC valitity period //Set the default voms AC validity lifetime to 12 hours if there is //no validity lifetime provided by command caller Arc::Period vomsACvalidityPeriod(12*60*60); if(!constraints["vomsACvalidityPeriod"].empty()) { vomsACvalidityPeriod = Arc::Period(constraints["vomsACvalidityPeriod"]); if (vomsACvalidityPeriod.GetPeriod() == 0) { std::cerr << Arc::IString("The VOMS AC period that you set: %s can't be recognized.", (std::string)constraints["vomsACvalidityPeriod"]) << std::endl; return EXIT_FAILURE; } } else { if(validityPeriod < vomsACvalidityPeriod) vomsACvalidityPeriod = validityPeriod; // It is strange that VOMS AC may be valid less than proxy itself. // Maybe it would be more correct to have it valid by default from // now till validityEnd. } std::string voms_period = Arc::tostring(vomsACvalidityPeriod.GetPeriod()); //myproxy validity period. 
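// Distilled restatement (illustrative names, plain time_t arithmetic) of the
// start/end/period reconciliation performed just above: giving all three is an
// error; end plus period fixes the start; start plus end fixes the period;
// otherwise the end is simply start plus period, with the start defaulting to
// "now" and the period to 12 hours (7 days for a MyProxy PUT, as set earlier).
#include <ctime>
#include <stdexcept>

struct Validity { std::time_t start; std::time_t end; };

static Validity reconcile(bool has_start, std::time_t start,
                          bool has_end,   std::time_t end,
                          bool has_period, long period_sec,
                          std::time_t now) {
  if (has_start && has_end && has_period)
    throw std::invalid_argument("start, end and period cannot all be set");
  if (!has_start)  start = now;                  // default: start from now
  if (!has_period) period_sec = 12 * 60 * 60;    // default lifetime
  if (has_end && has_period) {
    start = end - period_sec;                    // derive start from end and period
  } else if (has_end) {
    if (end < start) throw std::invalid_argument("end time is before start time");
    period_sec = (long)(end - start);            // derive period from start and end
  }
  Validity v;
  v.start = start;
  v.end = start + period_sec;                    // end is always start + period
  return v;
}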
//Set the default myproxy validity lifetime to 12 hours if there is //no validity lifetime provided by command caller Arc::Period myproxyvalidityPeriod(12*60*60); if(!constraints["myproxyvalidityPeriod"].empty()) { myproxyvalidityPeriod = Arc::Period(constraints["myproxyvalidityPeriod"]); if (myproxyvalidityPeriod.GetPeriod() == 0) { std::cerr << Arc::IString("The MyProxy period that you set: %s can't be recognized.", (std::string)constraints["myproxyvalidityPeriod"]) << std::endl; return EXIT_FAILURE; } } else { if(validityPeriod < myproxyvalidityPeriod) myproxyvalidityPeriod = validityPeriod; // see vomsACvalidityPeriod } std::string myproxy_period = Arc::tostring(myproxyvalidityPeriod.GetPeriod()); Arc::OpenSSLInit(); Arc::Time proxy_start = validityStart; Arc::Period proxy_period = validityPeriod; if (constraints["validityStart"].empty() && constraints["validityEnd"].empty()) { // If start/end is not explicitly specified then add 5 min back gap. proxy_start = proxy_start - Arc::Period(300); proxy_period.SetPeriod(proxy_period.GetPeriod() + 300); } // TODO: Myproxy if (!myproxy_command.empty() && (myproxy_command != "put" && myproxy_command != "PUT" && myproxy_command != "Put")) { bool res = contact_myproxy_server( myproxy_server, myproxy_command, user_name, use_empty_passphrase, myproxy_period, retrievable_by_cert, proxy_start, proxy_period, vomslist, vomses_path, proxy_path, usercfg, logger); if(res) return EXIT_SUCCESS; else return EXIT_FAILURE; } //Create proxy or voms proxy try { AuthN::Context ctx(AuthN::Context::EmptyContext); AuthN::Context empty_ctx(AuthN::Context::EmptyContext); AuthN::Context full_ctx(AuthN::Context::ClientFullContext); ctx.SetCredentials(full_ctx.GetCertPath(), full_ctx.GetKeyPath()); ctx.SetCAPath(full_ctx.GetCAPath()); AuthN::Credentials signer(ctx); if (signer.GetIdentityName().empty()) { std::cerr << Arc::IString("Proxy generation failed: No valid certificate found.") << std::endl; return EXIT_FAILURE; } std::string privk_str; signer.GetPrivateKey(privk_str); if(privk_str.empty()) { std::cerr << Arc::IString("Proxy generation failed: No valid private key found.") << std::endl; return EXIT_FAILURE; } std::cout << Arc::IString("Your identity: %s", signer.GetIdentityName()) << std::endl; Arc::Time end(signer.GetValidTill()); Arc::Time start(signer.GetValidFrom()); if (now > end) { std::cerr << Arc::IString("Proxy generation failed: Certificate has expired.") << std::endl; return EXIT_FAILURE; } else if (now < start) { std::cerr << Arc::IString("Proxy generation failed: Certificate is not valid yet.") << std::endl; return EXIT_FAILURE; } std::string private_key, signing_cert, signing_cert_chain; int keybits = 1024; AuthN::ProxyCredentialsRequest proxyreq(empty_ctx); proxyreq.MakeKeys(keybits); proxyreq.MakeRequest(); proxyreq.GetPrivateKey(private_key); if(vomslist.size() > 0) contact_voms_servers(vomslist, orderlist, vomses_path, use_gsi_comm, use_http_comm, voms_period, usercfg, logger, signer, proxyreq); proxyreq.SetValidFrom(proxy_start.GetTime()); proxyreq.SetValidTill((proxy_start + proxy_period).GetTime()); AuthN::Credentials::Extension policy; std::string policy_source = constraints["proxyPolicy"].empty() ? 
constraints["proxyPolicyFile"] : constraints["proxyPolicy"]; std::string policy_string = get_proxypolicy(policy_source); if(!policy_string.empty()) { policy.value = policy_string; proxyreq.SetPolicy(policy); } AuthN::Credentials proxy(empty_ctx); AuthN::Status stat = signer.Sign(proxyreq, proxy); if(stat != AuthN::Status(0)) throw std::runtime_error("Failed to sign proxy"); std::string proxy_cert; proxy.GetCertificate(proxy_cert); signer.GetCertificate(signing_cert); signer.GetChain(signing_cert_chain); proxy_cert.append(private_key).append(signing_cert).append(signing_cert_chain); //If myproxy command is "Put", then the proxy path is set to /tmp/myproxy-proxy.uid.pid if (myproxy_command == "put" || myproxy_command == "PUT" || myproxy_command == "Put") proxy_path = Glib::build_filename(Glib::get_tmp_dir(), "myproxy-proxy." + Arc::tostring(user.get_uid()) + Arc::tostring((int)(getpid()))); write_proxy_file(proxy_path,proxy_cert); Arc::Time left = proxy.GetValidTill(); std::cout << Arc::IString("Proxy generation succeeded") << std::endl; std::cout << Arc::IString("Your proxy is valid until: %s", left.str(Arc::UserTime)) << std::endl; } catch (std::exception& err) { logger.msg(Arc::ERROR, err.what()); tls_process_error(logger); return EXIT_FAILURE; } // TODO: Myproxy put if (myproxy_command == "put" || myproxy_command == "PUT" || myproxy_command == "Put") { bool res = contact_myproxy_server( myproxy_server, myproxy_command, user_name, use_empty_passphrase, myproxy_period, retrievable_by_cert, proxy_start, proxy_period, vomslist, vomses_path, proxy_path, usercfg, logger); if(res) return EXIT_SUCCESS; else return EXIT_FAILURE; } return EXIT_SUCCESS; } static bool contact_voms_servers(std::list& vomslist, std::list& orderlist, std::string& vomses_path, bool use_gsi_comm, bool use_http_comm, const std::string& voms_period, Arc::UserConfig& usercfg, Arc::Logger& logger, AuthN::Credentials& signer, AuthN::ProxyCredentialsRequest& proxyreq) { std::string tmp_proxy_path; std::string ca_dir; tmp_proxy_path = Glib::build_filename(Glib::get_tmp_dir(), std::string("tmp_proxy.pem")); ca_dir = usercfg.CACertificatesDirectory(); //Generate a temporary self-signed proxy certificate //to contact the voms server AuthN::Context ctx(AuthN::Context::EmptyContext); AuthN::Credentials tmp_proxy(ctx); AuthN::ProxyCredentialsRequest tmp_proxyreq(ctx); int keybits = 1024; tmp_proxyreq.MakeKeys(keybits); tmp_proxyreq.MakeRequest(); Arc::Time now; Arc::Period period = 3600 * 12; tmp_proxyreq.SetValidFrom((now-Arc::Period(300)).GetTime()); tmp_proxyreq.SetValidTill((now + period + 300).GetTime()); std::string proxy_private_key, proxy_cert, signing_cert, signing_cert_chain; tmp_proxyreq.GetPrivateKey(proxy_private_key); signer.GetCertificate(signing_cert); signer.GetChain(signing_cert_chain); AuthN::Status stat = signer.Sign(tmp_proxyreq, tmp_proxy); if(stat != AuthN::Status(0)) throw std::runtime_error("Failed to sign proxy"); tmp_proxy.GetCertificate(proxy_cert); proxy_cert.append(proxy_private_key).append(signing_cert).append(signing_cert_chain); write_proxy_file(tmp_proxy_path, proxy_cert); std::map > > matched_voms_line; std::multimap server_command_map; std::list vomses; if(!find_matched_vomses(matched_voms_line, server_command_map, vomses, vomslist, vomses_path, usercfg, logger)) return false; //Contact the voms server to retrieve attribute certificate std::string aclist_str; Arc::MCCConfig cfg; cfg.AddProxy(tmp_proxy_path); cfg.AddCADir(ca_dir); for (std::map > >::iterator it = matched_voms_line.begin(); it != 
matched_voms_line.end(); it++) { std::string voms_server; std::list command_list; voms_server = (*it).first; std::vector > voms_lines = (*it).second; bool succeeded = false; //a boolean value to indicate if there is valid message returned from voms server, by using the current voms_line for (std::vector >::iterator line_it = voms_lines.begin(); line_it != voms_lines.end(); line_it++) { std::vector voms_line = *line_it; int count = server_command_map.count(voms_server); logger.msg(Arc::DEBUG, "There are %d commands to the same VOMS server %s", count, voms_server); std::multimap::iterator command_it; for(command_it = server_command_map.equal_range(voms_server).first; command_it!=server_command_map.equal_range(voms_server).second; ++command_it) { command_list.push_back((*command_it).second); } std::string address; if(voms_line.size() > VOMS_LINE_HOST) address = voms_line[VOMS_LINE_HOST]; if(address.empty()) { logger.msg(Arc::ERROR, "Cannot get VOMS server address information from vomses line: \"%s\"", tokens_to_string(voms_line)); throw std::runtime_error("Cannot get VOMS server address information from vomses line: \"" + tokens_to_string(voms_line) + "\""); } std::string port; if(voms_line.size() > VOMS_LINE_PORT) port = voms_line[VOMS_LINE_PORT]; std::string voms_name; if(voms_line.size() > VOMS_LINE_NAME) voms_name = voms_line[VOMS_LINE_NAME]; logger.msg(Arc::INFO, "Contacting VOMS server (named %s): %s on port: %s", voms_name, address, port); std::cout << Arc::IString("Contacting VOMS server (named %s): %s on port: %s", voms_name, address, port) << std::endl; std::string send_msg; send_msg.append(""); std::string command; for(std::list::iterator c_it = command_list.begin(); c_it != command_list.end(); c_it++) { std::string command_2server; command = *c_it; if (command.empty()) command_2server.append("G/").append(voms_name); else if (command == "all" || command == "ALL") command_2server.append("A"); else if (command == "list") command_2server.append("N"); else { std::string::size_type pos = command.find("/Role="); if (pos == 0) command_2server.append("R").append(command.substr(pos + 6)); else if (pos != std::string::npos && pos > 0) command_2server.append("B").append(command.substr(0, pos)).append(":").append(command.substr(pos + 6)); else if(command[0] == '/') command_2server.append("G").append(command); } send_msg.append("").append(command_2server).append(""); } std::string ordering; for(std::list::iterator o_it = orderlist.begin(); o_it != orderlist.end(); o_it++) { ordering.append(o_it == orderlist.begin() ? "" : ",").append(*o_it); } logger.msg(Arc::VERBOSE, "Try to get attribute from VOMS server with order: %s", ordering); send_msg.append("").append(ordering).append(""); send_msg.append("").append(voms_period).append(""); logger.msg(Arc::VERBOSE, "Message sent to VOMS server %s is: %s", voms_name, send_msg); std::string ret_str; if(use_http_comm) { // Use http to contact voms server, for the RESRful interface provided by voms server // The format of the URL: https://moldyngrid.org:15112/generate-ac?fqans=/testbed.univ.kiev.ua/blabla/Role=test-role&lifetime=86400 // fqans is composed of the voname, group name and role, i.e., the "command" for voms. std::string url_str; if(!command.empty()) url_str = "https://" + address + ":" + port + "/generate-ac?" + "fqans=" + command + "&lifetime=" + voms_period; else url_str = "https://" + address + ":" + port + "/generate-ac?" 
+ "lifetime=" + voms_period; Arc::URL voms_url(url_str); Arc::ClientHTTP client(cfg, voms_url, usercfg.Timeout()); client.RelativeURI(true); Arc::PayloadRaw request; Arc::PayloadRawInterface* response; Arc::HTTPClientInfo info; Arc::MCC_Status status = client.process("GET", &request, &info, &response); if (!status) { if (response) delete response; std::cout << Arc::IString("The VOMS server with the information:\n\t%s\ncan not be reached, please make sure it is available", tokens_to_string(voms_line)) << std::endl; continue; //There could be another voms replicated server with the same name exists } if (!response) { logger.msg(Arc::ERROR, "No HTTP response from VOMS server"); continue; } if(response->Content() != NULL) ret_str.append(response->Content()); if (response) delete response; logger.msg(Arc::VERBOSE, "Returned message from VOMS server: %s", ret_str); } else { // Use GSI or TLS to contact voms server Arc::ClientTCP client(cfg, address, atoi(port.c_str()), use_gsi_comm ? Arc::GSISec : Arc::SSL3Sec, usercfg.Timeout()); Arc::PayloadRaw request; request.Insert(send_msg.c_str(), 0, send_msg.length()); Arc::PayloadStreamInterface *response = NULL; Arc::MCC_Status status = client.process(&request, &response, true); if (!status) { //logger.msg(Arc::ERROR, (std::string)status); if (response) delete response; std::cout << Arc::IString("The VOMS server with the information:\n\t%s\"\ncan not be reached, please make sure it is available", tokens_to_string(voms_line)) << std::endl; continue; //There could be another voms replicated server with the same name exists } if (!response) { logger.msg(Arc::ERROR, "No stream response from VOMS server"); continue; } char ret_buf[1024]; int len = sizeof(ret_buf); while(response->Get(ret_buf, len)) { ret_str.append(ret_buf, len); len = sizeof(ret_buf); }; if (response) delete response; logger.msg(Arc::VERBOSE, "Returned message from VOMS server: %s", ret_str); } Arc::XMLNode node; Arc::XMLNode(ret_str).Exchange(node); if((!node) || ((bool)(node["error"]))) { if((bool)(node["error"])) { std::string str = node["error"]["item"]["message"]; std::string::size_type pos; std::string tmp_str = "The validity of this VOMS AC in your proxy is shortened to"; if((pos = str.find(tmp_str))!= std::string::npos) { std::string tmp = str.substr(pos + tmp_str.size() + 1); std::cout << Arc::IString("The validity duration of VOMS AC is shortened from %s to %s, due to the validity constraint on voms server side.\n", voms_period, tmp); } else { std::cout << Arc::IString("Cannot get any AC or attributes info from VOMS server: %s;\n Returned message from VOMS server: %s\n", voms_server, str); break; //since the voms servers with the same name should be looked as the same for robust reason, the other voms server that can be reached could returned the same message. So we exists the loop, even if there are other backup voms server exist. 
} } else { std::cout << Arc::IString("Returned message from VOMS server %s is: %s\n", voms_server, ret_str); break; } } //Put the return attribute certificate into proxy certificate as the extension part std::string codedac; if (command == "list") codedac = (std::string)(node["bitstr"]); else codedac = (std::string)(node["ac"]); std::string decodedac; int size; char *dec = NULL; dec = Arc::VOMSDecode((char*)(codedac.c_str()), codedac.length(), &size); if (dec != NULL) { decodedac.append(dec, size); free(dec); dec = NULL; } if (command == "list") { std::cout << Arc::IString("The attribute information from VOMS server: %s is list as following:", voms_server) << std::endl << decodedac << std::endl; return true; } aclist_str.append(VOMS_AC_HEADER).append("\n"); aclist_str.append(codedac).append("\n"); aclist_str.append(VOMS_AC_TRAILER).append("\n"); succeeded = true; break; }//end of the scanning of multiple vomses lines with the same name if(succeeded == false) { if(voms_lines.size() > 1) std::cout << Arc::IString("There are %d servers with the same name: %s in your vomses file, but all of them can not be reached, or can not return valid message. But proxy without VOMS AC extension will still be generated.", voms_lines.size(), voms_server) << std::endl; } } remove_proxy_file(tmp_proxy_path); //Put the returned attribute certificate into proxy certificate if (!aclist_str.empty()) { AuthN::Credentials::Extension ac_ext; ac_ext.oid = "acseq"; std::string aclist_asn1; Arc::VOMSACSeqEncode(aclist_str, aclist_asn1); ac_ext.value = aclist_asn1; AuthN::Status stat = proxyreq.AddExtension(ac_ext); if(stat != AuthN::Status(0)) std::cout<< Arc::IString("Failed to add extension: %s", stat.GetDescription().c_str()) <& vomslist, std::string& vomses_path, const std::string& proxy_path, Arc::UserConfig& usercfg, Arc::Logger& logger) { std::string user_name = myproxy_user_name; std::string key_path, cert_path, ca_dir; key_path = usercfg.KeyPath(); cert_path = usercfg.CertificatePath(); ca_dir = usercfg.CACertificatesDirectory(); //If the "INFO" myproxy command is given, try to get the //information about the existence of stored credentials //on the myproxy server. 
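// Thin standalone sketch (hypothetical helper; the framing strings are passed
// in so the snippet stays self-contained) of how each base64 AC returned by a
// VOMS server is framed above, before the accumulated list is DER-encoded with
// Arc::VOMSACSeqEncode() and attached to the proxy request as the "acseq"
// extension. In the code above the framing strings are the VOMS_AC_HEADER and
// VOMS_AC_TRAILER markers defined elsewhere in this file.
#include <string>

static void append_framed_ac(std::string& aclist_str,
                             const std::string& codedac,
                             const std::string& header,
                             const std::string& trailer) {
  aclist_str.append(header).append("\n");
  aclist_str.append(codedac).append("\n");
  aclist_str.append(trailer).append("\n");
}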
try { if (myproxy_command == "info" || myproxy_command == "INFO" || myproxy_command == "Info") { if (myproxy_server.empty()) throw std::invalid_argument("URL of MyProxy server is missing"); if(user_name.empty()) { user_name = get_cert_dn(proxy_path); } if (user_name.empty()) throw std::invalid_argument("Username to MyProxy server is missing"); std::string respinfo; //if(usercfg.CertificatePath().empty()) usercfg.CertificatePath(cert_path); //if(usercfg.KeyPath().empty()) usercfg.KeyPath(key_path); if(usercfg.ProxyPath().empty() && !proxy_path.empty()) usercfg.ProxyPath(proxy_path); else { if(usercfg.CertificatePath().empty() && !cert_path.empty()) usercfg.CertificatePath(cert_path); if(usercfg.KeyPath().empty() && !key_path.empty()) usercfg.KeyPath(key_path); } if(usercfg.CACertificatesDirectory().empty()) usercfg.CACertificatesDirectory(ca_dir); Arc::CredentialStore cstore(usercfg,Arc::URL("myproxy://"+myproxy_server)); std::map myproxyopt; myproxyopt["username"] = user_name; if(!cstore.Info(myproxyopt,respinfo)) throw std::invalid_argument("Failed to get info from MyProxy service"); std::cout << Arc::IString("Succeeded to get info from MyProxy server") << std::endl; std::cout << respinfo << std::endl; return true; } } catch (std::exception& err) { logger.msg(Arc::ERROR, err.what()); tls_process_error(logger); return false; } //If the "NEWPASS" myproxy command is given, try to get the //information about the existence of stored credentials //on the myproxy server. try { if (myproxy_command == "newpass" || myproxy_command == "NEWPASS" || myproxy_command == "Newpass" || myproxy_command == "NewPass") { if (myproxy_server.empty()) throw std::invalid_argument("URL of MyProxy server is missing"); if(user_name.empty()) { user_name = get_cert_dn(proxy_path); } if (user_name.empty()) throw std::invalid_argument("Username to MyProxy server is missing"); std::string prompt1 = "MyProxy server"; char password[256]; std::string passphrase; int res = input_password(password, 256, false, prompt1, "", logger); if (!res) throw std::invalid_argument("Error entering passphrase"); passphrase = password; std::string prompt2 = "MyProxy server"; char newpassword[256]; std::string newpassphrase; res = input_password(newpassword, 256, true, prompt1, prompt2, logger); if (!res) throw std::invalid_argument("Error entering passphrase"); newpassphrase = newpassword; if(usercfg.ProxyPath().empty() && !proxy_path.empty()) usercfg.ProxyPath(proxy_path); else { if(usercfg.CertificatePath().empty() && !cert_path.empty()) usercfg.CertificatePath(cert_path); if(usercfg.KeyPath().empty() && !key_path.empty()) usercfg.KeyPath(key_path); } if(usercfg.CACertificatesDirectory().empty()) usercfg.CACertificatesDirectory(ca_dir); Arc::CredentialStore cstore(usercfg,Arc::URL("myproxy://"+myproxy_server)); std::map myproxyopt; myproxyopt["username"] = user_name; myproxyopt["password"] = passphrase; myproxyopt["newpassword"] = newpassphrase; if(!cstore.ChangePassword(myproxyopt)) throw std::invalid_argument("Failed to change password MyProxy service"); std::cout << Arc::IString("Succeeded to change password on MyProxy server") << std::endl; return true; } } catch (std::exception& err) { logger.msg(Arc::ERROR, err.what()); tls_process_error(logger); return false; } //If the "DESTROY" myproxy command is given, try to get the //information about the existence of stored credentials //on the myproxy server. 
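// Condensed sketch of the option map handed to Arc::CredentialStore by the
// MyProxy branches of this function (INFO and NEWPASS above, DESTROY, GET and
// PUT below). The keys are the ones used in those branches; the helper name
// and the literal values are illustrative only.
#include <map>
#include <string>

static std::map<std::string, std::string>
myproxy_options_example(const std::string& user, const std::string& pass) {
  std::map<std::string, std::string> opt;
  opt["username"] = user;                 // all commands
  opt["password"] = pass;                 // newpass/destroy/get/put
  // opt["newpassword"]       = ...;      // newpass only: the replacement passphrase
  // opt["lifetime"]          = "43200";  // get/put: delegated lifetime in seconds
  // opt["retriever_trusted"] = ...;      // put with -R: allow retrieval without passphrase
  // opt["vomsname0"]         = "nordugrid.org"; // get: VO names, one per sequence digit
  // opt["vomses0"]           = ...;             // get: matching vomses lines
  return opt;
}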
try { if (myproxy_command == "destroy" || myproxy_command == "DESTROY" || myproxy_command == "Destroy") { if (myproxy_server.empty()) throw std::invalid_argument("URL of MyProxy server is missing"); if(user_name.empty()) { user_name = get_cert_dn(proxy_path); } if (user_name.empty()) throw std::invalid_argument("Username to MyProxy server is missing"); std::string prompt1 = "MyProxy server"; char password[256]; std::string passphrase; int res = input_password(password, 256, false, prompt1, "", logger); if (!res) throw std::invalid_argument("Error entering passphrase"); passphrase = password; std::string respinfo; if(usercfg.ProxyPath().empty() && !proxy_path.empty()) usercfg.ProxyPath(proxy_path); else { if(usercfg.CertificatePath().empty() && !cert_path.empty()) usercfg.CertificatePath(cert_path); if(usercfg.KeyPath().empty() && !key_path.empty()) usercfg.KeyPath(key_path); } if(usercfg.CACertificatesDirectory().empty()) usercfg.CACertificatesDirectory(ca_dir); Arc::CredentialStore cstore(usercfg,Arc::URL("myproxy://"+myproxy_server)); std::map myproxyopt; myproxyopt["username"] = user_name; myproxyopt["password"] = passphrase; if(!cstore.Destroy(myproxyopt)) throw std::invalid_argument("Failed to destroy credential on MyProxy service"); std::cout << Arc::IString("Succeeded to destroy credential on MyProxy server") << std::endl; return true; } } catch (std::exception& err) { logger.msg(Arc::ERROR, err.what()); tls_process_error(logger); return false; } //If the "GET" myproxy command is given, try to get a delegated //certificate from the myproxy server. //For "GET" command, certificate and key are not needed, and //anonymous GSSAPI is used (GSS_C_ANON_FLAG) try { if (myproxy_command == "get" || myproxy_command == "GET" || myproxy_command == "Get") { if (myproxy_server.empty()) throw std::invalid_argument("URL of MyProxy server is missing"); if(user_name.empty()) { user_name = get_cert_dn(proxy_path); } if (user_name.empty()) throw std::invalid_argument("Username to MyProxy server is missing"); std::string prompt1 = "MyProxy server"; char password[256]; std::string passphrase = password; if(!use_empty_passphrase) { int res = input_password(password, 256, false, prompt1, "", logger); if (!res) throw std::invalid_argument("Error entering passphrase"); passphrase = password; } std::string proxy_cred_str_pem; Arc::initializeCredentialsType cred_type(Arc::initializeCredentialsType::SkipCredentials); Arc::UserConfig usercfg_tmp(cred_type); usercfg_tmp.CACertificatesDirectory(usercfg.CACertificatesDirectory()); Arc::CredentialStore cstore(usercfg_tmp,Arc::URL("myproxy://"+myproxy_server)); std::map myproxyopt; myproxyopt["username"] = user_name; myproxyopt["password"] = passphrase; myproxyopt["lifetime"] = myproxy_period; // According to the protocol of myproxy, the "Get" command can // include the information about vo name, so that myproxy server // can contact voms server to retrieve AC for myproxy client // See 2.4 of http://grid.ncsa.illinois.edu/myproxy/protocol/ // "When VONAME appears in the message, the server will generate VOMS // proxy certificate using VONAME and VOMSES, or the server's VOMS server information." char seq = '0'; for (std::list::iterator it = vomslist.begin(); it != vomslist.end(); it++) { size_t p; std::string voms_server; p = (*it).find(":"); voms_server = (p == std::string::npos) ? 
(*it) : (*it).substr(0, p); myproxyopt[std::string("vomsname").append(1, seq)] = voms_server; seq++; } seq = '0'; // vomses can be specified, so that myproxy server could use it to contact voms server std::list vomses; // vomses --- Store matched vomses lines, only the //vomses line that matches the specified voms name is included. std::map > > matched_voms_line; std::multimap server_command_map; find_matched_vomses(matched_voms_line, server_command_map, vomses, vomslist, vomses_path, usercfg, logger); for (std::list::iterator it = vomses.begin(); it != vomses.end(); it++) { std::string vomses_line; vomses_line = (*it); myproxyopt[std::string("vomses").append(1, seq)] = vomses_line; seq++; } if(!cstore.Retrieve(myproxyopt,proxy_cred_str_pem)) throw std::invalid_argument("Failed to retrieve proxy from MyProxy service"); write_proxy_file(proxy_path,proxy_cred_str_pem); //Assign proxy_path to cert_path and key_path, //so the later voms functionality can use the proxy_path //to create proxy with voms AC extension. In this //case, "--cert" and "--key" is not needed. cert_path = proxy_path; key_path = proxy_path; std::cout << Arc::IString("Succeeded to get a proxy in %s from MyProxy server %s", proxy_path, myproxy_server) << std::endl; return true; } } catch (std::exception& err) { logger.msg(Arc::ERROR, err.what()); tls_process_error(logger); return false; } //Delegate the former self-delegated credential to //myproxy server try { if (myproxy_command == "put" || myproxy_command == "PUT" || myproxy_command == "Put") { if (myproxy_server.empty()) throw std::invalid_argument("URL of MyProxy server is missing"); if(user_name.empty()) { user_name = get_cert_dn(proxy_path); } if (user_name.empty()) throw std::invalid_argument("Username to MyProxy server is missing"); std::string prompt1 = "MyProxy server"; std::string prompt2 = "MyProxy server"; char password[256]; std::string passphrase; if(retrievable_by_cert.empty()) { int res = input_password(password, 256, true, prompt1, prompt2, logger); if (!res) throw std::invalid_argument("Error entering passphrase"); passphrase = password; } std::string proxy_cred_str_pem; std::ifstream proxy_cred_file(proxy_path.c_str()); if(!proxy_cred_file) throw std::invalid_argument("Failed to read proxy file "+proxy_path); std::getline(proxy_cred_file,proxy_cred_str_pem,'\0'); if(proxy_cred_str_pem.empty()) throw std::invalid_argument("Failed to read proxy file "+proxy_path); proxy_cred_file.close(); usercfg.ProxyPath(proxy_path); if(usercfg.CACertificatesDirectory().empty()) { usercfg.CACertificatesDirectory(ca_dir); } Arc::CredentialStore cstore(usercfg,Arc::URL("myproxy://"+myproxy_server)); std::map myproxyopt; myproxyopt["username"] = user_name; myproxyopt["password"] = passphrase; myproxyopt["lifetime"] = myproxy_period; if(!retrievable_by_cert.empty()) { myproxyopt["retriever_trusted"] = retrievable_by_cert; } if(!cstore.Store(myproxyopt,proxy_cred_str_pem,true,proxy_start,proxy_period)) throw std::invalid_argument("Failed to delegate proxy to MyProxy service"); remove_proxy_file(proxy_path); std::cout << Arc::IString("Succeeded to put a proxy onto MyProxy server") << std::endl; return true; } } catch (std::exception& err) { logger.msg(Arc::ERROR, err.what()); tls_process_error(logger); remove_proxy_file(proxy_path); return false; } return false; } static bool find_matched_vomses(std::map > > &matched_voms_line /*output*/, std::multimap& server_command_map /*output*/, std::list& vomses /*output*/, std::list& vomslist, std::string& vomses_path, Arc::UserConfig& 
usercfg, Arc::Logger& logger) { //Parse the voms server and command from command line for (std::list::iterator it = vomslist.begin(); it != vomslist.end(); it++) { size_t p; std::string voms_server; std::string command; p = (*it).find(":"); //here user could give voms name or voms nick name voms_server = (p == std::string::npos) ? (*it) : (*it).substr(0, p); command = (p == std::string::npos) ? "" : (*it).substr(p + 1); server_command_map.insert(std::pair(voms_server, command)); } //Parse the 'vomses' file to find configure lines corresponding to //the information from the command line if (vomses_path.empty()) vomses_path = usercfg.VOMSESPath(); if (vomses_path.empty()) { logger.msg(Arc::ERROR, "$X509_VOMS_FILE, and $X509_VOMSES are not set;\nUser has not specify the location for vomses information;\nThere is also not vomses location information in user's configuration file;\nCannot find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses, and the location at the corresponding sub-directory"); return false; } //the 'vomses' location could be one single files; //or it could be a directory which includes multiple files, such as 'vomses/voA', 'vomses/voB', etc. //or it could be a directory which includes multiple directories that includes multiple files, //such as 'vomses/atlas/voA', 'vomses/atlas/voB', 'vomses/alice/voa', 'vomses/alice/vob', //'vomses/extra/myprivatevo', 'vomses/mypublicvo' std::vector vomses_files; //If the location is a file if(is_file(vomses_path)) vomses_files.push_back(vomses_path); //If the locaton is a directory, all the files and directories will be scanned //to find the vomses information. The scanning will not stop until all of the //files and directories are all scanned. 
else { std::vector files; files = search_vomses(vomses_path); if(!files.empty())vomses_files.insert(vomses_files.end(), files.begin(), files.end()); files.clear(); } for(std::vector::iterator file_i = vomses_files.begin(); file_i != vomses_files.end(); file_i++) { std::string vomses_file = *file_i; std::ifstream in_f(vomses_file.c_str()); std::string voms_line; while (true) { voms_line.clear(); std::getline(in_f, voms_line, '\n'); if (voms_line.empty()) break; if((voms_line.size() >= 1) && (voms_line[0] == '#')) continue; bool has_find = false; //boolean value to record if the vomses server information has been found in this vomses line std::vector voms_tokens; Arc::tokenize(voms_line,voms_tokens," \t","\""); if(voms_tokens.size() != VOMS_LINE_NUM) { // Warning: malformed voms line logger.msg(Arc::WARNING, "VOMS line contains wrong number of tokens (%u expected): \"%s\"", (unsigned int)VOMS_LINE_NUM, voms_line); } if(voms_tokens.size() > VOMS_LINE_NAME) { std::string str = voms_tokens[VOMS_LINE_NAME]; for (std::multimap::iterator it = server_command_map.begin(); it != server_command_map.end(); it++) { std::string voms_server = (*it).first; if (str == voms_server) { matched_voms_line[voms_server].push_back(voms_tokens); vomses.push_back(voms_line); has_find = true; break; }; }; }; if(!has_find) { //you can also use the nick name of the voms server if(voms_tokens.size() > VOMS_LINE_NAME) { std::string str1 = voms_tokens[VOMS_LINE_NAME]; for (std::multimap::iterator it = server_command_map.begin(); it != server_command_map.end(); it++) { std::string voms_server = (*it).first; if (str1 == voms_server) { matched_voms_line[voms_server].push_back(voms_tokens); vomses.push_back(voms_line); break; }; }; }; }; }; };//end of scanning all of the vomses files //Judge if we can not find any of the voms server in the command line from 'vomses' file //if(matched_voms_line.empty()) { // logger.msg(Arc::ERROR, "Cannot get voms server information from file: %s", vomses_path); // throw std::runtime_error("Cannot get voms server information from file: " + vomses_path); //} //if (matched_voms_line.size() != server_command_map.size()) for (std::multimap::iterator it = server_command_map.begin(); it != server_command_map.end(); it++) if (matched_voms_line.find((*it).first) == matched_voms_line.end()) logger.msg(Arc::ERROR, "Cannot get VOMS server %s information from the vomses files", (*it).first); return true; } nordugrid-arc-5.4.2/src/clients/credentials/PaxHeaders.7502/arcproxy.1.in0000644000000000000000000000012613066751223024267 xustar000000000000000027 mtime=1490801299.996866 29 atime=1513200652.01364981 30 ctime=1513200664.461802055 nordugrid-arc-5.4.2/src/clients/credentials/arcproxy.1.in0000644000175000002070000002665013066751223024343 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH APPROXY 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arcproxy \- ARC Credentials Proxy generation utility .SH SYNOPSIS .B arcproxy [\fIOPTION\fR] .SH DESCRIPTION .\" Add any additional description here .PP arcproxy generates proxy credentials (general proxy certificate, or proxy certificate with VOMS AC extenstion) from private key and certificate of user. 
.SH OPTIONS .TP \fB\-h\fR prints short usage description .TP \fB\-P\fR \fIfilename\fR location of the generated proxy file .TP \fB\-C\fR location of X509 certificate file, the file can be either pem, der, or pkcs12 formated; if this option is not set, then env X509_USER_CERT will be searched; if X509_USER_CERT env is not set, then certificatepath item in client.conf will be searched; if the location still is not found, then ~/.arc/, ~/.globus/, ./etc/arc, and ./ will be searched. .TP \fB\-K\fR location of private key file, if the certificate is in pkcs12 format, then no need to give private key; if this option is not set, then env X509_USER_KEY will be searched; if X509_USER_KEY env is not set, then keypath item in client.conf will be searched; if the location still is not found, then ~/.arc/, ~/.globus/, ./etc/arc, and ./ will be searched. .TP \fB\-T\fR path to trusted certificate directory, only needed for VOMS client functionality; if this option is not set, then env X509_CERT_DIR will be searched; if X509_CERT_DIR env is not set, then cacertificatesdirectory item in client.conf will be searched. .TP \fB\-s\fR path to top directory of VOMS *.lsc files, only needed for VOMS client functionality .TP \fB\-V\fR path to VOMS server configuration file, only needed for VOMS client functionality if the path is a directory rather than a file, all of the files under this directory will be searched .TP \fB\-S\fR voms<:command>. Specify VOMS server. :command is optional, and is used to ask for specific attributes(e.g: roles) command option is: all --- put all of this DN's attributes into AC; list ---list all of the DN's attribute,will not create AC extension; /Role=yourRole --- specify the role, if this DN has such a role, the role will be put into AC /voname/groupname/Role=yourRole --- specify the vo,group and role if this DN has such a role, the role will be put into AC .TP \fB\-o\fR group<:role>. Specify ordering of attributes. Example: --order /knowarc.eu/coredev:Developer,/knowarc.eu/testers:Tester or: --order /knowarc.eu/coredev:Developer --order /knowarc.eu/testers:Tester Note that it does not make sense to specify the order if you have two or more different VOMS server specified .TP \fB\-G\fR use GSI communication protocol for contacting VOMS services .TP \fB\-H\fR use HTTP communication protocol for contacting VOMS services that provide RESTful access Note for RESTful access, 'list' command and multiple VOMS server are not supported .TP \fB\-O\fR this option is not functional anymore (old GSI proxies are not supported) .TP \fB\-I\fR print all information about this proxy. In order to show the Identity (DN without CN as subfix for proxy) of the certificate, the 'trusted certdir' is needed. .TP \fB\-i\fR print selected information about this proxy. Currently following information items are supported: \fBsubject\fR - subject name of proxy certificate. \fBidentity\fR - identity subject name of proxy certificate. \fBissuer\fR - issuer subject name of proxy certificate. \fBca\fR - subject name of CA which issued initial certificate. \fBpath\fR - file system path to file containing proxy. \fBtype\fR - type of proxy certificate. \fBvalidityStart\fR - timestamp when proxy validity starts. \fBvalidityEnd\fR - timestamp when proxy validity ends. \fBvalidityPeriod\fR - duration of proxy validity in seconds. \fBvalidityLeft\fR - duration of proxy validity left in seconds. \fBvomsVO\fR - VO name represented by VOMS attribute. \fBvomsSubject\fR - subject of certificate for which VOMS attribute is issued. 
\fBvomsIssuer\fR - subject of service which issued VOMS certificate. \fBvomsACvalidityStart\fR - timestamp when VOMS attribute validity starts. \fBvomsACvalidityEnd\fR - timestamp when VOMS attribute validity ends. \fBvomsACvalidityPeriod\fR - duration of VOMS attribute validity in seconds. \fBvomsACvalidityLeft\fR - duration of VOMS attribute validity left in seconds. \fBproxyPolicy\fR \fBkeybits\fR - size of proxy certificate key in bits. \fBsigningAlgorithm\fR - algorithm used to sign proxy certificate. Items are printed in requested order and are separated by newline. If item has multiple values they are printed in same line separated by |. .TP \fB\-r\fR Remove the proxy file. .TP \fB\-U\fR Username to myproxy server. .TP \fB\-N\fR don't prompt for a credential passphrase, when retrieve a credential from on MyProxy server. The precondition of this choice is the credential is PUT onto the MyProxy server without a passphrase by using -R (--retrievable_by_cert) option when being PUTing onto Myproxy server. This option is specific for the GET command when contacting Myproxy server. .TP \fB\-R\fR Allow specified entity to retrieve credential without passphrase. This option is specific for the PUT command when contacting Myproxy server. .TP \fB\-L\fR hostname of myproxy server optionally followed by colon and port number, e.g. example.org:7512. If the port number has not been specified, 7512 is used by default. .TP \fB\-M\fR command to myproxy server. The command can be PUT and GET. PUT/put -- put a delegated credential to myproxy server; GET/get -- get a delegated credential from myproxy server, credential (certificate and key) is not needed in this case; myproxy functionality can be used together with VOMS functionality. voms and vomses can be used for Get command if VOMS attributes is required to be included in the proxy. .TP \fB\-F\fR use NSS credential DB in default Mozilla profiles, including Firefox, Seamonkey and Thunderbird. .TP \fB\-c\fR constraints of proxy certificate. Currently following constraints are supported: \fBvalidityStart=time\fR - time when certificate becomes valid. Default is now. \fBvalidityEnd=time\fR - time when certificate becomes invalid. Default is 43200 (12 hours) from start for local proxy and 7 days for delegated to MyProxy. \fBvalidityPeriod=time\fR - for how long certificate is valid. Default is 43200 (12 hours)for local proxy and 7 days for delegated to MyProxy. \fBvomsACvalidityPeriod=time\fR - for how long the AC is valid. Default is shorter of validityPeriod and 12 hours. \fBmyproxyvalidityPeriod=time\fR - lifetime of proxies delegated by myproxy server. Default is shorter of validityPeriod and 12 hours. \fBproxyPolicy=policy content\fR - assigns specified string to proxy policy to limit it's functionality. \fBkeybits=number\fR - length of the key to generate. Default is 1024 bits. Special value 'inherit' is to use key length of signing certificate. \fBsigningAlgorithm=name\fR - signing algorithm to use for signing public key of proxy. Default is sha1. Possible values are sha1, sha2 (alias for sha256), sha224, sha256, sha384, sha512 and inherit (use algorithm of signing certificate). .TP \fB\-p\fR password destination=password source. Supported password destinations are: \fBkey\fR - for reading private key \fBmyproxy\fR - for accessing credentials at MyProxy service \fBmyproxynew\fR - for creating credentials at MyProxy service \fBall\fR - for any purspose. 
Supported password sources are: \fBquoted string ("password")\fR - explicitly specified password \fBint\fR - interactively request password from console \fBstdin\fR - read password from standard input delimited by newline \fBfile:filename\fR - read password from file named filename \fBstream:#\fR - read password from input stream number #. Currently only 0 (standard input) is supported. .TP \fB\-t\fR timeout in seconds (default 20) .TP \fB\-z\fR configuration file (default ~/.arc/client.conf) .TP \fB\-d\fR level of information printed. Possible values are DEBUG, VERBOSE, INFO, WARNING, ERROR and FATAL. .TP \fB\-v\fR print version information .PP If location of certificate and key are not exlicitly specified they are looked for in following location and order: Key/certificate paths specified by the environment variables X509_USER_KEY and X509_USER_CERT respectively. Paths specified in configuration file. ~/.arc/usercert.pem and ~/.arc/userkey.pem for certificate and key respectively. ~/.globus/usercert.pem and ~/.globus/userkey.pem for certificate and key respectively. If destination location of proxy file is not specified, the value of X509_USER_PROXY environment variable is used explicitly. If no value is provided, the default location is used - /x509up_u. Here TEMPORARY DIRECTORY is derived from environment variables TMPDIR, TMP, TEMP or default location /tmp is used. .SH "REPORTING BUGS" Report bugs to http://bugzilla.nordugrid.org/ .SH ENVIRONMENT VARIABLES .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH FILES .TP .B /etc/vomses Common file containing a list of selected VO contact point, one VO per line, for example: .RS .IP """gin"" ""kuiken.nikhef.nl"" ""15050"" ""/O=dutchgrid/O=hosts/OU=nikhef.nl/CN=kuiken.nikhef.nl"" ""gin.ggf.org""" .IP """nordugrid.org"" ""voms.uninett.no"" ""15015"" ""/O=Grid/O=NorduGrid/CN=host/voms.ndgf.org"" ""nordugrid.org""" .RE .TP .B ~/.voms/vomses Same as .B /etc/vomses but located in user's home area. If exists, has precedence over .B /etc/vomses .RS The order of the parsing of vomses location is: .RS 1. command line options .RE .RS 2. client configuration file ~/.arc/client.conf .RE .RS 3. $X509_VOMSES or $X509_VOMS_FILE .RE .RS 4. ~/.arc/vomses .RE .RS 5. ~/.voms/vomses .RE .RS 6. $ARC_LOCATION/etc/vomses (this is for Windows environment) .RE .RS 7. $ARC_LOCATION/etc/grid-security/vomses (this is for Windows environment) .RE .RS 8. $PWD/vomses .RE .RS 9. /etc/vomses .RE .RS 10. /etc/grid-security/vomses .RE .RE .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. 
Please report bugs and feature requests to http://bugzilla.nordugrid.org .SH SEE ALSO .BR arccat (1), .BR arcclean (1), .BR arccp (1), .BR arcget (1), .BR arcinfo (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcrenew (1), .BR arcresub (1), .BR arcresume (1), .BR arcrm (1), .BR arcstat (1), .BR arcsub (1), .BR arcsync (1), .BR arctest (1) nordugrid-arc-5.4.2/src/clients/credentials/PaxHeaders.7502/arcproxy_voms.cpp0000644000000000000000000000012313213442363025341 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200575.379712 29 ctime=1513200664.46380208 nordugrid-arc-5.4.2/src/clients/credentials/arcproxy_voms.cpp0000644000175000002070000002250413213442363025412 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include bool contact_voms_servers(std::map >& vomscmdlist, std::list& orderlist, std::string& vomses_path, bool use_gsi_comm, bool use_http_comm, const std::string& voms_period, Arc::UserConfig& usercfg, Arc::Logger& logger, const std::string& tmp_proxy_path, std::string& vomsacseq) { std::string ca_dir; ca_dir = usercfg.CACertificatesDirectory(); class voms_match: public Arc::VOMSConfig::filter { private: const std::string& voms_; public: virtual bool match(const Arc::VOMSConfigLine& line) const { return ((line.Name() == voms_) || (line.Alias() == voms_)); }; voms_match(const std::string& voms):voms_(voms) { }; }; class vomses_match: public Arc::VOMSConfig::filter { private: const std::map >& vomses_; public: virtual bool match(const Arc::VOMSConfigLine& line) const { // TODO: this will not scale for many voms servers specified at command line for(std::map >::const_iterator voms = vomses_.begin(); voms != vomses_.end(); ++voms) { if((line.Name() == voms->first) || (line.Alias() == voms->first)) return true; }; return false; }; vomses_match(const std::map >& vomses):vomses_(vomses) { }; }; Arc::VOMSConfig voms_config(vomses_path, vomses_match(vomscmdlist)); if(!voms_config) { // logger logger.msg(Arc::ERROR, "Failed to process VOMS configuration or no suitable configuration lines found."); return false; } //Contact the voms servers to retrieve attribute certificate Arc::MCCConfig cfg; cfg.AddProxy(tmp_proxy_path); cfg.AddCADir(ca_dir); Arc::Period lifetime; if(!voms_period.empty()) { time_t voms_period_sec; if(!Arc::stringto(voms_period,voms_period_sec)) { logger.msg(Arc::ERROR, "Failed to parse requested VOMS lifetime: %s", voms_period); return false; } lifetime = voms_period_sec; } // TODO: group commands by server. Is it really useful? Can it really be used effectively. 
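// Descriptive note (added comment): the loop below walks over the VOMS servers requested on the command line.
// For each server the matching vomses configuration lines are tried in turn; the first line that returns a
// usable attribute certificate has its response appended to vomsacseq, and the remaining lines for that server are skipped.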
// Loop through commands for (std::map >::iterator vomscmd = vomscmdlist.begin(); vomscmd != vomscmdlist.end(); ++vomscmd) { const std::string& voms_server = vomscmd->first; // server name const std::list& voms_commands = vomscmd->second; // command to send bool succeeded = false; int voms_lines_num = 0; // Loop through suitable voms configuration lines for (Arc::VOMSConfig::iterator vomsline = voms_config.First(voms_match(voms_server)); (bool)vomsline; vomsline = vomsline.Next(voms_match(voms_server))) { if(vomsline->Host().empty()) { logger.msg(Arc::ERROR, "Cannot get VOMS server address information from vomses line: \"%s\"", vomsline->Str()); throw std::runtime_error("Cannot get VOMS server address information from vomses line: \"" + vomsline->Str() + "\""); } ++voms_lines_num; logger.msg(Arc::INFO, "Contacting VOMS server (named %s): %s on port: %s", vomsline->Name(), vomsline->Host(), vomsline->Port()); std::cout << Arc::IString("Contacting VOMS server (named %s): %s on port: %s", vomsline->Name(), vomsline->Host(), vomsline->Port()) << std::endl; unsigned int port_num = 0; if(!vomsline->Port().empty()) { if(!Arc::stringto(vomsline->Port(),port_num)) { // Failed to parse port logger.msg(Arc::ERROR, "Failed to parse requested VOMS server port number: %s", vomsline->Port()); continue; } } else { port_num = 8443; // default VOMS port ? } if(use_http_comm) { // RESTful interface std::list fqans; for(std::list::const_iterator c_it = voms_commands.begin(); c_it != voms_commands.end(); ++c_it) { if (c_it->empty()) { // ?? fqans.push_back("/"+voms_name); } else if (Arc::lower(*c_it) == "all") { // ?? fqans.push_back("/"+voms_name); } else if (Arc::lower(*c_it) == "list") { // N // not supported logger.msg(Arc::ERROR, "List functionality is not supported for RESTful VOMS interface"); continue; } else { fqans.push_back(*c_it); // ?? } } Arc::ClientVOMSRESTful client(cfg, vomsline->Host(), port_num, Arc::TLSSec, usercfg.Timeout()/* todo: , proxy_host, proxy_port*/); std::string response; Arc::MCC_Status status = client.process(fqans, lifetime, response); if (!status) { std::cout << Arc::IString("The VOMS server with the information:\n\t%s\ncan not be reached, please make sure it is available.", vomsline->Str()) << std::endl; std::cout << Arc::IString("Collected error is:\n\t%s",(std::string)status) << std::endl; continue; //There could be another voms replicated server with the same name exists } if (response.empty()) { logger.msg(Arc::ERROR, "No valid response from VOMS server: %s", vomsline->Name()); std::cout << Arc::IString("Collected error is:\n\t%s",(std::string)status) << std::endl; continue; } vomsacseq.append(response); succeeded = true; break; // ?? 
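// Added note: the RESTful branch ends here - the response collected above is kept in vomsacseq
// and the break above skips the remaining vomses lines for this server.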
} else { // old interface std::list commands; for(std::list::const_iterator c_it = voms_commands.begin(); c_it != voms_commands.end(); ++c_it) { if (c_it->empty()) { commands.push_back(Arc::VOMSCommand().GetGroup("/"+vomsline->Name())); } else if (Arc::lower(*c_it) == "all") { commands.push_back(Arc::VOMSCommand().GetEverything()); } else if (Arc::lower(*c_it) == "list") { // N // not supported logger.msg(Arc::ERROR, "List functionality is not supported for legacy VOMS interface"); continue; } else { std::string::size_type pos = c_it->find("/Role="); if (pos == 0) { commands.push_back(Arc::VOMSCommand().GetRole(c_it->substr(pos+6))); } else if((pos != std::string::npos) && (pos > 0)) { commands.push_back(Arc::VOMSCommand().GetRoleInGroup(c_it->substr(0, pos),c_it->substr(pos + 6))); } else if((*c_it)[0] == '/') { commands.push_back(Arc::VOMSCommand().GetGroup(*c_it)); } else { // unexpected logger.msg(Arc::ERROR, "Failed to parse VOMS command: %s",*c_it); continue; } } } std::list > ordering; for(std::list::iterator o_it = orderlist.begin(); o_it != orderlist.end(); ++o_it) { std::string::size_type pos = o_it->find(':'); if(pos == std::string::npos) { ordering.push_back(std::pair(*o_it,"")); } else { ordering.push_back(std::pair(o_it->substr(0,pos),o_it->substr(pos+1))); } } //logger.msg(Arc::VERBOSE, "Try to get attribute from VOMS server with order: %s", ordering); //logger.msg(Arc::VERBOSE, "Message sent to VOMS server %s is: %s", voms_name, send_msg); Arc::ClientVOMS client(cfg, vomsline->Host(), port_num, use_gsi_comm ? Arc::GSISec : Arc::TLSSec, usercfg.Timeout()); std::string response; Arc::MCC_Status status = client.process(commands, ordering, lifetime, response); if (!status) { std::cout << Arc::IString("The VOMS server with the information:\n\t%s\ncan not be reached, please make sure it is available.", vomsline->Str()) << std::endl; std::cout << Arc::IString("Collected error is:\n\t%s",(std::string)status) << std::endl; continue; //There could be another voms replicated server with the same name exists } if (response.empty()) { logger.msg(Arc::ERROR, "No valid response from VOMS server: %s", vomsline->Name()); std::cout << Arc::IString("Collected error is:\n\t%s",(std::string)status) << std::endl; continue; } vomsacseq.append(response); succeeded = true; break; } } // voms lines if(succeeded == false) { if(voms_lines_num > 1) { std::cout << Arc::IString("There are %d servers with the same name: %s in your vomses file, but all of them can not be reached, or can not return valid message.", voms_lines_num, voms_server) << std::endl; } return false; } } // voms servers return true; } nordugrid-arc-5.4.2/src/clients/credentials/PaxHeaders.7502/README0000644000000000000000000000012412015720323022575 xustar000000000000000027 mtime=1345822931.976791 27 atime=1513200575.379712 30 ctime=1513200664.458802019 nordugrid-arc-5.4.2/src/clients/credentials/README0000644000175000002070000000225212015720323022643 0ustar00mockbuildmock00000000000000User tools for manipulating user credentials. /-------/ arcproxy: /-------/ arcproxyalt: The alternative of arcproxy, by using the emi authentication library for the credential manipulation. 
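A minimal illustrative invocation of arcproxy (a sketch, not taken from this distribution; it assumes a valid user certificate and key are already installed in one of the standard locations listed in arcproxy(1)):
./arcproxy -c validityPeriod=43200 -d VERBOSE
Here -c sets a proxy constraint (the validity period in seconds) and -d selects the log level, both documented in arcproxy(1).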
/-------/ arcslcs: examples ./arcslcs -S https://127.0.0.1:60000/slcs -I https://idp.testshib.org/idp/shibboleth -U myself -P myself -D /home/wzqiang/arc-0.9/src/clients/credentials -z client.conf ./arcslcs -S https://127.0.0.1:60000/slcs -I https://squark.uio.no/idp/shibboleth -U staff -P 123456 -D /home/wzqiang/arc-0.9/src/clients/credentials -z client.conf # use with Confusa and WebSSO authentication method ./arcslcs -c -m web -S https://confusatest.pdc.kth.se/slcs -I https://openidp.feide.no -U test -P pass -D /home/tzangerl/.globus -z client.conf # use with Confusa and OAuth authentication method ./arcslcs -c -m oauth -S https://confusatest.pdc.kth.se/slcs -I https://aitta2.funet.fi/idp/shibbolet -U test -P pass -D /home7tzangerl/.globus -z client.conf All the options can also be configured in the client.conf file. For testing against the test-purpose Identity Provider. The following username can be used: staff, researcher, librarian, binduser with the same password "123456" nordugrid-arc-5.4.2/src/clients/PaxHeaders.7502/saml0000644000000000000000000000013213214316030020274 xustar000000000000000030 mtime=1513200664.554803193 30 atime=1513200668.719854133 30 ctime=1513200664.554803193 nordugrid-arc-5.4.2/src/clients/saml/0000755000175000002070000000000013214316030020417 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/clients/saml/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515022421 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200593.364932513 30 ctime=1513200664.550803144 nordugrid-arc-5.4.2/src/clients/saml/Makefile.am0000644000175000002070000000123512052416515022464 0ustar00mockbuildmock00000000000000bin_PROGRAMS = saml_assertion_init man_MANS = saml_assertion_init.1 CLILIBS = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la saml_assertion_init_SOURCES = saml_assertion_init.cpp saml_assertion_init_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(XMLSEC_CFLAGS) $(AM_CXXFLAGS) saml_assertion_init_LDADD = $(CLILIBS) $(GLIBMM_LIBS) nordugrid-arc-5.4.2/src/clients/saml/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315721022424 xustar000000000000000030 mtime=1513200593.407933039 30 atime=1513200652.029650005 30 ctime=1513200664.551803156 nordugrid-arc-5.4.2/src/clients/saml/Makefile.in0000644000175000002070000007160713214315721022505 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ bin_PROGRAMS = saml_assertion_init$(EXEEXT) subdir = src/clients/saml DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/saml_assertion_init.1.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = saml_assertion_init.1 CONFIG_CLEAN_VPATH_FILES = am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)" PROGRAMS = $(bin_PROGRAMS) am_saml_assertion_init_OBJECTS = \ saml_assertion_init-saml_assertion_init.$(OBJEXT) saml_assertion_init_OBJECTS = $(am_saml_assertion_init_OBJECTS) am__DEPENDENCIES_1 = saml_assertion_init_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) saml_assertion_init_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(saml_assertion_init_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(saml_assertion_init_SOURCES) DIST_SOURCES = $(saml_assertion_init_SOURCES) am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = 
"" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' man1dir = $(mandir)/man1 NROFF = nroff MANS = $(man_MANS) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ 
GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = 
@am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ man_MANS = saml_assertion_init.1 CLILIBS = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la saml_assertion_init_SOURCES = saml_assertion_init.cpp saml_assertion_init_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(XMLSEC_CFLAGS) $(AM_CXXFLAGS) saml_assertion_init_LDADD = $(CLILIBS) $(GLIBMM_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/clients/saml/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/clients/saml/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): saml_assertion_init.1: $(top_builddir)/config.status $(srcdir)/saml_assertion_init.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-binPROGRAMS: $(bin_PROGRAMS) @$(NORMAL_INSTALL) test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)" @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p || test -f $$p1; \ then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-binPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(bindir)" && rm -f $$files clean-binPROGRAMS: @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list saml_assertion_init$(EXEEXT): $(saml_assertion_init_OBJECTS) $(saml_assertion_init_DEPENDENCIES) @rm -f saml_assertion_init$(EXEEXT) $(saml_assertion_init_LINK) $(saml_assertion_init_OBJECTS) $(saml_assertion_init_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/saml_assertion_init-saml_assertion_init.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< saml_assertion_init-saml_assertion_init.o: saml_assertion_init.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(saml_assertion_init_CXXFLAGS) $(CXXFLAGS) -MT saml_assertion_init-saml_assertion_init.o -MD -MP -MF $(DEPDIR)/saml_assertion_init-saml_assertion_init.Tpo -c -o saml_assertion_init-saml_assertion_init.o `test -f 'saml_assertion_init.cpp' || echo '$(srcdir)/'`saml_assertion_init.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/saml_assertion_init-saml_assertion_init.Tpo $(DEPDIR)/saml_assertion_init-saml_assertion_init.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='saml_assertion_init.cpp' object='saml_assertion_init-saml_assertion_init.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(saml_assertion_init_CXXFLAGS) $(CXXFLAGS) -c -o saml_assertion_init-saml_assertion_init.o `test -f 'saml_assertion_init.cpp' || echo '$(srcdir)/'`saml_assertion_init.cpp saml_assertion_init-saml_assertion_init.obj: saml_assertion_init.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(saml_assertion_init_CXXFLAGS) $(CXXFLAGS) -MT saml_assertion_init-saml_assertion_init.obj -MD -MP -MF $(DEPDIR)/saml_assertion_init-saml_assertion_init.Tpo -c -o saml_assertion_init-saml_assertion_init.obj `if test -f 'saml_assertion_init.cpp'; then $(CYGPATH_W) 'saml_assertion_init.cpp'; else $(CYGPATH_W) '$(srcdir)/saml_assertion_init.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/saml_assertion_init-saml_assertion_init.Tpo $(DEPDIR)/saml_assertion_init-saml_assertion_init.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='saml_assertion_init.cpp' object='saml_assertion_init-saml_assertion_init.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(saml_assertion_init_CXXFLAGS) $(CXXFLAGS) -c -o saml_assertion_init-saml_assertion_init.obj `if test -f 'saml_assertion_init.cpp'; then $(CYGPATH_W) 'saml_assertion_init.cpp'; else $(CYGPATH_W) '$(srcdir)/saml_assertion_init.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man1: $(man_MANS) @$(NORMAL_INSTALL) test -z "$(man1dir)" || $(MKDIR_P) "$(DESTDIR)$(man1dir)" @list=''; test -n "$(man1dir)" || exit 0; \ { for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { 
\ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \ done; } uninstall-man1: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man1dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ test -z "$$files" || { \ echo " ( cd '$(DESTDIR)$(man1dir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(man1dir)" && rm -f $$files; } ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @list='$(MANS)'; if test -n "$$list"; then \ list=`for p in $$list; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \ if test -n "$$list" && \ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) 
$(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) $(MANS) installdirs: for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-binPROGRAMS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man1 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-binPROGRAMS uninstall-man uninstall-man: uninstall-man1 .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \ clean-generic clean-libtool ctags distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-binPROGRAMS install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-man1 install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags uninstall \ uninstall-am uninstall-binPROGRAMS uninstall-man \ uninstall-man1 # Tell 
versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/clients/saml/PaxHeaders.7502/saml_assertion_init.1.in0000644000000000000000000000012712574536660025137 xustar000000000000000027 mtime=1441971632.610157 30 atime=1513200652.045650201 30 ctime=1513200664.552803168 nordugrid-arc-5.4.2/src/clients/saml/saml_assertion_init.1.in0000644000175000002070000000452112574536660025203 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH VOMS_ASSERTION_INIT 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME saml_assertion_init \- ARC client to voms saml service, or arc aa service .SH DESCRIPTION The .B saml_assertion_init command is a prototype client which is used for sending an attribute query request to a voms saml service or arc aa service. .SH SYNOPSIS .B saml_assertion_init [options] [filename ...] saml_assertion_init \-c saml_assertion_init_client.xml \-d DEBUG https://squark.uio.no:8443/voms/saml/knowarc/services/AttributeAuthorityPortType .SH OPTIONS .IP "\fB\-c\fR [\-]\fIconfigurefile\fR, \fB\-conf\fR [\-]\fIconfigurefile\fR" specify the configuration file of this client .IP "\fB\-d\fR \fIdebuglevel\fR, \fB\-debug\fR \fIdebuglevel\fR" FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG .IP "\fB\-v\fR, \fB\-version\fR" print version information .IP "\fB\-?\fR, \fB\-help\fR" print help .LP .SH ARGUMENTS .IP "\fBserviceurl\fR ..." url to saml aa service .LP .SH EXTENDED DESCRIPTION The .B saml_assertion_init command is a prototype client which is used for sending an attribute query request to a saml aa service. .IR Examples : .IP "\fBsaml_assertion_init \-c saml_assertion_init_client.xml \-d DEBUG https://squark.uio.no:8443/voms/saml/knowarc/services/AttributeAuthorityPortType\fR ..." send the attribute query request to https://squark.uio.no:8443/voms/saml/knowarc/services/AttributeAuthorityPortType .LP .SH FILES .TP .B .arc/config.xml Some options can be given default values by specifying them in the ARC configuration file. .SH ENVIRONMENT VARIABLES .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. 
Please report bugs and feature requests to http://bugzilla.nordugrid.org nordugrid-arc-5.4.2/src/clients/saml/PaxHeaders.7502/saml_assertion_init.cpp0000644000000000000000000000012412301125744025132 xustar000000000000000027 mtime=1392815076.546455 27 atime=1513200575.366712 30 ctime=1513200664.554803193 nordugrid-arc-5.4.2/src/clients/saml/saml_assertion_init.cpp0000644000175000002070000001451612301125744025206 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define SAML_NAMESPACE "urn:oasis:names:tc:SAML:2.0:assertion" #define SAMLP_NAMESPACE "urn:oasis:names:tc:SAML:2.0:protocol" #define XENC_NAMESPACE "http://www.w3.org/2001/04/xmlenc#" #define DSIG_NAMESPACE "http://www.w3.org/2000/09/xmldsig#" int main(int argc, char* argv[]){ setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "saml_assertion_init"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); Arc::OptionParser options(istring("service_url")); std::string config_path; options.AddOption('c', "config", istring("path to config file"), istring("path"), config_path); std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); bool version = false; options.AddOption('v', "version", istring("print version information"), version); std::list params = options.Parse(argc, argv); if (version) { std::cout << Arc::IString("%s version %s", "saml_assertion_init", VERSION) << std::endl; return 0; } if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug)); try{ if (params.size()!=1) { throw std::invalid_argument("Wrong number of arguments!"); } //Create a SOAP client std::list::iterator it = params.begin(); Arc::URL url(*it++); if(!url) throw std::invalid_argument("Can't parse specified URL"); Arc::MCCConfig cfg; if(config_path != "") cfg.GetOverlay(config_path); Arc::ClientSOAP client(cfg,url); //Compose the soap which include //Use the credential which is configured in MCCConfig for the //credential setup in std::string cert = (std::string)(cfg.overlay["Chain"]["Component"]["CertificatePath"]); std::string key = (std::string)(cfg.overlay["Chain"]["Component"]["KeyPath"]); std::string cafile = (std::string)(cfg.overlay["Chain"]["Component"]["CACertificatePath"]); std::string cadir = (std::string)(cfg.overlay["Chain"]["Component"]["CACertificatesDir"]); Arc::Credential cred(cert, key, cadir, cafile); std::string local_dn_str = cred.GetDN(); std::string local_dn = Arc::convert_to_rdn(local_dn_str); //Compose Arc::NS ns; ns["saml"] = SAML_NAMESPACE; ns["samlp"] = SAMLP_NAMESPACE; Arc::XMLNode attr_query(ns, "samlp:AttributeQuery"); std::string query_id = Arc::UUID(); attr_query.NewAttribute("ID") = query_id; Arc::Time t; std::string current_time = t.str(Arc::UTCTime); attr_query.NewAttribute("IssueInstant") = current_time; attr_query.NewAttribute("Version") = std::string("2.0"); // std::string issuer_name = local_dn; Arc::XMLNode issuer = attr_query.NewChild("saml:Issuer"); issuer = issuer_name; issuer.NewAttribute("Format") = std::string("urn:oasis:names:tc:SAML:1.1:nameid-format:x509SubjectName"); // Arc::XMLNode subject = attr_query.NewChild("saml:Subject"); Arc::XMLNode name_id = 
subject.NewChild("saml:NameID"); name_id.NewAttribute("Format")=std::string("urn:oasis:names:tc:SAML:1.1:nameid-format:x509SubjectName"); name_id = local_dn; //Add one or more s into AttributeQuery here, which means the //Requestor would get these s from AA //TODO //Initialize lib xmlsec Arc::init_xmlsec(); //Put the attribute query into soap message Arc::NS soap_ns; Arc::SOAPEnvelope envelope(soap_ns); envelope.NewChild(attr_query); Arc::PayloadSOAP request(envelope); //Send the soap message to the other end Arc::PayloadSOAP *response = NULL; Arc::MCC_Status status = client.process(&request,&response); if (!response) { logger.msg(Arc::ERROR, "SOAP Request failed: No response"); throw std::runtime_error("SOAP Request failed: No response"); } if (!status) { logger.msg(Arc::ERROR, "SOAP Request failed: Error"); throw std::runtime_error("SOAP Request failed: Error"); } Arc::XMLNode saml_response = (*response).Body().Child(0); if(!saml_response) { logger.msg(Arc::ERROR, "No in SOAP response"); throw std::runtime_error("No in SOAP response"); } Arc::XMLNode saml_assertion = saml_response["Assertion"]; if(!saml_assertion) { logger.msg(Arc::ERROR, "No in SAML response"); throw std::runtime_error("No in SAML response"); } std::string str; saml_assertion.GetXML(str); std::cout<<"SAML assertion: "<"); } else { logger.msg(Arc::ERROR, "Failed to verify the signature under "); throw std::runtime_error("Failed to verify the signature under "); } } catch (std::exception& e){ Arc::final_xmlsec(); // Notify the user about the failure std::cerr << "ERROR: " << e.what() << std::endl; return EXIT_FAILURE; } Arc::final_xmlsec(); return EXIT_SUCCESS; } nordugrid-arc-5.4.2/src/clients/saml/PaxHeaders.7502/README0000644000000000000000000000012411540672103021240 xustar000000000000000027 mtime=1300460611.241814 27 atime=1513200575.374712 30 ctime=1513200664.548803119 nordugrid-arc-5.4.2/src/clients/saml/README0000644000175000002070000000126511540672103021311 0ustar00mockbuildmock00000000000000 ARC SAML Services client ======================== Description ----------- This is a client tool to communicate to SAML-based VOMS services. Implementation notes -------------------- The tool requires 3rd-party xmlsec1 libraries, as well as common ARC libraries found in src/hed/libs Build notes ----------- Due to the dependencies above, it is not feasible to build ARC SAML Services client in a manner independent from the rest of ARC code. In order to build it, obtain full ARC source code tree and run in its root: ./autogen.sh ./configure make install Installation and configuration notes ------------------------------------ None Documentation ------------- Nonenordugrid-arc-5.4.2/src/clients/PaxHeaders.7502/README0000644000000000000000000000012412064073603020306 xustar000000000000000027 mtime=1355839363.780191 27 atime=1513200575.375712 30 ctime=1513200664.380801065 nordugrid-arc-5.4.2/src/clients/README0000644000175000002070000000135212064073603020354 0ustar00mockbuildmock00000000000000 ARC clients =========== ARC provides a number of command line clients that implement interfaces to various Grid services. 
The following distinct client groups exist in the source code: o compute - client for computational job management o cache - client for internal A-REX cache handling service o credentials - user tools for handling user credentials o data - clients for basic file handlng on the Grid o echo - client for ARC Echo service (demo) o jura - A-REX compatible accounting agent for SGAS-LUTS o saml - client tool to communicate to SAML-based VOMS services o srm - ping client for the SRM service o wsrf - command for obtaining WS-ResourceProperties of services nordugrid-arc-5.4.2/src/clients/PaxHeaders.7502/compute0000644000000000000000000000013213214316030021014 xustar000000000000000030 mtime=1513200664.524802826 30 atime=1513200668.719854133 30 ctime=1513200664.524802826 nordugrid-arc-5.4.2/src/clients/compute/0000755000175000002070000000000013214316030021137 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arcresub.cpp0000644000000000000000000000012413213442363023414 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200575.338712 30 ctime=1513200664.518802752 nordugrid-arc-5.4.2/src/clients/compute/arcresub.cpp0000644000175000002070000001662413213442363023472 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include "utils.h" int RUNMAIN(arcresub)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arcresub"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_RESUB, istring("[job ...]")); std::list jobidentifiers = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arcresub", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. 
if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (opt.show_plugins) { std::list types; types.push_back("HED:SubmitterPlugin"); types.push_back("HED:ServiceEndpointRetrieverPlugin"); types.push_back("HED:TargetInformationRetrieverPlugin"); types.push_back("HED:BrokerPlugin"); showplugins("arcresub", types, logger, usercfg.Broker().first); return 0; } if (!checkproxy(usercfg)) { return 1; } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); if (opt.same && opt.notsame) { logger.msg(Arc::ERROR, "--same and --not-same cannot be specified together."); return 1; } for (std::list::const_iterator it = opt.jobidinfiles.begin(); it != opt.jobidinfiles.end(); ++it) { if (!Arc::Job::ReadJobIDsFromFile(*it, jobidentifiers)) { logger.msg(Arc::WARNING, "Cannot read specified jobid file: %s", *it); } } if (opt.timeout > 0) usercfg.Timeout(opt.timeout); if (!opt.broker.empty()) usercfg.Broker(opt.broker); if ((!opt.joblist.empty() || !opt.status.empty()) && jobidentifiers.empty() && opt.clusters.empty()) opt.all = true; if (jobidentifiers.empty() && opt.clusters.empty() && !opt.all) { logger.msg(Arc::ERROR, "No jobs given"); return 1; } std::list selectedURLs = getSelectedURLsFromUserConfigAndCommandLine(usercfg, opt.clusters); std::list rejectManagementURLs = getRejectManagementURLsFromUserConfigAndCommandLine(usercfg, opt.rejectmanagement); std::list jobs; Arc::JobInformationStorage *jobstore = createJobInformationStorage(usercfg); if (jobstore != NULL && !jobstore->IsStorageExisting()) { logger.msg(Arc::ERROR, "Job list file (%s) doesn't exist", usercfg.JobListFile()); delete jobstore; return 1; } if (jobstore == NULL || ( opt.all && !jobstore->ReadAll(jobs, rejectManagementURLs)) || (!opt.all && !jobstore->Read(jobs, jobidentifiers, selectedURLs, rejectManagementURLs))) { logger.msg(Arc::ERROR, "Unable to read job information from file (%s)", usercfg.JobListFile()); delete jobstore; return 1; } if (!opt.all) { for (std::list::const_iterator itJIDAndName = jobidentifiers.begin(); itJIDAndName != jobidentifiers.end(); ++itJIDAndName) { std::cout << Arc::IString("Warning: Job not found in job list: %s", *itJIDAndName) << std::endl; } } Arc::JobSupervisor jobmaster(usercfg, jobs); jobmaster.Update(); jobmaster.SelectValid(); if (!opt.status.empty()) { jobmaster.SelectByStatus(opt.status); } if (jobmaster.GetSelectedJobs().empty()) { std::cout << Arc::IString("No jobs") << std::endl; delete jobstore; return 1; } std::list services = getServicesFromUserConfigAndCommandLine(usercfg, opt.indexurls, opt.qlusters, opt.requestedSubmissionInterfaceName, opt.infointerface); std::list rejectDiscoveryURLs = getRejectDiscoveryURLsFromUserConfigAndCommandLine(usercfg, opt.rejectdiscovery); std::list resubmittedJobs; // same + 2*notsame in {0,1,2}. same and notsame cannot both be true, see above. 
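// Added note: the first argument to Resubmit() below encodes the destination choice taken from the
// command line flags: 0 when neither --same nor --not-same was given, 1 for --same, 2 for --not-same.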
int retval = (int)!jobmaster.Resubmit((int)opt.same + 2*(int)opt.notsame, services, resubmittedJobs, rejectDiscoveryURLs); if (retval == 0 && resubmittedJobs.empty()) { std::cout << Arc::IString("No jobs to resubmit with the specified status") << std::endl; delete jobstore; return 0; } for (std::list::const_iterator it = resubmittedJobs.begin(); it != resubmittedJobs.end(); ++it) { std::cout << Arc::IString("Job submitted with jobid: %s", it->JobID) << std::endl; } if (!resubmittedJobs.empty() && !jobstore->Write(resubmittedJobs)) { std::cout << Arc::IString("Warning: Failed to write job information to file (%s)", usercfg.JobListFile()) << std::endl; std::cout << Arc::IString(" To recover missing jobs, run arcsync") << std::endl; retval = 1; } if (!opt.jobidoutfile.empty() && !Arc::Job::WriteJobIDsToFile(resubmittedJobs, opt.jobidoutfile)) { logger.msg(Arc::WARNING, "Cannot write jobids to file (%s)", opt.jobidoutfile); retval = 1; } std::list notresubmitted = jobmaster.GetIDsNotProcessed(); if (!jobmaster.Cancel()) { retval = 1; } for (std::list::const_iterator it = jobmaster.GetIDsNotProcessed().begin(); it != jobmaster.GetIDsNotProcessed().end(); ++it) { logger.msg(Arc::WARNING, "Resubmission of job (%s) succeeded, but killing the job failed - it will still appear in the job list", *it); } if (!opt.keep) { if (!jobmaster.Clean()) { retval = 1; } for (std::list::const_iterator it = jobmaster.GetIDsNotProcessed().begin(); it != jobmaster.GetIDsNotProcessed().end(); ++it) { logger.msg(Arc::WARNING, "Resubmission of job (%s) succeeded, but cleaning the job failed - it will still appear in the job list", *it); } if (!jobstore->Remove(jobmaster.GetIDsProcessed())) { std::cout << Arc::IString("Warning: Failed removing jobs from file (%s)", usercfg.JobListFile()) << std::endl; std::cout << Arc::IString(" Use arcclean to remove non-existing jobs") << std::endl; retval = 1; } } delete jobstore; if ((resubmittedJobs.size() + notresubmitted.size()) > 1) { std::cout << std::endl << Arc::IString("Job resubmission summary:") << std::endl; std::cout << "-----------------------" << std::endl; std::cout << Arc::IString("%d of %d jobs were resubmitted", resubmittedJobs.size(), resubmittedJobs.size() + notresubmitted.size()) << std::endl; if (!notresubmitted.empty()) { std::cout << Arc::IString("The following %d were not resubmitted", notresubmitted.size()) << std::endl; for (std::list::const_iterator it = notresubmitted.begin(); it != notresubmitted.end(); ++it) { std::cout << *it << std::endl; } } } return retval; } nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712442365766023157 xustar000000000000000027 mtime=1418324982.290198 30 atime=1513200593.020928306 30 ctime=1513200664.497802496 nordugrid-arc-5.4.2/src/clients/compute/Makefile.am0000644000175000002070000000741612442365766023231 0ustar00mockbuildmock00000000000000bin_PROGRAMS = arcsub arcget arcstat arcinfo arckill arcclean arccat arcsync \ arcresub arcresume arcrenew arctest man_MANS = arcsub.1 arcget.1 arcstat.1 arcinfo.1 arckill.1 arcclean.1 arccat.1 \ arcresub.1 arcsync.1 arcresume.1 arcrenew.1 arctest.1 CLILIBS = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la arcsub_SOURCES = arcsub.cpp utils.cpp utils.h 
arcsub_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arcsub_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arctest_SOURCES = arctest.cpp utils.cpp utils.h arctest_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arctest_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arcget_SOURCES = arcget.cpp utils.cpp utils.h arcget_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arcget_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arcstat_SOURCES = arcstat.cpp utils.cpp utils.h arcstat_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arcstat_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arcinfo_SOURCES = arcinfo.cpp utils.cpp utils.h arcinfo_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcinfo_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arckill_SOURCES = arckill.cpp utils.cpp utils.h arckill_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arckill_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arcclean_SOURCES = arcclean.cpp utils.cpp utils.h arcclean_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arcclean_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arccat_SOURCES = arccat.cpp utils.cpp utils.h arccat_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arccat_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arcsync_SOURCES = arcsync.cpp utils.cpp utils.h arcsync_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arcsync_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arcresub_SOURCES = arcresub.cpp utils.cpp utils.h arcresub_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arcresub_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arcrenew_SOURCES = arcrenew.cpp utils.cpp utils.h arcrenew_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arcrenew_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arcresume_SOURCES = arcresume.cpp utils.cpp utils.h arcresume_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arcresume_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arctest.1.in0000644000000000000000000000012712441050167023240 xustar000000000000000027 mtime=1417957495.519061 30 atime=1513200652.253652745 30 ctime=1513200664.510802655 nordugrid-arc-5.4.2/src/clients/compute/arctest.1.in0000644000175000002070000001012112441050167023275 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARCTEST 1 "@DATE@" "NorduGrid ARC 
@VERSION@" "NorduGrid Users Manual" .SH NAME arctest \- ARC Test Suite .SH DESCRIPTION The .B arctest command tests basic ARC client and server setup. .SH SYNOPSIS .B arctest [options] .SH OPTIONS .IP "\fB-c\fR, \fB--cluster\fR=\fIname\fR" select one or more computing elements: \fIname\fR can be an alias for a single CE, a group of CEs or a URL .IP "\fB-g\fR, \fB--index\fR=\fIname\fR" select one or more registries: \fIname\fR can be an alias for a single registry, a group of registries or a URL .IP "\fB-R\fR, \fB--rejectdiscovery\fR=\fIURL\fR" skip the service with the given URL during service discovery .IP "\fB-S\fR, \fB--submissioninterface\fR=\fIInterfaceName\fR" only use this interface for submitting (e.g. org.nordugrid.gridftpjob, org.ogf.glue.emies.activitycreation, org.ogf.bes) .IP "\fB-I\fR, \fB--infointerface\fR=\fIInterfaceName\fR" the computing element specified by URL at the command line should be queried using this information interface (possible options: org.nordugrid.ldapng, org.nordugrid.ldapglue2, org.nordugrid.wsrfglue2, org.ogf.glue.emies.resourceinfo) .IP "\fB-J\fR \fIjobid\fR, \fB--jobid\fR=\fIjobid\fR" submits testjob given by the jobid. .IP "\fB-r\fR \fItime\fR, \fB--runtime\fR=\fItime\fR" test job runtime specified in case of the 1st test job. .IP "\fB-j\fR, \fB--joblist\fR=\fIfilename\fR" the file storing information about active jobs (default ~/.arc/jobs.xml) .IP "\fB-o\fR, \fB--jobids-to-file\fR=\fIfilename\fR" the IDs of the submitted jobs will be appended to this file .IP "\fB-D\fR, \fB--dryrun\fR" submit jobs as dry run (no submission to batch system) .IP "\fB --direct\fR" submit directly - no resource discovery or matchmaking .IP "\fB-x\fR, \fB--dumpdescription\fR" do not submit - dump job description in the language accepted by the target .IP "\fB-E\fR, \fB--certificate\fR" prints information about installed user- and CA-certificates .IP "\fB-P\fR, \fB--listplugins\fR" list the available plugins .IP "\fB-t\fR, \fB--timeout\fR=\fIseconds\fR" timeout in seconds (default 20) .IP "\fB-z\fR, \fB--conffile\fR=\fIfilename\fR" configuration file (default ~/.arc/client.conf) .IP "\fB-d\fR, \fB--debug\fR=\fIdebuglevel\fR" FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG .IP "\fB-b\fR, \fB--broker\fR=\fIbroker\fR" selected broker: Random (default), FastestQueue or custom .IP "\fB-v\fR, \fB-version\fR" print version information .IP "\fB-h\fR, \fB-help\fR" print usage information .LP .SH EXTENDED DESCRIPTION The .B arctest command is used for basic testing of ARC client and server installation through submitting various test jobs. It can also print basic information about user's certificate. The command is complementary to .B arcinfo and .B arcproxy -I The test jobs available in this version of arctest are: Test job 1: This test-job calculates prime-numbers for a number of minutes given by .B -r (default 5) and outputs the list to stderr. The source-code for the prime-number program, the Makefile and the executable are downloaded to the cluster from HTTP and FTP servers and the program is compiled before running. In this way, the test job constitutes a fairly comprehensive test of the basic setup of a grid cluster. Test job 2: attempts to list all environment variables at the remote site Test job 3: copies a remote file from an HTTP server into a local file .SH EXAMPLES arctest -J 1 -c will submit test job number 1 to the specified cluster belonging to the flavor of it. arctest --certificate will print basic information about the user's certificate. 
.SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org .SH SEE ALSO .BR arccat (1), .BR arcclean (1), .BR arccp (1), .BR arcget (1), .BR arcinfo (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcrenew (1), .BR arcresub (1), .BR arcresume (1), .BR arcrm (1), .BR arcstat (1), .BR arcsub (1), .BR arcsync (1) nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315721023143 xustar000000000000000030 mtime=1513200593.133929687 30 atime=1513200652.060650384 29 ctime=1513200664.49980252 nordugrid-arc-5.4.2/src/clients/compute/Makefile.in0000644000175000002070000023752713214315721023232 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ bin_PROGRAMS = arcsub$(EXEEXT) arcget$(EXEEXT) arcstat$(EXEEXT) \ arcinfo$(EXEEXT) arckill$(EXEEXT) arcclean$(EXEEXT) \ arccat$(EXEEXT) arcsync$(EXEEXT) arcresub$(EXEEXT) \ arcresume$(EXEEXT) arcrenew$(EXEEXT) arctest$(EXEEXT) subdir = src/clients/compute DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/arccat.1.in $(srcdir)/arcclean.1.in \ $(srcdir)/arcget.1.in $(srcdir)/arcinfo.1.in \ $(srcdir)/arckill.1.in $(srcdir)/arcrenew.1.in \ $(srcdir)/arcresub.1.in $(srcdir)/arcresume.1.in \ $(srcdir)/arcstat.1.in $(srcdir)/arcsub.1.in \ $(srcdir)/arcsync.1.in $(srcdir)/arctest.1.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = arcstat.1 arcinfo.1 
arcsub.1 arcclean.1 arckill.1 \ arcget.1 arccat.1 arcresub.1 arcsync.1 arcrenew.1 arcresume.1 \ arctest.1 CONFIG_CLEAN_VPATH_FILES = am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)" PROGRAMS = $(bin_PROGRAMS) am_arccat_OBJECTS = arccat-arccat.$(OBJEXT) arccat-utils.$(OBJEXT) arccat_OBJECTS = $(am_arccat_OBJECTS) am__DEPENDENCIES_1 = arccat_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) arccat_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(arccat_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcclean_OBJECTS = arcclean-arcclean.$(OBJEXT) \ arcclean-utils.$(OBJEXT) arcclean_OBJECTS = $(am_arcclean_OBJECTS) arcclean_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) arcclean_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcclean_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcget_OBJECTS = arcget-arcget.$(OBJEXT) arcget-utils.$(OBJEXT) arcget_OBJECTS = $(am_arcget_OBJECTS) arcget_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) arcget_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(arcget_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcinfo_OBJECTS = arcinfo-arcinfo.$(OBJEXT) arcinfo-utils.$(OBJEXT) arcinfo_OBJECTS = $(am_arcinfo_OBJECTS) arcinfo_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) arcinfo_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(arcinfo_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arckill_OBJECTS = arckill-arckill.$(OBJEXT) arckill-utils.$(OBJEXT) arckill_OBJECTS = $(am_arckill_OBJECTS) arckill_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) arckill_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(arckill_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcrenew_OBJECTS = arcrenew-arcrenew.$(OBJEXT) \ arcrenew-utils.$(OBJEXT) arcrenew_OBJECTS = $(am_arcrenew_OBJECTS) arcrenew_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) arcrenew_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcrenew_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcresub_OBJECTS = arcresub-arcresub.$(OBJEXT) \ arcresub-utils.$(OBJEXT) arcresub_OBJECTS = $(am_arcresub_OBJECTS) arcresub_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) arcresub_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcresub_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcresume_OBJECTS = arcresume-arcresume.$(OBJEXT) \ arcresume-utils.$(OBJEXT) arcresume_OBJECTS = $(am_arcresume_OBJECTS) arcresume_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) arcresume_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcresume_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcstat_OBJECTS = arcstat-arcstat.$(OBJEXT) arcstat-utils.$(OBJEXT) arcstat_OBJECTS = $(am_arcstat_OBJECTS) arcstat_DEPENDENCIES 
= $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) arcstat_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(arcstat_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcsub_OBJECTS = arcsub-arcsub.$(OBJEXT) arcsub-utils.$(OBJEXT) arcsub_OBJECTS = $(am_arcsub_OBJECTS) arcsub_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) arcsub_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(arcsub_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcsync_OBJECTS = arcsync-arcsync.$(OBJEXT) arcsync-utils.$(OBJEXT) arcsync_OBJECTS = $(am_arcsync_OBJECTS) arcsync_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) arcsync_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(arcsync_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arctest_OBJECTS = arctest-arctest.$(OBJEXT) arctest-utils.$(OBJEXT) arctest_OBJECTS = $(am_arctest_OBJECTS) arctest_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) arctest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(arctest_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(arccat_SOURCES) $(arcclean_SOURCES) $(arcget_SOURCES) \ $(arcinfo_SOURCES) $(arckill_SOURCES) $(arcrenew_SOURCES) \ $(arcresub_SOURCES) $(arcresume_SOURCES) $(arcstat_SOURCES) \ $(arcsub_SOURCES) $(arcsync_SOURCES) $(arctest_SOURCES) DIST_SOURCES = $(arccat_SOURCES) $(arcclean_SOURCES) $(arcget_SOURCES) \ $(arcinfo_SOURCES) $(arckill_SOURCES) $(arcrenew_SOURCES) \ $(arcresub_SOURCES) $(arcresume_SOURCES) $(arcstat_SOURCES) \ $(arcsub_SOURCES) $(arcsync_SOURCES) $(arctest_SOURCES) am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ 
./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' man1dir = $(mandir)/man1 NROFF = nroff MANS = $(man_MANS) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = 
@GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = 
@abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ man_MANS = arcsub.1 arcget.1 arcstat.1 arcinfo.1 arckill.1 arcclean.1 arccat.1 \ arcresub.1 arcsync.1 arcresume.1 arcrenew.1 arctest.1 CLILIBS = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la arcsub_SOURCES = arcsub.cpp utils.cpp utils.h arcsub_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arcsub_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arctest_SOURCES = arctest.cpp utils.cpp utils.h arctest_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arctest_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arcget_SOURCES = arcget.cpp utils.cpp utils.h arcget_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arcget_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arcstat_SOURCES = arcstat.cpp utils.cpp utils.h arcstat_CXXFLAGS = 
-I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arcstat_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arcinfo_SOURCES = arcinfo.cpp utils.cpp utils.h arcinfo_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcinfo_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arckill_SOURCES = arckill.cpp utils.cpp utils.h arckill_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arckill_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arcclean_SOURCES = arcclean.cpp utils.cpp utils.h arcclean_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arcclean_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arccat_SOURCES = arccat.cpp utils.cpp utils.h arccat_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arccat_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arcsync_SOURCES = arcsync.cpp utils.cpp utils.h arcsync_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arcsync_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arcresub_SOURCES = arcresub.cpp utils.cpp utils.h arcresub_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arcresub_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arcrenew_SOURCES = arcrenew.cpp utils.cpp utils.h arcrenew_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arcrenew_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) arcresume_SOURCES = arcresume.cpp utils.cpp utils.h arcresume_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) arcresume_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(DBCXX_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/clients/compute/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/clients/compute/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): arcstat.1: $(top_builddir)/config.status $(srcdir)/arcstat.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arcinfo.1: $(top_builddir)/config.status $(srcdir)/arcinfo.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arcsub.1: $(top_builddir)/config.status $(srcdir)/arcsub.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arcclean.1: $(top_builddir)/config.status $(srcdir)/arcclean.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arckill.1: $(top_builddir)/config.status $(srcdir)/arckill.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arcget.1: $(top_builddir)/config.status $(srcdir)/arcget.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arccat.1: $(top_builddir)/config.status $(srcdir)/arccat.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arcresub.1: $(top_builddir)/config.status $(srcdir)/arcresub.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arcsync.1: $(top_builddir)/config.status $(srcdir)/arcsync.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arcrenew.1: $(top_builddir)/config.status $(srcdir)/arcrenew.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arcresume.1: $(top_builddir)/config.status $(srcdir)/arcresume.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arctest.1: $(top_builddir)/config.status $(srcdir)/arctest.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-binPROGRAMS: $(bin_PROGRAMS) @$(NORMAL_INSTALL) test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)" @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p || test -f $$p1; \ then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-binPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' `; \ test -n "$$list" || exit 0; \ echo " ( 
cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(bindir)" && rm -f $$files clean-binPROGRAMS: @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list arccat$(EXEEXT): $(arccat_OBJECTS) $(arccat_DEPENDENCIES) @rm -f arccat$(EXEEXT) $(arccat_LINK) $(arccat_OBJECTS) $(arccat_LDADD) $(LIBS) arcclean$(EXEEXT): $(arcclean_OBJECTS) $(arcclean_DEPENDENCIES) @rm -f arcclean$(EXEEXT) $(arcclean_LINK) $(arcclean_OBJECTS) $(arcclean_LDADD) $(LIBS) arcget$(EXEEXT): $(arcget_OBJECTS) $(arcget_DEPENDENCIES) @rm -f arcget$(EXEEXT) $(arcget_LINK) $(arcget_OBJECTS) $(arcget_LDADD) $(LIBS) arcinfo$(EXEEXT): $(arcinfo_OBJECTS) $(arcinfo_DEPENDENCIES) @rm -f arcinfo$(EXEEXT) $(arcinfo_LINK) $(arcinfo_OBJECTS) $(arcinfo_LDADD) $(LIBS) arckill$(EXEEXT): $(arckill_OBJECTS) $(arckill_DEPENDENCIES) @rm -f arckill$(EXEEXT) $(arckill_LINK) $(arckill_OBJECTS) $(arckill_LDADD) $(LIBS) arcrenew$(EXEEXT): $(arcrenew_OBJECTS) $(arcrenew_DEPENDENCIES) @rm -f arcrenew$(EXEEXT) $(arcrenew_LINK) $(arcrenew_OBJECTS) $(arcrenew_LDADD) $(LIBS) arcresub$(EXEEXT): $(arcresub_OBJECTS) $(arcresub_DEPENDENCIES) @rm -f arcresub$(EXEEXT) $(arcresub_LINK) $(arcresub_OBJECTS) $(arcresub_LDADD) $(LIBS) arcresume$(EXEEXT): $(arcresume_OBJECTS) $(arcresume_DEPENDENCIES) @rm -f arcresume$(EXEEXT) $(arcresume_LINK) $(arcresume_OBJECTS) $(arcresume_LDADD) $(LIBS) arcstat$(EXEEXT): $(arcstat_OBJECTS) $(arcstat_DEPENDENCIES) @rm -f arcstat$(EXEEXT) $(arcstat_LINK) $(arcstat_OBJECTS) $(arcstat_LDADD) $(LIBS) arcsub$(EXEEXT): $(arcsub_OBJECTS) $(arcsub_DEPENDENCIES) @rm -f arcsub$(EXEEXT) $(arcsub_LINK) $(arcsub_OBJECTS) $(arcsub_LDADD) $(LIBS) arcsync$(EXEEXT): $(arcsync_OBJECTS) $(arcsync_DEPENDENCIES) @rm -f arcsync$(EXEEXT) $(arcsync_LINK) $(arcsync_OBJECTS) $(arcsync_LDADD) $(LIBS) arctest$(EXEEXT): $(arctest_OBJECTS) $(arctest_DEPENDENCIES) @rm -f arctest$(EXEEXT) $(arctest_LINK) $(arctest_OBJECTS) $(arctest_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arccat-arccat.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arccat-utils.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcclean-arcclean.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcclean-utils.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcget-arcget.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcget-utils.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcinfo-arcinfo.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcinfo-utils.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arckill-arckill.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arckill-utils.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcrenew-arcrenew.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcrenew-utils.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcresub-arcresub.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcresub-utils.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcresume-arcresume.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcresume-utils.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcstat-arcstat.Po@am__quote@ @AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/arcstat-utils.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcsub-arcsub.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcsub-utils.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcsync-arcsync.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcsync-utils.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arctest-arctest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arctest-utils.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< arccat-arccat.o: arccat.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccat_CXXFLAGS) $(CXXFLAGS) -MT arccat-arccat.o -MD -MP -MF $(DEPDIR)/arccat-arccat.Tpo -c -o arccat-arccat.o `test -f 'arccat.cpp' || echo '$(srcdir)/'`arccat.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arccat-arccat.Tpo $(DEPDIR)/arccat-arccat.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arccat.cpp' object='arccat-arccat.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccat_CXXFLAGS) $(CXXFLAGS) -c -o arccat-arccat.o `test -f 'arccat.cpp' || echo '$(srcdir)/'`arccat.cpp arccat-arccat.obj: arccat.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccat_CXXFLAGS) $(CXXFLAGS) -MT arccat-arccat.obj -MD -MP -MF $(DEPDIR)/arccat-arccat.Tpo -c -o arccat-arccat.obj `if test -f 'arccat.cpp'; then $(CYGPATH_W) 'arccat.cpp'; else $(CYGPATH_W) '$(srcdir)/arccat.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arccat-arccat.Tpo $(DEPDIR)/arccat-arccat.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arccat.cpp' object='arccat-arccat.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccat_CXXFLAGS) $(CXXFLAGS) -c -o arccat-arccat.obj `if test -f 'arccat.cpp'; then $(CYGPATH_W) 'arccat.cpp'; else $(CYGPATH_W) '$(srcdir)/arccat.cpp'; fi` arccat-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccat_CXXFLAGS) $(CXXFLAGS) -MT 
arccat-utils.o -MD -MP -MF $(DEPDIR)/arccat-utils.Tpo -c -o arccat-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arccat-utils.Tpo $(DEPDIR)/arccat-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arccat-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccat_CXXFLAGS) $(CXXFLAGS) -c -o arccat-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arccat-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccat_CXXFLAGS) $(CXXFLAGS) -MT arccat-utils.obj -MD -MP -MF $(DEPDIR)/arccat-utils.Tpo -c -o arccat-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arccat-utils.Tpo $(DEPDIR)/arccat-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arccat-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccat_CXXFLAGS) $(CXXFLAGS) -c -o arccat-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcclean-arcclean.o: arcclean.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcclean_CXXFLAGS) $(CXXFLAGS) -MT arcclean-arcclean.o -MD -MP -MF $(DEPDIR)/arcclean-arcclean.Tpo -c -o arcclean-arcclean.o `test -f 'arcclean.cpp' || echo '$(srcdir)/'`arcclean.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcclean-arcclean.Tpo $(DEPDIR)/arcclean-arcclean.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcclean.cpp' object='arcclean-arcclean.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcclean_CXXFLAGS) $(CXXFLAGS) -c -o arcclean-arcclean.o `test -f 'arcclean.cpp' || echo '$(srcdir)/'`arcclean.cpp arcclean-arcclean.obj: arcclean.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcclean_CXXFLAGS) $(CXXFLAGS) -MT arcclean-arcclean.obj -MD -MP -MF $(DEPDIR)/arcclean-arcclean.Tpo -c -o arcclean-arcclean.obj `if test -f 'arcclean.cpp'; then $(CYGPATH_W) 'arcclean.cpp'; else $(CYGPATH_W) '$(srcdir)/arcclean.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcclean-arcclean.Tpo $(DEPDIR)/arcclean-arcclean.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcclean.cpp' object='arcclean-arcclean.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcclean_CXXFLAGS) $(CXXFLAGS) -c -o arcclean-arcclean.obj `if test -f 'arcclean.cpp'; then $(CYGPATH_W) 'arcclean.cpp'; else $(CYGPATH_W) '$(srcdir)/arcclean.cpp'; fi` arcclean-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcclean_CXXFLAGS) $(CXXFLAGS) -MT arcclean-utils.o -MD -MP -MF $(DEPDIR)/arcclean-utils.Tpo -c -o arcclean-utils.o `test -f 'utils.cpp' || echo 
'$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcclean-utils.Tpo $(DEPDIR)/arcclean-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arcclean-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcclean_CXXFLAGS) $(CXXFLAGS) -c -o arcclean-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcclean-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcclean_CXXFLAGS) $(CXXFLAGS) -MT arcclean-utils.obj -MD -MP -MF $(DEPDIR)/arcclean-utils.Tpo -c -o arcclean-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcclean-utils.Tpo $(DEPDIR)/arcclean-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arcclean-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcclean_CXXFLAGS) $(CXXFLAGS) -c -o arcclean-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcget-arcget.o: arcget.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcget_CXXFLAGS) $(CXXFLAGS) -MT arcget-arcget.o -MD -MP -MF $(DEPDIR)/arcget-arcget.Tpo -c -o arcget-arcget.o `test -f 'arcget.cpp' || echo '$(srcdir)/'`arcget.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcget-arcget.Tpo $(DEPDIR)/arcget-arcget.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcget.cpp' object='arcget-arcget.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcget_CXXFLAGS) $(CXXFLAGS) -c -o arcget-arcget.o `test -f 'arcget.cpp' || echo '$(srcdir)/'`arcget.cpp arcget-arcget.obj: arcget.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcget_CXXFLAGS) $(CXXFLAGS) -MT arcget-arcget.obj -MD -MP -MF $(DEPDIR)/arcget-arcget.Tpo -c -o arcget-arcget.obj `if test -f 'arcget.cpp'; then $(CYGPATH_W) 'arcget.cpp'; else $(CYGPATH_W) '$(srcdir)/arcget.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcget-arcget.Tpo $(DEPDIR)/arcget-arcget.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcget.cpp' object='arcget-arcget.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcget_CXXFLAGS) $(CXXFLAGS) -c -o arcget-arcget.obj `if test -f 'arcget.cpp'; then $(CYGPATH_W) 'arcget.cpp'; else $(CYGPATH_W) '$(srcdir)/arcget.cpp'; fi` arcget-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcget_CXXFLAGS) $(CXXFLAGS) -MT arcget-utils.o -MD -MP -MF $(DEPDIR)/arcget-utils.Tpo -c -o arcget-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcget-utils.Tpo $(DEPDIR)/arcget-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arcget-utils.o' libtool=no 
@AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcget_CXXFLAGS) $(CXXFLAGS) -c -o arcget-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcget-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcget_CXXFLAGS) $(CXXFLAGS) -MT arcget-utils.obj -MD -MP -MF $(DEPDIR)/arcget-utils.Tpo -c -o arcget-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcget-utils.Tpo $(DEPDIR)/arcget-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arcget-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcget_CXXFLAGS) $(CXXFLAGS) -c -o arcget-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcinfo-arcinfo.o: arcinfo.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcinfo_CXXFLAGS) $(CXXFLAGS) -MT arcinfo-arcinfo.o -MD -MP -MF $(DEPDIR)/arcinfo-arcinfo.Tpo -c -o arcinfo-arcinfo.o `test -f 'arcinfo.cpp' || echo '$(srcdir)/'`arcinfo.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcinfo-arcinfo.Tpo $(DEPDIR)/arcinfo-arcinfo.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcinfo.cpp' object='arcinfo-arcinfo.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcinfo_CXXFLAGS) $(CXXFLAGS) -c -o arcinfo-arcinfo.o `test -f 'arcinfo.cpp' || echo '$(srcdir)/'`arcinfo.cpp arcinfo-arcinfo.obj: arcinfo.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcinfo_CXXFLAGS) $(CXXFLAGS) -MT arcinfo-arcinfo.obj -MD -MP -MF $(DEPDIR)/arcinfo-arcinfo.Tpo -c -o arcinfo-arcinfo.obj `if test -f 'arcinfo.cpp'; then $(CYGPATH_W) 'arcinfo.cpp'; else $(CYGPATH_W) '$(srcdir)/arcinfo.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcinfo-arcinfo.Tpo $(DEPDIR)/arcinfo-arcinfo.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcinfo.cpp' object='arcinfo-arcinfo.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcinfo_CXXFLAGS) $(CXXFLAGS) -c -o arcinfo-arcinfo.obj `if test -f 'arcinfo.cpp'; then $(CYGPATH_W) 'arcinfo.cpp'; else $(CYGPATH_W) '$(srcdir)/arcinfo.cpp'; fi` arcinfo-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcinfo_CXXFLAGS) $(CXXFLAGS) -MT arcinfo-utils.o -MD -MP -MF $(DEPDIR)/arcinfo-utils.Tpo -c -o arcinfo-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcinfo-utils.Tpo $(DEPDIR)/arcinfo-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arcinfo-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcinfo_CXXFLAGS) $(CXXFLAGS) -c -o arcinfo-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcinfo-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcinfo_CXXFLAGS) $(CXXFLAGS) -MT arcinfo-utils.obj -MD -MP -MF $(DEPDIR)/arcinfo-utils.Tpo -c -o arcinfo-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcinfo-utils.Tpo $(DEPDIR)/arcinfo-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arcinfo-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcinfo_CXXFLAGS) $(CXXFLAGS) -c -o arcinfo-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arckill-arckill.o: arckill.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arckill_CXXFLAGS) $(CXXFLAGS) -MT arckill-arckill.o -MD -MP -MF $(DEPDIR)/arckill-arckill.Tpo -c -o arckill-arckill.o `test -f 'arckill.cpp' || echo '$(srcdir)/'`arckill.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arckill-arckill.Tpo $(DEPDIR)/arckill-arckill.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arckill.cpp' object='arckill-arckill.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arckill_CXXFLAGS) $(CXXFLAGS) -c -o arckill-arckill.o `test -f 'arckill.cpp' || echo '$(srcdir)/'`arckill.cpp arckill-arckill.obj: arckill.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arckill_CXXFLAGS) $(CXXFLAGS) -MT arckill-arckill.obj -MD -MP -MF $(DEPDIR)/arckill-arckill.Tpo -c -o arckill-arckill.obj `if test -f 'arckill.cpp'; then $(CYGPATH_W) 'arckill.cpp'; else $(CYGPATH_W) '$(srcdir)/arckill.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arckill-arckill.Tpo $(DEPDIR)/arckill-arckill.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arckill.cpp' object='arckill-arckill.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arckill_CXXFLAGS) $(CXXFLAGS) -c -o arckill-arckill.obj `if test -f 'arckill.cpp'; then $(CYGPATH_W) 'arckill.cpp'; else $(CYGPATH_W) '$(srcdir)/arckill.cpp'; fi` arckill-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arckill_CXXFLAGS) $(CXXFLAGS) -MT arckill-utils.o -MD -MP -MF $(DEPDIR)/arckill-utils.Tpo -c -o arckill-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arckill-utils.Tpo $(DEPDIR)/arckill-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arckill-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arckill_CXXFLAGS) $(CXXFLAGS) -c -o arckill-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp 
arckill-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arckill_CXXFLAGS) $(CXXFLAGS) -MT arckill-utils.obj -MD -MP -MF $(DEPDIR)/arckill-utils.Tpo -c -o arckill-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arckill-utils.Tpo $(DEPDIR)/arckill-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arckill-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arckill_CXXFLAGS) $(CXXFLAGS) -c -o arckill-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcrenew-arcrenew.o: arcrenew.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrenew_CXXFLAGS) $(CXXFLAGS) -MT arcrenew-arcrenew.o -MD -MP -MF $(DEPDIR)/arcrenew-arcrenew.Tpo -c -o arcrenew-arcrenew.o `test -f 'arcrenew.cpp' || echo '$(srcdir)/'`arcrenew.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcrenew-arcrenew.Tpo $(DEPDIR)/arcrenew-arcrenew.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcrenew.cpp' object='arcrenew-arcrenew.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrenew_CXXFLAGS) $(CXXFLAGS) -c -o arcrenew-arcrenew.o `test -f 'arcrenew.cpp' || echo '$(srcdir)/'`arcrenew.cpp arcrenew-arcrenew.obj: arcrenew.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrenew_CXXFLAGS) $(CXXFLAGS) -MT arcrenew-arcrenew.obj -MD -MP -MF $(DEPDIR)/arcrenew-arcrenew.Tpo -c -o arcrenew-arcrenew.obj `if test -f 'arcrenew.cpp'; then $(CYGPATH_W) 'arcrenew.cpp'; else $(CYGPATH_W) '$(srcdir)/arcrenew.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcrenew-arcrenew.Tpo $(DEPDIR)/arcrenew-arcrenew.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcrenew.cpp' object='arcrenew-arcrenew.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrenew_CXXFLAGS) $(CXXFLAGS) -c -o arcrenew-arcrenew.obj `if test -f 'arcrenew.cpp'; then $(CYGPATH_W) 'arcrenew.cpp'; else $(CYGPATH_W) '$(srcdir)/arcrenew.cpp'; fi` arcrenew-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrenew_CXXFLAGS) $(CXXFLAGS) -MT arcrenew-utils.o -MD -MP -MF $(DEPDIR)/arcrenew-utils.Tpo -c -o arcrenew-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcrenew-utils.Tpo $(DEPDIR)/arcrenew-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arcrenew-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrenew_CXXFLAGS) $(CXXFLAGS) -c -o arcrenew-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcrenew-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(arcrenew_CXXFLAGS) $(CXXFLAGS) -MT arcrenew-utils.obj -MD -MP -MF $(DEPDIR)/arcrenew-utils.Tpo -c -o arcrenew-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcrenew-utils.Tpo $(DEPDIR)/arcrenew-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arcrenew-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrenew_CXXFLAGS) $(CXXFLAGS) -c -o arcrenew-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcresub-arcresub.o: arcresub.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresub_CXXFLAGS) $(CXXFLAGS) -MT arcresub-arcresub.o -MD -MP -MF $(DEPDIR)/arcresub-arcresub.Tpo -c -o arcresub-arcresub.o `test -f 'arcresub.cpp' || echo '$(srcdir)/'`arcresub.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcresub-arcresub.Tpo $(DEPDIR)/arcresub-arcresub.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcresub.cpp' object='arcresub-arcresub.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresub_CXXFLAGS) $(CXXFLAGS) -c -o arcresub-arcresub.o `test -f 'arcresub.cpp' || echo '$(srcdir)/'`arcresub.cpp arcresub-arcresub.obj: arcresub.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresub_CXXFLAGS) $(CXXFLAGS) -MT arcresub-arcresub.obj -MD -MP -MF $(DEPDIR)/arcresub-arcresub.Tpo -c -o arcresub-arcresub.obj `if test -f 'arcresub.cpp'; then $(CYGPATH_W) 'arcresub.cpp'; else $(CYGPATH_W) '$(srcdir)/arcresub.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcresub-arcresub.Tpo $(DEPDIR)/arcresub-arcresub.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcresub.cpp' object='arcresub-arcresub.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresub_CXXFLAGS) $(CXXFLAGS) -c -o arcresub-arcresub.obj `if test -f 'arcresub.cpp'; then $(CYGPATH_W) 'arcresub.cpp'; else $(CYGPATH_W) '$(srcdir)/arcresub.cpp'; fi` arcresub-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresub_CXXFLAGS) $(CXXFLAGS) -MT arcresub-utils.o -MD -MP -MF $(DEPDIR)/arcresub-utils.Tpo -c -o arcresub-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcresub-utils.Tpo $(DEPDIR)/arcresub-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arcresub-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresub_CXXFLAGS) $(CXXFLAGS) -c -o arcresub-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcresub-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresub_CXXFLAGS) $(CXXFLAGS) -MT arcresub-utils.obj -MD 
-MP -MF $(DEPDIR)/arcresub-utils.Tpo -c -o arcresub-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcresub-utils.Tpo $(DEPDIR)/arcresub-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arcresub-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresub_CXXFLAGS) $(CXXFLAGS) -c -o arcresub-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcresume-arcresume.o: arcresume.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresume_CXXFLAGS) $(CXXFLAGS) -MT arcresume-arcresume.o -MD -MP -MF $(DEPDIR)/arcresume-arcresume.Tpo -c -o arcresume-arcresume.o `test -f 'arcresume.cpp' || echo '$(srcdir)/'`arcresume.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcresume-arcresume.Tpo $(DEPDIR)/arcresume-arcresume.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcresume.cpp' object='arcresume-arcresume.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresume_CXXFLAGS) $(CXXFLAGS) -c -o arcresume-arcresume.o `test -f 'arcresume.cpp' || echo '$(srcdir)/'`arcresume.cpp arcresume-arcresume.obj: arcresume.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresume_CXXFLAGS) $(CXXFLAGS) -MT arcresume-arcresume.obj -MD -MP -MF $(DEPDIR)/arcresume-arcresume.Tpo -c -o arcresume-arcresume.obj `if test -f 'arcresume.cpp'; then $(CYGPATH_W) 'arcresume.cpp'; else $(CYGPATH_W) '$(srcdir)/arcresume.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcresume-arcresume.Tpo $(DEPDIR)/arcresume-arcresume.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcresume.cpp' object='arcresume-arcresume.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresume_CXXFLAGS) $(CXXFLAGS) -c -o arcresume-arcresume.obj `if test -f 'arcresume.cpp'; then $(CYGPATH_W) 'arcresume.cpp'; else $(CYGPATH_W) '$(srcdir)/arcresume.cpp'; fi` arcresume-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresume_CXXFLAGS) $(CXXFLAGS) -MT arcresume-utils.o -MD -MP -MF $(DEPDIR)/arcresume-utils.Tpo -c -o arcresume-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcresume-utils.Tpo $(DEPDIR)/arcresume-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arcresume-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresume_CXXFLAGS) $(CXXFLAGS) -c -o arcresume-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcresume-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresume_CXXFLAGS) $(CXXFLAGS) -MT arcresume-utils.obj -MD -MP -MF 
$(DEPDIR)/arcresume-utils.Tpo -c -o arcresume-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcresume-utils.Tpo $(DEPDIR)/arcresume-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arcresume-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresume_CXXFLAGS) $(CXXFLAGS) -c -o arcresume-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcstat-arcstat.o: arcstat.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcstat_CXXFLAGS) $(CXXFLAGS) -MT arcstat-arcstat.o -MD -MP -MF $(DEPDIR)/arcstat-arcstat.Tpo -c -o arcstat-arcstat.o `test -f 'arcstat.cpp' || echo '$(srcdir)/'`arcstat.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcstat-arcstat.Tpo $(DEPDIR)/arcstat-arcstat.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcstat.cpp' object='arcstat-arcstat.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcstat_CXXFLAGS) $(CXXFLAGS) -c -o arcstat-arcstat.o `test -f 'arcstat.cpp' || echo '$(srcdir)/'`arcstat.cpp arcstat-arcstat.obj: arcstat.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcstat_CXXFLAGS) $(CXXFLAGS) -MT arcstat-arcstat.obj -MD -MP -MF $(DEPDIR)/arcstat-arcstat.Tpo -c -o arcstat-arcstat.obj `if test -f 'arcstat.cpp'; then $(CYGPATH_W) 'arcstat.cpp'; else $(CYGPATH_W) '$(srcdir)/arcstat.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcstat-arcstat.Tpo $(DEPDIR)/arcstat-arcstat.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcstat.cpp' object='arcstat-arcstat.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcstat_CXXFLAGS) $(CXXFLAGS) -c -o arcstat-arcstat.obj `if test -f 'arcstat.cpp'; then $(CYGPATH_W) 'arcstat.cpp'; else $(CYGPATH_W) '$(srcdir)/arcstat.cpp'; fi` arcstat-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcstat_CXXFLAGS) $(CXXFLAGS) -MT arcstat-utils.o -MD -MP -MF $(DEPDIR)/arcstat-utils.Tpo -c -o arcstat-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcstat-utils.Tpo $(DEPDIR)/arcstat-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arcstat-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcstat_CXXFLAGS) $(CXXFLAGS) -c -o arcstat-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcstat-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcstat_CXXFLAGS) $(CXXFLAGS) -MT arcstat-utils.obj -MD -MP -MF $(DEPDIR)/arcstat-utils.Tpo -c -o arcstat-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) 
'$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcstat-utils.Tpo $(DEPDIR)/arcstat-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arcstat-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcstat_CXXFLAGS) $(CXXFLAGS) -c -o arcstat-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcsub-arcsub.o: arcsub.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -MT arcsub-arcsub.o -MD -MP -MF $(DEPDIR)/arcsub-arcsub.Tpo -c -o arcsub-arcsub.o `test -f 'arcsub.cpp' || echo '$(srcdir)/'`arcsub.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcsub-arcsub.Tpo $(DEPDIR)/arcsub-arcsub.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcsub.cpp' object='arcsub-arcsub.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -c -o arcsub-arcsub.o `test -f 'arcsub.cpp' || echo '$(srcdir)/'`arcsub.cpp arcsub-arcsub.obj: arcsub.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -MT arcsub-arcsub.obj -MD -MP -MF $(DEPDIR)/arcsub-arcsub.Tpo -c -o arcsub-arcsub.obj `if test -f 'arcsub.cpp'; then $(CYGPATH_W) 'arcsub.cpp'; else $(CYGPATH_W) '$(srcdir)/arcsub.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcsub-arcsub.Tpo $(DEPDIR)/arcsub-arcsub.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcsub.cpp' object='arcsub-arcsub.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -c -o arcsub-arcsub.obj `if test -f 'arcsub.cpp'; then $(CYGPATH_W) 'arcsub.cpp'; else $(CYGPATH_W) '$(srcdir)/arcsub.cpp'; fi` arcsub-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -MT arcsub-utils.o -MD -MP -MF $(DEPDIR)/arcsub-utils.Tpo -c -o arcsub-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcsub-utils.Tpo $(DEPDIR)/arcsub-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arcsub-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -c -o arcsub-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcsub-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -MT arcsub-utils.obj -MD -MP -MF $(DEPDIR)/arcsub-utils.Tpo -c -o arcsub-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcsub-utils.Tpo $(DEPDIR)/arcsub-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arcsub-utils.obj' libtool=no @AMDEPBACKSLASH@ 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -c -o arcsub-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcsync-arcsync.o: arcsync.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsync_CXXFLAGS) $(CXXFLAGS) -MT arcsync-arcsync.o -MD -MP -MF $(DEPDIR)/arcsync-arcsync.Tpo -c -o arcsync-arcsync.o `test -f 'arcsync.cpp' || echo '$(srcdir)/'`arcsync.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcsync-arcsync.Tpo $(DEPDIR)/arcsync-arcsync.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcsync.cpp' object='arcsync-arcsync.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsync_CXXFLAGS) $(CXXFLAGS) -c -o arcsync-arcsync.o `test -f 'arcsync.cpp' || echo '$(srcdir)/'`arcsync.cpp arcsync-arcsync.obj: arcsync.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsync_CXXFLAGS) $(CXXFLAGS) -MT arcsync-arcsync.obj -MD -MP -MF $(DEPDIR)/arcsync-arcsync.Tpo -c -o arcsync-arcsync.obj `if test -f 'arcsync.cpp'; then $(CYGPATH_W) 'arcsync.cpp'; else $(CYGPATH_W) '$(srcdir)/arcsync.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcsync-arcsync.Tpo $(DEPDIR)/arcsync-arcsync.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arcsync.cpp' object='arcsync-arcsync.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsync_CXXFLAGS) $(CXXFLAGS) -c -o arcsync-arcsync.obj `if test -f 'arcsync.cpp'; then $(CYGPATH_W) 'arcsync.cpp'; else $(CYGPATH_W) '$(srcdir)/arcsync.cpp'; fi` arcsync-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsync_CXXFLAGS) $(CXXFLAGS) -MT arcsync-utils.o -MD -MP -MF $(DEPDIR)/arcsync-utils.Tpo -c -o arcsync-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcsync-utils.Tpo $(DEPDIR)/arcsync-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arcsync-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsync_CXXFLAGS) $(CXXFLAGS) -c -o arcsync-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcsync-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsync_CXXFLAGS) $(CXXFLAGS) -MT arcsync-utils.obj -MD -MP -MF $(DEPDIR)/arcsync-utils.Tpo -c -o arcsync-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcsync-utils.Tpo $(DEPDIR)/arcsync-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arcsync-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsync_CXXFLAGS) $(CXXFLAGS) -c -o arcsync-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arctest-arctest.o: arctest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -MT arctest-arctest.o -MD -MP -MF $(DEPDIR)/arctest-arctest.Tpo -c -o arctest-arctest.o `test -f 'arctest.cpp' || echo '$(srcdir)/'`arctest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arctest-arctest.Tpo $(DEPDIR)/arctest-arctest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arctest.cpp' object='arctest-arctest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -c -o arctest-arctest.o `test -f 'arctest.cpp' || echo '$(srcdir)/'`arctest.cpp arctest-arctest.obj: arctest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -MT arctest-arctest.obj -MD -MP -MF $(DEPDIR)/arctest-arctest.Tpo -c -o arctest-arctest.obj `if test -f 'arctest.cpp'; then $(CYGPATH_W) 'arctest.cpp'; else $(CYGPATH_W) '$(srcdir)/arctest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arctest-arctest.Tpo $(DEPDIR)/arctest-arctest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arctest.cpp' object='arctest-arctest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -c -o arctest-arctest.obj `if test -f 'arctest.cpp'; then $(CYGPATH_W) 'arctest.cpp'; else $(CYGPATH_W) '$(srcdir)/arctest.cpp'; fi` arctest-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -MT arctest-utils.o -MD -MP -MF $(DEPDIR)/arctest-utils.Tpo -c -o arctest-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arctest-utils.Tpo $(DEPDIR)/arctest-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arctest-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -c -o arctest-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arctest-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -MT arctest-utils.obj -MD -MP -MF $(DEPDIR)/arctest-utils.Tpo -c -o arctest-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arctest-utils.Tpo $(DEPDIR)/arctest-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='utils.cpp' object='arctest-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -c -o arctest-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else 
$(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man1: $(man_MANS) @$(NORMAL_INSTALL) test -z "$(man1dir)" || $(MKDIR_P) "$(DESTDIR)$(man1dir)" @list=''; test -n "$(man1dir)" || exit 0; \ { for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \ done; } uninstall-man1: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man1dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ test -z "$$files" || { \ echo " ( cd '$(DESTDIR)$(man1dir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(man1dir)" && rm -f $$files; } ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @list='$(MANS)'; if test -n "$$list"; then \ list=`for p in $$list; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \ if test -n "$$list" && \ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \ echo "error: found man pages containing the \`missing help2man' 
replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) $(MANS) installdirs: for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-binPROGRAMS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man1 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-binPROGRAMS uninstall-man uninstall-man: uninstall-man1 .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \ clean-generic clean-libtool ctags distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-binPROGRAMS install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-man1 install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags uninstall \ uninstall-am uninstall-binPROGRAMS uninstall-man \ uninstall-man1 # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arckill.1.in0000644000000000000000000000012712441050167023214 xustar000000000000000027 mtime=1417957495.519061 30 atime=1513200652.140651363 30 ctime=1513200664.504802581 nordugrid-arc-5.4.2/src/clients/compute/arckill.1.in0000644000175000002070000001106712441050167023263 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARCKILL 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arckill \- ARC Kill .SH DESCRIPTION The .B arckill command is used to kill running jobs. .SH SYNOPSIS .B arckill [options] [job ...] 
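.SH EXAMPLE
A brief illustrative sketch; the job ID and job name below are hypothetical placeholders, a real job ID being the URL printed by
.BR arcsub (1)
at submission time. The options used here are described under OPTIONS below.
.nf
# kill one job by its ID, keeping its files on the remote cluster
arckill --keep https://example.org:443/arex/hypothetical-job-id
# kill all jobs submitted under a given job name
arckill myjobname
# kill every job recorded in the local job list
arckill --all
.fi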
.SH OPTIONS .IP "\fB-a\fR, \fB--all\fR" all jobs .IP "\fB-j\fR, \fB--joblist\fR=\fIfilename\fR" the file storing information about active jobs (default ~/.arc/jobs.xml) .IP "\fB-i\fR, \fB--jobids-from-file\fR=\fIfilename\fR" a file containing a list of jobIDs .IP "\fB-c\fR, \fB--cluster\fR=\fIname\fR" select one or more computing elements: \fIname\fR can be an alias for a single CE, a group of CEs or a URL .IP "\fB-r\fR, \fB--rejectmanagement\fR=\fIURL\fR" skip jobs which are on a computing element with a given URL .IP "\fB-s\fR, \fB--status\fR=\fIstatusstr\fR" only select jobs whose status is statusstr .IP "\fB-k\fR, \fB--keep\fR" keep files on the remote cluster (do not clean) .IP "\fB-P\fR, \fB--listplugins\fR" list the available plugins .IP "\fB-t\fR, \fB--timeout\fR=\fIseconds\fR" timeout in seconds (default 20) .IP "\fB-z\fR, \fB--conffile\fR=\fIfilename\fR" configuration file (default ~/.arc/client.conf) .IP "\fB-d\fR, \fB--debug\fR=\fIdebuglevel\fR" FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG .IP "\fB-v\fR, \fB--version\fR" print version information .IP "\fB-?\fR, \fB--help\fR" print help .LP .SH ARGUMENTS .IP "\fBjob\fR ..." list of jobids and/or jobnames .LP .SH EXTENDED DESCRIPTION The .B arckill command kills a running job on an ARC enabled resource. The job can be referred to either by the jobid that was returned by .BR arcsub (1) at submission time or by its jobname if the job description that was submitted contained a jobname attribute. More than one jobid and/or jobname can be given. If several jobs were submitted with the same jobname all those jobs are killed. If the .B --joblist option is used the list of jobs is read from a file with the specified filename. By specifying the .B --all option, all jobs can be killed. The .B --cluster option can be used to select or reject jobs at specific clusters. See .BR arcsub (1) for a discussion of the format of arguments to this option. The .B --status option can be used to select jobs in a specific state. These options can be repeated several times. See .BR arstat (1) for possible state values. If the job was successfully killed the attepmt to remove the job from the remote cluster will be made unless the .B --keep option was specified. Depending on functionality of service job killing procedure may take time and it may be impossible to clean job immediately. In that case .B arckill will report number of cleaned jobs smaller than processed ones. Cleaning of leftover jobs may be performed by running .B arcclean later. .SH FILES .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .TP .B ~/.arc/jobs.xml This a local list of the user's active jobs. When a job is successfully submitted it is added to this list and when it is removed from the remote cluster it is removed from this list. This list is used as the list of all active jobs when the user specifies the .B --all option to the various NorduGrid ARC user interface commands. By using the .B --joblist option a different file can be used than the default. .SH ENVIRONMENT VARIABLES .TP .B X509_USER_PROXY The location of the user's Grid proxy file. Shouldn't be set unless the proxy is in a non-standard location. .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. 
If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org .SH SEE ALSO .BR arccat (1), .BR arcclean (1), .BR arccp (1), .BR arcget (1), .BR arcinfo (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcrenew (1), .BR arcresub (1), .BR arcresume (1), .BR arcrm (1), .BR arcstat (1), .BR arcsub (1), .BR arcsync (1), .BR arctest (1) nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arcsub.cpp0000644000000000000000000000012413213442363023065 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200575.353712 30 ctime=1513200664.521802789 nordugrid-arc-5.4.2/src/clients/compute/arcsub.cpp0000644000175000002070000003773313213442363023147 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "utils.h" static Arc::Logger logger(Arc::Logger::getRootLogger(), "arcsub"); static int submit(const Arc::UserConfig& usercfg, const std::list& jobdescriptionlist, std::list& services, const std::string& requestedSubmissionInterface, const std::string& jobidfile, bool direct_submission); static int dumpjobdescription(const Arc::UserConfig& usercfg, const std::list& jobdescriptionlist, const std::list& services, const std::string& requestedSubmissionInterface); int RUNMAIN(arcsub)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_SUB, istring("[filename ...]"), istring("The arcsub command is used for " "submitting jobs to Grid enabled " "computing\nresources.")); std::list params = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arcsub", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. 
if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (opt.show_plugins) { std::list types; types.push_back("HED:SubmitterPlugin"); types.push_back("HED:ServiceEndpointRetrieverPlugin"); types.push_back("HED:TargetInformationRetrieverPlugin"); types.push_back("HED:JobDescriptionParserPlugin"); types.push_back("HED:BrokerPlugin"); showplugins("arcsub", types, logger, usercfg.Broker().first); return 0; } if (!checkproxy(usercfg)) { return 1; } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); if (opt.timeout > 0) usercfg.Timeout(opt.timeout); if (!opt.broker.empty()) usercfg.Broker(opt.broker); opt.jobdescriptionfiles.insert(opt.jobdescriptionfiles.end(), params.begin(), params.end()); if (opt.jobdescriptionfiles.empty() && opt.jobdescriptionstrings.empty()) { logger.msg(Arc::ERROR, "No job description input specified"); return 1; } std::list jobdescriptionlist; // Loop over input job description files for (std::list::iterator it = opt.jobdescriptionfiles.begin(); it != opt.jobdescriptionfiles.end(); ++it) { std::ifstream descriptionfile(it->c_str()); if (!descriptionfile) { logger.msg(Arc::ERROR, "Can not open job description file: %s", *it); return 1; } descriptionfile.seekg(0, std::ios::end); std::streamsize length = descriptionfile.tellg(); descriptionfile.seekg(0, std::ios::beg); char *buffer = new char[length + 1]; descriptionfile.read(buffer, length); descriptionfile.close(); buffer[length] = '\0'; std::list jobdescs; Arc::JobDescriptionResult parseres = Arc::JobDescription::Parse((std::string)buffer, jobdescs); if (parseres) { for (std::list::iterator itJ = jobdescs.begin(); itJ != jobdescs.end(); ++itJ) { itJ->Application.DryRun = opt.dryrun; for (std::list::iterator itJAlt = itJ->GetAlternatives().begin(); itJAlt != itJ->GetAlternatives().end(); ++itJAlt) { itJAlt->Application.DryRun = opt.dryrun; } } jobdescriptionlist.insert(jobdescriptionlist.end(), jobdescs.begin(), jobdescs.end()); } else { logger.msg(Arc::ERROR, "Invalid JobDescription:"); std::cout << buffer << std::endl; delete[] buffer; std::cerr << parseres.str() << std::endl; return 1; } delete[] buffer; } //Loop over job description input strings for (std::list::iterator it = opt.jobdescriptionstrings.begin(); it != opt.jobdescriptionstrings.end(); ++it) { std::list jobdescs; Arc::JobDescriptionResult parseres = Arc::JobDescription::Parse(*it, jobdescs); if (parseres) { for (std::list::iterator itJ = jobdescs.begin(); itJ != jobdescs.end(); ++itJ) { itJ->Application.DryRun = opt.dryrun; for (std::list::iterator itJAlt = itJ->GetAlternatives().begin(); itJAlt != itJ->GetAlternatives().end(); ++itJAlt) { itJAlt->Application.DryRun = opt.dryrun; } } jobdescriptionlist.insert(jobdescriptionlist.end(), jobdescs.begin(), jobdescs.end()); } else { logger.msg(Arc::ERROR, "Invalid JobDescription:"); std::cout << *it << std::endl; std::cerr << parseres.str() << std::endl; return 1; } } std::list services = getServicesFromUserConfigAndCommandLine(usercfg, opt.indexurls, opt.clusters, opt.requestedSubmissionInterfaceName, opt.infointerface); if (!opt.direct_submission) { usercfg.AddRejectDiscoveryURLs(opt.rejectdiscovery); } if (opt.dumpdescription) { return 
dumpjobdescription(usercfg, jobdescriptionlist, services, opt.requestedSubmissionInterfaceName); } return submit(usercfg, jobdescriptionlist, services, opt.requestedSubmissionInterfaceName, opt.jobidoutfile, opt.direct_submission); } class HandleSubmittedJobs : public Arc::EntityConsumer { public: HandleSubmittedJobs(const std::string& jobidfile, const Arc::UserConfig& uc) : jobidfile(jobidfile), uc(uc), submittedJobs() {} void addEntity(const Arc::Job& j) { std::cout << Arc::IString("Job submitted with jobid: %s", j.JobID) << std::endl; submittedJobs.push_back(j); } void write() const { if (!jobidfile.empty() && !Arc::Job::WriteJobIDsToFile(submittedJobs, jobidfile)) { logger.msg(Arc::WARNING, "Cannot write job IDs to file (%s)", jobidfile); } Arc::JobInformationStorage* jobStore = createJobInformationStorage(uc); if (jobStore == NULL || !jobStore->Write(submittedJobs)) { if (jobStore == NULL) { std::cerr << Arc::IString("Warning: Unable to open job list file (%s), unknown format", uc.JobListFile()) << std::endl; } else { std::cerr << Arc::IString("Warning: Failed to write job information to file (%s)", uc.JobListFile()) << std::endl; } std::cerr << " " << Arc::IString("To recover missing jobs, run arcsync") << std::endl; } delete jobStore; } void printsummary(const std::list& originalDescriptions, const std::list& notsubmitted) const { if (originalDescriptions.size() > 1) { std::cout << std::endl << Arc::IString("Job submission summary:") << std::endl; std::cout << "-----------------------" << std::endl; std::cout << Arc::IString("%d of %d jobs were submitted", submittedJobs.size(), submittedJobs.size()+notsubmitted.size()) << std::endl; if (!notsubmitted.empty()) { std::cout << Arc::IString("The following %d were not submitted", notsubmitted.size()) << std::endl; for (std::list::const_iterator it = notsubmitted.begin(); it != notsubmitted.end(); ++it) { int jobnr = 1; for (std::list::const_iterator itOrig = originalDescriptions.begin(); itOrig != originalDescriptions.end(); ++itOrig, ++jobnr) { if (&(*itOrig) == *it) { std::cout << Arc::IString("Job nr.") << " " << jobnr; if (!(*it)->Identification.JobName.empty()) { std::cout << ": " << (*it)->Identification.JobName; } std::cout << std::endl; break; } } } } } } void clearsubmittedjobs() { submittedJobs.clear(); } private: const std::string jobidfile; const Arc::UserConfig& uc; std::list submittedJobs; }; static int submit(const Arc::UserConfig& usercfg, const std::list& jobdescriptionlist, std::list& services, const std::string& requestedSubmissionInterface, const std::string& jobidfile, bool direct_submission) { int retval = 0; HandleSubmittedJobs hsj(jobidfile, usercfg); Arc::Submitter s(usercfg); s.addConsumer(hsj); Arc::SubmissionStatus status; if (!direct_submission) { std::list rsi; if (!requestedSubmissionInterface.empty()) rsi.push_back(requestedSubmissionInterface); status = s.BrokeredSubmit(services, jobdescriptionlist, rsi); } else { if (!requestedSubmissionInterface.empty()) { for (std::list::iterator it = services.begin(); it != services.end();) { // Remove endpoint - it has an unrequested interface name. 
if (!it->InterfaceName.empty() && it->InterfaceName != requestedSubmissionInterface) { logger.msg(Arc::INFO, "Removing endpoint %s: It has an unrequested interface (%s).", it->URLString, it->InterfaceName); it = services.erase(it); continue; } it->InterfaceName = requestedSubmissionInterface; ++it; } } status = s.Submit(services, jobdescriptionlist); } hsj.write(); if (status.isSet(Arc::SubmissionStatus::BROKER_PLUGIN_NOT_LOADED)) { std::cerr << Arc::IString("ERROR: Unable to load broker %s", usercfg.Broker().first) << std::endl; return 1; } if (status.isSet(Arc::SubmissionStatus::NO_SERVICES)) { std::cerr << Arc::IString("ERROR: Job submission aborted because no resource returned any information") << std::endl; return 1; } if (status.isSet(Arc::SubmissionStatus::DESCRIPTION_NOT_SUBMITTED)) { std::cerr << Arc::IString("ERROR: One or multiple job descriptions was not submitted.") << std::endl; retval = 1; } if (status.isSet(Arc::SubmissionStatus::SUBMITTER_PLUGIN_NOT_LOADED)) { bool gridFTPJobPluginFailed = false; for (std::map::const_iterator it = s.GetEndpointSubmissionStatuses().begin(); it != s.GetEndpointSubmissionStatuses().end(); ++it) { if (it->first.InterfaceName == "org.nordugrid.gridftpjob" && it->second == Arc::EndpointSubmissionStatus::NOPLUGIN) { gridFTPJobPluginFailed = true; } } if (gridFTPJobPluginFailed) { Arc::LogLevel level = (retval == 1 ? Arc::ERROR : Arc::INFO); std::string indent = (retval == 1 ? " " : " "); logger.msg(level, "A computing resource using the GridFTP interface was requested, but\n" "%sthe corresponding plugin could not be loaded. Is the plugin installed?\n" "%sIf not, please install the package 'nordugrid-arc-plugins-globus'.\n" "%sDepending on your type of installation the package name might differ.", indent, indent, indent); } // TODO: What to do when failing to load other plugins. } hsj.printsummary(jobdescriptionlist, s.GetDescriptionsNotSubmitted()); return retval; } static int dumpjobdescription(const Arc::UserConfig& usercfg, const std::list& jobdescriptionlist, const std::list& services, const std::string& requestedSubmissionInterface) { int retval = 0; std::set preferredInterfaceNames; if (usercfg.InfoInterface().empty()) { preferredInterfaceNames.insert("org.nordugrid.ldapglue2"); } else { preferredInterfaceNames.insert(usercfg.InfoInterface()); } Arc::ComputingServiceUniq csu; Arc::ComputingServiceRetriever csr(usercfg, std::list(), usercfg.RejectDiscoveryURLs(), preferredInterfaceNames); csr.addConsumer(csu); for (std::list::const_iterator it = services.begin(); it != services.end(); ++it) { csr.addEndpoint(*it); } csr.wait(); std::list CEs = csu.getServices(); if (CEs.empty()) { std::cout << Arc::IString("Unable to adapt job description to any resource, no resource information could be obtained.") << std::endl; std::cout << Arc::IString("Original job description is listed below:") << std::endl; for (std::list::const_iterator it = jobdescriptionlist.begin(); it != jobdescriptionlist.end(); ++it) { std::string descOutput; it->UnParse(descOutput, it->GetSourceLanguage()); std::cout << descOutput << std::endl; } return 1; } Arc::Broker broker(usercfg, usercfg.Broker().first); if (!broker.isValid(false)) { logger.msg(Arc::ERROR, "Dumping job description aborted: Unable to load broker %s", usercfg.Broker().first); return 1; } Arc::ExecutionTargetSorter ets(broker, CEs); std::list::const_iterator itJAlt; // Iterator to use for alternative job descriptions. 
for (std::list::const_iterator itJ = jobdescriptionlist.begin(); itJ != jobdescriptionlist.end(); ++itJ) { const Arc::JobDescription* currentJobDesc = &*itJ; bool descriptionDumped = false; do { Arc::JobDescription jobdescdump(*currentJobDesc); ets.set(jobdescdump); for (ets.reset(); !ets.endOfList(); ets.next()) { if(!requestedSubmissionInterface.empty() && ets->ComputingEndpoint->InterfaceName != requestedSubmissionInterface) continue; if (!jobdescdump.Prepare(*ets)) { logger.msg(Arc::INFO, "Unable to prepare job description according to needs of the target resource (%s).", ets->ComputingEndpoint->URLString); continue; } std::string jobdesclang = "nordugrid:jsdl"; if (ets->ComputingEndpoint->InterfaceName == "org.nordugrid.gridftpjob") { jobdesclang = "nordugrid:xrsl"; } else if (ets->ComputingEndpoint->InterfaceName == "org.glite.ce.cream") { jobdesclang = "egee:jdl"; } else if (ets->ComputingEndpoint->InterfaceName == "org.ogf.glue.emies.activitycreation") { jobdesclang = "emies:adl"; } std::string jobdesc; if (!jobdescdump.UnParse(jobdesc, jobdesclang)) { logger.msg(Arc::INFO, "An error occurred during the generation of job description to be sent to %s", ets->ComputingEndpoint->URLString); continue; } std::cout << Arc::IString("Job description to be sent to %s:", ets->AdminDomain->Name) << std::endl; std::cout << jobdesc << std::endl; descriptionDumped = true; break; } if (!descriptionDumped && itJ->HasAlternatives()) { // Alternative job descriptions. if (currentJobDesc == &*itJ) { itJAlt = itJ->GetAlternatives().begin(); } else { ++itJAlt; } currentJobDesc = &*itJAlt; } } while (!descriptionDumped && itJ->HasAlternatives() && itJAlt != itJ->GetAlternatives().end()); if (ets.endOfList()) { std::cout << Arc::IString("Unable to prepare job description according to needs of the target resource.") << std::endl; retval = 1; } } //end loop over all job descriptions return retval; } nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arccat.cpp0000644000000000000000000000012413213442363023043 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200575.364712 30 ctime=1513200664.511802667 nordugrid-arc-5.4.2/src/clients/compute/arccat.cpp0000644000175000002070000001601713213442363023115 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include "utils.h" int RUNMAIN(arccat)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arccat"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_CAT, istring("[job ...]"), istring("The arccat command performs the cat " "command on the stdout, stderr or grid\n" "manager's error log of the job.")); std::list jobidentifiers = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arccat", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. 
if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); if (opt.show_plugins) { std::list types; types.push_back("HED:JobControllerPlugin"); showplugins("arccat", types, logger); return 0; } Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (!checkproxy(usercfg)) { return 1; } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); for (std::list::const_iterator it = opt.jobidinfiles.begin(); it != opt.jobidinfiles.end(); it++) { if (!Arc::Job::ReadJobIDsFromFile(*it, jobidentifiers)) { logger.msg(Arc::WARNING, "Cannot read specified jobid file: %s", *it); } } if (opt.timeout > 0) usercfg.Timeout(opt.timeout); if ((!opt.joblist.empty() || !opt.status.empty()) && jobidentifiers.empty() && opt.clusters.empty()) opt.all = true; if (jobidentifiers.empty() && opt.clusters.empty() && !opt.all) { logger.msg(Arc::ERROR, "No jobs given"); return 1; } std::list selectedURLs; if (!opt.clusters.empty()) { selectedURLs = getSelectedURLsFromUserConfigAndCommandLine(usercfg, opt.clusters); } std::list rejectManagementURLs = getRejectManagementURLsFromUserConfigAndCommandLine(usercfg, opt.rejectmanagement); std::list jobs; Arc::JobInformationStorage *jobstore = createJobInformationStorage(usercfg); if (jobstore != NULL && !jobstore->IsStorageExisting()) { logger.msg(Arc::ERROR, "Job list file (%s) doesn't exist", usercfg.JobListFile()); delete jobstore; return 1; } if (jobstore == NULL || ( opt.all && !jobstore->ReadAll(jobs, rejectManagementURLs)) || (!opt.all && !jobstore->Read(jobs, jobidentifiers, selectedURLs, rejectManagementURLs))) { logger.msg(Arc::ERROR, "Unable to read job information from file (%s)", usercfg.JobListFile()); delete jobstore; return 1; } delete jobstore; if (!opt.all) { for (std::list::const_iterator itJIDAndName = jobidentifiers.begin(); itJIDAndName != jobidentifiers.end(); ++itJIDAndName) { std::cout << Arc::IString("Warning: Job not found in job list: %s", *itJIDAndName) << std::endl; } } Arc::JobSupervisor jobmaster(usercfg, jobs); jobmaster.Update(); jobmaster.SelectValid(); if (!opt.status.empty()) { jobmaster.SelectByStatus(opt.status); } jobs = jobmaster.GetSelectedJobs(); if (jobs.empty()) { std::cout << Arc::IString("No jobs") << std::endl; return 1; } std::string resourceName; Arc::Job::ResourceType resource; if (opt.show_joblog) { resource = Arc::Job::JOBLOG; resourceName = "joblog"; } else if (opt.show_stderr) { resource = Arc::Job::STDERR; resourceName = "stderr"; } else { resource = Arc::Job::STDOUT; resourceName = "stdout"; } // saving to a temp file is necessary because chunks from server // may arrive out of order std::string filename = Glib::build_filename(Glib::get_tmp_dir(), "arccat.XXXXXX"); int tmp_h = Glib::mkstemp(filename); if (tmp_h == -1) { logger.msg(Arc::INFO, "Could not create temporary file \"%s\"", filename); logger.msg(Arc::ERROR, "Cannot create output of %s for any jobs", resourceName); return 1; } Arc::URL dst("stdio:///"+Arc::tostring(tmp_h)); if (!dst) { logger.msg(Arc::ERROR, "Cannot create output of %s for any jobs", resourceName); logger.msg(Arc::INFO, "Invalid destination URL %s", dst.str()); close(tmp_h); unlink(filename.c_str()); return 1; } Arc::URL stdoutdst("stdio:///stdout"); int retval = 0; for (std::list::const_iterator it = 
jobs.begin(); it != jobs.end(); ++it) { if (!it->State || (!opt.status.empty() && std::find(opt.status.begin(), opt.status.end(), it->State()) == opt.status.end() && std::find(opt.status.begin(), opt.status.end(), it->State.GetGeneralState()) == opt.status.end())) { continue; } if (it->State == Arc::JobState::DELETED) { logger.msg(Arc::WARNING, "Job deleted: %s", it->JobID); retval = 1; continue; } // The job-log might be available before the job has started (middleware dependent). if (!opt.show_joblog && !it->State.IsFinished() && it->State != Arc::JobState::RUNNING && it->State != Arc::JobState::FINISHING) { logger.msg(Arc::WARNING, "Job has not started yet: %s", it->JobID); retval = 1; continue; } if ((opt.show_joblog && it->LogDir.empty()) || (!opt.show_joblog && opt.show_stderr && it->StdErr.empty()) || (!opt.show_joblog && !opt.show_stderr && it->StdOut.empty())) { logger.msg(Arc::ERROR, "Cannot determine the %s location: %s", resourceName, it->JobID); retval = 1; continue; } Arc::URL src; it->GetURLToResource(resource, src); if (!src) { logger.msg(Arc::ERROR, "Cannot create output of %s for job (%s): Invalid source %s", resourceName, it->JobID, src.str()); retval = 1; continue; } if (!it->CopyJobFile(usercfg, src, dst)) { retval = 1; continue; } logger.msg(Arc::VERBOSE, "Catting %s for job %s", resourceName, it->JobID); // Use File DMC in order to handle proper writing to stdout (e.g. supporting redirection and piping from shell). if (!it->CopyJobFile(usercfg, dst, stdoutdst)) { retval = 1; continue; } } close(tmp_h); unlink(filename.c_str()); return retval; } nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arcsync.1.in0000644000000000000000000000012712441050167023235 xustar000000000000000027 mtime=1417957495.519061 30 atime=1513200652.202652121 30 ctime=1513200664.509802642 nordugrid-arc-5.4.2/src/clients/compute/arcsync.1.in0000644000175000002070000001025512441050167023302 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARCSYNC 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arcsync \- ARC Synchronize .SH DESCRIPTION The \fBarcsync\fR command synchronizes your local jobs list with the information at a given cluster or index server. .SH SYNOPSIS .B arcsync [options] .SH OPTIONS .IP "\fB-c\fR, \fB--cluster\fR=\fIname\fR" select one or more computing elements: \fIname\fR can be an alias for a single CE, a group of CEs or a URL .IP "\fB-g\fR, \fB--index\fR=\fIname\fR" select one or more registries: \fIname\fR can be an alias for a single registry, a group of registries or a URL .IP "\fB-j\fR, \fB--joblist\fR=\fIfilename\fR" the file storing information about active jobs (default ~/.arc/jobs.xml) .IP "\fB-f\fR, \fB--force\fR" don't ask for verification .IP "\fB-T\fR, \fB--truncate\fR" shrinks the joblist before synchronizing .IP "\fB-P\fR, \fB--listplugins\fR" list the available plugins .IP "\fB-t\fR, \fB--timeout\fR=\fIseconds\fR" timeout in seconds (default 20) .IP "\fB-z\fR, \fB--conffile\fR=\fIfilename\fR" configuration file (default ~/.arc/client.conf) .IP "\fB-d\fR, \fB--debug\fR=\fIdebuglevel\fR" FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG .IP "\fB-v\fR, \fB--version\fR" print version information .IP "\fB-?\fR, \fB--help\fR" print help .LP .LP .SH EXTENDED DESCRIPTION The ARC user interface keeps a local list of active jobs in the users home directory (see \fBarcsub\fR(1)). 
If this file is lost, or the user wants to recreate the file on a different computer, the \fBarcsync\fR command can be used to recreate the file from the information available at the specified cluster or index server. Since the information about a job retrieved from a cluster can be slightly out of date if the user has very recently submitted or removed a job, a warning is issued when this command is run. The \fB--force\fR option disables this warning. The .B --cluster option can be used to select or reject clusters that should be considered in the synchronization. This option can be repeated several times. See .BR arcsub (1) for a discussion of the format of arguments to this option. If the joblist is not empty when invoking synchronization, the old jobs will be merged with the new jobs, unless the .B --truncate option is given, in which case the joblist will first be cleaned of old jobs and then the new jobs will be added. .SH FILES .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .TP .B ~/.arc/jobs.xml This is a local list of the user's active jobs. When a job is successfully submitted it is added to this list and when it is removed from the remote cluster it is removed from this list. This list is used as the list of all active jobs when the user specifies the .B --all option to the various NorduGrid ARC user interface commands. By using the .B --joblist option a different file can be used than the default. .SH ENVIRONMENT VARIABLES .TP .B X509_USER_PROXY The location of the user's Grid proxy file. Shouldn't be set unless the proxy is in a non-standard location. .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC.
Please report bugs and feature requests to http://bugzilla.nordugrid.org .SH SEE ALSO .BR arccat (1), .BR arcclean (1), .BR arccp (1), .BR arcget (1), .BR arcinfo (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcrenew (1), .BR arcresub (1), .BR arcresume (1), .BR arcrm (1), .BR arcstat (1), .BR arcsub (1), .BR arctest (1) nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arcresume.cpp0000644000000000000000000000012413213442363023574 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200575.360712 30 ctime=1513200664.519802765 nordugrid-arc-5.4.2/src/clients/compute/arcresume.cpp0000644000175000002070000001006213213442363023640 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "utils.h" int RUNMAIN(arcresume)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arcresume"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_RESUME, istring("[job ...]")); std::list jobidentifiers = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arcresume", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (!checkproxy(usercfg)) { return 1; } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); if (opt.show_plugins) { std::list types; types.push_back("HED:JobControllerPlugin"); showplugins("arcresume", types, logger); return 0; } for (std::list::const_iterator it = opt.jobidinfiles.begin(); it != opt.jobidinfiles.end(); ++it) { if (!Arc::Job::ReadJobIDsFromFile(*it, jobidentifiers)) { logger.msg(Arc::WARNING, "Cannot read specified jobid file: %s", *it); } } if (opt.timeout > 0) usercfg.Timeout(opt.timeout); if ((!opt.joblist.empty() || !opt.status.empty()) && jobidentifiers.empty() && opt.clusters.empty()) opt.all = true; if (jobidentifiers.empty() && opt.clusters.empty() && !opt.all) { logger.msg(Arc::ERROR, "No jobs given"); return 1; } std::list selectedURLs; if (!opt.clusters.empty()) { selectedURLs = getSelectedURLsFromUserConfigAndCommandLine(usercfg, opt.clusters); } std::list rejectManagementURLs = getRejectDiscoveryURLsFromUserConfigAndCommandLine(usercfg, opt.rejectdiscovery); std::list jobs; Arc::JobInformationStorage *jobstore = createJobInformationStorage(usercfg); if (jobstore != NULL && !jobstore->IsStorageExisting()) { logger.msg(Arc::ERROR, "Job list file (%s) doesn't exist", usercfg.JobListFile()); delete jobstore; return 1; } if (jobstore == NULL || ( opt.all && !jobstore->ReadAll(jobs, rejectManagementURLs)) || (!opt.all && !jobstore->Read(jobs, jobidentifiers, selectedURLs, rejectManagementURLs))) { logger.msg(Arc::ERROR, "Unable to read job information from file (%s)", usercfg.JobListFile()); delete jobstore; return 1; } delete 
jobstore; if (!opt.all) { for (std::list::const_iterator itJIDAndName = jobidentifiers.begin(); itJIDAndName != jobidentifiers.end(); ++itJIDAndName) { std::cout << Arc::IString("Warning: Job not found in job list: %s", *itJIDAndName) << std::endl; } } Arc::JobSupervisor jobmaster(usercfg, jobs); jobmaster.Update(); jobmaster.SelectValid(); if (!opt.status.empty()) { jobmaster.SelectByStatus(opt.status); } if (jobmaster.GetSelectedJobs().empty()) { std::cout << Arc::IString("No jobs") << std::endl; return 1; } int retval = (int)!jobmaster.Resume(); std::cout << Arc::IString("Jobs processed: %d, resumed: %d", jobmaster.GetIDsProcessed().size()+jobmaster.GetIDsNotProcessed().size(), jobmaster.GetIDsProcessed().size()) << std::endl; return retval; } nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arcstat.cpp0000644000000000000000000000012413213442363023247 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200575.330711 30 ctime=1513200664.520802777 nordugrid-arc-5.4.2/src/clients/compute/arcstat.cpp0000644000175000002070000001364213213442363023322 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "utils.h" int RUNMAIN(arcstat)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arcstat"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_STAT, istring("[job ...]"), istring("The arcstat command is used for " "obtaining the status of jobs that have\n" "been submitted to Grid enabled resources.")); std::list jobidentifiers = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arcstat", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. 
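// (Doing it here makes sure that messages emitted while the user configuration is parsed already honour the requested verbosity level.)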
if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); if (opt.show_plugins) { std::list types; types.push_back("HED:JobControllerPlugin"); showplugins("arcstat", types, logger); return 0; } Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); for (std::list::const_iterator it = opt.jobidinfiles.begin(); it != opt.jobidinfiles.end(); ++it) { if (!Arc::Job::ReadJobIDsFromFile(*it, jobidentifiers)) { logger.msg(Arc::WARNING, "Cannot read specified jobid file: %s", *it); } } if (opt.timeout > 0) usercfg.Timeout(opt.timeout); if (!opt.sort.empty() && !opt.rsort.empty()) { logger.msg(Arc::ERROR, "The 'sort' and 'rsort' flags cannot be specified at the same time."); return 1; } if (!opt.rsort.empty()) { opt.sort = opt.rsort; } typedef bool (*JobSorting)(const Arc::Job&, const Arc::Job&); std::map orderings; orderings["jobid"] = &Arc::Job::CompareJobID; orderings["submissiontime"] = &Arc::Job::CompareSubmissionTime; orderings["jobname"] = &Arc::Job::CompareJobName; if (!opt.sort.empty() && orderings.find(opt.sort) == orderings.end()) { std::cerr << "Jobs cannot be sorted by \"" << opt.sort << "\", the following orderings are supported:" << std::endl; for (std::map::const_iterator it = orderings.begin(); it != orderings.end(); ++it) std::cerr << it->first << std::endl; return 1; } if ((!opt.joblist.empty() || !opt.status.empty()) && jobidentifiers.empty() && opt.clusters.empty()) opt.all = true; if (jobidentifiers.empty() && opt.clusters.empty() && !opt.all) { logger.msg(Arc::ERROR, "No jobs given"); return 1; } std::list selectedURLs; if (!opt.clusters.empty()) { selectedURLs = getSelectedURLsFromUserConfigAndCommandLine(usercfg, opt.clusters); } std::list rejectManagementURLs = getRejectManagementURLsFromUserConfigAndCommandLine(usercfg, opt.rejectmanagement); std::list jobs; Arc::JobInformationStorage *jobstore = createJobInformationStorage(usercfg); if (jobstore != NULL && !jobstore->IsStorageExisting()) { logger.msg(Arc::ERROR, "Job list file (%s) doesn't exist", usercfg.JobListFile()); delete jobstore; return 1; } if (jobstore == NULL || ( opt.all && !jobstore->ReadAll(jobs, rejectManagementURLs)) || (!opt.all && !jobstore->Read(jobs, jobidentifiers, selectedURLs, rejectManagementURLs))) { logger.msg(Arc::ERROR, "Unable to read job information from file (%s)", usercfg.JobListFile()); delete jobstore; return 1; } delete jobstore; if (!opt.all) { for (std::list::const_iterator itJIDAndName = jobidentifiers.begin(); itJIDAndName != jobidentifiers.end(); ++itJIDAndName) { std::cout << Arc::IString("Warning: Job not found in job list: %s", *itJIDAndName) << std::endl; } } Arc::JobSupervisor jobmaster(usercfg, jobs); jobmaster.Update(); unsigned int queried_num = jobmaster.GetAllJobs().size(); if (!opt.status.empty()) { jobmaster.SelectByStatus(opt.status); } if (!opt.show_unavailable) { jobmaster.SelectValid(); } jobs = jobmaster.GetSelectedJobs(); if (queried_num == 0) { std::cout << Arc::IString("No jobs found, try later") << std::endl; return 1; } std::vector jobsSortable(jobs.begin(), jobs.end()); if (!opt.sort.empty()) { opt.rsort.empty() ? 
std::sort(jobsSortable.begin(), jobsSortable.end(), orderings[opt.sort]) : std::sort(jobsSortable.rbegin(), jobsSortable.rend(), orderings[opt.sort]); } for (std::vector::const_iterator it = jobsSortable.begin(); it != jobsSortable.end(); ++it) { // Option 'long' (longlist) takes precedence over option 'print-jobids' (printids) if (opt.longlist || !opt.printids) { it->SaveToStream(std::cout, opt.longlist); } else { std::cout << it->JobID << std::endl; } } if (opt.show_unavailable) { jobmaster.SelectValid(); } unsigned int returned_info_num = jobmaster.GetSelectedJobs().size(); std::cout << Arc::IString("Status of %d jobs was queried, %d jobs returned information", queried_num, returned_info_num) << std::endl; return 0; } nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arcget.cpp0000644000000000000000000000012413213442363023053 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200575.338712 30 ctime=1513200664.515802716 nordugrid-arc-5.4.2/src/clients/compute/arcget.cpp0000644000175000002070000001500113213442363023115 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include "utils.h" int RUNMAIN(arcget)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arcget"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_GET, istring("[job ...]"), istring("The arcget command is used for " "retrieving the results from a job.")); std::list jobidentifiers = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arcget", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); if (opt.show_plugins) { std::list types; types.push_back("HED:JobControllerPlugin"); showplugins("arcget", types, logger); return 0; } Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (!checkproxy(usercfg)) { return 1; } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); if (opt.downloaddir.empty()) { if (!usercfg.JobDownloadDirectory().empty()) { opt.downloaddir = usercfg.JobDownloadDirectory(); logger.msg(Arc::INFO, "Job download directory from user configuration file: %s ", opt.downloaddir); } else { logger.msg(Arc::INFO, "Job download directory will be created in present working directory. 
"); } } else { logger.msg(Arc::INFO, "Job download directory: %s ", opt.downloaddir); } for (std::list::const_iterator it = opt.jobidinfiles.begin(); it != opt.jobidinfiles.end(); ++it) { if (!Arc::Job::ReadJobIDsFromFile(*it, jobidentifiers)) { logger.msg(Arc::WARNING, "Cannot read specified jobid file: %s", *it); } } if (opt.timeout > 0) usercfg.Timeout(opt.timeout); if ((!opt.joblist.empty() || !opt.status.empty()) && jobidentifiers.empty() && opt.clusters.empty()) opt.all = true; if (jobidentifiers.empty() && opt.clusters.empty() && !opt.all) { logger.msg(Arc::ERROR, "No jobs given"); return 1; } std::list selectedURLs; if (!opt.clusters.empty()) { selectedURLs = getSelectedURLsFromUserConfigAndCommandLine(usercfg, opt.clusters); } std::list rejectManagementURLs = getRejectManagementURLsFromUserConfigAndCommandLine(usercfg, opt.rejectmanagement); std::list jobs; Arc::JobInformationStorage *jobstore = createJobInformationStorage(usercfg); if (jobstore != NULL && !jobstore->IsStorageExisting()) { logger.msg(Arc::ERROR, "Job list file (%s) doesn't exist", usercfg.JobListFile()); delete jobstore; return 1; } if (jobstore == NULL || ( opt.all && !jobstore->ReadAll(jobs, rejectManagementURLs)) || (!opt.all && !jobstore->Read(jobs, jobidentifiers, selectedURLs, rejectManagementURLs))) { logger.msg(Arc::ERROR, "Unable to read job information from file (%s)", usercfg.JobListFile()); delete jobstore; return 1; } if (!opt.all) { for (std::list::const_iterator itJIdentifier = jobidentifiers.begin(); itJIdentifier != jobidentifiers.end(); ++itJIdentifier) { std::cout << Arc::IString("Warning: Job not found in job list: %s", *itJIdentifier) << std::endl; } } Arc::JobSupervisor jobmaster(usercfg, jobs); jobmaster.Update(); jobmaster.SelectValid(); if (!opt.status.empty()) { jobmaster.SelectByStatus(opt.status); } if (jobmaster.GetSelectedJobs().empty()) { std::cout << Arc::IString("No jobs") << std::endl; delete jobstore; return 1; } if(!opt.downloaddir.empty()) { Arc::URL dirpath(opt.downloaddir); if(dirpath.Protocol() == "file") { if(!Arc::DirCreate(dirpath.Path(),S_IRWXU,true)) { std::string errstr = Arc::StrError(); logger.msg(Arc::ERROR, "Unable to create directory for storing results (%s) - %s", dirpath.Path(), errstr); return 1; } } } std::list downloaddirectories; int retval = (int)!jobmaster.Retrieve(opt.downloaddir, opt.usejobname, opt.forcedownload, downloaddirectories); for (std::list::const_iterator it = downloaddirectories.begin(); it != downloaddirectories.end(); ++it) { std::cout << Arc::IString("Results stored at: %s", *it) << std::endl; } unsigned int processed_num = jobmaster.GetIDsProcessed().size(); unsigned int retrieved_num = downloaddirectories.size(); unsigned int cleaned_num = 0; if (!opt.keep) { std::list retrieved = jobmaster.GetIDsProcessed(); // No need to clean selection because retrieved is subset of selected jobmaster.SelectByID(retrieved); if(!jobmaster.Clean()) { std::cout << Arc::IString("Warning: Some jobs were not removed from server") << std::endl; std::cout << Arc::IString(" Use arclean to remove retrieved jobs from job list", usercfg.JobListFile()) << std::endl; retval = 1; } cleaned_num = jobmaster.GetIDsProcessed().size(); if (!jobstore->Remove(jobmaster.GetIDsProcessed())) { std::cout << Arc::IString("Warning: Failed removing jobs from file (%s)", usercfg.JobListFile()) << std::endl; std::cout << Arc::IString(" Use arclean to remove retrieved jobs from job list", usercfg.JobListFile()) << std::endl; retval = 1; } std::cout << Arc::IString("Jobs processed: 
%d, successfully retrieved: %d, successfully cleaned: %d", processed_num, retrieved_num, cleaned_num) << std::endl; } else { std::cout << Arc::IString("Jobs processed: %d, successfully retrieved: %d", processed_num, retrieved_num) << std::endl; } delete jobstore; return retval; } nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arcrenew.1.in0000644000000000000000000000012712441050167023401 xustar000000000000000027 mtime=1417957495.519061 30 atime=1513200652.220652341 30 ctime=1513200664.505802594 nordugrid-arc-5.4.2/src/clients/compute/arcrenew.1.in0000644000175000002070000001026712441050167023451 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARCRENEW 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arcrenew \- ARC Proxy Renewal .SH DESCRIPTION The .B arcrenew command is used for renewing the proxy of jobs that have been submitted to grid enabled resources. .SH SYNOPSIS .B arcrenew [options] [job ...] .SH OPTIONS .IP "\fB-a\fR, \fB--all\fR" all jobs .IP "\fB-j\fR, \fB--joblist\fR=\fIfilename\fR" the file storing information about active jobs (default ~/.arc/jobs.xml) .IP "\fB-i\fR, \fB--jobids-from-file\fR=\fIfilename\fR" a file containing a list of jobIDs .IP "\fB-c\fR, \fB--cluster\fR=\fIname\fR" select one or more computing elements: \fIname\fR can be an alias for a single CE, a group of CEs or a URL .IP "\fB-r\fR, \fB--rejectmanagement\fR=\fIURL\fR" skip jobs which are on a computing element with a given URL .IP "\fB-s\fR, \fB--status\fR=\fIstatusstr\fR" only select jobs whose status is statusstr .IP "\fB-P\fR, \fB--listplugins\fR" list the available plugins .IP "\fB-t\fR, \fB--timeout\fR=\fIseconds\fR" timeout in seconds (default 20) .IP "\fB-z\fR, \fB--conffile\fR=\fIfilename\fR" configuration file (default ~/.arc/client.conf) .IP "\fB-d\fR, \fB--debug\fR=\fIdebuglevel\fR" FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG .IP "\fB-v\fR, \fB--version\fR" print version information .IP "\fB-?\fR, \fB--help\fR" print help .LP .SH ARGUMENTS .IP "\fBjob\fR ..." list of jobids and/or jobnames .LP .SH EXTENDED DESCRIPTION The .B arcrenew command renews the proxy of a job submitted to an ARC enabled resource. The job can be referred to either by the jobid that was returned by .BR arcsub (1) at submission time or by its jobname if the job description that was submitted contained a jobname attribute. More than one jobid and/or jobname can be given. If several jobs were submitted with the same jobname, the proxies of all those jobs are renewed. If the .B --joblist option is used, the list of jobs is read from a file with the specified filename. By specifying the .B --all option, the proxies of all active jobs will be renewed. The .B --cluster option can be used to select or reject jobs at specific clusters. See .BR arcsub (1) for a discussion of the format of arguments to this option. The .B --status option can be used to select jobs in a specific state. These options can be repeated several times. See .BR arcstat (1) for possible state values. .SH FILES .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .TP .B ~/.arc/jobs.xml This is a local list of the user's active jobs. When a job is successfully submitted it is added to this list, and when it is removed from the remote cluster it is removed from this list. 
This list is used as the list of all active jobs when the user specifies the .B --all option to the various NorduGrid ARC user interface commands. By using the .B --joblist option a different file can be used than the default. .SH ENVIRONMENT VARIABLES .TP .B X509_USER_PROXY The location of the user's Grid proxy file. Shouldn't be set unless the proxy is in a non-standard location. .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org .SH SEE ALSO .BR arccat (1), .BR arcclean (1), .BR arccp (1), .BR arcget (1), .BR arcinfo (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcresub (1), .BR arcresume (1), .BR arcrm (1), .BR arcstat (1), .BR arcsub (1), .BR arcsync (1), .BR arctest (1) nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arctest.cpp0000644000000000000000000000012412574532370023262 xustar000000000000000027 mtime=1441969400.372727 27 atime=1513200575.336712 30 ctime=1513200664.524802826 nordugrid-arc-5.4.2/src/clients/compute/arctest.cpp0000644000175000002070000003240312574532370023331 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "utils.h" static Arc::Logger logger(Arc::Logger::getRootLogger(), "arcsub"); int test(const Arc::UserConfig& usercfg, Arc::ExecutionTargetSorter& ets, const Arc::JobDescription& testJob, const std::string& jobidfile); int dumpjobdescription(const Arc::UserConfig& usercfg, Arc::ExecutionTargetSorter& ets, const Arc::JobDescription& testJob); static bool get_hash_value(const Arc::Credential& c, std::string& hash_str); int RUNMAIN(arctest)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_TEST, istring(" "), istring("The arctest command is used for " "testing clusters as resources.")); std::list params = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arctest", VERSION) << std::endl; return 0; } if ((opt.testjobid == -1) && (!opt.show_credentials) && (!opt.show_plugins)) { std::cout << Arc::IString("Nothing to do:\n" "you have to either specify a test job id with -J (--job)\n" "or query information about the certificates with -E (--certificate)\n"); return 0; } if ((opt.testjobid == 1) && (!opt.runtime)) { std::cout << Arc::IString("For the 1st test job " "you also have to specify a runtime value with -r (--runtime) option."); return 0; } // If debug is 
specified as argument, it should be set before loading the configuration. if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (opt.show_plugins) { std::list types; types.push_back("HED:SubmitterPlugin"); types.push_back("HED:ServiceEndpointRetrieverPlugin"); types.push_back("HED:TargetInformationRetrieverPlugin"); types.push_back("HED:JobDescriptionParserPlugin"); types.push_back("HED:BrokerPlugin"); showplugins("arctest", types, logger, usercfg.Broker().first); return 0; } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); if (opt.show_credentials) { const Arc::Time now; std::cout << Arc::IString("Certificate information:") << std::endl; std::string certificate_issuer = ""; if (usercfg.CertificatePath().empty()) { std::cout << " " << Arc::IString("No user-certificate found") << std::endl << std::endl; } else { Arc::Credential holder(usercfg.CertificatePath(), "", usercfg.CACertificatesDirectory(), ""); std::cout << " " << Arc::IString("Certificate: %s", usercfg.CertificatePath()) << std::endl; if (!holder.GetDN().empty()) { std::cout << " " << Arc::IString("Subject name: %s", holder.GetDN()) << std::endl; std::cout << " " << Arc::IString("Valid until: %s", (std::string) holder.GetEndTime() ) << std::endl << std::endl; certificate_issuer = holder.GetIssuerName(); } else { std::cout << " " << Arc::IString("Unable to determine certificate information") << std::endl << std::endl; } } std::cout << Arc::IString("Proxy certificate information:") << std::endl; if (usercfg.ProxyPath().empty()) { std::cout << " " << Arc::IString("No proxy found") << std::endl << std::endl; } else { Arc::Credential holder(usercfg.ProxyPath(), "", usercfg.CACertificatesDirectory(), ""); std::cout << " " << Arc::IString("Proxy: %s", usercfg.ProxyPath()) << std::endl; std::cout << " " << Arc::IString("Proxy-subject: %s", holder.GetDN()) << std::endl; if (holder.GetEndTime() < now) { std::cout << " " << Arc::IString("Valid for: Proxy expired") << std::endl << std::endl; } else if (!holder.GetVerification()) { std::cout << " " << Arc::IString("Valid for: Proxy not valid") << std::endl << std::endl; } else { std::cout << " " << Arc::IString("Valid for: %s", (holder.GetEndTime() - now).istr()) << std::endl << std::endl; } } if (!certificate_issuer.empty()) { std::cout << Arc::IString("Certificate issuer: %s", certificate_issuer) << std::endl << std::endl; } bool issuer_certificate_found = false; std::cout << Arc::IString("CA-certificates installed:") << std::endl; Glib::Dir cadir(usercfg.CACertificatesDirectory()); for (Glib::DirIterator it = cadir.begin(); it != cadir.end(); ++it) { std::string cafile = Glib::build_filename(usercfg.CACertificatesDirectory(), *it); // Assume certificates have file ending ".0", ".1" or ".2". Very OpenSSL specific. if (Glib::file_test(cafile, Glib::FILE_TEST_IS_REGULAR) && (*it)[(*it).size()-2] == '.' && ((*it)[(*it).size()-1] == '0' || (*it)[(*it).size()-1] == '1' || (*it)[(*it).size()-1] == '2')) { Arc::Credential cred(cafile, "", "", ""); std::string dn = cred.GetDN(); if (dn.empty()) continue; std::string hash; // Only accept certificates with correct hash. 
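// (get_hash_value() below returns the OpenSSL X509_NAME_hash of the certificate subject, so this check effectively requires the CA file to follow the "<subject-hash>.N" naming used in hashed CA-certificate directories.)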
if (!get_hash_value(cred, hash) || hash != (*it).substr(0, (*it).size()-2)) continue; if (dn == certificate_issuer) issuer_certificate_found = true; std::cout << " " << dn << std::endl; } } if (certificate_issuer.empty()) { std::cout << std::endl << Arc::IString("Unable to detect if issuer certificate is installed.") << std::endl; } else if (!issuer_certificate_found) { logger.msg(Arc::WARNING, "Your issuer's certificate is not installed"); } return EXIT_SUCCESS; } if (!checkproxy(usercfg)) { return 1; } if (opt.timeout > 0) usercfg.Timeout(opt.timeout); if (!opt.broker.empty()) usercfg.Broker(opt.broker); Arc::JobDescription testJob; if (!Arc::JobDescription::GetTestJob(opt.testjobid, testJob)) { std::cout << Arc::IString("No test-job, with ID \"%d\"", opt.testjobid) << std::endl; return 1; } // Set user input variables into job description if (opt.testjobid == 1) { testJob.Application.Executable.Argument.back() = Arc::tostring(opt.runtime); testJob.Resources.TotalCPUTime = (opt.runtime+3)*60; for ( std::map::iterator iter = testJob.OtherAttributes.begin(); iter != testJob.OtherAttributes.end(); ++iter ) { char buffer [iter->second.length()+255]; sprintf(buffer, iter->second.c_str(), opt.runtime, opt.runtime+3); iter->second = (std::string) buffer; } } Arc::Broker broker(usercfg, testJob, usercfg.Broker().first); if (!broker.isValid()) { logger.msg(Arc::ERROR, "Unable to load broker %s", usercfg.Broker().first); return 1; } logger.msg(Arc::INFO, "Broker %s loaded", usercfg.Broker().first); std::list services = getServicesFromUserConfigAndCommandLine(usercfg, opt.indexurls, opt.clusters, opt.requestedSubmissionInterfaceName, opt.infointerface); std::set preferredInterfaceNames; if (usercfg.InfoInterface().empty()) { preferredInterfaceNames.insert("org.nordugrid.ldapglue2"); } else { preferredInterfaceNames.insert(usercfg.InfoInterface()); } Arc::ExecutionTargetSorter ets(broker); std::list rejectDiscoveryURLs = getRejectDiscoveryURLsFromUserConfigAndCommandLine(usercfg, opt.rejectdiscovery); Arc::ComputingServiceRetriever csr(usercfg, std::list(), rejectDiscoveryURLs, preferredInterfaceNames); csr.addConsumer(ets); for (std::list::const_iterator it = services.begin(); it != services.end(); ++it) { csr.addEndpoint(*it); } csr.wait(); if (csr.empty()) { if (!opt.dumpdescription) { std::cout << Arc::IString("Test aborted because no resource returned any information") << std::endl; } else { std::cout << Arc::IString("Unable to adapt job description to any resource, no resource information could be obtained.") << std::endl; std::cout << Arc::IString("Original job description is listed below:") << std::endl; std::string descOutput; testJob.UnParse(descOutput, testJob.GetSourceLanguage()); std::cout << descOutput << std::endl; } return 1; } if (ets.getMatchingTargets().empty()) { if (!opt.dumpdescription) { std::cout << Arc::IString("ERROR: Test aborted because no suitable resources were found for the test-job") << std::endl; } else { std::cout << Arc::IString("ERROR: Dumping job description aborted because no suitable resources were found for the test-job") << std::endl; } return 1; } if (opt.dumpdescription) { return dumpjobdescription(usercfg, ets, testJob); } std::cout << Arc::IString("Submitting test-job %d:", opt.testjobid) << std::endl; std::string testJobXRSL; testJob.UnParse(testJobXRSL, "nordugrid:xrsl"); std::cout << testJobXRSL << std::endl; std::cout << Arc::IString("Client version: nordugrid-arc-%s", VERSION) << std::endl; return test(usercfg, ets, testJob, opt.jobidoutfile); } void 
printjobid(const std::string& jobid, const std::string& jobidfile) { if (!jobidfile.empty()) if (!Arc::Job::WriteJobIDToFile(jobid, jobidfile)) logger.msg(Arc::WARNING, "Cannot write jobid (%s) to file (%s)", jobid, jobidfile); std::cout << Arc::IString("Test submitted with jobid: %s", jobid) << std::endl; } int test(const Arc::UserConfig& usercfg, Arc::ExecutionTargetSorter& ets, const Arc::JobDescription& testJob, const std::string& jobidfile) { int retval = 0; std::list jobids; std::list submittedJobs; std::map notsubmitted; submittedJobs.push_back(Arc::Job()); for (ets.reset(); !ets.endOfList(); ets.next()) { if (ets->Submit(usercfg, testJob, submittedJobs.back())) { printjobid(submittedJobs.back().JobID, jobidfile); std::cout << Arc::IString("Computing service: %s", ets->ComputingService->Name) << std::endl; break; } } if (ets.endOfList()) { std::cout << Arc::IString("Test failed, no more possible targets") << std::endl; submittedJobs.pop_back(); retval = 1; } Arc::JobInformationStorage *jobstore = createJobInformationStorage(usercfg); if (jobstore == NULL) { logger.msg(Arc::ERROR, "Unable to read job information from file (%s)", usercfg.JobListFile()); return 1; } if (!jobstore->Write(submittedJobs)) { std::cout << Arc::IString("Warning: Failed to write job information to file (%s)", usercfg.JobListFile()) << std::endl; std::cout << Arc::IString("To recover missing jobs, run arcsync") << std::endl; } delete jobstore; return retval; } int dumpjobdescription(const Arc::UserConfig& usercfg, Arc::ExecutionTargetSorter& ets, const Arc::JobDescription& testJob) { for (ets.reset(); !ets.endOfList(); ets.next()) { Arc::JobDescription preparedTestJob(testJob); std::string jobdesc; // Prepare the test jobdescription according to the chosen ExecutionTarget if (!preparedTestJob.Prepare(*ets)) { logger.msg(Arc::INFO, "Unable to prepare job description according to needs of the target resource (%s).", ets->ComputingEndpoint->URLString); continue; } std::string jobdesclang = "nordugrid:jsdl"; if (ets->ComputingEndpoint->InterfaceName == "org.nordugrid.gridftpjob") { jobdesclang = "nordugrid:xrsl"; } else if (ets->ComputingEndpoint->InterfaceName == "org.glite.ce.cream") { jobdesclang = "egee:jdl"; } if (!preparedTestJob.UnParse(jobdesc, jobdesclang)) { logger.msg(Arc::INFO, "An error occurred during the generation of job description to be sent to %s", ets->ComputingEndpoint->URLString); continue; } std::cout << Arc::IString("Job description to be sent to %s:", ets->AdminDomain->Name) << std::endl; std::cout << jobdesc << std::endl; break; } return (!ets.endOfList()); } static bool get_hash_value(const Arc::Credential& c, std::string& hash_str) { X509* cert = c.GetCert(); if(!cert) return false; X509_NAME* cert_name = X509_get_subject_name(cert); if(!cert_name) return false; char hash[32]; memset(hash, 0, 32); snprintf(hash, 32, "%08lx", X509_NAME_hash(cert_name)); hash_str = hash; X509_free(cert); return true; } nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arckill.cpp0000644000000000000000000000012313213442363023226 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200575.355712 29 ctime=1513200664.51780274 nordugrid-arc-5.4.2/src/clients/compute/arckill.cpp0000644000175000002070000001245113213442363023277 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "utils.h" int RUNMAIN(arckill)(int argc, char **argv) { setlocale(LC_ALL, ""); 
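// setlocale(LC_ALL, "") adopts the user's locale so that the translated message
// catalogues used by istring()/Arc::IString() below are picked up. The rest of
// arckill follows the same pattern as the other compute clients in this directory:
// parse options, load the UserConfig, read the job list via JobInformationStorage,
// select jobs with JobSupervisor, then act on the selection.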
Arc::Logger logger(Arc::Logger::getRootLogger(), "arckill"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_KILL, istring("[job ...]"), istring("The arckill command is used to kill " "running jobs.")); std::list jobidentifiers = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arckill", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); if (opt.show_plugins) { std::list types; types.push_back("HED:JobControllerPlugin"); showplugins("arckill", types, logger); return 0; } Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (!checkproxy(usercfg)) { return 1; } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); for (std::list::const_iterator it = opt.jobidinfiles.begin(); it != opt.jobidinfiles.end(); ++it) { if (!Arc::Job::ReadJobIDsFromFile(*it, jobidentifiers)) { logger.msg(Arc::WARNING, "Cannot read specified jobid file: %s", *it); } } if (opt.timeout > 0) usercfg.Timeout(opt.timeout); if ((!opt.joblist.empty() || !opt.status.empty()) && jobidentifiers.empty() && opt.clusters.empty()) opt.all = true; if (jobidentifiers.empty() && opt.clusters.empty() && !opt.all) { logger.msg(Arc::ERROR, "No jobs given"); return 1; } std::list selectedURLs; if (!opt.clusters.empty()) { selectedURLs = getSelectedURLsFromUserConfigAndCommandLine(usercfg, opt.clusters); } std::list rejectManagementURLs = getRejectManagementURLsFromUserConfigAndCommandLine(usercfg, opt.rejectmanagement); std::list jobs; Arc::JobInformationStorage *jobstore = createJobInformationStorage(usercfg); if (jobstore != NULL && !jobstore->IsStorageExisting()) { logger.msg(Arc::ERROR, "Job list file (%s) doesn't exist", usercfg.JobListFile()); delete jobstore; return 1; } if (jobstore == NULL || ( opt.all && !jobstore->ReadAll(jobs, rejectManagementURLs)) || (!opt.all && !jobstore->Read(jobs, jobidentifiers, selectedURLs, rejectManagementURLs))) { logger.msg(Arc::ERROR, "Unable to read job information from file (%s)", usercfg.JobListFile()); delete jobstore; return 1; } if (!opt.all) { for (std::list::const_iterator itJIDAndName = jobidentifiers.begin(); itJIDAndName != jobidentifiers.end(); ++itJIDAndName) { std::cout << Arc::IString("Warning: Job not found in job list: %s", *itJIDAndName) << std::endl; } } Arc::JobSupervisor jobmaster(usercfg, jobs); jobmaster.Update(); jobmaster.SelectValid(); if (!opt.status.empty()) { jobmaster.SelectByStatus(opt.status); } if (jobmaster.GetSelectedJobs().empty()) { std::cout << Arc::IString("No jobs") << std::endl; delete jobstore; return 1; } int retval = (int)!jobmaster.Cancel(); unsigned int selected_num = jobmaster.GetSelectedJobs().size(); unsigned int canceled_num = jobmaster.GetIDsProcessed().size(); unsigned int cleaned_num = 0; if (!opt.keep) { std::list canceled = jobmaster.GetIDsProcessed(); // No need to clean selection because retrieved is subset of selected jobmaster.SelectByID(canceled); if(!jobmaster.Clean()) 
{ std::cout << Arc::IString("Warning: Some jobs were not removed from server") << std::endl; std::cout << Arc::IString(" Use arcclean to remove retrieved jobs from job list", usercfg.JobListFile()) << std::endl; retval = 1; } cleaned_num = jobmaster.GetIDsProcessed().size(); if (!jobstore->Remove(jobmaster.GetIDsProcessed())) { std::cout << Arc::IString("Warning: Failed removing jobs from file (%s)", usercfg.JobListFile()) << std::endl; std::cout << Arc::IString(" Run 'arcclean -s Undefined' to remove killed jobs from job list") << std::endl; retval = 1; } std::cout << Arc::IString("Jobs processed: %d, successfully killed: %d, successfully cleaned: %d", selected_num, canceled_num, cleaned_num) << std::endl; } else { std::cout << Arc::IString("Jobs processed: %d, successfully killed: %d", selected_num, canceled_num) << std::endl; } delete jobstore; return retval; } nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arcsub.1.in0000644000000000000000000000012612574532370023061 xustar000000000000000027 mtime=1441969400.372727 30 atime=1513200652.108650971 29 ctime=1513200664.50880263 nordugrid-arc-5.4.2/src/clients/compute/arcsub.1.in0000644000175000002070000003403312574532370023127 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARCSUB 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arcsub \- ARC Submission .SH DESCRIPTION The .B arcsub command is used for submitting jobs to Grid enabled computing resources. .SH SYNOPSIS .B arcsub [options] [filename ...] .SH OPTIONS .IP "\fB-c\fR, \fB--cluster\fR=\fIname\fR" select one or more computing elements: \fIname\fR can be an alias for a single CE, a group of CEs or a URL .IP "\fB-g\fR, \fB--index\fR=\fIname\fR" select one or more registries: \fIname\fR can be an alias for a single registry, a group of registries or a URL .IP "\fB-R\fR, \fB--rejectdiscovery\fR=\fIURL\fR" skip the service with the given URL during service discovery .IP "\fB-S\fR, \fB--submissioninterface\fR=\fIInterfaceName\fR" only use this interface for submitting (e.g. 
org.nordugrid.gridftpjob, org.ogf.glue.emies.activitycreation, org.ogf.bes) .IP "\fB-I\fR, \fB--infointerface\fR=\fIInterfaceName\fR" the computing element specified by URL at the command line should be queried using this information interface (possible options: org.nordugrid.ldapng, org.nordugrid.ldapglue2, org.nordugrid.wsrfglue2, org.ogf.glue.emies.resourceinfo) .IP "\fB-e\fR, \fB--jobdescrstring=\fIString" jobdescription string describing the job to be submitted .IP "\fB-f\fR, \fB--jobdescrfile=\fIfilename" jobdescription file describing the job to be submitted .IP "\fB-j\fR, \fB--joblist\fR=\fIfilename\fR" the file storing information about active jobs (default ~/.arc/jobs.xml) .IP "\fB-o\fR, \fB--jobids-to-file\fR=\fIfilename\fR" the IDs of the submitted jobs will be appended to this file .IP "\fB-D\fR, \fB--dryrun\fR" submit jobs as dry run (no submission to batch system) .IP "\fB --direct\fR" submit directly - no resource discovery or matchmaking .IP "\fB-x\fR, \fB--dumpdescription\fR" do not submit - dump job description in the language accepted by the target .IP "\fB-P\fR, \fB--listplugins\fR" list the available plugins .IP "\fB-t\fR, \fB--timeout\fR=\fIseconds\fR" timeout in seconds (default 20) .IP "\fB-z\fR, \fB--conffile\fR=\fIfilename\fR" configuration file (default ~/.arc/client.conf) .IP "\fB-d\fR, \fB--debug\fR=\fIdebuglevel\fR" FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG .IP "\fB-b\fR, \fB--broker\fR=\fIbroker\fR" selected broker: Random (default), FastestQueue or custom. Use -P to find possible options. .IP "\fB-v\fR, \fB--version\fR" print version information .IP "\fB-?\fR, \fB--help\fR" print help .LP .SH ARGUMENTS .IP "\fBfilename\fR ..." job description files describing the jobs to be submitted .LP .SH EXTENDED DESCRIPTION \fBarcsub\fR is the key command when submitting jobs to Grid enabled computing resources with the ARC client. By default \fBarcsub\fR is able to submit jobs to A-REX, CREAM and EMI ES enabled computing elements (CEs), and as always for successful submission you need to be authenticated at the targeted computing services. Since \fBarcsub\fR is built on a modular library, modules can be installed which enable submission to other targets, e.g. the classic ARC CE Grid-Manager. Job submission can be accomplished by specifying a job description file to submit as an argument. \fBarcsub\fR will then by default perform resource discovery on the Grid, and the discovered resources will be matched to the job description and ranked according to the chosen broker (\fB--broker\fR option). If no Grid environment has been configured, please contact your system administrator, or set one up yourself in the client configuration file (see files section). Another option is to explicitly specify a registry service (or multiple) to \fBarcsub\fR using the \fB--index\fR option, which accepts a URL, alias or group. Alternatively a specific CE (or multiple) can be targeted by using the \fB--cluster\fR option. If such a scenario is the most common, it is worthwhile to specify those CEs in the client configuration as default services, which makes it superfluous to specify them as an argument. In the same manner, aliases and groups defined in the configuration file can be used as arguments to the \fB--cluster\fR or \fB--index\fR options. 
In all of the above scenarios \fBarcsub\fR obtains resource information from the services, which is then used for matchmaking against the job description; however, that step can be avoided by specifying the \fB--direct\fR option, in which case the job description is submitted directly to the first specified endpoint. The format of a classic GRIDFTP-based cluster URL is: .br \fB[ldap://]<hostname>[:2135/nordugrid-cluster-name=<hostname>,Mds-Vo-name=local,o=grid]\fR .br Only the \fBhostname\fR part has to be specified; the rest of the URL is automatically generated. The format of an A-REX URL is: .br \fB[https://]<hostname>[:<port>][/<path>]\fR .br Here the port is 443 by default, but the path cannot be guessed, so if it is not specified, then the service is assumed to live on the root path. Job descriptions can also be specified using the \fB--jobdescrfile\fR option, which expects the file name of the description as argument, or the \fB--jobdescrstring\fR option, which expects the job description itself as a string; both options can be specified multiple times and one does not exclude the other. The default supported job description languages are xRSL, JSDL and JDL. If the job description is successfully submitted, a job-ID is returned and printed. This job-ID uniquely identifies the job while it is being executed. On the other hand it is also possible that no CE matches the constraints defined in the description, in which case no submission will be done. Upon successful submission, the job-ID along with more technical job information is stored in the job-list file (described below). The stored information enables the job management commands of the ARC client to manage jobs easily, and thus the job-ID need not be saved manually. By default the job-list file is stored in the .arc directory in the home directory of the user, however another location can be specified using the \fB--joblist\fR option, taking the location of this file as argument. If the \fB--joblist\fR option was used during submission, it should also be specified in the subsequent commands when managing the job. If a Computing Element has multiple job submission interfaces (e.g. gridftp, EMI-ES, BES), then the brokering algorithm will choose one of them. With the \fB--submissioninterface\fR option the requested interface can be specified; in that case only those Computing Elements will be considered which have that specific interface, and only that interface will be used to submit the jobs. As mentioned above, registry or index services can be specified with the \fB--index\fR option. Specifying one or multiple index servers instructs the \fBarcsub\fR command to query the servers for registered CEs; the returned CEs will then be matched against the job description, those matching will be ranked by the chosen broker (see below), and submission will be tried in order until it succeeds or the list is exhausted. From the returned list of CEs it might happen that a troublesome or undesirable CE is selected for submission; in that case it is possible to reject that cluster using the \fB--rejectdiscovery\fR option and providing the URL (or just the hostname) of the CE, which will disregard that CE as a target for submission. When multiple CEs are targeted for submission, the resource broker will be used to filter out CEs which do not match the job description requirements and then rank the remaining CEs. The broker used by default will rank the CEs randomly, however a different broker can be chosen by using the \fB--broker\fR option, which takes the name of the broker as argument. 
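The same select-rank-submit flow is what the client tools shipped in this package implement; as an illustration only, a minimal C++ sketch modelled on arctest.cpp is shown below (usercfg is assumed to be an initialized Arc::UserConfig, job a parsed Arc::JobDescription and endpoints a std::list<Arc::Endpoint> of services to query; includes and error handling are omitted):

// Illustrative sketch only, modelled on arctest.cpp in this package; the
// variables usercfg, job and endpoints are assumed to be prepared by the caller.
Arc::Broker broker(usercfg, job, usercfg.Broker().first);        // broker name, e.g. "Random" or "FastestQueue"
Arc::ExecutionTargetSorter ets(broker);                          // ranks the targets that match the job
Arc::ComputingServiceRetriever csr(usercfg, std::list<Arc::Endpoint>(),
                                   std::list<std::string>(),     // no rejected services
                                   std::set<std::string>());     // no preferred information interfaces
csr.addConsumer(ets);                                            // discovered CEs are fed into the sorter
for (std::list<Arc::Endpoint>::const_iterator it = endpoints.begin(); it != endpoints.end(); ++it)
  csr.addEndpoint(*it);
csr.wait();                                                      // block until the information queries finish
Arc::Job submitted;
for (ets.reset(); !ets.endOfList(); ets.next())                  // targets are visited best-first
  if (ets->Submit(usercfg, job, submitted)) break;               // stop at the first successful submission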
The broker type can also be specified in client.conf. The brokers available can be seen using .B arcsub -P. By default the following brokers are available: .IP "Random (default)" Chooses a random CE matching the job requirements. .IP "FastestQueue" Ranks matching CEs according to the length of the job queue at the CEs, ranking those with the shortest queue first/highest. .IP "Benchmark" Ranks matching CEs according to a specified benchmark, which should be specified by appending the broker name with ':' and then the name of the benchmark. If no option is given to the Benchmark broker then CEs will be ranked according to the 'specint2000' benchmark. .IP "Data" Ranks matching CEs according to the amount of input data cached by each CE, by querying the CE. Only CEs with the A-REX BES interface support this operation. .IP "Null" Chooses a random CE with no filtering of CEs at all. .IP "PythonBroker" User-defined custom brokers can be created in Python. See the example brokers SampleBroker.py or ACIXBroker.py (like the Data broker, but using the ARC Cache Index), which come installed with ARC, for more details on how to write your own broker. A PythonBroker is specified by \fB--broker PythonBroker:Filename.Class:args\fR, where Filename is the file containing the class Class which implements the broker interface. The directory containing this file must be in the PYTHONPATH. args is optional and allows specifying arguments to the broker. .P Before submission, \fBarcsub\fR performs an intelligent modification of the job description (adding or modifying attributes, even converting the description language to fit the needs of the CE) ensuring that it is valid. The modified job description can be printed by specifying the \fB--dumpdescription\fR option. The format, i.e. job description language, of the printed job description cannot be specified, and will be that which will be sent to and accepted by the chosen target. Further information from \fBarcsub\fR can be obtained by increasing the verbosity, which is done with the \fB--debug\fR option; the default verbosity level is WARNING. Setting the level to DEBUG will show all messages, while setting it to FATAL will only show fatal log messages. To \fBvalidate\fR your job description without actually submitting a job, use the \fB--dryrun\fR option: it will capture possible syntax or other errors, but will instruct the site not to submit the job for execution. Only the grid-manager (ARC0) and A-REX (ARC1) CEs support this feature. .SH EXAMPLES Submission of a job description file "helloworld.jsdl" to the Grid: .br \fBarcsub helloworld.jsdl\fR An information index server (registry) can also be queried for CEs to submit to: .br \fBarcsub -g registry.example.com helloworld.jsdl\fR Submission of a job description file "helloworld.jsdl" to ce.example.com: .br \fBarcsub -c ce.example.com helloworld.jsdl\fR Direct submission to a CE is done as: .br \fBarcsub --direct -c ce.example.com helloworld.jsdl\fR The job description can also be specified directly on the command line as shown in this example, using the XRSL job description language: .br \fBarcsub -c example.com/arex -e \\ .br \'&(executable="/bin/echo")(arguments="Hello World!")\'\fR When submitting against CEs retrieved from information index servers it might be useful to do resource brokering: .br \fBarcsub -g registry.example.com -b FastestQueue helloworld.jsdl\fR If the job has a large input data set, it can be useful to send it to a CE where those files are already cached. 
The ACIX broker can be used for this: .br \fBarcsub -g registry.example.com -b PythonBroker:ACIXBroker.ACIXBroker:https://cacheindex.ndgf.org:6443/data/index helloworld.jsdl\fR Disregarding a specific CE for submission when submitting against an information index server: .br \fBarcsub -g registry.example.com -R badcomputingelement.com/arex helloworld.jsdl\fR Dumping the job description is done as follows: .br \fBarcsub -c example.com/arex -x helloworld.jsdl\fR .SH FILES .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. Registry and computing element services can be specified in separate sections of the config. The default services can be specified by adding the 'default=yes' attribute to the section of the service; thus, when no \fB--cluster\fR or \fB--index\fR options are given, these will be used for submission. Each service has an alias, and can be a member of any number of groups. Then specifying the alias or the name of the group with the \fB--cluster\fR or \fB--index\fR options will select the given services. By using the \fB--conffile\fR option a different configuration file can be used than the default. Note that some installations also have a system client configuration file, however attributes in the client one take precedence, and command line options in turn take precedence over configuration file attributes. .TP .B ~/.arc/jobs.xml This is a local list of the user's active jobs. When a job is successfully submitted it is added to this list, and when it is removed from the remote cluster it is removed from this list. This list is used as the list of all active jobs when the user specifies the \fB--all\fR option to the various NorduGrid ARC user interface commands. By using the \fB--joblist\fR option a different file can be used than the default. .SH ENVIRONMENT VARIABLES .TP .B X509_USER_PROXY The location of the user's Grid proxy file. Shouldn't be set unless the proxy is in a non-standard location. .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. 
Please report bugs and feature requests to http://bugzilla.nordugrid.org .SH SEE ALSO .BR arccat (1), .BR arcclean (1), .BR arccp (1), .BR arcget (1), .BR arcinfo (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcrenew (1), .BR arcresub (1), .BR arcresume (1), .BR arcrm (1), .BR arcstat (1), .BR arcsync (1), .BR arctest (1) nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arcsync.cpp0000644000000000000000000000012413213442363023250 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200575.330711 30 ctime=1513200664.523802814 nordugrid-arc-5.4.2/src/clients/compute/arcsync.cpp0000644000175000002070000001660713213442363023327 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "utils.h" class JobSynchronizer : public Arc::EntityConsumer { public: JobSynchronizer( const Arc::UserConfig& uc, const std::list& services, const std::list& rejectedServices = std::list(), const std::set& preferredInterfaceNames = std::set(), const std::list& capabilityFilter = std::list(1, Arc::Endpoint::GetStringForCapability(Arc::Endpoint::COMPUTINGINFO)) ) : uc(uc), ser(uc, Arc::EndpointQueryOptions(true, capabilityFilter, rejectedServices)), jlr(uc, Arc::EndpointQueryOptions(preferredInterfaceNames)) { jlr.needAllResults(); ser.addConsumer(*this); jlr.addConsumer(jobs); for (std::list::const_iterator it = services.begin(); it != services.end(); ++it) { if (it->HasCapability(Arc::Endpoint::REGISTRY)) { ser.addEndpoint(*it); } else { jlr.addEndpoint(*it); } } } void wait() { ser.wait(); jlr.wait(); } void addEntity(const Arc::Endpoint& service) { if (service.HasCapability(Arc::Endpoint::COMPUTINGINFO)) { jlr.addEndpoint(service); } } bool writeJobs(bool truncate) { bool jobsWritten = false; bool jobsReported = false; Arc::JobInformationStorage *jobstore = createJobInformationStorage(uc); if (jobstore == NULL) { std::cerr << Arc::IString("Warning: Unable to open job list file (%s), unknown format", uc.JobListFile()) << std::endl; return false; } // Write extracted job info to joblist if (truncate) { jobstore->Clean(); if ( (jobsWritten = jobstore->Write(jobs)) ) { for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { if (!jobsReported) { std::cout << Arc::IString("Found the following jobs:")<Name.empty()) { std::cout << it->Name << " (" << it->JobID << ")" << std::endl; } else { std::cout << it->JobID << std::endl; } } std::cout << Arc::IString("Total number of jobs found: ") << jobs.size() << std::endl; } } else { std::list newJobs; std::set prunedServices; jlr.getServicesWithStatus(Arc::EndpointQueryingStatus::SUCCESSFUL, prunedServices); if ( (jobsWritten = jobstore->Write(jobs, prunedServices, newJobs)) ) { for (std::list::const_iterator it = newJobs.begin(); it != newJobs.end(); ++it) { if (!jobsReported) { std::cout << Arc::IString("Found the following new jobs:")<Name.empty()) { std::cout << (*it)->Name << " (" << (*it)->JobID << ")" << std::endl; } else { std::cout << (*it)->JobID << std::endl; } } std::cout << Arc::IString("Total number of new jobs found: ") << newJobs.size() << std::endl; } } delete jobstore; if (!jobsWritten) { std::cout << Arc::IString("ERROR: Failed to write job information to file (%s)", uc.JobListFile()) << std::endl; return false; } return true; } private: const Arc::UserConfig& uc; Arc::ServiceEndpointRetriever ser; 
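// ser discovers registries/index services; each COMPUTINGINFO endpoint it reports is forwarded to jlr through addEntity() above, and jlr in turn retrieves the job lists from those CEs into the 'jobs' container.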
Arc::JobListRetriever jlr; Arc::EntityContainer jobs; }; int RUNMAIN(arcsync)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arcsync"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_SYNC, " ", istring("The arcsync command synchronizes your " "local job list with the information at\n" "the given resources or index servers.")); std::list params = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arcsync", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); if (opt.show_plugins) { std::list types; types.push_back("HED:JobListRetrieverPlugin"); showplugins("arcsync", types, logger); return 0; } Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (opt.timeout > 0) usercfg.Timeout(opt.timeout); if (!checkproxy(usercfg)) { return 1; } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); //sanity check if (!opt.forcesync) { std::cout << Arc::IString("Synchronizing the local list of active jobs with the information in the\n" "information system can result in some inconsistencies. Very recently submitted\n" "jobs might not yet be present, whereas jobs very recently scheduled for\n" "deletion can still be present." ) << std::endl; std::cout << Arc::IString("Are you sure you want to synchronize your local job list?") << " [" << Arc::IString("y") << "/" << Arc::IString("n") << "] "; std::string response; std::cin >> response; if (Arc::lower(response) != std::string(Arc::FindTrans("y"))) { std::cout << Arc::IString("Cancelling synchronization request") << std::endl; return 0; } } std::list endpoints = getServicesFromUserConfigAndCommandLine(usercfg, opt.indexurls, opt.clusters); std::list rejectDiscoveryURLs = getRejectDiscoveryURLsFromUserConfigAndCommandLine(usercfg, opt.rejectdiscovery); if (endpoints.empty()) { logger.msg(Arc::ERROR, "No services specified. Please configure default services in the client configuration," "or specify a cluster or index (-c or -g options, see arcsync -h)."); return 1; } std::set preferredInterfaceNames; if (usercfg.InfoInterface().empty()) { preferredInterfaceNames.insert("org.nordugrid.ldapglue2"); } else { preferredInterfaceNames.insert(usercfg.InfoInterface()); } JobSynchronizer js(usercfg, endpoints, rejectDiscoveryURLs, preferredInterfaceNames); js.wait(); return js.writeJobs(opt.truncate)?0:1; // true -> 0, false -> 1. 
} nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arcrenew.cpp0000644000000000000000000000012413213442363023414 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200575.338712 30 ctime=1513200664.518802752 nordugrid-arc-5.4.2/src/clients/compute/arcrenew.cpp0000644000175000002070000001005613213442363023463 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "utils.h" int RUNMAIN(arcrenew)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arcrenew"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_RENEW, istring("[job ...]")); std::list jobidentifiers = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arcrenew", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); if (opt.show_plugins) { std::list types; types.push_back("HED:JobControllerPlugin"); showplugins("arcrenew", types, logger); return 0; } Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (!checkproxy(usercfg)) { return 1; } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); for (std::list::const_iterator it = opt.jobidinfiles.begin(); it != opt.jobidinfiles.end(); ++it) { if (!Arc::Job::ReadJobIDsFromFile(*it, jobidentifiers)) { logger.msg(Arc::WARNING, "Cannot read specified jobid file: %s", *it); } } if (opt.timeout > 0) usercfg.Timeout(opt.timeout); if ((!opt.joblist.empty() || !opt.status.empty()) && jobidentifiers.empty() && opt.clusters.empty()) opt.all = true; if (jobidentifiers.empty() && opt.clusters.empty() && !opt.all) { logger.msg(Arc::ERROR, "No jobs given"); return 1; } std::list selectedURLs; if (!opt.clusters.empty()) { selectedURLs = getSelectedURLsFromUserConfigAndCommandLine(usercfg, opt.clusters); } std::list rejectManagementURLs = getRejectManagementURLsFromUserConfigAndCommandLine(usercfg, opt.rejectmanagement); std::list jobs; Arc::JobInformationStorage *jobstore = createJobInformationStorage(usercfg); if (jobstore != NULL && !jobstore->IsStorageExisting()) { logger.msg(Arc::ERROR, "Job list file (%s) doesn't exist", usercfg.JobListFile()); delete jobstore; return 1; } if (jobstore == NULL || ( opt.all && !jobstore->ReadAll(jobs, rejectManagementURLs)) || (!opt.all && !jobstore->Read(jobs, jobidentifiers, selectedURLs, rejectManagementURLs))) { logger.msg(Arc::ERROR, "Unable to read job information from file (%s)", usercfg.JobListFile()); delete jobstore; return 1; } delete jobstore; if (!opt.all) { for (std::list::const_iterator itJIDAndName = jobidentifiers.begin(); itJIDAndName != jobidentifiers.end(); ++itJIDAndName) { std::cout << Arc::IString("Warning: Job not found in job list: %s", *itJIDAndName) << std::endl; } } Arc::JobSupervisor jobmaster(usercfg, jobs); jobmaster.Update(); jobmaster.SelectValid(); if 
(!opt.status.empty()) { jobmaster.SelectByStatus(opt.status); } if (jobmaster.GetSelectedJobs().empty()) { std::cout << Arc::IString("No jobs") << std::endl; return 1; } int retval = (int)!jobmaster.Renew(); std::cout << Arc::IString("Jobs processed: %d, renewed: %d", jobmaster.GetIDsProcessed().size()+jobmaster.GetIDsNotProcessed().size(), jobmaster.GetIDsProcessed().size()) << std::endl; return retval; } nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arcresub.1.in0000644000000000000000000000012712441050167023401 xustar000000000000000027 mtime=1417957495.519061 30 atime=1513200652.186651926 30 ctime=1513200664.506802606 nordugrid-arc-5.4.2/src/clients/compute/arcresub.1.in0000644000175000002070000001604712441050167023453 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARCRESUB 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arcresub \- ARC Resubmission .SH DESCRIPTION The .B arcresub command is used for resubmitting jobs to grid enabled computing resources. .SH SYNOPSIS .B arcresub [options] [jobid ...] .SH OPTIONS .IP "\fB-a\fR, \fB--all\fR" all jobs .IP "\fB-j\fR, \fB--joblist\fR=\fIfilename\fR" the file storing information about active jobs (default ~/.arc/jobs.xml) .IP "\fB-i\fR, \fB--jobids-from-file\fR=\fIfilename\fR" a file containing a list of job IDs .IP "\fB-o\fR, \fB--jobids-to-file\fR=\fIfilename\fR" the IDs of the submitted jobs will be appended to this file .IP "\fB-c\fR, \fB--cluster\fR=\fIname\fR" select one or more computing elements: \fIname\fR can be an alias for a single CE, a group of CEs or a URL .IP "\fB-q\fR, \fB--qluster\fR=\fIname\fR" select one or more computing elements for the new jobs: \fIname\fR can be an alias for a single CE, a group of CEs or a URL .IP "\fB-g\fR, \fB--index\fR=\fIname\fR" select one or more registries: \fIname\fR can be an alias for a single registry, a group of registries or a URL .IP "\fB-r\fR, \fB--rejectmanagement\fR=\fIURL\fR" skip jobs which are on a computing element with a given URL .IP "\fB-R\fR, \fB--rejectdiscovery\fR=\fIURL\fR" skip the service with the given URL during service discovery .IP "\fB-S\fR, \fB--submissioninterface\fR=\fIInterfaceName\fR" only use this interface for submitting (e.g. org.nordugrid.gridftpjob, org.ogf.glue.emies.activitycreation, org.ogf.bes) .IP "\fB-I\fR, \fB--infointerface\fR=\fIInterfaceName\fR" the computing element specified by URL at the command line should be queried using this information interface (possible options: org.nordugrid.ldapng, org.nordugrid.ldapglue2, org.nordugrid.wsrfglue2, org.ogf.glue.emies.resourceinfo) .IP "\fB-k\fR, \fB--keep\fR" keep the files on the server (do not clean) .IP "\fB-m\fR, \fB--same\fR" resubmit to the same cluster .IP "\fB-M\fR, \fB--not-same\fR" do not resubmit to the same cluster .IP "\fB-s\fR, \fB--status\fR=\fIstatusstr\fR" only select jobs whose status is statusstr .IP "\fB-P\fR, \fB--listplugins\fR" list the available plugins .IP "\fB-t\fR, \fB--timeout\fR=\fIseconds\fR" timeout in seconds (default 20) .IP "\fB-z\fR, \fB--conffile\fR=\fIfilename\fR" configuration file (default ~/.arc/client.conf) .IP "\fB-d\fR, \fB--debug\fR=\fIdebuglevel\fR" FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG .IP "\fB-b\fR, \fB--broker\fR=\fIbroker\fR" selected broker: Random (default), FastestQueue or custom .IP "\fB-v\fR, \fB--version\fR" print version information .IP "\fB-?\fR, \fB--help\fR" print help .LP .SH ARGUMENTS .IP "\fBjobid\fR ..." 
job ID or job name of jobs to be resubmitted .LP .SH EXTENDED DESCRIPTION The .B arcresub command is used for resubmitting jobs to grid enabled computing resources. The job can be referred to either by the job ID that was returned by .BR arcsub (1) at submission time or by its jobname if the job description that was submitted contained a jobname attribute. More than one job ID and/or jobname can be given. If several jobs were submitted with the same jobname all those jobs will be resubmitted. If the job description of a job to be resubmitted contained any local input files, checksums of these were calculated and stored in the job list, and those are used to check whether the files have changed. If local input files have changed, the job will not be resubmitted. In case the job description is not found in the job list, it will be retrieved from the cluster holding the job. This description, however, might differ from the one originally used to submit the job, since both the submission client and the cluster can have made modifications to the job description. Upon resubmission the job will receive a new job ID, and the old job ID will be stored in the local job list file, enabling future back tracing of the resubmitted job. The name of the local job list file can be specified with the .B --joblist option. By specifying the .B --all option, all active jobs appearing in the job list file will be resubmitted. The .B --cluster option can be used to select or reject jobs at specific clusters to be resubmitted. See .BR arcsub (1) for a discussion of the format of arguments to this option. The .B --qluster option can be used to force the jobs to be resubmitted to a particular cluster, or to reject resubmission to a particular cluster. Again see .BR arcsub (1) for a discussion of the format of arguments to this option. The .B --status option can be used to select jobs in a specific state. These options can be repeated several times. See .BR arcstat (1) for possible state values. Which servers to query can be specified by giving the .B --index option to the command. See .BR arcsub (1) for a discussion of the format of arguments to this option. The default behaviour of .BR arcresub is to resubmit to any cluster. This behaviour can be changed by specifying the \fB--same\fR or \fB--not-same\fR options. The former will resubmit a job to the same cluster on which the job resides, and thus the \fB--qluster\fR and \fB--index\fR options are ignored. The latter will resubmit a job to any cluster except the cluster on which it resides. Note that the \fB--same\fR and \fB--not-same\fR options cannot be specified together. If the old job was successfully killed, it will be removed from the remote cluster unless the \fB--keep\fR option was specified. .SH FILES .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .TP .B ~/.arc/jobs.xml This is a local list of the user's active jobs. When a job is successfully submitted it is added to this list and when it is removed from the remote cluster it is removed from this list. This list is used as the list of all active jobs when the user specifies the .B --all option to the various NorduGrid ARC user interface commands. By using the .B --joblist option a different file can be used than the default. .SH ENVIRONMENT VARIABLES .TP .B X509_USER_PROXY The location of the user's Grid proxy file. Shouldn't be set unless the proxy is in a non-standard location. .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable.
If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org .SH SEE ALSO .BR arccat (1), .BR arcclean (1), .BR arccp (1), .BR arcget (1), .BR arcinfo (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcrenew (1), .BR arcresume (1), .BR arcrm (1), .BR arcstat (1), .BR arcsub (1), .BR arcsync (1), .BR arctest (1) nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/utils.cpp0000644000000000000000000000012413213442363022746 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200575.328711 30 ctime=1513200664.512802679 nordugrid-arc-5.4.2/src/clients/compute/utils.cpp0000644000175000002070000004571513213442363023027 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "utils.h" #ifndef WIN32 #include #include ConsoleRecovery::ConsoleRecovery(void) { ti = new termios; if (tcgetattr(STDIN_FILENO, ti) == 0) return; delete ti; ti = NULL; } ConsoleRecovery::~ConsoleRecovery(void) { if(ti) tcsetattr(STDIN_FILENO, TCSANOW, ti); delete ti; } #else ConsoleRecovery::ConsoleRecovery(void) { ti=NULL; } ConsoleRecovery::~ConsoleRecovery(void) { } #endif std::list getSelectedURLsFromUserConfigAndCommandLine(Arc::UserConfig usercfg, std::list computingelements) { std::list endpoints = getServicesFromUserConfigAndCommandLine(usercfg, std::list(), computingelements); std::list serviceURLs; for (std::list::const_iterator it = endpoints.begin(); it != endpoints.end(); ++it) { serviceURLs.push_back(it->URLString); } return serviceURLs; } std::list getRejectDiscoveryURLsFromUserConfigAndCommandLine(Arc::UserConfig usercfg, std::list rejectdiscovery) { std::list rejectDiscoveryURLs = usercfg.RejectDiscoveryURLs(); rejectDiscoveryURLs.insert(rejectDiscoveryURLs.end(), rejectdiscovery.begin(), rejectdiscovery.end()); return rejectDiscoveryURLs; } std::list getRejectManagementURLsFromUserConfigAndCommandLine(Arc::UserConfig usercfg, std::list rejectmanagement) { std::list rejectManagementURLs = usercfg.RejectManagementURLs(); rejectManagementURLs.insert(rejectManagementURLs.end(), rejectmanagement.begin(), rejectmanagement.end()); return rejectManagementURLs; } std::list getServicesFromUserConfigAndCommandLine(Arc::UserConfig usercfg, std::list registries, std::list computingelements, std::string requestedSubmissionInterfaceName, std::string infointerface) { std::list services; if (computingelements.empty() && registries.empty()) { std::list endpoints = usercfg.GetDefaultServices(); for (std::list::const_iterator its = endpoints.begin(); its != endpoints.end(); ++its) { services.push_back(*its); } } else { for (std::list::const_iterator it = computingelements.begin(); it != computingelements.end(); ++it) { // check if the string is a group or alias std::list newServices = usercfg.GetServices(*it, Arc::ConfigEndpoint::COMPUTINGINFO); if (newServices.empty()) 
{ // if it was not an alias or a group, then it should be the URL Arc::Endpoint service(*it); service.Capability.insert(Arc::Endpoint::GetStringForCapability(Arc::Endpoint::COMPUTINGINFO)); if (!infointerface.empty()) { service.InterfaceName = infointerface; } service.RequestedSubmissionInterfaceName = requestedSubmissionInterfaceName; services.push_back(service); } else { // if it was a group (or an alias), add all the services for (std::list::iterator its = newServices.begin(); its != newServices.end(); ++its) { if (!requestedSubmissionInterfaceName.empty()) { // if there was a submission interface requested, this overrides the one from the config its->RequestedSubmissionInterfaceName = requestedSubmissionInterfaceName; } services.push_back(*its); } } } for (std::list::const_iterator it = registries.begin(); it != registries.end(); ++it) { // check if the string is a name of a group std::list newServices = usercfg.GetServices(*it, Arc::ConfigEndpoint::REGISTRY); if (newServices.empty()) { // if it was not an alias or a group, then it should be the URL Arc::Endpoint service(*it); service.Capability.insert(Arc::Endpoint::GetStringForCapability(Arc::Endpoint::REGISTRY)); services.push_back(service); } else { // if it was a group (or an alias), add all the services services.insert(services.end(), newServices.begin(), newServices.end()); } } } return services; } void showplugins(const std::string& program, const std::list& types, Arc::Logger& logger, const std::string& chosenBroker) { for (std::list::const_iterator itType = types.begin(); itType != types.end(); ++itType) { if (*itType == "HED:SubmitterPlugin") { std::cout << Arc::IString("Types of execution services %s is able to submit jobs to:", program) << std::endl; } else if (*itType == "HED:ServiceEndpointRetrieverPlugin") { std::cout << Arc::IString("Types of registry services which %s is able collect information from:", program) << std::endl; } else if (*itType == "HED:TargetInformationRetrieverPlugin") { std::cout << Arc::IString("Types of local information services which %s is able collect information from:", program) << std::endl; } else if (*itType == "HED:JobListRetriever") { std::cout << Arc::IString("Types of local information services which %s is able collect job information from:", program) << std::endl; } else if (*itType == "HED:JobControllerPlugin") { std::cout << Arc::IString("Types of services %s is able to manage jobs at:", program) << std::endl; } else if (*itType == "HED:JobDescriptionParserPlugin") { std::cout << Arc::IString("Job description languages supported by %s:", program) << std::endl; } else if (*itType == "HED:BrokerPlugin") { std::cout << Arc::IString("Brokers available to %s:", program) << std::endl; } std::list modules; Arc::PluginsFactory pf(Arc::BaseConfig().MakeConfig(Arc::Config()).Parent()); bool isDefaultBrokerLocated = false; pf.scan(Arc::FinderLoader::GetLibrariesList(), modules); Arc::PluginsFactory::FilterByKind(*itType, modules); for (std::list::iterator itMod = modules.begin(); itMod != modules.end(); ++itMod) { for (std::list::iterator itPlug = itMod->plugins.begin(); itPlug != itMod->plugins.end(); ++itPlug) { std::cout << " " << itPlug->name; if (*itType == "HED:BrokerPlugin" && itPlug->name == chosenBroker) { std::cout << " (default)"; isDefaultBrokerLocated = true; } std::cout << " - " << itPlug->description << std::endl; } } if (*itType == "HED:BrokerPlugin" && !isDefaultBrokerLocated) { logger.msg(Arc::WARNING, "Default broker (%s) is not available. 
When using %s a broker should be specified explicitly (-b option).", chosenBroker, program); } } } bool checkproxy(const Arc::UserConfig& uc) { if (!uc.ProxyPath().empty() ) { Arc::Credential holder(uc.ProxyPath(), "", "", ""); if (holder.GetEndTime() < Arc::Time()){ std::cout << Arc::IString("Proxy expired. Job submission aborted. Please run 'arcproxy'!") << std::endl; return false; } } else { std::cout << Arc::IString("Cannot find any proxy. This application currently cannot run without a proxy.\n" " If you have the proxy file in a non-default location,\n" " please make sure the path is specified in the client configuration file.\n" " If you don't have a proxy yet, please run 'arcproxy'!") << std::endl; return false; } return true; } void splitendpoints(std::list& selected, std::list& rejected) { // Removes slashes from end of endpoint strings, and put strings with leading '-' into rejected list. for (std::list::iterator it = selected.begin(); it != selected.end();) { if ((*it)[it->length()-1] == '/') { it->erase(it->length()-1); continue; } if (it->empty()) { it = selected.erase(it); continue; } if ((*it)[0] == '-') { rejected.push_back(it->substr(1)); it = selected.erase(it); } else { ++it; } } } Arc::JobInformationStorage* createJobInformationStorage(const Arc::UserConfig& uc) { Arc::JobInformationStorage* jis = NULL; if (Glib::file_test(uc.JobListFile(), Glib::FILE_TEST_EXISTS)) { for (int i = 0; Arc::JobInformationStorage::AVAILABLE_TYPES[i].name != NULL; ++i) { jis = (Arc::JobInformationStorage::AVAILABLE_TYPES[i].instance)(uc.JobListFile()); if (jis && jis->IsValid()) { return jis; } delete jis; } return NULL; } for (int i = 0; Arc::JobInformationStorage::AVAILABLE_TYPES[i].name != NULL; ++i) { if (uc.JobListType() == Arc::JobInformationStorage::AVAILABLE_TYPES[i].name) { jis = (Arc::JobInformationStorage::AVAILABLE_TYPES[i].instance)(uc.JobListFile()); if (jis && jis->IsValid()) { return jis; } delete jis; return NULL; } } if (Arc::JobInformationStorage::AVAILABLE_TYPES[0].instance != NULL) { jis = (Arc::JobInformationStorage::AVAILABLE_TYPES[0].instance)(uc.JobListFile()); if (jis && jis->IsValid()) { return jis; } delete jis; } return NULL; } ClientOptions::ClientOptions(Client_t c, const std::string& arguments, const std::string& summary, const std::string& description) : Arc::OptionParser(arguments, summary, description), dryrun(false), dumpdescription(false), show_credentials(false), show_plugins(false), showversion(false), all(false), forcemigration(false), keep(false), forcesync(false), truncate(false), longlist(false), printids(false), same(false), notsame(false), show_stdout(true), show_stderr(false), show_joblog(false), usejobname(false), forcedownload(false), list_configured_services(false), direct_submission(false), show_unavailable(false), testjobid(-1), runtime(5), timeout(-1) { bool cIsJobMan = (c == CO_CAT || c == CO_CLEAN || c == CO_GET || c == CO_KILL || c == CO_RENEW || c == CO_RESUME || c == CO_STAT || c == CO_ACL); AddOption('c', "cluster", istring("select one or more computing elements: " "name can be an alias for a single CE, a group of CEs or a URL"), istring("name"), clusters); if (!cIsJobMan && c != CO_SYNC) { AddOption('I', "infointerface", istring("the computing element specified by URL at the command line " "should be queried using this information interface " "(possible options: org.nordugrid.ldapng, org.nordugrid.ldapglue2, " "org.nordugrid.wsrfglue2, org.ogf.glue.emies.resourceinfo)"), istring("interfacename"), infointerface); } if (c == CO_RESUB 
|| c == CO_MIGRATE) { AddOption('q', "qluster", istring("selecting a computing element for the new jobs with a URL or an alias, " "or selecting a group of computing elements with the name of the group"), istring("name"), qlusters); } if (c == CO_MIGRATE) { AddOption('f', "force", istring("force migration, ignore kill failure"), forcemigration); } if (c == CO_GET || c == CO_KILL || c == CO_MIGRATE || c == CO_RESUB) { AddOption('k', "keep", istring("keep the files on the server (do not clean)"), keep); } if (c == CO_SYNC) { AddOption('f', "force", istring("do not ask for verification"), forcesync); AddOption('T', "truncate", istring("truncate the joblist before synchronizing"), truncate); } if (c == CO_INFO || c == CO_STAT) { AddOption('l', "long", istring("long format (more information)"), longlist); } if (c == CO_INFO) { AddOption('L', "list-configured-services", istring("print a list of services configured in the client.conf"), list_configured_services); } if (c == CO_CAT) { AddOption('o', "stdout", istring("show the stdout of the job (default)"), show_stdout); AddOption('e', "stderr", istring("show the stderr of the job"), show_stderr); AddOption('l', "joblog", istring("show the CE's error log of the job"), show_joblog); } if (c == CO_GET) { AddOption('D', "dir", istring("download directory (the job directory will" " be created in this directory)"), istring("dirname"), downloaddir); AddOption('J', "usejobname", istring("use the jobname instead of the short ID as" " the job directory name"), usejobname); AddOption('f', "force", istring("force download (overwrite existing job directory)"), forcedownload); } if (c == CO_STAT) { // Option 'long' takes precedence over this option (print-jobids). AddOption('p', "print-jobids", istring("instead of the status only the IDs of " "the selected jobs will be printed"), printids); AddOption('S', "sort", istring("sort jobs according to jobid, submissiontime or jobname"), istring("order"), sort); AddOption('R', "rsort", istring("reverse sorting of jobs according to jobid, submissiontime or jobname"), istring("order"), rsort); AddOption('u', "show-unavailable", istring("show jobs where status information is unavailable"), show_unavailable); } if (c == CO_RESUB) { AddOption('m', "same", istring("resubmit to the same resource"), same); AddOption('M', "not-same", istring("do not resubmit to the same resource"), notsame); } if (c == CO_CLEAN) { AddOption('f', "force", istring("remove the job from the local list of jobs " "even if the job is not found in the infosys"), forceclean); } if (!cIsJobMan) { AddOption('g', "index", istring("select one or more registries: " "name can be an alias for a single registry, a group of registries or a URL"), istring("name"), indexurls); } if (c == CO_TEST) { AddOption('J', "job", istring("submit test job given by the number"), istring("int"), testjobid); AddOption('r', "runtime", istring("test job runtime specified by the number"), istring("int"), runtime); } if (cIsJobMan || c == CO_RESUB) { AddOption('s', "status", istring("only select jobs whose status is statusstr"), istring("statusstr"), status); } if (cIsJobMan || c == CO_MIGRATE || c == CO_RESUB) { AddOption('a', "all", istring("all jobs"), all); } if (c == CO_SUB) { AddOption('e', "jobdescrstring", istring("jobdescription string describing the job to " "be submitted"), istring("string"), jobdescriptionstrings); AddOption('f', "jobdescrfile", istring("jobdescription file describing the job to " "be submitted"), istring("string"), jobdescriptionfiles); } if (c == 
CO_MIGRATE || c == CO_RESUB || c == CO_SUB || c == CO_TEST) { AddOption('b', "broker", istring("select broker method (list available brokers with --listplugins flag)"), istring("broker"), broker); AddOption('o', "jobids-to-file", istring("the IDs of the submitted jobs will be appended to this file"), istring("filename"), jobidoutfile); AddOption('S', "submissioninterface", istring("only use this interface for submitting " "(e.g. org.nordugrid.gridftpjob, org.ogf.glue.emies.activitycreation, org.ogf.bes)"), istring("InterfaceName"), requestedSubmissionInterfaceName); } if (c == CO_MIGRATE || c == CO_RESUB || c == CO_SUB || c == CO_TEST || c == CO_INFO) { AddOption('R', "rejectdiscovery", istring("skip the service with the given URL during service discovery"), istring("URL"), rejectdiscovery); } if (cIsJobMan || c == CO_MIGRATE || c == CO_RESUB) { AddOption('i', "jobids-from-file", istring("a file containing a list of jobIDs"), istring("filename"), jobidinfiles); AddOption('r', "rejectmanagement", istring("skip jobs which are on a computing element with a given URL"), istring("URL"), rejectmanagement); } if (c == CO_SUB || c == CO_TEST) { AddOption('D', "dryrun", istring("submit jobs as dry run (no submission to batch system)"), dryrun); AddOption(0, "direct", istring("submit directly - no resource discovery or matchmaking"), direct_submission); AddOption('x', "dumpdescription", istring("do not submit - dump job description " "in the language accepted by the target"), dumpdescription); } if (c == CO_INFO) { AddOption('S', "submissioninterface", istring("only get information about executon targets which support this job submission interface " "(e.g. org.nordugrid.gridftpjob, org.ogf.glue.emies.activitycreation, org.ogf.bes)"), istring("InterfaceName"), requestedSubmissionInterfaceName); } if (c == CO_TEST) { AddOption('E', "certificate", istring("prints info about installed user- and CA-certificates"), show_credentials); } if (c != CO_INFO) { AddOption('j', "joblist", Arc::IString("the file storing information about active jobs (default %s)", Arc::UserConfig::JOBLISTFILE).str(), istring("filename"), joblist); } /* --- Standard options below --- */ AddOption('z', "conffile", istring("configuration file (default ~/.arc/client.conf)"), istring("filename"), conffile); AddOption('t', "timeout", istring("timeout in seconds (default 20)"), istring("seconds"), timeout); AddOption('P', "listplugins", istring("list the available plugins"), show_plugins); AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); AddOption('v', "version", istring("print version information"), showversion); } nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/utils.h0000644000000000000000000000012412406376653022426 xustar000000000000000027 mtime=1410989483.903785 27 atime=1513200575.336712 30 ctime=1513200664.513802691 nordugrid-arc-5.4.2/src/clients/compute/utils.h0000644000175000002070000001676312406376653022510 0ustar00mockbuildmock00000000000000#include #include #include #include #include #include #include #include #include struct termios; // This class records current state of console // when created and recovers it when destroyed. // Its main purpose is to recover console in // case application had to cancel any UI actions // involving changing console state like // password input. 
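//
// A minimal usage sketch (illustrative only; promptForPassword() is a
// hypothetical placeholder, not part of this header). The RUNMAIN macro
// below wraps each client's main function in the same pattern:
//
//   {
//     ConsoleRecovery cr;    // snapshot the current terminal settings
//     promptForPassword();   // may disable echo or be interrupted mid-way
//   }                        // settings are restored when cr goes out of scope
//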
class ConsoleRecovery { private: struct termios * ti; public: ConsoleRecovery(void); ~ConsoleRecovery(void); }; #ifdef TEST #define RUNMAIN(X) test_##X##_main #else #define RUNMAIN(X) X(int argc, char **argv); \ int main(int argc, char **argv) { int xr = 0; { ConsoleRecovery cr; xr = X(argc,argv); }; _exit(xr); return 0; } \ int X #endif /// Returns the URLs of computing elements selected by alias, group name, URL or the default ones /** This helper method gets a list of string representing computing elements. Each item of the list is either an alias of service configured in the UserConfig, a name of a group configured in the UserConfig, or a URL of service not configured in the UserConfig. If the list is empty, the default services will be selected from the UserConfig. The method returns the URLs of the selected services. This is meant to be used by the command line programs where the user is specifying a list of computing elements by alias, group name (which has to be looked up in the UserConfig), or by URL. \param[in] usercfg is the UserConfig object containing information about configured services \param[in] computingelements is a list of strings containing aliases, group names, or URLs of computing elements \return a list of URL strings, the endpoints of the selected services, or the default ones if none was selected */ std::list getSelectedURLsFromUserConfigAndCommandLine(Arc::UserConfig usercfg, std::list computingelements); /// Combine the list of rejected discovery URLs from the UserConfig with the ones specified in a list /** Helper method for the command line programs to combine the list of rejected discovery URLs specified by the user at the command line with the ones configured in the UserConfig. The rejected discovery URLs supposed to cause the service discovery not to discovery computing elements whose URL matches any of these strings. \param[in] usercfg is the UserConfig object containing information about configured services \param[in] rejectdiscovery is a list of strings, which will be also added to the resulting list besides the ones from the UserConfig \return a list of strings which are the rejected URLs from the UserConfig and the ones given as the second argument combined */ std::list getRejectDiscoveryURLsFromUserConfigAndCommandLine(Arc::UserConfig usercfg, std::list rejectdiscovery); /// Combine the list of rejected management URLs from the UserConfig with the ones specified in a list /** Helper method for the command line programs to combine the list of rejected management URLs specified by the user at the command line with the ones configured in the UserConfig. 
The rejected management URLs are supposed to cause the job management commands not to manage jobs which reside on computing elements whose URL matches any of the items in the list \param[in] usercfg is the UserConfig object containing information about configured services \param[in] rejectmanagement is a list of strings, which will also be added to the resulting list besides the ones from the UserConfig \return a list of strings which are the rejected URLs from the UserConfig and the ones given as the second argument combined */ std::list<std::string> getRejectManagementURLsFromUserConfigAndCommandLine(Arc::UserConfig usercfg, std::list<std::string> rejectmanagement); /// Looks up or creates Endpoints from strings specified at the command line using the information from the UserConfig /** This helper method gets a list of strings representing service registries and computing elements, along with a requested submission interface, looks up all the services from the UserConfig, and returns the Endpoints found there, or creates new Endpoints for services not found in the UserConfig. If there are no registries or computing elements given, then the default services will be returned. This is meant to be used by the command line programs where the user is specifying service registries and/or computing elements with several strings, which could refer to services configured in the UserConfig (aliases or groups), or they can be URLs referring to services which are not configured in the UserConfig. This method looks up the aliases and group names, and if a string is not an alias or a group name, then it's assumed to be a URL. \param[in] usercfg is the UserConfig object containing information about configured services \param[in] registries is a list of strings containing aliases, group names, or URLs of service registries \param[in] computingelements is a list of strings containing aliases, group names, or URLs of computing elements \return a list of Endpoint objects containing the services corresponding to the given strings or the default services. */ std::list<Arc::Endpoint> getServicesFromUserConfigAndCommandLine(Arc::UserConfig usercfg, std::list<std::string> registries, std::list<std::string> computingelements, std::string requestedSubmissionInterfaceName = "", std::string infointerface = ""); void showplugins(const std::string& program, const std::list<std::string>& types, Arc::Logger& logger, const std::string& chosenBroker = ""); bool checkproxy(const Arc::UserConfig& uc); void splitendpoints(std::list<std::string>& selected, std::list<std::string>& rejected); /** * Creates a new JobInformationStorage object. The caller is responsible for * deleting the returned object.
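 *
 * A minimal usage sketch (an illustration modelled on how the compute clients
 * call this helper, not a normative example; assumes a valid UserConfig uc):
 *
 *   Arc::JobInformationStorage* store = createJobInformationStorage(uc);
 *   if (store != NULL) {
 *     std::list<Arc::Job> jobs;
 *     store->ReadAll(jobs, std::list<std::string>()); // no rejected URLs
 *     delete store;                                   // caller owns the object
 *   }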
*/ Arc::JobInformationStorage* createJobInformationStorage(const Arc::UserConfig& uc); class ClientOptions : public Arc::OptionParser { public: enum Client_t { CO_SUB, CO_MIGRATE, CO_RESUB, CO_TEST, CO_CAT, CO_CLEAN, CO_GET, CO_KILL, CO_RENEW, CO_RESUME, CO_STAT, CO_SYNC, CO_INFO, CO_ACL }; ClientOptions(Client_t c, const std::string& arguments = "", const std::string& summary = "", const std::string& description = ""); bool dryrun; bool dumpdescription; bool show_credentials; bool show_plugins; bool showversion; bool all; bool forcemigration; bool keep; bool forcesync; bool truncate; bool longlist; bool printids; bool same; bool notsame; bool forceclean; bool show_stdout; bool show_stderr; bool show_joblog; bool usejobname; bool forcedownload; bool list_configured_services; bool direct_submission; bool show_unavailable; int testjobid; int runtime; int timeout; std::string joblist; std::string jobidoutfile; std::string conffile; std::string debug; std::string broker; std::string sort; std::string rsort; std::string downloaddir; std::string requestedSubmissionInterfaceName; std::string infointerface; std::list clusters; std::list qlusters; std::list indexurls; std::list jobdescriptionstrings; std::list jobdescriptionfiles; std::list jobidinfiles; std::list status; std::list rejectdiscovery; std::list rejectmanagement; }; nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arccat.1.in0000644000000000000000000000012612441050167023027 xustar000000000000000027 mtime=1417957495.519061 30 atime=1513200652.171651742 29 ctime=1513200664.49980252 nordugrid-arc-5.4.2/src/clients/compute/arccat.1.in0000644000175000002070000001111312441050167023067 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARCCAT 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arccat \- ARC Catenate .SH DESCRIPTION The .B arccat command can be used to view the stdout or stderr of a running job. It can also be used to show A-REX's error log of a job. .SH SYNOPSIS .B arccat [options] [job ...] .SH OPTIONS .IP "\fB-a\fR, \fB--all\fR" all jobs .IP "\fB-j\fR, \fB--joblist\fR=\fIfilename\fR" the file storing information about active jobs (default ~/.arc/jobs.xml) .IP "\fB-i\fR, \fB--jobids-from-file\fR=\fIfilename\fR" a file containing a list of jobIDs .IP "\fB-c\fR, \fB--cluster\fR=\fIname\fR" select one or more computing elements: \fIname\fR can be an alias for a single CE, a group of CEs or a URL .IP "\fB-r\fR, \fB--rejectmanagement\fR=\fIURL\fR" skip jobs which are on a computing element with a given URL .IP "\fB-s\fR, \fB--status\fR=\fIstatusstr\fR" only select jobs whose status is statusstr .IP "\fB-o\fR, \fB--stdout\fR" show the stdout of the job (default) .IP "\fB-e\fR, \fB--stderr\fR" show the stderr of the job .IP "\fB-l\fR, \fB--joblog\fR" show A-REX's error log of the job .IP "\fB-P\fR, \fB--listplugins\fR" list the available plugins .IP "\fB-t\fR, \fB--timeout\fR=\fIseconds\fR" timeout in seconds (default 20) .IP "\fB-z\fR, \fB--conffile\fR=\fIfilename\fR" configuration file (default ~/.arc/client.conf) .IP "\fB-d\fR, \fB--debug\fR=\fIdebuglevel\fR" FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG .IP "\fB-v\fR, \fB--version\fR" print version information .IP "\fB-?\fR, \fB--help\fR" print help .LP .SH ARGUMENTS .IP "\fBjob\fR ..." list of jobids and/or jobnames .LP .SH EXTENDED DESCRIPTION The .B arccat command displays the stdout or stderr of running jobs. It can also display A-REX's error log of a job. 
The job can be referred to either by the jobid that was returned by .BR arcsub (1) at submission time or by its jobname if the job description that was submitted contained a jobname attribute. More than one jobid and/or jobname can be given. If several jobs were submitted with the same jobname the stdout, stderr or A-REX error log of all those jobs are shown. If the .B --joblist option is used the list of jobs is read from a file with the specified filename. By specifying the .B --all option, the stdout, stderr or A-REX error log of all active jobs will be shown. The .B --cluster option can be used to select or reject jobs at specific clusters. See .BR arcsub (1) for a discussion of the format of arguments to this option. The .B --status option can be used to select jobs in a specific state. These options can be repeated several times. See .BR arcstat (1) for possible state values. The contents of stdout or stderr can only be displayed for jobs whose job description specified the corresponding stdout or stderr attribute. .SH FILES .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .TP .B ~/.arc/jobs.xml This is a local list of the user's active jobs. When a job is successfully submitted it is added to this list and when it is removed from the remote cluster it is removed from this list. This list is used as the list of all active jobs when the user specifies the .B --all option to the various NorduGrid ARC user interface commands. By using the .B --joblist option a different file can be used than the default. .SH ENVIRONMENT VARIABLES .TP .B X509_USER_PROXY The location of the user's Grid proxy file. Shouldn't be set unless the proxy is in a non-standard location. .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org .SH SEE ALSO .BR arcclean (1), .BR arccp (1), .BR arcget (1), .BR arcinfo (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcrenew (1), .BR arcresub (1), .BR arcresume (1), .BR arcrm (1), .BR arcstat (1), .BR arcsub (1), .BR arcsync (1), .BR arctest (1) nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arcclean.1.in0000644000000000000000000000012712441050167023343 xustar000000000000000027 mtime=1417957495.519061 30 atime=1513200652.125651179 30 ctime=1513200664.501802545 nordugrid-arc-5.4.2/src/clients/compute/arcclean.1.in0000644000175000002070000001104312441050167023404 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARCCLEAN 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arcclean \- ARC Clean .SH DESCRIPTION The .B arcclean command removes a job from the remote cluster. .SH SYNOPSIS .B arcclean [options] [job ...]
.SH OPTIONS .IP "\fB-a\fR, \fB--all\fR" all jobs .IP "\fB-j\fR, \fB--joblist\fR=\fIfilename\fR" the file storing information about active jobs (default ~/.arc/jobs.xml) .IP "\fB-i\fR, \fB--jobids-from-file\fR=\fIfilename\fR" a file containing a list of jobIDs .IP "\fB-c\fR, \fB--cluster\fR=\fIname\fR" select one or more computing elements: \fIname\fR can be an alias for a single CE, a group of CEs or a URL .IP "\fB-r\fR, \fB--rejectmanagement\fR=\fIURL\fR" skip jobs which are on a computing element with a given URL .IP "\fB-s\fR, \fB--status\fR=\fIstatusstr\fR" only select jobs whose status is statusstr .IP "\fB-f\fR, \fB--force\fR" removes the job from the local list of jobs even if the job is not found in the cluster's information system .IP "\fB-P\fR, \fB--listplugins\fR" list the available plugins .IP "\fB-t\fR, \fB--timeout\fR=\fIseconds\fR" timeout in seconds (default 20) .IP "\fB-z\fR, \fB--conffile\fR=\fIfilename\fR" configuration file (default ~/.arc/client.conf) .IP "\fB-d\fR, \fB--debug\fR=\fIdebuglevel\fR" FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG .IP "\fB-v\fR, \fB--version\fR" print version information .IP "\fB-?\fR, \fB--help\fR" print help .LP .SH ARGUMENTS .IP "\fBjob\fR ..." list of jobids and/or jobnames .LP .SH EXTENDED DESCRIPTION The .B arcclean command removes a job from the remote cluster. Only jobs that have finished can be removed. The job can be referred to either by the jobid that was returned by .BR arcsub (1) at submission time or by its jobname if the job description that was submitted contained a jobname attribute. More than one jobid and/or jobname can be given. If several jobs were submitted with the same jobname all those jobs are removed. If the .B --joblist option is used the list of jobs is read from a file with the specified filename. By specifying the .B --all option, all active jobs can be removed. The .B --cluster option can be used to select or reject jobs at specific clusters. See .BR arcsub (1) for a discussion of the format of arguments to this option. The .B --status option can be used to select jobs in a specific state. These options can be repeated several times. See .BR arcstat (1) for possible state values. The .B --force option removes the job from your local list of jobs even if the job cannot be found in the remote information system. Jobs not appearing in the remote information system can also be removed from the local list by specifying the .B --status option with value \fBUndefined\fR. .SH FILES .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .TP .B ~/.arc/jobs.xml This is a local list of the user's active jobs. When a job is successfully submitted it is added to this list and when it is removed from the remote cluster it is removed from this list. This list is used as the list of all active jobs when the user specifies the .B --all option to the various NorduGrid ARC user interface commands. By using the .B --joblist option a different file can be used than the default. .SH ENVIRONMENT VARIABLES .TP .B X509_USER_PROXY The location of the user's Grid proxy file. Shouldn't be set unless the proxy is in a non-standard location. .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable.
If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org .SH SEE ALSO .BR arccat (1), .BR arccp (1), .BR arcget (1), .BR arcinfo (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcrenew (1), .BR arcresub (1), .BR arcresume (1), .BR arcrm (1), .BR arcstat (1), .BR arcsub (1), .BR arcsync (1), .BR arctest (1) nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arcinfo.cpp0000644000000000000000000000012413213442363023227 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200575.357712 30 ctime=1513200664.516802728 nordugrid-arc-5.4.2/src/clients/compute/arcinfo.cpp0000644000175000002070000001705413213442363023303 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "utils.h" int RUNMAIN(arcinfo)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arcinfo"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_INFO, istring("[resource ...]"), istring("The arcinfo command is used for " "obtaining the status of computing " "resources on the Grid.")); { std::list clusterstmp = opt.Parse(argc, argv); opt.clusters.insert(opt.clusters.end(), clusterstmp.begin(), clusterstmp.end()); } if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arcinfo", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. 
if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); if (opt.show_plugins) { std::list types; types.push_back("HED:ServiceEndpointRetrieverPlugin"); types.push_back("HED:TargetInformationRetrieverPlugin"); showplugins("arcinfo", types, logger); return 0; } Arc::UserConfig usercfg(opt.conffile); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (opt.list_configured_services) { std::map allServices = usercfg.GetAllConfiguredServices(); std::cout << "Configured registries:" << std::endl; for (std::map::const_iterator it = allServices.begin(); it != allServices.end(); ++it) { if (it->second.type == Arc::ConfigEndpoint::REGISTRY) { std::cout << " " << it->first << ": " << it->second.URLString; if (!it->second.InterfaceName.empty()) { std::cout << " (" << it->second.InterfaceName << ")"; } std::cout << std::endl; } } std::cout << "Configured computing elements:" << std::endl; for (std::map::const_iterator it = allServices.begin(); it != allServices.end(); ++it) { if (it->second.type == Arc::ConfigEndpoint::COMPUTINGINFO) { std::cout << " " << it->first << ": " << it->second.URLString; if (!it->second.InterfaceName.empty() || !it->second.RequestedSubmissionInterfaceName.empty()) { std::cout << " (" << it->second.InterfaceName; if (!it->second.InterfaceName.empty() && !it->second.RequestedSubmissionInterfaceName.empty()) { std::cout << " / "; } std::cout << it->second.RequestedSubmissionInterfaceName + ")"; } std::cout << std::endl; } } return 0; } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); if (opt.timeout > 0) usercfg.Timeout(opt.timeout); std::list endpoints = getServicesFromUserConfigAndCommandLine(usercfg, opt.indexurls, opt.clusters, opt.requestedSubmissionInterfaceName, opt.infointerface); std::set preferredInterfaceNames; if (usercfg.InfoInterface().empty()) { preferredInterfaceNames.insert("org.nordugrid.ldapglue2"); } else { preferredInterfaceNames.insert(usercfg.InfoInterface()); } std::list rejectDiscoveryURLs = getRejectDiscoveryURLsFromUserConfigAndCommandLine(usercfg, opt.rejectdiscovery); Arc::ComputingServiceUniq csu; Arc::ComputingServiceRetriever csr(usercfg, std::list(), rejectDiscoveryURLs, preferredInterfaceNames); csr.addConsumer(csu); for (std::list::const_iterator it = endpoints.begin(); it != endpoints.end(); ++it) { csr.addEndpoint(*it); } csr.wait(); std::list services = csu.getServices(); for (std::list::const_iterator it = services.begin(); it != services.end(); ++it) { if (opt.longlist) { if (it != services.begin()) std::cout << std::endl; std::cout << *it; std::cout << std::flush; } else { std::cout << "Computing service: " << (**it).Name; if (!(**it).QualityLevel.empty()) { std::cout << " (" << (**it).QualityLevel << ")"; } std::cout << std::endl; std::stringstream infostream, submissionstream; for (std::map::const_iterator itCE = it->ComputingEndpoint.begin(); itCE != it->ComputingEndpoint.end(); ++itCE) { if (itCE->second->Capability.count(Arc::Endpoint::GetStringForCapability(Arc::Endpoint::COMPUTINGINFO))) { infostream << " " << Arc::IString("Information endpoint") << ": " << itCE->second->URLString << std::endl; } if (itCE->second->Capability.empty() || itCE->second->Capability.count(Arc::Endpoint::GetStringForCapability(Arc::Endpoint::JOBSUBMIT)) || 
itCE->second->Capability.count(Arc::Endpoint::GetStringForCapability(Arc::Endpoint::JOBCREATION))) { submissionstream << " "; submissionstream << Arc::IString("Submission endpoint") << ": "; submissionstream << itCE->second->URLString; submissionstream << " (" << Arc::IString("status") << ": "; submissionstream << itCE->second->HealthState << ", "; submissionstream << Arc::IString("interface") << ": "; submissionstream << itCE->second->InterfaceName << ")" << std::endl; } } std::cout << infostream.str() << submissionstream.str(); } } bool anEndpointFailed = false; // Check if querying endpoint succeeded. Arc::EndpointStatusMap statusMap = csr.getAllStatuses(); for (std::list::const_iterator it = endpoints.begin(); it != endpoints.end(); ++it) { Arc::EndpointStatusMap::const_iterator itStatus = statusMap.find(*it); if (itStatus != statusMap.end() && itStatus->second != Arc::EndpointQueryingStatus::SUCCESSFUL && itStatus->second != Arc::EndpointQueryingStatus::SUSPENDED_NOTREQUIRED) { if (!anEndpointFailed) { anEndpointFailed = true; std::cerr << Arc::IString("ERROR: Failed to retrieve information from the following endpoints:") << std::endl; } std::cerr << " " << it->URLString; if (!itStatus->second.getDescription().empty()) { std::cerr << " (" << itStatus->second.getDescription() << ")"; } std::cerr << std::endl; } } if (anEndpointFailed) return 1; if (services.empty()) { std::cerr << Arc::IString("ERROR: Failed to retrieve information"); if (!endpoints.empty()) { std::cerr << " " << Arc::IString("from the following endpoints:") << std::endl; for (std::list::const_iterator it = endpoints.begin(); it != endpoints.end(); ++it) { std::cerr << " " << it->URLString << std::endl; } } else { std::cerr << std::endl; } return 1; } return 0; } nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arcstat.1.in0000644000000000000000000000012712442365766023253 xustar000000000000000027 mtime=1418324982.290198 30 atime=1513200652.078650605 30 ctime=1513200664.507802618 nordugrid-arc-5.4.2/src/clients/compute/arcstat.1.in0000644000175000002070000001524712442365766023326 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARCSTAT 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arcstat \- ARC Status .SH DESCRIPTION The .B arcstat command is used for obtaining the status of jobs that have been submitted to grid enabled resources. .SH SYNOPSIS .B arcstat [options] [job ...] 
.SH OPTIONS .IP "\fB-a\fR, \fB--all\fR" all jobs .IP "\fB-j\fR, \fB--joblist\fR=\fIfilename\fR" the file storing information about active jobs (default ~/.arc/jobs.xml) .IP "\fB-i\fR, \fB--jobids-from-file\fR=\fIfilename\fR" a file containing a list of jobIDs .IP "\fB-c\fR, \fB--cluster\fR=\fIname\fR" select one or more computing elements: \fIname\fR can be an alias for a single CE, a group of CEs or a URL .IP "\fB-r\fR, \fB--rejectmanagement\fR=\fIURL\fR" skip jobs which are on a computing element with a given URL .IP "\fB-s\fR, \fB--status\fR=\fIstatusstr\fR" only select jobs whose status is statusstr .IP "\fB-l\fR, \fB--long\fR" long format (more information) .IP "\fB-S\fR, \fB--sort\fR" sort jobs according to jobid, submissiontime or jobname .IP "\fB-R\fR, \fB--rsort\fR" reverse sorting of jobs according to jobid, submissiontime or jobname .IP "\fB-u\fR, \fB--show-unavailable\fR" show jobs where status information is unavailable .IP "\fB-p\fR, \fB--print-jobids\fR" instead of the status only the IDs of the selected jobs will be printed .IP "\fB-P\fR, \fB--listplugins\fR" list the available plugins .IP "\fB-t\fR, \fB--timeout\fR=\fIseconds\fR" timeout in seconds (default 20) .IP "\fB-z\fR, \fB--conffile\fR=\fIfilename\fR" configuration file (default ~/.arc/client.conf) .IP "\fB-d\fR, \fB--debug\fR=\fIdebuglevel\fR" FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG .IP "\fB-v\fR, \fB--version\fR" print version information .IP "\fB-?\fR, \fB--help\fR" print help .LP .SH ARGUMENTS .IP "\fBjob\fR ..." list of jobids and/or jobnames .LP .SH EXTENDED DESCRIPTION The .B arcstat command gives the status of a job submitted to an ARC enabled resource. The job can be referred to either by the jobid that was returned by .BR arcsub (1) at submission time or by its jobname if the job description that was submitted contained a jobname attribute. More than one jobid and/or jobname can be given. If several jobs were submitted with the same jobname the status of all those jobs is shown. If the .B --joblist option is used the list of jobs is read from a file with the specified filename. By specifying the .B --all option, the status of all active jobs will be shown. By default .B arcstat presents job states as defined internally, followed by the middleware-specific representation of the job state in brackets. The following internal job states are defined: .B Accepted - job accepted on the cluster but not being processed yet .B Preparing - job is in the phase of preparing for submission to the batch system .B Submitting - communication with the batch system is ongoing .B Hold - job's processing is suspended due to an internal reason or user request .B Queuing - job has been passed to the batch system but is not being executed yet .B Running - job is being executed in the batch system .B Finishing - job is in the phase of post-execution procedures being run .B Finished - job successfully completed all processing phases .B Killed - job processing was interrupted by user request .B Failed - job processing was interrupted due to a detected failure .B Deleted - job was removed from the cluster (usually because it stayed there too long) .B Other - middleware-specific job state could not be adequately mapped to an internal state These are also the states used by .BR arccat (1), .BR arcclean (1), .BR arcget (1), .BR arckill (1), .BR arcrenew (1), .BR arcresub (1), .BR arcresume (1) to perform job filtering. If the .B --long option is given more detailed information is shown.
Jobs can be sorted according to the jobid, submissiontime or jobname, either in normal or reverse order. By using the .B --sort or .B --rsort option followed by the desired ordering ('jobid', 'submissiontime' or 'jobname'), jobs will be sorted in normal or reverse order. Note that the options .B --sort and .B --rsort cannot be used at the same time. The .B --cluster option can be used to select or reject jobs at specific clusters. See .BR arcsub (1) for a discussion of the format of arguments to this option. The .B --status option can be used to select jobs in a specific state. These options can be repeated several times. The .B arcstat command locates the available clusters by querying information index servers. Which servers to query can be specified by giving the .B --index option to the command. See .BR arcsub (1) for a discussion of the format of arguments to this option. Also in this case the .B --long option can be used to obtain more detailed information. Previously the .B arcstat command was also used to query information about clusters and/or index servers. This functionality has been moved to the new command .B arcinfo . .SH FILES .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .TP .B ~/.arc/jobs.xml This is a local list of the user's active jobs. When a job is successfully submitted it is added to this list and when it is removed from the remote cluster it is removed from this list. This list is used as the list of all active jobs when the user specifies the .B --all option to the various NorduGrid ARC user interface commands. By using the .B --joblist option a different file can be used than the default. .SH ENVIRONMENT VARIABLES .TP .B X509_USER_PROXY The location of the user's Grid proxy file. Shouldn't be set unless the proxy is in a non-standard location. .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC.
Please report bugs and feature requests to http://bugzilla.nordugrid.org .SH SEE ALSO .BR arccat (1), .BR arcclean (1), .BR arccp (1), .BR arcget (1), .BR arcinfo (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcrenew (1), .BR arcresub (1), .BR arcresume (1), .BR arcrm (1), .BR arcsub (1), .BR arcsync (1), .BR arctest (1) nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arcresume.1.in0000644000000000000000000000012712441050167023561 xustar000000000000000027 mtime=1417957495.519061 30 atime=1513200652.237652549 30 ctime=1513200664.506802606 nordugrid-arc-5.4.2/src/clients/compute/arcresume.1.in0000644000175000002070000001033512441050167023625 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARCRESUME 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arcresume \- ARC Resume .SH DESCRIPTION The .B arcresume command is used for resuming a job that was submitted to grid enabled resources and then subsequently failed. The job will be resumed at the last ok state reported by the cluster. .SH SYNOPSIS .B arcresume [options] [job ...] .SH OPTIONS .IP "\fB-a\fR, \fB--all\fR" all jobs .IP "\fB-j\fR, \fB--joblist\fR=\fIfilename\fR" the file storing information about active jobs (default ~/.arc/jobs.xml) .IP "\fB-i\fR, \fB--jobids-from-file\fR=\fIfilename\fR" a file containing a list of jobIDs .IP "\fB-c\fR, \fB--cluster\fR=\fIname\fR" select one or more computing elements: \fIname\fR can be an alias for a single CE, a group of CEs or a URL .IP "\fB-r\fR, \fB--rejectmanagement\fR=\fIURL\fR" skip jobs which are on a computing element with a given URL .IP "\fB-s\fR, \fB--status\fR=\fIstatusstr\fR" only select jobs whose status is statusstr .IP "\fB-P\fR, \fB--listplugins\fR" list the available plugins .IP "\fB-t\fR, \fB--timeout\fR=\fIseconds\fR" timeout in seconds (default 20) .IP "\fB-z\fR, \fB--conffile\fR=\fIfilename\fR" configuration file (default ~/.arc/client.conf) .IP "\fB-d\fR, \fB--debug\fR=\fIdebuglevel\fR" FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG .IP "\fB-v\fR, \fB--version\fR" print version information .IP "\fB-?\fR, \fB--help\fR" print help .LP .SH ARGUMENTS .IP "\fBjob\fR ..." list of jobids and/or jobnames .LP .SH EXTENDED DESCRIPTION The .B arcresume command resumes a job submitted to an ARC enabled resource. The job can be referred to either by the jobid that was returned by .BR arcsub (1) at submission time or by its jobname if the job description that was submitted contained a jobname attribute. More than one jobid and/or jobname can be given. If several jobs were submitted with the same jobname all those jobs are resumed. If the .B --joblist option is used the list of jobs is read from a file with the specified filename. By specifying the .B --all option, all active jobs will be resumed. The .B --cluster option can be used to select or reject jobs at specific clusters. See .BR arcsub (1) for a discussion of the format of arguments to this option. The .B --status option can be used to select jobs in a specific state. These options can be repeated several times. See .BR arcstat (1) for possible state values. .SH FILES .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .TP .B ~/.arc/jobs.xml This is a local list of the user's active jobs.
When a job is successfully submitted it is added to this list and when it is removed from the remote cluster it is removed from this list. This list is used as the list of all active jobs when the user specifies the .B --all option to the various NorduGrid ARC user interface commands. By using the .B --joblist option a different file can be used than the default. .SH ENVIRONMENT VARIABLES .TP .B X509_USER_PROXY The location of the user's Grid proxy file. Shouldn't be set unless the proxy is in a non-standard location. .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org .SH SEE ALSO .BR arccat (1), .BR arcclean (1), .BR arccp (1), .BR arcget (1), .BR arcinfo (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcrenew (1), .BR arcresub (1), .BR arcrm (1), .BR arcstat (1), .BR arcsub (1), .BR arcsync (1), .BR arctest (1) nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arcinfo.1.in0000644000000000000000000000012712441050167023214 xustar000000000000000027 mtime=1417957495.519061 30 atime=1513200652.092650776 30 ctime=1513200664.503802569 nordugrid-arc-5.4.2/src/clients/compute/arcinfo.1.in0000644000175000002070000001020112441050167023250 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARCINFO 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arcinfo \- ARC Info .SH DESCRIPTION The .B arcinfo command is used for obtaining status and information of clusters on the grid. .SH SYNOPSIS .B arcinfo [options] [cluster ...] .SH OPTIONS .IP "\fB-c\fR, \fB--cluster\fR=\fIname\fR" select one or more computing elements: \fIname\fR can be an alias for a single CE, a group of CEs or a URL .IP "\fB-I\fR, \fB--infointerface\fR=\fIInterfaceName\fR" the computing element specified by URL at the command line should be queried using this information interface (possible options: org.nordugrid.ldapng, org.nordugrid.ldapglue2, org.nordugrid.wsrfglue2, org.ogf.glue.emies.resourceinfo) .IP "\fB-g\fR, \fB--index\fR=\fIname\fR" select one or more registries: \fIname\fR can be an alias for a single registry, a group of registries or a URL .IP "\fB-R\fR, \fB--rejectdiscovery\fR=\fIURL\fR" skip the service with the given URL during service discovery .IP "\fB-S\fR, \fB--submissioninterface\fR=\fIInterfaceName\fR" only get information about execution targets which support this job submission interface (e.g.
org.nordugrid.gridftpjob, org.ogf.glue.emies.activitycreation, org.nordugrid.xbes) .IP "\fB-l\fR, \fB--long\fR" long format (more information) .IP "\fB-L\fR, \fB--list-configured-services\fR" print a list of services configured in the client.conf .IP "\fB-P\fR, \fB--listplugins\fR" list the available plugins .IP "\fB-t\fR, \fB--timeout\fR=\fIseconds\fR" timeout in seconds (default 20) .IP "\fB-z\fR, \fB--conffile\fR=\fIfilename\fR" configuration file (default ~/.arc/client.conf) .IP "\fB-d\fR, \fB--debug\fR=\fIdebuglevel\fR" FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG .IP "\fB-v\fR, \fB--version\fR" print version information .IP "\fB-?\fR, \fB--help\fR" print help .LP .SH EXTENDED DESCRIPTION The .B arcinfo command is used to get the status and information of clusters and queues available on the grid. You can specify the URLs of clusters with the .B --cluster option, or by just listing them as arguments. The .B --index flag can be used to specify an index server which should be queried for clusters. Both of these flags take a service endpoint as argument. See .BR arcsub (1) for a discussion of this format. Detailed information about queried computing services can be obtained by specifying the .B --long flag. When specifying the .B --index flag, the information about the computing services registered at the index server will be queried rather than the status of the index server itself. Currently no command exists to query an index server. .SH FILES .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .SH ENVIRONMENT VARIABLES .TP .B X509_USER_PROXY The location of the user's Grid proxy file. Shouldn't be set unless the proxy is in a non-standard location. Note that this could also be set in the client configuration file; however, the environment variable overrides the setting in the configuration file. .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC.
Please report bugs and feature requests to http://bugzilla.nordugrid.org .SH SEE ALSO .BR arccat (1), .BR arcclean (1), .BR arccp (1), .BR arcget (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcrenew (1), .BR arcresub (1), .BR arcresume (1), .BR arcrm (1), .BR arcstat (1), .BR arcsub (1), .BR arcsync (1), .BR arctest (1) nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arcget.1.in0000644000000000000000000000012712441050167023040 xustar000000000000000027 mtime=1417957495.519061 30 atime=1513200652.156651558 30 ctime=1513200664.502802557 nordugrid-arc-5.4.2/src/clients/compute/arcget.1.in0000644000175000002070000001150512441050167023104 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARCGET 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arcget \- ARC Get .SH DESCRIPTION The .B arcget command is used for retrieving the results from a job. .SH SYNOPSIS .B arcget [options] [job ...] .SH OPTIONS .IP "\fB-a\fR, \fB--all\fR" all jobs .IP "\fB-j\fR, \fB--joblist\fR=\fIfilename\fR" the file storing information about active jobs (default ~/.arc/jobs.xml) .IP "\fB-i\fR, \fB--jobids-from-file\fR=\fIfilename\fR" a file containing a list of jobIDs .IP "\fB-c\fR, \fB--cluster\fR=\fIname\fR" select one or more computing elements: \fIname\fR can be an alias for a single CE, a group of CEs or a URL .IP "\fB-r\fR, \fB--rejectmanagement\fR=\fIURL\fR" skip jobs which are on a computing element with a given URL .IP "\fB-s\fR, \fB--status\fR=\fIstatusstr\fR" only select jobs whose status is statusstr .IP "\fB-D\fR, \fB--dir\fR=\fIdirname\fR" download directory (the job directory will be created in this directory) .IP "\fB-J\fR, \fB--usejobname\fR" use the jobname instead of the short ID as the job directory name .IP "\fB-k\fR, \fB--keep\fR" keep files on the remote cluster (do not clean) .IP "\fB-f\fR, \fB--force\fR" force download (overwrite existing job directory) .IP "\fB-P\fR, \fB--listplugins\fR" list the available plugins .IP "\fB-t\fR, \fB--timeout\fR=\fIseconds\fR" timeout in seconds (default 20) .IP "\fB-z\fR, \fB--conffile\fR=\fIfilename\fR" configuration file (default ~/.arc/client.conf) .IP "\fB-d\fR, \fB--debug\fR=\fIdebuglevel\fR" FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG .IP "\fB-v\fR, \fB--version\fR" print version information .IP "\fB-?\fR, \fB--help\fR" print help .LP .SH ARGUMENTS .IP "\fBjob\fR ..." list of jobids and/or jobnames .LP .SH EXTENDED DESCRIPTION The .B arcget command downloads the results after a job has completed on a grid enabled computing resource. Only the results of jobs that have finished can be downloaded. The job can be referred to either by the jobid that was returned by .BR arcsub (1) at submission time or by its jobname if the job description that was submitted contained a jobname attribute. More than one jobid and/or jobname can be given. If several jobs were submitted with the same jobname the results of all those jobs are downloaded. If the .B --joblist option is used the list of jobs is read from a file with the specified filename. By specifying the .B --all option, the results of all active jobs are downloaded. The .B --cluster option can be used to select or reject jobs at specific clusters. See .BR arcsub (1) for a discussion of the format of arguments to this option. The .B --status option can be used to select jobs in a specific state. These options can be repeated several times. See .BR arcstat (1) for possible state values.
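.PP
As an illustrative example (using only the options documented above; the directory path is a placeholder), the results of all jobs that have reached the Finished state could be retrieved into a chosen download directory with:
.PP
.nf
arcget --all --status Finished --dir /path/to/results
.fi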
For each job that is downloaded a subdirectory will be created in the download directory that will contain the downloaded files. If the download was successful the job will be removed from the remote cluster unless the .B --keep option was specified. .SH FILES .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .TP .B ~/.arc/jobs.xml This is a local list of the user's active jobs. When a job is successfully submitted it is added to this list and when it is removed from the remote cluster it is removed from this list. This list is used as the list of all active jobs when the user specifies the .B --all option to the various NorduGrid ARC user interface commands. By using the .B --joblist option a different file can be used than the default. .SH ENVIRONMENT VARIABLES .TP .B X509_USER_PROXY The location of the user's Grid proxy file. Shouldn't be set unless the proxy is in a non-standard location. .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org .SH SEE ALSO .BR arccat (1), .BR arcclean (1), .BR arccp (1), .BR arcinfo (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcrenew (1), .BR arcresub (1), .BR arcresume (1), .BR arcrm (1), .BR arcstat (1), .BR arcsub (1), .BR arcsync (1), .BR arctest (1) nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/README0000644000000000000000000000012412441050167021761 xustar000000000000000027 mtime=1417957495.519061 27 atime=1513200575.340712 30 ctime=1513200664.496802483 nordugrid-arc-5.4.2/src/clients/compute/README0000644000175000002070000000535312441050167022034 0ustar00mockbuildmock00000000000000 ARC job management command line interface ========================================= Description ----------- This is a command line user interface (client) for computational job submission and management to ARC, CREAM and EMI ES enabled computing services. For ARC, both Grid Manager (GridFTP) and A-REX compute elements are supported.
The following commands are implemented: o arcsub - job submission o arcstat - job information o arcinfo - resource information o arcget - job retrieval o arckill - job cancellation o arcclean - job clean-up o arccat - job output capture o arcresub - job re-submission o arcrenew - remote credentials renewal o arcsync - synchronisation of user job list o arcresume - job resumption o arctest - functionality testing Implementation notes -------------------- The commands are built upon the ARC SDK, in particular the libarccompute library with source code located in src/hed/libs/compute which in turn makes use of ARC Client Components (ACC) located in src/hed/acc In addition, the job management client requires data handling methods from the libarcdata library with source code located in src/hed/libs/data Additional command line user interface tools exist for credential handling and basic file handling, located respectively in src/clients/credentials src/clients/data Build notes ----------- Due to the complex dependencies above, it is not feasible to build the ARC job management client independently of the rest of the ARC code. In order to build the ARC clients, obtain the full ARC source code tree and run the following in its root: ./autogen.sh ./configure make install Installation and configuration notes ------------------------------------ Depending on the operating system and distributor, the ARC job management client is available in a variety of packages: o Linux: rpm, deb, binary tarball o MS Windows: installer o Mac OS X o Source tarball Please follow the installation instructions specific to each case, as offered by the distributor. A client configuration file template can be found in src/clients/client.conf and is installed by default in /etc/arc/client.conf to define common configuration parameters for all users on a machine. Individual user configuration files have the same format and should be placed manually in ~/.arc/client.conf. If necessary, a different configuration can be specified from the command line. Documentation ------------- The installation guide and client tools manual (complete with configuration manual) can be found at the NorduGrid Web site, are distributed with the NorduGrid ARC documentation package, and are also available from the NorduGrid code repository. Each command line tool comes complete with Linux man pages and short help.
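Usage sketch
------------

As a purely illustrative outline (the job description file, the CE host name
and the returned job ID below are placeholders, not real values), a typical
job lifecycle with these tools could look like:

  arcsub -c ce.example.org job.xrsl   # submit a job; a job ID is printed
  arcstat <jobid>                     # query the status of that job
  arcget <jobid>                      # retrieve the results once it has finished

See the man page of each command for the full set of options.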
nordugrid-arc-5.4.2/src/clients/compute/PaxHeaders.7502/arcclean.cpp0000644000000000000000000000012413213442363023356 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200575.340712 30 ctime=1513200664.514802704 nordugrid-arc-5.4.2/src/clients/compute/arcclean.cpp0000644000175000002070000001372713213442363023435 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "utils.h" int RUNMAIN(arcclean)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arcclean"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_CLEAN, istring("[job ...]"), istring("The arcclean command removes a job " "from the computing resource.")); std::list jobidentifiers = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arcclean", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); if (opt.show_plugins) { std::list types; types.push_back("HED:JobControllerPlugin"); showplugins("arcclean", types, logger); return 0; } Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (!checkproxy(usercfg)) { return 1; } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); for (std::list::const_iterator it = opt.jobidinfiles.begin(); it != opt.jobidinfiles.end(); ++it) { if (!Arc::Job::ReadJobIDsFromFile(*it, jobidentifiers)) { logger.msg(Arc::WARNING, "Cannot read specified jobid file: %s", *it); } } if (opt.timeout > 0) usercfg.Timeout(opt.timeout); if ((!opt.joblist.empty() || !opt.status.empty()) && jobidentifiers.empty() && opt.clusters.empty()) opt.all = true; if (jobidentifiers.empty() && opt.clusters.empty() && !opt.all) { logger.msg(Arc::ERROR, "No jobs given"); return 1; } std::list selectedURLs; if (!opt.clusters.empty()) { selectedURLs = getSelectedURLsFromUserConfigAndCommandLine(usercfg, opt.clusters); } std::list rejectManagementURLs = getRejectManagementURLsFromUserConfigAndCommandLine(usercfg, opt.rejectmanagement); std::list jobs; Arc::JobInformationStorage *jobstore = createJobInformationStorage(usercfg); if (jobstore != NULL && !jobstore->IsStorageExisting()) { logger.msg(Arc::ERROR, "Job list file (%s) doesn't exist", usercfg.JobListFile()); delete jobstore; return 1; } if (jobstore == NULL || ( opt.all && !jobstore->ReadAll(jobs, rejectManagementURLs)) || (!opt.all && !jobstore->Read(jobs, jobidentifiers, selectedURLs, rejectManagementURLs))) { logger.msg(Arc::ERROR, "Unable to read job information from file (%s)", usercfg.JobListFile()); delete jobstore; return 1; } if (!opt.all) { for (std::list::const_iterator itJIDAndName = jobidentifiers.begin(); itJIDAndName != jobidentifiers.end(); ++itJIDAndName) { std::cout << Arc::IString("Warning: Job not found in job list: %s", *itJIDAndName) << std::endl; } } Arc::JobSupervisor jobmaster(usercfg, 
jobs); jobmaster.Update(); jobmaster.SelectValid(); if (!opt.status.empty()) { jobmaster.SelectByStatus(opt.status); } //if (jobmaster.GetSelectedJobs().empty()) { // std::cout << Arc::IString("No jobs") << std::endl; // return 1; //} int retval = (int)!jobmaster.Clean(); std::list cleaned = jobmaster.GetIDsProcessed(); const std::list& notcleaned = jobmaster.GetIDsNotProcessed(); if ((!opt.status.empty() && std::find(opt.status.begin(), opt.status.end(), "Undefined") != opt.status.end()) || opt.forceclean) { std::string response = ""; if (!opt.forceclean) { std::cout << Arc::IString("You are about to remove jobs from the job list for which no information could be\n" "found. NOTE: Recently submitted jobs might not have appeared in the information\n" "system, and this action will also remove such jobs.") << std::endl; std::cout << Arc::IString("Are you sure you want to clean jobs missing information?") << " [" << Arc::IString("y") << "/" << Arc::IString("n") << "] "; std::cin >> response; } if (!opt.forceclean && Arc::lower(response) != std::string(Arc::FindTrans("y"))) { std::cout << Arc::IString("Jobs missing information will not be cleaned!") << std::endl; if (cleaned.empty() && notcleaned.empty()) { return retval; } } else { for (std::list::const_iterator it = jobmaster.GetAllJobs().begin(); it != jobmaster.GetAllJobs().end(); ++it) { if (it->State == Arc::JobState::UNDEFINED) { cleaned.push_back(it->JobID); } } } } if (!jobstore->Remove(cleaned)) { std::cout << Arc::IString("Warning: Failed to write job information to file (%s)", usercfg.JobListFile()) << std::endl; std::cout << Arc::IString(" Run 'arcclean -s Undefined' to remove cleaned jobs from job list", usercfg.JobListFile()) << std::endl; } delete jobstore; if (cleaned.empty() && notcleaned.empty()) { std::cout << Arc::IString("No jobs") << std::endl; return 1; } std::cout << Arc::IString("Jobs processed: %d, deleted: %d", cleaned.size()+notcleaned.size(), cleaned.size()) << std::endl; return retval; } nordugrid-arc-5.4.2/src/PaxHeaders.7502/utils0000644000000000000000000000013213214316030017037 xustar000000000000000030 mtime=1513200664.604803804 30 atime=1513200668.719854133 30 ctime=1513200664.604803804 nordugrid-arc-5.4.2/src/utils/0000755000175000002070000000000013214316030017162 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/utils/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712240745230021162 xustar000000000000000027 mtime=1384368792.357467 30 atime=1513200606.195089431 30 ctime=1513200664.600803755 nordugrid-arc-5.4.2/src/utils/Makefile.am0000644000175000002070000000027412240745230021227 0ustar00mockbuildmock00000000000000if WIN32 GRIDMAP = else GRIDMAP = gridmap endif if HED_ENABLED BUILD_SOURCES = hed $(GRIDMAP) else BUILD_SOURCES = $(GRIDMAP) endif SUBDIRS = $(BUILD_SOURCES) DIST_SUBDIRS = hed gridmap nordugrid-arc-5.4.2/src/utils/PaxHeaders.7502/gridmap0000644000000000000000000000013213214316030020462 xustar000000000000000030 mtime=1513200664.668804587 30 atime=1513200668.719854133 30 ctime=1513200664.668804587 nordugrid-arc-5.4.2/src/utils/gridmap/0000755000175000002070000000000013214316030020605 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/utils/gridmap/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712047045306022607 xustar000000000000000027 mtime=1352420038.089673 30 atime=1513200606.246090054 30 ctime=1513200664.663804526 nordugrid-arc-5.4.2/src/utils/gridmap/Makefile.am0000644000175000002070000000054012047045306022650 
0ustar00mockbuildmock00000000000000dist_sbin_SCRIPTS = nordugridmap cronddir = @cronddir@ man_MANS = nordugridmap.8 # Not using crond_DATA since we need to rename it. install-exec-local: $(MKDIR_P) "$(DESTDIR)$(cronddir)" $(INSTALL_DATA) nordugridmap.cron $(DESTDIR)$(cronddir)/nordugridmap uninstall-local: rm -f $(DESTDIR)$(cronddir)/nordugridmap EXTRA_DIST = nordugridmap.conf nordugrid-arc-5.4.2/src/utils/gridmap/PaxHeaders.7502/nordugridmap.8.in0000644000000000000000000000012613024226021023732 xustar000000000000000027 mtime=1481714705.415372 30 atime=1513200652.561656512 29 ctime=1513200664.66580455 nordugrid-arc-5.4.2/src/utils/gridmap/nordugridmap.8.in0000644000175000002070000000417713024226021024006 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH NORDUGRIDMAP 8 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid ARC" .SH NAME nordugridmap \- ARC grid-mapfile generator .SH SYNOPSIS .B nordugridmap [\fB-t\fR, \fB--test\fR] [\fB-c\fR, \fB--config\fR \fIFILE\fR] [\fB-h\fR, \fB--help\fR] .SH DESCRIPTION .PP The nordugridmap utility is usually run as a crontab entry in order to automatically generate grid-mapfile(s). .PP All information about mapfiles to generate, sources of information and other related options is stored in a single configuration file. The common NorduGrid ARC configuration file \fBarc.conf\fP(5) is used if not redefined via the command line option. .SH EXTENDED DESCRIPTION The sources of information supported by nordugridmap are the following: .TP .B http(s):// URL to plain text file. File should contain a list of DNs - one DN per line. .TP .B voms(s):// URL to VOMS-Admin interface. .TP .B nordugrid NorduGrid VO members .TP .B file:// Local file (stand-alone or dynamically generated by nordugridmap). File should contain a list of DNs with optional mapped unixid: "user DN" [mapped unixID] .TP .B vo:// Reference to another VO configuration block .PP We STRONGLY RECOMMEND reading the NorduGrid ARC documentation, the \fBarc.conf\fP(5) manual page and/or the \fBarc.conf.reference\fP to learn all details about nordugridmap configuration options usage. .SH OPTIONS .TP \fB\-t\fR, \fB\-\-test\fR Does not actually create grid-mapfile(s), but performs a test run in debug mode. .TP \fB\-c\fR, \fB\-\-config \fIFILE\fR Specifies the configuration file to be used. By default the \fBarc.conf\fP(5) is used. nordugridmap utilizes the [nordugridmap] section for fine-tuning general options and processes all the [vo] blocks from the config. .TP \fB\-h\fR, \fB\-\-help\fR Show help options .PP .SH REPORTING BUGS Report bugs to http://bugzilla.nordugrid.org/ .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH FILES .BR /etc/arc.conf .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org .SH SEE ALSO .BR arc.conf (5) nordugrid-arc-5.4.2/src/utils/gridmap/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315736022617 xustar000000000000000029 mtime=1513200606.28009047 30 atime=1513200652.576656695 30 ctime=1513200664.664804538 nordugrid-arc-5.4.2/src/utils/gridmap/Makefile.in0000644000175000002070000005307613214315736022675 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc.
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/utils/gridmap DIST_COMMON = $(dist_sbin_SCRIPTS) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in $(srcdir)/nordugridmap.8.in \ $(srcdir)/nordugridmap.cron.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = nordugridmap.cron nordugridmap.8 CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(sbindir)" "$(DESTDIR)$(man8dir)" SCRIPTS = $(dist_sbin_SCRIPTS) SOURCES = DIST_SOURCES = man8dir = $(mandir)/man8 NROFF = nroff MANS = $(man_MANS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = 
@ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = 
@JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ 
host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ dist_sbin_SCRIPTS = nordugridmap man_MANS = nordugridmap.8 EXTRA_DIST = nordugridmap.conf all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/utils/gridmap/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/utils/gridmap/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): nordugridmap.cron: $(top_builddir)/config.status $(srcdir)/nordugridmap.cron.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ nordugridmap.8: $(top_builddir)/config.status $(srcdir)/nordugridmap.8.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-dist_sbinSCRIPTS: $(dist_sbin_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(sbindir)" || $(MKDIR_P) "$(DESTDIR)$(sbindir)" @list='$(dist_sbin_SCRIPTS)'; test -n "$(sbindir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(sbindir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(sbindir)$$dir" || exit $$?; \ } \ ; done uninstall-dist_sbinSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(dist_sbin_SCRIPTS)'; test -n "$(sbindir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(sbindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(sbindir)" && rm -f $$files mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man8: $(man_MANS) @$(NORMAL_INSTALL) test -z "$(man8dir)" || $(MKDIR_P) "$(DESTDIR)$(man8dir)" @list=''; test -n "$(man8dir)" || exit 0; \ { for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.8[a-z]*$$/p'; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^8][0-9a-z]*$$,8,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man8dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man8dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man8dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man8dir)" || exit $$?; }; \ done; } uninstall-man8: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man8dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n 
'/\.8[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^8][0-9a-z]*$$,8,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ test -z "$$files" || { \ echo " ( cd '$(DESTDIR)$(man8dir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(man8dir)" && rm -f $$files; } tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @list='$(MANS)'; if test -n "$$list"; then \ list=`for p in $$list; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \ if test -n "$$list" && \ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(MANS) installdirs: for dir in "$(DESTDIR)$(sbindir)" "$(DESTDIR)$(man8dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-dist_sbinSCRIPTS install-exec-local install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man8 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_sbinSCRIPTS uninstall-local uninstall-man uninstall-man: uninstall-man8 .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dist_sbinSCRIPTS \ install-dvi install-dvi-am install-exec install-exec-am \ install-exec-local install-html install-html-am install-info \ install-info-am install-man install-man8 install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \ uninstall-dist_sbinSCRIPTS uninstall-local uninstall-man \ uninstall-man8 # Not using crond_DATA since we need to rename it. install-exec-local: $(MKDIR_P) "$(DESTDIR)$(cronddir)" $(INSTALL_DATA) nordugridmap.cron $(DESTDIR)$(cronddir)/nordugridmap uninstall-local: rm -f $(DESTDIR)$(cronddir)/nordugridmap # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/utils/gridmap/PaxHeaders.7502/nordugridmap.conf0000644000000000000000000000012413024226021024101 xustar000000000000000027 mtime=1481714705.415372 27 atime=1513200575.302711 30 ctime=1513200664.668804587 nordugrid-arc-5.4.2/src/utils/gridmap/nordugridmap.conf0000644000175000002070000002327313024226021024155 0ustar00mockbuildmock00000000000000## The nordugridmap utility processes the arc.conf central config file. ## The --config command line option can be used to specify a ## different configuration file. ## The relevant section of the arc.conf is shown below. ## nordugridmap reads the [vo] blocks and generates 'gridmap' files. ## [nordugridmap] block is used to fine-tune nordugridmap behavior ## and provide necessary system paths information [nordugridmap] # x509_user_key path - file containing certificate private key. # Default is '/etc/grid-security/hostkey.pem' # x509_user_cert path - file containing public certificate. # Default is '/etc/grid-security/hostcert.pem' # x509_cert_dir - directory containig CA certificates # Default is '/etc/grid-security/certificates/' # This information is needed to contact external sources over TLS. # https:// and vomss:// sources rely on this parameters. x509_user_key="/etc/grid-security/hostkey.pem" x509_user_cert="/etc/grid-security/hostcert.pem" x509_cert_dir="/etc/grid-security/certificates/" # gridmap_owner username - owner of generated gridmapfiles. # Default is a owner of grid-manager process (default is 'root'). 
gridmap_owner="root" # gridmap_group groupname - group of generated gridmapfiles. # Default is 'root' gridmap_group="root" # gridmap_permissions filemode - permissions of generated gridmapfiles. # Default is '0600' gridmap_permissions="0600" # log_to_file yes/no - controls whether logging output of nordugridmap # will be saved to file. Default is 'yes'. log_to_file="yes" # logfile file - specify the log file location. # Default is '/var/log/arc/nordugridmap.log' logfile="/var/log/arc/nordugridmap.log" # cache_enable yes/no - controls whether caching of external sources # will be used. Default is 'yes' cache_enable="yes" # cachedir path - path to store cached sources. # Default is '/var/spool/nordugrid/gridmapcache/' cachedir="/var/spool/nordugrid/gridmapcache/" # cachetime seconds - controls how long cached information remains # valid. Default is 3 days (259200). cachetime="259200" # mapuser_processing overwrite/keep - controls the behavior of [vo] block # mapped_unixid usage. See 'mapped_unixid' description for details. # Default is 'keep' mapuser_processing="keep" # allow_empty_unixid yes/no - controls whether an empty (or unspecified) # 'mapped_unixid' [vo] block option is allowed to be used. # See 'mapped_unixid' description for details. Default is 'no' allow_empty_unixid="no" # voms_method soap/get - controls how to get information from VOMS(S) # sources: # soap - call SOAP method directly using SOAP::Lite (default) # get - use old implementation that manually parses XML response voms_method="soap" # debug level - controls the verbosity of nordugridmap output. Valid # values are: # 0 - FATAL - only critical fatal errors shown # 1 - ERROR - errors, including non-critical, are shown # 2 - WARNING (default) - configuration errors that can be ignored # 3 - INFO - processing information # 4 - VERBOSE - a bit more processing information # 5 - DEBUG - a lot of processing information # # When a test run is requested (--test command line option of the # nordugridmap) the debug level is automatically set to 'DEBUG' debug="4" # fetch_timeout seconds - controls how long nordugridmap will # wait for external source retrieval. Default is 15 seconds. fetch_timeout="15" ## end of [nordugridmap] block # [vo] block is used to define VOs and generate mapfiles from user # lists maintained by VO databases. # [vo] blocks can also be used and referenced in authorization blocks # or in other [vo] blocks. [vo] # id blockid - specifies the unique block id (not required for processing) id="vo_1" # vo vo_name - specifies the VO name; this name can be used in other blocks # and in gacl expressions. MUST be given. vo="atlas" # file path - output gridmap-file where the GENERATED mapping list will be # stored. See attributes below to specify how to generate this file. # # If the same file is specified as output for different [vo] blocks, # nordugridmap will automatically merge entries (in the given block order) file="/etc/grid-security/VOs/atlas-users" # source url - the URL of the VO database which is assigned to this VO. # The nordugridmap will use this URL to automatically generate and keep # up-to-date the userlist (mapfile) specified by the 'file' attribute. # # several sources can be specified for a [vo] block and all the user # records from those sources will be merged # # The source URLs are processed from top to bottom in the given order. # Already defined user DNs will be ignored. # # Currently supported URL types are: # http(s):// - URL to plain text file.
File should contain a list # of DNs with optional issuer certificate authority DN # (see require_issuerdn): "user DN" ["issuer DN"] # voms(s):// - URL to VOMS-Admin interface # nordugrid - add NorduGrid VO members # file:// - local file (stand-alone or dynamically generated by # nordugridmap). File should contain a list of DNs with # optional mapped unixid: "user DN" [mapped user ID] # The result of optional mapped unixid processing depends # on the mapuser_processing option settings. # vo:// - reference to another [vo] configuration block # # You can use either vo:// or file:// entries to specify dependencies # between [vo] blocks, but using vo:// is the recommended way # # For each separate source URL it is possible to override some parameter # values. You can use the following syntax to perform this: # source="URL < parameter1=value1 parameter2=value2" # You can override the following parameters: # mapped_unixid for http(s),voms(s),ldap,file URLs # cache_enable for http(s),voms(s),ldap,file URLs # voms_method for voms(s) URLs # mapuser_processing for file URLs (If the file already contains a mapped_unixid, # controls whether to overwrite it with the supplied value. # Only applied if mapped_unixid is overridden for the URL. ) # source="vomss://lcg-voms.cern.ch:8443/voms/atlas?/atlas/Role=VO-Admin < mapped_unixid=atlasadmin" source="vomss://kuiken.nikhef.nl:8443/voms/gin.ggf.org" source="http://www.nordugrid.org/developers.dn" source="file:///etc/grid-security/priviliged_users.dn" source="vo://NorduGrid_Community" source="nordugrid" # mapped_unixid unixid - the local UNIXID which is used in the generated # grid-mapfile by the nordugridmap utility. # # if any of the sources has already provided mapping information (file:// # or vo://) behavior depends on the 'mapuser_processing' nordugridmap configuration: # mapuser_processing = 'overwrite': ignore already provided mapping and # apply mapped_unixid for all sources # mapuser_processing = 'keep': apply mapped_unixid only for sources that # do not contain mapping information # # A [vo] block can only have one UNIXID # if 'mapped_unixid' is not specified behavior depends on the 'allow_empty_unixid' # nordugridmap configuration value: # allow_empty_unixid = 'yes': empty value will be used for mapped_unixid # which means that nordugridmap will generate only # the list of DNs without mapping (consider using # mapuser_processing='overwrite' or sources that # do not provide mapping information) # allow_empty_unixid = 'no': skip users without mapping information (no # mapping information provided by sources and/or # mapuser_processing='overwrite' is enabled) mapped_unixid="gridtest" # voms_fqan_map FQAN unixid - the local UNIXID which is used to map voms(s) # sources with a specific FQAN given. # # Several voms_fqan_map entries can be specified for a [vo] block. # For each voms(s) source in a [vo] block and every voms_fqan_map record a # separate source record will be automatically generated with mapped_unixid # overridden to the specified one. # # Sources are generated in the given voms_fqan_map order. The original voms(s) source # URL is processed last, after all FQANs. # # This allows simplifying the configuration, especially in redundancy cases where # several VOMS servers are used for the same VO. # voms_fqan_map="/atlas/Role=VO-Admin atlasadmin" voms_fqan_map="/atlas/Role=production atlasprod" # filter ACL string - An ACL filter for the nordugridmap utility. # Multiple allow/deny statements are possible. The fetched DNs are filtered # against the specified rules before they are added to the generated mapfile.
# # * can be used as a wildcard. You may run the nordugridmap with the --test # command line option to see how the filters you specified works. filter="deny *infn*" filter="allow *NorduGrid*" # another example of VO block [vo] id="vo_2" vo="NorduGrid_Community" file="/etc/grid-security/VOs/community.dn" source="http://www.nordugrid.org/community.dn" source="nordugrid" source="file:///etc/grid-security/priviliged_users.dn < mapped_unixid=privelegeduser mapuser_processing=overwrite" source="vomss://lcg-voms.cern.ch:8443/voms/atlas?/atlas/Role=VO-Admin < mapped_unixid=atlasadmin voms_method=soap" mapped_unixid="communityuser" filter="deny /O=Grid/O=NorduGrid/OU=bad.site/CN=Bad User" # one more example of VO block [vo] id="vo_3" vo="all_users" file="/etc/grid-security/grid-mapfile" source="vo://NorduGrid_Community" source="vo://atlas" source="file:///etc/grid-security/local-grid-mapfile.dn" mapped_unixid="grid" # FQAN-map use-case example [vo] id="vo_4" vo="moldyngrid" source="vomss://grid.org.ua/voms/moldyngrid" source="vomss://moldyngrid.org/voms/moldyngrid" voms_fqan_map="/moldyngrid/Role=VO-Admin .mdgadm" voms_fqan_map="/moldyngrid/Role=production .mdgprd" voms_fqan_map="/moldyngrid/md .mdgmd" mapped_unixid=".mdg" file="/etc/grid-security/VOs/moldyngrid" nordugrid-arc-5.4.2/src/utils/gridmap/PaxHeaders.7502/nordugridmap0000644000000000000000000000012413065017560023170 xustar000000000000000027 mtime=1490296688.611612 27 atime=1513200575.300711 30 ctime=1513200664.662804514 nordugrid-arc-5.4.2/src/utils/gridmap/nordugridmap0000755000175000002070000012054713065017560023251 0ustar00mockbuildmock00000000000000#!/usr/bin/perl # # nordugridmap - generates grid-mapfile(s) based on configuration # binmode STDIN; binmode STDOUT; use Getopt::Long; use POSIX qw(strftime); use Time::localtime; use File::Temp qw(tempfile); use File::Path; use Storable; use URI; use XML::DOM; use LWP::UserAgent; use LWP::Protocol::https; use SOAP::Lite; use SOAP::Transport::HTTP; # please use this when developing use warnings; use strict; use constant { # debug level constants FATAL => 0, ERROR => 1, WARNING => 2, INFO => 3, VERBOSE => 4, DEBUG => 5, # nordugridmap internals VERSION => "3.0", USERAGENT => "nordugridmap" }; # # GET COMMAND LINE OPTIONS AND SET DEFAULTS # # define configuration flags my %config_flags = ( 'mapuser_processing' => 0, # overwrite = 1, keep = 0 'cache_enabled' => 1, 'log_to_file' => 1, 'voms_use_soap' => 1, # voms_method: soap = 1, get = 0 'allow_empty_unixid' => 0 ); my $debug_level = 2; my $fetch_url_timeout = 15; my $opt_help; my $opt_test; my $fileopt = $ENV{ARC_CONFIG}||="/etc/arc.conf"; # get options GetOptions("help" => \$opt_help, "test" => \$opt_test, "config=s" => \$fileopt); if ($opt_help) { &printHelp; exit(1); } # print at DEBUG level to STDERR when using testing mode if ($opt_test) { $debug_level = 5; $config_flags{'log_to_file'} = 0; &Logger("Nordugridmap is running in a testing mode. 
There will be no gridmaps altered.", DEBUG); } # # CONFIG FILE PARSER (ARC.CONF INI FORMAT EXPECTED) # unless (open (CONFIGFILE, "<$fileopt")) { &Logger("Can't open $fileopt configuration file",FATAL); } my %parsedconfig = (); my $blockname; my $blockindex=0; my $lineindex=0; while (my $line =) { $lineindex++; next if $line =~/^#/; next if $line =~/^$/; next if $line =~/^\s+$/; # parse block name if ($line =~/\[([^\/]+).*\]/ ) { $blockindex++; $blockname = $1; unless ( $blockname =~ /^(common|nordugridmap)$/ ) { $blockname = sprintf("%s_%03i",$1,$blockindex); } $parsedconfig{$blockname}{'configline'} = $lineindex; next; } # skip every block not related to nordugridmap next unless ( $blockname =~/^(common|nordugridmap|vo_)/ ); # get variable = "value" next unless ( $line =~/^(\w+)\s*=\s*"(.*)"\s*$/ ); my $variable_name=$1; my $variable_value=$2; if ( $blockname =~/^vo_/ ) { # special parsing for the local grid-mapfile if ($variable_name eq "localmapfile") { $variable_name = "source"; $variable_value = "file://" . $variable_value; } # special parsing for the nordugrid VO: source="nordugrid" if (($variable_name eq "source") && ($variable_value eq "nordugrid")) { $variable_value = "vomss://voms.ndgf.org:8443/voms/nordugrid.org"; } } # store values to hash: $parsedconfig{blockname[_blockindex]}{variable_name} unless ($parsedconfig{$blockname}{$variable_name}) { $parsedconfig{$blockname}{$variable_name} = $variable_value; } else { $parsedconfig{$blockname}{$variable_name} .= '[separator]' . $variable_value; } } close CONFIGFILE; # # CHECK CONFIGURATION FOR REQUIRED INFO # # check [vo] blocks exists my @blocknames_tmp = (keys %parsedconfig); unless ( grep /^vo_/, @blocknames_tmp) { &Logger("There are no [vo] blocks are found in the $fileopt configuration file", FATAL); } # general configurable options (order: [nordugridmap] -> [common] -> $ENV -> defaults); my $capath = $parsedconfig{"nordugridmap"}{"x509_cert_dir"} || $parsedconfig{"common"}{"x509_cert_dir"} || $ENV{X509_CERT_DIR} || "/etc/grid-security/certificates/"; my $x509cert = $parsedconfig{'nordugridmap'}{'x509_user_cert'} || $parsedconfig{'common'}{'x509_user_cert'} || $ENV{X509_USER_CERT} || "/etc/grid-security/hostcert.pem"; my $x509key = $parsedconfig{'nordugridmap'}{'x509_user_key'} || $parsedconfig{'common'}{'x509_user_key'} || $ENV{X509_USER_KEY} || "/etc/grid-security/hostkey.pem"; my $default_mapfile = $parsedconfig{'nordugridmap'}{'gridmap'} || $parsedconfig{'common'}{'gridmap'} || "/etc/grid-security/grid-mapfile"; my $mapfile_owner = $parsedconfig{'nordugridmap'}{'gridmap_owner'} || $parsedconfig{'grid-manager'}{'user'} || "root"; my $mapfile_group = $parsedconfig{'nordugridmap'}{'gridmap_group'} || "root"; my $mapfile_chmod = $parsedconfig{'nordugridmap'}{'gridmap_permissions'} || "0600"; my $logfile = $parsedconfig{'nordugridmap'}{'logfile'} || "/var/log/arc/nordugridmap.log"; my $cachedir = $parsedconfig{'nordugridmap'}{'cachedir'} || "/var/spool/nordugrid/gridmapcache/"; my $cache_maxlife = $parsedconfig{'nordugridmap'}{'cachetime'} || 3 * 24 * 60 * 60; # three days old &set_numeric_value(\$debug_level, 'debug', '0 to 5') unless $opt_test; &set_numeric_value(\$fetch_url_timeout, 'fetch_timeout', 'numeric integers'); &set_configuration_flag('cache_enabled','cache_enable','yes','no'); &set_configuration_flag('mapuser_processing','mapuser_processing','overwrite','keep'); &set_configuration_flag('allow_empty_unixid','allow_empty_unixid','yes','no'); &set_configuration_flag('voms_use_soap','voms_method','soap','get'); 
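# Illustrative sketch (comments only; the block index, line number and values
# below are hypothetical, not taken from a real configuration): after the
# parser above has run, a [vo] block such as
#
#   [vo]
#   vo="atlas"
#   source="http://www.nordugrid.org/developers.dn"
#   source="nordugrid"
#
# ends up in %parsedconfig under an indexed key, with repeated options joined
# by the '[separator]' marker and source="nordugrid" rewritten to the
# NorduGrid VOMS URL, roughly like:
#
#   $parsedconfig{'vo_003'} = {
#       'configline' => 42,
#       'vo'         => 'atlas',
#       'source'     => 'http://www.nordugrid.org/developers.dn[separator]'
#                     . 'vomss://voms.ndgf.org:8443/voms/nordugrid.org',
#   };
#
# The set_numeric_value()/set_configuration_flag() helpers around this point
# read the [nordugridmap] part of the same hash, e.g.
# $parsedconfig{'nordugridmap'}{'debug'}, and fall back to built-in defaults
# when an option is missing.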
&set_configuration_flag('log_to_file', 'log_to_file', 'yes', 'no') unless $opt_test; # # ENABLE/DISABLE FEATURES DEPEND ON CONFIGURATION # # redirect log to file if ( $config_flags{'log_to_file'} ) { open ( STDERR, ">> $logfile" ) or &Logger("Cannot open logfile '$logfile' for writting. Exiting.", FATAL); &Logger("Starting grid-mapfiles processing cycle", INFO); } # if cache enabled ensure cache directory exists and writable if ( $config_flags{'cache_enabled'} ) { # check cachedir exists unless ( -d $cachedir ) { &Logger("Cache directory does not exists. Trying to create...", WARNING); eval { mkpath($cachedir) }; if ($@) { &Logger("Failed to create cache directory $cachedir", FATAL); } &Logger("Cache directory $cachedir has been created", INFO); } &Logger("Cache directory $cachedir is not writable", FATAL) unless -w $cachedir; } # # PROCESS [VO] BLOCKS DEPENDENCIES # # generate a list of all external sources to fetch # generate a list of [vo] blocks dependencies my %sources_list = (); my %sources_deps = (); my %generated_blocks = (); # process blocks defined in arc.conf foreach my $block (sort(keys %parsedconfig)) { next unless $block =~ /^vo_/; my $voname = &get_vo_name($block); $sources_deps{"vo://".$voname} = &get_block_sources($block, \%sources_list, \%generated_blocks); } # process blocks generated authomaticaly on the previous step foreach my $block (sort(keys %generated_blocks)) { $sources_deps{"vo://".$block} = &get_block_sources($block, \%sources_list, \%generated_blocks, \%generated_blocks); $parsedconfig{$block} = $generated_blocks{$block}; } # ensure loop-free configuration my %dryrun_sources_data = %sources_list; &process_vo_blocks(\%sources_deps, \%dryrun_sources_data, 1); # # FETCH SOURCES AND ASSEMBLE GRIDMAPS # # fetch all sources my %sources_data = (); &fetch_sources(\%sources_list, \%sources_data); # assemble [vo] blocks gridmap lists &process_vo_blocks(\%sources_deps, \%sources_data); # assemble gridmapfiles my %mapfile_data = (); &process_mapfiles(\%mapfile_data, \%sources_data); # write mapfiles to disk if ( $opt_test ) { &write_mapfiles_data(\%mapfile_data, 1); } else { &write_mapfiles_data(\%mapfile_data, 0, $mapfile_owner, $mapfile_group, $mapfile_chmod); } # END OF MAIN ROUTINE :-) # # GENERAL CONFIGURATION PARSER SUBROUTINES # # get VO name for [vo] block sub get_vo_name { my $block = shift; if ( $parsedconfig{$block}{'vo'} ) { return $parsedconfig{$block}{'vo'}; } else { &Logger("Parameter 'vo' is required but not set inside the [vo] block. Please check your configuratinon ($fileopt line $parsedconfig{$block}{'configline'})", FATAL, "ConfigParser"); } } # set configuration flags in %config_flags based on [nordugridmap] parsed configuration sub set_configuration_flag { my ( $flag_name, $option_name, $value_yes, $value_no ) = @_; if ( defined $parsedconfig{'nordugridmap'}{$option_name} ) { if ( $parsedconfig{'nordugridmap'}{$option_name} eq $value_yes ) { $config_flags{$flag_name} = 1; } elsif ( $parsedconfig{'nordugridmap'}{$option_name} eq $value_no ) { $config_flags{$flag_name} = 0; } else { my $text_def = $config_flags{$flag_name} ? $value_yes : $value_no; &Logger("Unrecognized value for option '$option_name' in [nordugridmap] configuration. Valid valueas are: '$value_yes' or '$value_no'. 
Using default '$text_def'", WARNING, "ConfigParser"); } } } # return numeric value of [nordugridmap] parsed configuration option sub set_numeric_value { my ( $ref_var, $option_name, $value_valid ) = @_; if ( defined $parsedconfig{'nordugridmap'}{$option_name} ) { if ( $parsedconfig{'nordugridmap'}{$option_name} =~ /^\d+$/ ) { $$ref_var = $parsedconfig{'nordugridmap'}{$option_name}; } else { &Logger("Unrecognized value for option '$option_name' in [nordugridmap] configuration. Valid valueas are: $value_valid. Using default value: $$ref_var.", WARNING, "ConfigParser"); } } } # return boolean flag value in specified %options_hash sub get_source_flag { my ( $ref_options_hash, $flag_name, $option_name, $value_yes, $value_no ) = @_; if ( defined $ref_options_hash->{$option_name} ) { return 1 if ( $ref_options_hash->{$option_name} eq $value_yes ); return 0 if ( $ref_options_hash->{$option_name} eq $value_no ); my $text_def = $config_flags{$flag_name} ? $value_yes : $value_no; &Logger("Unrecognized value for source-specific option '$option_name'. Valid valueas are: '$value_yes' or '$value_no'. Using globaly configured value '$text_def'", WARNING, "ConfigParser"); } return $config_flags{$flag_name}; } # # MAPPING PROCESSING SUBROUTINES # # assemble grid-mapfiles data sub process_mapfiles { my ( $ref_mapfile_data, $ref_sources_data ) = @_; foreach my $block (sort(keys %parsedconfig)) { next unless $block =~ /^vo_/; my $gmf = $parsedconfig{$block}{'file'} || $default_mapfile; next if $gmf eq '/dev/null'; next if defined $ref_mapfile_data->{$gmf}; &Logger("Assembling gridmap file: $gmf", INFO, "AssembleGridMapfile"); $ref_mapfile_data->{$gmf} = {}; my @vo_blocks_list = &get_file_vo_sources($gmf); foreach my $source ( @vo_blocks_list ) { foreach my $dn ( keys %{$ref_sources_data->{$source}} ) { unless ( defined $ref_mapfile_data->{$gmf}->{$dn} ) { $ref_mapfile_data->{$gmf}->{$dn} = $ref_sources_data->{$source}->{$dn}->{'mapuser'}; } else { &Logger("Entry '$dn' already exists in $gmf gridmapfile. Skiped.", DEBUG, "AssembleGridMapfile"); } } } } } # asseble [vo] blocks mapping data sub process_vo_blocks { my ($ref_sources_deps, $ref_sources_data, $dryrun) = @_; my $blocks_unfinished = 1; my $blocks_processed = 1; # loop until all [vo] blocks are processed while ( $blocks_unfinished ) { if ( $blocks_processed == 0 ) { &Logger("Loop detected in the [vo] blocks dependencied. Please review you configuration.", FATAL, "AssembleBlockData"); } # initial values $blocks_unfinished = 0; $blocks_processed = 0; foreach my $block (sort(keys %parsedconfig)) { next unless $block =~ /^vo_/; my $vo_name = &get_vo_name($block); my $vo_ref = "vo://" . 
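# (Descriptive note: the assembled result of every [vo] block is stored in
#  the shared sources hash under a "vo://<vo name>" key, so other [vo] blocks
#  that list source="vo://..." pick it up here once it has been processed.)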
$vo_name; next if defined $ref_sources_data->{$vo_ref}; $blocks_unfinished++; # check all sources fetched or already assembled my $undefined_cnt = 0; foreach my $source ( @{$ref_sources_deps->{$vo_ref}} ) { $undefined_cnt++ unless defined $ref_sources_data->{$source}; } # assemble [vo] block gridmap unless ( $undefined_cnt ) { unless ( $dryrun ) { # get [vo] block parameters my $mapped_user = ""; if ( $parsedconfig{$block}{'mapped_unixid'} ) { $mapped_user = $parsedconfig{$block}{'mapped_unixid'}; } # define [vo] block filter if any my @Rules = (); if ( $parsedconfig{$block}{'filter'} ) { my @filters = split /\[separator\]/, $parsedconfig{$block}{'filter'}; foreach my $filter_entry (@filters) { push @Rules, $filter_entry; } # if we allow certain people, deny becomes last option if ( ($parsedconfig{$block}{'filter'} =~ /allow/) ) { push @Rules, "deny *"; } } else { # no filters - allow all push @Rules, "allow *"; } # print block parameters summary on debug &Logger("Assembling DNs list for the [vo] block $vo_name (mapped_unixid=$mapped_user)", DEBUG, "AssembleBlockData"); # process all sources $ref_sources_data->{$vo_ref} = {}; foreach my $source ( @{$ref_sources_deps->{$vo_ref}} ) { foreach my $dn ( keys %{$ref_sources_data->{$source}} ) { my %source_dn_hash = %{$ref_sources_data->{$source}->{$dn}}; unless ( defined $ref_sources_data->{$vo_ref}->{$dn} ) { # check DN is filtered next unless &rule_match($dn, \@Rules); # check mapping user exists for record if ( $config_flags{'mapuser_processing'} || ! defined $source_dn_hash{'mapuser'} ) { if ( $mapped_user eq "" ) { unless ( $config_flags{'allow_empty_unixid'} ) { &Logger("There is no mapping for DN '$dn' in [vo] block $vo_name. Skiping record.", WARNING, "AssembleBlockData"); next; } else { &Logger("Using empty mapping for DN '$dn' in [vo] block $vo_name.", VERBOSE, "AssembleBlockData"); } } } # if we are still here - add entry $ref_sources_data->{$vo_ref}->{$dn} = \%source_dn_hash; # always map to common user on 'rewrite' mapuser processing if ( $config_flags{'mapuser_processing'} || ! defined $ref_sources_data->{$vo_ref}->{$dn}->{'mapuser'} ) { $ref_sources_data->{$vo_ref}->{$dn}->{'mapuser'} = $mapped_user; } &Logger("Adding mapping entry '$dn -> $ref_sources_data->{$vo_ref}->{$dn}->{'mapuser'}' for the '$vo_name' [vo] block.", DEBUG, "AssembleBlockData"); # maintain information about where record is come from unless ( defined $ref_sources_data->{$vo_ref}->{$dn}->{'source'} ) { $ref_sources_data->{$vo_ref}->{$dn}->{'source'} = $source; } } else { &Logger("Mapping for '$dn' entry already exists for the '$vo_name' [vo] block. Skiped.", DEBUG, "AssembleBlockData"); } } } } else { $ref_sources_data->{$vo_ref} = 1; } $blocks_processed++; } } } } # write mapfiles to disk sub write_mapfiles_data { my ( $ref_mapfile_data, $dryrun, $owner, $group, $chmod ) = @_; foreach my $mapfile ( keys %$ref_mapfile_data ) { unless ( $dryrun ) { my ($gmf, $tmp_mapfile) = tempfile($mapfile . "XXXXX", UNLINK => 1) or &Logger("Cannot open temporary file to write $mapfile data", FATAL, "WriteMapfile"); &Logger("Writting mapfile data to $mapfile", INFO, "WriteMapfile"); while ( my ($dn, $map) = each(%{$ref_mapfile_data->{$mapfile}}) ) { print $gmf "\"$dn\" $map\n" or &Logger("Failed to write gridmap data (not enough disk space?) 
to temporary file $tmp_mapfile", FATAL, "WriteMapfile"); } close($gmf); my $uid = getpwnam($owner); my $gid = getgrnam($group); chown $uid, $gid, $tmp_mapfile; chmod oct($chmod), $tmp_mapfile; rename $tmp_mapfile, $mapfile; } else { my $gmf_string = ""; while ( my ($dn, $map) = each(%{$ref_mapfile_data->{$mapfile}}) ) { $gmf_string .= " \"$dn\" $map\n"; } &Logger("Printing mapfile content for $mapfile:\n$gmf_string", INFO, "WriteMapfile"); } } } # # SOURCES DEPENDENCIES TRACKING SUBROTINES # # return array of [vo] blocks required to generate gridmapfile sub get_file_vo_sources { my $file_name = shift; my @file_vos_list = (); foreach my $block (sort(keys %parsedconfig)) { next unless $block =~ /^vo_/; if ( defined $parsedconfig{$block}{'file'} ) { next unless $parsedconfig{$block}{'file'} eq $file_name; } else { next unless $file_name eq $default_mapfile; } push @file_vos_list, "vo://".get_vo_name($block); } return @file_vos_list; } # extract optional per-source parameters from source string and return hash # optional parameters will be removed from passed source string sub get_source_params { my $ref_source = shift; my ( $source_str, $params_str ) = split '<', $$ref_source; # trim url without optional parameters and return back $source_str =~ s/^\s+//; $source_str =~ s/\s+$//; $$ref_source = $source_str; &Logger("Source URL is: $source_str", DEBUG, "ParseSourceURL"); # create source parameters hash my %source_params = (); if ( defined $params_str ) { foreach my $param_record ( split ' ', $params_str ) { next unless ( $param_record =~/^(\w+)=(.+)$/ ); &Logger("Processing source optional parameter '$1'=$2", DEBUG, "ParseSourceURL"); $source_params{$1}=$2; } } return \%source_params; } # return list of block dependencied and fill external sources list sub get_block_sources { my ($block_id, $ref_sources_list, $ref_generated_blocks, $ref_confighash) = @_; # parsed arc.conf hash is used by default $ref_confighash = \%parsedconfig unless defined $ref_confighash; # array with block dependencied my @vo_sources_list = (); &Logger("Getting sources for VO block: $block_id", DEBUG, "GetInfoSources"); my @urls = split /\[separator\]/, $ref_confighash->{$block_id}{'source'}; foreach my $source (@urls) { &Logger("Found mapping source record: $source", DEBUG, "GetInfoSources"); # get optional per-source parameters my $ref_source_params = &get_source_params(\$source); my $source_id = $source; # check sources is already in sources list if ( defined $ref_sources_list->{$source} ) { # if source parameters differ - use block_id prefix if ( &Storable::freeze($ref_source_params) ne &Storable::freeze($ref_sources_list->{$source}) ) { &Logger("Adding block ID prefix for duplicate source URL with different parameters set", DEBUG, "GetInfoSources"); $source_id = "$block_id|$source"; } else { &Logger("Source URL is already defined", DEBUG, "GetInfoSources"); } } # get source protocol my ( $protocol, $uri ) = $source =~ m/([-\w]+):\/\/(.*)/; $protocol = lc $protocol; # process URLs depend on protocol used if ( $protocol =~ /^vomss?$/i ) { # special handling for voms_fqan_map if ( defined $ref_confighash->{$block_id}{'voms_fqan_map'} ) { # FQANs defined for VOMS URL: generate URL for every FQAN my @fqans = split /\[separator\]/, $ref_confighash->{$block_id}{'voms_fqan_map'}; my ( $voms_baseid, $dummy_fqan ) = $source_id =~ m/^([^\?]+)\??(.*)$/; foreach my $fqan_match ( @fqans ) { my ( $fqan, $map_id ) = $fqan_match =~ m/^([^\s]+)\s+(.*)$/; # create URL with specified FQAN my $fqan_source_id = $voms_baseid . "?" . 
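# (Illustrative example, with a hypothetical VOMS host: for
#    source="vomss://voms.example.org:8443/voms/atlas"
#    voms_fqan_map="/atlas/Role=production atlasprod"
#  this step builds the per-FQAN source id
#    vomss://voms.example.org:8443/voms/atlas?/atlas/Role=production
#  and just below its 'mapped_unixid' parameter is forced to 'atlasprod'.)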
$fqan; my ( $dummy_id, $fqan_source_url ) = $fqan_source_id =~ m/(\w+\|)?([^|]+)/; &Logger("Generating FQAN-map source URL: $fqan_source_url (mapped to $map_id)", VERBOSE, "GetInfoSources"); # put mapped_unixid parameter my %fqan_source_params = %$ref_source_params; $fqan_source_params{'mapped_unixid'} = $map_id; # save as [vo] block source $ref_sources_list->{$fqan_source_id} = \%fqan_source_params; push @vo_sources_list, $fqan_source_id; } } # standalone VOMS URL: retreive and use directly as VO source # FQANs before original URL to apply specific maps first $ref_sources_list->{$source_id} = $ref_source_params; push @vo_sources_list, $source_id; } elsif ( $protocol =~ /^(https?|ldap)$/i ) { # external sources: retreive and use directly as VO source $ref_sources_list->{$source_id} = $ref_source_params; push @vo_sources_list, $source_id; } elsif ( $protocol =~ /^file$/i ) { # local file: if created by nordugridmap - use [vo] blocks as VO sources # if file is independent source - use directly my @file_vo_sources = &get_file_vo_sources($uri); if ( @file_vo_sources ) { push @vo_sources_list, @file_vo_sources; } else { if ( -e $uri ) { $ref_sources_list->{$source_id} = $ref_source_params; push @vo_sources_list, $source_id; } else { &Logger("File source '$uri' does not exists. Ignoring.", WARNING, "GetInfoSources"); } } } elsif ( $protocol =~ /^vo$/i ) { # [vo] block: use directly as VO source push @vo_sources_list, $source_id; } else { &Logger("Unsupported protocol found: $source", WARNING, "GetInfoSources"); } } return \@vo_sources_list; } # # SUBROUTINES TO GET INFORMATION FROM DIFFERENT SOURCES # # fetch data from all sources in sources_list and put them to sources_data hash sub fetch_sources { my ( $ref_sources_list, $ref_sources_data ) = @_; my $exit_code; my $ref_subjects; foreach my $source_id (keys %$ref_sources_list) { # separate optional block_id prefix from source URL my ( $block_id, $source ) = $source_id =~ m/(\w+\|)?([^|]+)/; # get source parameters my ( $protocol, $uri ) = $source =~ m/(\w+):\/\/(.*)/; my $ref_source_params = $ref_sources_list->{$source_id}; # check source-specific cache control my $use_cache = &get_source_flag($ref_source_params, 'cache_enabled','cache_enable','yes','no'); # get subjects from external URL if ( $protocol =~ /^vomss?$/i ) { ($exit_code, $ref_subjects) = &voms_subjects($source, $ref_source_params); } elsif ( $protocol =~ /^https?$/i ) { ($exit_code, $ref_subjects) = &http_subjects($source, $ref_source_params); } elsif ( $protocol =~ /^file$/i ) { ($exit_code, $ref_subjects) = &read_gridmap($uri, $ref_source_params); } else { &Logger("Unsupported protocol to fetch: $protocol", FATAL, "FetchSourcesData"); } # check fetch result and try to save/load cache unless ( $exit_code ) { if ( $use_cache ) { &write_cached_subjects($source_id, $ref_subjects) unless $opt_test; } } else { &Logger("Fail to retreive data from URL: $source", WARNING, "FetchSourcesData"); if ( $use_cache ) { my ($err_code, $cache_ref_subjects) = &read_cached_subjects($source_id); unless ($err_code) { &Logger("Using locally cached data for URL: $source", INFO, "FetchSourcesData"); $ref_subjects = $cache_ref_subjects; } } } # put fetched results to sources_data hash $ref_sources_data->{$source_id} = $ref_subjects; } } # setup HTTPS SSL parameters sub setup_https { # For Net::SSL $ENV{HTTPS_CERT_FILE} = $x509cert; $ENV{HTTPS_KEY_FILE} = $x509key; $ENV{HTTPS_CA_DIR} = $capath; # For IO::Socket::SSL (LWP) if ( $IO::Socket::SSL::VERSION ) { IO::Socket::SSL::set_ctx_defaults( ca_path => 
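# (Descriptive note: the HTTPS_* environment variables above configure the
#  Net::SSL backend, while this set_ctx_defaults() call configures
#  IO::Socket::SSL, so whichever TLS backend LWP ends up using presents the
#  host certificate and key selected earlier from arc.conf/environment.)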
$capath, use_cert => 1, key_file => $x509key, cert_file => $x509cert, verify_mode => 1 ); } } # get content of HTTP(S) URL sub get_http_url { my $uri = shift; my $scheme = $uri->scheme; &Logger("Unsupported URL ($uri) passed to method", FATAL, "FetchSourcesData.HTTP") unless ( $scheme =~ /^https?$/ ); # handle SSL environment &setup_https() if ($uri->scheme eq 'https'); # create LWP object my $ua = LWP::UserAgent->new( agent => USERAGENT."/".VERSION, timeout => $fetch_url_timeout ); # do GET query my $res = $ua->get($uri, 'Cache-Control' => 'no-cache', 'Pragma' => 'no-cache'); unless ($res->is_success) { &Logger("HTTP request failed for URL $uri:\n\t". $res->message, ERROR, "FetchSourcesData.HTTP"); return 0; } return $res->content; } # HTTP(S) sources: expects plain text list of "DN" sub http_subjects { my ($url, $ref_source_params) = @_; my %Subjects = (); # get subjects from URL specified &Logger("Getting subjects from source: $url", DEBUG, "FetchSourcesData.HTTP"); my $uri = URI->new($url); my $content = get_http_url($uri); unless ($content) { &Logger("Failed to get information from source: $url", ERROR, "FetchSourcesData.HTTP"); return (1, \%Subjects); } my $count = 0; foreach my $line ( split /\n/, $content ) { next if $line =~ /^(\s)*$/; chomp($line); # "subject" should be the first and only one parsed parameter my ($subject, $dummy) = split (/\s+"(.*)"/, $line); $subject =~ s/"(.*)"/$1/g; $Subjects{$subject} = { 'subject' => $subject }; # mapped_unixid can be passed via optional parameters $Subjects{$subject}{'mapuser'} = $ref_source_params->{'mapped_unixid'} if defined $ref_source_params->{'mapped_unixid'}; $count++; } &Logger("No information retreived from URL: $url", WARNING, "FetchSourcesData.HTTP") unless $count; return (0, \%Subjects); } # VOMS(S) methods wrapper sub voms_subjects { my ($url, $ref_source_params) = @_; my $use_soap = &get_source_flag($ref_source_params, 'voms_use_soap', 'voms_method', 'soap', 'get'); if ( $use_soap ) { return &voms_subjects_soap($url, $ref_source_params); } else { return &voms_subjects_get($url, $ref_source_params); } } # VOMS(S) sources: expect VOMS-Admin SOAP responce (SOAP:Lite implementation) sub voms_subjects_soap { my ($url, $ref_source_params) = @_; my %Subjects = (); &Logger("Getting subjects from source: $url", DEBUG, "FetchSourcesData.VOMS"); # get SOAP endpoint URL and container my ( $endpoint, $container ) = split(/\?/, $url, 2); $endpoint =~ s/^voms/http/; # handle SSL environment &setup_https() if $endpoint =~ /^https/; $endpoint .= '/services/VOMSCompatibility'; my $soap_client; eval { $soap_client = SOAP::Lite->proxy($endpoint, agent => USERAGENT."/".VERSION, timeout => $fetch_url_timeout ); }; unless ( $soap_client ) { &Logger("Failed to connect to SOAP endpoint: $url", ERROR, "FetchSourcesData.VOMS"); return (1, \%Subjects); } # call getGridmapUsers method my $soap_req; eval { if ( $container ) { $soap_req = $soap_client->getGridmapUsers($container); } else { $soap_req = $soap_client->getGridmapUsers(); } }; unless ( $soap_client->transport->is_success ) { &Logger("SOAP transport failed for URL: $url. Error: ".$soap_client->transport->status, ERROR, "FetchSourcesData.VOMS"); return (1, \%Subjects); } unless ($soap_req) { &Logger("SOAP responce parsing failed for URL: $url", ERROR, "FetchSourcesData.VOMS"); return (3, \%Subjects); } if ( $soap_req->fault ) { &Logger("SOAP request failed for URL: $url. 
Returned error: ".$soap_req->faultstring, ERROR, "FetchSourcesData.VOMS"); return (4, \%Subjects); } if ( ref($soap_req->result) ne 'ARRAY' ) { &Logger("SOAP returned non-array result for URL: $url", VERBOSE, "FetchSourcesData.VOMS"); return (0, \%Subjects); } if ( ! @{$soap_req->result} ) { &Logger("SOAP returned empty result for URL: $url", VERBOSE, "FetchSourcesData.VOMS"); return (0, \%Subjects); } foreach my $subject ( @{$soap_req->result} ) { $Subjects{$subject} = { 'subject' => $subject }; # mapped_unixid can be passed via optional parameters $Subjects{$subject}{'mapuser'} = $ref_source_params->{'mapped_unixid'} if defined $ref_source_params->{'mapped_unixid'}; } return (0, \%Subjects); } # VOMS(S) sources: expect VOMS-Admin SOAP responce (GET+XML manual parser implementation) sub voms_subjects_get { my ($url, $ref_source_params) = @_; my %Subjects = (); &Logger("Getting subjects from source: $url", DEBUG, "FetchSourcesData.VOMS"); # create proper HTTP(S) URL my $uri = URI->new($url); my $scheme = $uri->scheme; $scheme =~ s/^voms/http/; $uri->scheme($scheme); # prepare GET query $uri->path($uri->path.'/services/VOMSCompatibility'); if ( $uri->query() ) { $uri->query_form( method => 'getGridmapUsers', container => $uri->query() ); } else { $uri->query_form( method => 'getGridmapUsers'); } # get URI content my $content = get_http_url($uri); return ( 1, \%Subjects) unless $content; # parse result on success my $parser = new XML::DOM::Parser; my $doc; eval { $doc = $parser->parse($content) }; unless ($doc) { &Logger("Parsing VOMS ($url) XML response FAILED", ERROR, "FetchSourcesData.VOMS"); return ( 3, \%Subjects); } my $retval = $doc->getElementsByTagName('soapenv:Body'); my $subject; if ($retval->getLength == 1) { my $returnNode = $doc->getElementsByTagName('getGridmapUsersReturn')->item(0); for my $user ($returnNode->getChildNodes) { if ($user->getNodeType == ELEMENT_NODE) { $subject = undef; eval { $subject = $user->getFirstChild->getData }; if ( defined $subject ) { $Subjects{$subject} = { 'subject' => $subject }; # mapped_unixid can be passed via optional parameters $Subjects{$subject}{'mapuser'} = $ref_source_params->{'mapped_unixid'} if defined $ref_source_params->{'mapped_unixid'}; } else { &Logger("Found subject that cannot be parsed from VOMS XML ($url)", ERROR, "FetchSourcesData.VOMS"); } } } } else { &Logger("VOMS search($uri): No such object", ERROR, "FetchSourcesData.VOMS"); return ( 4, \%Subjects); } $doc->dispose; return (0, \%Subjects); } # Mapfile sources: expect local gridmap-file sub read_gridmap { my ($gridmap_file, $ref_source_params) = @_; my %Subjects = (); &Logger("Getting subjects from source: file://$gridmap_file", DEBUG, "FetchSourcesData.LocalFile"); if (! -e $gridmap_file) { &Logger("File $gridmap_file not found", ERROR, "FetchSourcesData.LocalFile"); return (1, \%Subjects); } if (! -T $gridmap_file) { &Logger("File $gridmap_file not in text format", ERROR, "FetchSourcesData.LocalFile"); return (2, \%Subjects); } unless (open(IN, "< $gridmap_file")) { &Logger("Unable to open $gridmap_file", ERROR, "FetchSourcesData.LocalFile"); return (3, \%Subjects); } binmode IN; # mapped_unixid can be passed via optional parameters, overwriting is controlled by 'mapuser_processing' option my $def_mapuser = ( defined $ref_source_params->{'mapped_unixid'} ) ? 
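# (Illustrative examples of the record formats accepted by the loop below;
#  the DNs and the 'griduser' account are made up:
#    "/O=Grid/O=NorduGrid/CN=Some User" griduser
#    "/O=Grid/O=NorduGrid/CN=Some User"
#    /O=Grid/O=NorduGrid/CN=No_Spaces griduser
#    /O=Grid/O=NorduGrid/CN=No_Spaces
#  i.e. a quoted or unquoted subject DN, optionally followed by the local
#  account it should be mapped to.)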
$ref_source_params->{'mapped_unixid'} : 0; my $map_overwrite = &get_source_flag($ref_source_params, 'mapuser_processing','mapuser_processing','overwrite','keep'); while (my $f = ) { chomp($f); if ($f =~ /^\s*\"((\/[^\/]+)+)"\s+([^\s]+)\s*$/) { # record match: "/user/DN" mapping my $subject = $1; my $mapuser = $3; $mapuser = $def_mapuser if ( $def_mapuser && $map_overwrite ); $Subjects{$subject} = { 'subject' => $subject, 'mapuser' => $mapuser }; } elsif ($f =~ /^\s*\"((\/[^\/]+)+)\"\s*$/) { # record match: "/user/DN/only" my $subject = $1; $Subjects{$subject} = { 'subject' => $subject }; $Subjects{$subject}{'mapuser'} = $def_mapuser if ( $def_mapuser ); } elsif ($f =~ /^\s*((\/[^\/\s]+)+)\s+([^\s]+)\s*$/) { # record match: /user/DN/no_spaces mapping my $subject = $1; my $mapuser = $3; $mapuser = $def_mapuser if ( $def_mapuser && $map_overwrite ); $Subjects{$subject} = { 'subject' => $subject, 'mapuser' => $mapuser }; } elsif ($f =~ /^\s*((\/[^\/\s]+)+)\s*$/) { # record match: /user/DN/no_spaces/only my $subject = $1; $Subjects{$subject} = { 'subject' => $subject }; $Subjects{$subject}{'mapuser'} = $def_mapuser if ( $def_mapuser ); } else { &Logger("Skipping missformed record '$f' in file $gridmap_file", WARNING, "FetchSourcesData.LocalFile"); } } close(IN); return (0, \%Subjects); } # # MATCHING AND FILTERING # # check subject match against ACL rules sub rule_match { my ($subj, $ref_Rules) = @_; my @Rules = @$ref_Rules; my $subjReg = $subj; $subjReg =~ s/\@/\\\@/g; foreach my $rule (@Rules) { my ($action, $acl) = split / /, $rule, 2; $acl =~ s/\@/\\\@/g; $acl =~ s/\*/.\*/g; if ($subjReg =~ /$acl/) { if ($action eq "deny") { &Logger("User '$subj' denied by rule 'deny $acl'", DEBUG, "FilterDN"); } else { &Logger("User '$subj' allowed by rule 'allow $acl'", DEBUG, "FilterDN") if ( $acl ne ".*" ); return 1; } last; } } return 0; } # # CACHE OPERATIONS SUBROUTINES # # get source URL hash sub urlhash { my $url = shift; # split the url into substrings of length 8 and run crypt on each substring my @chunks = ( $url =~ /.{1,8}/gs ); my $result; foreach my $c (@chunks) { $result .= crypt $c, "arc"; } $result =~ s/[\/|\.]//g; return $result; } # get cache location for source URL sub get_subject_cache_location { my $url = shift; my $hash = &urlhash($url); my $file_location = $cachedir . "/" . 
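# (Descriptive note: urlhash() above turns a source URL into a stable,
#  filesystem-safe token by crypt()-ing 8-character chunks of the URL with
#  the fixed salt "arc" and stripping '/', '|' and '.' characters, so the
#  same URL is always cached under the same file name inside $cachedir.)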
$hash; return $file_location; } # write cached values for source URL sub write_cached_subjects { my ($url, $ref_subjects) = @_; my %Subjects = %$ref_subjects; my $cache_file = &get_subject_cache_location($url); &Logger("Writting cached subjects for $url to $cache_file", DEBUG, "SourceCaching"); store($ref_subjects, $cache_file) or &Logger("Failed to write to the cache file $cache_file", WARNING, "SourceCaching"); } # read cached values for source URL sub read_cached_subjects { my $url = shift; my $cache_file = &get_subject_cache_location($url); unless ( -e $cache_file ) { &Logger("Cache file does not exists for URL: $url", VERBOSE, "SourceCaching"); return 1; } my $mtime = (stat($cache_file))[9]; if ($mtime + $cache_maxlife < time()) { &Logger("Rejecting to use cache, max lifetime exceeded", VERBOSE, "SourceCaching"); eval { unlink($cache_file); }; return 2; } &Logger("Getting subjects for $url from cache", DEBUG, "SourceCaching"); my $ref_subjects; eval { $ref_subjects = retrieve($cache_file); }; if ( defined $ref_subjects ) { return 0, $ref_subjects; } &Logger("Failed to get data from cache file for URL: $url", WARNING, "SourceCaching"); eval { unlink($cache_file); }; return 3; } # # LOGGING FUNCTIONS # # convert debug level to number sub debug_numericv { my $level = shift; return $level if ( $level =~ /\d/ ); return 0 if $level =~ /^FATAL$/i; return 1 if $level =~ /^ERROR$/i; return 2 if $level =~ /^WARNING$/i; return 3 if $level =~ /^INFO$/i; return 4 if $level =~ /^VERBOSE$/i; return 5 if $level =~ /^DEBUG$/i; return 2; # WARNING level on syntax error } # get debug level string value sub debug_stringv { my $level = shift; return "FATAL" if ( $level == 0 ); return "ERROR" if ( $level == 1 ); return "WARNING" if ( $level == 2 ); return "INFO" if ( $level == 3 ); return "VERBOSE" if ( $level == 4 ); return "DEBUG" if ( $level == 5 ); } # show message depending on threshold sub Logger { my ( $text, $threshold, $subsystem ) = @_; $threshold = &debug_numericv($threshold); if ( $threshold <= $debug_level ) { my $timestring = strftime("%Y-%m-%d %H:%M:%S", @{ &localtime() } ); $subsystem = ( defined $subsystem ) ? ".$subsystem" : ""; printf STDERR "[%s] [Nordugridmap%s] [%s] [$$] %s\n", $timestring, $subsystem, &debug_stringv($threshold), $text; } # exit nordugridmap on FATAL errors exit (1) unless ( $threshold ); } # # DISPLAY NORDUGRIDMAP HELP # sub printHelp { system("pod2text $0"); } =pod =head1 NAME nordugridmap - generates grid-mapfile(s) =head1 SYNOPSIS B [B<-t>, B<--test>] [B<-h>, B<--help>] [ B<-c>, B<--config> FILE ] =head1 DESCRIPTION B is usually run as a crontab entry in order to automatically generate mapfile(s). For configuration information consult tne Nordugrid ARC documentation and the arc.conf.reference =head1 OPTIONS =over 4 =item B<-t>, B<--test> Does not actually create grid-mapfile(s), but perform test run in debug mode. =item B<-h>, B<--help> Print a help screen. =item B<-c>, B<--config> FILE Specifies the configuration file to be used. By default the /etc/arc.conf is used. B utilize [nordugridmap] section for general options fine-tuning and processes all the [vo] blocks from the config. =back =head1 CREDITS The early scripts were based on a modified version of the mkgridmap (v 1.6) script written by the DataGrid - authorization team . Since then the script has been considerably rewritten. In Dec 2011 script logic was completely rewritten and B v 2.0 was born. 
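=head1 EXAMPLE

To preview what would be generated from a given configuration without touching
any grid-mapfile, run a test pass (illustrative invocation; the configuration
path is only an example):

 nordugridmap --test --config /etc/arc.conf

In test mode the debug level is forced to DEBUG and the assembled mapfile
contents are printed to standard error instead of being written to disk.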
=head1 COMMENTS balazs.konya@hep.lu.se, waananen@nbi.dk, manf@grid.org.ua =cut nordugrid-arc-5.4.2/src/utils/gridmap/PaxHeaders.7502/nordugridmap.cron.in0000644000000000000000000000012711531042570024533 xustar000000000000000027 mtime=1298417016.390647 30 atime=1513200652.547656341 30 ctime=1513200664.666804563 nordugrid-arc-5.4.2/src/utils/gridmap/nordugridmap.cron.in0000644000175000002070000000014511531042570024575 0ustar00mockbuildmock00000000000000#NorduGrid VO automatic grid-mapfile creation 11 3,9,12,15,21 * * * root @prefix@/sbin/nordugridmap nordugrid-arc-5.4.2/src/utils/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315736021175 xustar000000000000000030 mtime=1513200606.229089846 30 atime=1513200652.593656903 30 ctime=1513200664.601803768 nordugrid-arc-5.4.2/src/utils/Makefile.in0000644000175000002070000005570413214315736021256 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/utils DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive 
AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = 
@GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ 
VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @WIN32_FALSE@GRIDMAP = gridmap @WIN32_TRUE@GRIDMAP = @HED_ENABLED_FALSE@BUILD_SOURCES = $(GRIDMAP) @HED_ENABLED_TRUE@BUILD_SOURCES = hed $(GRIDMAP) SUBDIRS = $(BUILD_SOURCES) DIST_SUBDIRS = hed gridmap all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/utils/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/utils/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/utils/PaxHeaders.7502/hed0000644000000000000000000000013213214316030017577 xustar000000000000000030 mtime=1513200664.635804183 30 atime=1513200668.719854133 30 ctime=1513200664.635804183 nordugrid-arc-5.4.2/src/utils/hed/0000755000175000002070000000000013214316030017722 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/utils/hed/PaxHeaders.7502/wsdl2hed.cpp0000644000000000000000000000012411441417147022110 xustar000000000000000027 mtime=1283858023.146715 27 atime=1513200575.283711 30 ctime=1513200664.630804122 nordugrid-arc-5.4.2/src/utils/hed/wsdl2hed.cpp0000644000175000002070000002606111441417147022162 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include "schemaconv.h" Arc::NS ns; struct to_upper { int operator() (int ch) { return std::toupper(ch); } }; struct to_lower { int operator() (int ch) { return std::tolower(ch); } }; static void std_h_header(std::string &name, std::ofstream &h) { std::string uname = name; std::transform(name.begin(), name.end(), uname.begin(), to_upper()); h << "// Generated by wsdl2hed " << std::endl; h << "#ifndef __ARC_" << uname << "_H__" << std::endl; h << "#define __ARC_" << uname << "_H__" << std::endl; h << std::endl; h << "#include " << std::endl; h << "#include " << std::endl; h << "#include " << std::endl; h << std::endl; h << "namespace " << name << " {" << std::endl; h << std::endl; h << "class " << name << "Service: public Arc::Service" << std::endl; h << "{" << std::endl; h << std::endl; } static void h_public_part(std::string &name, std::ofstream &h) { h << " public:" << std::endl; h << " " << name << "Service(Arc::Config *cfg);" << std::endl; h << " virtual ~" << name << "Service(void);" << std::endl; h << " virtual Arc::MCC_Status process(Arc::Message &inmsg, Arc::Message &outmsg);" << std::endl; } static void h_private_part(std::string &/*name*/, std::ofstream &h, Arc::XMLNode &xml) { h << " private:" << std::endl; h << " Arc::NS ns;" << std::endl; h << " Arc::Logger logger;" << std::endl; h << " Arc::DelegationContainerSOAP delegation;" << std::endl; h << " Arc::InformationContainer infodoc;" << std::endl; h << " Arc::MCC_Status make_soap_fault(Arc::Message &outmsg);" << std::endl; h << " // Operations from WSDL" << std::endl; Arc::XMLNode op; for (int i = 0; (op = xml["wsdl:portType"]["wsdl:operation"][i]) == true; i++) { std::string n = (std::string) op.Attribute("name"); if (!n.empty()) { h << " Arc::MCC_Status " << n << "(Arc::XMLNode &in, Arc::XMLNode &out);" << std::endl; } } } static void std_cpp_header(std::string &name, std::ofstream &cpp) { std::string lname = name; std::transform(name.begin(), name.end(), lname.begin(), to_lower()); cpp << "// Generated by wsdl2hed" << std::endl; cpp << "#ifdef HAVE_CONFIG_H" << std::endl; cpp << "#include " << std::endl; cpp << "#endif" << std::endl; cpp << std::endl; cpp << "#include " << std::endl; cpp << "#include " << std::endl; cpp << "#include " << std::endl; cpp << "#include " << std::endl; cpp << std::endl; cpp << "#include \"" << lname << ".h\"" << std::endl; cpp << std::endl; cpp << "namespace " << name << " {" << std::endl; cpp << std::endl; cpp << "static Arc::Service *get_service(Arc::Config *cfg, Arc::ChainContext *) { " << std::endl; cpp << " return new " << name << "Service(cfg);" << std::endl; cpp << "}" << std::endl; } static void cpp_public_part(std::string &name, std::ofstream &cpp, Arc::XMLNode &xml) { cpp << std::endl; cpp << name << 
"Service::" << name << "Service(Arc::Config *cfg):Service(cfg),logger(Arc::Logger::rootLogger, \"" << name << "\")" << std::endl; cpp << "{" << std::endl; cpp << " // Define supported namespaces" << std::endl; Arc::NS n = xml.Namespaces(); Arc::NS::iterator it; for (it = n.begin(); it != n.end(); it++) { // Ignore some default namespace if (it->first != "soap" && it->first != "SOAP-ENV" && it->first != "SOAP-ENC" && it->first != "wsdl" && it->first != "xsd") { cpp << " ns[\"" << it->first << "\"]=\"" << it->second << "\";" << std::endl; } } cpp << "}" << std::endl; cpp << std::endl; cpp << name << "Service::~" << name << "Service(void)" << std::endl; cpp << "{" << std::endl; cpp << "}" << std::endl; cpp << std::endl; cpp << "Arc::MCC_Status " << name << "Service::process(Arc::Message &inmsg, Arc::Message &outmsg)" << std::endl; cpp << "{\n\ // Both input and output are supposed to be SOAP\n\ // Extracting payload\n\ Arc::PayloadSOAP* inpayload = NULL;\n\ try {\n\ inpayload = dynamic_cast(inmsg.Payload());\n\ } catch(std::exception& e) { };\n\ if(!inpayload) {\n\ logger.msg(Arc::ERROR, \"input is not SOAP\");\n\ return make_soap_fault(outmsg);\n\ };\n\ // Analyzing request\n\ Arc::XMLNode op = inpayload->Child(0);\n\ if(!op) {\n\ logger.msg(Arc::ERROR, \"input does not define operation\");\n\ return make_soap_fault(outmsg);\n\ }; \n\ logger.msg(Arc::VERBOSE,\"process: operation: %s\", op.Name());\n\ Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(ns);\n\ Arc::PayloadSOAP& res = *outpayload;\n\ Arc::MCC_Status ret = Arc::STATUS_OK;" << std::endl; cpp << " "; // just becuase good indent of following if section Arc::XMLNode op; for (int i = 0; (op = xml["wsdl:portType"]["wsdl:operation"][i]) == true; i++) { std::string n = (std::string) op.Attribute("name"); std::string msg = (std::string) op["output"].Attribute("message"); cpp << "if(MatchXMLName(op, \"" << n << "\")) {" << std::endl; cpp << " Arc::XMLNode r = res.NewChild(\"" << msg << "\");" << std::endl; cpp << " ret = " << n << "(op, r);" << std::endl; cpp << " } else "; } cpp << "if(MatchXMLName(op, \"DelegateCredentialsInit\")) {\n\ if(!delegation.DelegateCredentialsInit(*inpayload,*outpayload)) {\n\ delete inpayload;\n\ return make_soap_fault(outmsg);\n\ }\n\ // WS-Property\n\ } else if(MatchXMLNamespace(op,\"http://docs.oasis-open.org/wsrf/rp-2\")) {\n\ Arc::SOAPEnvelope* out_ = infodoc.Process(*inpayload);\n\ if(out_) {\n\ *outpayload=*out_;\n\ delete out_;\n\ } else {\n\ delete inpayload; delete outpayload;\n\ return make_soap_fault(outmsg);\n\ };\n\ } else {\n\ logger.msg(Arc::ERROR,\"SOAP operation is not supported: %s\", op.Name());\n\ return make_soap_fault(outmsg);\n\ };\n\ // Set output\n\ outmsg.Payload(outpayload);\n\ return Arc::MCC_Status(ret);\n\ }" << std::endl; cpp << std::endl; } static void cpp_private_part(std::string &name, std::ostream &cpp, Arc::XMLNode &xml) { cpp << "Arc::MCC_Status "<< name << "Service::make_soap_fault(Arc::Message& outmsg)\n\ {\n\ Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(ns,true);\n\ Arc::SOAPFault* fault = outpayload?outpayload->Fault():NULL;\n\ if(fault) {\n\ fault->Code(Arc::SOAPFault::Sender);\n\ fault->Reason(\"Failed processing request\");\n\ };\n\ outmsg.Payload(outpayload);\n\ return Arc::MCC_Status(Arc::STATUS_OK);\n\ }" << std::endl << std::endl; Arc::XMLNode op; for (int i = 0; (op = xml["wsdl:portType"]["wsdl:operation"][i]) == true; i++) { std::string n = (std::string) op.Attribute("name"); if (!n.empty()) { cpp << "Arc::MCC_Status " << name << "Service::" << n << 
"(Arc::XMLNode &in, Arc::XMLNode &out)" << std::endl; cpp << "{" << std::endl; cpp << " return Arc::MCC_Status();" << std::endl; cpp << "}" << std::endl; cpp << std::endl; } } } static void std_h_footer(std::string &name, std::ofstream &h) { std::string uname = name; std::transform(name.begin(), name.end(), uname.begin(), to_upper()); h << std::endl; h << "}; // class " << name << std::endl; h << "}; // namespace " << name << std::endl; h << "#endif // __ARC_" << uname << "_H__" << std::endl; } static void std_cpp_footer(std::string &name, std::ofstream &cpp) { std::string lname = name; std::transform(name.begin(), name.end(), lname.begin(), to_lower()); cpp << "}; // namespace " << name << std::endl; cpp << std::endl; cpp << "service_descriptors ARC_SERVICE_LOADER = {" << std::endl; cpp << " { \"" << lname << "\", 0, &" << name << "::get_service }," << std::endl; cpp << " { NULL, 0, NULL }" << std::endl; cpp << "};" << std::endl; } static void gen_makefile_am(std::string &name) { std::string lname = name; std::transform(name.begin(), name.end(), lname.begin(), to_lower()); std::ofstream m("Makefile.am"); m << "pkglib_LTLIBRARIES = lib" << lname << ".la" << std::endl; m << "lib" << lname << "_la_SOURCES = " << lname << ".cpp " << lname << ".h" << std::endl; m << "lib" << lname << "_la_CXXFLAGS = $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) -I$(top_srcdir)/include" << std::endl; m << "lib" << lname << "_la_LIBADD = $(top_srcdir)/src/hed/libs/loader/libarcloader.la $(top_srcdir)/src/hed/libs/message/libarcmessage.la $(top_srcdir)/src/hed/libs/security/libarcsecurity.la $(top_srcdir)/src/hed/libs/ws/libarcws.la $(top_srcdir)/src/hed/libs/common/libarccommon.la" << std::endl; m << "lib" << lname << "_la_LDFLAGS = -no-undefined -avoid-version -module" << std::endl; m.close(); } int main(int argc, char **argv) { bool parse_schema = false; if ((argc > 1) && (strcmp(argv[1],"-s") == 0)) { parse_schema = true; --argc; ++argv; } if (argc < 3) { std::cerr << "Invalid arguments" << std::endl; return -1; } ns["wsdl"] = "http://schemas.xmlsoap.org/wsdl/"; std::string xml_str = Glib::file_get_contents(argv[1]); Arc::XMLNode xml(xml_str); if (xml == false) { std::cerr << "Failed parse XML! 
" << std::endl; return -1; } /* { std::string str; xml.GetXML(str); std::cout << str << std::endl; }; */ // xml.Namespaces(ns); std::string name = argv[2]; std::string lname = name; std::transform(name.begin(), name.end(), lname.begin(), to_lower()); std::string header_path = lname; header_path += ".h"; std::string cpp_path = lname; cpp_path += ".cpp"; std::ofstream h(header_path.c_str()); if (!h) { std::cerr << "Cannot create: " << header_path << std::endl; exit(1); } std::ofstream cpp(cpp_path.c_str()); if (!cpp) { unlink (header_path.c_str()); std::cerr << "Cannot create: " << cpp_path << std::endl; } if(parse_schema) { if(!schemaconv(xml,h,cpp,lname)) return 1; return 0; } std_h_header(name, h); h_public_part(name, h); h_private_part(name, h, xml); std_h_footer(name, h); std_cpp_header(name, cpp); cpp_public_part(name, cpp, xml); cpp_private_part(name, cpp, xml); std_cpp_footer(name, cpp); h.close(); cpp.close(); gen_makefile_am(name); return 0; } nordugrid-arc-5.4.2/src/utils/hed/PaxHeaders.7502/complextype.cpp0000644000000000000000000000012411455353323022745 xustar000000000000000027 mtime=1286985427.270721 27 atime=1513200575.286711 30 ctime=1513200664.635804183 nordugrid-arc-5.4.2/src/utils/hed/complextype.cpp0000644000175000002070000002200111455353323023005 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "schemaconv.h" using namespace Arc; // -------------- Complex type -------------- // 1 - class/parent name (C++,XML) static const char* complex_type_header_pattern_h = "\ class %1$s: public Arc::XMLNode {\n\ public:\n\ "; // 1 - class/parent name (C++,XML) static const char* complex_type_footer_pattern_h = "\ static %1$s New(Arc::XMLNode parent);\n\ %1$s(Arc::XMLNode node);\n\ };\n\ \n\ "; // 1 - class/parent name (C++,XML) // 2 - class namespace (XML) static const char* complex_type_constructor_header_pattern_cpp = "\ %3$s%1$s::%1$s(Arc::XMLNode node) {\n\ Arc::NS ns;\n\ ns[\"ns\"]=\"%2$s\";\n\ Namespaces(ns);\n\ "; // 1 - class/parent name (C++,XML) // 2 - class namespace (XML) static const char* complex_type_constructor_footer_pattern_cpp = "\ }\n\ \n\ %3$s%1$s %3$s%1$s::New(Arc::XMLNode parent) {\n\ Arc::NS ns;\n\ ns[\"ns\"]=\"%2$s\";\n\ %1$s el(parent.NewChild(\"ns:%1$s\",ns));\n\ return el;\n\ }\n\ "; // 1 - element name (C++,XML) // 2 - element type (C++) static const char* mandatory_element_pattern_h = "\ %2$s %1$s(void);\n\ "; // 1 - element name (C++,XML) // 2 - element type (C++) static const char* optional_element_pattern_h = "\ %2$s %1$s(bool create = false);\n\ "; // 1 - element name (C++,XML) // 2 - element type (C++) static const char* array_element_pattern_h = "\ %2$s %1$s(int index,bool create = false);\n\ "; // 1 - element name (C++,XML) static const char* mandatory_element_constructor_pattern_cpp = "\ (void)%1$s();\n\ "; // 1 - element name (C++,XML) // 2 - element type (C++) // 3 - class/parent name (C++,XML) // 4 - element namespace prefix (XML) // 5 - element type (XML) static const char* mandatory_element_method_pattern_cpp = "\ %2$s %3$s::%1$s(void) {\n\ Arc::XMLNode node = operator[](\"%4$s:%5$s\");\n\ if(!node) node = NewChild(\"%4$s:%5$s\");\n\ return node;\n\ }\n\ "; // 1 - element name (C++,XML) static const char* optional_element_constructor_pattern_cpp = "\ "; // 1 - element name (C++,XML) // 2 - element type (C++) // 3 - class/parent name (C++,XML) // 4 - element namespace prefix (XML) // 5 - element type (XML) static const char* optional_element_method_pattern_cpp = "\ %2$s 
%3$s::%1$s(bool create) {\n\ Arc::XMLNode node = operator[](\"%4$s:%5$s\");\n\ if(create && !node) node = NewChild(\"%4$s:%5$s\");\n\ return node;\n\ }\n\ "; // 1 - element name (C++,XML) // 2 - minimal number of elements static const char* array_element_constructor_pattern_cpp = "\ if(%2$s > 0) (void)%1$s(%2$s - 1);\n\ "; // 1 - element name (C++,XML) // 2 - element type (C++) // 3 - class/parent name (C++,XML) // 4 - element namespace prefix (XML) // 5 - element type (XML) // 6 - minimal number of elements static const char* array_element_method_pattern_cpp = "\ %2$s %3$s::%1$s(int index,bool create) {\n\ if(index < %6$s) create = true;\n\ Arc::XMLNode node = operator[](\"%4$s:%5$s\")[index];\n\ if(create && !node) {\n\ for(int n = 0;n((std::string)n); }; n=(std::string)(element.Attribute("maxOccurs")); if(!n.empty()) { if(n == "unbounded") { maxoccurs=-1; } else { maxoccurs=stringto((std::string)n); }; }; if(maxoccurs != -1) { if(maxoccurs < minoccurs) { std::cout<<" maxOccurs is smaller than minOccurs"</dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) $(MANS) installdirs: for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-binPROGRAMS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man1 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-binPROGRAMS uninstall-man uninstall-man: uninstall-man1 .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \ clean-generic clean-libtool ctags distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-binPROGRAMS install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-man1 install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags uninstall \ uninstall-am uninstall-binPROGRAMS uninstall-man \ uninstall-man1 # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/utils/hed/PaxHeaders.7502/arcplugin.cpp0000644000000000000000000000012312056232245022354 xustar000000000000000027 mtime=1354314917.841989 27 atime=1513200575.284711 29 ctime=1513200664.62980411 nordugrid-arc-5.4.2/src/utils/hed/arcplugin.cpp0000644000175000002070000002031212056232245022420 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include std::list< std::pair > priorities_map; static uint32_t map_priority(const std::string& str1, const std::string& str2) { for(std::list< std::pair >::iterator p = priorities_map.begin(); p != priorities_map.end(); ++p) { if(p->first.match(str1) || p->first.match(str2)) return p->second; } return ARC_PLUGIN_DEFAULT_PRIORITY; } static std::string encode_for_var(uint32_t v) { return "\"" + Arc::tostring(v) + "\""; } static std::string encode_for_var(const char* str) { std::string stro = "\""; stro += str; std::string::size_type p = 1; for(;;++p) { p = stro.find_first_of("\"\\",p); if(p == std::string::npos) break; stro.insert(p, "\\"); ++p; } stro += "\""; return stro; } static std::string replace_file_suffix(const std::string& path,const std::string& newsuffix) { std::string newpath = path; std::string::size_type name_p = newpath.rfind(G_DIR_SEPARATOR_S); if(name_p == std::string::npos) { name_p = 0; } else { ++name_p; } std::string::size_type suffix_p = newpath.find('.',name_p); if(suffix_p != std::string::npos) { newpath.resize(suffix_p); } newpath += "." 
+ newsuffix; return newpath; } static bool process_module(const std::string& plugin_filename, bool create_apd) { Arc::PluginDescriptor dummy_desc[2]; memset(dummy_desc,0,sizeof(dummy_desc)); dummy_desc[0].name = ""; dummy_desc[0].kind = ""; dummy_desc[0].description = ""; dummy_desc[0].version = 0; dummy_desc[0].instance = (Arc::get_plugin_instance)dummy_desc; std::string descriptor_filename = replace_file_suffix(plugin_filename,"apd"); Glib::ModuleFlags flags = Glib::ModuleFlags(0); flags|=Glib::MODULE_BIND_LAZY; Glib::Module *module = new Glib::Module(plugin_filename,flags); if ((!module) || (!(*module))) { std::cerr << "Failed to load module " << plugin_filename << ": " << Glib::Module::get_last_error() << std::endl; return false; } std::cout << "Loaded module " << plugin_filename << std::endl; std::cout << std::endl; void *ptr = NULL; if(!module->get_symbol(ARC_PLUGINS_TABLE_SYMB,ptr)) { std::cerr << "Module " << plugin_filename << " is not an ARC plugin: " << Glib::Module::get_last_error() << std::endl; if(create_apd) { std::cerr << "Dummy descriptor file will be created to avoid loading this module at all" << std::endl; // This is needed to make rpmlint happy. ptr = dummy_desc; } //delete module; //return -1; }; Arc::PluginDescriptor* desc = (Arc::PluginDescriptor*)ptr; std::ofstream apd; if(create_apd) { apd.open(descriptor_filename.c_str()); if(!apd) { std::cerr << "Failed to create descriptor file " << descriptor_filename << std::endl; return false; }; }; for(;desc;++desc) { if(desc->name == NULL) break; if(desc->kind == NULL) break; if(desc->instance == NULL) break; if(create_apd) { uint32_t priority = map_priority(desc->name, desc->kind); apd << "name=" << encode_for_var(desc->name) << std::endl; apd << "kind=" << encode_for_var(desc->kind) << std::endl; if (desc->description != NULL) { apd << "description=" << encode_for_var(desc->description) << std::endl; } apd << "version=" << encode_for_var(desc->version) << std::endl; apd << "priority=" << encode_for_var(priority) << std::endl; apd << std::endl; // end of description mark } else { std::cout << "name: " << desc->name << std::endl; std::cout << "kind: " << desc->kind << std::endl; if (desc->description != NULL) { std::cout << "description: " << desc->description << std::endl; } std::cout << "version: " << desc->version << std::endl; std::cout << std::endl; }; }; if(create_apd) { apd.close(); std::cout << "Created descriptor " << descriptor_filename << std::endl; }; // We are not unloading module because it may be not suitable // for unloading or it may be library which may fail unloading // after it was loaded with dlopen(). //delete module; return true; } int main(int argc, char **argv) { const std::string modsuffix("." 
G_MODULE_SUFFIX); bool create_apd = false; bool recursive = false; while (argc > 1) { if (strcmp(argv[1],"-c") == 0) { create_apd = true; --argc; ++argv; } else if(strcmp(argv[1],"-r") == 0) { recursive = true; --argc; ++argv; } else if(strcmp(argv[1],"-p") == 0) { if(argc <= 2) { std::cerr << "Missing option for -p" << std::endl; return -1; } uint32_t priority; std::string option = argv[2]; std::string::size_type comma = option.find(','); if(comma == std::string::npos) { std::cerr << "Missing , in -p option" << std::endl; return -1; } if(!Arc::stringto(option.substr(0,comma),priority)) { std::cerr << "Can't parse prority number " << option.substr(0,comma) << std::endl; return -1; } std::cerr<<"+++ "<(Arc::RegularExpression(option.substr(comma+1)),priority)); --argc; ++argv; --argc; ++argv; } else if (strcmp(argv[1],"-h") == 0) { std::cout << "arcplugin [-c] [-r] [-p priority,regex] [-h] plugin_path [plugin_path [...]]" << std::endl; std::cout << " -c If specified then APD file is created using same name" << std::endl; std::cout << " as ARC plugin with suffix replaced with .apd." << std::endl; std::cout << " -r If specified operation is fully recursive." << std::endl; std::cout << " -p Defines which priority to be assigned for each plugin." << std::endl; std::cout << " Each plugin's kind and name attributes are matched" << std::endl; std::cout << " specified regex. One which matches gets specified" << std::endl; std::cout << " This option can be specified multiple times." << std::endl; std::cout << " -h prints this help and exits." << std::endl; std::cout << " plugin_path is full path to ARC plugin loadable module" << std::endl; std::cout << " file or directory containing such modules." << std::endl; return 0; } else { break; }; }; if (argc < 2) { std::cerr << "Missing arguments" << std::endl; return -1; }; std::list paths; for(int n = 1; n < argc; ++n) paths.push_back(argv[n]); int user_paths = paths.size(); int num = 0; for(std::list::iterator path = paths.begin(); path != paths.end(); ++path) { try { Glib::Dir dir(*path); if((!recursive) && (num >= user_paths)) continue; for (Glib::DirIterator file = dir.begin(); file != dir.end(); file++) { std::string name = *file; if(name == ".") continue; if(name == "..") continue; paths.push_back(Glib::build_filename(*path, name)); } } catch (Glib::FileError&) { if(path->length() <= modsuffix.length()) continue; if(path->substr(path->length()-modsuffix.length()) != modsuffix) continue; process_module(*path, create_apd); } ++num; } //return 0; // Do quick exit to avoid possible problems with module unloading _exit(0); } nordugrid-arc-5.4.2/src/utils/hed/PaxHeaders.7502/common.cpp0000644000000000000000000000012411441417147021664 xustar000000000000000027 mtime=1283858023.146715 27 atime=1513200575.286711 30 ctime=1513200664.633804159 nordugrid-arc-5.4.2/src/utils/hed/common.cpp0000644000175000002070000000303611441417147021733 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "schemaconv.h" using namespace Arc; void strprintf(std::ostream& out,const char* fmt, const std::string& arg1,const std::string& arg2, const std::string& arg3,const std::string& arg4, const std::string& arg5,const std::string& arg6, const std::string& arg7,const std::string& arg8, const std::string& arg9,const std::string& arg10) { char buf[65536]; buf[0]=0; snprintf(buf,sizeof(buf)-1,fmt,arg1.c_str(),arg2.c_str(),arg3.c_str(), arg4.c_str(),arg5.c_str(),arg6.c_str(), arg7.c_str(),arg8.c_str(),arg9.c_str(), arg10.c_str()); 
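// The fmt strings passed to strprintf() use positional conversions such as
// %1$s ... %10$s (a POSIX printf extension), so a single pattern can reference
// the same argument several times. The assignment on the next line is a
// defensive guard: it forces NUL termination of the buffer even if the
// formatted output was truncated to fit.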
buf[sizeof(buf)-1]=0; out<<buf; #include #include // common void strprintf(std::ostream& out,const char* fmt, const std::string& arg1 = "",const std::string& arg2 = "", const std::string& arg3 = "",const std::string& arg4 = "", const std::string& arg5 = "",const std::string& arg6 = "", const std::string& arg7 = "",const std::string& arg8 = "", const std::string& arg9 = "",const std::string& arg10 = ""); void strprintf(std::string& out,const char* fmt, const std::string& arg1 = "",const std::string& arg2 = "", const std::string& arg3 = "",const std::string& arg4 = "", const std::string& arg5 = "",const std::string& arg6 = "", const std::string& arg7 = "",const std::string& arg8 = "", const std::string& arg9 = "",const std::string& arg10 = ""); // simple type void simpletypeprint(Arc::XMLNode stype,const std::string& ns,std::ostream& h_file,std::ostream& cpp_file); void simpletypeprintnamed(const std::string& cppspace,const std::string& ntype,Arc::XMLNode stype,const std::string& ns,std::ostream& h_file,std::ostream& cpp_file); // complex type void complextypeprint(Arc::XMLNode ctype,const std::string& ns,std::ostream& h_file,std::ostream& cpp_file); // entry point bool schemaconv(Arc::XMLNode wsdl,std::ostream& h_file,std::ostream& cpp_file,const std::string& name); nordugrid-arc-5.4.2/src/utils/hed/PaxHeaders.7502/arcplugin.1.in0000644000000000000000000000012712123705613022342 xustar000000000000000027 mtime=1364167563.653962 30 atime=1513200652.516655962 30 ctime=1513200664.627804086 nordugrid-arc-5.4.2/src/utils/hed/arcplugin.1.in0000644000175000002070000000263712123705613022404 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARCPLUGIN 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arcplugin \- ARC plugin management utility .SH DESCRIPTION The .B arcplugin command prints the description of an ARC plugin or creates an ARC Plugin Descriptor (APD) file. .SH SYNOPSIS .B arcplugin [-c] [-r] [-p priority,regex] [-h] plugin_path [plugin_path [...]] .SH OPTIONS .IP "\fB\ -h \fR" Prints the help message and exits. .IP "\fB\ -c \fR" If specified, an APD file is created using the same name as the ARC plugin with the suffix replaced by .apd. .IP "\fB\ -r \fR" If specified, the operation is fully recursive. .IP "\fB\ -p \fR" Defines which priority is assigned to each plugin. Each plugin's kind and name attributes are matched against the specified regex; the first -p option whose regex matches determines the priority assigned to that plugin. This option can be specified multiple times. The priority is a 32 bit positive integer. The default value is 128. .IP "\fB\ plugin_path \fR" Full path to an ARC plugin loadable module file, or to a directory containing such modules. In the latter case the operation recurses once into that directory; for a fully recursive operation use -r. .PP .SH REPORTING BUGS Report bugs to http://bugzilla.nordugrid.org .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC.
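.\" Illustrative example (hypothetical install path and plugin kind): create
.\" .apd descriptor files for every plugin module under /usr/lib64/arc,
.\" recursing into subdirectories, and assign priority 200 instead of the
.\" default 128 to any plugin whose kind or name matches the regex HED:DMC:
.\"
.\"   arcplugin -c -r -p 200,HED:DMC /usr/lib64/arc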
Please report bugs and feature requests to http://bugzilla.nordugrid.org nordugrid-arc-5.4.2/src/utils/hed/PaxHeaders.7502/schemaconv.cpp0000644000000000000000000000012411441417147022522 xustar000000000000000027 mtime=1283858023.146715 27 atime=1513200575.284711 30 ctime=1513200664.631804135 nordugrid-arc-5.4.2/src/utils/hed/schemaconv.cpp0000644000175000002070000000333711441417147022575 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "schemaconv.h" using namespace Arc; bool schemaconv(XMLNode wsdl,std::ostream& h_file,std::ostream& cpp_file,const std::string& name) { h_file<<"\ #include \n\ \n\ namespace "< #endif #include #include #include #include #include "schemaconv.h" using namespace Arc; // -------- Simple type ------------ // 1 - class/parent name (C++,XML) static const char* simple_type_pattern_h = "\ class %1$s: public Arc::XMLNode {\n\ public:\n\ static %1$s New(Arc::XMLNode parent);\n\ %1$s(Arc::XMLNode node);\n\ };\n\ \n\ "; // 1 - class/parent name (C++,XML) // 2 - class namespace (XML) static const char* simple_type_pattern_cpp = "\ %1$s %1$s::New(Arc::XMLNode parent) {\n\ Arc::NS ns;\n\ ns[\"ns\"]=\"%2$s\";\n\ %1$s el(parent.NewChild(\"ns:%1$s\",ns));\n\ return el;\n\ }\n\ \n\ %1$s::%1$s(Arc::XMLNode node):Arc::XMLNode(node){\n\ Arc::NS ns;\n\ ns[\"ns\"]=\"%2$s\";\n\ Namespaces(ns);\n\ }\n\ \n\ "; void simpletypeprintnamed(const std::string& /* cppspace */,const std::string& ntype,XMLNode /* stype */,const std::string& ns,std::ostream& h_file,std::ostream& cpp_file) { strprintf(h_file,simple_type_pattern_h,ntype); strprintf(cpp_file,simple_type_pattern_cpp,ntype,ns); } void simpletypeprint(XMLNode stype,const std::string& ns,std::ostream& h_file,std::ostream& cpp_file) { std::string ntype; if(stype.Name() == "simpleType") { ntype = (std::string)(stype.Attribute("name")); h_file<<"//simple type: "< #include #include #include #include static Arc::Logger logger(Arc::Logger::getRootLogger(), "libarcdatatest"); /* * Use -v to enable logging when running a unit test directly. * E.g.: ./hed/libs/common/test/LoggerTest -v */ int main(int argc, char **argv) { Arc::LogStream logcerr(std::cerr); Arc::ArcLocation::Init(argv[0]); if (argc > 1 && strcmp(argv[1], "-v") == 0) { // set logging Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::DEBUG); } CppUnit::TextUi::TestRunner runner; runner.addTest(CppUnit::TestFactoryRegistry::getRegistry().makeTest()); runner.setOutputter(CppUnit::CompilerOutputter::defaultOutputter (&runner.result(), std::cerr)); bool wasSuccessful = runner.run(); return wasSuccessful ? 
0 : 1; } nordugrid-arc-5.4.2/src/PaxHeaders.7502/tests0000644000000000000000000000013213214316026017046 xustar000000000000000030 mtime=1513200662.194774329 30 atime=1513200668.719854133 30 ctime=1513200662.194774329 nordugrid-arc-5.4.2/src/tests/0000755000175000002070000000000013214316026017171 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/tests/PaxHeaders.7502/Makefile.am0000644000000000000000000000012611455530677021201 xustar000000000000000027 mtime=1287041471.636847 29 atime=1513200605.52708126 30 ctime=1513200662.188774255 nordugrid-arc-5.4.2/src/tests/Makefile.am0000644000175000002070000000012311455530677021240 0ustar00mockbuildmock00000000000000SUBDIRS = echo xpath policy-delegation delegation translator arcpolicy perf client nordugrid-arc-5.4.2/src/tests/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315735021176 xustar000000000000000030 mtime=1513200605.560081664 30 atime=1513200652.304653369 30 ctime=1513200662.188774255 nordugrid-arc-5.4.2/src/tests/Makefile.in0000644000175000002070000005554613214315735021263 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/tests DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive 
installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = 
@GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = 
@SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = echo xpath policy-delegation delegation translator arcpolicy perf client all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/tests/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/tests/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
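# Example (illustrative): as the comment earlier in this generated file notes,
# `make' variables can be overridden on the command line instead of editing
# generated Makefiles, e.g.:
#   make check CXXFLAGS='-O0 -g'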
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/tests/PaxHeaders.7502/translator0000644000000000000000000000013213214316026021237 xustar000000000000000030 mtime=1513200662.323775906 30 atime=1513200668.719854133 30 ctime=1513200662.323775906 nordugrid-arc-5.4.2/src/tests/translator/0000755000175000002070000000000013214316026021362 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/tests/translator/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515023357 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200606.072087926 30 ctime=1513200662.321775882 nordugrid-arc-5.4.2/src/tests/translator/Makefile.am0000644000175000002070000000073112052416515023422 0ustar00mockbuildmock00000000000000noinst_PROGRAMS = translator SOURCES = translator.cpp translator_SOURCES = $(SOURCES) translator_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) translator_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(EXTRA_LIBS) nordugrid-arc-5.4.2/src/tests/translator/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315736023370 xustar000000000000000030 mtime=1513200606.118088489 30 atime=1513200652.410654665 30 ctime=1513200662.322775894 nordugrid-arc-5.4.2/src/tests/translator/Makefile.in0000644000175000002070000005602513214315736023446 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. 
# @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ noinst_PROGRAMS = translator$(EXEEXT) subdir = src/tests/translator DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = PROGRAMS = $(noinst_PROGRAMS) am__objects_1 = translator-translator.$(OBJEXT) am_translator_OBJECTS = $(am__objects_1) translator_OBJECTS = $(am_translator_OBJECTS) am__DEPENDENCIES_1 = translator_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) translator_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(translator_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DIST_SOURCES = $(translator_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = 
@pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION 
= @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ 
datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SOURCES = translator.cpp translator_SOURCES = $(SOURCES) translator_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) translator_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(EXTRA_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/tests/translator/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/tests/translator/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list translator$(EXEEXT): $(translator_OBJECTS) $(translator_DEPENDENCIES) @rm -f translator$(EXEEXT) $(translator_LINK) $(translator_OBJECTS) $(translator_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/translator-translator.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< translator-translator.o: translator.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(translator_CXXFLAGS) $(CXXFLAGS) -MT translator-translator.o -MD -MP -MF $(DEPDIR)/translator-translator.Tpo -c -o translator-translator.o `test -f 'translator.cpp' || echo '$(srcdir)/'`translator.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/translator-translator.Tpo $(DEPDIR)/translator-translator.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='translator.cpp' object='translator-translator.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(translator_CXXFLAGS) $(CXXFLAGS) -c -o translator-translator.o `test -f 'translator.cpp' || echo '$(srcdir)/'`translator.cpp translator-translator.obj: translator.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(translator_CXXFLAGS) $(CXXFLAGS) -MT translator-translator.obj -MD -MP -MF 
$(DEPDIR)/translator-translator.Tpo -c -o translator-translator.obj `if test -f 'translator.cpp'; then $(CYGPATH_W) 'translator.cpp'; else $(CYGPATH_W) '$(srcdir)/translator.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/translator-translator.Tpo $(DEPDIR)/translator-translator.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='translator.cpp' object='translator-translator.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(translator_CXXFLAGS) $(CXXFLAGS) -c -o translator-translator.obj `if test -f 'translator.cpp'; then $(CYGPATH_W) 'translator.cpp'; else $(CYGPATH_W) '$(srcdir)/translator.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstPROGRAMS \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstPROGRAMS ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/tests/translator/PaxHeaders.7502/translator.cpp0000644000000000000000000000012412045235201024206 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200575.455713 30 ctime=1513200662.323775906 nordugrid-arc-5.4.2/src/tests/translator/translator.cpp0000644000175000002070000000170412045235201024255 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include int main() { std::cout << " [ JobDescription tester ] " << std::endl; std::cout << std::endl << " [ Parsing ] " << std::endl << std::endl; std::string xrsl_string = "&(executable=\"test.sh\") \ (arguments=\"pal\") \ (stdout=\"stdout.txt\") \ (join=\"yes\") \ (inputfiles=(\"myjob.sh\" \"test.sh\")) \ (outputfiles=(\"stdout.txt\" \"download.txt\")) \ (CPUtime=10) \ (environment=(\"ATLAS\" \"/opt/atlas\") (\"CERN\" \"/cern\")) \ (jobName=\"MyTestJob\")"; std::list jds; if (!Arc::JobDescription::Parse( xrsl_string , jds ) || jds.empty()) { return 1; } jds.front().SaveToStream(std::cout, "user"); std::string jobdescstring; jds.front().UnParse(jobdescstring, "egee:jdl"); std::cout << std::endl << " [ egee:jdl ] " << std::endl << std::endl << jobdescstring << std::endl; return 0; } nordugrid-arc-5.4.2/src/tests/PaxHeaders.7502/perf0000644000000000000000000000013213214316026020002 xustar000000000000000030 mtime=1513200662.377776567 30 atime=1513200668.719854133 30 ctime=1513200662.377776567 nordugrid-arc-5.4.2/src/tests/perf/0000755000175000002070000000000013214316026020125 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/tests/perf/PaxHeaders.7502/perftest_slcs.cpp0000644000000000000000000000012412301125744023445 xustar000000000000000027 mtime=1392815076.546455 27 atime=1513200575.402712 30 ctime=1513200662.377776567 nordugrid-arc-5.4.2/src/tests/perf/perftest_slcs.cpp0000644000175000002070000001765412301125744023527 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif // perftest_slcs.cpp #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include // Some global shared variables... Glib::Mutex* mutex; bool run; int finishedThreads; unsigned long completedRequests; unsigned long failedRequests; unsigned long totalRequests; Glib::TimeVal completedTime; Glib::TimeVal failedTime; Glib::TimeVal totalTime; std::string url_str; std::string idp_name; std::string username; std::string password; // Round off a double to an integer. int Round(double x){ return int(x+0.5); } // Send requests and collect statistics. void sendRequests(){ // Some variables... unsigned long completedRequests = 0; unsigned long failedRequests = 0; Glib::TimeVal completedTime(0,0); Glib::TimeVal failedTime(0,0); Glib::TimeVal tBefore; Glib::TimeVal tAfter; bool connected; Arc::URL url(url_str); Arc::MCCConfig mcc_cfg; mcc_cfg.AddCAFile("../../services/slcs/cacert2.pem"); mcc_cfg.AddCADir("../echo/certificates"); Arc::NS slcs_ns; slcs_ns["slcs"] = "http://www.nordugrid.org/schemas/slcs"; while(run){ // Create a Client. Arc::ClientSOAPwithSAML2SSO *client = NULL; client = new Arc::ClientSOAPwithSAML2SSO(mcc_cfg,url); connected=true; //while(run and connected){ // Prepare the request and time it. 
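// Each pass of this loop generates a fresh key pair and certificate request
// (12 hour lifetime, 1024 bit key), hands the request string produced by
// Arc::Credential::GenerateRequest() to the SLCS service inside a SOAP body
// of roughly the form
//   <GetSLCSCertificateRequest>
//     <X509Request>...certificate request...</X509Request>
//   </GetSLCSCertificateRequest>
// and times the full round trip.  Successful and failed calls are tallied
// separately so that distinct timing totals are kept for each outcome.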
Arc::PayloadSOAP req(slcs_ns); tBefore.assign_current_time(); Arc::Time t; int lifetime = 12; int keysize = 1024; Arc::Credential request(t, Arc::Period(lifetime * 3600), keysize, "EEC"); std::string cert_req_str; if (!request.GenerateRequest(cert_req_str)) throw std::runtime_error("Failed to generate certificate request"); std::string private_key; request.OutputPrivatekey(private_key); req.NewChild("GetSLCSCertificateRequest").NewChild("X509Request") = cert_req_str; // Send the request Arc::PayloadSOAP* resp = NULL; Arc::MCC_Status status = client->process(&req,&resp, idp_name, username, password); tAfter.assign_current_time(); if(!status) { // Request failed. failedRequests++; failedTime+=tAfter-tBefore; connected=false; } else { if(resp == NULL) { // Response was not SOAP or no response at all. failedRequests++; failedTime+=tAfter-tBefore; connected=false; } else { if (std::string((*resp)["GetSLCSCertificateResponse"]["X509Certificate"]).size()==0){ // The response was not what it should be. failedRequests++; failedTime+=tAfter-tBefore; connected=false; } else{ // Everything worked just fine! completedRequests++; completedTime+=tAfter-tBefore; std::string cert_str = (std::string)((*resp)["GetSLCSCertificateResponse"]["X509Certificate"]); std::string ca_str = (std::string)((*resp)["GetSLCSCertificateResponse"]["CACertificate"]); } } } if(resp) delete resp; //} if(client) delete client; } // Update global variables. Glib::Mutex::Lock lock(*mutex); ::completedRequests+=completedRequests; ::failedRequests+=failedRequests; ::completedTime+=completedTime; ::failedTime+=failedTime; finishedThreads++; std::cout << "Number of finished threads: " << finishedThreads << std::endl; } int main(int argc, char* argv[]){ // Some variables... int numberOfThreads; int duration; int i; Glib::Thread** threads; const char* config_file = NULL; int debug_level = -1; Arc::LogStream logcerr(std::cerr); // Process options - quick hack, must use Glib options later while(argc >= 3) { if(strcmp(argv[1],"-c") == 0) { config_file = argv[2]; argv[2]=argv[0]; argv+=2; argc-=2; } else if(strcmp(argv[1],"-d") == 0) { debug_level=Arc::istring_to_level(argv[2]); argv[2]=argv[0]; argv+=2; argc-=2; } else { break; }; } if(debug_level >= 0) { Arc::Logger::getRootLogger().setThreshold((Arc::LogLevel)debug_level); Arc::Logger::getRootLogger().addDestination(logcerr); } // Extract command line arguments. if (argc!=7){ std::cerr << "Wrong number of arguments!" << std::endl << std::endl << "Usage:" << std::endl << "perftest [-c config] [-d debug] url idpname username password threads duration" << std::endl << std::endl << "Arguments:" << std::endl << "url The url of the slcs service." << std::endl << "idpname The name of the SP, e.g. https://squark.uio.no/idp/shibboleth" << std::endl << "username The username to IdP " << std::endl << "password The password to IdP " << std::endl << "threads The number of concurrent requests." << std::endl << "duration The duration of the test in seconds." << std::endl << "config The file containing client chain XML configuration with " << std::endl << " 'soap' entry point and HOSTNAME, PORTNUMBER and PATH " << std::endl << " keyword for hostname, port and HTTP path of 'echo' service." << std::endl << "debug The textual representation of desired debug level. Available " << std::endl << " levels: DEBUG, VERBOSE, INFO, WARNING, ERROR, FATAL." 
<< std::endl; exit(EXIT_FAILURE); } url_str = std::string(argv[1]); idp_name = std::string(argv[2]); username = std::string(argv[3]); password = std::string(argv[4]); numberOfThreads = atoi(argv[5]); duration = atoi(argv[6]); Arc::init_xmlsec(); // Start threads. run=true; finishedThreads=0; //Glib::thread_init(); mutex=new Glib::Mutex; threads = new Glib::Thread*[numberOfThreads]; for (i=0; i/dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) $(MANS) installdirs: for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
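# In this directory clean-am also removes the programs built here
# (clean-binPROGRAMS, clean-noinstPROGRAMS), while the man pages are handled
# through install-man/install-man1 and uninstall-man1, hooked into
# install-data-am and uninstall-am below.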
clean: clean-am clean-am: clean-binPROGRAMS clean-generic clean-libtool \ clean-noinstPROGRAMS mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-binPROGRAMS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man1 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-binPROGRAMS uninstall-man uninstall-man: uninstall-man1 .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \ clean-generic clean-libtool clean-noinstPROGRAMS ctags \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-binPROGRAMS \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-man1 \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am \ uninstall-binPROGRAMS uninstall-man uninstall-man1 # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/tests/perf/PaxHeaders.7502/perftest_deleg_bydelegclient.cpp0000644000000000000000000000012412301125744026453 xustar000000000000000027 mtime=1392815076.546455 27 atime=1513200575.404712 30 ctime=1513200662.373776518 nordugrid-arc-5.4.2/src/tests/perf/perftest_deleg_bydelegclient.cpp0000644000175000002070000001445112301125744026525 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif // perftest_deleg_bydelegclient.cpp #include #include #include #include #include #include #include #include #include #include #include #include #include #include // Some global shared variables... Glib::Mutex* mutex; bool run; int finishedThreads; unsigned long completedRequests; unsigned long failedRequests; unsigned long totalRequests; Glib::TimeVal completedTime; Glib::TimeVal failedTime; Glib::TimeVal totalTime; std::string url_str; // Round off a double to an integer. int Round(double x){ return int(x+0.5); } // Send requests and collect statistics. void sendRequests(){ // Some variables... unsigned long completedRequests = 0; unsigned long failedRequests = 0; Glib::TimeVal completedTime(0,0); Glib::TimeVal failedTime(0,0); Glib::TimeVal tBefore; Glib::TimeVal tAfter; bool connected; //std::string url_str("https://127.0.0.1:60000/echo"); Arc::URL url(url_str); Arc::MCCConfig mcc_cfg; mcc_cfg.AddPrivateKey("../echo/userkey-nopass.pem"); mcc_cfg.AddCertificate("../echo/usercert.pem"); mcc_cfg.AddCAFile("../echo/testcacert.pem"); mcc_cfg.AddCADir("../echo/certificates"); while(run){ // Create a Client. 
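// Arc::ClientX509Delegation implements the client side of the ARC X.509
// delegation handshake.  The loop below repeatedly calls
// createDelegation(Arc::DELEG_ARC, ...) on the same client object, timing
// each attempt and, as in the other perftest_* programs, adding the elapsed
// time to either the completed or the failed tally.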
Arc::ClientX509Delegation *client = NULL; client = new Arc::ClientX509Delegation(mcc_cfg,url); connected=true; while(run and connected){ // Send the delegation request and time it. tBefore.assign_current_time(); std::string arc_delegation_id; bool res = false; if(client) { if(!(res = client->createDelegation(Arc::DELEG_ARC, arc_delegation_id))) { std::cerr<<"Delegation to ARC delegation service failed"<= 3) { if(strcmp(argv[1],"-c") == 0) { config_file = argv[2]; argv[2]=argv[0]; argv+=2; argc-=2; } else if(strcmp(argv[1],"-d") == 0) { debug_level=Arc::istring_to_level(argv[2]); argv[2]=argv[0]; argv+=2; argc-=2; } else { break; }; } if(debug_level >= 0) { Arc::Logger::getRootLogger().setThreshold((Arc::LogLevel)debug_level); Arc::Logger::getRootLogger().addDestination(logcerr); } // Extract command line arguments. if (argc!=4){ std::cerr << "Wrong number of arguments!" << std::endl << std::endl << "Usage:" << std::endl << "perftest_deleg_bydelegclient [-c config] [-d debug] url threads duration" << std::endl << std::endl << "Arguments:" << std::endl << "url The url of the delegation service." << std::endl << "threads The number of concurrent requests." << std::endl << "duration The duration of the test in seconds." << std::endl << "config The file containing client chain XML configuration with " << std::endl << " 'soap' entry point and HOSTNAME, PORTNUMBER and PATH " << std::endl << " keyword for hostname, port and HTTP path of 'echo' service." << std::endl << "debug The textual representation of desired debug level. Available " << std::endl << " levels: DEBUG, VERBOSE, INFO, WARNING, ERROR, FATAL." << std::endl; exit(EXIT_FAILURE); } url_str = std::string(argv[1]); numberOfThreads = atoi(argv[2]); duration = atoi(argv[3]); // Start threads. run=true; finishedThreads=0; //Glib::thread_init(); mutex=new Glib::Mutex; threads = new Glib::Thread*[numberOfThreads]; for (i=0; i #endif // perftest_cmd.cpp #include #include #include #include #include #include #include #include #include #ifndef WIN32 #include #else #include #endif // Some global shared variables... Glib::Mutex* mutex; int finishedProcesses; unsigned long completedCommands; unsigned long failedCommands; unsigned long totalCommands; Glib::TimeVal completedTime; Glib::TimeVal failedTime; Glib::TimeVal totalTime; int duration; std::string cmd_str; std::vector arglist; // Round off a double to an integer. int Round(double x){ return int(x+0.5); } // Execute a command line void execCommand() { // Some variables... Glib::TimeVal tBefore; Glib::TimeVal tAfter; char **list; int pid; list = (char **)malloc (sizeof(char *) * (arglist.size() + 1)); for (int i = 0;i < arglist.size();i++) list[i] = (char *)arglist[i].c_str(); list[arglist.size()] = NULL; while(Round(::completedTime.as_double() * 1000000) < (duration * 1000000)){ tBefore.assign_current_time(); #ifdef WIN32 STARTUPINFO si = {0}; si.cb = sizeof(si); PROCESS_INFORMATION pi; if(CreateProcess(cmd_str.c_str(),NULL,NULL,NULL,FALSE,0,NULL,NULL,&si,&pi)) { std::cout << "Create process failed: "< #endif // perftest.cpp #include #include #include #include #include #include #include #include #include #include #include #include #include #include // Some global shared variables... Glib::Mutex* mutex; bool run; int finishedThreads; unsigned long completedRequests; unsigned long failedRequests; unsigned long totalRequests; Glib::TimeVal completedTime; Glib::TimeVal failedTime; Glib::TimeVal totalTime; std::string url_str; bool alwaysReconnect = false; // Round off a double to an integer. 
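// Adding 0.5 and truncating rounds to the nearest integer only for
// non-negative input, which is sufficient for the elapsed times and request
// counts it is applied to here.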
int Round(double x){ return int(x+0.5); } // Send requests and collect statistics. void sendRequests(){ // Some variables... unsigned long completedRequests = 0; unsigned long failedRequests = 0; Glib::TimeVal completedTime(0,0); Glib::TimeVal failedTime(0,0); Glib::TimeVal tBefore; Glib::TimeVal tAfter; bool connected; //std::string url_str("https://127.0.0.1:60000/echo"); Arc::URL url(url_str); Arc::MCCConfig mcc_cfg; Arc::UserConfig usercfg(""); usercfg.ApplyToConfig(mcc_cfg); Arc::NS echo_ns; echo_ns["echo"]="http://www.nordugrid.org/schemas/echo"; while(run){ // Create a Client. Arc::ClientSOAP *client = NULL; client = new Arc::ClientSOAP(mcc_cfg,url,60); connected=true; while(run and connected){ // Prepare the request. Arc::PayloadSOAP req(echo_ns); req.NewChild("echo:echo").NewChild("echo:say")="HELLO"; // Send the request and time it. tBefore.assign_current_time(); Arc::PayloadSOAP* resp = NULL; //std::string str; //req.GetXML(str); //std::cout<<"request: "<process(&req,&resp); tAfter.assign_current_time(); if(!status) { // Request failed. failedRequests++; failedTime+=tAfter-tBefore; connected=false; } else { if(resp == NULL) { // Response was not SOAP or no response at all. failedRequests++; failedTime+=tAfter-tBefore; connected=false; } else { //std::string xml; //resp->GetXML(xml); if (std::string((*resp)["echoResponse"]["hear"]).size()==0){ // The response was not what it should be. failedRequests++; failedTime+=tAfter-tBefore; connected=false; } else{ // Everything worked just fine! completedRequests++; completedTime+=tAfter-tBefore; } } } if(resp) delete resp; if(alwaysReconnect) connected=false; } if(client) delete client; } // Update global variables. Glib::Mutex::Lock lock(*mutex); ::completedRequests+=completedRequests; ::failedRequests+=failedRequests; ::completedTime+=completedTime; ::failedTime+=failedTime; finishedThreads++; std::cout << "Number of finished threads: " << finishedThreads << std::endl; } int main(int argc, char* argv[]){ // Some variables... int numberOfThreads; int duration; int i; Glib::Thread** threads; const char* config_file = NULL; int debug_level = -1; Arc::LogStream logcerr(std::cerr); // Process options - quick hack, must use Glib options later while(argc >= 3) { if(strcmp(argv[1],"-c") == 0) { config_file = argv[2]; argv[2]=argv[0]; argv+=2; argc-=2; } else if(strcmp(argv[1],"-d") == 0) { debug_level=Arc::istring_to_level(argv[2]); argv[2]=argv[0]; argv+=2; argc-=2; } else if(strcmp(argv[1],"-r") == 0) { alwaysReconnect=true; argv+=1; argc-=1; } else { break; }; } if(debug_level >= 0) { Arc::Logger::getRootLogger().setThreshold((Arc::LogLevel)debug_level); Arc::Logger::getRootLogger().addDestination(logcerr); } // Extract command line arguments. if (argc!=4){ std::cerr << "Wrong number of arguments!" << std::endl << std::endl << "Usage:" << std::endl << "perftest [-c config] [-d debug] [-r] url threads duration" << std::endl << std::endl << "Arguments:" << std::endl << "url The url of the service." << std::endl << "threads The number of concurrent requests." << std::endl << "duration The duration of the test in seconds." << std::endl << "-c config The file containing client chain XML configuration with " << std::endl << " 'soap' entry point and HOSTNAME, PORTNUMBER and PATH " << std::endl << " keyword for hostname, port and HTTP path of 'echo' service." << std::endl << "-d debug The textual representation of desired debug level. Available " << std::endl << " levels: DEBUG, VERBOSE, INFO, WARNING, ERROR, FATAL." 
<< std::endl << "-r If specified close connection and reconnect after " << std::endl << " every request." << std::endl; exit(EXIT_FAILURE); } url_str = std::string(argv[1]); numberOfThreads = atoi(argv[2]); duration = atoi(argv[3]); // Start threads. run=true; finishedThreads=0; //Glib::thread_init(); mutex=new Glib::Mutex; threads = new Glib::Thread*[numberOfThreads]; for (i=0; i #endif // perftest_deleg_bysechandler.cpp #include #include #include #include #include #include #include #include #include #include #include #include #include // Some global shared variables... Glib::Mutex* mutex; bool run; int finishedThreads; unsigned long completedRequests; unsigned long failedRequests; unsigned long totalRequests; Glib::TimeVal completedTime; Glib::TimeVal failedTime; Glib::TimeVal totalTime; std::string url_str; Arc::XMLNode sechanlder_nd("\ \ x509\ client\ \ https://glueball.uio.no:60000/delegation\ https://squark.uio.no:60000/echo\ ../echo/userkey-nopass.pem\ ../echo/usercert.pem\ \ \ ../echo/testcacert.pem\ ../echo/certificates\ "); // Round off a double to an integer. int Round(double x){ return int(x+0.5); } // Send requests and collect statistics. void sendRequests(){ // Some variables... unsigned long completedRequests = 0; unsigned long failedRequests = 0; Glib::TimeVal completedTime(0,0); Glib::TimeVal failedTime(0,0); Glib::TimeVal tBefore; Glib::TimeVal tAfter; bool connected; //std::string url_str("https://127.0.0.1:60000/echo"); Arc::URL url(url_str); Arc::MCCConfig mcc_cfg; mcc_cfg.AddPrivateKey("../echo/userkey-nopass.pem"); mcc_cfg.AddCertificate("../echo/usercert.pem"); mcc_cfg.AddCAFile("../echo/testcacert.pem"); mcc_cfg.AddCADir("../echo/certificates"); Arc::NS echo_ns; echo_ns["echo"]="http://www.nordugrid.org/schemas/echo"; while(run){ // Create a Client. Arc::ClientSOAP *client = NULL; client = new Arc::ClientSOAP(mcc_cfg,url,60); client->AddSecHandler(sechanlder_nd, "arcshc"); connected=true; //while(run and connected){ // Prepare the request. Arc::PayloadSOAP req(echo_ns); req.NewChild("echo").NewChild("say")="HELLO"; // Send the request and time it. tBefore.assign_current_time(); Arc::PayloadSOAP* resp = NULL; //std::string str; //req.GetXML(str); //std::cout<<"request: "<process(&req,&resp); tAfter.assign_current_time(); if(!status) { // Request failed. failedRequests++; failedTime+=tAfter-tBefore; connected=false; std::cout<<"failure1: "<GetXML(xml); std::cout<<"reponse: "<= 3) { if(strcmp(argv[1],"-c") == 0) { config_file = argv[2]; argv[2]=argv[0]; argv+=2; argc-=2; } else if(strcmp(argv[1],"-d") == 0) { debug_level=Arc::istring_to_level(argv[2]); argv[2]=argv[0]; argv+=2; argc-=2; } else { break; }; } if(debug_level >= 0) { Arc::Logger::getRootLogger().setThreshold((Arc::LogLevel)debug_level); Arc::Logger::getRootLogger().addDestination(logcerr); } // Extract command line arguments. if (argc!=4){ std::cerr << "Wrong number of arguments!" << std::endl << std::endl << "Usage:" << std::endl << "perftest_deleg_bysechandler [-c config] [-d debug] url threads duration" << std::endl << std::endl << "Arguments:" << std::endl << "url The url of the service." << std::endl << "threads The number of concurrent requests." << std::endl << "duration The duration of the test in seconds." << std::endl << "config The file containing client chain XML configuration with " << std::endl << " 'soap' entry point and HOSTNAME, PORTNUMBER and PATH " << std::endl << " keyword for hostname, port and HTTP path of 'echo' service." 
<< std::endl << "debug The textual representation of desired debug level. Available " << std::endl << " levels: DEBUG, VERBOSE, INFO, WARNING, ERROR, FATAL." << std::endl; exit(EXIT_FAILURE); } url_str = std::string(argv[1]); numberOfThreads = atoi(argv[2]); duration = atoi(argv[3]); // Start threads. run=true; finishedThreads=0; //Glib::thread_init(); mutex=new Glib::Mutex; threads = new Glib::Thread*[numberOfThreads]; for (i=0; i #endif // perftest_samlaa.cpp #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include // Some global shared variables... Glib::Mutex* mutex; bool run; int finishedThreads; unsigned long completedRequests; unsigned long failedRequests; unsigned long totalRequests; Glib::TimeVal completedTime; Glib::TimeVal failedTime; Glib::TimeVal totalTime; std::string url_str; bool alwaysReconnect = false; static std::string cert("../../tests/echo/usercert.pem"); static std::string key("../../tests/echo/userkey-nopass.pem"); static std::string cafile("../../tests/echo/testcacert.pem"); static std::string cadir("../../tests/echo/certificates"); #define SAML_NAMESPACE "urn:oasis:names:tc:SAML:2.0:assertion" #define SAMLP_NAMESPACE "urn:oasis:names:tc:SAML:2.0:protocol" // Round off a double to an integer. int Round(double x){ return int(x+0.5); } static void prepareAttributeQuery(Arc::XMLNode& attr_query){ Arc::Credential cred(cert, key, cadir, cafile); std::string local_dn_str = cred.GetDN(); std::string local_dn; size_t pos1 = std::string::npos; size_t pos2; do { //The DN should be like "CN=test,O=UiO,ST=Oslo,C=NO", so we need to change the format here std::string str; pos2 = local_dn_str.find_last_of("/", pos1); if(pos2 != std::string::npos && pos1 == std::string::npos) { str = local_dn_str.substr(pos2+1); local_dn.append(str); pos1 = pos2-1; } else if (pos2 != std::string::npos && pos1 != std::string::npos) { str = local_dn_str.substr(pos2+1, pos1-pos2); local_dn.append(str); pos1 = pos2-1; } if(pos2 != (std::string::npos+1)) local_dn.append(","); }while(pos2 != std::string::npos && pos2 != (std::string::npos+1)); //Compose std::string query_id = Arc::UUID(); attr_query.NewAttribute("ID") = query_id; Arc::Time t; std::string current_time = t.str(Arc::UTCTime); attr_query.NewAttribute("IssueInstant") = current_time; attr_query.NewAttribute("Version") = std::string("2.0"); // std::string issuer_name = local_dn; Arc::XMLNode issuer = attr_query.NewChild("saml:Issuer"); issuer = issuer_name; issuer.NewAttribute("Format") = std::string("urn:oasis:names:tc:SAML:1.1:nameid-format:x509SubjectName"); // Arc::XMLNode subject = attr_query.NewChild("saml:Subject"); Arc::XMLNode name_id = subject.NewChild("saml:NameID"); name_id.NewAttribute("Format")=std::string("urn:oasis:names:tc:SAML:1.1:nameid-format:x509SubjectName"); name_id = local_dn; // Arc::XMLSecNode attr_query_secnd(attr_query); std::string attr_query_idname("ID"); attr_query_secnd.AddSignatureTemplate(attr_query_idname, Arc::XMLSecNode::RSA_SHA1); if(attr_query_secnd.SignNode(key,cert)) { std::cout<<"Succeeded to sign the signature under "< attr_resp = (*soap_resp).Body().Child(0); std::string str; attr_resp.GetXML(str); std::cout<<"SAML Response: "< std::string resp_idname = "ID"; Arc::XMLSecNode attr_resp_secnode(attr_resp); if(attr_resp_secnode.VerifyNode(resp_idname, cafile, cadir)) { std::cout<<"Succeeded to verify the signature under "<"< std::string statuscode_value = attr_resp["Status"]["StatusCode"].Attribute("Value"); 
if(statuscode_value == "urn:oasis:names:tc:SAML:2.0:status:Success") std::cout<<"The StatusCode is Success"< Arc::XMLNode assertion = attr_resp["saml:Assertion"]; std::string assertion_idname = "ID"; Arc::XMLSecNode assertion_secnode(assertion); if(assertion_secnode.VerifyNode(assertion_idname, cafile, cadir,false)) { std::cout<<"Succeeded to verify the signature under "<"<process(&req,&resp); tAfter.assign_current_time(); if(!status) { // Request failed. failedRequests++; failedTime+=tAfter-tBefore; connected=false; } else { if(resp == NULL) { // Response was not SOAP or no response at all. failedRequests++; failedTime+=tAfter-tBefore; connected=false; } else { //std::string xml; //resp->GetXML(xml); if (!verifySAML(resp)){ // The response was not what it should be. failedRequests++; failedTime+=tAfter-tBefore; connected=false; } else{ // Everything worked just fine! completedRequests++; completedTime+=tAfter-tBefore; } } } if(resp) delete resp; if(alwaysReconnect) connected=false; } if(client) delete client; } Arc::final_xmlsec(); // Update global variables. Glib::Mutex::Lock lock(*mutex); ::completedRequests+=completedRequests; ::failedRequests+=failedRequests; ::completedTime+=completedTime; ::failedTime+=failedTime; finishedThreads++; std::cout << "Number of finished threads: " << finishedThreads << std::endl; } int main(int argc, char* argv[]){ // Some variables... int numberOfThreads; int duration; int i; Glib::Thread** threads; const char* config_file = NULL; int debug_level = -1; Arc::LogStream logcerr(std::cerr); // Process options - quick hack, must use Glib options later while(argc >= 3) { if(strcmp(argv[1],"-c") == 0) { config_file = argv[2]; argv[2]=argv[0]; argv+=2; argc-=2; } else if(strcmp(argv[1],"-d") == 0) { debug_level=Arc::istring_to_level(argv[2]); argv[2]=argv[0]; argv+=2; argc-=2; } else if(strcmp(argv[1],"-r") == 0) { alwaysReconnect=true; argv+=1; argc-=1; } else { break; }; } if(debug_level >= 0) { Arc::Logger::getRootLogger().setThreshold((Arc::LogLevel)debug_level); Arc::Logger::getRootLogger().addDestination(logcerr); } // Extract command line arguments. if (argc!=4){ std::cerr << "Wrong number of arguments!" << std::endl << std::endl << "Usage:" << std::endl << "perftest [-c config] [-d debug] [-r] url threads duration" << std::endl << std::endl << "Arguments:" << std::endl << "url The url of the service." << std::endl << "threads The number of concurrent requests." << std::endl << "duration The duration of the test in seconds." << std::endl << "-c config The file containing client chain XML configuration with " << std::endl << " 'soap' entry point and HOSTNAME, PORTNUMBER and PATH " << std::endl << " keyword for hostname, port and HTTP path of 'echo' service." << std::endl << "-d debug The textual representation of desired debug level. Available " << std::endl << " levels: DEBUG, VERBOSE, INFO, WARNING, ERROR, FATAL." << std::endl << "-r If specified close connection and reconnect after " << std::endl << " every request." << std::endl; exit(EXIT_FAILURE); } url_str = std::string(argv[1]); numberOfThreads = atoi(argv[2]); duration = atoi(argv[3]); // Start threads. run=true; finishedThreads=0; //Glib::thread_init(); mutex=new Glib::Mutex; threads = new Glib::Thread*[numberOfThreads]; for (i=0; i #endif // perftest.cpp #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include // Some global shared variables... 
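// Every worker thread shares these counters and timers: each thread keeps
// local copies while it runs and folds them into the globals under the mutex
// just before exiting.  idp_name, username and password hold the SAML2 SSO
// login details that are passed along with every SOAP call.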
Glib::Mutex* mutex; bool run; int finishedThreads; unsigned long completedRequests; unsigned long failedRequests; unsigned long totalRequests; Glib::TimeVal completedTime; Glib::TimeVal failedTime; Glib::TimeVal totalTime; std::string url_str; std::string idp_name; std::string username; std::string password; // Round off a double to an integer. int Round(double x){ return int(x+0.5); } // Send requests and collect statistics. void sendRequests(){ // Some variables... unsigned long completedRequests = 0; unsigned long failedRequests = 0; Glib::TimeVal completedTime(0,0); Glib::TimeVal failedTime(0,0); Glib::TimeVal tBefore; Glib::TimeVal tAfter; bool connected; //std::string url_str("https://127.0.0.1:60000/echo"); Arc::URL url(url_str); Arc::MCCConfig mcc_cfg; //mcc_cfg.AddPrivateKey("../echo/userkey-nopass.pem"); //mcc_cfg.AddCertificate("../echo/usercert.pem"); //mcc_cfg.AddCAFile("../echo/testcacert.pem"); mcc_cfg.AddCAFile("../../services/slcs/cacert2.pem"); mcc_cfg.AddCADir("../echo/certificates"); Arc::NS echo_ns; echo_ns["echo"]="http://www.nordugrid.org/schemas/echo"; while(run){ // Create a Client. Arc::ClientSOAPwithSAML2SSO *client = NULL; client = new Arc::ClientSOAPwithSAML2SSO(mcc_cfg,url); connected=true; //while(run and connected){ // Prepare the request. Arc::PayloadSOAP req(echo_ns); req.NewChild("echo").NewChild("say")="HELLO"; // Send the request and time it. tBefore.assign_current_time(); Arc::PayloadSOAP* resp = NULL; //std::string str; //req.GetXML(str); //std::cout<<"request: "<process(&req,&resp, idp_name, username, password); tAfter.assign_current_time(); if(!status) { // Request failed. failedRequests++; failedTime+=tAfter-tBefore; connected=false; std::cout<<"failure1: "<GetXML(xml); //std::cout<<"reponse: "<= 3) { if(strcmp(argv[1],"-c") == 0) { config_file = argv[2]; argv[2]=argv[0]; argv+=2; argc-=2; } else if(strcmp(argv[1],"-d") == 0) { debug_level=Arc::istring_to_level(argv[2]); argv[2]=argv[0]; argv+=2; argc-=2; } else { break; }; } if(debug_level >= 0) { Arc::Logger::getRootLogger().setThreshold((Arc::LogLevel)debug_level); Arc::Logger::getRootLogger().addDestination(logcerr); } // Extract command line arguments. if (argc!=7){ std::cerr << "Wrong number of arguments!" << std::endl << std::endl << "Usage:" << std::endl << "perftest [-c config] [-d debug] url idpname username password threads duration" << std::endl << std::endl << "Arguments:" << std::endl << "url The url of the service." << std::endl << "idpname The name of the SP, e.g. https://squark.uio.no/idp/shibboleth" << std::endl << "username The username to IdP " << std::endl << "password The password to IdP " << std::endl << "threads The number of concurrent requests." << std::endl << "duration The duration of the test in seconds." << std::endl << "config The file containing client chain XML configuration with " << std::endl << " 'soap' entry point and HOSTNAME, PORTNUMBER and PATH " << std::endl << " keyword for hostname, port and HTTP path of 'echo' service." << std::endl << "debug The textual representation of desired debug level. Available " << std::endl << " levels: DEBUG, VERBOSE, INFO, WARNING, ERROR, FATAL." << std::endl; exit(EXIT_FAILURE); } url_str = std::string(argv[1]); idp_name = std::string(argv[2]); username = std::string(argv[3]); password = std::string(argv[4]); numberOfThreads = atoi(argv[5]); duration = atoi(argv[6]); Arc::init_xmlsec(); // Start threads. 
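// main() only prepares the shared state and starts one worker per requested
// level of concurrency, each executing sendRequests(); the workers keep
// looping for as long as the shared `run` flag remains true.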
run=true; finishedThreads=0; //Glib::thread_init(); mutex=new Glib::Mutex; threads = new Glib::Thread*[numberOfThreads]; for (i=0; i #endif // perftest_cmd.cpp #include #include #include #include #include #include #include #include #include #ifndef WIN32 #include #else #include #endif // Some global shared variables... Glib::Mutex* mutex; int finishedProcesses; unsigned long completedCommands; unsigned long failedCommands; unsigned long totalCommands; Glib::TimeVal completedTime; Glib::TimeVal failedTime; Glib::TimeVal totalTime; int numberOfProcesses; std::string cmd_str; std::vector arglist; // Round off a double to an integer. int Round(double x){ return int(x+0.5); } // Execute a command line void execCommand() { // Some variables... Glib::TimeVal tBefore; Glib::TimeVal tAfter; char **list; int pid; list = (char **)malloc (sizeof(char *) * (arglist.size() +1)); for (int i = 0;i < arglist.size();i++) list[i] = (char *)arglist[i].c_str(); list[arglist.size()] = NULL; for (int i=0; i #endif // perftest.cpp #include #include #include #include #include #include #include #include #include #include #include #include #include #include // Some global shared variables... Glib::Mutex* mutex; bool run; int finishedThreads; unsigned long completedRequests; unsigned long failedRequests; unsigned long totalRequests; Glib::TimeVal completedTime; Glib::TimeVal failedTime; Glib::TimeVal totalTime; std::string url_str; bool alwaysReconnect = false; bool printTimings = false; bool tcpNoDelay = false; bool fixedMsgSize = false; int start; int stop; int steplength; int msgSize; // Round off a double to an integer. int Round(double x){ return int(x+0.5); } // Send requests and collect statistics. void sendRequests(){ // Some variables... unsigned long completedRequests = 0; unsigned long failedRequests = 0; Glib::TimeVal completedTime(0,0); Glib::TimeVal failedTime(0,0); Glib::TimeVal tBefore; Glib::TimeVal tAfter; bool connected; //std::string url_str("https://127.0.0.1:60000/echo"); Arc::URL url(url_str); if(tcpNoDelay) url.AddOption("tcpnodelay=yes"); Arc::MCCConfig mcc_cfg; mcc_cfg.AddPrivateKey("../echo/testuserkey-nopass.pem"); mcc_cfg.AddCertificate("../echo/testusercert.pem"); mcc_cfg.AddCAFile("../echo/testcacert.pem"); mcc_cfg.AddCADir("../echo/certificates"); Arc::NS echo_ns; echo_ns["echo"]="http://www.nordugrid.org/schemas/echo"; std::string size; Arc::ClientSOAP *client = NULL; while(run){ connected=false; for(int i=start; iprocess(&req,&resp); tAfter.assign_current_time(); if(!status) { // Request failed. failedRequests++; failedTime+=tAfter-tBefore; connected=false; } else { if(resp == NULL) { // Response was not SOAP or no response at all. failedRequests++; failedTime+=tAfter-tBefore; connected=false; } else { //std::string xml; //resp->GetXML(xml); if (std::string((*resp)["echoResponse"]["hear"]).size()==0){ // The response was not what it should be. failedRequests++; failedTime+=tAfter-tBefore; connected=false; } else{ // Everything worked just fine! completedRequests++; completedTime+=tAfter-tBefore; if (printTimings) std::cout << completedRequests << " " << size << " " << tAfter.as_double()-tBefore.as_double() << std::endl; } } } if(resp) delete resp; if(alwaysReconnect) connected=false; } if(client) delete client; } // Update global variables. 
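// ----------------------------------------------------------------------------
// Editor's note: a sketch (not part of the original tarball) of the message
// size sweep driven by the start/stop/steplength options of the perftest
// above. The timing, the ClientSOAP calls and the as_double() arithmetic are
// taken from the surrounding code; how the original builds a payload of the
// requested size is not fully visible in this dump, so the std::string(sz,'x')
// line, the sweep limits, the include paths and the CA file name are
// assumptions.
// ----------------------------------------------------------------------------
#include <string>
#include <iostream>
#include <glibmm.h>
#include <arc/URL.h>
#include <arc/message/PayloadSOAP.h>
#include <arc/communication/ClientInterface.h>

int main() {
  const int start = 1024, stop = 64 * 1024, steplength = 1024;   // hypothetical sweep
  Arc::MCCConfig cfg;
  cfg.AddCAFile("../echo/testcacert.pem");
  Arc::NS ns;
  ns["echo"] = "http://www.nordugrid.org/schemas/echo";
  Arc::ClientSOAP client(cfg, Arc::URL("https://127.0.0.1:60000/echo"), 60);
  for(int sz = start; sz <= stop; sz += steplength) {
    Arc::PayloadSOAP req(ns);
    req.NewChild("echo").NewChild("say") = std::string(sz, 'x');  // sz-byte payload
    Arc::PayloadSOAP* resp = NULL;
    Glib::TimeVal t0, t1;
    t0.assign_current_time();
    Arc::MCC_Status status = client.process(&req, &resp);
    t1.assign_current_time();
    if(!status || resp == NULL) {
      std::cerr << "request failed at size " << sz << std::endl;
      if(resp) delete resp;
      break;
    }
    // Same per-iteration report as the -v option of the perftest above.
    std::cout << sz << " " << (t1.as_double() - t0.as_double()) << std::endl;
    delete resp;
  }
  return 0;
}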
Glib::Mutex::Lock lock(*mutex); ::completedRequests+=completedRequests; ::failedRequests+=failedRequests; ::completedTime+=completedTime; ::failedTime+=failedTime; finishedThreads++; std::cout << "Number of finished threads: " << finishedThreads << std::endl; } int main(int argc, char* argv[]){ // Some variables... int numberOfThreads; int duration; int i; Glib::Thread** threads; const char* config_file = NULL; int debug_level = -1; Arc::LogStream logcerr(std::cerr); // Process options - quick hack, must use Glib options later while(argc >= 7) { if(strcmp(argv[1],"-c") == 0) { config_file = argv[2]; argv[2]=argv[0]; argv+=2; argc-=2; } else if(strcmp(argv[1],"-d") == 0) { debug_level=Arc::istring_to_level(argv[2]); argv[2]=argv[0]; argv+=2; argc-=2; } else if(strcmp(argv[1],"-f") == 0) { fixedMsgSize = true; msgSize=atoi(argv[2]); argv[2]=argv[0]; argv+=2; argc-=2; } else if(strcmp(argv[1],"-r") == 0) { alwaysReconnect=true; argv+=1; argc-=1; } else if(strcmp(argv[1],"-v") == 0) { printTimings=true; argv+=1; argc-=1; } else if(strcmp(argv[1],"-t") == 0) { tcpNoDelay=true; argv+=1; argc-=1; } else { break; }; } if(debug_level >= 0) { Arc::Logger::getRootLogger().setThreshold((Arc::LogLevel)debug_level); Arc::Logger::getRootLogger().addDestination(logcerr); } // Extract command line arguments. if (argc!=7){ std::cerr << "Wrong number of arguments!" << std::endl << std::endl << "Usage:" << std::endl << "perftest [-c config] [-d debug] [-r] [-t] [-v] url threads duration start stop steplength" << std::endl << std::endl << "Arguments:" << std::endl << "url The url of the service." << std::endl << "threads The number of concurrent requests." << std::endl << "duration The duration of the test in seconds." << std::endl << "start The size of the first response from the echo service." << std::endl << "stop The size of the last response from the echo service." << std::endl << "steplength The increase of size per call to the echo service." << std::endl << "-c config The file containing client chain XML configuration with " << std::endl << " 'soap' entry point and HOSTNAME, PORTNUMBER and PATH " << std::endl << " keyword for hostname, port and HTTP path of 'echo' service." << std::endl << "-d debug The textual representation of desired debug level. Available " << std::endl << " levels: DEBUG, VERBOSE, INFO, WARNING, ERROR, FATAL." << std::endl << "-r If specified close connection and reconnect after " << std::endl << " every request." << std::endl << "-t Toggles TCP_NODELAY option " << std::endl << "-f size Fixed message size " << std::endl << "-v If specified print out timings for each iteration " << std::endl; exit(EXIT_FAILURE); } url_str = std::string(argv[1]); numberOfThreads = atoi(argv[2]); duration = atoi(argv[3]); start = atoi(argv[4]); stop = atoi(argv[5]); steplength = atoi(argv[6]); // Start threads. run=true; finishedThreads=0; //Glib::thread_init(); mutex=new Glib::Mutex; threads = new Glib::Thread*[numberOfThreads]; for (i=0; i #endif #include #include #include #include #include #include using namespace ArcSec; int main(int argc,char* argv[]) { if(argc != 3) { std::cerr<<"Wrong number of arguments. 
Expecting policy and request."<addPolicy(SourceFile(argv[1])); Response *resp = eval->evaluate(SourceFile(argv[2])); ResponseList rlist = resp->getResponseItems(); int size = rlist.size(); for(int i = 0; i < size; i++) { ResponseItem* item = rlist[i]; RequestTuple* tp = item->reqtp; Subject subject = tp->sub; std::cout<<"Subject: "; for(Subject::iterator it = subject.begin(); it!= subject.end(); it++){ AttributeValue *attrval; RequestAttribute *attr; attr = dynamic_cast(*it); if(attr){ attrval = (*it)->getAttributeValue(); if(attrval) std::cout<encode(); } }; std::cout<<", Result: "<res< #endif #include #include #include #include #include #include int main(void) { Arc::LogStream cdest(std::cerr); Arc::Logger::getRootLogger().addDestination(cdest); Arc::Logger::getRootLogger().setThreshold(Arc::VERBOSE); std::string cert("./cert.pem"); std::string key("./key.pem"); std::string cafile("./ca.pem"); Arc::XMLNode policy_nd("\ \ \ \ Sample Permit rule for Storage_manager service \ \ \ /O=NorduGrid/OU=UIO/CN=test\ /vo.knowarc/usergroupA\ \ /O=Grid/OU=KnowARC/CN=XYZ\ urn:mace:shibboleth:examples\ \ \ \ file://home/test\ \ \ read\ stat\ list\ \ \ 2007-09-10T20:30:20/P1Y1M\ \ \ "); std::string policy; policy_nd.GetXML(policy); /************************************/ //Request side Arc::Time t; std::string req_string; Arc::Credential request(t, Arc::Period(24*3600), 1024, "rfc"); request.GenerateRequest(req_string); std::cout<<"Certificate request: "< #endif #include #include #include #include #include #include #include #include #include #include #include int main(void) { signal(SIGTTOU,SIG_IGN); signal(SIGTTIN,SIG_IGN); Arc::Logger logger(Arc::Logger::rootLogger, "Test"); Arc::LogStream logcerr(std::cerr); Arc::Logger::rootLogger.addDestination(logcerr); std::string url_str("https://127.0.0.1:60000/echo"); Arc::URL url(url_str); Arc::MCCConfig mcc_cfg; mcc_cfg.AddPrivateKey("../echo/testkey-nopass.pem"); mcc_cfg.AddCertificate("../echo/testcert.pem"); mcc_cfg.AddCAFile("../echo/testcacert.pem"); mcc_cfg.AddCADir("../echo/certificates"); Arc::NS echo_ns; echo_ns["echo"]="http://www.nordugrid.org/schemas/echo"; std::string idp_name = "https://idp.testshib.org/idp/shibboleth"; std::string username = "myself"; std::string password = "myself"; /******** Test to service with SAML2SSO **********/ //Create a HTTP client logger.msg(Arc::INFO, "Creating a http client"); Arc::ClientHTTPwithSAML2SSO *client_http; client_http = new Arc::ClientHTTPwithSAML2SSO(mcc_cfg,url); logger.msg(Arc::INFO, "Creating and sending request"); Arc::PayloadRaw req_http; //req_http.Insert(); Arc::PayloadRawInterface* resp_http = NULL; Arc::HTTPClientInfo info; if(client_http) { Arc::MCC_Status status = client_http->process("GET", "echo", &req_http,&info,&resp_http, idp_name, username, password); if(!status) { logger.msg(Arc::ERROR, "HTTP with SAML2SSO invokation failed"); throw std::runtime_error("HTTP with SAML2SSO invokation failed"); } if(resp_http == NULL) { logger.msg(Arc::ERROR,"There was no HTTP response"); throw std::runtime_error("There was no HTTP response"); } } if(resp_http) delete resp_http; if(client_http) delete client_http; //Create a SOAP client logger.msg(Arc::INFO, "Creating a soap client"); Arc::ClientSOAPwithSAML2SSO *client_soap; client_soap = new Arc::ClientSOAPwithSAML2SSO(mcc_cfg,url); logger.msg(Arc::INFO, "Creating and sending request"); Arc::PayloadSOAP req_soap(echo_ns); req_soap.NewChild("echo").NewChild("say")="HELLO"; Arc::PayloadSOAP* resp_soap = NULL; Arc::MCC_Status status = 
client_soap->process(&req_soap,&resp_soap, idp_name, username, password); if(!status) { logger.msg(Arc::ERROR, "SOAP with SAML2SSO invokation failed"); throw std::runtime_error("SOAP with SAML2SSO invokation failed"); } if(resp_soap == NULL) { logger.msg(Arc::ERROR,"There was no SOAP response"); throw std::runtime_error("There was no SOAP response"); } std::string xml_soap; resp_soap->GetXML(xml_soap); std::cout << "XML: "<< xml_soap << std::endl; std::cout << "Response: " << (std::string)((*resp_soap)["echoResponse"]["hear"]) << std::endl; if(resp_soap) delete resp_soap; if(client_soap) delete client_soap; return 0; } nordugrid-arc-5.4.2/src/tests/client/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515022444 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200605.637082606 30 ctime=1513200662.398776824 nordugrid-arc-5.4.2/src/tests/client/Makefile.am0000644000175000002070000000472212052416515022513 0ustar00mockbuildmock00000000000000if XMLSEC_ENABLED noinst_PROGRAMS = test_ClientInterface test_ClientSAML2SSO \ test_ClientX509Delegation_ARC test_ClientX509Delegation_GridSite else noinst_PROGRAMS = test_ClientInterface \ test_ClientX509Delegation_ARC test_ClientX509Delegation_GridSite endif test_ClientInterface_SOURCES = test_ClientInterface.cpp test_ClientInterface_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_ClientInterface_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) if XMLSEC_ENABLED test_ClientSAML2SSO_SOURCES = test_ClientSAML2SSO.cpp test_ClientSAML2SSO_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_ClientSAML2SSO_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) endif test_ClientX509Delegation_ARC_SOURCES = test_ClientX509Delegation_ARC.cpp test_ClientX509Delegation_ARC_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_ClientX509Delegation_ARC_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) test_ClientX509Delegation_GridSite_SOURCES = \ test_ClientX509Delegation_GridSite.cpp test_ClientX509Delegation_GridSite_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_ClientX509Delegation_GridSite_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) 
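// ----------------------------------------------------------------------------
// Editor's note: a minimal sketch (not part of the original tarball) of the
// HTTP half of test_ClientSAML2SSO.cpp shown above: a plain GET through
// ClientHTTPwithSAML2SSO. The process("GET", "echo", &req, &info, &resp,
// idp, user, password) call, the payload types and the testshib IdP account
// are taken from the test code above; the include paths and the CA file name
// are assumptions.
// ----------------------------------------------------------------------------
#include <string>
#include <arc/URL.h>
#include <arc/message/PayloadRaw.h>
#include <arc/communication/ClientInterface.h>   // Arc::HTTPClientInfo (assumed path)
#include <arc/communication/ClientSAML2SSO.h>    // Arc::ClientHTTPwithSAML2SSO (assumed path)
#include <arc/xmlsec/XmlSecUtils.h>              // init_xmlsec()/final_xmlsec() (assumed path)

int main() {
  Arc::init_xmlsec();
  Arc::MCCConfig cfg;
  cfg.AddCAFile("../echo/testcacert.pem");
  Arc::ClientHTTPwithSAML2SSO client(cfg, Arc::URL("https://127.0.0.1:60000/echo"));
  Arc::PayloadRaw req;                        // empty body, this is a plain GET
  Arc::PayloadRawInterface* resp = NULL;
  Arc::HTTPClientInfo info;
  Arc::MCC_Status status = client.process("GET", "echo", &req, &info, &resp,
                                          "https://idp.testshib.org/idp/shibboleth",
                                          "myself", "myself");
  int rc = 0;
  if(!status) rc = 1;            // SSO hand-shake or HTTP call failed
  else if(resp == NULL) rc = 1;  // no HTTP response
  if(resp) delete resp;
  Arc::final_xmlsec();
  return rc;
}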
nordugrid-arc-5.4.2/src/tests/client/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315735022453 xustar000000000000000030 mtime=1513200605.695083315 29 atime=1513200652.48465557 30 ctime=1513200662.399776836 nordugrid-arc-5.4.2/src/tests/client/Makefile.in0000644000175000002070000011412313214315735022524 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ @XMLSEC_ENABLED_FALSE@noinst_PROGRAMS = test_ClientInterface$(EXEEXT) \ @XMLSEC_ENABLED_FALSE@ test_ClientX509Delegation_ARC$(EXEEXT) \ @XMLSEC_ENABLED_FALSE@ test_ClientX509Delegation_GridSite$(EXEEXT) @XMLSEC_ENABLED_TRUE@noinst_PROGRAMS = test_ClientInterface$(EXEEXT) \ @XMLSEC_ENABLED_TRUE@ test_ClientSAML2SSO$(EXEEXT) \ @XMLSEC_ENABLED_TRUE@ test_ClientX509Delegation_ARC$(EXEEXT) \ @XMLSEC_ENABLED_TRUE@ test_ClientX509Delegation_GridSite$(EXEEXT) subdir = src/tests/client DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = PROGRAMS = $(noinst_PROGRAMS) am_test_ClientInterface_OBJECTS = \ test_ClientInterface-test_ClientInterface.$(OBJEXT) test_ClientInterface_OBJECTS = $(am_test_ClientInterface_OBJECTS) am__DEPENDENCIES_1 = test_ClientInterface_DEPENDENCIES = $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) test_ClientInterface_LINK = $(LIBTOOL) 
--tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(test_ClientInterface_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am__test_ClientSAML2SSO_SOURCES_DIST = test_ClientSAML2SSO.cpp @XMLSEC_ENABLED_TRUE@am_test_ClientSAML2SSO_OBJECTS = test_ClientSAML2SSO-test_ClientSAML2SSO.$(OBJEXT) test_ClientSAML2SSO_OBJECTS = $(am_test_ClientSAML2SSO_OBJECTS) @XMLSEC_ENABLED_TRUE@test_ClientSAML2SSO_DEPENDENCIES = $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/data/libarcdata.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/common/libarccommon.la \ @XMLSEC_ENABLED_TRUE@ $(am__DEPENDENCIES_1) \ @XMLSEC_ENABLED_TRUE@ $(am__DEPENDENCIES_1) test_ClientSAML2SSO_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(test_ClientSAML2SSO_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_test_ClientX509Delegation_ARC_OBJECTS = test_ClientX509Delegation_ARC-test_ClientX509Delegation_ARC.$(OBJEXT) test_ClientX509Delegation_ARC_OBJECTS = \ $(am_test_ClientX509Delegation_ARC_OBJECTS) test_ClientX509Delegation_ARC_DEPENDENCIES = $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) test_ClientX509Delegation_ARC_LINK = $(LIBTOOL) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(test_ClientX509Delegation_ARC_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_test_ClientX509Delegation_GridSite_OBJECTS = test_ClientX509Delegation_GridSite-test_ClientX509Delegation_GridSite.$(OBJEXT) test_ClientX509Delegation_GridSite_OBJECTS = \ $(am_test_ClientX509Delegation_GridSite_OBJECTS) test_ClientX509Delegation_GridSite_DEPENDENCIES = $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) test_ClientX509Delegation_GridSite_LINK = $(LIBTOOL) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(test_ClientX509Delegation_GridSite_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(test_ClientInterface_SOURCES) \ $(test_ClientSAML2SSO_SOURCES) \ $(test_ClientX509Delegation_ARC_SOURCES) \ $(test_ClientX509Delegation_GridSite_SOURCES) DIST_SOURCES = $(test_ClientInterface_SOURCES) \ $(am__test_ClientSAML2SSO_SOURCES_DIST) \ 
$(test_ClientX509Delegation_ARC_SOURCES) \ $(test_ClientX509Delegation_GridSite_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = 
@GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = 
@bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ test_ClientInterface_SOURCES = test_ClientInterface.cpp test_ClientInterface_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_ClientInterface_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) @XMLSEC_ENABLED_TRUE@test_ClientSAML2SSO_SOURCES = test_ClientSAML2SSO.cpp @XMLSEC_ENABLED_TRUE@test_ClientSAML2SSO_CXXFLAGS = -I$(top_srcdir)/include \ @XMLSEC_ENABLED_TRUE@ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) @XMLSEC_ENABLED_TRUE@test_ClientSAML2SSO_LDADD = \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/data/libarcdata.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/common/libarccommon.la \ @XMLSEC_ENABLED_TRUE@ $(GLIBMM_LIBS) $(LIBXML2_LIBS) test_ClientX509Delegation_ARC_SOURCES = test_ClientX509Delegation_ARC.cpp test_ClientX509Delegation_ARC_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_ClientX509Delegation_ARC_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ 
$(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) test_ClientX509Delegation_GridSite_SOURCES = \ test_ClientX509Delegation_GridSite.cpp test_ClientX509Delegation_GridSite_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_ClientX509Delegation_GridSite_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/tests/client/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/tests/client/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list test_ClientInterface$(EXEEXT): $(test_ClientInterface_OBJECTS) $(test_ClientInterface_DEPENDENCIES) @rm -f test_ClientInterface$(EXEEXT) $(test_ClientInterface_LINK) $(test_ClientInterface_OBJECTS) $(test_ClientInterface_LDADD) $(LIBS) test_ClientSAML2SSO$(EXEEXT): $(test_ClientSAML2SSO_OBJECTS) $(test_ClientSAML2SSO_DEPENDENCIES) @rm -f test_ClientSAML2SSO$(EXEEXT) $(test_ClientSAML2SSO_LINK) $(test_ClientSAML2SSO_OBJECTS) $(test_ClientSAML2SSO_LDADD) $(LIBS) test_ClientX509Delegation_ARC$(EXEEXT): $(test_ClientX509Delegation_ARC_OBJECTS) $(test_ClientX509Delegation_ARC_DEPENDENCIES) @rm -f test_ClientX509Delegation_ARC$(EXEEXT) $(test_ClientX509Delegation_ARC_LINK) $(test_ClientX509Delegation_ARC_OBJECTS) $(test_ClientX509Delegation_ARC_LDADD) $(LIBS) test_ClientX509Delegation_GridSite$(EXEEXT): $(test_ClientX509Delegation_GridSite_OBJECTS) $(test_ClientX509Delegation_GridSite_DEPENDENCIES) @rm -f test_ClientX509Delegation_GridSite$(EXEEXT) $(test_ClientX509Delegation_GridSite_LINK) $(test_ClientX509Delegation_GridSite_OBJECTS) $(test_ClientX509Delegation_GridSite_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_ClientInterface-test_ClientInterface.Po@am__quote@ @AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/test_ClientSAML2SSO-test_ClientSAML2SSO.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_ClientX509Delegation_ARC-test_ClientX509Delegation_ARC.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_ClientX509Delegation_GridSite-test_ClientX509Delegation_GridSite.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< test_ClientInterface-test_ClientInterface.o: test_ClientInterface.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_ClientInterface_CXXFLAGS) $(CXXFLAGS) -MT test_ClientInterface-test_ClientInterface.o -MD -MP -MF $(DEPDIR)/test_ClientInterface-test_ClientInterface.Tpo -c -o test_ClientInterface-test_ClientInterface.o `test -f 'test_ClientInterface.cpp' || echo '$(srcdir)/'`test_ClientInterface.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_ClientInterface-test_ClientInterface.Tpo $(DEPDIR)/test_ClientInterface-test_ClientInterface.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_ClientInterface.cpp' object='test_ClientInterface-test_ClientInterface.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_ClientInterface_CXXFLAGS) $(CXXFLAGS) -c -o test_ClientInterface-test_ClientInterface.o `test -f 'test_ClientInterface.cpp' || echo '$(srcdir)/'`test_ClientInterface.cpp test_ClientInterface-test_ClientInterface.obj: test_ClientInterface.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_ClientInterface_CXXFLAGS) $(CXXFLAGS) -MT test_ClientInterface-test_ClientInterface.obj -MD -MP -MF $(DEPDIR)/test_ClientInterface-test_ClientInterface.Tpo -c -o test_ClientInterface-test_ClientInterface.obj `if test -f 'test_ClientInterface.cpp'; then $(CYGPATH_W) 'test_ClientInterface.cpp'; else $(CYGPATH_W) '$(srcdir)/test_ClientInterface.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_ClientInterface-test_ClientInterface.Tpo $(DEPDIR)/test_ClientInterface-test_ClientInterface.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_ClientInterface.cpp' object='test_ClientInterface-test_ClientInterface.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) 
$(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_ClientInterface_CXXFLAGS) $(CXXFLAGS) -c -o test_ClientInterface-test_ClientInterface.obj `if test -f 'test_ClientInterface.cpp'; then $(CYGPATH_W) 'test_ClientInterface.cpp'; else $(CYGPATH_W) '$(srcdir)/test_ClientInterface.cpp'; fi` test_ClientSAML2SSO-test_ClientSAML2SSO.o: test_ClientSAML2SSO.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_ClientSAML2SSO_CXXFLAGS) $(CXXFLAGS) -MT test_ClientSAML2SSO-test_ClientSAML2SSO.o -MD -MP -MF $(DEPDIR)/test_ClientSAML2SSO-test_ClientSAML2SSO.Tpo -c -o test_ClientSAML2SSO-test_ClientSAML2SSO.o `test -f 'test_ClientSAML2SSO.cpp' || echo '$(srcdir)/'`test_ClientSAML2SSO.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_ClientSAML2SSO-test_ClientSAML2SSO.Tpo $(DEPDIR)/test_ClientSAML2SSO-test_ClientSAML2SSO.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_ClientSAML2SSO.cpp' object='test_ClientSAML2SSO-test_ClientSAML2SSO.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_ClientSAML2SSO_CXXFLAGS) $(CXXFLAGS) -c -o test_ClientSAML2SSO-test_ClientSAML2SSO.o `test -f 'test_ClientSAML2SSO.cpp' || echo '$(srcdir)/'`test_ClientSAML2SSO.cpp test_ClientSAML2SSO-test_ClientSAML2SSO.obj: test_ClientSAML2SSO.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_ClientSAML2SSO_CXXFLAGS) $(CXXFLAGS) -MT test_ClientSAML2SSO-test_ClientSAML2SSO.obj -MD -MP -MF $(DEPDIR)/test_ClientSAML2SSO-test_ClientSAML2SSO.Tpo -c -o test_ClientSAML2SSO-test_ClientSAML2SSO.obj `if test -f 'test_ClientSAML2SSO.cpp'; then $(CYGPATH_W) 'test_ClientSAML2SSO.cpp'; else $(CYGPATH_W) '$(srcdir)/test_ClientSAML2SSO.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_ClientSAML2SSO-test_ClientSAML2SSO.Tpo $(DEPDIR)/test_ClientSAML2SSO-test_ClientSAML2SSO.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_ClientSAML2SSO.cpp' object='test_ClientSAML2SSO-test_ClientSAML2SSO.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_ClientSAML2SSO_CXXFLAGS) $(CXXFLAGS) -c -o test_ClientSAML2SSO-test_ClientSAML2SSO.obj `if test -f 'test_ClientSAML2SSO.cpp'; then $(CYGPATH_W) 'test_ClientSAML2SSO.cpp'; else $(CYGPATH_W) '$(srcdir)/test_ClientSAML2SSO.cpp'; fi` test_ClientX509Delegation_ARC-test_ClientX509Delegation_ARC.o: test_ClientX509Delegation_ARC.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_ClientX509Delegation_ARC_CXXFLAGS) $(CXXFLAGS) -MT test_ClientX509Delegation_ARC-test_ClientX509Delegation_ARC.o -MD -MP -MF $(DEPDIR)/test_ClientX509Delegation_ARC-test_ClientX509Delegation_ARC.Tpo -c -o test_ClientX509Delegation_ARC-test_ClientX509Delegation_ARC.o `test -f 'test_ClientX509Delegation_ARC.cpp' || echo '$(srcdir)/'`test_ClientX509Delegation_ARC.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_ClientX509Delegation_ARC-test_ClientX509Delegation_ARC.Tpo $(DEPDIR)/test_ClientX509Delegation_ARC-test_ClientX509Delegation_ARC.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_ClientX509Delegation_ARC.cpp' 
object='test_ClientX509Delegation_ARC-test_ClientX509Delegation_ARC.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_ClientX509Delegation_ARC_CXXFLAGS) $(CXXFLAGS) -c -o test_ClientX509Delegation_ARC-test_ClientX509Delegation_ARC.o `test -f 'test_ClientX509Delegation_ARC.cpp' || echo '$(srcdir)/'`test_ClientX509Delegation_ARC.cpp test_ClientX509Delegation_ARC-test_ClientX509Delegation_ARC.obj: test_ClientX509Delegation_ARC.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_ClientX509Delegation_ARC_CXXFLAGS) $(CXXFLAGS) -MT test_ClientX509Delegation_ARC-test_ClientX509Delegation_ARC.obj -MD -MP -MF $(DEPDIR)/test_ClientX509Delegation_ARC-test_ClientX509Delegation_ARC.Tpo -c -o test_ClientX509Delegation_ARC-test_ClientX509Delegation_ARC.obj `if test -f 'test_ClientX509Delegation_ARC.cpp'; then $(CYGPATH_W) 'test_ClientX509Delegation_ARC.cpp'; else $(CYGPATH_W) '$(srcdir)/test_ClientX509Delegation_ARC.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_ClientX509Delegation_ARC-test_ClientX509Delegation_ARC.Tpo $(DEPDIR)/test_ClientX509Delegation_ARC-test_ClientX509Delegation_ARC.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_ClientX509Delegation_ARC.cpp' object='test_ClientX509Delegation_ARC-test_ClientX509Delegation_ARC.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_ClientX509Delegation_ARC_CXXFLAGS) $(CXXFLAGS) -c -o test_ClientX509Delegation_ARC-test_ClientX509Delegation_ARC.obj `if test -f 'test_ClientX509Delegation_ARC.cpp'; then $(CYGPATH_W) 'test_ClientX509Delegation_ARC.cpp'; else $(CYGPATH_W) '$(srcdir)/test_ClientX509Delegation_ARC.cpp'; fi` test_ClientX509Delegation_GridSite-test_ClientX509Delegation_GridSite.o: test_ClientX509Delegation_GridSite.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_ClientX509Delegation_GridSite_CXXFLAGS) $(CXXFLAGS) -MT test_ClientX509Delegation_GridSite-test_ClientX509Delegation_GridSite.o -MD -MP -MF $(DEPDIR)/test_ClientX509Delegation_GridSite-test_ClientX509Delegation_GridSite.Tpo -c -o test_ClientX509Delegation_GridSite-test_ClientX509Delegation_GridSite.o `test -f 'test_ClientX509Delegation_GridSite.cpp' || echo '$(srcdir)/'`test_ClientX509Delegation_GridSite.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_ClientX509Delegation_GridSite-test_ClientX509Delegation_GridSite.Tpo $(DEPDIR)/test_ClientX509Delegation_GridSite-test_ClientX509Delegation_GridSite.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_ClientX509Delegation_GridSite.cpp' object='test_ClientX509Delegation_GridSite-test_ClientX509Delegation_GridSite.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_ClientX509Delegation_GridSite_CXXFLAGS) $(CXXFLAGS) -c -o test_ClientX509Delegation_GridSite-test_ClientX509Delegation_GridSite.o `test -f 'test_ClientX509Delegation_GridSite.cpp' || echo '$(srcdir)/'`test_ClientX509Delegation_GridSite.cpp test_ClientX509Delegation_GridSite-test_ClientX509Delegation_GridSite.obj: 
test_ClientX509Delegation_GridSite.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_ClientX509Delegation_GridSite_CXXFLAGS) $(CXXFLAGS) -MT test_ClientX509Delegation_GridSite-test_ClientX509Delegation_GridSite.obj -MD -MP -MF $(DEPDIR)/test_ClientX509Delegation_GridSite-test_ClientX509Delegation_GridSite.Tpo -c -o test_ClientX509Delegation_GridSite-test_ClientX509Delegation_GridSite.obj `if test -f 'test_ClientX509Delegation_GridSite.cpp'; then $(CYGPATH_W) 'test_ClientX509Delegation_GridSite.cpp'; else $(CYGPATH_W) '$(srcdir)/test_ClientX509Delegation_GridSite.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_ClientX509Delegation_GridSite-test_ClientX509Delegation_GridSite.Tpo $(DEPDIR)/test_ClientX509Delegation_GridSite-test_ClientX509Delegation_GridSite.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_ClientX509Delegation_GridSite.cpp' object='test_ClientX509Delegation_GridSite-test_ClientX509Delegation_GridSite.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_ClientX509Delegation_GridSite_CXXFLAGS) $(CXXFLAGS) -c -o test_ClientX509Delegation_GridSite-test_ClientX509Delegation_GridSite.obj `if test -f 'test_ClientX509Delegation_GridSite.cpp'; then $(CYGPATH_W) 'test_ClientX509Delegation_GridSite.cpp'; else $(CYGPATH_W) '$(srcdir)/test_ClientX509Delegation_GridSite.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case 
$$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstPROGRAMS \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstPROGRAMS ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/tests/client/PaxHeaders.7502/test_ClientX509Delegation_GridSite.cpp0000644000000000000000000000012412042216423027535 xustar000000000000000027 mtime=1351163155.111915 27 atime=1513200575.411712 30 ctime=1513200662.404776897 nordugrid-arc-5.4.2/src/tests/client/test_ClientX509Delegation_GridSite.cpp0000644000175000002070000000350112042216423027601 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include int main(void) { signal(SIGTTOU,SIG_IGN); signal(SIGTTIN,SIG_IGN); Arc::Logger logger(Arc::Logger::rootLogger, "Test"); Arc::LogStream logcerr(std::cerr); Arc::Logger::rootLogger.addDestination(logcerr); /******** Test to gridsite delegation service **********/ std::string gs_deleg_url_str("https://cream.grid.upjs.sk:8443/ce-cream/services/gridsite-delegation"); Arc::URL gs_deleg_url(gs_deleg_url_str); Arc::MCCConfig gs_deleg_mcc_cfg; //Use voms-proxy-init or arcproxy to generate a gsi legacy proxy, and //put put it into mcc configuraton by using "AddProxy" gs_deleg_mcc_cfg.AddProxy("proxy.pem"); //gs_deleg_mcc_cfg.AddPrivateKey("../echo/userkey-nopass.pem"); //gs_deleg_mcc_cfg.AddCertificate("../echo/usercert.pem"); gs_deleg_mcc_cfg.AddCADir("../echo/certificates"); //Create a delegation SOAP client logger.msg(Arc::INFO, "Creating a delegation soap client"); Arc::ClientX509Delegation *gs_deleg_client = NULL; gs_deleg_client = new Arc::ClientX509Delegation(gs_deleg_mcc_cfg, gs_deleg_url); std::string gs_delegation_id; gs_delegation_id = Arc::UUID(); if(gs_deleg_client) { if(!(gs_deleg_client->createDelegation(Arc::DELEG_GRIDSITE, gs_delegation_id))) { logger.msg(Arc::ERROR, "Delegation to gridsite delegation service failed"); throw std::runtime_error("Delegation to gridsite delegation service failed"); } } logger.msg(Arc::INFO, "Delegation ID: %s", gs_delegation_id.c_str()); if(gs_deleg_client) delete gs_deleg_client; return 0; } nordugrid-arc-5.4.2/src/tests/client/PaxHeaders.7502/test_ClientInterface.cpp0000644000000000000000000000012412042216423025202 xustar000000000000000027 mtime=1351163155.111915 27 atime=1513200575.408712 30 ctime=1513200662.400776848 nordugrid-arc-5.4.2/src/tests/client/test_ClientInterface.cpp0000644000175000002070000000641312042216423025253 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include int main(void) { signal(SIGTTOU,SIG_IGN); signal(SIGTTIN,SIG_IGN); Arc::Logger logger(Arc::Logger::rootLogger, "Test"); Arc::LogStream logcerr(std::cerr); Arc::Logger::rootLogger.addDestination(logcerr); std::string url_str("https://charged.uio.no:60000/echo"); Arc::URL url(url_str); Arc::MCCConfig mcc_cfg; mcc_cfg.AddPrivateKey("../echo/userkey-nopass.pem"); mcc_cfg.AddCertificate("../echo/usercert.pem"); mcc_cfg.AddCAFile("../echo/testcacert.pem"); mcc_cfg.AddCADir("../echo/certificates"); Arc::NS echo_ns; echo_ns["echo"]="http://www.nordugrid.org/schemas/echo"; //Create a SOAP client logger.msg(Arc::INFO, "Creating a soap client"); //Configuration for the security handler of usernametoken and x509token Arc::XMLNode sechanlder_nd_ut("\ \ generate\ digest\ user\ passwd\ "); Arc::XMLNode sechanlder_nd_xt("\ \ generate\ ../echo/testcert.pem\ ../echo/testkey-nopass.pem\ "); Arc::XMLNode sechanlder_nd_st("\ \ generate\ ../echo/usercert.pem\ ../echo/userkey-nopass.pem\ ../echo/testcacert.pem\ ../echo/certificates\ 
https://squark.uio.no:60001/aaservice\ "); Arc::ClientSOAP *client; client = new Arc::ClientSOAP(mcc_cfg,url,60); //client->AddSecHandler(sechanlder_nd_ut, "arcshc"); //client->AddSecHandler(sechanlder_nd_xt, "arcshc"); client->AddSecHandler(sechanlder_nd_st, "arcshc"); // Create and send echo request logger.msg(Arc::INFO, "Creating and sending request"); Arc::PayloadSOAP req(echo_ns); req.NewChild("echo").NewChild("say")="HELLO"; Arc::PayloadSOAP* resp = NULL; std::string str; req.GetXML(str); std::cout<<"request: "<process(&req,&resp); if(!status) { logger.msg(Arc::ERROR, "SOAP invokation failed"); throw std::runtime_error("SOAP invokation failed"); } if(resp == NULL) { logger.msg(Arc::ERROR,"There was no SOAP response"); throw std::runtime_error("There was no SOAP response"); } std::string xml; resp->GetXML(xml); std::cout << "XML: "<< xml << std::endl; std::cout << "Response: " << (std::string)((*resp)["echoResponse"]["hear"]) << std::endl; if(resp) delete resp; if(client) delete client; return 0; } nordugrid-arc-5.4.2/src/tests/client/PaxHeaders.7502/README0000644000000000000000000000012311173301321021253 xustar000000000000000026 mtime=1240302289.19337 27 atime=1513200575.411712 30 ctime=1513200662.396776799 nordugrid-arc-5.4.2/src/tests/client/README0000644000175000002070000000046011173301321021321 0ustar00mockbuildmock00000000000000This directory includes the example codes about how to use the client interfaces for developing the clients, including: 1. General WS client; 2. WS client with SAMLSSO2 profile; 3. Delegation client for using delegation client interface: to ARC delegation service; to gridSite delegation service; nordugrid-arc-5.4.2/src/tests/client/PaxHeaders.7502/test_ClientX509Delegation_ARC.cpp0000644000000000000000000000012412042216423026430 xustar000000000000000027 mtime=1351163155.111915 27 atime=1513200575.409712 30 ctime=1513200662.402776873 nordugrid-arc-5.4.2/src/tests/client/test_ClientX509Delegation_ARC.cpp0000644000175000002070000000466112042216423026504 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include int main(void) { signal(SIGTTOU,SIG_IGN); signal(SIGTTIN,SIG_IGN); Arc::Logger logger(Arc::Logger::rootLogger, "Test"); Arc::LogStream logcerr(std::cerr); Arc::Logger::rootLogger.addDestination(logcerr); /******** Test to ARC delegation service **********/ std::string arc_deleg_url_str("https://127.0.0.1:60000/delegation"); //std::string arc_deleg_url_str("https://glueball.uio.no:60000/delegation"); Arc::URL arc_deleg_url(arc_deleg_url_str); Arc::MCCConfig arc_deleg_mcc_cfg; arc_deleg_mcc_cfg.AddPrivateKey("../echo/testuserkey-nopass.pem"); arc_deleg_mcc_cfg.AddCertificate("../echo/testusercert.pem"); arc_deleg_mcc_cfg.AddCAFile("../echo/testcacert.pem"); arc_deleg_mcc_cfg.AddCADir("../echo/certificates"); //Create a delegation SOAP client logger.msg(Arc::INFO, "Creating a delegation soap client"); Arc::ClientX509Delegation *arc_deleg_client = NULL; arc_deleg_client = new Arc::ClientX509Delegation(arc_deleg_mcc_cfg, arc_deleg_url); //Delegate a credential to the delegation service. //The credential inside mcc configuration is used for delegating. 
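// ----------------------------------------------------------------------------
// Editor's note: a minimal sketch (not part of the original tarball) that
// condenses the two delegation steps performed right after this comment in
// test_ClientX509Delegation_ARC.cpp: create a delegation at the ARC
// delegation service, then fetch the delegated credential back by its ID.
// ClientX509Delegation, createDelegation(), acquireDelegation() and the
// DELEG_ARC flavour are taken from the surrounding code (DELEG_GRIDSITE is
// the equivalent for a GridSite service); the include paths and credential
// file names are assumptions.
// ----------------------------------------------------------------------------
#include <string>
#include <iostream>
#include <arc/URL.h>
#include <arc/communication/ClientX509Delegation.h>   // assumed header path

int main() {
  Arc::MCCConfig cfg;
  cfg.AddPrivateKey("../echo/testuserkey-nopass.pem");   // credential that will be delegated
  cfg.AddCertificate("../echo/testusercert.pem");
  cfg.AddCAFile("../echo/testcacert.pem");
  Arc::ClientX509Delegation client(cfg, Arc::URL("https://127.0.0.1:60000/delegation"));
  std::string id;
  // Step 1: delegate the credential held in the MCC configuration.
  if(!client.createDelegation(Arc::DELEG_ARC, id)) {
    std::cerr << "Delegation to the ARC delegation service failed" << std::endl;
    return 1;
  }
  // Step 2: retrieve the delegated credential by its ID.
  std::string cred;
  if(!client.acquireDelegation(Arc::DELEG_ARC, cred, id)) {
    std::cerr << "Could not retrieve delegated credential " << id << std::endl;
    return 1;
  }
  std::cout << "Delegation ID: " << id << std::endl;
  return 0;
}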
std::string arc_delegation_id; if(arc_deleg_client) { if(!(arc_deleg_client->createDelegation(Arc::DELEG_ARC, arc_delegation_id))) { logger.msg(Arc::ERROR, "Delegation to ARC delegation service failed"); throw std::runtime_error("Delegation to ARC delegation service failed"); } } logger.msg(Arc::INFO, "Delegation ID: %s", arc_delegation_id.c_str()); //Acquire the delegated credential from the delegation service std::string arc_delegation_cred; if(!arc_deleg_client->acquireDelegation(Arc::DELEG_ARC, arc_delegation_cred, arc_delegation_id)) { logger.msg(Arc::ERROR,"Can not get the delegation credential: %s from delegation service:%s",arc_delegation_id.c_str(),arc_deleg_url_str.c_str()); throw std::runtime_error("Can not get the delegation credential from delegation service"); } logger.msg(Arc::INFO, "Delegated credential from delegation service: %s", arc_delegation_cred.c_str()); if(arc_deleg_client) delete arc_deleg_client; return 0; } nordugrid-arc-5.4.2/src/tests/PaxHeaders.7502/echo0000644000000000000000000000013013214316026017762 xustar000000000000000029 mtime=1513200662.22677472 30 atime=1513200668.719854133 29 ctime=1513200662.22677472 nordugrid-arc-5.4.2/src/tests/echo/0000755000175000002070000000000013214316026020107 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/tests/echo/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712062610204022074 xustar000000000000000027 mtime=1355485316.277046 30 atime=1513200605.776084306 30 ctime=1513200662.213774561 nordugrid-arc-5.4.2/src/tests/echo/Makefile.am0000644000175000002070000000557612062610204022153 0ustar00mockbuildmock00000000000000SUBDIRS = schema pkglib_LTLIBRARIES = libecho.la noinst_PROGRAMS = test test_client test_clientinterface test_service perftest exampledir = $(pkgdatadir)/examples/echo example_DATA = echo_service.xml.example dist_example_DATA = echo.wsdl libecho_la_SOURCES = echo.cpp echo.h libecho_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libecho_la_LIBADD = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) libecho_la_LDFLAGS = -no-undefined -avoid-version -module test_SOURCES = test.cpp test_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_LDFLAGS = $(GLIBMM_LIBS) $(LIBXML2_LIBS) test_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la test_client_SOURCES = test_client.cpp test_client_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_client_LDFLAGS = $(GLIBMM_LIBS) $(LIBXML2_LIBS) test_client_LDADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la test_clientinterface_SOURCES = test_clientinterface.cpp test_clientinterface_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_clientinterface_LDFLAGS = $(GLIBMM_LIBS) $(LIBXML2_LIBS) test_clientinterface_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ 
$(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la test_service_SOURCES = test_service.cpp test_service_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_service_LDFLAGS = $(GLIBMM_LIBS) $(LIBXML2_LIBS) test_service_LDADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la perftest_SOURCES = perftest.cpp perftest_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) perftest_LDFLAGS = $(GLIBMM_LIBS) $(LIBXML2_LIBS) perftest_LDADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la nordugrid-arc-5.4.2/src/tests/echo/PaxHeaders.7502/test_clientinterface.cpp0000644000000000000000000000012412042216423024742 xustar000000000000000027 mtime=1351163155.111915 27 atime=1513200575.423713 30 ctime=1513200662.223774683 nordugrid-arc-5.4.2/src/tests/echo/test_clientinterface.cpp0000644000175000002070000001611012042216423025006 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include int main(void) { signal(SIGTTOU,SIG_IGN); signal(SIGTTIN,SIG_IGN); Arc::Logger logger(Arc::Logger::rootLogger, "Test"); Arc::LogStream logcerr(std::cerr); Arc::Logger::rootLogger.addDestination(logcerr); /************************************************/ std::string url_str("https://127.0.0.1:60000/echo"); Arc::URL url(url_str); Arc::MCCConfig mcc_cfg; mcc_cfg.AddPrivateKey("testkey-nopass.pem"); mcc_cfg.AddCertificate("testcert.pem"); mcc_cfg.AddCAFile("testcacert.pem"); mcc_cfg.AddCADir("certificates"); Arc::NS echo_ns; echo_ns["echo"]="http://www.nordugrid.org/schemas/echo"; /******** Test to service **********/ //Create a SOAP client logger.msg(Arc::INFO, "Creating a soap client"); #if 0 Arc::XMLNode sechanlder_nd("\ \ generate\ digest\ user\ passwd\ "); #endif Arc::XMLNode sechanlder_nd("\ \ generate\ ./testcert.pem\ ./testkey-nopass.pem\ "); Arc::ClientSOAP *client; client = new Arc::ClientSOAP(mcc_cfg,url,60); client->AddSecHandler(sechanlder_nd, "arcshc"); // Create and send echo request logger.msg(Arc::INFO, "Creating and sending request"); Arc::PayloadSOAP req(echo_ns); req.NewChild("echo").NewChild("say")="HELLO"; Arc::PayloadSOAP* resp = NULL; std::string str; req.GetXML(str); std::cout<<"request: "<process(&req,&resp); if(!status) { logger.msg(Arc::ERROR, "SOAP invokation failed"); throw std::runtime_error("SOAP invokation failed"); } if(resp == NULL) { logger.msg(Arc::ERROR,"There was no SOAP response"); throw std::runtime_error("There was no SOAP response"); } std::string xml; resp->GetXML(xml); std::cout << "XML: "<< xml << std::endl; std::cout << "Response: " << (std::string)((*resp)["echoResponse"]["hear"]) << std::endl; if(resp) delete resp; if(client) delete client; #if 0 std::string idp_name = "https://idp.testshib.org/idp/shibboleth"; /******** Test to service with SAML2SSO **********/ //Create a HTTP client logger.msg(Arc::INFO, "Creating a http client"); std::string username = "myself"; std::string password = "myself"; Arc::ClientHTTPwithSAML2SSO *client_http; client_http = new 
Arc::ClientHTTPwithSAML2SSO(mcc_cfg,url); logger.msg(Arc::INFO, "Creating and sending request"); Arc::PayloadRaw req_http; //req_http.Insert(); Arc::PayloadRawInterface* resp_http = NULL; Arc::HTTPClientInfo info; if(client_http) { Arc::MCC_Status status = client_http->process("GET", "echo", &req_http,&info,&resp_http, idp_name, username, password); if(!status) { logger.msg(Arc::ERROR, "HTTP with SAML2SSO invokation failed"); throw std::runtime_error("HTTP with SAML2SSO invokation failed"); } if(resp_http == NULL) { logger.msg(Arc::ERROR,"There was no HTTP response"); throw std::runtime_error("There was no HTTP response"); } } if(resp_http) delete resp_http; if(client_http) delete client_http; //Create a SOAP client logger.msg(Arc::INFO, "Creating a soap client"); Arc::ClientSOAPwithSAML2SSO *client_soap; client_soap = new Arc::ClientSOAPwithSAML2SSO(mcc_cfg,url); logger.msg(Arc::INFO, "Creating and sending request"); Arc::PayloadSOAP req_soap(echo_ns); req_soap.NewChild("echo").NewChild("say")="HELLO"; Arc::PayloadSOAP* resp_soap = NULL; if(client_soap) { Arc::MCC_Status status = client_soap->process(&req_soap,&resp_soap, idp_name, username, password); if(!status) { logger.msg(Arc::ERROR, "SOAP with SAML2SSO invokation failed"); throw std::runtime_error("SOAP with SAML2SSO invokation failed"); } if(resp_soap == NULL) { logger.msg(Arc::ERROR,"There was no SOAP response"); throw std::runtime_error("There was no SOAP response"); } } std::string xml_soap; resp_soap->GetXML(xml_soap); std::cout << "XML: "<< xml_soap << std::endl; std::cout << "Response: " << (std::string)((*resp_soap)["echoResponse"]["hear"]) << std::endl; if(resp_soap) delete resp_soap; if(client_soap) delete client_soap; #endif #if 0 /******** Test to ARC delegation service **********/ std::string arc_deleg_url_str("https://127.0.0.1:60000/delegation"); Arc::URL arc_deleg_url(arc_deleg_url_str); Arc::MCCConfig arc_deleg_mcc_cfg; arc_deleg_mcc_cfg.AddPrivateKey("testuserkey-nopass.pem"); arc_deleg_mcc_cfg.AddCertificate("testusercert.pem"); //arc_deleg_mcc_cfg.AddCAFile("testcacert.pem"); arc_deleg_mcc_cfg.AddCADir("certificates"); //Create a delegation SOAP client logger.msg(Arc::INFO, "Creating a delegation soap client"); Arc::ClientX509Delegation *arc_deleg_client = NULL; arc_deleg_client = new Arc::ClientX509Delegation(arc_deleg_mcc_cfg, arc_deleg_url); std::string arc_delegation_id; if(arc_deleg_client) { if(!(arc_deleg_client->createDelegation(Arc::DELEG_ARC, arc_delegation_id))) { logger.msg(Arc::ERROR, "Delegation to ARC delegation service failed"); throw std::runtime_error("Delegation to ARC delegation service failed"); } } logger.msg(Arc::INFO, "Delegation ID: %s", arc_delegation_id.c_str()); if(arc_deleg_client) delete arc_deleg_client; /******** Test to gridsite delegation service **********/ std::string gs_deleg_url_str("https://cream.grid.upjs.sk:8443/ce-cream/services/gridsite-delegation"); Arc::URL gs_deleg_url(gs_deleg_url_str); Arc::MCCConfig gs_deleg_mcc_cfg; gs_deleg_mcc_cfg.AddProxy("x509up_u126587"); //gs_deleg_mcc_cfg.AddPrivateKey("userkey-nopass.pem"); //gs_deleg_mcc_cfg.AddCertificate("usercert.pem"); gs_deleg_mcc_cfg.AddCADir("certificates"); //Create a delegation SOAP client logger.msg(Arc::INFO, "Creating a delegation soap client"); Arc::ClientX509Delegation *gs_deleg_client = NULL; gs_deleg_client = new Arc::ClientX509Delegation(gs_deleg_mcc_cfg, gs_deleg_url); std::string gs_delegation_id; gs_delegation_id = Arc::UUID(); if(gs_deleg_client) { 
if(!(gs_deleg_client->createDelegation(Arc::DELEG_GRIDSITE, gs_delegation_id))) { logger.msg(Arc::ERROR, "Delegation to gridsite delegation service failed"); throw std::runtime_error("Delegation to gridsite delegation service failed"); } } logger.msg(Arc::INFO, "Delegation ID: %s", gs_delegation_id.c_str()); if(gs_deleg_client) delete gs_deleg_client; #endif return 0; } nordugrid-arc-5.4.2/src/tests/echo/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315735022114 xustar000000000000000030 mtime=1513200605.846085162 30 atime=1513200652.320653564 30 ctime=1513200662.214774573 nordugrid-arc-5.4.2/src/tests/echo/Makefile.in0000644000175000002070000014123513214315735022170 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ noinst_PROGRAMS = test$(EXEEXT) test_client$(EXEEXT) \ test_clientinterface$(EXEEXT) test_service$(EXEEXT) \ perftest$(EXEEXT) subdir = src/tests/echo DIST_COMMON = README $(dist_example_DATA) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in $(srcdir)/echo_service.xml.example.in \ $(srcdir)/perftest.1.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = perftest.1 echo_service.xml.example CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = 
$(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(exampledir)" \ "$(DESTDIR)$(exampledir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libecho_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) am_libecho_la_OBJECTS = libecho_la-echo.lo libecho_la_OBJECTS = $(am_libecho_la_OBJECTS) libecho_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libecho_la_CXXFLAGS) \ $(CXXFLAGS) $(libecho_la_LDFLAGS) $(LDFLAGS) -o $@ PROGRAMS = $(noinst_PROGRAMS) am_perftest_OBJECTS = perftest-perftest.$(OBJEXT) perftest_OBJECTS = $(am_perftest_OBJECTS) perftest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la perftest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(perftest_CXXFLAGS) \ $(CXXFLAGS) $(perftest_LDFLAGS) $(LDFLAGS) -o $@ am_test_OBJECTS = test-test.$(OBJEXT) test_OBJECTS = $(am_test_OBJECTS) test_DEPENDENCIES = $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la test_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(test_CXXFLAGS) $(CXXFLAGS) \ $(test_LDFLAGS) $(LDFLAGS) -o $@ am_test_client_OBJECTS = test_client-test_client.$(OBJEXT) test_client_OBJECTS = $(am_test_client_OBJECTS) test_client_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la test_client_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(test_client_CXXFLAGS) \ $(CXXFLAGS) $(test_client_LDFLAGS) $(LDFLAGS) -o $@ am_test_clientinterface_OBJECTS = \ test_clientinterface-test_clientinterface.$(OBJEXT) test_clientinterface_OBJECTS = $(am_test_clientinterface_OBJECTS) test_clientinterface_DEPENDENCIES = $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la test_clientinterface_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(test_clientinterface_CXXFLAGS) $(CXXFLAGS) \ $(test_clientinterface_LDFLAGS) $(LDFLAGS) -o $@ am_test_service_OBJECTS = test_service-test_service.$(OBJEXT) test_service_OBJECTS = $(am_test_service_OBJECTS) test_service_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ 
$(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la test_service_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(test_service_CXXFLAGS) \ $(CXXFLAGS) $(test_service_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libecho_la_SOURCES) $(perftest_SOURCES) $(test_SOURCES) \ $(test_client_SOURCES) $(test_clientinterface_SOURCES) \ $(test_service_SOURCES) DIST_SOURCES = $(libecho_la_SOURCES) $(perftest_SOURCES) \ $(test_SOURCES) $(test_client_SOURCES) \ $(test_clientinterface_SOURCES) $(test_service_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive DATA = $(dist_example_DATA) $(example_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ 
AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ 
JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = 
@includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = schema pkglib_LTLIBRARIES = libecho.la exampledir = $(pkgdatadir)/examples/echo example_DATA = echo_service.xml.example dist_example_DATA = echo.wsdl libecho_la_SOURCES = echo.cpp echo.h libecho_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libecho_la_LIBADD = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) libecho_la_LDFLAGS = -no-undefined -avoid-version -module test_SOURCES = test.cpp test_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_LDFLAGS = $(GLIBMM_LIBS) $(LIBXML2_LIBS) test_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la test_client_SOURCES = test_client.cpp test_client_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_client_LDFLAGS = $(GLIBMM_LIBS) $(LIBXML2_LIBS) test_client_LDADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la test_clientinterface_SOURCES = test_clientinterface.cpp test_clientinterface_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_clientinterface_LDFLAGS = $(GLIBMM_LIBS) $(LIBXML2_LIBS) test_clientinterface_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la test_service_SOURCES = test_service.cpp test_service_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_service_LDFLAGS = $(GLIBMM_LIBS) $(LIBXML2_LIBS) test_service_LDADD = \ 
$(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la perftest_SOURCES = perftest.cpp perftest_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) perftest_LDFLAGS = $(GLIBMM_LIBS) $(LIBXML2_LIBS) perftest_LDADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/tests/echo/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/tests/echo/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): perftest.1: $(top_builddir)/config.status $(srcdir)/perftest.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ echo_service.xml.example: $(top_builddir)/config.status $(srcdir)/echo_service.xml.example.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libecho.la: $(libecho_la_OBJECTS) $(libecho_la_DEPENDENCIES) $(libecho_la_LINK) -rpath $(pkglibdir) $(libecho_la_OBJECTS) $(libecho_la_LIBADD) $(LIBS) clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" 
$$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list perftest$(EXEEXT): $(perftest_OBJECTS) $(perftest_DEPENDENCIES) @rm -f perftest$(EXEEXT) $(perftest_LINK) $(perftest_OBJECTS) $(perftest_LDADD) $(LIBS) test$(EXEEXT): $(test_OBJECTS) $(test_DEPENDENCIES) @rm -f test$(EXEEXT) $(test_LINK) $(test_OBJECTS) $(test_LDADD) $(LIBS) test_client$(EXEEXT): $(test_client_OBJECTS) $(test_client_DEPENDENCIES) @rm -f test_client$(EXEEXT) $(test_client_LINK) $(test_client_OBJECTS) $(test_client_LDADD) $(LIBS) test_clientinterface$(EXEEXT): $(test_clientinterface_OBJECTS) $(test_clientinterface_DEPENDENCIES) @rm -f test_clientinterface$(EXEEXT) $(test_clientinterface_LINK) $(test_clientinterface_OBJECTS) $(test_clientinterface_LDADD) $(LIBS) test_service$(EXEEXT): $(test_service_OBJECTS) $(test_service_DEPENDENCIES) @rm -f test_service$(EXEEXT) $(test_service_LINK) $(test_service_OBJECTS) $(test_service_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libecho_la-echo.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/perftest-perftest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_client-test_client.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_clientinterface-test_clientinterface.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_service-test_service.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libecho_la-echo.lo: echo.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libecho_la_CXXFLAGS) $(CXXFLAGS) -MT libecho_la-echo.lo -MD -MP -MF $(DEPDIR)/libecho_la-echo.Tpo -c -o libecho_la-echo.lo `test -f 'echo.cpp' || echo '$(srcdir)/'`echo.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libecho_la-echo.Tpo $(DEPDIR)/libecho_la-echo.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='echo.cpp' object='libecho_la-echo.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX 
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libecho_la_CXXFLAGS) $(CXXFLAGS) -c -o libecho_la-echo.lo `test -f 'echo.cpp' || echo '$(srcdir)/'`echo.cpp perftest-perftest.o: perftest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(perftest_CXXFLAGS) $(CXXFLAGS) -MT perftest-perftest.o -MD -MP -MF $(DEPDIR)/perftest-perftest.Tpo -c -o perftest-perftest.o `test -f 'perftest.cpp' || echo '$(srcdir)/'`perftest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/perftest-perftest.Tpo $(DEPDIR)/perftest-perftest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='perftest.cpp' object='perftest-perftest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(perftest_CXXFLAGS) $(CXXFLAGS) -c -o perftest-perftest.o `test -f 'perftest.cpp' || echo '$(srcdir)/'`perftest.cpp perftest-perftest.obj: perftest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(perftest_CXXFLAGS) $(CXXFLAGS) -MT perftest-perftest.obj -MD -MP -MF $(DEPDIR)/perftest-perftest.Tpo -c -o perftest-perftest.obj `if test -f 'perftest.cpp'; then $(CYGPATH_W) 'perftest.cpp'; else $(CYGPATH_W) '$(srcdir)/perftest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/perftest-perftest.Tpo $(DEPDIR)/perftest-perftest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='perftest.cpp' object='perftest-perftest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(perftest_CXXFLAGS) $(CXXFLAGS) -c -o perftest-perftest.obj `if test -f 'perftest.cpp'; then $(CYGPATH_W) 'perftest.cpp'; else $(CYGPATH_W) '$(srcdir)/perftest.cpp'; fi` test-test.o: test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -MT test-test.o -MD -MP -MF $(DEPDIR)/test-test.Tpo -c -o test-test.o `test -f 'test.cpp' || echo '$(srcdir)/'`test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test-test.Tpo $(DEPDIR)/test-test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test.cpp' object='test-test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -c -o test-test.o `test -f 'test.cpp' || echo '$(srcdir)/'`test.cpp test-test.obj: test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -MT test-test.obj -MD -MP -MF $(DEPDIR)/test-test.Tpo -c -o test-test.obj `if test -f 'test.cpp'; then $(CYGPATH_W) 'test.cpp'; else $(CYGPATH_W) '$(srcdir)/test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test-test.Tpo $(DEPDIR)/test-test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test.cpp' object='test-test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -c -o test-test.obj `if test -f 'test.cpp'; then $(CYGPATH_W) 'test.cpp'; else 
$(CYGPATH_W) '$(srcdir)/test.cpp'; fi` test_client-test_client.o: test_client.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_client_CXXFLAGS) $(CXXFLAGS) -MT test_client-test_client.o -MD -MP -MF $(DEPDIR)/test_client-test_client.Tpo -c -o test_client-test_client.o `test -f 'test_client.cpp' || echo '$(srcdir)/'`test_client.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_client-test_client.Tpo $(DEPDIR)/test_client-test_client.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_client.cpp' object='test_client-test_client.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_client_CXXFLAGS) $(CXXFLAGS) -c -o test_client-test_client.o `test -f 'test_client.cpp' || echo '$(srcdir)/'`test_client.cpp test_client-test_client.obj: test_client.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_client_CXXFLAGS) $(CXXFLAGS) -MT test_client-test_client.obj -MD -MP -MF $(DEPDIR)/test_client-test_client.Tpo -c -o test_client-test_client.obj `if test -f 'test_client.cpp'; then $(CYGPATH_W) 'test_client.cpp'; else $(CYGPATH_W) '$(srcdir)/test_client.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_client-test_client.Tpo $(DEPDIR)/test_client-test_client.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_client.cpp' object='test_client-test_client.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_client_CXXFLAGS) $(CXXFLAGS) -c -o test_client-test_client.obj `if test -f 'test_client.cpp'; then $(CYGPATH_W) 'test_client.cpp'; else $(CYGPATH_W) '$(srcdir)/test_client.cpp'; fi` test_clientinterface-test_clientinterface.o: test_clientinterface.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_clientinterface_CXXFLAGS) $(CXXFLAGS) -MT test_clientinterface-test_clientinterface.o -MD -MP -MF $(DEPDIR)/test_clientinterface-test_clientinterface.Tpo -c -o test_clientinterface-test_clientinterface.o `test -f 'test_clientinterface.cpp' || echo '$(srcdir)/'`test_clientinterface.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_clientinterface-test_clientinterface.Tpo $(DEPDIR)/test_clientinterface-test_clientinterface.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_clientinterface.cpp' object='test_clientinterface-test_clientinterface.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_clientinterface_CXXFLAGS) $(CXXFLAGS) -c -o test_clientinterface-test_clientinterface.o `test -f 'test_clientinterface.cpp' || echo '$(srcdir)/'`test_clientinterface.cpp test_clientinterface-test_clientinterface.obj: test_clientinterface.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_clientinterface_CXXFLAGS) $(CXXFLAGS) -MT test_clientinterface-test_clientinterface.obj -MD -MP -MF $(DEPDIR)/test_clientinterface-test_clientinterface.Tpo -c -o test_clientinterface-test_clientinterface.obj `if test -f 'test_clientinterface.cpp'; then $(CYGPATH_W) 
'test_clientinterface.cpp'; else $(CYGPATH_W) '$(srcdir)/test_clientinterface.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_clientinterface-test_clientinterface.Tpo $(DEPDIR)/test_clientinterface-test_clientinterface.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_clientinterface.cpp' object='test_clientinterface-test_clientinterface.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_clientinterface_CXXFLAGS) $(CXXFLAGS) -c -o test_clientinterface-test_clientinterface.obj `if test -f 'test_clientinterface.cpp'; then $(CYGPATH_W) 'test_clientinterface.cpp'; else $(CYGPATH_W) '$(srcdir)/test_clientinterface.cpp'; fi` test_service-test_service.o: test_service.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_service_CXXFLAGS) $(CXXFLAGS) -MT test_service-test_service.o -MD -MP -MF $(DEPDIR)/test_service-test_service.Tpo -c -o test_service-test_service.o `test -f 'test_service.cpp' || echo '$(srcdir)/'`test_service.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_service-test_service.Tpo $(DEPDIR)/test_service-test_service.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_service.cpp' object='test_service-test_service.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_service_CXXFLAGS) $(CXXFLAGS) -c -o test_service-test_service.o `test -f 'test_service.cpp' || echo '$(srcdir)/'`test_service.cpp test_service-test_service.obj: test_service.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_service_CXXFLAGS) $(CXXFLAGS) -MT test_service-test_service.obj -MD -MP -MF $(DEPDIR)/test_service-test_service.Tpo -c -o test_service-test_service.obj `if test -f 'test_service.cpp'; then $(CYGPATH_W) 'test_service.cpp'; else $(CYGPATH_W) '$(srcdir)/test_service.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_service-test_service.Tpo $(DEPDIR)/test_service-test_service.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_service.cpp' object='test_service-test_service.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_service_CXXFLAGS) $(CXXFLAGS) -c -o test_service-test_service.obj `if test -f 'test_service.cpp'; then $(CYGPATH_W) 'test_service.cpp'; else $(CYGPATH_W) '$(srcdir)/test_service.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_exampleDATA: $(dist_example_DATA) @$(NORMAL_INSTALL) test -z "$(exampledir)" || $(MKDIR_P) "$(DESTDIR)$(exampledir)" @list='$(dist_example_DATA)'; test -n "$(exampledir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(exampledir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(exampledir)" || exit $$?; \ done uninstall-dist_exampleDATA: @$(NORMAL_UNINSTALL) @list='$(dist_example_DATA)'; test -n "$(exampledir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( 
cd '$(DESTDIR)$(exampledir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(exampledir)" && rm -f $$files install-exampleDATA: $(example_DATA) @$(NORMAL_INSTALL) test -z "$(exampledir)" || $(MKDIR_P) "$(DESTDIR)$(exampledir)" @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(exampledir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(exampledir)" || exit $$?; \ done uninstall-exampleDATA: @$(NORMAL_UNINSTALL) @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(exampledir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(exampledir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . 
|| ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) $(DATA) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(exampledir)" "$(DESTDIR)$(exampledir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstPROGRAMS \ clean-pkglibLTLIBRARIES mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dist_exampleDATA install-exampleDATA install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-dist_exampleDATA uninstall-exampleDATA \ uninstall-pkglibLTLIBRARIES .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-noinstPROGRAMS clean-pkglibLTLIBRARIES ctags \ ctags-recursive distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dist_exampleDATA install-dvi \ install-dvi-am install-exampleDATA install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-pkglibLTLIBRARIES install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am uninstall-dist_exampleDATA \ uninstall-exampleDATA uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/tests/echo/PaxHeaders.7502/test_client.cpp0000644000000000000000000000012411570717263023076 xustar000000000000000027 mtime=1306762931.686321 27 atime=1513200575.426713 30 ctime=1513200662.222774671 nordugrid-arc-5.4.2/src/tests/echo/test_client.cpp0000644000175000002070000000756511570717263023160 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include int main(void) { signal(SIGTTOU,SIG_IGN); signal(SIGTTIN,SIG_IGN); Arc::Logger logger(Arc::Logger::rootLogger, "Test"); Arc::LogStream logcerr(std::cerr); Arc::Logger::rootLogger.addDestination(logcerr); logger.msg(Arc::INFO, "Creating client side chain"); // Create client chain Arc::XMLNode client_doc("\ \ \ .libs/\ ../../hed/mcc/http/.libs/\ ../../hed/mcc/soap/.libs/\ ../../hed/mcc/tls/.libs/\ ../../hed/mcc/tcp/.libs/\ ../../hed/shc/.libs/\ \ mcctcp\ mcctls\ mcchttp\ mccsoap\ arcshc\ \ 127.0.0.150000\ \ \ ./testkey-nopass.pem\ ./testcert.pem\ ./testcacert.pem\ \ \ POST\ /Echo\ \ \ \ \ \ "); Arc::Config client_config(client_doc); if(!client_config) { logger.msg(Arc::ERROR, "Failed to load client configuration"); return -1; }; Arc::MCCLoader client_loader(client_config); logger.msg(Arc::INFO, "Client side MCCs are loaded"); Arc::MCC* client_entry = client_loader["soap"]; if(!client_entry) { logger.msg(Arc::ERROR, "Client chain does not have entry point"); return -1; }; // for (int i = 0; i < 10; i++) { // Create and send echo request logger.msg(Arc::INFO, "Creating and sending request"); Arc::NS echo_ns; echo_ns["echo"]="http://www.nordugrid.org/schemas/echo"; Arc::PayloadSOAP req(echo_ns); req.NewChild("echo").NewChild("say")="HELLO"; Arc::Message reqmsg; Arc::Message repmsg; reqmsg.Payload(&req); // It is a responsibility of code initiating first Message to // provide Context and Attributes as well. 
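// (The attributes and context created below are plain stack objects owned by
// this function; the message chain only receives pointers to them.)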
Arc::MessageAttributes attributes_req; Arc::MessageAttributes attributes_rep; Arc::MessageContext context; reqmsg.Attributes(&attributes_req); reqmsg.Context(&context); repmsg.Attributes(&attributes_rep); repmsg.Context(&context); Arc::MCC_Status status = client_entry->process(reqmsg,repmsg); if(!status) { logger.msg(Arc::ERROR, "Request failed"); std::cerr << "Status: " << std::string(status) << std::endl; return -1; }; Arc::PayloadSOAP* resp = NULL; if(repmsg.Payload() == NULL) { logger.msg(Arc::ERROR, "There is no response"); return -1; }; try { resp = dynamic_cast(repmsg.Payload()); } catch(std::exception&) { }; if(resp == NULL) { logger.msg(Arc::ERROR, "Response is not SOAP"); return -1; }; std::string xml; resp->GetXML(xml); std::cout << "XML: "<< xml << std::endl; std::cout << "Response: " << (std::string)((*resp)["echoResponse"]["hear"]) << std::endl; //} return 0; } nordugrid-arc-5.4.2/src/tests/echo/PaxHeaders.7502/perftest.1.in0000644000000000000000000000012712123705613022372 xustar000000000000000027 mtime=1364167563.653962 30 atime=1513200652.335653748 30 ctime=1513200662.216774598 nordugrid-arc-5.4.2/src/tests/echo/perftest.1.in0000644000175000002070000000231512123705613022435 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH PERFTEST 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME perftest \- ARC1 service performance tester .SH DESCRIPTION With the .B perftest command we can test an ARC1 service performance .SH SYNOPSIS .B perftest [-c config] [-d debug] host port threads duration .SH OPTIONS .IP "\fB\ -c config \fR" The file containing client chain XML configuration with soap entry point and HOSTNAME, PORTNUMBER and PATH keyword for hostname, port and HTTP path of 'echo' service .TP .IP "\fB\ -d debug\fR" The textual representation of desired debug level. Available levels: DEBUG, VERBOSE, INFO, WARNING, ERROR, FATAL .TP .IP "\fB\ host \fR" The name of the host of the service .TP .IP "\fB\ port \fR" The port to use on the host .TP .IP "\fB\ threads \fR" The number of concurrent requests .TP .IP "\fB\ duration \fR" The duration of the test in seconds .SH REPORTING BUGS Report bugs to http://bugzilla.nordugrid.org .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org nordugrid-arc-5.4.2/src/tests/echo/PaxHeaders.7502/perftest.cpp0000644000000000000000000000012412301125744022403 xustar000000000000000027 mtime=1392815076.546455 27 atime=1513200575.411712 30 ctime=1513200662.219774635 nordugrid-arc-5.4.2/src/tests/echo/perftest.cpp0000644000175000002070000002251212301125744022452 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif // perftest.cpp #include #include #include #include #include #include #include #include #include #include #include #include #include // Some global shared variables... Glib::Mutex* mutex; bool run; int finishedThreads; unsigned long completedRequests; unsigned long failedRequests; unsigned long totalRequests; Glib::TimeVal completedTime; Glib::TimeVal failedTime; Glib::TimeVal totalTime; // The configuration string. std::string confString = "\ \ \ .libs/\ ../../hed/mcc/http/.libs/\ ../../hed/mcc/soap/.libs/\ ../../hed/mcc/tls/.libs/\ ../../hed/mcc/tcp/.libs/\ ../../lib/arc/\ \ mcctcp\ mcctls\ mcchttp\ mccsoap\ \ HOSTNAMEPORTNUMBER\ \ POSTHTTPPATH\ \ \ "; // Replace a substring by another substring. 
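// (main() below uses this helper to substitute the HOSTNAME, PORTNUMBER and
// HTTPPATH placeholders in confString with the actual service host, port and
// HTTP path.)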
void replace(std::string& str, const std::string& out, const std::string& in) { std::string::size_type index = str.find(out); if (index!=std::string::npos) str.replace(index, out.size(), in); } // Round off a double to an integer. int Round(double x){ return int(x+0.5); } // Send requests and collect statistics. void sendRequests(){ // Some variables... unsigned long completedRequests = 0; unsigned long failedRequests = 0; Glib::TimeVal completedTime(0,0); Glib::TimeVal failedTime(0,0); Glib::TimeVal tBefore; Glib::TimeVal tAfter; bool connected; while(run){ // Create a client chain. Arc::Config client_config(confString); if(!client_config) { std::cerr << "Failed to load client configuration." << std::endl; return; } Arc::MCCLoader client_loader(client_config); Arc::MCC* client_entry = client_loader["soap"]; if(!client_entry) { std::cerr << "Client chain have no entry point." << std::endl; return; } connected=true; Arc::MessageContext context; while(run and connected){ // Prepare the request. Arc::NS echo_ns; echo_ns["echo"]="http://www.nordugrid.org/schemas/echo"; Arc::PayloadSOAP req(echo_ns); req.NewChild("echo").NewChild("say")="HELLO"; Arc::Message reqmsg; Arc::Message repmsg; Arc::MessageAttributes attributes_req; Arc::MessageAttributes attributes_rep; reqmsg.Attributes(&attributes_req); reqmsg.Context(&context); reqmsg.Payload(&req); repmsg.Attributes(&attributes_rep); repmsg.Context(&context); // Send the request and time it. tBefore.assign_current_time(); Arc::MCC_Status status = client_entry->process(reqmsg,repmsg); tAfter.assign_current_time(); if(!status) { // Request failed. failedRequests++; failedTime+=tAfter-tBefore; connected=false; } else { Arc::PayloadSOAP* resp = NULL; if(repmsg.Payload() != NULL) { // There is response. try{ resp = dynamic_cast(repmsg.Payload()); } catch(std::exception&){ }; } if(resp == NULL) { // Response was not SOAP or no response at all. failedRequests++; failedTime+=tAfter-tBefore; connected=false; } else { std::string xml; resp->GetXML(xml); if (std::string((*resp)["echoResponse"]["hear"]).size()==0){ // The response was not what it should be. failedRequests++; failedTime+=tAfter-tBefore; connected=false; } else{ // Everything worked just fine! completedRequests++; completedTime+=tAfter-tBefore; } } } if(repmsg.Payload()) delete repmsg.Payload(); } } // Update global variables. Glib::Mutex::Lock lock(*mutex); ::completedRequests+=completedRequests; ::failedRequests+=failedRequests; ::completedTime+=completedTime; ::failedTime+=failedTime; finishedThreads++; std::cout << "Number of finished threads: " << finishedThreads << std::endl; } int main(int argc, char* argv[]){ // Some variables... std::string serviceHost; std::string portNumber; std::string httpPath("/Echo"); int numberOfThreads; int duration; int i; Glib::Thread** threads; const char* config_file = NULL; int debug_level = -1; Arc::LogStream logcerr(std::cerr); // Process options - quick hack, must use Glib options later while(argc >= 3) { if(strcmp(argv[1],"-c") == 0) { config_file = argv[2]; argv[2]=argv[0]; argv+=2; argc-=2; } else if(strcmp(argv[1],"-d") == 0) { debug_level=Arc::istring_to_level(argv[2]); argv[2]=argv[0]; argv+=2; argc-=2; } else { break; }; } if(config_file) { std::ifstream f(config_file); if(!f) { std::cerr << "File "<(f, confString, 0); } if(debug_level >= 0) { Arc::Logger::getRootLogger().setThreshold((Arc::LogLevel)debug_level); Arc::Logger::getRootLogger().addDestination(logcerr); } // Extract command line arguments. 
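// A typical invocation (the concrete values here are purely illustrative):
//   ./perftest -d INFO localhost 60000 10 30
// i.e. ten concurrent request threads against the echo service on
// localhost:60000 for 30 seconds.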
if (argc!=5){ std::cerr << "Wrong number of arguments!" << std::endl << std::endl << "Usage:" << std::endl << "perftest [-c config] [-d debug] host port threads duration" << std::endl << std::endl << "Arguments:" << std::endl << "host The name of the host of the service." << std::endl << "port The port to use on the host." << std::endl << "threads The number of concurrent requests." << std::endl << "duration The duration of the test in seconds." << std::endl << "config The file containing client chain XML configuration with " << std::endl << " 'soap' entry point and HOSTNAME, PORTNUMBER and PATH " << std::endl << " keyword for hostname, port and HTTP path of 'echo' service." << std::endl << "debug The textual representation of desired debug level. Available " << std::endl << " levels: DEBUG, VERBOSE, INFO, WARNING, ERROR, FATAL." << std::endl; exit(EXIT_FAILURE); } serviceHost = std::string(argv[1]); portNumber = std::string(argv[2]); numberOfThreads = atoi(argv[3]); duration = atoi(argv[4]); // Insert host name and port number into the configuration string. replace(confString, "HOSTNAME", serviceHost); replace(confString, "PORTNUMBER", portNumber); replace(confString, "HTTPPATH", httpPath); // Start threads. run=true; finishedThreads=0; //Glib::thread_init(); mutex=new Glib::Mutex; threads = new Glib::Thread*[numberOfThreads]; for (i=0; i #endif #include #include #include #include #include #include #include #include #include int main(void) { signal(SIGTTOU,SIG_IGN); signal(SIGTTIN,SIG_IGN); Arc::Logger logger(Arc::Logger::rootLogger, "Test"); Arc::LogStream logcerr1(std::cerr); Arc::Logger::rootLogger.addDestination(logcerr1); // Load service chain logger.msg(Arc::INFO, "Creating service side chain"); Arc::Config service_config("service.xml"); if(!service_config) { logger.msg(Arc::ERROR, "Failed to load service configuration"); return -1; }; Arc::MCCLoader service_loader(service_config); logger.msg(Arc::INFO, "Service side MCCs are loaded"); logger.msg(Arc::INFO, "Creating client interface"); Arc::MCCConfig client_cfg; // Paths to plugins in source tree client_cfg.AddPluginsPath("../../hed/mcc/http/.libs"); client_cfg.AddPluginsPath("../../hed/mcc/soap/.libs"); client_cfg.AddPluginsPath("../../hed/mcc/tls/.libs"); client_cfg.AddPluginsPath("../../hed/mcc/tcp/.libs"); client_cfg.AddPluginsPath("../../hed/shc/.libs"); // Specify credentials client_cfg.AddPrivateKey("./testkey-nopass.pem"); client_cfg.AddCertificate("./testcert.pem"); client_cfg.AddCAFile("./testcacert.pem"); // Create client instance for contacting echo service Arc::ClientSOAP client(client_cfg,Arc::URL("https://127.0.0.1:60000/echo"),60); // Add SecHandler to chain at TLS location to accept only // connection to server with specified DN std::list dns; // Making a list of allowed hosts. To make this test // fail change DN below. dns.push_back("/O=Grid/O=Test/CN=localhost"); // Creating SecHandler configuration with allowed DNs Arc::DNListHandlerConfig dncfg(dns,"outgoing"); // Adding SecHandler to client at TLS level. // We have to explicitly specify method of ClientTCP // class to attach SecHandler at proper location. client.Arc::ClientTCP::AddSecHandler(dncfg,Arc::TLSSec); logger.msg(Arc::INFO, "Client side MCCs are loaded"); for(int n = 0;n<1;n++) { // Create and send echo request logger.msg(Arc::INFO, "Creating and sending request"); Arc::NS echo_ns; Arc::PayloadSOAP req(echo_ns); // Making echo namespace appear at operation level only // This is probably not needed and is here for demonstration // purposes only. 
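// (The alternative, used in test_client.cpp, is to populate the namespace map
// before constructing the PayloadSOAP, so that the declaration is not limited
// to the operation element.)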
echo_ns["echo"]="http://www.nordugrid.org/schemas/echo"; Arc::XMLNode op = req.NewChild("echo:echo",echo_ns); op.NewChild("echo:say")="HELLO"; Arc::PayloadSOAP* resp = NULL; Arc::MCC_Status status = client.process(&req,&resp); if(!status) { logger.msg(Arc::ERROR, "Request failed"); if(resp) delete resp; return -1; }; if(resp == NULL) { logger.msg(Arc::ERROR, "There is no response"); return -1; }; logger.msg(Arc::INFO, "Request succeed!!!"); std::cout << "Response: " << (std::string)((*resp)["echoResponse"]["hear"]) << std::endl; delete resp; }; return 0; } nordugrid-arc-5.4.2/src/tests/echo/PaxHeaders.7502/test_service.cpp0000644000000000000000000000012411570717263023260 xustar000000000000000027 mtime=1306762931.686321 27 atime=1513200575.413712 30 ctime=1513200662.224774696 nordugrid-arc-5.4.2/src/tests/echo/test_service.cpp0000644000175000002070000000164511570717263023333 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include int main(void) { signal(SIGTTOU,SIG_IGN); signal(SIGTTIN,SIG_IGN); Arc::Logger logger(Arc::Logger::rootLogger, "Test"); Arc::LogStream logcerr(std::cerr); Arc::Logger::rootLogger.addDestination(logcerr); // Load service chain logger.msg(Arc::INFO, "Creating service side chain"); Arc::Config service_config("service.xml"); if(!service_config) { logger.msg(Arc::ERROR, "Failed to load service configuration"); return -1; }; Arc::MCCLoader service_loader(service_config); logger.msg(Arc::INFO, "Service side MCCs are loaded"); logger.msg(Arc::INFO, "Service is waiting for requests"); for(;;) { sleep(10); } return 0; } nordugrid-arc-5.4.2/src/tests/echo/PaxHeaders.7502/echo.wsdl0000644000000000000000000000012311337246323021660 xustar000000000000000026 mtime=1266502867.55712 27 atime=1513200575.424713 30 ctime=1513200662.212774549 nordugrid-arc-5.4.2/src/tests/echo/echo.wsdl0000644000175000002070000000442011337246323021726 0ustar00mockbuildmock00000000000000 nordugrid-arc-5.4.2/src/tests/echo/PaxHeaders.7502/echo.h0000644000000000000000000000012411730411253021130 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.417713 30 ctime=1513200662.218774622 nordugrid-arc-5.4.2/src/tests/echo/echo.h0000644000175000002070000000352311730411253021200 0ustar00mockbuildmock00000000000000#ifndef __ARC_ECHO_H__ #define __ARC_ECHO_H__ #include #include #include #include #include namespace Echo { /** This is a test service which accepts SOAP requests and produces response as described in echo.wsdl. Response contains string passed in request with prefix_ and suffix_ added. */ /** About the policy decision, here the echo service is used as an example to demostrate how to * implement and deploy it. * For service developer, he is supposed to marshall the pdp request into a internal structure * For service deployer, he is supposed to do the following two things: * a, write the policy according to its requirement, and based on the Policy.xsd schema. * b, configure the service.xml, put the pdp configuration into a The "name" attribute is the identifier for dynamic loading the ArcPDP object. 
The "policylocation" attribute is for the configuration of ArcPDP's policy */ class Service_Echo: public Arc::RegisteredService { protected: std::string prefix_; std::string suffix_; std::string policylocation_; Arc::NS ns_; Arc::MCC_Status make_fault(Arc::Message& outmsg,const std::string& txtmsg = ""); Arc::Logger logger; Arc::InformationContainer infodoc; public: /** Constructor accepts configuration describing content of prefix and suffix */ Service_Echo(Arc::Config *cfg, Arc::PluginArgument *parg); virtual ~Service_Echo(void); /** Service request processing routine */ virtual Arc::MCC_Status process(Arc::Message&,Arc::Message&); bool RegistrationCollector(Arc::XMLNode &doc); }; } // namespace Echo #endif /* __ARC_ECHO_H__ */ nordugrid-arc-5.4.2/src/tests/echo/PaxHeaders.7502/echo_service.xml.example.in0000644000000000000000000000012711746176510025276 xustar000000000000000027 mtime=1335426376.695858 30 atime=1513200652.349653919 30 ctime=1513200662.215774585 nordugrid-arc-5.4.2/src/tests/echo/echo_service.xml.example.in0000644000175000002070000000466411746176510025352 0ustar00mockbuildmock00000000000000 /var/run/arched.pid /var/log/arc/arched.log @prefix@/lib/arc/ mcctcp mcctls mcchttp mccsoap arcshc echo 60000 /etc/grid-security/hostkey.pem /etc/grid-security/hostcert.pem /etc/grid-security/certificates POST ^/Echo$ [ ] echo_service_id 127.0.0.1 P15M nordugrid-arc-5.4.2/src/tests/echo/PaxHeaders.7502/echo.cpp0000644000000000000000000000012312675602216021474 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.428713 29 ctime=1513200662.21777461 nordugrid-arc-5.4.2/src/tests/echo/echo.cpp0000644000175000002070000001274612675602216021554 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "echo.h" static Arc::Plugin* get_service(Arc::PluginArgument* arg) { Arc::ServicePluginArgument* mccarg = arg?dynamic_cast(arg):NULL; if(!mccarg) return NULL; return new Echo::Service_Echo((Arc::Config*)(*mccarg),arg); } extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "echo", "HED:SERVICE", NULL, 0, &get_service }, { NULL, NULL, NULL, 0, NULL } }; using namespace Arc; namespace Echo { Service_Echo::Service_Echo(Arc::Config *cfg, Arc::PluginArgument *parg):RegisteredService(cfg,parg),logger(Arc::Logger::rootLogger, "Echo") { ns_["echo"]="http://www.nordugrid.org/schemas/echo"; prefix_=(std::string)((*cfg)["prefix"]); suffix_=(std::string)((*cfg)["suffix"]); #if 0 // Parse the policy location information, and put them into a map container for later using for(int i=0;; i++) { Arc::XMLNode cn = (*cfg).Child(i); if(!cn) break; if(MatchXMLName(cn, "SecHandler")) { for(int j=0;; j++) { Arc::XMLNode gn = cn.Child(j); if(!gn) break; if(MatchXMLName(gn, "PDP")) { policylocation_ = (std::string)(gn.Attribute("policylocation")); } } } } #endif // Assigning service description - Glue2 document should go here. 
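// (The InformationContainer filled here is what later answers WSRF
// resource-property requests, which process() below recognises by the
// http://docs.oasis-open.org/wsrf/rp-2 namespace of the first SOAP child.)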
infodoc.Assign(Arc::XMLNode( "" "okproductionECHO" ),true); } Service_Echo::~Service_Echo(void) { } Arc::MCC_Status Service_Echo::make_fault(Arc::Message& outmsg,const std::string& txtmsg) { Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(ns_,true); Arc::SOAPFault* fault = outpayload->Fault(); if(fault) { fault->Code(Arc::SOAPFault::Sender); if(txtmsg.empty()) { fault->Reason("Failed processing request"); } else { logger.msg(Arc::ERROR, txtmsg); fault->Reason(txtmsg); }; }; outmsg.Payload(outpayload); return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status Service_Echo::process(Arc::Message& inmsg,Arc::Message& outmsg) { //Store policy location into message attribute //inmsg.Attributes()->add("PDP:POLICYLOCATION", policylocation_); // Check authorization if(!ProcessSecHandlers(inmsg, "incoming")) { logger.msg(Arc::ERROR, "echo: Unauthorized"); return Arc::MCC_Status(Arc::GENERIC_ERROR); }; // Both input and output are supposed to be SOAP // Extracting payload Arc::PayloadSOAP* inpayload = NULL; try { inpayload = dynamic_cast(inmsg.Payload()); } catch(std::exception& e) { }; if(!inpayload) { return make_fault(outmsg,"Input is not SOAP"); }; { std::string str; inpayload->GetDoc(str, true); logger.msg(Arc::VERBOSE, "process: request=%s",str); }; Arc::PayloadSOAP* outpayload = NULL; /**Export the formated policy-decision request**/ MessageAuth* mauth = inmsg.Auth(); MessageAuth* cauth = inmsg.AuthContext(); if((!mauth) && (!cauth)) { logger.msg(ERROR,"Missing security object in message"); return Arc::MCC_Status(); }; NS ns; XMLNode requestxml(ns,""); if(mauth) { if(!mauth->Export(SecAttr::ARCAuth,requestxml)) { delete mauth; logger.msg(ERROR,"Failed to convert security information to ARC request"); return Arc::MCC_Status(); }; }; if(cauth) { if(!cauth->Export(SecAttr::ARCAuth,requestxml)) { delete mauth; logger.msg(ERROR,"Failed to convert security information to ARC request"); return Arc::MCC_Status(); }; }; /** */ // Analyzing request // Checking if it's info request if(MatchXMLNamespace(inpayload->Child(0),"http://docs.oasis-open.org/wsrf/rp-2")) { Arc::SOAPEnvelope* outxml = infodoc.Process(*inpayload); if(!outxml) { return make_fault(outmsg,"WSRF request processing failed"); }; outpayload = new Arc::PayloadSOAP(*outxml); delete outxml; } else if((*inpayload)["size"]){ Arc::XMLNode echo_op = (*inpayload)["size"]; int size = atoi(std::string(echo_op["size"]).c_str()); std::string msg = "Message for you, sir"; msg.resize(size,'0'); std::string say = echo_op["say"]; std::string hear = prefix_+say+suffix_; outpayload = new Arc::PayloadSOAP(ns_); outpayload->NewChild("echo:echoResponse").NewChild("echo:hear")=msg; } else { // Then it must be 'echo' operation requested Arc::XMLNode echo_op = (*inpayload)["echo"]; if(!echo_op) { return make_fault(outmsg,"Request is not supported - "+echo_op.Name()); }; std::string say = echo_op["say"]; std::string hear = prefix_+say+suffix_; outpayload = new Arc::PayloadSOAP(ns_); outpayload->NewChild("echo:echoResponse").NewChild("echo:hear")=hear; }; outmsg.Payload(outpayload); { std::string str; outpayload->GetDoc(str, true); logger.msg(Arc::VERBOSE, "process: response=%s",str); }; return Arc::MCC_Status(Arc::STATUS_OK); } bool Service_Echo::RegistrationCollector(Arc::XMLNode &doc) { // RegEntry element generation Arc::XMLNode empty(ns_, "RegEntry"); empty.New(doc); doc.NewChild("SrcAdv").NewChild("Type") = "org.nordugrid.tests.echo"; return true; } } // namespace Echo 
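The get_service() factory and the ARC_PLUGINS_TABLE_NAME descriptor table at the top of echo.cpp are the boilerplate that every loadable HED service in this package follows. The sketch below is not part of the shipped sources; it restates that pattern in isolation as a service that answers every request with a SOAP fault. The class name Service_Null, the plugin name "null" and the header paths are illustrative guesses (the original #include lines are not visible in this listing), so treat it as a rough template under those assumptions rather than a definitive implementation.

// minimal_null_service.cpp - illustrative sketch only; header paths are
// guesses because the original #include lines of echo.cpp are stripped
// from this listing.
#include <arc/message/PayloadSOAP.h>
#include <arc/infosys/RegisteredService.h>
#include <arc/loader/Plugin.h>

namespace Demo {

// A service that answers every SOAP request with a Sender fault.
class Service_Null: public Arc::RegisteredService {
 public:
  Service_Null(Arc::Config* cfg, Arc::PluginArgument* parg)
    : RegisteredService(cfg, parg) {}
  virtual Arc::MCC_Status process(Arc::Message& /*inmsg*/, Arc::Message& outmsg) {
    Arc::NS ns;
    Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(ns, true);
    Arc::SOAPFault* fault = outpayload->Fault();
    if(fault) {
      fault->Code(Arc::SOAPFault::Sender);
      fault->Reason("This service implements no operations");
    }
    outmsg.Payload(outpayload);
    return Arc::MCC_Status(Arc::STATUS_OK);
  }
  // Same shape as Service_Echo::RegistrationCollector().
  bool RegistrationCollector(Arc::XMLNode& doc) {
    Arc::NS ns;
    Arc::XMLNode empty(ns, "RegEntry");
    empty.New(doc);
    doc.NewChild("SrcAdv").NewChild("Type") = "org.nordugrid.tests.null";
    return true;
  }
};

} // namespace Demo

// Factory invoked by the loader, mirroring get_service() in echo.cpp.
static Arc::Plugin* get_null_service(Arc::PluginArgument* arg) {
  Arc::ServicePluginArgument* sarg =
    arg ? dynamic_cast<Arc::ServicePluginArgument*>(arg) : NULL;
  if(!sarg) return NULL;
  return new Demo::Service_Null((Arc::Config*)(*sarg), arg);
}

// Descriptor table scanned by the loader; "HED:SERVICE" marks a service plugin.
extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = {
  { "null", "HED:SERVICE", NULL, 0, &get_null_service },
  { NULL, NULL, NULL, 0, NULL }
};

Such a module, once built, would be selected from an arched configuration by its plugin name in the same way the example configuration in this directory selects the echo service.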
nordugrid-arc-5.4.2/src/tests/echo/PaxHeaders.7502/schema0000644000000000000000000000013213214316026021224 xustar000000000000000030 mtime=1513200662.247774977 30 atime=1513200668.719854133 30 ctime=1513200662.247774977 nordugrid-arc-5.4.2/src/tests/echo/schema/0000755000175000002070000000000013214316026021347 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/tests/echo/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711255700321023337 xustar000000000000000027 mtime=1253540049.444682 30 atime=1513200605.862085358 30 ctime=1513200662.245774952 nordugrid-arc-5.4.2/src/tests/echo/schema/Makefile.am0000644000175000002070000000013511255700321023400 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = echo.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/tests/echo/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315735023353 xustar000000000000000030 mtime=1513200605.895085761 29 atime=1513200652.36365409 30 ctime=1513200662.246774965 nordugrid-arc-5.4.2/src/tests/echo/schema/Makefile.in0000644000175000002070000004347613214315735023440 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/tests/echo/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = 
@ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = 
@LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ 
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = echo.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/tests/echo/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/tests/echo/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 
's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/tests/echo/schema/PaxHeaders.7502/echo.xsd0000644000000000000000000000012411255700321022736 xustar000000000000000027 mtime=1253540049.444682 27 atime=1513200575.428713 30 ctime=1513200662.247774977 nordugrid-arc-5.4.2/src/tests/echo/schema/echo.xsd0000644000175000002070000000130611255700321023003 0ustar00mockbuildmock00000000000000 Prefix of the response string. Suffix of the response string. 
nordugrid-arc-5.4.2/src/tests/echo/PaxHeaders.7502/README0000644000000000000000000000012410670337274020735 xustar000000000000000027 mtime=1189199548.200925 27 atime=1513200575.426713 30 ctime=1513200662.211774537 nordugrid-arc-5.4.2/src/tests/echo/README0000644000175000002070000000023310670337274021000 0ustar00mockbuildmock00000000000000Update certs: openssl genrsa 1024 > key.pem openssl req -config ./openssl.cnf -new -x509 -nodes -sha1 -days 365 -key key.pem > cert.pem cp cert.pem ca.pem nordugrid-arc-5.4.2/src/tests/PaxHeaders.7502/xpath0000644000000000000000000000013213214316026020172 xustar000000000000000030 mtime=1513200662.265775197 30 atime=1513200668.719854133 30 ctime=1513200662.265775197 nordugrid-arc-5.4.2/src/tests/xpath/0000755000175000002070000000000013214316026020315 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/tests/xpath/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515022312 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200606.134088685 30 ctime=1513200662.263775173 nordugrid-arc-5.4.2/src/tests/xpath/Makefile.am0000644000175000002070000000036512052416515022360 0ustar00mockbuildmock00000000000000noinst_PROGRAMS = query query_SOURCES = query.cpp query_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) query_LDADD = $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) nordugrid-arc-5.4.2/src/tests/xpath/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315736022323 xustar000000000000000030 mtime=1513200606.179089235 30 atime=1513200652.425654849 30 ctime=1513200662.264775185 nordugrid-arc-5.4.2/src/tests/xpath/Makefile.in0000644000175000002070000005441213214315736022377 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ noinst_PROGRAMS = query$(EXEEXT) subdir = src/tests/xpath DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = PROGRAMS = $(noinst_PROGRAMS) am_query_OBJECTS = query-query.$(OBJEXT) query_OBJECTS = $(am_query_OBJECTS) am__DEPENDENCIES_1 = query_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) query_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(query_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(query_SOURCES) DIST_SOURCES = $(query_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = 
@ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = 
@LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = 
@pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ query_SOURCES = query.cpp query_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) query_LDADD = $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/tests/xpath/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/tests/xpath/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list query$(EXEEXT): $(query_OBJECTS) $(query_DEPENDENCIES) @rm -f query$(EXEEXT) $(query_LINK) $(query_OBJECTS) $(query_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/query-query.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) 
@AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< query-query.o: query.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(query_CXXFLAGS) $(CXXFLAGS) -MT query-query.o -MD -MP -MF $(DEPDIR)/query-query.Tpo -c -o query-query.o `test -f 'query.cpp' || echo '$(srcdir)/'`query.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/query-query.Tpo $(DEPDIR)/query-query.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='query.cpp' object='query-query.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(query_CXXFLAGS) $(CXXFLAGS) -c -o query-query.o `test -f 'query.cpp' || echo '$(srcdir)/'`query.cpp query-query.obj: query.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(query_CXXFLAGS) $(CXXFLAGS) -MT query-query.obj -MD -MP -MF $(DEPDIR)/query-query.Tpo -c -o query-query.obj `if test -f 'query.cpp'; then $(CYGPATH_W) 'query.cpp'; else $(CYGPATH_W) '$(srcdir)/query.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/query-query.Tpo $(DEPDIR)/query-query.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='query.cpp' object='query-query.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(query_CXXFLAGS) $(CXXFLAGS) -c -o query-query.obj `if test -f 'query.cpp'; then $(CYGPATH_W) 'query.cpp'; else $(CYGPATH_W) '$(srcdir)/query.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) 
$(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-noinstPROGRAMS \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstPROGRAMS ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/tests/xpath/PaxHeaders.7502/query.cpp0000644000000000000000000000012411335100256022117 xustar000000000000000027 mtime=1265926318.157741 27 atime=1513200575.432713 30 ctime=1513200662.265775197 nordugrid-arc-5.4.2/src/tests/xpath/query.cpp0000644000175000002070000000231511335100256022165 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include <config.h> #endif #include <cstdio> #include <cstdlib> #include <string> #include <fstream> #include <iostream> #include <list> #include <glibmm.h> #include <arc/XMLNode.h> int main(int argc, char **argv) { if(argc < 3) return -1; int filenum = atoi(argv[1]); int attrnum = atoi(argv[2]); for (int i = 0; i < filenum; i++) { // load content of file char fname[200]; snprintf(fname, sizeof(fname), "file%d.xml", i); std::string xml_str = ""; std::string str; std::ifstream f(fname); while (f >> str) { xml_str.append(str); xml_str.append(" "); } f.close(); Arc::XMLNode doc(xml_str); Arc::NS ns; Glib::Rand r; int n = r.get_int_range(0, attrnum); char query[200]; snprintf(query, sizeof(query), "//AttributeName%d", n); std::cout << "Query: " << query << std::endl; std::list<Arc::XMLNode> result = doc.XPathLookup(query, ns); std::list<Arc::XMLNode>::iterator it; for (it = result.begin(); it != result.end(); it++) { std::cout << fname << ":" << (*it).Name() << ":" << std::string(*it) << std::endl; } } return 0; } nordugrid-arc-5.4.2/src/tests/PaxHeaders.7502/delegation0000644000000000000000000000013213214316026021161 xustar000000000000000030 mtime=1513200662.303775662 30 atime=1513200668.719854133 30 ctime=1513200662.303775662 nordugrid-arc-5.4.2/src/tests/delegation/0000755000175000002070000000000013214316026021304 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/tests/delegation/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515023301 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200605.711083511 30 
ctime=1513200662.300775625 nordugrid-arc-5.4.2/src/tests/delegation/Makefile.am0000644000175000002070000000231712052416515023346 0ustar00mockbuildmock00000000000000noinst_PROGRAMS = test_delegation_client test_client_with_delegation_sechandler test_delegation_client_SOURCES = test_delegation_client.cpp test_delegation_client_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_delegation_client_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) test_client_with_delegation_sechandler_SOURCES = \ test_client_with_delegation_sechandler.cpp test_client_with_delegation_sechandler_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_client_with_delegation_sechandler_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) nordugrid-arc-5.4.2/src/tests/delegation/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315735023310 xustar000000000000000029 mtime=1513200605.76008411 30 atime=1513200652.393654457 30 ctime=1513200662.301775637 nordugrid-arc-5.4.2/src/tests/delegation/Makefile.in0000644000175000002070000007242513214315735023371 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ noinst_PROGRAMS = test_delegation_client$(EXEEXT) \ test_client_with_delegation_sechandler$(EXEEXT) subdir = src/tests/delegation DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = PROGRAMS = $(noinst_PROGRAMS) am_test_client_with_delegation_sechandler_OBJECTS = test_client_with_delegation_sechandler-test_client_with_delegation_sechandler.$(OBJEXT) test_client_with_delegation_sechandler_OBJECTS = \ $(am_test_client_with_delegation_sechandler_OBJECTS) am__DEPENDENCIES_1 = test_client_with_delegation_sechandler_DEPENDENCIES = $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) test_client_with_delegation_sechandler_LINK = $(LIBTOOL) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(test_client_with_delegation_sechandler_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_test_delegation_client_OBJECTS = \ test_delegation_client-test_delegation_client.$(OBJEXT) test_delegation_client_OBJECTS = $(am_test_delegation_client_OBJECTS) test_delegation_client_DEPENDENCIES = $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) test_delegation_client_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(test_delegation_client_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile 
$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(test_client_with_delegation_sechandler_SOURCES) \ $(test_delegation_client_SOURCES) DIST_SOURCES = $(test_client_with_delegation_sechandler_SOURCES) \ $(test_delegation_client_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = 
@GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = 
@abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ test_delegation_client_SOURCES = test_delegation_client.cpp test_delegation_client_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_delegation_client_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) test_client_with_delegation_sechandler_SOURCES = \ test_client_with_delegation_sechandler.cpp test_client_with_delegation_sechandler_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_client_with_delegation_sechandler_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case 
'$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/tests/delegation/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/tests/delegation/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list test_client_with_delegation_sechandler$(EXEEXT): $(test_client_with_delegation_sechandler_OBJECTS) $(test_client_with_delegation_sechandler_DEPENDENCIES) @rm -f test_client_with_delegation_sechandler$(EXEEXT) $(test_client_with_delegation_sechandler_LINK) $(test_client_with_delegation_sechandler_OBJECTS) $(test_client_with_delegation_sechandler_LDADD) $(LIBS) test_delegation_client$(EXEEXT): $(test_delegation_client_OBJECTS) $(test_delegation_client_DEPENDENCIES) @rm -f test_delegation_client$(EXEEXT) $(test_delegation_client_LINK) $(test_delegation_client_OBJECTS) $(test_delegation_client_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_client_with_delegation_sechandler-test_client_with_delegation_sechandler.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_delegation_client-test_delegation_client.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< 
test_client_with_delegation_sechandler-test_client_with_delegation_sechandler.o: test_client_with_delegation_sechandler.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_client_with_delegation_sechandler_CXXFLAGS) $(CXXFLAGS) -MT test_client_with_delegation_sechandler-test_client_with_delegation_sechandler.o -MD -MP -MF $(DEPDIR)/test_client_with_delegation_sechandler-test_client_with_delegation_sechandler.Tpo -c -o test_client_with_delegation_sechandler-test_client_with_delegation_sechandler.o `test -f 'test_client_with_delegation_sechandler.cpp' || echo '$(srcdir)/'`test_client_with_delegation_sechandler.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_client_with_delegation_sechandler-test_client_with_delegation_sechandler.Tpo $(DEPDIR)/test_client_with_delegation_sechandler-test_client_with_delegation_sechandler.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_client_with_delegation_sechandler.cpp' object='test_client_with_delegation_sechandler-test_client_with_delegation_sechandler.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_client_with_delegation_sechandler_CXXFLAGS) $(CXXFLAGS) -c -o test_client_with_delegation_sechandler-test_client_with_delegation_sechandler.o `test -f 'test_client_with_delegation_sechandler.cpp' || echo '$(srcdir)/'`test_client_with_delegation_sechandler.cpp test_client_with_delegation_sechandler-test_client_with_delegation_sechandler.obj: test_client_with_delegation_sechandler.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_client_with_delegation_sechandler_CXXFLAGS) $(CXXFLAGS) -MT test_client_with_delegation_sechandler-test_client_with_delegation_sechandler.obj -MD -MP -MF $(DEPDIR)/test_client_with_delegation_sechandler-test_client_with_delegation_sechandler.Tpo -c -o test_client_with_delegation_sechandler-test_client_with_delegation_sechandler.obj `if test -f 'test_client_with_delegation_sechandler.cpp'; then $(CYGPATH_W) 'test_client_with_delegation_sechandler.cpp'; else $(CYGPATH_W) '$(srcdir)/test_client_with_delegation_sechandler.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_client_with_delegation_sechandler-test_client_with_delegation_sechandler.Tpo $(DEPDIR)/test_client_with_delegation_sechandler-test_client_with_delegation_sechandler.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_client_with_delegation_sechandler.cpp' object='test_client_with_delegation_sechandler-test_client_with_delegation_sechandler.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_client_with_delegation_sechandler_CXXFLAGS) $(CXXFLAGS) -c -o test_client_with_delegation_sechandler-test_client_with_delegation_sechandler.obj `if test -f 'test_client_with_delegation_sechandler.cpp'; then $(CYGPATH_W) 'test_client_with_delegation_sechandler.cpp'; else $(CYGPATH_W) '$(srcdir)/test_client_with_delegation_sechandler.cpp'; fi` test_delegation_client-test_delegation_client.o: test_delegation_client.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_delegation_client_CXXFLAGS) $(CXXFLAGS) -MT test_delegation_client-test_delegation_client.o -MD 
-MP -MF $(DEPDIR)/test_delegation_client-test_delegation_client.Tpo -c -o test_delegation_client-test_delegation_client.o `test -f 'test_delegation_client.cpp' || echo '$(srcdir)/'`test_delegation_client.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_delegation_client-test_delegation_client.Tpo $(DEPDIR)/test_delegation_client-test_delegation_client.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_delegation_client.cpp' object='test_delegation_client-test_delegation_client.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_delegation_client_CXXFLAGS) $(CXXFLAGS) -c -o test_delegation_client-test_delegation_client.o `test -f 'test_delegation_client.cpp' || echo '$(srcdir)/'`test_delegation_client.cpp test_delegation_client-test_delegation_client.obj: test_delegation_client.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_delegation_client_CXXFLAGS) $(CXXFLAGS) -MT test_delegation_client-test_delegation_client.obj -MD -MP -MF $(DEPDIR)/test_delegation_client-test_delegation_client.Tpo -c -o test_delegation_client-test_delegation_client.obj `if test -f 'test_delegation_client.cpp'; then $(CYGPATH_W) 'test_delegation_client.cpp'; else $(CYGPATH_W) '$(srcdir)/test_delegation_client.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_delegation_client-test_delegation_client.Tpo $(DEPDIR)/test_delegation_client-test_delegation_client.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_delegation_client.cpp' object='test_delegation_client-test_delegation_client.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_delegation_client_CXXFLAGS) $(CXXFLAGS) -c -o test_delegation_client-test_delegation_client.obj `if test -f 'test_delegation_client.cpp'; then $(CYGPATH_W) 'test_delegation_client.cpp'; else $(CYGPATH_W) '$(srcdir)/test_delegation_client.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if 
(nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-noinstPROGRAMS \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstPROGRAMS ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/tests/delegation/PaxHeaders.7502/test_delegation_client.cpp0000644000000000000000000000012412042216423026451 xustar000000000000000027 mtime=1351163155.111915 27 atime=1513200575.395712 30 ctime=1513200662.303775662 nordugrid-arc-5.4.2/src/tests/delegation/test_delegation_client.cpp0000644000175000002070000000733012042216423026521 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include //The following is for showing how to use the specific client API // (ClientX509Delegation) to delegate a proxy to ARC delegation //service, and gLite (gridsite) delegation service individually. int main(void) { signal(SIGTTOU,SIG_IGN); signal(SIGTTIN,SIG_IGN); Arc::Logger logger(Arc::Logger::rootLogger, "Test"); Arc::LogStream logcerr(std::cerr); Arc::Logger::rootLogger.addDestination(logcerr); /******** Test to ARC delegation service **********/ //Note the endpoint here should be changed //into the actual endpoint of delegation service. 
// std::string arc_deleg_url_str("https://127.0.0.1:60000/delegation"); std::string arc_deleg_url_str("https://glueball.uio.no:60000/delegation"); Arc::URL arc_deleg_url(arc_deleg_url_str); Arc::MCCConfig arc_deleg_mcc_cfg; arc_deleg_mcc_cfg.AddPrivateKey("../echo/testkey-nopass.pem"); arc_deleg_mcc_cfg.AddCertificate("../echo/testcert.pem"); arc_deleg_mcc_cfg.AddCAFile("../echo/testcacert.pem"); arc_deleg_mcc_cfg.AddCADir("../echo/certificates"); //Create a delegation SOAP client logger.msg(Arc::INFO, "Creating a delegation soap client"); Arc::ClientX509Delegation *arc_deleg_client = NULL; arc_deleg_client = new Arc::ClientX509Delegation(arc_deleg_mcc_cfg, arc_deleg_url); std::string arc_delegation_id; if(arc_deleg_client) { if(!(arc_deleg_client->createDelegation(Arc::DELEG_ARC, arc_delegation_id))) { logger.msg(Arc::ERROR, "Delegation to ARC delegation service failed"); if(arc_deleg_client) delete arc_deleg_client; return 1; } } logger.msg(Arc::INFO, "Delegation ID: %s", arc_delegation_id.c_str()); if(arc_deleg_client) delete arc_deleg_client; /******** Test to gridsite delegation service **********/ std::string gs_deleg_url_str("https://cream.grid.upjs.sk:8443/ce-cream/services/gridsite-delegation"); Arc::URL gs_deleg_url(gs_deleg_url_str); Arc::MCCConfig gs_deleg_mcc_cfg; //Somehow the gridsite delegation service only accepts a proxy certificate, //not the EEC certificate, so we need to generate the proxy certificate //first. //Note the proxy needs to be generated before running this test. //And the proxy should be created by using a credential signed by officially //certified CAs, if the peer gridsite delegation service only trusts //official CAs, not a testing CA such as the InstantCA. It also applies to //the delegation to the ARC delegation service.
Arc::User user; std::string proxy_path = Glib::build_filename(Glib::get_tmp_dir(),"x509up_u" + Arc::tostring(user.get_uid())); gs_deleg_mcc_cfg.AddProxy(proxy_path); gs_deleg_mcc_cfg.AddCADir("../echo/certificates"); //Create a delegation SOAP client logger.msg(Arc::INFO, "Creating a delegation soap client"); Arc::ClientX509Delegation *gs_deleg_client = NULL; gs_deleg_client = new Arc::ClientX509Delegation(gs_deleg_mcc_cfg, gs_deleg_url); std::string gs_delegation_id; gs_delegation_id = Arc::UUID(); if(gs_deleg_client) { if(!(gs_deleg_client->createDelegation(Arc::DELEG_GRIDSITE, gs_delegation_id))) { logger.msg(Arc::ERROR, "Delegation to gridsite delegation service failed"); if(gs_deleg_client) delete gs_deleg_client; return 1; } } logger.msg(Arc::INFO, "Delegation ID: %s", gs_delegation_id.c_str()); if(gs_deleg_client) delete gs_deleg_client; return 0; } nordugrid-arc-5.4.2/src/tests/delegation/PaxHeaders.7502/test_client_with_delegation_sechandler.cpp0000644000000000000000000000012312042216423031673 xustar000000000000000027 mtime=1351163155.111915 27 atime=1513200575.397712 29 ctime=1513200662.30277565 nordugrid-arc-5.4.2/src/tests/delegation/test_client_with_delegation_sechandler.cpp0000644000175000002070000001037612042216423031750 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include static Arc::Logger& logger = Arc::Logger::rootLogger; int main(void) { setlocale(LC_ALL, ""); Arc::LogStream logcerr(std::cerr); Arc::Logger::getRootLogger().addDestination(logcerr); // Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); //This is an example that shows how the client or the client called //by a service, delegates a proxy to a delegation service. //Note the "DelegationServiceEndpoint" should be changed according //the actual delegation endpoint. Arc::XMLNode sechanlder_nd("\ \ x509\ delegator\ \ https://glueball.uio.no:60000/delegation\ https://127.0.0.1:60000/echo\ ../echo/userkey-nopass.pem\ ../echo/usercert.pem\ \ \ ../echo/testcacert.pem\ ../echo/certificates\ "); /*For the firstly client in the service invocation chain, the credential path should be configured for the 'delegator' role delegation handler. ../echo/testkey-nopass.pem\ ../echo/testcert.pem\ \ Alternatively, For the clients which are called in the intermediate service inside the service invocation chain, the the 'Identity' should be configured for the 'delegator' role delegation handler. The 'Identity' can be parsed from the 'incoming' message context of the service itself by service implementation: std::string identity= msg->Attributes()->get("TLS:IDENTITYDN"); Afterwards, the service implementation should change the client (the client that this service will call to contact the next intemediate service) configuration to add 'DelegationCredIdentity' like the following. /O=KnowARC/OU=UiO/CN=squark.uio.no\ Filling "DelegationCredIdentity" element is the only code that is needed for the ARC services that need to utilize the delegation functionality (more specifically, to launch a more level of delegation). 
*/ std::string url_str("https://127.0.0.1:60000/echo"); Arc::URL url(url_str); Arc::MCCConfig mcc_cfg; mcc_cfg.AddPrivateKey("../echo/userkey-nopass.pem"); mcc_cfg.AddCertificate("../echo/usercert.pem"); mcc_cfg.AddCADir("../echo/certificates"); mcc_cfg.AddCAFile("../echo/testcacert.pem"); //Create a SOAP client logger.msg(Arc::INFO, "Creating a soap client"); Arc::ClientSOAP *client; client = new Arc::ClientSOAP(mcc_cfg,url,60); client->AddSecHandler(sechanlder_nd, "arcshc"); //Create and send echo request logger.msg(Arc::INFO, "Creating and sending request"); Arc::NS echo_ns; echo_ns["echo"]="http://www.nordugrid.org/schemas/echo"; Arc::PayloadSOAP req(echo_ns); req.NewChild("echo").NewChild("say")="HELLO"; Arc::PayloadSOAP* resp = NULL; std::string str; req.GetXML(str); std::cout<<"request: "<<str<<std::endl; Arc::MCC_Status status = client->process(&req,&resp); if(!status) { logger.msg(Arc::ERROR, "SOAP invocation failed"); } if(resp == NULL) { logger.msg(Arc::ERROR,"There was no SOAP response"); } std::string xml; resp->GetXML(xml); std::cout << "XML: "<< xml << std::endl; std::cout << "Response: " << (std::string)((*resp)["echoResponse"]["hear"]) << std::endl; if(resp) delete resp; if(client) delete client; return 0; } nordugrid-arc-5.4.2/src/tests/delegation/PaxHeaders.7502/README0000644000000000000000000000012311270012023022104 xustar000000000000000026 mtime=1256199187.16649 27 atime=1513200575.397712 30 ctime=1513200662.299775613 nordugrid-arc-5.4.2/src/tests/delegation/README0000644000175000002070000000434011270012023022153 0ustar00mockbuildmock00000000000000Some explanation of the source files and XML templates under this directory. /*************/ test_client_with_delegation_sechandler.cpp This file shows how to add a security handler to the client configuration so that a proxy can be delegated onto a delegation service (the ARC delegation service). The delegation process is completed together with the first SOAP operation. Once the delegation has succeeded, subsequent SOAP operations do not need to repeat it. /*************/ test_delegation_client.cpp This file shows how to call a specific client interface (ClientX509Delegation) for delegating a proxy onto a delegation service (the ARC delegation service or the gridsite delegation service). In the case of delegating a proxy onto the gridsite service: the CREAM service is designed to be physically hosted together with the gridsite delegation service, (possibly) shares the same repository of proxies with it, and in particular has an interface to consume the DelegationID from the client; therefore the ClientX509Delegation class can be explicitly called to interact with the CREAM service and the gridsite delegation service. In the case of delegating a proxy onto the ARC delegation service, the ARC delegation service is not designed as a service that is by default known by (and hosted together with) the ARC services, and the ARC services themselves have no interface to consume the DelegationID from the client; therefore ClientX509Delegation should not be called explicitly to interact with ARC services and the ARC delegation service. Instead, it is called by the delegation security handler ("delegator" and "delegatee" roles) to delegate (by "delegator") a proxy onto an ARC delegation service, and then acquire (by "delegatee") a proxy from the ARC delegation service. Since everything is done by the security handler, (almost) nothing needs to change in the client/service code. In this way the delegation process applies generally to ARC services.
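For quick reference, the explicit client-side pattern used in test_delegation_client.cpp boils down to a few calls. The sketch below is only an illustration distilled from that test program; the endpoint URL and credential paths are placeholders to be replaced with real values, the include list is omitted, and error handling is reduced to the minimum:

    Arc::MCCConfig cfg;
    cfg.AddPrivateKey("/path/to/key.pem");      // placeholder credential paths
    cfg.AddCertificate("/path/to/cert.pem");
    cfg.AddCADir("/path/to/certificates");
    Arc::URL url("https://example.org:60000/delegation"); // placeholder delegation service endpoint
    Arc::ClientX509Delegation client(cfg, url);            // delegation SOAP client
    std::string delegation_id;
    if (!client.createDelegation(Arc::DELEG_ARC, delegation_id)) {
      // delegation failed; the test programs report this via Arc::Logger
    }
    // on success, delegation_id identifies the proxy stored on the delegation service

For the security-handler approach of test_client_with_delegation_sechandler.cpp no such explicit call is needed: the handler attached with AddSecHandler() performs the delegation during the first SOAP operation.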
/*************/ delegation_service_template.xml The example service configuration for delegation service /*************/ service_with_delegationsechandler_template.xml The example service configuration for an ARC service that needs to utilize the delegation functionality. nordugrid-arc-5.4.2/src/tests/PaxHeaders.7502/README0000644000000000000000000000012411037472457020020 xustar000000000000000027 mtime=1216247087.350292 27 atime=1513200575.408712 30 ctime=1513200662.187774243 nordugrid-arc-5.4.2/src/tests/README0000644000175000002070000000002311037472457020060 0ustar00mockbuildmock00000000000000Testing playground nordugrid-arc-5.4.2/src/PaxHeaders.7502/hed0000644000000000000000000000013213214316022016440 xustar000000000000000030 mtime=1513200658.754732256 30 atime=1513200668.719854133 30 ctime=1513200658.754732256 nordugrid-arc-5.4.2/src/hed/0000755000175000002070000000000013214316022016563 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712047045306020564 xustar000000000000000027 mtime=1352420038.089673 30 atime=1513200593.679936365 30 ctime=1513200658.749732194 nordugrid-arc-5.4.2/src/hed/Makefile.am0000644000175000002070000000147412047045306020634 0ustar00mockbuildmock00000000000000# order is important! SUBDIRS = libs acc mcc dmc shc daemon identitymap DIST_SUBDIRS = libs acc mcc dmc shc daemon identitymap profiledir = $(pkgdatadir)/profiles profile_DATA = profiles/general/general.xml profileexampledir = $(pkgdatadir)/examples/config profileexample_DATA = profiles/*/*.xml install-profileexampleDATA: $(profile_DATA) for i in profiles/*; do \ profile=`basename $$i` ; \ mkdir -p "$(DESTDIR)$(profileexampledir)/$$profile" ; \ ( cd profiles/$$profile ; for i in *.xml ; do \ test -f $$i && cp -p $$i "$(DESTDIR)$(profileexampledir)/$$profile/" || : ; \ done ) ; \ done uninstall-profileexampleDATA: for p in $(profile_DATA); do \ p=`echo $$p | sed 's|^\(.*/\)\?profiles/||'`; \ rm -f "$(DESTDIR)$(profileexampledir)/$$p"; \ done nordugrid-arc-5.4.2/src/hed/PaxHeaders.7502/identitymap0000644000000000000000000000013213214316026020773 xustar000000000000000030 mtime=1513200662.019772188 30 atime=1513200668.719854133 30 ctime=1513200662.019772188 nordugrid-arc-5.4.2/src/hed/identitymap/0000755000175000002070000000000013214316026021116 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/identitymap/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515023113 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200595.882963309 30 ctime=1513200662.007772042 nordugrid-arc-5.4.2/src/hed/identitymap/Makefile.am0000644000175000002070000000406012052416515023155 0ustar00mockbuildmock00000000000000SUBDIRS = schema if ARGUS_ENABLED pkglib_LTLIBRARIES = libidentitymap.la libarguspdpclient.la libarguspepclient.la else pkglib_LTLIBRARIES = libidentitymap.la libarguspdpclient.la endif libidentitymap_la_SOURCES = IdentityMap.cpp SimpleMap.cpp \ IdentityMap.h SimpleMap.h libidentitymap_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libidentitymap_la_LIBADD = \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) libidentitymap_la_LDFLAGS = -no-undefined -avoid-version -module if ARGUS_ENABLED libarguspepclient_la_SOURCES = ArgusPEPClient.cpp ArgusPEPClient.h 
libarguspepclient_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(ARGUS_CFLAGS) $(AM_CXXFLAGS) libarguspepclient_la_LIBADD = \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(ARGUS_LIBS) libarguspepclient_la_LDFLAGS = -no-undefined -avoid-version -module endif libarguspdpclient_la_SOURCES = ArgusPDPClient.cpp ArgusPDPClient.h ArgusXACMLConstant.h libarguspdpclient_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarguspdpclient_la_LIBADD = \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) libarguspdpclient_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/identitymap/PaxHeaders.7502/ArgusXACMLConstant.h0000644000000000000000000000012312033271336024603 xustar000000000000000027 mtime=1349350110.187364 27 atime=1513200574.442701 29 ctime=1513200662.01177209 nordugrid-arc-5.4.2/src/hed/identitymap/ArgusXACMLConstant.h0000644000175000002070000004616512033271336024665 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ARGUSPEP_CONSTANT_H__ #define __ARC_SEC_ARGUSPEP_CONSTANT_H__ namespace ArcSec { /* * XACML Data-types identifiers (XACML 2.0, Appendix B.3) */ static const char XACML_DATATYPE_X500NAME[]= "urn:oasis:names:tc:xacml:1.0:data-type:x500Name"; /**< XACML data-type @c x500Name identifier (XACML 2.0, B.3) */ static const char XACML_DATATYPE_RFC822NAME[]= "urn:oasis:names:tc:xacml:1.0:data-type:rfc822Name"; /**< XACML data-type @c rfc822Name identifier (XACML 2.0, B.3) */ static const char XACML_DATATYPE_IPADDRESS[]= "urn:oasis:names:tc:xacml:1.0:data-type:ipAddress"; /**< XACML data-type @c ipAddress identifier (XACML 2.0, B.3) */ static const char XACML_DATATYPE_DNSNAME[]= "urn:oasis:names:tc:xacml:1.0:data-type:dnsName"; /**< XACML data-type @c dnsName identifier (XACML 2.0, B.3) */ static const char XACML_DATATYPE_STRING[]= "http://www.w3.org/2001/XMLSchema#string"; /**< XACML data-type @c string identifier (XACML 2.0, B.3) */ static const char XACML_DATATYPE_BOOLEAN[]= "http://www.w3.org/2001/XMLSchema#boolean"; /**< XACML data-type @c boolean identifier (XACML 2.0, B.3) */ static const char XACML_DATATYPE_INTEGER[]= "http://www.w3.org/2001/XMLSchema#integer"; /**< XACML data-type @c integer identifier (XACML 2.0, B.3) */ static const char XACML_DATATYPE_DOUBLE[]= "http://www.w3.org/2001/XMLSchema#double"; /**< XACML data-type @c double identifier (XACML 2.0, B.3) */ static const char XACML_DATATYPE_TIME[]= "http://www.w3.org/2001/XMLSchema#time"; /**< XACML data-type @c time identifier (XACML 2.0, B.3) */ static const char XACML_DATATYPE_DATE[]= "http://www.w3.org/2001/XMLSchema#date"; /**< XACML data-type @c date identifier (XACML 2.0, B.3) */ static const char XACML_DATATYPE_DATETIME[]= "http://www.w3.org/2001/XMLSchema#dateTime"; /**< XACML data-type @c dateTime identifier (XACML 2.0, B.3) */ static const char XACML_DATATYPE_ANYURI[]= "http://www.w3.org/2001/XMLSchema#anyURI"; /**< XACML data-type @c anyURI identifier (XACML 2.0, B.3) */ static 
const char XACML_DATATYPE_HEXBINARY[]= "http://www.w3.org/2001/XMLSchema#hexBinary"; /**< XACML data-type @c hexBinary identifier (XACML 2.0, B.3) */ /* * PEP XACML Subject/Attribute identifiers and Subject/\@SubjectCategory values (XACML 2.0, Appendix B) */ static const char XACML_SUBJECT_ID[]= "urn:oasis:names:tc:xacml:1.0:subject:subject-id"; /**< XACML Subject/Attribute @c subject-id identifier (XACML 2.0, B.4) */ static const char XACML_SUBJECT_ID_QUALIFIER[]= "urn:oasis:names:tc:xacml:1.0:subject:subject-id-qualifier"; /**< XACML Subject/Attribute @c subject-id-qualifier identifier (XACML 2.0, B.4) */ static const char XACML_SUBJECT_KEY_INFO[]= "urn:oasis:names:tc:xacml:1.0:subject:key-info"; /**< XACML Subject/Attribute @c key-info identifier (XACML 2.0, B.4) */ static const char XACML_SUBJECT_CATEGORY_ACCESS[]= "urn:oasis:names:tc:xacml:1.0:subject-category:access-subject"; /**< XACML Subject/\@SubjectCategory attribute @b access-subject value (XACML 2.0, B.2) */ static const char XACML_SUBJECT_CATEGORY_INTERMEDIARY[]= "urn:oasis:names:tc:xacml:1.0:subject-category:intermediary-subject"; /**< XACML Subject/\@SubjectCategory attribute @b intermediary-subject value (XACML 2.0, B.2) */ static const char XACML_SUBJECT_CATEGORY_RECIPIENT[]= "urn:oasis:names:tc:xacml:1.0:subject-category:recipient-subject"; /**< XACML Subject/\@SubjectCategory attribute @b recipient-subject value (XACML 2.0, B.2) */ static const char XACML_SUBJECT_CATEGORY_CODEBASE[]= "urn:oasis:names:tc:xacml:1.0:subject-category:codebase"; /**< XACML Subject/\@SubjectCategory attribute @b codebase value (XACML 2.0, B.2) */ static const char XACML_SUBJECT_CATEGORY_REQUESTING_MACHINE[]= "urn:oasis:names:tc:xacml:1.0:subject-category:requesting-machine"; /**< XACML Subject/\@SubjectCategory attribute @b requesting-machine value (XACML 2.0, B.2) */ /* * XACML Resource/Attribute Identifiers (XACML 2.0, Appendix B) */ static const char XACML_RESOURCE_ID[]= "urn:oasis:names:tc:xacml:1.0:resource:resource-id"; /**< XACML Resource/Attribute @b resource-id identifier (XACML 2.0, B.6) */ /* * XACML Action/Attribute Identifiers (XACML 2.0, Appendix B) */ static const char XACML_ACTION_ID[]= "urn:oasis:names:tc:xacml:1.0:action:action-id"; /**< XACML Action/Attribute @b action-id identifier (XACML 2.0, B.7) */ /* * PEP XACML Environment/Attribute identifiers (XACML 2.0, Appendix B) */ static const char XACML_ENVIRONMENT_CURRENT_TIME[]= "urn:oasis:names:tc:xacml:1.0:environment:current-time"; /**< XACML Environment/Attribute @c current-time identifier (XACML 2.0, B.8) */ static const char XACML_ENVIRONMENT_CURRENT_DATE[]= "urn:oasis:names:tc:xacml:1.0:environment:current-date"; /**< XACML Environment/Attribute @c current-date identifier (XACML 2.0, B.8) */ static const char XACML_ENVIRONMENT_CURRENT_DATETIME[]= "urn:oasis:names:tc:xacml:1.0:environment:current-dateTime"; /**< XACML Environment/Attribute @c current-dateTime identifier (XACML 2.0, B.8) */ /* * PEP XACML StatusCode/\@Value values (XACML 2.0, B.9) */ static const char XACML_STATUSCODE_OK[]= "urn:oasis:names:tc:xacml:1.0:status:ok"; /**< XACML StatusCode/\@Value attribute @c ok value (XACML 2.0, B.9) */ static const char XACML_STATUSCODE_MISSINGATTRIBUTE[]= "urn:oasis:names:tc:xacml:1.0:status:missing-attribute"; /**< XACML StatusCode/\@Value attribute @c missing-attribute value (XACML 2.0, B.9) */ static const char XACML_STATUSCODE_SYNTAXERROR[]= "urn:oasis:names:tc:xacml:1.0:status:syntax-error"; /**< XACML StatusCode/\@Value attribute @c syntax-error value (XACML 2.0, 
B.9) */ static const char XACML_STATUSCODE_PROCESSINGERROR[]= "urn:oasis:names:tc:xacml:1.0:status:processing-error"; /**< XACML StatusCode/\@Value attribute @c processing-error value (XACML 2.0, B.9) */ /** @defgroup CommonAuthZ Common XACML Authorization Profile * @ingroup Profiles * * EMI Common XACML Authorization Profile (Version 1.1) * @version 1.1 * * Document: http://dci-sec.org/xacml/profile/common-authz/1.1 * * Profile version, XACML attribute identifiers and obligation identifiers for the Common XACML * Authorization Profile. * @{ */ /* * Common XACML Authorization Profile version */ static const char XACML_COMMONAUTHZ_PROFILE_1_1[]= "http://dci-sec.org/xacml/profile/common-authz/1.1"; /**< Common XACML Authorization Profile version 1.1 value. See attribute #XACML_DCISEC_ATTRIBUTE_PROFILE_ID [Common XACML Authorization Profile v.1.1, 4.1.1] */ static const char XACML_DCISEC_ATTRIBUTE_PROFILE_ID[]= "http://dci-sec.org/xacml/attribute/profile-id"; /**< Common XACML Authorization Profile Environment/Attribute @b profile-id identifier. Datatype: anyURI, see #XACML_DATATYPE_ANYURI [Common XACML Authorization Profile v.1.1, 4.1.1] */ static const char XACML_DCISEC_ATTRIBUTE_SUBJECT_ISSUER[]= "http://dci-sec.org/xacml/attribute/subject-issuer"; /**< Common XACML Authorization Profile Subject/Attribute @b subject-issuer identifier. Datatype: x500Name, see #XACML_DATATYPE_X500NAME [Common XACML Authorization Profile v.1.1, 4.2.3] */ static const char XACML_DCISEC_ATTRIBUTE_VIRTUAL_ORGANIZATION[]= "http://dci-sec.org/xacml/attribute/virtual-organization"; /**< Common XACML Authorization Profile Subject/Attribute @b virtual-organization (VO) identifier. Datatype: string, see #XACML_DATATYPE_STRING [Common XACML Authorization Profile v.1.1, 4.2.4] */ static const char XACML_DCISEC_ATTRIBUTE_GROUP[]= "http://dci-sec.org/xacml/attribute/group"; /**< Common XACML Authorization Profile Subject/Attribute @b group (VO) identifier. Datatype: string, see #XACML_DATATYPE_STRING [Common XACML Authorization Profile v.1.1, 4.2.6] */ static const char XACML_DCISEC_ATTRIBUTE_GROUP_PRIMARY[]= "http://dci-sec.org/xacml/attribute/group/primary"; /**< Common XACML Authorization Profile Subject/Attribute @b group/primary (VO) identifier. Datatype: string, see #XACML_DATATYPE_STRING [Common XACML Authorization Profile v.1.1, 4.2.5] */ static const char XACML_DCISEC_ATTRIBUTE_ROLE[]= "http://dci-sec.org/xacml/attribute/role"; /**< Common XACML Authorization Profile Subject/Attribute @b role (VO) identifier. Datatype: string, see #XACML_DATATYPE_STRING [Common XACML Authorization Profile v.1.1, 4.2.8] */ static const char XACML_DCISEC_ATTRIBUTE_ROLE_PRIMARY[]= "http://dci-sec.org/xacml/attribute/role/primary"; /**< Common XACML Authorization Profile Subject/Attribute @b role/primary (VO) identifier. Datatype: string, see #XACML_DATATYPE_STRING [Common XACML Authorization Profile v.1.1, 4.2.7] */ static const char XACML_DCISEC_ATTRIBUTE_RESOURCE_OWNER[]= "http://dci-sec.org/xacml/attribute/resource-owner"; /**< Common XACML Authorization Profile Resource/Attribute @b resource-owner identifier. Datatype: x500Name, see #XACML_DATATYPE_X500NAME [Common XACML Authorization Profile v.1.1, 4.3.2] */ static const char XACML_DCISEC_ACTION_NAMESPACE[]= "http://dci-sec.org/xacml/action"; /**< Namespace for the Common XACML Authorization Profile Action values. 
See attribute #XACML_ACTION_ID [Common XACML Authorization Profile v.1.1, 4.4.1] */ static const char XACML_DCISEC_ACTION_ANY[]= "http://dci-sec.org/xacml/action/ANY"; /**< Common XACML Authorization Profile Action @b ANY value. See attribute #XACML_ACTION_ID [Common XACML Authorization Profile v.1.1, 4.4.1] */ static const char XACML_DCISEC_OBLIGATION_MAP_LOCAL_USER[]= "http://dci-sec.org/xacml/obligation/map-local-user"; /**< Common XACML Authorization Profile Obligation @b map-local-user identifier [Common XACML Authorization Profile v.1.1, 5.1.1] */ static const char XACML_DCISEC_OBLIGATION_MAP_POSIX_USER[]= "http://dci-sec.org/xacml/obligation/map-local-user/posix"; /**< Common XACML Authorization Profile Obligation @b map-local-user/posix identifier. See attribute assignments #XACML_DCISEC_ATTRIBUTE_USER_ID, #XACML_DCISEC_ATTRIBUTE_GROUP_ID_PRIMARY and #XACML_DCISEC_ATTRIBUTE_GROUP_ID [Common XACML Authorization Profile v.1.1, 5.1.2] */ static const char XACML_DCISEC_ATTRIBUTE_USER_ID[]= "http://dci-sec.org/xacml/attribute/user-id"; /**< Common XACML Authorization Profile Obligation/AttributeAssignment @b user-id (username) identifier, see obligation #XACML_DCISEC_OBLIGATION_MAP_POSIX_USER. Datatype: string, see #XACML_DATATYPE_STRING [Common XACML Authorization Profile v.1.1, 5.2.1] */ static const char XACML_DCISEC_ATTRIBUTE_GROUP_ID[]= "http://dci-sec.org/xacml/attribute/group-id"; /**< Common XACML Authorization Profile Obligation/AttributeAssignment @b group-id (user group name) identifier, see obligation #XACML_DCISEC_OBLIGATION_MAP_POSIX_USER. Datatype: string, see #XACML_DATATYPE_STRING [Common XACML Authorization Profile v.1.1, 5.2.3] */ static const char XACML_DCISEC_ATTRIBUTE_GROUP_ID_PRIMARY[]= "http://dci-sec.org/xacml/attribute/group-id/primary"; /**< Common XACML Authorization Profile Obligation/AttributeAssignment @b group-id/primary (primary group name) identifier, see obligation #XACML_DCISEC_OBLIGATION_MAP_POSIX_USER. Datatype: string, see #XACML_DATATYPE_STRING [Common XACML Authorization Profile v.1.1, 5.2.2] */ /** @} */ /** @defgroup GridWNAuthZ Grid Worker Node Authorization Profile * @ingroup Profiles * * XACML Grid Worker Node Authorization Profile (Version 1.0) * @version 1.0 * * Document: https://edms.cern.ch/document/1058175/1.0 * * Profile version, XACML Attribute identifiers, XACML Obligation identifiers, and datatypes for the Grid WN AuthZ Profile. * @{ */ /* * XACML Grid WN AuthZ Profile version */ static const char XACML_GRIDWN_PROFILE_VERSION[]= "http://glite.org/xacml/profile/grid-wn/1.0"; /**< XACML Grid WN AuthZ Profile version value [XACML Grid WN AuthZ 1.0, 3.1.1] */ /* * XACML Grid WN AuthZ Attribute identifiers */ static const char XACML_GLITE_ATTRIBUTE_PROFILE_ID[]= "http://glite.org/xacml/attribute/profile-id"; /**< XACML Grid WN AuthZ Environment/Attribute @b profile-id identifier. Datatype: anyURI, see #XACML_DATATYPE_ANYURI [XACML Grid WN AuthZ 1.0, 3.1.1] */ static const char XACML_GLITE_ATTRIBUTE_SUBJECT_ISSUER[]= "http://glite.org/xacml/attribute/subject-issuer"; /**< XACML Grid WN AuthZ Subject/Attribute @b subject-issuer identifier. 
Datatype: x500Name, see #XACML_DATATYPE_X500NAME [XACML Grid WN AuthZ 1.0, 3.2.2 and 4.2] */ static const char XACML_GLITE_ATTRIBUTE_VOMS_ISSUER[]= "http://glite.org/xacml/attribute/voms-issuer"; /**< XACML Grid WN AuthZ Subject/Attribute @b voms-issuer identifier [XACML Grid WN AuthZ 1.0, 4.6.2] */ static const char XACML_GLITE_ATTRIBUTE_VIRTUAL_ORGANIZATION[]= "http://glite.org/xacml/attribute/virtual-organization"; /**< XACML Grid WN AuthZ Subject/Attribute @b virutal-organization identifier. Datatype: string, see #XACML_DATATYPE_STRING [XACML Grid WN AuthZ 1.0, 3.2.3 and 4.3] */ static const char XACML_GLITE_ATTRIBUTE_FQAN[]= "http://glite.org/xacml/attribute/fqan"; /**< XACML Grid WN AuthZ Subject/Attribute @b fqan identifier. Datatype: FQAN, see #XACML_GLITE_DATATYPE_FQAN [XACML Grid WN AuthZ 1.0, 3.2.4 and 4.4] */ static const char XACML_GLITE_ATTRIBUTE_FQAN_PRIMARY[]= "http://glite.org/xacml/attribute/fqan/primary"; /**< XACML Grid WN AuthZ Subject/Attribute @b fqan/primary identifier. Datatype: FQAN, see #XACML_GLITE_DATATYPE_FQAN [XACML Grid WN AuthZ 1.0, 3.2.5] */ static const char XACML_GLITE_ATTRIBUTE_PILOT_JOB_CLASSIFIER[]= "http://glite.org/xacml/attribute/pilot-job-classifer"; /**< XACML Grid WN AuthZ Action/Attribute @b pilot-job-classifer identifier. Datatype: string, see #XACML_DATATYPE_STRING [XACML Grid WN AuthZ 1.0, 3.4.2] */ static const char XACML_GLITE_ATTRIBUTE_USER_ID[]= "http://glite.org/xacml/attribute/user-id"; /**< XACML Grid WN AuthZ Obligation/AttributeAssignment @b user-id identifier [XACML Grid WN AuthZ 1.0, 3.6.1] */ static const char XACML_GLITE_ATTRIBUTE_GROUP_ID[]= "http://glite.org/xacml/attribute/group-id"; /**< XACML Grid WN AuthZ Obligation/AttributeAssignment @b group-id identifier [XACML Grid WN AuthZ 1.0, 3.6.2] */ static const char XACML_GLITE_ATTRIBUTE_GROUP_ID_PRIMARY[]= "http://glite.org/xacml/attribute/group-id/primary"; /**< XACML Grid WN AuthZ Obligation/AttributeAssignment @b group-id/primary identifier [XACML Grid WN AuthZ 1.0, 3.6.3] */ static const char XACML_GLITE_OBLIGATION_LOCAL_ENVIRONMENT_MAP[]= "http://glite.org/xacml/obligation/local-environment-map"; /**< XACML Grid WN AuthZ Obligation @b local-environment-map identifier [XACML Grid WN AuthZ 1.0, 3.5.1] */ static const char XACML_GLITE_OBLIGATION_LOCAL_ENVIRONMENT_MAP_POSIX[]= "http://glite.org/xacml/obligation/local-environment-map/posix"; /**< XACML Grid WN AuthZ Obligation @b local-environment-map/posix identifier [XACML Grid WN AuthZ 1.0, 3.5.2] */ static const char XACML_GLITE_DATATYPE_FQAN[]= "http://glite.org/xacml/datatype/fqan"; /**< XACML Grid WN AuthZ @b fqan datatype [XACML Grid WN AuthZ 1.0, 3.7.1] */ /** @defgroup AuthzInterop Authorization Interoperability Profile * @ingroup Profiles * * XACML Attribute and Obligation Profile for Authorization Interoperability in Grids (Version 1.1) * @version 1.1 * * Document: https://edms.cern.ch/document/929867/2 * * XACML Subject's Attribute identifiers, XACML Obligation and Obligation's AttributeAssignment * identifiers for the AuthZ Interop Profile * @{ */ /* * XACML Authz Interop Subject/Attribute identifiers (XACML Authz Interop Profile 1.1) */ static const char XACML_AUTHZINTEROP_SUBJECT_X509_ID[]= "http://authz-interop.org/xacml/subject/subject-x509-id"; /**< XACML AuthZ Interop Subject/Attribute @b subject-x509-id identifier (Datatype: string, OpenSSL format) */ static const char XACML_AUTHZINTEROP_SUBJECT_X509_ISSUER[]= "http://authz-interop.org/xacml/subject/subject-x509-issuer"; /**< XACML AuthZ Interop 
Subject/Attribute @b subject-x509-issuer identifier (Datatype: string, OpenSSL format) */ static const char XACML_AUTHZINTEROP_SUBJECT_VO[]= "http://authz-interop.org/xacml/subject/vo"; /**< XACML AuthZ Interop Subject/Attribute @b vo identifier (Datatype: string) */ static const char XACML_AUTHZINTEROP_SUBJECT_CERTCHAIN[]= "http://authz-interop.org/xacml/subject/cert-chain"; /**< XACML AuthZ Interop Subject/Attribute @b cert-chain identifier (Datatype: base64Binary) */ static const char XACML_AUTHZINTEROP_SUBJECT_VOMS_FQAN[]= "http://authz-interop.org/xacml/subject/voms-fqan"; /**< XACML AuthZ Interop Subject/Attribute @b voms-fqan identifier (Datatype: string) */ static const char XACML_AUTHZINTEROP_SUBJECT_VOMS_PRIMARY_FQAN[]= "http://authz-interop.org/xacml/subject/voms-primary-fqan"; /**< XACML AuthZ Interop Subject/Attribute @b voms-primary-fqan identifier (Datatype: string) */ /* * XACML Authz Interop Obligation and Obligation/AttributeAssignment identifiers (XACML Authz Interop Profile 1.1) */ static const char XACML_AUTHZINTEROP_OBLIGATION_UIDGID[]= "http://authz-interop.org/xacml/obligation/uidgid"; /**< XACML AuthZ Interop Obligation @b uidgid identifier (XACML Authz Interop: UID GID) */ static const char XACML_AUTHZINTEROP_OBLIGATION_SECONDARY_GIDS[]= "http://authz-interop.org/xacml/obligation/secondary-gids"; /**< XACML AuthZ Interop Obligation @b secondary-gids identifier (XACML Authz Interop: Multiple Secondary GIDs) */ static const char XACML_AUTHZINTEROP_OBLIGATION_USERNAME[]= "http://authz-interop.org/xacml/obligation/username"; /**< XACML AuthZ Interop Obligation @b username identifier (XACML Authz Interop: Username) */ static const char XACML_AUTHZINTEROP_OBLIGATION_AFS_TOKEN[]= "http://authz-interop.org/xacml/obligation/afs-token"; /**< XACML AuthZ Interop Obligation @b afs-token identifier (XACML Authz Interop: AFS Token) */ static const char XACML_AUTHZINTEROP_OBLIGATION_ATTR_POSIX_UID[]= "http://authz-interop.org/xacml/attribute/posix-uid"; /**< XACML AuthZ Interop Obligation/AttributeAssignment @b posix-uid identifier (C Datatype: string, must be converted to integer) */ static const char XACML_AUTHZINTEROP_OBLIGATION_ATTR_POSIX_GID[]= "http://authz-interop.org/xacml/attribute/posix-gid"; /**< XACML AuthZ Interop Obligation/AttributeAssignment @b posix-gid identifier (C Datatype: string, must be converted to integer) */ static const char XACML_AUTHZINTEROP_OBLIGATION_ATTR_USERNAME[]= "http://authz-interop.org/xacml/attribute/username"; /**< XACML AuthZ Interop Obligation/AttributeAssignment @b username identifier (Datatype: string) */ static const char XACML_AUTHZINTEROP_OBLIGATION_ATTR_AFS_TOKEN[]= "http://authz-interop.org/xacml/attribute/afs-token"; /**< XACML AuthZ Interop Obligation/AttributeAssignment @b afs-token identifier (Datatype: base64Binary) */ /** @} */ /* * PEP XACML Result/Decision element constants. */ typedef enum xacml_decision { XACML_DECISION_DENY = 0, /**< Decision is @b Deny */ XACML_DECISION_PERMIT, /**< Decision is @b Permit */ XACML_DECISION_INDETERMINATE, /**< Decision is @b Indeterminate, the PEP was unable to evaluate the request */ XACML_DECISION_NOT_APPLICABLE /**< Decision is @b NotApplicable, the PEP does not have any policy that applies to the request */ } xacml_decision_t; /* * PEP XACML Obligation/\@FulfillOn attribute constants. 
*/ typedef enum xacml_fulfillon { XACML_FULFILLON_DENY = 0, /**< Fulfill the Obligation on @b Deny decision */ XACML_FULFILLON_PERMIT /**< Fulfill the Obligation on @b Permit decision */ } xacml_fulfillon_t; } // namespace ArcSec #endif /* __ARC_SEC_ARGUSPEP_CONSTANT_H__ */ nordugrid-arc-5.4.2/src/hed/identitymap/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315723023120 xustar000000000000000030 mtime=1513200595.938963994 30 atime=1513200649.613620456 30 ctime=1513200662.008772054 nordugrid-arc-5.4.2/src/hed/identitymap/Makefile.in0000644000175000002070000011500613214315723023171 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/identitymap DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in 
files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libarguspdpclient_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libarguspdpclient_la_OBJECTS = \ libarguspdpclient_la-ArgusPDPClient.lo libarguspdpclient_la_OBJECTS = $(am_libarguspdpclient_la_OBJECTS) libarguspdpclient_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarguspdpclient_la_CXXFLAGS) $(CXXFLAGS) \ $(libarguspdpclient_la_LDFLAGS) $(LDFLAGS) -o $@ @ARGUS_ENABLED_FALSE@am_libarguspdpclient_la_rpath = -rpath \ @ARGUS_ENABLED_FALSE@ $(pkglibdir) @ARGUS_ENABLED_TRUE@am_libarguspdpclient_la_rpath = -rpath \ @ARGUS_ENABLED_TRUE@ $(pkglibdir) @ARGUS_ENABLED_TRUE@libarguspepclient_la_DEPENDENCIES = $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ @ARGUS_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ @ARGUS_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ @ARGUS_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/common/libarccommon.la \ @ARGUS_ENABLED_TRUE@ $(am__DEPENDENCIES_1) \ @ARGUS_ENABLED_TRUE@ $(am__DEPENDENCIES_1) \ @ARGUS_ENABLED_TRUE@ $(am__DEPENDENCIES_1) am__libarguspepclient_la_SOURCES_DIST = ArgusPEPClient.cpp \ ArgusPEPClient.h @ARGUS_ENABLED_TRUE@am_libarguspepclient_la_OBJECTS = \ @ARGUS_ENABLED_TRUE@ libarguspepclient_la-ArgusPEPClient.lo libarguspepclient_la_OBJECTS = $(am_libarguspepclient_la_OBJECTS) libarguspepclient_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarguspepclient_la_CXXFLAGS) $(CXXFLAGS) \ $(libarguspepclient_la_LDFLAGS) $(LDFLAGS) -o $@ @ARGUS_ENABLED_TRUE@am_libarguspepclient_la_rpath = -rpath \ @ARGUS_ENABLED_TRUE@ $(pkglibdir) libidentitymap_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libidentitymap_la_OBJECTS = libidentitymap_la-IdentityMap.lo \ libidentitymap_la-SimpleMap.lo libidentitymap_la_OBJECTS = $(am_libidentitymap_la_OBJECTS) libidentitymap_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libidentitymap_la_CXXFLAGS) $(CXXFLAGS) \ $(libidentitymap_la_LDFLAGS) $(LDFLAGS) -o $@ @ARGUS_ENABLED_FALSE@am_libidentitymap_la_rpath = -rpath $(pkglibdir) @ARGUS_ENABLED_TRUE@am_libidentitymap_la_rpath = -rpath $(pkglibdir) DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) 
$(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarguspdpclient_la_SOURCES) \ $(libarguspepclient_la_SOURCES) $(libidentitymap_la_SOURCES) DIST_SOURCES = $(libarguspdpclient_la_SOURCES) \ $(am__libarguspepclient_la_SOURCES_DIST) \ $(libidentitymap_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ 
ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = 
@NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ 
sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = schema @ARGUS_ENABLED_FALSE@pkglib_LTLIBRARIES = libidentitymap.la libarguspdpclient.la @ARGUS_ENABLED_TRUE@pkglib_LTLIBRARIES = libidentitymap.la libarguspdpclient.la libarguspepclient.la libidentitymap_la_SOURCES = IdentityMap.cpp SimpleMap.cpp \ IdentityMap.h SimpleMap.h libidentitymap_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libidentitymap_la_LIBADD = \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) libidentitymap_la_LDFLAGS = -no-undefined -avoid-version -module @ARGUS_ENABLED_TRUE@libarguspepclient_la_SOURCES = ArgusPEPClient.cpp ArgusPEPClient.h @ARGUS_ENABLED_TRUE@libarguspepclient_la_CXXFLAGS = -I$(top_srcdir)/include \ @ARGUS_ENABLED_TRUE@ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(ARGUS_CFLAGS) $(AM_CXXFLAGS) @ARGUS_ENABLED_TRUE@libarguspepclient_la_LIBADD = \ @ARGUS_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ @ARGUS_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ @ARGUS_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ @ARGUS_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/common/libarccommon.la \ @ARGUS_ENABLED_TRUE@ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(ARGUS_LIBS) @ARGUS_ENABLED_TRUE@libarguspepclient_la_LDFLAGS = -no-undefined -avoid-version -module libarguspdpclient_la_SOURCES = ArgusPDPClient.cpp ArgusPDPClient.h ArgusXACMLConstant.h libarguspdpclient_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarguspdpclient_la_LIBADD = \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) libarguspdpclient_la_LDFLAGS = -no-undefined -avoid-version -module all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/identitymap/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/identitymap/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarguspdpclient.la: $(libarguspdpclient_la_OBJECTS) $(libarguspdpclient_la_DEPENDENCIES) $(libarguspdpclient_la_LINK) $(am_libarguspdpclient_la_rpath) $(libarguspdpclient_la_OBJECTS) $(libarguspdpclient_la_LIBADD) $(LIBS) libarguspepclient.la: $(libarguspepclient_la_OBJECTS) $(libarguspepclient_la_DEPENDENCIES) $(libarguspepclient_la_LINK) $(am_libarguspepclient_la_rpath) $(libarguspepclient_la_OBJECTS) $(libarguspepclient_la_LIBADD) $(LIBS) libidentitymap.la: $(libidentitymap_la_OBJECTS) $(libidentitymap_la_DEPENDENCIES) $(libidentitymap_la_LINK) $(am_libidentitymap_la_rpath) $(libidentitymap_la_OBJECTS) $(libidentitymap_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarguspdpclient_la-ArgusPDPClient.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarguspepclient_la-ArgusPEPClient.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libidentitymap_la-IdentityMap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libidentitymap_la-SimpleMap.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) 
$(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarguspdpclient_la-ArgusPDPClient.lo: ArgusPDPClient.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarguspdpclient_la_CXXFLAGS) $(CXXFLAGS) -MT libarguspdpclient_la-ArgusPDPClient.lo -MD -MP -MF $(DEPDIR)/libarguspdpclient_la-ArgusPDPClient.Tpo -c -o libarguspdpclient_la-ArgusPDPClient.lo `test -f 'ArgusPDPClient.cpp' || echo '$(srcdir)/'`ArgusPDPClient.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarguspdpclient_la-ArgusPDPClient.Tpo $(DEPDIR)/libarguspdpclient_la-ArgusPDPClient.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArgusPDPClient.cpp' object='libarguspdpclient_la-ArgusPDPClient.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarguspdpclient_la_CXXFLAGS) $(CXXFLAGS) -c -o libarguspdpclient_la-ArgusPDPClient.lo `test -f 'ArgusPDPClient.cpp' || echo '$(srcdir)/'`ArgusPDPClient.cpp libarguspepclient_la-ArgusPEPClient.lo: ArgusPEPClient.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarguspepclient_la_CXXFLAGS) $(CXXFLAGS) -MT libarguspepclient_la-ArgusPEPClient.lo -MD -MP -MF $(DEPDIR)/libarguspepclient_la-ArgusPEPClient.Tpo -c -o libarguspepclient_la-ArgusPEPClient.lo `test -f 'ArgusPEPClient.cpp' || echo '$(srcdir)/'`ArgusPEPClient.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarguspepclient_la-ArgusPEPClient.Tpo $(DEPDIR)/libarguspepclient_la-ArgusPEPClient.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArgusPEPClient.cpp' object='libarguspepclient_la-ArgusPEPClient.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarguspepclient_la_CXXFLAGS) $(CXXFLAGS) -c -o libarguspepclient_la-ArgusPEPClient.lo `test -f 'ArgusPEPClient.cpp' || echo '$(srcdir)/'`ArgusPEPClient.cpp libidentitymap_la-IdentityMap.lo: IdentityMap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libidentitymap_la_CXXFLAGS) $(CXXFLAGS) -MT libidentitymap_la-IdentityMap.lo -MD -MP -MF $(DEPDIR)/libidentitymap_la-IdentityMap.Tpo -c -o libidentitymap_la-IdentityMap.lo `test -f 'IdentityMap.cpp' || echo '$(srcdir)/'`IdentityMap.cpp @am__fastdepCXX_TRUE@ $(am__mv) 
$(DEPDIR)/libidentitymap_la-IdentityMap.Tpo $(DEPDIR)/libidentitymap_la-IdentityMap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='IdentityMap.cpp' object='libidentitymap_la-IdentityMap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libidentitymap_la_CXXFLAGS) $(CXXFLAGS) -c -o libidentitymap_la-IdentityMap.lo `test -f 'IdentityMap.cpp' || echo '$(srcdir)/'`IdentityMap.cpp libidentitymap_la-SimpleMap.lo: SimpleMap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libidentitymap_la_CXXFLAGS) $(CXXFLAGS) -MT libidentitymap_la-SimpleMap.lo -MD -MP -MF $(DEPDIR)/libidentitymap_la-SimpleMap.Tpo -c -o libidentitymap_la-SimpleMap.lo `test -f 'SimpleMap.cpp' || echo '$(srcdir)/'`SimpleMap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libidentitymap_la-SimpleMap.Tpo $(DEPDIR)/libidentitymap_la-SimpleMap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SimpleMap.cpp' object='libidentitymap_la-SimpleMap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libidentitymap_la_CXXFLAGS) $(CXXFLAGS) -c -o libidentitymap_la-SimpleMap.lo `test -f 'SimpleMap.cpp' || echo '$(srcdir)/'`SimpleMap.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-pkglibLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am \ uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/identitymap/PaxHeaders.7502/SimpleMap.h0000644000000000000000000000012410732034755023121 xustar000000000000000027 mtime=1198012909.815532 27 atime=1513200574.447701 30 ctime=1513200662.017772164 nordugrid-arc-5.4.2/src/hed/identitymap/SimpleMap.h0000644000175000002070000000064410732034755023172 0ustar00mockbuildmock00000000000000#include #define SELFUNMAP_TIME (10*24*60*60) namespace ArcSec { class SimpleMap { private: std::string dir_; int pool_handle_; public: SimpleMap(const std::string& dir); ~SimpleMap(void); std::string map(const std::string& subject); bool unmap(const std::string& subject); operator bool(void) { return (pool_handle_ != -1); }; bool operator!(void) { return (pool_handle_ == -1); }; }; } nordugrid-arc-5.4.2/src/hed/identitymap/PaxHeaders.7502/IdentityMap.cpp0000644000000000000000000000012412675602216024015 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.442701 30 ctime=1513200662.014772127 nordugrid-arc-5.4.2/src/hed/identitymap/IdentityMap.cpp0000644000175000002070000001423012675602216024062 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "SimpleMap.h" #include "IdentityMap.h" static Arc::Plugin* get_sechandler(Arc::PluginArgument* arg) { ArcSec::SecHandlerPluginArgument* shcarg = arg?dynamic_cast(arg):NULL; if(!shcarg) return NULL; ArcSec::IdentityMap* plugin = new ArcSec::IdentityMap((Arc::Config*)(*shcarg),(Arc::ChainContext*)(*shcarg),shcarg); if(!plugin) return NULL; if(!(*plugin)) { delete plugin; return NULL; }; return plugin; } extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "identity.map", "HED:SHC", NULL, 0, &get_sechandler}, { NULL, NULL, NULL, 0, NULL } }; namespace ArcSec { // -------------------------------------------------------------------------- class LocalMapDirect: public LocalMap { private: std::string id_; public: LocalMapDirect(const std::string& id):id_(id) {}; virtual ~LocalMapDirect(void) {}; virtual std::string ID(Arc::Message*) { return id_; }; }; // -------------------------------------------------------------------------- class LocalMapPool: public LocalMap { private: std::string dir_; public: LocalMapPool(const std::string& dir); virtual ~LocalMapPool(void); virtual std::string ID(Arc::Message* msg); }; LocalMapPool::LocalMapPool(const std::string& dir):dir_(dir) { } LocalMapPool::~LocalMapPool(void) { } std::string LocalMapPool::ID(Arc::Message* msg) { // Get user Grid identity. // So far only DN from TLS is supported. 
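  // How the pool mapping works (summarised from SimpleMap.cpp further down in
  // this archive; details of the non-visible parts are assumptions): the
  // configured directory holds a "pool" file with one candidate local account
  // name per line, plus one small file per already-mapped DN containing the
  // name claimed for that DN. SimpleMap::map() returns an existing mapping if
  // one is found, otherwise claims an unused name under an fcntl() lock on the
  // pool file; stale mappings with no pool entry are removed after
  // SELFUNMAP_TIME, and the oldest mapping is tracked, apparently so it can be
  // recycled when the pool runs dry.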
std::string dn = msg->Attributes()->get("TLS:IDENTITYDN"); if(dn.empty()) return ""; SimpleMap pool(dir_); if(!pool) return ""; return pool.map(dn); } // -------------------------------------------------------------------------- class LocalMapList: public LocalMap { private: std::vector files_; public: LocalMapList(const std::vector& files); LocalMapList(const std::string& file); virtual ~LocalMapList(void); virtual std::string ID(Arc::Message* msg); }; LocalMapList::LocalMapList(const std::vector& files):files_(files) { } LocalMapList::LocalMapList(const std::string& file) { files_.push_back(file); } LocalMapList::~LocalMapList(void) { } static std::string get_val(std::string& str) { std::string val; if(str[0] == '"') { std::string::size_type p = str.find('"',1); if(p == std::string::npos) return ""; val=str.substr(1,p-1); str=str.substr(p+1); return val; }; if(str[0] == '\'') { std::string::size_type p = str.find('\'',1); if(p == std::string::npos) return ""; val=str.substr(1,p-1); str=str.substr(p+1); return val; }; std::string::size_type p = str.find_first_of(" \t"); if(p == std::string::npos) { val=str; str.resize(0); } else { val=str.substr(0,p); str=str.substr(p); }; return val; } std::string LocalMapList::ID(Arc::Message* msg) { // Compare user Grid identity to list in file. // So far only DN from TLS is supported. std::string dn = msg->Attributes()->get("TLS:IDENTITYDN"); if(dn.empty()) return ""; for (std::vector::iterator it = files_.begin(); it != files_.end(); it++) { std::string file_ = *it; std::ifstream f(file_.c_str()); if(!f.is_open() ) continue; for(;f.good();) { std::string buf; std::getline(f,buf); buf=Arc::trim(buf); if(buf.empty()) continue; if(buf[0] == '#') continue; std::string val = get_val(buf); if(val != dn) continue; buf=Arc::trim(buf); val=get_val(buf); if(val.empty()) continue; f.close(); return val; }; f.close(); } return ""; } // -------------------------------------------------------------------------- static LocalMap* MakeLocalMap(Arc::XMLNode pdp) { Arc::XMLNode p; p=pdp["LocalName"]; if(p) { std::string name = p; if(name.empty()) return NULL; return new LocalMapDirect(name); }; p=pdp["LocalList"]; if(p) { std::vector files; while (p) { files.push_back((std::string) p); ++p; } if(files.empty()) return NULL; return new LocalMapList(files); }; p=pdp["LocalSimplePool"]; if(p) { std::string dir = p; if(dir.empty()) return NULL; return new LocalMapPool(dir); }; return NULL; } // --------------------------------------------------------------------------- IdentityMap::IdentityMap(Arc::Config *cfg,Arc::ChainContext* ctx,Arc::PluginArgument* parg):ArcSec::SecHandler(cfg,parg),valid_(false){ Arc::PluginsFactory* pdp_factory = (Arc::PluginsFactory*)(*ctx); if(pdp_factory) { Arc::XMLNode plugins = (*cfg)["Plugins"]; for(int n = 0;;++n) { Arc::XMLNode p = plugins[n]; if(!p) break; std::string name = p["Name"]; if(name.empty()) continue; // Nameless plugin? pdp_factory->load(name,PDPPluginKind); }; Arc::XMLNode pdps = (*cfg)["PDP"]; for(int n = 0;;++n) { Arc::XMLNode p = pdps[n]; if(!p) break; std::string name = p.Attribute("name"); if(name.empty()) continue; // Nameless? LocalMap* local_id = MakeLocalMap(p); if(!local_id) continue; // No mapping? 
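  // Each <PDP name="..."> element is loaded as a PDP plugin and paired with a
  // LocalMap built from its LocalName / LocalList / LocalSimplePool child (see
  // MakeLocalMap above). In Handle() below, the first PDP that permits the
  // message decides which local identity gets written into SEC:LOCALID.
  // A hypothetical configuration sketch (the plugin name and file path are
  // only illustrative, not taken from this file):
  //   <PDP name="some.pdp">
  //     <LocalList>/etc/grid-security/grid-mapfile</LocalList>
  //   </PDP>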
Arc::Config cfg_(p); PDPPluginArgument arg(&cfg_); ArcSec::PDP* pdp = pdp_factory->GetInstance(PDPPluginKind,name,&arg); if(!pdp) { delete local_id; logger.msg(Arc::ERROR, "PDP: %s can not be loaded", name); return; }; map_pair_t m; m.pdp=pdp; m.uid=local_id; maps_.push_back(m); }; }; valid_ = true; } IdentityMap::~IdentityMap(void) { for(std::list::iterator p = maps_.begin();p!=maps_.end();++p) { if(p->pdp) delete p->pdp; if(p->uid) delete p->uid; }; } SecHandlerStatus IdentityMap::Handle(Arc::Message* msg) const { for(std::list::const_iterator p = maps_.begin();p!=maps_.end();++p) { if(p->pdp->isPermitted(msg)) { std::string id = p->uid->ID(msg); logger.msg(Arc::INFO,"Grid identity is mapped to local identity '%s'",id); msg->Attributes()->set("SEC:LOCALID",id); return true; }; } return true; } } nordugrid-arc-5.4.2/src/hed/identitymap/PaxHeaders.7502/ArgusPDPClient.cpp0000644000000000000000000000012413213445240024341 xustar000000000000000027 mtime=1512983200.815191 27 atime=1513200574.436701 30 ctime=1513200662.009772066 nordugrid-arc-5.4.2/src/hed/identitymap/ArgusPDPClient.cpp0000644000175000002070000011341213213445240024410 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include "ArgusPDPClient.h" #include "ArgusXACMLConstant.h" #define AREX_JOB_POLICY_OPERATION_URN "http://www.nordugrid.org/schemas/policy-arc/types/a-rex/joboperation" #define AREX_JOB_POLICY_OPERATION_CREATE "Create" #define AREX_JOB_POLICY_OPERATION_MODIFY "Modify" #define AREX_JOB_POLICY_OPERATION_READ "Read" #define AREX_POLICY_OPERATION_URN "http://www.nordugrid.org/schemas/policy-arc/types/a-rex/operation" #define AREX_POLICY_OPERATION_ADMIN "Admin" #define AREX_POLICY_OPERATION_INFO "Info" static const char XACML_DATATYPE_FQAN[]= "http://glite.org/xacml/datatype/fqan"; #define SAML_NAMESPACE "urn:oasis:names:tc:SAML:2.0:assertion" #define SAMLP_NAMESPACE "urn:oasis:names:tc:SAML:2.0:protocol" #define XACML_SAMLP_NAMESPACE "urn:oasis:names:tc:xacml:2.0:profile:saml2.0:v2:schema:protocol" //#define XACML_SAMLP_NAMESPACE "urn:oasis:xacml:2.0:saml:protocol:schema:os" #define EMIES_OPERATION_CREATION "http://www.eu-emi.eu/es/2010/12/creation" #define EMIES_OPERATION_ACTIVITY "http://www.eu-emi.eu/es/2010/12/activity" #define EMIES_OPERATION_ACTIVITYMANGEMENT "http://www.eu-emi.eu/es/2010/12/activitymanagement" #define EMIES_OPERATION_RESOURCEINFO "http://www.eu-emi.eu/es/2010/12/resourceinfo" #define EMIES_OPERATION_DELEGATION "http://www.gridsite.org/namespaces/delegation-21" #define EMIES_OPERATION_ANY "http://dci-sec.org/xacml/action/ANY" static void xacml_create_request(Arc::XMLNode& request) { Arc::NS ns; ns["xacml-ctx"]="urn:oasis:names:tc:xacml:2.0:context:schema:os"; Arc::XMLNode node(ns, "xacml-ctx:Request"); node.New(request); return; } static Arc::XMLNode xacml_request_add_element(Arc::XMLNode& request_node, const std::string& element_name) { std::string elm_name = "xacml-ctx:"; elm_name.append(element_name); Arc::XMLNode element = request_node.NewChild(elm_name); return element; } static Arc::XMLNode xacml_element_add_attribute(Arc::XMLNode& element_node, const std::string& attribute, const std::string& data_type, const std::string& id, const std::string& issuer) { Arc::XMLNode attr = element_node.NewChild("xacml-ctx:Attribute"); attr.NewAttribute("DataType") = data_type; attr.NewAttribute("AttributeId") = id; if(!issuer.empty()) attr.NewAttribute("Issuer") = issuer; 
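  // For reference, the element produced here has roughly this shape (derived
  // from the calls above, not from an external schema):
  //   <xacml-ctx:Attribute DataType="..." AttributeId="..." [Issuer="..."]>
  //     <xacml-ctx:AttributeValue>value</xacml-ctx:AttributeValue>
  //   </xacml-ctx:Attribute>
  // The list overload below emits one AttributeValue child per value.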
attr.NewChild("xacml-ctx:AttributeValue") = attribute; return attr; } static Arc::XMLNode xacml_element_add_attribute(Arc::XMLNode& element_node, const std::list& attributes, const std::string& data_type, const std::string& id, const std::string& issuer) { Arc::XMLNode attr = element_node.NewChild("xacml-ctx:Attribute"); attr.NewAttribute("DataType") = data_type; attr.NewAttribute("AttributeId") = id; if(!issuer.empty()) attr.NewAttribute("Issuer") = issuer; for(std::list::const_iterator it = attributes.begin(); it!=attributes.end(); ++it) { attr.NewChild("xacml-ctx:AttributeValue") = *it; } return attr; } static Arc::Plugin* get_sechandler(Arc::PluginArgument* arg) { ArcSec::SecHandlerPluginArgument* shcarg = arg?dynamic_cast(arg):NULL; if(!shcarg) return NULL; ArcSec::ArgusPDPClient* plugin = new ArcSec::ArgusPDPClient((Arc::Config*)(*shcarg),arg); if(!plugin) return NULL; if(!(*plugin)) { delete plugin; return NULL;}; return plugin; } extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "arguspdpclient.map", "HED:SHC", NULL, 0, &get_sechandler}, { NULL, NULL, NULL, 0, NULL } }; namespace ArcSec { class pep_ex { public: std::string desc; pep_ex(const std::string& desc_):desc(desc_) {}; }; static std::string path_to_x500(const std::string& path) { class url_ex: public Arc::URL { public: static std::string Path2BaseDN(const std::string& path) { return Arc::URL::Path2BaseDN(path); }; }; return url_ex::Path2BaseDN(path); } std::string flatten_fqan(const std::string& wfqan) { const std::string vo_tag("/VO="); const std::string group_tag("/Group="); std::string fqan; std::string::size_type pos1 = 0; std::string::size_type pos2 = 0; if(wfqan.substr(0,vo_tag.length()) != vo_tag) return fqan; for(;;) { pos1 = wfqan.find(group_tag,pos2); if(pos1 == std::string::npos) break; pos2 = wfqan.find("/",pos1+1); if(pos2 == std::string::npos) { fqan += "/" + wfqan.substr(pos1+group_tag.length()); break; }; fqan += "/" + wfqan.substr(pos1+group_tag.length(),pos2-pos1-group_tag.length()); }; return fqan; } Arc::Logger ArgusPDPClient::logger(Arc::Logger::getRootLogger(), "SecHandler.Argus"); std::string xacml_decision_to_string(xacml_decision_t decision) { switch(decision) { case XACML_DECISION_DENY: return "DENY"; case XACML_DECISION_PERMIT: return "PERMIT"; case XACML_DECISION_INDETERMINATE: return "INDETERMINATE"; case XACML_DECISION_NOT_APPLICABLE: return "NOT APPLICABLE"; }; return "UNKNOWN"; } /* extract the elements from the configuration file */ ArgusPDPClient::ArgusPDPClient(Arc::Config *cfg,Arc::PluginArgument* parg):ArcSec::SecHandler(cfg,parg), conversion(conversion_emi) { valid_ = false; accept_mapping = false; accept_notapplicable = false; logger.setThreshold(Arc::DEBUG); pdpdlocation = (std::string)(*cfg)["PDPD"]; if(pdpdlocation.empty()) { logger.msg(Arc::ERROR, "PDPD location is missing"); return; } logger.msg(Arc::DEBUG, "PDPD location: %s",pdpdlocation); std::string conversion_str = (std::string)(*cfg)["Conversion"]; if(conversion_str == "subject") { logger.msg(Arc::DEBUG, "Conversion mode is set to SUBJECT"); conversion = conversion_subject; } else if(conversion_str == "cream") { logger.msg(Arc::DEBUG, "Conversion mode is set to CREAM"); conversion = conversion_cream; } else if(conversion_str == "emi") { logger.msg(Arc::DEBUG, "Conversion mode is set to EMI"); conversion = conversion_emi; } else if(!conversion_str.empty()) { logger.msg(Arc::INFO, "Unknown conversion mode %s, using default", conversion_str); } Arc::XMLNode filter = (*cfg)["Filter"]; if((bool)filter) { 
Arc::XMLNode select_attr = filter["Select"]; Arc::XMLNode reject_attr = filter["Reject"]; for(;(bool)select_attr;++select_attr) select_attrs.push_back((std::string)select_attr); for(;(bool)reject_attr;++reject_attr) reject_attrs.push_back((std::string)reject_attr); }; capath = (std::string)(*cfg)["CACertificatesDir"]; keypath = (std::string)(*cfg)["KeyPath"]; certpath = (std::string)(*cfg)["CertificatePath"]; std::string proxypath = (std::string)(*cfg)["ProxyPath"]; if(!proxypath.empty()) { keypath = proxypath; certpath = proxypath; }; std::string mapping_str = (std::string)(*cfg)["AcceptMapping"]; if((mapping_str == "1") || (mapping_str == "true")) accept_mapping = true; std::string notapplicable_str = (std::string)(*cfg)["AcceptNotApplicable"]; if((notapplicable_str == "1") || (notapplicable_str == "true")) accept_notapplicable = true; valid_ = true; } ArgusPDPClient::~ArgusPDPClient(void) { } static bool contact_pdp(Arc::ClientSOAP* client, const std::string& pdpdlocation, const std::string& certpath, Arc::Logger& logger, Arc::XMLNode& request, Arc::XMLNode& response) { bool ret = false; Arc::NS ns; ns["saml"] = SAML_NAMESPACE; ns["samlp"] = SAMLP_NAMESPACE; ns["xacml-samlp"] = XACML_SAMLP_NAMESPACE; Arc::XMLNode authz_query(ns, "xacml-samlp:XACMLAuthzDecisionQuery"); std::string query_id = Arc::UUID(); authz_query.NewAttribute("ID") = query_id; Arc::Time t; std::string current_time = t.str(Arc::UTCTime); authz_query.NewAttribute("IssueInstant") = current_time; authz_query.NewAttribute("Version") = std::string("2.0"); Arc::Credential cred(certpath, "", "", ""); std::string local_dn_str = cred.GetDN(); std::string local_dn = Arc::convert_to_rdn(local_dn_str); std::string issuer_name = local_dn; authz_query.NewChild("saml:Issuer") = issuer_name; authz_query.NewAttribute("InputContextOnly") = std::string("false"); authz_query.NewAttribute("ReturnContext") = std::string("true"); authz_query.NewChild(request); Arc::NS req_ns; Arc::SOAPEnvelope req_env(req_ns); req_env.NewChild(authz_query); Arc::PayloadSOAP req(req_env); Arc::PayloadSOAP* resp = NULL; Arc::MCC_Status status = client->process(&req, &resp); if(!status) { logger.msg(Arc::ERROR, "Failed to contact PDP server: %s", pdpdlocation); } if(resp == NULL) { logger.msg(Arc::ERROR,"There was no SOAP response return from PDP server: %s", pdpdlocation); } else { std::string str; resp->GetXML(str); logger.msg(Arc::DEBUG, "SOAP response: %s", str); //The authorization query response from argus pdp server is like the following /* http://localhost.localdomain/pdp ...duplication of the request... 
NotApplicable */ Arc::XMLNode respxml = (*resp)["saml2p:Response"]["saml2:Assertion"]["saml2:Statement"]["xacml-context:Response"]; if((bool)respxml) respxml.New(response); delete resp; ret = true; } return ret; } SecHandlerStatus ArgusPDPClient::Handle(Arc::Message* msg) const { int rc = 0; bool res = true; Arc::XMLNode request; Arc::XMLNode response; std::list requests; std::string subject , resource , action; Arc::XMLNode secattr; try{ // Create xacml if(conversion == conversion_subject) { // Extract the user subject according to RFC2256 format std::string dn = msg->Attributes()->get("TLS:IDENTITYDN"); while (dn.rfind("/") != std::string::npos) { std::string s = dn.substr(dn.rfind("/")+1,dn.length()) ; subject = subject + s + ","; dn = dn.substr(0, dn.rfind("/")) ; }; subject = subject.substr(0, subject.length()-1); if(resource.empty()) resource = "ANY"; if(action.empty()) action = "ANY"; rc = create_xacml_request(request,subject.c_str(),resource.c_str(),action.c_str()); if((bool)request) requests.push_back(request); } else if(conversion == conversion_cream) { std::list auths; auths.push_back(msg->Auth()); auths.push_back(msg->AuthContext()); Arc::PayloadSOAP* payload = NULL; try { payload = dynamic_cast(msg->Payload()); } catch(std::exception& e) { }; //if(!payload) throw pep_ex("No SOAP in message"); if(payload) { rc = create_xacml_request_cream(request,auths,msg->Attributes(),payload->Child(0)); } else { // For HTTP operations rc = create_xacml_request_cream(request,auths,msg->Attributes(),Arc::XMLNode()); } if((bool)request) requests.push_back(request); } else if(conversion == conversion_emi) { std::list auths; auths.push_back(msg->Auth()); auths.push_back(msg->AuthContext()); Arc::PayloadSOAP* payload = NULL; try { payload = dynamic_cast(msg->Payload()); } catch(std::exception& e) { }; //if(!payload) throw pep_ex("No SOAP in message"); if(payload) { rc = create_xacml_request_emi(request,auths,msg->Attributes(),payload->Child(0)); } else { // For HTTP operations rc = create_xacml_request_emi(request,auths,msg->Attributes(),Arc::XMLNode()); } if((bool)request) requests.push_back(request); } else { throw pep_ex("Unsupported conversion mode " + Arc::tostring(conversion)); } if (rc != 0) { throw pep_ex("Failed to create XACML request(s): " + Arc::tostring(rc)); } // Contact PDP server std::string local_id; xacml_decision_t decision = XACML_DECISION_INDETERMINATE; // Simple combining algorithm. At least one deny means deny. If none, then at // least one permit means permit. Otherwise deny. TODO: configurable. 
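    // As implemented below: a single "Deny" in any response stops processing
    // and the request is refused; after the loop only PERMIT is accepted, or
    // NOT APPLICABLE when AcceptNotApplicable is enabled; anything else
    // (including the initial INDETERMINATE) leads to refusal.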
logger.msg(Arc::DEBUG, "Have %i requests to process", requests.size()); logger.msg(Arc::INFO, "Creating a client to Argus PDP service"); Arc::URL pdp_url(pdpdlocation); Arc::MCCConfig mcc_cfg; mcc_cfg.AddPrivateKey(keypath); mcc_cfg.AddCertificate(certpath); mcc_cfg.AddCADir(capath); Arc::ClientSOAP client(mcc_cfg,pdp_url,60); for(std::list::iterator it = requests.begin(); it != requests.end(); it++) { Arc::XMLNode req = *it; std::string str; req.GetXML(str); logger.msg(Arc::DEBUG, "XACML authorisation request: %s", str); bool res = contact_pdp(&client, pdpdlocation, certpath, logger, request, response); if (!res) { throw pep_ex(std::string("Failed to process XACML request")); } if (!response) { throw pep_ex("XACML response is empty"); } response.GetXML(str); logger.msg(Arc::DEBUG, "XACML authorisation response: %s", str); // Extract the local user name from the response to be mapped to the GID for (int cn = 0;; ++cn) { Arc::XMLNode cnode = response.Child(cn); if (!cnode) break; std::string authz_res = (std::string)(cnode["xacml-context:Decision"]); if(authz_res.empty()) break; if(authz_res == "Permit") decision = XACML_DECISION_PERMIT; else if(authz_res == "Deny") decision = XACML_DECISION_DENY; else if(authz_res == "NotApplicable") decision = XACML_DECISION_NOT_APPLICABLE; if(decision == XACML_DECISION_DENY) break; /* */ for(int n = 0;; ++n) { Arc::XMLNode scn = cnode.Child(n); if(!scn) break; if(!MatchXMLName(scn, "Obligations")) continue; for(int m = 0;; ++m) { Arc::XMLNode sscn = scn.Child(m); if(!sscn) break; std::string id = (std::string)sscn; local_id = id.empty() ? "":id; } } } if(decision == XACML_DECISION_DENY) break; } if ((decision != XACML_DECISION_PERMIT) && (decision != XACML_DECISION_NOT_APPLICABLE)) { if(conversion == conversion_subject) { logger.msg(Arc::INFO,"%s is not authorized to do action %s in resource %s ", subject, action, resource); } else { logger.msg(Arc::INFO,"Not authorized"); } throw pep_ex("The reached decision is: " + xacml_decision_to_string(decision)); } else if((decision == XACML_DECISION_NOT_APPLICABLE) && (accept_notapplicable == false)) { logger.msg(Arc::INFO,"Not authorized"); throw pep_ex("The reached decision is: " + xacml_decision_to_string(decision) + ". But this service will treat NotApplicable decision as reason to deny request"); } if(accept_mapping && !local_id.empty()) { logger.msg(Arc::INFO,"Grid identity is mapped to local identity '%s'", local_id); msg->Attributes()->set("SEC:LOCALID", local_id); } } catch (pep_ex& e) { logger.msg(Arc::ERROR,"%s",e.desc); res = false; } return res; } int ArgusPDPClient::create_xacml_request(Arc::XMLNode& request,const char * subjectid, const char * resourceid, const char * actionid) const { xacml_create_request(request); Arc::XMLNode subject = xacml_request_add_element(request, "Subject"); std::string subject_attribute = path_to_x500(subjectid); Arc::XMLNode subject_attr = xacml_element_add_attribute(subject, subject_attribute, XACML_DATATYPE_X500NAME, XACML_SUBJECT_ID, ""); Arc::XMLNode resource = xacml_request_add_element(request, "Resource"); Arc::XMLNode resource_attr = xacml_element_add_attribute(resource, resourceid, XACML_DATATYPE_STRING, XACML_RESOURCE_ID, ""); Arc::XMLNode action = xacml_request_add_element(request, "Action"); Arc::XMLNode action_attr = xacml_element_add_attribute(action, actionid, XACML_DATATYPE_STRING, XACML_ACTION_ID, ""); return 0; } // This part is ARC vs CREAM specific. 
// In a future such mapping should be pluggable or configurable or done by service static const std::string BES_FACTORY_NAMESPACE("http://schemas.ggf.org/bes/2006/08/bes-factory"); static const std::string BES_MANAGEMENT_NAMESPACE("http://schemas.ggf.org/bes/2006/08/bes-management"); static const std::string BES_ARC_NAMESPACE("http://www.nordugrid.org/schemas/a-rex"); static const std::string DELEG_ARC_NAMESPACE("http://www.nordugrid.org/schemas/delegation"); static const std::string WSRF_NAMESPACE("http://docs.oasis-open.org/wsrf/rp-2"); static std::string get_cream_action(Arc::XMLNode op) { if(MatchXMLNamespace(op,BES_FACTORY_NAMESPACE)) { if(MatchXMLName(op,"CreateActivity")) return "http://glite.org/xacml/action/ce/job/submit"; if(MatchXMLName(op,"GetActivityStatuses")) return "http://glite.org/xacml/action/ce/job/get-info"; if(MatchXMLName(op,"TerminateActivities")) return "http://glite.org/xacml/action/ce/job/terminate"; if(MatchXMLName(op,"GetActivityDocuments")) return "http://glite.org/xacml/action/ce/job/get-info"; if(MatchXMLName(op,"GetFactoryAttributesDocument")) return "http://glite.org/xacml/action/ce/get-info"; return ""; } else if(MatchXMLNamespace(op,BES_MANAGEMENT_NAMESPACE)) { if(MatchXMLName(op,"StopAcceptingNewActivities")) return ""; if(MatchXMLName(op,"StartAcceptingNewActivities")) return ""; } else if(MatchXMLNamespace(op,BES_ARC_NAMESPACE)) { if(MatchXMLName(op,"ChangeActivityStatus")) return "http://glite.org/xacml/action/ce/job/manage"; if(MatchXMLName(op,"MigrateActivity")) return "http://glite.org/xacml/action/ce/job/manage"; if(MatchXMLName(op,"CacheCheck")) return "http://glite.org/xacml/action/ce/get-info"; return ""; } else if(MatchXMLNamespace(op,DELEG_ARC_NAMESPACE)) { if(MatchXMLName(op,"DelegateCredentialsInit")) return "http://glite.org/xacml/action/ce/delegation/manage"; if(MatchXMLName(op,"UpdateCredentials")) return "http://glite.org/xacml/action/ce/delegation/manage"; return ""; } else if(MatchXMLNamespace(op,WSRF_NAMESPACE)) { return "http://glite.org/xacml/action/ce/get-info"; } // http://glite.org/xacml/action/ce/job/submit // *http://glite.org/xacml/action/ce/job/terminate // *http://glite.org/xacml/action/ce/job/get-info // *http://glite.org/xacml/action/ce/job/manage // http://glite.org/xacml/action/ce/lease/get-info // http://glite.org/xacml/action/ce/lease/manage // *http://glite.org/xacml/action/ce/get-info // http://glite.org/xacml/action/ce/delegation/get-info // *http://glite.org/xacml/action/ce/delegation/manage // http://glite.org/xacml/action/ce/subscription/get-info // http://glite.org/xacml/action/ce/subscription/manage return ""; } static std::string get_cream_action_http(const std::string& method, Arc::Logger& logger) { if(method == "GET") { return "http://glite.org/xacml/action/ce/job/get-info"; } else if(method == "PUT") { return "http://glite.org/xacml/action/ce/job/submit"; } return ""; } static std::string get_sec_attr(std::list auths, const std::string& sid, const std::string& aid) { for(std::list::iterator a = auths.begin(); a != auths.end(); ++a) { Arc::SecAttr* sa = (*a)->get(sid); if(!sa) continue; std::string str = sa->get(aid); if(!str.empty()) return str; } return ""; } static std::list get_sec_attrs(std::list auths, const std::string& sid, const std::string& aid) { for(std::list::iterator a = auths.begin(); a != auths.end(); ++a) { Arc::SecAttr* sa = (*a)->get(sid); if(!sa) continue; std::list strs = sa->getAll(aid); if(!strs.empty()) return strs; } return std::list(); } static std::string get_resource(std::list& 
auths, Arc::MessageAttributes* attrs) { std::string resource = get_sec_attr(auths, "AREX", "SERVICE"); if(!resource.empty()) return resource; if(attrs) resource = attrs->get("ENDPOINT"); return resource; } int ArgusPDPClient::create_xacml_request_cream(Arc::XMLNode& request, std::list auths, Arc::MessageAttributes* attrs, Arc::XMLNode operation) const { logger.msg(Arc::DEBUG,"Doing CREAM request"); class ierror { public: std::string desc; ierror(const std::string& err):desc(err) { }; }; try { xacml_create_request(request); // Environment Arc::XMLNode environment = xacml_request_add_element(request, "Environment"); std::string env_attr_id = XACML_GLITE_ATTRIBUTE_PROFILE_ID; //"http://glite.org/xacml/attribute/profile-id"; std::string env_attr_value = "http://glite.org/xacml/profile/grid-ce/1.0"; logger.msg(Arc::DEBUG,"Adding profile-id value: %s", env_attr_value); xacml_element_add_attribute(environment, env_attr_value, XACML_DATATYPE_ANYURI, env_attr_id, ""); // Subject Arc::XMLNode subject = xacml_request_add_element(request, "Subject"); std::string sub_attr_id = XACML_SUBJECT_ID; //"urn:oasis:names:tc:xacml:1.0:subject:subject-id"; std::string sub_attr_value = get_sec_attr(auths, "TLS", "IDENTITY"); if(sub_attr_value.empty()) throw ierror("Failed to extract TLS:IDENTITY"); sub_attr_value = path_to_x500(sub_attr_value); logger.msg(Arc::DEBUG,"Adding subject-id value: %s", sub_attr_value); xacml_element_add_attribute(subject, sub_attr_value, XACML_DATATYPE_X500NAME, sub_attr_id, ""); std::string iss_attr_id = XACML_GLITE_ATTRIBUTE_SUBJECT_ISSUER; //"http://glite.org/xacml/attribute/subject-issuer"; std::string iss_attr_value = get_sec_attr(auths, "TLS", "CA"); if(iss_attr_value.empty()) throw ierror("Failed to extract TLS:CA"); iss_attr_value = path_to_x500(iss_attr_value); logger.msg(Arc::DEBUG,"Adding subject-issuer value: %s", iss_attr_value); xacml_element_add_attribute(subject, iss_attr_value, XACML_DATATYPE_X500NAME, iss_attr_id, ""); std::string vo_attr_id = XACML_GLITE_ATTRIBUTE_VIRTUAL_ORGANIZATION; // "http://glite.org/xacml/attribute/virtual-organization"; std::list vos = get_sec_attrs(auths, "TLS", "VO"); for(std::list::iterator vo = vos.begin(); vo!=vos.end(); ++vo) { logger.msg(Arc::DEBUG,"Adding virtual-organization value: %s",*vo); } if(vos.size()>0) xacml_element_add_attribute(subject, vos, XACML_DATATYPE_STRING, vo_attr_id, ""); std::string fqan_attr_id = XACML_GLITE_ATTRIBUTE_FQAN; //"http://glite.org/xacml/attribute/fqan"; std::string pfqan; std::list fqans = get_sec_attrs(auths, "TLS", "VOMS"); std::list flatten_fqans; for(std::list::iterator fqan = fqans.begin(); fqan!=fqans.end(); ++fqan) { std::string fqan_str = flatten_fqan(*fqan); if(fqan_str.empty()) continue; if(pfqan.empty()) pfqan = fqan_str; flatten_fqans.push_back(fqan_str); logger.msg(Arc::DEBUG,"Adding FQAN value: %s",fqan_str); } if(flatten_fqans.size()>0)xacml_element_add_attribute(subject, flatten_fqans, XACML_DATATYPE_FQAN, fqan_attr_id, ""); // /voname=testers.eu-emi.eu/hostname=emitestbed07.cnaf.infn.it:15002/testers.eu-emi.eu/test3:test_ga=ciccio -> ? // /VO=testers.eu-emi.eu/Group=testers.eu-emi.eu/Group=test1 -> /testers.eu-emi.eu/test1 if(!pfqan.empty()) { std::string pfqan_attr_id = XACML_GLITE_ATTRIBUTE_FQAN_PRIMARY; //"http://glite.org/xacml/attribute/fqan/primary"; logger.msg(Arc::DEBUG,"Adding FQAN/primary value: %s",pfqan); xacml_element_add_attribute(subject, pfqan, XACML_DATATYPE_FQAN, pfqan_attr_id, ""); // TODO: convert to VOMS FQAN? 
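      // pfqan is simply the first FQAN that flatten_fqan() could convert;
      // reusing the example above, "/VO=testers.eu-emi.eu/Group=testers.eu-emi.eu/Group=test1"
      // becomes "/testers.eu-emi.eu/test1" and, being first, is also reported
      // as the primary FQAN.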
} std::string cert_attr_id = XACML_SUBJECT_KEY_INFO; //urn:oasis:names:tc:xacml:1.0:subject:key-info std::string cert_attr_value = get_sec_attr(auths, "TLS", "CERTIFICATE"); if(cert_attr_value.empty()) throw ierror("Failed to create attribute key-info object"); std::string chain_attr_value = get_sec_attr(auths, "TLS", "CERTIFICATECHAIN"); chain_attr_value = cert_attr_value + "\n" + chain_attr_value; logger.msg(Arc::DEBUG,"Adding cert chain value: %s", chain_attr_value); xacml_element_add_attribute(subject, chain_attr_value, XACML_DATATYPE_STRING, cert_attr_id, ""); // Resource Arc::XMLNode resource = xacml_request_add_element(request, "Resource"); std::string res_attr_id = XACML_RESOURCE_ID; //"urn:oasis:names:tc:xacml:1.0:resource:resource-id"; std::string res_attr_value = get_resource(auths,attrs); if(res_attr_value.empty()) throw ierror("Failed to extract resource identifier"); logger.msg(Arc::DEBUG,"Adding resource-id value: %s", res_attr_value); xacml_element_add_attribute(resource, res_attr_value, XACML_DATATYPE_STRING, res_attr_id, ""); // Action Arc::XMLNode action = xacml_request_add_element(request, "Action"); std::string act_attr_id = XACML_ACTION_ID; //"urn:oasis:names:tc:xacml:1.0:action:action-id"; std::string act_attr_value; if((bool)operation) { act_attr_value = get_cream_action(operation); } else if(attrs) { act_attr_value = get_cream_action_http(attrs->get("HTTP:METHOD"),logger); } if(act_attr_value.empty()) act_attr_value = "http://glite.org/xacml/action/ANY"; // throw ierror("Failed to generate action name"); logger.msg(Arc::DEBUG,"Adding action-id value: %s", act_attr_value); xacml_element_add_attribute(action, act_attr_value, XACML_DATATYPE_STRING, act_attr_id, ""); } catch(ierror& err) { logger.msg(Arc::DEBUG,"CREAM request generation failed: %s",err.desc); return 1; } return 0; } static bool split_voms(const std::string& voms_attr, std::string& vo, std::string& group, std::list& roles, std::list& attrs) { vo.resize(0); group.resize(0); roles.clear(); attrs.clear(); std::list elements; Arc::tokenize(voms_attr,elements,"/"); std::list::iterator element = elements.begin(); for(;element!=elements.end();++element) { std::string::size_type p = element->find('='); if(p == std::string::npos) { attrs.push_back(*element); } else { std::string key = element->substr(0,p); if(key == "VO") { vo = element->substr(p+1); } else if(key == "Group") { group += "/"+element->substr(p+1); } else if(key == "Role") { roles.push_back(element->substr(p+1)); } else { attrs.push_back(*element); } } } return true; } static std::string get_emi_action_http(const std::string& method) { if(method == "GET") { return EMIES_OPERATION_CREATION; } else if(method == "PUT") { return EMIES_OPERATION_ACTIVITYMANGEMENT; } return ""; } static std::string get_emi_action(Arc::XMLNode op) { if(MatchXMLNamespace(op,EMIES_OPERATION_CREATION)) return EMIES_OPERATION_CREATION; if(MatchXMLNamespace(op,EMIES_OPERATION_ACTIVITY)) return EMIES_OPERATION_ACTIVITY; if(MatchXMLNamespace(op,EMIES_OPERATION_ACTIVITYMANGEMENT)) return EMIES_OPERATION_ACTIVITYMANGEMENT; if(MatchXMLNamespace(op,EMIES_OPERATION_RESOURCEINFO)) return EMIES_OPERATION_RESOURCEINFO; if(MatchXMLNamespace(op,EMIES_OPERATION_DELEGATION)) return EMIES_OPERATION_DELEGATION; return ""; } static std::string get_emi_action_arex(const std::string& ns, const std::string& action) { if(ns == AREX_JOB_POLICY_OPERATION_URN) { if(action == AREX_JOB_POLICY_OPERATION_CREATE) return EMIES_OPERATION_CREATION; if(action == AREX_JOB_POLICY_OPERATION_MODIFY) return 
EMIES_OPERATION_ACTIVITYMANGEMENT; if(action == AREX_JOB_POLICY_OPERATION_READ) return EMIES_OPERATION_ACTIVITY; } else if(ns == AREX_POLICY_OPERATION_URN) { if(action == AREX_POLICY_OPERATION_INFO) return EMIES_OPERATION_RESOURCEINFO; if(action == AREX_POLICY_OPERATION_ADMIN) return ""; } return ""; } int ArgusPDPClient::create_xacml_request_emi(Arc::XMLNode& request, std::list auths, Arc::MessageAttributes* attrs, Arc::XMLNode operation) const { logger.msg(Arc::DEBUG,"Doing EMI request"); class ierror { public: std::string desc; ierror(const std::string& err):desc(err) { }; }; try { xacml_create_request(request); // Environment Arc::XMLNode environment = xacml_request_add_element(request, "Environment"); std::string env_attr_id = XACML_DCISEC_ATTRIBUTE_PROFILE_ID; //"http://dci-sec.org/xacml/attribute/profile-id"; std::string env_attr_value = "http://dci-sec.org/xacml/profile/common-ce/1.0"; //? not defined ? logger.msg(Arc::DEBUG,"Adding profile-id value: %s", env_attr_value); xacml_element_add_attribute(environment, env_attr_value, XACML_DATATYPE_ANYURI, env_attr_id, ""); // Subject Arc::XMLNode subject = xacml_request_add_element(request, "Subject"); std::string sub_attr_id = XACML_SUBJECT_ID; //"urn:oasis:names:tc:xacml:1.0:subject:subject-id"; std::string sub_attr_value = get_sec_attr(auths, "TLS", "IDENTITY"); if(sub_attr_value.empty()) throw ierror("Failed to extract TLS:IDENTITY"); sub_attr_value = path_to_x500(sub_attr_value); logger.msg(Arc::DEBUG,"Adding subject-id value: %s", sub_attr_value); xacml_element_add_attribute(subject, sub_attr_value, XACML_DATATYPE_X500NAME, sub_attr_id, ""); std::string iss_attr_id = XACML_DCISEC_ATTRIBUTE_SUBJECT_ISSUER; //"http://dci-sec.org/xacml/attribute/subject-issuer"; std::string iss_attr_value = get_sec_attr(auths, "TLS", "CA"); if(iss_attr_value.empty()) throw ierror("Failed to extract TLS:CA"); iss_attr_value = path_to_x500(iss_attr_value); logger.msg(Arc::DEBUG,"Adding subject-issuer value: %s", iss_attr_value); xacml_element_add_attribute(subject, iss_attr_value, XACML_DATATYPE_X500NAME, iss_attr_id, ""); std::string vo_attr_id = XACML_DCISEC_ATTRIBUTE_VIRTUAL_ORGANIZATION; // "http://dci-sec.org/xacml/attribute/virtual-organization"; std::list vos = get_sec_attrs(auths, "TLS", "VO"); for(std::list::iterator vo = vos.begin(); vo!=vos.end(); ++vo) { logger.msg(Arc::DEBUG,"Adding Virtual Organization value: %s",*vo); } if(vos.size()>0) xacml_element_add_attribute(subject, vos, XACML_DATATYPE_STRING, vo_attr_id, ""); std::string group_attr_id = XACML_DCISEC_ATTRIBUTE_GROUP; //"http://dci-sec.org/xacml/attribute/group"; std::list fqans = get_sec_attrs(auths, "TLS", "VOMS"); // TODO: handle no fqans std::list groups; std::string pgroup; for(std::list::iterator fqan = fqans.begin(); fqan!=fqans.end(); ++fqan) { std::string vo; std::string group; std::list roles; std::list attrs; if(!split_voms(*fqan,vo,group,roles,attrs)) throw ierror("Failed to convert VOMS FQAN"); if(pgroup.empty()) pgroup = group; if(!group.empty()) { groups.push_back(group); } } groups.unique(); for(std::list::iterator g = groups.begin(); g!=groups.end(); ++g) { logger.msg(Arc::DEBUG,"Adding VOMS group value: %s", *g); } if(groups.size()>0) xacml_element_add_attribute(subject, groups, XACML_DATATYPE_STRING, group_attr_id, ""); if(!pgroup.empty()) { std::string pgroup_attr_id = XACML_DCISEC_ATTRIBUTE_GROUP_PRIMARY; //"http://dci-sec.org/xacml/attribute/group/primary" logger.msg(Arc::DEBUG,"Adding VOMS primary group value: %s", pgroup); 
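      // pgroup is the group of the first FQAN carrying one, as split by
      // split_voms() above; for a hypothetical FQAN
      // "/VO=atlas/Group=atlas/Group=prod/Role=admin" split_voms() yields
      // vo="atlas", group="/atlas/prod", roles={"admin"}.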
xacml_element_add_attribute(subject, pgroup, XACML_DATATYPE_STRING, pgroup_attr_id, ""); } std::string prole; pgroup.resize(0); for(std::list::iterator fqan = fqans.begin(); fqan!=fqans.end(); ++fqan) { std::string vo; std::string group; std::list roles; std::list attrs; if(!split_voms(*fqan,vo,group,roles,attrs)) throw ierror("Failed to convert VOMS FQAN"); std::string role_attr_id = XACML_DCISEC_ATTRIBUTE_ROLE; //"http://dci-sec.org/xacml/attribute/role" // TODO: handle no roles for(std::list::iterator role = roles.begin(); role!=roles.end();) { if(role->empty()) { role = roles.erase(role); continue; } if(prole.empty()) { prole = *role; pgroup = group; } logger.msg(Arc::DEBUG,"Adding VOMS role value: %s", *role); ++role; } if(roles.size()>0) xacml_element_add_attribute(subject, roles, XACML_DATATYPE_STRING, role_attr_id, group); } if(!prole.empty()) { std::string prole_attr_id = XACML_DCISEC_ATTRIBUTE_ROLE_PRIMARY; //"http://dci-sec.org/xacml/attribute/role/primary" logger.msg(Arc::DEBUG,"Adding VOMS primary role value: %s", prole); xacml_element_add_attribute(subject, prole, XACML_DATATYPE_STRING, prole_attr_id, pgroup); } // Resource Arc::XMLNode resource = xacml_request_add_element(request, "Resource"); std::string res_attr_id = XACML_RESOURCE_ID; //"urn:oasis:names:tc:xacml:1.0:resource:resource-id" std::string res_attr_value = get_resource(auths,attrs); if(res_attr_value.empty()) throw ierror("Failed to extract resource identifier"); logger.msg(Arc::DEBUG,"Adding resource-id value: %s", res_attr_value); xacml_element_add_attribute(resource, res_attr_value, XACML_DATATYPE_STRING, res_attr_id, ""); std::string res_own_attr_id = XACML_DCISEC_ATTRIBUTE_RESOURCE_OWNER; //"http://dci-sec.org/xacml/attribute/resource-owner" std::string res_own_attr_value = get_sec_attr(auths, "TLS", "LOCALSUBJECT"); if(res_own_attr_value.empty()) throw ierror("Failed to extract LOCALSUBJECT"); logger.msg(Arc::DEBUG,"Adding resource-owner value: %s", res_own_attr_value); xacml_element_add_attribute(resource, path_to_x500(res_own_attr_value), XACML_DATATYPE_X500NAME, res_own_attr_id, ""); // Action // In a future action names should be synchronized among services Arc::XMLNode action = xacml_request_add_element(request, "Action"); std::string act_attr_id = XACML_ACTION_ID; //"urn:oasis:names:tc:xacml:1.0:action:action-id"; //"http://dci-sec.org/xacml/action/arc/arex/"+operation.Name std::string arex_ns = get_sec_attr(auths, "AREX", "NAMESPACE"); std::string arex_action = get_sec_attr(auths, "AREX", "ACTION"); std::string act_attr_value = get_emi_action(operation); if(act_attr_value.empty()) act_attr_value = get_emi_action_arex(arex_ns, arex_action); if(act_attr_value.empty()) act_attr_value = get_emi_action_http(attrs->get("HTTP:METHOD")); //if(act_attr_value.empty() && !arex_ns.empty()) act_attr_value = arex_ns + "/" + arex_action; if(act_attr_value.empty()) act_attr_value = EMIES_OPERATION_ANY; //throw ierror("Failed to generate action name"); logger.msg(Arc::DEBUG,"Adding action-id value: %s", act_attr_value); xacml_element_add_attribute(action, act_attr_value, XACML_DATATYPE_STRING, act_attr_id, ""); } catch(ierror& err) { logger.msg(Arc::DEBUG,"EMI request generation failed: %s",err.desc); return 1; } return 0; } } // namespace ArcSec nordugrid-arc-5.4.2/src/hed/identitymap/PaxHeaders.7502/ArgusPDPClient.h0000644000000000000000000000012412110401544024000 xustar000000000000000027 mtime=1361183588.521754 27 atime=1513200574.445701 30 ctime=1513200662.010772078 
nordugrid-arc-5.4.2/src/hed/identitymap/ArgusPDPClient.h0000644000175000002070000000340412110401544024046 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ARGUSPDPCLIENT_H__ #define __ARC_SEC_ARGUSPDPCLIENT_H__ #include #include #include #include #include #include #include #include namespace ArcSec { class ArgusPDPClient : public SecHandler { private: typedef enum { conversion_subject, conversion_cream, conversion_emi } conversion_type; std::string pdpdlocation; std::string keypath; std::string certpath; std::string capath; std::list select_attrs; std::list reject_attrs; conversion_type conversion; bool accept_mapping; bool accept_notapplicable; bool valid_; static Arc::Logger logger; public: ArgusPDPClient(Arc::Config *cfg,Arc::PluginArgument* parg); ArgusPDPClient(void); virtual ~ArgusPDPClient(void); virtual SecHandlerStatus Handle(Arc::Message* msg) const ; operator bool(void) { return valid_; }; bool operator!(void) { return !valid_; }; private: int create_xacml_request(Arc::XMLNode& request, const char * subjectid, const char * resourceid, const char * actionid) const; int create_xacml_request_cream(Arc::XMLNode& request, std::list auths, Arc::MessageAttributes* attrs, Arc::XMLNode operation) const; int create_xacml_request_emi(Arc::XMLNode& request, std::list auths, Arc::MessageAttributes* attrs, Arc::XMLNode operation) const; // const char * decision_tostring(xacml_decision_t decision); // const char * fulfillon_tostring(xacml_fulfillon_t fulfillon); }; } // namespace ArcSec #endif /* __ARC_SEC_ARGUSPDPCLIENT_H__ */ nordugrid-arc-5.4.2/src/hed/identitymap/PaxHeaders.7502/SimpleMap.cpp0000644000000000000000000000012412123101751023437 xustar000000000000000027 mtime=1363969001.473645 27 atime=1513200574.444701 30 ctime=1513200662.015772139 nordugrid-arc-5.4.2/src/hed/identitymap/SimpleMap.cpp0000644000175000002070000001273112123101751023510 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include // NOTE: On Solaris errno is not working properly if cerrno is included first #include #include #include #include #include #include #include #include #define odlog(LEVEL) std::cerr #include #include "SimpleMap.h" namespace ArcSec { #ifndef WIN32 class FileLock { private: int h_; struct flock l_; public: FileLock(int h):h_(h) { if(h_ == -1) return; l_.l_type=F_WRLCK; l_.l_whence=SEEK_SET; l_.l_start=0; l_.l_len=0; for(;;) { if(fcntl(h_,F_SETLKW,&l_) == 0) break; if(errno != EINTR) { h_=-1; return; }; }; }; ~FileLock(void) { if(h_ == -1) return; l_.l_type=F_UNLCK; fcntl(h_,F_SETLKW,&l_); }; operator bool(void) { return (h_ != -1); }; bool operator!(void) { return (h_ == -1); }; }; #else // TODO: implement class FileLock { public: FileLock(int) { }; operator bool(void) { return false; }; bool operator!(void) { return true; }; }; #endif SimpleMap::SimpleMap(const std::string& dir):dir_(dir) { if((dir_.length() == 0) || (dir_[dir_.length()-1] != '/')) dir_+="/"; if(dir_[0] != '/') dir_=Glib::get_current_dir()+"/"+dir_; pool_handle_=::open((dir_+"pool").c_str(),O_RDWR); } SimpleMap::~SimpleMap(void) { if(pool_handle_ != -1) close(pool_handle_); pool_handle_=-1; } #define failure(S) { \ odlog(ERROR)<<"SimpleMap: "<<(S)< names; { std::ifstream f((dir_+"pool").c_str()); if(!f.is_open()) failure("can't open pool file") std::string name; while(f.good()) { std::getline(f,name); if(!f.fail()) break; if(name.empty()) continue; names.push_back(name); }; }; if(!names.size()) failure("pool is empty"); // Remove all used names from list. 
Also find oldest maping. time_t oldmap_time = 0; std::string oldmap_name; std::string oldmap_subject; try { std::string file; Glib::Dir dir(dir_); for(;;) { file=dir.read_name(); if(file.empty()) break; if(file == ".") continue; if(file == "..") continue; if(file == "pool") continue; std::string filename = dir_+file; struct stat st; if(stat(filename.c_str(),&st) != 0) continue; if(!S_ISREG(st.st_mode)) continue; std::ifstream f(filename.c_str()); if(!f.is_open()) { // trash in directory failure("can't open one of mapping files"); }; std::string name; std::getline(f,name); // find this name in list std::list::iterator i = names.begin(); for(;i!=names.end();++i) if(*i == name) break; if(i == names.end()) { // Always try to destroy old mappings without corresponding // entry in the pool file if(((unsigned int)(time(NULL) - st.st_mtime)) >= SELFUNMAP_TIME) { unlink(filename.c_str()); }; } else { names.erase(i); if( (oldmap_name.length() == 0) || (((int)(oldmap_time - st.st_mtime)) > 0) ) { oldmap_name=name; oldmap_subject=file; oldmap_time=st.st_mtime; }; }; }; } catch(Glib::FileError& e) { failure("can't list pool directory"); }; if(names.size()) { // Claim one of unused names std::ofstream f(filename.c_str()); if(!f.is_open()) failure("can't create mapping file"); f<<*(names.begin())< #endif #include #include #include #include #include #include #include #include #include "ArgusPEPClient.h" static const char XACML_DATATYPE_FQAN[]= "http://glite.org/xacml/datatype/fqan"; #define AREX_JOB_POLICY_OPERATION_URN "http://www.nordugrid.org/schemas/policy-arc/types/a-rex/joboperation" #define AREX_JOB_POLICY_OPERATION_CREATE "Create" #define AREX_JOB_POLICY_OPERATION_MODIFY "Modify" #define AREX_JOB_POLICY_OPERATION_READ "Read" #define AREX_POLICY_OPERATION_URN "http://www.nordugrid.org/schemas/policy-arc/types/a-rex/operation" #define AREX_POLICY_OPERATION_ADMIN "Admin" #define AREX_POLICY_OPERATION_INFO "Info" #define EMIES_OPERATION_CREATION "http://www.eu-emi.eu/es/2010/12/creation" #define EMIES_OPERATION_ACTIVITY "http://www.eu-emi.eu/es/2010/12/activity" #define EMIES_OPERATION_ACTIVITYMANGEMENT "http://www.eu-emi.eu/es/2010/12/activitymanagement" #define EMIES_OPERATION_RESOURCEINFO "http://www.eu-emi.eu/es/2010/12/resourceinfo" #define EMIES_OPERATION_DELEGATION "http://www.gridsite.org/namespaces/delegation-21" #define EMIES_OPERATION_ANY "http://dci-sec.org/xacml/action/ANY" static Arc::Plugin* get_sechandler(Arc::PluginArgument* arg) { ArcSec::SecHandlerPluginArgument* shcarg = arg?dynamic_cast(arg):NULL; if(!shcarg) return NULL; ArcSec::ArgusPEPClient* plugin = new ArcSec::ArgusPEPClient((Arc::Config*)(*shcarg),arg); if(!plugin) return NULL; if(!(*plugin)) { delete plugin; return NULL;}; return plugin; } extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "arguspepclient.map", "HED:SHC", NULL, 0, &get_sechandler}, { NULL, NULL, NULL, 0, NULL } }; namespace ArcSec { class pep_ex { public: std::string desc; pep_ex(const std::string& desc_):desc(desc_) {}; }; static std::string path_to_x500(const std::string& path) { class url_ex: public Arc::URL { public: static std::string Path2BaseDN(const std::string& path) { return Arc::URL::Path2BaseDN(path); }; }; return url_ex::Path2BaseDN(path); } std::string flatten_fqan(const std::string& wfqan) { const std::string vo_tag("/VO="); const std::string group_tag("/Group="); std::string fqan; std::string::size_type pos1 = 0; std::string::size_type pos2 = 0; if(wfqan.substr(0,vo_tag.length()) != vo_tag) return fqan; for(;;) { pos1 = 
wfqan.find(group_tag,pos2); if(pos1 == std::string::npos) break; pos2 = wfqan.find("/",pos1+1); if(pos2 == std::string::npos) { fqan += "/" + wfqan.substr(pos1+group_tag.length()); break; }; fqan += "/" + wfqan.substr(pos1+group_tag.length(),pos2-pos1-group_tag.length()); }; return fqan; } Arc::Logger ArgusPEPClient::logger(Arc::Logger::getRootLogger(), "SecHandler.Argus"); int ArgusPEPClient::pep_log(int level, const char *fmt, va_list args) { char buf[1024]; vsnprintf(buf,sizeof(buf)-1,fmt,args); buf[sizeof(buf)-1] = 0; Arc::LogLevel l = Arc::INFO; switch(logger.getThreshold()) { case PEP_LOGLEVEL_DEBUG: l = Arc::DEBUG; break; case PEP_LOGLEVEL_INFO: l = Arc::INFO; break; case PEP_LOGLEVEL_WARN: l = Arc::WARNING; break; case PEP_LOGLEVEL_ERROR: l = Arc::ERROR; break; } logger.msg(l,"%s",buf); return 0; } std::string xacml_decision_to_string(xacml_decision_t decision) { switch(decision) { case XACML_DECISION_DENY: return "DENY"; case XACML_DECISION_PERMIT: return "PERMIT"; case XACML_DECISION_INDETERMINATE: return "INDETERMINATE"; case XACML_DECISION_NOT_APPLICABLE: return "NOT APPLICABLE"; }; return "UNKNOWN"; } /* extract the elements from the configuration file */ ArgusPEPClient::ArgusPEPClient(Arc::Config *cfg,Arc::PluginArgument* parg):ArcSec::SecHandler(cfg,parg),conversion(conversion_emi) { valid_ = false; accept_mapping = false; logger.setThreshold(Arc::DEBUG); pepdlocation = (std::string)(*cfg)["PEPD"]; if(pepdlocation.empty()) { logger.msg(Arc::ERROR, "PEPD location is missing"); return; } logger.msg(Arc::DEBUG, "PEPD location: %s",pepdlocation); std::string conversion_str = (std::string)(*cfg)["Conversion"]; if(conversion_str == "direct") { logger.msg(Arc::DEBUG, "Conversion mode is set to DIRECT"); conversion = conversion_direct; } else if(conversion_str == "subject") { logger.msg(Arc::DEBUG, "Conversion mode is set to SUBJECT"); conversion = conversion_subject; } else if(conversion_str == "cream") { logger.msg(Arc::DEBUG, "Conversion mode is set to CREAM"); conversion = conversion_cream; } else if(conversion_str == "emi") { logger.msg(Arc::DEBUG, "Conversion mode is set to EMI"); conversion = conversion_emi; } else if(!conversion_str.empty()) { logger.msg(Arc::INFO, "Unknown conversion mode %s, using default", conversion_str); } Arc::XMLNode filter = (*cfg)["Filter"]; if((bool)filter) { Arc::XMLNode select_attr = filter["Select"]; Arc::XMLNode reject_attr = filter["Reject"]; for(;(bool)select_attr;++select_attr) select_attrs.push_back((std::string)select_attr); for(;(bool)reject_attr;++reject_attr) reject_attrs.push_back((std::string)reject_attr); }; pep_log_level = PEP_LOGLEVEL_NONE; switch(logger.getThreshold()) { case Arc::DEBUG: pep_log_level = PEP_LOGLEVEL_DEBUG; break; case Arc::VERBOSE: pep_log_level = PEP_LOGLEVEL_INFO; break; case Arc::INFO: pep_log_level = PEP_LOGLEVEL_INFO; break; case Arc::WARNING: pep_log_level = PEP_LOGLEVEL_WARN; break; case Arc::ERROR: pep_log_level = PEP_LOGLEVEL_ERROR; break; case Arc::FATAL: pep_log_level = PEP_LOGLEVEL_ERROR; break; }; capath = (std::string)(*cfg)["CACertificatesDir"]; keypath = (std::string)(*cfg)["KeyPath"]; certpath = (std::string)(*cfg)["CertificatePath"]; std::string proxypath = (std::string)(*cfg)["ProxyPath"]; if(!proxypath.empty()) { keypath = proxypath; certpath = proxypath; }; std::string mapping_str = (std::string)(*cfg)["AcceptMapping"]; if((mapping_str == "1") || (mapping_str == "true")) accept_mapping = true; valid_ = true; } ArgusPEPClient::~ArgusPEPClient(void) { } SecHandlerStatus 
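// Overall flow of Handle() (summarised from the body below): a PEP handle is
// created with pep_initialize(), endpoint/SSL/log options are set via
// pep_setoption(), one or more XACML requests are built according to the
// configured conversion mode (direct, subject, cream or emi), each request is
// submitted with pep_authorize(), the same deny-overrides style combining as
// in ArgusPDPClient is applied, and on success a local user name extracted
// from the response is stored as SEC:LOCALID when AcceptMapping is enabled.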
ArgusPEPClient::Handle(Arc::Message* msg) const { int rc = 0; bool res = true; PEP* pep_handle = NULL; pep_error_t pep_rc = PEP_OK; xacml_response_t * response = NULL; xacml_request_t * request = NULL; std::list requests; std::string subject , resource , action; Arc::XMLNode secattr; try{ // set up the communication with the pepd host pep_handle = pep_initialize(); if (pep_handle == NULL) throw pep_ex(std::string("Failed to initialize PEP client:")); pep_rc = pep_setoption(pep_handle, PEP_OPTION_LOG_LEVEL, pep_log_level); if (pep_rc != PEP_OK) throw pep_ex("Failed to set PEP log level: '" + pepdlocation + "' "+ pep_strerror(pep_rc)); pep_rc = pep_setoption(pep_handle, PEP_OPTION_LOG_HANDLER, &pep_log); if (pep_rc != PEP_OK) throw pep_ex("Failed to set PEP log handler: '" + pepdlocation + "' "+ pep_strerror(pep_rc)); pep_rc = pep_setoption(pep_handle, PEP_OPTION_ENDPOINT_SSL_VALIDATION, 1); if (pep_rc != PEP_OK) throw pep_ex("Failed to set PEP validation: '" + pepdlocation + "' "+ pep_strerror(pep_rc)); pep_rc = pep_setoption(pep_handle, PEP_OPTION_ENDPOINT_URL, pepdlocation.c_str()); if (pep_rc != PEP_OK) throw pep_ex("Failed to set PEP URL: '" + pepdlocation + "' "+ pep_strerror(pep_rc)); pep_rc = pep_setoption(pep_handle, PEP_OPTION_ENABLE_OBLIGATIONHANDLERS, 0); if (pep_rc != PEP_OK) throw pep_ex("Failed to set PEP obligation handling: '" + pepdlocation + "' "+ pep_strerror(pep_rc)); if(!capath.empty()) { pep_rc = pep_setoption(pep_handle, PEP_OPTION_ENDPOINT_SERVER_CAPATH, capath.c_str()); if (pep_rc != PEP_OK) throw pep_ex("Failed to set PEP CA path: '" + pepdlocation + "' "+ pep_strerror(pep_rc)); } if(!keypath.empty()) { pep_rc = pep_setoption(pep_handle, PEP_OPTION_ENDPOINT_CLIENT_KEY, keypath.c_str()); if (pep_rc != PEP_OK) throw pep_ex("Failed to set PEP key: '" + pepdlocation + "' "+ pep_strerror(pep_rc)); } if(!certpath.empty()) { pep_rc = pep_setoption(pep_handle, PEP_OPTION_ENDPOINT_CLIENT_CERT, certpath.c_str()); if (pep_rc != PEP_OK) throw pep_ex("Failed to set PEP certificate: '" + pepdlocation + "' "+ pep_strerror(pep_rc)); } if(conversion == conversion_direct) { msg->Auth()->Export(Arc::SecAttr::ARCAuth, secattr); msg->AuthContext()->Export(Arc::SecAttr::ARCAuth, secattr); rc= create_xacml_request_direct(requests,secattr); } else if(conversion == conversion_subject) { //resource= (std::string) secattr["RequestItem"][0]["Resource"][0]; //action= (std::string) secattr["RequestItem"][0]["Action"][0]; // Extract the user subject according to RFC2256 format std::string dn = msg->Attributes()->get("TLS:IDENTITYDN"); while (dn.rfind("/") != std::string::npos) { std::string s = dn.substr(dn.rfind("/")+1,dn.length()) ; subject = subject + s + ","; dn = dn.substr(0, dn.rfind("/")) ; }; subject = subject.substr(0, subject.length()-1); if(resource.empty()) resource = "ANY"; if(action.empty()) action = "ANY"; rc = create_xacml_request(&request,subject.c_str(),resource.c_str(),action.c_str()); if(request != NULL) requests.push_back(request); request = NULL; } else if(conversion == conversion_cream) { std::list auths; auths.push_back(msg->Auth()); auths.push_back(msg->AuthContext()); Arc::PayloadSOAP* payload = NULL; try { payload = dynamic_cast(msg->Payload()); } catch(std::exception& e) { }; //if(!payload) throw pep_ex("No SOAP in message"); if(payload) { rc = create_xacml_request_cream(&request,auths,msg->Attributes(),payload->Child(0)); } else { // For HTTP operations rc = create_xacml_request_cream(&request,auths,msg->Attributes(),Arc::XMLNode()); } if(request != NULL) 
requests.push_back(request); request = NULL; } else if(conversion == conversion_emi) { std::list auths; auths.push_back(msg->Auth()); auths.push_back(msg->AuthContext()); Arc::PayloadSOAP* payload = NULL; try { payload = dynamic_cast(msg->Payload()); } catch(std::exception& e) { }; //if(!payload) throw pep_ex("No SOAP in message"); if(payload) { rc = create_xacml_request_emi(&request,auths,msg->Attributes(),payload->Child(0)); } else { rc = create_xacml_request_emi(&request,auths,msg->Attributes(),Arc::XMLNode()); } if(request != NULL) requests.push_back(request); request = NULL; } else { throw pep_ex("Unsupported conversion mode " + Arc::tostring(conversion)); } if (rc != 0) { throw pep_ex("Failed to create XACML request(s): " + Arc::tostring(rc)); } std::string local_id; xacml_decision_t decision = XACML_DECISION_INDETERMINATE; // Simple combining algorithm. At least one deny means deny. If none, then at // least one permit means permit. Otherwise deny. TODO: configurable. logger.msg(Arc::DEBUG, "Have %i requests to process", requests.size()); while(requests.size() > 0) { request = requests.front(); requests.pop_front(); pep_rc = pep_authorize(pep_handle,&request,&response); if (pep_rc != PEP_OK) { throw pep_ex(std::string("Failed to process XACML request: ")+pep_strerror(pep_rc)); } if (response == NULL) { throw pep_ex("XACML response is empty"); } // Extract the local user name from the response to be mapped to the GID size_t results_l = xacml_response_results_length(response); int i = 0; for(i = 0; iAttributes()->set("SEC:LOCALID", local_id); } } catch (pep_ex& e) { logger.msg(Arc::ERROR,"%s",e.desc); res = false; } if(response) xacml_response_delete(response); if(request) xacml_request_delete(request); while(requests.size() > 0) { xacml_request_delete(requests.front()); requests.pop_front(); } if(pep_handle) pep_destroy(pep_handle); return res; } int ArgusPEPClient::create_xacml_request(xacml_request_t ** request,const char * subjectid, const char * resourceid, const char * actionid) const { xacml_subject_t * subject= xacml_subject_create(); if (subject == NULL) { logger.msg(Arc::DEBUG, "Subject of request is null \n"); return 1; } xacml_attribute_t * subject_attr_id= xacml_attribute_create(XACML_SUBJECT_ID); if (subject_attr_id == NULL) { logger.msg(Arc::DEBUG,"Can not create XACML SubjectAttribute: %s\n", XACML_SUBJECT_ID); xacml_subject_delete(subject); return 1; } xacml_attribute_addvalue(subject_attr_id, path_to_x500(subjectid).c_str()); xacml_attribute_setdatatype(subject_attr_id,XACML_DATATYPE_X500NAME); xacml_subject_addattribute(subject,subject_attr_id); xacml_resource_t * resource = xacml_resource_create(); if (resource == NULL) { logger.msg(Arc::DEBUG, "Can not create XACML Resource \n"); xacml_subject_delete(subject); return 2; } xacml_attribute_t * resource_attr_id = xacml_attribute_create(XACML_RESOURCE_ID); if (resource_attr_id == NULL) { logger.msg(Arc::DEBUG,"Can not create XACML ResourceAttribute: %s\n", XACML_RESOURCE_ID); xacml_subject_delete(subject); xacml_resource_delete(resource); return 2; } xacml_attribute_addvalue(resource_attr_id,resourceid); xacml_resource_addattribute(resource,resource_attr_id); xacml_action_t * action= xacml_action_create(); if (action == NULL) { logger.msg(Arc::DEBUG,"Can not create XACML Action\n"); xacml_subject_delete(subject); xacml_resource_delete(resource); return 3; } xacml_attribute_t * action_attr_id= xacml_attribute_create(XACML_ACTION_ID); if (action_attr_id == NULL) { logger.msg(Arc::DEBUG,"Can not create XACML 
ActionAttribute: %s\n", XACML_ACTION_ID); xacml_subject_delete(subject); xacml_resource_delete(resource); xacml_action_delete(action); return 3; } xacml_attribute_addvalue(action_attr_id,actionid); xacml_action_addattribute(action,action_attr_id); *request= xacml_request_create(); if (*request == NULL) { logger.msg(Arc::DEBUG,"Can not create XACML request\n"); xacml_subject_delete(subject); xacml_resource_delete(resource); xacml_action_delete(action); return 4; } xacml_request_addsubject(*request,subject); xacml_request_addresource(*request,resource); xacml_request_setaction(*request,action); return 0; } int ArgusPEPClient::create_xacml_request_direct(std::list& requests, Arc::XMLNode arcreq) const { // -- XACML -- // Request // Subject * // Attribute * // AttributeValue * // AttributeId // DataType // Issuer // Resource * // Attribute * // Action . // Attribute * // Environment . // Attribute * // // -- ARC -- // Request (1) // RequestItem (1-) // Subject (1-) // SubjectAttribute (1-) // Resource (0-) // Action (0-) // Context (0-) // ContextAttribute (1-) Arc::XMLNode arcreqitem = arcreq["RequestItem"]; int r = 0; for(;(bool)arcreqitem;++arcreqitem) { xacml_request_t* request = xacml_request_create(); if(!request) { r=1; break; } Arc::XMLNode arcsubject = arcreqitem["Subject"]; for(;(bool)arcsubject;++arcsubject) { Arc::XMLNode arcattr = arcsubject["SubjectAttribute"]; if((bool)arcattr) { xacml_subject_t * subject = xacml_subject_create(); if(!subject) { r=1; break; } for(;(bool)arcattr;++arcattr) { std::string id = arcattr.Attribute("AttributeId"); xacml_attribute_t * attr = xacml_attribute_create(id.c_str()); if(!attr) { r=1; break; } xacml_attribute_addvalue(attr,((std::string)arcattr).c_str()); xacml_attribute_setdatatype(attr,XACML_DATATYPE_STRING); xacml_subject_addattribute(subject,attr); } xacml_request_addsubject(request,subject); } } Arc::XMLNode arcresource = arcreqitem["Resource"]; if((bool)arcresource) { xacml_resource_t * resource = xacml_resource_create(); if(resource) { std::string id = arcresource.Attribute("AttributeId"); xacml_attribute_t * attr = xacml_attribute_create(id.c_str()); if(attr) { xacml_attribute_addvalue(attr,((std::string)arcresource).c_str()); xacml_attribute_setdatatype(attr,XACML_DATATYPE_STRING); xacml_resource_addattribute(resource,attr); } else { r=1; } xacml_request_addresource(request,resource); } else { r=1; } } Arc::XMLNode arcaction = arcreqitem["Action"]; if((bool)arcaction) { xacml_action_t * action = xacml_action_create(); if(action) { std::string id = arcaction.Attribute("AttributeId"); xacml_attribute_t * attr = xacml_attribute_create(id.c_str()); if(attr) { xacml_attribute_addvalue(attr,((std::string)arcaction).c_str()); xacml_attribute_setdatatype(attr,XACML_DATATYPE_STRING); xacml_action_addattribute(action,attr); } else { r=1; } xacml_request_setaction(request,action); } else { r=1; } } Arc::XMLNode arccontext = arcreqitem["Context"]; if((bool)arccontext) { Arc::XMLNode arcattr = arccontext["ContextAttribute"]; if((bool)arcattr) { xacml_environment_t * environment = xacml_environment_create(); if(environment) { for(;(bool)arcattr;++arcattr) { std::string id = arcattr.Attribute("AttributeId"); xacml_attribute_t * attr = xacml_attribute_create(id.c_str()); if(!attr) { r=1; break; } xacml_attribute_addvalue(attr, ((std::string)arcattr).c_str()); xacml_attribute_setdatatype(attr, XACML_DATATYPE_STRING); xacml_environment_addattribute(environment,attr); } xacml_request_setenvironment(request,environment); } else { r=1; } } } 
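    // Summary of the direct conversion performed above for this RequestItem:
    //   Subject/SubjectAttribute -> XACML Subject/Attribute
    //   Resource                 -> XACML Resource/Attribute
    //   Action                   -> XACML Action/Attribute
    //   Context/ContextAttribute -> XACML Environment/Attribute
    // The ARC AttributeId is reused verbatim as the XACML AttributeId and all
    // values are tagged with XACML_DATATYPE_STRING. The assembled request is
    // queued below; on any allocation failure r is set and the loop aborts.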
requests.push_back(request); if(r != 0) break; } if(r != 0) { while(requests.size() > 0) { xacml_request_delete(requests.front()); requests.pop_front(); } } return r; } // This part is ARC vs CREAM specific. // In a future such mapping should be pluggable or configurable or done by service static const std::string BES_FACTORY_NAMESPACE("http://schemas.ggf.org/bes/2006/08/bes-factory"); static const std::string BES_MANAGEMENT_NAMESPACE("http://schemas.ggf.org/bes/2006/08/bes-management"); static const std::string BES_ARC_NAMESPACE("http://www.nordugrid.org/schemas/a-rex"); static const std::string DELEG_ARC_NAMESPACE("http://www.nordugrid.org/schemas/delegation"); static const std::string WSRF_NAMESPACE("http://docs.oasis-open.org/wsrf/rp-2"); static std::string get_cream_action(Arc::XMLNode op, Arc::Logger& logger) { logger.msg(Arc::DEBUG,"Converting to CREAM action - namespace: %s, operation: %s",op.Namespace(),op.Name()); if(MatchXMLNamespace(op,BES_FACTORY_NAMESPACE)) { if(MatchXMLName(op,"CreateActivity")) return "http://glite.org/xacml/action/ce/job/submit"; if(MatchXMLName(op,"GetActivityStatuses")) return "http://glite.org/xacml/action/ce/job/get-info"; if(MatchXMLName(op,"TerminateActivities")) return "http://glite.org/xacml/action/ce/job/terminate"; if(MatchXMLName(op,"GetActivityDocuments")) return "http://glite.org/xacml/action/ce/job/get-info"; if(MatchXMLName(op,"GetFactoryAttributesDocument")) return "http://glite.org/xacml/action/ce/get-info"; return ""; } else if(MatchXMLNamespace(op,BES_MANAGEMENT_NAMESPACE)) { if(MatchXMLName(op,"StopAcceptingNewActivities")) return ""; if(MatchXMLName(op,"StartAcceptingNewActivities")) return ""; } else if(MatchXMLNamespace(op,BES_ARC_NAMESPACE)) { if(MatchXMLName(op,"ChangeActivityStatus")) return "http://glite.org/xacml/action/ce/job/manage"; if(MatchXMLName(op,"MigrateActivity")) return "http://glite.org/xacml/action/ce/job/manage"; if(MatchXMLName(op,"CacheCheck")) return "http://glite.org/xacml/action/ce/get-info"; return ""; } else if(MatchXMLNamespace(op,DELEG_ARC_NAMESPACE)) { if(MatchXMLName(op,"DelegateCredentialsInit")) return "http://glite.org/xacml/action/ce/delegation/manage"; if(MatchXMLName(op,"UpdateCredentials")) return "http://glite.org/xacml/action/ce/delegation/manage"; return ""; } else if(MatchXMLNamespace(op,WSRF_NAMESPACE)) { return "http://glite.org/xacml/action/ce/get-info"; } // http://glite.org/xacml/action/ce/job/submit // *http://glite.org/xacml/action/ce/job/terminate // *http://glite.org/xacml/action/ce/job/get-info // *http://glite.org/xacml/action/ce/job/manage // http://glite.org/xacml/action/ce/lease/get-info // http://glite.org/xacml/action/ce/lease/manage // *http://glite.org/xacml/action/ce/get-info // http://glite.org/xacml/action/ce/delegation/get-info // *http://glite.org/xacml/action/ce/delegation/manage // http://glite.org/xacml/action/ce/subscription/get-info // http://glite.org/xacml/action/ce/subscription/manage return ""; } static std::string get_cream_action_http(const std::string& method, Arc::Logger& logger) { if(method == "GET") { return "http://glite.org/xacml/action/ce/job/get-info"; } else if(method == "PUT") { return "http://glite.org/xacml/action/ce/job/submit"; } return ""; } static std::string get_emi_action_http(const std::string& method) { if(method == "GET") { return EMIES_OPERATION_CREATION; } else if(method == "PUT") { return EMIES_OPERATION_ACTIVITYMANGEMENT; } return ""; } static std::string get_emi_action(Arc::XMLNode op) { 
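    // The EMI-ES action name is derived from the XML namespace of the incoming
    // operation element alone; the EMIES_OPERATION_* constants compared against
    // below are presumably the dci-sec.org action URIs defined earlier in this
    // file. Unrecognised namespaces yield an empty string; the caller then falls
    // back to the A-REX policy attributes, the HTTP method and finally
    // EMIES_OPERATION_ANY.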
if(MatchXMLNamespace(op,EMIES_OPERATION_CREATION)) return EMIES_OPERATION_CREATION; if(MatchXMLNamespace(op,EMIES_OPERATION_ACTIVITY)) return EMIES_OPERATION_ACTIVITY; if(MatchXMLNamespace(op,EMIES_OPERATION_ACTIVITYMANGEMENT)) return EMIES_OPERATION_ACTIVITYMANGEMENT; if(MatchXMLNamespace(op,EMIES_OPERATION_RESOURCEINFO)) return EMIES_OPERATION_RESOURCEINFO; if(MatchXMLNamespace(op,EMIES_OPERATION_DELEGATION)) return EMIES_OPERATION_DELEGATION; return ""; } static std::string get_emi_action_arex(const std::string& ns, const std::string& action) { if(ns == AREX_JOB_POLICY_OPERATION_URN) { if(action == AREX_JOB_POLICY_OPERATION_CREATE) return EMIES_OPERATION_CREATION; if(action == AREX_JOB_POLICY_OPERATION_MODIFY) return EMIES_OPERATION_ACTIVITYMANGEMENT; if(action == AREX_JOB_POLICY_OPERATION_READ) return EMIES_OPERATION_ACTIVITY; } else if(ns == AREX_POLICY_OPERATION_URN) { if(action == AREX_POLICY_OPERATION_INFO) return EMIES_OPERATION_RESOURCEINFO; if(action == AREX_POLICY_OPERATION_ADMIN) return ""; } return ""; } static std::string get_sec_attr(std::list auths, const std::string& sid, const std::string& aid) { for(std::list::iterator a = auths.begin(); a != auths.end(); ++a) { Arc::SecAttr* sa = (*a)->get(sid); if(!sa) continue; std::string str = sa->get(aid); if(!str.empty()) return str; } return ""; } static std::list get_sec_attrs(std::list auths, const std::string& sid, const std::string& aid) { for(std::list::iterator a = auths.begin(); a != auths.end(); ++a) { Arc::SecAttr* sa = (*a)->get(sid); if(!sa) continue; std::list strs = sa->getAll(aid); if(!strs.empty()) return strs; } return std::list(); } static std::string get_resource(std::list& auths, Arc::MessageAttributes* attrs) { std::string resource = get_sec_attr(auths, "AREX", "SERVICE"); if(!resource.empty()) return resource; if(attrs) resource = attrs->get("ENDPOINT"); return resource; } int ArgusPEPClient::create_xacml_request_cream(xacml_request_t** request, std::list auths, Arc::MessageAttributes* attrs, Arc::XMLNode operation) const { logger.msg(Arc::DEBUG,"Doing CREAM request"); xacml_attribute_t* attr = NULL; xacml_environment_t* environment = NULL; xacml_subject_t* subject = NULL; xacml_resource_t* resource = NULL; xacml_action_t* action = NULL; class ierror { public: std::string desc; ierror(const std::string& err):desc(err) { }; }; try { *request = xacml_request_create(); if(!*request) throw ierror("Failed to create request object"); environment = xacml_environment_create(); if(!environment) throw ierror("Failed to create environment object"); subject = xacml_subject_create(); if(!subject) throw ierror("Failed to create subject object"); resource = xacml_resource_create(); if(!resource) throw ierror("Failed to create resource object"); action = xacml_action_create(); if(!action) throw ierror("Failed to create action object"); // Environment std::string profile_id = "http://glite.org/xacml/profile/grid-ce/1.0"; attr = xacml_attribute_create("http://glite.org/xacml/attribute/profile-id"); if(!attr) throw ierror("Failed to create attribute profile-id object"); xacml_attribute_addvalue(attr, profile_id.c_str()); logger.msg(Arc::DEBUG,"Adding profile-id value: %s",profile_id); xacml_attribute_setdatatype(attr, XACML_DATATYPE_ANYURI); xacml_environment_addattribute(environment,attr); attr = NULL; // Subject attr = xacml_attribute_create("urn:oasis:names:tc:xacml:1.0:subject:subject-id"); if(!attr) throw ierror("Failed to create attribute subject-id object"); std::string subject_str = get_sec_attr(auths, "TLS", 
"IDENTITY"); if(subject_str.empty()) throw ierror("Failed to extract TLS:IDENTITY"); subject_str = path_to_x500(subject_str); xacml_attribute_addvalue(attr, subject_str.c_str()); logger.msg(Arc::DEBUG,"Adding subject-id value: %s",subject_str); xacml_attribute_setdatatype(attr, XACML_DATATYPE_X500NAME); xacml_subject_addattribute(subject,attr); attr = NULL; attr = xacml_attribute_create("http://glite.org/xacml/attribute/subject-issuer"); if(!attr) throw ierror("Failed to create attribute subject-issuer object"); std::string ca_str = get_sec_attr(auths, "TLS", "CA"); if(ca_str.empty()) throw ierror("Failed to extract TLS:CA"); ca_str = path_to_x500(ca_str); xacml_attribute_addvalue(attr, ca_str.c_str()); logger.msg(Arc::DEBUG,"Adding subject-issuer value: %s",ca_str); xacml_attribute_setdatatype(attr, XACML_DATATYPE_X500NAME); xacml_subject_addattribute(subject,attr); attr = NULL; attr = xacml_attribute_create("http://glite.org/xacml/attribute/virtual-organization"); if(!attr) throw ierror("Failed to create attribute virtual-organization object"); std::list vos = get_sec_attrs(auths, "TLS", "VO"); for(std::list::iterator vo = vos.begin(); vo!=vos.end(); ++vo) { if(!vo->empty()) xacml_attribute_addvalue(attr, vo->c_str()); logger.msg(Arc::DEBUG,"Adding virtual-organization value: %s",*vo); } xacml_attribute_setdatatype(attr, XACML_DATATYPE_STRING); xacml_subject_addattribute(subject,attr); attr = NULL; attr = xacml_attribute_create("http://glite.org/xacml/attribute/fqan"); if(!attr) throw ierror("Failed to create attribute fqan object"); std::string pfqan; std::list fqans = get_sec_attrs(auths, "TLS", "VOMS"); // /voname=testers.eu-emi.eu/hostname=emitestbed07.cnaf.infn.it:15002/testers.eu-emi.eu/test3:test_ga=ciccio -> ? // /VO=testers.eu-emi.eu/Group=testers.eu-emi.eu/Group=test1 -> /testers.eu-emi.eu/test1 for(std::list::iterator fqan = fqans.begin(); fqan!=fqans.end(); ++fqan) { std::string fqan_str = flatten_fqan(*fqan); if(fqan_str.empty()) continue; if(pfqan.empty()) pfqan = fqan_str; if(!fqan->empty()) xacml_attribute_addvalue(attr, fqan_str.c_str()); logger.msg(Arc::DEBUG,"Adding FQAN value: %s",fqan_str); } xacml_attribute_setdatatype(attr, XACML_DATATYPE_FQAN); xacml_subject_addattribute(subject,attr); attr = NULL; if(!pfqan.empty()) { attr = xacml_attribute_create("http://glite.org/xacml/attribute/fqan/primary"); if(!attr) throw ierror("Failed to create attribute FQAN/primary object"); // TODO: convert to VOMS FQAN? 
xacml_attribute_addvalue(attr, pfqan.c_str()); logger.msg(Arc::DEBUG,"Adding FQAN/primary value: %s",pfqan); xacml_attribute_setdatatype(attr, XACML_DATATYPE_FQAN); xacml_subject_addattribute(subject,attr); attr = NULL; } attr = xacml_attribute_create("urn:oasis:names:tc:xacml:1.0:subject:key-info"); if(!attr) throw ierror("Failed to create attribute key-info object"); std::string certstr = get_sec_attr(auths, "TLS", "CERTIFICATE"); std::string chainstr = get_sec_attr(auths, "TLS", "CERTIFICATECHAIN"); chainstr = certstr + "\n" + chainstr; xacml_attribute_addvalue(attr, chainstr.c_str()); logger.msg(Arc::DEBUG,"Adding cert chain value: %s",chainstr); xacml_attribute_setdatatype(attr, XACML_DATATYPE_STRING); xacml_subject_addattribute(subject,attr); attr = NULL; // Resource attr = xacml_attribute_create("urn:oasis:names:tc:xacml:1.0:resource:resource-id"); if(!attr) throw ierror("Failed to create attribute resource-id object"); std::string endpoint = get_resource(auths,attrs); if(endpoint.empty()) throw ierror("Failed to extract resource identifier"); xacml_attribute_addvalue(attr, endpoint.c_str()); logger.msg(Arc::DEBUG,"Adding resoure-id value: %s",endpoint); xacml_attribute_setdatatype(attr, XACML_DATATYPE_STRING); xacml_resource_addattribute(resource,attr); attr = NULL; // Action attr = xacml_attribute_create("urn:oasis:names:tc:xacml:1.0:action:action-id"); if(!attr) throw ierror("Failed to create attribute action-id object"); std::string act; if((bool)operation) { act = get_cream_action(operation,logger); } else if(attrs) { act = get_cream_action_http(attrs->get("HTTP:METHOD"),logger); } if(act.empty()) act = "http://glite.org/xacml/action/ANY"; // throw ierror("Failed to generate action name"); xacml_attribute_addvalue(attr, act.c_str()); logger.msg(Arc::DEBUG,"Adding action-id value: %s",act); xacml_attribute_setdatatype(attr, XACML_DATATYPE_STRING); xacml_action_addattribute(action,attr); attr = NULL; // Add everything into request xacml_request_setenvironment(*request,environment); environment = NULL; xacml_request_addsubject(*request,subject); subject = NULL; xacml_request_addresource(*request,resource); resource = NULL; xacml_request_setaction(*request,action); action = NULL; } catch(ierror& err) { logger.msg(Arc::DEBUG,"CREAM request generation failed: %s",err.desc); if(attr) xacml_attribute_delete(attr); if(environment) xacml_environment_delete(environment); if(subject) xacml_subject_delete(subject); if(resource) xacml_resource_delete(resource); if(*request) xacml_request_delete(*request); *request = NULL; return 1; } return 0; } static bool split_voms(const std::string& voms_attr, std::string& vo, std::string& group, std::list< std::string>& roles, std::list& attrs) { vo.resize(0); group.resize(0); roles.clear(); attrs.clear(); std::list elements; Arc::tokenize(voms_attr,elements,"/"); std::list::iterator element = elements.begin(); for(;element!=elements.end();++element) { std::string::size_type p = element->find('='); if(p == std::string::npos) { attrs.push_back(*element); } else { std::string key = element->substr(0,p); if(key == "VO") { vo = element->substr(p+1); } else if(key == "Group") { group += "/"+element->substr(p+1); } else if(key == "Role") { roles.push_back(element->substr(p+1)); } else { attrs.push_back(*element); } } } return true; } int ArgusPEPClient::create_xacml_request_emi(xacml_request_t** request, std::list auths, Arc::MessageAttributes* attrs, Arc::XMLNode operation) const { xacml_attribute_t* attr = NULL; xacml_environment_t* environment = NULL; 
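    // This function assembles a request following the common EMI XACML profile
    // (profile-id http://dci-sec.org/xacml/profile/common-ce/1.0, set below):
    //   Environment: profile-id
    //   Subject:     subject-id, subject-issuer, virtual-organization,
    //                group (+ group/primary), role (+ role/primary)
    //   Resource:    resource-id, resource-owner
    //   Action:      action-id
    // Any failure raises ierror; the catch block at the end releases whatever
    // objects are still owned here and returns 1.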
xacml_subject_t* subject = NULL; xacml_resource_t* resource = NULL; xacml_action_t* action = NULL; class ierror { }; try { *request = xacml_request_create(); if(!*request) throw ierror(); environment = xacml_environment_create(); if(!environment) throw ierror(); subject = xacml_subject_create(); if(!subject) throw ierror(); resource = xacml_resource_create(); if(!resource) throw ierror(); action = xacml_action_create(); if(!action) throw ierror(); // Environment attr = xacml_attribute_create("http://dci-sec.org/xacml/attribute/profile-id"); if(!attr) throw ierror(); xacml_attribute_addvalue(attr, "http://dci-sec.org/xacml/profile/common-ce/1.0"); xacml_attribute_setdatatype(attr, XACML_DATATYPE_ANYURI); xacml_environment_addattribute(environment,attr); attr = NULL; // Subject attr = xacml_attribute_create("urn:oasis:names:tc:xacml:1.0:subject:subject-id"); if(!attr) throw ierror(); std::string subject_str = get_sec_attr(auths, "TLS", "IDENTITY"); if(subject_str.empty()) throw ierror(); xacml_attribute_addvalue(attr, path_to_x500(subject_str).c_str()); xacml_attribute_setdatatype(attr, XACML_DATATYPE_X500NAME); xacml_subject_addattribute(subject,attr); attr = NULL; attr = xacml_attribute_create("http://dci-sec.org/xacml/attribute/subject-issuer"); if(!attr) throw ierror(); std::string ca_str = get_sec_attr(auths, "TLS", "CA"); if(ca_str.empty()) throw ierror(); xacml_attribute_addvalue(attr, path_to_x500(ca_str).c_str()); xacml_attribute_setdatatype(attr, XACML_DATATYPE_X500NAME); xacml_subject_addattribute(subject,attr); attr = NULL; attr = xacml_attribute_create("http://dci-sec.org/xacml/attribute/virtual-organization"); if(!attr) throw ierror(); std::list vos = get_sec_attrs(auths, "TLS", "VO"); // TODO: handle no vos for(std::list::iterator vo = vos.begin(); vo!=vos.end(); ++vo) { if(!vo->empty()) xacml_attribute_addvalue(attr, vo->c_str()); } xacml_attribute_setdatatype(attr, XACML_DATATYPE_STRING); xacml_subject_addattribute(subject,attr); attr = NULL; attr = xacml_attribute_create("http://dci-sec.org/xacml/attribute/group"); if(!attr) throw ierror(); std::string pgroup; std::list fqans = get_sec_attrs(auths, "TLS", "VOMS"); // TODO: handle no fqans for(std::list::iterator fqan = fqans.begin(); fqan!=fqans.end(); ++fqan) { std::string vo; std::string group; std::list roles; std::list attrs; if(!split_voms(*fqan,vo,group,roles,attrs)) throw ierror(); if(pgroup.empty()) pgroup = group; if(!group.empty()) xacml_attribute_addvalue(attr, group.c_str()); } xacml_attribute_setdatatype(attr, XACML_DATATYPE_STRING); xacml_subject_addattribute(subject,attr); attr = NULL; if(!pgroup.empty()) { attr = xacml_attribute_create("http://dci-sec.org/xacml/attribute/group/primary"); if(!attr) throw ierror(); xacml_attribute_addvalue(attr, pgroup.c_str()); xacml_attribute_setdatatype(attr, XACML_DATATYPE_STRING); xacml_subject_addattribute(subject,attr); attr = NULL; } std::string prole; pgroup.resize(0); for(std::list::iterator fqan = fqans.begin(); fqan!=fqans.end(); ++fqan) { std::string vo; std::string group; std::list roles; std::list attrs; if(!split_voms(*fqan,vo,group,roles,attrs)) throw ierror(); attr = xacml_attribute_create("http://dci-sec.org/xacml/attribute/role"); if(!attr) throw ierror(); if(!group.empty()) xacml_attribute_setissuer(attr, group.c_str()); // TODO: handle no roles for(std::list::iterator role = roles.begin(); role!=roles.end(); ++role) { if(role->empty()) continue; if(prole.empty()) { prole = *role; pgroup = group; } xacml_attribute_addvalue(attr, role->c_str()); } 
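        // One role attribute is emitted per VOMS FQAN, with the FQAN's group set
        // as the attribute issuer. The first non-empty role encountered becomes
        // the primary role (prole) and its group (pgroup) is used as the issuer
        // of the role/primary attribute added after this loop. For illustration,
        // assuming split_voms() behaves as defined above, the FQAN
        //   /VO=atlas/Group=atlas/Group=prod/Role=production
        // yields vo "atlas", group "/atlas/prod" and roles { "production" }.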
xacml_attribute_setdatatype(attr, XACML_DATATYPE_STRING); xacml_subject_addattribute(subject,attr); attr = NULL; } if(!prole.empty()) { attr = xacml_attribute_create("http://dci-sec.org/xacml/attribute/role/primary"); if(!attr) throw ierror(); if(!pgroup.empty()) xacml_attribute_setissuer(attr, pgroup.c_str()); xacml_attribute_addvalue(attr, prole.c_str()); xacml_attribute_setdatatype(attr, XACML_DATATYPE_STRING); xacml_subject_addattribute(subject,attr); attr = NULL; } // Resource attr = xacml_attribute_create("urn:oasis:names:tc:xacml:1.0:resource:resource-id"); if(!attr) throw ierror(); std::string endpoint = get_resource(auths,attrs); if(endpoint.empty()) throw ierror(); xacml_attribute_addvalue(attr, endpoint.c_str()); xacml_attribute_setdatatype(attr, XACML_DATATYPE_STRING); xacml_resource_addattribute(resource,attr); attr = NULL; attr = xacml_attribute_create("http://dci-sec.org/xacml/attribute/resource-owner"); std::string owner_str = get_sec_attr(auths, "TLS", "LOCALSUBJECT"); if(owner_str.empty()) throw ierror(); xacml_attribute_addvalue(attr, path_to_x500(owner_str).c_str()); xacml_attribute_setdatatype(attr, XACML_DATATYPE_X500NAME); xacml_attribute_setdatatype(attr, XACML_DATATYPE_X500NAME); xacml_resource_addattribute(resource,attr); attr = NULL; // Action // In a future action names should be synchronized among services attr = xacml_attribute_create("urn:oasis:names:tc:xacml:1.0:action:action-id"); if(!attr) throw ierror(); std::string arex_ns = get_sec_attr(auths, "AREX", "NAMESPACE"); std::string arex_action = get_sec_attr(auths, "AREX", "ACTION"); std::string act = get_emi_action(operation); if(act.empty()) act = get_emi_action_arex(arex_ns, arex_action); if(act.empty()) act = get_emi_action_http(attrs->get("HTTP:METHOD")); //if(act.empty() && !arex_ns.empty()) act = arex_ns + "/" + arex_action; if(act.empty()) act = EMIES_OPERATION_ANY; //throw ierror("Failed to generate action name"); xacml_attribute_addvalue(attr, act.c_str()); xacml_attribute_setdatatype(attr, XACML_DATATYPE_STRING); xacml_action_addattribute(action,attr); attr = NULL; // Add everything into request xacml_request_setenvironment(*request,environment); environment = NULL; xacml_request_addsubject(*request,subject); subject = NULL; xacml_request_addresource(*request,resource); resource = NULL; xacml_request_setaction(*request,action); action = NULL; } catch(ierror& err) { if(attr) xacml_attribute_delete(attr); if(environment) xacml_environment_delete(environment); if(subject) xacml_subject_delete(subject); if(resource) xacml_resource_delete(resource); if(*request) xacml_request_delete(*request); return 1; } return 0; } } // namespace ArcSec nordugrid-arc-5.4.2/src/hed/identitymap/PaxHeaders.7502/ArgusPEPClient.h0000644000000000000000000000012412110401544024001 xustar000000000000000027 mtime=1361183588.521754 27 atime=1513200574.445701 30 ctime=1513200662.013772115 nordugrid-arc-5.4.2/src/hed/identitymap/ArgusPEPClient.h0000644000175000002070000000411212110401544024044 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ARGUSPEPCLIENT_H__ #define __ARC_SEC_ARGUSPEPCLIENT_H__ #include #include #include #include #include #include #include #include #include namespace ArcSec { class ArgusPEPClient : public SecHandler { private: typedef enum { conversion_subject, conversion_direct, conversion_cream, conversion_emi } conversion_type; std::string pepdlocation; int pep_log_level; std::string keypath; std::string certpath; std::string capath; std::list select_attrs; std::list reject_attrs; conversion_type 
conversion; bool accept_mapping; bool valid_; static Arc::Logger logger; // XACML request and response // xacml_request_t * request; // xacml_response_t * response; static int pep_log(int level, const char *fmt, va_list args); public: ArgusPEPClient(Arc::Config *cfg,Arc::PluginArgument* parg); ArgusPEPClient(void); virtual ~ArgusPEPClient(void); virtual SecHandlerStatus Handle(Arc::Message* msg) const ; operator bool(void) { return valid_; }; bool operator!(void) { return !valid_; }; private: int create_xacml_request(xacml_request_t** request, const char * subjectid, const char * resourceid, const char * actionid) const ; int create_xacml_request_direct(std::list& requests,Arc::XMLNode arcreq) const; int create_xacml_request_cream(xacml_request_t** request, std::list auths, Arc::MessageAttributes* attrs, Arc::XMLNode operation) const; int create_xacml_request_emi(xacml_request_t** request, std::list auths, Arc::MessageAttributes* attrs, Arc::XMLNode operation) const; // const char * decision_tostring(xacml_decision_t decision); // const char * fulfillon_tostring(xacml_fulfillon_t fulfillon); }; } // namespace ArcSec #endif /* __ARC_SEC_ARGUSPEPCLIENT_H__ */ nordugrid-arc-5.4.2/src/hed/identitymap/PaxHeaders.7502/IdentityMap.h0000644000000000000000000000012412110401544023443 xustar000000000000000027 mtime=1361183588.521754 27 atime=1513200574.446701 30 ctime=1513200662.016772152 nordugrid-arc-5.4.2/src/hed/identitymap/IdentityMap.h0000644000175000002070000000214612110401544023513 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_IDENTITYMAP_H__ #define __ARC_SEC_IDENTITYMAP_H__ #include #include #include #include namespace ArcSec { class LocalMap { public: LocalMap(void) {}; virtual ~LocalMap(void) {}; virtual std::string ID(Arc::Message* msg) = 0; }; /// Apply Tests message against list of PDPs /** This class implements SecHandler interface. It's Handle() method runs provided Message instance against all PDPs specified in configuration. If any of PDPs returns positive result Handle() return true, otherwise false. 
*/ class IdentityMap : public SecHandler { private: typedef struct { PDP* pdp; LocalMap* uid; } map_pair_t; std::list maps_; bool valid_; public: IdentityMap(Arc::Config *cfg, Arc::ChainContext* ctx, Arc::PluginArgument* parg); virtual ~IdentityMap(void); virtual SecHandlerStatus Handle(Arc::Message* msg) const; operator bool(void) { return valid_; }; bool operator!(void) { return !valid_; }; }; } // namespace ArcSec #endif /* __ARC_SEC_IDENTITYMAP_H__ */ nordugrid-arc-5.4.2/src/hed/identitymap/PaxHeaders.7502/schema0000644000000000000000000000013213214316026022233 xustar000000000000000030 mtime=1513200662.048772543 30 atime=1513200668.719854133 30 ctime=1513200662.048772543 nordugrid-arc-5.4.2/src/hed/identitymap/schema/0000755000175000002070000000000013214316026022356 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/identitymap/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612033667522024357 xustar000000000000000027 mtime=1349480274.950183 29 atime=1513200595.95496419 30 ctime=1513200662.043772482 nordugrid-arc-5.4.2/src/hed/identitymap/schema/Makefile.am0000644000175000002070000000021212033667522024415 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = identitymap.xsd arguspepclient.xsd arguspdpclient.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/identitymap/schema/PaxHeaders.7502/arguspdpclient.xsd0000644000000000000000000000012312104677173026066 xustar000000000000000026 mtime=1360232059.27042 27 atime=1513200574.447701 30 ctime=1513200662.048772543 nordugrid-arc-5.4.2/src/hed/identitymap/schema/arguspdpclient.xsd0000644000175000002070000001003612104677173026134 0ustar00mockbuildmock00000000000000 This element defines how to compose message to PDPD service. The "subject" option means only X.509 subject will be sent using predefined XACML attribute. This is default behavior. The "cream" will cause this handler emulate CREAM CE. And "emi" will make it use common EMI XACML profile. This element defines Security Attributes to select and reject. If there are no Select elements all Attributes are used except those listed in Reject elements. Location of private key used for connecting PDP server. Location of public certificate used for connecting PDP server. Location of proxy credentials used for connecting PDP server. If present KeyPath and CertificatePath are not needed. Directory containing certificates of accepted CAs. Specify if local account name returned by Argus is to be used. Default is not to apply local account provided by Argus. Specify if the "NotApplicable" decision returned by Argus PDP is treated as reason to deny request. Default is false, which treats "NotApplicable" as reson to deny request. nordugrid-arc-5.4.2/src/hed/identitymap/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315723024357 xustar000000000000000030 mtime=1513200595.985964569 29 atime=1513200649.62862064 30 ctime=1513200662.045772506 nordugrid-arc-5.4.2/src/hed/identitymap/schema/Makefile.in0000644000175000002070000004357213214315723024441 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/identitymap/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ 
ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = 
@LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = 
@lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = identitymap.xsd arguspepclient.xsd arguspdpclient.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/identitymap/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/identitymap/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) 
$(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/identitymap/schema/PaxHeaders.7502/identitymap.xsd0000644000000000000000000000012411331563536025370 xustar000000000000000027 mtime=1265035102.844272 27 atime=1513200574.450701 30 ctime=1513200662.046772519 nordugrid-arc-5.4.2/src/hed/identitymap/schema/identitymap.xsd0000644000175000002070000000632511331563536025443 0ustar00mockbuildmock00000000000000 This element defines shared library which contains plugins to be used. It is supposed to be used if name of library is not same as name of plugin and hence can't be located automatically. This element contains configuration of PDP to be used and associated mapping procedure. There may be multiple or none such element. Attribute 'name' contains name of PDP plugin as defined in one of loaded libraries. Elements LocalName, LocalList or LocalSimplePool define mapping to be applied if PDP gives positive response. All listed PDPs are tested sequentially and first with positive result is applied. The rest of this element defines configuration of PDP in its specific way. Local account name. Path to file containing key value pairs separated by blank space per each string. Keys are compared to TLS:IDENTITYDN attribute of each message and corresponding value of first matched key is used. Path directory containing dynamic configuration for mapping TLS:IDENTITYDN attribute of each message to local account names. nordugrid-arc-5.4.2/src/hed/identitymap/schema/PaxHeaders.7502/arguspepclient.xsd0000644000000000000000000000012412033667522026066 xustar000000000000000027 mtime=1349480274.950183 27 atime=1513200574.447701 30 ctime=1513200662.047772531 nordugrid-arc-5.4.2/src/hed/identitymap/schema/arguspepclient.xsd0000644000175000002070000000647112033667522026143 0ustar00mockbuildmock00000000000000 This element defines how to compose message to PEPD service. The "subject" option means only X.509 subject will be sent using predefined XACML attribute. This is default behavior. In case of "direct" request will be translated from usual ARC authorization request. The "cream" will cause this handler emulate CREAM CE. And "emi" will make it use common EMI XACML profile. This element defines Security Attributes to select and reject. If there are no Select elements all Attributes are used except those listed in Reject elements. Location of private key used for connecting PEP server. Location of public certificate used for connecting PEP server. Location of proxy credentials used for connecting PEP server. If present KeyPath and CertificatePath are not needed. Directory containing certificates of accepted CAs. Specify if local account name returned by Argus is to be used. Default is not to apply local account provided by Argus. nordugrid-arc-5.4.2/src/hed/identitymap/PaxHeaders.7502/README0000644000000000000000000000012411016534143021731 xustar000000000000000027 mtime=1211807843.180089 27 atime=1513200574.447701 30 ctime=1513200662.006772029 nordugrid-arc-5.4.2/src/hed/identitymap/README0000644000175000002070000000015011016534143021772 0ustar00mockbuildmock00000000000000SecHandler plugin implementing Grid identity maping to local user identity using few simple algorithms. 
nordugrid-arc-5.4.2/src/hed/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315721020566 xustar000000000000000030 mtime=1513200593.714936794 29 atime=1513200647.78159805 30 ctime=1513200658.750732207 nordugrid-arc-5.4.2/src/hed/Makefile.in0000644000175000002070000006332413214315721020645 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == 
$(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(profiledir)" \ "$(DESTDIR)$(profileexampledir)" DATA = $(profile_DATA) $(profileexample_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = 
@ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI 
= @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ # order is important! 
SUBDIRS = libs acc mcc dmc shc daemon identitymap DIST_SUBDIRS = libs acc mcc dmc shc daemon identitymap profiledir = $(pkgdatadir)/profiles profile_DATA = profiles/general/general.xml profileexampledir = $(pkgdatadir)/examples/config profileexample_DATA = profiles/*/*.xml all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-profileDATA: $(profile_DATA) @$(NORMAL_INSTALL) test -z "$(profiledir)" || $(MKDIR_P) "$(DESTDIR)$(profiledir)" @list='$(profile_DATA)'; test -n "$(profiledir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(profiledir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(profiledir)" || exit $$?; \ done uninstall-profileDATA: @$(NORMAL_UNINSTALL) @list='$(profile_DATA)'; test -n "$(profiledir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(profiledir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(profiledir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(DATA) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(profiledir)" "$(DESTDIR)$(profileexampledir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-profileDATA install-profileexampleDATA install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-profileDATA uninstall-profileexampleDATA .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-profileDATA install-profileexampleDATA install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am uninstall-profileDATA \ uninstall-profileexampleDATA install-profileexampleDATA: $(profile_DATA) for i in profiles/*; do \ profile=`basename $$i` ; \ mkdir -p "$(DESTDIR)$(profileexampledir)/$$profile" ; \ ( cd profiles/$$profile ; for i in *.xml ; do \ test -f $$i && cp -p $$i "$(DESTDIR)$(profileexampledir)/$$profile/" || : ; \ done ) ; \ done uninstall-profileexampleDATA: for p in $(profile_DATA); do \ p=`echo $$p | sed 's|^\(.*/\)\?profiles/||'`; \ rm -f "$(DESTDIR)$(profileexampledir)/$$p"; \ done # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/PaxHeaders.7502/libs0000644000000000000000000000013213214316022017371 xustar000000000000000030 mtime=1513200658.781732586 30 atime=1513200668.719854133 30 ctime=1513200658.781732586 nordugrid-arc-5.4.2/src/hed/libs/0000755000175000002070000000000013214316022017514 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712133203240021502 xustar000000000000000027 mtime=1366099616.110521 30 atime=1513200596.001964765 30 ctime=1513200658.770732451 nordugrid-arc-5.4.2/src/hed/libs/Makefile.am0000644000175000002070000000113412133203240021543 0ustar00mockbuildmock00000000000000if XMLSEC_ENABLED XMLSEC_DIR = xmlsec else XMLSEC_DIR = endif if GLOBUSUTILS_ENABLED GLOBUSUTILS_DIR = globusutils else GLOBUSUTILS_DIR = endif # order is important! 
SUBDIRS = common loader message crypto cryptomod \ credential credentialmod data security \ ws-addressing $(XMLSEC_DIR) ws-security wsrf delegation \ communication compute ws infosys $(GLOBUSUTILS_DIR) \ credentialstore DIST_SUBDIRS = common loader message crypto cryptomod \ credential credentialmod data security \ ws-addressing xmlsec ws-security wsrf delegation \ communication compute ws infosys globusutils credentialstore nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/data0000644000000000000000000000013013214316023020301 xustar000000000000000029 mtime=1513200659.24973831 30 atime=1513200668.719854133 29 ctime=1513200659.24973831 nordugrid-arc-5.4.2/src/hed/libs/data/0000755000175000002070000000000013214316023020426 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712231165602022423 xustar000000000000000027 mtime=1382345602.148497 30 atime=1513200597.225979735 30 ctime=1513200659.230738077 nordugrid-arc-5.4.2/src/hed/libs/data/Makefile.am0000644000175000002070000000224712231165602022472 0ustar00mockbuildmock00000000000000lib_LTLIBRARIES = libarcdata.la if WIN32 DIRS = examples else DIRS = $(TEST_DIR) examples endif SUBDIRS = $(DIRS) DIST_SUBDIRS = test examples if WIN32 pkglibexec_SCRIPTS = else pkglibexec_SCRIPTS = cache-clean cache-list endif libarcdata_ladir = $(pkgincludedir)/data libarcdata_la_HEADERS = DataPoint.h DataPointDirect.h \ DataPointIndex.h DataBuffer.h \ DataSpeed.h DataMover.h URLMap.h \ DataCallback.h DataHandle.h FileInfo.h DataStatus.h \ FileCache.h FileCacheHash.h libarcdata_la_SOURCES = DataPoint.cpp DataPointDirect.cpp \ DataPointIndex.cpp DataBuffer.cpp \ DataSpeed.cpp DataMover.cpp URLMap.cpp \ DataStatus.cpp \ FileCache.cpp FileCacheHash.cpp libarcdata_la_CXXFLAGS = -I$(top_srcdir)/include $(GLIBMM_CFLAGS) \ $(LIBXML2_CFLAGS) $(GTHREAD_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libarcdata_la_LIBADD = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(GTHREAD_LIBS) \ $(OPENSSL_LIBS) libarcdata_la_LDFLAGS = -version-info 3:0:0 if WIN32 man_MANS = else man_MANS = cache-clean.1 cache-list.1 endif nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/cache-list.in0000644000000000000000000000012712441125533022735 xustar000000000000000027 mtime=1417980763.391041 30 atime=1513200648.021600985 30 ctime=1513200659.235738138 nordugrid-arc-5.4.2/src/hed/libs/data/cache-list.in0000755000175000002070000000642312441125533023007 0ustar00mockbuildmock00000000000000#!/usr/bin/perl -w # Make a list of URLs in the cache and their corresponding # cache filenames use File::Find (); use File::Basename; use Digest::SHA1 qw/sha1_hex/; BEGIN { unshift @INC, dirname($0).'/@pkgdatadir_rel_to_pkglibexecdir@'; } use ConfigCentral; # for the convenience of &wanted calls, including -eval statements: use vars qw/*name *dir *prune/; *name = *File::Find::name; *dir = *File::Find::dir; *prune = *File::Find::prune; sub wanted; my $conffile; sub usage { print < '389', 'http' => '80', 'https' => '443', 'httpg' => '8443', 'srm' => '8443', 'ldap' => '389', 'ftp' => '21', 'gsiftp'=> '2811', ); my @files; if (@ARGV > 0) { $file = $ARGV[0]; if ($file eq '-h') { usage(); } if ($file eq '-c') { shift (@ARGV); $conffile = shift (@ARGV); } # for each file add default port if necessary foreach $file (@ARGV) { push (@files, $file); next if $file !~ m|(\w+)://(\S+?)/\S*|; next if 
! defined( $ports{$1} ); $protocol = $1; $host = $2; next if index($host, ':') != -1; # no port so try the default $file =~ s|$host|$host:$ports{$protocol}|; push (@files, $file); } } if (!$conffile && $ENV{"ARC_CONFIG"} && -e $ENV{"ARC_CONFIG"}) { $conffile = $ENV{"ARC_CONFIG"}; } usage() unless $conffile; # parse to find cache dirs my @caches; my $config = ConfigCentral::parseConfig($conffile); die "Failed parsing A-REX config file '$conffile'" unless $config; die "No users set up in config file '$conffile'" unless $config->{control} and ref $config->{control} eq 'HASH'; for my $control (values %{$config->{control}}) { next unless ref $control eq 'HASH'; next unless $control->{cachedir} and ref $control->{cachedir} eq 'ARRAY'; for (@{$control->{cachedir}}) { print "\n Warning: cache-list cannot deal with substitutions - $_\n" and next if /%/; print "\n Warning: ignoring malformed cache location - $_\n" and next unless m{^(/\S+)}; push @caches, $1; } } die "No caches found in config file '$conffile'" unless @caches; # list all files if (@files == 0) { foreach $cache (@caches) { print "Cache: $cache\n"; if (! -d $cache || ! -d $cache."/data") { print " Cache is empty\n"; } else { File::Find::find({wanted => \&wanted}, $cache."/data"); } } } # list files given as arguments else { foreach $file (@files) { $hash = sha1_hex($file); if (length($hash) != 40) { print "Error in hash calculation for file $file\n"; next; } # look for this file in the caches foreach $cache (@caches) { $cachefile = $cache.'/data/'.substr($hash, 0, 2).'/'.substr($hash, 2); if (-e $cachefile) { print " $file $cachefile"; print ' (locked)' if -e "$cachefile.lock"; print "\n"; } } } } sub wanted { return if $name !~ m|\.meta$|; return if ! -e substr($name, 0, -5); open FILE, $name or die "$name $!"; my $line = ; close FILE; chomp($line); my $fname = substr($name, 0, rindex($name, ".meta")); print " $line $fname\n"; } nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/DataPoint.cpp0000644000000000000000000000012413111074405022751 xustar000000000000000027 mtime=1495562501.418201 27 atime=1513200574.573702 30 ctime=1513200659.236738151 nordugrid-arc-5.4.2/src/hed/libs/data/DataPoint.cpp0000644000175000002070000002041113111074405023014 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include namespace Arc { Logger DataPoint::logger(Logger::rootLogger, "DataPoint"); DataPoint::DataPoint(const URL& url, const UserConfig& usercfg, PluginArgument* parg) : Plugin(parg), url(url), usercfg(usercfg), size((unsigned long long int)(-1)), modified(-1), valid(-1), access_latency(ACCESS_LATENCY_ZERO), triesleft(1), failure_code(DataStatus::UnknownError), cache(url.Option("cache") != "no"), stageable(false) { // add all valid URL options applicable to all protocols valid_url_options.clear(); valid_url_options.insert("cache"); valid_url_options.insert("readonly"); valid_url_options.insert("blocksize"); valid_url_options.insert("checksum"); valid_url_options.insert("exec"); valid_url_options.insert("preserve"); valid_url_options.insert("overwrite"); valid_url_options.insert("threads"); valid_url_options.insert("secure"); valid_url_options.insert("autodir"); valid_url_options.insert("tcpnodelay"); valid_url_options.insert("protocol"); valid_url_options.insert("spacetoken"); valid_url_options.insert("transferprotocol"); valid_url_options.insert("encryption"); valid_url_options.insert("httpputpartial"); 
valid_url_options.insert("httpgetpartial"); valid_url_options.insert("rucioaccount"); valid_url_options.insert("failureallowed"); valid_url_options.insert("relativeuri"); } DataPoint::~DataPoint() {} const URL& DataPoint::GetURL() const { return url; } bool DataPoint::SetURL(const URL& url) { return false; } const UserConfig& DataPoint::GetUserConfig() const { return usercfg; } std::string DataPoint::str() const { return url.str(); } DataPoint::operator bool() const { if (!url) return false; // URL option validation. Subclasses which do not want to validate // URL options should override this method. std::map options = url.Options(); for (std::map::iterator i = options.begin(); i != options.end(); i++) { if (valid_url_options.find(i->first) == valid_url_options.end()) { logger.msg(ERROR, "Invalid URL option: %s", i->first); return false; } } return true; } bool DataPoint::operator!() const { return !((bool)*this); } DataStatus DataPoint::PrepareReading(unsigned int timeout, unsigned int& wait_time) { wait_time = 0; return DataStatus::Success; } DataStatus DataPoint::PrepareWriting(unsigned int timeout, unsigned int& wait_time) { wait_time = 0; return DataStatus::Success; } DataStatus DataPoint::FinishReading(bool error) { return DataStatus::Success; } DataStatus DataPoint::FinishWriting(bool error) { return DataStatus::Success; } DataStatus DataPoint::Transfer3rdParty(const URL& source, const URL& destination, Callback3rdParty callback) { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); } DataStatus DataPoint::GetFailureReason() const { return failure_code; } bool DataPoint::Cache() const { return cache; } bool DataPoint::IsStageable() const { return stageable; } bool DataPoint::CheckSize() const { return (size != (unsigned long long int)(-1)); } void DataPoint::SetSize(const unsigned long long int val) { size = val; } unsigned long long int DataPoint::GetSize() const { return size; } bool DataPoint::CheckCheckSum() const { return (!checksum.empty()); } void DataPoint::SetCheckSum(const std::string& val) { checksum = val; } const std::string& DataPoint::GetCheckSum() const { return checksum; } const std::string DataPoint::DefaultCheckSum() const { return std::string("cksum"); } bool DataPoint::CheckModified() const { return (modified != -1); } void DataPoint::SetModified(const Time& val) { modified = val; } const Time& DataPoint::GetModified() const { return modified; } bool DataPoint::CheckValid() const { return (valid != -1); } void DataPoint::SetValid(const Time& val) { valid = val; } const Time& DataPoint::GetValid() const { return valid; } void DataPoint::SetAccessLatency(const DataPointAccessLatency& val) { access_latency = val; } DataPoint::DataPointAccessLatency DataPoint::GetAccessLatency() const { return access_latency; } int DataPoint::GetTries() const { return triesleft; } void DataPoint::SetTries(const int n) { triesleft = std::max(0, n); } void DataPoint::NextTry() { if(triesleft) --triesleft; } bool DataPoint::RequiresCredentials() const { return true; } bool DataPoint::RequiresCredentialsInFile() const { return false; } void DataPoint::SetMeta(const DataPoint& p) { if (!CheckSize()) SetSize(p.GetSize()); if (!CheckCheckSum()) SetCheckSum(p.GetCheckSum()); if (!CheckModified()) SetModified(p.GetModified()); if (!CheckValid()) SetValid(p.GetValid()); } void DataPoint::ResetMeta() { size = (unsigned long long int)(-1); checksum.clear(); modified = -1; valid = -1; } bool DataPoint::CompareMeta(const DataPoint& p) const { if (CheckSize() && p.CheckSize()) if 
(GetSize() != p.GetSize()) return false; if (CheckCheckSum() && p.CheckCheckSum()) // TODO: compare checksums properly if (strcasecmp(GetCheckSum().c_str(), p.GetCheckSum().c_str())) return false; if (CheckValid() && p.CheckValid()) if (GetValid() != p.GetValid()) return false; return true; } std::vector DataPoint::TransferLocations() const { // return empty vector std::vector urls; urls.push_back(url); return urls; } void DataPoint::AddURLOptions(const std::map& options) { for (std::map::const_iterator key = options.begin(); key != options.end(); ++key) { if (valid_url_options.find(key->first) == valid_url_options.end()) { logger.msg(VERBOSE, "Skipping invalid URL option %s", key->first); } else { url.AddOption(key->first, key->second, true); } } } DataStatus DataPoint::Transfer3rdParty(const URL& source, const URL& destination, const UserConfig& usercfg, Callback3rdParty callback) { // to load GFAL instead of ARC's DMCs we create a fake URL with gfal protocol URL gfal_url(destination); gfal_url.ChangeProtocol("gfal"); // load GFAL DMC DataHandle gfal_handle(gfal_url, usercfg); if (!gfal_handle) { logger.msg(Arc::ERROR, "Third party transfer was requested but the corresponding plugin could\n" " not be loaded. Is the GFAL plugin installed? If not, please install the\n" " packages 'nordugrid-arc-plugins-gfal' and 'gfal2-all'. Depending on\n" " your type of installation the package names might differ."); return DataStatus(DataStatus::TransferError, EOPNOTSUPP, "Could not load GFAL plugin"); } return gfal_handle->Transfer3rdParty(source, destination, callback); } DataPointLoader::DataPointLoader() : Loader(BaseConfig().MakeConfig(Config()).Parent()) {} DataPointLoader::~DataPointLoader() {} DataPoint* DataPointLoader::load(const URL& url, const UserConfig& usercfg) { DataPointPluginArgument arg(url, usercfg); factory_->load(FinderLoader::GetLibrariesList(), "HED:DMC"); return factory_->GetInstance("HED:DMC", &arg, false); } DataPointLoader& DataHandle::getLoader() { // For C++ it would be enough to have // static DataPointLoader loader; // But Java sometimes does not destroy objects causing // PluginsFactory destructor loop forever waiting for // plugins to exit. 
static DataPointLoader* loader = NULL; if(!loader) { loader = new DataPointLoader(); } return *loader; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/DataMover.cpp0000644000000000000000000000012413213445240022752 xustar000000000000000027 mtime=1512983200.815191 27 atime=1513200574.536702 30 ctime=1513200659.241738212 nordugrid-arc-5.4.2/src/hed/libs/data/DataMover.cpp0000644000175000002070000013426313213445240023030 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include // NOTE: On Solaris errno is not working properly if cerrno is included first #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace Arc { Logger DataMover::logger(Logger::getRootLogger(), "DataMover"); DataMover::DataMover() : be_verbose(false), force_secure(false), force_passive(false), force_registration(false), do_checks(true), do_retries(true), default_min_speed(0), default_min_speed_time(0), default_min_average_speed(0), default_max_inactivity_time(300), show_progress(NULL), cancelled(false) {} DataMover::~DataMover() { Cancel(); // Wait for Transfer() to finish with lock Glib::Mutex::Lock lock(lock_); } bool DataMover::verbose() { return be_verbose; } void DataMover::verbose(bool val) { be_verbose = val; } void DataMover::verbose(const std::string& prefix) { be_verbose = true; verbose_prefix = prefix; } bool DataMover::retry() { return do_retries; } void DataMover::retry(bool val) { do_retries = val; } bool DataMover::checks() { return do_checks; } void DataMover::checks(bool val) { do_checks = val; } typedef struct { DataPoint *source; DataPoint *destination; FileCache *cache; const URLMap *map; unsigned long long int min_speed; time_t min_speed_time; unsigned long long int min_average_speed; time_t max_inactivity_time; DataMover::callback cb; DataMover *it; void *arg; const char *prefix; } transfer_struct; DataStatus DataMover::Delete(DataPoint& url, bool errcont) { DataStatus res = DataStatus::Success; bool remove_lfn = !url.HaveLocations(); // pfn or plain url if (!url.Resolve(true).Passed()) { // TODO: Check if error is real or "not exist". if (remove_lfn) { logger.msg(INFO, "No locations found - probably no more physical instances"); }; }; std::list removed_urls; if (url.HaveLocations()) for (; url.LocationValid();) { logger.msg(INFO, "Removing %s", url.CurrentLocation().str()); // It can happen that after resolving list contains duplicated // physical locations obtained from different meta-data-services. // Because not all locations can reliably say if files does not exist // or access is not allowed, avoid duplicated delete attempts. 
bool url_was_deleted = false; for (std::list::iterator u = removed_urls.begin(); u != removed_urls.end(); ++u) if (url.CurrentLocation() == (*u)) { url_was_deleted = true; break; } if (url_was_deleted) { logger.msg(DEBUG, "This instance was already deleted"); } else { url.SetSecure(false); DataStatus r = url.Remove(); if (!r) { logger.msg(INFO, "Failed to delete physical file"); res = r; if (!errcont) { url.NextLocation(); continue; } } else removed_urls.push_back(url.CurrentLocation()); } if (url.IsIndex()) { logger.msg(INFO, "Removing metadata in %s", url.CurrentLocationMetadata()); DataStatus r = url.Unregister(false); if (!r) { logger.msg(INFO, "Failed to delete meta-information"); res = r; url.NextLocation(); } else url.RemoveLocation(); } else { // Leave immediately in case of direct URL break; } } if (url.IsIndex()) { if (url.HaveLocations()) { logger.msg(INFO, "Failed to remove all physical instances"); return res; } if (remove_lfn) { logger.msg(INFO, "Removing logical file from metadata %s", url.str()); DataStatus r = url.Unregister(true); if (!r) { logger.msg(INFO, "Failed to delete logical file"); return r; } } } else { if (!url.LocationValid()) { logger.msg(INFO, "Failed to remove instance"); return res; } } return DataStatus::Success; } void transfer_func(void *arg) { transfer_struct *param = (transfer_struct*)arg; DataStatus res = param->it->Transfer(*(param->source), *(param->destination), *(param->cache), *(param->map), param->min_speed, param->min_speed_time, param->min_average_speed, param->max_inactivity_time, NULL, NULL, param->prefix); (*(param->cb))(param->it, res, param->arg); if (param->prefix) free((void*)(param->prefix)); if (param->cache) delete param->cache; free(param); } /* transfer data from source to destination */ DataStatus DataMover::Transfer(DataPoint& source, DataPoint& destination, FileCache& cache, const URLMap& map, DataMover::callback cb, void *arg, const char *prefix) { return Transfer(source, destination, cache, map, default_min_speed, default_min_speed_time, default_min_average_speed, default_max_inactivity_time, cb, arg, prefix); } DataStatus DataMover::Transfer(DataPoint& source, DataPoint& destination, FileCache& cache, const URLMap& map, unsigned long long int min_speed, time_t min_speed_time, unsigned long long int min_average_speed, time_t max_inactivity_time, DataMover::callback cb, void *arg, const char *prefix) { class DataPointStopper { private: DataPoint& point_; public: DataPointStopper(DataPoint& p):point_(p) {}; ~DataPointStopper(void) { point_.StopReading(); point_.FinishReading(); point_.StopWriting(); point_.FinishWriting(); }; }; if (cb != NULL) { logger.msg(VERBOSE, "DataMover::Transfer : starting new thread"); transfer_struct *param = (transfer_struct*)malloc(sizeof(transfer_struct)); if (param == NULL) return DataStatus::TransferError; param->source = &source; param->destination = &destination; param->cache = new FileCache(cache); param->map = ↦ param->min_speed = min_speed; param->min_speed_time = min_speed_time; param->min_average_speed = min_average_speed; param->max_inactivity_time = max_inactivity_time; param->cb = cb; param->it = this; param->arg = arg; param->prefix = NULL; if (prefix) param->prefix = strdup(prefix); if (param->prefix == NULL) param->prefix = strdup(verbose_prefix.c_str()); if (!CreateThreadFunction(&transfer_func, param)) { if(param->prefix) free((void*)(param->prefix)); if(param->cache) delete param->cache; free(param); return DataStatus::TransferError; } return DataStatus::Success; } 
logger.msg(INFO, "Transfer from %s to %s", source.str(), destination.str()); if (!source) { logger.msg(ERROR, "Not valid source"); source.NextTry(); return DataStatus(DataStatus::ReadAcquireError, EINVAL, "Source is not valid"); } if (!destination) { logger.msg(ERROR, "Not valid destination"); destination.NextTry(); return DataStatus(DataStatus::WriteAcquireError, EINVAL, "Destination is not valid"); } // initial cache check, if the DN is cached we can exit straight away bool cacheable = false; bool executable = (source.GetURL().Option("exec") == "yes") ? true : false; bool cache_copy = (source.GetURL().Option("cache") == "copy") ? true : false; // DN is used for cache permissions std::string dn; Time exp_time(0); if (source.Cache() && destination.Local() && cache) { cacheable = true; try { // TODO (important) load credential in unified way or // use already loaded one Credential ci(source.GetUserConfig().ProxyPath(), source.GetUserConfig().ProxyPath(), source.GetUserConfig().CACertificatesDirectory(), ""); dn = ci.GetIdentityName(); exp_time = ci.GetEndTime(); } catch (CredentialError& e) { logger.msg(WARNING, "Couldn't handle certificate: %s", e.what()); } } #ifndef WIN32 if (cacheable && source.GetURL().Option("cache") != "renew" && source.GetURL().Option("cache") != "check") { std::string canonic_url = source.str(); bool is_in_cache = false; bool is_locked = false; if (cache.Start(canonic_url, is_in_cache, is_locked)) { if (is_in_cache) { logger.msg(INFO, "File %s is cached (%s) - checking permissions", canonic_url, cache.File(canonic_url)); // check the list of cached DNs if (cache.CheckDN(canonic_url, dn)) { logger.msg(VERBOSE, "Permission checking passed"); logger.msg(INFO, "Linking/copying cached file"); bool cache_link_result = cache.Link(destination.CurrentLocation().Path(), canonic_url, (!source.ReadOnly() || executable || cache_copy), executable, false, is_locked); source.NextTry(); /* to decrease retry counter */ if (cache_link_result) return DataStatus::SuccessCached; // if it failed with lock problems - continue and try later if (!is_locked) return DataStatus::CacheError; } } else { cache.Stop(canonic_url); } } } #endif /*WIN32*/ for (;;) { DataStatus dres = source.Resolve(true); if (dres.Passed()) { if (source.HaveLocations()) break; logger.msg(ERROR, "No locations for source found: %s", source.str()); dres = DataStatus(DataStatus::ReadResolveError, EARCRESINVAL, "No locations found"); } else logger.msg(ERROR, "Failed to resolve source: %s", source.str()); source.NextTry(); /* try again */ if (!do_retries) return dres; if (!source.LocationValid()) return dres; } for (;;) { DataStatus dres = destination.Resolve(false); if (dres.Passed()) { if (destination.HaveLocations()) break; logger.msg(ERROR, "No locations for destination found: %s", destination.str()); dres = DataStatus(DataStatus::WriteResolveError, EARCRESINVAL, "No locations found"); } else logger.msg(ERROR, "Failed to resolve destination: %s", destination.str()); destination.NextTry(); /* try again */ if (!do_retries) return dres; if (!destination.LocationValid()) return dres; } bool replication = false; if (source.IsIndex() && destination.IsIndex()) // check for possible replication if (source.GetURL() == destination.GetURL()) { replication = true; // we do not want to replicate to same physical file destination.RemoveLocations(source); if (!destination.HaveLocations()) { logger.msg(ERROR, "No locations for destination different from source " "found: %s", destination.str()); return 
DataStatus(DataStatus::WriteResolveError, EEXIST); } } // Try to avoid any additional checks meant to provide // meta-information whenever possible bool checks_required = destination.AcceptsMeta() && (!replication); bool destination_meta_initially_stored = destination.Registered(); bool destination_overwrite = false; if (!replication) { // overwriting has no sense in case of replication std::string value = destination.GetURL().Option("overwrite", "no"); if (strcasecmp(value.c_str(), "no") != 0) destination_overwrite = true; } // sort source replicas according to the expression supplied source.SortLocations(preferred_pattern, map); if (destination_overwrite) { if ((destination.IsIndex() && destination_meta_initially_stored) || (!destination.IsIndex())) { URL del_url = destination.GetURL(); logger.msg(VERBOSE, "DataMover::Transfer: trying to destroy/overwrite " "destination: %s", del_url.str()); int try_num = destination.GetTries(); for (;;) { DataHandle del(del_url, destination.GetUserConfig()); del->SetTries(1); DataStatus res = Delete(*del); if (res == DataStatus::Success) break; if (!destination.IsIndex()) { // pfn has chance to be overwritten directly logger.msg(WARNING, "Failed to delete %s but will still try to copy", del_url.str()); break; } logger.msg(INFO, "Failed to delete %s", del_url.str()); destination.NextTry(); /* try again */ if (!do_retries) return res; if ((--try_num) <= 0) return res; } if (destination.IsIndex()) { for (;;) { DataStatus dres = destination.Resolve(false); if (dres.Passed()) { if (destination.HaveLocations()) break; logger.msg(ERROR, "No locations for destination found: %s", destination.str()); } else logger.msg(ERROR, "Failed to resolve destination: %s", destination.str()); destination.NextTry(); /* try again */ if (!do_retries) return dres; if (!destination.LocationValid()) return dres; } destination_meta_initially_stored = destination.Registered(); if (destination_meta_initially_stored) { logger.msg(INFO, "Deleted but still have locations at %s", destination.str()); destination.NextTry(); /* try again */ return DataStatus::WriteResolveError; } } } } DataStatus res = DataStatus::TransferError; int try_num; for (try_num = 0;; try_num++) { /* cycle for retries */ Glib::Mutex::Lock lock(lock_); logger.msg(VERBOSE, "DataMover: cycle"); if ((try_num != 0) && (!do_retries)) { logger.msg(VERBOSE, "DataMover: no retries requested - exit"); return res; } if ((!source.LocationValid()) || (!destination.LocationValid())) { if (!source.LocationValid()) logger.msg(VERBOSE, "DataMover: source out of tries - exit"); if (!destination.LocationValid()) logger.msg(VERBOSE, "DataMover: destination out of tries - exit"); /* out of tries */ return res; } DataBuffer buffer; // Make sure any transfer is stopped before buffer is destroyed DataPointStopper source_stop(source); DataPointStopper destination_stop(destination); logger.msg(INFO, "Real transfer from %s to %s", source.CurrentLocation().str(), destination.CurrentLocation().str()); /* creating handler for transfer */ source.SetSecure(force_secure); source.Passive(force_passive); destination.SetSecure(force_secure); destination.Passive(force_passive); destination.SetAdditionalChecks(do_checks); /* take suggestion from DataHandle about buffer, etc. 
*/ long long int bufsize; int bufnum; /* tune buffers */ // bufsize = 16384; // 65536; /* have reasonable buffer size */ bufsize =65536; /* have reasonable buffer size */ bool seekable = destination.WriteOutOfOrder(); source.ReadOutOfOrder(seekable); bufnum = 1; if (source.BufSize() > bufsize) bufsize = source.BufSize(); if (destination.BufSize() > bufsize) bufsize = destination.BufSize(); if (seekable) { if (source.BufNum() > bufnum) bufnum = source.BufNum(); if (destination.BufNum() > bufnum) bufnum = destination.BufNum(); } bufnum = bufnum * 2; logger.msg(VERBOSE, "Creating buffer: %lli x %i", bufsize, bufnum); // Checksum logic: // if destination has meta, use default checksum it accepts, or override // with url option. If not check source checksum and calculate that one. // If not available calculate default checksum for source even if source // doesn't have it at the moment since it might be available after start_reading(). CheckSumAny crc; CheckSumAny crc_source; CheckSumAny crc_dest; std::string crc_type(""); // check if checksumming is turned off if ((source.GetURL().Option("checksum") == "no") || (destination.GetURL().Option("checksum") == "no")) { logger.msg(VERBOSE, "DataMove::Transfer: no checksum calculation for %s", source.str()); } // check if checksum is specified as a metadata attribute else if (!destination.GetURL().MetaDataOption("checksumtype").empty() && !destination.GetURL().MetaDataOption("checksumvalue").empty()) { crc_type = destination.GetURL().MetaDataOption("checksumtype"); logger.msg(VERBOSE, "DataMove::Transfer: using supplied checksum %s:%s", destination.GetURL().MetaDataOption("checksumtype"), destination.GetURL().MetaDataOption("checksumvalue")); std::string csum = destination.GetURL().MetaDataOption("checksumtype") + ':' + destination.GetURL().MetaDataOption("checksumvalue"); destination.SetCheckSum(csum); } else if (destination.AcceptsMeta() || destination.ProvidesMeta()) { if (destination.GetURL().Option("checksum").empty()) { crc_type = destination.DefaultCheckSum(); } else { crc_type = destination.GetURL().Option("checksum"); } } else if (source.CheckCheckSum()) { crc_type = source.GetCheckSum(); crc_type = crc_type.substr(0, crc_type.find(':')); } else if (source.ProvidesMeta()) { crc_type = source.DefaultCheckSum(); } if (!crc_type.empty()) { crc = crc_type.c_str(); crc_source = crc_type.c_str(); crc_dest = crc_type.c_str(); if (crc.Type() != CheckSumAny::none) logger.msg(VERBOSE, "DataMove::Transfer: will calculate %s checksum", crc_type); } /* create buffer and tune speed control */ buffer.set(&crc, bufsize, bufnum); if (!buffer) logger.msg(WARNING, "Buffer creation failed !"); buffer.speed.set_min_speed(min_speed, min_speed_time); buffer.speed.set_min_average_speed(min_average_speed); buffer.speed.set_max_inactivity_time(max_inactivity_time); buffer.speed.verbose(be_verbose); if (be_verbose) { if (prefix) buffer.speed.verbose(std::string(prefix)); else buffer.speed.verbose(verbose_prefix); buffer.speed.set_progress_indicator(show_progress); } /* checking if current source should be mapped to different location */ /* TODO: make mapped url to be handled by source handle directly */ bool mapped = false; URL mapped_url; if (destination.Local()) { mapped_url = source.CurrentLocation(); mapped = map.map(mapped_url); /* TODO: copy options to mapped_url */ if (!mapped) mapped_url = URL(); else { logger.msg(VERBOSE, "URL is mapped to: %s", mapped_url.str()); if (mapped_url.Protocol() == "link") /* can't cache links */ cacheable = false; } } // Do not link 
if user asks. Replace link:// with file:// if ((!source.ReadOnly()) && mapped) { if (mapped_url.Protocol() == "link") { mapped_url.ChangeProtocol("file"); } } DataHandle mapped_h(mapped_url, source.GetUserConfig()); DataPoint& mapped_p(*mapped_h); if (mapped_h) { mapped_p.SetSecure(force_secure); mapped_p.Passive(force_passive); } /* Try to initiate cache (if needed) */ std::string canonic_url = source.str(); #ifndef WIN32 if (cacheable) { res = DataStatus::Success; bool use_remote = true; bool delete_first = (source.GetURL().Option("cache") == "renew"); for (;;) { /* cycle for outdated cache files */ bool is_in_cache = false; bool is_locked = false; if (!cache.Start(canonic_url, is_in_cache, is_locked, use_remote, delete_first)) { if (is_locked) { logger.msg(VERBOSE, "Cached file is locked - should retry"); source.NextTry(); /* to decrease retry counter */ return DataStatus(DataStatus::CacheError, EAGAIN, "Cache file locked"); } cacheable = false; logger.msg(INFO, "Failed to initiate cache"); source.NextLocation(); /* try another source */ break; } if (is_in_cache) { /* just need to check permissions */ logger.msg(INFO, "File %s is cached (%s) - checking permissions", canonic_url, cache.File(canonic_url)); // check the list of cached DNs bool have_permission = false; // don't request metadata from source if user says not to bool check_meta = (source.GetURL().Option("cache") != "invariant"); if (source.GetURL().Option("cache") != "check" && cache.CheckDN(canonic_url, dn)) have_permission = true; else { DataStatus cres = source.Check(check_meta); if (!cres.Passed()) { logger.msg(ERROR, "Permission checking failed: %s", canonic_url); source.NextLocation(); /* try another source */ logger.msg(VERBOSE, "source.next_location"); res = cres; break; } cache.AddDN(canonic_url, dn, exp_time); } logger.msg(INFO, "Permission checking passed"); /* check if file is fresh enough */ bool outdated = true; if (have_permission || !check_meta) outdated = false; // cached DN means don't check creation date if (source.CheckModified() && cache.CheckCreated(canonic_url)) { Time sourcetime = source.GetModified(); Time cachetime = cache.GetCreated(canonic_url); logger.msg(VERBOSE, "Source modification date: %s", sourcetime.str()); logger.msg(VERBOSE, "Cache creation date: %s", cachetime.str()); if (sourcetime <= cachetime) outdated = false; } if (outdated) { delete_first = true; logger.msg(INFO, "Cached file is outdated, will re-download"); use_remote = false; continue; } logger.msg(VERBOSE, "Cached copy is still valid"); logger.msg(INFO, "Linking/copying cached file"); if (!cache.Link(destination.CurrentLocation().Path(), canonic_url, (!source.ReadOnly() || executable || cache_copy), executable, false, is_locked)) { source.NextTry(); /* to decrease retry counter */ if (is_locked) { logger.msg(VERBOSE, "Cached file is locked - should retry"); return DataStatus(DataStatus::CacheError, EAGAIN, "Cache file locked"); } return DataStatus::CacheError; } return DataStatus::SuccessCached; // Leave here. Rest of code below is for transfer. 
} break; } if (cacheable && !res.Passed()) continue; } #endif /*WIN32*/ if (mapped) { if ((mapped_url.Protocol() == "link") || (mapped_url.Protocol() == "file")) { /* check permissions first */ logger.msg(INFO, "URL is mapped to local access - " "checking permissions on original URL"); DataStatus cres = source.Check(false); if (!cres.Passed()) { logger.msg(ERROR, "Permission checking on original URL failed: %s", source.str()); source.NextLocation(); /* try another source */ logger.msg(VERBOSE, "source.next_location"); #ifndef WIN32 if (cacheable) cache.StopAndDelete(canonic_url); #endif res = cres; continue; } logger.msg(VERBOSE, "Permission checking passed"); if (mapped_url.Protocol() == "link") { logger.msg(VERBOSE, "Linking local file"); const std::string& file_name = mapped_url.Path(); const std::string& link_name = destination.CurrentLocation().Path(); // create directory structure for link_name { User user; std::string dirpath = Glib::path_get_dirname(link_name); if(dirpath == ".") dirpath = G_DIR_SEPARATOR_S; if (!DirCreate(dirpath, user.get_uid(), user.get_gid(), S_IRWXU, true) != 0) { int err = errno; logger.msg(ERROR, "Failed to create directory %s", dirpath); source.NextLocation(); /* try another source */ logger.msg(VERBOSE, "source.next_location"); res = DataStatus(DataStatus::ReadStartError, err); continue; } } // make link if (symlink(file_name.c_str(), link_name.c_str()) == -1) { int err = errno; logger.msg(ERROR, "Failed to make symbolic link %s to %s : %s", link_name, file_name, StrError()); source.NextLocation(); /* try another source */ logger.msg(VERBOSE, "source.next_location"); res = DataStatus(DataStatus::ReadStartError, err); continue; } User user; if (lchown(link_name.c_str(), user.get_uid(), user.get_gid()) == -1) { logger.msg(WARNING, "Failed to change owner of symbolic link %s to %i", link_name, user.get_uid()); } return DataStatus::Success; // Leave after making a link. Rest moves data. } } } URL churl; #ifndef WIN32 if (cacheable) { /* create new destination for cache file */ churl = cache.File(canonic_url); logger.msg(INFO, "cache file: %s", churl.Path()); } #endif // don't switch user to access cache UserConfig usercfg(destination.GetUserConfig()); usercfg.SetUser(User(getuid())); DataHandle chdest_h(churl, usercfg); DataPoint& chdest(*chdest_h); if (chdest_h) { chdest.SetSecure(force_secure); chdest.Passive(force_passive); chdest.SetAdditionalChecks(false); // don't pre-allocate space in cache chdest.SetMeta(destination); // share metadata } DataPoint& source_url = mapped ? mapped_p : source; DataPoint& destination_url = cacheable ? 
chdest : destination; // Disable checks meant to provide meta-information if not needed source_url.SetAdditionalChecks((do_checks && (checks_required || cacheable)) || (show_progress != NULL)); if (source_url.GetAdditionalChecks()) { FileInfo fileinfo; DataPoint::DataPointInfoType verb = (DataPoint::DataPointInfoType) (DataPoint::INFO_TYPE_TIMES | DataPoint::INFO_TYPE_CONTENT); DataStatus r = source_url.Stat(fileinfo, verb); if (!r.Passed()) { logger.msg(ERROR, "Failed to stat source %s", source_url.str()); if (source.NextLocation()) logger.msg(VERBOSE, "(Re)Trying next source"); res = r; #ifndef WIN32 if (cacheable) cache.StopAndDelete(canonic_url); #endif continue; } // check location meta r = source_url.CompareLocationMetadata(); if (!r.Passed()) { if (r == DataStatus::InconsistentMetadataError) logger.msg(ERROR, "Meta info of source and location do not match for %s", source_url.str()); if (source.NextLocation()) logger.msg(VERBOSE, "(Re)Trying next source"); res = r; #ifndef WIN32 if (cacheable) cache.StopAndDelete(canonic_url); #endif continue; } // check for high latency if (source_url.GetAccessLatency() == Arc::DataPoint::ACCESS_LATENCY_LARGE) { if (source.LastLocation()) { logger.msg(WARNING, "Replica %s has high latency, but no more sources exist so will use this one", source_url.CurrentLocation().str()); } else { logger.msg(INFO, "Replica %s has high latency, trying next source", source_url.CurrentLocation().str()); source.NextLocation(); #ifndef WIN32 if (cacheable) cache.StopAndDelete(canonic_url); #endif continue; } } } source_url.AddCheckSumObject(&crc_source); unsigned int wait_time; DataStatus datares = source_url.PrepareReading(max_inactivity_time, wait_time); if (!datares.Passed()) { logger.msg(ERROR, "Failed to prepare source: %s", source_url.str()); source_url.FinishReading(true); res = datares; /* try another source */ if (source.NextLocation()) logger.msg(VERBOSE, "(Re)Trying next source"); #ifndef WIN32 if (cacheable) cache.StopAndDelete(canonic_url); #endif continue; } datares = source_url.StartReading(buffer); if (!datares.Passed()) { logger.msg(ERROR, "Failed to start reading from source: %s", source_url.str()); source_url.StopReading(); source_url.FinishReading(true); res = datares; if (source.GetFailureReason() != DataStatus::UnknownError) res = source.GetFailureReason(); /* try another source */ if (source.NextLocation()) logger.msg(VERBOSE, "(Re)Trying next source"); #ifndef WIN32 if (cacheable) cache.StopAndDelete(canonic_url); #endif continue; } if (mapped) destination.SetMeta(mapped_p); if (force_registration && destination.IsIndex()) { // at least compare metadata if (!destination.CompareMeta(source)) { logger.msg(ERROR, "Metadata of source and destination are different"); source_url.StopReading(); source_url.FinishReading(true); source.NextLocation(); /* not exactly sure if this would help */ res = DataStatus::PreRegisterError; #ifndef WIN32 if (cacheable) cache.StopAndDelete(canonic_url); #endif continue; } } // pass metadata gathered during start_reading() // from source to destination destination.SetMeta(source); if (chdest_h) chdest.SetMeta(source); if (destination.CheckSize()) buffer.speed.set_max_data(destination.GetSize()); datares = destination.PreRegister(replication, force_registration); if (!datares.Passed()) { logger.msg(ERROR, "Failed to preregister destination: %s", destination.str()); source_url.StopReading(); source_url.FinishReading(true); destination.NextLocation(); /* not exactly sure if this would help */ logger.msg(VERBOSE, 
"destination.next_location"); res = datares; // Normally remote destination is not cached. But who knows. #ifndef WIN32 if (cacheable) cache.StopAndDelete(canonic_url); #endif continue; } buffer.speed.reset(); // cache files don't need prepared datares = destination.PrepareWriting(max_inactivity_time, wait_time); if (!datares.Passed()) { logger.msg(ERROR, "Failed to prepare destination: %s", destination.str()); destination.FinishWriting(true); source_url.StopReading(); source_url.FinishReading(true); if (!destination.PreUnregister(replication || destination_meta_initially_stored).Passed()) logger.msg(ERROR, "Failed to unregister preregistered lfn. " "You may need to unregister it manually: %s", destination.str()); /* try another source */ if (destination.NextLocation()) logger.msg(VERBOSE, "(Re)Trying next destination"); res = datares; continue; } DataStatus read_failure = DataStatus::Success; DataStatus write_failure = DataStatus::Success; std::string cache_lock; if (!cacheable) { destination.AddCheckSumObject(&crc_dest); datares = destination.StartWriting(buffer); if (!datares.Passed()) { logger.msg(ERROR, "Failed to start writing to destination: %s", destination.str()); destination.StopWriting(); destination.FinishWriting(true); source_url.StopReading(); source_url.FinishReading(true); if (!destination.PreUnregister(replication || destination_meta_initially_stored).Passed()) logger.msg(ERROR, "Failed to unregister preregistered lfn. " "You may need to unregister it manually: %s", destination.str()); if (destination.NextLocation()) logger.msg(VERBOSE, "(Re)Trying next destination"); res = datares; if(destination.GetFailureReason() != DataStatus::UnknownError) res = destination.GetFailureReason(); continue; } } else { #ifndef WIN32 chdest.AddCheckSumObject(&crc_dest); datares = chdest.StartWriting(buffer); if (!datares.Passed()) { // TODO: put callback to clean cache into FileCache logger.msg(ERROR, "Failed to start writing to cache"); chdest.StopWriting(); source_url.StopReading(); source_url.FinishReading(true); // hope there will be more space next time cache.StopAndDelete(canonic_url); if (!destination.PreUnregister(replication || destination_meta_initially_stored).Passed()) logger.msg(ERROR, "Failed to unregister preregistered lfn. 
" "You may need to unregister it manually"); destination.NextLocation(); /* to decrease retry counter */ return DataStatus::CacheError; // repeating won't help here } cache_lock = chdest.GetURL().Path()+FileLock::getLockSuffix(); #endif } logger.msg(VERBOSE, "Waiting for buffer"); // cancelling will make loop exit before eof, triggering error and destinatinon cleanup for (; (!buffer.eof_read() || !buffer.eof_write()) && !buffer.error() && !cancelled;) { buffer.wait_any(); if (cacheable && !cache_lock.empty()) { // touch cache lock file regularly so it is still valid if (utime(cache_lock.c_str(), NULL) == -1) { logger.msg(WARNING, "Failed updating timestamp on cache lock file %s for file %s: %s", cache_lock, source_url.str(), StrError(errno)); } } } logger.msg(VERBOSE, "buffer: read EOF : %s", buffer.eof_read()?"yes":"no"); logger.msg(VERBOSE, "buffer: write EOF: %s", buffer.eof_write()?"yes":"no"); logger.msg(VERBOSE, "buffer: error : %s, read: %s, write: %s", buffer.error()?"yes":"no", buffer.error_read()?"yes":"no", buffer.error_write()?"yes":"no"); logger.msg(VERBOSE, "Closing read channel"); read_failure = source_url.StopReading(); source_url.FinishReading((!read_failure.Passed() || buffer.error())); if (cacheable && mapped) { source.SetMeta(mapped_p); // pass more metadata (checksum) } logger.msg(VERBOSE, "Closing write channel"); // turn off checks during stop_writing() if force is turned on destination_url.SetAdditionalChecks(destination_url.GetAdditionalChecks() && !force_registration); if (!destination_url.StopWriting().Passed()) { destination_url.FinishWriting(true); buffer.error_write(true); } else if (!destination_url.FinishWriting(buffer.error())) { logger.msg(ERROR, "Failed to complete writing to destination"); buffer.error_write(true); } if (buffer.error()) { #ifndef WIN32 if (cacheable) cache.StopAndDelete(canonic_url); #endif if (!destination.PreUnregister(replication || destination_meta_initially_stored).Passed()) logger.msg(ERROR, "Failed to unregister preregistered lfn. " "You may need to unregister it manually"); // Check for cancellation if (cancelled) { logger.msg(INFO, "Transfer cancelled successfully"); return DataStatus::SuccessCancelled; } // Analyze errors // Easy part first - if either read or write part report error // go to next endpoint. if (buffer.error_read()) { if (source.NextLocation()) logger.msg(VERBOSE, "(Re)Trying next source"); // check for error from callbacks etc if(source.GetFailureReason() != DataStatus::UnknownError) res=source.GetFailureReason(); else res=DataStatus::ReadError; } else if (buffer.error_write()) { if (destination.NextLocation()) logger.msg(VERBOSE, "(Re)Trying next destination"); // check for error from callbacks etc if(destination.GetFailureReason() != DataStatus::UnknownError) { res=destination.GetFailureReason(); } else { res=DataStatus::WriteError; } } else if (buffer.error_transfer()) { // Here is more complicated case - operation timeout // Let's first check if buffer was full res = DataStatus(DataStatus::TransferError, ETIMEDOUT); if (!buffer.for_read()) { // No free buffers for 'read' side. Buffer must be full. res.SetDesc(destination.GetFailureReason().GetDesc()); if (destination.NextLocation()) { logger.msg(VERBOSE, "(Re)Trying next destination"); } } else if (!buffer.for_write()) { // Buffer is empty res.SetDesc(source.GetFailureReason().GetDesc()); if (source.NextLocation()) { logger.msg(VERBOSE, "(Re)Trying next source"); } } else { // Both endpoints were very slow? Choose randomly. 
logger.msg(VERBOSE, "Cause of failure unclear - choosing randomly"); Glib::Rand r; if (r.get_int() < (RAND_MAX / 2)) { res.SetDesc(source.GetFailureReason().GetDesc()); if (source.NextLocation()) { logger.msg(VERBOSE, "(Re)Trying next source"); } } else { res.SetDesc(destination.GetFailureReason().GetDesc()); if (destination.NextLocation()) { logger.msg(VERBOSE, "(Re)Trying next destination"); } } } } continue; } // compare checksum. For uploads this is done in StopWriting, but we also // need to check here if the sum is given in meta attributes. For downloads // compare to the original source (if available). std::string calc_csum; if (crc && buffer.checksum_valid()) { char buf[100]; crc.print(buf,100); calc_csum = buf; } else if(crc_source) { char buf[100]; crc_source.print(buf,100); calc_csum = buf; } else if(crc_dest) { char buf[100]; crc_dest.print(buf,100); calc_csum = buf; } if (!calc_csum.empty()) { // compare calculated to any checksum given as a meta option if (!destination.GetURL().MetaDataOption("checksumtype").empty() && !destination.GetURL().MetaDataOption("checksumvalue").empty() && calc_csum.substr(0, calc_csum.find(":")) == destination.GetURL().MetaDataOption("checksumtype") && calc_csum.substr(calc_csum.find(":")+1) != destination.GetURL().MetaDataOption("checksumvalue")) { // error here? yes since we'll have an inconsistent catalog otherwise logger.msg(ERROR, "Checksum mismatch between checksum given as meta option (%s:%s) and calculated checksum (%s)", destination.GetURL().MetaDataOption("checksumtype"), destination.GetURL().MetaDataOption("checksumvalue"), calc_csum); #ifndef WIN32 if (cacheable) { cache.StopAndDelete(canonic_url); } #endif if (!destination.Unregister(replication || destination_meta_initially_stored)) { logger.msg(WARNING, "Failed to unregister preregistered lfn, You may need to unregister it manually"); } res = DataStatus(DataStatus::TransferError, EARCCHECKSUM); if (!Delete(destination, true)) { logger.msg(WARNING, "Failed to delete destination, retry may fail"); } if (destination.NextLocation()) { logger.msg(VERBOSE, "(Re)Trying next destination"); } continue; } if (source.CheckCheckSum()) { std::string src_csum_s(source.GetCheckSum()); if (src_csum_s.find(':') == src_csum_s.length() -1) { logger.msg(VERBOSE, "Cannot compare empty checksum"); } // Check the checksum types match. Some buggy GridFTP servers return a // different checksum type than requested so also check that the checksum // length matches before comparing. 
else if (calc_csum.substr(0, calc_csum.find(":")) != src_csum_s.substr(0, src_csum_s.find(":")) || calc_csum.substr(calc_csum.find(":")).length() != src_csum_s.substr(src_csum_s.find(":")).length()) { logger.msg(VERBOSE, "Checksum type of source and calculated checksum differ, cannot compare"); } else if (calc_csum.substr(calc_csum.find(":")) != src_csum_s.substr(src_csum_s.find(":"))) { logger.msg(ERROR, "Checksum mismatch between calculated checksum %s and source checksum %s", calc_csum, source.GetCheckSum()); #ifndef WIN32 if(cacheable) { cache.StopAndDelete(canonic_url); } #endif res = DataStatus(DataStatus::TransferError, EARCCHECKSUM); if (source.NextLocation()) { logger.msg(VERBOSE, "(Re)Trying next source"); } continue; } else { logger.msg(VERBOSE, "Calculated transfer checksum %s matches source checksum", calc_csum); } } // set the destination checksum to be what we calculated destination.SetCheckSum(calc_csum.c_str()); } else { logger.msg(VERBOSE, "Checksum not computed"); } destination.SetMeta(source); // pass more metadata (checksum) datares = destination.PostRegister(replication); if (!datares.Passed()) { logger.msg(ERROR, "Failed to postregister destination %s", destination.str()); if (!destination.PreUnregister(replication || destination_meta_initially_stored).Passed()) logger.msg(ERROR, "Failed to unregister preregistered lfn. " "You may need to unregister it manually: %s", destination.str()); destination.NextLocation(); /* not sure if this can help */ logger.msg(VERBOSE, "destination.next_location"); #ifndef WIN32 if(cacheable) cache.Stop(canonic_url); #endif res = datares; continue; } #ifndef WIN32 if (cacheable) { cache.AddDN(canonic_url, dn, exp_time); logger.msg(INFO, "Linking/copying cached file"); bool is_locked = false; bool cache_link_result = cache.Link(destination.CurrentLocation().Path(), canonic_url, (!source.ReadOnly() || executable || cache_copy), executable, true, is_locked); cache.Stop(canonic_url); if (!cache_link_result) { if (!destination.PreUnregister(replication || destination_meta_initially_stored).Passed()) logger.msg(ERROR, "Failed to unregister preregistered lfn. " "You may need to unregister it manually"); source.NextTry(); /* to decrease retry count */ if (is_locked) return DataStatus(DataStatus::CacheError, EAGAIN, "Cache file locked"); return DataStatus::CacheError; // retry won't help } } #endif if (buffer.error()) continue; // should never happen - keep just in case break; } return DataStatus::Success; } void DataMover::Cancel() { cancelled = true; } void DataMover::secure(bool val) { force_secure = val; } void DataMover::passive(bool val) { force_passive = val; } void DataMover::force_to_meta(bool val) { force_registration = val; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315725022434 xustar000000000000000030 mtime=1513200597.293980567 30 atime=1513200647.976600435 29 ctime=1513200659.23173809 nordugrid-arc-5.4.2/src/hed/libs/data/Makefile.in0000644000175000002070000013534213214315725022513 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/data DIST_COMMON = README $(libarcdata_la_HEADERS) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in $(srcdir)/cache-clean.1.in \ $(srcdir)/cache-clean.in $(srcdir)/cache-list.1.in \ $(srcdir)/cache-list.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = cache-clean cache-clean.1 cache-list cache-list.1 CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(pkglibexecdir)" \ "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(libarcdata_ladir)" LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = libarcdata_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libarcdata_la_OBJECTS = libarcdata_la-DataPoint.lo \ libarcdata_la-DataPointDirect.lo \ libarcdata_la-DataPointIndex.lo libarcdata_la-DataBuffer.lo \ libarcdata_la-DataSpeed.lo libarcdata_la-DataMover.lo \ libarcdata_la-URLMap.lo libarcdata_la-DataStatus.lo \ 
libarcdata_la-FileCache.lo libarcdata_la-FileCacheHash.lo libarcdata_la_OBJECTS = $(am_libarcdata_la_OBJECTS) libarcdata_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libarcdata_la_CXXFLAGS) \ $(CXXFLAGS) $(libarcdata_la_LDFLAGS) $(LDFLAGS) -o $@ SCRIPTS = $(pkglibexec_SCRIPTS) DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcdata_la_SOURCES) DIST_SOURCES = $(libarcdata_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive man1dir = $(mandir)/man1 NROFF = nroff MANS = $(man_MANS) HEADERS = $(libarcdata_la_HEADERS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = 
@ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ 
LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ 
pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ lib_LTLIBRARIES = libarcdata.la @WIN32_FALSE@DIRS = $(TEST_DIR) examples @WIN32_TRUE@DIRS = examples SUBDIRS = $(DIRS) DIST_SUBDIRS = test examples @WIN32_FALSE@pkglibexec_SCRIPTS = cache-clean cache-list @WIN32_TRUE@pkglibexec_SCRIPTS = libarcdata_ladir = $(pkgincludedir)/data libarcdata_la_HEADERS = DataPoint.h DataPointDirect.h \ DataPointIndex.h DataBuffer.h \ DataSpeed.h DataMover.h URLMap.h \ DataCallback.h DataHandle.h FileInfo.h DataStatus.h \ FileCache.h FileCacheHash.h libarcdata_la_SOURCES = DataPoint.cpp DataPointDirect.cpp \ DataPointIndex.cpp DataBuffer.cpp \ DataSpeed.cpp DataMover.cpp URLMap.cpp \ DataStatus.cpp \ FileCache.cpp FileCacheHash.cpp libarcdata_la_CXXFLAGS = -I$(top_srcdir)/include $(GLIBMM_CFLAGS) \ $(LIBXML2_CFLAGS) $(GTHREAD_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libarcdata_la_LIBADD = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(GTHREAD_LIBS) \ $(OPENSSL_LIBS) libarcdata_la_LDFLAGS = -version-info 3:0:0 @WIN32_FALSE@man_MANS = cache-clean.1 cache-list.1 @WIN32_TRUE@man_MANS = all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/data/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/data/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): cache-clean: $(top_builddir)/config.status $(srcdir)/cache-clean.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cache-clean.1: $(top_builddir)/config.status $(srcdir)/cache-clean.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cache-list: $(top_builddir)/config.status $(srcdir)/cache-list.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cache-list.1: $(top_builddir)/config.status $(srcdir)/cache-list.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcdata.la: $(libarcdata_la_OBJECTS) $(libarcdata_la_DEPENDENCIES) $(libarcdata_la_LINK) -rpath $(libdir) $(libarcdata_la_OBJECTS) $(libarcdata_la_LIBADD) $(LIBS) install-pkglibexecSCRIPTS: $(pkglibexec_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkglibexecdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibexecdir)" @list='$(pkglibexec_SCRIPTS)'; test -n "$(pkglibexecdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkglibexecdir)$$dir'"; \ $(INSTALL_SCRIPT) $$files 
"$(DESTDIR)$(pkglibexecdir)$$dir" || exit $$?; \ } \ ; done uninstall-pkglibexecSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkglibexec_SCRIPTS)'; test -n "$(pkglibexecdir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkglibexecdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkglibexecdir)" && rm -f $$files mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdata_la-DataBuffer.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdata_la-DataMover.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdata_la-DataPoint.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdata_la-DataPointDirect.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdata_la-DataPointIndex.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdata_la-DataSpeed.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdata_la-DataStatus.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdata_la-FileCache.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdata_la-FileCacheHash.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdata_la-URLMap.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcdata_la-DataPoint.lo: DataPoint.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdata_la-DataPoint.lo -MD -MP -MF $(DEPDIR)/libarcdata_la-DataPoint.Tpo -c -o libarcdata_la-DataPoint.lo `test -f 'DataPoint.cpp' || echo '$(srcdir)/'`DataPoint.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdata_la-DataPoint.Tpo $(DEPDIR)/libarcdata_la-DataPoint.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataPoint.cpp' object='libarcdata_la-DataPoint.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -c -o 
libarcdata_la-DataPoint.lo `test -f 'DataPoint.cpp' || echo '$(srcdir)/'`DataPoint.cpp libarcdata_la-DataPointDirect.lo: DataPointDirect.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdata_la-DataPointDirect.lo -MD -MP -MF $(DEPDIR)/libarcdata_la-DataPointDirect.Tpo -c -o libarcdata_la-DataPointDirect.lo `test -f 'DataPointDirect.cpp' || echo '$(srcdir)/'`DataPointDirect.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdata_la-DataPointDirect.Tpo $(DEPDIR)/libarcdata_la-DataPointDirect.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataPointDirect.cpp' object='libarcdata_la-DataPointDirect.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdata_la-DataPointDirect.lo `test -f 'DataPointDirect.cpp' || echo '$(srcdir)/'`DataPointDirect.cpp libarcdata_la-DataPointIndex.lo: DataPointIndex.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdata_la-DataPointIndex.lo -MD -MP -MF $(DEPDIR)/libarcdata_la-DataPointIndex.Tpo -c -o libarcdata_la-DataPointIndex.lo `test -f 'DataPointIndex.cpp' || echo '$(srcdir)/'`DataPointIndex.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdata_la-DataPointIndex.Tpo $(DEPDIR)/libarcdata_la-DataPointIndex.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataPointIndex.cpp' object='libarcdata_la-DataPointIndex.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdata_la-DataPointIndex.lo `test -f 'DataPointIndex.cpp' || echo '$(srcdir)/'`DataPointIndex.cpp libarcdata_la-DataBuffer.lo: DataBuffer.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdata_la-DataBuffer.lo -MD -MP -MF $(DEPDIR)/libarcdata_la-DataBuffer.Tpo -c -o libarcdata_la-DataBuffer.lo `test -f 'DataBuffer.cpp' || echo '$(srcdir)/'`DataBuffer.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdata_la-DataBuffer.Tpo $(DEPDIR)/libarcdata_la-DataBuffer.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataBuffer.cpp' object='libarcdata_la-DataBuffer.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdata_la-DataBuffer.lo `test -f 'DataBuffer.cpp' || echo '$(srcdir)/'`DataBuffer.cpp libarcdata_la-DataSpeed.lo: DataSpeed.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdata_la-DataSpeed.lo -MD -MP -MF $(DEPDIR)/libarcdata_la-DataSpeed.Tpo -c -o libarcdata_la-DataSpeed.lo `test -f 'DataSpeed.cpp' || echo '$(srcdir)/'`DataSpeed.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdata_la-DataSpeed.Tpo $(DEPDIR)/libarcdata_la-DataSpeed.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataSpeed.cpp' object='libarcdata_la-DataSpeed.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdata_la-DataSpeed.lo `test -f 'DataSpeed.cpp' || echo '$(srcdir)/'`DataSpeed.cpp libarcdata_la-DataMover.lo: DataMover.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdata_la-DataMover.lo -MD -MP -MF $(DEPDIR)/libarcdata_la-DataMover.Tpo -c -o libarcdata_la-DataMover.lo `test -f 'DataMover.cpp' || echo '$(srcdir)/'`DataMover.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdata_la-DataMover.Tpo $(DEPDIR)/libarcdata_la-DataMover.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataMover.cpp' object='libarcdata_la-DataMover.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdata_la-DataMover.lo `test -f 'DataMover.cpp' || echo '$(srcdir)/'`DataMover.cpp libarcdata_la-URLMap.lo: URLMap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdata_la-URLMap.lo -MD -MP -MF $(DEPDIR)/libarcdata_la-URLMap.Tpo -c -o libarcdata_la-URLMap.lo `test -f 'URLMap.cpp' || echo '$(srcdir)/'`URLMap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdata_la-URLMap.Tpo $(DEPDIR)/libarcdata_la-URLMap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='URLMap.cpp' object='libarcdata_la-URLMap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdata_la-URLMap.lo `test -f 'URLMap.cpp' || echo '$(srcdir)/'`URLMap.cpp libarcdata_la-DataStatus.lo: DataStatus.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdata_la-DataStatus.lo -MD -MP -MF $(DEPDIR)/libarcdata_la-DataStatus.Tpo -c -o libarcdata_la-DataStatus.lo `test -f 'DataStatus.cpp' || echo '$(srcdir)/'`DataStatus.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdata_la-DataStatus.Tpo $(DEPDIR)/libarcdata_la-DataStatus.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataStatus.cpp' 
object='libarcdata_la-DataStatus.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdata_la-DataStatus.lo `test -f 'DataStatus.cpp' || echo '$(srcdir)/'`DataStatus.cpp libarcdata_la-FileCache.lo: FileCache.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdata_la-FileCache.lo -MD -MP -MF $(DEPDIR)/libarcdata_la-FileCache.Tpo -c -o libarcdata_la-FileCache.lo `test -f 'FileCache.cpp' || echo '$(srcdir)/'`FileCache.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdata_la-FileCache.Tpo $(DEPDIR)/libarcdata_la-FileCache.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FileCache.cpp' object='libarcdata_la-FileCache.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdata_la-FileCache.lo `test -f 'FileCache.cpp' || echo '$(srcdir)/'`FileCache.cpp libarcdata_la-FileCacheHash.lo: FileCacheHash.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdata_la-FileCacheHash.lo -MD -MP -MF $(DEPDIR)/libarcdata_la-FileCacheHash.Tpo -c -o libarcdata_la-FileCacheHash.lo `test -f 'FileCacheHash.cpp' || echo '$(srcdir)/'`FileCacheHash.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdata_la-FileCacheHash.Tpo $(DEPDIR)/libarcdata_la-FileCacheHash.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FileCacheHash.cpp' object='libarcdata_la-FileCacheHash.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdata_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdata_la-FileCacheHash.lo `test -f 'FileCacheHash.cpp' || echo '$(srcdir)/'`FileCacheHash.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man1: $(man_MANS) @$(NORMAL_INSTALL) test -z "$(man1dir)" || $(MKDIR_P) "$(DESTDIR)$(man1dir)" @list=''; test -n "$(man1dir)" || exit 0; \ { for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ 
echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \ done; } uninstall-man1: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man1dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ test -z "$$files" || { \ echo " ( cd '$(DESTDIR)$(man1dir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(man1dir)" && rm -f $$files; } install-libarcdata_laHEADERS: $(libarcdata_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarcdata_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarcdata_ladir)" @list='$(libarcdata_la_HEADERS)'; test -n "$(libarcdata_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarcdata_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarcdata_ladir)" || exit $$?; \ done uninstall-libarcdata_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarcdata_la_HEADERS)'; test -n "$(libarcdata_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarcdata_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libarcdata_ladir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . 
|| ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @list='$(MANS)'; if test -n "$$list"; then \ list=`for p in $$list; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \ if test -n "$$list" && \ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(SCRIPTS) $(MANS) $(HEADERS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(pkglibexecdir)" "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(libarcdata_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-libarcdata_laHEADERS install-man install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-libLTLIBRARIES install-pkglibexecSCRIPTS install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-man1 install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-libLTLIBRARIES uninstall-libarcdata_laHEADERS \ uninstall-man uninstall-pkglibexecSCRIPTS uninstall-man: uninstall-man1 .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic \ clean-libLTLIBRARIES clean-libtool ctags ctags-recursive \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-libLTLIBRARIES \ install-libarcdata_laHEADERS install-man install-man1 \ install-pdf install-pdf-am install-pkglibexecSCRIPTS \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am \ uninstall-libLTLIBRARIES uninstall-libarcdata_laHEADERS \ uninstall-man uninstall-man1 uninstall-pkglibexecSCRIPTS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/DataSpeed.h0000644000000000000000000000012412075551565022404 xustar000000000000000027 mtime=1358353269.148218 27 atime=1513200574.515701 30 ctime=1513200659.221737967 nordugrid-arc-5.4.2/src/hed/libs/data/DataSpeed.h0000644000175000002070000001370112075551565022453 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_DATASPEED_H__ #define __ARC_DATASPEED_H__ #include #include #define DATASPEED_AVERAGING_PERIOD 60 namespace Arc { /// Keeps track of average and instantaneous transfer speed. /** * Also detects data transfer inactivity and other transfer timeouts. 
* \ingroup data * \headerfile DataSpeed.h arc/data/DataSpeed.h */ class DataSpeed { private: time_t first_time; time_t last_time; time_t last_activity_time; unsigned long long int N; unsigned long long int Nall; unsigned long long int Nmax; time_t first_speed_failure; time_t last_printed; time_t T; time_t min_speed_time; time_t max_inactivity_time; unsigned long long int min_speed; unsigned long long int min_average_speed; bool be_verbose; std::string verbose_prefix; bool min_speed_failed; bool min_average_speed_failed; bool max_inactivity_time_failed; bool disabled; static Logger logger; public: /// Callback for output of transfer status. /** * A function with this signature can be passed to set_progress_indicator() * to enable user-defined output of transfer progress. * \param o FILE object connected to stderr * \param s prefix set in verbose(const std::string&) * \param t time in seconds since the start of the transfer * \param all number of bytes transferred so far * \param max total amount of bytes to be transferred (set in set_max_data()) * \param instant instantaneous transfer rate in bytes per second * \param average average transfer rate in bytes per second */ typedef void (*show_progress_t)(FILE *o, const char *s, unsigned int t, unsigned long long int all, unsigned long long int max, double instant, double average); private: show_progress_t show_progress; void print_statistics(FILE *o, time_t t); public: /// Constructor /** * \param base time period used to average values (default 1 minute). */ DataSpeed(time_t base = DATASPEED_AVERAGING_PERIOD); /// Constructor /** * \param min_speed minimal allowed speed (bytes per second). If speed * drops and holds below threshold for min_speed_time seconds error is * triggered. * \param min_speed_time time over which to calculate min_speed. * \param min_average_speed minimal average speed (bytes per second) * to trigger error. Averaged over whole current transfer time. * \param max_inactivity_time if no data is passing for specified * amount of time, error is triggered. * \param base time period used to average values (default 1 minute). */ DataSpeed(unsigned long long int min_speed, time_t min_speed_time, unsigned long long int min_average_speed, time_t max_inactivity_time, time_t base = DATASPEED_AVERAGING_PERIOD); /// Destructor ~DataSpeed(); /// Set true to activate printing transfer information during transfer. void verbose(bool val); /// Activate printing transfer information using 'prefix' at the beginning of every string. void verbose(const std::string& prefix); /// Check if speed information is going to be printed. bool verbose(); /// Set minimal allowed speed in bytes per second. /** * \param min_speed minimal allowed speed (bytes per second). If speed * drops and holds below threshold for min_speed_time seconds error * is triggered. * \param min_speed_time time over which to calculate min_speed. */ void set_min_speed(unsigned long long int min_speed, time_t min_speed_time); /// Set minimal average speed in bytes per second. /** * \param min_average_speed minimal average speed (bytes per second) * to trigger error. Averaged over whole current transfer time. */ void set_min_average_speed(unsigned long long int min_average_speed); /// Set inactivity timeout. /** * \param max_inactivity_time - if no data is passing for specified * amount of time, error is triggered. */ void set_max_inactivity_time(time_t max_inactivity_time); /// Get inactivity timeout. 
time_t get_max_inactivity_time() { return max_inactivity_time; }; /// Set averaging time period (default 1 minute). void set_base(time_t base_ = DATASPEED_AVERAGING_PERIOD); /// Set amount of data (in bytes) to be transferred. Used in verbose messages. void set_max_data(unsigned long long int max = 0); /// Specify an external function to print verbose messages. /** * If not specified, an internal function is used. * \param func pointer to function which prints information. */ void set_progress_indicator(show_progress_t func = NULL); /// Reset all counters and triggers. void reset(); /// Inform object that an amount of data has been transferred. /** * All errors are triggered by this method. To make them work the * application must call this method periodically even with zero value. * \param n amount of data transferred in bytes. * \return false if transfer rate is below limits */ bool transfer(unsigned long long int n = 0); /// Turn speed control on and off. void hold(bool disable); /// Check if minimal speed error was triggered. bool min_speed_failure() { return min_speed_failed; } /// Check if minimal average speed error was triggered. bool min_average_speed_failure() { return min_average_speed_failed; } /// Check if maximal inactivity time error was triggered. bool max_inactivity_time_failure() { return max_inactivity_time_failed; } /// Returns the number of bytes transferred so far (that this object knows about). unsigned long long int transferred_size() { return Nall; } }; } // namespace Arc #endif // __ARC_DATASPEED_H__ nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/DataPointIndex.cpp0000644000000000000000000000012413065022044023741 xustar000000000000000027 mtime=1490297892.037862 27 atime=1513200574.568702 30 ctime=1513200659.238738175 nordugrid-arc-5.4.2/src/hed/libs/data/DataPointIndex.cpp0000644000175000002070000003227613065022044024020 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include namespace Arc { DataPointIndex::DataPointIndex(const URL& url, const UserConfig& usercfg, PluginArgument* parg) : DataPoint(url, usercfg, parg), resolved(false), registered(false), h(NULL) { location = locations.end(); } DataPointIndex::~DataPointIndex() { if (h) delete h; } const URL& DataPointIndex::CurrentLocation() const { static const URL empty; if (locations.end() == location) return empty; return *location; } const std::string& DataPointIndex::CurrentLocationMetadata() const { static const std::string empty; if (locations.end() == location) return empty; return location->Name(); } DataPoint* DataPointIndex::CurrentLocationHandle() const { if (!h) return NULL; return &(**h); } DataStatus DataPointIndex::CompareLocationMetadata() const { if (h && *h) { FileInfo fileinfo; DataPoint::DataPointInfoType verb = (DataPoint::DataPointInfoType) (DataPoint::INFO_TYPE_TIMES | DataPoint::INFO_TYPE_CONTENT); DataStatus res = (*h)->Stat(fileinfo, verb); if (!res.Passed()) return res; if (!CompareMeta(*(*h))) return DataStatus::InconsistentMetadataError; } return DataStatus::Success; } bool DataPointIndex::HaveLocations() const { return (locations.size() != 0); } bool DataPointIndex::LocationValid() const { if (triesleft <= 0) return false; if (locations.end() == location) return false; return true; } bool DataPointIndex::LastLocation() { if (location == locations.end()) return true; bool last = false; if (++location == locations.end()) last = true; location--; return last; } void DataPointIndex::SetHandle(void) { //
TODO: pass various options from old handler to new if (h) delete h; if (locations.end() != location) { h = new DataHandle(*location, usercfg); if (!h || !(*h)) { logger.msg(WARNING, "Can't handle location %s", location->str()); delete h; h = NULL; RemoveLocation(); } else (*h)->SetMeta(*this); } else h = NULL; } bool DataPointIndex::NextLocation() { if (!LocationValid()) { --triesleft; return false; } ++location; if (locations.end() == location) if (--triesleft > 0) location = locations.begin(); SetHandle(); return LocationValid(); } DataStatus DataPointIndex::RemoveLocation() { if (locations.end() == location) return DataStatus::NoLocationError; location = locations.erase(location); if (locations.end() == location) location = locations.begin(); SetHandle(); return DataStatus::Success; } DataStatus DataPointIndex::RemoveLocations(const DataPoint& p_) { if (!p_.IsIndex()) return DataStatus::Success; const DataPointIndex& p = dynamic_cast(p_); std::list::iterator p_int; std::list::const_iterator p_ext; for (p_ext = p.locations.begin(); p_ext != p.locations.end(); ++p_ext) for (p_int = locations.begin(); p_int != locations.end();) // Compare URLs if (*p_int == *p_ext) if (location == p_int) { p_int = locations.erase(p_int); location = p_int; } else p_int = locations.erase(p_int); else ++p_int; if (locations.end() == location) location = locations.begin(); SetHandle(); return DataStatus::Success; } DataStatus DataPointIndex::ClearLocations() { locations.clear(); location = locations.end(); SetHandle(); return DataStatus::Success; } DataStatus DataPointIndex::AddLocation(const URL& url, const std::string& meta) { logger.msg(DEBUG, "Add location: url: %s", url.str()); logger.msg(DEBUG, "Add location: metadata: %s", meta); for (std::list::iterator i = locations.begin(); i != locations.end(); ++i) if ((i->Name() == meta) && (url == (*i))) return DataStatus::LocationAlreadyExistsError; locations.push_back(URLLocation(url, meta)); if(locations.end() == location) { location = locations.begin(); SetHandle(); } return DataStatus::Success; } void DataPointIndex::SortLocations(const std::string& pattern, const URLMap& url_map) { if (locations.size() < 2 || (pattern.empty() && !url_map)) return; std::list sorted_locations; // sort according to URL map first if (url_map) { logger.msg(VERBOSE, "Sorting replicas according to URL map"); for (std::list::iterator l = locations.begin(); l != locations.end(); ++l) { URL mapped_url = *l; if (url_map.map(mapped_url)) { logger.msg(VERBOSE, "Replica %s is mapped", l->str()); sorted_locations.push_back(*l); } } } // sort according to preferred pattern if (!pattern.empty()) { logger.msg(VERBOSE, "Sorting replicas according to preferred pattern %s", pattern); // go through each pattern in pattern - if match then add to sorted list std::list patterns; Arc::tokenize(pattern, patterns, "|"); for (std::list::iterator p = patterns.begin(); p != patterns.end(); ++p) { std::string to_match = *p; bool match_host = false; if (to_match.rfind('$') == to_match.length()-1) { // only match host to_match.erase(to_match.length()-1); match_host = true; } bool exclude = false; if (to_match.find('!') == 0) { to_match.erase(0, 1); exclude = true; } for (std::list::iterator l = locations.begin(); l != locations.end();) { if (match_host) { if ((l->Host().length() >= to_match.length()) && (l->Host().rfind(to_match) == (l->Host().length() - to_match.length()))) { if (exclude) { logger.msg(VERBOSE, "Excluding replica %s matching pattern !%s", l->str(), to_match); l = locations.erase(l); 
continue; } // check if already present bool present = false; for (std::list::iterator j = sorted_locations.begin();j!=sorted_locations.end();++j) { if (*j == *l) present = true; } if (!present) { logger.msg(VERBOSE, "Replica %s matches host pattern %s", l->str(), to_match); sorted_locations.push_back(*l); } } } else if (l->str().find(*p) != std::string::npos) { if (exclude) { logger.msg(VERBOSE, "Excluding replica %s matching pattern !%s", l->str(), to_match); l = locations.erase(l); continue; } // check if already present bool present = false; for (std::list::iterator j = sorted_locations.begin();j!=sorted_locations.end();++j) { if (*j == *l) present = true; } if (!present) { logger.msg(VERBOSE, "Replica %s matches pattern %s", l->str(), *p); sorted_locations.push_back(*l); } } ++l; } } } // add anything left for (std::list::iterator i = locations.begin();i!=locations.end();++i) { bool present = false; for (std::list::iterator j = sorted_locations.begin();j!=sorted_locations.end();++j) { if (*j == *i) present = true; } if (!present) { logger.msg(VERBOSE, "Replica %s doesn't match preferred pattern or URL map", i->str()); sorted_locations.push_back(*i); } } locations = sorted_locations; location = locations.begin(); SetHandle(); } void DataPointIndex::SetTries(const int n) { triesleft = std::max(0, n); if (triesleft == 0) location = locations.end(); else if (locations.end() == location) location = locations.begin(); SetHandle(); } bool DataPointIndex::IsIndex() const { return true; } bool DataPointIndex::IsStageable() const { if (!h || !*h) return false; return (*h)->IsStageable(); } bool DataPointIndex::AcceptsMeta() const { return true; } bool DataPointIndex::ProvidesMeta() const { return true; } void DataPointIndex::SetMeta(const DataPoint& p) { if (!CheckSize()) SetSize(p.GetSize()); if (!CheckCheckSum()) SetCheckSum(p.GetCheckSum()); if (!CheckModified()) SetModified(p.GetModified()); if (!CheckValid()) SetValid(p.GetValid()); // set for current handle if (h && *h) (*h)->SetMeta(p); } void DataPointIndex::SetCheckSum(const std::string& val) { checksum = val; if (h && *h) (*h)->SetCheckSum(val); } void DataPointIndex::SetSize(const unsigned long long int val) { size = val; if (h && *h) (*h)->SetSize(val); } bool DataPointIndex::Registered() const { return registered; } DataStatus DataPointIndex::StartReading(DataBuffer& buffer) { if (!h || !*h) return DataStatus::NoLocationError; return (*h)->StartReading(buffer); } DataStatus DataPointIndex::PrepareReading(unsigned int timeout, unsigned int& wait_time) { if (!h || !*h) return DataStatus::NoLocationError; return (*h)->PrepareReading(timeout, wait_time); } DataStatus DataPointIndex::PrepareWriting(unsigned int timeout, unsigned int& wait_time) { if (!h || !*h) return DataStatus::NoLocationError; return (*h)->PrepareWriting(timeout, wait_time); } DataStatus DataPointIndex::StartWriting(DataBuffer& buffer, DataCallback *cb) { if (!h || !*h) return DataStatus::NoLocationError; return (*h)->StartWriting(buffer, cb); } DataStatus DataPointIndex::StopReading() { if (!h || !*h) return DataStatus::NoLocationError; return (*h)->StopReading(); } DataStatus DataPointIndex::StopWriting() { if (!h || !*h) return DataStatus::NoLocationError; return (*h)->StopWriting(); } DataStatus DataPointIndex::FinishReading(bool error) { if (!h || !*h) return DataStatus::NoLocationError; return (*h)->FinishReading(error); } DataStatus DataPointIndex::FinishWriting(bool error) { if (!h || !*h) return DataStatus::NoLocationError; return (*h)->FinishWriting(error); 
} std::vector DataPointIndex::TransferLocations() const { if (!h || !*h) { std::vector empty_vector; return empty_vector; } return (*h)->TransferLocations(); } void DataPointIndex::ClearTransferLocations() { if (h && *h) (*h)->ClearTransferLocations(); } DataStatus DataPointIndex::Check(bool check_meta) { if (!h || !*h) return DataStatus::NoLocationError; return (*h)->Check(check_meta); } long long int DataPointIndex::BufSize() const { if (!h || !*h) return -1; return (*h)->BufSize(); } int DataPointIndex::BufNum() const { if (!h || !*h) return 1; return (*h)->BufNum(); } bool DataPointIndex::Local() const { if (!h || !*h) return false; return (*h)->Local(); } bool DataPointIndex::ReadOnly() const { if (!h || !*h) return true; return (*h)->ReadOnly(); } DataStatus DataPointIndex::Remove() { if (!h || !*h) return DataStatus::NoLocationError; return (*h)->Remove(); } void DataPointIndex::ReadOutOfOrder(bool v) { if (h && *h) (*h)->ReadOutOfOrder(v); } bool DataPointIndex::WriteOutOfOrder() { if (!h || !*h) return false; return (*h)->WriteOutOfOrder(); } void DataPointIndex::SetAdditionalChecks(bool v) { if (h && *h) (*h)->SetAdditionalChecks(v); } bool DataPointIndex::GetAdditionalChecks() const { if (!h || !*h) return false; return (*h)->GetAdditionalChecks(); } void DataPointIndex::SetSecure(bool v) { if (h && *h) (*h)->SetSecure(v); } bool DataPointIndex::GetSecure() const { if (!h || !*h) return false; return (*h)->GetSecure(); } DataPoint::DataPointAccessLatency DataPointIndex::GetAccessLatency() const { if (!h || !*h) return ACCESS_LATENCY_ZERO; return (*h)->GetAccessLatency(); } void DataPointIndex::Passive(bool v) { if (h && *h) (*h)->Passive(v); } void DataPointIndex::Range(unsigned long long int start, unsigned long long int end) { if (h && *h) (*h)->Range(start, end); } int DataPointIndex::AddCheckSumObject(CheckSum* /* cksum */) { return -1; } const CheckSum* DataPointIndex::GetCheckSumObject(int /* index */) const { return NULL; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/test0000644000000000000000000000013213214316023021262 xustar000000000000000030 mtime=1513200659.279738677 30 atime=1513200668.720854145 30 ctime=1513200659.279738677 nordugrid-arc-5.4.2/src/hed/libs/data/test/0000755000175000002070000000000013214316023021405 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/data/test/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612052416515023404 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200597.377981594 29 ctime=1513200659.27673864 nordugrid-arc-5.4.2/src/hed/libs/data/test/Makefile.am0000644000175000002070000000062412052416515023451 0ustar00mockbuildmock00000000000000TESTS = libarcdatatest check_PROGRAMS = $(TESTS) libarcdatatest_SOURCES = $(top_srcdir)/src/Test.cpp FileCacheTest.cpp libarcdatatest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libarcdatatest_LDADD = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) nordugrid-arc-5.4.2/src/hed/libs/data/test/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315725023413 xustar000000000000000030 mtime=1513200597.614984493 29 atime=1513200648.05060134 30 ctime=1513200659.277738652 nordugrid-arc-5.4.2/src/hed/libs/data/test/Makefile.in0000644000175000002070000006775713214315725023510 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. 
# @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ TESTS = libarcdatatest$(EXEEXT) check_PROGRAMS = $(am__EXEEXT_1) subdir = src/hed/libs/data/test DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__EXEEXT_1 = libarcdatatest$(EXEEXT) am_libarcdatatest_OBJECTS = libarcdatatest-Test.$(OBJEXT) \ libarcdatatest-FileCacheTest.$(OBJEXT) libarcdatatest_OBJECTS = $(am_libarcdatatest_OBJECTS) am__DEPENDENCIES_1 = libarcdatatest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) libarcdatatest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarcdatatest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcdatatest_SOURCES) DIST_SOURCES = $(libarcdatatest_SOURCES) ETAGS = etags CTAGS = ctags am__tty_colors = \ red=; grn=; lgn=; blu=; std= DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = 
@pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = 
@GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir 
= @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ libarcdatatest_SOURCES = $(top_srcdir)/src/Test.cpp FileCacheTest.cpp libarcdatatest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libarcdatatest_LDADD = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/data/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/data/test/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-checkPROGRAMS: @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list libarcdatatest$(EXEEXT): $(libarcdatatest_OBJECTS) $(libarcdatatest_DEPENDENCIES) @rm -f libarcdatatest$(EXEEXT) $(libarcdatatest_LINK) $(libarcdatatest_OBJECTS) $(libarcdatatest_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatatest-FileCacheTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatatest-Test.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcdatatest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatatest_CXXFLAGS) $(CXXFLAGS) -MT libarcdatatest-Test.o -MD -MP -MF $(DEPDIR)/libarcdatatest-Test.Tpo -c -o libarcdatatest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdatatest-Test.Tpo $(DEPDIR)/libarcdatatest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='libarcdatatest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatatest_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatatest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp libarcdatatest-Test.obj: 
$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatatest_CXXFLAGS) $(CXXFLAGS) -MT libarcdatatest-Test.obj -MD -MP -MF $(DEPDIR)/libarcdatatest-Test.Tpo -c -o libarcdatatest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdatatest-Test.Tpo $(DEPDIR)/libarcdatatest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='libarcdatatest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatatest_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatatest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` libarcdatatest-FileCacheTest.o: FileCacheTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatatest_CXXFLAGS) $(CXXFLAGS) -MT libarcdatatest-FileCacheTest.o -MD -MP -MF $(DEPDIR)/libarcdatatest-FileCacheTest.Tpo -c -o libarcdatatest-FileCacheTest.o `test -f 'FileCacheTest.cpp' || echo '$(srcdir)/'`FileCacheTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdatatest-FileCacheTest.Tpo $(DEPDIR)/libarcdatatest-FileCacheTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FileCacheTest.cpp' object='libarcdatatest-FileCacheTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatatest_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatatest-FileCacheTest.o `test -f 'FileCacheTest.cpp' || echo '$(srcdir)/'`FileCacheTest.cpp libarcdatatest-FileCacheTest.obj: FileCacheTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatatest_CXXFLAGS) $(CXXFLAGS) -MT libarcdatatest-FileCacheTest.obj -MD -MP -MF $(DEPDIR)/libarcdatatest-FileCacheTest.Tpo -c -o libarcdatatest-FileCacheTest.obj `if test -f 'FileCacheTest.cpp'; then $(CYGPATH_W) 'FileCacheTest.cpp'; else $(CYGPATH_W) '$(srcdir)/FileCacheTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdatatest-FileCacheTest.Tpo $(DEPDIR)/libarcdatatest-FileCacheTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FileCacheTest.cpp' object='libarcdatatest-FileCacheTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatatest_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatatest-FileCacheTest.obj `if test -f 'FileCacheTest.cpp'; then $(CYGPATH_W) 'FileCacheTest.cpp'; else $(CYGPATH_W) '$(srcdir)/FileCacheTest.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) 
$(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-checkPROGRAMS clean-generic clean-libtool \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-TESTS check-am clean \ clean-checkPROGRAMS clean-generic clean-libtool ctags \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/data/test/PaxHeaders.7502/FileCacheTest.cpp0000644000000000000000000000012212463677753024536 xustar000000000000000025 mtime=1422884843.9491 27 atime=1513200574.541702 30 ctime=1513200659.279738677 nordugrid-arc-5.4.2/src/hed/libs/data/test/FileCacheTest.cpp0000644000175000002070000012606712463677753024621 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include "../FileCache.h" class FileCacheTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(FileCacheTest); CPPUNIT_TEST(testStart); CPPUNIT_TEST(testRemoteCache); CPPUNIT_TEST(testRemoteCacheValidLock); CPPUNIT_TEST(testRemoteCacheInvalidLock); CPPUNIT_TEST(testRemoteCacheReplication); CPPUNIT_TEST(testStop); CPPUNIT_TEST(testStopAndDelete); CPPUNIT_TEST(testLinkFile); CPPUNIT_TEST(testLinkFileLinkCache); CPPUNIT_TEST(testCopyFile); CPPUNIT_TEST(testFile); CPPUNIT_TEST(testRelease); CPPUNIT_TEST(testCheckDN); CPPUNIT_TEST(testTwoCaches); CPPUNIT_TEST(testCreationDate); CPPUNIT_TEST(testConstructor); CPPUNIT_TEST(testBadConstructor); CPPUNIT_TEST(testInternal); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void testStart(); void testRemoteCache(); void testRemoteCacheValidLock(); void testRemoteCacheInvalidLock(); void testRemoteCacheReplication(); void testStop(); void testStopAndDelete(); void testLinkFile(); void testLinkFileLinkCache(); void testCopyFile(); void testFile(); void testRelease(); void testCheckDN(); void testTwoCaches(); void testCreationDate(); void testConstructor(); void testBadConstructor(); void testInternal(); private: std::string _testroot; std::string _cache_dir; std::string _cache_data_dir; std::string _cache_job_dir; std::string _session_dir; std::string _url; std::string _jobid; uid_t _uid; gid_t _gid; std::string _hostname; Arc::FileCache *_fc1; /** Create a file with the given size */ bool _createFile(std::string filename, std::string text = "a"); /** Return the contents of the given file */ std::string _readFile(std::string filename); }; void FileCacheTest::setUp() { std::string tmpdir; Arc::TmpDirCreate(tmpdir); _testroot = tmpdir; _cache_dir = _testroot + "/cache"; _cache_data_dir = _cache_dir + "/data"; _cache_job_dir = _cache_dir + "/joblinks"; _session_dir = _testroot + "/session"; _url = "http://host.org/file1"; _uid = getuid(); _gid = getgid(); _jobid = "1"; _fc1 = new Arc::FileCache(_cache_dir, _jobid, _uid, _gid); // construct hostname struct utsname buf; CPPUNIT_ASSERT_EQUAL(uname(&buf), 0); _hostname = buf.nodename; } void FileCacheTest::tearDown() { delete _fc1; Arc::DirDelete(_testroot); } void FileCacheTest::testStart() { // test that the cache Starts ok bool available = false; bool is_locked = false; CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); // test cache is ok CPPUNIT_ASSERT(*_fc1); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); // test cache file is locked std::string lock_file(_fc1->File(_url) + ".lock"); struct stat fileStat; CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat lock file " + lock_file, 0, stat(lock_file.c_str(), &fileStat)); // test meta file exists and contains correct url std::string meta_file(_fc1->File(_url) + ".meta"); CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat meta file " + meta_file, 0, stat(meta_file.c_str(), &fileStat)); std::string meta_url = _readFile(meta_file); CPPUNIT_ASSERT(meta_url != ""); 
CPPUNIT_ASSERT_EQUAL(std::string(_url) + '\n', meta_url); // test no lock is left on the .meta file std::string meta_lock(meta_file + ".lock"); CPPUNIT_ASSERT(stat(meta_lock.c_str(), &fileStat) != 0); // look at lock modification time - should not be more than 1 second old CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat lock file " + lock_file, 0, stat(lock_file.c_str(), &fileStat)); time_t mod_time = fileStat.st_mtime; time_t now = time(NULL); CPPUNIT_ASSERT((now - mod_time) <= 1); // check it has the right pid inside std::string lock_pid = _readFile(lock_file); CPPUNIT_ASSERT(lock_pid != ""); CPPUNIT_ASSERT_EQUAL(Arc::tostring(getpid()) + "@" + _hostname, lock_pid); // set old modification time struct utimbuf times; time_t t = 1; times.actime = t; times.modtime = t; CPPUNIT_ASSERT_EQUAL(0, utime(lock_file.c_str(), ×)); CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat lock file " + lock_file, 0, stat(lock_file.c_str(), &fileStat)); CPPUNIT_ASSERT_EQUAL(t, fileStat.st_mtime); // call Start again - should succeed and make new lock file CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(*_fc1); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); // look at lock modification time - should not be more than 1 second old CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat lock file " + lock_file, 0, stat(lock_file.c_str(), &fileStat)); mod_time = fileStat.st_mtime; now = time(NULL); CPPUNIT_ASSERT((now - mod_time) <= 1); // create a cache file _createFile(_fc1->File(_url)); // Stop cache CPPUNIT_ASSERT(_fc1->Stop(_url)); // check lock is released CPPUNIT_ASSERT(stat(lock_file.c_str(), &fileStat) != 0); // check meta file is still there CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat meta file " + meta_file, 0, stat(meta_file.c_str(), &fileStat)); // call Start again and check available is true CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(*_fc1); CPPUNIT_ASSERT(available); CPPUNIT_ASSERT(!is_locked); // check no lock exists CPPUNIT_ASSERT(stat(lock_file.c_str(), &fileStat) != 0); // force delete - file should be unavailable and locked CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked, false, true)); CPPUNIT_ASSERT(*_fc1); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat lock file " + lock_file, 0, stat(lock_file.c_str(), &fileStat)); CPPUNIT_ASSERT(_fc1->Stop(_url)); // force delete again - should have same result CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked, false, true)); CPPUNIT_ASSERT(*_fc1); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat lock file " + lock_file, 0, stat(lock_file.c_str(), &fileStat)); CPPUNIT_ASSERT(_fc1->Stop(_url)); // lock the file with a pid which is still running on this host and check is_locked _createFile(_fc1->File(_url) + ".lock", "1@" + _hostname); CPPUNIT_ASSERT(!_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(is_locked); // lock with process on different host is_locked = false; _createFile(_fc1->File(_url) + ".lock", "1@mybadhost.org"); CPPUNIT_ASSERT(!_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(is_locked); // delete lock file and try again with a non-existent pid CPPUNIT_ASSERT_EQUAL(0, remove(std::string(_fc1->File(_url) + ".lock").c_str())); _createFile(_fc1->File(_url) + ".lock", "99999@" + _hostname); CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); 
CPPUNIT_ASSERT(_fc1->Stop(_url)); // put different url in meta file _createFile(_fc1->File(_url) + ".meta", "http://badfile"); CPPUNIT_ASSERT(!_fc1->Start(_url, available, is_locked)); // locked meta file - this is ok and will be ignored _createFile(_fc1->File(_url) + ".meta", _url); _createFile(meta_lock, "1@" + _hostname); CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); CPPUNIT_ASSERT(_fc1->Stop(_url)); // remove meta file - now locked should be true CPPUNIT_ASSERT_EQUAL(0, remove(meta_file.c_str())); CPPUNIT_ASSERT(!_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(is_locked); CPPUNIT_ASSERT_EQUAL(0, remove(meta_lock.c_str())); // empty meta file - should be recreated _createFile(_fc1->File(_url) + ".meta", ""); CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat meta file " + meta_file, 0, stat(meta_file.c_str(), &fileStat)); CPPUNIT_ASSERT_EQUAL(0, (int)fileStat.st_size); CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); CPPUNIT_ASSERT(_fc1->Stop(_url)); CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat meta file " + meta_file, 0, stat(meta_file.c_str(), &fileStat)); CPPUNIT_ASSERT(fileStat.st_size > 0); // corrupted meta file - should be recreated _createFile(_fc1->File(_url) + ".meta", "\n"); CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat meta file " + meta_file, 0, stat(meta_file.c_str(), &fileStat)); CPPUNIT_ASSERT_EQUAL(1, (int)fileStat.st_size); CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); CPPUNIT_ASSERT(_fc1->Stop(_url)); CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat meta file " + meta_file, 0, stat(meta_file.c_str(), &fileStat)); CPPUNIT_ASSERT(fileStat.st_size > 1); // Use a bad cache dir if (_uid != 0 && stat("/lost+found/cache", &fileStat) != 0 && errno == EACCES) { std::vector caches; caches.push_back("/lost+found/cache"); delete _fc1; _fc1 = new Arc::FileCache(caches, _jobid, _uid, _gid); CPPUNIT_ASSERT(*_fc1); CPPUNIT_ASSERT(!_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); // with one good dir and one bad dir Start should succeed caches.push_back(_cache_dir); delete _fc1; _fc1 = new Arc::FileCache(caches, _jobid, _uid, _gid); CPPUNIT_ASSERT(*_fc1); CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); } } void FileCacheTest::testRemoteCache() { // test with remote cache std::vector caches; std::vector remote_caches; std::vector draining_caches; caches.push_back(_cache_dir); std::string remote_cache_dir = _testroot + "/remotecache"; std::string remote_cache_file(remote_cache_dir+"/data/8a/929b8384300813ba1dd2d661c42835b80691a2"); std::string remote_cache_meta(remote_cache_dir+"/data/8a/929b8384300813ba1dd2d661c42835b80691a2.meta"); std::string remote_cache_lock(remote_cache_dir+"/data/8a/929b8384300813ba1dd2d661c42835b80691a2.lock"); remote_caches.push_back(remote_cache_dir); delete _fc1; _fc1 = new Arc::FileCache(caches, remote_caches, draining_caches, _jobid, _uid, _gid); std::string local_cache_file(_fc1->File(_url)); std::string local_lock_file(local_cache_file + ".lock"); std::string local_meta_file(local_cache_file + ".meta"); struct stat fileStat; // file is not available in local or remote cache - the local cache file should be locked bool available, is_locked; CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(!available); 
CPPUNIT_ASSERT(!is_locked); CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat lock file " + local_lock_file, 0, stat(local_lock_file.c_str(), &fileStat)); CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat meta file " + local_meta_file, 0, stat(local_meta_file.c_str(), &fileStat)); CPPUNIT_ASSERT(_fc1->Stop(_url)); CPPUNIT_ASSERT_EQUAL(0, remove(local_meta_file.c_str())); // create a cache file CPPUNIT_ASSERT(_createFile(local_cache_file)); // should be available CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(available); CPPUNIT_ASSERT(!is_locked); CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat local meta file " + local_meta_file, 0, stat(local_meta_file.c_str(), &fileStat)); CPPUNIT_ASSERT(stat(local_lock_file.c_str(), &fileStat) != 0); // delete file and create in remote cache CPPUNIT_ASSERT_EQUAL(0, remove(local_cache_file.c_str())); CPPUNIT_ASSERT_EQUAL(0, remove(local_meta_file.c_str())); CPPUNIT_ASSERT(_createFile(remote_cache_file)); // check not available if not using remote caches CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked, false)); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); CPPUNIT_ASSERT(_fc1->Stop(_url)); CPPUNIT_ASSERT_EQUAL(0, remove(local_meta_file.c_str())); // check available when remote caches are enabled CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked, true)); CPPUNIT_ASSERT(available); CPPUNIT_ASSERT(!is_locked); // Add new dn CPPUNIT_ASSERT(_fc1->AddDN(_url, "/O=Grid/O=NorduGrid/OU=test.org/CN=Mr Tester", Arc::Time(Arc::Time() + 3600))); CPPUNIT_ASSERT(_fc1->CheckDN(_url, "/O=Grid/O=NorduGrid/OU=test.org/CN=Mr Tester")); CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat remote meta file " + remote_cache_meta, 0, stat(remote_cache_meta.c_str(), &fileStat)); // check nothing in local cache (in old system a symlink was created) CPPUNIT_ASSERT(lstat( local_cache_file.c_str(), &fileStat) != 0); CPPUNIT_ASSERT(lstat( local_meta_file.c_str(), &fileStat) != 0); // call link and check it was created in the remote cache std::string soft_link = _session_dir+"/"+_jobid+"/file1"; CPPUNIT_ASSERT(_fc1->Link(soft_link, _url, false, false, false, is_locked)); CPPUNIT_ASSERT_EQUAL_MESSAGE( "Could not stat remote hard link ", 0, stat((remote_cache_dir+"/joblinks/1/file1").c_str(), &fileStat) ); CPPUNIT_ASSERT_EQUAL_MESSAGE( "Hard link is a soft link", true, (lstat((remote_cache_dir+"/joblinks/1/file1").c_str(), &fileStat) == 0 && !S_ISLNK(fileStat.st_mode))); CPPUNIT_ASSERT(stat( std::string(_cache_job_dir+"/"+_jobid+"/file1").c_str(), &fileStat) != 0 ); // release CPPUNIT_ASSERT(_fc1->Release()); CPPUNIT_ASSERT(stat(std::string(remote_cache_dir+"/joblinks/1/file1").c_str(), &fileStat) != 0 ); CPPUNIT_ASSERT(stat(std::string(remote_cache_dir+"/joblinks/1").c_str(), &fileStat) != 0 ); // call Link() without Start() - should be found ok and links recreated delete _fc1; _fc1 = new Arc::FileCache(caches, remote_caches, draining_caches, _jobid, _uid, _gid); CPPUNIT_ASSERT(_fc1->Link(soft_link, _url, false, false, false, is_locked)); CPPUNIT_ASSERT_EQUAL_MESSAGE( "Could not stat remote hard link ", 0, stat((remote_cache_dir+"/joblinks/1/file1").c_str(), &fileStat) ); CPPUNIT_ASSERT_EQUAL_MESSAGE( "Hard link is a soft link", true, (lstat((remote_cache_dir+"/joblinks/1/file1").c_str(), &fileStat) == 0 && !S_ISLNK(fileStat.st_mode))); CPPUNIT_ASSERT(stat( std::string(_cache_job_dir+"/"+_jobid+"/file1").c_str(), &fileStat) != 0 ); // try again after deleting remote cache file - should fail CPPUNIT_ASSERT_EQUAL(0, remove(remote_cache_file.c_str())); 
CPPUNIT_ASSERT(!_fc1->Link(soft_link, _url, false, false, false, is_locked)); CPPUNIT_ASSERT(is_locked); } void FileCacheTest::testRemoteCacheValidLock() { std::vector caches; std::vector remote_caches; std::vector draining_caches; caches.push_back(_cache_dir); std::string remote_cache_dir = _testroot + "/remotecache"; std::string remote_cache_file(remote_cache_dir+"/data/8a/929b8384300813ba1dd2d661c42835b80691a2"); std::string remote_cache_lock(remote_cache_dir+"/data/8a/929b8384300813ba1dd2d661c42835b80691a2.lock"); remote_caches.push_back(remote_cache_dir); delete _fc1; _fc1 = new Arc::FileCache(caches, remote_caches, draining_caches, _jobid, _uid, _gid); // create a valid lock in the remote cache - the local cache should be used CPPUNIT_ASSERT(_createFile(remote_cache_lock, std::string("1@" + _hostname))); bool available, is_locked; CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); struct stat fileStat; CPPUNIT_ASSERT_EQUAL(0, stat(remote_cache_lock.c_str(), &fileStat)); // Local lock and meta files should be created CPPUNIT_ASSERT_EQUAL(0, stat(std::string(_fc1->File(_url)+".lock").c_str(), &fileStat)); CPPUNIT_ASSERT_EQUAL(0, stat(std::string(_fc1->File(_url)+".meta").c_str(), &fileStat)); // write cache file and link CPPUNIT_ASSERT(_createFile(_fc1->File(_url))); std::string soft_link = _session_dir+"/"+_jobid+"/file1"; CPPUNIT_ASSERT(_fc1->Link(soft_link, _url, false, false, true, is_locked)); CPPUNIT_ASSERT_EQUAL_MESSAGE( "Could not stat hard link ", 0, stat((_cache_job_dir+"/1/file1").c_str(), &fileStat) ); CPPUNIT_ASSERT_EQUAL_MESSAGE( "Hard link is a soft link", true, (lstat((_cache_job_dir+"/1/file1").c_str(), &fileStat) == 0 && !S_ISLNK(fileStat.st_mode))); CPPUNIT_ASSERT_EQUAL_MESSAGE( "Could not stat soft link ", 0, stat(soft_link.c_str(), &fileStat) ); // release CPPUNIT_ASSERT(_fc1->Release()); CPPUNIT_ASSERT(stat(std::string(_cache_job_dir+"/1/file1").c_str(), &fileStat) != 0 ); CPPUNIT_ASSERT(stat(std::string(_cache_job_dir+"/1").c_str(), &fileStat) != 0 ); // stop cache to release lock CPPUNIT_ASSERT(_fc1->Stop(_url)); } void FileCacheTest::testRemoteCacheInvalidLock() { std::vector caches; std::vector remote_caches; std::vector draining_caches; caches.push_back(_cache_dir); std::string remote_cache_dir = _testroot + "/remotecache"; std::string remote_cache_file(remote_cache_dir+"/data/8a/929b8384300813ba1dd2d661c42835b80691a2"); std::string remote_cache_lock(remote_cache_dir+"/data/8a/929b8384300813ba1dd2d661c42835b80691a2.lock"); remote_caches.push_back(remote_cache_dir); delete _fc1; _fc1 = new Arc::FileCache(caches, remote_caches, draining_caches, _jobid, _uid, _gid); // create a stale lock in the remote cache CPPUNIT_ASSERT(_createFile(remote_cache_lock, std::string("99999@" + _hostname))); CPPUNIT_ASSERT(_createFile(remote_cache_file)); // Start() should succeed and remove stale lock and remote cache file but return unavailable bool available, is_locked; CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); struct stat fileStat; CPPUNIT_ASSERT(stat(remote_cache_file.c_str(), &fileStat) != 0); CPPUNIT_ASSERT(stat(remote_cache_lock.c_str(), &fileStat) != 0); // Local lock and meta files should be created CPPUNIT_ASSERT_EQUAL(0, stat(std::string(_fc1->File(_url)+".lock").c_str(), &fileStat)); CPPUNIT_ASSERT_EQUAL(0, stat(std::string(_fc1->File(_url)+".meta").c_str(), &fileStat)); // stop cache to release locks CPPUNIT_ASSERT(_fc1->Stop(_url)); } 
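// Editorial sketch (not part of the original test suite): the FileCache call
// sequence that the tests above exercise, collected in one place. The
// parameter types (uid_t/gid_t) and the boolean flags passed to Link() simply
// mirror the values used in these tests; they are assumptions, not a
// documented contract.
static bool exampleFileCacheUsage(const std::string& cache_dir,
                                  const std::string& job_id,
                                  uid_t uid, gid_t gid,
                                  const std::string& url,
                                  const std::string& session_file) {
  Arc::FileCache cache(cache_dir, job_id, uid, gid);
  if (!cache) return false;                 // cache could not be set up
  bool available = false, is_locked = false;
  if (!cache.Start(url, available, is_locked))
    return false;                           // e.g. lock held by another process
  if (!available) {
    // download the file to cache.File(url) here, after which it is available
  }
  bool try_again = false;
  // link the cached file into the session directory (flags as in the tests)
  bool linked = cache.Link(session_file, url, false, false, true, try_again);
  cache.Stop(url);                          // release the lock taken by Start()
  // ... the job uses session_file ...
  cache.Release();                          // remove the per-job hard links
  return linked;
}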
void FileCacheTest::testRemoteCacheReplication() { // test with a replicate policy std::vector caches; std::vector remote_caches; std::vector draining_caches; caches.push_back(_cache_dir); std::string remote_cache_dir(_testroot + "/remotecache replicate"); std::string remote_cache_file(_testroot + "/remotecache/data/8a/929b8384300813ba1dd2d661c42835b80691a2"); std::string remote_cache_lock(_testroot + "/remotecache/data/8a/929b8384300813ba1dd2d661c42835b80691a2.lock"); remote_caches.push_back(remote_cache_dir); delete _fc1; _fc1 = new Arc::FileCache(caches, remote_caches, draining_caches, _jobid, _uid, _gid); std::string local_cache_file(_fc1->File(_url)); std::string local_lock_file(local_cache_file + ".lock"); std::string local_meta_file(local_cache_file + ".meta"); // test with a replicate policy CPPUNIT_ASSERT(_createFile(remote_cache_file)); bool available, is_locked; CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(available); CPPUNIT_ASSERT(!is_locked); // check file was copied and is not a link struct stat fileStat; CPPUNIT_ASSERT_EQUAL_MESSAGE( "Could not stat file "+local_cache_file, 0, stat( local_cache_file.c_str(), &fileStat) ); CPPUNIT_ASSERT(!S_ISLNK(fileStat.st_mode)); CPPUNIT_ASSERT_EQUAL(0, stat(local_meta_file.c_str(), &fileStat)); CPPUNIT_ASSERT(stat(local_lock_file.c_str(), &fileStat) != 0); // remove cache file and try with a locked remote file CPPUNIT_ASSERT_EQUAL(0, remove(local_cache_file.c_str())); CPPUNIT_ASSERT_EQUAL(0, remove(local_meta_file.c_str())); CPPUNIT_ASSERT(_createFile(remote_cache_lock, std::string("1@" + _hostname))); // reset cache delete _fc1; _fc1 = new Arc::FileCache(caches, remote_caches, draining_caches, _jobid, _uid, _gid); // local file should be downloaded instead CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); CPPUNIT_ASSERT_EQUAL_MESSAGE( "Could not stat file "+local_lock_file, 0, stat( local_lock_file.c_str(), &fileStat) ); CPPUNIT_ASSERT_EQUAL(0, stat(local_meta_file.c_str(), &fileStat)); CPPUNIT_ASSERT(_fc1->Stop(_url)); // bad local meta file with different url CPPUNIT_ASSERT_EQUAL(0, remove(remote_cache_lock.c_str())); CPPUNIT_ASSERT(_createFile(local_meta_file, "http://bad.host/bad/path")); CPPUNIT_ASSERT(!_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); } void FileCacheTest::testStop() { // Start cache bool available = false; bool is_locked = false; CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); std::string cache_file(_fc1->File(_url)); // test cache is ok CPPUNIT_ASSERT(*_fc1); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); // Stop cache with non-existent cache file - this is ok CPPUNIT_ASSERT(_fc1->Stop(_url)); // check lock is released std::string lock_file(cache_file + ".lock"); struct stat fileStat; CPPUNIT_ASSERT(stat(lock_file.c_str(), &fileStat) != 0); // Start again to create a new lock CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(*_fc1); CPPUNIT_ASSERT(!available); // create the cache file _createFile(cache_file); // Stop cache CPPUNIT_ASSERT(_fc1->Stop(_url)); // check lock is released CPPUNIT_ASSERT(stat(lock_file.c_str(), &fileStat) != 0); // check cache file is still there CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat cache file " + cache_file, 0, stat(cache_file.c_str(), &fileStat)); // check meta file is still there with correct url std::string meta_file(cache_file + ".meta"); CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat meta file " + 
meta_file, 0, stat(meta_file.c_str(), &fileStat)); std::string meta_url = _readFile(meta_file); CPPUNIT_ASSERT(meta_url != ""); CPPUNIT_ASSERT_EQUAL(std::string(_url) + '\n', meta_url); // call with non-existent lock file CPPUNIT_ASSERT(!_fc1->Stop(_url)); // put different pid in lock file - Stop() should return false _createFile(lock_file, Arc::tostring(getpid() + 1)); CPPUNIT_ASSERT(!_fc1->Stop(_url)); // check that lock and cache file are still there CPPUNIT_ASSERT(stat(cache_file.c_str(), &fileStat) == 0); CPPUNIT_ASSERT(stat(lock_file.c_str(), &fileStat) == 0); // check meta file is still there CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat meta file " + meta_file, 0, stat(meta_file.c_str(), &fileStat)); } void FileCacheTest::testStopAndDelete() { // Start cache bool available = false; bool is_locked = false; CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); std::string cache_file(_fc1->File(_url)); // test cache is ok CPPUNIT_ASSERT(*_fc1); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); // Stop and delete with non-existent cache file CPPUNIT_ASSERT(_fc1->StopAndDelete(_url)); // check lock is released std::string lock_file(cache_file + ".lock"); struct stat fileStat; CPPUNIT_ASSERT(stat(lock_file.c_str(), &fileStat) != 0); // check meta file is deleted std::string meta_file(cache_file + ".meta"); CPPUNIT_ASSERT(stat(meta_file.c_str(), &fileStat) != 0); // Start again to create a new lock CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(*_fc1); CPPUNIT_ASSERT(!available); // create the cache file CPPUNIT_ASSERT(_createFile(cache_file)); // check cache file is there CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat cache file " + cache_file, 0, stat(cache_file.c_str(), &fileStat)); // Stop cache and delete file CPPUNIT_ASSERT(_fc1->StopAndDelete(_url)); // check lock is released CPPUNIT_ASSERT(stat(lock_file.c_str(), &fileStat) != 0); // check cache file has gone CPPUNIT_ASSERT(stat(cache_file.c_str(), &fileStat) != 0); // check meta file is deleted CPPUNIT_ASSERT(stat(meta_file.c_str(), &fileStat) != 0); // create the cache file CPPUNIT_ASSERT(_createFile(cache_file)); // call with non-existent lock file CPPUNIT_ASSERT(!_fc1->StopAndDelete(_url)); // check the cache file is still there CPPUNIT_ASSERT(stat(cache_file.c_str(), &fileStat) == 0); // put different pid in lock file - StopAndDelete() should return false _createFile(lock_file, Arc::tostring(getpid() + 1)); CPPUNIT_ASSERT(!_fc1->StopAndDelete(_url)); // check that lock and cache files are still there CPPUNIT_ASSERT(stat(cache_file.c_str(), &fileStat) == 0); CPPUNIT_ASSERT(stat(lock_file.c_str(), &fileStat) == 0); } void FileCacheTest::testLinkFile() { std::string soft_link(_session_dir + "/" + _jobid + "/file1"); std::string hard_link(_cache_job_dir + "/" + _jobid + "/file1"); bool try_again = false; // link non-existent file - should return false and set try_again true CPPUNIT_ASSERT(!_fc1->Link(soft_link, _url, false, false, false, try_again)); CPPUNIT_ASSERT(try_again); // Start cache bool available = false; bool is_locked = false; CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); // test cache is ok CPPUNIT_ASSERT(*_fc1); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); // create cache file CPPUNIT_ASSERT(_createFile(_fc1->File(_url))); // check cache file is there struct stat fileStat; CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat cache file " + _fc1->File(_url), 0, stat(_fc1->File(_url).c_str(), &fileStat)); // create link CPPUNIT_ASSERT(_fc1->Link(soft_link, _url, false, 
false, true, try_again)); // check hard- and soft-links exist CPPUNIT_ASSERT(stat((_cache_job_dir + "/1").c_str(), &fileStat) == 0); CPPUNIT_ASSERT((fileStat.st_mode & S_IRWXU) == S_IRWXU); CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat hard link " + hard_link, 0, stat(hard_link.c_str(), &fileStat)); CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat soft link " + soft_link, 0, stat(soft_link.c_str(), &fileStat)); // create bad soft-link if (_uid != 0 && stat("/lost+found/sessiondir", &fileStat) != 0 && errno == EACCES) { CPPUNIT_ASSERT(!_fc1->Link("/lost_found/sessiondir/file1", _url, false, false, false, try_again)); CPPUNIT_ASSERT(!try_again); } // Stop cache to release lock CPPUNIT_ASSERT(_fc1->Stop(_url)); // lock with valid lock CPPUNIT_ASSERT(_createFile(_fc1->File(_url)+".lock", std::string("1@" + _hostname))); // link should fail and links should be cleaned up CPPUNIT_ASSERT(!_fc1->Link(soft_link, _url, false, false, false, try_again)); CPPUNIT_ASSERT(try_again); CPPUNIT_ASSERT(stat(hard_link.c_str(), &fileStat) != 0); CPPUNIT_ASSERT(stat(soft_link.c_str(), &fileStat) != 0); } void FileCacheTest::testLinkFileLinkCache() { // new cache with link path set std::string cache_link_dir = _testroot + "/link"; _fc1 = new Arc::FileCache(_cache_dir + " " + cache_link_dir, _jobid, _uid, _gid); CPPUNIT_ASSERT(symlink(_cache_dir.c_str(), cache_link_dir.c_str()) == 0); // Start cache bool available = false; bool is_locked = false; CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); // test cache is ok CPPUNIT_ASSERT(*_fc1); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); // create cache file CPPUNIT_ASSERT(_createFile(_fc1->File(_url))); // check cache file is there struct stat fileStat; CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat cache file " + _fc1->File(_url), 0, stat(_fc1->File(_url).c_str(), &fileStat)); // create link std::string soft_link = _session_dir + "/" + _jobid + "/file1"; std::string hard_link = _cache_job_dir + "/" + _jobid + "/file1"; bool try_again = false; CPPUNIT_ASSERT(_fc1->Link(soft_link, _url, false, false, true, try_again)); // check soft link is ok and points to the right place CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat soft link " + soft_link, 0, lstat(soft_link.c_str(), &fileStat)); // check our soft link links to yet another link CPPUNIT_ASSERT(S_ISLNK(fileStat.st_mode)); // check the target is correct CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat target of soft link " + soft_link, 0, stat(soft_link.c_str(), &fileStat)); // Stop cache to release lock CPPUNIT_ASSERT(_fc1->Stop(_url)); } void FileCacheTest::testCopyFile() { // TODO integrate into testLinkFile() std::string dest_file = _session_dir + "/" + _jobid + "/file1"; std::string hard_link = _cache_job_dir + "/" + _jobid + "/file1"; bool try_again = false; // copy non-existent file CPPUNIT_ASSERT(!_fc1->Link(dest_file, _url, true, false, false, try_again)); CPPUNIT_ASSERT(try_again); // Start cache bool available = false; bool is_locked = false; CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); // test cache is ok CPPUNIT_ASSERT(*_fc1); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); // create cache file CPPUNIT_ASSERT(_createFile(_fc1->File(_url))); // check cache file is there struct stat fileStat; CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat cache file " + _fc1->File(_url), 0, stat(_fc1->File(_url).c_str(), &fileStat)); // do copy // NOTE: not possible to test executable since can't use FileAccess directly during tests CPPUNIT_ASSERT(_fc1->Link(dest_file, _url, true, false, true, 
try_again)); // check copy exists and is executable CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat destination file " + dest_file, 0, stat(dest_file.c_str(), &fileStat)); // create bad copy if (_uid != 0 && stat("/lost+found/sessiondir", &fileStat) != 0 && errno == EACCES) CPPUNIT_ASSERT(!_fc1->Link("/lost+found/sessiondir/file1", _url, true, false, false, try_again)); // Stop cache to release lock CPPUNIT_ASSERT(_fc1->Stop(_url)); } void FileCacheTest::testFile() { // test hash returned std::string hash = "/8a/929b8384300813ba1dd2d661c42835b80691a2"; CPPUNIT_ASSERT_EQUAL(std::string(_cache_data_dir + hash), _fc1->File(_url)); // set up two caches std::vector caches; caches.push_back(_cache_dir); std::string cache_dir2 = _cache_dir + "2"; caches.push_back(cache_dir2); Arc::FileCache *fc2 = new Arc::FileCache(caches, "1", _uid, _gid); // _url can go to either cache, so check it at least is mapped to one bool found = false; if (fc2->File(_url) == std::string(_cache_data_dir + hash)) found = true; if (fc2->File(_url) == std::string(cache_dir2 + "/data" + hash)) found = true; CPPUNIT_ASSERT(found); delete fc2; } void FileCacheTest::testRelease() { // create cache and link std::string soft_link = _session_dir + "/" + _jobid + "/file1"; std::string hard_link = _cache_job_dir + "/" + _jobid + "/file1"; bool available = false; bool is_locked = false; CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); // test cache is ok CPPUNIT_ASSERT(*_fc1); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); // create cache file CPPUNIT_ASSERT(_createFile(_fc1->File(_url))); // create link bool try_again = false; CPPUNIT_ASSERT(_fc1->Link(soft_link, _url, false, false, true, try_again)); // Stop cache to release lock CPPUNIT_ASSERT(_fc1->Stop(_url)); // release CPPUNIT_ASSERT(_fc1->Release()); // check files and dir are gone struct stat fileStat; CPPUNIT_ASSERT(stat(hard_link.c_str(), &fileStat) != 0); CPPUNIT_ASSERT(stat(std::string(_cache_job_dir + "/" + _jobid).c_str(), &fileStat) != 0); CPPUNIT_ASSERT_EQUAL(0, remove(soft_link.c_str())); // start again but don't create links CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); CPPUNIT_ASSERT(_fc1->Stop(_url)); // check link dirs are not there CPPUNIT_ASSERT(stat(hard_link.c_str(), &fileStat) != 0); CPPUNIT_ASSERT(stat(std::string(_cache_job_dir + "/" + _jobid).c_str(), &fileStat) != 0); // release should not give an error, even though job dir does not exist CPPUNIT_ASSERT(_fc1->Release()); } void FileCacheTest::testCheckDN() { // Bad DN CPPUNIT_ASSERT(!_fc1->AddDN(_url, "", Arc::Time())); CPPUNIT_ASSERT(!_fc1->CheckDN(_url, "")); std::string dn1 = "/O=Grid/O=NorduGrid/OU=test.org/CN=Mr Tester"; // non-existent meta file CPPUNIT_ASSERT(!_fc1->AddDN(_url, dn1, Arc::Time())); CPPUNIT_ASSERT(!_fc1->CheckDN(_url, dn1)); // create empty meta file std::string meta_file = _fc1->File(_url) + ".meta"; CPPUNIT_ASSERT(_createFile(meta_file, "")); CPPUNIT_ASSERT(!_fc1->AddDN(_url, dn1, Arc::Time())); CPPUNIT_ASSERT(!_fc1->CheckDN(_url, dn1)); // create proper meta file CPPUNIT_ASSERT(_createFile(meta_file, _url + '\n')); CPPUNIT_ASSERT(!_fc1->CheckDN(_url, dn1)); // add DN Arc::Time now = Arc::Time(); Arc::Time futuretime = Arc::Time(now.GetTime() + 1000); CPPUNIT_ASSERT(_fc1->AddDN(_url, dn1, futuretime)); CPPUNIT_ASSERT(_fc1->CheckDN(_url, dn1)); CPPUNIT_ASSERT_EQUAL(_url + "\n" + dn1 + " " + futuretime.str(Arc::MDSTime) + '\n', _readFile(meta_file)); // expired DN Arc::Time pasttime = Arc::Time(now.GetTime() - 10); CPPUNIT_ASSERT(_createFile(meta_file, 
_url + "\n" + dn1 + " " + pasttime.str(Arc::MDSTime) + '\n')); CPPUNIT_ASSERT(!_fc1->CheckDN(_url, dn1)); // add again futuretime = Arc::Time(now.GetTime() + 86400); CPPUNIT_ASSERT(_fc1->AddDN(_url, dn1, futuretime)); CPPUNIT_ASSERT(_fc1->CheckDN(_url, dn1)); CPPUNIT_ASSERT_EQUAL(_url + "\n" + dn1 + " " + futuretime.str(Arc::MDSTime) + '\n', _readFile(meta_file)); // add another DN std::string dn2 = "/O=Grid/O=NorduGrid/OU=test.org/CN=Mrs Tester"; CPPUNIT_ASSERT(!_fc1->CheckDN(_url, dn2)); CPPUNIT_ASSERT(_fc1->AddDN(_url, dn2, futuretime)); CPPUNIT_ASSERT(_fc1->CheckDN(_url, dn1)); CPPUNIT_ASSERT(_fc1->CheckDN(_url, dn2)); CPPUNIT_ASSERT_EQUAL(_url + "\n" + dn1 + " " + futuretime.str(Arc::MDSTime) + "\n" + dn2 + " " + futuretime.str(Arc::MDSTime) + '\n', _readFile(meta_file)); // create expired DN and check it gets removed pasttime = Arc::Time(now.GetTime() - 86401); CPPUNIT_ASSERT(_createFile(meta_file, _url + '\n' + dn2 + " " + pasttime.str(Arc::MDSTime) + "\n" + dn1 + " " + pasttime.str(Arc::MDSTime) + '\n')); CPPUNIT_ASSERT(_fc1->AddDN(_url, dn1, futuretime)); CPPUNIT_ASSERT(_fc1->CheckDN(_url, dn1)); CPPUNIT_ASSERT_EQUAL(_url + "\n" + dn1 + " " + futuretime.str(Arc::MDSTime) + '\n', _readFile(meta_file)); // add with no specified expiry time CPPUNIT_ASSERT(_fc1->AddDN(_url, dn2, Arc::Time(0))); // test should not fail if time changes during the test CPPUNIT_ASSERT((_url + "\n" + dn1 + " " + futuretime.str(Arc::MDSTime) + "\n" + dn2 + " " + futuretime.str(Arc::MDSTime) + '\n') == _readFile(meta_file) || (Arc::Time().GetTime() != now.GetTime())); // lock meta file - check should still work CPPUNIT_ASSERT(_createFile(meta_file + ".lock", std::string("1@" + _hostname))); CPPUNIT_ASSERT(!_fc1->AddDN(_url, dn1, futuretime)); CPPUNIT_ASSERT(_fc1->CheckDN(_url, dn1)); } void FileCacheTest::testTwoCaches() { // set up two caches std::vector caches; caches.push_back(_cache_dir); std::string cache_dir2 = _cache_dir + "2"; caches.push_back(cache_dir2); std::string url2 = "http://host.org/file2"; Arc::FileCache *fc2 = new Arc::FileCache(caches, "1", _uid, _gid); // create cache bool available = false; bool is_locked = false; CPPUNIT_ASSERT(fc2->Start(_url, available, is_locked)); std::string cache_file1 = fc2->File(_url); // test cache is ok CPPUNIT_ASSERT(*fc2); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); bool available2 = false; bool is_locked2 = false; CPPUNIT_ASSERT(fc2->Start(url2, available2, is_locked2)); std::string cache_file2 = fc2->File(url2); // test cache is ok CPPUNIT_ASSERT(!available2); CPPUNIT_ASSERT(!is_locked2); // create cache files CPPUNIT_ASSERT(_createFile(cache_file1)); CPPUNIT_ASSERT(_createFile(cache_file2)); // create links std::string soft_link = _session_dir + "/" + _jobid + "/file1"; std::string soft_link2 = _session_dir + "/" + _jobid + "/file2"; // we expect the hard links to be made to here std::string hard_link_cache1 = _cache_job_dir + "/" + _jobid + "/file1"; std::string hard_link_cache2 = cache_dir2 + "/joblinks/" + _jobid + "/file1"; std::string hard_link2_cache1 = _cache_job_dir + "/" + _jobid + "/file2"; std::string hard_link2_cache2 = cache_dir2 + "/joblinks/" + _jobid + "/file2"; bool try_again = false; CPPUNIT_ASSERT(fc2->Link(soft_link, _url, false, false, true, try_again)); CPPUNIT_ASSERT(fc2->Link(soft_link2, url2, false, false, true, try_again)); // check hard links are made in one of the caches struct stat fileStat; CPPUNIT_ASSERT(stat(hard_link_cache1.c_str(), &fileStat) == 0 || stat(hard_link_cache2.c_str(), &fileStat) == 0); 
CPPUNIT_ASSERT(stat(hard_link2_cache1.c_str(), &fileStat) == 0 || stat(hard_link2_cache2.c_str(), &fileStat) == 0); // Stop caches to release locks CPPUNIT_ASSERT(fc2->Stop(_url)); CPPUNIT_ASSERT(fc2->Stop(url2)); // release with correct IDs CPPUNIT_ASSERT(fc2->Release()); // check links and job dir are gone CPPUNIT_ASSERT(stat(hard_link_cache1.c_str(), &fileStat) != 0 && stat(hard_link_cache2.c_str(), &fileStat) != 0); CPPUNIT_ASSERT(stat(hard_link2_cache1.c_str(), &fileStat) != 0 && stat(hard_link2_cache2.c_str(), &fileStat) != 0); CPPUNIT_ASSERT(stat(std::string(_cache_job_dir + "/" + _jobid).c_str(), &fileStat) != 0); CPPUNIT_ASSERT(stat(std::string(cache_dir2 + "/joblinks/" + _jobid).c_str(), &fileStat) != 0); // copy file CPPUNIT_ASSERT_EQUAL(0, remove(soft_link.c_str())); CPPUNIT_ASSERT(fc2->Start(_url, available, is_locked)); CPPUNIT_ASSERT(fc2->Link(soft_link, _url, true, false, false, try_again)); // check job dir is created CPPUNIT_ASSERT(stat(hard_link_cache1.c_str(), &fileStat) == 0 || stat(hard_link_cache2.c_str(), &fileStat) == 0); CPPUNIT_ASSERT(fc2->Release()); } void FileCacheTest::testCreationDate() { // call with non-existent file CPPUNIT_ASSERT(!_fc1->CheckCreated(_url)); CPPUNIT_ASSERT_EQUAL(0, (int)(_fc1->GetCreated(_url).GetTime())); // Start cache and add file bool available = false; bool is_locked = false; CPPUNIT_ASSERT(_fc1->Start(_url, available, is_locked)); // test cache is ok CPPUNIT_ASSERT(*_fc1); CPPUNIT_ASSERT(!available); CPPUNIT_ASSERT(!is_locked); // create cache file CPPUNIT_ASSERT(_createFile(_fc1->File(_url))); // test creation date is available CPPUNIT_ASSERT(_fc1->CheckCreated(_url)); // get creation date from file system struct stat fileStat; CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat cache file " + _fc1->File(_url), 0, stat(_fc1->File(_url).c_str(), &fileStat)); // test this is equal to created() CPPUNIT_ASSERT_EQUAL(fileStat.st_ctime, _fc1->GetCreated(_url).GetTime()); // sleep 1 second and check dates still match sleep(1); CPPUNIT_ASSERT(fileStat.st_ctime == _fc1->GetCreated(_url).GetTime()); // Stop cache to release lock CPPUNIT_ASSERT(_fc1->Stop(_url)); } void FileCacheTest::testConstructor() { // constructor should not create anything struct stat fileStat; CPPUNIT_ASSERT(stat(_cache_data_dir.c_str(), &fileStat) != 0); CPPUNIT_ASSERT(stat(_cache_job_dir.c_str(), &fileStat) != 0); // create constructor with same parameters Arc::FileCache *fc2 = new Arc::FileCache(_cache_dir, _jobid, _uid, _gid); CPPUNIT_ASSERT(*_fc1 == *fc2); delete fc2; // test copy constructor Arc::FileCache *fc3 = new Arc::FileCache(*_fc1); CPPUNIT_ASSERT(*_fc1 == *fc3); delete fc3; // test invalid cache constructor, and that cache is not available Arc::FileCache *fc5 = new Arc::FileCache(); CPPUNIT_ASSERT(!(*_fc1 == *fc5)); CPPUNIT_ASSERT(!(*fc5)); delete fc5; // create with 2 cache dirs std::vector caches; std::string cache_dir2 = _cache_dir + "2"; std::string cache_dir3 = _cache_dir + "3/"; caches.push_back(_cache_dir); caches.push_back(cache_dir2); Arc::FileCache *fc6 = new Arc::FileCache(caches, _jobid, _uid, _gid); CPPUNIT_ASSERT(*fc6); CPPUNIT_ASSERT(!(*_fc1 == *fc6)); // create with two different caches and compare caches.clear(); caches.push_back(_cache_dir); caches.push_back(cache_dir3); Arc::FileCache *fc7 = new Arc::FileCache(caches, _jobid, _uid, _gid); CPPUNIT_ASSERT(*fc7); CPPUNIT_ASSERT(!(*fc6 == *fc7)); delete fc6; delete fc7; // constructor with remote and draining caches caches.clear(); caches.push_back(_cache_dir); std::vector remote_caches; 
remote_caches.push_back(_testroot + "remote1"); std::vector draining_caches; draining_caches.push_back(_testroot + "draining1"); Arc::FileCache *fc8 = new Arc::FileCache(caches, remote_caches, draining_caches, _jobid, _uid, _gid); CPPUNIT_ASSERT(*fc8); // file should be in main cache std::string hash = "/8a/929b8384300813ba1dd2d661c42835b80691a2"; CPPUNIT_ASSERT_EQUAL(std::string(_cache_data_dir + hash), fc8->File(_url)); delete fc8; } void FileCacheTest::testBadConstructor() { // no cache dir _fc1 = new Arc::FileCache("", _jobid, _uid, _gid); CPPUNIT_ASSERT(!(*_fc1)); delete _fc1; // two caches, one of which is bad std::vector caches; caches.push_back(_cache_dir); caches.push_back(""); _fc1 = new Arc::FileCache(caches, _jobid, _uid, _gid); CPPUNIT_ASSERT(!(*_fc1)); // call some methods bool available = false; bool is_locked = false; CPPUNIT_ASSERT(!(_fc1->Start(_url, available, is_locked))); CPPUNIT_ASSERT_EQUAL(std::string(""), _fc1->File(_url)); CPPUNIT_ASSERT(!(_fc1->Stop(_url))); CPPUNIT_ASSERT(!(_fc1->StopAndDelete(_url))); CPPUNIT_ASSERT(!(_fc1->CheckCreated(_url))); caches.clear(); caches.push_back(_cache_dir); // bad remote and draining caches std::vector remote_caches; remote_caches.push_back(""); std::vector draining_caches; delete _fc1; _fc1 = new Arc::FileCache(caches, remote_caches, draining_caches, _jobid, _uid, _gid); CPPUNIT_ASSERT(!(*_fc1)); remote_caches.clear(); remote_caches.push_back(_testroot + "/remotecache"); draining_caches.push_back(""); delete _fc1; _fc1 = new Arc::FileCache(caches, remote_caches, draining_caches, _jobid, _uid, _gid); CPPUNIT_ASSERT(!(*_fc1)); } void FileCacheTest::testInternal() { // read a non-existent file std::string pid(Arc::tostring(getpid())); std::string testfile(_testroot + "/test.file." + pid); CPPUNIT_ASSERT(_readFile(testfile) == ""); // create a file CPPUNIT_ASSERT(_createFile(testfile, pid)); // check it exists struct stat fileStat; CPPUNIT_ASSERT_EQUAL(0, stat(testfile.c_str(), &fileStat)); // check the contents CPPUNIT_ASSERT_EQUAL(pid, _readFile(testfile)); // delete CPPUNIT_ASSERT_EQUAL(0, remove((char*)testfile.c_str())); // check it has gone CPPUNIT_ASSERT(stat(testfile.c_str(), &fileStat) != 0); } bool FileCacheTest::_createFile(std::string filename, std::string text) { if (Arc::FileCreate(filename, text)) return true; // try to create necessary dirs if possible if (filename.rfind('/') == 0 || filename.rfind('/') == std::string::npos) return false; Arc::DirCreate(filename.substr(0, filename.rfind('/')), 0700, true); return Arc::FileCreate(filename, text); } std::string FileCacheTest::_readFile(std::string filename) { FILE *pFile; char mystring[1024]; pFile = fopen((char*)filename.c_str(), "r"); if (pFile == NULL) return ""; std::string data; while (fgets(mystring, sizeof(mystring), pFile)) data += std::string(mystring); fclose(pFile); return data; } CPPUNIT_TEST_SUITE_REGISTRATION(FileCacheTest); nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/DataBuffer.h0000644000000000000000000000012412075551565022555 xustar000000000000000027 mtime=1358353269.148218 27 atime=1513200574.521702 30 ctime=1513200659.220737955 nordugrid-arc-5.4.2/src/hed/libs/data/DataBuffer.h0000644000175000002070000002567312075551565022637 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_DATABUFFER_H__ #define __ARC_DATABUFFER_H__ #include #include namespace Arc { class CheckSum; /// Represents set of buffers. /** * This class is used during data transfer using DataPoint classes. 
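 *
 * A rough sketch of how the two sides of a transfer drive the buffer (an
 * editorial illustration, not taken from the ARC sources; the actual source
 * and destination I/O is elided):
 * \code
 * Arc::DataBuffer buffer; // defaults: 3 blocks of 64kB each
 *
 * // reading (filling) side, normally its own thread:
 * int rh; unsigned int rlen;
 * unsigned long long int roffset = 0;
 * while (buffer.for_read(rh, rlen, true)) {
 *   // read up to 'rlen' bytes from the source into buffer[rh];
 *   // when the source reports end of file, break out of the loop
 *   buffer.is_read(rh, rlen, roffset);
 *   roffset += rlen;
 * }
 * buffer.eof_read(true);  // no more 'read' buffers will be requested
 *
 * // writing (emptying) side, normally another thread:
 * int wh; unsigned int wlen;
 * unsigned long long int woffset;
 * while (buffer.for_write(wh, wlen, woffset, true)) {
 *   // write 'wlen' bytes from buffer[wh] at 'woffset' to the destination ...
 *   buffer.is_written(wh);
 * }
 * buffer.eof_write(true);
 * \endcode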
* \ingroup data * \headerfile DataBuffer.h arc/data/DataBuffer.h */ class DataBuffer { private: /// used to check if configuration changed int set_counter; /// general purpose mutex and condition used to achieve thread safety Glib::Mutex lock; Glib::Cond cond; /// internal struct to describe status of every buffer typedef struct { /// buffer address in memory char *start; /// true if taken by application for filling bool taken_for_read; /// true if taken by application for emptying bool taken_for_write; /// size of buffer unsigned int size; /// amount of information stored unsigned int used; /// offset in file or similar, has meaning only for application unsigned long long int offset; } buf_desc; /// amount of data passed through buffer (including current stored). /// computed using offset and size. gaps are ignored. unsigned long long int eof_pos; /// list of controlled buffers buf_desc *bufs; /// amount of controlled buffers int bufs_n; /// set to true if application's reading(filling) part won't use buffer anymore bool eof_read_flag; /// same for writing(emptying) part bool eof_write_flag; /// reading part of application experienced error bool error_read_flag; /// same for writing part bool error_write_flag; /// error was originated in DataBuffer itself bool error_transfer_flag; /// wait for any change of buffers' status bool cond_wait(); /// internal class with pointer to object to compute checksum class checksum_desc { public: checksum_desc(CheckSum *sum) : sum(sum), offset(0), ready(true) {} CheckSum *sum; unsigned long long int offset; bool ready; }; /// checksums to be computed in this buffer std::list checksums; public: /// This object controls transfer speed DataSpeed speed; /// Returns true if DataBuffer object is initialized operator bool() const { return (bufs != 0); } /// Construct a new DataBuffer object /** * \param size size of every buffer in bytes. * \param blocks number of buffers. */ DataBuffer(unsigned int size = 65536, int blocks = 3); /// Construct a new DataBuffer object with checksum computation /** * \param size size of every buffer in bytes. * \param blocks number of buffers. * \param cksum object which will compute checksum. Should not be * destroyed until DataBuffer itself. */ DataBuffer(CheckSum *cksum, unsigned int size = 65536, int blocks = 3); /// Destructor. ~DataBuffer(); /// Reinitialize buffers with different parameters. /** * \param size size of every buffer in bytes. * \param blocks number of buffers. * \param cksum object which will compute checksum. Should not be * destroyed until DataBuffer itself. * \return true if buffers were successfully initialized */ bool set(CheckSum *cksum = NULL, unsigned int size = 65536, int blocks = 3); /// Add a checksum object which will compute checksum of buffer. /** * \param cksum object which will compute checksum. Should not be * destroyed until DataBuffer itself. * \return integer position in the list of checksum objects. */ int add(CheckSum *cksum); /// Direct access to buffer by number. /** * \param n buffer number * \return buffer content */ char* operator[](int n); /// Request buffer for READING INTO it. /** * Should be called when data is being read from a source. The calling code * should write data into the returned buffer and then call is_read(). * \param handle filled with buffer's number. * \param length filled with size of buffer * \param wait if true and there are no free buffers, method will wait * for one. 
* \return true on success * For python bindings pattern of this method is * (bool, handle, length) for_read(wait). Here buffer for reading * to be provided by external code and provided to DataBuffer * object through is_read() method. Content of buffer must not exceed * provided length. */ bool for_read(int& handle, unsigned int& length, bool wait); /// Check if there are buffers which can be taken by for_read(). /** * This function checks only for buffers and does not take eof and error * conditions into account. * \return true if buffers are available */ bool for_read(); /// Informs object that data was read into buffer. /** * \param handle buffer's number. * \param length amount of data. * \param offset offset in stream, file, etc. * \return true if buffer was successfully informed * For python bindings pattern of that method is * bool is_read(handle,buffer,offset). Here buffer is string containing * content of buffer to be passed to DataBuffer object. */ bool is_read(int handle, unsigned int length, unsigned long long int offset); /// Informs object that data was read into buffer. /** * \param buf address of buffer * \param length amount of data. * \param offset offset in stream, file, etc. * \return true if buffer was successfully informed */ bool is_read(char *buf, unsigned int length, unsigned long long int offset); /// Request buffer for WRITING FROM it. /** * Should be called when data is being written to a destination. The * calling code should write the data contained in the returned buffer and * then call is_written(). * \param handle returns buffer's number. * \param length returns size of buffer * \param offset returns buffer offset * \param wait if true and there are no available buffers, * method will wait for one. * \return true on success * For python bindings pattern of this method is * (bool, handle, length, offset, buffer) for_write(wait). * Here buffer is string with content of buffer provided * by DataBuffer object. */ bool for_write(int& handle, unsigned int& length, unsigned long long int& offset, bool wait); /// Check if there are buffers which can be taken by for_write(). /** * This function checks only for buffers and does not take eof and error * conditions into account. * \return true if buffers are available */ bool for_write(); /// Informs object that data was written from buffer. /** * \param handle buffer's number. * \return true if buffer was successfully informed */ bool is_written(int handle); /// Informs object that data was written from buffer. /** * \param buf - address of buffer * \return true if buffer was successfully informed */ bool is_written(char *buf); /// Informs object that data was NOT written from buffer (and releases buffer). /** * \param handle buffer's number. * \return true if buffer was successfully informed */ bool is_notwritten(int handle); /// Informs object that data was NOT written from buffer (and releases buffer). /** * \param buf - address of buffer * \return true if buffer was successfully informed */ bool is_notwritten(char *buf); /// Informs object if there will be no more request for 'read' buffers. /** * \param v true if no more requests. */ void eof_read(bool v); /// Informs object if there will be no more request for 'write' buffers. /** * \param v true if no more requests. */ void eof_write(bool v); /// Informs object if error occurred on 'read' side. /** * \param v true if error */ void error_read(bool v); /// Informs object if error occurred on 'write' side. 
/** * \param v true if error */ void error_write(bool v); /// Returns true if object was informed about end of transfer on 'read' side. bool eof_read(); /// Returns true if object was informed about end of transfer on 'write' side. bool eof_write(); /// Returns true if object was informed about error on 'read' side. bool error_read(); /// Returns true if object was informed about error on 'write' side. bool error_write(); /// Returns true if transfer was slower than limits set in speed object. bool error_transfer(); /// Returns true if object was informed about error or internal error occurred. bool error(); /// Wait (max 60 sec.) till any action happens in object. /** * \return true if action is eof on any side */ bool wait_any(); /// Wait till there are no more used buffers left in object. /** * \return true if an error occurred while waiting */ bool wait_used(); /// Wait till no more buffers taken for "READING INTO" left in object. /** * \return true if an error occurred while waiting */ bool wait_for_read(); /// Wait till no more buffers taken for "WRITING FROM" left in object. /** * \return true if an error occurred while waiting */ bool wait_for_write(); /// Returns true if the specified checksum was successfully computed. /** * \param index of the checksum in question. * \return false if index is not in list */ bool checksum_valid(int index) const; /// Returns true if the checksum was successfully computed. bool checksum_valid() const; /// Returns CheckSum object at specified index or NULL if index is not in list. /** * \param index of the checksum in question. */ const CheckSum* checksum_object(int index) const; /// Returns first checksum object in checksum list or NULL if list is empty. const CheckSum* checksum_object() const; /// Wait until end of transfer happens on 'read' side. Always returns true. bool wait_eof_read(); /// Wait until end of transfer or error happens on 'read' side. Always returns true. bool wait_read(); /// Wait until end of transfer happens on 'write' side. Always returns true. bool wait_eof_write(); /// Wait until end of transfer or error happens on 'write' side. Always returns true. bool wait_write(); /// Wait until end of transfer happens on any side. Always returns true. bool wait_eof(); /// Returns offset following last piece of data transferred. unsigned long long int eof_position() const { return eof_pos; } /// Returns size of buffer in object. /** * If not initialized then this number represents size of default buffer. */ unsigned int buffer_size() const; }; } // namespace Arc #endif // __ARC_DATABUFFER_H__ nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/DataMover.h0000644000000000000000000000012312075551565022433 xustar000000000000000027 mtime=1358353269.148218 27 atime=1513200574.574702 29 ctime=1513200659.22273798 nordugrid-arc-5.4.2/src/hed/libs/data/DataMover.h0000644000175000002070000002170512075551565022506 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_DATAMOVER_H__ #define __ARC_DATAMOVER_H__ #include #include #include #include #include #include namespace Arc { class Logger; class URLMap; /// DataMover provides an interface to transfer data between two DataPoints. /** * Its main action is represented by Transfer methods. 
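 *
 * A minimal synchronous usage sketch, added editorially for illustration (the
 * URLs and the default-constructed UserConfig are placeholders):
 * \code
 * Arc::UserConfig usercfg;
 * Arc::DataHandle source(Arc::URL("http://example.org/file1"), usercfg);
 * Arc::DataHandle destination(Arc::URL("file:///tmp/file1"), usercfg);
 * Arc::FileCache cache; // no caching
 * Arc::URLMap map;      // no URL mapping
 * Arc::DataMover mover;
 * Arc::DataStatus res = mover.Transfer(*source, *destination, cache, map);
 * if (!res) {
 *   // transfer failed; 'res' describes the error
 * }
 * \endcode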
* \ingroup data * \headerfile DataMover.h arc/data/DataMover.h */ class DataMover { private: bool be_verbose; bool force_secure; bool force_passive; bool force_registration; bool do_checks; std::string verbose_prefix; bool do_retries; unsigned long long int default_min_speed; time_t default_min_speed_time; unsigned long long int default_min_average_speed; time_t default_max_inactivity_time; DataSpeed::show_progress_t show_progress; std::string preferred_pattern; bool cancelled; /// For safe destruction of object, Transfer() holds this lock and /// destructor waits until the lock can be obtained Glib::Mutex lock_; static Logger logger; public: /// Callback function which can be passed to Transfer(). /** * \param mover this DataMover instance * @param status result of the transfer * @param arg arguments passed in 'arg' parameter of Transfer() */ typedef void (*callback)(DataMover* mover, DataStatus status, void* arg); /// Constructor. Sets all transfer parameters to default values. DataMover(); /// Destructor cancels transfer if active and waits for cancellation to finish. ~DataMover(); /// Initiates transfer from 'source' to 'destination'. /** * An optional callback can be provided, in which case this method starts * a separate thread for the transfer and returns immediately. The callback * is called after the transfer finishes. * \param source source DataPoint to read from. * \param destination destination DataPoint to write to. * \param cache controls caching of downloaded files (if destination * url is "file://"). If caching is not needed default constructor * FileCache() can be used. * \param map URL mapping/conversion table (for 'source' URL). If URL * mapping is not needed the default constructor URLMap() can be used. * \param cb if not NULL, transfer is done in separate thread and 'cb' * is called after transfer completes/fails. * \param arg passed to 'cb'. * \param prefix if 'verbose' is activated this information will be * printed before each line representing current transfer status. * \return DataStatus object with transfer result */ DataStatus Transfer(DataPoint& source, DataPoint& destination, FileCache& cache, const URLMap& map, callback cb = NULL, void *arg = NULL, const char *prefix = NULL); /// Initiates transfer from 'source' to 'destination'. /** * An optional callback can be provided, in which case this method starts * a separate thread for the transfer and returns immediately. The callback * is called after the transfer finishes. * \param source source DataPoint to read from. * \param destination destination DataPoint to write to. * \param cache controls caching of downloaded files (if destination * url is "file://"). If caching is not needed default constructor * FileCache() can be used. * \param map URL mapping/conversion table (for 'source' URL). If URL * mapping is not needed the default constructor URLMap() can be used. * \param min_speed minimal allowed current speed. * \param min_speed_time time for which speed should be less than * 'min_speed' before transfer fails. * \param min_average_speed minimal allowed average speed. * \param max_inactivity_time time for which should be no activity * before transfer fails. * \param cb if not NULL, transfer is done in separate thread and 'cb' * is called after transfer completes/fails. * \param arg passed to 'cb'. * \param prefix if 'verbose' is activated this information will be * printed before each line representing current transfer status. 
* \return DataStatus object with transfer result */ DataStatus Transfer(DataPoint& source, DataPoint& destination, FileCache& cache, const URLMap& map, unsigned long long int min_speed, time_t min_speed_time, unsigned long long int min_average_speed, time_t max_inactivity_time, callback cb = NULL, void *arg = NULL, const char *prefix = NULL); /// Delete the file at url. /** * This method differs from DataPoint::Remove() in that for index services, * it deletes all replicas in addition to removing the index entry. * @param url file to delete * @param errcont if true then replica information will be deleted from an * index service even if deleting the physical replica fails * @return DataStatus object with result of deletion */ DataStatus Delete(DataPoint& url, bool errcont = false); /// Cancel transfer, cleaning up any data written or registered. void Cancel(); /// Returns whether printing information about transfer status is activated. bool verbose(); /// Set output of transfer status information during transfer. void verbose(bool); /// Set output of transfer status information during transfer. /** * \param prefix use this string if 'prefix' in DataMover::Transfer is NULL. */ void verbose(const std::string& prefix); /// Returns whether transfer will be retried in case of failure. bool retry(); /// Set if transfer will be retried in case of failure. void retry(bool); /// Set if high level of security (encryption) will be used during transfer if available. void secure(bool); /// Set if passive transfer should be used for FTP-like transfers. void passive(bool); /// Set if file should be transferred and registered even if such LFN is already registered and source is not one of registered locations. void force_to_meta(bool); /// Returns true if extra checks are made before transfer starts. bool checks(); /// Set if extra checks are made before transfer starts. /** * If turned on, extra checks are done before commencing the transfer, such * as checking the existence of the source file and verifying consistency * of metadata between index service and physical replica. */ void checks(bool v); /// Set minimal allowed transfer speed (default is 0) to 'min_speed'. /** * If speed drops below for time longer than 'min_speed_time', error * is raised. For more information see description of DataSpeed class. * \param min_speed minimum transfer rate in bytes/second * \param min_speed_time time in seconds over which min_speed is measured */ void set_default_min_speed(unsigned long long int min_speed, time_t min_speed_time) { default_min_speed = min_speed; default_min_speed_time = min_speed_time; } /// Set minimal allowed average transfer speed. /** * Default is 0 averaged over whole time of transfer. For more information * see description of DataSpeed class. * \param min_average_speed minimum average transfer rate over the whole * transfer in bytes/second */ void set_default_min_average_speed(unsigned long long int min_average_speed) { default_min_average_speed = min_average_speed; } /// Set maximal allowed time for no data transfer. /** * For more information see description of DataSpeed class. 
* \param max_inactivity_time maximum time in seconds which is allowed * without any data transfer */ void set_default_max_inactivity_time(time_t max_inactivity_time) { default_max_inactivity_time = max_inactivity_time; } /// Set function which is called every second during the transfer void set_progress_indicator(DataSpeed::show_progress_t func = NULL) { show_progress = func; } /// Set a preferred pattern for ordering of replicas. /** * This pattern will be used in the case of an index service URL with * multiple physical replicas and allows sorting of those replicas in order * of preference. It consists of one or more patterns separated by a pipe * character (|) listed in order of preference. If the dollar character ($) * is used at the end of a pattern, the pattern will be matched to the end * of the hostname of the replica. Example: "srm://myhost.org|.uk$|.ch$" * \param pattern pattern on which to order replicas */ void set_preferred_pattern(const std::string& pattern) { preferred_pattern = pattern; } }; } // namespace Arc #endif // __ARC_DATAMOVER_H__ nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/FileCacheHash.h0000644000000000000000000000012412075551565023161 xustar000000000000000027 mtime=1358353269.148218 27 atime=1513200574.521702 30 ctime=1513200659.229738065 nordugrid-arc-5.4.2/src/hed/libs/data/FileCacheHash.h0000644000175000002070000000142612075551565023231 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef FILE_CACHE_HASH_H_ #define FILE_CACHE_HASH_H_ #include namespace Arc { /// FileCacheHash provides methods to make hashes from strings. /** * Currently the SHA-1 hash from the openssl library is used. * \ingroup data * \headerfile FileCacheHash.h arc/data/FileCacheHash.h */ class FileCacheHash { private: /// Maximum length of an md5 hash static int MAX_MD5_LENGTH; /// Maximum length of a sha1 hash static int MAX_SHA1_LENGTH; public: /// Return a hash of the given URL, according to the current hash scheme. static std::string getHash(std::string url); /// Return the maximum length of a hash string. static int maxLength() { return MAX_SHA1_LENGTH; } }; } // namespace Arc #endif /*FILE_CACHE_HASH_H_*/ nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/cache-clean.1.in0000644000000000000000000000012712457662275023223 xustar000000000000000027 mtime=1421829309.071965 30 atime=1513200648.007600814 30 ctime=1513200659.232738102 nordugrid-arc-5.4.2/src/hed/libs/data/cache-clean.1.in0000644000175000002070000001161712457662275023273 0ustar00mockbuildmock00000000000000.TH CACHE-CLEAN 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME cache-clean \- Administration tool for the A-REX cache. .SH SYNOPSIS cache-clean [-h] [-s] [-S] [-m NN -M NN] [-E N] [-D debug_level] [-f space_command] [ -c | [ [...]] ] .SH DESCRIPTION .B cache-clean is a tool for administrators of ARC server installations to safely remove A-REX cache data and to provide an overview of the contents of the cache. It is used by the A-REX to automatically manage cache contents. There are two modes of operation - printing statistics and deleting files. If .B -s is used, then statistics are printed on each cache. If .B -m and .B -M are used then files in each cache are deleted if the space used by the cache on the file system is more than that given by .B -M, in the order of least recently accessed, until the space used by the cache is equal to what is specified by .B -m. If .B -E is used, then all files accessed less recently than the given time are deleted. 
.B -E can be used in combination with .B -m and .B -M but deleting files using .B -E is carried out first. If after this the cache used space is still more than that given by .B -M then cleaning according to those options is performed. If the cache is on a file system shared with other data then .B -S should be specified so that the space used by the cache is calculated. Otherwise all the used space on the file system is assumed to be for the cache. Using .B -S is slower so should only be used when the cache is shared. By default the "df" command is used to determine total and (if .B -S is not specified) used space. If this command is not supported on the cache file system then .B -f can be used to specify an alternate command. The output of this command must be "total_bytes used_bytes", and so the command would normally be a small script around the file system space information tool. The cache directory is passed as the last argument to this command. Cache directories are given by .B dir1, dir2.. or taken from the config file specified by .B -c or the ARC_CONFIG environment variable. .B -h - print short help .B -s - print cache statistics, without deleting anything. The output displays for each cache the number of deletable (and locked) files, the total size of these files, the percentage usage of the file system in which the cache is stored, and a histogram of access times of the files in the cache. .B -S - Calculate the size of the cache instead of taking used space on the file system. This should only be used when the cache file system is shared with other data. .B -M - the maximum used space (as % of the file system) at which to start cleaning .B -m - the minimum used space (as % of the file system) at which to stop cleaning .B -E - files accessed less recently than the given time period will be deleted. Example values of this option are 1800, 90s, 24h, 30d. The default when no suffix is given is seconds. .B -f - alternative command to "df" for obtaining the file system total and used space. The output of this command must be "total_bytes used_bytes". The cache directory is passed as the last argument to this command. .B -D - debug level. Possible values are FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG. Default level is INFO. .B -c - path to an A-REX config file, xml or ini format This tool is run periodically by the A-REX to keep the size of each cache within the limits specified in the configuration file. Therefore cleaning should not be performed manually, unless the cache size needs to be reduced temporarily. For performance reasons it may however be desirable to run cache-clean independently on the machine hosting the cache file system, if this is different from the A-REX host. The most useful function for administrators is to give an overview of the contents of the cache, using the .B -s option. Within each cache directory specified in the configuration file, there is a subdirectory for data (data/) and one for per-job hard links (joblinks/). See the A-REX Administration Guide for more details. .B cache-clean should only operate on the data subdirectory, therefore when giving .B dir arguments they should be the top level cache directory. .B cache-clean will then automatically only look at files within the data directory. .SH EXAMPLE .B cache-clean -m20 -M30 -E30d -D VERBOSE -c /etc/arc.conf Cache directories are taken from the configuration file .B /etc/arc.conf and all cache files accessed more than 30 days ago are deleted. 
Then if the used space in a cache is above 30%, data is deleted until the used space reaches 20%. Verbose debug output is enabled so information is output on each file that is deleted. .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/URLMap.cpp0000644000000000000000000000012411544363267022205 xustar000000000000000027 mtime=1301407415.744771 27 atime=1513200574.516701 30 ctime=1513200659.243738236 nordugrid-arc-5.4.2/src/hed/libs/data/URLMap.cpp0000644000175000002070000000367211544363267022262 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include namespace Arc { Logger URLMap::logger(Logger::getRootLogger(), "URLMap"); URLMap::URLMap() {} URLMap::~URLMap() {} bool URLMap::map(URL& url) const { for (std::list::const_iterator i = entries.begin(); i != entries.end(); ++i) if (url.str().substr(0, i->initial.str().length()) == i->initial.str()) { std::string tmp_url = url.str(); tmp_url.replace(0, i->initial.str().length(), i->replacement.str()); URL newurl = tmp_url; /* must return semi-valid url */ if (!newurl) { logger.msg(Arc::ERROR, "Can't use URL %s", tmp_url); return false; } if (newurl.Protocol() == "file") { /* local file - check permissions */ int h = ::open(newurl.Path().c_str(), O_RDONLY); if (h == -1) { logger.msg(ERROR, "file %s is not accessible", newurl.Path()); return false; } close(h); if (i->access) { /* how it should be accessed on nodes */ tmp_url.replace(0, i->replacement.str().length(), i->access.str()); newurl = tmp_url; newurl.ChangeProtocol("link"); } } logger.msg(INFO, "Mapping %s to %s", url.str(), newurl.str()); url = newurl; return true; } return false; } bool URLMap::local(const URL& url) const { for (std::list::const_iterator i = entries.begin(); i != entries.end(); ++i) if (url.str().substr(0, i->initial.str().length()) == i->initial.str()) return true; return false; } void URLMap::add(const URL& templ, const URL& repl, const URL& accs) { entries.push_back(map_entry(templ, repl, accs)); } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/URLMap.h0000644000000000000000000000012312075551565021651 xustar000000000000000027 mtime=1358353269.148218 27 atime=1513200574.561702 29 ctime=1513200659.22273798 nordugrid-arc-5.4.2/src/hed/libs/data/URLMap.h0000644000175000002070000000517312075551565021725 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_URLMAP_H__ #define __ARC_URLMAP_H__ #include #include #include namespace Arc { /// URLMap allows mapping certain patterns of URLs to other URLs. /** * A URLMap can be used if certain URLs can be more efficiently accessed * by other means on a certain site. For example a GridFTP storage element * may be mounted as a local file system and so a map can be made from a * gsiftp:// URL to a local file path. * \ingroup data * \headerfile URLMap.h arc/data/URLMap.h */ class URLMap { private: class map_entry { public: URL initial; URL replacement; URL access; map_entry() {} map_entry(const URL& templ, const URL& repl, const URL& accs = URL()) : initial(templ), replacement(repl), access(accs) {} }; std::list entries; static Logger logger; public: /// Construct an empty URLMap. 
URLMap(); ~URLMap(); /// Map a URL if possible. /** * If the given URL matches any template it will be changed to the mapped * URL. Additionally, if the mapped URL is a local file, a permission check * is done by attempting to open the file. If a different access path is * specified for this URL the URL will be changed to link://accesspath. To * check if a URL will be mapped without changing it local() can be used. * \param url URL to check * \return true if the URL was mapped to a new URL, false if it was not * mapped or an error occurred during mapping */ bool map(URL& url) const; /// Check if a mapping exists for a URL. /** * Checks to see if a URL will be mapped but does not do the mapping. * @param url URL to check * @return true if a mapping exists for this URL */ bool local(const URL& url) const; /// Add an entry to the URLMap. /** * All URLs matching templ will have the templ part replaced by repl. * @param templ template to replace, for example gsiftp://se.org/files * @param repl replacement for template, for example /export/grid/files * @param accs replacement path if it differs in the place the file will * actually be accessed (e.g. on worker nodes), for example * /mount/grid/files */ void add(const URL& templ, const URL& repl, const URL& accs = URL()); /// Returns true if the URLMap is not empty. operator bool() const { return entries.size() != 0; }; /// Returns true if the URLMap is empty. bool operator!() const { return entries.size() == 0; }; }; } // namespace Arc #endif // __ARC_URLMAP_H__ nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/DataHandle.h0000644000000000000000000000012412110726523022523 xustar000000000000000027 mtime=1361292627.682019 27 atime=1513200574.540702 30 ctime=1513200659.224738004 nordugrid-arc-5.4.2/src/hed/libs/data/DataHandle.h0000644000175000002070000000527412110726523022600 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_DATAHANDLE_H__ #define __ARC_DATAHANDLE_H__ #include namespace Arc { class URL; class UserConfig; /// This class is a wrapper around the DataPoint class. /** * It simplifies the construction, use and destruction of * DataPoint objects and should be used instead of DataPoint * classes directly. The appropriate DataPoint subclass is * created automatically and stored internally in DataHandle. * A DataHandle instance can be thought of as a pointer to * the DataPoint instance and the DataPoint can be accessed * through the usual dereference operators. A DataHandle * cannot be copied. * * This class is the main way to access remote data items and * obtain information about them. To simply copy a whole file * DataMover::Transfer() can be used. For partial file copy see * the examples in \ref data. 
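 *
 * A brief usage sketch, added editorially for illustration (the URL is a
 * placeholder):
 * \code
 * Arc::UserConfig usercfg;
 * Arc::DataHandle handle(Arc::URL("http://example.org/file1"), usercfg);
 * if (!handle) {
 *   // no DataPoint plugin could be loaded for this URL scheme
 * } else {
 *   // the wrapped DataPoint is reached through the dereference operators,
 *   // e.g. handle->Stat(...) to query metadata about the object
 * }
 * \endcode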
* * \ingroup data * \headerfile DataHandle.h arc/data/DataHandle.h */ class DataHandle { public: /// Construct a new DataHandle DataHandle(const URL& url, const UserConfig& usercfg) : p(getLoader().load(url, usercfg)) {} /// Destructor ~DataHandle() { if (p) delete p; } /// Returns a pointer to a DataPoint object DataPoint* operator->() { return p; } /// Returns a const pointer to a DataPoint object const DataPoint* operator->() const { return p; } /// Returns a reference to a DataPoint object DataPoint& operator*() { return *p; } /// Returns a const reference to a DataPoint object const DataPoint& operator*() const { return *p; } /// Returns true if the DataHandle is not valid bool operator!() const { return !p; } /// Returns true if the DataHandle is valid operator bool() const { return !!p; } /// Returns a pointer to new DataPoint object corresponding to URL. /** * This static method is mostly for bindings to other languages * and if available scope of obtained DataPoint is undefined. */ static DataPoint* GetPoint(const URL& url, const UserConfig& usercfg) { return getLoader().load(url, usercfg); } private: /// Pointer to specific DataPoint instance DataPoint *p; /// Returns DataPointLoader object to be used for creating DataPoint objects. static DataPointLoader& getLoader(); /// Private default constructor DataHandle(void); /// Private copy constructor and assignment operator because DataHandle /// should not be copied. DataHandle(const DataHandle&); DataHandle& operator=(const DataHandle&); }; } // namespace Arc #endif // __ARC_DATAHANDLE_H__ nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/DataPointDirect.h0000644000000000000000000000012313153453576023570 xustar000000000000000027 mtime=1504597886.887452 27 atime=1513200574.537702 29 ctime=1513200659.21873793 nordugrid-arc-5.4.2/src/hed/libs/data/DataPointDirect.h0000644000175000002070000000642013153453576023640 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_DATAPOINTDIRECT_H__ #define __ARC_DATAPOINTDIRECT_H__ #include #include #include #define MAX_PARALLEL_STREAMS 20 #define MAX_BLOCK_SIZE (10 * 1024 * 1024) namespace Arc { class DataBuffer; class DataCallback; /// DataPointDirect represents "physical" data objects. /** * This class should never be used directly, instead inherit from it to * provide a class for a specific access protocol. 
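 *
 * A bare-bones subclass outline, shown as an illustrative sketch only (the
 * class name is a placeholder; a complete, buildable example is shipped with
 * the SDK as DataPointMyProtocol.cpp):
 * \code
 * class DataPointMyProtocol : public DataPointDirect {
 *  public:
 *   DataPointMyProtocol(const URL& url, const UserConfig& usercfg,
 *                       PluginArgument* parg)
 *     : DataPointDirect(url, usercfg, parg) {}
 *   virtual DataStatus StartReading(DataBuffer& buffer);
 *   virtual DataStatus StopReading();
 *   virtual DataStatus StartWriting(DataBuffer& buffer,
 *                                   DataCallback *space_cb = NULL);
 *   virtual DataStatus StopWriting();
 * };
 * \endcode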
* \ingroup data * \headerfile DataPointDirect.h arc/data/DataPointDirect.h */ class DataPointDirect : public DataPoint { public: virtual ~DataPointDirect(); virtual bool IsIndex() const; virtual bool IsStageable() const; virtual long long int BufSize() const; virtual int BufNum() const; virtual bool Local() const; virtual bool ReadOnly() const; virtual void ReadOutOfOrder(bool v); virtual bool WriteOutOfOrder(); virtual void SetAdditionalChecks(bool v); virtual bool GetAdditionalChecks() const; virtual void SetSecure(bool v); virtual bool GetSecure() const; virtual void Passive(bool v); virtual void Range(unsigned long long int start = 0, unsigned long long int end = 0); virtual int AddCheckSumObject(CheckSum *cksum); virtual const CheckSum* GetCheckSumObject(int index) const; virtual DataStatus Stat(std::list& files, const std::list& urls, DataPointInfoType verb = INFO_TYPE_ALL); // Not supported for direct data points: virtual DataStatus Resolve(bool source); virtual DataStatus Resolve(bool source, const std::list& urls); virtual bool Registered() const; virtual DataStatus PreRegister(bool replication, bool force = false); virtual DataStatus PostRegister(bool replication); virtual DataStatus PreUnregister(bool replication); virtual DataStatus Unregister(bool all); virtual bool AcceptsMeta() const; virtual bool ProvidesMeta() const; virtual const URL& CurrentLocation() const; virtual DataPoint* CurrentLocationHandle() const; virtual const std::string& CurrentLocationMetadata() const; virtual DataStatus CompareLocationMetadata() const; virtual bool NextLocation(); virtual bool LocationValid() const; virtual bool HaveLocations() const; virtual bool LastLocation(); virtual DataStatus AddLocation(const URL& url, const std::string& meta); virtual DataStatus RemoveLocation(); virtual DataStatus RemoveLocations(const DataPoint& p); virtual DataStatus ClearLocations(); virtual void SortLocations(const std::string& /* pattern */, const URLMap& /* url_map */) {}; protected: DataBuffer *buffer; long long int bufsize; int bufnum; bool local; bool readonly; bool linkable; bool is_secure; bool force_secure; bool force_passive; bool additional_checks; bool allow_out_of_order; unsigned long long int range_start; unsigned long long int range_end; std::list checksums; DataPointDirect(const URL& url, const UserConfig& usercfg, PluginArgument* parg); }; } // namespace Arc #endif // __ARC_DATAPOINTDIRECT_H__ nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/DataPointIndex.h0000644000000000000000000000012412167545534023426 xustar000000000000000027 mtime=1373555548.255766 27 atime=1513200574.524702 30 ctime=1513200659.219737943 nordugrid-arc-5.4.2/src/hed/libs/data/DataPointIndex.h0000644000175000002070000000723012167545534023475 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_DATAPOINTINDEX_H__ #define __ARC_DATAPOINTINDEX_H__ #include #include #include #include namespace Arc { /// DataPointIndex represents "index" data objects, e.g. catalogs. /** * This class should never be used directly, instead inherit from it to * provide a class for a specific indexing service. 
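 *
 * From client code an index DataPoint is normally reached through DataHandle;
 * after Resolve() the registered replicas can be walked with the location
 * methods. An illustrative sketch only (exact iteration semantics should be
 * checked against the method documentation below):
 * \code
 * if (!handle->Resolve(true)) return 1;      // resolve replicas for reading
 * while (handle->LocationValid()) {
 *   std::cout << handle->CurrentLocation().str() << std::endl;
 *   if (!handle->NextLocation()) break;      // move to the next replica
 * }
 * \endcode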
* \ingroup data * \headerfile DataPointIndex.h arc/data/DataPointIndex.h */ class DataPointIndex : public DataPoint { public: virtual ~DataPointIndex(); virtual const URL& CurrentLocation() const; virtual const std::string& CurrentLocationMetadata() const; virtual DataPoint* CurrentLocationHandle() const; virtual DataStatus CompareLocationMetadata() const; virtual bool NextLocation(); virtual bool LocationValid() const; virtual bool HaveLocations() const; virtual bool LastLocation(); virtual DataStatus RemoveLocation(); virtual DataStatus RemoveLocations(const DataPoint& p); virtual DataStatus ClearLocations(); virtual DataStatus AddLocation(const URL& url, const std::string& meta); virtual void SortLocations(const std::string& pattern, const URLMap& url_map); virtual bool IsIndex() const; virtual bool IsStageable() const; virtual bool AcceptsMeta() const; virtual bool ProvidesMeta() const; virtual void SetMeta(const DataPoint& p); virtual void SetCheckSum(const std::string& val); virtual void SetSize(const unsigned long long int val); virtual bool Registered() const; virtual void SetTries(const int n); // the following are relayed to the current location virtual long long int BufSize() const; virtual int BufNum() const; virtual bool Local() const; virtual bool ReadOnly() const; virtual DataStatus PrepareReading(unsigned int timeout, unsigned int& wait_time); virtual DataStatus PrepareWriting(unsigned int timeout, unsigned int& wait_time); virtual DataStatus StartReading(DataBuffer& buffer); virtual DataStatus StartWriting(DataBuffer& buffer, DataCallback *space_cb = NULL); virtual DataStatus StopReading(); virtual DataStatus StopWriting(); virtual DataStatus FinishReading(bool error = false); virtual DataStatus FinishWriting(bool error = false); virtual std::vector TransferLocations() const; virtual void ClearTransferLocations(); virtual DataStatus Check(bool check_meta); virtual DataStatus Remove(); virtual void ReadOutOfOrder(bool v); virtual bool WriteOutOfOrder(); virtual void SetAdditionalChecks(bool v); virtual bool GetAdditionalChecks() const; virtual void SetSecure(bool v); virtual bool GetSecure() const; virtual DataPointAccessLatency GetAccessLatency() const; virtual void Passive(bool v); virtual void Range(unsigned long long int start = 0, unsigned long long int end = 0); virtual int AddCheckSumObject(CheckSum *cksum); virtual const CheckSum* GetCheckSumObject(int index) const; protected: bool resolved; bool registered; DataPointIndex(const URL& url, const UserConfig& usercfg, PluginArgument* parg); private: // Following members must be kept synchronised hence they are private /// List of locations at which file can be probably found. std::list locations; std::list::iterator location; DataHandle *h; void SetHandle(); }; } // namespace Arc #endif // __ARC_DATAPOINTINDEX_H__ nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/cache-list.1.in0000644000000000000000000000012712123705613023074 xustar000000000000000027 mtime=1364167563.653962 30 atime=1513200648.036601169 30 ctime=1513200659.234738126 nordugrid-arc-5.4.2/src/hed/libs/data/cache-list.1.in0000644000175000002070000000250512123705613023140 0ustar00mockbuildmock00000000000000.TH CACHE-LIST 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME cache-list \- List contents of the A-REX cache. .SH SYNOPSIS cache-list [-h] [-c config_file] [url1 [url2 [...]]] .SH DESCRIPTION .B -h - print short help .B -c - configuration file from which to read cache information. 
The ARC_CONFIG environment variable can be set in place of this option. .B cache-list is used to list all files present in each cache or, given a list of URLs as arguments, shows the location of each URL in the cache if present. If no arguments are given, it prints to stdout each cache directory specified in the configuration file then a list of files in each cache directory and the corresponding URLs of their source in the format: .B url filename If arguments are given, each cache is checked for the existence of each URL. If a URL is present in the cache then the URL and filename are printed to stdout in the above format. This tool can be useful for finding out if a certain URL is stored in the cache, or simply to give a list of all URLs in the cache. .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/FileCache.h0000644000000000000000000000012212463677753022364 xustar000000000000000025 mtime=1422884843.9491 27 atime=1513200574.563702 30 ctime=1513200659.228738053 nordugrid-arc-5.4.2/src/hed/libs/data/FileCache.h0000644000175000002070000004316712463677753022446 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef FILECACHE_H_ #define FILECACHE_H_ #include #include #include #include #include #include #include "FileCacheHash.h" namespace Arc { /// Contains data on the parameters of a cache. /** * \ingroup data * \headerfile FileCache.h arc/data/FileCache.h */ struct CacheParameters { std::string cache_path; std::string cache_link_path; }; #ifndef WIN32 /// FileCache provides an interface to all cache operations. /** * When it is decided a file should be downloaded to the cache, Start() * should be called, so that the cache file can be prepared and locked if * necessary. If the file is already available it is not locked and Link() * can be called immediately to create a hard link to a per-job directory in * the cache and then soft link, or copy the file directly to the session * directory so it can be accessed from the user's job. If the file is not * available, Start() will lock it, then after downloading Link() can be * called. Stop() must then be called to release the lock. If the transfer * failed, StopAndDelete() can be called to clean up the cache file. After * a job has finished, Release() should be called to remove the hard links * created for that job. * * Cache files are locked for writing using the FileLock class, which * creates a lock file with the '.lock' suffix next to the cache file. * If Start() is called and the cache file is not already available, it * creates this lock and Stop() must be called to release it. All processes * calling Start() must wait until they successfully obtain the lock before * downloading can begin. * * The cache directory(ies) and the optional directory to link to when the * soft-links are made are set in the constructor. The names of cache files * are formed from an SHA-1 hash of the URL to cache. To ease the load on * the file system, the cache files are split into subdirectories based on * the first two characters in the hash. For example the file with hash * 76f11edda169848038efbd9fa3df5693 is stored in * 76/f11edda169848038efbd9fa3df5693. A cache filename can be found by * passing the URL to Find(). 
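 *
 * A typical cache cycle built from the calls described above, as an
 * illustrative sketch only (cache is a FileCache instance, url and
 * session_file are placeholder strings, and the actual download is elided):
 * \code
 * bool available, is_locked;
 * if (!cache.Start(url, available, is_locked)) {
 *   // if is_locked is true another process holds the file - retry later
 *   return false;
 * }
 * if (!available) {
 *   // ... download url into cache.File(url) ...
 * }
 * bool try_again;
 * if (!cache.Link(session_file, url, false, false, !available, try_again)) {
 *   // if try_again is set the file changed during linking - go back to Start()
 * }
 * if (!available) cache.Stop(url);   // release the write lock taken above
 * // ... and once the job has finished: cache.Release();
 * \endcode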
For more information on the structure of the * cache, see the ARC Computing Element System Administrator Guide * (NORDUGRID-MANUAL-20). * \ingroup data * \headerfile FileCache.h arc/data/FileCache.h */ class FileCache { private: /// Map of urls and the cache they are mapped to/exist in std::map _cache_map; /// Vector of caches. Each entry defines a cache and specifies /// a cache directory and optional link path. std::vector _caches; /// Vector of remote caches. Each entry defines a cache and specifies /// a cache directory, per-job directory and link/copy information. std::vector _remote_caches; /// Vector of caches to be drained. std::vector _draining_caches; /// A list of URLs that have already been unlocked in Link(). URLs in /// this set will not be unlocked in Stop(). std::set _urls_unlocked; /// Identifier used to claim files, ie the job id std::string _id; /// uid corresponding to the user running the job. /// The directory with hard links to cached files will be searchable only by this user uid_t _uid; /// gid corresponding to the user running the job. gid_t _gid; /// The sub-dir of the cache for data static const std::string CACHE_DATA_DIR; /// The sub-dir of the cache for per-job links static const std::string CACHE_JOB_DIR; /// The length of each cache subdirectory static const int CACHE_DIR_LENGTH; /// The number of levels of cache subdirectories static const int CACHE_DIR_LEVELS; /// The suffix to use for meta files static const std::string CACHE_META_SUFFIX; /// Default validity time of cached DNs static const int CACHE_DEFAULT_AUTH_VALIDITY; /// Timeout on cache lock. The lock file is continually updated during the /// transfer so 15 mins with no transfer update should mean stale lock. static const int CACHE_LOCK_TIMEOUT; /// Timeout on lock on meta file static const int CACHE_META_LOCK_TIMEOUT; /// Common code for constructors bool _init(const std::vector& caches, const std::vector& remote_caches, const std::vector& draining_caches, const std::string& id, uid_t job_uid, gid_t job_gid); /// Check the meta file corresponding to cache file filename is valid, /// and create one if it doesn't exist. Returns false if creation fails, /// and if it was due to being locked, is_locked is set to true. bool _checkMetaFile(const std::string& filename, const std::string& url, bool& is_locked); /// Create the meta file with the given content. Returns false and sets /// is_locked to true if the file is already locked. bool _createMetaFile(const std::string& meta_file, const std::string& content, bool& is_locked); /// Return the filename of the meta file associated to the given url std::string _getMetaFileName(const std::string& url); /// Get the hashed path corresponding to the given url std::string _getHash(const std::string& url) const; /// Choose a cache directory to use for this url, based on the free /// size of the cache directories. Returns the cache to use. struct CacheParameters _chooseCache(const std::string& url) const; /// Return the free space in GB at the given path float _getCacheInfo(const std::string& path) const; /// For cleaning up after a cache file was locked during Link() bool _cleanFilesAndReturnFalse(const std::string& hard_link_file, bool& locked); /// Logger for messages static Logger logger; public: /// Create a new FileCache instance with one cache directory. /** * @param cache_path The format is "cache_dir[ link_path]". 
* path is the path to the cache directory and the optional * link_path is used to create a link in case the * cache directory is visible under a different name during actual * usage. When linking from the session dir this path is used * instead of cache_path. * @param id the job id. This is used to create the per-job dir * which the job's cache files will be hard linked from * @param job_uid owner of job. The per-job dir will only be * readable by this user * @param job_gid owner group of job */ FileCache(const std::string& cache_path, const std::string& id, uid_t job_uid, gid_t job_gid); /// Create a new FileCache instance with multiple cache dirs. /** * @param caches a vector of strings describing caches. The format * of each string is "cache_dir[ link_path]". * @param id the job id. This is used to create the per-job dir * which the job's cache files will be hard linked from * @param job_uid owner of job. The per-job dir will only be * readable by this user * @param job_gid owner group of job */ FileCache(const std::vector& caches, const std::string& id, uid_t job_uid, gid_t job_gid); /// Create a new FileCache instance with multiple cache dirs, remote caches and draining cache directories. /** * @param caches a vector of strings describing caches. The format * of each string is "cache_dir[ link_path]". * @param remote_caches Same format as caches. These are the * paths to caches which are under the control of other Grid * Managers and are read-only for this process. * @param draining_caches Same format as caches. These are the * paths to caches which are to be drained. * @param id the job id. This is used to create the per-job dir * which the job's cache files will be hard linked from * @param job_uid owner of job. The per-job dir will only be * readable by this user * @param job_gid owner group of job */ FileCache(const std::vector& caches, const std::vector& remote_caches, const std::vector& draining_caches, const std::string& id, uid_t job_uid, gid_t job_gid); /// Default constructor. Invalid cache. FileCache(): _uid(0),_gid(0) { _caches.clear(); } /// Start preparing to cache the file specified by url. /** * Start() returns true if the file was successfully prepared. The * available parameter is set to true if the file already exists and in * this case Link() can be called immediately. If available is false the * caller should write the file and then call Link() followed by Stop(). * Start() returns false if it was unable to prepare the cache file for any * reason. In this case the is_locked parameter should be checked and if * it is true the file is locked by another process and the caller should * try again later. * * @param url url that is being downloaded * @param available true on exit if the file is already in cache * @param is_locked true on exit if the file is already locked, ie * cannot be used by this process * @param use_remote Whether to look to see if the file exists in a * remote cache. Can be set to false if for example a forced download * to cache is desired. * @param delete_first If true then any existing cache file is deleted. * @return true if file is available or ready to be downloaded, false if * the file is already locked or preparing the cache failed. */ bool Start(const std::string& url, bool& available, bool& is_locked, bool use_remote = true, bool delete_first = false); /// Stop the cache after a file was downloaded. 
/** * This method (or stopAndDelete()) must be called after file was * downloaded or download failed, to release the lock on the * cache file. Stop() does not delete the cache file. It returns * false if the lock file does not exist, or another pid was found * inside the lock file (this means another process took over the * lock so this process must go back to Start()), or if it fails * to delete the lock file. It must only be called if the caller * actually downloaded the file. It must not be called if the file was * already available. * @param url the url of the file that was downloaded * @return true if the lock was successfully released. */ bool Stop(const std::string& url); /// Stop the cache after a file was downloaded and delete the cache file. /** * Release the cache file and delete it, because for example a * failed download left an incomplete copy. This method also deletes * the meta file which contains the url corresponding to the cache file. * The logic of the return value is the same as Stop(). It must only be * called if the caller downloaded the file. * @param url the url corresponding to the cache file that has * to be released and deleted * @return true if the cache file and lock were successfully removed. */ bool StopAndDelete(const std::string& url); /// Get the cache filename for the given URL. /** * @param url the URL to look for in the cache * @return the full pathname of the file in the cache which corresponds to * the given url. */ std::string File(const std::string& url); /// Link a cache file to the place it will be used. /** * Create a hard-link to the per-job dir from the cache dir, and then a * soft-link from here to the session directory. This is effectively * 'claiming' the file for the job, so even if the original cache file is * deleted, eg by some external process, the hard link still exists until * it is explicitly released by calling Release(). * * If cache_link_path is set to "." or copy or executable is true then * files will be copied directly to the session directory rather than * linked. * * After linking or copying, the cache file is checked for the presence of * a write lock, and whether the modification time has changed since * linking started (in case the file was locked, modified then released * during linking). If either of these are true the links created during * Link() are deleted, try_again is set to true and Link() returns false. * The caller should then go back to Start(). If the caller has obtained a * write lock from Start() and then downloaded the file, it should set * holding_lock to true, in which case none of the above checks are * performed. * * The session directory is accessed under the uid and gid passed in * the constructor. 
* * @param link_path path to the session dir for soft-link or new file * @param url url of file to link to or copy * @param copy If true the file is copied rather than soft-linked * to the session dir * @param executable If true then file is copied and given execute * permissions in the session dir * @param holding_lock Should be set to true if the caller already holds * the lock * @param try_again If after linking the cache file was found to be locked, * deleted or modified, then try_again is set to true * @return true if linking succeeded, false if an error occurred or the * file was locked or modified by another process during linking */ bool Link(const std::string& link_path, const std::string& url, bool copy, bool executable, bool holding_lock, bool& try_again); /// Release cache files used in this cache. /** * Release claims on input files for the job specified by id. * For each cache directory the per-job directory with the * hard-links will be deleted. * @return false if any directory fails to be deleted */ bool Release() const; /// Store a DN in the permissions cache for the given url. /** * Add the given DN to the list of cached DNs with the given expiry time. * @param url the url corresponding to the cache file to which we * want to add a cached DN * @param DN the DN of the user * @param expiry_time the expiry time of this DN in the DN cache * @return true if the DN was successfully added */ bool AddDN(const std::string& url, const std::string& DN, const Time& expiry_time); /// Check if a DN exists in the permission cache and is still valid for the given url. /** * Check if the given DN is cached for authorisation and it is still valid. * @param url the url corresponding to the cache file for which we * want to check the cached DN * @param DN the DN of the user * @return true if the DN exists and is still valid */ bool CheckDN(const std::string& url, const std::string& DN); /// Check if it is possible to obtain the creation time of a cache file. /** * @param url the url corresponding to the cache file for which we * want to know if the creation date exists * @return true if the file exists in the cache, since the creation time * is the creation time of the cache file. */ bool CheckCreated(const std::string& url); /// Get the creation time of a cached file. /** * @param url the url corresponding to the cache file for which we * want to know the creation date * @return creation time of the file or 0 if the cache file does not exist */ Time GetCreated(const std::string& url); /// Returns true if object is useable. 
operator bool() { return (!_caches.empty()); }; /// Returns true if all attributes are equal bool operator==(const FileCache& a); }; #else class FileCache { public: FileCache(const std::string& cache_path, const std::string& id, int job_uid, int job_gid) {} FileCache(const std::vector& caches, const std::string& id, int job_uid, int job_gid) {} FileCache(const std::vector& caches, const std::vector& remote_caches, const std::vector& draining_caches, const std::string& id, int job_uid, int job_gid, int cache_max=100, int cache_min=100) {} FileCache(const FileCache& cache) {} FileCache() {} bool Start(const std::string& url, bool& available, bool& is_locked, bool use_remote=true, bool delete_first=false) { return false; } bool Stop(const std::string& url) { return false; } bool StopAndDelete(const std::string& url) {return false; } std::string File(const std::string& url) { return url; } bool Link(const std::string& link_path, const std::string& url, bool copy, bool executable, bool holding_lock, bool& try_again) { return false; } bool Release() const { return false;} bool AddDN(const std::string& url, const std::string& DN, const Time& expiry_time) { return false;} bool CheckDN(const std::string& url, const std::string& DN) { return false; } bool CheckCreated(const std::string& url){ return false; } Time GetCreated(const std::string& url) { return Time(); } operator bool() { return false; }; bool operator==(const FileCache& a) { return false; } }; #endif /*WIN32*/ } // namespace Arc #endif /*FILECACHE_H_*/ nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/DataStatus.cpp0000644000000000000000000000012412102474175023152 xustar000000000000000027 mtime=1359640701.979832 27 atime=1513200574.521702 30 ctime=1513200659.244738248 nordugrid-arc-5.4.2/src/hed/libs/data/DataStatus.cpp0000644000175000002070000001266612102474175023232 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include namespace Arc { static const char *status_string[] = { istring("Operation completed successfully"), // Success istring("Source is invalid URL"), // ReadAcquireError istring("Destination is invalid URL"), // WriteAcquireError istring("Resolving of index service for source failed"), // ReadResolveError istring("Resolving of index service for destination failed"), // WriteResolveError istring("Can't read from source"), // ReadStartError istring("Can't write to destination"), // WriteStartError istring("Failed while reading from source"), // ReadError istring("Failed while writing to destination"), // WriteError istring("Failed while transferring data"), // TransferError istring("Failed while finishing reading from source"), // ReadStopError istring("Failed while finishing writing to destination"), // WriteStopError istring("First stage of registration to index service failed"), // PreRegisterError istring("Last stage of registration to index service failed"), // PostRegisterError istring("Unregistering from index service failed"), // UnregisterError istring("Error in caching procedure"), // CacheError istring("Error due to expiration of provided credentials"), // CredentialsExpiredError istring("Delete error"), // DeleteError istring("No valid location available"), // NoLocationError istring("Location already exists"), // LocationAlreadyExistsError istring("Operation not supported for this kind of URL"), // NotSupportedForDirectDataPointsError istring("Feature is not implemented"), // UnimplementedError istring("Already reading from source"), // 
IsReadingError istring("Already writing to destination"), // IsWritingError istring("Read access check failed"), // CheckError istring("Directory listing failed"), // ListError istring("Object is not suitable for listing"), // ListNonDirError istring("Failed to obtain information about file"), // StatError istring("No such file or directory"), // StatNotPresentError istring("Object not initialized (internal error)"), // NotInitializedError istring("Operating System error"), // SystemError istring("Failed to stage file(s)"), // StageError istring("Inconsistent metadata"), // InconsistentMetadataError istring("Failed to prepare source"), // ReadPrepareError istring("Should wait for source to be prepared"), // ReadPrepareWait istring("Failed to prepare destination"), // WritePrepareError istring("Should wait for destination to be prepared"), // WritePrepareWait istring("Failed to finalize reading from source"), // ReadFinishError istring("Failed to finalize writing to destination"), // WriteFinishError istring("Failed to create directory"), // CreateDirectoryError istring("Failed to rename URL"), // RenameError istring("Data was already cached"), // SuccessCached istring("Operation cancelled successfully"), // SuccessCancelled istring("Generic error"), // GenericError istring("Unknown error") // UnknownError }; static const char* errnodesc[] = { istring("No error"), // DataStatusErrnoBase istring("Transfer timed out"), // EARCTRANSFERTIMEOUT istring("Checksum mismatch"), // EARCCHECKSUM istring("Bad logic"), // EARCLOGIC istring("All results obtained are invalid"), // EARCRESINVAL istring("Temporary service error"), // EARCSVCTMP istring("Permanent service error"), // EARCSVCPERM istring("Error switching uid"), // EARCUIDSWITCH istring("Request timed out"), // EARCREQUESTTIMEOUT istring("Unknown error") // EARCOTHER }; DataStatus::operator std::string() const { unsigned int status_ = status; if (status_ >= DataStatusRetryableBase) status_ -= DataStatusRetryableBase; if (status_ > UnknownError) status_ = UnknownError; std::string s(status_string[status_]); if (Errno > 0 && Errno < DataStatusErrnoMax) s += ": " + GetStrErrno(); if (!desc.empty() && desc != GetStrErrno()) s += ": " + desc; return s; } std::string DataStatus::GetStrErrno() const { if (Errno > DataStatusErrnoMax) return "Unknown error"; if (Errno > DataStatusErrnoBase) return errnodesc[Errno - DataStatusErrnoBase]; return StrError(Errno); } bool DataStatus::Retryable() const { return (status > DataStatusRetryableBase || Errno == EAGAIN || Errno == EBUSY || Errno == ETIMEDOUT || Errno == EARCSVCTMP || Errno == EARCTRANSFERTIMEOUT || Errno == EARCCHECKSUM || Errno == EARCOTHER); } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/examples0000644000000000000000000000013213214316023022121 xustar000000000000000030 mtime=1513200659.305738995 30 atime=1513200668.720854145 30 ctime=1513200659.305738995 nordugrid-arc-5.4.2/src/hed/libs/data/examples/0000755000175000002070000000000013214316023022244 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/data/examples/PaxHeaders.7502/partial_copy.cpp0000644000000000000000000000012412110726523025375 xustar000000000000000027 mtime=1361292627.682019 27 atime=1513200574.513701 30 ctime=1513200659.304738983 nordugrid-arc-5.4.2/src/hed/libs/data/examples/partial_copy.cpp0000644000175000002070000000304512110726523025444 0ustar00mockbuildmock00000000000000#include #include #include #include using namespace Arc; int main(int argc, char** argv) { #define 
DESIRED_SIZE 512 if (argc != 2) { std::cerr<<"Usage: partial_copy filename"<SetSecure(false); // GridFTP servers generally do not have encrypted data channel FileInfo info; if(!handle->Stat(info)) { std::cerr<<"Failed Stat"<GetSize(); if(fsize == (unsigned long long int)-1) { std::cerr<<"file size is not available"< DESIRED_SIZE) { handle->Range(fsize-DESIRED_SIZE,fsize-1); }; DataBuffer buffer; if(!handle->StartReading(buffer)) { std::cerr<<"Failed to start reading"<StopReading(); return 0; } nordugrid-arc-5.4.2/src/hed/libs/data/examples/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712115374044024244 xustar000000000000000027 mtime=1362491428.782947 30 atime=1513200597.309980762 30 ctime=1513200659.302738958 nordugrid-arc-5.4.2/src/hed/libs/data/examples/Makefile.am0000644000175000002070000000204512115374044024307 0ustar00mockbuildmock00000000000000check_PROGRAMS = partial_copy simple_copy partial_copy_SOURCES = partial_copy.cpp partial_copy_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) partial_copy_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarcdata.la $(GLIBMM_LIBS) simple_copy_SOURCES = simple_copy.cpp simple_copy_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) simple_copy_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarcdata.la $(GLIBMM_LIBS) check_LTLIBRARIES = libdmcmy.la libdmcmy_la_SOURCES = DataPointMyProtocol.cpp libdmcmy_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libdmcmy_la_LIBADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarcdata.la $(LIBXML2_LIBS) $(GLIBMM_LIBS) libdmcmy_la_LDFLAGS = -no-undefined -avoid-version -module exampledir = $(pkgdatadir)/examples/sdk example_DATA = partial_copy.cpp simple_copy.cpp DataPointMyProtocol.cpp nordugrid-arc-5.4.2/src/hed/libs/data/examples/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315725024252 xustar000000000000000030 mtime=1513200597.361981398 30 atime=1513200648.065601523 29 ctime=1513200659.30373897 nordugrid-arc-5.4.2/src/hed/libs/data/examples/Makefile.in0000644000175000002070000007452013214315725024331 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ check_PROGRAMS = partial_copy$(EXEEXT) simple_copy$(EXEEXT) subdir = src/hed/libs/data/examples DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__DEPENDENCIES_1 = libdmcmy_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarcdata.la $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libdmcmy_la_OBJECTS = libdmcmy_la-DataPointMyProtocol.lo libdmcmy_la_OBJECTS = $(am_libdmcmy_la_OBJECTS) libdmcmy_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libdmcmy_la_CXXFLAGS) \ $(CXXFLAGS) $(libdmcmy_la_LDFLAGS) $(LDFLAGS) -o $@ am_partial_copy_OBJECTS = partial_copy-partial_copy.$(OBJEXT) partial_copy_OBJECTS = $(am_partial_copy_OBJECTS) partial_copy_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarcdata.la $(am__DEPENDENCIES_1) partial_copy_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(partial_copy_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_simple_copy_OBJECTS = simple_copy-simple_copy.$(OBJEXT) simple_copy_OBJECTS = $(am_simple_copy_OBJECTS) simple_copy_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarcdata.la $(am__DEPENDENCIES_1) simple_copy_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(simple_copy_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libdmcmy_la_SOURCES) $(partial_copy_SOURCES) \ $(simple_copy_SOURCES) DIST_SOURCES = $(libdmcmy_la_SOURCES) 
$(partial_copy_SOURCES) \ $(simple_copy_SOURCES) am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(exampledir)" DATA = $(example_DATA) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS 
= @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = 
@SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ partial_copy_SOURCES = partial_copy.cpp partial_copy_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) partial_copy_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarcdata.la $(GLIBMM_LIBS) simple_copy_SOURCES = simple_copy.cpp simple_copy_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) simple_copy_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarcdata.la $(GLIBMM_LIBS) check_LTLIBRARIES = libdmcmy.la libdmcmy_la_SOURCES = DataPointMyProtocol.cpp libdmcmy_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libdmcmy_la_LIBADD = \ 
$(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarcdata.la $(LIBXML2_LIBS) $(GLIBMM_LIBS) libdmcmy_la_LDFLAGS = -no-undefined -avoid-version -module exampledir = $(pkgdatadir)/examples/sdk example_DATA = partial_copy.cpp simple_copy.cpp DataPointMyProtocol.cpp all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/data/examples/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/data/examples/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-checkLTLIBRARIES: -test -z "$(check_LTLIBRARIES)" || rm -f $(check_LTLIBRARIES) @list='$(check_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libdmcmy.la: $(libdmcmy_la_OBJECTS) $(libdmcmy_la_DEPENDENCIES) $(libdmcmy_la_LINK) $(libdmcmy_la_OBJECTS) $(libdmcmy_la_LIBADD) $(LIBS) clean-checkPROGRAMS: @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list partial_copy$(EXEEXT): $(partial_copy_OBJECTS) $(partial_copy_DEPENDENCIES) @rm -f partial_copy$(EXEEXT) $(partial_copy_LINK) $(partial_copy_OBJECTS) $(partial_copy_LDADD) $(LIBS) simple_copy$(EXEEXT): $(simple_copy_OBJECTS) $(simple_copy_DEPENDENCIES) @rm -f simple_copy$(EXEEXT) $(simple_copy_LINK) $(simple_copy_OBJECTS) $(simple_copy_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdmcmy_la-DataPointMyProtocol.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/partial_copy-partial_copy.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/simple_copy-simple_copy.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no 
@AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libdmcmy_la-DataPointMyProtocol.lo: DataPointMyProtocol.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcmy_la_CXXFLAGS) $(CXXFLAGS) -MT libdmcmy_la-DataPointMyProtocol.lo -MD -MP -MF $(DEPDIR)/libdmcmy_la-DataPointMyProtocol.Tpo -c -o libdmcmy_la-DataPointMyProtocol.lo `test -f 'DataPointMyProtocol.cpp' || echo '$(srcdir)/'`DataPointMyProtocol.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdmcmy_la-DataPointMyProtocol.Tpo $(DEPDIR)/libdmcmy_la-DataPointMyProtocol.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataPointMyProtocol.cpp' object='libdmcmy_la-DataPointMyProtocol.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcmy_la_CXXFLAGS) $(CXXFLAGS) -c -o libdmcmy_la-DataPointMyProtocol.lo `test -f 'DataPointMyProtocol.cpp' || echo '$(srcdir)/'`DataPointMyProtocol.cpp partial_copy-partial_copy.o: partial_copy.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(partial_copy_CXXFLAGS) $(CXXFLAGS) -MT partial_copy-partial_copy.o -MD -MP -MF $(DEPDIR)/partial_copy-partial_copy.Tpo -c -o partial_copy-partial_copy.o `test -f 'partial_copy.cpp' || echo '$(srcdir)/'`partial_copy.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/partial_copy-partial_copy.Tpo $(DEPDIR)/partial_copy-partial_copy.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='partial_copy.cpp' object='partial_copy-partial_copy.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(partial_copy_CXXFLAGS) $(CXXFLAGS) -c -o partial_copy-partial_copy.o `test -f 'partial_copy.cpp' || echo '$(srcdir)/'`partial_copy.cpp partial_copy-partial_copy.obj: partial_copy.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(partial_copy_CXXFLAGS) $(CXXFLAGS) -MT partial_copy-partial_copy.obj -MD -MP -MF $(DEPDIR)/partial_copy-partial_copy.Tpo -c -o partial_copy-partial_copy.obj `if test -f 'partial_copy.cpp'; then $(CYGPATH_W) 'partial_copy.cpp'; else $(CYGPATH_W) '$(srcdir)/partial_copy.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/partial_copy-partial_copy.Tpo $(DEPDIR)/partial_copy-partial_copy.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='partial_copy.cpp' object='partial_copy-partial_copy.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(partial_copy_CXXFLAGS) 
$(CXXFLAGS) -c -o partial_copy-partial_copy.obj `if test -f 'partial_copy.cpp'; then $(CYGPATH_W) 'partial_copy.cpp'; else $(CYGPATH_W) '$(srcdir)/partial_copy.cpp'; fi` simple_copy-simple_copy.o: simple_copy.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(simple_copy_CXXFLAGS) $(CXXFLAGS) -MT simple_copy-simple_copy.o -MD -MP -MF $(DEPDIR)/simple_copy-simple_copy.Tpo -c -o simple_copy-simple_copy.o `test -f 'simple_copy.cpp' || echo '$(srcdir)/'`simple_copy.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/simple_copy-simple_copy.Tpo $(DEPDIR)/simple_copy-simple_copy.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='simple_copy.cpp' object='simple_copy-simple_copy.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(simple_copy_CXXFLAGS) $(CXXFLAGS) -c -o simple_copy-simple_copy.o `test -f 'simple_copy.cpp' || echo '$(srcdir)/'`simple_copy.cpp simple_copy-simple_copy.obj: simple_copy.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(simple_copy_CXXFLAGS) $(CXXFLAGS) -MT simple_copy-simple_copy.obj -MD -MP -MF $(DEPDIR)/simple_copy-simple_copy.Tpo -c -o simple_copy-simple_copy.obj `if test -f 'simple_copy.cpp'; then $(CYGPATH_W) 'simple_copy.cpp'; else $(CYGPATH_W) '$(srcdir)/simple_copy.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/simple_copy-simple_copy.Tpo $(DEPDIR)/simple_copy-simple_copy.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='simple_copy.cpp' object='simple_copy-simple_copy.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(simple_copy_CXXFLAGS) $(CXXFLAGS) -c -o simple_copy-simple_copy.obj `if test -f 'simple_copy.cpp'; then $(CYGPATH_W) 'simple_copy.cpp'; else $(CYGPATH_W) '$(srcdir)/simple_copy.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-exampleDATA: $(example_DATA) @$(NORMAL_INSTALL) test -z "$(exampledir)" || $(MKDIR_P) "$(DESTDIR)$(exampledir)" @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(exampledir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(exampledir)" || exit $$?; \ done uninstall-exampleDATA: @$(NORMAL_UNINSTALL) @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(exampledir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(exampledir)" && rm -f $$files ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; 
nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_LTLIBRARIES) $(check_PROGRAMS) check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(exampledir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
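# The per-target compile rules above (using $(libdmcmy_la_CXXFLAGS),
# $(partial_copy_CXXFLAGS) and $(simple_copy_CXXFLAGS) on top of the global
# $(CXXFLAGS)) build the example plugin and programs in this directory; the
# targets that follow are the standard automake housekeeping rules for the
# examples directory (clean, distclean, install-*, maintainer-clean and
# friends).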
clean: clean-am clean-am: clean-checkLTLIBRARIES clean-checkPROGRAMS clean-generic \ clean-libtool mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-exampleDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-exampleDATA .MAKE: check-am install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean \ clean-checkLTLIBRARIES clean-checkPROGRAMS clean-generic \ clean-libtool ctags distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exampleDATA install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am \ uninstall-exampleDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/data/examples/PaxHeaders.7502/DataPointMyProtocol.cpp0000644000000000000000000000012312675602216026631 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.511701 29 ctime=1513200659.30373897 nordugrid-arc-5.4.2/src/hed/libs/data/examples/DataPointMyProtocol.cpp0000644000175000002070000000555612675602216026712 0ustar00mockbuildmock00000000000000#include namespace Arc { // DMC implementation for my protocol class DataPointMyProtocol : public DataPointDirect { public: // Constructor should never be used directly DataPointMyProtocol(const URL& url, const UserConfig& usercfg, PluginArgument* parg); // Instance is called by the DataPointPluginLoader to get the correct DMC // instance. If returns a DataPointMyProtocol if the URL is of the form my:// // or NULL otherwise. 
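  // A minimal usage sketch (illustrative only; the my:// URL below is
  // hypothetical and the pattern mirrors simple_copy.cpp in this directory):
  // client code never constructs DataPointMyProtocol directly, it creates a
  // DataHandle and the plugin loader selects this DMC through Instance():
  //
  //   Arc::UserConfig usercfg;
  //   Arc::URL url("my://example.org/path/to/file");
  //   Arc::DataHandle handle(url, usercfg);
  //   Arc::DataStatus res = (*handle).Check(true);  // dispatched to this class
  //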
static Plugin* Instance(PluginArgument *arg); // The following methods from DataPoint must be implemented virtual DataStatus Check(bool check_meta); virtual DataStatus Remove(); virtual DataStatus CreateDirectory(bool with_parents=false) { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); }; virtual DataStatus Stat(FileInfo& file, DataPoint::DataPointInfoType verb); virtual DataStatus List(std::list& file, DataPoint::DataPointInfoType verb); virtual DataStatus Rename(const URL& newurl) { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); }; virtual DataStatus StartReading(DataBuffer& buffer); virtual DataStatus StartWriting(DataBuffer& buffer, DataCallback *space_cb = NULL); virtual DataStatus StopReading(); virtual DataStatus StopWriting(); }; DataPointMyProtocol::DataPointMyProtocol(const URL& url, const UserConfig& usercfg, PluginArgument* parg) : DataPointDirect(url, usercfg, parg) {} DataStatus DataPointMyProtocol::Check(bool check_meta) { return DataStatus::Success; } DataStatus DataPointMyProtocol::Remove() { return DataStatus::Success; } DataStatus DataPointMyProtocol::Stat(FileInfo& file, DataPoint::DataPointInfoType verb) { return DataStatus::Success; } DataStatus DataPointMyProtocol::List(std::list& file, DataPoint::DataPointInfoType verb) { return DataStatus::Success; } DataStatus DataPointMyProtocol::StartReading(DataBuffer& buffer) { return DataStatus::Success; } DataStatus DataPointMyProtocol::StartWriting(DataBuffer& buffer, DataCallback *space_cb) { return DataStatus::Success; } DataStatus DataPointMyProtocol::StopReading() { return DataStatus::Success; } DataStatus DataPointMyProtocol::StopWriting() { return DataStatus::Success; } Plugin* DataPointMyProtocol::Instance(PluginArgument *arg) { DataPointPluginArgument *dmcarg = dynamic_cast(arg); if (!dmcarg) return NULL; if (((const URL &)(*dmcarg)).Protocol() != "my") return NULL; return new DataPointMyProtocol(*dmcarg, *dmcarg, dmcarg); } } // namespace Arc // Add this plugin to the plugin descriptor table extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "my", "HED:DMC", "My protocol", 0, &Arc::DataPointMyProtocol::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/libs/data/examples/PaxHeaders.7502/simple_copy.cpp0000644000000000000000000000012412110726523025232 xustar000000000000000027 mtime=1361292627.682019 27 atime=1513200574.513701 30 ctime=1513200659.305738995 nordugrid-arc-5.4.2/src/hed/libs/data/examples/simple_copy.cpp0000644000175000002070000000262012110726523025277 0ustar00mockbuildmock00000000000000#include #include #include #include #include int main(int argc, char** argv) { // Set up logging to stderr with level VERBOSE (a lot of output will be shown) Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::VERBOSE); Arc::Logger logger(Arc::Logger::getRootLogger(), "copy"); if (argc != 3) { logger.msg(Arc::ERROR, "Usage: copy source destination"); return 1; } // Set up source and destination objects Arc::UserConfig usercfg; Arc::URL src_url(argv[1]); Arc::URL dest_url(argv[2]); Arc::DataHandle src_handle(src_url, usercfg); Arc::DataHandle dest_handle(dest_url, usercfg); // Transfer should be insecure by default (most servers don't support encryption) // and passive if the client is behind a firewall Arc::DataMover mover; mover.secure(false); mover.passive(true); // If caching and URL mapping are not necessary default constructed 
objects can be used Arc::FileCache cache; Arc::URLMap map; // Call DataMover to do the transfer Arc::DataStatus result = mover.Transfer(*src_handle, *dest_handle, cache, map); if (!result.Passed()) { logger.msg(Arc::ERROR, "Copy failed: %s", std::string(result)); return 1; } return 0; } nordugrid-arc-5.4.2/src/hed/libs/data/examples/PaxHeaders.7502/README0000644000000000000000000000012412110726523023063 xustar000000000000000027 mtime=1361292627.682019 27 atime=1513200574.513701 30 ctime=1513200659.301738946 nordugrid-arc-5.4.2/src/hed/libs/data/examples/README0000644000175000002070000000005012110726523023123 0ustar00mockbuildmock00000000000000Examples of how to use the data library.nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/DataSpeed.cpp0000644000000000000000000000012211631156731022726 xustar000000000000000027 mtime=1315233241.447756 27 atime=1513200574.540702 28 ctime=1513200659.2407382 nordugrid-arc-5.4.2/src/hed/libs/data/DataSpeed.cpp0000644000175000002070000001225611631156731023003 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include namespace Arc { Logger DataSpeed::logger(Logger::getRootLogger(), "DataSpeed"); bool DataSpeed::verbose(void) { return be_verbose; } void DataSpeed::verbose(bool val) { be_verbose = val; } void DataSpeed::verbose(const std::string& prefix) { be_verbose = true; verbose_prefix = prefix; } void DataSpeed::hold(bool disable) { disabled = disable; } bool DataSpeed::transfer(unsigned long long int n) { if (disabled) { last_time = time(NULL); return true; } time_t t = time(NULL); time_t dt = t - last_time; Nall += n; if (dt > T) N = (n * dt) / T; else N = (N * (T - dt)) / T + n; if ((t - first_time) >= T * 3) { /* make decision only after statistics settles */ /* check for speed */ if (N < (T * min_speed)) if (first_speed_failure != 0) { if (t > (first_speed_failure + min_speed_time)) min_speed_failed = true; } else first_speed_failure = t; else first_speed_failure = 0; /* check for avearge speed */ if ((min_average_speed * (t - first_time)) > Nall) min_average_speed_failed = true; /* check for inactivity time */ if (t > (last_activity_time + max_inactivity_time)) max_inactivity_time_failed = true; } if (n > 0) last_activity_time = t; last_time = t; if (be_verbose) /* statistics to screen */ if ((t - last_printed) >= 1) { print_statistics(stderr, t); last_printed = t; } return !(min_speed_failed || min_average_speed_failed || max_inactivity_time_failed); } void DataSpeed::print_statistics(FILE *o, time_t t) { if (show_progress != NULL) { (*show_progress)(o, verbose_prefix.c_str(), (unsigned int)(t - first_time), Nall, Nmax, (t > first_time ? (((double)N) / (((t - first_time) > T) ? T : (t - first_time))) : ((double)0)), (t > first_time ? (((double)Nall) / (t - first_time)) : ((double)0))); return; } std::string::size_type outlen = verbose_prefix.length()+100; char* out = new char[outlen]; if(out) try { snprintf(out,outlen, "%s%5u s: %10.1f kB %8.1f kB/s %8.1f kB/s %c %c %c ", verbose_prefix.c_str(), (unsigned int)(t - first_time), ((double)Nall) / 1024, (t > first_time ? (((double)N) / (((t - first_time) > T) ? T : (t - first_time)) / 1024) : ((double)0)), (t > first_time ? (((double)Nall) / (t - first_time) / 1024) : ((double)0)), (min_speed_failed ? '!' : '.'), (min_average_speed_failed ? '!' : '.'), (max_inactivity_time_failed ? '!' 
: '.')); logger.msg(INFO, out); } catch(std::exception& e) { } delete[] out; } void DataSpeed::set_min_speed(unsigned long long int min_speed_, time_t min_speed_time_) { min_speed = min_speed_; min_speed_time = min_speed_time_; } void DataSpeed::set_min_average_speed(unsigned long long int min_average_speed_) { min_average_speed = min_average_speed_; } void DataSpeed::set_max_inactivity_time(time_t max_inactivity_time_) { max_inactivity_time = max_inactivity_time_; } void DataSpeed::set_base(time_t base_) { N = (N * base_) / T; /* allows ro change T on the fly */ T = base_; } void DataSpeed::set_progress_indicator(show_progress_t func) { show_progress = func; } void DataSpeed::set_max_data(unsigned long long int max) { Nmax = max; } DataSpeed::DataSpeed(unsigned long long int min_speed_, time_t min_speed_time_, unsigned long long int min_average_speed_, time_t max_inactivity_time_, time_t base_) { min_speed = min_speed_; min_speed_time = min_speed_time_; min_average_speed = min_average_speed_; max_inactivity_time = max_inactivity_time_; T = base_; be_verbose = false; disabled = false; show_progress = NULL; Nmax = 0; reset(); } DataSpeed::~DataSpeed(void) { /* statistics to screen */ if (be_verbose) print_statistics(stderr, time(NULL)); } DataSpeed::DataSpeed(time_t base_) { min_speed = 0; min_speed_time = 0; min_average_speed = 0; max_inactivity_time = 600; /* MUST have at least pure timeout */ T = base_; be_verbose = false; disabled = false; show_progress = NULL; Nmax = 0; reset(); } void DataSpeed::reset(void) { first_time = time(NULL); last_time = first_time; last_activity_time = first_time; last_printed = first_time; N = 0; Nall = 0; first_speed_failure = 0; min_speed_failed = false; min_average_speed_failed = false; max_inactivity_time_failed = false; return; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/FileCache.cpp0000644000000000000000000000012212463677753022717 xustar000000000000000025 mtime=1422884843.9491 27 atime=1513200574.518702 30 ctime=1513200659.245738261 nordugrid-arc-5.4.2/src/hed/libs/data/FileCache.cpp0000644000175000002070000011327512463677753022777 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #ifndef WIN32 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "FileCache.h" namespace Arc { const std::string FileCache::CACHE_DATA_DIR = "data"; const std::string FileCache::CACHE_JOB_DIR = "joblinks"; const int FileCache::CACHE_DIR_LENGTH = 2; const int FileCache::CACHE_DIR_LEVELS = 1; const std::string FileCache::CACHE_META_SUFFIX = ".meta"; const int FileCache::CACHE_DEFAULT_AUTH_VALIDITY = 86400; // 24 h const int FileCache::CACHE_LOCK_TIMEOUT = 900; // 15 mins const int FileCache::CACHE_META_LOCK_TIMEOUT = 2; Logger FileCache::logger(Logger::getRootLogger(), "FileCache"); FileCache::FileCache(const std::string& cache_path, const std::string& id, uid_t job_uid, gid_t job_gid) { // make a vector of one item and call _init std::vector caches; std::vector remote_caches; std::vector draining_caches; if (!cache_path.empty()) caches.push_back(cache_path); // if problem in init, clear _caches so object is invalid if (!_init(caches, remote_caches, draining_caches, id, job_uid, job_gid)) _caches.clear(); } FileCache::FileCache(const std::vector& caches, const std::string& id, uid_t job_uid, gid_t job_gid) { std::vector remote_caches; std::vector draining_caches; // if problem in init, clear _caches 
so object is invalid if (!_init(caches, remote_caches, draining_caches, id, job_uid, job_gid)) _caches.clear(); } FileCache::FileCache(const std::vector& caches, const std::vector& remote_caches, const std::vector& draining_caches, const std::string& id, uid_t job_uid, gid_t job_gid) { // if problem in init, clear _caches so object is invalid if (!_init(caches, remote_caches, draining_caches, id, job_uid, job_gid)) _caches.clear(); } bool FileCache::_init(const std::vector& caches, const std::vector& remote_caches, const std::vector& draining_caches, const std::string& id, uid_t job_uid, gid_t job_gid) { _id = id; _uid = job_uid; _gid = job_gid; // for each cache for (int i = 0; i < (int)caches.size(); i++) { std::string cache = caches[i]; std::string cache_path = cache.substr(0, cache.find(" ")); if (cache_path.empty()) { logger.msg(ERROR, "No cache directory specified"); return false; } std::string cache_link_path = ""; if (cache.find(" ") != std::string::npos) cache_link_path = cache.substr(cache.find_last_of(" ") + 1, cache.length() - cache.find_last_of(" ") + 1); // tidy up paths - take off any trailing slashes if (cache_path.rfind("/") == cache_path.length() - 1) cache_path = cache_path.substr(0, cache_path.length() - 1); if (cache_link_path.rfind("/") == cache_link_path.length() - 1) cache_link_path = cache_link_path.substr(0, cache_link_path.length() - 1); // add this cache to our list struct CacheParameters cache_params; cache_params.cache_path = cache_path; cache_params.cache_link_path = cache_link_path; _caches.push_back(cache_params); } if (!caches.empty() && _caches.empty()) { logger.msg(ERROR, "No usable caches"); return false; } // add remote caches for (int i = 0; i < (int)remote_caches.size(); i++) { std::string cache = remote_caches[i]; std::string cache_path = cache.substr(0, cache.find(" ")); if (cache_path.empty()) { logger.msg(ERROR, "No remote cache directory specified"); return false; } std::string cache_link_path = ""; if (cache.find(" ") != std::string::npos) cache_link_path = cache.substr(cache.find_last_of(" ")+1, cache.length()-cache.find_last_of(" ")+1); // tidy up paths - take off any trailing slashes if (cache_path.rfind("/") == cache_path.length()-1) cache_path = cache_path.substr(0, cache_path.length()-1); if (cache_link_path.rfind("/") == cache_link_path.length()-1) cache_link_path = cache_link_path.substr(0, cache_link_path.length()-1); // add this cache to our list struct CacheParameters cache_params; cache_params.cache_path = cache_path; cache_params.cache_link_path = cache_link_path; _remote_caches.push_back(cache_params); } // for each draining cache for (int i = 0; i < (int)draining_caches.size(); i++) { std::string cache = draining_caches[i]; std::string cache_path = cache.substr(0, cache.find(" ")); if (cache_path.empty()) { logger.msg(ERROR, "No draining cache directory specified"); return false; } // tidy up paths - take off any trailing slashes if (cache_path.rfind("/") == cache_path.length()-1) cache_path = cache_path.substr(0, cache_path.length()-1); // add this cache to our list struct CacheParameters cache_params; cache_params.cache_path = cache_path; cache_params.cache_link_path = ""; _draining_caches.push_back(cache_params); } return true; } bool FileCache::Start(const std::string& url, bool& available, bool& is_locked, bool use_remote, bool delete_first) { if (!(*this)) return false; available = false; is_locked = false; _cache_map.erase(url); std::string filename = File(url); // create directory structure if required, with last dir only 
readable by A-REX user // try different caches until one succeeds while (!DirCreate(filename.substr(0, filename.rfind("/")), S_IRWXU | S_IRGRP | S_IROTH | S_IXGRP | S_IXOTH, true)) { logger.msg(WARNING, "Failed to create cache directory for file %s: %s", filename, StrError(errno)); // remove the cache that failed from the cache list and try File() again std::vector::iterator i = _caches.begin(); for (; i != _caches.end(); ++i) { if (i->cache_path == _cache_map[url].cache_path) { _caches.erase(i); break; } } if (_caches.empty()) { logger.msg(ERROR, "Failed to create any cache directories for %s", url); return false; } _cache_map.erase(url); filename = File(url); } if (errno != EEXIST && chmod(filename.substr(0, filename.rfind("/")).c_str(), S_IRWXU) != 0) { logger.msg(ERROR, "Failed to change permissions on %s: %s", filename.substr(0, filename.rfind("/")), StrError(errno)); return false; } // check if a lock file exists and whether it is still valid struct stat lockStat; if (FileStat(std::string(filename+FileLock::getLockSuffix()).c_str(), &lockStat, false)) { FileLock lock(filename, CACHE_LOCK_TIMEOUT); bool lock_removed = false; if (lock.acquire(lock_removed)) { // if lock was invalid delete cache file if (lock_removed && !FileDelete(filename.c_str()) && errno != ENOENT) { logger.msg(ERROR, "Failed to delete stale cache file %s: %s", filename, StrError(errno)); } if (!lock.release()) { logger.msg(WARNING, "Failed to release lock on file %s", filename); is_locked = true; return false; } } else { is_locked = true; return false; } } // now check if the cache file is there already struct stat fileStat; if (FileStat(filename, &fileStat, true)) { available = true; } // if the file is not there. check remote caches else if (errno == ENOENT) { if (use_remote && !_remote_caches.empty() && !delete_first) { // get the hash of the url std::string hash(_getHash(url)); std::string remote_cache_file; std::string remote_cache_link; // store remote file modification time to compare if copy fails Arc::Time remote_mod_time; // go through remote caches and try to find file std::vector::iterator it = _remote_caches.begin(); for (; it != _remote_caches.end(); it++) { std::string remote_file = it->cache_path+"/"+CACHE_DATA_DIR+"/"+hash; if (FileStat(remote_file, &fileStat, true)) { remote_cache_file = remote_file; remote_cache_link = it->cache_link_path; remote_mod_time = Arc::Time(fileStat.st_mtime); break; } } if (!remote_cache_file.empty()) { logger.msg(INFO, "Found file %s in remote cache at %s", url, remote_cache_file); // check meta file - if fails use local file if (_checkMetaFile(remote_cache_file, url, is_locked)) { // check if a lock file exists and whether it is still valid - if // it is valid then just download to local cache bool use_local = false; if (FileStat(std::string(remote_cache_file+FileLock::getLockSuffix()).c_str(), &lockStat, false)) { FileLock remote_lock(remote_cache_file, CACHE_LOCK_TIMEOUT); bool remote_lock_removed = false; if (remote_lock.acquire(remote_lock_removed)) { // if lock was invalid delete cache file if (remote_lock_removed) { use_local = true; if (!FileDelete(remote_cache_file.c_str()) && errno != ENOENT) { logger.msg(WARNING, "Failed to delete stale remote cache file %s: %s", remote_cache_file, StrError(errno)); } } if (!remote_lock.release()) { logger.msg(WARNING, "Failed to release lock on remote cache file %s", remote_cache_file); use_local = true; } } else { use_local = true; } } if (!use_local) { // ok to use remote file - now decide whether to replicate to local 
cache if (remote_cache_link == "replicate") { // check local cache meta file if (!_checkMetaFile(filename, url, is_locked)) return false; // acquire lock on local cache FileLock lock(filename, CACHE_LOCK_TIMEOUT); bool lock_removed = false; if (!lock.acquire(lock_removed)) { logger.msg(INFO, "Failed to obtain lock on cache file %s", filename); is_locked = true; return false; } // we have the lock, if there was a stale lock remove cache file to be safe if (lock_removed && !FileDelete(filename) && errno != ENOENT) { logger.msg(ERROR, "Error removing cache file %s: %s", filename, StrError(errno)); if (!lock.release()) logger.msg(ERROR, "Failed to remove lock on %s. Some manual intervention may be required", filename); return false; } // copy the file to the local cache, remove lock and exit with available=true logger.msg(VERBOSE, "Replicating file %s to local cache file %s", remote_cache_file, filename); if (!FileCopy(remote_cache_file, filename)) { logger.msg(ERROR, "Failed to copy file %s to %s: %s", remote_cache_file, filename, StrError(errno)); // it could have failed because another process deleted the remote file struct stat remoteStat; if (!FileStat(remote_cache_file, &remoteStat, false) || Arc::Time(remoteStat.st_mtime) > remote_mod_time) { logger.msg(WARNING, "Replicating file %s from remote cache failed due to source being deleted or modified", remote_cache_file); if (!FileDelete(filename) && errno != ENOENT) logger.msg(ERROR, "Failed to delete bad copy of remote cache file %s at %s: %s", remote_cache_file, filename, StrError(errno)); } if (!lock.release()) logger.msg(ERROR, "Failed to remove lock on %s. Some manual intervention may be required", remote_cache_file); return false; } if (!lock.release()) logger.msg(ERROR, "Failed to remove lock on %s. Some manual intervention may be required", remote_cache_file); } else { // set remote cache in the cache map _cache_map[url] = *it; } available = true; return true; } } } } } else { // this is ok, we will download again logger.msg(WARNING, "Failed looking up attributes of cached file: %s", StrError(errno)); } if (!available || delete_first) { // lock in preparation for writing FileLock lock(filename, CACHE_LOCK_TIMEOUT); bool lock_removed = false; if (!lock.acquire(lock_removed)) { logger.msg(INFO, "Failed to obtain lock on cache file %s", filename); is_locked = true; return false; } // we have the lock, if there was a stale lock or the file was requested // to be deleted, remove cache file if (lock_removed || delete_first) { if (!FileDelete(filename) && errno != ENOENT) { logger.msg(ERROR, "Error removing cache file %s: %s", filename, StrError(errno)); if (!lock.release()) logger.msg(ERROR, "Failed to remove lock on %s. Some manual intervention may be required", filename); return false; } available = false; } } // create the meta file to store the URL, if it does not exist if (!_checkMetaFile(filename, url, is_locked)) { // release locks if acquired if (!available) { FileLock lock(filename, CACHE_LOCK_TIMEOUT); if (!lock.release()) logger.msg(ERROR, "Failed to remove lock on %s. Some manual intervention may be required", filename); } return false; } return true; } bool FileCache::Stop(const std::string& url) { if (!(*this)) return false; // check if already unlocked in Link() if (_urls_unlocked.find(url) == _urls_unlocked.end()) { std::string filename(File(url)); // delete the lock FileLock lock(filename); if (!lock.release()) { logger.msg(ERROR, "Failed to unlock file %s: %s. 
Manual intervention may be required", filename, StrError(errno)); return false; } } return true; } bool FileCache::StopAndDelete(const std::string& url) { if (!(*this)) return false; std::string filename = File(url); FileLock lock(filename, CACHE_LOCK_TIMEOUT); // first check that the lock is still valid before deleting anything if (lock.check() != 0) { logger.msg(ERROR, "Invalid lock on file %s", filename); return false; } // delete the meta file - not critical so don't fail on error if (!FileDelete(_getMetaFileName(url))) logger.msg(ERROR, "Failed to remove .meta file %s: %s", _getMetaFileName(url), StrError(errno)); // delete the cache file if (!FileDelete(filename) && errno != ENOENT) { // leave the lock file so that a bad cache file is not used next time logger.msg(ERROR, "Error removing cache file %s: %s", filename, StrError(errno)); return false; } // delete the lock file last if (!lock.release()) { logger.msg(ERROR, "Failed to unlock file %s: %s. Manual intervention may be required", filename, StrError(errno)); return false; } return true; } std::string FileCache::File(const std::string& url) { if (!(*this)) return ""; // get the hash of the url std::string hash(_getHash(url)); // look up the cache map to see if the file is already in std::map ::iterator iter = _cache_map.find(url) ; if (iter != _cache_map.end()) { return _cache_map[url].cache_path + "/" + CACHE_DATA_DIR + "/" + hash; } // else choose a new cache and assign the file to it struct CacheParameters chosen_cache = _chooseCache(url); std::string path = chosen_cache.cache_path + "/" + CACHE_DATA_DIR + "/" + hash; // update the cache map with the new file _cache_map.insert(std::make_pair(url, chosen_cache)); return path; } bool FileCache::Link(const std::string& dest_path, const std::string& url, bool copy, bool executable, bool holding_lock, bool& try_again) { if (!(*this)) return false; try_again = false; std::string cache_file = File(url); std::string hard_link_path; std::string cache_link_path; // Mod time of cache file Arc::Time modtime; // check the original file exists and if so in which cache struct stat fileStat; if (FileStat(cache_file, &fileStat, false)) { // look up the map to get the cache params this url is mapped to (set in File()) std::map ::iterator iter = _cache_map.find(url); if (iter == _cache_map.end()) { logger.msg(ERROR, "Cache not found for file %s", cache_file); return false; } hard_link_path = _cache_map[url].cache_path + "/" + CACHE_JOB_DIR + "/" +_id; cache_link_path = _cache_map[url].cache_link_path; modtime = Arc::Time(fileStat.st_mtime); // if modtime is now (to second granularity) sleep for a second to avoid // race condition with another process locking, modifying and unlocking // during link if (!holding_lock && modtime.GetTime() == Arc::Time().GetTime()) { logger.msg(VERBOSE, "Cache file %s was modified in the last second, sleeping 1 second to avoid race condition", cache_file); sleep(1); } } else if (errno == ENOENT) { if (holding_lock || _remote_caches.empty()) { logger.msg(WARNING, "Cache file %s does not exist", cache_file); try_again = true; return false; } // file may have been found in a remote cache std::string hash(_getHash(url)); // go through remote caches and try to find file for (std::vector::iterator it = _remote_caches.begin(); it != _remote_caches.end(); it++) { std::string remote_file = it->cache_path+"/"+CACHE_DATA_DIR+"/"+hash; if (FileStat(remote_file, &fileStat, true)) { cache_file = remote_file; hard_link_path = it->cache_path + "/" + CACHE_JOB_DIR + "/" +_id; 
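          // Remember this remote cache's link path and the cached file's
          // modification time before leaving the search loop; the mod time is
          // checked again after the hard link is made to catch the file being
          // modified concurrently by another process.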
cache_link_path = it->cache_link_path; modtime = Arc::Time(fileStat.st_mtime); break; } } if (hard_link_path.empty()) { // another process could have deleted it, so try again logger.msg(WARNING, "Cache file for %s not found in any local or remote cache", url); try_again = true; return false; } logger.msg(VERBOSE, "Using remote cache file %s for url %s", cache_file, url); } else { logger.msg(ERROR, "Error accessing cache file %s: %s", cache_file, StrError(errno)); return false; } // create per-job hard link dir if necessary, making the final dir readable only by the job user if (!DirCreate(hard_link_path, S_IRWXU | S_IRGRP | S_IROTH | S_IXGRP | S_IXOTH, true)) { logger.msg(ERROR, "Cannot create directory %s for per-job hard links", hard_link_path); return false; } if (errno != EEXIST) { if (chmod(hard_link_path.c_str(), S_IRWXU) != 0) { logger.msg(ERROR, "Cannot change permission of %s: %s ", hard_link_path, StrError(errno)); return false; } if (chown(hard_link_path.c_str(), _uid, _gid) != 0) { logger.msg(ERROR, "Cannot change owner of %s: %s ", hard_link_path, StrError(errno)); return false; } } std::string filename = dest_path.substr(dest_path.rfind("/") + 1); std::string hard_link_file = hard_link_path + "/" + filename; std::string session_dir = dest_path.substr(0, dest_path.rfind("/")); // make the hard link if (!FileLink(cache_file, hard_link_file, false)) { // if the link we want to make already exists, delete and make new one if (errno == EEXIST) { if (!FileDelete(hard_link_file)) { logger.msg(ERROR, "Failed to remove existing hard link at %s: %s", hard_link_file, StrError(errno)); return false; } if (!FileLink(cache_file, hard_link_file, false)) { logger.msg(ERROR, "Failed to create hard link from %s to %s: %s", hard_link_file, cache_file, StrError(errno)); return false; } } else if (errno == ENOENT) { // another process could have deleted the cache file, so try again logger.msg(WARNING, "Cache file %s not found", cache_file); try_again = true; return false; } else { logger.msg(ERROR, "Failed to create hard link from %s to %s: %s", hard_link_file, cache_file, StrError(errno)); return false; } } // ensure the hard link is readable by all and owned by root (or GM user) // to make cache file immutable but readable by mapped user // Using chmod as a temporary solution until it is possible to // specify mode when writing with File DMC if (chmod(hard_link_file.c_str(), S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH) != 0) { logger.msg(ERROR, "Failed to change permissions or set owner of hard link %s: %s", hard_link_file, StrError(errno)); return false; } // Hard link is created so release any locks on the cache file if (holding_lock) { FileLock lock(cache_file, CACHE_LOCK_TIMEOUT); if (!lock.release()) { logger.msg(WARNING, "Failed to release lock on cache file %s", cache_file); return _cleanFilesAndReturnFalse(hard_link_file, try_again); } _urls_unlocked.insert(url); } else { // check that the cache file wasn't locked or modified during the link/copy // if we are holding the lock, assume none of these checks are necessary struct stat lockStat; // check if lock file exists if (FileStat(cache_file+FileLock::getLockSuffix(), &lockStat, false)) { logger.msg(WARNING, "Cache file %s was locked during link/copy, must start again", cache_file); return _cleanFilesAndReturnFalse(hard_link_file, try_again); } // check cache file is still there if (!FileStat(cache_file, &fileStat, false)) { logger.msg(WARNING, "Cache file %s was deleted during link/copy, must start again", cache_file); return 
_cleanFilesAndReturnFalse(hard_link_file, try_again); } // finally check the mod time of the cache file if (Arc::Time(fileStat.st_mtime) > modtime) { logger.msg(WARNING, "Cache file %s was modified while linking, must start again", cache_file); return _cleanFilesAndReturnFalse(hard_link_file, try_again); } } // make necessary dirs for the soft link // the session dir should already exist but in the case of arccp with cache it may not // here we use the mapped user to access session dir if (!DirCreate(session_dir, _uid, _gid, S_IRWXU, true)) { logger.msg(ERROR, "Failed to create directory %s: %s", session_dir, StrError(errno)); return false; } // if _cache_link_path is '.' or copy or executable is true then copy instead // "replicate" should not be possible, but including just in case if (copy || executable || cache_link_path == "." || cache_link_path == "replicate") { if (!FileCopy(hard_link_file, dest_path, _uid, _gid)) { logger.msg(ERROR, "Failed to copy file %s to %s: %s", hard_link_file, dest_path, StrError(errno)); return false; } if (executable) { FileAccess fa; if (!fa) { logger.msg(ERROR, "Failed to set executable bit on file %s", dest_path); return false; } if (!fa.fa_setuid(_uid, _gid) || !fa.fa_chmod(dest_path, S_IRWXU)) { errno = fa.geterrno(); logger.msg(ERROR, "Failed to set executable bit on file %s: %s", dest_path, StrError(errno)); return false; } } } else { // make the soft link, changing the target if cache_link_path is defined if (!cache_link_path.empty()) hard_link_file = cache_link_path + "/" + CACHE_JOB_DIR + "/" + _id + "/" + filename; if (!FileLink(hard_link_file, dest_path, _uid, _gid, true)) { // if the link we want to make already exists, delete and make new one if (errno == EEXIST) { if (!FileDelete(dest_path, _uid, _gid)) { logger.msg(ERROR, "Failed to remove existing symbolic link at %s: %s", dest_path, StrError(errno)); return false; } if (!FileLink(hard_link_file, dest_path, _uid, _gid, true)) { logger.msg(ERROR, "Failed to create symbolic link from %s to %s: %s", dest_path, hard_link_file, StrError(errno)); return false; } } else { logger.msg(ERROR, "Failed to create symbolic link from %s to %s: %s", dest_path, hard_link_file, StrError(errno)); return false; } } } // file was safely linked/copied return true; } bool FileCache::Release() const { // go through all caches (including remote caches and draining caches) // and remove per-job dirs for our job id std::vector job_dirs; for (int i = 0; i < (int)_caches.size(); i++) job_dirs.push_back(_caches[i].cache_path + "/" + CACHE_JOB_DIR + "/" + _id); for (int i = 0; i < (int)_remote_caches.size(); i++) job_dirs.push_back(_remote_caches[i].cache_path + "/" + CACHE_JOB_DIR + "/" + _id); for (int i = 0; i < (int)_draining_caches.size(); i++) job_dirs.push_back(_draining_caches[i].cache_path + "/" + CACHE_JOB_DIR + "/" + _id); for (int i = 0; i < (int)job_dirs.size(); i++) { std::string job_dir = job_dirs[i]; // check if job dir exists struct stat fileStat; if (!FileStat(job_dir, &fileStat, true) && errno == ENOENT) continue; logger.msg(DEBUG, "Removing %s", job_dir); if (!DirDelete(job_dir)) { logger.msg(WARNING, "Failed to remove cache per-job dir %s: %s", job_dir, StrError(errno)); return false; } } return true; } bool FileCache::AddDN(const std::string& url, const std::string& DN, const Time& exp_time) { if (DN.empty()) return false; Time expiry_time(exp_time); if (expiry_time == Time(0)) expiry_time = Time(time(NULL) + CACHE_DEFAULT_AUTH_VALIDITY); // add DN to the meta file. 
If already there, renew the expiry time std::string meta_file = _getMetaFileName(url); struct stat fileStat; if (!FileStat(meta_file, &fileStat, true)) { logger.msg(ERROR, "Error reading meta file %s: %s", meta_file, StrError(errno)); return false; } std::list lines; if (!FileRead(meta_file, lines)) { logger.msg(ERROR, "Error opening meta file %s", meta_file); return false; } if (lines.empty()) { logger.msg(ERROR, "meta file %s is empty", meta_file); return false; } // first line contains the URL std::list::iterator line = lines.begin(); std::string first_line(*line); // check for possible hash collisions between URLs if (first_line != url) { logger.msg(ERROR, "File %s is already cached at %s under a different URL: %s - will not add DN to cached list", url, File(url), first_line); return false; } std::string newdnlist(first_line + '\n'); // second line may contain validity time, this is no longer supported so // remove it ++line; if (line != lines.end()) { std::string::size_type space_pos = line->rfind(' '); if (space_pos == std::string::npos) { ++line; } // check list of DNs for expired and this DN for (; line != lines.end(); ++line) { space_pos = line->rfind(' '); if (space_pos == std::string::npos) { logger.msg(WARNING, "Bad format detected in file %s, in line %s", meta_file, *line); continue; } // remove expired DNs (after some grace period) if (line->substr(0, space_pos) != DN) { Time exp_time(line->substr(space_pos + 1)); if (exp_time > Time(time(NULL) - CACHE_DEFAULT_AUTH_VALIDITY)) newdnlist += std::string(*line + '\n'); } } } newdnlist += std::string(DN + ' ' + expiry_time.str(MDSTime) + '\n'); // write everything back to the file FileLock meta_lock(meta_file, CACHE_META_LOCK_TIMEOUT); if (!meta_lock.acquire()) { // not critical if writing fails logger.msg(INFO, "Could not acquire lock on meta file %s", meta_file); return false; } if (!FileCreate(meta_file, newdnlist)) { logger.msg(ERROR, "Error opening meta file for writing %s", meta_file); meta_lock.release(); return false; } meta_lock.release(); return true; } bool FileCache::CheckDN(const std::string& url, const std::string& DN) { if (DN.empty()) return false; std::string meta_file = _getMetaFileName(url); struct stat fileStat; if (!FileStat(meta_file, &fileStat, true)) { if (errno != ENOENT) logger.msg(ERROR, "Error reading meta file %s: %s", meta_file, StrError(errno)); return false; } std::list lines; if (!FileRead(meta_file, lines)) { logger.msg(ERROR, "Error opening meta file %s", meta_file); return false; } if (lines.empty()) { logger.msg(ERROR, "meta file %s is empty", meta_file); return false; } // read list of DNs until we find this one for (std::list::iterator line = lines.begin(); line != lines.end(); ++line) { std::string::size_type space_pos = line->rfind(' '); if (line->substr(0, space_pos) == DN) { std::string exp_time = line->substr(space_pos + 1); if (Time(exp_time) > Time()) { logger.msg(VERBOSE, "DN %s is cached and is valid until %s for URL %s", DN, Time(exp_time).str(), url); return true; } else { logger.msg(VERBOSE, "DN %s is cached but has expired for URL %s", DN, url); return false; } } } return false; } bool FileCache::CheckCreated(const std::string& url) { // check the cache file exists - if so we can get the creation date // follow symlinks std::string cache_file = File(url); struct stat fileStat; return FileStat(cache_file, &fileStat, true); } Time FileCache::GetCreated(const std::string& url) { // check the cache file exists std::string cache_file = File(url); // follow symlinks struct stat 
fileStat; if (!FileStat(cache_file, &fileStat, true)) { if (errno == ENOENT) logger.msg(ERROR, "Cache file %s does not exist", cache_file); else logger.msg(ERROR, "Error accessing cache file %s: %s", cache_file, StrError(errno)); return 0; } time_t mtime = fileStat.st_mtime; if (mtime <= 0) return Time(0); return Time(mtime); } bool FileCache::operator==(const FileCache& a) { if (a._caches.size() != _caches.size()) return false; for (int i = 0; i < (int)a._caches.size(); i++) { if (a._caches.at(i).cache_path != _caches.at(i).cache_path) return false; if (a._caches.at(i).cache_link_path != _caches.at(i).cache_link_path) return false; } return (a._id == _id && a._uid == _uid && a._gid == _gid); } bool FileCache::_createMetaFile(const std::string& meta_file, const std::string& content, bool& is_locked) { FileLock meta_lock(meta_file, CACHE_META_LOCK_TIMEOUT); if (!meta_lock.acquire()) { logger.msg(WARNING, "Failed to acquire lock on cache meta file %s", meta_file); is_locked = true; return false; } if (!FileCreate(meta_file, content)) { logger.msg(WARNING, "Failed to create cache meta file %s", meta_file); } meta_lock.release(); // there is a small timeout so don't bother to report error return true; } bool FileCache::_checkMetaFile(const std::string& filename, const std::string& url, bool& is_locked) { std::string meta_file(filename + CACHE_META_SUFFIX); struct stat fileStat; if (FileStat(meta_file, &fileStat, true)) { // check URL inside file for possible hash collisions std::list lines; if (!FileRead(meta_file, lines)) { // file was probably deleted by another process - try again logger.msg(WARNING, "Failed to read cache meta file %s", meta_file); is_locked = true; return false; } if (lines.empty()) { logger.msg(WARNING, "Cache meta file %s is empty, will recreate", meta_file); return _createMetaFile(meta_file, std::string(url + '\n'), is_locked); } std::string meta_str = lines.front(); if (meta_str.empty()) { logger.msg(WARNING, "Cache meta file %s possibly corrupted, will recreate", meta_file); return _createMetaFile(meta_file, std::string(url + '\n'), is_locked); } // Prior to ARC1 the first line was url validity // From ARC1 to 4 validity was stored on the second line // Validity was removed in ARC 5 // There is probably no need now to check for pre-ARC1 format if (meta_str != url) { logger.msg(WARNING, "File %s is already cached at %s under a different URL: %s - this file will not be cached", url, filename, meta_str); return false; } } else if (errno == ENOENT) { // create new file return _createMetaFile(meta_file, std::string(url + '\n'), is_locked); } else { logger.msg(ERROR, "Error looking up attributes of cache meta file %s: %s", meta_file, StrError(errno)); return false; } return true; } std::string FileCache::_getMetaFileName(const std::string& url) { return File(url) + CACHE_META_SUFFIX; } std::string FileCache::_getHash(const std::string& url) const { // get the hash of the url std::string hash = FileCacheHash::getHash(url); int index = 0; for (int level = 0; level < CACHE_DIR_LEVELS; level ++) { hash.insert(index + CACHE_DIR_LENGTH, "/"); // go to next slash position, add one since we just inserted a slash index += CACHE_DIR_LENGTH + 1; } return hash; } struct CacheParameters FileCache::_chooseCache(const std::string& url) const { // When there is only one cache directory if (_caches.size() == 1) return _caches.front(); std::string hash(_getHash(url)); struct stat fileStat; // check the fs to see if the file is already there for (std::vector::const_iterator i = 
_caches.begin(); i != _caches.end(); ++i) { std::string c_file = i->cache_path + "/" + CACHE_DATA_DIR +"/" + hash; if (FileStat(c_file, &fileStat, true)) { return *i; } } // check to see if a lock file already exists, since cache could be // started but no file download was done for (std::vector::const_iterator i = _caches.begin(); i != _caches.end(); ++i) { std::string c_file = i->cache_path + "/" + CACHE_DATA_DIR +"/" + hash + FileLock::getLockSuffix(); if (FileStat(c_file, &fileStat, true)) { return *i; } } // map of cache number and unused space in GB std::map cache_map; // sum of all cache free space float total_free = 0; // get the free spaces of the caches for (unsigned int i = 0; i < _caches.size(); ++i) { float free_space = _getCacheInfo(_caches.at(i).cache_path); cache_map[i] = free_space; total_free += free_space; } // Select a random cache using the free space as a weight // r is a random number between 0 and total_space float r = total_free * ((float)rand()/(float)RAND_MAX); for (std::map::iterator cache_it = cache_map.begin(); cache_it != cache_map.end(); ++cache_it) { r -= cache_it->second; if (r <= 0) { logger.msg(DEBUG, "Using cache %s", _caches.at(cache_it->first).cache_path); return _caches.at(cache_it->first); } } // shouldn't be possible to get here return _caches.front(); } float FileCache::_getCacheInfo(const std::string& path) const { struct statvfs info; if (statvfs(path.c_str(), &info) != 0) { // if path does not exist info is undefined but the dir will be created in Start() anyway if (errno != ENOENT) { logger.msg(ERROR, "Error getting info from statvfs for the path %s: %s", path, StrError(errno)); return 0; } } // return free space in GB float space = (float)(info.f_bfree * info.f_bsize) / (float)(1024 * 1024 * 1024); logger.msg(DEBUG, "Cache %s: Free space %f GB", path, space); return space; } bool FileCache::_cleanFilesAndReturnFalse(const std::string& hard_link_file, bool& locked) { if (!FileDelete(hard_link_file)) logger.msg(ERROR, "Failed to clean up file %s: %s", hard_link_file, StrError(errno)); locked = true; return false; } } // namespace Arc #endif /*WIN32*/ nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/DataBuffer.cpp0000644000000000000000000000012411754672447023116 xustar000000000000000027 mtime=1337160999.057496 27 atime=1513200574.545702 30 ctime=1513200659.239738187 nordugrid-arc-5.4.2/src/hed/libs/data/DataBuffer.cpp0000644000175000002070000004016611754672447023172 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include namespace Arc { bool DataBuffer::set(CheckSum *cksum, unsigned int size, int blocks) { lock.lock(); if (blocks < 0) { lock.unlock(); return false; } if (bufs != NULL) { for (int i = 0; i < bufs_n; i++) { if (bufs[i].start) free(bufs[i].start); } free(bufs); bufs_n = 0; bufs = NULL; set_counter++; cond.broadcast(); /* make all waiting loops to exit */ } if ((size == 0) || (blocks == 0)) { lock.unlock(); return true; } bufs = (buf_desc*)malloc(sizeof(buf_desc) * blocks); if (bufs == NULL) { lock.unlock(); return false; } bufs_n = blocks; for (int i = 0; i < blocks; i++) { bufs[i].start = NULL; bufs[i].taken_for_read = false; bufs[i].taken_for_write = false; bufs[i].size = size; bufs[i].used = 0; bufs[i].offset = 0; } //checksum = cksum; checksums.clear(); checksums.push_back(checksum_desc(cksum)); if (cksum) cksum->start(); lock.unlock(); return true; } int DataBuffer::add(CheckSum *cksum) { if (!cksum) return -1; lock.lock(); checksum_desc cs = 
cksum; cs.sum->start(); for (int i = 0; i < bufs_n; i++) { if (bufs[i].used != 0) { if (bufs[i].offset == cs.offset) { cs.sum->add(bufs[i].start, bufs[i].used); cs.offset += bufs[i].used; i = -1; cs.ready = true; } else if (cs.offset < bufs[i].offset) { cs.ready = false; } } } if (eof_read_flag && cs.ready) cs.sum->end(); checksums.push_back(cs); int res = checksums.size() - 1; lock.unlock(); return res; } DataBuffer::DataBuffer(unsigned int size, int blocks) { bufs_n = 0; bufs = NULL; set_counter = 0; eof_read_flag = false; eof_write_flag = false; error_read_flag = false; error_write_flag = false; error_transfer_flag = false; set(NULL, size, blocks); eof_pos = 0; } DataBuffer::DataBuffer(CheckSum *cksum, unsigned int size, int blocks) { bufs_n = 0; bufs = NULL; set_counter = 0; eof_read_flag = false; eof_write_flag = false; error_read_flag = false; error_write_flag = false; error_transfer_flag = false; set(cksum, size, blocks); eof_pos = 0; } DataBuffer::~DataBuffer() { set(NULL, 0, 0); } bool DataBuffer::eof_read() { return eof_read_flag; } bool DataBuffer::eof_write() { return eof_write_flag; } bool DataBuffer::error_transfer() { return error_transfer_flag; } bool DataBuffer::error_read() { return error_read_flag; } bool DataBuffer::error_write() { return error_write_flag; } void DataBuffer::eof_read(bool eof_) { lock.lock(); if (eof_) { for (std::list::iterator itCheckSum = checksums.begin(); itCheckSum != checksums.end(); itCheckSum++) { if (itCheckSum->sum) itCheckSum->sum->end(); } } eof_read_flag = eof_; cond.broadcast(); lock.unlock(); } void DataBuffer::eof_write(bool eof_) { lock.lock(); eof_write_flag = eof_; cond.broadcast(); lock.unlock(); } bool DataBuffer::error() { return (error_read_flag || error_write_flag || error_transfer_flag); } void DataBuffer::error_read(bool error_) { lock.lock(); // error_read_flag=error_; if (error_) { if (!(error_write_flag || error_transfer_flag)) error_read_flag = true; for (std::list::iterator itCheckSum = checksums.begin(); itCheckSum != checksums.end(); itCheckSum++) { if (itCheckSum->sum) itCheckSum->sum->end(); } eof_read_flag = true; } else { error_read_flag = false; } cond.broadcast(); lock.unlock(); } void DataBuffer::error_write(bool error_) { lock.lock(); // error_write_flag=error_; if (error_) { if (!(error_read_flag || error_transfer_flag)) error_write_flag = true; eof_write_flag = true; } else { error_write_flag = false; } cond.broadcast(); lock.unlock(); } bool DataBuffer::wait_eof_read() { lock.lock(); for (;;) { if (eof_read_flag) break; cond.wait(lock); } lock.unlock(); return true; } bool DataBuffer::wait_read() { lock.lock(); for (;;) { if (eof_read_flag) break; if (error_read_flag) break; cond.wait(lock); } lock.unlock(); return true; } bool DataBuffer::wait_eof_write() { lock.lock(); for (;;) { if (eof_write_flag) break; cond.wait(lock); } lock.unlock(); return true; } bool DataBuffer::wait_write() { lock.lock(); for (;;) { if (eof_write_flag) break; if (error_write_flag) break; cond.wait(lock); } lock.unlock(); return true; } bool DataBuffer::wait_eof() { lock.lock(); for (;;) { if (eof_read_flag && eof_write_flag) break; cond.wait(lock); } lock.unlock(); return true; } bool DataBuffer::cond_wait() { // Wait for any event int tmp = set_counter; bool eof_read_flag_tmp = eof_read_flag; bool eof_write_flag_tmp = eof_write_flag; // cond.wait(lock); bool err = false; for (;;) { if (!speed.transfer()) { if ((!(error_read_flag || error_write_flag)) && (!(eof_read_flag && eof_write_flag))) { error_transfer_flag = true; } } if 
(eof_read_flag && eof_write_flag) { // there wil be no more events lock.unlock(); Glib::Thread::yield(); lock.lock(); return true; } if (eof_read_flag_tmp != eof_read_flag) return true; if (eof_write_flag_tmp != eof_write_flag) return true; if (error()) return false; // useless to wait for - better fail if (set_counter != tmp) return false; if (err) break; // Some event int t = 60; Glib::TimeVal stime; stime.assign_current_time(); // Using timeout to workaround lost signal err = cond.timed_wait(lock, stime + t); } return true; } bool DataBuffer::for_read() { if (bufs == NULL) return false; lock.lock(); for (int i = 0; i < bufs_n; i++) { if ((!bufs[i].taken_for_read) && (!bufs[i].taken_for_write) && (bufs[i].used == 0)) { lock.unlock(); return true; } } lock.unlock(); return false; } bool DataBuffer::for_read(int& handle, unsigned int& length, bool wait) { lock.lock(); if (bufs == NULL) { lock.unlock(); return false; } for (;;) { if (error()) { /* errors detected/set - any continuation is unusable */ lock.unlock(); return false; } for (int i = 0; i < bufs_n; i++) { if ((!bufs[i].taken_for_read) && (!bufs[i].taken_for_write) && (bufs[i].used == 0)) { if (bufs[i].start == NULL) { bufs[i].start = (char*)malloc(bufs[i].size); if (bufs[i].start == NULL) continue; } handle = i; bufs[i].taken_for_read = true; length = bufs[i].size; cond.broadcast(); lock.unlock(); return true; } } /* suitable block not found - wait for changes or quit */ if (eof_write_flag) { /* writing side quited, no need to wait */ lock.unlock(); return false; } if (!wait) { lock.unlock(); return false; } if (!cond_wait()) { lock.unlock(); return false; } } lock.unlock(); return false; } bool DataBuffer::is_read(char *buf, unsigned int length, unsigned long long int offset) { lock.lock(); for (int i = 0; i < bufs_n; i++) { if (bufs[i].start == buf) { lock.unlock(); return is_read(i, length, offset); } } lock.unlock(); return false; } bool DataBuffer::is_read(int handle, unsigned int length, unsigned long long int offset) { lock.lock(); if (bufs == NULL) { lock.unlock(); return false; } if (handle >= bufs_n) { lock.unlock(); return false; } if (!bufs[handle].taken_for_read) { lock.unlock(); return false; } if (length > bufs[handle].size) { lock.unlock(); return false; } bufs[handle].taken_for_read = false; bufs[handle].used = length; bufs[handle].offset = offset; if ((offset + length) > eof_pos) eof_pos = offset + length; /* checksum on the fly */ for (std::list::iterator itCheckSum = checksums.begin(); itCheckSum != checksums.end(); itCheckSum++) { if ((itCheckSum->sum != NULL) && (offset == itCheckSum->offset)) { for (int i = handle; i < bufs_n; i++) { if (bufs[i].used != 0) { if (bufs[i].offset == itCheckSum->offset) { itCheckSum->sum->add(bufs[i].start, bufs[i].used); itCheckSum->offset += bufs[i].used; i = -1; itCheckSum->ready = true; } else if (itCheckSum->offset < bufs[i].offset) { itCheckSum->ready = false; } } } } } cond.broadcast(); lock.unlock(); return true; } bool DataBuffer::for_write() { if (bufs == NULL) return false; lock.lock(); for (int i = 0; i < bufs_n; i++) { if ((!bufs[i].taken_for_read) && (!bufs[i].taken_for_write) && (bufs[i].used != 0)) { lock.unlock(); return true; } } lock.unlock(); return false; } /* return true + buffer with data, return false in case of failure, or eof + no buffers claimed for read */ bool DataBuffer::for_write(int& handle, unsigned int& length, unsigned long long int& offset, bool wait) { lock.lock(); if (bufs == NULL) { lock.unlock(); return false; } for (;;) { if (error()) { 
/* internal/external errors - no need to continue */ lock.unlock(); return false; } bool have_for_read = false; bool have_unused = false; unsigned long long int min_offset = (unsigned long long int)(-1); handle = -1; for (int i = 0; i < bufs_n; i++) { if (bufs[i].taken_for_read) have_for_read = true; if ((!bufs[i].taken_for_read) && (!bufs[i].taken_for_write) && (bufs[i].used != 0)) { if (bufs[i].offset < min_offset) { min_offset = bufs[i].offset; handle = i; } } if (bufs[i].taken_for_read || (bufs[i].used == 0)) have_unused = true; } if (handle != -1) { bool keep_buffers = false; for (std::list::iterator itCheckSum = checksums.begin(); itCheckSum != checksums.end(); itCheckSum++) { if ((!itCheckSum->ready) && (bufs[handle].offset >= itCheckSum->offset)) { keep_buffers = true; break; } } if (keep_buffers) { /* try to keep buffers as long as possible for checksuming */ if (have_unused && (!eof_read_flag)) { /* still have chances to get that block */ if (!wait) { lock.unlock(); return false; } if (!cond_wait()) { lock.unlock(); return false; } continue; } } bufs[handle].taken_for_write = true; length = bufs[handle].used; offset = bufs[handle].offset; cond.broadcast(); lock.unlock(); return true; } if (eof_read_flag && (!have_for_read)) { lock.unlock(); return false; } /* suitable block not found - wait for changes or quit */ if (!wait) { lock.unlock(); return false; } if (!cond_wait()) { lock.unlock(); return false; } } lock.unlock(); return false; } bool DataBuffer::is_written(char *buf) { lock.lock(); for (int i = 0; i < bufs_n; i++) { if (bufs[i].start == buf) { lock.unlock(); return is_written(i); } } lock.unlock(); return false; } bool DataBuffer::is_notwritten(char *buf) { lock.lock(); for (int i = 0; i < bufs_n; i++) { if (bufs[i].start == buf) { lock.unlock(); return is_notwritten(i); } } lock.unlock(); return false; } bool DataBuffer::is_written(int handle) { lock.lock(); if (bufs == NULL) { lock.unlock(); return false; } if (handle >= bufs_n) { lock.unlock(); return false; } if (!bufs[handle].taken_for_write) { lock.unlock(); return false; } /* speed control */ if (!speed.transfer(bufs[handle].used)) if ((!(error_read_flag || error_write_flag)) && (!(eof_read_flag && eof_write_flag))) { error_transfer_flag = true; } bufs[handle].taken_for_write = false; bufs[handle].used = 0; bufs[handle].offset = 0; cond.broadcast(); lock.unlock(); return true; } bool DataBuffer::is_notwritten(int handle) { lock.lock(); if (bufs == NULL) { lock.unlock(); return false; } if (handle >= bufs_n) { lock.unlock(); return false; } if (!bufs[handle].taken_for_write) { lock.unlock(); return false; } bufs[handle].taken_for_write = false; cond.broadcast(); lock.unlock(); return true; } char* DataBuffer::operator[](int block) { lock.lock(); if ((block < 0) || (block >= bufs_n)) { lock.unlock(); return NULL; } char *tmp = bufs[block].start; lock.unlock(); return tmp; } bool DataBuffer::wait_any() { lock.lock(); bool res = cond_wait(); lock.unlock(); return res; } bool DataBuffer::wait_used() { lock.lock(); for (int i = 0; i < bufs_n; i++) { if ((bufs[i].taken_for_read) || (bufs[i].taken_for_write) || (bufs[i].used != 0)) { if (!cond_wait()) { lock.unlock(); return false; } i = -1; } } lock.unlock(); return true; } bool DataBuffer::wait_for_read() { lock.lock(); for (int i = 0; i < bufs_n; i++) { if (bufs[i].taken_for_read) { if (!cond_wait()) { lock.unlock(); return false; } i = -1; } } lock.unlock(); return true; } bool DataBuffer::wait_for_write() { lock.lock(); for (int i = 0; i < bufs_n; i++) { if 
(bufs[i].taken_for_write) { if (!cond_wait()) { lock.unlock(); return false; } i = -1; } } lock.unlock(); return true; } bool DataBuffer::checksum_valid() const { if (checksums.size() != 0) { return (checksums.begin()->ready && (checksums.begin()->offset == eof_pos)); } else { return false; } } bool DataBuffer::checksum_valid(int index) const { if (index < 0) return false; int i = 0; for (std::list::const_iterator itCheckSum = checksums.begin(); itCheckSum != checksums.end(); itCheckSum++) { if (index == i) return itCheckSum->ready; i++; } return false; } const CheckSum* DataBuffer::checksum_object() const { if (checksums.size() != 0) { return checksums.begin()->sum; } else { return NULL; } } const CheckSum* DataBuffer::checksum_object(int index) const { if (index < 0) return NULL; int i = 0; for (std::list::const_iterator itCheckSum = checksums.begin(); itCheckSum != checksums.end(); itCheckSum++) { if (index == i) return itCheckSum->sum; i++; } return NULL; } unsigned int DataBuffer::buffer_size() const { if (bufs == NULL) return 65536; unsigned int size = 0; for (int i = 0; i < bufs_n; i++) { if (size < bufs[i].size) size = bufs[i].size; } return size; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/DataCallback.h0000644000000000000000000000012412075551565023040 xustar000000000000000027 mtime=1358353269.148218 27 atime=1513200574.540702 30 ctime=1513200659.223737992 nordugrid-arc-5.4.2/src/hed/libs/data/DataCallback.h0000644000175000002070000000266412075551565023115 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_DATACALLBACK_H__ #define __ARC_DATACALLBACK_H__ namespace Arc { /// Callbacks to be used when there is not enough space on the local filesystem. /** * If DataPoint::StartWriting() tries to pre-allocate disk space but finds * that there is not enough to write the whole file, one of the 'cb' * functions here will be called with the required space passed as a * parameter. Users should define their own subclass of this class depending * on how they wish to free up space. Each callback method should return true * if the space was freed, false otherwise. This subclass should then be used * as a parameter in StartWriting(). 
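 *
 * A minimal sketch of such a subclass (the class name and the clean-up action
 * are illustrative only, not part of the library):
 * @code
 * class MySpaceCallback : public Arc::DataCallback {
 * public:
 *   virtual bool cb(unsigned long long int needed) {
 *     // try to free at least 'needed' bytes, e.g. by purging some local
 *     // scratch area; return true only if the space really was freed
 *     return false;
 *   }
 * };
 * @endcode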
* \ingroup data * \headerfile DataCallback.h arc/data/DataCallback.h */ class DataCallback { public: /// Construct a new DataCallback DataCallback() {} /// Empty destructor virtual ~DataCallback() {} /// Callback with int passed as parameter virtual bool cb(int) { return false; } /// Callback with unsigned int passed as parameter virtual bool cb(unsigned int) { return false; } /// Callback with long long int passed as parameter virtual bool cb(long long int) { return false; } /// Callback with unsigned long long int passed as parameter virtual bool cb(unsigned long long int) { return false; } }; } // namespace Arc #endif // __ARC_DATACALLBACK_H__ nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/DataPointDirect.cpp0000644000000000000000000000012412356764260024123 xustar000000000000000027 mtime=1404823728.113156 27 atime=1513200574.570702 30 ctime=1513200659.237738163 nordugrid-arc-5.4.2/src/hed/libs/data/DataPointDirect.cpp0000644000175000002070000001273712356764260024202 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include namespace Arc { DataPointDirect::DataPointDirect(const URL& url, const UserConfig& usercfg, PluginArgument* parg) : DataPoint(url, usercfg, parg), buffer(NULL), bufsize(-1), bufnum(1), local(false), readonly(true), linkable(false), is_secure(false), force_secure(true), force_passive(false), additional_checks(true), allow_out_of_order(false), range_start(0), range_end(0) { std::string optval; optval = url.Option("threads"); if (!optval.empty()) bufnum = stringtoi(optval); if (bufnum < 1) bufnum = 1; if (bufnum > MAX_PARALLEL_STREAMS) bufnum = MAX_PARALLEL_STREAMS; optval = url.Option("blocksize"); if (!optval.empty()) bufsize = stringtoull(optval); if (bufsize > MAX_BLOCK_SIZE) bufsize = MAX_BLOCK_SIZE; readonly = (url.Option("readonly", "yes") == "yes"); } DataPointDirect::~DataPointDirect() {} bool DataPointDirect::IsIndex() const { return false; } bool DataPointDirect::IsStageable() const { return false; } long long int DataPointDirect::BufSize() const { return bufsize; } int DataPointDirect::BufNum() const { return bufnum; } bool DataPointDirect::Local() const { return local; } bool DataPointDirect::ReadOnly() const { return readonly; } void DataPointDirect::ReadOutOfOrder(bool val) { allow_out_of_order = val; } bool DataPointDirect::WriteOutOfOrder() { return false; } void DataPointDirect::SetAdditionalChecks(bool val) { additional_checks = val; } bool DataPointDirect::GetAdditionalChecks() const { return additional_checks; } void DataPointDirect::SetSecure(bool val) { force_secure = val; } bool DataPointDirect::GetSecure() const { return is_secure; } void DataPointDirect::Passive(bool val) { force_passive = val; } void DataPointDirect::Range(unsigned long long int start, unsigned long long int end) { range_start = start; range_end = end; } DataStatus DataPointDirect::Resolve(bool) { return DataStatus(DataStatus::NotSupportedForDirectDataPointsError, EOPNOTSUPP); } DataStatus DataPointDirect::Resolve(bool source, const std::list& urls) { return DataStatus(DataStatus::NotSupportedForDirectDataPointsError, EOPNOTSUPP); } DataStatus DataPointDirect::PreRegister(bool, bool) { return DataStatus(DataStatus::NotSupportedForDirectDataPointsError, EOPNOTSUPP); } DataStatus DataPointDirect::PostRegister(bool) { return DataStatus(DataStatus::NotSupportedForDirectDataPointsError, EOPNOTSUPP); } DataStatus DataPointDirect::PreUnregister(bool) { return 
DataStatus(DataStatus::NotSupportedForDirectDataPointsError, EOPNOTSUPP); } DataStatus DataPointDirect::Unregister(bool) { return DataStatus(DataStatus::NotSupportedForDirectDataPointsError, EOPNOTSUPP); } bool DataPointDirect::AcceptsMeta() const { return false; } bool DataPointDirect::ProvidesMeta() const { return false; } bool DataPointDirect::Registered() const { return false; } const URL& DataPointDirect::CurrentLocation() const { return url; } const std::string& DataPointDirect::CurrentLocationMetadata() const { static const std::string empty; return empty; } DataPoint* DataPointDirect::CurrentLocationHandle() const { return const_cast (this); } DataStatus DataPointDirect::CompareLocationMetadata() const { return DataStatus(DataStatus::NotSupportedForDirectDataPointsError, EOPNOTSUPP); } bool DataPointDirect::NextLocation() { if (triesleft > 0) --triesleft; return (triesleft > 0); } bool DataPointDirect::LocationValid() const { return (triesleft > 0); } bool DataPointDirect::HaveLocations() const { return true; } bool DataPointDirect::LastLocation() { return (triesleft == 1 || triesleft == 0); } DataStatus DataPointDirect::AddLocation(const URL&, const std::string&) { return DataStatus(DataStatus::NotSupportedForDirectDataPointsError, EOPNOTSUPP); } DataStatus DataPointDirect::RemoveLocation() { return DataStatus(DataStatus::NotSupportedForDirectDataPointsError, EOPNOTSUPP); } DataStatus DataPointDirect::RemoveLocations(const DataPoint&) { return DataStatus(DataStatus::NotSupportedForDirectDataPointsError, EOPNOTSUPP); } DataStatus DataPointDirect::ClearLocations() { return DataStatus(DataStatus::NotSupportedForDirectDataPointsError, EOPNOTSUPP); } int DataPointDirect::AddCheckSumObject(CheckSum *cksum) { if(!cksum) return -1; cksum->start(); checksums.push_back(cksum); return checksums.size()-1; } const CheckSum* DataPointDirect::GetCheckSumObject(int index) const { if(index < 0) return NULL; if(index >= checksums.size()) return NULL; for(std::list::const_iterator cksum = checksums.begin(); cksum != checksums.end(); ++cksum) { if(!index) return *cksum; } return NULL; } DataStatus DataPointDirect::Stat(std::list& files, const std::list& urls, DataPointInfoType verb) { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/README0000644000000000000000000000012411037472457021260 xustar000000000000000027 mtime=1216247087.350292 27 atime=1513200574.569702 30 ctime=1513200659.216737906 nordugrid-arc-5.4.2/src/hed/libs/data/README0000644000175000002070000000023411037472457021324 0ustar00mockbuildmock00000000000000libdata2 library to handle file transfers mostly. It is a modular framework to handle DMC which implements the protocol-specific part of the data transfer. 
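A typical client goes through DataHandle, which picks and loads the right DMC for the URL scheme at run time. A minimal sketch of reading a file through the library (illustrative only; the URL is a placeholder and error handling is omitted):

    #include <arc/URL.h>
    #include <arc/UserConfig.h>
    #include <arc/data/DataBuffer.h>
    #include <arc/data/DataHandle.h>

    Arc::UserConfig usercfg;
    Arc::URL url("http://example.org/somefile");
    Arc::DataHandle handle(url, usercfg);
    Arc::DataBuffer buffer;
    if (handle && handle->StartReading(buffer)) {
      int n; unsigned int length; unsigned long long int offset;
      while (buffer.for_write(n, length, offset, true)) {
        // 'length' bytes of the file, starting at 'offset', are in buffer[n]
        buffer.is_written(n);
      }
      handle->StopReading();
    }

See DataPoint.h and DataBuffer.h for the full interfaces.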
nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/FileCacheHash.cpp0000644000000000000000000000012413124220162023472 xustar000000000000000027 mtime=1498488946.871067 27 atime=1513200574.561702 30 ctime=1513200659.246738273 nordugrid-arc-5.4.2/src/hed/libs/data/FileCacheHash.cpp0000644000175000002070000000262013124220162023537 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "FileCacheHash.h" namespace Arc { int FileCacheHash::MAX_MD5_LENGTH = 32; int FileCacheHash::MAX_SHA1_LENGTH = 40; #if (OPENSSL_VERSION_NUMBER < 0x10100000L) static EVP_MD_CTX* EVP_MD_CTX_new(void) { EVP_MD_CTX* ctx = (EVP_MD_CTX*)std::malloc(sizeof(EVP_MD_CTX)); if(ctx) { EVP_MD_CTX_init(ctx); } return ctx; } static void EVP_MD_CTX_free(EVP_MD_CTX* ctx) { if(ctx) { EVP_MD_CTX_cleanup(ctx); std::free(ctx); } } #endif std::string FileCacheHash::getHash(std::string url) { std::string res(""); /* * example borrowed from http://www.openssl.org/docs/crypto/EVP_DigestInit.html */ EVP_MD_CTX* mdctx = EVP_MD_CTX_new(); if(mdctx) { const EVP_MD *md = EVP_sha1(); // change to EVP_md5() for md5 hashes if(md) { char *mess1 = (char*)url.c_str(); unsigned char md_value[EVP_MAX_MD_SIZE]; unsigned int md_len, i; EVP_DigestInit_ex(mdctx, md, NULL); EVP_DigestUpdate(mdctx, mess1, strlen(mess1)); EVP_DigestFinal_ex(mdctx, md_value, &md_len); char result[3]; for (i = 0; i < md_len; i++) { snprintf(result, 3, "%02x", md_value[i]); res.append(result); } } EVP_MD_CTX_free(mdctx); } return res; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/DataStatus.h0000644000000000000000000000012412102474175022617 xustar000000000000000027 mtime=1359640701.979832 27 atime=1513200574.570702 30 ctime=1513200659.227738041 nordugrid-arc-5.4.2/src/hed/libs/data/DataStatus.h0000644000175000002070000002747712102474175022705 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_DATASTATUS_H__ #define __ARC_DATASTATUS_H__ #include #include #include #include #include namespace Arc { #define DataStatusRetryableBase (100) #define DataStatusErrnoBase 1000 #define EARCTRANSFERTIMEOUT (DataStatusErrnoBase + 1) // Transfer timed out #define EARCCHECKSUM (DataStatusErrnoBase + 2) // Checksum mismatch #define EARCLOGIC (DataStatusErrnoBase + 3) // Bad logic, eg calling StartWriting on a // DataPoint currently reading #define EARCRESINVAL (DataStatusErrnoBase + 4) // All results obtained from a service are invalid #define EARCSVCTMP (DataStatusErrnoBase + 5) // Temporary service error #define EARCSVCPERM (DataStatusErrnoBase + 6) // Permanent service error #define EARCUIDSWITCH (DataStatusErrnoBase + 7) // Error switching uid #define EARCREQUESTTIMEOUT (DataStatusErrnoBase + 8) // Request made to remote service timed out #define EARCOTHER (DataStatusErrnoBase + 9) // Other / unknown error #define DataStatusErrnoMax EARCOTHER /// Status code returned by many DataPoint methods. /** * A class to be used for return types of all major data handling methods. * It describes the outcome of the method and contains three fields: * DataStatusType describes in which operation the error occurred, Errno * describes why the error occurred and desc gives more detail if available. * Errno is an integer corresponding to error codes defined in errno.h plus * additional ARC-specific error codes defined here. 
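 *
 * A status can be constructed with one of these codes and inspected later,
 * for example (an illustrative sketch, not code taken from the library itself):
 * @code
 * Arc::DataStatus s(Arc::DataStatus::TransferError, EARCTRANSFERTIMEOUT,
 *                   "transfer took too long");
 * if (!s.Passed()) {
 *   std::cerr << std::string(s) << std::endl; // status, errno text and description
 *   if (s.Retryable()) {
 *     // the operation may be attempted again later
 *   }
 * }
 * @endcode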
* * For those DataPoints which natively support errno, it is safe to use code * like * @code * DataStatus s = someMethod(); * if (!s) { * logger.msg(ERROR, "someMethod failed: %s", StrError(errno)); * return DataStatus(DataStatus::ReadError, errno); * } * @endcode * since logger.msg() does not call any system calls that modify errno. * * \ingroup data * \headerfile DataStatus.h arc/data/DataStatus.h */ class DataStatus { public: /// Status codes /** * These codes describe in which operation an error occurred. Retryable * error codes are deprecated - the corresponding non-retryable error code * should be used with errno set to a retryable value. */ enum DataStatusType { // Order is important! Must be kept synchronised with status_string[] /// Operation completed successfully Success, /// Source is bad URL or can't be used due to some reason ReadAcquireError, /// Destination is bad URL or can't be used due to some reason WriteAcquireError, /// Resolving of index service URL for source failed ReadResolveError, /// Resolving of index service URL for destination failed WriteResolveError, /// Can't read from source ReadStartError, /// Can't write to destination WriteStartError, /// Failed while reading from source ReadError, /// Failed while writing to destination WriteError, /// Failed while transfering data (mostly timeout) TransferError, /// Failed while finishing reading from source ReadStopError, /// Failed while finishing writing to destination WriteStopError, /// First stage of registration of index service URL failed PreRegisterError, /// Last stage of registration of index service URL failed PostRegisterError, /// Unregistration of index service URL failed UnregisterError, /// Error in caching procedure CacheError, /// Error due to provided credentials are expired CredentialsExpiredError, /// Error deleting location or URL DeleteError, /// No valid location available NoLocationError, /// No valid location available LocationAlreadyExistsError, /// Operation has no sense for this kind of URL NotSupportedForDirectDataPointsError, /// Feature is unimplemented UnimplementedError, /// DataPoint is already reading IsReadingError, /// DataPoint is already writing IsWritingError, /// Access check failed CheckError, /// Directory listing failed ListError, /// @deprecated ListError with errno set to ENOTDIR should be used instead ListNonDirError, /// File/dir stating failed StatError, /// @deprecated StatError with errno set to ENOENT should be used instead StatNotPresentError, /// Object initialization failed NotInitializedError, /// Error in OS SystemError, /// Staging error StageError, /// Inconsistent metadata InconsistentMetadataError, /// Can't prepare source ReadPrepareError, /// Wait for source to be prepared ReadPrepareWait, /// Can't prepare destination WritePrepareError, /// Wait for destination to be prepared WritePrepareWait, /// Can't finish source ReadFinishError, /// Can't finish destination WriteFinishError, /// Can't create directory CreateDirectoryError, /// Can't rename URL RenameError, /// Data was already cached SuccessCached, /// Operation was cancelled successfully SuccessCancelled, /// General error which doesn't fit any other error GenericError, /// Undefined UnknownError, // These Retryable error codes are deprecated but kept for backwards // compatibility. They will be removed in a future major release. // Instead of these codes the corresponding non-retryable code should be // used with an errno, and this is used to determine whether the error is // retryable. 
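      // For example, a WriteError carrying errno EAGAIN or EARCTRANSFERTIMEOUT is
      // reported as retryable by Retryable(), while one carrying ENOENT or EACCES is not.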
ReadAcquireErrorRetryable = DataStatusRetryableBase+ReadAcquireError, ///< @deprecated WriteAcquireErrorRetryable = DataStatusRetryableBase+WriteAcquireError, ///< @deprecated ReadResolveErrorRetryable = DataStatusRetryableBase+ReadResolveError, ///< @deprecated WriteResolveErrorRetryable = DataStatusRetryableBase+WriteResolveError, ///< @deprecated ReadStartErrorRetryable = DataStatusRetryableBase+ReadStartError, ///< @deprecated WriteStartErrorRetryable = DataStatusRetryableBase+WriteStartError, ///< @deprecated ReadErrorRetryable = DataStatusRetryableBase+ReadError, ///< @deprecated WriteErrorRetryable = DataStatusRetryableBase+WriteError, ///< @deprecated TransferErrorRetryable = DataStatusRetryableBase+TransferError, ///< @deprecated ReadStopErrorRetryable = DataStatusRetryableBase+ReadStopError, ///< @deprecated WriteStopErrorRetryable = DataStatusRetryableBase+WriteStopError, ///< @deprecated PreRegisterErrorRetryable = DataStatusRetryableBase+PreRegisterError, ///< @deprecated PostRegisterErrorRetryable = DataStatusRetryableBase+PostRegisterError, ///< @deprecated UnregisterErrorRetryable = DataStatusRetryableBase+UnregisterError, ///< @deprecated CacheErrorRetryable = DataStatusRetryableBase+CacheError, ///< @deprecated DeleteErrorRetryable = DataStatusRetryableBase+DeleteError, ///< @deprecated CheckErrorRetryable = DataStatusRetryableBase+CheckError, ///< @deprecated ListErrorRetryable = DataStatusRetryableBase+ListError, ///< @deprecated StatErrorRetryable = DataStatusRetryableBase+StatError, ///< @deprecated StageErrorRetryable = DataStatusRetryableBase+StageError, ///< @deprecated ReadPrepareErrorRetryable = DataStatusRetryableBase+ReadPrepareError, ///< @deprecated WritePrepareErrorRetryable = DataStatusRetryableBase+WritePrepareError, ///< @deprecated ReadFinishErrorRetryable = DataStatusRetryableBase+ReadFinishError, ///< @deprecated WriteFinishErrorRetryable = DataStatusRetryableBase+WriteFinishError, ///< @deprecated CreateDirectoryErrorRetryable = DataStatusRetryableBase+CreateDirectoryError, ///< @deprecated RenameErrorRetryable = DataStatusRetryableBase+RenameError, ///< @deprecated GenericErrorRetryable = DataStatusRetryableBase+GenericError ///< @deprecated }; /// Constructor to use when errno-like information is not available. /** * \param status error location * \param desc error description */ DataStatus(const DataStatusType& status, std::string desc="") : status(status), Errno(0), desc(desc) { if (!Passed()) Errno = EARCOTHER; } /// Construct a new DataStatus with errno and optional text description. /** * If the status is an error condition then error_no must be set to a * non-zero value. * \param status error location * \param error_no errno * \param desc error description */ DataStatus(const DataStatusType& status, int error_no, const std::string& desc="") : status(status), Errno(error_no), desc(desc) {} /// Construct a new DataStatus with fields initialised to success states. DataStatus() : status(Success), Errno(0), desc("") {} /// Returns true if this status type matches s. bool operator==(const DataStatusType& s) { return status == s; } /// Returns true if this status type matches the status type of s. bool operator==(const DataStatus& s) { return status == s.status; } /// Returns true if this status type does not match s. bool operator!=(const DataStatusType& s) { return status != s; } /// Returns true if this status type does not match the status type of s. bool operator!=(const DataStatus& s) { return status != s.status; } /// Assignment operator. 
/** * Sets status type to s and errno to EARCOTHER if s is an error state. */ DataStatus operator=(const DataStatusType& s) { status = s; Errno = 0; if (!Passed()) Errno = EARCOTHER; return *this; } /// Returns true if status type is not a success value. bool operator!() const { return (status != Success) && (status != SuccessCached); } /// Returns true if status type is a success value. operator bool() const { return (status == Success) || (status == SuccessCached); } /// Returns true if no error occurred bool Passed() const { return ((status == Success) || (status == NotSupportedForDirectDataPointsError) || (status == ReadPrepareWait) || (status == WritePrepareWait) || (status == SuccessCached) || (status == SuccessCancelled)); } /// Returns true if the error was temporary and could be retried. /** * Retryable error numbers are EAGAIN, EBUSY, ETIMEDOUT, EARCSVCTMP, * EARCTRANSFERTIMEOUT, EARCCHECKSUM and EARCOTHER. */ bool Retryable() const; /// Set the error number. void SetErrno(int error_no) { Errno = error_no; } /// Get the error number. int GetErrno() const { return Errno; } /// Get text description of the error number. std::string GetStrErrno() const; /// Set a detailed description of the status, removing trailing new line if present. void SetDesc(const std::string& d) { desc = trim(d); } /// Get a detailed description of the status. std::string GetDesc() const { return desc; } /// Returns a human-friendly readable string with all error information. operator std::string(void) const; private: /// status code DataStatusType status; /// error number (values defined in errno.h) int Errno; /// description of failure std::string desc; }; /// Write a human-friendly readable string with all error information to o. /** \ingroup data */ inline std::ostream& operator<<(std::ostream& o, const DataStatus& d) { return (o << ((std::string)d)); } } // namespace Arc #endif // __ARC_DATASTATUS_H__ nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/DataPoint.h0000644000000000000000000000012413111074405022416 xustar000000000000000027 mtime=1495562501.418201 27 atime=1513200574.539702 30 ctime=1513200659.217737918 nordugrid-arc-5.4.2/src/hed/libs/data/DataPoint.h0000644000175000002070000010774213111074405022476 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- // Summary page for libarcdata doxygen module // Enclosed in Arc namespace so automatic linking to other classes works namespace Arc { /** * \defgroup data ARC data library (libarcdata) * * libarcdata is a library for access to data on the Grid. It provides a * uniform interface to several types of Grid storage and catalogs using * various protocols. The protocols usable on a given system depend on the * packages installed. The interface can be used to read, write, list, transfer * and delete data to and from storage systems and catalogs. * * The library uses ARC's dynamic plugin mechanism to load plugins for * specific protocols only when required at runtime. These plugins are * called Data Manager Components (DMCs). The DataHandle class takes care of * automatically loading the required DMC at runtime to create a DataPoint object * representing a resource accessible through a given protocol. DataHandle * should always be used instead of DataPoint directly. * * To create a new DMC for a protocol which is not yet supported see the * instruction and examples in the DataPoint class documentation. This * documentation also gives a complete overview of the interface. 
* * The following protocols are currently supported in standard distributions * of ARC. * * - File (%file://) - Regular local file system. * * - GridFTP (gsiftp://) - GridFTP is essentially the FTP protocol with GSI * security. Regular FTP can also be used. * * - HTTP(S/G) (%http://) - Hypertext Transfer Protocol. HTTP over SSL (HTTPS) * and HTTP over GSI (HTTPG) are also supported. * * - LDAP (ldap://) - Lightweight Directory Access Protocol. LDAP is used in * grids mainly to store information about grid services or resources rather * than to store data itself. * * - SRM (srm://) - The Storage Resource Manager (SRM) protocol allows access * to data distributed across physical storage through a unified namespace * and management interface. * * - XRootd (root://) - Protocol for data access across large scale storage * clusters. More information can be found at http://xrootd.slac.stanford.edu/ * * - ACIX (acix://) - The ARC Cache Index contains locations of cached files * in ARC CE caches. * * DataMover provides a simple high-level interface to copy files. Fine-grained * control over data transfer is shown in the following example: * * \include partial_copy.cpp * * And the same example in python * * \include partial_copy.py */ } // namespace Arc #ifndef __ARC_DATAPOINT_H__ #define __ARC_DATAPOINT_H__ #include #include #include #include #include #include #include #include #include #include #include namespace Arc { class Logger; class DataBuffer; class DataCallback; class XMLNode; class CheckSum; /// A DataPoint represents a data resource and is an abstraction of a URL. /** * DataPoint uses ARC's Plugin mechanism to dynamically load the required * Data Manager Component (DMC) when necessary. A DMC typically defines a * subclass of DataPoint (e.g. DataPointHTTP) and is responsible for a * specific protocol (e.g. http). DataPoints should not be used directly, * instead the DataHandle wrapper class should be used, which automatically * loads the correct DMC. Examples of how to use DataPoint methods are shown * in the DataHandle documentation. * * DataPoint defines methods for access to the data resource. To transfer * data between two DataPoints, DataMover::Transfer() can be used. * * There are two subclasses of DataPoint, DataPointDirect and DataPointIndex. * None of these three classes can be instantiated directly. * DataPointDirect and its subclasses handle "physical" resources through * protocols such as file, http and gsiftp. These classes implement methods * such as StartReading() and StartWriting(). DataPointIndex and its * subclasses handle resources such as indexes and catalogs and implement * methods like Resolve() and PreRegister(). * * When creating a new DMC, a subclass of either DataPointDirect or * DataPointIndex should be created, and the appropriate methods implemented. * DataPoint itself has no direct external dependencies, but plugins may * rely on third-party components. The new DMC must also add itself to the * list of available plugins and provide an Instance() method which returns * a new instance of itself, if the supplied arguments are valid for the * protocol. Here is an example skeleton implementation of a new DMC for * protocol MyProtocol which represents a physical resource accessible through * protocol my:// * \include DataPointMyProtocol.cpp * * \ingroup data * \headerfile DataPoint.h arc/data/DataPoint.h */ class DataPoint : public Plugin { public: /// Callback for use in 3rd party transfer. 
/** * Will be called periodically during the transfer with the number of bytes * transferred so far. * \param bytes_transferred the number of bytes transferred so far */ typedef void(*Callback3rdParty)(unsigned long long int bytes_transferred); /// Describes the latency to access this URL /** * For now this value is one of a small set specified * by the enumeration. In the future with more sophisticated * protocols or information it could be replaced by a more * fine-grained list of possibilities such as an int value. */ enum DataPointAccessLatency { /// URL can be accessed instantly ACCESS_LATENCY_ZERO, /// URL has low (but non-zero) access latency, for example staged from disk ACCESS_LATENCY_SMALL, /// URL has a large access latency, for example staged from tape ACCESS_LATENCY_LARGE }; /// Describes type of information about URL to request enum DataPointInfoType { INFO_TYPE_MINIMAL = 0, ///< Whatever protocol can get with no additional effort. INFO_TYPE_NAME = 1, ///< Only name of object (relative). INFO_TYPE_TYPE = 2, ///< Type of object - currently file or dir. INFO_TYPE_TIMES = 4, ///< Timestamps associated with object. INFO_TYPE_CONTENT = 8, ///< Metadata describing content, like size, checksum, etc. INFO_TYPE_ACCESS = 16, ///< Access control - ownership, permission, etc. INFO_TYPE_STRUCT = 32, ///< Fine structure - replicas, transfer locations, redirections. INFO_TYPE_REST = 64, ///< All the other parameters. INFO_TYPE_ALL = 127 ///< All the parameters. }; /// Perform third party transfer. /** * Credentials are delegated to the destination and it pulls data from the * source, i.e. data flows directly between source and destination instead * of through the client. A callback function can be supplied to monitor * progress. This method blocks until the transfer is complete. It is * static because third party transfer requires different DMC plugins than * those loaded by DataHandle for the same protocol. The third party * transfer plugins are loaded internally in this method. * \param source Source URL to pull data from * \param destination Destination URL which pulls data to itself * \param usercfg Configuration information * \param callback Optional monitoring callback * \return outcome of transfer */ static DataStatus Transfer3rdParty(const URL& source, const URL& destination, const UserConfig& usercfg, Callback3rdParty callback = NULL); /// Destructor. virtual ~DataPoint(); /// Returns the URL that was passed to the constructor. virtual const URL& GetURL() const; /// Returns the UserConfig that was passed to the constructor. virtual const UserConfig& GetUserConfig() const; /// Assigns new URL. /** * The main purpose of this method is to reuse an existing connection for * accessing a different object on the same server. The DataPoint * implementation does not have to implement this method. If the supplied * URL is not suitable or method is not implemented false is returned. * \param url New URL * \return true if switching to new URL is supported and succeeded */ virtual bool SetURL(const URL& url); /// Returns a string representation of the DataPoint. virtual std::string str() const; /// Is DataPoint valid? virtual operator bool() const; /// Is DataPoint valid? virtual bool operator!() const; /// Prepare DataPoint for reading. /** * This method should be implemented by protocols which require * preparation or staging of physical files for reading. It can act * synchronously or asynchronously (if protocol supports it). 
In the * first case the method will block until the file is prepared or the * specified timeout has passed. In the second case the method can * return with a ReadPrepareWait status before the file is prepared. * The caller should then wait some time (a hint from the remote service * may be given in wait_time) and call PrepareReading() again to poll for * the preparation status, until the file is prepared. In this case it is * also up to the caller to decide when the request has taken too long * and if so cancel it by calling FinishReading(). * When file preparation has finished, the physical file(s) * to read from can be found from TransferLocations(). * \param timeout If non-zero, this method will block until either the * file has been prepared successfully or the timeout has passed. A zero * value means that the caller would like to call and poll for status. * \param wait_time If timeout is zero (caller would like asynchronous * operation) and ReadPrepareWait is returned, a hint for how long to wait * before a subsequent call may be given in wait_time. * \return Status of the operation */ virtual DataStatus PrepareReading(unsigned int timeout, unsigned int& wait_time); /// Prepare DataPoint for writing. /** * This method should be implemented by protocols which require * preparation of physical files for writing. It can act * synchronously or asynchronously (if protocol supports it). In the * first case the method will block until the file is prepared or the * specified timeout has passed. In the second case the method can * return with a WritePrepareWait status before the file is prepared. * The caller should then wait some time (a hint from the remote service * may be given in wait_time) and call PrepareWriting() again to poll for * the preparation status, until the file is prepared. In this case it is * also up to the caller to decide when the request has taken too long * and if so cancel or abort it by calling FinishWriting(true). * When file preparation has finished, the physical file(s) * to write to can be found from TransferLocations(). * \param timeout If non-zero, this method will block until either the * file has been prepared successfully or the timeout has passed. A zero * value means that the caller would like to call and poll for status. * \param wait_time If timeout is zero (caller would like asynchronous * operation) and WritePrepareWait is returned, a hint for how long to wait * before a subsequent call may be given in wait_time. * \return Status of the operation */ virtual DataStatus PrepareWriting(unsigned int timeout, unsigned int& wait_time); /// Start reading data from URL. /** * A separate thread to transfer data will be created. No other * operation can be performed while reading is in progress. Progress of the * transfer should be followed using the DataBuffer object. * \param buffer operation will use this buffer to put * information into. Should not be destroyed before StopReading() * was called and returned. If StopReading() is not called explicitly * to release buffer it will be released in destructor of DataPoint * which also usually calls StopReading(). * \return success if a thread was successfully started to start reading */ virtual DataStatus StartReading(DataBuffer& buffer) = 0; /// Start writing data to URL. /** * A separate thread to transfer data will be created. No other * operation can be performed while writing is in progress. Progress of the * transfer should be followed using the DataBuffer object. 
 * \param buffer operation will use this buffer to put
 * information into. Should not be destroyed before StopReading()
 * was called and returned. If StopReading() is not called explicitly
 * to release buffer it will be released in destructor of DataPoint
 * which also usually calls StopReading().
 * \return success if a thread was successfully started to start reading
 */
virtual DataStatus StartReading(DataBuffer& buffer) = 0;

/// Start writing data to URL.
/**
 * A separate thread to transfer data will be created. No other
 * operation can be performed while writing is in progress. Progress of the
 * transfer should be followed using the DataBuffer object.
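 *
 * When writing, the caller feeds data through the buffer roughly as follows
 * (a sketch only; eof_read() is assumed to be the DataBuffer call that marks
 * the end of the supplied data):
 * @code
 * int h; unsigned int len;
 * if (buffer.for_read(h, len, true)) {   // claim a free block
 *   memcpy(buffer[h], "hello", 5);       // place the payload in it
 *   buffer.is_read(h, 5, 0);             // 5 bytes at file offset 0
 * }
 * buffer.eof_read(true);                 // assumed: no more data will follow
 * @endcode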
* \param buffer operation will use this buffer to get * information from. Should not be destroyed before StopWriting() * was called and returned. If StopWriting() is not called explicitly * to release buffer it will be released in destructor of DataPoint * which also usually calls StopWriting(). * \param space_cb callback which is called if there is not * enough space to store data. May not implemented for all * protocols. * \return success if a thread was successfully started to start writing */ virtual DataStatus StartWriting(DataBuffer& buffer, DataCallback *space_cb = NULL) = 0; /// Stop reading. /** * Must be called after corresponding StartReading() method, * either after all data is transferred or to cancel transfer. * Use buffer object to find out when data is transferred. * \return outcome of stopping reading (not outcome of transfer itself) */ virtual DataStatus StopReading() = 0; /// Stop writing. /** * Must be called after corresponding StartWriting() method, * either after all data is transferred or to cancel transfer. * Use buffer object to find out when data is transferred. * \return outcome of stopping writing (not outcome of transfer itself) */ virtual DataStatus StopWriting() = 0; /// Finish reading from the URL. /** * Must be called after transfer of physical file has completed if * PrepareReading() was called, to free resources, release requests that * were made during preparation etc. * \param error If true then action is taken depending on the error. * \return success if source was released properly */ virtual DataStatus FinishReading(bool error = false); /// Finish writing to the URL. /** * Must be called after transfer of physical file has completed if * PrepareWriting() was called, to free resources, release requests that * were made during preparation etc. * \param error if true then action is taken depending on the error, for * example cleaning the file from the storage * \return success if destination was released properly */ virtual DataStatus FinishWriting(bool error = false); /// Query the DataPoint to check if object is accessible. /** * If check_meta is true this method will also try to provide meta * information about the object. Note that for many protocols an access * check also provides meta information and so check_meta may have no * effect. * \param check_meta If true then the method will try to retrieve meta data * during the check. * \return success if the object is accessible by the caller. */ virtual DataStatus Check(bool check_meta) = 0; /// Remove/delete object at URL. virtual DataStatus Remove() = 0; /// Retrieve information about this object /** * If the DataPoint represents a directory or something similar, * information about the object itself and not its contents will * be obtained. * \param file will contain object name and requested attributes. * There may be more attributes than requested. There may be less * if object can't provide particular information. * \param verb defines attribute types which method must try to * retrieve. It is not a failure if some attributes could not * be retrieved due to limitation of protocol or access control. * \return success if any information could be retrieved */ virtual DataStatus Stat(FileInfo& file, DataPointInfoType verb = INFO_TYPE_ALL) = 0; /// Retrieve information about several DataPoints. /** * If a DataPoint represents a directory or something similar, * information about the object itself and not its contents will * be obtained. 
This method can use bulk operations if the protocol * supports it. The protocols and hosts of all the DataPoints in * urls must be the same and the same as this DataPoint's protocol * and host. This method can be called on any of the urls, for * example urls.front()->Stat(files, urls); * Calling this method with an empty list of urls returns success if * the protocol supports bulk Stat, and an error if it does not and this * can be used as a check for bulk support. * \param files will contain objects' names and requested attributes. * There may be more attributes than requested. There may be less * if objects can't provide particular information. The order of this * list matches the order of urls. If a stat of any url fails then * the corresponding FileInfo in this list will evaluate to false. * \param urls list of DataPoints to stat. Protocols and hosts must * match and match this DataPoint's protocol and host. * \param verb defines attribute types which method must try to * retrieve. It is not a failure if some attributes could not * be retrieved due to limitation of protocol or access control. * \return success if any information could be retrieved for any DataPoint */ virtual DataStatus Stat(std::list& files, const std::list& urls, DataPointInfoType verb = INFO_TYPE_ALL) = 0; /// List hierarchical content of this object. /** * If the DataPoint represents a directory or something similar its * contents will be listed and put into files. If the DataPoint is file- * like an error will be returned. * \param files will contain list of file names and requested * attributes. There may be more attributes than requested. There * may be less if object can't provide particular information. * \param verb defines attribute types which method must try to * retrieve. It is not a failure if some attributes could not * be retrieved due to limitation of protocol or access control. * \return success if DataPoint is a directory-like object and could be * listed. */ virtual DataStatus List(std::list& files, DataPointInfoType verb = INFO_TYPE_ALL) = 0; /// Create a directory. /** * If the protocol supports it, this method creates the last directory * in the path to the URL. It assumes the last component of the path is a * file-like object and not a directory itself, unless the path ends in a * directory separator. If with_parents is true then all missing parent * directories in the path will also be created. The access control on the * new directories is protocol-specific and may vary depending on protocol. * \param with_parents If true then all missing directories in the path * are created * \return success if the directory was created */ virtual DataStatus CreateDirectory(bool with_parents=false) = 0; /// Rename a URL. /** * This method renames the file or directory specified in the constructor * to the new name specified in newurl. It only performs namespace * operations using the paths of the two URLs and in general ignores any * differences in protocol and host between them. It is assumed that checks * that the URLs are consistent are done by the caller of this method. * This method does not do any data transfer and is only implemented for * protocols which support renaming as an atomic namespace operation. * \param newurl The new name for the URL * \return success if the object was renamed */ virtual DataStatus Rename(const URL& newurl) = 0; /// Allow/disallow DataPoint to read data out of order. 
/** * If set to true then data may be read from source out of order or in * parallel from multiple threads. For a transfer between two DataPoints * this should only be set to true if WriteOutOfOrder() returns true for * the destination. Only certain protocols support this option. * \param v true if allowed (default is false). */ virtual void ReadOutOfOrder(bool v) = 0; /// Returns true if DataPoint supports receiving data out of order during writing. virtual bool WriteOutOfOrder() = 0; /// Allow/disallow additional checks on a source DataPoint before transfer /** * If set to true, extra checks will be performed in DataMover::Transfer() * before data transfer starts on for example existence of the source file * (and probably other checks too). * \param v true if allowed (default is true). */ virtual void SetAdditionalChecks(bool v) = 0; /// Returns true unless SetAdditionalChecks() was set to false. virtual bool GetAdditionalChecks() const = 0; /// Allow/disallow heavy security (data encryption) during data transfer. /** * \param v true if allowed (default depends on protocol). */ virtual void SetSecure(bool v) = 0; /// Returns true if heavy security during data transfer is allowed. virtual bool GetSecure() const = 0; /// Set passive transfers for FTP-like protocols. /** * \param v true if passive should be used. */ virtual void Passive(bool v) = 0; /// Returns reason of transfer failure, as reported by callbacks. /** * This could be different from the failure returned by the methods themselves. */ virtual DataStatus GetFailureReason(void) const; /// Set range of bytes to retrieve. /** * Default values correspond to whole file. Both start and end bytes are * included in the range, i.e. start - end + 1 bytes will be read. * \param start byte to start from * \param end byte to end at */ virtual void Range(unsigned long long int start = 0, unsigned long long int end = 0) = 0; /// Resolves index service URL into list of ordinary URLs. /** * Also obtains meta information about the file if possible. Resolve should * be called for both source and destination URLs before a transfer. If * source is true an error is returned if the file does not exist. * \param source true if DataPoint object represents source of information. * \return success if DataPoint was successfully resolved */ virtual DataStatus Resolve(bool source) = 0; /// Resolves several index service URLs. /** * Can use bulk calls if protocol allows. The protocols and hosts of all * the DataPoints in urls must be the same and the same as this DataPoint's * protocol and host. This method can be called on any of the urls, for * example urls.front()->Resolve(true, urls); * \param source true if DataPoint objects represent source of information * \param urls List of DataPoints to resolve. Protocols and hosts must * match and match this DataPoint's protocol and host. * \return success if any DataPoint was successfully resolved */ virtual DataStatus Resolve(bool source, const std::list& urls) = 0; /// Returns true if file is registered in indexing service (only known after Resolve()) virtual bool Registered() const = 0; /// Index service pre-registration. /** * This function registers the physical location of a file into an indexing * service. It should be called *before* the actual transfer to that * location happens. * \param replication if true, the file is being replicated between two * locations registered in the indexing service under the same name. * \param force if true, perform registration of a new file even if it * already exists. 
Should be used to fix failures in indexing service. * \return success if pre-registration succeeded */ virtual DataStatus PreRegister(bool replication, bool force = false) = 0; /// Index service post-registration. /** * Used for same purpose as PreRegister. Should be called after actual * transfer of file successfully finished to finalise registration in an * index service. * \param replication if true, the file is being replicated between two * locations registered in Indexing Service under the same name. * \return success if post-registration succeeded */ virtual DataStatus PostRegister(bool replication) = 0; /// Index service pre-unregistration. /** * Should be called if file transfer failed. It removes changes made * by PreRegister(). * \param replication if true, the file is being replicated between two * locations registered in Indexing Service under the same name. * \return success if pre-unregistration succeeded */ virtual DataStatus PreUnregister(bool replication) = 0; /// Index service unregistration. /** * Remove information about file registered in indexing service. * \param all if true, information about file itself is (LFN) is * removed. Otherwise only particular physical instance in * CurrentLocation() is unregistered. * \return success if unregistration succeeded */ virtual DataStatus Unregister(bool all) = 0; /// Check if meta-information 'size' is available. virtual bool CheckSize() const; /// Set value of meta-information 'size'. virtual void SetSize(const unsigned long long int val); /// Get value of meta-information 'size'. virtual unsigned long long int GetSize() const; /// Check if meta-information 'checksum' is available. virtual bool CheckCheckSum() const; /// Set value of meta-information 'checksum'. virtual void SetCheckSum(const std::string& val); /// Get value of meta-information 'checksum'. virtual const std::string& GetCheckSum() const; /// Default checksum type (varies by protocol) virtual const std::string DefaultCheckSum() const; /// Check if meta-information 'modification time' is available. virtual bool CheckModified() const; /// Set value of meta-information 'modification time'. virtual void SetModified(const Time& val); /// Get value of meta-information 'modification time'. virtual const Time& GetModified() const; /// Check if meta-information 'validity time' is available. virtual bool CheckValid() const; /// Set value of meta-information 'validity time'. virtual void SetValid(const Time& val); /// Get value of meta-information 'validity time'. virtual const Time& GetValid() const; /// Set value of meta-information 'access latency' virtual void SetAccessLatency(const DataPointAccessLatency& latency); /// Get value of meta-information 'access latency' virtual DataPointAccessLatency GetAccessLatency() const; /// Get suggested buffer size for transfers. virtual long long int BufSize() const = 0; /// Get suggested number of buffers for transfers. virtual int BufNum() const = 0; /// Returns true if file is cacheable. virtual bool Cache() const; /// Returns true if file is local, e.g. file:// urls. virtual bool Local() const = 0; /// Returns true if file is readonly. virtual bool ReadOnly() const = 0; /// Returns number of retries left. virtual int GetTries() const; /// Set number of retries. virtual void SetTries(const int n); /// Decrease number of retries left. 
virtual void NextTry(); /// Returns true if some kind of credentials are needed to use this DataPoint virtual bool RequiresCredentials() const; /// Returns true if credentials must be stored in files. /** * Some protocols require that credentials exist in files (in which case * this method returns true), whereas others can use credentials in memory * (then this method returns false). * \since Added in 4.0.0. */ virtual bool RequiresCredentialsInFile() const; /// Check if URL is an Indexing Service. virtual bool IsIndex() const = 0; /// Check if URL should be staged or queried for Transport URL (TURL) virtual bool IsStageable() const; /// Check if endpoint can have any use from meta information. virtual bool AcceptsMeta() const = 0; /// Check if endpoint can provide at least some meta information directly. virtual bool ProvidesMeta() const = 0; /// Copy meta information from another object. /** * Already defined values are not overwritten. * \param p object from which information is taken. */ virtual void SetMeta(const DataPoint& p); /// Reset meta information to default (undefined) values. /** * \param p object from which information is taken. */ virtual void ResetMeta(); /// Compare meta information from another object. /** * Undefined values are not used for comparison. * \param p object to which to compare. */ virtual bool CompareMeta(const DataPoint& p) const; /// Returns physical file(s) to read/write, if different from CurrentLocation() /** * To be used with protocols which re-direct to different URLs such as * Transport URLs (TURLs). The list is initially filled by PrepareReading * and PrepareWriting. If this list is non-empty then real transfer * should use a URL from this list. It is up to the caller to choose the * best URL and instantiate new DataPoint for handling it. * For consistency protocols which do not require redirections return * original URL. * For protocols which need redirection calling StartReading and StartWriting * will use first URL in the list. */ virtual std::vector TransferLocations() const; /// Clear list of physical file(s) to read/write. /** * To be used with protocols which re-direct to different URLs such as * Transport URLs (TURLs). Has no effect for other protocols. * \since Added in 4.0.0. */ virtual void ClearTransferLocations() {}; /// Returns current (resolved) URL. virtual const URL& CurrentLocation() const = 0; /// Returns meta information used to create current URL. /** * Usage differs between different indexing services. */ virtual const std::string& CurrentLocationMetadata() const = 0; /// Returns a pointer to the DataPoint representing the current location. virtual DataPoint* CurrentLocationHandle() const = 0; /// Compare metadata of DataPoint and current location. /** * \return inconsistency error or error encountered during operation, or * success */ virtual DataStatus CompareLocationMetadata() const = 0; /// Switch to next location in list of URLs. /** * At last location switch to first if number of allowed retries is not * exceeded. * \return false if no retries left. */ virtual bool NextLocation() = 0; /// Returns false no more locations are left and out of retries. virtual bool LocationValid() const = 0; /// Returns true if the current location is the last. virtual bool LastLocation() = 0; /// Returns true if number of resolved URLs is not 0. virtual bool HaveLocations() const = 0; /// Add URL representing physical replica to list of locations. /** * \param url Location URL to add. * \param meta Location meta information. 
* \return LocationAlreadyExistsError if location already exists, otherwise * success */ virtual DataStatus AddLocation(const URL& url, const std::string& meta) = 0; /// Remove current URL from list virtual DataStatus RemoveLocation() = 0; /// Remove locations present in another DataPoint object virtual DataStatus RemoveLocations(const DataPoint& p) = 0; /// Remove all locations virtual DataStatus ClearLocations() = 0; /// Add a checksum object which will compute checksum during data transfer. /** * \param cksum object which will compute checksum. Should not be destroyed * until DataPointer itself. * \return integer position in the list of checksum objects. */ virtual int AddCheckSumObject(CheckSum *cksum) = 0; /// Get CheckSum object at given position in list virtual const CheckSum* GetCheckSumObject(int index) const = 0; /// Sort locations according to the specified pattern and URLMap. /** * See DataMover::set_preferred_pattern for a more detailed explanation of * pattern matching. Locations present in url_map are preferred over * others. * \param pattern a set of strings, separated by |, to match against. * \param url_map map of URLs to local URLs */ virtual void SortLocations(const std::string& pattern, const URLMap& url_map) = 0; /// Add URL options to this DataPoint's URL object. /** * Invalid options for the specific DataPoint instance will not be added. * \param options map of option, value pairs */ virtual void AddURLOptions(const std::map& options); protected: /// URL supplied in constructor. URL url; /// UserConfig supplied in constructor. const UserConfig usercfg; // attributes /// Size of object represented by DataPoint. unsigned long long int size; /// Checksum of object represented by DataPoint. std::string checksum; /// Modification time of object represented by DataPoint. Time modified; /// Validity time of object represented by DataPoint. Time valid; /// Access latency of object represented by DataPoint. DataPointAccessLatency access_latency; /// Retries left for data transfer. int triesleft; /// Result of data read/write carried out in separate thread. DataStatus failure_code; /* filled by callback methods */ /// Whether this DataPoint is cacheable. bool cache; /// Whether this DataPoint requires staging. bool stageable; /// Valid URL options. Subclasses should add their own specific options to this list. std::set valid_url_options; /// Logger object static Logger logger; /// Constructor. /** * Constructor is protected because DataPoints should not be created * directly. Subclasses should however call this in their constructors to * set various common attributes. * \param url The URL representing the DataPoint * \param usercfg User configuration object * \param parg plugin argument */ DataPoint(const URL& url, const UserConfig& usercfg, PluginArgument* parg); /// Perform third party transfer. /** * This method is protected because the static version should be used * instead to load the correct DMC plugin for third party transfer. * \param source Source URL to pull data from * \param destination Destination URL which pulls data to itself * \param callback Optional monitoring callback * \return outcome of transfer */ virtual DataStatus Transfer3rdParty(const URL& source, const URL& destination, Callback3rdParty callback = NULL); }; /** \cond Class used by DataHandle to load the required DMC. 
*/ class DataPointLoader : public Loader { private: DataPointLoader(); ~DataPointLoader(); DataPoint* load(const URL& url, const UserConfig& usercfg); friend class DataHandle; }; /** \endcond */ /** \cond Class representing the arguments passed to DMC plugins. */ class DataPointPluginArgument : public PluginArgument { public: DataPointPluginArgument(const URL& url, const UserConfig& usercfg) : url(url), usercfg(usercfg) {} ~DataPointPluginArgument() {} operator const URL&() { return url; } operator const UserConfig&() { return usercfg; } private: const URL& url; const UserConfig& usercfg; }; /** \endcond */ } // namespace Arc #endif // __ARC_DATAPOINT_H__ nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/cache-clean.in0000644000000000000000000000012712457662275023064 xustar000000000000000027 mtime=1421829309.071965 30 atime=1513200647.993600643 30 ctime=1513200659.233738114 nordugrid-arc-5.4.2/src/hed/libs/data/cache-clean.in0000755000175000002070000003707012457662275023140 0ustar00mockbuildmock00000000000000#!/usr/bin/perl -w # # Copyright 2008 Niklas Edmundsson , # Tomas Ögren , # David Cameron # # Originally developed by Niklas Edmundsson and Tomas Ögren as # cleanbyage. Modified, renamed cache-clean and maintained # for ARC by David Cameron. # # Released under Apache License Version 2.0 # use Sys::Hostname; use File::Find (); use File::Path; use Getopt::Std; use Fcntl ':mode'; use DirHandle; use File::Basename; use strict; use warnings; BEGIN { unshift @INC, dirname($0).'/@pkgdatadir_rel_to_pkglibexecdir@'; } use ConfigCentral; # Set the variable $File::Find::dont_use_nlink if you're using AFS, # since AFS cheats. # for the convenience of &wanted calls, including -eval statements: use vars qw/*name *dir *prune/; *name = *File::Find::name; *dir = *File::Find::dir; *prune = *File::Find::prune; sub wanted; sub debug; sub printsize; sub diskspace; my(%opts); my $configfile; # max/min used percentage my $maxusedpercent = 0; my $minusedpercent = 0; my $expirytime = 0; my %files; my %expiredfiles; my $totsize = 0; my $totlocksize = 0; my $totlockfiles = 0; my $debuglevel = 'INFO'; LogUtils::level($debuglevel); LogUtils::timestamps(1); my $log = LogUtils->getLogger('cache-clean'); my $currenttime = time(); getopts('hsSc:m:M:E:f:D:', \%opts); if(defined($opts{c})) { $configfile = $opts{c}; } if(defined($opts{M})) { $maxusedpercent = $opts{M}; } if(defined($opts{m})) { $minusedpercent = $opts{m}; } if(defined($opts{D})) { $debuglevel = $opts{D}; LogUtils::level($debuglevel); } if(defined($opts{s})) { $debuglevel = 'ERROR'; LogUtils::level($debuglevel); } if($maxusedpercent < 0 || $maxusedpercent > 100) { die "Bad value for -M: $maxusedpercent\n"; } if($minusedpercent < 0 || $minusedpercent > 100) { die "Bad value for -m: $minusedpercent\n"; } if($minusedpercent > $maxusedpercent) { die "-M can't be smaller than -m (now $maxusedpercent/$minusedpercent)\n"; } if(defined($opts{E})) { if ($opts{E} =~ m/^(\d+)(\D)$/) { if ($2 eq 'd') { $expirytime = $1 * 86400; } elsif ($2 eq 'h') { $expirytime = $1 * 3600; } elsif ($2 eq 'm') { $expirytime = $1 * 60; } elsif ($2 eq 's') { $expirytime = $1; } else { die "Bad format in -E option value\n"; } } elsif ($opts{E} =~ m/^\d+$/) { $expirytime = $opts{E}; } else { die "Bad format in -E option value\n"; } } sub usage { print < -M ] [-E N] [-D debug_level] [-f space_command] [ -c | [ [...]] ] -h - This help -s - Statistics mode, show cache usage stats, dont delete anything -S - Calculate cache size rather than using used file system space -c - path to an 
A-REX config file, xml or ini format -M NN - Maximum usage of file system. When to start cleaning the cache (percent) -m NN - Minimum usage of file system. When to stop cleaning cache (percent) -E N - Delete all files whose access time is older than N. Examples of N are 1800, 90s, 24h, 30d (default is seconds) -f command - Path and optionally arguments to a command which outputs "total_bytes used_bytes" of the file system the cache is on. The cache dir is passed as an argument to this command. -D level - Debug level, FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG. Default is INFO Caches are given by dir1, dir2.. or taken from the config file specified by -c or ARC_CONFIG EOH exit 1; } usage() if(defined($opts{'h'}) || ((!defined($opts{'M'}) || !defined($opts{'m'})) && !defined($opts{'E'}) && !defined($opts{'s'}))); if (!$configfile && $ENV{'ARC_CONFIG'} && -e $ENV{'ARC_CONFIG'}) { $configfile = $ENV{'ARC_CONFIG'}; } $log->info('Cache cleaning started'); my @caches = @ARGV; if (!@caches) { die "No config file found and no caches specified\n" unless $configfile; my $config = ConfigCentral::parseConfig($configfile); die "Failed parsing A-REX config file '$configfile'\n" unless $config; die "No users set up in config file '$configfile'\n" unless $config->{control} and ref $config->{control} eq 'HASH'; for my $control (values %{$config->{control}}) { next unless ref $control eq 'HASH'; next unless $control->{cachedir} and ref $control->{cachedir} eq 'ARRAY'; for (@{$control->{cachedir}}) { $log->warning("\n Warning: cache-clean cannot deal with substitutions - $_") and next if /%/; $log->warning("\n Warning: ignoring malformed cache location - $_\n") and next unless m{^(/\S+)}; push @caches, $1; } } die "No caches found in config file '$configfile'\n" unless @caches; } # ConfigCentral sets debug level to level in conf file, so we have to reset it here LogUtils::level($debuglevel); foreach my $filesystem (@caches) { $filesystem =~ s|/+$|| unless $filesystem eq "/"; next if $filesystem eq ""; if ($filesystem =~ /%/) { $log->warning("$filesystem: Warning: cache-clean cannot deal with substitutions"); next; } if (! -d $filesystem || ! 
-d $filesystem."/data") { $log->info("$filesystem: Cache is empty"); next; } # follow sym links to real filesystem my $symlinkdest = $filesystem; while ($symlinkdest) { $filesystem = $symlinkdest; $symlinkdest = readlink($symlinkdest); $symlinkdest =~ s|/+$|| if $symlinkdest; } my $fsvalues = diskspace($filesystem); if(!($fsvalues)) { $log->warning("Unable to stat $filesystem"); next; } my $fssize = $fsvalues->{total}; my $fsused = $fsvalues->{used}; my $maxfbytes=$fssize*$maxusedpercent/100; $log->info(join("", "$filesystem: used space ", printsize($fsused), " / ", printsize($fssize), " (", sprintf("%.2f",100*$fsused/$fssize), "%)")); if ($expirytime == 0 && $fsused < $maxfbytes && !$opts{'s'}) { $log->info(join("", "Used space is lower than upper limit (", $maxusedpercent, "%)")); next; } my $minfbytes=$fssize*$minusedpercent/100; %files = (); %expiredfiles = (); $totsize = 0; $totlocksize = 0; $totlockfiles = 0; File::Find::find({wanted => \&wanted}, $filesystem."/data"); if($opts{'s'}) { my %allfiles = (%files, %expiredfiles); print "\nUsage statistics: $filesystem\n"; print "Total deletable files found: ",scalar keys %allfiles," ($totlockfiles files locked or in use)\n"; print "Total size of deletable files found: ",printsize($totsize)," (",printsize($totlocksize)," locked or in use)\n"; print "Used space on file system: ",printsize($fsused)," / ",printsize($fssize), " (",sprintf("%.2f",100*$fsused/$fssize),"%)\n"; my $increment = $totsize / 10; if($increment < 1) { print "Total size too small to show usage histogram\n"; next; } printf "%-21s %-25s %s\n", "At size (% of total)", "Newest file", "Oldest file"; my $nextinc = $increment; my $accumulated = 0; my ($newatime, $lastatime); foreach my $fil (sort { $allfiles{$b}{atime} <=> $allfiles{$a}{atime} } keys %allfiles) { $accumulated += $allfiles{$fil}{size}; if(!$newatime) { $newatime = $allfiles{$fil}{atime}; } if($accumulated > $nextinc) { printf "%-21s %-25s %s\n", printsize($accumulated)." (".int(($accumulated/$totsize)*100)."%)", scalar localtime($newatime), scalar localtime($allfiles{$fil}{atime}); while($nextinc < $accumulated) { $nextinc += $increment; } $newatime = undef; $lastatime = undef; } else { $lastatime = $allfiles{$fil}{atime}; } } printf "%-21s %-25s %s\n", printsize($accumulated)." (100%)", "-", scalar localtime($lastatime) if($lastatime); next; } # remove expired files if ($expirytime > 0) { foreach my $fil (keys %expiredfiles) { next if (-d "$fil"); if (-e "$fil.lock") { my $atime = (stat("$fil.lock"))[8]; next if ( defined $atime && ($currenttime - $atime) <= 86400); unlink "$fil.lock"; } if ( unlink $fil ) { $fsused-=$expiredfiles{$fil}{size}; if (defined($opts{'D'}) && -e "$fil.meta") { open FILE, "$fil.meta"; my @lines = ; close FILE; my @values = split(' ', $lines[0]); $log->verbose(join("", "Deleting expired file: $fil atime: ", scalar localtime($expiredfiles{$fil}{atime}), " size: $expiredfiles{$fil}{size} url: $values[0]")); } else { $log->verbose(join("", "Deleting expired file: $fil atime: ", scalar localtime($expiredfiles{$fil}{atime}), " size: $expiredfiles{$fil}{size}")); } } else { $log->warning("Error deleting file '$fil': $!"); } # not critical if this fails next if (! -e "$fil.meta"); if ( unlink "$fil.meta" ) { my $lastslash = rindex($fil, "/"); if ( rmdir(substr($fil, 0, $lastslash))) { $log->verbose("Deleting directory ".substr($fil, 0, $lastslash)); } } else { $log->warning("Error deleting file '$fil.meta': $!"); } } } #Are we still exceding limit after deleting expired files. 
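# For illustration (the numbers are hypothetical): on a 100 GB cache file system run
# as "cache-clean -M 80 -m 70", the thresholds computed above work out to
#   $maxfbytes = $fssize * $maxusedpercent / 100;   # 100 GB * 80 / 100 = 80 GB
#   $minfbytes = $fssize * $minusedpercent / 100;   # 100 GB * 70 / 100 = 70 GB
# so the loop below only runs while used space exceeds 80 GB and keeps deleting the
# least recently accessed files until usage drops below 70 GB.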
if ($fsused > $maxfbytes) { # delete in order of access foreach my $fil (sort { $files{$a}{atime} <=> $files{$b}{atime} } keys %files) { last if $fsused < $minfbytes; next if (-d "$fil"); if (-e "$fil.lock") { my $atime = (stat("$fil.lock"))[8]; next if ( defined $atime && ($currenttime - $atime) <= 86400); unlink "$fil.lock" } if ( unlink $fil ) { $fsused-=$files{$fil}{size}; if (defined($opts{'D'}) && -e "$fil.meta") { open FILE, "$fil.meta"; my $line = ; close FILE; chomp($line); $log->verbose(join("", "Deleting file: $fil atime: ", scalar localtime($files{$fil}{atime}), " size: $files{$fil}{size} url: $line")); } else { $log->verbose(join("","Deleting file: $fil atime: ", scalar localtime($files{$fil}{atime}), " size: $files{$fil}{size}")); } } else { $log->warning("Error deleting file '$fil': $!"); } next if (! -e "$fil.meta"); # not critical if this fails if ( unlink "$fil.meta" ) { my $lastslash = rindex($fil, "/"); if ( rmdir(substr($fil, 0, $lastslash))) { $log->verbose("Deleting directory ".substr($fil, 0, $lastslash)); } } else { $log->warning("Error deleting file '$fil.meta': $!"); } } } $log->info(join("", "Cleaning finished, used space now ", printsize($fsused), " / ", printsize($fssize), " (", sprintf("%.2f",100*$fsused/$fssize),"%)")); } exit 0; sub wanted { return if $name =~ m|\.lock$|; return if $name =~ m|\.meta$|; my ($links, $atime, $blocks); ($links, $atime, $blocks) = (lstat($_))[3,8,12]; return unless defined $atime; return unless !(-d _) || -f _; if ($links != 1) { $totlocksize += 512 * $blocks; $totlockfiles++; return; } if (-e "$name.lock") { # check if lock is still valid my $lockatime = (stat("$name.lock"))[8]; if ( defined $lockatime && ($currenttime - $lockatime) <= 86400) { $totlocksize += 512 * $blocks; $totlockfiles++; return; } } if ($expirytime > 0 && ($currenttime - $atime) >= $expirytime) { $expiredfiles{$name}{atime}=$atime; $expiredfiles{$name}{size}= 512 * $blocks; } else { $files{$name}{atime}=$atime; $files{$name}{size}= 512 * $blocks; } $totsize += 512 * $blocks; } sub printsize($) { my $size = shift; if($size > 1024*1024*1024*1024) { $size = int($size/(1024*1024*1024*1024)); return "$size TB"; } if($size > 1024*1024*1024) { $size = int($size/(1024*1024*1024)); return "$size GB"; } if($size > 1024*1024) { $size = int($size/(1024*1024)); return "$size MB"; } if($size > 1024) { $size = int($size/1024); return "$size kB"; } return $size; } # # Returns disk space (total, used by cache and free) in bytes on a filesystem # Taken from arc1/trunk/src/services/a-rex/infoproviders/HostInfo.pm # Updated to calculate actual use # TODO: Put in common place # sub diskspace ($) { my $path = shift; my ($diskused, $disktotal); if ( -d "$path") { # user-specified tool if (defined($opts{f})) { my $spacecmd = $opts{f}; my @output= `$spacecmd $path`; if ($? != 0) { $log->warning("Failed running $spacecmd"); } elsif ($output[0] =~ m/(\d+) (\d+)/) { $disktotal = $1; $diskused = $2; } else { $log->warning("Bad output from $spacecmd: @output"); } } # check if on afs elsif ($path =~ m#/afs/#) { my @dfstring =`fs listquota $path 2>/dev/null`; if ($? != 0) { $log->warning("Failed running: fs listquota $path"); } elsif ($dfstring[-1] =~ /\s+(\d+)\s+(\d+)\s+\d+%\s+\d+%/) { $disktotal = $1*1024; $diskused = $2*1024; } else { $log->warning("Failed interpreting output of: fs listquota $path"); } # "ordinary" disk } else { my @dfstring =`df -k $path 2>/dev/null`; if ($? != 0) { $log->warning("Failed running: df -k $path"); # The first column may be printed on a separate line. 
# The relevant numbers are always on the last line. } elsif ($dfstring[-1] =~ /\s+(\d+)\s+(\d+)\s+\d+\s+\d+%\s+\//) { $disktotal = $1*1024; $diskused = $2*1024; } else { $log->warning("Failed interpreting output of: df -k $path"); } } # get actual used disk for caches on shared partitions if configured if (defined($opts{S})) { $diskused = undef; my @dustring =`du -ks $path 2>/dev/null`; if ($? != 0) { $log->warning("Failed running: du -ks $path"); } elsif ($dustring[-1] =~ /(\d+)\s+[\w\/]+/) { $diskused = $1*1024; } else { $log->warning("Failed interpreting output of: du -ks $path"); } } } else { $log->warning("Not a directory: $path"); } return undef unless defined($disktotal) and defined($diskused); return {total => $disktotal, used => $diskused}; } nordugrid-arc-5.4.2/src/hed/libs/data/PaxHeaders.7502/FileInfo.h0000644000000000000000000000012412075551565022245 xustar000000000000000027 mtime=1358353269.148218 27 atime=1513200574.516701 30 ctime=1513200659.225738016 nordugrid-arc-5.4.2/src/hed/libs/data/FileInfo.h0000644000175000002070000001251412075551565022315 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_FILEINFO_H__ #define __ARC_FILEINFO_H__ #include #include #include #include #include namespace Arc { /// FileInfo stores information about files (metadata). /** * Set/Get methods exist for "standard" metadata such as name, size and * modification time, and there is a generic key-value map for * protocol-specific attributes. The Set methods always set the corresponding * entry in the generic map, so there is no need for a caller make two calls, * for example SetSize(1) followed by SetMetaData("size", "1"). * \ingroup data * \headerfile FileInfo.h arc/data/FileInfo.h */ class FileInfo { public: /// Type of file object. enum Type { file_type_unknown = 0, ///< Unknown file_type_file = 1, ///< File-type file_type_dir = 2 ///< Directory-type }; /// Construct a new FileInfo with optional name (file path). FileInfo(const std::string& name = "") : name(name), size((unsigned long long int)(-1)), modified((time_t)(-1)), valid((time_t)(-1)), type(file_type_unknown), latency("") { if (!name.empty()) metadata["name"] = name; } /// Returns the name (file path) of the file. const std::string& GetName() const { return name; } /// Returns the last component of the file name (like the "basename" command). std::string GetLastName() const { std::string::size_type pos = name.rfind('/'); if (pos != std::string::npos) return name.substr(pos + 1); else return name; } /// Set name of the file (file path). void SetName(const std::string& n) { name = n; metadata["name"] = n; } /// Returns the list of file replicas (for index services). const std::list& GetURLs() const { return urls; } /// Add a replica to this file. void AddURL(const URL& u) { urls.push_back(u); } /// Check if file size is known. bool CheckSize() const { return (size != (unsigned long long int)(-1)); } /// Returns file size. unsigned long long int GetSize() const { return size; } /// Set file size. void SetSize(const unsigned long long int s) { size = s; metadata["size"] = tostring(s); } /// Check if checksum is known. bool CheckCheckSum() const { return (!checksum.empty()); } /// Returns checksum. const std::string& GetCheckSum() const { return checksum; } /// Set checksum. void SetCheckSum(const std::string& c) { checksum = c; metadata["checksum"] = c; } /// Check if modified time is known. bool CheckModified() const { return (modified != -1); } /// Returns modified time. 
Time GetModified() const { return modified; } /// Set modified time. void SetModified(const Time& t) { modified = t; metadata["mtime"] = t.str(); } /// Check if validity time is known. bool CheckValid() const { return (valid != -1); } /// Returns validity time. Time GetValid() const { return valid; } /// Set validity time. void SetValid(const Time& t) { valid = t; metadata["validity"] = t.str(); } /// Check if file type is known. bool CheckType() const { return (type != file_type_unknown); } /// Returns file type. Type GetType() const { return type; } /// Set file type. void SetType(const Type t) { type = t; if (t == file_type_file) metadata["type"] = "file"; else if (t == file_type_dir) metadata["type"] = "dir"; } /// Check if access latency is known. bool CheckLatency() const { return (!latency.empty()); } /// Returns access latency. std::string GetLatency() const { return latency; } /// Set access latency. void SetLatency(const std::string l) { latency = l; metadata["latency"] = l; } /// Returns map of generic metadata. std::map<std::string, std::string> GetMetaData() const { return metadata; } /// Set an attribute of generic metadata. void SetMetaData(const std::string att, const std::string val) { metadata[att] = val; } /// Returns true if this file's name is before f's name alphabetically. bool operator<(const FileInfo& f) const { return (lower(this->name).compare(lower(f.name)) < 0); } /// Returns true if file name is defined. operator bool() const { return !name.empty(); } /// Returns true if file name is not defined. bool operator!() const { return name.empty(); } private: std::string name; std::list<URL> urls; // Physical endpoints/URLs. unsigned long long int size; // Size of file in bytes. std::string checksum; // Checksum of file. Time modified; // Creation/modification time. Time valid; // Valid till time.
Type type; // File type - usually file_type_file std::string latency; // Access latency of file (applies to SRM only) std::map<std::string, std::string> metadata; // Generic metadata attribute-value pairs }; } // namespace Arc #endif // __ARC_FILEINFO_H__ nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/loader0000644000000000000000000000013213214316022020637 xustar000000000000000030 mtime=1513200658.946734604 30 atime=1513200668.720854145 30 ctime=1513200658.946734604 nordugrid-arc-5.4.2/src/hed/libs/loader/0000755000175000002070000000000013214316022020762 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/loader/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712231165602022760 xustar000000000000000027 mtime=1382345602.148497 30 atime=1513200598.008989311 30 ctime=1513200658.941734543 nordugrid-arc-5.4.2/src/hed/libs/loader/Makefile.am0000644000175000002070000000204412231165602023022 0ustar00mockbuildmock00000000000000DIST_SUBDIRS = schema test SUBDIRS = schema $(TEST_DIR) lib_LTLIBRARIES = libarcloader.la libarcloader_ladir = $(pkgincludedir)/loader libarcloader_la_HEADERS = Plugin.h Loader.h ModuleManager.h FinderLoader.h libarcloader_la_SOURCES = Plugin.cpp Loader.cpp ModuleManager.cpp FinderLoader.cpp libarcloader_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcloader_la_LIBADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) libarcloader_la_LDFLAGS = -version-info 3:0:0 #libtestservice_la_SOURCES = TestService.cpp TestService.h #libtestservice_la_CXXFLAGS = -I$(top_srcdir)/include \ # $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) #noinst_PROGRAMS = test #test_SOURCES = test.cpp #test_CXXFLAGS = -I$(top_srcdir)/include \ # $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) #test_LDADD = ./libarcloader.la \ # $(top_builddir)/src/hed/libs/message/libarcmessage.la \ # $(top_builddir)/src/hed/libs/common/libarccommon.la \ # $(GLIBMM_LIBS) $(LIBXML2_LIBS) nordugrid-arc-5.4.2/src/hed/libs/loader/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315726022773 xustar000000000000000030 mtime=1513200598.060989947 30 atime=1513200648.099601939 30 ctime=1513200658.942734555 nordugrid-arc-5.4.2/src/hed/libs/loader/Makefile.in0000644000175000002070000010543413214315726023046 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE.
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/loader DIST_COMMON = README $(libarcloader_la_HEADERS) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libdir)" \ "$(DESTDIR)$(libarcloader_ladir)" LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = libarcloader_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libarcloader_la_OBJECTS = libarcloader_la-Plugin.lo \ libarcloader_la-Loader.lo libarcloader_la-ModuleManager.lo \ libarcloader_la-FinderLoader.lo libarcloader_la_OBJECTS = $(am_libarcloader_la_OBJECTS) libarcloader_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarcloader_la_CXXFLAGS) $(CXXFLAGS) \ $(libarcloader_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = 
$(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcloader_la_SOURCES) DIST_SOURCES = $(libarcloader_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive HEADERS = $(libarcloader_la_HEADERS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = 
@DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = 
@PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ DIST_SUBDIRS = schema test SUBDIRS = schema $(TEST_DIR) lib_LTLIBRARIES = libarcloader.la libarcloader_ladir = $(pkgincludedir)/loader libarcloader_la_HEADERS = Plugin.h Loader.h 
ModuleManager.h FinderLoader.h libarcloader_la_SOURCES = Plugin.cpp Loader.cpp ModuleManager.cpp FinderLoader.cpp libarcloader_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcloader_la_LIBADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) libarcloader_la_LDFLAGS = -version-info 3:0:0 all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/loader/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/loader/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcloader.la: $(libarcloader_la_OBJECTS) $(libarcloader_la_DEPENDENCIES) $(libarcloader_la_LINK) -rpath $(libdir) $(libarcloader_la_OBJECTS) $(libarcloader_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcloader_la-FinderLoader.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcloader_la-Loader.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcloader_la-ModuleManager.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcloader_la-Plugin.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) 
$(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcloader_la-Plugin.lo: Plugin.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcloader_la_CXXFLAGS) $(CXXFLAGS) -MT libarcloader_la-Plugin.lo -MD -MP -MF $(DEPDIR)/libarcloader_la-Plugin.Tpo -c -o libarcloader_la-Plugin.lo `test -f 'Plugin.cpp' || echo '$(srcdir)/'`Plugin.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcloader_la-Plugin.Tpo $(DEPDIR)/libarcloader_la-Plugin.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Plugin.cpp' object='libarcloader_la-Plugin.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcloader_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcloader_la-Plugin.lo `test -f 'Plugin.cpp' || echo '$(srcdir)/'`Plugin.cpp libarcloader_la-Loader.lo: Loader.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcloader_la_CXXFLAGS) $(CXXFLAGS) -MT libarcloader_la-Loader.lo -MD -MP -MF $(DEPDIR)/libarcloader_la-Loader.Tpo -c -o libarcloader_la-Loader.lo `test -f 'Loader.cpp' || echo '$(srcdir)/'`Loader.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcloader_la-Loader.Tpo $(DEPDIR)/libarcloader_la-Loader.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Loader.cpp' object='libarcloader_la-Loader.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcloader_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcloader_la-Loader.lo `test -f 'Loader.cpp' || echo '$(srcdir)/'`Loader.cpp libarcloader_la-ModuleManager.lo: ModuleManager.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcloader_la_CXXFLAGS) $(CXXFLAGS) -MT libarcloader_la-ModuleManager.lo -MD -MP -MF $(DEPDIR)/libarcloader_la-ModuleManager.Tpo -c -o libarcloader_la-ModuleManager.lo `test -f 'ModuleManager.cpp' || echo 
'$(srcdir)/'`ModuleManager.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcloader_la-ModuleManager.Tpo $(DEPDIR)/libarcloader_la-ModuleManager.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ModuleManager.cpp' object='libarcloader_la-ModuleManager.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcloader_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcloader_la-ModuleManager.lo `test -f 'ModuleManager.cpp' || echo '$(srcdir)/'`ModuleManager.cpp libarcloader_la-FinderLoader.lo: FinderLoader.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcloader_la_CXXFLAGS) $(CXXFLAGS) -MT libarcloader_la-FinderLoader.lo -MD -MP -MF $(DEPDIR)/libarcloader_la-FinderLoader.Tpo -c -o libarcloader_la-FinderLoader.lo `test -f 'FinderLoader.cpp' || echo '$(srcdir)/'`FinderLoader.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcloader_la-FinderLoader.Tpo $(DEPDIR)/libarcloader_la-FinderLoader.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FinderLoader.cpp' object='libarcloader_la-FinderLoader.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcloader_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcloader_la-FinderLoader.lo `test -f 'FinderLoader.cpp' || echo '$(srcdir)/'`FinderLoader.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libarcloader_laHEADERS: $(libarcloader_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarcloader_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarcloader_ladir)" @list='$(libarcloader_la_HEADERS)'; test -n "$(libarcloader_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarcloader_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarcloader_ladir)" || exit $$?; \ done uninstall-libarcloader_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarcloader_la_HEADERS)'; test -n "$(libarcloader_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarcloader_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libarcloader_ladir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(HEADERS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(libarcloader_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-libarcloader_laHEADERS install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-libLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-libLTLIBRARIES \ uninstall-libarcloader_laHEADERS .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic \ clean-libLTLIBRARIES clean-libtool ctags ctags-recursive \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-libLTLIBRARIES \ install-libarcloader_laHEADERS install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs installdirs-am \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags tags-recursive uninstall uninstall-am \ uninstall-libLTLIBRARIES uninstall-libarcloader_laHEADERS #libtestservice_la_SOURCES = TestService.cpp TestService.h #libtestservice_la_CXXFLAGS = -I$(top_srcdir)/include \ # $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) #noinst_PROGRAMS = test #test_SOURCES = test.cpp #test_CXXFLAGS = -I$(top_srcdir)/include \ # $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) #test_LDADD = ./libarcloader.la \ # $(top_builddir)/src/hed/libs/message/libarcmessage.la \ # $(top_builddir)/src/hed/libs/common/libarccommon.la \ # $(GLIBMM_LIBS) $(LIBXML2_LIBS) # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/loader/PaxHeaders.7502/ModuleManager.h0000644000000000000000000000012313111754431023613 xustar000000000000000026 mtime=1495783705.79581 27 atime=1513200574.475701 30 ctime=1513200658.940734531 nordugrid-arc-5.4.2/src/hed/libs/loader/ModuleManager.h0000644000175000002070000001677213111754431023676 0ustar00mockbuildmock00000000000000#ifndef __ARC_MODULEMANAGER_H__ #define __ARC_MODULEMANAGER_H__ #include #include #include #include #include #include #include namespace Arc { class ModuleManager; /// If found in loadable module this function is called /// right after module is loaded. It provides functionality /// similar to constructor attribute of GCC, but is independent /// of compiler and gives access to manager and module objects. 
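/// As an illustration only (this block is an assumption about typical use, not a
/// requirement of this header), a loadable module could provide the hook like this:
/// \code
/// extern "C" void __arc_module_constructor__(Glib::Module* module, Arc::ModuleManager* manager) {
///   // For example, keep this module resident because it registers global state.
///   manager->makePersistent(module);
/// }
/// \endcode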
#define ARC_MODULE_CONSTRUCTOR_NAME __arc_module_constructor__ #define ARC_MODULE_CONSTRUCTOR_SYMB "__arc_module_constructor__" typedef void (*arc_module_constructor_func)(Glib::Module*, ModuleManager*); /// If found in a loadable module this function is called /// right before the module is unloaded. It provides functionality /// similar to the destructor attribute of GCC, but is independent /// of compiler and gives access to the manager and module objects. /// If the module was made persistent this function is still called, /// although the module is not actually unloaded. #define ARC_MODULE_DESTRUCTOR_NAME __arc_module_destructor__ #define ARC_MODULE_DESTRUCTOR_SYMB "__arc_module_destructor__" typedef void (*arc_module_destructor_func)(Glib::Module*, ModuleManager*); /// Manager of shared libraries /** This class loads shared libraries/modules. Normally one instance of it is supposed to be created per executable; that instance caches handles to loaded modules so they are not loaded multiple times, but creating multiple instances is not prohibited. An instance of this class handles loading of shared libraries through calls to the load() method. All loaded libraries are remembered internally and by default are unloaded when the instance of this class is destroyed. Sometimes it is not safe to unload a library; in such cases makePersistent() must be called for that library. Upon the first load() of a library ModuleManager looks for a function called __arc_module_constructor__ and calls it. This makes it possible for the library to do some preparations; currently it is used to make some libraries persistent in memory. Before unloading a library from memory __arc_module_destructor__ is called, if present. Every loaded library has a load counter associated with it. Each call to load() for a specific library increases that counter and unload() decreases it; the library is unloaded when the counter reaches zero. When the ModuleManager instance is destroyed all load counters are reset to 0 and the libraries are unloaded, unless they were claimed to stay persistent in memory. Each library also has a usage counter associated with it. Those counters are increased and decreased by the use() and unuse() methods. This counter is used to claim usage of code provided by a loaded library; it is automatically increased and decreased in the constructor and destructor of the Plugin class. A non-zero usage counter prevents the library from being unloaded. Please note that the destructor of ModuleManager waits for all usage counters to reach zero. This is especially important in multithreaded environments: to avoid deadlocks make sure that Plugins loaded by an instance of ModuleManager are destroyed before that ModuleManager is destroyed, or in independent threads.
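A minimal usage sketch (illustrative only: the module name is hypothetical, the configuration subtree is left empty and error handling is omitted):
\code
Arc::ModuleManager manager(Arc::XMLNode()); // empty configuration subtree
Glib::Module* module = manager.load("libdmcfile", false); // false: resolve all symbols now
if (module) {
  manager.use(module);   // claim usage so the module cannot be unloaded while its code runs
  // ... look up symbols in the module and call into it ...
  manager.unuse(module); // release the usage claim again
}
// Remaining load counters are released in the ModuleManager destructor,
// unless makePersistent() was called for the module.
\endcode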
*/ class ModuleManager { private: class LoadableModuleDescription { private: Glib::Module* module; int count; int usage_count; void check_unload(ModuleManager* manager); public: LoadableModuleDescription(void); LoadableModuleDescription(Glib::Module* m); LoadableModuleDescription& operator=(Glib::Module* m); LoadableModuleDescription& operator=(const LoadableModuleDescription& d); operator Glib::Module*(void) { return module; }; operator bool(void) { return (module != NULL); }; bool operator!(void) { return (module == NULL); }; bool operator==(Glib::Module* m) { return (module==m); }; void shift(Glib::Module* source, LoadableModuleDescription& target); int load(void); int unload(ModuleManager* manager); int use(void) { ++usage_count; return usage_count; }; int unuse(void) { if(usage_count > 0) --usage_count; //check_unload(); - not unloading the code because it is still needed at least to do the return. return usage_count; }; int usage(void) { return usage_count; }; void makePersistent(void) { if(module) module->make_resident(); }; }; friend class LoadableModuleDescription; typedef std::map<std::string, LoadableModuleDescription> plugin_cache_t; typedef std::list<LoadableModuleDescription> plugin_trash_t; Glib::Mutex mlock; static Logger logger; std::list<std::string> plugin_dir; /** Collection of paths to directories holding modules */ plugin_cache_t plugin_cache; /** Cache of handles of loaded modules */ plugin_trash_t plugin_trash; /** Trash bin of reloaded modules */ ModuleManager(const ModuleManager&) {}; ModuleManager& operator=(const ModuleManager&) { return *this; }; protected: /** Unload a module by its identifier. Decreases the load counter and unloads the module when it reaches 0. */ void unload(Glib::Module* module); public: /** Constructor. It is supposed to process the corresponding configuration subtree and tune module loading parameters accordingly. */ ModuleManager(XMLNode cfg); ~ModuleManager(); /** Finds module 'name' in the cache or loads the corresponding loadable module */ Glib::Module* load(const std::string& name,bool probe); /** Finds a loadable module by 'name', looking in the same places as load() does, but does not load it. */ std::string find(const std::string& name); /** Reload a module previously loaded in probe mode. The new module is loaded with all symbols resolved and the old module handle is unloaded. In case of error the old module is not unloaded. */ Glib::Module* reload(Glib::Module* module); /** Increase the usage count of a loaded module. It is intended to be called by plugins or other code which needs to prevent the module from being unloaded while its code is running. Must be accompanied by unuse() when the module is no longer needed. */ void use(Glib::Module* module); /** Decrease the usage count (it never goes below 0). This call does not unload the module. The usage counter is only for preventing unexpected unloads; unloading is done by the unload() methods and by the destructor, if the usage counter is zero. */ void unuse(Glib::Module* module); /** Finds the shared library corresponding to module 'name' and returns the path to it */ std::string findLocation(const std::string& name); /** Make sure this module is never unloaded, even if unload() is called. A call to this method does not affect how other methods behave; the loaded module simply stays in memory after all unloading procedures. */ bool makePersistent(Glib::Module* module); /** Make sure this module is never unloaded, even if unload() is called. */ bool makePersistent(const std::string& name); /** Supply the configuration subtree and trigger module loading (does almost the same as the constructor). This method is designed for ClassLoader to adopt the singleton pattern.
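For example (illustrative only; the accessor name is hypothetical), a singleton holder might construct the manager once with an empty configuration and apply the real configuration later:
\code
Arc::ModuleManager& m = TheClassLoaderManager(); // hypothetical singleton accessor
m.setCfg(cfg);  // almost the same effect as constructing the manager with cfg
\endcode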
*/ void setCfg (XMLNode cfg); }; } // namespace Arc #endif /* __ARC_MODULEMANAGER_H__ */ nordugrid-arc-5.4.2/src/hed/libs/loader/PaxHeaders.7502/test0000644000000000000000000000013213214316022021616 xustar000000000000000030 mtime=1513200658.988735118 30 atime=1513200668.720854145 30 ctime=1513200658.988735118 nordugrid-arc-5.4.2/src/hed/libs/loader/test/0000755000175000002070000000000013214316022021741 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/loader/test/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515023742 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200598.123990718 30 ctime=1513200658.986735093 nordugrid-arc-5.4.2/src/hed/libs/loader/test/Makefile.am0000644000175000002070000000145312052416515024007 0ustar00mockbuildmock00000000000000TESTS = PluginTest check_LTLIBRARIES = libtestplugin.la check_PROGRAMS = $(TESTS) libtestplugin_la_SOURCES = TestPlugin.cpp libtestplugin_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libtestplugin_la_LIBADD = \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) libtestplugin_la_LDFLAGS = -no-undefined -avoid-version -module -rpath $(CURDIR) PluginTest_SOURCES = $(top_srcdir)/src/Test.cpp PluginTest.cpp PluginTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) PluginTest_LDADD = \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) nordugrid-arc-5.4.2/src/hed/libs/loader/test/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315726023752 xustar000000000000000030 mtime=1513200598.173991329 30 atime=1513200648.131602331 30 ctime=1513200658.987735105 nordugrid-arc-5.4.2/src/hed/libs/loader/test/Makefile.in0000644000175000002070000007443313214315726024033 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ TESTS = PluginTest$(EXEEXT) check_PROGRAMS = $(am__EXEEXT_1) subdir = src/hed/libs/loader/test DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__DEPENDENCIES_1 = libtestplugin_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) am_libtestplugin_la_OBJECTS = libtestplugin_la-TestPlugin.lo libtestplugin_la_OBJECTS = $(am_libtestplugin_la_OBJECTS) libtestplugin_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libtestplugin_la_CXXFLAGS) $(CXXFLAGS) \ $(libtestplugin_la_LDFLAGS) $(LDFLAGS) -o $@ am__EXEEXT_1 = PluginTest$(EXEEXT) am_PluginTest_OBJECTS = PluginTest-Test.$(OBJEXT) \ PluginTest-PluginTest.$(OBJEXT) PluginTest_OBJECTS = $(am_PluginTest_OBJECTS) PluginTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) PluginTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(PluginTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libtestplugin_la_SOURCES) $(PluginTest_SOURCES) DIST_SOURCES = $(libtestplugin_la_SOURCES) $(PluginTest_SOURCES) ETAGS = etags CTAGS = ctags am__tty_colors = \ red=; grn=; lgn=; blu=; std= DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ 
ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ 
GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = 
@debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ check_LTLIBRARIES = libtestplugin.la libtestplugin_la_SOURCES = TestPlugin.cpp libtestplugin_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libtestplugin_la_LIBADD = \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) libtestplugin_la_LDFLAGS = -no-undefined -avoid-version -module -rpath $(CURDIR) PluginTest_SOURCES = $(top_srcdir)/src/Test.cpp PluginTest.cpp PluginTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) PluginTest_LDADD = \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/loader/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/loader/test/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-checkLTLIBRARIES: -test -z "$(check_LTLIBRARIES)" || rm -f $(check_LTLIBRARIES) @list='$(check_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libtestplugin.la: $(libtestplugin_la_OBJECTS) $(libtestplugin_la_DEPENDENCIES) $(libtestplugin_la_LINK) $(libtestplugin_la_OBJECTS) $(libtestplugin_la_LIBADD) $(LIBS) clean-checkPROGRAMS: @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list PluginTest$(EXEEXT): $(PluginTest_OBJECTS) $(PluginTest_DEPENDENCIES) @rm -f PluginTest$(EXEEXT) $(PluginTest_LINK) $(PluginTest_OBJECTS) $(PluginTest_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/PluginTest-PluginTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/PluginTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libtestplugin_la-TestPlugin.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libtestplugin_la-TestPlugin.lo: TestPlugin.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libtestplugin_la_CXXFLAGS) $(CXXFLAGS) -MT libtestplugin_la-TestPlugin.lo -MD -MP -MF $(DEPDIR)/libtestplugin_la-TestPlugin.Tpo -c -o libtestplugin_la-TestPlugin.lo `test -f 'TestPlugin.cpp' || echo '$(srcdir)/'`TestPlugin.cpp @am__fastdepCXX_TRUE@ $(am__mv) 
$(DEPDIR)/libtestplugin_la-TestPlugin.Tpo $(DEPDIR)/libtestplugin_la-TestPlugin.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='TestPlugin.cpp' object='libtestplugin_la-TestPlugin.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libtestplugin_la_CXXFLAGS) $(CXXFLAGS) -c -o libtestplugin_la-TestPlugin.lo `test -f 'TestPlugin.cpp' || echo '$(srcdir)/'`TestPlugin.cpp PluginTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(PluginTest_CXXFLAGS) $(CXXFLAGS) -MT PluginTest-Test.o -MD -MP -MF $(DEPDIR)/PluginTest-Test.Tpo -c -o PluginTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/PluginTest-Test.Tpo $(DEPDIR)/PluginTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='PluginTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(PluginTest_CXXFLAGS) $(CXXFLAGS) -c -o PluginTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp PluginTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(PluginTest_CXXFLAGS) $(CXXFLAGS) -MT PluginTest-Test.obj -MD -MP -MF $(DEPDIR)/PluginTest-Test.Tpo -c -o PluginTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/PluginTest-Test.Tpo $(DEPDIR)/PluginTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='PluginTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(PluginTest_CXXFLAGS) $(CXXFLAGS) -c -o PluginTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` PluginTest-PluginTest.o: PluginTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(PluginTest_CXXFLAGS) $(CXXFLAGS) -MT PluginTest-PluginTest.o -MD -MP -MF $(DEPDIR)/PluginTest-PluginTest.Tpo -c -o PluginTest-PluginTest.o `test -f 'PluginTest.cpp' || echo '$(srcdir)/'`PluginTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/PluginTest-PluginTest.Tpo $(DEPDIR)/PluginTest-PluginTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='PluginTest.cpp' object='PluginTest-PluginTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(PluginTest_CXXFLAGS) $(CXXFLAGS) -c -o PluginTest-PluginTest.o `test -f 'PluginTest.cpp' || echo '$(srcdir)/'`PluginTest.cpp PluginTest-PluginTest.obj: PluginTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(PluginTest_CXXFLAGS) $(CXXFLAGS) -MT PluginTest-PluginTest.obj -MD -MP -MF $(DEPDIR)/PluginTest-PluginTest.Tpo -c -o PluginTest-PluginTest.obj `if test -f 'PluginTest.cpp'; then $(CYGPATH_W) 'PluginTest.cpp'; else $(CYGPATH_W) '$(srcdir)/PluginTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/PluginTest-PluginTest.Tpo $(DEPDIR)/PluginTest-PluginTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='PluginTest.cpp' object='PluginTest-PluginTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(PluginTest_CXXFLAGS) $(CXXFLAGS) -c -o PluginTest-PluginTest.obj `if test -f 'PluginTest.cpp'; then $(CYGPATH_W) 'PluginTest.cpp'; else $(CYGPATH_W) '$(srcdir)/PluginTest.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_LTLIBRARIES) $(check_PROGRAMS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-checkLTLIBRARIES clean-checkPROGRAMS clean-generic \ clean-libtool mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-TESTS check-am clean \ clean-checkLTLIBRARIES clean-checkPROGRAMS clean-generic \ clean-libtool ctags distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/loader/test/PaxHeaders.7502/PluginTest.cpp0000644000000000000000000000012311741501077024506 xustar000000000000000026 mtime=1334215231.35079 27 atime=1513200574.470701 30 ctime=1513200658.988735118 nordugrid-arc-5.4.2/src/hed/libs/loader/test/PluginTest.cpp0000644000175000002070000000217311741501077024557 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include class PluginTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(PluginTest); CPPUNIT_TEST(TestPlugin); CPPUNIT_TEST_SUITE_END(); public: void TestPlugin(); }; class PluginTestLoader: Arc::Loader { public: PluginTestLoader(Arc::XMLNode cfg):Arc::Loader(cfg) { }; Arc::PluginsFactory* factory(void) { return factory_; }; }; void PluginTest::TestPlugin() { std::string config_xml("\ \n\ \n\ \n\ .libs/\n\ \n\ \n\ testplugin\n\ \n\ "); Arc::XMLNode cfg(config_xml); PluginTestLoader loader(cfg); CPPUNIT_ASSERT(loader.factory()); std::string plugin_name = "testplugin"; std::string plugin_kind = "TEST"; Arc::PluginArgument* plugin_arg = NULL; CPPUNIT_ASSERT(loader.factory()->get_instance(plugin_kind,plugin_name,plugin_arg,false)); } CPPUNIT_TEST_SUITE_REGISTRATION(PluginTest); nordugrid-arc-5.4.2/src/hed/libs/loader/test/PaxHeaders.7502/TestPlugin.cpp0000644000000000000000000000012412675602216024513 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.470701 30 ctime=1513200658.987735105 nordugrid-arc-5.4.2/src/hed/libs/loader/test/TestPlugin.cpp0000644000175000002070000000144312675602216024562 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "../Plugin.h" namespace Test { class TestPlugin: Arc::Plugin { public: TestPlugin(Arc::PluginArgument*); ~TestPlugin(); }; TestPlugin::TestPlugin(Arc::PluginArgument* parg): Plugin(parg) { } TestPlugin::~TestPlugin(void) { } Arc::Plugin *get_instance(Arc::PluginArgument* arg) { return (Arc::Plugin *)(new TestPlugin(arg)); } } // namespace Test extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "testplugin", /* name */ "TEST", /* kind */ NULL, /* description */ 0, /* version */ Test::get_instance /* get_instance function */ }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/libs/loader/PaxHeaders.7502/Plugin.cpp0000644000000000000000000000012413106317607022671 xustar000000000000000027 mtime=1494851463.124269 27 atime=1513200574.466701 30 ctime=1513200658.943734567 nordugrid-arc-5.4.2/src/hed/libs/loader/Plugin.cpp0000644000175000002070000006332513106317607022747 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "Plugin.h" namespace Arc { static std::string strip_newline(const std::string& str) { std::string s(str); std::string::size_type p = 0; while((p=s.find('\r',p)) != std::string::npos) s[p]=' '; p=0; while((p=s.find('\n',p)) != std::string::npos) s[p]=' '; return s; } static bool issane(const char* s) { if(!s) return true; for(unsigned int n = 0;n<2048;++n) { if(!s[n]) return true; } return false; } bool PluginsFactory::modules_t_::add(ModuleDesc* m_i, Glib::Module* m_h, PluginDescriptor* d_h) { if(this->find(m_i->name)) return false; // TODO: too many copying - reduce module_t_ module; module.module = m_h; unsigned int sane_count = 1024; // Loop through plugins in module for(PluginDescriptor* p = d_h;(p->kind) && (p->name) && (p->instance);++p) { if(--sane_count == 0) break; if(!issane(p->kind)) break; if(!issane(p->name)) break; if(!issane(p->description)) break; // Find 
matching description and prepare object to store descriptor_t_ d; d.desc_m = p; d.desc_i.name = p->name; d.desc_i.kind = p->kind; d.desc_i.version = p->version; if(p->description) d.desc_i.description = p->description; d.desc_i.priority = ARC_PLUGIN_DEFAULT_PRIORITY; for(std::list::iterator pd = m_i->plugins.begin(); pd != m_i->plugins.end();++pd) { if((pd->name == d.desc_i.name) && (pd->kind == d.desc_i.kind) && (pd->version == d.desc_i.version)) { d.desc_i = *pd; break; }; }; // Store obtained description module.plugins.push_back(d); }; // Store new module module_t_& module_r = ((*this)[m_i->name] = module); // Add new descriptions to plugins list sorted by priority for(std::list::iterator p = module_r.plugins.begin(); p != module_r.plugins.end(); ++p) { // Find place std::list< std::pair >::iterator pp = plugins_.begin(); for(; pp != plugins_.end(); ++pp) { if((*p).desc_i.priority > (*pp).first->desc_i.priority) break; }; plugins_.insert(pp,std::pair(&(*p),&module_r)); }; return true; } bool PluginsFactory::modules_t_::remove(PluginsFactory::modules_t_::miterator& module) { // Remove links from descriptors/plugins list for(std::list::iterator p = module->second.plugins.begin(); p != module->second.plugins.end(); ++p) { // Find it for(std::list< std::pair >::iterator pp = plugins_.begin(); pp != plugins_.end(); ++pp) { if((*pp).first == &(*p)) { // or compare by module? plugins_.erase(pp); break; }; }; }; // Remove module itself this->erase(module); module = PluginsFactory::modules_t_::miterator(*this,this->end()); return true; } static PluginDescriptor* find_constructor(PluginDescriptor* desc,const std::string& kind,int min_version,int max_version,int dsize = -1) { if(!desc) return NULL; unsigned int sane_count = 1024; for(;(desc->kind) && (desc->name) && (desc->instance);++desc) { if(dsize == 0) break; if(--sane_count == 0) break; if(!issane(desc->kind)) break; if(!issane(desc->name)) break; if(!issane(desc->description)) break; if((kind == desc->kind) || (kind.empty())) { if((min_version <= desc->version) && (max_version >= desc->version)) { if(desc->instance) return desc; }; }; if(dsize >= 0) --dsize; }; return NULL; } static PluginDescriptor* find_constructor(PluginDescriptor* desc,const std::string& kind,const std::string& name,int min_version,int max_version,int dsize = -1) { if(!desc) return NULL; unsigned int sane_count = 1024; for(;(desc->kind) && (desc->name) && (desc->instance);++desc) { if(dsize == 0) break; if(--sane_count == 0) break; if(!issane(desc->kind)) break; if(!issane(desc->name)) break; if(!issane(desc->description)) break; if(((kind == desc->kind) || (kind.empty())) && ((name == desc->name) || (name.empty()))) { if((min_version <= desc->version) && (max_version >= desc->version)) { if(desc->instance) return desc; }; }; if(dsize >= 0) --dsize; }; return NULL; } // TODO: Merge with ModuleDesc and PluginDesc. That would reduce code size and // make manipulation of *.apd files exposed through API. class ARCModuleDescriptor { private: bool valid; class ARCPluginDescriptor { public: std::string name; std::string kind; std::string description; uint32_t version; uint32_t priority; bool valid; ARCPluginDescriptor(std::ifstream& in):valid(false) { if(!in) return; std::string line; version = 0; priority = ARC_PLUGIN_DEFAULT_PRIORITY; // Protect against insane line length? 
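        // Format note (a reading of the parser below, not separate documentation):
        // each plugin description in an .apd file is a block of lines of the form
        //   tag="value"
        // where backslashes inside the value act as escapes and are removed.
        // An empty line ends the block. Recognised tags are name, kind, description,
        // version and priority; name and kind must be present for the description
        // to be considered valid.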
while(std::getline(in,line)) { line = trim(line); if(line.empty()) break; // end of descripton std::string::size_type p = line.find('='); std::string tag = line.substr(0,p); line.replace(0,p+1,""); line = trim(line); if(line.length() < 2) return; if(line[0] != '"') return; if(line[line.length()-1] != '"') return; line=line.substr(1,line.length()-2); p=0; while((p = line.find('\\',p)) != std::string::npos) { line.replace(p,1,""); ++p; } if(tag == "name") { name = line; } else if(tag == "kind") { kind = line; } else if(tag == "description") { description = line; } else if(tag == "version") { if(!stringto(line,version)) return; } else if(tag == "priority") { if(!stringto(line,priority)) return; } } if(name.empty()) return; if(kind.empty()) return; valid = true; }; }; std::list descriptors; public: ARCModuleDescriptor(std::ifstream& in):valid(false) { if(!in) return; for(;;) { ARCPluginDescriptor plg(in); if(!plg.valid) break; descriptors.push_back(plg); }; valid = true; } operator bool(void) const { return valid; }; bool operator!(void) const { return !valid; }; bool contains(const std::list& kinds) const { if(kinds.size() == 0) return valid; for(std::list::const_iterator kind = kinds.begin(); kind != kinds.end(); ++kind) { if(contains(*kind)) return true; }; return false; } bool contains(const std::string& kind) const { for(std::list::const_iterator desc = descriptors.begin(); desc != descriptors.end(); ++desc) { if(desc->kind == kind) return true; }; return false; }; bool contains(const std::string& kind, const std::string& pname) { for(std::list::const_iterator desc = descriptors.begin(); desc != descriptors.end(); ++desc) { if((desc->name == pname) && (desc->kind == kind)) return true; }; return false; }; void get(std::list& descs) { for(std::list::const_iterator desc = descriptors.begin(); desc != descriptors.end(); ++desc) { PluginDesc pd; pd.name = desc->name; pd.kind = desc->kind; pd.description = desc->description; pd.version = desc->version; pd.priority = desc->priority; descs.push_back(pd); }; }; }; static void replace_file_suffix(std::string& path,const std::string& suffix) { std::string::size_type name_p = path.rfind(G_DIR_SEPARATOR_S); if(name_p == std::string::npos) { name_p = 0; } else { ++name_p; } std::string::size_type suffix_p = path.find('.',name_p); if(suffix_p != std::string::npos) { path.resize(suffix_p); } path += "." 
+ suffix; } // Look for apd file of specified name and extract plugin descriptor static ARCModuleDescriptor* probe_descriptor(std::string name,ModuleManager& manager) { std::string::size_type p = 0; // Replace ':' symbol by safe '_' for(;;) { p=name.find(':',p); if(p == std::string::npos) break; name.replace(p,1,"_"); ++p; }; // Find loadable library file by name std::string path = manager.find(name); if(path.empty()) return NULL; // Check for presece of plugin descriptor in apd file replace_file_suffix(path,"apd"); std::ifstream in(path.c_str()); ARCModuleDescriptor* md = new ARCModuleDescriptor(in); if(!(*md)) { delete md; return NULL; }; return md; } // Try to find and load shared library file by name static Glib::Module* probe_module(std::string name,ModuleManager& manager) { std::string::size_type p = 0; for(;;) { p=name.find(':',p); if(p == std::string::npos) break; name.replace(p,1,"_"); ++p; }; return manager.load(name,true); } inline static Glib::Module* reload_module(Glib::Module* module,ModuleManager& manager) { if(!module) return NULL; return manager.reload(module); } #define unload_module(module,manager) { if(module) unload(module); } // inline static void unload_module(Glib::Module* module,ModuleManager& manager) { // if(!module) return; // manager.unload(module); // } const char* plugins_table_name = ARC_PLUGINS_TABLE_SYMB; Logger PluginsFactory::logger(Logger::rootLogger, "Plugin"); Plugin::Plugin(PluginArgument* arg): factory_(arg?arg->get_factory():NULL),module_(arg?arg->get_module():NULL) { if(factory_ && module_) ((ModuleManager*)factory_)->use(module_); } Plugin::Plugin(const Plugin& obj): factory_(obj.factory_),module_(obj.module_) { if(factory_ && module_) ((ModuleManager*)factory_)->use(module_); } Plugin::~Plugin(void) { if(factory_ && module_) ((ModuleManager*)factory_)->unuse(module_); } Plugin* PluginsFactory::get_instance(const std::string& kind,PluginArgument* arg,bool search) { return get_instance(kind,0,INT_MAX,arg,search); } Plugin* PluginsFactory::get_instance(const std::string& kind,int version,PluginArgument* arg,bool search) { return get_instance(kind,version,version,arg,search); } Plugin* PluginsFactory::get_instance(const std::string& kind,int min_version,int max_version,PluginArgument* arg,bool search) { if(arg) arg->set_factory(this); Glib::Mutex::Lock lock(lock_); modules_t_::diterator d = modules_; for(;d;++d) { PluginDescriptor* desc = (*d).first->desc_m; desc=find_constructor(desc,kind,min_version,max_version,1); if(!desc) continue; // Suitable plugin descriptor is found ... if(arg) { arg->set_module((*d).second->module); }; lock.release(); Plugin* plugin = desc->instance(arg); if(plugin) return plugin; // ... but plugin did not instantiate with specified argument lock.acquire(); }; // Either searching for plugin is enabled if(!search) return NULL; // Looking for file and especially loading library may take // long time. Especially if it involves network operations. // So releasing lock. No opertions on modules_ are allowed // till lock is re-acquired. lock.release(); // Try to load module of plugin // Look for *.apd first by requested plugin kind std::string mname = kind; AutoPointer mdesc(probe_descriptor(mname,*this)); if(mdesc) { if(!mdesc->contains(kind)) return NULL; }; // Descriptor with suitable name not found. 
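  // At this point either no .apd descriptor was found for this kind, or the
  // descriptor confirmed that a plugin of the requested kind is present.
  // Either way the next step is to load the binary module itself, which is
  // only attempted when try_load_ permits it.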
// Check if allowed to load executables if(!try_load_) { logger.msg(ERROR, "Could not find loadable module descriptor by name %s",kind); return NULL; }; // Now try to load module directly Glib::Module* module = probe_module(kind,*this); if (module == NULL) { logger.msg(ERROR, "Could not find loadable module by name %s (%s)",kind,strip_newline(Glib::Module::get_last_error())); return NULL; }; // Identify table of descriptors void *ptr = NULL; if(!module->get_symbol(plugins_table_name,ptr)) { logger.msg(VERBOSE, "Module %s is not an ARC plugin (%s)",kind,strip_newline(Glib::Module::get_last_error())); unload_module(module,*this); return NULL; }; // Try to find plugin in new table PluginDescriptor* desc = (PluginDescriptor*)ptr; for(;;) { // Look for plugin descriptor of suitable kind desc=find_constructor(desc,kind,min_version,max_version); if(!desc) break; // Out of descriptors if(arg) arg->set_module(module); Plugin* plugin = desc->instance(arg); if(plugin) { // Plugin instantiated with specified argument. // Keep plugin loaded and registered. Glib::Module* nmodule = reload_module(module,*this); if(!nmodule) { logger.msg(VERBOSE, "Module %s failed to reload (%s)",mname,strip_newline(Glib::Module::get_last_error())); // clean up delete plugin; unload_module(module,*this); return NULL; }; module = NULL; // initial handler is not valid anymore // Re-acqire lock before working with modules_ lock.acquire(); // Make descriptor and register it in the cache ModuleDesc mdesc_i; mdesc_i.name = mname; if(mdesc) mdesc->get(mdesc_i.plugins); // TODO: handle multiple records with same mname. Is it needed? modules_.add(&mdesc_i,nmodule,(PluginDescriptor*)ptr); return plugin; }; // Proceede to next descriptor ++desc; }; // Out of descriptors. Release module and exit. unload_module(module,*this); return NULL; } Plugin* PluginsFactory::get_instance(const std::string& kind,const std::string& name,PluginArgument* arg,bool search) { return get_instance(kind,name,0,INT_MAX,arg,search); } Plugin* PluginsFactory::get_instance(const std::string& kind,const std::string& /* name */,int version,PluginArgument* arg,bool search) { return get_instance(kind,version,version,arg,search); } Plugin* PluginsFactory::get_instance(const std::string& kind,const std::string& name,int min_version,int max_version,PluginArgument* arg,bool search) { if(arg) arg->set_factory(this); Glib::Mutex::Lock lock(lock_); modules_t_::diterator d = modules_; for(;d;++d) { PluginDescriptor* desc = (*d).first->desc_m; desc=find_constructor(desc,kind,name,min_version,max_version,1); if(!desc) continue; if(arg) { arg->set_module((*d).second->module); }; lock.release(); // If both name and kind are supplied no probing is done return desc->instance(arg); }; if(!search) return NULL; // Looking for file and especially loading library may take // long time. Especially if it involves network operations. // So releasing lock. No opertions on modules_ are allowed // till lock is re-acquired. lock.release(); // Try to load module - first by name of plugin std::string mname = name; AutoPointer mdesc(probe_descriptor(mname,*this)); if(mdesc) { if(!mdesc->contains(kind,name)) { logger.msg(ERROR, "Loadable module %s contains no requested plugin %s of kind %s",mname,name,kind); return NULL; }; }; // Descriptor not found or indicates presence of requested kinds. 
// Now try to load module directly Glib::Module* module = try_load_?probe_module(name,*this):NULL; if (module == NULL) { // Then by kind of plugin mname=kind; mdesc = probe_descriptor(mname,*this); if(mdesc) { if(!mdesc->contains(kind,name)) { logger.msg(ERROR, "Loadable module %s contains no requested plugin %s of kind %s",mname,name,kind); return NULL; }; }; if(!try_load_) { logger.msg(ERROR, "Could not find loadable module descriptor by names %s and %s",name,kind); return NULL; }; // Descriptor not found or indicates presence of requested kinds. // Now try to load module directly module=probe_module(kind,*this); logger.msg(ERROR, "Could not find loadable module by names %s and %s (%s)",name,kind,strip_newline(Glib::Module::get_last_error())); return NULL; }; // Identify table of descriptors void *ptr = NULL; if(!module->get_symbol(plugins_table_name,ptr)) { logger.msg(VERBOSE, "Module %s is not an ARC plugin (%s)",mname,strip_newline(Glib::Module::get_last_error())); unload_module(module,*this); return NULL; }; // Try to find plugin in new table PluginDescriptor* desc = find_constructor((PluginDescriptor*)ptr,kind,name,min_version,max_version); if(desc) { // Keep plugin loaded and registered Glib::Module* nmodule = reload_module(module,*this); if(!nmodule) { logger.msg(VERBOSE, "Module %s failed to reload (%s)",mname,strip_newline(Glib::Module::get_last_error())); unload_module(module,*this); return NULL; }; lock.acquire(); ModuleDesc mdesc_i; mdesc_i.name = mname; if(mdesc) mdesc->get(mdesc_i.plugins); modules_.add(&mdesc_i,nmodule,(PluginDescriptor*)ptr); if(arg) arg->set_module(nmodule); lock.release(); return desc->instance(arg); }; unload_module(module,*this); return NULL; } bool PluginsFactory::load(const std::string& name) { std::list kinds; return load(name,kinds); } bool PluginsFactory::load(const std::string& name,const std::string& kind) { std::list kinds; kinds.push_back(kind); return load(name,kinds); } bool PluginsFactory::load(const std::string& name,const std::string& kind,const std::string& pname) { std::list kinds; std::list pnames; kinds.push_back(kind); pnames.push_back(pname); return load(name,kinds,pnames); } bool PluginsFactory::load(const std::string& name,const std::list& kinds) { std::list pnames; return load(name,kinds,pnames); } bool PluginsFactory::load(const std::string& name,const std::list& kinds,const std::list& /* pnames */) { // In real use-case all combinations of kinds and pnames // have no sense. So normally if both are defined each contains // only one item. if(name.empty()) return false; Glib::Module* module = NULL; PluginDescriptor* desc = NULL; void *ptr = NULL; std::string mname; Glib::Mutex::Lock lock(lock_); // Check if module already loaded modules_t_::miterator m = modules_.find(name); // Releasing lock in order to avoid locking while loading new module. // The iterator stays valid because modules are not unloaded from cache. lock.release(); AutoPointer mdesc; if(m) { desc = m->second.get_table(); if(!desc) return false; } else { // Try to load module by specified name mname = name; // First try to find descriptor of module mdesc = probe_descriptor(mname,*this); if(mdesc) { if(!mdesc->contains(kinds)) { //logger.msg(VERBOSE, "Module %s does not contain plugin(s) of specified kind(s)",mname); return false; }; }; if(!try_load_) { logger.msg(ERROR, "Could not find loadable module descriptor by name %s",name); return false; }; // Descriptor not found or indicates presence of requested kinds. 
// Now try to load module directly module = probe_module(mname,*this); if (module == NULL) { logger.msg(ERROR, "Could not find loadable module by name %s (%s)",name,strip_newline(Glib::Module::get_last_error())); return false; }; // Identify table of descriptors if(!module->get_symbol(plugins_table_name,ptr)) { logger.msg(VERBOSE, "Module %s is not an ARC plugin (%s)",mname,strip_newline(Glib::Module::get_last_error())); unload_module(module,*this); return false; }; desc = (PluginDescriptor*)ptr; }; if(kinds.size() > 0) { for(std::list::const_iterator kind = kinds.begin(); kind != kinds.end(); ++kind) { if(kind->empty()) continue; desc=find_constructor(desc,*kind,0,INT_MAX); if(desc) break; }; if(!desc) { //logger.msg(VERBOSE, "Module %s does not contain plugin(s) of specified kind(s)",mname); if(module) unload_module(module,*this); return false; }; }; if(!mname.empty()) { // this indicates new module is loaded Glib::Module* nmodule=reload_module(module,*this); if(!nmodule) { logger.msg(VERBOSE, "Module %s failed to reload (%s)",mname,strip_newline(Glib::Module::get_last_error())); unload_module(module,*this); return false; }; // Re-acquire lock before registering new module in cache lock.acquire(); ModuleDesc mdesc_i; mdesc_i.name = mname; if(mdesc) mdesc->get(mdesc_i.plugins); modules_.add(&mdesc_i,nmodule,(PluginDescriptor*)ptr); }; return true; } bool PluginsFactory::load(const std::list& names,const std::string& kind) { std::list kinds; kinds.push_back(kind); return load(names,kinds); } bool PluginsFactory::load(const std::list& names,const std::string& kind,const std::string& pname) { bool r = false; std::list kinds; std::list pnames; kinds.push_back(kind); pnames.push_back(pname); for(std::list::const_iterator name = names.begin(); name != names.end();++name) { if(load(*name,kinds,pnames)) r=true; } return r; } bool PluginsFactory::load(const std::list& names,const std::list& kinds) { bool r = false; for(std::list::const_iterator name = names.begin(); name != names.end();++name) { if(load(*name,kinds)) r=true; } return r; } bool PluginsFactory::scan(const std::string& name, ModuleDesc& desc) { ARCModuleDescriptor* mod = probe_descriptor(name,*this); if(mod) { desc.name = name; mod->get(desc.plugins); delete mod; return true; } // Descriptor not found if(!try_load_) return false; // Now try to load module directly Glib::Module* module = probe_module(name,*this); if (module == NULL) return false; // Identify table of descriptors void *ptr = NULL; if(!module->get_symbol(plugins_table_name,ptr)) { unload_module(module,*this); return false; }; PluginDescriptor* d = (PluginDescriptor*)ptr; if(!d) { unload_module(module,*this); return false; }; unsigned int sane_count = 1024; for(;(d->kind) && (d->name) && (d->instance);++d) { // Checking sanity of record to deal with broken // plugins. 
if((--sane_count == 0) || (!issane(d->name)) || (!issane(d->kind)) || (!issane(d->description))) { unload_module(module,*this); return false; }; PluginDesc pd; pd.name = d->name; pd.kind = d->kind; if(d->description) pd.description = d->description; pd.version = d->version; desc.plugins.push_back(pd); }; return true; } bool PluginsFactory::scan(const std::list& names, std::list& descs) { bool r = false; for(std::list::const_iterator name = names.begin(); name != names.end();++name) { ModuleDesc desc; if(scan(*name,desc)) { r=true; descs.push_back(desc); } } return r; } void PluginsFactory::report(std::list& descs) { modules_t_::miterator m = modules_; for(;m;++m) { ModuleDesc md; md.name = m->first; for(std::list::iterator d = m->second.plugins.begin(); d != m->second.plugins.end();++m) { md.plugins.push_back(d->desc_i); }; descs.push_back(md); }; } void PluginsFactory::FilterByKind(const std::string& kind, std::list& mdescs) { for (std::list::iterator mdesc = mdescs.begin(); mdesc != mdescs.end();) { for (std::list::iterator pdesc = mdesc->plugins.begin(); pdesc != mdesc->plugins.end();) { if (pdesc->kind != kind) { // Remove plugins from module not of kind. pdesc = mdesc->plugins.erase(pdesc); } else { pdesc++; } } if (mdesc->plugins.empty()) { // If list is empty, remove module. mdesc = mdescs.erase(mdesc); } else { mdesc++; } } } PluginsFactory::PluginsFactory(XMLNode cfg): ModuleManager(cfg), try_load_(true) { } PluginArgument::PluginArgument(void): factory_(NULL), module_(NULL) { } PluginArgument::~PluginArgument(void) { } PluginsFactory* PluginArgument::get_factory(void) { return factory_; } Glib::Module* PluginArgument::get_module(void) { return module_; } void PluginArgument::set_factory(PluginsFactory* factory) { factory_=factory; } void PluginArgument::set_module(Glib::Module* module) { module_=module; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/loader/PaxHeaders.7502/Loader.cpp0000644000000000000000000000012411330053136022630 xustar000000000000000027 mtime=1264604766.327876 27 atime=1513200574.478701 30 ctime=1513200658.943734567 nordugrid-arc-5.4.2/src/hed/libs/loader/Loader.cpp0000644000175000002070000000164511330053136022703 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "Loader.h" namespace Arc { Logger Loader::logger(Logger::rootLogger, "Loader"); Loader::~Loader(void) { if(factory_) delete factory_; } Loader::Loader(XMLNode cfg) { factory_ = new PluginsFactory(cfg); for(int n = 0;; ++n) { XMLNode cn = cfg.Child(n); if(!cn) break; if(MatchXMLName(cn, "ModuleManager")) { continue; } if(MatchXMLName(cn, "Plugins")) { XMLNode n; for (int i = 0; (n = cn["Name"][i]) != false; i++) { std::string name = (std::string)n; factory_->load(name); } } // Configuration processing is split to multiple functions - hence // ignoring all unknown elements. 
//logger.msg(WARNING, "Unknown element \"%s\" - ignoring", cn.Name()); } } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/loader/PaxHeaders.7502/FinderLoader.cpp0000644000000000000000000000012412044527530023767 xustar000000000000000027 mtime=1351790424.395655 27 atime=1513200574.464701 30 ctime=1513200658.945734592 nordugrid-arc-5.4.2/src/hed/libs/loader/FinderLoader.cpp0000644000175000002070000000260112044527530024033 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include "Plugin.h" #include "FinderLoader.h" namespace Arc { static inline bool name_is_plugin(std::string& name) { if (name.substr(0, 3) != "lib") return false; std::string::size_type p = name.rfind('.'); if(p == std::string::npos) return false; if(name.substr(p+1) != G_MODULE_SUFFIX) return false; name = name.substr(3, p - 3); return true; } const std::list FinderLoader::GetLibrariesList(void) { BaseConfig basecfg; NS ns; Config cfg(ns); basecfg.MakeConfig(cfg); std::list names; for (XMLNode n = cfg["ModuleManager"]; n; ++n) { for (XMLNode m = n["Path"]; m; ++m) { // Protect against insane configurations... if ((std::string)m == "/usr/lib" || (std::string)m == "/usr/lib64" || (std::string)m == "/usr/bin" || (std::string)m == "/usr/libexec") continue; try { Glib::Dir dir((std::string)m); for (Glib::DirIterator file = dir.begin(); file != dir.end(); file++) { std::string name = *file; if(name_is_plugin(name)) names.push_back(name); } } catch (Glib::FileError&) {} } } return names; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/loader/PaxHeaders.7502/ModuleManager.cpp0000644000000000000000000000012313111754431024146 xustar000000000000000026 mtime=1495783705.79581 27 atime=1513200574.472701 30 ctime=1513200658.944734579 nordugrid-arc-5.4.2/src/hed/libs/loader/ModuleManager.cpp0000644000175000002070000002576013111754431024226 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include namespace Arc { Logger ModuleManager::logger(Logger::rootLogger, "ModuleManager"); static std::string strip_newline(const std::string& str) { std::string s(str); std::string::size_type p = 0; while((p=s.find('\r',p)) != std::string::npos) s[p]=' '; p=0; while((p=s.find('\n',p)) != std::string::npos) s[p]=' '; return s; } ModuleManager::ModuleManager(XMLNode cfg) { if(!cfg) return; ModuleManager::logger.msg(DEBUG, "Module Manager Init"); if(!MatchXMLName(cfg,"ArcConfig")) return; XMLNode mm = cfg["ModuleManager"]; for (int n = 0;;++n) { XMLNode path = mm.Child(n); if (!path) { break; } if (MatchXMLName(path, "Path")) { plugin_dir.push_back((std::string)path); } } if (plugin_dir.empty()) { plugin_dir = ArcLocation::GetPlugins(); } } ModuleManager::~ModuleManager(void) { Glib::Mutex::Lock lock(mlock); // Try to unload all modules // Remove unloaded plugins from cache for(plugin_cache_t::iterator i = plugin_cache.begin(); i != plugin_cache.end();) { while(i->second.unload(this) > 0) { }; if(i->second) { // module is not unloaded only if it is in use according to usage counter ++i; } else { plugin_cache.erase(i); i = plugin_cache.begin(); // for map erase does not return iterator } } for(plugin_trash_t::iterator i = plugin_trash.begin(); i != plugin_trash.end();) { while(i->unload(this) > 0) { }; if(*i) { ++i; } else { i = plugin_trash.erase(i); } } // exit only when all plugins unloaded if(plugin_cache.empty() && plugin_trash.empty()) return; // otherwise wait for plugins to be released logger.msg(WARNING, "Busy plugins found 
while unloading Module Manager. Waiting for them to be released."); for(;;) { // wait for plugins to be released lock.release(); sleep(1); lock.acquire(); // Check again // Just in case something called load() - unloading them again for(plugin_cache_t::iterator i = plugin_cache.begin(); i != plugin_cache.end();) { while(i->second.unload(this) > 0) { }; if(i->second) { ++i; } else { plugin_cache.erase(i); i = plugin_cache.begin(); // for map erase does not return iterator } } for(plugin_trash_t::iterator i = plugin_trash.begin(); i != plugin_trash.end();) { while(i->unload(this) > 0) { }; if(*i) { ++i; } else { i = plugin_trash.erase(i); } } if(plugin_cache.empty() && plugin_trash.empty()) return; }; } std::string ModuleManager::findLocation(const std::string& name) { Glib::Mutex::Lock lock(mlock); std::string path; std::list::const_iterator i = plugin_dir.begin(); for (; i != plugin_dir.end(); i++) { path = Glib::Module::build_path(*i, name); // Loader::logger.msg(VERBOSE, "Try load %s", path); FILE *file = fopen(path.c_str(), "r"); if (file == NULL) { continue; } else { fclose(file); break; } } if(i == plugin_dir.end()) path=""; return path; } void ModuleManager::unload(Glib::Module *module) { Glib::Mutex::Lock lock(mlock); for(plugin_cache_t::iterator p = plugin_cache.begin(); p!=plugin_cache.end();++p) { if(p->second == module) { p->second.unload(this); if(!(p->second)) { plugin_cache.erase(p); } return; } } for(plugin_trash_t::iterator p = plugin_trash.begin(); p!=plugin_trash.end();++p) { if(*p == module) { p->unload(NULL); // Do not call destructor for trashed module if(!(*p)) { plugin_trash.erase(p); } return; } } } void ModuleManager::use(Glib::Module *module) { Glib::Mutex::Lock lock(mlock); for(plugin_cache_t::iterator p = plugin_cache.begin(); p!=plugin_cache.end();++p) { if(p->second == module) { p->second.use(); return; } } for(plugin_trash_t::iterator p = plugin_trash.begin(); p!=plugin_trash.end();++p) { if(*p == module) { p->use(); return; } } } void ModuleManager::unuse(Glib::Module *module) { Glib::Mutex::Lock lock(mlock); for(plugin_cache_t::iterator p = plugin_cache.begin(); p!=plugin_cache.end();++p) { if(p->second == module) { p->second.unuse(); if(!(p->second)) { plugin_cache.erase(p); } return; } } for(plugin_trash_t::iterator p = plugin_trash.begin(); p!=plugin_trash.end();++p) { if(*p == module) { p->unuse(); if(!(*p)) { plugin_trash.erase(p); } return; } } } std::string ModuleManager::find(const std::string& name) { return findLocation(name); } Glib::Module* ModuleManager::load(const std::string& name,bool probe) { if (!Glib::Module::get_supported()) { return NULL; } // find name in plugin_cache { Glib::Mutex::Lock lock(mlock); plugin_cache_t::iterator p = plugin_cache.find(name); if (p != plugin_cache.end()) { ModuleManager::logger.msg(DEBUG, "Found %s in cache", name); p->second.load(); return static_cast(p->second); } } std::string path = findLocation(name); if(path.empty()) { ModuleManager::logger.msg(VERBOSE, "Could not locate module %s in following paths:", name); Glib::Mutex::Lock lock(mlock); std::list::const_iterator i = plugin_dir.begin(); for (; i != plugin_dir.end(); i++) { ModuleManager::logger.msg(VERBOSE, "\t%s", *i); } return NULL; }; // race! 
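// The cache lookup above was done in a separate lock scope and the lock is
// only re-acquired here, after the filesystem probing in findLocation().
// Another thread may therefore have loaded the same module in the meantime;
// in that case the assignment to plugin_cache[name] below replaces the
// existing entry instead of reusing it.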
Glib::Mutex::Lock lock(mlock); Glib::ModuleFlags flags = Glib::ModuleFlags(0); if(probe) flags|=Glib::MODULE_BIND_LAZY; Glib::Module *module = new Glib::Module(path,flags); if ((!module) || (!(*module))) { ModuleManager::logger.msg(ERROR, strip_newline(Glib::Module::get_last_error())); if(module) delete module; return NULL; } ModuleManager::logger.msg(DEBUG, "Loaded %s", path); (plugin_cache[name] = module).load(); void* func = NULL; if(!module->get_symbol(ARC_MODULE_CONSTRUCTOR_SYMB,func)) func = NULL; if(func) { plugin_cache[name].use(); lock.release(); // Avoid deadlock if manager called from module constructor (*(arc_module_constructor_func)func)(module,this); lock.acquire(); plugin_cache[name].unuse(); } return module; } Glib::Module* ModuleManager::reload(Glib::Module* omodule) { Glib::Mutex::Lock lock(mlock); plugin_cache_t::iterator p = plugin_cache.begin(); for(;p!=plugin_cache.end();++p) { if(p->second == omodule) break; } if(p==plugin_cache.end()) return NULL; // TODO: avoid reloading modules which are already properly loaded Glib::ModuleFlags flags = Glib::ModuleFlags(0); //flags|=Glib::MODULE_BIND_LOCAL; Glib::Module *module = new Glib::Module(omodule->get_name(),flags); if ((!module) || (!(*module))) { ModuleManager::logger.msg(ERROR, strip_newline(Glib::Module::get_last_error())); if(module) delete module; return NULL; } // Move existing module into trash list for later removal and // store handle in current entry. // Trashed module keeps load counter. But usage counter stays in cached entry. LoadableModuleDescription trashed; p->second.shift(module, trashed); if (trashed) { plugin_trash.push_back(trashed); } return module; } void ModuleManager::setCfg (XMLNode cfg) { if(!cfg) return; ModuleManager::logger.msg(DEBUG, "Module Manager Init by ModuleManager::setCfg"); if(!MatchXMLName(cfg,"ArcConfig")) return; XMLNode mm = cfg["ModuleManager"]; for (int n = 0;;++n) { XMLNode path = mm.Child(n); if (!path) { break; } if (MatchXMLName(path, "Path")) { Glib::Mutex::Lock lock(mlock); //std::cout<<"Size:"<::const_iterator it; for( it = plugin_dir.begin(); it != plugin_dir.end(); it++){ //std::cout<<(std::string)path<<"*********"<<(*it)<second.makePersistent(); ModuleManager::logger.msg(DEBUG, "%s made persistent", name); return true; } } ModuleManager::logger.msg(DEBUG, "Not found %s in cache", name); return false; } bool ModuleManager::makePersistent(Glib::Module* module) { Glib::Mutex::Lock lock(mlock); for(plugin_cache_t::iterator p = plugin_cache.begin(); p!=plugin_cache.end();++p) { if(p->second == module) { ModuleManager::logger.msg(DEBUG, "%s made persistent", p->first); p->second.makePersistent(); return true; } } ModuleManager::logger.msg(DEBUG, "Specified module not found in cache"); return false; } ModuleManager::LoadableModuleDescription::LoadableModuleDescription(void): module(NULL),count(0),usage_count(0) { } ModuleManager::LoadableModuleDescription::LoadableModuleDescription(Glib::Module* m): module(m),count(0),usage_count(0) { } void ModuleManager::LoadableModuleDescription::check_unload(ModuleManager* manager) { if((count <= 0) && (usage_count <= 0) && module) { void* func = NULL; if(!module->get_symbol(ARC_MODULE_DESTRUCTOR_SYMB,func)) func = NULL; if(func && manager) { use(); manager->mlock.unlock(); (*(arc_module_destructor_func)func)(module,manager); manager->mlock.lock(); unuse(); } delete module; module=NULL; } } ModuleManager::LoadableModuleDescription& ModuleManager::LoadableModuleDescription::operator=(Glib::Module* m) { module=m; return *this; } 
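// Illustrative sketch (not part of the original file): how calling code is
// expected to pair load()/use()/unuse()/unload() so that the "busy plugins"
// wait in ~ModuleManager() can eventually finish. The module name and the
// looked-up symbol are placeholders, not real ARC identifiers.
static void example_module_usage(ModuleManager& manager) {
  Glib::Module* module = manager.load("examplemodule", false); // probe = false
  if(!module) return;
  manager.use(module);      // pin the module while its symbols are being used
  void* symbol = NULL;
  if(module->get_symbol("example_symbol", symbol) && symbol) {
    // ... call through 'symbol' here ...
  }
  manager.unuse(module);    // release the usage pin taken above
  manager.unload(module);   // drop the load reference taken by load()
}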
ModuleManager::LoadableModuleDescription& ModuleManager::LoadableModuleDescription::operator=(const LoadableModuleDescription& d) { module=d.module; count=d.count; usage_count=d.usage_count; return *this; } int ModuleManager::LoadableModuleDescription::load(void) { ++count; return count; } int ModuleManager::LoadableModuleDescription::unload(ModuleManager* manager) { if(count > 0) --count; check_unload(manager); return count; } void ModuleManager::LoadableModuleDescription::shift(Glib::Module* source, LoadableModuleDescription& target) { target.module = module; target.count = count; module = source; count = 0; load(); // accepting new module handler target.unload(NULL); // removing reference taken by new module } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/loader/PaxHeaders.7502/FinderLoader.h0000644000000000000000000000012411714713616023441 xustar000000000000000027 mtime=1328781198.664475 27 atime=1513200574.473701 30 ctime=1513200658.941734543 nordugrid-arc-5.4.2/src/hed/libs/loader/FinderLoader.h0000644000175000002070000000127711714713616023515 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_FINDERLOADER_H__ #define __ARC_FINDERLOADER_H__ #include #include namespace Arc { // TODO: Remove this class by partially moving its functionality // to PluginsFactory and related classes. That should remove // dependency of plugin loading library on classes managing user // configuration. // This class is fully static. class FinderLoader /* : Loader */ { private: FinderLoader() {} ~FinderLoader() {} public: //static const PluginList GetPluginList(const std::string& kind); static const std::list GetLibrariesList(void); }; } // namespace Arc #endif // __ARC_FINDERLOADER_H__ nordugrid-arc-5.4.2/src/hed/libs/loader/PaxHeaders.7502/Loader.h0000644000000000000000000000012412061335603022301 xustar000000000000000027 mtime=1355135875.004913 27 atime=1513200574.464701 30 ctime=1513200658.939734518 nordugrid-arc-5.4.2/src/hed/libs/loader/Loader.h0000644000175000002070000000207112061335603022346 0ustar00mockbuildmock00000000000000#ifndef __ARC_LOADER_H__ #define __ARC_LOADER_H__ #include #include #include #include namespace Arc { /// Plugins loader. /** This class processes XML configration and loads specified plugins. Accepted configuration is defined by XML schema mcc.xsd. "Plugins" elements are parsed by this class and corresponding libraries are loaded. Main functionality is provided by class PluginsFactory. 
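    A minimal configuration sketch (not taken from a real setup - the path and
    the module name below are placeholders, and the string-parsing XMLNode
    constructor is assumed):

      Arc::XMLNode cfg("<ArcConfig>"
                       " <ModuleManager><Path>/usr/lib/arc</Path></ModuleManager>"
                       " <Plugins><Name>mcctcp</Name></Plugins>"
                       "</ArcConfig>");
      Arc::Loader loader(cfg);  // attempts to load libmcctcp from the configured path

    Only the "Plugins"/"Name" elements are interpreted directly by this class;
    "ModuleManager"/"Path" is consumed by the underlying PluginsFactory and
    ModuleManager, and any other elements are ignored here.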
*/ class Loader { public: static Logger logger; protected: /** Link to Factory responsible for loading and creation of Plugin and derived objects */ PluginsFactory *factory_; public: Loader() : factory_(NULL) {}; /** Constructor that takes whole XML configuration and performs common configuration part */ Loader(XMLNode cfg); /** Destructor destroys all components created by constructor */ ~Loader(); private: Loader(const Loader&); Loader& operator=(const Loader&) { return *this; }; }; } // namespace Arc #endif /* __ARC_LOADER_H__ */ nordugrid-arc-5.4.2/src/hed/libs/loader/PaxHeaders.7502/Plugin.h0000644000000000000000000000012412061335603022331 xustar000000000000000027 mtime=1355135875.004913 27 atime=1513200574.474701 30 ctime=1513200658.938734506 nordugrid-arc-5.4.2/src/hed/libs/loader/Plugin.h0000644000175000002070000003214212061335603022400 0ustar00mockbuildmock00000000000000#ifndef __ARC_PLUGIN_H__ #define __ARC_PLUGIN_H__ #include #include #include #include #include #ifdef HAVE_STDINT_H #include #endif #include #include #include #include namespace Arc { #define ARC_PLUGIN_DEFAULT_PRIORITY (128) class PluginsFactory; /// Base class for passing arguments to loadable ARC components. /** During its creation constructor function of ARC loadable component expects instance of class inherited from this one or wrapped in it. Then dynamic type casting is used for obtaining class of expected kind. */ class PluginArgument { friend class PluginsFactory; private: PluginsFactory* factory_; Glib::Module* module_; void set_factory(PluginsFactory* factory); void set_module(Glib::Module* module); protected: PluginArgument(void); public: virtual ~PluginArgument(void); /// Returns pointer to factory which instantiated plugin. /** Because factory usually destroys/unloads plugins in its destructor it should be safe to keep this pointer inside plugin for later use. But one must always check. */ PluginsFactory* get_factory(void); /// Returns pointer to loadable module/library which contains plugin. /** Corresponding factory keeps list of modules till itself is destroyed. So it should be safe to keep that pointer. But care must be taken if module contains persistent plugins. Such modules stay in memory after factory is detroyed. So it is advisable to use obtained pointer only in constructor function of plugin. */ Glib::Module* get_module(void); }; /// Base class for loadable ARC components. /** All classes representing loadable ARC components must be either descendants of this class or be wrapped by its offspring. */ class Plugin { private: Plugin(void); Plugin& operator=(const Plugin&) { return *this; }; protected: PluginsFactory* factory_; Glib::Module* module_; /// Main constructor for creating new plugin object Plugin(PluginArgument* arg); /// Constructor to be used if plugin want to copy itself Plugin(const Plugin& obj); public: virtual ~Plugin(void); }; /// Name of symbol refering to table of plugins. /** This C null terminated string specifies name of symbol which shared library should export to give an access to an array of PluginDescriptor elements. The array is terminated by element with all components set to NULL. */ extern const char* plugins_table_name; #define ARC_PLUGINS_TABLE_NAME __arc_plugins_table__ #define ARC_PLUGINS_TABLE_SYMB "__arc_plugins_table__" /// Constructor function of ARC lodable component /** This function is called with plugin-specific argument and should produce and return valid instance of plugin. 
If plugin can't be produced by any reason (for example because passed argument is not applicable) then NULL is returned. No exceptions should be raised. */ typedef Plugin* (*get_plugin_instance)(PluginArgument* arg); /// Description of ARC lodable component typedef struct { const char* name; // Unique name of plugin in scope of its kind const char* kind; // Type/kind of plugin const char* description; // Short description of plugin uint32_t version; // Version of plugin (0 if not applicable) get_plugin_instance instance; // Pointer to constructor function } PluginDescriptor; /// Description of plugin /** This class is used for reports */ class PluginDesc { public: std::string name; std::string kind; std::string description; uint32_t version; uint32_t priority; PluginDesc(void):version(0),priority(ARC_PLUGIN_DEFAULT_PRIORITY) { }; }; /// Description of loadable module /** This class is used for reports */ class ModuleDesc { public: std::string name; std::list plugins; }; /// Generic ARC plugins loader /** The instance of this class provides functionality of loading pluggable ARC components stored in shared libraries. It also makes use of Arc Plugin Description (*.apd) files which contain textual plugin identfiers. Arc Plugin Description files contain attributes describing pluggable components stored in corresponding shared libraries. Using those files allows to save on actually loading and resolving libraries while looking for specific component. Specifically this class uses 'priority' attribute to sort plugin description in internal lists. Please note that priority affects order in which plugins tried in get_instance(...) methods. But it only works for plugins which were already loaded by previous calls to load(...) and get_instance(...) methods. For plugins discovered inside get_instance priority in not effective. This class mostly handles tasks of finding, identifying, fitering and sorting ARC pluggable components. For loading shared libraries it uses functionality of ModuleManager class. So it is important to see documentation of ModuleManager in order to understand how this class works. For more information also please check ARC HED documentation. This class is thread-safe - its methods are protected from simultatneous use from multiple threads. Current thread protection implementation is suboptimal and will be revised in future. */ class PluginsFactory: public ModuleManager { friend class PluginArgument; private: Glib::Mutex lock_; // Combined convenient description of module and // its representation inside module. class descriptor_t_ { public: PluginDesc desc_i; PluginDescriptor* desc_m; }; class module_t_ { public: // Handle of loaded module Glib::Module* module; // List of contained plugins. First one is also // pointer to plugins table. 
std::list plugins; PluginDescriptor* get_table(void) { return plugins.empty()?NULL:(plugins.front().desc_m); }; }; //typedef std::map modules_t_; // Container for all loaded modules and their plugins class modules_t_: public std::map { public: // convenience iterator for modules class miterator: public std::map::iterator { private: std::map* ref_; public: miterator(std::map& ref, std::map::iterator iter):std::map::iterator(iter),ref_(&ref) { }; operator bool(void) const { return (std::map::iterator&)(*this) != ref_->end(); }; bool operator!(void) const { return !((bool)(*this)); }; }; // class miterator // iterator for accessing plugins by priority class diterator: public std::list< std::pair >::iterator { private: std::list< std::pair >* ref_; public: diterator(std::list< std::pair >& ref, std::list< std::pair >::iterator iter):std::list< std::pair >::iterator(iter),ref_(&ref) { }; operator bool(void) const { return (std::list< std::pair >::iterator&)(*this) != ref_->end(); }; bool operator!(void) const { return !((bool)(*this)); }; }; // class diterator operator miterator(void) { return miterator(*this,this->begin()); }; miterator find(const std::string& name) { return miterator(*this,((std::map*)this)->find(name)); }; operator diterator(void) { return diterator(plugins_,plugins_.begin()); }; bool add(ModuleDesc* m_i, Glib::Module* m_h, PluginDescriptor* d_h); bool remove(miterator& module); private: // Plugins sorted by priority std::list< std::pair > plugins_; // TODO: potentially other sortings like by kind can be useful. }; modules_t_ modules_; bool try_load_; static Logger logger; bool load(const std::string& name,const std::list& kinds,const std::list& pnames); public: /** Constructor - accepts configuration (not yet used) meant to tune loading of modules. */ PluginsFactory(XMLNode cfg); /** Specifies if loadable module may be loaded while looking for analyzing its content. If set to false only *.apd files are checked. Modules without corresponding *.apd will be ignored. Default is true; */ void TryLoad(bool v) { try_load_ = v; }; bool TryLoad(void) { return try_load_; }; /** These methods load module named lib'name', locate plugin constructor functions of specified 'kind' and 'name' (if specified) and call it. Supplied argument affects way plugin instance is created in plugin-specific way. If name of plugin is not specified then all plugins of specified kind are tried with supplied argument till valid instance is created. All loaded plugins are also registered in internal list of this instance of PluginsFactory class. If search is set to false then no attempt is made to find plugins in loadable modules. Only plugins already loaded with previous calls to get_instance() and load() are checked. Returns created instance or NULL if failed. 
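    A minimal usage sketch (the kind and name strings and the ExampleArgument
    class are placeholders - real kinds and their PluginArgument subclasses
    are defined by the code that declares each plugin type):

      PluginsFactory factory(cfg);              // cfg: ArcConfig XMLNode
      ExampleArgument arg;                      // some PluginArgument subclass
      Plugin* plugin = factory.get_instance("HED:EXAMPLE", "example", &arg);
      if(plugin) {
        // use the plugin, typically after PluginCast<>() to the expected type
        delete plugin;   // the caller is normally responsible for the returned object
      }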
*/ Plugin* get_instance(const std::string& kind,PluginArgument* arg,bool search = true); Plugin* get_instance(const std::string& kind,int version,PluginArgument* arg,bool search = true); Plugin* get_instance(const std::string& kind,int min_version,int max_version,PluginArgument* arg,bool search = true); Plugin* get_instance(const std::string& kind,const std::string& name,PluginArgument* arg,bool search = true); Plugin* get_instance(const std::string& kind,const std::string& name,int version,PluginArgument* arg,bool search = true); Plugin* get_instance(const std::string& kind,const std::string& name,int min_version,int max_version,PluginArgument* arg,bool search = true); /** These methods load module named lib'name' and check if it contains ARC plugin(s) of specified 'kind' and 'name'. If there are no specified plugins or module does not contain any ARC plugins it is unloaded. All loaded plugins are also registered in internal list of this instance of PluginsFactory class. Returns true if any plugin was loaded. */ bool load(const std::string& name); bool load(const std::string& name,const std::string& kind); bool load(const std::string& name,const std::string& kind,const std::string& pname); bool load(const std::string& name,const std::list& kinds); bool load(const std::list& names,const std::string& kind); bool load(const std::list& names,const std::string& kind,const std::string& pname); bool load(const std::list& names,const std::list& kinds); /** Collect information about plugins stored in module(s) with specified names. Returns true if any of specified modules has plugins. */ bool scan(const std::string& name, ModuleDesc& desc); bool scan(const std::list& names, std::list& descs); /** Provides information about currently loaded modules and plugins. */ void report(std::list& descs); /** Filter list of modules by kind. 
*/ static void FilterByKind(const std::string& kind, std::list& descs); template P* GetInstance(const std::string& kind,PluginArgument* arg,bool search = true) { Plugin* plugin = get_instance(kind,arg,search); if(!plugin) return NULL; P* p = dynamic_cast(plugin); if(!p) delete plugin; return p; } template P* GetInstance(const std::string& kind,const std::string& name,PluginArgument* arg,bool search = true) { Plugin* plugin = get_instance(kind,name,arg,search); if(!plugin) return NULL; P* p = dynamic_cast(plugin); if(!p) delete plugin; return p; } }; template P* PluginCast(PluginArgument* p) { if(p == NULL) return NULL; P* pp = dynamic_cast(p); if(pp != NULL) return pp; // Workaround for g++ and loadable modules if(strcmp(typeid(P).name(),typeid(*p).name()) != 0) return NULL; return static_cast(p); } template P* PluginCast(Plugin* p) { if(p == NULL) return NULL; P* pp = dynamic_cast(p); if(pp != NULL) return pp; // Workaround for g++ and loadable modules if(strcmp(typeid(P).name(),typeid(*p).name()) != 0) return NULL; return static_cast(p); } } // namespace Arc #endif /* __ARC_PLUGIN_H__ */ nordugrid-arc-5.4.2/src/hed/libs/loader/PaxHeaders.7502/schema0000644000000000000000000000013213214316022022077 xustar000000000000000030 mtime=1513200658.966734849 30 atime=1513200668.720854145 30 ctime=1513200658.966734849 nordugrid-arc-5.4.2/src/hed/libs/loader/schema/0000755000175000002070000000000013214316022022222 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/loader/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711255700321024216 xustar000000000000000027 mtime=1253540049.444682 30 atime=1513200598.076990143 30 ctime=1513200658.964734824 nordugrid-arc-5.4.2/src/hed/libs/loader/schema/Makefile.am0000644000175000002070000000013711255700321024261 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = loader.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/libs/loader/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315726024233 xustar000000000000000030 mtime=1513200598.107990522 30 atime=1513200648.115602135 30 ctime=1513200658.965734836 nordugrid-arc-5.4.2/src/hed/libs/loader/schema/Makefile.in0000644000175000002070000004351713214315726024313 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/loader/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = 
@ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = 
@LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ 
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = loader.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/loader/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/loader/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | 
sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/loader/schema/PaxHeaders.7502/loader.xsd0000644000000000000000000000012411461507513024154 xustar000000000000000027 mtime=1288081227.276958 27 atime=1513200574.471701 30 ctime=1513200658.966734849 nordugrid-arc-5.4.2/src/hed/libs/loader/schema/loader.xsd0000644000175000002070000002145011461507513024223 0ustar00mockbuildmock00000000000000 This element specifies parameters needed to successfully load plugins. 
Currently it allows to specify filesystem paths to directories where plugin libraries are located. Specify filesystem paths to directories where plugin libraries are located. modulepath This element defines shared library which contains plugins to be used. It is supposed to be used if name of library is not same as name of plugin and hence can't be located automatically. Specify the name of the plugin. This element is not required and does not affect chains directly. It's purpose is to group multiple components logically mostly for readability purpose. This element defines MCC plugin. Required attribute 'name' specifies name of plugin as defined in MCC description. Required attribute 'id' assigns identifier which is used to refer to this element from others. Sub-elements 'next' refer to next components in a chain through their attribute 'id' and their content represent assigned component-specific label. If attribute 'id' is missing all 'next' refer to next component in document. If 'next' is missing one label-less 'next' is assigned automatically. Presence of attribute 'entry' exposes this MCC through Loader class interface to external code. That is meant to be used in code which cretes chains dynamically like client utilities. Rest elements define component-specific configuration. This element is a Plexer. Optional attribute 'id' assigns identifier which is used to refer to this element from others. If not specified it will be assigned to "plexer" automatically. Sub-elements 'next' refer to next components in a chain and their content represent requested endpoints. In Plexer content of element 'next' represents Regular Expression pattern. For every incoming message path part of message's endpoint is matched pattern.In case of ordinary service element 'next' may look like ^/service$ If service is also responsible for whole subtree then simple solution is ^/service But more safer solution would be to use 2 elements ^/service$ ^/service/ Unmatched part of message endpoint is propagated with incoming message in attribute PLEXER:EXTENSION and may be used by service to determine response. This element represents a service - last compnent in a chain. Required attribute 'name' specifies name of plugin as defined in Service description. Required attribute 'id' assigns identifier which is used to refer to this element from others. Rest elements define service-specific configuration. This element specifies security handler plugin to be called at various stages of message processing. Depending on produced result message may be either sent farther through chain or processing would be cancelled. Required attribute 'name' specifies name of plugin. Attibute 'id' creates identifier of SecHandler which may be used to refer to it. If attribute 'refid' is defined then configuration of SecHendler is provided by another element within ArcConfig with corresponding 'id'. Attribute 'event' defines to which queue inside MCC SecHandler to be attached. If it's missing SecHandler is attached to default queue if MCC has such. Names of queues are MCC specific. If not otherwise specified they are 'incoming' and 'outgoing' and are processed for incoming and outgoing messages. There is no default queue by default. 
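To make the plugin-table contract documented in Plugin.h above concrete, the following is a minimal sketch of what a loadable module is expected to export. It is not taken from the source tree: the plugin name, kind string and ExamplePlugin class are placeholders, and the include path is the assumed install location of Plugin.h.

// examplemodule.cpp - hypothetical ARC plugin module (sketch)
#include <arc/loader/Plugin.h>   // assumed install path

namespace {
  class ExamplePlugin: public Arc::Plugin {
  public:
    ExamplePlugin(Arc::PluginArgument* arg): Arc::Plugin(arg) {}
  };

  // Constructor function: must return NULL (never throw) if the supplied
  // argument is not applicable to this plugin.
  Arc::Plugin* get_example_instance(Arc::PluginArgument* arg) {
    if(!arg) return NULL;
    return new ExamplePlugin(arg);
  }
}

// Table located by PluginsFactory through the exported __arc_plugins_table__
// symbol; entries are {name, kind, description, version, constructor} and the
// array is terminated by an all-NULL element.
Arc::PluginDescriptor ARC_PLUGINS_TABLE_NAME[] = {
  { "example", "HED:EXAMPLE", "Illustrative plugin", 0, &get_example_instance },
  { NULL, NULL, NULL, 0, NULL }
};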
nordugrid-arc-5.4.2/src/hed/libs/loader/PaxHeaders.7502/README0000644000000000000000000000012411001653037021577 xustar000000000000000027 mtime=1208440351.928622 27 atime=1513200574.464701 30 ctime=1513200658.937734494 nordugrid-arc-5.4.2/src/hed/libs/loader/README0000644000175000002070000000007111001653037021642 0ustar00mockbuildmock00000000000000loader infrastucture. It provides class to load any *MC. nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315724021523 xustar000000000000000030 mtime=1513200596.033965156 30 atime=1513200648.082601731 30 ctime=1513200658.771732464 nordugrid-arc-5.4.2/src/hed/libs/Makefile.in0000644000175000002070000005660213214315724021602 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags 
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ 
GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ 
XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @XMLSEC_ENABLED_FALSE@XMLSEC_DIR = @XMLSEC_ENABLED_TRUE@XMLSEC_DIR = xmlsec @GLOBUSUTILS_ENABLED_FALSE@GLOBUSUTILS_DIR = @GLOBUSUTILS_ENABLED_TRUE@GLOBUSUTILS_DIR = globusutils # order is important! 
SUBDIRS = common loader message crypto cryptomod \ credential credentialmod data security \ ws-addressing $(XMLSEC_DIR) ws-security wsrf delegation \ communication compute ws infosys $(GLOBUSUTILS_DIR) \ credentialstore DIST_SUBDIRS = common loader message crypto cryptomod \ credential credentialmod data security \ ws-addressing xmlsec ws-security wsrf delegation \ communication compute ws infosys globusutils credentialstore all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . 
|| ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/ws0000644000000000000000000000013213214316023020023 xustar000000000000000030 mtime=1513200659.904746321 30 atime=1513200668.720854145 30 ctime=1513200659.904746321 nordugrid-arc-5.4.2/src/hed/libs/ws/0000755000175000002070000000000013214316023020146 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/ws/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712231165602022143 xustar000000000000000027 mtime=1382345602.148497 30 atime=1513200598.961000955 30 ctime=1513200659.903746308 nordugrid-arc-5.4.2/src/hed/libs/ws/Makefile.am0000644000175000002070000000045612231165602022212 0ustar00mockbuildmock00000000000000lib_LTLIBRARIES = libarcws.la libarcws_ladir = $(pkgincludedir) nodist_EXTRA_libarcws_la_SOURCES = dummy.cpp libarcws_la_SOURCES = libarcws_la_LIBADD = $(top_builddir)/src/hed/libs/wsrf/libwsrf.la libarcws_la_LDFLAGS = -version-info 3:0:0 # libwsaddressing is included through libwsrf # stupid libtool nordugrid-arc-5.4.2/src/hed/libs/ws/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315727022157 xustar000000000000000030 mtime=1513200599.003001469 30 atime=1513200648.426605939 30 ctime=1513200659.904746321 nordugrid-arc-5.4.2/src/hed/libs/ws/Makefile.in0000644000175000002070000005554413214315727022242 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/ws DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libdir)" LTLIBRARIES = $(lib_LTLIBRARIES) libarcws_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/wsrf/libwsrf.la am_libarcws_la_OBJECTS = libarcws_la_OBJECTS = $(am_libarcws_la_OBJECTS) libarcws_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(libarcws_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile 
$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcws_la_SOURCES) $(nodist_EXTRA_libarcws_la_SOURCES) DIST_SOURCES = $(libarcws_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ 
GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ 
abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ lib_LTLIBRARIES = libarcws.la libarcws_ladir = $(pkgincludedir) nodist_EXTRA_libarcws_la_SOURCES = dummy.cpp libarcws_la_SOURCES = libarcws_la_LIBADD = $(top_builddir)/src/hed/libs/wsrf/libwsrf.la libarcws_la_LDFLAGS = -version-info 3:0:0 all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/ws/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/ws/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcws.la: $(libarcws_la_OBJECTS) $(libarcws_la_DEPENDENCIES) $(libarcws_la_LINK) -rpath $(libdir) $(libarcws_la_OBJECTS) $(libarcws_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dummy.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ 
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(libdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-libLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-libLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libLTLIBRARIES clean-libtool ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am \ install-libLTLIBRARIES install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-libLTLIBRARIES # libwsaddressing is included through libwsrf # stupid libtool # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/ws/PaxHeaders.7502/README0000644000000000000000000000012411016534143020764 xustar000000000000000027 mtime=1211807843.180089 27 atime=1513200574.962707 30 ctime=1513200659.901746284 nordugrid-arc-5.4.2/src/hed/libs/ws/README0000644000175000002070000000006311016534143021030 0ustar00mockbuildmock00000000000000merged library: libwsrf libws-addresing libinfosys nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/communication0000644000000000000000000000013213214316024022240 xustar000000000000000030 mtime=1513200660.174749623 30 atime=1513200668.720854145 30 ctime=1513200660.174749623 nordugrid-arc-5.4.2/src/hed/libs/communication/0000755000175000002070000000000013214316024022363 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/communication/PaxHeaders.7502/ClientInterface.cpp0000644000000000000000000000012313153453750026072 xustar000000000000000026 mtime=1504597992.39874 27 atime=1513200574.454701 30 ctime=1513200659.725744131 nordugrid-arc-5.4.2/src/hed/libs/communication/ClientInterface.cpp0000644000175000002070000007231513153453750026150 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif // This define is needed to have maximal values for types with fixed size #define __STDC_LIMIT_MACROS #include #include #include #include #include #include "ClientInterface.h" namespace Arc { #define SECURITY_IS_SSL(sec) ( \ (sec == TLSSec) || \ (sec == TLS10Sec) || (sec == TLS11Sec) || (sec == TLS12Sec) || \ (sec == SSL3Sec) || \ (sec == DTLSSec) || (sec == DTLS10Sec) || (sec ==DTLS12Sec) \ ) #define SECURITY_IS_GSI(sec) ( \ (sec == GSISec) || (sec == GSIIOSec) \ ) Logger ClientInterface::logger(Logger::getRootLogger(), "ClientInterface"); static void xml_add_element(XMLNode xml, XMLNode element) { if ((std::string)(element.Attribute("overlay")) != "add") if (element.Size() > 0) { std::string element_name = element.Name(); // FullName ? std::string element_id = (std::string)(element.Attribute("name")); for (XMLNode x = xml[element_name]; (bool)x; x = x[1]) { if (!element_id.empty()) if (element_id != (std::string)(x.Attribute("name"))) continue; for (int n = 0;; ++n) { XMLNode e = element.Child(n); if (!e) break; xml_add_element(x, e); } } return; } xml.NewChild(element, 0); return; } static void xml_add_elements(XMLNode xml, XMLNode elements) { for (int n = 0;; ++n) { XMLNode e = elements.Child(n); if (!e) break; xml_add_element(xml, e); } } static XMLNode ConfigMakeComponent(XMLNode chain, const char *name, const char *id, const char *next = NULL) { XMLNode comp = chain.NewChild("Component"); // Make sure namespaces and names are correct comp.NewAttribute("name") = name; comp.NewAttribute("id") = id; if (next) comp.NewChild("next").NewAttribute("id") = next; return comp; } static XMLNode ConfigFindComponent(XMLNode chain, const char *name, const char *id) { XMLNode comp = chain["Component"]; for (; (bool)comp; ++comp) if ((comp.Attribute("name") == name) && (comp.Attribute("id") == id)) return comp; return XMLNode(); } ClientInterface::ClientInterface(const BaseConfig& cfg) : xmlcfg(NS()), //Need to add all of the configuration namespaces here? 
loader() { cfg.MakeConfig(xmlcfg); xmlcfg.NewChild("Chain"); cfg.overlay.New(overlay); } ClientInterface::~ClientInterface() { if (loader) delete loader; } MCC_Status ClientInterface::Load() { if (!loader) { if (overlay) Overlay(overlay); loader = new MCCLoader(xmlcfg); } if (!(*loader)) return MCC_Status(GENERIC_ERROR,"COMMUNICATION",loader->failure()); return MCC_Status(STATUS_OK); } void ClientInterface::Overlay(XMLNode cfg) { xml_add_elements(xmlcfg, cfg); } void ClientInterface::AddSecHandler(XMLNode mcccfg, XMLNode handlercfg) { // Insert SecHandler configuration into MCC configuration block // Make sure namespaces and names are correct mcccfg.NewChild(handlercfg).Name("SecHandler"); } void ClientInterface::AddPlugin(XMLNode mcccfg, const std::string& libname, const std::string& libpath) { if (!libpath.empty()) { XMLNode mm = mcccfg["ModuleManager"]; if (!mm) mcccfg.NewChild("ModuleManager", 0); XMLNode mp = mm["Path"]; for (; (bool)mp; ++mp) if (mp == libpath) break; if (!mp) mm.NewChild("Path") = libpath; } if (!libname.empty()) { XMLNode pl = mcccfg["Plugins"]; for (; (bool)pl; ++pl) if (pl["Name"] == libname) break; if (!pl) mcccfg.NewChild("Plugins", 0).NewChild("Name") = libname; } } ClientTCP::ClientTCP(const BaseConfig& cfg, const std::string& host, int port, TCPSec sec, int timeout, bool no_delay) : ClientInterface(cfg), tcp_entry(NULL), tls_entry(NULL) { XMLNode comp = ConfigMakeComponent(xmlcfg["Chain"], "tcp.client", "tcp"); comp.NewAttribute("entry") = "tcp"; comp = comp.NewChild("Connect"); comp.NewChild("Host") = host; comp.NewChild("Port") = tostring(port); if(timeout >= 0) comp.NewChild("Timeout") = tostring(timeout); if(no_delay) comp.NewChild("NoDelay") = "true"; if (SECURITY_IS_SSL(sec.sec)) { comp = ConfigMakeComponent(xmlcfg["Chain"], "tls.client", "tls", "tcp"); if (!cfg.key.empty()) comp.NewChild("KeyPath") = cfg.key; if (!cfg.cert.empty()) comp.NewChild("CertificatePath") = cfg.cert; if (!cfg.proxy.empty()) comp.NewChild("ProxyPath") = cfg.proxy; if (!cfg.credential.empty()) comp.NewChild("Credential") = cfg.credential; if (!cfg.cafile.empty()) comp.NewChild("CACertificatePath") = cfg.cafile; if (!cfg.cadir.empty()) { XMLNode cadir = comp.NewChild("CACertificatesDir"); cadir = cfg.cadir; cadir.NewAttribute("PolicyGlobus") = "true"; }; comp.NewAttribute("entry") = "tls"; if (sec.sec == SSL3Sec) comp.NewChild("Handshake") = "SSLv3"; else if (sec.sec == TLS10Sec) comp.NewChild("Handshake") = "TLSv1.0"; else if (sec.sec == TLS11Sec) comp.NewChild("Handshake") = "TLSv1.1"; else if (sec.sec == TLS12Sec) comp.NewChild("Handshake") = "TLSv1.2"; else if (sec.sec == DTLSSec) comp.NewChild("Handshake") = "DTLS"; else if (sec.sec == DTLS10Sec) comp.NewChild("Handshake") = "DTLSv1.0"; else if (sec.sec == DTLS12Sec) comp.NewChild("Handshake") = "DTLSv1.2"; else comp.NewChild("Handshake") = "TLS"; // also default } else if (SECURITY_IS_GSI(sec.sec)) { comp = ConfigMakeComponent(xmlcfg["Chain"], "tls.client", "gsi", "tcp"); if (!cfg.key.empty()) comp.NewChild("KeyPath") = cfg.key; if (!cfg.cert.empty()) comp.NewChild("CertificatePath") = cfg.cert; if (!cfg.proxy.empty()) comp.NewChild("ProxyPath") = cfg.proxy; if (!cfg.credential.empty()) comp.NewChild("Credential") = cfg.credential; if (!cfg.cafile.empty()) comp.NewChild("CACertificatePath") = cfg.cafile; if (!cfg.cadir.empty()) { XMLNode cadir = comp.NewChild("CACertificatesDir"); cadir = cfg.cadir; cadir.NewAttribute("PolicyGlobus") = "true"; }; if (sec.sec == GSISec) { comp.NewChild("GSI") = "globus"; } else { 
comp.NewChild("GSI") = "globusio"; } comp.NewAttribute("entry") = "gsi"; } if(sec.sec != NoSec) { if (sec.enc == RequireEnc) { comp.NewChild("Encryption") = "required"; } else if (sec.enc == PreferEnc) { comp.NewChild("Encryption") = "preferred"; } else if (sec.enc == OptionalEnc) { comp.NewChild("Encryption") = "optional"; } else if (sec.enc == NoEnc) { comp.NewChild("Encryption") = "off"; } } } ClientTCP::~ClientTCP() {} MCC_Status ClientTCP::Load() { MCC_Status r(STATUS_OK); if(!(r=ClientInterface::Load())) return r; if (!tls_entry) tls_entry = (*loader)["tls"]; if (!tls_entry) tls_entry = (*loader)["gsi"]; if (!tcp_entry) tcp_entry = (*loader)["tcp"]; if((!tls_entry) && (!tcp_entry)) { return MCC_Status(GENERIC_ERROR,"COMMUNICATION","MCC chain loading produced no entry point"); } return r; } MCC_Status ClientTCP::process(PayloadRawInterface *request, PayloadStreamInterface **response, bool tls) { *response = NULL; MCC_Status r; if (!(r=Load())) return r; if ((tls && !tls_entry) || (!tls && !tcp_entry)) { return MCC_Status(GENERIC_ERROR,"COMMUNICATION","MCC chain loading produced no entry point"); } MessageAttributes attributes_req; MessageAttributes attributes_rep; Message reqmsg; Message repmsg; reqmsg.Attributes(&attributes_req); reqmsg.Context(&context); reqmsg.Payload(request); repmsg.Attributes(&attributes_rep); repmsg.Context(&context); if (tls) { r = tls_entry->process(reqmsg, repmsg); } else { r = tcp_entry->process(reqmsg, repmsg); } if (repmsg.Payload() != NULL) { try { *response = dynamic_cast(repmsg.Payload()); repmsg.Payload(NULL); } catch (std::exception&) { } delete repmsg.Payload(); } return r; } MCC_Status ClientTCP::process(PayloadStreamInterface *request, PayloadStreamInterface **response, bool tls) { *response = NULL; MCC_Status r; if (!(r=Load())) return r; if ((tls && !tls_entry) || (!tls && !tcp_entry)) { return MCC_Status(GENERIC_ERROR,"COMMUNICATION","MCC chain loading produced no entry point"); } MessageAttributes attributes_req; MessageAttributes attributes_rep; Message reqmsg; Message repmsg; reqmsg.Attributes(&attributes_req); reqmsg.Context(&context); reqmsg.Payload(request); repmsg.Attributes(&attributes_rep); repmsg.Context(&context); if (tls) { r = tls_entry->process(reqmsg, repmsg); } else { r = tcp_entry->process(reqmsg, repmsg); } if (repmsg.Payload() != NULL) { try { *response = dynamic_cast(repmsg.Payload()); repmsg.Payload(NULL); } catch (std::exception&) { } delete repmsg.Payload(); } return r; } void ClientTCP::AddSecHandler(XMLNode handlercfg, TCPSec sec, const std::string& libname, const std::string& libpath) { if (SECURITY_IS_SSL(sec.sec)) { ClientInterface::AddSecHandler( ConfigFindComponent(xmlcfg["Chain"], "tls.client", "tls"), handlercfg); } else if (SECURITY_IS_GSI(sec.sec)) { ClientInterface::AddSecHandler( ConfigFindComponent(xmlcfg["Chain"], "tls.client", "gsi"), handlercfg); } else { ClientInterface::AddSecHandler( ConfigFindComponent(xmlcfg["Chain"], "tcp.client", "tcp"), handlercfg); } for (XMLNode pl = handlercfg["Plugins"]; (bool)pl; ++pl) { AddPlugin(xmlcfg, pl["Name"]); } AddPlugin(xmlcfg, libname, libpath); } static std::string get_http_proxy(const URL& url) { if(url.Protocol() == "http") return GetEnv("ARC_HTTP_PROXY"); if(url.Protocol() == "https") return GetEnv("ARC_HTTPS_PROXY"); if(url.Protocol() == "httpg") return GetEnv("ARC_HTTPG_PROXY"); return ""; } static std::string get_http_proxy_host(const URL& url, const std::string& proxy_host, int /* proxy_port */) { if(!proxy_host.empty()) return proxy_host; std::string proxy 
= get_http_proxy(url); if(proxy.empty()) return url.Host(); std::string::size_type p = proxy.find(':'); if(p != std::string::npos) proxy.resize(p); return proxy; } static int get_http_proxy_port(const URL& url, const std::string& proxy_host, int proxy_port) { int port = 0; if(!proxy_host.empty()) { port = proxy_port; } else { std::string proxy = get_http_proxy(url); if(proxy.empty()) return url.Port(); std::string::size_type p = proxy.find(':'); if(p != std::string::npos) stringto(proxy.substr(p+1),port); } if(port == 0) { if(url.Protocol() == "http") port=HTTP_DEFAULT_PORT; else if(url.Protocol() == "https") port=HTTPS_DEFAULT_PORT; else if(url.Protocol() == "httpg") port=HTTPG_DEFAULT_PORT; } return port; } static TCPSec http_url_to_sec(const URL& url) { TCPSec sec; if(url.Protocol() == "https" || url.Protocol() == "davs") { if(url.Option("protocol") == "ssl3") { // TODO: Other options? sec.sec = SSL3Sec; } else { sec.sec = TLSSec; } } else if(url.Protocol() == "httpg") { if(url.Option("protocol") == "gsi") { sec.sec = GSIIOSec; } else { sec.sec = GSISec; } } else { sec.sec = NoSec; sec.enc = NoEnc; return sec; } sec.enc = RequireEnc; if(url.Option("encryption") == "required") { sec.enc = RequireEnc; } else if(url.Option("encryption") == "preferred") { sec.enc = PreferEnc; } else if(url.Option("encryption") == "optional") { sec.enc = OptionalEnc; } else if(url.Option("encryption") == "off") { sec.enc = NoEnc; } return sec; } // ------------------------------------------------------------------------- ClientHTTP::ClientHTTP(const BaseConfig& cfg, const URL& url, int timeout, const std::string& proxy_host, int proxy_port) : ClientTCP(cfg, get_http_proxy_host(url,proxy_host,proxy_port), get_http_proxy_port(url,proxy_host,proxy_port), http_url_to_sec(url), timeout, url.Option("tcpnodelay") == "yes"), http_entry(NULL), default_url(url), relative_uri(url.Option("relativeuri") == "yes"), sec(http_url_to_sec(url)), closed(false) { XMLNode comp = ConfigMakeComponent(xmlcfg["Chain"], "http.client", "http", (SECURITY_IS_SSL(sec.sec)) ? "tls" : (SECURITY_IS_GSI(sec.sec)) ? 
"gsi" : "tcp"); comp.NewAttribute("entry") = "http"; comp.NewChild("Method") = "POST"; // Override using attributes if needed comp.NewChild("Endpoint") = url.str(true); // Override using attributes if needed } ClientHTTP::~ClientHTTP() {} MCC_Status ClientHTTP::Load() { MCC_Status r(STATUS_OK); if(!(r=ClientTCP::Load())) return r; if (!http_entry) http_entry = (*loader)["http"]; if (!http_entry) { return MCC_Status(GENERIC_ERROR,"COMMUNICATION","MCC chain loading produced no entry point"); } return r; } MCC_Status ClientHTTP::process(const std::string& method, PayloadRawInterface *request, HTTPClientInfo *info, PayloadRawInterface **response) { std::multimap attributes; return process(method, "", attributes, 0, UINT64_MAX, request, info, response); } MCC_Status ClientHTTP::process(const std::string& method, std::multimap& attributes, PayloadRawInterface *request, HTTPClientInfo *info, PayloadRawInterface **response) { return process(method, "", attributes, 0, UINT64_MAX, request, info, response); } MCC_Status ClientHTTP::process(const std::string& method, const std::string& path, PayloadRawInterface *request, HTTPClientInfo *info, PayloadRawInterface **response) { std::multimap attributes; return process(method, path, attributes, 0, UINT64_MAX, request, info, response); } MCC_Status ClientHTTP::process(const std::string& method, const std::string& path, std::multimap& attributes, PayloadRawInterface *request, HTTPClientInfo *info, PayloadRawInterface **response) { return process(method, path, attributes, 0, UINT64_MAX, request, info, response); } MCC_Status ClientHTTP::process(const std::string& method, const std::string& path, uint64_t range_start, uint64_t range_end, PayloadRawInterface *request, HTTPClientInfo *info, PayloadRawInterface **response) { std::multimap attributes; return process(method, path, attributes, range_start, range_end, request, info, response); } MCC_Status ClientHTTP::process(const std::string& method, const std::string& path, std::multimap& attributes, uint64_t range_start, uint64_t range_end, PayloadRawInterface *request, HTTPClientInfo *info, PayloadRawInterface **response) { MessagePayload* mresp = NULL; MCC_Status r = process(method,path,attributes,range_start,range_end, (MessagePayload*)request,info,&mresp); if (mresp != NULL) { try { *response = dynamic_cast(mresp); mresp = NULL; } catch (std::exception&) { } delete mresp; } return r; } MCC_Status ClientHTTP::process(const std::string& method, const std::string& path, std::multimap& attributes, uint64_t range_start, uint64_t range_end, MessagePayload *request, HTTPClientInfo *info, MessagePayload **response) { *response = NULL; MCC_Status r; if (closed) return r; if (!(r=Load())) return r; if (!http_entry) return MCC_Status(GENERIC_ERROR,"COMMUNICATION","MCC chain loading produced no entry point"); MessageAttributes attributes_req; MessageAttributes attributes_rep; Message reqmsg; Message repmsg; reqmsg.Attributes(&attributes_req); reqmsg.Context(&context); reqmsg.Payload(request); repmsg.Attributes(&attributes_rep); repmsg.Context(&context); reqmsg.Attributes()->set("HTTP:METHOD", method); if (!path.empty()) { URL url(default_url); url.ChangeFullPath(path,true); if(relative_uri) { // Workaround for servers which can't handle full URLs in request reqmsg.Attributes()->set("HTTP:HOST", url.Host() + ":" + tostring(url.Port())); std::string rpath = url.FullPathURIEncoded(); if(rpath[0] != '/') rpath.insert(0,"/"); reqmsg.Attributes()->set("HTTP:ENDPOINT", rpath); } else { 
reqmsg.Attributes()->set("HTTP:ENDPOINT", url.str(true)); } } else { if(relative_uri) { reqmsg.Attributes()->set("HTTP:HOST", default_url.Host() + ":" + tostring(default_url.Port())); std::string rpath = default_url.FullPathURIEncoded(); if(rpath[0] != '/') rpath.insert(0,"/"); reqmsg.Attributes()->set("HTTP:ENDPOINT", rpath); } } if (range_end != UINT64_MAX) { reqmsg.Attributes()->set("HTTP:Range", "bytes=" + tostring(range_start) + "-" + tostring(range_end)); } else if (range_start != 0) { reqmsg.Attributes()->set("HTTP:Range", "bytes=" + tostring(range_start) + "-"); } std::map::iterator it; for (it = attributes.begin(); it != attributes.end(); it++) { std::string key("HTTP:"); key.append((*it).first); reqmsg.Attributes()->add(key, (*it).second); } r = http_entry->process(reqmsg, repmsg); if(!r) { if (repmsg.Payload() != NULL) delete repmsg.Payload(); return r; }; stringto(repmsg.Attributes()->get("HTTP:CODE"),info->code); if(info->code == 302) { // TODO: Handle redirection transparently } info->reason = repmsg.Attributes()->get("HTTP:REASON"); stringto(repmsg.Attributes()->get("HTTP:content-length"),info->size); std::string lm; lm = repmsg.Attributes()->get("HTTP:last-modified"); if (lm.size() > 11) info->lastModified = lm; info->type = repmsg.Attributes()->get("HTTP:content-type"); for(AttributeIterator i = repmsg.Attributes()->getAll("HTTP:set-cookie");i.hasMore();++i) { info->cookies.push_back(*i); } info->location = URL(repmsg.Attributes()->get("HTTP:location"), true); // Put all headers in generic map for(AttributeIterator i = repmsg.Attributes()->getAll();i.hasMore();++i) { info->headers.insert(std::pair(i.key(), *i)); } closed = (repmsg.Attributes()->get("HTTP:KEEPALIVE") != "TRUE"); *response = repmsg.Payload(); return r; } MCC_Status ClientHTTP::process(const ClientHTTPAttributes &reqattr, PayloadRawInterface *request, HTTPClientInfo *info, PayloadRawInterface **response) { MessagePayload* mresp = NULL; MCC_Status r = process(reqattr.method_,reqattr.path_,reqattr.attributes_, reqattr.range_start_,reqattr.range_end_, (MessagePayload*)request,info,&mresp); if (mresp != NULL) { try { *response = dynamic_cast(mresp); mresp = NULL; } catch (std::exception&) { } delete mresp; } return r; } MCC_Status ClientHTTP::process(const ClientHTTPAttributes &reqattr, PayloadStreamInterface *request, HTTPClientInfo *info, PayloadRawInterface **response) { MessagePayload* mresp = NULL; MCC_Status r = process(reqattr.method_,reqattr.path_,reqattr.attributes_, reqattr.range_start_,reqattr.range_end_, (MessagePayload*)request,info,&mresp); if (mresp != NULL) { try { *response = dynamic_cast(mresp); mresp = NULL; } catch (std::exception&) { } delete mresp; } return r; } MCC_Status ClientHTTP::process(const ClientHTTPAttributes &reqattr, PayloadRawInterface *request, HTTPClientInfo *info, PayloadStreamInterface **response) { MessagePayload* mresp = NULL; MCC_Status r = process(reqattr.method_,reqattr.path_,reqattr.attributes_, reqattr.range_start_,reqattr.range_end_, (MessagePayload*)request,info,&mresp); if (mresp != NULL) { try { *response = dynamic_cast(mresp); mresp = NULL; } catch (std::exception&) { } delete mresp; } return r; } MCC_Status ClientHTTP::process(const ClientHTTPAttributes &reqattr, PayloadStreamInterface *request, HTTPClientInfo *info, PayloadStreamInterface **response) { MessagePayload* mresp = NULL; MCC_Status r = process(reqattr.method_,reqattr.path_,reqattr.attributes_, reqattr.range_start_,reqattr.range_end_, (MessagePayload*)request,info,&mresp); if (mresp != NULL) { try { 
*response = dynamic_cast(mresp); mresp = NULL; } catch (std::exception&) { } delete mresp; } return r; } void ClientHTTP::AddSecHandler(XMLNode handlercfg, const std::string& libname, const std::string& libpath) { ClientInterface::AddSecHandler( ConfigFindComponent(xmlcfg["Chain"], "http.client", "http"), handlercfg); for (XMLNode pl = handlercfg["Plugins"]; (bool)pl; ++pl) AddPlugin(xmlcfg, pl["Name"]); AddPlugin(xmlcfg, libname, libpath); } ClientHTTPAttributes::ClientHTTPAttributes(const std::string& method): method_(method),path_(default_path_),attributes_(default_attributes_), range_start_(0),range_end_(UINT64_MAX) { } ClientHTTPAttributes::ClientHTTPAttributes(const std::string& method, std::multimap& attributes): method_(method),path_(default_path_),attributes_(attributes), range_start_(0),range_end_(UINT64_MAX) { } ClientHTTPAttributes::ClientHTTPAttributes(const std::string& method, const std::string& path): method_(method),path_(path),attributes_(default_attributes_), range_start_(0),range_end_(UINT64_MAX) { } ClientHTTPAttributes::ClientHTTPAttributes(const std::string& method, const std::string& path, std::multimap& attributes): method_(method),path_(path),attributes_(attributes), range_start_(0),range_end_(UINT64_MAX) { } ClientHTTPAttributes::ClientHTTPAttributes(const std::string& method, const std::string& path, uint64_t range_start, uint64_t range_end): method_(method),path_(path),attributes_(default_attributes_), range_start_(range_start),range_end_(range_end) { } ClientHTTPAttributes::ClientHTTPAttributes(const std::string& method, const std::string& path, std::multimap& attributes, uint64_t range_start, uint64_t range_end): method_(method),path_(path),attributes_(attributes), range_start_(range_start),range_end_(range_end) { } // ------------------------------------------------------------------------- ClientSOAP::ClientSOAP(const BaseConfig& cfg, const URL& url, int timeout) : ClientHTTP(cfg, url, timeout), soap_entry(NULL) { XMLNode comp = ConfigMakeComponent(xmlcfg["Chain"], "soap.client", "soap", "http"); comp.NewAttribute("entry") = "soap"; } ClientSOAP::~ClientSOAP() {} MCC_Status ClientSOAP::Load() { MCC_Status r(STATUS_OK); if(!(r=ClientHTTP::Load())) return r; if (!soap_entry) soap_entry = (*loader)["soap"]; if (!soap_entry) { return MCC_Status(GENERIC_ERROR,"COMMUNICATION","MCC chain loading produced no entry point"); } return r; } MCC_Status ClientSOAP::process(PayloadSOAP *request, PayloadSOAP **response) { *response = NULL; MCC_Status r; if(!(r=Load())) return r; if (!soap_entry) return MCC_Status(GENERIC_ERROR,"COMMUNICATION","MCC chain loading produced no entry point"); MessageAttributes attributes_req; MessageAttributes attributes_rep; Message reqmsg; Message repmsg; reqmsg.Attributes(&attributes_req); reqmsg.Context(&context); reqmsg.Payload(request); repmsg.Attributes(&attributes_rep); repmsg.Context(&context); r = soap_entry->process(reqmsg, repmsg); if (repmsg.Payload() != NULL) { try { *response = dynamic_cast(repmsg.Payload()); repmsg.Payload(NULL); } catch (std::exception&) { } delete repmsg.Payload(); } return r; } MCC_Status ClientSOAP::process(const std::string& action, PayloadSOAP *request, PayloadSOAP **response) { *response = NULL; MCC_Status r; if(!(r=Load())) return r; if (!soap_entry) return MCC_Status(GENERIC_ERROR,"COMMUNICATION","MCC chain loading produced no entry point"); MessageAttributes attributes_req; MessageAttributes attributes_rep; Message reqmsg; Message repmsg; reqmsg.Attributes(&attributes_req); reqmsg.Context(&context); 
reqmsg.Payload(request); repmsg.Attributes(&attributes_rep); repmsg.Context(&context); attributes_req.set("SOAP:ACTION", action); r = soap_entry->process(reqmsg, repmsg); if (repmsg.Payload() != NULL) { try { *response = dynamic_cast(repmsg.Payload()); repmsg.Payload(NULL); } catch (std::exception&) { } delete repmsg.Payload(); } return r; } void ClientSOAP::AddSecHandler(XMLNode handlercfg, const std::string& libname, const std::string& libpath) { ClientInterface::AddSecHandler( ConfigFindComponent(xmlcfg["Chain"], "soap.client", "soap"), handlercfg); for (XMLNode pl = handlercfg["Plugins"]; (bool)pl; ++pl) AddPlugin(xmlcfg, pl["Name"]); AddPlugin(xmlcfg, libname, libpath); } // ------------------------------------------------------------------------- SecHandlerConfig::SecHandlerConfig(const std::string& name, const std::string& event) : XMLNode("") { NewAttribute("name") = name; if (!event.empty()) NewAttribute("event") = event; } DNListHandlerConfig::DNListHandlerConfig(const std::list& dns, const std::string& event) : SecHandlerConfig("arc.authz", event) { // Loading PDP which deals with DN lists NewChild("Plugins").NewChild("Name") = "arcshc"; XMLNode pdp = NewChild("PDP"); pdp.NewAttribute("name") = "simplelist.pdp"; for (std::list::const_iterator dn = dns.begin(); dn != dns.end(); ++dn) pdp.NewChild("DN") = (*dn); } void DNListHandlerConfig::AddDN(const std::string& dn) { XMLNode pdp = operator[]("PDP"); pdp.NewChild("DN") = dn; } ARCPolicyHandlerConfig::ARCPolicyHandlerConfig(const std::string& event) : SecHandlerConfig("arc.authz", event) {} ARCPolicyHandlerConfig::ARCPolicyHandlerConfig(XMLNode policy, const std::string& event) : SecHandlerConfig("arc.authz", event) { // Loading PDP which deals with ARC policies NewChild("Plugins").NewChild("Name") = "arcshc"; XMLNode pdp = NewChild("PDP"); pdp.NewAttribute("name") = "arc.pdp"; pdp.NewChild(policy); } void ARCPolicyHandlerConfig::AddPolicy(XMLNode policy) { XMLNode pdp = operator[]("PDP"); pdp.NewChild(policy); } void ARCPolicyHandlerConfig::AddPolicy(const std::string& policy) { XMLNode p(policy); XMLNode pdp = operator[]("PDP"); pdp.NewChild(p); } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/communication/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712231165602024357 xustar000000000000000027 mtime=1382345602.148497 30 atime=1513200596.420969889 30 ctime=1513200659.723744107 nordugrid-arc-5.4.2/src/hed/libs/communication/Makefile.am0000644000175000002070000000244312231165602024424 0ustar00mockbuildmock00000000000000lib_LTLIBRARIES = libarccommunication.la if XMLSEC_ENABLED ARCLIBS_WITH_XMLSEC = $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la LIBS_WITH_XMLSEC = $(XMLSEC_OPENSSL_LIBS) $(XMLSEC_LIBS) CFLAGS_WITH_XMLSEC = $(XMLSEC_OPENSSL_CFLAGS) $(XMLSEC_CFLAGS) HEADER_WITH_XMLSEC = ClientSAML2SSO.h SOURCE_WITH_XMLSEC = ClientSAML2SSO.cpp else ARCLIBS_WITH_XMLSEC = LIBS_WITH_XMLSEC = CFLAGS_WITH_XMLSEC = HEADER_WITH_XMLSEC = SOURCE_WITH_XMLSEC = endif libarccommunication_ladir = $(pkgincludedir)/communication libarccommunication_la_HEADERS = ClientInterface.h ClientX509Delegation.h $(HEADER_WITH_XMLSEC) libarccommunication_la_SOURCES = ClientInterface.cpp ClientX509Delegation.cpp $(SOURCE_WITH_XMLSEC) libarccommunication_la_CXXFLAGS = -I$(top_srcdir)/include \ $(CFLAGS_WITH_XMLSEC) $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) \ $(AM_CXXFLAGS) libarccommunication_la_LIBADD = \ $(ARCLIBS_WITH_XMLSEC) \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ 
$(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBS_WITH_XMLSEC) $(LIBXML2_LIBS) $(GLIBMM_LIBS) libarccommunication_la_LDFLAGS = -version-info 3:0:0 nordugrid-arc-5.4.2/src/hed/libs/communication/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315724024370 xustar000000000000000030 mtime=1513200596.472970525 30 atime=1513200647.882599285 30 ctime=1513200659.724744119 nordugrid-arc-5.4.2/src/hed/libs/communication/Makefile.in0000644000175000002070000007456513214315724024457 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/communication DIST_COMMON = $(am__libarccommunication_la_HEADERS_DIST) \ $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ 
if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libdir)" \ "$(DESTDIR)$(libarccommunication_ladir)" LTLIBRARIES = $(lib_LTLIBRARIES) @XMLSEC_ENABLED_TRUE@am__DEPENDENCIES_1 = $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la am__DEPENDENCIES_2 = @XMLSEC_ENABLED_TRUE@am__DEPENDENCIES_3 = $(am__DEPENDENCIES_2) \ @XMLSEC_ENABLED_TRUE@ $(am__DEPENDENCIES_2) libarccommunication_la_DEPENDENCIES = $(am__DEPENDENCIES_1) \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_3) $(am__DEPENDENCIES_2) \ $(am__DEPENDENCIES_2) am__libarccommunication_la_SOURCES_DIST = ClientInterface.cpp \ ClientX509Delegation.cpp ClientSAML2SSO.cpp @XMLSEC_ENABLED_TRUE@am__objects_1 = \ @XMLSEC_ENABLED_TRUE@ libarccommunication_la-ClientSAML2SSO.lo am_libarccommunication_la_OBJECTS = \ libarccommunication_la-ClientInterface.lo \ libarccommunication_la-ClientX509Delegation.lo \ $(am__objects_1) libarccommunication_la_OBJECTS = $(am_libarccommunication_la_OBJECTS) libarccommunication_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarccommunication_la_CXXFLAGS) $(CXXFLAGS) \ $(libarccommunication_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarccommunication_la_SOURCES) DIST_SOURCES = $(am__libarccommunication_la_SOURCES_DIST) am__libarccommunication_la_HEADERS_DIST = ClientInterface.h \ ClientX509Delegation.h ClientSAML2SSO.h HEADERS = $(libarccommunication_la_HEADERS) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ 
ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ 
LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ 
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ lib_LTLIBRARIES = libarccommunication.la @XMLSEC_ENABLED_FALSE@ARCLIBS_WITH_XMLSEC = @XMLSEC_ENABLED_TRUE@ARCLIBS_WITH_XMLSEC = $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la @XMLSEC_ENABLED_FALSE@LIBS_WITH_XMLSEC = @XMLSEC_ENABLED_TRUE@LIBS_WITH_XMLSEC = $(XMLSEC_OPENSSL_LIBS) $(XMLSEC_LIBS) @XMLSEC_ENABLED_FALSE@CFLAGS_WITH_XMLSEC = @XMLSEC_ENABLED_TRUE@CFLAGS_WITH_XMLSEC = $(XMLSEC_OPENSSL_CFLAGS) $(XMLSEC_CFLAGS) @XMLSEC_ENABLED_FALSE@HEADER_WITH_XMLSEC = @XMLSEC_ENABLED_TRUE@HEADER_WITH_XMLSEC = ClientSAML2SSO.h @XMLSEC_ENABLED_FALSE@SOURCE_WITH_XMLSEC = @XMLSEC_ENABLED_TRUE@SOURCE_WITH_XMLSEC = ClientSAML2SSO.cpp libarccommunication_ladir = $(pkgincludedir)/communication libarccommunication_la_HEADERS = ClientInterface.h ClientX509Delegation.h $(HEADER_WITH_XMLSEC) libarccommunication_la_SOURCES = ClientInterface.cpp ClientX509Delegation.cpp $(SOURCE_WITH_XMLSEC) libarccommunication_la_CXXFLAGS = -I$(top_srcdir)/include \ $(CFLAGS_WITH_XMLSEC) $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) \ $(AM_CXXFLAGS) libarccommunication_la_LIBADD = \ $(ARCLIBS_WITH_XMLSEC) \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBS_WITH_XMLSEC) $(LIBXML2_LIBS) $(GLIBMM_LIBS) libarccommunication_la_LDFLAGS = -version-info 3:0:0 all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/communication/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/communication/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarccommunication.la: $(libarccommunication_la_OBJECTS) $(libarccommunication_la_DEPENDENCIES) $(libarccommunication_la_LINK) -rpath $(libdir) $(libarccommunication_la_OBJECTS) $(libarccommunication_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommunication_la-ClientInterface.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommunication_la-ClientSAML2SSO.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommunication_la-ClientX509Delegation.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ 
DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarccommunication_la-ClientInterface.lo: ClientInterface.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommunication_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommunication_la-ClientInterface.lo -MD -MP -MF $(DEPDIR)/libarccommunication_la-ClientInterface.Tpo -c -o libarccommunication_la-ClientInterface.lo `test -f 'ClientInterface.cpp' || echo '$(srcdir)/'`ClientInterface.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommunication_la-ClientInterface.Tpo $(DEPDIR)/libarccommunication_la-ClientInterface.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ClientInterface.cpp' object='libarccommunication_la-ClientInterface.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommunication_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommunication_la-ClientInterface.lo `test -f 'ClientInterface.cpp' || echo '$(srcdir)/'`ClientInterface.cpp libarccommunication_la-ClientX509Delegation.lo: ClientX509Delegation.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommunication_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommunication_la-ClientX509Delegation.lo -MD -MP -MF $(DEPDIR)/libarccommunication_la-ClientX509Delegation.Tpo -c -o libarccommunication_la-ClientX509Delegation.lo `test -f 'ClientX509Delegation.cpp' || echo '$(srcdir)/'`ClientX509Delegation.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommunication_la-ClientX509Delegation.Tpo $(DEPDIR)/libarccommunication_la-ClientX509Delegation.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ClientX509Delegation.cpp' object='libarccommunication_la-ClientX509Delegation.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommunication_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommunication_la-ClientX509Delegation.lo `test -f 'ClientX509Delegation.cpp' || echo '$(srcdir)/'`ClientX509Delegation.cpp libarccommunication_la-ClientSAML2SSO.lo: ClientSAML2SSO.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommunication_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommunication_la-ClientSAML2SSO.lo -MD -MP -MF $(DEPDIR)/libarccommunication_la-ClientSAML2SSO.Tpo -c -o libarccommunication_la-ClientSAML2SSO.lo `test -f 'ClientSAML2SSO.cpp' || echo '$(srcdir)/'`ClientSAML2SSO.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommunication_la-ClientSAML2SSO.Tpo $(DEPDIR)/libarccommunication_la-ClientSAML2SSO.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ClientSAML2SSO.cpp' object='libarccommunication_la-ClientSAML2SSO.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) 
$(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommunication_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommunication_la-ClientSAML2SSO.lo `test -f 'ClientSAML2SSO.cpp' || echo '$(srcdir)/'`ClientSAML2SSO.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libarccommunication_laHEADERS: $(libarccommunication_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarccommunication_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarccommunication_ladir)" @list='$(libarccommunication_la_HEADERS)'; test -n "$(libarccommunication_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarccommunication_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarccommunication_ladir)" || exit $$?; \ done uninstall-libarccommunication_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarccommunication_la_HEADERS)'; test -n "$(libarccommunication_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarccommunication_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libarccommunication_ladir)" && rm -f $$files ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; 
\ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(HEADERS) installdirs: for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(libarccommunication_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-libarccommunication_laHEADERS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-libLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-libLTLIBRARIES \ uninstall-libarccommunication_laHEADERS .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libLTLIBRARIES clean-libtool ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am \ install-libLTLIBRARIES install-libarccommunication_laHEADERS \ install-man install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags uninstall \ uninstall-am uninstall-libLTLIBRARIES \ uninstall-libarccommunication_laHEADERS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/communication/PaxHeaders.7502/test0000644000000000000000000000013013214316024023215 xustar000000000000000029 mtime=1513200660.17774966 30 atime=1513200668.720854145 29 ctime=1513200660.17774966 nordugrid-arc-5.4.2/src/hed/libs/communication/test/0000755000175000002070000000000013214316024023342 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/communication/test/PaxHeaders.7502/SimulatorClasses.cpp0000644000000000000000000000012312042216423027275 xustar000000000000000027 mtime=1351163155.111915 27 atime=1513200574.453701 29 ctime=1513200660.17774966 nordugrid-arc-5.4.2/src/hed/libs/communication/test/SimulatorClasses.cpp0000644000175000002070000000346012042216423027346 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "SimulatorClasses.h" std::string Arc::ClientSOAPTest::action = ""; Arc::PayloadSOAP Arc::ClientSOAPTest::request = Arc::PayloadSOAP(Arc::NS()); Arc::PayloadSOAP* Arc::ClientSOAPTest::response = NULL; Arc::MCC_Status Arc::ClientSOAPTest::status(Arc::GENERIC_ERROR); Arc::MCC_Status Arc::ClientSOAPTest::process(Arc::PayloadSOAP *request, Arc::PayloadSOAP **response) { ClientSOAPTest::request = Arc::PayloadSOAP(*request); *response = ClientSOAPTest::response; return ClientSOAPTest::status; } Arc::MCC_Status Arc::ClientSOAPTest::process(const std::string& action, Arc::PayloadSOAP *request, Arc::PayloadSOAP **response) { Arc::ClientSOAPTest::action = action; ClientSOAPTest::request = Arc::PayloadSOAP(*request); *response = ClientSOAPTest::response; return ClientSOAPTest::status; } /* DelegationProviderSOAPTest class body specification */ std::string Arc::DelegationProviderSOAPTest::action = ""; Arc::PayloadSOAP Arc::DelegationProviderSOAPTest::request = Arc::PayloadSOAP(Arc::NS()); Arc::PayloadSOAP* Arc::DelegationProviderSOAPTest::response = NULL; Arc::MCC_Status Arc::DelegationProviderSOAPTest::status(Arc::GENERIC_ERROR); bool Arc::DelegationProviderSOAPTest::DelegateCredentialsInit(Arc::MCCInterface& mcc_interface,Arc::MessageContext* context) { id_ = "id"; request_ = "request"; return true; } bool Arc::DelegationProviderSOAPTest::DelegatedToken(XMLNode parent) { XMLNode token = parent.NewChild("deleg:DelegatedToken"); token.NewAttribute("deleg:Format")="x509"; token.NewChild("deleg:Id")=id_; token.NewChild("deleg:Value")= "delegation"; return true; } nordugrid-arc-5.4.2/src/hed/libs/communication/test/PaxHeaders.7502/SimulatorClasses.h0000644000000000000000000000012312070641767026760 xustar000000000000000026 mtime=1357071351.77812 27 atime=1513200574.454701 30 ctime=1513200660.176749647 nordugrid-arc-5.4.2/src/hed/libs/communication/test/SimulatorClasses.h0000644000175000002070000000550712070641767027035 0ustar00mockbuildmock00000000000000#ifndef __ARC_SIMULATORCLASSES_H__ #define __ARC_SIMULATORCLASSES_H__ #include #include #ifdef ClientSOAP #undef ClientSOAP #endif #ifdef DelegationProviderSOAP #undef DelegationProviderSOAP #endif namespace Arc { class ClientSOAPTest : public ClientSOAP { public: /** Constructor creates MCC chain and connects to server. */ ClientSOAPTest() : ClientSOAP(),soap_entry(NULL) {} ClientSOAPTest(const BaseConfig& cfg, const URL& url, int timeout = -1) : ClientSOAP(cfg, url, timeout) { Config config; soap_entry = new MCC(&config,NULL);; } /** Send SOAP request and receive response. */ MCC_Status process(PayloadSOAP *request, PayloadSOAP **response); /** Send SOAP request with specified SOAP action and receive response. 
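      A hedged sketch of how a unit test typically drives this simulator (the
      action string and fixture wiring below are illustrative, not taken from
      this file): the test primes the static members, lets the code under test
      call process(), then inspects what was actually sent.

        Arc::PayloadSOAP canned(Arc::NS());
        Arc::ClientSOAPTest::response = &canned;                  // reply handed back
        Arc::ClientSOAPTest::status = Arc::MCC_Status(Arc::STATUS_OK);
        // ... run the code under test, which ends up calling process("urn:example:op", ...) ...
        // Arc::ClientSOAPTest::action and Arc::ClientSOAPTest::request now record
        // the SOAP action and payload that the code under test sent.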
*/ MCC_Status process(const std::string& action, PayloadSOAP *request, PayloadSOAP **response); /** Instantiates pluggable elements according to generated configuration */ virtual MCC_Status Load() { return MCC_Status(STATUS_OK); } /** Returns entry point to SOAP MCC in configured chain. To initialize entry point Load() method must be called. */ MCC* GetEntry() { return soap_entry; } MessageContext& GetContext() { return context; } static std::string action; static Arc::PayloadSOAP request; static Arc::PayloadSOAP* response; static Arc::MCC_Status status; MCC *soap_entry; MessageContext context; }; class DelegationProviderSOAPTest : public DelegationProviderSOAP { public: /** Creates instance from provided credentials. Credentials are used to sign delegated credentials. */ DelegationProviderSOAPTest(const std::string& credentials) : DelegationProviderSOAP(credentials) {} DelegationProviderSOAPTest(const std::string& cert_file,const std::string& key_file,std::istream* inpwd = NULL) : DelegationProviderSOAP(cert_file,key_file,inpwd) {} /** Performs DelegateCredentialsInit SOAP operation. As result request for delegated credentials is received by this instance and stored internally. Call to UpdateCredentials should follow. */ bool DelegateCredentialsInit(MCCInterface& mcc_interface,MessageContext* context); /** Generates DelegatedToken element. Element is created as child of provided XML element and contains structure described in delegation.wsdl. */ bool DelegatedToken(XMLNode parent); static std::string action; static Arc::PayloadSOAP request; static Arc::PayloadSOAP* response; static Arc::MCC_Status status; }; } #ifndef ClientSOAP #define ClientSOAP ClientSOAPTest #endif #ifndef DelegationProviderSOAP #define DelegationProviderSOAP DelegationProviderSOAPTest #endif #endif // __ARC_SIMULATORCLASSES_H__ nordugrid-arc-5.4.2/src/hed/libs/communication/PaxHeaders.7502/ClientInterface.h0000644000000000000000000000012313107553435025537 xustar000000000000000026 mtime=1495193373.96957 27 atime=1513200574.461701 30 ctime=1513200659.719744058 nordugrid-arc-5.4.2/src/hed/libs/communication/ClientInterface.h0000644000175000002070000002703513107553435025614 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_CLIENTINTERFACE_H__ #define __ARC_CLIENTINTERFACE_H__ #include #include #include #include #include #include #include #include #include #include #include namespace Arc { class MCCLoader; class Logger; class MCC; //! Utility base class for MCC /** The ClientInterface class is a utility base class used for * configuring a client side Message Chain Component (MCC) chain * and loading it into memory. It has several specializations of * increasing complexity of the MCC chains. * This class is not supposed to be used directly. Instead its * descendants like ClientTCP, ClientHTTP, etc. must be used. **/ class ClientInterface { public: ClientInterface() : loader(NULL) {} ClientInterface(const BaseConfig& cfg); virtual ~ClientInterface(); void Overlay(XMLNode cfg); const Config& GetConfig() const { return xmlcfg; } MessageContext& GetContext() { return context; } /// Initializes communication chain for this object. /// Call to this method in derived class is not needed /// if process() methods are used. It is only needed if /// GetEntry() is used before process() is called. 
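  /// A minimal illustrative sketch of that pattern (the endpoint URL is a
  /// placeholder, and ClientSOAP is one of the derived classes declared below):
  ///   Arc::ClientSOAP client(Arc::BaseConfig(), Arc::URL("https://example.org:60000/echo"));
  ///   if (client.Load()) {
  ///     Arc::MCC* entry = client.GetEntry();   // valid only after Load()
  ///     // drive the chain through entry->process(...), or simply use the
  ///     // higher-level process() helpers, which call Load() internally
  ///   }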
virtual MCC_Status Load(); protected: Config xmlcfg; XMLNode overlay; MCCLoader *loader; MessageContext context; static Logger logger; static void AddSecHandler(XMLNode mcccfg, XMLNode handlercfg); static void AddPlugin(XMLNode mcccfg, const std::string& libname, const std::string& libpath = ""); }; enum SecurityLayer { NoSec, //< No security applied to this communication TLSSec, //< HTTPS-like security is used (handshake negotiates highest possible protocol) GSISec, //< GSI compatible communication SSL3Sec, //< Start communication from SSLv3 handshake GSIIOSec, //< Globus GSI implemwntation compatible communication TLS10Sec, //< TLSv1.0 only protocol TLS11Sec, //< TLSv1.1 only protocol TLS12Sec, //< TLSv1.2 only protocol DTLSSec, //< Automatic selection of DTLS protocol DTLS10Sec, //< DTLSv1.0 only protocol DTLS12Sec, //< DTLSv1.0 only protocol }; enum EncryptionLevel { NoEnc, //< No data encryption to be performed RequireEnc, //< Force data encryption PreferEnc, //< Use data encryption if possible OptionalEnc //< Use data encryption only if needed }; class TCPSec { public: SecurityLayer sec; EncryptionLevel enc; TCPSec(void):sec(NoSec),enc(NoEnc) { }; TCPSec(SecurityLayer s):sec(s),enc((s==NoSec)?NoEnc:RequireEnc) { }; TCPSec(SecurityLayer s, EncryptionLevel e):sec(s),enc(e) { }; }; //! Class for setting up a MCC chain for TCP communication /** The ClientTCP class is a specialization of the ClientInterface * which sets up a client MCC chain for TCP communication, and * optionally with a security layer on top which can be either TLS, * GSI or SSL3. **/ class ClientTCP : public ClientInterface { public: ClientTCP() : tcp_entry(NULL), tls_entry(NULL) {} ClientTCP(const BaseConfig& cfg, const std::string& host, int port, TCPSec sec, int timeout = -1, bool no_delay = false); virtual ~ClientTCP(); MCC_Status process(PayloadRawInterface *request, PayloadStreamInterface **response, bool tls); MCC_Status process(PayloadStreamInterface *request, PayloadStreamInterface **response, bool tls); /** Returns entry point to TCP or TLS MCC in configured chain. To initialize entry point Load() method must be called. */ MCC* GetEntry() { return tls_entry ? tls_entry : tcp_entry; } virtual MCC_Status Load(); void AddSecHandler(XMLNode handlercfg, TCPSec sec, const std::string& libanme = "", const std::string& libpath = ""); protected: MCC *tcp_entry; MCC *tls_entry; }; struct HTTPClientInfo { int code; /// HTTP response code std::string reason; /// HTTP response reason uint64_t size; /// Size of body (content-length) Time lastModified; /// Reported modification time std::string type; /// Content-type std::list cookies; /// All collected cookies /// All returned headers /** * \since Added in 4.1.0. **/ std::multimap headers; URL location; /// Value of location attribute in HTTP response bool keep_alive; }; class ClientHTTP; //! Proxy class for handling request parameters /** The purpose of this calss is to reduce number of methods in ClientHTTP class. Use only for temporary variables. 
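     A hedged construction sketch (path, header and range values below are
     illustrative placeholders):

       std::multimap<std::string, std::string> hdrs;
       hdrs.insert(std::make_pair("Accept", "text/plain"));
       Arc::ClientHTTPAttributes attr("GET", "/files/file1", hdrs, 0, 1023);
       // attr is then handed directly to one of the ClientHTTP::process() overloads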
*/ class ClientHTTPAttributes { friend class ClientHTTP; public: ClientHTTPAttributes(const std::string& method); ClientHTTPAttributes(const std::string& method, std::multimap& attributes); ClientHTTPAttributes(const std::string& method, const std::string& path); ClientHTTPAttributes(const std::string& method, const std::string& path, std::multimap& attributes); ClientHTTPAttributes(const std::string& method, const std::string& path, uint64_t range_start, uint64_t range_end); ClientHTTPAttributes(const std::string& method, const std::string& path, std::multimap& attributes, uint64_t range_start, uint64_t range_end); protected: const std::string default_path_; std::multimap default_attributes_; const std::string& method_; const std::string& path_; std::multimap& attributes_; uint64_t range_start_; uint64_t range_end_; }; //! Class for setting up a MCC chain for HTTP communication /** The ClientHTTP class inherits from the ClientTCP class and adds * an HTTP MCC to the chain. **/ class ClientHTTP : public ClientTCP { public: ClientHTTP() : http_entry(NULL), relative_uri(false), sec(NoSec) {} ClientHTTP(const BaseConfig& cfg, const URL& url, int timeout = -1, const std::string& proxy_host = "", int proxy_port = 0); virtual ~ClientHTTP(); MCC_Status process(const std::string& method, PayloadRawInterface *request, HTTPClientInfo *info, PayloadRawInterface **response); MCC_Status process(const std::string& method, std::multimap& attributes, PayloadRawInterface *request, HTTPClientInfo *info, PayloadRawInterface **response); MCC_Status process(const std::string& method, const std::string& path, PayloadRawInterface *request, HTTPClientInfo *info, PayloadRawInterface **response); MCC_Status process(const std::string& method, const std::string& path, std::multimap& attributes, PayloadRawInterface *request, HTTPClientInfo *info, PayloadRawInterface **response); MCC_Status process(const std::string& method, const std::string& path, uint64_t range_start, uint64_t range_end, PayloadRawInterface *request, HTTPClientInfo *info, PayloadRawInterface **response); MCC_Status process(const std::string& method, const std::string& path, std::multimap& attributes, uint64_t range_start, uint64_t range_end, PayloadRawInterface *request, HTTPClientInfo *info, PayloadRawInterface **response); MCC_Status process(const ClientHTTPAttributes &reqattr, PayloadRawInterface *request, HTTPClientInfo *info, PayloadRawInterface **response); MCC_Status process(const ClientHTTPAttributes &reqattr, PayloadStreamInterface *request, HTTPClientInfo *info, PayloadRawInterface **response); MCC_Status process(const ClientHTTPAttributes &reqattr, PayloadRawInterface *request, HTTPClientInfo *info, PayloadStreamInterface **response); MCC_Status process(const ClientHTTPAttributes &reqattr, PayloadStreamInterface *request, HTTPClientInfo *info, PayloadStreamInterface **response); /** Returns entry point to HTTP MCC in configured chain. To initialize entry point Load() method must be called. 
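     Most callers skip GetEntry() and use the process() helpers directly; a
     hedged sketch (URL, path and range are placeholders, and Arc::PayloadRaw is
     assumed as the concrete empty request body):

       Arc::ClientHTTP client(Arc::BaseConfig(), Arc::URL("https://example.org/files"));
       Arc::PayloadRaw body;
       Arc::HTTPClientInfo info;
       Arc::PayloadRawInterface* resp = NULL;
       Arc::MCC_Status st = client.process("GET", "/files/file1", 0, 1023, &body, &info, &resp);
       if (st && resp) {
         // info.code, info.size and info.lastModified are filled from the reply
         delete resp;
       }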
*/ MCC* GetEntry() { return http_entry; } void AddSecHandler(XMLNode handlercfg, const std::string& libanme = "", const std::string& libpath = ""); virtual MCC_Status Load(); void RelativeURI(bool val) { relative_uri=val; }; const URL& GetURL() const { return default_url; }; bool GetClosed() const { return closed; } protected: MCC *http_entry; URL default_url; bool relative_uri; TCPSec sec; bool closed; MCC_Status process(const std::string& method, const std::string& path, std::multimap& attributes, uint64_t range_start, uint64_t range_end, MessagePayload *request, HTTPClientInfo *info, MessagePayload **response); }; /** Class with easy interface for sending/receiving SOAP messages over HTTP(S/G). It takes care of configuring MCC chain and making an entry point. */ class ClientSOAP : public ClientHTTP { public: /** Constructor creates MCC chain and connects to server. */ ClientSOAP() : soap_entry(NULL) {} ClientSOAP(const BaseConfig& cfg, const URL& url, int timeout = -1); virtual ~ClientSOAP(); /** Send SOAP request and receive response. */ MCC_Status process(PayloadSOAP *request, PayloadSOAP **response); /** Send SOAP request with specified SOAP action and receive response. */ MCC_Status process(const std::string& action, PayloadSOAP *request, PayloadSOAP **response); /** Returns entry point to SOAP MCC in configured chain. To initialize entry point Load() method must be called. */ MCC* GetEntry() { return soap_entry; } /** Adds security handler to configuration of SOAP MCC */ void AddSecHandler(XMLNode handlercfg, const std::string& libanme = "", const std::string& libpath = ""); /** Instantiates pluggable elements according to generated configuration */ virtual MCC_Status Load(); protected: MCC *soap_entry; }; // Convenience base class for creating configuration // security handlers. 
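  // A hedged sketch of combining these helpers with the AddSecHandler() methods
  // above (the DN below is a placeholder):
  //
  //   std::list<std::string> dns;
  //   dns.push_back("/O=Grid/O=Example/CN=Trusted User");
  //   Arc::DNListHandlerConfig authz(dns, "incoming");
  //   client.AddSecHandler(authz);   // client is a ClientHTTP or ClientSOAP instance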
class SecHandlerConfig : public XMLNode { public: SecHandlerConfig(const std::string& name, const std::string& event = "incoming"); }; class DNListHandlerConfig : public SecHandlerConfig { public: DNListHandlerConfig(const std::list& dns, const std::string& event = "incoming"); void AddDN(const std::string& dn); }; class ARCPolicyHandlerConfig : public SecHandlerConfig { public: ARCPolicyHandlerConfig(const std::string& event = "incoming"); ARCPolicyHandlerConfig(XMLNode policy, const std::string& event = "incoming"); void AddPolicy(XMLNode policy); void AddPolicy(const std::string& policy); }; } // namespace Arc #endif // __ARC_CLIENTINTERFACE_H__ nordugrid-arc-5.4.2/src/hed/libs/communication/PaxHeaders.7502/ClientX509Delegation.cpp0000644000000000000000000000012412042216423026622 xustar000000000000000027 mtime=1351163155.111915 27 atime=1513200574.458701 30 ctime=1513200659.726744144 nordugrid-arc-5.4.2/src/hed/libs/communication/ClientX509Delegation.cpp0000644000175000002070000002706412042216423026700 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif // This define is needed to have maximal values for types with fixed size #define __STDC_LIMIT_MACROS #include #include #include #include #include #include #include "ClientX509Delegation.h" namespace Arc { Logger ClientX509Delegation::logger(Logger::getRootLogger(), "ClientX509Delegation"); #define ARC_DELEGATION_NAMESPACE "http://www.nordugrid.org/schemas/delegation" #define GS_DELEGATION_NAMESPACE "http://www.gridsite.org/namespaces/delegation-2" ClientX509Delegation::ClientX509Delegation(const BaseConfig& cfg, const URL& url) : soap_client_(NULL), signer_(NULL) { soap_client_ = new ClientSOAP(cfg, url, 60); //Use the certificate and key in the main chain to delegate cert_file_ = cfg.cert; privkey_file_ = cfg.key; proxy_file_ = cfg.proxy; trusted_ca_dir_ = cfg.cadir; trusted_ca_file_ = cfg.cafile; if (!cert_file_.empty() && !privkey_file_.empty()) signer_ = new Credential(cert_file_, privkey_file_, trusted_ca_dir_, trusted_ca_file_); else if (!proxy_file_.empty()) signer_ = new Credential(proxy_file_, "", trusted_ca_dir_, trusted_ca_file_); } ClientX509Delegation::~ClientX509Delegation() { if (soap_client_) delete soap_client_; if (signer_) delete signer_; } bool ClientX509Delegation::createDelegation(DelegationType deleg, std::string& delegation_id) { if (deleg == DELEG_ARC) { //Use the DelegationInterface class for ARC delegation service logger.msg(INFO, "Creating delegation credential to ARC delegation service"); if (soap_client_ != NULL) { NS ns; ns["deleg"] = ARC_DELEGATION_NAMESPACE; PayloadSOAP request(ns); request.NewChild("deleg:DelegateCredentialsInit"); PayloadSOAP *response = NULL; //Send DelegateCredentialsInit request MCC_Status status = soap_client_->process(&request, &response); if (status != STATUS_OK) { logger.msg(ERROR, "DelegateCredentialsInit failed"); return false; } if (!response) { logger.msg(ERROR, "There is no SOAP response"); return false; } XMLNode token = (*response)["DelegateCredentialsInitResponse"]["TokenRequest"]; if (!token) { logger.msg(ERROR, "There is no X509 request in the response"); delete response; return false; } if (((std::string)(token.Attribute("Format"))) != "x509") { logger.msg(ERROR, "There is no Format request in the response"); delete response; return false; } delegation_id = (std::string)(token["Id"]); std::string x509request = (std::string)(token["Value"]); delete response; if (delegation_id.empty() || x509request.empty()) { 
logger.msg(ERROR, "There is no Id or X509 request value in the response"); return false; } //std::cout<<"X509 Request: \n"<SignRequest(&proxy, signedcert))) { logger.msg(ERROR, "DelegateProxy failed"); return false; } std::string signercert_str; std::string signercertchain_str; signer_->OutputCertificate(signercert_str); signer_->OutputCertificateChain(signercertchain_str); signedcert.append(signercert_str); signedcert.append(signercertchain_str); std::cout<<"X509 proxy certificate: \n"<process(&request2, &response); if (status != STATUS_OK) { logger.msg(ERROR, "UpdateCredentials failed"); return false; } if (!response) { logger.msg(ERROR, "There is no SOAP response"); return false; } if (!(*response)["UpdateCredentialsResponse"]) { logger.msg(ERROR, "There is no UpdateCredentialsResponse in response"); delete response; return false; } delete response; return true; } else { logger.msg(ERROR, "There is no SOAP connection chain configured"); return false; } } else if (deleg == DELEG_GRIDSITE) { //Move the current delegation related code in CREAMClient class to here logger.msg(INFO, "Creating delegation to CREAM delegation service"); NS ns; ns["deleg"] = GS_DELEGATION_NAMESPACE; PayloadSOAP request(ns); XMLNode getProxyReqRequest = request.NewChild("deleg:getProxyReq"); XMLNode delegid = getProxyReqRequest.NewChild("deleg:delegationID"); delegid.Set(delegation_id); PayloadSOAP *response = NULL; //Send the getProxyReq request if (soap_client_) { MCC_Status status = soap_client_->process("", &request, &response); if (!status) { logger.msg(ERROR, "Delegation getProxyReq request failed"); return false; } if (response == NULL) { logger.msg(ERROR, "There is no SOAP response"); return false; } } else { logger.msg(ERROR, "There is no SOAP connection chain configured"); return false; } std::string getProxyReqReturnValue; if ((bool)(*response) && (bool)((*response)["getProxyReqResponse"]["getProxyReqReturn"]) && ((std::string)(*response)["getProxyReqResponse"]["getProxyReqReturn"] != "")) getProxyReqReturnValue = (std::string)(*response)["getProxyReqResponse"]["getProxyReqReturn"]; else { logger.msg(ERROR, "Creating delegation to CREAM delegation service failed"); return false; } delete response; //Sign the proxy certificate Time start; //start = start - Period(300); Credential proxy(start); std::string signedcert; //std::cout<<"X509 Request: \n"<SignRequest(&proxy, signedcert))) { logger.msg(ERROR, "DelegateProxy failed"); return false; } std::string signerstr, signerchain_str; signer_->OutputCertificate(signerstr); signer_->OutputCertificateChain(signerchain_str); signedcert.append(signerstr).append(signerchain_str); PayloadSOAP request2(ns); XMLNode putProxyRequest = request2.NewChild("deleg:putProxy"); XMLNode delegid_node = putProxyRequest.NewChild("deleg:delegationID"); delegid_node.Set(delegation_id); XMLNode proxy_node = putProxyRequest.NewChild("deleg:proxy"); proxy_node.Set(signedcert); response = NULL; //Send the putProxy request if (soap_client_) { MCC_Status status = soap_client_->process("", &request2, &response); if (!status) { logger.msg(ERROR, "Delegation putProxy request failed"); return false; } if (response == NULL) { logger.msg(ERROR, "There is no SOAP response"); return false; } } else { logger.msg(ERROR, "There is no SOAP connection chain configured"); return false; } if (!(bool)(*response) || !(bool)((*response)["putProxyResponse"])) { logger.msg(ERROR, "Creating delegation to CREAM delegation failed"); return false; } delete response; } else if (deleg == DELEG_MYPROXY) {} 
return true; } bool ClientX509Delegation::acquireDelegation(DelegationType deleg, std::string& delegation_cred, std::string& delegation_id, const std::string cred_identity, const std::string cred_delegator_ip, const std::string /* username */, const std::string /* password */) { if (deleg == DELEG_ARC) { //Use the DelegationInterface class for ARC delegation service logger.msg(INFO, "Getting delegation credential from ARC delegation service"); if (soap_client_ != NULL) { NS ns; ns["deleg"] = ARC_DELEGATION_NAMESPACE; PayloadSOAP request(ns); XMLNode tokenlookup = request.NewChild("deleg:AcquireCredentials").NewChild("deleg:DelegatedTokenLookup"); //Use delegation ID to acquire delegation credential from //delegation service, if the delegation ID is presented; if (!delegation_id.empty()) tokenlookup.NewChild("deleg:Id") = delegation_id; //If delegation ID is not presented, use the credential identity //credential delegator's IP to acquire delegation credential else { tokenlookup.NewChild("deleg:CredIdentity") = cred_identity; tokenlookup.NewChild("deleg:CredDelegatorIP") = cred_delegator_ip; } //Generate a X509 request std::string x509req_str; Time start; int keybits = 1024; Credential cred_request(start, Period(), keybits); cred_request.GenerateRequest(x509req_str); tokenlookup.NewChild("deleg:Value") = x509req_str; std::string privkey_str; cred_request.OutputPrivatekey(privkey_str); PayloadSOAP *response = NULL; //Send AcquireCredentials request MCC_Status status = soap_client_->process(&request, &response); if (status != STATUS_OK) { logger.msg(ERROR, "DelegateCredentialsInit failed"); return false; } if (!response) { logger.msg(ERROR, "There is no SOAP response"); return false; } XMLNode token = (*response)["AcquireCredentialsResponse"]["DelegatedToken"]; if (!token) { logger.msg(ERROR, "There is no Delegated X509 token in the response"); delete response; return false; } if (((std::string)(token.Attribute("Format"))) != "x509") { logger.msg(ERROR, "There is no Format delegated token in the response"); delete response; return false; } delegation_id = (std::string)(token["Id"]); std::string delegation_cert = (std::string)(token["Value"]); delete response; if (delegation_id.empty() || delegation_cert.empty()) { logger.msg(ERROR, "There is no Id or X509 token value in the response"); return false; } Credential proxy_cred(delegation_cert, privkey_str, trusted_ca_dir_, trusted_ca_file_, "", false); proxy_cred.OutputCertificate(delegation_cred); proxy_cred.OutputPrivatekey(delegation_cred); proxy_cred.OutputCertificateChain(delegation_cred); logger.msg(DEBUG,"Get delegated credential from delegation service: \n %s",delegation_cred.c_str()); return true; } else { logger.msg(ERROR, "There is no SOAP connection chain configured"); return false; } } else {} return false; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/communication/PaxHeaders.7502/ClientSAML2SSO.h0000644000000000000000000000012412042216423025031 xustar000000000000000027 mtime=1351163155.111915 27 atime=1513200574.464701 30 ctime=1513200659.721744083 nordugrid-arc-5.4.2/src/hed/libs/communication/ClientSAML2SSO.h0000644000175000002070000000563012042216423025102 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_CLIENTSAML2SSO_H__ #define __ARC_CLIENTSAML2SSO_H__ #include #include #include #include #include #include #include #include #include namespace Arc { class ClientHTTPwithSAML2SSO { public: /** Constructor creates MCC chain and connects to server. 
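      A hedged usage sketch for the SAML2 SSO clients declared in this header
      (the IdP name, username and password are placeholders supplied by the
      deployment, not defaults of this library):

        Arc::ClientSOAPwithSAML2SSO client(cfg, url);
        Arc::PayloadSOAP* resp = NULL;
        Arc::MCC_Status st = client.process(&request, &resp,
                                            "idp.example.org", "user", "secret");
        // pass reuse_authn = true on subsequent calls to skip a second SSO round trip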
*/ ClientHTTPwithSAML2SSO() : http_client_(NULL), authn_(false) {} ClientHTTPwithSAML2SSO(const BaseConfig& cfg, const URL& url); virtual ~ClientHTTPwithSAML2SSO(); /** Send HTTP request and receive response. */ MCC_Status process(const std::string& method, PayloadRawInterface *request, HTTPClientInfo *info, PayloadRawInterface **response, const std::string& idp_name, const std::string& username, const std::string& password, const bool reuse_authn = false); MCC_Status process(const std::string& method, const std::string& path, PayloadRawInterface *request, HTTPClientInfo *info, PayloadRawInterface **response, const std::string& idp_name, const std::string& username, const std::string& password, const bool reuse_authn = false); private: ClientHTTP *http_client_; bool authn_; //Credential and trusted certificates used to contact IdP std::string cert_file_; std::string privkey_file_; std::string ca_file_; std::string ca_dir_; static Logger logger; std::string cookie; }; class ClientSOAPwithSAML2SSO { public: /** Constructor creates MCC chain and connects to server.*/ ClientSOAPwithSAML2SSO() : soap_client_(NULL), authn_(false) {} ClientSOAPwithSAML2SSO(const BaseConfig& cfg, const URL& url); virtual ~ClientSOAPwithSAML2SSO(); /** Send SOAP request and receive response. */ MCC_Status process(PayloadSOAP *request, PayloadSOAP **response, const std::string& idp_name, const std::string& username, const std::string& password, const bool reuse_authn = false); /** Send SOAP request with specified SOAP action and receive response. */ MCC_Status process(const std::string& action, PayloadSOAP *request, PayloadSOAP **response, const std::string& idp_name, const std::string& username, const std::string& password, const bool reuse_authn = false); private: ClientSOAP *soap_client_; bool authn_; //Credential and trusted certificates used to contact IdP std::string cert_file_; std::string privkey_file_; std::string ca_file_; std::string ca_dir_; static Logger logger; std::string cookie; }; } // namespace Arc #endif // __ARC_CLIENTSAML2SSO_H__ nordugrid-arc-5.4.2/src/hed/libs/communication/PaxHeaders.7502/ClientX509Delegation.h0000644000000000000000000000012312042216423026266 xustar000000000000000027 mtime=1351163155.111915 27 atime=1513200574.461701 29 ctime=1513200659.72074407 nordugrid-arc-5.4.2/src/hed/libs/communication/ClientX509Delegation.h0000644000175000002070000001321112042216423026332 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_CLIENTX509DELEGATION_H__ #define __ARC_CLIENTX509DELEGATION_H__ #include #include #include #include #include #include #include #include #include #include namespace Arc { //This class is supposed to be run against the generic ARC delegation service //to delegate itself's X.509 credential to delegation service; afterwards, //other functional clients can access the services which is hosted together //with the above delegation service. //This class can be used in any client utility, and also the service implementation //which needs to interoperate with another service. //The purpose of this client (together with the delegation service) is that any //intermediate service is able to act on behalf of the user. // //This class will also be extended to interoperate with other delegation service //implementaion such as the gridsite implementation which is used by CREAM service. // //Also, MyProxy could be looked as a delegation service, which will only used for //the first-step delegation (user delegates its credential to client). 
In this case, //ClientX509Delegation will only be used for the client utility, not for the inmediate //services. //User firstly delegates its credential to MyProxy server (the proxy certificate and //related private key will be stored on MyProxy server), then the client (normally it //could be the Web Browser) uses the username/password to acquire the proxy credential //from MyProxy server. enum DelegationType { DELEG_ARC, DELEG_GRIDSITE, DELEG_GT4, DELEG_MYPROXY, DELEG_UNKNOWN }; class ClientX509Delegation { public: /** Constructor creates MCC chain and connects to server.*/ ClientX509Delegation() : soap_client_(NULL), signer_(NULL) {} ClientX509Delegation(const BaseConfig& cfg, const URL& url); virtual ~ClientX509Delegation(); /** Create the delegation credential according to the different remote * delegation service. * This method should be called by holder of EEC(end entity credential) * which would delegate its EEC credential, or by holder of delegated * credential(normally, the holder is intermediate service) which would * further delegate the credential (on behalf of the original EEC's holder) * (for instance, the 'n' intermediate service creates a delegation credential, * then the 'n+1' intermediate service aquires this delegation credential * from the delegation service and also acts on behalf of the EEC's holder * by using this delegation credential). * * @param deleg Delegation type * @param delegation_id For gridsite delegation service, the delegation_id * is supposed to be created by client side, and sent to service side; * for ARC delegation service, the delegation_id is supposed to be created * by service side, and returned back. So for gridsite delegation service, * this parameter is treated as input, while for ARC delegation service, * it is treated as output. */ bool createDelegation(DelegationType deleg, std::string& delegation_id); bool destroyDelegation(DelegationType /* deleg */) { return false; } /** Acquire delegation credential from delegation service. * This method should be called by intermediate service ('n+1' service as * explained on above) in order to use this delegation credential on behalf * of the EEC's holder. * @param deleg Delegation type * @param delegation_id delegation ID which is used to look up the credential * by delegation service * @param cred_identity the identity (in case of x509 credential, it is the * DN of EEC credential). * @param cred_delegator_ip the IP address of the credential delegator. Regard of * delegation, an intermediate service should accomplish * three tasks: * 1. Acquire 'n' level delegation credential (which is * delegated by 'n-1' level delegator) from delegation * service; * 1. Create 'n+1' level delegation credential to * delegation service; * 2. Use 'n' level delegation credential to act on behalf * of the EEC's holder. * In case of absense of delegation_id, the 'n-1' level * delegator's IP address and credential's identity are * supposed to be used for look up the delegation credential * from delegation service. 
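   *
   *   Illustrative call sequence (cfg, url, identity and ip are placeholders,
   *   not part of this interface):
   *
   *     Arc::ClientX509Delegation client(cfg, url);
   *     std::string deleg_cred, deleg_id;
   *     bool ok = client.acquireDelegation(Arc::DELEG_ARC, deleg_cred, deleg_id,
   *                                        identity, ip);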
*/ bool acquireDelegation(DelegationType deleg, std::string& delegation_cred, std::string& delegation_id, const std::string cred_identity = "", const std::string cred_delegator_ip = "", const std::string username = "", const std::string password = ""); private: ClientSOAP *soap_client_; std::string cert_file_; //if it is proxy certificate, the privkey_file_ should be empty std::string privkey_file_; std::string proxy_file_; std::string trusted_ca_dir_; std::string trusted_ca_file_; Credential *signer_; static Logger logger; }; } // namespace Arc #endif // __ARC_CLIENTX509DELEGATION_H__ nordugrid-arc-5.4.2/src/hed/libs/communication/PaxHeaders.7502/ClientSAML2SSO.cpp0000644000000000000000000000012412042216423025364 xustar000000000000000027 mtime=1351163155.111915 27 atime=1513200574.459701 30 ctime=1513200659.727744156 nordugrid-arc-5.4.2/src/hed/libs/communication/ClientSAML2SSO.cpp0000644000175000002070000004076412042216423025444 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif // This define is needed to have maximal values for types with fixed size #define __STDC_LIMIT_MACROS #include #include #include #include #include #include #include #include #include #include "ClientSAML2SSO.h" namespace Arc { Logger ClientHTTPwithSAML2SSO::logger(Logger::getRootLogger(), "ClientHTTPwithSAML2SSO"); Logger ClientSOAPwithSAML2SSO::logger(Logger::getRootLogger(), "ClientSOAPwithSAML2SSO"); ClientHTTPwithSAML2SSO::ClientHTTPwithSAML2SSO(const BaseConfig& cfg, const URL& url) : http_client_(NULL), authn_(false) { http_client_ = new ClientHTTP(cfg, url, 60); //Use the credential and trusted certificates from client's main chain to //contact IdP cert_file_ = cfg.cert; privkey_file_ = cfg.key; ca_file_ = cfg.cafile; ca_dir_ = cfg.cadir; } ClientHTTPwithSAML2SSO::~ClientHTTPwithSAML2SSO() { if (http_client_) delete http_client_; } static MCC_Status process_saml2sso(const std::string& idp_name, const std::string& username, const std::string& password, ClientHTTP *http_client, std::string& cert_file, std::string& privkey_file, std::string& ca_file, std::string& ca_dir, Logger& logger, std::string& cookie) { // ------------------------------------------- // User-Agent: 1. Send an empty http request to SP; // // The saml2sso process share the same tcp/tls connection (on the service // side, SP service and the functional/real service which is to be protected // are supposed to be at the same service chain) with the // main client chain. And because of this connection sharing, if the // saml2sso process (interaction between user-agent/SP service/extenal IdP // service, see SAML2 SSO profile) is succeeded, we can suppose that the // later real client/real service interaction is authorized. // // 2. User-Agent then get back , and send it response to IdP // // SP Service: a service based on http service on the service side, which is // specifically in charge of the funtionality of Service Provider of SAML2 // SSO profile. // // User-Agent: Since the SAML2 SSO profile is web-browser based, so here we // implement the code which is with the same functionality as browser's user agent. 
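    // Condensed outline of the exchange implemented below (a summary of this
    // function's own steps, kept here for orientation):
    //   1. POST the IdP name to the SP endpoint "/saml2sp" and read back the
    //      AuthnRequest URL.
    //   2. GET that URL at the IdP, following redirects and keeping the session
    //      cookie, until the login page (containing "j_username") is returned.
    //   3. POST j_username/j_password to the IdP login URL and extract the
    //      SAMLResponse (with its EncryptedAssertion) from the returned HTML.
    //   4. POST the SAML assertion back to "/saml2sp" over the main client chain.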
// ------------------------------------------- // Arc::PayloadRaw requestSP; Arc::PayloadRawInterface *responseSP = NULL; Arc::HTTPClientInfo infoSP; requestSP.Insert(idp_name.c_str(), 0, idp_name.size()); //Call the peer http endpoint with path "saml2sp", which //is the endpoint of saml SP service Arc::MCC_Status statusSP = http_client->process("POST", "/saml2sp", &requestSP, &infoSP, &responseSP); if (!responseSP) { logger.msg(Arc::ERROR, "Request failed: No response from SPService"); return MCC_Status(); } if (!statusSP) { logger.msg(Arc::ERROR, "Request failed: response from SPService is not as expected"); if (responseSP) delete responseSP; return MCC_Status(); } //Parse the authenRequestUrl from response std::string authnRequestUrl(responseSP->Content()); logger.msg(DEBUG, "Authentication Request URL: %s", authnRequestUrl); if (responseSP) delete responseSP; // ------------------------------------------- //User-Agent: Send the AuthnRequest to IdP, //(IP-based authentication, and Username/Password authentication) //get back b64 encoded saml response, and //send this saml response to SP //------------------------------------------- Arc::URL url(authnRequestUrl); Arc::MCCConfig cfg; if (!cert_file.empty()) cfg.AddCertificate(cert_file); if (!privkey_file.empty()) cfg.AddPrivateKey(privkey_file); if (!ca_file.empty()) cfg.AddCAFile(ca_file); if (!ca_dir.empty()) cfg.AddCADir(ca_dir); ClientHTTP clientIdP(cfg, url); Arc::PayloadRaw requestIdP; Arc::PayloadRawInterface *responseIdP = NULL; Arc::HTTPClientInfo infoIdP; std::multimap http_attributes; if (!(cookie.empty())) http_attributes.insert(std::pair("Cookie",cookie)); //Contact the IdP service Arc::MCC_Status statusIdP = clientIdP.process("GET", http_attributes, &requestIdP, &infoIdP, &responseIdP); if ((!(infoIdP.cookies.empty())) && infoIdP.code != 200) cookie = *(infoIdP.cookies.begin()); if (!responseIdP) { logger.msg(Arc::ERROR, "Request failed: No response from IdP"); return MCC_Status(); } if (!statusIdP) { logger.msg(Arc::ERROR, "Request failed: response from SPService is not as expected"); if (responseIdP) delete responseIdP; return MCC_Status(); } //Record the returned html content in case something is wrong std::string html_content; if(responseIdP->Content() != NULL) html_content = responseIdP->Content(); if (responseIdP) delete responseIdP; //The following code is for authentication (username/password) std::string resp_html; Arc::HTTPClientInfo redirect_info = infoIdP; if(redirect_info.code!= 200) { int count = 0; do { /* std::cout<<"Code: "< http_attributes; if (!(cookie.empty())) http_attributes.insert(std::pair("Cookie",cookie)); //Keep contacting IdP Arc::MCC_Status redirect_status = redirect_client.process("GET", http_attributes, &redirect_request, &redirect_info, &redirect_response); if (!(redirect_info.cookies.empty())) cookie = *(redirect_info.cookies.begin()); if (!redirect_response) { logger.msg(Arc::ERROR, "Request failed: No response from IdP when doing redirecting"); return MCC_Status(); } if (!redirect_status) { logger.msg(Arc::ERROR, "Request failed: response from IdP is not as expected when doing redirecting"); if (redirect_response) delete redirect_response; return MCC_Status(); } char *content = redirect_response->Content(); if (content != NULL) { resp_html.assign(redirect_response->Content()); size_t pos = resp_html.find("j_username"); if (pos != std::string::npos) { if (redirect_response) delete redirect_response; break; //"break" if the "j_username" is found in response, //here for different implentation 
of IdP, different //name could be searched. } } if (redirect_response) delete redirect_response; count++; if (count > 5) break; //At most loop 5 times } while (1); //Arc::URL redirect_url_final("https://idp.testshib.org:443/idp/Authn/UserPassword"); Arc::URL redirect_url_final(infoIdP.location); Arc::ClientHTTP redirect_client_final(cfg, redirect_url_final); Arc::PayloadRaw redirect_request_final; //std::string login_html("j_username=myself&j_password=myself"); std::multimap http_attributes2; std::string login_html; login_html.append("j_username=").append(username).append("&j_password=").append(password); redirect_request_final.Insert(login_html.c_str(), 0, login_html.size()); http_attributes2.insert(std::pair("Content-Type","application/x-www-form-urlencoded")); if(!(cookie.empty())) http_attributes2.insert(std::pair("Cookie",cookie)); Arc::PayloadRawInterface *redirect_response_final = NULL; Arc::HTTPClientInfo redirect_info_final; //Contact IdP to send the username/password Arc::MCC_Status redirect_status_final = redirect_client_final.process("POST", http_attributes2, &redirect_request_final, &redirect_info_final, &redirect_response_final); if (!(redirect_info_final.cookies.empty())) cookie = *(redirect_info_final.cookies.begin()); if (!redirect_response_final) { logger.msg(Arc::ERROR, "Request failed: No response from IdP when doing authentication"); return MCC_Status(); } if (!redirect_status_final) { logger.msg(Arc::ERROR, "Request failed: response from IdP is not as expected when doing authentication"); if (redirect_response_final) delete redirect_response_final; return MCC_Status(); } std::string html_body; for (int i = 0;; i++) { char *buf = redirect_response_final->Buffer(i); if (buf == NULL) break; html_body.append(redirect_response_final->Buffer(i), redirect_response_final->BufferSize(i)); //std::cout<<"Buffer: "<"); else logger.msg(Arc::ERROR, "Failed to verify the signature under "); //Send the encrypted saml assertion to service side through this main message chain //Get the encrypted saml assertion in this saml response Arc::XMLNode assertion_nd = samlresp_nd["EncryptedAssertion"]; std::string saml_assertion; assertion_nd.GetXML(saml_assertion); //std::cout<<"Encrypted saml assertion: "<process("POST", "/saml2sp", &requestSP, &infoSP, &responseSP); if (!responseSP) { logger.msg(Arc::ERROR, "Request failed: No response from SP Service when sending SAML assertion to SP"); return MCC_Status(); } if (!statusSP) { logger.msg(Arc::ERROR, "Request failed: response from SP Service is not as expected when sending SAML assertion to SP"); if (responseSP) delete responseSP; return MCC_Status(); } if (responseSP) delete responseSP; } else { // if "redirect_info.code == 200", then something could be wrong, // because "200" is not supposed to appear in this step logger.msg(Arc::ERROR,"IdP return some error message: %s", html_content.c_str()); return MCC_Status(); } return Arc::MCC_Status(Arc::STATUS_OK); } MCC_Status ClientHTTPwithSAML2SSO::process(const std::string& method, PayloadRawInterface *request, HTTPClientInfo *info, PayloadRawInterface **response, const std::string& idp_name, const std::string& username, const std::string& password, const bool reuse_authn) { return (process(method, "", request, info, response, idp_name, username, password, reuse_authn)); } MCC_Status ClientHTTPwithSAML2SSO::process(const std::string& method, const std::string& path, PayloadRawInterface *request, HTTPClientInfo *info, PayloadRawInterface **response, const std::string& idp_name, const std::string& 
username, const std::string& password, const bool reuse_authn) { if (!authn_) { //If has not yet passed the saml2sso process //Do the saml2sso Arc::MCC_Status status = process_saml2sso(idp_name, username, password, http_client_, cert_file_, privkey_file_, ca_file_, ca_dir_, logger, cookie); if (!status) { logger.msg(Arc::ERROR, "SAML2SSO process failed"); return MCC_Status(); } if(reuse_authn) authn_ = true; //Reuse or not reuse the result from saml2sso } //Send the real message Arc::MCC_Status status = http_client_->process(method, path, request, info, response); return status; } ClientSOAPwithSAML2SSO::ClientSOAPwithSAML2SSO(const BaseConfig& cfg, const URL& url) : soap_client_(NULL), authn_(false) { soap_client_ = new ClientSOAP(cfg, url); //Use the credential and trusted certificates from client's main chain to //contact IdP cert_file_ = cfg.cert; privkey_file_ = cfg.key; ca_file_ = cfg.cafile; ca_dir_ = cfg.cadir; } ClientSOAPwithSAML2SSO::~ClientSOAPwithSAML2SSO() { if (soap_client_) delete soap_client_; } MCC_Status ClientSOAPwithSAML2SSO::process(PayloadSOAP *request, PayloadSOAP **response, const std::string& idp_name, const std::string& username, const std::string& password, const bool reuse_authn) { return process("", request, response, idp_name, username, password, reuse_authn); } MCC_Status ClientSOAPwithSAML2SSO::process(const std::string& action, PayloadSOAP *request, PayloadSOAP **response, const std::string& idp_name, const std::string& username, const std::string& password, const bool reuse_authn) { //Do the saml2sso if (!authn_) { //If has not yet passed the saml2sso process ClientHTTP *http_client = dynamic_cast(soap_client_); Arc::MCC_Status status = process_saml2sso(idp_name, username, password, http_client, cert_file_, privkey_file_, ca_file_, ca_dir_, logger, cookie); if (!status) { logger.msg(Arc::ERROR, "SAML2SSO process failed"); return MCC_Status(); } if(reuse_authn) authn_ = true; //Reuse or not reuse the result from saml2sso } //Send the real message Arc::MCC_Status status = soap_client_->process(action, request, response); return status; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/credential0000644000000000000000000000013213214316023021504 xustar000000000000000030 mtime=1513200659.145737038 30 atime=1513200668.720854145 30 ctime=1513200659.145737038 nordugrid-arc-5.4.2/src/hed/libs/credential/0000755000175000002070000000000013214316023021627 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/Proxycertinfo.cpp0000644000000000000000000000012413065017103025142 xustar000000000000000027 mtime=1490296387.698578 27 atime=1513200574.585702 30 ctime=1513200659.135736916 nordugrid-arc-5.4.2/src/hed/libs/credential/Proxycertinfo.cpp0000644000175000002070000003452313065017103025216 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include /* #include #include #include */ #include "Proxycertinfo.h" namespace ArcCredential { /* PROXYPOLICY function */ /* set policy language */ int PROXY_POLICY_set_policy_language(PROXY_POLICY * policy, ASN1_OBJECT * policy_language) { if(policy_language != NULL) { if(policy_language != policy->policyLanguage) { ASN1_OBJECT_free(policy->policyLanguage); policy->policyLanguage = OBJ_dup(policy_language); } return 1; } return 0; } /* get policy language */ ASN1_OBJECT * PROXY_POLICY_get_policy_language(PROXY_POLICY * policy) { return policy->policyLanguage; } /* set policy */ int PROXY_POLICY_set_policy(PROXY_POLICY * proxypolicy, unsigned char * 
policy, int length) { if(policy != NULL) { /* if member policy of proxypolicy non set */ if(!proxypolicy->policy) proxypolicy->policy = ASN1_OCTET_STRING_new(); /* set member policy of proxypolicy */ ASN1_OCTET_STRING_set(proxypolicy->policy, policy, length); } else if(proxypolicy->policy) { ASN1_OCTET_STRING_free(proxypolicy->policy); proxypolicy->policy = NULL; } return 1; } /* get policy */ unsigned char * PROXY_POLICY_get_policy(PROXY_POLICY * proxypolicy, int * length) { /* assure field policy is set */ if(proxypolicy->policy) { *length = proxypolicy->policy->length; /* assure ASN1_OCTET_STRING is full */ if (*length>0 && proxypolicy->policy->data) { unsigned char * copy = (unsigned char*) malloc(*length); if(copy) { memcpy(copy, proxypolicy->policy->data, *length); return copy; } } } /* else return NULL */ return NULL; } /* internal to der conversion */ /* int i2d_PROXYPOLICY(PROXYPOLICY * policy, unsigned char ** pp) { #if 0 int v1 = 0; M_ASN1_I2D_vars(policy); M_ASN1_I2D_len(policy->policy_language, i2d_ASN1_OBJECT); M_ASN1_I2D_len_EXP_opt(policy->policy, i2d_ASN1_OCTET_STRING, 0, v1); M_ASN1_I2D_seq_total(); M_ASN1_I2D_put(policy->policy_language, i2d_ASN1_OBJECT); M_ASN1_I2D_put_EXP_opt(policy->policy, i2d_ASN1_OCTET_STRING, 0, v1); M_ASN1_I2D_finish(); #endif M_ASN1_I2D_vars(policy); M_ASN1_I2D_len(policy->policy_language, i2d_ASN1_OBJECT); if(policy->policy) { M_ASN1_I2D_len(policy->policy, i2d_ASN1_OCTET_STRING); } M_ASN1_I2D_seq_total(); M_ASN1_I2D_put(policy->policy_language, i2d_ASN1_OBJECT); if(policy->policy) { M_ASN1_I2D_put(policy->policy, i2d_ASN1_OCTET_STRING); } M_ASN1_I2D_finish(); return 0; } PROXYPOLICY * d2i_PROXYPOLICY(PROXYPOLICY ** a, unsigned char ** pp, long length) { M_ASN1_D2I_vars(a, PROXYPOLICY *, PROXYPOLICY_new); M_ASN1_D2I_Init(); M_ASN1_D2I_start_sequence(); M_ASN1_D2I_get(ret->policy_language, d2i_ASN1_OBJECT); * need to try getting the policy using * a) a call expecting no tags * b) a call expecting tags * one of which should succeed * M_ASN1_D2I_get_opt(ret->policy, d2i_ASN1_OCTET_STRING, V_ASN1_OCTET_STRING); M_ASN1_D2I_get_IMP_opt(ret->policy, d2i_ASN1_OCTET_STRING, 0, V_ASN1_OCTET_STRING); M_ASN1_D2I_Finish(a, PROXYPOLICY_free, ASN1_F_D2I_PROXYPOLICY); } STACK_OF(CONF_VALUE) * i2v_PROXYPOLICY( struct v3_ext_method* * method *, PROXYPOLICY* ext, STACK_OF(CONF_VALUE)* extlist) { char* policy = NULL; char policy_lang[128]; char* tmp_string = NULL; char* index = NULL; int nid; int policy_length; X509V3_add_value("Proxy Policy:", NULL, &extlist); nid = OBJ_obj2nid(PROXYPOLICY_get_policy_language(ext)); if(nid != NID_undef) { BIO_snprintf(policy_lang, 128, " %s", OBJ_nid2ln(nid)); } else { policy_lang[0] = ' '; i2t_ASN1_OBJECT(&policy_lang[1], 127, PROXYPOLICY_get_policy_language(ext)); } X509V3_add_value(" Policy Language", policy_lang, &extlist); policy = (char *) PROXYPOLICY_get_policy(ext, &policy_length); if(!policy) { X509V3_add_value(" Policy", " EMPTY", &extlist); } else { X509V3_add_value(" Policy:", NULL, &extlist); tmp_string = policy; while(1) { index = strchr(tmp_string, '\n'); if(!index) { int length; unsigned char* last_string; length = (policy_length - (tmp_string - policy)) + 9; last_string = (unsigned char*) malloc(length); BIO_snprintf((char*)last_string, length, "%8s%s", "", tmp_string); X509V3_add_value(NULL, (const char*)last_string, &extlist); free(last_string); break; } *index = '\0'; X509V3_add_value(NULL, tmp_string, &extlist); tmp_string = index + 1; } free(policy); } return extlist; } X509V3_EXT_METHOD * 
PROXYPOLICY_x509v3_ext_meth() { static X509V3_EXT_METHOD proxypolicy_x509v3_ext_meth = { -1, X509V3_EXT_MULTILINE, NULL, (X509V3_EXT_NEW) PROXYPOLICY_new, (X509V3_EXT_FREE) PROXYPOLICY_free, (X509V3_EXT_D2I) d2i_PROXYPOLICY, (X509V3_EXT_I2D) i2d_PROXYPOLICY, NULL, NULL, (X509V3_EXT_I2V) i2v_PROXYPOLICY, NULL, NULL, NULL, NULL }; return (&proxypolicy_x509v3_ext_meth); } */ /** PROXY_CERT_INFO_EXTENSION function */ /* PROXY_CERT_INFO_EXTENSION * PROXY_CERT_INFO_EXTENSION_new() { PROXY_CERT_INFO_EXTENSION* ret = (PROXY_CERT_INFO_EXTENSION*)OPENSSL_malloc(sizeof(PROXY_CERT_INFO_EXTENSION)); if(ret != NULL) { memset(ret, 0, sizeof(PROXY_CERT_INFO_EXTENSION)); ret->path_length = NULL; ret->proxypolicy = PROXYPOLICY_new(); } else { ASN1err(ASN1_F_PROXY_CERT_INFO_EXTENSION_NEW, ERR_R_MALLOC_FAILURE); } return (ret); } void PROXY_CERT_INFO_EXTENSION_free(PROXY_CERT_INFO_EXTENSION * proxycertinfo) { if(proxycertinfo == NULL) return; ASN1_INTEGER_free(proxycertinfo->path_length); PROXYPOLICY_free(proxycertinfo->proxypolicy); OPENSSL_free(proxycertinfo); } PROXY_CERT_INFO_EXTENSION * PROXY_CERT_INFO_EXTENSION_dup(PROXY_CERT_INFO_EXTENSION * proxycertinfo) { PROXY_CERT_INFO_EXTENSION * new_proxycertinfo = NULL; if(proxycertinfo == NULL) return NULL; new_proxycertinfo = PROXY_CERT_INFO_EXTENSION_new(); if(new_proxycertinfo == NULL) return NULL; if(proxycertinfo->path_length) { new_proxycertinfo->path_length = ASN1_INTEGER_dup(proxycertinfo->path_length); } new_proxycertinfo->version = proxycertinfo->version; PROXY_CERT_INFO_EXTENSION_set_proxypolicy(new_proxycertinfo,proxycertinfo->proxypolicy); return new_proxycertinfo; } int PROXY_CERT_INFO_EXTENSION_print(BIO* bp, PROXY_CERT_INFO_EXTENSION* cert_info) { STACK_OF(CONF_VALUE)* values = NULL; values = i2v_PROXY_CERT_INFO_EXTENSION(PROXY_CERT_INFO_EXTENSION_v4_x509v3_ext_meth(), cert_info, NULL); X509V3_EXT_val_prn(bp, values, 0, 1); sk_CONF_VALUE_pop_free(values, X509V3_conf_free); return 1; } int PROXY_CERT_INFO_EXTENSION_print_fp(FILE* fp, PROXY_CERT_INFO_EXTENSION* cert_info) { int ret; BIO* bp; bp = BIO_new(BIO_s_file()); BIO_set_fp(bp, fp, BIO_NOCLOSE); ret = PROXY_CERT_INFO_EXTENSION_print(bp, cert_info); BIO_free(bp); return (ret); } */ /* set path_length */ int PROXY_CERT_INFO_EXTENSION_set_path_length(PROXY_CERT_INFO_EXTENSION * proxycertinfo, long path_length) { /* assure proxycertinfo is not empty */ if(proxycertinfo != NULL) { if(path_length != -1) { /* if member pcPathLengthConstraint is empty allocate memory the set */ if(proxycertinfo->pcPathLengthConstraint == NULL) proxycertinfo->pcPathLengthConstraint = ASN1_INTEGER_new(); return ASN1_INTEGER_set(proxycertinfo->pcPathLengthConstraint, path_length); } else if(proxycertinfo->pcPathLengthConstraint != NULL) { ASN1_INTEGER_free(proxycertinfo->pcPathLengthConstraint); proxycertinfo->pcPathLengthConstraint = NULL; } return 1; } return 0; } /* int PROXY_CERT_INFO_EXTENSION_set_version(PROXY_CERT_INFO_EXTENSION * proxycertinfo, int version) { if (proxycertinfo != NULL) { proxycertinfo->version = version; return 1; } return 0; } int PROXY_CERT_INFO_EXTENSION_get_version(PROXY_CERT_INFO_EXTENSION * proxycertinfo) { if (proxycertinfo) return proxycertinfo->version; return -1; } */ /* get path length */ long PROXY_CERT_INFO_EXTENSION_get_path_length(PROXY_CERT_INFO_EXTENSION * proxycertinfo) { if(proxycertinfo && proxycertinfo->pcPathLengthConstraint) return ASN1_INTEGER_get(proxycertinfo->pcPathLengthConstraint); else return -1; } /* * set policy * int 
PROXY_CERT_INFO_EXTENSION_set_proxypolicy(PROXY_CERT_INFO_EXTENSION * proxycertinfo, PROXYPOLICY * proxypolicy) { if(proxypolicy != proxycertinfo->proxypolicy) { PROXYPOLICY_free(proxycertinfo->proxypolicy); if(proxypolicy != NULL) proxycertinfo->proxypolicy = PROXYPOLICY_dup(proxypolicy); else proxycertinfo->proxypolicy = NULL; } return 1; } */ /* get policy */ PROXY_POLICY * PROXY_CERT_INFO_EXTENSION_get_proxypolicy(PROXY_CERT_INFO_EXTENSION * proxycertinfo) { if(proxycertinfo) return proxycertinfo->proxyPolicy; return NULL; } /* * internal to der conversion * int i2d_PROXY_CERT_INFO_EXTENSION_v3(PROXY_CERT_INFO_EXTENSION * proxycertinfo, unsigned char ** pp) { int v1; M_ASN1_I2D_vars(proxycertinfo); v1 = 0; M_ASN1_I2D_len(proxycertinfo->proxypolicy, i2d_PROXYPOLICY); M_ASN1_I2D_len_EXP_opt(proxycertinfo->path_length,i2d_ASN1_INTEGER, 1, v1); M_ASN1_I2D_seq_total(); M_ASN1_I2D_put(proxycertinfo->proxypolicy, i2d_PROXYPOLICY); M_ASN1_I2D_put_EXP_opt(proxycertinfo->path_length, i2d_ASN1_INTEGER, 1, v1); M_ASN1_I2D_finish(); return 0; } int i2d_PROXY_CERT_INFO_EXTENSION_v4(PROXY_CERT_INFO_EXTENSION * proxycertinfo, unsigned char ** pp) { M_ASN1_I2D_vars(proxycertinfo); if(proxycertinfo->path_length) { M_ASN1_I2D_len(proxycertinfo->path_length, i2d_ASN1_INTEGER); } M_ASN1_I2D_len(proxycertinfo->proxypolicy, i2d_PROXYPOLICY); M_ASN1_I2D_seq_total(); if(proxycertinfo->path_length) { M_ASN1_I2D_put(proxycertinfo->path_length, i2d_ASN1_INTEGER); } M_ASN1_I2D_put(proxycertinfo->proxypolicy, i2d_PROXYPOLICY); M_ASN1_I2D_finish(); return 0; } int i2d_PROXY_CERT_INFO_EXTENSION(PROXY_CERT_INFO_EXTENSION * proxycertinfo, unsigned char ** pp) { switch(proxycertinfo->version) { case 3: return i2d_PROXY_CERT_INFO_EXTENSION_v3(proxycertinfo, pp); break; case 4: return i2d_PROXY_CERT_INFO_EXTENSION_v4(proxycertinfo, pp); break; default: return -1; break; } return 0; } PROXY_CERT_INFO_EXTENSION * d2i_PROXY_CERT_INFO_EXTENSION_v3(PROXY_CERT_INFO_EXTENSION ** cert_info, unsigned char ** pp, long length) { M_ASN1_D2I_vars(cert_info, PROXY_CERT_INFO_EXTENSION *, PROXY_CERT_INFO_EXTENSION_new); M_ASN1_D2I_Init(); M_ASN1_D2I_start_sequence(); //M_ASN1_D2I_get(ret->proxypolicy, (unsigned char**)d2i_PROXYPOLICY); c.q=c.p; if (d2i_PROXYPOLICY(&(ret->proxypolicy),(unsigned char**)&c.p,c.slen) == NULL) {c.line=__LINE__; goto err; } c.slen-=(c.p-c.q); M_ASN1_D2I_get_EXP_opt(ret->path_length, d2i_ASN1_INTEGER, 1); ret->version = 3; M_ASN1_D2I_Finish(cert_info, PROXY_CERT_INFO_EXTENSION_free, ASN1_F_D2I_PROXY_CERT_INFO_EXTENSION); } PROXY_CERT_INFO_EXTENSION * d2i_PROXY_CERT_INFO_EXTENSION_v4(PROXY_CERT_INFO_EXTENSION ** cert_info, unsigned char ** pp, long length) { DECLARE_ASN1_FUNCTIONS M_ASN1_D2I_vars(cert_info, PROXY_CERT_INFO_EXTENSION *, PROXY_CERT_INFO_EXTENSION_new); M_ASN1_D2I_Init(); M_ASN1_D2I_start_sequence(); M_ASN1_D2I_get_EXP_opt(ret->path_length, d2i_ASN1_INTEGER, 1); M_ASN1_D2I_get_opt(ret->path_length, d2i_ASN1_INTEGER, V_ASN1_INTEGER); //M_ASN1_D2I_get(ret->proxypolicy, (unsigned char**)d2i_PROXYPOLICY); c.q=c.p; if (d2i_PROXYPOLICY(&(ret->proxypolicy),(unsigned char**)&c.p,c.slen) == NULL) {c.line=__LINE__; goto err; } c.slen-=(c.p-c.q); ret->version = 4; M_ASN1_D2I_Finish(cert_info, PROXY_CERT_INFO_EXTENSION_free, ASN1_F_D2I_PROXY_CERT_INFO_EXTENSION); } PROXY_CERT_INFO_EXTENSION * d2i_PROXY_CERT_INFO_EXTENSION(PROXY_CERT_INFO_EXTENSION ** cert_info, unsigned char ** pp, long length) { PROXY_CERT_INFO_EXTENSION *info = d2i_PROXY_CERT_INFO_EXTENSION_v3(cert_info, pp, length); if (!info) info = 
d2i_PROXY_CERT_INFO_EXTENSION_v4(cert_info, pp, length); return info; } STACK_OF(CONF_VALUE) * i2v_PROXY_CERT_INFO_EXTENSION(struct v3_ext_method* * method *, PROXY_CERT_INFO_EXTENSION* ext, STACK_OF(CONF_VALUE)* extlist) { int len = 128; char tmp_string[128]; if(!ext) { extlist = NULL; return extlist; } if(extlist == NULL) { extlist = sk_CONF_VALUE_new_null(); if(extlist == NULL) { return NULL; } } if(PROXY_CERT_INFO_EXTENSION_get_path_length(ext) > -1) { memset(tmp_string, 0, len); BIO_snprintf(tmp_string, len, " %lu (0x%lx)", PROXY_CERT_INFO_EXTENSION_get_path_length(ext), PROXY_CERT_INFO_EXTENSION_get_path_length(ext)); X509V3_add_value("Path Length", tmp_string, &extlist); } if(PROXY_CERT_INFO_EXTENSION_get_proxypolicy(ext)) { i2v_PROXYPOLICY(PROXYPOLICY_x509v3_ext_meth(), PROXY_CERT_INFO_EXTENSION_get_proxypolicy(ext), extlist); } return extlist; } X509V3_EXT_METHOD * PROXY_CERT_INFO_EXTENSION_v4_x509v3_ext_meth() { static X509V3_EXT_METHOD proxycertinfo_v4_x509v3_ext_meth = { -1, X509V3_EXT_MULTILINE, NULL, (X509V3_EXT_NEW) PROXY_CERT_INFO_EXTENSION_new, (X509V3_EXT_FREE) PROXY_CERT_INFO_EXTENSION_free, (X509V3_EXT_D2I) d2i_PROXY_CERT_INFO_EXTENSION_v4, (X509V3_EXT_I2D) i2d_PROXY_CERT_INFO_EXTENSION_v4, NULL, NULL, (X509V3_EXT_I2V) i2v_PROXY_CERT_INFO_EXTENSION, NULL, NULL, //(X509V3_EXT_I2R) i2r_PROXY_CERT_INFO_EXTENSION, NULL, //(X509V3_EXT_R2I) r2i_PROXY_CERT_INFO_EXTENSION, NULL }; return (&proxycertinfo_v4_x509v3_ext_meth); } X509V3_EXT_METHOD * PROXY_CERT_INFO_EXTENSION_v3_x509v3_ext_meth() { static X509V3_EXT_METHOD proxycertinfo_v3_x509v3_ext_meth = { -1, X509V3_EXT_MULTILINE, NULL, (X509V3_EXT_NEW) PROXY_CERT_INFO_EXTENSION_new, (X509V3_EXT_FREE) PROXY_CERT_INFO_EXTENSION_free, (X509V3_EXT_D2I) d2i_PROXY_CERT_INFO_EXTENSION_v3, (X509V3_EXT_I2D) i2d_PROXY_CERT_INFO_EXTENSION_v3, NULL, NULL, (X509V3_EXT_I2V) i2v_PROXY_CERT_INFO_EXTENSION, NULL, NULL, //(X509V3_EXT_I2R) i2r_PROXY_CERT_INFO_EXTENSION, NULL, //(X509V3_EXT_R2I) r2i_PROXY_CERT_INFO_EXTENSION, NULL }; return (&proxycertinfo_v3_x509v3_ext_meth); } */ } //namespace ArcCredential nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/Makefile.am0000644000000000000000000000012713065017103023622 xustar000000000000000027 mtime=1490296387.698578 30 atime=1513200596.826974855 30 ctime=1513200659.133736891 nordugrid-arc-5.4.2/src/hed/libs/credential/Makefile.am0000644000175000002070000000561613065017103023674 0ustar00mockbuildmock00000000000000lib_LTLIBRARIES = libarccredential.la #noinst_PROGRAMS = testproxy testcertinfo testproxy2proxy testvoms testeec #if WIN32 #VOMS_HEADER = #VOMS_SOURCE = #else VOMS_HEADER = VOMSUtil.h VOMSConfig.h VOMS_SOURCE = VOMSUtil.cpp VOMSConfig.cpp #endif if NSS_ENABLED NSS_HEADER = NSSUtil.h nssprivkeyinfocodec.h NSS_SOURCE = NSSUtil.cpp nssprivkeyinfocodec.cpp else NSS_HEADER = NSS_SOURCE = endif libarccredential_ladir = $(pkgincludedir)/credential libarccredential_la_HEADERS = Credential.h CertUtil.h Proxycertinfo.h PasswordSource.h \ VOMSAttribute.h $(VOMS_HEADER) $(NSS_HEADER) libarccredential_la_SOURCES = Proxycertinfo.cpp CertUtil.cpp PasswordSource.cpp \ Credential.cpp listfunc.cpp listfunc.h \ VOMSAttribute.cpp $(VOMS_SOURCE) $(NSS_SOURCE) libarccredential_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(NSS_CFLAGS) $(AM_CXXFLAGS) libarccredential_la_CFLAGS = $(libarccredential_la_CXXFLAGS) libarccredential_la_LIBADD = \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ 
$(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(OPENSSL_LIBS) $(NSS_LIBS) libarccredential_la_LDFLAGS = -version-info 3:0:0 testproxy_SOURCES = testproxy.cpp testproxy_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) testproxy_LDADD = ./libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(EXTRA_LIBS) testcertinfo_SOURCES = testcertinfo.cpp testcertinfo_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) testcertinfo_LDADD = ./libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(EXTRA_LIBS) testproxy2proxy_SOURCES = testproxy2proxy.cpp testproxy2proxy_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) testproxy2proxy_LDADD = ./libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(EXTRA_LIBS) testvoms_SOURCES = testvoms.cpp testvoms_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) testvoms_LDADD = ./libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(EXTRA_LIBS) testeec_SOURCES = testeec.cpp testeec_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) testeec_LDADD = ./libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(EXTRA_LIBS) DIST_SUBDIRS = test SUBDIRS = $(TEST_DIR) nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/Proxycertinfo.h0000644000000000000000000000012413065017103024607 xustar000000000000000027 mtime=1490296387.698578 27 atime=1513200574.619703 30 ctime=1513200659.127736818 nordugrid-arc-5.4.2/src/hed/libs/credential/Proxycertinfo.h0000644000175000002070000001357413065017103024666 0ustar00mockbuildmock00000000000000#ifndef ARC_PROXY_CERT_INFO_EXTENSION_H #define ARC_PROXY_CERT_INFO_EXTENSION_H //#include //#include #include //#include /// Internal code for low-level credential handling. namespace ArcCredential { /** Functions and constants for maintaining proxy certificates */ /** The code is derived from globus gsi, voms, and openssl-0.9.8e. The existing code for maintaining proxy certificates in OpenSSL only covers standard proxies and does not cover old Globus proxies, so here the Globus code is introduced. 
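
   A hedged usage sketch, mirroring how Credential.cpp drives these helpers when
   building a proxy certificate extension (pci, pp and pathlen are placeholders):

     PROXY_CERT_INFO_EXTENSION *pci = PROXY_CERT_INFO_EXTENSION_new();
     PROXY_POLICY *pp = PROXY_CERT_INFO_EXTENSION_get_proxypolicy(pci);
     PROXY_POLICY_set_policy(pp, NULL, 0);
     PROXY_POLICY_set_policy_language(pp, OBJ_nid2obj(IMPERSONATION_PROXY_NID));
     if(pathlen >= 0) PROXY_CERT_INFO_EXTENSION_set_path_length(pci, pathlen);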
*/ /* predefined policy language */ #define ANYLANGUAGE_PROXY_OBJ OBJ_id_ppl_anyLanguage #define ANYLANGUAGE_PROXY_NID NID_id_ppl_anyLanguage #define ANYLANGUAGE_PROXY_SN SN_id_ppl_anyLanguage #define ANYLANGUAGE_PROXY_LN LN_id_ppl_anyLanguage #define IMPERSONATION_PROXY_OBJ OBJ_id_ppl_inheritAll #define IMPERSONATION_PROXY_NID NID_id_ppl_inheritAll #define IMPERSONATION_PROXY_SN SN_id_ppl_inheritAll #define IMPERSONATION_PROXY_LN LN_id_ppl_inheritAll #define INDEPENDENT_PROXY_OBJ OBJ_Independent #define INDEPENDENT_PROXY_NID NID_Independent #define INDEPENDENT_PROXY_SN SN_Independent #define INDEPENDENT_PROXY_LN LN_Independent /* generic policy language */ /* #define GLOBUS_GSI_PROXY_GENERIC_POLICY_OID "1.3.6.1.4.1.3536.1.1.1.8" #define LIMITED_PROXY_OID "1.3.6.1.4.1.3536.1.1.1.9" #define LIMITED_PROXY_SN "LIMITED_PROXY" #define LIMITED_PROXY_LN "GSI limited proxy" */ /* error handling */ /* #define ASN1_F_PROXYPOLICY_NEW 450 #define ASN1_F_D2I_PROXYPOLICY 451 #define ASN1_F_PROXY_CERT_INFO_EXTENSION_NEW 430 #define ASN1_F_D2I_PROXY_CERT_INFO_EXTENSION 431 */ /* Error codes for the X509V3 functions. */ /* Function codes. */ /* #define X509V3_F_PROCESS_PCI_VALUE 150 #define X509V3_F_R2I_PCI 155 */ /* Reason Code */ /* #define X509V3_R_INVALID_PROXY_POLICY_SETTING 153 #define X509V3_R_NO_PROXY_CERT_POLICY_LANGUAGE_DEFINED 154 #define X509V3_R_POLICY_WHEN_PROXY_LANGUAGE_REQUIRES_NO_POLICY 159 */ /* data structure */ /* typedef struct PROXYPOLICY_st { ASN1_INTEGER* dummy; ASN1_INTEGER* dummy2; ASN1_OBJECT * policy_language; ASN1_OCTET_STRING * policy; } PROXYPOLICY; ASN1_SEQUENCE(PROXYPOLICY) = { ASN1_EXP_OPT(PROXYPOLICY, dummy, ASN1_INTEGER, 1), ASN1_OPT(PROXYPOLICY, dummy2, ASN1_INTEGER), ASN1_EXP_OPT(PROXYPOLICY, policy_language, ASN1_OBJECT, 1), ASN1_EXP_OPT(PROXYPOLICY, policy, ASN1_OCTET_STRING, 1) } ASN1_SEQUENCE_END(PROXYPOLICY) DECLARE_ASN1_FUNCTIONS(PROXYPOLICY) IMPLEMENT_ASN1_FUNCTIONS(PROXYPOLICY) */ /* typedef struct PROXY_CERT_INFO_EXTENSION_st { ASN1_INTEGER * path_length; PROXYPOLICY * proxypolicy; int version; } PROXY_CERT_INFO_EXTENSION; /// \endcond */ /* PROXYPOLICY function */ /* allocating and free memory */ //PROXYPOLICY * PROXYPOLICY_new(); //void PROXYPOLICY_free(PROXYPOLICY * proxypolicy); /* duplicate */ //PROXYPOLICY * PROXYPOLICY_dup(PROXYPOLICY * policy); /* set policy language */ int PROXY_POLICY_set_policy_language(PROXY_POLICY * policy, ASN1_OBJECT * policy_language); /* Returns policy language object from policy */ ASN1_OBJECT * PROXY_POLICY_get_policy_language(PROXY_POLICY * policy); /* set policy contents */ int PROXY_POLICY_set_policy(PROXY_POLICY * proxypolicy, unsigned char * policy, int length); /* get policy contents */ unsigned char * PROXY_POLICY_get_policy(PROXY_POLICY * policy, int * length); /* internal to der conversion */ //int i2d_PROXYPOLICY(PROXYPOLICY * policy, unsigned char ** pp); /* der to internal conversion */ //PROXYPOLICY * d2i_PROXYPOLICY(PROXYPOLICY ** policy, unsigned char ** pp, long length); //X509V3_EXT_METHOD * PROXYPOLICY_x509v3_ext_meth(); //STACK_OF(CONF_VALUE) * i2v_PROXYPOLICY(struct v3_ext_method * method, PROXYPOLICY * ext, STACK_OF(CONF_VALUE) * extlist); /*PROXY_CERT_INFO_EXTENSION function */ /* allocating and free memory */ //PROXY_CERT_INFO_EXTENSION * PROXY_CERT_INFO_EXTENSION_new(); //void PROXY_CERT_INFO_EXTENSION_free(PROXY_CERT_INFO_EXTENSION * proxycertinfo); /* duplicate */ //PROXY_CERT_INFO_EXTENSION * PROXY_CERT_INFO_EXTENSION_dup(PROXY_CERT_INFO_EXTENSION * proxycertinfo); //int 
PROXY_CERT_INFO_EXTENSION_print_fp(FILE* fp, PROXY_CERT_INFO_EXTENSION* cert_info); /* set path_length */ int PROXY_CERT_INFO_EXTENSION_set_path_length(PROXY_CERT_INFO_EXTENSION * proxycertinfo, long path_length); /* get path length */ long PROXY_CERT_INFO_EXTENSION_get_path_length(PROXY_CERT_INFO_EXTENSION * proxycertinfo); /* set proxypolicy */ //int PROXY_CERT_INFO_EXTENSION_set_proxypolicy(PROXY_CERT_INFO_EXTENSION * proxycertinfo, PROXYPOLICY * proxypolicy); /* get proxypolicy */ PROXY_POLICY * PROXY_CERT_INFO_EXTENSION_get_proxypolicy(PROXY_CERT_INFO_EXTENSION * proxycertinfo); /* internal to der conversion */ //int i2d_PROXY_CERT_INFO_EXTENSION(PROXY_CERT_INFO_EXTENSION * proxycertinfo, unsigned char ** pp); /* der to internal conversion */ //PROXY_CERT_INFO_EXTENSION * d2i_PROXY_CERT_INFO_EXTENSION(PROXY_CERT_INFO_EXTENSION ** cert_info, unsigned char ** a, long length); //int PROXY_CERT_INFO_EXTENSION_set_version(PROXY_CERT_INFO_EXTENSION *cert_info, int version); //STACK_OF(CONF_VALUE) * i2v_PROXY_CERT_INFO_EXTENSION( // struct v3_ext_method * method, // PROXY_CERT_INFO_EXTENSION * ext, // STACK_OF(CONF_VALUE) * extlist); //int i2r_PROXY_CERT_INFO_EXTENSION(X509V3_EXT_METHOD *method, PROXY_CERT_INFO_EXTENSION *ext, BIO *out, int indent); //PROXY_CERT_INFO_EXTENSION *r2i_PROXY_CERT_INFO_EXTENSION(X509V3_EXT_METHOD *method, X509V3_CTX *ctx, char *value); //X509V3_EXT_METHOD * PROXY_CERT_INFO_EXTENSION_v3_x509v3_ext_meth(); //X509V3_EXT_METHOD * PROXY_CERT_INFO_EXTENSION_v4_x509v3_ext_meth(); } //namespace ArcCredential #endif nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/Credential.cpp0000644000000000000000000000012313153454775024362 xustar000000000000000027 mtime=1504598525.713781 27 atime=1513200574.615703 29 ctime=1513200659.13773694 nordugrid-arc-5.4.2/src/hed/libs/credential/Credential.cpp0000644000175000002070000030065713153454775024443 0ustar00mockbuildmock00000000000000/**Some of the following code is compliant to OpenSSL license*/ #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include "Credential.h" using namespace ArcCredential; namespace Arc { #if (OPENSSL_VERSION_NUMBER < 0x10100000L) static BN_GENCB* BN_GENCB_new(void) { BN_GENCB* bn = (BN_GENCB*)std::malloc(sizeof(BN_GENCB)); if(bn) std::memset(bn, 0, sizeof(BN_GENCB)); return bn; } static void BN_GENCB_free(BN_GENCB* bn) { if(bn) std::free(bn); } #define X509_getm_notAfter X509_get_notAfter #define X509_getm_notBefore X509_get_notBefore #define X509_set1_notAfter X509_set_notAfter #define X509_set1_notBefore X509_set_notBefore #endif #if (OPENSSL_VERSION_NUMBER < 0x10002000L) static int X509_get_signature_nid(const X509 *x) { return OBJ_obj2nid(x->sig_alg->algorithm); } static void X509_get0_signature(ASN1_BIT_STRING **psig, X509_ALGOR **palg, const X509 *x) { if (psig) *psig = x->signature; if (palg) *palg = x->sig_alg; } #endif #define DEFAULT_DIGEST ((EVP_MD*)EVP_sha1()) #define DEFAULT_KEYBITS (1024) CredentialError::CredentialError(const std::string& what) : std::runtime_error(what) { } Logger CredentialLogger(Logger::rootLogger, "Credential"); static int ssl_err_cb(const char *str, size_t, void *u) { Logger& logger = *((Logger*)u); logger.msg(DEBUG, "OpenSSL error string: %s", str); return 1; } #define PASS_MIN_LENGTH (0) typedef struct pw_cb_data { PasswordSource *password; } PW_CB_DATA; static int passwordcb(char *buf, int bufsiz, int verify, void *cb_tmp) { PW_CB_DATA *cb_data = 
(PW_CB_DATA *)cb_tmp; if (bufsiz <= 1) return 0; if (cb_data) { if (cb_data->password) { std::string password; if(cb_data->password->Get(password,PASS_MIN_LENGTH,bufsiz) != PasswordSource::PASSWORD) { // It was requested to have key encrypted and no password was provided return 0; } if(buf) strncpy(buf, password.c_str(), bufsiz); int len = password.length(); if(len > bufsiz) len = bufsiz; return len; } } // Password is needed but no source defined for password return 0; } void Credential::LogError(void) const { ERR_print_errors_cb(&ssl_err_cb, &CredentialLogger); } Time asn1_to_utctime(const ASN1_UTCTIME *s) { if(s == NULL) return Time(); std::string t_str; if(s->type == V_ASN1_UTCTIME) { t_str.append("20"); t_str.append((char*)(s->data)); } else {//V_ASN1_GENERALIZEDTIME t_str.append((char*)(s->data)); } return Time(t_str); } ASN1_UTCTIME* utc_to_asn1time(const Time& t) { // Using ASN1_UTCTIME instead of ASN1_GENERALIZEDTIME because of dCache std::string t_str = t.str(MDSTime); if(t_str.length() < 2) return NULL; // paranoic ASN1_UTCTIME* s = ASN1_UTCTIME_new(); if(!s) return NULL; if(ASN1_UTCTIME_set_string(s,(char*)(t_str.c_str()+2))) return s; ASN1_UTCTIME_free(s); return NULL; //ASN1_GENERALIZEDTIME* s = ASN1_GENERALIZEDTIME_new(); //if(!s) return NULL; //std::string t_str = t.str(MDSTime); //if(ASN1_GENERALIZEDTIME_set_string(s,(char*)t_str.c_str())) return s; //ASN1_GENERALIZEDTIME_free(s); //return NULL; } class AutoBIO { private: BIO* bio_; public: AutoBIO(BIO* bio):bio_(bio) { }; ~AutoBIO(void) { if(bio_) { BIO_set_close(bio_,BIO_CLOSE); BIO_free_all(bio_); } }; operator bool(void) { return (bio_ != NULL); }; operator BIO*(void) { return bio_; }; BIO& operator*(void) const { return *bio_; }; BIO* operator->(void) const { return bio_; }; }; //Get the life time of the credential static void getLifetime(STACK_OF(X509)* certchain, X509* cert, Time& start, Period &lifetime) { X509* tmp_cert = NULL; Time start_time(-1), end_time(-1); int n; ASN1_UTCTIME* atime = NULL; if(cert == NULL) { start = Time(); lifetime = Period(); return; } if(certchain) for (n = 0; n < sk_X509_num(certchain); n++) { tmp_cert = sk_X509_value(certchain, n); atime = X509_getm_notAfter(tmp_cert); Time e = asn1_to_utctime(atime); if (end_time == Time(-1) || e < end_time) { end_time = e; } atime = X509_getm_notBefore(tmp_cert); Time s = asn1_to_utctime(atime); if (start_time == Time(-1) || s > start_time) { start_time = s; } } atime = X509_getm_notAfter(cert); Time e = asn1_to_utctime(atime); if (end_time == Time(-1) || e < end_time) { end_time = e; } atime = X509_getm_notBefore(cert); Time s = asn1_to_utctime(atime); if (start_time == Time(-1) || s > start_time) { start_time = s; } start = start_time; lifetime = end_time - start_time; } //Parse the BIO for certificate and get the format of it Credformat Credential::getFormat_BIO(BIO* bio, const bool is_file) const { Credformat format = CRED_UNKNOWN; if(bio == NULL) return format; if(is_file) { char buf[1]; char firstbyte; int position; if((position = BIO_tell(bio))<0 || BIO_read(bio, buf, 1)<=0 || BIO_seek(bio, position)<0) { LogError(); CredentialLogger.msg(ERROR,"Can't get the first byte of input to determine its format"); return format; } firstbyte = buf[0]; // DER-encoded structure (including PKCS12) will start with ASCII 048. // Otherwise, it's PEM. if(firstbyte==48) { //DER-encoded, PKCS12 or DER? 
firstly parse it as PKCS12 ASN.1, //if can not parse it, then it is DER format PKCS12* pkcs12 = NULL; if((pkcs12 = d2i_PKCS12_bio(bio,NULL)) == NULL){ format=CRED_DER; } else { format = CRED_PKCS; PKCS12_free(pkcs12); } if( BIO_seek(bio, position) < 0 ) { LogError(); CredentialLogger.msg(ERROR,"Can't reset the input"); return format; } } else { format = CRED_PEM; } } else { unsigned char* bio_str; int len; len = BIO_get_mem_data(bio, (unsigned char *) &bio_str); char firstbyte; if(len>0) { firstbyte = bio_str[0]; if(firstbyte==48) { //DER-encoded, PKCS12 or DER? firstly parse it as PKCS12 ASN.1, //if can not parse it, then it is DER format AutoBIO pkcs12bio(BIO_new_mem_buf(bio_str,len)); PKCS12* pkcs12 = NULL; if((pkcs12 = d2i_PKCS12_bio(pkcs12bio,NULL)) == NULL){ format=CRED_DER; } else { format = CRED_PKCS; PKCS12_free(pkcs12); } } else { format = CRED_PEM; } } else { CredentialLogger.msg(ERROR,"Can't get the first byte of input BIO to get its format"); return format; } } return format; } //Parse the string for certificate and get the format of it Credformat Credential::getFormat_str(const std::string& source) const { Credformat format = CRED_UNKNOWN; AutoBIO bio(BIO_new_mem_buf((void*)(source.c_str()), source.length())); if(!bio){ CredentialLogger.msg(ERROR,"Can not read certificate/key string"); LogError(); } if(!bio) return format; unsigned char* bio_str; int len; len = BIO_get_mem_data(bio, (unsigned char *) &bio_str); char firstbyte; if(len>0) { firstbyte = bio_str[0]; if(firstbyte==48) { //DER-encoded, PKCS12 or DER? firstly parse it as PKCS12 ASN.1, //if can not parse it, then it is DER format PKCS12* pkcs12 = NULL; unsigned char* source_chr = (unsigned char*)(source.c_str()); if((pkcs12 = d2i_PKCS12(NULL, (const unsigned char**)&source_chr, source.length())) != NULL){ format=CRED_PKCS; PKCS12_free(pkcs12); } else { format = CRED_DER; } } else { format = CRED_PEM; } } else { CredentialLogger.msg(ERROR,"Can't get the first byte of input BIO to get its format"); return format; } return format; } std::string Credential::GetDN(void) const { X509_NAME *subject = NULL; if(!cert_) return ""; subject = X509_get_subject_name(cert_); std::string str; if(subject!=NULL) { char* buf = X509_NAME_oneline(subject,NULL,0); if(buf) { str.append(buf); OPENSSL_free(buf); } } return str; } std::string Credential::GetIdentityName(void) const { // TODO: it is more correct to go through chain till first non-proxy cert X509_NAME *subject = NULL; X509_NAME_ENTRY *ne = NULL; if(!cert_) return ""; subject = X509_NAME_dup(X509_get_subject_name(cert_)); ASN1_STRING* entry; std::string entry_str; for(;;) { ne = X509_NAME_get_entry(subject, X509_NAME_entry_count(subject)-1); if (!OBJ_cmp(X509_NAME_ENTRY_get_object(ne),OBJ_nid2obj(NID_commonName))) { entry = X509_NAME_ENTRY_get_data(ne); entry_str.assign((const char*)(entry->data), (std::size_t)(entry->length)); if(entry_str == "proxy" || entry_str == "limited proxy" || entry_str.find_first_not_of("0123456789") == std::string::npos) { //Drop the name entry "proxy", "limited proxy", or the random digital(RFC) ne = X509_NAME_delete_entry(subject, X509_NAME_entry_count(subject)-1); X509_NAME_ENTRY_free(ne); ne = NULL; } else break; } else break; } std::string str; if(subject!=NULL) { char* buf = X509_NAME_oneline(subject,NULL,0); if(buf) { str.append(buf); OPENSSL_free(buf); } X509_NAME_free(subject); } return str; } certType Credential::GetType(void) const { return cert_type_; } std::string Credential::GetIssuerName(void) const { X509_NAME *issuer = NULL; 
if(!cert_) return ""; issuer = X509_get_issuer_name(cert_); std::string str; if(issuer!=NULL) { char* buf = X509_NAME_oneline(issuer,NULL,0); if(buf) { str.append(buf); OPENSSL_free(buf); } } return str; } std::string Credential::GetCAName(void) const { X509 *cacert = NULL; X509_NAME *caname = NULL; if(!cert_chain_) return ""; int num = sk_X509_num(cert_chain_); std::string str; if(num > 0) { // This works even if last cert on chain is CA // itself because CA is self-signed. cacert = sk_X509_value(cert_chain_, num-1); caname = X509_get_issuer_name(cacert); if(caname!=NULL) { char* buf = X509_NAME_oneline(caname,NULL,0); if(buf) { str.append(buf); OPENSSL_free(buf); } } } return str; } std::string Credential::GetProxyPolicy(void) const { return verification_proxy_policy; } Period Credential::GetLifeTime(void) const { return lifetime_; } Time Credential::GetStartTime() const { return start_; } Time Credential::GetEndTime() const { return start_+lifetime_; } Signalgorithm Credential::GetSigningAlgorithm(void) const { Signalgorithm signing_algorithm = SIGN_DEFAULT; if(!cert_) return signing_algorithm; int sig_nid = X509_get_signature_nid(cert_); switch(sig_nid) { case NID_sha1WithRSAEncryption: signing_algorithm = SIGN_SHA1; break; case NID_sha224WithRSAEncryption: signing_algorithm = SIGN_SHA224; break; case NID_sha256WithRSAEncryption: signing_algorithm = SIGN_SHA256; break; case NID_sha384WithRSAEncryption: signing_algorithm = SIGN_SHA384; break; case NID_sha512WithRSAEncryption: signing_algorithm = SIGN_SHA512; break; } return signing_algorithm; } int Credential::GetKeybits(void) const { int keybits = 0; if(!cert_) return keybits; EVP_PKEY* pkey = X509_get_pubkey(cert_); if(!pkey) return keybits; keybits = EVP_PKEY_bits(pkey); return keybits; } void Credential::SetLifeTime(const Period& period) { lifetime_ = period; } void Credential::SetStartTime(const Time& start_time) { start_ = start_time; } void Credential::SetSigningAlgorithm(Signalgorithm signing_algorithm) { switch(signing_algorithm) { case SIGN_SHA1: signing_alg_ = ((EVP_MD*)EVP_sha1()); break; case SIGN_SHA224: signing_alg_ = ((EVP_MD*)EVP_sha224()); break; case SIGN_SHA256: signing_alg_ = ((EVP_MD*)EVP_sha256()); break; case SIGN_SHA384: signing_alg_ = ((EVP_MD*)EVP_sha384()); break; case SIGN_SHA512: signing_alg_ = ((EVP_MD*)EVP_sha512()); break; default: signing_alg_ = NULL; break; } } void Credential::SetKeybits(int keybits) { keybits_ = keybits; } bool Credential::IsCredentialsValid(const UserConfig& usercfg) { return Credential(!usercfg.ProxyPath().empty() ? usercfg.ProxyPath() : usercfg.CertificatePath(), !usercfg.ProxyPath().empty() ? 
"" : usercfg.KeyPath(), usercfg.CACertificatesDirectory(), usercfg.CACertificatePath()).IsValid(); } bool Credential::IsValid(void) { Time t; return verification_valid && (GetStartTime() <= t) && (t < GetEndTime()); } static BIO* OpenFileBIO(const std::string& file) { if(!Glib::file_test(file,Glib::FILE_TEST_IS_REGULAR)) return NULL; return BIO_new_file(file.c_str(), "r"); } void Credential::loadCertificateFile(const std::string& certfile, X509* &x509, STACK_OF(X509) **certchain) { BIO* b = OpenFileBIO(certfile); if(!b) { CredentialLogger.msg(ERROR,"Can not find certificate file: %s", certfile); throw CredentialError("Can not find certificate file"); } AutoBIO certbio(b); if(!certbio){ CredentialLogger.msg(ERROR,"Can not read certificate file: %s", certfile); LogError(); throw CredentialError("Can not read certificate file"); } std::string certstr; for(;;) { char s[256]; int l = BIO_read(certbio,s,sizeof(s)); if(l <= 0) break; certstr.append(s,l); } loadCertificateString(certstr,x509,certchain); } static bool matchCertificate(X509* tmp, X509* x509) { if((X509_cmp(tmp, x509) == 0) && // only hash is checked by X509_cmp (X509_issuer_and_serial_cmp(tmp, x509) == 0) && (X509_subject_name_cmp(tmp, x509) == 0)) return true; return false; } static bool matchCertificate(X509* tmp, STACK_OF(X509) *certchain) { int nn = 0; for(; nn < sk_X509_num(certchain) ; nn++) { X509* ccert = sk_X509_value(certchain, nn); if((X509_cmp(tmp, ccert) == 0) && // only hash is checked by X509_cmp (X509_issuer_and_serial_cmp(tmp, ccert) == 0) && (X509_subject_name_cmp(tmp, ccert) == 0)) break; } if(nn < sk_X509_num(certchain)) return true; return false; } void Credential::loadCertificateString(const std::string& cert, X509* &x509, STACK_OF(X509) **certchain) { AutoBIO certbio(BIO_new_mem_buf((void*)(cert.c_str()), cert.length())); if(!certbio){ CredentialLogger.msg(ERROR,"Can not read certificate string"); LogError(); throw CredentialError("Can not read certificate string"); } //Parse the certificate Credformat format = CRED_UNKNOWN; if(!certbio) return; format = getFormat_str(cert); int n; if(*certchain) { sk_X509_pop_free(*certchain, X509_free); *certchain = NULL; } unsigned char* pkcs_chr; switch(format) { case CRED_PEM: CredentialLogger.msg(DEBUG,"Certificate format is PEM"); //Get the certificte, By default, certificate is without passphrase //Read certificate if(!(PEM_read_bio_X509(certbio, &x509, NULL, NULL))) { throw CredentialError("Can not read cert information from BIO"); } //Get the issuer chain *certchain = sk_X509_new_null(); n = 0; while(!BIO_eof(certbio)){ X509 * tmp = NULL; if(!(PEM_read_bio_X509(certbio, &tmp, NULL, NULL))){ ERR_clear_error(); break; } // Gross hack - fight users which concatenate their certificates in loop // Filter out certificates which are already present. 
if(matchCertificate(tmp, *certchain) || matchCertificate(tmp, x509)) continue; // duplicate - skip if(!sk_X509_insert(*certchain, tmp, n)) { //std::string str(X509_NAME_oneline(X509_get_subject_name(tmp),0,0)); X509_free(tmp); throw CredentialError("Can not insert cert into certificate's issuer chain"); } ++n; } break; case CRED_DER: CredentialLogger.msg(DEBUG,"Certificate format is DER"); x509 = d2i_X509_bio(certbio, NULL); if(!x509){ throw CredentialError("Unable to read DER credential from BIO"); } //Get the issuer chain *certchain = sk_X509_new_null(); n = 0; while(!BIO_eof(certbio)){ X509 * tmp = NULL; if(!(tmp = d2i_X509_bio(certbio, NULL))){ ERR_clear_error(); break; } // Gross hack - fight users which concatenate their certificates in loop // Filter out certificates which are already present. if(matchCertificate(tmp, *certchain) || matchCertificate(tmp, x509)) continue; // duplicate - skip if(!sk_X509_insert(*certchain, tmp, n)) { //std::string str(X509_NAME_oneline(X509_get_subject_name(tmp),0,0)); X509_free(tmp); throw CredentialError("Can not insert cert into certificate's issuer chain"); } ++n; } break; case CRED_PKCS: { PKCS12* pkcs12 = NULL; STACK_OF(X509)* pkcs12_certs = NULL; CredentialLogger.msg(DEBUG,"Certificate format is PKCS"); pkcs_chr = (unsigned char*)(cert.c_str()); pkcs12 = d2i_PKCS12(NULL, (const unsigned char**)&pkcs_chr, cert.length()); if(pkcs12){ char password[100]; EVP_read_pw_string(password, 100, "Enter Password for PKCS12 certificate:", 0); if(!PKCS12_parse(pkcs12, password, &pkey_, &x509, &pkcs12_certs)) { if(pkcs12) PKCS12_free(pkcs12); throw CredentialError("Can not parse PKCS12 file"); } } else{ throw CredentialError("Can not read PKCS12 credential from BIO"); } if (pkcs12_certs && sk_X509_num(pkcs12_certs)){ X509* tmp; for (n = 0; n < sk_X509_num(pkcs12_certs); n++) { tmp = X509_dup(sk_X509_value(pkcs12_certs, n)); sk_X509_insert(*certchain, tmp, n); } } if(pkcs12) { PKCS12_free(pkcs12); } if(pkcs12_certs) { sk_X509_pop_free(pkcs12_certs, X509_free); } } break; default: CredentialLogger.msg(DEBUG,"Certificate format is unknown"); break; } // end switch } void Credential::loadKeyFile(const std::string& keyfile, EVP_PKEY* &pkey, PasswordSource& passphrase) { BIO* b = OpenFileBIO(keyfile); if(!b) { CredentialLogger.msg(ERROR,"Can not find key file: %s", keyfile); throw CredentialError("Can not find key file"); } AutoBIO keybio(b); if(!keybio){ CredentialLogger.msg(ERROR,"Can not open key file %s", keyfile); LogError(); throw CredentialError("Can not open key file " + keyfile); } std::string keystr; for(;;) { char s[256]; int l = BIO_read(keybio,s,sizeof(s)); if(l <= 0) break; keystr.append(s,l); } loadKeyString(keystr,pkey,passphrase); } void Credential::loadKeyString(const std::string& key, EVP_PKEY* &pkey, PasswordSource& passphrase) { AutoBIO keybio(BIO_new_mem_buf((void*)(key.c_str()), key.length())); if(!keybio){ CredentialLogger.msg(ERROR,"Can not read key string"); LogError(); throw CredentialError("Can not read key string"); } //Read key Credformat format; if(!keybio) return; format = getFormat_str(key); unsigned char* key_chr; switch(format){ case CRED_PEM: { PW_CB_DATA cb_data; cb_data.password = &passphrase; if(!(pkey = PEM_read_bio_PrivateKey(keybio, NULL, passwordcb, &cb_data))) { int reason = ERR_GET_REASON(ERR_peek_error()); if(reason == PEM_R_BAD_BASE64_DECODE) throw CredentialError("Can not read PEM private key: probably bad password"); if(reason == PEM_R_BAD_DECRYPT) throw CredentialError("Can not read PEM private key: failed to 
decrypt"); if(reason == PEM_R_BAD_PASSWORD_READ) throw CredentialError("Can not read PEM private key: failed to obtain password"); if(reason == PEM_R_PROBLEMS_GETTING_PASSWORD) throw CredentialError("Can not read PEM private key: failed to obtain password"); throw CredentialError("Can not read PEM private key"); } } break; case CRED_DER: key_chr = (unsigned char*)(key.c_str()); pkey=d2i_PrivateKey(EVP_PKEY_RSA, NULL, (const unsigned char**)&key_chr, key.length()); break; default: break; } } static bool proxy_init_ = false; void Credential::InitProxyCertInfo(void) { static Glib::Mutex lock_; // At least in some versions of OpenSSL functions manupulating // global lists seems to be not thread-safe despite locks // installed (tested for 0.9.7). Hence it is safer to protect // such calls. // It is also good idea to protect proxy_init_ too. Glib::Mutex::Lock lock(lock_); if(proxy_init_) return; /* Proxy Certificate Extension's related objects */ // none // This library provides methods and objects which when registred in // global OpenSSL lists can't be unregistred anymore. Hence it must not // be allowed to unload. if(!PersistentLibraryInit("modcredential")) { CredentialLogger.msg(WARNING, "Failed to lock arccredential library in memory"); }; proxy_init_=true; } void Credential::AddCertExtObj(std::string& sn, std::string& oid) { OBJ_create(oid.c_str(), sn.c_str(), sn.c_str()); } void Credential::InitVerification(void) { } bool Credential::Verify(void) { verification_proxy_policy.clear(); if(verify_cert_chain(cert_, &cert_chain_, cacertfile_, cacertdir_, verification_proxy_policy)) { CredentialLogger.msg(VERBOSE, "Certificate verification succeeded"); verification_valid = true; return true; } else { CredentialLogger.msg(INFO, "Certificate verification failed"); LogError(); return false;} } Credential::Credential() : verification_valid(false), cert_(NULL), pkey_(NULL), cert_chain_(NULL), proxy_cert_info_(NULL), format(CRED_UNKNOWN), start_(Time()), lifetime_(Period("PT12H")), req_(NULL), rsa_key_(NULL), signing_alg_(NULL), keybits_(0), proxyver_(0), pathlength_(0), extensions_(NULL) { OpenSSLInit(); InitVerification(); extensions_ = sk_X509_EXTENSION_new_null(); if(!extensions_) { CredentialLogger.msg(ERROR, "Failed to initialize extensions member for Credential"); return; } //Initiate the proxy certificate constant and method which is required by openssl if(!proxy_init_) InitProxyCertInfo(); } Credential::Credential(const int keybits) : cert_(NULL), pkey_(NULL), cert_chain_(NULL), proxy_cert_info_(NULL), start_(Time()), lifetime_(Period("PT12H")), req_(NULL), rsa_key_(NULL), signing_alg_(NULL), keybits_(keybits), extensions_(NULL) { OpenSSLInit(); InitVerification(); extensions_ = sk_X509_EXTENSION_new_null(); if(!extensions_) { CredentialLogger.msg(ERROR, "Failed to initialize extensions member for Credential"); return; } //Initiate the proxy certificate constant and method which is required by openssl if(!proxy_init_) InitProxyCertInfo(); } Credential::Credential(Time start, Period lifetime, int keybits, std::string proxyversion, std::string policylang, std::string policy, int pathlength) : cert_(NULL), pkey_(NULL), cert_chain_(NULL), proxy_cert_info_(NULL), start_(start), lifetime_(lifetime), req_(NULL), rsa_key_(NULL), signing_alg_(NULL), keybits_(keybits), extensions_(NULL) { OpenSSLInit(); InitVerification(); extensions_ = sk_X509_EXTENSION_new_null(); if(!extensions_) { CredentialLogger.msg(ERROR, "Failed to initialize extensions member for Credential"); return; } //Initiate the proxy 
certificate constant and method which is required by openssl if(!proxy_init_) InitProxyCertInfo(); SetProxyPolicy(proxyversion, policylang, policy, pathlength); } void Credential::SetProxyPolicy(const std::string& proxyversion, const std::string& policylang, const std::string& policy, int pathlength) { proxyversion_ = proxyversion; policy_ = policy; pathlength_ = pathlength; if(proxy_cert_info_) { PROXY_CERT_INFO_EXTENSION_free(proxy_cert_info_); proxy_cert_info_ = NULL; } //Get certType if (proxyversion_.compare("RFC") == 0 || proxyversion_.compare("rfc") == 0) { //The "limited" and "restricted" are from the definition in //http://dev.globus.org/wiki/Security/ProxyCertTypes#RFC_3820_Proxy_Certificates if(policylang.compare("LIMITED") == 0 || policylang.compare("limited") == 0) { cert_type_ = CERT_TYPE_RFC_LIMITED_PROXY; } else if(policylang.compare("RESTRICTED") == 0 || policylang.compare("restricted") == 0) { cert_type_ = CERT_TYPE_RFC_RESTRICTED_PROXY; } else if(policylang.compare("INDEPENDENT") == 0 || policylang.compare("independent") == 0) { cert_type_ = CERT_TYPE_RFC_INDEPENDENT_PROXY; } else if(policylang.compare("IMPERSONATION") == 0 || policylang.compare("impersonation") == 0 || policylang.compare("INHERITALL") == 0 || policylang.compare("inheritAll") == 0){ cert_type_ = CERT_TYPE_RFC_IMPERSONATION_PROXY; //For RFC here, "impersonation" is the same as the "inheritAll" in openssl version>098 } else if(policylang.compare("ANYLANGUAGE") == 0 || policylang.compare("anylanguage") == 0) { cert_type_ = CERT_TYPE_RFC_ANYLANGUAGE_PROXY; //Defined in openssl version>098 } else { CredentialLogger.msg(ERROR,"Unsupported proxy policy language is requested - %s", policylang); proxyversion_.clear(); policy_.clear(); pathlength_ = -1; cert_type_ = CERT_TYPE_CA; return; } } else if (proxyversion_.compare("EEC") == 0 || proxyversion_.compare("eec") == 0) { cert_type_ = CERT_TYPE_EEC; } else { CredentialLogger.msg(ERROR,"Unsupported proxy version is requested - %s", proxyversion_); proxyversion_.clear(); policy_.clear(); pathlength_ = -1; cert_type_ = CERT_TYPE_CA; return; } if(cert_type_ != CERT_TYPE_EEC) { // useless check but keep for future extensions if(!policy_.empty() && policylang.empty()) { CredentialLogger.msg(ERROR,"If you specify a policy you also need to specify a policy language"); return; } proxy_cert_info_ = PROXY_CERT_INFO_EXTENSION_new(); PROXY_POLICY * ppolicy =PROXY_CERT_INFO_EXTENSION_get_proxypolicy(proxy_cert_info_); PROXY_POLICY_set_policy(ppolicy, NULL, 0); ASN1_OBJECT * policy_object = NULL; //set policy language, see definiton in: http://dev.globus.org/wiki/Security/ProxyCertTypes switch(cert_type_) { case CERT_TYPE_RFC_IMPERSONATION_PROXY: if((policy_object = OBJ_nid2obj(IMPERSONATION_PROXY_NID)) != NULL) { PROXY_POLICY_set_policy_language(ppolicy, policy_object); } break; case CERT_TYPE_RFC_INDEPENDENT_PROXY: if((policy_object = OBJ_nid2obj(INDEPENDENT_PROXY_NID)) != NULL) { PROXY_POLICY_set_policy_language(ppolicy, policy_object); } break; case CERT_TYPE_RFC_ANYLANGUAGE_PROXY: if((policy_object = OBJ_nid2obj(ANYLANGUAGE_PROXY_NID)) != NULL) { PROXY_POLICY_set_policy_language(ppolicy, policy_object); } break; default: break; } //set path length constraint if(pathlength >= 0) PROXY_CERT_INFO_EXTENSION_set_path_length(proxy_cert_info_, pathlength_); //set policy std::string policystring; if(!policy_.empty()) { if(Glib::file_test(policy_,Glib::FILE_TEST_EXISTS)) { //If the argument is a location which specifies a file that //includes the policy content 
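/* Editorial note (added comment, not original code): the policy argument handled
 * below may either name a file whose contents become the proxy policy body, or it
 * may already be the literal policy text; Glib::file_test() decides which case
 * applies. Caller-side sketch using the proxy-request constructor defined just
 * above (all argument values are illustrative assumptions):
 *
 *   // 12-hour RFC 3820 limited proxy request with a 2048-bit key,
 *   // no policy body and no explicit path length constraint
 *   Arc::Credential req(Arc::Time(), Arc::Period("PT12H"), 2048,
 *                       "rfc", "limited", "", -1);
 *
 * Passing a readable file path as the policy argument instead embeds that file's
 * contents as the policy body, e.g. for a "restricted" proxy.
 */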
if(Glib::file_test(policy_,Glib::FILE_TEST_IS_REGULAR)) { std::ifstream fp; fp.open(policy_.c_str()); if(!fp) { CredentialLogger.msg(ERROR,"Error: can't open policy file: %s", policy_.c_str()); if(proxy_cert_info_) { PROXY_CERT_INFO_EXTENSION_free(proxy_cert_info_); proxy_cert_info_ = NULL; } return; } fp.unsetf(std::ios::skipws); char c; while(fp.get(c)) policystring += c; } else { CredentialLogger.msg(ERROR,"Error: policy location: %s is not a regular file", policy_.c_str()); if(proxy_cert_info_) { PROXY_CERT_INFO_EXTENSION_free(proxy_cert_info_); proxy_cert_info_ = NULL; } return; } } else { //Otherwise the argument should include the policy content policystring = policy_; } } ppolicy = PROXY_CERT_INFO_EXTENSION_get_proxypolicy(proxy_cert_info_); //Here only consider the situation when there is policy specified if(policystring.size() > 0) { //PROXYPOLICY_set_policy_language(ppolicy, policy_object); PROXY_POLICY_set_policy(ppolicy, (unsigned char*)policystring.c_str(), policystring.size()); } } } Credential::Credential(const std::string& certfile, const std::string& keyfile, const std::string& cadir, const std::string& cafile, PasswordSource& passphrase4key, const bool is_file) { InitCredential(certfile,keyfile,cadir,cafile,passphrase4key,is_file); } Credential::Credential(const std::string& certfile, const std::string& keyfile, const std::string& cadir, const std::string& cafile, const std::string& passphrase4key, const bool is_file) { PasswordSource* pass = NULL; if(passphrase4key.empty()) { pass = new PasswordSourceInteractive("private key", false); } else if(passphrase4key[0] == '\0') { pass = new PasswordSourceString(""); } else { pass = new PasswordSourceString(passphrase4key); } InitCredential(certfile,keyfile,cadir,cafile,*pass,is_file); delete pass; } Credential::Credential(const UserConfig& usercfg, PasswordSource& passphrase4key) { if (usercfg.CredentialString().empty()) { InitCredential(!usercfg.ProxyPath().empty() ? usercfg.ProxyPath() : usercfg.CertificatePath(), !usercfg.ProxyPath().empty() ? "" : usercfg.KeyPath(), usercfg.CACertificatesDirectory(), usercfg.CACertificatePath(), passphrase4key, true); } else { InitCredential(usercfg.CredentialString(), "", usercfg.CACertificatesDirectory(), usercfg.CACertificatePath(), passphrase4key, false); } } Credential::Credential(const UserConfig& usercfg, const std::string& passphrase4key) { PasswordSource* pass = NULL; if(passphrase4key.empty()) { pass = new PasswordSourceInteractive("private key", false); } else if(passphrase4key[0] == '\0') { pass = new PasswordSourceString(""); } else { pass = new PasswordSourceString(passphrase4key); } if (usercfg.CredentialString().empty()) { InitCredential(!usercfg.ProxyPath().empty() ? usercfg.ProxyPath() : usercfg.CertificatePath(), !usercfg.ProxyPath().empty() ? 
"" : usercfg.KeyPath(), usercfg.CACertificatesDirectory(), usercfg.CACertificatePath(), *pass, true); } else { InitCredential(usercfg.CredentialString(), "", usercfg.CACertificatesDirectory(), usercfg.CACertificatePath(), *pass, false); } delete pass; } void Credential::InitCredential(const std::string& certfile, const std::string& keyfile, const std::string& cadir, const std::string& cafile, PasswordSource& passphrase4key, const bool is_file) { cacertfile_ = cafile; cacertdir_ = cadir; certfile_ = certfile; keyfile_ = keyfile; verification_valid = false; cert_ = NULL; pkey_ = NULL; cert_chain_ = NULL; proxy_cert_info_ = NULL; req_ = NULL; rsa_key_ = NULL; signing_alg_ = NULL; keybits_ = 0; proxyver_ = 0; pathlength_ = 0; extensions_ = NULL; OpenSSLInit(); InitVerification(); extensions_ = sk_X509_EXTENSION_new_null(); if(!extensions_) { CredentialLogger.msg(ERROR, "Failed to initialize extensions member for Credential"); return; } if(certfile.empty()) { CredentialLogger.msg(ERROR, "Certificate/Proxy path is empty"); return; } //Initiate the proxy certificate constant and method which is required by openssl if(!proxy_init_) InitProxyCertInfo(); try { if(is_file) { loadCertificateFile(certfile, cert_, &cert_chain_); if(cert_) check_cert_type(cert_,cert_type_); if(keyfile.empty()) { //Detect if the certificate file/string contains private key. //If the key file is absent, and the private key is not contained inside //certificate file/string, then the certificate file will not //be parsed for private key. //Note this detection only applies to PEM file std::string keystr; // Since the certfile file has been loaded in the call to // loadCertificateFile, it is redundant to check if it exist. // loadCertificateFile will throw an exception if the file does not // exist. 
std::ifstream in(certfile.c_str(), std::ios::in); std::getline(in, keystr, 0); in.close(); if(keystr.find("BEGIN RSA PRIVATE KEY") != std::string::npos) { loadKeyFile(certfile, pkey_, passphrase4key); } } else { loadKeyFile(keyfile, pkey_, passphrase4key); } } else { loadCertificateString(certfile, cert_, &cert_chain_); if(cert_) check_cert_type(cert_,cert_type_); if(keyfile.empty()) { std::string keystr; keystr = certfile; if(keystr.find("BEGIN RSA PRIVATE KEY") != std::string::npos) { loadKeyString(certfile, pkey_, passphrase4key); } } else { loadKeyString(keyfile, pkey_, passphrase4key); } } } catch(std::exception& err){ CredentialLogger.msg(ERROR, "%s", err.what()); LogError(); //return; } //Get the lifetime of the credential getLifetime(cert_chain_, cert_, start_, lifetime_); if(cert_) { X509_EXTENSION* ext = NULL; for (int i=0; idata = (unsigned char*) malloc(data.size()); memcpy(ext_oct->data, data.c_str(), data.size()); ext_oct->length = data.size(); if (!(ext = X509_EXTENSION_create_by_OBJ(NULL, ext_obj, crit, ext_oct))) { CredentialLogger.msg(ERROR, "Can not create extension for proxy certificate"); LogError(); if(ext_oct) ASN1_OCTET_STRING_free(ext_oct); if(ext_obj) ASN1_OBJECT_free(ext_obj); return NULL; } #ifndef WIN32 // TODO: ASN1_OCTET_STRING_free is not working correctly // on Windows Vista, bugreport: 1587 if(ext_oct) ASN1_OCTET_STRING_free(ext_oct); #endif if(ext_obj) ASN1_OBJECT_free(ext_obj); return ext; } X509_REQ* Credential::GetCertReq(void) const { return req_; } bool Credential::GenerateEECRequest(BIO* reqbio, BIO* /*keybio*/, const std::string& dn) { bool res = false; RSA* rsa_key = NULL; const EVP_MD *digest = signing_alg_?signing_alg_:DEFAULT_DIGEST; EVP_PKEY* pkey; int keybits = keybits_?keybits_:DEFAULT_KEYBITS; BN_GENCB* cb = BN_GENCB_new(); BIGNUM *prime = BN_new(); rsa_key = RSA_new(); BN_GENCB_set(cb,&keygen_cb,NULL); if(prime && rsa_key) { int val1 = BN_set_word(prime,RSA_F4); if(val1 != 1) { CredentialLogger.msg(ERROR, "BN_set_word failed"); LogError(); if(cb) BN_GENCB_free(cb); if(prime) BN_free(prime); if(rsa_key) RSA_free(rsa_key); return false; } int val2 = RSA_generate_key_ex(rsa_key, keybits, prime, cb); if(val2 != 1) { CredentialLogger.msg(ERROR, "RSA_generate_key_ex failed"); LogError(); if(cb) BN_GENCB_free(cb); if(prime) BN_free(prime); if(rsa_key) RSA_free(rsa_key); return false; } } else { CredentialLogger.msg(ERROR, "BN_new || RSA_new failed"); LogError(); if(cb) BN_GENCB_free(cb); if(prime) BN_free(prime); if(rsa_key) RSA_free(rsa_key); return false; } if(cb) BN_GENCB_free(cb); if(prime) BN_free(prime); X509_REQ *req = NULL; CredentialLogger.msg(VERBOSE, "Created RSA key, proceeding with request"); pkey = EVP_PKEY_new(); if (pkey) { if (rsa_key) { CredentialLogger.msg(VERBOSE, "pkey and rsa_key exist!"); if (EVP_PKEY_set1_RSA(pkey, rsa_key)) { req = X509_REQ_new(); CredentialLogger.msg(VERBOSE, "Generate new X509 request!"); if(req) { if (X509_REQ_set_version(req,3L)) { X509_NAME *name = NULL; name = parse_name((char*)(dn.c_str()), MBSTRING_ASC, 0); CredentialLogger.msg(VERBOSE, "Setting subject name!"); X509_REQ_set_subject_name(req, name); X509_NAME_free(name); if(X509_REQ_set_pubkey(req,pkey)) { if(X509_REQ_sign(req,pkey,digest)) { if(!(PEM_write_bio_X509_REQ(reqbio,req))){ CredentialLogger.msg(ERROR, "PEM_write_bio_X509_REQ failed"); LogError(); res = false; } else { rsa_key_ = rsa_key; rsa_key = NULL; pkey_ = pkey; pkey = NULL; req_ = req; res = true; } } } } } } } } if(rsa_key) RSA_free(rsa_key); req_ = req; return res; } bool 
Credential::GenerateEECRequest(std::string& req_content, std::string& key_content, const std::string& dn) { BIO *req_out = BIO_new(BIO_s_mem()); BIO *key_out = BIO_new(BIO_s_mem()); if(!req_out || !key_out) { CredentialLogger.msg(ERROR, "Can not create BIO for request"); LogError(); return false; } if(GenerateEECRequest(req_out, key_out,dn)) { int l = 0; char s[256]; while ((l = BIO_read(req_out,s,sizeof(s))) >= 0) { req_content.append(s,l); } l = 0; while ((l=BIO_read(key_out,s,sizeof(s))) >= 0) { key_content.append(s,l); } } else { CredentialLogger.msg(ERROR, "Failed to write request into string"); BIO_free_all(req_out); BIO_free_all(key_out); return false; } BIO_free_all(req_out); BIO_free_all(key_out); return true; } static int BIO_write_filename_User(BIO *b, const char* file) { return BIO_write_filename(b, (char*)file); } static int BIO_read_filename_User(BIO *b, const char* file) { return BIO_read_filename(b, (char*)file); } bool Credential::GenerateEECRequest(const char* req_filename, const char* key_filename, const std::string& dn) { BIO *req_out = BIO_new(BIO_s_file()); BIO *key_out = BIO_new(BIO_s_file()); if(!req_out || !key_out) { CredentialLogger.msg(ERROR, "Can not create BIO for request"); return false; } if (!(BIO_write_filename_User(req_out, req_filename))) { CredentialLogger.msg(ERROR, "Can not set writable file for request BIO"); BIO_free_all(req_out); return false; } if (!(BIO_write_filename_User(key_out, key_filename))) { CredentialLogger.msg(ERROR, "Can not set writable file for request BIO"); BIO_free_all(key_out); return false; } if(GenerateEECRequest(req_out,key_out, dn)) { CredentialLogger.msg(INFO, "Wrote request into a file"); } else { CredentialLogger.msg(ERROR, "Failed to write request into a file"); BIO_free_all(req_out); BIO_free_all(key_out); return false; } BIO_free_all(req_out); BIO_free_all(key_out); return true; } //TODO: self request/sign proxy bool Credential::GenerateRequest(BIO* reqbio, bool if_der){ bool res = false; RSA* rsa_key = NULL; int keybits = keybits_?keybits_:DEFAULT_KEYBITS; const EVP_MD *digest = signing_alg_?signing_alg_:DEFAULT_DIGEST; EVP_PKEY* pkey; if(pkey_) { CredentialLogger.msg(ERROR, "The credential's private key has already been initialized"); return false; }; //BN_GENCB cb; BIGNUM *prime = BN_new(); rsa_key = RSA_new(); //BN_GENCB_set(&cb,&keygen_cb,NULL); if(prime && rsa_key) { int val1 = BN_set_word(prime,RSA_F4); if(val1 != 1) { CredentialLogger.msg(ERROR, "BN_set_word failed"); LogError(); if(prime) BN_free(prime); if(rsa_key) RSA_free(rsa_key); return false; } //int val2 = RSA_generate_key_ex(rsa_key, keybits, prime, &cb); int val2 = RSA_generate_key_ex(rsa_key, keybits, prime, NULL); if(val2 != 1) { CredentialLogger.msg(ERROR, "RSA_generate_key_ex failed"); LogError(); if(prime) BN_free(prime); if(rsa_key) RSA_free(rsa_key); return false; } } else { CredentialLogger.msg(ERROR, "BN_new || RSA_new failed"); LogError(); if(prime) BN_free(prime); if(rsa_key) RSA_free(rsa_key); return false; } if(prime) BN_free(prime); X509_REQ *req = NULL; pkey = EVP_PKEY_new(); if(pkey){ if(rsa_key) { if(EVP_PKEY_set1_RSA(pkey, rsa_key)) { req = X509_REQ_new(); if(req) { if(X509_REQ_set_version(req,2L)) { //set the DN X509_NAME* name = NULL; X509_NAME_ENTRY* entry = NULL; if(cert_) { //self-sign, copy the X509_NAME if ((name = X509_NAME_dup(X509_get_subject_name(cert_))) == NULL) { CredentialLogger.msg(ERROR, "Can not duplicate the subject name for the self-signing proxy certificate request"); LogError(); res = false; if(pkey) 
EVP_PKEY_free(pkey); if(rsa_key) RSA_free(rsa_key); return res; } } else { name = X509_NAME_new();} if((entry = X509_NAME_ENTRY_create_by_NID(NULL, NID_commonName, V_ASN1_APP_CHOOSE, (unsigned char *) "NULL SUBJECT NAME ENTRY", -1)) == NULL) { CredentialLogger.msg(ERROR, "Can not create a new X509_NAME_ENTRY for the proxy certificate request"); LogError(); res = false; X509_NAME_free(name); if(pkey) EVP_PKEY_free(pkey); if(rsa_key) RSA_free(rsa_key); return res; } X509_NAME_add_entry(name, entry, X509_NAME_entry_count(name), 0); X509_REQ_set_subject_name(req,name); X509_NAME_free(name); name = NULL; if(entry) { X509_NAME_ENTRY_free(entry); entry = NULL; } if(cert_type_ != CERT_TYPE_EEC) { // set the default PROXY_CERT_INFO_EXTENSION extension X509_EXTENSION* ext = NULL; std::string data; int length = i2d_PROXY_CERT_INFO_EXTENSION(proxy_cert_info_, NULL); if(length < 0) { CredentialLogger.msg(ERROR, "Can not convert PROXY_CERT_INFO_EXTENSION struct from internal to DER encoded format"); LogError(); } else { data.resize(length); unsigned char* derdata = reinterpret_cast(const_cast(data.c_str())); length = i2d_PROXY_CERT_INFO_EXTENSION(proxy_cert_info_, &derdata); if(length < 0) { CredentialLogger.msg(ERROR, "Can not convert PROXY_CERT_INFO_EXTENSION struct from internal to DER encoded format"); LogError(); } else { std::string certinfo_sn = SN_proxyCertInfo; ext = CreateExtension(certinfo_sn, data, 1); } } if(ext) { STACK_OF(X509_EXTENSION)* extensions = sk_X509_EXTENSION_new_null(); if(extensions && sk_X509_EXTENSION_push(extensions, ext)) { X509_REQ_add_extensions(req, extensions); sk_X509_EXTENSION_pop_free(extensions, X509_EXTENSION_free); } else { X509_EXTENSION_free(ext); } } } if(X509_REQ_set_pubkey(req,pkey)) { if(X509_REQ_sign(req,pkey,digest) != 0) { if(if_der == false) { if(!(PEM_write_bio_X509_REQ(reqbio,req))){ CredentialLogger.msg(ERROR, "PEM_write_bio_X509_REQ failed"); LogError(); res = false; } else { rsa_key_ = rsa_key; rsa_key = NULL; pkey_ = pkey; pkey = NULL; res = true; } } else { if(!(i2d_X509_REQ_bio(reqbio,req))){ CredentialLogger.msg(ERROR, "Can't convert X509 request from internal to DER encoded format"); LogError(); res = false; } else { rsa_key_ = rsa_key; rsa_key = NULL; pkey_ = pkey; pkey = NULL; res = true; } } } } } //X509_REQ_free(req); } else { CredentialLogger.msg(ERROR, "Can not generate X509 request"); LogError(); res = false; } } else { CredentialLogger.msg(ERROR, "Can not set private key"); LogError(); res = false;} } } if(rsa_key) RSA_free(rsa_key); req_ = req; return res; } bool Credential::GenerateRequest(std::string &content, bool if_der) { BIO *out = BIO_new(BIO_s_mem()); if(!out) { CredentialLogger.msg(ERROR, "Can not create BIO for request"); LogError(); return false; } if(GenerateRequest(out,if_der)) { for(;;) { char s[256]; int l = BIO_read(out,s,sizeof(s)); if(l <= 0) break; content.append(s,l); } } BIO_free_all(out); return true; } bool Credential::GenerateRequest(const char* filename, bool if_der) { BIO *out = BIO_new(BIO_s_file()); if(!out) { CredentialLogger.msg(ERROR, "Can not create BIO for request"); LogError(); return false; } if (!(BIO_write_filename_User(out, filename))) { CredentialLogger.msg(ERROR, "Can not set writable file for request BIO"); LogError(); BIO_free_all(out); return false; } if(GenerateRequest(out,if_der)) { CredentialLogger.msg(INFO, "Wrote request into a file"); } else { CredentialLogger.msg(ERROR, "Failed to write request into a file"); BIO_free_all(out); return false; } BIO_free_all(out); return true; } bool 
Credential::OutputPrivatekey(std::string &content, bool encryption, const std::string& passphrase) { if(passphrase.empty()) { PasswordSourceInteractive pass("", true); return OutputPrivatekey(content, encryption, pass); } PasswordSourceString pass(passphrase); return OutputPrivatekey(content, encryption, pass); } bool Credential::OutputPrivatekey(std::string &content, bool encryption, PasswordSource& passphrase) { BIO *out = BIO_new(BIO_s_mem()); EVP_CIPHER *enc = NULL; if(!out) return false; if(rsa_key_ != NULL) { if(!encryption) { if(!PEM_write_bio_RSAPrivateKey(out,rsa_key_,enc,NULL,0,NULL,NULL)) { BIO_free_all(out); return false; } } else { enc = (EVP_CIPHER*)EVP_des_ede3_cbc(); PW_CB_DATA cb_data; cb_data.password = &passphrase; if(!PEM_write_bio_RSAPrivateKey(out,rsa_key_,enc,NULL,0, &passwordcb,&cb_data)) { BIO_free_all(out); return false; } } } else if(pkey_ != NULL) { if(!encryption) { if(!PEM_write_bio_PrivateKey(out,pkey_,enc,NULL,0,NULL,NULL)) { BIO_free_all(out); return false; } } else { enc = (EVP_CIPHER*)EVP_des_ede3_cbc(); PW_CB_DATA cb_data; cb_data.password = &passphrase; if(!PEM_write_bio_PrivateKey(out,pkey_,enc,NULL,0, &passwordcb,&cb_data)) { BIO_free_all(out); return false; } } } else { CredentialLogger.msg(ERROR, "Failed to get private key"); BIO_free_all(out); return false; } for(;;) { char s[256]; int l = BIO_read(out,s,sizeof(s)); if(l <= 0) break; content.append(s,l); } BIO_free_all(out); return true; } bool Credential::OutputPublickey(std::string &content) { BIO *out = BIO_new(BIO_s_mem()); if(!out) return false; if(rsa_key_ != NULL) { if(!PEM_write_bio_RSAPublicKey(out,rsa_key_)) { CredentialLogger.msg(ERROR, "Failed to get public key from RSA object"); BIO_free_all(out); return false; }; } else if(cert_ != NULL) { EVP_PKEY *pkey = NULL; pkey = X509_get_pubkey(cert_); if(pkey == NULL) { CredentialLogger.msg(ERROR, "Failed to get public key from X509 object"); BIO_free_all(out); return false; }; PEM_write_bio_PUBKEY(out, pkey); EVP_PKEY_free(pkey); } else { CredentialLogger.msg(ERROR, "Failed to get public key"); BIO_free_all(out); return false; } for(;;) { char s[256]; int l = BIO_read(out,s,sizeof(s)); if(l <= 0) break; content.append(s,l); } BIO_free_all(out); return true; } bool Credential::OutputCertificate(std::string &content, bool if_der) { if(!cert_) return false; BIO *out = BIO_new(BIO_s_mem()); if(!out) return false; if(if_der == false) { if(!PEM_write_bio_X509(out,cert_)) { BIO_free_all(out); return false; }; } else { if(!i2d_X509_bio(out,cert_)) { BIO_free_all(out); return false; }; } for(;;) { char s[256]; int l = BIO_read(out,s,sizeof(s)); if(l <= 0) break; content.append(s,l); } BIO_free_all(out); return true; } bool Credential::OutputCertificateChain(std::string &content, bool if_der) { BIO *out = BIO_new(BIO_s_mem()); if(!out) return false; X509 *cert; CredentialLogger.msg(DEBUG, "Certiticate chain number %d",sk_X509_num(cert_chain_)); //Output the cert chain. After the verification the cert_chain_ //will include the CA certificate. Having CA in proxy does not //harm. So we output it too. 
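/* Editorial note (added comment, not original code): each chain entry is appended
 * to the output string in the requested encoding; as noted above, after verification
 * the chain may also carry the CA certificate. Sketch of how a caller can assemble a
 * complete PEM proxy from the output methods of this class (the variable names and
 * the unencrypted-key choice are illustrative assumptions):
 *
 *   std::string pem, part;
 *   if(cred.OutputCertificate(part, false))      pem += part;  // entity certificate
 *   part.clear();
 *   if(cred.OutputPrivatekey(part, false, ""))   pem += part;  // private key, plain
 *   part.clear();
 *   if(cred.OutputCertificateChain(part, false)) pem += part;  // issuer chain
 */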
if(cert_chain_) for (int n = 0; n < sk_X509_num(cert_chain_) ; n++) { cert = sk_X509_value(cert_chain_, n); if(if_der == false) { if(!PEM_write_bio_X509(out,cert)) { BIO_free_all(out); return false; }; } else { if(!i2d_X509_bio(out,cert)) { BIO_free_all(out); return false; }; } for(;;) { char s[256]; int l = BIO_read(out,s,sizeof(s)); if(l <= 0) break; content.append(s,l); } } BIO_free_all(out); return true; } //Inquire the input request bio to get PROXY_CERT_INFO_EXTENSION, certType bool Credential::InquireRequest(BIO* reqbio, bool if_eec, bool if_der){ bool res = false; if(reqbio == NULL) { CredentialLogger.msg(ERROR, "NULL BIO passed to InquireRequest"); return false; } if(req_) {X509_REQ_free(req_); req_ = NULL; } if((if_der == false) && (!(PEM_read_bio_X509_REQ(reqbio, &req_, NULL, NULL)))) { CredentialLogger.msg(ERROR, "PEM_read_bio_X509_REQ failed"); LogError(); return false; } else if((if_der == true) && (!(d2i_X509_REQ_bio(reqbio, &req_)))) { CredentialLogger.msg(ERROR, "d2i_X509_REQ_bio failed"); LogError(); return false; } STACK_OF(X509_EXTENSION)* req_extensions = NULL; X509_EXTENSION* ext; PROXY_POLICY* policy = NULL; ASN1_OBJECT* policy_lang = NULL; ASN1_OBJECT* extension_oid = NULL; int nid = NID_undef; int i; //Get the PROXY_CERT_INFO_EXTENSION from request' extension req_extensions = X509_REQ_get_extensions(req_); for(i=0;i it2) pt2 = it2; ASN1_UTCTIME* not_before = utc_to_asn1time(pt1); ASN1_UTCTIME* not_after = utc_to_asn1time(pt2); if((!not_before) || (!not_after)) { if(not_before) ASN1_UTCTIME_free(not_before); if(not_after) ASN1_UTCTIME_free(not_after); return false; } X509_set1_notBefore(tosign, not_before); X509_set1_notAfter(tosign, not_after); ASN1_UTCTIME_free(not_before); ASN1_UTCTIME_free(not_after); return true; } EVP_PKEY* Credential::GetPrivKey(void) const { EVP_PKEY* key = NULL; BIO* bio = NULL; int length; bio = BIO_new(BIO_s_mem()); if(pkey_ == NULL) { //CredentialLogger.msg(ERROR, "Private key of the credential object is NULL"); BIO_free(bio); return NULL; } length = i2d_PrivateKey_bio(bio, pkey_); if(length <= 0) { CredentialLogger.msg(ERROR, "Can not convert private key to DER format"); LogError(); BIO_free(bio); return NULL; } key = d2i_PrivateKey_bio(bio, NULL); BIO_free(bio); return key; } EVP_PKEY* Credential::GetPubKey(void) const { EVP_PKEY* key = NULL; if(cert_) key = X509_get_pubkey(cert_); return key; } X509* Credential::GetCert(void) const { X509* cert = NULL; if(cert_) cert = X509_dup(cert_); return cert; } STACK_OF(X509)* Credential::GetCertChain(void) const { STACK_OF(X509)* chain = NULL; chain = sk_X509_new_null(); //Return the cert chain (not including this certificate itself) if(cert_chain_) for (int i=0; i < sk_X509_num(cert_chain_); i++) { X509* tmp = X509_dup(sk_X509_value(cert_chain_,i)); sk_X509_insert(chain, tmp, i); } return chain; } int Credential::GetCertNumofChain(void) const { //Return the number of certificates //in the issuer chain if(!cert_chain_) return 0; return sk_X509_num(cert_chain_); } bool Credential::AddExtension(const std::string& name, const std::string& data, bool crit) { X509_EXTENSION* ext = CreateExtension(name, data, crit); if(ext) { if(sk_X509_EXTENSION_push(extensions_, ext)) return true; X509_EXTENSION_free(ext); } return false; } bool Credential::AddExtension(const std::string& name, char** binary) { X509_EXTENSION* ext = NULL; if(binary == NULL) return false; ext = X509V3_EXT_conf_nid(NULL, NULL, OBJ_txt2nid(name.c_str()), (char*)binary); if(ext) { if(sk_X509_EXTENSION_push(extensions_, ext)) return 
true; X509_EXTENSION_free(ext); } return false; } std::string Credential::GetExtension(const std::string& name) { std::string res; if(cert_ == NULL) return res; int num; if ((num = X509_get_ext_count(cert_)) > 0) { for (int i = 0; i < num; i++) { X509_EXTENSION *ext; const char *extname; ext = X509_get_ext(cert_, i); extname = OBJ_nid2sn(OBJ_obj2nid(X509_EXTENSION_get_object(ext))); if (strcmp(extname, name.c_str()) == 0) { X509V3_EXT_METHOD *method; STACK_OF(CONF_VALUE) *val; CONF_VALUE *nval; void *extstr = NULL; const unsigned char *ext_value_data; //Get x509 extension method structure if (!(method = (X509V3_EXT_METHOD *)(X509V3_EXT_get(ext)))) break; ASN1_OCTET_STRING* extvalue = X509_EXTENSION_get_data(ext); ext_value_data = extvalue->data; //Decode ASN1 item in data if (method->it) { //New style ASN1 extstr = ASN1_item_d2i(NULL, &ext_value_data, extvalue->length, ASN1_ITEM_ptr(method->it)); } else { //Old style ASN1 extstr = method->d2i(NULL, &ext_value_data, extvalue->length); } val = method->i2v(method, extstr, NULL); for (int j = 0; j < sk_CONF_VALUE_num(val); j++) { nval = sk_CONF_VALUE_value(val, j); std::string name = nval->name; std::string val = nval->value; if(!val.empty()) res = name + ":" + val; else res = name; } } } } return res; } bool Credential::SignRequestAssistant(Credential* proxy, EVP_PKEY* req_pubkey, X509** tosign){ bool res = false; X509* issuer = NULL; int position = -1; *tosign = NULL; if(cert_ == NULL) { CredentialLogger.msg(ERROR, "Credential is not initialized"); goto err; } issuer = X509_dup(cert_); if(!issuer) { CredentialLogger.msg(ERROR, "Failed to duplicate X509 structure"); LogError(); goto err; } if((*tosign = X509_new()) == NULL) { CredentialLogger.msg(ERROR, "Failed to initialize X509 structure"); LogError(); goto err; } //TODO: VOMS { int length = i2d_PROXY_CERT_INFO_EXTENSION(proxy->proxy_cert_info_, NULL); if (length < 0) { CredentialLogger.msg(ERROR, "Can not convert PROXY_CERT_INFO_EXTENSION struct from internal to DER encoded format"); LogError(); goto err; } else { std::string certinfo_data; certinfo_data.resize(length); unsigned char* derdata = reinterpret_cast(const_cast(certinfo_data.c_str())); length = i2d_PROXY_CERT_INFO_EXTENSION(proxy->proxy_cert_info_, &derdata); if (length < 0) { CredentialLogger.msg(ERROR, "Can not convert PROXY_CERT_INFO_EXTENSION struct from internal to DER encoded format"); LogError(); goto err; } else { certinfo_data.resize(length); std::string NID_txt = SN_proxyCertInfo; X509_EXTENSION* certinfo_ext = CreateExtension(NID_txt, certinfo_data, 1); if(certinfo_ext == NULL) { CredentialLogger.msg(ERROR, "Can not create extension for PROXY_CERT_INFO"); LogError(); goto err; } else { if(!sk_X509_EXTENSION_push(proxy->extensions_, certinfo_ext)) { CredentialLogger.msg(ERROR, "Can not add X509 extension to proxy cert"); X509_EXTENSION_free(certinfo_ext); LogError(); goto err; } } } } } /* Add any keyUsage and extendedKeyUsage extensions present in the issuer cert */ if(X509_get_ext_by_NID(issuer, NID_key_usage, -1) > -1) { // Extension is present - transfer it ASN1_BIT_STRING* usage = (ASN1_BIT_STRING*)X509_get_ext_d2i(issuer, NID_key_usage, NULL, NULL); if(!usage) { CredentialLogger.msg(ERROR, "Can not convert keyUsage struct from DER encoded format"); LogError(); goto err; } /* clear bits specified in draft */ ASN1_BIT_STRING_set_bit(usage, 1, 0); /* Non Repudiation */ ASN1_BIT_STRING_set_bit(usage, 5, 0); /* Certificate Sign */ X509_EXTENSION* ext = NULL; int ku_length = i2d_ASN1_BIT_STRING(usage, NULL); 
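/* Editorial note (added comment, not original code): this keyUsage transfer follows
 * the usual OpenSSL two-pass i2d_* convention, sketched generically below: a first
 * call with a NULL output pointer only reports the DER length, a buffer of that size
 * is prepared, and a second call writes the encoding while advancing the temporary
 * output pointer (which is why a copy of the buffer address is passed).
 *
 *   int len = i2d_ASN1_BIT_STRING(usage, NULL);    // pass 1: length only
 *   std::string der(len, '\0');
 *   unsigned char* p = (unsigned char*)&der[0];
 *   len = i2d_ASN1_BIT_STRING(usage, &p);          // pass 2: write DER, p advances
 */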
if(ku_length < 0) { CredentialLogger.msg(ERROR, "Can not convert keyUsage struct from internal to DER format"); LogError(); ASN1_BIT_STRING_free(usage); goto err; } std::string ku_data; ku_data.resize(ku_length); unsigned char* derdata = reinterpret_cast(const_cast(ku_data.c_str())); ku_length = i2d_ASN1_BIT_STRING(usage, &derdata); if(ku_length < 0) { CredentialLogger.msg(ERROR, "Can not convert keyUsage struct from internal to DER format"); LogError(); ASN1_BIT_STRING_free(usage); goto err; } ASN1_BIT_STRING_free(usage); std::string name = "keyUsage"; ext = CreateExtension(name, ku_data, 1); if(!ext) { CredentialLogger.msg(ERROR, "Can not create extension for keyUsage"); LogError(); goto err; } if(!sk_X509_EXTENSION_push(proxy->extensions_, ext)) { CredentialLogger.msg(ERROR, "Can not add X509 extension to proxy cert"); LogError(); X509_EXTENSION_free(ext); ext = NULL; goto err; } } position = X509_get_ext_by_NID(issuer, NID_ext_key_usage, -1); if(position > -1) { X509_EXTENSION* ext = NULL; if(!(ext = X509_get_ext(issuer, position))) { CredentialLogger.msg(ERROR, "Can not get extended KeyUsage extension from issuer certificate"); LogError(); goto err; } ext = X509_EXTENSION_dup(ext); if(!ext) { CredentialLogger.msg(ERROR, "Can not copy extended KeyUsage extension"); LogError(); goto err; } if(!sk_X509_EXTENSION_push(proxy->extensions_, ext)) { CredentialLogger.msg(ERROR, "Can not add X509 extended KeyUsage extension to new proxy certificate"); LogError(); X509_EXTENSION_free(ext); ext = NULL; goto err; } } { std::stringstream CN_name; unsigned char md[SHA_DIGEST_LENGTH]; unsigned int len = sizeof(md); if(!ASN1_digest((int(*)(void*, unsigned char**))&i2d_PUBKEY, EVP_sha1(), (char*)req_pubkey, md, &len)) { CredentialLogger.msg(ERROR, "Can not compute digest of public key"); goto err; } // SHA_DIGEST_LENGTH=20 < 4 uint32_t sub_hash = md[0] + (md[1] + (md[2] + (md[3] >> 1) * 256) * 256) * 256; CN_name<(const_cast(CN_name.str().c_str())), -1)) == NULL) { CredentialLogger.msg(ERROR, "Can not create name entry CN for proxy certificate"); LogError(); X509_NAME_free(subject_name); goto err; } if(!X509_NAME_add_entry(subject_name, name_entry, X509_NAME_entry_count(subject_name), 0) || !X509_set_subject_name(*tosign, subject_name)) { CredentialLogger.msg(ERROR, "Can not set CN in proxy certificate"); LogError(); X509_NAME_free(subject_name); X509_NAME_ENTRY_free(name_entry); goto err; } X509_NAME_free(subject_name); X509_NAME_ENTRY_free(name_entry); } if(!X509_set_issuer_name(*tosign, X509_get_subject_name(issuer))) { CredentialLogger.msg(ERROR, "Can not set issuer's subject for proxy certificate"); LogError(); goto err; } if(!X509_set_version(*tosign, 2L)) { CredentialLogger.msg(ERROR, "Can not set version number for proxy certificate"); LogError(); goto err; } //Use the serial number in the certificate as the serial number in the proxy certificate if(ASN1_INTEGER* serial_number = X509_get_serialNumber(issuer)) { if(serial_number = ASN1_INTEGER_dup(X509_get_serialNumber(issuer))) { if(!X509_set_serialNumber(*tosign, serial_number)) { CredentialLogger.msg(ERROR, "Can not set serial number for proxy certificate"); ASN1_INTEGER_free(serial_number); LogError(); goto err; } ASN1_INTEGER_free(serial_number); } else { CredentialLogger.msg(ERROR, "Can not duplicate serial number for proxy certificate"); LogError(); goto err; } } if(!SetProxyPeriod(*tosign, issuer, proxy->start_, proxy->lifetime_)) { CredentialLogger.msg(ERROR, "Can not set the lifetime for proxy certificate"); goto err; } 
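/* Editorial note (added comment, not original code): the subject-name block above
 * gives the new proxy certificate the issuer's subject plus one extra CN component
 * derived from the request's public key: a SHA-1 digest of the key is folded into a
 * 32-bit number, so every delegated key yields its own CN value. Folding step as
 * used above:
 *
 *   unsigned char md[SHA_DIGEST_LENGTH];  // 20-byte SHA-1 of the DER-encoded pubkey
 *   uint32_t cn = md[0] + (md[1] + (md[2] + (md[3] >> 1) * 256) * 256) * 256;
 */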
if(!X509_set_pubkey(*tosign, req_pubkey)) { CredentialLogger.msg(ERROR, "Can not set pubkey for proxy certificate"); LogError(); goto err; } res = true; err: if(issuer) { X509_free(issuer); } if((!res) && *tosign) { X509_free(*tosign); *tosign = NULL;} return res; } bool Credential::SignRequest(Credential* proxy, BIO* outputbio, bool if_der){ bool res = false; if(proxy == NULL) { CredentialLogger.msg(ERROR, "The credential to be signed is NULL"); return false; } if(proxy->req_ == NULL) { CredentialLogger.msg(ERROR, "The credential to be signed contains no request"); return false; } if(outputbio == NULL) { CredentialLogger.msg(ERROR, "The BIO for output is NULL"); return false; } int md_nid; const EVP_MD* dgst_alg =NULL; EVP_PKEY* issuer_priv = NULL; EVP_PKEY* issuer_pub = NULL; X509* proxy_cert = NULL; X509_EXTENSION* ext = NULL; EVP_PKEY* req_pubkey = NULL; req_pubkey = X509_REQ_get_pubkey(proxy->req_); if(!req_pubkey) { CredentialLogger.msg(ERROR, "Error when extracting public key from request"); LogError(); return false; } if(!X509_REQ_verify(proxy->req_, req_pubkey)){ CredentialLogger.msg(ERROR,"Failed to verify the request"); LogError(); goto err; } if(!SignRequestAssistant(proxy, req_pubkey, &proxy_cert)) { CredentialLogger.msg(ERROR,"Failed to add issuer's extension into proxy"); LogError(); goto err; } /*Add the extensions which has just been added by application, * into the proxy_cert which will be signed soon. * Note here we suppose it is the signer who will add the * extension to to-signed proxy and then sign it; * it also could be the request who add the extension and put * it inside X509 request' extension, but here the situation * has not been considered for now */ for(X509_EXTENSION* ext = X509_delete_ext(proxy_cert,0); ext; ext = X509_delete_ext(proxy_cert,0)) { X509_EXTENSION_free(ext); }; /*Set the serialNumber*/ //cert_info->serialNumber = M_ASN1_INTEGER_dup(X509_get_serialNumber(proxy_cert));; /*Set the extension*/ for (int i=0; iextensions_); i++) { ext = sk_X509_EXTENSION_value(proxy->extensions_, i); if (ext == NULL) { //CredentialLogger.msg(ERROR,"Failed to duplicate extension"); LogError(); goto err; CredentialLogger.msg(ERROR,"Failed to find extension"); LogError(); goto err; } X509_add_ext(proxy_cert, ext, -1); } /*Clean extensions attached to "proxy" after it has been linked into to-signed certificate*/ while(X509_EXTENSION* ext = sk_X509_EXTENSION_pop(proxy->extensions_)) { X509_EXTENSION_free(ext); } /* Now sign the new certificate */ if(!(issuer_priv = GetPrivKey())) { CredentialLogger.msg(ERROR, "Can not get the issuer's private key"); goto err; } /* Use the signing algorithm in the signer's priv key */ { int dgst_err = EVP_PKEY_get_default_digest_nid(issuer_priv, &md_nid); if(dgst_err <= 0) { CredentialLogger.msg(INFO, "There is no digest in issuer's private key object"); } else if((dgst_err == 2) || (!proxy->signing_alg_)) { // mandatory or no digest specified char* md_str = (char *)OBJ_nid2sn(md_nid); if(md_str) { if((dgst_alg = EVP_get_digestbyname(md_str)) == NULL) { CredentialLogger.msg(INFO, "%s is an unsupported digest type", md_str); // TODO: if digest is mandatory then probably there must be error. 
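/* Editorial note (added comment, not original code): EVP_PKEY_get_default_digest_nid()
 * returns a value <= 0 on failure, 1 when the key's preferred digest is only advisory,
 * and 2 when it is mandatory. The branch above therefore switches to the key's own
 * digest either when it is mandatory or when the caller requested no signing
 * algorithm; otherwise the requested algorithm (or DEFAULT_DIGEST) is kept and later
 * restricted to the SHA-1/SHA-2 family before X509_sign() is called.
 *
 *   rc <= 0 : no usable default, keep the requested algorithm / DEFAULT_DIGEST
 *   rc == 1 : advisory, an explicitly requested algorithm still wins
 *   rc == 2 : mandatory, OBJ_nid2sn(md_nid) + EVP_get_digestbyname() select it
 */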
} } } } if(dgst_alg == NULL) dgst_alg = proxy->signing_alg_?proxy->signing_alg_:DEFAULT_DIGEST; /* Check whether the digest algorithm is SHA1 or SHA2*/ md_nid = EVP_MD_type(dgst_alg); if((md_nid != NID_sha1) && (md_nid != NID_sha224) && (md_nid != NID_sha256) && (md_nid != NID_sha384) && (md_nid != NID_sha512)) { CredentialLogger.msg(ERROR, "The signing algorithm %s is not allowed,it should be SHA1 or SHA2 to sign certificate requests", OBJ_nid2sn(md_nid)); goto err; } if(!X509_sign(proxy_cert, issuer_priv, dgst_alg)) { CredentialLogger.msg(ERROR, "Failed to sign the proxy certificate"); LogError(); goto err; } else CredentialLogger.msg(INFO, "Succeeded to sign the proxy certificate"); /*Verify the signature, not needed later*/ issuer_pub = GetPubKey(); if((X509_verify(proxy_cert, issuer_pub)) != 1) { CredentialLogger.msg(ERROR, "Failed to verify the signed certificate"); LogError(); goto err; } else CredentialLogger.msg(INFO, "Succeeded to verify the signed certificate"); /*Output the signed certificate into BIO*/ if(if_der == false) { if(PEM_write_bio_X509(outputbio, proxy_cert)) { CredentialLogger.msg(INFO, "Output the proxy certificate"); res = true; } else { CredentialLogger.msg(ERROR, "Can not convert signed proxy cert into PEM format"); LogError(); } } else { if(i2d_X509_bio(outputbio, proxy_cert)) { CredentialLogger.msg(INFO, "Output the proxy certificate"); res = true; } else { CredentialLogger.msg(ERROR, "Can not convert signed proxy cert into DER format"); LogError(); } } err: if(issuer_priv) { EVP_PKEY_free(issuer_priv);} if(proxy_cert) { X509_free(proxy_cert);} if(req_pubkey) { EVP_PKEY_free(req_pubkey); } if(issuer_pub) { EVP_PKEY_free(issuer_pub); } return res; } bool Credential::SignRequest(Credential* proxy, std::string &content, bool if_der) { BIO *out = BIO_new(BIO_s_mem()); if(!out) { CredentialLogger.msg(ERROR, "Can not create BIO for signed proxy certificate"); LogError(); return false; } if(SignRequest(proxy, out, if_der)) { for(;;) { char s[256]; int l = BIO_read(out,s,sizeof(s)); if(l <= 0) break; content.append(s,l); } } else { BIO_free_all(out); return false; } BIO_free_all(out); return true; } bool Credential::SignRequest(Credential* proxy, const char* filename, bool if_der) { BIO *out = BIO_new(BIO_s_file()); if(!out) { CredentialLogger.msg(ERROR, "Can not create BIO for signed proxy certificate"); LogError(); return false; } if (!BIO_write_filename_User(out, filename)) { CredentialLogger.msg(ERROR, "Can not set writable file for signed proxy certificate BIO"); LogError(); BIO_free_all(out); return false; } if(SignRequest(proxy, out, if_der)) { CredentialLogger.msg(INFO, "Wrote signed proxy certificate into a file"); } else { CredentialLogger.msg(ERROR, "Failed to write signed proxy certificate into a file"); BIO_free_all(out); return false; } BIO_free_all(out); return true; } //The following is the methods about how to use a CA credential to sign an EEC Credential::Credential(const std::string& CAcertfile, const std::string& CAkeyfile, const std::string& CAserial, const std::string& extfile, const std::string& extsect, PasswordSource& passphrase4key) : certfile_(CAcertfile), keyfile_(CAkeyfile), verification_valid(false), cert_(NULL), pkey_(NULL), cert_chain_(NULL), proxy_cert_info_(NULL), req_(NULL), rsa_key_(NULL), signing_alg_(NULL), keybits_(0), proxyver_(0), pathlength_(0), extensions_(NULL), CAserial_(CAserial), extfile_(extfile), extsect_(extsect) { OpenSSLInit(); InitVerification(); //Initiate the proxy certificate constant and method which is required 
by openssl if(!proxy_init_) InitProxyCertInfo(); extensions_ = sk_X509_EXTENSION_new_null(); if(!extensions_) { CredentialLogger.msg(ERROR, "Failed to initialize extensions member for Credential"); return; } try { loadCertificateFile(CAcertfile, cert_, &cert_chain_); if(cert_) check_cert_type(cert_,cert_type_); loadKeyFile(CAkeyfile, pkey_, passphrase4key); } catch(std::exception& err){ CredentialLogger.msg(ERROR, "ERROR:%s", err.what()); LogError(); } } Credential::Credential(const std::string& CAcertfile, const std::string& CAkeyfile, const std::string& CAserial, const std::string& extfile, const std::string& extsect, const std::string& passphrase4key) : certfile_(CAcertfile), keyfile_(CAkeyfile), verification_valid(false), cert_(NULL), pkey_(NULL), cert_chain_(NULL), proxy_cert_info_(NULL), req_(NULL), rsa_key_(NULL), signing_alg_(NULL), keybits_(0), proxyver_(0), pathlength_(0), extensions_(NULL), CAserial_(CAserial), extfile_(extfile), extsect_(extsect) { OpenSSLInit(); InitVerification(); //Initiate the proxy certificate constant and method which is required by openssl if(!proxy_init_) InitProxyCertInfo(); extensions_ = sk_X509_EXTENSION_new_null(); if(!extensions_) { CredentialLogger.msg(ERROR, "Failed to initialize extensions member for Credential"); return; } try { PasswordSource* pass = NULL; if(passphrase4key.empty()) { pass = new PasswordSourceInteractive("private key", false); } else if(passphrase4key[0] == '\0') { pass = new PasswordSourceString(""); } else { pass = new PasswordSourceString(passphrase4key); } loadCertificateFile(CAcertfile, cert_, &cert_chain_); if(cert_) check_cert_type(cert_,cert_type_); loadKeyFile(CAkeyfile, pkey_, *pass); delete pass; } catch(std::exception& err){ CredentialLogger.msg(ERROR, "ERROR:%s", err.what()); LogError(); } } static void print_ssl_errors() { unsigned long err; while((err = ERR_get_error())) { CredentialLogger.msg(DEBUG,"SSL error: %s, libs: %s, func: %s, reason: %s", ERR_error_string(err, NULL),ERR_lib_error_string(err), ERR_func_error_string(err),ERR_reason_error_string(err)); } } #define SERIAL_RAND_BITS 64 int rand_serial(BIGNUM *b, ASN1_INTEGER *ai) { BIGNUM *btmp; int ret = 0; if (b) btmp = b; else btmp = BN_new(); if (!btmp) return 0; if (!BN_pseudo_rand(btmp, SERIAL_RAND_BITS, 0, 0)) goto error; if (ai && !BN_to_ASN1_INTEGER(btmp, ai)) goto error; ret = 1; error: if (!b) BN_free(btmp); return ret; } #undef BSIZE #define BSIZE 256 BIGNUM *load_serial(const std::string& serialfile, ASN1_INTEGER **retai) { BIO *in=NULL; BIGNUM *ret=NULL; char buf[1024]; ASN1_INTEGER *ai=NULL; ai=ASN1_INTEGER_new(); if (ai == NULL) goto err; if ((in=BIO_new(BIO_s_file())) == NULL) { print_ssl_errors(); goto err; } if (BIO_read_filename_User(in,serialfile.c_str()) > 0) { if (!a2i_ASN1_INTEGER(in,ai,buf,1024)) { CredentialLogger.msg(ERROR,"unable to load number from: %s",serialfile); goto err; } ret=ASN1_INTEGER_to_BN(ai,NULL); if (ret == NULL) { CredentialLogger.msg(ERROR, "error converting number from bin to BIGNUM"); goto err; } } if (ret && retai) { *retai = ai; ai = NULL; } err: if (in != NULL) BIO_free(in); if (ai != NULL) ASN1_INTEGER_free(ai); return(ret); } int save_serial(const std::string& serialfile, char *suffix, BIGNUM *serial, ASN1_INTEGER **retai) { char buf[1][BSIZE]; BIO *out = NULL; int ret=0; ASN1_INTEGER *ai=NULL; int j; if (suffix == NULL) j = strlen(serialfile.c_str()); else j = strlen(serialfile.c_str()) + strlen(suffix) + 1; if (j >= BSIZE) { CredentialLogger.msg(ERROR,"file name too long"); goto err; } if (suffix == NULL) 
BUF_strlcpy(buf[0], serialfile.c_str(), BSIZE); else { #ifndef OPENSSL_SYS_VMS j = BIO_snprintf(buf[0], sizeof buf[0], "%s.%s", serialfile.c_str(), suffix); #else j = BIO_snprintf(buf[0], sizeof buf[0], "%s-%s", serialfile.c_str(), suffix); #endif } out=BIO_new(BIO_s_file()); if (out == NULL) { print_ssl_errors(); goto err; } if (BIO_write_filename_User(out,buf[0]) <= 0) { perror(serialfile.c_str()); goto err; } if ((ai=BN_to_ASN1_INTEGER(serial,NULL)) == NULL) { CredentialLogger.msg(ERROR,"error converting serial to ASN.1 format"); goto err; } i2a_ASN1_INTEGER(out,ai); BIO_puts(out,"\n"); ret=1; if (retai) { *retai = ai; ai = NULL; } err: if (out != NULL) BIO_free_all(out); if (ai != NULL) ASN1_INTEGER_free(ai); return(ret); } #undef POSTFIX #define POSTFIX ".srl" static ASN1_INTEGER *x509_load_serial(const std::string& CAfile, const std::string& serialfile) { ASN1_INTEGER *bs = NULL; BIGNUM *serial = NULL; std::string serial_f; if(!serialfile.empty()) serial_f = serialfile; else if(!CAfile.empty()){ std::size_t pos; pos = CAfile.rfind("."); if(pos != std::string::npos) serial_f = CAfile.substr(0, pos); serial_f.append(".srl"); } else{ return bs;} serial = load_serial(serial_f, NULL); if (serial == NULL) { CredentialLogger.msg(ERROR,"load serial from %s failure",serial_f.c_str()); return bs; } if (!BN_add_word(serial,1)) { CredentialLogger.msg(ERROR,"add_word failure"); BN_free(serial); return bs; } if(!save_serial(serial_f, NULL, serial, &bs)) { CredentialLogger.msg(ERROR,"save serial to %s failure",serial_f.c_str()); BN_free(serial); return bs; } BN_free(serial); return bs; } static int x509_certify(X509_STORE *ctx, const std::string& CAfile, const EVP_MD *digest, X509 *x, X509 *xca, EVP_PKEY *pkey, const std::string& serialfile, time_t start, time_t lifetime, int clrext, CONF *conf, char *section, ASN1_INTEGER *sno) { int ret=0; ASN1_INTEGER *bs=NULL; X509_STORE_CTX* xsc = X509_STORE_CTX_new(); EVP_PKEY *upkey; upkey = X509_get_pubkey(xca); EVP_PKEY_copy_parameters(upkey,pkey); EVP_PKEY_free(upkey); if(!X509_STORE_CTX_init(xsc,ctx,x,NULL)) { CredentialLogger.msg(ERROR,"Error initialising X509 store"); goto end; } if (sno) bs = sno; else if (!(bs = x509_load_serial(CAfile, serialfile))) { bs = ASN1_INTEGER_new(); if( bs == NULL || !rand_serial(NULL,bs)) { CredentialLogger.msg(ERROR,"Out of memory when generate random serial"); goto end; } //bs = s2i_ASN1_INTEGER(NULL, "1"); } //X509_STORE_CTX_set_cert(&xsc,x); //X509_STORE_CTX_set_flags(&xsc, X509_V_FLAG_CHECK_SS_SIGNATURE); //if (!X509_verify_cert(&xsc)) // goto end; if (!X509_check_private_key(xca,pkey)) { CredentialLogger.msg(ERROR,"CA certificate and CA private key do not match"); goto end; } if (!X509_set_issuer_name(x,X509_get_subject_name(xca))) goto end; if (!X509_set_serialNumber(x,bs)) goto end; if (X509_gmtime_adj(X509_getm_notBefore(x), start) == NULL) goto end; /* hardwired expired */ if (X509_gmtime_adj(X509_getm_notAfter(x), lifetime) == NULL) goto end; if (clrext) { while (X509_get_ext_count(x) > 0) X509_delete_ext(x, 0); } if (conf) { X509V3_CTX ctx2; X509_set_version(x,2); X509V3_set_ctx(&ctx2, xca, x, NULL, NULL, 0); X509V3_set_nconf(&ctx2, conf); if (!X509V3_EXT_add_nconf(conf, &ctx2, section, x)) { CredentialLogger.msg(ERROR,"Failed to load extension section: %s", section); goto end; } } if (!X509_sign(x,pkey,digest)) goto end; ret=1; end: X509_STORE_CTX_cleanup(xsc); X509_STORE_CTX_free(xsc); if (!ret) ERR_clear_error(); if (!sno) ASN1_INTEGER_free(bs); return ret; } /*subject is expected to be in the format 
/type0=value0/type1=value1/type2=... * where characters may be escaped by \ */ X509_NAME * Credential::parse_name(char *subject, long chtype, int multirdn) { size_t buflen = strlen(subject)+1; /* to copy the types and values into. due to escaping, the copy can only become shorter */ char *buf = (char*)(OPENSSL_malloc(buflen)); size_t max_ne = buflen / 2 + 1; /* maximum number of name elements */ char **ne_types = (char **)(OPENSSL_malloc(max_ne * sizeof (char *))); char **ne_values = (char **)(OPENSSL_malloc(max_ne * sizeof (char *))); int *mval = (int*)(OPENSSL_malloc (max_ne * sizeof (int))); char *sp = subject, *bp = buf; int i, ne_num = 0; X509_NAME *n = NULL; int nid; if (!buf || !ne_types || !ne_values) { CredentialLogger.msg(ERROR,"malloc error"); goto error; } if (*subject != '/') { CredentialLogger.msg(ERROR,"Subject does not start with '/'"); goto error; } sp++; /* skip leading / */ /* no multivalued RDN by default */ mval[ne_num] = 0; while (*sp) { /* collect type */ ne_types[ne_num] = bp; while (*sp) { if (*sp == '\\') /* is there anything to escape in the type...? */ { if (*++sp) *bp++ = *sp++; else { CredentialLogger.msg(ERROR,"escape character at end of string"); goto error; } } else if (*sp == '=') { sp++; *bp++ = '\0'; break; } else *bp++ = *sp++; } if (!*sp) { CredentialLogger.msg(ERROR,"end of string encountered while processing type of subject name element #%d",ne_num); goto error; } ne_values[ne_num] = bp; while (*sp) { if (*sp == '\\') { if (*++sp) *bp++ = *sp++; else { CredentialLogger.msg(ERROR,"escape character at end of string"); goto error; } } else if (*sp == '/') { sp++; /* no multivalued RDN by default */ mval[ne_num+1] = 0; break; } else if (*sp == '+' && multirdn) { /* a not escaped + signals a mutlivalued RDN */ sp++; mval[ne_num+1] = -1; break; } else *bp++ = *sp++; } *bp++ = '\0'; ne_num++; } if (!(n = X509_NAME_new())) goto error; for (i = 0; i < ne_num; i++) { if ((nid=OBJ_txt2nid(ne_types[i])) == NID_undef) { CredentialLogger.msg(ERROR,"Subject Attribute %s has no known NID, skipped",ne_types[i]); continue; } if (!*ne_values[i]) { CredentialLogger.msg(ERROR,"No value provided for Subject Attribute %s skipped",ne_types[i]); continue; } if (!X509_NAME_add_entry_by_NID(n, nid, chtype, (unsigned char*)ne_values[i], -1,-1,mval[i])) goto error; } OPENSSL_free(mval); OPENSSL_free(ne_values); OPENSSL_free(ne_types); OPENSSL_free(buf); return n; error: X509_NAME_free(n); if (ne_values) OPENSSL_free(ne_values); if (ne_types) OPENSSL_free(ne_types); if (buf) OPENSSL_free(buf); return NULL; } bool Credential::SelfSignEECRequest(const std::string& dn, const char* extfile, const std::string& extsect, const char* certfile) { if(extfile != NULL){ extfile_ = extfile; } if(!extsect.empty()){ extsect_ = extsect; } cert_ = X509_new(); X509_NAME *name = NULL; if(!dn.empty()) { name = parse_name((char*)(dn.c_str()), MBSTRING_ASC, 0); X509_set_subject_name(cert_, name); X509_NAME_free(name); } else { X509_set_subject_name(cert_, X509_REQ_get_subject_name(req_)); } EVP_PKEY* tmpkey; tmpkey = X509_REQ_get_pubkey(req_); if(!tmpkey || !X509_set_pubkey(cert_, tmpkey)) { CredentialLogger.msg(ERROR,"Failed to set the pubkey for X509 object by using pubkey from X509_REQ"); LogError(); return false; } EVP_PKEY_free(tmpkey); return(SignEECRequest(this, dn, certfile)); } bool Credential::SignEECRequest(Credential* eec, const std::string& dn, BIO* outputbio) { if(pkey_ == NULL) { CredentialLogger.msg(ERROR, "The private key for signing is not initialized"); return false; } bool res = 
false; if(eec == NULL) { CredentialLogger.msg(ERROR, "The credential to be signed is NULL"); return false; } if(eec->req_ == NULL) { CredentialLogger.msg(ERROR, "The credential to be signed contains no request"); return false; } if(outputbio == NULL) { CredentialLogger.msg(ERROR, "The BIO for output is NULL"); return false; } X509* eec_cert = NULL; EVP_PKEY* req_pubkey = NULL; req_pubkey = X509_REQ_get_pubkey(eec->req_); if(!req_pubkey) { CredentialLogger.msg(ERROR, "Error when extracting public key from request"); LogError(); return false; } if(!X509_REQ_verify(eec->req_, req_pubkey)){ CredentialLogger.msg(ERROR,"Failed to verify the request"); LogError(); return false; } eec_cert = X509_new(); X509_set_pubkey(eec_cert, req_pubkey); EVP_PKEY_free(req_pubkey); X509_NAME *subject = NULL; if(!dn.empty()) { subject = parse_name((char*)(dn.c_str()), MBSTRING_ASC, 0); X509_set_subject_name(eec_cert, subject); X509_NAME_free(subject); } else { X509_set_subject_name(eec_cert, X509_REQ_get_subject_name(eec->req_)); } /* const EVP_MD *digest=EVP_sha1(); #ifndef OPENSSL_NO_DSA if (pkey_->type == EVP_PKEY_DSA) digest=EVP_dss1(); #endif */ /* #ifndef OPENSSL_NO_ECDSA if (pkey_->type == EVP_PKEY_EC) digest = EVP_ecdsa(); #endif */ const EVP_MD* digest = NULL; int md_nid; char* md_str; if(EVP_PKEY_get_default_digest_nid(pkey_, &md_nid) <= 0) { CredentialLogger.msg(INFO, "There is no digest in issuer's private key object"); } md_str = (char *)OBJ_nid2sn(md_nid); if((digest = EVP_get_digestbyname(md_str)) == NULL) { CredentialLogger.msg(INFO, "%s is an unsupported digest type", md_str); } if(digest == NULL) digest = EVP_sha1(); X509_STORE *ctx = NULL; ctx = X509_STORE_new(); //X509_STORE_set_verify_cb_func(ctx,callb); if (!X509_STORE_set_default_paths(ctx)) { LogError(); } CONF *extconf = NULL; if (!extfile_.empty()) { long errorline = -1; extconf = NCONF_new(NULL); //configuration file with X509V3 extensions to add if (!NCONF_load(extconf, extfile_.c_str(),&errorline)) { if (errorline <= 0) { CredentialLogger.msg(ERROR,"Error when loading the extension config file: %s", extfile_.c_str()); return false; } else { CredentialLogger.msg(ERROR,"Error when loading the extension config file: %s on line: %d", extfile_.c_str(), errorline); } } //section from config file with X509V3 extensions to add if (extsect_.empty()) { //if the extension section/group has not been specified, use the group name in //the openssl.cnf file which is set by default when openssl is installed char* str = NCONF_get_string(extconf, "usr_cert", "basicConstraints"); if(str != NULL) extsect_ = "usr_cert"; } /* X509V3_set_ctx_test(&ctx2); X509V3_set_nconf(&ctx2, extconf); if (!X509V3_EXT_add_nconf(extconf, &ctx2, (char*)(extsect_.c_str()), NULL)) { CredentialLogger.msg(ERROR,"Error when loading the extension section: %s", extsect_.c_str()); LogError(); } */ } //Add extensions to certificate object for(X509_EXTENSION* ext = X509_delete_ext(eec_cert, 0); ext; ext = X509_delete_ext(eec_cert, 0)) { X509_EXTENSION_free(ext); } X509_EXTENSION* ext = NULL; for (int i=0; iextensions_); i++) { ext = sk_X509_EXTENSION_value(eec->extensions_, i); if (ext == NULL) { CredentialLogger.msg(ERROR,"Failed to duplicate extension"); LogError(); } X509_add_ext(eec_cert, ext, -1); } X509_set_version(cert_,2); time_t lifetime = (eec->GetLifeTime()).GetPeriod(); Time t1 = eec->GetStartTime(); Time t2; time_t start; if(t1 > t2) start = t1.GetTime() - t2.GetTime(); else start = 0; if (!x509_certify(ctx, certfile_, digest, eec_cert, cert_, pkey_, CAserial_, start, 
lifetime, 0, extconf, (char*)(extsect_.c_str()), NULL)) { CredentialLogger.msg(ERROR,"Can not sign a EEC"); LogError(); } if(PEM_write_bio_X509(outputbio, eec_cert)) { CredentialLogger.msg(INFO, "Output EEC certificate"); res = true; } else { CredentialLogger.msg(ERROR, "Can not convert signed EEC cert into DER format"); LogError(); } NCONF_free(extconf); X509_free(eec_cert); X509_STORE_free(ctx); return res; } bool Credential::SignEECRequest(Credential* eec, const std::string& dn, std::string &content) { BIO *out = BIO_new(BIO_s_mem()); if(!out) { CredentialLogger.msg(ERROR, "Can not create BIO for signed EEC certificate"); LogError(); return false; } if(SignEECRequest(eec, dn, out)) { for(;;) { char s[256]; int l = BIO_read(out,s,sizeof(s)); if(l <= 0) break; content.append(s,l); } } BIO_free_all(out); return true; } bool Credential::SignEECRequest(Credential* eec, const std::string& dn, const char* filename) { BIO *out = BIO_new(BIO_s_file()); if(!out) { CredentialLogger.msg(ERROR, "Can not create BIO for signed EEC certificate"); LogError(); return false; } if (!(BIO_write_filename_User(out, filename))) { CredentialLogger.msg(ERROR, "Can not set writable file for signed EEC certificate BIO"); LogError(); BIO_free_all(out); return false; } if(SignEECRequest(eec, dn, out)) { CredentialLogger.msg(INFO, "Wrote signed EEC certificate into a file"); } else { CredentialLogger.msg(ERROR, "Failed to write signed EEC certificate into a file"); BIO_free_all(out); return false; } BIO_free_all(out); return true; } Credential::~Credential() { if(cert_) X509_free(cert_); if(pkey_) EVP_PKEY_free(pkey_); if(cert_chain_) sk_X509_pop_free(cert_chain_, X509_free); if(proxy_cert_info_) PROXY_CERT_INFO_EXTENSION_free(proxy_cert_info_); if(req_) X509_REQ_free(req_); if(rsa_key_) RSA_free(rsa_key_); if(extensions_) sk_X509_EXTENSION_pop_free(extensions_, X509_EXTENSION_free); } } nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315724023635 xustar000000000000000030 mtime=1513200596.896975711 30 atime=1513200647.897599469 30 ctime=1513200659.134736903 nordugrid-arc-5.4.2/src/hed/libs/credential/Makefile.in0000644000175000002070000013322513214315724023711 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/credential DIST_COMMON = README $(am__libarccredential_la_HEADERS_DIST) \ $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libdir)" \ "$(DESTDIR)$(libarccredential_ladir)" LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = libarccredential_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) am__libarccredential_la_SOURCES_DIST = Proxycertinfo.cpp CertUtil.cpp \ PasswordSource.cpp Credential.cpp listfunc.cpp listfunc.h \ VOMSAttribute.cpp VOMSUtil.cpp VOMSConfig.cpp NSSUtil.cpp \ nssprivkeyinfocodec.cpp am__objects_1 = libarccredential_la-VOMSUtil.lo \ libarccredential_la-VOMSConfig.lo @NSS_ENABLED_TRUE@am__objects_2 = libarccredential_la-NSSUtil.lo \ @NSS_ENABLED_TRUE@ libarccredential_la-nssprivkeyinfocodec.lo am_libarccredential_la_OBJECTS = libarccredential_la-Proxycertinfo.lo \ libarccredential_la-CertUtil.lo \ libarccredential_la-PasswordSource.lo \ libarccredential_la-Credential.lo \ libarccredential_la-listfunc.lo \ libarccredential_la-VOMSAttribute.lo 
$(am__objects_1) \ $(am__objects_2) libarccredential_la_OBJECTS = $(am_libarccredential_la_OBJECTS) libarccredential_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) \ $(libarccredential_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarccredential_la_SOURCES) DIST_SOURCES = $(am__libarccredential_la_SOURCES_DIST) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive am__libarccredential_la_HEADERS_DIST = Credential.h CertUtil.h \ Proxycertinfo.h PasswordSource.h VOMSAttribute.h VOMSUtil.h \ VOMSConfig.h NSSUtil.h nssprivkeyinfocodec.h HEADERS = $(libarccredential_la_HEADERS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ 
ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = 
@JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = 
@initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ lib_LTLIBRARIES = libarccredential.la #noinst_PROGRAMS = testproxy testcertinfo testproxy2proxy testvoms testeec #if WIN32 #VOMS_HEADER = #VOMS_SOURCE = #else VOMS_HEADER = VOMSUtil.h VOMSConfig.h VOMS_SOURCE = VOMSUtil.cpp VOMSConfig.cpp @NSS_ENABLED_FALSE@NSS_HEADER = #endif @NSS_ENABLED_TRUE@NSS_HEADER = NSSUtil.h nssprivkeyinfocodec.h @NSS_ENABLED_FALSE@NSS_SOURCE = @NSS_ENABLED_TRUE@NSS_SOURCE = NSSUtil.cpp nssprivkeyinfocodec.cpp libarccredential_ladir = $(pkgincludedir)/credential libarccredential_la_HEADERS = Credential.h CertUtil.h Proxycertinfo.h PasswordSource.h \ VOMSAttribute.h $(VOMS_HEADER) $(NSS_HEADER) libarccredential_la_SOURCES = Proxycertinfo.cpp CertUtil.cpp PasswordSource.cpp \ Credential.cpp listfunc.cpp listfunc.h \ VOMSAttribute.cpp $(VOMS_SOURCE) $(NSS_SOURCE) libarccredential_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(NSS_CFLAGS) $(AM_CXXFLAGS) libarccredential_la_CFLAGS = $(libarccredential_la_CXXFLAGS) libarccredential_la_LIBADD = \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(OPENSSL_LIBS) $(NSS_LIBS) libarccredential_la_LDFLAGS = -version-info 3:0:0 testproxy_SOURCES = testproxy.cpp testproxy_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) testproxy_LDADD = ./libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(EXTRA_LIBS) testcertinfo_SOURCES = testcertinfo.cpp testcertinfo_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) testcertinfo_LDADD = ./libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(EXTRA_LIBS) testproxy2proxy_SOURCES = testproxy2proxy.cpp testproxy2proxy_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) testproxy2proxy_LDADD = ./libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(EXTRA_LIBS) testvoms_SOURCES = testvoms.cpp testvoms_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) 
$(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) testvoms_LDADD = ./libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(EXTRA_LIBS) testeec_SOURCES = testeec.cpp testeec_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) testeec_LDADD = ./libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(EXTRA_LIBS) DIST_SUBDIRS = test SUBDIRS = $(TEST_DIR) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/credential/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/credential/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarccredential.la: $(libarccredential_la_OBJECTS) $(libarccredential_la_DEPENDENCIES) $(libarccredential_la_LINK) -rpath $(libdir) $(libarccredential_la_OBJECTS) $(libarccredential_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccredential_la-CertUtil.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccredential_la-Credential.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccredential_la-NSSUtil.Plo@am__quote@ 
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccredential_la-PasswordSource.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccredential_la-Proxycertinfo.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccredential_la-VOMSAttribute.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccredential_la-VOMSConfig.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccredential_la-VOMSUtil.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccredential_la-listfunc.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccredential_la-nssprivkeyinfocodec.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarccredential_la-Proxycertinfo.lo: Proxycertinfo.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -MT libarccredential_la-Proxycertinfo.lo -MD -MP -MF $(DEPDIR)/libarccredential_la-Proxycertinfo.Tpo -c -o libarccredential_la-Proxycertinfo.lo `test -f 'Proxycertinfo.cpp' || echo '$(srcdir)/'`Proxycertinfo.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccredential_la-Proxycertinfo.Tpo $(DEPDIR)/libarccredential_la-Proxycertinfo.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Proxycertinfo.cpp' object='libarccredential_la-Proxycertinfo.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccredential_la-Proxycertinfo.lo `test -f 'Proxycertinfo.cpp' || echo '$(srcdir)/'`Proxycertinfo.cpp libarccredential_la-CertUtil.lo: CertUtil.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -MT libarccredential_la-CertUtil.lo -MD -MP -MF $(DEPDIR)/libarccredential_la-CertUtil.Tpo -c -o libarccredential_la-CertUtil.lo `test -f 'CertUtil.cpp' || echo '$(srcdir)/'`CertUtil.cpp @am__fastdepCXX_TRUE@ $(am__mv) 
$(DEPDIR)/libarccredential_la-CertUtil.Tpo $(DEPDIR)/libarccredential_la-CertUtil.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='CertUtil.cpp' object='libarccredential_la-CertUtil.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccredential_la-CertUtil.lo `test -f 'CertUtil.cpp' || echo '$(srcdir)/'`CertUtil.cpp libarccredential_la-PasswordSource.lo: PasswordSource.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -MT libarccredential_la-PasswordSource.lo -MD -MP -MF $(DEPDIR)/libarccredential_la-PasswordSource.Tpo -c -o libarccredential_la-PasswordSource.lo `test -f 'PasswordSource.cpp' || echo '$(srcdir)/'`PasswordSource.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccredential_la-PasswordSource.Tpo $(DEPDIR)/libarccredential_la-PasswordSource.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='PasswordSource.cpp' object='libarccredential_la-PasswordSource.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccredential_la-PasswordSource.lo `test -f 'PasswordSource.cpp' || echo '$(srcdir)/'`PasswordSource.cpp libarccredential_la-Credential.lo: Credential.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -MT libarccredential_la-Credential.lo -MD -MP -MF $(DEPDIR)/libarccredential_la-Credential.Tpo -c -o libarccredential_la-Credential.lo `test -f 'Credential.cpp' || echo '$(srcdir)/'`Credential.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccredential_la-Credential.Tpo $(DEPDIR)/libarccredential_la-Credential.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Credential.cpp' object='libarccredential_la-Credential.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccredential_la-Credential.lo `test -f 'Credential.cpp' || echo '$(srcdir)/'`Credential.cpp libarccredential_la-listfunc.lo: listfunc.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -MT libarccredential_la-listfunc.lo -MD -MP -MF $(DEPDIR)/libarccredential_la-listfunc.Tpo -c -o libarccredential_la-listfunc.lo `test -f 'listfunc.cpp' || echo '$(srcdir)/'`listfunc.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccredential_la-listfunc.Tpo $(DEPDIR)/libarccredential_la-listfunc.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='listfunc.cpp' 
object='libarccredential_la-listfunc.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccredential_la-listfunc.lo `test -f 'listfunc.cpp' || echo '$(srcdir)/'`listfunc.cpp libarccredential_la-VOMSAttribute.lo: VOMSAttribute.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -MT libarccredential_la-VOMSAttribute.lo -MD -MP -MF $(DEPDIR)/libarccredential_la-VOMSAttribute.Tpo -c -o libarccredential_la-VOMSAttribute.lo `test -f 'VOMSAttribute.cpp' || echo '$(srcdir)/'`VOMSAttribute.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccredential_la-VOMSAttribute.Tpo $(DEPDIR)/libarccredential_la-VOMSAttribute.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='VOMSAttribute.cpp' object='libarccredential_la-VOMSAttribute.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccredential_la-VOMSAttribute.lo `test -f 'VOMSAttribute.cpp' || echo '$(srcdir)/'`VOMSAttribute.cpp libarccredential_la-VOMSUtil.lo: VOMSUtil.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -MT libarccredential_la-VOMSUtil.lo -MD -MP -MF $(DEPDIR)/libarccredential_la-VOMSUtil.Tpo -c -o libarccredential_la-VOMSUtil.lo `test -f 'VOMSUtil.cpp' || echo '$(srcdir)/'`VOMSUtil.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccredential_la-VOMSUtil.Tpo $(DEPDIR)/libarccredential_la-VOMSUtil.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='VOMSUtil.cpp' object='libarccredential_la-VOMSUtil.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccredential_la-VOMSUtil.lo `test -f 'VOMSUtil.cpp' || echo '$(srcdir)/'`VOMSUtil.cpp libarccredential_la-VOMSConfig.lo: VOMSConfig.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -MT libarccredential_la-VOMSConfig.lo -MD -MP -MF $(DEPDIR)/libarccredential_la-VOMSConfig.Tpo -c -o libarccredential_la-VOMSConfig.lo `test -f 'VOMSConfig.cpp' || echo '$(srcdir)/'`VOMSConfig.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccredential_la-VOMSConfig.Tpo $(DEPDIR)/libarccredential_la-VOMSConfig.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='VOMSConfig.cpp' object='libarccredential_la-VOMSConfig.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ 
$(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccredential_la-VOMSConfig.lo `test -f 'VOMSConfig.cpp' || echo '$(srcdir)/'`VOMSConfig.cpp libarccredential_la-NSSUtil.lo: NSSUtil.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -MT libarccredential_la-NSSUtil.lo -MD -MP -MF $(DEPDIR)/libarccredential_la-NSSUtil.Tpo -c -o libarccredential_la-NSSUtil.lo `test -f 'NSSUtil.cpp' || echo '$(srcdir)/'`NSSUtil.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccredential_la-NSSUtil.Tpo $(DEPDIR)/libarccredential_la-NSSUtil.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='NSSUtil.cpp' object='libarccredential_la-NSSUtil.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccredential_la-NSSUtil.lo `test -f 'NSSUtil.cpp' || echo '$(srcdir)/'`NSSUtil.cpp libarccredential_la-nssprivkeyinfocodec.lo: nssprivkeyinfocodec.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -MT libarccredential_la-nssprivkeyinfocodec.lo -MD -MP -MF $(DEPDIR)/libarccredential_la-nssprivkeyinfocodec.Tpo -c -o libarccredential_la-nssprivkeyinfocodec.lo `test -f 'nssprivkeyinfocodec.cpp' || echo '$(srcdir)/'`nssprivkeyinfocodec.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccredential_la-nssprivkeyinfocodec.Tpo $(DEPDIR)/libarccredential_la-nssprivkeyinfocodec.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='nssprivkeyinfocodec.cpp' object='libarccredential_la-nssprivkeyinfocodec.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredential_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccredential_la-nssprivkeyinfocodec.lo `test -f 'nssprivkeyinfocodec.cpp' || echo '$(srcdir)/'`nssprivkeyinfocodec.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libarccredential_laHEADERS: $(libarccredential_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarccredential_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarccredential_ladir)" @list='$(libarccredential_la_HEADERS)'; test -n "$(libarccredential_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarccredential_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarccredential_ladir)" || exit $$?; \ done uninstall-libarccredential_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarccredential_la_HEADERS)'; test -n "$(libarccredential_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarccredential_ladir)' && rm -f" 
$$files ")"; \ cd "$(DESTDIR)$(libarccredential_ladir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(HEADERS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(libarccredential_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-libarccredential_laHEADERS install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-libLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-libLTLIBRARIES \ uninstall-libarccredential_laHEADERS .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic \ clean-libLTLIBRARIES clean-libtool ctags ctags-recursive \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-libLTLIBRARIES \ install-libarccredential_laHEADERS install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs installdirs-am \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags tags-recursive uninstall uninstall-am \ uninstall-libLTLIBRARIES uninstall-libarccredential_laHEADERS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/NSSUtil.cpp0000644000000000000000000000012413213445240023572 xustar000000000000000027 mtime=1512983200.815191 27 atime=1513200574.637703 30 ctime=1513200659.142737001 nordugrid-arc-5.4.2/src/hed/libs/credential/NSSUtil.cpp0000644000175000002070000030307613213445240023650 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef WIN32 #include #else #include #endif #include #include #include #include "NSSUtil.h" #include "nssprivkeyinfocodec.h" /********************************* * Structures used in exporting the PKCS 12 blob *********************************/ /* A SafeInfo is used for each ContentInfo which makes up the * sequence of safes in the AuthenticatedSafe portion of the * PFX structure. 
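 *
 * In the PKCS#12 layout produced by the export code below, the outer PFX
 * carries an AuthenticatedSafe, which is a sequence of such ContentInfo
 * "safes"; every safe collects SafeBags (certificate and key bags) and can
 * be password-encrypted using the pwitem/algorithm/encryptionKey fields
 * kept in this structure.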
*/ struct SEC_PKCS12SafeInfoStr { PRArenaPool *arena; /* information for setting up password encryption */ SECItem pwitem; SECOidTag algorithm; PK11SymKey *encryptionKey; /* how many items have been stored in this safe, * we will skip any safe which does not contain any * items */ unsigned int itemCount; /* the content info for the safe */ SEC_PKCS7ContentInfo *cinfo; sec_PKCS12SafeContents *safe; }; /* An opaque structure which contains information needed for exporting * certificates and keys through PKCS 12. */ struct SEC_PKCS12ExportContextStr { PRArenaPool *arena; PK11SlotInfo *slot; void *wincx; /* integrity information */ PRBool integrityEnabled; PRBool pwdIntegrity; union { struct sec_PKCS12PasswordModeInfo pwdInfo; struct sec_PKCS12PublicKeyModeInfo pubkeyInfo; } integrityInfo; /* helper functions */ /* retrieve the password call back */ SECKEYGetPasswordKey pwfn; void *pwfnarg; /* safe contents bags */ SEC_PKCS12SafeInfo **safeInfos; unsigned int safeInfoCount; /* the sequence of safes */ sec_PKCS12AuthenticatedSafe authSafe; /* information needing deletion */ CERTCertificate **certList; }; typedef struct SEC_PKCS12SafeInfoStr SEC_PKCS12SafeInfo; typedef struct SEC_PKCS12ExportContextStr SEC_PKCS12ExportContext; using namespace Arc; namespace ArcAuthNSS { //Logger& NSSUtilLogger = log(); Arc::Logger NSSUtilLogger(Arc::Logger::rootLogger, "NSSUtil"); std::string nss_error() { int len; char* text; std::string ret; if ((len = PR_GetErrorTextLength()) > 0) { text = (char*)malloc(len); if (PR_GetErrorText(text) > 0) ret.append("error string: ").append(text); free(text); } else ret.append("unknown NSS error"); return ret; } #define NS_CERT_HEADER "-----BEGIN CERTIFICATE-----" #define NS_CERT_TRAILER "-----END CERTIFICATE-----" #define NS_CERTREQ_HEADER "-----BEGIN NEW CERTIFICATE REQUEST-----" #define NS_CERTREQ_TRAILER "-----END NEW CERTIFICATE REQUEST-----" static SECStatus output_cert(const CERTCertificate* cert, PRBool ascii, PRFileDesc *outfile) { SECItem data; PRInt32 num; SECStatus rv = SECFailure; data.data = cert->derCert.data; data.len = cert->derCert.len; if (ascii) { PR_fprintf(outfile, "%s\n%s\n%s\n", NS_CERT_HEADER, BTOA_DataToAscii(data.data, data.len), NS_CERT_TRAILER); rv = SECSuccess; } else { //raw data num = PR_Write(outfile, data.data, data.len); if (num != (PRInt32) data.len) { NSSUtilLogger.msg(ERROR, "Error writing raw certificate"); rv = SECFailure; } rv = SECSuccess; } return rv; } static SECStatus p12u_SwapUnicodeBytes(SECItem *uniItem) { unsigned int i; unsigned char a; if((uniItem == NULL) || (uniItem->len % 2)) { return SECFailure; } for(i = 0; i < uniItem->len; i += 2) { a = uniItem->data[i]; uniItem->data[i] = uniItem->data[i+1]; uniItem->data[i+1] = a; } return SECSuccess; } static PRBool p12u_ucs2_ascii_conversion_function(PRBool toUnicode, unsigned char* inBuf, unsigned int inBufLen, unsigned char* outBuf, unsigned int maxOutBufLen, unsigned int* outBufLen, PRBool swapBytes) { SECItem it; SECItem *dup = NULL; PRBool ret; it.data = inBuf; it.len = inBufLen; dup = SECITEM_DupItem(&it); // If converting Unicode to ASCII, swap bytes before conversion as neccessary. if (!toUnicode && swapBytes) { if (p12u_SwapUnicodeBytes(dup) != SECSuccess) { SECITEM_ZfreeItem(dup, PR_TRUE); return PR_FALSE; } } // Perform the conversion. 
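  // PORT_UCS2_UTF8Conversion performs the UCS-2 <-> UTF-8 conversion in the
  // direction selected by toUnicode; the optional byte swap above compensates
  // for UCS-2 input whose byte order differs from what the conversion routine
  // expects, which the PKCS#12 password handling relies on via
  // PORT_SetUCS2_ASCIIConversionFunction registered below.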
ret = PORT_UCS2_UTF8Conversion(toUnicode, dup->data, dup->len, outBuf, maxOutBufLen, outBufLen); if (dup) SECITEM_ZfreeItem(dup, PR_TRUE); return ret; } //RFC 3820 and VOMS AC sequence #define OIDT static const unsigned char /* RFC 3820 Proxy OID. (1 3 6 1 5 5 7 1 14)*/ OIDT proxy[] = { 0x2B, 6, 1, 5, 5, 7, 1, 14 }; OIDT anyLanguage[] = { 0x2B, 6, 1, 5, 5, 7, 21, 0 };//(1.3 .6.1.5.5.7.21.0) OIDT inheritAll[] = { 0x2B, 6, 1, 5, 5, 7, 21, 1 }; //(1.3.6.1.5.5.7.21.1) OIDT Independent[] = { 0x2B, 6, 1, 5, 5, 7, 21, 2 }; //(1.3.6.1.5.5.7.21.1) /* VOMS AC sequence OID. ()*/ OIDT VOMS_acseq[] = { 0x2B, 6, 1, 4, 1, 0xBE, 0x45, 100, 100, 5 }; //(1.3.6.1.4.1.8005.100.100.5) // according to BER "Basic Encoding Ruls", 8005 is encoded as 0xBE 0x45 #define OI(x) { siDEROID, (unsigned char *)x, sizeof x } #define ODN(oid,desc) { OI(oid), (SECOidTag)0, desc, CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION } static const SECOidData oids[] = { ODN(proxy, "RFC 3820 proxy extension"), ODN(anyLanguage, "Any language"), ODN(inheritAll, "Inherit all"), ODN(Independent, "Independent"), ODN(VOMS_acseq, "acseq"), }; static const unsigned int numOids = (sizeof oids) / (sizeof oids[0]); SECOidTag tag_proxy, tag_anylang, tag_inheritall, tag_independent, tag_vomsacseq; SECStatus RegisterDynamicOids(void) { SECStatus rv = SECSuccess; tag_proxy = SECOID_AddEntry(&oids[0]); if (tag_proxy == SEC_OID_UNKNOWN) { rv = SECFailure; NSSUtilLogger.msg(ERROR, "Failed to add RFC proxy OID"); } else { NSSUtilLogger.msg(DEBUG, "Succeeded to add RFC proxy OID, tag %d is returned", tag_proxy); } tag_anylang = SECOID_AddEntry(&oids[1]); if (tag_anylang == SEC_OID_UNKNOWN) { rv = SECFailure; NSSUtilLogger.msg(ERROR, "Failed to add anyLanguage OID"); } else { NSSUtilLogger.msg(DEBUG, "Succeeded to add anyLanguage OID, tag %d is returned", tag_anylang); } tag_inheritall = SECOID_AddEntry(&oids[2]); if (tag_inheritall == SEC_OID_UNKNOWN) { rv = SECFailure; NSSUtilLogger.msg(ERROR, "Failed to add inheritAll OID"); } else { NSSUtilLogger.msg(DEBUG, "Succeeded to add inheritAll OID, tag %d is returned", tag_inheritall); } tag_independent = SECOID_AddEntry(&oids[3]); if (tag_independent == SEC_OID_UNKNOWN) { rv = SECFailure; NSSUtilLogger.msg(ERROR, "Failed to add Independent OID"); } else { NSSUtilLogger.msg(DEBUG, "Succeeded to add anyLanguage OID, tag %d is returned", tag_independent); } tag_vomsacseq = SECOID_AddEntry(&oids[4]); if (tag_vomsacseq == SEC_OID_UNKNOWN) { rv = SECFailure; NSSUtilLogger.msg(ERROR, "Failed to add VOMS AC sequence OID"); } else { NSSUtilLogger.msg(DEBUG, "Succeeded to add VOMS AC sequence OID, tag %d is returned", tag_vomsacseq); } return rv; } static char* nss_obtain_password(PK11SlotInfo* slot, PRBool retry, void *arg) { PasswordSource* source = (PasswordSource*)arg; if(!source) return NULL; std::string password; PasswordSource::Result result = source->Get(password, -1, -1); if(result != PasswordSource::PASSWORD) return NULL; return PL_strdup(password.c_str()); } bool nssInit(const std::string& configdir) { SECStatus rv; //Initialize NSPR PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 256); //Set the PKCS #11 strings for the internal token char* db_name = PL_strdup("internal (software) "); PK11_ConfigurePKCS11(NULL,NULL,NULL, db_name, NULL, NULL,NULL,NULL,8,1); //Initialize NSS and open the certificate database read-only rv = NSS_Initialize(configdir.c_str(), NULL, NULL, "secmod.db", 0);// NSS_INIT_READONLY); if (rv != SECSuccess) { rv = NSS_NoDB_Init(configdir.c_str()); } if (rv != SECSuccess) { // || 
NSS_InitTokens() != SECSuccess) { NSS_Shutdown(); NSSUtilLogger.msg(ERROR, "NSS initialization failed on certificate database: %s", configdir.c_str()); return false; } /* if (NSS_SetDomesticPolicy() != SECSuccess ){ NSS_Shutdown(); NSSUtilLogger.msg(ERROR, "NSS set domestic policy failed (%s) on certificate database %s", nss_error().c_str(), configdir.c_str()); return false; } */ PK11_SetPasswordFunc(nss_obtain_password); NSSUtilLogger.msg(INFO, "Succeeded to initialize NSS"); PORT_SetUCS2_ASCIIConversionFunction(p12u_ucs2_ascii_conversion_function); SEC_PKCS12EnableCipher(PKCS12_RC4_40, 1); SEC_PKCS12EnableCipher(PKCS12_RC4_128, 1); SEC_PKCS12EnableCipher(PKCS12_RC2_CBC_40, 1); SEC_PKCS12EnableCipher(PKCS12_RC2_CBC_128, 1); SEC_PKCS12EnableCipher(PKCS12_DES_56, 1); SEC_PKCS12EnableCipher(PKCS12_DES_EDE3_168, 1); SEC_PKCS12SetPreferredCipher(PKCS12_DES_EDE3_168, 1); RegisterDynamicOids(); return true; } static bool ReadPrivKeyAttribute(SECKEYPrivateKey* key, CK_ATTRIBUTE_TYPE type, std::vector* output) { SECItem item; SECStatus rv; rv = PK11_ReadRawAttribute(PK11_TypePrivKey, key, type, &item); if (rv != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to read attribute %x from private key.", type); return false; } output->assign(item.data, item.data + item.len); SECITEM_FreeItem(&item, PR_FALSE); return true; } static bool ExportPrivateKey(SECKEYPrivateKey* key, std::vector* output) { PrivateKeyInfoCodec private_key_info(true); // Manually read the component attributes of the private key and build up // the PrivateKeyInfo. if (!ReadPrivKeyAttribute(key, CKA_MODULUS, private_key_info.modulus()) || !ReadPrivKeyAttribute(key, CKA_PUBLIC_EXPONENT, private_key_info.public_exponent()) || !ReadPrivKeyAttribute(key, CKA_PRIVATE_EXPONENT, private_key_info.private_exponent()) || !ReadPrivKeyAttribute(key, CKA_PRIME_1, private_key_info.prime1()) || !ReadPrivKeyAttribute(key, CKA_PRIME_2, private_key_info.prime2()) || !ReadPrivKeyAttribute(key, CKA_EXPONENT_1, private_key_info.exponent1()) || !ReadPrivKeyAttribute(key, CKA_EXPONENT_2, private_key_info.exponent2()) || !ReadPrivKeyAttribute(key, CKA_COEFFICIENT, private_key_info.coefficient())) { return false; } return private_key_info.Export(output); } bool nssExportCertificate(const std::string& certname, const std::string& certfile) { CERTCertList* list; CERTCertificate* find_cert = NULL; CERTCertListNode* node; list = PK11_ListCerts(PK11CertListAll, NULL); for (node = CERT_LIST_HEAD(list); !CERT_LIST_END(node,list); node = CERT_LIST_NEXT(node)) { CERTCertificate* cert = node->cert; const char* nickname = (const char*)node->appData; if (!nickname) { nickname = cert->nickname; } if(nickname == NULL) continue; if (strcmp(certname.c_str(), nickname) == 0) { find_cert = CERT_DupCertificate(cert); break; } } if (list) { CERT_DestroyCertList(list); } if(find_cert) NSSUtilLogger.msg(INFO, "Succeeded to get credential"); else { NSSUtilLogger.msg(ERROR, "Failed to get credential"); return false; } PRFileDesc* out = NULL; out = PR_Open(certfile.c_str(), PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, 00660); output_cert(find_cert, true, out); PR_Close(out); /* char* passwd = "secretpw"; if (find_cert->slot) { SECKEYPrivateKey* privKey = PK11_FindKeyByDERCert(find_cert->slot, find_cert, passwd); if (privKey) { PRBool isExtractable; SECItem value; SECStatus rv; rv = PK11_ReadRawAttribute(PK11_TypePrivKey, privKey, CKA_EXTRACTABLE, &value); if (rv != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to read CKA_EXTRACTABLE attribute from private key."); return false; } if 
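      NOTE: the surrounding commented-out block is disabled code; it sketches
      how the private key behind the exported certificate could be located with
      PK11_FindKeyByDERCert, tested for the CKA_EXTRACTABLE flag through
      PK11_ReadRawAttribute, and pulled out with ExportPrivateKey when the
      token permits extraction.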
((value.len == 1) && (value.data != NULL)) isExtractable = !!(*(CK_BBOOL*)value.data); else rv = SECFailure; SECITEM_FreeItem(&value, PR_FALSE); if (rv == SECSuccess && !isExtractable) { NSSUtilLogger.msg(ERROR, "Private key is not extractable."); return false; } std::vector output; if(!ExportPrivateKey(privKey, &output)) NSSUtilLogger.msg(ERROR, "Failed to export private key"); NSSUtilLogger.msg(INFO, "Succeeded to get credential with name %s.", certname.c_str()); SECKEYPrivateKey* key = SECKEY_CopyPrivateKey(privKey); SECKEY_DestroyPrivateKey(privKey); if(key) NSSUtilLogger.msg(INFO, "Succeeded to copy private key"); } else NSSUtilLogger.msg(ERROR, "The private key is not accessible."); } else NSSUtilLogger.msg(INFO, "The certificate is without slot."); */ return true; } typedef struct p12uContextStr { char *filename; /* name of file */ PRFileDesc *file; /* pointer to file */ PRBool error; /* error occurred? */ SECItem *data; } p12uContext; static void p12u_WriteToExportFile(void *arg, const char *buf, unsigned long len) { p12uContext* p12cxt = (p12uContext*)arg; int writeLen; if(!p12cxt || (p12cxt->error == PR_TRUE)) return; if(p12cxt->file == NULL) { NSSUtilLogger.msg(ERROR, "p12 file is empty"); p12cxt->error = PR_TRUE; return; } writeLen = PR_Write(p12cxt->file, (unsigned char *)buf, (int32)len); if(writeLen != (int)len) { PR_Close(p12cxt->file); PR_Free(p12cxt->filename); p12cxt->filename = NULL; p12cxt->file = NULL; NSSUtilLogger.msg(ERROR, "Unable to write to p12 file"); p12cxt->error = PR_TRUE; } } static PRBool p12u_OpenExportFile(p12uContext *p12cxt, PRBool fileRead) { if(!p12cxt || !p12cxt->filename) return PR_FALSE; if(fileRead) { p12cxt->file = PR_Open(p12cxt->filename, PR_RDONLY, 0400); } else { p12cxt->file = PR_Open(p12cxt->filename, PR_CREATE_FILE | PR_RDWR | PR_TRUNCATE, 0600); } if(!p12cxt->file) { p12cxt->error = PR_TRUE; NSSUtilLogger.msg(ERROR, "Failed to open pk12 file"); return PR_FALSE; } return PR_TRUE; } static void p12u_DestroyExportFileInfo(p12uContext **exp_ptr, PRBool removeFile) { if(!exp_ptr || !(*exp_ptr)) { return; } if((*exp_ptr)->file != NULL) { PR_Close((*exp_ptr)->file); } if((*exp_ptr)->filename != NULL) { if(removeFile) { PR_Delete((*exp_ptr)->filename); } PR_Free((*exp_ptr)->filename); } PR_Free(*exp_ptr); *exp_ptr = NULL; } static p12uContext * p12u_InitFile(PRBool fileImport, char *filename) { p12uContext *p12cxt; PRBool fileExist; if(fileImport) fileExist = PR_TRUE; else fileExist = PR_FALSE; p12cxt = (p12uContext *)PORT_ZAlloc(sizeof(p12uContext)); if(!p12cxt) { NSSUtilLogger.msg(ERROR, "Failed to allocate p12 context"); return NULL; } p12cxt->error = PR_FALSE; p12cxt->filename = strdup(filename); if(!p12u_OpenExportFile(p12cxt, fileImport)) { p12u_DestroyExportFileInfo(&p12cxt, PR_FALSE); return NULL; } return p12cxt; } static CERTCertificate* FindIssuerCert(CERTCertificate* cert) { CERTCertificate* issuercert = NULL; issuercert = CERT_FindCertByName(cert->dbhandle, &cert->derIssuer); return issuercert; } static CERTCertList* RevertChain(CERTCertList* chain) { CERTCertListNode* node; CERTCertListNode* newnd; CERTCertListNode* head; CERTCertList* certlist = NULL; certlist = CERT_NewCertList(); for (node = CERT_LIST_HEAD(chain) ; !CERT_LIST_END(node, chain); node= CERT_LIST_NEXT(node)) { head = CERT_LIST_HEAD(certlist); newnd = (CERTCertListNode *)PORT_ArenaZAlloc(certlist->arena, sizeof(CERTCertListNode)); if(newnd == NULL ) return certlist; PR_INSERT_BEFORE(&newnd->links, &head->links); newnd->cert = node->cert; } return certlist; } static 
CERTCertList* FindCertChain(CERTCertificate* cert) { CERTCertificate* issuercert = NULL; CERTCertList* certlist = NULL; certlist = CERT_NewCertList(); //certlist = CERT_CertListFromCert(issuercert); //cert = CERT_DupCertificate(cert); CERT_AddCertToListTail(certlist, cert); do { issuercert = FindIssuerCert(cert); if(issuercert) { CERT_AddCertToListTail(certlist, issuercert); if(CERT_IsCACert(issuercert, NULL)) break; cert = issuercert; } else break; } while(true); return certlist; } //The following code is copied from nss source code tree, the file //mozilla/security/nss/lib/pkcs12/p12e.c, and file //mozilla/security/nss/lib/pkcs12/p12local.c //Because we need a new version of SEC_PKCS12AddKeyForCert which can //parse the cert chain of proxy /*************Start Here************/ /* Creates a new certificate bag and returns a pointer to it. If an error * occurs NULL is returned. */ sec_PKCS12CertBag * sec_PKCS12NewCertBag(PRArenaPool *arena, SECOidTag certType) { sec_PKCS12CertBag *certBag = NULL; SECOidData *bagType = NULL; SECStatus rv; void *mark = NULL; if(!arena) { return NULL; } mark = PORT_ArenaMark(arena); certBag = (sec_PKCS12CertBag *)PORT_ArenaZAlloc(arena, sizeof(sec_PKCS12CertBag)); if(!certBag) { PORT_ArenaRelease(arena, mark); PORT_SetError(SEC_ERROR_NO_MEMORY); return NULL; } bagType = SECOID_FindOIDByTag(certType); if(!bagType) { PORT_SetError(SEC_ERROR_NO_MEMORY); goto loser; } rv = SECITEM_CopyItem(arena, &certBag->bagID, &bagType->oid); if(rv != SECSuccess) { PORT_SetError(SEC_ERROR_NO_MEMORY); goto loser; } PORT_ArenaUnmark(arena, mark); return certBag; loser: PORT_ArenaRelease(arena, mark); return NULL; } /* Creates a safeBag of the specified type, and if bagData is specified, * the contents are set. The contents could be set later by the calling * routine. 
*/ sec_PKCS12SafeBag * sec_PKCS12CreateSafeBag(SEC_PKCS12ExportContext *p12ctxt, SECOidTag bagType, void *bagData) { sec_PKCS12SafeBag *safeBag; PRBool setName = PR_TRUE; void *mark = NULL; SECStatus rv = SECSuccess; SECOidData *oidData = NULL; if(!p12ctxt) { return NULL; } mark = PORT_ArenaMark(p12ctxt->arena); if(!mark) { PORT_SetError(SEC_ERROR_NO_MEMORY); return NULL; } safeBag = (sec_PKCS12SafeBag *)PORT_ArenaZAlloc(p12ctxt->arena, sizeof(sec_PKCS12SafeBag)); if(!safeBag) { PORT_ArenaRelease(p12ctxt->arena, mark); PORT_SetError(SEC_ERROR_NO_MEMORY); return NULL; } /* set the bags content based upon bag type */ switch(bagType) { case SEC_OID_PKCS12_V1_KEY_BAG_ID: safeBag->safeBagContent.pkcs8KeyBag = (SECKEYPrivateKeyInfo *)bagData; break; case SEC_OID_PKCS12_V1_CERT_BAG_ID: safeBag->safeBagContent.certBag = (sec_PKCS12CertBag *)bagData; break; case SEC_OID_PKCS12_V1_CRL_BAG_ID: safeBag->safeBagContent.crlBag = (sec_PKCS12CRLBag *)bagData; break; case SEC_OID_PKCS12_V1_SECRET_BAG_ID: safeBag->safeBagContent.secretBag = (sec_PKCS12SecretBag *)bagData; break; case SEC_OID_PKCS12_V1_PKCS8_SHROUDED_KEY_BAG_ID: safeBag->safeBagContent.pkcs8ShroudedKeyBag = (SECKEYEncryptedPrivateKeyInfo *)bagData; break; case SEC_OID_PKCS12_V1_SAFE_CONTENTS_BAG_ID: safeBag->safeBagContent.safeContents = (sec_PKCS12SafeContents *)bagData; setName = PR_FALSE; break; default: goto loser; } oidData = SECOID_FindOIDByTag(bagType); if(oidData) { rv = SECITEM_CopyItem(p12ctxt->arena, &safeBag->safeBagType, &oidData->oid); if(rv != SECSuccess) { PORT_SetError(SEC_ERROR_NO_MEMORY); goto loser; } } else { goto loser; } safeBag->arena = p12ctxt->arena; PORT_ArenaUnmark(p12ctxt->arena, mark); return safeBag; loser: if(mark) { PORT_ArenaRelease(p12ctxt->arena, mark); } return NULL; } /* this function converts a password to unicode and encures that the * required double 0 byte be placed at the end of the string */ PRBool sec_pkcs12_convert_item_to_unicode(PRArenaPool *arena, SECItem *dest, SECItem *src, PRBool zeroTerm, PRBool asciiConvert, PRBool toUnicode) { PRBool success = PR_FALSE; if(!src || !dest) { PORT_SetError(SEC_ERROR_INVALID_ARGS); return PR_FALSE; } dest->len = src->len * 3 + 2; if(arena) { dest->data = (unsigned char*)PORT_ArenaZAlloc(arena, dest->len); } else { dest->data = (unsigned char*)PORT_ZAlloc(dest->len); } if(!dest->data) { dest->len = 0; return PR_FALSE; } if(!asciiConvert) { success = PORT_UCS2_UTF8Conversion(toUnicode, src->data, src->len, dest->data, dest->len, &dest->len); } else { #ifndef IS_LITTLE_ENDIAN PRBool swapUnicode = PR_FALSE; #else PRBool swapUnicode = PR_TRUE; #endif success = PORT_UCS2_ASCIIConversion(toUnicode, src->data, src->len, dest->data, dest->len, &dest->len, swapUnicode); } if(!success) { if(!arena) { PORT_Free(dest->data); dest->data = NULL; dest->len = 0; } return PR_FALSE; } if((dest->data[dest->len-1] || dest->data[dest->len-2]) && zeroTerm) { if(dest->len + 2 > 3 * src->len) { if(arena) { dest->data = (unsigned char*)PORT_ArenaGrow(arena, dest->data, dest->len, dest->len + 2); } else { dest->data = (unsigned char*)PORT_Realloc(dest->data, dest->len + 2); } if(!dest->data) { return PR_FALSE; } } dest->len += 2; dest->data[dest->len-1] = dest->data[dest->len-2] = 0; } return PR_TRUE; } /* sec_PKCS12AddAttributeToBag * adds an attribute to a safeBag. currently, the only attributes supported * are those which are specified within PKCS 12. 
* * p12ctxt - the export context * safeBag - the safeBag to which attributes are appended * attrType - the attribute type * attrData - the attribute data */ SECStatus sec_PKCS12AddAttributeToBag(SEC_PKCS12ExportContext *p12ctxt, sec_PKCS12SafeBag *safeBag, SECOidTag attrType, SECItem *attrData) { sec_PKCS12Attribute *attribute; void *mark = NULL, *dummy = NULL; SECOidData *oiddata = NULL; SECItem unicodeName = { siBuffer, NULL, 0}; void *src = NULL; unsigned int nItems = 0; SECStatus rv; if(!safeBag || !p12ctxt) { return SECFailure; } mark = PORT_ArenaMark(safeBag->arena); /* allocate the attribute */ attribute = (sec_PKCS12Attribute *)PORT_ArenaZAlloc(safeBag->arena, sizeof(sec_PKCS12Attribute)); if(!attribute) { PORT_SetError(SEC_ERROR_NO_MEMORY); goto loser; } /* set up the attribute */ oiddata = SECOID_FindOIDByTag(attrType); if(!oiddata) { PORT_SetError(SEC_ERROR_NO_MEMORY); goto loser; } if(SECITEM_CopyItem(p12ctxt->arena, &attribute->attrType, &oiddata->oid) != SECSuccess) { PORT_SetError(SEC_ERROR_NO_MEMORY); goto loser; } nItems = 1; switch(attrType) { case SEC_OID_PKCS9_LOCAL_KEY_ID: { src = attrData; break; } case SEC_OID_PKCS9_FRIENDLY_NAME: { if(!sec_pkcs12_convert_item_to_unicode(p12ctxt->arena, &unicodeName, attrData, PR_FALSE, PR_FALSE, PR_TRUE)) { goto loser; } src = &unicodeName; break; } default: goto loser; } /* append the attribute to the attribute value list */ attribute->attrValue = (SECItem **)PORT_ArenaZAlloc(p12ctxt->arena, ((nItems + 1) * sizeof(SECItem *))); if(!attribute->attrValue) { PORT_SetError(SEC_ERROR_NO_MEMORY); goto loser; } /* XXX this will need to be changed if attributes requiring more than * one element are ever used. */ attribute->attrValue[0] = (SECItem *)PORT_ArenaZAlloc(p12ctxt->arena, sizeof(SECItem)); if(!attribute->attrValue[0]) { PORT_SetError(SEC_ERROR_NO_MEMORY); goto loser; } attribute->attrValue[1] = NULL; rv = SECITEM_CopyItem(p12ctxt->arena, attribute->attrValue[0], (SECItem*)src); if(rv != SECSuccess) { PORT_SetError(SEC_ERROR_NO_MEMORY); goto loser; } /* append the attribute to the safeBag attributes */ if(safeBag->nAttribs) { dummy = PORT_ArenaGrow(p12ctxt->arena, safeBag->attribs, ((safeBag->nAttribs + 1) * sizeof(sec_PKCS12Attribute *)), ((safeBag->nAttribs + 2) * sizeof(sec_PKCS12Attribute *))); safeBag->attribs = (sec_PKCS12Attribute **)dummy; } else { safeBag->attribs = (sec_PKCS12Attribute **)PORT_ArenaZAlloc(p12ctxt->arena, 2 * sizeof(sec_PKCS12Attribute *)); dummy = safeBag->attribs; } if(!dummy) { goto loser; } safeBag->attribs[safeBag->nAttribs] = attribute; safeBag->attribs[++safeBag->nAttribs] = NULL; PORT_ArenaUnmark(p12ctxt->arena, mark); return SECSuccess; loser: if(mark) { PORT_ArenaRelease(p12ctxt->arena, mark); } return SECFailure; } /********************************* * Routines to handle the exporting of the keys and certificates *********************************/ /* creates a safe contents which safeBags will be appended to */ sec_PKCS12SafeContents * sec_PKCS12CreateSafeContents(PRArenaPool *arena) { sec_PKCS12SafeContents *safeContents; if(arena == NULL) { return NULL; } /* create the safe contents */ safeContents = (sec_PKCS12SafeContents *)PORT_ArenaZAlloc(arena, sizeof(sec_PKCS12SafeContents)); if(!safeContents) { PORT_SetError(SEC_ERROR_NO_MEMORY); goto loser; } /* set up the internal contents info */ safeContents->safeBags = NULL; safeContents->arena = arena; safeContents->bagCount = 0; return safeContents; loser: return NULL; } /* appends a safe bag to a safeContents using the specified arena. 
*/ SECStatus sec_pkcs12_append_bag_to_safe_contents(PRArenaPool *arena, sec_PKCS12SafeContents *safeContents, sec_PKCS12SafeBag *safeBag) { void *mark = NULL, *dummy = NULL; if(!arena || !safeBag || !safeContents) { return SECFailure; } mark = PORT_ArenaMark(arena); if(!mark) { PORT_SetError(SEC_ERROR_NO_MEMORY); return SECFailure; } /* allocate space for the list, or reallocate to increase space */ if(!safeContents->safeBags) { safeContents->safeBags = (sec_PKCS12SafeBag **)PORT_ArenaZAlloc(arena, (2 * sizeof(sec_PKCS12SafeBag *))); dummy = safeContents->safeBags; safeContents->bagCount = 0; } else { dummy = PORT_ArenaGrow(arena, safeContents->safeBags, (safeContents->bagCount + 1) * sizeof(sec_PKCS12SafeBag *), (safeContents->bagCount + 2) * sizeof(sec_PKCS12SafeBag *)); safeContents->safeBags = (sec_PKCS12SafeBag **)dummy; } if(!dummy) { PORT_ArenaRelease(arena, mark); PORT_SetError(SEC_ERROR_NO_MEMORY); return SECFailure; } /* append the bag at the end and null terminate the list */ safeContents->safeBags[safeContents->bagCount++] = safeBag; safeContents->safeBags[safeContents->bagCount] = NULL; PORT_ArenaUnmark(arena, mark); return SECSuccess; } /* appends a safeBag to a specific safeInfo. */ SECStatus sec_pkcs12_append_bag(SEC_PKCS12ExportContext *p12ctxt, SEC_PKCS12SafeInfo *safeInfo, sec_PKCS12SafeBag *safeBag) { sec_PKCS12SafeContents *dest; SECStatus rv = SECFailure; if(!p12ctxt || !safeBag || !safeInfo) { return SECFailure; } if(!safeInfo->safe) { safeInfo->safe = sec_PKCS12CreateSafeContents(p12ctxt->arena); if(!safeInfo->safe) { return SECFailure; } } dest = safeInfo->safe; rv = sec_pkcs12_append_bag_to_safe_contents(p12ctxt->arena, dest, safeBag); if(rv == SECSuccess) { safeInfo->itemCount++; } return rv; } /* compute the thumbprint of the DER cert and create a digest info * to store it in and return the digest info. * a return of NULL indicates an error. */ SGNDigestInfo * sec_pkcs12_compute_thumbprint(SECItem *der_cert) { SGNDigestInfo *thumb = NULL; SECItem digest; PRArenaPool *temparena = NULL; SECStatus rv = SECFailure; if(der_cert == NULL) return NULL; temparena = PORT_NewArena(SEC_ASN1_DEFAULT_ARENA_SIZE); if(temparena == NULL) { return NULL; } digest.data = (unsigned char *)PORT_ArenaZAlloc(temparena, sizeof(unsigned char) * SHA1_LENGTH); /* digest data and create digest info */ if(digest.data != NULL) { digest.len = SHA1_LENGTH; rv = PK11_HashBuf(SEC_OID_SHA1, digest.data, der_cert->data, der_cert->len); if(rv == SECSuccess) { thumb = SGN_CreateDigestInfo(SEC_OID_SHA1, digest.data, digest.len); } else { PORT_SetError(SEC_ERROR_NO_MEMORY); } } else { PORT_SetError(SEC_ERROR_NO_MEMORY); } PORT_FreeArena(temparena, PR_TRUE); return thumb; } /* SEC_PKCS12AddKeyForCert * Extracts the key associated with a particular certificate and exports * it. * * p12ctxt - the export context * safe - the safeInfo to place the key in * nestedDest - the nested safeContents to place a key * cert - the certificate which the key belongs to * shroudKey - encrypt the private key for export. This value should * always be true. lower level code will not allow the export * of unencrypted private keys. 
* algorithm - the algorithm with which to encrypt the private key * pwitem - the password to encrypt the private key with * keyId - the keyID attribute * nickName - the nickname attribute */ SECStatus my_SEC_PKCS12AddKeyForCert(SEC_PKCS12ExportContext *p12ctxt, SEC_PKCS12SafeInfo *safe, void *nestedDest, CERTCertificate *cert, PRBool shroudKey, SECOidTag algorithm, SECItem *pwitem, SECItem *keyId, SECItem *nickName) { void *mark; void *keyItem; SECOidTag keyType; SECStatus rv = SECFailure; SECItem nickname = {siBuffer,NULL,0}, uniPwitem = {siBuffer, NULL, 0}; sec_PKCS12SafeBag *returnBag; if(!p12ctxt || !cert || !safe) { return SECFailure; } mark = PORT_ArenaMark(p12ctxt->arena); /* retrieve the key based upon the type that it is and * specify the type of safeBag to store the key in */ if(!shroudKey) { /* extract the key unencrypted. this will most likely go away */ SECKEYPrivateKeyInfo *pki = PK11_ExportPrivateKeyInfo(cert, p12ctxt->wincx); if(!pki) { PORT_ArenaRelease(p12ctxt->arena, mark); PORT_SetError(SEC_ERROR_PKCS12_UNABLE_TO_EXPORT_KEY); return SECFailure; } keyItem = PORT_ArenaZAlloc(p12ctxt->arena, sizeof(SECKEYPrivateKeyInfo)); if(!keyItem) { PORT_SetError(SEC_ERROR_NO_MEMORY); goto loser; } rv = SECKEY_CopyPrivateKeyInfo(p12ctxt->arena, (SECKEYPrivateKeyInfo *)keyItem, pki); keyType = SEC_OID_PKCS12_V1_KEY_BAG_ID; SECKEY_DestroyPrivateKeyInfo(pki, PR_TRUE); } else { /* extract the key encrypted */ SECKEYEncryptedPrivateKeyInfo *epki = NULL; PK11SlotInfo *slot = NULL; if(!sec_pkcs12_convert_item_to_unicode(p12ctxt->arena, &uniPwitem, pwitem, PR_TRUE, PR_TRUE, PR_TRUE)) { PORT_SetError(SEC_ERROR_NO_MEMORY); goto loser; } /* we want to make sure to take the key out of the key slot */ if(PK11_IsInternal(p12ctxt->slot)) { slot = PK11_GetInternalKeySlot(); } else { slot = PK11_ReferenceSlot(p12ctxt->slot); } epki = PK11_ExportEncryptedPrivateKeyInfo(slot, algorithm, &uniPwitem, cert, 1, p12ctxt->wincx); PK11_FreeSlot(slot); if(!epki) { PORT_SetError(SEC_ERROR_PKCS12_UNABLE_TO_EXPORT_KEY); goto loser; } keyItem = PORT_ArenaZAlloc(p12ctxt->arena, sizeof(SECKEYEncryptedPrivateKeyInfo)); if(!keyItem) { PORT_SetError(SEC_ERROR_NO_MEMORY); goto loser; } rv = SECKEY_CopyEncryptedPrivateKeyInfo(p12ctxt->arena, (SECKEYEncryptedPrivateKeyInfo *)keyItem, epki); keyType = SEC_OID_PKCS12_V1_PKCS8_SHROUDED_KEY_BAG_ID; SECKEY_DestroyEncryptedPrivateKeyInfo(epki, PR_TRUE); } if(rv != SECSuccess) { goto loser; } /* if no nickname specified, let's see if the certificate has a * nickname. 
*/ if(!nickName) { if(cert->nickname) { nickname.data = (unsigned char *)cert->nickname; nickname.len = PORT_Strlen(cert->nickname); nickName = &nickname; } } /* create the safe bag and set any attributes */ returnBag = sec_PKCS12CreateSafeBag(p12ctxt, keyType, keyItem); if(!returnBag) { rv = SECFailure; goto loser; } if(nickName) { if(sec_PKCS12AddAttributeToBag(p12ctxt, returnBag, SEC_OID_PKCS9_FRIENDLY_NAME, nickName) != SECSuccess) { goto loser; } } if(keyId) { if(sec_PKCS12AddAttributeToBag(p12ctxt, returnBag, SEC_OID_PKCS9_LOCAL_KEY_ID, keyId) != SECSuccess) { goto loser; } } if(nestedDest) { rv = sec_pkcs12_append_bag_to_safe_contents(p12ctxt->arena, (sec_PKCS12SafeContents*)nestedDest, returnBag); } else { rv = sec_pkcs12_append_bag(p12ctxt, safe, returnBag); } loser: if (rv != SECSuccess) { PORT_ArenaRelease(p12ctxt->arena, mark); } else { PORT_ArenaUnmark(p12ctxt->arena, mark); } return rv; } /*************End Here************/ //Our version of SEC_PKCS12AddCert static SECStatus my_SEC_PKCS12AddCert(SEC_PKCS12ExportContext* p12ctxt, SEC_PKCS12SafeInfo* safe, void* nestedDest, CERTCertificate* cert, CERTCertDBHandle* certDb, SECItem* keyId) { sec_PKCS12CertBag *certBag; sec_PKCS12SafeBag *safeBag; void *mark = NULL; CERTCertListNode* node; CERTCertList* certlist_tmp = NULL; CERTCertList* certlist = NULL; SECStatus rv; SECItem nick; if(!p12ctxt) return SECFailure; certlist_tmp = FindCertChain(cert); if(!certlist_tmp) { NSSUtilLogger.msg(ERROR, "Failed to find issuer certificate for proxy certificate"); goto err; } certlist = RevertChain(certlist_tmp); /* for (i=chain->len-1; i>=0; i--) { CERTCertificate *c; c = CERT_FindCertByDERCert(handle, &chain->certs[i]); for (j=i; jlen-1; j++) printf(" "); printf("\"%s\" [%s]\n\n", c->nickname, c->subjectName); CERT_DestroyCertificate(c); } */ for (node = CERT_LIST_HEAD(certlist); !CERT_LIST_END(node,certlist); node= CERT_LIST_NEXT(node)) { CERTCertificate* cert = node->cert; nick.type = siBuffer; nick.data = NULL; nick.len = 0; if(!cert) return SECFailure; mark = PORT_ArenaMark(p12ctxt->arena); certBag = sec_PKCS12NewCertBag(p12ctxt->arena, SEC_OID_PKCS9_X509_CERT); if(!certBag) goto err; if(SECITEM_CopyItem(p12ctxt->arena, &certBag->value.x509Cert, &cert->derCert) != SECSuccess) { goto err; } //If the certificate has a nickname, set the friendly name to that. 
if(cert->nickname) { nick.data = (unsigned char *)cert->nickname; nick.len = PORT_Strlen(cert->nickname); } safeBag = sec_PKCS12CreateSafeBag(p12ctxt, SEC_OID_PKCS12_V1_CERT_BAG_ID, certBag); if(!safeBag) goto err; // Add the friendly name and keyId attributes, if necessary if(nick.data) { if(sec_PKCS12AddAttributeToBag(p12ctxt, safeBag, SEC_OID_PKCS9_FRIENDLY_NAME, &nick) != SECSuccess) { goto err; } } if(keyId) { if(sec_PKCS12AddAttributeToBag(p12ctxt, safeBag, SEC_OID_PKCS9_LOCAL_KEY_ID, keyId) != SECSuccess) { goto err; } } // Append the cert safeBag if(nestedDest) { rv = sec_pkcs12_append_bag_to_safe_contents(p12ctxt->arena, (sec_PKCS12SafeContents*)nestedDest, safeBag); } else { rv = sec_pkcs12_append_bag(p12ctxt, safe, safeBag); } if(rv != SECSuccess) goto err; PORT_ArenaUnmark(p12ctxt->arena, mark); } return SECSuccess; err: if(mark) PORT_ArenaRelease(p12ctxt->arena, mark); if(certlist_tmp) CERT_DestroyCertList(certlist_tmp); if(certlist) CERT_DestroyCertList(certlist); return SECFailure; } //Our version of SEC_PKCS12AddCertAndKey SECStatus my_SEC_PKCS12AddCertAndKey(SEC_PKCS12ExportContext *p12ctxt, void *certSafe, void *certNestedDest, CERTCertificate *cert, CERTCertDBHandle *certDb, void *keySafe, void *keyNestedDest, PRBool shroudKey, SECItem *pwitem, SECOidTag algorithm) { SECStatus rv = SECFailure; SGNDigestInfo *digest = NULL; void *mark = NULL; if(!p12ctxt || !certSafe || !keySafe || !cert) { return SECFailure; } mark = PORT_ArenaMark(p12ctxt->arena); //Generate the thumbprint of the cert to use as a keyId digest = sec_pkcs12_compute_thumbprint(&cert->derCert); if(!digest) { PORT_ArenaRelease(p12ctxt->arena, mark); return SECFailure; } // add the certificate rv = my_SEC_PKCS12AddCert(p12ctxt, (SEC_PKCS12SafeInfo*)certSafe, (SEC_PKCS12SafeInfo*)certNestedDest, cert, certDb, &digest->digest); if(rv != SECSuccess) { goto loser; } /* add the key */ rv = my_SEC_PKCS12AddKeyForCert(p12ctxt, (SEC_PKCS12SafeInfo*)keySafe, keyNestedDest, cert, shroudKey, algorithm, pwitem, &digest->digest, NULL ); if(rv != SECSuccess) { goto loser; } SGN_DestroyDigestInfo(digest); PORT_ArenaUnmark(p12ctxt->arena, mark); return SECSuccess; loser: SGN_DestroyDigestInfo(digest); PORT_ArenaRelease(p12ctxt->arena, mark); return SECFailure; } bool nssOutputPKCS12(const std::string certname, char* outfile, char* slotpw, char* p12pw) { PasswordSource* passphrase = NULL; if(slotpw) { passphrase = new PasswordSourceString(slotpw); } else { passphrase = new PasswordSourceInteractive("TODO: prompt here",false); } PasswordSource* p12passphrase = NULL; if(p12pw) { p12passphrase = new PasswordSourceString(p12pw); } else { p12passphrase = new PasswordSourceNone(); } bool r = nssOutputPKCS12(certname, outfile, *passphrase, *p12passphrase); delete passphrase; delete p12passphrase; return r; } bool nssOutputPKCS12(const std::string certname, char* outfile, PasswordSource& passphrase, PasswordSource& p12passphrase) { SEC_PKCS12ExportContext *p12ecx = NULL; SEC_PKCS12SafeInfo *keySafe = NULL, *certSafe = NULL; SECItem *pwitem = NULL; PK11SlotInfo *slot = NULL; p12uContext *p12cxt = NULL; CERTCertList* certlist = NULL; CERTCertListNode* node = NULL; slot = PK11_GetInternalKeySlot(); if (PK11_Authenticate(slot, PR_TRUE, (void*)&passphrase) != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to authenticate to PKCS11 slot %s", PK11_GetSlotName(slot)); goto err; } certlist = PK11_FindCertsFromNickname((char*)(certname.c_str()), (void*)&passphrase); if(!certlist) { NSSUtilLogger.msg(ERROR, "Failed to find certificates by 
nickname: %s", certname.c_str()); return false; } if((SECSuccess != CERT_FilterCertListForUserCerts(certlist)) || CERT_LIST_EMPTY(certlist)) { NSSUtilLogger.msg(ERROR, "No user certificate by nickname %s found", certname.c_str()); return false; } if(certlist) { CERTCertificate* cert = NULL; node = CERT_LIST_HEAD(certlist); if(node) cert = node->cert; if(cert) slot = cert->slot; //use the slot from the first matching //certificate to create the context. This is for keygen } if(!slot) { NSSUtilLogger.msg(ERROR, "Certificate does not have a slot"); goto err; } p12ecx = SEC_PKCS12CreateExportContext(NULL, NULL, slot, (void*)&passphrase); if(!p12ecx) { NSSUtilLogger.msg(ERROR, "Failed to create export context"); goto err; } //Password for the output PKCS12 file. { std::string p12pw; PasswordSource::Result p12res = p12passphrase.Get(p12pw,-1,-1); if(p12res == PasswordSource::PASSWORD) { pwitem = SECITEM_AllocItem(NULL, NULL, p12pw.length() + 1); if(pwitem) { memset(pwitem->data, 0, pwitem->len); // ?? memcpy(pwitem->data, p12pw.c_str(), pwitem->len); } } else if(p12res == PasswordSource::CANCEL) { NSSUtilLogger.msg(ERROR, "PKCS12 output password not provided"); goto err; } } if(pwitem != NULL) { if(SEC_PKCS12AddPasswordIntegrity(p12ecx, pwitem, SEC_OID_SHA1) != SECSuccess) { NSSUtilLogger.msg(ERROR, "PKCS12 add password integrity failed"); goto err; } } for(node = CERT_LIST_HEAD(certlist); !CERT_LIST_END(node,certlist); node = CERT_LIST_NEXT(node)) { CERTCertificate* cert = node->cert; if(!cert->slot) { NSSUtilLogger.msg(ERROR, "Certificate does not have a slot"); goto err; } keySafe = SEC_PKCS12CreateUnencryptedSafe(p12ecx); if(!SEC_PKCS12IsEncryptionAllowed() || PK11_IsFIPS() || !pwitem) { certSafe = keySafe; } else { certSafe = SEC_PKCS12CreatePasswordPrivSafe(p12ecx, pwitem, SEC_OID_PKCS12_V2_PBE_WITH_SHA1_AND_40_BIT_RC2_CBC); } if(!certSafe || !keySafe) { NSSUtilLogger.msg(ERROR, "Failed to create key or certificate safe"); goto err; } //issuercert = FindIssuerCert(cert); /* if(my_SEC_PKCS12AddCert(p12ecx, certSafe, NULL, cert, CERT_GetDefaultCertDB(), NULL) != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to add cert and key"); goto err; } */ if(my_SEC_PKCS12AddCertAndKey(p12ecx, certSafe, NULL, cert, CERT_GetDefaultCertDB(), keySafe, NULL, !pwitem ? 
PR_FALSE : PR_TRUE, pwitem, SEC_OID_PKCS12_V2_PBE_WITH_SHA1_AND_3KEY_TRIPLE_DES_CBC) != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to add certificate and key"); goto err; } } CERT_DestroyCertList(certlist); certlist = NULL; p12cxt = p12u_InitFile(PR_FALSE, outfile); if(!p12cxt) { NSSUtilLogger.msg(ERROR, "Failed to initialize PKCS12 file: %s", outfile); goto err; } if(SEC_PKCS12Encode(p12ecx, p12u_WriteToExportFile, p12cxt) != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to encode PKCS12"); goto err; } NSSUtilLogger.msg(INFO, "Succeeded to export PKCS12"); p12u_DestroyExportFileInfo(&p12cxt, PR_FALSE); if(pwitem) SECITEM_ZfreeItem(pwitem, PR_TRUE); SEC_PKCS12DestroyExportContext(p12ecx); return true; err: SEC_PKCS12DestroyExportContext(p12ecx); if (certlist) { CERT_DestroyCertList(certlist); certlist = NULL; } p12u_DestroyExportFileInfo(&p12cxt, PR_TRUE); if(pwitem) { SECITEM_ZfreeItem(pwitem, PR_TRUE); } return false; } static SECStatus DeleteCertOnly(const char* certname) { SECStatus rv; CERTCertificate *cert; CERTCertDBHandle* handle; handle = CERT_GetDefaultCertDB(); cert = CERT_FindCertByNicknameOrEmailAddr(handle, (char*)certname); if(!cert) { NSSUtilLogger.msg(INFO, "There is no certificate named %s found, the certificate could be removed when generating CSR", certname); return SECSuccess; } rv = SEC_DeletePermCertificate(cert); CERT_DestroyCertificate(cert); if(rv) { NSSUtilLogger.msg(ERROR, "Failed to delete certificate"); } return rv; } static SECStatus deleteKeyAndCert(const char* privkeyname, PasswordSource& passphrase, bool delete_cert) { SECKEYPrivateKeyList* list; SECKEYPrivateKeyListNode* node; int count = 0; SECStatus rv; PK11SlotInfo* slot; slot = PK11_GetInternalKeySlot(); if(!privkeyname) NSSUtilLogger.msg(WARNING, "The name of the private key to delete is empty"); if(PK11_NeedLogin(slot)) { rv = PK11_Authenticate(slot, PR_TRUE, (void*)&passphrase); if(rv != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to authenticate to token %s.", PK11_GetTokenName(slot)); return SECFailure; } } list = PK11_ListPrivKeysInSlot(slot, (char *)privkeyname, (void*)&passphrase); if(list == NULL) { NSSUtilLogger.msg(INFO, "No private key with nickname %s exist in NSS database", privkeyname); return SECFailure; } for(node=PRIVKEY_LIST_HEAD(list); !PRIVKEY_LIST_END(node,list); node=PRIVKEY_LIST_NEXT(node)) { char * keyname; static const char orphan[] = { "(orphan)" }; CERTCertificate* cert; keyname = PK11_GetPrivateKeyNickname(node->key); if(!keyname || !keyname[0]) { PORT_Free((void *)keyname); keyname = NULL; cert = PK11_GetCertFromPrivateKey(node->key); if(cert) { if(cert->nickname && cert->nickname[0]) { keyname = PORT_Strdup(cert->nickname); } else if(cert->emailAddr && cert->emailAddr[0]) { keyname = PORT_Strdup(cert->emailAddr); } CERT_DestroyCertificate(cert); } } if(!keyname || PL_strcmp(keyname, privkeyname)) { /* PKCS#11 module returned unwanted keys */ PORT_Free((void *)keyname); continue; } cert = PK11_GetCertFromPrivateKey(node->key); if(cert && delete_cert){ //Delete the private key and the cert related rv = PK11_DeleteTokenCertAndKey(cert, (void*)&passphrase); if(rv != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to delete private key and certificate"); CERT_DestroyCertificate(cert); continue; } CERT_DestroyCertificate(cert); } else{ //Delete the private key without deleting the cert related //rv = PK11_DeleteTokenPrivateKey(node->key, PR_FALSE); rv = PK11_DestroyTokenObject(node->key->pkcs11Slot, node->key->pkcs11ID); if(rv != SECSuccess) { NSSUtilLogger.msg(ERROR, 
"Failed to delete private key"); continue; } } if(!keyname) keyname = (char *)orphan; if(keyname != (char *)orphan) PORT_Free((void *)keyname); count++; } SECKEY_DestroyPrivateKeyList(list); if(count == 0) { NSSUtilLogger.msg(WARNING, "Can not find key with name: %s", privkeyname); } if(slot) PK11_FreeSlot(slot); return SECSuccess; } static SECStatus DeleteKeyOnly(const char* privkeyname, PasswordSource& passphrase) { return deleteKeyAndCert(privkeyname, passphrase, false); } static SECStatus DeleteKeyAndCert(const char* privkeyname, PasswordSource& passphrase) { return deleteKeyAndCert(privkeyname, passphrase, true); } static SECStatus DeleteCertAndKey(const char* certname, PasswordSource& passphrase) { SECStatus rv; CERTCertificate* cert; PK11SlotInfo* slot; slot = PK11_GetInternalKeySlot(); if(PK11_NeedLogin(slot)) { SECStatus rv = PK11_Authenticate(slot, PR_TRUE, (void*)&passphrase); if(rv != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to authenticate to token %s.", PK11_GetTokenName(slot)); return SECFailure; } } cert = PK11_FindCertFromNickname((char*)certname, (void*)&passphrase); if(!cert) { PK11_FreeSlot(slot); return SECFailure; } rv = PK11_DeleteTokenCertAndKey(cert, (void*)&passphrase); if(rv != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to delete private key that attaches to certificate: %s", certname); } CERT_DestroyCertificate(cert); PK11_FreeSlot(slot); return rv; } static bool InputPrivateKey(std::vector& output, const std::string& privk_in) { EVP_PKEY* pkey=NULL; BIO* in = NULL; in = BIO_new(BIO_s_mem()); BIO_write(in, privk_in.c_str(), privk_in.length()); //PW_CB_DATA cb_data; //cb_data.password = (passphrase.empty()) ? NULL : (void*)(passphrase.c_str()); //cb_data.prompt_info = prompt_info.empty() ? NULL : prompt_info.c_str(); //if(!(pkey = PEM_read_bio_PrivateKey(keybio, NULL, passwordcb, &cb_data))) { if(!(pkey = PEM_read_bio_PrivateKey(in, NULL, NULL, NULL))) { int reason = ERR_GET_REASON(ERR_peek_error()); if(reason == PEM_R_BAD_BASE64_DECODE) NSSUtilLogger.msg(ERROR, "Can not read PEM private key: probably bad password"); if(reason == PEM_R_BAD_DECRYPT) NSSUtilLogger.msg(ERROR, "Can not read PEM private key: failed to decrypt"); if(reason == PEM_R_BAD_PASSWORD_READ) NSSUtilLogger.msg(ERROR, "Can not read PEM private key: failed to obtain password"); if(reason == PEM_R_PROBLEMS_GETTING_PASSWORD) NSSUtilLogger.msg(ERROR,"Can not read PEM private key: failed to obtain password"); NSSUtilLogger.msg(ERROR, "Can not read PEM private key"); } BIO_free(in); PKCS8_PRIV_KEY_INFO *p8inf = NULL; if (pkey) { if (!(p8inf = EVP_PKEY2PKCS8(pkey))) { NSSUtilLogger.msg(ERROR, "Failed to convert EVP_PKEY to PKCS8"); EVP_PKEY_free(pkey); return false; } BIO* key = NULL; key = BIO_new(BIO_s_mem()); i2d_PKCS8_PRIV_KEY_INFO_bio(key, p8inf); std::string privk_out; for(;;) { char s[256]; int l = BIO_read(key,s,sizeof(s)); if(l <= 0) break; privk_out.append(s,l); } std::string::iterator it; for(it = privk_out.begin() ; it < privk_out.end(); it++) output.push_back(*it); BIO_free(key); } EVP_PKEY_free(pkey); PKCS8_PRIV_KEY_INFO_free(p8inf); return true; } static bool OutputPrivateKey(const std::vector& input, std::string& privk_out) { std::stringstream strstream; std::vector::const_iterator it; for(it = input.begin(); it != input.end(); it++) strstream<<(*it); BIO* key= NULL; PKCS8_PRIV_KEY_INFO* p8info = NULL; EVP_PKEY* pkey=NULL; key = BIO_new(BIO_s_mem()); std::string str = strstream.str(); BIO_write(key, str.c_str(), str.length()); p8info = d2i_PKCS8_PRIV_KEY_INFO_bio(key, NULL); 
if(p8info == NULL) { NSSUtilLogger.msg(ERROR, "Failed to load private key"); return false; } else NSSUtilLogger.msg(INFO, "Succeeded to load PrivateKeyInfo"); if(p8info) { pkey = EVP_PKCS82PKEY(p8info); if(pkey == NULL) { NSSUtilLogger.msg(ERROR, "Failed to convert PrivateKeyInfo to EVP_PKEY"); BIO_free(key); return false; } else NSSUtilLogger.msg(INFO, "Succeeded to convert PrivateKeyInfo to EVP_PKEY"); } BIO* out = NULL; //out = BIO_new_file (outfile, "wb"); out = BIO_new(BIO_s_mem()); //char* passout = "secretpw"; //PEM_write_bio_PrivateKey(out, pkey, EVP_des_ede3_cbc(), NULL, 0, NULL, passout); PEM_write_bio_PrivateKey(out, pkey, NULL, NULL, 0, NULL, NULL); privk_out.clear(); for(;;) { char s[256]; int l = BIO_read(out,s,sizeof(s)); if(l <= 0) break; privk_out.append(s,l); } BIO_free(key); BIO_free(out); PKCS8_PRIV_KEY_INFO_free(p8info); return true; } static bool ImportDERPrivateKey(PK11SlotInfo* slot, const std::vector& input, const std::string& name) { SECItem der_private_key_info; SECStatus rv; der_private_key_info.data = const_cast(&input.front()); der_private_key_info.len = input.size(); //The private key is set to be used for //key unwrapping, data decryption, and signature generation. SECItem nickname; nickname.data = (unsigned char*)(name.c_str()); nickname.len = name.size(); const unsigned int key_usage = KU_KEY_ENCIPHERMENT | KU_DATA_ENCIPHERMENT | KU_DIGITAL_SIGNATURE; rv = PK11_ImportDERPrivateKeyInfoAndReturnKey( slot, &der_private_key_info, &nickname, NULL, PR_TRUE, PR_FALSE, key_usage, NULL, NULL);//&privKey, NULL); if(rv != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to import private key"); return false; } else NSSUtilLogger.msg(INFO, "Succeeded to import private key"); return true; } static bool GenerateKeyPair(PasswordSource& passphrase, SECKEYPublicKey **pubk, SECKEYPrivateKey **privk, std::string& privk_str, int keysize, const std::string& nick_str) { PK11RSAGenParams rsaParams; rsaParams.keySizeInBits = keysize; rsaParams.pe = 0x10001; PK11SlotInfo* slot = NULL; slot = PK11_GetInternalKeySlot(); if(PK11_Authenticate(slot, PR_TRUE, (void*)&passphrase) != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to authenticate to key database"); if(slot) PK11_FreeSlot(slot); return false; } *privk = PK11_GenerateKeyPair(slot, CKM_RSA_PKCS_KEY_PAIR_GEN, &rsaParams, pubk, PR_FALSE, PR_FALSE, NULL); //Only by setting the "isPerm" parameter to be false, the private key can be export if(*privk != NULL && *pubk != NULL) NSSUtilLogger.msg(DEBUG, "Succeeded to generate public/private key pair"); else { NSSUtilLogger.msg(ERROR, "Failed to generate public/private key pair"); if(slot) PK11_FreeSlot(slot); return false; } std::vector output; if(!ExportPrivateKey(*privk, &output)) NSSUtilLogger.msg(ERROR, "Failed to export private key"); OutputPrivateKey(output, privk_str); ImportDERPrivateKey(slot, output, nick_str); if(slot) PK11_FreeSlot(slot); return true; } static bool ImportPrivateKey(PasswordSource& passphrase, const std::string& keyfile, const std::string& nick_str) { BIO* key = NULL; key = BIO_new_file(keyfile.c_str(), "r"); std::string key_str; for(;;) { char s[256]; int l = BIO_read(key,s,sizeof(s)); if(l <= 0) break; key_str.append(s,l); } BIO_free_all(key); std::vector input; InputPrivateKey(input, key_str); PK11SlotInfo* slot = NULL; slot = PK11_GetInternalKeySlot(); if(PK11_Authenticate(slot, PR_TRUE, (void*)&passphrase) != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to authenticate to key database"); if(slot) PK11_FreeSlot(slot); return false; } 
DeleteKeyOnly((nick_str.c_str()), passphrase); ImportDERPrivateKey(slot, input, nick_str); if(slot) PK11_FreeSlot(slot); return true; } bool nssGenerateCSR(const std::string& privkey_name, const std::string& dn, const char* slotpw, const std::string& outfile, std::string& privk_str, bool ascii) { PasswordSource* passphrase = NULL; if(slotpw) { passphrase = new PasswordSourceString(slotpw); } else { passphrase = new PasswordSourceInteractive("TODO: prompt here",false); } bool r = nssGenerateCSR(privkey_name, dn, *passphrase, outfile, privk_str, ascii); delete passphrase; return r; } bool nssGenerateCSR(const std::string& privkey_name, const std::string& dn, Arc::PasswordSource& passphrase, const std::string& outfile, std::string& privk_str, bool ascii) { CERTCertificateRequest* req = NULL; CERTSubjectPublicKeyInfo* spki; SECKEYPrivateKey* privkey = NULL; SECKEYPublicKey* pubkey = NULL; CERTName* name = NULL; int keybits = 1024; PRArenaPool* arena; SECItem* encoding; SECOidTag signAlgTag; SECStatus rv; SECItem result; PRFileDesc* out = NULL; if(!dn.empty()) { name = CERT_AsciiToName((char*)(dn.c_str())); if(name == NULL) { NSSUtilLogger.msg(ERROR, "Failed to create subject name"); return false; } } //Remove the existing private key and related cert in nss db rv = DeleteKeyAndCert((privkey_name.c_str()), passphrase); if(!GenerateKeyPair(passphrase, &pubkey, &privkey, privk_str, keybits, privkey_name)) return false; //PK11_SetPrivateKeyNickname(privkey, privkey_name.c_str()); //privkey = SECKEY_CreateRSAPrivateKey(keybits, &pubkey, NULL); spki = SECKEY_CreateSubjectPublicKeyInfo(pubkey); req = CERT_CreateCertificateRequest(name, spki, NULL); if(req == NULL) { NSSUtilLogger.msg(ERROR, "Failed to create certificate request"); } if(pubkey != NULL) { SECKEY_DestroyPublicKey(pubkey); } if(spki != NULL) { SECKEY_DestroySubjectPublicKeyInfo(spki); } if(name) CERT_DestroyName(name); //Output the cert request arena = PORT_NewArena(DER_DEFAULT_CHUNKSIZE); if(!arena ) { NSSUtilLogger.msg(ERROR, "Failed to call PORT_NewArena"); return false; } //Encode the cert request encoding = SEC_ASN1EncodeItem(arena, NULL, req, SEC_ASN1_GET(CERT_CertificateRequestTemplate)); CERT_DestroyCertificateRequest(req); if (encoding == NULL){ PORT_FreeArena (arena, PR_FALSE); NSSUtilLogger.msg(ERROR, "Failed to encode the certificate request with DER format"); return false; } //Sign the cert request signAlgTag = SEC_GetSignatureAlgorithmOidTag(privkey->keyType, SEC_OID_UNKNOWN); if (signAlgTag == SEC_OID_UNKNOWN) { PORT_FreeArena (arena, PR_FALSE); NSSUtilLogger.msg(ERROR, "Unknown key or hash type"); return false; } rv = SEC_DerSignData(arena, &result, encoding->data, encoding->len, privkey, signAlgTag); if(rv) { PORT_FreeArena (arena, PR_FALSE); NSSUtilLogger.msg(ERROR, "Failed to sign the certificate request"); return false; } out = PR_Open(outfile.c_str(), PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, 00660); // Encode cert request with specified format if (ascii) { char* buf; int len, num; buf = BTOA_ConvertItemToAscii(&result); len = PL_strlen(buf); PR_fprintf(out, "%s\n", NS_CERTREQ_HEADER); num = PR_Write(out, buf, len); PORT_Free(buf); if(num != len) { PORT_FreeArena (arena, PR_FALSE); NSSUtilLogger.msg(ERROR, "Failed to output the certificate request as ASCII format"); return false; } PR_fprintf(out, "\n%s\n", NS_CERTREQ_TRAILER); } else { int num = PR_Write(out, result.data, result.len); if(num != (int)result.len) { PORT_FreeArena (arena, PR_FALSE); NSSUtilLogger.msg(ERROR,"Failed to output the certificate request as DER 
format"); return false; } } PORT_FreeArena (arena, PR_FALSE); PR_Close(out); if (privkey) { SECKEY_DestroyPrivateKey(privkey); } NSSUtilLogger.msg(INFO, "Succeeded to output the certificate request into %s", outfile.c_str()); return true; } SECStatus SECU_FileToItem(SECItem *dst, PRFileDesc *src) { PRFileInfo info; PRStatus status; status = PR_GetOpenFileInfo(src, &info); if(status != PR_SUCCESS) { return SECFailure; } dst->data = 0; if(!SECITEM_AllocItem(NULL, dst, info.size)) { SECITEM_FreeItem(dst, PR_FALSE); dst->data = NULL; return SECFailure; } PRInt32 num = PR_Read(src, dst->data, info.size); if (num != info.size) { SECITEM_FreeItem(dst, PR_FALSE); dst->data = NULL; return SECFailure; } return SECSuccess; } SECStatus SECU_ReadDERFromFile(SECItem* der, PRFileDesc* infile, bool ascii) { SECStatus rv; if (ascii) { //Convert ascii to binary/ SECItem filedata; char* data; char* body; // Read ascii data rv = SECU_FileToItem(&filedata, infile); data = (char *)filedata.data; if (!data) { NSSUtilLogger.msg(ERROR, "Failed to read data from input file"); return SECFailure; } //Remove headers and trailers from data if((body = strstr(data, "-----BEGIN")) != NULL) { char* trailer = NULL; data = body; body = PORT_Strchr(data, '\n'); if (!body) body = PORT_Strchr(data, '\r'); if (body) trailer = strstr(++body, "-----END"); if (trailer != NULL) { *trailer = '\0'; } else { NSSUtilLogger.msg(ERROR, "Input is without trailer\n"); PORT_Free(filedata.data); return SECFailure; } } else { body = data; } // Convert to binary rv = ATOB_ConvertAsciiToItem(der, body); if(rv) { NSSUtilLogger.msg(ERROR, "Failed to convert ASCII to DER"); PORT_Free(filedata.data); return SECFailure; } PORT_Free(filedata.data); } else{ // Read binary data rv = SECU_FileToItem(der, infile); if(rv) { NSSUtilLogger.msg(ERROR, "Failed to read data from input file"); return SECFailure; } } return SECSuccess; } static CERTCertificateRequest* getCertRequest(const std::string& infile, bool ascii) { CERTCertificateRequest* req = NULL; CERTSignedData signed_data; PRArenaPool* arena = NULL; SECItem req_der; PRFileDesc* in; SECStatus rv; in = PR_Open(infile.c_str(), PR_RDONLY, 0); req_der.data = NULL; do { arena = PORT_NewArena(DER_DEFAULT_CHUNKSIZE); if (arena == NULL) { rv = SECFailure; break; } rv = SECU_ReadDERFromFile(&req_der, in, ascii); if (rv) break; req = (CERTCertificateRequest*) PORT_ArenaZAlloc (arena, sizeof(CERTCertificateRequest)); if (!req) { rv = SECFailure; break; } req->arena = arena; PORT_Memset(&signed_data, 0, sizeof(signed_data)); rv = SEC_ASN1DecodeItem(arena, &signed_data, SEC_ASN1_GET(CERT_SignedDataTemplate), &req_der); if (rv) break; rv = SEC_ASN1DecodeItem(arena, req, SEC_ASN1_GET(CERT_CertificateRequestTemplate), &signed_data.data); if (rv) break; rv = CERT_VerifySignedDataWithPublicKeyInfo(&signed_data, &req->subjectPublicKeyInfo, NULL); } while (0); if (req_der.data) SECITEM_FreeItem(&req_der, PR_FALSE); if (rv) { NSSUtilLogger.msg(ERROR, "Certificate request is invalid"); if (arena) { PORT_FreeArena(arena, PR_FALSE); } req = NULL; } return req; } //With pathlen and with policy struct ProxyPolicy1 { SECItem policylanguage; SECItem policy; }; struct ProxyCertInfo1 { PLArenaPool *arena; SECItem pathlength; ProxyPolicy1 proxypolicy; }; const SEC_ASN1Template ProxyPolicyTemplate1[] = { { SEC_ASN1_SEQUENCE, 0, NULL, sizeof(ProxyPolicy1) }, { SEC_ASN1_OBJECT_ID, offsetof(ProxyPolicy1, policylanguage), NULL, 0 }, { SEC_ASN1_OCTET_STRING, offsetof(ProxyPolicy1, policy), NULL, 0 }, { 0, 0, NULL, 0 } }; 
SEC_ASN1_CHOOSER_IMPLEMENT(ProxyPolicyTemplate1) const SEC_ASN1Template ProxyCertInfoTemplate1[] = { { SEC_ASN1_SEQUENCE, 0, NULL, sizeof(ProxyCertInfo1) }, { SEC_ASN1_INTEGER, offsetof(ProxyCertInfo1, pathlength), NULL, 0 }, { SEC_ASN1_INLINE, offsetof(ProxyCertInfo1, proxypolicy), ProxyPolicyTemplate1, 0 }, { 0, 0, NULL, 0 } }; SEC_ASN1_CHOOSER_IMPLEMENT(ProxyCertInfoTemplate1) //With pathlen and without policy struct ProxyPolicy2 { SECItem policylanguage; }; struct ProxyCertInfo2 { PLArenaPool *arena; SECItem pathlength; ProxyPolicy2 proxypolicy; }; const SEC_ASN1Template ProxyPolicyTemplate2[] = { { SEC_ASN1_SEQUENCE, 0, NULL, sizeof(ProxyPolicy2) }, { SEC_ASN1_OBJECT_ID, offsetof(ProxyPolicy2, policylanguage), NULL, 0 }, { 0, 0, NULL, 0 } }; SEC_ASN1_CHOOSER_IMPLEMENT(ProxyPolicyTemplate2) const SEC_ASN1Template ProxyCertInfoTemplate2[] = { { SEC_ASN1_SEQUENCE, 0, NULL, sizeof(ProxyCertInfo2) }, { SEC_ASN1_INTEGER, offsetof(ProxyCertInfo2, pathlength), NULL, 0 }, { SEC_ASN1_INLINE, offsetof(ProxyCertInfo2, proxypolicy), ProxyPolicyTemplate2, 0 }, { 0, 0, NULL, 0 } }; SEC_ASN1_CHOOSER_IMPLEMENT(ProxyCertInfoTemplate2) //Without pathlen and with policy struct ProxyPolicy3 { SECItem policylanguage; SECItem policy; }; struct ProxyCertInfo3 { PLArenaPool *arena; ProxyPolicy3 proxypolicy; }; const SEC_ASN1Template ProxyPolicyTemplate3[] = { { SEC_ASN1_SEQUENCE, 0, NULL, sizeof(ProxyPolicy3) }, { SEC_ASN1_OBJECT_ID, offsetof(ProxyPolicy3, policylanguage), NULL, 0 }, { SEC_ASN1_OCTET_STRING, offsetof(ProxyPolicy3, policy), NULL, 0 }, { 0, 0, NULL, 0 } }; SEC_ASN1_CHOOSER_IMPLEMENT(ProxyPolicyTemplate3) const SEC_ASN1Template ProxyCertInfoTemplate3[] = { { SEC_ASN1_SEQUENCE, 0, NULL, sizeof(ProxyCertInfo3) }, { SEC_ASN1_INLINE, offsetof(ProxyCertInfo3, proxypolicy), ProxyPolicyTemplate3, 0 }, { 0, 0, NULL, 0 } }; SEC_ASN1_CHOOSER_IMPLEMENT(ProxyCertInfoTemplate3) //Without pathlen and without policy struct ProxyPolicy4 { SECItem policylanguage; }; struct ProxyCertInfo4 { PLArenaPool *arena; ProxyPolicy4 proxypolicy; }; const SEC_ASN1Template ProxyPolicyTemplate4[] = { { SEC_ASN1_SEQUENCE, 0, NULL, sizeof(ProxyPolicy4) }, { SEC_ASN1_OBJECT_ID, offsetof(ProxyPolicy4, policylanguage), NULL, 0 }, { 0, 0, NULL, 0 } }; SEC_ASN1_CHOOSER_IMPLEMENT(ProxyPolicyTemplate4) const SEC_ASN1Template ProxyCertInfoTemplate4[] = { { SEC_ASN1_SEQUENCE, 0, NULL, sizeof(ProxyCertInfo4) }, { SEC_ASN1_INLINE, offsetof(ProxyCertInfo4, proxypolicy), ProxyPolicyTemplate4, 0 }, { 0, 0, NULL, 0 } }; SEC_ASN1_CHOOSER_IMPLEMENT(ProxyCertInfoTemplate4) SECStatus EncodeProxyCertInfoExtension1(PRArenaPool *arena, ProxyCertInfo1* info, SECItem* dest) { SECStatus rv = SECSuccess; PORT_Assert(info != NULL && dest != NULL); if(info == NULL || dest == NULL) { return SECFailure; } if(SEC_ASN1EncodeItem(arena, dest, info, SEC_ASN1_GET(ProxyCertInfoTemplate1)) == NULL) { rv = SECFailure; } return(rv); } SECStatus EncodeProxyCertInfoExtension2(PRArenaPool *arena, ProxyCertInfo2* info, SECItem* dest) { SECStatus rv = SECSuccess; PORT_Assert(info != NULL && dest != NULL); if(info == NULL || dest == NULL) { return SECFailure; } if(SEC_ASN1EncodeItem(arena, dest, info, SEC_ASN1_GET(ProxyCertInfoTemplate2)) == NULL) { rv = SECFailure; } return(rv); } SECStatus EncodeProxyCertInfoExtension3(PRArenaPool *arena, ProxyCertInfo3* info, SECItem* dest) { SECStatus rv = SECSuccess; PORT_Assert(info != NULL && dest != NULL); if(info == NULL || dest == NULL) { return SECFailure; } if(SEC_ASN1EncodeItem(arena, dest, info, 
SEC_ASN1_GET(ProxyCertInfoTemplate3)) == NULL) { rv = SECFailure; } return(rv); } SECStatus EncodeProxyCertInfoExtension4(PRArenaPool *arena, ProxyCertInfo4* info, SECItem* dest) { SECStatus rv = SECSuccess; PORT_Assert(info != NULL && dest != NULL); if(info == NULL || dest == NULL) { return SECFailure; } if(SEC_ASN1EncodeItem(arena, dest, info, SEC_ASN1_GET(ProxyCertInfoTemplate4)) == NULL) { rv = SECFailure; } return(rv); } typedef SECStatus (* EXTEN_EXT_VALUE_ENCODER) (PRArenaPool *extHandleArena, void *value, SECItem *encodedValue); SECStatus SECU_EncodeAndAddExtensionValue(PRArenaPool *arena, void *extHandle, void *value, PRBool criticality, int extenType, EXTEN_EXT_VALUE_ENCODER EncodeValueFn) { SECItem encodedValue; SECStatus rv; encodedValue.data = NULL; encodedValue.len = 0; do { rv = (*EncodeValueFn)(arena, value, &encodedValue); if (rv != SECSuccess) break; rv = CERT_AddExtension(extHandle, extenType, &encodedValue, criticality, PR_TRUE); if (rv != SECSuccess) break; } while (0); return (rv); } template static std::string to_string (const T& t) { std::stringstream ss; ss << t; return ss.str(); } static SECStatus AddProxyCertInfoExtension(void* extHandle, int pathlen, const char* policylang, const char* policy) { PRArenaPool *arena = NULL; SECStatus rv = SECSuccess; SECOidData* oid = NULL; SECItem policy_item; std::string pl_lang; SECOidTag tag; void* mark; pl_lang = policylang; if(pl_lang == "Any language") tag = tag_anylang; else if(pl_lang == "Inherit all") tag = tag_inheritall; else if(pl_lang == "Independent") tag = tag_independent; else { NSSUtilLogger.msg(ERROR, "The policy language: %s is not supported", policylang); goto error; } oid = SECOID_FindOIDByTag(tag); if((policy != NULL) && (pathlen != -1)) { ProxyCertInfo1* proxy_certinfo = NULL; arena = PORT_NewArena(DER_DEFAULT_CHUNKSIZE); if (!arena ) { NSSUtilLogger.msg(ERROR, "Failed to new arena"); return SECFailure; } proxy_certinfo = PORT_ArenaZNew(arena, ProxyCertInfo1); if ( proxy_certinfo== NULL) { return SECFailure; } proxy_certinfo->arena = arena; if((pathlen != 0) && (SEC_ASN1EncodeInteger(arena, &proxy_certinfo->pathlength, pathlen) == NULL)) { NSSUtilLogger.msg(ERROR, "Failed to create path length"); goto error; } if (oid == NULL || SECITEM_CopyItem(arena, &proxy_certinfo->proxypolicy.policylanguage, &oid->oid) == SECFailure) { NSSUtilLogger.msg(ERROR, "Failed to create policy language"); goto error; } proxy_certinfo->proxypolicy.policy.len = PORT_Strlen(policy); proxy_certinfo->proxypolicy.policy.data = (unsigned char*)PORT_ArenaStrdup(arena, policy); rv = SECU_EncodeAndAddExtensionValue(arena, extHandle, proxy_certinfo, PR_TRUE, tag_proxy, (EXTEN_EXT_VALUE_ENCODER)EncodeProxyCertInfoExtension1); } else if((policy == NULL) && (pathlen != -1)) { ProxyCertInfo2* proxy_certinfo = NULL; arena = PORT_NewArena(DER_DEFAULT_CHUNKSIZE); if (!arena ) { NSSUtilLogger.msg(ERROR, "Failed to new arena"); return SECFailure; } proxy_certinfo = PORT_ArenaZNew(arena, ProxyCertInfo2); if ( proxy_certinfo== NULL) { return SECFailure; } proxy_certinfo->arena = arena; if((pathlen != -1) && (SEC_ASN1EncodeInteger(arena, &proxy_certinfo->pathlength, pathlen) == NULL)) { NSSUtilLogger.msg(ERROR, "Failed to create path length"); goto error; } if (oid == NULL || SECITEM_CopyItem(arena, &proxy_certinfo->proxypolicy.policylanguage, &oid->oid) == SECFailure) { NSSUtilLogger.msg(ERROR, "Failed to create policy language"); goto error; } rv = SECU_EncodeAndAddExtensionValue(arena, extHandle, proxy_certinfo, PR_TRUE, tag_proxy, 
(EXTEN_EXT_VALUE_ENCODER)EncodeProxyCertInfoExtension2); } else if((policy != NULL) && (pathlen == -1)) { ProxyCertInfo3* proxy_certinfo = NULL; arena = PORT_NewArena(DER_DEFAULT_CHUNKSIZE); if (!arena ) { NSSUtilLogger.msg(ERROR, "Failed to new arena"); return SECFailure; } proxy_certinfo = PORT_ArenaZNew(arena, ProxyCertInfo3); if ( proxy_certinfo== NULL) { return SECFailure; } proxy_certinfo->arena = arena; if (oid == NULL || SECITEM_CopyItem(arena, &proxy_certinfo->proxypolicy.policylanguage, &oid->oid) == SECFailure) { NSSUtilLogger.msg(ERROR, "Failed to create policy language"); goto error; } proxy_certinfo->proxypolicy.policy.len = PORT_Strlen(policy); proxy_certinfo->proxypolicy.policy.data = (unsigned char*)PORT_ArenaStrdup(arena, policy); rv = SECU_EncodeAndAddExtensionValue(arena, extHandle, proxy_certinfo, PR_TRUE, tag_proxy, (EXTEN_EXT_VALUE_ENCODER)EncodeProxyCertInfoExtension3); } else { ProxyCertInfo4* proxy_certinfo = NULL; arena = PORT_NewArena(DER_DEFAULT_CHUNKSIZE); if (!arena ) { NSSUtilLogger.msg(ERROR, "Failed to new arena"); return SECFailure; } proxy_certinfo = PORT_ArenaZNew(arena, ProxyCertInfo4); if ( proxy_certinfo== NULL) { return SECFailure; } proxy_certinfo->arena = arena; if (oid == NULL || SECITEM_CopyItem(arena, &proxy_certinfo->proxypolicy.policylanguage, &oid->oid) == SECFailure) { NSSUtilLogger.msg(ERROR, "Failed to create policy language"); goto error; } rv = SECU_EncodeAndAddExtensionValue(arena, extHandle, proxy_certinfo, PR_TRUE, tag_proxy, (EXTEN_EXT_VALUE_ENCODER)EncodeProxyCertInfoExtension4); } error: if (arena) PORT_FreeArena(arena, PR_FALSE); return (rv); } // Add a binary blob as an extension; used specifically for the VOMS AC sequence static SECStatus AddVOMSACSeqExtension(void* extHandle, char* vomsacseq, int length) { SECStatus rv = SECFailure; SECOidData* oid = NULL; SECOidTag tag; tag = tag_vomsacseq; oid = SECOID_FindOIDByTag(tag); if(vomsacseq != NULL) { SECItem encodedValue; encodedValue.data = (unsigned char*)vomsacseq; encodedValue.len = length; rv = CERT_AddExtension(extHandle, tag, &encodedValue, PR_FALSE, PR_TRUE); } return (rv); } #if !defined(WIN32) && !defined(MACOS) // CERT_NameFromDERCert // CERT_IssuerNameFromDERCert // CERT_SerialNumberFromDERCert // The above NSS functions, which we need to use, are not exposed through // nss.def (this affects Windows and Linux but not MACOS; oddly, the *.def // file seems not to be used when packaging for MACOS). // We therefore use two different workarounds for Linux and Windows. // On Linux, the three functions are duplicated here. // On Windows, duplicating the code does not work (arcproxy always crashes // inside these three functions; the crash comes from // nssutil3.dll!SECOID_GetAlgorithmTag_Util and has not been solved yet). // The working alternative there is to change the nss.def file in the NSS // source tree to add these three functions, so that they are exported.
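// Minimal usage sketch, kept as a comment so it does not affect the build:
// it illustrates how the duplicated helper my_CERT_NameFromDERCert() defined
// below is combined with CERT_DerNameToAscii() to obtain a printable subject
// DN, mirroring what nssListUserCertificatesInfo() does further down. The
// wrapper name GetSubjectDN is purely illustrative and not part of this file;
// the PORT_Free() of derSubject.data assumes the PORT_Alloc() done inside
// my_CERT_NameFromDERCert().
//
// static std::string GetSubjectDN(CERTCertificate* cert) {
//   std::string dn;
//   SECItem derSubject;
//   if (my_CERT_NameFromDERCert(&cert->derCert, &derSubject) == SECSuccess) {
//     char* ascii = CERT_DerNameToAscii(&derSubject);
//     if (ascii) { dn = ascii; PORT_Free(ascii); }
//     PORT_Free(derSubject.data);
//   }
//   return dn;
// }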
const SEC_ASN1Template SEC_SkipTemplate[] = { { SEC_ASN1_SKIP, 0, NULL, 0 } }; SEC_ASN1_CHOOSER_IMPLEMENT(SEC_SkipTemplate) //Find the subjectName in a DER encoded certificate const SEC_ASN1Template SEC_CertSubjectTemplate[] = { { SEC_ASN1_SEQUENCE, 0, NULL, sizeof(SECItem) }, { SEC_ASN1_EXPLICIT | SEC_ASN1_OPTIONAL | SEC_ASN1_CONSTRUCTED | SEC_ASN1_CONTEXT_SPECIFIC | SEC_ASN1_XTRN | 0, 0, SEC_SkipTemplate, 0 }, /* version */ { SEC_ASN1_SKIP, 0, NULL, 0 }, /* serial number */ { SEC_ASN1_SKIP, 0, NULL, 0 }, /* signature algorithm */ { SEC_ASN1_SKIP, 0, NULL, 0 }, /* issuer */ { SEC_ASN1_SKIP, 0, NULL, 0 }, /* validity */ { SEC_ASN1_ANY, 0, NULL, 0 }, /* subject */ { SEC_ASN1_SKIP_REST, 0, NULL, 0 }, { 0, 0, NULL, 0 } }; SEC_ASN1_CHOOSER_IMPLEMENT(SEC_CertSubjectTemplate) //Find the issuerName in a DER encoded certificate const SEC_ASN1Template SEC_CertIssuerTemplate[] = { { SEC_ASN1_SEQUENCE, 0, NULL, sizeof(SECItem) }, { SEC_ASN1_EXPLICIT | SEC_ASN1_OPTIONAL | SEC_ASN1_CONSTRUCTED | SEC_ASN1_CONTEXT_SPECIFIC | SEC_ASN1_XTRN | 0, 0, SEC_SkipTemplate, 0 }, /* version */ { SEC_ASN1_SKIP, 0, NULL, 0 }, /* serial number */ { SEC_ASN1_SKIP, 0, NULL, 0 }, /* signature algorithm */ { SEC_ASN1_ANY, 0, NULL, 0 }, /* issuer */ { SEC_ASN1_SKIP_REST, 0, NULL, 0 }, { 0, 0, NULL, 0 } }; SEC_ASN1_CHOOSER_IMPLEMENT(SEC_CertIssuerTemplate) //Find the serialNumber in a DER encoded certificate const SEC_ASN1Template SEC_CertSerialNumberTemplate[] = { { SEC_ASN1_SEQUENCE, 0, NULL, sizeof(SECItem) }, { SEC_ASN1_EXPLICIT | SEC_ASN1_OPTIONAL | SEC_ASN1_CONSTRUCTED | SEC_ASN1_CONTEXT_SPECIFIC | SEC_ASN1_XTRN | 0, 0, SEC_SkipTemplate, 0 }, /* version */ { SEC_ASN1_ANY, 0, NULL, 0 }, /* serial number */ { SEC_ASN1_SKIP_REST, 0, NULL, 0 }, { 0, 0, NULL, 0 } }; SEC_ASN1_CHOOSER_IMPLEMENT(SEC_CertSerialNumberTemplate) /* Extract the subject name from a DER certificate This is a copy from nss code, due to the "undefined reference to" compiling issue */ SECStatus my_CERT_NameFromDERCert(SECItem *derCert, SECItem *derName) { int rv; PRArenaPool *arena; CERTSignedData sd; void *tmpptr; arena = PORT_NewArena(DER_DEFAULT_CHUNKSIZE); if ( ! arena ) { return(SECFailure); } PORT_Memset(&sd, 0, sizeof(CERTSignedData)); rv = SEC_QuickDERDecodeItem(arena, &sd, SEC_ASN1_GET(CERT_SignedDataTemplate), derCert); if ( rv ) { goto loser; } PORT_Memset(derName, 0, sizeof(SECItem)); rv = SEC_QuickDERDecodeItem(arena, derName, SEC_ASN1_GET(SEC_CertSubjectTemplate), &sd.data); if ( rv ) { goto loser; } tmpptr = derName->data; derName->data = (unsigned char*)PORT_Alloc(derName->len); if ( derName->data == NULL ) { goto loser; } PORT_Memcpy(derName->data, tmpptr, derName->len); PORT_FreeArena(arena, PR_FALSE); return(SECSuccess); loser: PORT_FreeArena(arena, PR_FALSE); return(SECFailure); } SECStatus my_CERT_IssuerNameFromDERCert(SECItem *derCert, SECItem *derName) { int rv; PRArenaPool *arena; CERTSignedData sd; void *tmpptr; arena = PORT_NewArena(DER_DEFAULT_CHUNKSIZE); if ( ! 
arena ) { return(SECFailure); } PORT_Memset(&sd, 0, sizeof(CERTSignedData)); rv = SEC_QuickDERDecodeItem(arena, &sd, SEC_ASN1_GET(CERT_SignedDataTemplate), derCert); if ( rv ) { goto loser; } PORT_Memset(derName, 0, sizeof(SECItem)); rv = SEC_QuickDERDecodeItem(arena, derName, SEC_ASN1_GET(SEC_CertIssuerTemplate), &sd.data); if ( rv ) { goto loser; } tmpptr = derName->data; derName->data = (unsigned char*)PORT_Alloc(derName->len); if ( derName->data == NULL ) { goto loser; } PORT_Memcpy(derName->data, tmpptr, derName->len); PORT_FreeArena(arena, PR_FALSE); return(SECSuccess); loser: PORT_FreeArena(arena, PR_FALSE); return(SECFailure); } SECStatus my_CERT_SerialNumberFromDERCert(SECItem *derCert, SECItem *derName) { int rv; PRArenaPool *arena; CERTSignedData sd; void *tmpptr; arena = PORT_NewArena(DER_DEFAULT_CHUNKSIZE); if ( ! arena ) { return(SECFailure); } PORT_Memset(&sd, 0, sizeof(CERTSignedData)); rv = SEC_QuickDERDecodeItem(arena, &sd, SEC_ASN1_GET(CERT_SignedDataTemplate), derCert); if ( rv ) { goto loser; } PORT_Memset(derName, 0, sizeof(SECItem)); rv = SEC_QuickDERDecodeItem(arena, derName, SEC_ASN1_GET(SEC_CertSerialNumberTemplate), &sd.data); if ( rv ) { goto loser; } tmpptr = derName->data; derName->data = (unsigned char*)PORT_Alloc(derName->len); if ( derName->data == NULL ) { goto loser; } PORT_Memcpy(derName->data, tmpptr, derName->len); PORT_FreeArena(arena, PR_FALSE); return(SECSuccess); loser: PORT_FreeArena(arena, PR_FALSE); return(SECFailure); } #endif // #if !defined(WIN32) && !defined(MACOS) void nssListUserCertificatesInfo(std::list& certInfolist) { CERTCertList* list; CERTCertificate* find_cert = NULL; CERTCertListNode* node; list = PK11_ListCerts(PK11CertListAll, NULL); for (node = CERT_LIST_HEAD(list); !CERT_LIST_END(node,list); node = CERT_LIST_NEXT(node)) { CERTCertificate* cert = node->cert; const char* nickname = (const char*)node->appData; if (!nickname) { nickname = cert->nickname; } if(nickname == NULL) continue; PRBool isUser = CERT_IsUserCert(cert); if(!isUser) continue; certInfo cert_info; cert_info.certname = nickname; SECStatus rv; std::string subject_dn; SECItem derSubject; #if !defined(WIN32) && !defined(MACOS) rv = my_CERT_NameFromDERCert(&cert->derCert, &derSubject); #else rv = CERT_NameFromDERCert(&cert->derCert, &derSubject); #endif if(rv == SECSuccess) { char* subjectName = CERT_DerNameToAscii(&derSubject); subject_dn = subjectName; if(subjectName) PORT_Free(subjectName); cert_info.subject_dn = subject_dn; } std::string issuer_dn; SECItem derIssuer; #if !defined(WIN32) && !defined(MACOS) rv = my_CERT_IssuerNameFromDERCert(&cert->derCert, &derIssuer); #else rv = CERT_IssuerNameFromDERCert(&cert->derCert, &derIssuer); #endif if(rv == SECSuccess) { char* issuerName = CERT_DerNameToAscii(&derIssuer); issuer_dn = issuerName; if(issuerName) PORT_Free(issuerName); cert_info.issuer_dn = issuer_dn; } cert_info.serial = 0; std::string serial; SECItem derSerial; #if !defined(WIN32) && !defined(MACOS) rv = my_CERT_SerialNumberFromDERCert (&cert->derCert, &derSerial); #else rv = CERT_SerialNumberFromDERCert (&cert->derCert, &derSerial); #endif if(rv == SECSuccess) { SECItem decodedValue; decodedValue.data = NULL; rv = SEC_ASN1DecodeItem (NULL, &decodedValue, SEC_ASN1_GET(SEC_IntegerTemplate), &derSerial); if (rv == SECSuccess) { unsigned long res; rv = SEC_ASN1DecodeInteger(&decodedValue, &res); if(rv == SECSuccess) cert_info.serial = res; } } PRTime notBefore, notAfter; rv = CERT_GetCertTimes(cert, ¬Before, ¬After); if(rv == SECSuccess) { cert_info.start = 
Arc::Time(notBefore/1000/1000); cert_info.end = Arc::Time(notAfter/1000/1000); certInfolist.push_back(cert_info); } } if (list) { CERT_DestroyCertList(list); } } static SECStatus copy_CERTName(CERTName* destName, CERTName* srcName) { PRArenaPool* poolp = NULL; SECStatus rv; if (destName->arena != NULL) { poolp = destName->arena; } else { poolp = PORT_NewArena(SEC_ASN1_DEFAULT_ARENA_SIZE); } if (poolp == NULL) { return SECFailure; } destName->arena = NULL; rv = CERT_CopyName(poolp, destName, srcName); destName->arena = poolp; return rv; } static SECStatus AddKeyUsageExtension (void* extHandle, CERTCertificate* issuercert) { SECStatus rv; SECItem bitStringValue; PRBool isCriticalExt = PR_TRUE; SECItem keyUsageValue = {siBuffer, NULL, 0}; unsigned char ku_value; rv = CERT_FindKeyUsageExtension(issuercert, &keyUsageValue); if(rv == SECFailure) { rv = (PORT_GetError () == SEC_ERROR_EXTENSION_NOT_FOUND) ? SECSuccess : SECFailure; return rv; } else { ku_value = keyUsageValue.data[0]; // mask off the key usage that should not be allowed for proxy if(ku_value & KU_NON_REPUDIATION) ku_value &=(~KU_NON_REPUDIATION); if(ku_value & KU_KEY_CERT_SIGN) ku_value &=(~KU_KEY_CERT_SIGN); if(ku_value & KU_CRL_SIGN) ku_value &=(~KU_CRL_SIGN); } PORT_Free (keyUsageValue.data); bitStringValue.data = &ku_value; bitStringValue.len = 1; return (CERT_EncodeAndAddBitStrExtension (extHandle, SEC_OID_X509_KEY_USAGE, &bitStringValue, isCriticalExt)); } bool nssCreateCert(const std::string& csrfile, const std::string& issuername, const char* passwd, const int duration, const std::string& vomsacseq, std::string& outfile, bool ascii) { CERTCertDBHandle* certhandle; CERTCertificate* issuercert = NULL; SECKEYPrivateKey* issuerkey = NULL; CERTValidity* validity; CERTCertificate* cert = NULL; PRExplodedTime extime; PRTime now, start, end; int serialnum; CERTCertificateRequest* req = NULL; void* ext_handle; PRArenaPool* arena; SECOidTag tag_sigalg; SECOidTag tag_hashalg; int pathlen = -1; const char* policylang = "Inherit all"; //TODO const char* policy = NULL;//"test policy"; //TODO CERTCertExtension** exts; SECItem cert_der; SECItem derSubject; void* dummy; SECItem* signed_cert = NULL; PRFileDesc* out = NULL; SECStatus rv = SECSuccess; bool ret = false; req = getCertRequest(csrfile, ascii); if(!req) { NSSUtilLogger.msg(ERROR, "Failed to parse certificate request from CSR file %s", csrfile.c_str()); return false; } certhandle = CERT_GetDefaultCertDB(); issuercert = CERT_FindCertByNicknameOrEmailAddr(certhandle, (char*)(issuername.c_str())); if(!issuercert) { NSSUtilLogger.msg(ERROR, "Can not find certificate with name %s", issuername.c_str()); return false; } now = PR_Now(); PR_ExplodeTime(now, PR_GMTParameters, &extime); extime.tm_min -= 5; start = PR_ImplodeTime(&extime); extime.tm_min +=5; extime.tm_hour += duration; end = PR_ImplodeTime (&extime); validity = CERT_CreateValidity(start, end); //Subject #if !defined(WIN32) && !defined(MACOS) my_CERT_NameFromDERCert(&issuercert->derCert, &derSubject); #else CERT_NameFromDERCert(&issuercert->derCert, &derSubject); #endif char* subjectName = CERT_DerNameToAscii(&derSubject); std::string subname_str = subjectName; srand(time(NULL)); unsigned long random_cn; random_cn = rand(); char* CN_name = NULL; CN_name = (char*)malloc(sizeof(long)*4 + 1); snprintf(CN_name, sizeof(long)*4 + 1, "%lu", random_cn); std::string str = "CN="; str.append(CN_name); str.append(","); subname_str.insert(0, str.c_str(), str.length()); NSSUtilLogger.msg(DEBUG, "Proxy subject: %s", subname_str.c_str()); 
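  // Worked example (hypothetical DN, illustrative only): the proxy subject is
  // formed by prepending a random decimal CN to the signer's subject string,
  // so a signer with subject "CN=John Doe,O=Grid,C=XX" would give a proxy
  // subject along the lines of "CN=1381720041,CN=John Doe,O=Grid,C=XX",
  // where the leading CN value comes from rand() above.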
if(CN_name) free(CN_name); CERTName* subName = NULL; if(!subname_str.empty()) subName = CERT_AsciiToName((char*)(subname_str.c_str())); if(subjectName) PORT_Free(subjectName); if (validity) { if(subName != NULL) { rv = copy_CERTName(&req->subject, subName); } cert = CERT_CreateCertificate(rand(), &issuercert->subject, validity, req); CERT_DestroyValidity(validity); if(subName) CERT_DestroyName(subName); } //Extensions ext_handle = CERT_StartCertExtensions (cert); if (ext_handle == NULL) { NSSUtilLogger.msg(ERROR, "Failed to start certificate extension"); goto error; } if(AddKeyUsageExtension(ext_handle, issuercert) != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to add key usage extension"); goto error; } if(AddProxyCertInfoExtension(ext_handle, pathlen, policylang, policy) != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to add proxy certificate information extension"); goto error; } if((!vomsacseq.empty()) && (AddVOMSACSeqExtension(ext_handle, (char*)(vomsacseq.c_str()), vomsacseq.length()) != SECSuccess)) { NSSUtilLogger.msg(ERROR, "Failed to add voms AC extension"); goto error; } if(req->attributes != NULL && req->attributes[0] != NULL && req->attributes[0]->attrType.data != NULL && req->attributes[0]->attrType.len > 0 && SECOID_FindOIDTag(&req->attributes[0]->attrType) == SEC_OID_PKCS9_EXTENSION_REQUEST) { rv = CERT_GetCertificateRequestExtensions(req, &exts); if(rv != SECSuccess) goto error; rv = CERT_MergeExtensions(ext_handle, exts); if (rv != SECSuccess) goto error; } CERT_FinishExtensions(ext_handle); //Sign the certificate issuerkey = PK11_FindKeyByAnyCert(issuercert, (char*)passwd); if(issuerkey == NULL) { NSSUtilLogger.msg(ERROR, "Failed to retrieve private key for issuer"); goto error; } arena = cert->arena; tag_hashalg = SEC_OID_SHA1; tag_sigalg = SEC_GetSignatureAlgorithmOidTag(issuerkey->keyType, tag_hashalg); if (tag_sigalg == SEC_OID_UNKNOWN) { NSSUtilLogger.msg(ERROR, "Unknown key or hash type of issuer"); goto error; } rv = SECOID_SetAlgorithmID(arena, &cert->signature, tag_sigalg, 0); if(rv != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to set signature algorithm ID"); goto error; } *(cert->version.data) = 2; cert->version.len = 1; cert_der.len = 0; cert_der.data = NULL; dummy = SEC_ASN1EncodeItem (arena, &cert_der, cert, SEC_ASN1_GET(CERT_CertificateTemplate)); if (!dummy) { NSSUtilLogger.msg(ERROR, "Failed to encode certificate"); goto error; } signed_cert = (SECItem *)PORT_ArenaZAlloc(arena, sizeof(SECItem)); if(signed_cert == NULL) { NSSUtilLogger.msg(ERROR, "Failed to allocate item for certificate data"); goto error; } rv = SEC_DerSignData(arena, signed_cert, cert_der.data, cert_der.len, issuerkey, tag_sigalg); if(rv != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to sign encoded certificate data"); //signed_cert will be freed when arena is freed. 
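    // (signed_cert was obtained with PORT_ArenaZAlloc() from cert->arena, so
    // it is released together with that arena; only the local pointer is
    // cleared here before jumping to the error label.)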
signed_cert = NULL; goto error; } cert->derCert = *signed_cert; out = PR_Open(outfile.c_str(), PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, 00660); if(!out) { NSSUtilLogger.msg(ERROR, "Failed to open file %s", outfile.c_str()); goto error; } if(ascii) { PR_fprintf(out, "%s\n%s\n%s\n", NS_CERT_HEADER, BTOA_DataToAscii(signed_cert->data, signed_cert->len), NS_CERT_TRAILER); } else { PR_Write(out, signed_cert->data, signed_cert->len); } if(out) PR_Close(out); NSSUtilLogger.msg(INFO, "Succeeded to output certificate to %s", outfile.c_str()); ret = true; error: if(issuerkey) SECKEY_DestroyPrivateKey(issuerkey); if(issuercert) CERT_DestroyCertificate(issuercert); if(req) CERT_DestroyCertificateRequest(req); return ret; } bool nssImportCert(char* slotpw, const std::string& certfile, const std::string& name, const char* trusts, bool ascii) { PasswordSource* passphrase = NULL; if(slotpw) { passphrase = new PasswordSourceString(slotpw); } else { passphrase = new PasswordSourceInteractive("TODO: prompt here",false); } bool r = nssImportCert(*passphrase, certfile, name, trusts, ascii); delete passphrase; return r; } bool nssImportCert(PasswordSource& passphrase, const std::string& certfile, const std::string& name, const char* trusts, bool ascii) { PK11SlotInfo* slot = NULL; CERTCertDBHandle* certhandle; CERTCertTrust* trust = NULL; CERTCertificate* cert = NULL; PRFileDesc* in = NULL; SECItem certder; SECStatus rv = SECSuccess; slot = PK11_GetInternalKeySlot(); if(PK11_Authenticate(slot, PR_TRUE, (void*)&passphrase) != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to authenticate to key database"); if(slot) PK11_FreeSlot(slot); return false; } in = PR_Open(certfile.c_str(), PR_RDONLY, 0); if(in == NULL) { NSSUtilLogger.msg(ERROR, "Failed to open input certificate file %s", certfile.c_str()); if(slot) PK11_FreeSlot(slot); return false; } certhandle = CERT_GetDefaultCertDB(); rv = DeleteCertOnly(name.c_str()); //rv = DeleteCertAndKey(name.c_str(), slotpw); if(rv == SECFailure) { PR_Close(in); PK11_FreeSlot(slot); } certder.data = NULL; do { rv = SECU_ReadDERFromFile(&certder, in, ascii); if(rv != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to read input certificate file"); break; } cert = CERT_DecodeCertFromPackage((char *)certder.data, certder.len); if(!cert) { NSSUtilLogger.msg(ERROR, "Failed to get certificate from certificate file"); rv = SECFailure; break; } //Create a cert trust trust = (CERTCertTrust *)PORT_ZAlloc(sizeof(CERTCertTrust)); if(!trust) { NSSUtilLogger.msg(ERROR, "Failed to allocate certificate trust"); rv = SECFailure; break; } rv = CERT_DecodeTrustString(trust, (char*)trusts); if(rv) { NSSUtilLogger.msg(ERROR, "Failed to decode trust string"); rv = SECFailure; break; } //Import the certificate rv = PK11_ImportCert(slot, cert, CK_INVALID_HANDLE, (char*)(name.c_str()), PR_FALSE); if(rv != SECSuccess) { if(PORT_GetError() == SEC_ERROR_TOKEN_NOT_LOGGED_IN) { if(PK11_Authenticate(slot, PR_TRUE, (void*)&passphrase) != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to authenticate to token %s", PK11_GetTokenName(slot)); rv = SECFailure; break; } rv = PK11_ImportCert(slot, cert, CK_INVALID_HANDLE, (char*)(name.c_str()), PR_FALSE); if(rv != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to add certificate to token or database"); break; } else NSSUtilLogger.msg(INFO, "Succeeded to import certificate"); } } else NSSUtilLogger.msg(INFO, "Succeeded to import certificate"); rv = CERT_ChangeCertTrust(certhandle, cert, trust); if(rv != SECSuccess) { if (PORT_GetError() == 
SEC_ERROR_TOKEN_NOT_LOGGED_IN) { if(PK11_Authenticate(slot, PR_TRUE, (void*)&passphrase) != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to authenticate to token %s", PK11_GetTokenName(slot)); rv = SECFailure; break; } rv = CERT_ChangeCertTrust(certhandle, cert, trust); if(rv != SECSuccess) { NSSUtilLogger.msg(ERROR, "Failed to add certificate to token or database"); break; } else NSSUtilLogger.msg(INFO, "Succeeded to change trusts to: %s", trusts); } } else NSSUtilLogger.msg(INFO, "Succeeded to change trusts to: %s", trusts); } while (0); PR_Close(in); PK11_FreeSlot(slot); CERT_DestroyCertificate (cert); PORT_Free(trust); PORT_Free(certder.data); if(rv == SECSuccess) return true; else return false; } bool nssImportCertAndPrivateKey(char* slotpw, const std::string& keyfile, const std::string& keyname, const std::string& certfile, const std::string& certname, const char* trusts, bool ascii) { PasswordSource* passphrase = NULL; if(slotpw) { passphrase = new PasswordSourceString(slotpw); } else { passphrase = new PasswordSourceInteractive("TODO: prompt here",false); } bool r = nssImportCertAndPrivateKey(*passphrase, keyfile, keyname, certfile, certname, trusts, ascii); delete passphrase; return r; } bool nssImportCertAndPrivateKey(PasswordSource& passphrase, const std::string& keyfile, const std::string& keyname, const std::string& certfile, const std::string& certname, const char* trusts, bool ascii) { bool res; res = ImportPrivateKey(passphrase, keyfile, keyname); if(!res) { NSSUtilLogger.msg(ERROR, "Failed to import private key from file: %s", keyfile.c_str()); return false; } res = nssImportCert(passphrase, certfile, certname, trusts, ascii); if(!res) { NSSUtilLogger.msg(ERROR, "Failed to import certificate from file: %s", certfile.c_str()); return false; } return true; } } nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/CertUtil.cpp0000644000000000000000000000012413153454775024044 xustar000000000000000027 mtime=1504598525.713781 27 atime=1513200574.629703 30 ctime=1513200659.136736928 nordugrid-arc-5.4.2/src/hed/libs/credential/CertUtil.cpp0000644000175000002070000005160713153454775024122 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "CertUtil.h" #define X509_CERT_DIR "X509_CERT_DIR" #ifndef WIN32 #define FILE_SEPARATOR "/" #else #define FILE_SEPARATOR "\\" #endif #define SIGNING_POLICY_FILE_EXTENSION ".signing_policy" namespace ArcCredential { #if (OPENSSL_VERSION_NUMBER < 0x10100000L) static X509_OBJECT* X509_OBJECT_new(void) { X509_OBJECT* obj = (X509_OBJECT*)std::malloc(sizeof(X509_OBJECT)); if(obj) { std::memset(obj, 0, sizeof(X509_OBJECT)); } return obj; } static void X509_OBJECT_free(X509_OBJECT* obj) { if(obj) { X509_OBJECT_free_contents(obj); std::free(obj); } } static X509_CRL *X509_OBJECT_get0_X509_CRL(X509_OBJECT *obj) { if(!obj) return NULL; if(obj->type != X509_LU_CRL) return NULL; return obj->data.crl; } #define X509_STORE_CTX_get0_chain X509_STORE_CTX_get_chain #define X509_CRL_get0_lastUpdate X509_CRL_get_lastUpdate #define X509_CRL_get0_nextUpdate X509_CRL_get_nextUpdate #define X509_STORE_CTX_get_by_subject X509_STORE_get_by_subject #define X509_getm_notAfter X509_get_notAfter #define X509_getm_notBefore X509_get_notBefore static const ASN1_INTEGER *X509_REVOKED_get0_serialNumber(const X509_REVOKED *x) { if(!x) return NULL; return x->serialNumber; } #endif static Arc::Logger& logger = Arc::Logger::rootLogger; static int verify_callback(int ok, X509_STORE_CTX* 
store_ctx); static bool collect_proxy_info(std::string& proxy_policy, X509* cert); static int verify_cert_additional(X509* cert, X509_STORE_CTX* store_ctx, std::string const& ca_dir, std::string& proxy_policy); int verify_cert_chain(X509* cert, STACK_OF(X509)** certchain, std::string const& ca_file, std::string const& ca_dir, std::string& proxy_policy) { int i; int j; int retval = 0; X509_STORE* cert_store = NULL; X509_STORE_CTX* store_ctx = NULL; X509* cert_in_chain = NULL; X509* user_cert = NULL; user_cert = cert; if ((cert_store = X509_STORE_new()) == NULL) { goto err; } X509_STORE_set_verify_cb_func(cert_store, verify_callback); if (*certchain != NULL) { for (i=0;i= 0; --i) { X509* cert_in_chain = sk_X509_value(*certchain,i); if(cert_in_chain) { if(!collect_proxy_info(proxy_policy, cert_in_chain)) return (0); } } } return (1); } static int verify_callback(int ok, X509_STORE_CTX* store_ctx) { //if failed, show the error message. Hopefully nicer than generated by OpenSSL. if(!ok) { char * subject_name = X509_NAME_oneline(X509_get_subject_name(X509_STORE_CTX_get_current_cert(store_ctx)), 0, 0); unsigned long issuer_hash = X509_issuer_name_hash(X509_STORE_CTX_get_current_cert(store_ctx)); logger.msg(Arc::DEBUG,"Error number in store context: %i",(int)(X509_STORE_CTX_get_error(store_ctx))); if(sk_X509_num(X509_STORE_CTX_get0_chain(store_ctx)) == 1) { logger.msg(Arc::VERBOSE,"Self-signed certificate"); } if (X509_STORE_CTX_get_error(store_ctx) == X509_V_ERR_CERT_NOT_YET_VALID) { logger.msg(Arc::INFO,"The certificate with subject %s is not valid",subject_name); } else if(X509_STORE_CTX_get_error(store_ctx) == X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY) { logger.msg(Arc::INFO,"Can not find issuer certificate for the certificate with subject %s and hash: %lu",subject_name,issuer_hash); } else if(X509_STORE_CTX_get_error(store_ctx) == X509_V_ERR_CERT_HAS_EXPIRED) { logger.msg(Arc::INFO,"Certificate with subject %s has expired",subject_name); } else if(X509_STORE_CTX_get_error(store_ctx) == X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN) { logger.msg(Arc::INFO,"Untrusted self-signed certificate in chain with subject %s and hash: %lu",subject_name,issuer_hash); } else { logger.msg(Arc::INFO,"Certificate verification error: %s",X509_verify_cert_error_string(X509_STORE_CTX_get_error(store_ctx))); } if(subject_name) OPENSSL_free(subject_name); } return ok; } static int verify_cert_additional(X509* cert, X509_STORE_CTX* store_ctx, std::string const& ca_dir, std::string& proxy_policy) { certType type; if(!check_cert_type(cert,type)) { logger.msg(Arc::ERROR,"Can not get the certificate type"); return 0; } /** We need to check whether the certificate is revoked if it is not a proxy; *for proxy, it does not ever get revoked */ if((type == CERT_TYPE_EEC) || (type == CERT_TYPE_CA)) { X509_OBJECT* obj = NULL; /* * SSLeay 0.9.0 handles CRLs but does not check them. * We will check the crl for this cert, if there * is a CRL in the store. * If we find the crl is not valid, we will fail, * as once the sysadmin indicates that CRLs are to * be checked, he best keep it upto date. * * When future versions of SSLeay support this better, * we can remove these tests. * we come through this code for each certificate, * starting with the CA's We will check for a CRL * each time, but only check the signature if the * subject name matches, and check for revoked * if the issuer name matches. * this allows the CA to revoke its own cert as well. 
*/ obj = X509_OBJECT_new(); if (!obj) return 0; if (X509_STORE_CTX_get_by_subject(store_ctx, X509_LU_CRL, X509_get_subject_name(cert), obj)) { if(X509_CRL* crl=X509_OBJECT_get0_X509_CRL(obj)) { /* verify the signature on this CRL */ EVP_PKEY* key = X509_get_pubkey(cert); if(!key) { X509_OBJECT_free(obj); return (0); } if (X509_CRL_verify(crl, key) <= 0) { X509_STORE_CTX_set_error(store_ctx,X509_V_ERR_CRL_SIGNATURE_FAILURE); // TODO: tell which crl failed logger.msg(Arc::ERROR,"Couldn't verify availability of CRL"); EVP_PKEY_free(key); X509_OBJECT_free(obj); return (0); } EVP_PKEY_free(key); int i = 0; /* Check date see if expired */ i = X509_CRL_get0_lastUpdate(crl) ? X509_cmp_current_time(X509_CRL_get0_lastUpdate(crl)) : 0; if (i == 0) { X509_STORE_CTX_set_error(store_ctx,X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD); // TODO: tell which crl failed logger.msg(Arc::ERROR,"In the available CRL the lastUpdate field is not valid"); X509_OBJECT_free(obj); return (0); } if(i>0) { X509_STORE_CTX_set_error(store_ctx,X509_V_ERR_CRL_NOT_YET_VALID); // TODO: tell which crl failed logger.msg(Arc::ERROR,"The available CRL is not yet valid"); X509_OBJECT_free(obj); return (0); } i = X509_CRL_get0_nextUpdate(crl) ? X509_cmp_current_time(X509_CRL_get0_nextUpdate(crl)) : 1; if (i == 0) { X509_STORE_CTX_set_error(store_ctx,X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD); // TODO: tell which crl failed logger.msg(Arc::ERROR,"In the available CRL, the nextUpdate field is not valid"); X509_OBJECT_free(obj); return (0); } if (i < 0) { X509_STORE_CTX_set_error(store_ctx,X509_V_ERR_CRL_HAS_EXPIRED); logger.msg(Arc::ERROR,"The available CRL has expired"); X509_OBJECT_free(obj); return (0); } } } X509_OBJECT_free(obj); obj = NULL; /* now check if the *issuer* has a CRL, and we are revoked */ obj = X509_OBJECT_new(); if (!obj) return 0; if (X509_STORE_CTX_get_by_subject(store_ctx, X509_LU_CRL, X509_get_issuer_name(cert), obj)) { if(X509_CRL* crl=X509_OBJECT_get0_X509_CRL(obj)) { /* check if this cert is revoked */ int n = sk_X509_REVOKED_num(X509_CRL_get_REVOKED(crl)); for (int i=0; i=097g (which means proxy cert info is * supported), and NID_proxyCertInfo can be got from the extension, * then we use the proxy cert info support from openssl itself. * Otherwise we have to use globus-customized proxy cert info support. */ PROXY_CERT_INFO_EXTENSION* proxycertinfo = NULL; proxycertinfo = (PROXY_CERT_INFO_EXTENSION*) X509V3_EXT_d2i(ext); if (proxycertinfo == NULL) { logger.msg(Arc::WARNING,"Can not convert DER encoded PROXY_CERT_INFO_EXTENSION extension to internal format"); } else { /**Parse the policy*/ // Must be proxy because proxy extension is set - if(X509_STORE_CTX_get_current_cert(store_ctx)->ex_flags & EXFLAG_PROXY) { switch (OBJ_obj2nid(proxycertinfo->proxyPolicy->policyLanguage)) { case NID_Independent: /* Put whatever explicit policy here to this particular proxy certificate, usually by * pulling them from some database. 
If there is none policy which need to be explicitly * inserted here, clear all the policy storage (make this and any subsequent proxy certificate * be void of any policy, because here the policylanguage is independent) */ proxy_policy.clear(); break; case NID_id_ppl_inheritAll: /* This is basically a NOP */ break; default: /* Here get the proxy policy */ proxy_policy.clear(); if((proxycertinfo->proxyPolicy) && (proxycertinfo->proxyPolicy->policy) && (proxycertinfo->proxyPolicy->policy->data)) { proxy_policy.append( (char const*)(proxycertinfo->proxyPolicy->policy->data), proxycertinfo->proxyPolicy->policy->length); } /* Use : as separator for policies parsed from different proxy certificate*/ /* !!!! Taking int account previous proxy_policy.clear() !!!! !!!! it seems to be impossible to have more than one !!!! !!!! policy collected anyway !!!! */ proxy_policy.append(":"); break; } } PROXY_CERT_INFO_EXTENSION_free(proxycertinfo); proxycertinfo = NULL; } } } } return true; } bool check_cert_type(X509* cert, certType& type) { logger.msg(Arc::DEBUG, "Trying to check X509 cert with check_cert_type"); bool ret = false; type = CERT_TYPE_EEC; ASN1_STRING* data; X509_EXTENSION* certinfo_ext; PROXY_POLICY* policy = NULL; ASN1_OBJECT* policylang = NULL; int policynid; PROXY_CERT_INFO_EXTENSION* certinfo_openssl = NULL; int index; int critical; BASIC_CONSTRAINTS* x509v3_bc = NULL; if(!cert) return false; if((x509v3_bc = (BASIC_CONSTRAINTS*) X509_get_ext_d2i(cert, NID_basic_constraints, &critical, NULL)) && x509v3_bc->ca) { type = CERT_TYPE_CA; if(x509v3_bc) { BASIC_CONSTRAINTS_free(x509v3_bc); } return true; } X509_NAME* issuer = NULL; X509_NAME* subject = X509_get_subject_name(cert); X509_NAME_ENTRY * name_entry = NULL; if(!subject) goto err; name_entry = X509_NAME_get_entry(subject, X509_NAME_entry_count(subject)-1); if(!name_entry) goto err; if (!OBJ_cmp(X509_NAME_ENTRY_get_object(name_entry),OBJ_nid2obj(NID_commonName))) { /* the name entry is of the type: common name */ data = X509_NAME_ENTRY_get_data(name_entry); if(!data) goto err; if (data->length == 5 && !memcmp(data->data,"proxy",5)) { type = CERT_TYPE_GSI_2_PROXY; } else if(data->length == 13 && !memcmp(data->data,"limited proxy",13)) { type = CERT_TYPE_GSI_2_LIMITED_PROXY; } else if((index = X509_get_ext_by_NID(cert, NID_proxyCertInfo, -1)) != -1) { certinfo_ext = X509_get_ext(cert,index); if(X509_EXTENSION_get_critical(certinfo_ext)) { PROXY_POLICY* policy_openssl = NULL; ASN1_OBJECT* policylang_openssl = NULL; if((certinfo_openssl = (PROXY_CERT_INFO_EXTENSION *)X509V3_EXT_d2i(certinfo_ext)) == NULL) { logger.msg(Arc::ERROR,"Can't convert DER encoded PROXYCERTINFO extension to internal form"); goto err; } if((policy_openssl = certinfo_openssl->proxyPolicy) == NULL) { logger.msg(Arc::ERROR,"Can't get policy from PROXYCERTINFO extension"); goto err; } if((policylang_openssl = policy_openssl->policyLanguage) == NULL) { logger.msg(Arc::ERROR,"Can't get policy language from PROXYCERTINFO extension"); goto err; } policynid = OBJ_obj2nid(policylang_openssl); if(policynid == NID_id_ppl_inheritAll) { type = CERT_TYPE_RFC_IMPERSONATION_PROXY; } else if(policynid == NID_Independent) { type = CERT_TYPE_RFC_INDEPENDENT_PROXY; } else if(policynid == NID_id_ppl_anyLanguage) { type = CERT_TYPE_RFC_ANYLANGUAGE_PROXY; } else { type = CERT_TYPE_RFC_RESTRICTED_PROXY; } //if((index = X509_get_ext_by_NID(cert, OBJ_txt2nid(PROXYCERTINFO_V3), -1)) != -1) { // logger.msg(Arc::ERROR,"Found more than one PCI extension"); // goto err; //} } } /*Duplicate the issuer, 
and add the CN=proxy, or CN=limitedproxy, etc. * This should match the subject. i.e. proxy can only be signed by * the owner. We do it this way, to double check all the ANS1 bits * as well. */ X509_NAME_ENTRY* new_name_entry = NULL; if(type != CERT_TYPE_EEC && type != CERT_TYPE_CA) { issuer = X509_NAME_dup(X509_get_issuer_name(cert)); new_name_entry = X509_NAME_ENTRY_create_by_NID(NULL, NID_commonName, V_ASN1_APP_CHOOSE, data->data, -1); if(!new_name_entry) goto err; X509_NAME_add_entry(issuer,new_name_entry,X509_NAME_entry_count(issuer),0); X509_NAME_ENTRY_free(new_name_entry); new_name_entry = NULL; if (X509_NAME_cmp(issuer, subject)) { /* Reject this certificate, only the user may sign the proxy */ logger.msg(Arc::ERROR,"The subject does not match the issuer name + proxy CN entry"); goto err; } X509_NAME_free(issuer); issuer = NULL; } } ret = true; err: if(issuer) { X509_NAME_free(issuer); } if(certinfo_openssl) {PROXY_CERT_INFO_EXTENSION_free(certinfo_openssl);} if(x509v3_bc) { BASIC_CONSTRAINTS_free(x509v3_bc); } return ret; } const char* certTypeToString(certType type) { switch(type) { case CERT_TYPE_EEC: case CERT_TYPE_CA: return "CA certificate"; case CERT_TYPE_GSI_3_IMPERSONATION_PROXY: return "X.509 Proxy Certificate Profile (pre-RFC) compliant impersonation proxy"; case CERT_TYPE_GSI_3_INDEPENDENT_PROXY: return "X.509 Proxy Certificate Profile (pre-RFC) compliant independent proxy"; case CERT_TYPE_GSI_3_LIMITED_PROXY: return "X.509 Proxy Certificate Profile (pre-RFC) compliant limited proxy"; case CERT_TYPE_GSI_3_RESTRICTED_PROXY: return "X.509 Proxy Certificate Profile (pre-RFC) compliant restricted proxy"; case CERT_TYPE_GSI_2_PROXY: return "Legacy Globus impersonation proxy"; case CERT_TYPE_GSI_2_LIMITED_PROXY: return "Legacy Globus limited impersonation proxy"; case CERT_TYPE_RFC_IMPERSONATION_PROXY: return "X.509 Proxy Certificate Profile RFC compliant impersonation proxy - RFC inheritAll proxy"; case CERT_TYPE_RFC_INDEPENDENT_PROXY: return "X.509 Proxy Certificate Profile RFC compliant independent proxy - RFC independent proxy"; case CERT_TYPE_RFC_LIMITED_PROXY: return "X.509 Proxy Certificate Profile RFC compliant limited proxy"; case CERT_TYPE_RFC_RESTRICTED_PROXY: return "X.509 Proxy Certificate Profile RFC compliant restricted proxy"; case CERT_TYPE_RFC_ANYLANGUAGE_PROXY: return "RFC anyLanguage proxy"; default: return "Unknown certificate type"; } } } // namespace ArcCredential nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/test0000644000000000000000000000013213214316023022463 xustar000000000000000030 mtime=1513200659.172737368 30 atime=1513200668.720854145 30 ctime=1513200659.172737368 nordugrid-arc-5.4.2/src/hed/libs/credential/test/0000755000175000002070000000000013214316023022606 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/credential/test/PaxHeaders.7502/listfuncTest.cpp0000644000000000000000000000012413103157342025740 xustar000000000000000027 mtime=1494015714.810493 27 atime=1513200574.611703 30 ctime=1513200659.172737368 nordugrid-arc-5.4.2/src/hed/libs/credential/test/listfuncTest.cpp0000644000175000002070000000236213103157342026010 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "../listfunc.h" using namespace ArcCredential; class listfuncTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(listfuncTest); CPPUNIT_TEST(listTest); CPPUNIT_TEST_SUITE_END(); public: listfuncTest() {} void setUp() {} void tearDown() {} void listTest(); }; void f(void *v) { if 
(v==NULL) return; free(v); return; } void listfuncTest::listTest() { char **v1,**v2,**v3; char *str1,*str2,*str3; str1 = (char*) malloc(sizeof(char)*6); strncpy(str1,"Hello",6); str2 = (char*) malloc(sizeof(char)*6); strncpy(str2,"World",6); str3 = (char*) malloc(sizeof(char)*6); strncpy(str3,"World",6); v1 = listadd(NULL,NULL,sizeof(char*)); CPPUNIT_ASSERT_EQUAL(v1,(char**)NULL); v1 = listadd(NULL,str1,sizeof(char*)); CPPUNIT_ASSERT(v1); v2 = listadd(v1,str2,sizeof(char*)); CPPUNIT_ASSERT(v1); v3 = listadd(v2,str3,sizeof(char*)); CPPUNIT_ASSERT(v3); CPPUNIT_ASSERT(v3[0] == str1); CPPUNIT_ASSERT(v3[1] == str2); CPPUNIT_ASSERT(v3[2] == str3); CPPUNIT_ASSERT(v3[3] == NULL); listfree(v3,(freefn)f); } CPPUNIT_TEST_SUITE_REGISTRATION(listfuncTest); nordugrid-arc-5.4.2/src/hed/libs/credential/test/PaxHeaders.7502/Makefile.am0000644000000000000000000000012713065017103024601 xustar000000000000000027 mtime=1490296387.698578 30 atime=1513200596.911975895 30 ctime=1513200659.167737307 nordugrid-arc-5.4.2/src/hed/libs/credential/test/Makefile.am0000644000175000002070000000277013065017103024651 0ustar00mockbuildmock00000000000000TESTS = CredentialTest VOMSUtilTest listfuncTest TESTS_ENVIRONMENT = srcdir=$(srcdir) check_PROGRAMS = $(TESTS) check_DATA = ca_key.pem ca_cert.pem CredentialTest_SOURCES = $(top_srcdir)/src/Test.cpp CredentialTest.cpp CredentialTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) CredentialTest_LDADD = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) VOMSUtilTest_SOURCES = $(top_srcdir)/src/Test.cpp VOMSUtilTest.cpp VOMSUtilTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) VOMSUtilTest_LDADD = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) listfuncTest_SOURCES = $(top_srcdir)/src/Test.cpp listfuncTest.cpp listfuncTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) listfuncTest_LDADD = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) ca_key.pem ca_cert.pem: openssl req -x509 -set_serial 1 -passout pass:password -newkey rsa:2048 -keyout ca_key.pem -out ca_cert.pem -config $(srcdir)/ca.cnf EXTRA_DIST = ca.cnf CLEANFILES = *.pem ca_serial nordugrid-arc-5.4.2/src/hed/libs/credential/test/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315724024614 xustar000000000000000030 mtime=1513200596.970976616 30 atime=1513200647.914599677 30 ctime=1513200659.168737319 nordugrid-arc-5.4.2/src/hed/libs/credential/test/Makefile.in0000644000175000002070000011563513214315724024675 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ TESTS = CredentialTest$(EXEEXT) VOMSUtilTest$(EXEEXT) \ listfuncTest$(EXEEXT) check_PROGRAMS = $(am__EXEEXT_1) subdir = src/hed/libs/credential/test DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__EXEEXT_1 = CredentialTest$(EXEEXT) VOMSUtilTest$(EXEEXT) \ listfuncTest$(EXEEXT) am_CredentialTest_OBJECTS = CredentialTest-Test.$(OBJEXT) \ CredentialTest-CredentialTest.$(OBJEXT) CredentialTest_OBJECTS = $(am_CredentialTest_OBJECTS) am__DEPENDENCIES_1 = CredentialTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) CredentialTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(CredentialTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_VOMSUtilTest_OBJECTS = VOMSUtilTest-Test.$(OBJEXT) \ VOMSUtilTest-VOMSUtilTest.$(OBJEXT) VOMSUtilTest_OBJECTS = $(am_VOMSUtilTest_OBJECTS) VOMSUtilTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) VOMSUtilTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(VOMSUtilTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_listfuncTest_OBJECTS = listfuncTest-Test.$(OBJEXT) \ listfuncTest-listfuncTest.$(OBJEXT) listfuncTest_OBJECTS = $(am_listfuncTest_OBJECTS) listfuncTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) listfuncTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(listfuncTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ 
DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(CredentialTest_SOURCES) $(VOMSUtilTest_SOURCES) \ $(listfuncTest_SOURCES) DIST_SOURCES = $(CredentialTest_SOURCES) $(VOMSUtilTest_SOURCES) \ $(listfuncTest_SOURCES) ETAGS = etags CTAGS = ctags am__tty_colors = \ red=; grn=; lgn=; blu=; std= DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ 
GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ 
UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ TESTS_ENVIRONMENT = srcdir=$(srcdir) check_DATA = ca_key.pem ca_cert.pem CredentialTest_SOURCES = $(top_srcdir)/src/Test.cpp CredentialTest.cpp CredentialTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) CredentialTest_LDADD = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) VOMSUtilTest_SOURCES = $(top_srcdir)/src/Test.cpp VOMSUtilTest.cpp VOMSUtilTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) VOMSUtilTest_LDADD = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) listfuncTest_SOURCES = $(top_srcdir)/src/Test.cpp listfuncTest.cpp 
listfuncTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) listfuncTest_LDADD = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) EXTRA_DIST = ca.cnf CLEANFILES = *.pem ca_serial all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/credential/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/credential/test/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-checkPROGRAMS: @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list CredentialTest$(EXEEXT): $(CredentialTest_OBJECTS) $(CredentialTest_DEPENDENCIES) @rm -f CredentialTest$(EXEEXT) $(CredentialTest_LINK) $(CredentialTest_OBJECTS) $(CredentialTest_LDADD) $(LIBS) VOMSUtilTest$(EXEEXT): $(VOMSUtilTest_OBJECTS) $(VOMSUtilTest_DEPENDENCIES) @rm -f VOMSUtilTest$(EXEEXT) $(VOMSUtilTest_LINK) $(VOMSUtilTest_OBJECTS) $(VOMSUtilTest_LDADD) $(LIBS) listfuncTest$(EXEEXT): $(listfuncTest_OBJECTS) $(listfuncTest_DEPENDENCIES) @rm -f listfuncTest$(EXEEXT) $(listfuncTest_LINK) $(listfuncTest_OBJECTS) $(listfuncTest_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/CredentialTest-CredentialTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/CredentialTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/VOMSUtilTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/VOMSUtilTest-VOMSUtilTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/listfuncTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/listfuncTest-listfuncTest.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) 
$(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< CredentialTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(CredentialTest_CXXFLAGS) $(CXXFLAGS) -MT CredentialTest-Test.o -MD -MP -MF $(DEPDIR)/CredentialTest-Test.Tpo -c -o CredentialTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/CredentialTest-Test.Tpo $(DEPDIR)/CredentialTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='CredentialTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(CredentialTest_CXXFLAGS) $(CXXFLAGS) -c -o CredentialTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp CredentialTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(CredentialTest_CXXFLAGS) $(CXXFLAGS) -MT CredentialTest-Test.obj -MD -MP -MF $(DEPDIR)/CredentialTest-Test.Tpo -c -o CredentialTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/CredentialTest-Test.Tpo $(DEPDIR)/CredentialTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='CredentialTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(CredentialTest_CXXFLAGS) $(CXXFLAGS) -c -o CredentialTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` CredentialTest-CredentialTest.o: CredentialTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(CredentialTest_CXXFLAGS) $(CXXFLAGS) -MT CredentialTest-CredentialTest.o -MD -MP -MF $(DEPDIR)/CredentialTest-CredentialTest.Tpo -c -o CredentialTest-CredentialTest.o `test -f 'CredentialTest.cpp' || echo '$(srcdir)/'`CredentialTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/CredentialTest-CredentialTest.Tpo $(DEPDIR)/CredentialTest-CredentialTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='CredentialTest.cpp' object='CredentialTest-CredentialTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(CredentialTest_CXXFLAGS) $(CXXFLAGS) -c -o CredentialTest-CredentialTest.o `test -f 'CredentialTest.cpp' || echo '$(srcdir)/'`CredentialTest.cpp CredentialTest-CredentialTest.obj: CredentialTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(CredentialTest_CXXFLAGS) $(CXXFLAGS) -MT CredentialTest-CredentialTest.obj -MD -MP -MF $(DEPDIR)/CredentialTest-CredentialTest.Tpo -c -o CredentialTest-CredentialTest.obj `if test -f 'CredentialTest.cpp'; then $(CYGPATH_W) 'CredentialTest.cpp'; else $(CYGPATH_W) '$(srcdir)/CredentialTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/CredentialTest-CredentialTest.Tpo $(DEPDIR)/CredentialTest-CredentialTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='CredentialTest.cpp' object='CredentialTest-CredentialTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(CredentialTest_CXXFLAGS) $(CXXFLAGS) -c -o CredentialTest-CredentialTest.obj `if test -f 'CredentialTest.cpp'; then $(CYGPATH_W) 'CredentialTest.cpp'; else $(CYGPATH_W) '$(srcdir)/CredentialTest.cpp'; fi` VOMSUtilTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(VOMSUtilTest_CXXFLAGS) $(CXXFLAGS) -MT VOMSUtilTest-Test.o -MD -MP -MF $(DEPDIR)/VOMSUtilTest-Test.Tpo -c -o VOMSUtilTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/VOMSUtilTest-Test.Tpo $(DEPDIR)/VOMSUtilTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='VOMSUtilTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(VOMSUtilTest_CXXFLAGS) $(CXXFLAGS) -c -o VOMSUtilTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp VOMSUtilTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(VOMSUtilTest_CXXFLAGS) $(CXXFLAGS) -MT VOMSUtilTest-Test.obj -MD -MP -MF $(DEPDIR)/VOMSUtilTest-Test.Tpo -c -o VOMSUtilTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/VOMSUtilTest-Test.Tpo $(DEPDIR)/VOMSUtilTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='VOMSUtilTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(VOMSUtilTest_CXXFLAGS) $(CXXFLAGS) -c -o VOMSUtilTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` VOMSUtilTest-VOMSUtilTest.o: VOMSUtilTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(VOMSUtilTest_CXXFLAGS) $(CXXFLAGS) -MT VOMSUtilTest-VOMSUtilTest.o -MD -MP -MF $(DEPDIR)/VOMSUtilTest-VOMSUtilTest.Tpo -c -o VOMSUtilTest-VOMSUtilTest.o `test -f 
'VOMSUtilTest.cpp' || echo '$(srcdir)/'`VOMSUtilTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/VOMSUtilTest-VOMSUtilTest.Tpo $(DEPDIR)/VOMSUtilTest-VOMSUtilTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='VOMSUtilTest.cpp' object='VOMSUtilTest-VOMSUtilTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(VOMSUtilTest_CXXFLAGS) $(CXXFLAGS) -c -o VOMSUtilTest-VOMSUtilTest.o `test -f 'VOMSUtilTest.cpp' || echo '$(srcdir)/'`VOMSUtilTest.cpp VOMSUtilTest-VOMSUtilTest.obj: VOMSUtilTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(VOMSUtilTest_CXXFLAGS) $(CXXFLAGS) -MT VOMSUtilTest-VOMSUtilTest.obj -MD -MP -MF $(DEPDIR)/VOMSUtilTest-VOMSUtilTest.Tpo -c -o VOMSUtilTest-VOMSUtilTest.obj `if test -f 'VOMSUtilTest.cpp'; then $(CYGPATH_W) 'VOMSUtilTest.cpp'; else $(CYGPATH_W) '$(srcdir)/VOMSUtilTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/VOMSUtilTest-VOMSUtilTest.Tpo $(DEPDIR)/VOMSUtilTest-VOMSUtilTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='VOMSUtilTest.cpp' object='VOMSUtilTest-VOMSUtilTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(VOMSUtilTest_CXXFLAGS) $(CXXFLAGS) -c -o VOMSUtilTest-VOMSUtilTest.obj `if test -f 'VOMSUtilTest.cpp'; then $(CYGPATH_W) 'VOMSUtilTest.cpp'; else $(CYGPATH_W) '$(srcdir)/VOMSUtilTest.cpp'; fi` listfuncTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(listfuncTest_CXXFLAGS) $(CXXFLAGS) -MT listfuncTest-Test.o -MD -MP -MF $(DEPDIR)/listfuncTest-Test.Tpo -c -o listfuncTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/listfuncTest-Test.Tpo $(DEPDIR)/listfuncTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='listfuncTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(listfuncTest_CXXFLAGS) $(CXXFLAGS) -c -o listfuncTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp listfuncTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(listfuncTest_CXXFLAGS) $(CXXFLAGS) -MT listfuncTest-Test.obj -MD -MP -MF $(DEPDIR)/listfuncTest-Test.Tpo -c -o listfuncTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/listfuncTest-Test.Tpo $(DEPDIR)/listfuncTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='listfuncTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(listfuncTest_CXXFLAGS) $(CXXFLAGS) -c -o listfuncTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; 
then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` listfuncTest-listfuncTest.o: listfuncTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(listfuncTest_CXXFLAGS) $(CXXFLAGS) -MT listfuncTest-listfuncTest.o -MD -MP -MF $(DEPDIR)/listfuncTest-listfuncTest.Tpo -c -o listfuncTest-listfuncTest.o `test -f 'listfuncTest.cpp' || echo '$(srcdir)/'`listfuncTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/listfuncTest-listfuncTest.Tpo $(DEPDIR)/listfuncTest-listfuncTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='listfuncTest.cpp' object='listfuncTest-listfuncTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(listfuncTest_CXXFLAGS) $(CXXFLAGS) -c -o listfuncTest-listfuncTest.o `test -f 'listfuncTest.cpp' || echo '$(srcdir)/'`listfuncTest.cpp listfuncTest-listfuncTest.obj: listfuncTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(listfuncTest_CXXFLAGS) $(CXXFLAGS) -MT listfuncTest-listfuncTest.obj -MD -MP -MF $(DEPDIR)/listfuncTest-listfuncTest.Tpo -c -o listfuncTest-listfuncTest.obj `if test -f 'listfuncTest.cpp'; then $(CYGPATH_W) 'listfuncTest.cpp'; else $(CYGPATH_W) '$(srcdir)/listfuncTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/listfuncTest-listfuncTest.Tpo $(DEPDIR)/listfuncTest-listfuncTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='listfuncTest.cpp' object='listfuncTest-listfuncTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(listfuncTest_CXXFLAGS) $(CXXFLAGS) -c -o listfuncTest-listfuncTest.obj `if test -f 'listfuncTest.cpp'; then $(CYGPATH_W) 'listfuncTest.cpp'; else $(CYGPATH_W) '$(srcdir)/listfuncTest.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) 
$(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? -ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) $(check_DATA) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-checkPROGRAMS clean-generic clean-libtool \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-TESTS check-am clean \ clean-checkPROGRAMS clean-generic clean-libtool ctags \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am ca_key.pem ca_cert.pem: openssl req -x509 -set_serial 1 -passout pass:password -newkey rsa:2048 -keyout ca_key.pem -out ca_cert.pem -config $(srcdir)/ca.cnf # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
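For context, the ca_key.pem/ca_cert.pem rule above bootstraps the throw-away test CA that the credential unit tests further down in this archive load back in. A minimal sketch of loading such a generated CA from C++, assuming only the Arc::Credential constructor and accessors already used in CredentialTest.cpp below; the include path is an assumption, and the passphrase is the pass:password value from the openssl rule above (the unit test itself regenerates its own CA with the passphrase "capassword"), so whichever matches the key on disk applies:

// Hedged sketch, not part of the distribution: load the generated test CA and
// report its subject DN and type, mirroring CredentialTest::testCAcert below.
#include <iostream>
#include <arc/credential/Credential.h>  // assumed install path of Credential.h

int main() {
  // Constructor arguments follow CredentialTest.cpp:
  // (cert, key, serial file, CA/openssl config, extension section, passphrase)
  Arc::Credential ca("ca_cert.pem", "ca_key.pem", "ca_serial", "ca.cnf", "", "password");
  std::cout << "CA subject: " << ca.GetDN() << std::endl;   // e.g. /O=Grid/OU=ARC/CN=CA
  return (ca.GetType() == ArcCredential::CERT_TYPE_CA) ? 0 : 1;
}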
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/credential/test/PaxHeaders.7502/VOMSUtilTest.cpp0000644000000000000000000000012412015640020025522 xustar000000000000000027 mtime=1345798160.652728 27 atime=1513200574.612703 30 ctime=1513200659.171737356 nordugrid-arc-5.4.2/src/hed/libs/credential/test/VOMSUtilTest.cpp0000644000175000002070000001011712015640020025567 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include class VOMSUtilTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(VOMSUtilTest); CPPUNIT_TEST(VOMSTrustListTest); CPPUNIT_TEST_SUITE_END(); public: VOMSUtilTest() {} void setUp() {} void tearDown() {} void VOMSTrustListTest(); }; void VOMSUtilTest::VOMSTrustListTest() { std::vector output; std::string emptystring = ""; Arc::VOMSTrustList emptylist; // CPPUNIT_ASSERT_EQUAL(!parseVOMSAC(c, emptystring, emptystring, emptylist, output, false),false); // Create the AC on the VOMS side std::string CAcert("ca_cert.pem"); std::string user_key_file("user_key.pem"); std::string user_cert_file("user_cert.pem"); Arc::Credential issuer_cred(user_cert_file, user_key_file, "", CAcert, "userpassword"); std::string vomsserver_key_file("host_key.pem"); std::string vomsserver_cert_file("host_cert.pem"); Arc::Credential ac_issuer_cred(vomsserver_cert_file, vomsserver_key_file, "", CAcert, ""); std::string holder_proxy_file("user_proxy.pem"); Arc::Credential holder_cred(holder_proxy_file, "", "", CAcert); std::vector fqan; fqan.push_back("/nordugrid.org"); std::vector targets; targets.push_back("www.nordugrid.org"); std::vector attrs; attrs.push_back("::role=admin"); attrs.push_back("::role=guest"); std::string voname = "nordugrid"; std::string uri = "voms.nordugrid.org:50000"; std::string ac_str; Arc::createVOMSAC(ac_str, ac_issuer_cred, holder_cred, fqan, targets, attrs, voname, uri, 3600*12); // // Create the full AC which is an ordered list of AC // // encode the AC string into base64 int size; char* enc = NULL; std::string ac_str_b64; enc = Arc::VOMSEncode((char*)(ac_str.c_str()), ac_str.length(), &size); if (enc != NULL) { ac_str_b64.append(enc, size); free(enc); enc = NULL; } std::string aclist_str; aclist_str.append(VOMS_AC_HEADER).append("\n"); aclist_str.append(ac_str_b64).append("\n"); aclist_str.append(VOMS_AC_TRAILER).append("\n"); std::string voms_proxy_file("voms_proxy.pem"); // Request int keybits = 1024; int proxydepth = 10; Arc::Time t; Arc::Credential proxy_req(t, Arc::Period(12*3600), keybits, "gsi2", "limited", "", proxydepth); std::string proxy_req_file("voms_req.pem"); proxy_req.GenerateRequest(proxy_req_file.c_str()); //Add AC extension to proxy certificat before signing it proxy_req.AddExtension("acseq", (char**) (aclist_str.c_str())); //Sign the voms proxy issuer_cred.SignRequest(&proxy_req, voms_proxy_file.c_str()); std::string private_key, signing_cert, signing_cert_chain; proxy_req.OutputPrivatekey(private_key); issuer_cred.OutputCertificate(signing_cert); issuer_cred.OutputCertificateChain(signing_cert_chain); std::ofstream out_f(voms_proxy_file.c_str(), std::ofstream::app); out_f.write(private_key.c_str(), private_key.size()); out_f.write(signing_cert.c_str(), signing_cert.size()); out_f.write(signing_cert_chain.c_str(), signing_cert_chain.size()); out_f.close(); std::vector vomscert_trust_dn; vomscert_trust_dn.push_back("/O=Grid/OU=ARC/CN=localhost"); vomscert_trust_dn.push_back("/O=Grid/OU=ARC/CN=CA"); vomscert_trust_dn.push_back("NEXT CHAIN"); vomscert_trust_dn.push_back("^/O=Grid/OU=ARC"); // Read and pars 
VOMS proxy Arc::Credential voms_proxy(voms_proxy_file, "", ".", CAcert); std::vector attributes; Arc::VOMSTrustList trust_dn(vomscert_trust_dn); Arc::parseVOMSAC(voms_proxy, ".", CAcert, "", trust_dn, attributes, true); for(size_t n=0; n #endif #include #include #include #include #include #include class CredentialTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(CredentialTest); CPPUNIT_TEST(testCAcert); CPPUNIT_TEST(testhostcert); CPPUNIT_TEST(testusercert); CPPUNIT_TEST(testproxy); CPPUNIT_TEST(testproxy2proxy); CPPUNIT_TEST(testproxycertinfo); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown() {} void testCAcert(); void testhostcert(); void testusercert(); void testproxy(); void testproxy2proxy(); void testproxycertinfo(){}; private: std::string srcdir; std::string CAcert; std::string CAkey; std::string CAserial; std::string CApassphrase; std::string CAdn; std::string CAconf; std::string ca_ext_sect; std::string host_cert_ext_sect; std::string user_cert_ext_sect; std::string host_key_file; std::string host_cert_file; std::string host_dn; std::string user_key_file; std::string user_cert_file; std::string user_passphrase; std::string user_dn; std::string user_proxy_file; }; void CredentialTest::setUp() { srcdir = Arc::GetEnv("srcdir"); if (srcdir.length() == 0) srcdir = "."; CAcert = "ca_cert.pem"; CAkey = "ca_key.pem"; CAserial = "ca_serial"; CApassphrase = "capassword"; CAdn = "/O=Grid/OU=ARC/CN=CA"; CAconf = srcdir + "/ca.cnf"; ca_ext_sect = "v3_ca"; host_cert_ext_sect = "host_cert"; user_cert_ext_sect = "usr_cert"; host_key_file = "host_key.pem"; host_cert_file = "host_cert.pem"; host_dn = "/O=Grid/OU=ARC/CN=localhost"; user_key_file = "user_key.pem"; user_cert_file = "user_cert.pem"; user_passphrase = "userpassword"; user_dn = "/O=Grid/OU=ARC/OU=localdomain/CN=User"; user_proxy_file = "user_proxy.pem"; } void CredentialTest::testCAcert() { // Create serial file std::ofstream out_f; out_f.open(CAserial.c_str()); out_f << "00"; out_f.close(); // Create a CA certificate and its related key int ca_keybits = 2048; Arc::Time ca_t; Arc::Credential ca(ca_t, Arc::Period(365*24*3600), ca_keybits, "EEC"); BIO* ca_req_bio = NULL; ca_req_bio = BIO_new(BIO_s_mem()); ca.GenerateRequest(ca_req_bio); std::string subkeyid("hash"); //ca.AddExtension("subjectKeyIdentifier", subkeyid.c_str()); //ca.AddExtension("authorityKeyIdentifier", "keyid:always,issuer"); //ca.AddExtension("basicConstraints", (char **)("CA:TRUE")); ca.SelfSignEECRequest(CAdn, CAconf.c_str(), ca_ext_sect, CAcert.c_str()); std::ofstream out_key(CAkey.c_str(), std::ofstream::out); std::string ca_private_key; ca.OutputPrivatekey(ca_private_key); out_key.write(ca_private_key.c_str(), ca_private_key.size()); out_key.close(); // Load CA credential Arc::Credential ca2(CAcert, CAkey, CAserial, CAconf, "", CApassphrase); // Did we load a CA cert? 
CPPUNIT_ASSERT_EQUAL(ArcCredential::CERT_TYPE_CA, ca2.GetType()); // Test if the DN is read properly CPPUNIT_ASSERT_EQUAL(CAdn, ca2.GetDN()); if(ca_req_bio)BIO_free_all(ca_req_bio); } void CredentialTest::testhostcert() { // Default EEC values int keybits = 1024; Arc::Time t; // host cert signing std::string host_req_file("host_req.pem"); Arc::Credential host_req(keybits); host_req.GenerateRequest(host_req_file.c_str()); // Write private key to file for loading later - no passphrase for hosts std::string host_key; host_req.OutputPrivatekey(host_key); std::ofstream out_f; out_f.open(host_key_file.c_str()); out_f << host_key;; out_f.close(); // Load the request Arc::Credential host_eec; host_eec.InquireRequest(host_req_file.c_str(), true); Arc::Period host_life(30*24*3600); host_eec.SetLifeTime(host_life); // Add subjectAltname extension to host cert std::string host_ext("DNS:localhost"); host_eec.AddExtension("2.5.29.17", host_ext); // Load CA credential Arc::Credential ca(CAcert, CAkey, CAserial, CAconf, host_cert_ext_sect, CApassphrase); // Sign request ca.SignEECRequest(&host_eec, host_dn, host_cert_file.c_str()); //Load signed host cert Arc::Credential host_cert(host_cert_file, host_key_file, "", CAcert); //Does the certificate chain verify? CPPUNIT_ASSERT(host_cert.GetVerification()); // Did the signed cert get the right DN? CPPUNIT_ASSERT_EQUAL(host_dn,host_cert.GetDN()); // Right type? CPPUNIT_ASSERT_EQUAL(ArcCredential::CERT_TYPE_EEC,host_cert.GetType()); } void CredentialTest::testusercert() { // Default EEC values int keybits = 1024; Arc::Time t; // User cert signing std::string user_req_file("user_req.pem"); Arc::Period user_life(30*24*3600); Arc::Credential user_req(t, user_life, keybits, "EEC"); user_req.GenerateRequest(user_req_file.c_str()); // Write private key to file for loading later std::string user_key; user_req.OutputPrivatekey(user_key,true,user_passphrase); std::ofstream out_f; out_f.open(user_key_file.c_str()); out_f << user_key; out_f.close(); // Here the original Credential object is used for signing; // We don't need to load the request, since we don't need to // inquire the X509_REQ //Arc::Credential user_eec; //user_eec.InquireRequest(user_req_file.c_str(), true); //user_eec.SetLifeTime(user_life); // Add subjectAltname extension to host cert std::string user_ext("EMAIL:user@localhost"); user_req.AddExtension("2.5.29.17", user_ext); // Load CA credential Arc::Credential ca(CAcert, CAkey, CAserial, CAconf, user_cert_ext_sect, CApassphrase); // Sign request ca.SignEECRequest(&user_req, user_dn, user_cert_file.c_str()); //Try to load user cert with wrong passphrase Arc::Credential user_cert_bad(user_cert_file,user_key_file,".",CAcert,"Bad password"); //Load signed user cert Arc::Credential user_cert(user_cert_file, user_key_file, ".", CAcert, user_passphrase); //Does the certificate chain verify? CPPUNIT_ASSERT(user_cert.GetVerification()); // Did the signed cert get the right DN? CPPUNIT_ASSERT_EQUAL(user_dn,user_cert.GetDN()); // Did the signed cert get the right identity - trivial for non-proxy? CPPUNIT_ASSERT_EQUAL(user_dn,user_cert.GetIdentityName()); // Right type? 
CPPUNIT_ASSERT_EQUAL(ArcCredential::CERT_TYPE_EEC,user_cert.GetType()); // Get the lifetime CPPUNIT_ASSERT_EQUAL(user_life, user_cert.GetLifeTime()); } void CredentialTest::testproxy() { int keybits = 1024; Arc::Time t; // Generate certificate request BIO* req; req = BIO_new(BIO_s_mem()); Arc::Credential proxy_req(t,0,keybits); proxy_req.GenerateRequest(req); // Load EEC credential Arc::Credential user_cert(user_cert_file, user_key_file, ".", CAcert, user_passphrase); // Load the request Arc::Credential proxy_cert; proxy_cert.InquireRequest(req); proxy_cert.SetProxyPolicy("rfc","independent","",-1); Arc::Period proxy_life(7*24*3600); proxy_cert.SetLifeTime(proxy_life); // Sign the request std::string proxy_cert_string; user_cert.SignRequest(&proxy_cert, proxy_cert_string); BIO_free_all(req); // Output proxy std::string proxy_key_string; proxy_req.OutputPrivatekey(proxy_key_string); proxy_cert_string.append(proxy_key_string); std::string user_cert_string; user_cert.OutputCertificate(user_cert_string); proxy_cert_string.append(user_cert_string); std::ofstream out_f; out_f.open(user_proxy_file.c_str()); out_f< #include #include #if (OPENSSL_VERSION_NUMBER < 0x10100000L) #define DEFINE_STACK_OF(S) \ inline STACK_OF(S)* sk_##S##_new(int (*cmp) (const S* const *, const S* const *)) { return SKM_sk_new(S, (cmp)); } \ inline STACK_OF(S)* sk_##S##_new_null() { return SKM_sk_new_null(S); } \ inline int sk_##S##_is_sorted(STACK_OF(S) const *st) { return SKM_sk_is_sorted(S, st); } \ inline void sk_##S##_free(STACK_OF(S)* st) { SKM_sk_free(S, st); } \ inline int sk_##S##_num(STACK_OF(S) const* st) { return SKM_sk_num(S, st); } \ inline int sk_##S##_push(STACK_OF(S)* st, S* val) { return SKM_sk_push(S, st, val); } \ inline S* sk_##S##_value(STACK_OF(S) const* st, int i) { return SKM_sk_value(S, st, i); } \ inline void sk_##S##_pop_free(STACK_OF(S)* st, void (*free_func)(S*)) { SKM_sk_pop_free(S, st, free_func); } \ DECLARE_STACK_OF(S) /* # define sk_ASN1_GENERALSTRING_set(st, i, val) SKM_sk_set(ASN1_GENERALSTRING, (st), (i), (val)) # define sk_ASN1_GENERALSTRING_zero(st) SKM_sk_zero(ASN1_GENERALSTRING, (st)) # define sk_ASN1_GENERALSTRING_unshift(st, val) SKM_sk_unshift(ASN1_GENERALSTRING, (st), (val)) # define sk_ASN1_GENERALSTRING_find(st, val) SKM_sk_find(ASN1_GENERALSTRING, (st), (val)) # define sk_ASN1_GENERALSTRING_find_ex(st, val) SKM_sk_find_ex(ASN1_GENERALSTRING, (st), (val)) # define sk_ASN1_GENERALSTRING_delete(st, i) SKM_sk_delete(ASN1_GENERALSTRING, (st), (i)) # define sk_ASN1_GENERALSTRING_delete_ptr(st, ptr) SKM_sk_delete_ptr(ASN1_GENERALSTRING, (st), (ptr)) # define sk_ASN1_GENERALSTRING_insert(st, val, i) SKM_sk_insert(ASN1_GENERALSTRING, (st), (val), (i)) # define sk_ASN1_GENERALSTRING_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(ASN1_GENERALSTRING, (st), (cmp)) # define sk_ASN1_GENERALSTRING_dup(st) SKM_sk_dup(ASN1_GENERALSTRING, st) # define sk_ASN1_GENERALSTRING_deep_copy(st, copy_func, free_func) SKM_sk_deep_copy(ASN1_GENERALSTRING, (st), (copy_func), (free_func)) # define sk_ASN1_GENERALSTRING_shift(st) SKM_sk_shift(ASN1_GENERALSTRING, (st)) # define sk_ASN1_GENERALSTRING_pop(st) SKM_sk_pop(ASN1_GENERALSTRING, (st)) # define sk_ASN1_GENERALSTRING_sort(st) SKM_sk_sort(ASN1_GENERALSTRING, (st)) */ #endif #define VOMS_AC_HEADER "-----BEGIN VOMS AC-----" #define VOMS_AC_TRAILER "-----END VOMS AC-----" namespace ArcCredential { #define ASN1_F_D2I_AC_ATTR 5000 #define AC_F_ATTR_New 5001 #define ASN1_F_D2I_AC_ROLE 5002 #define AC_F_ROLE_New 5003 #define ASN1_F_D2I_AC_IETFATTR 5004 #define 
AC_F_IETFATTR_New 5005 #define ASN1_F_D2I_AC_IETFATTRVAL 5006 #define ASN1_F_D2I_AC_DIGEST 5007 #define AC_F_DIGEST_New 5008 #define ASN1_F_D2I_AC_IS 5009 #define AC_F_AC_IS_New 5010 #define ASN1_F_D2I_AC_FORM 5011 #define AC_F_AC_FORM_New 5012 #define ASN1_F_D2I_AC_ACI 5013 #define ASN1_F_AC_ACI_New 5014 #define ASN1_F_D2I_AC_HOLDER 5015 #define ASN1_F_AC_HOLDER_New 5016 #define ASN1_F_AC_VAL_New 5017 #define AC_F_AC_INFO_NEW 5018 #define AC_F_D2I_AC 5019 #define AC_F_AC_New 5020 #define ASN1_F_I2D_AC_IETFATTRVAL 5021 #define AC_F_D2I_AC_DIGEST 5022 #define AC_F_AC_DIGEST_New 5023 #define AC_F_D2I_AC_IS 5024 #define AC_ERR_UNSET 5025 #define AC_ERR_SET 5026 #define AC_ERR_SIGNATURE 5027 #define AC_ERR_VERSION 5028 #define AC_ERR_HOLDER_SERIAL 5029 #define AC_ERR_HOLDER 5030 #define AC_ERR_UID_MISMATCH 5031 #define AC_ERR_ISSUER_NAME 5032 #define AC_ERR_SERIAL 5033 #define AC_ERR_DATES 5034 #define AC_ERR_ATTRIBS 5035 #define AC_F_AC_TARGET_New 5036 #define ASN1_F_D2I_AC_TARGET 5037 #define AC_F_AC_TARGETS_New 5036 #define ASN1_F_D2I_AC_TARGETS 5037 #define ASN1_F_D2I_AC_SEQ 5038 #define AC_F_AC_SEQ_new 5039 #define AC_ERR_ATTRIB_URI 5040 #define AC_ERR_ATTRIB_FQAN 5041 #define AC_ERR_EXTS_ABSENT 5042 #define AC_ERR_MEMORY 5043 #define AC_ERR_EXT_CRIT 5044 #define AC_ERR_EXT_TARGET 5045 #define AC_ERR_EXT_KEY 5046 #define AC_ERR_UNKNOWN 5047 #define AC_ERR_PARAMETERS 5048 #define X509_ERR_ISSUER_NAME 5049 #define X509_ERR_HOLDER_NAME 5050 #define AC_ERR_NO_EXTENSION 5051 #define ASN1_F_D2I_AC_CERTS 5052 #define AC_F_X509_New 5053 #define AC_F_D2I_AC_ATTRIBUTE 5054 #define AC_F_ATTRIBUTE_New 5055 #define ASN1_F_D2I_AC_ATT_HOLDER 5056 #define AC_F_AC_ATT_HOLDER_New 5057 #define ASN1_F_D2I_AC_FULL_ATTRIBUTES 5058 #define AC_F_AC_FULL_ATTRIBUTES_New 5059 #define ASN1_F_D2I_AC_ATTRIBUTEVAL 5060 #define ASN1_F_I2D_AC_ATTRIBUTEVAL 5061 #define AC_F_AC_ATTRIBUTEVAL_New 5062 #define AC_ERR_ATTRIB 5063 typedef struct ACDIGEST { ASN1_ENUMERATED *type; ASN1_OBJECT *oid; X509_ALGOR *algor; ASN1_BIT_STRING *digest; } AC_DIGEST; DECLARE_ASN1_FUNCTIONS(AC_DIGEST) typedef struct ACIS { STACK_OF(GENERAL_NAME) *issuer; ASN1_INTEGER *serial; ASN1_BIT_STRING *uid; } AC_IS; DECLARE_ASN1_FUNCTIONS(AC_IS) typedef struct ACFORM { STACK_OF(GENERAL_NAME) *names; AC_IS *is; AC_DIGEST *digest; } AC_FORM; DECLARE_ASN1_FUNCTIONS(AC_FORM) typedef struct ACACI { STACK_OF(GENERAL_NAME) *names; AC_FORM *form; } AC_ACI; DECLARE_ASN1_FUNCTIONS(AC_ACI) typedef struct ACHOLDER { AC_IS *baseid; STACK_OF(GENERAL_NAME) *name; AC_DIGEST *digest; } AC_HOLDER; DECLARE_ASN1_FUNCTIONS(AC_HOLDER) typedef struct ACVAL { ASN1_GENERALIZEDTIME *notBefore; ASN1_GENERALIZEDTIME *notAfter; } AC_VAL; DECLARE_ASN1_FUNCTIONS(AC_VAL) //typedef struct asn1_string_st AC_IETFATTRVAL; //typedef ASN1_TYPE AC_IETFATTRVAL; #define AC_IETFATTRVAL ASN1_TYPE #define AC_IETFATTRVAL_new ASN1_TYPE_new #define AC_IETFATTRVAL_free ASN1_TYPE_free #define sk_AC_IETFATTRVAL_push sk_ASN1_TYPE_push #define stack_st_AC_IETFATTRVAL stack_st_ASN1_TYPE #define sk_AC_IETFATTRVAL_num sk_ASN1_TYPE_num #define sk_AC_IETFATTRVAL_value sk_ASN1_TYPE_value #define sk_AC_IETFATTRVAL_new_null sk_ASN1_TYPE_new_null typedef struct ACIETFATTR { STACK_OF(GENERAL_NAME) *names; STACK_OF(AC_IETFATTRVAL) *values; } AC_IETFATTR; DECLARE_ASN1_FUNCTIONS(AC_IETFATTR) typedef struct ACTARGET { GENERAL_NAME *name; GENERAL_NAME *group; AC_IS *cert; } AC_TARGET; DECLARE_ASN1_FUNCTIONS(AC_TARGET) typedef struct ACTARGETS { STACK_OF(AC_TARGET) *targets; } AC_TARGETS; 
DECLARE_ASN1_FUNCTIONS(AC_TARGETS) typedef struct ACATTR { ASN1_OBJECT * type; //int get_type; STACK_OF(AC_IETFATTR) *ietfattr; //STACK_OF(AC_FULL_ATTRIBUTES) *fullattributes; } AC_ATTR; #define GET_TYPE_FQAN 1 #define GET_TYPE_ATTRIBUTES 2 DECLARE_ASN1_FUNCTIONS(AC_ATTR) typedef struct ACINFO { ASN1_INTEGER *version; AC_HOLDER *holder; AC_FORM *form; X509_ALGOR *alg; ASN1_INTEGER *serial; AC_VAL *validity; STACK_OF(AC_ATTR) *attrib; ASN1_BIT_STRING *id; STACK_OF(X509_EXTENSION) *exts; } AC_INFO; DECLARE_ASN1_FUNCTIONS(AC_INFO) typedef struct ACC { AC_INFO *acinfo; X509_ALGOR *sig_alg; ASN1_BIT_STRING *signature; } AC; DECLARE_ASN1_FUNCTIONS(AC) typedef struct ACSEQ { STACK_OF(AC) *acs; } AC_SEQ; DECLARE_ASN1_FUNCTIONS(AC_SEQ) typedef struct ACCERTS { STACK_OF(X509) *stackcert; } AC_CERTS; DECLARE_ASN1_FUNCTIONS(AC_CERTS) typedef struct ACATTRIBUTE { ASN1_OCTET_STRING *name; ASN1_OCTET_STRING *qualifier; ASN1_OCTET_STRING *value; } AC_ATTRIBUTE; DECLARE_ASN1_FUNCTIONS(AC_ATTRIBUTE) typedef struct ACATTHOLDER { STACK_OF(GENERAL_NAME) *grantor; STACK_OF(AC_ATTRIBUTE) *attributes; } AC_ATT_HOLDER; DECLARE_ASN1_FUNCTIONS(AC_ATT_HOLDER) typedef struct ACFULLATTRIBUTES { STACK_OF(AC_ATT_HOLDER) *providers; } AC_FULL_ATTRIBUTES; DECLARE_ASN1_FUNCTIONS(AC_FULL_ATTRIBUTES) DEFINE_STACK_OF(AC_TARGET) DEFINE_STACK_OF(AC_TARGETS) DEFINE_STACK_OF(AC_IETFATTR) //DEFINE_STACK_OF(AC_IETFATTRVAL) DEFINE_STACK_OF(AC_ATTR) DEFINE_STACK_OF(AC) DEFINE_STACK_OF(AC_INFO) DEFINE_STACK_OF(AC_VAL) DEFINE_STACK_OF(AC_HOLDER) DEFINE_STACK_OF(AC_ACI) DEFINE_STACK_OF(AC_FORM) DEFINE_STACK_OF(AC_IS) DEFINE_STACK_OF(AC_DIGEST) DEFINE_STACK_OF(AC_CERTS) DEFINE_STACK_OF(AC_ATTRIBUTE) DEFINE_STACK_OF(AC_ATT_HOLDER) DEFINE_STACK_OF(AC_FULL_ATTRIBUTES) X509V3_EXT_METHOD * VOMSAttribute_auth_x509v3_ext_meth(); X509V3_EXT_METHOD * VOMSAttribute_avail_x509v3_ext_meth(); X509V3_EXT_METHOD * VOMSAttribute_targets_x509v3_ext_meth(); X509V3_EXT_METHOD * VOMSAttribute_acseq_x509v3_ext_meth(); X509V3_EXT_METHOD * VOMSAttribute_certseq_x509v3_ext_meth(); X509V3_EXT_METHOD * VOMSAttribute_attribs_x509v3_ext_meth(); } // namespace ArcCredential #endif /// \endcond nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/PasswordSource.h0000644000000000000000000000012312306304733024723 xustar000000000000000027 mtime=1394182619.331424 27 atime=1513200574.629703 29 ctime=1513200659.12873683 nordugrid-arc-5.4.2/src/hed/libs/credential/PasswordSource.h0000644000175000002070000000333412306304733024774 0ustar00mockbuildmock00000000000000#ifndef __ARC_PASSWORD_SOURCE_H__ #define __ARC_PASSWORD_SOURCE_H__ #include namespace Arc { /** \addtogroup credential * @{ */ /// Obtain password from some source /** * Pure virtual class meant to be extended with a specific mechanism to obtain * password. * \since Added in 4.0.0. **/ class PasswordSource { public: typedef enum { /// No password is returned. Authoritative. Not same as empty password. NO_PASSWORD = 0, /// Password is provided. Authoritative. PASSWORD = 1, /// Request to cancel procedure which need password. 
CANCEL = 2 } Result; virtual Result Get(std::string& password, int minsize, int maxsize) = 0; virtual ~PasswordSource(void) { }; }; /// No password class PasswordSourceNone: public PasswordSource { public: virtual Result Get(std::string& password, int minsize, int maxsize); }; /// Obtain password from a string class PasswordSourceString: public PasswordSource { private: std::string password_; public: PasswordSourceString(const std::string& password); virtual Result Get(std::string& password, int minsize, int maxsize); }; /// Obtain password from stream class PasswordSourceStream: public PasswordSource { private: std::istream* password_; public: PasswordSourceStream(std::istream* password); virtual Result Get(std::string& password, int minsize, int maxsize); }; /// Obtain password through OpenSSL user interface class PasswordSourceInteractive: public PasswordSource { private: std::string prompt_; bool verify_; public: PasswordSourceInteractive(const std::string& prompt, bool verify); virtual Result Get(std::string& password, int minsize, int maxsize); }; /** @} */ } // namespace Arc #endif // __ARC_PASSWORD_SOURCE_H__ nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/PasswordSource.cpp0000644000000000000000000000012412223224441025252 xustar000000000000000027 mtime=1380788513.247154 27 atime=1513200574.610703 30 ctime=1513200659.136736928 nordugrid-arc-5.4.2/src/hed/libs/credential/PasswordSource.cpp0000644000175000002070000000730212223224441025321 0ustar00mockbuildmock00000000000000#include #include #include #include #include "PasswordSource.h" namespace Arc { static int ssl_err_cb(const char *str, size_t, void *u) { std::cerr << "OpenSSL error string: " << str << std::endl; return 1; } PasswordSource::Result PasswordSourceNone::Get(std::string& password, int minsize, int maxsize) { return NO_PASSWORD; } PasswordSourceString::PasswordSourceString(const std::string& password): password_(password) { } PasswordSource::Result PasswordSourceString::Get(std::string& password, int minsize, int maxsize) { password = password_; return PASSWORD; } PasswordSourceStream::PasswordSourceStream(std::istream* password): password_(password) { } PasswordSource::Result PasswordSourceStream::Get(std::string& password, int minsize, int maxsize) { if(!password_) return NO_PASSWORD; std::getline(*password_, password); return PASSWORD; } PasswordSourceInteractive::PasswordSourceInteractive(const std::string& prompt, bool verify): prompt_(prompt),verify_(verify) { } PasswordSource::Result PasswordSourceInteractive::Get(std::string& password, int minsize, int maxsize) { UI *ui = UI_new(); if (!ui) return CANCEL; int res = 0; int ok = 0; char *buf1 = NULL; char *buf2 = NULL; int ui_flags = 0; char *prompt = NULL; int bufsiz = maxsize; if(bufsiz <= 0) bufsiz = 256; ++bufsiz; // for \0 prompt = UI_construct_prompt(ui, "pass phrase", prompt_.c_str()); ui_flags |= UI_INPUT_FLAG_DEFAULT_PWD; // UI_ctrl(ui, UI_CTRL_PRINT_ERRORS, 1, 0, 0); if (ok >= 0) { ok = -1; if((buf1 = (char *)OPENSSL_malloc(bufsiz)) != NULL) { memset(buf1,0,(unsigned int)bufsiz); ok = UI_add_input_string(ui,prompt,ui_flags,buf1,(minsize>0)?minsize:0,bufsiz-1); } } if (ok >= 0 && verify_) { ok = -1; if((buf2 = (char *)OPENSSL_malloc(bufsiz)) != NULL) { memset(buf2,0,(unsigned int)bufsiz); ok = UI_add_verify_string(ui,prompt,ui_flags,buf2,(minsize>0)?minsize:0,bufsiz-1,buf1); } } if (ok >= 0) do { ok = UI_process(ui); if(ok == -2) break; // Abort request if(ok == -1) { // Password error unsigned long errcode = ERR_get_error(); const char* 
errstr = ERR_reason_error_string(errcode); if(errstr == NULL) { std::cerr << "Password input error - code " << errcode << std::endl; } else if(strstr(errstr,"result too small")) { std::cerr << "Password is too short, need at least " << minsize << " charcters" << std::endl; } else if(strstr(errstr,"result too large")) { std::cerr << "Password is too long, need at most " << bufsiz-1 << " characters" << std::endl; } else { std::cerr << errstr << std::endl; }; }; } while (ok < 0 && UI_ctrl(ui, UI_CTRL_IS_REDOABLE, 0, 0, 0)); if (buf2){ memset(buf2,0,(unsigned int)bufsiz); OPENSSL_free(buf2); } if (ok >= 0) { if(buf1) { buf1[bufsiz-1] = 0; res = strlen(buf1); password.assign(buf1,res); } } if (buf1){ memset(buf1,0,(unsigned int)bufsiz); OPENSSL_free(buf1); } if (ok == -1){ std::cerr << "User interface error" << std::endl; ERR_print_errors_cb(&ssl_err_cb, NULL); memset((void*)password.c_str(),0,(unsigned int)password.length()); password.resize(0); res = 0; } else if (ok == -2) { memset((void*)password.c_str(),0,(unsigned int)password.length()); password.resize(0); res = 0; } UI_free(ui); OPENSSL_free(prompt); return (res>0)?PasswordSource::PASSWORD:PasswordSource::CANCEL; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/listfunc.cpp0000644000000000000000000000012413103157342024121 xustar000000000000000027 mtime=1494015714.810493 27 atime=1513200574.629703 30 ctime=1513200659.138736952 nordugrid-arc-5.4.2/src/hed/libs/credential/listfunc.cpp0000644000175000002070000000141713103157342024171 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include namespace ArcCredential { char **listadd(char **vect, char *data, int size) { int i = 0; char **newvect; if (!data || (size <= 0)) return NULL; if (vect) while (vect[i++]) ; else i=1; if ((newvect = (char **)malloc((i+1)*size))) { if (vect) { memcpy(newvect, vect, (size*(i-1))); newvect[i-1] = data; newvect[i] = NULL; free(vect); } else { newvect[0] = data; newvect[1] = NULL; } return newvect; } return NULL; } void listfree(char **vect, void (*f)(void *)) { char **tmp = vect; if (tmp) { int i = 0; while (tmp[i]) f(tmp[i++]); free(vect); } } } // namespace ArcCredential nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/NSSUtil.h0000644000000000000000000000012412675602216023250 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.612703 30 ctime=1513200659.131736867 nordugrid-arc-5.4.2/src/hed/libs/credential/NSSUtil.h0000644000175000002070000000435012675602216023317 0ustar00mockbuildmock00000000000000#ifndef __ARC_NSSUTIL_H__ #define __ARC_NSSUTIL_H__ #include #include #include /// Code for handling Network Security Services (NSS) credentials. namespace ArcAuthNSS { /// NSS certificate information. 
struct certInfo { std::string certname; std::string subject_dn; std::string issuer_dn; unsigned long serial; Arc::Time start; Arc::Time end; }; /** * Initializes nss library * @param configdir full path to the nss db */ bool nssInit(const std::string& configdir); bool nssExportCertificate(const std::string& certname, const std::string& certfile); bool nssOutputPKCS12(const std::string certname, char* outfile, char* slotpw, char* p12pw); bool nssOutputPKCS12(const std::string certname, char* outfile, Arc::PasswordSource& passphrase, Arc::PasswordSource& p12passphrase); bool nssGenerateCSR(const std::string& privkey_name, const std::string& dn, const char* slotpw, const std::string& outfile, std::string& privk_str, bool ascii = true); bool nssGenerateCSR(const std::string& privkey_name, const std::string& dn, Arc::PasswordSource& passphrase, const std::string& outfile, std::string& privk_str, bool ascii = true); void nssListUserCertificatesInfo(std::list& certInfolist); bool nssCreateCert(const std::string& csrfile, const std::string& issuername, const char* passwd, const int duration, const std::string& vomsacseq, std::string& outfile, bool ascii = true); bool nssImportCertAndPrivateKey(char* slotpw, const std::string& keyfile, const std::string& keyname, const std::string& certfile, const std::string& certname, const char* trusts = NULL, bool ascii = true); bool nssImportCertAndPrivateKey(Arc::PasswordSource& passphrase, const std::string& keyfile, const std::string& keyname, const std::string& certfile, const std::string& certname, const char* trusts = NULL, bool ascii = true); bool nssImportCert(char* slotpw, const std::string& certfile, const std::string& name, const char* trusts = NULL, bool ascii = true); bool nssImportCert(Arc::PasswordSource& passphrase, const std::string& certfile, const std::string& name, const char* trusts = NULL, bool ascii = true); } #endif /*__ARC_NSSUTIL_H__*/ nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/listfunc.h0000644000000000000000000000012413103157342023566 xustar000000000000000027 mtime=1494015714.810493 27 atime=1513200574.611703 30 ctime=1513200659.139736964 nordugrid-arc-5.4.2/src/hed/libs/credential/listfunc.h0000644000175000002070000000206613103157342023637 0ustar00mockbuildmock00000000000000/**Borrow the code about list operation from VOMS, specially for list of Attribute Certificate. * Because the AC** operator is required for i2d_AC() and d2i_AC() method, and implicitly required by OpenSSL code, * some other solution like std::list is not suitable here. */ /**The listfunc.h and listfunc.h are introduced from code written by VOMS project, *so here the original license follows. */ #ifndef ARC_LISTFUNC_H #define ARC_LISTFUNC_H namespace ArcCredential { #include typedef void (*freefn)(void *); /* Merges element data to NULL-terminated array vect. Returns pointer to new merged array. Old vect array is destroyed. size is the size of array element and for safety should always be sizeof(char*) */ extern char **listadd(char **vect, char *data, int size); /* Frees memory associated with array vect all data which its elements are pointing to. For freeing pointed data supplied function f is used. On exit vect array is destroyed. 
*/ extern void listfree(char **vect, freefn f); } // namespace ArcCredential #endif nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/nssprivkeyinfocodec.cpp0000644000000000000000000000012412675602216026371 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.627703 30 ctime=1513200659.143737013 nordugrid-arc-5.4.2/src/hed/libs/credential/nssprivkeyinfocodec.cpp0000644000175000002070000002700512675602216026442 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "nssprivkeyinfocodec.h" // This file manually encodes and decodes RSA private keys using PrivateKeyInfo // from PKCS #8 and RSAPrivateKey from PKCS #1. These structures are: // // PrivateKeyInfo ::= SEQUENCE { // version Version, // privateKeyAlgorithm PrivateKeyAlgorithmIdentifier, // privateKey PrivateKey, // attributes [0] IMPLICIT Attributes OPTIONAL // } // // RSAPrivateKey ::= SEQUENCE { // version Version, // modulus INTEGER, // publicExponent INTEGER, // privateExponent INTEGER, // prime1 INTEGER, // prime2 INTEGER, // exponent1 INTEGER, // exponent2 INTEGER, // coefficient INTEGER // } namespace ArcAuthNSS { const uint8 PrivateKeyInfoCodec::kRsaAlgorithmIdentifier[] = { 0x30, 0x0D, 0x06, 0x09, 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x01, 0x01, 0x05, 0x00 }; PrivateKeyInfoCodec::PrivateKeyInfoCodec(bool big_endian) : big_endian_(big_endian) {} PrivateKeyInfoCodec::~PrivateKeyInfoCodec() {} bool PrivateKeyInfoCodec::Export(std::vector* output) { std::list content; // Version (always zero) uint8 version = 0; PrependInteger(coefficient_, &content); PrependInteger(exponent2_, &content); PrependInteger(exponent1_, &content); PrependInteger(prime2_, &content); PrependInteger(prime1_, &content); PrependInteger(private_exponent_, &content); PrependInteger(public_exponent_, &content); PrependInteger(modulus_, &content); PrependInteger(&version, 1, &content); PrependTypeHeaderAndLength(kSequenceTag, content.size(), &content); PrependTypeHeaderAndLength(kOctetStringTag, content.size(), &content); // RSA algorithm OID for (size_t i = sizeof(kRsaAlgorithmIdentifier); i > 0; --i) content.push_front(kRsaAlgorithmIdentifier[i - 1]); PrependInteger(&version, 1, &content); PrependTypeHeaderAndLength(kSequenceTag, content.size(), &content); // Copy everying into the output. output->reserve(content.size()); output->assign(content.begin(), content.end()); return true; } bool PrivateKeyInfoCodec::ExportPublicKeyInfo(std::vector* output) { // Create a sequence with the modulus (n) and public exponent (e). std::vector bit_string; if (!ExportPublicKey(&bit_string)) return false; // Add the sequence as the contents of a bit string. std::list content; PrependBitString(&bit_string[0], static_cast(bit_string.size()), &content); // Add the RSA algorithm OID. for (size_t i = sizeof(kRsaAlgorithmIdentifier); i > 0; --i) content.push_front(kRsaAlgorithmIdentifier[i - 1]); // Finally, wrap everything in a sequence. PrependTypeHeaderAndLength(kSequenceTag, content.size(), &content); // Copy everything into the output. output->reserve(content.size()); output->assign(content.begin(), content.end()); return true; } bool PrivateKeyInfoCodec::ExportPublicKey(std::vector* output) { // Create a sequence with the modulus (n) and public exponent (e). 
std::list content; PrependInteger(&public_exponent_[0], static_cast(public_exponent_.size()), &content); PrependInteger(&modulus_[0], static_cast(modulus_.size()), &content); PrependTypeHeaderAndLength(kSequenceTag, content.size(), &content); // Copy everything into the output. output->reserve(content.size()); output->assign(content.begin(), content.end()); return true; } bool PrivateKeyInfoCodec::Import(const std::vector& input) { if (input.empty()) { return false; } // Parse the private key info up to the public key values, ignoring // the subsequent private key values. uint8* src = const_cast(&input.front()); uint8* end = src + input.size(); if (!ReadSequence(&src, end) || !ReadVersion(&src, end) || !ReadAlgorithmIdentifier(&src, end) || !ReadTypeHeaderAndLength(&src, end, kOctetStringTag, NULL) || !ReadSequence(&src, end) || !ReadVersion(&src, end) || !ReadInteger(&src, end, &modulus_)) return false; int mod_size = modulus_.size(); if(mod_size % 2 != 0) return false;; int primes_size = mod_size / 2; if (!ReadIntegerWithExpectedSize(&src, end, 4, &public_exponent_) || !ReadIntegerWithExpectedSize(&src, end, mod_size, &private_exponent_) || !ReadIntegerWithExpectedSize(&src, end, primes_size, &prime1_) || !ReadIntegerWithExpectedSize(&src, end, primes_size, &prime2_) || !ReadIntegerWithExpectedSize(&src, end, primes_size, &exponent1_) || !ReadIntegerWithExpectedSize(&src, end, primes_size, &exponent2_) || !ReadIntegerWithExpectedSize(&src, end, primes_size, &coefficient_)) return false; if(src != end) return false; return true; } void PrivateKeyInfoCodec::PrependInteger(const std::vector& in, std::list* out) { uint8* ptr = const_cast(&in.front()); PrependIntegerImpl(ptr, in.size(), out, big_endian_); } // Helper to prepend an ASN.1 integer. void PrivateKeyInfoCodec::PrependInteger(uint8* val, int num_bytes, std::list* data) { PrependIntegerImpl(val, num_bytes, data, big_endian_); } void PrivateKeyInfoCodec::PrependIntegerImpl(uint8* val, int num_bytes, std::list* data, bool big_endian) { // Reverse input if little-endian. std::vector tmp; if (!big_endian) { tmp.assign(val, val + num_bytes); std::reverse(tmp.begin(), tmp.end()); val = &tmp.front(); } // ASN.1 integers are unpadded byte arrays, so skip any null padding bytes // from the most-significant end of the integer. int start = 0; while (start < (num_bytes - 1) && val[start] == 0x00) { start++; num_bytes--; } PrependBytes(val, start, num_bytes, data); // ASN.1 integers are signed. To encode a positive integer whose sign bit // (the most significant bit) would otherwise be set and make the number // negative, ASN.1 requires a leading null byte to force the integer to be // positive. uint8 front = data->front(); if ((front & 0x80) != 0) { data->push_front(0x00); num_bytes++; } PrependTypeHeaderAndLength(kIntegerTag, num_bytes, data); } bool PrivateKeyInfoCodec::ReadInteger(uint8** pos, uint8* end, std::vector* out) { return ReadIntegerImpl(pos, end, out, big_endian_); } bool PrivateKeyInfoCodec::ReadIntegerWithExpectedSize(uint8** pos, uint8* end, size_t expected_size, std::vector* out) { std::vector temp; if (!ReadIntegerImpl(pos, end, &temp, true)) // Big-Endian return false; int pad = expected_size - temp.size(); int index = 0; if (out->size() == expected_size + 1) { if(out->front() != 0x00) return false;; pad++; index++; } else { if(!(out->size() <= expected_size)) return false; } out->insert(out->end(), pad, 0x00); out->insert(out->end(), temp.begin(), temp.end()); // Reverse output if little-endian. 
if (!big_endian_) std::reverse(out->begin(), out->end()); return true; } bool PrivateKeyInfoCodec::ReadIntegerImpl(uint8** pos, uint8* end, std::vector* out, bool big_endian) { uint32 length = 0; if (!ReadTypeHeaderAndLength(pos, end, kIntegerTag, &length) || !length) return false; // The first byte can be zero to force positiveness. We can ignore this. if (**pos == 0x00) { ++(*pos); --length; } if (length) out->insert(out->end(), *pos, (*pos) + length); (*pos) += length; // Reverse output if little-endian. if (!big_endian) std::reverse(out->begin(), out->end()); return true; } void PrivateKeyInfoCodec::PrependBytes(uint8* val, int start, int num_bytes, std::list* data) { while (num_bytes > 0) { --num_bytes; data->push_front(val[start + num_bytes]); } } void PrivateKeyInfoCodec::PrependLength(size_t size, std::list* data) { // The high bit is used to indicate whether additional octets are needed to // represent the length. if (size < 0x80) { data->push_front(static_cast(size)); } else { uint8 num_bytes = 0; while (size > 0) { data->push_front(static_cast(size & 0xFF)); size >>= 8; num_bytes++; } if(!(num_bytes <= 4)) std::cerr<<"num of bytes if bigger than 4"<push_front(0x80 | num_bytes); } } void PrivateKeyInfoCodec::PrependTypeHeaderAndLength(uint8 type, uint32 length, std::list* output) { PrependLength(length, output); output->push_front(type); } void PrivateKeyInfoCodec::PrependBitString(uint8* val, int num_bytes, std::list* output) { // Start with the data. PrependBytes(val, 0, num_bytes, output); // Zero unused bits. output->push_front(0); // Add the length. PrependLength(num_bytes + 1, output); // Finally, add the bit string tag. output->push_front((uint8) kBitStringTag); } bool PrivateKeyInfoCodec::ReadLength(uint8** pos, uint8* end, uint32* result) { if(!(*pos < end)) return false; int length = 0; // If the MSB is not set, the length is just the byte itself. if (!(**pos & 0x80)) { length = **pos; (*pos)++; } else { // Otherwise, the lower 7 indicate the length of the length. int length_of_length = **pos & 0x7F; if(!(length_of_length <= 4)) return false; (*pos)++; if(!(*pos + length_of_length < end)) return false; length = 0; for (int i = 0; i < length_of_length; ++i) { length <<= 8; length |= **pos; (*pos)++; } } if(!(*pos + length <= end)) return false; if (result) *result = length; return true; } bool PrivateKeyInfoCodec::ReadTypeHeaderAndLength(uint8** pos, uint8* end, uint8 expected_tag, uint32* length) { if(!(*pos < end)) return false; if(!(**pos == expected_tag)) return false; (*pos)++; return ReadLength(pos, end, length); } bool PrivateKeyInfoCodec::ReadSequence(uint8** pos, uint8* end) { return ReadTypeHeaderAndLength(pos, end, kSequenceTag, NULL); } bool PrivateKeyInfoCodec::ReadAlgorithmIdentifier(uint8** pos, uint8* end) { if(!(*pos + sizeof(kRsaAlgorithmIdentifier) < end)) return false; if(!(memcmp(*pos, kRsaAlgorithmIdentifier, sizeof(kRsaAlgorithmIdentifier)) == 0)) return false; (*pos) += sizeof(kRsaAlgorithmIdentifier); return true; } bool PrivateKeyInfoCodec::ReadVersion(uint8** pos, uint8* end) { uint32 length = 0; if (!ReadTypeHeaderAndLength(pos, end, kIntegerTag, &length)) return false; // The version should be zero. 
for (uint32 i = 0; i < length; ++i) { if(!(**pos == 0x00)) return false; (*pos)++; } return true; } } nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/nssprivkeyinfocodec.h0000644000000000000000000000012412675602216026036 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.611703 30 ctime=1513200659.132736879 nordugrid-arc-5.4.2/src/hed/libs/credential/nssprivkeyinfocodec.h0000644000175000002070000001351612675602216026111 0ustar00mockbuildmock00000000000000//The following code is for for serializing and deserializing // PKCS #8 PrivateKeyInfo and PublicKeyInfo //This part of code is introduced from chromium, therefore the BSD license applies /// \cond #include #include namespace ArcAuthNSS { typedef unsigned int uint32; typedef int int32; typedef unsigned short uint16; typedef short int16; typedef unsigned char uint8; typedef signed char int8; /* struct SECKEYPrivateKeyStr; struct SECKEYPublicKeyStr; */ class PrivateKeyInfoCodec { public: // ASN.1 encoding of the AlgorithmIdentifier from PKCS #8. static const uint8 kRsaAlgorithmIdentifier[]; // ASN.1 tags for some types we use. static const uint8 kBitStringTag = 0x03; static const uint8 kIntegerTag = 0x02; static const uint8 kNullTag = 0x05; static const uint8 kOctetStringTag = 0x04; static const uint8 kSequenceTag = 0x30; // |big_endian| here specifies the byte-significance of the integer components // that will be parsed & serialized (modulus(), etc...) during Import(), // Export() and ExportPublicKeyInfo() -- not the ASN.1 DER encoding of the // PrivateKeyInfo/PublicKeyInfo (which is always big-endian). explicit PrivateKeyInfoCodec(bool big_endian); ~PrivateKeyInfoCodec(); // Exports the contents of the integer components to the ASN.1 DER encoding // of the PrivateKeyInfo structure to |output|. bool Export(std::vector* output); // Exports the contents of the integer components to the ASN.1 DER encoding // of the PublicKeyInfo structure to |output|. bool ExportPublicKeyInfo(std::vector* output); // Exports the contents of the integer components to the ASN.1 DER encoding // of the RSAPublicKey structure to |output|. bool ExportPublicKey(std::vector* output); // Parses the ASN.1 DER encoding of the PrivateKeyInfo structure in |input| // and populates the integer components with |big_endian_| byte-significance. // IMPORTANT NOTE: This is currently *not* security-approved for importing // keys from unstrusted sources. bool Import(const std::vector& input); // Accessors to the contents of the integer components of the PrivateKeyInfo structure. std::vector* modulus() { return &modulus_; }; std::vector* public_exponent() { return &public_exponent_; }; std::vector* private_exponent() { return &private_exponent_; }; std::vector* prime1() { return &prime1_; }; std::vector* prime2() { return &prime2_; }; std::vector* exponent1() { return &exponent1_; }; std::vector* exponent2() { return &exponent2_; }; std::vector* coefficient() { return &coefficient_; }; private: // Utility wrappers for PrependIntegerImpl that use the class's |big_endian_| // value. void PrependInteger(const std::vector& in, std::list* out); void PrependInteger(uint8* val, int num_bytes, std::list* data); // Prepends the integer stored in |val| - |val + num_bytes| with |big_endian| // byte-significance into |data| as an ASN.1 integer. void PrependIntegerImpl(uint8* val, int num_bytes, std::list* data, bool big_endian); // Utility wrappers for ReadIntegerImpl that use the class's |big_endian_| // value. 
bool ReadInteger(uint8** pos, uint8* end, std::vector* out); bool ReadIntegerWithExpectedSize(uint8** pos, uint8* end, size_t expected_size, std::vector* out); // Reads an ASN.1 integer from |pos|, and stores the result into |out| with // |big_endian| byte-significance. bool ReadIntegerImpl(uint8** pos, uint8* end, std::vector* out, bool big_endian); // Prepends the integer stored in |val|, starting a index |start|, for // |num_bytes| bytes onto |data|. void PrependBytes(uint8* val, int start, int num_bytes, std::list* data); // Helper to prepend an ASN.1 length field. void PrependLength(size_t size, std::list* data); // Helper to prepend an ASN.1 type header. void PrependTypeHeaderAndLength(uint8 type, uint32 length, std::list* output); // Helper to prepend an ASN.1 bit string void PrependBitString(uint8* val, int num_bytes, std::list* output); // Read an ASN.1 length field. This also checks that the length does not // extend beyond |end|. bool ReadLength(uint8** pos, uint8* end, uint32* result); // Read an ASN.1 type header and its length. bool ReadTypeHeaderAndLength(uint8** pos, uint8* end, uint8 expected_tag, uint32* length); // Read an ASN.1 sequence declaration. This consumes the type header and // length field, but not the contents of the sequence. bool ReadSequence(uint8** pos, uint8* end); // Read the RSA AlgorithmIdentifier. bool ReadAlgorithmIdentifier(uint8** pos, uint8* end); // Read one of the two version fields in PrivateKeyInfo. bool ReadVersion(uint8** pos, uint8* end); // The byte-significance of the stored components (modulus, etc..). bool big_endian_; // Component integers of the PrivateKeyInfo std::vector modulus_; std::vector public_exponent_; std::vector private_exponent_; std::vector prime1_; std::vector prime2_; std::vector exponent1_; std::vector exponent2_; std::vector coefficient_; }; } /// \endcond nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/VOMSAttribute.cpp0000644000000000000000000000012413103157342024742 xustar000000000000000027 mtime=1494015714.810493 27 atime=1513200574.585702 30 ctime=1513200659.140736977 nordugrid-arc-5.4.2/src/hed/libs/credential/VOMSAttribute.cpp0000644000175000002070000003440713103157342025017 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "VOMSAttribute.h" #include "VOMSUtil.h" namespace ArcCredential { IMPLEMENT_ASN1_FUNCTIONS(AC_DIGEST) ASN1_SEQUENCE(AC_DIGEST) = { ASN1_SIMPLE(AC_DIGEST, type, ASN1_ENUMERATED), ASN1_SIMPLE(AC_DIGEST, oid, ASN1_OBJECT), ASN1_SIMPLE(AC_DIGEST, algor, X509_ALGOR), ASN1_SIMPLE(AC_DIGEST, digest, ASN1_BIT_STRING) } ASN1_SEQUENCE_END(AC_DIGEST) IMPLEMENT_ASN1_FUNCTIONS(AC_IS) ASN1_SEQUENCE(AC_IS) = { ASN1_SIMPLE(AC_IS, issuer, GENERAL_NAMES), ASN1_SIMPLE(AC_IS, serial, ASN1_INTEGER), ASN1_IMP_OPT(AC_IS, uid, ASN1_BIT_STRING, V_ASN1_BIT_STRING) } ASN1_SEQUENCE_END(AC_IS) IMPLEMENT_ASN1_FUNCTIONS(AC_FORM) ASN1_SEQUENCE(AC_FORM) = { ASN1_SIMPLE(AC_FORM, names, GENERAL_NAMES), ASN1_IMP_OPT(AC_FORM, is, AC_IS, 0), ASN1_IMP_OPT(AC_FORM, digest, AC_DIGEST, 1) } ASN1_SEQUENCE_END(AC_FORM) IMPLEMENT_ASN1_FUNCTIONS(AC_ACI) ASN1_SEQUENCE(AC_ACI) = { ASN1_IMP_OPT(AC_ACI, form, AC_FORM, 0) } ASN1_SEQUENCE_END(AC_ACI) IMPLEMENT_ASN1_FUNCTIONS(AC_HOLDER) ASN1_SEQUENCE(AC_HOLDER) = { ASN1_IMP_OPT(AC_HOLDER, baseid, AC_IS, 0), ASN1_IMP_OPT(AC_HOLDER, name, GENERAL_NAMES, 1), ASN1_IMP_OPT(AC_HOLDER, digest, AC_DIGEST, 2) } ASN1_SEQUENCE_END(AC_HOLDER) IMPLEMENT_ASN1_FUNCTIONS(AC_VAL) ASN1_SEQUENCE(AC_VAL) = { ASN1_SIMPLE(AC_VAL, notBefore, ASN1_GENERALIZEDTIME), 
ASN1_SIMPLE(AC_VAL, notAfter, ASN1_GENERALIZEDTIME) } ASN1_SEQUENCE_END(AC_VAL) IMPLEMENT_ASN1_FUNCTIONS(AC_IETFATTR) ASN1_SEQUENCE(AC_IETFATTR) = { ASN1_IMP_SEQUENCE_OF_OPT(AC_IETFATTR, names, GENERAL_NAME, 0), ASN1_SEQUENCE_OF(AC_IETFATTR, values, ASN1_ANY) } ASN1_SEQUENCE_END(AC_IETFATTR) /*ASN1_IMP_OPT(AC_IETFATTR, names, GENERAL_NAMES, 0),*/ /* ASN1_SEQUENCE_OF(AC_IETFATTR, values, AC_IETFATTRVAL) */ IMPLEMENT_ASN1_FUNCTIONS(AC_TARGET) ASN1_SEQUENCE(AC_TARGET) = { ASN1_EXP_OPT(AC_TARGET, name, GENERAL_NAME, 0), ASN1_EXP_OPT(AC_TARGET, group, GENERAL_NAME, 1), ASN1_EXP_OPT(AC_TARGET, cert, AC_IS, 2) } ASN1_SEQUENCE_END(AC_TARGET) IMPLEMENT_ASN1_FUNCTIONS(AC_TARGETS) ASN1_SEQUENCE(AC_TARGETS) = { ASN1_SEQUENCE_OF(AC_TARGETS, targets, AC_TARGET) } ASN1_SEQUENCE_END(AC_TARGETS) IMPLEMENT_ASN1_FUNCTIONS(AC_ATTR) ASN1_SEQUENCE(AC_ATTR) = { ASN1_SIMPLE(AC_ATTR, type, ASN1_OBJECT), ASN1_SET_OF_OPT(AC_ATTR, ietfattr, AC_IETFATTR) /* if (!i2t_ASN1_OBJECT(text,999,a->type)) return 0; else if (!((strcmp(text, "idacagroup") == 0) || (strcmp(text,"idatcap") == 0))) return 0; ASN1_OBJECT * type; int get_type; STACK_OF(AC_IETFATTR) *ietfattr; STACK_OF(AC_FULL_ATTRIBUTES) *fullattributes; */ } ASN1_SEQUENCE_END(AC_ATTR) IMPLEMENT_ASN1_FUNCTIONS(AC_INFO) ASN1_SEQUENCE(AC_INFO) = { ASN1_SIMPLE(AC_INFO, version, ASN1_INTEGER), ASN1_SIMPLE(AC_INFO, holder, AC_HOLDER), ASN1_IMP_OPT(AC_INFO, form, AC_FORM, 0), /*V_ASN1_SEQUENCE*/ ASN1_SIMPLE(AC_INFO, alg, X509_ALGOR), ASN1_SIMPLE(AC_INFO, serial, ASN1_INTEGER), ASN1_SIMPLE(AC_INFO, validity, AC_VAL), ASN1_SEQUENCE_OF(AC_INFO, attrib, AC_ATTR), ASN1_IMP_OPT(AC_INFO, id, ASN1_BIT_STRING, V_ASN1_BIT_STRING), ASN1_SEQUENCE_OF_OPT(AC_INFO, exts, X509_EXTENSION) } ASN1_SEQUENCE_END(AC_INFO) IMPLEMENT_ASN1_FUNCTIONS(AC) ASN1_SEQUENCE(AC) = { ASN1_SIMPLE(AC, acinfo, AC_INFO), ASN1_SIMPLE(AC, sig_alg, X509_ALGOR), ASN1_SIMPLE(AC, signature, ASN1_BIT_STRING) } ASN1_SEQUENCE_END(AC) IMPLEMENT_ASN1_FUNCTIONS(AC_SEQ) ASN1_SEQUENCE(AC_SEQ) = { ASN1_SEQUENCE_OF(AC_SEQ, acs, AC) } ASN1_SEQUENCE_END(AC_SEQ) IMPLEMENT_ASN1_FUNCTIONS(AC_CERTS) ASN1_SEQUENCE(AC_CERTS) = { ASN1_SEQUENCE_OF(AC_CERTS, stackcert, X509) } ASN1_SEQUENCE_END(AC_CERTS) IMPLEMENT_ASN1_FUNCTIONS(AC_ATTRIBUTE) ASN1_SEQUENCE(AC_ATTRIBUTE) = { ASN1_SIMPLE(AC_ATTRIBUTE, name, ASN1_OCTET_STRING), ASN1_SIMPLE(AC_ATTRIBUTE, value, ASN1_OCTET_STRING), ASN1_SIMPLE(AC_ATTRIBUTE, qualifier, ASN1_OCTET_STRING) } ASN1_SEQUENCE_END(AC_ATTRIBUTE) IMPLEMENT_ASN1_FUNCTIONS(AC_ATT_HOLDER) ASN1_SEQUENCE(AC_ATT_HOLDER) = { ASN1_SIMPLE(AC_ATT_HOLDER, grantor, GENERAL_NAMES), ASN1_SEQUENCE_OF(AC_ATT_HOLDER, attributes, AC_ATTRIBUTE) } ASN1_SEQUENCE_END(AC_ATT_HOLDER) IMPLEMENT_ASN1_FUNCTIONS(AC_FULL_ATTRIBUTES) ASN1_SEQUENCE(AC_FULL_ATTRIBUTES) = { ASN1_SEQUENCE_OF(AC_FULL_ATTRIBUTES, providers, AC_ATT_HOLDER) } ASN1_SEQUENCE_END(AC_FULL_ATTRIBUTES) static char *norep() { static char buffer[] = ""; return buffer; } /* char *acseq_i2s(struct v3_ext_method*, void* data) { AC **aclist = NULL; AC *item = NULL; AC_SEQ *seq = (AC_SEQ*)data; if(!seq) return NULL; int num = sk_AC_num(seq->acs); if(num > 0) aclist = (AC **)OPENSSL_malloc(num * sizeof(AC*)); for (int i =0; i < num; i++) { item = sk_AC_value(seq->acs, i); // AC itself is not duplicated aclist[i] = item; } if(aclist == NULL) return NULL; return (char *)aclist; // return norep(); } */ char *acseq_i2s(struct v3_ext_method*, void* data) { AC_SEQ* acseq = NULL; acseq = (AC_SEQ *)data; if(!acseq) return NULL; std::string encoded_acseq; AC *item = NULL; int num = 
sk_AC_num(acseq->acs); for (int i =0; i < num; i++) { item = sk_AC_value(acseq->acs, i); unsigned int len = i2d_AC(item, NULL); unsigned char *tmp = (unsigned char *)OPENSSL_malloc(len); std::string ac_str; if(tmp) { unsigned char *ttmp = tmp; i2d_AC(item, &ttmp); //ac_str = std::string((char *)tmp, len); ac_str.append((const char*)tmp, len); free(tmp); } // encode the AC string int size; char* enc = NULL; std::string encodedac; enc = Arc::VOMSEncode((char*)(ac_str.c_str()), ac_str.length(), &size); if (enc != NULL) { encodedac.append(enc, size); free(enc); enc = NULL; } encoded_acseq.append(VOMS_AC_HEADER).append("\n"); encoded_acseq.append(encodedac).append("\n"); encoded_acseq.append(VOMS_AC_TRAILER).append("\n"); } char* ret = NULL; int len = encoded_acseq.length(); if(len) { ret = (char*)OPENSSL_malloc(len + 1); memset(ret, 0, len + 1); memcpy(ret, encoded_acseq.c_str(), len); /* ret = (char*)OPENSSL_malloc(len); strncpy(ret, encoded_acseq.c_str(), len); */ } return (char *) ret; } char *targets_i2s(struct v3_ext_method*, void*) { return norep(); } char *certs_i2s(struct v3_ext_method*, void*) { return norep(); } char *null_i2s(struct v3_ext_method*, void*) { return norep(); } char *attributes_i2s(struct v3_ext_method*, void*) { return norep(); } /* void *acseq_s2i(struct v3_ext_method*, struct v3_ext_ctx*, char *data) { AC **list = (AC **)data; AC_SEQ *a; if (!list) return NULL; a = AC_SEQ_new(); while (*list) sk_AC_push(a->acs, *list++); return (void *)a; } */ void *acseq_s2i(struct v3_ext_method*, struct v3_ext_ctx*, char *data) { AC_SEQ* acseq = NULL; AC** aclist = NULL; std::string acseq_str; std::string ac_str; if(data == NULL) return NULL; acseq_str = data; std::string::size_type pos1 = 0, pos2 = 0; while(pos1 < acseq_str.length()) { pos1 = acseq_str.find(VOMS_AC_HEADER, pos1); if(pos1 == std::string::npos) break; pos1 = acseq_str.find_first_of("\r\n", pos1); if(pos1 == std::string::npos) break; pos2 = acseq_str.find(VOMS_AC_TRAILER, pos1); if(pos2 == std::string::npos) break; ac_str.clear(); ac_str = acseq_str.substr(pos1+1, (pos2-1) - (pos1+1)); pos2 = acseq_str.find_first_of("\r\n", pos2); if(pos2 == std::string::npos) pos2 = acseq_str.length(); pos1 = pos2+1; // decode the AC string int size; char* dec = NULL; std::string decodedac; dec = Arc::VOMSDecode((char*)(ac_str.c_str()), ac_str.length(), &size); if (dec != NULL) { decodedac.append(dec, size); free(dec); dec = NULL; } // TODO: is the ac order required? 
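  // Per the addVOMSAC() contract documented in VOMSUtil.h, each decoded block is
  // one DER-encoded AC: the call below parses it and appends the resulting AC*
  // to the NULL-terminated aclist array (an acorder string is produced as well
  // but is not used by this extension handler). The accumulated list is then
  // moved into a fresh AC_SEQ; if any push fails, the leftover ACs and the
  // partial AC_SEQ are released and NULL is returned.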
std::string acorder; Arc::addVOMSAC(aclist, acorder, decodedac); } if (!aclist) return NULL; AC** ac = aclist; acseq = AC_SEQ_new(); if(acseq) { while (*ac) { if(!sk_AC_push(acseq->acs, *ac)) break; ++ac; } } if((*ac) || (!acseq)) { // Not (all) ACs were transfered - error while (*ac) { AC_free(*ac); ++ac; }; free(aclist); if(acseq) AC_SEQ_free(acseq); return NULL; } free(aclist); return (void *)acseq; } void *targets_s2i(struct v3_ext_method*, struct v3_ext_ctx*, char *data) { char* list = strdup(data); char* pos = list; AC_TARGETS *a = AC_TARGETS_new(); while(pos) { char* cpos = strchr(pos, ','); if (cpos) *cpos = '\0'; { GENERAL_NAME *g = GENERAL_NAME_new(); ASN1_IA5STRING *tmpr = ASN1_IA5STRING_new(); AC_TARGET *targ = AC_TARGET_new(); if (!g || !tmpr || !targ) { GENERAL_NAME_free(g); ASN1_IA5STRING_free(tmpr); AC_TARGET_free(targ); goto err; } ASN1_STRING_set(tmpr, pos, strlen(list)); g->type = GEN_URI; g->d.ia5 = tmpr; targ->name = g; sk_AC_TARGET_push(a->targets, targ); } pos = cpos; if (pos) ++pos; }; free(list); return a; err: free(list); AC_TARGETS_free(a); return NULL; } void *certs_s2i(struct v3_ext_method*, struct v3_ext_ctx*, char *data) { STACK_OF(X509) *certs = (STACK_OF(X509) *)data; int i = 0; if (data) { AC_CERTS *a = AC_CERTS_new(); sk_X509_pop_free(a->stackcert, X509_free); a->stackcert = sk_X509_new_null(); /* a->stackcert = sk_X509_dup(certs); */ for (i =0; i < sk_X509_num(certs); i++) sk_X509_push(a->stackcert, X509_dup(sk_X509_value(certs, i))); return a; } return NULL; } void *attributes_s2i(struct v3_ext_method*, struct v3_ext_ctx*, char *data) { int i = 0; STACK_OF(AC_ATT_HOLDER) *stack = (STACK_OF(AC_ATT_HOLDER) *)data; if (data) { AC_FULL_ATTRIBUTES *a = AC_FULL_ATTRIBUTES_new(); sk_AC_ATT_HOLDER_pop_free(a->providers, AC_ATT_HOLDER_free); a->providers = sk_AC_ATT_HOLDER_new_null(); /* a->providers = sk_AC_ATT_HOLDER_dup(stack); */ for (i = 0; i < sk_AC_ATT_HOLDER_num(stack); i++) { sk_AC_ATT_HOLDER_push(a->providers, ASN1_dup_of(AC_ATT_HOLDER, i2d_AC_ATT_HOLDER, d2i_AC_ATT_HOLDER, sk_AC_ATT_HOLDER_value(stack, i))); }; return a; } return NULL; } void *null_s2i(struct v3_ext_method*, struct v3_ext_ctx*, char*) { return ASN1_NULL_new(); } char *authkey_i2s(struct v3_ext_method*, void*) { return norep(); } void *authkey_s2i(struct v3_ext_method*, struct v3_ext_ctx* ctx, char *data) { AUTHORITY_KEYID* keyid = NULL; X509* cert = ctx ? 
ctx->issuer_cert : NULL; if(cert) { ASN1_BIT_STRING* pkeystr = X509_get0_pubkey_bitstr(cert); if(pkeystr) { ASN1_OCTET_STRING *str = ASN1_OCTET_STRING_new(); if(str) { keyid = AUTHORITY_KEYID_new(); if(keyid) { char digest[21]; SHA1(pkeystr->data, pkeystr->length, (unsigned char*)digest); ASN1_OCTET_STRING_set(str, (unsigned char*)digest, 20); if(keyid->keyid) ASN1_OCTET_STRING_free(keyid->keyid); keyid->keyid = str; str = NULL; } if (str) ASN1_OCTET_STRING_free(str); } } } return keyid; } /* IMPL_STACK(AC_IETFATTR) IMPL_STACK(AC_IETFATTRVAL) IMPL_STACK(AC_ATTR) IMPL_STACK(AC) IMPL_STACK(AC_INFO) IMPL_STACK(AC_VAL) IMPL_STACK(AC_HOLDER) IMPL_STACK(AC_ACI) IMPL_STACK(AC_FORM) IMPL_STACK(AC_IS) IMPL_STACK(AC_DIGEST) IMPL_STACK(AC_TARGETS) IMPL_STACK(AC_TARGET) IMPL_STACK(AC_CERTS) IMPL_STACK(AC_ATTRIBUTE) IMPL_STACK(AC_ATT_HOLDER) IMPL_STACK(AC_FULL_ATTRIBUTES) */ X509V3_EXT_METHOD * VOMSAttribute_auth_x509v3_ext_meth() { static X509V3_EXT_METHOD vomsattribute_auth_x509v3_ext_meth = { -1, 0, NULL, (X509V3_EXT_NEW) AUTHORITY_KEYID_new, (X509V3_EXT_FREE) AUTHORITY_KEYID_free, (X509V3_EXT_D2I) d2i_AUTHORITY_KEYID, (X509V3_EXT_I2D) i2d_AUTHORITY_KEYID, (X509V3_EXT_I2S) authkey_i2s, (X509V3_EXT_S2I) authkey_s2i, NULL, NULL, NULL, NULL, NULL }; return (&vomsattribute_auth_x509v3_ext_meth); } X509V3_EXT_METHOD * VOMSAttribute_avail_x509v3_ext_meth() { static X509V3_EXT_METHOD vomsattribute_avail_x509v3_ext_meth = { -1, 0, NULL, (X509V3_EXT_NEW) ASN1_NULL_new, (X509V3_EXT_FREE) ASN1_NULL_free, (X509V3_EXT_D2I) d2i_ASN1_NULL, (X509V3_EXT_I2D) i2d_ASN1_NULL, (X509V3_EXT_I2S) null_i2s, (X509V3_EXT_S2I) null_s2i, NULL, NULL, NULL, NULL, NULL }; return (&vomsattribute_avail_x509v3_ext_meth); } X509V3_EXT_METHOD * VOMSAttribute_targets_x509v3_ext_meth() { static X509V3_EXT_METHOD vomsattribute_targets_x509v3_ext_meth = { -1, 0, NULL, (X509V3_EXT_NEW) AC_TARGETS_new, (X509V3_EXT_FREE) AC_TARGETS_free, (X509V3_EXT_D2I) d2i_AC_TARGETS, (X509V3_EXT_I2D) i2d_AC_TARGETS, (X509V3_EXT_I2S) targets_i2s, (X509V3_EXT_S2I) targets_s2i, NULL, NULL, NULL, NULL, NULL }; return (&vomsattribute_targets_x509v3_ext_meth); } X509V3_EXT_METHOD * VOMSAttribute_acseq_x509v3_ext_meth() { static X509V3_EXT_METHOD vomsattribute_acseq_x509v3_ext_meth = { -1, 0, NULL, (X509V3_EXT_NEW) AC_SEQ_new, (X509V3_EXT_FREE) AC_SEQ_free, (X509V3_EXT_D2I) d2i_AC_SEQ, (X509V3_EXT_I2D) i2d_AC_SEQ, (X509V3_EXT_I2S) acseq_i2s, (X509V3_EXT_S2I) acseq_s2i, NULL, NULL, NULL, NULL, NULL }; return (&vomsattribute_acseq_x509v3_ext_meth); } X509V3_EXT_METHOD * VOMSAttribute_certseq_x509v3_ext_meth() { static X509V3_EXT_METHOD vomsattribute_certseq_x509v3_ext_meth = { -1, 0, NULL, (X509V3_EXT_NEW) AC_CERTS_new, (X509V3_EXT_FREE) AC_CERTS_free, (X509V3_EXT_D2I) d2i_AC_CERTS, (X509V3_EXT_I2D) i2d_AC_CERTS, (X509V3_EXT_I2S) certs_i2s, (X509V3_EXT_S2I) certs_s2i, NULL, NULL, NULL, NULL, NULL }; return (&vomsattribute_certseq_x509v3_ext_meth); } X509V3_EXT_METHOD * VOMSAttribute_attribs_x509v3_ext_meth() { static X509V3_EXT_METHOD vomsattribute_attribs_x509v3_ext_meth = { -1, 0, NULL, (X509V3_EXT_NEW) AC_FULL_ATTRIBUTES_new, (X509V3_EXT_FREE) AC_FULL_ATTRIBUTES_free, (X509V3_EXT_D2I) d2i_AC_FULL_ATTRIBUTES, (X509V3_EXT_I2D) i2d_AC_FULL_ATTRIBUTES, (X509V3_EXT_I2S) attributes_i2s, (X509V3_EXT_S2I) attributes_s2i, NULL, NULL, NULL, NULL, NULL }; return (&vomsattribute_attribs_x509v3_ext_meth); } } //namespace ArcCredential nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/VOMSConfig.h0000644000000000000000000000012412372474121023655 
xustar000000000000000027 mtime=1407875153.714679 27 atime=1513200574.610703 30 ctime=1513200659.130736854 nordugrid-arc-5.4.2/src/hed/libs/credential/VOMSConfig.h0000644000175000002070000000326512372474121023730 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_VOMSCONFIG_H__ #define __ARC_VOMSCONFIG_H__ #include namespace Arc { class VOMSConfigLine { public: VOMSConfigLine(const std::string& line); operator bool(void); bool operator!(void); const std::string& Name() const; const std::string& Host() const; const std::string& Port() const; const std::string& Subject() const; const std::string& Alias() const; std::string Str() const; private: std::string name; std::string host; std::string port; std::string subject; std::string alias; }; class VOMSConfig { public: class filter { public: virtual bool match(const VOMSConfigLine& line) const; }; class iterator: private std::list::iterator { friend class VOMSConfig; public: iterator NextByName(void); iterator NextByAlias(void); iterator Next(const VOMSConfig::filter& lfilter); iterator& operator=(const iterator& it); operator bool(void) const; bool operator!(void) const; iterator(void); iterator(const iterator& it); VOMSConfigLine* operator->() const; private: std::list* list_; iterator(std::list& list, std::list::iterator it); }; VOMSConfig(const std::string& path, const filter& lfilter = filter()); operator bool(void) const; bool operator!(void) const; iterator FirstByName(const std::string name); iterator FirstByAlias(const std::string alias); iterator First(const filter& lfilter); private: std::list lines; bool AddPath(const std::string& path, int depth, const filter& lfilter); }; } // namespace Arc #endif // __ARC_VOMSCONFIG_H__ nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/Credential.h0000644000000000000000000000012413153454775024030 xustar000000000000000027 mtime=1504598525.713781 27 atime=1513200574.586702 30 ctime=1513200659.125736793 nordugrid-arc-5.4.2/src/hed/libs/credential/Credential.h0000644000175000002070000005556413153454775024114 0ustar00mockbuildmock00000000000000#ifndef __ARC_CREDENTIAL_H__ #define __ARC_CREDENTIAL_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace Arc { /** \defgroup credential Credential handling classes and functions. */ /// An exception class for the Credential class. /** This is an exception class that is used to handle runtime errors * discovered in the Credential class. * \ingroup credential * \headerfile Credential.h arc/credential/Credential.h */ class CredentialError : public std::runtime_error { public: // Constructor /** This is the constructor of the CredentialError class. * @param what An explanation of the error. */ CredentialError(const std::string& what=""); }; typedef enum {CRED_PEM, CRED_DER, CRED_PKCS, CRED_UNKNOWN} Credformat; /// Signal algorithm /** * \since Added in 4.0.0. **/ typedef enum { SIGN_DEFAULT = 0, SIGN_SHA1, SIGN_SHA224, SIGN_SHA256, SIGN_SHA384, SIGN_SHA512 } Signalgorithm; /**Logger to be used by all modules of credentials library*/ extern Logger CredentialLogger; /// Class for handling X509 credentials. 
/**Credential class covers the functionality about general processing about certificate/key * files, including: * -# certificate/key parsing, information extracting (such as subject name, * issuer name, lifetime, etc.), chain verifying, extension processing about proxy certinfo, * extension processing about other general certificate extension (such as voms attributes, * it should be the extension-specific code itself to create, parse and verify the extension, * not the Credential class. For voms, it is some code about writing and parsing voms-implementing * Attribute Certificate/ RFC3281, the voms-attribute is then be looked as a binary part and * embedded into extension of X509 certificate/proxy certificate); * -# certificate request, extension embedding and certificate signing, for both proxy certificate * and EEC (end entity certificate) certificate. * * The Credential class supports PEM, DER PKCS12 credentials. * \ingroup credential * \headerfile Credential.h arc/credential/Credential.h */ class Credential { public: /**Default constructor, only acts as a container for inquiring certificate request, *is meaningless for any other use. */ Credential(); /** Constructor with user-defined keylength. Needed for creation of EE certs, since some * applications will only support keys with a certain minimum length > 1024 */ Credential(int keybits); virtual ~Credential(); /**Constructor, specific constructor for CA certificate *is meaningless for any other use. */ Credential(const std::string& CAfile, const std::string& CAkey, const std::string& CAserial, const std::string& extfile, const std::string& extsect, const std::string& passphrase4key); /** Same as previuos constructor but allows password to be * supplied from different sources. * \since Added in 4.0.0. */ Credential(const std::string& CAfile, const std::string& CAkey, const std::string& CAserial, const std::string& extfile, const std::string& extsect, PasswordSource& passphrase4key); /**Constructor, specific constructor for proxy certificate, only acts as a * container for constraining certificate signing and/or generating certificate * request (only keybits is useful for creating certificate request), is meaningless * for any other use. * * The proxyversion and policylang is for specifying the proxy certificate type and * the policy language inside proxy. * The definition of proxyversion and policy language is based on * http://dev.globus.org/wiki/Security/ProxyCertTypes#RFC_3820_Proxy_Certificates * The code is supposed to support proxy version: * - GSI2 (legacy proxy) * - GSI3 (Proxy draft) * - RFC (RFC3820 proxy) * * and corresponding policy languages * - GSI2 (GSI2, GSI2_LIMITED) * - GSI3 * - RFC * - IMPERSONATION_PROXY--1.3.6.1.5.5.7.21.1 * - INDEPENDENT_PROXY--1.3.6.1.5.5.7.21.2 * - LIMITED_PROXY--1.3.6.1.4.1.3536.1.1.1.9 * - RESTRICTED_PROXY--policy language undefined * * In openssl>=098, there are three types of policy languages: * - id-ppl-inheritAll--1.3.6.1.5.5.7.21.1 * - id-ppl-independent--1.3.6.1.5.5.7.21.2 * - id-ppl-anyLanguage-1.3.6.1.5.5.7.21.0 * * @param start start time of proxy certificate * @param lifetime lifetime of proxy certificate * @param keybits modulus size for RSA key generation, * it should be greater than 1024 if 'this' class is * used for generating X509 request; it should be '0' if * 'this' class is used for constraining certificate signing. 
* @param proxyversion proxy certificate version (see above for values) * @param policylang policy language of the proxy (see above for values) * @param policy path to file with policy content * @param pathlength path length constraint */ Credential(Time start, Period lifetime = Period("PT12H"), int keybits = 1024, std::string proxyversion = "rfc", std::string policylang = "inheritAll", std::string policy = "", int pathlength = -1); /**Constructor, specific constructor for usual certificate, constructing from * credential files. only acts as a container for parsing the certificate and key * files, is meaningless for any other use. this constructor will parse the credential * information, and put them into "this" object * @param cert path to certificate file * @param key path to key file * @param cadir path to directory of CA certificates * @param cafile path to file with CA certificate * @param passphrase4key specifies the password for decrypting private key (if needed). * If value is empty then password will be asked interactively. To avoid asking for * password use value provided by NoPassword() method. * @param is_file specifies if the cert/key are from file, otherwise they * are supposed to be from string. default is from file */ Credential(const std::string& cert, const std::string& key, const std::string& cadir, const std::string& cafile, const std::string& passphrase4key = "", const bool is_file = true); /** Same as previuos constructor but allows password to be * supplied from different sources. * \since Added in 4.0.0. */ Credential(const std::string& cert, const std::string& key, const std::string& cadir, const std::string& cafile, PasswordSource& passphrase4key, const bool is_file = true); /**Constructor, specific constructor for usual certificate, constructing from * information in UserConfig object. Only acts as a container for parsing the * certificate and key files, is meaningless for any other use. this constructor * will parse the credential information, and put them into "this" object. * @param usercfg UserConfig object from which certificate information is obtained * @param passphrase4key passphrase for private key */ Credential(const UserConfig& usercfg, const std::string& passphrase4key = ""); /** Same as previuos constructor but allows password to be * supplied from different sources. * \since Added in 4.0.0. */ Credential(const UserConfig& usercfg, PasswordSource& passphrase4key); /**Initiate nid for proxy certificate extension*/ static void InitProxyCertInfo(void); /** Returns true if credentials are valid. Credentials are read from locations specified in UserConfig object. This method is deprecated. User per-instance method IsValid() instead. */ static bool IsCredentialsValid(const UserConfig& usercfg); /**General method for adding a new nid into openssl's global const*/ void AddCertExtObj(std::string& sn, std::string& oid); /// Set signing algorithm /** * \since Added in 4.0.0. **/ void SetSigningAlgorithm(Signalgorithm signing_algorithm = SIGN_DEFAULT); /// Set key bits /** * \since Added in 4.0.0. 
**/ void SetKeybits(int keybits = 0); static std::string NoPassword(void) { return std::string("\0",1); }; private: /** Credential object so far is not supposed to be copied */ Credential(const Credential&); void InitCredential(const std::string& cert, const std::string& key, const std::string& cadir, const std::string& cafile, PasswordSource& passphrase4key, const bool is_file); /**load key from argument keybio, and put key information into argument pkey */ //void loadKeyString(const std::string& key, EVP_PKEY* &pkey, const std::string& passphrase = ""); void loadKeyString(const std::string& key, EVP_PKEY* &pkey, PasswordSource& passphrase); //void loadKeyFile(const std::string& keyfile, EVP_PKEY* &pkey, const std::string& passphrase = ""); void loadKeyFile(const std::string& keyfile, EVP_PKEY* &pkey, PasswordSource& passphrase); //void loadKey(BIO* bio, EVP_PKEY* &pkey, const std::string& passphrase = "", const std::string& prompt_info = "", const bool is_file = true); /**load certificate from argument certbio, and put certificate information into * argument cert and certchain */ void loadCertificateString(const std::string& cert, X509* &x509, STACK_OF(X509)** certchain); void loadCertificateFile(const std::string& certfile, X509* &x509, STACK_OF(X509)** certchain); //void loadCertificate(BIO* bio, X509* &x509, STACK_OF(X509)** certchain, const bool is_file=true); /**Initiate cert_verify_context which will be used for certificate verification*/ void InitVerification(void); /**Verify whether the certificate is signed by trusted CAs *the verification is not needed for EEC, but needed for verifying a proxy certificate which *is generated by the others */ bool Verify(void); /**Create a certificate extension based on the arguments * name and data. argument crit will be used by * X509_EXTENSION_create_by_OBJ which is called inside * CreateExtension method. */ X509_EXTENSION* CreateExtension(const std::string& name, const std::string& data, bool crit = false); /** Set the start and end time for the proxy credential. 
 * After setting, the start time of the proxy will not be before the later of
 * the issuer's start time and the "start" parameter, and the end time of the
 * proxy will not be after the earlier of the issuer's end time and the
 * "start" parameter plus the "lifetime" parameter */
 bool SetProxyPeriod(X509* tosign, X509* issuer, const Time& start, const Period& lifetime);
 /**Assistant method for signing the proxy request; the method will duplicate some
  * information (subject and extension) from the signing certificate */
 bool SignRequestAssistant(Credential* proxy, EVP_PKEY* req_pubkey, X509** tosign);
 public:
 /**Log error information related to OpenSSL*/
 void LogError(void) const;
 /************************************/
 /*****Get information from "this" object**/
 /**Get the verification result of certificate chain checking*/
 bool GetVerification(void) const {return verification_valid; };
 /**Get the private key attached to this object*/
 EVP_PKEY* GetPrivKey(void) const;
 /**Get the public key attached to this object*/
 EVP_PKEY* GetPubKey(void) const;
 /**Get the certificate attached to this object*/
 X509* GetCert(void) const;
 /** Get the certificate request, if there is any */
 X509_REQ* GetCertReq(void) const;
 /**Get the certificate chain attached to this object*/
 STACK_OF(X509)* GetCertChain(void) const;
 /**Get the number of certificates in the certificate chain attached to this object */
 int GetCertNumofChain(void) const;
 /**Get the certificate format: PEM, PKCS12 or DER.
  * The BIO could be memory or file based; they are processed differently. */
 Credformat getFormat_BIO(BIO * in, const bool is_file = true) const;
 Credformat getFormat_str(const std::string& source) const;
 /**Get the DN of the certificate attached to this object*/
 std::string GetDN(void) const;
 /**Get the identity name of the certificate attached to this object;
  * the result will not include the proxy CN */
 std::string GetIdentityName(void) const;
 /**Get the type of the certificate attached to this object*/
 ArcCredential::certType GetType(void) const;
 /**Get the issuer of the certificate attached to this object*/
 std::string GetIssuerName(void) const;
 /**Get the CA of the certificate attached to this object; if the certificate
  * is an EEC, GetCAName returns the same value as GetIssuerName */
 std::string GetCAName(void) const;
 /**Get the signing algorithm used to sign the certificate attached to this object
  * \since Added in 4.0.0.
  **/
 Signalgorithm GetSigningAlgorithm(void) const;
 /**Get the key size of the certificate attached to this object
  * \since Added in 4.0.0.
  **/
 int GetKeybits(void) const;
 /**Get the proxy policy attached to the "proxy certificate
  * information" extension of the proxy certificate */
 std::string GetProxyPolicy(void) const;
 /**Set the proxy policy attached to the "proxy certificate
  * information" extension of the proxy certificate */
 void SetProxyPolicy(const std::string& proxyversion, const std::string& policylang,
     const std::string& policy, int pathlength);
 /**Output the private key into a string
  * @param content Filled with private key content
  * @param encryption whether to encrypt the output private key or not
  * @param passphrase the passphrase to encrypt the output private key */
 bool OutputPrivatekey(std::string &content, bool encryption = false, const std::string& passphrase ="");
 /**Output the private key into a string
  * @param content Filled with private key content
  * @param encryption whether to encrypt the output private key or not
  * @param passphrase the source for the passphrase to encrypt the output private key
  * \since Added in 4.0.0.
*/ bool OutputPrivatekey(std::string &content, bool encryption, PasswordSource& passphrase); /**Output the public key into string*/ bool OutputPublickey(std::string &content); /**Output the certificate into string * @param content Filled with certificate content * @param is_der false for PEM, true for DER */ bool OutputCertificate(std::string &content, bool is_der=false); /**Output the certificate chain into string * @param content Filled with certificate chain content * @param is_der false for PEM, true for DER */ bool OutputCertificateChain(std::string &content, bool is_der=false); /**Returns lifetime of certificate or proxy*/ Period GetLifeTime(void) const; /**Returns validity start time of certificate or proxy*/ Time GetStartTime() const; /**Returns validity end time of certificate or proxy*/ Time GetEndTime() const; /**Set lifetime of certificate or proxy*/ void SetLifeTime(const Period& period); /**Set start time of certificate or proxy*/ void SetStartTime(const Time& start_time); /**Returns true if credentials are valid*/ bool IsValid(void); /************************************/ /*****Generate certificate request, add certificate extension, inquire certificate request, *and sign certificate request **/ /**Add an extension to the extension part of the certificate *@param name the name of the extension, there OID related with the name *should be registered into openssl firstly *@param data the data which will be inserted into certificate extension *@param crit critical */ bool AddExtension(const std::string& name, const std::string& data, bool crit = false); /**Add an extension to the extension part of the certificate * @param name the name of the extension, there OID related with the name * should be registered into openssl firstly * @param binary the data which will be inserted into certificate * extension part as a specific extension there should be specific * methods defined inside specific X509V3_EXT_METHOD structure * to parse the specific extension format. * For example, VOMS attribute certificate is a specific * extension to proxy certificate. There is specific X509V3_EXT_METHOD * defined in VOMSAttribute.h and VOMSAttribute.c for parsing attribute * certificate. 
* In openssl, the specific X509V3_EXT_METHOD can be got according to * the extension name/id, see X509V3_EXT_get_nid(ext_nid) */ bool AddExtension(const std::string& name, char** binary); /**Get the specific extension (named by the parameter) in a certificate * this function is only supposed to be called after certificate and key * are loaded by the constructor for usual certificate * @param name the name of the extension to get */ std::string GetExtension(const std::string& name); /**Generate an EEC request, based on the keybits and signing * algorithm information inside this object * output the certificate request to output BIO * * The user will be asked for a private key password */ bool GenerateEECRequest(BIO* reqbio, BIO* keybio, const std::string& dn = ""); /**Generate an EEC request, output the certificate request to a string*/ bool GenerateEECRequest(std::string &reqcontent, std::string &keycontent, const std::string& dn = ""); /**Generate an EEC request, output the certificate request and the key to a file*/ bool GenerateEECRequest(const char* request_filename, const char* key_filename, const std::string& dn = ""); /**Generate a proxy request, base on the keybits and signing * algorithm information inside this object * output the certificate request to output BIO */ bool GenerateRequest(BIO* bio, bool if_der = false); /**Generate a proxy request, output the certificate request to a string*/ bool GenerateRequest(std::string &content, bool if_der = false); /**Generate a proxy request, output the certificate request to a file*/ bool GenerateRequest(const char* filename, bool if_der = false); /**Inquire the certificate request from BIO, and put the request * information to X509_REQ inside this object, * and parse the certificate type from the PROXYCERTINFO * of request' extension * @param reqbio the BIO containing the certificate request * @param if_eec true if EEC request * @param if_der false for PEM; true for DER */ bool InquireRequest(BIO* reqbio, bool if_eec = false, bool if_der = false); /**Inquire the certificate request from a string*/ bool InquireRequest(std::string &content, bool if_eec = false, bool if_der = false); /**Inquire the certificate request from a file*/ bool InquireRequest(const char* filename, bool if_eec = false, bool if_der = false); /**Sign request based on the information inside proxy, and * output the signed certificate to output BIO * @param proxy Credential object holding proxy information * @param outputbio BIO to hold the signed certificate * @param if_der false for PEM, true for DER */ bool SignRequest(Credential* proxy, BIO* outputbio, bool if_der = false); /**Sign request and output the signed certificate to a string * @param proxy Credential object holding proxy information * @param content string to hold the signed certificate * @param if_der false for PEM, true for DER */ bool SignRequest(Credential* proxy, std::string &content, bool if_der = false); /**Sign request and output the signed certificate to a file * @param proxy Credential object holding proxy information * @param filename path to file where certificate will be written * @param if_der false for PEM, true for DER */ bool SignRequest(Credential* proxy, const char* filename, bool if_der = false); /**Self sign a certificate. This functionality is specific for creating a CA credential * by using this Credential class. 
* @param dn the DN for the subject * @param extfile the configuration file which includes the extension information, typically the openssl.cnf file * @param extsect the section/group name for the extension, e.g. in openssl.cnf, usr_cert and v3_ca * @param certfile the certificate file, which contains the signed certificate */ bool SelfSignEECRequest(const std::string& dn, const char* extfile, const std::string& extsect, const char* certfile); //The following three methods is about signing an EEC certificate by implementing the same //functionality as a normal CA /**Sign eec request, and output the signed certificate to output BIO*/ bool SignEECRequest(Credential* eec, const std::string& dn, BIO* outputbio); /**Sign request and output the signed certificate to a string*/ bool SignEECRequest(Credential* eec, const std::string& dn, std::string &content); /**Sign request and output the signed certificate to a file*/ bool SignEECRequest(Credential* eec, const std::string& dn, const char* filename); private: // PKI files std::string cacertfile_; std::string cacertdir_; std::string certfile_; std::string keyfile_; //Verification result bool verification_valid; std::string verification_proxy_policy; //Certificate structures X509 * cert_; //certificate ArcCredential::certType cert_type_; EVP_PKEY * pkey_; //private key STACK_OF(X509) * cert_chain_; //certificates chain which is parsed //from the certificate, after //verification, the ca certificate //will be included PROXY_CERT_INFO_EXTENSION* proxy_cert_info_; Credformat format; Time start_; Period lifetime_; //Certificate request X509_REQ* req_; RSA* rsa_key_; EVP_MD* signing_alg_; int keybits_; //Proxy policy std::string proxyversion_; std::string policy_; int proxyver_; int pathlength_; //Extensions for certificate, such as certificate policy, attributes, etc. STACK_OF(X509_EXTENSION)* extensions_; //CA functionality related information std::string CAserial_; std::string extfile_; std::string extsect_; static X509_NAME *parse_name(char *subject, long chtype, int multirdn); }; }// namespace Arc #endif /* __ARC_CREDENTIAL_H__ */ nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/VOMSUtil.h0000644000000000000000000000012412771225230023363 xustar000000000000000027 mtime=1474636440.725253 27 atime=1513200574.611703 30 ctime=1513200659.129736842 nordugrid-arc-5.4.2/src/hed/libs/credential/VOMSUtil.h0000644000175000002070000004073112771225230023435 0ustar00mockbuildmock00000000000000#ifndef __ARC_VOMSUTIL_H__ #define __ARC_VOMSUTIL_H__ #include #include #include #include #include namespace Arc { /** \addtogroup credential * @{ */ typedef std::vector VOMSTrustChain; typedef std::string VOMSTrustRegex; /// Represents VOMS attribute part of a credential. class VOMSACInfo { public: // Not all statuses are implemented typedef enum { Success = 0, CAUnknown = (1<<0), // Signed by VOMS certificate of unknow CA CertRevoked = (1<<1), // Signed by revoked VOMS certificate LSCFailed = (1<<2), // Failed while matching VOMS attr. against LSC files TrustFailed = (1<<2), // Failed matching VOMS attr. 
against specified trust list X509ParsingFailed = (1<<3), // Failed while parsing at X509 level ACParsingFailed = (1<<4), // Failed while parsing at AC level InternalParsingFailed = (1<<5), // Failed while parsing internal VOMS structures TimeValidFailed = (1<<6), // VOMS attributes are not valid yet or expired IsCritical = (1<<7), // VOMS extension was marked as critical (unusual but not error) ParsingError = (X509ParsingFailed | ACParsingFailed | InternalParsingFailed), // Mask to test if status represents any failure caused by failed parsing ValidationError = (CAUnknown | CertRevoked | LSCFailed | TrustFailed | TimeValidFailed), // Mask to test if status represents any failure caused by validation rules Error = (0xffff & ~IsCritical) // Mask to test if status represents any failure } status_t; std::string voname; std::string holder; std::string issuer; std::string target; std::vector attributes; Time from; Time till; //Period validity; unsigned int status; VOMSACInfo(void):from(-1),till(-1),status(0) { }; }; /// Stores definitions for making decision if VOMS server is trusted. class VOMSTrustList { private: std::vector chains_; std::vector regexs_; public: VOMSTrustList(void) { }; /** Creates chain lists and regexps from plain list. List is made of chunks delimited by elements containing pattern "NEXT CHAIN". Each chunk with more than one element is converted into one instance of VOMSTrustChain. Chunks with single element are converted to VOMSTrustChain if element does not have special symbols. Otherwise it is treated as regular expression. Those symbols are '^','$' and '*'. Trusted chains can be congicured in two ways: one way is: /O=Grid/O=NorduGrid/CN=host/arthur.hep.lu.se /O=Grid/O=NorduGrid/CN=NorduGrid Certification Authority ----NEXT CHAIN--- /DC=ch/DC=cern/OU=computers/CN=voms.cern.ch /DC=ch/DC=cern/CN=CERN Trusted Certification Authority the other way is: /O=Grid/O=NorduGrid/CN=host/arthur.hep.lu.se /O=Grid/O=NorduGrid/CN=NorduGrid Certification Authority /DC=ch/DC=cern/OU=computers/CN=voms.cern.ch /DC=ch/DC=cern/CN=CERN Trusted Certification Authority each chunk is supposed to contain a suit of DN of trusted certificate chain, in which the first DN is the DN of the certificate (cert0) which is used to sign the Attribute Certificate (AC), the second DN is the DN of the issuer certificate(cert1) which is used to sign cert0. So if there are one or more intermediate issuers, then there should be 3 or more than 3 DNs in this chunk (considering cert0 and the root certificate, plus the intermediate certificate) . */ VOMSTrustList(const std::vector& encoded_list); /** Creates chain lists and regexps from those specified in arguments. See AddChain() and AddRegex() for more information. */ VOMSTrustList(const std::vector& chains,const std::vector& regexs); ~VOMSTrustList(void); /** Adds chain of trusted DNs to list. During verification each signature of AC is checked against all stored chains. DNs of chain of certificate used for signing AC are compared against DNs stored in these chains one by one. If needed DN of issuer of last certificate is checked too. Comparison succeeds if DNs in at least one stored chain are same as those in certificate chain. Comparison stops when all DNs in stored chain are compared. If there are more DNs in stored chain than in certificate chain then comparison fails. Empty stored list matches any certificate chain. Taking into account that certificate chains are verified down to trusted CA anyway, having more than one DN in stored chain seems to be useless. 
But such feature may be found useful by some very strict sysadmins. ??? IMO,DN list here is not only for authentication, it is also kind of ACL, which means the AC consumer only trusts those DNs which issues AC. */ VOMSTrustChain& AddChain(const VOMSTrustChain& chain); /** Adds empty chain of trusted DNs to list. */ VOMSTrustChain& AddChain(void); void AddElement(const std::vector& encoded_list); /** Adds regular expression to list. During verification each signature of AC is checked against all stored regular expressions. DN of signing certificate must match at least one of stored regular expressions. */ RegularExpression& AddRegex(const VOMSTrustRegex& reg); int SizeChains(void) const { return chains_.size(); }; int SizeRegexs(void) const { return regexs_.size(); }; const VOMSTrustChain& GetChain(int num) const { return chains_[num]; }; const RegularExpression& GetRegex(int num) const { return *(regexs_[num]); }; }; void InitVOMSAttribute(void); /* This method is used to create an AC. It is supposed * to be used by the voms server * @param issuer The issuer which will be used to sign the AC, it is also * the voms server certificate * @param issuerstack The stack of the issuer certificates that issue the * voms server certificate. If the voms server certificate * is issued by a root CA (self-signed), then this param * is empty. * @param holder The certificate of the holder of this AC. It should be * parsed from the peer that launches a AC query request * @param pkey The key of the holder * @param fqan The AC_IETFATTR. According to the definition of voms, the fqan * will be like /Role=Employee/Group=Tester/Capability=NULL * @param attributes The AC_FULL_ATTRIBUTES. Accoding to the definition of voms, * the attributes will be like "qualifier::name=value" * @param target The list of targets which are supposed to consume this AC * @param ac The generated AC * @param voname The vo name * @param uri The uri of this vo, together with voname, it will be * as the grantor of this AC * @param lifetime The lifetime of this AC */ /* int createVOMSAC(X509 *issuer, STACK_OF(X509) *issuerstack, X509 *holder, EVP_PKEY *pkey, BIGNUM *serialnum, std::vector &fqan, std::vector &targets, std::vector& attributes, ArcCredential::AC **ac, std::string voname, std::string uri, int lifetime); */ /**Create AC(Attribute Certificate) with voms specific format. * @param codedac The coded AC as output of this method * @param issuer_cred The issuer credential which is used to sign the AC * @param holder_cred The holder credential, the holder certificate * is the one which carries AC * @param fqan The AC_IETFATTR. According to the definition of voms, the fqan * will be like /Role=Employee/Group=Tester/Capability=NULL * @param targets The list of targets which are supposed to consume this AC * @param attributes The AC_FULL_ATTRIBUTES. 
Accoding to the definition of voms, * the attributes will be like "qualifier::name=value" * @param voname The vo name * @param uri The uri of this vo, together with voname, it will be * as the granter of this AC * @param lifetime The lifetime of this AC * */ bool createVOMSAC(std::string& codedac, Credential& issuer_cred, Credential& holder_cred, std::vector &fqan, std::vector &targets, std::vector& attributes, std::string &voname, std::string &uri, int lifetime); /**Add decoded AC string into a list of AC objects * @param aclist The list of AC objects (output) * @param acorder The order of AC objects (output) * @param decodedac The AC string that is decoded from the string * returned from voms server (input) */ bool addVOMSAC(ArcCredential::AC** &aclist, std::string &acorder, std::string &decodedac); /**Parse the certificate, and output the attributes. * @param holder The proxy certificate which includes the voms * specific formated AC. * @param ca_cert_dir The trusted certificates which are used to * verify the certificate which is used to sign the AC * @param ca_cert_file The same as ca_cert_dir except it is a file * instead of a directory. Only one of them need to be set * @param vomsdir The directory which include *.lsc file for each vo. * For instance, a vo called "knowarc.eu" should * have file vomsdir/knowarc/voms.knowarc.eu.lsc which * contains on the first line the DN of the VOMS server, and on * the second line the corresponding CA DN: * /O=Grid/O=NorduGrid/OU=KnowARC/CN=voms.knowarc.eu * /O=Grid/O=NorduGrid/CN=NorduGrid Certification Authority * See more in : https://twiki.cern.ch/twiki/bin/view/LCG/VomsFAQforServiceManagers * @param vomscert_trust_dn List of VOMS trust chains * @param output The parsed attributes (Role and Generic Attribute) . * Each attribute is stored in element of a vector as a string. * It is up to the consumer to understand the meaning of the * attribute. * There are two types of attributes stored in VOMS AC: * AC_IETFATTR, AC_FULL_ATTRIBUTES. * The AC_IETFATTR will be like /Role=Employee/Group=Tester/Capability=NULL * The AC_FULL_ATTRIBUTES will be like knowarc:Degree=PhD (qualifier::name=value) * In order to make the output attribute values be identical, the voms server * information is added as prefix of the original attributes in AC. * for AC_FULL_ATTRIBUTES, the voname + hostname is added: * /voname=knowarc.eu/hostname=arthur.hep.lu.se:15001//knowarc.eu/coredev:attribute1=1 * for AC_IETFATTR, the 'VO' (voname) is added: * /VO=knowarc.eu/Group=coredev/Role=NULL/Capability=NULL * /VO=knowarc.eu/Group=testers/Role=NULL/Capability=NULL * some other redundant attributes is provided: * voname=knowarc.eu/hostname=arthur.hep.lu.se:15001 * @param verify true: Verify the voms certificate is trusted based on the * ca_cert_dir/ca_cert_file which specifies the CA certificates, * and the vomscert_trust_dn which specifies the trusted DN chain * from voms server certificate to CA certificate. * false: Not verify, which means the issuer of AC (voms server * certificate is supposed to be trusted by default). * In this case the parameters 'ca_cert_dir', * 'ca_cert_file' and 'vomscert_trust_dn' will not effect, and * may be left empty. * This case is specifically used by 'arcproxy --info' to list * all of the attributes in AC, and not to need to verify if * the AC's issuer is trusted. * * @param reportall If set to true fills output with all attributes * including those which failed passing test procedures. 
* Validity of attributes can be checked through status * members of output items. * Combination of verify=true and reportall=true provides * most information. * */ bool parseVOMSAC(X509* holder, const std::string& ca_cert_dir, const std::string& ca_cert_file, const std::string& vomsdir, VOMSTrustList& vomscert_trust_dn, std::vector& output, bool verify = true, bool reportall = false); /**Parse the certificate. Similar to above one, but collects information From all certificates in a chain. */ bool parseVOMSAC(const Credential& holder_cred, const std::string& ca_cert_dir, const std::string& ca_cert_file, const std::string& vomsdir, VOMSTrustList& vomscert_trust_dn, std::vector& output, bool verify = true, bool reportall = false); /**Parse the certificate or a chain of certificates, in string format */ bool parseVOMSAC(const std::string& cert_str, const std::string& ca_cert_dir, const std::string& ca_cert_file, const std::string& vomsdir, VOMSTrustList& vomscert_trust_dn, std::vector& output, bool verify = true, bool reportall = false); /**Decode the data which is encoded by voms server. Since voms code uses some specific * coding method (not base64 encoding), we simply copy the method from voms code to here*/ char *VOMSDecode(const char *data, int size, int *j); /**Encode the data with base64 encoding */ char *VOMSEncode(const char *data, int size, int *j); /**Extract the needed field from the certificate. * @param u The proxy certificate which includes the voms * specific formated AC. * @param property The property that caller would get, * including: dn, voms:vo, voms:role, voms:group * @param ca_cert_dir * @param ca_cert_file * @param vomsdir * @param voms_trust_list the dn chain that is trusted when parsing voms AC * \since Changed in 4.1.0. Provide ability to query credential for VOMS * nickname attribute. */ std::string getCredentialProperty(const Arc::Credential& u, const std::string& property, const std::string& ca_cert_dir = std::string(""), const std::string& ca_cert_file = std::string(""), const std::string& vomsdir = std::string(""), const std::vector& voms_trust_list = std::vector()); std::string VOMSFQANToFull(const std::string& vo, const std::string& fqan); std::string VOMSFQANFromFull(const std::string& attribute); /**Encode the VOMS AC list into ASN1, so that the result can be used * to insert into X509 as extension. * @param ac_seq The input string includes a list of AC with VOMS_AC_HEADER and VOMS_AC_TRAILER as separator * @param asn1 The encoded value as output */ bool VOMSACSeqEncode(const std::string& ac_seq, std::string& asn1); /**Encode the VOMS AC list into ASN1, so that the result can be used * to insert into X509 as extension. 
* @param acs The input list includes a list of AC * @param asn1 The encoded value as output */ bool VOMSACSeqEncode(const std::list acs, std::string& asn1); /** @} */ }// namespace Arc #endif /* __ARC_VOMSUTIL_H__ */ nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/CertUtil.h0000644000000000000000000000012413065017103023467 xustar000000000000000027 mtime=1490296387.698578 27 atime=1513200574.612703 30 ctime=1513200659.126736805 nordugrid-arc-5.4.2/src/hed/libs/credential/CertUtil.h0000644000175000002070000000651413065017103023542 0ustar00mockbuildmock00000000000000#ifndef __ARC_CERTUTIL_H__ #define __ARC_CERTUTIL_H__ #include #include #include #include #include namespace ArcCredential { /// Certificate Types /** \ingroup credential */ typedef enum { /** A end entity certificate */ CERT_TYPE_EEC, /** A CA certificate */ CERT_TYPE_CA, /** A X.509 Proxy Certificate Profile (pre-RFC) compliant impersonation proxy - obsolete */ CERT_TYPE_GSI_3_IMPERSONATION_PROXY, /** A X.509 Proxy Certificate Profile (pre-RFC) compliant independent proxy - obsolete */ CERT_TYPE_GSI_3_INDEPENDENT_PROXY, /** A X.509 Proxy Certificate Profile (pre-RFC) compliant limited proxy - obsolete */ CERT_TYPE_GSI_3_LIMITED_PROXY, /** A X.509 Proxy Certificate Profile (pre-RFC) compliant restricted proxy - obsolete */ CERT_TYPE_GSI_3_RESTRICTED_PROXY, /** A legacy Globus impersonation proxy - obsolete */ CERT_TYPE_GSI_2_PROXY, /** A legacy Globus limited impersonation proxy - obsolete */ CERT_TYPE_GSI_2_LIMITED_PROXY, /** A X.509 Proxy Certificate Profile RFC compliant impersonation proxy; RFC inheritAll proxy */ CERT_TYPE_RFC_IMPERSONATION_PROXY, /** A X.509 Proxy Certificate Profile RFC compliant independent proxy; RFC independent proxy */ CERT_TYPE_RFC_INDEPENDENT_PROXY, /** A X.509 Proxy Certificate Profile RFC compliant limited proxy */ CERT_TYPE_RFC_LIMITED_PROXY, /** A X.509 Proxy Certificate Profile RFC compliant restricted proxy */ CERT_TYPE_RFC_RESTRICTED_PROXY, /** RFC anyLanguage proxy */ CERT_TYPE_RFC_ANYLANGUAGE_PROXY } certType; /** True if certificate type is one of proxy certificates */ #define CERT_IS_PROXY(cert_type) \ (cert_type == CERT_TYPE_RFC_IMPERSONATION_PROXY || \ cert_type == CERT_TYPE_RFC_INDEPENDENT_PROXY || \ cert_type == CERT_TYPE_RFC_LIMITED_PROXY || \ cert_type == CERT_TYPE_RFC_RESTRICTED_PROXY || \ cert_type == CERT_TYPE_RFC_ANYLANGUAGE_PROXY) /** True if certificate type is one of standard proxy certificates */ #define CERT_IS_RFC_PROXY(cert_type) \ (cert_type == CERT_TYPE_RFC_IMPERSONATION_PROXY || \ cert_type == CERT_TYPE_RFC_INDEPENDENT_PROXY || \ cert_type == CERT_TYPE_RFC_LIMITED_PROXY || \ cert_type == CERT_TYPE_RFC_RESTRICTED_PROXY || \ cert_type == CERT_TYPE_RFC_ANYLANGUAGE_PROXY) #define CERT_IS_INDEPENDENT_PROXY(cert_type) \ (cert_type == CERT_TYPE_RFC_INDEPENDENT_PROXY) #define CERT_IS_RESTRICTED_PROXY(cert_type) \ (cert_type == CERT_TYPE_RFC_RESTRICTED_PROXY) #define CERT_IS_LIMITED_PROXY(cert_type) \ (cert_type == CERT_TYPE_RFC_LIMITED_PROXY) #define CERT_IS_IMPERSONATION_PROXY(cert_type) \ (cert_type == CERT_TYPE_RFC_IMPERSONATION_PROXY || \ cert_type == CERT_TYPE_RFC_LIMITED_PROXY) int verify_cert_chain(X509* cert, STACK_OF(X509)** certchain, std::string const& ca_file, std::string const& ca_dir, std::string& proxy_policy); int collect_cert_chain(X509* cert, STACK_OF(X509)** certchain, std::string& proxy_policy); bool check_cert_type(X509* cert, certType& type); const char* certTypeToString(certType type); } #endif // __ARC_CERTUTIL_H__ 
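// A minimal sketch (not part of the distribution) of the request/sign cycle
// documented in Credential.h above: one Credential generates the proxy request,
// a second one acts as the signing-constraint container, and the loaded user
// credential signs it. File paths, the key length and the helper name
// make_proxy are illustrative assumptions only.
#include <string>
#include <arc/credential/Credential.h>

static bool make_proxy(std::string& proxy_pem) {
  // Request side: keybits > 1024 because this object generates the request.
  Arc::Credential request(Arc::Time(), Arc::Period("PT12H"), 2048,
                          "rfc", "inheritAll", "", -1);
  std::string req_str;
  if (!request.GenerateRequest(req_str)) return false;

  // Signing side: keybits = 0 because this object only constrains signing.
  Arc::Credential constraints(Arc::Time(), Arc::Period("PT12H"), 0,
                              "rfc", "inheritAll", "", -1);
  if (!constraints.InquireRequest(req_str)) return false;

  Arc::Credential signer("/tmp/usercert.pem", "/tmp/userkey.pem",
                         "/etc/grid-security/certificates", "");
  if (!signer.SignRequest(&constraints, proxy_pem)) return false;

  // Assemble a complete proxy file: signed certificate, the request's private
  // key, then the signer's certificate and chain.
  std::string key_pem, signer_pem, chain_pem;
  request.OutputPrivatekey(key_pem);
  signer.OutputCertificate(signer_pem);
  signer.OutputCertificateChain(chain_pem);
  proxy_pem.append(key_pem).append(signer_pem).append(chain_pem);
  return true;
}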
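// A second sketch, assuming the same installed header locations: extract the
// VOMS attributes carried by an existing proxy using VOMSTrustList and
// parseVOMSAC() as documented in VOMSUtil.h. The proxy path is a placeholder;
// the trust-chain DNs are the examples given in the VOMSTrustList documentation.
#include <iostream>
#include <string>
#include <vector>
#include <arc/credential/Credential.h>
#include <arc/credential/VOMSUtil.h>

int main() {
  // A proxy file carries both the certificate chain and the private key.
  Arc::Credential proxy("/tmp/x509up_u1000", "/tmp/x509up_u1000",
                        "/etc/grid-security/certificates", "");

  // Encoded trust list: first DN is the AC signer, the next its issuer;
  // further chunks would be separated by a "NEXT CHAIN" entry.
  std::vector<std::string> encoded;
  encoded.push_back("/O=Grid/O=NorduGrid/CN=host/arthur.hep.lu.se");
  encoded.push_back("/O=Grid/O=NorduGrid/CN=NorduGrid Certification Authority");
  Arc::VOMSTrustList trust(encoded);

  std::vector<Arc::VOMSACInfo> acs;
  bool ok = Arc::parseVOMSAC(proxy, "/etc/grid-security/certificates", "",
                             "/etc/grid-security/vomsdir", trust, acs,
                             true /*verify*/, true /*reportall*/);
  for (size_t i = 0; i < acs.size(); ++i) {
    std::cout << acs[i].voname << " (status " << acs[i].status << ")\n";
    for (size_t j = 0; j < acs[i].attributes.size(); ++j)
      std::cout << "  " << acs[i].attributes[j] << "\n";
  }
  return ok ? 0 : 1;
}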
nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/VOMSUtil.cpp0000644000000000000000000000012413153455044023721 xustar000000000000000027 mtime=1504598564.314262 27 atime=1513200574.624703 30 ctime=1513200659.140736977 nordugrid-arc-5.4.2/src/hed/libs/credential/VOMSUtil.cpp0000644000175000002070000022250013153455044023767 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #ifndef HAVE_GETDOMAINNAME #include #endif #include #include #include #include #include #include #include #include #include "listfunc.h" #define idpkixOID "1.3.6.1.5.5.7" // #define idpkcs9OID "1.2.840.113549.1.9" // #define idpeOID idpkixOID ".1" #define idceOID "2.5.29" // #define idacaOID idpkixOID ".10" // #define idatOID "2.5.4" #define idceauthKeyIdentifierOID idceOID ".35" #define idcenoRevAvailOID idceOID ".56" #define idceTargetsOID idceOID ".55" #define vomsOID "1.3.6.1.4.1.8005.100.100.1" #define incfileOID "1.3.6.1.4.1.8005.100.100.2" #define voOID "1.3.6.1.4.1.8005.100.100.3" #define idatcapOID "1.3.6.1.4.1.8005.100.100.4" #define attributesOID "1.3.6.1.4.1.8005.100.100.11" #define acseqOID "1.3.6.1.4.1.8005.100.100.5" #define orderOID "1.3.6.1.4.1.8005.100.100.6" #define certseqOID "1.3.6.1.4.1.8005.100.100.10" // #define emailOID idpkcs9OID ".1" static std::string default_vomsdir = std::string(G_DIR_SEPARATOR_S) + "etc" + G_DIR_SEPARATOR_S +"grid-security" + G_DIR_SEPARATOR_S + "vomsdir"; #ifdef WIN32 int gethostname_mingw (char *, size_t); int gethostname_mingw (char *name, size_t len) { DWORD dlen = (len <= (DWORD)~0 ? len : (DWORD)~0); return (GetComputerNameExA(ComputerNameDnsHostname, name, &dlen) ? 0 : -1); } #define gethostname gethostname_mingw int getdomainname_mingw (char *, size_t); int getdomainname_mingw (char *name, size_t len) { DWORD dlen = (len <= (DWORD)~0 ? len : (DWORD)~0); return (GetComputerNameExA(ComputerNameDnsDomain, name, &dlen) ? 
0 : -1); } #define getdomainname getdomainname_mingw #endif using namespace ArcCredential; namespace Arc { #if (OPENSSL_VERSION_NUMBER < 0x10100000L) static void X509_get0_uids(const X509 *x, const ASN1_BIT_STRING **piuid, const ASN1_BIT_STRING **psuid) { if (x) { if (piuid != NULL) { if (x->cert_info) { *piuid = x->cert_info->issuerUID; } } if (psuid != NULL) { if (x->cert_info) { *psuid = x->cert_info->subjectUID; } } } } static const X509_ALGOR *X509_get0_tbs_sigalg(const X509 *x) { if(!x) return NULL; if(!(x->cert_info)) return NULL; return x->cert_info->signature; } #endif #if (OPENSSL_VERSION_NUMBER < 0x10002000L) static void X509_get0_signature(ASN1_BIT_STRING **psig, X509_ALGOR **palg, const X509 *x) { if (psig) *psig = x->signature; if (palg) *palg = x->sig_alg; } #endif void VOMSTrustList::AddElement(const std::vector& encoded_list) { VOMSTrustChain chain; for(std::vector::const_iterator i = encoded_list.begin(); i != encoded_list.end(); ++i) { if((*i).find("NEXT CHAIN") != std::string::npos) { if(chain.size() > 0) { if(chain.size() > 1) { // More than one item in chain means DN list AddChain(chain); } else { // Trying to find special symbols if((chain[0].find('^') != std::string::npos) || (chain[0].find('$') != std::string::npos) || (chain[0].find('*') != std::string::npos)) { AddRegex(chain[0]); } else { AddChain(chain); }; } chain.clear(); } continue; } chain.push_back(*i); } if(chain.size() > 0) { if(chain.size() > 1) { // More than one item in chain means DN list AddChain(chain); } else { // Trying to find special symbols if((chain[0].find('^') != std::string::npos) || (chain[0].find('$') != std::string::npos) || (chain[0].find('*') != std::string::npos)) { AddRegex(chain[0]); } else { AddChain(chain); }; } chain.clear(); } } VOMSTrustList::VOMSTrustList(const std::vector& encoded_list) { AddElement(encoded_list); } VOMSTrustList::VOMSTrustList(const std::vector& chains, const std::vector& regexs):chains_(chains) { for(std::vector::const_iterator r = regexs.begin(); r != regexs.end();++r) { AddRegex(*r); } } VOMSTrustList::~VOMSTrustList(void) { for(std::vector::iterator r = regexs_.begin(); r != regexs_.end();++r) { if(*r) delete *r; *r=NULL; } } VOMSTrustChain& VOMSTrustList::AddChain(void) { VOMSTrustChain chain; return *chains_.insert(chains_.end(),chain); } VOMSTrustChain& VOMSTrustList::AddChain(const VOMSTrustChain& chain) { return *chains_.insert(chains_.end(),chain); } RegularExpression& VOMSTrustList::AddRegex(const VOMSTrustRegex& reg) { RegularExpression* r = new RegularExpression(reg); regexs_.insert(regexs_.end(),r); return *r; } void InitVOMSAttribute(void) { #if (OPENSSL_VERSION_NUMBER < 0x10100000L) #define OBJCREATE(c,n) { \ (void)OBJ_create(c,n,#c); \ } #else #define OBJCREATE(c,n) { \ if(OBJ_create(c,n,#c) == 0) { \ unsigned long __err = ERR_get_error(); \ if(ERR_GET_REASON(__err) != OBJ_R_OID_EXISTS) { \ CredentialLogger.msg(ERROR, \ "Failed to create OpenSSL object %s %s - %u %s", \ c, n, ERR_GET_REASON(__err), ERR_error_string(__err,NULL)); \ return; \ }; \ }; \ } #endif #define OBJSETNID(v,n) { v = OBJ_txt2nid(n); if(v == NID_undef) CredentialLogger.msg(ERROR, "Failed to obtain OpenSSL identifier for %s", n); } X509V3_EXT_METHOD *vomsattribute_x509v3_ext_meth; OpenSSLInit(); static Glib::Mutex lock_; static bool done = false; Glib::Mutex::Lock lock(lock_); if (done) return; /* VOMS Attribute related objects*/ //OBJ_create(email, "Email", "Email"); OBJCREATE(idatcapOID,"idatcap"); OBJCREATE(attributesOID,"attributes"); OBJCREATE(idcenoRevAvailOID, 
"idcenoRevAvail"); OBJCREATE(idceauthKeyIdentifierOID, "idceauthKeyIdentifier"); OBJCREATE(idceTargetsOID, "idceTargets"); OBJCREATE(acseqOID, "acseq"); OBJCREATE(orderOID, "order"); OBJCREATE(vomsOID, "voms"); OBJCREATE(incfileOID, "incfile"); OBJCREATE(voOID, "vo"); OBJCREATE(certseqOID, "certseq"); vomsattribute_x509v3_ext_meth = VOMSAttribute_auth_x509v3_ext_meth(); if (vomsattribute_x509v3_ext_meth) { OBJSETNID(vomsattribute_x509v3_ext_meth->ext_nid, idceauthKeyIdentifierOID); X509V3_EXT_add(vomsattribute_x509v3_ext_meth); } vomsattribute_x509v3_ext_meth = VOMSAttribute_avail_x509v3_ext_meth(); if (vomsattribute_x509v3_ext_meth) { OBJSETNID(vomsattribute_x509v3_ext_meth->ext_nid, idcenoRevAvailOID); X509V3_EXT_add(vomsattribute_x509v3_ext_meth); } vomsattribute_x509v3_ext_meth = VOMSAttribute_targets_x509v3_ext_meth(); if (vomsattribute_x509v3_ext_meth) { OBJSETNID(vomsattribute_x509v3_ext_meth->ext_nid, idceTargetsOID); X509V3_EXT_add(vomsattribute_x509v3_ext_meth); } vomsattribute_x509v3_ext_meth = VOMSAttribute_acseq_x509v3_ext_meth(); if (vomsattribute_x509v3_ext_meth) { OBJSETNID(vomsattribute_x509v3_ext_meth->ext_nid, acseqOID); X509V3_EXT_add(vomsattribute_x509v3_ext_meth); } vomsattribute_x509v3_ext_meth = VOMSAttribute_certseq_x509v3_ext_meth(); if (vomsattribute_x509v3_ext_meth) { OBJSETNID(vomsattribute_x509v3_ext_meth->ext_nid, certseqOID); X509V3_EXT_add(vomsattribute_x509v3_ext_meth); } vomsattribute_x509v3_ext_meth = VOMSAttribute_attribs_x509v3_ext_meth(); if (vomsattribute_x509v3_ext_meth) { OBJSETNID(vomsattribute_x509v3_ext_meth->ext_nid, attributesOID); X509V3_EXT_add(vomsattribute_x509v3_ext_meth); } if(!PersistentLibraryInit("modcredential")) { CredentialLogger.msg(WARNING, "Failed to lock arccredential library in memory"); }; done=true; } static int createVOMSAC(X509 *issuer, STACK_OF(X509) *issuerstack, X509 *holder, EVP_PKEY *pkey, BIGNUM *serialnum, std::vector &fqan, std::vector &targets, std::vector& attrs, AC *ac, std::string voname, std::string uri, int lifetime) { #define ERROR(e) do { err = (e); goto err; } while (0) AC *a = NULL; AC_ATTR *capabilities = NULL; AC_IETFATTR *capnames = NULL; ASN1_OBJECT *cobj = NULL; X509_NAME *subname = NULL; X509_NAME *issname = NULL; ASN1_INTEGER *serial = NULL; ASN1_INTEGER *holdserial = NULL; ASN1_INTEGER *version = NULL; ASN1_BIT_STRING *uid = NULL; AC_FULL_ATTRIBUTES *ac_full_attrs = NULL; X509_ALGOR *alg1 = NULL; X509_ALGOR *alg2 = NULL; ASN1_GENERALIZEDTIME *time1 = NULL; ASN1_GENERALIZEDTIME *time2 = NULL; X509_EXTENSION *norevavail = NULL; X509_EXTENSION *targetsext = NULL; X509_EXTENSION *auth = NULL; X509_EXTENSION *certstack = NULL; AC_ATT_HOLDER *ac_att_holder = NULL; int err = AC_ERR_UNKNOWN; time_t curtime; InitVOMSAttribute(); if (!issuer || !holder || !serialnum || fqan.empty() || !ac || !pkey) return AC_ERR_PARAMETERS; X509V3_CTX extctx; X509V3_set_ctx(&extctx, issuer, NULL, NULL, NULL, 0); a = ac; subname = X509_NAME_dup(X509_get_issuer_name(holder)); //old or new version? 
issname = X509_NAME_dup(X509_get_subject_name(issuer)); time(&curtime); time1 = ASN1_GENERALIZEDTIME_set(NULL, curtime); time2 = ASN1_GENERALIZEDTIME_set(NULL, curtime+lifetime); capabilities = AC_ATTR_new(); capnames = AC_IETFATTR_new(); cobj = OBJ_txt2obj(idatcapOID,0); holdserial = ASN1_INTEGER_dup(X509_get_serialNumber(holder)); serial = BN_to_ASN1_INTEGER(serialnum, NULL); version = BN_to_ASN1_INTEGER((BIGNUM *)(BN_value_one()), NULL); ac_full_attrs = AC_FULL_ATTRIBUTES_new(); ac_att_holder = AC_ATT_HOLDER_new(); std::string buffer, complete; if (!subname || !issuer || !holdserial || !serial || !capabilities || !cobj || !capnames || !time1 || !time2 || !ac_full_attrs || !ac_att_holder) ERROR(AC_ERR_MEMORY); for (std::vector::iterator i = targets.begin(); i != targets.end(); i++) { if (i == targets.begin()) complete = (*i); else complete.append(",").append(*i); } // prepare AC_IETFATTR for (std::vector::iterator i = fqan.begin(); i != fqan.end(); i++) { AC_IETFATTRVAL *tmpc = AC_IETFATTRVAL_new(); if (!tmpc) { ERROR(AC_ERR_MEMORY); } tmpc->value.octet_string = ASN1_OCTET_STRING_new(); if(!tmpc->value.octet_string) { AC_IETFATTRVAL_free(tmpc); ERROR(AC_ERR_MEMORY); } tmpc->type = V_ASN1_OCTET_STRING; CredentialLogger.msg(DEBUG,"VOMS: create FQAN: %s",*i); ASN1_OCTET_STRING_set(tmpc->value.octet_string, (const unsigned char*)((*i).c_str()), (*i).length()); if(capnames->values == NULL) capnames->values = sk_AC_IETFATTRVAL_new_null(); sk_AC_IETFATTRVAL_push(capnames->values, tmpc); } buffer.append(voname); buffer.append("://"); buffer.append(uri); { GENERAL_NAME *g = GENERAL_NAME_new(); { ASN1_IA5STRING *tmpr = ASN1_IA5STRING_new(); if (!tmpr || !g) { GENERAL_NAME_free(g); ASN1_IA5STRING_free(tmpr); ERROR(AC_ERR_MEMORY); } ASN1_STRING_set(tmpr, buffer.c_str(), buffer.size()); g->type = GEN_URI; g->d.ia5 = tmpr; } if(capnames->names == NULL) capnames->names = sk_GENERAL_NAME_new_null(); sk_GENERAL_NAME_push(capnames->names, g); } // stuff the created AC_IETFATTR in ietfattr (values) and define its object if(capabilities->ietfattr == NULL) capabilities->ietfattr = sk_AC_IETFATTR_new_null(); sk_AC_IETFATTR_push(capabilities->ietfattr, capnames); capnames = NULL; ASN1_OBJECT_free(capabilities->type); capabilities->type = cobj; cobj = NULL; // prepare AC_FULL_ATTRIBUTES for (std::vector::iterator i = attrs.begin(); i != attrs.end(); i++) { std::string qual, name, value; CredentialLogger.msg(DEBUG,"VOMS: create attribute: %s",*i); AC_ATTRIBUTE *ac_attr = AC_ATTRIBUTE_new(); if (!ac_attr) { ERROR(AC_ERR_MEMORY); } //Accoding to the definition of voms, the attributes will be like "qualifier::name=value" or "::name=value" size_t pos =(*i).find_first_of("::"); if (pos != std::string::npos) { qual = (*i).substr(0, pos); pos += 2; } else { pos = 2; } size_t pos1 = (*i).find_first_of("="); if (pos1 == std::string::npos) { AC_ATTRIBUTE_free(ac_attr); ERROR(AC_ERR_PARAMETERS); } else { name = (*i).substr(pos, pos1 - pos); value = (*i).substr(pos1 + 1); } if (!qual.empty()) { ASN1_OCTET_STRING_set(ac_attr->qualifier, (const unsigned char*)(qual.c_str()), qual.length()); } else { ASN1_OCTET_STRING_set(ac_attr->qualifier, (const unsigned char*)(voname.c_str()), voname.length()); } ASN1_OCTET_STRING_set(ac_attr->name, (const unsigned char*)(name.c_str()), name.length()); ASN1_OCTET_STRING_set(ac_attr->value, (const unsigned char*)(value.c_str()), value.length()); if(ac_att_holder->attributes == NULL) ac_att_holder->attributes = sk_AC_ATTRIBUTE_new_null(); sk_AC_ATTRIBUTE_push(ac_att_holder->attributes, 
ac_attr); } if (attrs.empty()) { AC_ATT_HOLDER_free(ac_att_holder); ac_att_holder = NULL; } else { GENERAL_NAME *g = GENERAL_NAME_new(); ASN1_IA5STRING *tmpr = ASN1_IA5STRING_new(); if (!tmpr || !g) { GENERAL_NAME_free(g); ASN1_IA5STRING_free(tmpr); ERROR(AC_ERR_MEMORY); } std::string buffer(voname); buffer.append("://"); buffer.append(uri); ASN1_STRING_set(tmpr, buffer.c_str(), buffer.length()); g->type = GEN_URI; g->d.ia5 = tmpr; if(ac_att_holder->grantor == NULL) ac_att_holder->grantor = sk_GENERAL_NAME_new_null(); sk_GENERAL_NAME_push(ac_att_holder->grantor, g); if(ac_full_attrs->providers == NULL) ac_full_attrs->providers = sk_AC_ATT_HOLDER_new_null(); sk_AC_ATT_HOLDER_push(ac_full_attrs->providers, ac_att_holder); ac_att_holder = NULL; } // push both AC_ATTR into STACK_OF(AC_ATTR) if(a->acinfo->attrib == NULL) a->acinfo->attrib = sk_AC_ATTR_new_null(); sk_AC_ATTR_push(a->acinfo->attrib, capabilities); capabilities = NULL; if (ac_full_attrs) { X509_EXTENSION *ext = NULL; ext = X509V3_EXT_conf_nid(NULL, &extctx, OBJ_txt2nid(attributesOID), (char *)(ac_full_attrs->providers)); AC_FULL_ATTRIBUTES_free(ac_full_attrs); ac_full_attrs = NULL; if (!ext) ERROR(AC_ERR_NO_EXTENSION); if(a->acinfo->exts == NULL) a->acinfo->exts = sk_X509_EXTENSION_new_null(); sk_X509_EXTENSION_push(a->acinfo->exts, ext); } { STACK_OF(X509) *stk = sk_X509_new_null(); sk_X509_push(stk, X509_dup(issuer)); if (issuerstack) { for (int j =0; j < sk_X509_num(issuerstack); j++) { sk_X509_push(stk, X509_dup(sk_X509_value(issuerstack, j))); } } //for(int i=0; iacinfo->exts == NULL) { a->acinfo->exts = sk_X509_EXTENSION_new_null(); if(a->acinfo->exts == NULL) ERROR(AC_ERR_NO_EXTENSION); } if(sk_X509_EXTENSION_push(a->acinfo->exts, norevavail)) norevavail = NULL; if(sk_X509_EXTENSION_push(a->acinfo->exts, auth)) auth = NULL; if(certstack) { if(sk_X509_EXTENSION_push(a->acinfo->exts, certstack)) certstack = NULL; } if(targetsext) { if(sk_X509_EXTENSION_push(a->acinfo->exts, targetsext)) targetsext = NULL; } alg1 = (X509_ALGOR*)X509_get0_tbs_sigalg(issuer); if(alg1) alg1 = X509_ALGOR_dup(alg1); #if (OPENSSL_VERSION_NUMBER < 0x10100000L) X509_get0_signature(NULL, &alg2, issuer); #else X509_get0_signature(NULL, (X509_ALGOR const**)&alg2, issuer); #endif if(alg2) alg2 = X509_ALGOR_dup(alg2); { const ASN1_BIT_STRING* issuerUID = NULL; X509_get0_uids(issuer, &issuerUID, NULL); if (issuerUID) if (!(uid = ASN1_STRING_dup(issuerUID))) ERROR(AC_ERR_MEMORY); } if(a->acinfo->holder->baseid == NULL) a->acinfo->holder->baseid = AC_IS_new(); // optional if(a->acinfo->form == NULL) a->acinfo->form = AC_FORM_new(); // optional if(subname) { GENERAL_NAME *dirn1 = GENERAL_NAME_new(); dirn1->d.dirn = subname; dirn1->type = GEN_DIRNAME; if(a->acinfo->holder->baseid->issuer == NULL) a->acinfo->holder->baseid->issuer = sk_GENERAL_NAME_new_null(); sk_GENERAL_NAME_push(a->acinfo->holder->baseid->issuer, dirn1); dirn1 = NULL; } if(issname) { GENERAL_NAME *dirn2 = GENERAL_NAME_new(); dirn2->d.dirn = issname; dirn2->type = GEN_DIRNAME; if(a->acinfo->form->names == NULL) a->acinfo->form->names = sk_GENERAL_NAME_new_null(); sk_GENERAL_NAME_push(a->acinfo->form->names, dirn2); dirn2 = NULL; } if(holdserial) { if(a->acinfo->holder->baseid->serial) ASN1_INTEGER_free(a->acinfo->holder->baseid->serial); a->acinfo->holder->baseid->serial = holdserial; holdserial = NULL; } if(serial) { ASN1_INTEGER_free(a->acinfo->serial); a->acinfo->serial = serial; serial = NULL; } if(version) { ASN1_INTEGER_free(a->acinfo->version); a->acinfo->version = version; version = 
NULL; } if(time1) { ASN1_GENERALIZEDTIME_free(a->acinfo->validity->notBefore); a->acinfo->validity->notBefore = time1; time1 = NULL; } if(time2) { ASN1_GENERALIZEDTIME_free(a->acinfo->validity->notAfter); a->acinfo->validity->notAfter = time2; time2 = NULL; } if (uid) { ASN1_BIT_STRING_free(a->acinfo->id); a->acinfo->id = uid; uid = NULL; } if(alg1) { X509_ALGOR_free(a->acinfo->alg); a->acinfo->alg = alg1; alg1 = NULL; } if(alg2) { X509_ALGOR_free(a->sig_alg); a->sig_alg = alg2; alg2 = NULL; } ASN1_sign((int (*)(void*, unsigned char**))i2d_AC_INFO, a->acinfo->alg, a->sig_alg, a->signature, (char *)a->acinfo, pkey, EVP_md5()); return 0; err: X509_EXTENSION_free(auth); X509_EXTENSION_free(norevavail); X509_EXTENSION_free(targetsext); X509_EXTENSION_free(certstack); X509_NAME_free(subname); X509_NAME_free(issname); ASN1_INTEGER_free(holdserial); ASN1_INTEGER_free(serial); AC_ATTR_free(capabilities); ASN1_OBJECT_free(cobj); AC_IETFATTR_free(capnames); ASN1_UTCTIME_free(time1); ASN1_UTCTIME_free(time2); AC_ATT_HOLDER_free(ac_att_holder); AC_FULL_ATTRIBUTES_free(ac_full_attrs); X509_ALGOR_free(alg1); X509_ALGOR_free(alg2); ASN1_INTEGER_free(version); ASN1_BIT_STRING_free(uid); return err; } bool createVOMSAC(std::string &codedac, Credential &issuer_cred, Credential &holder_cred, std::vector &fqan, std::vector &targets, std::vector& attributes, std::string &voname, std::string &uri, int lifetime) { EVP_PKEY* issuerkey = NULL; X509* holder = NULL; X509* issuer = NULL; STACK_OF(X509)* issuerchain = NULL; issuer = issuer_cred.GetCert(); issuerchain = issuer_cred.GetCertChain(); issuerkey = issuer_cred.GetPrivKey(); holder = holder_cred.GetCert(); AC* ac = AC_new(); if(createVOMSAC(issuer, issuerchain, holder, issuerkey, (BIGNUM *)(BN_value_one()), fqan, targets, attributes, ac, voname, uri, lifetime)){ if(ac) AC_free(ac); if(issuer) X509_free(issuer); if(holder) X509_free(holder); if(issuerkey) EVP_PKEY_free(issuerkey); if(issuerchain) sk_X509_pop_free(issuerchain, X509_free); return false; } unsigned int len = i2d_AC(ac, NULL); unsigned char *tmp = (unsigned char *)OPENSSL_malloc(len); if (tmp) { unsigned char *ttmp = tmp; i2d_AC(ac, &ttmp); //codedac = std::string((char *)tmp, len); codedac.append((const char*)tmp, len); } OPENSSL_free(tmp); if(ac) AC_free(ac); if(issuer) X509_free(issuer); if(holder) X509_free(holder); if(issuerkey) EVP_PKEY_free(issuerkey); if(issuerchain) sk_X509_pop_free(issuerchain, X509_free); return true; } bool addVOMSAC(AC** &aclist, std::string &acorder, std::string &codedac) { BIGNUM* dataorder = NULL; InitVOMSAttribute(); if(codedac.empty()) return true; int l = codedac.size(); unsigned char* pp = (unsigned char *)malloc(codedac.size()); if(!pp) { CredentialLogger.msg(ERROR,"VOMS: Can not allocate memory for parsing AC"); return false; } memcpy(pp, codedac.data(), l); dataorder = BN_new(); if (!dataorder) { free(pp); CredentialLogger.msg(ERROR,"VOMS: Can not allocate memory for storing the order of AC"); return false; } BN_one(dataorder); //Parse the AC, and insert it into an AC list unsigned char const* p = pp; AC* received_ac = NULL; if((received_ac = d2i_AC(NULL, &p, l))) { AC** actmplist = (AC **)listadd((char **)aclist, (char *)received_ac, sizeof(AC *)); if (actmplist) { aclist = actmplist; (void)BN_lshift1(dataorder, dataorder); (void)BN_set_bit(dataorder, 0); char *buffer = BN_bn2hex(dataorder); if(buffer) acorder = std::string(buffer); OPENSSL_free(buffer); free(pp); BN_free(dataorder); } else { listfree((char **)aclist, (freefn)AC_free); free(pp); 
BN_free(dataorder); return false; } } else { CredentialLogger.msg(ERROR,"VOMS: Can not parse AC"); free(pp); BN_free(dataorder); return false; } return true; } static int cb(int ok, X509_STORE_CTX *ctx) { if (!ok) { if (X509_STORE_CTX_get_error(ctx) == X509_V_ERR_CERT_HAS_EXPIRED) ok=1; /* since we are just checking the certificates, it is * ok if they are self signed. But we should still warn * the user. */ if (X509_STORE_CTX_get_error(ctx) == X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT) ok=1; /* Continue after extension errors too */ if (X509_STORE_CTX_get_error(ctx) == X509_V_ERR_INVALID_CA) ok=1; if (X509_STORE_CTX_get_error(ctx) == X509_V_ERR_PATH_LENGTH_EXCEEDED) ok=1; if (X509_STORE_CTX_get_error(ctx) == X509_V_ERR_CERT_CHAIN_TOO_LONG) ok=1; if (X509_STORE_CTX_get_error(ctx) == X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT) ok=1; } return(ok); } static bool checkCert(STACK_OF(X509) *stack, const std::string& ca_cert_dir, const std::string& ca_cert_file) { X509_STORE *ctx = NULL; X509_LOOKUP *lookup = NULL; int index = 0; if(ca_cert_dir.empty() && ca_cert_file.empty()) { CredentialLogger.msg(ERROR,"VOMS: CA directory or CA file must be provided"); return false; } ctx = X509_STORE_new(); if (ctx) { X509_STORE_set_verify_cb_func(ctx,cb); //#ifdef SIGPIPE // signal(SIGPIPE,SIG_IGN); //#endif // CRYPTO_malloc_init(); if (!(ca_cert_dir.empty()) && (lookup = X509_STORE_add_lookup(ctx,X509_LOOKUP_hash_dir()))) { X509_LOOKUP_add_dir(lookup, ca_cert_dir.c_str(), X509_FILETYPE_PEM); } if (!(ca_cert_file.empty()) && (lookup = X509_STORE_add_lookup(ctx, X509_LOOKUP_file()))) { X509_LOOKUP_load_file(lookup, ca_cert_file.c_str(), X509_FILETYPE_PEM); } //Check the AC issuer certificate's chain for (int i = sk_X509_num(stack)-1; i >=0; i--) { X509_STORE_CTX *csc = X509_STORE_CTX_new(); if (csc) { //Firstly, try to verify the certificate which is issues by CA; //Then try to verify the next one; the last one is the certificate //(voms server certificate) which issues AC. //Normally the voms server certificate is directly issued by a CA, //in this case, sk_X509_num(stack) should be 1. //On the other hand, if the voms server certificate is issued by a CA //which is issued by an parent CA, and so on, then the AC issuer should //put those CA certificates (except the root CA certificate which has //been configured to be trusted on the AC consumer side) together with //the voms server certificate itself in the 'certseq' part of AC. // //The CA certificates are checked one by one: the certificate which //is signed by root CA is checked firstly; the voms server certificate //is checked lastly. 
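          //
          //Worked example (illustrative): for a 'certseq' stack of
          //[voms server certificate, intermediate CA certificate],
          //sk_X509_num(stack) is 2; the loop first verifies the intermediate
          //CA certificate (i=1) against the trusted roots loaded from
          //ca_cert_dir/ca_cert_file, adds it to the X509_STORE, and then
          //verifies the voms server certificate (i=0) against that store.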
// if(X509_STORE_CTX_init(csc, ctx, sk_X509_value(stack, i), NULL)) { index = X509_verify_cert(csc); } X509_STORE_CTX_free(csc); if(!index) break; //If the 'i'th certificate is verified, then add it as trusted certificate, //then 'i'th certificate will be used as 'trusted certificate' to check //the 'i-1'th certificate X509_STORE_add_cert(ctx,sk_X509_value(stack, i)); } } } if (ctx) X509_STORE_free(ctx); return (index != 0); } static bool checkSigAC(X509* cert, AC* ac){ if (!cert || !ac) return false; EVP_PKEY *key = X509_extract_key(cert); if (!key) return false; int res = 0; res = ASN1_verify((int (*)(void*, unsigned char**))i2d_AC_INFO, ac->sig_alg, ac->signature, (char *)ac->acinfo, key); if (!res) CredentialLogger.msg(ERROR,"VOMS: failed to verify AC signature"); EVP_PKEY_free(key); return (res == 1); } #if 0 static bool regex_match(std::string& label, std::string& value) { bool match=false; RegularExpression regex(label); if(regex.isOk()){ std::list unmatched, matched; if(regex.match(value, unmatched, matched)) match=true; } return match; } #endif static std::string x509name2ascii(X509_NAME* name) { std::string str; if(name!=NULL) { char* buf = X509_NAME_oneline(name, NULL, 0); if(buf) { str.append(buf); OPENSSL_free(buf); } } return str; } static bool checkTrust(const VOMSTrustChain& chain,STACK_OF(X509)* certstack) { int n = 0; X509 *current = NULL; //A trusted chain is like following: // /O=Grid/O=NorduGrid/CN=host/arthur.hep.lu.se // /O=Grid/O=NorduGrid/CN=NorduGrid Certification Authority // ----NEXT CHAIN---- if(chain.size()-1 > (sk_X509_num(certstack)+1)) return false; #if 0 for(;n < sk_X509_num(certstack);++n) { if(n >= chain.size()) return true; current = sk_X509_value(certstack,n); if(!current) return false; char* buf = X509_NAME_oneline(X509_get_subject_name(current),NULL,0); if(!buf) { return false; } if(chain[n] != buf) { OPENSSL_free(buf); return false; } OPENSSL_free(buf); } if(n < chain.size()) { if(!current) return false; char* buf = X509_NAME_oneline(X509_get_subject_name(current),NULL,0); if(!buf) { return false; } if(chain[n] != buf) { OPENSSL_free(buf); return false; } OPENSSL_free(buf); } #endif for(int i = 0; i< chain.size(); i++) CredentialLogger.msg(VERBOSE, "VOMS: trust chain to check: %s ", chain[i]); for(;n < sk_X509_num(certstack);++n) { if((n+1) >= chain.size()) return true; current = sk_X509_value(certstack,n); if(!current) return false; std::string sub_name = x509name2ascii(X509_get_subject_name(current)); if(chain[n] != sub_name) { CredentialLogger.msg(VERBOSE,"VOMS: the DN in certificate: %s does not match that in trusted DN list: %s", sub_name, chain[n]); return false; } std::string iss_name = x509name2ascii(X509_get_issuer_name(current)); if(chain[n+1] != iss_name) { CredentialLogger.msg(VERBOSE,"VOMS: the Issuer identity in certificate: %s does not match that in trusted DN list: %s", iss_name, chain[n+1]); return false; } } return true; } static bool checkTrust(const RegularExpression& reg,STACK_OF(X509)* certstack) { if(sk_X509_num(certstack) <= 0) return false; X509 *current = sk_X509_value(certstack,0); #if 0 std::string subject; char* buf = X509_NAME_oneline(X509_get_subject_name(current),NULL,0); if(buf) { subject.append(buf); OPENSSL_free(buf); } std::list unmatched, matched; return reg.match(subject,unmatched,matched); #endif std::string subject = x509name2ascii(X509_get_subject_name(current)); std::string issuer = x509name2ascii(X509_get_issuer_name(current)); std::list unmatched, matched; return (reg.match(subject,unmatched,matched) && 
reg.match(issuer,unmatched,matched)); } /* Get the DNs chain from relative *.lsc file. * The location of .lsc file is path: $vomsdir//.lsc */ static bool getLSC(const std::string& vomsdir, const std::string& voname, const std::string& hostname, std::vector& vomscert_trust_dn) { std::string lsc_loc = vomsdir + G_DIR_SEPARATOR_S + voname + G_DIR_SEPARATOR_S + hostname + ".lsc"; if (!Glib::file_test(lsc_loc, Glib::FILE_TEST_IS_REGULAR)) { CredentialLogger.msg(INFO, "VOMS: The lsc file %s does not exist", lsc_loc); return false; } std::string trustdn_str; std::ifstream in(lsc_loc.c_str(), std::ios::in); if (!in) { CredentialLogger.msg(ERROR, "VOMS: The lsc file %s can not be open", lsc_loc); return false; } std::getline(in, trustdn_str, 0); in.close(); tokenize(trustdn_str, vomscert_trust_dn, "\n"); return true; } static bool checkSignature(AC* ac, const std::string vomsdir, const std::string& voname, const std::string& hostname, const std::string& ca_cert_dir, const std::string& ca_cert_file, VOMSTrustList& vomscert_trust_dn, X509*& issuer_cert, unsigned int& status, bool verify) { bool res = true; X509* issuer = NULL; issuer_cert = NULL; int nid = OBJ_txt2nid(certseqOID); STACK_OF(X509_EXTENSION) *exts = ac->acinfo->exts; int pos = X509v3_get_ext_by_NID(exts, nid, -1); if (pos >= 0) { //Check if the DN/CA file is installed for a given VO. X509_EXTENSION* ext = sk_X509_EXTENSION_value(exts, pos); if(!ext) { // X509 parsing error status |= VOMSACInfo::X509ParsingFailed; return false; } AC_CERTS* certs = (AC_CERTS *)X509V3_EXT_d2i(ext); if(!certs) { // X509 parsing error status |= VOMSACInfo::X509ParsingFailed; return false; } //The relatively new version of VOMS server is supposed to //create AC which includes the certificate stack: //the certificate of voms server; the non-CA certificate/s //(if there are) that signs the voms server' certificate. STACK_OF(X509)* certstack = certs->stackcert; if(verify) { bool trust_success = false; bool lsc_check = false; if((vomscert_trust_dn.SizeChains()==0) && (vomscert_trust_dn.SizeRegexs()==0)) { std::vector voms_trustdn; if(!getLSC(vomsdir, voname, hostname, voms_trustdn)) { CredentialLogger.msg(WARNING,"VOMS: there is no constraints of trusted voms DNs, the certificates stack in AC will not be checked."); trust_success = true; status |= VOMSACInfo::TrustFailed; status |= VOMSACInfo::LSCFailed; } else { vomscert_trust_dn.AddElement(voms_trustdn); lsc_check = true; //lsc checking only happens if the VOMSTrustList argument is empty. 
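              //A <hostname>.lsc file simply lists the DN chain of the VOMS server,
              //one DN per line, subject DN first and then its issuer CA DN, e.g.
              //(illustrative values matching the example chain in checkTrust() above):
              //  /O=Grid/O=NorduGrid/CN=host/arthur.hep.lu.se
              //  /O=Grid/O=NorduGrid/CN=NorduGrid Certification Authority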
} } //Check if the DN of those certificates in the certificate stack //corresponds to the trusted DN chain in the configuration if(certstack && !trust_success) { for(int n = 0;n < vomscert_trust_dn.SizeChains();++n) { const VOMSTrustChain& chain = vomscert_trust_dn.GetChain(n); if(checkTrust(chain,certstack)) { trust_success = true; break; } } if(!trust_success) for(int n = 0;n < vomscert_trust_dn.SizeRegexs();++n) { const RegularExpression& reg = vomscert_trust_dn.GetRegex(n); if(checkTrust(reg,certstack)) { trust_success = true; break; } } } if (!trust_success) { //AC_CERTS_free(certs); CredentialLogger.msg(ERROR,"VOMS: unable to match certificate chain against VOMS trusted DNs"); if(!lsc_check) status |= VOMSACInfo::TrustFailed; else status |= VOMSACInfo::LSCFailed; //return false; } } bool sig_valid = false; if(certstack) { //If the certificate stack does correspond to some of the trusted DN chain, //then check if the AC signature is valid by using the voms server //certificate (voms server certificate is supposed to be the first //in the certificate stack). issuer = X509_dup(sk_X509_value(certstack, 0)); } if(issuer) { if (checkSigAC(issuer, ac)) { sig_valid = true; } else { CredentialLogger.msg(ERROR,"VOMS: AC signature verification failed"); } } if(verify) { //Check if those certificate in the certificate stack are trusted. if (sig_valid) { // Note - sig_valid=true never happens with certstack=NULL if (!checkCert(certstack, ca_cert_dir, ca_cert_file)) { if(issuer) { X509_free(issuer); issuer = NULL; } CredentialLogger.msg(ERROR,"VOMS: unable to verify certificate chain"); status |= VOMSACInfo::CAUnknown; res = false; } } else { CredentialLogger.msg(ERROR,"VOMS: cannot validate AC issuer for VO %s",voname); status |= VOMSACInfo::ACParsingFailed; res = false; } } AC_CERTS_free(certs); } #if 0 //For those old-stype voms configuration, there is no //certificate stack in the AC. So there should be a local //directory which includes the voms server certificate. //It is not suppoted anymore. // check if able to find the signing certificate // among those specific for the vo or else in the vomsdir // directory if(issuer == NULL){ bool found = false; BIO * in = NULL; X509 * x = NULL; for(int i = 0; (i < 2 && !found); ++i) { std::string directory = vomsdir + (i ? 
"" : "/" + voname); CredentialLogger.msg(DEBUG,"VOMS: directory for trusted service certificates: %s",directory); Glib::Dir dir(directory); while(true){ std::string filename = dir.read_name(); if (!filename.empty()) { in = BIO_new(BIO_s_file()); if (in) { std::string temp = directory + "/" + filename; if (BIO_read_filename(in, temp.c_str()) > 0) { x = PEM_read_bio_X509(in, NULL, 0, NULL); if (x) { if (checkSigAC(x, ac)) { found = true; break; } else { X509_free(x); x = NULL; } } } BIO_free(in); in = NULL; } } else break; } } if (in) BIO_free(in); if (found) { if (!checkCert(x, ca_cert_dir, ca_cert_file)) { X509_free(x); x = NULL; } } else { CredentialLogger.msg(ERROR,"VOMS: Cannot find certificate of AC issuer for VO %s",voname); issuer = x; } #endif issuer_cert = issuer; return res; } static bool checkAttributes(STACK_OF(AC_ATTR) *atts, std::vector& attributes, unsigned int& status) { AC_ATTR *caps = NULL; STACK_OF(AC_IETFATTRVAL) *values = NULL; AC_IETFATTR *capattr = NULL; AC_IETFATTRVAL *capname = NULL; GENERAL_NAME *data = NULL; /* find AC_ATTR with IETFATTR type */ int nid = OBJ_txt2nid(idatcapOID); int pos = X509at_get_attr_by_NID((STACK_OF(X509_ATTRIBUTE)*)atts, nid, -1); if (!(pos >=0)) { CredentialLogger.msg(ERROR,"VOMS: Can not find AC_ATTR with IETFATTR type"); return false; } caps = sk_AC_ATTR_value(atts, pos); /* check there's exactly one IETFATTR attribute */ if (sk_AC_IETFATTR_num(caps->ietfattr) != 1) { CredentialLogger.msg(ERROR,"VOMS: case of multiple IETFATTR attributes not supported"); return false; } /* retrieve the only AC_IETFFATTR */ capattr = sk_AC_IETFATTR_value(caps->ietfattr, 0); values = capattr->values; /* check it has exactly one policyAuthority */ if (sk_GENERAL_NAME_num(capattr->names) != 1) { CredentialLogger.msg(ERROR,"VOMS: case of multiple policyAuthority not supported"); return false; } /* store policyAuthority */ data = sk_GENERAL_NAME_value(capattr->names, 0); if (data->type == GEN_URI) { std::string voname("/voname="); voname.append((const char*)(data->d.ia5->data), data->d.ia5->length); std::string::size_type pos = voname.find("://"); if(pos != std::string::npos) { voname.replace(pos,3,"/hostname="); } attributes.push_back(voname); } else { CredentialLogger.msg(ERROR,"VOMS: the format of policyAuthority is unsupported - expecting URI"); return false; } /* scan the stack of IETFATTRVAL to store attribute */ for (int i=0; itype != V_ASN1_OCTET_STRING) { CredentialLogger.msg(ERROR,"VOMS: the format of IETFATTRVAL is not supported - expecting OCTET STRING"); return false; } std::string fqan((const char*)(capname->value.octet_string->data), capname->value.octet_string->length); // if the attribute is like: /knowarc.eu/Role=NULL/Capability=NULL // or /knowarc.eu/Role=tester/Capability=NULL // then remove the element with "=NULL" to be: // /knowarc.eu // /knowarc.eu/Role=tester std::string str = fqan; std::size_t pos = str.find("/Role=NULL"); if(pos != std::string::npos) str.erase(pos, 10); pos = str.find("/Capability=NULL"); if(pos != std::string::npos) str.erase(pos, 16); attributes.push_back(str); } return true; } #ifndef HAVE_GETDOMAINNAME static int getdomainname(char *name, int length) { char szBuffer[256]; long nBufSize = sizeof(szBuffer); char *pBuffer = szBuffer; long result_len = sysinfo( SI_SRPC_DOMAIN, pBuffer, nBufSize ); if (result_len > length) { return -1; } memcpy (name, pBuffer, result_len); if (result_len < length) name[result_len] = '\0'; return 0; } #endif static std::string getfqdn(void) { std::string name; char hostname[256]; char 
domainname[256]; if ((!gethostname(hostname, 255)) && (!getdomainname(domainname, 255))) { name.append(hostname); if(strcmp(domainname, "(none)")) { if (*domainname == '.') name.append(domainname); else { name.append(".").append(domainname); } } } return name; } static bool interpretAttributes(AC_FULL_ATTRIBUTES *full_attr, std::vector& attributes, unsigned int& status) { std::string name, value, qualifier, grantor, voname, uri; GENERAL_NAME *gn = NULL; STACK_OF(AC_ATT_HOLDER) *providers = NULL; int i; providers = full_attr->providers; for (i = 0; i < sk_AC_ATT_HOLDER_num(providers); i++) { AC_ATT_HOLDER *holder = sk_AC_ATT_HOLDER_value(providers, i); STACK_OF(AC_ATTRIBUTE) *atts = holder->attributes; gn = sk_GENERAL_NAME_value(holder->grantor, 0); grantor.assign((const char*)(gn->d.ia5->data), gn->d.ia5->length); if(grantor.empty()) { CredentialLogger.msg(ERROR,"VOMS: the grantor attribute is empty"); status |= VOMSACInfo::InternalParsingFailed; return false; } std::string::size_type pos = grantor.find("://"); if(pos == std::string::npos) { voname = grantor; uri="NULL"; } else { voname = grantor.substr(0,pos); uri = grantor.substr(pos+3); } for (int j = 0; j < sk_AC_ATTRIBUTE_num(atts); j++) { std::string attribute; AC_ATTRIBUTE *at = sk_AC_ATTRIBUTE_value(atts, j); name.assign((const char*)(at->name->data), at->name->length); if(name.empty()) { CredentialLogger.msg(ERROR,"VOMS: the attribute name is empty"); status |= VOMSACInfo::InternalParsingFailed; return false; } value.assign((const char*)(at->value->data), at->value->length); if(value.empty()) { CredentialLogger.msg(WARNING,"VOMS: the attribute value for %s is empty", name.c_str()); //return false; } qualifier.assign((const char*)(at->qualifier->data), at->qualifier->length); if(qualifier.empty()) { CredentialLogger.msg(ERROR,"VOMS: the attribute qualifier is empty"); status |= VOMSACInfo::InternalParsingFailed; return false; } //attribute.append("/grantor=").append(grantor).append("/").append(qualifier).append(":").append(name).append("=").append(value); std::string separator; if(qualifier.substr(0,1) != "/") separator = "/"; attribute.append("/voname=").append(voname). append("/hostname=").append(uri). append(separator).append(qualifier).append(":").append(name). 
append("=").append(value); attributes.push_back(attribute); } grantor.clear(); } return true; } static bool checkExtensions(STACK_OF(X509_EXTENSION) *exts, X509 *iss, std::vector& output, unsigned int& status) { int nid1 = OBJ_txt2nid(idcenoRevAvailOID); int nid2 = OBJ_txt2nid(idceauthKeyIdentifierOID); int nid3 = OBJ_txt2nid(idceTargetsOID); int nid5 = OBJ_txt2nid(attributesOID); int pos1 = X509v3_get_ext_by_NID(exts, nid1, -1); int pos2 = X509v3_get_ext_by_NID(exts, nid2, -1); int pos3 = X509v3_get_ext_by_critical(exts, 1, -1); int pos4 = X509v3_get_ext_by_NID(exts, nid3, -1); int pos5 = X509v3_get_ext_by_NID(exts, nid5, -1); /* noRevAvail, Authkeyid MUST be present */ if ((pos1 < 0) || (pos2 < 0)) { CredentialLogger.msg(ERROR,"VOMS: both idcenoRevAvail and authorityKeyIdentifier certificate extensions must be present"); status = VOMSACInfo::InternalParsingFailed; return false; } //Check if the target fqan matches idceTargets while (pos3 >=0) { X509_EXTENSION *ex; AC_TARGETS *targets; AC_TARGET *name; ex = sk_X509_EXTENSION_value(exts, pos3); if (pos3 == pos4) { //The only critical extension allowed is idceTargets, std::string fqdn = getfqdn(); int ok = 0; int i; ASN1_IA5STRING* fqdns = ASN1_IA5STRING_new(); if (fqdns) { ASN1_STRING_set(fqdns, fqdn.c_str(), fqdn.size()); targets = (AC_TARGETS *)X509V3_EXT_d2i(ex); if (targets) { for (i = 0; i < sk_AC_TARGET_num(targets->targets); i++) { name = sk_AC_TARGET_value(targets->targets, i); if (name->name && name->name->type == GEN_URI) { ok = !ASN1_STRING_cmp(name->name->d.ia5, fqdns); if (ok) break; } } AC_TARGETS_free(targets); } ASN1_STRING_free(fqdns); } if (!ok) { CredentialLogger.msg(WARNING,"VOMS: FQDN of this host %s does not match any target in AC", fqdn); // return false; ??? } } else { CredentialLogger.msg(ERROR,"VOMS: the only supported critical extension of the AC is idceTargets"); status = VOMSACInfo::InternalParsingFailed; return false; } pos3 = X509v3_get_ext_by_critical(exts, 1, pos3); } //Parse the attributes if (pos5 >= 0) { X509_EXTENSION *ex = NULL; AC_FULL_ATTRIBUTES *full_attr = NULL; ex = sk_X509_EXTENSION_value(exts, pos5); full_attr = (AC_FULL_ATTRIBUTES *)X509V3_EXT_d2i(ex); if (full_attr) { if (!interpretAttributes(full_attr, output, status)) { CredentialLogger.msg(ERROR,"VOMS: failed to parse attributes from AC"); AC_FULL_ATTRIBUTES_free(full_attr); status = VOMSACInfo::InternalParsingFailed; return false; } AC_FULL_ATTRIBUTES_free(full_attr); } } //Check the authorityKeyIdentifier if (pos2 >= 0) { X509_EXTENSION *ex; bool keyerr = false; AUTHORITY_KEYID *key; ex = sk_X509_EXTENSION_value(exts, pos2); key = (AUTHORITY_KEYID *)X509V3_EXT_d2i(ex); if (key) { if (iss) { if (key->keyid) { unsigned char hashed[20]; ASN1_BIT_STRING* pkeystr = X509_get0_pubkey_bitstr(iss); if (!SHA1(pkeystr->data, pkeystr->length, hashed)) keyerr = true; if ((memcmp(key->keyid->data, hashed, 20) != 0) && (key->keyid->length == 20)) keyerr = true; } else { if (!(key->issuer && key->serial)) keyerr = true; if (ASN1_INTEGER_cmp((key->serial), X509_get_serialNumber(iss))) keyerr = true; if (key->serial->type != GEN_DIRNAME) keyerr = true; if (X509_NAME_cmp(sk_GENERAL_NAME_value((key->issuer), 0)->d.dirn, X509_get_subject_name(iss))) keyerr = true; } } AUTHORITY_KEYID_free(key); } else { keyerr = true; } if(keyerr) { CredentialLogger.msg(ERROR,"VOMS: authorityKey is wrong"); status = VOMSACInfo::InternalParsingFailed; return false; } } return true; } static time_t ASN1_GENERALIZEDTIME_get(const ASN1_GENERALIZEDTIME* const s) { if ((s == NULL) 
|| (s->data == NULL) || (s->length == 0)) return Arc::Time::UNDEFINED; std::string str((char const *)(s->data), s->length); Arc::Time t(str); return t.GetTime(); } static bool checkACInfo(X509* cert, X509* issuer, AC* ac, std::vector& output, std::string& ac_holder_name, std::string& ac_issuer_name, Time& valid_from, Time& valid_till, unsigned int& status) { bool res = true; if(!ac || !cert || !(ac->acinfo) || !(ac->acinfo->version) || !(ac->acinfo->holder) || (ac->acinfo->holder->digest) || !(ac->acinfo->form) || !(ac->acinfo->form->names) || (ac->acinfo->form->is) || (ac->acinfo->form->digest) || !(ac->acinfo->serial) || !(ac->acinfo->alg) || !(ac->acinfo->validity) || !(ac->acinfo->validity->notBefore) || !(ac->acinfo->validity->notAfter) || !(ac->acinfo->attrib) || !(ac->sig_alg) || !(ac->signature)) { CredentialLogger.msg(ERROR,"VOMS: missing AC parts"); status |= VOMSACInfo::ACParsingFailed; return false; } //Check the validity time ASN1_GENERALIZEDTIME *start; ASN1_GENERALIZEDTIME *end; start = ac->acinfo->validity->notBefore; end = ac->acinfo->validity->notAfter; time_t ctime, dtime; time (&ctime); ctime += 300; dtime = ctime-600; if ((start->type != V_ASN1_GENERALIZEDTIME) || (end->type != V_ASN1_GENERALIZEDTIME)) { CredentialLogger.msg(ERROR,"VOMS: unsupported time format format in AC - expecting GENERALIZED TIME"); status |= VOMSACInfo::ACParsingFailed; return false; // ? } if ((X509_cmp_current_time(start) >= 0) && (X509_cmp_time(start, &ctime) >= 0)) { CredentialLogger.msg(ERROR,"VOMS: AC is not yet valid"); status |= VOMSACInfo::TimeValidFailed; res = false; //return false; } if ((X509_cmp_current_time(end) <= 0) && (X509_cmp_time(end, &dtime) <= 0)) { CredentialLogger.msg(ERROR,"VOMS: AC has expired"); status |= VOMSACInfo::TimeValidFailed; res = false; //return false; } valid_from = Time(ASN1_GENERALIZEDTIME_get(start)); valid_till = Time(ASN1_GENERALIZEDTIME_get(end)); STACK_OF(GENERAL_NAME) *names; GENERAL_NAME *name = NULL; if (ac->acinfo->holder->baseid) { if(!(ac->acinfo->holder->baseid->serial) || !(ac->acinfo->holder->baseid->issuer)) { CredentialLogger.msg(ERROR,"VOMS: AC is not complete - missing Serial or Issuer information"); status |= VOMSACInfo::ACParsingFailed; return false; } CredentialLogger.msg(DEBUG,"VOMS: the holder serial number is: %lx", ASN1_INTEGER_get(X509_get_serialNumber(cert))); CredentialLogger.msg(DEBUG,"VOMS: the serial number in AC is: %lx", ASN1_INTEGER_get(ac->acinfo->holder->baseid->serial)); if (ASN1_INTEGER_cmp(ac->acinfo->holder->baseid->serial, X509_get_serialNumber(cert))) { CredentialLogger.msg(VERBOSE,"VOMS: the holder serial number %lx is not the same as the serial number in AC %lx, the holder certificate that is used to create a voms proxy could be a proxy certificate with a different serial number as the original EEC cert", ASN1_INTEGER_get(X509_get_serialNumber(cert)), ASN1_INTEGER_get(ac->acinfo->holder->baseid->serial)); // return false; } names = ac->acinfo->holder->baseid->issuer; if ((sk_GENERAL_NAME_num(names) != 1) || !(name = sk_GENERAL_NAME_value(names,0)) || (name->type != GEN_DIRNAME)) { CredentialLogger.msg(ERROR,"VOMS: the holder information in AC is wrong"); status |= VOMSACInfo::ACParsingFailed; return false; } char *ac_holder_name_chars = X509_NAME_oneline(name->d.dirn,NULL,0); if(ac_holder_name_chars) { ac_holder_name = ac_holder_name_chars; OPENSSL_free(ac_holder_name_chars); } std::string holder_name; char *holder_name_chars = X509_NAME_oneline(X509_get_subject_name(cert),NULL,0); if(holder_name_chars) { 
holder_name = holder_name_chars; OPENSSL_free(holder_name_chars); } std::string holder_issuer_name; char *holder_issuer_name_chars = X509_NAME_oneline(X509_get_issuer_name(cert),NULL,0); if(holder_issuer_name_chars) { holder_issuer_name = holder_issuer_name_chars; OPENSSL_free(holder_issuer_name_chars); } CredentialLogger.msg(DEBUG,"VOMS: DN of holder in AC: %s",ac_holder_name.c_str()); CredentialLogger.msg(DEBUG,"VOMS: DN of holder: %s",holder_name.c_str()); CredentialLogger.msg(DEBUG,"VOMS: DN of issuer: %s",holder_issuer_name.c_str()); if ((ac_holder_name != holder_name) && (ac_holder_name != holder_issuer_name)) { std::size_t found1, found2; found1 = holder_name.find(ac_holder_name); found2 = holder_issuer_name.find(ac_holder_name); if((found1 == std::string::npos) && (found2 == std::string::npos)) { CredentialLogger.msg(ERROR,"VOMS: the holder name in AC is not related to the distinguished name in holder certificate"); status |= VOMSACInfo::ACParsingFailed; return false; } } const ASN1_BIT_STRING* issuerUID = NULL; X509_get0_uids(cert, &issuerUID, NULL); if ((ac->acinfo->holder->baseid->uid && issuerUID) || (!issuerUID && !ac->acinfo->holder->baseid->uid)) { if (ac->acinfo->holder->baseid->uid) { if (ASN1_STRING_cmp(ac->acinfo->holder->baseid->uid, issuerUID)) { CredentialLogger.msg(ERROR,"VOMS: the holder issuerUID is not the same as that in AC"); status |= VOMSACInfo::ACParsingFailed; return false; } } } else { CredentialLogger.msg(ERROR,"VOMS: the holder issuerUID is not the same as that in AC"); status |= VOMSACInfo::ACParsingFailed; return false; } } else if (ac->acinfo->holder->name) { names = ac->acinfo->holder->name; if ((sk_GENERAL_NAME_num(names) == 1) || //??? ((name = sk_GENERAL_NAME_value(names,0))) || (name->type != GEN_DIRNAME)) { if (X509_NAME_cmp(name->d.dirn, X509_get_issuer_name(cert))) { // CHECK ALT_NAMES // in VOMS ACs, checking into alt names is assumed to always fail. CredentialLogger.msg(ERROR,"VOMS: the holder issuer name is not the same as that in AC"); status |= VOMSACInfo::ACParsingFailed; return false; } } } names = ac->acinfo->form->names; if ((sk_GENERAL_NAME_num(names) != 1) || !(name = sk_GENERAL_NAME_value(names,0)) || (name->type != GEN_DIRNAME)) { CredentialLogger.msg(ERROR,"VOMS: the issuer information in AC is wrong"); status |= VOMSACInfo::ACParsingFailed; return false; } ac_issuer_name = x509name2ascii(name->d.dirn); if(issuer) { if (X509_NAME_cmp(name->d.dirn, X509_get_subject_name(issuer))) { std::string issuer_name = x509name2ascii(X509_get_subject_name(issuer)); CredentialLogger.msg(ERROR,"VOMS: the issuer name %s is not the same as that in AC - %s", issuer_name, ac_issuer_name); status |= VOMSACInfo::ACParsingFailed; return false; } } if (ac->acinfo->serial->length > 20) { CredentialLogger.msg(ERROR,"VOMS: the serial number of AC INFO is too long - expecting no more than 20 octets"); status |= VOMSACInfo::InternalParsingFailed; return false; } //Check AC's extension if(!checkExtensions(ac->acinfo->exts, issuer, output, status)) res = false; //Check AC's attribute if(!checkAttributes(ac->acinfo->attrib, output, status)) res = false; // ?? return res; } // Returns false if any error happened. // Also always fills status with information about errors detected if any. 
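// The checks done here delegate to the helpers above: the VO name and host name
// are extracted from the policyAuthority URI of the IETF attribute, the AC
// signature is verified by checkSignature() using the 'certseq' certificate
// stack together with the LSC/trusted-DN constraints and the CA store, and the
// AC content (validity period, holder, issuer, extensions, attributes) is
// checked by checkACInfo().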
static bool verifyVOMSAC(AC* ac, const std::string& ca_cert_dir, const std::string& ca_cert_file, const std::string vomsdir, VOMSTrustList& vomscert_trust_dn, X509* holder, std::vector& attr_output, std::string& vo_name, std::string& ac_holder_name, std::string& ac_issuer_name, Time& from, Time& till, unsigned int& status, bool verify) { bool res = true; //Extract name STACK_OF(AC_ATTR) * atts = ac->acinfo->attrib; int nid = 0; int pos = 0; nid = OBJ_txt2nid(idatcapOID); pos = X509at_get_attr_by_NID((STACK_OF(X509_ATTRIBUTE)*)atts, nid, -1); if(!(pos >=0)) { CredentialLogger.msg(ERROR,"VOMS: unable to extract VO name from AC"); status |= VOMSACInfo::ACParsingFailed; // ? return false; } AC_ATTR * caps = sk_AC_ATTR_value(atts, pos); if(!caps) { // Must not happen. X509 parsing error CredentialLogger.msg(ERROR,"VOMS: unable to extract VO name from AC"); status |= VOMSACInfo::ACParsingFailed; // ? return false; } AC_IETFATTR * capattr = sk_AC_IETFATTR_value(caps->ietfattr, 0); if(!capattr) { // Must not happen. AC parsing error CredentialLogger.msg(ERROR,"VOMS: unable to extract VO name from AC"); status |= VOMSACInfo::ACParsingFailed; // ? return false; } GENERAL_NAME * name = sk_GENERAL_NAME_value(capattr->names, 0); if(!name) { // Must not happen. AC parsing error CredentialLogger.msg(ERROR,"VOMS: unable to extract VO name from AC"); status |= VOMSACInfo::ACParsingFailed; // ? return false; } std::string voname((const char *)name->d.ia5->data, 0, name->d.ia5->length); std::string::size_type cpos = voname.find("://"); std::string hostname; if (cpos != std::string::npos) { std::string::size_type cpos2 = voname.find(":", cpos+1); if (cpos2 != std::string::npos) hostname = voname.substr(cpos+3, (cpos2 - cpos - 3)); else { // Must not happen. VOMS parsing error CredentialLogger.msg(ERROR,"VOMS: unable to determine hostname of AC from VO name: %s",voname); status |= VOMSACInfo::InternalParsingFailed; // ? return false; } voname = voname.substr(0, cpos); vo_name = voname; } else { // Must not happen. VOMS parsing error CredentialLogger.msg(ERROR,"VOMS: unable to extract VO name from AC"); status |= VOMSACInfo::InternalParsingFailed; // ? 
return false; } X509* issuer = NULL; if(!checkSignature(ac, vomsdir, voname, hostname, ca_cert_dir, ca_cert_file, vomscert_trust_dn, issuer, status, verify)) { CredentialLogger.msg(ERROR,"VOMS: can not verify the signature of the AC"); res = false; } if(!checkACInfo(holder, issuer, ac, attr_output, ac_holder_name, ac_issuer_name, from, till, status)) { // Not printing anything because checkACInfo prints a lot of information itself CredentialLogger.msg(ERROR,"VOMS: problems while parsing information in AC"); res = false; } if(issuer) X509_free(issuer); return res; } bool parseVOMSAC(X509* holder, const std::string& ca_cert_dir, const std::string& ca_cert_file, const std::string& vomsdir, VOMSTrustList& vomscert_trust_dn, std::vector& output, bool verify, bool reportall) { InitVOMSAttribute(); //Search the extension int nid = 0; int position = 0; bool critical = false; X509_EXTENSION * ext; AC_SEQ* aclist = NULL; nid = OBJ_txt2nid(acseqOID); position = X509_get_ext_by_NID(holder, nid, -1); if(position >= 0) { ext = X509_get_ext(holder, position); if (ext){ if(X509_EXTENSION_get_critical(ext)) critical = true; aclist = (AC_SEQ *)X509V3_EXT_d2i(ext); } } if(aclist == NULL) { ERR_clear_error(); //while(ERR_get_error() != 0); //std::cerr<<"No AC in the proxy certificate"<acs); for (int i = 0; i < num; i++) { AC *ac = (AC *)sk_AC_value(aclist->acs, i); VOMSACInfo ac_info; bool r = verifyVOMSAC(ac, ca_cert_dir, ca_cert_file, vomsdir.empty()?default_vomsdir:vomsdir, vomscert_trust_dn, holder, ac_info.attributes, ac_info.voname, ac_info.holder, ac_info.issuer, ac_info.from, ac_info.till, ac_info.status, verify); if(!r) verified = false; if(r || reportall) { if(critical) ac_info.status |= VOMSACInfo::IsCritical; output.push_back(ac_info); } ERR_clear_error(); } if(aclist)AC_SEQ_free(aclist); return verified; } bool parseVOMSAC(const Credential& holder_cred, const std::string& ca_cert_dir, const std::string& ca_cert_file, const std::string& vomsdir, VOMSTrustList& vomscert_trust_dn, std::vector& output, bool verify, bool reportall) { X509* holder = holder_cred.GetCert(); if(!holder) return false; bool res = parseVOMSAC(holder, ca_cert_dir, ca_cert_file, vomsdir, vomscert_trust_dn, output, verify, reportall); //Also parse the voms attributes inside the certificates on //the upstream of the holder certificate; in this case, //multiple level of delegation exists, and user(or intermediate //actor such as grid manager) could hold a voms proxy and use this //proxy to create a more level of proxy STACK_OF(X509)* certchain = holder_cred.GetCertChain(); if(certchain != NULL) { for(int idx = 0;;++idx) { if(idx >= sk_X509_num(certchain)) break; // TODO: stop at actual certificate, do not go to CAs and sub-CAs X509* cert = sk_X509_value(certchain,sk_X509_num(certchain)-idx-1); bool res2 = parseVOMSAC(cert, ca_cert_dir, ca_cert_file, vomsdir, vomscert_trust_dn, output, verify, reportall); if (!res2) res = res2; }; } X509_free(holder); sk_X509_pop_free(certchain, X509_free); return res; } bool parseVOMSAC(const std::string& cert_str, const std::string& ca_cert_dir, const std::string& ca_cert_file, const std::string& vomsdir, VOMSTrustList& vomscert_trust_dn, std::vector& output, bool verify, bool reportall) { STACK_OF(X509)* cert_chain = NULL; cert_chain = sk_X509_new_null(); BIO* bio = BIO_new(BIO_s_mem()); BIO_write(bio, cert_str.c_str(), cert_str.size()); bool res = true; bool found = false; while(!BIO_eof(bio)) { X509* tmp = NULL; if(!(PEM_read_bio_X509(bio, &tmp, NULL, NULL))){ ERR_clear_error(); if(!found) 
res = false; break; } else { found = true; } if(!sk_X509_push(cert_chain, tmp)) { //std::string str(X509_NAME_oneline(X509_get_subject_name(tmp),0,0)); X509_free(tmp); } } for(int idx = 0;;++idx) { if(idx >= sk_X509_num(cert_chain)) break; X509* cert = sk_X509_value(cert_chain, idx); bool res2 = parseVOMSAC(cert, ca_cert_dir, ca_cert_file, vomsdir, vomscert_trust_dn, output, verify, reportall); if (!res2) res = res2; } sk_X509_pop_free(cert_chain, X509_free); BIO_free_all(bio); return res; } static char trans2[128] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 0, 0, 0, 0, 0, 0, 0, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 62, 0, 63, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 0, 0, 0, 0, 0}; static char *base64Encode(const char *data, int size, int *j) { BIO *in = NULL; BIO *b64 = NULL; int len = 0; char *buffer = NULL; in = BIO_new(BIO_s_mem()); b64 = BIO_new(BIO_f_base64()); if (!in || !b64) goto err; b64 = BIO_push(b64, in); BIO_write(b64, data, size); BIO_flush(b64); *j = len = BIO_pending(in); buffer = (char *)malloc(len); if (!buffer) goto err; if (BIO_read(in, buffer, len) != len) { free(buffer); buffer = NULL; goto err; } err: BIO_free(b64); BIO_free(in); return buffer; } static char *base64Decode(const char *data, int size, int *j) { BIO *b64 = NULL; BIO *in = NULL; char *buffer = (char *)malloc(size); if (!buffer) return NULL; memset(buffer, 0, size); b64 = BIO_new(BIO_f_base64()); in = BIO_new_mem_buf((void*)data, size); in = BIO_push(b64, in); *j = BIO_read(in, buffer, size); BIO_free_all(in); return buffer; } static char *MyDecode(const char *data, int size, int *n) { int bit = 0; int i = 0; char *res; if (!data || !size) return NULL; if ((res = (char *)calloc(1, (size*3)/4 + 2))) { *n = 0; while (i < size) { char c = trans2[(int)data[i]]; char c2 = (((i+1) < size) ? 
trans2[(int)data[i+1]] : 0); switch(bit) { case 0: res[*n] = ((c & 0x3f) << 2) | ((c2 & 0x30) >> 4); if ((i+1) < size) (*n)++; bit=4; i++; break; case 4: res[*n] = ((c & 0x0f) << 4) | ((c2 & 0x3c) >> 2); if ((i+1) < size) (*n)++; bit=2; i++; break; case 2: res[*n] = ((c & 0x03) << 6) | (c2 & 0x3f); if ((i+1) < size) (*n)++; i += 2; bit = 0; break; } } return res; } return NULL; } char *VOMSEncode(const char *data, int size, int *j) { // Only base64 encoding is used here return base64Encode(data, size, j); } char *VOMSDecode(const char *data, int size, int *j) { int i = 0; while (i < size) if (data[i++] == '\n') return base64Decode(data, size, j); return MyDecode(data, size, j); } std::string getCredentialProperty(const Arc::Credential& u, const std::string& property, const std::string& ca_cert_dir, const std::string& ca_cert_file, const std::string& vomsdir, const std::vector& voms_trust_list) { if (property == "dn"){ return u.GetIdentityName(); } // If it was not DN, then we have to deal with VOMS std::vector output; VOMSTrustList vomstrustlist(voms_trust_list); bool verify = false; if(vomstrustlist.SizeRegexs() || vomstrustlist.SizeChains())verify = true; parseVOMSAC(u,ca_cert_dir,vomsdir,ca_cert_file,vomstrustlist,output,verify); if (property == "voms:vo"){ if (output.empty()) { // if it's not possible to determine the VO -- such jobs will go into generic share return ""; } else { // Using first vo in list return output[0].voname; } } else if (property == "voms:role"){ size_t pos1, pos2; unsigned int i; unsigned int n; std::string vo_name; for (n=0;n elements; tokenize(fqan,elements,"/"); if(elements.empty()) return fqan; // No idea how to handle std::list::iterator element = elements.begin(); if(element->find('=') != std::string::npos) return fqan; // Already full // Insert Group= into every group part std::string fqan_ = "/Group="+(*element); for(++element;element!=elements.end();++element) { if(element->find('=') != std::string::npos) break; fqan_ += "/Group="+(*element); } for(;element!=elements.end();++element) { fqan_ += "/"+(*element); } if(!voname.empty()) fqan_ = std::string("/VO=")+voname+fqan_; return fqan_; } bool VOMSACSeqEncode(const std::string& ac_seq, std::string& asn1) { bool ret = false; X509_EXTENSION* ext = NULL; if(ac_seq.empty()) return false; ext = X509V3_EXT_conf_nid(NULL, NULL, OBJ_txt2nid(acseqOID), (char*)(ac_seq.c_str())); if(ext!=NULL) { asn1.clear(); asn1.assign((const char*)(X509_EXTENSION_get_data(ext)->data), X509_EXTENSION_get_data(ext)->length); ret = true; X509_EXTENSION_free(ext); } return ret; } bool VOMSACSeqEncode(const std::list acs, std::string& asn1) { std::string ac_seq; std::list::const_iterator it; for(it = acs.begin(); it != acs.end(); it++) { std::string ac = (*it); ac_seq.append(VOMS_AC_HEADER).append("\n"); ac_seq.append(ac).append("\n"); ac_seq.append(VOMS_AC_TRAILER).append("\n"); } return VOMSACSeqEncode(ac_seq, asn1); } // The attributes passed to this method are of "extended fqan" kind with every field // made of key=value pair. Also each attribute has /VO=voname prepended. // Special ARC attribute /voname=voname/hostname=hostname is used for assigning // server host name to VO. std::string VOMSFQANFromFull(const std::string& attribute) { std::list elements; Arc::tokenize(attribute, elements, "/"); // Handle first element which contains VO name std::list::iterator i = elements.begin(); if(i == elements.end()) return ""; // empty attribute? 
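  // Example (illustrative): the extended attribute
  //   /VO=knowarc.eu/Group=knowarc.eu/Group=sub/Role=tester
  // is converted back to the plain FQAN
  //   /knowarc.eu/sub/Role=tester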
  // Handle first element which contains VO name
  std::string fqan_voname;
  std::vector<std::string> keyvalue;
  Arc::tokenize(*i, keyvalue, "=");
  if (keyvalue.size() != 2) return ""; // improper record
  if (keyvalue[0] == "voname") { // VO to hostname association
    // does not map into FQAN directly
    return "";
  } else if(keyvalue[0] == "VO") {
    fqan_voname = keyvalue[1];
    if(fqan_voname.empty()) { return ""; };
  } else {
    // Skip unknown record
    return "";
  }
  ++i;
  //voms_fqan_t fqan;
  std::string fqan_group;
  std::string fqan_role;
  std::string fqan_capability;
  for (; i != elements.end(); ++i) {
    std::vector<std::string> keyvalue;
    Arc::tokenize(*i, keyvalue, "=");
    // /Group=mygroup/Role=myrole
    // Ignoring unrecognized records
    if (keyvalue.size() == 2) {
      if (keyvalue[0] == "Group") {
        fqan_group += "/"+keyvalue[1];
      } else if (keyvalue[0] == "Role") {
        fqan_role = keyvalue[1];
      } else if (keyvalue[0] == "Capability") {
        fqan_capability = keyvalue[1];
      };
    };
  };
  std::string fqan = fqan_group;
  if(!fqan_role.empty()) fqan += "/Role="+fqan_role;
  if(!fqan_capability.empty()) fqan += "/Capability="+fqan_capability;
  return fqan;
}
} // namespace Arc
nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/README0000644000000000000000000000012412064074625022455 xustar000000000000000027 mtime=1355839893.892789 27 atime=1513200574.618703 30 ctime=1513200659.124736781 nordugrid-arc-5.4.2/src/hed/libs/credential/README0000644000175000002070000000166312064074625022530 0ustar00mockbuildmock00000000000000The credential directory is supposed to include classes which are used as a convenient library for credential handling. The Credential class covers the functionality for general processing of certificate/key files, including certificate/key parsing, information extraction (such as subject name, issuer name, lifetime, etc.), chain verification, processing of the proxy certinfo extension, and processing of other general certificate extensions (such as the VOMS attribute certificate). It is the extension-specific code itself (for VOMS, the code for writing and parsing the VOMS attribute certificate per RFC 3281; the VOMS attribute is treated as an opaque binary part and embedded into an extension of the X.509 certificate/proxy certificate) that should create, parse and verify the extension, not the Credential class. The VOMSUtil code is used to parse the VOMS AC extension. The Credential class supports PEM, DER and PKCS12 credentials.
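As an illustration of how the classes described above fit together, the following minimal sketch (not part of the packaged sources) shows one way a caller could verify the VOMS attribute certificates carried by a proxy using parseVOMSAC() and VOMSTrustList from VOMSUtil.cpp. The include paths, the Credential constructor arguments, and all file names and DNs below are assumptions made for the example rather than values taken from this package.

#include <iostream>
#include <string>
#include <vector>

#include <arc/credential/Credential.h>   // assumed installed header locations
#include <arc/credential/VOMSUtil.h>

int main() {
  // Holder credential, e.g. a VOMS proxy (paths are placeholders).
  Arc::Credential holder("/tmp/x509up_u1000", "",
                         "/etc/grid-security/certificates", "");

  // Trusted VOMS server DN chains, in the "NEXT CHAIN"-separated encoding
  // parsed by VOMSTrustList::AddElement() (DNs are placeholders).
  std::vector<std::string> encoded;
  encoded.push_back("/O=Grid/O=NorduGrid/CN=host/arthur.hep.lu.se");
  encoded.push_back("/O=Grid/O=NorduGrid/CN=NorduGrid Certification Authority");
  encoded.push_back("NEXT CHAIN");
  Arc::VOMSTrustList trust(encoded);

  // Parse and verify every AC found in the credential and its chain.
  std::vector<Arc::VOMSACInfo> acs;
  bool all_ok = Arc::parseVOMSAC(holder,
                                 "/etc/grid-security/certificates", /*ca_cert_dir*/
                                 "",                                /*ca_cert_file*/
                                 "/etc/grid-security/vomsdir",      /*vomsdir*/
                                 trust, acs,
                                 true /*verify*/, true /*reportall*/);

  for (std::vector<Arc::VOMSACInfo>::const_iterator ac = acs.begin();
       ac != acs.end(); ++ac) {
    std::cout << "VO: " << ac->voname << " (status " << ac->status << ")" << std::endl;
    for (std::vector<std::string>::const_iterator attr = ac->attributes.begin();
         attr != ac->attributes.end(); ++attr)
      std::cout << "  " << *attr << std::endl;
  }
  return all_ok ? 0 : 1;
}

If the trust list passed in is empty, checkSignature() above falls back to reading the per-VO <hostname>.lsc files under vomsdir, so the explicit VOMSTrustList construction in this sketch is optional.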
nordugrid-arc-5.4.2/src/hed/libs/credential/PaxHeaders.7502/VOMSConfig.cpp0000644000000000000000000000012412441404735024211 xustar000000000000000027 mtime=1418070493.334037 27 atime=1513200574.615703 30 ctime=1513200659.141736989 nordugrid-arc-5.4.2/src/hed/libs/credential/VOMSConfig.cpp0000644000175000002070000001464112441404735024264 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "Credential.h" #include "VOMSConfig.h" namespace Arc { VOMSConfigLine::operator bool(void) { return !name.empty(); } bool VOMSConfigLine::operator!(void) { return name.empty(); } const std::string& VOMSConfigLine::Name() const { return name; } const std::string& VOMSConfigLine::Host() const { return host; } const std::string& VOMSConfigLine::Port() const { return port; } const std::string& VOMSConfigLine::Subject() const { return subject; } const std::string& VOMSConfigLine::Alias() const { return alias; } std::string VOMSConfigLine::Str() const { return "\"" + name + "\" \"" + host + "\" \"" + port + "\" \"" + subject + "\" \"" + alias + "\""; } VOMSConfig::iterator::operator bool(void) const { return (list_ && (*this != list_->end())); } bool VOMSConfig::iterator::operator!(void) const { return (!list_ || (*this == list_->end())); } VOMSConfig::iterator::iterator(void): std::list::iterator(), list_(NULL) { } VOMSConfig::iterator::iterator(const iterator& it): std::list::iterator(it), list_(it.list_) { } VOMSConfig::iterator& VOMSConfig::iterator::operator=(const VOMSConfig::iterator& it) { list_ = it.list_; std::list::iterator::operator=((const std::list::iterator&)it); return *this; } VOMSConfig::iterator::iterator(std::list& list, std::list::iterator it): std::list::iterator(it), list_(&list) { } bool VOMSConfig::filter::match(const VOMSConfigLine& line) const { return true; } VOMSConfig::operator bool(void) const { return !lines.empty(); } bool VOMSConfig::operator!(void) const { return lines.empty(); } VOMSConfig::iterator VOMSConfig::FirstByName(const std::string name) { for(std::list::iterator line = lines.begin(); line != lines.end(); ++line) { if(line->Name() == name) { return iterator(lines,line); }; }; return iterator(lines,lines.end()); } VOMSConfig::iterator VOMSConfig::FirstByAlias(const std::string alias) { for(std::list::iterator line = lines.begin(); line != lines.end(); ++line) { if(line->Alias() == alias) { return iterator(lines,line); }; }; return iterator(lines,lines.end()); } VOMSConfig::iterator VOMSConfig::First(const VOMSConfig::filter& lfilter) { for(std::list::iterator line = lines.begin(); line != lines.end(); ++line) { if(lfilter.match(*line)) { return iterator(lines,line); }; }; return iterator(lines,lines.end()); } VOMSConfig::iterator VOMSConfig::iterator::NextByName(void) { if(!this->list_) return iterator(); for(std::list::iterator line = *this; ++line != this->list_->end(); ) { if(line->Name() == (*this)->Name()) { return iterator(*(this->list_),line); }; }; return iterator(*(this->list_),this->list_->end()); } VOMSConfig::iterator VOMSConfig::iterator::NextByAlias(void) { if(!this->list_) return iterator(); for(std::list::iterator line = *this; ++line != this->list_->end(); ) { if(line->Alias() == (*this)->Alias()) { return iterator(*(this->list_),line); }; }; return iterator(*(this->list_),this->list_->end()); } VOMSConfig::iterator VOMSConfig::iterator::Next(const VOMSConfig::filter& lfilter) { if(!this->list_) return iterator(); for(std::list::iterator line = *this; 
++line != this->list_->end(); ) { if(lfilter.match(*line)) { return iterator(*(this->list_),line); }; }; return iterator(*(this->list_),this->list_->end()); } VOMSConfigLine* VOMSConfig::iterator::operator->(void) const { if(!this->list_) return NULL; return std::list::iterator::operator->(); } VOMSConfigLine::VOMSConfigLine(const std::string& line) { std::string::size_type p = line.find_first_not_of("\t "); if(p == std::string::npos) return; if(line[p] == '#') return; std::vector tokens; Arc::tokenize(line, tokens, " \t", "\"", "\""); // Normally there must be 5 items in line. // But older voms files (still in use) have 6 parameters. if((tokens.size() != 5) && (tokens.size() != 6)) { CredentialLogger.msg(ERROR,"ERROR: VOMS configuration line contains too many tokens. Expecting 5 or 6. Line was: %s", line); return; }; name = tokens[0]; host = tokens[1]; port = tokens[2]; subject = tokens[3]; alias = tokens[4]; } bool VOMSConfig::AddPath(const std::string& path, int depth, const filter& lfilter) { const int max_line_length = 1024; const int max_lines_num = 1024; const int max_depth = 64; if(Glib::file_test(path, Glib::FILE_TEST_IS_DIR)) { if((++depth) >= max_depth) { CredentialLogger.msg(ERROR,"ERROR: file tree is too deep while scanning VOMS configuration. Max allowed nesting is %i.",max_depth); return false; }; Glib::Dir dir(path); bool r = true; while(true) { std::string name = dir.read_name(); if(name.empty()) break; if(!AddPath(Glib::build_filename(path,name),depth,lfilter)) r = false; }; return r; } else if(Glib::file_test(path, Glib::FILE_TEST_IS_REGULAR)) { // Sanity check // Read and parse std::ifstream iv(path.c_str()); int n = 0; while(!iv.eof()) { if(iv.fail()) { CredentialLogger.msg(ERROR,"ERROR: failed to read file %s while scanning VOMS configuration.",path); return false; }; if((n++) >= max_lines_num) { // too many lines CredentialLogger.msg(ERROR,"ERROR: VOMS configuration file %s contains too many lines. Max supported number is %i.",path,max_lines_num); return false; }; char buf[max_line_length]; iv.getline(buf,sizeof(buf)); if(iv.gcount() >= (sizeof(buf)-1)) { // too long line CredentialLogger.msg(ERROR,"ERROR: VOMS configuration file %s contains too long line(s). Max supported length is %i characters.",path,max_line_length-1); return false; }; VOMSConfigLine vline(buf); if(vline && lfilter.match(vline)) lines.push_back(vline); }; return true; }; // This is not error. Just file we can't process. So skip logging. 
return false; } VOMSConfig::VOMSConfig(const std::string& path, const VOMSConfig::filter& lfilter) { AddPath(path,0,lfilter); } } //namespace Arc nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/ws-security0000644000000000000000000000013213214316023021670 xustar000000000000000030 mtime=1513200659.575742297 30 atime=1513200668.720854145 30 ctime=1513200659.575742297 nordugrid-arc-5.4.2/src/hed/libs/ws-security/0000755000175000002070000000000013214316023022013 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/ws-security/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712231165602024010 xustar000000000000000027 mtime=1382345602.148497 30 atime=1513200598.804999047 30 ctime=1513200659.566742187 nordugrid-arc-5.4.2/src/hed/libs/ws-security/Makefile.am0000644000175000002070000000521012231165602024050 0ustar00mockbuildmock00000000000000DIST_SUBDIRS = test SUBDIRS = $(TEST_DIR) lib_LTLIBRARIES = libarcwssecurity.la if XMLSEC_ENABLED noinst_PROGRAMS = test_usernametoken test_x509token test_samltoken else noinst_PROGRAMS = test_usernametoken endif libarcwssecurity_ladir = $(pkgincludedir)/ws-security if XMLSEC_ENABLED libarcwssecurity_la_HEADERS = UsernameToken.h X509Token.h SAMLToken.h libarcwssecurity_la_SOURCES = UsernameToken.cpp X509Token.cpp SAMLToken.cpp libarcwssecurity_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) \ $(XMLSEC_CFLAGS) $(XMLSEC_OPENSSL_CFLAGS) $(AM_CXXFLAGS) libarcwssecurity_la_LIBADD = \ $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(OPENSSL_LIBS) \ $(XMLSEC_LIBS) $(XMLSEC_OPENSSL_LIBS) else libarcwssecurity_la_HEADERS = UsernameToken.h libarcwssecurity_la_SOURCES = UsernameToken.cpp libarcwssecurity_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libarcwssecurity_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(OPENSSL_LIBS) endif libarcwssecurity_la_LDFLAGS = -version-info 3:0:0 test_usernametoken_SOURCES = test_usernametoken.cpp test_usernametoken_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) test_usernametoken_LDADD = \ libarcwssecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) if XMLSEC_ENABLED test_x509token_SOURCES = test_x509token.cpp test_x509token_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(XMLSEC_OPENSSL_CFLAGS) $(XMLSEC_CFLAGS) $(AM_CXXFLAGS) test_x509token_LDADD = \ libarcwssecurity.la \ $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(XMLSEC_OPENSSL_LIBS) $(XMLSEC_LIBS) test_samltoken_SOURCES = test_samltoken.cpp test_samltoken_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(XMLSEC_OPENSSL_CFLAGS) $(XMLSEC_CFLAGS) $(AM_CXXFLAGS) test_samltoken_LDADD = \ libarcwssecurity.la \ $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(XMLSEC_OPENSSL_LIBS) $(XMLSEC_LIBS) endif 
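Returning to the vomses configuration parsing in VOMSConfig.cpp above: each non-comment line is expected to carry five quoted fields (six in older files) of the form "name" "host" "port" "server subject" "alias". The following standalone sketch mirrors that field-count check. It is not the library code, which uses Arc::tokenize with quote handling; the helper name, host name, port and DN in the sample line are invented.

// Standalone sketch of parsing one vomses-style line with quoted fields.
// Not the library code; sample values are invented.
#include <iostream>
#include <string>
#include <vector>

static std::vector<std::string> quoted_fields(const std::string& line) {
  std::vector<std::string> fields;
  std::string::size_type pos = 0;
  while ((pos = line.find('"', pos)) != std::string::npos) {
    std::string::size_type end = line.find('"', pos + 1);
    if (end == std::string::npos) break; // unterminated quote - stop parsing
    fields.push_back(line.substr(pos + 1, end - pos - 1));
    pos = end + 1;
  }
  return fields;
}

int main() {
  std::string line =
    "\"myvo\" \"voms.example.org\" \"15001\" "
    "\"/DC=org/DC=example/CN=voms.example.org\" \"myvo\"";
  std::vector<std::string> f = quoted_fields(line);
  if (f.size() != 5 && f.size() != 6) { // same sanity check as VOMSConfigLine
    std::cerr << "unexpected number of fields: " << f.size() << std::endl;
    return 1;
  }
  std::cout << "VO name: " << f[0] << ", contact: " << f[1] << ":" << f[2] << std::endl;
  return 0;
}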
nordugrid-arc-5.4.2/src/hed/libs/ws-security/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315726024023 xustar000000000000000030 mtime=1513200598.870999854 30 atime=1513200648.320604642 30 ctime=1513200659.567742199 nordugrid-arc-5.4.2/src/hed/libs/ws-security/Makefile.in0000644000175000002070000013703313214315726024100 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ @XMLSEC_ENABLED_FALSE@noinst_PROGRAMS = test_usernametoken$(EXEEXT) @XMLSEC_ENABLED_TRUE@noinst_PROGRAMS = test_usernametoken$(EXEEXT) \ @XMLSEC_ENABLED_TRUE@ test_x509token$(EXEEXT) \ @XMLSEC_ENABLED_TRUE@ test_samltoken$(EXEEXT) subdir = src/hed/libs/ws-security DIST_COMMON = $(am__libarcwssecurity_la_HEADERS_DIST) \ $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for 
(dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libdir)" \ "$(DESTDIR)$(libarcwssecurity_ladir)" LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = @XMLSEC_ENABLED_FALSE@libarcwssecurity_la_DEPENDENCIES = $(top_builddir)/src/hed/libs/message/libarcmessage.la \ @XMLSEC_ENABLED_FALSE@ $(top_builddir)/src/hed/libs/common/libarccommon.la \ @XMLSEC_ENABLED_FALSE@ $(am__DEPENDENCIES_1) \ @XMLSEC_ENABLED_FALSE@ $(am__DEPENDENCIES_1) \ @XMLSEC_ENABLED_FALSE@ $(am__DEPENDENCIES_1) @XMLSEC_ENABLED_TRUE@libarcwssecurity_la_DEPENDENCIES = $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/common/libarccommon.la \ @XMLSEC_ENABLED_TRUE@ $(am__DEPENDENCIES_1) \ @XMLSEC_ENABLED_TRUE@ $(am__DEPENDENCIES_1) \ @XMLSEC_ENABLED_TRUE@ $(am__DEPENDENCIES_1) \ @XMLSEC_ENABLED_TRUE@ $(am__DEPENDENCIES_1) \ @XMLSEC_ENABLED_TRUE@ $(am__DEPENDENCIES_1) am__libarcwssecurity_la_SOURCES_DIST = UsernameToken.cpp X509Token.cpp \ SAMLToken.cpp @XMLSEC_ENABLED_FALSE@am_libarcwssecurity_la_OBJECTS = \ @XMLSEC_ENABLED_FALSE@ libarcwssecurity_la-UsernameToken.lo @XMLSEC_ENABLED_TRUE@am_libarcwssecurity_la_OBJECTS = \ @XMLSEC_ENABLED_TRUE@ libarcwssecurity_la-UsernameToken.lo \ @XMLSEC_ENABLED_TRUE@ libarcwssecurity_la-X509Token.lo \ @XMLSEC_ENABLED_TRUE@ libarcwssecurity_la-SAMLToken.lo libarcwssecurity_la_OBJECTS = $(am_libarcwssecurity_la_OBJECTS) libarcwssecurity_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarcwssecurity_la_CXXFLAGS) $(CXXFLAGS) \ $(libarcwssecurity_la_LDFLAGS) $(LDFLAGS) -o $@ PROGRAMS = $(noinst_PROGRAMS) am__test_samltoken_SOURCES_DIST = test_samltoken.cpp @XMLSEC_ENABLED_TRUE@am_test_samltoken_OBJECTS = \ @XMLSEC_ENABLED_TRUE@ test_samltoken-test_samltoken.$(OBJEXT) test_samltoken_OBJECTS = $(am_test_samltoken_OBJECTS) @XMLSEC_ENABLED_TRUE@test_samltoken_DEPENDENCIES = \ @XMLSEC_ENABLED_TRUE@ libarcwssecurity.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/common/libarccommon.la \ @XMLSEC_ENABLED_TRUE@ $(am__DEPENDENCIES_1) \ @XMLSEC_ENABLED_TRUE@ $(am__DEPENDENCIES_1) \ @XMLSEC_ENABLED_TRUE@ $(am__DEPENDENCIES_1) \ @XMLSEC_ENABLED_TRUE@ $(am__DEPENDENCIES_1) test_samltoken_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(test_samltoken_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_test_usernametoken_OBJECTS = \ test_usernametoken-test_usernametoken.$(OBJEXT) test_usernametoken_OBJECTS = $(am_test_usernametoken_OBJECTS) test_usernametoken_DEPENDENCIES = libarcwssecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) test_usernametoken_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(test_usernametoken_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am__test_x509token_SOURCES_DIST = test_x509token.cpp @XMLSEC_ENABLED_TRUE@am_test_x509token_OBJECTS = \ @XMLSEC_ENABLED_TRUE@ test_x509token-test_x509token.$(OBJEXT) test_x509token_OBJECTS = $(am_test_x509token_OBJECTS) 
@XMLSEC_ENABLED_TRUE@test_x509token_DEPENDENCIES = \ @XMLSEC_ENABLED_TRUE@ libarcwssecurity.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/common/libarccommon.la \ @XMLSEC_ENABLED_TRUE@ $(am__DEPENDENCIES_1) \ @XMLSEC_ENABLED_TRUE@ $(am__DEPENDENCIES_1) \ @XMLSEC_ENABLED_TRUE@ $(am__DEPENDENCIES_1) \ @XMLSEC_ENABLED_TRUE@ $(am__DEPENDENCIES_1) test_x509token_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(test_x509token_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcwssecurity_la_SOURCES) $(test_samltoken_SOURCES) \ $(test_usernametoken_SOURCES) $(test_x509token_SOURCES) DIST_SOURCES = $(am__libarcwssecurity_la_SOURCES_DIST) \ $(am__test_samltoken_SOURCES_DIST) \ $(test_usernametoken_SOURCES) \ $(am__test_x509token_SOURCES_DIST) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive am__libarcwssecurity_la_HEADERS_DIST = UsernameToken.h X509Token.h \ SAMLToken.h HEADERS = $(libarcwssecurity_la_HEADERS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ 
ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = 
@JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = 
@infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ DIST_SUBDIRS = test SUBDIRS = $(TEST_DIR) lib_LTLIBRARIES = libarcwssecurity.la libarcwssecurity_ladir = $(pkgincludedir)/ws-security @XMLSEC_ENABLED_FALSE@libarcwssecurity_la_HEADERS = UsernameToken.h @XMLSEC_ENABLED_TRUE@libarcwssecurity_la_HEADERS = UsernameToken.h X509Token.h SAMLToken.h @XMLSEC_ENABLED_FALSE@libarcwssecurity_la_SOURCES = UsernameToken.cpp @XMLSEC_ENABLED_TRUE@libarcwssecurity_la_SOURCES = UsernameToken.cpp X509Token.cpp SAMLToken.cpp @XMLSEC_ENABLED_FALSE@libarcwssecurity_la_CXXFLAGS = -I$(top_srcdir)/include \ @XMLSEC_ENABLED_FALSE@ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) @XMLSEC_ENABLED_TRUE@libarcwssecurity_la_CXXFLAGS = -I$(top_srcdir)/include \ @XMLSEC_ENABLED_TRUE@ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) \ @XMLSEC_ENABLED_TRUE@ $(XMLSEC_CFLAGS) $(XMLSEC_OPENSSL_CFLAGS) $(AM_CXXFLAGS) @XMLSEC_ENABLED_FALSE@libarcwssecurity_la_LIBADD = \ @XMLSEC_ENABLED_FALSE@ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ @XMLSEC_ENABLED_FALSE@ $(top_builddir)/src/hed/libs/common/libarccommon.la \ @XMLSEC_ENABLED_FALSE@ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(OPENSSL_LIBS) @XMLSEC_ENABLED_TRUE@libarcwssecurity_la_LIBADD = \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/common/libarccommon.la \ @XMLSEC_ENABLED_TRUE@ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(OPENSSL_LIBS) \ @XMLSEC_ENABLED_TRUE@ $(XMLSEC_LIBS) $(XMLSEC_OPENSSL_LIBS) libarcwssecurity_la_LDFLAGS = -version-info 3:0:0 test_usernametoken_SOURCES = test_usernametoken.cpp test_usernametoken_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) test_usernametoken_LDADD = \ libarcwssecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) @XMLSEC_ENABLED_TRUE@test_x509token_SOURCES = test_x509token.cpp @XMLSEC_ENABLED_TRUE@test_x509token_CXXFLAGS = -I$(top_srcdir)/include \ @XMLSEC_ENABLED_TRUE@ $(LIBXML2_CFLAGS) $(XMLSEC_OPENSSL_CFLAGS) $(XMLSEC_CFLAGS) $(AM_CXXFLAGS) @XMLSEC_ENABLED_TRUE@test_x509token_LDADD = \ @XMLSEC_ENABLED_TRUE@ 
libarcwssecurity.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/common/libarccommon.la \ @XMLSEC_ENABLED_TRUE@ $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(XMLSEC_OPENSSL_LIBS) $(XMLSEC_LIBS) @XMLSEC_ENABLED_TRUE@test_samltoken_SOURCES = test_samltoken.cpp @XMLSEC_ENABLED_TRUE@test_samltoken_CXXFLAGS = -I$(top_srcdir)/include \ @XMLSEC_ENABLED_TRUE@ $(LIBXML2_CFLAGS) $(XMLSEC_OPENSSL_CFLAGS) $(XMLSEC_CFLAGS) $(AM_CXXFLAGS) @XMLSEC_ENABLED_TRUE@test_samltoken_LDADD = \ @XMLSEC_ENABLED_TRUE@ libarcwssecurity.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ @XMLSEC_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/common/libarccommon.la \ @XMLSEC_ENABLED_TRUE@ $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(XMLSEC_OPENSSL_LIBS) $(XMLSEC_LIBS) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/ws-security/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/ws-security/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcwssecurity.la: $(libarcwssecurity_la_OBJECTS) $(libarcwssecurity_la_DEPENDENCIES) 
$(libarcwssecurity_la_LINK) -rpath $(libdir) $(libarcwssecurity_la_OBJECTS) $(libarcwssecurity_la_LIBADD) $(LIBS) clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list test_samltoken$(EXEEXT): $(test_samltoken_OBJECTS) $(test_samltoken_DEPENDENCIES) @rm -f test_samltoken$(EXEEXT) $(test_samltoken_LINK) $(test_samltoken_OBJECTS) $(test_samltoken_LDADD) $(LIBS) test_usernametoken$(EXEEXT): $(test_usernametoken_OBJECTS) $(test_usernametoken_DEPENDENCIES) @rm -f test_usernametoken$(EXEEXT) $(test_usernametoken_LINK) $(test_usernametoken_OBJECTS) $(test_usernametoken_LDADD) $(LIBS) test_x509token$(EXEEXT): $(test_x509token_OBJECTS) $(test_x509token_DEPENDENCIES) @rm -f test_x509token$(EXEEXT) $(test_x509token_LINK) $(test_x509token_OBJECTS) $(test_x509token_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcwssecurity_la-SAMLToken.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcwssecurity_la-UsernameToken.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcwssecurity_la-X509Token.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_samltoken-test_samltoken.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_usernametoken-test_usernametoken.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_x509token-test_x509token.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcwssecurity_la-UsernameToken.lo: UsernameToken.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcwssecurity_la_CXXFLAGS) $(CXXFLAGS) -MT libarcwssecurity_la-UsernameToken.lo -MD -MP -MF $(DEPDIR)/libarcwssecurity_la-UsernameToken.Tpo -c -o libarcwssecurity_la-UsernameToken.lo `test -f 'UsernameToken.cpp' || echo '$(srcdir)/'`UsernameToken.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcwssecurity_la-UsernameToken.Tpo $(DEPDIR)/libarcwssecurity_la-UsernameToken.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='UsernameToken.cpp' 
object='libarcwssecurity_la-UsernameToken.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcwssecurity_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcwssecurity_la-UsernameToken.lo `test -f 'UsernameToken.cpp' || echo '$(srcdir)/'`UsernameToken.cpp libarcwssecurity_la-X509Token.lo: X509Token.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcwssecurity_la_CXXFLAGS) $(CXXFLAGS) -MT libarcwssecurity_la-X509Token.lo -MD -MP -MF $(DEPDIR)/libarcwssecurity_la-X509Token.Tpo -c -o libarcwssecurity_la-X509Token.lo `test -f 'X509Token.cpp' || echo '$(srcdir)/'`X509Token.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcwssecurity_la-X509Token.Tpo $(DEPDIR)/libarcwssecurity_la-X509Token.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='X509Token.cpp' object='libarcwssecurity_la-X509Token.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcwssecurity_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcwssecurity_la-X509Token.lo `test -f 'X509Token.cpp' || echo '$(srcdir)/'`X509Token.cpp libarcwssecurity_la-SAMLToken.lo: SAMLToken.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcwssecurity_la_CXXFLAGS) $(CXXFLAGS) -MT libarcwssecurity_la-SAMLToken.lo -MD -MP -MF $(DEPDIR)/libarcwssecurity_la-SAMLToken.Tpo -c -o libarcwssecurity_la-SAMLToken.lo `test -f 'SAMLToken.cpp' || echo '$(srcdir)/'`SAMLToken.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcwssecurity_la-SAMLToken.Tpo $(DEPDIR)/libarcwssecurity_la-SAMLToken.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SAMLToken.cpp' object='libarcwssecurity_la-SAMLToken.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcwssecurity_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcwssecurity_la-SAMLToken.lo `test -f 'SAMLToken.cpp' || echo '$(srcdir)/'`SAMLToken.cpp test_samltoken-test_samltoken.o: test_samltoken.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_samltoken_CXXFLAGS) $(CXXFLAGS) -MT test_samltoken-test_samltoken.o -MD -MP -MF $(DEPDIR)/test_samltoken-test_samltoken.Tpo -c -o test_samltoken-test_samltoken.o `test -f 'test_samltoken.cpp' || echo '$(srcdir)/'`test_samltoken.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_samltoken-test_samltoken.Tpo $(DEPDIR)/test_samltoken-test_samltoken.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_samltoken.cpp' object='test_samltoken-test_samltoken.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(test_samltoken_CXXFLAGS) $(CXXFLAGS) -c -o test_samltoken-test_samltoken.o `test -f 'test_samltoken.cpp' || echo '$(srcdir)/'`test_samltoken.cpp test_samltoken-test_samltoken.obj: test_samltoken.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_samltoken_CXXFLAGS) $(CXXFLAGS) -MT test_samltoken-test_samltoken.obj -MD -MP -MF $(DEPDIR)/test_samltoken-test_samltoken.Tpo -c -o test_samltoken-test_samltoken.obj `if test -f 'test_samltoken.cpp'; then $(CYGPATH_W) 'test_samltoken.cpp'; else $(CYGPATH_W) '$(srcdir)/test_samltoken.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_samltoken-test_samltoken.Tpo $(DEPDIR)/test_samltoken-test_samltoken.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_samltoken.cpp' object='test_samltoken-test_samltoken.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_samltoken_CXXFLAGS) $(CXXFLAGS) -c -o test_samltoken-test_samltoken.obj `if test -f 'test_samltoken.cpp'; then $(CYGPATH_W) 'test_samltoken.cpp'; else $(CYGPATH_W) '$(srcdir)/test_samltoken.cpp'; fi` test_usernametoken-test_usernametoken.o: test_usernametoken.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_usernametoken_CXXFLAGS) $(CXXFLAGS) -MT test_usernametoken-test_usernametoken.o -MD -MP -MF $(DEPDIR)/test_usernametoken-test_usernametoken.Tpo -c -o test_usernametoken-test_usernametoken.o `test -f 'test_usernametoken.cpp' || echo '$(srcdir)/'`test_usernametoken.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_usernametoken-test_usernametoken.Tpo $(DEPDIR)/test_usernametoken-test_usernametoken.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_usernametoken.cpp' object='test_usernametoken-test_usernametoken.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_usernametoken_CXXFLAGS) $(CXXFLAGS) -c -o test_usernametoken-test_usernametoken.o `test -f 'test_usernametoken.cpp' || echo '$(srcdir)/'`test_usernametoken.cpp test_usernametoken-test_usernametoken.obj: test_usernametoken.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_usernametoken_CXXFLAGS) $(CXXFLAGS) -MT test_usernametoken-test_usernametoken.obj -MD -MP -MF $(DEPDIR)/test_usernametoken-test_usernametoken.Tpo -c -o test_usernametoken-test_usernametoken.obj `if test -f 'test_usernametoken.cpp'; then $(CYGPATH_W) 'test_usernametoken.cpp'; else $(CYGPATH_W) '$(srcdir)/test_usernametoken.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_usernametoken-test_usernametoken.Tpo $(DEPDIR)/test_usernametoken-test_usernametoken.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_usernametoken.cpp' object='test_usernametoken-test_usernametoken.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_usernametoken_CXXFLAGS) $(CXXFLAGS) -c -o test_usernametoken-test_usernametoken.obj `if test -f 'test_usernametoken.cpp'; then $(CYGPATH_W) 'test_usernametoken.cpp'; else $(CYGPATH_W) '$(srcdir)/test_usernametoken.cpp'; fi` 
test_x509token-test_x509token.o: test_x509token.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_x509token_CXXFLAGS) $(CXXFLAGS) -MT test_x509token-test_x509token.o -MD -MP -MF $(DEPDIR)/test_x509token-test_x509token.Tpo -c -o test_x509token-test_x509token.o `test -f 'test_x509token.cpp' || echo '$(srcdir)/'`test_x509token.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_x509token-test_x509token.Tpo $(DEPDIR)/test_x509token-test_x509token.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_x509token.cpp' object='test_x509token-test_x509token.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_x509token_CXXFLAGS) $(CXXFLAGS) -c -o test_x509token-test_x509token.o `test -f 'test_x509token.cpp' || echo '$(srcdir)/'`test_x509token.cpp test_x509token-test_x509token.obj: test_x509token.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_x509token_CXXFLAGS) $(CXXFLAGS) -MT test_x509token-test_x509token.obj -MD -MP -MF $(DEPDIR)/test_x509token-test_x509token.Tpo -c -o test_x509token-test_x509token.obj `if test -f 'test_x509token.cpp'; then $(CYGPATH_W) 'test_x509token.cpp'; else $(CYGPATH_W) '$(srcdir)/test_x509token.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_x509token-test_x509token.Tpo $(DEPDIR)/test_x509token-test_x509token.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_x509token.cpp' object='test_x509token-test_x509token.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_x509token_CXXFLAGS) $(CXXFLAGS) -c -o test_x509token-test_x509token.obj `if test -f 'test_x509token.cpp'; then $(CYGPATH_W) 'test_x509token.cpp'; else $(CYGPATH_W) '$(srcdir)/test_x509token.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libarcwssecurity_laHEADERS: $(libarcwssecurity_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarcwssecurity_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarcwssecurity_ladir)" @list='$(libarcwssecurity_la_HEADERS)'; test -n "$(libarcwssecurity_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarcwssecurity_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarcwssecurity_ladir)" || exit $$?; \ done uninstall-libarcwssecurity_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarcwssecurity_la_HEADERS)'; test -n "$(libarcwssecurity_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarcwssecurity_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libarcwssecurity_ladir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) $(HEADERS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(libarcwssecurity_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ clean-noinstPROGRAMS mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-libarcwssecurity_laHEADERS install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-libLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-libLTLIBRARIES \ uninstall-libarcwssecurity_laHEADERS .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic \ clean-libLTLIBRARIES clean-libtool clean-noinstPROGRAMS ctags \ ctags-recursive distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-libLTLIBRARIES \ install-libarcwssecurity_laHEADERS install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs installdirs-am \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags tags-recursive uninstall uninstall-am \ uninstall-libLTLIBRARIES uninstall-libarcwssecurity_laHEADERS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/ws-security/PaxHeaders.7502/SAMLToken.h0000644000000000000000000000012411205363131023654 xustar000000000000000027 mtime=1242949209.200871 27 atime=1513200574.991707 30 ctime=1513200659.565742175 nordugrid-arc-5.4.2/src/hed/libs/ws-security/SAMLToken.h0000644000175000002070000001562211205363131023727 0ustar00mockbuildmock00000000000000#ifndef __ARC_SAMLTOKEN_H__ #define __ARC_SAMLTOKEN_H__ #include #include // WS-Security SAML Token Profile v1.1 // wsse="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd" namespace Arc { ///Class for manipulating SAML Token Profile. /**This class is for generating/consuming SAML Token profile. * See WS-Security SAML Token Profile v1.1 (www.oasis-open.org/committees/wss) * Currently this class is used by samltoken handler (will appears in src/hed/pdc/samltokensh/) * It is not a must to directly called this class. If we need to * use SAML Token functionality, we only need to configure the samltoken * handler into service and client. * Currently, only a minor part of the specification has been implemented. 
* * About how to identify and reference the security token for signing a message: * currently only the "SAML Assertion Referenced from KeyInfo" method (part 3.4.2 of * the WS-Security SAML Token Profile v1.1 specification) is supported, which means * the implementation can only process SAML assertions "referenced from KeyInfo", * and can only generate SAML Tokens with SAML assertions "referenced from KeyInfo". * More complete support still needs to be implemented. * * About the subject confirmation method, the implementation can process the "hold-of-key" * (part 3.5.1 of the WS-Security SAML Token Profile v1.1 specification) subject * confirmation method. * * About the SAML version, the implementation can process SAML assertions with SAML * version 1.1 and 2.0, but can only generate SAML assertions with SAML * version 2.0. * * In the SAML Token profile, for the hold-of-key subject confirmation method, * there are three interacting parties: the attesting entity, the relying party * and the issuing authority. In the hold-of-key subject confirmation method, * it is the attesting entity's subject identity which is inserted into * the SAML assertion. * * First, the attesting entity authenticates to the issuing authority by using some * authentication scheme such as the WSS X.509 Token profile (alternatively the * username/password authentication scheme or some other authentication scheme * can also be used, provided that the issuing authority can retrieve the key from a * trusted certificate server after firmly establishing the subject's identity * under the username/password scheme). The issuing authority is then able to * make a definitive statement (sign a SAML assertion) about an act of * authentication that has already taken place. * * The attesting entity gets the SAML assertion and then signs the SOAP message together * with the assertion using its private key (the relevant certificate has been authenticated * by the issuing authority, and the corresponding public key has been put into the SubjectConfirmation * element of the SAML assertion by the issuing authority. Only the actual owner of the * SAML assertion can do this, as only the subject possesses the private key paired * with the public key in the assertion. This establishes an irrefutable connection * between the author of the SOAP message and the assertion describing an authentication event.) * * The relying party is supposed to trust the issuing authority. When it receives a message * from the attesting entity, it will check the SAML assertion based on its * predetermined trust relationship with the SAML issuing authority, and check * the signature of the SOAP message based on the public key in the SAML assertion, * without a direct trust relationship with the attesting entity (subject owner). */ class SAMLToken : public SOAPEnvelope { public: /**SAMLVersion distinguishes the two SAML versions from the specification. * It is used as * a constructor parameter. */ typedef enum { SAML1, SAML2 } SAMLVersion; /** Constructor. Parses SAML Token information from the SOAP header. * SAML Token related information is extracted from the SOAP header and * stored in class variables. The SAMLToken object can then * be used for authentication. * @param soap The SOAP message which contains the SAMLToken in the soap header */ SAMLToken(SOAPEnvelope& soap); /** Constructor. Adds SAML Token information into the SOAP header. * The generated token contains the SAML token and signature elements, and is * meant to be used for authentication on the consuming side.
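 * A minimal usage sketch (illustrative only: the file and directory names
 * below are placeholders, error handling is omitted, and the SOAPEnvelope
 * objects 'request' and 'incoming' are assumed to be prepared elsewhere):
 * @code
 * // Attesting side: add a self-signed SAML Token to an outgoing message.
 * Arc::SAMLToken token(request, "/path/to/cert.pem", "/path/to/key.pem",
 *                      Arc::SAMLToken::SAML2);
 * if(!token) {
 *   // generation failed
 * }
 * // Relying side: parse the token from a received message and verify it.
 * Arc::SAMLToken received(incoming);
 * bool ok = received &&
 *           received.Authenticate("/path/to/ca.pem", "/path/to/ca-dir");
 * @endcode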
* This constructor is for a specific SAML Token profile usage, in which * the attesting entity signs the SAML assertion for itself (self-sign). * This usage implicitly requires that the relying party trust the attesting * entity. * More general (requires issuing authority) usage will be provided by other * constructor. And the under-developing SAML service will be used as the * issuing authority. * @param soap The SOAP message to which the SAML Token will be inserted. * @param certfile The certificate file. * @param keyfile The key file which will be used to create signature. * @param samlversion The SAML version, only SAML2 is supported currently. * @param samlassertion The SAML assertion got from 3rd party, and used for protecting * the SOAP message; If not present, then self-signed assertion will be generated. */ SAMLToken(SOAPEnvelope& soap, const std::string& certfile, const std::string& keyfile, SAMLVersion saml_version = SAML2, XMLNode saml_assertion = XMLNode()); /** Deconstructor. Nothing to be done except finalizing the xmlsec library. */ ~SAMLToken(void); /** Returns true of constructor succeeded */ operator bool(void); /**Check signature by using the trusted certificates * It is used by relying parting after calling SAMLToken(SOAPEnvelope& soap) * This method will check the SAML assertion based on the trusted certificated * specified as parameter cafile or capath; and also check the signature to soap * message (the signature is generated by attesting entity by signing soap body * together witl SAML assertion) by using the public key inside SAML assetion. @param cafile ca file @param capath ca directory */ bool Authenticate(const std::string& cafile, const std::string& capath); /** Check signature by using the cert information in soap message */ bool Authenticate(void); private: /** Tells if specified SOAP header has WSSE element and SAMLToken inside the WSSE element */ bool Check(SOAPEnvelope& soap); private: xmlNodePtr assertion_signature_nd; xmlNodePtr wsse_signature_nd; /**public key string under (under 's ), which is used sign the soap body message: ["Subject"]["SubjectConfirmation"]["KeyInfo"]["KeyValue"]; Alternative is: ["Subject"]["SubjectConfirmation"]["KeyInfo"]["X509Data"]["X509Certificate"] */ std::string pubkey_str; std::string x509cert_str; /** inside 's , which is used to sign the assertion itself*/ XMLNode x509data; SAMLVersion samlversion; }; } // namespace Arc #endif /* __ARC_SAMLTOKEN_H__ */ nordugrid-arc-5.4.2/src/hed/libs/ws-security/PaxHeaders.7502/X509Token.cpp0000644000000000000000000000012412113711433024121 xustar000000000000000027 mtime=1362072347.509927 27 atime=1513200574.994707 30 ctime=1513200659.569742224 nordugrid-arc-5.4.2/src/hed/libs/ws-security/X509Token.cpp0000644000175000002070000005310712113711433024174 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include //#include //#include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef CHARSET_EBCDIC #include #endif #include #include #include #include "X509Token.h" namespace Arc { #define WSSE_NAMESPACE "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd" #define WSSE11_NAMESPACE "http://docs.oasis-open.org/wss/oasis-wss-wssecurity-secext-1.1.xsd" #define WSU_NAMESPACE "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd" #define XENC_NAMESPACE "http://www.w3.org/2001/04/xmlenc#" #define DSIG_NAMESPACE 
"http://www.w3.org/2000/09/xmldsig#" #define X509TOKEN_BASE_URL "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0" #define BASE64BINARY "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soapmessage-security-1.0#Base64Binary" #define STRTRANSFORM "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soapmessage-security-1.0#STR-Transform" #define PKCS7 "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#PKCS7" #define X509V3 "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509v3" #define X509PKIPATHV1 "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509PKIPathv1" #define X509SUBJECTKEYID "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509SubjectKeyIdentifier" bool X509Token::Check(SOAPEnvelope& soap) { XMLNode header = soap.Header(); if(header.NamespacePrefix(WSSE_NAMESPACE).empty()){ std::cerr<<"No wsse namespace in SOAP Header"<node_; xmlDocPtr docPtr = bodyPtr->doc; xmlChar* id = xmlGetProp(bodyPtr, (xmlChar *)"Id"); xmlAttrPtr id_attr = xmlHasProp(bodyPtr, (xmlChar *)"Id"); xmlAddID(NULL, docPtr, (xmlChar *)id, id_attr); xmlFree(id); //BinaryToken reference xmlNodePtr tokenPtr = ((X509Token*)(&token))->node_; id_attr = xmlHasProp(tokenPtr, (xmlChar *)"Id"); xmlAddID(NULL, docPtr, (xmlChar *)"binarytoken", id_attr); //Signature signature_nd = ((X509Token*)(&signature))->node_; if(!signature_nd) { std::cerr<<"No Signature node in SOAP header"<node_; //Create encryption context xmlSecKeysMngr* keys_mngr = NULL; //TODO: which key file will be used should be got according to the issuer name and //serial number information in incoming soap head std::string issuer_name = (std::string)(header["wsse:Security"]["xenc:EncryptedKey"]["ds:KeyInfo"]["wsse:SecurityTokenReference"]["ds:X509Data"]["ds:X509IssuerSerial"]["ds:X509IssuerName"]); std::string serial_number = (std::string)(header["wsse:Security"]["xenc:EncryptedKey"]["ds:KeyInfo"]["wsse:SecurityTokenReference"]["ds:X509Data"]["ds:X509IssuerSerial"]["ds:X509SerialNumber"]); keys_mngr = load_key_from_keyfile(&keys_mngr, keyfile.c_str()); xmlSecEncCtxPtr encCtx = NULL; encCtx = xmlSecEncCtxCreate(keys_mngr); if(encCtx == NULL) { std::cerr<<"Failed to create encryption context"<data<data); XMLNode decrypted_data(decrypted_str); // TODO: less copies //body.Replace(decrypted_data); //if the node is replaced with whole , then node_ will be lost. 
//body.Child().Replace(decrypted_data.Child()); for (int cn = 0;; ++cn) { XMLNode cnode = body.Child(cn); if(!cnode) break; cnode.Destroy(); } for (int cn = 0;; ++cn) { XMLNode cnode = decrypted_data.Child(cn); if (!cnode) break; body.NewChild(cnode); } //Destroy the wsse:Security in header header["wsse:Security"].Destroy(); //Ajust namespaces, delete mutiple definition ns = envelope.Namespaces(); envelope.Namespaces(ns); //if(decrypted_buf != NULL)xmlSecBufferDestroy(decrypted_buf); if(encCtx != NULL) xmlSecEncCtxDestroy(encCtx); if(keys_mngr != NULL)xmlSecKeysMngrDestroy(keys_mngr); } } bool X509Token::Authenticate(void) { xmlSecDSigCtx *dsigCtx; dsigCtx = xmlSecDSigCtxCreate(NULL); //Load public key from incoming soap's security token xmlSecKey* pubkey = get_key_from_certstr(cert_str); if (pubkey == NULL){ xmlSecDSigCtxDestroy(dsigCtx); std::cerr<<"Can not load public key"<signKey = pubkey; if (xmlSecDSigCtxVerify(dsigCtx, signature_nd) < 0) { xmlSecDSigCtxDestroy(dsigCtx); std::cerr<<"Signature verification failed"<status == xmlSecDSigStatusSucceeded) { std::cout<<"Succeed to verify the signature in SOAP message"<, here we are using some //kind of hack method by insertinga into , after verification, we //delete the node. //TODO. The other option is to implement a "key data" object and some "read" "write" method as //xmlsec does, it will be more complicated but it is a more correct way. Put it as TODO XMLNode keyinfo_nd = header["wsse:Security"]["Signature"]["KeyInfo"]; XMLNode st_ref_nd = keyinfo_nd["wsse:SecurityTokenReference"]; XMLNode x509data_node = get_node(keyinfo_nd, "X509Data"); XMLNode x509cert_node = get_node(x509data_node, "X509Certificate"); x509cert_node = cert_str; dsigCtx = xmlSecDSigCtxCreate(keys_manager); if (xmlSecDSigCtxVerify(dsigCtx, signature_nd) < 0) { xmlSecDSigCtxDestroy(dsigCtx); if (keys_manager) xmlSecKeysMngrDestroy(keys_manager); std::cerr<<"Signature verification failed (with trusted certificate checking)"<status == xmlSecDSigStatusSucceeded) { std::cout<<"Succeed to verify the signature in SOAP message (with trusted certificate checking)"<node_; xmlAddChild(wsse_nd, signature); //Add reference for signature //Body reference xmlNodePtr bodyPtr = ((X509Token*)(&body))->node_; xmlDocPtr docPtr = bodyPtr->doc; xmlChar* id = NULL; id = xmlGetProp(bodyPtr, (xmlChar *)"Id"); if(!id) { std::cout<<"There is not wsu:Id attribute in soap body, add a new one"<node_; std::string token_uri; token_uri.append("#").append("binarytoken"); reference = xmlSecTmplSignatureAddReference(signature, xmlSecTransformSha1Id, NULL, (xmlChar *)(token_uri.c_str()), NULL); xmlSecTmplReferenceAddTransform(reference, xmlSecTransformEnvelopedId); xmlSecTmplReferenceAddTransform(reference, xmlSecTransformExclC14NId); id_attr = xmlHasProp(tokenPtr, (xmlChar *)"Id"); xmlAddID(NULL, docPtr, (xmlChar *)"binarytoken", id_attr); xmlSecTmplSignatureEnsureKeyInfo(signature, NULL); XMLNode keyinfo_nd = wsse["Signature"]["KeyInfo"]; XMLNode st_ref_nd = keyinfo_nd.NewChild("wsse:SecurityTokenReference"); XMLNode ref_nd = st_ref_nd.NewChild("wsse:Reference"); ref_nd.NewAttribute("URI") = token_uri; //Sign the SOAP message xmlSecDSigCtx *dsigCtx = xmlSecDSigCtxCreate(NULL); //load private key, assuming there is no need for passphrase dsigCtx->signKey = xmlSecCryptoAppKeyLoad(keyfile.c_str(), xmlSecKeyDataFormatPem, NULL, NULL, NULL); //dsigCtx->signKey = xmlSecCryptoAppKeyLoad(keyfile.c_str(), xmlSecKeyDataFormatPem, NULL, (void*)passphrase_callback, NULL); if(dsigCtx->signKey == NULL) { 
xmlSecDSigCtxDestroy(dsigCtx); std::cerr<<"Can not load key"< if(xmlSecCryptoAppKeyCertLoad(dsigCtx->signKey, certfile.c_str(), xmlSecKeyDataFormatPem) < 0) { xmlSecDSigCtxDestroy(dsigCtx); std::cerr<<"Can not load certificate"<node_; xmlDocPtr docPtr = bodyPtr->doc; xmlNodePtr encDataNode = NULL; xmlNodePtr keyInfoNode = NULL; xmlNodePtr encKeyNode = NULL; xmlNodePtr keyInfoNode2 = NULL; xmlSecEncCtxPtr encCtx = NULL; //Create encryption template for a specific symetric key type encDataNode = xmlSecTmplEncDataCreate(docPtr , xmlSecTransformDes3CbcId, (const xmlChar*)"encrypted", xmlSecTypeEncElement, NULL, NULL); if(encDataNode == NULL) { std::cerr<<"Failed to create encryption template"< node if(xmlSecTmplEncDataEnsureCipherValue(encDataNode) == NULL){ std::cerr<<"Failed to add CipherValue node"< keyInfoNode = xmlSecTmplEncDataEnsureKeyInfo(encDataNode, NULL); if(keyInfoNode == NULL) { std::cerr<<"Failed to add key info"< to store the encrypted session key encKeyNode = xmlSecTmplKeyInfoAddEncryptedKey(keyInfoNode, xmlSecTransformRsaPkcs1Id, NULL, NULL, NULL); if(encKeyNode == NULL) { std::cerr<<"Failed to add key info"< node if(xmlSecTmplEncDataEnsureCipherValue(encKeyNode) == NULL) std::cerr<<"Error: failed to add CipherValue node"< and nodes to keyInfoNode2 = xmlSecTmplEncDataEnsureKeyInfo(encKeyNode, NULL); if(keyInfoNode2 == NULL){ std::cerr<<"Failed to add key info"<encKey = xmlSecKeyGenerate(xmlSecKeyDataDesId, 192, xmlSecKeyDataTypeSession); if(encCtx->encKey == NULL) { std::cerr<<"Failed to generate session des key"<node_ = (bodyPtr=encDataNode); encDataNode = NULL; //if(encCtx != NULL){ xmlSecEncCtxDestroy(encCtx); encCtx = NULL; } std::string str; body_cp.GetDoc(str); std::cout<<"Body new : "< for(int i=0;;i++) { XMLNode nd = body.Child(i); if(!nd) break; nd.Destroy(); } XMLNode body_encdata = get_node(body,"xenc:EncryptedData"); body_encdata.NewAttribute("wsu:Id") = (std::string)(encrypted_data.Attribute("Id")); body_encdata.NewAttribute("Type") = (std::string)(encrypted_data.Attribute("Type")); XMLNode body_cipherdata = get_node(body_encdata,"xenc:CipherData"); get_node(body_cipherdata,"xenc:CipherValue") = (std::string)(encrypted_data["CipherData"]["CipherValue"]); XMLNode enc_key = get_node(wsse,"xenc:EncryptedKey"); XMLNode enc_method = get_node(enc_key,"xenc:EncryptionMethod"); enc_method.NewAttribute("Algorithm") = (std::string)(encrypted_data["KeyInfo"]["EncryptedKey"]["EncryptionMethod"].Attribute("Algorithm")); XMLNode keyinfo = get_node(enc_key, "ds:KeyInfo"); XMLNode sec_token_ref = get_node(keyinfo, "wsse:SecurityTokenReference"); XMLNode x509_data = get_node(sec_token_ref, "ds:X509Data"); XMLNode x509_issuer_serial = get_node(x509_data, "ds:X509IssuerSerial"); XMLNode x509_issuer_name = get_node(x509_issuer_serial, "ds:X509IssuerName"); XMLNode x509_serial_number = get_node(x509_issuer_serial, "ds:X509SerialNumber"); //TODO: issuer name and issuer number should be extracted from certificate //There should be some way by which the sender could get the peer certificate //and use the public key inside this certificate to encrypt the message. 
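//For now the recipient certificate is loaded from 'certfile' with OpenSSL below; its
//issuer DN (formatted via X509_NAME_print_ex) and serial number are copied into the
//X509IssuerSerial reference built above, so that the receiving side can pick the
//matching private key when decrypting.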
X509* cert = NULL; BIO* certbio = NULL; certbio = BIO_new_file(certfile.c_str(), "r"); cert = PEM_read_bio_X509(certbio, NULL, NULL, NULL); //get formated issuer name from certificate BIO* namebio = NULL; namebio = BIO_new(BIO_s_mem()); X509_NAME_print_ex(namebio, X509_get_issuer_name(cert), 0, XN_FLAG_SEP_CPLUS_SPC); char name[256]; memset(name,0,256); BIO_read(namebio, name, 256); //char* name = X509_NAME_oneline(X509_get_issuer_name(cert), NULL, 0); std::string issuer_name(name); //OPENSSL_free(name); int serial = (int) ASN1_INTEGER_get(X509_get_serialNumber(cert)); std::stringstream ss; std::string serial_number; ss<>serial_number; x509_issuer_name = issuer_name; x509_serial_number = serial_number; XMLNode key_cipherdata = get_node(enc_key, "xenc:CipherData"); key_cipherdata.NewChild("xenc:CipherValue") = (std::string)(encrypted_data["KeyInfo"]["EncryptedKey"]["CipherData"]["CipherValue"]); XMLNode ref_list = get_node(enc_key, "xenc:ReferenceList"); XMLNode ref_item = get_node(ref_list,"xenc:DataReference"); ref_item.NewAttribute("URI") = "#" + (std::string)(encrypted_data.Attribute("Id")); if(encCtx != NULL) xmlSecEncCtxDestroy(encCtx); if(keys_mngr != NULL)xmlSecKeysMngrDestroy(keys_mngr); envelope.GetXML(str); std::cout<<"Envelope: "< #endif #include #include "UsernameToken.h" int main(void) { std::string xml("\ \ \ \ \ \ 42\ \ \ \ "); std::string username("user"); std::string password("pass"); std::string derived_key; std::string derived_key3; std::string derived_key4; /*Generate the Username Token*/ Arc::SOAPEnvelope soap(xml); std::string uid("test-1"); Arc::UsernameToken ut1(soap, username, password, uid, Arc::UsernameToken::PasswordDigest); std::string str; soap.GetXML(str,true); std::cout<<"SOAP message with UsernameToken:"< #endif #include #include #include #include class SAMLTokenTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(SAMLTokenTest); CPPUNIT_TEST(TestSAML2Token); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void TestSAML2Token(); private: std::string xml; std::string certfile; std::string keyfile; }; void SAMLTokenTest::setUp() { xml = std::string("\ \ \ \ \ \ 42\ \ \ \ "); Arc::init_xmlsec(); certfile = "../../credential/test/host_cert.pem"; keyfile = "../../credential/test/host_key.pem"; } void SAMLTokenTest::tearDown() { Arc::final_xmlsec(); } void SAMLTokenTest::TestSAML2Token() { Arc::SOAPEnvelope soap1(xml); CPPUNIT_ASSERT((bool)soap1); Arc::SAMLToken st1(soap1, certfile, keyfile, Arc::SAMLToken::SAML2); CPPUNIT_ASSERT((bool)st1); std::string str; st1.GetXML(str); Arc::SOAPEnvelope soap2(str); CPPUNIT_ASSERT((bool)soap2); Arc::SAMLToken st2(soap2); CPPUNIT_ASSERT((bool)st2); CPPUNIT_ASSERT(st2.Authenticate()); CPPUNIT_ASSERT(st2.Authenticate("../../credential/test/ca_cert.pem", "")); } CPPUNIT_TEST_SUITE_REGISTRATION(SAMLTokenTest); nordugrid-arc-5.4.2/src/hed/libs/ws-security/test/PaxHeaders.7502/UsernameTokenTest.cpp0000644000000000000000000000012311741501077027060 xustar000000000000000026 mtime=1334215231.35079 27 atime=1513200574.989707 30 ctime=1513200659.605742664 nordugrid-arc-5.4.2/src/hed/libs/ws-security/test/UsernameTokenTest.cpp0000644000175000002070000000510311741501077027125 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include class UsernameTokenTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(UsernameTokenTest); CPPUNIT_TEST(TestTokenGenerationWithIteration); CPPUNIT_TEST(TestTokenGenerationWithPassword); 
CPPUNIT_TEST(TestTokenParsing); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void TestTokenGenerationWithIteration(); void TestTokenGenerationWithPassword(); void TestTokenParsing(); private: std::string xml; std::string username; std::string password; std::string uid; }; void UsernameTokenTest::setUp() { xml = std::string("\ \ \ \ \ \ 42\ \ \ \ "); username = "user"; password = "pass"; uid = "test-1"; } void UsernameTokenTest::tearDown() { } void UsernameTokenTest::TestTokenGenerationWithPassword() { Arc::SOAPEnvelope soap(xml); Arc::UsernameToken ut(soap, username, password, uid, Arc::UsernameToken::PasswordDigest); CPPUNIT_ASSERT((bool)ut); std::string derived_key; CPPUNIT_ASSERT((bool)ut); CPPUNIT_ASSERT(ut.Authenticate(password,derived_key)); CPPUNIT_ASSERT_EQUAL(ut.Username(), username); CPPUNIT_ASSERT(derived_key.empty()); } void UsernameTokenTest::TestTokenGenerationWithIteration() { Arc::SOAPEnvelope soap(xml); Arc::UsernameToken ut(soap, username, uid, true, 150); std::string derived_key; CPPUNIT_ASSERT((bool)ut); CPPUNIT_ASSERT(ut.Authenticate(password,derived_key)); CPPUNIT_ASSERT_EQUAL(ut.Username(), username); CPPUNIT_ASSERT(!derived_key.empty()); } void UsernameTokenTest::TestTokenParsing() { Arc::SOAPEnvelope soap(xml); Arc::UsernameToken ut1(soap, username, uid, true, 150); std::string derived_key1; CPPUNIT_ASSERT((bool)ut1); CPPUNIT_ASSERT(ut1.Authenticate(password,derived_key1)); CPPUNIT_ASSERT_EQUAL(ut1.Username(), username); CPPUNIT_ASSERT(!derived_key1.empty()); Arc::UsernameToken ut2(soap); std::string derived_key2; CPPUNIT_ASSERT((bool)ut2); CPPUNIT_ASSERT(ut2.Authenticate(password,derived_key2)); CPPUNIT_ASSERT_EQUAL(ut2.Username(), username); CPPUNIT_ASSERT(!derived_key2.empty()); CPPUNIT_ASSERT_EQUAL(derived_key1, derived_key2); } CPPUNIT_TEST_SUITE_REGISTRATION(UsernameTokenTest); nordugrid-arc-5.4.2/src/hed/libs/ws-security/test/PaxHeaders.7502/X509TokenTest.cpp0000644000000000000000000000012311741501077025746 xustar000000000000000026 mtime=1334215231.35079 27 atime=1513200574.989707 30 ctime=1513200659.606742676 nordugrid-arc-5.4.2/src/hed/libs/ws-security/test/X509TokenTest.cpp0000644000175000002070000000447611741501077026027 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include class X509TokenTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(X509TokenTest); CPPUNIT_TEST(TestSignatureToken); CPPUNIT_TEST(TestEncryptionToken); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void TestSignatureToken(); void TestEncryptionToken(); private: std::string xml; std::string certfile; std::string keyfile; }; void X509TokenTest::setUp() { xml = std::string("\ \ \ \ \ \ 42\ \ \ \ "); Arc::init_xmlsec(); certfile = "../../credential/test/host_cert.pem"; keyfile = "../../credential/test/host_key.pem"; } void X509TokenTest::tearDown() { Arc::final_xmlsec(); } void X509TokenTest::TestSignatureToken() { Arc::SOAPEnvelope soap1(xml); CPPUNIT_ASSERT((bool)soap1); Arc::X509Token xt1(soap1, certfile, keyfile); CPPUNIT_ASSERT((bool)xt1); std::string str; xt1.GetXML(str); Arc::SOAPEnvelope soap2(str); CPPUNIT_ASSERT((bool)soap2); Arc::X509Token xt2(soap2); CPPUNIT_ASSERT((bool)xt2); CPPUNIT_ASSERT(xt2.Authenticate()); CPPUNIT_ASSERT(xt2.Authenticate("../../credential/test/ca_cert.pem", "")); } void X509TokenTest::TestEncryptionToken() { Arc::SOAPEnvelope soap1(xml); CPPUNIT_ASSERT((bool)soap1); Arc::X509Token xt1(soap1, certfile, "", 
Arc::X509Token::Encryption); CPPUNIT_ASSERT((bool)xt1); std::string str; xt1.GetXML(str); Arc::SOAPEnvelope soap2(str); CPPUNIT_ASSERT((bool)soap2); Arc::X509Token xt2(soap2, keyfile); CPPUNIT_ASSERT((bool)xt2); Arc::XMLNode node1(xml); CPPUNIT_ASSERT((bool)node1); std::string str1; node1["Body"].Child().GetXML(str1); CPPUNIT_ASSERT(!str1.empty()); std::string str2; xt2.Child().GetXML(str2); CPPUNIT_ASSERT_EQUAL(str1, str2); } CPPUNIT_TEST_SUITE_REGISTRATION(X509TokenTest); nordugrid-arc-5.4.2/src/hed/libs/ws-security/PaxHeaders.7502/test_x509token.cpp0000644000000000000000000000012311276534161025271 xustar000000000000000026 mtime=1257945201.45071 27 atime=1513200574.993707 30 ctime=1513200659.573742272 nordugrid-arc-5.4.2/src/hed/libs/ws-security/test_x509token.cpp0000644000175000002070000000373111276534161025343 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "X509Token.h" int main(void) { std::string xml("\ \ \ \ \ \ 42\ \ \ \ "); Arc::init_xmlsec(); std::string cert = "../../../tests/echo/testcert.pem"; std::string key = "../../../tests/echo/testkey-nopass.pem"; /*Generate the signature X509 Token*/ Arc::SOAPEnvelope soap1(xml); Arc::X509Token xt1(soap1, cert, key); std::string str; xt1.GetXML(str); std::cout<<"SOAP message with X509Token for signature:"< #include // WS-Security X509 Token Profile v1.1 // wsse="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd" namespace Arc { /// Class for manipulating X.509 Token Profile. /**This class is for generating/consuming X.509 Token profile. * Currently it is used by x509token handler (src/hed/pdc/x509tokensh/) * It is not necessary to directly called this class. If we need to * use X.509 Token functionality, we only need to configure the x509token * handler into service and client. */ class X509Token : public SOAPEnvelope { public: /**X509TokeType is for distinguishing two types of operation. *It is used as the parameter of constuctor. */ typedef enum { Signature, Encryption } X509TokenType; /** Constructor.Parse X509 Token information from SOAP header. * X509 Token related information is extracted from SOAP header and * stored in class variables. And then it the X509Token object will * be used for authentication if the tokentype is Signature; otherwise * if the tokentype is Encryption, the encrypted soap body will be * decrypted and replaced by decrypted message. * keyfile is only needed when the X509Token is encryption token */ X509Token(SOAPEnvelope& soap, const std::string& keyfile = ""); /** Constructor. Add X509 Token information into the SOAP header. *Generated token contains elements X509 token and signature, and is *meant to be used for authentication on the consuming side. *@param soap The SOAP message to which the X509 Token will be inserted *@param certfile The certificate file which will be used to encrypt the SOAP body *(if parameter tokentype is Encryption), or be used as *(if parameter tokentype is Signature). *@param keyfile The key file which will be used to create signature. Not needed when create encryption. *@param tokentype Token type: Signature or Encryption. */ X509Token(SOAPEnvelope& soap, const std::string& certfile, const std::string& keyfile, X509TokenType token_type = Signature); /** Deconstructor. Nothing to be done except finalizing the xmlsec library. 
*/ ~X509Token(void); /** Returns true of constructor succeeded */ operator bool(void); /**Check signature by using the certificare information in X509Token which * is parsed by the constructor, and the trusted certificates specified as * one of the two parameters. * Not only the signature (in the X509Token) itself is checked, * but also the certificate which is supposed to check the signature needs * to be trused (which means the certificate is issued by the ca certificate * from CA file or CA directory). * At least one the the two parameters should be set. * @param cafile The CA file * @param capath The CA directory * @return true if authentication passes; otherwise false */ bool Authenticate(const std::string& cafile, const std::string& capath); /** Check signature by using the cert information in soap message. * Only the signature itself is checked, and it is not guranteed that * the certificate which is supposed to check the signature is trusted. */ bool Authenticate(void); private: /** Tells if specified SOAP header has WSSE element and X509Token inside * the WSSE element. */ bool Check(SOAPEnvelope& soap); private: xmlNodePtr signature_nd; std::string cert_str; X509TokenType tokentype; }; } // namespace Arc #endif /* __ARC_X509TOKEN_H__ */ nordugrid-arc-5.4.2/src/hed/libs/ws-security/PaxHeaders.7502/test_samltoken.cpp0000644000000000000000000000012311276534161025520 xustar000000000000000026 mtime=1257945201.45071 27 atime=1513200574.997707 30 ctime=1513200659.571742248 nordugrid-arc-5.4.2/src/hed/libs/ws-security/test_samltoken.cpp0000644000175000002070000000256111276534161025572 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "SAMLToken.h" int main(void) { std::string xml("\ \ \ \ \ \ 42\ \ \ \ "); Arc::init_xmlsec(); std::string cert = "../../../tests/echo/testcert.pem"; std::string key = "../../../tests/echo/testkey-nopass.pem"; /*Generate the signature SAML Token*/ Arc::SOAPEnvelope soap1(xml); Arc::SAMLToken st1(soap1, cert, key, Arc::SAMLToken::SAML2); std::string str; st1.GetXML(str); std::cout<<"SOAP message with SAMLToken:"< #endif #include #include #include #include #include #include //#include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef CHARSET_EBCDIC #include #endif #include #include #include #include #include #include #include "SAMLToken.h" namespace Arc { #define WSSE_NAMESPACE "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd" #define WSSE11_NAMESPACE "http://docs.oasis-open.org/wss/oasis-wss-wssecurity-secext-1.1.xsd" #define WSU_NAMESPACE "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd" #define XENC_NAMESPACE "http://www.w3.org/2001/04/xmlenc#" #define DSIG_NAMESPACE "http://www.w3.org/2000/09/xmldsig#" #define SAMLTOKEN_BASE_URL "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-saml-token-profile-1.0" #define SAML_NAMESPACE "urn:oasis:names:tc:SAML:1.0:assertion" #define SAML2_NAMESPACE "urn:oasis:names:tc:SAML:2.0:assertion" #define SAMLP_NAMESPACE "urn:oasis:names:tc:SAML:1.0:protocol" bool SAMLToken::Check(SOAPEnvelope& soap) { XMLNode header = soap.Header(); if(header.NamespacePrefix(WSSE_NAMESPACE).empty()){ std::cerr<<"No wsse namespace in SOAP Header"<node_; xmlDocPtr docPtr = bodyPtr->doc; xmlNodePtr assertionPtr = ((SAMLToken*)(&assertion))->node_; xmlChar* id; xmlAttrPtr id_attr; //Assertion reference if(samlversion == SAML1) { id = 
xmlGetProp(assertionPtr, (xmlChar *)"AssertionID"); id_attr = NULL; id_attr = xmlHasProp(assertionPtr, (xmlChar *)"AssertionID"); if(id_attr == NULL) std::cerr<<"Can not find AssertionID attribute from saml:Assertion"<node_; if(!assertion_signature_nd) { std::cerr<<"No Signature node in saml:Assertion"<node_; if(!wsse_signature_nd) { std::cerr<<"No Signature node in wsse:Security"< exists, or no trusted certificates configured"<status == xmlSecDSigStatusSucceeded) { std::cout<<"Succeed to verify the signature in saml:assertion"<signKey = pubkey; if (xmlSecDSigCtxVerify(dsigCtx, wsse_signature_nd) < 0) { xmlSecDSigCtxDestroy(dsigCtx); std::cerr<<"Signature verification failed for wsse:security"<status == xmlSecDSigStatusSucceeded) { std::cout<<"Succeed to verify the signature in wsse:security"<node_; xmlAddChild(assertion_nd, assertion_signature); //Add reference for signature xmlDocPtr docPtr = assertion_nd->doc; xmlChar* id = NULL; id = xmlGetProp(assertion_nd, (xmlChar *)"ID"); if(!id) { std::cerr<<"There is not Assertion ID attribute in assertion"<signKey = xmlSecCryptoAppKeyLoad(keyfile.c_str(), xmlSecKeyDataFormatPem, NULL, NULL, NULL); if(dsigCtx->signKey == NULL) { xmlSecDSigCtxDestroy(dsigCtx); std::cerr<<"Can not load key"<signKey, certfile.c_str(), xmlSecKeyDataFormatPem) < 0) { xmlSecDSigCtxDestroy(dsigCtx); std::cerr<<"Can not load certificate"<node_; xmlAddChild(wsse_nd, wsse_signature); //Add reference for signature xmlNodePtr bodyPtr = ((SAMLToken*)(&body))->node_; //docPtr = wsse_nd->doc; xmlChar* id = NULL; id = xmlGetProp(bodyPtr, (xmlChar *)"Id"); if(!id) { std::cout<<"There is not wsu:Id attribute in soap body, add a new one"<doc; xmlAddID(NULL, docPtr, (xmlChar *)id, id_attr); xmlFree(id); xmlSecTmplSignatureEnsureKeyInfo(wsse_signature, NULL); XMLNode keyinfo_nd = wsse["Signature"]["KeyInfo"]; XMLNode st_ref_nd = keyinfo_nd.NewChild("wsse:SecurityTokenReference"); st_ref_nd.NewAttribute("wsu:Id") = "STR1"; st_ref_nd.NewAttribute("wsse11:TokenType")="http://docs.oasis-open.org/wss/oasis-wss-saml-token-profile-1.1#SAMLV2.0"; XMLNode keyid_nd = st_ref_nd.NewChild("wsse:KeyIdentifier"); keyid_nd.NewAttribute("wsu:Id") = "abcde"; //not specified in the specification keyid_nd.NewAttribute("ValueType")="http://docs.oasis-open.org/wss/oasis-wss-saml-token-profile-1.0#SAMLID"; keyid_nd = (std::string)(assertion.Attribute("ID")); xmlSecDSigCtx *dsigCtx = xmlSecDSigCtxCreate(NULL); //Sign the assertion dsigCtx = xmlSecDSigCtxCreate(NULL); //load private key, assuming there is no need for passphrase dsigCtx->signKey = xmlSecCryptoAppKeyLoad(keyfile.c_str(), xmlSecKeyDataFormatPem, NULL, NULL, NULL); if(dsigCtx->signKey == NULL) { xmlSecDSigCtxDestroy(dsigCtx); std::cerr<<"Can not load key"<signKey, certfile.c_str(), xmlSecKeyDataFormatPem) < 0) { // xmlSecDSigCtxDestroy(dsigCtx); // std::cerr<<"Can not load certificate"< #include // WS-Security Username Token Profile v1.1 // wsse="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd" namespace Arc { /// Interface for manipulation of WS-Security according to Username Token Profile. class UsernameToken { protected: XMLNode header_; /** SOAP header element */ public: typedef enum { PasswordText, PasswordDigest } PasswordType; /** Link to existing SOAP header and parse Username Token information. Username Token related information is extracted from SOAP header and stored in class variables. */ UsernameToken(SOAPEnvelope& soap); /** Add Username Token information into the SOAP header. 
Generated token contains elements Username and Password and is meant to be used for authentication. @param soap the SOAP message @param username ... - if empty it is entered interactively from stdin @param password ... - if empty it is entered interactively from stdin @param uid @param pwdtype ... */ UsernameToken(SOAPEnvelope& soap, const std::string& username, const std::string& password,const std::string& uid, PasswordType pwdtype); /** Add Username Token information into the SOAP header. Generated token contains elements Username and Salt and is meant to be used for deriving Key Derivation. @param soap the SOAP message @param username ... @param mac if derived key is meant to be used for Message Authentication Code @param iteration ... */ UsernameToken(SOAPEnvelope& soap, const std::string& username, const std::string& id, bool mac, int iteration); /** Returns true of constructor succeeded */ operator bool(void); /** Returns username associated with this instance */ std::string Username(void); /** Checks parsed/generated token against specified password. If token is meant to be used for deriving a key then key is returned in derived_key. In that case authentication is performed outside of UsernameToken class using obtained derived_key. */ bool Authenticate(const std::string& password,std::string& derived_key); /** Checks parsed token against password stored in specified stream. If token is meant to be used for deriving a key then key is returned in derived_key */ bool Authenticate(std::istream& password,std::string& derived_key); private: /** Tells if specified SOAP header has WSSE element and UsernameToken inside the WSSE element */ static bool Check(SOAPEnvelope& soap); private: std::string username_; std::string uid_; std::string password_; std::string passwdtype_; std::string nonce_; std::string created_; std::string salt_; int iteration_; }; } // namespace Arc #endif /* __ARC_USERNAMETOKEN_H__ */ nordugrid-arc-5.4.2/src/hed/libs/ws-security/PaxHeaders.7502/UsernameToken.cpp0000644000000000000000000000012413153454775025255 xustar000000000000000027 mtime=1504598525.713781 27 atime=1513200574.990707 30 ctime=1513200659.568742211 nordugrid-arc-5.4.2/src/hed/libs/ws-security/UsernameToken.cpp0000644000175000002070000003206513153454775025330 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include //#include #include #include #include #ifdef CHARSET_EBCDIC #include #endif #include #include #include #include "UsernameToken.h" namespace Arc { #if (OPENSSL_VERSION_NUMBER < 0x10100000L) static EVP_MD_CTX* EVP_MD_CTX_new(void) { EVP_MD_CTX* ctx = (EVP_MD_CTX*)std::malloc(sizeof(EVP_MD_CTX)); if(ctx) { EVP_MD_CTX_init(ctx); } return ctx; } static void EVP_MD_CTX_free(EVP_MD_CTX* ctx) { if(ctx) { EVP_MD_CTX_cleanup(ctx); std::free(ctx); } } #endif #define WSSE_NAMESPACE "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd" #define WSSE11_NAMESPACE "http://docs.oasis-open.org/wss/oasis-wss-wssecurity-secext-1.1.xsd" #define WSU_NAMESPACE "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd" #define USERNAMETOKEN_BASE_URL "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-username-token-profile-1.0" #define USENAME_TOKEN "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-username-token-profile-1.0#UsernameToken" #define PASSWORD_TEXT "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-username-token-profile-1.0#PasswordText" #define PASSWORD_DIGEST 
"http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-username-token-profile-1.0#PasswordDigest" #define PASS_MIN_LENGTH 4 static bool get_password(std::string& password, bool verify) { char pwd[1024]; int len = sizeof(pwd); int j, r; char prompt[128]; for(;;) { snprintf(prompt, sizeof(prompt), "Enter password for Username Token: "); r = EVP_read_pw_string(pwd, len, prompt, 0); if(r != 0) { std::cerr<<"Failed to read read input password"< #endif #include // This is just an intermediate module for making libarccrypto library // persistent in compatible way. // Adding plugin descriptor to avoid warning messages from loader extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/libs/cryptomod/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515023535 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200597.167979025 30 ctime=1513200659.103736524 nordugrid-arc-5.4.2/src/hed/libs/cryptomod/Makefile.am0000644000175000002070000000057112052416515023602 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libmodcrypto.la libmodcrypto_la_SOURCES = crypto.cpp libmodcrypto_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libmodcrypto_la_LIBADD = \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la libmodcrypto_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/libs/cryptomod/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315725023544 xustar000000000000000030 mtime=1513200597.210979551 30 atime=1513200647.961600251 30 ctime=1513200659.104736536 nordugrid-arc-5.4.2/src/hed/libs/cryptomod/Makefile.in0000644000175000002070000006023513214315725023620 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/cryptomod DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) libmodcrypto_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libmodcrypto_la_OBJECTS = libmodcrypto_la-crypto.lo libmodcrypto_la_OBJECTS = $(am_libmodcrypto_la_OBJECTS) libmodcrypto_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libmodcrypto_la_CXXFLAGS) $(CXXFLAGS) \ $(libmodcrypto_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = 
$(libmodcrypto_la_SOURCES) DIST_SOURCES = $(libmodcrypto_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ 
GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ 
bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libmodcrypto.la libmodcrypto_la_SOURCES = crypto.cpp libmodcrypto_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libmodcrypto_la_LIBADD = \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la libmodcrypto_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/cryptomod/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/cryptomod/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libmodcrypto.la: $(libmodcrypto_la_OBJECTS) $(libmodcrypto_la_DEPENDENCIES) $(libmodcrypto_la_LINK) -rpath $(pkglibdir) $(libmodcrypto_la_OBJECTS) $(libmodcrypto_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmodcrypto_la-crypto.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libmodcrypto_la-crypto.lo: crypto.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) 
$(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmodcrypto_la_CXXFLAGS) $(CXXFLAGS) -MT libmodcrypto_la-crypto.lo -MD -MP -MF $(DEPDIR)/libmodcrypto_la-crypto.Tpo -c -o libmodcrypto_la-crypto.lo `test -f 'crypto.cpp' || echo '$(srcdir)/'`crypto.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmodcrypto_la-crypto.Tpo $(DEPDIR)/libmodcrypto_la-crypto.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='crypto.cpp' object='libmodcrypto_la-crypto.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmodcrypto_la_CXXFLAGS) $(CXXFLAGS) -c -o libmodcrypto_la-crypto.lo `test -f 'crypto.cpp' || echo '$(srcdir)/'`crypto.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/common0000644000000000000000000000013213214316022020661 xustar000000000000000030 mtime=1513200658.875733736 30 atime=1513200668.720854145 30 ctime=1513200658.875733736 nordugrid-arc-5.4.2/src/hed/libs/common/0000755000175000002070000000000013214316022021004 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/IString.cpp0000644000000000000000000000012412574537240023041 xustar000000000000000027 mtime=1441971872.085743 27 atime=1513200574.905706 30 ctime=1513200658.855733491 nordugrid-arc-5.4.2/src/hed/libs/common/IString.cpp0000644000175000002070000000275412574537240023116 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #ifdef ENABLE_NLS #include #endif #include "IString.h" namespace Arc { PrintFBase::PrintFBase() : refcount(1) {} PrintFBase::~PrintFBase() {} void PrintFBase::Retain() { refcount++; } bool PrintFBase::Release() { refcount--; return (refcount == 0); } const char* FindTrans(const char *p) { #ifdef ENABLE_NLS return dgettext(PACKAGE, p ? *p ? p : istring("(empty)") : istring("(null)")); #else return p ? *p ? p : "(empty)" : "(null)"; #endif } const char* FindNTrans(const char *s, const char *p, unsigned long n) { #ifdef ENABLE_NLS return dngettext(PACKAGE, s ? *s ? s : istring("(empty)") : istring("(null)"), p ? *p ? p : istring("(empty)") : istring("(null)"), n); #else return n == 1 ? s ? *s ? s : "(empty)" : "(null)" : p ? *p ? p : "(empty)" : "(null)"; #endif } IString::IString(const IString& istr) : p(istr.p) { p->Retain(); } IString::~IString() { if (p->Release()) delete p; } IString& IString::operator=(const IString& istr) { if (p->Release()) delete p; p = istr.p; p->Retain(); return *this; } std::string IString::str(void) const { std::string s; p->msg(s); return s; } std::ostream& operator<<(std::ostream& os, const IString& msg) { msg.p->msg(os); return os; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/ArcLocation.h0000644000000000000000000000012412365443155023324 xustar000000000000000027 mtime=1406551661.397686 27 atime=1513200574.913706 30 ctime=1513200658.804732867 nordugrid-arc-5.4.2/src/hed/libs/common/ArcLocation.h0000644000175000002070000000272312365443155023375 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_ARCLOCATION_H__ #define __ARC_ARCLOCATION_H__ #include #include namespace Arc { /// Determines ARC installation location /** \ingroup common * \headerfile ArcLocation.h arc/ArcLocation.h */ class ArcLocation { public: /// Initializes location information /** Main source is value of variable ARC_LOCATION, otherwise path to executable provided in path is used. If nothing works then warning message is sent to logger and initial installation prefix is used. */ static void Init(std::string path); /// Returns ARC installation location static const std::string& Get(); /// Returns ARC plugins directory location /** Main source is value of variable ARC_PLUGIN_PATH, otherwise path is derived from installation location. */ static std::list GetPlugins(); /// Returns location of ARC system data, e.g. $ARC_LOCATION/share/arc /** * \since Added in 3.0.0 **/ static std::string GetDataDir(); /// Returns location of ARC system libraries, e.g. $ARC_LOCATION/lib/arc /** * \since Added in 4.2.0 **/ static std::string GetLibDir(); /// Returns location of ARC system tools, e.g. 
$ARC_LOCATION/libexec/arc /** * \since Added in 3.0.0 **/ static std::string GetToolsDir(); private: static std::string& location(void); }; } // namespace Arc #endif // __ARC_ARCLOCATION_H__ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/ArcConfigIni.h0000644000000000000000000000012412771223617023421 xustar000000000000000027 mtime=1474635663.023405 27 atime=1513200574.912706 30 ctime=1513200658.806732892 nordugrid-arc-5.4.2/src/hed/libs/common/ArcConfigIni.h0000644000175000002070000000760012771223617023471 0ustar00mockbuildmock00000000000000#ifndef __GM_CONFIG_SECTIONS_H__ #define __GM_CONFIG_SECTIONS_H__ #include #include #include #include namespace Arc { /// This class is used to process ini-like configuration files. class ConfigIni { private: ConfigFile* fin; bool open; std::list section_names; std::string current_section; int current_section_n; std::list::iterator current_section_p; int line_number; bool current_section_changed; public: /// Creates object associated with file located at filename. /// File is kept open and is closed in destructor. ConfigIni(const char* filename); /// Creates object associated with already open file f. /// Associated file will not be closed in destructor and /// corresponding ConfigFile object must be valid during /// whole lifetime of this object. ConfigIni(ConfigFile& f); /// Ordinary destructor ~ConfigIni(void); /// Returns true if proper configuration file is associated. operator bool(void) const { return ((fin!=NULL) && (*fin)); }; /// Specifies section name which will be processed. /// Unspecified sections will be skipped by ReadNext() methods. bool AddSection(const char* name); /// Read next line of configuration from sesction(s) specified by AddSection(). /// Returns true in case of success and fills content of line into line. bool ReadNext(std::string& line); /// Read next line of configuration from sesction(s) specified by AddSection(). /// Returns true in case of success and fills split content of line into /// command and value. bool ReadNext(std::string& name,std::string& value); /// Return name of the section to which last read line belongs. /// This name also includes subsection name. const char* Section(void) const { return current_section.c_str(); }; /// Returns true if last ReadNext() switched to next section. bool SectionNew(void) const { return current_section_changed; }; /// Return number of the section to which last read line belongs. /// Numbers are assigned in order they are passed to AddSection() /// method starting from 0. int SectionNum(void) const { return current_section_n; }; /// Returns name of the section to which last read line matched. /// It is similar to Section() method but name is as specified /// in AddSection() and hence does not contain subsection(s). const char* SectionMatch(void) const { if(current_section_n<0) return ""; return current_section_p->c_str(); }; /// Returns name of subsection to which last read line belongs. const char* SubSection(void) const { if(current_section_n<0) return ""; if(current_section.length() > current_section_p->length()) return (current_section.c_str()+current_section_p->length()+1); return ""; }; /// Return name of subsubsection to which last read line belongs /// relative to subsection given by name. If current subsection /// does not match specified name then NULL is returned. const char* SubSectionMatch(const char* name); /// Helper method which reads keyword from string at 'line' separated by 'separator' /// and stores it in 'str'. 
Each couple of characters starting from \ is /// replaced by second character. \x## is replaced by code corresponding /// to hexadecimal number ##. /// If separator is set to \0 then whole line is consumed. /// If quotes are not \0 keyword may be enclosed in specified character. /// Returns position of first character in 'line', which is not in read keyword. static int NextArg(const char* line,std::string &str,char separator = ' ',char quotes = '"'); /// Reads keyword from string at 'rest' and reduces 'rest' by removing keyword from it. /// The way it processes keyword is similar to static NextArg method. static std::string NextArg(std::string &rest,char separator = ' ',char quotes = '"'); }; } // namespace ARex #endif // __GM_CONFIG_SECTIONS_H__ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/Makefile.am0000644000000000000000000000012713214315176023007 xustar000000000000000027 mtime=1513200254.761812 30 atime=1513200596.049965352 30 ctime=1513200658.840733307 nordugrid-arc-5.4.2/src/hed/libs/common/Makefile.am0000644000175000002070000000407213214315176023054 0ustar00mockbuildmock00000000000000DIST_SUBDIRS = test SUBDIRS = $(TEST_DIR) if MYSQL_LIBRARY_ENABLED MYSQL_WRAPPER_HEADER = MysqlWrapper.h MYSQL_WRAPPER_CPP = MysqlWrapper.cpp else MYSQL_WRAPPER_HEADER = MYSQL_WRAPPER_CPP = endif lib_LTLIBRARIES = libarccommon.la pgmpkglibdir = $(pkglibdir) pgmpkglib_PROGRAMS = arc-file-access arc-hostname-resolver libarccommon_ladir = $(pkgincludedir) libarccommon_la_HEADERS = ArcVersion.h ArcConfig.h ArcLocation.h \ ArcRegex.h ArcConfigIni.h ArcConfigFile.h \ Base64.h CheckSum.h DBInterface.h DateTime.h \ FileLock.h FileUtils.h FileAccess.h GUID.h IString.h \ Logger.h OptionParser.h StringConv.h Thread.h URL.h \ User.h UserConfig.h Utils.h XMLNode.h HostnameResolver.h \ Counter.h IntraProcessCounter.h IniConfig.h Profile.h \ Run.h Watchdog.h JobPerfLog.h win32.h $(MYSQL_WRAPPER_HEADER) if WIN32 PLATFORM_SPECIFIC = Run_win32.cpp win32.cpp else PLATFORM_SPECIFIC = Run_unix.cpp endif libarccommon_la_SOURCES = ArcVersion.cpp ArcConfig.cpp ArcLocation.cpp \ ArcRegex.cpp ArcConfigIni.cpp ArcConfigFile.cpp \ Base64.cpp CheckSum.cpp DateTime.cpp \ FileLock.cpp FileUtils.cpp FileAccess.cpp GUID.cpp IString.cpp \ Logger.cpp OptionParser.cpp StringConv.cpp Thread.cpp URL.cpp \ User.cpp UserConfig.cpp Utils.cpp XMLNode.cpp HostnameResolver.cpp \ Counter.cpp IntraProcessCounter.cpp IniConfig.cpp Profile.cpp JobPerfLog.cpp \ $(PLATFORM_SPECIFIC) $(MYSQL_WRAPPER_CPP) libarccommon_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(GTHREAD_CFLAGS) $(ZLIB_CFLAGS) \ $(MYSQL_CFLAGS) $(AM_CXXFLAGS) libarccommon_la_LIBADD = $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(GTHREAD_LIBS) $(ZLIB_LIBS) \ $(REGEX_LIBS) $(UUID_LIBS) $(EXTRA_LIBS) $(SOCKET_LIBS) \ $(MYSQL_LIBS) $(LIBINTL) libarccommon_la_LDFLAGS = -version-info 3:0:0 arc_file_access_SOURCES = file_access.cpp file_access.h arc_file_access_CXXFLAGS = -I$(top_srcdir)/include $(AM_CXXFLAGS) arc_file_access_LDADD = arc_hostname_resolver_SOURCES = hostname_resolver.cpp hostname_resolver.h arc_hostname_resolver_CXXFLAGS = -I$(top_srcdir)/include $(AM_CXXFLAGS) arc_hostname_resolver_LDADD = nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/ArcLocation.cpp0000644000000000000000000000012412365443155023657 xustar000000000000000027 mtime=1406551661.397686 27 atime=1513200574.888706 30 ctime=1513200658.845733369 nordugrid-arc-5.4.2/src/hed/libs/common/ArcLocation.cpp0000644000175000002070000000656612365443155023741 
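A minimal usage sketch for the Arc::ConfigIni interface declared in ArcConfigIni.h above, assuming the header is reachable on the include path and the program is linked against libarccommon; the configuration file name, the section names and the end-of-input convention (empty result or false return) are assumptions of the sketch, only the calls themselves follow the declarations shown above.

// Sketch only: walk selected [sections] of an ini-style file with Arc::ConfigIni.
#include <iostream>
#include <string>
#include "ArcConfigIni.h"   // assumed include path

int main() {
  Arc::ConfigIni cfg("/etc/arc.conf");      // hypothetical configuration file
  if (!cfg) {                               // operator bool: file could not be opened
    std::cerr << "cannot open configuration" << std::endl;
    return 1;
  }
  cfg.AddSection("common");                 // sections are numbered 0,1,... in the
  cfg.AddSection("grid-manager");           // order they are passed to AddSection()
  std::string name, value;
  while (cfg.ReadNext(name, value)) {       // lines outside the sections above are skipped
    if (name.empty()) break;                // treated here as end of input (assumption)
    std::cout << cfg.Section() << " (section #" << cfg.SectionNum() << "): "
              << name << " = " << value << std::endl;
  }
  return 0;
}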
0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #ifdef ENABLE_NLS #include #endif #include #include "ArcLocation.h" namespace Arc { std::string& ArcLocation::location(void) { static std::string* location_ = new std::string; return *location_; } // Removes null parts from path - /./ and // static void squash_path(std::string& path) { std::string::size_type p = 0; while(p < path.length()) { if(path[p] == G_DIR_SEPARATOR) { if((p+2) < path.length()) { if((path[p+1] == '.') && (path[p+2] == G_DIR_SEPARATOR)) { // ..././... path.erase(p+1,2); continue; } } if((p+1) < path.length()) { if(path[p+1] == G_DIR_SEPARATOR) { // ...//... path.erase(p+1,1); continue; } } } ++p; } } void ArcLocation::Init(std::string path) { location().clear(); location() = GetEnv("ARC_LOCATION"); if (location().empty() && !path.empty()) { if (path.rfind(G_DIR_SEPARATOR_S) == std::string::npos) path = Glib::find_program_in_path(path); if (path.substr(0, 2) == std::string(".") + G_DIR_SEPARATOR_S) { std::string cwd = Glib::get_current_dir(); path.replace(0, 1, cwd); } squash_path(path); std::string::size_type pos = path.rfind(G_DIR_SEPARATOR_S); if (pos != std::string::npos && pos > 0) { pos = path.rfind(G_DIR_SEPARATOR_S, pos - 1); if (pos != std::string::npos) location() = path.substr(0, pos); } } if (location().empty()) { Logger::getRootLogger().msg(WARNING, "Can not determine the install location. " "Using %s. Please set ARC_LOCATION " "if this is not correct.", INSTPREFIX); location() = INSTPREFIX; } #ifdef ENABLE_NLS bindtextdomain(PACKAGE, (location() + G_DIR_SEPARATOR_S "share" G_DIR_SEPARATOR_S "locale").c_str()); #endif } const std::string& ArcLocation::Get() { if (location().empty()) Init(""); return location(); } std::list ArcLocation::GetPlugins() { std::list plist; std::string arcpluginpath = GetEnv("ARC_PLUGIN_PATH"); if (!arcpluginpath.empty()) { std::string::size_type pos = 0; while (pos != std::string::npos) { std::string::size_type pos2 = arcpluginpath.find(G_SEARCHPATH_SEPARATOR, pos); plist.push_back(pos2 == std::string::npos ? 
arcpluginpath.substr(pos) : arcpluginpath.substr(pos, pos2 - pos)); pos = pos2; if (pos != std::string::npos) pos++; } } else plist.push_back(Get() + G_DIR_SEPARATOR_S + PKGLIBSUBDIR); return plist; } std::string ArcLocation::GetDataDir() { if (location().empty()) Init(""); return location() + G_DIR_SEPARATOR_S + PKGDATASUBDIR; } std::string ArcLocation::GetLibDir() { if (location().empty()) Init(""); return location() + G_DIR_SEPARATOR_S + PKGLIBSUBDIR; } std::string ArcLocation::GetToolsDir() { if (location().empty()) Init(""); return location() + G_DIR_SEPARATOR_S + PKGLIBEXECSUBDIR; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/hostname_resolver.h0000644000000000000000000000012413124220224024645 xustar000000000000000027 mtime=1498488980.396721 27 atime=1513200574.915706 30 ctime=1513200658.874733723 nordugrid-arc-5.4.2/src/hed/libs/common/hostname_resolver.h0000644000175000002070000000031413124220224024710 0ustar00mockbuildmock00000000000000#define CMD_PING (0) // - #define CMD_RESOLVE_TCP_LOCAL (1) #define CMD_RESOLVE_TCP_REMOTE (2) // string hostname // - // result // errno // [ // int family // socklen_t length // void addr[] // ]* nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/JobPerfLog.h0000644000000000000000000000012412675602216023116 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.860706 30 ctime=1513200658.837733271 nordugrid-arc-5.4.2/src/hed/libs/common/JobPerfLog.h0000644000175000002070000000230012675602216023156 0ustar00mockbuildmock00000000000000#ifndef __ARC_JOB_PERF_LOGGER__ #define __ARC_JOB_PERF_LOGGER__ #include #include namespace Arc { class JobPerfLog { public: JobPerfLog(); ~JobPerfLog(); void SetOutput(const std::string& filename); void SetEnabled(bool enabled); const std::string& GetOutput() const { return log_path; }; bool GetEnabled() const { return log_enabled; }; /** Log one performance record. */ void Log(const std::string& name, const std::string& id, const timespec& start, const timespec& end); private: std::string log_path; bool log_enabled; }; class JobPerfRecord { public: /** Creates object */ JobPerfRecord(JobPerfLog& log); /** Creates object and calls Start() */ JobPerfRecord(JobPerfLog& log, const std::string& id); /** Prepare to log one record by remembering start time of action being measured. */ void Start(const std::string& id); /** Log performance record started by previous LogStart(). 
*/ void End(const std::string& name); bool Started() { return start_recorded; }; private: JobPerfLog& perf_log; bool start_recorded; timespec start_time; std::string start_id; }; } // namespace Arc #endif // __ARC_JOB_PERF_LOGGER__ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/MysqlWrapper.cpp0000644000000000000000000000012311620164404024114 xustar000000000000000026 mtime=1312876804.40538 27 atime=1513200574.899706 30 ctime=1513200658.871733686 nordugrid-arc-5.4.2/src/hed/libs/common/MysqlWrapper.cpp0000644000175000002070000001250711620164404024167 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include "MysqlWrapper.h" namespace Arc { MySQLDatabase::MySQLDatabase(std::string& server, int port) : is_connected(false), server_(server), port_(port), mysql(NULL) {} MySQLDatabase::MySQLDatabase(const MySQLDatabase& other) : is_connected(false) { if (other.isconnected()) { if (isconnected()) close(); mysql_real_connect(mysql, other.server_.c_str(), other.user_.c_str(), other.password_.c_str(), other.dbname_.c_str(), other.port_, NULL, 0); if(mysql == NULL) is_connected = false; else is_connected = true; } else is_connected = false; } MySQLDatabase::~MySQLDatabase() { if (isconnected()) close(); } bool MySQLDatabase::connect(std::string& dbname, std::string& user, std::string& password) { mysql = mysql_init(NULL); if (!mysql_real_connect(mysql, server_.c_str(), user.c_str(), password.c_str(), dbname.c_str(), port_, NULL, 0)) { std::cerr << "Database connection failed" << std::endl; return false; } is_connected = true; return true; } void MySQLDatabase::close() { if (mysql) mysql_close(mysql); mysql = NULL; is_connected = false; } bool MySQLDatabase::enable_ssl(const std::string& keyfile, const std::string& certfile, const std::string& cafile, const std::string& capath) { return mysql_ssl_set(mysql, keyfile.c_str(), certfile.c_str(), cafile.c_str(), capath.c_str(), NULL) == 0; } bool MySQLDatabase::shutdown() { return mysql_shutdown(mysql, SHUTDOWN_DEFAULT); } MySQLQuery::MySQLQuery(Database *db) : db_(NULL), res(NULL), num_rows(0), num_colums(0) { MySQLDatabase *database = NULL; database = dynamic_cast(db); if(database == NULL) std::cerr<<"The parameter of constructor should be MySQLDatabase type"<mysql == NULL) std::cerr << "mysql object is NULL" << std::endl; if (mysql_query(db_->mysql, sqlstr.c_str())) { std::cerr << "Database query failed" << std::endl; return false; } res = mysql_store_result(db_->mysql); if (res) { num_colums = 0; MYSQL_FIELD *field = NULL; while (true) { field = mysql_fetch_field(res); if (field) { if (field->name) field_names[field->name] = num_colums; num_colums++; } else break; } num_rows = mysql_num_rows(res); } return true; } int MySQLQuery::get_num_colums() { return num_colums; } int MySQLQuery::get_num_rows() { return num_rows; } QueryRowResult MySQLQuery::get_row(int row_number) const { mysql_data_seek(res, row_number); return get_row(); } QueryRowResult MySQLQuery::get_row() const { MYSQL_ROW row = mysql_fetch_row(res); QueryRowResult row_value; if (row == NULL) return row_value; std::string field; for (int i = 0; i < num_colums; i++) { if(row[i] == NULL) field=""; else field = row[i]; row_value.push_back(field); } return row_value; } std::string MySQLQuery::get_row_field(int row_number, std::string& field_name) { int field_number = field_names[field_name]; QueryRowResult row_value = get_row(row_number); return row_value[field_number]; } bool MySQLQuery::get_array(std::string& sqlstr, 
QueryArrayResult& result, std::vector& arguments) { //replace the "?" in sql sentence with the values from argument of this method. //e.g. if the sql sentence is "select table.value from table where table.name = ?", //and the third argument of this method is std::string& name, then the "name" will replace //the "?" in the above sql sentence. //The repalacement will be sequentially done, which means the first argument in the vector will replace //the first "?" in sql sentence, and the second one will replace the second "?". //The values inside the third arguments--"arguments" should all be std::string type, since we will not //distinguish them in this method itself. std::string arg_str; size_t found = std::string::npos; int i = 0; while (true) { if (i < arguments.size()) arg_str = arguments[i++]; else arg_str = ""; found = sqlstr.find("?", found + 1); if ((found != std::string::npos) && arg_str.empty()) { std::cerr << "There is no enough arguments given in method: MySQLQuery::getarray " << std::endl; return false; } if (found == std::string::npos) break; sqlstr.replace(found, 1, arg_str); } //std::cout << "The sql sentence after replacement: " << sqlstr << std::endl; QueryRowResult row_value; if (execute(sqlstr)) { int rows = get_num_rows(); for (int i = 0; i < rows; i++) { row_value = get_row(); result.push_back(row_value); } return true; } return false; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315724023012 xustar000000000000000030 mtime=1513200596.246967761 30 atime=1513200647.848598869 29 ctime=1513200658.84173332 nordugrid-arc-5.4.2/src/hed/libs/common/Makefile.in0000644000175000002070000023567713214315724023105 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
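The comments in MySQLQuery::get_array above describe how "?" placeholders in the SQL text are replaced, one by one, by the string arguments supplied to the call. The stand-alone sketch below distils just that substitution step; the function name and the exact handling of missing arguments are the sketch's own choices, not the library's code.

// Sketch only: sequential "?" placeholder substitution, as described in the
// get_array() comments above. Pure std::string, no MySQL dependency.
#include <iostream>
#include <string>
#include <vector>

// Replace each "?" in 'sql' with the next entry of 'args', in order.
// Returns false when there are more placeholders than arguments.
static bool substitute_placeholders(std::string& sql,
                                    const std::vector<std::string>& args) {
  std::string::size_type pos = 0;
  std::vector<std::string>::size_type i = 0;
  while ((pos = sql.find('?', pos)) != std::string::npos) {
    if (i >= args.size()) return false;   // not enough arguments for the placeholders
    sql.replace(pos, 1, args[i]);
    pos += args[i].length();              // resume the search after the inserted value
    ++i;
  }
  return true;
}

int main() {
  std::string sql = "select table.value from table where table.name = ?";
  std::vector<std::string> args;
  args.push_back("'somename'");
  if (substitute_placeholders(sql, args))
    std::cout << sql << std::endl;        // ... where table.name = 'somename'
  return 0;
}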
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ pgmpkglib_PROGRAMS = arc-file-access$(EXEEXT) \ arc-hostname-resolver$(EXEEXT) subdir = src/hed/libs/common DIST_COMMON = README $(am__libarccommon_la_HEADERS_DIST) \ $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(pgmpkglibdir)" \ "$(DESTDIR)$(libarccommon_ladir)" LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = libarccommon_la_DEPENDENCIES = $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) am__libarccommon_la_SOURCES_DIST = ArcVersion.cpp ArcConfig.cpp \ ArcLocation.cpp ArcRegex.cpp ArcConfigIni.cpp \ ArcConfigFile.cpp Base64.cpp CheckSum.cpp DateTime.cpp \ FileLock.cpp FileUtils.cpp FileAccess.cpp GUID.cpp IString.cpp \ Logger.cpp OptionParser.cpp StringConv.cpp Thread.cpp URL.cpp \ User.cpp UserConfig.cpp Utils.cpp XMLNode.cpp \ HostnameResolver.cpp Counter.cpp IntraProcessCounter.cpp \ IniConfig.cpp Profile.cpp JobPerfLog.cpp Run_unix.cpp \ Run_win32.cpp win32.cpp MysqlWrapper.cpp @WIN32_FALSE@am__objects_1 = libarccommon_la-Run_unix.lo @WIN32_TRUE@am__objects_1 = 
libarccommon_la-Run_win32.lo \ @WIN32_TRUE@ libarccommon_la-win32.lo @MYSQL_LIBRARY_ENABLED_TRUE@am__objects_2 = \ @MYSQL_LIBRARY_ENABLED_TRUE@ libarccommon_la-MysqlWrapper.lo am_libarccommon_la_OBJECTS = libarccommon_la-ArcVersion.lo \ libarccommon_la-ArcConfig.lo libarccommon_la-ArcLocation.lo \ libarccommon_la-ArcRegex.lo libarccommon_la-ArcConfigIni.lo \ libarccommon_la-ArcConfigFile.lo libarccommon_la-Base64.lo \ libarccommon_la-CheckSum.lo libarccommon_la-DateTime.lo \ libarccommon_la-FileLock.lo libarccommon_la-FileUtils.lo \ libarccommon_la-FileAccess.lo libarccommon_la-GUID.lo \ libarccommon_la-IString.lo libarccommon_la-Logger.lo \ libarccommon_la-OptionParser.lo libarccommon_la-StringConv.lo \ libarccommon_la-Thread.lo libarccommon_la-URL.lo \ libarccommon_la-User.lo libarccommon_la-UserConfig.lo \ libarccommon_la-Utils.lo libarccommon_la-XMLNode.lo \ libarccommon_la-HostnameResolver.lo libarccommon_la-Counter.lo \ libarccommon_la-IntraProcessCounter.lo \ libarccommon_la-IniConfig.lo libarccommon_la-Profile.lo \ libarccommon_la-JobPerfLog.lo $(am__objects_1) \ $(am__objects_2) libarccommon_la_OBJECTS = $(am_libarccommon_la_OBJECTS) libarccommon_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) \ $(libarccommon_la_LDFLAGS) $(LDFLAGS) -o $@ PROGRAMS = $(pgmpkglib_PROGRAMS) am_arc_file_access_OBJECTS = arc_file_access-file_access.$(OBJEXT) arc_file_access_OBJECTS = $(am_arc_file_access_OBJECTS) arc_file_access_DEPENDENCIES = arc_file_access_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(arc_file_access_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_arc_hostname_resolver_OBJECTS = \ arc_hostname_resolver-hostname_resolver.$(OBJEXT) arc_hostname_resolver_OBJECTS = $(am_arc_hostname_resolver_OBJECTS) arc_hostname_resolver_DEPENDENCIES = arc_hostname_resolver_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(arc_hostname_resolver_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarccommon_la_SOURCES) $(arc_file_access_SOURCES) \ $(arc_hostname_resolver_SOURCES) DIST_SOURCES = $(am__libarccommon_la_SOURCES_DIST) \ $(arc_file_access_SOURCES) $(arc_hostname_resolver_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive 
install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive am__libarccommon_la_HEADERS_DIST = ArcVersion.h ArcConfig.h \ ArcLocation.h ArcRegex.h ArcConfigIni.h ArcConfigFile.h \ Base64.h CheckSum.h DBInterface.h DateTime.h FileLock.h \ FileUtils.h FileAccess.h GUID.h IString.h Logger.h \ OptionParser.h StringConv.h Thread.h URL.h User.h UserConfig.h \ Utils.h XMLNode.h HostnameResolver.h Counter.h \ IntraProcessCounter.h IniConfig.h Profile.h Run.h Watchdog.h \ JobPerfLog.h win32.h MysqlWrapper.h HEADERS = $(libarccommon_la_HEADERS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ 
DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ 
PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ DIST_SUBDIRS = test SUBDIRS = $(TEST_DIR) @MYSQL_LIBRARY_ENABLED_FALSE@MYSQL_WRAPPER_HEADER = @MYSQL_LIBRARY_ENABLED_TRUE@MYSQL_WRAPPER_HEADER = MysqlWrapper.h 
@MYSQL_LIBRARY_ENABLED_FALSE@MYSQL_WRAPPER_CPP = @MYSQL_LIBRARY_ENABLED_TRUE@MYSQL_WRAPPER_CPP = MysqlWrapper.cpp lib_LTLIBRARIES = libarccommon.la pgmpkglibdir = $(pkglibdir) libarccommon_ladir = $(pkgincludedir) libarccommon_la_HEADERS = ArcVersion.h ArcConfig.h ArcLocation.h \ ArcRegex.h ArcConfigIni.h ArcConfigFile.h \ Base64.h CheckSum.h DBInterface.h DateTime.h \ FileLock.h FileUtils.h FileAccess.h GUID.h IString.h \ Logger.h OptionParser.h StringConv.h Thread.h URL.h \ User.h UserConfig.h Utils.h XMLNode.h HostnameResolver.h \ Counter.h IntraProcessCounter.h IniConfig.h Profile.h \ Run.h Watchdog.h JobPerfLog.h win32.h $(MYSQL_WRAPPER_HEADER) @WIN32_FALSE@PLATFORM_SPECIFIC = Run_unix.cpp @WIN32_TRUE@PLATFORM_SPECIFIC = Run_win32.cpp win32.cpp libarccommon_la_SOURCES = ArcVersion.cpp ArcConfig.cpp ArcLocation.cpp \ ArcRegex.cpp ArcConfigIni.cpp ArcConfigFile.cpp \ Base64.cpp CheckSum.cpp DateTime.cpp \ FileLock.cpp FileUtils.cpp FileAccess.cpp GUID.cpp IString.cpp \ Logger.cpp OptionParser.cpp StringConv.cpp Thread.cpp URL.cpp \ User.cpp UserConfig.cpp Utils.cpp XMLNode.cpp HostnameResolver.cpp \ Counter.cpp IntraProcessCounter.cpp IniConfig.cpp Profile.cpp JobPerfLog.cpp \ $(PLATFORM_SPECIFIC) $(MYSQL_WRAPPER_CPP) libarccommon_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(GTHREAD_CFLAGS) $(ZLIB_CFLAGS) \ $(MYSQL_CFLAGS) $(AM_CXXFLAGS) libarccommon_la_LIBADD = $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(GTHREAD_LIBS) $(ZLIB_LIBS) \ $(REGEX_LIBS) $(UUID_LIBS) $(EXTRA_LIBS) $(SOCKET_LIBS) \ $(MYSQL_LIBS) $(LIBINTL) libarccommon_la_LDFLAGS = -version-info 3:0:0 arc_file_access_SOURCES = file_access.cpp file_access.h arc_file_access_CXXFLAGS = -I$(top_srcdir)/include $(AM_CXXFLAGS) arc_file_access_LDADD = arc_hostname_resolver_SOURCES = hostname_resolver.cpp hostname_resolver.h arc_hostname_resolver_CXXFLAGS = -I$(top_srcdir)/include $(AM_CXXFLAGS) arc_hostname_resolver_LDADD = all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/common/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/common/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarccommon.la: $(libarccommon_la_OBJECTS) $(libarccommon_la_DEPENDENCIES) $(libarccommon_la_LINK) -rpath $(libdir) $(libarccommon_la_OBJECTS) $(libarccommon_la_LIBADD) $(LIBS) install-pgmpkglibPROGRAMS: $(pgmpkglib_PROGRAMS) @$(NORMAL_INSTALL) test -z "$(pgmpkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pgmpkglibdir)" @list='$(pgmpkglib_PROGRAMS)'; test -n "$(pgmpkglibdir)" || list=; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p || test -f $$p1; \ then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(pgmpkglibdir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(pgmpkglibdir)$$dir" || exit $$?; \ } \ ; done uninstall-pgmpkglibPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(pgmpkglib_PROGRAMS)'; test -n "$(pgmpkglibdir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pgmpkglibdir)' && rm -f" 
$$files ")"; \ cd "$(DESTDIR)$(pgmpkglibdir)" && rm -f $$files clean-pgmpkglibPROGRAMS: @list='$(pgmpkglib_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list arc-file-access$(EXEEXT): $(arc_file_access_OBJECTS) $(arc_file_access_DEPENDENCIES) @rm -f arc-file-access$(EXEEXT) $(arc_file_access_LINK) $(arc_file_access_OBJECTS) $(arc_file_access_LDADD) $(LIBS) arc-hostname-resolver$(EXEEXT): $(arc_hostname_resolver_OBJECTS) $(arc_hostname_resolver_DEPENDENCIES) @rm -f arc-hostname-resolver$(EXEEXT) $(arc_hostname_resolver_LINK) $(arc_hostname_resolver_OBJECTS) $(arc_hostname_resolver_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arc_file_access-file_access.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arc_hostname_resolver-hostname_resolver.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-ArcConfig.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-ArcConfigFile.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-ArcConfigIni.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-ArcLocation.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-ArcRegex.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-ArcVersion.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-Base64.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-CheckSum.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-Counter.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-DateTime.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-FileAccess.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-FileLock.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-FileUtils.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-GUID.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-HostnameResolver.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-IString.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-IniConfig.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-IntraProcessCounter.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-JobPerfLog.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-Logger.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-MysqlWrapper.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-OptionParser.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-Profile.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-Run_unix.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-Run_win32.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-StringConv.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-Thread.Plo@am__quote@ @AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/libarccommon_la-URL.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-User.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-UserConfig.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-Utils.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-XMLNode.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccommon_la-win32.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarccommon_la-ArcVersion.lo: ArcVersion.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-ArcVersion.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-ArcVersion.Tpo -c -o libarccommon_la-ArcVersion.lo `test -f 'ArcVersion.cpp' || echo '$(srcdir)/'`ArcVersion.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-ArcVersion.Tpo $(DEPDIR)/libarccommon_la-ArcVersion.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArcVersion.cpp' object='libarccommon_la-ArcVersion.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-ArcVersion.lo `test -f 'ArcVersion.cpp' || echo '$(srcdir)/'`ArcVersion.cpp libarccommon_la-ArcConfig.lo: ArcConfig.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-ArcConfig.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-ArcConfig.Tpo -c -o libarccommon_la-ArcConfig.lo `test -f 'ArcConfig.cpp' || echo '$(srcdir)/'`ArcConfig.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-ArcConfig.Tpo $(DEPDIR)/libarccommon_la-ArcConfig.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArcConfig.cpp' object='libarccommon_la-ArcConfig.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ 
@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-ArcConfig.lo `test -f 'ArcConfig.cpp' || echo '$(srcdir)/'`ArcConfig.cpp libarccommon_la-ArcLocation.lo: ArcLocation.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-ArcLocation.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-ArcLocation.Tpo -c -o libarccommon_la-ArcLocation.lo `test -f 'ArcLocation.cpp' || echo '$(srcdir)/'`ArcLocation.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-ArcLocation.Tpo $(DEPDIR)/libarccommon_la-ArcLocation.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArcLocation.cpp' object='libarccommon_la-ArcLocation.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-ArcLocation.lo `test -f 'ArcLocation.cpp' || echo '$(srcdir)/'`ArcLocation.cpp libarccommon_la-ArcRegex.lo: ArcRegex.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-ArcRegex.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-ArcRegex.Tpo -c -o libarccommon_la-ArcRegex.lo `test -f 'ArcRegex.cpp' || echo '$(srcdir)/'`ArcRegex.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-ArcRegex.Tpo $(DEPDIR)/libarccommon_la-ArcRegex.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArcRegex.cpp' object='libarccommon_la-ArcRegex.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-ArcRegex.lo `test -f 'ArcRegex.cpp' || echo '$(srcdir)/'`ArcRegex.cpp libarccommon_la-ArcConfigIni.lo: ArcConfigIni.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-ArcConfigIni.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-ArcConfigIni.Tpo -c -o libarccommon_la-ArcConfigIni.lo `test -f 'ArcConfigIni.cpp' || echo '$(srcdir)/'`ArcConfigIni.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-ArcConfigIni.Tpo $(DEPDIR)/libarccommon_la-ArcConfigIni.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArcConfigIni.cpp' object='libarccommon_la-ArcConfigIni.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-ArcConfigIni.lo `test -f 'ArcConfigIni.cpp' || echo '$(srcdir)/'`ArcConfigIni.cpp 
libarccommon_la-ArcConfigFile.lo: ArcConfigFile.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-ArcConfigFile.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-ArcConfigFile.Tpo -c -o libarccommon_la-ArcConfigFile.lo `test -f 'ArcConfigFile.cpp' || echo '$(srcdir)/'`ArcConfigFile.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-ArcConfigFile.Tpo $(DEPDIR)/libarccommon_la-ArcConfigFile.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArcConfigFile.cpp' object='libarccommon_la-ArcConfigFile.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-ArcConfigFile.lo `test -f 'ArcConfigFile.cpp' || echo '$(srcdir)/'`ArcConfigFile.cpp libarccommon_la-Base64.lo: Base64.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-Base64.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-Base64.Tpo -c -o libarccommon_la-Base64.lo `test -f 'Base64.cpp' || echo '$(srcdir)/'`Base64.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-Base64.Tpo $(DEPDIR)/libarccommon_la-Base64.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Base64.cpp' object='libarccommon_la-Base64.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-Base64.lo `test -f 'Base64.cpp' || echo '$(srcdir)/'`Base64.cpp libarccommon_la-CheckSum.lo: CheckSum.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-CheckSum.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-CheckSum.Tpo -c -o libarccommon_la-CheckSum.lo `test -f 'CheckSum.cpp' || echo '$(srcdir)/'`CheckSum.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-CheckSum.Tpo $(DEPDIR)/libarccommon_la-CheckSum.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='CheckSum.cpp' object='libarccommon_la-CheckSum.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-CheckSum.lo `test -f 'CheckSum.cpp' || echo '$(srcdir)/'`CheckSum.cpp libarccommon_la-DateTime.lo: DateTime.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-DateTime.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-DateTime.Tpo -c -o 
libarccommon_la-DateTime.lo `test -f 'DateTime.cpp' || echo '$(srcdir)/'`DateTime.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-DateTime.Tpo $(DEPDIR)/libarccommon_la-DateTime.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DateTime.cpp' object='libarccommon_la-DateTime.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-DateTime.lo `test -f 'DateTime.cpp' || echo '$(srcdir)/'`DateTime.cpp libarccommon_la-FileLock.lo: FileLock.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-FileLock.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-FileLock.Tpo -c -o libarccommon_la-FileLock.lo `test -f 'FileLock.cpp' || echo '$(srcdir)/'`FileLock.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-FileLock.Tpo $(DEPDIR)/libarccommon_la-FileLock.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FileLock.cpp' object='libarccommon_la-FileLock.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-FileLock.lo `test -f 'FileLock.cpp' || echo '$(srcdir)/'`FileLock.cpp libarccommon_la-FileUtils.lo: FileUtils.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-FileUtils.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-FileUtils.Tpo -c -o libarccommon_la-FileUtils.lo `test -f 'FileUtils.cpp' || echo '$(srcdir)/'`FileUtils.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-FileUtils.Tpo $(DEPDIR)/libarccommon_la-FileUtils.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FileUtils.cpp' object='libarccommon_la-FileUtils.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-FileUtils.lo `test -f 'FileUtils.cpp' || echo '$(srcdir)/'`FileUtils.cpp libarccommon_la-FileAccess.lo: FileAccess.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-FileAccess.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-FileAccess.Tpo -c -o libarccommon_la-FileAccess.lo `test -f 'FileAccess.cpp' || echo '$(srcdir)/'`FileAccess.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-FileAccess.Tpo $(DEPDIR)/libarccommon_la-FileAccess.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FileAccess.cpp' object='libarccommon_la-FileAccess.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ 
DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-FileAccess.lo `test -f 'FileAccess.cpp' || echo '$(srcdir)/'`FileAccess.cpp libarccommon_la-GUID.lo: GUID.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-GUID.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-GUID.Tpo -c -o libarccommon_la-GUID.lo `test -f 'GUID.cpp' || echo '$(srcdir)/'`GUID.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-GUID.Tpo $(DEPDIR)/libarccommon_la-GUID.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='GUID.cpp' object='libarccommon_la-GUID.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-GUID.lo `test -f 'GUID.cpp' || echo '$(srcdir)/'`GUID.cpp libarccommon_la-IString.lo: IString.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-IString.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-IString.Tpo -c -o libarccommon_la-IString.lo `test -f 'IString.cpp' || echo '$(srcdir)/'`IString.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-IString.Tpo $(DEPDIR)/libarccommon_la-IString.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='IString.cpp' object='libarccommon_la-IString.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-IString.lo `test -f 'IString.cpp' || echo '$(srcdir)/'`IString.cpp libarccommon_la-Logger.lo: Logger.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-Logger.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-Logger.Tpo -c -o libarccommon_la-Logger.lo `test -f 'Logger.cpp' || echo '$(srcdir)/'`Logger.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-Logger.Tpo $(DEPDIR)/libarccommon_la-Logger.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Logger.cpp' object='libarccommon_la-Logger.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-Logger.lo `test -f 'Logger.cpp' || echo '$(srcdir)/'`Logger.cpp libarccommon_la-OptionParser.lo: OptionParser.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) 
--mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-OptionParser.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-OptionParser.Tpo -c -o libarccommon_la-OptionParser.lo `test -f 'OptionParser.cpp' || echo '$(srcdir)/'`OptionParser.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-OptionParser.Tpo $(DEPDIR)/libarccommon_la-OptionParser.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='OptionParser.cpp' object='libarccommon_la-OptionParser.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-OptionParser.lo `test -f 'OptionParser.cpp' || echo '$(srcdir)/'`OptionParser.cpp libarccommon_la-StringConv.lo: StringConv.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-StringConv.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-StringConv.Tpo -c -o libarccommon_la-StringConv.lo `test -f 'StringConv.cpp' || echo '$(srcdir)/'`StringConv.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-StringConv.Tpo $(DEPDIR)/libarccommon_la-StringConv.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='StringConv.cpp' object='libarccommon_la-StringConv.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-StringConv.lo `test -f 'StringConv.cpp' || echo '$(srcdir)/'`StringConv.cpp libarccommon_la-Thread.lo: Thread.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-Thread.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-Thread.Tpo -c -o libarccommon_la-Thread.lo `test -f 'Thread.cpp' || echo '$(srcdir)/'`Thread.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-Thread.Tpo $(DEPDIR)/libarccommon_la-Thread.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Thread.cpp' object='libarccommon_la-Thread.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-Thread.lo `test -f 'Thread.cpp' || echo '$(srcdir)/'`Thread.cpp libarccommon_la-URL.lo: URL.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-URL.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-URL.Tpo -c -o libarccommon_la-URL.lo `test -f 'URL.cpp' || echo '$(srcdir)/'`URL.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-URL.Tpo 
$(DEPDIR)/libarccommon_la-URL.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='URL.cpp' object='libarccommon_la-URL.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-URL.lo `test -f 'URL.cpp' || echo '$(srcdir)/'`URL.cpp libarccommon_la-User.lo: User.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-User.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-User.Tpo -c -o libarccommon_la-User.lo `test -f 'User.cpp' || echo '$(srcdir)/'`User.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-User.Tpo $(DEPDIR)/libarccommon_la-User.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='User.cpp' object='libarccommon_la-User.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-User.lo `test -f 'User.cpp' || echo '$(srcdir)/'`User.cpp libarccommon_la-UserConfig.lo: UserConfig.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-UserConfig.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-UserConfig.Tpo -c -o libarccommon_la-UserConfig.lo `test -f 'UserConfig.cpp' || echo '$(srcdir)/'`UserConfig.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-UserConfig.Tpo $(DEPDIR)/libarccommon_la-UserConfig.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='UserConfig.cpp' object='libarccommon_la-UserConfig.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-UserConfig.lo `test -f 'UserConfig.cpp' || echo '$(srcdir)/'`UserConfig.cpp libarccommon_la-Utils.lo: Utils.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-Utils.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-Utils.Tpo -c -o libarccommon_la-Utils.lo `test -f 'Utils.cpp' || echo '$(srcdir)/'`Utils.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-Utils.Tpo $(DEPDIR)/libarccommon_la-Utils.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Utils.cpp' object='libarccommon_la-Utils.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o 
libarccommon_la-Utils.lo `test -f 'Utils.cpp' || echo '$(srcdir)/'`Utils.cpp libarccommon_la-XMLNode.lo: XMLNode.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-XMLNode.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-XMLNode.Tpo -c -o libarccommon_la-XMLNode.lo `test -f 'XMLNode.cpp' || echo '$(srcdir)/'`XMLNode.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-XMLNode.Tpo $(DEPDIR)/libarccommon_la-XMLNode.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XMLNode.cpp' object='libarccommon_la-XMLNode.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-XMLNode.lo `test -f 'XMLNode.cpp' || echo '$(srcdir)/'`XMLNode.cpp libarccommon_la-HostnameResolver.lo: HostnameResolver.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-HostnameResolver.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-HostnameResolver.Tpo -c -o libarccommon_la-HostnameResolver.lo `test -f 'HostnameResolver.cpp' || echo '$(srcdir)/'`HostnameResolver.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-HostnameResolver.Tpo $(DEPDIR)/libarccommon_la-HostnameResolver.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='HostnameResolver.cpp' object='libarccommon_la-HostnameResolver.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-HostnameResolver.lo `test -f 'HostnameResolver.cpp' || echo '$(srcdir)/'`HostnameResolver.cpp libarccommon_la-Counter.lo: Counter.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-Counter.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-Counter.Tpo -c -o libarccommon_la-Counter.lo `test -f 'Counter.cpp' || echo '$(srcdir)/'`Counter.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-Counter.Tpo $(DEPDIR)/libarccommon_la-Counter.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Counter.cpp' object='libarccommon_la-Counter.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-Counter.lo `test -f 'Counter.cpp' || echo '$(srcdir)/'`Counter.cpp libarccommon_la-IntraProcessCounter.lo: IntraProcessCounter.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-IntraProcessCounter.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-IntraProcessCounter.Tpo -c -o libarccommon_la-IntraProcessCounter.lo `test -f 'IntraProcessCounter.cpp' || echo '$(srcdir)/'`IntraProcessCounter.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-IntraProcessCounter.Tpo $(DEPDIR)/libarccommon_la-IntraProcessCounter.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='IntraProcessCounter.cpp' object='libarccommon_la-IntraProcessCounter.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-IntraProcessCounter.lo `test -f 'IntraProcessCounter.cpp' || echo '$(srcdir)/'`IntraProcessCounter.cpp libarccommon_la-IniConfig.lo: IniConfig.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-IniConfig.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-IniConfig.Tpo -c -o libarccommon_la-IniConfig.lo `test -f 'IniConfig.cpp' || echo '$(srcdir)/'`IniConfig.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-IniConfig.Tpo $(DEPDIR)/libarccommon_la-IniConfig.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='IniConfig.cpp' object='libarccommon_la-IniConfig.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-IniConfig.lo `test -f 'IniConfig.cpp' || echo '$(srcdir)/'`IniConfig.cpp libarccommon_la-Profile.lo: Profile.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-Profile.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-Profile.Tpo -c -o libarccommon_la-Profile.lo `test -f 'Profile.cpp' || echo '$(srcdir)/'`Profile.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-Profile.Tpo $(DEPDIR)/libarccommon_la-Profile.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Profile.cpp' object='libarccommon_la-Profile.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-Profile.lo `test -f 'Profile.cpp' || echo '$(srcdir)/'`Profile.cpp libarccommon_la-JobPerfLog.lo: JobPerfLog.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-JobPerfLog.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-JobPerfLog.Tpo -c -o libarccommon_la-JobPerfLog.lo `test -f 'JobPerfLog.cpp' || echo '$(srcdir)/'`JobPerfLog.cpp @am__fastdepCXX_TRUE@ $(am__mv) 
$(DEPDIR)/libarccommon_la-JobPerfLog.Tpo $(DEPDIR)/libarccommon_la-JobPerfLog.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobPerfLog.cpp' object='libarccommon_la-JobPerfLog.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-JobPerfLog.lo `test -f 'JobPerfLog.cpp' || echo '$(srcdir)/'`JobPerfLog.cpp libarccommon_la-Run_unix.lo: Run_unix.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-Run_unix.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-Run_unix.Tpo -c -o libarccommon_la-Run_unix.lo `test -f 'Run_unix.cpp' || echo '$(srcdir)/'`Run_unix.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-Run_unix.Tpo $(DEPDIR)/libarccommon_la-Run_unix.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Run_unix.cpp' object='libarccommon_la-Run_unix.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-Run_unix.lo `test -f 'Run_unix.cpp' || echo '$(srcdir)/'`Run_unix.cpp libarccommon_la-Run_win32.lo: Run_win32.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-Run_win32.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-Run_win32.Tpo -c -o libarccommon_la-Run_win32.lo `test -f 'Run_win32.cpp' || echo '$(srcdir)/'`Run_win32.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-Run_win32.Tpo $(DEPDIR)/libarccommon_la-Run_win32.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Run_win32.cpp' object='libarccommon_la-Run_win32.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-Run_win32.lo `test -f 'Run_win32.cpp' || echo '$(srcdir)/'`Run_win32.cpp libarccommon_la-win32.lo: win32.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-win32.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-win32.Tpo -c -o libarccommon_la-win32.lo `test -f 'win32.cpp' || echo '$(srcdir)/'`win32.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-win32.Tpo $(DEPDIR)/libarccommon_la-win32.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='win32.cpp' object='libarccommon_la-win32.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-win32.lo `test -f 'win32.cpp' || echo '$(srcdir)/'`win32.cpp libarccommon_la-MysqlWrapper.lo: MysqlWrapper.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -MT libarccommon_la-MysqlWrapper.lo -MD -MP -MF $(DEPDIR)/libarccommon_la-MysqlWrapper.Tpo -c -o libarccommon_la-MysqlWrapper.lo `test -f 'MysqlWrapper.cpp' || echo '$(srcdir)/'`MysqlWrapper.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccommon_la-MysqlWrapper.Tpo $(DEPDIR)/libarccommon_la-MysqlWrapper.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='MysqlWrapper.cpp' object='libarccommon_la-MysqlWrapper.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccommon_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccommon_la-MysqlWrapper.lo `test -f 'MysqlWrapper.cpp' || echo '$(srcdir)/'`MysqlWrapper.cpp arc_file_access-file_access.o: file_access.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_file_access_CXXFLAGS) $(CXXFLAGS) -MT arc_file_access-file_access.o -MD -MP -MF $(DEPDIR)/arc_file_access-file_access.Tpo -c -o arc_file_access-file_access.o `test -f 'file_access.cpp' || echo '$(srcdir)/'`file_access.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arc_file_access-file_access.Tpo $(DEPDIR)/arc_file_access-file_access.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='file_access.cpp' object='arc_file_access-file_access.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_file_access_CXXFLAGS) $(CXXFLAGS) -c -o arc_file_access-file_access.o `test -f 'file_access.cpp' || echo '$(srcdir)/'`file_access.cpp arc_file_access-file_access.obj: file_access.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_file_access_CXXFLAGS) $(CXXFLAGS) -MT arc_file_access-file_access.obj -MD -MP -MF $(DEPDIR)/arc_file_access-file_access.Tpo -c -o arc_file_access-file_access.obj `if test -f 'file_access.cpp'; then $(CYGPATH_W) 'file_access.cpp'; else $(CYGPATH_W) '$(srcdir)/file_access.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arc_file_access-file_access.Tpo $(DEPDIR)/arc_file_access-file_access.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='file_access.cpp' object='arc_file_access-file_access.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_file_access_CXXFLAGS) $(CXXFLAGS) -c -o arc_file_access-file_access.obj `if test -f 'file_access.cpp'; then $(CYGPATH_W) 'file_access.cpp'; else $(CYGPATH_W) '$(srcdir)/file_access.cpp'; fi` arc_hostname_resolver-hostname_resolver.o: hostname_resolver.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_hostname_resolver_CXXFLAGS) $(CXXFLAGS) -MT arc_hostname_resolver-hostname_resolver.o -MD 
-MP -MF $(DEPDIR)/arc_hostname_resolver-hostname_resolver.Tpo -c -o arc_hostname_resolver-hostname_resolver.o `test -f 'hostname_resolver.cpp' || echo '$(srcdir)/'`hostname_resolver.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arc_hostname_resolver-hostname_resolver.Tpo $(DEPDIR)/arc_hostname_resolver-hostname_resolver.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='hostname_resolver.cpp' object='arc_hostname_resolver-hostname_resolver.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_hostname_resolver_CXXFLAGS) $(CXXFLAGS) -c -o arc_hostname_resolver-hostname_resolver.o `test -f 'hostname_resolver.cpp' || echo '$(srcdir)/'`hostname_resolver.cpp arc_hostname_resolver-hostname_resolver.obj: hostname_resolver.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_hostname_resolver_CXXFLAGS) $(CXXFLAGS) -MT arc_hostname_resolver-hostname_resolver.obj -MD -MP -MF $(DEPDIR)/arc_hostname_resolver-hostname_resolver.Tpo -c -o arc_hostname_resolver-hostname_resolver.obj `if test -f 'hostname_resolver.cpp'; then $(CYGPATH_W) 'hostname_resolver.cpp'; else $(CYGPATH_W) '$(srcdir)/hostname_resolver.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arc_hostname_resolver-hostname_resolver.Tpo $(DEPDIR)/arc_hostname_resolver-hostname_resolver.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='hostname_resolver.cpp' object='arc_hostname_resolver-hostname_resolver.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_hostname_resolver_CXXFLAGS) $(CXXFLAGS) -c -o arc_hostname_resolver-hostname_resolver.obj `if test -f 'hostname_resolver.cpp'; then $(CYGPATH_W) 'hostname_resolver.cpp'; else $(CYGPATH_W) '$(srcdir)/hostname_resolver.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libarccommon_laHEADERS: $(libarccommon_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarccommon_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarccommon_ladir)" @list='$(libarccommon_la_HEADERS)'; test -n "$(libarccommon_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarccommon_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarccommon_ladir)" || exit $$?; \ done uninstall-libarccommon_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarccommon_la_HEADERS)'; test -n "$(libarccommon_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarccommon_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libarccommon_ladir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
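# Illustration of the comment above (not part of the generated rules; the
# values shown are only examples): a variable that is not fixed by
# `config.status' can be overridden for a single invocation,
#   make CXXFLAGS='-O0 -g'
# and, because the subdirectories are mostly independent, a single component
# can be rebuilt directly without going through this Makefile,
#   cd src && make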
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) $(HEADERS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(pgmpkglibdir)" "$(DESTDIR)$(libarccommon_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
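# Informal summary of the cleanup targets defined around this point (a quick
# reference only, not an exhaustive description of the generated rules):
#   make mostlyclean      - remove most intermediate build products (e.g. *.lo)
#   make clean            - additionally remove built libraries and programs
#   make distclean        - additionally remove configure output such as
#                           Makefile, so `configure' must be re-run
#   make maintainer-clean - additionally remove files that, as the message
#                           above warns, may need special tools to rebuild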
clean: clean-recursive clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ clean-pgmpkglibPROGRAMS mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-libarccommon_laHEADERS \ install-pgmpkglibPROGRAMS install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-libLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-libLTLIBRARIES \ uninstall-libarccommon_laHEADERS uninstall-pgmpkglibPROGRAMS .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic \ clean-libLTLIBRARIES clean-libtool clean-pgmpkglibPROGRAMS \ ctags ctags-recursive distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-libLTLIBRARIES \ install-libarccommon_laHEADERS install-man install-pdf \ install-pdf-am install-pgmpkglibPROGRAMS install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am \ uninstall-libLTLIBRARIES uninstall-libarccommon_laHEADERS \ uninstall-pgmpkglibPROGRAMS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/Counter.cpp0000644000000000000000000000012411720467271023077 xustar000000000000000027 mtime=1329753785.729844 27 atime=1513200574.959707 30 ctime=1513200658.865733613 nordugrid-arc-5.4.2/src/hed/libs/common/Counter.cpp0000644000175000002070000000500011720467271023137 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif // Counter.cpp #include #include "Counter.h" namespace Arc { const Glib::TimeVal ETERNAL(G_MAXLONG, 0); const Glib::TimeVal HISTORIC(G_MINLONG, 0); Counter::Counter() { // Nothing needs to be done. } Counter::~Counter() { // Nothing needs to be done. 
} Glib::TimeVal Counter::getCurrentTime() { Glib::TimeVal currentTime; currentTime.assign_current_time(); return currentTime; } Glib::TimeVal Counter::getExpiryTime(Glib::TimeVal duration) { if (duration < ETERNAL) return getCurrentTime() + duration; else return ETERNAL; } CounterTicket Counter::getCounterTicket(Counter::IDType reservationID, Glib::TimeVal expiryTime, Counter *counter) { return CounterTicket(reservationID, expiryTime, counter); } ExpirationReminder Counter::getExpirationReminder(Glib::TimeVal expTime, Counter::IDType resID) { return ExpirationReminder(expTime, resID); } CounterTicket::CounterTicket() : reservationID(0), expiryTime(HISTORIC), counter(0) { // Nothing else needs to be done. } CounterTicket::CounterTicket(Counter::IDType reservationID, Glib::TimeVal expiryTime, Counter *counter) : reservationID(reservationID), expiryTime(expiryTime), counter(counter) { // Nothing else needs to be done. } bool CounterTicket::isValid() { return expiryTime > counter->getCurrentTime(); } void CounterTicket::extend(Glib::TimeVal duration) { counter->extend(reservationID, expiryTime, duration); } void CounterTicket::cancel() { counter->cancel(reservationID); reservationID = 0; expiryTime = HISTORIC; counter = 0; } ExpirationReminder::ExpirationReminder(Glib::TimeVal expiryTime, Counter::IDType reservationID) : expiryTime(expiryTime), reservationID(reservationID) { // Nothing else needs to be done. } bool ExpirationReminder::operator<(const ExpirationReminder& other) const { // Smaller time has higher priority! return expiryTime > other.expiryTime; } Glib::TimeVal ExpirationReminder::getExpiryTime() const { return expiryTime; } Counter::IDType ExpirationReminder::getReservationID() const { return reservationID; } } nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/DateTime.h0000644000000000000000000000012413124220410022577 xustar000000000000000027 mtime=1498489096.378221 27 atime=1513200574.949707 30 ctime=1513200658.812732965 nordugrid-arc-5.4.2/src/hed/libs/common/DateTime.h0000644000175000002070000001567213124220410022657 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef ARCLIB_TIME #define ARCLIB_TIME #include #include #include #include #include namespace Arc { /** \addtogroup common * @{ */ /// An enumeration that contains the possible textual time formats. enum TimeFormat { MDSTime, ///< YYYYMMDDHHMMSSZ ASCTime, ///< Day Mon DD HH:MM:SS YYYY UserTime, ///< YYYY-MM-DD HH:MM:SS ISOTime, ///< YYYY-MM-DDTHH:MM:SS+HH:MM UTCTime, ///< YYYY-MM-DDTHH:MM:SSZ RFC1123Time, ///< Day, DD Mon YYYY HH:MM:SS GMT EpochTime, ///< 1234567890 UserExtTime, ///< YYYY-MM-DD HH:MM:SS.mmmmmm (microseconds resolution) ElasticTime, ///< YYYY-MM-DD HH:MM:SS.mmm (milliseconds resolution, suitable for Elasticsearch) }; /// Base to use when constructing a new Period. enum PeriodBase { PeriodNanoseconds, ///< Nanoseconds PeriodMicroseconds, ///< Microseconds PeriodMiliseconds, ///< Milliseconds PeriodSeconds, ///< Seconds PeriodMinutes, ///< Minutes PeriodHours, ///< Hours PeriodDays, ///< Days PeriodWeeks ///< Weeks }; /// A Period represents a length of time. /** Period represents a length of time (eg 2 mins and 30.1 seconds), whereas Time represents a moment of time (eg midnight on 1st Jan 2000). \see Time \headerfile DateTime.h arc/DateTime.h */ class Period { public: /// Default constructor. The period is set to 0 length. Period(); /// Constructor that takes a time_t variable and stores it. Period(time_t); /// Constructor that takes seconds and nanoseconds and stores them. 
    Period(time_t seconds, uint32_t nanoseconds);
    /// Constructor that tries to convert a string.
    Period(const std::string&, PeriodBase base = PeriodSeconds);
    /// Assignment operator from a time_t.
    Period& operator=(time_t);
    /// Assignment operator from a Period.
    Period& operator=(const Period&);
    /// Sets the period in seconds.
    void SetPeriod(time_t sec);
    /// Sets the period in seconds and nanoseconds.
    void SetPeriod(time_t sec, uint32_t nanosec);
    /// Gets the period in seconds.
    time_t GetPeriod() const;
    /// Gets the number of nanoseconds after the last whole second.
    time_t GetPeriodNanoseconds() const;
    /// For use with IString.
    const sigc::slot* istr() const;
    /// Returns a string representation of the period.
    operator std::string() const;
    /// Comparing two Period objects.
    bool operator<(const Period&) const;
    /// Comparing two Period objects.
    bool operator>(const Period&) const;
    /// Comparing two Period objects.
    bool operator<=(const Period&) const;
    /// Comparing two Period objects.
    bool operator>=(const Period&) const;
    /// Comparing two Period objects.
    bool operator==(const Period&) const;
    /// Comparing two Period objects.
    bool operator!=(const Period&) const;
    Period& operator+=(const Period&);

  private:
    /// The duration of the period
    time_t seconds;
    uint32_t nanoseconds;
    /// Internal IString implementation
    const char* IStr() const;
    sigc::slot slot;
    std::string is;
  };

  /// Prints a Period-object to the given ostream -- typically cout.
  std::ostream& operator<<(std::ostream&, const Period&);

  /// A class for storing and manipulating times.
  /** Time represents a moment of time (eg midnight on 1st Jan 2000),
      whereas Period represents a length of time (eg 2 mins and 30.1 seconds).
      \see Period
      \headerfile DateTime.h arc/DateTime.h */
  class Time {
  public:
    /// Default constructor. The time is put equal the current time.
    Time();
    /// Constructor that takes a time_t variable and stores it.
    Time(time_t);
    /// Constructor that takes a fine grained time variables and stores them.
    Time(time_t time, uint32_t nanosec);
    /// Constructor that tries to convert a string into a time_t.
    Time(const std::string&);
    /// Assignment operator from a time_t.
    Time& operator=(time_t);
    /// Assignment operator from a Time.
    Time& operator=(const Time&);
    /// Assignment operator from a char pointer.
    Time& operator=(const char*);
    /// Assignment operator from a string.
    Time& operator=(const std::string&);
    /// Sets the time.
    void SetTime(time_t);
    /// Sets the fine grained time.
    void SetTime(time_t time, uint32_t nanosec);
    /// Gets the time in seconds.
    time_t GetTime() const;
    /// Gets the nanoseconds fraction of the time.
    time_t GetTimeNanoseconds() const;
    /// Returns a string representation of the time, using the default format.
    operator std::string() const;
    /// Returns a string representation of the time, using the specified format.
    std::string str(const TimeFormat& = time_format) const;
    /// Sets the default format for time strings.
    static void SetFormat(const TimeFormat&);
    /// Gets the default format for time strings.
    static TimeFormat GetFormat();
    /// Comparing two Time objects.
    bool operator<(const Time&) const;
    /// Comparing two Time objects.
    bool operator>(const Time&) const;
    /// Comparing two Time objects.
    bool operator<=(const Time&) const;
    /// Comparing two Time objects.
    bool operator>=(const Time&) const;
    /// Comparing two Time objects.
    bool operator==(const Time&) const;
    /// Comparing two Time objects.
    bool operator!=(const Time&) const;
    /// Adding Time object with Period object.
Time operator+(const Period&) const; /// Subtracting Period object from Time object. Time operator-(const Period&) const; /// Subtracting Time object from the other Time object. Period operator-(const Time&) const; /// Number of seconds in a year (365 days) static const int YEAR = 31536000; /// Number of seconds in 30 days static const int MONTH = 2592000; /// Number of seconds in a week static const int WEEK = 604800; /// Number of seconds in a day static const int DAY = 86400; /// Number of seconds in an hour static const int HOUR = 3600; /// Undefined time static const time_t UNDEFINED = (time_t)(-1); private: /// The time stored -- by default it is equal to the current time. time_t gtime; /// The nanosecond part of time stored -- by default it is 0. uint32_t gnano; /// The time-format stored. By default it is equal to UserTime static TimeFormat time_format; }; /// Prints a Time-object to the given ostream -- typically cout. std::ostream& operator<<(std::ostream&, const Time&); /// Returns a time-stamp of the current time in some format. std::string TimeStamp(const TimeFormat& = Time::GetFormat()); /// Returns a time-stamp of some specified time in some format. std::string TimeStamp(Time, const TimeFormat& = Time::GetFormat()); /** @} */ } // namespace Arc #endif // ARCLIB_TIME nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/User.cpp0000644000000000000000000000012413214315176022372 xustar000000000000000027 mtime=1513200254.761812 27 atime=1513200574.958707 30 ctime=1513200658.861733564 nordugrid-arc-5.4.2/src/hed/libs/common/User.cpp0000644000175000002070000002075713214315176022452 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #ifndef WIN32 #include #include #include #include #include #include #else // WIN32 #include #endif #include #include #include "User.h" namespace Arc { SimpleCondition UserSwitch::suid_lock; int UserSwitch::suid_count = 0; int UserSwitch::suid_uid_orig = -1; int UserSwitch::suid_gid_orig = -1; #ifndef WIN32 static uid_t get_user_id(void) { uid_t user_id = getuid(); if (user_id != 0) return user_id; std::string user_s = GetEnv("USER_ID"); if (user_s.empty()) return 0; stringto(user_s, user_id); return user_id; } static uid_t get_group_id(void) { return getgid(); } bool User::set(const struct passwd *pwd_p) { if (pwd_p == NULL) return false; name = pwd_p->pw_name; uid = pwd_p->pw_uid; gid = pwd_p->pw_gid; home = GetEnv("HOME"); // don't use HOME if user is different from current user if (home.empty() || uid != get_user_id()) home = pwd_p->pw_dir; return true; } User::User(void): valid(false) { uid = get_user_id(); gid = get_group_id(); struct passwd pwd; char pwdbuf[2048]; struct passwd *pwd_p; if (getpwuid_r(uid, &pwd, pwdbuf, sizeof(pwdbuf), &pwd_p) != 0) return; if (!set(pwd_p)) return; valid = true; } // Unix implementation User::User(const std::string& name, const std::string& group): valid(false) { this->name = name; struct passwd pwd; char pwdbuf[2048]; struct passwd *pwd_p; if (getpwnam_r(name.c_str(), &pwd, pwdbuf, sizeof(pwdbuf), &pwd_p) != 0) return; if (!set(pwd_p)) return; if (!group.empty()) { // override gid with given group struct group* gr = getgrnam(group.c_str()); if (!gr) return; gid = gr->gr_gid; } valid = true; } User::User(int uid, int gid): valid(false) { this->uid = uid; this->gid = gid; struct passwd pwd; char pwdbuf[2048]; struct passwd *pwd_p; if (getpwuid_r(uid, &pwd, pwdbuf, sizeof(pwdbuf), &pwd_p) != 0) return; if (!set(pwd_p)) return; // override what is found 
in passwd with given gid if (gid != -1) this->gid = gid; valid = true; } int User::check_file_access(const std::string& path, int flags) const { struct stat st; mode_t m; char **grmem; flags &= O_RDWR | O_RDONLY | O_WRONLY; if ((flags != O_RDWR) && (flags != O_RDONLY) && (flags != O_WRONLY)) return -1; if (getuid() != 0) { /* not root - just try to open */ int h; if ((h = open(path.c_str(), flags)) == -1) return -1; close(h); return 0; } if (uid == 0) return 0; /* check for file */ if (stat(path.c_str(), &st) != 0) return -1; if (!S_ISREG(st.st_mode)) return -1; m = 0; if (st.st_uid == uid) m |= st.st_mode & (S_IRUSR | S_IWUSR); if (st.st_gid == gid) m |= st.st_mode & (S_IRGRP | S_IWGRP); else { char grbuf[2048]; struct group grp; struct group *grp_p = NULL; char pwdbuf[2048]; struct passwd pwd; struct passwd *pwd_p = NULL; getpwuid_r(uid, &pwd, pwdbuf, sizeof(pwdbuf), &pwd_p); getgrgid_r(st.st_gid, &grp, grbuf, sizeof(grbuf), &grp_p); if ((grp_p != NULL) && (pwd_p != NULL)) for (grmem = grp_p->gr_mem; (*grmem) != NULL; grmem++) if (strcmp(*grmem, pwd_p->pw_name) == 0) { m |= st.st_mode & (S_IRGRP | S_IWGRP); break; } } m |= st.st_mode & (S_IROTH | S_IWOTH); if (flags == O_RDWR) { if (((m & (S_IRUSR | S_IRGRP | S_IROTH)) == 0) || ((m & (S_IWUSR | S_IWGRP | S_IWOTH)) == 0)) return 1; } else if (flags == O_RDONLY) { if ((m & (S_IRUSR | S_IRGRP | S_IROTH)) == 0) return 1; } else if (flags == O_WRONLY) { if ((m & (S_IWUSR | S_IWGRP | S_IWOTH)) == 0) return 1; } else return -1; /* check if all directories allow to read - not implemented yet */ return 0; } bool User::SwitchUser() const { // set proper umask umask(0077); if (getuid() != 0 && uid != 0 && getuid() != uid) return false; if (uid != 0) { setgid(gid); // this is not an error if group failed, not a big deal if (setuid(uid) != 0) return false; } return true; } #else // Win32 implementation static uid_t get_user_id(void) { return 0; // TODO: The user id is not used on windows for file permissions etc. } static uid_t get_group_id(void) { return 0; // TODO: The user id is not used on windows for file permissions etc. 
} bool User::set(const struct passwd *pwd_p) { if (pwd_p == NULL) return false; name = pwd_p->pw_name; home = pwd_p->pw_dir; uid = pwd_p->pw_uid; gid = pwd_p->pw_gid; return true; } User::User(void): valid(false) { int uid = get_user_id(); int gid = get_group_id(); bool found; struct passwd pwd_p; std::string name = Glib::getenv("USERNAME", found); if (!found) name = ""; std::string home = g_get_user_config_dir(); pwd_p.pw_name = const_cast(name.c_str()); pwd_p.pw_uid = uid; pwd_p.pw_gid = gid; pwd_p.pw_dir = const_cast(home.c_str()); set(&pwd_p); valid = true; } User::User(const std::string& name, const std::string& group): valid(false) { this->name = name; int uid = get_user_id(); int gid = get_group_id(); // TODO: get gid from group struct passwd pwd_p; std::string home = g_get_user_config_dir(); pwd_p.pw_name = const_cast(name.c_str()); pwd_p.pw_uid = uid; pwd_p.pw_gid = gid; pwd_p.pw_dir = const_cast(home.c_str()); set(&pwd_p); valid = true; } User::User(int uid, int gid): valid(false) { this->uid = uid; this->gid = gid; if (this->gid == -1) this->gid = 0; bool found; struct passwd pwd_p; std::string name = Glib::getenv("USERNAME", found); if (!found) name = ""; std::string home = g_get_user_config_dir(); pwd_p.pw_name = const_cast(name.c_str()); pwd_p.pw_uid = uid; pwd_p.pw_gid = gid; pwd_p.pw_dir = const_cast(home.c_str()); set(&pwd_p); valid = true; } int User::check_file_access(const std::string& path, int flags) const { // XXX NOP return 0; } bool User::SwitchUser() const { // XXX NOP return false; } #endif #ifndef WIN32 UserSwitch::UserSwitch(int uid,int gid):valid(false) { suid_lock.lock(); // locking while analyzing situation while(suid_count > 0) { // Already locked // Check if request is compatible with current situation bool need_switch = false; if(uid) { if(uid != geteuid()) need_switch = true; } else { if(suid_uid_orig != geteuid()) need_switch = true; }; if(gid) { if(gid != getegid()) need_switch = true; } else { if(suid_gid_orig != getegid()) need_switch = true; }; if(!need_switch) { // can join existing switch ++suid_count; valid = true; suid_lock.unlock(); return; }; // must wait till released suid_lock.wait_nonblock(); // and then re-check }; // First request - apply // Make sure current state is properly set if(suid_uid_orig == -1) { suid_uid_orig = geteuid(); suid_gid_orig = getegid(); }; // Try to apply new state if(gid && (suid_gid_orig != gid)) { if(setegid(gid) == -1) { suid_lock.unlock(); return; }; }; if(uid && (suid_uid_orig != uid)) { if(seteuid(uid) == -1) { if(suid_gid_orig != gid) setegid(suid_gid_orig); suid_lock.unlock(); return; }; }; // switch done - record that ++suid_count; valid = true; suid_lock.unlock(); } UserSwitch::~UserSwitch(void) { if(valid) { suid_lock.lock(); --suid_count; if(suid_count <= 0) { if(suid_uid_orig != geteuid()) seteuid(suid_uid_orig); if(suid_gid_orig != getegid()) setegid(suid_gid_orig); suid_lock.signal_nonblock(); }; suid_lock.unlock(); }; } void UserSwitch::resetPostFork(void) { valid = false; } #else UserSwitch::UserSwitch(int uid,int gid):old_uid(0),old_gid(0),valid(false) { } UserSwitch::~UserSwitch(void) { } void UserSwitch::resetPostFork(void) { } #endif } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/test0000644000000000000000000000013213214316022021640 xustar000000000000000030 mtime=1513200658.919734274 30 atime=1513200668.720854145 30 ctime=1513200658.919734274 nordugrid-arc-5.4.2/src/hed/libs/common/test/0000755000175000002070000000000013214316022021763 
5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/common/test/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712113711433023757 xustar000000000000000027 mtime=1362072347.509927 30 atime=1513200596.262967957 30 ctime=1513200658.901734054 nordugrid-arc-5.4.2/src/hed/libs/common/test/Makefile.am0000644000175000002070000001277512113711433024035 0ustar00mockbuildmock00000000000000if MYSQL_LIBRARY_ENABLED MYSQL_WRAPPER_TEST = MysqlWrapperTest else MYSQL_WRAPPER_TEST = endif TESTS = URLTest LoggerTest RunTest XMLNodeTest FileAccessTest FileUtilsTest \ ProfileTest ArcRegexTest FileLockTest EnvTest UserConfigTest \ StringConvTest CheckSumTest WatchdogTest UserTest $(MYSQL_WRAPPER_TEST) \ Base64Test check_PROGRAMS = $(TESTS) ThreadTest TESTS_ENVIRONMENT = srcdir=$(srcdir) URLTest_SOURCES = $(top_srcdir)/src/Test.cpp URLTest.cpp URLTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) URLTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) LoggerTest_SOURCES = $(top_srcdir)/src/Test.cpp LoggerTest.cpp LoggerTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) LoggerTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) RunTest_SOURCES = $(top_srcdir)/src/Test.cpp RunTest.cpp RunTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) RunTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) ProfileTest_SOURCES = $(top_srcdir)/src/Test.cpp ProfileTest.cpp ProfileTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) ProfileTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) ThreadTest_SOURCES = $(top_srcdir)/src/Test.cpp ThreadTest.cpp ThreadTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) ThreadTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) XMLNodeTest_SOURCES = $(top_srcdir)/src/Test.cpp XMLNodeTest.cpp XMLNodeTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) XMLNodeTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) FileAccessTest_SOURCES = $(top_srcdir)/src/Test.cpp FileAccessTest.cpp FileAccessTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) FileAccessTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) FileUtilsTest_SOURCES = $(top_srcdir)/src/Test.cpp FileUtilsTest.cpp FileUtilsTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) FileUtilsTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) ArcRegexTest_SOURCES = $(top_srcdir)/src/Test.cpp ArcRegexTest.cpp ArcRegexTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) ArcRegexTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) FileLockTest_SOURCES = $(top_srcdir)/src/Test.cpp FileLockTest.cpp FileLockTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) FileLockTest_LDADD = \ 
$(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) EnvTest_SOURCES = $(top_srcdir)/src/Test.cpp EnvTest.cpp EnvTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) EnvTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) UserConfigTest_SOURCES = $(top_srcdir)/src/Test.cpp UserConfigTest.cpp UserConfigTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) UserConfigTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) StringConvTest_SOURCES = $(top_srcdir)/src/Test.cpp StringConvTest.cpp StringConvTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) StringConvTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) CheckSumTest_SOURCES = $(top_srcdir)/src/Test.cpp CheckSumTest.cpp CheckSumTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) CheckSumTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) WatchdogTest_SOURCES = $(top_srcdir)/src/Test.cpp WatchdogTest.cpp WatchdogTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) WatchdogTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) UserTest_SOURCES = $(top_srcdir)/src/Test.cpp UserTest.cpp UserTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) UserTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) if MYSQL_LIBRARY_ENABLED MysqlWrapperTest_SOURCES = $(top_srcdir)/src/Test.cpp MysqlWrapperTest.cpp MysqlWrapperTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(MYSQL_CFLAGS) $(AM_CXXFLAGS) MysqlWrapperTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(MYSQL_LIBS) endif Base64Test_SOURCES = $(top_srcdir)/src/Test.cpp Base64Test.cpp Base64Test_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) Base64Test_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) EXTRA_DIST = rcode nordugrid-arc-5.4.2/src/hed/libs/common/test/PaxHeaders.7502/RunTest.cpp0000644000000000000000000000012212032756371024040 xustar000000000000000027 mtime=1349246201.310118 27 atime=1513200574.860706 28 ctime=1513200658.9137342 nordugrid-arc-5.4.2/src/hed/libs/common/test/RunTest.cpp0000644000175000002070000000460412032756371024113 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <cppunit/extensions/HelperMacros.h> #include <list> #include <arc/Run.h> #include <arc/User.h> #include <arc/Utils.h> class RunTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(RunTest); CPPUNIT_TEST(TestRun0); CPPUNIT_TEST(TestRun255); CPPUNIT_TEST(TestRunMany); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void TestRun0(); void TestRun255(); void TestRunMany(); private: std::string srcdir; }; void RunTest::setUp() { srcdir = Arc::GetEnv("srcdir"); if (srcdir.length() == 0) srcdir = "."; } void RunTest::tearDown() { } static void initializer_func(void* arg) { Arc::UserSwitch u(0,0); CPPUNIT_ASSERT(arg == (void*)1); } void RunTest::TestRun0() { std::string outstr; std::string errstr; Arc::Run run(srcdir + "/rcode 0"); run.AssignStdout(outstr);
run.AssignStderr(errstr); run.AssignInitializer(&initializer_func,(void*)1); CPPUNIT_ASSERT((bool)run); CPPUNIT_ASSERT(run.Start()); CPPUNIT_ASSERT(run.Wait(10)); CPPUNIT_ASSERT_EQUAL(0, run.Result()); #ifndef WIN32 CPPUNIT_ASSERT_EQUAL(std::string("STDOUT"), outstr); CPPUNIT_ASSERT_EQUAL(std::string("STDERR"), errstr); #endif } void RunTest::TestRun255() { std::string outstr; std::string errstr; Arc::Run run(srcdir + "/rcode 255"); run.AssignStdout(outstr); run.AssignStderr(errstr); CPPUNIT_ASSERT((bool)run); CPPUNIT_ASSERT(run.Start()); CPPUNIT_ASSERT(run.Wait(10)); CPPUNIT_ASSERT_EQUAL(255, run.Result()); } class RunH { public: unsigned long long int cnt; Arc::Run* run; }; void RunTest::TestRunMany() { std::list<RunH> runs; for(int n=0;n<5000;++n) { //std::cerr<<...; for(std::list<RunH>::iterator r = runs.begin();r != runs.end();) { //std::cerr<<r->cnt<<"/"<<r->run->getPid()<<" "; if(r->run->Running()) { ++(r->cnt); ++r; continue; }; delete (r->run); r = runs.erase(r); } //std::cerr<<...; RunH r; r.cnt = 0; r.run = new Arc::Run(...); if(r.run->Start()) runs.push_back(r); } //std::cerr<<...; for(std::list<RunH>::iterator r = runs.begin();r != runs.end();++r) { CPPUNIT_ASSERT(r->run->Wait(120)); } //std::cerr<<...; } CPPUNIT_TEST_SUITE_REGISTRATION(RunTest); #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <cppunit/extensions/HelperMacros.h> #include <unistd.h> #include <glibmm/thread.h> #include <arc/Thread.h> class ThreadTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(ThreadTest); CPPUNIT_TEST(TestThread); CPPUNIT_TEST(TestBroadcast); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void TestThread(); void TestBroadcast(); private: static void func(void*); static void func_wait(void* arg); static int counter; static Glib::Mutex* lock; Arc::SimpleCondition cond; }; static int titem_created = 0; static int titem_deleted = 0; class TItem: public Arc::ThreadDataItem { private: std::string tid; int id; ~TItem(void); public: TItem(void); TItem(const TItem& parent); virtual void Dup(void); }; TItem::TItem(void) { Attach(tid); id=(++titem_created); } TItem::TItem(const TItem& parent):Arc::ThreadDataItem(parent.tid) { id=(++titem_created); } TItem::~TItem(void) { ++titem_deleted; } void TItem::Dup(void) { new TItem(*this); } void ThreadTest::setUp() { counter = 0; cond.reset(); lock = new Glib::Mutex; new TItem; } void ThreadTest::tearDown() { if(lock) delete lock; } void ThreadTest::TestThread() { // Simply run 500 threads and see if executable crashes for(int n = 0;n<500;++n) { CPPUNIT_ASSERT(Arc::CreateThreadFunction(&func,NULL)); } // Wait for all threads // In worst case it should be no more than one thread simultaneously.
for(int n = 0; n<(500*10); ++n) { sleep(1); if(counter >= 500) break; } sleep(1); CPPUNIT_ASSERT_EQUAL(500,counter); CPPUNIT_ASSERT_EQUAL(501,titem_created); CPPUNIT_ASSERT_EQUAL(500,titem_deleted); } void ThreadTest::TestBroadcast() { // Create 2 threads which wait and check broadcast wakes them both up CPPUNIT_ASSERT(Arc::CreateThreadFunction(&func_wait, this)); CPPUNIT_ASSERT(Arc::CreateThreadFunction(&func_wait, this)); // Wait for threads to start sleep(1); cond.broadcast(); // Wait for result sleep(1); CPPUNIT_ASSERT_EQUAL(2, counter); } void ThreadTest::func_wait(void* arg) { ThreadTest* test = (ThreadTest*)arg; test->cond.wait(); lock->lock(); test->counter++; lock->unlock(); } void ThreadTest::func(void*) { sleep(1); lock->lock(); ++counter; lock->unlock(); } int ThreadTest::counter = 0; Glib::Mutex* ThreadTest::lock = NULL; CPPUNIT_TEST_SUITE_REGISTRATION(ThreadTest); nordugrid-arc-5.4.2/src/hed/libs/common/test/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315724023771 xustar000000000000000030 mtime=1513200596.404969694 29 atime=1513200647.86659909 30 ctime=1513200658.902734066 nordugrid-arc-5.4.2/src/hed/libs/common/test/Makefile.in0000644000175000002070000034073113214315724024050 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
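# A minimal sketch, assuming the Makefile.am conventions shown above: every
# unit test binary in src/hed/libs/common/test is declared with the same
# SOURCES / CXXFLAGS / LDADD pattern, and automake regenerates this Makefile.in
# from Makefile.am. The name "FooTest" below is purely illustrative (it is not
# a target in this package); a new test would be declared in Makefile.am
# roughly as:
#
#   TESTS += FooTest
#   FooTest_SOURCES = $(top_srcdir)/src/Test.cpp FooTest.cpp
#   FooTest_CXXFLAGS = -I$(top_srcdir)/include \
#       $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS)
#   FooTest_LDADD = \
#       $(top_builddir)/src/hed/libs/common/libarccommon.la \
#       $(CPPUNIT_LIBS) $(GLIBMM_LIBS)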
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ TESTS = URLTest$(EXEEXT) LoggerTest$(EXEEXT) RunTest$(EXEEXT) \ XMLNodeTest$(EXEEXT) FileAccessTest$(EXEEXT) \ FileUtilsTest$(EXEEXT) ProfileTest$(EXEEXT) \ ArcRegexTest$(EXEEXT) FileLockTest$(EXEEXT) EnvTest$(EXEEXT) \ UserConfigTest$(EXEEXT) StringConvTest$(EXEEXT) \ CheckSumTest$(EXEEXT) WatchdogTest$(EXEEXT) UserTest$(EXEEXT) \ $(am__EXEEXT_1) Base64Test$(EXEEXT) check_PROGRAMS = $(am__EXEEXT_2) ThreadTest$(EXEEXT) subdir = src/hed/libs/common/test DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = @MYSQL_LIBRARY_ENABLED_TRUE@am__EXEEXT_1 = MysqlWrapperTest$(EXEEXT) am__EXEEXT_2 = URLTest$(EXEEXT) LoggerTest$(EXEEXT) RunTest$(EXEEXT) \ XMLNodeTest$(EXEEXT) FileAccessTest$(EXEEXT) \ FileUtilsTest$(EXEEXT) ProfileTest$(EXEEXT) \ ArcRegexTest$(EXEEXT) FileLockTest$(EXEEXT) EnvTest$(EXEEXT) \ UserConfigTest$(EXEEXT) StringConvTest$(EXEEXT) \ CheckSumTest$(EXEEXT) WatchdogTest$(EXEEXT) UserTest$(EXEEXT) \ $(am__EXEEXT_1) Base64Test$(EXEEXT) am_ArcRegexTest_OBJECTS = ArcRegexTest-Test.$(OBJEXT) \ ArcRegexTest-ArcRegexTest.$(OBJEXT) ArcRegexTest_OBJECTS = $(am_ArcRegexTest_OBJECTS) am__DEPENDENCIES_1 = ArcRegexTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) ArcRegexTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(ArcRegexTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_Base64Test_OBJECTS = Base64Test-Test.$(OBJEXT) \ Base64Test-Base64Test.$(OBJEXT) Base64Test_OBJECTS = $(am_Base64Test_OBJECTS) Base64Test_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) Base64Test_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(Base64Test_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_CheckSumTest_OBJECTS = CheckSumTest-Test.$(OBJEXT) \ CheckSumTest-CheckSumTest.$(OBJEXT) CheckSumTest_OBJECTS = $(am_CheckSumTest_OBJECTS) CheckSumTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) CheckSumTest_LINK = $(LIBTOOL) --tag=CXX 
$(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(CheckSumTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_EnvTest_OBJECTS = EnvTest-Test.$(OBJEXT) EnvTest-EnvTest.$(OBJEXT) EnvTest_OBJECTS = $(am_EnvTest_OBJECTS) EnvTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) EnvTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(EnvTest_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_FileAccessTest_OBJECTS = FileAccessTest-Test.$(OBJEXT) \ FileAccessTest-FileAccessTest.$(OBJEXT) FileAccessTest_OBJECTS = $(am_FileAccessTest_OBJECTS) FileAccessTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) FileAccessTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(FileAccessTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_FileLockTest_OBJECTS = FileLockTest-Test.$(OBJEXT) \ FileLockTest-FileLockTest.$(OBJEXT) FileLockTest_OBJECTS = $(am_FileLockTest_OBJECTS) FileLockTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) FileLockTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(FileLockTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_FileUtilsTest_OBJECTS = FileUtilsTest-Test.$(OBJEXT) \ FileUtilsTest-FileUtilsTest.$(OBJEXT) FileUtilsTest_OBJECTS = $(am_FileUtilsTest_OBJECTS) FileUtilsTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) FileUtilsTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(FileUtilsTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_LoggerTest_OBJECTS = LoggerTest-Test.$(OBJEXT) \ LoggerTest-LoggerTest.$(OBJEXT) LoggerTest_OBJECTS = $(am_LoggerTest_OBJECTS) LoggerTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) LoggerTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(LoggerTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am__MysqlWrapperTest_SOURCES_DIST = $(top_srcdir)/src/Test.cpp \ MysqlWrapperTest.cpp @MYSQL_LIBRARY_ENABLED_TRUE@am_MysqlWrapperTest_OBJECTS = \ @MYSQL_LIBRARY_ENABLED_TRUE@ MysqlWrapperTest-Test.$(OBJEXT) \ @MYSQL_LIBRARY_ENABLED_TRUE@ MysqlWrapperTest-MysqlWrapperTest.$(OBJEXT) MysqlWrapperTest_OBJECTS = $(am_MysqlWrapperTest_OBJECTS) @MYSQL_LIBRARY_ENABLED_TRUE@MysqlWrapperTest_DEPENDENCIES = $(top_builddir)/src/hed/libs/common/libarccommon.la \ @MYSQL_LIBRARY_ENABLED_TRUE@ $(am__DEPENDENCIES_1) \ @MYSQL_LIBRARY_ENABLED_TRUE@ $(am__DEPENDENCIES_1) MysqlWrapperTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(MysqlWrapperTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_ProfileTest_OBJECTS = ProfileTest-Test.$(OBJEXT) \ ProfileTest-ProfileTest.$(OBJEXT) ProfileTest_OBJECTS = $(am_ProfileTest_OBJECTS) ProfileTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) ProfileTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(ProfileTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_RunTest_OBJECTS = RunTest-Test.$(OBJEXT) RunTest-RunTest.$(OBJEXT) RunTest_OBJECTS = 
$(am_RunTest_OBJECTS) RunTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) RunTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(RunTest_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_StringConvTest_OBJECTS = StringConvTest-Test.$(OBJEXT) \ StringConvTest-StringConvTest.$(OBJEXT) StringConvTest_OBJECTS = $(am_StringConvTest_OBJECTS) StringConvTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) StringConvTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(StringConvTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_ThreadTest_OBJECTS = ThreadTest-Test.$(OBJEXT) \ ThreadTest-ThreadTest.$(OBJEXT) ThreadTest_OBJECTS = $(am_ThreadTest_OBJECTS) ThreadTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) ThreadTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(ThreadTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_URLTest_OBJECTS = URLTest-Test.$(OBJEXT) URLTest-URLTest.$(OBJEXT) URLTest_OBJECTS = $(am_URLTest_OBJECTS) URLTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) URLTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(URLTest_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_UserConfigTest_OBJECTS = UserConfigTest-Test.$(OBJEXT) \ UserConfigTest-UserConfigTest.$(OBJEXT) UserConfigTest_OBJECTS = $(am_UserConfigTest_OBJECTS) UserConfigTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) UserConfigTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(UserConfigTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_UserTest_OBJECTS = UserTest-Test.$(OBJEXT) \ UserTest-UserTest.$(OBJEXT) UserTest_OBJECTS = $(am_UserTest_OBJECTS) UserTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) UserTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(UserTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_WatchdogTest_OBJECTS = WatchdogTest-Test.$(OBJEXT) \ WatchdogTest-WatchdogTest.$(OBJEXT) WatchdogTest_OBJECTS = $(am_WatchdogTest_OBJECTS) WatchdogTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) WatchdogTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(WatchdogTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_XMLNodeTest_OBJECTS = XMLNodeTest-Test.$(OBJEXT) \ XMLNodeTest-XMLNodeTest.$(OBJEXT) XMLNodeTest_OBJECTS = $(am_XMLNodeTest_OBJECTS) XMLNodeTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) XMLNodeTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(XMLNodeTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) 
$(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(ArcRegexTest_SOURCES) $(Base64Test_SOURCES) \ $(CheckSumTest_SOURCES) $(EnvTest_SOURCES) \ $(FileAccessTest_SOURCES) $(FileLockTest_SOURCES) \ $(FileUtilsTest_SOURCES) $(LoggerTest_SOURCES) \ $(MysqlWrapperTest_SOURCES) $(ProfileTest_SOURCES) \ $(RunTest_SOURCES) $(StringConvTest_SOURCES) \ $(ThreadTest_SOURCES) $(URLTest_SOURCES) \ $(UserConfigTest_SOURCES) $(UserTest_SOURCES) \ $(WatchdogTest_SOURCES) $(XMLNodeTest_SOURCES) DIST_SOURCES = $(ArcRegexTest_SOURCES) $(Base64Test_SOURCES) \ $(CheckSumTest_SOURCES) $(EnvTest_SOURCES) \ $(FileAccessTest_SOURCES) $(FileLockTest_SOURCES) \ $(FileUtilsTest_SOURCES) $(LoggerTest_SOURCES) \ $(am__MysqlWrapperTest_SOURCES_DIST) $(ProfileTest_SOURCES) \ $(RunTest_SOURCES) $(StringConvTest_SOURCES) \ $(ThreadTest_SOURCES) $(URLTest_SOURCES) \ $(UserConfigTest_SOURCES) $(UserTest_SOURCES) \ $(WatchdogTest_SOURCES) $(XMLNodeTest_SOURCES) ETAGS = etags CTAGS = ctags am__tty_colors = \ red=; grn=; lgn=; blu=; std= DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = 
@ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = 
@PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @MYSQL_LIBRARY_ENABLED_FALSE@MYSQL_WRAPPER_TEST = @MYSQL_LIBRARY_ENABLED_TRUE@MYSQL_WRAPPER_TEST = MysqlWrapperTest TESTS_ENVIRONMENT = srcdir=$(srcdir) URLTest_SOURCES = $(top_srcdir)/src/Test.cpp URLTest.cpp URLTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) URLTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) LoggerTest_SOURCES = 
$(top_srcdir)/src/Test.cpp LoggerTest.cpp LoggerTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) LoggerTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) RunTest_SOURCES = $(top_srcdir)/src/Test.cpp RunTest.cpp RunTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) RunTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) ProfileTest_SOURCES = $(top_srcdir)/src/Test.cpp ProfileTest.cpp ProfileTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) ProfileTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) ThreadTest_SOURCES = $(top_srcdir)/src/Test.cpp ThreadTest.cpp ThreadTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) ThreadTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) XMLNodeTest_SOURCES = $(top_srcdir)/src/Test.cpp XMLNodeTest.cpp XMLNodeTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) XMLNodeTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) FileAccessTest_SOURCES = $(top_srcdir)/src/Test.cpp FileAccessTest.cpp FileAccessTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) FileAccessTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) FileUtilsTest_SOURCES = $(top_srcdir)/src/Test.cpp FileUtilsTest.cpp FileUtilsTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) FileUtilsTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) ArcRegexTest_SOURCES = $(top_srcdir)/src/Test.cpp ArcRegexTest.cpp ArcRegexTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) ArcRegexTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) FileLockTest_SOURCES = $(top_srcdir)/src/Test.cpp FileLockTest.cpp FileLockTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) FileLockTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) EnvTest_SOURCES = $(top_srcdir)/src/Test.cpp EnvTest.cpp EnvTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) EnvTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) UserConfigTest_SOURCES = $(top_srcdir)/src/Test.cpp UserConfigTest.cpp UserConfigTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) UserConfigTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) StringConvTest_SOURCES = $(top_srcdir)/src/Test.cpp StringConvTest.cpp StringConvTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) StringConvTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) CheckSumTest_SOURCES = $(top_srcdir)/src/Test.cpp CheckSumTest.cpp CheckSumTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) 
CheckSumTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) WatchdogTest_SOURCES = $(top_srcdir)/src/Test.cpp WatchdogTest.cpp WatchdogTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) WatchdogTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) UserTest_SOURCES = $(top_srcdir)/src/Test.cpp UserTest.cpp UserTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) UserTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) @MYSQL_LIBRARY_ENABLED_TRUE@MysqlWrapperTest_SOURCES = $(top_srcdir)/src/Test.cpp MysqlWrapperTest.cpp @MYSQL_LIBRARY_ENABLED_TRUE@MysqlWrapperTest_CXXFLAGS = -I$(top_srcdir)/include \ @MYSQL_LIBRARY_ENABLED_TRUE@ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(MYSQL_CFLAGS) $(AM_CXXFLAGS) @MYSQL_LIBRARY_ENABLED_TRUE@MysqlWrapperTest_LDADD = \ @MYSQL_LIBRARY_ENABLED_TRUE@ $(top_builddir)/src/hed/libs/common/libarccommon.la \ @MYSQL_LIBRARY_ENABLED_TRUE@ $(CPPUNIT_LIBS) $(MYSQL_LIBS) Base64Test_SOURCES = $(top_srcdir)/src/Test.cpp Base64Test.cpp Base64Test_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) Base64Test_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) EXTRA_DIST = rcode all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/common/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/common/test/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-checkPROGRAMS: @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list ArcRegexTest$(EXEEXT): $(ArcRegexTest_OBJECTS) $(ArcRegexTest_DEPENDENCIES) @rm -f ArcRegexTest$(EXEEXT) $(ArcRegexTest_LINK) $(ArcRegexTest_OBJECTS) $(ArcRegexTest_LDADD) $(LIBS) Base64Test$(EXEEXT): $(Base64Test_OBJECTS) $(Base64Test_DEPENDENCIES) @rm -f Base64Test$(EXEEXT) $(Base64Test_LINK) $(Base64Test_OBJECTS) $(Base64Test_LDADD) $(LIBS) CheckSumTest$(EXEEXT): $(CheckSumTest_OBJECTS) $(CheckSumTest_DEPENDENCIES) @rm -f CheckSumTest$(EXEEXT) $(CheckSumTest_LINK) $(CheckSumTest_OBJECTS) $(CheckSumTest_LDADD) $(LIBS) EnvTest$(EXEEXT): $(EnvTest_OBJECTS) $(EnvTest_DEPENDENCIES) @rm -f EnvTest$(EXEEXT) $(EnvTest_LINK) $(EnvTest_OBJECTS) $(EnvTest_LDADD) $(LIBS) FileAccessTest$(EXEEXT): $(FileAccessTest_OBJECTS) $(FileAccessTest_DEPENDENCIES) @rm -f FileAccessTest$(EXEEXT) $(FileAccessTest_LINK) $(FileAccessTest_OBJECTS) $(FileAccessTest_LDADD) $(LIBS) FileLockTest$(EXEEXT): $(FileLockTest_OBJECTS) $(FileLockTest_DEPENDENCIES) @rm -f FileLockTest$(EXEEXT) $(FileLockTest_LINK) $(FileLockTest_OBJECTS) $(FileLockTest_LDADD) $(LIBS) FileUtilsTest$(EXEEXT): $(FileUtilsTest_OBJECTS) $(FileUtilsTest_DEPENDENCIES) @rm -f FileUtilsTest$(EXEEXT) $(FileUtilsTest_LINK) $(FileUtilsTest_OBJECTS) $(FileUtilsTest_LDADD) $(LIBS) LoggerTest$(EXEEXT): $(LoggerTest_OBJECTS) $(LoggerTest_DEPENDENCIES) @rm -f LoggerTest$(EXEEXT) $(LoggerTest_LINK) $(LoggerTest_OBJECTS) $(LoggerTest_LDADD) $(LIBS) MysqlWrapperTest$(EXEEXT): $(MysqlWrapperTest_OBJECTS) $(MysqlWrapperTest_DEPENDENCIES) @rm -f MysqlWrapperTest$(EXEEXT) $(MysqlWrapperTest_LINK) $(MysqlWrapperTest_OBJECTS) $(MysqlWrapperTest_LDADD) $(LIBS) ProfileTest$(EXEEXT): $(ProfileTest_OBJECTS) $(ProfileTest_DEPENDENCIES) @rm -f ProfileTest$(EXEEXT) $(ProfileTest_LINK) $(ProfileTest_OBJECTS) $(ProfileTest_LDADD) $(LIBS) RunTest$(EXEEXT): $(RunTest_OBJECTS) $(RunTest_DEPENDENCIES) @rm -f RunTest$(EXEEXT) $(RunTest_LINK) $(RunTest_OBJECTS) $(RunTest_LDADD) $(LIBS) StringConvTest$(EXEEXT): $(StringConvTest_OBJECTS) $(StringConvTest_DEPENDENCIES) @rm -f StringConvTest$(EXEEXT) $(StringConvTest_LINK) $(StringConvTest_OBJECTS) $(StringConvTest_LDADD) $(LIBS) ThreadTest$(EXEEXT): $(ThreadTest_OBJECTS) $(ThreadTest_DEPENDENCIES) @rm -f ThreadTest$(EXEEXT) $(ThreadTest_LINK) $(ThreadTest_OBJECTS) $(ThreadTest_LDADD) $(LIBS) URLTest$(EXEEXT): $(URLTest_OBJECTS) $(URLTest_DEPENDENCIES) @rm -f URLTest$(EXEEXT) $(URLTest_LINK) $(URLTest_OBJECTS) $(URLTest_LDADD) $(LIBS) UserConfigTest$(EXEEXT): $(UserConfigTest_OBJECTS) $(UserConfigTest_DEPENDENCIES) @rm -f UserConfigTest$(EXEEXT) $(UserConfigTest_LINK) $(UserConfigTest_OBJECTS) $(UserConfigTest_LDADD) $(LIBS) 
UserTest$(EXEEXT): $(UserTest_OBJECTS) $(UserTest_DEPENDENCIES) @rm -f UserTest$(EXEEXT) $(UserTest_LINK) $(UserTest_OBJECTS) $(UserTest_LDADD) $(LIBS) WatchdogTest$(EXEEXT): $(WatchdogTest_OBJECTS) $(WatchdogTest_DEPENDENCIES) @rm -f WatchdogTest$(EXEEXT) $(WatchdogTest_LINK) $(WatchdogTest_OBJECTS) $(WatchdogTest_LDADD) $(LIBS) XMLNodeTest$(EXEEXT): $(XMLNodeTest_OBJECTS) $(XMLNodeTest_DEPENDENCIES) @rm -f XMLNodeTest$(EXEEXT) $(XMLNodeTest_LINK) $(XMLNodeTest_OBJECTS) $(XMLNodeTest_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ArcRegexTest-ArcRegexTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ArcRegexTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Base64Test-Base64Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Base64Test-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/CheckSumTest-CheckSumTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/CheckSumTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/EnvTest-EnvTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/EnvTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/FileAccessTest-FileAccessTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/FileAccessTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/FileLockTest-FileLockTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/FileLockTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/FileUtilsTest-FileUtilsTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/FileUtilsTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LoggerTest-LoggerTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LoggerTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MysqlWrapperTest-MysqlWrapperTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MysqlWrapperTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ProfileTest-ProfileTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ProfileTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/RunTest-RunTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/RunTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/StringConvTest-StringConvTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/StringConvTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ThreadTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ThreadTest-ThreadTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/URLTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/URLTest-URLTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/UserConfigTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/UserConfigTest-UserConfigTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/UserTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/UserTest-UserTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/WatchdogTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/WatchdogTest-WatchdogTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/XMLNodeTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/XMLNodeTest-XMLNodeTest.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< ArcRegexTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ArcRegexTest_CXXFLAGS) $(CXXFLAGS) -MT ArcRegexTest-Test.o -MD -MP -MF $(DEPDIR)/ArcRegexTest-Test.Tpo -c -o ArcRegexTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ArcRegexTest-Test.Tpo $(DEPDIR)/ArcRegexTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ArcRegexTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ArcRegexTest_CXXFLAGS) $(CXXFLAGS) -c -o ArcRegexTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp ArcRegexTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ArcRegexTest_CXXFLAGS) $(CXXFLAGS) -MT ArcRegexTest-Test.obj -MD -MP -MF $(DEPDIR)/ArcRegexTest-Test.Tpo -c -o ArcRegexTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ArcRegexTest-Test.Tpo $(DEPDIR)/ArcRegexTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ArcRegexTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ArcRegexTest_CXXFLAGS) $(CXXFLAGS) -c -o ArcRegexTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` ArcRegexTest-ArcRegexTest.o: ArcRegexTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ArcRegexTest_CXXFLAGS) $(CXXFLAGS) -MT ArcRegexTest-ArcRegexTest.o -MD -MP -MF $(DEPDIR)/ArcRegexTest-ArcRegexTest.Tpo -c -o ArcRegexTest-ArcRegexTest.o 
`test -f 'ArcRegexTest.cpp' || echo '$(srcdir)/'`ArcRegexTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ArcRegexTest-ArcRegexTest.Tpo $(DEPDIR)/ArcRegexTest-ArcRegexTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArcRegexTest.cpp' object='ArcRegexTest-ArcRegexTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ArcRegexTest_CXXFLAGS) $(CXXFLAGS) -c -o ArcRegexTest-ArcRegexTest.o `test -f 'ArcRegexTest.cpp' || echo '$(srcdir)/'`ArcRegexTest.cpp ArcRegexTest-ArcRegexTest.obj: ArcRegexTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ArcRegexTest_CXXFLAGS) $(CXXFLAGS) -MT ArcRegexTest-ArcRegexTest.obj -MD -MP -MF $(DEPDIR)/ArcRegexTest-ArcRegexTest.Tpo -c -o ArcRegexTest-ArcRegexTest.obj `if test -f 'ArcRegexTest.cpp'; then $(CYGPATH_W) 'ArcRegexTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ArcRegexTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ArcRegexTest-ArcRegexTest.Tpo $(DEPDIR)/ArcRegexTest-ArcRegexTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArcRegexTest.cpp' object='ArcRegexTest-ArcRegexTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ArcRegexTest_CXXFLAGS) $(CXXFLAGS) -c -o ArcRegexTest-ArcRegexTest.obj `if test -f 'ArcRegexTest.cpp'; then $(CYGPATH_W) 'ArcRegexTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ArcRegexTest.cpp'; fi` Base64Test-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(Base64Test_CXXFLAGS) $(CXXFLAGS) -MT Base64Test-Test.o -MD -MP -MF $(DEPDIR)/Base64Test-Test.Tpo -c -o Base64Test-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/Base64Test-Test.Tpo $(DEPDIR)/Base64Test-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='Base64Test-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(Base64Test_CXXFLAGS) $(CXXFLAGS) -c -o Base64Test-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp Base64Test-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(Base64Test_CXXFLAGS) $(CXXFLAGS) -MT Base64Test-Test.obj -MD -MP -MF $(DEPDIR)/Base64Test-Test.Tpo -c -o Base64Test-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/Base64Test-Test.Tpo $(DEPDIR)/Base64Test-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='Base64Test-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(Base64Test_CXXFLAGS) $(CXXFLAGS) -c -o Base64Test-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) 
'$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` Base64Test-Base64Test.o: Base64Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(Base64Test_CXXFLAGS) $(CXXFLAGS) -MT Base64Test-Base64Test.o -MD -MP -MF $(DEPDIR)/Base64Test-Base64Test.Tpo -c -o Base64Test-Base64Test.o `test -f 'Base64Test.cpp' || echo '$(srcdir)/'`Base64Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/Base64Test-Base64Test.Tpo $(DEPDIR)/Base64Test-Base64Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Base64Test.cpp' object='Base64Test-Base64Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(Base64Test_CXXFLAGS) $(CXXFLAGS) -c -o Base64Test-Base64Test.o `test -f 'Base64Test.cpp' || echo '$(srcdir)/'`Base64Test.cpp Base64Test-Base64Test.obj: Base64Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(Base64Test_CXXFLAGS) $(CXXFLAGS) -MT Base64Test-Base64Test.obj -MD -MP -MF $(DEPDIR)/Base64Test-Base64Test.Tpo -c -o Base64Test-Base64Test.obj `if test -f 'Base64Test.cpp'; then $(CYGPATH_W) 'Base64Test.cpp'; else $(CYGPATH_W) '$(srcdir)/Base64Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/Base64Test-Base64Test.Tpo $(DEPDIR)/Base64Test-Base64Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Base64Test.cpp' object='Base64Test-Base64Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(Base64Test_CXXFLAGS) $(CXXFLAGS) -c -o Base64Test-Base64Test.obj `if test -f 'Base64Test.cpp'; then $(CYGPATH_W) 'Base64Test.cpp'; else $(CYGPATH_W) '$(srcdir)/Base64Test.cpp'; fi` CheckSumTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(CheckSumTest_CXXFLAGS) $(CXXFLAGS) -MT CheckSumTest-Test.o -MD -MP -MF $(DEPDIR)/CheckSumTest-Test.Tpo -c -o CheckSumTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/CheckSumTest-Test.Tpo $(DEPDIR)/CheckSumTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='CheckSumTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(CheckSumTest_CXXFLAGS) $(CXXFLAGS) -c -o CheckSumTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp CheckSumTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(CheckSumTest_CXXFLAGS) $(CXXFLAGS) -MT CheckSumTest-Test.obj -MD -MP -MF $(DEPDIR)/CheckSumTest-Test.Tpo -c -o CheckSumTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/CheckSumTest-Test.Tpo $(DEPDIR)/CheckSumTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='CheckSumTest-Test.obj' libtool=no @AMDEPBACKSLASH@ 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(CheckSumTest_CXXFLAGS) $(CXXFLAGS) -c -o CheckSumTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` CheckSumTest-CheckSumTest.o: CheckSumTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(CheckSumTest_CXXFLAGS) $(CXXFLAGS) -MT CheckSumTest-CheckSumTest.o -MD -MP -MF $(DEPDIR)/CheckSumTest-CheckSumTest.Tpo -c -o CheckSumTest-CheckSumTest.o `test -f 'CheckSumTest.cpp' || echo '$(srcdir)/'`CheckSumTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/CheckSumTest-CheckSumTest.Tpo $(DEPDIR)/CheckSumTest-CheckSumTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='CheckSumTest.cpp' object='CheckSumTest-CheckSumTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(CheckSumTest_CXXFLAGS) $(CXXFLAGS) -c -o CheckSumTest-CheckSumTest.o `test -f 'CheckSumTest.cpp' || echo '$(srcdir)/'`CheckSumTest.cpp CheckSumTest-CheckSumTest.obj: CheckSumTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(CheckSumTest_CXXFLAGS) $(CXXFLAGS) -MT CheckSumTest-CheckSumTest.obj -MD -MP -MF $(DEPDIR)/CheckSumTest-CheckSumTest.Tpo -c -o CheckSumTest-CheckSumTest.obj `if test -f 'CheckSumTest.cpp'; then $(CYGPATH_W) 'CheckSumTest.cpp'; else $(CYGPATH_W) '$(srcdir)/CheckSumTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/CheckSumTest-CheckSumTest.Tpo $(DEPDIR)/CheckSumTest-CheckSumTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='CheckSumTest.cpp' object='CheckSumTest-CheckSumTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(CheckSumTest_CXXFLAGS) $(CXXFLAGS) -c -o CheckSumTest-CheckSumTest.obj `if test -f 'CheckSumTest.cpp'; then $(CYGPATH_W) 'CheckSumTest.cpp'; else $(CYGPATH_W) '$(srcdir)/CheckSumTest.cpp'; fi` EnvTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(EnvTest_CXXFLAGS) $(CXXFLAGS) -MT EnvTest-Test.o -MD -MP -MF $(DEPDIR)/EnvTest-Test.Tpo -c -o EnvTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/EnvTest-Test.Tpo $(DEPDIR)/EnvTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='EnvTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(EnvTest_CXXFLAGS) $(CXXFLAGS) -c -o EnvTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp EnvTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(EnvTest_CXXFLAGS) $(CXXFLAGS) -MT EnvTest-Test.obj -MD -MP -MF $(DEPDIR)/EnvTest-Test.Tpo -c -o EnvTest-Test.obj `if test -f 
'$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/EnvTest-Test.Tpo $(DEPDIR)/EnvTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='EnvTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(EnvTest_CXXFLAGS) $(CXXFLAGS) -c -o EnvTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` EnvTest-EnvTest.o: EnvTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(EnvTest_CXXFLAGS) $(CXXFLAGS) -MT EnvTest-EnvTest.o -MD -MP -MF $(DEPDIR)/EnvTest-EnvTest.Tpo -c -o EnvTest-EnvTest.o `test -f 'EnvTest.cpp' || echo '$(srcdir)/'`EnvTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/EnvTest-EnvTest.Tpo $(DEPDIR)/EnvTest-EnvTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='EnvTest.cpp' object='EnvTest-EnvTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(EnvTest_CXXFLAGS) $(CXXFLAGS) -c -o EnvTest-EnvTest.o `test -f 'EnvTest.cpp' || echo '$(srcdir)/'`EnvTest.cpp EnvTest-EnvTest.obj: EnvTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(EnvTest_CXXFLAGS) $(CXXFLAGS) -MT EnvTest-EnvTest.obj -MD -MP -MF $(DEPDIR)/EnvTest-EnvTest.Tpo -c -o EnvTest-EnvTest.obj `if test -f 'EnvTest.cpp'; then $(CYGPATH_W) 'EnvTest.cpp'; else $(CYGPATH_W) '$(srcdir)/EnvTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/EnvTest-EnvTest.Tpo $(DEPDIR)/EnvTest-EnvTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='EnvTest.cpp' object='EnvTest-EnvTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(EnvTest_CXXFLAGS) $(CXXFLAGS) -c -o EnvTest-EnvTest.obj `if test -f 'EnvTest.cpp'; then $(CYGPATH_W) 'EnvTest.cpp'; else $(CYGPATH_W) '$(srcdir)/EnvTest.cpp'; fi` FileAccessTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileAccessTest_CXXFLAGS) $(CXXFLAGS) -MT FileAccessTest-Test.o -MD -MP -MF $(DEPDIR)/FileAccessTest-Test.Tpo -c -o FileAccessTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/FileAccessTest-Test.Tpo $(DEPDIR)/FileAccessTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='FileAccessTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileAccessTest_CXXFLAGS) $(CXXFLAGS) -c -o FileAccessTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp FileAccessTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(FileAccessTest_CXXFLAGS) $(CXXFLAGS) -MT FileAccessTest-Test.obj -MD -MP -MF $(DEPDIR)/FileAccessTest-Test.Tpo -c -o FileAccessTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/FileAccessTest-Test.Tpo $(DEPDIR)/FileAccessTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='FileAccessTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileAccessTest_CXXFLAGS) $(CXXFLAGS) -c -o FileAccessTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` FileAccessTest-FileAccessTest.o: FileAccessTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileAccessTest_CXXFLAGS) $(CXXFLAGS) -MT FileAccessTest-FileAccessTest.o -MD -MP -MF $(DEPDIR)/FileAccessTest-FileAccessTest.Tpo -c -o FileAccessTest-FileAccessTest.o `test -f 'FileAccessTest.cpp' || echo '$(srcdir)/'`FileAccessTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/FileAccessTest-FileAccessTest.Tpo $(DEPDIR)/FileAccessTest-FileAccessTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FileAccessTest.cpp' object='FileAccessTest-FileAccessTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileAccessTest_CXXFLAGS) $(CXXFLAGS) -c -o FileAccessTest-FileAccessTest.o `test -f 'FileAccessTest.cpp' || echo '$(srcdir)/'`FileAccessTest.cpp FileAccessTest-FileAccessTest.obj: FileAccessTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileAccessTest_CXXFLAGS) $(CXXFLAGS) -MT FileAccessTest-FileAccessTest.obj -MD -MP -MF $(DEPDIR)/FileAccessTest-FileAccessTest.Tpo -c -o FileAccessTest-FileAccessTest.obj `if test -f 'FileAccessTest.cpp'; then $(CYGPATH_W) 'FileAccessTest.cpp'; else $(CYGPATH_W) '$(srcdir)/FileAccessTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/FileAccessTest-FileAccessTest.Tpo $(DEPDIR)/FileAccessTest-FileAccessTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FileAccessTest.cpp' object='FileAccessTest-FileAccessTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileAccessTest_CXXFLAGS) $(CXXFLAGS) -c -o FileAccessTest-FileAccessTest.obj `if test -f 'FileAccessTest.cpp'; then $(CYGPATH_W) 'FileAccessTest.cpp'; else $(CYGPATH_W) '$(srcdir)/FileAccessTest.cpp'; fi` FileLockTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileLockTest_CXXFLAGS) $(CXXFLAGS) -MT FileLockTest-Test.o -MD -MP -MF $(DEPDIR)/FileLockTest-Test.Tpo -c -o FileLockTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/FileLockTest-Test.Tpo $(DEPDIR)/FileLockTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ 
source='$(top_srcdir)/src/Test.cpp' object='FileLockTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileLockTest_CXXFLAGS) $(CXXFLAGS) -c -o FileLockTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp FileLockTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileLockTest_CXXFLAGS) $(CXXFLAGS) -MT FileLockTest-Test.obj -MD -MP -MF $(DEPDIR)/FileLockTest-Test.Tpo -c -o FileLockTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/FileLockTest-Test.Tpo $(DEPDIR)/FileLockTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='FileLockTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileLockTest_CXXFLAGS) $(CXXFLAGS) -c -o FileLockTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` FileLockTest-FileLockTest.o: FileLockTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileLockTest_CXXFLAGS) $(CXXFLAGS) -MT FileLockTest-FileLockTest.o -MD -MP -MF $(DEPDIR)/FileLockTest-FileLockTest.Tpo -c -o FileLockTest-FileLockTest.o `test -f 'FileLockTest.cpp' || echo '$(srcdir)/'`FileLockTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/FileLockTest-FileLockTest.Tpo $(DEPDIR)/FileLockTest-FileLockTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FileLockTest.cpp' object='FileLockTest-FileLockTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileLockTest_CXXFLAGS) $(CXXFLAGS) -c -o FileLockTest-FileLockTest.o `test -f 'FileLockTest.cpp' || echo '$(srcdir)/'`FileLockTest.cpp FileLockTest-FileLockTest.obj: FileLockTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileLockTest_CXXFLAGS) $(CXXFLAGS) -MT FileLockTest-FileLockTest.obj -MD -MP -MF $(DEPDIR)/FileLockTest-FileLockTest.Tpo -c -o FileLockTest-FileLockTest.obj `if test -f 'FileLockTest.cpp'; then $(CYGPATH_W) 'FileLockTest.cpp'; else $(CYGPATH_W) '$(srcdir)/FileLockTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/FileLockTest-FileLockTest.Tpo $(DEPDIR)/FileLockTest-FileLockTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FileLockTest.cpp' object='FileLockTest-FileLockTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileLockTest_CXXFLAGS) $(CXXFLAGS) -c -o FileLockTest-FileLockTest.obj `if test -f 'FileLockTest.cpp'; then $(CYGPATH_W) 'FileLockTest.cpp'; else $(CYGPATH_W) '$(srcdir)/FileLockTest.cpp'; fi` FileUtilsTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) 
$(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileUtilsTest_CXXFLAGS) $(CXXFLAGS) -MT FileUtilsTest-Test.o -MD -MP -MF $(DEPDIR)/FileUtilsTest-Test.Tpo -c -o FileUtilsTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/FileUtilsTest-Test.Tpo $(DEPDIR)/FileUtilsTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='FileUtilsTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileUtilsTest_CXXFLAGS) $(CXXFLAGS) -c -o FileUtilsTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp FileUtilsTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileUtilsTest_CXXFLAGS) $(CXXFLAGS) -MT FileUtilsTest-Test.obj -MD -MP -MF $(DEPDIR)/FileUtilsTest-Test.Tpo -c -o FileUtilsTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/FileUtilsTest-Test.Tpo $(DEPDIR)/FileUtilsTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='FileUtilsTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileUtilsTest_CXXFLAGS) $(CXXFLAGS) -c -o FileUtilsTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` FileUtilsTest-FileUtilsTest.o: FileUtilsTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileUtilsTest_CXXFLAGS) $(CXXFLAGS) -MT FileUtilsTest-FileUtilsTest.o -MD -MP -MF $(DEPDIR)/FileUtilsTest-FileUtilsTest.Tpo -c -o FileUtilsTest-FileUtilsTest.o `test -f 'FileUtilsTest.cpp' || echo '$(srcdir)/'`FileUtilsTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/FileUtilsTest-FileUtilsTest.Tpo $(DEPDIR)/FileUtilsTest-FileUtilsTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FileUtilsTest.cpp' object='FileUtilsTest-FileUtilsTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileUtilsTest_CXXFLAGS) $(CXXFLAGS) -c -o FileUtilsTest-FileUtilsTest.o `test -f 'FileUtilsTest.cpp' || echo '$(srcdir)/'`FileUtilsTest.cpp FileUtilsTest-FileUtilsTest.obj: FileUtilsTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileUtilsTest_CXXFLAGS) $(CXXFLAGS) -MT FileUtilsTest-FileUtilsTest.obj -MD -MP -MF $(DEPDIR)/FileUtilsTest-FileUtilsTest.Tpo -c -o FileUtilsTest-FileUtilsTest.obj `if test -f 'FileUtilsTest.cpp'; then $(CYGPATH_W) 'FileUtilsTest.cpp'; else $(CYGPATH_W) '$(srcdir)/FileUtilsTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/FileUtilsTest-FileUtilsTest.Tpo $(DEPDIR)/FileUtilsTest-FileUtilsTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FileUtilsTest.cpp' object='FileUtilsTest-FileUtilsTest.obj' libtool=no 
@AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(FileUtilsTest_CXXFLAGS) $(CXXFLAGS) -c -o FileUtilsTest-FileUtilsTest.obj `if test -f 'FileUtilsTest.cpp'; then $(CYGPATH_W) 'FileUtilsTest.cpp'; else $(CYGPATH_W) '$(srcdir)/FileUtilsTest.cpp'; fi` LoggerTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(LoggerTest_CXXFLAGS) $(CXXFLAGS) -MT LoggerTest-Test.o -MD -MP -MF $(DEPDIR)/LoggerTest-Test.Tpo -c -o LoggerTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/LoggerTest-Test.Tpo $(DEPDIR)/LoggerTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='LoggerTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(LoggerTest_CXXFLAGS) $(CXXFLAGS) -c -o LoggerTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp LoggerTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(LoggerTest_CXXFLAGS) $(CXXFLAGS) -MT LoggerTest-Test.obj -MD -MP -MF $(DEPDIR)/LoggerTest-Test.Tpo -c -o LoggerTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/LoggerTest-Test.Tpo $(DEPDIR)/LoggerTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='LoggerTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(LoggerTest_CXXFLAGS) $(CXXFLAGS) -c -o LoggerTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` LoggerTest-LoggerTest.o: LoggerTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(LoggerTest_CXXFLAGS) $(CXXFLAGS) -MT LoggerTest-LoggerTest.o -MD -MP -MF $(DEPDIR)/LoggerTest-LoggerTest.Tpo -c -o LoggerTest-LoggerTest.o `test -f 'LoggerTest.cpp' || echo '$(srcdir)/'`LoggerTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/LoggerTest-LoggerTest.Tpo $(DEPDIR)/LoggerTest-LoggerTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='LoggerTest.cpp' object='LoggerTest-LoggerTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(LoggerTest_CXXFLAGS) $(CXXFLAGS) -c -o LoggerTest-LoggerTest.o `test -f 'LoggerTest.cpp' || echo '$(srcdir)/'`LoggerTest.cpp LoggerTest-LoggerTest.obj: LoggerTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(LoggerTest_CXXFLAGS) $(CXXFLAGS) -MT LoggerTest-LoggerTest.obj -MD -MP -MF $(DEPDIR)/LoggerTest-LoggerTest.Tpo -c -o LoggerTest-LoggerTest.obj `if test -f 'LoggerTest.cpp'; 
then $(CYGPATH_W) 'LoggerTest.cpp'; else $(CYGPATH_W) '$(srcdir)/LoggerTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/LoggerTest-LoggerTest.Tpo $(DEPDIR)/LoggerTest-LoggerTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='LoggerTest.cpp' object='LoggerTest-LoggerTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(LoggerTest_CXXFLAGS) $(CXXFLAGS) -c -o LoggerTest-LoggerTest.obj `if test -f 'LoggerTest.cpp'; then $(CYGPATH_W) 'LoggerTest.cpp'; else $(CYGPATH_W) '$(srcdir)/LoggerTest.cpp'; fi` MysqlWrapperTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(MysqlWrapperTest_CXXFLAGS) $(CXXFLAGS) -MT MysqlWrapperTest-Test.o -MD -MP -MF $(DEPDIR)/MysqlWrapperTest-Test.Tpo -c -o MysqlWrapperTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/MysqlWrapperTest-Test.Tpo $(DEPDIR)/MysqlWrapperTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='MysqlWrapperTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(MysqlWrapperTest_CXXFLAGS) $(CXXFLAGS) -c -o MysqlWrapperTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp MysqlWrapperTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(MysqlWrapperTest_CXXFLAGS) $(CXXFLAGS) -MT MysqlWrapperTest-Test.obj -MD -MP -MF $(DEPDIR)/MysqlWrapperTest-Test.Tpo -c -o MysqlWrapperTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/MysqlWrapperTest-Test.Tpo $(DEPDIR)/MysqlWrapperTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='MysqlWrapperTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(MysqlWrapperTest_CXXFLAGS) $(CXXFLAGS) -c -o MysqlWrapperTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` MysqlWrapperTest-MysqlWrapperTest.o: MysqlWrapperTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(MysqlWrapperTest_CXXFLAGS) $(CXXFLAGS) -MT MysqlWrapperTest-MysqlWrapperTest.o -MD -MP -MF $(DEPDIR)/MysqlWrapperTest-MysqlWrapperTest.Tpo -c -o MysqlWrapperTest-MysqlWrapperTest.o `test -f 'MysqlWrapperTest.cpp' || echo '$(srcdir)/'`MysqlWrapperTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/MysqlWrapperTest-MysqlWrapperTest.Tpo $(DEPDIR)/MysqlWrapperTest-MysqlWrapperTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='MysqlWrapperTest.cpp' object='MysqlWrapperTest-MysqlWrapperTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) 
$(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(MysqlWrapperTest_CXXFLAGS) $(CXXFLAGS) -c -o MysqlWrapperTest-MysqlWrapperTest.o `test -f 'MysqlWrapperTest.cpp' || echo '$(srcdir)/'`MysqlWrapperTest.cpp MysqlWrapperTest-MysqlWrapperTest.obj: MysqlWrapperTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(MysqlWrapperTest_CXXFLAGS) $(CXXFLAGS) -MT MysqlWrapperTest-MysqlWrapperTest.obj -MD -MP -MF $(DEPDIR)/MysqlWrapperTest-MysqlWrapperTest.Tpo -c -o MysqlWrapperTest-MysqlWrapperTest.obj `if test -f 'MysqlWrapperTest.cpp'; then $(CYGPATH_W) 'MysqlWrapperTest.cpp'; else $(CYGPATH_W) '$(srcdir)/MysqlWrapperTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/MysqlWrapperTest-MysqlWrapperTest.Tpo $(DEPDIR)/MysqlWrapperTest-MysqlWrapperTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='MysqlWrapperTest.cpp' object='MysqlWrapperTest-MysqlWrapperTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(MysqlWrapperTest_CXXFLAGS) $(CXXFLAGS) -c -o MysqlWrapperTest-MysqlWrapperTest.obj `if test -f 'MysqlWrapperTest.cpp'; then $(CYGPATH_W) 'MysqlWrapperTest.cpp'; else $(CYGPATH_W) '$(srcdir)/MysqlWrapperTest.cpp'; fi` ProfileTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProfileTest_CXXFLAGS) $(CXXFLAGS) -MT ProfileTest-Test.o -MD -MP -MF $(DEPDIR)/ProfileTest-Test.Tpo -c -o ProfileTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ProfileTest-Test.Tpo $(DEPDIR)/ProfileTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ProfileTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProfileTest_CXXFLAGS) $(CXXFLAGS) -c -o ProfileTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp ProfileTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProfileTest_CXXFLAGS) $(CXXFLAGS) -MT ProfileTest-Test.obj -MD -MP -MF $(DEPDIR)/ProfileTest-Test.Tpo -c -o ProfileTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ProfileTest-Test.Tpo $(DEPDIR)/ProfileTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ProfileTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProfileTest_CXXFLAGS) $(CXXFLAGS) -c -o ProfileTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` ProfileTest-ProfileTest.o: ProfileTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProfileTest_CXXFLAGS) $(CXXFLAGS) -MT ProfileTest-ProfileTest.o 
-MD -MP -MF $(DEPDIR)/ProfileTest-ProfileTest.Tpo -c -o ProfileTest-ProfileTest.o `test -f 'ProfileTest.cpp' || echo '$(srcdir)/'`ProfileTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ProfileTest-ProfileTest.Tpo $(DEPDIR)/ProfileTest-ProfileTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ProfileTest.cpp' object='ProfileTest-ProfileTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProfileTest_CXXFLAGS) $(CXXFLAGS) -c -o ProfileTest-ProfileTest.o `test -f 'ProfileTest.cpp' || echo '$(srcdir)/'`ProfileTest.cpp ProfileTest-ProfileTest.obj: ProfileTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProfileTest_CXXFLAGS) $(CXXFLAGS) -MT ProfileTest-ProfileTest.obj -MD -MP -MF $(DEPDIR)/ProfileTest-ProfileTest.Tpo -c -o ProfileTest-ProfileTest.obj `if test -f 'ProfileTest.cpp'; then $(CYGPATH_W) 'ProfileTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ProfileTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ProfileTest-ProfileTest.Tpo $(DEPDIR)/ProfileTest-ProfileTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ProfileTest.cpp' object='ProfileTest-ProfileTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProfileTest_CXXFLAGS) $(CXXFLAGS) -c -o ProfileTest-ProfileTest.obj `if test -f 'ProfileTest.cpp'; then $(CYGPATH_W) 'ProfileTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ProfileTest.cpp'; fi` RunTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RunTest_CXXFLAGS) $(CXXFLAGS) -MT RunTest-Test.o -MD -MP -MF $(DEPDIR)/RunTest-Test.Tpo -c -o RunTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/RunTest-Test.Tpo $(DEPDIR)/RunTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='RunTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RunTest_CXXFLAGS) $(CXXFLAGS) -c -o RunTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp RunTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RunTest_CXXFLAGS) $(CXXFLAGS) -MT RunTest-Test.obj -MD -MP -MF $(DEPDIR)/RunTest-Test.Tpo -c -o RunTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/RunTest-Test.Tpo $(DEPDIR)/RunTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='RunTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RunTest_CXXFLAGS) $(CXXFLAGS) -c -o RunTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; 
else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` RunTest-RunTest.o: RunTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RunTest_CXXFLAGS) $(CXXFLAGS) -MT RunTest-RunTest.o -MD -MP -MF $(DEPDIR)/RunTest-RunTest.Tpo -c -o RunTest-RunTest.o `test -f 'RunTest.cpp' || echo '$(srcdir)/'`RunTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/RunTest-RunTest.Tpo $(DEPDIR)/RunTest-RunTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='RunTest.cpp' object='RunTest-RunTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RunTest_CXXFLAGS) $(CXXFLAGS) -c -o RunTest-RunTest.o `test -f 'RunTest.cpp' || echo '$(srcdir)/'`RunTest.cpp RunTest-RunTest.obj: RunTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RunTest_CXXFLAGS) $(CXXFLAGS) -MT RunTest-RunTest.obj -MD -MP -MF $(DEPDIR)/RunTest-RunTest.Tpo -c -o RunTest-RunTest.obj `if test -f 'RunTest.cpp'; then $(CYGPATH_W) 'RunTest.cpp'; else $(CYGPATH_W) '$(srcdir)/RunTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/RunTest-RunTest.Tpo $(DEPDIR)/RunTest-RunTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='RunTest.cpp' object='RunTest-RunTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RunTest_CXXFLAGS) $(CXXFLAGS) -c -o RunTest-RunTest.obj `if test -f 'RunTest.cpp'; then $(CYGPATH_W) 'RunTest.cpp'; else $(CYGPATH_W) '$(srcdir)/RunTest.cpp'; fi` StringConvTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(StringConvTest_CXXFLAGS) $(CXXFLAGS) -MT StringConvTest-Test.o -MD -MP -MF $(DEPDIR)/StringConvTest-Test.Tpo -c -o StringConvTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/StringConvTest-Test.Tpo $(DEPDIR)/StringConvTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='StringConvTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(StringConvTest_CXXFLAGS) $(CXXFLAGS) -c -o StringConvTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp StringConvTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(StringConvTest_CXXFLAGS) $(CXXFLAGS) -MT StringConvTest-Test.obj -MD -MP -MF $(DEPDIR)/StringConvTest-Test.Tpo -c -o StringConvTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/StringConvTest-Test.Tpo $(DEPDIR)/StringConvTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='StringConvTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(StringConvTest_CXXFLAGS) $(CXXFLAGS) -c -o StringConvTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` StringConvTest-StringConvTest.o: StringConvTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(StringConvTest_CXXFLAGS) $(CXXFLAGS) -MT StringConvTest-StringConvTest.o -MD -MP -MF $(DEPDIR)/StringConvTest-StringConvTest.Tpo -c -o StringConvTest-StringConvTest.o `test -f 'StringConvTest.cpp' || echo '$(srcdir)/'`StringConvTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/StringConvTest-StringConvTest.Tpo $(DEPDIR)/StringConvTest-StringConvTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='StringConvTest.cpp' object='StringConvTest-StringConvTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(StringConvTest_CXXFLAGS) $(CXXFLAGS) -c -o StringConvTest-StringConvTest.o `test -f 'StringConvTest.cpp' || echo '$(srcdir)/'`StringConvTest.cpp StringConvTest-StringConvTest.obj: StringConvTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(StringConvTest_CXXFLAGS) $(CXXFLAGS) -MT StringConvTest-StringConvTest.obj -MD -MP -MF $(DEPDIR)/StringConvTest-StringConvTest.Tpo -c -o StringConvTest-StringConvTest.obj `if test -f 'StringConvTest.cpp'; then $(CYGPATH_W) 'StringConvTest.cpp'; else $(CYGPATH_W) '$(srcdir)/StringConvTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/StringConvTest-StringConvTest.Tpo $(DEPDIR)/StringConvTest-StringConvTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='StringConvTest.cpp' object='StringConvTest-StringConvTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(StringConvTest_CXXFLAGS) $(CXXFLAGS) -c -o StringConvTest-StringConvTest.obj `if test -f 'StringConvTest.cpp'; then $(CYGPATH_W) 'StringConvTest.cpp'; else $(CYGPATH_W) '$(srcdir)/StringConvTest.cpp'; fi` ThreadTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ThreadTest_CXXFLAGS) $(CXXFLAGS) -MT ThreadTest-Test.o -MD -MP -MF $(DEPDIR)/ThreadTest-Test.Tpo -c -o ThreadTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ThreadTest-Test.Tpo $(DEPDIR)/ThreadTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ThreadTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ThreadTest_CXXFLAGS) $(CXXFLAGS) -c -o ThreadTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp ThreadTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ThreadTest_CXXFLAGS) $(CXXFLAGS) -MT ThreadTest-Test.obj -MD -MP -MF $(DEPDIR)/ThreadTest-Test.Tpo -c -o ThreadTest-Test.obj `if test -f 
'$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ThreadTest-Test.Tpo $(DEPDIR)/ThreadTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ThreadTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ThreadTest_CXXFLAGS) $(CXXFLAGS) -c -o ThreadTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` ThreadTest-ThreadTest.o: ThreadTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ThreadTest_CXXFLAGS) $(CXXFLAGS) -MT ThreadTest-ThreadTest.o -MD -MP -MF $(DEPDIR)/ThreadTest-ThreadTest.Tpo -c -o ThreadTest-ThreadTest.o `test -f 'ThreadTest.cpp' || echo '$(srcdir)/'`ThreadTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ThreadTest-ThreadTest.Tpo $(DEPDIR)/ThreadTest-ThreadTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ThreadTest.cpp' object='ThreadTest-ThreadTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ThreadTest_CXXFLAGS) $(CXXFLAGS) -c -o ThreadTest-ThreadTest.o `test -f 'ThreadTest.cpp' || echo '$(srcdir)/'`ThreadTest.cpp ThreadTest-ThreadTest.obj: ThreadTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ThreadTest_CXXFLAGS) $(CXXFLAGS) -MT ThreadTest-ThreadTest.obj -MD -MP -MF $(DEPDIR)/ThreadTest-ThreadTest.Tpo -c -o ThreadTest-ThreadTest.obj `if test -f 'ThreadTest.cpp'; then $(CYGPATH_W) 'ThreadTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ThreadTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ThreadTest-ThreadTest.Tpo $(DEPDIR)/ThreadTest-ThreadTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ThreadTest.cpp' object='ThreadTest-ThreadTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ThreadTest_CXXFLAGS) $(CXXFLAGS) -c -o ThreadTest-ThreadTest.obj `if test -f 'ThreadTest.cpp'; then $(CYGPATH_W) 'ThreadTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ThreadTest.cpp'; fi` URLTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(URLTest_CXXFLAGS) $(CXXFLAGS) -MT URLTest-Test.o -MD -MP -MF $(DEPDIR)/URLTest-Test.Tpo -c -o URLTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/URLTest-Test.Tpo $(DEPDIR)/URLTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='URLTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(URLTest_CXXFLAGS) $(CXXFLAGS) -c -o URLTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp URLTest-Test.obj: 
$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(URLTest_CXXFLAGS) $(CXXFLAGS) -MT URLTest-Test.obj -MD -MP -MF $(DEPDIR)/URLTest-Test.Tpo -c -o URLTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/URLTest-Test.Tpo $(DEPDIR)/URLTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='URLTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(URLTest_CXXFLAGS) $(CXXFLAGS) -c -o URLTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` URLTest-URLTest.o: URLTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(URLTest_CXXFLAGS) $(CXXFLAGS) -MT URLTest-URLTest.o -MD -MP -MF $(DEPDIR)/URLTest-URLTest.Tpo -c -o URLTest-URLTest.o `test -f 'URLTest.cpp' || echo '$(srcdir)/'`URLTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/URLTest-URLTest.Tpo $(DEPDIR)/URLTest-URLTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='URLTest.cpp' object='URLTest-URLTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(URLTest_CXXFLAGS) $(CXXFLAGS) -c -o URLTest-URLTest.o `test -f 'URLTest.cpp' || echo '$(srcdir)/'`URLTest.cpp URLTest-URLTest.obj: URLTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(URLTest_CXXFLAGS) $(CXXFLAGS) -MT URLTest-URLTest.obj -MD -MP -MF $(DEPDIR)/URLTest-URLTest.Tpo -c -o URLTest-URLTest.obj `if test -f 'URLTest.cpp'; then $(CYGPATH_W) 'URLTest.cpp'; else $(CYGPATH_W) '$(srcdir)/URLTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/URLTest-URLTest.Tpo $(DEPDIR)/URLTest-URLTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='URLTest.cpp' object='URLTest-URLTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(URLTest_CXXFLAGS) $(CXXFLAGS) -c -o URLTest-URLTest.obj `if test -f 'URLTest.cpp'; then $(CYGPATH_W) 'URLTest.cpp'; else $(CYGPATH_W) '$(srcdir)/URLTest.cpp'; fi` UserConfigTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(UserConfigTest_CXXFLAGS) $(CXXFLAGS) -MT UserConfigTest-Test.o -MD -MP -MF $(DEPDIR)/UserConfigTest-Test.Tpo -c -o UserConfigTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/UserConfigTest-Test.Tpo $(DEPDIR)/UserConfigTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='UserConfigTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(UserConfigTest_CXXFLAGS) 
$(CXXFLAGS) -c -o UserConfigTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp UserConfigTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(UserConfigTest_CXXFLAGS) $(CXXFLAGS) -MT UserConfigTest-Test.obj -MD -MP -MF $(DEPDIR)/UserConfigTest-Test.Tpo -c -o UserConfigTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/UserConfigTest-Test.Tpo $(DEPDIR)/UserConfigTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='UserConfigTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(UserConfigTest_CXXFLAGS) $(CXXFLAGS) -c -o UserConfigTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` UserConfigTest-UserConfigTest.o: UserConfigTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(UserConfigTest_CXXFLAGS) $(CXXFLAGS) -MT UserConfigTest-UserConfigTest.o -MD -MP -MF $(DEPDIR)/UserConfigTest-UserConfigTest.Tpo -c -o UserConfigTest-UserConfigTest.o `test -f 'UserConfigTest.cpp' || echo '$(srcdir)/'`UserConfigTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/UserConfigTest-UserConfigTest.Tpo $(DEPDIR)/UserConfigTest-UserConfigTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='UserConfigTest.cpp' object='UserConfigTest-UserConfigTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(UserConfigTest_CXXFLAGS) $(CXXFLAGS) -c -o UserConfigTest-UserConfigTest.o `test -f 'UserConfigTest.cpp' || echo '$(srcdir)/'`UserConfigTest.cpp UserConfigTest-UserConfigTest.obj: UserConfigTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(UserConfigTest_CXXFLAGS) $(CXXFLAGS) -MT UserConfigTest-UserConfigTest.obj -MD -MP -MF $(DEPDIR)/UserConfigTest-UserConfigTest.Tpo -c -o UserConfigTest-UserConfigTest.obj `if test -f 'UserConfigTest.cpp'; then $(CYGPATH_W) 'UserConfigTest.cpp'; else $(CYGPATH_W) '$(srcdir)/UserConfigTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/UserConfigTest-UserConfigTest.Tpo $(DEPDIR)/UserConfigTest-UserConfigTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='UserConfigTest.cpp' object='UserConfigTest-UserConfigTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(UserConfigTest_CXXFLAGS) $(CXXFLAGS) -c -o UserConfigTest-UserConfigTest.obj `if test -f 'UserConfigTest.cpp'; then $(CYGPATH_W) 'UserConfigTest.cpp'; else $(CYGPATH_W) '$(srcdir)/UserConfigTest.cpp'; fi` UserTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(UserTest_CXXFLAGS) $(CXXFLAGS) -MT UserTest-Test.o -MD -MP -MF $(DEPDIR)/UserTest-Test.Tpo -c -o UserTest-Test.o `test -f 
'$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/UserTest-Test.Tpo $(DEPDIR)/UserTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='UserTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(UserTest_CXXFLAGS) $(CXXFLAGS) -c -o UserTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp UserTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(UserTest_CXXFLAGS) $(CXXFLAGS) -MT UserTest-Test.obj -MD -MP -MF $(DEPDIR)/UserTest-Test.Tpo -c -o UserTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/UserTest-Test.Tpo $(DEPDIR)/UserTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='UserTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(UserTest_CXXFLAGS) $(CXXFLAGS) -c -o UserTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` UserTest-UserTest.o: UserTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(UserTest_CXXFLAGS) $(CXXFLAGS) -MT UserTest-UserTest.o -MD -MP -MF $(DEPDIR)/UserTest-UserTest.Tpo -c -o UserTest-UserTest.o `test -f 'UserTest.cpp' || echo '$(srcdir)/'`UserTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/UserTest-UserTest.Tpo $(DEPDIR)/UserTest-UserTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='UserTest.cpp' object='UserTest-UserTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(UserTest_CXXFLAGS) $(CXXFLAGS) -c -o UserTest-UserTest.o `test -f 'UserTest.cpp' || echo '$(srcdir)/'`UserTest.cpp UserTest-UserTest.obj: UserTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(UserTest_CXXFLAGS) $(CXXFLAGS) -MT UserTest-UserTest.obj -MD -MP -MF $(DEPDIR)/UserTest-UserTest.Tpo -c -o UserTest-UserTest.obj `if test -f 'UserTest.cpp'; then $(CYGPATH_W) 'UserTest.cpp'; else $(CYGPATH_W) '$(srcdir)/UserTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/UserTest-UserTest.Tpo $(DEPDIR)/UserTest-UserTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='UserTest.cpp' object='UserTest-UserTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(UserTest_CXXFLAGS) $(CXXFLAGS) -c -o UserTest-UserTest.obj `if test -f 'UserTest.cpp'; then $(CYGPATH_W) 'UserTest.cpp'; else $(CYGPATH_W) '$(srcdir)/UserTest.cpp'; fi` WatchdogTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(WatchdogTest_CXXFLAGS) $(CXXFLAGS) -MT WatchdogTest-Test.o -MD -MP -MF $(DEPDIR)/WatchdogTest-Test.Tpo -c -o WatchdogTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/WatchdogTest-Test.Tpo $(DEPDIR)/WatchdogTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='WatchdogTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(WatchdogTest_CXXFLAGS) $(CXXFLAGS) -c -o WatchdogTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp WatchdogTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(WatchdogTest_CXXFLAGS) $(CXXFLAGS) -MT WatchdogTest-Test.obj -MD -MP -MF $(DEPDIR)/WatchdogTest-Test.Tpo -c -o WatchdogTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/WatchdogTest-Test.Tpo $(DEPDIR)/WatchdogTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='WatchdogTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(WatchdogTest_CXXFLAGS) $(CXXFLAGS) -c -o WatchdogTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` WatchdogTest-WatchdogTest.o: WatchdogTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(WatchdogTest_CXXFLAGS) $(CXXFLAGS) -MT WatchdogTest-WatchdogTest.o -MD -MP -MF $(DEPDIR)/WatchdogTest-WatchdogTest.Tpo -c -o WatchdogTest-WatchdogTest.o `test -f 'WatchdogTest.cpp' || echo '$(srcdir)/'`WatchdogTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/WatchdogTest-WatchdogTest.Tpo $(DEPDIR)/WatchdogTest-WatchdogTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='WatchdogTest.cpp' object='WatchdogTest-WatchdogTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(WatchdogTest_CXXFLAGS) $(CXXFLAGS) -c -o WatchdogTest-WatchdogTest.o `test -f 'WatchdogTest.cpp' || echo '$(srcdir)/'`WatchdogTest.cpp WatchdogTest-WatchdogTest.obj: WatchdogTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(WatchdogTest_CXXFLAGS) $(CXXFLAGS) -MT WatchdogTest-WatchdogTest.obj -MD -MP -MF $(DEPDIR)/WatchdogTest-WatchdogTest.Tpo -c -o WatchdogTest-WatchdogTest.obj `if test -f 'WatchdogTest.cpp'; then $(CYGPATH_W) 'WatchdogTest.cpp'; else $(CYGPATH_W) '$(srcdir)/WatchdogTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/WatchdogTest-WatchdogTest.Tpo $(DEPDIR)/WatchdogTest-WatchdogTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='WatchdogTest.cpp' object='WatchdogTest-WatchdogTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) 
@AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(WatchdogTest_CXXFLAGS) $(CXXFLAGS) -c -o WatchdogTest-WatchdogTest.obj `if test -f 'WatchdogTest.cpp'; then $(CYGPATH_W) 'WatchdogTest.cpp'; else $(CYGPATH_W) '$(srcdir)/WatchdogTest.cpp'; fi` XMLNodeTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XMLNodeTest_CXXFLAGS) $(CXXFLAGS) -MT XMLNodeTest-Test.o -MD -MP -MF $(DEPDIR)/XMLNodeTest-Test.Tpo -c -o XMLNodeTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/XMLNodeTest-Test.Tpo $(DEPDIR)/XMLNodeTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='XMLNodeTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XMLNodeTest_CXXFLAGS) $(CXXFLAGS) -c -o XMLNodeTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp XMLNodeTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XMLNodeTest_CXXFLAGS) $(CXXFLAGS) -MT XMLNodeTest-Test.obj -MD -MP -MF $(DEPDIR)/XMLNodeTest-Test.Tpo -c -o XMLNodeTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/XMLNodeTest-Test.Tpo $(DEPDIR)/XMLNodeTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='XMLNodeTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XMLNodeTest_CXXFLAGS) $(CXXFLAGS) -c -o XMLNodeTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` XMLNodeTest-XMLNodeTest.o: XMLNodeTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XMLNodeTest_CXXFLAGS) $(CXXFLAGS) -MT XMLNodeTest-XMLNodeTest.o -MD -MP -MF $(DEPDIR)/XMLNodeTest-XMLNodeTest.Tpo -c -o XMLNodeTest-XMLNodeTest.o `test -f 'XMLNodeTest.cpp' || echo '$(srcdir)/'`XMLNodeTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/XMLNodeTest-XMLNodeTest.Tpo $(DEPDIR)/XMLNodeTest-XMLNodeTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XMLNodeTest.cpp' object='XMLNodeTest-XMLNodeTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XMLNodeTest_CXXFLAGS) $(CXXFLAGS) -c -o XMLNodeTest-XMLNodeTest.o `test -f 'XMLNodeTest.cpp' || echo '$(srcdir)/'`XMLNodeTest.cpp XMLNodeTest-XMLNodeTest.obj: XMLNodeTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XMLNodeTest_CXXFLAGS) $(CXXFLAGS) -MT XMLNodeTest-XMLNodeTest.obj -MD -MP -MF $(DEPDIR)/XMLNodeTest-XMLNodeTest.Tpo -c -o XMLNodeTest-XMLNodeTest.obj `if test -f 'XMLNodeTest.cpp'; then $(CYGPATH_W) 'XMLNodeTest.cpp'; else 
$(CYGPATH_W) '$(srcdir)/XMLNodeTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/XMLNodeTest-XMLNodeTest.Tpo $(DEPDIR)/XMLNodeTest-XMLNodeTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XMLNodeTest.cpp' object='XMLNodeTest-XMLNodeTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XMLNodeTest_CXXFLAGS) $(CXXFLAGS) -c -o XMLNodeTest-XMLNodeTest.obj `if test -f 'XMLNodeTest.cpp'; then $(CYGPATH_W) 'XMLNodeTest.cpp'; else $(CYGPATH_W) '$(srcdir)/XMLNodeTest.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-checkPROGRAMS clean-generic clean-libtool \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-TESTS check-am clean \ clean-checkPROGRAMS clean-generic clean-libtool ctags \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT:
nordugrid-arc-5.4.2/src/hed/libs/common/test/MysqlWrapperTest.cpp
#ifdef HAVE_CONFIG_H
#include
#endif
#include
#include
class MysqlWrapperTest : public CppUnit::TestFixture {
  CPPUNIT_TEST_SUITE(MysqlWrapperTest);
  CPPUNIT_TEST(TestDB);
  CPPUNIT_TEST_SUITE_END();
public:
  void TestDB();
};

void MysqlWrapperTest::TestDB() {
  /*******************************************************
   * this test only runs if you have a database set up   *
   * with the parameters specified here                  *
   *******************************************************/
  std::string server = "127.0.0.1";
  int port = 3306;
  std::string dbname = "voms_myvo";
  std::string user = "root";
  std::string password = "aa1122";
  Arc::MySQLDatabase mydb(server, port);
  bool res = false;
  res = mydb.connect(dbname, user, password);
  if (res == false)
    return;
  Arc::MySQLQuery myquery(&mydb);
  CPPUNIT_ASSERT(mydb.isconnected());
  std::string querystr = "select * from roles";
  CPPUNIT_ASSERT(myquery.execute(querystr));
  int num_rows, num_columns;
  num_rows = myquery.get_num_rows();
  num_columns = myquery.get_num_colums();
  CPPUNIT_ASSERT(num_rows > 0);
  CPPUNIT_ASSERT(num_columns > 0);
  Arc::QueryRowResult strlist;
  strlist = myquery.get_row();
  CPPUNIT_ASSERT(strlist.size() > 0);
  //for (int i = 0; i < strlist.size(); i++)
  //std::cout << "The value of " << i << "th field :" << strlist[i] << std::endl;
  std::string str1, str2;
  std::string fieldname = "role";
  str1 = myquery.get_row_field(0, fieldname);
  CPPUNIT_ASSERT(str1.length() > 0);
  fieldname = "rid";
  str2 = myquery.get_row_field(0, fieldname);
  CPPUNIT_ASSERT(str2.length() > 0);
  //std::cout << "Number of rows: " << num_rows << " Number of colums: " << num_colums << std::endl;
  //std::cout << str1 << " " << str2 << std::endl;
  //Get role, the sql sentence can be put in some independent place, and then we
  //can adapt to different database schema without changing the code itself;
  std::string role = "'VO-Admin'";
  std::string userid = "1";
  querystr = "SELECT groups.dn, role FROM groups, m LEFT JOIN roles ON roles.rid = m.rid WHERE groups.gid = m.gid AND roles.role =" + role + "AND m.userid =" + userid;
  CPPUNIT_ASSERT(myquery.execute(querystr));
  Arc::QueryArrayResult strarray;
  num_rows = myquery.get_num_rows();
  CPPUNIT_ASSERT(num_rows > 0);
  //std::cout << "Get " << num_rows << " rows" << std::endl;
  for (int i = 0; i < num_rows; i++) {
    strlist = myquery.get_row();
    strarray.push_back(strlist);
  }
  querystr = "SELECT groups.dn, role FROM groups, m LEFT JOIN roles ON roles.rid = m.rid WHERE groups.gid = m.gid AND roles.role = ?
AND m.userid = ?"; Arc::QueryArrayResult strarray1; Arc::MySQLQuery myquery1(&mydb); std::vector args; args.push_back(role); args.push_back(userid); CPPUNIT_ASSERT(myquery1.get_array(querystr, strarray1, args)); //std::cout << "Get an result array with " << strarray1.size() << " rows" << std::endl; } CPPUNIT_TEST_SUITE_REGISTRATION(MysqlWrapperTest); nordugrid-arc-5.4.2/src/hed/libs/common/test/PaxHeaders.7502/FileAccessTest.cpp0000644000000000000000000000012312016664644025301 xustar000000000000000026 mtime=1346070948.43109 27 atime=1513200574.857706 30 ctime=1513200658.909734151 nordugrid-arc-5.4.2/src/hed/libs/common/test/FileAccessTest.cpp0000644000175000002070000001303612016664644025352 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #ifndef WIN32 #include #endif #include #include #include #include #include class FileAccessTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(FileAccessTest); #ifndef WIN32 CPPUNIT_TEST(TestOpenWriteReadStat); CPPUNIT_TEST(TestCopy); CPPUNIT_TEST(TestRename); CPPUNIT_TEST(TestDir); CPPUNIT_TEST(TestSeekAllocate); #endif CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); #ifndef WIN32 void TestOpenWriteReadStat(); void TestCopy(); void TestRename(); void TestDir(); void TestSeekAllocate(); #endif private: uid_t uid; gid_t gid; std::string testroot; }; void FileAccessTest::setUp() { CPPUNIT_ASSERT(Arc::TmpDirCreate(testroot)); CPPUNIT_ASSERT(!testroot.empty()); #ifndef WIN32 if(getuid() == 0) { struct passwd* pwd = getpwnam("nobody"); CPPUNIT_ASSERT(pwd); uid = pwd->pw_uid; gid = pwd->pw_gid; CPPUNIT_ASSERT_EQUAL(0,::chmod(testroot.c_str(),0777)); } else { uid = getuid(); gid = getgid(); } #else uid = 0; gid = 0; #endif Arc::FileAccess::testtune(); } void FileAccessTest::tearDown() { Arc::DirDelete(testroot); } #ifndef WIN32 void FileAccessTest::TestOpenWriteReadStat() { Arc::FileAccess fa; std::string testfile = testroot+"/file1"; std::string testdata = "test"; CPPUNIT_ASSERT(fa.fa_setuid(uid,gid)); CPPUNIT_ASSERT(fa.fa_open(testfile,O_WRONLY|O_CREAT|O_EXCL,0600)); CPPUNIT_ASSERT_EQUAL((int)testdata.length(),(int)fa.fa_write(testdata.c_str(),testdata.length())); CPPUNIT_ASSERT(fa.fa_close()); struct stat st; CPPUNIT_ASSERT_EQUAL(0,::stat(testfile.c_str(),&st)); CPPUNIT_ASSERT_EQUAL((int)testdata.length(),(int)st.st_size); CPPUNIT_ASSERT_EQUAL((int)uid,(int)st.st_uid); // Group ownership of a file is not guaranteed to be gid of user proces. 
// This is especially true on MAC OSX: // https://bugzilla.nordugrid.org/show_bug.cgi?id=2089#c3 //CPPUNIT_ASSERT_EQUAL(gid,st.st_gid); CPPUNIT_ASSERT_EQUAL(0600,(int)(st.st_mode & 0777)); CPPUNIT_ASSERT(fa.fa_open(testfile,O_RDONLY,0)); char buf[16]; struct stat st2; CPPUNIT_ASSERT(fa.fa_fstat(st2)); CPPUNIT_ASSERT_EQUAL((int)testdata.length(),(int)fa.fa_read(buf,sizeof(buf))); CPPUNIT_ASSERT(fa.fa_close()); std::string testdata2(buf,testdata.length()); CPPUNIT_ASSERT_EQUAL(testdata,testdata2); CPPUNIT_ASSERT_EQUAL(st.st_mode,st2.st_mode); CPPUNIT_ASSERT_EQUAL(st.st_uid,st2.st_uid); CPPUNIT_ASSERT_EQUAL(st.st_gid,st2.st_gid); CPPUNIT_ASSERT_EQUAL(st.st_size,st2.st_size); CPPUNIT_ASSERT(fa.fa_stat(testfile,st2)); CPPUNIT_ASSERT_EQUAL(st.st_mode,st2.st_mode); CPPUNIT_ASSERT_EQUAL(st.st_uid,st2.st_uid); CPPUNIT_ASSERT_EQUAL(st.st_gid,st2.st_gid); CPPUNIT_ASSERT_EQUAL(st.st_size,st2.st_size); } void FileAccessTest::TestCopy() { Arc::FileAccess fa; std::string testfile1 = testroot+"/copyfile1"; std::string testfile2 = testroot+"/copyfile2"; std::string testdata = "copytest"; CPPUNIT_ASSERT(fa.fa_setuid(uid,gid)); CPPUNIT_ASSERT(fa.fa_open(testfile1,O_WRONLY|O_CREAT|O_EXCL,0600)); CPPUNIT_ASSERT_EQUAL((int)testdata.length(),(int)fa.fa_write(testdata.c_str(),testdata.length())); CPPUNIT_ASSERT(fa.fa_close()); CPPUNIT_ASSERT(fa.fa_copy(testfile1,testfile2,0600)); CPPUNIT_ASSERT(fa.fa_open(testfile2,O_RDONLY,0)); char buf[16]; CPPUNIT_ASSERT_EQUAL((int)testdata.length(),(int)fa.fa_read(buf,sizeof(buf))); CPPUNIT_ASSERT(fa.fa_close()); std::string testdata2(buf,testdata.length()); CPPUNIT_ASSERT_EQUAL(testdata,testdata2); } void FileAccessTest::TestRename() { Arc::FileAccess fa; std::string oldfile = testroot+"/oldfile"; std::string newfile = testroot+"/newfile"; std::string testdata = "renametest"; CPPUNIT_ASSERT(fa.fa_setuid(uid,gid)); CPPUNIT_ASSERT(fa.fa_open(oldfile,O_WRONLY|O_CREAT|O_EXCL,0600)); CPPUNIT_ASSERT_EQUAL((int)testdata.length(),(int)fa.fa_write(testdata.c_str(),testdata.length())); CPPUNIT_ASSERT(fa.fa_close()); CPPUNIT_ASSERT(fa.fa_rename(oldfile, newfile)); struct stat st; CPPUNIT_ASSERT(fa.fa_stat(newfile,st)); CPPUNIT_ASSERT(!fa.fa_stat(oldfile,st)); } void FileAccessTest::TestDir() { std::string testdir1 = testroot + "/dir1/dir2/dir3"; std::string testdir2 = testroot + "/dir1/dir2/dir3/dir4"; std::string testdir3 = testroot + "/dir1"; Arc::FileAccess fa; CPPUNIT_ASSERT(fa.fa_setuid(uid,gid)); CPPUNIT_ASSERT(!fa.fa_mkdir(testdir1,0700)); CPPUNIT_ASSERT(fa.fa_mkdirp(testdir1,0700)); CPPUNIT_ASSERT(fa.fa_mkdir(testdir2,0700)); CPPUNIT_ASSERT(fa.fa_opendir(testdir1)); std::string name; while(true) { CPPUNIT_ASSERT(fa.fa_readdir(name)); if(name == ".") continue; if(name == "..") continue; break; } CPPUNIT_ASSERT(fa.fa_closedir()); CPPUNIT_ASSERT_EQUAL(testdir2.substr(testdir1.length()+1),name); CPPUNIT_ASSERT(!fa.fa_rmdir(testdir3)); CPPUNIT_ASSERT(fa.fa_rmdir(testdir2)); CPPUNIT_ASSERT(fa.fa_rmdirr(testdir3)); } void FileAccessTest::TestSeekAllocate() { Arc::FileAccess fa; std::string testfile = testroot+"/file3"; CPPUNIT_ASSERT(fa.fa_setuid(uid,gid)); CPPUNIT_ASSERT(fa.fa_open(testfile,O_WRONLY|O_CREAT|O_EXCL,0600)); CPPUNIT_ASSERT_EQUAL((int)4096,(int)fa.fa_fallocate(4096)); CPPUNIT_ASSERT_EQUAL((int)0,(int)fa.fa_lseek(0,SEEK_SET)); CPPUNIT_ASSERT_EQUAL((int)4096,(int)fa.fa_lseek(0,SEEK_END)); CPPUNIT_ASSERT(fa.fa_close()); } #endif CPPUNIT_TEST_SUITE_REGISTRATION(FileAccessTest); 
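Each of the test sources in this directory only defines a fixture and registers it with CPPUNIT_TEST_SUITE_REGISTRATION; the Makefile rules earlier in this fragment link every check program against a shared $(top_srcdir)/src/Test.cpp (compiled as XMLNodeTest-Test.o and so on), which drives the registered suites. The following is only an illustrative sketch of such a registry-driven CppUnit runner under the stock CppUnit text-runner API; it is not the actual ARC src/Test.cpp.

// Sketch of a registry-driven CppUnit runner (assumption: stock CppUnit API;
// this is not ARC's src/Test.cpp). Fixtures registered elsewhere through
// CPPUNIT_TEST_SUITE_REGISTRATION are picked up automatically at link time.
#include <cppunit/extensions/TestFactoryRegistry.h>
#include <cppunit/ui/text/TestRunner.h>

int main() {
  CppUnit::TextUi::TestRunner runner;
  // Collect every suite the linked test objects registered with the global registry.
  runner.addTest(CppUnit::TestFactoryRegistry::getRegistry().makeTest());
  // run() prints progress and results; it returns true when all tests pass.
  bool ok = runner.run("", false);
  return ok ? 0 : 1;
}

In that arrangement each check_PROGRAM (FileAccessTest, XMLNodeTest, Base64Test, ...) is simply one fixture translation unit plus this shared runner object, which is exactly what the XMLNodeTest-Test.o compile rules at the top of the Makefile fragment express.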
nordugrid-arc-5.4.2/src/hed/libs/common/test/PaxHeaders.7502/ProfileTest.cpp0000644000000000000000000000012311741501077024672 xustar000000000000000026 mtime=1334215231.35079 27 atime=1513200574.854706 30 ctime=1513200658.912734188 nordugrid-arc-5.4.2/src/hed/libs/common/test/ProfileTest.cpp0000644000175000002070000011332211741501077024742 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #define TESTSINGLE #define TESTATTRIBUTE #define TESTMULTI #define TESTMULTIELEMENT #define TESTMULTISECTION #define TESTTOKENENABLES #define TESTDEFAULTVALUE #define TESTOTHERS class ProfileTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(ProfileTest); #ifdef TESTSINGLE CPPUNIT_TEST(TestSingle); #endif #ifdef TESTATTRIBUTE CPPUNIT_TEST(TestAttribute); #endif #ifdef TESTMULTI CPPUNIT_TEST(TestMulti); #endif #ifdef TESTMULTIELEMENT CPPUNIT_TEST(TestMultiElement); #endif #ifdef TESTMULTISECTION CPPUNIT_TEST(TestMultiSection); #endif #ifdef TESTTOKENENABLES CPPUNIT_TEST(TestTokenEnables); #endif #ifdef TESTDEFAULTVALUE CPPUNIT_TEST(TestDefaultValue); #endif #ifdef TESTOTHERS CPPUNIT_TEST(TestOthers); #endif CPPUNIT_TEST_SUITE_END(); public: ProfileTest() : p(""), first("first"), second("second"), common("common"), special("special") {} void setUp() {} void tearDown() {} #ifdef TESTSINGLE void TestSingle(); #endif #ifdef TESTATTRIBUTE void TestAttribute(); #endif #ifdef TESTMULTI void TestMulti(); #endif #ifdef TESTMULTIELEMENT void TestMultiElement(); #endif #ifdef TESTMULTISECTION void TestMultiSection(); #endif #ifdef TESTTOKENENABLES void TestTokenEnables(); #endif #ifdef TESTDEFAULTVALUE void TestDefaultValue(); #endif #ifdef TESTOTHERS void TestOthers(); #endif private: Arc::Profile p; Arc::IniConfig i; const std::string first, second, common, special; void ClearNodes(); }; void ProfileTest::ClearNodes() { while (p.Size() > 0) { p.Child().Destroy(); } while (i.Size() > 0) { i.Child().Destroy(); } } #ifdef TESTSINGLE void ProfileTest::TestSingle() { std::stringstream ps; ps << "" "" "" "" "" "" "" "" ""; p.ReadFromStream(ps); i.NewChild(first); i[first].NewChild("bara") = first; i[first].NewChild("barc") = first; i.NewChild(second); i[second].NewChild("bara") = second; i[second].NewChild("barb") = second; i[second].NewChild("barc") = second; /* Config: first second first default-bard */ Arc::Config c; p.Evaluate(c, i); CPPUNIT_ASSERT_EQUAL(1, c.Size()); CPPUNIT_ASSERT_EQUAL(0, c.AttributesSize()); CPPUNIT_ASSERT_EQUAL(4, c.Child(0).Size()); CPPUNIT_ASSERT_EQUAL(0, c.Child(0).AttributesSize()); CPPUNIT_ASSERT_EQUAL(0, c.Child(0).Child(0).Size()); CPPUNIT_ASSERT_EQUAL(0, c.Child(0).Child(0).AttributesSize()); CPPUNIT_ASSERT_EQUAL(first, (std::string)c.Child(0).Child(0)); CPPUNIT_ASSERT_EQUAL(0, c.Child(0).Child(1).Size()); CPPUNIT_ASSERT_EQUAL(0, c.Child(0).Child(1).AttributesSize()); CPPUNIT_ASSERT_EQUAL(second, (std::string)c.Child(0).Child(1)); CPPUNIT_ASSERT_EQUAL(0, c.Child(0).Child(2).Size()); CPPUNIT_ASSERT_EQUAL(0, c.Child(0).Child(2).AttributesSize()); CPPUNIT_ASSERT_EQUAL(first, (std::string)c.Child(0).Child(2)); CPPUNIT_ASSERT_EQUAL(0, c.Child(0).Child(3).Size()); CPPUNIT_ASSERT_EQUAL(2, c.Child(0).Child(3).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"default-bard", (std::string)c.Child(0).Child(3)); CPPUNIT_ASSERT_EQUAL((std::string)"testa", (std::string)c.Child(0).Child(3).Attribute("testa")); CPPUNIT_ASSERT_EQUAL((std::string)"testb", (std::string)c.Child(0).Child(3).Attribute("testb")); ClearNodes(); } #endif 
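The ProfileTest fixtures build their IniConfig inputs entirely in memory through the Arc XMLNode child API (NewChild, operator[], assignment, Child, Size) and then read the evaluated Config back with the same calls. A short standalone sketch of that pattern follows, using Arc::XMLNode directly rather than Arc::IniConfig; the <arc/XMLNode.h> install path is an assumption, since the include targets are stripped in this dump.

// Illustrative sketch only: the in-memory node-building pattern the fixtures
// above use for their IniConfig inputs. Header path assumed, not taken from
// the (stripped) includes in this archive.
#include <iostream>
#include <string>
#include <arc/XMLNode.h>   // assumed install location of Arc::XMLNode

int main() {
  std::string src = "<ini/>";
  Arc::XMLNode ini(src);                        // parse a root element from a string
  ini.NewChild("first");                        // corresponds to a [first] ini block
  ini["first"].NewChild("bara") = "value-a";    // element content set by assignment
  ini["first"].NewChild("barc") = "value-c";

  // Reading back mirrors the ProfileTest assertions.
  std::cout << ini.Size() << " top-level child" << std::endl;          // 1
  std::cout << (std::string)ini.Child(0).Child(0) << std::endl;        // value-a
  std::cout << (std::string)ini["first"]["barc"] << std::endl;         // value-c
  return 0;
}

The same access-by-index and cast-to-std::string idioms are what the assertions below (TestAttribute, TestMulti, and so on) rely on when checking the evaluated Config tree.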
#ifdef TESTATTRIBUTE void ProfileTest::TestAttribute() { std::stringstream ps; ps << "" "" ""; p.ReadFromStream(ps); i.NewChild(first); i[first].NewChild("foo") = first; i.NewChild(second); i[second].NewChild("foo") = second; /* Config: */ Arc::Config c; p.Evaluate(c, i); CPPUNIT_ASSERT_EQUAL(first, (std::string)c.Attribute("foo")); CPPUNIT_ASSERT_EQUAL(1, c.AttributesSize()); CPPUNIT_ASSERT_EQUAL(0, c.Size()); ClearNodes(); } #endif #ifdef TESTMULTI void ProfileTest::TestMulti() { std::stringstream ps; ps << "" "" "" "" "" ""; p.ReadFromStream(ps); i.NewChild(first); i[first].NewChild("foo") = first + "1"; i[first].NewChild("foo") = first + "2"; i[first].NewChild("foo") = first + "3"; i[first].NewChild("before") = "***"; i[first].NewChild("after") = "---"; i.NewChild(second); i[second].NewChild("foo") = second + "1"; i[second].NewChild("foo") = second + "2"; i[second].NewChild("foo") = second + "3"; /* Config: *** first1 first2 first3 --- */ Arc::Config c; p.Evaluate(c, i); CPPUNIT_ASSERT_EQUAL(first + "1", (std::string)c["foo"][0]); CPPUNIT_ASSERT_EQUAL(first + "2", (std::string)c["foo"][1]); CPPUNIT_ASSERT_EQUAL(first + "3", (std::string)c["foo"][2]); // Test ordering. CPPUNIT_ASSERT_MESSAGE("Ordering of nodes incorrect.", c.Child(0) == c["before"][0]); CPPUNIT_ASSERT_MESSAGE("Ordering of nodes incorrect.", c.Child(1) == c["foo"][0]); CPPUNIT_ASSERT_MESSAGE("Ordering of nodes incorrect.", c.Child(2) == c["foo"][1]); CPPUNIT_ASSERT_MESSAGE("Ordering of nodes incorrect.", c.Child(3) == c["foo"][2]); CPPUNIT_ASSERT_MESSAGE("Ordering of nodes incorrect.", c.Child(4) == c["after"][0]); CPPUNIT_ASSERT_EQUAL(0, c.AttributesSize()); CPPUNIT_ASSERT_EQUAL(0, c["foo"][1].AttributesSize()); CPPUNIT_ASSERT_EQUAL(5, c.Size()); ClearNodes(); } #endif #ifdef TESTMULTIELEMENT void ProfileTest::TestMultiElement() { std::stringstream ps; ps << "" "" "" "" "" "" "" "" "" "" "" "" "" ""; p.ReadFromStream(ps); i.NewChild(first); i[first].NewChild("baz") = first + "-multielement1"; i[first].NewChild("baz") = first + "-multielement2"; i[first].NewChild("baz") = first + "-multielement3"; i[first].NewChild("fox") = first + "-fox"; i[first].NewChild("geea") = first + "-geea"; i[first].NewChild("geeb") = first + "-geeb"; i.NewChild(second); i[second].NewChild("baz") = second + "-multielement1"; i[second].NewChild("baz") = second + "-multielement2"; i[second].NewChild("baz") = second + "-multielement3"; i[second].NewChild("fox") = second + "-fox"; i[second].NewChild("geec") = second + "-geec-attr"; /* * Config: first-fox first-geea first-geeb first-multielement1 first-fox first-geea first-geeb first-multielement2 first-fox first-geea first-geeb first-multielement3 */ Arc::Config c; p.Evaluate(c, i); CPPUNIT_ASSERT_EQUAL(3, c.Size()); CPPUNIT_ASSERT_EQUAL(0, c.AttributesSize()); CPPUNIT_ASSERT_EQUAL(2, c.Child(0).Size()); CPPUNIT_ASSERT_EQUAL(0, c.Child(0).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"foo", (std::string)c.Child(0).Name()); CPPUNIT_ASSERT_EQUAL(0, c.Child(0).Child(0).Size()); CPPUNIT_ASSERT_EQUAL(0, c.Child(0).Child(0).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"fox", (std::string)c.Child(0).Child(0).Name()); CPPUNIT_ASSERT_EQUAL((std::string)"first-fox", (std::string)c.Child(0).Child(0)); CPPUNIT_ASSERT_EQUAL(3, c.Child(0).Child(1).Size()); CPPUNIT_ASSERT_EQUAL(1, c.Child(0).Child(1).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"bar", (std::string)c.Child(0).Child(1).Name()); CPPUNIT_ASSERT_EQUAL((std::string)"geea", (std::string)c.Child(0).Child(1).Child(0).Name()); 
CPPUNIT_ASSERT_EQUAL((std::string)"first-geea", (std::string)c.Child(0).Child(1).Child(0)); CPPUNIT_ASSERT_EQUAL((std::string)"geeb", (std::string)c.Child(0).Child(1).Child(1).Name()); CPPUNIT_ASSERT_EQUAL((std::string)"first-geeb", (std::string)c.Child(0).Child(1).Child(1)); CPPUNIT_ASSERT_EQUAL((std::string)"baz", (std::string)c.Child(0).Child(1).Child(2).Name()); CPPUNIT_ASSERT_EQUAL((std::string)"first-multielement1", (std::string)c.Child(0).Child(1).Child(2)); CPPUNIT_ASSERT_EQUAL((std::string)"geec", (std::string)c.Child(0).Child(1).Attribute(0).Name()); CPPUNIT_ASSERT_EQUAL((std::string)"second-geec-attr", (std::string)c.Child(0).Child(1).Attribute(0)); CPPUNIT_ASSERT_EQUAL(2, c.Child(1).Size()); CPPUNIT_ASSERT_EQUAL(0, c.Child(1).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"foo", (std::string)c.Child(1).Name()); CPPUNIT_ASSERT_EQUAL(0, c.Child(1).Child(0).Size()); CPPUNIT_ASSERT_EQUAL(0, c.Child(1).Child(0).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"fox", (std::string)c.Child(1).Child(0).Name()); CPPUNIT_ASSERT_EQUAL((std::string)"first-fox", (std::string)c.Child(1).Child(0)); CPPUNIT_ASSERT_EQUAL(3, c.Child(1).Child(1).Size()); CPPUNIT_ASSERT_EQUAL(1, c.Child(1).Child(1).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"bar", (std::string)c.Child(1).Child(1).Name()); CPPUNIT_ASSERT_EQUAL((std::string)"geea", (std::string)c.Child(1).Child(1).Child(0).Name()); CPPUNIT_ASSERT_EQUAL((std::string)"first-geea", (std::string)c.Child(1).Child(1).Child(0)); CPPUNIT_ASSERT_EQUAL((std::string)"geeb", (std::string)c.Child(1).Child(1).Child(1).Name()); CPPUNIT_ASSERT_EQUAL((std::string)"first-geeb", (std::string)c.Child(1).Child(1).Child(1)); CPPUNIT_ASSERT_EQUAL((std::string)"baz", (std::string)c.Child(1).Child(1).Child(2).Name()); CPPUNIT_ASSERT_EQUAL((std::string)"first-multielement2", (std::string)c.Child(1).Child(1).Child(2)); CPPUNIT_ASSERT_EQUAL((std::string)"geec", (std::string)c.Child(1).Child(1).Attribute(0).Name()); CPPUNIT_ASSERT_EQUAL((std::string)"second-geec-attr", (std::string)c.Child(1).Child(1).Attribute(0)); CPPUNIT_ASSERT_EQUAL(2, c.Child(2).Size()); CPPUNIT_ASSERT_EQUAL(0, c.Child(2).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"foo", (std::string)c.Child(2).Name()); CPPUNIT_ASSERT_EQUAL(0, c.Child(2).Child(0).Size()); CPPUNIT_ASSERT_EQUAL(0, c.Child(2).Child(0).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"fox", (std::string)c.Child(2).Child(0).Name()); CPPUNIT_ASSERT_EQUAL((std::string)"first-fox", (std::string)c.Child(2).Child(0)); CPPUNIT_ASSERT_EQUAL(3, c.Child(2).Child(1).Size()); CPPUNIT_ASSERT_EQUAL(1, c.Child(2).Child(1).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"bar", (std::string)c.Child(2).Child(1).Name()); CPPUNIT_ASSERT_EQUAL((std::string)"geea", (std::string)c.Child(2).Child(1).Child(0).Name()); CPPUNIT_ASSERT_EQUAL((std::string)"first-geea", (std::string)c.Child(2).Child(1).Child(0)); CPPUNIT_ASSERT_EQUAL((std::string)"geeb", (std::string)c.Child(2).Child(1).Child(1).Name()); CPPUNIT_ASSERT_EQUAL((std::string)"first-geeb", (std::string)c.Child(2).Child(1).Child(1)); CPPUNIT_ASSERT_EQUAL((std::string)"baz", (std::string)c.Child(2).Child(1).Child(2).Name()); CPPUNIT_ASSERT_EQUAL((std::string)"first-multielement3", (std::string)c.Child(2).Child(1).Child(2)); CPPUNIT_ASSERT_EQUAL((std::string)"geec", (std::string)c.Child(2).Child(1).Attribute(0).Name()); CPPUNIT_ASSERT_EQUAL((std::string)"second-geec-attr", (std::string)c.Child(2).Child(1).Attribute(0)); ClearNodes(); } #endif #ifdef TESTMULTISECTION void 
ProfileTest::TestMultiSection() { std::stringstream ps; ps << "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" ""; p.ReadFromStream(ps); i.NewChild("default"); i["default"].NewChild("baza") = "default-baza-1"; i["default"].NewChild("baza") = "default-baza-2"; i["default"].NewChild("baza") = "default-baza-3"; i["default"].NewChild("bazb") = "default-bazb"; i["default"].NewChild("wija") = "default-wija"; i["default"].NewChild("wijb") = "default-wijb"; i["default"].NewChild("wijc") = "default-wijc"; i["default"].NewChild("wijd") = "default-wijd"; i["default"].NewChild("bazc") = "default-bazc"; i["default"].NewChild("geea") = "default-geea"; i.NewChild("multi-first"); i["multi-first"][0].NewChild("foo") = "1"; i["multi-first"][0].NewChild("baza") = "multi-first-1.1"; i["multi-first"][0].NewChild("baza") = "multi-first-1.2"; i["multi-first"][0].NewChild("baza") = "multi-first-1.3"; i["multi-first"][0].NewChild("geea") = "multi-first-geea-1"; i["multi-first"][0].NewChild("geeb") = "multi-first-geeb-1"; i["multi-first"][0].NewChild("wijc") = "multi-first-wijc-1"; i.NewChild("multi-first"); i["multi-first"][1].NewChild("foo"); // Included for coverage. i["multi-first"][1].NewChild("baza") = "multi-first-2.1"; i["multi-first"][1].NewChild("baza") = "multi-first-2.2"; i["multi-first"][1].NewChild("geea") = "multi-first-geea-2"; i.NewChild("multi-first"); i["multi-first"][2].NewChild("foo") = "3"; i["multi-first"][2].NewChild("foo") = "3-a"; i["multi-first"][2].NewChild("geea") = "multi-first-geea-3"; i["multi-first"][2].NewChild("wijc") = "multi-first-wijc-2"; i.NewChild("multi-first"); i["multi-first"][3].NewChild("foo") = "4"; i["multi-first"][3].NewChild("baza") = "multi-first-3.1"; i["multi-first"][3].NewChild("baza") = "multi-first-3.2"; i["multi-first"][3].NewChild("baza") = "multi-first-3.3"; i["multi-first"][3].NewChild("baza") = "multi-first-3.4"; i["multi-first"][3].NewChild("geeb") = "multi-first-geeb-2"; i["multi-first"][3].NewChild("wijc") = "multi-first-wijc-3"; i.NewChild("multi-second"); i["multi-second"].NewChild("foo") = "1-second"; i["multi-second"].NewChild("baza") = "multi-second-1.1"; i["multi-second"].NewChild("geea") = "multi-second-geea"; i["multi-second"].NewChild("wijd") = "multi-second-wijd-1"; /* * Config: 1 2 3 multi-first-1.1 multi-first-1.2 multi-first-1.3 default-bazb default-bazc default-wija default-wijb multi-first-wijc-1 multi-second-wijd-1 multi-first-2.1 multi-first-2.2 default-bazb default-bazc default-wija default-wijb default-wijc default-wijd default-baza-1 default-baza-2 default-baza-3 default-bazb default-bazc default-wija default-wijb multi-first-wijc-2 default-wijb multi-first-3.1 multi-first-3.2 multi-first-3.3 multi-first-3.4 default-bazb default-bazc default-wija default-wijb multi-first-wijc-3 default-wijd */ Arc::Config c; p.Evaluate(c, i); CPPUNIT_ASSERT_EQUAL(7, c.Size()); CPPUNIT_ASSERT_EQUAL(0, c.AttributesSize()); CPPUNIT_ASSERT_EQUAL(0, c["foo"][1].Size()); CPPUNIT_ASSERT_EQUAL(0, c["foo"][1].AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"1", (std::string)c.Child(0)); CPPUNIT_ASSERT_EQUAL((std::string)"3", (std::string)c.Child(1)); CPPUNIT_ASSERT_EQUAL((std::string)"4", (std::string)c.Child(2)); CPPUNIT_ASSERT_EQUAL(9, c.Child(3).Size()); CPPUNIT_ASSERT_EQUAL(2, c.Child(3).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"multi-first-1.1", (std::string)c.Child(3).Child(0)); CPPUNIT_ASSERT_EQUAL((std::string)"multi-first-1.2", (std::string)c.Child(3).Child(1)); CPPUNIT_ASSERT_EQUAL((std::string)"multi-first-1.3", 
(std::string)c.Child(3).Child(2)); CPPUNIT_ASSERT_EQUAL((std::string)"default-bazb", (std::string)c.Child(3).Child(3)); CPPUNIT_ASSERT_EQUAL((std::string)"default-bazc", (std::string)c.Child(3).Child(4)); CPPUNIT_ASSERT_EQUAL((std::string)"default-wija", (std::string)c.Child(3).Child(5)); CPPUNIT_ASSERT_EQUAL((std::string)"default-wijb", (std::string)c.Child(3).Child(6)); CPPUNIT_ASSERT_EQUAL((std::string)"multi-first-wijc-1", (std::string)c.Child(3).Child(7)); CPPUNIT_ASSERT_EQUAL((std::string)"default-wijd", (std::string)c.Child(3).Child(8)); CPPUNIT_ASSERT_EQUAL((std::string)"multi-first-geea-1", (std::string)c.Child(3).Attribute("geea")); CPPUNIT_ASSERT_EQUAL((std::string)"multi-first-geeb-1", (std::string)c.Child(3).Attribute("geeb")); CPPUNIT_ASSERT_EQUAL(8, c.Child(4).Size()); CPPUNIT_ASSERT_EQUAL(1, c.Child(4).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"multi-first-2.1", (std::string)c.Child(4).Child(0)); CPPUNIT_ASSERT_EQUAL((std::string)"multi-first-2.2", (std::string)c.Child(4).Child(1)); CPPUNIT_ASSERT_EQUAL((std::string)"default-bazb", (std::string)c.Child(4).Child(2)); CPPUNIT_ASSERT_EQUAL((std::string)"default-bazc", (std::string)c.Child(4).Child(3)); CPPUNIT_ASSERT_EQUAL((std::string)"default-wija", (std::string)c.Child(4).Child(4)); CPPUNIT_ASSERT_EQUAL((std::string)"default-wijb", (std::string)c.Child(4).Child(5)); CPPUNIT_ASSERT_EQUAL((std::string)"default-wijc", (std::string)c.Child(4).Child(6)); CPPUNIT_ASSERT_EQUAL((std::string)"default-wijd", (std::string)c.Child(4).Child(7)); CPPUNIT_ASSERT_EQUAL((std::string)"multi-first-geea-2", (std::string)c.Child(4).Attribute("geea")); CPPUNIT_ASSERT_EQUAL(9, c.Child(5).Size()); CPPUNIT_ASSERT_EQUAL(1, c.Child(5).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"default-baza-1", (std::string)c.Child(5).Child(0)); CPPUNIT_ASSERT_EQUAL((std::string)"default-baza-2", (std::string)c.Child(5).Child(1)); CPPUNIT_ASSERT_EQUAL((std::string)"default-baza-3", (std::string)c.Child(5).Child(2)); CPPUNIT_ASSERT_EQUAL((std::string)"default-bazb", (std::string)c.Child(5).Child(3)); CPPUNIT_ASSERT_EQUAL((std::string)"default-bazc", (std::string)c.Child(5).Child(4)); CPPUNIT_ASSERT_EQUAL((std::string)"default-wija", (std::string)c.Child(5).Child(5)); CPPUNIT_ASSERT_EQUAL((std::string)"default-wijb", (std::string)c.Child(5).Child(6)); CPPUNIT_ASSERT_EQUAL((std::string)"multi-first-wijc-2", (std::string)c.Child(5).Child(7)); CPPUNIT_ASSERT_EQUAL((std::string)"default-wijd", (std::string)c.Child(5).Child(8)); CPPUNIT_ASSERT_EQUAL((std::string)"multi-first-geea-3", (std::string)c.Child(5).Attribute("geea")); CPPUNIT_ASSERT_EQUAL(10, c.Child(6).Size()); CPPUNIT_ASSERT_EQUAL(2, c.Child(6).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"multi-first-3.1", (std::string)c.Child(6).Child(0)); CPPUNIT_ASSERT_EQUAL((std::string)"multi-first-3.2", (std::string)c.Child(6).Child(1)); CPPUNIT_ASSERT_EQUAL((std::string)"multi-first-3.3", (std::string)c.Child(6).Child(2)); CPPUNIT_ASSERT_EQUAL((std::string)"multi-first-3.4", (std::string)c.Child(6).Child(3)); CPPUNIT_ASSERT_EQUAL((std::string)"default-bazb", (std::string)c.Child(6).Child(4)); CPPUNIT_ASSERT_EQUAL((std::string)"default-bazc", (std::string)c.Child(6).Child(5)); CPPUNIT_ASSERT_EQUAL((std::string)"default-wija", (std::string)c.Child(6).Child(6)); CPPUNIT_ASSERT_EQUAL((std::string)"default-wijb", (std::string)c.Child(6).Child(7)); CPPUNIT_ASSERT_EQUAL((std::string)"multi-first-wijc-3", (std::string)c.Child(6).Child(8)); CPPUNIT_ASSERT_EQUAL((std::string)"default-wijd", 
(std::string)c.Child(6).Child(9)); CPPUNIT_ASSERT_EQUAL((std::string)"default-geea", (std::string)c.Child(6).Attribute("geea")); CPPUNIT_ASSERT_EQUAL((std::string)"multi-first-geeb-2", (std::string)c.Child(6).Attribute("geeb")); ClearNodes(); } #endif #ifdef TESTTOKENENABLES void ProfileTest::TestTokenEnables() { std::stringstream ps; ps << "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" ""; p.ReadFromStream(ps); i.NewChild(first); i[first].NewChild("baza") = first + "-baza"; i[first].NewChild("xbaza") = "enabled-xbaza"; i[first].NewChild("bazb") = first + "-bazb"; i[first].NewChild("xbazb") = "enabled-xbazb"; i[first].NewChild("xbazc") = "enabled-xbazc"; i[first].NewChild("bax") = "empty"; i[first].NewChild("xbax") = "empty"; i.NewChild(common); i[common].NewChild("geea") = common + "-geea"; i[common].NewChild("xgeea") = "enabled-xgeea"; i[common].NewChild("geeb") = "-geeb"; i[common].NewChild("xgeeb") = "enabled-xgeeb"; i[common].NewChild("xgeec") = "enabled-xgeec"; i[common].NewChild("hufa") = common + "-hufa"; i[common].NewChild("xhufa") = "enabled-xhufa"; i[common].NewChild("hufb") = common + "-hufb"; i[common].NewChild("xhufb") = "enabled-xhufb"; i[common].NewChild("xhufc") = "enabled-xhufc"; i[common].NewChild("blax") = "enabled-blax"; /* * Config: first-bara common-geea common-hufa enabled enabled enabled enabled */ Arc::Config c; p.Evaluate(c, i); CPPUNIT_ASSERT_EQUAL(0, c.AttributesSize()); CPPUNIT_ASSERT_EQUAL(7, c.Size()); CPPUNIT_ASSERT_EQUAL(1, c.Child(0).Size()); CPPUNIT_ASSERT_EQUAL(first + "-baza", (std::string)c.Child(0).Child(0)); CPPUNIT_ASSERT_EQUAL(1, c.Child(1).Size()); CPPUNIT_ASSERT_EQUAL(common + "-geea", (std::string)c.Child(1).Child(0)); CPPUNIT_ASSERT_EQUAL(1, c.Child(2).Size()); CPPUNIT_ASSERT_EQUAL(common + "-hufa", (std::string)c.Child(2).Child(0)); CPPUNIT_ASSERT_EQUAL(1, c.Child(3).Size()); CPPUNIT_ASSERT_EQUAL((std::string)"enabled-xbaza", (std::string)c.Child(3).Child(0)); CPPUNIT_ASSERT_EQUAL(1, c.Child(4).Size()); CPPUNIT_ASSERT_EQUAL((std::string)"enabled-xgeea", (std::string)c.Child(4).Child(0)); CPPUNIT_ASSERT_EQUAL(1, c.Child(5).Size()); CPPUNIT_ASSERT_EQUAL((std::string)"enabled-xhufa", (std::string)c.Child(5).Child(0)); CPPUNIT_ASSERT_EQUAL(1, c.Child(6).Size()); CPPUNIT_ASSERT_EQUAL((std::string)"enabled-blax", (std::string)c.Child(6).Child(0)); ClearNodes(); } #endif #ifdef TESTDEFAULTVALUE void ProfileTest::TestDefaultValue() { std::stringstream ps; ps << "" "" "" "" "" "" "" "" "bazc-constant" "" "" "no-effect" "" "" "" "" "" "" "" "" "" "no-effect" "" "" "" "" "" "" "" "" "no-effect" "" "" "" "" "" "" "" "" "" "" "" "" "no-effect" "no-effect" "no-effect" "" "" "" "" "" "" "" "" "" "" "" "" "no-effect" "no-effect" "no-effect" "" ""; p.ReadFromStream(ps); i.NewChild(common); i[common].NewChild("baza") = common + "-baza"; i[common].NewChild("foza") = common + "-foza"; i[common].NewChild("mfoza") = common + "-mfoza"; i.NewChild(special); i[special].NewChild("msfoza") = special + "-msfoza"; i[special].NewChild("msbara") = special + "-msbara"; i[special].NewChild("msmfoza") = special + "-msmfoza"; i[special].NewChild("mefoza") = special + "-mefoza"; i[special].NewChild("mesfoza") = special + "-mesfoza"; i[special].NewChild("meafoza") = special + "-meafoza"; i[special].NewChild("mefozb") = special + "-mefozb"; /* * Config: common-baza bazb-default bazc-constant common-mfoza mfozb-default special-msfoza special-msmfoza msfozb-default msmfozb-default special-mefoza special-mesfoza 
mefozb-default mesfozb-default */ Arc::Config c; p.Evaluate(c, i); CPPUNIT_ASSERT_EQUAL(0, c.AttributesSize()); CPPUNIT_ASSERT_EQUAL(15, c.Size()); CPPUNIT_ASSERT_EQUAL(1, c.Child(0).Size()); CPPUNIT_ASSERT_EQUAL(0, c.Child(0).AttributesSize()); CPPUNIT_ASSERT_EQUAL(common + "-baza", (std::string)c.Child(0).Child(0)); CPPUNIT_ASSERT_EQUAL(1, c.Child(1).Size()); CPPUNIT_ASSERT_EQUAL(0, c.Child(1).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"bazb-default", (std::string)c.Child(1).Child(0)); CPPUNIT_ASSERT_EQUAL(1, c.Child(2).Size()); CPPUNIT_ASSERT_EQUAL(0, c.Child(2).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"bazc-constant", (std::string)c.Child(2).Child(0)); CPPUNIT_ASSERT_EQUAL(0, c.Child(3).Size()); CPPUNIT_ASSERT_EQUAL(0, c.Child(3).AttributesSize()); CPPUNIT_ASSERT_EQUAL(0, c.Child(4).Size()); CPPUNIT_ASSERT_EQUAL(1, c.Child(4).AttributesSize()); CPPUNIT_ASSERT_EQUAL(common + "-foza", (std::string)c.Child(4).Attribute("foza")); CPPUNIT_ASSERT_EQUAL(0, c.Child(5).Size()); CPPUNIT_ASSERT_EQUAL(1, c.Child(5).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"fozb-default", (std::string)c.Child(5).Attribute("fozb")); CPPUNIT_ASSERT_EQUAL(0, c.Child(6).Size()); CPPUNIT_ASSERT_EQUAL(1, c.Child(6).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"fozc-constant", (std::string)c.Child(6).Attribute("fozc")); CPPUNIT_ASSERT_EQUAL(0, c.Child(7).Size()); CPPUNIT_ASSERT_EQUAL(0, c.Child(7).AttributesSize()); CPPUNIT_ASSERT_EQUAL(1, c.Child(8).Size()); CPPUNIT_ASSERT_EQUAL(0, c.Child(8).AttributesSize()); CPPUNIT_ASSERT_EQUAL(common + "-mfoza", (std::string)c.Child(8).Child(0)); CPPUNIT_ASSERT_EQUAL(1, c.Child(9).Size()); CPPUNIT_ASSERT_EQUAL(0, c.Child(9).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"mfozb-default", (std::string)c.Child(9).Child(0)); CPPUNIT_ASSERT_EQUAL(0, c.Child(10).Size()); CPPUNIT_ASSERT_EQUAL(0, c.Child(10).AttributesSize()); CPPUNIT_ASSERT_EQUAL(2, c.Child(11).Size()); CPPUNIT_ASSERT_EQUAL(1, c.Child(11).AttributesSize()); CPPUNIT_ASSERT_EQUAL(special + "-msfoza", (std::string)c.Child(11).Child(0)); CPPUNIT_ASSERT_EQUAL(special + "-msbara", (std::string)c.Child(11).Attribute("msbara")); CPPUNIT_ASSERT_EQUAL(special + "-msmfoza", (std::string)c.Child(11).Child(1)); CPPUNIT_ASSERT_EQUAL(2, c.Child(12).Size()); CPPUNIT_ASSERT_EQUAL(1, c.Child(12).AttributesSize()); CPPUNIT_ASSERT_EQUAL((std::string)"msfozb-default", (std::string)c.Child(12).Child(0)); CPPUNIT_ASSERT_EQUAL((std::string)"msbarb-default", (std::string)c.Child(12).Attribute("msbarb")); CPPUNIT_ASSERT_EQUAL((std::string)"msmfozb-default", (std::string)c.Child(12).Child(1)); CPPUNIT_ASSERT_EQUAL(2, c.Child(13).Size()); CPPUNIT_ASSERT_EQUAL(1, c.Child(13).AttributesSize()); CPPUNIT_ASSERT_EQUAL(special + "-mefoza", (std::string)c.Child(13).Child(0)); CPPUNIT_ASSERT_EQUAL(special + "-mesfoza", (std::string)c.Child(13).Child(1)); CPPUNIT_ASSERT_EQUAL(special + "-meafoza", (std::string)c.Child(13).Attribute("meafoza")); CPPUNIT_ASSERT_EQUAL(2, c.Child(14).Size()); CPPUNIT_ASSERT_EQUAL(1, c.Child(14).AttributesSize()); CPPUNIT_ASSERT_EQUAL(special + "-mefozb", (std::string)c.Child(14).Child(0)); CPPUNIT_ASSERT_EQUAL((std::string)"mesfozb-default", (std::string)c.Child(14).Child(1)); CPPUNIT_ASSERT_EQUAL((std::string)"meafozb-default", (std::string)c.Child(14).Attribute("meafozb")); ClearNodes(); } #endif #ifdef TESTOTHERS void ProfileTest::TestOthers() { std::stringstream ps; ps << "" "" ""; p.ReadFromStream(ps); i.NewChild(first); i[first].NewChild("foo") = first; i.NewChild(second); 
i[second].NewChild("foo") = second; /* * Config: */ Arc::Config c; p.Evaluate(c, i); CPPUNIT_ASSERT_EQUAL(0, c.AttributesSize()); CPPUNIT_ASSERT_EQUAL(0, c.Size()); ClearNodes(); } #endif CPPUNIT_TEST_SUITE_REGISTRATION(ProfileTest); nordugrid-arc-5.4.2/src/hed/libs/common/test/PaxHeaders.7502/ArcRegexTest.cpp0000644000000000000000000000012312300422304024755 xustar000000000000000027 mtime=1392649412.257768 27 atime=1513200574.853706 29 ctime=1513200658.90473409 nordugrid-arc-5.4.2/src/hed/libs/common/test/ArcRegexTest.cpp0000644000175000002070000000736112300422304025032 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include class ArcRegexTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(ArcRegexTest); CPPUNIT_TEST(TestRegex); CPPUNIT_TEST(TestCaseInsensitive); CPPUNIT_TEST(TestSubExpression); CPPUNIT_TEST_SUITE_END(); public: void TestRegex(); void TestCaseInsensitive(); void TestSubExpression(); }; void ArcRegexTest::TestRegex() { std::list match, unmatch; std::string s = "the cat sat on the mat"; std::string r = "cat"; Arc::RegularExpression simplerx(r); CPPUNIT_ASSERT(simplerx.isOk()); CPPUNIT_ASSERT(!simplerx.match(s)); // must match whole string for success CPPUNIT_ASSERT(simplerx.match(s, unmatch, match)); CPPUNIT_ASSERT_EQUAL(1, (int)match.size()); std::list::iterator i = match.begin(); CPPUNIT_ASSERT_EQUAL(std::string("cat"), *i); CPPUNIT_ASSERT_EQUAL(2, (int)unmatch.size()); i = unmatch.begin(); CPPUNIT_ASSERT_EQUAL(std::string("the "), *i); i++; CPPUNIT_ASSERT_EQUAL(std::string(" sat on the mat"), *i); r = "([a-zA-Z0-9_\\\\-]*)=\"([a-zA-Z0-9_\\\\-]*)\""; Arc::RegularExpression rx1(r); CPPUNIT_ASSERT(rx1.isOk()); Arc::RegularExpression rx2 = rx1; CPPUNIT_ASSERT(rx2.isOk()); CPPUNIT_ASSERT(rx1.hasPattern("([a-zA-Z0-9_\\\\-]*)=\"([a-zA-Z0-9_\\\\-]*)\"")); CPPUNIT_ASSERT(rx1.hasPattern(r)); CPPUNIT_ASSERT(!rx1.hasPattern("abcd")); CPPUNIT_ASSERT(!rx1.match("keyvalue")); CPPUNIT_ASSERT(rx1.match("key=\"value\"", unmatch, match)); CPPUNIT_ASSERT_EQUAL(3, (int)match.size()); i = match.begin(); CPPUNIT_ASSERT_EQUAL(std::string("key=\"value\""), *i); i++; CPPUNIT_ASSERT_EQUAL(std::string("key"), *i); i++; CPPUNIT_ASSERT_EQUAL(std::string("value"), *i); CPPUNIT_ASSERT_EQUAL(2, (int)unmatch.size()); i = unmatch.begin(); CPPUNIT_ASSERT_EQUAL(std::string("=\""), *i); i++; CPPUNIT_ASSERT_EQUAL(std::string("\""), *i); Arc::RegularExpression empty_regexp; CPPUNIT_ASSERT(!empty_regexp.isOk()); } void ArcRegexTest::TestCaseInsensitive() { Arc::RegularExpression cis("foo", true); CPPUNIT_ASSERT(cis.isOk()); CPPUNIT_ASSERT(cis.match("FOO")); CPPUNIT_ASSERT(cis.match("foo")); CPPUNIT_ASSERT(cis.match("fOo")); Arc::RegularExpression cs("foo"); CPPUNIT_ASSERT(cs.isOk()); CPPUNIT_ASSERT(!cs.match("FOO")); CPPUNIT_ASSERT(cs.match("foo")); CPPUNIT_ASSERT(!cs.match("fOo")); } void ArcRegexTest::TestSubExpression() { Arc::RegularExpression r("^(abc)?def(ghi)jkl(mno)?$"); CPPUNIT_ASSERT(r.isOk()); std::vector matches; CPPUNIT_ASSERT(r.match("defghijkl", matches)); CPPUNIT_ASSERT_EQUAL(3, (int)matches.size()); CPPUNIT_ASSERT(matches[0].empty()); CPPUNIT_ASSERT_EQUAL((std::string)"ghi", matches[1]); CPPUNIT_ASSERT(matches[2].empty()); matches.clear(); CPPUNIT_ASSERT(r.match("abcdefghijkl", matches)); CPPUNIT_ASSERT_EQUAL(3, (int)matches.size()); CPPUNIT_ASSERT_EQUAL((std::string)"abc", matches[0]); CPPUNIT_ASSERT_EQUAL((std::string)"ghi", matches[1]); CPPUNIT_ASSERT(matches[2].empty()); matches.clear(); CPPUNIT_ASSERT(r.match("defghijklmno", matches)); 
CPPUNIT_ASSERT_EQUAL(3, (int)matches.size()); CPPUNIT_ASSERT(matches[0].empty()); CPPUNIT_ASSERT_EQUAL((std::string)"ghi", matches[1]); CPPUNIT_ASSERT_EQUAL((std::string)"mno", matches[2]); matches.clear(); CPPUNIT_ASSERT(r.match("abcdefghijklmno", matches)); CPPUNIT_ASSERT_EQUAL(3, (int)matches.size()); CPPUNIT_ASSERT_EQUAL((std::string)"abc", matches[0]); CPPUNIT_ASSERT_EQUAL((std::string)"ghi", matches[1]); CPPUNIT_ASSERT_EQUAL((std::string)"mno", matches[2]); matches.clear(); CPPUNIT_ASSERT(!r.match("defjkl", matches)); matches.clear(); } CPPUNIT_TEST_SUITE_REGISTRATION(ArcRegexTest); nordugrid-arc-5.4.2/src/hed/libs/common/test/PaxHeaders.7502/StringConvTest.cpp0000644000000000000000000000012212733561721025371 xustar000000000000000027 mtime=1466885073.986422 27 atime=1513200574.852706 28 ctime=1513200658.9137342 nordugrid-arc-5.4.2/src/hed/libs/common/test/StringConvTest.cpp0000644000175000002070000000742312733561721025446 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include class StringConvTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(StringConvTest); CPPUNIT_TEST(TestStringConv); CPPUNIT_TEST(TestURIEncode); CPPUNIT_TEST(TestIntegers); CPPUNIT_TEST(TestJoin); CPPUNIT_TEST_SUITE_END(); public: void TestStringConv(); void TestURIEncode(); void TestIntegers(); void TestJoin(); }; void StringConvTest::TestStringConv() { std::string in; std::string out; in = "aBcDeFgHiJkLmN"; out = Arc::lower(in); CPPUNIT_ASSERT_EQUAL(std::string("abcdefghijklmn"), out); out = Arc::upper(in); CPPUNIT_ASSERT_EQUAL(std::string("ABCDEFGHIJKLMN"), out); in = "####0123456789++++"; out = Arc::trim(in,"#+"); CPPUNIT_ASSERT_EQUAL(std::string("0123456789"), out); in = "0123\n\t \t\n\n\n\n456789\n\t\n"; out = Arc::strip(in); CPPUNIT_ASSERT_EQUAL(std::string("0123\n456789"), out); in = "1234567890"; out = Arc::escape_chars(in,"13579",'#',false); CPPUNIT_ASSERT_EQUAL(std::string("#12#34#56#78#90"), out); out += "###"; out = Arc::unescape_chars(out,'#'); CPPUNIT_ASSERT_EQUAL(in + "##", out); out = Arc::escape_chars(in,"13579",'#',false,Arc::escape_hex); CPPUNIT_ASSERT_EQUAL(std::string("#312#334#356#378#390"), out); out = Arc::unescape_chars(out,'#',Arc::escape_hex); CPPUNIT_ASSERT_EQUAL(in, out); out = Arc::escape_chars(in,"13579",'#',false,Arc::escape_octal); CPPUNIT_ASSERT_EQUAL(std::string("#0612#0634#0656#0678#0710"), out); out = Arc::unescape_chars(out,'#',Arc::escape_octal); CPPUNIT_ASSERT_EQUAL(in, out); out = Arc::escape_chars(in,"13579",'#',true,Arc::escape_hex); CPPUNIT_ASSERT_EQUAL(std::string("1#323#345#367#389#30"), out); out = Arc::unescape_chars(out,'#',Arc::escape_hex); CPPUNIT_ASSERT_EQUAL(in, out); } void StringConvTest::TestURIEncode() { std::string in; std::string out; // simple URL in = "http://localhost/data/file1"; out = Arc::uri_encode(in, false); CPPUNIT_ASSERT_EQUAL(std::string("http%3A//localhost/data/file1"), out); CPPUNIT_ASSERT_EQUAL(in, Arc::uri_unencode(out)); // complex case in = "http://localhost:80/data/file with spaces&name=value&symbols=()!%*$"; out = Arc::uri_encode(in, false); CPPUNIT_ASSERT_EQUAL(std::string("http%3A//localhost%3A80/data/file%20with%20spaces%26name%3Dvalue%26symbols%3D%28%29%21%25%2A%24"), out); CPPUNIT_ASSERT_EQUAL(in, Arc::uri_unencode(out)); out = Arc::uri_encode(in, true); CPPUNIT_ASSERT_EQUAL(std::string("http%3A%2F%2Flocalhost%3A80%2Fdata%2Ffile%20with%20spaces%26name%3Dvalue%26symbols%3D%28%29%21%25%2A%24"), out); CPPUNIT_ASSERT_EQUAL(in, Arc::uri_unencode(out)); } void StringConvTest::TestIntegers() { int 
n = 12345; CPPUNIT_ASSERT_EQUAL(std::string("12345"),Arc::inttostr(n)); CPPUNIT_ASSERT_EQUAL(std::string("-12345"),Arc::inttostr(-n)); CPPUNIT_ASSERT_EQUAL(std::string("0000012345"),Arc::inttostr(n,10,10)); CPPUNIT_ASSERT_EQUAL(std::string("343340"),Arc::inttostr(n,5,2)); CPPUNIT_ASSERT_EQUAL(std::string("1ah5"),Arc::inttostr(n,20)); CPPUNIT_ASSERT(Arc::strtoint("12345",n)); CPPUNIT_ASSERT_EQUAL(12345,n); CPPUNIT_ASSERT(Arc::strtoint("343340",n,5)); CPPUNIT_ASSERT_EQUAL(12345,n); CPPUNIT_ASSERT(Arc::strtoint("1ah5",n,20)); CPPUNIT_ASSERT_EQUAL(12345,n); } void StringConvTest::TestJoin() { std::list strlist; CPPUNIT_ASSERT(Arc::join(strlist, " ").empty()); strlist.push_back("test"); CPPUNIT_ASSERT_EQUAL(std::string("test"), Arc::join(strlist, " ")); strlist.push_back("again"); CPPUNIT_ASSERT_EQUAL(std::string("test again"), Arc::join(strlist, " ")); strlist.push_back("twice"); CPPUNIT_ASSERT_EQUAL(std::string("test,again,twice"), Arc::join(strlist, ",")); } CPPUNIT_TEST_SUITE_REGISTRATION(StringConvTest); nordugrid-arc-5.4.2/src/hed/libs/common/test/PaxHeaders.7502/UserConfigTest.cpp0000644000000000000000000000012411743735542025347 xustar000000000000000027 mtime=1334819682.016609 27 atime=1513200574.860706 30 ctime=1513200658.915734225 nordugrid-arc-5.4.2/src/hed/libs/common/test/UserConfigTest.cpp0000644000175000002070000003531011743735542025416 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include class UserConfigTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(UserConfigTest); CPPUNIT_TEST(ParseRegistryTest); CPPUNIT_TEST(ParseComputingTest); CPPUNIT_TEST(UnspecifiedInterfaceTest); CPPUNIT_TEST(GroupTest); CPPUNIT_TEST(RequestedInterfacesTest); // CPPUNIT_TEST(ServiceFromLegacyStringTest); CPPUNIT_TEST(LegacyDefaultServicesTest); CPPUNIT_TEST(LegacyAliasTest); CPPUNIT_TEST(RejectionTest); CPPUNIT_TEST(SaveToFileTest); CPPUNIT_TEST_SUITE_END(); public: UserConfigTest() : uc(Arc::initializeCredentialsType(Arc::initializeCredentialsType::SkipCredentials)), conffile("test-client.conf") {} void ParseRegistryTest(); void ParseComputingTest(); void UnspecifiedInterfaceTest(); void GroupTest(); void RequestedInterfacesTest(); // void ServiceFromLegacyStringTest(); void LegacyDefaultServicesTest(); void LegacyAliasTest(); void RejectionTest(); void SaveToFileTest(); void setUp() {} void tearDown() {} private: Arc::UserConfig uc; const std::string conffile; }; void UserConfigTest::ParseRegistryTest() { std::ofstream f(conffile.c_str(), std::ifstream::trunc); f << "[registry/emir1]\nurl=http://emir1.emi-eu.eu\nregistryinterface=org.nordugrid.emir\ndefault=yes\n"; f.close(); uc.LoadConfigurationFile(conffile); std::list services; services = uc.GetDefaultServices(); CPPUNIT_ASSERT_EQUAL(1, (int)services.size()); CPPUNIT_ASSERT_EQUAL((std::string)"http://emir1.emi-eu.eu", services.front().URLString); CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.emir", services.front().InterfaceName); services = uc.GetDefaultServices(Arc::ConfigEndpoint::REGISTRY); CPPUNIT_ASSERT_EQUAL(1, (int)services.size()); CPPUNIT_ASSERT_EQUAL((std::string)"http://emir1.emi-eu.eu", services.front().URLString); CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.emir", services.front().InterfaceName); services = uc.GetDefaultServices(Arc::ConfigEndpoint::COMPUTINGINFO); CPPUNIT_ASSERT_EQUAL(0, (int)services.size()); Arc::ConfigEndpoint service = uc.GetService("emir1"); CPPUNIT_ASSERT_EQUAL((std::string)"http://emir1.emi-eu.eu", service.URLString); 
CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.emir", service.InterfaceName); remove(conffile.c_str()); } void UserConfigTest::ParseComputingTest() { std::ofstream f(conffile.c_str(), std::ifstream::trunc); f << "[computing/puff]\nurl=ldap://puff.hep.lu.se\ninfointerface=org.nordugrid.ldapglue2\nsubmissioninterface=org.nordugrid.gridftpjob\ndefault=yes\n"; f.close(); uc.LoadConfigurationFile(conffile); std::list services; services = uc.GetDefaultServices(); CPPUNIT_ASSERT_EQUAL(1, (int)services.size()); CPPUNIT_ASSERT_EQUAL((std::string)"ldap://puff.hep.lu.se", services.front().URLString); CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.ldapglue2", services.front().InterfaceName); services = uc.GetDefaultServices(Arc::ConfigEndpoint::COMPUTINGINFO); CPPUNIT_ASSERT_EQUAL(1, (int)services.size()); CPPUNIT_ASSERT_EQUAL((std::string)"ldap://puff.hep.lu.se", services.front().URLString); CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.ldapglue2", services.front().InterfaceName); services = uc.GetDefaultServices(Arc::ConfigEndpoint::REGISTRY); CPPUNIT_ASSERT_EQUAL(0, (int)services.size()); Arc::ConfigEndpoint service = uc.GetService("puff"); CPPUNIT_ASSERT_EQUAL((std::string)"ldap://puff.hep.lu.se", service.URLString); CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.ldapglue2", service.InterfaceName); CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.gridftpjob", service.RequestedSubmissionInterfaceName); remove(conffile.c_str()); } void UserConfigTest::UnspecifiedInterfaceTest() { std::ofstream f(conffile.c_str(), std::ifstream::trunc); f << "[computing/puff]\nurl=ldap://puff.hep.lu.se\n" << "[registry/emir1]\nurl=http://emir1.nordugrid.org\n"; f.close(); uc.LoadConfigurationFile(conffile); Arc::ConfigEndpoint service; service = uc.GetService("puff"); CPPUNIT_ASSERT_EQUAL((std::string)"ldap://puff.hep.lu.se", service.URLString); CPPUNIT_ASSERT_EQUAL((std::string)"", service.InterfaceName); CPPUNIT_ASSERT_EQUAL((std::string)"", service.RequestedSubmissionInterfaceName); service = uc.GetService("emir1"); CPPUNIT_ASSERT_EQUAL((std::string)"http://emir1.nordugrid.org", service.URLString); CPPUNIT_ASSERT_EQUAL((std::string)"", service.InterfaceName); remove(conffile.c_str()); } void UserConfigTest::GroupTest() { std::ofstream f(conffile.c_str(), std::ifstream::trunc); f << "[computing/puff]\nurl=ldap://puff.hep.lu.se\ngroup=hep\n" << "[computing/paff]\nurl=ldap://paff.hep.lu.se\ngroup=hep\n" << "[computing/interop]\nurl=https://interop.grid.niif.hu\ngroup=niif\n"; f.close(); uc.LoadConfigurationFile(conffile); std::list services; services = uc.GetServicesInGroup("hep"); CPPUNIT_ASSERT_EQUAL(2, (int)services.size()); CPPUNIT_ASSERT_EQUAL((std::string)"ldap://puff.hep.lu.se", services.front().URLString); CPPUNIT_ASSERT_EQUAL((std::string)"ldap://paff.hep.lu.se", services.back().URLString); services = uc.GetServicesInGroup("niif"); CPPUNIT_ASSERT_EQUAL(1, (int)services.size()); CPPUNIT_ASSERT_EQUAL((std::string)"https://interop.grid.niif.hu", services.front().URLString); remove(conffile.c_str()); } void UserConfigTest::RequestedInterfacesTest() { std::ofstream f(conffile.c_str(), std::ifstream::trunc); f << "infointerface=org.nordugrid.ldapglue2\nsubmissioninterface=org.nordugrid.gridftpjob\n" << "[computing/puff]\nurl=ldap://puff.hep.lu.se\n"; f.close(); uc.LoadConfigurationFile(conffile); CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.gridftpjob", uc.SubmissionInterface()); CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.ldapglue2", uc.InfoInterface()); Arc::ConfigEndpoint service; service = 
uc.GetService("puff"); CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.gridftpjob", service.RequestedSubmissionInterfaceName); remove(conffile.c_str()); } // void UserConfigTest::ServiceFromLegacyStringTest() // { // Arc::ConfigEndpoint service; // service = Arc::UserConfig::ServiceFromLegacyString("computing:ARC0:http://a.org"); // CPPUNIT_ASSERT_EQUAL(Arc::ConfigEndpoint::COMPUTINGINFO, service.type); // CPPUNIT_ASSERT_EQUAL((std::string)"http://a.org", service.URLString); // CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.ldapng", service.InterfaceName); // // service = Arc::UserConfig::ServiceFromLegacyString("computing:ARC1:http://a.org"); // CPPUNIT_ASSERT_EQUAL(Arc::ConfigEndpoint::COMPUTINGINFO, service.type); // CPPUNIT_ASSERT_EQUAL((std::string)"http://a.org", service.URLString); // CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.wsrfglue2", service.InterfaceName); // // service = Arc::UserConfig::ServiceFromLegacyString("index:ARC0:http://a.org"); // CPPUNIT_ASSERT_EQUAL(Arc::ConfigEndpoint::REGISTRY, service.type); // CPPUNIT_ASSERT_EQUAL((std::string)"http://a.org", service.URLString); // CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.ldapegiis", service.InterfaceName); // // service = Arc::UserConfig::ServiceFromLegacyString("index:EMIES:http://a.org"); // CPPUNIT_ASSERT_EQUAL(Arc::ConfigEndpoint::REGISTRY, service.type); // CPPUNIT_ASSERT_EQUAL((std::string)"http://a.org", service.URLString); // CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.emir", service.InterfaceName); // } void UserConfigTest::LegacyDefaultServicesTest() { std::ofstream f(conffile.c_str(), std::ifstream::trunc); f << "defaultservices=" << "index:ARC0:ldap://index1.nordugrid.org:2135/Mds-Vo-name=NorduGrid,o=grid " << "index:ARC0:ldap://index2.nordugrid.org:2135/Mds-Vo-name=NorduGrid,o=grid " << "computing:ARC0:ldap://a.org " << "computing:ARC1:http://b.org\n"; f.close(); uc.LoadConfigurationFile(conffile); std::list services; services = uc.GetDefaultServices(); CPPUNIT_ASSERT_EQUAL(4, (int)services.size()); services = uc.GetDefaultServices(Arc::ConfigEndpoint::REGISTRY); CPPUNIT_ASSERT_EQUAL(2, (int)services.size()); CPPUNIT_ASSERT_EQUAL((std::string)"ldap://index1.nordugrid.org:2135/Mds-Vo-name=NorduGrid,o=grid", services.front().URLString); CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.ldapegiis", services.front().InterfaceName); CPPUNIT_ASSERT_EQUAL((std::string)"ldap://index2.nordugrid.org:2135/Mds-Vo-name=NorduGrid,o=grid", services.back().URLString); CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.ldapegiis", services.back().InterfaceName); services = uc.GetDefaultServices(Arc::ConfigEndpoint::COMPUTINGINFO); CPPUNIT_ASSERT_EQUAL(2, (int)services.size()); CPPUNIT_ASSERT_EQUAL((std::string)"ldap://a.org", services.front().URLString); CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.ldapng", services.front().InterfaceName); CPPUNIT_ASSERT_EQUAL((std::string)"http://b.org", services.back().URLString); CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.wsrfglue2", services.back().InterfaceName); remove(conffile.c_str()); } void UserConfigTest::LegacyAliasTest() { std::ofstream f(conffile.c_str(), std::ifstream::trunc); f << "[ alias ]" << std::endl; f << "a = computing:ARC0:http://a.org" << std::endl; f << "b = computing:ARC0:http://b.org" << std::endl; f << "c = a b computing:ARC0:http://c.org" << std::endl; f << "invalid = compute:ARC0:http://invalid.org" << std::endl; f << "i = index:ARC0:http://i.org" << std::endl; f << "j = index:ARC0:http://j.org" << std::endl; f << "k = i j 
index:ARC0:http://k.org" << std::endl; f << "mixed = a b i j" << std::endl; f << "loop = a b link" << std::endl; f << "link = loop" << std::endl; f.close(); uc.LoadConfigurationFile(conffile); std::list services; // legacy aliases become groups in the new config services = uc.GetServices("a"); CPPUNIT_ASSERT_EQUAL(1, (int)services.size()); CPPUNIT_ASSERT_EQUAL((std::string)"http://a.org", services.front().URLString); services = uc.GetServices("b"); CPPUNIT_ASSERT_EQUAL(1, (int)services.size()); CPPUNIT_ASSERT_EQUAL((std::string)"http://b.org", services.front().URLString); services = uc.GetServices("c"); CPPUNIT_ASSERT_EQUAL(3, (int)services.size()); CPPUNIT_ASSERT_EQUAL((std::string)"http://a.org", services.front().URLString); services.pop_front(); CPPUNIT_ASSERT_EQUAL((std::string)"http://b.org", services.front().URLString); CPPUNIT_ASSERT_EQUAL((std::string)"http://c.org", services.back().URLString); services = uc.GetServices("i"); CPPUNIT_ASSERT_EQUAL(1, (int)services.size()); CPPUNIT_ASSERT_EQUAL((std::string)"http://i.org", services.front().URLString); services = uc.GetServices("j"); CPPUNIT_ASSERT_EQUAL(1, (int)services.size()); CPPUNIT_ASSERT_EQUAL((std::string)"http://j.org", services.front().URLString); services = uc.GetServices("k"); CPPUNIT_ASSERT_EQUAL(3, (int)services.size()); CPPUNIT_ASSERT_EQUAL((std::string)"http://i.org", services.front().URLString); services.pop_front(); CPPUNIT_ASSERT_EQUAL((std::string)"http://j.org", services.front().URLString); CPPUNIT_ASSERT_EQUAL((std::string)"http://k.org", services.back().URLString); services = uc.GetServices("invalid"); CPPUNIT_ASSERT_EQUAL(0, (int)services.size()); services = uc.GetServices("mixed"); CPPUNIT_ASSERT_EQUAL(4, (int)services.size()); CPPUNIT_ASSERT_EQUAL((std::string)"http://a.org", services.front().URLString); services.pop_front(); CPPUNIT_ASSERT_EQUAL((std::string)"http://b.org", services.front().URLString); services.pop_front(); CPPUNIT_ASSERT_EQUAL((std::string)"http://i.org", services.front().URLString); services.pop_front(); CPPUNIT_ASSERT_EQUAL((std::string)"http://j.org", services.front().URLString); services = uc.GetServices("loop"); CPPUNIT_ASSERT_EQUAL(2, (int)services.size()); CPPUNIT_ASSERT_EQUAL((std::string)"http://a.org", services.front().URLString); CPPUNIT_ASSERT_EQUAL((std::string)"http://b.org", services.back().URLString); services = uc.GetServices("undefined"); CPPUNIT_ASSERT_EQUAL(0, (int)services.size()); remove(conffile.c_str()); } void UserConfigTest::RejectionTest() { std::ofstream f(conffile.c_str(), std::ifstream::trunc); f << "rejectdiscovery=ldap://puff.hep.lu.se\nrejectdiscovery=test.nordugrid.org\n"; f << "rejectmanagement=ldap://puff.hep.lu.se\nrejectmanagement=test.nordugrid.org\n"; f.close(); uc.LoadConfigurationFile(conffile); std::list urls = uc.RejectDiscoveryURLs(); CPPUNIT_ASSERT_EQUAL(2, (int)urls.size()); CPPUNIT_ASSERT_EQUAL((std::string)"ldap://puff.hep.lu.se", urls.front()); CPPUNIT_ASSERT_EQUAL((std::string)"test.nordugrid.org", urls.back()); std::list urls2 = uc.RejectManagementURLs(); CPPUNIT_ASSERT_EQUAL(2, (int)urls2.size()); CPPUNIT_ASSERT_EQUAL((std::string)"ldap://puff.hep.lu.se", urls2.front()); CPPUNIT_ASSERT_EQUAL((std::string)"test.nordugrid.org", urls2.back()); remove(conffile.c_str()); } void UserConfigTest::SaveToFileTest() { std::ofstream f(conffile.c_str(), std::ifstream::trunc); std::string input = "[common]\n" "proxypath = /tmp/my-proxy\n" "certificatepath = /home/username/cert.pem\n" "keypath = /home/username/key.pem\n" "cacertificatesdirectory = 
/home/user/cacertificates\n" "rejectdiscovery = bad.service.org\n" "rejectdiscovery = bad2.service.org\n" "rejectmanagement = bad3.service.org\n" "rejectmanagement = bad4.service.org\n" "timeout = 50\n" "brokername = FastestQueue\n" "brokerarguments = arg\n" "vomsespath = /home/user/vomses\n" "submissioninterface = org.nordugrid.gridftpjob\n" "infointerface = org.nordugrid.ldapglue2\n" "[registry/index1]\n" "url = ldap://index1.nordugrid.org:2135/Mds-Vo-name=NorduGrid,o=grid\n" "registryinterface = org.nordugrid.ldapegiis\n" "default = yes\n" "group = index\n" "[registry/index2]\n" "url = ldap://index2.nordugrid.org:2135/Mds-Vo-name=NorduGrid,o=grid\n" "registryinterface = org.nordugrid.ldapegiis\n" "group = index\n" "group = special\n" "[computing/interop]\n" "url = https://interop.grid.niif.hu:2010/arex-x509\n" "infointerface = org.nordugrid.ldapglue2\n" "submissioninterface = org.nordugrid.gridftpjob\n" "default = yes\n"; f << input; f.close(); uc.LoadConfigurationFile(conffile); remove(conffile.c_str()); uc.SaveToFile(conffile); std::ifstream ff(conffile.c_str()); std::string output; std::getline(ff,output,'\0'); ff.close(); remove(conffile.c_str()); CPPUNIT_ASSERT_EQUAL((std::string)input, (std::string)output); } CPPUNIT_TEST_SUITE_REGISTRATION(UserConfigTest); nordugrid-arc-5.4.2/src/hed/libs/common/test/PaxHeaders.7502/Base64Test.cpp0000644000000000000000000000012412113727507024321 xustar000000000000000027 mtime=1362079559.274818 27 atime=1513200574.844706 30 ctime=1513200658.905734102 nordugrid-arc-5.4.2/src/hed/libs/common/test/Base64Test.cpp0000644000175000002070000000167712113727507024401 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include class Base64Test : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(Base64Test); CPPUNIT_TEST(TestBase64); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void TestBase64(); }; void Base64Test::setUp() { } void Base64Test::tearDown() { } void Base64Test::TestBase64() { std::string val; bool found = false; std::string plain_str("base64 to test 1234567890 abcdefghijklmnopqrstuvwxyz"); std::string encoded_str("YmFzZTY0IHRvIHRlc3QgMTIzNDU2Nzg5MCBhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5eg=="); std::string str; // encode str = Arc::Base64::encode(plain_str); CPPUNIT_ASSERT_EQUAL(encoded_str, str); str.insert(16,"\r\n"); str.insert(34,"\r\n"); // decode str = Arc::Base64::decode(str); CPPUNIT_ASSERT_EQUAL(plain_str, str); } CPPUNIT_TEST_SUITE_REGISTRATION(Base64Test); nordugrid-arc-5.4.2/src/hed/libs/common/test/PaxHeaders.7502/XMLNodeTest.cpp0000644000000000000000000000012412077225413024541 xustar000000000000000027 mtime=1358768907.759887 27 atime=1513200574.841705 30 ctime=1513200658.918734261 nordugrid-arc-5.4.2/src/hed/libs/common/test/XMLNodeTest.cpp0000644000175000002070000001475712077225413024624 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include class XMLNodeTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(XMLNodeTest); CPPUNIT_TEST(TestParsing); CPPUNIT_TEST(TestExchange); CPPUNIT_TEST(TestMove); CPPUNIT_TEST(TestQuery); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void TestParsing(); void TestExchange(); void TestMove(); void TestQuery(); }; void XMLNodeTest::setUp() { } void XMLNodeTest::tearDown() { } void XMLNodeTest::TestParsing() { std::string xml_str( "\n" "\n" " value1\n" " value2\n" "" ); Arc::XMLNode xml(xml_str); CPPUNIT_ASSERT((bool)xml); 
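  // The assertions below exercise the basic Arc::XMLNode accessors: Name() gives the
  // element name, operator[]("child") returns the first child with that name (convertible
  // to std::string for its text content), and GetXML()/GetDoc() serialise the subtree or
  // the whole document. A minimal usage sketch in the same spirit (variable names and the
  // sample XML are hypothetical, not part of this test):
  //   Arc::XMLNode cfg("<cfg><host>example.org</host></cfg>");
  //   std::string host = (std::string)cfg["host"];   // "example.org"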
CPPUNIT_ASSERT_EQUAL(std::string("root"), xml.Name()); CPPUNIT_ASSERT_EQUAL(std::string("value1"), (std::string)xml["child1"]); CPPUNIT_ASSERT_EQUAL(std::string("value2"), (std::string)xml["child2"]); std::string s; xml.GetXML(s); CPPUNIT_ASSERT_EQUAL(std::string("\n value1\n value2\n"),s); xml.GetDoc(s); CPPUNIT_ASSERT_EQUAL("\n"+xml_str+"\n",s); xml["child1"].GetXML(s); CPPUNIT_ASSERT_EQUAL(std::string("value1"),s); } void XMLNodeTest::TestExchange() { std::string xml1_str( "" "value1" "" "value3" "" "" ); std::string xml2_str( "" "value4" "" "value6" "" "" ); Arc::XMLNode* xml1 = NULL; Arc::XMLNode* xml2 = NULL; Arc::XMLNode node1; Arc::XMLNode node2; // Exchanging ordinary nodes xml1 = new Arc::XMLNode(xml1_str); xml2 = new Arc::XMLNode(xml2_str); node1 = (*xml1)["child2"]; node2 = (*xml2)["child5"]; node1.Exchange(node2); delete xml2; xml2 = new Arc::XMLNode(xml1_str); CPPUNIT_ASSERT_EQUAL(std::string("value6"),(std::string)((*xml1)["ns4:child5"]["ns4:child6"])); delete xml1; delete xml2; // Exchanging root nodes - operation must fail xml1 = new Arc::XMLNode(xml1_str); xml2 = new Arc::XMLNode(xml2_str); node1 = (*xml1); node2 = (*xml2); node1.Exchange(node2); delete xml2; xml2 = new Arc::XMLNode(xml1_str); CPPUNIT_ASSERT_EQUAL(std::string("ns1:root1"),(*xml1).FullName()); delete xml1; delete xml2; // Exchanging documents xml1 = new Arc::XMLNode(xml1_str); xml2 = new Arc::XMLNode(xml2_str); xml1->Exchange(*xml2); CPPUNIT_ASSERT_EQUAL(std::string("ns3:root2"),(*xml1).FullName()); CPPUNIT_ASSERT_EQUAL(std::string("ns1:root1"),(*xml2).FullName()); delete xml1; delete xml2; // Exchanging document and empty node xml1 = new Arc::XMLNode(xml1_str); xml2 = new Arc::XMLNode; xml1->Exchange(*xml2); CPPUNIT_ASSERT_EQUAL(std::string("ns1:root1"),(*xml2).FullName()); CPPUNIT_ASSERT(!(*xml1)); delete xml1; delete xml2; // Exchanging ordinary and empty node xml1 = new Arc::XMLNode(xml1_str); xml2 = new Arc::XMLNode; node1 = (*xml1)["ns2:child2"]; node1.Exchange(*xml2); CPPUNIT_ASSERT_EQUAL(std::string("ns2:child2"),xml2->FullName()); CPPUNIT_ASSERT(!node1); delete xml1; delete xml2; } void XMLNodeTest::TestMove() { std::string xml1_str( "" "value1" "" "value3" "" "" ); std::string xml2_str( "" "value4" "" "value6" "" "" ); Arc::XMLNode* xml1 = NULL; Arc::XMLNode* xml2 = NULL; Arc::XMLNode node1; Arc::XMLNode node2; // Moving ordinary to ordinary node xml1 = new Arc::XMLNode(xml1_str); xml2 = new Arc::XMLNode(xml2_str); node1 = (*xml1)["child2"]; node2 = (*xml2)["child5"]; node1.Move(node2); CPPUNIT_ASSERT_EQUAL(std::string("ns2:child2"),node2.FullName()); CPPUNIT_ASSERT(!node1); CPPUNIT_ASSERT((*xml2)["child5"]); CPPUNIT_ASSERT(!(*xml1)["child2"]); delete xml1; delete xml2; // Moving ordinary to empty node xml1 = new Arc::XMLNode(xml1_str); xml2 = new Arc::XMLNode; node1 = (*xml1)["child2"]; node1.Move(*xml2); CPPUNIT_ASSERT_EQUAL(std::string("ns2:child2"),xml2->FullName()); CPPUNIT_ASSERT(!node1); delete xml1; delete xml2; // Moving document to empty node xml1 = new Arc::XMLNode(xml1_str); xml2 = new Arc::XMLNode; xml1->Move(*xml2); CPPUNIT_ASSERT_EQUAL(std::string("ns1:root1"),xml2->FullName()); CPPUNIT_ASSERT(!*xml1); delete xml1; delete xml2; } void XMLNodeTest::TestQuery() { std::string xml_str1( "" "value1" "" "value3" "" "" ); std::string xml_str2( "" "value1" "" "value3" "" "" ); Arc::XMLNode xml1(xml_str1); Arc::XMLNode xml2(xml_str2); Arc::NS ns; ns["ns1"] = "http://host/path1"; ns["ns2"] = "http://host/path2"; Arc::XMLNodeList list1 = xml1.XPathLookup("/root1/child2",Arc::NS()); 
CPPUNIT_ASSERT_EQUAL(1,(int)list1.size()); Arc::XMLNodeList list2 = xml1.XPathLookup("/ns1:root1/ns2:child2",ns); CPPUNIT_ASSERT_EQUAL(0,(int)list2.size()); Arc::XMLNodeList list3 = xml2.XPathLookup("/root1/child2",Arc::NS()); CPPUNIT_ASSERT_EQUAL(0,(int)list3.size()); Arc::XMLNodeList list4 = xml2.XPathLookup("/ns1:root1/ns2:child2",ns); CPPUNIT_ASSERT_EQUAL(1,(int)list4.size()); Arc::XMLNodeList list5 = xml2.XPathLookup("/ns1:root1/ns1:child1",ns); CPPUNIT_ASSERT_EQUAL(1,(int)list5.size()); xml2.StripNamespace(-1); Arc::XMLNodeList list6 = xml2.XPathLookup("/root1/child1",Arc::NS()); CPPUNIT_ASSERT_EQUAL(1,(int)list6.size()); } CPPUNIT_TEST_SUITE_REGISTRATION(XMLNodeTest); nordugrid-arc-5.4.2/src/hed/libs/common/test/PaxHeaders.7502/UserTest.cpp0000644000000000000000000000012412050420662024203 xustar000000000000000027 mtime=1352802738.551413 27 atime=1513200574.842705 30 ctime=1513200658.916734237 nordugrid-arc-5.4.2/src/hed/libs/common/test/UserTest.cpp0000644000175000002070000000356012050420662024254 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #ifdef WIN32 #include #else #include #include #endif #include #include #include class UserTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(UserTest); CPPUNIT_TEST(OwnUserTest); CPPUNIT_TEST_SUITE_END(); public: void OwnUserTest(); }; void UserTest::OwnUserTest() { #ifdef WIN32 // uid/gid not implemented in win32 version of User int uid = 0; int gid = 0; bool found; std::string username = Glib::getenv("USERNAME", found); CPPUNIT_ASSERT(found); std::string home = g_get_user_config_dir(); #else uid_t uid_ = getuid(); // My username struct passwd pwd; char pwdbuf[2048]; struct passwd *pwd_p; CPPUNIT_ASSERT_EQUAL(0, getpwuid_r(uid_, &pwd, pwdbuf, sizeof(pwdbuf), &pwd_p)); int uid = (int)uid_; int gid = pwd_p->pw_gid; std::string username = pwd_p->pw_name; std::string home = Arc::GetEnv("HOME"); if (home.empty()) pwd_p->pw_dir; #endif // User using this user's uid Arc::User user; CPPUNIT_ASSERT(user); CPPUNIT_ASSERT_EQUAL(uid, user.get_uid()); CPPUNIT_ASSERT_EQUAL(gid, user.get_gid()); CPPUNIT_ASSERT_EQUAL(username, user.Name()); CPPUNIT_ASSERT_EQUAL(home, user.Home()); // User with specified uid and gid Arc::User user2(uid, gid); CPPUNIT_ASSERT(user2); CPPUNIT_ASSERT_EQUAL(uid, user2.get_uid()); CPPUNIT_ASSERT_EQUAL(gid, user2.get_gid()); CPPUNIT_ASSERT_EQUAL(username, user2.Name()); CPPUNIT_ASSERT_EQUAL(home, user2.Home()); // User with specified username Arc::User user3(username); CPPUNIT_ASSERT(user3); CPPUNIT_ASSERT_EQUAL(uid, user3.get_uid()); CPPUNIT_ASSERT_EQUAL(gid, user3.get_gid()); CPPUNIT_ASSERT_EQUAL(username, user3.Name()); CPPUNIT_ASSERT_EQUAL(home, user3.Home()); } CPPUNIT_TEST_SUITE_REGISTRATION(UserTest); nordugrid-arc-5.4.2/src/hed/libs/common/test/PaxHeaders.7502/WatchdogTest.cpp0000644000000000000000000000012311757661340025041 xustar000000000000000026 mtime=1337942752.88116 27 atime=1513200574.852706 30 ctime=1513200658.917734249 nordugrid-arc-5.4.2/src/hed/libs/common/test/WatchdogTest.cpp0000644000175000002070000000123411757661340025107 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include class WatchdogTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(WatchdogTest); CPPUNIT_TEST(TestWatchdog); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void TestWatchdog(); }; void WatchdogTest::setUp() { } void WatchdogTest::tearDown() { } void WatchdogTest::TestWatchdog() { Arc::WatchdogListener 
l; Arc::WatchdogChannel c(20); CPPUNIT_ASSERT_EQUAL(true,l.Listen()); } CPPUNIT_TEST_SUITE_REGISTRATION(WatchdogTest); nordugrid-arc-5.4.2/src/hed/libs/common/test/PaxHeaders.7502/FileLockTest.cpp0000644000000000000000000000012411743734244024771 xustar000000000000000027 mtime=1334818980.182524 27 atime=1513200574.844706 30 ctime=1513200658.909734151 nordugrid-arc-5.4.2/src/hed/libs/common/test/FileLockTest.cpp0000644000175000002070000002240411743734244025040 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #ifndef WIN32 #include #endif #include #ifdef WIN32 #include // for gethostname() #endif #include #include #include class FileLockTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(FileLockTest); CPPUNIT_TEST(TestFileLockAcquire); CPPUNIT_TEST(TestFileLockRelease); CPPUNIT_TEST(TestFileLockCheck); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void TestFileLockAcquire(); void TestFileLockRelease(); void TestFileLockCheck(); private: bool _createFile(const std::string& filename, const std::string& text = "a"); std::string _readFile(const std::string& filename); std::string testroot; }; void FileLockTest::setUp() { std::string tmpdir; Arc::TmpDirCreate(tmpdir); testroot = tmpdir; } void FileLockTest::tearDown() { Arc::DirDelete(testroot); } void FileLockTest::TestFileLockAcquire() { // test that a lock can be acquired std::string filename(testroot + "/file1"); std::string lock_file(filename + ".lock"); Arc::FileLock lock(filename); CPPUNIT_ASSERT(lock.acquire()); // test file is locked struct stat fileStat; CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat lock file " + lock_file, 0, stat(lock_file.c_str(), &fileStat)); // look at modification time - should not be more than 1 second old time_t mod_time = fileStat.st_mtime; time_t now = time(NULL); CPPUNIT_ASSERT((now - mod_time) <= 1); // check it has the right pid inside std::string lock_pid = _readFile(lock_file); CPPUNIT_ASSERT(lock_pid != ""); // construct hostname char hostname[256]; if(gethostname(hostname, sizeof(hostname)) != 0) hostname[0] = '\0'; std::string host(hostname); CPPUNIT_ASSERT_EQUAL(Arc::tostring(getpid()) + "@" + host, lock_pid); bool lock_removed = false; struct utimbuf times; time_t t = 1; #ifndef WIN32 // set old modification time // utime does not work on windows - skip for windows times.actime = t; times.modtime = t; CPPUNIT_ASSERT_EQUAL(0, utime(lock_file.c_str(), ×)); CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat lock file " + lock_file, 0, stat(lock_file.c_str(), &fileStat)); CPPUNIT_ASSERT_EQUAL(t, fileStat.st_mtime); // call acquire() again - should succeed and make new lock file CPPUNIT_ASSERT(lock.acquire(lock_removed)); CPPUNIT_ASSERT(lock_removed); #endif // look at modification time - should not be more than 1 second old CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat lock file " + lock_file, 0, stat(lock_file.c_str(), &fileStat)); mod_time = fileStat.st_mtime; now = time(NULL); CPPUNIT_ASSERT((now - mod_time) <= 1); // lock the file with a pid which is still running on this host _createFile(lock_file, "1@" + host); lock_removed = false; CPPUNIT_ASSERT(!lock.acquire(lock_removed)); CPPUNIT_ASSERT(!lock_removed); // lock with process on different host lock_removed = false; _createFile(lock_file, "1@mybadhost.org"); CPPUNIT_ASSERT(!lock.acquire(lock_removed)); CPPUNIT_ASSERT(!lock_removed); #ifndef WIN32 // try again with a non-existent pid // windows can't check for running pid - skip for windows _createFile(lock_file, "99999@" + host); 
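  // The lock file written by Arc::FileLock contains "<pid>@<hostname>". The branch below
  // checks that acquire() treats a lock held by a non-existent local pid (99999 here) as
  // stale: it removes the old lock, takes the lock itself, and reports the removal through
  // the lock_removed flag.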
lock_removed = false; CPPUNIT_ASSERT(lock.acquire(lock_removed)); CPPUNIT_ASSERT(lock_removed); #endif // badly formatted pid _createFile(lock_file, "abcd@" + host); lock_removed = false; CPPUNIT_ASSERT(!lock.acquire(lock_removed)); CPPUNIT_ASSERT(!lock_removed); // set small timeout CPPUNIT_ASSERT_EQUAL(0, remove(lock_file.c_str())); lock_removed = false; lock = Arc::FileLock(filename, 1); CPPUNIT_ASSERT(lock.acquire(lock_removed)); CPPUNIT_ASSERT(!lock_removed); // use longer sleep because times and sleeps are very // approximate on windows sleep(4); CPPUNIT_ASSERT(lock.acquire(lock_removed)); CPPUNIT_ASSERT(lock_removed); // don't use pid CPPUNIT_ASSERT_EQUAL(0, remove(lock_file.c_str())); lock_removed = false; lock = Arc::FileLock(filename, 30, false); CPPUNIT_ASSERT(lock.acquire(lock_removed)); CPPUNIT_ASSERT(!lock_removed); // check lock file is empty lock_pid = _readFile(lock_file); CPPUNIT_ASSERT(lock_pid.empty()); // create an empty lock file - acquire should fail CPPUNIT_ASSERT_EQUAL(0, remove(lock_file.c_str())); _createFile(lock_file, ""); lock = Arc::FileLock(filename); CPPUNIT_ASSERT(!lock.acquire()); #ifndef WIN32 // set old modification time - acquire should now succeed // utime does not work on windows - skip for windows times.actime = t; times.modtime = t; CPPUNIT_ASSERT_EQUAL(0, utime(lock_file.c_str(), ×)); CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat lock file " + lock_file, 0, stat(lock_file.c_str(), &fileStat)); CPPUNIT_ASSERT_EQUAL(t, fileStat.st_mtime); CPPUNIT_ASSERT(lock.acquire()); #endif #ifndef WIN32 // create lock with empty hostname - acquire should still work // windows can't check for running pid - skip for windows lock = Arc::FileLock(filename); _createFile(lock_file, "99999@"); lock_removed = false; CPPUNIT_ASSERT(lock.acquire(lock_removed)); CPPUNIT_ASSERT(lock_removed); #endif } void FileLockTest::TestFileLockRelease() { std::string filename(testroot + "/file2"); std::string lock_file(filename + ".lock"); // release non-existent lock struct stat fileStat; CPPUNIT_ASSERT(stat(lock_file.c_str(), &fileStat) != 0); Arc::FileLock lock(filename); CPPUNIT_ASSERT(!lock.release()); // construct hostname char hostname[256]; if(gethostname(hostname, sizeof(hostname)) != 0) hostname[0] = '\0'; std::string host(hostname); // create a valid lock file with this pid _createFile(lock_file, std::string(Arc::tostring(getpid()) + "@" + host)); lock = Arc::FileLock(filename); CPPUNIT_ASSERT(lock.release()); // test lock file is gone CPPUNIT_ASSERT(stat(lock_file.c_str(), &fileStat) != 0); // create a lock with a different pid _createFile(lock_file, std::string("1@" + host)); CPPUNIT_ASSERT(!lock.release()); // test lock file is still there CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat lock file " + lock_file, 0, stat(lock_file.c_str(), &fileStat)); // create lock with different host CPPUNIT_ASSERT_EQUAL(0, remove(lock_file.c_str())); _createFile(lock_file, std::string(Arc::tostring(getpid()) + "@mybadhost.org")); CPPUNIT_ASSERT(!lock.release()); CPPUNIT_ASSERT_EQUAL_MESSAGE("Could not stat lock file " + lock_file, 0, stat(lock_file.c_str(), &fileStat)); // force release CPPUNIT_ASSERT(lock.release(true)); CPPUNIT_ASSERT(stat(lock_file.c_str(), &fileStat) != 0); // create an empty lock file - release should fail _createFile(lock_file, ""); lock = Arc::FileLock(filename); CPPUNIT_ASSERT(!lock.release()); // set use_pid false, release should succeed now lock = Arc::FileLock(filename, 30, false); CPPUNIT_ASSERT(lock.release()); // create lock with empty hostname - release 
should still work lock = Arc::FileLock(filename); _createFile(lock_file, std::string(Arc::tostring(getpid()) + "@")); CPPUNIT_ASSERT(lock.release()); } void FileLockTest::TestFileLockCheck() { std::string filename(testroot + "/file3"); std::string lock_file(filename + ".lock"); // check non-existent lock struct stat fileStat; CPPUNIT_ASSERT(stat(lock_file.c_str(), &fileStat) != 0); Arc::FileLock lock(filename); CPPUNIT_ASSERT_EQUAL(-1, lock.check()); // construct hostname char hostname[256]; if(gethostname(hostname, sizeof(hostname)) != 0) hostname[0] = '\0'; std::string host(hostname); // create a valid lock file with this pid _createFile(lock_file, std::string(Arc::tostring(getpid()) + "@" + host)); lock = Arc::FileLock(filename); CPPUNIT_ASSERT_EQUAL(0, lock.check()); // create a lock with a different pid _createFile(lock_file, std::string("1@" + host)); CPPUNIT_ASSERT_EQUAL(1, lock.check()); // create lock with different host CPPUNIT_ASSERT_EQUAL(0, remove(lock_file.c_str())); _createFile(lock_file, std::string(Arc::tostring(getpid()) + "@mybadhost.org")); CPPUNIT_ASSERT_EQUAL(-1, lock.check()); // create an empty lock file - check should fail _createFile(lock_file, ""); lock = Arc::FileLock(filename); CPPUNIT_ASSERT_EQUAL(-1, lock.check()); // set use_pid false, check should succeed now lock = Arc::FileLock(filename, 30, false); CPPUNIT_ASSERT_EQUAL(0, lock.check()); // create lock with empty hostname - check should still be ok lock = Arc::FileLock(filename); _createFile(lock_file, std::string(Arc::tostring(getpid()) + "@")); CPPUNIT_ASSERT_EQUAL(0, lock.check()); } bool FileLockTest::_createFile(const std::string& filename, const std::string& text) { remove(filename.c_str()); FILE *pFile; pFile = fopen((char*)filename.c_str(), "w"); if (pFile == NULL) return false; fputs((char*)text.c_str(), pFile); fclose(pFile); return true; } std::string FileLockTest::_readFile(const std::string& filename) { FILE *pFile; char mystring[1024]; pFile = fopen((char*)filename.c_str(), "r"); if (pFile == NULL) return ""; std::string data; while (fgets(mystring, sizeof(mystring), pFile)) data += std::string(mystring); fclose(pFile); return data; } CPPUNIT_TEST_SUITE_REGISTRATION(FileLockTest); nordugrid-arc-5.4.2/src/hed/libs/common/test/PaxHeaders.7502/FileUtilsTest.cpp0000644000000000000000000000012413124220317025163 xustar000000000000000027 mtime=1498489039.895801 27 atime=1513200574.853706 30 ctime=1513200658.910734164 nordugrid-arc-5.4.2/src/hed/libs/common/test/FileUtilsTest.cpp0000644000175000002070000002362713124220317025242 0ustar00mockbuildmock00000000000000// TODO: test for operations under different account #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include class FileUtilsTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(FileUtilsTest); CPPUNIT_TEST(TestFileStat); CPPUNIT_TEST(TestFileCopy); CPPUNIT_TEST(TestFileLink); CPPUNIT_TEST(TestFileCreateAndRead); CPPUNIT_TEST(TestMakeAndDeleteDir); CPPUNIT_TEST(TestTmpDirCreate); CPPUNIT_TEST(TestTmpFileCreate); CPPUNIT_TEST(TestDirList); CPPUNIT_TEST(TestCanonicalDir); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void TestFileStat(); void TestFileCopy(); void TestFileLink(); void TestFileCreateAndRead(); void TestMakeAndDeleteDir(); void TestTmpDirCreate(); void TestTmpFileCreate(); void TestDirList(); void TestCanonicalDir(); private: bool _createFile(const std::string& filename, const std::string& text = "a"); std::string testroot; std::string sep; }; void FileUtilsTest::setUp() { 
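  // Fixture setup: create a private temporary directory (testroot) with Arc::TmpDirCreate
  // and remember the platform directory separator; tearDown() removes the whole tree again
  // with Arc::DirDelete so every test starts from a clean state.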
std::string tmpdir; Arc::TmpDirCreate(tmpdir); testroot = tmpdir; sep = G_DIR_SEPARATOR_S; Arc::FileAccess::testtune(); } void FileUtilsTest::tearDown() { Arc::DirDelete(testroot); } void FileUtilsTest::TestFileStat() { CPPUNIT_ASSERT(_createFile(testroot + "/file1")); struct stat st; CPPUNIT_ASSERT(Arc::FileStat(testroot+"/file1", &st, true)); CPPUNIT_ASSERT_EQUAL(1, (int)st.st_size); CPPUNIT_ASSERT(S_IRUSR & st.st_mode); CPPUNIT_ASSERT(S_IWUSR & st.st_mode); CPPUNIT_ASSERT(!Arc::FileStat(testroot+"/file2", &st, true)); CPPUNIT_ASSERT(!Arc::FileStat(testroot+"/file1", &st, -1, -1, true)); } void FileUtilsTest::TestFileCopy() { CPPUNIT_ASSERT(_createFile(testroot + "/file1")); CPPUNIT_ASSERT(Arc::FileCopy(testroot+"/file1", testroot+"/file2")); struct stat st; CPPUNIT_ASSERT(Arc::FileStat(testroot+"/file2", &st, true)); CPPUNIT_ASSERT_EQUAL(1, (int)st.st_size); int h = open(std::string(testroot+"/file2").c_str(), O_RDONLY , S_IRUSR | S_IWUSR); CPPUNIT_ASSERT(h > 0); int h2 = open(std::string(testroot+"/file3").c_str(), O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR); CPPUNIT_ASSERT(h > 0); CPPUNIT_ASSERT(Arc::FileCopy(h, h2)); CPPUNIT_ASSERT_EQUAL(0, close(h)); CPPUNIT_ASSERT_EQUAL(0, close(h2)); } void FileUtilsTest::TestFileLink() { #ifndef WIN32 CPPUNIT_ASSERT(_createFile(testroot + "/file1")); CPPUNIT_ASSERT(Arc::FileLink(testroot+"/file1", testroot+"/file1s", true)); CPPUNIT_ASSERT(Arc::FileLink(testroot+"/file1", testroot+"/file1h", false)); struct stat st; CPPUNIT_ASSERT(Arc::FileStat(testroot+"/file1s", &st, true)); CPPUNIT_ASSERT_EQUAL(1, (int)st.st_size); CPPUNIT_ASSERT(Arc::FileStat(testroot+"/file1h", &st, true)); CPPUNIT_ASSERT_EQUAL(1, (int)st.st_size); CPPUNIT_ASSERT_EQUAL(testroot+"/file1", Arc::FileReadLink(testroot+"/file1s")); #endif } void FileUtilsTest::TestFileCreateAndRead() { // create empty file std::string filename(testroot + "/file1"); CPPUNIT_ASSERT(Arc::FileCreate(filename, "", 0, 0, 0600)); struct stat st; CPPUNIT_ASSERT(Arc::FileStat(filename, &st, true)); CPPUNIT_ASSERT_EQUAL(0, (int)st.st_size); #ifndef WIN32 CPPUNIT_ASSERT_EQUAL(0600, (int)(st.st_mode & 0777)); #endif std::list data; CPPUNIT_ASSERT(Arc::FileRead(filename, data)); CPPUNIT_ASSERT(data.empty()); // create again with some data CPPUNIT_ASSERT(Arc::FileCreate(filename, "12\nabc\n\nxyz\n")); CPPUNIT_ASSERT(Arc::FileRead(filename, data)); CPPUNIT_ASSERT_EQUAL(4, (int)data.size()); CPPUNIT_ASSERT_EQUAL(std::string("12"), data.front()); CPPUNIT_ASSERT_EQUAL(std::string("xyz"), data.back()); std::string cdata; CPPUNIT_ASSERT(Arc::FileRead(filename, cdata)); CPPUNIT_ASSERT_EQUAL(std::string("12\nabc\n\nxyz\n"), cdata); // remove file and check failure CPPUNIT_ASSERT_EQUAL(true, Arc::FileDelete(filename.c_str())); CPPUNIT_ASSERT(!Arc::FileRead(filename, data)); } void FileUtilsTest::TestMakeAndDeleteDir() { // create a few subdirs and files then recursively delete struct stat st; CPPUNIT_ASSERT(stat(testroot.c_str(), &st) == 0); CPPUNIT_ASSERT(_createFile(testroot+sep+"file1")); CPPUNIT_ASSERT(Arc::DirCreate(std::string(testroot+sep+"dir1"), S_IRUSR | S_IWUSR | S_IXUSR)); CPPUNIT_ASSERT(Arc::DirCreate(std::string(testroot+sep+"dir1"), S_IRUSR | S_IWUSR | S_IXUSR)); CPPUNIT_ASSERT(stat(std::string(testroot+sep+"dir1").c_str(), &st) == 0); CPPUNIT_ASSERT(S_ISDIR(st.st_mode)); CPPUNIT_ASSERT(_createFile(testroot+sep+"dir1"+sep+"file2")); // should fail if with_parents is set to false CPPUNIT_ASSERT(!Arc::DirCreate(std::string(testroot+sep+"dir1"+sep+"dir2"+sep+"dir3"), S_IRUSR | S_IWUSR | S_IXUSR, false)); 
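  // Arc::DirCreate only creates missing parent directories when its with_parents argument
  // is true, so the preceding call fails for the nested dir1/dir2/dir3 path, while the call
  // below with with_parents=true is expected to succeed.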
CPPUNIT_ASSERT(Arc::DirCreate(std::string(testroot+sep+"dir1"+sep+"dir2"+sep+"dir3"), S_IRUSR | S_IWUSR | S_IXUSR, true)); CPPUNIT_ASSERT(stat(std::string(testroot+sep+"dir1"+sep+"dir2"+sep+"dir3").c_str(), &st) == 0); CPPUNIT_ASSERT(S_ISDIR(st.st_mode)); CPPUNIT_ASSERT(_createFile(testroot+sep+"dir1"+sep+"dir2"+sep+"dir3"+sep+"file4")); #ifndef WIN32 CPPUNIT_ASSERT(symlink(std::string(testroot+sep+"dir1"+sep+"dir2").c_str(), std::string(testroot+sep+"dir1"+sep+"dir2"+sep+"link1").c_str()) == 0); #endif CPPUNIT_ASSERT(!Arc::DirDelete(testroot, false)); CPPUNIT_ASSERT(Arc::DirDelete(testroot, true)); CPPUNIT_ASSERT(stat(testroot.c_str(), &st) != 0); } void FileUtilsTest::TestTmpDirCreate() { std::string path; CPPUNIT_ASSERT(Arc::TmpDirCreate(path)); struct stat st; CPPUNIT_ASSERT(stat(path.c_str(), &st) == 0); CPPUNIT_ASSERT(S_ISDIR(st.st_mode)); CPPUNIT_ASSERT(Arc::DirDelete(path)); CPPUNIT_ASSERT(stat(path.c_str(), &st) != 0); } void FileUtilsTest::TestTmpFileCreate() { // No specified path - uses system tmp std::string path; CPPUNIT_ASSERT(Arc::TmpFileCreate(path,"TEST")); struct stat st; CPPUNIT_ASSERT(stat(path.c_str(), &st) == 0); CPPUNIT_ASSERT(S_ISREG(st.st_mode)); CPPUNIT_ASSERT_EQUAL(4,(int)st.st_size); CPPUNIT_ASSERT(Arc::FileDelete(path)); CPPUNIT_ASSERT(stat(path.c_str(), &st) != 0); // Specified path path = Glib::build_filename(Glib::get_tmp_dir(), "myfile-XXXXXX"); CPPUNIT_ASSERT(Arc::TmpFileCreate(path,"TEST")); CPPUNIT_ASSERT_EQUAL(0, (int)path.find(Glib::build_filename(Glib::get_tmp_dir(), "myfile-"))); CPPUNIT_ASSERT(stat(path.c_str(), &st) == 0); CPPUNIT_ASSERT(S_ISREG(st.st_mode)); CPPUNIT_ASSERT_EQUAL(4,(int)st.st_size); CPPUNIT_ASSERT(Arc::FileDelete(path)); CPPUNIT_ASSERT(stat(path.c_str(), &st) != 0); // Specified path with no template - should use default name path = Glib::build_filename(Glib::get_tmp_dir(), "myfile"); CPPUNIT_ASSERT(Arc::TmpFileCreate(path,"TEST")); CPPUNIT_ASSERT_EQUAL((int)std::string::npos, (int)path.find(Glib::build_filename(Glib::get_tmp_dir(), "myfile"))); CPPUNIT_ASSERT(stat(path.c_str(), &st) == 0); CPPUNIT_ASSERT(S_ISREG(st.st_mode)); CPPUNIT_ASSERT_EQUAL(4,(int)st.st_size); CPPUNIT_ASSERT(Arc::FileDelete(path)); CPPUNIT_ASSERT(stat(path.c_str(), &st) != 0); } void FileUtilsTest::TestDirList() { // create a few subdirs and files then list struct stat st; std::list entries; CPPUNIT_ASSERT(stat(testroot.c_str(), &st) == 0); CPPUNIT_ASSERT(Arc::DirCreate(std::string(testroot+sep+"dir1"), S_IRUSR | S_IWUSR | S_IXUSR)); CPPUNIT_ASSERT(_createFile(testroot+sep+"dir1"+sep+"file1")); CPPUNIT_ASSERT(_createFile(testroot+sep+"file1")); // No such dir CPPUNIT_ASSERT(!Arc::DirList(std::string(testroot+sep+"test"), entries, false)); CPPUNIT_ASSERT(entries.empty()); // Not a dir CPPUNIT_ASSERT(!Arc::DirList(std::string(testroot+sep+"file1"), entries, false)); CPPUNIT_ASSERT(entries.empty()); // Should only list top-level CPPUNIT_ASSERT(Arc::DirList(std::string(testroot), entries, false)); CPPUNIT_ASSERT_EQUAL(2, (int)entries.size()); // The order of entries is not guaranteed CPPUNIT_ASSERT(std::find(entries.begin(), entries.end(), std::string(testroot+sep+"dir1")) != entries.end()); CPPUNIT_ASSERT(std::find(entries.begin(), entries.end(), std::string(testroot+sep+"file1")) != entries.end()); // List recursively CPPUNIT_ASSERT(Arc::DirList(std::string(testroot), entries, true)); CPPUNIT_ASSERT_EQUAL(3, (int)entries.size()); // The order of entries is not guaranteed CPPUNIT_ASSERT(std::find(entries.begin(), entries.end(), 
std::string(testroot+sep+"dir1")) != entries.end()); CPPUNIT_ASSERT(std::find(entries.begin(), entries.end(), std::string(testroot+sep+"file1")) != entries.end()); CPPUNIT_ASSERT(std::find(entries.begin(), entries.end(), std::string(testroot+sep+"dir1"+sep+"file1")) != entries.end()); } void FileUtilsTest::TestCanonicalDir() { std::string dir(sep+"home"+sep+"me"+sep+"dir1"); CPPUNIT_ASSERT(Arc::CanonicalDir(dir)); CPPUNIT_ASSERT_EQUAL(std::string(sep+"home"+sep+"me"+sep+"dir1"), dir); CPPUNIT_ASSERT(Arc::CanonicalDir(dir, false)); CPPUNIT_ASSERT_EQUAL(std::string("home"+sep+"me"+sep+"dir1"), dir); dir = sep+"home"+sep+"me"+sep+".."+sep+"me"; CPPUNIT_ASSERT(Arc::CanonicalDir(dir)); CPPUNIT_ASSERT_EQUAL(std::string(sep+"home"+sep+"me"), dir); dir = sep+"home"+sep+"me"+sep+".."+sep+".."; CPPUNIT_ASSERT(Arc::CanonicalDir(dir)); CPPUNIT_ASSERT_EQUAL(sep, dir); dir = sep+"home"+sep+"me"+sep+".."+sep+".."; CPPUNIT_ASSERT(Arc::CanonicalDir(dir, false)); CPPUNIT_ASSERT_EQUAL(std::string(""), dir); dir = sep+"home"+sep+"me"+sep+".."+sep+".."+sep+".."; CPPUNIT_ASSERT(!Arc::CanonicalDir(dir, false)); dir = sep+"home"+sep+"me"+sep; CPPUNIT_ASSERT(Arc::CanonicalDir(dir, true, true)); CPPUNIT_ASSERT_EQUAL(std::string(sep+"home"+sep+"me"+sep), dir); } bool FileUtilsTest::_createFile(const std::string& filename, const std::string& text) { FILE *pFile; pFile = fopen((char*)filename.c_str(), "w"); if (pFile == NULL) return false; fputs((char*)text.c_str(), pFile); fclose(pFile); return true; } CPPUNIT_TEST_SUITE_REGISTRATION(FileUtilsTest); nordugrid-arc-5.4.2/src/hed/libs/common/test/PaxHeaders.7502/rcode0000644000000000000000000000012411442406425022745 xustar000000000000000027 mtime=1284115733.924716 27 atime=1513200574.842705 30 ctime=1513200658.919734274 nordugrid-arc-5.4.2/src/hed/libs/common/test/rcode0000755000175000002070000000007211442406425023014 0ustar00mockbuildmock00000000000000#!/bin/sh printf 'STDOUT' printf 'STDERR' 1>&2 exit $1 nordugrid-arc-5.4.2/src/hed/libs/common/test/PaxHeaders.7502/URLTest.cpp0000644000000000000000000000012412771225074023741 xustar000000000000000027 mtime=1474636348.325172 27 atime=1513200574.858706 30 ctime=1513200658.915734225 nordugrid-arc-5.4.2/src/hed/libs/common/test/URLTest.cpp0000644000175000002070000003322212771225074024010 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include class URLTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(URLTest); CPPUNIT_TEST(TestGsiftpUrl); CPPUNIT_TEST(TestLdapUrl); CPPUNIT_TEST(TestHttpUrl); CPPUNIT_TEST(TestFileUrl); CPPUNIT_TEST(TestLdapUrl2); CPPUNIT_TEST(TestOptUrl); CPPUNIT_TEST(TestFtpUrl); CPPUNIT_TEST(TestSrmUrl); CPPUNIT_TEST(TestIP6Url); CPPUNIT_TEST(TestIP6Url2); CPPUNIT_TEST(TestIP6Url3); CPPUNIT_TEST(TestBadUrl); CPPUNIT_TEST(TestWithDefaults); CPPUNIT_TEST(TestStringMatchesURL); CPPUNIT_TEST(TestOptions); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void TestGsiftpUrl(); void TestLdapUrl(); void TestHttpUrl(); void TestFileUrl(); void TestLdapUrl2(); void TestOptUrl(); void TestFtpUrl(); void TestSrmUrl(); void TestIP6Url(); void TestIP6Url2(); void TestIP6Url3(); void TestBadUrl(); void TestWithDefaults(); void TestStringMatchesURL(); void TestOptions(); private: Arc::URL *gsiftpurl, *gsiftpurl2, *ldapurl, *httpurl, *fileurl, *ldapurl2, *opturl, *ftpurl, *srmurl, *ip6url, *ip6url2, *ip6url3; }; void URLTest::setUp() { gsiftpurl = new Arc::URL("gsiftp://hathi.hep.lu.se/public/test.txt"); gsiftpurl2 = new 
Arc::URL("gsiftp://hathi.hep.lu.se:2811/public:/test.txt:checksumtype=adler32"); ldapurl = new Arc::URL("ldap://grid.uio.no/o=grid/mds-vo-name=local"); httpurl = new Arc::URL("http://www.nordugrid.org/monitor:v1.php?debug=2&newpath=/path/to/file&sort=yes&symbols=() *!%\""); fileurl = new Arc::URL("file:/home/grid/runtime/TEST-ATLAS-8.0.5"); ldapurl2 = new Arc::URL("ldap://grid.uio.no/mds-vo-name=local, o=grid"); opturl = new Arc::URL("gsiftp://hathi.hep.lu.se;threads=10;autodir=yes/public/test.txt"); ftpurl = new Arc::URL("ftp://user:secret@ftp.nordugrid.org/pub/files/guide.pdf"); srmurl = new Arc::URL("srm://srm.nordugrid.org/srm/managerv2?SFN=/data/public:/test.txt:checksumtype=adler32"); ip6url = new Arc::URL("ftp://[ffff:eeee:dddd:cccc:aaaa:9999:8888:7777]/path"); ip6url2 = new Arc::URL("ftp://[ffff:eeee:dddd:cccc:aaaa:9999:8888:7777]:2021/path"); ip6url3 = new Arc::URL("ftp://[ffff:eeee:dddd:cccc:aaaa:9999:8888:7777];cache=no/path"); } void URLTest::tearDown() { delete gsiftpurl; delete gsiftpurl2; delete ldapurl; delete httpurl; delete fileurl; delete ldapurl2; delete opturl; delete ftpurl; delete srmurl; delete ip6url; delete ip6url2; delete ip6url3; } void URLTest::TestGsiftpUrl() { CPPUNIT_ASSERT(*gsiftpurl); CPPUNIT_ASSERT_EQUAL(std::string("gsiftp"), gsiftpurl->Protocol()); CPPUNIT_ASSERT(gsiftpurl->Username().empty()); CPPUNIT_ASSERT(gsiftpurl->Passwd().empty()); CPPUNIT_ASSERT_EQUAL(std::string("hathi.hep.lu.se"), gsiftpurl->Host()); CPPUNIT_ASSERT_EQUAL(2811, gsiftpurl->Port()); CPPUNIT_ASSERT_EQUAL(std::string("/public/test.txt"), gsiftpurl->Path()); CPPUNIT_ASSERT(gsiftpurl->HTTPOptions().empty()); CPPUNIT_ASSERT(gsiftpurl->Options().empty()); CPPUNIT_ASSERT(gsiftpurl->Locations().empty()); CPPUNIT_ASSERT(*gsiftpurl2); CPPUNIT_ASSERT_EQUAL(std::string("gsiftp"), gsiftpurl2->Protocol()); CPPUNIT_ASSERT(gsiftpurl2->Username().empty()); CPPUNIT_ASSERT(gsiftpurl2->Passwd().empty()); CPPUNIT_ASSERT_EQUAL(std::string("hathi.hep.lu.se"), gsiftpurl2->Host()); CPPUNIT_ASSERT_EQUAL(2811, gsiftpurl2->Port()); CPPUNIT_ASSERT_EQUAL(std::string("/public:/test.txt"), gsiftpurl2->Path()); CPPUNIT_ASSERT(gsiftpurl2->HTTPOptions().empty()); CPPUNIT_ASSERT(gsiftpurl2->Options().empty()); CPPUNIT_ASSERT(gsiftpurl2->Locations().empty()); CPPUNIT_ASSERT_EQUAL(std::string("adler32"), gsiftpurl2->MetaDataOption("checksumtype")); } void URLTest::TestLdapUrl() { CPPUNIT_ASSERT(*ldapurl); CPPUNIT_ASSERT_EQUAL(std::string("ldap"), ldapurl->Protocol()); CPPUNIT_ASSERT(ldapurl->Username().empty()); CPPUNIT_ASSERT(ldapurl->Passwd().empty()); CPPUNIT_ASSERT_EQUAL(std::string("grid.uio.no"), ldapurl->Host()); CPPUNIT_ASSERT_EQUAL(389, ldapurl->Port()); CPPUNIT_ASSERT_EQUAL(std::string("mds-vo-name=local, o=grid"), ldapurl->Path()); CPPUNIT_ASSERT(ldapurl->HTTPOptions().empty()); CPPUNIT_ASSERT(ldapurl->Options().empty()); CPPUNIT_ASSERT(ldapurl->Locations().empty()); } void URLTest::TestHttpUrl() { CPPUNIT_ASSERT(*httpurl); CPPUNIT_ASSERT_EQUAL(std::string("http"), httpurl->Protocol()); CPPUNIT_ASSERT(httpurl->Username().empty()); CPPUNIT_ASSERT(httpurl->Passwd().empty()); CPPUNIT_ASSERT_EQUAL(std::string("www.nordugrid.org"), httpurl->Host()); CPPUNIT_ASSERT_EQUAL(80, httpurl->Port()); CPPUNIT_ASSERT_EQUAL(std::string("/monitor:v1.php"), httpurl->Path()); CPPUNIT_ASSERT_EQUAL(std::string("/monitor:v1.php?debug=2&newpath=/path/to/file&sort=yes&symbols=() *!%\""), httpurl->FullPath()); 
CPPUNIT_ASSERT_EQUAL(std::string("/monitor%3Av1.php?debug=2&newpath=%2Fpath%2Fto%2Ffile&sort=yes&symbols=%28%29%20%2A%21%25%22"), httpurl->FullPathURIEncoded()); std::map httpmap = httpurl->HTTPOptions(); CPPUNIT_ASSERT_EQUAL((int)httpmap.size(), 4); std::map::iterator mapit = httpmap.begin(); CPPUNIT_ASSERT_EQUAL(mapit->first, std::string("debug")); CPPUNIT_ASSERT_EQUAL(mapit->second, std::string("2")); mapit++; CPPUNIT_ASSERT_EQUAL(mapit->first, std::string("newpath")); CPPUNIT_ASSERT_EQUAL(mapit->second, std::string("/path/to/file")); mapit++; CPPUNIT_ASSERT_EQUAL(mapit->first, std::string("sort")); CPPUNIT_ASSERT_EQUAL(mapit->second, std::string("yes")); mapit++; CPPUNIT_ASSERT_EQUAL(mapit->first, std::string("symbols")); CPPUNIT_ASSERT_EQUAL(mapit->second, std::string("() *!%\"")); CPPUNIT_ASSERT(httpurl->Options().empty()); CPPUNIT_ASSERT(httpurl->Locations().empty()); } void URLTest::TestFileUrl() { CPPUNIT_ASSERT(*fileurl); CPPUNIT_ASSERT_EQUAL(std::string("file"), fileurl->Protocol()); CPPUNIT_ASSERT(fileurl->Username().empty()); CPPUNIT_ASSERT(fileurl->Passwd().empty()); CPPUNIT_ASSERT(fileurl->Host().empty()); CPPUNIT_ASSERT_EQUAL(-1, fileurl->Port()); CPPUNIT_ASSERT_EQUAL(std::string("/home/grid/runtime/TEST-ATLAS-8.0.5"), fileurl->Path()); CPPUNIT_ASSERT(fileurl->HTTPOptions().empty()); CPPUNIT_ASSERT(fileurl->Options().empty()); CPPUNIT_ASSERT(fileurl->Locations().empty()); } void URLTest::TestLdapUrl2() { CPPUNIT_ASSERT(*ldapurl); CPPUNIT_ASSERT_EQUAL(std::string("ldap"), ldapurl2->Protocol()); CPPUNIT_ASSERT(ldapurl2->Username().empty()); CPPUNIT_ASSERT(ldapurl2->Passwd().empty()); CPPUNIT_ASSERT_EQUAL(std::string("grid.uio.no"), ldapurl2->Host()); CPPUNIT_ASSERT_EQUAL(389, ldapurl2->Port()); CPPUNIT_ASSERT_EQUAL(std::string("mds-vo-name=local, o=grid"), ldapurl2->Path()); CPPUNIT_ASSERT(ldapurl2->HTTPOptions().empty()); CPPUNIT_ASSERT(ldapurl2->Options().empty()); CPPUNIT_ASSERT(ldapurl2->Locations().empty()); } void URLTest::TestOptUrl() { CPPUNIT_ASSERT(*opturl); CPPUNIT_ASSERT_EQUAL(std::string("gsiftp"), opturl->Protocol()); CPPUNIT_ASSERT(opturl->Username().empty()); CPPUNIT_ASSERT(opturl->Passwd().empty()); CPPUNIT_ASSERT_EQUAL(std::string("hathi.hep.lu.se"), opturl->Host()); CPPUNIT_ASSERT_EQUAL(2811, opturl->Port()); CPPUNIT_ASSERT_EQUAL(std::string("/public/test.txt"), opturl->Path()); CPPUNIT_ASSERT(opturl->HTTPOptions().empty()); CPPUNIT_ASSERT(opturl->Locations().empty()); std::map options = opturl->Options(); CPPUNIT_ASSERT_EQUAL(2, (int)options.size()); std::map::iterator mapit = options.begin(); CPPUNIT_ASSERT_EQUAL(std::string("autodir"), mapit->first); CPPUNIT_ASSERT_EQUAL(std::string("yes"), mapit->second); mapit++; CPPUNIT_ASSERT_EQUAL(std::string("threads"), mapit->first); CPPUNIT_ASSERT_EQUAL(std::string("10"), mapit->second); } void URLTest::TestFtpUrl() { CPPUNIT_ASSERT(*ftpurl); CPPUNIT_ASSERT_EQUAL(std::string("ftp"), ftpurl->Protocol()); CPPUNIT_ASSERT_EQUAL(std::string("user"), ftpurl->Username()); CPPUNIT_ASSERT_EQUAL(std::string("secret"), ftpurl->Passwd()); CPPUNIT_ASSERT_EQUAL(std::string("ftp.nordugrid.org"), ftpurl->Host()); CPPUNIT_ASSERT_EQUAL(21, ftpurl->Port()); CPPUNIT_ASSERT_EQUAL(std::string("/pub/files/guide.pdf"), ftpurl->Path()); CPPUNIT_ASSERT(ftpurl->HTTPOptions().empty()); CPPUNIT_ASSERT(ftpurl->Options().empty()); CPPUNIT_ASSERT(ftpurl->Locations().empty()); } void URLTest::TestSrmUrl() { CPPUNIT_ASSERT(*srmurl); CPPUNIT_ASSERT_EQUAL(std::string("srm"), srmurl->Protocol()); CPPUNIT_ASSERT(srmurl->Username().empty()); 
CPPUNIT_ASSERT(srmurl->Passwd().empty()); CPPUNIT_ASSERT_EQUAL(std::string("srm.nordugrid.org"), srmurl->Host()); // no default port is defined for SRM CPPUNIT_ASSERT_EQUAL(-1, srmurl->Port()); CPPUNIT_ASSERT_EQUAL(std::string("/srm/managerv2"), srmurl->Path()); CPPUNIT_ASSERT_EQUAL(std::string("/data/public:/test.txt"), srmurl->HTTPOption("SFN")); CPPUNIT_ASSERT(srmurl->Options().empty()); CPPUNIT_ASSERT(srmurl->Locations().empty()); CPPUNIT_ASSERT_EQUAL(std::string("adler32"), srmurl->MetaDataOption("checksumtype")); } void URLTest::TestIP6Url() { CPPUNIT_ASSERT(*ip6url); CPPUNIT_ASSERT_EQUAL(std::string("ftp"), ip6url->Protocol()); CPPUNIT_ASSERT_EQUAL(std::string("ffff:eeee:dddd:cccc:aaaa:9999:8888:7777"), ip6url->Host()); CPPUNIT_ASSERT_EQUAL(21, ip6url->Port()); CPPUNIT_ASSERT_EQUAL(std::string("/path"), ip6url->Path()); CPPUNIT_ASSERT(ip6url->Options().empty()); } void URLTest::TestIP6Url2() { CPPUNIT_ASSERT(*ip6url2); CPPUNIT_ASSERT_EQUAL(std::string("ftp"), ip6url2->Protocol()); CPPUNIT_ASSERT_EQUAL(std::string("ffff:eeee:dddd:cccc:aaaa:9999:8888:7777"), ip6url2->Host()); CPPUNIT_ASSERT_EQUAL(2021, ip6url2->Port()); CPPUNIT_ASSERT_EQUAL(std::string("/path"), ip6url2->Path()); CPPUNIT_ASSERT(ip6url2->Options().empty()); } void URLTest::TestIP6Url3() { CPPUNIT_ASSERT(*ip6url3); CPPUNIT_ASSERT_EQUAL(std::string("ftp"), ip6url3->Protocol()); CPPUNIT_ASSERT_EQUAL(std::string("ffff:eeee:dddd:cccc:aaaa:9999:8888:7777"), ip6url3->Host()); CPPUNIT_ASSERT_EQUAL(21, ip6url3->Port()); CPPUNIT_ASSERT_EQUAL(std::string("/path"), ip6url3->Path()); CPPUNIT_ASSERT_EQUAL(1, (int)(ip6url3->Options().size())); std::map options = ip6url3->Options(); CPPUNIT_ASSERT_EQUAL(std::string("no"), options["cache"]); CPPUNIT_ASSERT_EQUAL(std::string("ftp://[ffff:eeee:dddd:cccc:aaaa:9999:8888:7777]:21;cache=no/path"), ip6url3->fullstr()); } void URLTest::TestBadUrl() { Arc::URL *url = new Arc::URL(""); CPPUNIT_ASSERT(!(*url)); url = new Arc::URL("#url"); CPPUNIT_ASSERT(!(*url)); url = new Arc::URL("arc:file1"); CPPUNIT_ASSERT(!(*url)); url = new Arc::URL("http:/file1"); CPPUNIT_ASSERT(!(*url)); delete url; } void URLTest::TestWithDefaults() { Arc::URL url("http://example.org", false, 123, "/test"); CPPUNIT_ASSERT(url); CPPUNIT_ASSERT_EQUAL(123, url.Port()); CPPUNIT_ASSERT_EQUAL((std::string)"/test", url.Path()); url = Arc::URL("http://example.org:321", false, 123, "/test"); CPPUNIT_ASSERT(url); CPPUNIT_ASSERT_EQUAL(321, url.Port()); CPPUNIT_ASSERT_EQUAL((std::string)"/test", url.Path()); url = Arc::URL("http://example.org/testing", false, 123, "/test"); CPPUNIT_ASSERT(url); CPPUNIT_ASSERT_EQUAL(123, url.Port()); CPPUNIT_ASSERT_EQUAL((std::string)"/testing", url.Path()); url = Arc::URL("http://example.org:321/testing", false, 123, "/test"); CPPUNIT_ASSERT(url); CPPUNIT_ASSERT_EQUAL(321, url.Port()); CPPUNIT_ASSERT_EQUAL((std::string)"/testing", url.Path()); url = Arc::URL("http://[::1]", false, 123, "/test"); CPPUNIT_ASSERT(url); CPPUNIT_ASSERT_EQUAL(123, url.Port()); CPPUNIT_ASSERT_EQUAL((std::string)"/test", url.Path()); url = Arc::URL("http://[::1]:321", false, 123, "/test"); CPPUNIT_ASSERT(url); CPPUNIT_ASSERT_EQUAL(321, url.Port()); CPPUNIT_ASSERT_EQUAL((std::string)"/test", url.Path()); url = Arc::URL("http://[::1]/testing", false, 123, "/test"); CPPUNIT_ASSERT(url); CPPUNIT_ASSERT_EQUAL(123, url.Port()); CPPUNIT_ASSERT_EQUAL((std::string)"/testing", url.Path()); url = Arc::URL("http://[::1]:321/testing", false, 123, "/test"); CPPUNIT_ASSERT(url); CPPUNIT_ASSERT_EQUAL(321, url.Port()); 
CPPUNIT_ASSERT_EQUAL((std::string)"/testing", url.Path()); } void URLTest::TestStringMatchesURL() { std::string str; Arc::URL url; str = "example.org"; url = (std::string)"http://example.org:8080/path"; CPPUNIT_ASSERT(url.StringMatches(str)); str += ":8080"; CPPUNIT_ASSERT(url.StringMatches(str)); str = "http://" + str; CPPUNIT_ASSERT(url.StringMatches(str)); str += "/path"; CPPUNIT_ASSERT(url.StringMatches(str)); str = "example.org/"; CPPUNIT_ASSERT(url.StringMatches(str)); } void URLTest::TestOptions() { Arc::URL url("http://example.org:8080/path"); CPPUNIT_ASSERT(url); CPPUNIT_ASSERT(!url.AddOption(std::string("attr1"))); CPPUNIT_ASSERT(!url.AddOption(std::string(""), std::string(""))); CPPUNIT_ASSERT_EQUAL(std::string(""), (url.Option("attr1"))); CPPUNIT_ASSERT(url.AddOption(std::string("attr1"), std::string("value1"))); CPPUNIT_ASSERT_EQUAL(std::string("value1"), url.Option("attr1")); CPPUNIT_ASSERT(!url.AddOption("attr1", "value2", false)); CPPUNIT_ASSERT_EQUAL(std::string("value1"), url.Option("attr1")); CPPUNIT_ASSERT(url.AddOption("attr1", "value2", true)); CPPUNIT_ASSERT_EQUAL(std::string("value2"), url.Option("attr1")); CPPUNIT_ASSERT(!url.AddOption("attr1=value1", false)); CPPUNIT_ASSERT_EQUAL(std::string("value2"), url.Option("attr1")); CPPUNIT_ASSERT(url.AddOption("attr1=value1", true)); CPPUNIT_ASSERT_EQUAL(std::string("value1"), url.Option("attr1")); } CPPUNIT_TEST_SUITE_REGISTRATION(URLTest); nordugrid-arc-5.4.2/src/hed/libs/common/test/PaxHeaders.7502/EnvTest.cpp0000644000000000000000000000012311743750502024023 xustar000000000000000026 mtime=1334825282.36847 27 atime=1513200574.844706 30 ctime=1513200658.908734139 nordugrid-arc-5.4.2/src/hed/libs/common/test/EnvTest.cpp0000644000175000002070000000317511743750502024077 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include class EnvTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(EnvTest); CPPUNIT_TEST(TestEnv); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void TestEnv(); }; void EnvTest::setUp() { } void EnvTest::tearDown() { } void EnvTest::TestEnv() { std::string val; bool found = false; Arc::SetEnv("TEST_ENV_VAR","TEST_ENV_VALUE"); CPPUNIT_ASSERT_EQUAL(std::string("TEST_ENV_VALUE"), Arc::GetEnv("TEST_ENV_VAR",found)); CPPUNIT_ASSERT_EQUAL(true, found); Arc::UnsetEnv("TEST_ENV_VAR"); CPPUNIT_ASSERT_EQUAL(std::string(""), Arc::GetEnv("TEST_ENV_VAR",found)); CPPUNIT_ASSERT_EQUAL(false, found); Arc::SetEnv("TEST_ENV_VAR","TEST_ENV_VALUE2"); Arc::SetEnv("TEST_ENV_VAR","TEST_ENV_VALUE3"); Arc::SetEnv("TEST_ENV_VAR","TEST_ENV_VALUE4", false); CPPUNIT_ASSERT_EQUAL(std::string("TEST_ENV_VALUE3"), Arc::GetEnv("TEST_ENV_VAR",found)); time_t start = ::time(NULL); for(int n = 0; n < 1000000; ++n) { Arc::SetEnv("TEST_ENV_VAR1","TEST_ENV_VALUE"); Arc::UnsetEnv("TEST_ENV_VAR1"); Arc::SetEnv("TEST_ENV_VAR1","TEST_ENV_VALUE"); Arc::SetEnv("TEST_ENV_VAR2","TEST_ENV_VALUE"); Arc::UnsetEnv("TEST_ENV_VAR2"); Arc::SetEnv("TEST_ENV_VAR2","TEST_ENV_VALUE"); Arc::SetEnv("TEST_ENV_VAR3","TEST_ENV_VALUE"); Arc::UnsetEnv("TEST_ENV_VAR3"); Arc::SetEnv("TEST_ENV_VAR3","TEST_ENV_VALUE"); // Limit duration by reasonable value if(((unsigned int)(time(NULL)-start)) > 300) break; } } CPPUNIT_TEST_SUITE_REGISTRATION(EnvTest); nordugrid-arc-5.4.2/src/hed/libs/common/test/PaxHeaders.7502/CheckSumTest.cpp0000644000000000000000000000012311741501077024774 xustar000000000000000026 mtime=1334215231.35079 27 atime=1513200574.857706 30 ctime=1513200658.906734115 
nordugrid-arc-5.4.2/src/hed/libs/common/test/CheckSumTest.cpp0000644000175000002070000000467011741501077025051 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include class CheckSumTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(CheckSumTest); CPPUNIT_TEST(CRC32SumTest); CPPUNIT_TEST(MD5SumTest); CPPUNIT_TEST(Adler32SumTest); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void CRC32SumTest(); void MD5SumTest(); void Adler32SumTest(); }; void CheckSumTest::setUp() { std::ofstream f1K("CheckSumTest.f1K.data", std::ios::out), f1M("CheckSumTest.f1M.data", std::ios::out); for (int i = 0; i < 1000; ++i) { f1K << "0"; for (int j = 0; j < 1000; ++j) { f1M << "0"; } } f1K.close(); f1M.close(); } void CheckSumTest::tearDown() { remove("CheckSumTest.f1K.data"); remove("CheckSumTest.f1M.data"); } void CheckSumTest::CRC32SumTest() { CPPUNIT_ASSERT_EQUAL((std::string)"acb7ca96", Arc::CheckSumAny::FileChecksum("CheckSumTest.f1K.data", Arc::CheckSumAny::cksum)); CPPUNIT_ASSERT_EQUAL((std::string)"53a57307", Arc::CheckSumAny::FileChecksum("CheckSumTest.f1M.data", Arc::CheckSumAny::cksum)); char buf[64]; Arc::CheckSumAny ck(Arc::CheckSumAny::cksum); ck.scan("cksum:53a57307"); ck.print(buf,sizeof(buf)); CPPUNIT_ASSERT_EQUAL((std::string)"cksum:53a57307", (std::string)buf); } void CheckSumTest::MD5SumTest() { CPPUNIT_ASSERT_EQUAL((std::string)"88bb69a5d5e02ec7af5f68d82feb1f1d", Arc::CheckSumAny::FileChecksum("CheckSumTest.f1K.data")); CPPUNIT_ASSERT_EQUAL((std::string)"2f54d66538c094bf229e89ed0667b6fd", Arc::CheckSumAny::FileChecksum("CheckSumTest.f1M.data")); char buf[64]; Arc::CheckSumAny ck(Arc::CheckSumAny::md5); ck.scan("md5:2f54d66538c094bf229e89ed0667b6fd"); ck.print(buf,sizeof(buf)); CPPUNIT_ASSERT_EQUAL((std::string)"md5:2f54d66538c094bf229e89ed0667b6fd", (std::string)buf); } void CheckSumTest::Adler32SumTest() { CPPUNIT_ASSERT_EQUAL((std::string)"ad1abb81", Arc::CheckSumAny::FileChecksum("CheckSumTest.f1K.data", Arc::CheckSumAny::adler32)); CPPUNIT_ASSERT_EQUAL((std::string)"471b96e5", Arc::CheckSumAny::FileChecksum("CheckSumTest.f1M.data", Arc::CheckSumAny::adler32)); //char buf[64]; //Arc::CheckSumAny ck(Arc::CheckSumAny::adler32); //ck.scan("adler32:471b96e5"); //ck.print(buf,sizeof(buf)); //CPPUNIT_ASSERT_EQUAL((std::string)"adler32:471b96e5", (std::string)buf); } CPPUNIT_TEST_SUITE_REGISTRATION(CheckSumTest); nordugrid-arc-5.4.2/src/hed/libs/common/test/PaxHeaders.7502/LoggerTest.cpp0000644000000000000000000000012412301125744024506 xustar000000000000000027 mtime=1392815076.546455 27 atime=1513200574.844706 30 ctime=1513200658.911734176 nordugrid-arc-5.4.2/src/hed/libs/common/test/LoggerTest.cpp0000644000175000002070000000675012301125744024563 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include class LoggerTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(LoggerTest); CPPUNIT_TEST(TestLoggerINFO); CPPUNIT_TEST(TestLoggerVERBOSE); CPPUNIT_TEST(TestLoggerTHREAD); CPPUNIT_TEST(TestLoggerDEFAULT); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void TestLoggerINFO(); void TestLoggerVERBOSE(); void TestLoggerTHREAD(); void TestLoggerDEFAULT(); private: std::stringstream stream; std::stringstream stream_thread; Arc::LogStream *output; Arc::LogStream *output_thread; Arc::Logger *logger; static void thread(void* arg); Glib::Mutex thread_lock; }; void LoggerTest::setUp() { output = new 
Arc::LogStream(stream); output_thread = new Arc::LogStream(stream_thread); Arc::Logger::getRootLogger().addDestination(*output); logger = new Arc::Logger(Arc::Logger::getRootLogger(), "TestLogger", Arc::INFO); } void LoggerTest::tearDown() { Arc::Logger::getRootLogger().removeDestinations(); delete logger; delete output; } void LoggerTest::TestLoggerINFO() { std::string res; logger->msg(Arc::VERBOSE, "This VERBOSE message should not be seen"); res = stream.str(); CPPUNIT_ASSERT(res.empty()); logger->msg(Arc::INFO, "This INFO message should be seen"); res = stream.str(); res = res.substr(res.rfind(']') + 2); CPPUNIT_ASSERT_EQUAL(res, std::string("This INFO message should be seen\n")); stream.str(""); } void LoggerTest::TestLoggerVERBOSE() { std::string res; logger->setThreshold(Arc::VERBOSE); logger->msg(Arc::VERBOSE, "This VERBOSE message should now be seen"); res = stream.str(); res = res.substr(res.rfind(']') + 2); CPPUNIT_ASSERT_EQUAL(res, std::string("This VERBOSE message should now be seen\n")); stream.str(""); logger->msg(Arc::INFO, "This INFO message should also be seen"); res = stream.str(); res = res.substr(res.rfind(']') + 2); CPPUNIT_ASSERT_EQUAL(res, std::string("This INFO message should also be seen\n")); stream.str(""); } void LoggerTest::TestLoggerTHREAD() { std::string res; logger->setThreshold(Arc::VERBOSE); thread_lock.lock(); Arc::CreateThreadFunction(&thread,this); thread_lock.lock(); thread_lock.unlock(); logger->msg(Arc::VERBOSE, "This message goes to initial destination"); res = stream.str(); res = res.substr(res.rfind(']') + 2); CPPUNIT_ASSERT_EQUAL(res, std::string("This message goes to initial destination\n")); stream.str(""); res = stream_thread.str(); CPPUNIT_ASSERT(res.empty()); } void LoggerTest::thread(void* arg) { std::string res; LoggerTest& it = *((LoggerTest*)arg); Arc::Logger::getRootLogger().setThreadContext(); Arc::Logger::getRootLogger().removeDestinations(); Arc::Logger::getRootLogger().addDestination(*it.output_thread); it.logger->msg(Arc::VERBOSE, "This message goes to per-thread destination"); res = it.stream_thread.str(); res = res.substr(res.rfind(']') + 2); CPPUNIT_ASSERT_EQUAL(res, std::string("This message goes to per-thread destination\n")); it.stream_thread.str(""); res = it.stream.str(); CPPUNIT_ASSERT(res.empty()); it.thread_lock.unlock(); } void LoggerTest::TestLoggerDEFAULT() { Arc::LogLevel default_level = Arc::Logger::getRootLogger().getThreshold(); Arc::LogLevel bad_level = Arc::istring_to_level("COW"); CPPUNIT_ASSERT_EQUAL(bad_level, default_level); } CPPUNIT_TEST_SUITE_REGISTRATION(LoggerTest); nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/XMLNode.h0000644000000000000000000000012412222541376022370 xustar000000000000000027 mtime=1380631294.108282 27 atime=1513200574.920706 30 ctime=1513200658.828733161 nordugrid-arc-5.4.2/src/hed/libs/common/XMLNode.h0000644000175000002070000005112012222541376022434 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_XMLNODE_H__ #define __ARC_XMLNODE_H__ #include #include #include #include #include #include #include #include #include #include #include namespace Arc { /** \addtogroup common * @{ */ class XMLNode; /// Class to represent an XML namespace. 
/** \headerfile XMLNode.h arc/XMLNode.h */ class NS : public std::map { public: /// Constructor creates empty namespace NS(void) {} /// Constructor creates namespace with one entry NS(const char *prefix, const char *uri) { operator[](prefix) = uri; } /// Constructor creates namespace with multiple entries /** \param nslist Array made of prefix and URI pairs and must be NULL terminated */ NS(const char *nslist[][2]) { for (int n = 0; nslist[n][0]; ++n) operator[](nslist[n][0]) = nslist[n][1]; } /// Constructor creates namespace with multiple entries NS(const std::map& nslist) : std::map(nslist) {} }; typedef std::list XMLNodeList; /// Wrapper for LibXML library Tree interface. /** This class wraps XML Node, Document and Property/Attribute structures. Each instance serves as pointer to actual LibXML element and provides convenient (for chosen purpose) methods for manipulating it. This class has no special ties to LibXML library and may be easily rewritten for any XML parser which provides interface similar to LibXML Tree. It implements only small subset of XML capabilities, which is probably enough for performing most of useful actions. This class also filters out (usually) useless textual nodes which are often used to make XML documents human-readable. \headerfile XMLNode.h arc/XMLNode.h */ class XMLNode { friend bool MatchXMLName(const XMLNode& node1, const XMLNode& node2); friend bool MatchXMLName(const XMLNode& node, const char *name); friend bool MatchXMLName(const XMLNode& node, const std::string& name); friend bool MatchXMLNamespace(const XMLNode& node1, const XMLNode& node2); friend bool MatchXMLNamespace(const XMLNode& node, const char *uri); friend bool MatchXMLNamespace(const XMLNode& node, const std::string& uri); friend class XMLNodeContainer; protected: xmlNodePtr node_; /// If true node is owned by this instance - hence released in destructor. /** Normally that may be true only for top level node of XML document. */ bool is_owner_; /// This variable is reserved for future use. bool is_temporary_; /// Protected constructor for inherited classes. /** Creates instance and links to existing LibXML structure. Acquired structure is not owned by class instance. If there is need to completely pass control of LibXML document to then instance's is_owner_ variable has to be set to true. */ XMLNode(xmlNodePtr node) : node_(node), is_owner_(false), is_temporary_(false) {} /** printf-like callback for libxml */ static void LogError(void * ctx, const char * msg, ...); /** Convenience method for XML validation */ bool Validate(xmlSchemaPtr schema, std::string &err_msg); public: /// Constructor of invalid node. /** Created instance does not point to XML element. All methods are still allowed for such instance but produce no results. */ XMLNode(void) : node_(NULL), is_owner_(false), is_temporary_(false) {} /// Copies existing instance. /** Underlying XML element is NOT copied. Ownership is NOT inherited. Strictly speaking there should be no const here - but that conflicts with C++. */ XMLNode(const XMLNode& node) : node_(node.node_), is_owner_(false), is_temporary_(false) {} /// Creates XML document structure from textual representation of XML document. /** Created structure is pointed and owned by constructed instance. */ XMLNode(const std::string& xml); /// Creates XML document structure from textual representation of XML document. /** Created structure is pointed and owned by constructed instance. */ XMLNode(const char *xml, int len = -1); /// Copy constructor. 
Used by language bindings XMLNode(long ptr_addr); /// Creates empty XML document structure with specified namespaces. /** Created XML contains only root element named 'name'. Created structure is pointed and owned by constructed instance. */ XMLNode(const NS& ns, const char *name); /// Destructor /** Also destroys underlying XML document if owned by this instance */ ~XMLNode(void); /// Creates a copy of XML (sub)tree. /** If object does not represent whole document - top level document is created. 'node' becomes a pointer owning new XML document. */ void New(XMLNode& node) const; /// Exchanges XML (sub)trees. /** The following combinations are possible: - If both this and node are referring owned XML tree (top level node) then references are simply exchanged. This operation is fast. - If both this and node are referring to XML (sub)tree of different documents then (sub)trees are exchanged between documents. - If both this and node are referring to XML (sub)tree of same document then (sub)trees are moved inside document. The main reason for this method is to provide an effective way to insert one XML document inside another. One should take into account that if any of the exchanged nodes is top level it must be also the owner of the document. Otherwise this method will fail. If both nodes are top level owners and/or invalid nodes then this method is identical to Swap(). */ void Exchange(XMLNode& node); /// Moves content of this XML (sub)tree to node. /** This operation is similar to New() except that XML (sub)tree to referred by this is destroyed. This method is more effective than combination of New() and Destroy() because internally it is optimized not to copy data if not needed. The main purpose of this is to effectively extract part of XML document. */ void Move(XMLNode& node); /// Swaps XML (sub)trees to which this and node refer. /** For XML subtrees this method is not anyhow different then using the combination \code XMLNode tmp=*this; *this=node; node=tmp; \endcode But in case of either this or node owning XML document ownership is swapped too. And this is the main purpose of this method. */ void Swap(XMLNode& node); /// Returns true if instance points to XML element - valid instance. operator bool(void) const { return ((node_ != NULL) && (!is_temporary_)); } /// Returns true if instance does not point to XML element - invalid instance. bool operator!(void) const { return ((node_ == NULL) || is_temporary_); } /// Returns true if 'node' represents same XML element. bool operator==(const XMLNode& node) { return ((node_ == node.node_) && (node_ != NULL)); } /// Returns false if 'node' represents same XML element. bool operator!=(const XMLNode& node) { return ((node_ != node.node_) || (node_ == NULL)); } /// Returns true if 'node' represents same XML element - for bindings. bool Same(const XMLNode& node) { return operator==(node); } /// This operator is needed to avoid ambiguity. bool operator==(bool val) { return ((bool)(*this) == val); } /// This operator is needed to avoid ambiguity. bool operator!=(bool val) { return ((bool)(*this) != val); } /// This operator is needed to avoid ambiguity. bool operator==(const std::string& str) { return ((std::string)(*this) == str); } /// This operator is needed to avoid ambiguity. bool operator!=(const std::string& str) { return ((std::string)(*this) != str); } /// This operator is needed to avoid ambiguity. bool operator==(const char *str) { return ((std::string)(*this) == str); } /// This operator is needed to avoid ambiguity. 
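  /// For example (an illustrative fragment; the element name and value are arbitrary):
  /// \code
  ///   if (config["Debug"] != "yes") { /* debugging disabled */ }
  /// \endcode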
bool operator!=(const char *str) { return ((std::string)(*this) != str); } /// Returns XMLNode instance representing n-th child of XML element. /** If such does not exist invalid XMLNode instance is returned */ XMLNode Child(int n = 0); /// Returns XMLNode instance representing first child element with specified name. /** Name may be "namespace_prefix:name", "namespace_uri:name" or simply "name". In last case namespace is ignored. If such node does not exist invalid XMLNode instance is returned. This method should not be marked const because obtaining unrestricted XMLNode of child element allows modification of underlying XML tree. But in order to keep const in other places non-const-handling is passed to programmer. Otherwise C++ compiler goes nuts. */ XMLNode operator[](const char *name) const; /// Returns XMLNode instance representing first child element with specified name. /** Similar to operator[](const char *name) const. */ XMLNode operator[](const std::string& name) const { return operator[](name.c_str()); } /// Returns XMLNode instance representing n-th node in sequence of siblings of same name. /** Its main purpose is to be used to retrieve an element in an array of children of the same name like node["name"][5]. This method should not be marked const because obtaining unrestricted XMLNode of child element allows modification of underlying XML tree. But in order to keep const in other places non-const-handling is passed to programmer. Otherwise C++ compiler goes nuts. */ XMLNode operator[](int n) const; /// Convenience operator to switch to next element of same name. /** If there is no such node this object becomes invalid. */ void operator++(void); /// Convenience operator to switch to previous element of same name. /** If there is no such node this object becomes invalid. */ void operator--(void); /// Returns number of children nodes. int Size(void) const; /// Same as operator[](). XMLNode Get(const std::string& name) const { return operator[](name.c_str()); } /// Returns name of XML node. std::string Name(void) const; /// Returns namespace prefix of XML node. std::string Prefix(void) const; /// Returns prefix:name of XML node. std::string FullName(void) const { return Prefix() + ":" + Name(); } /// Returns namespace URI of XML node. std::string Namespace(void) const; /// Assigns namespace prefix to XML node(s). /** The 'recursion' allows to assign prefixes recursively. Setting it to -1 allows for unlimited recursion. And 0 limits it to this node. */ void Prefix(const std::string& prefix, int recursion = 0); /// Removes namespace prefix from XML node(s). void StripNamespace(int recursion = 0); /// Assigns new name to XML node. void Name(const char *name); /// Assigns new name to XML node. void Name(const std::string& name) { Name(name.c_str()); } /// Fills argument with this instance XML subtree textual representation. void GetXML(std::string& out_xml_str, bool user_friendly = false) const; /// Get string representation of XML subtree. /** Fills out_xml_str with this instance XML subtree textual representation if the XML subtree corresponds to the encoding format specified in the argument, e.g. utf-8. */ void GetXML(std::string& out_xml_str, const std::string& encoding, bool user_friendly = false) const; /// Fills out_xml_str with whole XML document textual representation. void GetDoc(std::string& out_xml_str, bool user_friendly = false) const; /// Returns textual content of node excluding content of children nodes. 
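  /// A minimal illustrative sketch (the XML content is an arbitrary example):
  /// \code
  ///   Arc::XMLNode doc("<job><name>test-job</name></job>");
  ///   std::string name = (std::string)doc["name"];  // yields "test-job"
  /// \endcode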
operator std::string(void) const; /// Sets textual content of node. All existing children nodes are discarded. XMLNode& operator=(const char *content); /// Sets textual content of node. All existing children nodes are discarded. XMLNode& operator=(const std::string& content) { return operator=(content.c_str()); } /// Same as operator=. Used for bindings. void Set(const std::string& content) { operator=(content.c_str()); } /// Make instance refer to another XML node. Ownership is not inherited. /** Due to nature of XMLNode there should be no const here, but that does not fit into C++. */ XMLNode& operator=(const XMLNode& node); // Returns list of all attributes of node // std::list Attributes(void); /// Returns XMLNode instance reresenting n-th attribute of node. XMLNode Attribute(int n = 0); /// Returns XMLNode instance representing first attribute of node with specified by name. XMLNode Attribute(const char *name); /// Returns XMLNode instance representing first attribute of node with specified by name. XMLNode Attribute(const std::string& name) { return Attribute(name.c_str()); } /// Creates new attribute with specified name. XMLNode NewAttribute(const char *name); /// Creates new attribute with specified name. XMLNode NewAttribute(const std::string& name) { return NewAttribute(name.c_str()); } /// Returns number of attributes of node. int AttributesSize(void) const; /// Assigns namespaces of XML document at point specified by this instance. /** If namespace already exists it gets new prefix. New namespaces are added. It is useful to apply this method to XML being processed in order to refer to it's elements by known prefix. If keep is set to false existing namespace definition residing at this instance and below are removed (default behavior). If recursion is set to positive number then depth of prefix replacement is limited by this number (0 limits it to this node only). For unlimited recursion use -1. If recursion is limited then value of keep is ignored and existing namespaces are always kept. */ void Namespaces(const NS& namespaces, bool keep = false, int recursion = -1); /// Returns namespaces known at this node. NS Namespaces(void); /// Returns prefix of specified namespace or empty string if no such namespace. std::string NamespacePrefix(const char *urn); /// Creates new child XML element at specified position with specified name. /** Default is to put it at end of list. If global_order is true position applies to whole set of children, otherwise only to children of same name. Returns created node. */ XMLNode NewChild(const char *name, int n = -1, bool global_order = false); /// Same as NewChild(const char*,int,bool). XMLNode NewChild(const std::string& name, int n = -1, bool global_order = false) { return NewChild(name.c_str(), n, global_order); } /// Creates new child XML element at specified position with specified name and namespaces. /** For more information look at NewChild(const char*,int,bool) */ XMLNode NewChild(const char *name, const NS& namespaces, int n = -1, bool global_order = false); /// Same as NewChild(const char*,const NS&,int,bool). XMLNode NewChild(const std::string& name, const NS& namespaces, int n = -1, bool global_order = false) { return NewChild(name.c_str(), namespaces, n, global_order); } /// Link a copy of supplied XML node as child. /** Returns instance referring to new child. 
XML element is a copy of supplied one but not owned by returned instance */ XMLNode NewChild(const XMLNode& node, int n = -1, bool global_order = false); /// Makes a copy of supplied XML node and makes this instance refer to it. void Replace(const XMLNode& node); /// Destroys underlying XML element. /** XML element is unlinked from XML tree and destroyed. After this operation XMLNode instance becomes invalid */ void Destroy(void); /// Collects nodes corresponding to specified path. /** This is a convenience function to cover common use of XPath but without performance hit. Path is made of node_name[/node_name[...]] and is relative to current node. node_names are treated in same way as in operator[]. \return all nodes which are represented by path. */ XMLNodeList Path(const std::string& path); /// Uses XPath to look up XML tree. /** Returns a list of XMLNode points. The xpathExpr should be like "//xx:child1/" which indicates the namespace and node that you would like to find. The nsList contains namespaces used by the xpathExpr. Query is run on whole XML document but only the elements belonging to this XML subtree are returned. Please note, that default namespaces - without prefix - are not fully supported. So xpathExpr without properly defined namespace prefixes will only work for XML documents without namespaces. */ XMLNodeList XPathLookup(const std::string& xpathExpr, const NS& nsList); /// Get the root node from any child node of the tree. XMLNode GetRoot(void); /// Get the parent node from any child node of the tree. XMLNode Parent(void); /// Save string representation of node to file. bool SaveToFile(const std::string& file_name) const; /// Save string representation of node to stream. bool SaveToStream(std::ostream& out) const; /// Read XML document from file and associate it with this node. bool ReadFromFile(const std::string& file_name); /// Read XML document from stream and associate it with this node. bool ReadFromStream(std::istream& in); // Remove all eye-candy information leaving only informational parts // void Purify(void);. /// XML schema validation against the schema file defined as argument. bool Validate(const std::string &schema_file, std::string &err_msg); /** XML schema validation against the schema XML document defined as argument */ bool Validate(XMLNode schema_doc, std::string &err_msg); }; /// Write XMLNode to output stream. std::ostream& operator<<(std::ostream& out, const XMLNode& node); /// Read into XMLNode from input stream. std::istream& operator>>(std::istream& in, XMLNode& node); /// Container for multiple XMLNode elements. /** \headerfile XMLNode.h arc/XMLNode.h */ class XMLNodeContainer { private: std::vector nodes_; public: /// Default constructor. XMLNodeContainer(void); /// Copy constructor. /** Add nodes from argument. Nodes owning XML document are copied using AddNew(). Not owning nodes are linked using Add() method. */ XMLNodeContainer(const XMLNodeContainer&); ~XMLNodeContainer(void); /// Same as copy constructor with current nodes being deleted first. XMLNodeContainer& operator=(const XMLNodeContainer&); /// Link XML subtree refered by node to container. /** XML tree must be available as long as this object is used. */ void Add(const XMLNode&); /// Link multiple XML subtrees to container. void Add(const std::list&); /// Copy XML subtree referenced by node to container. /** After this operation container refers to independent XML document. This document is deleted when container is destroyed. 
*/ void AddNew(const XMLNode&); /// Copy multiple XML subtrees to container. void AddNew(const std::list&); /// Return number of refered/stored nodes. int Size(void) const; /// Returns n-th node in a store. XMLNode operator[](int); /// Returns all stored nodes. std::list Nodes(void); }; /// Returns true if underlying XML elements have same names. bool MatchXMLName(const XMLNode& node1, const XMLNode& node2); /// Returns true if 'name' matches name of 'node'. If name contains prefix it's checked too. bool MatchXMLName(const XMLNode& node, const char *name); /// Returns true if 'name' matches name of 'node'. If name contains prefix it's checked too. bool MatchXMLName(const XMLNode& node, const std::string& name); /// Returns true if underlying XML elements belong to same namespaces. bool MatchXMLNamespace(const XMLNode& node1, const XMLNode& node2); /// Returns true if 'namespace' matches 'node's namespace.. bool MatchXMLNamespace(const XMLNode& node, const char *uri); /// Returns true if 'namespace' matches 'node's namespace.. bool MatchXMLNamespace(const XMLNode& node, const std::string& uri); /** @} */ } // namespace Arc #endif /* __ARC_XMLNODE_H__ */ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/win32.h0000644000000000000000000000012412075536170022066 xustar000000000000000027 mtime=1358347384.377177 27 atime=1513200574.899706 30 ctime=1513200658.839733295 nordugrid-arc-5.4.2/src/hed/libs/common/win32.h0000644000175000002070000001060212075536170022132 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- // Disable doxygen as this is not part of the ARC API /// \cond #ifndef __ARC_WIN32_H__ #define __ARC_WIN32_H__ #define NOGDI #define WINVER 0x0501 /* we support XP or higher */ #define WIN32_LEAN_AND_MEAN #include #include #include #undef USE_WINSOCK #define USE_WINSOCK 2 #include #include /* Windows redefines CreateDirectory in winbase.h */ #ifdef CreateDirectory #undef CreateDirectory #endif #define SIGPIPE 13 #define SIGTTIN 21 #define SIGTTOU 22 #define sleep(x) Sleep((x) * 1000) inline int usleep(int x) { Sleep((x + 999) / 1000); return 0; } #ifndef HAVE_MKSTEMP #ifdef HAVE_MKTEMP inline int mkstemp(char *pattern) { return mktemp(pattern) != '\0'; }; #endif #endif //#define mkdir(x, y) mkdir((x)) //#define lstat stat // no windows functions #define chown(x, y, z) (0) #define lchown(x, y, z) (0) #define fchown(x, y, z) (0) #define symlink(x, y) (-1) //#define link(x, y) (-1) //#define readlink(x, y, z) (-1) #define getuid() (0) #define getgid() (0) // Socket errors are prefixed WSA in winsock2.h #define EWOULDBLOCK WSAEWOULDBLOCK /* Operation would block */ #define EINPROGRESS WSAEINPROGRESS /* Operation now in progress */ #define EALREADY WSAEALREADY /* Operation already in progress */ #define ENOTSOCK WSAENOTSOCK /* Socket operation on non-socket */ #define EDESTADDRREQ WSAEDESTADDRREQ /* Destination address required */ #define EMSGSIZE WSAEMSGSIZE /* Message too long */ #define EPROTOTYPE WSAEPROTOTYPE /* Protocol wrong type for socket */ #define ENOPROTOOPT WSAENOPROTOOPT /* Protocol not available */ #define EPROTONOSUPPORT WSAEPROTONOSUPPORT /* Protocol not supported */ #define ESOCKTNOSUPPORT WSAESOCKTNOSUPPORT /* Socket type not supported */ #define EOPNOTSUPP WSAEOPNOTSUPP /* Operation not supported on transport endpoint */ #define EPFNOSUPPORT WSAEPFNOSUPPORT /* Protocol family not supported */ #define EAFNOSUPPORT WSAEAFNOSUPPORT /* Address family not supported by protocol */ #define EADDRINUSE WSAEADDRINUSE /* Address already in use */ #define 
EADDRNOTAVAIL WSAEADDRNOTAVAIL /* Cannot assign requested address */ #define ENETDOWN WSAENETDOWN /* Network is down */ #define ENETUNREACH WSAENETUNREACH /* Network is unreachable */ #define ENETRESET WSAENETRESET /* Network dropped connection because of reset */ #define ECONNABORTED WSAECONNABORTED /* Software caused connection abort */ #define ECONNRESET WSAECONNRESET /* Connection reset by peer */ #define ENOBUFS WSAENOBUFS /* No buffer space available */ #define EISCONN WSAEISCONN /* Transport endpoint is already connected */ #define ENOTCONN WSAENOTCONN /* Transport endpoint is not connected */ #define ESHUTDOWN WSAESHUTDOWN /* Cannot send after transport endpoint shutdown */ #define ETOOMANYREFS WSAETOOMANYREFS /* Too many references: cannot splice */ #define ETIMEDOUT WSAETIMEDOUT /* Connection timed out */ #define ECONNREFUSED WSAECONNREFUSED /* Connection refused */ #define ELOOP WSAELOOP /* Too many symbolic links encountered */ #define ENAMETOOLONG WSAENAMETOOLONG /* File name too long */ #define EHOSTDOWN WSAEHOSTDOWN /* Host is down */ #define EHOSTUNREACH WSAEHOSTUNREACH /* No route to host */ #define EUSERS WSAEUSERS /* Too many users */ #define EDQUOT WSAEDQUOT /* Quota exceeded */ #define ESTALE WSAESTALE /* Stale NFS file handle */ #define EREMOTE WSAEREMOTE /* Object is remote */ inline ssize_t readlink(const char *path, char *buf, size_t bufsiz) { return -1; }; #if defined(__cplusplus) #include #include inline int mkdir(const char *pathname, mode_t mode) { return ::mkdir(pathname); } #endif inline int link(const char *oldpath, const char *newpath) { return -1; }; #if defined(__cplusplus) #include inline int lstat(const char *path, struct stat *buf) { return ::stat(path,buf); }; #endif // pwd.h does not exist on windows struct passwd { char *pw_name; char *pw_passwd; int pw_uid; int pw_gid; char *pw_age; char *pw_comment; char *pw_gecos; char *pw_dir; char *pw_shell; }; #endif /// \endcond nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/win32.cpp0000644000000000000000000000012411571010512022404 xustar000000000000000027 mtime=1306792266.481566 27 atime=1513200574.864706 30 ctime=1513200658.870733674 nordugrid-arc-5.4.2/src/hed/libs/common/win32.cpp0000644000175000002070000000116111571010512022450 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include "win32.h" std::string GetOsErrorMessage(void) { std::string rv; LPVOID lpMsgBuf; if (FormatMessage( FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, GetLastError(), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language (LPTSTR)&lpMsgBuf, 0, NULL)) rv.assign(reinterpret_cast(lpMsgBuf)); else rv.assign("FormatMessage API failed"); LocalFree(lpMsgBuf); return rv; } nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/MysqlWrapper.h0000644000000000000000000000012412111140470023553 xustar000000000000000027 mtime=1361363256.081099 27 atime=1513200574.922706 30 ctime=1513200658.839733295 nordugrid-arc-5.4.2/src/hed/libs/common/MysqlWrapper.h0000644000175000002070000000406012111140470023620 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_MYSQLWRAPPER_H__ #define __ARC_MYSQLWRAPPER_H__ #include #include #include #include #include namespace Arc { /// Implements a MySQL version of the Database interface. 
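  /// A minimal connection sketch (host, port, database name and credentials are
  /// placeholders, not defaults of this class):
  /// \code
  ///   std::string server("localhost");
  ///   Arc::MySQLDatabase db(server, 3306);
  ///   std::string dbname("arcdb"), user("arc"), password("secret");
  ///   if (db.connect(dbname, user, password)) {
  ///     // issue queries through Arc::MySQLQuery here
  ///     db.close();
  ///   }
  /// \endcode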
/** \ingroup common * \headerfile MysqlWrapper.h arc/MysqlWrapper.h */ class MySQLDatabase : public Database { friend class MySQLQuery; public: MySQLDatabase(std::string& server, int port); MySQLDatabase(const MySQLDatabase& other); virtual ~MySQLDatabase(); virtual bool connect(std::string& dbname, std::string& user, std::string& password); virtual bool isconnected() const { return is_connected; } virtual void close(); virtual bool enable_ssl(const std::string& keyfile = "", const std::string& certfile = "", const std::string& cafile = "", const std::string& capath = ""); virtual bool shutdown(); private: bool is_connected; std::string server_; int port_; std::string dbname_; std::string user_; std::string password_; MYSQL *mysql; }; /// Implements a MySQL version of the Query database query class. /** \ingroup common * \headerfile MysqlWrapper.h arc/MysqlWrapper.h */ class MySQLQuery : public Query { public: MySQLQuery(Database *db); //MySQLQuery(Database* db, const std::string& sqlstr); virtual ~MySQLQuery(); virtual int get_num_colums(); virtual int get_num_rows(); virtual bool execute(const std::string& sqlstr); virtual QueryRowResult get_row(int row_number) const; virtual QueryRowResult get_row() const; virtual std::string get_row_field(int row_number, std::string& field_name); virtual bool get_array(std::string& sqlstr, QueryArrayResult& result, std::vector& arguments); private: MySQLDatabase *db_; MYSQL_RES *res; int num_rows; int num_colums; std::map field_names; }; } // namespace Arc #endif /* __ARC_MYSQLWRAPPER_H__ */ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/User.h0000644000000000000000000000012412720550513022034 xustar000000000000000027 mtime=1463996747.958365 27 atime=1513200574.885706 30 ctime=1513200658.824733112 nordugrid-arc-5.4.2/src/hed/libs/common/User.h0000644000175000002070000001003312720550513022076 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_USER_H__ #define __ARC_USER_H__ #include #include struct passwd; namespace Arc { /// Platform independent representation of system user /** \ingroup common * \headerfile User.h arc/User.h */ class User { private: // local name, home directory, uid and gid of this user std::string name; std::string home; int uid; int gid; bool valid; bool set(const struct passwd*); public: /// Construct user from current process owner. User(); /// Construct user from username and optional group name. /** If group is not specified it is determined automatically. */ User(const std::string& name, const std::string& group=""); /// Construct user from uid and optional gid. /** If gid is not specified it is determined automatically. */ User(int uid, int gid=-1); /// Returns true if this is a valid user. operator bool() const { return valid; } /// Returns true is this is not a valid user. bool operator !() const { return !valid; } /// Returns the name of this user. const std::string& Name(void) const { return name; } /// Returns the path to the user's home directory. const std::string& Home(void) const { return home; } /// Returns the user's uid. int get_uid(void) const { return (int)uid; } /// Returns the user's gid. int get_gid(void) const { return (int)gid; } /// Returns true if this User's name is the same as n. bool operator==(const std::string& n) { return (n == name); } /// Check if this User has the rights specified by flags on the given path. /** \return 0 if User has the rights */ int check_file_access(const std::string& path, int flags) const; /// Change the owner of the current process. 
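    /// A minimal illustrative sketch (the account name is a placeholder):
    /// \code
    ///   Arc::User grid_user("griduser");
    ///   if (grid_user) grid_user.SwitchUser();  // e.g. in a child process right after fork()
    /// \endcode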
/** Internally this method calls setuid() and setgid() with this User's values. It can be used in the initializer of Arc::Run to switch the owner of a child process just after fork(). To temporarily change the owner of a thread in a multi-threaded environment UserSwitch should be used instead. \return true if switch succeeded. */ bool SwitchUser() const; }; // class User /// Class for temporary switching of user id. /** If this class is created, the user identity is switched to the provided uid and gid. Due to an internal lock there will be only one valid instance of this class. Any attempt to create another instance will block until the first one is destroyed. If uid and gid are set to 0 then the user identity is not switched, but the lock is applied anyway. The lock has a dual purpose. The first and most important is to protect communication with the underlying operating system which may depend on user identity. For that it is advisable for code which talks to the operating system to acquire a valid instance of this class. Care must be taken not to hold that instance too long as that may block other code in a multithreaded environment. The other purpose of this lock is to provide a workaround for a glibc bug in __nptl_setxid. This bug causes lockup of seteuid() function if racing with fork. To avoid this problem the lock mentioned above is used by the Run class while spawning a new process. \ingroup common \headerfile User.h arc/User.h */ class UserSwitch { private: static SimpleCondition suid_lock; static int suid_count; static int suid_uid_orig; static int suid_gid_orig; bool valid; public: /// Switch uid and gid. UserSwitch(int uid,int gid); /// Switch back to old uid and gid and release lock on this class. ~UserSwitch(void); /// Returns true if switching user succeeded. operator bool(void) { return valid; }; /// This method to be called after fork in child /// process to avoid deadlock on global condition. void resetPostFork(void); }; // class UserSwitch } // namespace Arc #endif nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/ArcConfigFile.h0000644000000000000000000000012412771223617023561 xustar000000000000000027 mtime=1474635663.023405 27 atime=1513200574.892706 30 ctime=1513200658.807732904 nordugrid-arc-5.4.2/src/hed/libs/common/ArcConfigFile.h0000644000175000002070000000220212771223617023622 0ustar00mockbuildmock00000000000000#ifndef __ARC_CONFIG_FILE_H__ #define __ARC_CONFIG_FILE_H__ #include #include #include #include #include #include namespace Arc { class ConfigFile: public std::ifstream { public: /// Default constructor creates object not associated with file. ConfigFile(void) { }; /// Constructor creates object associated with file located at path. ConfigFile(const std::string &path) { open(path); }; /// Open/assign configuration file located at path to this object. bool open(const std::string &path); /// Closes configuration file. bool close(void); /// Read one line of configuration file. /// Returns string containing whole line. std::string read_line(); /// Helper function to read one line of configuration from provided stream. static std::string read_line(std::istream& stream); /// Recognizable configuration file types. typedef enum { file_XML, file_INI, file_unknown } file_type; /// Detect type of currently associated configuration file. 
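    /// A small illustrative sketch (the path is a placeholder):
    /// \code
    ///   Arc::ConfigFile cfile("/etc/arc/service.xml");
    ///   if (cfile.is_open() && (cfile.detect() == Arc::ConfigFile::file_XML)) {
    ///     // parse the file as XML configuration
    ///   }
    /// \endcode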
file_type detect(void); }; } // namespace Arc #endif // __ARC_CONFIG_FILE_H__ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/ArcConfig.h0000644000000000000000000000012412771223617022761 xustar000000000000000027 mtime=1474635663.023405 27 atime=1513200574.892706 30 ctime=1513200658.803732855 nordugrid-arc-5.4.2/src/hed/libs/common/ArcConfig.h0000644000175000002070000001327712771223617023040 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_CONFIG_H__ #define __ARC_CONFIG_H__ #include #include #include #include namespace Arc { /// Configuration element - represents (sub)tree of ARC XML configuration. /** This class is intended to be used to pass configuration details to various parts of HED and external modules. Currently it's just a wrapper over XML tree. But that may change in the future, although the interface should be preserved. Currently it is capable of loading an XML configuration document from a file. In future it will be capable of loading a more user-readable format and processing it into a tree-like structure convenient for machine processing (XML-like). So far there are no schema and/or namespaces assigned. \ingroup common \headerfile ArcConfig.h arc/ArcConfig.h */ class Config : public XMLNode { private: std::string file_name_; public: /// Creates empty XML tree Config() : XMLNode(NS(), "ArcConfig") {} /// Creates an empty configuration object with the given namespace Config(const NS& ns) : XMLNode(ns, "ArcConfig") {} /// Loads configuration document from file at filename Config(const char *filename); /// Parse configuration document from string Config(const std::string& xml_str) : XMLNode(xml_str) {} /// Acquire existing XML (sub)tree. /** Content is not copied. Make sure XML tree is not destroyed while in use by this object. */ Config(XMLNode xml) : XMLNode(xml) {} /// Acquire existing XML (sub)tree and set config file. /** Content is not copied. Make sure XML tree is not destroyed while in use by this object. */ Config(XMLNode xml, const std::string& filename) : XMLNode(xml) { file_name_ = filename; } ~Config(void); /// Copy constructor used by language bindings Config(long cfg_ptr_addr); /// Copy constructor used by language bindings Config(const Config& cfg); /// Print structure of document for debugging purposes. /** Printed content is not an XML document. */ void print(void); /// Parse configuration document from file at filename bool parse(const char *filename); /// Returns file name of config file or empty string if it was generated from the XMLNode subtree const std::string& getFileName(void) const { return file_name_; } /// Set the file name of config file void setFileName(const std::string& filename) { file_name_ = filename; } /// Save config to file void save(const char *filename); /// Helper method for processing element value. /** Extracts sub-element ename from pnode and tries to convert it into boolean. In case of success returns true and fills result into val reference. */ static bool elementtobool(Arc::XMLNode pnode,const char* ename,bool& val); /// Helper method for processing element value. /** Extracts sub-element ename from pnode and tries to convert it into enumeration defined by opts. In case of success returns true and fills result into val reference. */ static bool elementtoenum(Arc::XMLNode pnode,const char* ename,int& val,const char* const opts[]); /// Helper method for processing element value. /** Extracts sub-element ename from pnode and tries to convert it into integer number. 
In case of success returns true and fills result into val reference. */ template static bool elementtoint(Arc::XMLNode pnode,const char* ename,T& val) { std::string v = ename?pnode[ename]:pnode; if(v.empty()) return true; // default return Arc::stringto(v,val); } }; /// Configuration for client interface. /** It contains information which can't be expressed in class constructor arguments. Most probably common things like software installation location, identity of user, etc. \ingroup common \headerfile ArcConfig.h arc/ArcConfig.h */ class BaseConfig { protected: /// List of file system paths to ARC plugin files std::list plugin_paths; public: /// Credential stored as string /** * \since Added in 4.0.0. **/ std::string credential; /// Path to private key std::string key; /// Path to certificate std::string cert; /// Path to proxy certificate std::string proxy; /// Path to CA certificate std::string cafile; /// Path to directory of CA certificates std::string cadir; /// Configuration overlay XMLNode overlay; /// Construct new BaseConfig. Plugin paths are determined automatically. BaseConfig(); virtual ~BaseConfig() {} /// Adds non-standard location of plugins void AddPluginsPath(const std::string& path); /// Add credential string /** * \since Added in 4.0.0. **/ void AddCredential(const std::string& cred); /// Add private key void AddPrivateKey(const std::string& path); /// Add certificate void AddCertificate(const std::string& path); /// Add credentials proxy void AddProxy(const std::string& path); /// Add CA file void AddCAFile(const std::string& path); /// Add CA directory void AddCADir(const std::string& path); /// Add configuration overlay void AddOverlay(XMLNode cfg); /// Read overlay from file void GetOverlay(std::string fname); /// Adds plugin configuration into common configuration tree supplied in 'cfg' argument. /** \return reference to XML node representing configuration of ModuleManager */ virtual XMLNode MakeConfig(XMLNode cfg) const; }; } // namespace Arc #endif /* __ARC_CONFIG_H__ */ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/Profile.h0000644000000000000000000000012412111140470022505 xustar000000000000000027 mtime=1361363256.081099 27 atime=1513200574.920706 30 ctime=1513200658.834733234 nordugrid-arc-5.4.2/src/hed/libs/common/Profile.h0000644000175000002070000000131512111140470022552 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_PROFILE_H__ #define __ARC_PROFILE_H__ #include #include #include #include #include namespace Arc { /// Class used to convert human-friendly ini-style configuration to XML. /** \ingroup common * \headerfile Profile.h arc/Profile.h */ class Profile : public XMLNode { public: /// Create a new profile with the given profile file Profile(const std::string& filename); ~Profile(); /// Evaluate the given ini-style configuration against the current profile. 
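    /// A minimal illustrative sketch (both file names are placeholders):
    /// \code
    ///   Arc::Profile profile("/usr/share/arc/profiles/general.xml");
    ///   Arc::IniConfig ini("/etc/arc/service.ini");
    ///   Arc::Config cfg;
    ///   profile.Evaluate(cfg, ini);  // cfg now holds the expanded XML configuration
    /// \endcode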
void Evaluate(Config &cfg, IniConfig ini); }; } // namespace Arc #endif /* __ARC_PROFILE_H__ */ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/CheckSum.cpp0000644000000000000000000000012311745215456023164 xustar000000000000000027 mtime=1335171886.306924 27 atime=1513200574.953707 29 ctime=1513200658.85073343 nordugrid-arc-5.4.2/src/hed/libs/common/CheckSum.cpp0000644000175000002070000004042511745215456023237 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #ifdef WIN32 typedef unsigned char u_char; typedef unsigned int u_int; #endif // ---------------------------------------------------------------------------- // This is CRC(32bit) implementation as in 'cksum' utility // ---------------------------------------------------------------------------- // g=0x0000000104C11DB7LL; static uint32_t gtable[256] = { 0x00000000, 0x04C11DB7, 0x09823B6E, 0x0D4326D9, 0x130476DC, 0x17C56B6B, 0x1A864DB2, 0x1E475005, 0x2608EDB8, 0x22C9F00F, 0x2F8AD6D6, 0x2B4BCB61, 0x350C9B64, 0x31CD86D3, 0x3C8EA00A, 0x384FBDBD, 0x4C11DB70, 0x48D0C6C7, 0x4593E01E, 0x4152FDA9, 0x5F15ADAC, 0x5BD4B01B, 0x569796C2, 0x52568B75, 0x6A1936C8, 0x6ED82B7F, 0x639B0DA6, 0x675A1011, 0x791D4014, 0x7DDC5DA3, 0x709F7B7A, 0x745E66CD, 0x9823B6E0, 0x9CE2AB57, 0x91A18D8E, 0x95609039, 0x8B27C03C, 0x8FE6DD8B, 0x82A5FB52, 0x8664E6E5, 0xBE2B5B58, 0xBAEA46EF, 0xB7A96036, 0xB3687D81, 0xAD2F2D84, 0xA9EE3033, 0xA4AD16EA, 0xA06C0B5D, 0xD4326D90, 0xD0F37027, 0xDDB056FE, 0xD9714B49, 0xC7361B4C, 0xC3F706FB, 0xCEB42022, 0xCA753D95, 0xF23A8028, 0xF6FB9D9F, 0xFBB8BB46, 0xFF79A6F1, 0xE13EF6F4, 0xE5FFEB43, 0xE8BCCD9A, 0xEC7DD02D, 0x34867077, 0x30476DC0, 0x3D044B19, 0x39C556AE, 0x278206AB, 0x23431B1C, 0x2E003DC5, 0x2AC12072, 0x128E9DCF, 0x164F8078, 0x1B0CA6A1, 0x1FCDBB16, 0x018AEB13, 0x054BF6A4, 0x0808D07D, 0x0CC9CDCA, 0x7897AB07, 0x7C56B6B0, 0x71159069, 0x75D48DDE, 0x6B93DDDB, 0x6F52C06C, 0x6211E6B5, 0x66D0FB02, 0x5E9F46BF, 0x5A5E5B08, 0x571D7DD1, 0x53DC6066, 0x4D9B3063, 0x495A2DD4, 0x44190B0D, 0x40D816BA, 0xACA5C697, 0xA864DB20, 0xA527FDF9, 0xA1E6E04E, 0xBFA1B04B, 0xBB60ADFC, 0xB6238B25, 0xB2E29692, 0x8AAD2B2F, 0x8E6C3698, 0x832F1041, 0x87EE0DF6, 0x99A95DF3, 0x9D684044, 0x902B669D, 0x94EA7B2A, 0xE0B41DE7, 0xE4750050, 0xE9362689, 0xEDF73B3E, 0xF3B06B3B, 0xF771768C, 0xFA325055, 0xFEF34DE2, 0xC6BCF05F, 0xC27DEDE8, 0xCF3ECB31, 0xCBFFD686, 0xD5B88683, 0xD1799B34, 0xDC3ABDED, 0xD8FBA05A, 0x690CE0EE, 0x6DCDFD59, 0x608EDB80, 0x644FC637, 0x7A089632, 0x7EC98B85, 0x738AAD5C, 0x774BB0EB, 0x4F040D56, 0x4BC510E1, 0x46863638, 0x42472B8F, 0x5C007B8A, 0x58C1663D, 0x558240E4, 0x51435D53, 0x251D3B9E, 0x21DC2629, 0x2C9F00F0, 0x285E1D47, 0x36194D42, 0x32D850F5, 0x3F9B762C, 0x3B5A6B9B, 0x0315D626, 0x07D4CB91, 0x0A97ED48, 0x0E56F0FF, 0x1011A0FA, 0x14D0BD4D, 0x19939B94, 0x1D528623, 0xF12F560E, 0xF5EE4BB9, 0xF8AD6D60, 0xFC6C70D7, 0xE22B20D2, 0xE6EA3D65, 0xEBA91BBC, 0xEF68060B, 0xD727BBB6, 0xD3E6A601, 0xDEA580D8, 0xDA649D6F, 0xC423CD6A, 0xC0E2D0DD, 0xCDA1F604, 0xC960EBB3, 0xBD3E8D7E, 0xB9FF90C9, 0xB4BCB610, 0xB07DABA7, 0xAE3AFBA2, 0xAAFBE615, 0xA7B8C0CC, 0xA379DD7B, 0x9B3660C6, 0x9FF77D71, 0x92B45BA8, 0x9675461F, 0x8832161A, 0x8CF30BAD, 0x81B02D74, 0x857130C3, 0x5D8A9099, 0x594B8D2E, 0x5408ABF7, 0x50C9B640, 0x4E8EE645, 0x4A4FFBF2, 0x470CDD2B, 0x43CDC09C, 0x7B827D21, 0x7F436096, 0x7200464F, 0x76C15BF8, 0x68860BFD, 0x6C47164A, 0x61043093, 0x65C52D24, 0x119B4BE9, 0x155A565E, 0x18197087, 0x1CD86D30, 0x029F3D35, 0x065E2082, 0x0B1D065B, 0x0FDC1BEC, 0x3793A651, 0x3352BBE6, 
0x3E119D3F, 0x3AD08088, 0x2497D08D, 0x2056CD3A, 0x2D15EBE3, 0x29D4F654, 0xC5A92679, 0xC1683BCE, 0xCC2B1D17, 0xC8EA00A0, 0xD6AD50A5, 0xD26C4D12, 0xDF2F6BCB, 0xDBEE767C, 0xE3A1CBC1, 0xE760D676, 0xEA23F0AF, 0xEEE2ED18, 0xF0A5BD1D, 0xF464A0AA, 0xF9278673, 0xFDE69BC4, 0x89B8FD09, 0x8D79E0BE, 0x803AC667, 0x84FBDBD0, 0x9ABC8BD5, 0x9E7D9662, 0x933EB0BB, 0x97FFAD0C, 0xAFB010B1, 0xAB710D06, 0xA6322BDF, 0xA2F33668, 0xBCB4666D, 0xB8757BDA, 0xB5365D03, 0xB1F740B4 }; namespace Arc { CRC32Sum::CRC32Sum(void) { start(); } void CRC32Sum::start(void) { r = 0; count = 0; computed = false; } void CRC32Sum::add(void *buf, unsigned long long int len) { for (unsigned long long int i = 0; i < len; i++) { unsigned char c = (r >> 24); r = ((r << 8) | ((unsigned char*)buf)[i]) ^ gtable[c]; } count += len; } void CRC32Sum::end(void) { if (computed) return; unsigned long long l = count; for (; l;) { unsigned char c = (l & 0xFF); ((CheckSum*)this)->add(&c, 1); l >>= 8; } uint32_t u = 0; ((CheckSum*)this)->add(&u, 4); r = ((~r) & 0xFFFFFFFF); computed = true; } int CRC32Sum::print(char *buf, int len) const { if (!computed) { if (len > 0) buf[0] = 0; return 0; } return snprintf(buf, len, "cksum:%08x", r); } void CRC32Sum::scan(const char *buf) { computed = false; int l; if (strncasecmp("cksum:", buf, 6) == 0) { unsigned long long rr; // for compatibilty with bug in 0.4 l = sscanf(buf + 6, "%llx", &rr); r = rr; } else { int i; l = 0; for (i = 0; buf[i]; i++) if (!isdigit(buf[i])) break; if (!(buf[i])) l = sscanf(buf, "%u", &r); else { for (i = 0; buf[i]; i++) if (!isxdigit(buf[i])) break; if (!(buf[i])) { unsigned long long rr; l = sscanf(buf, "%llx", &rr); r = rr; } } } if (l != 1) return; computed = true; return; } // ---------------------------------------------------------------------------- // This is MD5 implementation derived directly from RFC // ---------------------------------------------------------------------------- #define F(X, Y, Z) (((X)&(Y)) | ((~(X)) & (Z))) #define G(X, Y, Z) (((X)&(Z)) | ((Y)&(~(Z)))) #define H(X, Y, Z) ((X) ^ (Y) ^ (Z)) #define I(X, Y, Z) ((Y) ^ ((X) | (~(Z)))) #define OP1(a, b, c, d, k, s, i) { \ uint32_t t = ((a) + F(b, c, d) + X[k] + T[i - 1]); \ (a) = (b) + (((t) << (s)) | ((t) >> (32 - s))); \ } #define OP2(a, b, c, d, k, s, i) { \ uint32_t t = ((a) + G(b, c, d) + X[k] + T[i - 1]); \ (a) = (b) + (((t) << (s)) | ((t) >> (32 - s))); \ } #define OP3(a, b, c, d, k, s, i) { \ uint32_t t = ((a) + H(b, c, d) + X[k] + T[i - 1]); \ (a) = (b) + (((t) << (s)) | ((t) >> (32 - s))); \ } #define OP4(a, b, c, d, k, s, i) { \ uint32_t t = ((a) + I(b, c, d) + X[k] + T[i - 1]); \ (a) = (b) + (((t) << (s)) | ((t) >> (32 - s))); \ } #define A_INIT (0x67452301) #define B_INIT (0xefcdab89) #define C_INIT (0x98badcfe) #define D_INIT (0x10325476) static uint32_t T[64] = { 3614090360U, 3905402710U, 606105819U, 3250441966U, 4118548399U, 1200080426U, 2821735955U, 4249261313U, 1770035416U, 2336552879U, 4294925233U, 2304563134U, 1804603682U, 4254626195U, 2792965006U, 1236535329U, 4129170786U, 3225465664U, 643717713U, 3921069994U, 3593408605U, 38016083U, 3634488961U, 3889429448U, 568446438U, 3275163606U, 4107603335U, 1163531501U, 2850285829U, 4243563512U, 1735328473U, 2368359562U, 4294588738U, 2272392833U, 1839030562U, 4259657740U, 2763975236U, 1272893353U, 4139469664U, 3200236656U, 681279174U, 3936430074U, 3572445317U, 76029189U, 3654602809U, 3873151461U, 530742520U, 3299628645U, 4096336452U, 1126891415U, 2878612391U, 4237533241U, 1700485571U, 2399980690U, 4293915773U, 2240044497U, 1873313359U, 4264355552U, 
2734768916U, 1309151649U, 4149444226U, 3174756917U, 718787259U, 3951481745U }; MD5Sum::MD5Sum(void) { // for(u_int i = 1;i<=64;i++) T[i-1]=(uint32_t)(4294967296LL*fabs(sin(i))); start(); } void MD5Sum::start(void) { A = A_INIT; B = B_INIT; C = C_INIT; D = D_INIT; count = 0; Xlen = 0; memset(X,0,sizeof(X)); computed = false; } void MD5Sum::add(void *buf, unsigned long long int len) { u_char *buf_ = (u_char*)buf; for (; len;) { for(;Xlen < 64;) { // 16 words = 64 bytes if(!len) break; u_int Xi = Xlen >> 2; u_int Xs = (Xlen & 3) << 3; X[Xi] |= ((uint32_t)(*buf_)) << Xs; ++Xlen; ++count; --len; ++buf_; } if (Xlen < 64) return; uint32_t AA = A; uint32_t BB = B; uint32_t CC = C; uint32_t DD = D; OP1(A, B, C, D, 0, 7, 1); OP1(D, A, B, C, 1, 12, 2); OP1(C, D, A, B, 2, 17, 3); OP1(B, C, D, A, 3, 22, 4); OP1(A, B, C, D, 4, 7, 5); OP1(D, A, B, C, 5, 12, 6); OP1(C, D, A, B, 6, 17, 7); OP1(B, C, D, A, 7, 22, 8); OP1(A, B, C, D, 8, 7, 9); OP1(D, A, B, C, 9, 12, 10); OP1(C, D, A, B, 10, 17, 11); OP1(B, C, D, A, 11, 22, 12); OP1(A, B, C, D, 12, 7, 13); OP1(D, A, B, C, 13, 12, 14); OP1(C, D, A, B, 14, 17, 15); OP1(B, C, D, A, 15, 22, 16); OP2(A, B, C, D, 1, 5, 17); OP2(D, A, B, C, 6, 9, 18); OP2(C, D, A, B, 11, 14, 19); OP2(B, C, D, A, 0, 20, 20); OP2(A, B, C, D, 5, 5, 21); OP2(D, A, B, C, 10, 9, 22); OP2(C, D, A, B, 15, 14, 23); OP2(B, C, D, A, 4, 20, 24); OP2(A, B, C, D, 9, 5, 25); OP2(D, A, B, C, 14, 9, 26); OP2(C, D, A, B, 3, 14, 27); OP2(B, C, D, A, 8, 20, 28); OP2(A, B, C, D, 13, 5, 29); OP2(D, A, B, C, 2, 9, 30); OP2(C, D, A, B, 7, 14, 31); OP2(B, C, D, A, 12, 20, 32); OP3(A, B, C, D, 5, 4, 33); OP3(D, A, B, C, 8, 11, 34); OP3(C, D, A, B, 11, 16, 35); OP3(B, C, D, A, 14, 23, 36); OP3(A, B, C, D, 1, 4, 37); OP3(D, A, B, C, 4, 11, 38); OP3(C, D, A, B, 7, 16, 39); OP3(B, C, D, A, 10, 23, 40); OP3(A, B, C, D, 13, 4, 41); OP3(D, A, B, C, 0, 11, 42); OP3(C, D, A, B, 3, 16, 43); OP3(B, C, D, A, 6, 23, 44); OP3(A, B, C, D, 9, 4, 45); OP3(D, A, B, C, 12, 11, 46); OP3(C, D, A, B, 15, 16, 47); OP3(B, C, D, A, 2, 23, 48); OP4(A, B, C, D, 0, 6, 49); OP4(D, A, B, C, 7, 10, 50); OP4(C, D, A, B, 14, 15, 51); OP4(B, C, D, A, 5, 21, 52); OP4(A, B, C, D, 12, 6, 53); OP4(D, A, B, C, 3, 10, 54); OP4(C, D, A, B, 10, 15, 55); OP4(B, C, D, A, 1, 21, 56); OP4(A, B, C, D, 8, 6, 57); OP4(D, A, B, C, 15, 10, 58); OP4(C, D, A, B, 6, 15, 59); OP4(B, C, D, A, 13, 21, 60); OP4(A, B, C, D, 4, 6, 61); OP4(D, A, B, C, 11, 10, 62); OP4(C, D, A, B, 2, 15, 63); OP4(B, C, D, A, 9, 21, 64); A += AA; B += BB; C += CC; D += DD; Xlen = 0; memset(X,0,sizeof(X)); } } void MD5Sum::end(void) { if (computed) return; // pad uint64_t l = 8 * count; // number of bits u_char c = 0x80; add(&c, 1); c = 0; for (; Xlen != 56;) add(&c, 1); //add(&l, 8); c = (u_char)(l>>0); add(&c, 1); c = (u_char)(l>>8); add(&c, 1); c = (u_char)(l>>16); add(&c, 1); c = (u_char)(l>>24); add(&c, 1); c = (u_char)(l>>32); add(&c, 1); c = (u_char)(l>>40); add(&c, 1); c = (u_char)(l>>48); add(&c, 1); c = (u_char)(l>>56); add(&c, 1); computed = true; } int MD5Sum::print(char *buf, int len) const { if (!computed) { if (len > 0) buf[0] = 0; return 0; } return snprintf(buf, len, "md5:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", (u_char)(A>>0), (u_char)(A>>8), (u_char)(A>>16), (u_char)(A>>24), (u_char)(B>>0), (u_char)(B>>8), (u_char)(B>>16), (u_char)(B>>24), (u_char)(C>>0), (u_char)(C>>8), (u_char)(C>>16), (u_char)(C>>24), (u_char)(D>>0), (u_char)(D>>8), (u_char)(D>>16), (u_char)(D>>24) ); } void MD5Sum::scan(const char *buf) { unsigned A0, A1, A2, A3, B0, B1, 
A2, A3, B0, B1,
B2, B3, C0, C1, C2, C3, D0, D1, D2, D3; computed = false; if (strncasecmp("md5:", buf, 4) != 0) return; int l = sscanf(buf + 4, "%02x%02x%02x%02x%02x%02x%02x%02x" "%02x%02x%02x%02x%02x%02x%02x%02x", &A0, &A1, &A2, &A3, &B0, &B1, &B2, &B3, &C0, &C1, &C2, &C3, &D0, &D1, &D2, &D3 ); A = (((uint32_t)A0)<<0) | (((uint32_t)A1)<<8) | (((uint32_t)A2)<<16) | (((uint32_t)A3)<<24); B = (((uint32_t)B0)<<0) | (((uint32_t)B1)<<8) | (((uint32_t)B2)<<16) | (((uint32_t)B3)<<24); C = (((uint32_t)C0)<<0) | (((uint32_t)C1)<<8) | (((uint32_t)C2)<<16) | (((uint32_t)C3)<<24); D = (((uint32_t)D0)<<0) | (((uint32_t)D1)<<8) | (((uint32_t)D2)<<16) | (((uint32_t)D3)<<24); if (l != 16) return; computed = true; return; } // -------------------------------------------------------------------------- // This is a wrapper for any supported checksum // -------------------------------------------------------------------------- CheckSumAny::CheckSumAny(const char *type) : cs(NULL), tp(CheckSumAny::none) { if (!type) return; if (strncasecmp("cksum", type, 5) == 0) { cs = new CRC32Sum; tp = cksum; return; } if (strncasecmp("md5", type, 3) == 0) { cs = new MD5Sum; tp = md5; return; } if(strncasecmp("adler32",type,7) == 0) { cs=new Adler32Sum; tp=adler32; return; } } CheckSumAny::CheckSumAny(type type) { if (type == cksum) { cs = new CRC32Sum; tp = type; return; } if (type == md5) { cs = new MD5Sum; tp = type; return; } if (type == adler32) { cs = new Adler32Sum; tp = type; return; } } CheckSumAny::type CheckSumAny::Type(const char *crc) { if (!crc) return none; if (!crc[0]) return none; const char *p = strchr(crc, ':'); if (!p) { p = crc + strlen(crc); int i; for (i = 0; crc[i]; i++) if (!isxdigit(crc[i])) break; if (!(crc[i])) return cksum; } if (((p - crc) == 5) && (strncasecmp(crc, "cksum", 5) == 0)) return cksum; if (((p - crc) == 3) && (strncasecmp(crc, "md5", 3) == 0)) return md5; if (((p - crc) == 7) && (strncasecmp(crc, "adler32", 7) == 0)) return adler32; if (((p - crc) == 9) && (strncasecmp(crc, "undefined", 9) == 0)) return undefined; return unknown; } void CheckSumAny::operator=(const char *type) { if (cs) delete cs; cs = NULL; tp = none; if (!type) return; if (strncasecmp("cksum", type, 5) == 0) { cs = new CRC32Sum; tp = cksum; return; } if (strncasecmp("md5", type, 3) == 0) { cs = new MD5Sum; tp = md5; return; } if (strncasecmp("adler32", type, 7) == 0) { cs = new Adler32Sum; tp = adler32; return; } } bool CheckSumAny::operator==(const char *s) { if (!cs) return false; if (!(*cs)) return false; if (!s) return false; CheckSumAny c(s); if (!(c.cs)) return false; c.cs->scan(s); if (!(*(c.cs))) return false; if (c.tp != tp) return false; unsigned char *res; unsigned char *res_; unsigned int len; unsigned int len_; cs->result(res, len); c.cs->result(res_, len_); if (len != len_) return false; if (memcmp(res, res_, len) != 0) return false; return true; } bool CheckSumAny::operator==(const CheckSumAny& c) { if (!cs) return false; if (!(*cs)) return false; if (!c) return false; unsigned char *res; unsigned char *res_; unsigned int len; unsigned int len_; cs->result(res, len); c.cs->result(res_, len_); if (len != len_) return false; if (memcmp(res, res_, len) != 0) return false; return true; } std::string CheckSumAny::FileChecksum(const std::string& file_path, type tp, bool decimalbase) { int h = open(file_path.c_str(), O_RDONLY); if (h == -1) { return ""; } CheckSumAny csa(tp); char buffer[1024]; ssize_t l; for(;;) { l = read(h, buffer, 1024); if (l == -1) return ""; if (l == 0) break; csa.add(buffer, l); } close(h); 
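  // Whole file consumed: finalize the digest, print it as "label:hexvalue" and
  // strip the label (optionally converting the value to decimal) below.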
csa.end(); char checksum[100]; csa.print(checksum, 100); std::string sChecksum(checksum); std::string::size_type pos = sChecksum.find(':'); if (pos == std::string::npos) { return ""; } if (!decimalbase) { return sChecksum.substr(pos+1); } // Convert to specified base char* str; unsigned long long int val = strtoull(sChecksum.substr(pos+1).c_str(), &str, 16); if (!str) { return ""; } return tostring(val); } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/ArcVersion.h0000644000000000000000000000012413214315410023163 xustar000000000000000027 mtime=1513200392.861335 27 atime=1513200574.873706 30 ctime=1513200658.802732843 nordugrid-arc-5.4.2/src/hed/libs/common/ArcVersion.h0000644000175000002070000001253513214315410023236 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_ARCVERSION_H__ #define __ARC_ARCVERSION_H__ /** \defgroup common Common utility classes and functions. */ /// \file ArcVersion.h /** \addtogroup common * @{ */ /** ARC API version */ #define ARC_VERSION "5.4.2" /** ARC API version number */ #define ARC_VERSION_NUM 0x050402 /** ARC API major version number */ #define ARC_VERSION_MAJOR 5 /** ARC API minor version number */ #define ARC_VERSION_MINOR 4 /** ARC API patch number */ #define ARC_VERSION_PATCH 2 /// Arc namespace contains all core ARC classes. namespace Arc { /// Determines ARC HED libraries version at runtime /** * ARC also provides pre-processor macros to determine the API version at * compile time in \ref ArcVersion.h. * \ingroup common * \headerfile ArcVersion.h arc/ArcVersion.h */ class ArcVersion { public: /// Major version number const unsigned int Major; /// Minor version number const unsigned int Minor; /// Patch version number const unsigned int Patch; /// Parses ver and fills major, minor and patch version values ArcVersion(const char* ver); }; /// Use this object to obtain current ARC HED version /// at runtime. extern const ArcVersion Version; // Front page for ARC SDK documentation /** * \mainpage * The ARC %Software Development Kit (SDK) is a set of tools that allow * manipulation of jobs and data in a Grid environment. The SDK is divided * into a set of modules which take care of * different aspects of Grid interaction. * * In the figure below an overview of the SDK is illustrated, showing * software components depending on the SDK, supported security standards and * in particular plugins providing functionality for different index and * registry services, local information systems, job submission and management * interfaces, matching and ranking algorithms, data access protocols and * job description languages. * * \image html arcsdk.png * * \version The version of the SDK that this documentation refers to can be * found from #ARC_VERSION. The ARC release corresponding to the SDK version * can be found using the "SVN tag" column in the table at * http://www.nordugrid.org/arc/releases/ * * \section sec Quick Start * The following code is a minimal example showing how to submit a job to a * Grid resource using the ARC SDK. For futher examples see the \ref examples * "list of all examples". 
* \include basic_job_submission.cpp * This code can be compiled with * \code * g++ -o submit -I/usr/include/libxml2 `pkg-config --cflags glibmm-2.4` -l arccompute submit.cpp * \endcode * * And this example shows how to copy a file to or from the Grid: * \include simple_copy.cpp * * This example can be compiled with * \code * g++ -o copy -I/usr/include/libxml2 `pkg-config --cflags glibmm-2.4` -l arcdata copy.cpp * \endcode */ // Page listing all examples /** * \page examples List of all examples * %Job submission and management * - \subpage basicjobsubmission * - \subpage jobfiltering * - \subpage jobstatus * - \subpage joblistretrieval * - \subpage retrievingresults * - \subpage servicediscovery * * Data management * - \subpage copyfile * - \subpage partialcopy * - \subpage exampledmc * - \subpage dtrgenerator * * \page basicjobsubmission Basic %Job Submission * \section cpp C++ * \include basic_job_submission.cpp * \section py Python * \include basic_job_submission.py * \section java Java * \include BasicJobSubmission.java * \section txt xRSL job description * \include helloworld.xrsl * * \page jobfiltering %Job Filtering * \tableofcontents * When managing multiple jobs it is a speedup and may be more convenient to * use the JobSupervisor class, instead of working on single Job objects. * In the JobSupervisor class jobs can be filtered so operations can be * limited to a subset of jobs. Such examples are shown below: * \section cpp C++ * \include job_selector.cpp * \section py Python * \subsection job_selector Select jobs using custom class * \include job_selector.py * \subsection job_filtering Select jobs based on job state * \include job_filtering.py * * \page jobstatus %Job Status * \include job_status.py * * \page joblistretrieval %Job List Retrieval * \include joblist_retrieval.py * * \page retrievingresults Retrieving Results * \include retrieving_results.py * * \page servicediscovery Service Discovery * \include service_discovery.py * * \page copyfile Copy File * \section cpp C++ * \include simple_copy.cpp * \section py Python * \include copy_file.py * * \page partialcopy Partial File Copy * \section cpp C++ * \include partial_copy.cpp * \section py Python * \include partial_copy.py * * \page exampledmc Example Protocol %Plugin * \include DataPointMyProtocol.cpp * * \page dtrgenerator DTR Generator * \section cpp C++ * Generator.cpp * \include Generator.cpp * Generator.h * \include Generator.h * generator-main.cpp * \include generator-main.cpp * \section py Python * \include dtr_generator.py * \section java Java * \include DTRGenerator.java */ } // namespace Arc /** @} */ #endif // __ARC_ARCVERSION_H__ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/IniConfig.h0000644000000000000000000000012412111140470022752 xustar000000000000000027 mtime=1361363256.081099 27 atime=1513200574.904706 30 ctime=1513200658.833733222 nordugrid-arc-5.4.2/src/hed/libs/common/IniConfig.h0000644000175000002070000000145012111140470023017 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_INICONFIG_H__ #define __ARC_INICONFIG_H__ #include #include #include namespace Arc { /// Class representing "ini-style" configuration. /** It provides a way to convert configuration to XML for use with HED * internally. * \see Profile * \ingroup common * \headerfile IniConfig.h arc/IniConfig.h */ class IniConfig : public XMLNode { public: /// Dummy constructor. IniConfig() : XMLNode(NS(), "IniConfig") {} /// Read configuration from specified filename. 
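    /// A short illustrative sketch (the file name is a placeholder):
    /// \code
    ///   Arc::IniConfig ini("/etc/arc/arc.ini");
    ///   Arc::Config cfg;
    ///   if (ini.Evaluate(cfg)) { /* cfg now holds the equivalent XML configuration */ }
    /// \endcode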
IniConfig(const std::string& filename); ~IniConfig(); /// Evaluate configuration against the standard profile. bool Evaluate(Config &cfg); }; } // namespace Arc #endif /* __ARC_INICONFIG_H__ */ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/ArcConfigIni.cpp0000644000000000000000000000012412771223617023754 xustar000000000000000027 mtime=1474635663.023405 27 atime=1513200574.932707 30 ctime=1513200658.847733393 nordugrid-arc-5.4.2/src/hed/libs/common/ArcConfigIni.cpp0000644000175000002070000001611712771223617024027 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "ArcConfigFile.h" #include "ArcConfigIni.h" namespace Arc { ConfigIni::~ConfigIni(void) { if(fin && open) { ((std::ifstream*)(fin))->close(); delete fin; }; } ConfigIni::ConfigIni(ConfigFile& f):fin(NULL),open(false) { fin=&f; line_number=0; current_section_n=-1; current_section_p=section_names.end(); current_section_changed=false; } ConfigIni::ConfigIni(const char* filename):fin(NULL),open(false) { line_number=0; current_section_n=-1; current_section_p=section_names.end(); if(!filename) return; fin=new ConfigFile(filename); if(*fin) open=true; current_section_changed=false; } bool ConfigIni::AddSection(const char* name) { if(name) section_names.push_back(std::string(name)); return true; } bool ConfigIni::ReadNext(std::string& line) { if(!fin) return false; current_section_changed=false; for(;;) { line=fin->read_line(); if(line=="") { // eof current_section=""; current_section_n=-1; current_section_p=section_names.end(); current_section_changed=true; return true; }; std::string::size_type n=line.find_first_not_of(" \t"); if(n == std::string::npos) continue; // should never happen if(line[n] == '[') { // section n++; std::string::size_type nn = line.find(']',n); if(nn == std::string::npos) { line=""; return false; }; // missing ']' current_section=line.substr(n,nn-n); current_section_n=-1; current_section_p=section_names.end(); current_section_changed=true; continue; }; if(!section_names.empty()) { // only limited sections allowed bool match = false; int s_n = -1; for(std::list::iterator sec = section_names.begin(); sec!=section_names.end();++sec) { std::string::size_type len = sec->length(); s_n++; if(strncasecmp(sec->c_str(),current_section.c_str(),len) != 0) continue; if(len != current_section.length()) { if(current_section[len] != '/') continue; }; current_section_n=s_n; current_section_p=sec; match=true; break; }; if(!match) continue; }; line.erase(0,n); break; }; return true; } bool ConfigIni::ReadNext(std::string& name,std::string& value) { if(!ReadNext(name)) return false; std::string::size_type n = name.find('='); if(n == std::string::npos) { value=""; return true; }; value=name.c_str()+n+1; name.erase(n); std::string::size_type l = value.length(); for(n = 0;n=l) { value=""; return true; }; if(n) value.erase(0,n); if(value[0] != '"') return true; std::string::size_type nn = value.rfind('"'); if(nn == 0) return true; // strange std::string::size_type n_ = value.find('"',1); if((nn > n_) && (n_ != 1)) return true; value.erase(nn); value.erase(0,1); return true; } const char* ConfigIni::SubSectionMatch(const char* name) { const char* subsection = current_section.c_str(); if(current_section_n>=0) subsection+=current_section_p->length()+1; int l = strlen(name); if(strncmp(name,subsection,l) != 0) return NULL; if(subsection[l] == 0) return (subsection+l); if(subsection[l] == '/') return (subsection+l+1); return NULL; } // TODO: not all functions can handle tabs 
and other non-space spaces. static int hextoint(unsigned char c) { if(c >= 'a') return (c-('a'-10)); if(c >= 'A') return (c-('A'-10)); return (c-'0'); } /// Remove escape chracters from string and decode \x## codes. /// Unescaped value of e is also treated as end of string and is converted to \0 static char* make_unescaped_string(char* str,char e) { size_t l = 0; char* s_end = str; // looking for end of string if(e == 0) { l=strlen(str); s_end=str+l; } else { for(;str[l];l++) { if(str[l] == '\\') { l++; if(str[l] == 0) { s_end=str+l; break; }; }; if(e) { if(str[l] == e) { s_end=str+l+1; str[l]=0; break; }; }; }; }; // unescaping if(l==0) return s_end; // string is empty char* p = str; char* p_ = str; for(;*p;) { if((*p) == '\\') { p++; if((*p) == 0) { p--; } // backslash at end of string else if((*p) == 'x') { // 2 hex digits int high,low; p++; if((*p) == 0) continue; // \x at end of string if(!isxdigit(*p)) { p--; continue; }; high=*p; p++; if((*p) == 0) continue; // \x# at end of string if(!isxdigit(*p)) { p-=2; continue; }; low=*p; high=hextoint(high); low=hextoint(low); (*p)=(high<<4) | low; }; }; (*p_)=(*p); p++; p_++; }; return s_end; } /// Remove escape characters from string and decode \x## codes. static void make_unescaped_string(std::string &str) { std::string::size_type p = 0; std::string::size_type l = str.length(); for(;p= l) break; // backslash at end of string if(str[p] == 'x') { // 2 hex digits int high,low; p++; if(p >= l) continue; // \x at end of string high=str[p]; if(!isxdigit(high)) { p--; continue; }; p++; if(p >= l) continue; // \x# at end of string low=str[p]; if(!isxdigit(low)) { p-=2; continue; }; high=hextoint(high); low=hextoint(low); str[p]=(high<<4) | low; str.erase(p-3,3); p-=3; l-=3; continue; } else { str.erase(p-1,1); l--; continue; }; }; p++; }; return; } /// Extract element from input buffer and if needed process escape /// characters in it. /// \param buf input buffer. /// \param str place for output element. /// \param separator character used to separate elements. Separator ' ' is /// treated as any blank space (space and tab in GNU). 
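/// Any leading blank space and repeated separators in front of the element
/// are skipped before the element is read.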
/// \param quotes int ConfigIni::NextArg(const char* buf,std::string &str,char separator,char quotes) { std::string::size_type i,ii; str=""; /* skip initial separators and blank spaces */ for(i=0;isspace(buf[i]) || buf[i]==separator;i++) {} ii=i; if((quotes) && (buf[i] == quotes)) { const char* e = strchr(buf+ii+1,quotes); while(e) { // look for unescaped quote if((*(e-1)) != '\\') break; // check for escaped quote e = strchr(e+1,quotes); }; if(e) { ii++; i=e-buf; str.append(buf+ii,i-ii); i++; if(separator && (buf[i] == separator)) i++; make_unescaped_string(str); return i; }; }; // look for unescaped separator (' ' also means '\t') for(;buf[i]!=0;i++) { if(buf[i] == '\\') { // skip escape i++; if(buf[i]==0) break; continue; }; if(separator == ' ') { if(isspace(buf[i])) break; } else { if(buf[i]==separator) break; }; }; str.append(buf+ii,i-ii); make_unescaped_string(str); if(buf[i]) i++; // skip detected separator return i; } std::string ConfigIni::NextArg(std::string &rest,char separator,char quotes) { int n; std::string arg; n=NextArg(rest.c_str(),arg,separator,quotes); rest=rest.substr(n); return arg; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/Logger.cpp0000644000000000000000000000012413124220303022656 xustar000000000000000027 mtime=1498489027.786384 27 atime=1513200574.959707 30 ctime=1513200658.856733503 nordugrid-arc-5.4.2/src/hed/libs/common/Logger.cpp0000644000175000002070000003610013124220303022723 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #ifdef WIN32 #include #endif #include "Logger.h" #undef rootLogger namespace Arc { static const Arc::LogLevel DefaultLogLevel = Arc::DEBUG; static Arc::LogFormat DefaultLogFormat = Arc::LongFormat; static std::string list_to_domain(const std::list& subdomains) { std::string domain; for(std::list::const_iterator subdomain = subdomains.begin(); subdomain != subdomains.end();++subdomain) { domain += "."+(*subdomain); } return domain; } std::ostream& operator<<(std::ostream& os, LogLevel level) { if (level == DEBUG) os << "DEBUG"; else if (level == VERBOSE) os << "VERBOSE"; else if (level == INFO) os << "INFO"; else if (level == WARNING) os << "WARNING"; else if (level == ERROR) os << "ERROR"; else if (level == FATAL) os << "FATAL"; // There should be no more alternative! return os; } LogLevel string_to_level(const std::string& str) { LogLevel ll; if (string_to_level(str, ll)) return ll; else { // should not happen... Logger::getRootLogger().msg(WARNING, "Invalid log level. Using default "+level_to_string(DefaultLogLevel)+"."); return DefaultLogLevel; } } LogLevel istring_to_level(const std::string& llStr) { return string_to_level(upper(llStr)); } bool string_to_level(const std::string& str, LogLevel& ll) { if (str == "DEBUG") ll = DEBUG; else if (str == "VERBOSE") ll = VERBOSE; else if (str == "INFO") ll = INFO; else if (str == "WARNING") ll = WARNING; else if (str == "ERROR") ll = ERROR; else if (str == "FATAL") ll = FATAL; else // should not happen... return false; return true; } bool istring_to_level(const std::string& llStr, LogLevel& ll) { return string_to_level(upper(llStr), ll); } std::string level_to_string(const LogLevel& level) { switch (level) { case DEBUG: return "DEBUG"; case VERBOSE: return "VERBOSE"; case INFO: return "INFO"; case WARNING: return "WARNING"; case ERROR: return "ERROR"; case FATAL: return "FATAL"; default: // should not happen... 
return ""; } } LogLevel old_level_to_level(unsigned int old_level) { if (old_level >= 5) return DEBUG; else if (old_level == 4) return VERBOSE; else if (old_level == 3) return INFO; else if (old_level == 2) return WARNING; else if (old_level == 1) return ERROR; else if (old_level == 0) return FATAL; else { // cannot happen... Logger::getRootLogger().msg(WARNING, "Invalid old log level. Using default "+level_to_string(DefaultLogLevel)+"."); return DefaultLogLevel; } } LogMessage::LogMessage(LogLevel level, const IString& message) : time(TimeStamp()), level(level), domain("---"), identifier(getDefaultIdentifier()), message(message) {} LogMessage::LogMessage(LogLevel level, const IString& message, const std::string& identifier) : time(TimeStamp()), level(level), domain("---"), identifier(identifier), message(message) {} LogLevel LogMessage::getLevel() const { return level; } void LogMessage::setIdentifier(std::string identifier) { this->identifier = identifier; } std::string LogMessage::getDefaultIdentifier() { std::ostringstream sout; #ifdef HAVE_GETPID sout << getpid() << "/" #ifdef WIN32 << (unsigned long int)GetCurrentThreadId(); #else << ThreadId::getInstance().get(); #endif #else #ifdef WIN32 sout << (unsigned long int)GetCurrentThreadId(); #else sout << ThreadId::getInstance().get(); #endif #endif return sout.str(); } void LogMessage::setDomain(std::string domain) { this->domain = domain; } static const int formatindex = std::ios_base::xalloc(); static const int prefixindex = std::ios_base::xalloc(); std::ostream& operator<<(std::ostream& os, const LoggerFormat& format) { os.iword(formatindex) = format.format; return os; } std::ostream& operator<<(std::ostream& os, const LogDestination& dest) { os.iword(formatindex) = dest.format; if (!dest.prefix.empty()) os.pword(prefixindex) = const_cast(&dest.prefix); return os; } std::ostream& operator<<(std::ostream& os, const LogMessage& message) { std::string* pre = (std::string*)os.pword(prefixindex); std::string prefix = pre ? *pre : ""; switch (os.iword(formatindex)) { case LongFormat: os << "[" << message.time << "] " << "[" << message.domain << "] " << "[" << message.level << "] " << "[" << message.identifier << "] " << prefix << message.message; break; case MediumFormat: os << "[" << message.time << "] " << "[" << message.level << "] " << "[" << message.identifier << "] " << prefix << message.message; break; case ShortFormat: os << message.level << ": " << prefix << message.message; break; case EmptyFormat: os << message.message; break; case DebugFormat: Time ct; static Time lt(0); os << "[" << ct.GetTime() << "." 
<< std::setfill('0') << std::setw(6) << ct.GetTimeNanoseconds()/1000 << std::setfill(' ') << std::setw(0); if(lt.GetTime()) { Period d = ct - lt; os << "(" << d.GetPeriod()*1000000+d.GetPeriodNanoseconds()/1000<<")"; }; lt = ct; os << "] " << prefix << message.message; break; } return os; } LogDestination::LogDestination() : format(DefaultLogFormat) {} void LogDestination::setFormat(const LogFormat& newformat) { format = newformat; } void LogDestination::setDefaultFormat(const LogFormat& newformat) { DefaultLogFormat = newformat; } void LogDestination::setPrefix(const std::string& pre) { prefix = pre; } LogStream::LogStream(std::ostream& destination) : destination(destination) {} void LogStream::log(const LogMessage& message) { Glib::Mutex::Lock lock(mutex); EnvLockWrap(false); // Protecting getenv inside gettext() destination << *this << message << std::endl; EnvLockUnwrap(false); } LogFile::LogFile(const std::string& path) : LogDestination(), path(path), destination(), maxsize(-1), backups(-1), reopen(false) { if(path.empty()) { //logger.msg(Arc::ERROR,"Log file path is not specified"); return; } destination.open(path.c_str(), std::fstream::out | std::fstream::app); if(!destination.is_open()) { //logger.msg(Arc::ERROR,"Failed to open log file: %s",path); return; } } void LogFile::setMaxSize(int newsize) { maxsize = newsize; } void LogFile::setBackups(int newbackups) { backups = newbackups; } void LogFile::setReopen(bool newreopen) { Glib::Mutex::Lock lock(mutex); reopen = newreopen; if(reopen) { destination.close(); } else { if(!destination.is_open()) { destination.open(path.c_str(), std::fstream::out | std::fstream::app); } } } LogFile::operator bool(void) { Glib::Mutex::Lock lock(mutex); return (reopen)?(!path.empty()):destination.is_open(); } bool LogFile::operator!(void) { Glib::Mutex::Lock lock(mutex); return (reopen)?path.empty():(!destination.is_open()); } void LogFile::log(const LogMessage& message) { Glib::Mutex::Lock lock(mutex); // If requested to reopen on every write or if was closed because of error if (reopen || !destination.is_open()) { destination.open(path.c_str(), std::fstream::out | std::fstream::app); } if(!destination.is_open()) return; EnvLockWrap(false); // Protecting getenv inside gettext() destination << *this << message << std::endl; EnvLockUnwrap(false); // Check if unrecoverable error occurred. Close if error // and reopen on next write. 
if(destination.bad()) destination.close(); // Before closing check if must backup backup(); if (reopen) destination.close(); } void LogFile::backup(void) { if(maxsize <= 0) return; if(destination.tellp() < maxsize) return; bool backup_done = true; // Not sure if this will work on windows, but glibmm // has no functions for removing and renaming files if(backups > 0) { std::string backup_path = path+"."+tostring(backups); ::unlink(backup_path.c_str()); for(int n = backups;n>0;--n) { std::string old_backup_path = (n>1)?(path+"."+tostring(n-1)):path; if(::rename(old_backup_path.c_str(),backup_path.c_str()) != 0) { if(n == 1) backup_done=false; } backup_path = old_backup_path; } } else { if(::unlink(path.c_str()) != 0) backup_done=false; } if((backup_done) && (!reopen)) { destination.close(); destination.open(path.c_str(), std::fstream::out | std::fstream::app); } } class LoggerContextRef: public ThreadDataItem { friend class Logger; private: std::string id; LoggerContext& context; LoggerContextRef(LoggerContext& ctx, std::string& i); virtual ~LoggerContextRef(void); public: virtual void Dup(void); }; LoggerContextRef::LoggerContextRef(LoggerContext& ctx, std::string& i): ThreadDataItem(i),context(ctx) { id = i; context.Acquire(); } LoggerContextRef::~LoggerContextRef(void) { context.Release(); } void LoggerContextRef::Dup(void) { new LoggerContextRef(context,id); } void LoggerContext::Acquire(void) { mutex.lock(); ++usage_count; mutex.unlock(); } void LoggerContext::Release(void) { mutex.lock(); --usage_count; if(!usage_count) { delete this; } else { mutex.unlock(); } } LoggerContext::~LoggerContext(void) { mutex.trylock(); mutex.unlock(); } Logger* Logger::rootLogger = NULL; std::map* Logger::defaultThresholds = NULL; unsigned int Logger::rootLoggerMark = ~rootLoggerMagic; Logger& Logger::getRootLogger(void) { if ((rootLogger == NULL) || (rootLoggerMark != rootLoggerMagic)) { rootLogger = new Logger(); defaultThresholds = new std::map; rootLoggerMark = rootLoggerMagic; } return *rootLogger; } Logger::Logger(Logger& parent, const std::string& subdomain) : parent(&parent), domain(parent.getDomain() + "." + subdomain), context((LogLevel)0) { std::map::const_iterator thr = defaultThresholds->find(domain); if(thr != defaultThresholds->end()) { context.threshold = thr->second; } } Logger::Logger(Logger& parent, const std::string& subdomain, LogLevel threshold) : parent(&parent), domain(parent.getDomain() + "." 
+ subdomain), context(threshold) { } Logger::~Logger() { } void Logger::addDestination(LogDestination& destination) { Glib::Mutex::Lock lock(mutex); getContext().destinations.push_back(&destination); } void Logger::addDestinations(const std::list& destinations) { Glib::Mutex::Lock lock(mutex); for(std::list::const_iterator dest = destinations.begin(); dest != destinations.end();++dest) { getContext().destinations.push_back(*dest); } } void Logger::setDestinations(const std::list& destinations) { Glib::Mutex::Lock lock(mutex); getContext().destinations.clear(); for(std::list::const_iterator dest = destinations.begin(); dest != destinations.end();++dest) { getContext().destinations.push_back(*dest); } } const std::list& Logger::getDestinations(void) const { Glib::Mutex::Lock lock((Glib::Mutex&)mutex); return ((Logger*)this)->getContext().destinations; } void Logger::removeDestinations(void) { Glib::Mutex::Lock lock(mutex); getContext().destinations.clear(); } void Logger::deleteDestinations(LogDestination* exclude) { Glib::Mutex::Lock lock(mutex); std::list& destinations = getContext().destinations; for(std::list::iterator dest = destinations.begin(); dest != destinations.end();) { if (*dest != exclude) { delete *dest; *dest = NULL; dest = destinations.erase(dest); } else { ++dest; } } } void Logger::setThreshold(LogLevel threshold) { Glib::Mutex::Lock lock(mutex); this->getContext().threshold = threshold; } void Logger::setThresholdForDomain(LogLevel threshold, const std::list& subdomains) { setThresholdForDomain(threshold, list_to_domain(subdomains)); } void Logger::setThresholdForDomain(LogLevel threshold, const std::string& domain) { getRootLogger(); if(domain.empty() || (domain == "Arc")) { getRootLogger().setThreshold(threshold); } else { (*defaultThresholds)[domain] = threshold; } } LogLevel Logger::getThreshold() const { Glib::Mutex::Lock lock((Glib::Mutex&)mutex); const LoggerContext& ctx = ((Logger*)this)->getContext(); if(ctx.threshold != (LogLevel)0) return ctx.threshold; if(parent) return parent->getThreshold(); return (LogLevel)0; } void Logger::setThreadContext(void) { Glib::Mutex::Lock lock(mutex); LoggerContext* nctx = new LoggerContext(getContext()); new LoggerContextRef(*nctx,context_id); } LoggerContext& Logger::getContext(void) { if(context_id.empty()) return context; try { ThreadDataItem* item = ThreadDataItem::Get(context_id); if(!item) return context; LoggerContextRef* citem = dynamic_cast(item); if(!citem) return context; return citem->context; } catch(std::exception&) { }; return context; } void Logger::msg(LogMessage message) { message.setDomain(domain); if (message.getLevel() >= getThreshold()) { log(message); } } Logger::Logger() : parent(0), domain("Arc"), context(DefaultLogLevel) { // addDestination(cerr); } std::string Logger::getDomain() { return domain; } void Logger::log(const LogMessage& message) { Glib::Mutex::Lock lock(mutex); LoggerContext& ctx = getContext(); std::list::iterator dest; std::list::iterator begin = ctx.destinations.begin(); std::list::iterator end = ctx.destinations.end(); for (dest = begin; dest != end; ++dest) (*dest)->log(message); if (parent) parent->log(message); } } nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/UserConfig.h0000644000000000000000000000012412623100302023147 xustar000000000000000027 mtime=1447854274.517336 27 atime=1513200574.953707 30 ctime=1513200658.825733124 nordugrid-arc-5.4.2/src/hed/libs/common/UserConfig.h0000644000175000002070000015621012623100302023221 0ustar00mockbuildmock00000000000000// -*- 
indent-tabs-mode: nil -*- #ifndef __ARC_USERCONFIG_H__ #define __ARC_USERCONFIG_H__ #include #include #include #include #include #include #include namespace Arc { /** \addtogroup common * @{ */ class Logger; class XMLNode; /// Type of service enum ServiceType { COMPUTING, ///< A service that processes jobs INDEX ///< A service that provides information }; /// Represents the endpoint of service with a given type and GLUE2 InterfaceName /** A ConfigEndpoint can be a service registry or a local information system of a computing element. It has a URL, and optionally GLUE2 InterfaceName and a RequestedSubmissionInterfaceName, which will be used to filter the possible job submission interfaces on a computing element. \headerfile UserConfig.h arc/UserConfig.h */ class ConfigEndpoint { public: /// Types of ComputingEndpoint objects. enum Type { REGISTRY, ///< a service registry COMPUTINGINFO, ///< a local information system of a computing element ANY ///< both, only used for filtering, when both types are accepted }; /// Creates a ConfigEndpoint from a URL an InterfaceName and a Type. /** \param[in] URLString is a string containing the URL of the ConfigEndpoint \param[in] InterfaceName is a string containing the type of the interface based on the InterfaceName attribute in the GLUE2 specification \param[in] type is either ConfigEndpoint::REGISTRY or ConfigEndpoint::COMPUTINGINFO */ ConfigEndpoint(const std::string& URLString = "", const std::string& InterfaceName = "", ConfigEndpoint::Type type = ConfigEndpoint::ANY) : type(type), URLString(URLString), InterfaceName(InterfaceName) {} /// The type of the ConfigEndpoint: REGISTRY or COMPUTINGINFO. Type type; /// A string representing the URL of the ConfigEndpoint. std::string URLString; /// A string representing the interface type (based on the InterfaceName attribute of the GLUE2 specification). std::string InterfaceName; /// A GLUE2 InterfaceName requesting a job submission interface. /** This will be used when collecting information about the computing element. Only those job submission interfaces will be considered which has this requested InterfaceName. */ std::string RequestedSubmissionInterfaceName; /// Return true if the URL is not empty. operator bool() const { return (!URLString.empty()); } /// Returns true if the URL is empty. bool operator!() const { return (URLString.empty()); } /// Returns true if the type, the URLString, the InterfaceName and the RequestedSubmissionInterfaceName matches. bool operator==(ConfigEndpoint c) const { return (type == c.type) && (URLString == c.URLString) && (InterfaceName == c.InterfaceName) && (RequestedSubmissionInterfaceName == c.RequestedSubmissionInterfaceName); } }; /// Returns "computing" if st is COMPUTING, "index" if st is "INDEX", otherwise an empty string. std::string tostring(const ServiceType st); /// Defines how user credentials are looked for. /** * For complete information see description of * UserConfig::InitializeCredentials(initializeCredentialsType) * method. * \headerfile UserConfig.h arc/UserConfig.h */ class initializeCredentialsType { public: /// initializeType determines how UserConfig deals with credentials. 
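    /// For illustration (a sketch, not taken from the ARC sources): a client
    /// that must not look for credential files at all could be set up as
    /// \code
    /// #include <arc/UserConfig.h>
    ///
    /// int main() {
    ///   Arc::UserConfig uc(Arc::initializeCredentialsType(
    ///       Arc::initializeCredentialsType::SkipCredentials));
    ///   return uc ? 0 : 1; // valid unless construction failed
    /// }
    /// \endcode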
typedef enum { SkipCredentials, ///< Don't look for credentials NotTryCredentials, ///< Look for credentials but don't evaluate them TryCredentials, ///< Look for credentials and test if they are valid RequireCredentials, ///< Look for credentials, test if they are valid and report errors if not valid SkipCANotTryCredentials, ///< Same as NotTryCredentials but skip checking CA certificates SkipCATryCredentials, ///< Same as TryCredentials but skip checking CA certificates SkipCARequireCredentials ///< Same as RequireCredentials but skip checking CA certificates } initializeType; /// Construct a new initializeCredentialsType with initializeType TryCredentials. initializeCredentialsType(void):val(TryCredentials) { }; /// Construct a new initializeCredentialsType with initializeType v. initializeCredentialsType(initializeType v):val(v) { }; /// Returns true if this initializeType is the same as v. bool operator==(initializeType v) { return (val == v); }; /// Returns true if this initializeType is not the same as v. bool operator!=(initializeType v) { return (val != v); }; /// Operator returns initializeType. operator initializeType(void) { return val; }; private: initializeType val; }; /// %User configuration class /** * This class provides a container for a selection of various * attributes/parameters which can be configured to needs of the user, * and can be read by implementing instances or programs. The class * can be used in two ways. One can create a object from a * configuration file, or simply set the desired attributes by using * the setter method, associated with every setable attribute. The * list of attributes which can be configured in this class are: * - certificatepath / CertificatePath(const std::string&) * - keypath / KeyPath(const std::string&) * - proxypath / ProxyPath(const std::string&) * - cacertificatesdirectory / CACertificatesDirectory(const std::string&) * - cacertificatepath / CACertificatePath(const std::string&) * - timeout / Timeout(int) * - joblist / JobListFile(const std::string&) * - joblisttype / JobListType(const std::string&) * - verbosity / Verbosity(const std::string&) * - brokername / Broker(const std::string&) or Broker(const std::string&, const std::string&) * - brokerarguments / Broker(const std::string&) or Broker(const std::string&, const std::string&) * - vomsserverpath / VOMSESPath(const std::string&) * - username / UserName(const std::string&) * - password / Password(const std::string&) * - keypassword / KeyPassword(const std::string&) * - keysize / KeySize(int) * - certificatelifetime / CertificateLifeTime(const Period&) * - slcs / SLCS(const URL&) * - storedirectory / StoreDirectory(const std::string&) * - jobdownloaddirectory / JobDownloadDirectory(const std::string&) * - idpname / IdPName(const std::string&) * - submissioninterface / SubmissionInterface(const std::string&) * - infointerface / InfoInterface(const std::string&) * * where the first term is the name of the attribute used in the * configuration file, and the second term is the associated setter * method (for more information about a given attribute see the * description of the setter method). * * The configuration file should have a INI-style format and the * IniConfig class will thus be used to parse the file. The above * mentioned attributes should be placed in the common section. * * Besides the options above, the configuration file can contain * information about services (service registries and computing elements). * Each service has to be put in its on section. 
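   * As a quick illustration, a configuration file following the conventions
   * described in the remainder of this section might look like this
   * (hostnames, aliases and values are hypothetical):
   * \code
   * [common]
   * timeout=30
   *
   * [registry/index1]
   * url=ldap://index1.example.org:2135/Mds-Vo-name=NorduGrid,o=grid
   * default=yes
   *
   * [computing/testce]
   * url=https://testce.example.org:443/arex
   * group=production
   * \endcode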
Each service has * an alias, which is a short name. The name of the section consists * of the word `registry` for service registries and `computing` for * computing elements, then contains a slash and the alias of the * service. e.g. `[registry/index1]` or `[computing/testce]` * In a service section the possible options are the following: * - url: is the url of the service * - default: if yes, then this service will be used if no other is specified * - group: assigns the service to a group with a given name * * For computing elements the following additional options exist: * - infointerface: the GLUE2 InterfaceName of the local information system * - submissioninterface: the GLUE2 InterfaceName to the job submission interface * * For a service registry the following additional option exist: * - registryinterface: the GLUE2 InterfaceName of the service registry interface * * These services can be accessed by the #GetService, #GetServices, * #GetDefaultServices, #GetServicesInGroup methods, which return ConfigEndpoint * object(s). The ConfigEndpoint objects contain the URL and the InterfaceNames * of the services. * * The UserConfig class also provides a method InitializeCredentials() * for locating user credentials by searching in different standard * locations. The CredentialsFound() method can be used to test if * locating the credentials succeeded. * \headerfile UserConfig.h arc/UserConfig.h **/ class UserConfig { public: /// Create a UserConfig object /** * The UserConfig object created by this constructor initializes * only default values, and if specified by the * \a initializeCredentials boolean credentials will be tried * initialized using the InitializeCredentials() method. The object * is only non-valid if initialization of credentials fails which * can be checked with the #operator bool() method. * * @param initializeCredentials is a optional boolean indicating if * the InitializeCredentials() method should be invoked, the * default is \c true. * @see InitializeCredentials() * @see #operator bool() **/ UserConfig(initializeCredentialsType initializeCredentials = initializeCredentialsType()); /// Create a UserConfig object /** * The UserConfig object created by this constructor will, if * specified by the \a loadSysConfig boolean, first try to load the * system configuration file by invoking the LoadConfigurationFile() * method, and if this fails a ::WARNING is reported. Then the * configuration file passed will be tried loaded using the before * mentioned method, and if this fails an ::ERROR is reported, and * the created object will be non-valid. Note that if the passed * file path is empty the example configuration will be tried copied * to the default configuration file path specified by * DEFAULTCONFIG. If the example file cannot be copied one or more * ::WARNING messages will be reported and no configration will be * loaded. If loading the configurations file succeeded and if * \a initializeCredentials is \c true then credentials will be * initialized using the InitializeCredentials() method, and if no * valid credentials are found the created object will be non-valid. * * @param conffile is the path to a INI-configuration file. * @param initializeCredentials is a boolean indicating if * credentials should be initialized, the default is \c true. * @param loadSysConfig is a boolean indicating if the system * configuration file should be loaded aswell, the default is * \c true. 
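     *
     * A minimal sketch of using this constructor (the configuration path is
     * a hypothetical example):
     * \code
     * #include <iostream>
     * #include <arc/UserConfig.h>
     *
     * int main() {
     *   Arc::UserConfig uc("/home/user/.arc/client.conf");
     *   if (!uc) return 1; // loading or credential initialization failed
     *   std::cout << "timeout: " << uc.Timeout() << std::endl;
     *   return 0;
     * }
     * \endcode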
* @see LoadConfigurationFile(const std::string&, bool) * @see InitializeCredentials() * @see #operator bool() * @see SYSCONFIG * @see EXAMPLECONFIG **/ UserConfig(const std::string& conffile, initializeCredentialsType initializeCredentials = initializeCredentialsType(), bool loadSysConfig = true); /// Create a UserConfig object /** * The UserConfig object created by this constructor does only * differ from the UserConfig(const std::string&, bool, bool) * constructor in that it is possible to pass the path of the job * list file directly to this constructor. If the job list file * \a joblistfile is empty, the behaviour of this constructor is * exactly the same as the before mentioned, otherwise the job list * file will be initilized by invoking the setter method * JobListFile(const std::string&). If it fails the created object * will be non-valid, otherwise the specified configuration file * \a conffile will be loaded with the \a ignoreJobListFile argument * set to \c true. * * @param conffile is the path to a INI-configuration file * @param jfile is the path to a (non-)existing job list file. * @param initializeCredentials is a boolean indicating if * credentials should be initialized, the default is \c true. * @param loadSysConfig is a boolean indicating if the system * configuration file should be loaded aswell, the default is * \c true. * @see JobListFile(const std::string&) * @see LoadConfigurationFile(const std::string&, bool) * @see InitializeCredentials() * @see #operator bool() **/ UserConfig(const std::string& conffile, const std::string& jfile, initializeCredentialsType initializeCredentials = initializeCredentialsType(), bool loadSysConfig = true); /// Language binding constructor /** * The passed long int should be a pointer address to a UserConfig * object, and this address is then casted into this UserConfig * object. * * @param ptraddr is an memory address to a UserConfig object. **/ UserConfig(const long int& ptraddr); ~UserConfig() {} /// Initialize user credentials. /** * The location of the user credentials will be tried located when * calling this method and stored internally when found. The method * searches in different locations. * Depending on value of initializeCredentials this method behaves * differently. Following is an explanation for RequireCredentials. * For less strict values see information below. * First the user proxy or the user key/certificate pair is tried * located in the following order: * - Proxy path specified by the environment variable * X509_USER_PROXY. If value is set and corresponding file does * not exist it considered to be an error and no other locations * are tried. If found no more proxy paths are tried. * - Current proxy path as passed to the contructor, explicitly set * using the setter method ProxyPath(const std::string&) or read * from configuration by constructor or LoadConfiguartionFile() * method. If value is set and corresponding file does not exist * it considered to be an error and no other locations are tried. * If found no more proxy paths are tried. * - Proxy path made of x509up_u token concatenated with the user * numerical ID located in the OS temporary directory. It is NOT * an error if corresponding file does not exist and processing * continues. * - Key/certificate paths specified by the environment variables * X509_USER_KEY and X509_USER_CERT. If values are set and * corresponding files do not exist it considered to be an error * and no other locations are tried. 
Error message is supressed * if proxy was previously found. * - Current key/certificate paths passed to the contructor or * explicitly set using the setter methods KeyPath(const std::string&) * and CertificatePath(const std::string&) or read from configuration * by constructor or LoadConfiguartionFile() method. If values * are set and corresponding files do not exist it is an error * and no other locations are tried. Error message is supressed * if proxy was previously found. * - Key/certificate paths ~/.arc/usercert.pem and ~/.arc/userkey.pem * respectively are tried. It is not an error if not found. * - Key/certificate paths ~/.globus/usercert.pem and ~/.globus/userkey.pem * respectively are tried. It is not an error if not found. * - Key/certificate paths created by concatenation of ARC installation * location and /etc/arc/usercert.pem and /etc/arc/userkey.pem * respectively are tried. It is not an error if not found. * - Key/certificate located in current working directory are tried. * - If neither proxy nor key/certificate files are found this is * considered to be an error. * * Along with the proxy and key/certificate pair, the path of the * directory containing CA certificates is also located. The presence * of directory will be checked in the following order and first * found is accepted: * - Path specified by the X509_CERT_DIR environment variable. * It is an error if value is set and directory does not exist. * - Current path explicitly specified by using the setter method * CACertificatesDirectory() or read from configuration by * constructor or LoadConfiguartionFile() method. It is an error * if value is set and directory does not exist. * - Path ~/.globus/certificates. It is not an error if it does * not exist. * - Path created by concatenating the ARC installation location * and /etc/certificates. It is not an error if it does not exist. * - Path created by concatenating the ARC installation location * and /share/certificates. It is not an error if it does not exist. * - Path /etc/grid-security/certificates. * * It is an error if none of the directories above exist. * * In case of initializeCredentials == TryCredentials method behaves * same way like in case RequireCredentials except it does not report * errors through its Logger object and does not return false. * * If NotTryCredentials is used method does not check for presence of * credentials. It behaves like if corresponding files are always * present. * * And in case of SkipCredentials method does nothing. * * All options with SkipCA* prefix behaves similar to those without * prefix except the path of the directory containing CA certificates * is completely ignored. * * @see CredentialsFound() * @see ProxyPath(const std::string&) * @see KeyPath(const std::string&) * @see CertificatePath(const std::string&) * @see CACertificatesDirectory(const std::string&) **/ bool InitializeCredentials(initializeCredentialsType initializeCredentials); /// Validate credential location /** * Valid credentials consists of a combination of a path * to existing CA-certificate directory and either a path to * existing proxy or a path to existing user key/certificate pair. * If valid credentials are found this method returns \c true, * otherwise \c false is returned. * * @return \c true if valid credentials are found, otherwise \c * false. 
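     *
     * For example (a sketch relying on the default search locations):
     * \code
     * #include <iostream>
     * #include <arc/UserConfig.h>
     *
     * int main() {
     *   Arc::UserConfig uc; // the default constructor tries to locate credentials
     *   if (!uc.CredentialsFound()) return 1;
     *   std::cout << uc.ProxyPath() << " " << uc.CertificatePath() << std::endl;
     *   return 0;
     * }
     * \endcode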
* @see InitializeCredentials() **/ bool CredentialsFound() const { return !((proxyPath.empty() && (certificatePath.empty() || keyPath.empty())) || caCertificatesDirectory.empty()); } /// Load specified configuration file /** * The configuration file passed is parsed by this method by using * the IniConfig class. If the parsing is unsuccessful a ::WARNING * is reported. * * The format of the configuration file should follow that of INI, * and every attribute present in the file is only allowed once * (except the `rejectmanagement` and `rejectdiscovery` attributes), * otherwise a ::WARNING will be reported. For the list of allowed * attributes see the detailed description of UserConfig. * * @param conffile is the path to the configuration file. * @param ignoreJobListFile is a optional boolean which indicates * whether the joblistfile attribute in the configuration * file should be ignored. Default is to ignored it * (\c true). * @return If loading the configuration file succeeds \c true is * returned, otherwise \c false is returned. * @see SaveToFile() * \since Changed in 4.0.0. Added the joblisttype attribute to attributes * being parsed. Parsing of the retired bartender attribute is removed. **/ bool LoadConfigurationFile(const std::string& conffile, bool ignoreJobListFile = true); /// Save to INI file. /** * This method will save the object data as a INI file. The * saved file can be loaded with the LoadConfigurationFile method. * * @param filename the name of the file which the data will be * saved to. * @return \c false if unable to get handle on file, otherwise * \c true is returned. * @see LoadConfigurationFile() **/ bool SaveToFile(const std::string& filename) const; /// Apply credentials to BaseConfig /** * This methods sets the BaseConfig credentials to the credentials * contained in this object. It also passes user defined configuration * overlay if any. * * @see InitializeCredentials() * @see CredentialsFound() * @see BaseConfig * @param ccfg a BaseConfig object which will configured with * the credentials of this object. * \since Changed in 4.0.0. %Credential string is checked first and used if * non-empty (see * \ref CredentialString(const std::string&) "CredentialString"). **/ void ApplyToConfig(BaseConfig& ccfg) const; /// Check for validity /** * The validity of an object created from this class can be checked * using this casting operator. An object is valid if the * constructor did not encounter any errors. * @see operator!() **/ operator bool() const { return ok; } /// Check for non-validity /** * See #operator bool() for a description. * @see #operator bool() **/ bool operator!() const { return !ok; } /// Set path to job list file /** * The method takes a path to a file which will be used as the job * list file for storing and reading job information. This method always * return true. * * The attribute associated with this setter method is 'joblist'. * * @param path the path to the job list file. * @return true is always returned. * @see JobListFile() const * \since Changed in 4.0.0. Method now always returns true. **/ bool JobListFile(const std::string& path); /// Get a reference to the path of the job list file. /** * The job list file is used to store and fetch information about * submitted computing jobs to computing services. This method will * return the path to the specified job list file. * * @return The path to the job list file is returned. 
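     *
     * For example, given an Arc::UserConfig object \c uc (a sketch):
     * \code
     * std::cout << uc.JobListFile() << std::endl; // by default <ARCUSERDIRECTORY>/jobs.dat
     * \endcode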
* @see JobListFile(const std::string&) **/ const std::string& JobListFile() const { return joblistfile; } /// Set type of job storage /** * Possible storage types are BDB and XML. This method always return true. * * The attribute associated with this setter method is 'joblisttype'. * * @param type of job storage * @return true is always returned. * @see JobListType() * \since Added in 4.0.0. **/ bool JobListType(const std::string& type); /// Get type of job storage /** * @return The type of job storage is returned. * @see JobListType(const std::string&) * \since Added in 4.0.0. **/ const std::string& JobListType() const { return joblisttype; } /// Set timeout /** * When communicating with a service the timeout specifies how long, * in seconds, the communicating instance should wait for a * response. If the response have not been received before this * period in time, the connection is typically dropped, and an error * will be reported. * * This method will set the timeout to the specified integer. If * the passed integer is less than or equal to 0 then \c false is * returned and the timeout will not be set, otherwise \c true is * returned and the timeout will be set to the new value. * * The attribute associated with this setter method is 'timeout'. * * @param newTimeout the new timeout value in seconds. * @return \c false in case \a newTimeout <= 0, otherwise \c true. * @see Timeout() const * @see DEFAULT_TIMEOUT **/ bool Timeout(int newTimeout); /// Get timeout /** * Returns the timeout in seconds. * * @return timeout in seconds. * @see Timeout(int) * @see DEFAULT_TIMEOUT **/ int Timeout() const { return timeout; } /// Set verbosity. /** * The verbosity will be set when invoking this method. If the * string passed cannot be parsed into a corresponding LogLevel, * using the function a * ::WARNING is reported and \c false is returned, otherwise \c true * is returned. * * The attribute associated with this setter method is 'verbosity'. * * @return \c true in case the verbosity could be set to a allowed * LogLevel, otherwise \c false. * @see Verbosity() const * \since Changed in 4.1.0. The argument string is now treated * case-insensitively. **/ bool Verbosity(const std::string& newVerbosity); /// Get the user selected level of verbosity. /** * The string representation of the verbosity level specified by the * user is returned when calling this method. If the user have not * specified the verbosity level the empty string will be * referenced. * * @return the verbosity level, or empty if it has not been set. * @see Verbosity(const std::string&) **/ const std::string& Verbosity() const { return verbosity; } /// Set broker to use in target matching /** * The string passed to this method should be in the format: * \f[[:]\f] * where the \ is the name of the broker and cannot contain * any ':', and the optional \ should contain arguments which * should be passed to the broker. * * Two attributes are associated with this setter method * 'brokername' and 'brokerarguments'. * * @param name the broker name and argument specified in the format * given above. * @return This method allways returns \c true. * @see Broker * @see Broker(const std::string&, const std::string&) * @see Broker() const * @see DEFAULT_BROKER **/ bool Broker(const std::string& name); /// Set broker to use in target matching /** * As opposed to the Broker(const std::string&) method this method * sets broker name and arguments directly from the passed two * arguments. 
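     *
     * For example, given an Arc::UserConfig object \c uc ("MyBroker" is a
     * hypothetical plugin name used only for illustration):
     * \code
     * uc.Broker("MyBroker", "some,arguments");
     * // roughly equivalent to the single-string form:
     * uc.Broker("MyBroker:some,arguments");
     * \endcode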
* * Two attributes are associated with this setter method * 'brokername' and 'brokerarguments'. * * @param name is the name of the broker. * @param argument is the arguments of the broker. * @return This method always returns \c true. * @see Broker * @see Broker(const std::string&) * @see Broker() const * @see DEFAULT_BROKER **/ bool Broker(const std::string& name, const std::string& argument) { broker = std::pair(name, argument); return true;} /// Get the broker and corresponding arguments. /** * The returned pair contains the broker name as the first component * and the argument as the second. * * @see Broker(const std::string&) * @see Broker(const std::string&, const std::string&) * @see DEFAULT_BROKER **/ const std::pair& Broker() const { return broker; } /// Set path to file containing VOMS configuration /** * Set path to file which contians list of VOMS services and * associated configuration parameters needed to contact those * services. It is used by arcproxy. * * The attribute associated with this setter method is * 'vomsserverpath'. * * @param path the path to VOMS configuration file * @return This method always return true. * @see VOMSESPath() const **/ bool VOMSESPath(const std::string& path) { vomsesPath = path; return true; } /// Get path to file containing VOMS configuration /** * Get path to file which contians list of VOMS services and * associated configuration parameters. * * @return The path to VOMS configuration file is returned. * @see VOMSESPath(const std::string&) **/ const std::string& VOMSESPath(); /// Set user-name for SLCS /** * Set username which is used for requesting credentials from * Short Lived Credentials Service. * * The attribute associated with this setter method is 'username'. * * @param name is the name of the user. * @return This method always return true. * @see UserName() const **/ bool UserName(const std::string& name) { username = name; return true; } /// Get user-name /** * Get username which is used for requesting credentials from * Short Lived Credentials Service. * * @return The username is returned. * @see UserName(const std::string&) **/ const std::string& UserName() const { return username; } /// Set password /** * Set password which is used for requesting credentials from * Short Lived Credentials Service. * * The attribute associated with this setter method is 'password'. * * @param newPassword is the new password to set. * @return This method always returns true. * @see Password() const **/ bool Password(const std::string& newPassword) { password = newPassword; return true; } /// Get password /** * Get password which is used for requesting credentials from * Short Lived Credentials Service. * * @return The password is returned. * @see Password(const std::string&) **/ const std::string& Password() const { return password; } /// Set credentials. /** * For code which does not need credentials stored in files, this method * can be used to set the credential as a string stored in memory. * * @param cred The credential represented as a string * \since Added in 4.0.0. */ void CredentialString(const std::string& cred) { credentialString = cred; } /// Get credentials. /** * Returns the string representation of credentials previously set by * CredentialString(const std::string&). * * @return String representation of credentials * \since Added in 4.0.0. */ const std::string& CredentialString() const { return credentialString; } /// Set path to user proxy /** * This method will set the path of the user proxy. 
Note that the * InitializeCredentials() method will also try to set this path, by * searching in different locations. * * The attribute associated with this setter method is 'proxypath' * * @param newProxyPath is the path to a user proxy. * @return This method always returns \c true. * @see InitializeCredentials() * @see CredentialsFound() * @see ProxyPath() const **/ bool ProxyPath(const std::string& newProxyPath) { proxyPath = newProxyPath; return true;} /// Get path to user proxy. /** * Retrieve path to user proxy. * * @return Returns the path to the user proxy. * @see ProxyPath(const std::string&) **/ const std::string& ProxyPath() const { return proxyPath; } /// Set path to certificate /** * The path to user certificate will be set by this method. The path * to the correcsponding key can be set with the * KeyPath(const std::string&) method. Note that the * InitializeCredentials() method will also try to set this path, by * searching in different locations. * * The attribute associated with this setter method is * 'certificatepath'. * * @param newCertificatePath is the path to the new certificate. * @return This method always returns \c true. * @see InitializeCredentials() * @see CredentialsFound() const * @see CertificatePath() const * @see KeyPath(const std::string&) **/ bool CertificatePath(const std::string& newCertificatePath) { certificatePath = newCertificatePath; return true; } /// Get path to certificate /** * The path to the cerficate is returned when invoking this method. * @return The certificate path is returned. * @see InitializeCredentials() * @see CredentialsFound() const * @see CertificatePath(const std::string&) * @see KeyPath() const **/ const std::string& CertificatePath() const { return certificatePath; } /// Set path to key /** * The path to user key will be set by this method. The path to the * corresponding certificate can be set with the * CertificatePath(const std::string&) method. Note that the * InitializeCredentials() method will also try to set this path, by * searching in different locations. * * The attribute associated with this setter method is 'keypath'. * * @param newKeyPath is the path to the new key. * @return This method always returns \c true. * @see InitializeCredentials() * @see CredentialsFound() const * @see KeyPath() const * @see CertificatePath(const std::string&) * @see KeyPassword(const std::string&) * @see KeySize(int) **/ bool KeyPath(const std::string& newKeyPath) { keyPath = newKeyPath; return true; } /// Get path to key /** * The path to the key is returned when invoking this method. * * @return The path to the user key is returned. * @see InitializeCredentials() * @see CredentialsFound() const * @see KeyPath(const std::string&) * @see CertificatePath() const * @see KeyPassword() const * @see KeySize() const **/ const std::string& KeyPath() const { return keyPath; } /// Set password for generated key /** * Set password to be used to encode private key of credentials * obtained from Short Lived Credentials Service. * * The attribute associated with this setter method is * 'keypassword'. * * @param newKeyPassword is the new password to the key. * @return This method always returns \c true. * @see KeyPassword() const * @see KeyPath(const std::string&) * @see KeySize(int) **/ bool KeyPassword(const std::string& newKeyPassword) { keyPassword = newKeyPassword; return true; } /// Get password for generated key /** * Get password to be used to encode private key of credentials * obtained from Short Lived Credentials Service. 
* * @return The key password is returned. * @see KeyPassword(const std::string&) * @see KeyPath() const * @see KeySize() const **/ const std::string& KeyPassword() const { return keyPassword; } /// Set key size /** * Set size/strengt of private key of credentials obtained from * Short Lived Credentials Service. * * The attribute associated with this setter method is 'keysize'. * * @param newKeySize is the size, an an integer, of the key. * @return This method always returns \c true. * @see KeySize() const * @see KeyPath(const std::string&) * @see KeyPassword(const std::string&) **/ bool KeySize(int newKeySize) { keySize = newKeySize; return true;} /// Get key size /** * Get size/strengt of private key of credentials obtained from * Short Lived Credentials Service. * * @return The key size, as an integer, is returned. * @see KeySize(int) * @see KeyPath() const * @see KeyPassword() const **/ int KeySize() const { return keySize; } /// Set CA-certificate path /** * The path to the file containing CA-certificate will be set * when calling this method. This configuration parameter is * deprecated - use CACertificatesDirectory instead. Only arcslcs * uses it. * * The attribute associated with this setter method is * 'cacertificatepath'. * * @param newCACertificatePath is the path to the CA-certificate. * @return This method always returns \c true. * @see CACertificatePath() const **/ bool CACertificatePath(const std::string& newCACertificatePath) { caCertificatePath = newCACertificatePath; return true; } /// Get path to CA-certificate /** * Retrieve the path to the file containing CA-certificate. * This configuration parameter is deprecated. * * @return The path to the CA-certificate is returned. * @see CACertificatePath(const std::string&) **/ const std::string& CACertificatePath() const { return caCertificatePath; } /// Set path to CA-certificate directory /** * The path to the directory containing CA-certificates will be set * when calling this method. Note that the InitializeCredentials() * method will also try to set this path, by searching in different * locations. * * The attribute associated with this setter method is * 'cacertificatesdirectory'. * * @param newCACertificatesDirectory is the path to the * CA-certificate directory. * @return This method always returns \c true. * @see InitializeCredentials() * @see CredentialsFound() const * @see CACertificatesDirectory() const **/ bool CACertificatesDirectory(const std::string& newCACertificatesDirectory) { caCertificatesDirectory = newCACertificatesDirectory; return true; } /// Get path to CA-certificate directory /** * Retrieve the path to the CA-certificate directory. * * @return The path to the CA-certificate directory is returned. * @see InitializeCredentials() * @see CredentialsFound() const * @see CACertificatesDirectory(const std::string&) **/ const std::string& CACertificatesDirectory() const { return caCertificatesDirectory; } /// Set certificate life time /** * Sets lifetime of user certificate which will be obtained from * Short Lived Credentials Service. * * The attribute associated with this setter method is * 'certificatelifetime'. * * @param newCertificateLifeTime is the life time of a certificate, * as a Period object. * @return This method always returns \c true. 
* @see CertificateLifeTime() const **/ bool CertificateLifeTime(const Period& newCertificateLifeTime) { certificateLifeTime = newCertificateLifeTime; return true; } /// Get certificate life time /** * Gets lifetime of user certificate which will be obtained from * Short Lived Credentials Service. * * @return The certificate life time is returned as a Period object. * @see CertificateLifeTime(const Period&) **/ const Period& CertificateLifeTime() const { return certificateLifeTime; } /// Set the URL to the Short Lived Certificate Service (SLCS). /** * * The attribute associated with this setter method is 'slcs'. * * @param newSLCS is the URL to the SLCS * @return This method always returns \c true. * @see SLCS() const **/ bool SLCS(const URL& newSLCS) { slcs = newSLCS; return true; } /// Get the URL to the Short Lived Certificate Service (SLCS). /** * * @return The SLCS is returned. * @see SLCS(const URL&) **/ const URL& SLCS() const { return slcs; } /// Set store directory /** * Sets directory which will be used to store credentials obtained * from Short Lived Credential Servide. * * The attribute associated with this setter method is * 'storedirectory'. * @param newStoreDirectory is the path to the store directory. * @return This method always returns \c true. * @see **/ bool StoreDirectory(const std::string& newStoreDirectory) { storeDirectory = newStoreDirectory; return true; } /// Get store diretory /** * Sets directory which is used to store credentials obtained * from Short Lived Credential Servide. * * @return The path to the store directory is returned. * @see StoreDirectory(const std::string&) **/ const std::string& StoreDirectory() const { return storeDirectory; } /// Set download directory /** * Sets directory which will be used to download the job * directory using arcget command. * * The attribute associated with this setter method is * 'jobdownloaddirectory'. * @param newDownloadDirectory is the path to the download directory. * @return This method always returns \c true. * @see **/ bool JobDownloadDirectory(const std::string& newDownloadDirectory) { downloadDirectory = newDownloadDirectory; return true; } /// Get download directory /** * returns directory which will be used to download the job * directory using arcget command. * * The attribute associated with the method is * 'jobdownloaddirectory'. * @return This method returns the job download directory. * @see **/ const std::string& JobDownloadDirectory() const { return downloadDirectory; } /// Set IdP name /** * Sets Identity Provider name (Shibboleth) to which user belongs. * It is used for contacting Short Lived Certificate Service. * * The attribute associated with this setter method is 'idpname'. * @param name is the new IdP name. * @return This method always returns \c true. * @see **/ bool IdPName(const std::string& name) { idPName = name; return true; } /// Get IdP name /** * Gets Identity Provider name (Shibboleth) to which user belongs. * * @return The IdP name * @see IdPName(const std::string&) **/ const std::string& IdPName() const { return idPName; } /// Set path to configuration overlay file /** * Content of specified file is a backdoor to configuration XML * generated from information stored in this class. The content * of file is passed to BaseConfig class in ApplyToConfig(BaseConfig&) * then merged with internal configuration XML representation. * This feature is meant for quick prototyping/testing/tuning of * functionality without rewriting code. It is meant for developers and * most users won't need it. 
* * The attribute associated with this setter method is 'overlayfile'. * @param path is the new overlay file path. * @return This method always returns \c true. * @see **/ bool OverlayFile(const std::string& path) { overlayfile = path; return true; } /// Get path to configuration overlay file /** * @return The overlay file path * @see OverlayFile(const std::string&) **/ const std::string& OverlayFile() const { return overlayfile; } /// Set path to directory storing utility files for DataPoints /** * Some DataPoints can store information on remote services in local * files. This method sets the path to the directory containing these * files. For example arc* tools set it to ARCUSERDIRECTORY and A-REX * sets it to the control directory. The directory is created if it * does not exist. * @param dir is the new utils dir path. * @return This method always returns \c true. */ bool UtilsDirPath(const std::string& dir); /// Get path to directory storing utility files for DataPoints /** * @return The utils dir path * @see UtilsDirPath(const std::string&) */ const std::string& UtilsDirPath() const { return utilsdir; }; /// Set User for filesystem access /** * Sometimes it is desirable to use the identity of another user * when accessing the filesystem. This user can be specified through * this method. By default this user is the same as the user running * the process. * @param u User identity to use */ void SetUser(const User& u) { user = u; }; /// Get User for filesystem access /** * @return The user identity to use for file system access * @see SetUser(const User&) */ const User& GetUser() const { return user; }; /// Set the default local information system interface /** For services which does not specify a local information system interface, this default will be used. If a local information system interface is given, the computing element will be only queried using this interface. \param infointerface_ is a string specifying a GLUE2 InterfaceName \return This method always returns \c true. */ bool InfoInterface(const std::string& infointerface_) { infointerface = infointerface_; return true; } /// Get the default local information system interface /** \return the GLUE2 InterfaceName string specifying the default local information system interface \see InfoInterface(const std::string&) */ const std::string& InfoInterface() const { return infointerface; }; /// Set the default submission interface /** For services which does not specify a submission interface this default submission interface will be used. If a submission interface is given, then all the jobs will be submitted to this interface, no other job submission interfaces of the computing element will be tried. \param submissioninterface_ is a string specifying a GLUE2 InterfaceName \return This method always returns \c true. */ bool SubmissionInterface(const std::string& submissioninterface_) { submissioninterface = submissioninterface_; return true; } /// Get the default submission interface /** \return the GLUE2 InterfaceName string specifying the default submission interface \see SubmissionInterface(const std::string&) */ const std::string& SubmissionInterface() const { return submissioninterface; }; /// Get the list of rejected service discovery URLs /** This list is populated by the (possibly multiple) `rejectdiscovery` configuration options. A service registry should not be queried if its URL matches any string in this list. 
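
        For example, given an Arc::UserConfig object \c uc (a sketch with a
        hypothetical registry URL):
        \code
        std::list<std::string> skip;
        skip.push_back("ldap://broken-index.example.org:2135");
        uc.AddRejectDiscoveryURLs(skip);                 // extend the reject list
        std::size_t n = uc.RejectDiscoveryURLs().size(); // inspect it
        \endcode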
\return a list of rejected service discovery URLs */ const std::list<std::string>& RejectDiscoveryURLs() const { return rejectDiscoveryURLs; };
/// Add a list of URLs to be ignored at service discovery /** * The passed list of strings will be added to the internal reject list; * they should represent URLs which should be ignored when doing service * discovery. * \param urls list of strings representing URLs to ignore at service * discovery **/ void AddRejectDiscoveryURLs(const std::list<std::string>& urls) { rejectDiscoveryURLs.insert(rejectDiscoveryURLs.end(), urls.begin(), urls.end()); }
/// Clear the rejected service discovery URLs /** * Clears the list of strings representing URLs which should be ignored * during service discovery. **/ void ClearRejectDiscoveryURLs() { rejectDiscoveryURLs.clear(); }
/// Get the list of rejected job management URLs /** This list is populated by the (possibly multiple) `rejectmanagement` configuration options. Jobs residing on a computing element whose URL matches any string in this list should not be managed. \return a list of rejected job management URLs */ const std::list<std::string>& RejectManagementURLs() const { return rejectManagementURLs; };
/// Get the ConfigEndpoint for the service with the given alias /** Each service in the configuration file has its own section, and the name of the section contains the type of the service (`registry` or `computing`) and the alias of the service (separated by a slash). \param[in] alias is the alias of the service \return the ConfigEndpoint generated from the service with the given alias. */ ConfigEndpoint GetService(const std::string& alias);
/// Get the services in a given group filtered by type /** All services of the given group are returned if they match the type filter. \param[in] group is the name of the group \param[in] type is REGISTRY or COMPUTING if only those services are needed, or ANY if all \return a list of ConfigEndpoint objects, the services in the group, an empty list if there is no such group or no services matched the filter */ std::list<ConfigEndpoint> GetServicesInGroup(const std::string& group, ConfigEndpoint::Type type = ConfigEndpoint::ANY);
/// Get the services flagged as default filtered by type /** Return all the services which had `default=yes` in their configuration, if they have the given type. \param[in] type is REGISTRY or COMPUTING if only those services are needed, or ANY if all \return a list of ConfigEndpoint objects, the default services, an empty list if there are no default services or no services matched the filter */ std::list<ConfigEndpoint> GetDefaultServices(ConfigEndpoint::Type type = ConfigEndpoint::ANY);
/// Get one or more services with the given alias or in the given group, filtered by type /** This is a convenience method for querying the configured services by either the name of a group or the alias of a service. If the name is the name of a group then all the services in the group will be returned (filtered by type). If there is no such group, then a service with the given alias is returned in a single item list (but only if it matches the filter).
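    For illustration, a hedged sketch (the group/alias name is hypothetical):
    \code
    // all computing services configured under the group or alias "mygrid"
    std::list<Arc::ConfigEndpoint> eps =
        usercfg.GetServices("mygrid", Arc::ConfigEndpoint::COMPUTINGINFO);
    \endcode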
\param[in] groupOrAlias is either the name of a group or the alias of a service \param[in] type is REGISTRY or COMPUTING if only those services are needed, or ANY if all \return a list of ConfigEndpoint objects, the found services, an empty list if there is no such group and no such alias, or no services matched the filter */ std::list<ConfigEndpoint> GetServices(const std::string& groupOrAlias, ConfigEndpoint::Type type = ConfigEndpoint::ANY);
/// Get all services std::map<std::string, ConfigEndpoint> GetAllConfiguredServices() { return allServices; }
/// Path to ARC user home directory /** * The \a ARCUSERDIRECTORY variable is the path to the ARC home * directory of the current user. This path is created using the * User::Home() method. * @see User::Home() **/ static const std::string ARCUSERDIRECTORY;
/// Path to system configuration /** * The \a SYSCONFIG variable is the path to the system configuration * file. This variable is only equal to SYSCONFIGARCLOC if ARC is installed * in the root (highly unlikely). **/ static const std::string SYSCONFIG;
/// Path to system configuration at ARC location. /** * The \a SYSCONFIGARCLOC variable is the path to the system configuration * file which resides at the ARC installation location. **/ static const std::string SYSCONFIGARCLOC;
/// Path to default configuration file /** * The \a DEFAULTCONFIG variable is the path to the default * configuration file used in case no configuration file has been * specified. The path is created from the * ARCUSERDIRECTORY object. **/ static const std::string DEFAULTCONFIG;
/// Path to example configuration /** * The \a EXAMPLECONFIG variable is the path to the example * configuration file. **/ static const std::string EXAMPLECONFIG;
/// Path to default job list file /** * The \a JOBLISTFILE variable specifies the default path to the job list * file used by the ARC job management tools. The job list file is located * in the directory specified by the ARCUSERDIRECTORY variable with the name * 'jobs.dat'. * @see ARCUSERDIRECTORY * \since Added in 4.0.0. **/ static const std::string JOBLISTFILE;
/// Default timeout in seconds /** * The \a DEFAULT_TIMEOUT specifies the interval which will be used * in case no timeout interval has been explicitly specified. For a * description of the timeout see Timeout(int). * @see Timeout(int) * @see Timeout() const **/ static const int DEFAULT_TIMEOUT = 20;
/// Default broker /** * The \a DEFAULT_BROKER specifies the name of the broker which * should be used in case no broker is explicitly chosen. * @see Broker * @see Broker(const std::string&) * @see Broker(const std::string&, const std::string&) * @see Broker() const **/ static const std::string DEFAULT_BROKER;
private:
static ConfigEndpoint ServiceFromLegacyString(std::string);
void setDefaults();
static bool makeDir(const std::string& path);
static bool copyFile(const std::string& source, const std::string& destination);
bool CreateDefaultConfigurationFile() const;
std::list<ConfigEndpoint> FilterServices(const std::list<ConfigEndpoint>&, ConfigEndpoint::Type);
std::string joblistfile;
std::string joblisttype;
int timeout;
std::string verbosity;
// Broker name and arguments.
std::pair broker; std::list defaultServices; std::map allServices; std::map > groupMap; std::list rejectDiscoveryURLs; std::list rejectManagementURLs; std::string credentialString; std::string proxyPath; std::string certificatePath; std::string keyPath; std::string keyPassword; int keySize; std::string caCertificatePath; std::string caCertificatesDirectory; Period certificateLifeTime; URL slcs; std::string vomsesPath; std::string storeDirectory; std::string downloadDirectory; std::string idPName; std::string username; std::string password; std::string overlayfile; std::string utilsdir; std::string submissioninterface; std::string infointerface; // User whose identity (uid/gid) should be used to access filesystem // Normally this is the same as the process owner User user; // Private members not refered to outside this class: bool ok; initializeCredentialsType initializeCredentials; static Logger logger; }; /// Class for handling X509* variables in a multi-threaded environment. /** * This class is useful when using external libraries which depend on X509* * environment variables in a multi-threaded environment. When an instance of * this class is created it holds a lock on these variables until the * instance is destroyed. Additionally, if the credentials pointed to by the * those variables are owned by a different uid from the uid of the current * process, a temporary copy is made owned by the uid of the current process * and the X509 variable points there instead. This is to comply with some * restrictions in third-party libraries which insist on the credential files * being owned by the current uid. * \headerfile UserConfig.h arc/UserConfig.h */ class CertEnvLocker { public: /// Create a lock on X509 environment variables. Blocks if another instance exists. CertEnvLocker(const UserConfig& cfg); /// Release lock on X509 environment variables and set back to old values if they were changed. ~CertEnvLocker(void); protected: std::string x509_user_key_old; std::string x509_user_key_new; bool x509_user_key_set; std::string x509_user_cert_old; std::string x509_user_cert_new; bool x509_user_cert_set; std::string x509_user_proxy_old; std::string x509_user_proxy_new; bool x509_user_proxy_set; std::string ca_cert_dir_old; bool ca_cert_dir_set; }; /** @} */ } // namespace Arc #endif // __ARC_USERCONFIG_H__ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/IString.h0000644000000000000000000000012412574537240022506 xustar000000000000000027 mtime=1441971872.085743 27 atime=1513200574.885706 30 ctime=1513200658.817733026 nordugrid-arc-5.4.2/src/hed/libs/common/IString.h0000644000175000002070000001462212574537240022560 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_ISTRING__ #define __ARC_ISTRING__ #include #include #include #include #include #include // snprintf #include // free #include // strcpy, strdup #define istring(x) (x) namespace Arc { /** \cond Class used internally by IString. */ class PrintFBase { public: PrintFBase(); virtual ~PrintFBase(); virtual void msg(std::ostream& os) const = 0; virtual void msg(std::string& s) const = 0; void Retain(); bool Release(); private: // Copying not allowed PrintFBase(const PrintFBase&); // Assignment not allowed PrintFBase& operator=(const PrintFBase&); int refcount; }; /** \endcond */ /// Return the translation of the given string. /** \ingroup common */ const char* FindTrans(const char *p); /// Return the plural form translation of the given string when it refers to multiple n. 
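/// (A hedged reading of the signature, in the ngettext style: \a s is presumably the singular
/// form, \a p the plural form, and \a n selects between them, e.g.
/// FindNTrans("%d file", "%d files", n) would pick the plural message for n != 1.)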
/** \ingroup common */ const char* FindNTrans(const char *s, const char *p, unsigned long n); /** \cond Class used internally by IString. */ template class PrintF : public PrintFBase { public: PrintF(const std::string& m, const T0& tt0 = 0, const T1& tt1 = 0, const T2& tt2 = 0, const T3& tt3 = 0, const T4& tt4 = 0, const T5& tt5 = 0, const T6& tt6 = 0, const T7& tt7 = 0) : PrintFBase(), m(m) { Copy(t0, tt0); Copy(t1, tt1); Copy(t2, tt2); Copy(t3, tt3); Copy(t4, tt4); Copy(t5, tt5); Copy(t6, tt6); Copy(t7, tt7); } ~PrintF() { for (std::list::iterator it = ptrs.begin(); it != ptrs.end(); it++) free(*it); } virtual void msg(std::ostream& os) const { char buffer[2048]; snprintf(buffer, 2048, Get(m), Get(t0), Get(t1), Get(t2), Get(t3), Get(t4), Get(t5), Get(t6), Get(t7)); os << buffer; } virtual void msg(std::string& s) const { char buffer[2048]; snprintf(buffer, 2048, Get(m), Get(t0), Get(t1), Get(t2), Get(t3), Get(t4), Get(t5), Get(t6), Get(t7)); s = buffer; } private: // general case template inline void Copy(T& t, const U& u) { t = u; } // char[] and const char[] template inline void Copy(T& t, const char u[]) { strcpy(t, u); } // const char* inline void Copy(const char*& t, const char*const& u) { t = strdup(u); ptrs.push_back(const_cast(t)); } // char* inline void Copy(char*& t, char*const& u) { t = strdup(u); ptrs.push_back(t); } // general case template inline static const T& Get(const T& t) { return t; } // const char[] and const char* inline static const char* Get(const char*const& t) { return FindTrans(t); } // char[] and char* inline static const char* Get(char*const& t) { return FindTrans(const_cast(t)); } // std::string inline static const char* Get(const std::string& t) { return FindTrans(t.c_str()); } // std::string ()() inline static const char* Get(std::string (*t)()) { return FindTrans(t().c_str()); } // Glib::ustring inline static const char* Get(const Glib::ustring& t) { return FindTrans(t.c_str()); } // Glib::ustring ()() inline static const char* Get(Glib::ustring (*t)()) { return FindTrans(t().c_str()); } // sigc::slot* inline static const char* Get(const sigc::slot *t) { return (*t)(); } std::string m; T0 t0; T1 t1; T2 t2; T3 t3; T4 t4; T5 t5; T6 t6; T7 t7; std::list ptrs; }; /** \endcond */ /// Class used for localised output of log messages. /** IString should only be used directly in rare cases. Logger should be used * instead in most cases. 
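 *
 * A minimal, hedged usage sketch (the message text and values are made up);
 * the format string, and any string arguments, are looked up in the
 * translation catalogue before being formatted printf-style:
 * \code
 * std::cout << Arc::IString("Found %i jobs at %s", 5, "example.org") << std::endl;
 * \endcode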
* \ingroup common * \headerfile IString.h arc/IString.h */ class IString { public: IString(const std::string& m) : p(new PrintF<>(m)) {} template IString(const std::string& m, const T0& t0) : p(new PrintF(m, t0)) {} template IString(const std::string& m, const T0& t0, const T1& t1) : p(new PrintF(m, t0, t1)) {} template IString(const std::string& m, const T0& t0, const T1& t1, const T2& t2) : p(new PrintF(m, t0, t1, t2)) {} template IString(const std::string& m, const T0& t0, const T1& t1, const T2& t2, const T3& t3) : p(new PrintF(m, t0, t1, t2, t3)) {} template IString(const std::string& m, const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4) : p(new PrintF(m, t0, t1, t2, t3, t4)) {} template IString(const std::string& m, const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5) : p(new PrintF(m, t0, t1, t2, t3, t4, t5)) {} template IString(const std::string& m, const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5, const T6& t6) : p(new PrintF(m, t0, t1, t2, t3, t4, t5, t6)) {} template IString(const std::string& m, const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5, const T6& t6, const T7& t7) : p(new PrintF(m, t0, t1, t2, t3, t4, t5, t6, t7)) {} ~IString(); IString(const IString& istr); IString& operator=(const IString& istr); std::string str(void) const; private: PrintFBase *p; friend std::ostream& operator<<(std::ostream& os, const IString& msg); }; /// Output localised message to an output stream. std::ostream& operator<<(std::ostream& os, const IString& msg); } // namespace Arc #endif // __ARC_ISTRING__ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/Watchdog.h0000644000000000000000000000012412111140470022645 xustar000000000000000027 mtime=1361363256.081099 27 atime=1513200574.896706 30 ctime=1513200658.836733258 nordugrid-arc-5.4.2/src/hed/libs/common/Watchdog.h0000644000175000002070000000326312111140470022716 0ustar00mockbuildmock00000000000000#ifndef __ARC_WATCHDOG_H__ #define __ARC_WATCHDOG_H__ namespace Arc { // Internal implementation of watchdog. // Currently only single global watchdog is supported. class Watchdog; /// This class is meant to provide interface for Watchdog executor part. /** \ingroup common * \headerfile Watchdog.h arc/Watchdog.h */ class WatchdogListener { private: Watchdog& instance_; time_t last; public: WatchdogListener(void); /// Waits till timeout occurs and then returns true. /** If any error occurs it returns false and watchdog is normally not usable anymore. */ bool Listen(void); /// Similar to Listen() but forces method to exit after limit seconds. /** If limit passed false is returned. If method is exited due to internal error then error argument is filled with true. */ bool Listen(int limit, bool& error); }; /// This class is meant to be used in code which provides "I'm alive" ticks to watchdog. /** \ingroup common * \headerfile Watchdog.h arc/Watchdog.h */ class WatchdogChannel { private: int id_; public: /// Defines watchdog kicking source with specified timeout. /** Code must call Kick() method of this instance to keep watchdog from timeouting. If object is destroyed watchdog does not monitor it anymore. Althogh timeout is specified in seconds real time resolution of watchdog is about 1 minute. */ WatchdogChannel(int timeout); /// Upon destruction channel is closed and watchdog forgets about it. ~WatchdogChannel(void); /// Tells watchdog this source is still alive. 
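    /// A hedged usage sketch ('running' and do_work() are hypothetical):
    /// \code
    /// Arc::WatchdogChannel wd(600);              // expect a kick at least every ~600 seconds
    /// while (running) { do_work(); wd.Kick(); }  // keep the watchdog satisfied
    /// \endcode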
void Kick(void); }; } // namespace Arc #endif // __ARC_WATCHDOG_H__ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/ArcVersion.cpp0000644000000000000000000000012412100524524023517 xustar000000000000000027 mtime=1359128916.008217 27 atime=1513200574.905706 30 ctime=1513200658.842733332 nordugrid-arc-5.4.2/src/hed/libs/common/ArcVersion.cpp0000644000175000002070000000134412100524524023566 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "ArcVersion.h" namespace Arc { static unsigned int extract_subversion(const char* ver,unsigned int pos) { const char* p = ver; if(!p) return 0; for(;pos;--pos) { p = strchr(p,'.'); if(!p) return 0; ++p; } return (unsigned int)strtoul(p,NULL,10); } ArcVersion::ArcVersion(const char* ver): Major(extract_subversion(ver,0)), Minor(extract_subversion(ver,1)), Patch(extract_subversion(ver,2)) { } const ArcVersion Version(ARC_VERSION); } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/hostname_resolver.cpp0000644000000000000000000000012413214315176025213 xustar000000000000000027 mtime=1513200254.761812 27 atime=1513200574.936707 30 ctime=1513200658.874733723 nordugrid-arc-5.4.2/src/hed/libs/common/hostname_resolver.cpp0000644000175000002070000001434613214315176025270 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include // NOTE: On Solaris errno is not working properly if cerrno is included first #include #include #include #include #include #ifndef WIN32 #include #endif #include #include #include #include "hostname_resolver.h" typedef struct { unsigned int size; unsigned int cmd; } header_t; // How long it is allowed for controlling side to react #define COMMUNICATION_TIMEOUT (10) static bool sread_start = true; static bool sread(int s,void* buf,size_t size) { while(size) { #ifndef WIN32 struct pollfd p[1]; p[0].fd = s; p[0].events = POLLIN; p[0].revents = 0; int err = poll(p,1,sread_start?-1:(COMMUNICATION_TIMEOUT*1000)); if(err == 0) return false; if((err == -1) && (errno != EINTR)) return false; if(err == 1) { #else { #endif ssize_t l = ::read(s,buf,size); if(l <= 0) return false; size-=l; buf = (void*)(((char*)buf)+l); sread_start = false; }; }; return true; } static bool swrite(int s,const void* buf,size_t size) { while(size) { #ifndef WIN32 struct pollfd p[1]; p[0].fd = s; p[0].events = POLLOUT; p[0].revents = 0; int err = poll(p,1,COMMUNICATION_TIMEOUT*1000); if(err == 0) return false; if((err == -1) && (errno != EINTR)) return false; if(err == 1) { #else { #endif ssize_t l = ::write(s,buf,size); if(l < 0) return false; size-=l; buf = (void*)(((char*)buf)+l); }; }; return true; } static bool sread_string(int s,std::string& str,unsigned int& maxsize) { unsigned int ssize; if(sizeof(ssize) > maxsize) return false; if(!sread(s,&ssize,sizeof(ssize))) return false; maxsize -= sizeof(ssize); if(ssize > maxsize) return false; str.assign(ssize,' '); // Not nice but saves memory copying if(!sread(s,(void*)(str.c_str()),ssize)) return false; maxsize -= ssize; return true; } static bool sread_buf(int s,void* buf,unsigned int& bufsize,unsigned int& maxsize) { char dummy[1024]; unsigned int size; if(sizeof(size) > maxsize) return false; if(!sread(s,&size,sizeof(size))) return false; maxsize -= sizeof(size); if(size > maxsize) return false; if(size <= bufsize) { if(!sread(s,buf,size)) return false; bufsize = size; maxsize -= size; } else { if(!sread(s,buf,bufsize)) return false; maxsize 
-= bufsize; // skip rest size -= bufsize; while(size > sizeof(dummy)) { if(!sread(s,dummy,sizeof(dummy))) return false; size -= sizeof(dummy); maxsize -= sizeof(dummy); }; if(!sread(s,dummy,size)) return false; maxsize -= size; }; return true; } static bool swrite_result(int s,int cmd,int res,int err) { header_t header; header.cmd = cmd; header.size = sizeof(res) + sizeof(err); if(!swrite(s,&header,sizeof(header))) return -1; if(!swrite(s,&res,sizeof(res))) return -1; if(!swrite(s,&err,sizeof(err))) return -1; return true; } static bool swrite_result(int s,int cmd,int res,int err,const void* add,int addsize) { header_t header; header.cmd = cmd; header.size = sizeof(res) + sizeof(err) + addsize; if(!swrite(s,&header,sizeof(header))) return -1; if(!swrite(s,&res,sizeof(res))) return -1; if(!swrite(s,&err,sizeof(err))) return -1; if(!swrite(s,add,addsize)) return -1; return true; } static bool swrite_result(int s,int cmd,int res,int err,const void* add1,int addsize1,const void* add2,int addsize2) { header_t header; header.cmd = cmd; header.size = sizeof(res) + sizeof(err) + addsize1 + addsize2; if(!swrite(s,&header,sizeof(header))) return -1; if(!swrite(s,&res,sizeof(res))) return -1; if(!swrite(s,&err,sizeof(err))) return -1; if(!swrite(s,add1,addsize1)) return -1; if(!swrite(s,add2,addsize2)) return -1; return true; } static bool swrite_result(int s,int cmd,int res,int err,const std::string& str) { unsigned int l = str.length(); header_t header; header.cmd = cmd; header.size = sizeof(res) + sizeof(err) + sizeof(l) + str.length(); if(!swrite(s,&header,sizeof(header))) return -1; if(!swrite(s,&res,sizeof(res))) return -1; if(!swrite(s,&err,sizeof(err))) return -1; if(!swrite(s,&l,sizeof(l))) return -1; if(!swrite(s,str.c_str(),l)) return -1; return true; } int main(int argc,char* argv[]) { if(argc != 3) return -1; char* e; e = argv[1]; int sin = strtoul(argv[1],&e,10); if((e == argv[1]) || (*e != 0)) return -1; e = argv[2]; int sout = strtoul(argv[2],&e,10); if((e == argv[2]) || (*e != 0)) return -1; while(true) { header_t header; sread_start = true; if(!sread(sin,&header,sizeof(header))) break; switch(header.cmd) { case CMD_PING: { if(header.size != 0) return -1; if(!swrite(sout,&header,sizeof(header))) return -1; }; break; case CMD_RESOLVE_TCP_LOCAL: case CMD_RESOLVE_TCP_REMOTE: { std::string node; std::string service; if(!sread_string(sin,node,header.size)) return -1; if(!sread_string(sin,service,header.size)) return -1; if(header.size) return -1; errno = 0; struct addrinfo hint; ::memset(&hint,0,sizeof(hint)); hint.ai_socktype = SOCK_STREAM; hint.ai_protocol = IPPROTO_TCP; hint.ai_family = AF_UNSPEC; if (header.cmd == CMD_RESOLVE_TCP_LOCAL) hint.ai_flags = AI_PASSIVE; struct addrinfo* addrs = NULL; std::string data; int res = getaddrinfo(node.c_str(),service.c_str(),&hint,&addrs); if(res == 0) { for (struct addrinfo* addr = addrs; addr != NULL; addr = addr->ai_next) { int family = addr->ai_family; socklen_t length = addr->ai_addrlen; struct sockaddr* saddr = addr->ai_addr; data.append((char const*)&family,sizeof(family)); data.append((char const*)&length,sizeof(length)); data.append((char const*)saddr,length); }; freeaddrinfo(addrs); errno = 0; }; if(!swrite_result(sout,header.cmd,res,errno,data.c_str(),data.length())) return -1; }; break; default: return -1; }; }; return 0; } nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/UserConfig.cpp0000644000000000000000000000012413213445240023513 xustar000000000000000027 mtime=1512983200.815191 27 atime=1513200574.915706 30 
ctime=1513200658.861733564 nordugrid-arc-5.4.2/src/hed/libs/common/UserConfig.cpp0000644000175000002070000013652113213445240023570 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "UserConfig.h" #define HANDLESTRATT(ATT, SETTER) \ if (common[ATT]) {\ SETTER((std::string)common[ATT]);\ common[ATT].Destroy();\ if (common[ATT]) {\ logger.msg(WARNING, "Multiple %s attributes in configuration file (%s)", ATT, conffile); \ while (common[ATT]) common[ATT].Destroy();\ }\ } namespace Arc { typedef enum { file_test_missing, file_test_not_file, file_test_wrong_ownership, file_test_wrong_permissions, file_test_success } file_test_status; #ifndef WIN32 static file_test_status user_file_test(const std::string& path, const User& user) { struct stat st; if(::stat(path.c_str(),&st) != 0) return file_test_missing; if(!S_ISREG(st.st_mode)) return file_test_not_file; if(user.get_uid() != st.st_uid) return file_test_wrong_ownership; return file_test_success; } static file_test_status private_file_test(const std::string& path, const User& user) { struct stat st; if(::stat(path.c_str(),&st) != 0) return file_test_missing; if(!S_ISREG(st.st_mode)) return file_test_not_file; if(user.get_uid() != st.st_uid) return file_test_wrong_ownership; if(st.st_mode & (S_IRWXG | S_IRWXO)) return file_test_wrong_permissions; return file_test_success; } #else static file_test_status user_file_test(const std::string& path, const User& /*user*/) { // TODO: implement if(!Glib::file_test(path, Glib::FILE_TEST_EXISTS)) return file_test_missing; if(!Glib::file_test(path, Glib::FILE_TEST_IS_REGULAR)) return file_test_not_file; return file_test_success; } static file_test_status private_file_test(const std::string& path, const User& /*user*/) { // TODO: implement if(!Glib::file_test(path, Glib::FILE_TEST_EXISTS)) return file_test_missing; if(!Glib::file_test(path, Glib::FILE_TEST_IS_REGULAR)) return file_test_not_file; return file_test_success; } #endif static void certificate_file_error_report(file_test_status fts, bool require, const std::string& path, Logger& logger) { if(fts == file_test_success) { } else if(fts == file_test_wrong_ownership) { logger.msg(require?ERROR:VERBOSE, "Wrong ownership of certificate file: %s", path); } else if(fts == file_test_wrong_permissions) { logger.msg(require?ERROR:VERBOSE, "Wrong permissions of certificate file: %s", path); } else { logger.msg(require?ERROR:VERBOSE, "Can not access certificate file: %s", path); } } static void key_file_error_report(file_test_status fts, bool require, const std::string& path, Logger& logger) { if(fts == file_test_success) { } else if(fts == file_test_wrong_ownership) { logger.msg(require?ERROR:VERBOSE, "Wrong ownership of key file: %s", path); } else if(fts == file_test_wrong_permissions) { logger.msg(require?ERROR:VERBOSE, "Wrong permissions of key file: %s", path); } else { logger.msg(require?ERROR:VERBOSE, "Can not access key file: %s", path); } } static void proxy_file_error_report(file_test_status fts, bool require, const std::string& path, Logger& logger) { if(fts == file_test_success) { } else if(fts == file_test_wrong_ownership) { logger.msg(require?ERROR:VERBOSE, "Wrong ownership of proxy file: %s", path); } else if(fts == file_test_wrong_permissions) { logger.msg(require?ERROR:VERBOSE, "Wrong permissions of proxy file: %s", path); } else { 
logger.msg(require?ERROR:VERBOSE, "Can not access proxy file: %s", path); } } static bool dir_test(const std::string& path) { return Glib::file_test(path, Glib::FILE_TEST_IS_DIR); } std::string tostring(const ServiceType st) { switch (st) { case COMPUTING: return istring("computing"); case INDEX: return istring("index"); } return ""; } Logger UserConfig::logger(Logger::getRootLogger(), "UserConfig"); const std::string UserConfig::DEFAULT_BROKER = "Random"; const std::string UserConfig::ARCUSERDIRECTORY = Glib::build_filename(User().Home(), ".arc"); #ifndef WIN32 const std::string UserConfig::SYSCONFIG = G_DIR_SEPARATOR_S "etc" G_DIR_SEPARATOR_S "arc" G_DIR_SEPARATOR_S "client.conf"; #else const std::string UserConfig::SYSCONFIG = ArcLocation::Get() + G_DIR_SEPARATOR_S "etc" G_DIR_SEPARATOR_S "arc" G_DIR_SEPARATOR_S "client.conf"; #endif const std::string UserConfig::SYSCONFIGARCLOC = ArcLocation::Get() + G_DIR_SEPARATOR_S "etc" G_DIR_SEPARATOR_S "arc" G_DIR_SEPARATOR_S "client.conf"; const std::string UserConfig::EXAMPLECONFIG = ArcLocation::Get() + G_DIR_SEPARATOR_S PKGDATASUBDIR G_DIR_SEPARATOR_S "examples" G_DIR_SEPARATOR_S "client.conf"; const std::string UserConfig::DEFAULTCONFIG = Glib::build_filename(ARCUSERDIRECTORY, "client.conf"); const std::string UserConfig::JOBLISTFILE = Glib::build_filename(UserConfig::ARCUSERDIRECTORY, "jobs.dat"); UserConfig::UserConfig(initializeCredentialsType initializeCredentials) : timeout(0), keySize(0), ok(false), initializeCredentials(initializeCredentials) { if (!InitializeCredentials(initializeCredentials)) { return; } ok = true; setDefaults(); } UserConfig::UserConfig(const std::string& conffile, initializeCredentialsType initializeCredentials, bool loadSysConfig) : timeout(0), keySize(0), ok(false), initializeCredentials(initializeCredentials) { setDefaults(); if (loadSysConfig) { #ifndef WIN32 if (Glib::file_test(SYSCONFIG, Glib::FILE_TEST_IS_REGULAR)) { if (!LoadConfigurationFile(SYSCONFIG, true)) logger.msg(INFO, "System configuration file (%s) contains errors.", SYSCONFIG); } else #endif if (Glib::file_test(SYSCONFIGARCLOC, Glib::FILE_TEST_IS_REGULAR)) { if (!LoadConfigurationFile(SYSCONFIGARCLOC, true)) logger.msg(INFO, "System configuration file (%s) contains errors.", SYSCONFIGARCLOC); } else #ifndef WIN32 if (!ArcLocation::Get().empty() && ArcLocation::Get() != G_DIR_SEPARATOR_S) logger.msg(VERBOSE, "System configuration file (%s or %s) does not exist.", SYSCONFIG, SYSCONFIGARCLOC); else logger.msg(VERBOSE, "System configuration file (%s) does not exist.", SYSCONFIG); #else logger.msg(VERBOSE, "System configuration file (%s) does not exist.", SYSCONFIGARCLOC); #endif } if (conffile.empty()) { if (CreateDefaultConfigurationFile()) { if (!LoadConfigurationFile(DEFAULTCONFIG, false)) { logger.msg(WARNING, "User configuration file (%s) contains errors.", DEFAULTCONFIG); return; } } else logger.msg(INFO, "No configuration file could be loaded."); } else if (!Glib::file_test(conffile, Glib::FILE_TEST_IS_REGULAR)) { logger.msg(WARNING, "User configuration file (%s) does not exist or cannot be loaded.", conffile); return; } else if (!LoadConfigurationFile(conffile)) { logger.msg(WARNING, "User configuration file (%s) contains errors.", conffile); return; } if (!InitializeCredentials(initializeCredentials)) { return; } ok = true; } UserConfig::UserConfig(const std::string& conffile, const std::string& jfile, initializeCredentialsType initializeCredentials, bool loadSysConfig) : timeout(0), keySize(0), ok(false), 
initializeCredentials(initializeCredentials) { // If job list file have been specified, try to initialize it, and // if it fails then this object is non-valid (ok = false). setDefaults(); if (!jfile.empty() && !JobListFile(jfile)) return; if (loadSysConfig) { #ifndef WIN32 if (Glib::file_test(SYSCONFIG, Glib::FILE_TEST_IS_REGULAR)) { if (!LoadConfigurationFile(SYSCONFIG, true)) logger.msg(INFO, "System configuration file (%s) contains errors.", SYSCONFIG); } else #endif if (Glib::file_test(SYSCONFIGARCLOC, Glib::FILE_TEST_IS_REGULAR)) { if (!LoadConfigurationFile(SYSCONFIGARCLOC, true)) logger.msg(INFO, "System configuration file (%s) contains errors.", SYSCONFIGARCLOC); } else #ifndef WIN32 if (!ArcLocation::Get().empty() && ArcLocation::Get() != G_DIR_SEPARATOR_S) logger.msg(VERBOSE, "System configuration file (%s or %s) does not exist.", SYSCONFIG, SYSCONFIGARCLOC); else logger.msg(VERBOSE, "System configuration file (%s) does not exist.", SYSCONFIG); #else logger.msg(VERBOSE, "System configuration file (%s) does not exist.", SYSCONFIGARCLOC); #endif } if (conffile.empty()) { if (CreateDefaultConfigurationFile()) { if (!LoadConfigurationFile(DEFAULTCONFIG, !jfile.empty())) { logger.msg(WARNING, "User configuration file (%s) contains errors.", DEFAULTCONFIG); return; } } else logger.msg(INFO, "No configuration file could be loaded."); } else if (!Glib::file_test(conffile, Glib::FILE_TEST_IS_REGULAR)) { logger.msg(WARNING, "User configuration file (%s) does not exist or cannot be loaded.", conffile); return; } else if (!LoadConfigurationFile(conffile, !jfile.empty())) { logger.msg(WARNING, "User configuration file (%s) contains errors.", conffile); return; } // If no job list file have been initialized use the default. If the // job list file cannot be initialized this object is non-valid. if (joblistfile.empty() && !JobListFile(JOBLISTFILE)) return; if (!InitializeCredentials(initializeCredentials)) { return; } ok = true; } UserConfig::UserConfig(const long int& ptraddr) { *this = *((UserConfig*)ptraddr); } void UserConfig::ApplyToConfig(BaseConfig& ccfg) const { if (!credentialString.empty()) { ccfg.AddCredential(credentialString); } else { if (!proxyPath.empty()) ccfg.AddProxy(proxyPath); else { ccfg.AddCertificate(certificatePath); ccfg.AddPrivateKey(keyPath); } } ccfg.AddCADir(caCertificatesDirectory); if(!overlayfile.empty()) ccfg.GetOverlay(overlayfile); } bool UserConfig::Timeout(int newTimeout) { if (newTimeout > 0) { timeout = newTimeout; return true; } return false; } bool UserConfig::Verbosity(const std::string& newVerbosity) { LogLevel ll; if (istring_to_level(newVerbosity, ll)) { verbosity = newVerbosity; return true; } else { logger.msg(WARNING, "Unable to parse the specified verbosity (%s) to one of the allowed levels", newVerbosity); return false; } } bool UserConfig::JobListFile(const std::string& path) { joblistfile = path; return true; } bool UserConfig::JobListType(const std::string& type) { if (type != "XML" && type != "BDB" && type != "SQLITE") { logger.msg(WARNING, "Unsupported job list type '%s', using 'BDB'. 
Supported types are: BDB, SQLITE, XML.", type); joblisttype = "BDB"; return true; } joblisttype = type; return true; } bool UserConfig::Broker(const std::string& nameandarguments) { const std::size_t pos = nameandarguments.find(":"); if (pos != std::string::npos) // Arguments given in 'nameandarguments' broker = std::pair(nameandarguments.substr(0, pos), nameandarguments.substr(pos+1)); else broker = std::pair(nameandarguments, ""); return true; } bool UserConfig::InitializeCredentials(initializeCredentialsType initializeCredentials) { if(initializeCredentials == initializeCredentialsType::SkipCredentials) return true; bool res = true; bool require = ((initializeCredentials == initializeCredentialsType::RequireCredentials) || (initializeCredentials == initializeCredentialsType::SkipCARequireCredentials)); bool test = ((initializeCredentials == initializeCredentialsType::RequireCredentials) || (initializeCredentials == initializeCredentialsType::TryCredentials) || (initializeCredentials == initializeCredentialsType::SkipCARequireCredentials) || (initializeCredentials == initializeCredentialsType::SkipCATryCredentials)); bool noca = ((initializeCredentials == initializeCredentialsType::SkipCARequireCredentials) || (initializeCredentials == initializeCredentialsType::SkipCATryCredentials) || (initializeCredentials == initializeCredentialsType::SkipCANotTryCredentials)); const User user; #ifndef WIN32 std::string home_path = user.Home(); #else std::string home_path = Glib::get_home_dir(); #endif bool has_proxy = false; // Look for credentials. std::string proxy_path = GetEnv("X509_USER_PROXY"); if (!proxy_path.empty()) { proxyPath = proxy_path; file_test_status fts; if (test && ((fts = private_file_test(proxyPath, user)) != file_test_success)) { proxy_file_error_report(fts,require,proxyPath,logger); if(require) { //res = false; } proxyPath.clear(); } else { has_proxy = true; } } else if (!proxyPath.empty()) { file_test_status fts; if (test && ((fts = private_file_test(proxyPath, user)) != file_test_success)) { proxy_file_error_report(fts,require,proxyPath,logger); if(require) { //res = false; } proxyPath.clear(); } else { has_proxy = true; } } else { proxy_path = Glib::build_filename(Glib::get_tmp_dir(), std::string("x509up_u") + tostring(user.get_uid())); proxyPath = proxy_path; file_test_status fts; if (test && ((fts = private_file_test(proxyPath, user)) != file_test_success)) { proxy_file_error_report(fts,require,proxyPath,logger); if (require) { // TODO: Maybe this message should be printed only after checking for key/cert // This is not error yet because there may be key/credentials //res = false; } proxyPath.clear(); } else { has_proxy = true; } } // Should we really handle them in pairs std::string cert_path = GetEnv("X509_USER_CERT"); std::string key_path = GetEnv("X509_USER_KEY"); if (!cert_path.empty() && !key_path.empty()) { certificatePath = cert_path; file_test_status fts; if (test && ((fts = user_file_test(certificatePath, user)) != file_test_success)) { certificate_file_error_report(fts,!has_proxy,certificatePath,logger); if(!has_proxy) { res = false; } certificatePath.clear(); } keyPath = key_path; if (test && ((fts = private_file_test(keyPath, user)) != file_test_success)) { key_file_error_report(fts,!has_proxy,keyPath,logger); if(!has_proxy) { res = false; } keyPath.clear(); } } else if (!certificatePath.empty() && !keyPath.empty()) { file_test_status fts; if (test && ((fts = user_file_test(certificatePath, user)) != file_test_success)) { 
certificate_file_error_report(fts,!has_proxy,certificatePath,logger); if(!has_proxy) { res = false; } certificatePath.clear(); } if (test && ((fts = private_file_test(keyPath, user)) != file_test_success)) { key_file_error_report(fts,!has_proxy,keyPath,logger); if(!has_proxy) { res = false; } keyPath.clear(); } } else if (!certificatePath.empty() && (certificatePath.find(".p12") != std::string::npos) && keyPath.empty()) { //If only certificatePath provided, then it could be a pkcs12 file file_test_status fts; if (test && ((fts = user_file_test(certificatePath, user)) != file_test_success)) { certificate_file_error_report(fts,!has_proxy,certificatePath,logger); if(!has_proxy) { res = false; } certificatePath.clear(); } } else if (!cert_path.empty() && (cert_path.find(".p12") != std::string::npos) && key_path.empty()) { //If only cert_path provided, then it could be a pkcs12 file certificatePath = cert_path; file_test_status fts; if (test && ((fts = user_file_test(cert_path, user)) != file_test_success)) { certificate_file_error_report(fts,!has_proxy,cert_path,logger); if(!has_proxy) { res = false; } cert_path.clear(); } } else if (test) { // Guessing starts here // First option is also main default std::list search_paths; search_paths.push_back(home_path+G_DIR_SEPARATOR_S+".arc"+G_DIR_SEPARATOR_S); search_paths.push_back(home_path+G_DIR_SEPARATOR_S+".globus"+G_DIR_SEPARATOR_S); search_paths.push_back(ArcLocation::Get()+G_DIR_SEPARATOR_S+"etc"+G_DIR_SEPARATOR_S+"arc"+G_DIR_SEPARATOR_S); // TODO: is it really safe to take credentials from ./ ? NOOOO search_paths.push_back(Glib::get_current_dir() + G_DIR_SEPARATOR_S); std::string tried_paths = ""; std::list::const_iterator it = search_paths.begin(); for (; it != search_paths.end(); ++it) { cert_path = (*it) + "usercert.pem"; key_path = (*it) + "userkey.pem"; file_test_status fts1 = user_file_test(cert_path, user); file_test_status fts2 = private_file_test(key_path, user); if (fts1 == file_test_success && fts2 == file_test_success) { certificatePath = cert_path; keyPath = key_path; break; } else if (fts1 != file_test_success && (fts1 != file_test_missing || fts2 == file_test_success)) { certificate_file_error_report(fts1,false,cert_path,logger); break; } else if (fts2 != file_test_success && (fts2 != file_test_missing || fts1 == file_test_success)) { key_file_error_report(fts2,false,key_path,logger); break; } if (tried_paths != "") { tried_paths += ", "; } tried_paths += "'" + (*it) + "'"; } if (it == search_paths.end() && !has_proxy) { logger.msg(WARNING, "Certificate and key ('%s' and '%s') not found in any of the paths: %s", "usercert.pem", "userkey.pem", tried_paths); logger.msg(WARNING, "If the proxy or certificate/key does exist, you can manually specify the locations via environment variables " "'%s'/'%s' or '%s', or the '%s'/'%s' or '%s' attributes in the client configuration file (e.g. 
'%s')", "X509_USER_CERT", "X509_USER_KEY", "X509_USER_PROXY", "certificatepath", "proxypath", "keypath", "~/.arc/client.conf"); } if((certificatePath.empty() || keyPath.empty()) && !has_proxy) { res = false; } } if(!noca) { std::string ca_dir = GetEnv("X509_CERT_DIR"); //std::cerr<<"-- ca_dir = "< serviceStrings; tokenize(alias.Child(), serviceStrings, " \t"); for (std::list::iterator it = serviceStrings.begin(); it != serviceStrings.end(); it++) { std::list services = GetServices(*it); if (services.empty()) { ConfigEndpoint service = ServiceFromLegacyString(*it); if (service) { services.push_back(service); } } groupMap[group].insert(groupMap[group].end(), services.begin(), services.end()); } alias.Child().Destroy(); } alias.Destroy(); } if (ini["common"]) { XMLNode common = ini["common"]; HANDLESTRATT("verbosity", Verbosity) if (!verbosity.empty()) logger.setThreshold(Arc::istring_to_level(verbosity)); if (!ignoreJobListFile) { HANDLESTRATT("joblist", JobListFile) } HANDLESTRATT("joblisttype", JobListType) if (common["timeout"]) { if (!stringto(common["timeout"], timeout)) logger.msg(WARNING, "The value of the timeout attribute in the configuration file (%s) was only partially parsed", conffile); common["timeout"].Destroy(); if (common["timeout"]) { logger.msg(WARNING, "Multiple %s attributes in configuration file (%s)", "timeout", conffile); while (common["timeout"]) common["timeout"].Destroy(); } } if (common["brokername"]) { broker = std::pair(common["brokername"], common["brokerarguments"] ? common["brokerarguments"] : ""); common["brokername"].Destroy(); if (common["brokername"]) { logger.msg(WARNING, "Multiple %s attributes in configuration file (%s)", "brokername", conffile); while (common["brokername"]) common["brokername"].Destroy(); } if (common["brokerarguments"]) { common["brokerarguments"].Destroy(); if (common["brokerarguments"]) { logger.msg(WARNING, "Multiple %s attributes in configuration file (%s)", "brokerarguments", conffile); while (common["brokerarguments"]) common["brokerarguments"].Destroy(); } } } // This block must be executed after the 'brokername' block. 
if (common["brokerarguments"]) { logger.msg(WARNING, "The brokerarguments attribute can only be used in conjunction with the brokername attribute"); while (common["brokerarguments"]) common["brokerarguments"].Destroy(); } HANDLESTRATT("vomsespath", VOMSESPath) HANDLESTRATT("username", UserName) HANDLESTRATT("password", Password) HANDLESTRATT("proxypath", ProxyPath) HANDLESTRATT("certificatepath", CertificatePath) HANDLESTRATT("keypath", KeyPath) HANDLESTRATT("keypassword", KeyPassword) if (common["keysize"]) { if (!stringto(ini["common"]["keysize"], keySize)) logger.msg(WARNING, "The value of the keysize attribute in the configuration file (%s) was only partially parsed", conffile); common["keysize"].Destroy(); if (common["keysize"]) { logger.msg(WARNING, "Multiple %s attributes in configuration file (%s)", "keysize", conffile); while (common["keysize"]) common["keysize"].Destroy(); } } HANDLESTRATT("cacertificatepath", CACertificatePath) HANDLESTRATT("cacertificatesdirectory", CACertificatesDirectory) if (common["certificatelifetime"]) { certificateLifeTime = Period((std::string)common["certificatelifetime"]); common["certificatelifetime"].Destroy(); if (common["certificatelifetime"]) { logger.msg(WARNING, "Multiple %s attributes in configuration file (%s)", "certificatelifetime", conffile); while (common["certificatelifetime"]) common["certificatelifetime"].Destroy(); } } if (common["slcs"]) { slcs = URL((std::string)common["slcs"]); if (!slcs) { logger.msg(WARNING, "Could not convert the slcs attribute value (%s) to an URL instance in configuration file (%s)", (std::string)common["slcs"], conffile); slcs = URL(); } common["slcs"].Destroy(); if (common["slcs"]) { logger.msg(WARNING, "Multiple %s attributes in configuration file (%s)", "slcs", conffile); while (common["slcs"]) common["slcs"].Destroy(); } } HANDLESTRATT("jobdownloaddirectory", JobDownloadDirectory) HANDLESTRATT("storedirectory", StoreDirectory) HANDLESTRATT("idpname", IdPName) HANDLESTRATT("infointerface", InfoInterface) HANDLESTRATT("submissioninterface", SubmissionInterface) // Legacy defaultservices support if (common["defaultservices"]) { std::list defaultServicesStr; tokenize(common["defaultservices"], defaultServicesStr, " \t"); for (std::list::const_iterator it = defaultServicesStr.begin(); it != defaultServicesStr.end(); it++) { ConfigEndpoint service = ServiceFromLegacyString(*it); if (service) defaultServices.push_back(service); } common["defaultservices"].Destroy(); if (common["defaultservices"]) { logger.msg(WARNING, "Multiple %s attributes in configuration file (%s)", "defaultservices", conffile); while (common["defaultservices"]) common["defaultservices"].Destroy(); } } while (common["rejectdiscovery"]) { rejectDiscoveryURLs.push_back((std::string)common["rejectdiscovery"]); common["rejectdiscovery"].Destroy(); } while (common["rejectmanagement"]) { rejectManagementURLs.push_back((std::string)common["rejectmanagement"]); common["rejectmanagement"].Destroy(); } HANDLESTRATT("overlayfile", OverlayFile) if(!overlayfile.empty()) if (!Glib::file_test(overlayfile, Glib::FILE_TEST_IS_REGULAR)) logger.msg(WARNING, "Specified overlay file (%s) does not exist.", overlayfile); while (common.Child()) { logger.msg(WARNING, "Unknown attribute %s in common section of configuration file (%s), ignoring it", common.Child().Name(), conffile); common.Child().Destroy(); } common.Destroy(); } const std::string registrySectionPrefix = "registry/"; const std::string computingSectionPrefix = "computing/"; while (XMLNode section = 
ini.Child()) { std::string sectionName = section.Name(); if (sectionName.find(registrySectionPrefix) == 0 || sectionName.find(computingSectionPrefix) == 0) { ConfigEndpoint service(section["url"]); std::string alias; if (sectionName.find(registrySectionPrefix) == 0) { alias = sectionName.substr(registrySectionPrefix.length()); service.type = ConfigEndpoint::REGISTRY; service.InterfaceName = (std::string)section["registryinterface"]; } else { alias = sectionName.substr(computingSectionPrefix.length()); service.type = ConfigEndpoint::COMPUTINGINFO; service.InterfaceName = (std::string)section["infointerface"]; if (service.InterfaceName.empty()) { service.InterfaceName = InfoInterface(); } service.RequestedSubmissionInterfaceName = (std::string)section["submissioninterface"]; if (service.RequestedSubmissionInterfaceName.empty()) { service.RequestedSubmissionInterfaceName = SubmissionInterface(); } } allServices[alias] = service; if (section["default"] && section["default"] != "no") { defaultServices.push_back(service); } while (section["group"]) { groupMap[section["group"]].push_back(service); section["group"].Destroy(); } } else { logger.msg(INFO, "Unknown section %s, ignoring it", sectionName); } section.Destroy(); } logger.msg(INFO, "Configuration (%s) loaded", conffile); } else logger.msg(WARNING, "Could not load configuration (%s)", conffile); } return true; } bool UserConfig::SaveToFile(const std::string& filename) const { std::ofstream file(filename.c_str(), std::ios::out); if (!file) return false; file << "[common]" << std::endl; if (!proxyPath.empty()) file << "proxypath = " << proxyPath << std::endl; if (!certificatePath.empty()) file << "certificatepath = " << certificatePath << std::endl; if (!keyPath.empty()) file << "keypath = " << keyPath << std::endl; if (!keyPassword.empty()) file << "keypassword = " << keyPassword << std::endl; if (keySize > 0) file << "keysize = " << keySize << std::endl; if (!caCertificatePath.empty()) file << "cacertificatepath = " << caCertificatePath << std::endl; if (!caCertificatesDirectory.empty()) file << "cacertificatesdirectory = " << caCertificatesDirectory << std::endl; if (certificateLifeTime > 0) file << "certificatelifetime = " << certificateLifeTime << std::endl; if (slcs) file << "slcs = " << slcs.fullstr() << std::endl; if (!rejectDiscoveryURLs.empty()) { for (std::list::const_iterator it = rejectDiscoveryURLs.begin(); it != rejectDiscoveryURLs.end(); it++) { file << "rejectdiscovery = " << (*it) << std::endl; } } if (!rejectManagementURLs.empty()) { for (std::list::const_iterator it = rejectManagementURLs.begin(); it != rejectManagementURLs.end(); it++) { file << "rejectmanagement = " << (*it) << std::endl; } } if (!verbosity.empty()) file << "verbosity = " << verbosity << std::endl; if (!joblistfile.empty()) file << "joblist = " << joblistfile << std::endl; if (timeout > 0) file << "timeout = " << timeout << std::endl; if (!broker.first.empty()) { file << "brokername = " << broker.first << std::endl; if (!broker.second.empty()) file << "brokerarguments = " << broker.second << std::endl; } if (!vomsesPath.empty()) file << "vomsespath = " << vomsesPath << std::endl; if (!username.empty()) file << "username = " << username << std::endl; if (!password.empty()) file << "password = " << password << std::endl; if (!storeDirectory.empty()) file << "storedirectory = " << storeDirectory << std::endl; if (!idPName.empty()) file << "idpname = " << idPName << std::endl; if (!overlayfile.empty()) file << "overlayfile = " << overlayfile << std::endl; 
if (!submissioninterface.empty()) file << "submissioninterface = " << submissioninterface << std::endl; if (!infointerface.empty()) file << "infointerface = " << infointerface << std::endl; for (std::map::const_iterator it = allServices.begin(); it != allServices.end(); it++) { if (it->second.type == ConfigEndpoint::REGISTRY) { file << "[registry/" << it->first << "]" << std::endl; file << "url = " << it->second.URLString << std::endl; if (!it->second.InterfaceName.empty()) { file << "registryinterface = " << it->second.InterfaceName << std::endl; } } else { file << "[computing/" << it->first << "]" << std::endl; file << "url = " << it->second.URLString << std::endl; if (!it->second.InterfaceName.empty()) { file << "infointerface = " << it->second.InterfaceName << std::endl; } if (!it->second.RequestedSubmissionInterfaceName.empty()) { file << "submissioninterface = " << it->second.RequestedSubmissionInterfaceName << std::endl; } } if (std::find(defaultServices.begin(), defaultServices.end(), it->second) != defaultServices.end()) { file << "default = yes" << std::endl; } for (std::map >::const_iterator git = groupMap.begin(); git != groupMap.end(); git++) { if (std::find(git->second.begin(), git->second.end(), it->second) != git->second.end()) { file << "group = " << git->first << std::endl; } } } logger.msg(INFO, "UserConfiguration saved to file (%s)", filename); return true; } bool UserConfig::CreateDefaultConfigurationFile() const { // If the default configuration file does not exist, copy an example file. if (!Glib::file_test(DEFAULTCONFIG, Glib::FILE_TEST_EXISTS)) { // Check if the parent directory exist. if (!Glib::file_test(ARCUSERDIRECTORY, Glib::FILE_TEST_EXISTS)) { // Create directory. if (!makeDir(ARCUSERDIRECTORY)) { logger.msg(WARNING, "Unable to create %s directory.", ARCUSERDIRECTORY); return false; } } if (dir_test(ARCUSERDIRECTORY)) { if (Glib::file_test(EXAMPLECONFIG, Glib::FILE_TEST_IS_REGULAR)) { // Destination: Get basename, remove example prefix and add .arc directory. 
if (copyFile(EXAMPLECONFIG, DEFAULTCONFIG)) logger.msg(VERBOSE, "Configuration example file created (%s)", DEFAULTCONFIG); else logger.msg(INFO, "Unable to copy example configuration from existing configuration (%s)", EXAMPLECONFIG); return false; } else { logger.msg(INFO, "Cannot copy example configuration (%s), it is not a regular file", EXAMPLECONFIG); return false; } } else { logger.msg(INFO, "Example configuration (%s) not created.", DEFAULTCONFIG); return false; } } else if (!Glib::file_test(DEFAULTCONFIG, Glib::FILE_TEST_IS_REGULAR)) { logger.msg(INFO, "The default configuration file (%s) is not a regular file.", DEFAULTCONFIG); return false; } return true; } void UserConfig::setDefaults() { timeout = DEFAULT_TIMEOUT; broker.first = DEFAULT_BROKER; broker.second = ""; } bool UserConfig::makeDir(const std::string& path) { bool dirCreated = false; dirCreated = DirCreate(path, 0755); if (dirCreated) logger.msg(INFO, "%s directory created", path); else logger.msg(WARNING, "Failed to create directory %s", path); return dirCreated; } bool UserConfig::copyFile(const std::string& source, const std::string& destination) { /* TODO: Make FileUtils function to this #ifdef HAVE_GIOMM try { return Gio::File::create_for_path(source)->copy(Gio::File::create_for_path(destination), Gio::FILE_COPY_NONE); } catch (Gio::Error e) { logger.msg(WARNING, "%s", (std::string)e.what()); return false; } #else */ std::ifstream ifsSource(source.c_str(), std::ios::in | std::ios::binary); if (!ifsSource) return false; std::ofstream ofsDestination(destination.c_str(), std::ios::out | std::ios::binary); if (!ofsDestination) return false; int bytesRead = -1; char buff[1024]; while (bytesRead != 0) { ifsSource.read((char*)buff, 1024); bytesRead = ifsSource.gcount(); ofsDestination.write((char*)buff, bytesRead); } return true; //#endif } bool UserConfig::UtilsDirPath(const std::string& dir) { if (!DirCreate(dir, 0700, true)) logger.msg(WARNING, "Failed to create directory %s", dir); else utilsdir = dir; return true; } std::list UserConfig::FilterServices(const std::list& unfilteredServices, ConfigEndpoint::Type type) { std::list services; for (std::list::const_iterator it = unfilteredServices.begin(); it != unfilteredServices.end(); it++) { if (type == ConfigEndpoint::ANY || type == it->type) { services.push_back(*it); } } return services; } std::list UserConfig::GetDefaultServices(ConfigEndpoint::Type type) { return FilterServices(defaultServices, type); } ConfigEndpoint UserConfig::GetService(const std::string& alias) { return allServices[alias]; } std::list UserConfig::GetServices(const std::string& groupOrAlias, ConfigEndpoint::Type type) { std::list services = GetServicesInGroup(groupOrAlias); if (services.empty()) { ConfigEndpoint service = GetService(groupOrAlias); if (service) services.push_back(service); } return FilterServices(services, type); } std::list UserConfig::GetServicesInGroup(const std::string& group, ConfigEndpoint::Type type) { return FilterServices(groupMap[group], type); } ConfigEndpoint UserConfig::ServiceFromLegacyString(std::string type_flavour_url) { ConfigEndpoint service; size_t pos = type_flavour_url.find(":"); if (pos != type_flavour_url.npos) { std::string type = type_flavour_url.substr(0, pos); std::string flavour_url = type_flavour_url.substr(pos + 1); if (type == "index") { service.type = ConfigEndpoint::REGISTRY; } else if (type == "computing") { service.type = ConfigEndpoint::COMPUTINGINFO; } else { return service; } pos = flavour_url.find(":"); if (pos != flavour_url.npos) { 
std::string flavour = flavour_url.substr(0, pos); std::string url = flavour_url.substr(pos + 1); if (service.type == ConfigEndpoint::REGISTRY) { std::string registryinterface = "org.nordugrid.emir"; if (flavour == "ARC0") registryinterface = "org.nordugrid.ldapegiis"; service.InterfaceName = registryinterface; } else if (service.type == ConfigEndpoint::COMPUTINGINFO) { std::string infointerface = "org.nordugrid.ldapglue2"; if (flavour == "ARC0") infointerface = "org.nordugrid.ldapng"; if (flavour == "ARC1") infointerface = "org.nordugrid.wsrfglue2"; if (flavour == "EMIES") infointerface = "org.ogf.glue.emies.resourceinfo"; if (flavour == "CREAM") infointerface = "org.nordugrid.ldapglue1"; service.InterfaceName = infointerface; } service.URLString = url; } } return service; } static std::string cert_file_fix(const std::string& old_file,std::string& new_file) { struct stat st; if(old_file.empty()) return old_file; if(::stat(old_file.c_str(),&st) != 0) return old_file; // No getuid on win32 #ifndef WIN32 if(::getuid() == st.st_uid) return old_file; #endif std::string tmpname = Glib::build_filename(Glib::get_tmp_dir(), "arccred.XXXXXX"); if (!TmpFileCreate(tmpname, "")) return old_file; if (!FileCopy(old_file, tmpname)) { unlink(tmpname.c_str()); return old_file; } new_file = tmpname; return new_file; } #define GET_OLD_VAR(name,var,set) { \ var = GetEnv(name,set); \ } #define SET_OLD_VAR(name,var,set) { \ if(set) { \ SetEnv(name,var,true); \ } else { \ UnsetEnv(name); \ }; \ } #define SET_NEW_VAR(name,val) { \ std::string v = val; \ if(v.empty()) { \ UnsetEnv(name); \ } else { \ SetEnv(name,v,true); \ }; \ } #define SET_NEW_VAR_FILE(name,val,fname) { \ std::string v = val; \ if(v.empty()) { \ UnsetEnv(name); \ } else { \ v = cert_file_fix(v,fname); \ SetEnv(name,v,true); \ }; \ } CertEnvLocker::CertEnvLocker(const UserConfig& cfg) { EnvLockAcquire(); GET_OLD_VAR("X509_USER_KEY",x509_user_key_old,x509_user_key_set); GET_OLD_VAR("X509_USER_CERT",x509_user_cert_old,x509_user_cert_set); GET_OLD_VAR("X509_USER_PROXY",x509_user_proxy_old,x509_user_proxy_set); GET_OLD_VAR("X509_CERT_DIR",ca_cert_dir_old,ca_cert_dir_set); SET_NEW_VAR_FILE("X509_USER_KEY",cfg.KeyPath(),x509_user_key_new); SET_NEW_VAR_FILE("X509_USER_CERT",cfg.CertificatePath(),x509_user_cert_new); SET_NEW_VAR_FILE("X509_USER_PROXY",cfg.ProxyPath(),x509_user_proxy_new); SET_NEW_VAR("X509_CERT_DIR",cfg.CACertificatesDirectory()); EnvLockWrap(false); } CertEnvLocker::~CertEnvLocker(void) { EnvLockUnwrap(false); SET_OLD_VAR("X509_CERT_DIR",ca_cert_dir_old,ca_cert_dir_set); SET_OLD_VAR("X509_USER_PROXY",x509_user_proxy_old,x509_user_proxy_set); SET_OLD_VAR("X509_USER_CERT",x509_user_cert_old,x509_user_cert_set); SET_OLD_VAR("X509_USER_KEY",x509_user_key_old,x509_user_key_set); if(!x509_user_key_new.empty()) ::unlink(x509_user_key_new.c_str()); if(!x509_user_cert_new.empty()) ::unlink(x509_user_cert_new.c_str()); if(!x509_user_proxy_new.empty()) ::unlink(x509_user_proxy_new.c_str()); EnvLockRelease(); } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/IniConfig.cpp0000644000000000000000000000012411743732400023317 xustar000000000000000027 mtime=1334818048.269851 27 atime=1513200574.919706 30 ctime=1513200658.866733625 nordugrid-arc-5.4.2/src/hed/libs/common/IniConfig.cpp0000644000175000002070000000457711743732400023401 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "IniConfig.h" namespace Arc { 
IniConfig::IniConfig(const std::string& filename) : XMLNode(NS(), "IniConfig") { std::ifstream is(filename.c_str()); std::string line; XMLNode section; while (getline(is, line)) { line = trim(line, " \t\r\n"); if (line.empty() || line[0] == '#') continue; if (line[0] == '[' && line[line.size() - 1] == ']') { std::string sectionname = trim(line.substr(1, line.size() - 2), " \t"); section = (sectionname == "common" && Get("common") ? Get("common") : NewChild(sectionname)); continue; } std::string::size_type sep = line.find('='); if (sep == std::string::npos) { continue; } std::string attr = trim(line.substr(0, sep), " \t"); std::string value = trim(line.substr(sep + 1), " \t"); if (!section) section = NewChild("common"); section.NewChild(attr) = value; } } IniConfig::~IniConfig() {} bool IniConfig::Evaluate(Config &cfg) { std::string profilename = (*this)["common"]["profile"]; if (profilename.empty()) { profilename = "general"; } if (Glib::file_test(profilename, Glib::FILE_TEST_EXISTS) == false) { // If profilename does not contain directory separators and do not have xml suffix, then look for the profile in ARC profile directory. if ((profilename.find(G_DIR_SEPARATOR_S) == std::string::npos) && (profilename.substr(profilename.size()>=4?profilename.size()-4:0) != ".xml")) { const std::string pkgprofilename = ArcLocation::Get() + G_DIR_SEPARATOR_S PKGDATASUBDIR G_DIR_SEPARATOR_S "profiles" G_DIR_SEPARATOR_S + profilename + ".xml"; if (Glib::file_test(pkgprofilename, Glib::FILE_TEST_EXISTS)) { profilename = pkgprofilename; } } else { std::cerr << profilename << " does not exist" << std::endl; return false; } } Profile profile(profilename); profile.Evaluate(cfg, *this); return true; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/XMLNode.cpp0000644000000000000000000000012412223053526022717 xustar000000000000000027 mtime=1380734806.964267 27 atime=1513200574.896706 30 ctime=1513200658.863733589 nordugrid-arc-5.4.2/src/hed/libs/common/XMLNode.cpp0000644000175000002070000013042512223053526022771 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "XMLNode.h" #include "Utils.h" namespace Arc { // prefix == NULL means node should have no namespace // for default namespace prefix == "" is used static void SetName(xmlNodePtr node, const char *name, const char *prefix) { if (!node) return; xmlNsPtr ns = NULL; if(prefix) { // libxml expect empty prefix to be NULL, not empty string. 
ns = xmlSearchNs(node->doc, node, (const xmlChar*)(prefix[0]?prefix:NULL)); } // ns element is located at same place in Node and Attr elements node->ns = ns; xmlNodeSetName(node, (const xmlChar*)name); } // prefix == NULL means node should have no namespace // for default namespace prefix == "" is used static void SetPrefix(xmlNodePtr node, const char *prefix, int recursion) { if (!node) return; if ((node->type != XML_ELEMENT_NODE) && (node->type != XML_ATTRIBUTE_NODE)) return; if(!prefix || (node->type == XML_ATTRIBUTE_NODE)) { // Request to remove namespace or attribute - attributes have no namespaces node->ns = NULL; } else { xmlNsPtr ns = xmlSearchNs(node->doc, node, (const xmlChar*)(prefix[0]?prefix:NULL)); node->ns = ns; } if (recursion == 0) return; for (xmlNodePtr node_ = node->children; node_; node_ = node_->next) { SetPrefix(node_, prefix, (recursion>0)?(recursion-1):(-1)); } } static xmlNsPtr GetNamespace(xmlNodePtr node) { if (node == NULL) return NULL; xmlNsPtr ns_ = NULL; if (node->type == XML_ELEMENT_NODE) { ns_ = node->ns; } else if (node->type == XML_ATTRIBUTE_NODE) { ns_ = ((xmlAttrPtr)node)->ns; }; if (ns_) return ns_; if (node->parent) return GetNamespace(node->parent); return NULL; } static bool MatchXMLName(xmlNodePtr node1, xmlNodePtr node2) { if (node1 == NULL) return false; if (node2 == NULL) return false; if (strcmp((char*)(node1->name), (char*)(node2->name)) != 0) return false; if (node1->type != node2->type) return false; if ((node1->type != XML_ELEMENT_NODE) && (node1->type != XML_ATTRIBUTE_NODE)) return true; xmlNsPtr ns1 = GetNamespace(node1); xmlNsPtr ns2 = GetNamespace(node2); if (ns1 == ns2) return true; if (ns1 && ns2) { if(ns1->href && ns2->href) { return (strcmp((const char*)(ns1->href),(const char*)(ns2->href)) == 0); }; }; return false; } static bool MatchXMLName(xmlNodePtr node, const char *name) { if (node == NULL) return false; if (name == NULL) return false; const char *name_ = strrchr(name, ':'); if (name_ == NULL) { name_ = name; } else { ++name_; }; if (strcmp(name_, (char*)(node->name)) != 0) return false; if (name_ == name) return true; xmlNsPtr ns_ = GetNamespace(node); std::string ns(name, name_ - name - 1); if (ns_ == NULL) return ns.empty(); if (ns.find(':') != std::string::npos) { // URI if (ns_->href == NULL) return false; return (ns == (const char*)(ns_->href)); } else { // prefix if (ns_->prefix == NULL) return ns.empty(); return (ns == (const char*)(ns_->prefix)); } } static bool MatchXMLNamespace(xmlNodePtr node1, xmlNodePtr node2) { if (node1 == NULL) return false; if (node2 == NULL) return false; if (node1->type != node2->type) return false; if ((node1->type != XML_ELEMENT_NODE) && (node1->type != XML_ATTRIBUTE_NODE)) return false; xmlNsPtr ns1 = GetNamespace(node1); xmlNsPtr ns2 = GetNamespace(node2); if (ns1 == ns2) return true; if (ns1 && ns2) { if(ns1->href && ns2->href) { return (strcmp((const char*)(ns1->href),(const char*)(ns2->href)) == 0); }; }; return false; } static bool MatchXMLNamespace(xmlNodePtr node, const char *uri) { if (node == NULL) return false; if (uri == NULL) return false; xmlNsPtr ns_ = GetNamespace(node); if ((ns_ == NULL) || (ns_->href == NULL)) return (uri[0] == 0); return (strcmp(uri, (const char*)(ns_->href)) == 0); } bool MatchXMLName(const XMLNode& node1, const XMLNode& node2) { return MatchXMLName(node1.node_, node2.node_); } bool MatchXMLName(const XMLNode& node, const char *name) { return MatchXMLName(node.node_, name); } bool MatchXMLName(const XMLNode& node, const std::string& name) { return 
MatchXMLName(node.node_, name.c_str()); } bool MatchXMLNamespace(const XMLNode& node1, const XMLNode& node2) { return MatchXMLNamespace(node1.node_, node2.node_); } bool MatchXMLNamespace(const XMLNode& node, const char *uri) { return MatchXMLNamespace(node.node_, uri); } bool MatchXMLNamespace(const XMLNode& node, const std::string& uri) { return MatchXMLNamespace(node.node_, uri.c_str()); } static void ReplaceNamespace(xmlNsPtr ns, xmlNodePtr node, xmlNsPtr new_ns) { if (node->type == XML_ELEMENT_NODE) { if (node->ns == ns) node->ns = new_ns; for (xmlAttrPtr node_ = node->properties; node_; node_ = node_->next) ReplaceNamespace(ns, (xmlNodePtr)node_, new_ns); for (xmlNodePtr node_ = node->children; node_; node_ = node_->next) ReplaceNamespace(ns, node_, new_ns); } else if (node->type == XML_ATTRIBUTE_NODE) { if (((xmlAttrPtr)node)->ns == ns) ((xmlAttrPtr)node)->ns = new_ns; } else return; } static void ReassignNamespace(xmlNsPtr ns, xmlNodePtr node,bool keep = false,int recursion = -1) { if(recursion >= 0) keep = true; xmlNsPtr ns_cur = node->nsDef; xmlNsPtr ns_prev = NULL; for (; ns_cur;) { if (ns == ns_cur) { ns_prev = ns_cur; ns_cur = ns_cur->next; continue; } if (ns->href && ns_cur->href && (xmlStrcmp(ns->href, ns_cur->href) == 0)) { ReplaceNamespace(ns_cur, node, ns); if(!keep) { // Unlinking namespace from tree if (ns_prev) ns_prev->next = ns_cur->next; else node->nsDef = ns_cur->next; xmlNsPtr ns_tmp = ns_cur; ns_cur = ns_cur->next; xmlFreeNs(ns_tmp); } else { ns_cur = ns_cur->next; } continue; } ns_prev = ns_cur; ns_cur = ns_cur->next; } if(recursion == 0) return; if(recursion > 0) --recursion; for (xmlNodePtr node_ = node->children; node_; node_ = node_->next) { ReassignNamespace(ns, node_, keep, recursion); } } // Adds new 'namespaces' to namespace definitions of 'node_'. // The 'node_' and its children are converted to new prefixes. // If keep == false all existing namespaces with same href // defined in 'node_' or children are removed. // 'recursion' limits how deep to follow children nodes. 0 for // 'node_' only. -1 for unlimited depth. If 'recursion' is set // to >=0 then existing namespaces always kept disregarding // value of 'keep'. Otherwise some XML node would be left // without valid namespaces. static void SetNamespaces(const NS& namespaces, xmlNodePtr node_,bool keep = false,int recursion = -1) { for (NS::const_iterator ns = namespaces.begin(); ns != namespaces.end(); ++ns) { // First check maybe this namespace is already defined xmlNsPtr ns_ = xmlSearchNsByHref(node_->doc, node_, (const xmlChar*)(ns->second.c_str())); if (ns_) { const char *prefix = (const char*)(ns_->prefix); if (!prefix) prefix = ""; if (ns->first == prefix) { // Same namespace with same prefix - doing nothing } else { // Change to new prefix ns_ = NULL; } } if (!ns_) { // New namespace needed // If the namespace's name is defined then pass it to the libxml function else set the value as default namespace ns_ = xmlNewNs(node_, (const xmlChar*)(ns->second.c_str()), ns->first.empty() ? NULL : (const xmlChar*)(ns->first.c_str())); if (ns_ == NULL) // There is already namespace with same prefix (or some other error) // TODO: optional change of prefix return; } // Go through all children removing same namespaces and reassigning elements to this one. 
ReassignNamespace(ns_, node_, keep, recursion); } } static void GetNamespaces(NS& namespaces, xmlNodePtr node_) { if (node_ == NULL) return; if (node_->type != XML_ELEMENT_NODE) return; // TODO: Check for duplicate prefixes xmlNsPtr ns = node_->nsDef; for (; ns; ns = ns->next) { std::string prefix = ns->prefix ? (char*)(ns->prefix) : ""; if (ns->href) { if (namespaces[prefix].empty()) namespaces[prefix] = (char*)(ns->href); } } GetNamespaces(namespaces, node_->parent); } // Finds all namespaces defined in XML subtree specified by node_. static void CollectLocalNamespaces(xmlNodePtr node_, std::map& localns) { if (node_ == NULL) return; if (node_->type != XML_ELEMENT_NODE) return; xmlNodePtr node = node_; for(;;) { if(node->type == XML_ELEMENT_NODE) { for(xmlNsPtr ns = node->nsDef; ns ; ns = ns->next) { localns[ns] = NULL; } } // 1. go down if(node->children) { node = node->children; continue; } // 2. if impossible go next if(node->next) { node = node->next; continue; } // 3. if impossible go up till next exists and then to next for(;;) { if((node == node_) || (!node)) return; node = node->parent; if((node == node_) || (!node)) return; if(node->next) { node = node->next; break; } } } } // Finds all namespaces referenced in XML subtree specified by node_ which // are not defined there. static void CollectExternalNamespaces(xmlNodePtr node_, std::map& extns) { if (node_ == NULL) return; if (node_->type != XML_ELEMENT_NODE) return; std::map localns; xmlNodePtr node = node_; for(;;) { if(node->type == XML_ELEMENT_NODE) { for(xmlNsPtr ns = node->nsDef; ns ; ns = ns->next) { localns[ns] = NULL; } // Look for refered namespaces if(node->ns) { if(localns.find(node->ns) == localns.end()) extns[node->ns] = NULL; } for(xmlAttrPtr attr = node->properties; attr ; attr = attr->next) { if(attr->ns) { if(localns.find(attr->ns) == localns.end()) extns[attr->ns] = NULL; } } } // 1. go down if(node->children) { node = node->children; continue; } // 2. if impossible go next if(node->next) { node = node->next; continue; } // 3. if impossible go up till next exists and then to next for(;;) { if((node == node_) || (!node)) return; node = node->parent; if((node == node_) || (!node)) return; if(node->next) { node = node->next; break; } } } } static void AdjustNamespace(xmlNodePtr node_, xmlNsPtr& ns, std::map& localns, std::map& newns) { if(!ns) return; if(localns.find(ns) == localns.end()) { std::map::iterator ins = newns.find(ns); if(ins == newns.end()) { xmlNsPtr nns = xmlNewNs(node_, ns->href, ns->prefix); newns[ns] = nns; ns = nns; } else { // already copied ns = ins->second; } } } static void LocalizeNamespaces(xmlNodePtr node_) { if (node_ == NULL) return; if (node_->type != XML_ELEMENT_NODE) return; // First collect all locally defined namespaces std::map localns; CollectLocalNamespaces(node_,localns); // Identify referednamespaces and make copy if // defined externally std::map newns; xmlNodePtr node = node_; for(;;) { if(node->type == XML_ELEMENT_NODE) { AdjustNamespace(node_, node->ns, localns, newns); for(xmlAttrPtr attr = node->properties; attr ; attr = attr->next) { AdjustNamespace(node_, attr->ns, localns, newns); } } // 1. go down if(node->children) { node = node->children; continue; } // 2. if impossible go next if(node->next) { node = node->next; continue; } // 3. 
if impossible go up till next exists and then to next for(;;) { if((node == node_) || (!node)) return; node = node->parent; if((node == node_) || (!node)) return; if(node->next) { node = node->next; break; } } } } XMLNode::XMLNode(const std::string& xml) : node_(NULL), is_owner_(false), is_temporary_(false) { //xmlDocPtr doc = xmlParseMemory((char*)(xml.c_str()), xml.length()); xmlDocPtr doc = xmlReadMemory(xml.c_str(),xml.length(),NULL,NULL, XML_PARSE_NODICT|XML_PARSE_NOERROR|XML_PARSE_NOWARNING); if (!doc) return; xmlNodePtr p = doc->children; for (; p; p = p->next) { if (p->type == XML_ELEMENT_NODE) break; } if (!p) { xmlFreeDoc(doc); return; } node_ = p; is_owner_ = true; } XMLNode::XMLNode(const char *xml, int len) : node_(NULL), is_owner_(false), is_temporary_(false) { if (!xml) return; if (len == -1) len = strlen(xml); //xmlDocPtr doc = xmlParseMemory((char*)xml, len); xmlDocPtr doc = xmlReadMemory(xml,len,NULL,NULL, XML_PARSE_NODICT|XML_PARSE_NOERROR|XML_PARSE_NOWARNING); if (!doc) return; xmlNodePtr p = doc->children; for (; p; p = p->next) { if (p->type == XML_ELEMENT_NODE) break; } if (!p) { xmlFreeDoc(doc); return; } node_ = p; is_owner_ = true; } XMLNode::XMLNode(long ptr_addr) : node_(NULL), is_owner_(false), is_temporary_(false) { XMLNode *other = (XMLNode *)ptr_addr; (*other).New((*this)); } XMLNode::XMLNode(const NS& ns, const char *name) : node_(NULL), is_owner_(false), is_temporary_(false) { xmlDocPtr doc = xmlNewDoc((const xmlChar*)"1.0"); if (!doc) return; if (name == NULL) name = ""; const char *name_ = strchr(name, ':'); std::string node_ns_; if (name_ != NULL) { node_ns_.assign(name, name_ - name); ++name_; } else { name_ = name; } xmlNodePtr new_node = xmlNewNode(NULL, (const xmlChar*)name_); if (new_node == NULL) { xmlFreeDoc(doc); return; } xmlDocSetRootElement(doc, new_node); node_ = new_node; is_owner_ = true; SetNamespaces(ns, node_); node_->ns = xmlSearchNs(node_->doc, node_, (const xmlChar*)(node_ns_.empty()?NULL:node_ns_.c_str())); } XMLNode::~XMLNode(void) { if (is_owner_ && node_) xmlFreeDoc(node_->doc); } XMLNode XMLNode::operator[](int n) const { if (!node_) return XMLNode(); xmlNodePtr p = n < 0 ? 
NULL : node_; for (; p; p = p->next) { if ((p->type != XML_ELEMENT_NODE) && (p->type != XML_ATTRIBUTE_NODE)) continue; if (node_->name) { if (!(p->name)) continue; if (!MatchXMLName(node_, p)) continue; } if ((--n) < 0) break; } return XMLNode(p); } XMLNode XMLNode::operator[](const char *name) const { if (!node_) return XMLNode(); if ((node_->type != XML_ELEMENT_NODE) && (node_->type != XML_ATTRIBUTE_NODE)) return XMLNode(); xmlNodePtr p = node_->children; for (; p; p = p->next) { if ((p->type != XML_ELEMENT_NODE) && (p->type != XML_ATTRIBUTE_NODE)) continue; if (MatchXMLName(p, name)) break; } return XMLNode(p); } void XMLNode::operator++(void) { if (!node_) return; if (is_owner_) { // top node has no siblings xmlFreeDoc(node_->doc); node_ = NULL; is_owner_ = false; return; } xmlNodePtr p = node_->next; for (; p; p = p->next) { if (node_->type != p->type) continue; if (node_->name) { if (!(p->name)) continue; if (!MatchXMLName(node_, p)) continue; } break; } node_ = p; } void XMLNode::operator--(void) { if (!node_) return; if (is_owner_) { // top node has no siblings xmlFreeDoc(node_->doc); node_ = NULL; is_owner_ = false; return; } xmlNodePtr p = node_->prev; for (; p; p = p->prev) { if (node_->type != p->type) continue; if (node_->name) { if (!(p->name)) continue; if (!MatchXMLName(node_, p)) continue; } break; } node_ = p; } int XMLNode::Size(void) const { if (!node_) return 0; int n = 0; xmlNodePtr p = node_->children; for (; p; p = p->next) { if (p->type != XML_ELEMENT_NODE) continue; ++n; } return n; } std::string XMLNode::Name(void) const { const char *name = (node_) ? ((node_->name) ? (char*)(node_->name) : "") : ""; return std::string(name); } int XMLNode::AttributesSize(void) const { if (!node_) return 0; if (node_->type != XML_ELEMENT_NODE) return 0; int n = 0; xmlAttrPtr p = node_->properties; for (; p; p = p->next) { if (p->type != XML_ATTRIBUTE_NODE) continue; ++n; } return n; } XMLNode XMLNode::Attribute(int n) { if (!node_) return XMLNode(); if (node_->type != XML_ELEMENT_NODE) return XMLNode(); xmlAttrPtr p = n < 0 ? 
NULL : node_->properties; for (; p; p = p->next) { if (p->type != XML_ATTRIBUTE_NODE) continue; if ((--n) < 0) break; } return XMLNode((xmlNodePtr)p); } XMLNode XMLNode::Attribute(const char *name) { if (!node_) return XMLNode(); if (node_->type != XML_ELEMENT_NODE) return XMLNode(); xmlNodePtr p = (xmlNodePtr)(node_->properties); for (; p; p = p->next) { if (p->type != XML_ATTRIBUTE_NODE) continue; if (MatchXMLName(p, name)) break; } if (p) return XMLNode(p); // New temporary node return XMLNode(p); } XMLNode XMLNode::NewAttribute(const char *name) { if (!node_) return XMLNode(); if (node_->type != XML_ELEMENT_NODE) return XMLNode(); const char *name_ = strchr(name, ':'); xmlNsPtr ns = NULL; if ((name_ != NULL) && (name_ != name)) { std::string ns_(name, name_ - name); ns = xmlSearchNs(node_->doc, node_, (const xmlChar*)(ns_.c_str())); ++name_; } else if(name_ != NULL) { ns = xmlSearchNs(node_->doc, node_, (const xmlChar*)NULL); ++name_; } else { ns = xmlSearchNs(node_->doc, node_, (const xmlChar*)NULL); name_ = name; } return XMLNode((xmlNodePtr)xmlNewNsProp(node_, ns, (const xmlChar*)name_, NULL)); } std::string XMLNode::Prefix(void) const { if (!node_) return ""; xmlNsPtr ns = GetNamespace(node_); if (!ns) return ""; if (!(ns->prefix)) return ""; return (const char*)(ns->prefix); } void XMLNode::Prefix(const std::string& prefix, int recursion) { SetPrefix(node_, prefix.c_str(), recursion); } void XMLNode::StripNamespace(int recursion) { SetPrefix(node_, NULL, recursion); } std::string XMLNode::Namespace(void) const { if (!node_) return ""; xmlNsPtr ns = GetNamespace(node_); if (!ns) return ""; if (!(ns->href)) return ""; return (const char*)(ns->href); } void XMLNode::Name(const char *name) { if (!node_) return; const char *name_ = strchr(name, ':'); xmlNsPtr ns = NULL; if (name_ != NULL) { std::string ns_(name, name_ - name); SetName(node_, name_+1, ns_.c_str()); } else { SetName(node_, name, ""); } } XMLNode XMLNode::Child(int n) { if (!node_) return XMLNode(); if (node_->type != XML_ELEMENT_NODE) return XMLNode(); xmlNodePtr p = n < 0 ? 
NULL : node_->children; for (; p; p = p->next) { if (p->type != XML_ELEMENT_NODE) continue; if ((--n) < 0) break; } return XMLNode(p); } XMLNode::operator std::string(void) const { std::string content; if (!node_) return content; for (xmlNodePtr p = node_->children; p; p = p->next) { if (p->type != XML_TEXT_NODE) continue; xmlChar *buf = xmlNodeGetContent(p); if (!buf) continue; content += (char*)buf; xmlFree(buf); } return content; } XMLNode& XMLNode::operator=(const char *content) { if (!node_) return *this; if (!content) content = ""; xmlChar *encode = xmlEncodeSpecialChars(node_->doc, (xmlChar*)content); if (!encode) encode = (xmlChar*)""; xmlNodeSetContent(node_, encode); xmlFree(encode); return *this; } XMLNode XMLNode::NewChild(const char *name, const NS& namespaces, int n, bool global_order) { XMLNode x = NewChild("", n, global_order); // placeholder x.Namespaces(namespaces); x.Name(name); return x; } XMLNode XMLNode::NewChild(const char *name, int n, bool global_order) { if (node_ == NULL) return XMLNode(); if (node_->type != XML_ELEMENT_NODE) return XMLNode(); const char *name_ = strchr(name, ':'); xmlNsPtr ns = NULL; if ((name_ != NULL) && (name_ != name)) { std::string ns_(name, name_ - name); ns = xmlSearchNs(node_->doc, node_, (const xmlChar*)(ns_.c_str())); ++name_; } else if(name_ != NULL) { ns = xmlSearchNs(node_->doc, node_, (const xmlChar*)NULL); ++name_; } else { ns = xmlSearchNs(node_->doc, node_, (const xmlChar*)NULL); name_ = name; } xmlNodePtr new_node = xmlNewNode(ns, (const xmlChar*)name_); if (new_node == NULL) return XMLNode(); if (n < 0) { return XMLNode(xmlAddChild(node_, new_node)); } XMLNode old_node = global_order ? Child(n) : operator[](name)[n]; if (!old_node) { // TODO: find last old_node return XMLNode(xmlAddChild(node_, new_node)); } if (old_node) { return XMLNode(xmlAddPrevSibling(old_node.node_, new_node)); } return XMLNode(xmlAddChild(node_, new_node)); } XMLNode XMLNode::NewChild(const XMLNode& node, int n, bool global_order) { if (node_ == NULL) return XMLNode(); if (node.node_ == NULL) return XMLNode(); if (node_->type != XML_ELEMENT_NODE) return XMLNode(); // TODO: Add new attribute if 'node' is attribute if (node.node_->type != XML_ELEMENT_NODE) return XMLNode(); xmlNodePtr new_node = xmlDocCopyNode(node.node_, node_->doc, 1); if (new_node == NULL) return XMLNode(); if (n < 0) return XMLNode(xmlAddChild(node_, new_node)); std::string name; xmlNsPtr ns = GetNamespace(new_node); if (ns != NULL) { if (ns->prefix != NULL) name = (char*)ns->prefix; name += ":"; } if (new_node->name) name += (char*)(new_node->name); XMLNode old_node = global_order ? 
Child(n) : operator[](name)[n]; if (!old_node) // TODO: find last old_node return XMLNode(xmlAddChild(node_, new_node)); if (old_node) return XMLNode(xmlAddPrevSibling(old_node.node_, new_node)); return XMLNode(xmlAddChild(node_, new_node)); } void XMLNode::Replace(const XMLNode& node) { if (node_ == NULL) return; if (node.node_ == NULL) return; if (node_->type != XML_ELEMENT_NODE) return; if (node.node_->type != XML_ELEMENT_NODE) return; xmlNodePtr new_node = xmlDocCopyNode(node.node_, node_->doc, 1); if (new_node == NULL) return; xmlReplaceNode(node_, new_node); xmlFreeNode(node_); node_ = new_node; return; } void XMLNode::New(XMLNode& new_node) const { if (new_node.is_owner_ && new_node.node_) xmlFreeDoc(new_node.node_->doc); new_node.is_owner_ = false; new_node.node_ = NULL; if (node_ == NULL) return; // TODO: Copy attribute node too if (node_->type != XML_ELEMENT_NODE) return; xmlDocPtr doc = xmlNewDoc((const xmlChar*)"1.0"); if (doc == NULL) return; new_node.node_ = xmlDocCopyNode(node_, doc, 1); if (new_node.node_ == NULL) return; xmlDocSetRootElement(doc, new_node.node_); new_node.is_owner_ = true; return; } void XMLNode::Move(XMLNode& node) { if (node.is_owner_ && node.node_) xmlFreeDoc(node.node_->doc); node.is_owner_ = false; node.node_ = NULL; if (node_ == NULL) return; // TODO: Copy attribute node too if (node_->type != XML_ELEMENT_NODE) { return; } if(is_owner_) { // Owner also means top level. So just copy and clean. node.node_=node_; node.is_owner_=true; node_=NULL; is_owner_=false; return; } // Otherwise unlink this node and make a new document of it // New(node); Destroy(); xmlDocPtr doc = xmlNewDoc((const xmlChar*)"1.0"); if (doc == NULL) return; xmlUnlinkNode(node_); // Unlinked node still may contain references to namespaces // defined in parent nodes. Those must be copied. LocalizeNamespaces(node_); node.node_ = node_; node_ = NULL; xmlDocSetRootElement(doc, node.node_); node.is_owner_ = true; return; } void XMLNode::Swap(XMLNode& node) { xmlNodePtr tmp_node_ = node.node_; bool tmp_is_owner_ = node.is_owner_; node.node_ = node_; node.is_owner_ = is_owner_; node_ = tmp_node_; is_owner_ = tmp_is_owner_; } void XMLNode::Exchange(XMLNode& node) { xmlNodePtr node1 = node_; xmlNodePtr node2 = node.node_; bool owner1 = is_owner_; bool owner2 = node.is_owner_; if(((node1 == NULL) || owner1) && ((node2 == NULL) || owner2)) { Swap(node); // ? 
return; } if(node1 && (node1->type != XML_ELEMENT_NODE)) return; if(node2 && (node2->type != XML_ELEMENT_NODE)) return; node_ = NULL; node.node_ = NULL; xmlNodePtr neighb1 = node1?(node1->next):NULL; xmlNodePtr neighb2 = node2?(node2->next):NULL; xmlNodePtr parent1 = node1?(node1->parent):NULL; xmlNodePtr parent2 = node2?(node2->parent):NULL; xmlDocPtr doc1 = node1?(node1->doc):NULL; xmlDocPtr doc2 = node2?(node2->doc):NULL; // In current implementation it is dangerous to move // top level element if node is not owning document if(node1 && (parent1 == NULL) && (!owner1)) return; if(node2 && (parent2 == NULL) && (!owner2)) return; if(node1) { xmlUnlinkNode(node1); if(doc1 != doc2) LocalizeNamespaces(node1); } if(node2) { xmlUnlinkNode(node2); if(doc1 != doc2) LocalizeNamespaces(node2); } if(node2) { if(parent1) { if(neighb1) { xmlAddPrevSibling(neighb1,node2); } else { xmlAddChild(parent1,node2); } } else if(doc1) { xmlDocSetRootElement(doc1,node2); } else { // Make document to store node doc1 = xmlNewDoc((const xmlChar*)"1.0"); if(doc1) { xmlDocSetRootElement(doc1,node2); is_owner_ = true; } else { // Should not happen xmlFreeNode(node2); node2 = NULL; } } } else { // Prevent memleaking document if(doc1 && !parent1) { xmlFreeDoc(doc1); } } if(node1) { if(parent2) { if(neighb2) { xmlAddPrevSibling(neighb2,node1); } else { xmlAddChild(parent2,node1); } } else if(doc2) { xmlDocSetRootElement(doc2,node1); } else { // Make document to store node doc2 = xmlNewDoc((const xmlChar*)"1.0"); if(doc2) { xmlDocSetRootElement(doc2,node1); node.is_owner_ = true; } else { // Should not happen xmlFreeNode(node1); node1 = NULL; } } } else { // Prevent memleaking document if(doc2 && !parent2) { xmlFreeDoc(doc2); } } node_ = node2; node.node_ = node1; } void XMLNode::Namespaces(const NS& namespaces, bool keep, int recursion) { if (node_ == NULL) return; if (node_->type != XML_ELEMENT_NODE) return; SetNamespaces(namespaces, node_, keep, recursion); } NS XMLNode::Namespaces(void) { NS namespaces; if (node_ == NULL) return namespaces; if (node_->type != XML_ELEMENT_NODE) return namespaces; GetNamespaces(namespaces, node_); return namespaces; } std::string XMLNode::NamespacePrefix(const char *urn) { if (node_ == NULL) return ""; xmlNsPtr ns_ = xmlSearchNsByHref(node_->doc, node_, (const xmlChar*)urn); if (!ns_) return ""; return (char*)(ns_->prefix); } void XMLNode::Destroy(void) { if (node_ == NULL) return; if (is_owner_) { xmlFreeDoc(node_->doc); node_ = NULL; is_owner_ = false; return; } if (node_->type == XML_ELEMENT_NODE) { xmlNodePtr p = node_->prev; if(p && (p->type == XML_TEXT_NODE)) { xmlChar *buf = xmlNodeGetContent(p); if (buf) { while(*buf) { if(!isspace(*buf)) { p = NULL; break; } ++buf; } } } else { p = NULL; } xmlUnlinkNode(node_); xmlFreeNode(node_); node_ = NULL; // Remove beautyfication text too. 
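      // p is non-NULL only when the preceding sibling is a text node consisting
      // entirely of whitespace (indentation left over from pretty-printing);
      // real text content adjacent to the removed element is left untouched.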
if(p) { xmlUnlinkNode(p); xmlFreeNode(p); } return; } if (node_->type == XML_ATTRIBUTE_NODE) { xmlRemoveProp((xmlAttrPtr)node_); node_ = NULL; return; } } XMLNodeList XMLNode::Path(const std::string& path) { XMLNodeList res; std::string::size_type name_s = 0; std::string::size_type name_e = path.find('/', name_s); if (name_e == std::string::npos) name_e = path.length(); res.push_back(*this); for (;;) { if (res.size() <= 0) return res; XMLNodeList::iterator node = res.begin(); std::string node_name = path.substr(name_s, name_e - name_s); int nodes_num = res.size(); for (int n = 0; n < nodes_num; ++n) { for (int cn = 0;; ++cn) { XMLNode cnode = (*node).Child(cn); if (!cnode) break; if (MatchXMLName(cnode, node_name)) res.push_back(cnode); } ++node; } res.erase(res.begin(), node); if (name_e >= path.length()) break; name_s = name_e + 1; name_e = path.find('/', name_s); if (name_e == std::string::npos) name_e = path.length(); } return res; } XMLNodeList XMLNode::XPathLookup(const std::string& xpathExpr, const NS& nsList) { std::list retlist; if (node_ == NULL) return retlist; if (node_->type != XML_ELEMENT_NODE) return retlist; xmlDocPtr doc = node_->doc; if (doc == NULL) return retlist; xmlXPathContextPtr xpathCtx = xmlXPathNewContext(doc); for (NS::const_iterator ns = nsList.begin(); ns != nsList.end(); ++ns) { // Note: XPath in libxml does not allow default namesapces. // So it does not matter if NULL or empty string is used. It still // will not work. But for consistency we use NULL here. xmlXPathRegisterNs(xpathCtx, (xmlChar*)(ns->first.empty()?NULL:ns->first.c_str()), (xmlChar*)ns->second.c_str()); } xmlXPathObjectPtr xpathObj = xmlXPathEvalExpression((const xmlChar*)(xpathExpr.c_str()), xpathCtx); if (xpathObj && xpathObj->nodesetval && xpathObj->nodesetval->nodeNr) { xmlNodeSetPtr nodes = xpathObj->nodesetval; int size = nodes->nodeNr; for (int i = 0; i < size; ++i) { if (nodes->nodeTab[i]->type == XML_ELEMENT_NODE) { xmlNodePtr cur = nodes->nodeTab[i]; xmlNodePtr parent = cur; for (; parent; parent = parent->parent) if (parent == node_) break; if (parent) retlist.push_back(XMLNode(cur)); } } } xmlXPathFreeObject(xpathObj); xmlXPathFreeContext(xpathCtx); return retlist; } XMLNode XMLNode::GetRoot(void) { if (node_ == NULL) return XMLNode(); xmlDocPtr doc = node_->doc; if (doc == NULL) return XMLNode(); return XMLNode(doc->children); } XMLNode XMLNode::Parent(void) { if (node_ == NULL) return XMLNode(); if (node_->type == XML_ELEMENT_NODE) return XMLNode(node_->parent); if (node_->type == XML_ATTRIBUTE_NODE) return XMLNode(((xmlAttrPtr)node_)->parent); return XMLNode(); } XMLNode& XMLNode::operator=(const XMLNode& node) { if (is_owner_ && node_) { xmlDocPtr doc = node_->doc; if (doc != NULL) xmlFreeDoc(doc); } node_ = node.node_; is_owner_ = false; is_temporary_ = node.is_temporary_; return *this; } static int write_to_string(void* context,const char* buffer,int len) { if(!context) return -1; std::string* str = (std::string*)context; if(len <= 0) return 0; if(!buffer) return -1; str->append(buffer,len); return len; } static int close_string(void* context) { if(!context) return -1; return 0; } void XMLNode::GetDoc(std::string& out_xml_str, bool user_friendly) const { out_xml_str.resize(0); if (!node_) return; xmlDocPtr doc = node_->doc; if (doc == NULL) return; xmlOutputBufferPtr buf = xmlOutputBufferCreateIO(&write_to_string,&close_string,&out_xml_str,NULL); if(buf == NULL) return; /* xmlChar *buf = NULL; int bufsize = 0; if (user_friendly) xmlDocDumpFormatMemory(doc, &buf, &bufsize, 
1); else xmlDocDumpMemory(doc, &buf, &bufsize); if (buf) { out_xml_str = (char*)buf; xmlFree(buf); } */ // Note xmlSaveFormatFileTo/xmlSaveFileTo call xmlOutputBufferClose if (user_friendly) xmlSaveFormatFileTo(buf, doc, (const char*)(doc->encoding), 1); else xmlSaveFileTo(buf, doc, (const char*)(doc->encoding)); } static void NamespacesToString(std::map& extns, std::string& ns_str) { for(std::map::iterator ns = extns.begin(); ns != extns.end(); ++ns) { char* prefix = (char*)(ns->first->prefix); char* href = (char*)(ns->first->href); if(prefix && prefix[0]) { ns_str+=" xmlns:"; ns_str+=prefix; ns_str+="="; } else { ns_str+=" xmlns="; } ns_str+="\""; ns_str+=(href?href:""); ns_str+="\""; } } static void InsertExternalNamespaces(std::string& out_xml_str, const std::string& ns_str) { // Find end of first name " | std::string::size_type p = out_xml_str.find('<'); // tag start if(p == std::string::npos) return; //if(p < ns_str.length()) return; ++p; if(p >= out_xml_str.length()) return; if(out_xml_str[p] == '?') { // p = out_xml_str.find("?>",p); // tag end if(p == std::string::npos) return; p+=2; p = out_xml_str.find('<',p); //tag start if(p == std::string::npos) return; ++p; } p = out_xml_str.find_first_not_of(" \t",p); // name start if(p == std::string::npos) return; p = out_xml_str.find_first_of(" \t>/",p); //name end if(p == std::string::npos) return; std::string namestr = out_xml_str.substr(ns_str.length(),p-ns_str.length()); out_xml_str.replace(0,p,namestr+ns_str); } void XMLNode::GetXML(std::string& out_xml_str, bool user_friendly) const { out_xml_str.resize(0); if (!node_) return; if (node_->type != XML_ELEMENT_NODE) return; xmlDocPtr doc = node_->doc; if (doc == NULL) return; // Printing non-root node omits namespaces defined at higher level. // So we need to create temporary namespace definitions and place them // at node being printed std::map extns; CollectExternalNamespaces(node_, extns); // It is easier to insert namespaces into final text. Hence // allocating place for them. std::string ns_str; NamespacesToString(extns, ns_str); out_xml_str.append(ns_str.length(),' '); /* xmlBufferPtr buf = xmlBufferCreate(); xmlNodeDump(buf, doc, node_, 0, user_friendly ? 1 : 0); out_xml_str = (char*)(buf->content); xmlBufferFree(buf); */ xmlOutputBufferPtr buf = xmlOutputBufferCreateIO(&write_to_string,&close_string,&out_xml_str,NULL); if(buf == NULL) return; xmlNodeDumpOutput(buf, doc, node_, 0, user_friendly ? 1 : 0, (const char*)(doc->encoding)); xmlOutputBufferClose(buf); // Insert external namespaces into final string using allocated space InsertExternalNamespaces(out_xml_str, ns_str); } void XMLNode::GetXML(std::string& out_xml_str, const std::string& encoding, bool user_friendly) const { out_xml_str.resize(0); if (!node_) return; if (node_->type != XML_ELEMENT_NODE) return; xmlDocPtr doc = node_->doc; if (doc == NULL) return; xmlCharEncodingHandlerPtr handler = NULL; handler = xmlFindCharEncodingHandler(encoding.c_str()); if (handler == NULL) return; std::map extns; CollectExternalNamespaces(node_, extns); std::string ns_str; NamespacesToString(extns, ns_str); out_xml_str.append(ns_str.length(),' '); //xmlOutputBufferPtr buf = xmlAllocOutputBuffer(handler); xmlOutputBufferPtr buf = xmlOutputBufferCreateIO(&write_to_string,&close_string,&out_xml_str,NULL); if(buf == NULL) return; xmlNodeDumpOutput(buf, doc, node_, 0, user_friendly ? 1 : 0, encoding.c_str()); xmlOutputBufferFlush(buf); //out_xml_str = (char*)(buf->conv ? 
buf->conv->content : buf->buffer->content); xmlOutputBufferClose(buf); InsertExternalNamespaces(out_xml_str, ns_str); } bool XMLNode::SaveToStream(std::ostream& out) const { std::string s; GetXML(s); out << "" << std::endl; out << s; return (bool)out; } bool XMLNode::SaveToFile(const std::string& file_name) const { InterruptGuard guard; std::ofstream out(file_name.c_str(), std::ios::out); if (!out) return false; bool r = SaveToStream(out); out.close(); return r; } std::ostream& operator<<(std::ostream& out, const XMLNode& node) { node.SaveToStream(out); return out; } bool XMLNode::ReadFromStream(std::istream& in) { std::string s; std::getline(in, s, 0); if (!in) return false; //xmlDocPtr doc = xmlParseMemory((char*)(s.c_str()), s.length()); xmlDocPtr doc = xmlReadMemory(s.c_str(),s.length(),NULL,NULL, XML_PARSE_NODICT|XML_PARSE_NOERROR|XML_PARSE_NOWARNING); if (doc == NULL) return false; xmlNodePtr p = doc->children; for (; p; p = p->next) if (p->type == XML_ELEMENT_NODE) break; if (!p) { xmlFreeDoc(doc); return false; } if (node_ != NULL) if (is_owner_) { xmlFreeDoc(node_->doc); node_ = NULL; is_owner_ = false; } node_ = p; if (node_) is_owner_ = true; return true; } bool XMLNode::ReadFromFile(const std::string& file_name) { std::ifstream in(file_name.c_str(), std::ios::in); if (!in) return false; bool r = ReadFromStream(in); in.close(); return r; } std::istream& operator>>(std::istream& in, XMLNode& node) { node.ReadFromStream(in); return in; } bool XMLNode::Validate(XMLNode schema_doc, std::string &err_msg) { if(!node_) return false; XMLNode doc; // Making copy of schema because it may be changed during parsing. schema_doc.New(doc); if((!doc.node_) || (!doc.node_->doc)) { err_msg = "XML schema is invalid"; return false; } xmlSchemaParserCtxtPtr schemaParser = xmlSchemaNewDocParserCtxt(doc.node_->doc); if (!schemaParser) { err_msg = "Can not aquire XML schema"; return false; } // parse schema xmlSchemaPtr schema = xmlSchemaParse(schemaParser); if (!schema) { xmlSchemaFreeParserCtxt(schemaParser); err_msg = "Can not parse schema"; return false; } xmlSchemaFreeParserCtxt(schemaParser); return Validate(schema, err_msg); } bool XMLNode::Validate(const std::string& schema_file, std::string &err_msg) { if(!node_) return false; // create parser ctxt for schema accessible on schemaPath xmlSchemaParserCtxtPtr schemaParser = xmlSchemaNewParserCtxt(schema_file.c_str()); if (!schemaParser) { err_msg = "Can not load schema from file "+schema_file; return false; } // parse schema xmlSchemaPtr schema = xmlSchemaParse(schemaParser); if (!schema) { xmlSchemaFreeParserCtxt(schemaParser); err_msg = "Can not parse schema"; return false; } xmlSchemaFreeParserCtxt(schemaParser); return Validate(schema, err_msg); } void XMLNode::LogError(void * ctx, const char * msg, ...) 
{ std::string* str = (std::string*)ctx; va_list ap; va_start(ap, msg); const size_t bufsize = 256; char* buf = new char[bufsize]; buf[0] = 0; vsnprintf(buf, bufsize, msg, ap); buf[bufsize-1] = 0; //if(!str.empty()) str += ; *str += buf; delete[] buf; va_end(ap); } bool XMLNode::Validate(xmlSchemaPtr schema, std::string &err_msg) { if(!node_) return false; // create schema validation context xmlSchemaValidCtxtPtr validityCtx = xmlSchemaNewValidCtxt(schema); if (!validityCtx) { xmlSchemaFree(schema); err_msg = "Can not create validation context"; return false; } // Set context collectors xmlSchemaSetValidErrors(validityCtx,&LogError,&LogError,&err_msg); // validate against schema bool result = false; if(node_->parent == (xmlNodePtr)node_->doc) { result = (xmlSchemaValidateDoc(validityCtx, node_->doc) == 0); } else { // It lookslike a bug in libxml makes xmlSchemaValidateOneElement // behave like xmlSchemaValidateDoc. // So fake doc is needed //result = (xmlSchemaValidateOneElement(validityCtx, node_) == 0); xmlDocPtr olddoc = node_->doc; xmlDocPtr newdoc = xmlNewDoc((const xmlChar*)"1.0"); if(newdoc) { newdoc->children = node_; node_->parent = (xmlNodePtr)newdoc; node_->doc = newdoc; result = (xmlSchemaValidateDoc(validityCtx, node_->doc) == 0); node_->parent = (xmlNodePtr)olddoc; node_->doc = olddoc; newdoc->children = NULL; xmlFreeDoc(newdoc); } } // free resources and return result xmlSchemaFreeValidCtxt(validityCtx); xmlSchemaFree(schema); return result; } XMLNodeContainer::XMLNodeContainer(void) {} XMLNodeContainer::XMLNodeContainer(const XMLNodeContainer& container) { operator=(container); } XMLNodeContainer::~XMLNodeContainer(void) { for (std::vector::iterator n = nodes_.begin(); n != nodes_.end(); ++n) delete *n; } XMLNodeContainer& XMLNodeContainer::operator=(const XMLNodeContainer& container) { for (std::vector::iterator n = nodes_.begin(); n != nodes_.end(); ++n) delete *n; for (std::vector::const_iterator n = container.nodes_.begin(); n != container.nodes_.end(); ++n) { if ((*n)->is_owner_) AddNew(*(*n)); else Add(*(*n)); } return *this; } void XMLNodeContainer::Add(const XMLNode& node) { XMLNode *new_node = new XMLNode(node); nodes_.push_back(new_node); } void XMLNodeContainer::Add(const std::list& nodes) { for (std::list::const_iterator n = nodes.begin(); n != nodes.end(); ++n) Add(*n); } void XMLNodeContainer::AddNew(const XMLNode& node) { XMLNode *new_node = new XMLNode(); node.New(*new_node); nodes_.push_back(new_node); } void XMLNodeContainer::AddNew(const std::list& nodes) { for (std::list::const_iterator n = nodes.begin(); n != nodes.end(); ++n) AddNew(*n); } int XMLNodeContainer::Size(void) const { return nodes_.size(); } XMLNode XMLNodeContainer::operator[](int n) { if (n < 0) return XMLNode(); if (n >= nodes_.size()) return XMLNode(); return *nodes_[n]; } std::list XMLNodeContainer::Nodes(void) { std::list r; for (std::vector::iterator n = nodes_.begin(); n != nodes_.end(); ++n) r.push_back(**n); return r; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/Run_unix.cpp0000644000000000000000000000012313213442363023260 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200574.900706 29 ctime=1513200658.86873365 nordugrid-arc-5.4.2/src/hed/libs/common/Run_unix.cpp0000644000175000002070000007332013213442363023333 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include // NOTE: On Solaris errno is not working properly if cerrno is included first #include #include #include 
#include #include #include #include #include #include #include #include #include #include #include #include #include #include "Run.h" #include "Watchdog.h" #define DUAL_CHECK_LOST_CHILD namespace Arc { // This function is hackish workaround for // problem of child processes disappearing without // trace and even waitpid() can't detect that. static bool check_pid(pid_t p, Time& t) { if(p <= 0) { // Child PID is lost - memory corruption? std::cerr<<"HUGE PROBLEM: lost PID of child process: "< Period(60)) { std::cerr<<"HUGE PROBLEM: lost child process: "< abandoned_; //std::list runs_; Glib::Mutex abandoned_lock_; Glib::Mutex list_lock_; //Glib::Mutex pump_lock_; TimedMutex pump_lock_; Glib::RefPtr context_; Glib::Thread *thread_; int storm_count; RunPump(void); ~RunPump(void); static RunPump& Instance(void); operator bool(void) const { return (bool)context_; } bool operator!(void) const { return !(bool)context_; } void Pump(void); void Add(Run *r); void Remove(Run *r); void child_handler(Glib::Pid pid, int result); static void fork_handler(void); }; Glib::StaticMutex RunPump::instance_lock_ = GLIBMM_STATIC_MUTEX_INIT; RunPump* RunPump::instance_ = NULL; unsigned int RunPump::mark_ = ~RunPumpMagic; class Pid { private: Glib::Pid p_; public: Pid(void):p_(0) { }; Pid(Glib::Pid p):p_(p) { }; ~Pid(void) { if(p_) Glib::spawn_close_pid(p_); }; operator Glib::Pid(void) const { return p_; }; Glib::Pid pid(void) const { return p_; }; Glib::Pid& operator=(const Glib::Pid& p) { return (p_=p); }; }; class RunInitializerArgument { private: void *arg_; void (*func_)(void*); UserSwitch* usw_; int user_id_; int group_id_; public: RunInitializerArgument(void(*func)(void*), void *arg, UserSwitch* usw, int user_id, int group_id) : arg_(arg), func_(func), usw_(usw), user_id_(user_id), group_id_(group_id) {} void Run(void); }; void RunInitializerArgument::Run(void) { // It would be nice to have function which removes all Glib::Mutex locks. // But so far we need to save ourselves only from Logger and SetEnv/GetEnv. void *arg = arg_; void (*func)(void*) = func_; if(group_id_ > 0) ::setgid(group_id_); if(user_id_ != 0) { if(::setuid(user_id_) != 0) { // Can't switch user id _exit(-1); }; // in case previous user was not allowed to switch group id if(group_id_ > 0) ::setgid(group_id_); }; // set proper umask ::umask(0077); // To leave clean environment reset all signals. // Otherwise we may get some signals non-intentionally ignored. // Glib takes care of open handles. 
#ifdef SIGRTMIN for(int n = SIGHUP; n < SIGRTMIN; ++n) { #else // At least reset all signals whose numbers are well defined for(int n = SIGHUP; n < SIGTERM; ++n) { #endif signal(n,SIG_DFL); } if (!func) return; // Run initializer requested by caller (*func)(arg); return; } RunPump::RunPump(void) : context_(NULL), thread_(NULL), storm_count(0) { try { thread_ = Glib::Thread::create(sigc::mem_fun(*this, &RunPump::Pump), false); } catch (Glib::Exception& e) {} catch (std::exception& e) {} ; if (thread_ == NULL) return; // Wait for context_ to be intialized // TODO: what to do if context never initialized for (;;) { if (context_) break; thread_->yield(); // This is simpler than condition+mutex } } void RunPump::fork_handler(void) { instance_ = NULL; mark_ = RunPumpMagic; } RunPump& RunPump::Instance(void) { instance_lock_.lock(); if ((instance_ == NULL) || (mark_ != RunPumpMagic)) { //pthread_atfork(NULL,NULL,&fork_handler); instance_ = new RunPump(); mark_ = RunPumpMagic; } instance_lock_.unlock(); return *instance_; } void RunPump::Pump(void) { // TODO: put try/catch everythere for glibmm errors try { context_ = Glib::MainContext::create(); // In infinite loop monitor state of children processes // and pump information to/from std* channels if requested //context_->acquire(); for (;;) { list_lock_.lock(); list_lock_.unlock(); pump_lock_.lock(); bool dispatched = context_->iteration(true); for(std::list::iterator a = abandoned_.begin(); a != abandoned_.end();) { if(a->pid_ == 0) { SAFE_DISCONNECT(a->child_conn_); a = abandoned_.erase(a); } else { ++a; } } pump_lock_.unlock(); thread_->yield(); if (!dispatched) { // Under some unclear circumstance storm of iteration() // returning false non-stop was observed. So here we // are trying to prevent that. if((++storm_count) >= 10) { sleep(1); storm_count = 0; }; } else { storm_count = 0; } } } catch (Glib::Exception& e) {} catch (std::exception& e) {}; } void RunPump::Add(Run *r) { if (!r) return; if (!(*r)) return; if (!(*this)) return; // Take full control over context list_lock_.lock(); while (true) { context_->wakeup(); // doing it like that because experience says // wakeup does not always wakes it up if(pump_lock_.lock(100)) break; } try { // Add sources to context if (r->stdout_str_ && !(r->stdout_keep_)) { r->stdout_conn_ = context_->signal_io().connect(sigc::mem_fun(*r, &Run::stdout_handler), r->stdout_, Glib::IO_IN | Glib::IO_HUP); } if (r->stderr_str_ && !(r->stderr_keep_)) { r->stderr_conn_ = context_->signal_io().connect(sigc::mem_fun(*r, &Run::stderr_handler), r->stderr_, Glib::IO_IN | Glib::IO_HUP); } if (r->stdin_str_ && !(r->stdin_keep_)) { r->stdin_conn_ = context_->signal_io().connect(sigc::mem_fun(*r, &Run::stdin_handler), r->stdin_, Glib::IO_OUT | Glib::IO_HUP); } #ifdef HAVE_GLIBMM_CHILDWATCH r->child_conn_ = context_->signal_child_watch().connect(sigc::mem_fun(*r, &Run::child_handler), r->pid_->pid()); //if(r->child_conn_.empty()) std::cerr<<"connect for signal_child_watch failed"<wakeup(); // doing it like that because experience says // wakeup does not always wakes it up if(pump_lock_.lock(100)) break; } // Disconnect sources from context SAFE_DISCONNECT(r->stdout_conn_); SAFE_DISCONNECT(r->stderr_conn_); SAFE_DISCONNECT(r->stdin_conn_); SAFE_DISCONNECT(r->child_conn_); if(r->running_) { #ifdef HAVE_GLIBMM_CHILDWATCH abandoned_.push_back(Abandoned(r->pid_->pid(),context_->signal_child_watch().connect(sigc::mem_fun(*this,&RunPump::child_handler), r->pid_->pid()))); #endif r->running_ = false; } pump_lock_.unlock(); 
list_lock_.unlock(); } void RunPump::child_handler(Glib::Pid pid, int /* result */) { abandoned_lock_.lock(); for(std::list::iterator a = abandoned_.begin(); a != abandoned_.end();++a) { if(a->pid_ == pid) { a->pid_ = 0; break; } } abandoned_lock_.unlock(); } Run::Run(const std::string& cmdline) : working_directory("."), stdout_(-1), stderr_(-1), stdin_(-1), stdout_str_(NULL), stderr_str_(NULL), stdin_str_(NULL), stdout_keep_(false), stderr_keep_(false), stdin_keep_(false), pid_(NULL), argv_(Glib::shell_parse_argv(cmdline)), initializer_func_(NULL), initializer_arg_(NULL), kicker_func_(NULL), kicker_arg_(NULL), started_(false), running_(false), abandoned_(false), result_(-1), user_id_(0), group_id_(0), run_time_(Time::UNDEFINED), exit_time_(Time::UNDEFINED) { pid_ = new Pid; } Run::Run(const std::list& argv) : working_directory("."), stdout_(-1), stderr_(-1), stdin_(-1), stdout_str_(NULL), stderr_str_(NULL), stdin_str_(NULL), stdout_keep_(false), stderr_keep_(false), stdin_keep_(false), pid_(NULL), argv_(argv), initializer_func_(NULL), initializer_arg_(NULL), kicker_func_(NULL), kicker_arg_(NULL), started_(false), running_(false), abandoned_(false), result_(-1), user_id_(0), group_id_(0), run_time_(Time::UNDEFINED), exit_time_(Time::UNDEFINED) { pid_ = new Pid; } Run::~Run(void) { if(*this) { if(!abandoned_) Kill(0); CloseStdout(); CloseStderr(); CloseStdin(); RunPump::Instance().Remove(this); delete pid_; }; } static void remove_env(std::list& envp, const std::string& key) { std::list::iterator e = envp.begin(); while(e != envp.end()) { if((strncmp(e->c_str(),key.c_str(),key.length()) == 0) && ((e->length() == key.length()) || (e->at(key.length()) == '='))) { e = envp.erase(e); continue; }; ++e; }; } static void remove_env(std::list& envp, const std::list& keys) { for(std::list::const_iterator key = keys.begin(); key != keys.end(); ++key) { remove_env(envp, *key); }; } static void add_env(std::list& envp, const std::string& rec) { std::string key(rec); std::string::size_type pos = key.find('='); if(pos != std::string::npos) key.resize(pos); remove_env(envp,key); envp.push_back(rec); } static void add_env(std::list& envp, const std::list& recs) { for(std::list::const_iterator rec = recs.begin(); rec != recs.end(); ++rec) { add_env(envp, *rec); }; } static void exit_child(int code, char const * msg) { int l = strlen(msg); (void)write(2, msg, l); _exit(code); } bool Run::Start(void) { if (started_) return false; if (argv_.size() < 1) return false; RunPump& pump = RunPump::Instance(); UserSwitch* usw = NULL; RunInitializerArgument *arg = NULL; std::list envp_tmp; try { running_ = true; Glib::Pid pid = 0; // Locking user switching to make sure fork is // is done with proper uid usw = new UserSwitch(0,0); arg = new RunInitializerArgument(initializer_func_, initializer_arg_, usw, user_id_, group_id_); #ifdef USE_GLIB_PROCESS_SPAWN { EnvLockWrapper wrapper; // Protection against gettext using getenv envp_tmp = GetEnv(); remove_env(envp_tmp, envx_); add_env(envp_tmp, envp_); spawn_async_with_pipes(working_directory, argv_, envp_tmp, Glib::SpawnFlags(Glib::SPAWN_DO_NOT_REAP_CHILD), sigc::mem_fun(*arg, &RunInitializerArgument::Run), &pid, stdin_keep_ ? NULL : &stdin_, stdout_keep_ ? NULL : &stdout_, stderr_keep_ ? 
NULL : &stderr_); }; #else envp_tmp = GetEnv(); remove_env(envp_tmp, envx_); add_env(envp_tmp, envp_); int pipe_stdin[2] = { -1, -1 }; int pipe_stdout[2] = { -1, -1 }; int pipe_stderr[2] = { -1, -1 }; pid = -1; if((stdin_keep_ || (::pipe(pipe_stdin) == 0)) && (stdout_keep_ || (::pipe(pipe_stdout) == 0)) && (stderr_keep_ || (::pipe(pipe_stderr) == 0))) { pid = ::fork(); if(pid == 0) { // child - set std* and do exec if(pipe_stdin[0] != -1) { close(pipe_stdin[1]); if(dup2(pipe_stdin[0], 0) != 0) exit_child(-1, "Failed to setup stdin\n"); close(pipe_stdin[0]); }; if(pipe_stdout[1] != -1) { close(pipe_stdout[0]); if(dup2(pipe_stdout[1], 1) != 1) exit_child(-1, "Failed to setup stdout\n"); close(pipe_stdout[1]); }; if(pipe_stderr[1] != -1) { close(pipe_stderr[0]); if(dup2(pipe_stderr[1], 2) != 2) exit_child(-1, "Failed to setup stderr\n"); close(pipe_stderr[1]); }; char * * argv = new char*[argv_.size()+1]; char * * envp = new char*[envp_tmp.size()+1]; int n = 0; for(std::list::iterator item = argv_.begin(); item != argv_.end(); ++item) { argv[n++] = const_cast(item->c_str()); }; argv[n] = NULL; n = 0; for(std::list::iterator item = envp_tmp.begin(); item != envp_tmp.end(); ++item) { envp[n++] = const_cast(item->c_str()); }; envp[n] = NULL; arg->Run(); if(::chdir(working_directory.c_str()) != 0) { exit_child(-1, "Failed to change working directory\n"); } // close all handles inherited from parent uint64_t max_files = RLIM_INFINITY; struct rlimit lim; if(getrlimit(RLIMIT_NOFILE,&lim) == 0) { max_files=lim.rlim_cur; }; if(max_files == RLIM_INFINITY) max_files=4096; // some safe value for(int i=3;i 0) { // Kill softly ::kill(pid_->pid(), SIGTERM); Wait(timeout); } if (!running_) return; // Kill with no merci ::kill(pid_->pid(), SIGKILL); } void Run::Abandon(void) { if(*this) { CloseStdout(); CloseStderr(); CloseStdin(); abandoned_=true; } } bool Run::stdout_handler(Glib::IOCondition) { if (stdout_str_) { char buf[256]; int l = ReadStdout(0, buf, sizeof(buf)); if ((l == 0) || (l == -1)) { CloseStdout(); return false; } else { stdout_str_->append(buf, l); } } else { // Event shouldn't happen if not expected } return true; } bool Run::stderr_handler(Glib::IOCondition) { if (stderr_str_) { char buf[256]; int l = ReadStderr(0, buf, sizeof(buf)); if ((l == 0) || (l == -1)) { CloseStderr(); return false; } else { stderr_str_->append(buf, l); } } else { // Event shouldn't happen if not expected } return true; } bool Run::stdin_handler(Glib::IOCondition) { if (stdin_str_) { if (stdin_str_->length() == 0) { CloseStdin(); stdin_str_ = NULL; } else { int l = WriteStdin(0, stdin_str_->c_str(), stdin_str_->length()); if (l == -1) { CloseStdin(); return false; } else { // Not very effective *stdin_str_ = stdin_str_->substr(l); } } } else { // Event shouldn't happen if not expected } return true; } void Run::child_handler(Glib::Pid, int result) { if (stdout_str_) for (;;) if (!stdout_handler(Glib::IO_IN)) break; if (stderr_str_) for (;;) if (!stderr_handler(Glib::IO_IN)) break; //CloseStdout(); //CloseStderr(); CloseStdin(); lock_.lock(); cond_.signal(); // There is reference in Glib manual that 'result' is same // as returned by waitpid. It is not clear how it works for // windows but atleast for *nix we can use waitpid related // macros. 
#ifdef DUAL_CHECK_LOST_CHILD if(result == -1) { // special value to indicate lost child result_ = -1; } else #endif if(WIFEXITED(result)) { result_ = WEXITSTATUS(result); } else { result_ = -1; } running_ = false; exit_time_ = Time(); lock_.unlock(); if (kicker_func_) (*kicker_func_)(kicker_arg_); } void Run::CloseStdout(void) { if (stdout_ != -1) ::close(stdout_); stdout_ = -1; SAFE_DISCONNECT(stdout_conn_); } void Run::CloseStderr(void) { if (stderr_ != -1) ::close(stderr_); stderr_ = -1; SAFE_DISCONNECT(stderr_conn_); } void Run::CloseStdin(void) { if (stdin_ != -1) ::close(stdin_); stdin_ = -1; SAFE_DISCONNECT(stdin_conn_); } int Run::ReadStdout(int timeout, char *buf, int size) { if (stdout_ == -1) return -1; // TODO: do it through context for timeout? for(;;) { pollfd fd; fd.fd = stdout_; fd.events = POLLIN; fd.revents = 0; int err = ::poll(&fd, 1, timeout); if((err < 0) && (errno == EINTR)) continue; if(err <= 0) return err; if(!(fd.revents & POLLIN)) return -1; break; } return ::read(stdout_, buf, size); } int Run::ReadStderr(int timeout, char *buf, int size) { if (stderr_ == -1) return -1; // TODO: do it through context for timeout for(;;) { pollfd fd; fd.fd = stderr_; fd.events = POLLIN; fd.revents = 0; int err = ::poll(&fd, 1, timeout); if((err < 0) && (errno == EINTR)) continue; if(err <= 0) return err; if(!(fd.revents & POLLIN)) return -1; break; } return ::read(stderr_, buf, size); } int Run::WriteStdin(int timeout, const char *buf, int size) { if (stdin_ == -1) return -1; // TODO: do it through context for timeout for(;;) { pollfd fd; fd.fd = stdin_; fd.events = POLLOUT; fd.revents = 0; int err = ::poll(&fd, 1, timeout); if((err < 0) && (errno == EINTR)) continue; if(err <= 0) return err; if(!(fd.revents & POLLOUT)) return -1; break; } return write(stdin_, buf, size); } bool Run::Running(void) { #ifdef DUAL_CHECK_LOST_CHILD if(running_) { if(!check_pid(pid_->pid(),exit_time_)) { lock_.unlock(); child_handler(pid_->pid(), -1); // simulate exit lock_.lock(); } } #endif #ifdef HAVE_GLIBMM_CHILDWATCH return running_; #else Wait(0); return running_; #endif } bool Run::Wait(int timeout) { if (!started_) return false; if (!running_) return true; Glib::TimeVal till; till.assign_current_time(); till += timeout; lock_.lock(); while (running_) { Glib::TimeVal t; t.assign_current_time(); t.subtract(till); #ifdef HAVE_GLIBMM_CHILDWATCH if (!t.negative()) break; cond_.timed_wait(lock_, till); #else int status; int r = ::waitpid(pid_->pid(), &status, WNOHANG); if (r == 0) { if (!t.negative()) break; lock_.unlock(); sleep(1); lock_.lock(); continue; } if (r == -1) { // Child lost? status = (-1)<<8; } // Child exited lock_.unlock(); child_handler(pid_->pid(), status); lock_.lock(); #endif #ifdef DUAL_CHECK_LOST_CHILD if(running_) { if(!check_pid(pid_->pid(),exit_time_)) { lock_.unlock(); child_handler(pid_->pid(), -1); // simulate exit lock_.lock(); } } #endif } lock_.unlock(); return (!running_); } bool Run::Wait(void) { if (!started_) return false; if (!running_) return true; lock_.lock(); Glib::TimeVal till; while (running_) { #ifdef HAVE_GLIBMM_CHILDWATCH till.assign_current_time(); till += 1; // one sec later cond_.timed_wait(lock_, till); #else int status; int r = ::waitpid(pid_->pid(), &status, WNOHANG); if (r == 0) { lock_.unlock(); sleep(1); lock_.lock(); continue; } if (r == -1) { // Child lost? 
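        // Synthesize a waitpid()-style status word so that the call to
        // child_handler() below records the process as finished with a
        // failure exit code.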
status = (-1)<<8; } // Child exited lock_.unlock(); child_handler(pid_->pid(), status); lock_.lock(); #endif #ifdef DUAL_CHECK_LOST_CHILD if(running_) { if(!check_pid(pid_->pid(),exit_time_)) { lock_.unlock(); child_handler(pid_->pid(), -1); // simulate exit lock_.lock(); } } #endif } lock_.unlock(); return (!running_); } void Run::AssignStdout(std::string& str) { if (!running_) stdout_str_ = &str; } void Run::AssignStderr(std::string& str) { if (!running_) stderr_str_ = &str; } void Run::AssignStdin(std::string& str) { if (!running_) stdin_str_ = &str; } void Run::KeepStdout(bool keep) { if (!running_) stdout_keep_ = keep; } void Run::KeepStderr(bool keep) { if (!running_) stderr_keep_ = keep; } void Run::KeepStdin(bool keep) { if (!running_) stdin_keep_ = keep; } void Run::AssignInitializer(void (*initializer_func)(void *arg), void *initializer_arg) { if (!running_) { initializer_arg_ = initializer_arg; initializer_func_ = initializer_func; } } void Run::AssignKicker(void (*kicker_func)(void *arg), void *kicker_arg) { if (!running_) { kicker_arg_ = kicker_arg; kicker_func_ = kicker_func; } } void Run::AfterFork(void) { RunPump::fork_handler(); } #define WATCHDOG_TEST_INTERVAL (60) #define WATCHDOG_KICK_INTERVAL (10) class Watchdog { friend class WatchdogListener; friend class WatchdogChannel; private: class Channel { public: int timeout; time_t next; Channel(void):timeout(-1),next(0) {}; }; int lpipe[2]; sigc::connection timer_; static Glib::Mutex instance_lock_; std::vector channels_; static Watchdog *instance_; static unsigned int mark_; #define WatchdogMagic (0x1E84FC05) static Watchdog& Instance(void); int Open(int timeout); void Kick(int channel); void Close(int channel); int Listen(void); bool Timer(void); public: Watchdog(void); ~Watchdog(void); }; Glib::Mutex Watchdog::instance_lock_; Watchdog* Watchdog::instance_ = NULL; unsigned int Watchdog::mark_ = ~WatchdogMagic; Watchdog& Watchdog::Instance(void) { instance_lock_.lock(); if ((instance_ == NULL) || (mark_ != WatchdogMagic)) { instance_ = new Watchdog(); mark_ = WatchdogMagic; } instance_lock_.unlock(); return *instance_; } Watchdog::Watchdog(void) { lpipe[0] = -1; lpipe[1] = -1; ::pipe(lpipe); if(lpipe[1] != -1) fcntl(lpipe[1], F_SETFL, fcntl(lpipe[1], F_GETFL) | O_NONBLOCK); } Watchdog::~Watchdog(void) { if(timer_.connected()) timer_.disconnect(); if(lpipe[0] != -1) ::close(lpipe[0]); if(lpipe[1] != -1) ::close(lpipe[1]); } bool Watchdog::Timer(void) { char c = '\0'; time_t now = ::time(NULL); { Glib::Mutex::Lock lock(instance_lock_); for(int n = 0; n < channels_.size(); ++n) { if(channels_[n].timeout < 0) continue; if(((int)(now - channels_[n].next)) > 0) return true; // timeout } } if(lpipe[1] != -1) write(lpipe[1],&c,1); return true; } int Watchdog::Open(int timeout) { if(timeout <= 0) return -1; Glib::Mutex::Lock lock(instance_lock_); if(!timer_.connected()) { // start glib loop and attach timer to context Glib::RefPtr context = RunPump::Instance().context_; timer_ = context->signal_timeout().connect(sigc::mem_fun(*this,&Watchdog::Timer),WATCHDOG_KICK_INTERVAL*1000); } int n = 0; for(; n < channels_.size(); ++n) { if(channels_[n].timeout < 0) { channels_[n].timeout = timeout; channels_[n].next = ::time(NULL) + timeout; return n; } } channels_.resize(n+1); channels_[n].timeout = timeout; channels_[n].next = ::time(NULL) + timeout; return n; } void Watchdog::Kick(int channel) { Glib::Mutex::Lock lock(instance_lock_); if((channel < 0) || (channel >= channels_.size())) return; if(channels_[channel].timeout < 0) return; 
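    // A kick pushes this channel's deadline one full timeout period into the
    // future. If a channel misses its deadline, Timer() stops writing the
    // periodic keep-alive byte to the pipe and WatchdogListener::Listen()
    // eventually reports the watchdog as triggered.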
channels_[channel].next = ::time(NULL) + channels_[channel].timeout; } void Watchdog::Close(int channel) { Glib::Mutex::Lock lock(instance_lock_); if((channel < 0) || (channel >= channels_.size())) return; channels_[channel].timeout = -1; // resize? } int Watchdog::Listen(void) { return lpipe[0]; } WatchdogChannel::WatchdogChannel(int timeout) { id_ = Watchdog::Instance().Open(timeout); } WatchdogChannel::~WatchdogChannel(void) { Watchdog::Instance().Close(id_); } void WatchdogChannel::Kick(void) { Watchdog::Instance().Kick(id_); } WatchdogListener::WatchdogListener(void): instance_(Watchdog::Instance()),last((time_t)(-1)) { } bool WatchdogListener::Listen(int limit, bool& error) { error = false; int h = instance_.Listen(); if(h == -1) return !(error = true); time_t out = (time_t)(-1); // when to leave if(limit >= 0) out = ::time(NULL) + limit; int to = 0; // initailly just check if something already arrived for(;;) { pollfd fd; fd.fd = h; fd.events = POLLIN; fd.revents = 0; int err = ::poll(&fd, 1, to); // process errors if((err < 0) && (errno != EINTR)) break; // unrecoverable error if(err > 0) { // unexpected results if(err != 1) break; if(!(fd.revents & POLLIN)) break; }; time_t now = ::time(NULL); time_t next = (time_t)(-1); // when to timeout if(err == 1) { // something arrived char c; ::read(fd.fd,&c,1); last = now; next = now + WATCHDOG_TEST_INTERVAL; } else { // check timeout if(last != (time_t)(-1)) next = last + WATCHDOG_TEST_INTERVAL; if((next != (time_t)(-1)) && (((int)(next-now)) <= 0)) return true; } // check for time limit if((limit >= 0) && (((int)(out-now)) <= 0)) return false; // prepare timeout for poll to = WATCHDOG_TEST_INTERVAL; if(next != (time_t)(-1)) { int tto = next-now; if(tto < to) to = tto; } if(limit >= 0) { int tto = out-now; if(tto < to) to = tto; } if(to < 0) to = 0; } // communication failure error = true; return false; } bool WatchdogListener::Listen(void) { bool error; return Listen(-1,error); } } nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/DateTime.cpp0000644000000000000000000000012313214315176023147 xustar000000000000000027 mtime=1513200254.761812 27 atime=1513200574.912706 29 ctime=1513200658.85073343 nordugrid-arc-5.4.2/src/hed/libs/common/DateTime.cpp0000644000175000002070000005761313214315176023231 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "DateTime.h" #ifndef HAVE_TIMEGM time_t timegm(struct tm *tm) { bool found; std::string tz = Arc::GetEnv("TZ", found); Arc::SetEnv("TZ", "UTC"); tzset(); tm->tm_isdst = -1; time_t ret = mktime(tm); if (found) Arc::SetEnv("TZ", tz); else Arc::UnsetEnv("TZ"); tzset(); return ret; } #endif #ifndef HAVE_LOCALTIME_R struct tm* localtime_r(const time_t *timep, struct tm *result) { struct tm *TM = localtime(timep); *result = *TM; return result; } #endif #ifndef HAVE_GMTIME_R struct tm* gmtime_r(const time_t *timep, struct tm *result) { struct tm *TM = gmtime(timep); *result = *TM; return result; } #endif namespace Arc { static Logger dateTimeLogger(Logger::getRootLogger(), "DateTime"); TimeFormat Time::time_format = UserTime; Time::Time() : gtime(0), gnano(0) { timeval tv; if(gettimeofday(&tv, NULL) != 0) { gtime = time(NULL); } else { gtime = tv.tv_sec; gnano = tv.tv_usec * 1000; } } Time::Time(time_t time) : gtime(time), gnano(0) {} Time::Time(time_t time, uint32_t nanosec) : gtime(time), gnano(nanosec) {} Time::Time(const std::string& timestring) : gtime(-1), gnano(0) { 
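// This constructor accepts several textual forms: ISO 8601 / MDS style
// ("2006-12-03T10:32:12Z", "20061203103212Z", optional fractional seconds and
// numeric zone offsets), 24-character C asctime() output, and 29-character
// RFC 1123 dates as used by HTTP. A brief usage sketch follows; it is
// illustrative only (#if 0, not compiled) and the header path is an assumption.
#if 0
#include <arc/DateTime.h>   // assumed location of Arc::Time

static void time_parsing_example() {
  Arc::Time t1("2006-12-03T10:32:12Z");           // ISO 8601, UTC
  Arc::Time t2("2006-12-03 11:32:12+01:00");      // ISO 8601 with zone offset
  Arc::Time t3("Sun Dec 03 10:32:12 2006");       // C asctime(), local time zone
  Arc::Time t4("Sun, 03 Dec 2006 10:32:12 GMT");  // RFC 1123 (HTTP), UTC
  // t1, t2 and t4 denote the same instant; a string that cannot be parsed
  // leaves the value at -1 and logs an ERROR.
}
#endif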
if (timestring.empty()) { dateTimeLogger.msg(ERROR, "Empty string"); return; } if (isdigit(timestring[0])) { tm timestr; std::string::size_type pos = 0; if (sscanf(timestring.substr(pos, 10).c_str(), "%4d-%2d-%2d", &timestr.tm_year, &timestr.tm_mon, &timestr.tm_mday) == 3) pos += 10; else if (sscanf(timestring.substr(pos, 8).c_str(), "%4d%2d%2d", &timestr.tm_year, &timestr.tm_mon, &timestr.tm_mday) == 3) pos += 8; else { dateTimeLogger.msg(ERROR, "Can not parse date: %s", timestring); return; } timestr.tm_year -= 1900; timestr.tm_mon--; if (timestring[pos] == 'T' || timestring[pos] == ' ') pos++; if (sscanf(timestring.substr(pos, 8).c_str(), "%2d:%2d:%2d", &timestr.tm_hour, &timestr.tm_min, &timestr.tm_sec) == 3) pos += 8; else if (sscanf(timestring.substr(pos, 6).c_str(), "%2d%2d%2d", &timestr.tm_hour, &timestr.tm_min, &timestr.tm_sec) == 3) pos += 6; else { dateTimeLogger.msg(ERROR, "Can not parse time: %s", timestring); return; } // skip fraction of second if (timestring[pos] == '.') { pos++; while (isdigit(timestring[pos])) pos++; } if (timestring[pos] == 'Z') { pos++; gtime = timegm(&timestr); } else if (timestring[pos] == '+' || timestring[pos] == '-') { bool tzplus = (timestring[pos] == '+'); pos++; int tzh, tzm; if (sscanf(timestring.substr(pos, 5).c_str(), "%2d:%2d", &tzh, &tzm) == 2) pos += 5; else if (sscanf(timestring.substr(pos, 4).c_str(), "%2d%2d", &tzh, &tzm) == 2) pos += 4; else { dateTimeLogger.msg(ERROR, "Can not parse time zone offset: %s", timestring); return; } gtime = timegm(&timestr); if (gtime != -1) { if (tzplus) gtime -= tzh * 3600 + tzm * 60; else gtime += tzh * 3600 + tzm * 60; } } else { timestr.tm_isdst = -1; gtime = mktime(&timestr); } if (timestring.size() != pos) { dateTimeLogger.msg(ERROR, "Illegal time format: %s", timestring); return; } } else if (timestring.length() == 24) { // C time tm timestr; char day[4]; char month[4]; if (sscanf(timestring.c_str(), "%3s %3s %2d %2d:%2d:%2d %4d", day, month, &timestr.tm_mday, &timestr.tm_hour, &timestr.tm_min, &timestr.tm_sec, &timestr.tm_year) != 7) { dateTimeLogger.msg(ERROR, "Illegal time format: %s", timestring); return; } timestr.tm_year -= 1900; if (strncmp(month, "Jan", 3) == 0) timestr.tm_mon = 0; else if (strncmp(month, "Feb", 3) == 0) timestr.tm_mon = 1; else if (strncmp(month, "Mar", 3) == 0) timestr.tm_mon = 2; else if (strncmp(month, "Apr", 3) == 0) timestr.tm_mon = 3; else if (strncmp(month, "May", 3) == 0) timestr.tm_mon = 4; else if (strncmp(month, "Jun", 3) == 0) timestr.tm_mon = 5; else if (strncmp(month, "Jul", 3) == 0) timestr.tm_mon = 6; else if (strncmp(month, "Aug", 3) == 0) timestr.tm_mon = 7; else if (strncmp(month, "Sep", 3) == 0) timestr.tm_mon = 8; else if (strncmp(month, "Oct", 3) == 0) timestr.tm_mon = 9; else if (strncmp(month, "Nov", 3) == 0) timestr.tm_mon = 10; else if (strncmp(month, "Dec", 3) == 0) timestr.tm_mon = 11; else { dateTimeLogger.msg(ERROR, "Can not parse month: %s", month); return; } timestr.tm_isdst = -1; gtime = mktime(&timestr); } else if (timestring.length() == 29) { // RFC 1123 time (used by HTTP protocol) tm timestr; char day[4]; char month[4]; if (sscanf(timestring.c_str(), "%3s, %2d %3s %4d %2d:%2d:%2d GMT", day, &timestr.tm_mday, month, &timestr.tm_year, &timestr.tm_hour, &timestr.tm_min, &timestr.tm_sec) != 7) { dateTimeLogger.msg(ERROR, "Illegal time format: %s", timestring); return; } timestr.tm_year -= 1900; if (strncmp(month, "Jan", 3) == 0) timestr.tm_mon = 0; else if (strncmp(month, "Feb", 3) == 0) timestr.tm_mon = 1; else if (strncmp(month, "Mar", 3) == 0) timestr.tm_mon = 2; else if (strncmp(month, "Apr", 3) == 0) timestr.tm_mon = 3; else if (strncmp(month, "May", 3) == 0)
timestr.tm_mon = 4; else if (strncmp(month, "Jun", 3) == 0) timestr.tm_mon = 5; else if (strncmp(month, "Jul", 3) == 0) timestr.tm_mon = 6; else if (strncmp(month, "Aug", 3) == 0) timestr.tm_mon = 7; else if (strncmp(month, "Sep", 3) == 0) timestr.tm_mon = 8; else if (strncmp(month, "Oct", 3) == 0) timestr.tm_mon = 9; else if (strncmp(month, "Nov", 3) == 0) timestr.tm_mon = 10; else if (strncmp(month, "Dec", 3) == 0) timestr.tm_mon = 11; else { dateTimeLogger.msg(ERROR, "Can not parse month: %s", month); return; } gtime = timegm(&timestr); } if (gtime == -1) dateTimeLogger.msg(ERROR, "Illegal time format: %s", timestring); } void Time::SetTime(time_t time) { gtime = time; gnano = 0; } void Time::SetTime(time_t time, uint32_t nanosec) { gtime = time; gnano = nanosec; } time_t Time::GetTime() const { return gtime; } time_t Time::GetTimeNanoseconds() const { return gnano; } void Time::SetFormat(const TimeFormat& format) { time_format = format; } TimeFormat Time::GetFormat() { return time_format; } Time::operator std::string() const { return str(); } std::string Time::str(const TimeFormat& format) const { const char *day[] = { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" }; // C week starts on Sunday - just live with it... const char *month[] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" }; switch (format) { case ASCTime: // Day Mon DD HH:MM:SS YYYY { tm tmtime; localtime_r(&gtime, &tmtime); std::stringstream ss; ss << std::setfill('0'); ss << day[tmtime.tm_wday] << ' ' << month[tmtime.tm_mon] << ' ' << std::setw(2) << tmtime.tm_mday << ' ' << std::setw(2) << tmtime.tm_hour << ':' << std::setw(2) << tmtime.tm_min << ':' << std::setw(2) << tmtime.tm_sec << ' ' << std::setw(4) << tmtime.tm_year + 1900; return ss.str(); } case UserTime: { tm tmtime; localtime_r(&gtime, &tmtime); std::stringstream ss; ss << std::setfill('0'); ss << std::setw(4) << tmtime.tm_year + 1900 << '-' << std::setw(2) << tmtime.tm_mon + 1 << '-' << std::setw(2) << tmtime.tm_mday << ' ' << std::setw(2) << tmtime.tm_hour << ':' << std::setw(2) << tmtime.tm_min << ':' << std::setw(2) << tmtime.tm_sec; return ss.str(); } case UserExtTime: { tm tmtime; localtime_r(&gtime, &tmtime); std::stringstream ss; ss << std::setfill('0'); ss << std::setw(4) << tmtime.tm_year + 1900 << '-' << std::setw(2) << tmtime.tm_mon + 1 << '-' << std::setw(2) << tmtime.tm_mday << ' ' << std::setw(2) << tmtime.tm_hour << ':' << std::setw(2) << tmtime.tm_min << ':' << std::setw(2) << tmtime.tm_sec << '.' << std::setw(6) << gnano/1000; return ss.str(); } case ElasticTime: { tm tmtime; localtime_r(&gtime, &tmtime); std::stringstream ss; ss << std::setfill('0'); ss << std::setw(4) << tmtime.tm_year + 1900 << '-' << std::setw(2) << tmtime.tm_mon + 1 << '-' << std::setw(2) << tmtime.tm_mday << ' ' << std::setw(2) << tmtime.tm_hour << ':' << std::setw(2) << tmtime.tm_min << ':' << std::setw(2) << tmtime.tm_sec << '.'
<< std::setw(3) << gnano/1000000; return ss.str(); } case MDSTime: { tm tmtime; gmtime_r(&gtime, &tmtime); std::stringstream ss; ss << std::setfill('0'); ss << std::setw(4) << tmtime.tm_year + 1900 << std::setw(2) << tmtime.tm_mon + 1 << std::setw(2) << tmtime.tm_mday << std::setw(2) << tmtime.tm_hour << std::setw(2) << tmtime.tm_min << std::setw(2) << tmtime.tm_sec << 'Z'; return ss.str(); } case ISOTime: { tm tmtime; localtime_r(&gtime, &tmtime); time_t tzoffset = timegm(&tmtime) - gtime; std::stringstream ss; ss << std::setfill('0'); ss << std::setw(4) << tmtime.tm_year + 1900 << '-' << std::setw(2) << tmtime.tm_mon + 1 << '-' << std::setw(2) << tmtime.tm_mday << 'T' << std::setw(2) << tmtime.tm_hour << ':' << std::setw(2) << tmtime.tm_min << ':' << std::setw(2) << tmtime.tm_sec << (tzoffset < 0 ? '-' : '+') << std::setw(2) << abs(tzoffset) / Time::HOUR << ':' << std::setw(2) << (abs(tzoffset) % Time::HOUR) / 60; return ss.str(); } case UTCTime: { tm tmtime; gmtime_r(&gtime, &tmtime); std::stringstream ss; ss << std::setfill('0'); ss << std::setw(4) << tmtime.tm_year + 1900 << '-' << std::setw(2) << tmtime.tm_mon + 1 << '-' << std::setw(2) << tmtime.tm_mday << 'T' << std::setw(2) << tmtime.tm_hour << ':' << std::setw(2) << tmtime.tm_min << ':' << std::setw(2) << tmtime.tm_sec << 'Z'; return ss.str(); } case RFC1123Time: { tm tmtime; gmtime_r(&gtime, &tmtime); std::stringstream ss; ss << std::setfill('0'); ss << day[tmtime.tm_wday] << ", " << std::setw(2) << tmtime.tm_mday << ' ' << month[tmtime.tm_mon] << ' ' << std::setw(4) << tmtime.tm_year + 1900 << ' ' << std::setw(2) << tmtime.tm_hour << ':' << std::setw(2) << tmtime.tm_min << ':' << std::setw(2) << tmtime.tm_sec << " GMT"; return ss.str(); } case EpochTime: { return tostring(gtime); } } return ""; } bool Time::operator<(const Time& othertime) const { if(gtime == othertime.gtime) return gnano < othertime.gnano; return gtime < othertime.gtime; } bool Time::operator>(const Time& othertime) const { if(gtime == othertime.gtime) return gnano > othertime.gnano; return gtime > othertime.gtime; } bool Time::operator<=(const Time& othertime) const { if(gtime == othertime.gtime) return gnano <= othertime.gnano; return gtime <= othertime.gtime; } bool Time::operator>=(const Time& othertime) const { if(gtime == othertime.gtime) return gnano >= othertime.gnano; return gtime >= othertime.gtime; } bool Time::operator==(const Time& othertime) const { return (gtime == othertime.gtime) && (gnano == othertime.gnano); } bool Time::operator!=(const Time& othertime) const { return (gtime != othertime.gtime) || (gnano != othertime.gnano); } Time Time::operator+(const Period& duration) const { time_t t; uint32_t n; t = gtime + duration.GetPeriod(); n = gnano + duration.GetPeriodNanoseconds(); t += n / 1000000000; n = n % 1000000000; return (Time(t,n)); } Time Time::operator-(const Period& duration) const { time_t t; uint32_t n = 0; t = gtime - duration.GetPeriod(); if(duration.GetPeriodNanoseconds() > gnano) { --t; n = 1000000000; } n += gnano - duration.GetPeriodNanoseconds(); return (Time(t,n)); } Period Time::operator-(const Time& other) const { time_t t; uint32_t n = 0; t = gtime - other.gtime; if(gnano < other.gnano) { --t; n = 1000000000; } n += gnano - other.gnano; return (Period(t,n)); } Time& Time::operator=(time_t newtime) { gtime = newtime; gnano = 0; return *this; } Time& Time::operator=(const Time& newtime) { gtime = newtime.gtime; gnano = newtime.gnano; return *this; } Time& Time::operator=(const char* newtime) { return
operator=(std::string(newtime)); } Time& Time::operator=(const std::string& newtime) { return *this = Arc::Time(newtime); } std::ostream& operator<<(std::ostream& out, const Time& time) { return (out << time.str()); } std::string TimeStamp(const TimeFormat& format) { Time now; return now.str(format); } std::string TimeStamp(Time newtime, const TimeFormat& format) { return newtime.str(format); } Period::Period() : seconds(0), nanoseconds(0) {} Period::Period(time_t sec) : seconds(sec), nanoseconds(0) {} Period::Period(time_t sec, uint32_t nanosec ) : seconds(sec), nanoseconds(nanosec) {} Period::Period(const std::string& period, PeriodBase base) : seconds(0), nanoseconds(0) { if (period.empty()) { dateTimeLogger.msg(ERROR, "Empty string"); return; } if (period[0] == 'P') { // ISO duration std::string::size_type pos = 1; bool min = false; // months or minutes? while (pos < period.size()) { if (period[pos] == 'T') min = true; else { std::string::size_type pos2 = pos; while (pos2 < period.size() && isdigit(period[pos2])) pos2++; if (pos2 == pos || pos2 == period.size()) { dateTimeLogger.msg(ERROR, "Invalid ISO duration format: %s", period); seconds = 0; return; } int num = stringtoi(period.substr(pos, pos2 - pos)); pos = pos2; switch (period[pos]) { case 'Y': seconds += num * Time::YEAR; break; case 'W': seconds += num * Time::WEEK; min = true; break; case 'D': seconds += num * Time::DAY; min = true; break; case 'H': seconds += num * Time::HOUR; min = true; break; case 'M': if (min) seconds += num * 60; else { seconds += num * Time::MONTH; min = true; } break; case 'S': seconds += num; break; default: dateTimeLogger.msg(ERROR, "Invalid ISO duration format: %s", period); seconds = 0; return; break; } } pos++; } } else { // "free" format std::string::size_type pos = std::string::npos; int len = 0; for (std::string::size_type i = 0; i != period.length(); i++) { if (isdigit(period[i])) { if (pos == std::string::npos) { pos = i; len = 0; } len++; } else if (pos != std::string::npos) { switch (period[i]) { case 'w': case 'W': seconds += stringtoi(period.substr(pos, len)) * Time::WEEK; pos = std::string::npos; base = PeriodDays; break; case 'd': case 'D': seconds += stringtoi(period.substr(pos, len)) * Time::DAY; pos = std::string::npos; base = PeriodHours; break; case 'h': case 'H': seconds += stringtoi(period.substr(pos, len)) * Time::HOUR; pos = std::string::npos; base = PeriodMinutes; break; case 'm': case 'M': seconds += stringtoi(period.substr(pos, len)) * 60; pos = std::string::npos; base = PeriodSeconds; break; case 's': case 'S': seconds += stringtoi(period.substr(pos, len)); pos = std::string::npos; base = PeriodMiliseconds; break; case ' ': break; default: dateTimeLogger.msg(ERROR, "Invalid period string: %s", period); seconds = 0; return; break; } } } if (pos != std::string::npos) { uint64_t n; uint32_t nn = 0; if(stringto(period.substr(pos, len), n)) { switch (base) { case PeriodNanoseconds: nn = n % 1000000000; n /= 1000000000; break; case PeriodMicroseconds: nn = (n % 1000000) * 1000; n /= 1000000; break; case PeriodMiliseconds: nn = (n % 1000) * 1000000; n /= 1000; break; case PeriodSeconds: break; case PeriodMinutes: n *= 60; break; case PeriodHours: n *= Time::HOUR; break; case PeriodDays: n *= Time::DAY; break; case PeriodWeeks: n *= Time::WEEK; break; } seconds += n; nanoseconds += nn; } } } } Period& Period::operator=(time_t length) { seconds = length; nanoseconds = 0; return *this; } Period& Period::operator=(const Period& newperiod) { seconds = newperiod.seconds; nanoseconds = 
newperiod.nanoseconds; return *this; } void Period::SetPeriod(time_t sec) { seconds = sec; nanoseconds = 0; } void Period::SetPeriod(time_t sec, uint32_t nanosec) { seconds = sec; nanoseconds = nanosec; } time_t Period::GetPeriod() const { return seconds; } time_t Period::GetPeriodNanoseconds() const { return nanoseconds; } const sigc::slot* Period::istr() const { const_cast(this)->slot = sigc::mem_fun(this, &Arc::Period::IStr); return &slot; } const char* Period::IStr() const { time_t remain = seconds; std::stringstream ss; /* Size of year, month and even day is variable. To avoid ambiguity let's keep only time parts. if (remain >= Time::YEAR) { ss << remain / Time::YEAR << " " << FindNTrans("year", "years", remain / Time::YEAR); remain %= Time::YEAR; } if (remain >= Time::MONTH) { if (remain != seconds) ss << " "; ss << remain / Time::MONTH << " " << FindNTrans("month", "months", remain / Time::MONTH); remain %= Time::MONTH; } if (remain >= Time::DAY) { if (remain != seconds) ss << " "; ss << remain / Time::DAY << " " << FindNTrans("day", "days", remain / Time::DAY); remain %= Time::DAY; } */ if (remain >= Time::HOUR) { if (remain != seconds) ss << " "; ss << remain / Time::HOUR << " " << FindNTrans("hour", "hours", remain / Time::HOUR); remain %= Time::HOUR; } if (remain >= 60) { if (remain != seconds) ss << " "; ss << remain / 60 << " " << FindNTrans("minute", "minutes", remain / 60); remain %= 60; } if ((remain >= 1) || (seconds == 0)) { if (remain != seconds) ss << " "; ss << remain << " " << FindNTrans("second", "seconds", remain); } const_cast(this)->is = ss.str(); return is.c_str(); } Period::operator std::string() const { time_t remain = seconds; std::stringstream ss; ss << 'P'; /* Size of year, month and even day is variable. To avoid ambiguity let's keep only time parts. 
if (remain >= Time::YEAR) { ss << remain / Time::YEAR << 'Y'; remain %= Time::YEAR; } if (remain >= Time::MONTH) { ss << remain / Time::MONTH << 'M'; remain %= Time::MONTH; } if (remain >= Time::DAY) { ss << remain / Time::DAY << 'D'; remain %= Time::DAY; } */ if (remain) { ss << 'T'; if (remain >= Time::HOUR) { ss << remain / Time::HOUR << 'H'; remain %= Time::HOUR; } if (remain >= 60) { ss << remain / 60 << 'M'; remain %= 60; } if ((remain >= 1) || (seconds == 0)) ss << remain << 'S'; } else { ss << "T0S"; } return ss.str(); } bool Period::operator<(const Period& othertime) const { if(seconds != othertime.seconds) return seconds < othertime.seconds; return nanoseconds < othertime.nanoseconds; } bool Period::operator>(const Period& othertime) const { if(seconds != othertime.seconds) return seconds > othertime.seconds; return nanoseconds > othertime.nanoseconds; } bool Period::operator<=(const Period& othertime) const { if(seconds != othertime.seconds) return seconds <= othertime.seconds; return nanoseconds <= othertime.nanoseconds; } bool Period::operator>=(const Period& othertime) const { if(seconds != othertime.seconds) return seconds >= othertime.seconds; return nanoseconds >= othertime.nanoseconds; } bool Period::operator==(const Period& othertime) const { return (seconds == othertime.seconds) && (nanoseconds == othertime.nanoseconds); } bool Period::operator!=(const Period& othertime) const { return (seconds != othertime.seconds) || (nanoseconds != othertime.nanoseconds); } Period& Period::operator+=(const Period&otherPeriod) { nanoseconds += otherPeriod.nanoseconds; seconds += otherPeriod.seconds + nanoseconds/1000000000; nanoseconds = nanoseconds%1000000000; return *this; } std::ostream& operator<<(std::ostream& out, const Period& period) { return (out << (std::string)period); } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/FileLock.cpp0000644000000000000000000000012412122353571023142 xustar000000000000000027 mtime=1363793785.538369 27 atime=1513200574.906706 30 ctime=1513200658.851733442 nordugrid-arc-5.4.2/src/hed/libs/common/FileLock.cpp0000644000175000002070000002266112122353571023216 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include // NOTE: On Solaris errno is not working properly if cerrno is included first #include #include #include #include #include #include #ifdef WIN32 #define NOGDI #include #endif #include #include #include #include #include "FileLock.h" namespace Arc { const int FileLock::DEFAULT_LOCK_TIMEOUT = 30; const std::string FileLock::LOCK_SUFFIX = ".lock"; Logger FileLock::logger(Logger::getRootLogger(), "FileLock"); FileLock::FileLock(const std::string& filename, unsigned int timeout, bool use_pid) : filename(filename), lock_file(filename + LOCK_SUFFIX), timeout(timeout), use_pid(use_pid), pid(""), hostname("") { if (use_pid) { // get our hostname and pid char host[256]; if (gethostname(host, sizeof(host)) != 0) { logger.msg(WARNING, "Cannot determine hostname from gethostname()"); } else { host[sizeof(host)-1] = 0; hostname = host; } int pid_i = getpid(); pid = Arc::tostring(pid_i); } } bool FileLock::acquire() { bool lock_removed = false; return acquire(lock_removed); } bool FileLock::acquire(bool& lock_removed) { return acquire_(lock_removed); } bool FileLock::write_pid(int h) { if (!use_pid) return true; std::string buf = pid + "@" + hostname; std::string::size_type p = 0; for (;p timeout) { logger.msg(VERBOSE, "Timeout has expired, will remove lock file %s", 
lock_file); // TODO: kill the process holding the lock, only if we know it was the original // process which created it if (remove(lock_file.c_str()) != 0 && errno != ENOENT) { logger.msg(ERROR, "Failed to remove stale lock file %s: %s", lock_file, StrError(errno)); return false; } // lock has expired and has been removed. Call acquire() again lock_removed = true; return acquire_(lock_removed); } // lock is still valid, check if we own it int lock_pid = check(false); if (lock_pid == 0) { // safer to wait until lock expires than risk corruption logger.msg(INFO, "This process already owns the lock on %s", filename); } else if (lock_pid != -1) { #ifndef WIN32 // check if the pid owning the lock is still running - if not we can claim the lock if (kill(lock_pid, 0) != 0 && errno == ESRCH) { logger.msg(VERBOSE, "The process owning the lock on %s is no longer running, will remove lock", filename); if (remove(lock_file.c_str()) != 0 && errno != ENOENT) { logger.msg(ERROR, "Failed to remove file %s: %s", lock_file, StrError(errno)); return false; } // call acquire() again lock_removed = true; return acquire_(lock_removed); } #endif } logger.msg(VERBOSE, "The file %s is currently locked with a valid lock", filename); return false; } // if we get to here we have acquired the lock return true; } bool FileLock::release(bool force) { if (!force && check(true) != 0) return false; // delete the lock if (remove(lock_file.c_str()) != 0 && errno != ENOENT) { logger.msg(ERROR, "Failed to unlock file with lock %s: %s", lock_file, StrError(errno)); return false; } return true; } int FileLock::check(bool log_error) { LogLevel log_level = (log_error ? ERROR : INFO); // check for existence of lock file struct stat fileStat; if (!FileStat(lock_file, &fileStat, false)) { if (errno == ENOENT) logger.msg(log_level, "Lock file %s doesn't exist", lock_file); else logger.msg(log_level, "Error listing lock file %s: %s", lock_file, StrError(errno)); return -1; } if (use_pid) { // an empty lock was created while we held the lock if (fileStat.st_size == 0) { logger.msg(log_level, "Found unexpected empty lock file %s. 
Must go back to acquire()", lock_file); return -1; } // check the lock file's pid and hostname matches ours std::list lock_info; if (!FileRead(lock_file, lock_info)) { logger.msg(log_level, "Error reading lock file %s: %s", lock_file, StrError(errno)); return -1; } if (lock_info.size() != 1 || lock_info.front().find('@') == std::string::npos) { logger.msg(log_level, "Error with formatting in lock file %s", lock_file); return -1; } std::vector lock_bits; tokenize(trim(lock_info.front()), lock_bits, "@"); // check hostname if given if (lock_bits.size() == 2) { std::string lock_host = lock_bits.at(1); if (!lock_host.empty() && lock_host != hostname) { logger.msg(VERBOSE, "Lock %s is owned by a different host (%s)", lock_file, lock_host); return -1; } } // check pid std::string lock_pid = lock_bits.at(0); if (lock_pid != pid) { int lock_pid_i(stringtoi(lock_pid)); if (lock_pid_i == 0) { logger.msg(log_level, "Badly formatted pid %s in lock file %s", lock_pid, lock_file); return -1; } logger.msg(log_level, "Another process (%s) owns the lock on file %s", lock_pid, filename); return lock_pid_i; } } return 0; } std::string FileLock::getLockSuffix() { return LOCK_SUFFIX; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/Thread.h0000644000000000000000000000012312735425017022332 xustar000000000000000026 mtime=1467361807.53907 27 atime=1513200574.956707 30 ctime=1513200658.821733075 nordugrid-arc-5.4.2/src/hed/libs/common/Thread.h0000644000175000002070000004555412735425017022415 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_THREAD_H__ #define __ARC_THREAD_H__ #include #include namespace Arc { /** \addtogroup common * @{ */ class SimpleCondition; class SimpleCounter; // This module provides convenient helpers for Glibmm interface for thread // management. So far it takes care of automatic initialization // of threading environment and creation of simple detached threads. // Always use it instead of glibmm/thread.h and keep among first // includes. It safe to use it multiple times and to include it both // from source files and other include files. /** \cond Defines size of stack assigned to every new thread. It seems like MacOS has very small stack per thread by default. So it's safer to have bigger one defined. If this value is set to 0 default value will be used. */ const size_t thread_stacksize = (16 * 1024 * 1024); /** \endcond */ /// Helper function to create simple thread. /** It takes care of all the peculiarities of the Glib::Thread API. It runs function 'func' with argument 'arg' in a separate thread. If count parameter is not NULL then count will be incremented before this function returns and then decremented when thread finishes. \return true on success. */ bool CreateThreadFunction(void (*func)(void*), void *arg, SimpleCounter* count = NULL); /** \cond Internal class used to map glib thread ids (pointer addresses) to an incremental counter, for easier debugging. */ class ThreadId { private: Glib::Mutex mutex; std::map thread_ids; unsigned long int thread_no; ThreadId(); public: static ThreadId& getInstance(); /// Called at beginning of ThreadArgument.thread() to add thread id to map void add(); /// Called at end of ThreadArgument.thread() to remove thread id from map void remove(); /// Called by logger to get id of current thread unsigned long int get(); }; /** \endcond */ class ThreadData; /// Base class for per-thread object. 
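// CreateThreadFunction() above starts a detached thread running func(arg); when
// a SimpleCounter is supplied it is incremented before the call returns and
// decremented when the thread exits, so the creator can wait for completion.
// A minimal usage sketch follows; it is illustrative only (#if 0, not compiled)
// and worker()/example() are hypothetical names, not part of this header.
#if 0
#include <arc/Thread.h>

static void worker(void* arg) {
  int* value = static_cast<int*>(arg);
  ++(*value);                  // ... do some work on the shared data ...
}

static void example() {
  static int data = 0;
  Arc::SimpleCounter counter;
  if (Arc::CreateThreadFunction(&worker, &data, &counter)) {
    counter.wait();            // block until the worker thread has finished
  }
}
#endif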
/** Classes inherited from this one are attached to current thread under specified key and destroyed only when thread ends or object is replaced by another one with same key. \headerfile Thread.h arc/Thread.h */ class ThreadDataItem { friend class ThreadData; private: ThreadDataItem(const ThreadDataItem& it); protected: virtual ~ThreadDataItem(void); public: /// Dummy constructor which does nothing. /** To make object usable one of the Attach(...) methods must be used. */ ThreadDataItem(void); /// Creates instance and attaches it to current thread under key. /** If supplied key is empty random one is generated and stored in key variable. */ ThreadDataItem(std::string& key); /// Creates instance and attaches it to current thread under key. ThreadDataItem(const std::string& key); /// Attaches object to current thread under key. /** If supplied key is empty random one is generated and stored in key variable. This method must be used only if object was created using dummy constructor. */ void Attach(std::string& key); /// Attaches object to current thread under key. /** This method must be used only if object was created using dummy constructor. */ void Attach(const std::string& key); /// Retrieves object attached to thread under key. /** \return NULL if no such obejct. */ static ThreadDataItem* Get(const std::string& key); /// Creates copy of object. /** This method is called when a new thread is created from the current thread. It is called in the new thread, so the new object - if created - gets attached to the new thread. If the object is not meant to be inherited by new threads then this method should do nothing. */ virtual void Dup(void); }; /// Simple triggered condition. /** Provides condition and semaphor objects in one element. \headerfile Thread.h arc/Thread.h */ class SimpleCondition { private: Glib::Cond cond_; Glib::Mutex lock_; unsigned int flag_; unsigned int waiting_; public: SimpleCondition(void) : flag_(0), waiting_(0) {} ~SimpleCondition(void) { /* race condition ? */ broadcast(); } /// Acquire semaphor. void lock(void) { lock_.lock(); } /// Release semaphor. void unlock(void) { lock_.unlock(); } /** Signal about condition. This overrides broadcast(). */ void signal(void) { lock_.lock(); flag_ = 1; cond_.signal(); lock_.unlock(); } /// Signal about condition without using semaphor. /** Call it *only* with lock acquired. */ void signal_nonblock(void) { flag_ = 1; cond_.signal(); } /// Signal about condition to all waiting threads. /** If there are no waiting threads, it works like signal(). */ void broadcast(void) { lock_.lock(); flag_ = waiting_?waiting_:1; cond_.broadcast(); lock_.unlock(); } /// Wait for condition. void wait(void) { lock_.lock(); ++waiting_; while (!flag_) cond_.wait(lock_); --waiting_; --flag_; lock_.unlock(); } /// Wait for condition without using semaphor. /** Call it *only* with lock acquired. */ void wait_nonblock(void) { ++waiting_; while (!flag_) cond_.wait(lock_); --waiting_; --flag_; } /// Wait for condition no longer than t milliseconds. /** \return false if timeout occurred */ bool wait(int t) { lock_.lock(); Glib::TimeVal etime; etime.assign_current_time(); etime.add_milliseconds(t); bool res(true); ++waiting_; while (!flag_) { res = cond_.timed_wait(lock_, etime); if (!res) break; } --waiting_; if(res) --flag_; lock_.unlock(); return res; } /// Reset object to initial state. void reset(void) { lock_.lock(); flag_ = 0; lock_.unlock(); } /// This method is meant to be used only after fork. 
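// SimpleCondition above couples a mutex, a condition variable and a counter:
// signal()/broadcast() mark the condition, wait() blocks until it is marked and
// consumes one mark, and wait(t) gives up after t milliseconds (returning
// false). A small producer/consumer sketch follows; it is illustrative only
// (#if 0, not compiled) and the function names are ours.
#if 0
#include <arc/Thread.h>

static Arc::SimpleCondition work_ready;

static void producer(void*) {
  // ... prepare a unit of work ...
  work_ready.signal();            // wake one waiting consumer
}

static void consumer(void*) {
  if (work_ready.wait(5000)) {    // wait up to 5 seconds for the signal
    // ... consume the work ...
  } else {
    // timed out - nothing arrived
  }
}
#endif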
/** It resets state of all internal locks and variables. */ void forceReset(void) { // This function is deprecated and its body removed because // there is no safe way to reset locks after call to fork(). } }; /// Thread-safe counter with capability to wait for zero value. /** It is extendible through re-implementation of virtual methods. \headerfile Thread.h arc/Thread.h */ class SimpleCounter { private: Glib::Cond cond_; Glib::Mutex lock_; int count_; public: SimpleCounter(void) : count_(0) {} virtual ~SimpleCounter(void); /// Increment value of counter. /** \return new value. */ virtual int inc(void); /// Decrement value of counter. /** \return new value. Does not go below 0 value. */ virtual int dec(void); /// \return current value of counter. virtual int get(void) const; /// Set value of counter. /** \return new value. */ virtual int set(int v); /// Wait for zero condition. virtual void wait(void) const; /// Wait for zero condition no longer than t milliseconds. /** If t is negative - wait forever. \return false if timeout occurred. */ virtual bool wait(int t) const; /// This method is meant to be used only after fork. /** It resets state of all internal locks and variables. */ virtual void forceReset(void) { // This function is deprecated and its body removed because // there is no safe way to reset locks after call to fork(). } }; /// Mutex which allows a timeout on locking. /** \headerfile Thread.h arc/Thread.h */ class TimedMutex { private: Glib::Cond cond_; Glib::Mutex lock_; bool locked_; public: TimedMutex(void):locked_(false) { }; ~TimedMutex(void) { }; /// Lock mutex, but wait no longer than t milliseconds. /** \return false if timeout occurred. */ bool lock(int t = -1) { lock_.lock(); if(t < 0) { // infinite while(locked_) { cond_.wait(lock_); }; } else if(t > 0) { // timed Glib::TimeVal etime; etime.assign_current_time(); etime.add_milliseconds(t); while(locked_) { if(!cond_.timed_wait(lock_, etime)) break; }; }; bool res = !locked_; locked_=true; lock_.unlock(); return res; }; /// Returns true if mutex is currently locked, but does not attempt to acquire lock. bool trylock(void) { return lock(0); }; /// Release mutex. bool unlock(void) { lock_.lock(); bool res = locked_; if(res) { locked_ = false; cond_.signal(); }; lock_.unlock(); return true; }; /// This method is meant to be used only after fork. /** It resets state of all internal locks and variables. */ void forceReset(void) { // This function is deprecated and its body removed because // there is no safe way to reset locks after call to fork(). } }; /// Mutex which allows shared and exclusive locking. /** \headerfile Thread.h arc/Thread.h */ class SharedMutex { private: Glib::Cond cond_; Glib::Mutex lock_; unsigned int exclusive_; Glib::Thread* thread_; typedef std::map shared_list; shared_list shared_; void add_shared_lock(void); void remove_shared_lock(void); bool have_shared_lock(void); inline bool have_exclusive_lock(void) { if(!exclusive_) return false; if(thread_ == Glib::Thread::self()) return false; return true; }; public: SharedMutex(void):exclusive_(0),thread_(NULL) { }; ~SharedMutex(void) { }; /// Acquire a shared lock. Blocks until exclusive lock is released. void lockShared(void); /// Release a shared lock. void unlockShared(void); /// Returns true if at least one shared lock is held. bool isLockShared(void) { return (shared_.size() > 0); // Is it safe? }; /// Acquire an exclusive lock. Blocks until all shared and exclusive locks are released. void lockExclusive(void); /// Release exclusive lock. 
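// SharedMutex above implements a readers/writer lock: any number of threads may
// hold the shared side simultaneously, while the exclusive side waits for all
// shared holders to leave and blocks new ones. A usage sketch follows; it is
// illustrative only (#if 0, not compiled) and the reader()/writer() functions
// are hypothetical.
#if 0
#include <arc/Thread.h>

static Arc::SharedMutex table_lock;

static void reader() {
  table_lock.lockShared();        // many readers may enter concurrently
  // ... read the shared structure ...
  table_lock.unlockShared();
}

static void writer() {
  table_lock.lockExclusive();     // waits until all readers have left
  // ... modify the shared structure ...
  table_lock.unlockExclusive();
}
#endif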
void unlockExclusive(void); /// Returns true if the exclusive lock is held. bool isLockExclusive(void) { return (exclusive_ > 0); }; /// This method is meant to be used only after fork. /** It resets state of all internal locks and variables. */ void forceReset(void) { // This function is deprecated and its body removed because // there is no safe way to reset locks after call to fork(). }; }; /** \cond Helper class for ThreadedPointer. \headerfile Thread.h arc/Thread.h */ class ThreadedPointerBase { private: Glib::Mutex lock_; Glib::Cond cond_; unsigned int cnt_; void *ptr_; bool released_; ThreadedPointerBase(ThreadedPointerBase&); ~ThreadedPointerBase(void); public: ThreadedPointerBase(void *p); ThreadedPointerBase* add(void); void* rem(void); void* ptr(void) const { return ptr_; }; void rel(void) { released_ = true; }; unsigned int cnt(void) const { return cnt_; }; void lock(void) { lock_.lock(); }; void unlock(void) { lock_.unlock(); }; void wait(void) { cond_.wait(lock_); }; bool wait(Glib::TimeVal etime) { return cond_.timed_wait(lock_,etime); }; }; /** \endcond */ /// Wrapper for pointer with automatic destruction and multiple references. /** See for CountedPointer for description. Differently from CountedPointer this class provides thread safe destruction of referred object. But the instance of ThreadedPointer itself is not thread safe. Hence it is advisable to use different instances in different threads. \headerfile Thread.h arc/Thread.h */ template class ThreadedPointer { private: ThreadedPointerBase *object_; public: ThreadedPointer(T *p) : object_(new ThreadedPointerBase(p)) {} ThreadedPointer(const ThreadedPointer& p) : object_(p.object_->add()) {} ThreadedPointer(void) : object_(new ThreadedPointerBase(NULL)) {} ~ThreadedPointer(void) { delete((T*)(object_->rem())); } /// Assign a new ThreadedPointer from a pointer to an object. ThreadedPointer& operator=(T *p) { if (p != object_->ptr()) { delete((T*)(object_->rem())); object_ = new ThreadedPointerBase(p); } return *this; } /// Assign a new ThreadedPointer from another ThreadedPointer. ThreadedPointer& operator=(const ThreadedPointer& p) { if (p.object_->ptr() != object_->ptr()) { delete((T*)(object_->rem())); object_ = p.object_->add(); } return *this; } /// For referring to wrapped object T& operator*(void) const { return *(T*)(object_->ptr()); } /// For referring to wrapped object T* operator->(void) const { return (T*)(object_->ptr()); } /// Returns false if pointer is NULL and true otherwise. operator bool(void) const { return ((object_->ptr()) != NULL); } /// Returns true if pointer is NULL and false otherwise. bool operator!(void) const { return ((object_->ptr()) == NULL); } /// Returns true if pointers are equal bool operator==(const ThreadedPointer& p) const { return ((T*)(object_->ptr()) == (T*)(p.object_->ptr())); } /// Returns true if pointers are not equal bool operator!=(const ThreadedPointer& p) const { return ((T*)(object_->ptr()) != (T*)(p.object_->ptr())); } /// Comparison operator bool operator<(const ThreadedPointer& p) const { return ((T*)(object_->ptr()) < (T*)(p.object_->ptr())); } /// Cast to original pointer T* Ptr(void) const { return (T*)(object_->ptr()); } /// Release referred object so that it can be passed to other container /** After Release() is called referred object is will not be destroyed automatically anymore. 
*/ T* Release(void) { T* tmp = (T*)(object_->ptr()); object_->rel(); return tmp; } /// Returns number of ThreadedPointer instances referring to underlying object unsigned int Holders(void) { return object_->cnt(); } /// Waits till number of ThreadedPointer instances <= minThr or >= maxThr /* Returns current number of instances. */ unsigned int WaitOutRange(unsigned int minThr, unsigned int maxThr) { unsigned int r = 0; object_->lock(); for(;;) { r = object_->cnt(); if(r <= minThr) break; if(r >= maxThr) break; object_->wait(); }; object_->unlock(); return r; } /// Waits till number of ThreadedPointer instances <= minThr or >= maxThr /** Waits no longer than timeout milliseconds. If timeout is negative - wait forever. Returns current number of instances. */ unsigned int WaitOutRange(unsigned int minThr, unsigned int maxThr, int timeout) { if(timeout < 0) return WaitOutRange(minThr, maxThr); unsigned int r = 0; object_->lock(); Glib::TimeVal etime; etime.assign_current_time(); etime.add_milliseconds(timeout); for(;;) { r = object_->cnt(); if(r <= minThr) break; if(r >= maxThr) break; if(!object_->wait(etime)) break; }; object_->unlock(); return r; } /// Waits till number of ThreadedPointer instances >= minThr and <= maxThr /* Returns current number of instances. */ unsigned int WaitInRange(unsigned int minThr, unsigned int maxThr) { unsigned int r = 0; object_->lock(); for(;;) { r = object_->cnt(); if((r >= minThr) && (r <= maxThr)) break; object_->wait(); }; object_->unlock(); return r; } /// Waits till number of ThreadedPointer instances >= minThr and <= maxThr /** Waits no longer than timeout milliseconds. If timeout is negative - wait forever. Returns current number of instances. */ unsigned int WaitInRange(unsigned int minThr, unsigned int maxThr, int timeout) { if(timeout < 0) return WaitInRange(minThr, maxThr); unsigned int r = 0; object_->lock(); Glib::TimeVal etime; etime.assign_current_time(); etime.add_milliseconds(timeout); for(;;) { r = object_->cnt(); if((r >= minThr) && (r <= maxThr)) break; if(!object_->wait(etime)) break; }; object_->unlock(); return r; } }; /// A set of conditions, mutexes, etc. conveniently exposed to monitor running child threads and to wait till they exit. /** There are no protections against race conditions, so use it carefully. \headerfile Thread.h arc/Thread.h */ class ThreadRegistry { private: int counter_; bool cancel_; Glib::Cond cond_; Glib::Mutex lock_; public: ThreadRegistry(void); ~ThreadRegistry(void); /// Register thread as started/starting into this instance void RegisterThread(void); /// Report thread as exited void UnregisterThread(void); /// Wait for timeout milliseconds or cancel request. /** \return true if cancel request received. */ bool WaitOrCancel(int timeout); /// Wait for registered threads to exit. /** Leave after timeout milliseconds if failed. \return true if all registered threads reported their exit. */ bool WaitForExit(int timeout = -1); /// Send cancel request to registered threads void RequestCancel(void); /// This method is meant to be used only after fork. /** It resets state of all internal locks and variables. */ void forceReset(void) { // This function is deprecated and its body removed because // there is no safe way to reset locks after call to fork(). } }; /** \cond Internal function to initialize Glib thread system. Use ThreadInitializer instead. */ void GlibThreadInitialize(void); /** \endcond */ /// This class initializes the glibmm thread system. 
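// ThreadedPointer above is a reference-counted smart pointer whose last holder
// deletes the wrapped object in a thread-safe way; Holders() reports the
// current count and Release() detaches ownership. A usage sketch follows; it is
// illustrative only (#if 0, not compiled) and the Job type is hypothetical.
#if 0
#include <arc/Thread.h>
#include <string>

struct Job { std::string id; };

static void example() {
  Arc::ThreadedPointer<Job> p(new Job());   // reference count is 1
  {
    Arc::ThreadedPointer<Job> q(p);         // count is 2, same Job instance
    q->id = "job-1";
  }                                         // q destroyed, count back to 1
  // p->id is now "job-1"; the Job is deleted when p goes out of scope
}
#endif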
class ThreadInitializer { public: /// Initialise the thread system ThreadInitializer(void) { GlibThreadInitialize(); } /// This method is meant to be used only after fork. /** It resets state of all internal locks and variables. This method is deprecated. */ void forceReset(void); /// Wait for all known threads to exit. /** It can be used before exiting application to make sure no concurrent threads are running during cleanup. */ void waitExit(void); }; // This is done intentionally to make sure glibmm is // properly initialized before every module starts // using threads functionality. To make it work this // header must be included before defining any // variable/class instance using static threads-related // elements. The simplest way to do that is to use // this header instead of glibmm/thread.h static ThreadInitializer _local_thread_initializer; /** @} */ } // namespace Arc #endif /* __ARC_THREAD_H__ */ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/file_access.cpp0000644000000000000000000000012411746532427023724 xustar000000000000000027 mtime=1335538967.079094 27 atime=1513200574.893706 30 ctime=1513200658.872733699 nordugrid-arc-5.4.2/src/hed/libs/common/file_access.cpp0000644000175000002070000004576311746532427024010 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include // NOTE: On Solaris errno is not working properly if cerrno is included first #include #include #include #include #ifndef WIN32 #include #endif #include #include #include "file_access.h" typedef struct { unsigned int size; unsigned int cmd; } header_t; // How long it is allowed for controlling side to react #define COMMUNICATION_TIMEOUT (10) static bool sread_start = true; static bool sread(int s,void* buf,size_t size) { while(size) { #ifndef WIN32 struct pollfd p[1]; p[0].fd = s; p[0].events = POLLIN; p[0].revents = 0; int err = poll(p,1,sread_start?-1:(COMMUNICATION_TIMEOUT*1000)); if(err == 0) return false; if((err == -1) && (errno != EINTR)) return false; if(err == 1) { #else { #endif ssize_t l = ::read(s,buf,size); if(l <= 0) return false; size-=l; buf = (void*)(((char*)buf)+l); sread_start = false; }; }; return true; } static bool swrite(int s,const void* buf,size_t size) { while(size) { #ifndef WIN32 struct pollfd p[1]; p[0].fd = s; p[0].events = POLLOUT; p[0].revents = 0; int err = poll(p,1,COMMUNICATION_TIMEOUT*1000); if(err == 0) return false; if((err == -1) && (errno != EINTR)) return false; if(err == 1) { #else { #endif ssize_t l = ::write(s,buf,size); if(l < 0) return false; size-=l; buf = (void*)(((char*)buf)+l); }; }; return true; } static bool sread_string(int s,std::string& str,unsigned int& maxsize) { unsigned int ssize; if(sizeof(ssize) > maxsize) return false; if(!sread(s,&ssize,sizeof(ssize))) return false; maxsize -= sizeof(ssize); if(ssize > maxsize) return false; str.assign(ssize,' '); // Not nice but saves memory copying if(!sread(s,(void*)(str.c_str()),ssize)) return false; maxsize -= ssize; return true; } static bool sread_buf(int s,void* buf,unsigned int& bufsize,unsigned int& maxsize) { char dummy[1024]; unsigned int size; if(sizeof(size) > maxsize) return false; if(!sread(s,&size,sizeof(size))) return false; maxsize -= sizeof(size); if(size > maxsize) return false; if(size <= bufsize) { if(!sread(s,buf,size)) return false; bufsize = size; maxsize -= size; } else { if(!sread(s,buf,bufsize)) return false; maxsize -= bufsize; // skip rest size -= bufsize; while(size > sizeof(dummy)) { if(!sread(s,dummy,sizeof(dummy))) 
return false; size -= sizeof(dummy); maxsize -= sizeof(dummy); }; if(!sread(s,dummy,size)) return false; maxsize -= size; }; return true; } static bool swrite_result(int s,int cmd,int res,int err) { header_t header; header.cmd = cmd; header.size = sizeof(res) + sizeof(err); if(!swrite(s,&header,sizeof(header))) return -1; if(!swrite(s,&res,sizeof(res))) return -1; if(!swrite(s,&err,sizeof(err))) return -1; return true; } static bool swrite_result(int s,int cmd,int res,int err,const void* add,int addsize) { header_t header; header.cmd = cmd; header.size = sizeof(res) + sizeof(err) + addsize; if(!swrite(s,&header,sizeof(header))) return -1; if(!swrite(s,&res,sizeof(res))) return -1; if(!swrite(s,&err,sizeof(err))) return -1; if(!swrite(s,add,addsize)) return -1; return true; } static bool swrite_result(int s,int cmd,int res,int err,const void* add1,int addsize1,const void* add2,int addsize2) { header_t header; header.cmd = cmd; header.size = sizeof(res) + sizeof(err) + addsize1 + addsize2; if(!swrite(s,&header,sizeof(header))) return -1; if(!swrite(s,&res,sizeof(res))) return -1; if(!swrite(s,&err,sizeof(err))) return -1; if(!swrite(s,add1,addsize1)) return -1; if(!swrite(s,add2,addsize2)) return -1; return true; } static bool swrite_result(int s,int cmd,int res,int err,const std::string& str) { unsigned int l = str.length(); header_t header; header.cmd = cmd; header.size = sizeof(res) + sizeof(err) + sizeof(l) + str.length(); if(!swrite(s,&header,sizeof(header))) return -1; if(!swrite(s,&res,sizeof(res))) return -1; if(!swrite(s,&err,sizeof(err))) return -1; if(!swrite(s,&l,sizeof(l))) return -1; if(!swrite(s,str.c_str(),l)) return -1; return true; } static char filebuf[1024*1024*10]; static bool cleandir(const std::string& path,int& err) { errno = 0; DIR* dir = opendir(path.c_str()); if(!dir) { err = errno; return false; }; for(;;) { struct dirent* d = ::readdir(dir); if(!d) break; if(strcmp(d->d_name,".") == 0) continue; if(strcmp(d->d_name,"..") == 0) continue; std::string npath = path + "/" + d->d_name; errno = 0; if(::remove(npath.c_str()) == 0) continue; if(errno != ENOTEMPTY) { err = errno; closedir(dir); return false; }; if(!cleandir(npath,err)) { closedir(dir); return false; }; errno = 0; if(::remove(npath.c_str()) != 0) { err = errno; closedir(dir); return false; }; }; closedir(dir); err = 0; return true; } int main(int argc,char* argv[]) { #ifndef WIN32 uid_t initial_uid = getuid(); gid_t initial_gid = getgid(); #endif DIR* curdir = NULL; int curfile = -1; if(argc != 3) return -1; char* e; e = argv[1]; int sin = strtoul(argv[1],&e,10); if((e == argv[1]) || (*e != 0)) return -1; e = argv[2]; int sout = strtoul(argv[2],&e,10); if((e == argv[2]) || (*e != 0)) return -1; while(true) { header_t header; sread_start = true; if(!sread(sin,&header,sizeof(header))) break; switch(header.cmd) { case CMD_PING: { if(header.size != 0) return -1; if(!swrite(sout,&header,sizeof(header))) return -1; }; break; case CMD_SETUID: { int uid = 0; int gid = 0; int res = -1; if(header.size != (sizeof(uid)+sizeof(gid))) return -1; if(!sread(sin,&uid,sizeof(uid))) return -1; if(!sread(sin,&gid,sizeof(gid))) return -1; errno = 0; #ifndef WIN32 res = 0; seteuid(initial_uid); setegid(initial_gid); if((gid != 0) && (gid != initial_gid)) { errno = 0; res = setegid(gid); }; if((res == 0) && (uid != 0) && (uid != initial_uid)) { errno = 0; res = seteuid(uid); }; #endif if(!swrite_result(sout,header.cmd,res,errno)) return -1; }; break; case CMD_MKDIR: case CMD_MKDIRP: { mode_t mode; std::string dirname; 
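// Every request on the control pipe is framed as a header_t { size, cmd }
// followed by 'size' bytes of payload; strings inside the payload are encoded
// as a 4-byte length followed by the raw bytes (see sread_string() above).
// The sketch below shows how a peer could frame a CMD_MKDIR request using the
// header_t and CMD_* values from this file; it is illustrative only (#if 0,
// not compiled) and is not the actual controlling-side implementation.
#if 0
#include <string>
#include <sys/types.h>

static std::string frame_mkdir(const std::string& path, mode_t mode) {
  std::string payload;
  payload.append(reinterpret_cast<const char*>(&mode), sizeof(mode));
  unsigned int len = path.length();
  payload.append(reinterpret_cast<const char*>(&len), sizeof(len));
  payload.append(path);
  header_t header;
  header.cmd = CMD_MKDIR;
  header.size = payload.size();
  std::string msg(reinterpret_cast<const char*>(&header), sizeof(header));
  msg += payload;
  return msg;   // write this buffer to the helper process in one piece
}
#endif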
if(!sread(sin,&mode,sizeof(mode))) return -1; header.size -= sizeof(mode); if(!sread_string(sin,dirname,header.size)) return -1; if(header.size) return -1; errno = 0; #ifndef WIN32 int res = ::mkdir(dirname.c_str(),mode); #else int res = ::mkdir(dirname.c_str()); #endif if((res != 0) && (header.cmd == CMD_MKDIRP)) { // resursively up std::string::size_type p = dirname.length(); if(p > 0) { while(errno == ENOENT) { p = dirname.rfind('/',p-1); if(p == std::string::npos) break; if(p == 0) break; errno = 0; #ifndef WIN32 res = ::mkdir(dirname.substr(0,p).c_str(),mode); #else res = ::mkdir(dirname.substr(0,p).c_str()); #endif if(res == 0) break; }; if((res == 0) || (errno == EEXIST)) { // resursively down while(p < dirname.length()) { p = dirname.find('/',p+1); if(p == std::string::npos) p = dirname.length(); errno = 0; #ifndef WIN32 res = ::mkdir(dirname.substr(0,p).c_str(),mode); #else res = ::mkdir(dirname.substr(0,p).c_str()); #endif if(res != 0) break; }; }; }; }; if(!swrite_result(sout,header.cmd,res,errno)) return -1; }; break; case CMD_HARDLINK: case CMD_SOFTLINK: { std::string oldpath; std::string newpath; if(!sread_string(sin,oldpath,header.size)) return -1; if(!sread_string(sin,newpath,header.size)) return -1; if(header.size) return -1; int res = -1; errno = 0; #ifndef WIN32 if(header.cmd == CMD_HARDLINK) { res = ::link(oldpath.c_str(),newpath.c_str()); } else { res = ::symlink(oldpath.c_str(),newpath.c_str()); }; #endif if(!swrite_result(sout,header.cmd,res,errno)) return -1; }; break; case CMD_COPY: { mode_t mode; std::string oldpath; std::string newpath; if(!sread(sin,&mode,sizeof(mode))) return -1; header.size -= sizeof(mode); if(!sread_string(sin,oldpath,header.size)) return -1; if(!sread_string(sin,newpath,header.size)) return -1; if(header.size) return -1; int res = 0; int err = 0; errno = 0; int h_src = ::open(oldpath.c_str(),O_RDONLY,0); if(h_src != -1) { int h_dst = ::open(newpath.c_str(),O_WRONLY|O_CREAT|O_TRUNC,mode); if(h_dst != -1) { for(;;) { ssize_t l = read(h_src,filebuf,sizeof(filebuf)); if(l <= 0) { err = errno; res = l; break; }; for(size_t p = 0;pd_name; } else { res = -1; }; if(!swrite_result(sout,header.cmd,res,errno,name)) return -1; } else { if(!swrite_result(sout,header.cmd,res,EBADF)) return -1; }; }; break; case CMD_OPENFILE: { int flags; mode_t mode; std::string path; if(!sread(sin,&flags,sizeof(flags))) return -1; header.size -= sizeof(flags); if(!sread(sin,&mode,sizeof(mode))) return -1; header.size -= sizeof(mode); if(!sread_string(sin,path,header.size)) return -1; if(header.size) return -1; if(curfile != -1) ::close(curfile); errno = 0; int res = (curfile = ::open(path.c_str(),flags,mode)); if(!swrite_result(sout,header.cmd,res,errno)) return -1; }; break; case CMD_TEMPFILE: { mode_t mode; std::string path; if(!sread(sin,&mode,sizeof(mode))) return -1; header.size -= sizeof(mode); if(!sread_string(sin,path,header.size)) return -1; if(header.size) return -1; if(curfile != -1) ::close(curfile); errno = 0; int res = (curfile = mkstemp((char*)(path.c_str()))); if(res != -1) ::chmod(path.c_str(),mode); int l = path.length(); if(!swrite_result(sout,header.cmd,res,errno,&l,sizeof(l),path.c_str(),l)) return -1; }; break; case CMD_CLOSEFILE: { if(header.size) return -1; int res = 0; errno = 0; res = ::close(curfile); curfile = -1; if(!swrite_result(sout,header.cmd,res,errno)) return -1; }; break; case CMD_SEEKFILE: { off_t offset; int whence; if(!sread(sin,&offset,sizeof(offset))) return -1; header.size -= sizeof(offset); if(!sread(sin,&whence,sizeof(whence))) 
return -1; header.size -= sizeof(whence); if(header.size) return -1; errno = 0; int res = (offset = ::lseek(curfile,offset,whence)); if(!swrite_result(sout,header.cmd,res,errno,&offset,sizeof(offset))) return -1; }; break; case CMD_READFILE: { // TODO: maybe use shared memory size_t size; if(!sread(sin,&size,sizeof(size))) return -1; header.size -= sizeof(size); if(header.size) return -1; if(size > sizeof(filebuf)) size = sizeof(filebuf); errno = 0; ssize_t l = ::read(curfile,filebuf,size); int res = l; if(l < 0) l = 0; int n = l; if(!swrite_result(sout,header.cmd,res,errno,&n,sizeof(n),filebuf,l)) return -1; }; break; case CMD_WRITEFILE: { unsigned int size = sizeof(filebuf); if(!sread_buf(sin,filebuf,size,header.size)) return false; if(header.size) return -1; errno = 0; ssize_t l = ::write(curfile,filebuf,size); int res = l; if(!swrite_result(sout,header.cmd,res,errno)) return -1; }; break; case CMD_READFILEAT: { off_t offset; size_t size; if(!sread(sin,&size,sizeof(size))) return -1; header.size -= sizeof(size); if(!sread(sin,&offset,sizeof(offset))) return -1; header.size -= sizeof(offset); if(header.size) return -1; if(size > sizeof(filebuf)) size = sizeof(filebuf); ssize_t l = -1; errno = 0; #ifndef WIN32 l = ::pread(curfile,filebuf,size,offset); #else if (::lseek(curfile,offset,SEEK_SET) == offset) { l = ::read(curfile,filebuf,size); } #endif int res = l; if(l < 0) l = 0; int n = l; if(!swrite_result(sout,header.cmd,res,errno,&n,sizeof(n),filebuf,l)) return -1; }; break; case CMD_WRITEFILEAT: { off_t offset; if(!sread(sin,&offset,sizeof(offset))) return -1; header.size -= sizeof(offset); unsigned int size = sizeof(filebuf); if(!sread_buf(sin,filebuf,size,header.size)) return false; if(header.size) return -1; ssize_t l = -1; errno = 0; #ifndef WIN32 l = ::pwrite(curfile,filebuf,size,offset); #else if (::lseek(curfile,offset,SEEK_SET) == offset) { l = ::write(curfile,filebuf,size); } #endif int res = l; if(!swrite_result(sout,header.cmd,res,errno)) return -1; }; break; case CMD_FSTAT: { if(header.size) return -1; int res = 0; struct stat st; errno = 0; res = ::fstat(curfile,&st); if(!swrite_result(sout,header.cmd,res,errno,&st,sizeof(st))) return -1; }; break; case CMD_READLINK: { std::string path; if(!sread_string(sin,path,header.size)) return -1; if(header.size) return -1; int res = 0; errno = 0; int l = readlink(path.c_str(), filebuf, sizeof(filebuf)); res = l; if(l < 0) l = 0; if(!swrite_result(sout,header.cmd,res,errno,&l,sizeof(l),filebuf,l)) return -1; }; break; case CMD_FTRUNCATE: { off_t length; if(!sread(sin,&length,sizeof(length))) return -1; header.size -= sizeof(length); if(header.size) return -1; int res = 0; errno = 0; res = ::ftruncate(curfile,length); if(!swrite_result(sout,header.cmd,res,errno)) return -1; }; break; case CMD_FALLOCATE: { off_t length; if(!sread(sin,&length,sizeof(length))) return -1; header.size -= sizeof(length); if(header.size) return -1; int res = 0; int err = 0; errno = 0; #ifdef HAVE_POSIX_FALLOCATE res = posix_fallocate(curfile,0,length); err = res; length = lseek(curfile,0,SEEK_END); #else off_t olength = lseek(curfile, 0, SEEK_END); if(olength >= 0) { if(olength < length) { memset(filebuf, 0xFF, sizeof(filebuf)); while(olength < length) { size_t l = sizeof(filebuf); if (l > (length - olength)) l = length - olength; if (write(curfile, filebuf, l) == -1) { break; }; olength = lseek(curfile, 0, SEEK_END); }; }; }; err = errno; res = err; length = olength; #endif if(!swrite_result(sout,header.cmd,res,err,&length,sizeof(length))) return -1; }; 
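// CMD_READFILEAT/CMD_WRITEFILEAT above use pread()/pwrite() where available and
// fall back to lseek() followed by read()/write() on platforms without them.
// A condensed sketch of that positioned-read pattern follows; it is
// illustrative only (#if 0, not compiled) and read_at() is our own name.
#if 0
#include <unistd.h>
#include <sys/types.h>

static ssize_t read_at(int fd, void* buf, size_t count, off_t offset) {
#ifndef WIN32
  return ::pread(fd, buf, count, offset);        // atomic positioned read
#else
  if (::lseek(fd, offset, SEEK_SET) != offset)   // emulate by seeking first
    return -1;
  return ::read(fd, buf, count);
#endif
}
#endif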
break; case CMD_RENAME: { std::string oldpath; std::string newpath; if(!sread_string(sin,oldpath,header.size)) return -1; if(!sread_string(sin,newpath,header.size)) return -1; if(header.size) return -1; errno = 0; int res = rename(oldpath.c_str(), newpath.c_str()); if(!swrite_result(sout,header.cmd,res,errno)) return -1; }; break; default: return -1; }; }; return 0; } nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/Logger.h0000644000000000000000000000012413124220303022323 xustar000000000000000027 mtime=1498489027.786384 27 atime=1513200574.929707 30 ctime=1513200658.818733038 nordugrid-arc-5.4.2/src/hed/libs/common/Logger.h0000644000175000002070000006210113124220303022370 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_LOGGER__ #define __ARC_LOGGER__ #include #include #include #include #include #include #include namespace Arc { /** \addtogroup common * @{ */ /// Logging levels for tagging and filtering log messages. enum LogLevel { DEBUG = 1, ///< DEBUG level designates finer-grained informational events ///< which should only be used for debugging purposes. VERBOSE = 2, ///< VERBOSE level designates fine-grained informational events ///< that will give additional information about the application. INFO = 4, ///< INFO level designates informational messages that highlight ///< the progress of the application at coarse-grained level. WARNING = 8, ///< WARNING level designates potentially harmful situations. ERROR = 16, ///< ERROR level designates error events that might still allow ///< the application to continue running. FATAL = 32 ///< FATAL level designates very severe error events that will ///< presumably lead the application to abort. }; /// Output formats. Defines prefix for each message. enum LogFormat { /// All information about message is printed LongFormat, /// All information except domain is printed /** * \since Added in 4.0.0. **/ MediumFormat, /// Only message level is printed ShortFormat, /// Message time (microsecond precision) and time difference /// from previous message are printed. This format is mostly /// meant for profiling. DebugFormat, /// Only message is printed EmptyFormat }; /// Struct to contain LogFormat, to use with operator<<(std::ostream&, const LoggerFormat&) struct LoggerFormat { /// Make a new LoggerFormat with the given LogFormat. LoggerFormat(LogFormat format) : format(format) {}; LogFormat format; }; /// Allows printing of messages to streams using ARC Logger formatting. std::ostream& operator<<(std::ostream& os, const LoggerFormat& format); /// Printing of LogLevel values to ostreams. /** Output operator so that LogLevel values can be printed in a nicer way. */ std::ostream& operator<<(std::ostream& os, LogLevel level); /// Convert string to a LogLevel. LogLevel string_to_level(const std::string& str); /// Convert string case-insensitively to a LogLevel /** * \since Added in 4.1.0. **/ LogLevel istring_to_level(const std::string& llStr); /// Case-insensitive parsing of a string to a LogLevel with error response. /** * The method will try to parse (case-insensitive) the argument string * to a corresponding LogLevel. If the method succeeds, true will * be returned and the argument \a ll will be set to * the parsed LogLevel. If the parsing fails \c false will be * returned. The parsing succeeds if \a llStr match * (case-insensitively) one of the names of the LogLevel members. * * @param llStr a string which should be parsed to a Arc::LogLevel. 
* @param ll a Arc::LogLevel reference which will be set to the * matching Arc::LogLevel upon successful parsing. * @return \c true in case of successful parsing, otherwise \c false. * @see LogLevel */ bool istring_to_level(const std::string& llStr, LogLevel& ll); /// Same as istring_to_level except it is case-sensitive. bool string_to_level(const std::string& str, LogLevel& ll); /// Convert LogLevel to a string. std::string level_to_string(const LogLevel& level); /// Convert an old-style log level (int from 0 to 5) to a LogLevel. LogLevel old_level_to_level(unsigned int old_level); /// A class for log messages. /** This class is used to represent log messages internally. It contains the time the message was created, its level, from which domain it was sent, an identifier and the message text itself. \headerfile Logger.h arc/Logger.h */ class LogMessage { public: /// Creates a LogMessage with the specified level and message text. /** This constructor creates a LogMessage with the specified level and message text. The time is set automatically, the domain is set by the Logger to which the LogMessage is sent and the identifier is composed from the process ID and the address of the Thread object corresponding to the calling thread. @param level The level of the LogMessage. @param message The message text. */ LogMessage(LogLevel level, const IString& message); /// Creates a LogMessage with the specified attributes. /** This constructor creates a LogMessage with the specified level, message text and identifier. The time is set automatically and the domain is set by the Logger to which the LogMessage is sent. @param level The level of the LogMessage. @param message The message text. @param identifier The identifier of the LogMessage. */ LogMessage(LogLevel level, const IString& message, const std::string& identifier); /// Returns the level of the LogMessage. /** Returns the level of the LogMessage. @return The level of the LogMessage. */ LogLevel getLevel() const; protected: /// Sets the identifier of the LogMessage. /** The purpose of this method is to allow subclasses (in case there are any) to set the identifier of a LogMessage. @param identifier The identifier. */ void setIdentifier(std::string identifier); private: /// Composes a default identifier. /** This method composes a default identifier by combining the the process ID and the address of the Thread object corresponding to the calling thread. @return A default identifier. */ static std::string getDefaultIdentifier(); /// Sets the domain of the LogMessage /** This method sets the domain (origin) of the LogMessage. It is called by the Logger to which the LogMessage is sent. @param domain The domain. */ void setDomain(std::string domain); /// The time when the LogMessage was created. std::string time; /// The level (severity) of the LogMessage. LogLevel level; /// The domain (origin) of the LogMessage. std::string domain; /// An identifier that may be used for filtering. std::string identifier; /// The message text. IString message; /// Printing of LogMessages to ostreams. /** Output operator so that LogMessages can be printed conveniently by LogDestinations. */ friend std::ostream& operator<<(std::ostream& os, const LogMessage& message); /// The Logger class is a friend. /** The Logger class must have some privileges (e.g. ability to call the setDomain() method), therefore it is a friend. */ friend class Logger; }; /// A base class for log destinations. /** This class defines an interface for LogDestinations. 
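A short sketch of how the level-conversion helpers documented above are typically used, for example when turning a command-line verbosity argument into a threshold. It assumes only the declarations shown here (the bool-returning istring_to_level() overload and level_to_string()) and the arc/Logger.h header named in this file.

// Minimal sketch, assuming only the conversion helpers declared above:
// parse a user-supplied verbosity string, falling back to WARNING when
// the string does not name a valid level.
#include <iostream>
#include <string>
#include <arc/Logger.h>

Arc::LogLevel parse_verbosity(const std::string& text) {
  Arc::LogLevel level = Arc::WARNING;           // default threshold
  if (!Arc::istring_to_level(text, level)) {    // case-insensitive parse; false on failure
    std::cerr << "Unknown log level '" << text
              << "', keeping " << Arc::level_to_string(level) << std::endl;
  }
  return level;
}

The returned value can then be passed to Logger::setThreshold(), as in the Logger usage example further below.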
LogDestination objects will typically contain synchronization mechanisms and should therefore never be copied. If setlocale() has been called with a supported locale, log messages will be logged in that locale. \headerfile Logger.h arc/Logger.h */ class LogDestination { public: /// Logs a LogMessage to this LogDestination. virtual void log(const LogMessage& message) = 0; virtual ~LogDestination() {} /// Set format for this log destination. void setFormat(const LogFormat& newformat); /// Set format for any new log destination. static void setDefaultFormat(const LogFormat& newformat); /// Set a prefix for this log destination to be logged before messages. /** The prefix is prepended to messages in all formats except EmptyFormat. * \since Added in 4.0.0. */ void setPrefix(const std::string& prefix); protected: /// Default constructor. Protected since subclasses should be used instead. LogDestination(); private: /// Private copy constructor /** LogDestinations should never be copied, therefore the copy constructor is private. */ LogDestination(const LogDestination& unique); /// Private assignment operator /** LogDestinations should never be assigned, therefore the assignment operator is private. */ void operator=(const LogDestination& unique); /// Sets iword and pword for format and prefix friend std::ostream& operator<<(std::ostream& os, const LogDestination& dest); protected: /// Format to use in this LogDestination. LogFormat format; /// Prefix to use in this log destination. /** * \since Added in 4.0.0. **/ std::string prefix; }; /// A class for logging to ostreams. /** This class is used for logging to ostreams (cout, cerr, files). It provides synchronization in order to prevent different LogMessages to appear mixed with each other in the stream. In order not to break the synchronization, LogStreams should never be copied. Therefore the copy constructor and assignment operator are private. Furthermore, it is important to keep a LogStream object as long as the Logger to which it has been registered. \headerfile Logger.h arc/Logger.h */ class LogStream : public LogDestination { public: /// Creates a LogStream connected to an ostream. /** Creates a LogStream connected to the specified ostream. In order not to break synchronization, it is important not to connect more than one LogStream object to a certain stream. @param destination The ostream to which to write LogMessages. */ LogStream(std::ostream& destination); /// Writes a LogMessage to the stream. /** This method writes a LogMessage to the ostream that is connected to this LogStream object. It is synchronized so that not more than one LogMessage can be written at a time. @param message The LogMessage to write. */ virtual void log(const LogMessage& message); private: /// Private copy constructor /** LogStreams should never be copied, therefore the copy constructor is private. */ LogStream(const LogStream& unique); /// Private assignment operator /** LogStreams should never be assigned, therefore the assignment operator is private. */ void operator=(const LogStream& unique); /// The ostream to which to write LogMessages /** This is the ostream to which LogMessages sent to this LogStream will be written. */ std::ostream& destination; /// A mutex for synchronization. /** This mutex is locked before a LogMessage is written and it is not unlocked until the entire message has been written and the stream flushed. This is done in order to prevent LogMessages to appear mixed in the stream. 
*/ Glib::Mutex mutex; }; /// A class for logging to files. /** This class is used for logging to files. It provides synchronization in order to prevent different LogMessages to appear mixed with each other in the stream. It is possible to limit size of created file. Whenever specified size is exceeded file is deleted and new one is created. Old files may be moved into backup files instead of being deleted. Those files have names same as initial file with additional number suffix - similar to those found in /var/log of many Unix-like systems. \headerfile Logger.h arc/Logger.h */ class LogFile : public LogDestination { public: /// Creates a LogFile connected to a file. /** Creates a LogFile connected to the file located at specified path. In order not to break synchronization, it is important not to connect more than one LogFile object to a certain file. If file does not exist it will be created. @param path The path to file to which to write LogMessages. */ LogFile(const std::string& path); /// Set maximal allowed size of file. /** Set maximal allowed size of file. This value is not obeyed exactly. Specified size may be exceeded by amount of one LogMessage. To disable limit specify -1. @param newsize Max size of log file. */ void setMaxSize(int newsize); /// Set number of backups to store. /** Set number of backups to store. When file size exceeds one specified with setMaxSize() file is closed and moved to one named path.1. If path.1 exists it is moved to path.2 and so on. Number of path.# files is one set in newbackup. @param newbackup Number of backup files. */ void setBackups(int newbackup); /// Set file reopen on every write. /** Set file reopen on every write. If set to true file is opened before writing every log record and closed afterward. @param newreopen If file to be reopened for every log record. */ void setReopen(bool newreopen); /// Returns true if this instance is valid. operator bool(void); /// Returns true if this instance is invalid. bool operator!(void); /// Writes a LogMessage to the file. /** This method writes a LogMessage to the file that is connected to this LogFile object. If after writing size of file exceeds one set by setMaxSize() file is moved to backup and new one is created. @param message The LogMessage to write. */ virtual void log(const LogMessage& message); private: LogFile(void); LogFile(const LogFile& unique); void operator=(const LogFile& unique); void backup(void); std::string path; std::ofstream destination; int maxsize; int backups; bool reopen; Glib::Mutex mutex; }; class LoggerContextRef; /** \cond Container for internal logger configuration. \headerfile Logger.h arc/Logger.h */ class LoggerContext { friend class Logger; friend class LoggerContextRef; private: /// This counts how many threads are using this object. int usage_count; /// Protection for usage_count. Glib::Mutex mutex; /// A list of pointers to LogDestinations. std::list destinations; /// The threshold of Logger. LogLevel threshold; LoggerContext(LogLevel thr):usage_count(0),threshold(thr) { }; LoggerContext(const LoggerContext& ctx): usage_count(0),destinations(ctx.destinations),threshold(ctx.threshold) { }; ~LoggerContext(void); void Acquire(void); void Release(void); }; /** \endcond */ /// A logger class. /** This class defines a Logger to which LogMessages can be sent. Every Logger (except for the rootLogger) has a parent Logger. The domain of a Logger (a string that indicates the origin of LogMessages) is composed by adding a subdomain to the domain of its parent Logger. 
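A minimal sketch of the LogFile interface described above: logging to a file with size-based rotation and a fixed number of backups, registered with the root logger (addDestination() and setThreshold() are documented in the Logger section that follows). The file path and size limit are illustrative only.

// Minimal sketch, assuming only the LogFile interface described above.
#include <iostream>
#include <arc/Logger.h>

int main() {
  Arc::LogFile logfile("/tmp/example.log");
  if (!logfile) {                        // operator! reports an invalid instance
    std::cerr << "Cannot open log file" << std::endl;
    return 1;
  }
  logfile.setMaxSize(1024 * 1024);       // rotate once the file exceeds ~1 MB
  logfile.setBackups(2);                 // keep example.log.1 and example.log.2
  Arc::Logger::getRootLogger().addDestination(logfile);
  Arc::Logger::getRootLogger().setThreshold(Arc::INFO);
  Arc::Logger::getRootLogger().msg(Arc::INFO, "logging to file started");
  return 0;
}

As with LogStream, the LogFile object must stay alive for as long as the Logger it is registered with may use it.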
A Logger also has a threshold. Every LogMessage that have a level that is greater than or equal to the threshold is forwarded to any LogDestination connected to this Logger as well as to the parent Logger. Typical usage of the Logger class is to declare a global Logger object for each library/module/component to be used by all classes and methods there. Logger messages may be localised according to the current locale. Some locales are better supported than others. Example code for setting up logger in main(): @code // Set up stderr as a log stream Arc::LogStream logcerr(std::cerr); // Log message is prefixed by level only logcerr.setFormat(Arc::ShortFormat); // Add the stderr destination to the root logger Arc::Logger::getRootLogger().addDestination(logcerr); // Set the logging threshold to WARNING Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); // Logger to use in main() - it inherits all properties from the root Logger Arc::Logger logger(Arc::Logger::getRootLogger(), "main"); // this message will not be logged since it is below the threshold logger.msg(Arc::INFO, "main started"); int i = 5; // This message will be logged logger.msg(Arc::ERROR, "Oops, an error occurred when i was %i", i); @endcode */ class Logger { public: /// The root Logger. /** This is the root Logger. It is an ancestor of any other Logger and always exists. */ //static Logger rootLogger; static Logger& getRootLogger(); /// Creates a logger. /** The threshold is inherited from its parent Logger. @param parent The parent Logger of the new Logger. @param subdomain The subdomain of the new logger. */ Logger(Logger& parent, const std::string& subdomain); /// Creates a logger. /** @param parent The parent Logger of the new Logger. @param subdomain The subdomain of the new logger. @param threshold The threshold of the new logger. */ Logger(Logger& parent, const std::string& subdomain, LogLevel threshold); /// Destroys a logger. ~Logger(); /// Adds a LogDestination. /** Adds a LogDestination to which to forward LogMessages sent to this logger (if they pass the threshold). Since LogDestinations should not be copied, the new LogDestination is passed by reference and a pointer to it is kept for later use. It is therefore important that the LogDestination passed to this Logger exists at least as long as the Logger itself. */ void addDestination(LogDestination& destination); /// Adds LogDestinations. /** See addDestination(LogDestination& destination). */ void addDestinations(const std::list& destinations); /// Set LogDestinations. /** A safe atomic way to remove and add LogDestinations. */ void setDestinations(const std::list& destinations); /// Obtains current LogDestinations. /** Returns list of pointers to LogDestination objects. Returned result refers directly to internal member of Logger instance. Hence it should not be used after this Logger is destroyed. */ const std::list& getDestinations(void) const; /// Removes all LogDestinations. void removeDestinations(void); /// Remove all LogDestinations and delete LogDestination objects. /** A LogDestination to not delete can be specified in the exclude parameter. * @param exclude Do not delete this destination */ void deleteDestinations(LogDestination* exclude=NULL); /// Sets the logging threshold. /** This method sets the threshold of the Logger. Any message sent to this Logger that has a level below this threshold will be discarded. @param threshold The threshold */ void setThreshold(LogLevel threshold); /// Sets the threshold for domain. 
/** This method sets the default threshold of the domain. All new loggers created with specified domain will have specified threshold set by default. The subdomains of all loggers in chain are matched against list of provided subdomains. @param threshold The threshold @param subdomains The subdomains of all loggers in chain */ static void setThresholdForDomain(LogLevel threshold, const std::list& subdomains); /// Sets the threshold for domain. /** This method sets the default threshold of the domain. All new loggers created with specified domain will have specified threshold set by default. The domain is composed of all subdomains of all loggers in chain by merging them with '.' as separator. @param threshold The threshold @param domain The domain of logger */ static void setThresholdForDomain(LogLevel threshold, const std::string& domain); /// Returns the threshold of this logger. LogLevel getThreshold() const; /// Creates per-thread context. /** Creates new context for this logger which becomes effective for operations initiated by this thread. All new threads started by this one will inherit new context. Context stores current threshold and pointers to destinations. Hence new context is identical to current one. One can modify new context using setThreshold(), removeDestinations() and addDestination(). All such operations will not affect old context. */ void setThreadContext(void); /// Sends a LogMessage. /** @param message The LogMessage to send. */ void msg(LogMessage message); /// Logs a message text. /** Logs a message text string at the specified LogLevel. This is a convenience method to save some typing. It simply creates a LogMessage and sends it to the other msg() methods. It is also possible to use msg() with multiple arguments and printf-style string formatting, for example @code logger.msg(INFO, "Operation no %i failed: %s", number, reason); @endcode @param level The level of the message. @param str The message text. */ void msg(LogLevel level, const std::string& str) { msg(LogMessage(level, IString(str))); } template void msg(LogLevel level, const std::string& str, const T0& t0) { msg(LogMessage(level, IString(str, t0))); } template void msg(LogLevel level, const std::string& str, const T0& t0, const T1& t1) { msg(LogMessage(level, IString(str, t0, t1))); } template void msg(LogLevel level, const std::string& str, const T0& t0, const T1& t1, const T2& t2) { msg(LogMessage(level, IString(str, t0, t1, t2))); } template void msg(LogLevel level, const std::string& str, const T0& t0, const T1& t1, const T2& t2, const T3& t3) { msg(LogMessage(level, IString(str, t0, t1, t2, t3))); } template void msg(LogLevel level, const std::string& str, const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4) { msg(LogMessage(level, IString(str, t0, t1, t2, t3, t4))); } template void msg(LogLevel level, const std::string& str, const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5) { msg(LogMessage(level, IString(str, t0, t1, t2, t3, t4, t5))); } template void msg(LogLevel level, const std::string& str, const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5, const T6& t6) { msg(LogMessage(level, IString(str, t0, t1, t2, t3, t4, t5, t6))); } template void msg(LogLevel level, const std::string& str, const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5, const T6& t6, const T7& t7) { msg(LogMessage(level, IString(str, t0, t1, t2, t3, t4, t5, t6, t7))); } private: /// A private constructor. 
/** Every Logger (except the root logger) must have a parent, therefore the default constructor (which does not specify a parent) is private to prevent accidental use. It is only used when creating the root logger. */ Logger(); /// Private copy constructor /** Loggers should never be copied, therefore the copy constructor is private. */ Logger(const Logger& unique); /// Private assignment operator /** Loggers should never be assigned, therefore the assignment operator is private. */ void operator=(const Logger& unique); /// Returns the domain. /** This method returns the domain of this logger, i.e. the string that is attached to all LogMessages sent from this logger to indicate their origin. */ std::string getDomain(); /// Forwards a log message. /** This method is called by the msg() method and by child Loggers. It filters messages based on their level and forwards them to the parent Logger and any LogDestination that has been added to this Logger. @param message The message to send. */ void log(const LogMessage& message); /// A pointer to the parent of this logger. Logger *parent; /// The domain of this logger. std::string domain; /// Per-trhread storage id for context; std::string context_id; LoggerContext context; LoggerContext& getContext(void); Glib::Mutex mutex; #define rootLoggerMagic (0xF6569201) static Logger *rootLogger; static std::map* defaultThresholds; static unsigned int rootLoggerMark; }; /** @} */ } // namespace Arc #define rootLogger getRootLogger() #define LOG(LGR, THR, FSTR, ...) { if ((LGR).getThreshold() >= (THR)(LGR).msg((THR), (FSTR), ...); } #endif // __ARC_LOGGER__ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/Profile.cpp0000644000000000000000000000012412062134076023052 xustar000000000000000027 mtime=1355331646.140129 27 atime=1513200574.888706 30 ctime=1513200658.867733638 nordugrid-arc-5.4.2/src/hed/libs/common/Profile.cpp0000644000175000002070000004753512062134076023135 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include "Profile.h" namespace Arc { static Logger profileLogger(Logger::getRootLogger(), "Profile"); Profile::Profile(const std::string& filename) : XMLNode(NS(), "ArcConfig") { ReadFromFile(filename); } Profile::~Profile() {} /* * This function parses the initokenenables and initokendisables attributes and * returns a boolean acoording to the following. * With the initokenenables attribute elements in a profile can be * enabled/disabled from INI either by specifying/not specifying a given * section or a INI tag/value pair in a given section. With initokendisables * the opposite applies, i.e. an element is enabled unless the ini tag/value * is present. * The format is:
    [#[=]] * So either a section, or a section/tag or a section/tag/value can be specified. * With initokenenables, if the specified set is found in IniConfig then true * is returned, otherwise false. Also section and tag (if requested) must be * non-empty otherwise false is returned. The opposite applies to initokendisables. */ static bool isenabled(XMLNode node, IniConfig ini) { std::string::size_type pos; std::string section = "", tag = "", value = ""; if (node.Attribute("initokenenables")) { const std::string initokenenables = node.Attribute("initokenenables"); pos = initokenenables.find('#'); section = initokenenables.substr(0, pos); if (section.empty()) { return false; } if (pos != std::string::npos) { tag = initokenenables.substr(pos+1); pos = tag.find('='); if (pos != std::string::npos) { value = tag.substr(pos+1); tag.resize(pos); return ((!tag.empty()) && (bool)ini[section][tag] && ((std::string)ini[section][tag] == value)); } return ((!tag.empty()) && (bool)ini[section][tag]); } return (bool)ini[section]; } if (node.Attribute("initokendisables")) { const std::string initokendisables = node.Attribute("initokendisables"); pos = initokendisables.find('#'); section = initokendisables.substr(0, pos); if (section.empty()) { return true; } if (pos != std::string::npos) { tag = initokendisables.substr(pos+1); pos = tag.find('='); if (pos != std::string::npos) { value = tag.substr(pos+1); tag.resize(pos); return !((!tag.empty()) && (bool)ini[section][tag] && ((std::string)ini[section][tag] == value)); } return !((!tag.empty()) && (bool)ini[section][tag]); } return !(bool)ini[section]; } return true; } /* From the space separated list of sections 'sections' this function sets the * 'sectionName' variable to the first ini section existing in the * IniConfig object 'ini'. The function returns true if one of the sections in * space separated list 'sections' is found, otherwise false. */ static bool locateSection(const std::string& sections, IniConfig ini, std::string& sectionName) { std::list sectionList; tokenize(sections, sectionList); // Find first existing section from the list of sections. for (std::list::const_iterator it = sectionList.begin(); it != sectionList.end(); it++) { if (ini[*it]) { sectionName = *it; return true; } } return false; } static bool locateSectionWithTag(const std::string& sections, const std::string& tag, IniConfig ini, std::string& sectionName) { std::list sectionList; tokenize(sections, sectionList); // Find first existing section from the list of sections. for (std::list::const_iterator it = sectionList.begin(); it != sectionList.end(); it++) { if (ini[*it][tag]) { sectionName = *it; return true; } } return false; } // Returns number of child's added. static int MapTags(XMLNode node, const std::string& sections, const std::string& tag, const std::string& type, IniConfig iniNode, int nodePosition) { std::string sectionName = ""; if (!locateSectionWithTag(sections, tag, iniNode, sectionName)) { // initag not found in any sections. if (node.Attribute("inidefaultvalue")) { // Set the default value. 
if (type == "attribute") { node.Parent().NewAttribute(node.FullName()) = (std::string)node.Attribute("inidefaultvalue"); } else { XMLNode newNode = node.Parent().NewChild(node.FullName(), nodePosition, true); newNode = (std::string)node.Attribute("inidefaultvalue"); for (int i = 0; i < node.AttributesSize(); i++) { const std::string attName = node.Attribute(i).Name(); if (!(attName == "inisections" || attName == "initag" || attName == "inidefaultvalue" || attName == "initype")) { newNode.NewAttribute(node.Attribute(i).FullName()) = (std::string)node.Attribute(i); } } return 1; } } return 0; } if (type == "attribute") { node.Parent().NewAttribute(node.FullName()) = (std::string)iniNode[sectionName][tag]; return 0; } int i = 0; for (XMLNode in = iniNode[sectionName][tag]; (type == "multi" || i == 0) && in; ++in, i++) { XMLNode newNode = node.Parent().NewChild(node.FullName(), nodePosition + i, true); newNode = (std::string)in; for (int j = 0; j < node.AttributesSize(); j++) { const std::string attName = node.Attribute(j).Name(); if (!(attName == "inisections" || attName == "initag" || attName == "inidefaultvalue" || attName == "initype")) { newNode.NewAttribute(node.Attribute(j).FullName()) = (std::string)node.Attribute(j); } } } return i; } /* * This function processes a parent element and its child elements where the * initype of the parent is 'multielement'. The parent element has inisections * and initag specified which enable a child element to refer to those by * specifying initype '#this'. If a child element has initype set to '#this', * then 'iniNode' will be searched for childs corresponding to section * 'thisSectionName' and tag 'thisTagName' and the value of each of these * childs will be mapped to a child of the node 'node'. If a child element has * no initype, inisections and initag and this element has children then these * are processes aswell using the function recursively. A child element can * also have initype set to 'single' or 'attribute' in which case these are * processes as if the parent element had no initype. **/ static void MapMultiElement(XMLNode node, XMLNodeList& parentNodes, const std::string& thisSectionName, const std::string& thisTagName, IniConfig iniNode) { for (int i = 0; node.Child(i); i++) { if (!isenabled(node.Child(i), iniNode)) { continue; } const std::string childFullName = (node.Child(i).Prefix().empty() ? node.Child(i).Name() : node.Child(i).FullName()); const std::string sections = node.Child(i).Attribute("inisections"); if (node.Child(i).Attribute("inisections") && sections.empty()) { profileLogger.msg(WARNING, "Element \"%s\" in the profile ignored: the value of the \"inisections\" attribute cannot be the empty string.", node.Child(i).FullName()); continue; } const std::string tag = node.Child(i).Attribute("initag"); if (node.Child(i).Attribute("initag") && tag.empty()) { profileLogger.msg(WARNING, "Element \"%s\" in the profile ignored: the value of the \"initag\" attribute cannot be the empty string.", node.Child(i).FullName()); continue; } const std::string type = (node.Child(i).Attribute("initype") ? (std::string)node.Child(i).Attribute("initype") : (!tag.empty() && !sections.empty() ? "single" : "")); if (type.empty() && sections.empty() && tag.empty()) { if (node.Child(i).Size() == 0) { // This child has no child's, proceed to next child in loop. continue; } XMLNodeList l; // Create child node beneath each parent node. 
for (XMLNodeList::iterator it = parentNodes.begin(); it != parentNodes.end(); it++) { l.push_back(it->NewChild(childFullName)); } MapMultiElement(node.Child(i), l, thisSectionName, thisTagName, iniNode); for (XMLNodeList::iterator it = l.begin(); it != l.end(); it++) { if (it->Size() == 0) { // Remove nodes in list, which has no childs. it->Destroy(); } } } else if (type == "#this" && sections.empty() && tag.empty()) { int j = 0; for (XMLNodeList::iterator it = parentNodes.begin(); it != parentNodes.end(); it++, j++) { if (iniNode[thisSectionName][thisTagName][j]) { it->NewChild(childFullName) = (std::string)iniNode[thisSectionName][thisTagName][j]; } else if (node.Child(i).Attribute("inidefaultvalue")) { it->NewChild(childFullName) = (std::string)node.Child(i).Attribute("inidefaultvalue"); } } } else if ((type == "single" || type == "attribute") && !sections.empty() && !tag.empty()) { const std::string tagName = tag; std::string sectionName = ""; if (locateSectionWithTag(sections, tagName, iniNode, sectionName) || node.Child(i).Attribute("inidefaultvalue")) { const std::string value = (!sectionName.empty() ? iniNode[sectionName][tagName] : node.Child(i).Attribute("inidefaultvalue")); for (XMLNodeList::iterator it = parentNodes.begin(); it != parentNodes.end(); it++) { if (type == "attribute") { it->NewAttribute(childFullName) = value; } else { it->NewChild(childFullName) = value; } } } } } } static void MapMultiSection(XMLNode node, XMLNodeList& parentNodes, const std::string& sectionName, IniConfig iniNode) { // Loop over child nodes under multisection XML element. for (int i = 0; node.Child(i); i++) { if (!isenabled(node.Child(i), iniNode)) { continue; } const std::string childFullName = (node.Child(i).Prefix().empty() ? node.Child(i).Name() : node.Child(i).FullName()); const std::string sections = node.Child(i).Attribute("inisections"); if (node.Child(i).Attribute("inisections") && sections.empty()) { profileLogger.msg(WARNING, "Element \"%s\" in the profile ignored: the value of the \"inisections\" attribute cannot be the empty string.", node.Child(i).FullName()); continue; } const std::string tag = node.Child(i).Attribute("initag"); if (node.Child(i).Attribute("initag") && tag.empty()) { profileLogger.msg(WARNING, "Element \"%s\" in the profile ignored: the value of the \"initag\" attribute cannot be the empty string.", node.Child(i).FullName()); continue; } const std::string type = (node.Child(i).Attribute("initype") ? (std::string)node.Child(i).Attribute("initype") : (!tag.empty() && !sections.empty() ? "single" : "")); if (sections.empty() && tag.empty()) { if (node.Child(i).Size() == 0) { // This child has no childs, proceed to next child in loop. continue; } XMLNodeList l; // Create child node beneath each parent node. for (XMLNodeList::iterator it = parentNodes.begin(); it != parentNodes.end(); it++) { l.push_back(it->NewChild(childFullName)); } MapMultiSection(node.Child(i), l, sectionName, iniNode); for (XMLNodeList::iterator it = l.begin(); it != l.end(); it++) { if (it->Size() == 0) { // Remove nodes in list, which has no childs. it->Destroy(); } } } else if ((type == "single" || type == "multi" || type == "attribute") && !sections.empty() && !tag.empty()) { std::list sectionList; tokenize(sections, sectionList); bool tagInMultisections = false; int j = 0; // First populate XML elements with common values. 
for (std::list::const_iterator it = sectionList.begin(); it != sectionList.end(); it++) { if (*it == "#this") { tagInMultisections = true; continue; } // Map tag to node for every existing section. for (; (type == "multi" || j == 0) && iniNode[*it][tag][j]; j++) { for (XMLNodeList::iterator itMNodes = parentNodes.begin(); itMNodes != parentNodes.end(); itMNodes++) { if (type == "attribute") { itMNodes->NewAttribute(childFullName) = (std::string)iniNode[*it][tag][j]; } else { itMNodes->NewChild(childFullName) = (std::string)iniNode[*it][tag][j]; } } } // Only assign common values from one section. if (j != 0) { break; } } if (j == 0 && node.Child(i).Attribute("inidefaultvalue")) { // Set default value of node/attribute for every existing section. for (XMLNodeList::iterator itMNodes = parentNodes.begin(); itMNodes != parentNodes.end(); itMNodes++) { if (type == "attribute") { itMNodes->NewAttribute(childFullName) = (std::string)node.Child(i).Attribute("inidefaultvalue"); } else { itMNodes->NewChild(childFullName) = (std::string)node.Child(i).Attribute("inidefaultvalue"); } } } if (!tagInMultisections) { continue; } // And then assign/overwrite values from multisections. j = 0; for (XMLNodeList::iterator itMNodes = parentNodes.begin(); itMNodes != parentNodes.end(); itMNodes++, j++) { // Loop over parent nodes and #this sections. They are of the same size. int k = 0; for (; (type == "multi" || k == 0) && iniNode[sectionName][j][tag][k]; k++) { if (type == "attribute") { if (!itMNodes->Attribute(childFullName)) { itMNodes->NewAttribute(childFullName); } itMNodes->Attribute(childFullName) = (std::string)iniNode[sectionName][j][tag][k]; } else { if (!(*itMNodes)[childFullName][k]) { itMNodes->NewChild(childFullName); } (*itMNodes)[childFullName][k] = (std::string)iniNode[sectionName][j][tag][k]; } } /* * The following code only executes for type == "multi" ( (*itMNodes)[childFullName][1] does only exist for type == "multi"). * It removes any common elements which exist as child of *itMNodes. These should * be removed since specific values was assigned in this section. */ while (k != 0 && (*itMNodes)[childFullName][k]) { (*itMNodes)[childFullName][k].Destroy(); } } } } } /* * Returns number of child's added (positive) or removed (negative). */ int EvaluateNode (XMLNode n, IniConfig ini, int nodePosition) { if (!isenabled(n, ini)) { n.Destroy(); return -1; } if (n.Attribute("initokenenables")) { n.Attribute("initokenenables").Destroy(); } if (n.Attribute("initokendisables")) { n.Attribute("initokendisables").Destroy(); } XMLNode sections = n.Attribute("inisections"); if (sections && ((std::string)sections).empty()) { profileLogger.msg(WARNING, "Element \"%s\" in the profile ignored: the value of the \"inisections\" attribute cannot be the empty string.", n.FullName()); n.Destroy(); return -1; } XMLNode tag = n.Attribute("initag"); if (tag && ((std::string)tag).empty()) { profileLogger.msg(WARNING, "Element \"%s\" in the profile ignored: the value of the \"initag\" attribute cannot be the empty string.", n.FullName()); n.Destroy(); return -1; } const std::string type = (n.Attribute("initype") ? (std::string)n.Attribute("initype") : (tag && sections ? 
"single" : "")); if (type.empty() && (n.Attribute("initype") || n.Attribute("inidefaultvalue"))) { if (n.Attribute("initype")) { profileLogger.msg(WARNING, "Element \"%s\" in the profile ignored: the value of the \"initype\" attribute cannot be the empty string.", n.FullName()); } else { profileLogger.msg(WARNING, "Element \"%s\" in the profile ignored: the \"inidefaultvalue\" attribute cannot be specified when the \"inisections\" and \"initag\" attributes have not been specified.", n.FullName()); } n.Destroy(); return -1; } if (type == "multisection") { const std::string tagName = tag; std::string sectionName = ""; if (!locateSection(sections, ini, sectionName)) { // None of the specified sections was found in the XMLNode 'iniNode'. n.Destroy(); return -1; } // Make a node for every existing section with 'sectionName' in the IniConfig. XMLNodeList mNodes; for (int i = 0; ini[sectionName][i]; i++) { mNodes.push_back(n.Parent().NewChild(n.FullName(), nodePosition+i, true)); if (tag && ini[sectionName][i][tagName]) { // A tag have been specified for this multisection node, thus values should be assigned. mNodes.back() = (std::string)ini[sectionName][i][tagName]; } } // No tag have been specified process the substructure. if (!tag) { MapMultiSection(n, mNodes, sectionName, ini); } int nChilds = mNodes.size(); // Remove generated elements without text and children. They carry no information. for (XMLNodeList::iterator it = mNodes.begin(); it != mNodes.end(); it++) { if (((std::string)*it).empty() && it->Size() == 0) { it->Destroy(); nChilds--; } } n.Destroy(); return nChilds-1; } else if (type == "multielement") { if (!sections || ((std::string)sections).empty() || !tag || ((std::string)tag).empty()) { n.Destroy(); return -1; } const std::string tagName = tag; std::string sectionName = ""; if (!locateSectionWithTag(sections, tagName, ini, sectionName)) { n.Destroy(); return -1; } // Make a node for every existing tag 'tagName' in the registered section with 'sectionName' in the IniConfig. XMLNodeList mNodes; for (int i = 0; ini[sectionName][tagName][i]; i++) { mNodes.push_back(n.Parent().NewChild(n.FullName(), nodePosition + i, true)); } MapMultiElement(n, mNodes, sectionName, tagName, ini); n.Destroy(); return mNodes.size()-1; } else if ((type == "single" || type == "attribute" || type == "multi")) { int nChilds = MapTags(n, sections, tag, type, ini, nodePosition); n.Destroy(); return nChilds - 1; } else if (!type.empty()) { profileLogger.msg(WARNING, "In the configuration profile the 'initype' attribute on the \"%s\" element has a invalid value \"%s\".", n.Prefix().empty() ? 
n.Name() : n.FullName(), type); n.Destroy(); return -1; } else { for (int i = 0; n.Child(i); i++) i += EvaluateNode(n.Child(i), ini, i); } return 0; } void Profile::Evaluate(Config &cfg, IniConfig ini) { cfg.Replace(*this); for (int i = 0; cfg.Child(i); i++) i += EvaluateNode(cfg.Child(i), ini, i); } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/ArcConfigFile.cpp0000644000000000000000000000012412771223617024114 xustar000000000000000027 mtime=1474635663.023405 27 atime=1513200574.910706 30 ctime=1513200658.848733405 nordugrid-arc-5.4.2/src/hed/libs/common/ArcConfigFile.cpp0000644000175000002070000000722512771223617024167 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "ArcConfigFile.h" //#include "../misc/escaped.h" namespace Arc { bool ConfigFile::open(const std::string &name) { close(); std::ifstream::open(name.c_str(),std::ifstream::in); return std::ifstream::is_open(); } bool ConfigFile::close(void) { if(std::ifstream::is_open()) std::ifstream::close(); return true; } /* std::string ConfigFile::read_line(std::string &rest,char separator) { rest = read_line(); return next_arg(rest,separator); } */ std::string ConfigFile::read_line() { return read_line(*this); } std::string ConfigFile::read_line(std::istream& stream) { std::string rest; for(;;) { if(stream.eof() || stream.fail()) { rest=""; return rest; }; std::getline(stream,rest); Arc::trim(rest," \t\r\n"); if(rest.empty()) continue; /* empty string - skip */ if(rest[0] == '#') continue; /* comment - skip */ break; }; return rest; } /* std::string ConfigFile::next_arg(std::string &rest,char separator) { int n; std::string arg; n=input_escaped_string(rest.c_str(),arg,separator); rest=rest.substr(n); return arg; } */ ConfigFile::file_type ConfigFile::detect() { char inchar; if (!good()) return file_unknown; while(good()) { inchar = (char)(get()); if(isspace(inchar)) continue; if(inchar == '<') { // XML starts from < even if it is comment putback(inchar); return file_XML; }; if((inchar == '#') || (inchar = '[')) { // INI file starts from comment or section putback(inchar); return file_INI; }; }; putback(inchar); return file_unknown; } /* bool elementtobool(Arc::XMLNode pnode,const char* ename,bool& val,Arc::Logger* logger) { std::string v = ename?pnode[ename]:pnode; if(v.empty()) return true; // default if((v == "true") || (v == "1")) { val=true; return true; }; if((v == "false") || (v == "0")) { val=false; return true; }; if(logger && ename) logger->msg(Arc::ERROR,"wrong boolean in %s: %s",ename,v.c_str()); return false; } bool elementtoint(Arc::XMLNode pnode,const char* ename,unsigned int& val,Arc::Logger* logger) { std::string v = ename?pnode[ename]:pnode; if(v.empty()) return true; // default if(Arc::stringto(v,val)) return true; if(logger && ename) logger->msg(Arc::ERROR,"wrong number in %s: %s",ename,v.c_str()); return false; } bool elementtoint(Arc::XMLNode pnode,const char* ename,int& val,Arc::Logger* logger) { std::string v = ename?pnode[ename]:pnode; if(v.empty()) return true; // default if(Arc::stringto(v,val)) return true; if(logger && ename) logger->msg(Arc::ERROR,"wrong number in %s: %s",ename,v.c_str()); return false; } bool elementtoint(Arc::XMLNode pnode,const char* ename,time_t& val,Arc::Logger* logger) { std::string v = ename?pnode[ename]:pnode; if(v.empty()) return true; // default if(Arc::stringto(v,val)) return true; if(logger && ename) logger->msg(Arc::ERROR,"wrong number in %s: %s",ename,v.c_str()); return false; } bool elementtoint(Arc::XMLNode 
pnode,const char* ename,unsigned long long int& val,Arc::Logger* logger) { std::string v = ename?pnode[ename]:pnode; if(v.empty()) return true; // default if(Arc::stringto(v,val)) return true; if(logger && ename) logger->msg(Arc::ERROR,"wrong number in %s: %s",ename,v.c_str()); return false; } bool elementtoenum(Arc::XMLNode pnode,const char* ename,int& val,const char* const opts[],Arc::Logger* logger) { std::string v = ename?pnode[ename]:pnode; if(v.empty()) return true; // default for(int n = 0;opts[n];++n) { if(v == opts[n]) { val = n; return true; }; }; return false; } */ } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/Run_win32.cpp0000644000000000000000000000012412735423540023244 xustar000000000000000027 mtime=1467361120.207262 27 atime=1513200574.932707 30 ctime=1513200658.869733662 nordugrid-arc-5.4.2/src/hed/libs/common/Run_win32.cpp0000644000175000002070000001635112735423540023317 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "Run.h" #include "Watchdog.h" std::string GetOsErrorMessage(void); namespace Arc { struct PipeThreadArg { HANDLE child; HANDLE parent; }; void pipe_handler(void *arg); class RunPump { // NOP }; class Pid { friend class Run; private: PROCESS_INFORMATION processinfo; Pid(void) {} Pid(int p_) {} Pid(const PROCESS_INFORMATION p_) { processinfo = p_; } }; Run::Run(const std::string& cmdline) : working_directory("."), stdout_(-1), stderr_(-1), stdin_(-1), stdout_str_(NULL), stderr_str_(NULL), stdin_str_(NULL), stdout_keep_(false), stderr_keep_(false), stdin_keep_(false), pid_(NULL), argv_(Glib::shell_parse_argv(cmdline)), initializer_func_(NULL), initializer_arg_(NULL), kicker_func_(NULL), kicker_arg_(NULL), started_(false), running_(false), abandoned_(false), result_(-1), user_id_(0), group_id_(0), run_time_(Time::UNDEFINED), exit_time_(Time::UNDEFINED) { pid_ = new Pid(); } Run::Run(const std::list& argv) : working_directory("."), stdout_(-1), stderr_(-1), stdin_(-1), stdout_str_(NULL), stderr_str_(NULL), stdin_str_(NULL), stdout_keep_(false), stderr_keep_(false), stdin_keep_(false), pid_(NULL), argv_(argv), initializer_func_(NULL), initializer_arg_(NULL), kicker_func_(NULL), kicker_arg_(NULL), started_(false), running_(false), abandoned_(false), result_(-1), user_id_(0), group_id_(0), run_time_(Time::UNDEFINED), exit_time_(Time::UNDEFINED) { pid_ = new Pid(); } Run::~Run(void) { if(*this) { if(!abandoned_) Kill(0); CloseStdout(); CloseStderr(); CloseStdin(); }; if(pid_) delete pid_; } bool Run::Start(void) { if (started_) return false; if (argv_.size() < 1) return false; try { running_ = true; SECURITY_ATTRIBUTES saAttr; saAttr.nLength = sizeof(SECURITY_ATTRIBUTES); saAttr.bInheritHandle = TRUE; saAttr.lpSecurityDescriptor = NULL; STARTUPINFO startupinfo; memset(&startupinfo, 0, sizeof(startupinfo)); startupinfo.cb = sizeof(STARTUPINFO); // TODO: stdin, stdout, stderr redirections (Apache/BSD license) std::list::const_iterator argp = argv_.begin(); std::string cmd = ""; for (; argp != argv_.end(); ++argp) { cmd += ((*argp) + " "); } int result = CreateProcess(NULL, (LPSTR)cmd.c_str(), NULL, NULL, TRUE, CREATE_NEW_PROCESS_GROUP | CREATE_NO_WINDOW | IDLE_PRIORITY_CLASS, NULL, (LPSTR)working_directory.c_str(), &startupinfo, &(pid_->processinfo)); if (!result) { std::cout << "Spawn Error: " << GetOsErrorMessage() << std::endl; return false; } started_ = true; run_time_ = Time(); } catch (std::exception &e) { std::cerr 
<< e.what() << std::endl; return false; } catch (Glib::Exception& e) { std::cerr << e.what() << std::endl; } return true; } void Run::Kill(int timeout) { if (!running_) return; // Kill with no merci running_ = false; TerminateProcess(pid_->processinfo.hProcess, 256); exit_time_ = Time(); } void Run::Abandon(void) { if(*this) { CloseStdout(); CloseStderr(); CloseStdin(); abandoned_=true; } } bool Run::stdout_handler(Glib::IOCondition) { return true; } bool Run::stderr_handler(Glib::IOCondition) { return true; } bool Run::stdin_handler(Glib::IOCondition) { return true; } void Run::child_handler(Glib::Pid, int result) { } void Run::CloseStdout(void) { if (stdout_ != -1) ::close(stdout_); stdout_ = -1; } void Run::CloseStderr(void) { if (stderr_ != -1) ::close(stderr_); stderr_ = -1; } void Run::CloseStdin(void) { if (stdin_ != -1) ::close(stdin_); stdin_ = -1; } int Run::ReadStdout(int /*timeout*/, char *buf, int size) { if (stdout_ == -1) return -1; // TODO: do it through context for timeout return ::read(stdout_, buf, size); } int Run::ReadStderr(int /*timeout*/, char *buf, int size) { if (stderr_ == -1) return -1; // TODO: do it through context for timeout return ::read(stderr_, buf, size); } int Run::WriteStdin(int /*timeout*/, const char *buf, int size) { return 0; } bool Run::Running(void) { Wait(0); return running_; } bool Run::Wait(int timeout) { if (!started_) return false; if (!running_) return true; if(WaitForSingleObject(pid_->processinfo.hProcess, timeout*1000) == WAIT_TIMEOUT) return false; if(WaitForSingleObject(pid_->processinfo.hThread, timeout*1000) == WAIT_TIMEOUT) return false; DWORD code = (DWORD)(-1); if(GetExitCodeProcess(pid_->processinfo.hProcess,&code)) result_ = code; CloseHandle(pid_->processinfo.hThread); CloseHandle(pid_->processinfo.hProcess); running_ = false; exit_time_ = Time(); return true; } bool Run::Wait(void) { if (!started_) return false; if (!running_) return true; WaitForSingleObject(pid_->processinfo.hProcess, INFINITE); WaitForSingleObject(pid_->processinfo.hThread, INFINITE); DWORD code = (DWORD)(-1); if(GetExitCodeProcess(pid_->processinfo.hProcess,&code)) result_ = code; CloseHandle(pid_->processinfo.hThread); CloseHandle(pid_->processinfo.hProcess); running_ = false; exit_time_ = Time(); return true; } void Run::AssignStdout(std::string& str) { if (!running_) stdout_str_ = &str; } void Run::AssignStderr(std::string& str) { if (!running_) stderr_str_ = &str; } void Run::AssignStdin(std::string& str) { if (!running_) stdin_str_ = &str; } void Run::KeepStdout(bool keep) { if (!running_) stdout_keep_ = keep; } void Run::KeepStderr(bool keep) { if (!running_) stderr_keep_ = keep; } void Run::KeepStdin(bool keep) { if (!running_) stdin_keep_ = keep; } void Run::AssignInitializer(void (*initializer_func)(void *arg), void *initializer_arg) { if (!running_) { initializer_arg_ = initializer_arg; initializer_func_ = initializer_func; } } void Run::AssignKicker(void (*kicker_func)(void *arg), void *kicker_arg) { if (!running_) { kicker_arg_ = kicker_arg; kicker_func_ = kicker_func; } } void Run::AfterFork(void) { } WatchdogChannel::WatchdogChannel(int timeout) { id_ = -1; } void WatchdogChannel::Kick(void) { } bool WatchdogListener::Listen(void) { return false; } } nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/GUID.h0000644000000000000000000000012412111140470021635 xustar000000000000000027 mtime=1361363256.081099 27 atime=1513200574.936707 30 ctime=1513200658.815733002 
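A sketch of typical use of the Arc::Run interface whose Windows backend appears above: start a child process, capture its standard output into a string, and wait with a timeout. It assumes the usual <arc/Run.h> install path and a platform backend that implements stdout capture (in the Windows backend above the stdin/stdout/stderr redirection is still marked TODO); the command line is illustrative.

// Sketch of typical Arc::Run usage under the assumptions stated above.
#include <iostream>
#include <string>
#include <arc/Run.h>

int main() {
  Arc::Run run("uname -a");        // the command line is split into argv internally
  std::string output;
  run.AssignStdout(output);        // must be requested before Start()
  if (!run.Start()) {
    std::cerr << "Failed to start child process" << std::endl;
    return 1;
  }
  if (!run.Wait(10)) {             // wait up to 10 seconds; false means still running
    run.Kill(0);                   // terminate immediately, as in Run::Kill() above
    std::cerr << "Child did not finish in time" << std::endl;
    return 1;
  }
  std::cout << "Child said: " << output << std::endl;
  return 0;
}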
nordugrid-arc-5.4.2/src/hed/libs/common/GUID.h0000644000175000002070000000102312111140470021676 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_GUID_H__ #define __ARC_GUID_H__ #include namespace Arc { // Utilities for generating unique identifiers in the form 12345678-90ab-cdef-1234-567890abcdef /// Generates a unique identifier using information such as IP address, current time etc. /** \ingroup common */ void GUID(std::string& guid); /// Generates a unique identifier using the system uuid libraries. /** \ingroup common */ std::string UUID(void); } // namespace Arc #endif // __ARC_GUID_H__ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/IntraProcessCounter.cpp0000644000000000000000000000012411720467271025434 xustar000000000000000027 mtime=1329753785.729844 27 atime=1513200574.956707 30 ctime=1513200658.866733625 nordugrid-arc-5.4.2/src/hed/libs/common/IntraProcessCounter.cpp0000644000175000002070000001060311720467271025501 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif // Counter.cpp #include #include "IntraProcessCounter.h" namespace Arc { IntraProcessCounter::IntraProcessCounter(int limit, int excess) : limit(limit), excess(excess), value(limit), nextReservationID(1) { // Nothing else needs to be done. } IntraProcessCounter::~IntraProcessCounter() { // Nothing needs to be done. } int IntraProcessCounter::getLimit() { return limit; } int IntraProcessCounter::setLimit(int newLimit) { synchMutex.lock(); value += newLimit - limit; limit = newLimit; synchMutex.unlock(); synchCond.signal(); return newLimit; } int IntraProcessCounter::changeLimit(int amount) { int newLimit; synchMutex.lock(); newLimit = limit + amount; value += amount; limit = newLimit; synchMutex.unlock(); synchCond.signal(); return newLimit; } int IntraProcessCounter::getExcess() { return excess; } int IntraProcessCounter::setExcess(int newExcess) { synchMutex.lock(); excess = newExcess; synchMutex.unlock(); synchCond.signal(); return newExcess; } int IntraProcessCounter::changeExcess(int amount) { int newExcess; synchMutex.lock(); newExcess = excess + amount; excess += amount; synchMutex.unlock(); synchCond.signal(); return newExcess; } int IntraProcessCounter::getValue() { int result; synchMutex.lock(); result = unsafeGetValue(); synchMutex.unlock(); return result; } CounterTicket IntraProcessCounter::reserve(int amount, Glib::TimeVal duration, bool prioritized, Glib::TimeVal timeOut) { Glib::TimeVal deadline = getExpiryTime(timeOut); Glib::TimeVal expiryTime; IDType reservationID; synchMutex.lock(); while (amount > unsafeGetValue() + (prioritized ? excess : 0) and getCurrentTime() < deadline) synchCond.timed_wait(synchMutex, std::min(deadline, unsafeGetNextExpiration())); if (amount <= unsafeGetValue() + (prioritized ? 
excess : 0)) { expiryTime = getExpiryTime(duration); reservationID = unsafeReserve(amount, expiryTime); } else { expiryTime = HISTORIC; reservationID = 0; } synchMutex.unlock(); return getCounterTicket(reservationID, expiryTime, this); } void IntraProcessCounter::cancel(unsigned long long int reservationID) { synchMutex.lock(); unsafeCancel(reservationID); synchMutex.unlock(); synchCond.signal(); } void IntraProcessCounter::extend(IDType& reservationID, Glib::TimeVal& expiryTime, Glib::TimeVal duration) { int amount; synchMutex.lock(); amount = unsafeCancel(reservationID); if (amount > 0) { expiryTime = getExpiryTime(duration); reservationID = unsafeReserve(amount, expiryTime); } else { expiryTime = HISTORIC; reservationID = 0; } synchMutex.unlock(); synchCond.signal(); } int IntraProcessCounter::unsafeGetValue() { while (unsafeGetNextExpiration() < getCurrentTime()) { unsafeCancel(selfExpiringReservations.top().getReservationID()); selfExpiringReservations.pop(); } return value; } int IntraProcessCounter::unsafeCancel(IDType reservationID) { std::map::iterator resIter = reservations.find(reservationID); int amount = 0; if (resIter != reservations.end()) { amount = resIter->second; value += amount; reservations.erase(resIter); } return amount; } Counter::IDType IntraProcessCounter::unsafeReserve(int amount, Glib::TimeVal expiryTime) { IDType reservationID = nextReservationID++; value -= amount; reservations[reservationID] = amount; if (expiryTime < ETERNAL) selfExpiringReservations.push(getExpirationReminder(expiryTime, reservationID)); return reservationID; } Glib::TimeVal IntraProcessCounter::unsafeGetNextExpiration() { if (selfExpiringReservations.empty()) return ETERNAL; else return selfExpiringReservations.top().getExpiryTime(); } } nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/HostnameResolver.cpp0000644000000000000000000000012413131637327024757 xustar000000000000000027 mtime=1499938519.516258 27 atime=1513200574.947707 30 ctime=1513200658.864733601 nordugrid-arc-5.4.2/src/hed/libs/common/HostnameResolver.cpp0000644000175000002070000002025313131637327025026 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "hostname_resolver.h" #include "HostnameResolver.h" namespace Arc { #define READ_TIMEOUT (60000) #define WRITE_TIMEOUT (60000) static bool sread(Run& r,void* buf,size_t size) { while(size) { int l = r.ReadStdout(READ_TIMEOUT,(char*)buf,size); if(l <= 0) return false; size-=l; buf = (void*)(((char*)buf)+l); }; return true; } static bool swrite(Run& r,const void* buf,size_t size) { while(size) { int l = r.WriteStdin(WRITE_TIMEOUT,(const char*)buf,size); if(l <= 0) return false; size-=l; buf = (void*)(((char*)buf)+l); }; return true; } #define ABORTALL { dispose_executer(hostname_resolver_); hostname_resolver_=NULL; continue; } #define STARTHEADER(CMD,SIZE) { \ if(!hostname_resolver_) break; \ if(!(hostname_resolver_->Running())) break; \ header_t header; \ header.cmd = CMD; \ header.size = SIZE; \ if(!swrite(*hostname_resolver_,&header,sizeof(header))) ABORTALL; \ } #define ENDHEADER(CMD,SIZE) { \ header_t header; \ if(!sread(*hostname_resolver_,&header,sizeof(header))) ABORTALL; \ if((header.cmd != CMD) || (header.size != (sizeof(res)+sizeof(errno_)+SIZE))) ABORTALL; \ if(!sread(*hostname_resolver_,&res,sizeof(res))) ABORTALL; \ if(!sread(*hostname_resolver_,&errno_,sizeof(errno_))) ABORTALL; \ } static void release_executer(Run* hostname_resolver) { delete hostname_resolver; } static void 
dispose_executer(Run* hostname_resolver) { delete hostname_resolver; } static bool do_tests = false; static Run* acquire_executer() { std::list argv; if(!do_tests) { argv.push_back(Arc::ArcLocation::Get()+G_DIR_SEPARATOR_S+PKGLIBSUBDIR+G_DIR_SEPARATOR_S+"arc-hostname-resolver"); } else { argv.push_back(std::string("..")+G_DIR_SEPARATOR_S+"arc-hostname-resolver"); } argv.push_back("0"); argv.push_back("1"); Run* hostname_resolver_ = new Run(argv); hostname_resolver_->KeepStdin(false); hostname_resolver_->KeepStdout(false); hostname_resolver_->KeepStderr(true); if(!(hostname_resolver_->Start())) { delete hostname_resolver_; hostname_resolver_ = NULL; return NULL; } return hostname_resolver_; } static bool sread_buf(Run& r,void* buf,unsigned int& bufsize,unsigned int& maxsize) { char dummy[1024]; unsigned int size; if(sizeof(size) > maxsize) return false; if(!sread(r,&size,sizeof(size))) return false; maxsize -= sizeof(size); if(size > maxsize) return false; if(size <= bufsize) { if(!sread(r,buf,size)) return false; bufsize = size; maxsize -= size; } else { if(!sread(r,buf,bufsize)) return false; maxsize -= bufsize; // skip rest size -= bufsize; while(size > sizeof(dummy)) { if(!sread(r,dummy,sizeof(dummy))) return false; size -= sizeof(dummy); maxsize -= sizeof(dummy); }; if(!sread(r,dummy,size)) return false; maxsize -= size; }; return true; } static bool swrite_string(Run& r,const std::string& str) { int l = str.length(); if(!swrite(r,&l,sizeof(l))) return false; if(!swrite(r,str.c_str(),l)) return false; return true; } #define RETRYLOOP Glib::Mutex::Lock mlock(lock_); for(int n = 2; n && (hostname_resolver_?hostname_resolver_:(hostname_resolver_=acquire_executer())) ;--n) #define NORETRYLOOP Glib::Mutex::Lock mlock(lock_); for(int n = 1; n && (hostname_resolver_?hostname_resolver_:(hostname_resolver_=acquire_executer())) ;--n) HostnameResolver::SockAddr::SockAddr():family(0),length(0),addr(NULL) { } HostnameResolver::SockAddr::SockAddr(SockAddr const& other):family(0),length(0),addr(NULL) { operator=(other); } HostnameResolver::SockAddr& HostnameResolver::SockAddr::operator=(SockAddr const& other) { family = other.family; length = other.length; ::free(addr); addr = (sockaddr*)::malloc(length); memcpy(addr,other.addr,length); return *this; } HostnameResolver::SockAddr::~SockAddr() { ::free(addr); } HostnameResolver::HostnameResolver(void):hostname_resolver_(NULL),errno_(0) { hostname_resolver_ = acquire_executer(); } HostnameResolver::~HostnameResolver(void) { release_executer(hostname_resolver_); hostname_resolver_ = NULL; } bool HostnameResolver::ping(void) { RETRYLOOP { STARTHEADER(CMD_PING,0); header_t header; if(!sread(*hostname_resolver_,&header,sizeof(header))) ABORTALL; if((header.cmd != CMD_PING) || (header.size != 0)) ABORTALL; return true; } return false; } int HostnameResolver::hr_resolve(std::string const& node, std::string const& service, bool local, std::list& addrs) { NORETRYLOOP { int command = local?CMD_RESOLVE_TCP_LOCAL:CMD_RESOLVE_TCP_REMOTE; STARTHEADER(command,sizeof(int)+node.length()+sizeof(int)+service.length()); if(!swrite_string(*hostname_resolver_,node)) ABORTALL; if(!swrite_string(*hostname_resolver_,service)) ABORTALL; int res = 0; header_t header; if(!sread(*hostname_resolver_,&header,sizeof(header))) ABORTALL; if((header.cmd != command) || (header.size < (sizeof(res)+sizeof(errno_)))) ABORTALL; \ if(!sread(*hostname_resolver_,&res,sizeof(res))) ABORTALL; if(!sread(*hostname_resolver_,&errno_,sizeof(errno_))) ABORTALL; header.size -= 
sizeof(res)+sizeof(errno_); while(hostname_resolver_ && (header.size > 0)) { SockAddr addr; if(header.size < sizeof(addr.family)) ABORTALL; if(!sread(*hostname_resolver_,&addr.family,sizeof(addr.family))) ABORTALL; header.size-=sizeof(addr.family); if(header.size < sizeof(addr.length)) ABORTALL; if(!sread(*hostname_resolver_,&addr.length,sizeof(addr.length))) ABORTALL; header.size-=sizeof(addr.length); if(header.size < addr.length) ABORTALL; if((addr.addr = (sockaddr*)::malloc(addr.length)) == NULL) ABORTALL; if(!sread(*hostname_resolver_,addr.addr,addr.length)) ABORTALL; header.size-=addr.length; addrs.push_back(addr); }; if(!hostname_resolver_) continue; return res; } errno_ = -1; return -1; } void HostnameResolver::testtune(void) { do_tests = true; } static HostnameResolverContainer hrs_(0,100); HostnameResolver* HostnameResolver::Acquire(void) { return hrs_.Acquire(); } void HostnameResolver::Release(HostnameResolver* hr) { hrs_.Release(hr); } HostnameResolverContainer::HostnameResolverContainer(unsigned int minval,unsigned int maxval):min_(minval),max_(maxval) { KeepRange(); } HostnameResolverContainer::HostnameResolverContainer(void):min_(1),max_(10) { KeepRange(); } HostnameResolverContainer::~HostnameResolverContainer(void) { Glib::Mutex::Lock lock(lock_); for(std::list::iterator hr = hrs_.begin();hr != hrs_.end();++hr) { delete *hr; } } HostnameResolver* HostnameResolverContainer::Acquire(void) { Glib::Mutex::Lock lock(lock_); HostnameResolver* r = NULL; for(std::list::iterator hr = hrs_.begin();hr != hrs_.end();) { r = *hr; hr = hrs_.erase(hr); // Test if it still works if(r->ping()) break; // Broken proxy delete r; r = NULL; } // If no proxies - make new if(!r) r = new HostnameResolver; KeepRange(); return r; } void HostnameResolverContainer::Release(HostnameResolver* hr) { Glib::Mutex::Lock lock(lock_); if(!hr) return; hrs_.push_back(hr); KeepRange(); return; } void HostnameResolverContainer::SetMin(unsigned int val) { Glib::Mutex::Lock lock(lock_); min_ = val; KeepRange(); } void HostnameResolverContainer::SetMax(unsigned int val) { Glib::Mutex::Lock lock(lock_); min_ = val; KeepRange(); } void HostnameResolverContainer::KeepRange(void) { while(hrs_.size() > ((max_>=min_)?max_:min_)) { HostnameResolver* fa = hrs_.front(); hrs_.pop_front(); delete fa; } while(hrs_.size() < ((min_<=max_)?min_:max_)) { hrs_.push_back(new HostnameResolver); } } } nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/URL.cpp0000644000000000000000000000012313065017727022122 xustar000000000000000027 mtime=1490296791.969155 27 atime=1513200574.866706 29 ctime=1513200658.85973354 nordugrid-arc-5.4.2/src/hed/libs/common/URL.cpp0000644000175000002070000007753213065017727022206 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "URL.h" namespace Arc { static Logger URLLogger(Logger::getRootLogger(), "URL"); std::map URL::ParseOptions(const std::string& optstring, char separator, bool encoded) { std::map options; if (optstring.empty()) return options; std::string::size_type pos = 0; while (pos != std::string::npos) { std::string::size_type pos2 = optstring.find(separator, pos); std::string opt = (pos2 == std::string::npos ? 
optstring.substr(pos) : optstring.substr(pos, pos2 - pos)); pos = pos2; if (pos != std::string::npos) pos++; pos2 = opt.find('='); std::string option_name, option_value = ""; if (pos2 == std::string::npos) { option_name = opt; } else { option_name = opt.substr(0, pos2); option_value = opt.substr(pos2 + 1); } if (encoded) option_name = uri_unencode(option_name); if (encoded) option_value = uri_unencode(option_value); options[option_name] = option_value; } return options; } static std::list ParseAttributes(const std::string& attrstring, char separator, bool encoded = false) { std::list attributes; if (attrstring.empty()) return attributes; std::string::size_type pos = 0; while (pos != std::string::npos) { std::string::size_type pos2 = attrstring.find(separator, pos); std::string attr = (pos2 == std::string::npos ? attrstring.substr(pos) : attrstring.substr(pos, pos2 - pos)); pos = pos2; if (pos != std::string::npos) pos++; if (encoded) attr = uri_unencode(attr); attributes.push_back(attr); } return attributes; } static std::string AttributeString(const std::list& attributes, char separator, bool encode = false) { std::string attrstring; if (attributes.empty()) return attrstring; for (std::list::const_iterator it = attributes.begin(); it != attributes.end(); it++) { if (it != attributes.begin()) attrstring += separator; if(encode) { attrstring += uri_encode(*it, true); } else { attrstring += *it; } } return attrstring; } URL::URL() : ip6addr(false), port(-1), ldapscope(base), valid(false) {} URL::URL(const std::string& url, bool encoded, int defaultPort, const std::string& defaultPath) : ip6addr(false), port(-1), ldapscope(base), valid(true) { std::string::size_type pos, pos2, pos3; if (url[0] == '\0') { valid = false; return; } if (url[0] == '#') { // TODO: describe URLLogger.msg(VERBOSE, "URL is not valid: %s", url); valid = false; return; } // Looking for protocol separator pos = url.find(":"); if (pos != std::string::npos) { // Check if protocol looks like protocol for(std::string::size_type p = 0; p < pos; ++p) { char c = url[p]; if(isalnum(c) || (c == '+') || (c == '-') || (c == '.')) continue; pos = std::string::npos; break; } #ifdef WIN32 // Windows paths look like protocols - additional checks are needed // So for windows it looks like "disk:\" // TODO: probably some additional check is needed for "disk:"-like // paths. If such path can exist at all. 
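// For example, a Windows path such as "C:\data\file.txt" would otherwise be parsed as protocol "c".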
if(pos != std::string::npos) { if(url[pos+1] == '\\') { pos = std::string::npos; } } #endif } if (pos == std::string::npos) { // URL does not start from protocol - must be simple path if (url[0] == '@') { protocol = "urllist"; path = url.substr(1); } else { protocol = "file"; path = url; } if (encoded) path = uri_unencode(path); if (!Glib::path_is_absolute(path)) { path = Glib::build_filename(Glib::get_current_dir(), path); } // Simple paths are not expected to contain any options or metadata return; } // RFC says protocols should be lowercase and uppercase // must be converted to lowercase for consistency protocol = lower(url.substr(0, pos)); // Checking if protocol followed by host/authority part // or by path directly if((url[pos+1] != '/') || (url[pos+2] != '/')) { // No host part host = ""; pos += 1; pos2 = pos; // path start position path = url.substr(pos2); // This must be only path - we can accept path only for // limited set of protocols if ((protocol == "file" || protocol == "urllist" || protocol == "link")) { // decode it here because no more parsing is done if (encoded) path = uri_unencode(path); if (!Glib::path_is_absolute(path)) { path = Glib::build_filename(Glib::get_current_dir(), path); } return; } else if (protocol == "arc") { // TODO: It is not defined how arc protocol discovers // entry point in general case. // For same reason let's assume path must be always // absolute. if(url[pos] != '/') { URLLogger.msg(VERBOSE, "Illegal URL - path must be absolute: %s", url); valid = false; return; } } else { URLLogger.msg(VERBOSE, "Illegal URL - no hostname given: %s", url); valid = false; return; } } else { // There is host/authority part in this URL. That also // means path is absolute if present pos += 3; pos2 = url.find("@", pos); if (pos2 != std::string::npos) { if (protocol == "rc" || protocol == "fireman" || protocol == "lfc") { // Indexing protocols may contain locations std::string locstring = url.substr(pos, pos2 - pos); pos = pos2 + 1; pos2 = 0; while (pos2 != std::string::npos) { pos3 = locstring.find('|', pos2); std::string loc = (pos3 == std::string::npos ? 
locstring.substr(pos2) : locstring.substr(pos2, pos3 - pos2)); pos2 = pos3; if (pos2 != std::string::npos) pos2++; if (loc[0] == ';') { commonlocoptions = ParseOptions(loc.substr(1), ';'); } else { if (protocol == "rc") { pos3 = loc.find(';'); if (pos3 == std::string::npos) { locations.push_back(URLLocation(ParseOptions("", ';'), loc)); } else { locations.push_back(URLLocation(ParseOptions (loc.substr(pos3 + 1), ';'), loc.substr(pos3 + 1))); } } else { locations.push_back(loc); } } } } else { pos3 = url.find("/", pos); if (pos3 == std::string::npos) pos3 = url.length(); if (pos3 > pos2) { username = url.substr(pos, pos2 - pos); pos3 = username.find(':'); if (pos3 != std::string::npos) { passwd = username.substr(pos3 + 1); username.resize(pos3); } pos = pos2 + 1; } } } // Looking for end of host/authority part pos2 = url.find("/", pos); if (pos2 == std::string::npos) { // Path part is empty, host may be empty too host = url.substr(pos); path = ""; } else if (pos2 == pos) { // Empty host and non-empty absolute path host = ""; path = url.substr(pos2); } else { // Both host and absolute path present host = url.substr(pos, pos2 - pos); path = url.substr(pos2); } } if (path.empty() && !defaultPath.empty()) { path += defaultPath; } // At this point path must be absolutely absolute (starts with /) or empty if ((!path.empty()) && (path[0] != '/')) { URLLogger.msg(VERBOSE, "Illegal URL - path must be absolute or empty: %s", url); valid = false; return; } // Extracting port URL options (ARC extension) if (!host.empty()) { // Check for [ip6address] notation // If behaving strictly we should check for valid address // inside []. But if we do not do that only drawback is that // URL may have any hostname inside []. Not really important // issue. if(host[0] == '[') { ip6addr = true; pos2 = host.find(']'); if(pos2 == std::string::npos) { URLLogger.msg(VERBOSE, "Illegal URL - no closing ] for IPv6 address found: %s", url); valid = false; return; } // There may be only port or options after closing ] ++pos2; if(pos2 < host.length()) { if((host[pos2] != ':') && (host[pos2] != ';')) { URLLogger.msg(VERBOSE, "Illegal URL - closing ] for IPv6 address is followed by illegal token: %s", url); valid = false; return; } if(host[pos2] != ':') pos2 = std::string::npos; } else { pos2 = std::string::npos; } } else { pos2 = host.find(':'); } if (pos2 != std::string::npos) { pos3 = host.find(';', pos2); if (!stringto(pos3 == std::string::npos ? 
host.substr(pos2 + 1) : host.substr(pos2 + 1, pos3 - pos2 - 1), port)) { URLLogger.msg(VERBOSE, "Invalid port number in %s", url); } } else { pos3 = host.find(';'); pos2 = pos3; } if (pos3 != std::string::npos) urloptions = ParseOptions(host.substr(pos3 + 1), ';'); if (pos2 != std::string::npos) host.resize(pos2); if (ip6addr) host = host.substr(1,host.length()-2); } if (port == -1 && defaultPort != -1) { port = defaultPort; } else if (port == -1) { // If port is not default set default one if (protocol == "rc") port = RC_DEFAULT_PORT; if (protocol == "http") port = HTTP_DEFAULT_PORT; if (protocol == "https") port = HTTPS_DEFAULT_PORT; if (protocol == "httpg") port = HTTPG_DEFAULT_PORT; if (protocol == "ldap") port = LDAP_DEFAULT_PORT; if (protocol == "ftp") port = FTP_DEFAULT_PORT; if (protocol == "gsiftp") port = GSIFTP_DEFAULT_PORT; if (protocol == "lfc") port = LFC_DEFAULT_PORT; if (protocol == "root") port = XROOTD_DEFAULT_PORT; } if (protocol != "ldap" && protocol != "arc" && protocol.find("http") != 0) { pos2 = path.rfind('='); if (pos2 != std::string::npos) { pos3 = path.rfind(':', pos2); if (pos3 != std::string::npos) { pos = pos3; while (pos2 != std::string::npos && pos3 != std::string::npos) { pos2 = path.rfind('=', pos); if (pos2 != std::string::npos) { pos3 = path.rfind(':', pos2); if (pos3 != std::string::npos) pos = pos3; } } metadataoptions = ParseOptions(path.substr(pos + 1), ':', encoded); path = path.substr(0, pos); } } } ParsePath(encoded); // Normally host/authority names are case-insensitive host = lower(host); } void URL::ParsePath(bool encoded) { std::string::size_type pos, pos2, pos3; // if protocol = http, get the options after the ? if (protocol == "http" || protocol == "https" || protocol == "httpg" || protocol == "arc" || protocol == "srm" || protocol == "rucio" ) { pos = path.find("?"); if (pos != std::string::npos) { httpoptions = ParseOptions(path.substr(pos + 1), '&', encoded); path = path.substr(0, pos); } } // parse ldap protocol specific attributes if (protocol == "ldap") { std::string ldapscopestr; pos = path.find('?'); if (pos != std::string::npos) { pos2 = path.find('?', pos + 1); if (pos2 != std::string::npos) { pos3 = path.find('?', pos2 + 1); if (pos3 != std::string::npos) { ldapfilter = path.substr(pos3 + 1); ldapscopestr = path.substr(pos2 + 1, pos3 - pos2 - 1); } else { ldapscopestr = path.substr(pos2 + 1); } ldapattributes = ParseAttributes(path.substr(pos + 1, pos2 - pos - 1), ',', encoded); if (encoded) ldapfilter = uri_unencode(ldapfilter); if (encoded) ldapscopestr = uri_unencode(ldapscopestr); } else { ldapattributes = ParseAttributes(path.substr(pos + 1), ',', encoded); } path = path.substr(0, pos); } if (ldapscopestr == "base") ldapscope = base; else if (ldapscopestr == "one") ldapscope = onelevel; else if (ldapscopestr == "sub") ldapscope = subtree; else if (!ldapscopestr.empty()) { URLLogger.msg(VERBOSE, "Unknown LDAP scope %s - using base", ldapscopestr); } if (ldapfilter.empty()) ldapfilter = "(objectClass=*)"; if (path.find("/",1) != std::string::npos) path = Path2BaseDN(path); else path.erase(0,1); // remove leading / } if (encoded) path = uri_unencode(path); } URL::~URL() {} void URL::URIDecode(void) { path = uri_unencode(path); std::map newhttpoptions; for(std::map::iterator o = httpoptions.begin(); o != httpoptions.end(); ++o) { newhttpoptions[uri_unencode(o->first)] = uri_unencode(o->second); } httpoptions = newhttpoptions; std::map newmetadataoptions; for(std::map::iterator o = metadataoptions.begin(); o != 
metadataoptions.end(); ++o) { newmetadataoptions[uri_unencode(o->first)] = uri_unencode(o->second); } metadataoptions = newmetadataoptions; for(std::list::iterator a = ldapattributes.begin(); a != ldapattributes.end(); ++a) { *a = uri_unencode(*a); } ldapfilter = uri_unencode(ldapfilter); } const std::string& URL::Protocol() const { return protocol; } void URL::ChangeProtocol(const std::string& newprot) { protocol = lower(newprot); } const std::string& URL::Username() const { return username; } const std::string& URL::Passwd() const { return passwd; } const std::string& URL::Host() const { return host; } void URL::ChangeHost(const std::string& newhost) { host = lower(newhost); } int URL::Port() const { return port; } void URL::ChangePort(int newport) { port = newport; } const std::string& URL::Path() const { return path; } std::string URL::FullPath() const { std::string fullpath; if (!path.empty()) fullpath += path; if (!httpoptions.empty()) { fullpath += '?' + OptionString(httpoptions, '&'); } if (!ldapattributes.empty() || (ldapscope != base) || !ldapfilter.empty()) { fullpath += '?' + AttributeString(ldapattributes, ','); } if ((ldapscope != base) || !ldapfilter.empty()) { switch (ldapscope) { case base: fullpath += "?base"; break; case onelevel: fullpath += "?one"; break; case subtree: fullpath += "?sub"; break; } } if (!ldapfilter.empty()) fullpath += '?' + ldapfilter; return fullpath; } std::string URL::FullPathURIEncoded() const { std::string fullpath; if (!path.empty()) fullpath += uri_encode(path, false); if (!httpoptions.empty()) { fullpath += '?' + OptionString(httpoptions, '&', true); } if (!ldapattributes.empty() || (ldapscope != base) || !ldapfilter.empty()) { fullpath += '?' + AttributeString(ldapattributes, ',', true); } if ((ldapscope != base) || !ldapfilter.empty()) { switch (ldapscope) { case base: fullpath += "?base"; break; case onelevel: fullpath += "?one"; break; case subtree: fullpath += "?sub"; break; } } if (!ldapfilter.empty()) fullpath += '?' 
+ uri_encode(ldapfilter, true); return fullpath; } void URL::ChangeFullPath(const std::string& newpath, bool encoded) { path = newpath; ParsePath(encoded); std::string basepath = path; if (protocol != "ldap") ChangePath(basepath); } void URL::ChangePath(const std::string& newpath) { path = newpath; // parse basedn in case of ldap-protocol if (protocol == "ldap") { if (path.find("/") != std::string::npos) path = Path2BaseDN(path); // add absolute path for relative file URLs } else if (protocol == "file" || protocol == "urllist") { if(!Glib::path_is_absolute(path)) { path = Glib::build_filename(Glib::get_current_dir(), path); } // for generic URL just make sure path has leading / } else if ((path[0] != '/') && (!path.empty())) { URLLogger.msg(WARNING, "Attempt to assign relative path to URL - making it absolute"); path = "/" + path; } } const std::map& URL::HTTPOptions() const { return httpoptions; } const std::string& URL::HTTPOption(const std::string& option, const std::string& undefined) const { std::map::const_iterator opt = httpoptions.find(option); if (opt != httpoptions.end()) { return opt->second; } else { return undefined; } } bool URL::AddHTTPOption(const std::string& option, const std::string& value, bool overwrite) { if (option.empty() || value.empty() || (!overwrite && httpoptions.find(option) != httpoptions.end())) { return false; } httpoptions[option] = value; return true; } void URL::RemoveHTTPOption(const std::string& option) { httpoptions.erase(option); } const std::map& URL::MetaDataOptions() const { return metadataoptions; } const std::string& URL::MetaDataOption(const std::string& option, const std::string& undefined) const { std::map::const_iterator opt = metadataoptions.find(option); if (opt != metadataoptions.end()) { return opt->second; } else { return undefined; } } const std::list& URL::LDAPAttributes() const { return ldapattributes; } void URL::AddLDAPAttribute(const std::string& attribute) { ldapattributes.push_back(attribute); } URL::Scope URL::LDAPScope() const { return ldapscope; } void URL::ChangeLDAPScope(const Scope newscope) { ldapscope = newscope; } const std::string& URL::LDAPFilter() const { return ldapfilter; } void URL::ChangeLDAPFilter(const std::string& newfilter) { ldapfilter = newfilter; } const std::map& URL::Options() const { return urloptions; } const std::string& URL::Option(const std::string& option, const std::string& undefined) const { std::map::const_iterator opt = urloptions.find(option); if (opt != urloptions.end()) return opt->second; else return undefined; } bool URL::AddOption(const std::string& option, const std::string& value, bool overwrite) { if (option.empty() || value.empty() || (!overwrite && urloptions.find(option) != urloptions.end())) return false; urloptions[option] = value; return true; } bool URL::AddOption(const std::string& option, bool overwrite) { std::string::size_type pos = option.find('='); if (pos == std::string::npos) { URLLogger.msg(VERBOSE, "URL option %s does not have format name=value", option); return false; } std::string attr_name(option.substr(0, pos)); std::string attr_value(option.substr(pos+1)); return AddOption(attr_name, attr_value, overwrite); } void URL::AddMetaDataOption(const std::string& option, const std::string& value, bool overwrite) { if (!overwrite && metadataoptions.find(option) != metadataoptions.end()) return; metadataoptions[option] = value; } void URL::AddLocation(const URLLocation& location) { locations.push_back(location); } void URL::RemoveOption(const std::string& option) { 
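// Erase the named URL option (the ARC ";name=value" extension); this is a no-op if the option is not set.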
urloptions.erase(option); } void URL::RemoveMetaDataOption(const std::string& option) { metadataoptions.erase(option); } const std::list& URL::Locations() const { return locations; } const std::map& URL::CommonLocOptions() const { return commonlocoptions; } const std::string& URL::CommonLocOption(const std::string& option, const std::string& undefined) const { std::map::const_iterator opt = commonlocoptions.find(option); if (opt != commonlocoptions.end()) return opt->second; else return undefined; } std::string URL::fullstr(bool encode) const { std::string urlstr; if (!username.empty()) urlstr += username; if (!passwd.empty()) urlstr += ':' + passwd; for (std::list::const_iterator it = locations.begin(); it != locations.end(); it++) { if (it != locations.begin()) urlstr += '|'; urlstr += it->fullstr(); } if (!locations.empty() && !commonlocoptions.empty()) urlstr += '|'; if (!commonlocoptions.empty()) { urlstr += ';' + OptionString(commonlocoptions, ';', encode); } if (!username.empty() || !passwd.empty() || !locations.empty() || !commonlocoptions.empty()) { urlstr += '@'; } if (!host.empty()) { if(ip6addr) { urlstr += "[" + host + "]"; } else { urlstr += host; } } if (port != -1) urlstr += ':' + tostring(port); if (!urloptions.empty()) { urlstr += ';' + OptionString(urloptions, ';'); } if (!protocol.empty()) { if (!urlstr.empty()) { urlstr = protocol + "://" + urlstr; } else { urlstr = protocol + ":"; } } // Constructor makes sure path is absolute or empty. // ChangePath() also makes such check. if ( protocol == "ldap") { // Unfortunately ldap is special case urlstr += '/'; } if (encode) { urlstr += uri_encode(path, false); } else { urlstr += path; } // If there is nothing at this point there is no sense // to add any options if (urlstr.empty()) return urlstr; if (!httpoptions.empty()) { urlstr += '?' + OptionString(httpoptions, '&', encode); } if (!ldapattributes.empty() || (ldapscope != base) || !ldapfilter.empty()) { urlstr += '?' + AttributeString(ldapattributes, ',', encode); } if ((ldapscope != base) || !ldapfilter.empty()) { switch (ldapscope) { case base: urlstr += "?base"; break; case onelevel: urlstr += "?one"; break; case subtree: urlstr += "?sub"; break; } } if (!ldapfilter.empty()) { if (encode) { urlstr += '?' + uri_encode(ldapfilter, true); } else { urlstr += '?' + ldapfilter; } } if (!metadataoptions.empty()) { urlstr += ':' + OptionString(metadataoptions, ':', encode); } return urlstr; } std::string URL::plainstr(bool encode) const { std::string urlstr; if (!username.empty()) urlstr += username; if (!passwd.empty()) urlstr += ':' + passwd; if (!username.empty() || !passwd.empty()) urlstr += '@'; if (!host.empty()) { if(ip6addr) { urlstr += "[" + host + "]"; } else { urlstr += host; } } if (port != -1) urlstr += ':' + tostring(port); if (!protocol.empty()) { if (!urlstr.empty()) { urlstr = protocol + "://" + urlstr; } else { urlstr = protocol + ":"; } } // Constructor makes sure path is absolute or empty. // ChangePath also makes such check. if ( protocol == "ldap") { // Unfortunately ldap is special case urlstr += '/'; } if (encode) { urlstr += uri_encode(path,false); } else { urlstr += path; } // If there is nothing at this point there is no sense // to add any options if (urlstr.empty()) return urlstr; if (!httpoptions.empty()) { urlstr += '?' + OptionString(httpoptions, '&', encode); } if (!ldapattributes.empty() || (ldapscope != base) || !ldapfilter.empty()) { urlstr += '?' 
+ AttributeString(ldapattributes, ',', encode); } if ((ldapscope != base) || !ldapfilter.empty()) { switch (ldapscope) { case base: urlstr += "?base"; break; case onelevel: urlstr += "?one"; break; case subtree: urlstr += "?sub"; break; } } if (!ldapfilter.empty()) { if (encode) { urlstr += '?' + uri_encode(ldapfilter, true); } else { urlstr += '?' + ldapfilter; } } return urlstr; } std::string URL::str(bool encode) const { std::string urlstr = plainstr(encode); if (!metadataoptions.empty()) { urlstr += ':' + OptionString(metadataoptions, ':', encode); } return urlstr; } std::string URL::ConnectionURL() const { std::string urlstr; if (!protocol.empty()) urlstr = protocol + "://"; if (!host.empty()) { if(ip6addr) { urlstr += "[" + host + "]"; } else { urlstr += host; } } if (port != -1) urlstr += ':' + tostring(port); return urlstr; } bool URL::operator<(const URL& url) const { return (str() < url.str()); } bool URL::operator==(const URL& url) const { return (str() == url.str()); } std::string URL::BaseDN2Path(const std::string& basedn) { std::string::size_type pos, pos2; // mds-vo-name=local, o=grid --> o=grid/mds-vo-name=local std::string newpath; pos = basedn.size(); while ((pos2 = basedn.rfind(",", pos - 1)) != std::string::npos) { std::string tmppath = basedn.substr(pos2 + 1, pos - pos2 - 1); tmppath = tmppath.substr(tmppath.find_first_not_of(' ')); newpath += tmppath + '/'; pos = pos2; } newpath += basedn.substr(0, pos); return newpath; } std::string URL::Path2BaseDN(const std::string& newpath) { if (newpath.empty()) return ""; std::string basedn; std::string::size_type pos, pos2; pos = newpath.size(); while ((pos2 = newpath.rfind("/", pos - 1)) != std::string::npos) { if (pos2 == 0) break; basedn += newpath.substr(pos2 + 1, pos - pos2 - 1) + ", "; pos = pos2; } if (pos2 == std::string::npos) basedn += newpath.substr(0, pos); else basedn += newpath.substr(pos2 + 1, pos - pos2 - 1); return basedn; } URL::operator bool() const { return valid; } bool URL::operator!() const { return !valid; } std::string URL::OptionString(const std::map& options, char separator, bool encode) { std::string optstring; if (options.empty()) return optstring; for (std::map::const_iterator it = options.begin(); it != options.end(); it++) { if (it != options.begin()) optstring += separator; if (encode) { optstring += uri_encode(it->first, true) + '=' + uri_encode(it->second, true); } else { optstring += it->first + '=' + it->second; } } return optstring; } std::string URL::URIEncode(const std::string& str) { return uri_encode(str, true); } std::string URL::URIDecode(const std::string& str) { return uri_unencode(str); } std::ostream& operator<<(std::ostream& out, const URL& url) { return (out << url.str()); } URLLocation::URLLocation(const std::string& url) : URL(url) {} URLLocation::URLLocation(const std::string& url, const std::string& name) : URL(url), name(name) {} URLLocation::URLLocation(const URL& url) : URL(url) {} URLLocation::URLLocation(const URL& url, const std::string& name) : URL(url), name(name) {} URLLocation::URLLocation(const std::map& options, const std::string& name) : URL(), name(name) { urloptions = options; } const std::string& URLLocation::Name() const { return name; } URLLocation::~URLLocation() {} std::string URLLocation::str(bool encode) const { if (*this) return URL::str(encode); else return name; } std::string URLLocation::fullstr(bool encode) const { if (*this) return URL::fullstr(encode); else if (urloptions.empty()) return name; else return name + ';' + OptionString(urloptions, 
';'); } PathIterator::PathIterator(const std::string& path, bool end) : path(path), pos(std::string::npos), end(end), done(false) { if (end) operator--(); else operator++(); } PathIterator::~PathIterator() {} PathIterator& PathIterator::operator++() { done = false; if (pos != std::string::npos) pos = path.find('/', pos + 1); else if (!end && !path.empty()) pos = path.find('/',(path[0] == '/')?1:0); else done = true; end = true; return *this; } PathIterator& PathIterator::operator--() { done = false; if (pos != std::string::npos) { pos = pos ? path.rfind('/', pos - 1) : std::string::npos; } else if (end && !path.empty()) { if((pos = path.rfind('/')) == 0) pos = std::string::npos; } else { done = true; } end = false; return *this; } PathIterator::operator bool() const { return !done; } std::string PathIterator::operator*() const { if (pos != std::string::npos) return path.substr(0, pos); else if (end) return path; else return ""; } std::string PathIterator::Rest() const { if (pos != std::string::npos) return path.substr(pos + 1); else if (!end) return path; else return ""; } std::list ReadURLList(const URL& url) { std::list urllist; if (url.Protocol() == "urllist") { std::ifstream f(url.Path().c_str()); std::string line; while (getline(f, line)) { URL url(line); if (url) urllist.push_back(url); else URLLogger.msg(VERBOSE, "urllist %s contains invalid URL: %s", url.Path(), line); } } else URLLogger.msg(VERBOSE, "URL protocol is not urllist: %s", url.str()); return urllist; } bool URL::StringMatches(const std::string& _str) const { std::string str = _str; if (str[str.length()-1] == '/') { str.erase(str.length()-1); } if (lower(protocol) + "://" == lower(str.substr(0, protocol.length() + 3))) { str.erase(0, protocol.length()+3); if (str.empty()) { return false; } } if (lower(host) != lower(str.substr(0, host.length()))) { return false; } str.erase(0, host.length()); std::string sPort = tostring(port); if (":" + sPort == str.substr(0, sPort.length()+1)) { str.erase(0, sPort.length()+1); } if (str.empty()) { return true; } if (protocol == "ldap" && str[0] == '/') { // For LDAP there is no starting slash (/) str.erase(0, 1); } return path == str; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/FileLock.h0000644000000000000000000000012412111140470022575 xustar000000000000000027 mtime=1361363256.081099 27 atime=1513200574.936707 30 ctime=1513200658.813732977 nordugrid-arc-5.4.2/src/hed/libs/common/FileLock.h0000644000175000002070000001036612111140470022650 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_FILELOCK_H__ #define __ARC_FILELOCK_H__ #include #include namespace Arc { /// A general file locking class. /** * This class can be used when protected access is required to files * which are used by multiple processes or threads. Call acquire() to * obtain a lock and release() to release it when finished. check() can * be used to verify if a lock is valid for the current process. Locks are * independent of FileLock objects - locks are only created and destroyed * through acquire() and release(), not on creation or destruction of * FileLock objects. * * Unless use_pid is set false, the process ID and hostname of the calling * process are stored in a file filename.lock in the form pid\@hostname. * This information is used to determine whether a lock is still valid. * It is also possible to specify a timeout on the lock. 
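 *
 * A minimal usage sketch (illustrative only; the file name is just an example):
 * @code
 * Arc::FileLock lock("/tmp/datafile");
 * if (lock.acquire()) {
 *   // ... operate on /tmp/datafile while holding the lock ...
 *   lock.release();
 * }
 * @endcode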
* * To ensure an atomic locking operation, acquire() first creates a * temporary lock file filename.lock.XXXXXX, then attempts to rename * this file to filename.lock. After a successful rename the lock file * is checked to make sure the correct process ID and hostname are inside. * This eliminates race conditions where multiple processes compete to * obtain the lock. * @ingroup common * @headerfile FileLock.h arc/FileLock.h */ class FileLock { public: /// Default timeout for a lock const static int DEFAULT_LOCK_TIMEOUT; /// Suffix added to file name to make lock file const static std::string LOCK_SUFFIX; /// Create a new FileLock object. /** * @param filename The name of the file to be locked * @param timeout The timeout of the lock * @param use_pid If true, use process id in the lock and to * determine lock validity */ FileLock(const std::string& filename, unsigned int timeout=DEFAULT_LOCK_TIMEOUT, bool use_pid=true); /// Acquire the lock. /** * Returns true if the lock was acquired successfully. Locks are acquired * if no lock file currently exists, or if the current lock file is * invalid. A lock is invalid if the process ID inside the lock no longer * exists on the host inside the lock, or the age of the lock file is * greater than the lock timeout. * * @param lock_removed Set to true if an existing lock was removed due * to being invalid. In this case the caller may decide to check or * delete the file as it is potentially corrupted. * @return True if lock is successfully acquired */ bool acquire(bool& lock_removed); /// Acquire the lock. /** * Callers can use this version of acquire() if they do not care whether * an invalid lock was removed in the process of obtaining the lock. * @return True if lock is successfully acquired */ bool acquire(); /// Release the lock. /** * @param force Remove the lock without checking ownership or timeout * @return True if lock is successfully released */ bool release(bool force=false); /// Check the lock is valid. /** * @param log_error may be set to false to log error messages at INFO * level, in cases where the lock not existing or being owned by another * host are not errors. * @return 0 if the lock is valid for the current process, the pid inside * the lock if the lock is owned by another process on the same host, or * -1 if the lock is owned by another host or any other error occurred. */ int check(bool log_error = true); /// Get the lock suffix used static std::string getLockSuffix(); private: /// File to apply lock to std::string filename; /// Lock file name: filename + lock suffix std::string lock_file; /// Lock timeout int timeout; /// Whether to use process ID in the lock file bool use_pid; /// Process ID to use in the lock file std::string pid; /// Hostname to use in the lock file std::string hostname; /// Logger object static Logger logger; /// private acquire method. 
bool acquire_(bool& lock_removed); /// convenience method for writing pid@hostname to file bool write_pid(int h); }; } // namespace Arc #endif nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/GUID.cpp0000644000000000000000000000012411524601542022201 xustar000000000000000027 mtime=1297285986.246685 27 atime=1513200574.906706 30 ctime=1513200658.854733479 nordugrid-arc-5.4.2/src/hed/libs/common/GUID.cpp0000644000175000002070000001045711524601542022255 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #ifdef WIN32 #define NOGDI #include #include #else #include #include #include #include #include #include #endif #include "GUID.h" #ifdef WIN32 void Arc::GUID(std::string& guid) { ::GUID g; HRESULT r; r = CoCreateGuid(&g); if (r != S_OK) { printf("There is an error during GUID generation!\n"); return; } char s[32]; snprintf(s, 32, "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x\n", g.Data1, g.Data2, g.Data3, g.Data4[0], g.Data4[1], g.Data4[2], g.Data4[3], g.Data4[4], g.Data4[5], g.Data4[6], g.Data4[7]); guid = s; } std::string Arc::UUID(void) { std::string ret; GUID(ret); return ret; } #else static bool initialized = false; static char guid_chars[] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9' }; static uint32_t guid_counter; static void guid_add_string(std::string& guid, uint32_t n) { uint32_t max = 0xFFFFFFFF; for (; max;) { uint32_t i = n % sizeof(guid_chars); guid += guid_chars[i]; n /= sizeof(guid_chars); max /= sizeof(guid_chars); n += 0x55555555; } } void Arc::GUID(std::string& guid) { if (!initialized) { srandom(time(NULL)+random()); initialized = true; } struct timeval tv; struct timezone tz; gettimeofday(&tv, &tz); // Use up to 4 IP addresses uint32_t hostid[4] = { INADDR_ANY, INADDR_ANY, INADDR_ANY, INADDR_ANY }; hostid[0] = gethostid(); if (htonl(INADDR_LOOPBACK) == hostid[0]) hostid[0] = INADDR_ANY; char hostname[1024]; // Local addresses if (gethostname(hostname, sizeof(hostname) - 1) == 0) { hostname[sizeof(hostname) - 1] = 0; struct addrinfo* res = NULL; if(getaddrinfo(hostname,NULL,NULL,&res) == 0) { for(struct addrinfo* r=res;r;r=r->ai_next) { if(!(r->ai_addr)) continue; uint32_t s_address = INADDR_ANY; if(r->ai_addr->sa_family == AF_INET) { struct sockaddr_in* addr = (struct sockaddr_in*)(r->ai_addr); s_address = addr->sin_addr.s_addr; if(s_address == htonl(INADDR_LOOPBACK)) continue; } else if(r->ai_addr->sa_family == AF_INET6) { struct sockaddr_in6* addr = (struct sockaddr_in6*)(r->ai_addr); s_address = 0; for(int i=0;i<16;++i) { s_address ^= addr->sin6_addr.s6_addr[i]; s_address <<= 2; } } if(s_address == INADDR_ANY) continue; int i; for (i = 0; i < 3; i++) { if (hostid[i] == INADDR_ANY) break; if (s_address == hostid[i]) break; } if (i >= 3) continue; if (hostid[i] != INADDR_ANY) continue; hostid[i] = s_address; } freeaddrinfo(res); } } // External address (TODO) // Use collected information guid_add_string(guid, tv.tv_usec); guid_add_string(guid, tv.tv_sec); guid_add_string(guid, hostid[0]); guid_add_string(guid, hostid[1]); guid_add_string(guid, hostid[2]); guid_add_string(guid, hostid[3]); guid_add_string(guid, getpid()); guid_add_string(guid, guid_counter++); guid_add_string(guid, random()); } #if HAVE_UUID_UUID_H #include std::string 
Arc::UUID(void) { uuid_t uu; uuid_generate(uu); char uustr[37]; uuid_unparse(uu, uustr); return uustr; } #else std::string Arc::UUID(void) { if (!initialized) { srandom(time(NULL)+random()); initialized = true; } std::string uuid_str(""); int rnd[16]; char buffer[20]; unsigned long uuid_part; for (int j = 0; j < 4; j++) { for (int i = 0; i < 16; i++) rnd[i] = random() % 256; rnd[6] = (rnd[6] & 0x4F) | 0x40; rnd[8] = (rnd[8] & 0xBF) | 0x80; uuid_part = 0L; for (int i = 0; i < 16; i++) uuid_part = (uuid_part << 8) + rnd[i]; snprintf(buffer, sizeof(buffer), "%08lx", uuid_part); uuid_str.append(buffer, 8); } uuid_str.insert(8, "-"); uuid_str.insert(13, "-"); uuid_str.insert(18, "-"); uuid_str.insert(23, "-"); return uuid_str; } #endif #endif // non WIN32 nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/DBInterface.h0000644000000000000000000000012412075504704023230 xustar000000000000000027 mtime=1358334404.730937 27 atime=1513200574.922706 30 ctime=1513200658.811732953 nordugrid-arc-5.4.2/src/hed/libs/common/DBInterface.h0000644000175000002070000001000112075504704023265 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_DBINTERFACE_H__ #define __ARC_DBINTERFACE_H__ #include #include namespace Arc { /// Interface for calling database client library. /** For different types of database client library, different classes should be implemented by implementing this interface. \headerfile DBInterface.h arc/DBInterface.h */ class Database { public: /// Default constructor. Database() {} /// Constructor which uses the server's name(or IP address) and port as parameters. Database(std::string& server, int port) {} /// Copy constructor. Database(const Database& other) {} /// Deconstructor. virtual ~Database() {} /// Do connection with database server. /** @param dbname The database name which will be used. @param user The username which will be used to access database. @param password The password which will be used to access database. */ virtual bool connect(std::string& dbname, std::string& user, std::string& password) = 0; /// Get the connection status. virtual bool isconnected() const = 0; /// Close the connection with database server. virtual void close() = 0; /// Enable ssl communication for the connection. /** @param keyfile The location of key file. @param certfile The location of certificate file. @param cafile The location of ca file. @param capath The location of ca directory */ virtual bool enable_ssl(const std::string& keyfile = "", const std::string& certfile = "", const std::string& cafile = "", const std::string& capath = "") = 0; /// Ask database server to shutdown. virtual bool shutdown() = 0; }; typedef std::vector > QueryArrayResult; typedef std::vector QueryRowResult; /// Class representing a database query. /** \headerfile DBInterface.h arc/DBInterface.h */ class Query { public: /// Default constructor. Query() {} /// Constructor. /** @param db The database object which will be used by Query class to get the database connection */ Query(Database *db) {} //Query(Database* db, const std::string& sqlstr); /// Deconstructor. virtual ~Query() {} /// Get the column number in the query result. virtual int get_num_colums() = 0; /// Get the row number in the query result. 
virtual int get_num_rows() = 0; /// Execute the query /** @param sqlstr The sql sentence used to query */ virtual bool execute(const std::string& sqlstr) = 0; /// Get the value of one row in the query result /** @param row_number The number of the row @return A vector includes all the values in the row */ virtual QueryRowResult get_row(int row_number) const = 0; /// Get the value of one row in the query result. /** The row number will be automatically increased each time the method is called. */ virtual QueryRowResult get_row() const = 0; /// Get the value of one specific field in one specific row /** @param row_number The row number inside the query result @param field_name The field name for the value which will be return @return The value of the specified filed in the specified row */ virtual std::string get_row_field(int row_number, std::string& field_name) = 0; /// Query the database by using some parameters into sql sentence /** An example sentence: "select table.value from table where table.name = ?" @param sqlstr The sql sentence with some parameters marked with "?". @param result The result in an array which includes all of the value in query result. @param arguments The argument list which should exactly correspond with the parameters in the sql sentence. */ virtual bool get_array(std::string& sqlstr, QueryArrayResult& result, std::vector& arguments) = 0; }; } // namespace Arc #endif /* __ARC_DBINTERFACE_H__ */ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/Thread.cpp0000644000000000000000000000012413213442363022661 xustar000000000000000027 mtime=1512981747.779448 27 atime=1513200574.869706 30 ctime=1513200658.858733528 nordugrid-arc-5.4.2/src/hed/libs/common/Thread.cpp0000644000175000002070000004640613213442363022740 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #define USE_THREAD_POOL #define USE_THREAD_DATA #ifdef HAVE_STDINT_H #include #endif #include #include #ifdef USE_THREAD_POOL #include #ifndef WIN32 #include #endif #endif #include #include #include #include "Thread.h" namespace Arc { static Logger threadLogger(Logger::getRootLogger(), "Thread"); #ifdef USE_THREAD_DATA class ThreadDataPool; class ThreadData { friend class ThreadDataPool; private: std::map items_; typedef std::map::iterator items_iterator; typedef std::pair items_pair; Glib::Mutex lock_; // This counter is needed because due to delayed thread creation // parent instance may be already destroyed while child is not yet // created. Another solution would be to do Inherit() in parent thread. // but then interface of ThreadDataItem would become too complicated. int usage_count; ThreadData(void); ~ThreadData(void); // Do not call this void Acquire(void); public: // Get ThreadData instance of current thread static ThreadData* Get(void); // Destroy ThreadData instance of current thread static void Remove(void); // Copy items from another instance (uses Dup method) void Inherit(ThreadData* parent); // Attach item to this instance void AddItem(const std::string& key,ThreadDataItem* item); // Fetch item from this instance ThreadDataItem* GetItem(const std::string& key); // Decrease counter and destroy object if 0. 
void Release(void); }; #endif #ifdef USE_THREAD_POOL class ThreadInc { public: ThreadInc(void); ~ThreadInc(void); }; #endif class ThreadArgument { public: typedef void (*func_t)(void*); void *arg; func_t func; SimpleCounter* count; #ifdef USE_THREAD_DATA ThreadData* data; #endif #ifdef USE_THREAD_POOL ThreadInc* resource; ThreadArgument& acquire(void) { resource = new ThreadInc; return *this; } void release(void) { if(resource) delete resource; resource = NULL; } #endif #ifdef USE_THREAD_DATA ThreadArgument(func_t f, void *a, SimpleCounter *c, ThreadData *d) : arg(a), func(f), count(c), data(d) #else ThreadArgument(func_t f, void *a, SimpleCounter *c) : arg(a), func(f), count(c) #endif #ifdef USE_THREAD_POOL ,resource(NULL) #endif {} ~ThreadArgument(void) { } void thread(void); }; #ifdef USE_THREAD_POOL // This is not a real pool. It is just a queue of requests for // new threads. Hopefuly it will be converted to pool later for // better performance. class ThreadPool { public: friend class ThreadInc; private: int max_count; int count; Glib::Mutex count_lock; Glib::Mutex queue_lock; std::list queue; int CheckQueue(void); ~ThreadPool(void) { }; public: ThreadPool(void); void PushQueue(ThreadArgument* arg); int Num(void) { return count; }; }; ThreadPool::ThreadPool(void):max_count(0),count(0) { // Estimating amount of available memory uint64_t n_max; { // This is very estimation of size of virtual memory available for process // Using size of pointer (32-bit vs 64-bit architecture) unsigned int bits = 8*sizeof(int*); // Still all 64 bits can't be used for addressing. // Normally max adressable size is 2^48 // Source: http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details if(bits > 48) bits = 48; // It is common to have half taken by OS bits = bits - 1; // Dividing by 2 assuming each thread will equally use // stack and heap uint64_t n = (((uint64_t)1)< INT_MAX) n_max = INT_MAX; max_count = (int)n_max-1; // TODO: can't use logger here because it will try to initilize pool //threadLogger.msg(DEBUG, "Maximum number of threads is %i",max_count); } int ThreadPool::CheckQueue(void) { Glib::Mutex::Lock lock(queue_lock, Glib::TRY_LOCK); if(!lock.locked()) return -1; int size = queue.size(); while((count < max_count) && (size > 0)) { ThreadArgument* argument = *(queue.begin()); argument->acquire(); try { UserSwitch usw(0,0); Glib::Thread::create(sigc::mem_fun(*argument, &ThreadArgument::thread), thread_stacksize, false, false, Glib::THREAD_PRIORITY_NORMAL); queue.erase(queue.begin()); } catch (Glib::Error& e) { threadLogger.msg(ERROR, "%s", e.what()); argument->release(); } catch (Glib::Exception& e) { threadLogger.msg(ERROR, "%s", e.what()); argument->release(); } catch (std::exception& e) { threadLogger.msg(ERROR, "%s", e.what()); argument->release(); }; size = queue.size(); } return size; } void ThreadPool::PushQueue(ThreadArgument* arg) { Glib::Mutex::Lock lock(queue_lock); queue.push_back(arg); lock.release(); if(CheckQueue() > 0) threadLogger.msg(INFO, "Maximum number of threads running - puting new request into queue"); } static ThreadPool* pool = NULL; ThreadInc::ThreadInc(void) { if(!pool) return; pool->count_lock.lock(); ++(pool->count); pool->count_lock.unlock(); } ThreadInc::~ThreadInc(void) { if(!pool) return; pool->count_lock.lock(); --(pool->count); pool->count_lock.unlock(); pool->CheckQueue(); } #endif void ThreadArgument::thread(void) { ThreadId::getInstance().add(); #ifdef USE_THREAD_DATA ThreadData* tdata = ThreadData::Get(); if(tdata) { tdata->Inherit(data); 
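// Drop the reference obtained from ThreadData::Get() above; Inherit() has already released the parent's reference.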
tdata->Release(); } #endif #ifdef USE_THREAD_POOL ThreadInc resource_; release(); #endif func_t f_temp = func; void *a_temp = arg; SimpleCounter *c_temp = count; delete this; try { (*f_temp)(a_temp); } catch (Glib::Error& e) { threadLogger.msg(ERROR, "Thread exited with Glib error: %s", e.what()); } catch (Glib::Exception& e) { threadLogger.msg(ERROR, "Thread exited with Glib exception: %s", e.what()); } catch (std::exception& e) { threadLogger.msg(ERROR, "Thread exited with generic exception: %s", e.what()); }; if(c_temp) c_temp->dec(); #ifdef USE_THREAD_DATA ThreadData::Remove(); #endif ThreadId::getInstance().remove(); } ThreadId& ThreadId::getInstance() { static ThreadId* id = new ThreadId(); return *id; } ThreadId::ThreadId(): thread_no(0) {} void ThreadId::add() { Glib::Mutex::Lock lock(mutex); if (thread_no == ULONG_MAX) thread_no = 0; thread_ids[(size_t)(void*)Glib::Thread::self()] = ++thread_no; } void ThreadId::remove() { Glib::Mutex::Lock lock(mutex); thread_ids.erase((size_t)(void*)Glib::Thread::self()); } unsigned long int ThreadId::get() { Glib::Mutex::Lock lock(mutex); size_t id = (size_t)(void*)Glib::Thread::self(); if (thread_ids.count(id) == 0) return id; return thread_ids[id]; } bool CreateThreadFunction(void (*func)(void*), void *arg, SimpleCounter* count ) { #ifdef USE_THREAD_POOL if(!pool) return false; #ifdef USE_THREAD_DATA ThreadArgument *argument = new ThreadArgument(func, arg, count, ThreadData::Get()); #else ThreadArgument *argument = new ThreadArgument(func, arg, count); #endif if(count) count->inc(); pool->PushQueue(argument); #else #ifdef USE_THREAD_DATA ThreadArgument *argument = new ThreadArgument(func, arg, count, ThreadData::Get()); #else ThreadArgument *argument = new ThreadArgument(func, arg, count); #endif if(count) count->inc(); try { UserSwitch usw(0,0); Glib::Thread::create(sigc::mem_fun(*argument, &ThreadArgument::thread), thread_stacksize, false, false, Glib::THREAD_PRIORITY_NORMAL); } catch (std::exception& e) { threadLogger.msg(ERROR, e.what()); if(count) count->dec(); delete argument; return false; }; #endif return true; } /* bool CreateThreadFunction(void (*func)(void*), void *arg, Glib::Thread *&thr) { ThreadArgument *argument = new ThreadArgument(func, arg); Glib::Thread *thread; try { UserSwitch usw(0,0); thread = Glib::Thread::create(sigc::mem_fun(*argument, &ThreadArgument::thread), thread_stacksize, true, // thread joinable false, Glib::THREAD_PRIORITY_NORMAL); } catch (std::exception& e) { threadLogger.msg(ERROR, e.what()); delete argument; return false; }; thr = thread; return true; } */ /* Example of how to use CreateThreadClass macro class testclass { public: int a; testclass(int v) { a=v; }; void run(void) { a=0; }; }; void test(void) { testclass tc(1); CreateThreadClass(tc,testclass::run); } */ // ---------------------------------------- SimpleCounter::~SimpleCounter(void) { /* race condition ? 
*/ lock_.lock(); count_ = 0; cond_.broadcast(); lock_.unlock(); } int SimpleCounter::inc(void) { lock_.lock(); ++count_; cond_.broadcast(); int r = count_; lock_.unlock(); return r; } int SimpleCounter::dec(void) { lock_.lock(); if(count_ > 0) --count_; cond_.broadcast(); int r = count_; lock_.unlock(); return r; } int SimpleCounter::get(void) const { Glib::Mutex& vlock = const_cast(lock_); vlock.lock(); int r = count_; vlock.unlock(); return r; } int SimpleCounter::set(int v) { lock_.lock(); count_ = v; cond_.broadcast(); int r = count_; lock_.unlock(); return r; } void SimpleCounter::wait(void) const { Glib::Mutex& vlock = const_cast(lock_); Glib::Cond& vcond = const_cast(cond_); vlock.lock(); while (count_ > 0) vcond.wait(vlock); vlock.unlock(); } bool SimpleCounter::wait(int t) const { if(t < 0) { wait(); return true; } Glib::Mutex& vlock = const_cast(lock_); Glib::Cond& vcond = const_cast(cond_); vlock.lock(); Glib::TimeVal etime; etime.assign_current_time(); etime.add_milliseconds(t); bool res(true); while (count_ > 0) { res = vcond.timed_wait(vlock, etime); if (!res) break; } vlock.unlock(); return res; } // ---------------------------------------- void SharedMutex::add_shared_lock(void) { shared_list::iterator s = shared_.find(Glib::Thread::self()); if(s != shared_.end()) { ++(s->second); } else { shared_[Glib::Thread::self()] = 1; }; } void SharedMutex::remove_shared_lock(void) { shared_list::iterator s = shared_.find(Glib::Thread::self()); if(s != shared_.end()) { --(s->second); if(!(s->second)) { shared_.erase(s); }; }; } bool SharedMutex::have_shared_lock(void) { if(shared_.size() >= 2) return true; if(shared_.size() == 1) { if(shared_.begin()->first != Glib::Thread::self()) return true; }; return false; } void SharedMutex::lockShared(void) { lock_.lock(); while(have_exclusive_lock()) { cond_.wait(lock_); }; add_shared_lock(); lock_.unlock(); }; void SharedMutex::unlockShared(void) { lock_.lock(); remove_shared_lock(); cond_.broadcast(); lock_.unlock(); }; void SharedMutex::lockExclusive(void) { lock_.lock(); while(have_exclusive_lock() || have_shared_lock()) { cond_.wait(lock_); }; ++exclusive_; thread_ = Glib::Thread::self(); lock_.unlock(); } void SharedMutex::unlockExclusive(void) { lock_.lock(); if(thread_ == Glib::Thread::self()) { if(exclusive_) --exclusive_; if(!exclusive_) thread_ = NULL; }; cond_.broadcast(); lock_.unlock(); } // ---------------------------------------- ThreadedPointerBase::~ThreadedPointerBase(void) { //if (ptr && !released) delete ptr; } ThreadedPointerBase::ThreadedPointerBase(void *p) : cnt_(0), ptr_(p), released_(false) { add(); } ThreadedPointerBase* ThreadedPointerBase::add(void) { Glib::Mutex::Lock lock(lock_); ++cnt_; cond_.broadcast(); return this; } void* ThreadedPointerBase::rem(void) { Glib::Mutex::Lock lock(lock_); cond_.broadcast(); if (--cnt_ == 0) { void* p = released_?NULL:ptr_; lock.release(); delete this; return p; } return NULL; } // ---------------------------------------- ThreadRegistry::ThreadRegistry(void):counter_(0),cancel_(false) { } ThreadRegistry::~ThreadRegistry(void) { } void ThreadRegistry::RegisterThread(void) { lock_.lock(); ++counter_; lock_.unlock(); } void ThreadRegistry::UnregisterThread(void) { lock_.lock(); --counter_; cond_.broadcast(); lock_.unlock(); } bool ThreadRegistry::WaitOrCancel(int timeout) { bool v = false; lock_.lock(); Glib::TimeVal etime; etime.assign_current_time(); etime.add_milliseconds(timeout); while (!cancel_) { if(!cond_.timed_wait(lock_, etime)) break; } v = cancel_; lock_.unlock(); 
return v; } bool ThreadRegistry::WaitForExit(int timeout) { int n = 0; lock_.lock(); if(timeout >= 0) { Glib::TimeVal etime; etime.assign_current_time(); etime.add_milliseconds(timeout); while (counter_ > 0) { if(!cond_.timed_wait(lock_, etime)) break; } } else { while (counter_ > 0) { cond_.wait(lock_); } } n = counter_; lock_.unlock(); return (n <= 0); } void ThreadRegistry::RequestCancel(void) { lock_.lock(); cancel_=true; cond_.broadcast(); lock_.unlock(); } // ---------------------------------------- #ifdef USE_THREAD_DATA class ThreadDataPool { private: std::map datas_; typedef std::map::iterator datas_iterator; typedef std::pair datas_pair; Glib::Mutex lock_; ~ThreadDataPool(void); public: ThreadDataPool(void); ThreadData* GetData(void); void RemoveData(void); }; static ThreadDataPool* data_pool = NULL; ThreadDataPool::ThreadDataPool(void) { } ThreadData* ThreadDataPool::GetData(void) { ThreadData* data = NULL; Glib::Thread* self = Glib::Thread::self(); lock_.lock(); datas_iterator d = datas_.find(self); if(d == datas_.end()) { data = new ThreadData; d = datas_.insert(datas_.end(),datas_pair(self,data)); } else { data = d->second; }; lock_.unlock(); return data; } void ThreadDataPool::RemoveData(void) { Glib::Thread* self = Glib::Thread::self(); lock_.lock(); datas_iterator d = datas_.find(self); if(d != datas_.end()) { ThreadData* data = d->second; datas_.erase(d); lock_.unlock(); data->Release(); //delete data; return; } lock_.unlock(); } void ThreadData::Inherit(ThreadData* parent) { if(!parent) return; parent->lock_.lock(); for(items_iterator d = parent->items_.begin();d != parent->items_.end();++d) { d->second->Dup(); }; parent->lock_.unlock(); parent->Release(); } ThreadData* ThreadData::Get(void) { if(!data_pool) return NULL; ThreadData* data = data_pool->GetData(); if(data) data->Acquire(); return data; } void ThreadData::Remove(void) { if(!data_pool) return; data_pool->RemoveData(); } ThreadData::ThreadData(void):usage_count(1) { } void ThreadData::Acquire(void) { lock_.lock(); ++usage_count; lock_.unlock(); } void ThreadData::Release(void) { lock_.lock(); if(usage_count) --usage_count; if(!usage_count) { delete this; } else { lock_.unlock(); } } ThreadData::~ThreadData(void) { //lock_.lock(); for(items_iterator i = items_.begin(); i != items_.end(); ++i) { delete i->second; }; lock_.unlock(); } void ThreadData::AddItem(const std::string& key,ThreadDataItem* item) { lock_.lock(); items_iterator i = items_.find(key); if(i != items_.end()) { if(i->second != item) { delete i->second; i->second = item; }; } else { i = items_.insert(items_.end(),items_pair(key,item)); }; lock_.unlock(); } ThreadDataItem* ThreadData::GetItem(const std::string& key) { ThreadDataItem* item = NULL; lock_.lock(); items_iterator i = items_.find(key); if(i != items_.end()) item = i->second; lock_.unlock(); return item; } #endif ThreadDataItem::ThreadDataItem(void) { } ThreadDataItem::ThreadDataItem(const ThreadDataItem& it) { // Never happens } ThreadDataItem::~ThreadDataItem(void) { // Called by pool } ThreadDataItem::ThreadDataItem(const std::string& key) { Attach(key); } void ThreadDataItem::Attach(const std::string& key) { #ifdef USE_THREAD_DATA if(key.empty()) return; ThreadData* data = ThreadData::Get(); if(data) { data->AddItem(key,this); data->Release(); } #endif } ThreadDataItem::ThreadDataItem(std::string& key) { #ifdef USE_THREAD_DATA Attach(key); #endif } void ThreadDataItem::Attach(std::string& key) { #ifdef USE_THREAD_DATA ThreadData* data = ThreadData::Get(); if(!data) return; 
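// No key supplied - generate a fresh one that is not yet attached to this thread's data.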
if(key.empty()) { for(;;) { key = UUID(); if(!(data->GetItem(key))) break; }; }; data->AddItem(key,this); data->Release(); #endif } ThreadDataItem* ThreadDataItem::Get(const std::string& key) { #ifdef USE_THREAD_DATA ThreadData* data = ThreadData::Get(); if(!data) return NULL; ThreadDataItem* item = data->GetItem(key); data->Release(); return item; #else return NULL; #endif } void ThreadDataItem::Dup(void) { // new ThreadDataItem; } // ---------------------------------------- void GlibThreadInitialize(void) { Glib::init(); if (!Glib::thread_supported()) Glib::thread_init(); #ifdef USE_THREAD_POOL if (!pool) { #ifdef USE_THREAD_DATA data_pool = new ThreadDataPool; #endif pool = new ThreadPool; } #endif } void ThreadInitializer::forceReset(void) { // This function is deprecated and its body removed because // there is no safe way to reset locks after call to fork(). } void ThreadInitializer::waitExit(void) { #ifdef USE_THREAD_POOL while(pool->Num() > 0) { sleep(1); } #endif } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/Run.h0000644000000000000000000000012412735423540021667 xustar000000000000000027 mtime=1467361120.207262 27 atime=1513200574.872706 30 ctime=1513200658.835733246 nordugrid-arc-5.4.2/src/hed/libs/common/Run.h0000644000175000002070000001717612735423540021750 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_RUN_H__ #define __ARC_RUN_H__ #include #include #include namespace Arc { class RunPump; class Pid; /// This class runs an external executable. /** It is possible to read from or write to its standard handles or to * redirect them to std::string elements. * \ingroup common * \headerfile Run.h arc/Run.h */ class Run { friend class RunPump; private: Run(const Run&); Run& operator=(Run&); protected: // working directory std::string working_directory; // Handles int stdout_; int stderr_; int stdin_; // Associated string containers std::string *stdout_str_; std::string *stderr_str_; std::string *stdin_str_; // bool stdout_keep_; bool stderr_keep_; bool stdin_keep_; // Signal connections sigc::connection stdout_conn_; sigc::connection stderr_conn_; sigc::connection stdin_conn_; sigc::connection child_conn_; // PID of child Pid *pid_; // Arguments to execute std::list argv_; std::list envp_; std::list envx_; void (*initializer_func_)(void*); void *initializer_arg_; void (*kicker_func_)(void*); void *kicker_arg_; // IO handlers bool stdout_handler(Glib::IOCondition cond); bool stderr_handler(Glib::IOCondition cond); bool stdin_handler(Glib::IOCondition cond); // Child exit handler void child_handler(Glib::Pid pid, int result); bool started_; bool running_; bool abandoned_; int result_; Glib::Mutex lock_; Glib::Cond cond_; int user_id_; int group_id_; Time run_time_; Time exit_time_; public: /// Constructor prepares object to run cmdline. Run(const std::string& cmdline); /// Constructor prepares object to run executable and arguments specified in argv. Run(const std::list& argv); /// Destructor kills running executable and releases associated resources. ~Run(void); /// Returns true if object is valid. operator bool(void) { return argv_.size() != 0; } /// Returns true if object is invalid. bool operator!(void) { return argv_.size() == 0; } /// Starts running executable. This method may be called only once. /** \return true if executable started without problems */ bool Start(void); /// Wait till execution finished or till timeout seconds expires. /** \return true if execution is complete. 
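 *
 * A minimal illustrative sketch (the command line is only a placeholder):
 * @code
 * Arc::Run proc("/bin/true");
 * if (proc.Start() && proc.Wait(10)) {
 *   int code = proc.Result();
 * }
 * @endcode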
*/ bool Wait(int timeout); /// Wait till execution finished. /** \return true if execution is complete, false if execution was not started. */ bool Wait(void); /// Returns exit code of execution. /** If child process was killed then exit code is -1. If code is compiled with support for detecting lost child process this code is -1 also if track of child was lost. */ int Result(void) { return result_; } /// Return true if execution is going on. bool Running(void); /// Return when executable was started. Time RunTime(void) { return run_time_; }; /// Return when executable finished executing. Time ExitTime(void) { return exit_time_; }; /// Read from stdout handle of running executable. /** This method may be used while stdout is directed to string, but the result is unpredictable. \param timeout upper limit for which method will block in milliseconds. Negative means infinite. \param buf buffer to write the stdout to \param size size of buf \return number of read bytes. */ int ReadStdout(int timeout, char *buf, int size); /// Read from stderr handle of running executable. /** This method may be used while stderr is directed to string, but the result is unpredictable. \param timeout upper limit for which method will block in milliseconds. Negative means infinite. \param buf buffer to write the stderr to \param size size of buf \return number of read bytes. */ int ReadStderr(int timeout, char *buf, int size); /// Write to stdin handle of running executable. /** This method may be used while stdin is directed to string, but the result is unpredictable. \param timeout upper limit for which method will block in milliseconds. Negative means infinite. \param buf buffer to read the stdin from \param size size of buf \return number of written bytes. */ int WriteStdin(int timeout, const char *buf, int size); /// Associate stdout handle of executable with string. /** This method must be called before Start(). str object must be valid as long as this object exists. */ void AssignStdout(std::string& str); /// Associate stderr handle of executable with string. /** This method must be called before Start(). str object must be valid as long as this object exists. */ void AssignStderr(std::string& str); /// Associate stdin handle of executable with string. /** This method must be called before Start(). str object must be valid as long as this object exists. */ void AssignStdin(std::string& str); /// Keep stdout same as parent's if keep = true. void KeepStdout(bool keep = true); /// Keep stderr same as parent's if keep = true. void KeepStderr(bool keep = true); /// Keep stdin same as parent's if keep = true. void KeepStdin(bool keep = true); /// Closes pipe associated with stdout handle. void CloseStdout(void); /// Closes pipe associated with stderr handle. void CloseStderr(void); /// Closes pipe associated with stdin handle. void CloseStdin(void); //void DumpStdout(void); //void DumpStderr(void); /// Assign a function to be called just after process is forked but before execution starts. void AssignInitializer(void (*initializer_func)(void*), void *initializer_arg); /// Assign a function to be called just after execution ends void AssignKicker(void (*kicker_func)(void*), void *kicker_arg); /// Assign working directory of the running process. void AssignWorkingDirectory(std::string& wd) { working_directory = wd; } /// Assign uid for the process to run under. void AssignUserId(int uid) { user_id_ = uid; } /// Assign gid for the process to run under. 
void AssignGroupId(int gid) { group_id_ = gid; } /// Add environment variable to be passed to process being run void AddEnvironment(const std::string& key, const std::string& value) { AddEnvironment(key+"="+value); } /// Add environment variable to be passed to process being run void AddEnvironment(const std::string& var) { envp_.push_back(var); } /// Remove environment variable to be passed to process being run void RemoveEnvironment(const std::string& key) { envx_.push_back(key); } /// Kill running executable. /** First soft kill signal (SIGTERM) is sent to executable. If after timeout seconds executable is still running it's killed completely. Currently this method does not work for Windows OS */ void Kill(int timeout); /// Detach this object from running process. /** After calling this method instance is not associated with external process anymore. As result destructor will not kill process. */ void Abandon(void); /// Call this method after fork() in child process. /** It will reinitialize internal structures for new environment. Do not call it in any other case than defined. */ static void AfterFork(void); }; } #endif // __ARC_RUN_H__ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/ArcRegex.cpp0000644000000000000000000000012412300422304023137 xustar000000000000000027 mtime=1392649412.257768 27 atime=1513200574.892706 30 ctime=1513200658.846733381 nordugrid-arc-5.4.2/src/hed/libs/common/ArcRegex.cpp0000644000175000002070000000506712300422304023214 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include "ArcRegex.h" #include namespace Arc { RegularExpression::RegularExpression() : pattern(""), status(-1) { regcomp(&preg, pattern.c_str(), REG_EXTENDED); } RegularExpression::RegularExpression(std::string pattern, bool ignoreCase) : pattern(pattern) { status = regcomp(&preg, pattern.c_str(), REG_EXTENDED | (REG_ICASE * ignoreCase) ); } RegularExpression::RegularExpression(const RegularExpression& regex) : pattern(regex.pattern) { status = regcomp(&preg, pattern.c_str(), 0); } RegularExpression::~RegularExpression() { regfree(&preg); } RegularExpression& RegularExpression::operator=(const RegularExpression& regex) { regfree(&preg); pattern = regex.pattern; status = regcomp(&preg, pattern.c_str(), 0); return *this; } bool RegularExpression::isOk() { return status == 0; } bool RegularExpression::hasPattern(std::string str) { return pattern == str; } bool RegularExpression::match(const std::string& str) const { std::list unmatched; std::list matched; return match(str, unmatched, matched) && (unmatched.empty()); } bool RegularExpression::match(const std::string& str, std::vector& matched) const { if (status != 0) return false; regmatch_t rm[preg.re_nsub+1]; if (regexec(&preg, str.c_str(), preg.re_nsub+1, rm, 0) != 0) return false; for (int n = 1; n <= preg.re_nsub; ++n) { if (rm[n].rm_so == -1) { matched.push_back(""); } else { matched.push_back(str.substr(rm[n].rm_so, rm[n].rm_eo - rm[n].rm_so)); } } return true; } bool RegularExpression::match(const std::string& str, std::list& unmatched, std::list& matched) const { if (status == 0) { int st; regmatch_t rm[256]; unmatched.clear(); matched.clear(); st = regexec(&preg, str.c_str(), 256, rm, 0); if (st != 0) return false; regoff_t p = 0; for (int n = 0; n < 256; ++n) { if (rm[n].rm_so == -1) break; matched.push_back(str.substr(rm[n].rm_so, rm[n].rm_eo - rm[n].rm_so)); if (rm[n].rm_so > p) unmatched.push_back(str.substr(p, rm[n].rm_so - p)); p = rm[n].rm_eo; } if (p < 
str.length()) unmatched.push_back(str.substr(p)); return true; } else return false; } std::string RegularExpression::getPattern() const { return pattern; } } nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/FileAccess.cpp0000644000000000000000000000012412365443155023462 xustar000000000000000027 mtime=1406551661.397686 27 atime=1513200574.912706 30 ctime=1513200658.853733466 nordugrid-arc-5.4.2/src/hed/libs/common/FileAccess.cpp0000644000175000002070000004470712365443155023543 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "file_access.h" #include "FileAccess.h" namespace Arc { static bool sread(Run& r,void* buf,size_t size) { while(size) { int l = r.ReadStdout(-1,(char*)buf,size); if(l < 0) return false; size-=l; buf = (void*)(((char*)buf)+l); }; return true; } static bool swrite(Run& r,const void* buf,size_t size) { while(size) { int l = r.WriteStdin(-1,(const char*)buf,size); if(l < 0) return false; size-=l; buf = (void*)(((char*)buf)+l); }; return true; } #define ABORTALL { dispose_executer(file_access_); file_access_=NULL; continue; } #define STARTHEADER(CMD,SIZE) { \ if(!file_access_) break; \ if(!(file_access_->Running())) break; \ FileAccess::header_t header; \ header.cmd = CMD; \ header.size = SIZE; \ if(!swrite(*file_access_,&header,sizeof(header))) ABORTALL; \ } #define ENDHEADER(CMD,SIZE) { \ FileAccess::header_t header; \ if(!sread(*file_access_,&header,sizeof(header))) ABORTALL; \ if((header.cmd != CMD) || (header.size != (sizeof(res)+sizeof(errno_)+SIZE))) ABORTALL; \ if(!sread(*file_access_,&res,sizeof(res))) ABORTALL; \ if(!sread(*file_access_,&errno_,sizeof(errno_))) ABORTALL; \ } static void release_executer(Run* file_access) { delete file_access; } static void dispose_executer(Run* file_access) { delete file_access; } static bool do_tests = false; static Run* acquire_executer(uid_t uid,gid_t gid) { // TODO: pool std::list argv; if(!do_tests) { argv.push_back(Arc::ArcLocation::Get()+G_DIR_SEPARATOR_S+PKGLIBSUBDIR+G_DIR_SEPARATOR_S+"arc-file-access"); } else { argv.push_back(std::string("..")+G_DIR_SEPARATOR_S+"arc-file-access"); } argv.push_back("0"); argv.push_back("1"); Run* file_access_ = new Run(argv); file_access_->KeepStdin(false); file_access_->KeepStdout(false); file_access_->KeepStderr(true); if(!(file_access_->Start())) { delete file_access_; file_access_ = NULL; return NULL; } if(uid || gid) { for(int n=0;n<1;++n) { STARTHEADER(CMD_SETUID,sizeof(uid)+sizeof(gid)); if(!swrite(*file_access_,&uid,sizeof(uid))) ABORTALL; if(!swrite(*file_access_,&gid,sizeof(gid))) ABORTALL; int res = 0; int errno_ = 0; ENDHEADER(CMD_SETUID,0); if(res != 0) ABORTALL; }; }; return file_access_; } static bool sread_buf(Run& r,void* buf,unsigned int& bufsize,unsigned int& maxsize) { char dummy[1024]; unsigned int size; if(sizeof(size) > maxsize) return false; if(!sread(r,&size,sizeof(size))) return false; maxsize -= sizeof(size); if(size > maxsize) return false; if(size <= bufsize) { if(!sread(r,buf,size)) return false; bufsize = size; maxsize -= size; } else { if(!sread(r,buf,bufsize)) return false; maxsize -= bufsize; // skip rest size -= bufsize; while(size > sizeof(dummy)) { if(!sread(r,dummy,sizeof(dummy))) return false; size -= sizeof(dummy); maxsize -= sizeof(dummy); }; if(!sread(r,dummy,size)) return false; maxsize -= size; }; return true; } static bool swrite_string(Run& r,const std::string& str) { int l = str.length(); if(!swrite(r,&l,sizeof(l))) return false; if(!swrite(r,str.c_str(),l)) 
return false; return true; } #define RETRYLOOP Glib::Mutex::Lock mlock(lock_); for(int n = 2; n && (file_access_?file_access_:(file_access_=acquire_executer(uid_,gid_))) ;--n) #define NORETRYLOOP Glib::Mutex::Lock mlock(lock_); for(int n = 1; n && (file_access_?file_access_:(file_access_=acquire_executer(uid_,gid_))) ;--n) FileAccess::FileAccess(void):file_access_(NULL),errno_(0),uid_(0),gid_(0) { file_access_ = acquire_executer(uid_,gid_); } FileAccess::~FileAccess(void) { release_executer(file_access_); file_access_ = NULL; } bool FileAccess::ping(void) { RETRYLOOP { STARTHEADER(CMD_PING,0); header_t header; if(!sread(*file_access_,&header,sizeof(header))) ABORTALL; if((header.cmd != CMD_PING) || (header.size != 0)) ABORTALL; return true; } return false; } bool FileAccess::fa_setuid(int uid,int gid) { RETRYLOOP { STARTHEADER(CMD_SETUID,sizeof(uid)+sizeof(gid)); if(!swrite(*file_access_,&uid,sizeof(uid))) ABORTALL; if(!swrite(*file_access_,&gid,sizeof(gid))) ABORTALL; int res = 0; ENDHEADER(CMD_SETUID,0); if(res == 0) { uid_ = uid; gid_ = gid; }; return (res == 0); } errno_ = -1; return false; } bool FileAccess::fa_mkdir(const std::string& path, mode_t mode) { RETRYLOOP { STARTHEADER(CMD_MKDIR,sizeof(mode)+sizeof(int)+path.length()); if(!swrite(*file_access_,&mode,sizeof(mode))) ABORTALL; if(!swrite_string(*file_access_,path)) ABORTALL; int res = 0; ENDHEADER(CMD_MKDIR,0); return (res == 0); } errno_ = -1; return false; } bool FileAccess::fa_mkdirp(const std::string& path, mode_t mode) { RETRYLOOP { STARTHEADER(CMD_MKDIRP,sizeof(mode)+sizeof(int)+path.length()); if(!swrite(*file_access_,&mode,sizeof(mode))) ABORTALL; if(!swrite_string(*file_access_,path)) ABORTALL; int res = 0; ENDHEADER(CMD_MKDIRP,0); return (res == 0); } errno_ = -1; return false; } bool FileAccess::fa_link(const std::string& oldpath, const std::string& newpath) { RETRYLOOP { STARTHEADER(CMD_HARDLINK,sizeof(int)+oldpath.length()+sizeof(int)+newpath.length()); if(!swrite_string(*file_access_,oldpath)) ABORTALL; if(!swrite_string(*file_access_,newpath)) ABORTALL; int res = 0; ENDHEADER(CMD_HARDLINK,0); return (res == 0); } errno_ = -1; return false; } bool FileAccess::fa_softlink(const std::string& oldpath, const std::string& newpath) { RETRYLOOP { STARTHEADER(CMD_SOFTLINK,sizeof(int)+oldpath.length()+sizeof(int)+newpath.length()); if(!swrite_string(*file_access_,oldpath)) ABORTALL; if(!swrite_string(*file_access_,newpath)) ABORTALL; int res = 0; ENDHEADER(CMD_SOFTLINK,0); return (res == 0); } errno_ = -1; return false; } bool FileAccess::fa_copy(const std::string& oldpath, const std::string& newpath, mode_t mode) { RETRYLOOP { STARTHEADER(CMD_COPY,sizeof(mode)+sizeof(int)+oldpath.length()+sizeof(int)+newpath.length()); if(!swrite(*file_access_,&mode,sizeof(mode))) ABORTALL; if(!swrite_string(*file_access_,oldpath)) ABORTALL; if(!swrite_string(*file_access_,newpath)) ABORTALL; int res = 0; ENDHEADER(CMD_COPY,0); return (res == 0); } errno_ = -1; return false; } bool FileAccess::fa_rename(const std::string& oldpath, const std::string& newpath) { RETRYLOOP { STARTHEADER(CMD_RENAME,sizeof(int)+oldpath.length()+sizeof(int)+newpath.length()); if(!swrite_string(*file_access_,oldpath)) ABORTALL; if(!swrite_string(*file_access_,newpath)) ABORTALL; int res = 0; ENDHEADER(CMD_RENAME,0); return (res == 0); } errno_ = -1; return false; } bool FileAccess::fa_stat(const std::string& path, struct stat& st) { RETRYLOOP { STARTHEADER(CMD_STAT,sizeof(int)+path.length()); if(!swrite_string(*file_access_,path)) ABORTALL; int res = 0; 
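  // Illustrative sketch of how fa_stat() is typically reached from calling
  // code (the uid/gid values and the path are assumptions, not part of the
  // protocol implemented here):
  //
  //   Arc::FileAccess* fa = Arc::FileAccess::Acquire();
  //   struct stat st;
  //   if (fa && fa->fa_setuid(1000, 1000) && fa->fa_stat("/tmp/somefile", st)) {
  //     // st now describes the file as seen under uid/gid 1000
  //   }
  //   Arc::FileAccess::Release(fa);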
ENDHEADER(CMD_STAT,sizeof(st)); if(!sread(*file_access_,&st,sizeof(st))) ABORTALL; return (res == 0); } errno_ = -1; return false; } bool FileAccess::fa_lstat(const std::string& path, struct stat& st) { RETRYLOOP { STARTHEADER(CMD_LSTAT,sizeof(int)+path.length()); if(!swrite_string(*file_access_,path)) ABORTALL; int res = 0; ENDHEADER(CMD_LSTAT,sizeof(st)); if(!sread(*file_access_,&st,sizeof(st))) ABORTALL; return (res == 0); } errno_ = -1; return false; } bool FileAccess::fa_chmod(const std::string& path, mode_t mode) { RETRYLOOP { STARTHEADER(CMD_CHMOD,sizeof(mode)+sizeof(int)+path.length()); if(!swrite(*file_access_,&mode,sizeof(mode))) ABORTALL; if(!swrite_string(*file_access_,path)) ABORTALL; int res = 0; ENDHEADER(CMD_CHMOD,0); return (res == 0); } errno_ = -1; return false; } bool FileAccess::fa_fstat(struct stat& st) { RETRYLOOP { STARTHEADER(CMD_FSTAT,0); int res = 0; ENDHEADER(CMD_FSTAT,sizeof(st)); if(!sread(*file_access_,&st,sizeof(st))) ABORTALL; return (res == 0); } errno_ = -1; return false; } bool FileAccess::fa_ftruncate(off_t length) { RETRYLOOP { STARTHEADER(CMD_FTRUNCATE,sizeof(length)); if(!swrite(*file_access_,&length,sizeof(length))) ABORTALL; int res = 0; ENDHEADER(CMD_FTRUNCATE,0); return (res == 0); } errno_ = -1; return false; } off_t FileAccess::fa_fallocate(off_t length) { RETRYLOOP { STARTHEADER(CMD_FALLOCATE,sizeof(length)); if(!swrite(*file_access_,&length,sizeof(length))) ABORTALL; int res = 0; ENDHEADER(CMD_FALLOCATE,sizeof(length)); if(!sread(*file_access_,&length,sizeof(length))) ABORTALL; return length; } errno_ = -1; return -1; } bool FileAccess::fa_readlink(const std::string& path, std::string& linkpath) { RETRYLOOP { STARTHEADER(CMD_READLINK,sizeof(int)+path.length()); if(!swrite_string(*file_access_,path)) ABORTALL; int res = 0; int l = 0; header_t header; if(!sread(*file_access_,&header,sizeof(header))) ABORTALL; if((header.cmd != CMD_READLINK) || (header.size < (sizeof(res)+sizeof(errno_)+sizeof(int)))) ABORTALL; if(!sread(*file_access_,&res,sizeof(res))) ABORTALL; if(!sread(*file_access_,&errno_,sizeof(errno_))) ABORTALL; if(!sread(*file_access_,&l,sizeof(l))) ABORTALL; if((sizeof(res)+sizeof(errno_)+sizeof(l)+l) != header.size) ABORTALL; linkpath.assign(l,' '); if(!sread(*file_access_,(void*)linkpath.c_str(),l)) ABORTALL; return (res >= 0); } errno_ = -1; return false; } bool FileAccess::fa_remove(const std::string& path) { RETRYLOOP { STARTHEADER(CMD_REMOVE,sizeof(int)+path.length()); if(!swrite_string(*file_access_,path)) ABORTALL; int res = 0; ENDHEADER(CMD_REMOVE,0); return (res == 0); } errno_ = -1; return false; } bool FileAccess::fa_unlink(const std::string& path) { RETRYLOOP { STARTHEADER(CMD_UNLINK,sizeof(int)+path.length()); if(!swrite_string(*file_access_,path)) ABORTALL; int res = 0; ENDHEADER(CMD_UNLINK,0); return (res == 0); } errno_ = -1; return false; } bool FileAccess::fa_rmdir(const std::string& path) { RETRYLOOP { STARTHEADER(CMD_RMDIR,sizeof(int)+path.length()); if(!swrite_string(*file_access_,path)) ABORTALL; int res = 0; ENDHEADER(CMD_RMDIR,0); return (res == 0); } errno_ = -1; return false; } bool FileAccess::fa_rmdirr(const std::string& path) { RETRYLOOP { STARTHEADER(CMD_RMDIRR,sizeof(int)+path.length()); if(!swrite_string(*file_access_,path)) ABORTALL; int res = 0; ENDHEADER(CMD_RMDIRR,0); return (res == 0); } errno_ = -1; return false; } bool FileAccess::fa_opendir(const std::string& path) { RETRYLOOP { STARTHEADER(CMD_OPENDIR,sizeof(int)+path.length()); if(!swrite_string(*file_access_,path)) ABORTALL; int res = 0; 
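  // Illustrative sketch of the directory-listing sequence built from
  // fa_opendir(), fa_readdir() and fa_closedir() (the path is an assumption):
  //
  //   Arc::FileAccess fa;
  //   std::string entry;
  //   if (fa.fa_opendir("/tmp")) {
  //     while (fa.fa_readdir(entry)) {
  //       // process entry
  //     }
  //     fa.fa_closedir();
  //   }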
ENDHEADER(CMD_OPENDIR,0); return (res == 0); } errno_ = -1; return false; } bool FileAccess::fa_closedir(void) { NORETRYLOOP { STARTHEADER(CMD_CLOSEDIR,0); int res = 0; ENDHEADER(CMD_CLOSEDIR,0); return (res == 0); } errno_ = -1; return false; } bool FileAccess::fa_readdir(std::string& name) { NORETRYLOOP { STARTHEADER(CMD_READDIR,0); int res = 0; int l = 0; header_t header; if(!sread(*file_access_,&header,sizeof(header))) ABORTALL; if((header.cmd != CMD_READDIR) || (header.size < (sizeof(res)+sizeof(errno_)+sizeof(l)))) ABORTALL; if(!sread(*file_access_,&res,sizeof(res))) ABORTALL; if(!sread(*file_access_,&errno_,sizeof(errno_))) ABORTALL; if(!sread(*file_access_,&l,sizeof(l))) ABORTALL; if((sizeof(res)+sizeof(errno_)+sizeof(l)+l) != header.size) ABORTALL; name.assign(l,' '); if(!sread(*file_access_,(void*)name.c_str(),l)) ABORTALL; return (res == 0); } errno_ = -1; return false; } bool FileAccess::fa_open(const std::string& path, int flags, mode_t mode) { RETRYLOOP { STARTHEADER(CMD_OPENFILE,sizeof(flags)+sizeof(mode)+sizeof(int)+path.length()); if(!swrite(*file_access_,&flags,sizeof(flags))) ABORTALL; if(!swrite(*file_access_,&mode,sizeof(mode))) ABORTALL; if(!swrite_string(*file_access_,path)) ABORTALL; int res = -1; ENDHEADER(CMD_OPENFILE,0); return (res != -1); } errno_ = -1; return false; } bool FileAccess::fa_mkstemp(std::string& path, mode_t mode) { RETRYLOOP { STARTHEADER(CMD_TEMPFILE,sizeof(mode)+sizeof(int)+path.length()); if(!swrite(*file_access_,&mode,sizeof(mode))) ABORTALL; if(!swrite_string(*file_access_,path)) ABORTALL; int res = 0; int l = 0; header_t header; if(!sread(*file_access_,&header,sizeof(header))) ABORTALL; if((header.cmd != CMD_TEMPFILE) || (header.size < (sizeof(res)+sizeof(errno_)+sizeof(int)))) ABORTALL; if(!sread(*file_access_,&res,sizeof(res))) ABORTALL; if(!sread(*file_access_,&errno_,sizeof(errno_))) ABORTALL; if(!sread(*file_access_,&l,sizeof(l))) ABORTALL; if((sizeof(res)+sizeof(errno_)+sizeof(l)+l) != header.size) ABORTALL; path.assign(l,' '); if(!sread(*file_access_,(void*)path.c_str(),l)) ABORTALL; return (res != -1); } errno_ = -1; return false; } bool FileAccess::fa_close(void) { NORETRYLOOP { STARTHEADER(CMD_CLOSEFILE,0); int res = 0; ENDHEADER(CMD_CLOSEFILE,0); return (res == 0); } errno_ = -1; return false; } off_t FileAccess::fa_lseek(off_t offset, int whence) { NORETRYLOOP { STARTHEADER(CMD_SEEKFILE,sizeof(offset)+sizeof(whence)); if(!swrite(*file_access_,&offset,sizeof(offset))) ABORTALL; if(!swrite(*file_access_,&whence,sizeof(whence))) ABORTALL; int res = 0; ENDHEADER(CMD_SEEKFILE,sizeof(offset)); if(!sread(*file_access_,&offset,sizeof(offset))) ABORTALL; return offset; } errno_ = -1; return (off_t)(-1); } ssize_t FileAccess::fa_read(void* buf,size_t size) { NORETRYLOOP { STARTHEADER(CMD_READFILE,sizeof(size)); if(!swrite(*file_access_,&size,sizeof(size))) ABORTALL; int res = 0; header_t header; if(!sread(*file_access_,&header,sizeof(header))) ABORTALL; if((header.cmd != CMD_READFILE) || (header.size < (sizeof(res)+sizeof(errno_)))) ABORTALL; if(!sread(*file_access_,&res,sizeof(res))) ABORTALL; if(!sread(*file_access_,&errno_,sizeof(errno_))) ABORTALL; header.size -= sizeof(res)+sizeof(errno_); unsigned int l = size; if(!sread_buf(*file_access_,buf,l,header.size)) ABORTALL; return (res < 0)?res:l; } errno_ = -1; return -1; } ssize_t FileAccess::fa_pread(void* buf,size_t size,off_t offset) { NORETRYLOOP { STARTHEADER(CMD_READFILEAT,sizeof(size)+sizeof(offset)); if(!swrite(*file_access_,&size,sizeof(size))) ABORTALL; 
if(!swrite(*file_access_,&offset,sizeof(offset))) ABORTALL; int res = 0; header_t header; if(!sread(*file_access_,&header,sizeof(header))) ABORTALL; if((header.cmd != CMD_READFILEAT) || (header.size < (sizeof(res)+sizeof(errno_)))) ABORTALL; if(!sread(*file_access_,&res,sizeof(res))) ABORTALL; if(!sread(*file_access_,&errno_,sizeof(errno_))) ABORTALL; header.size -= sizeof(res)+sizeof(errno_); unsigned int l = size; if(!sread_buf(*file_access_,buf,l,header.size)) ABORTALL; return (res < 0)?res:l; } errno_ = -1; return -1; } ssize_t FileAccess::fa_write(const void* buf,size_t size) { NORETRYLOOP { unsigned int l = size; STARTHEADER(CMD_WRITEFILE,sizeof(l)+l); if(!swrite(*file_access_,&l,sizeof(l))) ABORTALL; if(!swrite(*file_access_,buf,l)) ABORTALL; int res = 0; ENDHEADER(CMD_WRITEFILE,0); return res; } errno_ = -1; return -1; } ssize_t FileAccess::fa_pwrite(const void* buf,size_t size,off_t offset) { NORETRYLOOP { unsigned int l = size; STARTHEADER(CMD_WRITEFILEAT,sizeof(offset)+sizeof(l)+l); if(!swrite(*file_access_,&offset,sizeof(offset))) ABORTALL; if(!swrite(*file_access_,&l,sizeof(l))) ABORTALL; if(!swrite(*file_access_,buf,l)) ABORTALL; int res = 0; ENDHEADER(CMD_WRITEFILEAT,0); return res; } errno_ = -1; return -1; } void FileAccess::testtune(void) { do_tests = true; } static FileAccessContainer fas_(0,100); FileAccess* FileAccess::Acquire(void) { return fas_.Acquire(); } void FileAccess::Release(FileAccess* fa) { fas_.Release(fa); } FileAccessContainer::FileAccessContainer(unsigned int minval,unsigned int maxval):min_(minval),max_(maxval) { KeepRange(); } FileAccessContainer::FileAccessContainer(void):min_(1),max_(10) { KeepRange(); } FileAccessContainer::~FileAccessContainer(void) { Glib::Mutex::Lock lock(lock_); for(std::list::iterator fa = fas_.begin();fa != fas_.end();++fa) { delete *fa; } } FileAccess* FileAccessContainer::Acquire(void) { Glib::Mutex::Lock lock(lock_); FileAccess* r = NULL; for(std::list::iterator fa = fas_.begin();fa != fas_.end();) { r = *fa; fa = fas_.erase(fa); // Test if it still works if(r->ping()) break; // Broken proxy delete r; r = NULL; } // If no proxies - make new if(!r) r = new FileAccess; KeepRange(); return r; } void FileAccessContainer::Release(FileAccess* fa) { Glib::Mutex::Lock lock(lock_); if(!fa) return; fa->fa_close(); fa->fa_closedir(); if(!fa->fa_setuid(0,0)) { delete fa; return; } fas_.push_back(fa); KeepRange(); return; } void FileAccessContainer::SetMin(unsigned int val) { Glib::Mutex::Lock lock(lock_); min_ = val; KeepRange(); } void FileAccessContainer::SetMax(unsigned int val) { Glib::Mutex::Lock lock(lock_); min_ = val; KeepRange(); } void FileAccessContainer::KeepRange(void) { while(fas_.size() > ((max_>=min_)?max_:min_)) { FileAccess* fa = fas_.front(); fas_.pop_front(); delete fa; } while(fas_.size() < ((min_<=max_)?min_:max_)) { fas_.push_back(new FileAccess); } } } nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/Base64.h0000644000000000000000000000012312306335057022145 xustar000000000000000026 mtime=1394194991.93061 27 atime=1513200574.840705 30 ctime=1513200658.808732916 nordugrid-arc-5.4.2/src/hed/libs/common/Base64.h0000644000175000002070000000232312306335057022213 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- // Base64 encoding and decoding #ifndef ARCLIB_BASE64 #define ARCLIB_BASE64 #include namespace Arc { /// Base64 encoding and decoding. /** \ingroup common * \headerfile Base64.h arc/Base64.h */ class Base64 { public: /// Constructor is not implemented. Use static methods instead. 
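    /// A minimal illustrative sketch of the static interface (the input
    /// string is an assumption):
    /// \code
    /// std::string coded = Arc::Base64::encode("grid");
    /// std::string plain = Arc::Base64::decode(coded);  // plain == "grid"
    /// \endcode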
Base64(); ~Base64(); /// Encode a string to base 64 /** * \since Added in 3.0.1. **/ static std::string encode(const std::string& bufplain); /// Decode a string from base 64 /** * \since Added in 3.0.1. **/ static std::string decode(const std::string& bufcoded); // The next 4 methods are legacy API kept for backwards compatibility. They // can be removed in the next major version. static int encode_len(int len); /// Encode a string to base 64 /** \deprecated Use encode(std::string&) instead */ static int encode(char *encoded, const char *string, int len); static int decode_len(const char *bufcoded); /// Decode a string from base 64 /** \deprecated Use decode(std::string&) instead */ static int decode(char *bufplain, const char *bufcoded); }; } // namespace Arc #endif // ARCLIB_BASE64 nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/Base64.cpp0000644000000000000000000000012412117331575022502 xustar000000000000000027 mtime=1362998141.987348 27 atime=1513200574.929707 30 ctime=1513200658.849733418 nordugrid-arc-5.4.2/src/hed/libs/common/Base64.cpp0000644000175000002070000001074712117331575022560 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif /* #include #include */ #include #include #include "Base64.h" namespace Arc { // Implemented according to RFC4648, MSB first approach and assuming ASCII codes. // There are no checks for bad characters. static char base64_character_encode(char in) { if(((unsigned char)in) < (unsigned char)26) return ('A' + in); in -= 26; if(((unsigned char)in) < (unsigned char)26) return ('a' + in); in -= 26; if(((unsigned char)in) < (unsigned char)10) return ('0' + in); in -= 10; if(in == (char)0) return '+'; if(in == (char)1) return '/'; return '?'; } static char base64_character_decode(char in) { if((in >= 'A') && (in <= 'Z')) return (in - 'A'); if((in >= 'a') && (in <= 'z')) return (in - 'a' + 26); if((in >= '0') && (in <= '9')) return (in - '0' + 26 + 26); if(in == '+') return (0 + 26 + 26 + 10); if(in == '/') return (1 + 26 + 26 + 10); return 0xff; } static void base64_quantum_encode(const char in[3], int size, char out[4]) { if(size == 3) { out[0] = base64_character_encode((in[0]>>2) & 0x3f); out[1] = base64_character_encode(((in[0]<<4) & 0x30) | ((in[1]>>4) & 0x0f)); out[2] = base64_character_encode(((in[1]<<2) & 0x3c) | ((in[2]>>6) & 0x03)); out[3] = base64_character_encode(in[2] & 0x3f); } else if(size == 2) { out[0] = base64_character_encode((in[0]>>2) & 0x3f); out[1] = base64_character_encode(((in[0]<<4) & 0x30) | ((in[1]>>4) & 0x0f)); out[2] = base64_character_encode((in[1]<<2) & 0x3c); out[3] = '='; } else if(size == 1) { out[0] = base64_character_encode((in[0]>>2) & 0x3f); out[1] = base64_character_encode((in[0]<<4) & 0x30); out[2] = '='; out[3] = '='; } else { out[0] = '?'; out[1] = '?'; out[2] = '?'; out[3] = '?'; } } static int base64_quantum_decode(const char in[4], char out[3]) { char c; out[0] = 0; out[1] = 0; out[2] = 0; if(in[0] != '=') { c = base64_character_decode(in[0]); out[0] |= (c << 2) & 0xfc; if(in[1] != '=') { c = base64_character_decode(in[1]); out[0] |= (c >> 4) & 0x03; out[1] |= (c << 4) & 0xf0; if(in[2] != '=') { c = base64_character_decode(in[2]); out[1] |= (c >> 2) & 0x0f; out[2] |= (c << 6) & 0xc0; if(in[3] != '=') { c = base64_character_decode(in[3]); out[2] |= c & 0x3f; return 3; } return 2; } return 1; } return 1; // must not happen } return 0; // must not happen } std::string Base64::decode(const std::string& bufcoded) { std::string bufplain; char quantum[3]; char 
encoded[4]; std::string::size_type p = 0; int ecnt = 0; for(;p < bufcoded.length();++p) { if(base64_character_decode(bufcoded[p]) == (char)0xff) continue; // ignore eol and garbage encoded[ecnt] = bufcoded[p]; ++ecnt; if(ecnt >= 4) { int qsize = base64_quantum_decode(encoded, quantum); bufplain.append(quantum, qsize); ecnt = 0; } } if(ecnt > 0) { for(;ecnt<4;++ecnt) encoded[ecnt] = '='; int qsize = base64_quantum_decode(encoded, quantum); bufplain.append(quantum, qsize); } return bufplain; } std::string Base64::encode(const std::string& bufplain) { std::string bufcoded; char quantum[3]; char encoded[4]; std::string::size_type p = 0; int qcnt = 0; for(;p < bufplain.length();++p) { quantum[qcnt] = bufplain[p]; ++qcnt; if(qcnt >= 3) { base64_quantum_encode(quantum,3,encoded); bufcoded.append(encoded,4); qcnt = 0; } } if(qcnt > 0) { base64_quantum_encode(quantum,qcnt,encoded); bufcoded.append(encoded,4); } return bufcoded; } int Base64::encode_len(int len) { return ((len + 2) / 3 * 4) + 1; } int Base64::encode(char *encoded, const char *string, int len) { std::string str(string, len); strncpy(encoded, encode(str).c_str(), str.length()); return 0; } int Base64::decode_len(const char *bufcoded) { std::string str(bufcoded); return (((str.length() + 3) / 4) * 3) + 1; } int Base64::decode(char *bufplain, const char *bufcoded) { std::string str(bufcoded); strncpy(bufplain, decode(str).c_str(), str.length()); return 0; } } nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/file_access.h0000644000000000000000000000012411746532427023371 xustar000000000000000027 mtime=1335538967.079094 27 atime=1513200574.919706 30 ctime=1513200658.873733711 nordugrid-arc-5.4.2/src/hed/libs/common/file_access.h0000644000175000002070000000413511746532427023441 0ustar00mockbuildmock00000000000000 #define CMD_PING (0) // - #define CMD_SETUID (1) // int uid // int gid // - // result // errno #define CMD_MKDIR (2) // mode_t mode // string dirname // - // result // errno #define CMD_MKDIRP (3) // mode_t mode // string dirname // - // result // errno #define CMD_HARDLINK (4) // string oldname // string newname // - // result // errno #define CMD_SOFTLINK (5) // string oldname // string newname // - // result // errno #define CMD_COPY (6) // mode_t mode // string oldname // string newname // - // result // errno #define CMD_STAT (7) // string path // - // result // errno // stat #define CMD_LSTAT (8) // string path // - // result // errno // stat #define CMD_REMOVE (9) // string path // - // result // errno #define CMD_UNLINK (10) // string path // - // result // errno #define CMD_RMDIR (11) // string path // - // result // errno #define CMD_OPENDIR (12) // string path // - // result // errno #define CMD_CLOSEDIR (13) // - // result // errno #define CMD_READDIR (14) // - // result // errno // string name #define CMD_OPENFILE (15) // flags // mode // string path // - // result // errno #define CMD_CLOSEFILE (16) // - // result // errno #define CMD_READFILE (17) // size // - // result // errno // string data #define CMD_WRITEFILE (18) // string data // - // result // errno #define CMD_READFILEAT (19) // offset // size // - // result // errno // string data #define CMD_WRITEFILEAT (20) // offset // string data // - // result // errno #define CMD_SEEKFILE (21) // offset // whence // - // result // errno // offset #define CMD_FSTAT (22) // - // result // errno // stat #define CMD_READLINK (23) // string path // - // result // errno // stat // string path #define CMD_RMDIRR (24) // string path // - // result // errno #define 
CMD_FTRUNCATE (25) // length // - // result // errno #define CMD_FALLOCATE (26) // length // - // result // errno #define CMD_CHMOD (27) // mode // string path // - // result // errno #define CMD_TEMPFILE (28) // mode // string prefix // - // result // errno // string path #define CMD_RENAME (29) // string oldname // string newname // - // result // errno nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/OptionParser.h0000644000000000000000000000012412315232175023544 xustar000000000000000027 mtime=1395995773.808369 27 atime=1513200574.909706 30 ctime=1513200658.819733051 nordugrid-arc-5.4.2/src/hed/libs/common/OptionParser.h0000644000175000002070000001054212315232175023613 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_OPTION_H__ #define __ARC_OPTION_H__ #include #include namespace Arc { class OptionBase; /// Command line option parser used by ARC command line tools. /** * The command line arguments and a brief and detailed description can be set * in the constructor. Each command line option should be added with an * AddOption() method, corresponding to the type of the option. Parse() * can then be called with the same arguments as main() takes. It returns a * list of arguments and fills each "val" passed in AddOption() if the * corresponding option is specified on the command line. * * A help text is automatically generated and displayed on stdout if a help * option (-h or -?) is used on the command line. Note that Parse() calls * exit(0) after displaying the help text. * * Both short and long format options are supported. * \ingroup common * \headerfile OptionParser.h arc/OptionParser.h */ class OptionParser { public: /// Create a new OptionParser. /** * @param arguments Command line arguments * @param summary Brief summary of command * @param description Detailed description of command */ OptionParser(const std::string& arguments = "", const std::string& summary = "", const std::string& description = ""); ~OptionParser(); /// Add an option which does not take any arguments. /** * @param shortOpt Short version of this option * @param longOpt Long version of this option * @param optDesc Description of option * @param val Value filled during Parse() */ void AddOption(const char shortOpt, const std::string& longOpt, const std::string& optDesc, bool& val); /// Add an option which takes an integer argument. /** * @param shortOpt Short version of this option * @param longOpt Long version of this option * @param optDesc Description of option * @param argDesc Value of option argument * @param val Value filled during Parse() */ void AddOption(const char shortOpt, const std::string& longOpt, const std::string& optDesc, const std::string& argDesc, int& val); /// Add an option which takes a string argument. /** * @param shortOpt Short version of this option * @param longOpt Long version of this option * @param optDesc Description of option * @param argDesc Value of option argument * @param val Value filled during Parse() */ void AddOption(const char shortOpt, const std::string& longOpt, const std::string& optDesc, const std::string& argDesc, std::string& val); /// Add an option which takes a string argument and can be specified multiple times. 
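    /// (An illustrative sketch of typical parser setup, assumed to run inside
    /// main(); the option names and descriptions are assumptions.)
    /// \code
    /// Arc::OptionParser options("[args ...]", "Example summary");
    /// bool verbose = false;
    /// std::string conffile;
    /// std::list<std::string> debug;
    /// options.AddOption('v', "verbose", "print verbose output", verbose);
    /// options.AddOption('z', "conffile", "configuration file", "path", conffile);
    /// options.AddOption('d', "debug", "debug level", "level", debug);
    /// std::list<std::string> params = options.Parse(argc, argv);
    /// \endcode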
/** * @param shortOpt Short version of this option * @param longOpt Long version of this option * @param optDesc Description of option * @param argDesc Value of option argument * @param val Value filled during Parse() */ void AddOption(const char shortOpt, const std::string& longOpt, const std::string& optDesc, const std::string& argDesc, std::list& val); /// Parse the options and arguments. /** * Should be called after all options have been added with AddOption(). * The parameters can be the same as those taken by main(). Note that if a * help option is given this method calls exit(0) after printing help text * to stdout. * @return The list of command line arguments */ std::list Parse(int argc, char **argv); /// Get command and arguments /** * Get the arguments as they were passed to the Parse method as a string * joined by spaces. * @return The command and all arguments joined by a spaces. * @since Added in 4.1.0. **/ const std::string& GetCommandWithArguments() const { return origcmdwithargs; } private: std::string arguments; std::string summary; std::string description; std::list options; std::string origcmdwithargs; }; } // namespace Arc #endif // __ARC_OPTION_H__ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/ArcRegex.h0000644000000000000000000000012412306304733022617 xustar000000000000000027 mtime=1394182619.331424 27 atime=1513200574.919706 30 ctime=1513200658.805732879 nordugrid-arc-5.4.2/src/hed/libs/common/ArcRegex.h0000644000175000002070000000451312306304733022667 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_REGEX_H__ #define __ARC_REGEX_H__ #include #include #include #include namespace Arc { /// A regular expression class. /** This class is a wrapper around the functions provided in regex.h. \ingroup common \headerfile ArcRegex.h arc/ArcRegex.h */ class RegularExpression { public: /// Default constructor RegularExpression(); /// Creates a regex from a pattern string. /** * \since Changed in 4.1.0. ignoreCase argument was added. **/ RegularExpression(std::string pattern, bool ignoreCase = false); /// Copy constructor. RegularExpression(const RegularExpression& regex); /// Destructor ~RegularExpression(); /// Assignment operator. RegularExpression& operator=(const RegularExpression& regex); /// Returns true if the pattern of this regex is ok. bool isOk(); /// Returns true if this regex has the pattern provided. bool hasPattern(std::string str); /// Returns true if this regex matches whole string provided. bool match(const std::string& str) const; /// Returns true if this regex matches the string provided. /** Unmatched parts of the string are stored in 'unmatched'. * Matched parts of the string are stored in 'matched'. The * first entry in matched is the string that matched the regex, * and the following entries are parenthesised elements * of the regex. */ bool match(const std::string& str, std::list& unmatched, std::list& matched) const; /// Try to match string /** * The passed string is matched against this regular expression. If string * matches, any matched subexpression will be appended to the passed vector, * for any conditional subexpression failing to match a empty is appended. * * \param str string to match against this regular expression. * \param matched vector which to append matched subexpressions to. * \return true is returned is string matches, otherwise false. * \since Added in 4.1.0. 
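   *
   * An illustrative sketch (the pattern and the input string are assumptions):
   * \code
   * Arc::RegularExpression re("^([a-z]+)-([0-9]+)$");
   * std::vector<std::string> groups;
   * if (re.isOk() && re.match("job-42", groups)) {
   *   // groups[0] == "job", groups[1] == "42"
   * }
   * \endcode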
**/ bool match(const std::string& str, std::vector& matched) const; /// Returns pattern std::string getPattern() const; private: std::string pattern; regex_t preg; int status; }; } #endif nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/FileUtils.cpp0000644000000000000000000000012413153453672023362 xustar000000000000000027 mtime=1504597946.546683 27 atime=1513200574.946707 30 ctime=1513200658.852733454 nordugrid-arc-5.4.2/src/hed/libs/common/FileUtils.cpp0000644000175000002070000004427313153453672023441 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif // These utilities are implemented using POSIX. // But most used features are availble in MinGW // and hence code should compile in windows environment. #include #include #include #include #include #include #include #include #include #ifndef WIN32 #include #include #endif #include #include #include #include #include #include #include "FileUtils.h" namespace Arc { Glib::Mutex suid_lock; static bool write_all(int h,const void* buf,size_t l) { for(;l>0;) { ssize_t ll = ::write(h,buf,l); if(ll == -1) { if(errno == EINTR) continue; return false; }; buf = (const void*)(((const char*)buf)+ll); l-=ll; } return true; } static bool write_all(FileAccess& fa,const void* buf,size_t l) { for(;l>0;) { ssize_t ll = fa.fa_write(buf,l); if(ll == -1) { if(fa.geterrno() == EINTR) continue; return false; }; buf = (const void*)(((const char*)buf)+ll); l-=ll; } return true; } bool FileCopy(const std::string& source_path,const std::string& destination_path,uid_t uid,gid_t gid) { if((uid && (uid != getuid())) || (gid && (gid != getgid()))) { FileAccess fa; if(!fa.fa_setuid(uid,gid)) return false; bool r = fa.fa_copy(source_path,destination_path,S_IRUSR|S_IWUSR); errno = fa.geterrno(); return r; }; return FileCopy(source_path,destination_path); } bool FileCopy(const std::string& source_path,const std::string& destination_path) { struct stat st; int source_handle = ::open(source_path.c_str(),O_RDONLY,0); if(source_handle == -1) return false; if(::fstat(source_handle,&st) != 0) { ::close(source_handle); return false; } int destination_handle = ::open(destination_path.c_str(),O_WRONLY | O_CREAT | O_TRUNC,st.st_mode); if(destination_handle == -1) { ::close(source_handle); return false; } bool r = FileCopy(source_handle,destination_handle); ::close(source_handle); ::close(destination_handle); return r; } bool FileCopy(const std::string& source_path,int destination_handle) { int source_handle = ::open(source_path.c_str(),O_RDONLY,0); if(source_handle == -1) return false; if(::ftruncate(destination_handle,0) != 0) { ::close(source_handle); return false; } bool r = FileCopy(source_handle,destination_handle); ::close(source_handle); return r; } bool FileCopy(int source_handle,const std::string& destination_path) { int destination_handle = ::open(destination_path.c_str(),O_WRONLY | O_CREAT | O_TRUNC,0600); if(destination_handle == -1) return false; bool r = FileCopy(source_handle,destination_handle); ::close(destination_handle); return r; } #define FileCopyBigThreshold (50*1024*1024) #define FileCopyBufSize (4*1024) bool FileCopy(int source_handle,int destination_handle) { off_t source_size = lseek(source_handle,0,SEEK_END); if(source_size == (off_t)(-1)) return false; if(source_size == 0) return true; #ifndef WIN32 if(source_size <= FileCopyBigThreshold) { void* source_addr = mmap(NULL,source_size,PROT_READ,MAP_SHARED,source_handle,0); if(source_addr != MAP_FAILED) { bool r = write_all(destination_handle,source_addr,source_size); 
munmap(source_addr,source_size); return r; } } #endif if(lseek(source_handle,0,SEEK_SET) != 0) return false; char* buf = new char[FileCopyBufSize]; if(!buf) return false; bool r = true; for(;;) { ssize_t l = FileCopyBufSize; l=::read(source_handle,buf,l); if(l == 0) break; // less than expected if(l == -1) { if(errno == EINTR) continue; // EWOULDBLOCK r = false; break; } if(!write_all(destination_handle,buf,l)) { r = false; break; } } return r; } bool FileRead(const std::string& filename, std::list& data, uid_t uid, gid_t gid) { data.clear(); if((uid && (uid != getuid())) || (gid && (gid != getgid()))) { std::string content; if (!FileRead(filename, content, uid, gid)) return false; for(;;) { std::string::size_type p = content.find('\n'); data.push_back(content.substr(0,p)); if (p == std::string::npos) break; content.erase(0,p+1); } return true; } std::ifstream is(filename.c_str()); if (!is.good()) return false; std::string line; while (std::getline(is, line)) { data.push_back(line); } return true; } bool FileRead(const std::string& filename, std::string& data, uid_t uid, gid_t gid) { data.clear(); if((uid && (uid != getuid())) || (gid && (gid != getgid()))) { FileAccess fa; if(!fa.fa_setuid(uid,gid)) { errno = fa.geterrno(); return false; }; if(!fa.fa_open(filename,O_RDONLY,0)) { errno = fa.geterrno(); return false; }; char buf[1024]; for(;;) { ssize_t l = fa.fa_read(buf,sizeof(buf)); if(l <= 0) break; data += std::string(buf,l); } fa.fa_close(); return true; } int h = ::open(filename.c_str(),O_RDONLY); if(h == -1) return false; char buf[1024]; for(;;) { ssize_t l = ::read(h,buf,sizeof(buf)); if(l <= 0) break; data += std::string(buf,l); } ::close(h); return true; } bool FileCreate(const std::string& filename, const std::string& data, uid_t uid, gid_t gid, mode_t mode) { if((uid && (uid != getuid())) || (gid && (gid != getgid()))) { FileAccess fa; // If somebody bother about changing uid/gid then probably safer mode is needed if(mode == 0) mode = S_IRUSR|S_IWUSR; if(!fa.fa_setuid(uid,gid)) { errno = fa.geterrno(); return false; } std::string tempfile(filename + ".XXXXXX"); if(!fa.fa_mkstemp(tempfile, S_IRUSR|S_IWUSR)) { errno = fa.geterrno(); return false; } if(!write_all(fa,data.c_str(),data.length())) { errno = fa.geterrno(); fa.fa_close(); fa.fa_unlink(tempfile); return false; } fa.fa_close(); if(!fa.fa_chmod(filename, mode)) { errno = fa.geterrno(); fa.fa_unlink(tempfile); return false; } if(!fa.fa_rename(tempfile, filename)) { errno = fa.geterrno(); fa.fa_unlink(tempfile); return false; } return true; } #ifndef WIN32 if(mode == 0) mode = S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH; #else if(mode == 0) mode = S_IRUSR|S_IWUSR; #endif std::string tempfile = filename+".XXXXXX"; int h = ::mkstemp(const_cast(tempfile.c_str())); if(h == -1) return false; if(!write_all(h,data.c_str(),data.length())) { ::close(h); return false; } ::close(h); if(chmod(tempfile.c_str(), mode) != 0) { unlink(tempfile.c_str()); return false; } if(rename(tempfile.c_str(), filename.c_str()) != 0) { unlink(tempfile.c_str()); return false; } return true; } // TODO: maybe by using open + fstat it would be possible to // make this functin less blocking bool FileStat(const std::string& path,struct stat *st,uid_t uid,gid_t gid,bool follow_symlinks) { if((uid && (uid != getuid())) || (gid && (gid != getgid()))) { FileAccess fa; if(!fa.fa_setuid(uid,gid)) { errno = fa.geterrno(); return false; } if(follow_symlinks) { if(!fa.fa_stat(path,*st)) { errno = fa.geterrno(); return false; } } else { if(!fa.fa_lstat(path,*st)) { errno = 
fa.geterrno(); return false; } } return true; }; return FileStat(path,st,follow_symlinks); } bool FileStat(const std::string& path,struct stat *st,bool follow_symlinks) { int r = -1; { if(follow_symlinks) { r = ::stat(path.c_str(),st); } else { r = ::lstat(path.c_str(),st); }; }; return (r == 0); } bool FileLink(const std::string& oldpath,const std::string& newpath,bool symbolic) { if(symbolic) { return (symlink(oldpath.c_str(),newpath.c_str()) == 0); } else { return (link(oldpath.c_str(),newpath.c_str()) == 0); } } bool FileLink(const std::string& oldpath,const std::string& newpath,uid_t uid,gid_t gid,bool symbolic) { if((uid && (uid != getuid())) || (gid && (gid != getgid()))) { FileAccess fa; if(!fa.fa_setuid(uid,gid)) return false; if(symbolic) { if(!fa.fa_softlink(oldpath,newpath)) { errno = fa.geterrno(); return false; } } else { if(!fa.fa_link(oldpath,newpath)) { errno = fa.geterrno(); return false; } } return true; } return FileLink(oldpath,newpath,symbolic); } std::string FileReadLink(const std::string& path,uid_t uid,gid_t gid) { if((uid && (uid != getuid())) || (gid && (gid != getgid()))) { FileAccess fa; if(!fa.fa_setuid(uid,gid)) return ""; std::string linkpath; fa.fa_readlink(path,linkpath); errno = fa.geterrno(); return linkpath; } return FileReadLink(path); } std::string FileReadLink(const std::string& path) { class charbuf { private: char* v; public: charbuf(int size) { v = new char[size]; }; ~charbuf(void) { delete[] v; }; char* str(void) { return v; }; char& operator[](int n) { return v[n]; }; }; const int bufsize = 1024; charbuf buf(bufsize); ssize_t l = readlink(path.c_str(),buf.str(),bufsize); if(l<0) { l = 0; } else if(l>bufsize) { l = bufsize; } return std::string(buf.str(),l); } bool FileDelete(const std::string& path) { return (unlink(path.c_str()) == 0); } bool FileDelete(const std::string& path,uid_t uid,gid_t gid) { if((uid && (uid != getuid())) || (gid && (gid != getgid()))) { FileAccess fa; if(!fa.fa_setuid(uid,gid)) { errno = fa.geterrno(); return false; } if(!fa.fa_unlink(path)) { errno = fa.geterrno(); return false; } return true; }; return FileDelete(path); } // TODO: find non-blocking way to create directory bool DirCreate(const std::string& path,uid_t uid,gid_t gid,mode_t mode,bool with_parents) { if((uid && (uid != getuid())) || (gid && (gid != getgid()))) { bool created = false; bool exists = false; FileAccess fa; if(!fa.fa_setuid(uid,gid)) return false; if(with_parents) { created = fa.fa_mkdirp(path,mode); } else { created = fa.fa_mkdir(path,mode); } int err = fa.geterrno(); exists = created; if(!created) { // Normally following should be done only if errno is EEXIST. // But some file systems may return different error even if // directory exists. Lustre for example returns EACCESS // if user can't create directory even if directory already exists. // So doing stat in order to check if directory exists and that it // is directory. That still does not solve problem with parent // directory without x access right. struct stat st; if(fa.fa_stat(path,st)) { if(S_ISDIR(st.st_mode)) { exists = true; } else { err = EEXIST; // exists, but not a directory } } } if(!exists) { // Nothing we can do - escape errno = err; return false; } if(!created) { // Directory was created by another actor. // There is no sense to apply permissions in that case. 
errno = EEXIST; return true; } if(fa.fa_chmod(path,mode)) return true; errno = fa.geterrno(); return false; } return DirCreate(path,mode,with_parents); } bool DirCreate(const std::string& path,mode_t mode,bool with_parents) { if(::mkdir(path.c_str(),mode) == 0) { if (::chmod(path.c_str(), mode) == 0) return true; else return false; } if(errno == ENOENT) { if(with_parents) { std::string ppath(path); if(!Glib::path_is_absolute(ppath)) { ppath=Glib::get_current_dir()+G_DIR_SEPARATOR_S+ppath; } std::string::size_type pos = ppath.rfind(G_DIR_SEPARATOR_S); if((pos != 0) && (pos != std::string::npos)) { ppath.resize(pos); if(!DirCreate(ppath,mode,true)) return false; if((::mkdir(path.c_str(),mode) == 0) && (chmod(path.c_str(), mode) == 0)) return true; } } } // Look above in previous DirCreate() for description of following struct stat st; if(::stat(path.c_str(),&st) != 0) return false; if(!S_ISDIR(st.st_mode)) return false; errno = EEXIST; return true; } bool DirDelete(const std::string& path,bool recursive,uid_t uid,gid_t gid) { if((uid && (uid != getuid())) || (gid && (gid != getgid()))) { FileAccess fa; if(!fa.fa_setuid(uid,gid)) { errno = fa.geterrno(); return false; } if (recursive) { if(!fa.fa_rmdirr(path)) { errno = fa.geterrno(); return false; } } else { if(!fa.fa_rmdir(path)) { errno = fa.geterrno(); return false; } } return true; } return DirDelete(path, recursive); } bool DirDelete(const std::string& path, bool recursive) { if (!recursive) return (rmdir(path.c_str()) == 0); struct stat st; if (::stat(path.c_str(), &st) != 0 || ! S_ISDIR(st.st_mode)) return false; try { Glib::Dir dir(path); std::string file_name; while ((file_name = dir.read_name()) != "") { std::string fullpath(path); fullpath += G_DIR_SEPARATOR_S + file_name; if (::lstat(fullpath.c_str(), &st) != 0) return false; if (S_ISDIR(st.st_mode)) { if (!DirDelete(fullpath.c_str())) { return false; } } else { if (::remove(fullpath.c_str()) != 0) { return false; } } } } catch (Glib::FileError& e) { return false; } if (rmdir(path.c_str()) != 0) return false; return true; } static bool list_recursive(FileAccess* fa,const std::string& path,std::list& entries,bool recursive) { std::string curpath = path; while (curpath.rfind('/') == curpath.length()-1) curpath.erase(curpath.length()-1); if (!fa->fa_opendir(curpath)) return false; std::string entry; while (fa->fa_readdir(entry)) { if (entry == "." 
|| entry == "..") continue; std::string fullentry(curpath + '/' + entry); struct stat st; if (!fa->fa_lstat(fullentry, st)) return false; entries.push_back(fullentry); if (recursive && S_ISDIR(st.st_mode)) { FileAccess fa_; if (!list_recursive(&fa_, fullentry, entries, recursive)) return false; } } return true; } bool DirList(const std::string& path,std::list& entries,bool recursive,uid_t uid,gid_t gid) { entries.clear(); if((uid && (uid != getuid())) || (gid && (gid != getgid()))) { FileAccess fa; if(!fa.fa_setuid(uid,gid)) { errno = fa.geterrno(); return false; } if(!list_recursive(&fa,path,entries,recursive)) { errno = fa.geterrno(); return false; } return true; } return DirList(path, entries, recursive); } static bool list_recursive(const std::string& path,std::list& entries,bool recursive) { struct stat st; std::string curpath = path; while (curpath.rfind('/') == curpath.length()-1) curpath.erase(curpath.length()-1); try { Glib::Dir dir(curpath); std::string file_name; while ((file_name = dir.read_name()) != "") { std::string fullpath(curpath); fullpath += G_DIR_SEPARATOR_S + file_name; if (::lstat(fullpath.c_str(), &st) != 0) return false; entries.push_back(fullpath); if (recursive && S_ISDIR(st.st_mode)) { if (!list_recursive(fullpath, entries, recursive)) { return false; } } } } catch (Glib::FileError& e) { return false; } return true; } bool DirList(const std::string& path, std::list& entries, bool recursive) { entries.clear(); return list_recursive(path, entries, recursive); } bool TmpDirCreate(std::string& path) { std::string tmpdir(Glib::get_tmp_dir()); bool result = false; #ifdef HAVE_MKDTEMP char tmptemplate[] = "ARC-XXXXXX"; tmpdir = Glib::build_filename(tmpdir, tmptemplate); char* tmpdirchar = mkdtemp(const_cast(tmpdir.c_str())); if (tmpdirchar) { path = tmpdirchar; result = true; } #else // NOTE: not safe! 
std::string tmp("ARC-" + UUID()); path = Glib::build_filename(tmpdir, tmp); result = DirCreate(path, 0700, true); #endif return result; } bool TmpFileCreate(std::string& filename, const std::string& data, uid_t uid, gid_t gid, mode_t mode) { if (filename.length() < 6 || filename.find("XXXXXX") != filename.length() - 6) { std::string tmpdir(Glib::get_tmp_dir()); char tmptemplate[] = "ARC-XXXXXX"; filename = Glib::build_filename(tmpdir, tmptemplate); } if(mode == 0) mode = S_IRUSR|S_IWUSR; if((uid && (uid != getuid())) || (gid && (gid != getgid()))) { FileAccess fa; if(!fa.fa_setuid(uid,gid)) { errno = fa.geterrno(); return false; } if(!fa.fa_mkstemp(filename,mode)) { errno = fa.geterrno(); return false; } if(!write_all(fa,data.c_str(),data.length())) { errno = fa.geterrno(); fa.fa_close(); fa.fa_unlink(filename); return false; } return true; } int h = Glib::mkstemp(filename); if(h == -1) return false; if (::chmod(filename.c_str(), mode) != 0) { ::close(h); unlink(filename.c_str()); return false; } if(!write_all(h,data.c_str(),data.length())) { ::close(h); unlink(filename.c_str()); return false; }; ::close(h); return true; } bool CanonicalDir(std::string& name, bool leading_slash, bool trailing_slash) { std::string::size_type i,ii,n; char separator = G_DIR_SEPARATOR_S[0]; ii=0; i=0; if(name[0] != separator) name=separator+name; for(;i= name.length()) { if(trailing_slash) ii++; n=i; break; } else if(name[n] == '.') { n++; if(name[n] == '.') { n++; if((n >= name.length()) || (name[n] == separator)) { i=n; /* go 1 directory up */ for(;;) { if(ii<=0) { /* bad dir */ return false; } ii--; if(name[ii] == separator) break; } ii--; i--; } } else if((n >= name.length()) || (name[n] == separator)) { i=n; ii--; i--; } } else if(name[n] == separator) { i=n; ii--; i--; } } n=i; i++; ii++; } if(leading_slash) { if((name[0] != separator) || (ii==0)) name=separator+name.substr(0,ii); else name=name.substr(0,ii); } else { if((name[0] != separator) || (ii==0)) name=name.substr(0,ii); else name=name.substr(1,ii-1); } return true; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/FileUtils.h0000644000000000000000000000012412356526562023032 xustar000000000000000027 mtime=1404743026.191114 27 atime=1513200574.867706 30 ctime=1513200658.813732977 nordugrid-arc-5.4.2/src/hed/libs/common/FileUtils.h0000644000175000002070000001632512356526562023106 0ustar00mockbuildmock00000000000000#ifndef __ARC_FILEUTILS_H__ #define __ARC_FILEUTILS_H__ #include #include #ifdef WIN32 #ifndef uid_t #define uid_t int #endif #ifndef gid_t #define gid_t int #endif #endif namespace Arc { // Utility functions for handling files and directories. // These functions offer possibility to access files and directories // under user and group ids different from those of current user. // Id switching is done in a safe way for multi-threaded applications. // If any of specified ids is 0 then such id is not switched and // current id is used instead. /** \addtogroup common * @{ */ /// Copy file source_path to file destination_path. /** Specified uid and gid are used for accessing filesystem. */ bool FileCopy(const std::string& source_path,const std::string& destination_path, uid_t uid, gid_t gid); /// Copy file source_path to file destination_path. bool FileCopy(const std::string& source_path,const std::string& destination_path); /// Copy file source_path to file handle destination_handle. 
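  /// A short illustrative sketch (the paths, flags and mode are assumptions;
  /// ::open()/::close() require <fcntl.h> and <unistd.h>):
  /// \code
  /// int dest = ::open("/tmp/copy.out", O_WRONLY | O_CREAT | O_TRUNC, 0600);
  /// if (dest != -1) {
  ///   bool copied = Arc::FileCopy("/tmp/copy.in", dest);
  ///   ::close(dest);
  /// }
  /// \endcode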
bool FileCopy(const std::string& source_path,int destination_handle); /// Copy from file handle source_handle to file destination_path. bool FileCopy(int source_handle,const std::string& destination_path); /// Copy from file handle source_handle to file handle destination_handle. bool FileCopy(int source_handle,int destination_handle); /// Simple method to read file content from filename. /** Specified uid and gid are used for accessing filesystem. The content is * split into lines with the new line character removed, and the lines are * returned in the data list. If protected access is required, FileLock * should be used in addition to FileRead. */ bool FileRead(const std::string& filename, std::list& data, uid_t uid=0, gid_t gid=0); /// Simple method to read whole file content from filename. /** Specified uid and gid are used for accessing filesystem. */ bool FileRead(const std::string& filename, std::string& data, uid_t uid=0, gid_t gid=0); /// Simple method to create a new file containing given data. /** Specified uid and gid are used for accessing filesystem. An existing file * is overwritten with the new data. Overwriting is performed atomically so * the file is guaranteed to exist throughout the duration of this method. * Permissions of the created file are determined by mode, the default is 644 * or 600 if uid and gid are non-zero. If protected access is required, * FileLock should be used in addition to FileCreate. If uid/gid are zero * then no real switch of uid/gid is done. */ bool FileCreate(const std::string& filename, const std::string& data, uid_t uid=0, gid_t gid=0, mode_t mode = 0); /// Stat a file and put info into the st struct bool FileStat(const std::string& path,struct stat *st,bool follow_symlinks); /// Stat a file using the specified uid and gid and put info into the st struct. /** Specified uid and gid are used for accessing filesystem. */ bool FileStat(const std::string& path,struct stat *st,uid_t uid,gid_t gid,bool follow_symlinks); /// Make symbolic or hard link of file. bool FileLink(const std::string& oldpath,const std::string& newpath, bool symbolic); /// Make symbolic or hard link of file using the specified uid and gid. /** Specified uid and gid are used for accessing filesystem. */ bool FileLink(const std::string& oldpath,const std::string& newpath,uid_t uid,gid_t gid,bool symbolic); /// Returns path at which symbolic link is pointing. std::string FileReadLink(const std::string& path); /// Returns path at which symbolic link is pointing using the specified uid and gid. /** Specified uid and gid are used for accessing filesystem. */ std::string FileReadLink(const std::string& path,uid_t uid,gid_t gid); /// Deletes file at path. bool FileDelete(const std::string& path); /// Deletes file at path using the specified uid and gid. /** Specified uid and gid are used for accessing filesystem. */ bool FileDelete(const std::string& path,uid_t uid,gid_t gid); /// Create a new directory. bool DirCreate(const std::string& path,mode_t mode,bool with_parents = false); /// Create a new directory using the specified uid and gid. /** Specified uid and gid are used for accessing filesystem. */ bool DirCreate(const std::string& path,uid_t uid,gid_t gid,mode_t mode,bool with_parents = false); /// Delete a directory, and its content if recursive is true. /** If the directory is not empty and recursive is false DirDelete will fail. */ bool DirDelete(const std::string& path, bool recursive = true); /// Delete a directory, and its content if recursive is true. 
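  /// An illustrative sketch combining DirCreate() and DirDelete() (the path
  /// and mode are assumptions):
  /// \code
  /// if (Arc::DirCreate("/tmp/example-dir/sub", 0700, true)) {
  ///   // ... work inside the directory ...
  ///   Arc::DirDelete("/tmp/example-dir", true);
  /// }
  /// \endcode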
/** If the directory is not empty and recursive is false DirDelete will fail. Specified uid and gid are used for accessing filesystem. */ bool DirDelete(const std::string& path, bool recursive, uid_t uid, gid_t gid); /// List all entries in a directory. /** If path is not a directory or cannot be listed then false is returned. On success entries is filled with the list of entries in the directory. The entries are appended to path. */ bool DirList(const std::string& path, std::list& entries, bool recursive); /// List all entries in a directory using the specified uid and gid. /** If path is not a directory or cannot be listed then false is returned. On success entries is filled with the list of entries in the directory. The entries are appended to path. */ bool DirList(const std::string& path, std::list& entries, bool recursive, uid_t uid, gid_t gid); /// Create a temporary directory under the system defined temp location, and return its path. /** Uses mkdtemp if available, and a combination of random parameters if not. This latter method is not as safe as mkdtemp. */ bool TmpDirCreate(std::string& path); /// Simple method to create a temporary file containing given data. /** Specified uid and gid are used for accessing filesystem. Permissions of * the created file are determined by mode, with default being read/write * only by owner. If uid/gid are zero then no real switch of uid/gid is done. * If the parameter filename ends with "XXXXXX" then the file created has the * same path as filename with these characters replaced by random values. If * filename has any other value or is empty then the file is created in the * system defined temp location. On success filename contains the name of the * temporary file. The content of the data argument is written into this * file. This method returns true if data was successfully written to the * temporary file, false otherwise. */ bool TmpFileCreate(std::string& filename, const std::string& data, uid_t uid=0, gid_t gid=0, mode_t mode = 0); /// Removes /../ from 'name'. /** If leading_slash=true '/' will be added at the beginning of 'name' if * missing. Otherwise it will be removed. The directory separator used here * depends on the platform. If trailing_slash=true a trailing slash will not * be removed. * \return false if it is not possible to remove all the ../ */ bool CanonicalDir(std::string& name, bool leading_slash = true, bool trailing_slash = false); /** @} */ } // namespace Arc #endif // __ARC_FILEUTILS_H__ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/HostnameResolver.h0000644000000000000000000000012413214315176024421 xustar000000000000000027 mtime=1513200254.761812 27 atime=1513200574.867706 30 ctime=1513200658.829733173 nordugrid-arc-5.4.2/src/hed/libs/common/HostnameResolver.h0000644000175000002070000000737513214315176024502 0ustar00mockbuildmock00000000000000#ifndef __ARC_HOSTNAMERESOLVER_H__ #define __ARC_HOSTNAMERESOLVER_H__ #include #include #include #include #include #include #include #ifdef WIN32 #ifndef uid_t #define uid_t int #endif #ifndef gid_t #define gid_t int #endif #endif namespace Arc { class Run; /// Defines interface for accessing filesystems. /** This class performs host name respolution through a proxy executable. 
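      A short illustrative sketch (the host name, the service string and the
      assumption that hr_resolve() returns 0 on success are illustrative only):
      \code
      Arc::HostnameResolver* hr = Arc::HostnameResolver::Acquire();
      std::list<Arc::HostnameResolver::SockAddr> addrs;
      if (hr && hr->hr_resolve("example.org", "80", false, addrs) == 0) {
        // addrs now holds the resolved socket addresses
      }
      Arc::HostnameResolver::Release(hr);
      \endcode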
\ingroup common \headerfile HostnameResolver.h arc/HostnameResolver.h */ class HostnameResolver { public: class SockAddr { friend class HostnameResolver; public: SockAddr(); SockAddr(SockAddr const& other); SockAddr& operator=(SockAddr const& other); ~SockAddr(); int Family() const { return family; } sockaddr const* Addr() const { return addr; } socklen_t Length() const { return length; } private: int family; socklen_t length; sockaddr *addr; }; /// New HostnameResolver object. HostnameResolver(void); /// Shuts down any spawned executable. ~HostnameResolver(void); /// Constructor which takes already existing object from global cache static HostnameResolver* Acquire(void); /// Destructor which returns object into global cache static void Release(HostnameResolver* fa); /// Check if communication with proxy works bool ping(void); /// Performs resolution of provided host name. int hr_resolve(std::string const& node, std::string const& service, bool local, std::list& addrs); /// Get errno of last operation. Every operation resets errno. int geterrno() { return errno_; }; /// Returns true if this instance is in useful condition operator bool(void) { return (hostname_resolver_ != NULL); }; /// Returns true if this instance is not in useful condition bool operator!(void) { return (hostname_resolver_ == NULL); }; /// Special method for using in unit tests. static void testtune(void); private: Glib::Mutex lock_; Run* hostname_resolver_; int errno_; public: /// Internal struct used for communication between processes. typedef struct { unsigned int size; unsigned int cmd; } header_t; }; /// Container for shared HostnameResolver objects. /** HostnameResolverContainer maintains a pool of executables and can be used to reduce the overhead in creating and destroying executables when using HostnameResolver. \ingroup common \headerfile HostnameResolver.h arc/HostnameResolver.h */ class HostnameResolverContainer { public: /// Creates container with number of stored objects between minval and maxval. HostnameResolverContainer(unsigned int minval, unsigned int maxval); /// Creates container with number of stored objects between 1 and 10. HostnameResolverContainer(void); /// Destroys container and all stored objects. ~HostnameResolverContainer(void); /// Get object from container. /** Object either is taken from stored ones or new one created. Acquired object looses its connection to container and can be safely destroyed or returned into other container. */ HostnameResolver* Acquire(void); /// Returns object into container. /** It can be any object - taken from another container or created using new. */ void Release(HostnameResolver* hr); /// Adjust minimal number of stored objects. void SetMin(unsigned int val); /// Adjust maximal number of stored objects. 
void SetMax(unsigned int val); private: Glib::Mutex lock_; unsigned int min_; unsigned int max_; std::list hrs_; void KeepRange(void); }; } // namespace Arc #endif // __ARC_HOSTNAMERESOLVER_H__ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/OptionParser.cpp0000644000000000000000000000012412315232175024077 xustar000000000000000027 mtime=1395995773.808369 27 atime=1513200574.932707 30 ctime=1513200658.857733515 nordugrid-arc-5.4.2/src/hed/libs/common/OptionParser.cpp0000644000175000002070000002675612315232175024164 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #ifdef HAVE_GLIBMM_OPTIONCONTEXT_SET_SUMMARY #include #ifndef HAVE_GLIBMM_OPTIONCONTEXT_GET_HELP #include #endif #else #include #include #include #endif #include "OptionParser.h" namespace Arc { #ifdef HAVE_GLIBMM_OPTIONCONTEXT_SET_SUMMARY class OptionBase { public: OptionBase(char shortOpt, const std::string& longOpt, const std::string& optDesc, const std::string& argDesc) { entry.set_short_name(shortOpt); entry.set_long_name(longOpt); entry.set_description(optDesc); if (!argDesc.empty()) entry.set_arg_description(argDesc); } virtual ~OptionBase() {} virtual void AddEntry(Glib::OptionGroup& grp) = 0; virtual void Result() {} protected: Glib::OptionEntry entry; }; #else class OptionBase { public: OptionBase(char shortOpt, const std::string& longOpt, const std::string& optDesc, const std::string& argDesc) : shortOpt(shortOpt), longOpt(longOpt), optDesc(optDesc), argDesc(argDesc) {} virtual ~OptionBase() {} virtual bool Set(const std::string& val) = 0; protected: char shortOpt; std::string longOpt; std::string optDesc; std::string argDesc; friend class OptionParser; }; #endif class BoolOption : public OptionBase { public: BoolOption(char shortOpt, std::string longOpt, std::string optDesc, std::string argDesc, bool& value) : OptionBase(shortOpt, longOpt, optDesc, argDesc), value(value) {} ~BoolOption() {} #ifdef HAVE_GLIBMM_OPTIONCONTEXT_SET_SUMMARY void AddEntry(Glib::OptionGroup& grp) { grp.add_entry(entry, value); } #else bool Set(const std::string&) { value = true; return true; } #endif private: bool& value; }; struct IntOption : public OptionBase { public: IntOption(char shortOpt, std::string longOpt, std::string optDesc, std::string argDesc, int& value) : OptionBase(shortOpt, longOpt, optDesc, argDesc), value(value) {} ~IntOption() {} #ifdef HAVE_GLIBMM_OPTIONCONTEXT_SET_SUMMARY void AddEntry(Glib::OptionGroup& grp) { grp.add_entry(entry, value); } #else bool Set(const std::string& val) { bool ok = stringto(val, value); if (!ok) std::cout << IString("Cannot parse integer value '%s' for -%c", val, shortOpt) << std::endl; return ok; } #endif private: int& value; }; struct StringOption : public OptionBase { public: StringOption(char shortOpt, std::string longOpt, std::string optDesc, std::string argDesc, std::string& value) : OptionBase(shortOpt, longOpt, optDesc, argDesc), value(value) {} ~StringOption() {} #ifdef HAVE_GLIBMM_OPTIONCONTEXT_SET_SUMMARY void AddEntry(Glib::OptionGroup& grp) { grp.add_entry(entry, gvalue); } void Result() { if (!gvalue.empty()) value = gvalue; } #else bool Set(const std::string& val) { value = val; return true; } #endif private: std::string& value; #ifdef HAVE_GLIBMM_OPTIONCONTEXT_SET_SUMMARY Glib::ustring gvalue; #endif }; struct StringListOption : public OptionBase { public: StringListOption(char shortOpt, std::string longOpt, std::string optDesc, std::string argDesc, std::list& value) : OptionBase(shortOpt, longOpt, 
optDesc, argDesc), value(value) {} ~StringListOption() {} #ifdef HAVE_GLIBMM_OPTIONCONTEXT_SET_SUMMARY void AddEntry(Glib::OptionGroup& grp) { grp.add_entry(entry, gvalue); } void Result() { value.insert(value.end(), gvalue.begin(), gvalue.end()); } #else bool Set(const std::string& val) { value.push_back(val); return true; } #endif private: std::list& value; #ifdef HAVE_GLIBMM_OPTIONCONTEXT_SET_SUMMARY Glib::OptionGroup::vecustrings gvalue; #endif }; OptionParser::OptionParser(const std::string& arguments, const std::string& summary, const std::string& description) : arguments(arguments), summary(summary), description(description) {} OptionParser::~OptionParser() { for (std::list::iterator it = options.begin(); it != options.end(); it++) delete *it; } void OptionParser::AddOption(const char shortOpt, const std::string& longOpt, const std::string& optDesc, bool& value) { options.push_back(new BoolOption(shortOpt, longOpt, optDesc, "", value)); } void OptionParser::AddOption(const char shortOpt, const std::string& longOpt, const std::string& optDesc, const std::string& argDesc, int& value) { options.push_back(new IntOption(shortOpt, longOpt, optDesc, argDesc, value)); } void OptionParser::AddOption(const char shortOpt, const std::string& longOpt, const std::string& optDesc, const std::string& argDesc, std::string& value) { options.push_back(new StringOption(shortOpt, longOpt, optDesc, argDesc, value)); } void OptionParser::AddOption(const char shortOpt, const std::string& longOpt, const std::string& optDesc, const std::string& argDesc, std::list& value) { options.push_back(new StringListOption(shortOpt, longOpt, optDesc, argDesc, value)); } #ifdef HAVE_GLIBMM_OPTIONCONTEXT_SET_SUMMARY std::list OptionParser::Parse(int argc, char **argv) { if (argc > 0) { origcmdwithargs = argv[0]; for (int i = 1; i < argc; ++i) { origcmdwithargs += " " + std::string(argv[i]); } } Glib::OptionContext ctx(arguments); if (!summary.empty()) ctx.set_summary(summary); if (!description.empty()) ctx.set_description(description); ctx.set_translation_domain(PACKAGE); Glib::OptionGroup grp("main", "Main Group"); grp.set_translation_domain(PACKAGE); bool h_value = false; BoolOption h_entry('h', "help", "Show help options", "", h_value); h_entry.AddEntry(grp); for (std::list::iterator it = options.begin(); it != options.end(); it++) (*it)->AddEntry(grp); ctx.set_main_group(grp); try { ctx.parse(argc, argv); } catch (Glib::OptionError& err) { std::cout << err.what() << std::endl; exit(1); } if(h_value) { #ifdef HAVE_GLIBMM_OPTIONCONTEXT_GET_HELP std::cout << ctx.get_help() << std::endl; #else std::cout << IString("Use -? to get usage description") << std::endl; #endif exit(0); } for (std::list::iterator it = options.begin(); it != options.end(); it++) (*it)->Result(); std::list params; for (int i = 1; i < argc; i++) params.push_back(argv[i]); return params; } #else static inline void setopt(struct option& opt, const char *name, int has_arg, int *flag, int val) { opt.name = (char*)name; // for buggy getopt header - Solaris opt.has_arg = has_arg; opt.flag = flag; opt.val = val; } std::list OptionParser::Parse(int argc, char **argv) { if (argc > 0) { origcmdwithargs = argv[0]; for (int i = 1; i < argc; ++i) { origcmdwithargs += " " + std::string(argv[i]); } } struct option *longoptions = new struct option[options.size() + 3]; int i = 0; std::string optstring; for (std::list::iterator it = options.begin(); it != options.end(); it++) { setopt(longoptions[i], (*it)->longOpt.c_str(), (*it)->argDesc.empty() ? 
no_argument : required_argument, NULL, (*it)->shortOpt?(*it)->shortOpt:(i+0x100)); if((*it)->shortOpt) { optstring += (*it)->shortOpt; if (!(*it)->argDesc.empty()) optstring += ':'; } ++i; } setopt(longoptions[i++], "help", no_argument, NULL, 'h'); optstring += 'h'; setopt(longoptions[i++], "help", no_argument, NULL, '?'); optstring += '?'; setopt(longoptions[i++], NULL, no_argument, NULL, '\0'); char *argv0save = argv[0]; argv[0] = strrchr(argv[0], '/'); if (argv[0]) argv[0]++; else argv[0] = argv0save; int opt = 0; while (opt != -1) { #ifdef HAVE_GETOPT_LONG_ONLY opt = getopt_long_only(argc, argv, optstring.c_str(), longoptions, NULL); #else opt = getopt_long(argc, argv, optstring.c_str(), longoptions, NULL); #endif if (opt == -1) continue; if ((opt == '?') || (opt == ':') || (opt == 'h')) { if (optopt) { delete[] longoptions; exit(1); } std::cout << IString("Usage:") << std::endl; std::cout << " " << argv[0]; if (!options.empty()) std::cout << " [" << IString("OPTION...") << "]"; if (!arguments.empty()) std::cout << " " << IString(arguments); std::cout << std::endl << std::endl; if (!summary.empty()) std::cout << IString(summary) << std::endl << std::endl; std::cout << IString("Help Options:") << std::endl; std::cout << " -h, -?, --help " << IString("Show help options") << std::endl << std::endl; std::cout << IString("Application Options:") << std::endl; for (std::list::iterator it = options.begin(); it != options.end(); it++) { std::cout << " "; if ((*it)->shortOpt) { std::cout << "-" << (*it)->shortOpt << ", "; } std::cout << "--"; if ((*it)->argDesc.empty()) std::cout << std::setw(20+4*((*it)->shortOpt == 0)) << std::left << (*it)->longOpt; else std::cout << (*it)->longOpt << "=" << std::setw(19-((*it)->longOpt).length()+(((*it)->shortOpt == 0))*4) << std::left << IString((*it)->argDesc); std::cout << " " << IString((*it)->optDesc) << std::endl; } std::cout << std::endl; if (!description.empty()) std::cout << IString(description) << std::endl; delete[] longoptions; exit(0); } i = 0; for (std::list::iterator it = options.begin(); it != options.end(); it++) { int o = (*it)->shortOpt; if(!o) o = longoptions[i].val; if (opt == o) { if (!(*it)->Set(optarg ? optarg : "")) { delete[] longoptions; exit(1); } break; } ++i; } } delete[] longoptions; argv[0] = argv0save; std::list params; while (argc > optind) params.push_back(argv[optind++]); return params; } #endif } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/Utils.cpp0000644000000000000000000000012412735427505022563 xustar000000000000000027 mtime=1467363141.749989 27 atime=1513200574.937707 30 ctime=1513200658.862733576 nordugrid-arc-5.4.2/src/hed/libs/common/Utils.cpp0000644000175000002070000003040212735427505022627 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #ifdef HAVE_GLIBMM_GETENV #include #else #include #endif #ifdef HAVE_GLIBMM_SETENV #include #else #include #endif #ifdef HAVE_GLIBMM_UNSETENV #include #else #include #endif #include #include #include #include #include #include #include "Utils.h" #ifndef BUFLEN #define BUFLEN 1024 #endif // If defined this turn on additional layer in handling // setenv() which tries to avoid memory leaks. // Windows uses different way to store environment. 
TODO: investigate #ifndef WIN32 #define TRICKED_ENVIRONMENT extern char** environ; #endif namespace Arc { #ifndef WIN32 class SIGPIPEIngore { private: void (*sighandler_)(int); public: SIGPIPEIngore(void); ~SIGPIPEIngore(void); }; SIGPIPEIngore::SIGPIPEIngore(void) { sighandler_ = signal(SIGPIPE,SIG_IGN); } SIGPIPEIngore::~SIGPIPEIngore(void) { signal(SIGPIPE,sighandler_); } static SIGPIPEIngore sigpipe_ignore; #endif #ifdef TRICKED_ENVIRONMENT // This class is not protected by mutexes because its methods // are always called with SetEnv mutex locked. class TrickEnvRecord { private: static const unsigned int alloc_size = 1024; static std::list records_; std::string name_; //std::string value_; char* ptr_; unsigned int size_; // max value to fit into allocated space bool unset_; TrickEnvRecord(void); TrickEnvRecord(const std::string& name); public: bool Set(const std::string& value); bool Unset(void); static bool Set(const std::string& name, const std::string& value, bool overwrite); static bool Unset(const std::string& name); }; std::list TrickEnvRecord::records_; TrickEnvRecord::TrickEnvRecord(const std::string& name): name_(name),ptr_(NULL),size_(0),unset_(false) { } bool TrickEnvRecord::Set(const std::string& value) { char* curval = getenv(name_.c_str()); if(ptr_ && curval && (curval == ptr_)) { // Still same memory is active. Can modify content. if(value.length() <= size_) { // Enough space to store new value memcpy(ptr_,value.c_str(),value.length()); memset(ptr_+value.length(),0,size_-value.length()+1); return true; }; }; unsigned int newsize = 0; char* newrec = NULL; bool allocated = false; if(unset_ && ptr_ && (value.length() <= size_)) { // Unset buffer can be reused newsize = size_; size_ = 0; newrec = ptr_-(name_.length()+1); ptr_ = NULL; } else { // Allocate new memory newsize = (value.length()/alloc_size+1)*alloc_size; newrec = (char*) ::malloc(name_.length()+1+newsize+1); allocated = true; }; if(newrec) { memcpy(newrec,name_.c_str(),name_.length()); newrec[name_.length()] = '='; memcpy(newrec+name_.length()+1,value.c_str(),value.length()); memset(newrec+name_.length()+1+value.length(),0,newsize-value.length()+1); if(::putenv(newrec) == 0) { // New record stored ptr_ = newrec+name_.length()+1; size_ = newsize; //value_ = value; unset_ = false; return true; } else { if(allocated) { free(newrec); newrec = NULL; }; }; }; // Failure return false; } bool TrickEnvRecord::Set(const std::string& name, const std::string& value, bool overwrite) { if(!overwrite) { if(getenv(name.c_str())) return false; }; for(std::list::iterator r = records_.begin(); r != records_.end(); ++r) { if(r->name_ == name) { // TODO: more optimal search return r->Set(value); }; }; // No such record - making new TrickEnvRecord rec(name); if(!rec.Set(value)) return false; records_.push_back(rec); return true; } bool TrickEnvRecord::Unset(void) { unset_ = true; #ifdef HAVE_UNSETENV unsetenv(name_.c_str()); if(ptr_) *(ptr_-1) = 0; // Removing '=' #else // Reusing buffer if(ptr_) { *(ptr_-1) = 0; putenv(ptr_-(name_.length()+1)); } else { return false; // Never happens }; #endif return true; } bool TrickEnvRecord::Unset(const std::string& name) { for(std::list::iterator r = records_.begin(); r != records_.end(); ++r) { if(r->name_ == name) { // TODO: more optimal search return r->Unset(); }; }; #ifdef HAVE_UNSETENV unsetenv(name.c_str()); #else // Better solution is needed putenv(strdup(name.c_str())); #endif return true; } #endif // TRICKED_ENVIRONMENT // Below is a set of mutexes for protecting environment // 
variables. Current implementation is very simplistic. // There are 2 mutexes. 'env_read_lock' protects any // access to the pool of environment variables. It is // exposed to outside through EnvLockWrap() and EnvLockUnwrap() // functions. Any third party code doing setenv()/getenv() // must be wrapped using those functions. GetEnv() and SetEnv() // functions of this API are using that lock internally. // Second mutex 'env_write_lock' is meant for protecting // set of environment variables from modification. It // is only supposed to be used outside this library // for protecting access to external libraries using // environment variables as input parameters. That mutex // is exposed through EnvLockAcquire() and EnvLockRelease(). // Those functions do not acquire 'env_lock_read' mutex. // Hence proper usage of this infrastructure requires // one to acquire both mutexes. See example below: // EnvLockAcquire(); // SetEnv("ARG","VAL"); // Setting variable needed for ext. library // EnvLockWrap(); // func(); // Calling ext function which uses getenv() // EnvLockUnwrap(); // EnvLockRelease(); // Current implementation locks too many resources and has // negative performance impact. In a future (unless there // will be no need for all that at all) EnvLockAcquire will // must provide lock per variable. And EnvLockWrap must // provide different functionality depending on setenv() or // getenv() is going to be wrapped. // The purpose of this mutex is to 'solve' problem with // some third party libraries which use environment variables // as input arguments :( static Glib::Mutex& env_write_lock(void) { static Glib::Mutex* mutex = new Glib::Mutex; return *mutex; } // And this mutex is needed because it seems like none if // underlying functions provide proper thread protection // of environment variables. Also calls to external libraries // using getenv() need to be protected by this lock. static SharedMutex& env_read_lock(void) { static SharedMutex* mutex = new SharedMutex; return *mutex; } class SharedMutexExclusive { private: SharedMutex& lock_; public: SharedMutexExclusive(SharedMutex& lock):lock_(lock) { lock_.lockExclusive(); }; ~SharedMutexExclusive(void) { lock_.unlockExclusive(); }; }; class SharedMutexShared { private: SharedMutex& lock_; public: SharedMutexShared(SharedMutex& lock):lock_(lock) { lock_.lockShared(); }; ~SharedMutexShared(void) { lock_.unlockShared(); }; }; std::string GetEnv(const std::string& var) { SharedMutexShared env_lock(env_read_lock()); #ifdef HAVE_GLIBMM_GETENV return Glib::getenv(var); #else char* val = getenv(var.c_str()); return val ? val : ""; #endif } std::string GetEnv(const std::string& var, bool &found) { SharedMutexShared env_lock(env_read_lock()); #ifdef HAVE_GLIBMM_GETENV return Glib::getenv(var, found); #else char* val = getenv(var.c_str()); found = (val != NULL); return val ? 
val : ""; #endif } std::list GetEnv(void) { SharedMutexShared env_lock(env_read_lock()); #if defined(HAVE_GLIBMM_LISTENV) && defined(HAVE_GLIBMM_GETENV) std::list envp = Glib::listenv(); for(std::list::iterator env = envp.begin(); env != envp.end(); ++env) { *env = *env + "=" + Glib::getenv(*env); }; return envp; #else #ifdef WIN32 #error Glib with support for listenv and getenv is needed #endif std::list envp; for(char** enventry = environ; *enventry; ++enventry) { envp.push_back(*enventry); }; return envp; #endif } bool SetEnv(const std::string& var, const std::string& value, bool overwrite) { SharedMutexExclusive env_lock(env_read_lock()); #ifndef TRICKED_ENVIRONMENT #ifdef HAVE_GLIBMM_SETENV return Glib::setenv(var, value, overwrite); #else #ifdef HAVE_SETENV return (setenv(var.c_str(), value.c_str(), overwrite) == 0); #else return (putenv(strdup((var + "=" + value).c_str())) == 0); #endif #endif #else // TRICKED_ENVIRONMENT return TrickEnvRecord::Set(var, value, overwrite); #endif // TRICKED_ENVIRONMENT } void UnsetEnv(const std::string& var) { SharedMutexExclusive env_lock(env_read_lock()); #ifndef TRICKED_ENVIRONMENT #ifdef HAVE_GLIBMM_UNSETENV Glib::unsetenv(var); #else #ifdef HAVE_UNSETENV unsetenv(var.c_str()); #else putenv(strdup(var.c_str())); #endif #endif #else // TRICKED_ENVIRONMENT // This is compromise and will not work if third party // code distinguishes between empty and unset variable. // But without this pair of setenv/unsetenv will // definitely leak memory. TrickEnvRecord::Unset(var); #endif // TRICKED_ENVIRONMENT } void EnvLockAcquire(void) { env_write_lock().lock(); } void EnvLockRelease(void) { env_write_lock().unlock(); } void EnvLockWrap(bool all) { if(all) { env_read_lock().lockExclusive(); } else { env_read_lock().lockShared(); } } void EnvLockUnwrap(bool all) { if(all) { env_read_lock().unlockExclusive(); } else { env_read_lock().unlockShared(); } } void EnvLockUnwrapComplete(void) { // This function is deprecated and its body removed because // there is no safe way to reset locks after call to fork(). } static Glib::Mutex signal_lock; InterruptGuard::InterruptGuard() { signal_lock.lock(); saved_sigint_handler = signal(SIGINT, SIG_IGN); } InterruptGuard::~InterruptGuard() { signal(SIGINT, saved_sigint_handler); signal_lock.unlock(); } std::string StrError(int errnum) { #ifdef HAVE_STRERROR_R char errbuf[BUFLEN]; #ifdef STRERROR_R_CHAR_P return strerror_r(errnum, errbuf, sizeof(errbuf)); #else if (strerror_r(errnum, errbuf, sizeof(errbuf)) == 0) return errbuf; else return "Unknown error " + tostring(errnum); #endif #else #warning USING THREAD UNSAFE strerror(). UPGRADE YOUR libc. return strerror(errnum); #endif } static Glib::Mutex persistent_libraries_lock; static std::list persistent_libraries_list; bool PersistentLibraryInit(const std::string& name) { // Library is made persistent by loading intermediate // module which depends on that library. So passed name // is name of that module. Modules usually reside in // ARC_LOCATION/lib/arc. This approach is needed because // on some platforms shared libraries can't be dlopen'ed. 
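  // Illustration (the module name "modcrypto" is hypothetical and only shows
  // how the path is composed by the code below):
  //   PersistentLibraryInit("modcrypto");
  //   // -> tries to load ARC_LOCATION/PKGLIBSUBDIR/libmodcrypto.G_MODULE_SUFFIX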
std::string arc_lib_path = ArcLocation::Get(); if(!arc_lib_path.empty()) arc_lib_path = arc_lib_path + G_DIR_SEPARATOR_S + PKGLIBSUBDIR; std::string libpath = Glib::build_filename(arc_lib_path,"lib"+name+"."+G_MODULE_SUFFIX); persistent_libraries_lock.lock(); for(std::list::iterator l = persistent_libraries_list.begin(); l != persistent_libraries_list.end();++l) { if(*l == libpath) { persistent_libraries_lock.unlock(); return true; }; }; persistent_libraries_lock.unlock(); Glib::Module *module = new Glib::Module(libpath); if(module && (*module)) { persistent_libraries_lock.lock(); persistent_libraries_list.push_back(libpath); persistent_libraries_lock.unlock(); return true; }; if(module) delete module; return false; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/StringConv.cpp0000644000000000000000000000012412733561721023554 xustar000000000000000027 mtime=1466885073.986422 27 atime=1513200574.933707 30 ctime=1513200658.858733528 nordugrid-arc-5.4.2/src/hed/libs/common/StringConv.cpp0000644000175000002070000003136612733561721023632 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "StringConv.h" namespace Arc { Logger stringLogger(Logger::getRootLogger(), "StringConv"); static std::string char_to_hex(char c, bool uppercase) { // ASCII only std::string s; unsigned int n; n = (unsigned int)((c>>4) & 0x0f); s += (char)((n<10)?(n+'0'):(n-10+(uppercase?'A':'a'))); n = (unsigned int)((c>>0) & 0x0f); s += (char)((n<10)?(n+'0'):(n-10+(uppercase?'A':'a'))); return s; } static std::string char_to_octal(char c) { // ASCII only std::string s; unsigned int n; n = (unsigned int)((c>>6) & 0x07); s += (char)(n+'0'); n = (unsigned int)((c>>3) & 0x07); s += (char)(n+'0'); n = (unsigned int)((c>>0) & 0x07); s += (char)(n+'0'); return s; } static char hex_to_char(char n) { // ASCII only if((n>='0') && (n <= '9')) { n-='0'; } else if((n>='a') && (n <= 'f')) { n = n - 'a' + 10; } else if((n>='A') && (n <= 'F')) { n = n - 'A' + 10; } else { n = 0; }; return n; } static char octal_to_char(char n) { // ASCII only if((n>='0') && (n <= '7')) { n-='0'; } else { n = 0; }; return n; } char hex_to_char(const std::string& str) { char c = 0; for(std::string::size_type p = 0; p= end_quotes.length()) { te = str.find(start_quotes[qp],pos+1); } else { te = str.find(end_quotes[qp],pos+1); } if(te != std::string::npos) { token = str.substr(pos+1, te-pos-1); return te+1; } } te = str.find_first_of(delimiters,pos+1); if(te != std::string::npos) { token = str.substr(pos, te - pos); } else { token = str.substr(pos); } return te; } void tokenize(const std::string& str, std::vector& tokens, const std::string& delimiters, const std::string& start_quotes, const std::string& end_quotes) { // Skip delimiters at beginning. Find first "non-delimiter". std::string::size_type lastPos = str.find_first_not_of(delimiters, 0); while (std::string::npos != lastPos) { std::string token; // Found a token, find end of it std::string::size_type pos = get_token(token, str, lastPos, delimiters, start_quotes, end_quotes); tokens.push_back(token); if(std::string::npos == pos) break; // Last token lastPos = str.find_first_not_of(delimiters, pos); } } void tokenize(const std::string& str, std::list& tokens, const std::string& delimiters, const std::string& start_quotes, const std::string& end_quotes) { // Skip delimiters at beginning. Find first "non-delimiter". 
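  // Behaviour illustration, derived from the quote handling in get_token()
  // above (comment only): with " " as delimiter and the double quote as both
  // start and end quote,
  //   tokenize("a \"b c\" d", tokens, " ", "\"", "\"");
  // produces the three tokens  a  |  b c  |  d .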
std::string::size_type lastPos = str.find_first_not_of(delimiters, 0); while (std::string::npos != lastPos) { std::string token; // Found a token, find end of it std::string::size_type pos = get_token(token, str, lastPos, delimiters, start_quotes, end_quotes); tokens.push_back(token); if(std::string::npos == pos) break; // Last token lastPos = str.find_first_not_of(delimiters, pos); } } static const char kBlankChars[] = " \t\n\r"; std::string trim(const std::string& str, const char *sep) { if (sep == NULL) sep = kBlankChars; std::string::size_type const first = str.find_first_not_of(sep); return (first == std::string::npos) ? std::string() : str.substr(first, str.find_last_not_of(sep) - first + 1); } std::string strip(const std::string& str) { std::string retstr = ""; std::string::size_type pos = str.find_first_of("\n"); std::string::size_type lastPos = 0; while (std::string::npos != pos) { const std::string tmpstr = str.substr(lastPos, pos-lastPos); if (!trim(tmpstr).empty()) { if (!retstr.empty()) retstr += "\n"; retstr += tmpstr; } lastPos = pos+1; pos = str.find_first_of("\n", lastPos); } if (!str.substr(lastPos).empty()) { if (!retstr.empty()) retstr += "\n"; retstr += str.substr(lastPos); } return retstr; } std::string join(const std::list& strlist, const std::string& delimiter) { std::string result; for (std::list::const_iterator i = strlist.begin(); i != strlist.end(); ++i) { if (i == strlist.begin()) { result.append(*i); } else { result.append(delimiter).append(*i); } } return result; } std::string join(const std::vector& strlist, const std::string& delimiter) { std::string result; for (std::vector::const_iterator i = strlist.begin(); i != strlist.end(); ++i) { if (i == strlist.begin()) { result.append(*i); } else { result.append(delimiter).append(*i); } } return result; } static int unescape_character(const std::string& scanner, int i) { int first_digit; int second_digit; first_digit = g_ascii_xdigit_value(scanner[i++]); if (first_digit < 0) return -1; second_digit = g_ascii_xdigit_value(scanner[i++]); if (second_digit < 0) return -1; return (first_digit << 4) | second_digit; } std::string uri_encode(const std::string& str, bool encode_slash) { // characters not to escape (unreserved characters from RFC 3986) std::string unreserved_chars("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~"); if (!encode_slash) unreserved_chars += '/'; // RFC 3986 says upper case hex chars should be used return escape_chars(str, unreserved_chars, '%', true, escape_hex_upper); } std::string uri_unencode(const std::string& str) { std::string out = str; int character; if (str.empty()) return str; int j = 0; for (size_t i = 0; i < str.size(); i++) { character = str[i]; if (str[i] == '%') { i++; if (str.size() - i < 2) return ""; character = unescape_character(str, i); i++; /* The other char will be eaten in the loop header */ } out[j++] = (char)character; } out.resize(j); return out; } std::string convert_to_rdn(const std::string& dn) { std::string ret; size_t pos1 = std::string::npos; size_t pos2; do { std::string str; pos2 = dn.find_last_of("/", pos1); if(pos2 != std::string::npos && pos1 == std::string::npos) { str = dn.substr(pos2+1); ret.append(str); pos1 = pos2-1; } else if (pos2 != std::string::npos && pos1 != std::string::npos) { str = dn.substr(pos2+1, pos1-pos2); ret.append(str); pos1 = pos2-1; } if(pos2 != (std::string::npos+1)) ret.append(","); }while(pos2 != std::string::npos && pos2 != (std::string::npos+1)); return ret; } std::string escape_chars(const std::string& str, 
const std::string& chars, char esc, bool excl, escape_type type) { std::string out = str; std::string esc_chars = chars; if (!excl) esc_chars += esc; std::string::size_type p = 0; for(;;) { if (excl) p = out.find_first_not_of(esc_chars,p); else p = out.find_first_of(esc_chars,p); if(p == std::string::npos) break; out.insert(p,1,esc); ++p; switch(type) { case escape_octal: out.replace(p,1,char_to_octal(out[p])); p+=3; break; case escape_hex: out.replace(p,1,char_to_hex(out[p], false)); p+=2; break; case escape_hex_upper: out.replace(p,1,char_to_hex(out[p], true)); p+=2; break; default: ++p; break; }; } return out; } std::string unescape_chars(const std::string& str, char esc, escape_type type) { std::string out = str; std::string::size_type p = 0; for(;(p+1) 36) return false; std::string::size_type p = 0; for(;;++p) { if(p >= s.length()) return false; if(!isspace(s[p])) break; } if(s[p] == '+') { sign = true; } else if(s[p] == '-') { sign = false; } else { sign = true; } unsigned long long n = 0; for(;p < s.length();++p) { unsigned int v = 0; char c = s[p]; if((c >= '0') && (c <= '9')) { v = (unsigned int)((unsigned char)(c-'0')); } else if((c >= 'a') && (c <= 'z')) { v = (unsigned int)((unsigned char)(c-'a'))+10U; } else if((c >= 'A') && (c <= 'A')) { v = (unsigned int)((unsigned char)(c-'A'))+10U; } else { break; // false? } if(v >= (unsigned int)base) break; // false? n = n*base + (unsigned long long)v; } t = n; return true; } bool strtoint(const std::string& s, int& t, int base) { unsigned long long n; bool sign; if(!strtoint(s,n,sign,base)) return false; t = (int)n; if(!sign) t=-t; return true; } bool strtoint(const std::string& s, unsigned int& t, int base) { unsigned long long n; bool sign; if(!strtoint(s,n,sign,base)) return false; if(!sign) return false; t = (unsigned int)n; return true; } bool strtoint(const std::string& s, long& t, int base) { unsigned long long n; bool sign; if(!strtoint(s,n,sign,base)) return false; t = (long)n; if(!sign) t=-t; return true; } bool strtoint(const std::string& s, unsigned long& t, int base) { unsigned long long n; bool sign; if(!strtoint(s,n,sign,base)) return false; if(!sign) return false; t = (unsigned long)n; return true; } bool strtoint(const std::string& s, long long& t, int base) { unsigned long long n; bool sign; if(!strtoint(s,n,sign,base)) return false; t = (long long)n; if(!sign) t=-t; return true; } bool strtoint(const std::string& s, unsigned long long& t, int base) { unsigned long long n; bool sign; if(!strtoint(s,n,sign,base)) return false; if(!sign) return false; t = n; return true; } std::string inttostr(signed long long t, int base, int width) { unsigned long long n; if(t < 0) { n = (unsigned long long)(-t); } else { n = (unsigned long long)t; } std::string s = inttostr(n,base,width); if((!s.empty()) && (t < 0)) { if((s.length() > 1) && (s[0] == '0')) { s[0] = '-'; } else { s.insert(0,1,'-'); } } return s; } std::string inttostr(unsigned long long t, int base, int width) { if(base < 2) return ""; if(base > 36) return ""; std::string s; for(;t;) { unsigned int v = t % (unsigned int)base; char c; if(v < 10) { c = ((char)v) + '0'; } else { c = ((char)(v-10)) + 'a'; } s.insert(0,1,c); t = t / (unsigned int)base; } if(s.empty()) s="0"; while(s.length() < width) s.insert(0,1,'0'); return s; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/IntraProcessCounter.h0000644000000000000000000000012412111140470025061 xustar000000000000000027 mtime=1361363256.081099 27 atime=1513200574.873706 30 
ctime=1513200658.831733197 nordugrid-arc-5.4.2/src/hed/libs/common/IntraProcessCounter.h0000644000175000002070000002452512111140470025136 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- // IntraProcessCounter.h #ifndef __IntraProcessCounter__ #define __IntraProcessCounter__ #include #include #include #include namespace Arc { /// A class for counters used by threads within a single process. /** This is a class for shared among different threads within a single process. See the Counter class for further information about counters and examples of usage. @ingroup common @headerfile IntraProcessCounter.h arc/IntraProcessCounter.h */ class IntraProcessCounter : public Counter { public: /// Creates an IntraProcessCounter with specified limit and excess. /** This constructor creates a counter with the specified limit (amount of resources available for reservation) and excess limit (an extra amount of resources that may be used for prioritized reservations). @param limit The limit of the counter. @param excess The excess limit of the counter. */ IntraProcessCounter(int limit, int excess); /// Destructor. /** This is the destructor of the IntraProcessCounter class. Does not need to do anything. */ virtual ~IntraProcessCounter(); /// Returns the current limit of the counter. /** This method returns the current limit of the counter, i.e. how many units can be reserved simultaneously by different threads without claiming high priority. @return The current limit of the counter. */ virtual int getLimit(); /// Sets the limit of the counter. /** This method sets a new limit for the counter. @param newLimit The new limit, an absolute number. @return The new limit. */ virtual int setLimit(int newLimit); /// Changes the limit of the counter. /** Changes the limit of the counter by adding a certain amount to the current limit. @param amount The amount by which to change the limit. @return The new limit. */ virtual int changeLimit(int amount); /// Returns the excess limit of the counter. /** Returns the excess limit of the counter, i.e. by how much the usual limit may be exceeded by prioritized reservations. @return The excess limit. */ virtual int getExcess(); /// Sets the excess limit of the counter. /** This method sets a new excess limit for the counter. @param newExcess The new excess limit, an absolute number. @return The new excess limit. */ virtual int setExcess(int newExcess); /// Changes the excess limit of the counter. /** Changes the excess limit of the counter by adding a certain amount to the current excess limit. @param amount The amount by which to change the excess limit. @return The new excess limit. */ virtual int changeExcess(int amount); /// Returns the current value of the counter. /** Returns the current value of the counter, i.e. the number of unreserved units. Initially, the value is equal to the limit of the counter. When a reservation is made, the the value is decreased. Normally, the value should never be negative, but this may happen if there are prioritized reservations. It can also happen if the limit is decreased after some reservations have been made, since reservations are never revoked. @return The current value of the counter. */ virtual int getValue(); /// Makes a reservation from the counter. /** This method makes a reservation from the counter. If the current value of the counter is too low to allow for the reservation, the method blocks until the reservation is possible or times out. @param amount The amount to reserve, default value is 1. 
@param duration The duration of a self expiring reservation, default is that it lasts forever. @param prioritized Whether this reservation is prioritized and thus allowed to use the excess limit. @param timeOut The maximum time to block if the value of the counter is too low, default is to allow "eternal" blocking. @return A CounterTicket that can be queried about the status of the reservation as well as for cancellations and extensions. */ virtual CounterTicket reserve(int amount = 1, Glib::TimeVal duration = ETERNAL, bool prioritized = false, Glib::TimeVal timeOut = ETERNAL); protected: /// Cancellation of a reservation. /** This method cancels a reservation. It is called by the CounterTicket that corresponds to the reservation. @param reservationID The identity number (key) of the reservation to cancel. */ virtual void cancel(IDType reservationID); /// Extension of a reservation. /** This method extends a reservation. It is called by the CounterTicket that corresponds to the reservation. @param reservationID Used for input as well as output. Contains the identification number of the original reservation on entry and the new identification number of the extended reservation on exit. @param expiryTime Used for input as well as output. Contains the expiry time of the original reservation on entry and the new expiry time of the extended reservation on exit. @param duration The time by which to extend the reservation. The new expiration time is computed based on the current time, NOT the previous expiration time. */ virtual void extend(IDType& reservationID, Glib::TimeVal& expiryTime, Glib::TimeVal duration = ETERNAL); private: /// Copy constructor, should not be used. /** A private copy constructor, since Counters should never be copied. It should be impossible to use, but if that would happen by accident the program will exit with the EXIT_FAILURE code. */ IntraProcessCounter(const IntraProcessCounter& unique); /// Assignment operator, should not be used. /** A private assignment operator, since Counters should never be assigned. It should be impossible to use, but if that would happen by accident the program will exit with the EXIT_FAILURE code. */ void operator=(const IntraProcessCounter& unique); /// Computes and returns the value of the counter. /** Cancels any pending reservations that have expired and returns the value of the counter. This method is not thread-safe by itself and should only be called from other methods that have already locked synchMutex. @return The value of the counter. */ int unsafeGetValue(); /// Cancels a reservation. /** Cancels a reservation with the specified identification number, i.e. removes that entry from the reservations map and increases the value by the corresponding amount. This method is not thread-safe by itself and should only be called from other methods that have already locked synchMutex. @param reservationID The identification number of the reservation to cancel. @return The amount that was reserved, or zero if there was no reservation with the specified identification number. */ int unsafeCancel(IDType reservationID); /// Makes a reservation. /** Makes a reservation of the specified amount for the specified duration and returns the identification number of the reservation. This method is not thread-safe by itself and should only be called from other methods that have already locked synchMutex. Furthermore, it assumes that the calling method has already asserted that the specified amount is available for reservation. 
@param amount The amount to reserve. @duration The duration of the reservation. @return The identification number of the reservation. */ IDType unsafeReserve(int amount, Glib::TimeVal duration); /// Returns the expiry time for the next expiring reservation. /** Returns the expiry time for the next expiring reservation, i.e. the expiry time of the top entry of the selfExpiringReservations priority queue. @return The expiry time for the next expiring reservation. */ Glib::TimeVal unsafeGetNextExpiration(); /// The limit of the counter. /** The current limit of the counter. Should not be altered unless synchMutex is locked. */ int limit; /// The excess limit of the counter. /** The current excess limit of the counter. Should not be altered unless synchMutex is locked. */ int excess; /// The value of the counter. /** The current value of the counter. Should not be altered unless synchMutex is locked. */ int value; /// The identification number of the next reservation. /** The attribute holds the identification number of the next reservation. When a new identification number is needed, this number is used and the attribute is incremented in order to hold a number that is available for the next reservation. Should not be altered unless synchMutex is locked. */ IDType nextReservationID; /// Maps identification numbers of reservations to amounts. /** This is a map that uses identification numbers of reservations as keys and maps them to the corresponding amounts amounts. */ std::map reservations; /// Contains expiration reminders of self-expiring reservations. /** This priority queue contains expiration reminders of self-expiring reservations. The next reservation to expire is allways at the top. */ std::priority_queue selfExpiringReservations; /// A mutex that protects the attributes. /** This mutex is used for protection of attributes from concurrent access from several threads. Any method that alter an attribute should lock this mutex. */ Glib::Mutex synchMutex; /// A condition used for waiting for waiting for a higher value. /** This condition is used when a reservation cannot be made immediately because the amount that shall be reserved is larger than what is currently available. */ Glib::Cond synchCond; }; } #endif nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/FileAccess.h0000644000000000000000000000012312120077760023120 xustar000000000000000027 mtime=1363181552.082845 27 atime=1513200574.899706 29 ctime=1513200658.81473299 nordugrid-arc-5.4.2/src/hed/libs/common/FileAccess.h0000644000175000002070000001757012120077760023200 0ustar00mockbuildmock00000000000000#ifndef __ARC_FILEACCESS_H__ #define __ARC_FILEACCESS_H__ #include #include #include #include #include #include #ifdef WIN32 #ifndef uid_t #define uid_t int #endif #ifndef gid_t #define gid_t int #endif #endif namespace Arc { class Run; /// Defines interface for accessing filesystems. /** This class accesses the local filesystem through a proxy executable which allows switching user id in multithreaded systems without introducing conflict with other threads. Its methods are mostly replicas of corresponding POSIX functions with some convenience tweaking. \ingroup common \headerfile FileAccess.h arc/FileAccess.h */ class FileAccess { public: /// New FileAccess object. FileAccess(void); /// Shuts down any spawned executable. 
~FileAccess(void); /// Constructor which takes already existing object from global cache static FileAccess* Acquire(void); /// Destructor which returns object into global cache static void Release(FileAccess* fa); /// Check if communication with proxy works bool ping(void); /// Modify user uid and gid. /** * If any is set to 0 then executable is switched to original uid/gid. * * \since Renamed in 3.0.0 from setuid */ bool fa_setuid(int uid,int gid); /// Make a directory and assign it specified mode. /** * \since Renamed in 3.0.0 from mkdir */ bool fa_mkdir(const std::string& path, mode_t mode); /// Make a directory and assign it specified mode. /** * If missing all intermediate directories are created too. * * \since Renamed in 3.0.0 from mkdirp */ bool fa_mkdirp(const std::string& path, mode_t mode); /// Create hard link. /** * \since Renamed in 3.0.0 from link */ bool fa_link(const std::string& oldpath, const std::string& newpath); /// Create symbolic (aka soft) link. /** * \since Renamed in 3.0.0 form softlink */ bool fa_softlink(const std::string& oldpath, const std::string& newpath); /// Copy file to new location. /** * If new file is created it is assigned specified mode. * * \since Renamed in 3.0.0 from copy */ bool fa_copy(const std::string& oldpath, const std::string& newpath, mode_t mode); /// Rename file /** * \since Renamed in 3.0.0 from rename */ bool fa_rename(const std::string& oldpath, const std::string& newpath); /// Change mode of filesystem object /** * \since Renamed in 3.0.0 from chmod */ bool fa_chmod(const std::string& path,mode_t mode); /// Stat file. /** * \since Renamed in 3.0.0 from stat */ bool fa_stat(const std::string& path, struct stat& st); /// Stat symbolic link or file. /** * \since Renamed in 3.0.0 from lstat */ bool fa_lstat(const std::string& path, struct stat& st); /// Stat open file. /** * \since Renamed in 3.0.0 from fstat */ bool fa_fstat(struct stat& st); /// Truncate open file. /** * \since Renamed in 3.0.0 from ftruncate */ bool fa_ftruncate(off_t length); /// Allocate disk space for open file. /** * \since Renamed in 3.0.0 from fallocate */ off_t fa_fallocate(off_t length); /// Read content of symbolic link. /** * \since Renamed in 3.0.0 from readlink */ bool fa_readlink(const std::string& path, std::string& linkpath); /// Remove file system object. /** * \since Renamed in 3.0.0 from remove */ bool fa_remove(const std::string& path); /// Remove file. /** * \since Renamed in 3.0.0 from unlink */ bool fa_unlink(const std::string& path); /// Remove directory (if empty). /** * \since Renamed in 3.0.0 from rmdir */ bool fa_rmdir(const std::string& path); /// Remove directory recursively. /** * \since Renamed in 3.0.0 from rmdirr */ bool fa_rmdirr(const std::string& path); /// Open directory. /** * Only one directory may be open at a time. * * \since Renamed in 3.0.0 from opendir */ bool fa_opendir(const std::string& path); /// Close open directory. /** * \since Renamed in 3.0.0 from closedir */ bool fa_closedir(void); /// Read relative name of object in open directory. /** * \since Renamed in 3.0.0 from readdir */ bool fa_readdir(std::string& name); /// Open file. Only one file may be open at a time. /** * \since Renamed in 3.0.0 from open */ bool fa_open(const std::string& path, int flags, mode_t mode); /// Close open file. /** * \since Renamed in 3.0.0 from close */ bool fa_close(void); /// Open new temporary file for writing. /** * On input path contains template of file name ending with XXXXXX. * On output path is path to created file. 
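   *
   * Sketch of a possible call sequence (the temporary directory used in the
   * template is an arbitrary example):
   * @code
   * Arc::FileAccess fa;
   * std::string path = "/tmp/arc-exampleXXXXXX";
   * if (fa.fa_mkstemp(path, 0600)) {
   *   // path now names the created file, which should be open for writing
   *   fa.fa_write("example\n", 8);
   *   fa.fa_close();
   * }
   * @endcode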
* * \since Renamed in 3.0.0 from mkstemp */ bool fa_mkstemp(std::string& path, mode_t mode); /// Change current position in open file. /** * \since Renamed in 3.0.0 from lseek */ off_t fa_lseek(off_t offset, int whence); /// Read from open file. /** * \since Renamed in 3.0.0 from read */ ssize_t fa_read(void* buf,size_t size); /// Write to open file. /** * \since Renamed in 3.0.0 from write */ ssize_t fa_write(const void* buf,size_t size); /// Read from open file at specified offset. /** * \since Renamed in 3.0.0 from pread */ ssize_t fa_pread(void* buf,size_t size,off_t offset); /// Write to open file at specified offset. /** * \since Renamed in 3.0.0 from pwrite */ ssize_t fa_pwrite(const void* buf,size_t size,off_t offset); /// Get errno of last operation. Every operation resets errno. int geterrno() { return errno_; }; /// Returns true if this instance is in useful condition operator bool(void) { return (file_access_ != NULL); }; /// Returns true if this instance is not in useful condition bool operator!(void) { return (file_access_ == NULL); }; /// Special method for using in unit tests. static void testtune(void); private: Glib::Mutex lock_; Run* file_access_; int errno_; uid_t uid_; gid_t gid_; public: /// Internal struct used for communication between processes. typedef struct { unsigned int size; unsigned int cmd; } header_t; }; /// Container for shared FileAccess objects. /** FileAccessContainer maintains a pool of executables and can be used to reduce the overhead in creating and destroying executables when using FileAccess. \ingroup common \headerfile FileAccess.h arc/FileAccess.h */ class FileAccessContainer { public: /// Creates container with number of stored objects between minval and maxval. FileAccessContainer(unsigned int minval, unsigned int maxval); /// Creates container with number of stored objects between 1 and 10. FileAccessContainer(void); /// Destroys container and all stored objects. ~FileAccessContainer(void); /// Get object from container. /** Object either is taken from stored ones or new one created. Acquired object looses its connection to container and can be safely destroyed or returned into other container. */ FileAccess* Acquire(void); /// Returns object into container. /** It can be any object - taken from another container or created using new. */ void Release(FileAccess* fa); /// Adjust minimal number of stored objects. void SetMin(unsigned int val); /// Adjust maximal number of stored objects. void SetMax(unsigned int val); private: Glib::Mutex lock_; unsigned int min_; unsigned int max_; std::list fas_; void KeepRange(void); }; } // namespace Arc #endif // __ARC_FILEACCESS_H__ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/CheckSum.h0000644000000000000000000000012312111140470022606 xustar000000000000000027 mtime=1361363256.081099 27 atime=1513200574.871706 29 ctime=1513200658.81073294 nordugrid-arc-5.4.2/src/hed/libs/common/CheckSum.h0000644000175000002070000002166412111140470022665 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_CHECKSUM_H__ #define __ARC_CHECKSUM_H__ #include #include #include #include #include #include namespace Arc { /// Interface for checksum manipulations. /** This class is an interface and is extended in the specialized classes * CRC32Sum, MD5Sum and Adler32Sum. The interface is among others used * during data transfers through DataBuffer class. The helper class * CheckSumAny can be used as an easier way of handling automatically the * different checksum types. 
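 *
 * A small usage sketch (illustrative only; the buffer content and the file
 * path are hypothetical examples):
 * @code
 * Arc::MD5Sum md5;
 * md5.start();
 * char buf[1024] = {}; // fill with data; add() may be called repeatedly
 * md5.add(buf, sizeof(buf));
 * md5.end();
 * char printed[64];
 * md5.print(printed, sizeof(printed)); // yields something like "md5:<hex digest>"
 *
 * // or, for whole files, via the CheckSumAny helper:
 * std::string sum = Arc::CheckSumAny::FileChecksum("/path/to/file");
 * @endcode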
* * @see CheckSumAny * @see CRC32Sum * @see MD5Sum * @see Adler32Sum * @ingroup common * @headerfile CheckSum.h arc/CheckSum.h **/ class CheckSum { public: /// Default constructor CheckSum(void) {} virtual ~CheckSum(void) {} /// Initiate the checksum algorithm. /** * This method must be called before starting a new checksum * calculation. **/ virtual void start(void) = 0; /// Add data to be checksummed. /** * This method calculates the checksum of the passed data chunk, taking * into account the previous state of this object. * * @param buf pointer to data chuck to be checksummed. * @param len size of the data chuck. **/ virtual void add(void *buf, unsigned long long int len) = 0; /// Finalize the checksumming. /** * This method finalizes the checksum algorithm, that is calculating the * final checksum result. **/ virtual void end(void) = 0; /// Retrieve result of checksum as binary blob. virtual void result(unsigned char*& res, unsigned int& len) const = 0; /// Retrieve result of checksum into a string. /** * The passed string buf is filled with result of checksum algorithm in * base 16. At most len characters are filled into buffer buf. The * hexadecimal value is prepended with "algorithm:", where algorithm * is one of "cksum", "md5" or "adler32" respectively corresponding to * the result from the CRC32Sum, MD5Sum and Adler32 classes. * * @param buf pointer to buffer which should be filled with checksum * result. * @param len max number of character filled into buffer. * @return 0 on success **/ virtual int print(char *buf, int len) const { if (len > 0) buf[0] = 0; return 0; } /// Set internal checksum state /** * This method sets the internal state to that of the passed textual * representation. The format passed to this method must be the same as * retrieved from the CheckSum::print method. * * @param buf string containing textual representation of checksum * @see CheckSum::print **/ virtual void scan(const char *buf) = 0; /// Indicates whether the checksum has been calculated virtual operator bool(void) const { return false; } /// Indicates whether the checksum has not been calculated virtual bool operator!(void) const { return true; } }; /// Implementation of CRC32 checksum /** * This class is a specialized class of the CheckSum class. It provides an * implementation for the CRC-32 IEEE 802.3 standard. * @ingroup common * @headerfile CheckSum.h arc/CheckSum.h **/ class CRC32Sum : public CheckSum { private: uint32_t r; unsigned long long count; bool computed; public: CRC32Sum(void); virtual ~CRC32Sum(void) {} virtual void start(void); virtual void add(void *buf, unsigned long long int len); virtual void end(void); virtual void result(unsigned char*& res, unsigned int& len) const { res = (unsigned char*)&r; len = 4; } virtual int print(char *buf, int len) const; virtual void scan(const char *buf); virtual operator bool(void) const { return computed; } virtual bool operator!(void) const { return !computed; } uint32_t crc(void) const { return r; } }; /// Implementation of MD5 checksum /** * This class is a specialized class of the CheckSum class. It provides an * implementation of the MD5 message-digest algorithm specified in RFC * 1321. 
* @ingroup common * @headerfile CheckSum.h arc/CheckSum.h **/ class MD5Sum : public CheckSum { private: bool computed; uint32_t A; uint32_t B; uint32_t C; uint32_t D; uint64_t count; uint32_t X[16]; unsigned int Xlen; // uint32_t T[64]; public: MD5Sum(void); virtual void start(void); virtual void add(void *buf, unsigned long long int len); virtual void end(void); virtual void result(unsigned char*& res, unsigned int& len) const { res = (unsigned char*)&A; len = 16; } virtual int print(char *buf, int len) const; virtual void scan(const char *buf); virtual operator bool(void) const { return computed; } virtual bool operator!(void) const { return !computed; } }; /// Implementation of Adler32 checksum /** * This class is a specialized class of the CheckSum class. It provides an * implementation of the Adler-32 checksum algorithm. * @ingroup common * @headerfile CheckSum.h arc/CheckSum.h **/ class Adler32Sum : public CheckSum { private: uLong adler; bool computed; public: Adler32Sum(void) : computed(false) { start(); } virtual void start(void) { adler = adler32(0L, Z_NULL, 0); } virtual void add(void* buf,unsigned long long int len) { adler = adler32(adler, (const Bytef *)buf, len); } virtual void end(void) { computed = true; } virtual void result(unsigned char*& res,unsigned int& len) const { res=(unsigned char*)&adler; len=4; } virtual int print(char* buf,int len) const { if(!computed) { if(len>0) { buf[0]=0; return 0; } } return snprintf(buf,len,"adler32:%08lx",adler); }; virtual void scan(const char* /* buf */) { }; virtual operator bool(void) const { return computed; } virtual bool operator!(void) const { return !computed; } }; /// Wrapper for CheckSum class /** * To be used for manipulation of any supported checksum type in a * transparent way. * @ingroup common * @headerfile CheckSum.h arc/CheckSum.h **/ class CheckSumAny : public CheckSum { public: /// Type of checksum typedef enum { none, ///< No checksum unknown, ///< Unknown checksum undefined, ///< Undefined checksum cksum, ///< CRC32 checksum md5, ///< MD5 checksum adler32 ///< ADLER32 checksum } type; private: CheckSum *cs; type tp; public: /// Construct a new CheckSumAny from the given CheckSum. CheckSumAny(CheckSum *c = NULL) : cs(c), tp(none) {} /// Construct a new CheckSumAny using the given checksum type. CheckSumAny(type type); /// Construct a new CheckSumAny using the given checksum type represented as a string. CheckSumAny(const char *type); virtual ~CheckSumAny(void) { if (cs) delete cs; } virtual void start(void) { if (cs) cs->start(); } virtual void add(void *buf, unsigned long long int len) { if (cs) cs->add(buf, len); } virtual void end(void) { if (cs) cs->end(); } virtual void result(unsigned char*& res, unsigned int& len) const { if (cs) { cs->result(res, len); return; } len = 0; } virtual int print(char *buf, int len) const { if (cs) return cs->print(buf, len); if (len > 0) buf[0] = 0; return 0; } virtual void scan(const char *buf) { if (cs) cs->scan(buf); } virtual operator bool(void) const { if (!cs) return false; return *cs; } virtual bool operator!(void) const { if (!cs) return true; return !(*cs); } bool active(void) { return (cs != NULL); } static type Type(const char *crc); type Type(void) const { return tp; } void operator=(const char *type); bool operator==(const char *s); bool operator==(const CheckSumAny& ck); /// Get checksum of a file /** * This method provides an easy way to get the checksum of a file, by only * specifying the path to the file. 
Optionally the checksum type can be * specified, if not the MD5 algorithm will be used. * * @param filepath path to file of which checksum should be calculated * @param tp type of checksum algorithm to use, default is md5. * @param decimalbase specifies whether output should be in base 10 or * 16 * @return a string containing the calculated checksum is returned. **/ static std::string FileChecksum(const std::string& filepath, type tp = md5, bool decimalbase = false); }; } // namespace Arc #endif // __ARC_CHECKSUM_H__ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/Utils.h0000644000000000000000000000012312735427327022231 xustar000000000000000026 mtime=1467363031.73435 27 atime=1513200574.867706 30 ctime=1513200658.826733136 nordugrid-arc-5.4.2/src/hed/libs/common/Utils.h0000644000175000002070000001763012735427327022306 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_UTILS_H__ #define __ARC_UTILS_H__ #include // NOTE: On Solaris errno is not working properly if cerrno is included first #include #include #include namespace Arc { /** \addtogroup common * @{ */ /// Portable function for getting environment variables. Protected by shared lock. std::string GetEnv(const std::string& var); /// Portable function for getting environment variables. Protected by shared lock. std::string GetEnv(const std::string& var, bool &found); /// Portable function for getting all environment variables. Protected by shared lock. std::list GetEnv(); /// Portable function for setting environment variables. Protected by exclusive lock. bool SetEnv(const std::string& var, const std::string& value, bool overwrite = true); /// Portable function for unsetting environment variables. Protected by exclusive lock. void UnsetEnv(const std::string& var); // These are functions to be used used exclusively for solving // problem with specific libraries which depend too much on // environment variables. /// Obtain lock on environment. /** For use with external libraries using unprotected setenv/getenv in a multi-threaded environment. */ void EnvLockAcquire(void); /// Release lock on environment. /** For use with external libraries using unprotected setenv/getenv in a multi-threaded environment. */ void EnvLockRelease(void); /// Start code which is using setenv/getenv. /** For use with external libraries using unprotected setenv/getenv in a multi-threaded environment. Must always have corresponding EnvLockUnwrap. * \param all set to true for setenv and false for getenv. */ void EnvLockWrap(bool all = false); /// End code which is using setenv/getenv. /** For use with external libraries using unprotected setenv/getenv in a multi-threaded environment. \param all must be same as in corresponding EnvLockWrap. */ void EnvLockUnwrap(bool all = false); /// Use after fork() to reset all internal variables and release all locks. /** For use with external libraries using unprotected setenv/getenv in a multi-threaded environment. This function is deprecated. */ void EnvLockUnwrapComplete(void); /// Class to provide automatic locking/unlocking of environment on creation/destruction. /** For use with external libraries using unprotected setenv/getenv in a multi-threaded environment. \headerfile Utils.h arc/Utils.h */ class EnvLockWrapper { private: bool all_; public: /// Create a new environment lock for using setenv/getenv. /** \param all set to true for setenv and false for getenv. */ EnvLockWrapper(bool all = false):all_(all) { EnvLockWrap(all_); }; /// Release environment lock. 
~EnvLockWrapper(void) { EnvLockUnwrap(all_); }; }; /// Marks off a section of code which should not be interrupted by signals. /** \headerfile Utils.h arc/Utils.h */ class InterruptGuard { public: InterruptGuard(); ~InterruptGuard(); private: void (*saved_sigint_handler)(int); }; /// Portable function for obtaining description of last system error std::string StrError(int errnum = errno); /// Wrapper for pointer with automatic destruction /** If ordinary pointer is wrapped in instance of this class it will be automatically destroyed when instance is destroyed. This is useful for maintaining pointers in scope of one function. Only pointers returned by new() are supported. \headerfile Utils.h arc/Utils.h */ template class AutoPointer { private: T *object; void operator=(const AutoPointer&) {} AutoPointer(const AutoPointer&) : object(NULL) {} public: /// NULL pointer constructor AutoPointer(void) : object(NULL) {} /// Constructor which wraps pointer AutoPointer(T *o) : object(o) {} /// Destructor destroys wrapped object using delete() ~AutoPointer(void) { if (object) delete object; } void operator=(T* o) { if (object) delete object; object = o; } /// For referring wrapped object T& operator*(void) const { return *object; } /// For referring wrapped object T* operator->(void) const { return object; } /// Returns false if pointer is NULL and true otherwise. operator bool(void) const { return (object != NULL); } /// Returns true if pointer is NULL and false otherwise. bool operator!(void) const { return (object == NULL); } /// Cast to original pointer T* Ptr(void) const { return object; } /// Release referred object so that it can be passed to other container T* Release(void) { T* tmp = object; object = NULL; return tmp; } }; /// Wrapper for pointer with automatic destruction and multiple references /** If ordinary pointer is wrapped in instance of this class it will be automatically destroyed when all instances referring to it are destroyed. This is useful for maintaining pointers referred from multiple structures with automatic destruction of original object when last reference is destroyed. It is similar to Java approach with a difference that destruction time is strictly defined. Only pointers returned by new() are supported. This class is not thread-safe. \headerfile Utils.h arc/Utils.h */ template class CountedPointer { private: template class Base { private: Base(Base
(Base<P>&);
    public: int cnt; P *ptr; bool released; Base(P *p) : cnt(0), ptr(p), released(false) { add(); } ~Base(void) { if (ptr && !released) delete ptr; } Base<P>
    * add(void) { ++cnt; return this; } bool rem(void) { if (--cnt == 0) { if(!released) delete this; return true; } return false; } }; Base *object; public: CountedPointer(T *p = NULL) : object(new Base(p)) {} CountedPointer(const CountedPointer& p) : object(p.object->add()) {} ~CountedPointer(void) { object->rem(); } CountedPointer& operator=(T *p) { if (p != object->ptr) { object->rem(); object = new Base(p); } return *this; } CountedPointer& operator=(const CountedPointer& p) { if (p.object->ptr != object->ptr) { object->rem(); object = p.object->add(); } return *this; } /// For referring wrapped object T& operator*(void) const { return *(object->ptr); } /// For referring wrapped object T* operator->(void) const { return (object->ptr); } /// Returns false if pointer is NULL and true otherwise. operator bool(void) const { return ((object->ptr) != NULL); } /// Returns true if pointer is NULL and false otherwise. bool operator!(void) const { return ((object->ptr) == NULL); } /// Returns true if pointers are equal bool operator==(const CountedPointer& p) const { return ((object->ptr) == (p.object->ptr)); } /// Returns true if pointers are not equal bool operator!=(const CountedPointer& p) const { return ((object->ptr) != (p.object->ptr)); } /// Comparison operator bool operator<(const CountedPointer& p) const { return ((object->ptr) < (p.object->ptr)); } /// Cast to original pointer T* Ptr(void) const { return (object->ptr); } /// Release referred object so that it can be passed to other container T* Release(void) { T* tmp = object->ptr; object->released = true; return tmp; } }; /// Load library and keep persistent. bool PersistentLibraryInit(const std::string& name); /** @{ */ } // namespace Arc # endif // __ARC_UTILS_H__ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/JobPerfLog.cpp0000644000000000000000000000012313044716763023454 xustar000000000000000027 mtime=1486069235.226471 27 atime=1513200574.887706 29 ctime=1513200658.86873365 nordugrid-arc-5.4.2/src/hed/libs/common/JobPerfLog.cpp0000644000175000002070000000502613044716763023525 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #ifdef _MACOSX #include #include #endif #include #include "JobPerfLog.h" namespace Arc { JobPerfLog::JobPerfLog(): log_enabled(false) { } JobPerfLog::~JobPerfLog() { } void JobPerfLog::SetOutput(const std::string& filename) { log_path = filename; } void JobPerfLog::SetEnabled(bool enabled) { if(enabled != log_enabled) { log_enabled = enabled; }; } JobPerfRecord::JobPerfRecord(JobPerfLog& log):perf_log(log) { start_recorded = false; } JobPerfRecord::JobPerfRecord(JobPerfLog& log, const std::string& id):perf_log(log) { Start(id); } void JobPerfRecord::Start(const std::string& id) { start_recorded = false; if(&perf_log == NULL) return; if(!perf_log.GetEnabled()) return; struct timespec ts; #ifdef _MACOSX // OS X does not have clock_gettime, use clock_get_time clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); start_time.tv_sec = mts.tv_sec; start_time.tv_nsec = mts.tv_nsec; { #else if((clock_gettime(CLOCK_MONOTONIC, &start_time) == 0) || (clock_gettime(CLOCK_REALTIME, &start_time) == 0)) { #endif start_recorded = true; start_id = id; }; start_id = id; } void JobPerfRecord::End(const std::string& name) { if(start_recorded) { timespec end_time; #ifdef _MACOSX // OS X does not have clock_gettime, use clock_get_time clock_serv_t 
cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); end_time.tv_sec = mts.tv_sec; end_time.tv_nsec = mts.tv_nsec; { #else if((clock_gettime(CLOCK_MONOTONIC, &end_time) == 0) || (clock_gettime(CLOCK_REALTIME, &end_time) == 0)) { #endif perf_log.Log(name, start_id, start_time, end_time); }; start_recorded = false; }; } void JobPerfLog::Log(const std::string& name, const std::string& id, const timespec& start, const timespec& end) { if(!log_enabled) return; if(log_path.empty()) return; std::ofstream logout(log_path.c_str(), std::ofstream::app); if(logout.is_open()) { uint64_t delta = ((uint64_t)(end.tv_sec-start.tv_sec))*1000000000 + end.tv_nsec - start.tv_nsec; logout << Arc::Time().str(Arc::UTCTime) << "\t" << name << "\t" << id << "\t" << delta << " ns" << std::endl; }; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/ArcConfig.cpp0000644000000000000000000000012412771223617023314 xustar000000000000000027 mtime=1474635663.023405 27 atime=1513200574.910706 30 ctime=1513200658.843733344 nordugrid-arc-5.4.2/src/hed/libs/common/ArcConfig.cpp0000644000175000002070000000621712771223617023367 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "ArcConfig.h" namespace Arc { Config::Config(const char *filename) : file_name_(filename) { ReadFromFile(filename); } Config::~Config(void) { // NOP } static void _print(XMLNode& node, int skip) { int n; for (n = 0; n < skip; n++) std::cout << " "; std::string content = (std::string)node; std::cout << "* " << node.Name() << "(" << node.Size() << ")" << " = " << content << std::endl; for (n = 0;; n++) { XMLNode _node = node.Child(n); if (!_node) break; _print(_node, skip + 2); } } void Config::print(void) { _print(*((XMLNode*)this), 0); } bool Config::parse(const char *filename) { file_name_ = filename; return ReadFromFile(filename); } Config::Config(long cfg_ptr_addr) { Config *cfg = (Config*)cfg_ptr_addr; cfg->New(*this); } Config::Config(const Config& cfg) : XMLNode() { cfg.New(*this); file_name_ = cfg.file_name_; } void Config::save(const char *filename) { std::string str; GetDoc(str); std::ofstream out(filename); out << str; out.close(); } bool Config::elementtoenum(Arc::XMLNode pnode,const char* ename,int& val,const char* const opts[]) { std::string v = ename?pnode[ename]:pnode; if(v.empty()) return true; // default for(int n = 0;opts[n];++n) { if(v == opts[n]) { val = n; return true; }; }; return false; } bool Config::elementtobool(Arc::XMLNode pnode,const char* ename,bool& val) { std::string v = ename?pnode[ename]:pnode; if(v.empty()) return true; // default if((v == "true") || (v == "1")) { val=true; return true; }; if((v == "false") || (v == "0")) { val=false; return true; }; return false; } BaseConfig::BaseConfig() : plugin_paths(ArcLocation::GetPlugins()) {} void BaseConfig::AddPluginsPath(const std::string& path) { plugin_paths.push_back(path); } XMLNode BaseConfig::MakeConfig(XMLNode cfg) const { XMLNode mm = cfg.NewChild("ModuleManager"); for (std::list::const_iterator p = plugin_paths.begin(); p != plugin_paths.end(); ++p) mm.NewChild("Path") = *p; return mm; } void BaseConfig::AddCredential(const std::string& cred) { credential = cred; } void BaseConfig::AddPrivateKey(const std::string& path) { key = path; } void BaseConfig::AddCertificate(const std::string& path) { cert = path; } 
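  // Illustrative sketch of how a caller might combine the credential-related
  // setters of BaseConfig when preparing a configuration; the file paths
  // below are assumed examples only:
  //
  //   Arc::BaseConfig cfg;
  //   cfg.AddCertificate("/etc/grid-security/hostcert.pem");
  //   cfg.AddPrivateKey("/etc/grid-security/hostkey.pem");
  //   cfg.AddCADir("/etc/grid-security/certificates");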
void BaseConfig::AddProxy(const std::string& path) { proxy = path; } void BaseConfig::AddCAFile(const std::string& path) { cafile = path; } void BaseConfig::AddCADir(const std::string& path) { cadir = path; } void BaseConfig::AddOverlay(XMLNode cfg) { overlay.Destroy(); cfg.New(overlay); } void BaseConfig::GetOverlay(std::string fname) { overlay.Destroy(); if (fname.empty()) return; overlay.ReadFromFile(fname); } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/URL.h0000644000000000000000000000012412356741540021567 xustar000000000000000027 mtime=1404814176.549059 27 atime=1513200574.904706 30 ctime=1513200658.822733087 nordugrid-arc-5.4.2/src/hed/libs/common/URL.h0000644000175000002070000003634612356741540021650 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_URL_H__ #define __ARC_URL_H__ #include #include #include #include // Default ports for different protocols #define RC_DEFAULT_PORT 389 #define HTTP_DEFAULT_PORT 80 #define HTTPS_DEFAULT_PORT 443 #define HTTPG_DEFAULT_PORT 8443 #define LDAP_DEFAULT_PORT 389 #define FTP_DEFAULT_PORT 21 #define GSIFTP_DEFAULT_PORT 2811 #define LFC_DEFAULT_PORT 5010 #define XROOTD_DEFAULT_PORT 1094 namespace Arc { class URLLocation; /// Class to represent general URLs. /** The URL is split into protocol, hostname, port and path. This class tries * to follow RFC 3986 for splitting URLs, at least for protocol + host part. * It also accepts local file paths which are converted to file:path. * The usual system dependent file paths are supported. Relative paths are * converted to absolute paths by prepending them with current working * directory path. A file path can't start from # symbol. If the string * representation of URL starts from '@' then it is treated as path to a * file containing a list of URLs. * * A URL is parsed in the following way: \verbatim [protocol:][//[username:passwd@][host][:port]][;urloptions[;...]][/path[?httpoption[&...]][:metadataoption[:...]]] \endverbatim * The 'protocol' and 'host' parts are treated as case-insensitive and * to avoid confusion are converted to lowercase in constructor. Note that * 'path' is always converted to absolute path in the constructor. The meaning * of 'absolute' may depend upon URL type. For generic URL and local POSIX * file paths that means the path starts from / like \verbatim /path/to/file \endverbatim * For Windows paths the absolute path may look like \verbatim C:\path\to\file \endverbatim * It is important to note that path still can be empty. For referencing * a local file using an absolute path on a POSIX filesystem one may use either \verbatim file:///path/to/file or file:/path/to/file \endverbatim * The relative path will look like \verbatim file:to/file \endverbatim * For local Windows files possible URLs are \verbatim %file:C:\path\to\file or %file:to\file \endverbatim * URLs representing LDAP resources have a different structure of options * following the 'path' part: \verbatim ldap://host[:port][;urloptions[;...]][/path[?attributes[?scope[?filter]]]] \endverbatim * For LDAP URLs paths are converted from /key1=value1/.../keyN=valueN * notation to keyN=valueN,...,key1=value1 and hence path does not contain a * leading /. If an LDAP URL initially had its path in the second notation, * the leading / is treated as a separator only and is stripped. * * URLs of indexing services optionally may have locations specified * before the 'host' part \verbatim protocol://[location[;location[;...]]@][host][:port]... 
\endverbatim * The structure of the 'location' element is protocol specific. * \ingroup common * \headerfile URL.h arc/URL.h */ class URL { public: /// Empty constructor. URL object is invalid. URL(); /// Constructs a new URL from a string representation. /** * \param url The string representation of URL * \param encoded Set to true if URL is encoded according to RFC 3986 * \param defaultPort Port to use if 'url' doesn't specify port * \param defaultPath Path to use if 'url' doesn't specify path * \since Changed in 4.1.0. defaultPort and defaultPath arguments added. **/ URL(const std::string& url, bool encoded = false, int defaultPort = -1, const std::string& defaultPath = ""); /// Empty destructor. virtual ~URL(); /// Scope for LDAP URLs. enum Scope { base, onelevel, subtree }; /// Perform decoding of stored URL parts according to RFC 3986 /** This method is supposed to be used only if for some reason URL constructor was called with encoded=false for URL which was encoded. Use it only once. */ void URIDecode(void); /// Returns the protocol of the URL. const std::string& Protocol() const; /// Changes the protocol of the URL. void ChangeProtocol(const std::string& newprot); /// Returns the username of the URL. const std::string& Username() const; /// Returns the password of the URL. const std::string& Passwd() const; /// Returns the hostname of the URL. const std::string& Host() const; /// Changes the hostname of the URL. void ChangeHost(const std::string& newhost); /// Returns the port of the URL. int Port() const; /// Changes the port of the URL. void ChangePort(int newport); /// Returns the path of the URL. const std::string& Path() const; /// Returns the path of the URL with all options attached. std::string FullPath() const; /// Returns the path and all options, URI-encoded according to RFC 3986. /** Forward slashes ('/') in the path are not encoded but are encoded in * the options. */ std::string FullPathURIEncoded() const; /// Changes the path of the URL. void ChangePath(const std::string& newpath); /// Changes the path of the URL and all options attached. void ChangeFullPath(const std::string& newpath, bool encoded = false); /// Returns HTTP options if any. const std::map& HTTPOptions() const; /// Returns the value of an HTTP option. /** \param option The option whose value is returned. * \param undefined This value is returned if the HTTP option is * not defined. */ const std::string& HTTPOption(const std::string& option, const std::string& undefined = "") const; /// Adds a HTP option with the given value. /** \return false if overwrite is false and option already exists, true * otherwise. */ bool AddHTTPOption(const std::string& option, const std::string& value, bool overwrite = true); /// Removes a HTTP option if exists. /** \param option The option to remove. */ void RemoveHTTPOption(const std::string& option); /// Returns the LDAP attributes if any. const std::list& LDAPAttributes() const; /// Adds an LDAP attribute. void AddLDAPAttribute(const std::string& attribute); /// Returns the LDAP scope. Scope LDAPScope() const; /// Changes the LDAP scope. void ChangeLDAPScope(const Scope newscope); /// Returns the LDAP filter. const std::string& LDAPFilter() const; /// Changes the LDAP filter. void ChangeLDAPFilter(const std::string& newfilter); /// Returns URL options if any. const std::map& Options() const; /// Returns the value of a URL option. /** \param option The option whose value is returned. * \param undefined This value is returned if the URL option is * not defined. 
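 *
 * As an illustration (the URL string and option name are assumed examples):
 * \code
 * Arc::URL u("gsiftp://example.org:2811;threads=4/dir/file");
 * std::string threads = u.Option("threads", "1"); // yields "4"
 * \endcode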
*/ const std::string& Option(const std::string& option, const std::string& undefined = "") const; /// Returns metadata options if any. const std::map& MetaDataOptions() const; /// Returns the value of a metadata option. /** \param option The option whose value is returned. * \param undefined This value is returned if the metadata option is * not defined. */ const std::string& MetaDataOption(const std::string& option, const std::string& undefined = "") const; /// Adds a URL option with the given value. /** Note that some compilers may interpret AddOption("name", "value") as a * call to AddOption(const std::string&, bool) so it is recommended to use * explicit string types when calling this method. * \return false if overwrite is false and option already exists, true * otherwise. */ bool AddOption(const std::string& option, const std::string& value, bool overwrite = true); /// Adds a URL option where option has the format "name=value". /** \return false if overwrite is true and option already exists or if * option does not have the correct format. Returns true otherwise. */ bool AddOption(const std::string& option, bool overwrite = true); /// Adds a metadata option. void AddMetaDataOption(const std::string& option, const std::string& value, bool overwrite = true); /// Adds a Location. void AddLocation(const URLLocation& location); /// Returns the locations if any. const std::list& Locations() const; /// Returns the common location options if any. const std::map& CommonLocOptions() const; /// Returns the value of a common location option. /** \param option The option whose value is returned. * \param undefined This value is returned if the common location * option is not defined. */ const std::string& CommonLocOption(const std::string& option, const std::string& undefined = "") const; /// Removes a URL option if exists. /** \param option The option to remove. */ void RemoveOption(const std::string& option); /// Remove a metadata option if exits. /** \param option The option to remove. */ void RemoveMetaDataOption(const std::string& option); /// Returns a string representation of the URL including meta-options. virtual std::string str(bool encode = false) const; /// Returns a string representation of the URL without any options. virtual std::string plainstr(bool encode = false) const; /// Returns a string representation including options and locations. virtual std::string fullstr(bool encode = false) const; /// Returns a string representation with protocol, host and port only. virtual std::string ConnectionURL() const; /// Compares one URL to another. bool operator<(const URL& url) const; /// Is one URL equal to another? bool operator==(const URL& url) const; /// Check if instance holds valid URL. operator bool() const; /// Check if instance does not hold valid URL. bool operator!() const; /// Returns true if string matches url. bool StringMatches(const std::string& str) const; /// Parse a string of options separated by separator into an attribute->value map. std::map ParseOptions(const std::string& optstring, char separator, bool encoded = false); /// Returns a string representation of the options given in the options map. /** \param options Key-value map of options * \param separator The character that separates options * \param encode if set to true then options are encoded according to RFC 3986 */ static std::string OptionString(const std::map& options, char separator, bool encode = false); /// Perform encoding according to RFC 3986. /** This simply calls Arc::uri_encode(). 
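 *
 * A possible call (the input string is an assumed example):
 * \code
 * std::string enc = Arc::URL::URIEncode("dir name/file one");
 * \endcode
 * Characters outside the RFC 3986 unreserved set, such as the spaces above,
 * are %-encoded in the result.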
*/ static std::string URIEncode(const std::string& str); /// Perform decoding according to RFC 3986. /** This simply calls Arc::uri_unencode(). */ static std::string URIDecode(const std::string& str); protected: /// the url protocol. std::string protocol; /// username of the url. std::string username; /// password of the url. std::string passwd; /// hostname of the url. std::string host; /// if host is IPv6 numerical address notation. bool ip6addr; /// portnumber of the url. int port; /// the url path. std::string path; /// HTTP options of the url. std::map httpoptions; /// Meta data options std::map metadataoptions; /// LDAP attributes of the url. std::list ldapattributes; /// LDAP scope of the url. Scope ldapscope; /// LDAP filter of the url. std::string ldapfilter; /// options of the url. std::map urloptions; /// locations for index server URLs. std::list locations; /// common location options for index server URLs. std::map commonlocoptions; /// flag to describe validity of URL bool valid; /// a private method that converts an ldap basedn to a path. static std::string BaseDN2Path(const std::string&); /// a private method that converts an ldap path to a basedn. static std::string Path2BaseDN(const std::string&); /// Overloaded operator << to print a URL. friend std::ostream& operator<<(std::ostream& out, const URL& u); /// Convenience method for splitting schema specific part into path and options. void ParsePath(bool encoded = false); }; /// Class to hold a resolved URL location. /** It is specific to file indexing service registrations. * \ingroup common * \headerfile URL.h arc/URL.h */ class URLLocation : public URL { public: /// Creates a URLLocation from a string representation. URLLocation(const std::string& url = ""); /// Creates a URLLocation from a string representation and a name. URLLocation(const std::string& url, const std::string& name); /// Creates a URLLocation from a URL. URLLocation(const URL& url); /// Creates a URLLocation from a URL and a name. URLLocation(const URL& url, const std::string& name); /// Creates a URLLocation from options and a name. URLLocation(const std::map& options, const std::string& name); /// URLLocation destructor. virtual ~URLLocation(); /// Returns the URLLocation name. const std::string& Name() const; /// Returns a string representation of the URLLocation. virtual std::string str(bool encode = false) const; /// Returns a string representation including options and locations virtual std::string fullstr(bool encode = false) const; protected: /// the URLLocation name as registered in the indexing service. std::string name; }; /// Class to iterate through elements of a path. /** \ingroup common * \headerfile URL.h arc/URL.h */ class PathIterator { public: /// Constructor accepts path and stores it internally. /** If end is set to false iterator points at first element * in path. Otherwise selected element is one before last. 
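 *
 * Illustrative sketch of forward iteration (the path value is an assumed
 * example); each dereference is expected to yield a successively longer
 * prefix of the path:
 * \code
 * std::string p = "/grid/data/file.dat";
 * for (Arc::PathIterator it(p); it; ++it) {
 *   std::string prefix = *it;
 * }
 * \endcode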
*/ PathIterator(const std::string& path, bool end = false); ~PathIterator(); /// Advances iterator to point at next path element PathIterator& operator++(); /// Moves iterator to element before current PathIterator& operator--(); /// Return false when iterator moved outside path elements operator bool() const; /// Returns part of initial path from first till and including current std::string operator*() const; /// Returns part of initial path from one after current till end std::string Rest() const; private: const std::string& path; std::string::size_type pos; bool end; bool done; }; /// Reads a list of URLs from a file std::list ReadURLList(const URL& urllist); } // namespace Arc #endif // __ARC_URL_H__ nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/README0000644000000000000000000000012311001653037021620 xustar000000000000000027 mtime=1208440351.928622 27 atime=1513200574.873706 29 ctime=1513200658.80173283 nordugrid-arc-5.4.2/src/hed/libs/common/README0000644000175000002070000000007711001653037021672 0ustar00mockbuildmock00000000000000collection of classes and function used everywhere in the code nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/Counter.h0000644000000000000000000000012412111140470022524 xustar000000000000000027 mtime=1361363256.081099 27 atime=1513200574.922706 30 ctime=1513200658.830733185 nordugrid-arc-5.4.2/src/hed/libs/common/Counter.h0000644000175000002070000004412412111140470022576 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- // Counter.h #ifndef __Counter__ #define __Counter__ #include namespace Arc { /// A time very far in the future. extern const Glib::TimeVal ETERNAL; /// A time very far in the past. extern const Glib::TimeVal HISTORIC; // Some forward declarations. class Counter; class CounterTicket; class ExpirationReminder; /// A class defining a common interface for counters. /** This class defines a common interface for counters as well as some common functionality. The purpose of a counter is to provide housekeeping some resource such as e.g. disk space, memory or network bandwidth. The counter itself will not be aware of what kind of resource it limits the use of. Neither will it be aware of what unit is being used to measure that resource. Counters are thus very similar to semaphores. Furthermore, counters are designed to handle concurrent operations from multiple threads/processes in a consistent manner. Every counter has a limit, an excess limit and a value. The limit is a number that specify how many units are available for reservation. The value is the number of units that are currently available for reservation, i.e. has not already been reserved. The excess limit specify how many extra units can be reserved for high priority needs even if there are no normal units available for reservation. The excess limit is similar to the credit limit of e.g. a VISA card. The users of the resource must thus first call the counter in order to make a reservation of an appropriate amount of the resource, then allocate and use the resource and finally call the counter again to cancel the reservation. Typical usage is: \code // Declare a counter. Replace XYZ by some appropriate kind of // counter and provide required parameters. Unit is MB. XYZCounter memory(...); ... // Make a reservation of memory for 2000000 doubles. CounterTicket tick = memory.reserve(2*sizeof(double)); // Use the memory. double* A=new double[2000000]; doSomething(A); delete[] A; // Cancel the reservation. 
tick.cancel(); \endcode There are also alternative ways to make reservations, including self-expiring reservations, prioritized reservations and reservations that fail if they cannot be made fast enough. For self expiring reservations, a duration is provided in the reserve call: \code tick = memory.reserve(2*sizeof(double), Glib::TimeVal(1,0)); \endcode A self-expiring reservation can be cancelled explicitly before it expires, but if it is not cancelled it will expire automatically when the duration has passed. The default value for the duration is ETERNAL, which means that the reservation will not be cancelled automatically. Prioritized reservations may use the excess limit and succeed immediately even if there are no normal units available for reservation. The value of the counter will in this case become negative. A prioritized reservation looks like this: \code tick = memory.reserve(2*sizeof(double), Glib::TimeVal(1,0), true); \endcode Finally, a time out option can be provided for a reservation. If some task should be performed within two seconds or not at all, the reservation can look like this: \code tick = memory.reserve(2*sizeof(double), Glib::TimeVal(1,0), true, Glib::TimeVal(2,0)); if (tick.isValid()) doSomething(...); \endcode \ingroup common \headerfile Counter.h arc/Counter.h */ class Counter { protected: /// A typedef of identification numbers for reservation. /** This is a type that is used as identification numbers (keys) for referencing of reservations. It is used internally in counters for book keeping of reservations as well as in the CounterTicket class in order to be able to cancel and extend reservations. */ typedef unsigned long long int IDType; /// Default constructor. /** This is the default constructor. Since Counter is an abstract class, it should only be used by subclasses. Therefore it is protected. Furthermore, since the Counter class has no attributes, nothing needs to be initialized and thus this constructor is empty. */ Counter(); public: /// The destructor. /** This is the destructor of the Counter class. Since the Counter class has no attributes, nothing needs to be cleaned up and thus the destructor is empty. */ virtual ~Counter(); /// Returns the current limit of the counter. /** This method returns the current limit of the counter, i.e. how many units can be reserved simultaneously by different threads without claiming high priority. @return The current limit of the counter. */ virtual int getLimit() = 0; /// Sets the limit of the counter. /** This method sets a new limit for the counter. @param newLimit The new limit, an absolute number. @return The new limit. */ virtual int setLimit(int newLimit) = 0; /// Changes the limit of the counter. /** Changes the limit of the counter by adding a certain amount to the current limit. @param amount The amount by which to change the limit. @return The new limit. */ virtual int changeLimit(int amount) = 0; /// Returns the excess limit of the counter. /** Returns the excess limit of the counter, i.e. by how much the usual limit may be exceeded by prioritized reservations. @return The excess limit. */ virtual int getExcess() = 0; /// Sets the excess limit of the counter. /** This method sets a new excess limit for the counter. @param newExcess The new excess limit, an absolute number. @return The new excess limit. */ virtual int setExcess(int newExcess) = 0; /// Changes the excess limit of the counter. /** Changes the excess limit of the counter by adding a certain amount to the current excess limit. 
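For a hypothetical concrete counter (XYZCounter follows the placeholder
convention used in the class description), a call could look like:
\code
XYZCounter memory(...);
memory.changeExcess(10); // raise the excess limit by 10 units
\endcode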
@param amount The amount by which to change the excess limit. @return The new excess limit. */ virtual int changeExcess(int amount) = 0; /// Returns the current value of the counter. /** Returns the current value of the counter, i.e. the number of unreserved units. Initially, the value is equal to the limit of the counter. When a reservation is made, the the value is decreased. Normally, the value should never be negative, but this may happen if there are prioritized reservations. It can also happen if the limit is decreased after some reservations have been made, since reservations are never revoked. @return The current value of the counter. */ virtual int getValue() = 0; /// Makes a reservation from the counter. /** This method makes a reservation from the counter. If the current value of the counter is too low to allow for the reservation, the method blocks until the reservation is possible or times out. @param amount The amount to reserve, default value is 1. @param duration The duration of a self expiring reservation, default is that it lasts forever. @param prioritized Whether this reservation is prioritized and thus allowed to use the excess limit. @param timeOut The maximum time to block if the value of the counter is too low, default is to allow "eternal" blocking. @return A CounterTicket that can be queried about the status of the reservation as well as for cancellations and extensions. */ virtual CounterTicket reserve(int amount = 1, Glib::TimeVal duration = ETERNAL, bool prioritized = false, Glib::TimeVal timeOut = ETERNAL) = 0; protected: /// Cancellation of a reservation. /** This method cancels a reservation. It is called by the CounterTicket that corresponds to the reservation. @param reservationID The identity number (key) of the reservation to cancel. */ virtual void cancel(IDType reservationID) = 0; /// Extension of a reservation. /** This method extends a reservation. It is called by the CounterTicket that corresponds to the reservation. @param reservationID Used for input as well as output. Contains the identification number of the original reservation on entry and the new identification number of the extended reservation on exit. @param expiryTime Used for input as well as output. Contains the expiry time of the original reservation on entry and the new expiry time of the extended reservation on exit. @param duration The time by which to extend the reservation. The new expiration time is computed based on the current time, NOT the previous expiration time. */ virtual void extend(IDType& reservationID, Glib::TimeVal& expiryTime, Glib::TimeVal duration = ETERNAL) = 0; /// Get the current time. /** Returns the current time. An "adapter method" for the assign_current_time() method in the Glib::TimeVal class. return The current time. */ Glib::TimeVal getCurrentTime(); /// Computes an expiry time. /** This method computes an expiry time by adding a duration to the current time. @param duration The duration. @return The expiry time. */ Glib::TimeVal getExpiryTime(Glib::TimeVal duration); /// A "relay method" for a constructor of the CounterTicket class. /** This method acts as a relay for one of the constructors of the CounterTicket class. That constructor is private, but needs to be accessible from the subclasses of Counter (but not from anywhere else). In order not to have to declare every possible subclass of Counter as a friend of CounterTicket, only the base class Counter is a friend and its subclasses access the constructor through this method. 
(If C++ had supported "package access", as Java does, this trick would not have been necessary.) @param reservationID The identity number of the reservation corresponding to the CounterTicket. @param expiryTime the expiry time of the reservation corresponding to the CounterTicket. @param counter The Counter from which the reservation has been made. @return The counter ticket that has been created. */ CounterTicket getCounterTicket(Counter::IDType reservationID, Glib::TimeVal expiryTime, Counter *counter); /// A "relay method" for the constructor of ExpirationReminder. /** This method acts as a relay for one of the constructors of the ExpirationReminder class. That constructor is private, but needs to be accessible from the subclasses of Counter (but not from anywhere else). In order not to have to declare every possible subclass of Counter as a friend of ExpirationReminder, only the base class Counter is a friend and its subclasses access the constructor through this method. (If C++ had supported "package access", as Java does, this trick would not have been necessary.) @param expTime the expiry time of the reservation corresponding to the ExpirationReminder. @param resID The identity number of the reservation corresponding to the ExpirationReminder. @return The ExpirationReminder that has been created. */ ExpirationReminder getExpirationReminder(Glib::TimeVal expTime, Counter::IDType resID); private: /// Copy constructor, should not be used. /** A private copy constructor, since Counters should never be copied. It should be impossible to use, but if that would happen by accident the program will exit with the EXIT_FAILURE code. */ Counter(const Counter& unique); /// Assignment operator, should not be used. /** A private assignment operator, since Counters should never be assigned. It should be impossible to use, but if that would happen by accident the program will exit with the EXIT_FAILURE code. */ void operator=(const Counter& unique); /// The CounterTicket class needs to be a friend. friend class CounterTicket; /// The ExpirationReminder class needs to be a friend. friend class ExpirationReminder; }; /// A class for "tickets" that correspond to counter reservations. /** This is a class for reservation tickets. When a reservation is made from a Counter, a ReservationTicket is returned. This ticket can then be queried about the validity of a reservation. It can also be used for cancellation and extension of reservations. Typical usage is: \code // Declare a counter. Replace XYZ by some appropriate kind of // counter and provide required parameters. Unit is MB. XYZCounter memory(...); ... // Make a reservation of memory for 2000000 doubles. CounterTicket tick = memory.reserve(2*sizeof(double)); // Use the memory. double* A=new double[2000000]; doSomething(A); delete[] A; // Cancel the reservation. tick.cancel(); \endcode \ingroup common \headerfile Counter.h arc/Counter.h */ class CounterTicket { public: /// The default constructor. /** This is the default constructor. It creates a CounterTicket that is not valid. The ticket object that is created can later be assigned a ticket that is returned by the reserve() method of a Counter. */ CounterTicket(); /// Returns the validity of a CounterTicket. /** This method checks whether a CounterTicket is valid. The ticket was probably returned earlier by the reserve() method of a Counter but the corresponding reservation may have expired. @return The validity of the ticket. */ bool isValid(); /// Extends a reservation. 
/** Extends a self-expiring reservation. In order to succeed the extension should be made before the previous reservation expires. @param duration The time by which to extend the reservation. The new expiration time is computed based on the current time, NOT the previous expiration time. */ void extend(Glib::TimeVal duration); /// Cancels a reservation. /** This method is called to cancel a reservation. It may be called also for self-expiring reservations, which will then be cancelled before they were originally planned to expire. */ void cancel(); private: /// A private constructor. /** This constructor creates an CounterTicket containing the specified expiry time and identity number of a reservation besides a pointer to the counter from which the reservation was made. In order to prevent unintended use, it is private. Because the Counter class must be able to use this constructor, it is declared to be a friend of this class. @param reservationID The identification number of the reservation. @param expiryTime The expiry time of the reservation. @param counter A pointer to the counter from which the reservation was made. */ CounterTicket(Counter::IDType reservationID, Glib::TimeVal expiryTime, Counter *counter); /// The identification number of the corresponding reservation. Counter::IDType reservationID; /// The expiry time of the corresponding reservation. Glib::TimeVal expiryTime; /// A pointer to the Counter from which the reservation was made. Counter *counter; //! The Counter class needs to be a friend. friend class Counter; }; /// A class intended for internal use within counters. /** This class is used for "reminder objects" that are used for automatic deallocation of self-expiring reservations. \ingroup common \headerfile Counter.h arc/Counter.h */ class ExpirationReminder { public: /// Less than operator, compares "soonness". /** This is the less than operator for the ExpirationReminder class. It compares the priority of such objects with respect to which reservation expires first. It is used when reminder objects are inserted in a priority queue in order to allways place the next reservation to expire at the top. */ bool operator<(const ExpirationReminder& other) const; /// Returns the expiry time. /** This method returns the expiry time of the reservation that this ExpirationReminder is associated with. @return The expiry time. */ Glib::TimeVal getExpiryTime() const; /// Returns the identification number of the reservation. /** This method returns the identification number of the self-expiring reservation that this ExpirationReminder is associated with. @return The identification number. */ Counter::IDType getReservationID() const; private: /// The constructor. /** This constructor creates an ExpirationReminder containing the specified expiry time and identity number of a reservation. In order to prevent unintended use, it is private. Because the Counter class must be able to use this constructor, it is declared to be a friend of this class. @param expiryTime The expiry time of the reservation. @param reservationID The identification number of the reservation. */ ExpirationReminder(Glib::TimeVal expiryTime, Counter::IDType reservationID); /// The expiry time of the corresponding reservation. Glib::TimeVal expiryTime; /// The identification number of t he corresponding reservation. Counter::IDType reservationID; /// The Counter class needs to be a friend. 
friend class Counter; }; } #endif nordugrid-arc-5.4.2/src/hed/libs/common/PaxHeaders.7502/StringConv.h0000644000000000000000000000012412733561721023221 xustar000000000000000027 mtime=1466885073.986422 27 atime=1513200574.863706 30 ctime=1513200658.820733063 nordugrid-arc-5.4.2/src/hed/libs/common/StringConv.h0000644000175000002070000002055412733561721023274 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef ARCLIB_STRINGCONV #define ARCLIB_STRINGCONV #include #include #include #include #include namespace Arc { /** \addtogroup common * @{ */ extern Logger stringLogger; /// This method converts a string to any type. template T stringto(const std::string& s) { T t; if (s.empty()) { stringLogger.msg(ERROR, "Empty string"); return 0; } std::stringstream ss(s); ss >> t; if (ss.fail()) { stringLogger.msg(ERROR, "Conversion failed: %s", s); return 0; } if (!ss.eof()) stringLogger.msg(WARNING, "Full string not used: %s", s); return t; } /// This method converts a string to any type but lets calling function process errors. template bool stringto(const std::string& s, T& t) { t = 0; if (s.empty()) return false; std::stringstream ss(s); ss >> t; if (ss.fail()) return false; if (!ss.eof()) return false; return true; } #define stringtoi(A) stringto < int > ((A)) #define stringtoui(A) stringto < unsigned int > ((A)) #define stringtol(A) stringto < long > ((A)) #define stringtoll(A) stringto < long long > ((A)) #define stringtoul(A) stringto < unsigned long > ((A)) #define stringtoull(A) stringto < unsigned long long > ((A)) #define stringtof(A) stringto < float > ((A)) #define stringtod(A) stringto < double > ((A)) #define stringtold(A) stringto < long double > ((A)) /// Convert string to integer with specified base. /** \return false if any argument is wrong. */ bool strtoint(const std::string& s, signed int& t, int base = 10); /// Convert string to unsigned integer with specified base. /** \return false if any argument is wrong. */ bool strtoint(const std::string& s, unsigned int& t, int base = 10); /// Convert string to long integer with specified base. /** \return false if any argument is wrong. */ bool strtoint(const std::string& s, signed long& t, int base = 10); /// Convert string to unsigned long integer with specified base. /** \return false if any argument is wrong. */ bool strtoint(const std::string& s, unsigned long& t, int base = 10); /// Convert string to long long integer with specified base. /** \return false if any argument is wrong. */ bool strtoint(const std::string& s, signed long long& t, int base = 10); /// Convert string to unsigned long long integer with specified base. /** \return false if any argument is wrong. */ bool strtoint(const std::string& s, unsigned long long& t, int base = 10); /// This method converts any type to a string of the width given. template std::string tostring(T t, int width = 0, int precision = 0) { std::stringstream ss; if (precision) ss << std::setprecision(precision); ss << std::setw(width) << t; return ss.str(); } /// Convert long long integer to textual representation for specified base. /** The result is left-padded with zeroes to make the string size width. */ std::string inttostr(signed long long t, int base = 10, int width = 0); /// Convert unsigned long long integer to textual representation for specified base. /** The result is left-padded with zeroes to make the string size width. */ std::string inttostr(unsigned long long t, int base = 10, int width = 0); /// Convert integer to textual representation for specied base. 
/** The result is left-padded with zeroes to make the string size width. */ inline std::string inttostr(signed int t, int base = 10, int width = 0) { return inttostr((signed long long)t,base,width); } /// Convert unsigned integer to textual representation for specied base. /** The result is left-padded with zeroes to make the string size width. */ inline std::string inttostr(unsigned int t, int base = 10, int width = 0) { return inttostr((unsigned long long)t,base,width); } /// Convert long integer to textual representation for specied base. /** The result is left-padded with zeroes to make the string size width. */ inline std::string inttostr(signed long t, int base = 10, int width = 0) { return inttostr((signed long long)t,base,width); } /// Convert unsigned long integer to textual representation for specied base. /** The result is left-padded with zeroes to make the string size width. */ inline std::string inttostr(unsigned long t, int base = 10, int width = 0) { return inttostr((unsigned long long)t,base,width); } /// Convert bool to textual representation, i.e. "true" or "false". inline std::string booltostr(bool b) { return b ? "true" : "false"; } /// Convert string to bool. Simply checks string if equal to "true" or "1". inline bool strtobool(const std::string& s) { return s == "true" || s == "1"; } /// Convert string to bool. /** Checks whether string is equal to one of "true", "false", "1" or "0", and if not returns false. If equal, true is returned and the bool reference is set to true, if string equals "true" or "1", otherwise it is set to false. */ inline bool strtobool(const std::string& s, bool& b) { if (s == "true" || s == "1" || s == "false" || s == "0") { b = (s == "true" || s == "1"); return true; } return false; } /// This method converts the given string to lower case. std::string lower(const std::string& s); /// This method converts the given string to upper case. std::string upper(const std::string& s); /// This method tokenizes string. void tokenize(const std::string& str, std::vector& tokens, const std::string& delimiters = " ", const std::string& start_quotes = "", const std::string& end_quotes = ""); /// This method tokenizes string. void tokenize(const std::string& str, std::list& tokens, const std::string& delimiters = " ", const std::string& start_quotes = "", const std::string& end_quotes = ""); /// This method extracts first token in string str starting at pos. std::string::size_type get_token(std::string& token, const std::string& str, std::string::size_type pos, const std::string& delimiters = " ", const std::string& start_quotes = "", const std::string& end_quotes = ""); /// This method removes given separators from the beginning and the end of the string. std::string trim(const std::string& str, const char *sep = NULL); /// This method removes blank lines from the passed text string. Lines with only space on them are considered blank. std::string strip(const std::string& str); /// Join all the elements in strlist using delimiter /** * \since Added in 4.1.0. **/ std::string join(const std::list& strlist, const std::string& delimiter); /// Join all the elements in strlist using delimiter /** * \since Added in 4.1.1. **/ std::string join(const std::vector& strlist, const std::string& delimiter); /// This method %-encodes characters in URI str. /** Characters which are not unreserved according to RFC 3986 are encoded. If encode_slash is true forward slashes will also be encoded. It is useful to set encode_slash to false when encoding full paths. 
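For example (the path below is an assumed illustration):
\code
std::string enc = Arc::uri_encode("/data/my file.txt", false);
\endcode
With encode_slash set to false the '/' separators are left as they are while
the space is %-encoded.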
*/ std::string uri_encode(const std::string& str, bool encode_slash); /// This method unencodes the %-encoded URI str. std::string uri_unencode(const std::string& str); ///Convert dn to rdn: /O=Grid/OU=Knowarc/CN=abc ---> CN=abc,OU=Knowarc,O=Grid. std::string convert_to_rdn(const std::string& dn); /// Type of escaping or encoding to use. typedef enum { escape_char, ///< place the escape character before the character being escaped escape_octal, ///< octal encoding of the character escape_hex, ///< hex encoding of the character (lower case) escape_hex_upper ///< hex encoding of the character (upper case) } escape_type; /// Escape or encode the given chars in str using the escape character esc. /** If excl is true then escape all characters not in chars. */ std::string escape_chars(const std::string& str, const std::string& chars, char esc, bool excl, escape_type type = escape_char); /// Unescape or unencode characters in str escaped with esc. std::string unescape_chars(const std::string& str, char esc, escape_type type = escape_char); /** @} */ } // namespace Arc #endif // ARCLIB_STRINGCONV nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/xmlsec0000644000000000000000000000013213214316023020665 xustar000000000000000030 mtime=1513200659.542741893 30 atime=1513200668.720854145 30 ctime=1513200659.542741893 nordugrid-arc-5.4.2/src/hed/libs/xmlsec/0000755000175000002070000000000013214316023021010 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/xmlsec/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712231165602023005 xustar000000000000000027 mtime=1382345602.148497 30 atime=1513200599.082002435 30 ctime=1513200659.537741832 nordugrid-arc-5.4.2/src/hed/libs/xmlsec/Makefile.am0000644000175000002070000000206512231165602023052 0ustar00mockbuildmock00000000000000lib_LTLIBRARIES = libarcxmlsec.la libarcxmlsec_ladir = $(pkgincludedir)/xmlsec libarcxmlsec_la_HEADERS = XmlSecUtils.h XMLSecNode.h saml_util.h libarcxmlsec_la_SOURCES = XmlSecUtils.cpp XMLSecNode.cpp saml_util.cpp libarcxmlsec_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) \ $(XMLSEC_OPENSSL_CFLAGS) $(XMLSEC_CFLAGS) $(AM_CXXFLAGS) libarcxmlsec_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(OPENSSL_LIBS) \ $(XMLSEC_OPENSSL_LIBS) $(XMLSEC_LIBS) $(ZLIB_LIBS) libarcxmlsec_la_LDFLAGS = -version-info 3:0:0 noinst_PROGRAMS = test_xmlsecnode test_xmlsecnode_SOURCES = test_xmlsecnode.cpp test_xmlsecnode_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(XMLSEC_CFLAGS) $(AM_CXXFLAGS) test_xmlsecnode_LDADD = \ libarcxmlsec.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(XMLSEC_LIBS) $(EXTRA_LIBS) nordugrid-arc-5.4.2/src/hed/libs/xmlsec/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315727023021 xustar000000000000000030 mtime=1513200599.137003107 30 atime=1513200648.443606147 30 ctime=1513200659.538741844 nordugrid-arc-5.4.2/src/hed/libs/xmlsec/Makefile.in0000644000175000002070000007750613214315727023106 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ noinst_PROGRAMS = test_xmlsecnode$(EXEEXT) subdir = src/hed/libs/xmlsec DIST_COMMON = $(libarcxmlsec_la_HEADERS) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libdir)" \ "$(DESTDIR)$(libarcxmlsec_ladir)" LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = libarcxmlsec_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libarcxmlsec_la_OBJECTS = libarcxmlsec_la-XmlSecUtils.lo \ libarcxmlsec_la-XMLSecNode.lo libarcxmlsec_la-saml_util.lo libarcxmlsec_la_OBJECTS = $(am_libarcxmlsec_la_OBJECTS) libarcxmlsec_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ 
$(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarcxmlsec_la_CXXFLAGS) $(CXXFLAGS) \ $(libarcxmlsec_la_LDFLAGS) $(LDFLAGS) -o $@ PROGRAMS = $(noinst_PROGRAMS) am_test_xmlsecnode_OBJECTS = \ test_xmlsecnode-test_xmlsecnode.$(OBJEXT) test_xmlsecnode_OBJECTS = $(am_test_xmlsecnode_OBJECTS) test_xmlsecnode_DEPENDENCIES = libarcxmlsec.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) test_xmlsecnode_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(test_xmlsecnode_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcxmlsec_la_SOURCES) $(test_xmlsecnode_SOURCES) DIST_SOURCES = $(libarcxmlsec_la_SOURCES) $(test_xmlsecnode_SOURCES) HEADERS = $(libarcxmlsec_la_HEADERS) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ 
DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = 
@PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ lib_LTLIBRARIES = libarcxmlsec.la libarcxmlsec_ladir = $(pkgincludedir)/xmlsec libarcxmlsec_la_HEADERS = XmlSecUtils.h XMLSecNode.h saml_util.h libarcxmlsec_la_SOURCES = XmlSecUtils.cpp XMLSecNode.cpp saml_util.cpp libarcxmlsec_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) 
$(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) \ $(XMLSEC_OPENSSL_CFLAGS) $(XMLSEC_CFLAGS) $(AM_CXXFLAGS) libarcxmlsec_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(OPENSSL_LIBS) \ $(XMLSEC_OPENSSL_LIBS) $(XMLSEC_LIBS) $(ZLIB_LIBS) libarcxmlsec_la_LDFLAGS = -version-info 3:0:0 test_xmlsecnode_SOURCES = test_xmlsecnode.cpp test_xmlsecnode_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(XMLSEC_CFLAGS) $(AM_CXXFLAGS) test_xmlsecnode_LDADD = \ libarcxmlsec.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(XMLSEC_LIBS) $(EXTRA_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/xmlsec/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/xmlsec/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcxmlsec.la: $(libarcxmlsec_la_OBJECTS) $(libarcxmlsec_la_DEPENDENCIES) $(libarcxmlsec_la_LINK) -rpath $(libdir) $(libarcxmlsec_la_OBJECTS) $(libarcxmlsec_la_LIBADD) $(LIBS) clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo 
"$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list test_xmlsecnode$(EXEEXT): $(test_xmlsecnode_OBJECTS) $(test_xmlsecnode_DEPENDENCIES) @rm -f test_xmlsecnode$(EXEEXT) $(test_xmlsecnode_LINK) $(test_xmlsecnode_OBJECTS) $(test_xmlsecnode_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcxmlsec_la-XMLSecNode.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcxmlsec_la-XmlSecUtils.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcxmlsec_la-saml_util.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_xmlsecnode-test_xmlsecnode.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcxmlsec_la-XmlSecUtils.lo: XmlSecUtils.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcxmlsec_la_CXXFLAGS) $(CXXFLAGS) -MT libarcxmlsec_la-XmlSecUtils.lo -MD -MP -MF $(DEPDIR)/libarcxmlsec_la-XmlSecUtils.Tpo -c -o libarcxmlsec_la-XmlSecUtils.lo `test -f 'XmlSecUtils.cpp' || echo '$(srcdir)/'`XmlSecUtils.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcxmlsec_la-XmlSecUtils.Tpo $(DEPDIR)/libarcxmlsec_la-XmlSecUtils.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XmlSecUtils.cpp' object='libarcxmlsec_la-XmlSecUtils.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcxmlsec_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcxmlsec_la-XmlSecUtils.lo `test -f 'XmlSecUtils.cpp' || echo '$(srcdir)/'`XmlSecUtils.cpp libarcxmlsec_la-XMLSecNode.lo: XMLSecNode.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcxmlsec_la_CXXFLAGS) $(CXXFLAGS) -MT libarcxmlsec_la-XMLSecNode.lo -MD -MP -MF $(DEPDIR)/libarcxmlsec_la-XMLSecNode.Tpo -c -o libarcxmlsec_la-XMLSecNode.lo `test -f 'XMLSecNode.cpp' || echo '$(srcdir)/'`XMLSecNode.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcxmlsec_la-XMLSecNode.Tpo 
$(DEPDIR)/libarcxmlsec_la-XMLSecNode.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XMLSecNode.cpp' object='libarcxmlsec_la-XMLSecNode.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcxmlsec_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcxmlsec_la-XMLSecNode.lo `test -f 'XMLSecNode.cpp' || echo '$(srcdir)/'`XMLSecNode.cpp libarcxmlsec_la-saml_util.lo: saml_util.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcxmlsec_la_CXXFLAGS) $(CXXFLAGS) -MT libarcxmlsec_la-saml_util.lo -MD -MP -MF $(DEPDIR)/libarcxmlsec_la-saml_util.Tpo -c -o libarcxmlsec_la-saml_util.lo `test -f 'saml_util.cpp' || echo '$(srcdir)/'`saml_util.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcxmlsec_la-saml_util.Tpo $(DEPDIR)/libarcxmlsec_la-saml_util.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='saml_util.cpp' object='libarcxmlsec_la-saml_util.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcxmlsec_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcxmlsec_la-saml_util.lo `test -f 'saml_util.cpp' || echo '$(srcdir)/'`saml_util.cpp test_xmlsecnode-test_xmlsecnode.o: test_xmlsecnode.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_xmlsecnode_CXXFLAGS) $(CXXFLAGS) -MT test_xmlsecnode-test_xmlsecnode.o -MD -MP -MF $(DEPDIR)/test_xmlsecnode-test_xmlsecnode.Tpo -c -o test_xmlsecnode-test_xmlsecnode.o `test -f 'test_xmlsecnode.cpp' || echo '$(srcdir)/'`test_xmlsecnode.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_xmlsecnode-test_xmlsecnode.Tpo $(DEPDIR)/test_xmlsecnode-test_xmlsecnode.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_xmlsecnode.cpp' object='test_xmlsecnode-test_xmlsecnode.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_xmlsecnode_CXXFLAGS) $(CXXFLAGS) -c -o test_xmlsecnode-test_xmlsecnode.o `test -f 'test_xmlsecnode.cpp' || echo '$(srcdir)/'`test_xmlsecnode.cpp test_xmlsecnode-test_xmlsecnode.obj: test_xmlsecnode.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_xmlsecnode_CXXFLAGS) $(CXXFLAGS) -MT test_xmlsecnode-test_xmlsecnode.obj -MD -MP -MF $(DEPDIR)/test_xmlsecnode-test_xmlsecnode.Tpo -c -o test_xmlsecnode-test_xmlsecnode.obj `if test -f 'test_xmlsecnode.cpp'; then $(CYGPATH_W) 'test_xmlsecnode.cpp'; else $(CYGPATH_W) '$(srcdir)/test_xmlsecnode.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_xmlsecnode-test_xmlsecnode.Tpo $(DEPDIR)/test_xmlsecnode-test_xmlsecnode.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_xmlsecnode.cpp' object='test_xmlsecnode-test_xmlsecnode.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(test_xmlsecnode_CXXFLAGS) $(CXXFLAGS) -c -o test_xmlsecnode-test_xmlsecnode.obj `if test -f 'test_xmlsecnode.cpp'; then $(CYGPATH_W) 'test_xmlsecnode.cpp'; else $(CYGPATH_W) '$(srcdir)/test_xmlsecnode.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libarcxmlsec_laHEADERS: $(libarcxmlsec_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarcxmlsec_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarcxmlsec_ladir)" @list='$(libarcxmlsec_la_HEADERS)'; test -n "$(libarcxmlsec_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarcxmlsec_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarcxmlsec_ladir)" || exit $$?; \ done uninstall-libarcxmlsec_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarcxmlsec_la_HEADERS)'; test -n "$(libarcxmlsec_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarcxmlsec_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libarcxmlsec_ladir)" && rm -f $$files ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) $(HEADERS) installdirs: for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(libarcxmlsec_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ clean-noinstPROGRAMS mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-libarcxmlsec_laHEADERS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-libLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-libLTLIBRARIES \ uninstall-libarcxmlsec_laHEADERS .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libLTLIBRARIES clean-libtool clean-noinstPROGRAMS ctags \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-libLTLIBRARIES \ install-libarcxmlsec_laHEADERS install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-libLTLIBRARIES \ uninstall-libarcxmlsec_laHEADERS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/xmlsec/PaxHeaders.7502/XmlSecUtils.cpp0000644000000000000000000000012411620162637023674 xustar000000000000000027 mtime=1312875935.851842 27 atime=1513200574.978707 30 ctime=1513200659.539741857 nordugrid-arc-5.4.2/src/hed/libs/xmlsec/XmlSecUtils.cpp0000644000175000002070000003371111620162637023746 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include // Workaround for include bugs in xmlsec #include #include #include #include #include #include #include #include #include #include #include #ifdef CHARSET_EBCDIC #include #endif #include #include "XmlSecUtils.h" namespace Arc { int passphrase_callback(char* buf, int size, int /* rwflag */, void *) { int len; char prompt[128]; snprintf(prompt, sizeof(prompt), "Enter passphrase for the key file: \n"); int r = EVP_read_pw_string(buf, size, prompt, 0); if(r != 0) { std::cerr<<"Failed to read passphrase from stdin"< #endif #include // Workaround for include bugs in xmlsec #include #include #include #include #include //#include #include #include #include #include #ifdef CHARSET_EBCDIC #include #endif #include "XmlSecUtils.h" #include "XMLSecNode.h" namespace Arc { #define SAML_NAMESPACE "urn:oasis:names:tc:SAML:1.0:assertion" #define SAML2_NAMESPACE "urn:oasis:names:tc:SAML:2.0:assertion" #define SAMLP_NAMESPACE "urn:oasis:names:tc:SAML:2.0:protocol" #define XENC_NAMESPACE "http://www.w3.org/2001/04/xmlenc#" #define DSIG_NAMESPACE "http://www.w3.org/2000/09/xmldsig#" XMLSecNode::XMLSecNode(XMLNode& node):XMLNode(node) { if(!node_) return; if(node_->type != XML_ELEMENT_NODE) { node_=NULL; return; }; } XMLSecNode::~XMLSecNode(void) { } void XMLSecNode::AddSignatureTemplate(const std::string& id_name, const SignatureMethod sign_method, const std::string& incl_namespaces) { xmlNodePtr signature = NULL; xmlNodePtr reference = NULL; if(sign_method == RSA_SHA1) signature = xmlSecTmplSignatureCreate(NULL, xmlSecTransformExclC14NId, xmlSecTransformRsaSha1Id, NULL); else signature = xmlSecTmplSignatureCreate(NULL, xmlSecTransformExclC14NId, xmlSecTransformDsaSha1Id, NULL); //Add signature to the node xmlNodePtr nd = this->node_; xmlAddChild(nd, signature); //Add reference for signature xmlDocPtr docPtr = nd->doc; xmlChar* id = NULL; id = xmlGetProp(nd, (xmlChar *)(id_name.c_str())); if(!id) { std::cerr<<"There is not "<node_; xmlSecDSigCtx *dsigCtx = xmlSecDSigCtxCreate(NULL); //load private key, assuming there is no need for passphrase dsigCtx->signKey = xmlSecCryptoAppKeyLoad(privkey_file.c_str(), xmlSecKeyDataFormatPem, NULL, NULL, NULL); if(dsigCtx->signKey == NULL) { xmlSecDSigCtxDestroy(dsigCtx); std::cerr<<"Can not load key"<signKey, cert_file.c_str(), xmlSecKeyDataFormatPem) < 0) { xmlSecDSigCtxDestroy(dsigCtx); std::cerr<<"Can not load certificate"<node_; xmlDocPtr docPtr = node->doc; xmlChar* id = xmlGetProp(node, (xmlChar *)(id_name.c_str())); xmlAttrPtr id_attr = xmlHasProp(node, (xmlChar *)(id_name.c_str())); xmlAddID(NULL, docPtr, (xmlChar *)id, id_attr); xmlFree(id); XMLNode signature = (*this)["Signature"]; if(!signature) { std::cerr<<"No signature node under this node"<node_; XMLNode keyinfo = signature["KeyInfo"]; XMLNode x509data = signature["KeyInfo"]["X509Data"]; xmlSecKeysMngr* keys_manager = NULL; xmlSecDSigCtx *dsigCtx; if(verify_trusted) { //Verify the signature under the signature node (this node) if((bool)x509data && (!ca_file.empty() || !ca_path.empty())) { keys_manager = 
load_trusted_certs(&keys_manager, ca_file.c_str(), ca_path.c_str()); if(keys_manager == NULL) { std::cerr<<"Can not load trusted certificates"< exists, or no trusted certificates configured"<signKey = pubkey; } else { //Use xmlSecKeyInfoNodeRead to extract public key dsigCtx->flags |= XMLSEC_DSIG_FLAGS_STORE_SIGNEDINFO_REFERENCES; dsigCtx->signKey = xmlSecKeyCreate(); if(!keyinfo) { std::cerr<<"No KeyInfo node exists"<node_; xmlSecKeyInfoCtxPtr keyInfo; keyInfo = xmlSecKeyInfoCtxCreate(NULL); if(!keyInfo) { xmlSecDSigCtxDestroy(dsigCtx); return false; } xmlSecKeyInfoNodeRead(keyinfoptr, dsigCtx->signKey,keyInfo); xmlSecKeyInfoCtxDestroy(keyInfo); } } #if 0 if(//(xmlSecDSigCtxEnableReferenceTransform(dsigCtx, xmlSecTransformInclC14NId) < 0) || (xmlSecDSigCtxEnableReferenceTransform(dsigCtx, xmlSecTransformExclC14NId) < 0) || (xmlSecDSigCtxEnableReferenceTransform(dsigCtx, xmlSecTransformSha1Id) < 0) || (xmlSecDSigCtxEnableReferenceTransform(dsigCtx, xmlSecTransformEnvelopedId) < 0) ) #endif if (xmlSecDSigCtxVerify(dsigCtx, signatureptr) < 0) { xmlSecDSigCtxDestroy(dsigCtx); if (keys_manager) xmlSecKeysMngrDestroy(keys_manager); std::cerr<<"Signature verification failed"<status == xmlSecDSigStatusSucceeded) { std::cout<<"Succeed to verify the signature under this node"<node_; xmlDocPtr doc_nd = data_nd->doc; xmlNodePtr encDataNode = NULL; xmlNodePtr keyInfoNode = NULL; xmlNodePtr encKeyNode = NULL; xmlNodePtr keyInfoNode2 = NULL; xmlSecEncCtxPtr encCtx = NULL; xmlSecTransformId encryption_sym_key_type; switch (encrpt_type) { case AES_256: encryption_sym_key_type = xmlSecTransformAes256CbcId; break; case TRIPLEDES: encryption_sym_key_type = xmlSecTransformDes3CbcId; break; case AES_128: default: encryption_sym_key_type = xmlSecTransformAes128CbcId; break; } //Create encryption template for a specific symetric key type encDataNode = xmlSecTmplEncDataCreate(doc_nd , encryption_sym_key_type, NULL, xmlSecTypeEncElement, NULL, NULL); if(encDataNode == NULL) { std::cerr<<"Failed to create encryption template"< node if(xmlSecTmplEncDataEnsureCipherValue(encDataNode) == NULL){ std::cerr<<"Failed to add CipherValue node"< keyInfoNode = xmlSecTmplEncDataEnsureKeyInfo(encDataNode, NULL); if(keyInfoNode == NULL) { std::cerr<<"Failed to add key info"< to store the encrypted session key encKeyNode = xmlSecTmplKeyInfoAddEncryptedKey(keyInfoNode, xmlSecTransformRsaPkcs1Id, NULL, NULL, NULL); if(encKeyNode == NULL) { std::cerr<<"Failed to add key info"< node if(xmlSecTmplEncDataEnsureCipherValue(encKeyNode) == NULL) { std::cerr<<"Error: failed to add CipherValue node"< and nodes to keyInfoNode2 = xmlSecTmplEncDataEnsureKeyInfo(encKeyNode, NULL); if(keyInfoNode2 == NULL){ std::cerr<<"Failed to add key info"<encKey = xmlSecKeyGenerate(xmlSecKeyDataAesId, 256, xmlSecKeyDataTypeSession); break; case TRIPLEDES: encCtx->encKey = xmlSecKeyGenerate(xmlSecKeyDataDesId, 192, xmlSecKeyDataTypeSession); break; case AES_128: default: encCtx->encKey = xmlSecKeyGenerate(xmlSecKeyDataAesId, 128, xmlSecKeyDataTypeSession); break; } if(encCtx->encKey == NULL) { std::cerr<<"Failed to generate session des key"<node_ = (data_nd=encDataNode); encDataNode = NULL; if(encCtx != NULL) xmlSecEncCtxDestroy(encCtx); if(keys_mngr != NULL)xmlSecKeysMngrDestroy(keys_mngr); return true; } bool XMLSecNode::DecryptNode(const std::string& privkey_file, XMLNode& decrypted_node) { XMLNode encrypted_data = (*this)["xenc:EncryptedData"]; XMLNode enc_method1 = encrypted_data["xenc:EncryptionMethod"]; std::string algorithm = 
(std::string)(enc_method1.Attribute("Algorithm")); if(algorithm.empty()) { std::cerr<<"No EncryptionMethod"<node_; XMLNode encrypted_key = encrypted_data["KeyInfo"]["EncryptedKey"]; //Copy the encrypted key, because it will be replaced by decrypted node after //decryption, and then it will affect the decryption if encrypted data xmlNodePtr todecrypt_key_nd = xmlCopyNode(((XMLSecNode*)(&encrypted_key))->node_, 1); xmlDocPtr doc_key_nd = NULL; doc_key_nd = xmlNewDoc((xmlChar*)"1.0"); xmlDocSetRootElement(doc_key_nd, todecrypt_key_nd); xmlSecKeyPtr private_key = get_key_from_keyfile(privkey_file.c_str()); xmlSecEncCtxPtr encCtx = NULL; xmlSecKeyPtr symmetric_key = NULL; xmlSecBufferPtr key_buffer; encCtx = xmlSecEncCtxCreate(NULL); if (encCtx == NULL) { std::cerr<<"Failed to create encryption context"<encKey = private_key; encCtx->mode = xmlEncCtxModeEncryptedKey; key_buffer = xmlSecEncCtxDecryptToBuffer(encCtx, todecrypt_key_nd); if (key_buffer == NULL) { std::cerr<<"Failed to decrypt EncryptedKey"<encKey = symmetric_key; encCtx->mode = xmlEncCtxModeEncryptedData; xmlSecBufferPtr decrypted_buf; decrypted_buf = xmlSecEncCtxDecryptToBuffer(encCtx, todecrypt_data_nd); if(decrypted_buf == NULL) { std::cerr<<"Failed to decrypt EncryptedData"<data); //std::cout<<"Decrypted node: "< #endif #include #include "XMLSecNode.h" #include "XmlSecUtils.h" #include #include #include #include #include int main(void) { std::string xml_str = ""; std::string str; std::ifstream f("testxmlsec.xml"); // load content of file while (f >> str) { xml_str.append(str); xml_str.append(" "); } f.close(); Arc::XMLNode node(xml_str); node.GetXML(str); std::cout<<"Original node: "< #endif #ifdef WIN32 #define NOGDI #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef CHARSET_EBCDIC #include #endif #include "XmlSecUtils.h" #include "saml_util.h" namespace Arc { std::string SignQuery(std::string query, SignatureMethod sign_method, std::string& privkey_file) { std::string ret; BIO* key_bio = BIO_new_file(privkey_file.c_str(), "rb"); if (key_bio == NULL) { std::cout<<"Failed to open private key file: "<value->id != xmlSecOpenSSLKeyDataRsaId) { xmlFree(usig_alg); return false; } rsa = xmlSecOpenSSLKeyDataRsaGetRsa(sender_public_key->value); if (rsa == NULL) { xmlFree(usig_alg); return false; } key_size = RSA_size(rsa); } else if (strcmp(usig_alg, (char*)xmlSecHrefDsaSha1) == 0) { if (sender_public_key->value->id != xmlSecOpenSSLKeyDataDsaId) { xmlFree(usig_alg); return false; } dsa = xmlSecOpenSSLKeyDataDsaGetDsa(sender_public_key->value); if (dsa == NULL) { xmlFree(usig_alg); return false; } key_size = DSA_size(dsa); } else { xmlFree(usig_alg); return false; } f = str1.find("&"); std::string sig_str = str1.substr(0, f-1); char *b64_signature = NULL; xmlSecByte *signature = NULL; /* get signature (unescape + base64 decode) */ signature = (unsigned char*)(xmlMalloc(key_size+1)); b64_signature = (char*)xmlURIUnescapeString(sig_str.c_str(), 0, NULL); xmlSecBase64Decode((xmlChar*)b64_signature, signature, key_size+1); /* compute signature digest */ xmlChar* md; md = (xmlChar*)(xmlMalloc(20)); char* digest = (char*)SHA1((unsigned char*)(str0.c_str()), str0.size(), md); if (digest == NULL) { xmlFree(b64_signature); xmlFree(signature); xmlFree(usig_alg); return false; } int status = 0; if (rsa) { status = RSA_verify(NID_sha1, (unsigned char*)digest, 20, signature, key_size, rsa); } else if 
(dsa) { status = DSA_verify(NID_sha1, (unsigned char*)digest, 20, signature, key_size, dsa); } if (status == 0) { std::cout<<"Signature of the query is not valid"<= 0) b64 = true; else { free(str); str = (char*)(msg.c_str()); } } if (strchr(str, '<')) { XMLNode nd(str); if(!nd) { std::cerr<<"Message format unknown"<& properties); ~WSRPGetResourcePropertyResponse(void); int Size(void); void Property(const XMLNode& prop,int pos = -1); XMLNode Property(int pos); XMLNode Properties(void); }; // ============================================================ class WSRPGetMultipleResourcePropertiesRequest: public WSRP { public: WSRPGetMultipleResourcePropertiesRequest(SOAPEnvelope& soap); WSRPGetMultipleResourcePropertiesRequest(void); WSRPGetMultipleResourcePropertiesRequest(const std::vector& names); ~WSRPGetMultipleResourcePropertiesRequest(void); std::vector Names(void); void Names(const std::vector& names); }; class WSRPGetMultipleResourcePropertiesResponse: public WSRP { public: WSRPGetMultipleResourcePropertiesResponse(SOAPEnvelope& soap); WSRPGetMultipleResourcePropertiesResponse(void); //WSRPGetMultipleResourcePropertiesResponse(const std::list& properties); ~WSRPGetMultipleResourcePropertiesResponse(void); int Size(void); void Property(const XMLNode& prop,int pos = -1); XMLNode Property(int pos); XMLNode Properties(void); }; // ============================================================ class WSRPPutResourcePropertyDocumentRequest: public WSRP { public: WSRPPutResourcePropertyDocumentRequest(SOAPEnvelope& soap); WSRPPutResourcePropertyDocumentRequest(const XMLNode& prop_doc = XMLNode()); ~WSRPPutResourcePropertyDocumentRequest(void); void Document(const XMLNode& prop_doc); XMLNode Document(void); }; class WSRPPutResourcePropertyDocumentResponse: public WSRP { public: WSRPPutResourcePropertyDocumentResponse(SOAPEnvelope& soap); WSRPPutResourcePropertyDocumentResponse(const XMLNode& prop_doc = XMLNode()); ~WSRPPutResourcePropertyDocumentResponse(void); void Document(const XMLNode& prop_doc); XMLNode Document(void); }; // ============================================================ class WSRPModifyResourceProperties { protected: XMLNode element_; public: // Create new node in XML tree or acquire element from XML tree WSRPModifyResourceProperties(XMLNode& node,bool create,const std::string& name = ""); WSRPModifyResourceProperties(void) { }; virtual ~WSRPModifyResourceProperties(void); //operator XMLNode(void) { return element_; }; operator bool(void) { return (bool)element_; }; bool operator!(void) { return !element_; }; }; class WSRPInsertResourceProperties: public WSRPModifyResourceProperties { public: WSRPInsertResourceProperties(XMLNode node,bool create):WSRPModifyResourceProperties(node,create,"wsrf-rp:Insert") { }; WSRPInsertResourceProperties(void) { }; virtual ~WSRPInsertResourceProperties(void); XMLNode Properties(void) { return element_; }; }; class WSRPUpdateResourceProperties: public WSRPModifyResourceProperties { public: WSRPUpdateResourceProperties(XMLNode node,bool create):WSRPModifyResourceProperties(node,create,"wsrf-rp:Update") { }; WSRPUpdateResourceProperties(void) { }; virtual ~WSRPUpdateResourceProperties(void); XMLNode Properties(void) { return element_; }; }; class WSRPDeleteResourceProperties: public WSRPModifyResourceProperties { public: WSRPDeleteResourceProperties(XMLNode node,bool create):WSRPModifyResourceProperties(node,create,"wsrf-rp:Delete") { }; WSRPDeleteResourceProperties(void) { }; virtual ~WSRPDeleteResourceProperties(void); std::string Property(void); 
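// Getter/setter pair for the name of the resource property (typically a QName)
// that this wsrf-rp:Delete element refers to: Property(void) above reads it,
// and the overload below sets it.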
void Property(const std::string& name); }; // ============================================================ class WSRPSetResourcePropertiesRequest: public WSRP { public: WSRPSetResourcePropertiesRequest(SOAPEnvelope& soap); WSRPSetResourcePropertiesRequest(void); ~WSRPSetResourcePropertiesRequest(void); XMLNode Properties(void); }; class WSRPSetResourcePropertiesResponse: public WSRP { public: WSRPSetResourcePropertiesResponse(SOAPEnvelope& soap); WSRPSetResourcePropertiesResponse(void); ~WSRPSetResourcePropertiesResponse(void); }; // ============================================================ class WSRPInsertResourcePropertiesRequest: public WSRP { public: WSRPInsertResourcePropertiesRequest(SOAPEnvelope& soap); WSRPInsertResourcePropertiesRequest(void); ~WSRPInsertResourcePropertiesRequest(void); WSRPInsertResourceProperties Property(void); }; class WSRPInsertResourcePropertiesResponse: public WSRP { public: WSRPInsertResourcePropertiesResponse(SOAPEnvelope& soap); WSRPInsertResourcePropertiesResponse(void); ~WSRPInsertResourcePropertiesResponse(void); }; // ============================================================ class WSRPUpdateResourcePropertiesRequest: public WSRP { public: WSRPUpdateResourcePropertiesRequest(SOAPEnvelope& soap); WSRPUpdateResourcePropertiesRequest(void); ~WSRPUpdateResourcePropertiesRequest(void); WSRPUpdateResourceProperties Property(void); }; class WSRPUpdateResourcePropertiesResponse: public WSRP { public: WSRPUpdateResourcePropertiesResponse(SOAPEnvelope& soap); WSRPUpdateResourcePropertiesResponse(void); ~WSRPUpdateResourcePropertiesResponse(void); }; // ============================================================ class WSRPDeleteResourcePropertiesRequest: public WSRP { public: WSRPDeleteResourcePropertiesRequest(SOAPEnvelope& soap); WSRPDeleteResourcePropertiesRequest(const std::string& name); WSRPDeleteResourcePropertiesRequest(void); ~WSRPDeleteResourcePropertiesRequest(void); std::string Name(void); void Name(const std::string& name); }; class WSRPDeleteResourcePropertiesResponse: public WSRP { public: WSRPDeleteResourcePropertiesResponse(SOAPEnvelope& soap); WSRPDeleteResourcePropertiesResponse(void); ~WSRPDeleteResourcePropertiesResponse(void); }; // ============================================================ class WSRPQueryResourcePropertiesRequest: public WSRP { public: WSRPQueryResourcePropertiesRequest(SOAPEnvelope& soap); WSRPQueryResourcePropertiesRequest(const std::string& dialect); WSRPQueryResourcePropertiesRequest(void); ~WSRPQueryResourcePropertiesRequest(void); std::string Dialect(void); void Dialect(const std::string& dialect); XMLNode Query(void); }; class WSRPQueryResourcePropertiesResponse: public WSRP { public: WSRPQueryResourcePropertiesResponse(SOAPEnvelope& soap); WSRPQueryResourcePropertiesResponse(void); ~WSRPQueryResourcePropertiesResponse(void); XMLNode Properties(void); }; // UnknownQueryExpressionDialectFaultType // InvalidQueryExpressionFault // QueryEvaluationErrorFault // // ============================================================ WSRF& CreateWSRP(SOAPEnvelope& soap); } // namespace Arc #endif /* _ARC_WSRP_H__ */ nordugrid-arc-5.4.2/src/hed/libs/wsrf/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515022476 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200599.018001652 30 ctime=1513200659.637743055 nordugrid-arc-5.4.2/src/hed/libs/wsrf/Makefile.am0000644000175000002070000000151612052416515022543 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libwsrf.la #noinst_PROGRAMS = 
test libwsrf_ladir = $(pkgincludedir)/wsrf libwsrf_la_HEADERS = WSRF.h WSRFBaseFault.h WSResourceProperties.h libwsrf_la_SOURCES = WSRF.cpp WSRFBaseFault.cpp WSResourceProperties.cpp libwsrf_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libwsrf_la_LIBADD = \ $(top_builddir)/src/hed/libs/ws-addressing/libwsaddressing.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) #test_SOURCES = WSRF.cpp WSRFBaseFault.cpp WSResourceProperties.cpp #test_CXXFLAGS = -I$(top_srcdir)/include \ # $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) #test_LDADD = \ # $(top_builddir)/src/hed/libs/message/libarcmessage.la \ # $(top_builddir)/src/libs/common/libarccommon.la \ # $(LIBXML2_LIBS) nordugrid-arc-5.4.2/src/hed/libs/wsrf/PaxHeaders.7502/WSRFBaseFault.cpp0000644000000000000000000000012310674736220023520 xustar000000000000000027 mtime=1190378640.536283 27 atime=1513200574.782705 29 ctime=1513200659.63974308 nordugrid-arc-5.4.2/src/hed/libs/wsrf/WSRFBaseFault.cpp0000644000175000002070000001016510674736220023571 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "WSRFBaseFault.h" namespace Arc { const char* WSRFBaseFaultAction = "http://docs.oasis-open.org/wsrf/fault"; void WSRFBaseFault::set_namespaces(void) { //XMLNode::NS ns; //ns["wsa"]="http://www.w3.org/2005/08/addressing"; //ns["wsrf-bf"]="http://docs.oasis-open.org/wsrf/bf-2"; //ns["wsrf-r"]="http://docs.oasis-open.org/wsrf/r-2"; //ns["wsrf-rw"]="http://docs.oasis-open.org/wsrf/rw-2"; //soap_.Namespaces(ns); } WSRFBaseFault::WSRFBaseFault(SOAPEnvelope& soap):WSRF(soap,WSRFBaseFaultAction) { if(!valid_) return; // Check if that is fault SOAPFault* fault = SOAP().Fault(); if(!fault) { valid_=false; return; }; // It must have timestamp XMLNode wsrf_fault = fault->Detail()[0]; if(!(wsrf_fault["wsrf-bf:Timestamp"])) { valid_=false; return; }; } WSRFBaseFault::WSRFBaseFault(const std::string& type):WSRF(true,WSRFBaseFaultAction) { if(!valid_) return; SOAPFault* fault = SOAP().Fault(); if(!fault) return; fault->Detail(true).NewChild(type); // Timestamp(Time()); } WSRFBaseFault::~WSRFBaseFault(void) { } std::string WSRFBaseFault::Type(void) { if(!valid_) return ""; SOAPFault* fault = SOAP().Fault(); if(!fault) return ""; return fault->Detail()[0].Name(); } /* Time WSRFBaseFault::Timestamp(void) { if(!valid_) return 0; SOAPFault* fault = SOAP().Fault(); if(!fault) return 0; std::string time_s = fault->Detail()[0]["wsrf-bf:Timestamp"]; return Time(time_s); } void WSRFBaseFault::Timestamp(Time t) { if(!valid_) return; SOAPFault* fault = SOAP().Fault(); if(!fault) return; XMLNode timestamp = fault->Detail()[0]["wsrf-bf:Timestamp"]; if(!timestamp) timestamp = fault->Detail()[0].NewChild("wsrf-bf:Timestamp"); timestamp = t.str(UTCTime); } */ WSAEndpointReference WSRFBaseFault::Originator(void) { if(!valid_) return WSAEndpointReference(); SOAPFault* fault = SOAP().Fault(); if(!fault) return WSAEndpointReference(); return WSAEndpointReference(fault->Detail()[0]["wsrf-bf:Originator"]); } void WSRFBaseFault::ErrorCode(const std::string&,const XMLNode&) { } XMLNode WSRFBaseFault::ErrorCode(void) { if(!valid_) return XMLNode(); SOAPFault* fault = SOAP().Fault(); if(!fault) return XMLNode(); return fault->Detail()[0]["wsrf-bf:ErrorCode"]; } std::string WSRFBaseFault::ErrorCodeDialect(void) { return ErrorCode().Attribute("wsrf-bf:dialect"); } void WSRFBaseFault::FaultCause(int pos,const XMLNode& cause) { if(!valid_) return; SOAPFault* 
fault = SOAP().Fault(); if(!fault) return; XMLNode fcause = fault->Detail()[0]["wsrf-bf:FaultCause"]; if(!fcause) fcause=fault->Detail()[0].NewChild("wsrf-bf:FaultCause"); fcause.NewChild(cause,pos); } XMLNode WSRFBaseFault::FaultCause(int pos) { if(!valid_) return XMLNode(); SOAPFault* fault = SOAP().Fault(); if(!fault) return XMLNode(); XMLNode fcause = fault->Detail()[0]["wsrf-bf:FaultCause"]; if(!fcause) return XMLNode(); return fcause.Child(pos); } void WSRFBaseFault::Description(int pos,const std::string& desc,const std::string& lang) { if(!valid_) return; SOAPFault* fault = SOAP().Fault(); if(!fault) return; XMLNode d = fault->Detail()[0].NewChild("wsrf-bf:Description",pos); d=desc; if(!lang.empty()) d.NewAttribute("wsrf-bf:lang")=lang; } std::string WSRFBaseFault::Description(int pos) { if(!valid_) return XMLNode(); SOAPFault* fault = SOAP().Fault(); if(!fault) return XMLNode(); return fault->Detail()[0]["wsrf-bf:Description"][pos]; } std::string WSRFBaseFault::DescriptionLang(int pos) { if(!valid_) return XMLNode(); SOAPFault* fault = SOAP().Fault(); if(!fault) return XMLNode(); return fault->Detail()[0]["wsrf-bf:Description"][pos].Attribute("wsrf-bf:lang"); } WSRF& CreateWSRFBaseFault(SOAPEnvelope& soap) { // Not the most efective way to extract type of message WSRFBaseFault& v = *(new WSRFBaseFault(soap)); std::string type = v.Type(); delete &v; if(v.Type() == "wsrf-r:ResourceUnknownFault") return *(new WSRFResourceUnknownFault(soap)); if(v.Type() == "wsrf-r:ResourceUnavailableFault") return *(new WSRFResourceUnavailableFault(soap)); return *(new WSRF()); } /* }; */ } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/wsrf/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315727022507 xustar000000000000000030 mtime=1513200599.067002251 30 atime=1513200648.289604263 30 ctime=1513200659.637743055 nordugrid-arc-5.4.2/src/hed/libs/wsrf/Makefile.in0000644000175000002070000006562013214315727022566 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/wsrf DIST_COMMON = README $(libwsrf_la_HEADERS) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) am__DEPENDENCIES_1 = libwsrf_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/ws-addressing/libwsaddressing.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) am_libwsrf_la_OBJECTS = libwsrf_la-WSRF.lo libwsrf_la-WSRFBaseFault.lo \ libwsrf_la-WSResourceProperties.lo libwsrf_la_OBJECTS = $(am_libwsrf_la_OBJECTS) libwsrf_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libwsrf_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libwsrf_la_SOURCES) DIST_SOURCES = $(libwsrf_la_SOURCES) am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ 
END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libwsrf_ladir)" HEADERS = $(libwsrf_la_HEADERS) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ 
GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = 
@am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libwsrf.la #noinst_PROGRAMS = test libwsrf_ladir = $(pkgincludedir)/wsrf libwsrf_la_HEADERS = WSRF.h WSRFBaseFault.h WSResourceProperties.h libwsrf_la_SOURCES = WSRF.cpp WSRFBaseFault.cpp WSResourceProperties.cpp libwsrf_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libwsrf_la_LIBADD = \ $(top_builddir)/src/hed/libs/ws-addressing/libwsaddressing.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/wsrf/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/wsrf/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libwsrf.la: $(libwsrf_la_OBJECTS) $(libwsrf_la_DEPENDENCIES) $(libwsrf_la_LINK) $(libwsrf_la_OBJECTS) $(libwsrf_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libwsrf_la-WSRF.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libwsrf_la-WSRFBaseFault.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libwsrf_la-WSResourceProperties.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libwsrf_la-WSRF.lo: WSRF.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libwsrf_la_CXXFLAGS) $(CXXFLAGS) -MT libwsrf_la-WSRF.lo -MD -MP -MF $(DEPDIR)/libwsrf_la-WSRF.Tpo -c -o libwsrf_la-WSRF.lo `test -f 'WSRF.cpp' || echo '$(srcdir)/'`WSRF.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libwsrf_la-WSRF.Tpo $(DEPDIR)/libwsrf_la-WSRF.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='WSRF.cpp' object='libwsrf_la-WSRF.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libwsrf_la_CXXFLAGS) $(CXXFLAGS) -c -o libwsrf_la-WSRF.lo `test -f 'WSRF.cpp' || echo 
'$(srcdir)/'`WSRF.cpp libwsrf_la-WSRFBaseFault.lo: WSRFBaseFault.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libwsrf_la_CXXFLAGS) $(CXXFLAGS) -MT libwsrf_la-WSRFBaseFault.lo -MD -MP -MF $(DEPDIR)/libwsrf_la-WSRFBaseFault.Tpo -c -o libwsrf_la-WSRFBaseFault.lo `test -f 'WSRFBaseFault.cpp' || echo '$(srcdir)/'`WSRFBaseFault.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libwsrf_la-WSRFBaseFault.Tpo $(DEPDIR)/libwsrf_la-WSRFBaseFault.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='WSRFBaseFault.cpp' object='libwsrf_la-WSRFBaseFault.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libwsrf_la_CXXFLAGS) $(CXXFLAGS) -c -o libwsrf_la-WSRFBaseFault.lo `test -f 'WSRFBaseFault.cpp' || echo '$(srcdir)/'`WSRFBaseFault.cpp libwsrf_la-WSResourceProperties.lo: WSResourceProperties.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libwsrf_la_CXXFLAGS) $(CXXFLAGS) -MT libwsrf_la-WSResourceProperties.lo -MD -MP -MF $(DEPDIR)/libwsrf_la-WSResourceProperties.Tpo -c -o libwsrf_la-WSResourceProperties.lo `test -f 'WSResourceProperties.cpp' || echo '$(srcdir)/'`WSResourceProperties.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libwsrf_la-WSResourceProperties.Tpo $(DEPDIR)/libwsrf_la-WSResourceProperties.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='WSResourceProperties.cpp' object='libwsrf_la-WSResourceProperties.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libwsrf_la_CXXFLAGS) $(CXXFLAGS) -c -o libwsrf_la-WSResourceProperties.lo `test -f 'WSResourceProperties.cpp' || echo '$(srcdir)/'`WSResourceProperties.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libwsrf_laHEADERS: $(libwsrf_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libwsrf_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libwsrf_ladir)" @list='$(libwsrf_la_HEADERS)'; test -n "$(libwsrf_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libwsrf_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libwsrf_ladir)" || exit $$?; \ done uninstall-libwsrf_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libwsrf_la_HEADERS)'; test -n "$(libwsrf_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libwsrf_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libwsrf_ladir)" && rm -f $$files ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ 
$(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(HEADERS) installdirs: for dir in "$(DESTDIR)$(libwsrf_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-libwsrf_laHEADERS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-libwsrf_laHEADERS .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am \ install-libwsrf_laHEADERS install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-libwsrf_laHEADERS #test_SOURCES = WSRF.cpp WSRFBaseFault.cpp WSResourceProperties.cpp #test_CXXFLAGS = -I$(top_srcdir)/include \ # $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) #test_LDADD = \ # $(top_builddir)/src/hed/libs/message/libarcmessage.la \ # $(top_builddir)/src/libs/common/libarccommon.la \ # $(LIBXML2_LIBS) # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/wsrf/PaxHeaders.7502/WSRF.cpp0000644000000000000000000000012410672011347021724 xustar000000000000000027 mtime=1189614311.554858 27 atime=1513200574.787705 30 ctime=1513200659.638743067 nordugrid-arc-5.4.2/src/hed/libs/wsrf/WSRF.cpp0000644000175000002070000000171410672011347021774 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "WSRF.h" namespace Arc { void WSRF::set_namespaces(void) { NS ns; ns["wsa"]="http://www.w3.org/2005/08/addressing"; ns["wsrf-bf"]="http://docs.oasis-open.org/wsrf/bf-2"; ns["wsrf-r"]="http://docs.oasis-open.org/wsrf/r-2"; ns["wsrf-rw"]="http://docs.oasis-open.org/wsrf/rw-2"; soap_.Namespaces(ns); } WSRF::WSRF(SOAPEnvelope& soap,const std::string& action): soap_(soap),allocated_(false),valid_(false) { if(!soap_) return; set_namespaces(); if(!action.empty()) if(WSAHeader(soap).Action() != action) return; valid_=true; } WSRF::WSRF(bool fault,const std::string& action): soap_(*(new SOAPEnvelope(NS(),fault))), allocated_(true),valid_(false) { set_namespaces(); if(!action.empty()) WSAHeader(soap_).Action(action); valid_=true; } WSRF::~WSRF(void) { if(allocated_) delete (&soap_); } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/wsrf/PaxHeaders.7502/WSResourceProperties.cpp0000644000000000000000000000012410672011347025261 xustar000000000000000027 mtime=1189614311.554858 27 atime=1513200574.785705 30 ctime=1513200659.640743092 nordugrid-arc-5.4.2/src/hed/libs/wsrf/WSResourceProperties.cpp0000644000175000002070000006553410672011347025343 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "WSResourceProperties.h" namespace Arc { // ============= Actions ============== static const char* WSRPGetResourcePropertyDocumentRequestAction = "http://docs.oasis-open.org/wsrf/rpw-2/GetResourcePropertyDocument/GetResourcePropertyDocumentRequest"; static const char* WSRPGetResourcePropertyDocumentResponseAction = "http://docs.oasis-open.org/wsrf/rpw-2/GetResourcePropertyDocument/GetResourcePropertyDocumentResponse"; static const char* WSRPGetResourcePropertyRequestAction = "http://docs.oasis-open.org/wsrf/rpw-2/GetResourceProperty/GetResourcePropertyRequest"; static const char* WSRPGetResourcePropertyResponseAction = "http://docs.oasis-open.org/wsrf/rpw-2/GetResourceProperty/GetResourcePropertyResponse"; static const char* WSRPGetMultipleResourcePropertiesRequestAction = "http://docs.oasis-open.org/wsrf/rpw-2/GetMultipleResourceProperties/GetMultipleResourcePropertiesRequest"; static const char* WSRPGetMultipleResourcePropertiesResponseAction = "http://docs.oasis-open.org/wsrf/rpw-2/GetMultipleResourceProperties/GetMultipleResourcePropertiesResponse"; static const char* WSRPQueryResourcePropertiesRequestAction = "http://docs.oasis-open.org/wsrf/rpw-2/QueryResourceProperties/QueryResourcePropertiesRequest"; static const char* WSRPQueryResourcePropertiesResponseAction = "http://docs.oasis-open.org/wsrf/rpw-2/QueryResourceProperties/QueryResourcePropertiesResponse"; static const char* WSRPPutResourcePropertyDocumentRequestAction = "http://docs.oasis-open.org/wsrf/rpw-2/PutResourcePropertyDocument/PutResourcePropertyDocumentRequest"; static const char* WSRPPutResourcePropertyDocumentResponseAction = "http://docs.oasis-open.org/wsrf/rpw-2/PutResourcePropertyDocument/PutResourcePropertyDocumentResponse"; static const char* WSRPSetResourcePropertiesRequestAction = "http://docs.oasis-open.org/wsrf/rpw-2/SetResourceProperties/SetResourcePropertiesRequest"; static const 
char* WSRPSetResourcePropertiesResponseAction = "http://docs.oasis-open.org/wsrf/rpw-2/SetResourceProperties/SetResourcePropertiesResponse"; static const char* WSRPInsertResourcePropertiesRequestAction = "http://docs.oasis-open.org/wsrf/rpw-2/InsertResourceProperties/InsertResourcePropertiesRequest"; static const char* WSRPInsertResourcePropertiesResponseAction = "http://docs.oasis-open.org/wsrf/rpw-2/InsertResourceProperties/InsertResourcePropertiesResponse"; static const char* WSRPUpdateResourcePropertiesRequestAction = "http://docs.oasis-open.org/wsrf/rpw-2/UpdateResourceProperties/UpdateResourcePropertiesRequest"; static const char* WSRPUpdateResourcePropertiesResponseAction = "http://docs.oasis-open.org/wsrf/rpw-2/UpdateResourceProperties/UpdateResourcePropertiesResponse"; static const char* WSRPDeleteResourcePropertiesRequestAction = "http://docs.oasis-open.org/wsrf/rpw-2/DeleteResourceProperties/DeleteResourcePropertiesRequest"; static const char* WSRPDeleteResourcePropertiesResponseAction = "http://docs.oasis-open.org/wsrf/rpw-2/DeleteResourceProperties/DeleteResourcePropertiesResponse"; // ============= BaseClass ============== void WSRP::set_namespaces(void) { NS ns; ns["wsrf-bf"]="http://docs.oasis-open.org/wsrf/bf-2"; ns["wsrf-rp"]="http://docs.oasis-open.org/wsrf/rp-2"; ns["wsrf-rpw"]="http://docs.oasis-open.org/wsrf/rpw-2"; ns["wsrf-rw"]="http://docs.oasis-open.org/wsrf/rw-2"; soap_.Namespaces(ns); } WSRP::WSRP(bool fault,const std::string& action):WSRF(fault,action) { set_namespaces(); } WSRP::WSRP(SOAPEnvelope& soap,const std::string& action):WSRF(soap,action) { set_namespaces(); } // ============= ResourceProperties modifiers ============== WSRPModifyResourceProperties::WSRPModifyResourceProperties(XMLNode& node,bool create,const std::string& name) { if(create) { if(!name.empty()) element_=node.NewChild(name); } else { if(MatchXMLName(node,name)) element_=node; }; } WSRPModifyResourceProperties::~WSRPModifyResourceProperties(void) { } WSRPInsertResourceProperties::~WSRPInsertResourceProperties(void) { } WSRPUpdateResourceProperties::~WSRPUpdateResourceProperties(void) { } WSRPDeleteResourceProperties::~WSRPDeleteResourceProperties(void) { } std::string WSRPDeleteResourceProperties::Property(void) { return (std::string)(element_.Attribute("wsrf-rp:ResourceProperty")); } void WSRPDeleteResourceProperties::Property(const std::string& name) { XMLNode property = element_.Attribute("wsrf-rp:ResourceProperty"); if(!property) property=element_.NewAttribute("wsrf-rp:ResourceProperty"); property=name; } // ============= GetResourcePropertyDocument ============== WSRPGetResourcePropertyDocumentRequest::WSRPGetResourcePropertyDocumentRequest(SOAPEnvelope& soap):WSRP(soap,WSRPGetResourcePropertyDocumentRequestAction) { if(!valid_) return; if(!MatchXMLName(soap_.Child(),"wsrf-rp:GetResourcePropertyDocument")) valid_=false; } WSRPGetResourcePropertyDocumentRequest::WSRPGetResourcePropertyDocumentRequest(void):WSRP(false,WSRPGetResourcePropertyDocumentRequestAction) { if(!soap_.NewChild("wsrf-rp:GetResourcePropertyDocument")) valid_=false; } WSRPGetResourcePropertyDocumentRequest::~WSRPGetResourcePropertyDocumentRequest(void) { } WSRPGetResourcePropertyDocumentResponse::WSRPGetResourcePropertyDocumentResponse(SOAPEnvelope& soap):WSRP(soap,WSRPGetResourcePropertyDocumentResponseAction) { if(!valid_) return; if(!MatchXMLName(soap_.Child(),"wsrf-rp:GetResourcePropertyDocumentResponse")) valid_=false; } WSRPGetResourcePropertyDocumentResponse::WSRPGetResourcePropertyDocumentResponse(const 
XMLNode& prop_doc):WSRP(false,WSRPGetResourcePropertyDocumentResponseAction) { XMLNode resp = soap_.NewChild("wsrf-rp:GetResourcePropertyDocumentResponse"); if(!resp) { valid_=false; return; }; if(prop_doc) resp.NewChild(prop_doc); } WSRPGetResourcePropertyDocumentResponse::~WSRPGetResourcePropertyDocumentResponse(void) { } void WSRPGetResourcePropertyDocumentResponse::Document(const XMLNode& prop_doc) { if(!valid_) return; XMLNode resp = soap_.Child(); resp.Child().Destroy(); if(prop_doc) resp.NewChild(prop_doc); } XMLNode WSRPGetResourcePropertyDocumentResponse::Document(void) { if(!valid_) return XMLNode(); return soap_.Child().Child(); } // ============= GetResourceProperty ============== WSRPGetResourcePropertyRequest::WSRPGetResourcePropertyRequest(SOAPEnvelope& soap):WSRP(soap,WSRPGetResourcePropertyRequestAction) { if(!valid_) return; if(!MatchXMLName(soap_.Child(),"wsrf-rp:GetResourceProperty")) valid_=false; } WSRPGetResourcePropertyRequest::WSRPGetResourcePropertyRequest(const std::string& name):WSRP(false,WSRPGetResourcePropertyRequestAction) { XMLNode req = soap_.NewChild("wsrf-rp:GetResourceProperty"); if(!req) { valid_=false; return; }; req=name; // QName } WSRPGetResourcePropertyRequest::~WSRPGetResourcePropertyRequest(void) { } std::string WSRPGetResourcePropertyRequest::Name(void) { if(!valid_) return ""; return (std::string)(soap_.Child()); } void WSRPGetResourcePropertyRequest::Name(const std::string& name) { if(!valid_) return; soap_.Child()=name; } WSRPGetResourcePropertyResponse::WSRPGetResourcePropertyResponse(SOAPEnvelope& soap):WSRP(soap,WSRPGetResourcePropertyResponseAction) { if(!valid_) return; if(!MatchXMLName(soap_.Child(),"wsrf-rp:GetResourcePropertyResponse")) valid_=false; } WSRPGetResourcePropertyResponse::WSRPGetResourcePropertyResponse(void):WSRP(false,WSRPGetResourcePropertyResponseAction) { XMLNode resp = soap_.NewChild("wsrf-rp:GetResourcePropertyResponse"); if(!resp) valid_=false; } WSRPGetResourcePropertyResponse::~WSRPGetResourcePropertyResponse(void) { } int WSRPGetResourcePropertyResponse::Size(void) { if(!valid_) return 0; return soap_.Child().Size(); } void WSRPGetResourcePropertyResponse::Property(const XMLNode& prop,int pos) { if(!valid_) return; XMLNode resp = soap_.Child(); if(resp) resp.NewChild(prop,pos); } XMLNode WSRPGetResourcePropertyResponse::Property(int pos) { if(!valid_) return XMLNode(); return soap_.Child().Child(pos); } XMLNode WSRPGetResourcePropertyResponse::Properties(void) { if(!valid_) return XMLNode(); return soap_.Child(); } // ============= GetMultipleResourceProperties ============== WSRPGetMultipleResourcePropertiesRequest::WSRPGetMultipleResourcePropertiesRequest(SOAPEnvelope& soap):WSRP(soap,WSRPGetMultipleResourcePropertiesRequestAction) { if(!valid_) return; if(!MatchXMLName(soap_.Child(),"wsrf-rp:GetMultipleResourceProperties")) valid_=false; } WSRPGetMultipleResourcePropertiesRequest::WSRPGetMultipleResourcePropertiesRequest(void):WSRP(false,WSRPGetMultipleResourcePropertiesRequestAction) { XMLNode req = soap_.NewChild("wsrf-rp:GetMultipleResourceProperties"); if(!req) valid_=false; } WSRPGetMultipleResourcePropertiesRequest::WSRPGetMultipleResourcePropertiesRequest(const std::vector& names):WSRP(false,WSRPGetMultipleResourcePropertiesRequestAction) { XMLNode req = soap_.NewChild("wsrf-rp:GetMultipleResourceProperties"); if(!req) { valid_=false; return; }; for(std::vector::const_iterator i = names.begin();i!=names.end();++i) { XMLNode new_node = req.NewChild("wsrf-rp:ResourceProperty"); new_node=*i; }; } 
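// --- Usage sketch (editor's note, not part of the original nordugrid-arc source) ---
// The constructor above builds a wsrf-rp:GetMultipleResourceProperties request from a
// list of resource-property QNames (the std::vector element type was lost during
// extraction of this archive; it is assumed to be std::string here). A caller might
// use it roughly as follows:
//
//   std::vector<std::string> names;
//   names.push_back("wsrf-rp:SomeProperty");      // hypothetical property QName
//   names.push_back("wsrf-rp:AnotherProperty");   // hypothetical property QName
//   WSRPGetMultipleResourcePropertiesRequest req(names);
//   if(req) { /* req.SOAP() now carries one wsrf-rp:ResourceProperty child per name */ }
// -----------------------------------------------------------------------------------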
WSRPGetMultipleResourcePropertiesRequest::~WSRPGetMultipleResourcePropertiesRequest(void) { } std::vector WSRPGetMultipleResourcePropertiesRequest::Names(void) { std::vector names; if(!valid_) return names; XMLNode props = soap_.Child()["wsrf-rp:ResourceProperty"]; for(int n = 0;;++n) { XMLNode prop = props[n]; if(!prop) break; names.push_back((std::string)prop); }; return names; } void WSRPGetMultipleResourcePropertiesRequest::Names(const std::vector& names) { if(!valid_) return; XMLNode req = soap_.Child(); for(;;) { XMLNode prop = req["wsrf-rp:ResourceProperty"]; if(!prop) break; prop.Destroy(); }; for(std::vector::const_iterator i = names.begin();i!=names.end();++i) { XMLNode new_node = req.NewChild("wsrf-rp:ResourceProperty"); new_node=*i; }; } WSRPGetMultipleResourcePropertiesResponse::WSRPGetMultipleResourcePropertiesResponse(SOAPEnvelope& soap):WSRP(soap,WSRPGetMultipleResourcePropertiesResponseAction) { if(!valid_) return; if(!MatchXMLName(soap_.Child(),"wsrf-rp:GetMultipleResourcePropertiesResponse")) valid_=false; } WSRPGetMultipleResourcePropertiesResponse::WSRPGetMultipleResourcePropertiesResponse(void):WSRP(false,WSRPGetMultipleResourcePropertiesResponseAction) { XMLNode resp = soap_.NewChild("wsrf-rp:GetMultipleResourcePropertiesResponse"); if(!resp) valid_=false; } WSRPGetMultipleResourcePropertiesResponse::~WSRPGetMultipleResourcePropertiesResponse(void) { } int WSRPGetMultipleResourcePropertiesResponse::Size(void) { if(!valid_) return 0; return soap_.Child().Size(); } void WSRPGetMultipleResourcePropertiesResponse::Property(const XMLNode& prop,int pos) { if(!valid_) return; XMLNode resp = soap_.Child(); if(resp) resp.NewChild(prop,pos); } XMLNode WSRPGetMultipleResourcePropertiesResponse::Property(int pos) { if(!valid_) return XMLNode(); return soap_.Child().Child(pos); } XMLNode WSRPGetMultipleResourcePropertiesResponse::Properties(void) { if(!valid_) return XMLNode(); return soap_.Child(); } // ============= PutResourcePropertiesDocument ============== WSRPPutResourcePropertyDocumentRequest::WSRPPutResourcePropertyDocumentRequest(SOAPEnvelope& soap):WSRP(soap,WSRPPutResourcePropertyDocumentRequestAction) { if(!valid_) return; if(!MatchXMLName(soap_.Child(),"wsrf-rp:PutResourcePropertyDocument")) valid_=false; } WSRPPutResourcePropertyDocumentRequest::WSRPPutResourcePropertyDocumentRequest(const XMLNode& prop_doc):WSRP(false,WSRPPutResourcePropertyDocumentRequestAction) { XMLNode resp = soap_.NewChild("wsrf-rp:PutResourcePropertyDocument"); if(!resp) { valid_=false; return; }; if(prop_doc) resp.NewChild(prop_doc); } WSRPPutResourcePropertyDocumentRequest::~WSRPPutResourcePropertyDocumentRequest(void) { } void WSRPPutResourcePropertyDocumentRequest::Document(const XMLNode& prop_doc) { if(!valid_) return; XMLNode resp = soap_.Child(); resp.Child().Destroy(); if(prop_doc) resp.NewChild(prop_doc); } XMLNode WSRPPutResourcePropertyDocumentRequest::Document(void) { if(!valid_) return XMLNode(); return soap_.Child().Child(); } WSRPPutResourcePropertyDocumentResponse::WSRPPutResourcePropertyDocumentResponse(SOAPEnvelope& soap):WSRP(soap,WSRPPutResourcePropertyDocumentResponseAction) { if(!valid_) return; if(!MatchXMLName(soap_.Child(),"wsrf-rp:PutResourcePropertyDocumentResponse")) valid_=false; } WSRPPutResourcePropertyDocumentResponse::WSRPPutResourcePropertyDocumentResponse(const XMLNode& prop_doc):WSRP(false,WSRPPutResourcePropertyDocumentResponseAction) { XMLNode resp = soap_.NewChild("wsrf-rp:PutResourcePropertyDocumentResponse"); if(!resp) { valid_=false; return; }; 
if(prop_doc) resp.NewChild(prop_doc); } WSRPPutResourcePropertyDocumentResponse::~WSRPPutResourcePropertyDocumentResponse(void) { } void WSRPPutResourcePropertyDocumentResponse::Document(const XMLNode& prop_doc) { if(!valid_) return; XMLNode resp = soap_.Child(); resp.Child().Destroy(); if(prop_doc) resp.NewChild(prop_doc); } XMLNode WSRPPutResourcePropertyDocumentResponse::Document(void) { if(!valid_) return XMLNode(); return soap_.Child().Child(); } // ============= SetResourceProperties ============== WSRPSetResourcePropertiesRequest::WSRPSetResourcePropertiesRequest(SOAPEnvelope& soap):WSRP(soap,WSRPSetResourcePropertiesRequestAction) { if(!valid_) return; if(!MatchXMLName(soap_.Child(),"wsrf-rp:SetResourceProperties")) valid_=false;} WSRPSetResourcePropertiesRequest::WSRPSetResourcePropertiesRequest(void):WSRP(false,WSRPSetResourcePropertiesRequestAction) { if(!soap_.NewChild("wsrf-rp:SetResourceProperties")) valid_=false; } WSRPSetResourcePropertiesRequest::~WSRPSetResourcePropertiesRequest(void) { } XMLNode WSRPSetResourcePropertiesRequest::Properties(void) { if(!valid_) return XMLNode(); return soap_.Child(); } WSRPSetResourcePropertiesResponse::WSRPSetResourcePropertiesResponse(SOAPEnvelope& soap):WSRP(soap,WSRPSetResourcePropertiesResponseAction) { if(!valid_) return; if(!MatchXMLName(soap_.Child(),"wsrf-rp:SetResourcePropertiesResponse")) valid_=false; } WSRPSetResourcePropertiesResponse::WSRPSetResourcePropertiesResponse(void):WSRP(false,WSRPSetResourcePropertiesResponseAction) { if(!soap_.NewChild("wsrf-rp:SetResourcePropertiesResponse")) valid_=false; } WSRPSetResourcePropertiesResponse::~WSRPSetResourcePropertiesResponse(void) { } // ============= InsertResourceProperties ============== WSRPInsertResourcePropertiesRequest::WSRPInsertResourcePropertiesRequest(SOAPEnvelope& soap):WSRP(soap,WSRPInsertResourcePropertiesRequestAction) { if(!valid_) return; if(!MatchXMLName(soap_.Child(),"wsrf-rp:InsertResourceProperties")) valid_=false; } WSRPInsertResourcePropertiesRequest::WSRPInsertResourcePropertiesRequest(void):WSRP(false,WSRPInsertResourcePropertiesRequestAction) { if(!soap_.NewChild("wsrf-rp:InsertResourceProperties")) valid_=false; } WSRPInsertResourcePropertiesRequest::~WSRPInsertResourcePropertiesRequest(void) { } WSRPInsertResourceProperties WSRPInsertResourcePropertiesRequest::Property(void) { if(!valid_) return WSRPInsertResourceProperties(); return WSRPInsertResourceProperties(soap_.Child()["wsrf-rp:Insert"],false); } WSRPInsertResourcePropertiesResponse::WSRPInsertResourcePropertiesResponse(SOAPEnvelope& soap):WSRP(soap,WSRPInsertResourcePropertiesResponseAction) { if(!valid_) return; if(!MatchXMLName(soap_.Child(),"wsrf-rp:InsertResourcePropertiesResponse")) valid_=false; } WSRPInsertResourcePropertiesResponse::WSRPInsertResourcePropertiesResponse(void):WSRP(false,WSRPInsertResourcePropertiesResponseAction) { if(!soap_.NewChild("wsrf-rp:InsertResourcePropertiesResponse")) valid_=false; } WSRPInsertResourcePropertiesResponse::~WSRPInsertResourcePropertiesResponse(void) { } // ============= UpdateResourceProperties ============== WSRPUpdateResourcePropertiesRequest::WSRPUpdateResourcePropertiesRequest(SOAPEnvelope& soap):WSRP(soap,WSRPUpdateResourcePropertiesRequestAction) { if(!valid_) return; if(!MatchXMLName(soap_.Child(),"wsrf-rp:UpdateResourceProperties")) valid_=false; } WSRPUpdateResourcePropertiesRequest::WSRPUpdateResourcePropertiesRequest(void):WSRP(false,WSRPUpdateResourcePropertiesRequestAction) { XMLNode req = 
soap_.NewChild("wsrf-rp:UpdateResourceProperties"); if(!req) valid_=false; XMLNode el = req.NewChild("wsrf-rp:Update"); if(!el) valid_=false; } WSRPUpdateResourcePropertiesRequest::~WSRPUpdateResourcePropertiesRequest(void) { } WSRPUpdateResourceProperties WSRPUpdateResourcePropertiesRequest::Property(void) { if(!valid_) return WSRPUpdateResourceProperties(); return WSRPUpdateResourceProperties(soap_.Child()["wsrf-rp:Update"],false); } WSRPUpdateResourcePropertiesResponse::WSRPUpdateResourcePropertiesResponse(SOAPEnvelope& soap):WSRP(soap,WSRPUpdateResourcePropertiesResponseAction) { if(!valid_) return; if(!MatchXMLName(soap_.Child(),"wsrf-rp:UpdateResourcePropertiesResponse")) valid_=false; } WSRPUpdateResourcePropertiesResponse::WSRPUpdateResourcePropertiesResponse(void):WSRP(false,WSRPUpdateResourcePropertiesResponseAction) { if(!soap_.NewChild("wsrf-rp:UpdateResourcePropertiesResponse")) valid_=false; } WSRPUpdateResourcePropertiesResponse::~WSRPUpdateResourcePropertiesResponse(void) { } // ============= DeleteResourceProperties ============== WSRPDeleteResourcePropertiesRequest::WSRPDeleteResourcePropertiesRequest(SOAPEnvelope& soap):WSRP(soap,WSRPDeleteResourcePropertiesRequestAction) { if(!valid_) return; if(!MatchXMLName(soap_.Child(),"wsrf-rp:DeleteResourceProperties")) valid_=false; } WSRPDeleteResourcePropertiesRequest::WSRPDeleteResourcePropertiesRequest(void):WSRP(false,WSRPDeleteResourcePropertiesRequestAction) { XMLNode req = soap_.NewChild("wsrf-rp:DeleteResourceProperties"); if(!req) valid_=false; XMLNode el = req.NewChild("wsrf-rp:Delete"); if(!el) valid_=false; } WSRPDeleteResourcePropertiesRequest::WSRPDeleteResourcePropertiesRequest(const std::string& name):WSRP(false,WSRPDeleteResourcePropertiesRequestAction) { XMLNode req = soap_.NewChild("wsrf-rp:DeleteResourceProperties"); if(!req) valid_=false; Name(name); } WSRPDeleteResourcePropertiesRequest::~WSRPDeleteResourcePropertiesRequest(void) { } std::string WSRPDeleteResourcePropertiesRequest::Name(void) { if(!valid_) return ""; return WSRPDeleteResourceProperties(soap_.Child()["wsrf-rp:Delete"],false).Property(); } void WSRPDeleteResourcePropertiesRequest::Name(const std::string& name) { if(!valid_) return; WSRPDeleteResourceProperties prop(soap_.Child()["wsrf-rp:Delete"],false); if(prop) { prop.Property(name); return; }; WSRPDeleteResourceProperties(soap_.Child()["wsrf-rp:Delete"],true).Property(name); } WSRPDeleteResourcePropertiesResponse::WSRPDeleteResourcePropertiesResponse(SOAPEnvelope& soap):WSRP(soap,WSRPDeleteResourcePropertiesResponseAction) { if(!valid_) return; if(!MatchXMLName(soap_.Child(),"wsrf-rp:DeleteResourcePropertiesResponse")) valid_=false; } WSRPDeleteResourcePropertiesResponse::WSRPDeleteResourcePropertiesResponse(void):WSRP(false,WSRPDeleteResourcePropertiesResponseAction) { if(!soap_.NewChild("wsrf-rp:DeleteResourcePropertiesResponse")) valid_=false; } WSRPDeleteResourcePropertiesResponse::~WSRPDeleteResourcePropertiesResponse(void) { } // ==================== Faults ================================ WSRPFault::WSRPFault(SOAPEnvelope& soap):WSRFBaseFault(soap) { } WSRPFault::WSRPFault(const std::string& type):WSRFBaseFault(type) { } WSRPFault::~WSRPFault(void) { } XMLNode WSRPResourcePropertyChangeFailure::CurrentProperties(bool create) { SOAPFault* fault = soap_.Fault(); if(!fault) return XMLNode(); XMLNode detail = fault->Detail(true); XMLNode failure = detail["wsrf-rp:ResourcePropertyChangeFailure"]; if(!failure) { if(!create) return XMLNode(); 
failure=detail.NewChild("wsrf-rp:ResourcePropertyChangeFailure"); }; XMLNode cur_value = failure["wsrf-rp:CurrentValue"]; if(!cur_value) { if(!create) return XMLNode(); cur_value=failure.NewChild("wsrf-rp:CurrentValue"); }; return cur_value; } XMLNode WSRPResourcePropertyChangeFailure::RequestedProperties(bool create) { SOAPFault* fault = soap_.Fault(); if(!fault) return XMLNode(); XMLNode detail = fault->Detail(true); XMLNode failure = detail["wsrf-rp:ResourcePropertyChangeFailure"]; if(!failure) { if(!create) return XMLNode(); failure=detail.NewChild("wsrf-rp:ResourcePropertyChangeFailure"); }; XMLNode req_value = failure["wsrf-rp:RequestedValue"]; if(!req_value) { if(!create) return XMLNode(); req_value=failure.NewChild("wsrf-rp:RequestedValue"); }; return req_value; } // ============= QueryResourceProperties ============== WSRPQueryResourcePropertiesRequest::WSRPQueryResourcePropertiesRequest(SOAPEnvelope& soap):WSRP(soap,WSRPQueryResourcePropertiesRequestAction) { if(!valid_) return; if(!MatchXMLName(soap_.Child(),"wsrf-rp:QueryResourceProperties")) valid_=false; } WSRPQueryResourcePropertiesRequest::WSRPQueryResourcePropertiesRequest(const std::string& dialect):WSRP(false,WSRPQueryResourcePropertiesRequestAction) { XMLNode req = soap_.NewChild("wsrf-rp:QueryResourceProperties"); if(!req) valid_=false; Dialect(dialect); } WSRPQueryResourcePropertiesRequest::WSRPQueryResourcePropertiesRequest(void) { XMLNode req = soap_.NewChild("wsrf-rp:QueryResourceProperties"); if(!req) valid_=false; } WSRPQueryResourcePropertiesRequest::~WSRPQueryResourcePropertiesRequest(void) { } std::string WSRPQueryResourcePropertiesRequest::Dialect(void) { if(!valid_) return ""; return soap_.Child()["wsrf-rp:QueryExpression"].Attribute("Dialect"); } void WSRPQueryResourcePropertiesRequest::Dialect(const std::string& dialect_uri) { if(!valid_) return; XMLNode query = soap_.Child()["wsrf-rp:QueryExpression"]; if(!query) query=soap_.Child().NewChild("wsrf-rp:QueryExpression"); XMLNode dialect = query.Attribute("Dialect"); if(!dialect) dialect=query.NewAttribute("Dialect"); dialect=dialect_uri; } XMLNode WSRPQueryResourcePropertiesRequest::Query(void) { XMLNode query = soap_.Child()["wsrf-rp:QueryExpression"]; if(!query) { query=soap_.Child().NewChild("wsrf-rp:QueryExpression"); query.NewAttribute("Dialect"); }; return query; } WSRPQueryResourcePropertiesResponse::WSRPQueryResourcePropertiesResponse(SOAPEnvelope& soap):WSRP(soap,WSRPQueryResourcePropertiesResponseAction) { if(!valid_) return; if(!MatchXMLName(soap_.Child(),"wsrf-rp:QueryResourcePropertiesResponse")) valid_=false; } WSRPQueryResourcePropertiesResponse::WSRPQueryResourcePropertiesResponse(void):WSRP(false,WSRPQueryResourcePropertiesResponseAction) { XMLNode req = soap_.NewChild("wsrf-rp:QueryResourcePropertiesResponse"); if(!req) valid_=false; } WSRPQueryResourcePropertiesResponse::~WSRPQueryResourcePropertiesResponse(void) { } XMLNode WSRPQueryResourcePropertiesResponse::Properties(void) { if(!valid_) return XMLNode(); return soap_.Child(); } // ===================================================================== WSRF& CreateWSRPFault(SOAPEnvelope& soap) { // Not the most efective way to extract type of message WSRPFault& v = *(new WSRPFault(soap)); std::string type = v.Type(); delete &v; if(v.Type() == "wsrf-rp:WSRPInvalidResourcePropertyQNameFault") return *(new WSRPInvalidResourcePropertyQNameFault(soap)); if(v.Type() == "wsrf-rp:WSRPUnableToPutResourcePropertyDocumentFault") return *(new WSRPUnableToPutResourcePropertyDocumentFault(soap)); 
if(v.Type() == "wsrf-rp:WSRPInvalidModificationFault") return *(new WSRPInvalidModificationFault(soap)); if(v.Type() == "wsrf-rp:WSRPUnableToModifyResourcePropertyFault") return *(new WSRPUnableToModifyResourcePropertyFault(soap)); if(v.Type() == "wsrf-rp:WSRPSetResourcePropertyRequestFailedFault") return *(new WSRPSetResourcePropertyRequestFailedFault(soap)); if(v.Type() == "wsrf-rp:WSRPInsertResourcePropertiesRequestFailedFault") return *(new WSRPInsertResourcePropertiesRequestFailedFault(soap)); if(v.Type() == "wsrf-rp:WSRPUpdateResourcePropertiesRequestFailedFault") return *(new WSRPUpdateResourcePropertiesRequestFailedFault(soap)); if(v.Type() == "wsrf-rp:WSRPDeleteResourcePropertiesRequestFailedFault") return *(new WSRPDeleteResourcePropertiesRequestFailedFault(soap)); return *(new WSRF()); } WSRF& CreateWSRP(SOAPEnvelope& soap) { NS ns; ns["wsa"]="http://www.w3.org/2005/08/addressing"; ns["wsrf-r"]="http://docs.oasis-open.org/wsrf/r-2"; ns["wsrf-rw"]="http://docs.oasis-open.org/wsrf/rw-2"; ns["wsrf-bf"]="http://docs.oasis-open.org/wsrf/bf-2"; ns["wsrf-rp"]="http://docs.oasis-open.org/wsrf/rp-2"; ns["wsrf-rpw"]="http://docs.oasis-open.org/wsrf/rpw-2"; soap.Namespaces(ns); std::string action = WSAHeader(soap).Action(); if(action == WSRFBaseFaultAction) { WSRF& fault = CreateWSRFBaseFault(soap); if(fault) return fault; return CreateWSRPFault(soap); }; if(action == WSRPGetResourcePropertyDocumentRequestAction) return *(new WSRPGetResourcePropertyDocumentRequest(soap)); if(action == WSRPGetResourcePropertyDocumentResponseAction) return *(new WSRPGetResourcePropertyDocumentResponse(soap)); if(action == WSRPGetResourcePropertyRequestAction) return *(new WSRPGetResourcePropertyRequest(soap)); if(action == WSRPGetResourcePropertyResponseAction) return *(new WSRPGetResourcePropertyResponse(soap)); if(action == WSRPGetMultipleResourcePropertiesRequestAction) return *(new WSRPGetMultipleResourcePropertiesRequest(soap)); if(action == WSRPGetMultipleResourcePropertiesResponseAction) return *(new WSRPGetMultipleResourcePropertiesResponse(soap)); if(action == WSRPQueryResourcePropertiesRequestAction) return *(new WSRPQueryResourcePropertiesRequest(soap)); if(action == WSRPQueryResourcePropertiesResponseAction) return *(new WSRPQueryResourcePropertiesResponse(soap)); if(action == WSRPPutResourcePropertyDocumentRequestAction) return *(new WSRPPutResourcePropertyDocumentRequest(soap)); if(action == WSRPPutResourcePropertyDocumentResponseAction) return *(new WSRPPutResourcePropertyDocumentResponse(soap)); if(action == WSRPSetResourcePropertiesRequestAction) return *(new WSRPSetResourcePropertiesRequest(soap)); if(action == WSRPSetResourcePropertiesResponseAction) return *(new WSRPSetResourcePropertiesResponse(soap)); if(action == WSRPInsertResourcePropertiesRequestAction) return *(new WSRPInsertResourcePropertiesRequest(soap)); if(action == WSRPInsertResourcePropertiesResponseAction) return *(new WSRPInsertResourcePropertiesResponse(soap)); if(action == WSRPUpdateResourcePropertiesRequestAction) return *(new WSRPUpdateResourcePropertiesRequest(soap)); if(action == WSRPUpdateResourcePropertiesResponseAction) return *(new WSRPUpdateResourcePropertiesResponse(soap)); if(action == WSRPDeleteResourcePropertiesRequestAction) return *(new WSRPDeleteResourcePropertiesRequest(soap)); if(action == WSRPDeleteResourcePropertiesResponseAction) return *(new WSRPDeleteResourcePropertiesResponse(soap)); return *(new WSRP()); } } // namespace Arc 
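Editor's note: the request/response classes above are thin wrappers over an Arc::SOAPEnvelope, and CreateWSRP() acts as a factory that inspects the wsa:Action header and returns the matching wrapper allocated with new, so the caller owns the object and must delete it through the WSRF base class. The following dispatch sketch is an editor's illustration under that assumption; the function name HandleWSRPMessage and the logging are not part of the nordugrid-arc sources.

#include <iostream>
#include "WSResourceProperties.h"

// Dispatch one incoming WSRF-RP SOAP message using the CreateWSRP() factory
// defined above. CreateWSRP() returns a heap-allocated wrapper by reference;
// deleting it through WSRF& is safe because WSRF declares a virtual destructor.
void HandleWSRPMessage(Arc::SOAPEnvelope& request) {
  Arc::WSRF& msg = Arc::CreateWSRP(request);
  if(!msg) {                     // envelope could not be mapped to a valid WSRF object
    delete &msg;
    return;
  }
  Arc::WSRPGetResourcePropertyRequest* get =
      dynamic_cast<Arc::WSRPGetResourcePropertyRequest*>(&msg);
  if(get) {
    // Name() yields the requested resource-property QName from the request body.
    std::cout << "GetResourceProperty for " << get->Name() << std::endl;
  }
  delete &msg;
}

dynamic_cast is used because the factory deliberately erases the concrete type; handlers for the other actions recognised by CreateWSRP (SetResourceProperties, QueryResourceProperties, and so on) can be added with the same pattern.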
nordugrid-arc-5.4.2/src/hed/libs/wsrf/PaxHeaders.7502/WSRFBaseFault.h0000644000000000000000000000012410672011347023160 xustar000000000000000027 mtime=1189614311.554858 27 atime=1513200574.783705 30 ctime=1513200659.634743019 nordugrid-arc-5.4.2/src/hed/libs/wsrf/WSRFBaseFault.h0000644000175000002070000000340210672011347023224 0ustar00mockbuildmock00000000000000#include "WSRF.h" #include #include namespace Arc { extern const char* WSRFBaseFaultAction; /// Base class for WSRF fault messages /** Use classes inherited from it for specific faults. */ class WSRFBaseFault: public WSRF { protected: /** set WS-ResourceProperties namespaces and default prefixes in SOAP message */ void set_namespaces(void); public: /** Constructor - creates object out of supplied SOAP tree. */ WSRFBaseFault(SOAPEnvelope& soap); /** Constructor - creates new WSRF fault */ WSRFBaseFault(const std::string& type); virtual ~WSRFBaseFault(void); std::string Type(void); Time Timestamp(void); void Timestamp(Time); WSAEndpointReference Originator(void); //void Originator(const WSAEndpointReference&); void ErrorCode(const std::string& dialect,const XMLNode& error); XMLNode ErrorCode(void); std::string ErrorCodeDialect(void); void Description(int pos,const std::string& desc,const std::string& lang); std::string Description(int pos); std::string DescriptionLang(int pos); void FaultCause(int pos,const XMLNode& cause); XMLNode FaultCause(int pos); }; class WSRFResourceUnknownFault: public WSRFBaseFault { public: WSRFResourceUnknownFault(SOAPEnvelope& soap):WSRFBaseFault(soap) { }; WSRFResourceUnknownFault(void):WSRFBaseFault("wsrf-r:ResourceUnknownFault") { } virtual ~WSRFResourceUnknownFault(void) { }; }; class WSRFResourceUnavailableFault: public WSRFBaseFault { public: WSRFResourceUnavailableFault(SOAPEnvelope& soap):WSRFBaseFault(soap) { }; WSRFResourceUnavailableFault(void):WSRFBaseFault("wsrf-r:ResourceUnavailableFault") { } virtual ~WSRFResourceUnavailableFault(void) { }; }; WSRF& CreateWSRFBaseFault(SOAPEnvelope& soap); } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/wsrf/PaxHeaders.7502/WSRF.h0000644000000000000000000000012410672011347021371 xustar000000000000000027 mtime=1189614311.554858 27 atime=1513200574.787705 30 ctime=1513200659.633743006 nordugrid-arc-5.4.2/src/hed/libs/wsrf/WSRF.h0000644000175000002070000000230110672011347021432 0ustar00mockbuildmock00000000000000#ifndef __ARC_WSRF_H__ #define __ARC_WSRF_H__ #include namespace Arc { /// Base class for every WSRF message /** This class is not intended to be used directly. Use it like reference while passing through unknown WSRF message or use classes derived from it. */ class WSRF { protected: SOAPEnvelope& soap_; /** Associated SOAP message - it's SOAP message after all */ bool allocated_; /** true if soap_ needs to be deleted in destructor */ bool valid_; /** true if object represents valid WSRF message */ /** set WS Resource namespaces and default prefixes in SOAP message */ void set_namespaces(void); public: /** Constructor - creates object out of supplied SOAP tree. 
*/ WSRF(SOAPEnvelope& soap,const std::string& action = ""); /** Constructor - creates new WSRF object */ WSRF(bool fault = false,const std::string& action = ""); virtual ~WSRF(void); /** Direct access to underlying SOAP element */ virtual SOAPEnvelope& SOAP(void) { return soap_; }; /** Returns true if instance is valid */ virtual operator bool(void) { return valid_; }; virtual bool operator!(void) { return !valid_; }; }; } // namespace Arc #endif // __ARC_WSRF_H__ nordugrid-arc-5.4.2/src/hed/libs/wsrf/PaxHeaders.7502/README0000644000000000000000000000012411001653037021312 xustar000000000000000027 mtime=1208440351.928622 27 atime=1513200574.783705 30 ctime=1513200659.632742994 nordugrid-arc-5.4.2/src/hed/libs/wsrf/README0000644000175000002070000000005411001653037021356 0ustar00mockbuildmock00000000000000implementation of limited set of WSRF spec. nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/infosys0000644000000000000000000000013213214316023021064 xustar000000000000000030 mtime=1513200659.943746798 30 atime=1513200668.720854145 30 ctime=1513200659.943746798 nordugrid-arc-5.4.2/src/hed/libs/infosys/0000755000175000002070000000000013214316023021207 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/infosys/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712053177163023213 xustar000000000000000027 mtime=1353514611.374025 30 atime=1513200597.816986963 30 ctime=1513200659.931746651 nordugrid-arc-5.4.2/src/hed/libs/infosys/Makefile.am0000644000175000002070000000272712053177163023265 0ustar00mockbuildmock00000000000000TEST_DIR = test DIST_SUBDIRS = test schema SUBDIRS = schema $(TEST_DIR) lib_LTLIBRARIES = libarcinfosys.la #noinst_PROGRAMS = test test_cache # test_register libarcinfosys_ladir = $(pkgincludedir)/infosys libarcinfosys_la_HEADERS = InformationInterface.h InfoCache.h \ InfoRegister.h InfoFilter.h RegisteredService.h libarcinfosys_la_SOURCES = InformationInterface.cpp InfoCache.cpp \ InfoRegister.cpp InfoFilter.cpp RegisteredService.cpp BootstrapISIS.cpp libarcinfosys_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcinfosys_la_LIBADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) #test_SOURCES = test.cpp #test_CXXFLAGS = -I$(top_srcdir)/include $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) #test_LDADD = libinfosys.la $(GLIBMM_LIBS) $(LIBXML2_LIBS) #test_cache_SOURCES = test_cache.cpp #test_cache_CXXFLAGS = -I$(top_srcdir)/include \ # $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) #test_cache_LDADD = libinfosys.la $(GLIBMM_LIBS) $(LIBXML2_LIBS) #test_register_SOURCES = test_register.cpp #test_register_CXXFLAGS = -I$(top_srcdir)/include \ # $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) #test_register_LDADD = libinfosys.la $(GLIBMM_LIBS) $(LIBXML2_LIBS) nordugrid-arc-5.4.2/src/hed/libs/infosys/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315725023216 xustar000000000000000030 mtime=1513200597.872987648 30 atime=1513200648.351605021 30 ctime=1513200659.933746675 nordugrid-arc-5.4.2/src/hed/libs/infosys/Makefile.in0000644000175000002070000011436013214315725023271 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. 
# @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/infosys DIST_COMMON = README $(libarcinfosys_la_HEADERS) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libdir)" \ "$(DESTDIR)$(libarcinfosys_ladir)" LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = libarcinfosys_la_DEPENDENCIES = $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libarcinfosys_la_OBJECTS = \ 
libarcinfosys_la-InformationInterface.lo \ libarcinfosys_la-InfoCache.lo libarcinfosys_la-InfoRegister.lo \ libarcinfosys_la-InfoFilter.lo \ libarcinfosys_la-RegisteredService.lo \ libarcinfosys_la-BootstrapISIS.lo libarcinfosys_la_OBJECTS = $(am_libarcinfosys_la_OBJECTS) libarcinfosys_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarcinfosys_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcinfosys_la_SOURCES) DIST_SOURCES = $(libarcinfosys_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive HEADERS = $(libarcinfosys_la_HEADERS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = 
@ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = 
@LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = test TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir 
= @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ DIST_SUBDIRS = test schema SUBDIRS = schema $(TEST_DIR) lib_LTLIBRARIES = libarcinfosys.la #noinst_PROGRAMS = test test_cache # test_register libarcinfosys_ladir = $(pkgincludedir)/infosys libarcinfosys_la_HEADERS = InformationInterface.h InfoCache.h \ InfoRegister.h InfoFilter.h RegisteredService.h libarcinfosys_la_SOURCES = InformationInterface.cpp InfoCache.cpp \ InfoRegister.cpp InfoFilter.cpp RegisteredService.cpp BootstrapISIS.cpp libarcinfosys_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcinfosys_la_LIBADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/infosys/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/infosys/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcinfosys.la: $(libarcinfosys_la_OBJECTS) $(libarcinfosys_la_DEPENDENCIES) $(libarcinfosys_la_LINK) -rpath $(libdir) $(libarcinfosys_la_OBJECTS) $(libarcinfosys_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcinfosys_la-BootstrapISIS.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcinfosys_la-InfoCache.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcinfosys_la-InfoFilter.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcinfosys_la-InfoRegister.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcinfosys_la-InformationInterface.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcinfosys_la-RegisteredService.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP 
-MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcinfosys_la-InformationInterface.lo: InformationInterface.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcinfosys_la_CXXFLAGS) $(CXXFLAGS) -MT libarcinfosys_la-InformationInterface.lo -MD -MP -MF $(DEPDIR)/libarcinfosys_la-InformationInterface.Tpo -c -o libarcinfosys_la-InformationInterface.lo `test -f 'InformationInterface.cpp' || echo '$(srcdir)/'`InformationInterface.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcinfosys_la-InformationInterface.Tpo $(DEPDIR)/libarcinfosys_la-InformationInterface.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='InformationInterface.cpp' object='libarcinfosys_la-InformationInterface.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcinfosys_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcinfosys_la-InformationInterface.lo `test -f 'InformationInterface.cpp' || echo '$(srcdir)/'`InformationInterface.cpp libarcinfosys_la-InfoCache.lo: InfoCache.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcinfosys_la_CXXFLAGS) $(CXXFLAGS) -MT libarcinfosys_la-InfoCache.lo -MD -MP -MF $(DEPDIR)/libarcinfosys_la-InfoCache.Tpo -c -o libarcinfosys_la-InfoCache.lo `test -f 'InfoCache.cpp' || echo '$(srcdir)/'`InfoCache.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcinfosys_la-InfoCache.Tpo $(DEPDIR)/libarcinfosys_la-InfoCache.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='InfoCache.cpp' object='libarcinfosys_la-InfoCache.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcinfosys_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcinfosys_la-InfoCache.lo `test -f 'InfoCache.cpp' || echo '$(srcdir)/'`InfoCache.cpp libarcinfosys_la-InfoRegister.lo: InfoRegister.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcinfosys_la_CXXFLAGS) $(CXXFLAGS) -MT libarcinfosys_la-InfoRegister.lo -MD -MP -MF $(DEPDIR)/libarcinfosys_la-InfoRegister.Tpo -c -o libarcinfosys_la-InfoRegister.lo `test -f 'InfoRegister.cpp' || echo '$(srcdir)/'`InfoRegister.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcinfosys_la-InfoRegister.Tpo $(DEPDIR)/libarcinfosys_la-InfoRegister.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='InfoRegister.cpp' object='libarcinfosys_la-InfoRegister.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcinfosys_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcinfosys_la-InfoRegister.lo `test -f 'InfoRegister.cpp' || echo '$(srcdir)/'`InfoRegister.cpp libarcinfosys_la-InfoFilter.lo: InfoFilter.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcinfosys_la_CXXFLAGS) $(CXXFLAGS) -MT libarcinfosys_la-InfoFilter.lo -MD -MP -MF $(DEPDIR)/libarcinfosys_la-InfoFilter.Tpo -c -o libarcinfosys_la-InfoFilter.lo `test -f 'InfoFilter.cpp' || echo '$(srcdir)/'`InfoFilter.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcinfosys_la-InfoFilter.Tpo $(DEPDIR)/libarcinfosys_la-InfoFilter.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='InfoFilter.cpp' object='libarcinfosys_la-InfoFilter.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcinfosys_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcinfosys_la-InfoFilter.lo `test -f 'InfoFilter.cpp' || echo '$(srcdir)/'`InfoFilter.cpp libarcinfosys_la-RegisteredService.lo: RegisteredService.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcinfosys_la_CXXFLAGS) $(CXXFLAGS) -MT libarcinfosys_la-RegisteredService.lo -MD -MP -MF $(DEPDIR)/libarcinfosys_la-RegisteredService.Tpo -c -o libarcinfosys_la-RegisteredService.lo `test -f 'RegisteredService.cpp' || echo '$(srcdir)/'`RegisteredService.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcinfosys_la-RegisteredService.Tpo $(DEPDIR)/libarcinfosys_la-RegisteredService.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='RegisteredService.cpp' object='libarcinfosys_la-RegisteredService.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcinfosys_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcinfosys_la-RegisteredService.lo `test -f 'RegisteredService.cpp' || echo '$(srcdir)/'`RegisteredService.cpp libarcinfosys_la-BootstrapISIS.lo: BootstrapISIS.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcinfosys_la_CXXFLAGS) $(CXXFLAGS) -MT libarcinfosys_la-BootstrapISIS.lo -MD -MP -MF $(DEPDIR)/libarcinfosys_la-BootstrapISIS.Tpo -c -o libarcinfosys_la-BootstrapISIS.lo `test -f 'BootstrapISIS.cpp' || echo '$(srcdir)/'`BootstrapISIS.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcinfosys_la-BootstrapISIS.Tpo $(DEPDIR)/libarcinfosys_la-BootstrapISIS.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BootstrapISIS.cpp' object='libarcinfosys_la-BootstrapISIS.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcinfosys_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcinfosys_la-BootstrapISIS.lo `test -f 
'BootstrapISIS.cpp' || echo '$(srcdir)/'`BootstrapISIS.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libarcinfosys_laHEADERS: $(libarcinfosys_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarcinfosys_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarcinfosys_ladir)" @list='$(libarcinfosys_la_HEADERS)'; test -n "$(libarcinfosys_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarcinfosys_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarcinfosys_ladir)" || exit $$?; \ done uninstall-libarcinfosys_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarcinfosys_la_HEADERS)'; test -n "$(libarcinfosys_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarcinfosys_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libarcinfosys_ladir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . 
|| ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(HEADERS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(libarcinfosys_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-libarcinfosys_laHEADERS install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-libLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-libLTLIBRARIES \ uninstall-libarcinfosys_laHEADERS .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic \ clean-libLTLIBRARIES clean-libtool ctags ctags-recursive \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-libLTLIBRARIES \ install-libarcinfosys_laHEADERS install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs installdirs-am \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags tags-recursive uninstall uninstall-am \ uninstall-libLTLIBRARIES uninstall-libarcinfosys_laHEADERS #test_SOURCES = test.cpp #test_CXXFLAGS = -I$(top_srcdir)/include $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) #test_LDADD = libinfosys.la $(GLIBMM_LIBS) $(LIBXML2_LIBS) #test_cache_SOURCES = test_cache.cpp #test_cache_CXXFLAGS = -I$(top_srcdir)/include \ # $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) #test_cache_LDADD = libinfosys.la $(GLIBMM_LIBS) $(LIBXML2_LIBS) #test_register_SOURCES = test_register.cpp #test_register_CXXFLAGS = -I$(top_srcdir)/include \ # $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) #test_register_LDADD = libinfosys.la $(GLIBMM_LIBS) $(LIBXML2_LIBS) # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/infosys/PaxHeaders.7502/RegisteredService.cpp0000644000000000000000000000012411770351466025303 xustar000000000000000027 mtime=1340199734.229368 27 atime=1513200574.967707 30 ctime=1513200659.938746736 nordugrid-arc-5.4.2/src/hed/libs/infosys/RegisteredService.cpp0000644000175000002070000000045211770351466025351 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "RegisteredService.h" namespace Arc { RegisteredService::RegisteredService(Config* cfg, PluginArgument* parg): Service(cfg, parg),inforeg(*cfg, this) { } RegisteredService::~RegisteredService(void) { } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/infosys/PaxHeaders.7502/InformationInterface.h0000644000000000000000000000012311211751127025423 xustar000000000000000027 mtime=1244123735.543438 27 atime=1513200574.965707 29 ctime=1513200659.92674659 nordugrid-arc-5.4.2/src/hed/libs/infosys/InformationInterface.h0000644000175000002070000001114511211751127025473 0ustar00mockbuildmock00000000000000#ifndef __ARC_INFORMATIONINTERFACE_H__ #define __ARC_INFORMATIONINTERFACE_H__ #include #include #include #include #include #include #include namespace Arc { /// Information System message processor. /** This class provides callback for 2 operations of WS-ResourceProperties and convenient parsing/generation of corresponding SOAP mesages. In a future it may extend range of supported specifications. */ class InformationInterface { protected: /** Mutex used to protect access to Get methods in multi-threaded env. */ Glib::Mutex lock_; bool to_lock_; /** This method is called by this object's Process method. Real implementation of this class should return (sub)tree of XML document. This method may be called multiple times per single Process call. Here @path is a set on XML element names specifying how to reach requested node(s). */ virtual void Get(const std::list& path,XMLNodeContainer& result); virtual void Get(XMLNode xpath,XMLNodeContainer& result); public: /** Constructor. If 'safe' is true all calls to Get will be locked. */ InformationInterface(bool safe = true); virtual ~InformationInterface(void); /* This method is called by service which wants to process WSRF request. It parses 'in' message, calls appropriate 'Get' method and returns response SOAP message. In case of error it either returns NULL or corresponding SOAP fault. */ SOAPEnvelope* Process(SOAPEnvelope& in); /* This method adds possibility to filter produced document. Document is filtered according to embedded and provided policies. User identity and filtering algorithm are defined by specified */ SOAPEnvelope* Process(SOAPEnvelope& in,const InfoFilter& filter,const InfoFilterPolicies& policies = InfoFilterPolicies(),const NS& ns = NS()); }; /// Information System document container and processor. /** This class inherits form InformationInterface and offers container for storing informational XML document. */ class InformationContainer: public InformationInterface { protected: /** Either link or container of XML document */ XMLNode doc_; virtual void Get(const std::list& path,XMLNodeContainer& result); virtual void Get(XMLNode xpath,XMLNodeContainer& result); public: InformationContainer(void); /** Creates an instance with XML document @doc. If @copy is true this method makes a copy of @doc for internal use. */ InformationContainer(XMLNode doc,bool copy = false); virtual ~InformationContainer(void); /** Get a lock on contained XML document. To be used in multi-threaded environment. 
Do not forget to release it with Release() */ XMLNode Acquire(void); void Release(void); /** Replaces internal XML document with @doc. If @copy is true this method makes a copy of @doc for internal use. */ void Assign(XMLNode doc,bool copy = false); }; /// Request for information in InfoSystem /** This is a convenience wrapper creating proper WS-ResourceProperties request targeted InfoSystem interface of service. */ class InformationRequest { private: WSRP* wsrp_; public: /** Dummy constructor */ InformationRequest(void); /** Request for attribute specified by elements of path. Currently only first element is used. */ InformationRequest(const std::list& path); /** Request for attribute specified by elements of paths. Currently only first element of every path is used. */ InformationRequest(const std::list >& paths); /** Request for attributes specified by XPath query. */ InformationRequest(XMLNode query); ~InformationRequest(void); operator bool(void) { return (wsrp_ != NULL); }; bool operator!(void) { return (wsrp_ == NULL); }; /** Returns generated SOAP message */ SOAPEnvelope* SOAP(void); }; /// Informational response from InfoSystem /** This is a convenience wrapper analyzing WS-ResourceProperties response from InfoSystem interface of service. */ class InformationResponse { private: WSRF* wsrp_; public: /** Constructor parses WS-ResourceProperties ressponse. Provided SOAPEnvelope object must be valid as long as this object is in use. */ InformationResponse(SOAPEnvelope& soap); ~InformationResponse(void); operator bool(void) { return (wsrp_ != NULL); }; bool operator!(void) { return (wsrp_ == NULL); }; /** Returns set of attributes which were in SOAP message passed to constructor. */ std::list Result(void); }; } // namespace Arc #endif /* __ARC_INFORMATIONINTERFACE_H__ */ nordugrid-arc-5.4.2/src/hed/libs/infosys/PaxHeaders.7502/test0000644000000000000000000000013213214316023022043 xustar000000000000000030 mtime=1513200659.975747189 30 atime=1513200668.720854145 30 ctime=1513200659.975747189 nordugrid-arc-5.4.2/src/hed/libs/infosys/test/0000755000175000002070000000000013214316023022166 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/infosys/test/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612331112760024160 xustar000000000000000027 mtime=1399100912.859245 30 atime=1513200597.934988406 29 ctime=1513200659.97174714 nordugrid-arc-5.4.2/src/hed/libs/infosys/test/Makefile.am0000644000175000002070000000235612331112760024231 0ustar00mockbuildmock00000000000000TESTS = InformationInterfaceTest InfoFilterTest RegisteredServiceTest check_PROGRAMS = $(TESTS) InformationInterfaceTest_SOURCES = $(top_srcdir)/src/Test.cpp \ InformationInterfaceTest.cpp InformationInterfaceTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) InformationInterfaceTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarcinfosys.la $(CPPUNIT_LIBS) $(GLIBMM_LIBS) InfoFilterTest_SOURCES = $(top_srcdir)/src/Test.cpp InfoFilterTest.cpp InfoFilterTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) InfoFilterTest_LDADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarcinfosys.la $(CPPUNIT_LIBS) $(GLIBMM_LIBS) RegisteredServiceTest_SOURCES = $(top_srcdir)/src/Test.cpp \ RegisteredServiceTest.cpp RegisteredServiceTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) 
$(LIBXML2_CFLAGS) $(AM_CXXFLAGS) RegisteredServiceTest_LDADD = \ ../libarcinfosys.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) nordugrid-arc-5.4.2/src/hed/libs/infosys/test/PaxHeaders.7502/RegisteredServiceTest.cpp0000644000000000000000000000012411730440757027120 xustar000000000000000027 mtime=1331839471.916249 27 atime=1513200574.972707 30 ctime=1513200659.975747189 nordugrid-arc-5.4.2/src/hed/libs/infosys/test/RegisteredServiceTest.cpp0000644000175000002070000001027311730440757027170 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include class RegisteredServiceTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(RegisteredServiceTest); CPPUNIT_TEST(TestRegisteredService); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void TestRegisteredService(); }; static const char* xml_str = "\ \ \ \ \ Demo \ HPCPInterop \ \ \ \ /home/interop/demo_job.sh \ 60 \ demo_job.out.%J \ /home/interop/username \ \ \ \ \ 1 \ \ \ \ "; class MyRegisteredService: public Arc::RegisteredService { public: /** Example contructor - Server takes at least it's configuration subtree */ MyRegisteredService(Arc::Config* cfg,Arc::PluginArgument* parg):RegisteredService(cfg,parg) {}; virtual ~MyRegisteredService(void) { }; virtual Arc::MCC_Status process(Arc::Message&,Arc::Message&) {return Arc::MCC_Status(Arc::STATUS_OK);}; }; void RegisteredServiceTest::setUp() { } void RegisteredServiceTest::tearDown() { } void RegisteredServiceTest::TestRegisteredService() { Arc::SOAPEnvelope soap(xml_str); std::string cfg_str=""; cfg_str +="\n"; cfg_str +=" \n"; cfg_str +=" \n"; cfg_str +=" .libs/\n"; cfg_str +=" ../../hed/mcc/http/.libs/\n"; cfg_str +=" ../../hed/mcc/soap/.libs/\n"; cfg_str +=" ../../hed/mcc/tls/.libs/\n"; cfg_str +=" ../../hed/mcc/tcp/.libs/\n"; cfg_str +=" \n"; cfg_str +=" mcctcp\n"; cfg_str +=" mcctls\n"; cfg_str +=" mcchttp\n"; cfg_str +=" mccsoap\n"; cfg_str +=" \n"; cfg_str +=" example.org50000\n"; cfg_str +=" POSTecho\n"; cfg_str +=" \n"; cfg_str +=" \n"; cfg_str +=" "; Arc::Config cfg(cfg_str); MyRegisteredService myservice(&cfg,NULL); CPPUNIT_ASSERT(true); } CPPUNIT_TEST_SUITE_REGISTRATION(RegisteredServiceTest); nordugrid-arc-5.4.2/src/hed/libs/infosys/test/PaxHeaders.7502/Makefile.in0000644000000000000000000000013013214315725024173 xustar000000000000000030 mtime=1513200597.992989116 28 atime=1513200648.3826054 30 ctime=1513200659.972747152 nordugrid-arc-5.4.2/src/hed/libs/infosys/test/Makefile.in0000644000175000002070000012061013214315725024243 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
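The InformationInterface.h header reproduced above documents a small client/server pair: InformationContainer serves a stored XML document through WS-ResourceProperties Process() calls, while InformationRequest and InformationResponse generate and parse the corresponding SOAP messages. The following hedged, in-process sketch ties them together without any MCC chain; it is not part of the package. The include paths, the toy document, the element name, the ownership of the returned envelope and the GetXML() call are assumptions; the constructors and the Process()/SOAP()/Result() calls follow the declarations in the header, with the element type of Result() (template arguments are stripped in the text above) assumed to be Arc::XMLNode.

// Minimal in-process sketch, assuming installed ARC development headers.
#include <iostream>
#include <list>
#include <string>
#include <arc/XMLNode.h>                      // assumed header location
#include <arc/message/SOAPEnvelope.h>         // assumed header location
#include <arc/infosys/InformationInterface.h> // assumed header location

int main() {
  // "Service" side: a toy informational document held in a container.
  Arc::XMLNode doc("<InfoDoc><Resource><Name>example</Name></Resource></InfoDoc>");
  Arc::InformationContainer container(doc, true);   // keep an internal copy of the document

  // "Client" side: request the Resource property by path (only the first path element is used).
  std::list<std::string> path;
  path.push_back("Resource");
  Arc::InformationRequest request(path);
  if (!request) return 1;                           // request could not be generated
  Arc::SOAPEnvelope* query = request.SOAP();        // generated WS-ResourceProperties request
  if (!query) return 1;

  // Feed the request straight into the container's InformationInterface::Process().
  Arc::SOAPEnvelope* reply = container.Process(*query);
  if (!reply) return 1;                             // NULL (or a SOAP fault) signals failure

  {
    // Parse the reply and print whatever attributes came back.
    Arc::InformationResponse response(*reply);      // reply must stay valid while response is used
    if (response) {
      std::list<Arc::XMLNode> attrs = response.Result();
      for (std::list<Arc::XMLNode>::iterator a = attrs.begin(); a != attrs.end(); ++a) {
        std::string xml;
        a->GetXML(xml);
        std::cout << xml << std::endl;
      }
    }
  }
  delete reply;                                     // the caller is assumed to own the returned envelope
  return 0;
}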
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ TESTS = InformationInterfaceTest$(EXEEXT) InfoFilterTest$(EXEEXT) \ RegisteredServiceTest$(EXEEXT) check_PROGRAMS = $(am__EXEEXT_1) subdir = src/hed/libs/infosys/test DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__EXEEXT_1 = InformationInterfaceTest$(EXEEXT) \ InfoFilterTest$(EXEEXT) RegisteredServiceTest$(EXEEXT) am_InfoFilterTest_OBJECTS = InfoFilterTest-Test.$(OBJEXT) \ InfoFilterTest-InfoFilterTest.$(OBJEXT) InfoFilterTest_OBJECTS = $(am_InfoFilterTest_OBJECTS) am__DEPENDENCIES_1 = InfoFilterTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarcinfosys.la $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) InfoFilterTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(InfoFilterTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_InformationInterfaceTest_OBJECTS = \ InformationInterfaceTest-Test.$(OBJEXT) \ InformationInterfaceTest-InformationInterfaceTest.$(OBJEXT) InformationInterfaceTest_OBJECTS = \ $(am_InformationInterfaceTest_OBJECTS) InformationInterfaceTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarcinfosys.la $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) InformationInterfaceTest_LINK = $(LIBTOOL) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(InformationInterfaceTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_RegisteredServiceTest_OBJECTS = \ RegisteredServiceTest-Test.$(OBJEXT) \ RegisteredServiceTest-RegisteredServiceTest.$(OBJEXT) RegisteredServiceTest_OBJECTS = $(am_RegisteredServiceTest_OBJECTS) RegisteredServiceTest_DEPENDENCIES = ../libarcinfosys.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) RegisteredServiceTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(RegisteredServiceTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp 
am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(InfoFilterTest_SOURCES) \ $(InformationInterfaceTest_SOURCES) \ $(RegisteredServiceTest_SOURCES) DIST_SOURCES = $(InfoFilterTest_SOURCES) \ $(InformationInterfaceTest_SOURCES) \ $(RegisteredServiceTest_SOURCES) ETAGS = etags CTAGS = ctags am__tty_colors = \ red=; grn=; lgn=; blu=; std= DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ 
GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES 
= @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ InformationInterfaceTest_SOURCES = $(top_srcdir)/src/Test.cpp \ InformationInterfaceTest.cpp InformationInterfaceTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) InformationInterfaceTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarcinfosys.la $(CPPUNIT_LIBS) $(GLIBMM_LIBS) InfoFilterTest_SOURCES = $(top_srcdir)/src/Test.cpp InfoFilterTest.cpp InfoFilterTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) InfoFilterTest_LDADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarcinfosys.la $(CPPUNIT_LIBS) $(GLIBMM_LIBS) RegisteredServiceTest_SOURCES = $(top_srcdir)/src/Test.cpp \ RegisteredServiceTest.cpp RegisteredServiceTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) RegisteredServiceTest_LDADD 
= \ ../libarcinfosys.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/infosys/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/infosys/test/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-checkPROGRAMS: @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list InfoFilterTest$(EXEEXT): $(InfoFilterTest_OBJECTS) $(InfoFilterTest_DEPENDENCIES) @rm -f InfoFilterTest$(EXEEXT) $(InfoFilterTest_LINK) $(InfoFilterTest_OBJECTS) $(InfoFilterTest_LDADD) $(LIBS) InformationInterfaceTest$(EXEEXT): $(InformationInterfaceTest_OBJECTS) $(InformationInterfaceTest_DEPENDENCIES) @rm -f InformationInterfaceTest$(EXEEXT) $(InformationInterfaceTest_LINK) $(InformationInterfaceTest_OBJECTS) $(InformationInterfaceTest_LDADD) $(LIBS) RegisteredServiceTest$(EXEEXT): $(RegisteredServiceTest_OBJECTS) $(RegisteredServiceTest_DEPENDENCIES) @rm -f RegisteredServiceTest$(EXEEXT) $(RegisteredServiceTest_LINK) $(RegisteredServiceTest_OBJECTS) $(RegisteredServiceTest_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/InfoFilterTest-InfoFilterTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/InfoFilterTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/InformationInterfaceTest-InformationInterfaceTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/InformationInterfaceTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/RegisteredServiceTest-RegisteredServiceTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/RegisteredServiceTest-Test.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ 
$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< InfoFilterTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(InfoFilterTest_CXXFLAGS) $(CXXFLAGS) -MT InfoFilterTest-Test.o -MD -MP -MF $(DEPDIR)/InfoFilterTest-Test.Tpo -c -o InfoFilterTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/InfoFilterTest-Test.Tpo $(DEPDIR)/InfoFilterTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='InfoFilterTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(InfoFilterTest_CXXFLAGS) $(CXXFLAGS) -c -o InfoFilterTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp InfoFilterTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(InfoFilterTest_CXXFLAGS) $(CXXFLAGS) -MT InfoFilterTest-Test.obj -MD -MP -MF $(DEPDIR)/InfoFilterTest-Test.Tpo -c -o InfoFilterTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/InfoFilterTest-Test.Tpo $(DEPDIR)/InfoFilterTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='InfoFilterTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(InfoFilterTest_CXXFLAGS) $(CXXFLAGS) -c -o InfoFilterTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` InfoFilterTest-InfoFilterTest.o: InfoFilterTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(InfoFilterTest_CXXFLAGS) $(CXXFLAGS) -MT InfoFilterTest-InfoFilterTest.o -MD -MP -MF $(DEPDIR)/InfoFilterTest-InfoFilterTest.Tpo -c -o InfoFilterTest-InfoFilterTest.o `test -f 'InfoFilterTest.cpp' || echo '$(srcdir)/'`InfoFilterTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/InfoFilterTest-InfoFilterTest.Tpo $(DEPDIR)/InfoFilterTest-InfoFilterTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='InfoFilterTest.cpp' object='InfoFilterTest-InfoFilterTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(InfoFilterTest_CXXFLAGS) $(CXXFLAGS) -c -o InfoFilterTest-InfoFilterTest.o `test -f 'InfoFilterTest.cpp' || echo '$(srcdir)/'`InfoFilterTest.cpp InfoFilterTest-InfoFilterTest.obj: InfoFilterTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(InfoFilterTest_CXXFLAGS) $(CXXFLAGS) -MT InfoFilterTest-InfoFilterTest.obj -MD -MP -MF $(DEPDIR)/InfoFilterTest-InfoFilterTest.Tpo -c -o InfoFilterTest-InfoFilterTest.obj `if test -f 'InfoFilterTest.cpp'; then $(CYGPATH_W) 'InfoFilterTest.cpp'; else $(CYGPATH_W) '$(srcdir)/InfoFilterTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/InfoFilterTest-InfoFilterTest.Tpo $(DEPDIR)/InfoFilterTest-InfoFilterTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='InfoFilterTest.cpp' object='InfoFilterTest-InfoFilterTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(InfoFilterTest_CXXFLAGS) $(CXXFLAGS) -c -o InfoFilterTest-InfoFilterTest.obj `if test -f 'InfoFilterTest.cpp'; then $(CYGPATH_W) 'InfoFilterTest.cpp'; else $(CYGPATH_W) '$(srcdir)/InfoFilterTest.cpp'; fi` InformationInterfaceTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(InformationInterfaceTest_CXXFLAGS) $(CXXFLAGS) -MT InformationInterfaceTest-Test.o -MD -MP -MF $(DEPDIR)/InformationInterfaceTest-Test.Tpo -c -o InformationInterfaceTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/InformationInterfaceTest-Test.Tpo $(DEPDIR)/InformationInterfaceTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='InformationInterfaceTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(InformationInterfaceTest_CXXFLAGS) $(CXXFLAGS) -c -o InformationInterfaceTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp InformationInterfaceTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(InformationInterfaceTest_CXXFLAGS) $(CXXFLAGS) -MT InformationInterfaceTest-Test.obj -MD -MP -MF $(DEPDIR)/InformationInterfaceTest-Test.Tpo -c -o InformationInterfaceTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/InformationInterfaceTest-Test.Tpo $(DEPDIR)/InformationInterfaceTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='InformationInterfaceTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(InformationInterfaceTest_CXXFLAGS) $(CXXFLAGS) -c -o InformationInterfaceTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` InformationInterfaceTest-InformationInterfaceTest.o: 
InformationInterfaceTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(InformationInterfaceTest_CXXFLAGS) $(CXXFLAGS) -MT InformationInterfaceTest-InformationInterfaceTest.o -MD -MP -MF $(DEPDIR)/InformationInterfaceTest-InformationInterfaceTest.Tpo -c -o InformationInterfaceTest-InformationInterfaceTest.o `test -f 'InformationInterfaceTest.cpp' || echo '$(srcdir)/'`InformationInterfaceTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/InformationInterfaceTest-InformationInterfaceTest.Tpo $(DEPDIR)/InformationInterfaceTest-InformationInterfaceTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='InformationInterfaceTest.cpp' object='InformationInterfaceTest-InformationInterfaceTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(InformationInterfaceTest_CXXFLAGS) $(CXXFLAGS) -c -o InformationInterfaceTest-InformationInterfaceTest.o `test -f 'InformationInterfaceTest.cpp' || echo '$(srcdir)/'`InformationInterfaceTest.cpp InformationInterfaceTest-InformationInterfaceTest.obj: InformationInterfaceTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(InformationInterfaceTest_CXXFLAGS) $(CXXFLAGS) -MT InformationInterfaceTest-InformationInterfaceTest.obj -MD -MP -MF $(DEPDIR)/InformationInterfaceTest-InformationInterfaceTest.Tpo -c -o InformationInterfaceTest-InformationInterfaceTest.obj `if test -f 'InformationInterfaceTest.cpp'; then $(CYGPATH_W) 'InformationInterfaceTest.cpp'; else $(CYGPATH_W) '$(srcdir)/InformationInterfaceTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/InformationInterfaceTest-InformationInterfaceTest.Tpo $(DEPDIR)/InformationInterfaceTest-InformationInterfaceTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='InformationInterfaceTest.cpp' object='InformationInterfaceTest-InformationInterfaceTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(InformationInterfaceTest_CXXFLAGS) $(CXXFLAGS) -c -o InformationInterfaceTest-InformationInterfaceTest.obj `if test -f 'InformationInterfaceTest.cpp'; then $(CYGPATH_W) 'InformationInterfaceTest.cpp'; else $(CYGPATH_W) '$(srcdir)/InformationInterfaceTest.cpp'; fi` RegisteredServiceTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RegisteredServiceTest_CXXFLAGS) $(CXXFLAGS) -MT RegisteredServiceTest-Test.o -MD -MP -MF $(DEPDIR)/RegisteredServiceTest-Test.Tpo -c -o RegisteredServiceTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/RegisteredServiceTest-Test.Tpo $(DEPDIR)/RegisteredServiceTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='RegisteredServiceTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RegisteredServiceTest_CXXFLAGS) $(CXXFLAGS) -c -o RegisteredServiceTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp 
RegisteredServiceTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RegisteredServiceTest_CXXFLAGS) $(CXXFLAGS) -MT RegisteredServiceTest-Test.obj -MD -MP -MF $(DEPDIR)/RegisteredServiceTest-Test.Tpo -c -o RegisteredServiceTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/RegisteredServiceTest-Test.Tpo $(DEPDIR)/RegisteredServiceTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='RegisteredServiceTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RegisteredServiceTest_CXXFLAGS) $(CXXFLAGS) -c -o RegisteredServiceTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` RegisteredServiceTest-RegisteredServiceTest.o: RegisteredServiceTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RegisteredServiceTest_CXXFLAGS) $(CXXFLAGS) -MT RegisteredServiceTest-RegisteredServiceTest.o -MD -MP -MF $(DEPDIR)/RegisteredServiceTest-RegisteredServiceTest.Tpo -c -o RegisteredServiceTest-RegisteredServiceTest.o `test -f 'RegisteredServiceTest.cpp' || echo '$(srcdir)/'`RegisteredServiceTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/RegisteredServiceTest-RegisteredServiceTest.Tpo $(DEPDIR)/RegisteredServiceTest-RegisteredServiceTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='RegisteredServiceTest.cpp' object='RegisteredServiceTest-RegisteredServiceTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RegisteredServiceTest_CXXFLAGS) $(CXXFLAGS) -c -o RegisteredServiceTest-RegisteredServiceTest.o `test -f 'RegisteredServiceTest.cpp' || echo '$(srcdir)/'`RegisteredServiceTest.cpp RegisteredServiceTest-RegisteredServiceTest.obj: RegisteredServiceTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RegisteredServiceTest_CXXFLAGS) $(CXXFLAGS) -MT RegisteredServiceTest-RegisteredServiceTest.obj -MD -MP -MF $(DEPDIR)/RegisteredServiceTest-RegisteredServiceTest.Tpo -c -o RegisteredServiceTest-RegisteredServiceTest.obj `if test -f 'RegisteredServiceTest.cpp'; then $(CYGPATH_W) 'RegisteredServiceTest.cpp'; else $(CYGPATH_W) '$(srcdir)/RegisteredServiceTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/RegisteredServiceTest-RegisteredServiceTest.Tpo $(DEPDIR)/RegisteredServiceTest-RegisteredServiceTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='RegisteredServiceTest.cpp' object='RegisteredServiceTest-RegisteredServiceTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RegisteredServiceTest_CXXFLAGS) $(CXXFLAGS) -c -o RegisteredServiceTest-RegisteredServiceTest.obj `if test -f 'RegisteredServiceTest.cpp'; then $(CYGPATH_W) 'RegisteredServiceTest.cpp'; else $(CYGPATH_W) 
'$(srcdir)/RegisteredServiceTest.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-checkPROGRAMS clean-generic clean-libtool \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-TESTS check-am clean \ clean-checkPROGRAMS clean-generic clean-libtool ctags \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/infosys/test/PaxHeaders.7502/InfoFilterTest.cpp0000644000000000000000000000012412236456272025544 xustar000000000000000027 mtime=1383750842.365036 27 atime=1513200574.972707 30 ctime=1513200659.973747165 nordugrid-arc-5.4.2/src/hed/libs/infosys/test/InfoFilterTest.cpp0000644000175000002070000001124412236456272025613 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include using namespace Arc; class InfoFilterTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(InfoFilterTest); CPPUNIT_TEST(TestInfoFilter); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void TestInfoFilter(); }; static void add_libs(std::string& paths,const std::string& curpath) { std::string fname = Glib::build_filename(curpath,".libs"); if(file_test(fname, Glib::FILE_TEST_IS_DIR)) { paths+=":"+fname; }; Glib::Dir dir(curpath); for(;;) { fname = dir.read_name(); if(fname.empty()) break; fname=Glib::build_filename(curpath,fname); if(fname == ".libs" || file_test(fname, Glib::FILE_TEST_IS_SYMLINK)) { } else if(file_test(fname, Glib::FILE_TEST_IS_DIR)) { add_libs(paths,fname); }; }; } void InfoFilterTest::setUp() { std::string paths; std::string toppath = Glib::get_current_dir(); // Find top source directory by looking for configure file for(int n=0;n<15;++n) { std::string fname = Glib::build_filename(toppath, "configure"); if(file_test(fname, Glib::FILE_TEST_IS_REGULAR)) { // Go to all .libs directories add_libs(paths,toppath); break; }; toppath = Glib::path_get_dirname(toppath); }; Arc::SetEnv("ARC_PLUGIN_PATH",paths); } void InfoFilterTest::tearDown() { } class TestSecAttr: public Arc::SecAttr { public: TestSecAttr(const char* id):id_(id) { }; virtual ~TestSecAttr(void) { }; virtual operator bool(void) const { return true; }; virtual bool Export(SecAttrFormat format,XMLNode &val) const; protected: virtual bool equal(const SecAttr& /* b */) const { return false; }; private: std::string id_; }; bool TestSecAttr::Export(SecAttrFormat format,XMLNode &val) const { if(format != ARCAuth) return false; NS ns; ns["ra"]="http://www.nordugrid.org/schemas/request-arc"; val.Namespaces(ns); val.Name("ra:Request"); XMLNode item = val.NewChild("ra:RequestItem"); XMLNode subj = item.NewChild("ra:Subject"); XMLNode attr = subj.NewChild("ra:SubjectAttribute"); attr=id_; attr.NewAttribute("Type")="string"; attr.NewAttribute("AttributeId")="urn:testID"; return true; } void InfoFilterTest::TestInfoFilter() { // Service description document Arc::XMLNode infodoc("\ \n\ \n\ \n\ A lot\n\ Turltle-like\n\ \n\ \n\ Unknown\n\ \n\ \n\ 640kb enough for everyone\n\ Quantum computer\n\ \n\ \n"); // Policies Arc::XMLNode policy1("\ \n\ \n\ \n\ \n\ \n\ USER1\n\ \n\ \n\ \n\ "); Arc::XMLNode policy2("\ \n\ \n\ \n\ \n\ \n\ USER2\n\ \n\ \n\ \n\ "); // Service description with policies XMLNode infodoc_sec; infodoc.New(infodoc_sec); infodoc_sec["Resource"][0].NewChild(policy1); infodoc_sec["Resource"][0].NewAttribute("InfoFilterTag")="policy1"; infodoc_sec["Resource"][1].NewChild(policy2); infodoc_sec["Resource"][1].NewAttribute("InfoFilterTag")="policy2"; // Requestor's identifier MessageAuth user_id; TestSecAttr* user_attr = new TestSecAttr("USER1"); user_id.set("TEST",user_attr); // Filter InfoFilter filter(user_id); // External policies //Arc::NS ns; //XMLNode policy1(); std::list< std::pair > policies; // Service description for filtering XMLNode infodoc_filtered; // Applying filter infodoc_sec.New(infodoc_filtered); 
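    // Editorial note (a sketch of the intent, not part of the original test): if the
    // assertion below were enabled, InfoFilter::Filter() would be expected to edit
    // infodoc_filtered in place, keeping the Resource elements whose attached policy
    // (tagged via the InfoFilterTag attributes above) authorises the "USER1" identity
    // carried in the MessageAuth object, and removing the ones it does not authorise.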
//CPPUNIT_ASSERT(filter.Filter(infodoc_filtered)); } CPPUNIT_TEST_SUITE_REGISTRATION(InfoFilterTest); nordugrid-arc-5.4.2/src/hed/libs/infosys/test/PaxHeaders.7502/InformationInterfaceTest.cpp0000644000000000000000000000012411133615365027604 xustar000000000000000027 mtime=1232018165.041931 27 atime=1513200574.972707 30 ctime=1513200659.974747177 nordugrid-arc-5.4.2/src/hed/libs/infosys/test/InformationInterfaceTest.cpp0000644000175000002070000000404111133615365027650 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include class InformationInterfaceTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(InformationInterfaceTest); CPPUNIT_TEST(TestInformationInterface); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void TestInformationInterface(); }; void InformationInterfaceTest::setUp() { } void InformationInterfaceTest::tearDown() { } void InformationInterfaceTest::TestInformationInterface() { // Service description document Arc::XMLNode infodoc("\ \n\ \n\ \n\ A lot\n\ Turltle-like\n\ \n\ \n\ Unknown\n\ \n\ \n\ 640kb enough for everyone\n\ Quantum computer\n\ \n\ \n"); // Creating service container Arc::InformationContainer container(infodoc); //std::cout<<"Document:\n"< name; name.push_back("Resource"); //std::cout<<"Request for elements: "<<*(name.begin())< results = res.Result(); CPPUNIT_ASSERT_EQUAL((int)results.size(), int(2)); std::list::iterator r = results.begin(); CPPUNIT_ASSERT_EQUAL((std::string)(*r)["Memory"], std::string("A lot")); ++r; CPPUNIT_ASSERT_EQUAL((std::string)(*r)["Performance"], std::string("Quantum computer")); } CPPUNIT_TEST_SUITE_REGISTRATION(InformationInterfaceTest); nordugrid-arc-5.4.2/src/hed/libs/infosys/PaxHeaders.7502/InfoCache.h0000644000000000000000000000012411222120620023123 xustar000000000000000027 mtime=1246273936.463344 27 atime=1513200574.970707 30 ctime=1513200659.927746602 nordugrid-arc-5.4.2/src/hed/libs/infosys/InfoCache.h0000644000175000002070000000365611222120620023202 0ustar00mockbuildmock00000000000000#ifndef __ARC_INFO_CACHE_H__ #define __ARC_INFO_CACHE_H__ #include #include #include #include #include #include #ifdef WIN32 #include #endif namespace Arc { /// Stores XML document in filesystem split into parts /** */ class InfoCache { protected: std::string path_base; public: bool Query(const char *xml_path, const char *q, XMLNodeContainer &result); bool Query(const std::string &xml_path, std::string &q, XMLNodeContainer &result) { return Query(xml_path.c_str(), q.c_str(), result); }; bool Set(const char *xml_path, XMLNode &value); bool Set(const std::string &xml_path, XMLNode &value) { return Set(xml_path.c_str(), value) ; }; bool Get(const char *xml_path, XMLNodeContainer &result); bool Get(const std::string &xml_path, XMLNodeContainer &result) { return Get(xml_path.c_str(), result); }; bool Unset(const char *xml_path); bool Unset(const std::string &xml_path) { return Unset(xml_path.c_str()); }; /// Creates object according to configuration (see InfoCacheConfig.xsd) /** XML configuration is passed in cfg. Argument service_id is used to distiguish between various documents stored under same path - corresponding files will be stored in subdirectory with service_id name. 
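      A minimal usage sketch (editorial addition): the configuration file name,
      service id and XML paths below are illustrative only, and constructing the
      Config from a file name is an assumption, not something shown in this header.

        Arc::Config cfg("infocache_config.xml");  // assumed: configuration per InfoCacheConfig.xsd
        Arc::InfoCache cache(cfg, "my_service");  // files kept in a "my_service" subdirectory
        Arc::XMLNode value("<Load>low</Load>");   // XML fragment to store
        cache.Set("/InfoDoc/Resource", value);    // store the fragment under an XML path
        Arc::XMLNodeContainer result;
        cache.Get("/InfoDoc/Resource", result);   // fetch it back into a container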
*/ InfoCache(const Config &cfg, const std::string &service_id); ~InfoCache(); }; class InfoCacheInterface: public InformationInterface { protected: InfoCache cache; virtual void Get(const std::list& path,XMLNodeContainer& result); virtual void Get(XMLNode xpath,XMLNodeContainer& result); public: InfoCacheInterface(Config &cfg, std::string &service_id); virtual ~InfoCacheInterface(void); InfoCache& Cache(void) { return cache; }; }; } // namespace Arc #endif // __ARC_INFO_CACHE_H__ nordugrid-arc-5.4.2/src/hed/libs/infosys/PaxHeaders.7502/InfoRegister.h0000644000000000000000000000012411770364330023724 xustar000000000000000027 mtime=1340205272.046989 27 atime=1513200574.962707 30 ctime=1513200659.928746614 nordugrid-arc-5.4.2/src/hed/libs/infosys/InfoRegister.h0000644000175000002070000001774311770364330024005 0ustar00mockbuildmock00000000000000#ifndef __ARC_INFOSYS_REGISTER_H__ #define __ARC_INFOSYS_REGISTER_H__ #include #include #include #include #include #include #include #include #include #define GLUE2_D42_NAMESPACE "http://schemas.ogf.org/glue/2008/05/spec_2.0_d42_r1" #define REGISTRATION_NAMESPACE "http://www.nordugrid.org/schemas/registartion/2008/08" #define ISIS_NAMESPACE "http://www.nordugrid.org/schemas/isis/2008/08" #define REGISTRATION_CONFIG_NAMESPACE "http://www.nordugrid.org/schemas/InfoRegisterConfig/2008" namespace Arc { class InfoRegisterContainer; /// Registration to Information Indexing Service /** This class represents service registering to Information Indexing Service. It does not perform registration itself. It only collects configuration information. Configuration is as described in InfoRegisterConfig.xsd for element InfoRegistration. */ class InfoRegister { friend class InfoRegisterContainer; private: // Registration period long int reg_period_; // Registration information std::string serviceid; std::string expiration; std::string endpoint; // Associated service - it is used to fetch information document Service *service_; NS ns_; public: InfoRegister(XMLNode node, Service *service_); ~InfoRegister(); operator bool(void) { return service_; }; bool operator!(void) { return !service_; }; long int getPeriod(void) const { return reg_period_; }; std::string getServiceID(void) { if (serviceid.empty()) return ""; else return serviceid; }; std::string getEndpoint(void) { if (endpoint.empty()) return ""; else return endpoint; }; std::string getExpiration(void) { if (expiration.empty()) return ""; else return expiration; }; Service* getService(void) { return service_; }; }; /// Handling registrations to multiple Information Indexing Services class InfoRegisters { private: std::list registers_; public: /// Constructor creates InfoRegister objects according to configuration /** Inside cfg elements InfoRegister elements are found and for each corresponding InfoRegister object is created. Those objects are destroyed in destructor of this class. */ InfoRegisters(XMLNode cfg, Service *service); ~InfoRegisters(void); /// Dinamically add one more InfoRegister object bool addRegister(XMLNode cfg, Service *service); }; // Data stucture for the InfoRegistrar class. 
struct Register_Info_Type{ // Necessary information for the registration InfoRegister* p_register; Period period; Time next_registration; // ServiceID extracted from the first provided registration message std::string serviceid_; // Registration information std::string serviceid; std::string expiration; std::string endpoint; }; // Data structure for describe a remote ISIS struct ISIS_description { std::string url; std::string key; std::string cert; std::string proxy; std::string cadir; std::string cafile; }; /// Registration process associated with particular ISIS /** Instance of this class starts thread which takes care passing information about associated services to ISIS service defined in configuration. Configuration is as described in InfoRegister.xsd for element InfoRegistrar. */ class InfoRegistrar { friend class InfoRegisterContainer; private: /// Constructor creates object according to configuration. /** This object can only be created by InfoRegisterContainer which takes care of finding configuration elements in configuration document. */ InfoRegistrar(XMLNode cfg); // Configuration parameters std::string id_; // Type of the information system connected to (EMIREG, ISIS, etc.) std::string infosys_type; std::string path; int retry; // Security attributes std::string key_; std::string cert_; std::string proxy_; std::string cadir_; std::string cafile_; // Associated services std::list reg_; // Mutex protecting reg_ list Glib::Mutex lock_; // Condition signaled when thread has to exit Glib::Cond cond_exit_; // Condition signaled when thread exited Glib::Cond cond_exited_; // InfoRegistrar object creation time moment Time creation_time; // Time window providing some flexibility to avoid the casual slides Period stretch_window; // ISIS handle attributes & functions ISIS_description defaultBootstrapISIS; ISIS_description myISIS; bool myISISList_initialized; int originalISISCount; int call_depth; std::vector myISISList; void initISIS(XMLNode cfg); void removeISIS(ISIS_description isis); void getISISList(ISIS_description isis); ISIS_description getISIS(void); // End of ISIS handle attributes & functions // Sending functions bool already_registered; void sendRegistrationToISIS(); void sendRegistrationToEMIREG(); void sendDeleteToISIS(std::list::iterator r); void sendDeleteToEMIREG(std::list::iterator r); public: ~InfoRegistrar(void); operator bool(void) { return !myISIS.url.empty(); }; bool operator!(void) { return myISIS.url.empty(); }; /// Performs registartion in a loop. /** Never exits unless there is a critical error or requested by destructor. Must be called only once. */ void registration(void); /// Adds new service to list of handled services. /** Service is described by it's InfoRegister object which must be valid as long as this object is functional. */ bool addService(InfoRegister*, XMLNode); /// Removes service from list of handled services. bool removeService(InfoRegister*); const std::string& id(void) { return id_; }; bool empty(void); }; /// Singleton class for scanning configuration and storing /// refernces to registration elements. // TODO: Make it per chain, singleton solution reduces flexibility. class InfoRegisterContainer { private: static InfoRegisterContainer* instance_; //std::list regs_; std::list regr_; // Mutex protecting regr_ list Glib::Mutex lock_; InfoRegisterContainer(void); InfoRegisterContainer(const InfoRegisterContainer&) {}; public: ~InfoRegisterContainer(void); static InfoRegisterContainer& Instance(void); /// Adds ISISes to list of handled services. 
/** Supplied configuration document is scanned for InfoRegistrar elements and those are turned into InfoRegistrar classes for handling connection to ISIS service each. */ InfoRegistrar *addRegistrar(XMLNode doc); /// Adds service to list of handled /** This method must be called first time after last addRegistrar was called - services will be only associated with ISISes which are already added. Argument ids contains list of ISIS identifiers to which service is associated. If ids is empty then service is associated to all ISISes currently added. If argument cfg is available and no ISISes are configured then addRegistrars is called with cfg used as configuration document. */ void addService(InfoRegister* reg,const std::list& ids,XMLNode cfg = XMLNode()); // Disassociates service from all configured ISISes. /** This method must be called if service being destroyed. */ void removeService(InfoRegister* reg); }; } // namespace Arc #endif nordugrid-arc-5.4.2/src/hed/libs/infosys/PaxHeaders.7502/InfoRegister.cpp0000644000000000000000000000012412075123170024251 xustar000000000000000027 mtime=1358210680.472853 27 atime=1513200574.975707 30 ctime=1513200659.936746712 nordugrid-arc-5.4.2/src/hed/libs/infosys/InfoRegister.cpp0000644000175000002070000012312312075123170024320 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "InfoRegister.h" namespace Arc { static Logger logger_(Logger::rootLogger, "InfoSys"); static void reg_thread(void *data) { InfoRegistrar *self = (InfoRegistrar *)data; { // Very important!!!: Delete this block imediately!!! // Sleep and exit if interrupted by request to exit unsigned int sleep_time = 15; //seconds logger_.msg(VERBOSE, "InfoRegistrar thread waiting %d seconds for the all Registers elements creation.", sleep_time); sleep(sleep_time); } self->registration(); } // ------------------------------------------------------------------- InfoRegister::InfoRegister(XMLNode cfg, Service *service):reg_period_(0),service_(service) { reg_period_ = -1; ns_["isis"] = ISIS_NAMESPACE; ns_["glue2"] = GLUE2_D42_NAMESPACE; ns_["register"] = REGISTRATION_NAMESPACE; if(!cfg) return; if(cfg.Name() != "InfoRegister") { if ( bool(cfg["NoRegister"])) return; cfg = cfg["InfoRegister"]; } if(!cfg) return; // parse config std::string s_reg_period = (std::string)cfg["Period"]; if (!s_reg_period.empty()) { Period p(s_reg_period); reg_period_ = p.GetPeriod(); if (reg_period_ < 120) reg_period_ = 120; } std::string s_serviceid = (std::string)cfg["ServiceID"]; if (!s_serviceid.empty()) { serviceid = s_serviceid; } if ((bool)cfg["Endpoint"]) endpoint = (std::string)cfg["Endpoint"]; if ((bool)cfg["Expiration"]) { expiration = (std::string)cfg["Expiration"]; Period p(expiration); if (p.GetPeriod() < 120) expiration = "PT2M"; } //VERBOSE// { std::string configuration_string; cfg.GetXML(configuration_string, true); logger_.msg(VERBOSE, "InfoRegister created with config:\n%s", configuration_string); } // Add service to registration list. Optionally only for // registration through specific registrants. 
std::list ids; for(XMLNode r = cfg["Registrar"];(bool)r;++r) { std::string id = (std::string) r["URL"]; if(!id.empty()) { ids.push_back(id); logger_.msg(VERBOSE, "InfoRegister to be registered in Registrar %s", id); } else { logger_.msg(WARNING, "Discarding Registrar because the \"URL\" element is missing or empty."); }; }; InfoRegisterContainer::Instance().addService(this,ids,cfg); } InfoRegister::~InfoRegister(void) { // This element is supposed to be destroyed with service. // Hence service should not be restered anymore. // TODO: initiate un-register of service InfoRegisterContainer::Instance().removeService(this); } // ------------------------------------------------------------------- InfoRegisterContainer* InfoRegisterContainer::instance_ = NULL; InfoRegisterContainer& InfoRegisterContainer::Instance(void) { if(!instance_) instance_ = new InfoRegisterContainer; return *instance_; } InfoRegisterContainer::InfoRegisterContainer(void) { } InfoRegisterContainer::~InfoRegisterContainer(void) { Glib::Mutex::Lock lock(lock_); for(std::list::iterator r = regr_.begin(); r != regr_.end();++r) { delete (*r); }; } InfoRegistrar *InfoRegisterContainer::addRegistrar(XMLNode node) { //Glib::Mutex::Lock lock(lock_); InfoRegistrar* r = new InfoRegistrar(node); if(!(r)) { delete r; return NULL; } regr_.push_back(r); CreateThreadFunction(®_thread, r); return r; } void InfoRegisterContainer::addService(InfoRegister* reg,const std::list& ids,XMLNode cfg) { // Add to registrars Glib::Mutex::Lock lock(lock_); for(std::list::const_iterator i = ids.begin(); i != ids.end();++i) { bool id_found = false; for(std::list::iterator r = regr_.begin(); r != regr_.end();++r) { if((*i).find((*r)->id()) != std::string::npos) { logger_.msg(VERBOSE, "InfoRegistrar id \"%s\" has been found.", (*i)); (*r)->addService(reg, cfg); id_found = true; } } if (!id_found) { // id appears at first time - InfoRegistrar need to be created logger_.msg(VERBOSE, "InfoRegistrar id \"%s\" was not found. New registrar created", (*i)); for(XMLNode node = cfg["Registrar"];(bool)node;++node) { if ((*i) == (std::string)node["URL"]) { InfoRegistrar *r = addRegistrar(node); if (r != NULL) r->addService(reg, cfg); } } } } } void InfoRegisterContainer::removeService(InfoRegister* reg) { // If this method is called that means service is most probably // being deleted. Glib::Mutex::Lock lock(lock_); for(std::list::iterator r = regr_.begin(); r != regr_.end();) { InfoRegistrar* rega = *r; if(rega) { // just in case // Detach service if(rega->removeService(reg)) { }; // If corresponding InfoRegistrar has no services attached delete it too. if(rega->empty()) { r = regr_.erase(r); delete rega; continue; }; }; ++r; }; } // ------------------------------------------------------------------- InfoRegistrar::InfoRegistrar(XMLNode cfg):stretch_window("PT20S"), already_registered(false) { id_=(std::string)cfg["URL"]; if ((bool)cfg["Retry"]) { if (!((std::string)cfg["Retry"]).empty()) { if(EOF == sscanf(((std::string)cfg["Retry"]).c_str(), "%d", &retry) || retry < 0) { logger_.msg(ERROR, "Configuration error. Retry: \"%s\" is not a valid value. 
Default value will be used.",(std::string)cfg["Retry"]); retry = 5; } } else retry = 5; } else retry = 5; logger_.msg(VERBOSE, "Retry: %d", retry); // Parsing security attributes key_ = (std::string)cfg["KeyPath"]; cert_ = (std::string)cfg["CertificatePath"]; proxy_ = (std::string)cfg["ProxyPath"]; cadir_ = (std::string)cfg["CACertificatesDir"]; cafile_ = (std::string)cfg["CACertificatePath"]; logger_.msg(VERBOSE, "Key: %s, cert: %s", key_, cert_); if (id_.substr(0,7) == "EMIREG:") { infosys_type = "EMIREG"; path = "serviceadmin"; id_ = id_.substr(7); if ( id_.at(id_.length()-1) != '/' ){ path.insert(0,"/"); // now the new path is "/serviceadmin" } } else if (id_.substr(0,5) == "ISIS:") { infosys_type = "ISIS"; id_ = id_.substr(5); cfg["URL"] = id_; initISIS(cfg); } else { // default behaviour infosys_type = "ISIS"; initISIS(cfg); } time_t rawtime; time ( &rawtime ); //current time gmtime ( &rawtime ); Time ctime(rawtime); creation_time = ctime; } bool InfoRegistrar::addService(InfoRegister* reg, XMLNode cfg) { if (!bool(cfg)){ logger_.msg(VERBOSE, "The service won't be registered."); return true; } if (!(bool)cfg["Period"] ) { logger_.msg(ERROR, "Configuration error. Missing mandatory \"Period\" element."); return false; } if (!(bool)cfg["Endpoint"] ) { logger_.msg(ERROR, "Configuration error. Missing mandatory \"Endpoint\" element."); return false; } if (!(bool)cfg["Expiration"] ) { logger_.msg(ERROR, "Configuration error. Missing mandatory \"Expiration\" element."); return false; } Glib::Mutex::Lock lock(lock_); for(std::list::iterator r = reg_.begin(); r!=reg_.end();++r) { if(reg == r->p_register) { logger_.msg(VERBOSE, "Service was already registered to the InfoRegistrar connecting to infosys %s.", id_); return false; } } Register_Info_Type reg_info; reg_info.p_register = reg; std::string current_serviceid = reg->getServiceID(); std::string current_expiration = reg->getExpiration(); std::string current_endpoint = reg->getEndpoint(); Period period(reg->getPeriod()); for(XMLNode node = cfg["Registrar"];(bool)node;++node) { if ( (std::string)node["URL"] == id_ ) { if (! ((std::string)node["Period"]).empty() ) { Period current_period((std::string)node["Period"]); period = current_period; } if (! ((std::string)node["ServiceID"]).empty() ) { current_serviceid = (std::string)node["ServiceID"]; } if (! ((std::string)node["Endpoint"]).empty() ) { current_endpoint = (std::string)node["Endpoint"]; } if (! 
((std::string)node["Expiration"]).empty() ) { current_expiration = (std::string)node["Expiration"]; } } } reg_info.period = period; reg_info.serviceid = current_serviceid; reg_info.expiration = current_expiration; reg_info.endpoint = current_endpoint; reg_info.next_registration = creation_time.GetTime(); reg_.push_back(reg_info); logger_.msg(VERBOSE, "Service is successfully added to the InfoRegistrar connecting to infosys %s.", id_); return true; } bool InfoRegistrar::removeService(InfoRegister* reg) { Glib::Mutex::Lock lock(lock_); for(std::list::iterator r = reg_.begin(); r!=reg_.end();++r) { if(reg == r->p_register) { if ( infosys_type == "ISIS" ) { sendDeleteToISIS(r); } else if ( infosys_type == "EMIREG" ) { sendDeleteToEMIREG(r); } return true; }; }; logger_.msg(VERBOSE, "Unregistred Service can not be removed."); return false; } bool InfoRegistrar::empty(void) { Glib::Mutex::Lock lock(lock_); return reg_.empty(); } void InfoRegistrar::sendDeleteToISIS(std::list::iterator r) { NS reg_ns; reg_ns["glue2"] = GLUE2_D42_NAMESPACE; reg_ns["isis"] = ISIS_NAMESPACE; time_t current_time; time ( ¤t_time ); //current time tm * ptm; ptm = gmtime ( ¤t_time ); std::string mon_prefix = (ptm->tm_mon+1 < 10)?"0":""; std::string day_prefix = (ptm->tm_mday < 10)?"0":""; std::string hour_prefix = (ptm->tm_hour < 10)?"0":""; std::string min_prefix = (ptm->tm_min < 10)?"0":""; std::string sec_prefix = (ptm->tm_sec < 10)?"0":""; std::stringstream out; out << ptm->tm_year+1900<<"-"<tm_mon+1<<"-"<tm_mday<<"T"; out << hour_prefix<tm_hour<<":"<tm_min<<":"<tm_sec; out << "+0000"; PayloadSOAP request(reg_ns); XMLNode op = request.NewChild("isis:RemoveRegistrations"); op.NewChild("ServiceID") = r->serviceid_; op.NewChild("MessageGenerationTime") = out.str(); // send MCCConfig mcc_cfg; ISIS_description usedISIS = getISIS(); if (!key_.empty()) mcc_cfg.AddPrivateKey(key_); if (!cert_.empty()) mcc_cfg.AddCertificate(cert_); if (!proxy_.empty()) mcc_cfg.AddProxy(proxy_); if (!cadir_.empty()) mcc_cfg.AddCADir(cadir_); if (!cafile_.empty()) mcc_cfg.AddCAFile(cafile_); logger_.msg(VERBOSE, "Key: %s, Cert: %s, Proxy: %s, CADir: %s CAPath", key_, cert_, proxy_, cadir_, cafile_); int retry_ = retry; while ( retry_ >= 1 ){ PayloadSOAP *response = NULL; ClientSOAP cli(mcc_cfg,Arc::URL(usedISIS.url),60); MCC_Status status = cli.process(&request, &response); if(response) { std::string response_string; (*response).GetXML(response_string, true); logger_.msg(VERBOSE, "Response from the ISIS: %s", response_string); } if ((!status.isOk()) || (!response)) { logger_.msg(ERROR, "Failed to remove registration from %s ISIS", usedISIS.url); } else { if(!(bool)(*response)["RemoveRegistrationResponseElement"]) { logger_.msg(VERBOSE, "Successfuly removed registration from ISIS (%s)", usedISIS.url); delete response; break; } else { int i=0; while ((bool)(*response)["RemoveRegistrationResponseElement"][i]) { logger_.msg(VERBOSE, "Failed to remove registration from ISIS (%s) - %s", usedISIS.url, std::string((*response)["RemoveRegistrationResponseElement"][i]["Fault"])); i++; } } } retry_--; logger_.msg(VERBOSE, "Retry connecting to the ISIS (%s) %d time(s).", usedISIS.url, retry-retry_); delete response; sleep(1); } if (retry_ == 0 ) logger_.msg(VERBOSE, "ISIS (%s) is not available.", usedISIS.url); reg_.erase(r); logger_.msg(VERBOSE, "Service removed from InfoRegistrar connecting to infosys %s.", id_); } void InfoRegistrar::sendDeleteToEMIREG(std::list::iterator r) { // send PayloadRaw http_request; HTTPClientInfo http_info; std::multimap 
http_attributes; http_attributes.insert( std::pair("Accept","text/html, image/gif, image/jpeg, *; q=.2, */*; q=.2")); URL service_url(id_+path+"?Service_Endpoint_URL="+r->endpoint); MCCConfig mcc_cfg; if (!key_.empty()) mcc_cfg.AddPrivateKey(key_); if (!cert_.empty()) mcc_cfg.AddCertificate(cert_); if (!proxy_.empty()) mcc_cfg.AddProxy(proxy_); if (!cadir_.empty()) mcc_cfg.AddCADir(cadir_); if (!cafile_.empty()) mcc_cfg.AddCAFile(cafile_); logger_.msg(VERBOSE, "Key: %s, Cert: %s, Proxy: %s, CADir: %s CAPath", key_, cert_, proxy_, cadir_, cafile_); int retry_ = retry; while ( retry_ >= 1 ){ PayloadRawInterface *http_response = NULL; Arc::ClientHTTP cli(mcc_cfg, service_url); MCC_Status status = cli.process("DELETE", http_attributes, &http_request, &http_info, &http_response); if ((!status.isOk()) || (!http_response)) { logger_.msg(ERROR, "Failed to remove registration from %s EMIRegistry", id_); } else { if(http_info.code == 200) { logger_.msg(VERBOSE, "Successfuly removed registration from EMIRegistry (%s)", id_); delete http_response; break; } } retry_--; logger_.msg(VERBOSE, "Retry connecting to the EMIRegistry (%s) %d time(s).", id_, retry-retry_); delete http_response; sleep(1); } if (retry_ == 0 ) logger_.msg(VERBOSE, "EMIRegistry (%s) is not available.", id_); reg_.erase(r); logger_.msg(VERBOSE, "Service removed from InfoRegistrar connecting to infosys %s.", id_); } InfoRegistrar::~InfoRegistrar(void) { // Registering thread must be stopped before destructor succeeds Glib::Mutex::Lock lock(lock_); cond_exit_.signal(); cond_exited_.wait(lock_); } class CondExit { private: Glib::Cond& cond_; public: CondExit(Glib::Cond& cond):cond_(cond) { }; ~CondExit(void) { cond_.signal(); }; }; void InfoRegistrar::registration(void) { Glib::Mutex::Lock lock(lock_); CondExit cond(cond_exited_); if ( infosys_type == "ISIS" ) { sendRegistrationToISIS(); } else if ( infosys_type == "EMIREG" ) { sendRegistrationToEMIREG(); } } void InfoRegistrar::sendRegistrationToISIS() { ISIS_description usedISIS; std::string isis_name; if(!myISISList_initialized) getISISList(myISIS); myISISList_initialized = true; while(reg_.size() > 0) { usedISIS = getISIS(); isis_name = usedISIS.url; logger_.msg(VERBOSE, "Registration starts: %s",isis_name); logger_.msg(VERBOSE, "reg_.size(): %d",reg_.size()); if(usedISIS.url.empty()) { logger_.msg(WARNING, "Registrant has no proper URL specified. Registration end."); return; } NS reg_ns; reg_ns["glue2"] = GLUE2_D42_NAMESPACE; reg_ns["isis"] = ISIS_NAMESPACE; // Registration algorithm is stupid and straightforward. 
// This part has to be redone to fit P2P network od ISISes time_t current_time; time ( ¤t_time ); //current time tm * ptm; ptm = gmtime ( ¤t_time ); Time min_reg_time(-1); XMLNode send_doc(reg_ns, ""); std::string mon_prefix = (ptm->tm_mon+1 < 10)?"0":""; std::string day_prefix = (ptm->tm_mday < 10)?"0":""; std::string hour_prefix = (ptm->tm_hour < 10)?"0":""; std::string min_prefix = (ptm->tm_min < 10)?"0":""; std::string sec_prefix = (ptm->tm_sec < 10)?"0":""; std::stringstream out; out << ptm->tm_year+1900<<"-"<tm_mon+1<<"-"<tm_mday<<"T"; out << hour_prefix<tm_hour<<":"<tm_min<<":"<tm_sec; out << "+0000"; for(std::list::iterator r = reg_.begin(); r!=reg_.end();++r) { if ( (r->next_registration).GetTime() <= current_time + stretch_window.GetPeriod() ){ logger_.msg(VERBOSE,"Create RegEntry XML element"); Time current(current_time); // set the next registration time r->next_registration = current + r->period; XMLNode services_doc(reg_ns,"RegEntry"); if(!((r->p_register)->getService())) continue; (r->p_register)->getService()->RegistrationCollector(services_doc); // Fill attributes from InfoRegister configuration if (!((bool)services_doc["SrcAdv"]["EPR"]["Address"]) && !((r->endpoint).empty()) ) { if (!(bool)services_doc["SrcAdv"]) services_doc.NewChild("SrcAdv"); if (!(bool)services_doc["SrcAdv"]["EPR"]) services_doc["SrcAdv"].NewChild("EPR"); if (!(bool)services_doc["SrcAdv"]["EPR"]["Address"]) services_doc["SrcAdv"]["EPR"].NewChild("Address"); services_doc["SrcAdv"]["EPR"]["Address"] = r->endpoint; } if (!((bool)services_doc["MetaSrcAdv"]["ServiceID"]) && !((r->serviceid).empty()) ) { if (!(bool)services_doc["MetaSrcAdv"]) services_doc.NewChild("MetaSrcAdv"); if (!(bool)services_doc["MetaSrcAdv"]["ServiceID"]) services_doc["MetaSrcAdv"].NewChild("ServiceID"); services_doc["MetaSrcAdv"]["ServiceID"] = r->serviceid; } if (!((bool)services_doc["MetaSrcAdv"]["Expiration"]) && !((r->expiration).empty()) ) { if (!(bool)services_doc["MetaSrcAdv"]) services_doc.NewChild("MetaSrcAdv"); if (!(bool)services_doc["MetaSrcAdv"]["Expiration"]) services_doc["MetaSrcAdv"].NewChild("Expiration"); services_doc["MetaSrcAdv"]["Expiration"] = r->expiration; } // Possible completion of the services_doc if (!((bool)services_doc["MetaSrcAdv"]["ServiceID"]) && ((bool)services_doc["SrcAdv"]["EPR"]["Address"])) { services_doc["MetaSrcAdv"].NewChild("ServiceID") = (std::string) services_doc["SrcAdv"]["EPR"]["Address"]; logger_.msg(VERBOSE, "ServiceID attribute calculated from Endpoint Reference"); } if (!(bool)services_doc["MetaSrcAdv"]["GenTime"]) { services_doc["MetaSrcAdv"].NewChild("GenTime") = out.str(); logger_.msg(VERBOSE, "Generation Time attribute calculated from current time"); } // Store the sent ServiceID for the clear shutdown RemoveRegistration operation, if necessary if ( (r->serviceid_).empty() && (bool)services_doc["MetaSrcAdv"]["ServiceID"]) { r->serviceid_ = (std::string) services_doc["MetaSrcAdv"]["ServiceID"]; logger_.msg(VERBOSE,"ServiceID stored: %s", r->serviceid_); } // TODO check the received registration information bool valid_services_doc = true; if (!(services_doc.Name() == "RegEntry")) { logger_.msg(ERROR,"Missing service document provided by the service %s", r->serviceid_); valid_services_doc = false; } if (!((bool) services_doc["MetaSrcAdv"]) || !((bool) services_doc["MetaSrcAdv"]["Expiration"])) { logger_.msg(ERROR,"Missing MetaServiceAdvertisment or Expiration values provided by the service %s", r->serviceid_); valid_services_doc = false; } if (!((bool) services_doc["SrcAdv"]) || 
!((bool) services_doc["SrcAdv"]["Type"])) { logger_.msg(ERROR,"Missing Type value provided by the service %s", r->serviceid_); valid_services_doc = false; } if (!((bool) services_doc["SrcAdv"]) || !((bool) services_doc["SrcAdv"]["EPR"]) || !((bool) services_doc["SrcAdv"]["EPR"]["Address"])) { logger_.msg(ERROR,"Missing Endpoint Reference value provided by the service %s", r->serviceid_); valid_services_doc = false; } if (valid_services_doc) send_doc.NewChild(services_doc); } // conditioned minimum search if ( min_reg_time.GetTime() == -1 ){ min_reg_time = r->next_registration; } else if ( r->next_registration < min_reg_time ) { min_reg_time = r->next_registration; } } // prepare for sending to ISIS if ( min_reg_time.GetTime() != -1 ) { logger_.msg(VERBOSE, "Registering to %s ISIS", isis_name); PayloadSOAP request(reg_ns); XMLNode op = request.NewChild("isis:Register"); XMLNode header = op.NewChild("isis:Header"); header.NewChild("MessageGenerationTime") = out.str(); // create body for(XMLNode node = send_doc["RegEntry"];(bool)node;++node) { op.NewChild(node); } // send MCCConfig mcc_cfg; if (!key_.empty()) mcc_cfg.AddPrivateKey(key_); if (!cert_.empty()) mcc_cfg.AddCertificate(cert_); if (!proxy_.empty()) mcc_cfg.AddProxy(proxy_); if (!cadir_.empty()) mcc_cfg.AddCADir(cadir_); if (!cafile_.empty()) mcc_cfg.AddCAFile(cafile_); logger_.msg(VERBOSE, "Key: %s, Cert: %s, Proxy: %s, CADir: %s, CAFile", key_, cert_, proxy_, cadir_, cafile_); {std::string services_document; op.GetDoc(services_document, true); logger_.msg(VERBOSE, "Sent RegEntries: %s", services_document); } //logger_.msg(VERBOSE, "Call the ISIS.process method."); int retry_ = retry; while ( retry_ >= 1 ) { PayloadSOAP *response = NULL; ClientSOAP cli(mcc_cfg,Arc::URL(usedISIS.url),60); MCC_Status status = cli.process(&request, &response); // multiple tries if ((!status.isOk()) || (!response) || (!bool((*response)["RegisterResponse"]))) { logger_.msg(ERROR, "Error during registration to %s ISIS", isis_name); } else { XMLNode fault = (*response)["Fault"]; if(!fault) { std::string response_string; (*response)["RegisterResponse"].GetXML(response_string, true); logger_.msg(VERBOSE, "Response from the ISIS: %s", response_string); logger_.msg(VERBOSE, "Successful registration to ISIS (%s)", isis_name); delete response; break; } else { logger_.msg(VERBOSE, "Failed to register to ISIS (%s) - %s", isis_name, std::string(fault["Description"])); } } retry_--; logger_.msg(VERBOSE, "Retry connecting to the ISIS (%s) %d time(s).", isis_name, retry-retry_); delete response; sleep(1); } if ( retry_ == 0 ) removeISIS(usedISIS); } // end of the connection with the ISIS // Thread sleeping long int period_ = min_reg_time.GetTime() - current_time; logger_.msg(VERBOSE, "Registration ends: %s",isis_name); logger_.msg(VERBOSE, "Waiting period is %d second(s).",period_); // The next line is removed for infinite operation // if(period_ <= 0) break; // One time registration Glib::TimeVal etime; etime.assign_current_time(); etime.add_milliseconds(period_*1000L); // Sleep and exit if interrupted by request to exit if(cond_exit_.timed_wait(lock_,etime)) break; //sleep(period_); } logger_.msg(VERBOSE, "Registration exit: %s",isis_name); } void InfoRegistrar::sendRegistrationToEMIREG() { while(reg_.size() > 0) { logger_.msg(VERBOSE, "Registration starts: %s",id_); logger_.msg(VERBOSE, "reg_.size(): %d",reg_.size()); if(id_.empty()) { logger_.msg(WARNING, "Registrant has no proper URL specified. 
Registration end."); return; } time_t current_time; time ( ¤t_time ); //current time tm * ptm; ptm = gmtime ( ¤t_time ); Time min_reg_time(-1); NS reg_ns; XMLNode send_doc(reg_ns, ""); std::string mon_prefix = (ptm->tm_mon+1 < 10)?"0":""; std::string day_prefix = (ptm->tm_mday < 10)?"0":""; std::string hour_prefix = (ptm->tm_hour < 10)?"0":""; std::string min_prefix = (ptm->tm_min < 10)?"0":""; std::string sec_prefix = (ptm->tm_sec < 10)?"0":""; std::stringstream out; out << ptm->tm_year+1900<<"-"<tm_mon+1<<"-"<tm_mday<<"T"; out << hour_prefix<tm_hour<<":"<tm_min<<":"<tm_sec; out << "+0000"; for(std::list::iterator r = reg_.begin(); r!=reg_.end();++r) { if ( (r->next_registration).GetTime() <= current_time + stretch_window.GetPeriod() ){ logger_.msg(VERBOSE,"Create RegEntry XML element"); Time current(current_time); // set the next registration time r->next_registration = current + r->period; XMLNode services_doc(reg_ns,"RegEntry"); if(!((r->p_register)->getService())) continue; (r->p_register)->getService()->RegistrationCollector(services_doc); // Fill attributes from InfoRegister configuration if (!((bool)services_doc["SrcAdv"]["EPR"]["Address"]) && !((r->endpoint).empty()) ) { if (!(bool)services_doc["SrcAdv"]) services_doc.NewChild("SrcAdv"); if (!(bool)services_doc["SrcAdv"]["EPR"]) services_doc["SrcAdv"].NewChild("EPR"); if (!(bool)services_doc["SrcAdv"]["EPR"]["Address"]) services_doc["SrcAdv"]["EPR"].NewChild("Address"); services_doc["SrcAdv"]["EPR"]["Address"] = r->endpoint; } if (!((bool)services_doc["MetaSrcAdv"]["ServiceID"]) && !((r->serviceid).empty()) ) { if (!(bool)services_doc["MetaSrcAdv"]) services_doc.NewChild("MetaSrcAdv"); if (!(bool)services_doc["MetaSrcAdv"]["ServiceID"]) services_doc["MetaSrcAdv"].NewChild("ServiceID"); services_doc["MetaSrcAdv"]["ServiceID"] = r->serviceid; } if (!((bool)services_doc["MetaSrcAdv"]["Expiration"]) && !((r->expiration).empty()) ) { if (!(bool)services_doc["MetaSrcAdv"]) services_doc.NewChild("MetaSrcAdv"); if (!(bool)services_doc["MetaSrcAdv"]["Expiration"]) services_doc["MetaSrcAdv"].NewChild("Expiration"); services_doc["MetaSrcAdv"]["Expiration"] = r->expiration; } // Possible completion of the services_doc if (!((bool)services_doc["MetaSrcAdv"]["ServiceID"]) && ((bool)services_doc["SrcAdv"]["EPR"]["Address"])) { services_doc["MetaSrcAdv"].NewChild("ServiceID") = (std::string) services_doc["SrcAdv"]["EPR"]["Address"]; logger_.msg(VERBOSE, "ServiceID attribute calculated from Endpoint Reference"); } if (!(bool)services_doc["MetaSrcAdv"]["GenTime"]) { services_doc["MetaSrcAdv"].NewChild("GenTime") = out.str(); logger_.msg(VERBOSE, "Generation Time attribute calculated from current time"); } // Store the sent ServiceID for the clear shutdown RemoveRegistration operation, if necessary if ( (r->serviceid_).empty() && (bool)services_doc["MetaSrcAdv"]["ServiceID"]) { r->serviceid_ = (std::string) services_doc["MetaSrcAdv"]["ServiceID"]; logger_.msg(VERBOSE,"ServiceID stored: %s", r->serviceid_); } // TODO check the received registration information bool valid_services_doc = true; if (!(services_doc.Name() == "RegEntry")) { logger_.msg(ERROR,"Missing service document provided by the service %s", r->serviceid_); valid_services_doc = false; } if (!((bool) services_doc["MetaSrcAdv"]) || !((bool) services_doc["MetaSrcAdv"]["Expiration"])) { logger_.msg(ERROR,"Missing MetaServiceAdvertisment or Expiration values provided by the service %s", r->serviceid_); valid_services_doc = false; } if (!((bool) services_doc["SrcAdv"]) || !((bool) 
services_doc["SrcAdv"]["Type"])) { logger_.msg(ERROR,"Missing Type value provided by the service %s", r->serviceid_); valid_services_doc = false; } if (!((bool) services_doc["SrcAdv"]) || !((bool) services_doc["SrcAdv"]["EPR"]) || !((bool) services_doc["SrcAdv"]["EPR"]["Address"])) { logger_.msg(ERROR,"Missing Endpoint Reference value provided by the service %s", r->serviceid_); valid_services_doc = false; } if (valid_services_doc) send_doc.NewChild(services_doc); } // conditioned minimum search if ( min_reg_time.GetTime() == -1 ){ min_reg_time = r->next_registration; } else if ( r->next_registration < min_reg_time ) { min_reg_time = r->next_registration; } } // prepare for sending to EMIREG if ( min_reg_time.GetTime() != -1 ) { logger_.msg(VERBOSE, "Registering to %s EMIRegistry", id_); // send PayloadRaw http_request; HTTPClientInfo http_info; std::multimap http_attributes; //Add http attributes http_attributes.insert( std::pair("Content-Type","application/json")); MCCConfig mcc_cfg; if (!key_.empty()) mcc_cfg.AddPrivateKey(key_); if (!cert_.empty()) mcc_cfg.AddCertificate(cert_); if (!proxy_.empty()) mcc_cfg.AddProxy(proxy_); if (!cadir_.empty()) mcc_cfg.AddCADir(cadir_); if (!cafile_.empty()) mcc_cfg.AddCAFile(cafile_); logger_.msg(VERBOSE, "Key: %s, Cert: %s, Proxy: %s, CADir: %s, CAFile", key_, cert_, proxy_, cadir_, cafile_); // create body Arc::URL service_url(id_+path); Arc::ClientHTTP httpclient(mcc_cfg, service_url); std::string tmp_message; tmp_message += "["; bool first_item =true; for(XMLNode node = send_doc["RegEntry"];(bool)node;++node) { // Message generation from the XMLNode if (first_item){ first_item = false; } else { tmp_message += ","; } tmp_message += "{"; // Mandatory attributes are here: https://twiki.cern.ch/twiki/bin/view/EMI/EMIRSERDesc // Service_ID tmp_message += "\"Service_ID\":\""; tmp_message += (std::string)node["SrcAdv"]["EPR"]["Address"]; tmp_message += "\""; // Service_Name if ( ((std::string)node["SrcAdv"]["Type"]).find("org.nordugrid.execution") != std::string::npos){ tmp_message += "\"Service_Name\":\"ComputingService\","; } else if (((std::string)node["SrcAdv"]["Type"]).find("org.nordugrid.tests") != std::string::npos){ tmp_message += "\"Service_Name\":\"TestService\","; } else if (((std::string)node["SrcAdv"]["Type"]).find("org.nordugrid.storage") != std::string::npos){ tmp_message += "\"Service_Name\":\"StorageService\","; } else if (((std::string)node["SrcAdv"]["Type"]).find("org.nordugrid.security") != std::string::npos){ tmp_message += "\"Service_Name\":\"SecurityService\","; } else if (((std::string)node["SrcAdv"]["Type"]).find("org.nordugrid.infosys") != std::string::npos){ tmp_message += "\"Service_Name\":\"InfosysService\","; } else if (((std::string)node["SrcAdv"]["Type"]).find("org.nordugrid.accounting") != std::string::npos){ tmp_message += "\"Service_Name\":\"AccountingService\","; } // Service_Type tmp_message += "\"Service_Type\":\""; tmp_message += (std::string)node["SrcAdv"]["Type"]; tmp_message += "\","; //Service_Endpoint_ID tmp_message += "\"Service_Endpoint_ID\":\""; tmp_message += (std::string)node["SrcAdv"]["EPR"]["Address"]; tmp_message += "\""; //Service_Endpoint_URL tmp_message += "\"Service_Endpoint_URL\":\""; tmp_message += (std::string)node["SrcAdv"]["EPR"]["Address"]; tmp_message += "\""; //Service_Endpoint_Capability tmp_message += "\"Service_Endpoint_Capability\":[\""; tmp_message += "N/A"; //TODO: set capability values tmp_message += "]\""; //Service_Endpoint_Technology tmp_message += 
"\"Service_Endpoint_Technology\":\""; tmp_message += "N/A"; //TODO: set technology values tmp_message += "\""; //Service_Endpoint_InterfaceName tmp_message += "\"Service_Endpoint_InterfaceName\":\""; tmp_message += "N/A"; //TODO: set InterfaceName values tmp_message += "\""; //Service_Endpoint_InterfaceVersion tmp_message += "\"Service_Endpoint_InterfaceVersion\":\""; tmp_message += "N/A"; //TODO: set InterfaceVersion values tmp_message += "\""; if (already_registered){ // Service_ExpireOn Period expiration((std::string)node["MetaSrcAdv"]["Expiration"]); time_t time = current_time+expiration.GetPeriod(); ptm = gmtime ( &time ); std::string mon_prefix = (ptm->tm_mon+1 < 10)?"0":""; std::string day_prefix = (ptm->tm_mday < 10)?"0":""; std::string hour_prefix = (ptm->tm_hour < 10)?"0":""; std::string min_prefix = (ptm->tm_min < 10)?"0":""; std::string sec_prefix = (ptm->tm_sec < 10)?"0":""; std::stringstream out_exp; out_exp << ptm->tm_year+1900<<"-"<tm_mon+1<<"-"<tm_mday<<"T"; out_exp << hour_prefix<tm_hour<<":"<tm_min<<":"<tm_sec; out_exp << ".000Z"; tmp_message += ",\"Service_ExpireOn\":{\"$date\":\""; tmp_message += out_exp.str(); tmp_message += "\"},"; // Service_CreationTime time_t creation = creation_time.GetTime(); ptm = gmtime ( &creation ); mon_prefix = (ptm->tm_mon+1 < 10)?"0":""; day_prefix = (ptm->tm_mday < 10)?"0":""; hour_prefix = (ptm->tm_hour < 10)?"0":""; min_prefix = (ptm->tm_min < 10)?"0":""; sec_prefix = (ptm->tm_sec < 10)?"0":""; std::stringstream out_creat; out_creat << ptm->tm_year+1900<<"-"<tm_mon+1<<"-"<tm_mday<<"T"; out_creat << hour_prefix<tm_hour<<":"<tm_min<<":"<tm_sec; out_creat << ".000Z"; tmp_message += "\"Service_CreationTime\":{\"$date\":\""; tmp_message += out_creat.str(); tmp_message += "\"}"; } tmp_message += "}"; } tmp_message += "]"; const std::string &message( tmp_message ); logger_.msg(DEBUG, "Sent entry: %s", message); //Add the message into the request http_request.Insert(message.c_str()); int retry_ = retry; while ( retry_ >= 1 ) { PayloadRawInterface *http_response = NULL; MCC_Status status = httpclient.process( already_registered ? "PUT" : "POST", http_attributes, &http_request, &http_info, &http_response); // multiple tries std::string method = already_registered ? "updating" : "registration"; if ( !status.isOk() ) { logger_.msg(ERROR, "Error during %s to %s EMIRegistry", method, id_); } else { if( http_info.code == 200 ) { logger_.msg(VERBOSE, "Successful %s to EMIRegistry (%s)", method, id_); already_registered = true; delete http_response; break; } else { std::string method2 = already_registered ? "update" : "register"; logger_.msg(VERBOSE, "Failed to %s to EMIRegistry (%s) - %d", method2, id_, http_info.code ); if ( retry_ == 1 ) { // Already registration need becasue the entry expired (and removed) on the EMIR. 
already_registered = false; } } } retry_--; logger_.msg(VERBOSE, "Retry connecting to the EMIRegistry (%s) %d time(s).", id_, retry-retry_); delete http_response; sleep(1); } } // end of the connection with the EMIRegistry // Thread sleeping long int period_ = min_reg_time.GetTime() - current_time; logger_.msg(VERBOSE, "Registration ends: %s",id_); logger_.msg(VERBOSE, "Waiting period is %d second(s).",period_); // The next line is removed for infinite operation // if(period_ <= 0) break; // One time registration Glib::TimeVal etime; etime.assign_current_time(); etime.add_milliseconds(period_*1000L); // Sleep and exit if interrupted by request to exit if(cond_exit_.timed_wait(lock_,etime)) break; //sleep(period_); } logger_.msg(VERBOSE, "Registration exit: %s",id_); } // ------------------------------------------------------------------- InfoRegisters::InfoRegisters(XMLNode cfg, Service *service) { if(!service) return; NS ns; ns["iregc"]=REGISTRATION_CONFIG_NAMESPACE; cfg.Namespaces(ns); if ( bool(cfg["iregc:NoRegister"])) return; for(XMLNode node = cfg["iregc:InfoRegister"];(bool)node;++node) { registers_.push_back(new InfoRegister(node,service)); } } InfoRegisters::~InfoRegisters(void) { for(std::list::iterator i = registers_.begin();i!=registers_.end();++i) { if(*i) delete (*i); } } bool InfoRegisters::addRegister(XMLNode cfg, Service *service) { registers_.push_back(new InfoRegister(cfg,service)); return true; } // ------------------------------------------------------------------- } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/infosys/PaxHeaders.7502/InfoFilter.cpp0000644000000000000000000000012411717026620023716 xustar000000000000000027 mtime=1329343888.401429 27 atime=1513200574.978707 30 ctime=1513200659.937746724 nordugrid-arc-5.4.2/src/hed/libs/infosys/InfoFilter.cpp0000644000175000002070000001521211717026620023764 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "InfoFilter.h" namespace Arc { class InfoPolicy { public: XMLNode xml; bool done; ArcSec::Result res; InfoPolicy(void):xml(),done(false),res(ArcSec::DECISION_DENY) { }; InfoPolicy(XMLNode node):xml(node),done(false),res(ArcSec::DECISION_DENY) { }; ~InfoPolicy(void) { }; bool Evaluate(MessageAuth& id); }; bool InfoPolicy::Evaluate(MessageAuth& id) { if(done) return true; // Parse internal policy ArcSec::EvaluatorLoader eloader; AutoPointer policy(eloader.getPolicy(ArcSec::Source(xml))); if(!policy) { // Failed to parse policy return false; }; // Find proper evaluator AutoPointer eval(eloader.getEvaluator(policy.Ptr())); if(!eval) { // Failed to find proper evaluator return false; }; // Generate request from identity of requestor std::string policyname = policy->getName(); if((policyname.length() > 7) && (policyname.substr(policyname.length()-7) == ".policy")) { policyname.resize(policyname.length()-7); }; XMLNode req; // TODO: do it only once if(!id.Export(SecAttrFormat(policyname.c_str()),req)) { // Failed to generate request return false; }; // Evaluate internal policy AutoPointer resp(eval->evaluate(ArcSec::Source(req),policy.Ptr())); if(!resp) { // Failed to evaluate policy return false; }; ArcSec::ResponseList& rlist = resp->getResponseItems(); // Most probably there will be only one item. So far // using hardcoded prorities for response results. 
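// The counters that follow implement a deny-overrides combination of the
// per-item decisions. A standalone sketch of the same precedence
// (deny > permit > not-applicable > indeterminate), assuming <vector> and the
// ArcSec::Result constants already used in this file; the function name
// CombineDecisions is illustrative only:
static ArcSec::Result CombineDecisions(const std::vector<ArcSec::Result>& items) {
  bool permit = false, notapplicable = false, indeterminate = false;
  for (std::vector<ArcSec::Result>::const_iterator it = items.begin();
       it != items.end(); ++it) {
    switch (*it) {
      case ArcSec::DECISION_PERMIT:         permit = true; break;
      case ArcSec::DECISION_NOT_APPLICABLE: notapplicable = true; break;
      case ArcSec::DECISION_INDETERMINATE:  indeterminate = true; break;
      case ArcSec::DECISION_DENY:
      default:                              return ArcSec::DECISION_DENY; // deny wins outright
    }
  }
  if (permit)        return ArcSec::DECISION_PERMIT;
  if (notapplicable) return ArcSec::DECISION_NOT_APPLICABLE;
  if (indeterminate) return ArcSec::DECISION_INDETERMINATE;
  return ArcSec::DECISION_DENY; // no items: keep the default deny, matching res below
}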
int res_deny = 0; int res_permit = 0; int res_notapplicable = 0; int res_indeteminate = 0; for(int n = 0;nres) { case ArcSec::DECISION_PERMIT: ++res_permit; break; case ArcSec::DECISION_DENY: ++res_deny; break; case ArcSec::DECISION_INDETERMINATE: ++res_indeteminate; break; case ArcSec::DECISION_NOT_APPLICABLE: ++res_notapplicable; break; default: ++res_deny; break; // Safe }; }; }; // Store evaluation result if(res_deny) { res=ArcSec::DECISION_DENY; } else if(res_permit) { res=ArcSec::DECISION_PERMIT; } else if(res_notapplicable) { res=ArcSec::DECISION_NOT_APPLICABLE; } else if(res_indeteminate) { res=ArcSec::DECISION_INDETERMINATE; }; return true; } static void RemovePolicies(std::list< std::pair::iterator> >& policies,XMLNode node) { // Remove nodes associated with external policies. // This is not most effective way to handle this problem, but // currently I could not find anything better. for(std::list< std::pair::iterator> >::iterator p = policies.begin(); p != policies.end();) { if(node == p->first) { p=policies.erase(p); } else { ++p; }; }; // Process children nodes XMLNode cnode = node.Child(); for(;(bool)cnode;++cnode) RemovePolicies(policies,cnode); } static void RemoveEmbeddedPolicies(XMLNode node) { // Remove all children policies while(true) { XMLNode def = node["InfoFilterDefinition"]; if(!def) break; def.Destroy(); }; // Remove tag XMLNode tag = node.Attribute("InfoFilterTag"); tag.Destroy(); // Process children nodes XMLNode cnode = node.Child(); for(;(bool)cnode;++cnode) RemoveEmbeddedPolicies(cnode); return; } static bool FilterNode(MessageAuth& id,XMLNode node,std::list< std::pair::iterator> >& policies,std::list& epolicies,std::map& ipolicies) { // Check if node has external policy for(std::list< std::pair::iterator> >::iterator p = policies.begin(); p != policies.end();) { if(node == p->first) { // Node has assigned policy - evaluate it if(!p->second->Evaluate(id)) { // Policy evaluation failed return false; }; if(p->second->res != ArcSec::DECISION_PERMIT) { RemovePolicies(policies,node); node.Destroy(); break; }; }; ++p; }; if((bool)node) { // Check for internal policy // 1. Pick policy definitions XMLNode def = node["InfoFilterDefinition"]; for(;(bool)def;++def) { // Create policy and store it in map // TODO: policies without identifier std::string pid = def.Attribute("id"); ipolicies[pid]=InfoPolicy(def.Child()); }; // 2. 
Check for tag // TODO: policies without ids and tags std::string tag = node.Attribute("InfoFilterTag"); if(!tag.empty()) { InfoPolicy& policy = ipolicies[tag]; if(!policy.xml) { // No such policy defined return false; }; if(!policy.Evaluate(id)) { // Failed to evaluate policy return false; }; if(policy.res != ArcSec::DECISION_PERMIT) { RemovePolicies(policies,node); node.Destroy(); return true; }; }; }; if((bool)node) { // Process children nodes for(int n = 0;;++n) { XMLNode cnode = node.Child(n); if(!cnode) break; if(!FilterNode(id,cnode,policies,epolicies,ipolicies)) return false; }; }; return true; } InfoFilter::InfoFilter(MessageAuth& id):id_(id) { } bool InfoFilter::Filter(XMLNode doc) const { std::list< std::pair > policies; NS ns; return Filter(doc,policies,ns); } bool InfoFilter::Filter(XMLNode doc,const std::list< std::pair >& policies,const NS& ns) const { std::map ipolicies_; // internal policies associated to their ids std::list epolicies_; // external policies std::list< std::pair::iterator> > policies_; // nodes associated to external policies for(std::list< std::pair >::const_iterator p = policies.begin(); p != policies.end();++p) { XMLNodeList nodes = doc.XPathLookup(p->first,ns); if(nodes.size() > 0) { std::list::iterator ep = epolicies_.insert(epolicies_.end(),InfoPolicy(p->second)); for(XMLNodeList::iterator n = nodes.begin();n != nodes.end();++n) { policies_.push_back(std::pair::iterator>(*n,ep)); }; }; }; // Go through nodes and check policies bool r = FilterNode(id_,doc,policies_,epolicies_,ipolicies_); if(!r) return false; // Remove policies embedded into document RemoveEmbeddedPolicies(doc); return true; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/infosys/PaxHeaders.7502/BootstrapISIS.cpp0000644000000000000000000000012412042216423024314 xustar000000000000000027 mtime=1351163155.111915 27 atime=1513200574.963707 30 ctime=1513200659.939746749 nordugrid-arc-5.4.2/src/hed/libs/infosys/BootstrapISIS.cpp0000644000175000002070000001714412042216423024370 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include "InfoRegister.h" namespace Arc { static Logger logger_(Logger::rootLogger, "InfoSys"); struct Registrar_data { ISIS_description isis; InfoRegistrar *registrar; }; void InfoRegistrar::initISIS(XMLNode cfg) { logger_.msg(DEBUG, "Initialize ISIS handler"); // Process configuration call_depth = 0; defaultBootstrapISIS.url = (std::string)cfg["URL"]; if(defaultBootstrapISIS.url.empty()) { logger_.msg(ERROR, "Can't recognize URL: %s",(std::string)cfg["URL"]); } else { //logger_.msg(VERBOSE, "InfoRegistrar created for URL: %s",(std::string)cfg["URL"]); } defaultBootstrapISIS.key = (std::string)cfg["KeyPath"]; defaultBootstrapISIS.cert = (std::string)cfg["CertificatePath"]; defaultBootstrapISIS.proxy = (std::string)cfg["ProxyPath"]; defaultBootstrapISIS.cadir = (std::string)cfg["CACertificatesDir"]; defaultBootstrapISIS.cafile = (std::string)cfg["CACertificatePath"]; // Set up default values myISIS = defaultBootstrapISIS; originalISISCount = 1; myISISList.push_back(myISIS); //getISISList(myISIS); myISISList_initialized = false; logger_.msg(DEBUG, "Initialize ISIS handler succeeded"); } void InfoRegistrar::removeISIS(ISIS_description isis) { logger_.msg(VERBOSE, "Remove ISIS (%s) from list", isis.url); // Remove isis from myISISList for (std::vector::iterator it = myISISList.begin(); (it < myISISList.end()) && (((*it).url != myISIS.url) || (myISISList.erase(it) == it)); 
it++) { }; // If the 'isis' is the currently used (myISIS) isis if ( isis.url == myISIS.url && myISISList.size() != 0 ) { // Select a new random isis from the list std::srand(time(NULL)); ISIS_description rndISIS = myISISList[std::rand() % myISISList.size()]; // Add the neighbors of the newly selected ISIS to the list and set myISIS to one of them getISISList(rndISIS); } // Check if there is enough ISIS's left getISIS(); } void InfoRegistrar::getISISList(ISIS_description isis) { logger_.msg(VERBOSE, "getISISList from %s", isis.url); logger_.msg(VERBOSE, "Key %s, Cert: %s, CA: %s", isis.key, isis.cert, isis.cadir); // Try to get ISISList from the actual ISIS // Compose getISISList request NS query_ns; query_ns[""] = "http://www.nordugrid.org/schemas/isis/2007/06"; // Try to get ISIS.getISISList() PayloadSOAP request(query_ns); request.NewChild("GetISISList"); // Send message PayloadSOAP *response; MCCConfig mcc_cfg; mcc_cfg.AddPrivateKey(isis.key); mcc_cfg.AddCertificate(isis.cert); mcc_cfg.AddProxy(isis.proxy); if (!isis.cadir.empty()) { mcc_cfg.AddCADir(isis.cadir); } if (!isis.cafile.empty()) { mcc_cfg.AddCAFile(isis.cafile); } int retry_ = retry; int reconnection = 0; while ( retry_ >= 1 ) { ClientSOAP cli(mcc_cfg,isis.url,60); MCC_Status status = cli.process(&request, &response); retry_--; reconnection++; // If the given ISIS wasn't available try reconnect if (!status.isOk() || !response || !bool((*response)["GetISISListResponse"])) { logger_.msg(VERBOSE, "ISIS (%s) is not available or not valid response. (%d. reconnection)", isis.url, reconnection); } else { logger_.msg(VERBOSE, "Connection to the ISIS (%s) is success and get the list of ISIS.", isis.url); break; } } // If the given ISIS wasn't available remove it and return if ( retry_ == 0 ) { removeISIS(isis); return; } // Merge result with the orignal list of known ISIS's int i = 0; while((bool)(*response)["GetISISListResponse"]["EPR"][i]) { bool ISIS_found = false; for (std::vector::iterator it = myISISList.begin(); (it < myISISList.end()) && (((*it).url != (std::string) (*response)["GetISISListResponse"]["EPR"][i]) || (ISIS_found = true)); it++) {}; if ( !ISIS_found ) { ISIS_description new_ISIS; new_ISIS.url = (std::string)(*response)["GetISISListResponse"]["EPR"][i]; new_ISIS.key = defaultBootstrapISIS.key; new_ISIS.cert = defaultBootstrapISIS.cert; new_ISIS.proxy = defaultBootstrapISIS.proxy; new_ISIS.cadir = defaultBootstrapISIS.cadir; new_ISIS.cafile = defaultBootstrapISIS.cafile; myISISList.push_back(new_ISIS); logger_.msg(VERBOSE, "GetISISList add this (%s) ISIS into the list.", new_ISIS.url); } i++; } // Update the original number of ISIS's variable originalISISCount = myISISList.size(); // Select a new random isis from the list std::srand(time(NULL)); ISIS_description rndISIS = myISISList[std::rand() % myISISList.size()]; logger_.msg(VERBOSE, "Chosen ISIS for communication: %s", rndISIS.url); myISIS = rndISIS; if (response) delete response; } ISIS_description InfoRegistrar::getISIS(void) { logger_.msg(VERBOSE, "Get ISIS from list of ISIS handler"); call_depth++; if ( call_depth > (int)myISISList.size() ){ call_depth--; logger_.msg(DEBUG, "Here is the end of the infinite calling loop."); ISIS_description temporary_ISIS; temporary_ISIS.url = ""; return temporary_ISIS; } if (myISISList.size() == 0) { if ( myISIS.url == defaultBootstrapISIS.url ) { logger_.msg(WARNING, "There is no more ISIS available. 
The list of ISIS's is already empty."); // Set up default values for the further tries myISIS = defaultBootstrapISIS; originalISISCount = 1; myISISList.push_back(myISIS); // If there is no available, return an empty ISIS ISIS_description temporary_ISIS; temporary_ISIS.url = ""; call_depth--; return temporary_ISIS; } else { // Try to receive the "original" bootsrap informations, if the BootstrapISIS is already available. getISISList(defaultBootstrapISIS); call_depth--; return getISIS(); } } if (myISISList.size() == 1) { // If there is only one known ISIS than force the check of availability of new cloud members. getISISList(myISIS); call_depth--; return myISIS; } if ((int)myISISList.size() <= originalISISCount / 2) { // Select a new random isis from the list std::srand(time(NULL)); ISIS_description rndISIS = myISISList[std::rand() % myISISList.size()]; // Add the neighbors of the newly selected ISIS to the list and set myISIS to one of them getISISList(rndISIS); } //And finally... call_depth--; return myISIS; } } nordugrid-arc-5.4.2/src/hed/libs/infosys/PaxHeaders.7502/InfoCache.cpp0000644000000000000000000000012212075123170023466 xustar000000000000000027 mtime=1358210680.472853 27 atime=1513200574.967707 28 ctime=1513200659.9357467 nordugrid-arc-5.4.2/src/hed/libs/infosys/InfoCache.cpp0000644000175000002070000001613512075123170023543 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include namespace Arc { static Logger logger(Logger::getRootLogger(), "InfoCache"); static RegularExpression id_regex("@id=\"([a-zA-Z0-9_\\\\-]*)\""); static void merge_xml(std::string& path_base, XMLNode &node) { Glib::Dir dir(path_base); std::string d; while ((d = dir.read_name()) != "") { std::string path_fl1 = Glib::build_filename(path_base, d); //std::cout << "merge_xml f1: " << path_fl1 << std::endl; if (Glib::file_test(path_fl1, Glib::FILE_TEST_IS_REGULAR)) { std::string xml_str = Glib::file_get_contents(path_fl1); XMLNode n(xml_str); XMLNode c; for (int i = 0; (bool)(c = n.Child(i)); i++) { node.NewChild(c); } } } } static bool create_directory(const std::string& dir) { if (!Glib::file_test(dir, Glib::FILE_TEST_IS_DIR)) { // create directory if (mkdir(dir.c_str(), 0700) != 0) { logger.msg(ERROR,"cannot create directory: %s",dir); return false; } } return true; } // -------------------------------------------------------------------------------- InfoCache::InfoCache(const Config &cfg, const std::string &service_id) { std::string cfg_s; cfg.GetXML(cfg_s); logger.msg(DEBUG,"Cache configuration: %s",cfg_s); std::string root = std::string(const_cast(cfg)["CacheRoot"]); if(root.empty()) { logger.msg(ERROR,"Missing cache root in configuration"); return; } if(service_id.empty()) { logger.msg(ERROR,"Missing service ID"); return; } logger.msg(DEBUG,"Cache root: %s",root); if (!create_directory(root)) return; std::string id(service_id); std::string sdir = Glib::build_filename(root, id); if (!create_directory(sdir)) return; path_base=sdir; logger.msg(DEBUG,"Cache directory: %s",path_base); } InfoCache::~InfoCache() { // NOP } static void clean_path(std::string s) { size_t idx; do { idx = s.find("//", 0); if (idx != std::string::npos) { s.replace(idx, 2, "/", 0, 1); } } while (idx != std::string::npos); } static bool set_path(const std::string &path_base,const std::list &tokens,const XMLNode &node) { if(tokens.size() < 1) return false; std::string dir = path_base; const std::list::const_iterator 
itLastElement = --tokens.end(); for (std::list::const_iterator it = tokens.begin(); it != itLastElement; it++) { dir = Glib::build_filename(dir, *it); if (!create_directory(dir)) return false; }; std::string file = Glib::build_filename(dir, tokens.back() + ".xml"); // Workaround needed to save namespaces properly. // TODO: solve it in some better way. XMLNode doc; node.New(doc); return doc.SaveToFile(file); } static bool unset_path(const std::string &path_base,const std::list &tokens) { if(tokens.size() < 1) return false; std::string dir = path_base; const std::list::const_iterator itLastElement = --tokens.end(); for (std::list::const_iterator it = tokens.begin(); it != itLastElement; it++) { dir = Glib::build_filename(dir, *it); if (!create_directory(dir)) return false; }; std::string file = Glib::build_filename(dir, tokens.back() + ".xml"); return (::remove(file.c_str()) == 0); } static bool get_path(const std::string &path_base,const std::list &tokens,XMLNode &node) { if(tokens.size() < 1) return false; std::string dir = path_base; const std::list::const_iterator itLastElement = --tokens.end(); for (std::list::const_iterator it = tokens.begin(); it != itLastElement; it++) { dir = Glib::build_filename(dir, *it); if (!create_directory(dir)) return false; }; std::string file = Glib::build_filename(dir, tokens.back() + ".xml"); return node.ReadFromFile(file); } bool InfoCache::Set(const char *xml_path, XMLNode &value) { if (path_base.empty()) { logger.msg(ERROR,"InfoCache object is not set up"); return false; } if (xml_path[0] != '/') { logger.msg(ERROR,"Invalid path in Set(): %s",xml_path); return false; } std::string p(xml_path); clean_path(p); std::list tokens; tokenize(p, tokens, "/"); bool ret; ret = set_path(path_base, tokens, value); return ret; } bool InfoCache::Unset(const char *xml_path) { if (path_base.empty()) { logger.msg(ERROR,"InfoCache object is not set up"); return false; } if (xml_path[0] != '/') { logger.msg(ERROR,"Invalid path in Set(): %s",xml_path); return false; } std::string p(xml_path); clean_path(p); std::list tokens; tokenize(p, tokens, "/"); bool ret; ret = unset_path(path_base, tokens); return ret; } bool InfoCache::Get(const char *xml_path, XMLNodeContainer &result) { if (path_base.empty()) { logger.msg(ERROR,"InfoCache object is not set up"); return false; } if (xml_path[0] != '/') { logger.msg(ERROR,"Invalid path in Get(): %s",xml_path); return false; } std::string p(xml_path); clean_path(p); std::list tokens; tokenize(p, tokens, "/"); if (tokens.size() <= 0) { NS ns; XMLNode node(ns, "InfoDoc"); merge_xml(path_base, node); result.AddNew(node); return true; } XMLNode node; return get_path(path_base,tokens,node); } bool InfoCache::Query(const char *path, const char *query, XMLNodeContainer &result) { if (path_base.empty()) { logger.msg(ERROR,"InfoCache object is not set up"); return false; } XMLNodeContainer gc; Get(path, gc); NS ns; for (int i = 0; i < gc.Size(); i++) { XMLNode node = gc[i]; XMLNodeList xresult = node.XPathLookup(query,ns); result.AddNew(xresult); } return true; } // -------------------------------------------------------------------------------- InfoCacheInterface::InfoCacheInterface(Config &cfg, std::string &service_id): cache(cfg,service_id) { } InfoCacheInterface::~InfoCacheInterface(void) { } void InfoCacheInterface::Get(const std::list& path,XMLNodeContainer& result) { std::string xml_path; for(std::list::const_iterator cur_name = path.begin(); cur_name != path.end(); ++cur_name) { xml_path+="/"+(*cur_name); }; if(xml_path.empty()) 
xml_path="/"; cache.Get(xml_path,result); } void InfoCacheInterface::Get(XMLNode xpath,XMLNodeContainer& result) { std::string query = xpath; if(!cache.Query("/",query.c_str(),result)) return; return; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/infosys/PaxHeaders.7502/InformationInterface.cpp0000644000000000000000000000012411274376472025776 xustar000000000000000027 mtime=1257372986.404138 27 atime=1513200574.964707 30 ctime=1513200659.934746688 nordugrid-arc-5.4.2/src/hed/libs/infosys/InformationInterface.cpp0000644000175000002070000002300311274376472026041 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "InformationInterface.h" namespace Arc { #define XPATH_1_0_URI "http://www.w3.org/TR/1999/REC-xpath-19991116" class MutexSLock { private: Glib::Mutex& mutex_; bool locked_; public: MutexSLock(Glib::Mutex& mutex,bool lock = true):mutex_(mutex),locked_(false) { if(lock) { mutex_.lock(); locked_=true; }; }; ~MutexSLock(void) { if(locked_) mutex_.unlock(); }; }; void InformationInterface::Get(const std::list&,XMLNodeContainer&) { return; } void InformationInterface::Get(XMLNode,XMLNodeContainer&) { return; } InformationInterface::InformationInterface(bool safe):to_lock_(safe) { } InformationInterface::~InformationInterface(void) { } SOAPEnvelope* InformationInterface::Process(SOAPEnvelope& in) { // Try to extract WSRP object from message WSRF& wsrp = CreateWSRP(in); if(!wsrp) { delete &wsrp; return NULL; }; // Check if operation is supported MutexSLock(lock_,to_lock_); try { WSRPGetResourcePropertyDocumentRequest* req = dynamic_cast(&wsrp); if(!req) throw std::exception(); if(!(*req)) throw std::exception(); // Requesting whole document XMLNodeContainer presp; Get(std::list(),presp); XMLNode xresp; if(presp.Size() > 0) xresp=presp[0]; WSRPGetResourcePropertyDocumentResponse resp(xresp); SOAPEnvelope* out = NULL; if(resp) { NS ns; (out = new SOAPEnvelope(ns))->Swap(resp.SOAP()); }; delete &wsrp; return out; } catch(std::exception& e) { }; try { WSRPGetResourcePropertyRequest* req = dynamic_cast(&wsrp); if(!req) throw std::exception(); if(!(*req)) throw std::exception(); std::list name; name.push_back(req->Name()); XMLNodeContainer presp; Get(name,presp); // Requesting sub-element WSRPGetResourcePropertyResponse resp; for(int n = 0;nSwap(resp.SOAP()); }; delete &wsrp; return out; } catch(std::exception& e) { }; try { WSRPGetMultipleResourcePropertiesRequest* req = dynamic_cast(&wsrp); if(!req) throw std::exception(); if(!(*req)) throw std::exception(); WSRPGetMultipleResourcePropertiesResponse resp; std::vector names = req->Names(); for(std::vector::iterator iname = names.begin(); iname != names.end(); ++iname) { std::list name; name.push_back(*iname); XMLNodeContainer presp; Get(name,presp); // Requesting sub-element for(int n = 0;nSwap(resp.SOAP()); }; delete &wsrp; return out; } catch(std::exception& e) { }; try { WSRPQueryResourcePropertiesRequest* req = dynamic_cast(&wsrp); if(!req) throw std::exception(); if(!(*req)) throw std::exception(); if(req->Dialect() != XPATH_1_0_URI) { // TODO: generate proper fault delete &wsrp; return SOAPFault::MakeSOAPFault(SOAPFault::Sender,"Query dialect not supported"); } XMLNodeContainer presp; Get(req->Query(),presp); WSRPQueryResourcePropertiesResponse resp; for(int n = 0;nSwap(resp.SOAP()); }; delete &wsrp; return out; } catch(std::exception& e) { }; if(to_lock_) lock_.unlock(); delete &wsrp; return SOAPFault::MakeSOAPFault(SOAPFault::Sender,"Operation not supported"); } SOAPEnvelope* 
InformationInterface::Process(SOAPEnvelope& in,const InfoFilter& filter,const InfoFilterPolicies& policies,const NS& ns) { SOAPEnvelope* out = Process(in); // If error or fault - leave if(!out) return out; if(!(*out)) return out; if(out->IsFault()) return out; // Otherwise filter body of result if(filter.Filter(out->Body(),policies,ns)) return out; // If filtering failed it is safer to return SOAP fault delete out; return SOAPFault::MakeSOAPFault(SOAPFault::Sender,"Operation not supported"); } InformationContainer::InformationContainer(void):InformationInterface(true) { } InformationContainer::InformationContainer(XMLNode doc,bool copy):InformationInterface(true) { if(copy) { doc.New(doc_); } else { doc_=doc; }; } InformationContainer::~InformationContainer(void) { } XMLNode InformationContainer::Acquire(void) { lock_.lock(); return doc_; } void InformationContainer::Release(void) { lock_.unlock(); } void InformationContainer::Assign(XMLNode doc,bool copy) { lock_.lock(); if(copy) { doc.New(doc_); } else { doc_=doc; }; lock_.unlock(); } void InformationContainer::Get(const std::list& path,XMLNodeContainer& result) { std::list cur_list; std::list::const_iterator cur_name = path.begin(); cur_list.push_back(doc_); for(;cur_name != path.end(); ++cur_name) { std::list new_list; for(std::list::iterator cur_node = cur_list.begin(); cur_node != cur_list.end(); ++cur_node) { // TODO: namespaces std::string name = *cur_name; std::string::size_type p = name.find(':'); if(p != std::string::npos) name=name.substr(p+1); XMLNode new_node = (*cur_node)[name]; for(;;new_node=new_node[1]) { if(!new_node) break; new_list.push_back(new_node); }; }; cur_list=new_list; }; result.Add(cur_list); return; } void InformationContainer::Get(XMLNode query,XMLNodeContainer& result) { std::string q = query; NS ns = query.Namespaces(); result.Add(doc_.XPathLookup(q,ns)); return; } InformationRequest::InformationRequest(void):wsrp_(NULL) { wsrp_=new WSRPGetResourcePropertyDocumentRequest(); } InformationRequest::InformationRequest(const std::list& path):wsrp_(NULL) { if(path.size() > 0) { wsrp_=new WSRPGetResourcePropertyRequest(*(path.begin())); } else { wsrp_=new WSRPGetResourcePropertyDocumentRequest(); }; } InformationRequest::InformationRequest(const std::list >& paths):wsrp_(NULL) { std::vector names; std::list >::const_iterator path = paths.begin(); for(;path != paths.end();++path) { if(path->size() > 0) names.push_back(*(path->begin())); }; if(names.size() > 1) { wsrp_=new WSRPGetMultipleResourcePropertiesRequest(names); } else if(names.size() == 1) { wsrp_=new WSRPGetResourcePropertyRequest(*(names.begin())); } else { wsrp_=new WSRPGetResourcePropertyDocumentRequest(); }; } InformationRequest::InformationRequest(XMLNode query):wsrp_(NULL) { WSRPQueryResourcePropertiesRequest* req = new WSRPQueryResourcePropertiesRequest(XPATH_1_0_URI); wsrp_=req; XMLNode q = req->Query(); std::string s = query; if(!s.empty()) { q=s; return; }; for(int n = 0;;++n) { XMLNode node = query.Child(n); if(!node) break; q.NewChild(node); }; } InformationRequest::~InformationRequest(void) { if(wsrp_) delete wsrp_; } SOAPEnvelope* InformationRequest::SOAP(void) { if(!wsrp_) return NULL; return &(wsrp_->SOAP()); } InformationResponse::InformationResponse(SOAPEnvelope& soap) { // Try to extract WSRP object from message wsrp_ = &(CreateWSRP(soap)); if(!(*wsrp_)) { delete wsrp_; wsrp_=NULL; }; } InformationResponse::~InformationResponse(void) { if(wsrp_) delete wsrp_; } std::list InformationResponse::Result(void) { if(!wsrp_) return 
std::list(); std::list props; // Check if operation is supported try { WSRPGetResourcePropertyDocumentResponse* resp = dynamic_cast(wsrp_); if(!resp) throw std::exception(); if(!(*resp)) throw std::exception(); props.push_back(resp->Document()); return props; } catch(std::exception& e) { }; try { WSRPGetResourcePropertyResponse* resp = dynamic_cast(wsrp_); if(!resp) throw std::exception(); if(!(*resp)) throw std::exception(); for(int n = 0;;++n) { XMLNode prop = resp->Property(n); if(!prop) break; props.push_back(prop); }; return props; } catch(std::exception& e) { }; try { WSRPGetMultipleResourcePropertiesResponse* resp = dynamic_cast(wsrp_); if(!resp) throw std::exception(); if(!(*resp)) throw std::exception(); for(int n = 0;;++n) { XMLNode prop = resp->Property(n); if(!prop) break; props.push_back(prop); }; return props; } catch(std::exception& e) { }; try { WSRPQueryResourcePropertiesResponse* resp = dynamic_cast(wsrp_); if(!resp) throw std::exception(); if(!(*resp)) throw std::exception(); XMLNode props_ = resp->Properties(); for(int n = 0;;++n) { XMLNode prop = props_.Child(n); if(!prop) break; props.push_back(prop); }; return props; } catch(std::exception& e) { }; return props; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/infosys/PaxHeaders.7502/InfoFilter.h0000644000000000000000000000012411174366545023375 xustar000000000000000027 mtime=1240591717.919273 27 atime=1513200574.964707 30 ctime=1513200659.929746626 nordugrid-arc-5.4.2/src/hed/libs/infosys/InfoFilter.h0000644000175000002070000000477011174366545023452 0ustar00mockbuildmock00000000000000#include /* */ namespace Arc { typedef std::list< std::pair > InfoFilterPolicies; /// Filters information document according to identity of requestor /** Identity is compared to policies stored inside information document and external ones. Parts of document which do not pass policy evaluation are removed. */ class InfoFilter { private: MessageAuth& id_; public: /// Creates object and associates identity /** Associated identity is not copied, hence passed argument must not be destroyed while this method is used. */ InfoFilter(MessageAuth& id); /// Filter information document according to internal policies /** In provided document all policies and nodes which have their policies evaluated to negative result are removed. */ bool Filter(XMLNode doc) const; /// Filter information document according to internal and external policies /** In provided document all policies and nodes which have their policies evaluated to negative result are removed. External policies are provided in policies argument. First element of every pair is XPath defining to which XML node policy must be applied. Second element is policy itself. Argument ns defines XML namespaces for XPath evaluation. 
*/ bool Filter(XMLNode doc,const InfoFilterPolicies& policies,const NS& ns) const; }; } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/infosys/PaxHeaders.7502/schema0000644000000000000000000000013213214316023022324 xustar000000000000000030 mtime=1513200659.996747446 30 atime=1513200668.720854145 30 ctime=1513200659.996747446 nordugrid-arc-5.4.2/src/hed/libs/infosys/schema/0000755000175000002070000000000013214316023022447 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/infosys/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711255705373024456 xustar000000000000000027 mtime=1253542651.230162 30 atime=1513200597.887987831 30 ctime=1513200659.993747409 nordugrid-arc-5.4.2/src/hed/libs/infosys/schema/Makefile.am0000644000175000002070000000014011255705373024513 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = infosys.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/libs/infosys/schema/PaxHeaders.7502/infosys.xsd0000644000000000000000000000012411454625143024626 xustar000000000000000027 mtime=1286810211.487196 27 atime=1513200574.969707 30 ctime=1513200659.996747446 nordugrid-arc-5.4.2/src/hed/libs/infosys/schema/infosys.xsd0000644000175000002070000002027011454625143024674 0ustar00mockbuildmock00000000000000 This element defines configuration of Information Registration active element. URL specifies an ISIS service, which will be used for bootstraping the registration process: it will be queried for a list of ISIS services, then one of these will be chosen and the registration will be sent to the chosen one until it is not available anymore, then another one will be chosen. If every ISIS on this list is unavailable, then the URL from the config will be used again. Retry count. Specifies how many times the InfoRegister retries connecting to an ISIS before treats it as unavailable. This element defines URL of the service as seen from outside. Custom value for this Registrar. Element defines the unique id of the service propagated outside. Custom value for this Registrar. This element defines the expiration time of the information provided by the service (at least 2 minutes). Custom value for this Registrar. Period specifies how often registration has to be done (at least 2 minutes). Custom value for this Registrar. Optional KeyPath for SSL connection Optional CertificatePath for SSL connection Optional ProxyPath for SSL connection Optional CACertificatesDir for SSL connection Optional CACertificatePath for SSL connection Element for Service element to link it to InfoRegistrar elements. It may also override some configuration parameters. Presence of this element means that service will be registered to ISISes. This element defines URL of the service as seen from outside. Element defines the unique id of the service propagated outside. This element defines the expiration time of the information provided by the service (at least 2 minutes). Period specifies how often registration has to be done (at least 2 minutes). This elements specify which registrars must be used for registering services. If there is no such element then registration is done using all registrars. 
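// The schema above describes the InfoRegister/InfoRegistrar configuration
// elements (URL, Retry, Endpoint, ServiceID, Expiration, Period and the
// credential paths). A minimal sketch of reading such a fragment with
// Arc::XMLNode, using the same accessors as InfoRegistrar::initISIS(); the
// XML literal and the header path (as installed by the ARC common library)
// are assumptions for illustration, not a complete ARC configuration:
#include <iostream>
#include <string>
#include <arc/XMLNode.h>

int main(void) {
  std::string xml =
    "<InfoRegistrar>"
    "  <URL>https://example.org:2135/isis</URL>"
    "  <Retry>5</Retry>"
    "  <KeyPath>/etc/grid-security/hostkey.pem</KeyPath>"
    "  <CertificatePath>/etc/grid-security/hostcert.pem</CertificatePath>"
    "  <CACertificatesDir>/etc/grid-security/certificates</CACertificatesDir>"
    "</InfoRegistrar>";
  Arc::XMLNode cfg(xml);
  // Element access follows the pattern used throughout this library:
  // operator[] selects a child, casting to std::string yields its content.
  std::cout << "bootstrap URL: " << (std::string)cfg["URL"] << std::endl;
  std::cout << "retries:       " << (std::string)cfg["Retry"] << std::endl;
  std::cout << "key:           " << (std::string)cfg["KeyPath"] << std::endl;
  return 0;
}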
Configuration element force skipping the Self-Registration nordugrid-arc-5.4.2/src/hed/libs/infosys/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315725024456 xustar000000000000000030 mtime=1513200597.919988223 30 atime=1513200648.367605217 30 ctime=1513200659.995747434 nordugrid-arc-5.4.2/src/hed/libs/infosys/schema/Makefile.in0000644000175000002070000004352313214315725024533 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/infosys/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = 
"$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ 
GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = 
@bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = infosys.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/infosys/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/infosys/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/infosys/PaxHeaders.7502/RegisteredService.h0000644000000000000000000000012411770351466024750 xustar000000000000000027 mtime=1340199734.229368 27 atime=1513200574.970707 30 ctime=1513200659.930746639 nordugrid-arc-5.4.2/src/hed/libs/infosys/RegisteredService.h0000644000175000002070000000225711770351466025023 0ustar00mockbuildmock00000000000000#ifndef __ARC_SERVICEISIS_H__ #define __ARC_SERVICEISIS_H__ #include #include #include #include #include #include namespace Arc { /// RegisteredService - extension of Service performing self-registration. /** Service is automatically added to registration framework. Registration information for service is obtained by calling its RegistrationCollector() method. It is important to note that RegistrationCollector() may be called anytime after RegisteredService constructor completed and hence even before actual constructor of inheriting class is complete. That must be taken into account while writing implementation of RegistrationCollector() or object of InfoRegisters class must be used directly. 
*/ class RegisteredService: public Service { private: InfoRegisters inforeg; public: /** Example contructor - Server takes at least it's configuration subtree */ RegisteredService(Config*, PluginArgument*); virtual ~RegisteredService(void); }; } // namespace Arc #endif /* __ARC_SERVICEISIS_H__ */ nordugrid-arc-5.4.2/src/hed/libs/infosys/PaxHeaders.7502/README0000644000000000000000000000012411001653037022023 xustar000000000000000027 mtime=1208440351.928622 27 atime=1513200574.966707 30 ctime=1513200659.925746578 nordugrid-arc-5.4.2/src/hed/libs/infosys/README0000644000175000002070000000023711001653037022072 0ustar00mockbuildmock00000000000000implementaton of class used to implement information system components: - provide limited WSRF interface, - information cache - ISIS registartion component nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/credentialstore0000644000000000000000000000013213214316024022562 xustar000000000000000030 mtime=1513200660.051748119 30 atime=1513200668.720854145 30 ctime=1513200660.051748119 nordugrid-arc-5.4.2/src/hed/libs/credentialstore/0000755000175000002070000000000013214316024022705 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/credentialstore/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612343353401024700 xustar000000000000000027 mtime=1401804545.672799 30 atime=1513200597.043977509 29 ctime=1513200660.04774807 nordugrid-arc-5.4.2/src/hed/libs/credentialstore/Makefile.am0000644000175000002070000000207012343353401024742 0ustar00mockbuildmock00000000000000lib_LTLIBRARIES = libarccredentialstore.la #noinst_PROGRAMS = test libarccredentialstore_ladir = $(pkgincludedir)/credentialstore libarccredentialstore_la_HEADERS = CredentialStore.h ClientVOMS.h ClientVOMSRESTful.h libarccredentialstore_la_SOURCES = CredentialStore.cpp ClientVOMS.cpp ClientVOMSRESTful.cpp libarccredentialstore_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) libarccredentialstore_la_LIBADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) libarccredentialstore_la_LDFLAGS = -version-info 3:0:0 #test_SOURCES = test.cpp #test_CXXFLAGS = -I$(top_srcdir)/include \ # $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) #test_LDADD = \ # libarccredentialstore.la \ # $(top_builddir)/src/hed/libs/common/libarccommon.la \ # $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(EXTRA_LIBS) nordugrid-arc-5.4.2/src/hed/libs/credentialstore/PaxHeaders.7502/CredentialStore.cpp0000644000000000000000000000012412343353401026435 xustar000000000000000027 mtime=1401804545.672799 27 atime=1513200574.835705 30 ctime=1513200660.049748094 nordugrid-arc-5.4.2/src/hed/libs/credentialstore/CredentialStore.cpp0000644000175000002070000002750112343353401026507 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "CredentialStore.h" namespace Arc { using namespace Arc; #define logger (Logger::getRootLogger()) CredentialStore::~CredentialStore(void) { } CredentialStore::CredentialStore(const URL& url):valid(false) { if(!url) return; if(url.Protocol() != "myproxy") return; UserConfig ucfg; set(url.Host(),url.Port(),ucfg); } CredentialStore::CredentialStore(const UserConfig& cfg, const URL& url):valid(false) { if(!url) return; 
if(url.Protocol() != "myproxy") return; set(url.Host(),url.Port(),cfg); } void CredentialStore::set(const std::string& host_, int port_, const UserConfig& cfg) { if(port_ <= 0) port_ = default_port; if(host_.empty()) return; timeout = 10; cfg.ApplyToConfig(concfg); host = host_; port = port_; valid = true; } static std::string read_response(PayloadStreamInterface& stream,bool nullterm = true) { std::string resp; do { char buf[256]; int len = sizeof(buf); if(!stream.Get(buf,len)) break; //std::cout<<"Response: "< parse_response(const std::string& str) { std::map vals; std::list lines; tokenize(str,lines,"\r\n"); for(std::list::iterator line = lines.begin(); line != lines.end();++line) { std::list tokens; //trick to deal with the token in response like: //CRED_OWNER=/O=Grid/OU=ARC/OU=localdomain/CN=User std::string::size_type pos = (*line).find_first_of("="); if(pos!=std::string::npos)(*line).replace(pos,1,"+"); tokenize(*line,tokens,"+"); if(tokens.size() != 2) continue; vals[*tokens.begin()] += vals[*tokens.begin()].empty()? (*(++tokens.begin())) : ("; " + *(++tokens.begin())); } return vals; } static std::map parse_response(PayloadStreamInterface& stream) { return parse_response(read_response(stream)); } bool compose_msg(std::string& msg, const std::map& options, bool dummy_pass = false) { std::string credname; std::string username; std::string password; std::string newpassword; std::string trusted_retriever; int lifetime = 0; std::map::const_iterator val; val = options.find("credname"); if(val != options.end()) credname = val->second; val = options.find("username"); if(val != options.end()) username = val->second; val = options.find("password"); if(val != options.end()) password = val->second; val = options.find("newpassword"); if(val != options.end()) newpassword = val->second; val = options.find("lifetime"); if(val != options.end()) lifetime = stringto(val->second); val = options.find("retriever_trusted"); if(val != options.end()) trusted_retriever = val->second; if(username.empty()) return false; //USERNAME must be provided; PASSPHRASE could be empty //if RETRIEVER_TRUSTED is provided. msg.append("USERNAME="+username+"\n"); //if(!password.empty()) { if(!dummy_pass) msg.append("PASSPHRASE="+password+"\n"); //} else msg.append("PASSPHRASE=DUMMY-PASSPHRASE\n"); if(!newpassword.empty()) { msg.append("NEW_PHRASE="+newpassword+"\n"); } if(lifetime > 0) { msg.append("LIFETIME="+tostring(lifetime)+"\n"); } else { msg.append("LIFETIME=0").append("\n"); } //The other attributes are optional. 
if(!credname.empty()) { msg.append("CRED_NAME="+credname+"\n"); } if(!trusted_retriever.empty()) { msg.append("RETRIEVER_TRUSTED="+trusted_retriever+"\n"); } for(std::map::const_iterator it = options.begin(); it != options.end(); it++) { if((*it).first.find("vomsname") != std::string::npos) { std::string vomsname = (*it).second; if(!vomsname.empty()) { msg.append("VONAME="+vomsname+"\n"); } } } for(std::map::const_iterator it = options.begin(); it != options.end(); it++) { if((*it).first.find("vomses") != std::string::npos) { std::string vomses_line = (*it).second; if(!vomses_line.empty()) { msg.append("VOMSES="+vomses_line+"\n"); } } } msg.append("\0"); return true; } bool CredentialStore::Store(const std::map& options,const std::string& cred, bool if_delegate, const Arc::Time deleg_start, const Arc::Period deleg_period) { if(!valid) return false; //if(if_delegate) { std::string msg("VERSION=MYPROXYv2\nCOMMAND=1\n"); //} //else { // std::string msg("VERSION=MYPROXYv2\nCOMMAND=5\n"); //TODO //} if(!compose_msg(msg, options)) return false; PayloadStreamInterface *response = NULL; std::map rvals; ClientTCP client(concfg,host,port,GSISec,timeout); { PayloadRaw request; request.Insert(msg.c_str(),0,msg.length()); MCC_Status status = client.process(&request,&response,true); if(!status) { if(response) delete response; return false; }; if(!response) { return false; }; }; rvals = parse_response(*response); if(rvals["RESPONSE"] != "0") { logger.msg(ERROR, "MyProxy failure: %s",rvals["ERROR"]); delete response; response=NULL; return false; } std::string credrequest = read_response(*response,false); delete response; response=NULL; Arc::Credential proxy(deleg_start,deleg_period,1024); // DER binary request if(!proxy.InquireRequest(credrequest, false, true)) return false; std::string signercred = cred; std::string signerkey; bool isfile = false; if(signercred.empty()) { isfile = true; signercred = concfg.proxy; if(signercred.empty()) { signercred = concfg.cert; signerkey = concfg.key; } } Arc::Credential signer(signercred,signerkey,concfg.cadir,concfg.cafile,"",isfile); std::string credresp; std::string credtmp; if(!signer.SignRequest(&proxy,credtmp,true)) return false; unsigned char ncerts = signer.GetCertNumofChain() + 2; credresp.append((char*)&ncerts,1); credresp.append(credtmp); credtmp.clear(); if(!signer.OutputCertificate(credtmp,true)) return false; credresp.append(credtmp); if(ncerts > 2) { credtmp.clear(); if(!signer.OutputCertificateChain(credtmp,true)) return false; credresp.append(credtmp); }; { PayloadRaw request; request.Insert(credresp.c_str(),0,credresp.length()); MCC_Status status = client.process(&request,&response,true); if(!status) { if(response) delete response; return false; }; if(!response) { return false; }; }; rvals = parse_response(*response); delete response; response=NULL; if(rvals["RESPONSE"] != "0") { logger.msg(ERROR, "MyProxy failure: %s",rvals["ERROR"]); return false; } return true; } bool CredentialStore::Retrieve(const std::map& options, std::string& cred, bool if_delegate) { if(!valid) return false; std::string msg("VERSION=MYPROXYv2\nCOMMAND=0\n"); if(!compose_msg(msg, options)) return false; PayloadStreamInterface *response = NULL; ClientTCP client(concfg,host,port,GSISec,timeout); { PayloadRaw request; request.Insert(msg.c_str(),0,msg.length()); MCC_Status status = client.process(&request,&response,true); if(!status) { if(response) delete response; return false; }; if(!response) { return false; }; }; std::map rvals = parse_response(*response); delete response; 
response=NULL; if(rvals["RESPONSE"] != "0") { logger.msg(ERROR, "MyProxy failure: %s",rvals["ERROR"]); return false; } std::string credrequest; Arc::Credential requester(Time(), 0 ,1024); requester.GenerateRequest(credrequest,true); { PayloadRaw request; request.Insert(credrequest.c_str(),0,credrequest.length()); MCC_Status status = client.process(&request,&response,true); if(!status) { if(response) delete response; return false; }; if(!response) { return false; }; }; std::string credresp = read_response(*response,false); delete response; response=NULL; if(credresp.empty()) return false; credresp.erase(0,1); std::string credkey; requester.OutputPrivatekey(credkey); Credential proxy(credresp,credkey,concfg.cadir,concfg.cafile,"",false); cred.clear(); std::string credtmp; proxy.OutputCertificate(credtmp); if(credtmp.empty()) return false; cred.append(credtmp); credtmp.clear(); proxy.OutputPrivatekey(credtmp); if(credtmp.empty()) return false; cred.append(credtmp); credtmp.clear(); proxy.OutputCertificateChain(credtmp); cred.append(credtmp); return true; } bool CredentialStore::Info(const std::map& options,std::string& respinfo){ if(!valid) return false; std::string msg("VERSION=MYPROXYv2\nCOMMAND=2\n"); if(!compose_msg(msg, options,true)) return false; PayloadStreamInterface *response = NULL; std::map rvals; ClientTCP client(concfg,host,port,GSISec,timeout); { PayloadRaw request; request.Insert(msg.c_str(),0,msg.length()); MCC_Status status = client.process(&request,&response,true); if(!status) { if(response) delete response; return false; }; if(!response) { return false; }; }; rvals = parse_response(*response); delete response; response=NULL; if(rvals["RESPONSE"] != "0") { logger.msg(ERROR, "MyProxy failure: %s",rvals["ERROR"]); return false; } std::string owner = rvals["CRED_OWNER"]; respinfo.append("owner: ").append(rvals["CRED_OWNER"]).append("\n"); std::string et_str = rvals["CRED_END_TIME"]; long end = stringto(et_str); Time endtime(end); Time now; Period left(endtime-now); std::string left_str = left; respinfo.append("time left: ").append(left_str).append("\n"); std::cout<& options) { if(!valid) return false; std::string msg("VERSION=MYPROXYv2\nCOMMAND=4\n"); if(!compose_msg(msg, options)) return false; PayloadStreamInterface *response = NULL; std::map rvals; ClientTCP client(concfg,host,port,GSISec,timeout); { PayloadRaw request; request.Insert(msg.c_str(),0,msg.length()); MCC_Status status = client.process(&request,&response,true); if(!status) { if(response) delete response; return false; }; if(!response) { return false; }; }; rvals = parse_response(*response); delete response; response=NULL; if(rvals["RESPONSE"] != "0") { logger.msg(ERROR, "MyProxy failure: %s",rvals["ERROR"]); return false; } return true; } bool CredentialStore::Destroy(const std::map& options) { if(!valid) return false; std::string msg("VERSION=MYPROXYv2\nCOMMAND=3\n"); if(!compose_msg(msg, options)) return false; PayloadStreamInterface *response = NULL; std::map rvals; ClientTCP client(concfg,host,port,GSISec,timeout); { PayloadRaw request; request.Insert(msg.c_str(),0,msg.length()); MCC_Status status = client.process(&request,&response,true); if(!status) { if(response) delete response; return false; }; if(!response) { return false; }; }; rvals = parse_response(*response); delete response; response=NULL; if(rvals["RESPONSE"] != "0") { logger.msg(ERROR, "MyProxy failure: %s",rvals["ERROR"]); return false; } return true; } } // namespace Arc 
nordugrid-arc-5.4.2/src/hed/libs/credentialstore/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315725024713 xustar000000000000000030 mtime=1513200597.092978108 30 atime=1513200648.271604043 30 ctime=1513200660.048748082 nordugrid-arc-5.4.2/src/hed/libs/credentialstore/Makefile.in0000644000175000002070000007257313214315725024777 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/credentialstore DIST_COMMON = $(libarccredentialstore_la_HEADERS) \ $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libdir)" \ 
"$(DESTDIR)$(libarccredentialstore_ladir)" LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = libarccredentialstore_la_DEPENDENCIES = $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) am_libarccredentialstore_la_OBJECTS = \ libarccredentialstore_la-CredentialStore.lo \ libarccredentialstore_la-ClientVOMS.lo \ libarccredentialstore_la-ClientVOMSRESTful.lo libarccredentialstore_la_OBJECTS = \ $(am_libarccredentialstore_la_OBJECTS) libarccredentialstore_la_LINK = $(LIBTOOL) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarccredentialstore_la_CXXFLAGS) $(CXXFLAGS) \ $(libarccredentialstore_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarccredentialstore_la_SOURCES) DIST_SOURCES = $(libarccredentialstore_la_SOURCES) HEADERS = $(libarccredentialstore_la_HEADERS) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = 
@DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = 
@PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ lib_LTLIBRARIES = libarccredentialstore.la #noinst_PROGRAMS = test libarccredentialstore_ladir = $(pkgincludedir)/credentialstore 
libarccredentialstore_la_HEADERS = CredentialStore.h ClientVOMS.h ClientVOMSRESTful.h libarccredentialstore_la_SOURCES = CredentialStore.cpp ClientVOMS.cpp ClientVOMSRESTful.cpp libarccredentialstore_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) libarccredentialstore_la_LIBADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) libarccredentialstore_la_LDFLAGS = -version-info 3:0:0 all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/credentialstore/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/credentialstore/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarccredentialstore.la: $(libarccredentialstore_la_OBJECTS) $(libarccredentialstore_la_DEPENDENCIES) $(libarccredentialstore_la_LINK) -rpath $(libdir) $(libarccredentialstore_la_OBJECTS) $(libarccredentialstore_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccredentialstore_la-ClientVOMS.Plo@am__quote@ @AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/libarccredentialstore_la-ClientVOMSRESTful.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccredentialstore_la-CredentialStore.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarccredentialstore_la-CredentialStore.lo: CredentialStore.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredentialstore_la_CXXFLAGS) $(CXXFLAGS) -MT libarccredentialstore_la-CredentialStore.lo -MD -MP -MF $(DEPDIR)/libarccredentialstore_la-CredentialStore.Tpo -c -o libarccredentialstore_la-CredentialStore.lo `test -f 'CredentialStore.cpp' || echo '$(srcdir)/'`CredentialStore.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccredentialstore_la-CredentialStore.Tpo $(DEPDIR)/libarccredentialstore_la-CredentialStore.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='CredentialStore.cpp' object='libarccredentialstore_la-CredentialStore.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredentialstore_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccredentialstore_la-CredentialStore.lo `test -f 'CredentialStore.cpp' || echo '$(srcdir)/'`CredentialStore.cpp libarccredentialstore_la-ClientVOMS.lo: ClientVOMS.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredentialstore_la_CXXFLAGS) $(CXXFLAGS) -MT libarccredentialstore_la-ClientVOMS.lo -MD -MP -MF $(DEPDIR)/libarccredentialstore_la-ClientVOMS.Tpo -c -o libarccredentialstore_la-ClientVOMS.lo `test -f 'ClientVOMS.cpp' || echo '$(srcdir)/'`ClientVOMS.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccredentialstore_la-ClientVOMS.Tpo $(DEPDIR)/libarccredentialstore_la-ClientVOMS.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ClientVOMS.cpp' object='libarccredentialstore_la-ClientVOMS.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) 
--mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredentialstore_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccredentialstore_la-ClientVOMS.lo `test -f 'ClientVOMS.cpp' || echo '$(srcdir)/'`ClientVOMS.cpp libarccredentialstore_la-ClientVOMSRESTful.lo: ClientVOMSRESTful.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredentialstore_la_CXXFLAGS) $(CXXFLAGS) -MT libarccredentialstore_la-ClientVOMSRESTful.lo -MD -MP -MF $(DEPDIR)/libarccredentialstore_la-ClientVOMSRESTful.Tpo -c -o libarccredentialstore_la-ClientVOMSRESTful.lo `test -f 'ClientVOMSRESTful.cpp' || echo '$(srcdir)/'`ClientVOMSRESTful.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccredentialstore_la-ClientVOMSRESTful.Tpo $(DEPDIR)/libarccredentialstore_la-ClientVOMSRESTful.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ClientVOMSRESTful.cpp' object='libarccredentialstore_la-ClientVOMSRESTful.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccredentialstore_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccredentialstore_la-ClientVOMSRESTful.lo `test -f 'ClientVOMSRESTful.cpp' || echo '$(srcdir)/'`ClientVOMSRESTful.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libarccredentialstore_laHEADERS: $(libarccredentialstore_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarccredentialstore_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarccredentialstore_ladir)" @list='$(libarccredentialstore_la_HEADERS)'; test -n "$(libarccredentialstore_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarccredentialstore_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarccredentialstore_ladir)" || exit $$?; \ done uninstall-libarccredentialstore_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarccredentialstore_la_HEADERS)'; test -n "$(libarccredentialstore_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarccredentialstore_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libarccredentialstore_ladir)" && rm -f $$files ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ 
$$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(HEADERS) installdirs: for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(libarccredentialstore_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-libarccredentialstore_laHEADERS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-libLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-libLTLIBRARIES \ uninstall-libarccredentialstore_laHEADERS .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libLTLIBRARIES clean-libtool ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am \ install-libLTLIBRARIES install-libarccredentialstore_laHEADERS \ install-man install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags uninstall \ uninstall-am uninstall-libLTLIBRARIES \ uninstall-libarccredentialstore_laHEADERS #test_SOURCES = test.cpp #test_CXXFLAGS = -I$(top_srcdir)/include \ # $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) #test_LDADD = \ # libarccredentialstore.la \ # $(top_builddir)/src/hed/libs/common/libarccommon.la \ # $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) $(EXTRA_LIBS) # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/credentialstore/PaxHeaders.7502/ClientVOMSRESTful.cpp0000644000000000000000000000012413131637327026506 xustar000000000000000027 mtime=1499938519.516258 27 atime=1513200574.836705 30 ctime=1513200660.051748119 nordugrid-arc-5.4.2/src/hed/libs/credentialstore/ClientVOMSRESTful.cpp0000644000175000002070000001041713131637327026556 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #include #include "ClientVOMSRESTful.h" namespace Arc { static URL options_to_voms_url(const std::string& host, int port, const TCPSec& sec) { URL url("http://127.0.0.1"); // safe initial value if(sec.sec == TLSSec) { url.ChangeProtocol("https"); if(port <= 0) port = HTTPS_DEFAULT_PORT; } else if(sec.sec == SSL3Sec) { url.ChangeProtocol("https"); url.AddOption("protocol","ssl3",true); if(port <= 0) port = HTTPS_DEFAULT_PORT; } else if(sec.sec == GSISec) { url.ChangeProtocol("httpg"); if(port <= 0) port = HTTPG_DEFAULT_PORT; } else if(sec.sec == GSIIOSec) { url.ChangeProtocol("httpg"); url.AddOption("protocol","gsi",true); if(port <= 0) port = HTTPG_DEFAULT_PORT; } else { if(port <= 0) port = HTTP_DEFAULT_PORT; } if(sec.enc == RequireEnc) { url.AddOption("encryption","required",true); } else if(sec.enc == PreferEnc) { url.AddOption("encryption","preferred",true); } else if(sec.enc == OptionalEnc) { url.AddOption("encryption","optional",true); } else if(sec.enc == NoEnc) { url.AddOption("encryption","off",true); } url.ChangeHost(host); url.ChangePort(port); url.ChangePath("/generate-ac"); return url; } ClientVOMSRESTful::~ClientVOMSRESTful() { } ClientVOMSRESTful::ClientVOMSRESTful(const BaseConfig& cfg, const std::string& host, int port, TCPSec sec, int timeout, const std::string& proxy_host, int proxy_port): ClientHTTP(cfg, options_to_voms_url(host, port, sec), timeout, proxy_host, proxy_port) { } MCC_Status ClientVOMSRESTful::Load() { MCC_Status r(STATUS_OK); if(!(r=ClientHTTP::Load())) return r; return r; } MCC_Status ClientVOMSRESTful::process(const std::list& fqans, const Period& lifetime, std::string& result) { std::string principal; std::list targets; return ClientVOMSRESTful::process(principal, fqans, lifetime, targets, result); } MCC_Status ClientVOMSRESTful::process(const std::string& principal, const std::list& fqans, const Period& lifetime, const std::list& targets, std::string& result) { URL url = GetURL(); url.ChangePath(url.Path()+"/generate-ac"); if(!principal.empty()) url.AddHTTPOption("principal",principal,true); if(!fqans.empty()) url.AddHTTPOption("fqans",join(fqans,","),true); if(lifetime != 0) url.AddHTTPOption("lifetime",(std::string)lifetime,true); if(!targets.empty()) url.AddHTTPOption("targets",join(targets,","),true); PayloadRaw request; PayloadStreamInterface* response = NULL; HTTPClientInfo info; MCC_Status status = ClientHTTP::process(ClientHTTPAttributes("GET", url.FullPathURIEncoded()), &request, &info, &response); if(!status) { if(response) delete response; return status; } if(!response) { return MCC_Status(GENERIC_ERROR,"VOMS","Response is empty"); } // voms // ac // warning* // error // code // message std::string resp_str; // TODO: something more effective is needed do { char buf[1024]; int len = sizeof(buf); if(!response->Get(buf,len)) break; resp_str.append(buf,len); if(resp_str.length() > 4*1024*1024) break; // Some sanity check } while(true); delete response; //std::cerr<<"--- response: "< #include namespace Arc { class VOMSCommand { // According to https://wiki.italiangrid.it/twiki/bin/view/VOMS/VOMSProtocol // Note 
that List andd Query commands are deprecated // and hence not present here. private: std::string str; public: VOMSCommand(void) {}; ~VOMSCommand(void) {}; VOMSCommand& GetGroup(const std::string& groupname); VOMSCommand& GetRole(const std::string& rolename); VOMSCommand& GetRoleInGroup(const std::string& groupname, const std::string& rolename); VOMSCommand& GetEverything(void); VOMSCommand& GetFQANs(void); VOMSCommand& GetFQAN(const std::string& fqan); const std::string& Str(void) const { return str; }; operator const std::string&(void) const { return str; }; }; class ClientVOMS : public ClientTCP { public: ClientVOMS() {} ClientVOMS(const BaseConfig& cfg, const std::string& host, int port, TCPSec sec, int timeout = -1); virtual ~ClientVOMS(); virtual MCC_Status Load(); MCC_Status process(const VOMSCommand& command, const Period& lifetime, std::string& result); MCC_Status process(const VOMSCommand& command, const std::list >& order, const Period& lifetime, std::string& result); MCC_Status process(const std::list& commands, const Period& lifetime, std::string& result); MCC_Status process(const std::list& commands, const std::list >& order, const Period& lifetime, std::string& result); }; } // namespace Arc #endif // __ARC_CLIENTVOMS_H__ nordugrid-arc-5.4.2/src/hed/libs/credentialstore/PaxHeaders.7502/CredentialStore.h0000644000000000000000000000012412343353401026102 xustar000000000000000027 mtime=1401804545.672799 27 atime=1513200574.839705 30 ctime=1513200660.043748021 nordugrid-arc-5.4.2/src/hed/libs/credentialstore/CredentialStore.h0000644000175000002070000000400012343353401026141 0ustar00mockbuildmock00000000000000#include #include #include #include #include #include namespace Arc { /** This class provides functionality for storing delegated credentials and retrieving them from some store services. This is very preliminary implementation and currently support only one type of credentials - X.509 proxies, and only one type of store service - MyProxy. Later it will be extended to support at least following services: ARC delegation service, VOMS service, local file system. */ class CredentialStore { private: static const int default_port = 7512; bool valid; int timeout; void set(const std::string& host, int port, const UserConfig& cfg); MCCConfig concfg; std::string host; int port; public: CredentialStore(const URL& url); CredentialStore(const UserConfig& cfg, const URL& url); ~CredentialStore(void); operator bool(void) { return valid; }; bool operator!(void) { return !valid; }; // Store delegated credentials (or an end-entity credential) to credential store. // The options contains key=value pairs affecting how credentials are // stored. For MyProxy following options are supported - // username, password, credname, lifetime. // If cred is not empty it should contain credentials to delegate/store. // Otherwise credentials of user configuration are used. 
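// A minimal usage sketch, not present in the original sources: option names
// follow the list above, the host name and all values are invented, and the
// options map is assumed to be a std::map<std::string,std::string>.
//
//   Arc::UserConfig usercfg;
//   Arc::CredentialStore store(usercfg, Arc::URL("myproxy://myproxy.example.org:7512"));
//   std::map<std::string,std::string> options;
//   options["username"] = "jdoe";
//   options["password"] = "secret";
//   options["lifetime"] = "604800";
//   if(store && store.Store(options)) {
//     // credentials from usercfg are now delegated to the MyProxy server;
//     // Retrieve() takes the same kind of options map and returns the
//     // delegated proxy (certificate, key and chain) as a PEM string.
//   }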
bool Store(const std::map& options, const std::string& cred = "", bool if_delegate = true, const Arc::Time deleg_start = Arc::Time(), const Arc::Period deleg_period = 604800); bool Retrieve(const std::map& options, std::string& cred, bool if_delegate = true); bool Info(const std::map& options,std::string& respinfo); bool Destroy(const std::map& options); bool ChangePassword(const std::map& options); }; } nordugrid-arc-5.4.2/src/hed/libs/credentialstore/PaxHeaders.7502/ClientVOMS.cpp0000644000000000000000000000012413131637327025301 xustar000000000000000027 mtime=1499938519.516258 27 atime=1513200574.834705 30 ctime=1513200660.050748106 nordugrid-arc-5.4.2/src/hed/libs/credentialstore/ClientVOMS.cpp0000644000175000002070000001603013131637327025346 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #include #include #include "ClientVOMS.h" namespace Arc { ClientVOMS::~ClientVOMS() { } ClientVOMS::ClientVOMS(const BaseConfig& cfg, const std::string& host, int port, TCPSec sec, int timeout): ClientTCP(cfg, host, port, sec, timeout) { } VOMSCommand& VOMSCommand::GetGroup(const std::string& groupname) { str = "G"; str.append(groupname); return *this; } VOMSCommand& VOMSCommand::GetRole(const std::string& rolename) { str = "R"; str.append(rolename); return *this; } VOMSCommand& VOMSCommand::GetRoleInGroup(const std::string& groupname, const std::string& rolename) { str = "B"; str.append(groupname); str.append(":"); str.append(rolename); return *this; } VOMSCommand& VOMSCommand::GetEverything(void) { str = "A"; return *this; } VOMSCommand& VOMSCommand::GetFQANs(void) { str = "M"; return *this; } VOMSCommand& VOMSCommand::GetFQAN(const std::string& fqan) { str = fqan; return *this; } // EBCDIC is not supported static bool VOMSDecodeChar(unsigned char c) { if((c >= (unsigned char)'a') && (c <= (unsigned char)'z')) { return (c - (unsigned char)'a'); } if((c >= (unsigned char)'A') && (c <= (unsigned char)'Z')) { c = (c - (unsigned char)'A') + ((unsigned char)'z' - (unsigned char)'a' + 1); return true; } if((c >= (unsigned char)'0') && (c <= (unsigned char)'9')) { c = (c - (unsigned char)'0') + ((unsigned char)'z' - (unsigned char)'a' + 1) + ((unsigned char)'Z' - (unsigned char)'A' + 1); return true; } if(c == (unsigned char)'[') { c = 0 + ((unsigned char)'z' - (unsigned char)'a' + 1) + ((unsigned char)'Z' - (unsigned char)'A' + 1) + ((unsigned char)'9' - (unsigned char)'0' + 1); return true; } if(c == (unsigned char)']') { c = 1 + ((unsigned char)'z' - (unsigned char)'a' + 1) + ((unsigned char)'Z' - (unsigned char)'A' + 1) + ((unsigned char)'9' - (unsigned char)'0' + 1); return true; } return false; } static bool VOMSDecodeData(const std::string& in, std::string& out) { std::string::size_type p_in = 0; std::string::size_type p_out = 0; // 4 bytes -> 3 bytes out.resize((in.length()+3)/4*3, '\0'); if(out.empty()) return true; for(;;) { unsigned char c; if(p_in >= in.length()) break; c = in[p_in++]; if(!VOMSDecodeChar(c)) return false; out[p_out] = c<<2; if(p_in >= in.length()) { ++p_out; break; } c = in[p_in++]; if(!VOMSDecodeChar(c)) return false; out[p_out] |= c>>4; ++p_out; out[p_out] = c<<4; if(p_in >= in.length()) { ++p_out; break; } c = in[p_in++]; if(!VOMSDecodeChar(c)) return false; out[p_out] |= c>>2; ++p_out; out[p_out] = c<<6; if(p_in >= in.length()) { ++p_out; break; } c = in[p_in++]; if(!VOMSDecodeChar(c)) return false; out[p_out] |= c; ++p_out; } out.resize(p_out); return true; } MCC_Status ClientVOMS::Load() { MCC_Status r(STATUS_OK); if(!(r=ClientTCP::Load())) return r; 
return r; } MCC_Status ClientVOMS::process(const VOMSCommand& command, const Period& lifetime, std::string& result) { std::list commands; const std::list > order; commands.push_back(command); return process(commands, order, lifetime, result); } MCC_Status ClientVOMS::process(const VOMSCommand& command, const std::list >& order, const Period& lifetime, std::string& result) { std::list commands; commands.push_back(command); return process(commands, order, lifetime, result); } MCC_Status ClientVOMS::process(const std::list& commands, const Period& lifetime, std::string& result) { const std::list > order; return process(commands, order, lifetime, result); } MCC_Status ClientVOMS::process(const std::list& commands, const std::list >& order, const Period& lifetime, std::string& result) { if(commands.empty()) return MCC_Status(STATUS_OK); // No request - no response //voms // command + // order ? // targets ? // lifetime ? // base64 ? // version ? XMLNode msg(NS(),"voms"); for(std::list::const_iterator cmd = commands.begin(); cmd != commands.end(); ++cmd) { msg.NewChild("command") = cmd->Str(); } std::string ord_str; for(std::list >::const_iterator ord = order.begin(); ord != order.end(); ++ord) { if(!ord_str.empty()) ord_str += ","; ord_str += ord->first; if(!ord->second.empty()) ord_str += (":"+ord->second); } if(!ord_str.empty()) msg.NewChild("order") = ord_str; if(lifetime.GetPeriod() > 0) msg.NewChild("lifetime") = tostring(lifetime.GetPeriod()); // Leaving base64 and version default to avoid dealing with various versions of service Arc::PayloadRaw request; { std::string msg_str; msg.GetXML(msg_str,"US-ASCII"); msg_str.insert(0,""); request.Insert(msg_str.c_str(), 0, msg_str.length()); } Arc::PayloadStreamInterface *response = NULL; Arc::MCC_Status status = ClientTCP::process(&request, &response, true); if(!status) { if(response) delete response; return status; } if(!response) { return MCC_Status(GENERIC_ERROR,"VOMS","Response is empty"); } // vomsans // error ? // item * // number // message // bitstr // ac // version // It is not clear how VOMS combines answers to different commands. 
// So we are assuming it is always one answer std::string resp_str; // TODO: something more effective is needed do { char buf[1024]; int len = sizeof(buf); if(!response->Get(buf,len)) break; resp_str.append(buf,len); if(resp_str.length() > 4*1024*1024) break; // Some sanity check } while(true); delete response; XMLNode resp(resp_str); if(!resp) { return MCC_Status(GENERIC_ERROR,"VOMS","Response is not recognized as XML"); } if(resp.Name() != "vomsans") { return MCC_Status(GENERIC_ERROR,"VOMS","Response is missing required 'vomsans' element"); } if(resp["ac"]) { result = "-----BEGIN VOMS AC-----\n"+(std::string)resp["ac"]+"\n-----END VOMS AC-----"; } else if(resp["bitstr"]) { std::string bitstr = (std::string)resp["bitstr"]; if(!VOMSDecodeData(bitstr,result)) { result = Base64::decode(bitstr); } } else { result.resize(0); } if(resp["error"]) { std::string err_str; for(XMLNode err = resp["error"]["item"]; (bool)err; ++err) { if(!err_str.empty()) err_str += "\n"; err_str += (std::string)(err["message"]); } return MCC_Status(GENERIC_ERROR,"VOMS",err_str); } return MCC_Status(STATUS_OK); } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/credentialstore/PaxHeaders.7502/ClientVOMSRESTful.h0000644000000000000000000000012412343353401026143 xustar000000000000000027 mtime=1401804545.672799 27 atime=1513200574.839705 30 ctime=1513200660.045748045 nordugrid-arc-5.4.2/src/hed/libs/credentialstore/ClientVOMSRESTful.h0000644000175000002070000000174212343353401026214 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_CLIENTVOMSRESTFUL_H__ #define __ARC_CLIENTVOMSRESTFUL_H__ #include #include namespace Arc { class ClientVOMSRESTful : public ClientHTTP { public: ClientVOMSRESTful() {} ClientVOMSRESTful(const BaseConfig& cfg, const std::string& host, int port, TCPSec sec, int timeout = -1, const std::string& proxy_host = "", int proxy_port = 0); virtual ~ClientVOMSRESTful(); virtual MCC_Status Load(); MCC_Status process(const std::list& fqans, const Period& lifetime, std::string& result); MCC_Status process(const std::string& principal, const std::list& fqans, const Period& lifetime, const std::list& targets, std::string& result); }; } // namespace Arc #endif // __ARC_CLIENTVOMSRESTFUL_H__ nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/security0000644000000000000000000000013213214316023021241 xustar000000000000000030 mtime=1513200659.334739349 30 atime=1513200668.720854145 30 ctime=1513200659.334739349 nordugrid-arc-5.4.2/src/hed/libs/security/0000755000175000002070000000000013214316023021364 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/security/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712231165602023361 xustar000000000000000027 mtime=1382345602.148497 30 atime=1513200598.674997457 30 ctime=1513200659.328739276 nordugrid-arc-5.4.2/src/hed/libs/security/Makefile.am0000644000175000002070000000121612231165602023423 0ustar00mockbuildmock00000000000000SUBDIRS = ArcPDP lib_LTLIBRARIES = libarcsecurity.la libarcsecurity_ladir = $(pkgincludedir)/security libarcsecurity_la_HEADERS = PDP.h Security.h ClassLoader.h libarcsecurity_la_SOURCES = PDP.cpp Security.cpp ClassLoader.cpp libarcsecurity_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcsecurity_la_LIBADD = \ ArcPDP/libarcpdp.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(GTHREAD_LIBS) 
libarcsecurity_la_LDFLAGS = -version-info 3:0:0 nordugrid-arc-5.4.2/src/hed/libs/security/PaxHeaders.7502/ClassLoader.cpp0000644000000000000000000000012411330053136024217 xustar000000000000000027 mtime=1264604766.327876 27 atime=1513200574.510701 30 ctime=1513200659.333739337 nordugrid-arc-5.4.2/src/hed/libs/security/ClassLoader.cpp0000644000175000002070000000723111330053136024267 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "ClassLoader.h" namespace Arc { static Config cfg_empty; // TODO (IMPORTANT): protect it against multi-threaded access // Or even better redesign this class to distinguish between // different types of classes properly. //typedef std::map identifier_map_t; //static identifier_map_t id_map; ClassLoader* ClassLoader::_instance = NULL; static void freeClassLoader() { ClassLoader* cl = ClassLoader::getClassLoader(); if(cl) delete cl; } ClassLoader::ClassLoader(Config * cfg) : PluginsFactory(*(cfg?cfg:(&cfg_empty))){ if(cfg!=NULL) load_all_instances(cfg); } ClassLoader::~ClassLoader(){ // Delete the list (do not delete the element), gurantee the caller // will not need to delete the elements. If the list is automatically // deleted by caller (e.g. main function), there will be "double free // or corruption (fasttop)", because ModuleManager also deletes the // same memory space of the element. //!!if(!id_map.empty()) //!! id_map.clear(); } ClassLoader* ClassLoader::getClassLoader(Config* cfg) { if(_instance == NULL && cfg == NULL) { std::cout<<"Configuration should not be NULL at the initiation step of singleton ClassLoader"<setCfg(*cfg); _instance->load_all_instances(cfg); } return _instance; } void ClassLoader::load_all_instances(Config *cfg){ XMLNode root = (*cfg).GetRoot(); if(!root) return; if(!MatchXMLName(root,"ArcConfig")) return; for (int i = 0;;++i) { XMLNode plugins = root["Plugins"][i]; if (!plugins) { break; } std::string share_lib_name = (std::string)(plugins.Attribute("Name")); for(int j = 0;;++j){ XMLNode plugin = plugins.Child(j); if(!plugin){ break; } if (MatchXMLName(plugin, "Plugin")) { std::string plugin_name = (std::string)(plugin.Attribute("Name")); if(!load(share_lib_name,plugin_name)) { //std::cout<<"There is no " << plugin_name <<" type plugin"<name)==NULL); ++desc) { //std::cout<<"size:"<name<<"------classid:"<name == classId){ loader_descriptor &descriptor =*desc; LoadableClass * res = NULL; res = (*descriptor.get_instance)(arg); return res; } } } return NULL; */ ClassLoaderPluginArgument clarg(arg); return get_instance(className,classId,&clarg); } LoadableClass* ClassLoader::Instance(XMLNode* arg, const std::string& className){ /* identifier_map_t::iterator it; void* ptr; for(it=id_map.begin(); it!=id_map.end(); ++it){ if((!className.empty()) && (className != (*it).first)) continue; ptr =(*it).second; for(loader_descriptor* desc = (loader_descriptor*)ptr; !((desc->name)==NULL); ++desc) { loader_descriptor &descriptor =*desc; LoadableClass * res = (*descriptor.get_instance)(arg); if(res) return res; } } return NULL; */ ClassLoaderPluginArgument clarg(arg); return get_instance(className,&clarg); } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/security/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315726023374 xustar000000000000000030 mtime=1513200598.724998068 30 atime=1513200648.177602893 30 ctime=1513200659.329739288 nordugrid-arc-5.4.2/src/hed/libs/security/Makefile.in0000644000175000002070000010270413214315726023446 0ustar00mockbuildmock00000000000000# Makefile.in generated by 
automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/security DIST_COMMON = $(libarcsecurity_la_HEADERS) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libdir)" \ "$(DESTDIR)$(libarcsecurity_ladir)" LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = libarcsecurity_la_DEPENDENCIES = ArcPDP/libarcpdp.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) am_libarcsecurity_la_OBJECTS = libarcsecurity_la-PDP.lo \ 
libarcsecurity_la-Security.lo libarcsecurity_la-ClassLoader.lo libarcsecurity_la_OBJECTS = $(am_libarcsecurity_la_OBJECTS) libarcsecurity_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarcsecurity_la_CXXFLAGS) $(CXXFLAGS) \ $(libarcsecurity_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcsecurity_la_SOURCES) DIST_SOURCES = $(libarcsecurity_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive HEADERS = $(libarcsecurity_la_HEADERS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ 
ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ 
LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ 
pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = ArcPDP lib_LTLIBRARIES = libarcsecurity.la libarcsecurity_ladir = $(pkgincludedir)/security libarcsecurity_la_HEADERS = PDP.h Security.h ClassLoader.h libarcsecurity_la_SOURCES = PDP.cpp Security.cpp ClassLoader.cpp libarcsecurity_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcsecurity_la_LIBADD = \ ArcPDP/libarcpdp.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(GTHREAD_LIBS) libarcsecurity_la_LDFLAGS = -version-info 3:0:0 all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/security/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/security/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcsecurity.la: $(libarcsecurity_la_OBJECTS) $(libarcsecurity_la_DEPENDENCIES) $(libarcsecurity_la_LINK) -rpath $(libdir) $(libarcsecurity_la_OBJECTS) $(libarcsecurity_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcsecurity_la-ClassLoader.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcsecurity_la-PDP.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcsecurity_la-Security.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ 
@am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcsecurity_la-PDP.lo: PDP.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcsecurity_la_CXXFLAGS) $(CXXFLAGS) -MT libarcsecurity_la-PDP.lo -MD -MP -MF $(DEPDIR)/libarcsecurity_la-PDP.Tpo -c -o libarcsecurity_la-PDP.lo `test -f 'PDP.cpp' || echo '$(srcdir)/'`PDP.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcsecurity_la-PDP.Tpo $(DEPDIR)/libarcsecurity_la-PDP.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='PDP.cpp' object='libarcsecurity_la-PDP.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcsecurity_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcsecurity_la-PDP.lo `test -f 'PDP.cpp' || echo '$(srcdir)/'`PDP.cpp libarcsecurity_la-Security.lo: Security.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcsecurity_la_CXXFLAGS) $(CXXFLAGS) -MT libarcsecurity_la-Security.lo -MD -MP -MF $(DEPDIR)/libarcsecurity_la-Security.Tpo -c -o libarcsecurity_la-Security.lo `test -f 'Security.cpp' || echo '$(srcdir)/'`Security.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcsecurity_la-Security.Tpo $(DEPDIR)/libarcsecurity_la-Security.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Security.cpp' object='libarcsecurity_la-Security.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcsecurity_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcsecurity_la-Security.lo `test -f 'Security.cpp' || echo '$(srcdir)/'`Security.cpp libarcsecurity_la-ClassLoader.lo: ClassLoader.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcsecurity_la_CXXFLAGS) $(CXXFLAGS) -MT libarcsecurity_la-ClassLoader.lo -MD -MP -MF $(DEPDIR)/libarcsecurity_la-ClassLoader.Tpo -c -o libarcsecurity_la-ClassLoader.lo `test -f 'ClassLoader.cpp' || echo '$(srcdir)/'`ClassLoader.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcsecurity_la-ClassLoader.Tpo $(DEPDIR)/libarcsecurity_la-ClassLoader.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ClassLoader.cpp' object='libarcsecurity_la-ClassLoader.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcsecurity_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcsecurity_la-ClassLoader.lo `test -f 'ClassLoader.cpp' || echo '$(srcdir)/'`ClassLoader.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libarcsecurity_laHEADERS: $(libarcsecurity_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarcsecurity_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarcsecurity_ladir)" @list='$(libarcsecurity_la_HEADERS)'; test -n "$(libarcsecurity_ladir)" || list=; \ 
for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarcsecurity_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarcsecurity_ladir)" || exit $$?; \ done uninstall-libarcsecurity_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarcsecurity_la_HEADERS)'; test -n "$(libarcsecurity_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarcsecurity_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libarcsecurity_ladir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(HEADERS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(libarcsecurity_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-libarcsecurity_laHEADERS install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-libLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-libLTLIBRARIES \ uninstall-libarcsecurity_laHEADERS .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic \ clean-libLTLIBRARIES clean-libtool ctags ctags-recursive \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-libLTLIBRARIES \ install-libarcsecurity_laHEADERS install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs installdirs-am \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags tags-recursive uninstall uninstall-am \ uninstall-libLTLIBRARIES uninstall-libarcsecurity_laHEADERS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/security/PaxHeaders.7502/PDP.h0000644000000000000000000000012412110410653022111 xustar000000000000000027 mtime=1361187243.181034 27 atime=1513200574.508701 30 ctime=1513200659.324739227 nordugrid-arc-5.4.2/src/hed/libs/security/PDP.h0000644000175000002070000001121012110410653022151 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_PDP_H__ #define __ARC_SEC_PDP_H__ #include #include #include #include #include namespace ArcSec { //AuthzRequest, AuthzRequestSection, internal structure for request context /** These structure are based on the request schema for PDP, so far it can apply to * the ArcPDP's request schema, see src/hed/pdc/Request.xsd and src/hed/pdc/Request.xml. It could also apply to * the XACMLPDP's request schema, since the difference is minor. * * Another approach is, the service composes/marshalls the xml structure directly, then the service should use * difference code to compose for ArcPDP's request schema and XACMLPDP's schema, which is not so good. 
*/ typedef struct { std::string value; std::string id; std::string type; std::string issuer; } AuthzRequestSection; typedef struct { std::list subject; std::list resource; std::list action; std::list context; } AuthzRequest; class PDPConfigContext:public Arc::MessageContextElement { private: std::list request; std::list policylocation; public: PDPConfigContext() {}; PDPConfigContext(std::list req, std::string& policy) {request = req; policylocation.push_back(policy); }; PDPConfigContext(std::list req, std::list policy) {request = req; policylocation = policy; }; void AddRequestItem(ArcSec::AuthzRequest requestitem) { request.push_back(requestitem); }; void SetRequestItem(ArcSec::AuthzRequest requestitem) { while(!(request.empty())) { request.pop_back(); } request.push_back(requestitem); }; void SetRequestItem(std::list req) { while(!(request.empty())) { request.pop_back(); } request = req; }; int RequestItemSize() { return (int)(request.size()); }; ArcSec::AuthzRequest& GetRequestItem(int n) { std::list::iterator it, ret; it = request.begin(); for(int i = 0; i<=n; i++) {ret = it; it++;} return (*ret); }; void AddPolicyLocation(std::string& policy) { policylocation.push_back(policy); }; void SetPolicyLocation(std::list policy) { std::list::iterator it1 = policylocation.begin(); std::list::iterator it2 = policylocation.end(); policylocation.erase(it1, it2); policylocation = policy; }; void SetPolicyLocation(std::string& policy) { std::list::iterator it1 = policylocation.begin(); std::list::iterator it2 = policylocation.end(); policylocation.erase(it1, it2); policylocation.push_back(policy); }; std::list& GetPolicyLocation() { return policylocation; }; virtual ~PDPConfigContext(void) { while(!(request.empty())) { request.pop_back(); } }; }; class PDPStatus { public: enum { STATUS_ALLOW = 0, STATUS_DENY = 1 } Code; PDPStatus(void); PDPStatus(bool positive); PDPStatus(int code); PDPStatus(int code, const std::string& explanation); operator bool(void) const { return (code == 0); }; int getCode(void) const; const std::string& getExplanation(void) const; operator std::string(void) const; private: int code; std::string explanation; }; /// Base class for Policy Decision Point plugins /** This virtual class defines method isPermitted() which processes security related information/attributes in Message and makes security decision - permit (true) or deny (false). Configuration of PDP is consumed during creation of instance through XML subtree fed to constructor. 
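    For illustration only (the class below is a hypothetical sketch, not part of
    this library), a trivial PDP which permits every request could look roughly
    like this:

      class AllowAllPDP: public ArcSec::PDP {
       public:
        AllowAllPDP(Arc::Config* cfg, Arc::PluginArgument* parg): PDP(cfg, parg) {};
        virtual PDPStatus isPermitted(Arc::Message* msg) const { return PDPStatus(true); };
      };

    A real plugin of this kind is registered under the "HED:PDP" plugin kind
    (see PDPPluginKind below) so that the HED plugin machinery can load it.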
*/ class PDP: public Arc::Plugin { public: PDP(Arc::Config* cfg, Arc::PluginArgument* parg): Arc::Plugin(parg) { if(cfg) id_=(std::string)(cfg->Attribute("id")); }; virtual ~PDP() {}; virtual PDPStatus isPermitted(Arc::Message *msg) const = 0; void SetId(std::string& id) { id_ = id; }; std::string GetId() { return id_; }; protected: std::string id_; static Arc::Logger logger; }; #define PDPPluginKind ("HED:PDP") class PDPPluginArgument: public Arc::PluginArgument { private: Arc::Config* config_; public: PDPPluginArgument(Arc::Config* config):config_(config) { }; virtual ~PDPPluginArgument(void) { }; operator Arc::Config* (void) { return config_; }; }; } // namespace ArcSec #endif /* __ARC_SEC_PDP_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/PaxHeaders.7502/PDP.cpp0000644000000000000000000000012212110410653022442 xustar000000000000000027 mtime=1361187243.181034 27 atime=1513200574.478701 28 ctime=1513200659.3307393 nordugrid-arc-5.4.2/src/hed/libs/security/PDP.cpp0000644000175000002070000000122312110410653022507 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif // PDP.cpp #include "Security.h" #include "PDP.h" namespace ArcSec{ Arc::Logger PDP::logger(Arc::Logger::rootLogger, "PDP"); PDPStatus::PDPStatus(void): code(STATUS_DENY) { } PDPStatus::PDPStatus(bool positive): code(positive?STATUS_ALLOW:STATUS_DENY) { } PDPStatus::PDPStatus(int code_): code(code_) { } PDPStatus::PDPStatus(int code_, const std::string& explanation_): code(code_),explanation(explanation_) { } int PDPStatus::getCode(void) const { return code; } const std::string& PDPStatus::getExplanation(void) const { return explanation; } } nordugrid-arc-5.4.2/src/hed/libs/security/PaxHeaders.7502/Security.cpp0000644000000000000000000000012410705666764023660 xustar000000000000000027 mtime=1192717812.465516 27 atime=1513200574.478701 30 ctime=1513200659.332739325 nordugrid-arc-5.4.2/src/hed/libs/security/Security.cpp0000644000175000002070000000026410705666764023727 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif // Security.cpp #include "Security.h" namespace ArcSec{ Arc::Logger Security::logger(Arc::Logger::rootLogger, "Security"); } nordugrid-arc-5.4.2/src/hed/libs/security/PaxHeaders.7502/ArcPDP0000644000000000000000000000013213214316023022312 xustar000000000000000030 mtime=1513200659.373739826 30 atime=1513200668.720854145 30 ctime=1513200659.373739826 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/0000755000175000002070000000000013214316023022435 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/alg0000644000000000000000000000013213214316023023055 xustar000000000000000030 mtime=1513200659.494741306 30 atime=1513200668.720854145 30 ctime=1513200659.494741306 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/alg/0000755000175000002070000000000013214316023023200 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/alg/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515025200 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200598.415994289 30 ctime=1513200659.490741257 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/alg/Makefile.am0000644000175000002070000000070212052416515025241 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libarcalg.la libarcalg_ladir = $(pkgincludedir)/security/ArcPDP/alg libarcalg_la_HEADERS = AlgFactory.h CombiningAlg.h DenyOverridesAlg.h PermitOverridesAlg.h OrderedAlg.h libarcalg_la_SOURCES = DenyOverridesAlg.cpp PermitOverridesAlg.cpp 
OrderedAlg.cpp libarcalg_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcalg_la_LIBADD = $(top_builddir)/src/hed/libs/common/libarccommon.la nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/alg/PaxHeaders.7502/AlgFactory.h0000644000000000000000000000012411730411253025341 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200574.486701 30 ctime=1513200659.486741208 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/alg/AlgFactory.h0000644000175000002070000000174111730411253025411 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ALGFACTORY_H__ #define __ARC_SEC_ALGFACTORY_H__ #include #include #include "CombiningAlg.h" namespace ArcSec { typedef std::map AlgMap; ///Interface for algorithm factory class /**AlgFactory is in charge of creating CombiningAlg according to the algorithm type given as * argument of method createAlg. This class can be inherited for implementing a factory class * which can create some specific combining algorithm objects. */ class AlgFactory : public Arc::LoadableClass { public: AlgFactory(Arc::PluginArgument* parg): Arc::LoadableClass(parg) {}; virtual ~AlgFactory() {}; public: /**creat algorithm object based on the type algorithm type *@param type The type of combining algorithm *@return The object of CombiningAlg */ virtual CombiningAlg* createAlg(const std::string& type) = 0; protected: AlgMap algmap; }; } // namespace ArcSec #endif /* __ARC_SEC_ALGFACTORY_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/alg/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315726025207 xustar000000000000000030 mtime=1513200598.463994876 30 atime=1513200648.241603676 29 ctime=1513200659.49174127 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/alg/Makefile.in0000644000175000002070000006520013214315726025261 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/security/ArcPDP/alg DIST_COMMON = $(libarcalg_la_HEADERS) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libarcalg_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libarcalg_la_OBJECTS = libarcalg_la-DenyOverridesAlg.lo \ libarcalg_la-PermitOverridesAlg.lo libarcalg_la-OrderedAlg.lo libarcalg_la_OBJECTS = $(am_libarcalg_la_OBJECTS) libarcalg_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libarcalg_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcalg_la_SOURCES) DIST_SOURCES = $(libarcalg_la_SOURCES) am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed 
'$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libarcalg_ladir)" HEADERS = $(libarcalg_la_HEADERS) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ 
GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion 
= @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libarcalg.la libarcalg_ladir = $(pkgincludedir)/security/ArcPDP/alg libarcalg_la_HEADERS = AlgFactory.h CombiningAlg.h DenyOverridesAlg.h PermitOverridesAlg.h OrderedAlg.h libarcalg_la_SOURCES = DenyOverridesAlg.cpp PermitOverridesAlg.cpp OrderedAlg.cpp libarcalg_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcalg_la_LIBADD = $(top_builddir)/src/hed/libs/common/libarccommon.la all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/security/ArcPDP/alg/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/security/ArcPDP/alg/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcalg.la: $(libarcalg_la_OBJECTS) $(libarcalg_la_DEPENDENCIES) $(libarcalg_la_LINK) $(libarcalg_la_OBJECTS) $(libarcalg_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcalg_la-DenyOverridesAlg.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcalg_la-OrderedAlg.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcalg_la-PermitOverridesAlg.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcalg_la-DenyOverridesAlg.lo: DenyOverridesAlg.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcalg_la_CXXFLAGS) $(CXXFLAGS) -MT libarcalg_la-DenyOverridesAlg.lo -MD -MP -MF $(DEPDIR)/libarcalg_la-DenyOverridesAlg.Tpo -c -o libarcalg_la-DenyOverridesAlg.lo `test -f 'DenyOverridesAlg.cpp' || echo '$(srcdir)/'`DenyOverridesAlg.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcalg_la-DenyOverridesAlg.Tpo $(DEPDIR)/libarcalg_la-DenyOverridesAlg.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DenyOverridesAlg.cpp' object='libarcalg_la-DenyOverridesAlg.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile 
$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcalg_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcalg_la-DenyOverridesAlg.lo `test -f 'DenyOverridesAlg.cpp' || echo '$(srcdir)/'`DenyOverridesAlg.cpp libarcalg_la-PermitOverridesAlg.lo: PermitOverridesAlg.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcalg_la_CXXFLAGS) $(CXXFLAGS) -MT libarcalg_la-PermitOverridesAlg.lo -MD -MP -MF $(DEPDIR)/libarcalg_la-PermitOverridesAlg.Tpo -c -o libarcalg_la-PermitOverridesAlg.lo `test -f 'PermitOverridesAlg.cpp' || echo '$(srcdir)/'`PermitOverridesAlg.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcalg_la-PermitOverridesAlg.Tpo $(DEPDIR)/libarcalg_la-PermitOverridesAlg.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='PermitOverridesAlg.cpp' object='libarcalg_la-PermitOverridesAlg.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcalg_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcalg_la-PermitOverridesAlg.lo `test -f 'PermitOverridesAlg.cpp' || echo '$(srcdir)/'`PermitOverridesAlg.cpp libarcalg_la-OrderedAlg.lo: OrderedAlg.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcalg_la_CXXFLAGS) $(CXXFLAGS) -MT libarcalg_la-OrderedAlg.lo -MD -MP -MF $(DEPDIR)/libarcalg_la-OrderedAlg.Tpo -c -o libarcalg_la-OrderedAlg.lo `test -f 'OrderedAlg.cpp' || echo '$(srcdir)/'`OrderedAlg.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcalg_la-OrderedAlg.Tpo $(DEPDIR)/libarcalg_la-OrderedAlg.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='OrderedAlg.cpp' object='libarcalg_la-OrderedAlg.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcalg_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcalg_la-OrderedAlg.lo `test -f 'OrderedAlg.cpp' || echo '$(srcdir)/'`OrderedAlg.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libarcalg_laHEADERS: $(libarcalg_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarcalg_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarcalg_ladir)" @list='$(libarcalg_la_HEADERS)'; test -n "$(libarcalg_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarcalg_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarcalg_ladir)" || exit $$?; \ done uninstall-libarcalg_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarcalg_la_HEADERS)'; test -n "$(libarcalg_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarcalg_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libarcalg_ladir)" && rm -f $$files ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done 
| \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(HEADERS) installdirs: for dir in "$(DESTDIR)$(libarcalg_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-libarcalg_laHEADERS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-libarcalg_laHEADERS .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am \ install-libarcalg_laHEADERS install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-libarcalg_laHEADERS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/alg/PaxHeaders.7502/PermitOverridesAlg.cpp0000644000000000000000000000012411066203470027412 xustar000000000000000027 mtime=1222182712.681943 27 atime=1513200574.487701 30 ctime=1513200659.493741294 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/alg/PermitOverridesAlg.cpp0000644000175000002070000000274411066203470027466 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "PermitOverridesAlg.h" namespace ArcSec{ std::string PermitOverridesCombiningAlg::algId = "Permit-Overrides"; Result PermitOverridesCombiningAlg::combine(EvaluationCtx* ctx, std::list policies){ bool atleast_onedeny = false; bool atleast_onenotapplicable = false; std::list::iterator it; for(it = policies.begin(); it != policies.end(); it++) { Policy* policy = *it; Result res = policy->eval(ctx); //If get a return DECISION_PERMIT, then regardless of whatelse result from the other Rule, //always return PERMIT. if(res == DECISION_PERMIT) return DECISION_PERMIT; //If get a return DECISION_NOT_APPLICABLE (this usually happens when Attribute with corrsponding //AttributeId can be found from RequestItem, but value does not match). 
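//Taken together with the early return above, the bookkeeping below gives
//Permit-Overrides the effective precedence PERMIT > DENY > NOT_APPLICABLE >
//INDETERMINATE; an empty policy list therefore yields DECISION_INDETERMINATE.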
if (res == DECISION_NOT_APPLICABLE){ atleast_onenotapplicable = true; } //Keep track of whether we had at least one rule that is pertained to the request else if(res == DECISION_DENY) atleast_onedeny = true; } //Some Rule said DENY, so since nothing could have permitted, return DENY if(atleast_onedeny) return DECISION_DENY; //No Rule said DENY, none of the rules actually applied, return NOT_APPLICABLE if(atleast_onenotapplicable) return DECISION_NOT_APPLICABLE; //If here, there is problem with one of the Rules, then return INDETERMINATE return DECISION_INDETERMINATE; } } //namespace ArcSec nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/alg/PaxHeaders.7502/PermitOverridesAlg.h0000644000000000000000000000012411073223740027057 xustar000000000000000027 mtime=1223501792.844198 27 atime=1513200574.484701 30 ctime=1513200659.488741233 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/alg/PermitOverridesAlg.h0000644000175000002070000000241111073223740027122 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_PERMITOVERRIDESCOMBININGALG_H__ #define __ARC_SEC_PERMITOVERRIDESCOMBININGALG_H__ #include #include namespace ArcSec { ///Implement the "Permit-Overrides" algorithm /**Permit-Overrides, scans the policy set which is given as the parameters of "combine" *method, if gets "permit" result from any policy, then stops scanning and gives "permit" *as result, otherwise gives "deny". */ class PermitOverridesCombiningAlg : public CombiningAlg { private: static std::string algId; public: PermitOverridesCombiningAlg(){}; virtual ~PermitOverridesCombiningAlg(){}; public: /**If there is one policy which return positive evaluation result, then omit the *other policies and return DECISION_PERMIT *@param ctx This object contains request information which will be used to evaluated *against policy. *@param policlies This is a container which contains policy objects. *@return The combined result according to the algorithm. */ virtual Result combine(EvaluationCtx* ctx, std::list policies); /**Get the identifier*/ virtual const std::string& getalgId(void) const {return algId;}; }; } // namespace ArcSec #endif /* __ARC_SEC_PERMITOVERRIDESCOMBININGALG_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/alg/PaxHeaders.7502/DenyOverridesAlg.h0000644000000000000000000000012411073223740026516 xustar000000000000000027 mtime=1223501792.844198 27 atime=1513200574.484701 30 ctime=1513200659.487741221 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/alg/DenyOverridesAlg.h0000644000175000002070000000237111073223740026566 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_DENYOVERRIDESCOMBININGALG_H__ #define __ARC_SEC_DENYOVERRIDESCOMBININGALG_H__ #include #include namespace ArcSec { ///Implement the "Deny-Overrides" algorithm /**Deny-Overrides, scans the policy set which is given as the parameters of "combine" *method, if gets "deny" result from any policy, then stops scanning and gives "deny" *as result, otherwise gives "permit". */ class DenyOverridesCombiningAlg : public CombiningAlg { private: static std::string algId; public: DenyOverridesCombiningAlg(){}; virtual ~DenyOverridesCombiningAlg(){}; public: /**If there is one policy which return negative evaluation result, then omit the *other policies and return DECISION_DENY *@param ctx This object contains request information which will be used to evaluated *against policy. *@param policlies This is a container which contains policy objects. *@return The combined result according to the algorithm. 
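*Illustrative example: if the individual evaluation results are
*NOT_APPLICABLE, PERMIT and DENY, the combined result is DECISION_DENY,
*since a single deny overrides any number of permits.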
*/ virtual Result combine(EvaluationCtx* ctx, std::list policies); /**Get the identifier*/ virtual const std::string& getalgId(void) const {return algId;}; }; } // namespace ArcSec #endif /* __ARC_SEC_DENYOVERRIDESCOMBININGALG_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/alg/PaxHeaders.7502/OrderedAlg.h0000644000000000000000000000012411073223740025320 xustar000000000000000027 mtime=1223501792.844198 27 atime=1513200574.486701 30 ctime=1513200659.489741245 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/alg/OrderedAlg.h0000644000175000002070000000516511073223740025374 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ORDEREDCOMBININGALG_H__ #define __ARC_SEC_ORDEREDCOMBININGALG_H__ #include #include namespace ArcSec { #define MAX_OREDERED_PRIORITIES 4 class OrderedCombiningAlg : public CombiningAlg { public: OrderedCombiningAlg() {}; virtual ~OrderedCombiningAlg() {}; protected: Result combine(EvaluationCtx* ctx, std::list policies,const Result priorities[MAX_OREDERED_PRIORITIES]); }; #define ORDERED_ALG_CLASS(NAME) \ class NAME: public OrderedCombiningAlg { \ private: \ static std::string algId; \ static Result priorities[MAX_OREDERED_PRIORITIES]; \ public: \ NAME(void) {}; \ virtual ~NAME(void) {}; \ virtual const std::string& getalgId(void) const { return algId; }; \ virtual Result combine(EvaluationCtx* ctx, std::list policies) { \ return OrderedCombiningAlg::combine(ctx,policies,priorities); \ }; \ } ORDERED_ALG_CLASS(PermitDenyIndeterminateNotApplicableCombiningAlg); ORDERED_ALG_CLASS(PermitDenyNotApplicableIndeterminateCombiningAlg); ORDERED_ALG_CLASS(PermitIndeterminateDenyNotApplicableCombiningAlg); ORDERED_ALG_CLASS(PermitIndeterminateNotApplicableDenyCombiningAlg); ORDERED_ALG_CLASS(PermitNotApplicableDenyIndeterminateCombiningAlg); ORDERED_ALG_CLASS(PermitNotApplicableIndeterminateDenyCombiningAlg); ORDERED_ALG_CLASS(DenyPermitIndeterminateNotApplicableCombiningAlg); ORDERED_ALG_CLASS(DenyPermitNotApplicableIndeterminateCombiningAlg); ORDERED_ALG_CLASS(DenyIndeterminatePermitNotApplicableCombiningAlg); ORDERED_ALG_CLASS(DenyIndeterminateNotApplicablePermitCombiningAlg); ORDERED_ALG_CLASS(DenyNotApplicablePermitIndeterminateCombiningAlg); ORDERED_ALG_CLASS(DenyNotApplicableIndeterminatePermitCombiningAlg); ORDERED_ALG_CLASS(IndeterminatePermitDenyNotApplicableCombiningAlg); ORDERED_ALG_CLASS(IndeterminatePermitNotApplicableDenyCombiningAlg); ORDERED_ALG_CLASS(IndeterminateDenyPermitNotApplicableCombiningAlg); ORDERED_ALG_CLASS(IndeterminateDenyNotApplicablePermitCombiningAlg); ORDERED_ALG_CLASS(IndeterminateNotApplicablePermitDenyCombiningAlg); ORDERED_ALG_CLASS(IndeterminateNotApplicableDenyPermitCombiningAlg); ORDERED_ALG_CLASS(NotApplicablePermitDenyIndeterminateCombiningAlg); ORDERED_ALG_CLASS(NotApplicablePermitIndeterminateDenyCombiningAlg); ORDERED_ALG_CLASS(NotApplicableDenyPermitIndeterminateCombiningAlg); ORDERED_ALG_CLASS(NotApplicableDenyIndeterminatePermitCombiningAlg); ORDERED_ALG_CLASS(NotApplicableIndeterminatePermitDenyCombiningAlg); ORDERED_ALG_CLASS(NotApplicableIndeterminateDenyPermitCombiningAlg); } // namespace ArcSec #endif /* __ARC_SEC_ORDEREDCOMBININGALG_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/alg/PaxHeaders.7502/OrderedAlg.cpp0000644000000000000000000000012411073223740025653 xustar000000000000000027 mtime=1223501792.844198 27 atime=1513200574.488701 30 ctime=1513200659.494741306 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/alg/OrderedAlg.cpp0000644000175000002070000001773111073223740025731 
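//Note on the ordered algorithms declared above: each generated class carries
//its own priorities[] array holding the four Result values in a fixed order
//and delegates combine() to OrderedCombiningAlg::combine(), which evaluates
//every policy against the context and tallies how often each of the four
//results occurs (see the implementation in OrderedAlg.cpp).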
0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "OrderedAlg.h" // Evaluation results // PERMIT // DENY // INDETERMINATE // NOT_APPLICABLE namespace ArcSec{ std::string PermitDenyIndeterminateNotApplicableCombiningAlg::algId = "Permit-Deny-Indeterminate-NotApplicable"; std::string PermitDenyNotApplicableIndeterminateCombiningAlg::algId = "Permit-Deny-NotApplicable-Indeterminate"; std::string PermitIndeterminateDenyNotApplicableCombiningAlg::algId = "Permit-Indeterminate-Deny-NotApplicable"; std::string PermitIndeterminateNotApplicableDenyCombiningAlg::algId = "Permit-Indeterminate-NotApplicable-Deny"; std::string PermitNotApplicableDenyIndeterminateCombiningAlg::algId = "Permit-NotApplicable-Deny-Indeterminate"; std::string PermitNotApplicableIndeterminateDenyCombiningAlg::algId = "Permit-NotApplicable-Indeterminate-Deny"; std::string DenyPermitIndeterminateNotApplicableCombiningAlg::algId = "Deny-Permit-Indeterminate-NotApplicable"; std::string DenyPermitNotApplicableIndeterminateCombiningAlg::algId = "Deny-Permit-NotApplicable-Indeterminate"; std::string DenyIndeterminatePermitNotApplicableCombiningAlg::algId = "Deny-Indeterminate-Permit-NotApplicable"; std::string DenyIndeterminateNotApplicablePermitCombiningAlg::algId = "Deny-Indeterminate-NotApplicable-Permit"; std::string DenyNotApplicablePermitIndeterminateCombiningAlg::algId = "Deny-NotApplicable-Permit-Indeterminate"; std::string DenyNotApplicableIndeterminatePermitCombiningAlg::algId = "Deny-NotApplicable-Indeterminate-Permit"; std::string IndeterminatePermitDenyNotApplicableCombiningAlg::algId = "Indeterminate-Permit-Deny-NotApplicable"; std::string IndeterminatePermitNotApplicableDenyCombiningAlg::algId = "Indeterminate-Permit-NotApplicable-Deny"; std::string IndeterminateDenyPermitNotApplicableCombiningAlg::algId = "Indeterminate-Deny-Permit-NotApplicable"; std::string IndeterminateDenyNotApplicablePermitCombiningAlg::algId = "Indeterminate-Deny-NotApplicable-Permit"; std::string IndeterminateNotApplicablePermitDenyCombiningAlg::algId = "Indeterminate-NotApplicable-Permit-Deny"; std::string IndeterminateNotApplicableDenyPermitCombiningAlg::algId = "Indeterminate-NotApplicable-Deny-Permit"; std::string NotApplicablePermitDenyIndeterminateCombiningAlg::algId = "NotApplicable-Permit-Deny-Indeterminate"; std::string NotApplicablePermitIndeterminateDenyCombiningAlg::algId = "NotApplicable-Permit-Indeterminate-Deny"; std::string NotApplicableDenyPermitIndeterminateCombiningAlg::algId = "NotApplicable-Deny-Permit-Indeterminate"; std::string NotApplicableDenyIndeterminatePermitCombiningAlg::algId = "NotApplicable-Deny-Indeterminate-Permit"; std::string NotApplicableIndeterminatePermitDenyCombiningAlg::algId = "NotApplicable-Indeterminate-Permit-Deny"; std::string NotApplicableIndeterminateDenyPermitCombiningAlg::algId = "NotApplicable-Indeterminate-Deny-Permit"; Result PermitDenyIndeterminateNotApplicableCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_PERMIT, DECISION_DENY, DECISION_INDETERMINATE, DECISION_NOT_APPLICABLE }; Result PermitDenyNotApplicableIndeterminateCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_PERMIT, DECISION_DENY, DECISION_NOT_APPLICABLE, DECISION_INDETERMINATE }; Result PermitIndeterminateDenyNotApplicableCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_PERMIT, DECISION_INDETERMINATE, DECISION_DENY, DECISION_NOT_APPLICABLE }; Result PermitIndeterminateNotApplicableDenyCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_PERMIT, 
DECISION_INDETERMINATE, DECISION_NOT_APPLICABLE, DECISION_DENY }; Result PermitNotApplicableDenyIndeterminateCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_PERMIT, DECISION_NOT_APPLICABLE, DECISION_DENY, DECISION_INDETERMINATE }; Result PermitNotApplicableIndeterminateDenyCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_PERMIT, DECISION_NOT_APPLICABLE, DECISION_INDETERMINATE, DECISION_DENY }; Result DenyPermitIndeterminateNotApplicableCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_DENY, DECISION_PERMIT, DECISION_INDETERMINATE, DECISION_NOT_APPLICABLE }; Result DenyPermitNotApplicableIndeterminateCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_DENY, DECISION_PERMIT, DECISION_NOT_APPLICABLE, DECISION_INDETERMINATE }; Result DenyIndeterminatePermitNotApplicableCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_DENY, DECISION_INDETERMINATE, DECISION_PERMIT, DECISION_NOT_APPLICABLE }; Result DenyIndeterminateNotApplicablePermitCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_DENY, DECISION_INDETERMINATE, DECISION_NOT_APPLICABLE, DECISION_PERMIT }; Result DenyNotApplicablePermitIndeterminateCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_DENY, DECISION_NOT_APPLICABLE, DECISION_PERMIT, DECISION_INDETERMINATE }; Result DenyNotApplicableIndeterminatePermitCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_DENY, DECISION_NOT_APPLICABLE, DECISION_INDETERMINATE, DECISION_PERMIT }; Result IndeterminatePermitDenyNotApplicableCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_INDETERMINATE, DECISION_PERMIT, DECISION_DENY, DECISION_NOT_APPLICABLE }; Result IndeterminatePermitNotApplicableDenyCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_INDETERMINATE, DECISION_PERMIT, DECISION_NOT_APPLICABLE, DECISION_DENY }; Result IndeterminateDenyPermitNotApplicableCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_INDETERMINATE, DECISION_DENY, DECISION_PERMIT, DECISION_NOT_APPLICABLE }; Result IndeterminateDenyNotApplicablePermitCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_INDETERMINATE, DECISION_DENY, DECISION_NOT_APPLICABLE, DECISION_PERMIT }; Result IndeterminateNotApplicablePermitDenyCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_INDETERMINATE, DECISION_NOT_APPLICABLE, DECISION_PERMIT, DECISION_DENY }; Result IndeterminateNotApplicableDenyPermitCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_INDETERMINATE, DECISION_NOT_APPLICABLE, DECISION_DENY, DECISION_PERMIT }; Result NotApplicablePermitDenyIndeterminateCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_NOT_APPLICABLE, DECISION_PERMIT, DECISION_DENY, DECISION_INDETERMINATE }; Result NotApplicablePermitIndeterminateDenyCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_NOT_APPLICABLE, DECISION_PERMIT, DECISION_INDETERMINATE, DECISION_DENY }; Result NotApplicableDenyPermitIndeterminateCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_NOT_APPLICABLE, DECISION_DENY, DECISION_PERMIT, DECISION_INDETERMINATE }; Result NotApplicableDenyIndeterminatePermitCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_NOT_APPLICABLE, DECISION_DENY, DECISION_INDETERMINATE, DECISION_PERMIT }; Result NotApplicableIndeterminatePermitDenyCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_NOT_APPLICABLE, DECISION_INDETERMINATE, DECISION_PERMIT, DECISION_DENY }; Result 
NotApplicableIndeterminateDenyPermitCombiningAlg::priorities[MAX_OREDERED_PRIORITIES] = { DECISION_NOT_APPLICABLE, DECISION_INDETERMINATE, DECISION_DENY, DECISION_PERMIT }; Result OrderedCombiningAlg::combine(EvaluationCtx* ctx, std::list policies,const Result priorities[MAX_OREDERED_PRIORITIES]){ std::list::iterator it; int occurencies[MAX_OREDERED_PRIORITIES]; memset(occurencies,0,sizeof(occurencies)); for(it = policies.begin(); it != policies.end(); it++) { Policy* policy = *it; Result res = policy->eval(ctx); for(int n = 0;n #endif #include "DenyOverridesAlg.h" namespace ArcSec{ std::string DenyOverridesCombiningAlg::algId = "Deny-Overrides"; Result DenyOverridesCombiningAlg::combine(EvaluationCtx* ctx, std::list policies){ bool atleast_onepermit = false; bool atleast_onenotapplicable = false; std::list::iterator it; for(it = policies.begin(); it != policies.end(); it++) { Policy* policy = *it; Result res = policy->eval(ctx); //If get a return DECISION_DENY, then regardless of whatelse result from the other Rule, //always return DENY if(res == DECISION_DENY) return DECISION_DENY; //If get a return DECISION_NOT_APPLICABLE (this usually happens when Attribute with corrsponding //AttributeId can be found from RequestItem, but value does not match). if(res == DECISION_NOT_APPLICABLE) { atleast_onenotapplicable = true; } //Keep track of whether we had at least one rule that is pertained to the request else if(res == DECISION_PERMIT) atleast_onepermit = true; } //Some Rule said PERMIT, so since nothing could have denied, return PERMIT if(atleast_onepermit) return DECISION_PERMIT; //No Rule said DENY, none of the rules actually applied, return NOT_APPLICABLE if(atleast_onenotapplicable) return DECISION_NOT_APPLICABLE; //If here, there is problem with one of the Rules, then return INDETERMINATE return DECISION_INDETERMINATE; } } //namespace ArcSec nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/alg/PaxHeaders.7502/CombiningAlg.h0000644000000000000000000000012411073223740025641 xustar000000000000000027 mtime=1223501792.844198 27 atime=1513200574.486701 30 ctime=1513200659.487741221 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/alg/CombiningAlg.h0000644000175000002070000000225211073223740025707 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_COMBININGALG_H__ #define __ARC_SEC_COMBININGALG_H__ #include #include #include "../EvaluationCtx.h" #include "../policy/Policy.h" namespace ArcSec { ///Interface for combining algrithm /**This class is used to implement a specific combining algorithm for * combining policies. */ class CombiningAlg { public: CombiningAlg(){}; virtual ~CombiningAlg(){}; public: /**Evaluate request against policy, and if there are more than one policies, combine * the evaluation results according to the combing algorithm implemented inside in the * method combine(ctx, policies) itself. *@param ctx The information about request is included *@param policies The "match" and "eval" method inside each policy will be called, * and then those results from each policy will be combined according to the combining * algorithm inside CombingAlg class. 
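*@return One of DECISION_PERMIT, DECISION_DENY, DECISION_NOT_APPLICABLE or
*DECISION_INDETERMINATE, chosen by the concrete combining algorithm (for
*example Deny-Overrides, Permit-Overrides or one of the ordered algorithms).
*
*A minimal, illustrative sketch of a call site (alg, ctx and policies stand
*for objects prepared elsewhere and are not defined in this header):
*
*  CombiningAlg* alg = new DenyOverridesCombiningAlg(); // declared in DenyOverridesAlg.h
*  Result r = alg->combine(ctx, policies);
*  if(r == DECISION_PERMIT) { ... }
*  delete alg;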
*/ virtual Result combine(EvaluationCtx* ctx, std::list policies) = 0; /**Get the identifier of the combining algorithm class *@return The identity of the algorithm */ virtual const std::string& getalgId(void) const = 0; }; } // namespace ArcSec #endif /* __ARC_SEC_COMBININGALG_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515024435 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200598.346993445 30 ctime=1513200659.363739704 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/Makefile.am0000644000175000002070000000130312052416515024474 0ustar00mockbuildmock00000000000000SUBDIRS = attr fn policy alg noinst_LTLIBRARIES = libarcpdp.la libarcpdp_ladir = $(pkgincludedir)/security/ArcPDP libarcpdp_la_HEADERS = Source.h EvaluationCtx.h Evaluator.h Response.h \ Request.h RequestItem.h Result.h EvaluatorLoader.h PolicyParser.h \ PolicyStore.h libarcpdp_la_SOURCES = Source.cpp Evaluator.cpp EvaluatorLoader.cpp \ PolicyParser.cpp PolicyStore.cpp libarcpdp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcpdp_la_LIBADD = \ attr/libarcattr.la fn/libarcfn.la \ policy/libarcpolicy.la alg/libarcalg.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/EvaluationCtx.h0000644000000000000000000000012411455353323025340 xustar000000000000000027 mtime=1286985427.270721 27 atime=1513200574.505701 30 ctime=1513200659.357739631 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/EvaluationCtx.h0000644000175000002070000000445111455353323025411 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_EVALUATIONCTX_H__ #define __ARC_SEC_EVALUATIONCTX_H__ #include #include #include #include #include "attr/AttributeValue.h" #include "Request.h" namespace ArcSec { class RequestTuple { public: virtual RequestTuple* duplicate(const RequestTuple*) { return NULL; }; virtual Arc::XMLNode& getNode() { return tuple; }; RequestTuple() { }; virtual ~RequestTuple(){ }; virtual void erase() { }; public: Subject sub; Resource res; Action act; Context ctx; protected: Arc::XMLNode tuple; }; ///EvaluationCtx, in charge of storing some context information for //evaluation, including Request, current time, etc. 
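///The base class simply stores the Request pointer passed to the constructor;
///the virtual get*Attributes() methods below return empty lists and can be
///overridden by concrete evaluation contexts that know how to extract subject,
///resource, action and context attributes for policy matching.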
class EvaluationCtx { public: /**Construct a new EvaluationCtx based on the given request */ EvaluationCtx (Request* request) { req = request; }; virtual ~EvaluationCtx() { }; virtual Request* getRequest() const { return req; }; //virtual void setRequestItem(RequestItem* reqit) { }; //virtual RequestItem* getRequestItem() const { return NULL; }; virtual std::list getSubjectAttributes(std::string& /* id */, std::string& /* type */, std::string& /* issuer */, std::string& /* category */, AttributeFactory* /* attrfactory */) { std::list attrlist; return attrlist; }; virtual std::list getResourceAttributes(std::string& /* id */, std::string& /* type */, std::string& /* issuer */, AttributeFactory* /* attrfactory */) { std::list attrlist; return attrlist; }; virtual std::list getActionAttributes(std::string& /* id */, std::string& /* type */, std::string& /* issuer */, AttributeFactory* /* attrfactory */) { std::list attrlist; return attrlist; }; virtual std::list getContextAttributes(std::string& /* id */, std::string& /* type */, std::string& /* issuer */, AttributeFactory* /* attrfactory */) { std::list attrlist; return attrlist; }; virtual std::list getAttributes(std::string& /* reqctxpath */, Arc::XMLNode& /* policy */, std::string& /* data_type */, AttributeFactory* /* attrfactory */) { std::list attrlist; return attrlist; }; private: Request* req; }; } // namespace ArcSec #endif /* __ARC_SEC_EVALUATIONCTX_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/PolicyParser.cpp0000644000000000000000000000012411114777117025524 xustar000000000000000027 mtime=1228144207.795496 27 atime=1513200574.481701 30 ctime=1513200659.368739765 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PolicyParser.cpp0000644000175000002070000000704111114777117025573 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include "PolicyParser.h" using namespace Arc; using namespace ArcSec; static Arc::Logger logger(Arc::Logger::rootLogger, "PolicyParser"); PolicyParser::PolicyParser(){ } /* ///Get policy from local file void getfromFile(const char* name, std::string& xml_policy){ std::string str; std::ifstream f(name); if(!f) logger.msg(ERROR,"Failed to read policy file %s",name); while (f >> str) { xml_policy.append(str); xml_policy.append(" "); } f.close(); } ///Get policy from remote URL void getfromURL(const char* name, std::string& xml_policy){ Arc::URL url(name); // TODO: IMPORTANT: Use client interface. 
Arc::NS ns; Arc::Config c(ns); Arc::XMLNode cfg = c; Arc::XMLNode mgr = cfg.NewChild("ModuleManager"); Arc::XMLNode pth1 = mgr.NewChild("Path"); pth1 = "../../../mcc/tcp/.libs"; Arc::XMLNode pth2 = mgr.NewChild("Path"); pth2 = "../../../mcc/http/.libs"; Arc::XMLNode plg1 = cfg.NewChild("Plugins"); Arc::XMLNode mcctcp = plg1.NewChild("Name"); mcctcp = "mcctcp"; Arc::XMLNode plg2 = cfg.NewChild("Plugins"); Arc::XMLNode mcchttp = plg2.NewChild("Name"); mcchttp = "mcchttp"; Arc::XMLNode chn = cfg.NewChild("Chain"); Arc::XMLNode tcp = chn.NewChild("Component"); Arc::XMLNode tcpname = tcp.NewAttribute("name"); tcpname = "tcp.client"; Arc::XMLNode tcpid = tcp.NewAttribute("id"); tcpid = "tcp"; Arc::XMLNode tcpcnt = tcp.NewChild("Connect"); Arc::XMLNode tcphost = tcpcnt.NewChild("Host"); tcphost = url.Host(); Arc::XMLNode tcpport = tcpcnt.NewChild("Port"); tcpport = Arc::tostring(url.Port()); Arc::XMLNode http = chn.NewChild("Component"); Arc::XMLNode httpname = http.NewAttribute("name"); httpname = "http.client"; Arc::XMLNode httpid = http.NewAttribute("id"); httpid = "http"; Arc::XMLNode httpentry = http.NewAttribute("entry"); httpentry = "http"; Arc::XMLNode httpnext = http.NewChild("next"); Arc::XMLNode httpnextid = httpnext.NewAttribute("id"); httpnextid = "tcp"; Arc::XMLNode httpmeth = http.NewChild("Method"); httpmeth = "GET"; Arc::XMLNode httpep = http.NewChild("Endpoint"); httpep = url.str(); //std::cout<<"------ Configuration ------"<process(request,response))) logger.msg(ERROR,"Failed to read policy from URL %s",name); try { Arc::PayloadRaw& payload = dynamic_cast(*response.Payload()); xml_policy.append(payload.Content()); xml_policy.append(" "); } catch(std::exception&) { }; } */ Policy* PolicyParser::parsePolicy(const Source& source, std::string policyclassname, EvaluatorContext* ctx){ Arc::XMLNode node = source.Get(); Arc::ClassLoader* classloader = NULL; classloader=Arc::ClassLoader::getClassLoader(); ArcSec::Policy * policy = (ArcSec::Policy*)(classloader->Instance(policyclassname, &node)); if(policy == NULL) { logger.msg(ERROR, "Can not generate policy object"); return NULL; } policy->setEvaluatorContext(ctx); policy->make_policy(); return policy; } nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315726024445 xustar000000000000000030 mtime=1513200598.400994106 30 atime=1513200648.192603077 30 ctime=1513200659.364739716 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/Makefile.in0000644000175000002070000010475713214315726024531 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/security/ArcPDP DIST_COMMON = $(libarcpdp_la_HEADERS) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) am__DEPENDENCIES_1 = libarcpdp_la_DEPENDENCIES = attr/libarcattr.la fn/libarcfn.la \ policy/libarcpolicy.la alg/libarcalg.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) am_libarcpdp_la_OBJECTS = libarcpdp_la-Source.lo \ libarcpdp_la-Evaluator.lo libarcpdp_la-EvaluatorLoader.lo \ libarcpdp_la-PolicyParser.lo libarcpdp_la-PolicyStore.lo libarcpdp_la_OBJECTS = $(am_libarcpdp_la_OBJECTS) libarcpdp_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libarcpdp_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcpdp_la_SOURCES) DIST_SOURCES = $(libarcpdp_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo 
"$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libarcpdp_ladir)" HEADERS = $(libarcpdp_la_HEADERS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = 
@CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ 
PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = attr fn policy alg noinst_LTLIBRARIES = libarcpdp.la libarcpdp_ladir = 
$(pkgincludedir)/security/ArcPDP libarcpdp_la_HEADERS = Source.h EvaluationCtx.h Evaluator.h Response.h \ Request.h RequestItem.h Result.h EvaluatorLoader.h PolicyParser.h \ PolicyStore.h libarcpdp_la_SOURCES = Source.cpp Evaluator.cpp EvaluatorLoader.cpp \ PolicyParser.cpp PolicyStore.cpp libarcpdp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcpdp_la_LIBADD = \ attr/libarcattr.la fn/libarcfn.la \ policy/libarcpolicy.la alg/libarcalg.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/security/ArcPDP/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/security/ArcPDP/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcpdp.la: $(libarcpdp_la_OBJECTS) $(libarcpdp_la_DEPENDENCIES) $(libarcpdp_la_LINK) $(libarcpdp_la_OBJECTS) $(libarcpdp_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcpdp_la-Evaluator.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcpdp_la-EvaluatorLoader.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcpdp_la-PolicyParser.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcpdp_la-PolicyStore.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcpdp_la-Source.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) 
@AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcpdp_la-Source.lo: Source.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libarcpdp_la-Source.lo -MD -MP -MF $(DEPDIR)/libarcpdp_la-Source.Tpo -c -o libarcpdp_la-Source.lo `test -f 'Source.cpp' || echo '$(srcdir)/'`Source.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcpdp_la-Source.Tpo $(DEPDIR)/libarcpdp_la-Source.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Source.cpp' object='libarcpdp_la-Source.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcpdp_la-Source.lo `test -f 'Source.cpp' || echo '$(srcdir)/'`Source.cpp libarcpdp_la-Evaluator.lo: Evaluator.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libarcpdp_la-Evaluator.lo -MD -MP -MF $(DEPDIR)/libarcpdp_la-Evaluator.Tpo -c -o libarcpdp_la-Evaluator.lo `test -f 'Evaluator.cpp' || echo '$(srcdir)/'`Evaluator.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcpdp_la-Evaluator.Tpo $(DEPDIR)/libarcpdp_la-Evaluator.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Evaluator.cpp' object='libarcpdp_la-Evaluator.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcpdp_la-Evaluator.lo `test -f 'Evaluator.cpp' || echo '$(srcdir)/'`Evaluator.cpp libarcpdp_la-EvaluatorLoader.lo: EvaluatorLoader.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libarcpdp_la-EvaluatorLoader.lo -MD -MP -MF $(DEPDIR)/libarcpdp_la-EvaluatorLoader.Tpo -c -o libarcpdp_la-EvaluatorLoader.lo `test -f 'EvaluatorLoader.cpp' || echo '$(srcdir)/'`EvaluatorLoader.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcpdp_la-EvaluatorLoader.Tpo $(DEPDIR)/libarcpdp_la-EvaluatorLoader.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='EvaluatorLoader.cpp' object='libarcpdp_la-EvaluatorLoader.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcpdp_la-EvaluatorLoader.lo 
`test -f 'EvaluatorLoader.cpp' || echo '$(srcdir)/'`EvaluatorLoader.cpp libarcpdp_la-PolicyParser.lo: PolicyParser.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libarcpdp_la-PolicyParser.lo -MD -MP -MF $(DEPDIR)/libarcpdp_la-PolicyParser.Tpo -c -o libarcpdp_la-PolicyParser.lo `test -f 'PolicyParser.cpp' || echo '$(srcdir)/'`PolicyParser.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcpdp_la-PolicyParser.Tpo $(DEPDIR)/libarcpdp_la-PolicyParser.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='PolicyParser.cpp' object='libarcpdp_la-PolicyParser.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcpdp_la-PolicyParser.lo `test -f 'PolicyParser.cpp' || echo '$(srcdir)/'`PolicyParser.cpp libarcpdp_la-PolicyStore.lo: PolicyStore.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libarcpdp_la-PolicyStore.lo -MD -MP -MF $(DEPDIR)/libarcpdp_la-PolicyStore.Tpo -c -o libarcpdp_la-PolicyStore.lo `test -f 'PolicyStore.cpp' || echo '$(srcdir)/'`PolicyStore.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcpdp_la-PolicyStore.Tpo $(DEPDIR)/libarcpdp_la-PolicyStore.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='PolicyStore.cpp' object='libarcpdp_la-PolicyStore.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcpdp_la-PolicyStore.lo `test -f 'PolicyStore.cpp' || echo '$(srcdir)/'`PolicyStore.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libarcpdp_laHEADERS: $(libarcpdp_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarcpdp_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarcpdp_ladir)" @list='$(libarcpdp_la_HEADERS)'; test -n "$(libarcpdp_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarcpdp_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarcpdp_ladir)" || exit $$?; \ done uninstall-libarcpdp_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarcpdp_la_HEADERS)'; test -n "$(libarcpdp_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarcpdp_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libarcpdp_ladir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(HEADERS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(libarcpdp_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-libarcpdp_laHEADERS install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-libarcpdp_laHEADERS .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-noinstLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am \ install-libarcpdp_laHEADERS install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs installdirs-am \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags tags-recursive uninstall uninstall-am \ uninstall-libarcpdp_laHEADERS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/RequestItem.h0000644000000000000000000000012410725564730025026 xustar000000000000000027 mtime=1196878296.194113 27 atime=1513200574.480701 30 ctime=1513200659.359739655 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/RequestItem.h0000644000175000002070000001354410725564730025102 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_REQUESTITEM_H__ #define __ARC_SEC_REQUESTITEM_H__ #include #include #include "attr/AttributeFactory.h" #include "attr/RequestAttribute.h" namespace ArcSec { ///Attribute containers, which includes a few RequestAttribute objects /** Why do we need such containers? A Subject node could be like below, include a few attributes at the same time: administrator /O=NorduGrid/OU=UIO/CN=admin Or only include one attribute: /O=NorduGrid/OU=UIO/CN=test Or include a few the same types of attributes at the same time: administrator /O=NorduGrid/OU=UIO/CN=admin Note, (or others) node with more than one s means the owns all the included attributes at the same time. e.g. a person with email: abc@xyz and DN:/O=XYZ/OU=ABC/CN=theguy and role: administrator However, Parallel s inside one SubList (see below about definition if ***List) does not means there is any relationship between these s. 
Then if there are two examples of here: Subject1: administrator /O=NorduGrid/OU=UIO/CN=admin and, Subject2: /O=NorduGrid/OU=UIO/CN=test Subject3: administrator the former one will be explained as the request tuple has two attributes at the same time the later one will be explained as the two , independently has one attribute. If we consider the Policy side, a policy snipet example like this: /O=NorduGrid/OU=UIO/CN=admin administrator ...... ...... ...... then all of the Subject1 Subject2 Subject3 will satisfy the in policy. but if the policy snipet is like this: /O=NorduGrid/OU=UIO/CN=admin administrator ...... ...... ...... then only Subject1 can satisfy the in policy. A complete request item could be like: /O=NorduGrid/OU=UIO/CN=test administrator guest /O=NorduGrid/OU=UIO/CN=anonymous file://home/test read copy 2007-09-10T20:30:20/P1Y1M Here putting a few s s s or s together (inside one RequestItem) is only for the convinient of expression (there is no logical relationship between them). For more than one <, , , > tuples, if there is one element (e.g. ) which is different to each other, you can put these tuples together by using one tuple <,, , , > tuple, and don't need to write a few tuples. */ typedef std::list Subject, Resource, Action, Context; ///Containers, which include a few Subject, Resource, Action or Context objects typedef std::list SubList; typedef std::list ResList; typedef std::list ActList; typedef std::list CtxList; ///Interface for request item container, tuple class RequestItem{ public: /**Constructor @param node The XMLNode structure of the request item @param attributefactory The AttributeFactory which will be used to generate RequestAttribute */ RequestItem(Arc::XMLNode&, AttributeFactory*){}; virtual ~RequestItem(){}; protected: SubList subjects; ResList actions; ActList resources; CtxList contexts; public: virtual SubList getSubjects () const = 0; virtual void setSubjects (const SubList& sl) = 0; virtual ResList getResources () const = 0; virtual void setResources (const ResList& rl) = 0; virtual ActList getActions () const = 0; virtual void setActions (const ActList& al) = 0; virtual CtxList getContexts () const = 0; virtual void setContexts (const CtxList& ctx) = 0; }; } // namespace Arc #endif /* __ARC_SEC_REQUESTITEM_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/Source.cpp0000644000000000000000000000012411667514164024354 xustar000000000000000027 mtime=1323210868.962599 27 atime=1513200574.505701 30 ctime=1513200659.365739729 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/Source.cpp0000644000175000002070000000152711667514164024426 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "Source.h" namespace ArcSec { Source::Source(Arc::XMLNode xml):node(xml) { } Source::Source(std::istream& stream) { node.ReadFromStream(stream); } Source::Source(Arc::URL&) { //TODO: } Source::Source(const std::string& str) { Arc::XMLNode xml(str); xml.Exchange(node); } SourceFile::SourceFile(const char* name):Source(*(stream = new std::ifstream(name))) { } SourceFile::SourceFile(const std::string& name):Source(*(stream = new std::ifstream(name.c_str()))) { } SourceFile::~SourceFile(void) { if(stream) delete stream; } SourceURL::SourceURL(const char* source):Source(*(url = new Arc::URL(source))) { } SourceURL::SourceURL(const std::string& source):Source(*(url = new Arc::URL(source))) { } SourceURL::~SourceURL(void) { if(url) delete url; } } // namespace ArcSec 
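// --- Usage sketch (editor's addition, not part of the distributed sources) ---
// A minimal illustration of how the interfaces declared in this ArcPDP
// directory (Source.h, EvaluatorLoader.h, Evaluator.h) fit together: a policy
// and a request document are read from files, a suitable Evaluator is loaded,
// and the request is evaluated against the policy.  The include paths are an
// assumption based on the header install location ($(pkgincludedir)/security/ArcPDP
// in Makefile.am), the file names are placeholders, and inspection of the
// returned Response as well as ownership of the Policy object are left out,
// since they depend on the concrete Evaluator implementation.
#include <string>
#include <arc/security/ArcPDP/Source.h>
#include <arc/security/ArcPDP/EvaluatorLoader.h>
#include <arc/security/ArcPDP/Evaluator.h>
#include <arc/security/ArcPDP/Response.h>

ArcSec::Response* evaluate_from_files(const std::string& policy_path,
                                      const std::string& request_path) {
  ArcSec::SourceFile policy(policy_path);    // parse the XML policy document
  ArcSec::SourceFile request(request_path);  // parse the XML request document
  if(!policy || !request) return NULL;       // Source::operator bool reports a parse failure

  ArcSec::EvaluatorLoader loader;
  ArcSec::Policy* p = loader.getPolicy(policy);           // pick a Policy class matching the source
  if(!p) return NULL;
  ArcSec::Evaluator* evaluator = loader.getEvaluator(p);  // pick an Evaluator suitable for that policy
  if(!evaluator) return NULL;                             // (cleanup of p omitted, see note above)

  // Evaluator::evaluate(const Source&, Policy*) parses the request and runs the evaluation.
  ArcSec::Response* response = evaluator->evaluate(request, p);
  delete evaluator;   // sketch assumption: the loaded evaluator is owned by the caller
  return response;    // the caller inspects and frees the Response
}
// --- end of usage sketch ---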
nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/Evaluator.cpp0000644000000000000000000000012411023572402025037 xustar000000000000000027 mtime=1213134082.542036 27 atime=1513200574.505701 30 ctime=1513200659.366739741 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/Evaluator.cpp0000644000175000002070000000027711023572402025112 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "Evaluator.h" namespace ArcSec { Arc::Logger ArcSec::Evaluator::logger(Arc::Logger::rootLogger, "Evaluator"); } // namespace ArcSec nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/EvaluatorLoader.h0000644000000000000000000000012311114777117025645 xustar000000000000000027 mtime=1228144207.795496 27 atime=1513200574.505701 29 ctime=1513200659.36173968 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/EvaluatorLoader.h0000644000175000002070000000352511114777117025720 0ustar00mockbuildmock00000000000000#ifndef __ARCSEC_EVALUATORLOADER_H__ #define __ARCSEC_EVALUATORLOADER_H__ #include #include #include #include #include #include #include #include #include namespace ArcSec { ///EvaluatorLoader is implemented as a helper class for loading different Evaluator objects, like ArcEvaluator /**The object loading is based on the configuration information about evaluator, including information for factory class, request, policy and evaluator itself */ class EvaluatorLoader { public: EvaluatorLoader(); /**Get evaluator object according to the class name*/ Evaluator* getEvaluator(const std::string& classname); /**Get evaluator object suitable for presented policy*/ Evaluator* getEvaluator(const Policy* policy); /**Get evaluator object suitable for presented request*/ Evaluator* getEvaluator(const Request* request); /**Get request object according to the class name, based on the request source*/ Request* getRequest(const std::string& classname, const Source& requestsource); /**Get request object according to the request source*/ Request* getRequest(const Source& requestsource); /**Get policy object according to the class name, based on the policy source*/ Policy* getPolicy(const std::string& classname, const Source& policysource); /**Get proper policy object according to the policy source*/ Policy* getPolicy(const Source& policysource); protected: static Arc::Logger logger; private: /**configuration information for loading objects; there could be more than one suits of configuration*/ std::list class_config_list_; }; } //namespace ArcSec #endif /* __ARCSEC_EVALUATORLOADER_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/Evaluator.h0000644000000000000000000000012411730411253024505 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200574.508701 30 ctime=1513200659.357739631 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/Evaluator.h0000644000175000002070000001151611730411253024556 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_EVALUATOR_H__ #define __ARC_SEC_EVALUATOR_H__ #include #include #include #include #include #include #include "fn/FnFactory.h" #include "attr/AttributeFactory.h" #include "alg/AlgFactory.h" #include "Request.h" #include "Response.h" namespace ArcSec { typedef enum { /** Evaluation is carried out till any non-matching policy found and all matching policies are discarded from reported list. This is a default behavior. 
*/ EvaluatorFailsOnDeny, /** Evaluation is carried out until a non-matching policy is found */ EvaluatorStopsOnDeny, /** Evaluation is carried out until a matching policy is found */ EvaluatorStopsOnPermit, /** Evaluation is done until all policies are checked. */ EvaluatorStopsNever } EvaluatorCombiningAlg; ///Interface for policy evaluation. Executes the policy evaluation, based on the request and policy class Evaluator : public Arc::LoadableClass { protected: static Arc::Logger logger; public: Evaluator (Arc::XMLNode*, Arc::PluginArgument* parg): Arc::LoadableClass(parg) {}; Evaluator (const char *, Arc::PluginArgument* parg): Arc::LoadableClass(parg) {}; virtual ~Evaluator() {}; /**Evaluates the request by using a Request object. Evaluation is done until at least one of the policies is satisfied. */ virtual Response* evaluate(Request* request) = 0; /**Evaluates the request by using a specified source */ virtual Response* evaluate(const Source& request) = 0; /**Evaluate the specified request against the policy from the specified source. In some implementations all of the existing policies inside the evaluator may be destroyed by this method. */ virtual Response* evaluate(Request* request, const Source& policy) = 0; /**Evaluate the request from the specified source against the policy from the specified source. In some implementations all of the existing policies inside the evaluator may be destroyed by this method. */ virtual Response* evaluate(const Source& request, const Source& policy) = 0; /**Evaluate the specified request against the specified policy. In some implementations all of the existing policies inside the evaluator may be destroyed by this method. */ virtual Response* evaluate(Request* request, Policy* policyobj) = 0; /**Evaluate the request from the specified source against the specified policy. In some implementations all of the existing policies inside the evaluator may be destroyed by this method. */ virtual Response* evaluate(const Source& request, Policy* policyobj) = 0; /**Get the AttributeFactory object*/ virtual AttributeFactory* getAttrFactory () = 0; /**Get the FnFactory object*/ virtual FnFactory* getFnFactory () = 0; /**Get the AlgFactory object*/ virtual AlgFactory* getAlgFactory () = 0; /**Add policy from the specified source to the evaluator. Policy will be marked with id. */ virtual void addPolicy(const Source& policy,const std::string& id = "") = 0; /**Add policy to the evaluator. Policy will be marked with id. The policy object is taken over by this instance and will be destroyed in the destructor. */ virtual void addPolicy(Policy* policy,const std::string& id = "") = 0; /**Specifies one of the simple combining algorithms. In case of multiple policies their results will be combined using this algorithm. */ virtual void setCombiningAlg(EvaluatorCombiningAlg alg) = 0; /**Specifies a loadable combining algorithm. In case of multiple policies their results will be combined using this algorithm. To switch to a simple algorithm specify a NULL argument. */ virtual void setCombiningAlg(CombiningAlg* alg = NULL) = 0; /**Get the name of this evaluator*/ virtual const char* getName(void) const = 0; protected: /**Evaluate the request by using the EvaluationCtx object (which includes the information about the request). The ctx is destroyed inside this method (why?!?!?). */ virtual Response* evaluate(EvaluationCtx* ctx) = 0; private: /**Parse the configuration, and dynamically create PolicyStore, AttributeFactory, FnFactory and AlgFactory*/ virtual void parsecfg(Arc::XMLNode& cfg) = 0; }; ///Context for evaluator.
It includes the factories which will be used to create related objects class EvaluatorContext { private: Evaluator* evaluator; public: EvaluatorContext(Evaluator* evaluator) : evaluator(evaluator) {}; ~EvaluatorContext() {}; public: /** Returns associated AttributeFactory object */ operator AttributeFactory*() { return evaluator->getAttrFactory(); }; /** Returns associated FnFactory object */ operator FnFactory*() { return evaluator->getFnFactory(); }; /** Returns associated AlgFactory object */ operator AlgFactory*() { return evaluator->getAlgFactory(); }; }; } // namespace ArcSec #endif /* __ARC_SEC_EVALUATOR_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/Source.h0000644000000000000000000000012412044527530024010 xustar000000000000000027 mtime=1351790424.395655 27 atime=1513200574.508701 30 ctime=1513200659.356739618 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/Source.h0000644000175000002070000000452612044527530024064 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_SOURCE_H__ #define __ARC_SEC_SOURCE_H__ #include #include #include namespace ArcSec { /// Acquires and parses XML document from specified source. /** This class is to be used to provide easy way to specify different sources for XML Authorization Policies and Requests. */ class Source { private: Arc::XMLNode node; Source(void) { }; public: /// Copy constructor. /** Use this constructor only for temporary objects. Parsed XML document is still owned by copied source and hence lifetime of created object should not exceed that of copied one. */ Source(const Source& s):node(s.node) { }; /// Use XML subtree refered by xml. /** There is no copy of xml made. Hence lifetime of this object should not exceed that of xml. */ Source(Arc::XMLNode xml); /// Read XML document from stream and parse it. Source(std::istream& stream); /// Fetch XML document from specified url and parse it. /** This constructor is not implemented yet. */ Source(Arc::URL& url); /// Read XML document from string. 
Source(const std::string& str); /// Get reference to parsed document Arc::XMLNode Get(void) const { return node; }; /// Returns true if valid document is available operator bool(void) { return (bool)node; }; operator Arc::XMLNode(void) { return node; }; }; /// Convenience class for obtaining XML document from file class SourceFile: public Source { private: std::ifstream* stream; SourceFile(void):Source(std::string("")),stream(NULL) {}; public: /// See corresponding constructor of Source class SourceFile(const SourceFile& s):Source(s),stream(NULL) {}; /// Read XML document from file named name and store it SourceFile(const char* name); /// Read XML document from file named name and store it SourceFile(const std::string& name); ~SourceFile(void); }; /// Convenience class for obtaining XML document from remote URL class SourceURL: public Source { private: Arc::URL* url; SourceURL(void):Source(std::string("")),url(NULL) {}; public: /// See corresponding constructor of Source class SourceURL(const SourceURL& s):Source(s),url(NULL) {}; /// Read XML document from URL url and store it SourceURL(const char* url); /// Read XML document from URL url and store it SourceURL(const std::string& url); ~SourceURL(void); }; } #endif nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/fn0000644000000000000000000000013213214316023022715 xustar000000000000000030 mtime=1513200659.449740756 30 atime=1513200668.720854145 30 ctime=1513200659.449740756 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/0000755000175000002070000000000013214316023023040 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515025040 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200598.551995952 30 ctime=1513200659.445740707 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/Makefile.am0000644000175000002070000000066012052416515025104 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libarcfn.la libarcfn_ladir = $(pkgincludedir)/security/ArcPDP/fn libarcfn_la_HEADERS = Function.h FnFactory.h EqualFunction.h MatchFunction.h InRangeFunction.h libarcfn_la_SOURCES = EqualFunction.cpp MatchFunction.cpp InRangeFunction.cpp libarcfn_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcfn_la_LIBADD = $(top_builddir)/src/hed/libs/common/libarccommon.la nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/PaxHeaders.7502/FnFactory.h0000644000000000000000000000012311730411253025040 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200574.491701 29 ctime=1513200659.44274067 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/FnFactory.h0000644000175000002070000000170211730411253025106 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_FUNCTIONFACTORY_H__ #define __ARC_SEC_FUNCTIONFACTORY_H__ #include #include #include "Function.h" namespace ArcSec { typedef std::map FnMap; ///Interface for function factory class /**FnFactory is in charge of creating Function object according to the * algorithm type given as argument of method createFn. * This class can be inherited for implementing a factory class * which can create some specific Function objects. 
*/ class FnFactory : public Arc::LoadableClass { public: FnFactory(Arc::PluginArgument* parg): Arc::LoadableClass(parg) {}; virtual ~FnFactory(){}; public: /**creat algorithm object based on the type algorithm type *@param type The type of Function *@return The object of Function */ virtual Function* createFn(const std::string& type) = 0; protected: FnMap fnmap; }; } // namespace ArcSec #endif /* __ARC_SEC_FUNCTIONFACTORY_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315726025047 xustar000000000000000029 mtime=1513200598.59999654 30 atime=1513200648.256603859 30 ctime=1513200659.446740719 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/Makefile.in0000644000175000002070000006476413214315726025137 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/security/ArcPDP/fn DIST_COMMON = $(libarcfn_la_HEADERS) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libarcfn_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libarcfn_la_OBJECTS = libarcfn_la-EqualFunction.lo \ libarcfn_la-MatchFunction.lo libarcfn_la-InRangeFunction.lo libarcfn_la_OBJECTS = $(am_libarcfn_la_OBJECTS) libarcfn_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libarcfn_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = 
depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcfn_la_SOURCES) DIST_SOURCES = $(libarcfn_la_SOURCES) am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libarcfn_ladir)" HEADERS = $(libarcfn_la_HEADERS) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS 
= @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT 
= @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libarcfn.la libarcfn_ladir = $(pkgincludedir)/security/ArcPDP/fn libarcfn_la_HEADERS = Function.h FnFactory.h EqualFunction.h MatchFunction.h InRangeFunction.h libarcfn_la_SOURCES = 
EqualFunction.cpp MatchFunction.cpp InRangeFunction.cpp libarcfn_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcfn_la_LIBADD = $(top_builddir)/src/hed/libs/common/libarccommon.la all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/security/ArcPDP/fn/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/security/ArcPDP/fn/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcfn.la: $(libarcfn_la_OBJECTS) $(libarcfn_la_DEPENDENCIES) $(libarcfn_la_LINK) $(libarcfn_la_OBJECTS) $(libarcfn_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcfn_la-EqualFunction.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcfn_la-InRangeFunction.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcfn_la-MatchFunction.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcfn_la-EqualFunction.lo: EqualFunction.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) 
--tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcfn_la_CXXFLAGS) $(CXXFLAGS) -MT libarcfn_la-EqualFunction.lo -MD -MP -MF $(DEPDIR)/libarcfn_la-EqualFunction.Tpo -c -o libarcfn_la-EqualFunction.lo `test -f 'EqualFunction.cpp' || echo '$(srcdir)/'`EqualFunction.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcfn_la-EqualFunction.Tpo $(DEPDIR)/libarcfn_la-EqualFunction.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='EqualFunction.cpp' object='libarcfn_la-EqualFunction.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcfn_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcfn_la-EqualFunction.lo `test -f 'EqualFunction.cpp' || echo '$(srcdir)/'`EqualFunction.cpp libarcfn_la-MatchFunction.lo: MatchFunction.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcfn_la_CXXFLAGS) $(CXXFLAGS) -MT libarcfn_la-MatchFunction.lo -MD -MP -MF $(DEPDIR)/libarcfn_la-MatchFunction.Tpo -c -o libarcfn_la-MatchFunction.lo `test -f 'MatchFunction.cpp' || echo '$(srcdir)/'`MatchFunction.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcfn_la-MatchFunction.Tpo $(DEPDIR)/libarcfn_la-MatchFunction.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='MatchFunction.cpp' object='libarcfn_la-MatchFunction.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcfn_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcfn_la-MatchFunction.lo `test -f 'MatchFunction.cpp' || echo '$(srcdir)/'`MatchFunction.cpp libarcfn_la-InRangeFunction.lo: InRangeFunction.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcfn_la_CXXFLAGS) $(CXXFLAGS) -MT libarcfn_la-InRangeFunction.lo -MD -MP -MF $(DEPDIR)/libarcfn_la-InRangeFunction.Tpo -c -o libarcfn_la-InRangeFunction.lo `test -f 'InRangeFunction.cpp' || echo '$(srcdir)/'`InRangeFunction.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcfn_la-InRangeFunction.Tpo $(DEPDIR)/libarcfn_la-InRangeFunction.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='InRangeFunction.cpp' object='libarcfn_la-InRangeFunction.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcfn_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcfn_la-InRangeFunction.lo `test -f 'InRangeFunction.cpp' || echo '$(srcdir)/'`InRangeFunction.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libarcfn_laHEADERS: $(libarcfn_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarcfn_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarcfn_ladir)" @list='$(libarcfn_la_HEADERS)'; test -n "$(libarcfn_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo 
"$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarcfn_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarcfn_ladir)" || exit $$?; \ done uninstall-libarcfn_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarcfn_la_HEADERS)'; test -n "$(libarcfn_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarcfn_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libarcfn_ladir)" && rm -f $$files ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(HEADERS) installdirs: for dir in "$(DESTDIR)$(libarcfn_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-libarcfn_laHEADERS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-libarcfn_laHEADERS .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am \ install-libarcfn_laHEADERS install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-libarcfn_laHEADERS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/PaxHeaders.7502/MatchFunction.cpp0000644000000000000000000000012411232072001026232 xustar000000000000000027 mtime=1248359425.692324 27 atime=1513200574.491701 30 ctime=1513200659.447740731 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/MatchFunction.cpp0000644000175000002070000000376511232072001026312 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "MatchFunction.h" #include "../attr/BooleanAttribute.h" #include "../attr/StringAttribute.h" #include "../attr/DateTimeAttribute.h" #include "../attr/X500NameAttribute.h" #include "../attr/AnyURIAttribute.h" namespace ArcSec { std::string MatchFunction::getFunctionName(std::string datatype){ std::string ret; if (datatype == StringAttribute::getIdentifier()) ret = NAME_REGEXP_STRING_MATCH; else if(datatype == AnyURIAttribute::getIdentifier()) ret = NAME_ANYURI_REGEXP_MATCH; else if(datatype == X500NameAttribute::getIdentifier()) ret = NAME_X500NAME_REGEXP_MATCH; return ret; } MatchFunction::MatchFunction(std::string functionName, std::string argumentType) : Function(functionName, argumentType) { fnName = functionName; argType = argumentType; } AttributeValue* MatchFunction::evaluate(AttributeValue* arg0, AttributeValue* arg1, bool check_id){ //TODO //arg0 is the attributevalue in policy //arg1 is the attributevalue in request if(check_id) { if(arg0->getId() != arg1->getId()) return new BooleanAttribute(false); } std::string label = arg0->encode(); std::string value = arg1->encode(); Arc::RegularExpression regex(label); if(regex.isOk()){ std::list unmatched, matched; if(regex.match(value, unmatched, matched)) return new BooleanAttribute(true); } // std::cerr<<"Bad Regex"< MatchFunction::evaluate(std::list args, bool check_id) { AttributeValue* arg0 = NULL; AttributeValue* arg1 = NULL; std::list::iterator it = args.begin(); arg0 = *it; it++; if(it!= args.end()) arg1 = *it; if(check_id) { if(arg0->getId() != arg1->getId()) { std::list ret; ret.push_back(new BooleanAttribute(false)); return ret; } } AttributeValue* res = evaluate(arg0, arg1); std::list ret; ret.push_back(res); return ret; } } nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/PaxHeaders.7502/EqualFunction.cpp0000644000000000000000000000012411232072001026245 xustar000000000000000027 mtime=1248359425.692324 27 atime=1513200574.489701 30 ctime=1513200659.447740731 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/EqualFunction.cpp0000644000175000002070000000517711232072001026324 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "EqualFunction.h" #include "../attr/BooleanAttribute.h" #include "../attr/StringAttribute.h" #include "../attr/DateTimeAttribute.h" #include "../attr/X500NameAttribute.h" #include "../attr/AnyURIAttribute.h" namespace ArcSec { std::string EqualFunction::getFunctionName(std::string datatype){ std::string ret; if (datatype == StringAttribute::getIdentifier()) ret = NAME_STRING_EQUAL; else if(datatype == BooleanAttribute::getIdentifier()) ret = NAME_BOOLEAN_EQUAL; //else if(datatype == IntegerAttribute::getIdentify()) ret = NAME_INTEGER_EQUAL; //else if(datatype == DoubleAttribute::getIdentify()) ret = NAME_DOUBLE_EQUAL; else if(datatype == DateAttribute::getIdentifier()) ret = NAME_DATE_EQUAL; else if(datatype == TimeAttribute::getIdentifier()) ret = NAME_TIME_EQUAL; else if(datatype == DateTimeAttribute::getIdentifier()) ret = NAME_DATETIME_EQUAL; else if(datatype == DurationAttribute::getIdentifier()) ret = NAME_DURATION_EQUAL; 
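  //Mapping note: getFunctionName() only translates an attribute datatype
  //identifier into the name of the corresponding equality function, e.g.
  //StringAttribute::getIdentifier() maps to NAME_STRING_EQUAL ("string-equal");
  //datatypes without a mapping (including the commented-out ones) fall through
  //and an empty string is returned.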
else if(datatype == PeriodAttribute::getIdentifier()) ret = NAME_PERIOD_EQUAL; //else if(datatype == DayTimeDurationAttribute::getIdentify()) ret = NAME_DAYTIME_DURATION_EQUAL; //else if(datatype == YearMonthDurationAttribute::getIdentify()) ret = NAME_YEARMONTH_DURATION_EQUAL; else if(datatype == AnyURIAttribute::getIdentifier()) ret = NAME_ANYURI_EQUAL; else if(datatype == X500NameAttribute::getIdentifier()) ret = NAME_X500NAME_EQUAL; //else if(datatype == RFC822NameAttribute::getIdentify()) ret = NAME_RFC822NAME_EQUAL; //else if(datatype == HexBinaryAttribute::getIdentify()) ret = NAME_HEXBINARY_EQUAL; //else if(datatype == Base64BinaryAttribute::getIdentify()) ret = BASE64BINARY_EQUAL; //else if(datatype == IPAddressAttribute::getIdentify()) ret = NAME_IPADDRESS_EQUAL; //else if(datatype == DNSName::getIdentify()) ret = NAME_DNSNAME_EQUAL; return ret; } EqualFunction::EqualFunction(std::string functionName, std::string argumentType) : Function(functionName, argumentType) { fnName = functionName; argType = argumentType; } AttributeValue* EqualFunction::evaluate(AttributeValue* arg0, AttributeValue* arg1, bool check_id){ //TODO return new BooleanAttribute(arg0->equal(arg1, check_id)); } std::list EqualFunction::evaluate(std::list args, bool check_id) { AttributeValue* arg0 = NULL; AttributeValue* arg1 = NULL; std::list::iterator it = args.begin(); arg0 = *it; it++; if(it!= args.end()) arg1 = *it; AttributeValue* res = new BooleanAttribute(arg0->equal(arg1, check_id)); std::list ret; ret.push_back(res); return ret; } } nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/PaxHeaders.7502/InRangeFunction.cpp0000644000000000000000000000012411232072001026521 xustar000000000000000027 mtime=1248359425.692324 27 atime=1513200574.489701 30 ctime=1513200659.449740756 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/InRangeFunction.cpp0000644000175000002070000000416411232072001026573 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "InRangeFunction.h" #include "../attr/BooleanAttribute.h" #include "../attr/DateTimeAttribute.h" #include "../attr/StringAttribute.h" namespace ArcSec { std::string InRangeFunction::getFunctionName(std::string datatype){ std::string ret; if (datatype == DateTimeAttribute::getIdentifier()) ret = NAME_TIME_IN_RANGE; else if (datatype == StringAttribute::getIdentifier()) ret = NAME_STRING_IN_RANGE; return ret; } InRangeFunction::InRangeFunction(std::string functionName, std::string argumentType) : Function(functionName, argumentType) { fnName = functionName; argType = argumentType; } AttributeValue* InRangeFunction::evaluate(AttributeValue* arg0, AttributeValue* arg1, bool check_id){ //TODO //arg0 is the attributevalue in policy //arg1 is the attributevalue in request if(check_id) { if(arg0->getId() != arg1->getId()) return new BooleanAttribute(false); } if(fnName == NAME_TIME_IN_RANGE){ PeriodAttribute* v0; DateTimeAttribute* v1; try{ v0 = dynamic_cast(arg0); v1 = dynamic_cast(arg1); } catch(std::exception&){ }; if(v1->inrange(v0)) return new BooleanAttribute(true); } else if(fnName == NAME_STRING_IN_RANGE) { StringAttribute* v0; StringAttribute* v1; try{ v0 = dynamic_cast(arg0); v1 = dynamic_cast(arg1); } catch(std::exception&){ }; if(v1->inrange(v0)) return new BooleanAttribute(true); } return new BooleanAttribute(false); } std::list InRangeFunction::evaluate(std::list args, bool check_id) { AttributeValue* arg0 = NULL; AttributeValue* arg1 = NULL; std::list::iterator it = args.begin(); arg0 = *it; it++; if(it!= args.end()) arg1 = *it; 
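  //Only the first two entries of 'args' are used here: arg0 is the attribute
  //value from the policy and arg1 the one from the request, mirroring the
  //two-argument evaluate() above.  A minimal usage sketch of that form
  //(construction of the attribute values is assumed, it is not shown here):
  //
  //  InRangeFunction fn(NAME_STRING_IN_RANGE, StringAttribute::getIdentifier());
  //  AttributeValue* verdict = fn.evaluate(policy_value, request_value);
  //  //'verdict' is a newly allocated BooleanAttribute owned by the caller.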
if(check_id) { if(arg0->getId() != arg1->getId()) { std::list ret; ret.push_back(new BooleanAttribute(false)); return ret; } } AttributeValue* res = evaluate(arg0, arg1); std::list ret; ret.push_back(res); return ret; } } nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/PaxHeaders.7502/EqualFunction.h0000644000000000000000000000012411232072001025712 xustar000000000000000027 mtime=1248359425.692324 27 atime=1513200574.491701 30 ctime=1513200659.443740682 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/EqualFunction.h0000644000175000002070000000306311232072001025761 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_EQUAL_FUNCTION_H__ #define __ARC_SEC_EQUAL_FUNCTION_H__ #include namespace ArcSec { #define NAME_STRING_EQUAL "string-equal" #define NAME_BOOLEAN_EQUAL "boolean-equal" #define NAME_INTEGER_EQUAL "integer-equal" #define NAME_DOUBLE_EQUAL "double-equal" #define NAME_DATE_EQUAL "date-equal" #define NAME_TIME_EQUAL "time-equal" #define NAME_DATETIME_EQUAL "datetime-equal" #define NAME_DURATION_EQUAL "duration-equal" #define NAME_PERIOD_EQUAL "period-equal" //#define NAME_DAYTIME_DURATION_EQUAL "dayTimeDuration-equal" //#define NAME_YEARMONTH_DURATION_EQUAL "yearMonthDuration-equal" #define NAME_ANYURI_EQUAL "anyURI-equal" #define NAME_X500NAME_EQUAL "x500Name-equal" #define NAME_RFC822NAME_EQUAL "rfc822Name-equal" #define NAME_HEXBINARY_EQUAL "hexBinary-equal" #define NAME_BASE64BINARY_EQUAL "base64Binary-equal" #define NAME_IPADDRESS_EQUAL "ipAddress-equal" #define NAME_DNSNAME_EQUAL "dnsName-equal" ///Evaluate whether the two values are equal class EqualFunction : public Function { public: EqualFunction(std::string functionName, std::string argumentType); public: virtual AttributeValue* evaluate(AttributeValue* arg0, AttributeValue* arg1, bool check_id = true); virtual std::list evaluate(std::list args, bool check_id = true); /**help function to get the FunctionName*/ static std::string getFunctionName(std::string datatype); private: std::string fnName; std::string argType; }; } // namespace ArcSec #endif /* __ARC_SEC_EQUAL_FUNCTION_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/PaxHeaders.7502/MatchFunction.h0000644000000000000000000000012411232072001025677 xustar000000000000000027 mtime=1248359425.692324 27 atime=1513200574.492701 30 ctime=1513200659.444740695 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/MatchFunction.h0000644000175000002070000000174411232072001025752 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_MATCH_FUNCTION_H__ #define __ARC_SEC_MATCH_FUNCTION_H__ #include #include namespace ArcSec { #define NAME_REGEXP_STRING_MATCH "regexp-string-match" #define NAME_ANYURI_REGEXP_MATCH "anyURI-regexp-match" #define NAME_X500NAME_REGEXP_MATCH "x500Name-regexp-match" ///Evaluate whether arg1 (value in regular expression) matched arg0 (lable in regular expression) class MatchFunction : public Function { public: MatchFunction(std::string functionName, std::string argumentType); public: virtual AttributeValue* evaluate(AttributeValue* arg0, AttributeValue* arg1, bool check_id = true); virtual std::list evaluate(std::list args, bool check_id = true); /**help function to get the FunctionName*/ static std::string getFunctionName(std::string datatype); private: std::string fnName; std::string argType; }; } // namespace ArcSec #endif /* __ARC_SEC_MATCH_FUNCTION_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/PaxHeaders.7502/Function.h0000644000000000000000000000012411232072001024722 xustar000000000000000027 mtime=1248359425.692324 27 
atime=1513200574.489701 30 ctime=1513200659.441740658 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/Function.h0000644000175000002070000000157711232072001025001 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_FUNCTION_H__ #define __ARC_SEC_FUNCTION_H__ #include #include #include namespace ArcSec { //static std::string FUNCTION_NS = "know-arc:function"; //#define FUNCTION_NS "know-arc:function" ///Interface for function, which is in charge of evaluating two AttributeValue class Function { public: Function(std::string, std::string){}; virtual ~Function(){}; public: /**Evaluate two AttributeValue objects, and return one AttributeValue object */ virtual AttributeValue* evaluate(AttributeValue* arg0, AttributeValue* arg1, bool check_id = true) = 0; /**Evaluate a list of AttributeValue objects, and return a list of Attribute objects*/ virtual std::list evaluate(std::list args, bool check_id = true) = 0; }; } // namespace ArcSec #endif /* __ARC_SEC_FUNCTION_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/PaxHeaders.7502/InRangeFunction.h0000644000000000000000000000012411232072001026166 xustar000000000000000027 mtime=1248359425.692324 27 atime=1513200574.491701 30 ctime=1513200659.445740707 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/fn/InRangeFunction.h0000644000175000002070000000160011232072001026230 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_INRANGE_FUNCTION_H__ #define __ARC_SEC_INRANGE_FUNCTION_H__ #include namespace ArcSec { #define IN_RANGE "-in-range" #define NAME_STRING_IN_RANGE "string-in-range" #define NAME_TIME_IN_RANGE "time-in-range" class InRangeFunction : public Function { public: InRangeFunction(std::string functionName, std::string argumentType); public: virtual AttributeValue* evaluate(AttributeValue* arg0, AttributeValue* arg1, bool check_id = true); virtual std::list evaluate(std::list args, bool check_id = true); //help function specific for existing policy expression because of no exiplicit function defined in policy static std::string getFunctionName(std::string datatype); private: std::string fnName; std::string argType; }; } // namespace ArcSec #endif /* __ARC_SEC_INRANGE_FUNCTION_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/PolicyStore.cpp0000644000000000000000000000012411611503620025350 xustar000000000000000027 mtime=1311147920.204773 27 atime=1513200574.481701 30 ctime=1513200659.369739777 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PolicyStore.cpp0000644000175000002070000000431511611503620025420 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "PolicyParser.h" #include "PolicyStore.h" using namespace Arc; using namespace ArcSec; //PolicyStore::PolicyStore(const std::list& filelist, const std::string& alg, const std::string& policyclassname, EvaluatorContext* ctx){ PolicyStore::PolicyStore(const std::string& /* alg */, const std::string& policyclassname, EvaluatorContext* /* ctx */){ //combalg = alg; policy_classname = policyclassname; //PolicyParser plparser; ////call parsePolicy to parse each policies //for(std::list::const_iterator it = filelist.begin(); it != filelist.end(); it++){ // policies.push_back(PolicyElement(plparser.parsePolicy((*it).c_str(), policy_classname, ctx))); //} } //Policy list //there also can be a class "PolicySet", which includes a few policies std::list PolicyStore::findPolicy(EvaluationCtx*) { //ctx){ //For the existing Arc policy expression, we only need to return all the policies, because there is //no Target definition in 
ArcPolicy (the Target is only in ArcRule) return policies; /* std::list ret; std::list::iterator it; for(it = policies.begin(); it!=policies.end(); it++ ){ MatchResult res = (*it)->match(ctx); if (res == MATCH ) ret.push_back(*it); } return ret; */ //TODO } void PolicyStore::addPolicy(const Source& policy, EvaluatorContext* ctx, const std::string& id) { PolicyParser plparser; Policy* pls; pls = PolicyElement(plparser.parsePolicy(policy, policy_classname, ctx),id); if(pls != NULL) policies.push_back(pls); } void PolicyStore::addPolicy(Policy* policy, EvaluatorContext* ctx,const std::string& id) { Policy* pls = dynamic_cast(policy); if(pls!=NULL) { pls->setEvaluatorContext(ctx); pls->make_policy(); policies.push_back(PolicyElement(pls, id)); } } void PolicyStore::removePolicies(void) { while(!(policies.empty())){ delete (Policy*)(policies.back()); policies.pop_back(); } } void PolicyStore::releasePolicies(void) { while(!(policies.empty())){ policies.pop_back(); } } PolicyStore::~PolicyStore(){ removePolicies(); } nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/Request.h0000644000000000000000000000012411730411253024173 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200574.505701 30 ctime=1513200659.359739655 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/Request.h0000644000175000002070000000624611730411253024250 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_REQUEST_H__ #define __ARC_SEC_REQUEST_H__ #include #include #include #include #include #include #include namespace ArcSec { ///Following is some general structures and classes for storing the request information. ///In principle, the request structure shoud be in XML format, and also can include a few items ///ReqItemList is a container for RequestItem objects typedef std::list ReqItemList; ///Attr contains a tuple of attribute type and value typedef struct{ std::string value; std::string type; } Attr; ///Attrs is a container for one or more Attr /**Attrs includes includes methonds for inserting, getting items, and counting size as well*/ class Attrs { public: void addItem(Attr attr) { int n = (attrs.size()); attrs.insert(std::pair(n, attr)); }; int size() { return attrs.size();}; Attr& getItem(int n) { return attrs[n]; }; Attr& operator[](int n) { return attrs[n]; }; private: std::map attrs; }; ///Base class/Interface for request, includes a container for RequestItems and some operations /**A Request object can has a few tuples, i.e. 
RequestItem The Request class and any customized class which inherit from it, should be loadable, which means these classes can be dynamically loaded according to the configuration informtation, see the example configuration below: <......> <......> There can be different types of subclass which inherit Request, such like XACMLRequest, ArcRequest, GACLRequest */ class Request : public Arc::LoadableClass { protected: ReqItemList rlist; public: /**Get all the RequestItem inside RequestItem container */ virtual ReqItemList getRequestItems () const { ReqItemList list; return list; }; /**Set the content of the container*/ virtual void setRequestItems (ReqItemList /* sl */) { }; /**Add request tuple from non-XMLNode*/ virtual void addRequestItem(Attrs& /* sub */, Attrs& /* res */, Attrs& /* act */ , Attrs& /* ctx */) { }; /**Set the attribute factory for the usage of Request*/ virtual void setAttributeFactory(AttributeFactory* attributefactory) = 0; /**Create the objects included in Request according to the node attached to the Request object*/ virtual void make_request() = 0; /**Get the name of corresponding evaulator*/ virtual const char* getEvalName() const = 0; /**Get the name of this request*/ virtual const char* getName() const = 0; /**Default constructor*/ Request (Arc::PluginArgument* parg): Arc::LoadableClass(parg) {}; /**Constructor: Parse request information from a xml stucture in memory*/ Request (const Source&, Arc::PluginArgument* parg): Arc::LoadableClass(parg) {}; virtual Arc::XMLNode& getReqNode() = 0; virtual ~Request(){}; }; } // namespace ArcSec #endif /* __ARC_SEC_REQUEST_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/Result.h0000644000000000000000000000012411441150373024023 xustar000000000000000027 mtime=1283772667.306058 27 atime=1513200574.494701 30 ctime=1513200659.360739667 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/Result.h0000644000175000002070000000306311441150373024072 0ustar00mockbuildmock00000000000000namespace ArcSec { ///Evaluation result typedef enum { /**Permit*/ DECISION_PERMIT = 0, /**Deny*/ DECISION_DENY = 1, /**Indeterminate, because of the Indeterminate from the "Matching"*/ DECISION_INDETERMINATE = 2, /**Not_Applicable, means the the request tuple does not match the rule. So there is no way to get to the "Permit"/"Deny" effect. */ DECISION_NOT_APPLICABLE = 3 } Result; inline std::ostream& operator<<(std::ostream& o,Result r) { switch(r) { case DECISION_PERMIT: return o<<"Permit"; case DECISION_DENY: return o<<"Deny"; case DECISION_INDETERMINATE: return o<<"Indeterminate"; case DECISION_NOT_APPLICABLE: return o<<"Not Applicable"; }; return o<<"Undefined"; } ///Match result enum MatchResult { /**Match, the request tuple matches the rule*/ MATCH = 0, /**No_Match, the request tuple does not match the rule*/ NO_MATCH = 1, /**Indeterminate, means that the request tuple matches the rule, but in terms of the other "Condition", the tuple does not match. 
So far, the Indeterminate has no meaning in the existing code (will never be switched to)*/ INDETERMINATE = 2 }; ///Struct to record the xml node and effect, which will be used by Evaluator to get the information about which rule/policy(in xmlnode) is satisfied typedef struct { Arc::XMLNode node; std::string effect; } EvalResult; } nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/Response.h0000644000000000000000000000012412044527530024346 xustar000000000000000027 mtime=1351790424.395655 27 atime=1513200574.507701 30 ctime=1513200659.358739643 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/Response.h0000644000175000002070000000375412044527530024424 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_RESPONSE_H__ #define __ARC_SEC_RESPONSE_H__ #include #include #include #include "EvaluationCtx.h" #include "policy/Policy.h" namespace ArcSec { typedef std::list Policies; ///Evaluation result concerning one RequestTuple /**Include the RequestTuple, related XMLNode, the set of policy objects which give positive evaluation result, and the related XMLNode*/ class ResponseItem { public: ResponseItem():reqtp(NULL),res(DECISION_DENY){}; public: //TODO: Convertion method to decrease memory consumption RequestTuple* reqtp; Result res; Arc::XMLNode reqxml; Policies pls; std::list plsxml; }; class ResponseList { public: void addItem(ResponseItem* item) { int n = (resps.size()); resps.insert(std::pair(n, item)); }; int size() { return resps.size();}; ResponseItem* getItem(int n) { return resps[n]; }; ResponseItem* operator[](int n) { return resps[n]; }; bool empty() { return resps.empty(); }; void clear() { std::map::iterator it; for(it = resps.begin(); it != resps.end();it = resps.begin()){ ResponseItem* item = it->second; resps.erase(it); if(item) { RequestTuple* tpl = item->reqtp; if(tpl) { tpl->erase(); delete tpl; }; delete item; }; } }; private: std::map resps; }; ///Container for the evaluation results class Response { private: int request_size; protected: ResponseList rlist; public: void setRequestSize(int size) { request_size = size; }; int getRequestSize() { return request_size; }; virtual ResponseList& getResponseItems () { return rlist; }; virtual void setResponseItems (const ResponseList& rl) { rlist.clear(); rlist = rl; }; virtual void addResponseItem(ResponseItem* respitem){ rlist.addItem(respitem); }; virtual ~Response() { rlist.clear(); }; }; } // namespace ArcSec #endif /* __ARC_SEC_RESPONSE_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/policy0000644000000000000000000000013213214316023023611 xustar000000000000000030 mtime=1513200659.470741013 30 atime=1513200668.720854145 30 ctime=1513200659.470741013 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/policy/0000755000175000002070000000000013214316023023734 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/policy/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515025734 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200598.615996735 30 ctime=1513200659.468740988 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/policy/Makefile.am0000644000175000002070000000054012052416515025775 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libarcpolicy.la libarcpolicy_ladir = $(pkgincludedir)/security/ArcPDP/policy libarcpolicy_la_HEADERS = Policy.h libarcpolicy_la_SOURCES = Policy.cpp libarcpolicy_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcpolicy_la_LIBADD = 
$(top_builddir)/src/hed/libs/common/libarccommon.la nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/policy/PaxHeaders.7502/Policy.cpp0000644000000000000000000000012410705130270025632 xustar000000000000000027 mtime=1192538296.037528 27 atime=1513200574.484701 30 ctime=1513200659.470741013 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/policy/Policy.cpp0000644000175000002070000000024510705130270025700 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "Policy.h" namespace ArcSec { Arc::Logger ArcSec::Policy::logger(Arc::Logger::rootLogger, "Policy"); } nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/policy/PaxHeaders.7502/Makefile.in0000644000000000000000000000012713214315726025750 xustar000000000000000030 mtime=1513200598.659997273 30 atime=1513200648.224603468 27 ctime=1513200659.469741 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/policy/Makefile.in0000644000175000002070000006016513214315726026022 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/security/ArcPDP/policy DIST_COMMON = $(libarcpolicy_la_HEADERS) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libarcpolicy_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libarcpolicy_la_OBJECTS = libarcpolicy_la-Policy.lo libarcpolicy_la_OBJECTS = $(am_libarcpolicy_la_OBJECTS) libarcpolicy_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarcpolicy_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ 
$(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcpolicy_la_SOURCES) DIST_SOURCES = $(libarcpolicy_la_SOURCES) am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libarcpolicy_ladir)" HEADERS = $(libarcpolicy_la_HEADERS) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ 
CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ 
PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libarcpolicy.la libarcpolicy_ladir = 
$(pkgincludedir)/security/ArcPDP/policy libarcpolicy_la_HEADERS = Policy.h libarcpolicy_la_SOURCES = Policy.cpp libarcpolicy_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcpolicy_la_LIBADD = $(top_builddir)/src/hed/libs/common/libarccommon.la all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/security/ArcPDP/policy/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/security/ArcPDP/policy/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcpolicy.la: $(libarcpolicy_la_OBJECTS) $(libarcpolicy_la_DEPENDENCIES) $(libarcpolicy_la_LINK) $(libarcpolicy_la_OBJECTS) $(libarcpolicy_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcpolicy_la-Policy.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcpolicy_la-Policy.lo: Policy.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpolicy_la_CXXFLAGS) $(CXXFLAGS) -MT libarcpolicy_la-Policy.lo -MD -MP -MF $(DEPDIR)/libarcpolicy_la-Policy.Tpo -c -o libarcpolicy_la-Policy.lo `test -f 'Policy.cpp' || echo '$(srcdir)/'`Policy.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcpolicy_la-Policy.Tpo $(DEPDIR)/libarcpolicy_la-Policy.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Policy.cpp' object='libarcpolicy_la-Policy.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpolicy_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcpolicy_la-Policy.lo `test -f 'Policy.cpp' || echo '$(srcdir)/'`Policy.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libarcpolicy_laHEADERS: $(libarcpolicy_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarcpolicy_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarcpolicy_ladir)" @list='$(libarcpolicy_la_HEADERS)'; test -n "$(libarcpolicy_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarcpolicy_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarcpolicy_ladir)" || exit $$?; \ done uninstall-libarcpolicy_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarcpolicy_la_HEADERS)'; test -n "$(libarcpolicy_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarcpolicy_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libarcpolicy_ladir)" && rm -f $$files ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 
's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(HEADERS) installdirs: for dir in "$(DESTDIR)$(libarcpolicy_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-libarcpolicy_laHEADERS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-libarcpolicy_laHEADERS .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am \ install-libarcpolicy_laHEADERS install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-libarcpolicy_laHEADERS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/policy/PaxHeaders.7502/Policy.h0000644000000000000000000000012411730411253025301 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200574.481701 30 ctime=1513200659.467740976 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/policy/Policy.h0000644000175000002070000000641511730411253025354 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_POLICY_H__ #define __ARC_SEC_POLICY_H__ #include #include #include #include #include "../EvaluationCtx.h" #include "../Result.h" namespace ArcSec { class EvaluatorContext; ///Interface for containing and processing different types of policy. /**Basically, each policy object is a container which includes a few elements *e.g., ArcPolicySet objects includes a few ArcPolicy objects; ArcPolicy object *includes a few ArcRule objects. There is logical relationship between ArcRules *or ArcPolicies, which is called combining algorithm. According to algorithm, *evaluation results from the elements are combined, and then the combined *evaluation result is returned to the up-level. */ class Policy : public Arc::LoadableClass { protected: std::list subelements; static Arc::Logger logger; public: /// Template constructor - creates empty policy Policy(Arc::PluginArgument* parg): Arc::LoadableClass(parg) {}; /// Template constructor - creates policy based on XML document /** If XML document is empty then empty policy is created. If it is not empty then it must be valid policy document - otherwise created object should be invalid. 
*/ Policy(const Arc::XMLNode, Arc::PluginArgument* parg): Arc::LoadableClass(parg) {}; /// Template constructor - creates policy based on XML document /** If XML document is empty then empty policy is created. If it is not empty then it must be valid policy document - otherwise created object should be invalid. This constructor is based on the policy node and i the EvaluatorContext which includes the factory objects for combining algorithm and function */ Policy(const Arc::XMLNode, EvaluatorContext*, Arc::PluginArgument* parg): Arc::LoadableClass(parg) {}; virtual ~Policy(){}; /// Returns true is object is valid. virtual operator bool(void) const = 0; ///Evaluate whether the two targets to be evaluated match to each other virtual MatchResult match(EvaluationCtx*) = 0; /**Evaluate policy * For the of Arc, only get the "Effect" from rules; * For the of Arc, combine the evaluation result from ; * For the of XACML, evaluate the node by using information from request, * and use the "Effect" attribute of ; * For the of XACML, combine the evaluation result from */ virtual Result eval(EvaluationCtx*) = 0; /**Add a policy element to into "this" object */ virtual void addPolicy(Policy* pl){subelements.push_back(pl);}; /**Set Evaluator Context for the usage in creating low-level policy object*/ virtual void setEvaluatorContext(EvaluatorContext*) {}; /**Parse XMLNode, and construct the low-level Rule object*/ virtual void make_policy() {}; /**Get the "Effect" attribute*/ virtual std::string getEffect() const = 0; /**Get eveluation result*/ virtual EvalResult& getEvalResult() = 0; /**Set eveluation result*/ virtual void setEvalResult(EvalResult& res) = 0; /**Get the name of Evaluator which can evaluate this policy*/ virtual const char* getEvalName() const = 0; /**Get the name of this policy*/ virtual const char* getName() const = 0; }; } // namespace ArcSec #endif /* __ARC_SEC_POLICY_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/PolicyParser.h0000644000000000000000000000012311017520056025156 xustar000000000000000027 mtime=1212063790.482718 27 atime=1513200574.484701 29 ctime=1513200659.36173968 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PolicyParser.h0000644000175000002070000000155411017520056025231 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_POLICYPARSER_H__ #define __ARC_SEC_POLICYPARSER_H__ #include #include #include #include namespace ArcSec { ///A interface which will isolate the policy object from actual policy storage (files, urls, database) /**Parse the policy from policy source (e.g. files, urls, database, etc.). 
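   A typical call sequence is sketched below (the Source, the policy class name
   and the EvaluatorContext are assumed to come from the caller's configuration;
   the returned Policy object is owned by the caller):

     PolicyParser parser;
     Policy* pol = parser.parsePolicy(source, policyclassname, ctx);
     if(pol != NULL) {
       //use the policy, delete it when finished
     }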
*/ class PolicyParser { public: PolicyParser(); /**Parse policy @param source location of the policy @param policyclassname name of the policy for ClassLoader @param ctx EvaluatorContext which includes the **Factory */ virtual Policy* parsePolicy(const Source& source, std::string policyclassname, EvaluatorContext* ctx); virtual ~PolicyParser(){}; }; } // namespace ArcSec #endif /* __ARC_SEC_POLICYPARSER_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/EvaluatorLoader.cpp0000644000000000000000000000012412044527530026174 xustar000000000000000027 mtime=1351790424.395655 27 atime=1513200574.494701 30 ctime=1513200659.367739753 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp0000644000175000002070000003114012044527530026240 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include "EvaluatorLoader.h" Arc::Logger ArcSec::EvaluatorLoader::logger(Arc::Logger::rootLogger, "EvaluatorLoader"); namespace ArcSec { Arc::XMLNode arc_evaluator_cfg_nd("\ \ \ \ \ \ attrfactory\ fnfactory\ algfactory\ evaluator\ request\ policy\ \ \ \ \ \ \ \ \ \ "); Arc::XMLNode xacml_evaluator_cfg_nd("\ \ \ \ \ \ attrfactory\ fnfactory\ algfactory\ evaluator\ request\ policy\ \ \ \ \ \ \ \ \ \ "); EvaluatorLoader::EvaluatorLoader() { class_config_list_.push_back(arc_evaluator_cfg_nd); class_config_list_.push_back(xacml_evaluator_cfg_nd); } Evaluator* EvaluatorLoader::getEvaluator(const std::string& classname) { ArcSec::Evaluator* eval = NULL; Arc::ClassLoader* classloader = NULL; //Get the lib path from environment, and put it into the configuration xml node std::list plugins = Arc::ArcLocation::GetPlugins(); Arc::XMLNode node; std::list::iterator it; bool found = false; for( it = class_config_list_.begin(); it != class_config_list_.end(); it++) { node = (*it); if((std::string)(node["PDPConfig"]["Evaluator"].Attribute("name")) == classname) { found = true; break; } } if(found) { bool has_covered = false; for(std::list::iterator p = plugins.begin();p!=plugins.end();++p) { for(int i=0;;i++) { Arc::XMLNode cn = node["ModuleManager"]["Path"][i]; if(!cn) break; if((std::string)(cn) == (*p)){ has_covered = true; break; } } if(!has_covered) node["ModuleManager"].NewChild("Path")=*p; } } else { // Loading unknown evaluator Arc::XMLNode cfg("\ \ \ "); for(std::list::iterator plugin = plugins.begin();plugin!=plugins.end();++plugin) { cfg["ModuleManager"].NewChild("Path")=*plugin; try { Glib::Dir dir(*plugin); for(Glib::DirIterator file = dir.begin(); file != dir.end(); file++) { std::string name = *file; if(name.substr(0, 3) != "lib") continue; std::size_t pos = name.rfind("."); std::string subname; if(pos!=std::string::npos) subname = name.substr(pos); if(subname != G_MODULE_SUFFIX) continue; std::string fname = Glib::build_filename(*plugin,name); // Few tests just in case if(!file_test(fname,Glib::FILE_TEST_IS_EXECUTABLE)) continue; if(!file_test(fname,Glib::FILE_TEST_IS_REGULAR | Glib::FILE_TEST_IS_SYMLINK)) continue; name = name.substr(3,name.find('.')-3); Arc::XMLNode plugcfg = cfg.NewChild("Plugins"); plugcfg.NewAttribute("Name")=name; plugcfg.NewChild("Plugin").NewAttribute("Name")="__arc_evaluator_modules__"; }; } catch (Glib::FileError&) {}; } cfg.New(node); } Arc::Config modulecfg(node); classloader = Arc::ClassLoader::getClassLoader(&modulecfg); //Dynamically load Evaluator object according to configure information. 
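  /* Typical caller-side flow for the loader methods defined in this file
   * (a sketch; the Evaluator interface itself is declared elsewhere, so the
   * actual evaluation call is only indicated):
   *
   *   ArcSec::EvaluatorLoader loader;
   *   ArcSec::Policy*    policy    = loader.getPolicy(policy_source);     // policy type auto-detected
   *   ArcSec::Evaluator* evaluator = policy ? loader.getEvaluator(policy) : NULL;
   *   ArcSec::Request*   request   = loader.getRequest(request_source);
   *   if(evaluator && request) {
   *     // evaluator->... evaluate the request against the loaded policy
   *   }
   *   delete request; delete evaluator; delete policy;   // caller frees all loaded objects
   */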
//It should be the caller to free the object eval = (Evaluator*)(classloader->Instance(classname, &node, "__arc_evaluator_modules__")); if(!eval) logger.msg(Arc::ERROR, "Can not load ARC evaluator object: %s",classname); return eval; } Evaluator* EvaluatorLoader::getEvaluator(const Policy* policy) { if(!policy) return NULL; return getEvaluator(policy->getEvalName()); } Evaluator* EvaluatorLoader::getEvaluator(const Request* request) { if(!request) return NULL; return getEvaluator(request->getEvalName()); } Request* EvaluatorLoader::getRequest(const std::string& classname, const Source& requestsource) { ArcSec::Request* req = NULL; Arc::ClassLoader* classloader = NULL; //Get the request node Arc::XMLNode reqnode = requestsource.Get(); //Get the lib path from environment, and put it into the configuration xml node std::list plugins = Arc::ArcLocation::GetPlugins(); Arc::XMLNode node; std::list::iterator it; bool found = false; for( it = class_config_list_.begin(); it != class_config_list_.end(); it++) { node = (*it); if((std::string)(node["PDPConfig"]["Request"].Attribute("name")) == classname) { found = true; break; } } if(found) { bool has_covered = false; for(std::list::iterator p = plugins.begin();p!=plugins.end();++p) { for(int i=0;;i++) { Arc::XMLNode cn = node["ModuleManager"]["Path"][i]; if(!cn) break; if((std::string)(cn) == (*p)){ has_covered = true; break; } } if(!has_covered) node["ModuleManager"].NewChild("Path")=*p; } Arc::Config modulecfg(node); classloader = Arc::ClassLoader::getClassLoader(&modulecfg); //Dynamically load Request object according to configure information. //It should be the caller to free the object req = (Request*)(classloader->Instance(classname, &reqnode, "__arc_request_modules__")); } if(!req) logger.msg(Arc::ERROR, "Can not load ARC request object: %s",classname); return req; } Policy* EvaluatorLoader::getPolicy(const std::string& classname, const Source& policysource) { ArcSec::Policy* policy = NULL; Arc::ClassLoader* classloader = NULL; //Get policy node Arc::XMLNode policynode = policysource.Get(); //Get the lib path from environment, and put it into the configuration xml node std::list plugins = Arc::ArcLocation::GetPlugins(); Arc::XMLNode node; std::list::iterator it; bool found = false; for( it = class_config_list_.begin(); it != class_config_list_.end(); it++) { node = (*it); if((std::string)(node["PDPConfig"]["Policy"].Attribute("name")) == classname) { found = true; break; } } if(found) { bool has_covered = false; for(std::list::iterator p = plugins.begin();p!=plugins.end();++p) { for(int i=0;;i++) { Arc::XMLNode cn = node["ModuleManager"]["Path"][i]; if(!cn) break; if((std::string)(cn) == (*p)){ has_covered = true; break; } } if(!has_covered) node["ModuleManager"].NewChild("Path")=*p; } Arc::Config modulecfg(node); classloader = Arc::ClassLoader::getClassLoader(&modulecfg); //Dynamically load Policy object according to configure information. 
//It should be the caller to free the object policy = (Policy*)(classloader->Instance(classname, &policynode, "__arc_policy_modules__")); } if(!policy) logger.msg(Arc::ERROR, "Can not load policy object: %s",classname); return policy; } Policy* EvaluatorLoader::getPolicy(const Source& policysource) { ArcSec::Policy* policy = NULL; Arc::ClassLoader* classloader = NULL; //Get policy node Arc::XMLNode policynode = policysource.Get(); //Get the lib path from environment, and put it into the configuration xml node std::list plugins = Arc::ArcLocation::GetPlugins(); Arc::XMLNode cfg("\ \ \ "); for(std::list::iterator plugin = plugins.begin();plugin!=plugins.end();++plugin) { cfg["ModuleManager"].NewChild("Path")=*plugin; try { Glib::Dir dir(*plugin); for(Glib::DirIterator file = dir.begin(); file != dir.end(); file++) { std::string name = *file; if(name.substr(0, 3) != "lib") continue; // TODO: This won't work on windows and maybe even on some // unices which do have shared libraries ending with .so if(name.substr(name.length()-3, 3) != ".so") continue; std::string fname = Glib::build_filename(*plugin,name); // Few tests just in case if(!file_test(fname,Glib::FILE_TEST_IS_EXECUTABLE)) continue; if(!file_test(fname,Glib::FILE_TEST_IS_REGULAR | Glib::FILE_TEST_IS_SYMLINK)) continue; name = name.substr(3,name.find('.')-3); Arc::XMLNode plugcfg = cfg.NewChild("Plugins"); plugcfg.NewAttribute("Name")=name; plugcfg.NewChild("Plugin").NewAttribute("Name")="__arc_policy_modules__"; // ?? plugcfg["Plugin"]="policy"; ?? }; } catch (Glib::FileError&) {}; }; Arc::Config modulecfg(cfg); classloader = Arc::ClassLoader::getClassLoader(&modulecfg); //Dynamically load Policy object according to configure information. //It should be the caller to free the object policy = (Policy*)(classloader->Instance(&policynode, "__arc_policy_modules__")); if(!policy) logger.msg(Arc::ERROR, "Can not load policy object"); return policy; } Request* EvaluatorLoader::getRequest(const Source& requestsource) { ArcSec::Request* request = NULL; Arc::ClassLoader* classloader = NULL; //Get policy node Arc::XMLNode requestnode = requestsource.Get(); //Get the lib path from environment, and put it into the configuration xml node std::list plugins = Arc::ArcLocation::GetPlugins(); Arc::XMLNode cfg("\ \ \ "); for(std::list::iterator plugin = plugins.begin();plugin!=plugins.end();++plugin) { cfg["ModuleManager"].NewChild("Path")=*plugin; try { Glib::Dir dir(*plugin); for(Glib::DirIterator file = dir.begin(); file != dir.end(); file++) { std::string name = *file; if(name.substr(0, 3) != "lib") continue; // TODO: This won't work on windows and maybe even on some // unices which do have shared libraries ending with .so if(name.substr(name.length()-3, 3) != ".so") continue; std::string fname = Glib::build_filename(*plugin,name); // Few tests just in case if(!file_test(fname,Glib::FILE_TEST_IS_EXECUTABLE)) continue; if(!file_test(fname,Glib::FILE_TEST_IS_REGULAR | Glib::FILE_TEST_IS_SYMLINK)) continue; name = name.substr(3,name.find('.')-3); Arc::XMLNode plugcfg = cfg.NewChild("Plugins"); plugcfg.NewAttribute("Name")=name; plugcfg.NewChild("Plugin").NewAttribute("Name")="__arc_request_modules__"; // ?? plugcfg["Plugin"]="request"; ?? }; } catch (Glib::FileError&) {}; }; Arc::Config modulecfg(cfg); classloader = Arc::ClassLoader::getClassLoader(&modulecfg); //Dynamically load Request object according to configure information. 
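  /* Note on the plugin directory scan above (descriptive only, no new
   * behaviour): candidate files are taken from the ArcLocation plugin
   * directories, the "lib" prefix and everything from the first '.' are
   * stripped, and the remainder becomes the plugin name - e.g. a hypothetical
   * file libexamplerequest.so would be registered as "examplerequest" under
   * the "__arc_request_modules__" kind. Files not matching the lib*.so
   * pattern are skipped, which is what the TODO comments above refer to on
   * platforms with other shared library suffixes.
   */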
//It should be the caller to free the object request = (Request*)(classloader->Instance(&requestnode, "__arc_request_modules__")); if(!request) logger.msg(Arc::ERROR, "Can not load request object"); return request; } } nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/PolicyStore.h0000644000000000000000000000012411133607047025024 xustar000000000000000027 mtime=1232014887.663229 27 atime=1513200574.484701 30 ctime=1513200659.362739692 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PolicyStore.h0000644000175000002070000000260311133607047025072 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_POLICYSTORE_H__ #define __ARC_SEC_POLICYSTORE_H__ #include #include namespace ArcSec { class EvaluatorContext; ///Storage place for policy objects class PolicyStore { public: class PolicyElement { private: Policy* policy; std::string id; public: PolicyElement(Policy* policy_):policy(policy_) { }; PolicyElement(Policy* policy_, const std::string& id_):policy(policy_),id(id_) { }; operator Policy*(void) const { return policy; }; const std::string& Id(void) const { return id; }; }; PolicyStore(); /// Creates policy store with specified combing algorithm (alg - not used yet), policy name /// (policyclassname) and context (ctx) PolicyStore(const std::string& alg, const std::string& policyclassname, EvaluatorContext* ctx); virtual ~PolicyStore(); virtual std::list findPolicy(EvaluationCtx* context); virtual void addPolicy(const Source& policy, EvaluatorContext* ctx,const std::string& id); virtual void addPolicy(Policy* policyobj, EvaluatorContext* ctx,const std::string& id); virtual void removePolicies(); virtual void releasePolicies(); // std::list policysrclist; private: std::list policies; //std::string combalg; std::string policy_classname; }; } // namespace ArcSec #endif /* __ARC_SEC_POLICYSTORE_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/PaxHeaders.7502/attr0000644000000000000000000000013213214316023023264 xustar000000000000000030 mtime=1513200659.416740352 30 atime=1513200668.720854145 30 ctime=1513200659.416740352 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/0000755000175000002070000000000013214316023023407 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/PaxHeaders.7502/StringAttribute.cpp0000644000000000000000000000012411232072001027171 xustar000000000000000027 mtime=1248359425.692324 27 atime=1513200574.497701 30 ctime=1513200659.411740291 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/StringAttribute.cpp0000644000175000002070000000371411232072001027243 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "StringAttribute.h" namespace ArcSec { std::string StringAttribute::identifier = "string"; bool StringAttribute::equal(AttributeValue* o, bool check_id){ StringAttribute *other; try{ other = dynamic_cast(o); } catch(std::exception&) { }; if(other==NULL){ //std::cerr<<"not StringAttribute"<id) return false; } if((value.compare(other->getValue()))==0) return true; else return false; } bool StringAttribute::inrange(AttributeValue* o){ StringAttribute *other; try{ other = dynamic_cast(o); } catch(std::exception&) { }; if(other==NULL){ //std::cerr<<"not StringAttribute"<id) return false; //if there is a few values in the policy side, e.g. 
//read; write; delete std::string other_value = other->getValue(); size_t p1, p2; std::string str, str1, str2; str = value; do { p1 = str.find_first_not_of(" "); p2 = str.find_first_of(";"); if(p2!=std::string::npos) str1 = str.substr(p1,p2-p1); else str1 = str.substr(p1); size_t f1, f2; std::string o_str, o_str1, o_str2; o_str = other_value; bool match = false; do { f1 = o_str.find_first_not_of(" "); f2 = o_str.find_first_of(";"); if(f2!=std::string::npos) o_str1 = o_str.substr(f1,f2-f1); else o_str1 = o_str.substr(f1); if((o_str1.compare(str1)) == 0) { match = true; break; } o_str2=o_str.substr(f2+1); o_str.clear(); o_str = o_str2; o_str2.clear(); } while(f2!=std::string::npos); if(match == false) {return false;}; str2=str.substr(p2+1); str.clear(); str = str2; str2.clear(); } while(p2!=std::string::npos); return true; } } //namespace ArcSec nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515025407 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200598.479995072 30 ctime=1513200659.407740242 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/Makefile.am0000644000175000002070000000124412052416515025452 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libarcattr.la libarcattr_ladir = $(pkgincludedir)/security/ArcPDP/attr libarcattr_la_HEADERS = AttributeValue.h StringAttribute.h DateTimeAttribute.h X500NameAttribute.h AnyURIAttribute.h BooleanAttribute.h \ GenericAttribute.h AttributeProxy.h AttributeFactory.h RequestAttribute.h libarcattr_la_SOURCES = RequestAttribute.cpp StringAttribute.cpp DateTimeAttribute.cpp X500NameAttribute.cpp AnyURIAttribute.cpp BooleanAttribute.cpp GenericAttribute.cpp libarcattr_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcattr_la_LIBADD = $(top_builddir)/src/hed/libs/common/libarccommon.la nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315726025417 xustar000000000000000030 mtime=1513200598.535995757 30 atime=1513200648.208603272 30 ctime=1513200659.408740254 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/Makefile.in0000644000175000002070000007775013214315726025505 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/security/ArcPDP/attr DIST_COMMON = $(libarcattr_la_HEADERS) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libarcattr_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libarcattr_la_OBJECTS = libarcattr_la-RequestAttribute.lo \ libarcattr_la-StringAttribute.lo \ libarcattr_la-DateTimeAttribute.lo \ libarcattr_la-X500NameAttribute.lo \ libarcattr_la-AnyURIAttribute.lo \ libarcattr_la-BooleanAttribute.lo \ libarcattr_la-GenericAttribute.lo libarcattr_la_OBJECTS = $(am_libarcattr_la_OBJECTS) libarcattr_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libarcattr_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcattr_la_SOURCES) DIST_SOURCES = $(libarcattr_la_SOURCES) am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, 
files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libarcattr_ladir)" HEADERS = $(libarcattr_la_HEADERS) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ 
GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ 
am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libarcattr.la libarcattr_ladir = $(pkgincludedir)/security/ArcPDP/attr libarcattr_la_HEADERS = AttributeValue.h StringAttribute.h DateTimeAttribute.h X500NameAttribute.h AnyURIAttribute.h BooleanAttribute.h \ GenericAttribute.h AttributeProxy.h AttributeFactory.h RequestAttribute.h libarcattr_la_SOURCES = RequestAttribute.cpp StringAttribute.cpp DateTimeAttribute.cpp X500NameAttribute.cpp AnyURIAttribute.cpp BooleanAttribute.cpp GenericAttribute.cpp libarcattr_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcattr_la_LIBADD = $(top_builddir)/src/hed/libs/common/libarccommon.la all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/security/ArcPDP/attr/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/security/ArcPDP/attr/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcattr.la: $(libarcattr_la_OBJECTS) $(libarcattr_la_DEPENDENCIES) $(libarcattr_la_LINK) $(libarcattr_la_OBJECTS) $(libarcattr_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcattr_la-AnyURIAttribute.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcattr_la-BooleanAttribute.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcattr_la-DateTimeAttribute.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcattr_la-GenericAttribute.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcattr_la-RequestAttribute.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcattr_la-StringAttribute.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcattr_la-X500NameAttribute.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcattr_la-RequestAttribute.lo: RequestAttribute.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcattr_la_CXXFLAGS) $(CXXFLAGS) -MT libarcattr_la-RequestAttribute.lo -MD -MP -MF $(DEPDIR)/libarcattr_la-RequestAttribute.Tpo -c -o libarcattr_la-RequestAttribute.lo `test -f 'RequestAttribute.cpp' || echo '$(srcdir)/'`RequestAttribute.cpp @am__fastdepCXX_TRUE@ $(am__mv) 
$(DEPDIR)/libarcattr_la-RequestAttribute.Tpo $(DEPDIR)/libarcattr_la-RequestAttribute.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='RequestAttribute.cpp' object='libarcattr_la-RequestAttribute.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcattr_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcattr_la-RequestAttribute.lo `test -f 'RequestAttribute.cpp' || echo '$(srcdir)/'`RequestAttribute.cpp libarcattr_la-StringAttribute.lo: StringAttribute.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcattr_la_CXXFLAGS) $(CXXFLAGS) -MT libarcattr_la-StringAttribute.lo -MD -MP -MF $(DEPDIR)/libarcattr_la-StringAttribute.Tpo -c -o libarcattr_la-StringAttribute.lo `test -f 'StringAttribute.cpp' || echo '$(srcdir)/'`StringAttribute.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcattr_la-StringAttribute.Tpo $(DEPDIR)/libarcattr_la-StringAttribute.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='StringAttribute.cpp' object='libarcattr_la-StringAttribute.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcattr_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcattr_la-StringAttribute.lo `test -f 'StringAttribute.cpp' || echo '$(srcdir)/'`StringAttribute.cpp libarcattr_la-DateTimeAttribute.lo: DateTimeAttribute.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcattr_la_CXXFLAGS) $(CXXFLAGS) -MT libarcattr_la-DateTimeAttribute.lo -MD -MP -MF $(DEPDIR)/libarcattr_la-DateTimeAttribute.Tpo -c -o libarcattr_la-DateTimeAttribute.lo `test -f 'DateTimeAttribute.cpp' || echo '$(srcdir)/'`DateTimeAttribute.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcattr_la-DateTimeAttribute.Tpo $(DEPDIR)/libarcattr_la-DateTimeAttribute.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DateTimeAttribute.cpp' object='libarcattr_la-DateTimeAttribute.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcattr_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcattr_la-DateTimeAttribute.lo `test -f 'DateTimeAttribute.cpp' || echo '$(srcdir)/'`DateTimeAttribute.cpp libarcattr_la-X500NameAttribute.lo: X500NameAttribute.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcattr_la_CXXFLAGS) $(CXXFLAGS) -MT libarcattr_la-X500NameAttribute.lo -MD -MP -MF $(DEPDIR)/libarcattr_la-X500NameAttribute.Tpo -c -o libarcattr_la-X500NameAttribute.lo `test -f 'X500NameAttribute.cpp' || echo '$(srcdir)/'`X500NameAttribute.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcattr_la-X500NameAttribute.Tpo $(DEPDIR)/libarcattr_la-X500NameAttribute.Plo 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='X500NameAttribute.cpp' object='libarcattr_la-X500NameAttribute.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcattr_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcattr_la-X500NameAttribute.lo `test -f 'X500NameAttribute.cpp' || echo '$(srcdir)/'`X500NameAttribute.cpp libarcattr_la-AnyURIAttribute.lo: AnyURIAttribute.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcattr_la_CXXFLAGS) $(CXXFLAGS) -MT libarcattr_la-AnyURIAttribute.lo -MD -MP -MF $(DEPDIR)/libarcattr_la-AnyURIAttribute.Tpo -c -o libarcattr_la-AnyURIAttribute.lo `test -f 'AnyURIAttribute.cpp' || echo '$(srcdir)/'`AnyURIAttribute.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcattr_la-AnyURIAttribute.Tpo $(DEPDIR)/libarcattr_la-AnyURIAttribute.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='AnyURIAttribute.cpp' object='libarcattr_la-AnyURIAttribute.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcattr_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcattr_la-AnyURIAttribute.lo `test -f 'AnyURIAttribute.cpp' || echo '$(srcdir)/'`AnyURIAttribute.cpp libarcattr_la-BooleanAttribute.lo: BooleanAttribute.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcattr_la_CXXFLAGS) $(CXXFLAGS) -MT libarcattr_la-BooleanAttribute.lo -MD -MP -MF $(DEPDIR)/libarcattr_la-BooleanAttribute.Tpo -c -o libarcattr_la-BooleanAttribute.lo `test -f 'BooleanAttribute.cpp' || echo '$(srcdir)/'`BooleanAttribute.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcattr_la-BooleanAttribute.Tpo $(DEPDIR)/libarcattr_la-BooleanAttribute.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BooleanAttribute.cpp' object='libarcattr_la-BooleanAttribute.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcattr_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcattr_la-BooleanAttribute.lo `test -f 'BooleanAttribute.cpp' || echo '$(srcdir)/'`BooleanAttribute.cpp libarcattr_la-GenericAttribute.lo: GenericAttribute.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcattr_la_CXXFLAGS) $(CXXFLAGS) -MT libarcattr_la-GenericAttribute.lo -MD -MP -MF $(DEPDIR)/libarcattr_la-GenericAttribute.Tpo -c -o libarcattr_la-GenericAttribute.lo `test -f 'GenericAttribute.cpp' || echo '$(srcdir)/'`GenericAttribute.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcattr_la-GenericAttribute.Tpo $(DEPDIR)/libarcattr_la-GenericAttribute.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='GenericAttribute.cpp' object='libarcattr_la-GenericAttribute.lo' libtool=yes 
@AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcattr_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcattr_la-GenericAttribute.lo `test -f 'GenericAttribute.cpp' || echo '$(srcdir)/'`GenericAttribute.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libarcattr_laHEADERS: $(libarcattr_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarcattr_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarcattr_ladir)" @list='$(libarcattr_la_HEADERS)'; test -n "$(libarcattr_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarcattr_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarcattr_ladir)" || exit $$?; \ done uninstall-libarcattr_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarcattr_la_HEADERS)'; test -n "$(libarcattr_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarcattr_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libarcattr_ladir)" && rm -f $$files ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ 
dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(HEADERS) installdirs: for dir in "$(DESTDIR)$(libarcattr_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-libarcattr_laHEADERS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-libarcattr_laHEADERS .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am \ install-libarcattr_laHEADERS install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-libarcattr_laHEADERS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/PaxHeaders.7502/X500NameAttribute.cpp0000644000000000000000000000012411232072001027160 xustar000000000000000027 mtime=1248359425.692324 27 atime=1513200574.503701 30 ctime=1513200659.413740315 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/X500NameAttribute.cpp0000644000175000002070000000123511232072001027226 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "X500NameAttribute.h" namespace ArcSec { std::string X500NameAttribute::identifier = "x500Name"; bool X500NameAttribute::equal(AttributeValue* o, bool check_id){ X500NameAttribute *other; try{ other = dynamic_cast(o); } catch(std::exception&) { }; if(other==NULL){ //std::cerr<<"not X500NameAttribute"<id) return false; } if((value.compare(other->getValue()))==0) //Now, deal with it the same as StringAttribute. return true; else return false; } } //namespace ArcSec nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/PaxHeaders.7502/GenericAttribute.cpp0000644000000000000000000000012411232072001027277 xustar000000000000000027 mtime=1248359425.692324 27 atime=1513200574.497701 30 ctime=1513200659.416740352 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/GenericAttribute.cpp0000644000175000002070000000104611232072001027345 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "GenericAttribute.h" namespace ArcSec { std::string GenericAttribute::identifier(""); bool GenericAttribute::equal(AttributeValue* o, bool check_id){ if(!o) return false; if(check_id) { if( (getType() == (o->getType())) && (getId() == (o->getId())) && (encode() == (o->encode())) ) return true; } else { if( (getType() == (o->getType())) && (encode() == (o->encode())) ) return true; } return false; } } //namespace ArcSec nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/PaxHeaders.7502/AttributeFactory.h0000644000000000000000000000012411730411253027010 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200574.497701 30 ctime=1513200659.405740218 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/AttributeFactory.h0000644000175000002070000000130311730411253027052 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ATTRIBUTEFACTORY_H__ #define __ARC_SEC_ATTRIBUTEFACTORY_H__ #include #include #include #include #include "AttributeProxy.h" namespace ArcSec { typedef std::map AttrProxyMap; /** Base attribute factory class*/ class AttributeFactory : public Arc::LoadableClass { public: AttributeFactory(Arc::PluginArgument* parg): Arc::LoadableClass(parg) {}; virtual ~AttributeFactory(){}; public: virtual AttributeValue* createValue(const Arc::XMLNode& node, const std::string& type) = 0; protected: AttrProxyMap apmap; }; } // namespace ArcSec #endif /* __ARC_SEC_ATTRIBUTEFACTORY_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/PaxHeaders.7502/BooleanAttribute.cpp0000644000000000000000000000012311232072001027301 xustar000000000000000027 mtime=1248359425.692324 27 atime=1513200574.500701 29 ctime=1513200659.41574034 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/BooleanAttribute.cpp0000644000175000002070000000112511232072001027346 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "BooleanAttribute.h" namespace ArcSec { std::string BooleanAttribute::identifier = "bool"; bool BooleanAttribute::equal(AttributeValue* o, bool check_id){ BooleanAttribute *other; try{ other = dynamic_cast(o); } catch(std::exception&) { }; if(other==NULL){ //std::cerr<<"not 
BooleanAttribute"<id) return false; } if(value==(other->getValue())) return true; else return false; } } //namespace ArcSec nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/PaxHeaders.7502/DateTimeAttribute.cpp0000644000000000000000000000012411232072001027417 xustar000000000000000027 mtime=1248359425.692324 27 atime=1513200574.502701 30 ctime=1513200659.412740303 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/DateTimeAttribute.cpp0000644000175000002070000001641511232072001027473 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "DateTimeAttribute.h" namespace ArcSec { //DateTimeAttribute std::string DateTimeAttribute::identifier = "datetime"; bool DateTimeAttribute::equal(AttributeValue* o, bool check_id){ DateTimeAttribute *other; try{ other = dynamic_cast(o); } catch(std::exception&) { }; if(other==NULL){ //std::cerr<<"not DateTimeAttribute"<id) return false; } if(value==(other->getValue())) return true; else return false; } bool DateTimeAttribute::lessthan(AttributeValue* o){ DateTimeAttribute *other; try{ other = dynamic_cast(o); } catch(std::exception&) { }; if(other==NULL){ //std::cerr<<"not DateTimeAttribute"<getValue())) return true; else return false; } bool DateTimeAttribute::inrange(AttributeValue* o){ PeriodAttribute *other; try{ other = dynamic_cast(o); } catch(std::exception&) { }; if(other==NULL){ //std::cerr<<"not PeriodAttribute"<getValue(); Arc::Time st, et; st = period.starttime; et = period.endtime; if(period.starttime == Arc::Time(-1)) st = period.endtime - period.duration; else if(period.endtime == Arc::Time(-1)) et = period.starttime + period.duration; if((value>=st)&&(value<=et)) return true; else return false; } std::string DateTimeAttribute::encode(){ return(value.str(Arc::ISOTime)); } //TimeAttribute std::string TimeAttribute::identifier = "time"; TimeAttribute::TimeAttribute(const std::string& v,const std::string& i) { id=i; std::string v1 = "1970-01-01T" + v; DateTimeAttribute attr(v1,i); value = attr.getValue(); } bool TimeAttribute::equal(AttributeValue* o, bool check_id){ TimeAttribute *other; try{ other = dynamic_cast(o); } catch(std::exception&) { }; if(other==NULL){ //std::cerr<<"not TimeAttribute"<id) return false; } if(value==(other->getValue())) return true; else return false; } bool TimeAttribute::lessthan(AttributeValue* o){ TimeAttribute *other; try{ other = dynamic_cast(o); } catch(std::exception&) { }; if(other==NULL){ //std::cerr<<"not TimeAttribute"<getValue())) return true; else return false; } std::string TimeAttribute::encode(){ std::string v; v = value.str(Arc::ISOTime); return(v.substr(11)); } //DateAttribute std::string DateAttribute::identifier = "date"; DateAttribute::DateAttribute(const std::string& v,const std::string& i) { id=i; std::string v1 = v + "T00:00:00+00:00"; DateTimeAttribute attr(v1,i); value = attr.getValue(); } bool DateAttribute::equal(AttributeValue* o, bool check_id){ DateAttribute *other; try{ other = dynamic_cast(o); } catch(std::exception&) { }; if(other==NULL){ //std::cerr<<"not DateAttribute"<id) return false; } if(value==(other->getValue())) return true; else return false; } bool DateAttribute::lessthan(AttributeValue* o){ DateAttribute *other; try{ other = dynamic_cast(o); } catch(std::exception&) { }; if(other==NULL){ //std::cerr<<"not DateAttribute"<id) return false; if(value<(other->getValue())) return true; else return false; } std::string DateAttribute::encode(){ std::string v; v = value.str(Arc::ISOTime); return(v.substr(0,9)); } //DurationAttribute 
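// (Illustration of the range check implemented by DateTimeAttribute::inrange()
//  above together with the period forms parsed by PeriodAttribute below;
//  the values are examples only. A DateTimeAttribute holding
//  "2010-06-15T12:00:00Z" is in range of a PeriodAttribute written in any of
//  these equivalent forms:
//    "2010-06-01T00:00:00Z/2010-07-01T00:00:00Z"   start/end
//    "2010-06-01T00:00:00Z/P30D"                   start/duration
//    "P30D/2010-07-01T00:00:00Z"                   duration/end
//  because the missing bound is derived from the duration before the
//  comparison value>=start && value<=end is made.)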
std::string DurationAttribute::identifier = "duration"; bool DurationAttribute::equal(AttributeValue* o, bool check_id){ DurationAttribute *other; try{ other = dynamic_cast(o); } catch(std::exception&) { }; if(other==NULL){ //std::cerr<<"not DurationAttribute"<id) return false; } if((value.GetPeriod())==((other->getValue()).GetPeriod())) return true; else return false; } std::string DurationAttribute::encode(){ return(std::string(value)); } //PeriodAttribute std::string PeriodAttribute::identifier = "period"; PeriodAttribute::PeriodAttribute(const std::string& v,const std::string& i) { id=i; (value.starttime).SetTime(-1); (value.endtime).SetTime(-1); (value.duration).SetPeriod(0); Arc::Time t1(-1), t2(-1); Arc::Period d1(0), d2(0); std::string::size_type pos = v.find("/"); std::string s1 = v.substr(0, pos); std::string s2 = v.substr(pos+1); pos = s1.find("-"); if(pos == std::string::npos) pos = s1.find(":"); if(pos == std::string::npos) pos = s1.find("Z"); if(pos == std::string::npos) pos = s1.find("GMT"); if(pos != std::string::npos) t1 = Arc::Time(s1); else d1 = Arc::Period(s1); pos = s2.find("-"); if(pos == std::string::npos) pos = s2.find(":"); if(pos == std::string::npos) pos = s2.find("Z"); if(pos == std::string::npos) pos = s2.find("GMT"); if(pos != std::string::npos) t2 = Arc::Time(s2); else d2 = Arc::Period(s2); if(t1.GetTime()!=-1){ if(t2.GetTime()!=-1){ value.starttime = t1; value.endtime = t2; } else if(d2.GetPeriod()!=0){ value.starttime = t1; value.duration = d2; } else { //std::cerr<<"Invalid ISO period format!"<(o); } catch(std::exception&) { }; if(other==NULL){ //std::cerr<<"not PeriodAttribute"<id) return false; } ArcPeriod oth = other->getValue(); Arc::Time ls, le, os, oe; Arc::Period ld, od; ls = value.starttime; le = value.endtime; os = oth.starttime; oe = oth.endtime; ld = value.duration; od = oth.duration; if((ls!=Arc::Time(-1))&&(le==Arc::Time(-1))) le = ls + ld; else if((ls==Arc::Time(-1))&&(le!=Arc::Time(-1))) ls = le - ld; else if((ls==Arc::Time(-1))||(le==Arc::Time(-1))) return false; if((os!=Arc::Time(-1))&&(oe==Arc::Time(-1))) oe = os + od; else if((os==Arc::Time(-1))&&(oe!=Arc::Time(-1))) os = oe - od; else if((os==Arc::Time(-1))||(oe==Arc::Time(-1))) return false; //std::cout< namespace ArcSec { class AnyURIAttribute : public AttributeValue { private: static std::string identifier; std::string value; std::string id; public: AnyURIAttribute() { }; AnyURIAttribute(const std::string& v,const std::string& i) : value(v), id(i){ }; virtual ~AnyURIAttribute(){ }; virtual bool equal(AttributeValue* other, bool check_id = true); virtual std::string encode() {return value;}; std::string getValue(){ return value; }; std::string getId(){ return id; }; virtual std::string getType() {return identifier; }; static const std::string& getIdentifier(void) { return identifier; }; }; }// namespace ArcSec #endif /* __ARC_SEC_ANYURIATTRIBUTE_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/PaxHeaders.7502/RequestAttribute.cpp0000644000000000000000000000012412044527530027371 xustar000000000000000027 mtime=1351790424.395655 27 atime=1513200574.500701 30 ctime=1513200659.409740267 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp0000644000175000002070000000570012044527530027440 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "RequestAttribute.h" using namespace Arc; using namespace ArcSec; Logger RequestAttribute::logger(Logger::rootLogger, "RequestAttribute"); RequestAttribute::RequestAttribute(XMLNode& node, 
AttributeFactory* attrfactory) : attrval(NULL), attrfactory(attrfactory) { Arc::XMLNode nd; //Get the attribute of the node id = (std::string)(node.Attribute("AttributeId")); if(id.empty()) id = (std::string)(node.Attribute("Id")); std::string tp = (std::string)(node.Attribute("Type")); if(tp.empty()) tp = (std::string)(node.Attribute("DataType")); std::size_t found = tp.find_last_of("#"); //http://www.w3.org/2001/XMLSchema#string if(found!=std::string::npos) { type = tp.substr(found+1); } else { found=tp.find_last_of(":"); //urn:oasis:names:tc:xacml:1.0:data-type:rfc822Name type = tp.substr(found+1); } issuer = (std::string)(node.Attribute("Issuer")); //Create the attribute value object according to the data type attrval = attrfactory->createValue(node, type); logger.msg(DEBUG, "Id= %s,Type= %s,Issuer= %s,Value= %s",id, type, issuer, attrval?attrval->encode():""); if(attrval == NULL) logger.msg(ERROR,"No Attribute exists, which can deal with type: %s", type); //Copy the node parameter into this->node_, for the usage in duplicate method node.New(node_); /* if(!(node.Size())){ avlist.push_back(attrfactory->createValue(node, type)); } else{ for(int i=0; icreateValue(node.Child(i), type)); } */ } RequestAttribute::RequestAttribute(): attrval(NULL), attrfactory(NULL) { } XMLNode RequestAttribute::getNode() { return node_; } std::string RequestAttribute::getAttributeId () const{ return id; } void RequestAttribute::setAttributeId (const std::string& attributeId){ id = attributeId; } std::string RequestAttribute::getDataType () const{ return type; } void RequestAttribute::setDataType (const std::string& dataType){ type = dataType; } std::string RequestAttribute::getIssuer () const{ return issuer; } void RequestAttribute::setIssuer (const std::string& is){ issuer = is; } /*AttrValList RequestAttribute::getAttributeValueList () const{ } void RequestAttribute::setAttributeValueList (const AttrValList& attributeValueList){ } */ AttributeValue* RequestAttribute::getAttributeValue() const{ return attrval; } AttributeFactory* RequestAttribute::getAttributeFactory() const { return attrfactory; } RequestAttribute& RequestAttribute::duplicate(RequestAttribute& req_attr) { id = req_attr.getAttributeId(); type = req_attr.getDataType(); issuer = req_attr.getIssuer(); node_ = req_attr.getNode(); attrval = (req_attr.getAttributeFactory())->createValue(node_, type); return *this; } RequestAttribute::~RequestAttribute(){ if(attrval) delete attrval; /*while(!avlist.empty()){ delete (*(avlist.back())); avlist.pop_back(); } */ } nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/PaxHeaders.7502/RequestAttribute.h0000644000000000000000000000012311611503620027026 xustar000000000000000027 mtime=1311147920.204773 27 atime=1513200574.503701 29 ctime=1513200659.40674023 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/RequestAttribute.h0000644000175000002070000000413611611503620027100 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_REQUESTATTRIBUTE_H__ #define __ARC_SEC_REQUESTATTRIBUTE_H__ #include #include #include #include "AttributeValue.h" #include "AttributeFactory.h" namespace ArcSec { //typedef std::list AttrValList; ///Wrapper which includes AttributeValue object which is generated according to date type of one spefic node in Request.xml class RequestAttribute { public: /**Constructor - create attribute value object according to the "Type" in the node urn:mace:shibboleth:examples */ RequestAttribute(Arc::XMLNode& node, AttributeFactory* attrfactory); RequestAttribute(); virtual ~RequestAttribute(); 
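  /**Typical usage, given as an illustrative sketch only (the variable names
   *below are assumed, not taken from this header):
   *  RequestAttribute attr(attribute_node, attr_factory);
   *  AttributeValue* val = attr.getAttributeValue();
   *  if(val != NULL) std::string encoded = val->encode();
   *getAttributeValue() returns NULL when the factory has no handler for the
   *node's data type, so the result should always be checked.
   */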
public: Arc::XMLNode getNode(); std::string getAttributeId () const; void setAttributeId (const std::string& attributeId); std::string getDataType () const; void setDataType (const std::string& dataType); std::string getIssuer () const; void setIssuer (const std::string& issuer); //AttrValList getAttributeValueList () const; //void setAttributeValueList (const AttrValList& attributeValueList); virtual AttributeValue* getAttributeValue() const; virtual AttributeFactory* getAttributeFactory() const; /**Duplicate the parameter into "this"*/ RequestAttribute& duplicate(RequestAttribute&); //protect: //AttrValList avlist; private: static Arc::Logger logger; /**the node*/ Arc::XMLNode node_; /**id of this , it could be useful if the policy specify to get value from request*/ std::string id; /**data type of node, it a important factor for generating the different AttributeValue objects*/ std::string type; /**issuer of the value of ; it could be useful if the policy */ std::string issuer; //AttrValList avlist; /**the AttributeValue object*/ AttributeValue* attrval; /**the AttributeFactory which is used to generate the AttributeValue object*/ AttributeFactory* attrfactory; }; } // namespace ArcSec #endif /* __ARC_SEC_REQUESTATTRIBUTE_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/PaxHeaders.7502/DateTimeAttribute.h0000644000000000000000000000012311232072001027063 xustar000000000000000027 mtime=1248359425.692324 27 atime=1513200574.503701 29 ctime=1513200659.39774012 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/DateTimeAttribute.h0000644000175000002070000001010311232072001027124 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_DATETIMEATTRIBUTE_H__ #define __ARC_SEC_DATETIMEATTRIBUTE_H__ #include #include namespace ArcSec { //DateTimeAttribute, TimeAttribute, DateAttribute, DurationAttribute, PeriodAttribute //As reference: See Date and Time on the Internet: Timestamps //(http://www.ietf.org/rfc/rfc3339.txt) //DateTimeAttribute /**Format: * YYYYMMDDHHMMSSZ * Day Month DD HH:MM:SS YYYY * YYYY-MM-DD HH:MM:SS * YYYY-MM-DDTHH:MM:SS+HH:MM * YYYY-MM-DDTHH:MM:SSZ */ class DateTimeAttribute : public AttributeValue { private: static std::string identifier; Arc::Time value; //using the Time class definition in DateTime.h std::string id; public: DateTimeAttribute(){ }; DateTimeAttribute(const std::string& v,const std::string& i) : value(v), id(i) {}; virtual ~DateTimeAttribute(){ }; virtual bool equal(AttributeValue* other, bool check_id = true); virtual bool lessthan(AttributeValue* other); virtual bool inrange(AttributeValue* other); virtual std::string encode(); //encode value into ISOTime format Arc::Time getValue(){ return value; }; virtual std::string getType() {return identifier; }; virtual std::string getId() {return id;}; static const std::string& getIdentifier(void) { return identifier; }; }; //TimeAttribute /**Format: * HHMMSSZ * HH:MM:SS * HH:MM:SS+HH:MM * HH:MM:SSZ */ class TimeAttribute : public AttributeValue { private: static std::string identifier; Arc::Time value; std::string id; public: TimeAttribute() { }; TimeAttribute(const std::string& v,const std::string& i); virtual ~TimeAttribute(){ }; virtual bool equal(AttributeValue* other, bool check_id = true); virtual bool lessthan(AttributeValue* other); virtual std::string encode(); Arc::Time getValue(){ return value; }; virtual std::string getType() {return identifier; }; virtual std::string getId() {return id;}; static const std::string& getIdentifier(void) { return identifier; }; }; //DateAttribute //Formate: 
//YYYY-MM-DD class DateAttribute : public AttributeValue { private: static std::string identifier; Arc::Time value; std::string id; public: DateAttribute() { }; DateAttribute(const std::string& v,const std::string& i); virtual ~DateAttribute(){ }; virtual bool equal(AttributeValue* other, bool check_id = true); virtual bool lessthan(AttributeValue* other); virtual std::string encode(); Arc::Time getValue(){ return value; }; virtual std::string getType() {return identifier; }; virtual std::string getId() {return id;}; static const std::string& getIdentifier(void) { return identifier; }; }; //DurationAttribute /**Formate: *P??Y??M??DT??H??M??S */ class DurationAttribute : public AttributeValue { private: static std::string identifier; Arc::Period value; std::string id; public: DurationAttribute() { }; DurationAttribute(const std::string& v,const std::string& i) : value(v), id(i){}; virtual ~DurationAttribute(){ }; virtual bool equal(AttributeValue* other, bool check_id = true); virtual std::string encode(); Arc::Period getValue(){ return value; }; virtual std::string getType() {return identifier;}; virtual std::string getId() {return id;}; static const std::string& getIdentifier(void) { return identifier; }; }; typedef struct{ Arc::Time starttime; Arc::Time endtime; Arc::Period duration; }ArcPeriod; //PeriodAttribute /**Formate: *datetime"/"duration *datetime"/"datetime *duration"/"datetime */ class PeriodAttribute : public AttributeValue { private: static std::string identifier; ArcPeriod value; std::string id; public: PeriodAttribute() { }; PeriodAttribute(const std::string& v,const std::string& i); virtual ~PeriodAttribute(){ }; virtual bool equal(AttributeValue* other, bool check_id = true); virtual std::string encode(); ArcPeriod getValue(){ return value; }; virtual std::string getType() {return identifier; }; virtual std::string getId() {return id;}; static const std::string& getIdentifier(void) { return identifier; }; }; }// namespace ArcSec #endif /* __ARC_SEC_DATETIMEATTRIBUTE_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/PaxHeaders.7502/StringAttribute.h0000644000000000000000000000012411232072001026636 xustar000000000000000027 mtime=1248359425.692324 27 atime=1513200574.497701 30 ctime=1513200659.396740108 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/StringAttribute.h0000644000175000002070000000155611232072001026712 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_STRINGATTRIBUTE_H__ #define __ARC_SEC_STRINGATTRIBUTE_H__ #include namespace ArcSec { class StringAttribute : public AttributeValue { private: static std::string identifier; std::string value; std::string id; public: StringAttribute(){ }; StringAttribute(const std::string& v,const std::string& i) : value(v), id(i){ }; virtual ~StringAttribute(){ }; virtual bool equal(AttributeValue* other, bool check_id = true); virtual bool inrange(AttributeValue* other); virtual std::string encode() {return value;}; std::string getValue(){ return value; }; virtual std::string getType() {return identifier; }; virtual std::string getId() {return id;}; static const std::string& getIdentifier(void) { return identifier; }; }; }// namespace ArcSec #endif /* __ARC_SEC_STRINGATTRIBUTE_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/PaxHeaders.7502/GenericAttribute.h0000644000000000000000000000012411232072001026744 xustar000000000000000027 mtime=1248359425.692324 27 atime=1513200574.500701 30 ctime=1513200659.402740181 
nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/GenericAttribute.h0000644000175000002070000000163511232072001027016 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_GENERICATTRIBUTE_H__ #define __ARC_SEC_GENERICATTRIBUTE_H__ #include namespace ArcSec { class GenericAttribute : public AttributeValue { private: std::string value; std::string type; std::string id; static std::string identifier; public: GenericAttribute() { }; GenericAttribute(const std::string& v,const std::string& i) : value(v), id(i) { }; virtual ~GenericAttribute() { }; virtual bool equal(AttributeValue* other, bool check_id = true); virtual std::string encode() { return value; }; std::string getValue() { return value; }; virtual std::string getType() { return type; }; virtual std::string getId() { return id; }; void setType(const std::string& new_type) { type=new_type; }; static const std::string& getIdentifier(void) { return identifier; }; }; }// namespace ArcSec #endif /* __ARC_SEC_GENERICATTRIBUTE_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/PaxHeaders.7502/AttributeProxy.h0000644000000000000000000000012411056761442026533 xustar000000000000000027 mtime=1220272930.466197 27 atime=1513200574.500701 30 ctime=1513200659.403740193 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/AttributeProxy.h0000644000175000002070000000152011056761442026576 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ATTRIBUTEPROXY_H__ #define __ARC_SEC_ATTRIBUTEPROXY_H__ #include #include #include #include #include "AttributeValue.h" namespace ArcSec { ///Interface for creating the AttributeValue object, it will be used by AttributeFactory /**The AttributeProxy object will be insert into AttributeFactoty; and the *getAttribute(node) method will be called inside AttributeFacroty.createvalue(node), *in order to create a specific AttributeValue */ class AttributeProxy { public: AttributeProxy() {}; virtual ~AttributeProxy(){}; public: /**Create a AttributeValue object according to the information inside *the XMLNode as parameter. 
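 *As an illustration only (an assumed shape, not code taken from this file),
 *a concrete proxy for string-typed attributes could look roughly like:
 *  class StringAttributeProxy : public AttributeProxy {
 *   public:
 *    virtual AttributeValue* getAttribute(const Arc::XMLNode& node) {
 *      Arc::XMLNode n(node);
 *      return new StringAttribute((std::string)n, (std::string)(n.Attribute("AttributeId")));
 *    };
 *  };
 *The proxies actually shipped with ArcPDP may differ in detail.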
*/ virtual AttributeValue* getAttribute(const Arc::XMLNode& node) = 0; }; } // namespace ArcSec #endif /* __ARC_SEC_ATTRIBUTEPROXY_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/PaxHeaders.7502/X500NameAttribute.h0000644000000000000000000000012411232072001026625 xustar000000000000000027 mtime=1248359425.692324 27 atime=1513200574.503701 30 ctime=1513200659.399740144 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/X500NameAttribute.h0000644000175000002070000000150311232072001026671 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_X500NAMEATTRIBUTE_H__ #define __ARC_SEC_X500NAMEATTRIBUTE_H__ #include namespace ArcSec { class X500NameAttribute : public AttributeValue { private: static std::string identifier; std::string value; std::string id; public: X500NameAttribute() { }; X500NameAttribute(std::string v, std::string i) : value(v), id(i) { }; virtual ~X500NameAttribute(){ }; virtual bool equal(AttributeValue* other, bool check_id = true); virtual std::string encode() {return value;}; std::string getValue(){ return value; }; virtual std::string getType() {return identifier; }; virtual std::string getId(){ return id; }; static const std::string& getIdentifier(void) { return identifier; }; }; }// namespace ArcSec #endif /* __ARC_SEC_X500NAMEATTRIBUTE_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/PaxHeaders.7502/AttributeValue.h0000644000000000000000000000012411232072001026444 xustar000000000000000027 mtime=1248359425.692324 27 atime=1513200574.500701 30 ctime=1513200659.395740095 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/AttributeValue.h0000644000175000002070000000221511232072001026511 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ATTRIBUTEVALUE_H__ #define __ARC_SEC_ATTRIBUTEVALUE_H__ #include namespace ArcSec { ///Interface for containing different type of node for both policy and request /** contains different "Type" definition; Each type of needs *different approach to compare the value. *Any specific class which is for processing specific "Type" shoud inherit this class. 
*The "Type" supported so far is: StringAttribute, *DateAttribute, TimeAttribute, DurationAttribute, PeriodAttribute, AnyURIAttribute, *X500NameAttribute */ class AttributeValue { public: AttributeValue(){}; virtual ~AttributeValue(){}; /**Evluate whether "this" equale to the parameter value */ virtual bool equal(AttributeValue* value, bool check_id = true) = 0; //virtual int compare(AttributeValue* other){}; /**encode the value in a string format*/ virtual std::string encode() = 0; /**Get the DataType of the */ virtual std::string getType() = 0; /**Get the AttributeId of the */ virtual std::string getId() = 0; }; } // namespace ArcSec #endif /* __ARC_SEC_ATTRIBUTEVALUE_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/PaxHeaders.7502/BooleanAttribute.h0000644000000000000000000000012412356757245027003 xustar000000000000000027 mtime=1404821157.416181 27 atime=1513200574.500701 30 ctime=1513200659.401740169 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/BooleanAttribute.h0000644000175000002070000000157412356757245027057 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_BOOLEANATTRIBUTE_H__ #define __ARC_SEC_BOOLEANATTRIBUTE_H__ #include namespace ArcSec { class BooleanAttribute : public AttributeValue { private: static std::string identifier; bool value; std::string id; public: BooleanAttribute() : value(false) { }; BooleanAttribute(const bool& v,const std::string& i = std::string()) : value(v), id(i){ }; virtual ~BooleanAttribute(){ }; virtual bool equal(AttributeValue* o, bool check_id = true); virtual std::string encode() { if(value) return std::string("true"); else return std::string("false"); }; bool getValue(){ return value; }; std::string getId(){ return id; }; std::string getType() {return identifier; }; static const std::string& getIdentifier(void) { return identifier; }; }; }// namespace ArcSec #endif /* __ARC_SEC_BOOLEANATTRIBUTE_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/PaxHeaders.7502/AnyURIAttribute.cpp0000644000000000000000000000012411232072001027032 xustar000000000000000027 mtime=1248359425.692324 27 atime=1513200574.497701 30 ctime=1513200659.414740328 nordugrid-arc-5.4.2/src/hed/libs/security/ArcPDP/attr/AnyURIAttribute.cpp0000644000175000002070000000121711232072001027100 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "AnyURIAttribute.h" namespace ArcSec { std::string AnyURIAttribute::identifier = "anyURI"; bool AnyURIAttribute::equal(AttributeValue* o, bool check_id){ AnyURIAttribute *other; try{ other = dynamic_cast(o); } catch(std::exception&) { }; if(other==NULL){ //std::cerr<<"not AnyURIAttribute"<id) return false; } if((value.compare(other->getValue()))==0) //Now, deal with it the same as StringAttribute. return true; else return false; } } //namespace ArcSec nordugrid-arc-5.4.2/src/hed/libs/security/PaxHeaders.7502/ClassLoader.h0000644000000000000000000000012411607551052023673 xustar000000000000000027 mtime=1310642730.762695 27 atime=1513200574.478701 30 ctime=1513200659.327739264 nordugrid-arc-5.4.2/src/hed/libs/security/ClassLoader.h0000644000175000002070000000250711607551052023744 0ustar00mockbuildmock00000000000000#ifndef __ARC_CLASSLOADER_H__ #define __ARC_CLASSLOADER_H__ #include #include #include #include #include //Some implicit idea in the ClassLoader/ModuleManager stuff: //share_lib_name (e.g. mccsoap) should be global identical //plugin_name (e.g. __arc_attrfactory_modules__) should be global identical //desc->name (e.g. 
attr.factory) should also be global identical namespace Arc{ typedef Plugin LoadableClass; // TODO: Unify with Loader class ClassLoader : public PluginsFactory { protected: ClassLoader(Config *cfg = NULL); protected: void load_all_instances(Config *cfg); public: static ClassLoader* getClassLoader(Config* cfg = NULL); LoadableClass *Instance(const std::string& classId, XMLNode* arg = NULL, const std::string& className = ""); LoadableClass *Instance(XMLNode* arg = NULL, const std::string& className = ""); ~ClassLoader(); private: static Logger logger; static ClassLoader* _instance; }; class ClassLoaderPluginArgument: public PluginArgument { private: XMLNode* xml_; public: ClassLoaderPluginArgument(XMLNode* xml):xml_(xml) { }; virtual ~ClassLoaderPluginArgument(void) { }; operator XMLNode* (void) { return xml_; }; }; } // namespace Arc #endif /* __ARC_CLASSLOADER_H__ */ nordugrid-arc-5.4.2/src/hed/libs/security/PaxHeaders.7502/Security.h0000644000000000000000000000012410705666764023325 xustar000000000000000027 mtime=1192717812.465516 27 atime=1513200574.511701 30 ctime=1513200659.325739239 nordugrid-arc-5.4.2/src/hed/libs/security/Security.h0000644000175000002070000000066410705666764023400 0ustar00mockbuildmock00000000000000// Security.h #ifndef __Security_h__ #define __Security_h__ #include namespace ArcSec{ //! Common stuff used by security related slasses. /*! This class is just a place where to put common stuff that is used by security related slasses. So far it only contains a logger. */ class Security { private: static Arc::Logger logger; friend class SecHandler; friend class PDP; }; } #endif nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/credentialmod0000644000000000000000000000013213214316023022204 xustar000000000000000030 mtime=1513200659.194737637 30 atime=1513200668.720854145 30 ctime=1513200659.194737637 nordugrid-arc-5.4.2/src/hed/libs/credentialmod/0000755000175000002070000000000013214316023022327 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/credentialmod/PaxHeaders.7502/Makefile.am0000644000000000000000000000012512052416515024325 xustar000000000000000027 mtime=1353325901.850498 28 atime=1513200596.9859768 30 ctime=1513200659.192737613 nordugrid-arc-5.4.2/src/hed/libs/credentialmod/Makefile.am0000644000175000002070000000062312052416515024372 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libmodcredential.la libmodcredential_la_SOURCES = cred.cpp libmodcredential_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libmodcredential_la_LIBADD = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la libmodcredential_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/libs/credentialmod/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315725024335 xustar000000000000000030 mtime=1513200597.028977326 29 atime=1513200647.92959986 30 ctime=1513200659.193737625 nordugrid-arc-5.4.2/src/hed/libs/credentialmod/Makefile.in0000644000175000002070000006042713214315725024415 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/credentialmod DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) libmodcredential_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libmodcredential_la_OBJECTS = libmodcredential_la-cred.lo libmodcredential_la_OBJECTS = $(am_libmodcredential_la_OBJECTS) libmodcredential_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libmodcredential_la_CXXFLAGS) $(CXXFLAGS) \ $(libmodcredential_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libmodcredential_la_SOURCES) DIST_SOURCES = $(libmodcredential_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ 
GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ 
ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libmodcredential.la libmodcredential_la_SOURCES = cred.cpp libmodcredential_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libmodcredential_la_LIBADD = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la libmodcredential_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/credentialmod/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/credentialmod/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libmodcredential.la: $(libmodcredential_la_OBJECTS) $(libmodcredential_la_DEPENDENCIES) $(libmodcredential_la_LINK) -rpath $(pkglibdir) $(libmodcredential_la_OBJECTS) $(libmodcredential_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmodcredential_la-cred.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libmodcredential_la-cred.lo: cred.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) 
--tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmodcredential_la_CXXFLAGS) $(CXXFLAGS) -MT libmodcredential_la-cred.lo -MD -MP -MF $(DEPDIR)/libmodcredential_la-cred.Tpo -c -o libmodcredential_la-cred.lo `test -f 'cred.cpp' || echo '$(srcdir)/'`cred.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmodcredential_la-cred.Tpo $(DEPDIR)/libmodcredential_la-cred.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='cred.cpp' object='libmodcredential_la-cred.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmodcredential_la_CXXFLAGS) $(CXXFLAGS) -c -o libmodcredential_la-cred.lo `test -f 'cred.cpp' || echo '$(srcdir)/'`cred.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/credentialmod/PaxHeaders.7502/cred.cpp0000644000000000000000000000012412675602216023717 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.641703 30 ctime=1513200659.194737637 nordugrid-arc-5.4.2/src/hed/libs/credentialmod/cred.cpp0000644000175000002070000000061012675602216023761 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include // This is just an intermediate module for making libarccredential library // persistent in compatible way. // Adding plugin descriptor to avoid warning messages from loader extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/crypto0000644000000000000000000000013213214316023020712 xustar000000000000000030 mtime=1513200659.084736292 30 atime=1513200668.721854157 30 ctime=1513200659.084736292 nordugrid-arc-5.4.2/src/hed/libs/crypto/0000755000175000002070000000000013214316023021035 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/crypto/PaxHeaders.7502/OpenSSL.cpp0000644000000000000000000000012413153454775023002 xustar000000000000000027 mtime=1504598525.713781 27 atime=1513200574.778705 30 ctime=1513200659.084736292 nordugrid-arc-5.4.2/src/hed/libs/crypto/OpenSSL.cpp0000644000175000002070000001266213153454775023056 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "OpenSSL.h" #include namespace Arc { static Glib::Mutex lock; static bool initialized = false; static Glib::Mutex* ssl_locks = NULL; static int ssl_locks_num = 0; static std::map app_data_indices; static Logger& logger(void) { static Logger* logger_ = new Logger(Logger::getRootLogger(), "OpenSSL"); return *logger_; } // This class takes care of cleaning OpenSSL data stored per-thread. // Here assumption is that every thread dealing with OpenSSL either // calls OpenSSLInit or is started by thread which called OpenSSLInit. 
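// For illustration, a minimal caller-side sketch (the application-data id used
// below is made up): code that is about to use OpenSSL directly would normally
// arrange
//   if(!Arc::OpenSSLInit()) { /* report failure and give up */ }
//   int idx = Arc::OpenSSLAppDataIndex("arc.example.appdata");
// before creating any SSL_CTX, so that error strings, algorithms and (for
// pre-1.1.0 OpenSSL) the locking callbacks installed below are in place.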
class OpenSSLThreadCleaner: private ThreadDataItem { public: OpenSSLThreadCleaner(void); virtual ~OpenSSLThreadCleaner(void); virtual void Dup(void); }; OpenSSLThreadCleaner::OpenSSLThreadCleaner(void):ThreadDataItem("arc.openssl.thread.cleaner") { } OpenSSLThreadCleaner::~OpenSSLThreadCleaner(void) { #if (OPENSSL_VERSION_NUMBER < 0x10100000L) ERR_remove_state(0); #endif } void OpenSSLThreadCleaner::Dup(void) { new OpenSSLThreadCleaner; } void HandleOpenSSLError(void) { HandleOpenSSLError(SSL_ERROR_NONE); } void HandleOpenSSLError(int code) { unsigned long e = (code==SSL_ERROR_NONE)?ERR_get_error():code; while(e != SSL_ERROR_NONE) { if(e == SSL_ERROR_SYSCALL) { // Hiding system errors //logger().msg(DEBUG, "SSL error: %d - system call failed",e); } else { const char* lib = ERR_lib_error_string(e); const char* func = ERR_func_error_string(e); const char* reason = ERR_reason_error_string(e); logger().msg(DEBUG, "SSL error: %d - %s:%s:%s", e, lib?lib:"", func?func:"", reason?reason:""); }; e = ERR_get_error(); }; } #if (OPENSSL_VERSION_NUMBER < 0x10100000L) static void ssl_locking_cb(int mode, int n, const char * s_, int n_){ if(!ssl_locks) { logger().msg(FATAL, "SSL locks not initialized"); _exit(-1); }; if((n < 0) || (n >= ssl_locks_num)) { logger().msg(FATAL, "wrong SSL lock requested: %i of %i: %i - %s",n,ssl_locks_num,n_,s_); _exit(-1); }; if(mode & CRYPTO_LOCK) { ssl_locks[n].lock(); } else { ssl_locks[n].unlock(); }; } static unsigned long ssl_id_cb(void) { #ifdef WIN32 return (unsigned long)(GetCurrentThreadId()); #else return (unsigned long)(Glib::Thread::self()); #endif } //static void* ssl_idptr_cb(void) { // return (void*)(Glib::Thread::self()); //} #endif bool OpenSSLInit(void) { Glib::Mutex::Lock flock(lock); if(!initialized) { if(!PersistentLibraryInit("modcrypto")) { logger().msg(WARNING, "Failed to lock arccrypto library in memory"); }; #if (OPENSSL_VERSION_NUMBER < 0x10100000L) SSL_load_error_strings(); if(!SSL_library_init()){ logger().msg(ERROR, "Failed to initialize OpenSSL library"); HandleOpenSSLError(); ERR_free_strings(); return false; }; #else if(!OPENSSL_init_ssl(OPENSSL_INIT_LOAD_SSL_STRINGS | OPENSSL_INIT_LOAD_CRYPTO_STRINGS | OPENSSL_INIT_ADD_ALL_CIPHERS | OPENSSL_INIT_ADD_ALL_DIGESTS | OPENSSL_INIT_NO_LOAD_CONFIG, NULL)) { logger().msg(ERROR, "Failed to initialize OpenSSL library"); HandleOpenSSLError(); return false; }; #endif // We could RAND_seed() here. But since 0.9.7 OpenSSL // knows how to make use of OS specific source of random // data. I think it's better to let OpenSSL do a job. // Here we could also generate ephemeral DH key to avoid // time consuming genaration during connection handshake. // But is not clear if it is needed for curently used // connections types at all. Needs further investigation. // Using RSA key violates TLS (according to OpenSSL // documentation) hence we do not use it. // A.K. 
}; #if (OPENSSL_VERSION_NUMBER < 0x10100000L) // Always make sure our own locks are installed int num_locks = CRYPTO_num_locks(); if(num_locks > 0) { if(num_locks != ssl_locks_num) { if(ssl_locks_num > 0) { logger().msg(ERROR, "Number of OpenSSL locks changed - reinitializing"); ssl_locks_num=0; ssl_locks=NULL; }; }; if((!ssl_locks) || (!initialized)) { ssl_locks_num=0; ssl_locks=new Glib::Mutex[num_locks]; }; if(!ssl_locks) return false; ssl_locks_num=num_locks; CRYPTO_set_locking_callback(&ssl_locking_cb); CRYPTO_set_id_callback(&ssl_id_cb); //CRYPTO_set_idptr_callback(&ssl_idptr_cb); } if(!initialized) { OpenSSL_add_all_algorithms(); } #endif new OpenSSLThreadCleaner; initialized=true; return true; } int OpenSSLAppDataIndex(const std::string& id) { Glib::Mutex::Lock flock(lock); std::map::iterator i = app_data_indices.find(id); if(i == app_data_indices.end()) { int n = SSL_CTX_get_ex_new_index(0,NULL,NULL,NULL,NULL); app_data_indices[id] = n; return n; } return i->second; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/crypto/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712231165602023032 xustar000000000000000027 mtime=1382345602.148497 30 atime=1513200597.108978304 30 ctime=1513200659.081736255 nordugrid-arc-5.4.2/src/hed/libs/crypto/Makefile.am0000644000175000002070000000066212231165602023100 0ustar00mockbuildmock00000000000000lib_LTLIBRARIES = libarccrypto.la libarccrypto_ladir = $(pkgincludedir)/crypto libarccrypto_la_HEADERS = OpenSSL.h libarccrypto_la_SOURCES = OpenSSL.cpp libarccrypto_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libarccrypto_la_LIBADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(GTHREAD_LIBS) $(OPENSSL_LIBS) libarccrypto_la_LDFLAGS = -version-info 3:0:0 nordugrid-arc-5.4.2/src/hed/libs/crypto/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315725023044 xustar000000000000000030 mtime=1513200597.152978842 30 atime=1513200647.944600044 30 ctime=1513200659.083736279 nordugrid-arc-5.4.2/src/hed/libs/crypto/Makefile.in0000644000175000002070000006254513214315725023126 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/crypto DIST_COMMON = $(libarccrypto_la_HEADERS) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libdir)" \ "$(DESTDIR)$(libarccrypto_ladir)" LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = libarccrypto_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) am_libarccrypto_la_OBJECTS = libarccrypto_la-OpenSSL.lo libarccrypto_la_OBJECTS = $(am_libarccrypto_la_OBJECTS) libarccrypto_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarccrypto_la_CXXFLAGS) $(CXXFLAGS) \ $(libarccrypto_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ 
--mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarccrypto_la_SOURCES) DIST_SOURCES = $(libarccrypto_la_SOURCES) HEADERS = $(libarccrypto_la_HEADERS) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ 
GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ 
am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ lib_LTLIBRARIES = libarccrypto.la libarccrypto_ladir = $(pkgincludedir)/crypto libarccrypto_la_HEADERS = OpenSSL.h libarccrypto_la_SOURCES = OpenSSL.cpp libarccrypto_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libarccrypto_la_LIBADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(GTHREAD_LIBS) $(OPENSSL_LIBS) libarccrypto_la_LDFLAGS = -version-info 3:0:0 all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/crypto/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/crypto/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarccrypto.la: $(libarccrypto_la_OBJECTS) $(libarccrypto_la_DEPENDENCIES) $(libarccrypto_la_LINK) -rpath $(libdir) $(libarccrypto_la_OBJECTS) $(libarccrypto_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccrypto_la-OpenSSL.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarccrypto_la-OpenSSL.lo: OpenSSL.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccrypto_la_CXXFLAGS) $(CXXFLAGS) -MT libarccrypto_la-OpenSSL.lo -MD -MP -MF $(DEPDIR)/libarccrypto_la-OpenSSL.Tpo -c -o libarccrypto_la-OpenSSL.lo `test -f 'OpenSSL.cpp' || echo '$(srcdir)/'`OpenSSL.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccrypto_la-OpenSSL.Tpo $(DEPDIR)/libarccrypto_la-OpenSSL.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='OpenSSL.cpp' object='libarccrypto_la-OpenSSL.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccrypto_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccrypto_la-OpenSSL.lo `test -f 'OpenSSL.cpp' || echo '$(srcdir)/'`OpenSSL.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libarccrypto_laHEADERS: $(libarccrypto_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarccrypto_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarccrypto_ladir)" @list='$(libarccrypto_la_HEADERS)'; test -n "$(libarccrypto_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarccrypto_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarccrypto_ladir)" || exit $$?; \ done uninstall-libarccrypto_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarccrypto_la_HEADERS)'; test -n "$(libarccrypto_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarccrypto_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libarccrypto_ladir)" && rm -f $$files ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ 
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(HEADERS) installdirs: for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(libarccrypto_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-libarccrypto_laHEADERS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-libLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-libLTLIBRARIES \ uninstall-libarccrypto_laHEADERS .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libLTLIBRARIES clean-libtool ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am \ install-libLTLIBRARIES install-libarccrypto_laHEADERS \ install-man install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags uninstall \ uninstall-am uninstall-libLTLIBRARIES \ uninstall-libarccrypto_laHEADERS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/crypto/PaxHeaders.7502/OpenSSL.h0000644000000000000000000000012411533023024022422 xustar000000000000000027 mtime=1298933268.253756 27 atime=1513200574.780705 30 ctime=1513200659.080736243 nordugrid-arc-5.4.2/src/hed/libs/crypto/OpenSSL.h0000644000175000002070000000161611533023024022473 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_OPENSSL_H__ #define __ARC_OPENSSL_H__ namespace Arc { /// This module contains various convenience utilities for using OpenSSL /** Application may be linked to this module instead of OpenSSL libraries directly. */ /// This function initializes OpenSSL library. /** It may be called multiple times and makes sure everything is done properly and OpenSSL may be used in multi-threaded environment. Because this function makes use of ArcLocation it is advisable to call it after ArcLocation::Init(). 
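     A minimal usage sketch (illustration only, not part of the original
     documentation; the include paths and the argv[0] argument to
     ArcLocation::Init() are assumptions about the installed header layout
     and that function's signature):

       #include <arc/ArcLocation.h>
       #include <arc/crypto/OpenSSL.h>

       int main(int argc, char** argv) {
         Arc::ArcLocation::Init(argv[0]);   // locate the ARC installation first
         if(!Arc::OpenSSLInit()) return 1;  // safe to call more than once
         // ... OpenSSL can now be used from multiple threads ...
         return 0;
       }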
*/ bool OpenSSLInit(void); /// Prints chain of accumulaed OpenSSL errors if any available void HandleOpenSSLError(void); /// Prints chain of accumulaed OpenSSL errors if any available void HandleOpenSSLError(int code); int OpenSSLAppDataIndex(const std::string& id); } // namespace Arc #endif /* __ARC_OPENSSL_H__ */ nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/globusutils0000644000000000000000000000013213214316024021747 xustar000000000000000030 mtime=1513200660.023747776 30 atime=1513200668.721854157 30 ctime=1513200660.023747776 nordugrid-arc-5.4.2/src/hed/libs/globusutils/0000755000175000002070000000000013214316024022072 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/globusutils/PaxHeaders.7502/GSSCredential.h0000644000000000000000000000012412133523112024622 xustar000000000000000027 mtime=1366206026.745609 27 atime=1513200574.775705 30 ctime=1513200660.020747739 nordugrid-arc-5.4.2/src/hed/libs/globusutils/GSSCredential.h0000644000175000002070000000243512133523112024673 0ustar00mockbuildmock00000000000000#ifndef __ARC_GLOBUSGSS_H__ #define __ARC_GLOBUSGSS_H__ #include #include namespace Arc { class Logger; class UserConfig; /// Class for converting credentials stored in file /// in PEM format into Globus structure. /// It works only for full credentials containing /// private key. This limitation is due to limited /// API of Globus. class GSSCredential { public: /// Load credentials from file(s) GSSCredential(const std::string& proxyPath, const std::string& certificatePath, const std::string& keyPath); /// Load credentials from UserConfig information. First tries string then files. GSSCredential(const UserConfig& usercfg); GSSCredential(): credential(GSS_C_NO_CREDENTIAL) {}; ~GSSCredential(); operator gss_cred_id_t&(); operator gss_cred_id_t*(); static std::string ErrorStr(OM_uint32 majstat, OM_uint32 minstat); private: std::string readCredFromFiles(const std::string& proxyPath, const std::string& certificatePath, const std::string& keyPath); void initCred(const std::string& credbuf); gss_cred_id_t credential; //static Logger logger; }; } // namespace Arc #endif // __ARC_GLOBUSGSS_H__ nordugrid-arc-5.4.2/src/hed/libs/globusutils/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712675602216024077 xustar000000000000000027 mtime=1459029134.924374 30 atime=1513200597.753986193 30 ctime=1513200660.014747666 nordugrid-arc-5.4.2/src/hed/libs/globusutils/Makefile.am0000644000175000002070000000110512675602216024136 0ustar00mockbuildmock00000000000000lib_LTLIBRARIES = libarcglobusutils.la libarcglobusutils_la_SOURCES = \ GlobusErrorUtils.cpp GlobusErrorUtils.h \ GSSCredential.cpp GSSCredential.h \ GlobusWorkarounds.cpp GlobusWorkarounds.h libarcglobusutils_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) \ $(GLOBUS_COMMON_CFLAGS) \ $(OPENSSL_CFLAGS) \ $(AM_CXXFLAGS) \ $(LIBXML2_CFLAGS) libarcglobusutils_la_LIBADD = \ $(ARCCOMMON_LIBS) \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) \ $(GLOBUS_GSSAPI_GSI_LIBS) $(GLOBUS_GSI_CALLBACK_LIBS) \ $(GLOBUS_COMMON_LIBS) libarcglobusutils_la_LDFLAGS = -version-info 3:0:0 nordugrid-arc-5.4.2/src/hed/libs/globusutils/PaxHeaders.7502/GlobusWorkarounds.cpp0000644000000000000000000000012413213471407026231 xustar000000000000000027 mtime=1512993543.496042 27 atime=1513200574.778705 30 ctime=1513200660.021747752 nordugrid-arc-5.4.2/src/hed/libs/globusutils/GlobusWorkarounds.cpp0000644000175000002070000000571713213471407026310 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include 
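/* Illustrative usage sketch for the Arc::GSSCredential class declared in
   GSSCredential.h above.  It is not part of the original GlobusWorkarounds.cpp
   and is kept inactive inside "#if 0"; the include paths are assumptions about
   the installed header layout. */
#if 0
#include <arc/UserConfig.h>
#include <arc/globusutils/GSSCredential.h>

static bool have_gss_credential(const Arc::UserConfig& usercfg) {
  // Loads the credential string from usercfg if present, otherwise the
  // proxy or certificate/key files (the private key must be included).
  Arc::GSSCredential cred(usercfg);
  // On failure the handle stays GSS_C_NO_CREDENTIAL.
  if((gss_cred_id_t)cred == GSS_C_NO_CREDENTIAL) return false;
  // 'cred' converts implicitly to gss_cred_id_t& or gss_cred_id_t* wherever
  // a GSSAPI/Globus call expects a credential handle; it is released
  // automatically when 'cred' goes out of scope.
  return true;
}
#endif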
#include #include "GlobusErrorUtils.h" #include "GlobusWorkarounds.h" namespace Arc { static Glib::Mutex lock_; bool GlobusRecoverProxyOpenSSL(void) { // No harm even if not needed - shall trun proxies on for code // which was written with no proxies in mind SetEnv("OPENSSL_ALLOW_PROXY_CERTS","1"); return true; // Following algorithm is unsafe because if mixes callbacks // meant to work with different structures. It is now replaced // with NID precalculation hack. See TLS MCC for more information. /* #if OPENSSL_VERSION_NUMBER > 0x0090804f # warning ********************************************************* # warning ** Since OpenSSL 0.9.8e proxy extension is const. ** # warning ** Hence we can not manipulate it. That means combining** # warning ** it with Globus Toolkit libraries may cause problems ** # warning ** during runtime. Problematic behavior was observed ** # warning ** at least for Globus Toolkit version 4.0. But it was ** # warning ** tested and worked for Globus Toolkit 4.2.1. ** # warning ********************************************************* return true; #else // Use OpenSSL hack to make proxies work with Globus disabled const char* sn = "proxyCertInfo"; const char* gsn = "PROXYCERTINFO"; int nid = OBJ_sn2nid(sn); int gnid = OBJ_sn2nid(gsn); // If Globus proxy extension is present // And if OpenSSL proxy extension is present // And if they are not equal if((gnid > 0) && (nid > 0) && (gnid != nid)) { ASN1_OBJECT* obj = NULL; X509V3_EXT_METHOD* ext = X509V3_EXT_get_nid(nid); X509V3_EXT_METHOD* gext = X509V3_EXT_get_nid(gnid); // Globus object with OpenSSL NID unsigned char tmpbuf[512]; int i = a2d_ASN1_OBJECT(tmpbuf,sizeof(tmpbuf),"1.3.6.1.5.5.7.1.14",-1); if(i > 0) { obj = ASN1_OBJECT_create(nid,tmpbuf,i,gsn,"Proxy Certificate Info Extension"); if(obj != NULL) { gnid = OBJ_add_object(obj); // Merging Globus and OpenSSL extensions - probably dangerous if((ext != NULL) && (gext != NULL)) { gext->ext_nid = gnid; if(ext->d2i == NULL) ext->d2i=gext->d2i; if(ext->i2d == NULL) ext->i2d=gext->i2d; return true; } } } } return false; #endif // OPENSSL_VERSION_NUMBER > 0x0090804f */ } bool GlobusPrepareGSSAPI(void) { Glib::Mutex::Lock lock(lock_); int index = -1; GlobusResult(globus_gsi_callback_get_X509_STORE_callback_data_index(&index)); GlobusResult(globus_gsi_callback_get_SSL_callback_data_index(&index)); return true; } bool GlobusModuleActivate(globus_module_descriptor_t* module) { Glib::Mutex::Lock lock(lock_); return (GlobusResult(globus_module_activate(module))); } } nordugrid-arc-5.4.2/src/hed/libs/globusutils/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315725024077 xustar000000000000000029 mtime=1513200597.80198678 30 atime=1513200648.461606367 30 ctime=1513200660.016747691 nordugrid-arc-5.4.2/src/hed/libs/globusutils/Makefile.in0000644000175000002070000006730413214315725024160 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/globusutils DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libdir)" LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = libarcglobusutils_la_DEPENDENCIES = $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) am_libarcglobusutils_la_OBJECTS = \ libarcglobusutils_la-GlobusErrorUtils.lo \ libarcglobusutils_la-GSSCredential.lo \ libarcglobusutils_la-GlobusWorkarounds.lo libarcglobusutils_la_OBJECTS = $(am_libarcglobusutils_la_OBJECTS) libarcglobusutils_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarcglobusutils_la_CXXFLAGS) $(CXXFLAGS) \ $(libarcglobusutils_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) 
$(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcglobusutils_la_SOURCES) DIST_SOURCES = $(libarcglobusutils_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = 
@GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = 
@XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ lib_LTLIBRARIES = libarcglobusutils.la libarcglobusutils_la_SOURCES = \ GlobusErrorUtils.cpp GlobusErrorUtils.h \ GSSCredential.cpp GSSCredential.h \ GlobusWorkarounds.cpp GlobusWorkarounds.h libarcglobusutils_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) \ $(GLOBUS_COMMON_CFLAGS) \ $(OPENSSL_CFLAGS) \ $(AM_CXXFLAGS) \ $(LIBXML2_CFLAGS) libarcglobusutils_la_LIBADD = \ $(ARCCOMMON_LIBS) \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) \ $(GLOBUS_GSSAPI_GSI_LIBS) $(GLOBUS_GSI_CALLBACK_LIBS) \ $(GLOBUS_COMMON_LIBS) libarcglobusutils_la_LDFLAGS = -version-info 3:0:0 all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/globusutils/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign 
src/hed/libs/globusutils/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcglobusutils.la: $(libarcglobusutils_la_OBJECTS) $(libarcglobusutils_la_DEPENDENCIES) $(libarcglobusutils_la_LINK) -rpath $(libdir) $(libarcglobusutils_la_OBJECTS) $(libarcglobusutils_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcglobusutils_la-GSSCredential.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcglobusutils_la-GlobusErrorUtils.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcglobusutils_la-GlobusWorkarounds.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcglobusutils_la-GlobusErrorUtils.lo: GlobusErrorUtils.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcglobusutils_la_CXXFLAGS) $(CXXFLAGS) -MT libarcglobusutils_la-GlobusErrorUtils.lo -MD -MP -MF $(DEPDIR)/libarcglobusutils_la-GlobusErrorUtils.Tpo -c -o libarcglobusutils_la-GlobusErrorUtils.lo `test -f 'GlobusErrorUtils.cpp' || echo '$(srcdir)/'`GlobusErrorUtils.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcglobusutils_la-GlobusErrorUtils.Tpo $(DEPDIR)/libarcglobusutils_la-GlobusErrorUtils.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='GlobusErrorUtils.cpp' object='libarcglobusutils_la-GlobusErrorUtils.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcglobusutils_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcglobusutils_la-GlobusErrorUtils.lo `test -f 'GlobusErrorUtils.cpp' || echo '$(srcdir)/'`GlobusErrorUtils.cpp libarcglobusutils_la-GSSCredential.lo: GSSCredential.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcglobusutils_la_CXXFLAGS) $(CXXFLAGS) -MT libarcglobusutils_la-GSSCredential.lo -MD -MP -MF $(DEPDIR)/libarcglobusutils_la-GSSCredential.Tpo -c -o libarcglobusutils_la-GSSCredential.lo `test -f 'GSSCredential.cpp' || echo '$(srcdir)/'`GSSCredential.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcglobusutils_la-GSSCredential.Tpo $(DEPDIR)/libarcglobusutils_la-GSSCredential.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='GSSCredential.cpp' object='libarcglobusutils_la-GSSCredential.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcglobusutils_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcglobusutils_la-GSSCredential.lo `test -f 'GSSCredential.cpp' || echo '$(srcdir)/'`GSSCredential.cpp libarcglobusutils_la-GlobusWorkarounds.lo: GlobusWorkarounds.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcglobusutils_la_CXXFLAGS) $(CXXFLAGS) -MT libarcglobusutils_la-GlobusWorkarounds.lo -MD -MP -MF $(DEPDIR)/libarcglobusutils_la-GlobusWorkarounds.Tpo -c -o libarcglobusutils_la-GlobusWorkarounds.lo `test -f 'GlobusWorkarounds.cpp' || echo '$(srcdir)/'`GlobusWorkarounds.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcglobusutils_la-GlobusWorkarounds.Tpo $(DEPDIR)/libarcglobusutils_la-GlobusWorkarounds.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='GlobusWorkarounds.cpp' object='libarcglobusutils_la-GlobusWorkarounds.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX 
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcglobusutils_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcglobusutils_la-GlobusWorkarounds.lo `test -f 'GlobusWorkarounds.cpp' || echo '$(srcdir)/'`GlobusWorkarounds.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(libdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-libLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-libLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libLTLIBRARIES clean-libtool ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am \ install-libLTLIBRARIES install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-libLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/globusutils/PaxHeaders.7502/GlobusErrorUtils.h0000644000000000000000000000012413213471662025475 xustar000000000000000027 mtime=1512993714.552508 27 atime=1513200574.775705 30 ctime=1513200660.018747715 nordugrid-arc-5.4.2/src/hed/libs/globusutils/GlobusErrorUtils.h0000644000175000002070000000263113213471662025544 0ustar00mockbuildmock00000000000000#ifndef __ARC_GLOBUSERRORUTILS_H__ #define __ARC_GLOBUSERRORUTILS_H__ #include #include #ifdef WIN32 #include #endif #include namespace Arc { std::string globus_object_to_string(globus_object_t *err); /// Parse error message, set errorno if possible and return it. int globus_error_to_errno(const std::string& msg, int errorno); std::ostream& operator<<(std::ostream& o, globus_object_t *err); class GlobusResult { public: GlobusResult(); ~GlobusResult(); explicit GlobusResult(const globus_result_t result); GlobusResult& operator=(const globus_result_t result); bool operator==(const GlobusResult& result) const { return (r == result.r); } bool operator!=(const GlobusResult& result) const { return (r != result.r); } operator bool() const { return (r == GLOBUS_SUCCESS); } bool operator!() const { return (r != GLOBUS_SUCCESS); } operator globus_result_t() const { return r; } operator globus_object_t*() const { return o; } std::string str() const; static void wipe(); private: globus_result_t r; globus_object_t* o; GlobusResult(const GlobusResult&); GlobusResult& operator=(const GlobusResult&); }; std::ostream& operator<<(std::ostream& o, const GlobusResult& res); } // namespace Arc #endif // __ARC_GLOBUSERRORUTILS_H__ nordugrid-arc-5.4.2/src/hed/libs/globusutils/PaxHeaders.7502/GSSCredential.cpp0000644000000000000000000000012413213471676025176 xustar000000000000000027 mtime=1512993726.134971 27 atime=1513200574.776705 30 ctime=1513200660.019747727 nordugrid-arc-5.4.2/src/hed/libs/globusutils/GSSCredential.cpp0000644000175000002070000001116113213471676025243 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "GlobusErrorUtils.h" #include "GSSCredential.h" namespace Arc { static Logger logger(Logger::getRootLogger(), "GSSCredential"); GSSCredential::GSSCredential(const std::string& proxyPath, const std::string& certificatePath, const std::string& keyPath) : credential(GSS_C_NO_CREDENTIAL) { initCred(readCredFromFiles(proxyPath, certificatePath, keyPath)); } GSSCredential::GSSCredential(const UserConfig& usercfg) : credential(GSS_C_NO_CREDENTIAL) { if (!usercfg.CredentialString().empty()) initCred(usercfg.CredentialString()); else initCred(readCredFromFiles(usercfg.ProxyPath(), usercfg.CertificatePath(), usercfg.KeyPath())); } std::string GSSCredential::readCredFromFiles(const std::string& proxyPath, const std::string& certificatePath, const std::string& keyPath) { std::string credbuf; if (!proxyPath.empty()) { std::ifstream is(proxyPath.c_str()); getline(is, credbuf, '\0'); if(!is || credbuf.empty()) { logger.msg(ERROR, "Failed to read proxy file: %s", proxyPath); return ""; } } else if (!certificatePath.empty() && !keyPath.empty()) { std::ifstream is(certificatePath.c_str()); getline(is, credbuf, '\0'); if(!is || credbuf.empty()) { logger.msg(ERROR, "Failed to read certificate file: %s", certificatePath); return ""; } std::string keybuf; std::ifstream ik(keyPath.c_str()); getline(ik, keybuf, '\0'); if(!ik || keybuf.empty()) { logger.msg(ERROR, "Failed to read private key file: %s", keyPath); return ""; } credbuf += "\n"; credbuf += keybuf; } return credbuf; 
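  /* Note added for illustration (not in the original source): the
     Arc::GlobusResult wrapper declared in GlobusErrorUtils.h above is what
     initCred() below uses around Globus/GSSAPI status codes.  Wrapping a
     globus_result_t in it both tests for success and retrieves/frees the
     cached Globus error object, e.g. (sketch; 'logger' stands for any
     Arc::Logger instance):

       Arc::GlobusResult res(globus_module_activate(GLOBUS_COMMON_MODULE));
       if(!res) logger.msg(Arc::ERROR, "Activation failed: %s", res.str());
  */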
} void GSSCredential::initCred(const std::string& credbuf) { if(credbuf.empty()) return; //Convert to GSS credental only if find credential content OM_uint32 majstat = 0; OM_uint32 minstat = GLOBUS_SUCCESS; gss_buffer_desc gbuf; gbuf.value = (void*)credbuf.c_str(); gbuf.length = credbuf.length(); majstat = gss_import_cred(&minstat, &credential, NULL, 0, &gbuf, GSS_C_INDEFINITE, NULL); GlobusResult gstat(minstat); if (GSS_ERROR(majstat)) { credential = GSS_C_NO_CREDENTIAL; logger.msg(ERROR, "Failed to convert GSI credential to " "GSS credential (major: %d, minor: %d)%s:%s", majstat, minstat, ErrorStr(majstat,0), gstat.str()); } } GSSCredential::~GSSCredential() { if (credential != GSS_C_NO_CREDENTIAL) { OM_uint32 majstat = 0; OM_uint32 minstat = GLOBUS_SUCCESS; majstat = gss_release_cred(&minstat, &credential); GlobusResult gstat(minstat); if (GSS_ERROR(majstat)) { logger.msg(DEBUG, "Failed to release GSS credential " "(major: %d, minor: %d):%s:%s", majstat, minstat, ErrorStr(majstat,0), gstat.str()); } } } GSSCredential::operator gss_cred_id_t&() { return credential; } GSSCredential::operator gss_cred_id_t*() { return &credential; } std::string GSSCredential::ErrorStr(OM_uint32 majstat, OM_uint32 /*minstat*/) { std::string errstr; if(majstat & GSS_S_BAD_MECH) errstr+=":GSS_S_BAD_MECH"; if(majstat & GSS_S_BAD_NAME) errstr+=":GSS_S_BAD_NAME"; if(majstat & GSS_S_BAD_NAMETYPE) errstr+=":GSS_S_BAD_NAMETYPE"; if(majstat & GSS_S_BAD_BINDINGS) errstr+=":GSS_S_BAD_BINDINGS"; if(majstat & GSS_S_BAD_STATUS) errstr+=":GSS_S_BAD_STATUS"; if(majstat & GSS_S_BAD_SIG) errstr+=":GSS_S_BAD_SIG"; if(majstat & GSS_S_NO_CRED) errstr+=":GSS_S_NO_CRED"; if(majstat & GSS_S_NO_CONTEXT) errstr+=":GSS_S_NO_CONTEXT"; if(majstat & GSS_S_DEFECTIVE_TOKEN) errstr+=":GSS_S_DEFECTIVE_TOKEN"; if(majstat & GSS_S_DEFECTIVE_CREDENTIAL) errstr+=":GSS_S_DEFECTIVE_CREDENTIAL"; if(majstat & GSS_S_CREDENTIALS_EXPIRED) errstr+=":GSS_S_CREDENTIALS_EXPIRED"; if(majstat & GSS_S_CONTEXT_EXPIRED) errstr+=":GSS_S_CONTEXT_EXPIRED"; if(majstat & GSS_S_FAILURE) errstr+=":GSS_S_FAILURE"; if(majstat & GSS_S_BAD_QOP) errstr+=":GSS_S_BAD_QOP"; if(majstat & GSS_S_UNAUTHORIZED) errstr+=":GSS_S_UNAUTHORIZED"; if(majstat & GSS_S_UNAVAILABLE) errstr+=":GSS_S_UNAVAILABLE"; if(majstat & GSS_S_DUPLICATE_ELEMENT) errstr+=":GSS_S_DUPLICATE_ELEMENT"; if(majstat & GSS_S_NAME_NOT_MN) errstr+=":GSS_S_NAME_NOT_MN"; if(majstat & GSS_S_EXT_COMPAT) errstr+=":GSS_S_EXT_COMPAT"; return errstr; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/globusutils/PaxHeaders.7502/GlobusWorkarounds.h0000644000000000000000000000012411547215745025707 xustar000000000000000027 mtime=1302141925.565586 27 atime=1513200574.776705 30 ctime=1513200660.023747776 nordugrid-arc-5.4.2/src/hed/libs/globusutils/GlobusWorkarounds.h0000644000175000002070000000105111547215745025751 0ustar00mockbuildmock00000000000000#include namespace Arc { // Workaround for Globus adding own proxy object. It is not needed anymore. bool GlobusRecoverProxyOpenSSL(void); // Initializes OpenSSL indices used in Globus. This solves problem // of race condition in Globus and bug which is triggered by that // race condition. bool GlobusPrepareGSSAPI(void); // Wrapper for globus_module_activate() function to avoid // race conditions in some Globus module activation functions. 
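  // Typical use (sketch): activate Globus modules through this helper instead of
  // calling globus_module_activate() directly, e.g.
  //   Arc::GlobusModuleActivate(GLOBUS_COMMON_MODULE);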
bool GlobusModuleActivate(globus_module_descriptor_t* module); } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/globusutils/PaxHeaders.7502/GlobusErrorUtils.cpp0000644000000000000000000000012413213471662026030 xustar000000000000000027 mtime=1512993714.552508 27 atime=1513200574.776705 30 ctime=1513200660.017747703 nordugrid-arc-5.4.2/src/hed/libs/globusutils/GlobusErrorUtils.cpp0000644000175000002070000001265613213471662026107 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "GlobusErrorUtils.h" namespace Arc { static Logger logger(Logger::getRootLogger(), "GLOBUS"); // When Globus code fails it allocates error object, assigns it to cache // and returns cache index as error code. If object is not retrived // from cache it is effectiviely leaked. So each error code must be passed // through though globus_error_get() (or even better GlobusResult class // implemented here). Unfortunately Globus code itself does not always // follow this pattern and hence is effectively leaking memory. // Here we have adjust_last_processed() trying to pick up such lost // error object and free them. static Glib::StaticMutex last_processed_lock = GLIBMM_STATIC_MUTEX_INIT; static globus_result_t last_processed = GLOBUS_SUCCESS+1; static const long int safety_gap = 10; // leave 10 last errors in cache in case something still needs them static const long int processing_limit = 100; // process no more than 100 lost error object at once static void adjust_last_processed(globus_result_t new_result) { last_processed_lock.lock(); long int diff = (long int)(new_result-last_processed); if(diff > processing_limit) diff = processing_limit; while(diff > safety_gap) { globus_object_t* o = globus_error_get(last_processed); if((o != NULL) && (o != GLOBUS_ERROR_NO_INFO)) { globus_object_free(o); } ++last_processed; --diff; } last_processed_lock.unlock(); } void GlobusResult::wipe() { GlobusResult(globus_error_put(globus_error_construct_error(GLOBUS_COMMON_MODULE, GLOBUS_NULL, 0, "", "", 0, ""))); } GlobusResult::GlobusResult() : r(GLOBUS_SUCCESS), o(NULL) { } GlobusResult::GlobusResult(const globus_result_t result) : r(result), o(NULL) { if((r != GLOBUS_SUCCESS) && (r != GLOBUS_FAILURE)) { o = globus_error_get(r); if(o == GLOBUS_ERROR_NO_INFO) o = NULL; adjust_last_processed(r); } } GlobusResult::~GlobusResult() { if(o) globus_object_free(o); } GlobusResult& GlobusResult::operator=(const globus_result_t result) { if(o) globus_object_free(o); o = NULL; r = result; if((r != GLOBUS_SUCCESS) && (r != GLOBUS_FAILURE)) { o = globus_error_get(r); if(o == GLOBUS_ERROR_NO_INFO) o = NULL; adjust_last_processed(r); } return *this; } std::string GlobusResult::str() const { if (r == GLOBUS_SUCCESS) return ""; std::string s; for (globus_object_t *err_ = o; err_; err_ = globus_error_base_get_cause(err_)) { if (err_ != o) s += "/"; char *tmp = globus_object_printable_to_string(err_); if (tmp) { s += tmp; free(tmp); } else s += "unknown error"; } return trim(s); } std::ostream& operator<<(std::ostream& o, const GlobusResult& res) { if (res) return (o << ""); globus_object_t *err = static_cast(res); for (globus_object_t *err_ = err; err_; err_ = globus_error_base_get_cause(err_)) { if (err_ != err) o << "/"; char *tmp = globus_object_printable_to_string(err_); if (tmp) { o << tmp; free(tmp); } else o << "unknown error"; } return o; } std::ostream& operator<<(std::ostream& o, globus_object_t *err) { if (err == GLOBUS_NULL) return (o << ""); for (globus_object_t *err_ = err; err_; err_ = 
globus_error_base_get_cause(err_)) { if (err_ != err) o << "/"; char *tmp = globus_object_printable_to_string(err_); if (tmp) { o << tmp; free(tmp); } else o << "unknown error"; } return o; } std::string globus_object_to_string(globus_object_t *err) { if (err == GLOBUS_NULL) return ""; std::string s; for (globus_object_t *err_ = err; err_; err_ = globus_error_base_get_cause(err_)) { if (err_ != err) s += "/"; char *tmp = globus_object_printable_to_string(err_); if (tmp) { s += tmp; free(tmp); } else s += "unknown error"; } return s; } int globus_error_to_errno(const std::string& msg, int errorno) { // parse the message and try to detect certain errors. If none found leave // errorno unchanged. There is no guarantee that Globus won't change error // messages but there is no other way to determine the reason for errors. if (lower(msg).find("no such file") != std::string::npos) return ENOENT; if (lower(msg).find("object unavailable") != std::string::npos) return ENOENT; if (lower(msg).find("object not available") != std::string::npos) return ENOENT; if (lower(msg).find("no such job") != std::string::npos) return ENOENT; if (lower(msg).find("file unavailable") != std::string::npos) return ENOENT; if (lower(msg).find("file exists") != std::string::npos) return EEXIST; if (lower(msg).find("file not allowed") != std::string::npos) return EACCES; if (lower(msg).find("permission denied") != std::string::npos) return EACCES; if (lower(msg).find("failed authenticating")!= std::string::npos) return EACCES; if (lower(msg).find("can't make") != std::string::npos) return EACCES; if (lower(msg).find("directory not empty") != std::string::npos) return ENOTEMPTY; if (lower(msg).find("do not understand") != std::string::npos) return EOPNOTSUPP; return errorno; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/globusutils/PaxHeaders.7502/README0000644000000000000000000000012411110031017022672 xustar000000000000000027 mtime=1226846735.746351 27 atime=1513200574.775705 30 ctime=1513200660.013747654 nordugrid-arc-5.4.2/src/hed/libs/globusutils/README0000644000175000002070000000013411110031017022735 0ustar00mockbuildmock00000000000000This directory collects common utilities used by plugins that depend on the globus toolkit. 
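As an illustration of how these utilities are typically used, the sketch below wraps a globus_result_t in Arc::GlobusResult so that the underlying error object is retrieved and freed, prints the error chain, and maps the error message to an errno value. It is a minimal, stand-alone sketch (the report() function and its caller are hypothetical, not part of this library); it assumes the Globus development headers are available and that the relevant Globus module has already been activated.

#include <cerrno>
#include <iostream>
#include <string>

#include "GlobusErrorUtils.h"

// Illustrative helper: convert and report the result of a Globus call.
void report(globus_result_t raw) {
  Arc::GlobusResult res(raw);            // retrieves and owns the Globus error object
  if (!res) {                            // !res is true when raw != GLOBUS_SUCCESS
    std::string msg = res.str();         // printable error chain, e.g. "a/b/c"
    int err = Arc::globus_error_to_errno(msg, EIO);   // best-effort errno, EIO as fallback
    std::cerr << res << " (errno " << err << ")" << std::endl;   // uses Arc::operator<<
  }
}

Because GlobusResult frees the error object in its destructor, passing results through it also avoids the error-object leak described in GlobusErrorUtils.cpp.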
nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/message0000644000000000000000000000013213214316023021016 xustar000000000000000030 mtime=1513200659.032735656 30 atime=1513200668.721854157 30 ctime=1513200659.032735656 nordugrid-arc-5.4.2/src/hed/libs/message/0000755000175000002070000000000013214316023021141 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712231165602023136 xustar000000000000000027 mtime=1382345602.148497 30 atime=1513200598.188991513 30 ctime=1513200659.018735485 nordugrid-arc-5.4.2/src/hed/libs/message/Makefile.am0000644000175000002070000000167512231165602023211 0ustar00mockbuildmock00000000000000DIST_SUBDIRS = test SUBDIRS = $(TEST_DIR) lib_LTLIBRARIES = libarcmessage.la libarcmessage_ladir = $(pkgincludedir)/message libarcmessage_la_HEADERS = SOAPEnvelope.h PayloadRaw.h PayloadSOAP.h \ PayloadStream.h MCC_Status.h MCC.h Service.h Plexer.h \ MessageAttributes.h Message.h SOAPMessage.h MessageAuth.h \ SecAttr.h MCCLoader.h SecHandler.h libarcmessage_la_SOURCES = SOAPEnvelope.cpp PayloadRaw.cpp PayloadSOAP.cpp \ PayloadStream.cpp MCC_Status.cpp MCC.cpp Service.cpp Plexer.cpp \ MessageAttributes.cpp Message.cpp SOAPMessage.cpp MessageAuth.cpp \ SecAttr.cpp MCCLoader.cpp SecHandler.cpp libarcmessage_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcmessage_la_LIBADD = \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) libarcmessage_la_LDFLAGS = -version-info 3:0:0 nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/SecHandler.cpp0000644000000000000000000000012412110401544023605 xustar000000000000000027 mtime=1361183588.521754 27 atime=1513200574.796705 30 ctime=1513200659.031735643 nordugrid-arc-5.4.2/src/hed/libs/message/SecHandler.cpp0000644000175000002070000000245612110401544023661 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif // SecHandler.cpp #include "SecHandler.h" namespace ArcSec{ Arc::Logger SecHandler::logger(Arc::Logger::rootLogger, "SecHandler"); SecHandlerConfig::SecHandlerConfig(const std::string& name,const std::string& event,const std::string& id):Arc::XMLNode("") { NewAttribute("name")=name; if(!event.empty()) NewAttribute("event")=event; if(!id.empty()) NewAttribute("id")=id; } SecHandlerStatus::SecHandlerStatus(void): code(STATUS_DENY) { } SecHandlerStatus::SecHandlerStatus(bool positive): code(positive?STATUS_ALLOW:STATUS_DENY) { } SecHandlerStatus::SecHandlerStatus(int code_): code(code_) { } SecHandlerStatus::SecHandlerStatus(int code_, const std::string& explanation_): code(code_),explanation(explanation_) { } SecHandlerStatus::SecHandlerStatus(int code_,const std::string& origin_,const std::string& explanation_): code(code_),origin(origin_),explanation(explanation_) { } int SecHandlerStatus::getCode(void) const { return code; } const std::string& SecHandlerStatus::getOrigin(void) const { return origin; } const std::string& SecHandlerStatus::getExplanation(void) const { return explanation; } } nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/MessageAuth.cpp0000644000000000000000000000012313103157402024006 xustar000000000000000026 mtime=1494015746.76441 27 atime=1513200574.793705 30 ctime=1513200659.028735607 nordugrid-arc-5.4.2/src/hed/libs/message/MessageAuth.cpp0000644000175000002070000003213713103157402024062 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include 
"MessageAuth.h" namespace Arc { MessageAuth::MessageAuth(void):attrs_created_(true) { } MessageAuth::~MessageAuth(void) { if(!attrs_created_) return; std::map::iterator attr = attrs_.begin(); for(;attr != attrs_.end();++attr) { if(attr->second) delete attr->second; }; } void MessageAuth::set(const std::string& key, SecAttr* value) { if(!attrs_created_) return; std::map::iterator attr = attrs_.find(key); if(attr == attrs_.end()) { attrs_[key]=value; } else { if(attr->second != value) { if(attr->second) delete attr->second; attr->second=value; }; }; } void MessageAuth::remove(const std::string& key) { std::map::iterator attr = attrs_.find(key); if(attr != attrs_.end()) { if(attrs_created_) if(attr->second) delete attr->second; attrs_.erase(attr); }; } SecAttr* MessageAuth::get(const std::string& key) { std::map::iterator attr = attrs_.find(key); if(attr == attrs_.end()) return NULL; return attr->second; } class _XMLPair { public: XMLNode element; XMLNode context; _XMLPair(XMLNode e,XMLNode c):element(e),context(c) { }; }; void copy_xml_elements(XMLNode item,XMLNode elements) { for(;(bool)elements;++elements) { item.NewChild(elements); }; } // All permutations of Subject, Resource, Action elements are generated. // Attribute sub-elements get collected in single Element (Subject, Condition). // Each element withoit Attribute sub-elements are put into separate // RequestItem. Attributes of Condition are collected inside single // Condition element in every RequestItem if it comes from same source // as corresponding Subject, Resource or Action. // All generated content is merged to existing content in val variable. // TODO: Avoid duplicate Context attributes bool MessageAuth::Export(SecAttrFormat format,XMLNode &val) const { if(format == SecAttr::ARCAuth) { // Making XML document top level Request element NS ns; ns["ra"]="http://www.nordugrid.org/schemas/request-arc"; if(!val) XMLNode(ns,"ra:Request").New(val); XMLNode newreq = val; newreq.Namespaces(ns); newreq.Name("ra:Request"); XMLNodeContainer xmls; std::list<_XMLPair> subjects; std::list<_XMLPair> resources; std::list<_XMLPair> actions; // Collecting elements from previously generated request for(XMLNode item = newreq["RequestItem"];(bool)item;++item) { for(XMLNode subject = item["Subject"];(bool)subject;++subject) { subjects.push_back(_XMLPair(subject,item["Context"])); }; for(XMLNode resource = item["Resource"];(bool)resource;++resource) { resources.push_back(_XMLPair(resource,item["Context"])); }; for(XMLNode action = item["Action"];(bool)action;++action) { actions.push_back(_XMLPair(action,item["Context"])); }; }; int subjects_new = subjects.size(); int resources_new = resources.size(); int actions_new = actions.size(); // Getting XMLs from all SecAttr std::map::const_iterator attr = attrs_.begin(); for(;attr != attrs_.end();++attr) { xmls.AddNew(XMLNode(ns,"")); XMLNode r = xmls[xmls.Size()-1]; if(!(attr->second)) return false; if(!(attr->second->Export(format,r))) return false; //std::string str; //r.GetXML(str); //std::cout<<"SecAttr: ++++ Name: "<first<<"XML: "<::iterator subject = subjects.begin(); for(int subject_n = 0;;++subject_n,++subject) { if(subject_n < subjects_new) continue; if(subject == subjects.end()) break; if(subject->element.Size() > 0) { copy_xml_elements(new_subject,subject->element["SubjectAttribute"]); copy_xml_elements(new_context,subject->context["ContextAttribute"]); }; }; }; // Add new subject into existing ones - assuming all // already existing subjests are the same. 
{ std::list<_XMLPair>::iterator subject = subjects.begin(); for(int subject_n = 0;;++subject_n,++subject) { if(subject_n >= subjects_new) break; if(subject == subjects.end()) break; copy_xml_elements(subject->element,new_subject["SubjectAttribute"]); copy_xml_elements(subject->context,new_subject["ContextAttribute"]); }; }; // Use one of existing old subjects as template for new // elements (if present) if(subjects_new > 0) { new_subject=subjects.begin()->element; new_context=subjects.begin()->context; }; // Create all permutations of Action and Resource elements std::list<_XMLPair>::iterator action = actions.begin(); for(int action_n = 0;;++action_n) { std::list<_XMLPair>::iterator resource = resources.begin(); for(int resource_n = 0;;++resource_n) { if((action_n < actions_new) && (resource_n < resources_new)) { if(resources.size()) ++resource; if(resource == resources.end()) break; continue; // This combination is already in request }; XMLNode newitem = newreq.NewChild("ra:RequestItem"); XMLNode newctx = newitem.NewChild("ra:Context"); if(new_subject.Size() > 0) { newitem.NewChild(new_subject); copy_xml_elements(newctx,new_context["ContextAttribute"]); }; if(action != actions.end()) { newitem.NewChild(action->element); copy_xml_elements(newctx,action->context["ContextAttribute"]); }; if(resource != resources.end()) { newitem.NewChild(resource->element); copy_xml_elements(newitem,resource->context["ContextAttribute"]); }; if(resources.size()) ++resource; if(resource == resources.end()) break; }; if(actions.size()) ++action; if(action == actions.end()) break; }; return true; } else if(format == SecAttr::XACML) { // Making XML document top level Request element /* XACML request is like this: ...... ...... ...... ...... */ NS ns; ns["ra"]="urn:oasis:names:tc:xacml:2.0:context:schema:os"; if(!val) XMLNode(ns,"ra:Request").New(val); XMLNode newreq = val; newreq.Namespaces(ns); newreq.Name("ra:Request"); XMLNodeContainer xmls; // Getting XMLs from all SecAttr std::map::const_iterator attr = attrs_.begin(); for(;attr != attrs_.end();++attr) { xmls.AddNew(XMLNode(ns,"")); XMLNode r = xmls[xmls.Size()-1]; if(!(attr->second)) return false; if(!(attr->second->Export(format,r))) return false; }; // Merge all collected elements into single request XMLNode subject = newreq["Subject"]; if(!subject) subject = newreq.NewChild("Subject"); XMLNode resource = newreq["Resource"]; if(!resource) resource = newreq.NewChild("Resource"); XMLNode action = newreq["Action"]; if(!action) action = newreq.NewChild("Action"); XMLNode environment = newreq["Environment"]; if(!environment) environment = newreq.NewChild("Environment"); for(int i=0; i::const_iterator attr = attrs_.begin(); for(;attr != attrs_.end();++attr) { if(!(attr->second)) return false; if(!(attr->second->Export(format,val))) continue; // GACL support is not mandatory }; return true; } return false; } #if 0 static void add_new_elements(XMLNode item,XMLNode element) { for(;(bool)element;++element) item.NewChild(element); } // Subject, Resource, Action, Context RequestItem in each component // goes into one separated output . 
// The Subject from all of the SecAttr goes into every output , // because there is only one request entity (with a few SubjectAttribute) //for the incoming message chain bool MessageAuth::Export(SecAttrFormat format,XMLNode &val) const { // Currently only ARCAuth is supported if(format != SecAttr::ARCAuth) return false; NS ns; ns["ra"]="http://www.nordugrid.org/schemas/request-arc"; XMLNode newreq = val; newreq.Namespaces(ns); newreq.Name("ra:Request"); //A specific RequestItem for collecting all of the from //differenc SecAttr XMLNode subjects(ns,"ra:RequestItem"); std::map::const_iterator attr = attrs_.begin(); for(;attr != attrs_.end();++attr) { XMLNode r(ns,""); if(!(attr->second)) return false; if(!(attr->second->Export(format,r))) return false; XMLNode item, newitem; for(item = r["RequestItem"];(bool)item;++item) { //If there ["Resource"] or ["Action"] inside input ["RequestItem"], we generate a new //["RequstItem"] for output. if(((bool)item) && ( ((bool)(item["Resource"])) || ((bool)(item["Action"])) )) newitem=newreq.NewChild("ra:RequestItem"); //Collect all of the s. Since in HED each incoming/outgoing message //is supposed to implicitly have only one meaningful entity(Subject), it doesn't hurt //if we put all of the s together and merge(later) them into one (with //a number of s from different original s). add_new_elements(subjects,item["Subject"]); // is also collected. add_new_elements(subjects,item["Context"]); //We store into the just generated new // by keeping the original organizing shape. //Notice that we do not put the into new now. if( ((bool)(item["Resource"])) || ((bool)(item["Action"])) ) { add_new_elements(newitem,item["Resource"]); add_new_elements(newitem,item["Action"]); add_new_elements(newitem,item["Context"]); }; }; }; //Reset the namespaces to delete the namespaces inside subnode subjects.Namespaces(ns); //Merge the s into one XMLNode new_subject(ns, "ra:Subject"); XMLNode sub = subjects["ra:Subject"]; for(sub = subjects["Subject"];(bool)sub;++sub) { add_new_elements(new_subject, sub["SubjectAttribute"]); } //Merge the s into one XMLNode new_context(ns, "ra:Context"); for(XMLNode ctx = subjects["ra:Context"];(bool)ctx;++ctx) { add_new_elements(new_context, ctx["ContextAttribute"]); } //If finally, there is not any output ["RequestItem"], we use the just generated new . //This is the case: There is MCCTLS SecAttr which has no elements except ["Subject"], and //there is no other SecAttr exist. // is also added XMLNode item = newreq["ra:RequestItem"]; if(!item) { XMLNode newitem = newreq.NewChild("ra:RequestItem"); add_new_elements(newitem, new_subject); add_new_elements(newitem, new_context); } //Put the new into each new for(;(bool)item;++item) { add_new_elements(item, new_subject); }; //Reset the namespaces to delete the namespace inside subnode. 
newreq.Namespaces(ns); return true; } #endif MessageAuth* MessageAuth::Filter(const std::list& selected_keys,const std::list& rejected_keys) { MessageAuth* newauth = new MessageAuth; newauth->attrs_created_=false; if(selected_keys.empty()) { newauth->attrs_=attrs_; } else { for(std::list::const_iterator key = selected_keys.begin(); key!=selected_keys.end();++key) { std::map::const_iterator attr = attrs_.find(*key); if((attr != attrs_.end()) && (attr->second != NULL)) newauth->attrs_[*key]=attr->second; }; }; if(!rejected_keys.empty()) { for(std::list::const_iterator key = rejected_keys.begin(); key!=rejected_keys.end();++key) { newauth->remove(*key); }; }; return newauth; } } nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/MCC_Status.h0000644000000000000000000000012411167602504023221 xustar000000000000000027 mtime=1239352644.068852 27 atime=1513200574.824705 30 ctime=1513200659.010735387 nordugrid-arc-5.4.2/src/hed/libs/message/MCC_Status.h0000644000175000002070000000621111167602504023266 0ustar00mockbuildmock00000000000000// MCC_Status.h #ifndef __MCC_Status__ #define __MCC_Status__ #include namespace Arc { //! Status kinds (types) /*! This enum defines a set of possible status kinds. */ enum StatusKind { STATUS_UNDEFINED = 0, //! Default status - undefined error STATUS_OK = 1, //! No error GENERIC_ERROR = 2, //! Error does not fit any class PARSING_ERROR = 4, //! Error detected while parsing request/response PROTOCOL_RECOGNIZED_ERROR = 8, //! Message does not fit into expected protocol UNKNOWN_SERVICE_ERROR = 16, //! There is no destination configured for this message BUSY_ERROR = 32, //! Message can't be processed now SESSION_CLOSE = 64 //! Higher level protocol needs session to be closed }; //! Conversion to string. /*! Conversion from StatusKind to string. @param kind The StatusKind to convert. */ std::string string(StatusKind kind); //! A class for communication of MCC processing results /*! This class is used to communicate result status between MCCs. It contains a status kind, a string specifying the origin (MCC) of the status object and an explanation. */ class MCC_Status { public: //! The constructor /*! Creates a MCC_Status object. @param kind The StatusKind (default: STATUS_UNDEFINED) @param origin The origin MCC (default: "???") @param explanation An explanation (default: "No explanation.") */ MCC_Status(StatusKind kind = STATUS_UNDEFINED, const std::string& origin = "???", const std::string& explanation = "No explanation."); //! Is the status kind ok? /*! This method returns true if the status kind of this object is STATUS_OK @return true if kind==STATUS_OK */ bool isOk() const; //! Returns the status kind. /*! Returns the status kind of this object. @return The status kind of this object. */ StatusKind getKind() const; //! Returns the origin. /*! This method returns a string specifying the origin MCC of this object. @return A string specifying the origin MCC of this object. */ const std::string& getOrigin() const; //! Returns an explanation. /*! This method returns an explanation of this object. @return An explanation of this object. */ const std::string& getExplanation() const; //! Conversion to string. /*! This operator converts a MCC_Status object to a string. */ operator std::string() const; //! Is the status kind ok? /*! This method returns true if the status kind of this object is STATUS_OK @return true if kind==STATUS_OK */ operator bool(void) const { return isOk(); }; //! not operator /*! 
Returns true if the status kind is not OK @return true if kind!=STATUS_OK */ bool operator!(void) const { return !isOk(); }; private: //! The kind (type) of status. StatusKind kind; //! A string describing the origin MCC of this object. std::string origin; //! An explanation of this object. std::string explanation; }; } #endif nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/PayloadStream.h0000644000000000000000000000012412052517637024027 xustar000000000000000027 mtime=1353359263.521199 27 atime=1513200574.796705 30 ctime=1513200659.009735374 nordugrid-arc-5.4.2/src/hed/libs/message/PayloadStream.h0000644000175000002070000001013412052517637024073 0ustar00mockbuildmock00000000000000#ifndef __ARC_PAYLOADSTREAM_H__ #define __ARC_PAYLOADSTREAM_H__ #include #include "Message.h" namespace Arc { /// Stream-like Payload for Message object. /** This class is a virtual interface for managing stream-like source and destination. It's supposed to be passed through MCC chain as payload of Message. It must be treated by MCCs and Services as dynamic payload. */ class PayloadStreamInterface: virtual public MessagePayload { public: // Avoid defining size of int - just use biggest possible typedef signed long long int Size_t; PayloadStreamInterface(void) { }; virtual ~PayloadStreamInterface(void) { }; /** Extracts information from stream up to 'size' bytes. 'size' contains number of read bytes on exit. Returns true in case of success. */ virtual bool Get(char* buf,int& size) = 0; /** Read as many as possible (sane amount) of bytes into buf. Implemented through call to Get(char*,int). */ virtual bool Get(std::string& buf); /** Read and return as many as possible (sane amount) of bytes. Implemented through call to Get(std::string&). */ virtual std::string Get(void); /** Read up to 'size' bytes and pass them to 'dest'. 'size' contains number of read bytes on exit. If on input 'size' contains -1 then as much as possible is transfered. This method is both for convenience and for making it possible to have optimized implementations. */ virtual bool Get(PayloadStreamInterface& dest,int& size); /** Push 'size' bytes from 'buf' into stream. Returns true on success. */ virtual bool Put(const char* buf,Size_t size) = 0; /** Push information from 'buf' into stream. Returns true on success. Implemented though call to Put(const char*,Size_t). */ virtual bool Put(const std::string& buf); /** Push null terminated information from 'buf' into stream. Returns true on success. Implemented though call to Put(const char*,Size_t). */ virtual bool Put(const char* buf); /** Push 'size' bytes from 'source' into stream. If on 'size' contains -1 then as much as possible is transfered. This method is both for convenience and for making it possible to have optimized implementations. */ virtual bool Put(PayloadStreamInterface& source,Size_t size); /** Returns true if stream is valid. */ virtual operator bool(void) = 0; /** Returns true if stream is invalid. */ virtual bool operator!(void) = 0; /** Query current timeout for Get() and Put() operations. */ virtual int Timeout(void) const = 0; /** Set current timeout for Get() and Put() operations. */ virtual void Timeout(int to) = 0; /** Returns current position in stream if supported. */ virtual Size_t Pos(void) const = 0; /** Returns size of underlying object if supported. */ virtual Size_t Size(void) const = 0; /** Returns position at which stream reading will stop if supported. 
That may be not same as Size() if instance is meant to provide access to only part of underlying obejct. */ virtual Size_t Limit(void) const = 0; }; /// POSIX handle as Payload /** This is an implemetation of PayloadStreamInterface for generic POSIX handle. */ class PayloadStream: virtual public PayloadStreamInterface { protected: int timeout_; /** Timeout for read/write operations */ int handle_; /** Handle for operations */ bool seekable_; /** true if lseek operation is applicable to open handle */ public: /** Constructor. Attaches to already open handle. Handle is not managed by this class and must be closed by external code. */ PayloadStream(int h = -1); /** Destructor. */ virtual ~PayloadStream(void) { }; virtual bool Get(char* buf,int& size); virtual bool Put(const char* buf,Size_t size); virtual operator bool(void) { return (handle_ != -1); }; virtual bool operator!(void) { return (handle_ == -1); }; virtual int Timeout(void) const { return timeout_; }; virtual void Timeout(int to) { timeout_=to; }; virtual Size_t Pos(void) const { return 0; }; virtual Size_t Size(void) const { return 0; }; virtual Size_t Limit(void) const { return 0; }; }; } #endif /* __ARC_PAYLOADSTREAM_H__ */ nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/PayloadSOAP.h0000644000000000000000000000012411174366545023342 xustar000000000000000027 mtime=1240591717.919273 27 atime=1513200574.799705 30 ctime=1513200659.008735362 nordugrid-arc-5.4.2/src/hed/libs/message/PayloadSOAP.h0000644000175000002070000000156411174366545023415 0ustar00mockbuildmock00000000000000#ifndef __ARC_PAYLOADSOAP_H__ #define __ARC_PAYLOADSOAP_H__ #include "Message.h" #include "SOAPEnvelope.h" namespace Arc { /// Payload of Message with SOAP content. /** This class combines MessagePayload with SOAPEnvelope to make it possible to pass SOAP messages through MCC chain. */ class PayloadSOAP: public SOAPEnvelope, virtual public MessagePayload { public: /** Constructor - creates new Message payload */ PayloadSOAP(const NS& ns,bool fault = false); /** Constructor - creates Message payload from SOAP document. Provided SOAP document is copied to new object. */ PayloadSOAP(const SOAPEnvelope& soap); /** Constructor - creates SOAP message from payload. PayloadRawInterface and derived classes are supported. */ PayloadSOAP(const MessagePayload& source); virtual ~PayloadSOAP(void); }; } // namespace Arc #endif /* __ARC_PAYLOADSOAP_H__ */ nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/SecHandler.h0000644000000000000000000000012412110401544023252 xustar000000000000000027 mtime=1361183588.521754 27 atime=1513200574.826705 30 ctime=1513200659.017735472 nordugrid-arc-5.4.2/src/hed/libs/message/SecHandler.h0000644000175000002070000000530412110401544023321 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_SECHANDLER_H__ #define __ARC_SEC_SECHANDLER_H__ #include #include #include #include namespace Arc { class ChainContext; } namespace ArcSec { class SecHandlerStatus { public: enum { STATUS_ALLOW = 0, STATUS_DENY = 1 } Code; SecHandlerStatus(void); SecHandlerStatus(bool positive); SecHandlerStatus(int code); SecHandlerStatus(int code, const std::string& explanation); SecHandlerStatus(int code, const std::string& origin, const std::string& explanation); operator bool(void) const { return (code == 0); }; int getCode(void) const; const std::string& getOrigin(void) const; const std::string& getExplanation(void) const; operator std::string(void) const; private: //! The code status. 0 always stands for positive. int code; //! 
A string describing the origin SHC of this object. std::string origin; //! A user-friendly explanation of this object. std::string explanation; }; /// Base class for simple security handling plugins /** This virtual class defines method Handle() which processes security related information/attributes in Message and optionally makes security decision. Instances of such classes are normally arranged in chains abd are called on incoming and outgoing messages in various MCC and Service plugins. Return value of Handle() defines either processing should continie (true) or stop with error (false). Configuration of SecHandler is consumed during creation of instance through XML subtree fed to constructor. */ class SecHandler: public Arc::Plugin { public: SecHandler(Arc::Config*, Arc::PluginArgument* arg):Arc::Plugin(arg) {}; virtual ~SecHandler() {}; virtual SecHandlerStatus Handle(Arc::Message *msg) const = 0; protected: static Arc::Logger logger; }; #define SecHandlerPluginKind ("HED:SHC") class SecHandlerPluginArgument: public Arc::PluginArgument { private: Arc::Config* config_; Arc::ChainContext* context_; public: SecHandlerPluginArgument(Arc::Config* config,Arc::ChainContext* context):config_(config),context_(context) { }; virtual ~SecHandlerPluginArgument(void) { }; operator Arc::Config* (void) { return config_; }; operator Arc::ChainContext* (void) { return context_; }; }; /** Helper class to create Security Handler configuration */ class SecHandlerConfig: public Arc::XMLNode { public: SecHandlerConfig(const std::string& name,const std::string& event = "",const std::string& id = ""); }; } // namespace ArcSec #endif /* __ARC_SEC_SECHANDLER_H__ */ nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315726023151 xustar000000000000000030 mtime=1513200598.262992418 30 atime=1513200648.146602514 30 ctime=1513200659.018735485 nordugrid-arc-5.4.2/src/hed/libs/message/Makefile.in0000644000175000002070000013753313214315726023233 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/message DIST_COMMON = README $(libarcmessage_la_HEADERS) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libdir)" \ "$(DESTDIR)$(libarcmessage_ladir)" LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = libarcmessage_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libarcmessage_la_OBJECTS = libarcmessage_la-SOAPEnvelope.lo \ libarcmessage_la-PayloadRaw.lo libarcmessage_la-PayloadSOAP.lo \ libarcmessage_la-PayloadStream.lo \ libarcmessage_la-MCC_Status.lo libarcmessage_la-MCC.lo \ libarcmessage_la-Service.lo libarcmessage_la-Plexer.lo \ libarcmessage_la-MessageAttributes.lo \ libarcmessage_la-Message.lo libarcmessage_la-SOAPMessage.lo \ libarcmessage_la-MessageAuth.lo libarcmessage_la-SecAttr.lo \ libarcmessage_la-MCCLoader.lo libarcmessage_la-SecHandler.lo libarcmessage_la_OBJECTS = $(am_libarcmessage_la_OBJECTS) libarcmessage_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) \ $(libarcmessage_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ 
-I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcmessage_la_SOURCES) DIST_SOURCES = $(libarcmessage_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive HEADERS = $(libarcmessage_la_HEADERS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = 
@AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ 
OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = 
@srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ DIST_SUBDIRS = test SUBDIRS = $(TEST_DIR) lib_LTLIBRARIES = libarcmessage.la libarcmessage_ladir = $(pkgincludedir)/message libarcmessage_la_HEADERS = SOAPEnvelope.h PayloadRaw.h PayloadSOAP.h \ PayloadStream.h MCC_Status.h MCC.h Service.h Plexer.h \ MessageAttributes.h Message.h SOAPMessage.h MessageAuth.h \ SecAttr.h MCCLoader.h SecHandler.h libarcmessage_la_SOURCES = SOAPEnvelope.cpp PayloadRaw.cpp PayloadSOAP.cpp \ PayloadStream.cpp MCC_Status.cpp MCC.cpp Service.cpp Plexer.cpp \ MessageAttributes.cpp Message.cpp SOAPMessage.cpp MessageAuth.cpp \ SecAttr.cpp MCCLoader.cpp SecHandler.cpp libarcmessage_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcmessage_la_LIBADD = \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) libarcmessage_la_LDFLAGS = -version-info 3:0:0 all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/message/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/message/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f 
\"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcmessage.la: $(libarcmessage_la_OBJECTS) $(libarcmessage_la_DEPENDENCIES) $(libarcmessage_la_LINK) -rpath $(libdir) $(libarcmessage_la_OBJECTS) $(libarcmessage_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcmessage_la-MCC.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcmessage_la-MCCLoader.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcmessage_la-MCC_Status.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcmessage_la-Message.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcmessage_la-MessageAttributes.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcmessage_la-MessageAuth.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcmessage_la-PayloadRaw.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcmessage_la-PayloadSOAP.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcmessage_la-PayloadStream.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcmessage_la-Plexer.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcmessage_la-SOAPEnvelope.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcmessage_la-SOAPMessage.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcmessage_la-SecAttr.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcmessage_la-SecHandler.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcmessage_la-Service.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcmessage_la-SOAPEnvelope.lo: SOAPEnvelope.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -MT libarcmessage_la-SOAPEnvelope.lo -MD -MP -MF $(DEPDIR)/libarcmessage_la-SOAPEnvelope.Tpo -c -o libarcmessage_la-SOAPEnvelope.lo `test -f 'SOAPEnvelope.cpp' || echo '$(srcdir)/'`SOAPEnvelope.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcmessage_la-SOAPEnvelope.Tpo $(DEPDIR)/libarcmessage_la-SOAPEnvelope.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SOAPEnvelope.cpp' 
object='libarcmessage_la-SOAPEnvelope.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcmessage_la-SOAPEnvelope.lo `test -f 'SOAPEnvelope.cpp' || echo '$(srcdir)/'`SOAPEnvelope.cpp libarcmessage_la-PayloadRaw.lo: PayloadRaw.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -MT libarcmessage_la-PayloadRaw.lo -MD -MP -MF $(DEPDIR)/libarcmessage_la-PayloadRaw.Tpo -c -o libarcmessage_la-PayloadRaw.lo `test -f 'PayloadRaw.cpp' || echo '$(srcdir)/'`PayloadRaw.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcmessage_la-PayloadRaw.Tpo $(DEPDIR)/libarcmessage_la-PayloadRaw.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='PayloadRaw.cpp' object='libarcmessage_la-PayloadRaw.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcmessage_la-PayloadRaw.lo `test -f 'PayloadRaw.cpp' || echo '$(srcdir)/'`PayloadRaw.cpp libarcmessage_la-PayloadSOAP.lo: PayloadSOAP.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -MT libarcmessage_la-PayloadSOAP.lo -MD -MP -MF $(DEPDIR)/libarcmessage_la-PayloadSOAP.Tpo -c -o libarcmessage_la-PayloadSOAP.lo `test -f 'PayloadSOAP.cpp' || echo '$(srcdir)/'`PayloadSOAP.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcmessage_la-PayloadSOAP.Tpo $(DEPDIR)/libarcmessage_la-PayloadSOAP.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='PayloadSOAP.cpp' object='libarcmessage_la-PayloadSOAP.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcmessage_la-PayloadSOAP.lo `test -f 'PayloadSOAP.cpp' || echo '$(srcdir)/'`PayloadSOAP.cpp libarcmessage_la-PayloadStream.lo: PayloadStream.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -MT libarcmessage_la-PayloadStream.lo -MD -MP -MF $(DEPDIR)/libarcmessage_la-PayloadStream.Tpo -c -o libarcmessage_la-PayloadStream.lo `test -f 'PayloadStream.cpp' || echo '$(srcdir)/'`PayloadStream.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcmessage_la-PayloadStream.Tpo $(DEPDIR)/libarcmessage_la-PayloadStream.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='PayloadStream.cpp' object='libarcmessage_la-PayloadStream.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) 
$(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcmessage_la-PayloadStream.lo `test -f 'PayloadStream.cpp' || echo '$(srcdir)/'`PayloadStream.cpp libarcmessage_la-MCC_Status.lo: MCC_Status.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -MT libarcmessage_la-MCC_Status.lo -MD -MP -MF $(DEPDIR)/libarcmessage_la-MCC_Status.Tpo -c -o libarcmessage_la-MCC_Status.lo `test -f 'MCC_Status.cpp' || echo '$(srcdir)/'`MCC_Status.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcmessage_la-MCC_Status.Tpo $(DEPDIR)/libarcmessage_la-MCC_Status.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='MCC_Status.cpp' object='libarcmessage_la-MCC_Status.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcmessage_la-MCC_Status.lo `test -f 'MCC_Status.cpp' || echo '$(srcdir)/'`MCC_Status.cpp libarcmessage_la-MCC.lo: MCC.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -MT libarcmessage_la-MCC.lo -MD -MP -MF $(DEPDIR)/libarcmessage_la-MCC.Tpo -c -o libarcmessage_la-MCC.lo `test -f 'MCC.cpp' || echo '$(srcdir)/'`MCC.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcmessage_la-MCC.Tpo $(DEPDIR)/libarcmessage_la-MCC.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='MCC.cpp' object='libarcmessage_la-MCC.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcmessage_la-MCC.lo `test -f 'MCC.cpp' || echo '$(srcdir)/'`MCC.cpp libarcmessage_la-Service.lo: Service.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -MT libarcmessage_la-Service.lo -MD -MP -MF $(DEPDIR)/libarcmessage_la-Service.Tpo -c -o libarcmessage_la-Service.lo `test -f 'Service.cpp' || echo '$(srcdir)/'`Service.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcmessage_la-Service.Tpo $(DEPDIR)/libarcmessage_la-Service.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Service.cpp' object='libarcmessage_la-Service.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcmessage_la-Service.lo `test -f 'Service.cpp' || echo '$(srcdir)/'`Service.cpp libarcmessage_la-Plexer.lo: Plexer.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -MT libarcmessage_la-Plexer.lo -MD -MP -MF $(DEPDIR)/libarcmessage_la-Plexer.Tpo -c -o libarcmessage_la-Plexer.lo `test -f 'Plexer.cpp' || echo '$(srcdir)/'`Plexer.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcmessage_la-Plexer.Tpo $(DEPDIR)/libarcmessage_la-Plexer.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Plexer.cpp' object='libarcmessage_la-Plexer.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcmessage_la-Plexer.lo `test -f 'Plexer.cpp' || echo '$(srcdir)/'`Plexer.cpp libarcmessage_la-MessageAttributes.lo: MessageAttributes.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -MT libarcmessage_la-MessageAttributes.lo -MD -MP -MF $(DEPDIR)/libarcmessage_la-MessageAttributes.Tpo -c -o libarcmessage_la-MessageAttributes.lo `test -f 'MessageAttributes.cpp' || echo '$(srcdir)/'`MessageAttributes.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcmessage_la-MessageAttributes.Tpo $(DEPDIR)/libarcmessage_la-MessageAttributes.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='MessageAttributes.cpp' object='libarcmessage_la-MessageAttributes.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcmessage_la-MessageAttributes.lo `test -f 'MessageAttributes.cpp' || echo '$(srcdir)/'`MessageAttributes.cpp libarcmessage_la-Message.lo: Message.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -MT libarcmessage_la-Message.lo -MD -MP -MF $(DEPDIR)/libarcmessage_la-Message.Tpo -c -o libarcmessage_la-Message.lo `test -f 'Message.cpp' || echo '$(srcdir)/'`Message.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcmessage_la-Message.Tpo $(DEPDIR)/libarcmessage_la-Message.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Message.cpp' object='libarcmessage_la-Message.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcmessage_la-Message.lo `test -f 'Message.cpp' || echo '$(srcdir)/'`Message.cpp libarcmessage_la-SOAPMessage.lo: SOAPMessage.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -MT libarcmessage_la-SOAPMessage.lo -MD -MP -MF $(DEPDIR)/libarcmessage_la-SOAPMessage.Tpo -c -o libarcmessage_la-SOAPMessage.lo `test -f 'SOAPMessage.cpp' || echo '$(srcdir)/'`SOAPMessage.cpp 
@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcmessage_la-SOAPMessage.Tpo $(DEPDIR)/libarcmessage_la-SOAPMessage.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SOAPMessage.cpp' object='libarcmessage_la-SOAPMessage.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcmessage_la-SOAPMessage.lo `test -f 'SOAPMessage.cpp' || echo '$(srcdir)/'`SOAPMessage.cpp libarcmessage_la-MessageAuth.lo: MessageAuth.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -MT libarcmessage_la-MessageAuth.lo -MD -MP -MF $(DEPDIR)/libarcmessage_la-MessageAuth.Tpo -c -o libarcmessage_la-MessageAuth.lo `test -f 'MessageAuth.cpp' || echo '$(srcdir)/'`MessageAuth.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcmessage_la-MessageAuth.Tpo $(DEPDIR)/libarcmessage_la-MessageAuth.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='MessageAuth.cpp' object='libarcmessage_la-MessageAuth.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcmessage_la-MessageAuth.lo `test -f 'MessageAuth.cpp' || echo '$(srcdir)/'`MessageAuth.cpp libarcmessage_la-SecAttr.lo: SecAttr.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -MT libarcmessage_la-SecAttr.lo -MD -MP -MF $(DEPDIR)/libarcmessage_la-SecAttr.Tpo -c -o libarcmessage_la-SecAttr.lo `test -f 'SecAttr.cpp' || echo '$(srcdir)/'`SecAttr.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcmessage_la-SecAttr.Tpo $(DEPDIR)/libarcmessage_la-SecAttr.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SecAttr.cpp' object='libarcmessage_la-SecAttr.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcmessage_la-SecAttr.lo `test -f 'SecAttr.cpp' || echo '$(srcdir)/'`SecAttr.cpp libarcmessage_la-MCCLoader.lo: MCCLoader.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -MT libarcmessage_la-MCCLoader.lo -MD -MP -MF $(DEPDIR)/libarcmessage_la-MCCLoader.Tpo -c -o libarcmessage_la-MCCLoader.lo `test -f 'MCCLoader.cpp' || echo '$(srcdir)/'`MCCLoader.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcmessage_la-MCCLoader.Tpo $(DEPDIR)/libarcmessage_la-MCCLoader.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='MCCLoader.cpp' object='libarcmessage_la-MCCLoader.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) 
$(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcmessage_la-MCCLoader.lo `test -f 'MCCLoader.cpp' || echo '$(srcdir)/'`MCCLoader.cpp libarcmessage_la-SecHandler.lo: SecHandler.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -MT libarcmessage_la-SecHandler.lo -MD -MP -MF $(DEPDIR)/libarcmessage_la-SecHandler.Tpo -c -o libarcmessage_la-SecHandler.lo `test -f 'SecHandler.cpp' || echo '$(srcdir)/'`SecHandler.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcmessage_la-SecHandler.Tpo $(DEPDIR)/libarcmessage_la-SecHandler.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SecHandler.cpp' object='libarcmessage_la-SecHandler.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcmessage_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcmessage_la-SecHandler.lo `test -f 'SecHandler.cpp' || echo '$(srcdir)/'`SecHandler.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libarcmessage_laHEADERS: $(libarcmessage_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarcmessage_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarcmessage_ladir)" @list='$(libarcmessage_la_HEADERS)'; test -n "$(libarcmessage_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarcmessage_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarcmessage_ladir)" || exit $$?; \ done uninstall-libarcmessage_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarcmessage_la_HEADERS)'; test -n "$(libarcmessage_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarcmessage_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libarcmessage_ladir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(HEADERS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(libarcmessage_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-libarcmessage_laHEADERS install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-libLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-libLTLIBRARIES \ uninstall-libarcmessage_laHEADERS .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic \ clean-libLTLIBRARIES clean-libtool ctags ctags-recursive \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-libLTLIBRARIES \ install-libarcmessage_laHEADERS install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs installdirs-am \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags tags-recursive uninstall uninstall-am \ uninstall-libLTLIBRARIES uninstall-libarcmessage_laHEADERS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/Plexer.h0000644000000000000000000000012412135423131022504 xustar000000000000000027 mtime=1366697561.553206 27 atime=1513200574.793705 30 ctime=1513200659.013735423 nordugrid-arc-5.4.2/src/hed/libs/message/Plexer.h0000644000175000002070000000466012135423131022557 0ustar00mockbuildmock00000000000000// MCCPlexer.h #ifndef __ARC_MCC_PLEXER__ #define __ARC_MCC_PLEXER__ #include #include #include #include #include #include namespace Arc { //! A pair of label (regex) and pointer to MCC. /*! A helper class that stores a label (regex) and a pointer to a service. */ class PlexerEntry { private: //! Constructor. /*! Constructs a PlexerEntry and initializes its attributes. */ PlexerEntry(const RegularExpression& label, MCCInterface* service); RegularExpression label; MCCInterface* mcc; friend class Plexer; }; //! The Plexer class, used for routing messages to services. /*! This is the Plexer class. Its purpose is to route incoming messages to appropriate Services and MCC chains. */ class Plexer: public MCC { public: //! The constructor. /*! This is the constructor. Since all member variables are instances of "well-behaving" STL classes, nothing needs to be done. */ Plexer(Config *cfg, PluginArgument* arg); //! The destructor. /*! 
This is the destructor. Since all member variables are instances of "well-behaving" STL classes, nothing needs to be done. */ virtual ~Plexer(); //! Add reference to next MCC in chain. /*! This method is called by the Loader for every potentially labeled link to the next component which implements MCCInterface. If next is set to NULL the corresponding link is removed. */ virtual void Next(MCCInterface* next, const std::string& label); //! Route request messages to appropriate services. /*! Routes the request message to the appropriate service. Routing is based on the path part of the value of the ENDPOINT attribute. The routed message is assigned the following attributes: PLEXER:PATTERN - the matched pattern, PLEXER:EXTENSION - the last unmatched part of the ENDPOINT path. */ virtual MCC_Status process(Message& request, Message& response); /* protected: XXX: workaround because the python segmentation fault */ static Logger logger; private: //! Extracts the path part of a URL. static std::string getPath(std::string url); //! The list of next MCCs. /*! A list that maps labels (regular expressions) to next elements with the MCC interface. It is used for routing messages. */ std::list<PlexerEntry> mccs; }; } #endif nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/MessageAuth.h0000644000000000000000000000012412044527530023462 xustar000000000000000027 mtime=1351790424.395655 27 atime=1513200574.824705 30 ctime=1513200659.015735448 nordugrid-arc-5.4.2/src/hed/libs/message/MessageAuth.h0000644000175000002070000000362212044527530023532 0ustar00mockbuildmock00000000000000#ifndef __ARC_MESSAGEAUTH_H__ #define __ARC_MESSAGEAUTH_H__ #include #include #include namespace Arc { /// Contains authenticity information, authorization tokens and decisions. /** This class only supports string keys and SecAttr values. */ class MessageAuth { private: std::map<std::string, SecAttr*> attrs_; bool attrs_created_; MessageAuth(const MessageAuth&); public: MessageAuth(void); ~MessageAuth(void); /// Adds/overwrites security attribute stored under specified key void set(const std::string& key, SecAttr* value); /// Deletes security attribute stored under specified key void remove(const std::string& key); /// Retrieves reference to security attribute stored under specified key SecAttr* get(const std::string& key); /// Same as MessageAuth::get SecAttr* operator[](const std::string& key) { return get(key); }; /// Returns properly catenated attributes in specified format /** The content of the XML node at @val is replaced with the generated information if the XML tree is empty. If the tree at @val is not empty then Export() tries to merge the generated information with the already existing content, as if everything had been generated inside the same Export() call. If @val does not represent a valid node then a new XML tree is created. */ bool Export(SecAttrFormat format,XMLNode &val) const; /// Creates new instance of MessageAuth with attributes filtered /** In the new instance all attributes with keys listed in @rejected_keys are removed. If @selected_keys is not empty only the corresponding attributes are transferred to the new instance. The created instance does not own the referred attributes. Hence the parent instance must not be deleted as long as this one is in use.
*/ MessageAuth* Filter(const std::list<std::string>& selected_keys,const std::list<std::string>& rejected_keys); }; } #endif nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/test0000644000000000000000000000013213214316023021775 xustar000000000000000030 mtime=1513200659.059735986 30 atime=1513200668.721854157 30 ctime=1513200659.059735986 nordugrid-arc-5.4.2/src/hed/libs/message/test/0000755000175000002070000000000013214316023022120 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/message/test/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515024120 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200598.277992601 30 ctime=1513200659.054735925 nordugrid-arc-5.4.2/src/hed/libs/message/test/Makefile.am0000644000175000002070000000243712052416515024170 0ustar00mockbuildmock00000000000000TESTS = ChainTest check_LTLIBRARIES = libtestmcc.la libtestservice.la check_PROGRAMS = $(TESTS) libtestmcc_la_SOURCES = TestMCC.cpp libtestmcc_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libtestmcc_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) libtestmcc_la_LDFLAGS = -no-undefined -avoid-version -module -rpath $(CURDIR) libtestservice_la_SOURCES = TestService.cpp libtestservice_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libtestservice_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) libtestservice_la_LDFLAGS = -no-undefined -avoid-version -module -rpath $(CURDIR) ChainTest_SOURCES = $(top_srcdir)/src/Test.cpp ChainTest.cpp ChainTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) ChainTest_LDADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) nordugrid-arc-5.4.2/src/hed/libs/message/test/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315726024130 xustar000000000000000030 mtime=1513200598.331993262 30 atime=1513200648.161602697 30 ctime=1513200659.056735949 nordugrid-arc-5.4.2/src/hed/libs/message/test/Makefile.in0000644000175000002070000010107513214315726024202 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE.
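The Plexer declared in Plexer.h above routes an incoming message by matching the path part of its ENDPOINT attribute against the regular-expression labels registered with Next(). The following is a minimal usage sketch only, not code from this archive: the include paths, the literal "ENDPOINT" attribute key and the way the attribute container is attached to the Message are assumptions based on the doxygen comments and on the other headers of this library.

// Usage sketch (assumptions noted above): wire one service into a Plexer and route a request.
#include <arc/ArcConfig.h>        // assumed location of Arc::Config
#include <arc/message/Plexer.h>   // assumed installed path of Plexer.h
#include <arc/message/Message.h>  // assumed installed path of Message.h

static Arc::MCC_Status route_once(Arc::Config& cfg, Arc::MCCInterface& service) {
  Arc::Plexer plexer(&cfg, NULL);             // constructor as declared above; PluginArgument omitted in this sketch
  plexer.Next(&service, "^/service1");        // regex label; Next(NULL, "^/service1") would remove the link again
  Arc::MessageAttributes attrs;               // assumed attribute container from this library
  attrs.set("ENDPOINT", "http://host/service1/query");  // the path part "/service1/query" is what gets matched
  Arc::Message request, response;
  request.Attributes(&attrs);                 // assumed Message setter for its attribute container
  // On a match, process() forwards to 'service' and the routed message gains
  // PLEXER:PATTERN and PLEXER:EXTENSION, as documented in Plexer.h above.
  return plexer.process(request, response);
}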
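MessageAuth, declared in MessageAuth.h above, is a keyed collection of SecAttr objects whose two non-trivial operations are Export() and Filter(). The sketch below is illustrative only: the include path is an assumption, tls_attr stands for any SecAttr produced elsewhere (for example by a security handler), and it assumes the caller releases the instance returned by Filter().

// Usage sketch (assumptions noted above): store one attribute, filter down to it, export as XML.
#include <list>
#include <string>
#include <arc/message/MessageAuth.h>  // assumed installed path of MessageAuth.h

static bool export_tls_only(Arc::SecAttr* tls_attr, Arc::SecAttrFormat format, Arc::XMLNode& out) {
  Arc::MessageAuth auth;
  auth.set("TLS", tls_attr);                   // attribute stored under the key "TLS"
  if (auth["TLS"] == NULL) return false;       // operator[] is the same as get()
  std::list<std::string> selected;             // keep only the "TLS" attribute
  selected.push_back("TLS");
  std::list<std::string> rejected;             // nothing explicitly rejected
  Arc::MessageAuth* view = auth.Filter(selected, rejected);
  bool ok = view->Export(format, out);         // fills, or merges into, the XML tree at 'out'
  delete view;                                 // released before 'auth', which must outlive it
  return ok;
}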
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ TESTS = ChainTest$(EXEEXT) check_PROGRAMS = $(am__EXEEXT_1) subdir = src/hed/libs/message/test DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__DEPENDENCIES_1 = libtestmcc_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) am_libtestmcc_la_OBJECTS = libtestmcc_la-TestMCC.lo libtestmcc_la_OBJECTS = $(am_libtestmcc_la_OBJECTS) libtestmcc_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libtestmcc_la_CXXFLAGS) \ $(CXXFLAGS) $(libtestmcc_la_LDFLAGS) $(LDFLAGS) -o $@ libtestservice_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) am_libtestservice_la_OBJECTS = libtestservice_la-TestService.lo libtestservice_la_OBJECTS = $(am_libtestservice_la_OBJECTS) libtestservice_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libtestservice_la_CXXFLAGS) $(CXXFLAGS) \ $(libtestservice_la_LDFLAGS) $(LDFLAGS) -o $@ am__EXEEXT_1 = ChainTest$(EXEEXT) am_ChainTest_OBJECTS = ChainTest-Test.$(OBJEXT) \ ChainTest-ChainTest.$(OBJEXT) ChainTest_OBJECTS = $(am_ChainTest_OBJECTS) ChainTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) ChainTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(ChainTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) 
$(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libtestmcc_la_SOURCES) $(libtestservice_la_SOURCES) \ $(ChainTest_SOURCES) DIST_SOURCES = $(libtestmcc_la_SOURCES) $(libtestservice_la_SOURCES) \ $(ChainTest_SOURCES) ETAGS = etags CTAGS = ctags am__tty_colors = \ red=; grn=; lgn=; blu=; std= DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = 
@GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = 
@abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ check_LTLIBRARIES = libtestmcc.la libtestservice.la libtestmcc_la_SOURCES = TestMCC.cpp libtestmcc_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libtestmcc_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) libtestmcc_la_LDFLAGS = -no-undefined -avoid-version -module -rpath $(CURDIR) libtestservice_la_SOURCES = TestService.cpp libtestservice_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libtestservice_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) libtestservice_la_LDFLAGS = -no-undefined -avoid-version -module -rpath $(CURDIR) ChainTest_SOURCES = $(top_srcdir)/src/Test.cpp ChainTest.cpp ChainTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) ChainTest_LDADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am 
$(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/message/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/message/test/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-checkLTLIBRARIES: -test -z "$(check_LTLIBRARIES)" || rm -f $(check_LTLIBRARIES) @list='$(check_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libtestmcc.la: $(libtestmcc_la_OBJECTS) $(libtestmcc_la_DEPENDENCIES) $(libtestmcc_la_LINK) $(libtestmcc_la_OBJECTS) $(libtestmcc_la_LIBADD) $(LIBS) libtestservice.la: $(libtestservice_la_OBJECTS) $(libtestservice_la_DEPENDENCIES) $(libtestservice_la_LINK) $(libtestservice_la_OBJECTS) $(libtestservice_la_LIBADD) $(LIBS) clean-checkPROGRAMS: @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list ChainTest$(EXEEXT): $(ChainTest_OBJECTS) $(ChainTest_DEPENDENCIES) @rm -f ChainTest$(EXEEXT) $(ChainTest_LINK) $(ChainTest_OBJECTS) $(ChainTest_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ChainTest-ChainTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ChainTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libtestmcc_la-TestMCC.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libtestservice_la-TestService.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) 
$(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libtestmcc_la-TestMCC.lo: TestMCC.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libtestmcc_la_CXXFLAGS) $(CXXFLAGS) -MT libtestmcc_la-TestMCC.lo -MD -MP -MF $(DEPDIR)/libtestmcc_la-TestMCC.Tpo -c -o libtestmcc_la-TestMCC.lo `test -f 'TestMCC.cpp' || echo '$(srcdir)/'`TestMCC.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libtestmcc_la-TestMCC.Tpo $(DEPDIR)/libtestmcc_la-TestMCC.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='TestMCC.cpp' object='libtestmcc_la-TestMCC.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libtestmcc_la_CXXFLAGS) $(CXXFLAGS) -c -o libtestmcc_la-TestMCC.lo `test -f 'TestMCC.cpp' || echo '$(srcdir)/'`TestMCC.cpp libtestservice_la-TestService.lo: TestService.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libtestservice_la_CXXFLAGS) $(CXXFLAGS) -MT libtestservice_la-TestService.lo -MD -MP -MF $(DEPDIR)/libtestservice_la-TestService.Tpo -c -o libtestservice_la-TestService.lo `test -f 'TestService.cpp' || echo '$(srcdir)/'`TestService.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libtestservice_la-TestService.Tpo $(DEPDIR)/libtestservice_la-TestService.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='TestService.cpp' object='libtestservice_la-TestService.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libtestservice_la_CXXFLAGS) $(CXXFLAGS) -c -o libtestservice_la-TestService.lo `test -f 'TestService.cpp' || echo '$(srcdir)/'`TestService.cpp ChainTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ChainTest_CXXFLAGS) $(CXXFLAGS) -MT ChainTest-Test.o -MD -MP -MF $(DEPDIR)/ChainTest-Test.Tpo -c -o ChainTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ChainTest-Test.Tpo $(DEPDIR)/ChainTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ChainTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ChainTest_CXXFLAGS) $(CXXFLAGS) -c -o ChainTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp ChainTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ChainTest_CXXFLAGS) $(CXXFLAGS) -MT ChainTest-Test.obj -MD -MP -MF $(DEPDIR)/ChainTest-Test.Tpo -c -o 
ChainTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ChainTest-Test.Tpo $(DEPDIR)/ChainTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ChainTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ChainTest_CXXFLAGS) $(CXXFLAGS) -c -o ChainTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` ChainTest-ChainTest.o: ChainTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ChainTest_CXXFLAGS) $(CXXFLAGS) -MT ChainTest-ChainTest.o -MD -MP -MF $(DEPDIR)/ChainTest-ChainTest.Tpo -c -o ChainTest-ChainTest.o `test -f 'ChainTest.cpp' || echo '$(srcdir)/'`ChainTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ChainTest-ChainTest.Tpo $(DEPDIR)/ChainTest-ChainTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ChainTest.cpp' object='ChainTest-ChainTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ChainTest_CXXFLAGS) $(CXXFLAGS) -c -o ChainTest-ChainTest.o `test -f 'ChainTest.cpp' || echo '$(srcdir)/'`ChainTest.cpp ChainTest-ChainTest.obj: ChainTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ChainTest_CXXFLAGS) $(CXXFLAGS) -MT ChainTest-ChainTest.obj -MD -MP -MF $(DEPDIR)/ChainTest-ChainTest.Tpo -c -o ChainTest-ChainTest.obj `if test -f 'ChainTest.cpp'; then $(CYGPATH_W) 'ChainTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ChainTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ChainTest-ChainTest.Tpo $(DEPDIR)/ChainTest-ChainTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ChainTest.cpp' object='ChainTest-ChainTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ChainTest_CXXFLAGS) $(CXXFLAGS) -c -o ChainTest-ChainTest.obj `if test -f 'ChainTest.cpp'; then $(CYGPATH_W) 'ChainTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ChainTest.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) 
$(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? -ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do 
\ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_LTLIBRARIES) $(check_PROGRAMS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-checkLTLIBRARIES clean-checkPROGRAMS clean-generic \ clean-libtool mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-TESTS check-am clean \ clean-checkLTLIBRARIES clean-checkPROGRAMS clean-generic \ clean-libtool ctags distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/message/test/PaxHeaders.7502/ChainTest.cpp0000644000000000000000000000012311741501077024450 xustar000000000000000026 mtime=1334215231.35079 27 atime=1513200574.801705 30 ctime=1513200659.059735986 nordugrid-arc-5.4.2/src/hed/libs/message/test/ChainTest.cpp0000644000175000002070000000263711741501077024526 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include class ChainTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(ChainTest); CPPUNIT_TEST(TestPlugin); CPPUNIT_TEST_SUITE_END(); public: void TestPlugin(); }; class ChainTestLoader: Arc::Loader { public: ChainTestLoader(Arc::XMLNode cfg):Arc::Loader(cfg) { }; Arc::PluginsFactory* factory(void) { return factory_; }; }; void ChainTest::TestPlugin() { std::string config_xml("\ \n\ \n\ \n\ .libs/\n\ \n\ \n\ testmcc\n\ testservice\n\ \n\ \n\ \n\ \n\ \n\ \n\ \n\ \n\ \n\ /service1\n\ /service2\n\ \n\ \n\ \n\ \n\ \n\ \n\ "); Arc::Config cfg(config_xml); Arc::MCCLoader loader(cfg); CPPUNIT_ASSERT(loader); } CPPUNIT_TEST_SUITE_REGISTRATION(ChainTest); nordugrid-arc-5.4.2/src/hed/libs/message/test/PaxHeaders.7502/TestMCC.cpp0000644000000000000000000000012412675602216024035 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.801705 30 ctime=1513200659.057735961 nordugrid-arc-5.4.2/src/hed/libs/message/test/TestMCC.cpp0000644000175000002070000000213012675602216024076 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include /* #include #include #include #include #include #include "PayloadHTTP.h" #include "MCCHTTP.h" */ class TestMCC: public Arc::MCC { public: TestMCC(Arc::Config *cfg, Arc::PluginArgument *parg); virtual ~TestMCC(void); virtual Arc::MCC_Status process(Arc::Message&,Arc::Message&); }; TestMCC::TestMCC(Arc::Config* cfg, Arc::PluginArgument *parg):Arc::MCC(cfg,parg) { } TestMCC::~TestMCC() { } Arc::MCC_Status TestMCC::process(Arc::Message&,Arc::Message&) { return Arc::MCC_Status(Arc::STATUS_OK); } static Arc::Plugin* get_mcc(Arc::PluginArgument* arg) { Arc::MCCPluginArgument* mccarg = arg?dynamic_cast(arg):NULL; if(!mccarg) return NULL; return new TestMCC((Arc::Config*)(*mccarg),arg); } extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "testmcc", "HED:MCC", NULL, 0, &get_mcc }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/libs/message/test/PaxHeaders.7502/TestService.cpp0000644000000000000000000000012412675602216025033 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.801705 30 ctime=1513200659.058735974 nordugrid-arc-5.4.2/src/hed/libs/message/test/TestService.cpp0000644000175000002070000000173612675602216025107 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include class TestService: public Arc::Service { public: TestService(Arc::Config *cfg, Arc::PluginArgument *parg); virtual ~TestService(void); virtual Arc::MCC_Status process(Arc::Message&,Arc::Message&); }; TestService::TestService(Arc::Config* cfg, Arc::PluginArgument *parg):Arc::Service(cfg,parg) { } TestService::~TestService() { } Arc::MCC_Status TestService::process(Arc::Message&,Arc::Message&) { return Arc::MCC_Status(Arc::STATUS_OK); } static Arc::Plugin* get_service(Arc::PluginArgument* arg) { Arc::ServicePluginArgument* servicearg = arg?dynamic_cast(arg):NULL; if(!servicearg) return NULL; return new TestService((Arc::Config*)(*servicearg),arg); } extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "testservice", "HED:SERVICE", NULL, 0, &get_service }, { NULL, 
NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/SOAPMessage.cpp0000644000000000000000000000012411174366545023670 xustar000000000000000027 mtime=1240591717.919273 27 atime=1513200574.823705 30 ctime=1513200659.027735595 nordugrid-arc-5.4.2/src/hed/libs/message/SOAPMessage.cpp0000644000175000002070000000177411174366545023746 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "SOAPMessage.h" namespace Arc { SOAPMessage::SOAPMessage(long msg_ptr_addr):payload_(NULL) { SOAPMessage *msg = (SOAPMessage *)msg_ptr_addr; auth_ = msg->Auth(); attributes_ = msg->Attributes(); context_ = msg->Context(); Payload(msg->Payload()); } SOAPMessage::SOAPMessage(Message& msg):payload_(NULL) { auth_ = msg.Auth(); attributes_ = msg.Attributes(); context_ = msg.Context(); Payload(dynamic_cast(msg.Payload())); } SOAPEnvelope* SOAPMessage::Payload(void) { return payload_; } /* This class is intended to be used in language binding. So to make it's usage safe pointers are not used directly. Instead copy of pointed object is created. */ void SOAPMessage::Payload(SOAPEnvelope* new_payload) { SOAPEnvelope* p = payload_; payload_=new_payload?new_payload->New():NULL; if(p) delete p; } SOAPMessage::~SOAPMessage(void) { if(payload_) delete payload_; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/SecAttr.h0000644000000000000000000000012213103157414022614 xustar000000000000000026 mtime=1494015756.80239 27 atime=1513200574.796705 29 ctime=1513200659.01673546 nordugrid-arc-5.4.2/src/hed/libs/message/SecAttr.h0000644000175000002070000001272013103157414022665 0ustar00mockbuildmock00000000000000#ifndef __ARC_SECATTR__ #define __ARC_SECATTR__ #include #include #include #include namespace Arc { /// Export/import format. /** Format is identified by textual identity string. Class description includes basic formats only. That list may be extended. */ class SecAttrFormat { private: const char* format_; public: inline SecAttrFormat(const SecAttrFormat& format):format_(format.format_) {}; inline SecAttrFormat(const char* format = ""):format_(format) {}; inline SecAttrFormat& operator=(SecAttrFormat format) { format_=format.format_; return *this; }; inline SecAttrFormat& operator=(const char* format) { format_=format; return *this; }; inline bool operator==(SecAttrFormat format) { return (strcmp(format_,format.format_) == 0); }; inline bool operator==(const char* format) { return (strcmp(format_,format) == 0); }; inline bool operator!=(SecAttrFormat format) { return (strcmp(format_,format.format_) != 0); }; inline bool operator!=(const char* format) { return (strcmp(format_,format) != 0); }; }; /// This is an abstract interface to a security attribute /** This class is meant to be inherited to implement security attributes. Depending on what data it needs to store inheriting classes may need to implement constructor and destructor. They must however override the equality and the boolean operators. The equality is meant to compare security attributes. The prototype implies that all attributes are comparable to all others. This behaviour should be modified as needed by using dynamic_cast operations. The boolean cast operation is meant to embody "nullness" if that is applicable to the particular type. 
*/ class SecAttr { public: static SecAttrFormat UNDEFINED; /// own serialization/deserialization format static SecAttrFormat ARCAuth; /// representation for ARC authorization policy static SecAttrFormat XACML; /// represenation for XACML policy static SecAttrFormat SAML; /// suitable for inclusion into SAML structures static SecAttrFormat GACL; /// representation for GACL policy SecAttr() {}; virtual ~SecAttr() {}; /** This function should (in inheriting classes) return true if this and b are considered to represent same content. Identifying and restricting the type of b should be done using dynamic_cast operations. Currently it is not defined how comparison methods to be used. Hence their implementation is not required. */ bool operator==(const SecAttr &b) const { return equal(b); }; /** This is a convenience function to allow the usage of "not equal" conditions and need not be overridden.*/ bool operator!=(const SecAttr &b) const { return !equal(b); }; /** This function should return false if the value is to be considered null, e.g. if it hasn't been set or initialized. In other cases it should return true.*/ virtual operator bool() const; /** Convert internal structure into specified format. Returns false if format is not supported/suitable for this attribute. */ virtual bool Export(SecAttrFormat format,std::string &val) const; /** Convert internal structure into specified format. Returns false if format is not supported/suitable for this attribute. XML node referenced by @val is turned into top level element of specified format. */ virtual bool Export(SecAttrFormat format,XMLNode &val) const; /** Fills internal structure from external object of specified format. Returns false if failed to do. The usage pattern for this method is not defined and it is provided only to make class symmetric. Hence it's implementation is not required yet. */ virtual bool Import(SecAttrFormat format,const std::string &val); virtual bool Import(SecAttrFormat format,XMLNode val); /** Access to specific item of the security attribute. If there are few items of same id the first one is presented. It is meant to be used for tightly coupled SecHandlers and provides more effective interface than Export. */ virtual std::string get(const std::string& id) const; /** Access to specific items of the security attribute. This method returns all items which have id assigned. It is meant to be used for tightly coupled SecHandlers and provides more effective interface than Export. */ virtual std::list getAll(const std::string& id) const; protected: virtual bool equal(const SecAttr &b) const; }; /// Container of multiple SecAttr attributes /** This class combines multiple attributes. It's export/import methods catenate results of underlying objects. Primary meaning of this class is to serve as base for classes implementing multi level hierarchical tree-like descriptions of user identity. It may also be used for collecting information of same source or kind. Like all information extracted from X509 certificate. 
*/ class MultiSecAttr: public SecAttr { public: MultiSecAttr() {}; virtual ~MultiSecAttr(); virtual operator bool() const; virtual bool Export(SecAttrFormat format,XMLNode &val) const; virtual bool Import(SecAttrFormat format,XMLNode val); protected: std::list attrs_; virtual bool equal(const SecAttr &b) const; virtual bool Add(SecAttrFormat format,XMLNode &val); }; } #endif nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/Message.h0000644000000000000000000000012412574532370022646 xustar000000000000000027 mtime=1441969400.372727 27 atime=1513200574.831705 30 ctime=1513200659.014735435 nordugrid-arc-5.4.2/src/hed/libs/message/Message.h0000644000175000002070000001744012574532370022721 0ustar00mockbuildmock00000000000000#ifndef __ARC_MESSAGE_H__ #define __ARC_MESSAGE_H__ #include #include "MessageAttributes.h" #include "MessageAuth.h" #include "MCC_Status.h" namespace Arc { /// Base class for content of message passed through chain. /** It's not intended to be used directly. Instead functional classes must be derived from it. */ class MessagePayload { protected: MCC_Status failure_; public: MessagePayload(void); virtual ~MessagePayload(void); /// Returns last failure description MCC_Status Failure(void); }; /// Top class for elements contained in message context. /** Objects of classes inherited with this one may be stored in MessageContext container. */ class MessageContextElement { public: MessageContextElement(void) { }; virtual ~MessageContextElement(void) { }; }; /// Handler for content of message context. /** This class is a container for objects derived from MessageContextElement. It gets associated with Message object usually by first MCC in a chain and is kept as long as connection persists. */ class MessageContext { private: std::map elements_; public: MessageContext(void); ~MessageContext(void); /** Provided element is taken over by this class. It is remembered by it and destroyed when this class is destroyed. */ void Add(const std::string& name,MessageContextElement* element); MessageContextElement* operator[](const std::string& id); }; /// Handler for content of message auth* context. /** This class is a container for authorization and authentication information. It gets associated with Message object usually by first MCC in a chain and is kept as long as connection persists. */ class MessageAuthContext: public MessageAuth { public: MessageAuthContext(void) { }; ~MessageAuthContext(void) { }; }; /// Object being passed through chain of MCCs. /** An instance of this class refers to objects with main content (MessagePayload), authentication/authorization information (MessageAuth) and common purpose attributes (MessageAttributes). Message class does not manage pointers to objects and their content. It only serves for grouping those objects. Message objects are supposed to be processed by MCCs and Services implementing MCCInterface method process(). All objects constituting content of Message object are subject to following policies: 1. All objects created inside call to process() method using new command must be explicitly destroyed within same call using delete command with following exceptions. a) Objects which are assigned to 'response' Message. b) Objects whose management is completely acquired by objects assigned to 'response' Message. 2. All objects not created inside call to process() method are not explicitly destroyed within that call with following exception. a) Objects which are part of 'response' Method returned from call to next's process() method. 
Unless those objects are passed further to calling process(), of course. 3. It is not allowed to make 'response' point to same objects as 'request' does on entry to process() method. That is needed to avoid double destruction of same object. (Note: if in a future such need arises it may be solved by storing additional flags in Message object). 4. It is allowed to change content of pointers of 'request' Message. Calling process() method must not rely on that object to stay intact. 5. Called process() method should either fill 'response' Message with pointers to valid objects or to keep them intact. This makes it possible for calling process() to preload 'response' with valid error message. */ class Message { private: MessagePayload* payload_; /** Main content of message */ MessageAuth* auth_; /** Authentication and authorization related information */ bool auth_created_; /** true if auth_ was created internally */ MessageAttributes* attr_; /** Various useful attributes */ bool attr_created_; /** true if attr_ was created internally */ /** This element is maintained by MCC/element which handles/knows persistency of connection/session. It must be created and destroyed by that element. This object must survive during whole connectivity session - whatever that means. This is a place for MCCs and services to store information related to connection. All the other objects are only guaranteed to stay during single request. */ MessageContext* ctx_; bool ctx_created_; /** true if ctx_ was created internally */ MessageAuthContext* auth_ctx_; bool auth_ctx_created_; /** true if auth_ctx_ was created internally */ public: /** Dummy constructor */ Message(void):payload_(NULL),auth_(NULL),auth_created_(false),attr_(NULL),attr_created_(false),ctx_(NULL),ctx_created_(false),auth_ctx_(NULL),auth_ctx_created_(false) { }; /** Copy constructor. Ensures shallow copy. */ Message(Message& msg):payload_(msg.payload_),auth_(msg.auth_),auth_created_(false),attr_(msg.attr_),attr_created_(false),ctx_(msg.ctx_),ctx_created_(false),auth_ctx_(msg.auth_ctx_),auth_ctx_created_(false) { }; /** Copy constructor. Used by language bindigs */ Message(long msg_ptr_addr); /** Destructor does not affect refered objects except those created internally */ ~Message(void) { if(attr_created_) delete attr_; if(auth_created_) delete auth_; if(ctx_created_) delete ctx_; if(auth_ctx_created_) delete auth_ctx_; }; /** Assignment. Ensures shallow copy. */ Message& operator=(Message& msg) { payload_=msg.payload_; if(msg.auth_) Auth(msg.auth_); if(msg.attr_) Attributes(msg.attr_); if(msg.ctx_) Context(msg.ctx_); if(msg.auth_ctx_) AuthContext(msg.auth_ctx_); return *this; }; /** Returns pointer to current payload or NULL if no payload assigned. */ MessagePayload* Payload(void) { return payload_; }; /** Replaces payload with new one. Returns the old one. */ MessagePayload* Payload(MessagePayload* payload) { MessagePayload* p = payload_; payload_=payload; return p; }; /** Returns a pointer to the current attributes object or creates it if no attributes object has been assigned. */ MessageAttributes* Attributes(void) { if(attr_ == NULL) { attr_created_=true; attr_=new MessageAttributes; }; return attr_; }; void Attributes(MessageAttributes* attr) { if(attr_created_) { attr_created_=false; delete attr_; }; attr_=attr; }; /** Returns a pointer to the current authentication/authorization object or creates it if no object has been assigned. 
*/ MessageAuth* Auth(void) { if(auth_ == NULL) { auth_created_=true; auth_=new MessageAuth; }; return auth_; }; void Auth(MessageAuth* auth) { if(auth_created_) { auth_created_=false; delete auth_; }; auth_=auth; }; /** Returns a pointer to the current context object or creates it if no object has been assigned. Last case should happen only if first MCC in a chain is connectionless like one implementing UDP protocol. */ MessageContext* Context(void) { if(ctx_ == NULL) { ctx_created_=true; ctx_=new MessageContext; }; return ctx_; }; /** Returns a pointer to the current auth* context object or creates it if no object has been assigned. */ MessageAuthContext* AuthContext(void) { if(auth_ctx_ == NULL) { auth_ctx_created_=true; auth_ctx_=new MessageAuthContext; }; return auth_ctx_; }; /** Assigns message context object */ void Context(MessageContext* ctx) { if(ctx_created_) { ctx_created_=false; delete ctx_; }; ctx_=ctx; }; /** Assigns auth* context object */ void AuthContext(MessageAuthContext* auth_ctx) { if(auth_ctx_created_) { auth_ctx_created_=false; delete auth_ctx_; }; auth_ctx_=auth_ctx; }; }; } // namespace Arc #endif /* __ARC_MESSAGE_H__ */ nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/PayloadSOAP.cpp0000644000000000000000000000012412071010044023646 xustar000000000000000027 mtime=1357123620.186841 27 atime=1513200574.826705 30 ctime=1513200659.021735521 nordugrid-arc-5.4.2/src/hed/libs/message/PayloadSOAP.cpp0000644000175000002070000000112112071010044023706 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "PayloadSOAP.h" #include "PayloadRaw.h" namespace Arc { PayloadSOAP::PayloadSOAP(const MessagePayload& source):SOAPEnvelope(ContentFromPayload(source)) { if(XMLNode::operator!()) { // TODO: implement error reporting in SOAP parsing failure_ = MCC_Status(GENERIC_ERROR,"SOAP","Failed to parse SOAP message"); } } PayloadSOAP::PayloadSOAP(const SOAPEnvelope& soap):SOAPEnvelope(soap) { } PayloadSOAP::PayloadSOAP(const NS& ns,bool fault):SOAPEnvelope(ns,fault) { } PayloadSOAP::~PayloadSOAP(void) { } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/MCCLoader.h0000644000000000000000000000012312574532370023012 xustar000000000000000027 mtime=1441969400.372727 27 atime=1513200574.799705 29 ctime=1513200659.01673546 nordugrid-arc-5.4.2/src/hed/libs/message/MCCLoader.h0000644000175000002070000001131112574532370023055 0ustar00mockbuildmock00000000000000#ifndef __ARC_MCCLOADER_H__ #define __ARC_MCCLOADER_H__ #include #include #include #include #include #include #include #include #include namespace Arc { // Internal classes - used in private methods class mcc_connectors_t; class plexer_connectors_t; class ChainContext; /// Creator of Message Component Chains (MCC). /** This class processes XML configration and creates message chains. Accepted configuration is defined by XML schema mcc.xsd. Supported components are of types MCC, Service and Plexer. MCC and Service are loaded from dynamic libraries. For Plexer only internal implementation is supported. This object is also a container for loaded componets. All components and chains are destroyed if this object is destroyed. Chains are created in 2 steps. First all components are loaded and corresponding objects are created. Constructors are supplied with corresponding configuration subtrees. During next step components are linked together by calling their Next() methods. Each call creates labeled link to next component in a chain. 
2 step method has an advantage over single step because it allows loops in chains and makes loading procedure more simple. But that also means during short period of time components are only partly configured. Components in such state must produce proper error response if Message arrives. Note: Current implementation requires all components and links to be labeled. All labels must be unique. Future implementation will be able to assign labels automatically. */ class MCCLoader: public Loader { friend class ChainContext; public: typedef std::map mcc_container_t; typedef std::map service_container_t; typedef std::map sechandler_container_t; typedef std::map plexer_container_t; private: bool valid_; /** Set of labeled MCC objects */ mcc_container_t mccs_; /** Set of labeled MCC objects which are not linked by anything */ mcc_container_t mccs_unlinked_; /** Set of MCC objects exposed to external interface */ mcc_container_t mccs_exposed_; /** Set of labeled Service objects */ service_container_t services_; /** Set of labeled security handlers */ sechandler_container_t sechandlers_; /** Set of labeled Plexer objects */ plexer_container_t plexers_; /** Internal method which performs whole stuff specific to creation of Message Chains. It is taken out from constructor to make it easier to reconfigure chains in a future. Returns true if all objects were successfully initialized and all links created. */ bool make_elements(Config& cfg, int level = 0, mcc_connectors_t *mcc_connectors = NULL, plexer_connectors_t *plexer_connectors = NULL); MCC* make_component(Config& cfg, XMLNode cn, mcc_connectors_t *mcc_connectors = NULL); ArcSec::SecHandler* make_sec_handler(Config& cfg, XMLNode& node); ChainContext* context_; std::string error_description_; public: MCCLoader():valid_(false), context_(NULL) {}; /** Constructor that takes whole XML configuration and creates component chains */ MCCLoader(Config& cfg); /** Destructor destroys all components created by constructor */ ~MCCLoader(); /** Access entry MCCs in chains. Those are components exposed for external access using 'entry' attribute */ MCC* operator[](const std::string& id); MCC* operator[](const char* id) { return operator[](std::string(id)); }; operator bool(void) { return valid_; }; bool operator!(void) { return !valid_; }; const std::string& failure(void) { return error_description_; }; bool ReloadElement(Config& cfg); }; /// Interface to chain specific functionality /** Object of this class is associated with every MCCLoader object. It is accessible for MCC and Service components and provides an interface to manipulate chains stored in Loader. This makes it possible to modify chains dynamically - like deploying new services on demand. 
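A minimal sketch of the access this class currently declares (the context pointer itself, here called context, is assumed to have been made available to the component, e.g. through its plugin argument):
\code
ChainContext* context = ...; // provided to the MCC/Service by the loader
PluginsFactory* factory = (PluginsFactory*)(*context);
// factory may now be used to load additional plugins on demand
\endcode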
*/ class ChainContext { friend class MCCLoader; private: MCCLoader& loader_; ChainContext(MCCLoader& loader) : loader_(loader) {}; ~ChainContext() {}; public: /** Returns associated PluginsFactory object */ operator PluginsFactory*() { return loader_.factory_; }; }; } // namespace Arc #endif /* __ARC_MCCLOADER_H__ */ nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/PayloadStream.cpp0000644000000000000000000000012413213471344024354 xustar000000000000000027 mtime=1512993508.704992 27 atime=1513200574.819705 30 ctime=1513200659.021735521 nordugrid-arc-5.4.2/src/hed/libs/message/PayloadStream.cpp0000644000175000002070000001176313213471344024431 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #ifndef WIN32 #include #include #endif #include #include "PayloadStream.h" namespace Arc { PayloadStream::PayloadStream(int h):timeout_(60),handle_(h),seekable_(false) { struct stat st; if(fstat(handle_,&st) != 0) return; if(!(S_ISREG(st.st_mode))) return; seekable_=true; #ifdef WIN32 COMMTIMEOUTS to; if(GetCommTimeouts((HANDLE)handle_,&to)) { to.ReadIntervalTimeout=timeout_*1000; to.ReadTotalTimeoutMultiplier=0; to.ReadTotalTimeoutConstant=timeout_*1000; to.WriteTotalTimeoutMultiplier=0; to.WriteTotalTimeoutConstant=timeout_*1000; SetCommTimeouts((HANDLE)handle_,&to); }; #endif return; } #define FAILSYSERR(msg) { \ failure_ = MCC_Status(GENERIC_ERROR,"STREAM",(msg)+StrError(errno)); \ return false; \ } bool PayloadStream::Get(char* buf,int& size) { if(handle_ == -1) return false; ssize_t l = size; size=0; if(seekable_) { // check for EOF struct stat st; if(::fstat(handle_,&st) != 0) { FAILSYSERR("Can't get state of stream's handle: "); }; off_t o = ::lseek(handle_,0,SEEK_CUR); if(o == (off_t)(-1)) { FAILSYSERR("Failed to seek in stream's handle: "); }; o++; if(o >= st.st_size) { failure_ = MCC_Status(GENERIC_ERROR,"STREAM","Failed to seek to requested position"); return false; }; }; #ifndef WIN32 struct pollfd fd; fd.fd=handle_; fd.events=POLLIN | POLLPRI | POLLERR; fd.revents=0; int r = 0; if((r=poll(&fd,1,timeout_*1000)) != 1) { if(r == 0) { failure_ = MCC_Status(GENERIC_ERROR,"STREAM","Timeout waiting for incoming data"); } else { failure_ = MCC_Status(GENERIC_ERROR,"STREAM","Failed while waiting for incoming data: "+StrError(errno)); }; return false; } if(!(fd.revents & (POLLIN | POLLPRI))) { failure_ = MCC_Status(GENERIC_ERROR,"STREAM","Error in stream's handle"); return false; } #endif l=::read(handle_,buf,l); if(l == -1) { FAILSYSERR("Failed reading stream's handle: "); } size=l; #ifndef WIN32 if((l == 0) && (fd.revents && POLLERR)) { // TODO: remove because it never happens failure_ = MCC_Status(GENERIC_ERROR,"STREAM","Error in stream's handle"); return false; } #else if(l == 0) { failure_ = MCC_Status(GENERIC_ERROR,"STREAM","No data in stream's handle"); return false; } #endif return true; } bool PayloadStream::Put(const char* buf,Size_t size) { ssize_t l; if(handle_ == -1) return false; time_t start = time(NULL); for(;size;) { #ifndef WIN32 struct pollfd fd; fd.fd=handle_; fd.events=POLLOUT | POLLERR; fd.revents=0; int to = timeout_-(unsigned int)(time(NULL)-start); if(to < 0) to=0; int r = 0; if((r=poll(&fd,1,to*1000)) != 1) { if(r == 0) { failure_ = MCC_Status(GENERIC_ERROR,"STREAM","Timeout waiting for outgoing data"); } else { failure_ = MCC_Status(GENERIC_ERROR,"STREAM","Failed while waiting for outgoing data: "+StrError(errno)); }; return false; }; if(!(fd.revents & POLLOUT)) { failure_ = MCC_Status(GENERIC_ERROR,"STREAM","Error in 
stream's handle"); return false; }; #endif l=::write(handle_,buf,size); if(l == -1) { FAILSYSERR("Failed writing into stream's handle: "); } buf+=l; size-=l; #ifdef WIN32 if(size > 0) { int to = timeout_-(unsigned int)(time(NULL)-start); if(to < 0) { failure_ = MCC_Status(GENERIC_ERROR,"STREAM","Timeout waiting for outgoing data"); return false; }; }; #endif }; return true; } // --------------------------------------------------------- bool PayloadStreamInterface::Get(std::string& buf) { char tbuf[1024]; int l = sizeof(tbuf); bool result = Get(tbuf,l); buf.assign(tbuf,l); return result; } std::string PayloadStreamInterface::Get(void) { std::string buf; Get(buf); return buf; } bool PayloadStreamInterface::Get(PayloadStreamInterface& dest,int& size) { char tbuf[1024]; int size_ = 0; bool r = false; while(true) { if(size == 0) { r = true; break; }; int l = size; if((size == -1) || (l > (int)sizeof(tbuf))) l = (int)sizeof(tbuf); if(!Get(tbuf,l)) break; if(l <= 0) { r = true; break; }; size_ += l; if(size != -1) size -= l; if(!dest.Put(tbuf,l)) break; }; size = size_; return r; } bool PayloadStreamInterface::Put(PayloadStreamInterface& source,Size_t size) { char tbuf[1024]; bool r = false; while(true) { if(size == 0) { r = true; break; }; int l = size; if((size == -1) || (l > (int)sizeof(tbuf))) l = (int)sizeof(tbuf); if(!source.Get(tbuf,l)) break; if(l <= 0) { r = true; break; }; if(!Put(tbuf,l)) break; if(size != -1) size -= l; }; return r; } bool PayloadStreamInterface::Put(const std::string& buf) { return Put(buf.c_str(),buf.length()); } bool PayloadStreamInterface::Put(const char* buf) { return Put(buf,buf?strlen(buf):0); } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/SOAPMessage.h0000644000000000000000000000012411611503620023314 xustar000000000000000027 mtime=1311147920.204773 27 atime=1513200574.798705 30 ctime=1513200659.015735448 nordugrid-arc-5.4.2/src/hed/libs/message/SOAPMessage.h0000644000175000002070000000411011611503620023355 0ustar00mockbuildmock00000000000000#ifndef __ARC_SOAPMESSAGE_H__ #define __ARC_SOAPMESSAGE_H__ #include #include "SOAPEnvelope.h" #include "Message.h" namespace Arc { /// Message restricted to SOAP payload /** This is a special Message intended to be used in language bindings for programming languages which are not flexible enough to support all kinds of Payloads. It is passed through chain of MCCs and works like the Message but can carry only SOAP content. */ class SOAPMessage { private: SOAPEnvelope* payload_; /** Main content of message */ MessageAuth* auth_; /** Authentication and authorization related information */ MessageAttributes* attributes_; /** Various useful attributes */ /** This element is maintained by MCC/element which handles/knows persistency of connection. It must be created and destroyed by that element. */ MessageContext* context_; /** No copying is allowed */ SOAPMessage(SOAPMessage& msg); /** No assignment is allowed. */ SOAPMessage& operator=(SOAPMessage& msg); public: /** Dummy constructor */ SOAPMessage(void):payload_(NULL),auth_(NULL),attributes_(NULL),context_(NULL) { }; /** Copy constructor. Used by language bindigs */ SOAPMessage(long msg_ptr_addr); /** Copy constructor. Ensures shallow copy. */ SOAPMessage(Message& msg); /** Destructor does not affect refered objects */ ~SOAPMessage(void); /** Returns pointer to current payload or NULL if no payload assigned. 
*/ SOAPEnvelope* Payload(void); /** Replace payload with a COPY of new one */ void Payload(SOAPEnvelope* new_payload); /** Returns a pointer to the current attributes object or NULL if no attributes object has been assigned. */ MessageAttributes* Attributes(void) { return attributes_; }; void Attributes(MessageAttributes* attributes) { attributes_=attributes; }; MessageAuth *Auth(void) { return auth_; }; void Auth(MessageAuth *auth) { auth_ = auth; }; MessageContext* Context(void) { return context_; }; void Context(MessageContext* context) { context_=context; }; }; } // namespace Arc #endif /* __ARC_SOAPMESSAGE_H__ */ nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/Service.cpp0000644000000000000000000000012411730411253023202 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200574.833705 30 ctime=1513200659.024735558 nordugrid-arc-5.4.2/src/hed/libs/message/Service.cpp0000644000175000002070000000267311730411253023257 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "Service.h" namespace Arc { Logger Service::logger(Logger::getRootLogger(), "Service"); Service::Service(Config*, PluginArgument* arg) : MCCInterface(arg), valid(true) { } void Service::AddSecHandler(Config* cfg,ArcSec::SecHandler* sechandler,const std::string& label) { if(sechandler) { sechandlers_[label].push_back(sechandler); //need polishing to put the SecHandlerFactory->getinstance here XMLNode cn = (*cfg)["SecHandler"]; Config cfg_(cn); } } bool Service::ProcessSecHandlers(Message& message,const std::string& label) const { std::map >::const_iterator q = sechandlers_.find(label); if(q == sechandlers_.end()) { logger.msg(DEBUG, "No security processing/check requested for '%s'", label); return true; } std::list::const_iterator h = q->second.begin(); for(;h!=q->second.end();++h) { const ArcSec::SecHandler* handler = *h; if(handler) if(!(handler->Handle(&message))) { logger.msg(DEBUG, "Security processing/check for '%s' failed", label); return false; } } logger.msg(DEBUG, "Security processing/check for '%s' passed", label); return true; } bool Service::RegistrationCollector(XMLNode& /* doc */) { logger.msg(WARNING, "Empty registration collector"); return true; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/SOAPEnvelope.cpp0000644000000000000000000000012412104732741024046 xustar000000000000000027 mtime=1360246241.410857 27 atime=1513200574.831705 30 ctime=1513200659.019735497 nordugrid-arc-5.4.2/src/hed/libs/message/SOAPEnvelope.cpp0000644000175000002070000003165012104732741024120 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "SOAPEnvelope.h" #define SOAP12_ENV_NAMESPACE "http://www.w3.org/2003/05/soap-envelope" #define SOAP12_ENC_NAMESPACE "http://www.w3.org/2003/05/soap-encoding" #define SOAP11_ENV_NAMESPACE "http://schemas.xmlsoap.org/soap/envelope/" #define SOAP11_ENC_NAMESPACE "http://schemas.xmlsoap.org/soap/encoding/" namespace Arc { namespace internal { class SOAPNS: public NS { public: SOAPNS(bool ver12) { if(ver12) { (*this)["soap-enc"]=SOAP12_ENC_NAMESPACE; (*this)["soap-env"]=SOAP12_ENV_NAMESPACE; } else { (*this)["soap-enc"]=SOAP11_ENC_NAMESPACE; (*this)["soap-env"]=SOAP11_ENV_NAMESPACE; }; } }; } SOAPEnvelope::SOAPEnvelope(const std::string& s):XMLNode(s) { set(); decode(); } SOAPEnvelope::SOAPEnvelope(const char* s,int l):XMLNode(s,l) { set(); decode(); } SOAPEnvelope::SOAPEnvelope(const SOAPEnvelope& soap):XMLNode(),fault(NULL) { soap.envelope.New(*this); set(); } 
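// Builds a fresh envelope with Header and Body children. SOAP 1.2 is used
// when one of the supplied namespaces is the SOAP 1.2 envelope namespace,
// otherwise SOAP 1.1. If 'f' is set, an initial Fault element with a generic
// Receiver/Server code and an "unknown" reason text is created in the Body.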
SOAPEnvelope::SOAPEnvelope(const NS& ns,bool f):XMLNode(ns,"Envelope"),fault(NULL) { XMLNode& it = *this; if(!it) return; ver12=false; for(NS::const_iterator i = ns.begin();i!=ns.end();++i) { if(i->second == SOAP12_ENV_NAMESPACE) { ver12=true; break; }; }; internal::SOAPNS ns_(ver12); ns_["xsi"]="http://www.w3.org/2001/XMLSchema-instance"; ns_["xsd"]="http://www.w3.org/2001/XMLSchema"; XMLNode::Namespaces(ns_); XMLNode::Name("soap-env:Envelope"); // Fixing namespace header=XMLNode::NewChild("soap-env:Header"); body=XMLNode::NewChild("soap-env:Body"); envelope=it; ((SOAPEnvelope*)(&envelope))->is_owner_=true; XMLNode::is_owner_=false; XMLNode::node_=((SOAPEnvelope*)(&body))->node_; if(f) { XMLNode fault_n = body.NewChild("soap-env:Fault"); if(ver12) { XMLNode code_n = fault_n.NewChild("soap-env:Code"); XMLNode reason_n = fault_n.NewChild("soap-env:Reason"); reason_n.NewChild("soap-env:Text")="unknown"; code_n.NewChild("soap-env:Value")="soap-env:Receiver"; } else { XMLNode code_n = fault_n.NewChild("soap-env:faultcode"); XMLNode reason_n = fault_n.NewChild("soap-env:faultstring"); reason_n.NewChild("soap-env:Text")="unknown"; code_n.NewChild("soap-env:Value")="soap-env:Server"; }; fault=new SOAPFault(body); }; } SOAPEnvelope::SOAPEnvelope(XMLNode root):XMLNode(root),fault(NULL) { if(!node_) return; if(node_->type != XML_ELEMENT_NODE) { node_=NULL; return; }; set(); // decode(); ?? } SOAPEnvelope::~SOAPEnvelope(void) { if(fault) delete fault; } // This function is only called from constructor void SOAPEnvelope::set(void) { fault=NULL; XMLNode& it = *this; if(!it) return; ver12=false; if(!it.NamespacePrefix(SOAP12_ENV_NAMESPACE).empty()) ver12=true; internal::SOAPNS ns(ver12); ns["xsi"]="http://www.w3.org/2001/XMLSchema-instance"; ns["xsd"]="http://www.w3.org/2001/XMLSchema"; // Do not apply deeper than Envelope + Header/Body + Fault it.Namespaces(ns,true,2); envelope = it; if((!envelope) || (!MatchXMLName(envelope,"soap-env:Envelope"))) { // No SOAP Envelope found if(is_owner_) xmlFreeDoc(node_->doc); node_=NULL; return; }; if(MatchXMLName(envelope.Child(0),"soap-env:Header")) { // SOAP has Header header=envelope.Child(0); body=envelope.Child(1); } else { // SOAP has no header - create an empty one body=envelope.Child(0); header=envelope.NewChild("soap-env:Header",0,true); }; if(!MatchXMLName(body,"soap-env:Body")) { // No SOAP Body found if(is_owner_) xmlFreeDoc(node_->doc); node_=NULL; return; }; // Transfer ownership. 
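// The 'envelope' member takes over ownership of the underlying XML document,
// while this object is re-pointed at the Body element as a non-owning view,
// so the document is freed only once when the SOAPEnvelope is destroyed.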
((SOAPEnvelope*)(&envelope))->is_owner_=is_owner_; // true // Make this object represent SOAP Body is_owner_=false; this->node_=((SOAPEnvelope*)(&body))->node_; // Check if this message is fault fault = new SOAPFault(body); if(!(*fault)) { delete fault; fault=NULL; } else { // Apply namespaces to Fault element body.Namespaces(ns); } } XMLNode SOAPEnvelope::findid(XMLNode parent, const std::string& id) { XMLNode node; if(!parent) return node; for(int n = 0; (bool)(node = parent.Child(n)); ++n) { if(node.Attribute("id") == id) return node; } if(parent == body) return node; return findid(parent.Parent(),id); } void SOAPEnvelope::decode(XMLNode node) { if(!node) return; // A lot of TODOs if((node.Size() == 0) && ((std::string)node).empty()) { XMLNode href = node.Attribute("href"); if((bool)href) { std::string id = href; if(id[0] == '#') { id = id.substr(1); if(!id.empty()) { // Looking for corresponding id XMLNode id_node = findid(node.Parent(),id); if((bool)id_node) { href.Destroy(); // Replacing content node = (std::string)id_node; XMLNode cnode; for(int n = 0; (bool)(cnode = id_node.Child(n)); ++n) { node.NewChild(cnode); } } } } } } // Repeat for all children nodes XMLNode cnode; for(int n = 0; (bool)(cnode = node.Child(n)); ++n) { decode(cnode); } } void SOAPEnvelope::decode(void) { // Do links in first elelment decode(body.Child(0)); // Remove rest XMLNode cnode; while((bool)(cnode = body.Child(1))) { cnode.Destroy(); } } SOAPEnvelope* SOAPEnvelope::New(void) { XMLNode new_envelope; envelope.New(new_envelope); SOAPEnvelope* new_soap = new SOAPEnvelope(new_envelope); if(new_soap) { ((SOAPEnvelope*)(&(new_soap->envelope)))->is_owner_=true; ((SOAPEnvelope*)(&new_envelope))->is_owner_=false; }; return new_soap; } void SOAPEnvelope::Swap(SOAPEnvelope& soap) { bool ver12_tmp = ver12; ver12=soap.ver12; soap.ver12=ver12_tmp; SOAPFault* fault_tmp = fault; fault=soap.fault; soap.fault=fault_tmp; envelope.Swap(soap.envelope); header.Swap(soap.header); body.Swap(soap.body); XMLNode::Swap(soap); } void SOAPEnvelope::Swap(Arc::XMLNode& soap) { XMLNode& it = *this; envelope.Swap(soap); it.Swap(envelope); envelope=XMLNode(); body=XMLNode(); header=XMLNode(); ver12=false; fault=NULL; set(); } void SOAPEnvelope::Namespaces(const NS& namespaces) { envelope.Namespaces(namespaces); } NS SOAPEnvelope::Namespaces(void) { return (envelope.Namespaces()); } void SOAPEnvelope::GetXML(std::string& out_xml_str,bool user_friendly) const { if(header.Size() == 0) { SOAPEnvelope& it = *(SOAPEnvelope*)this; // Moving header outside tree and then back. 
// This fully recovers initial tree and allows this methof to be const XMLNode tmp_header; it.header.Move(tmp_header); envelope.GetXML(out_xml_str,user_friendly); it.header=it.envelope.NewChild("soap-env:Header",0,true); // It can be any dummy it.header.Exchange(tmp_header); return; }; envelope.GetXML(out_xml_str,user_friendly); } // Wrap existing fault SOAPFault::SOAPFault(XMLNode body) { ver12 = (body.Namespace() == SOAP12_ENV_NAMESPACE); if(body.Size() != 1) return; fault=body.Child(0); if(!MatchXMLName(fault,body.Prefix()+":Fault")) { fault=XMLNode(); return; }; if(!ver12) { code=fault["faultcode"]; if(code) { reason=fault["faultstring"]; node=fault["faultactor"]; role=XMLNode(); detail=fault["detail"]; return; }; } else { code=fault["Code"]; if(code) { reason=fault["Reason"]; node=fault["Node"]; role=fault["Role"]; detail=fault["Detail"]; return; }; }; fault=XMLNode(); return; } // Create new fault in existing body SOAPFault::SOAPFault(XMLNode body,SOAPFaultCode c,const char* r) { ver12 = (body.Namespace() == SOAP12_ENV_NAMESPACE); fault=body.NewChild("Fault"); if(!fault) return; internal::SOAPNS ns(ver12); fault.Namespaces(ns); fault.Name("soap-env:Fault"); Code(c); Reason(0,r); } // Create new fault in existing body SOAPFault::SOAPFault(XMLNode body,SOAPFaultCode c,const char* r,bool v12) { ver12=v12; fault=body.NewChild("soap-env:Fault"); if(!fault) return; Code(c); Reason(0,r); } std::string SOAPFault::Reason(int num) { if(ver12) return reason.Child(num); if(num != 0) return ""; return reason; } void SOAPFault::Reason(int num,const char* r) { if(!r) r = ""; if(ver12) { if(!reason) reason=fault.NewChild(fault.Prefix()+":Reason"); XMLNode rn = reason.Child(num); if(!rn) rn=reason.NewChild(fault.Prefix()+":Text"); rn=r; return; }; if(!reason) reason=fault.NewChild(fault.Prefix()+":faultstring"); if(*r) { reason=r; } else { // RFC says it SHOULD provide some description. // And some implementations take it too literally. 
reason="unknown"; }; return; } std::string SOAPFault::Node(void) { return node; } void SOAPFault::Node(const char* n) { if(!n) n = ""; if(!node) { if(ver12) { node=fault.NewChild(fault.Prefix()+":Node"); } else { node=fault.NewChild(fault.Prefix()+":faultactor"); }; }; node=n; } std::string SOAPFault::Role(void) { return role; } void SOAPFault::Role(const char* r) { if(!r) r = ""; if(ver12) { if(!role) role=fault.NewChild(fault.Prefix()+":Role"); role=r; }; } static const char* FaultCodeMatch(const char* base,const char* code) { if(!base) base = ""; if(!code) code = ""; int l = strlen(base); if(strncasecmp(base,code,l) != 0) return NULL; if(code[l] == 0) return code+l; if(code[l] == '.') return code+l+1; return NULL; } SOAPFault::SOAPFaultCode SOAPFault::Code(void) { if(!code) return undefined; if(ver12) { std::string c = code["Value"]; std::string::size_type p = c.find(":"); if(p != std::string::npos) c.erase(0,p+1); if(strcasecmp("VersionMismatch",c.c_str()) == 0) return VersionMismatch; if(strcasecmp("MustUnderstand",c.c_str()) == 0) return MustUnderstand; if(strcasecmp("DataEncodingUnknown",c.c_str()) == 0) return DataEncodingUnknown; if(strcasecmp("Sender",c.c_str()) == 0) return Sender; if(strcasecmp("Receiver",c.c_str()) == 0) return Receiver; return unknown; } else { std::string c = code; std::string::size_type p = c.find(":"); if(p != std::string::npos) c.erase(0,p+1); if(FaultCodeMatch("VersionMismatch",c.c_str())) return VersionMismatch; if(FaultCodeMatch("MustUnderstand",c.c_str())) return MustUnderstand; if(FaultCodeMatch("Client",c.c_str())) return Sender; if(FaultCodeMatch("Server",c.c_str())) return Receiver; }; return unknown; } void SOAPFault::Code(SOAPFaultCode c) { if(ver12) { if(!code) code=fault.NewChild(fault.Prefix()+":Code"); XMLNode value = code["Value"]; if(!value) value=code.NewChild(code.Prefix()+":Value"); switch(c) { case VersionMismatch: value=value.Prefix()+":VersionMismatch"; break; case MustUnderstand: value=value.Prefix()+":MustUnderstand"; break; case DataEncodingUnknown: value=value.Prefix()+":DataEncodingUnknown"; break; case Sender: value=value.Prefix()+":Sender"; break; case Receiver: value=value.Prefix()+":Receiver"; break; default: value=""; break; }; } else { if(!code) code=fault.NewChild(fault.Prefix()+":faultcode"); switch(c) { case VersionMismatch: code=code.Prefix()+":VersionMismatch"; break; case MustUnderstand: code=code.Prefix()+":MustUnderstand"; break; case Sender: code=code.Prefix()+":Client"; break; case Receiver: code=code.Prefix()+":Server"; break; default: code=""; break; }; }; } std::string SOAPFault::Subcode(int level) { if(!ver12) return ""; if(level < 0) return ""; if(!code) return ""; XMLNode subcode = code; for(;level;--level) { subcode=subcode["Subcode"]; if(!subcode) return ""; }; return subcode["Value"]; } void SOAPFault::Subcode(int level,const char* s) { if(!ver12) return; if(level < 0) return; if(!s) s = ""; if(!code) code=fault.NewChild(fault.Prefix()+":Code"); XMLNode subcode = code; for(;level;--level) { XMLNode subcode_ = subcode["Subcode"]; if(!subcode_) subcode_=subcode.NewChild(subcode.Prefix()+":Subcode"); subcode=subcode_; }; if(!subcode["Value"]) { subcode.NewChild(subcode.Prefix()+":Value")=s; } else { subcode["Value"]=s; }; } XMLNode SOAPFault::Detail(bool create) { if(detail) return detail; if(!create) return XMLNode(); if(!ver12) { detail=fault.NewChild(fault.Prefix()+":detail"); } else { detail=fault.NewChild(fault.Prefix()+":Detail"); }; return detail; } SOAPEnvelope& SOAPEnvelope::operator=(const 
SOAPEnvelope& soap) { if(fault) delete fault; fault=NULL; envelope=XMLNode(); header=XMLNode(); body=XMLNode(); soap.envelope.New(*this); set(); return *this; } SOAPEnvelope* SOAPFault::MakeSOAPFault(SOAPFaultCode code,const std::string& reason) { SOAPEnvelope* out = new SOAPEnvelope(NS(),true); if(!out) return NULL; SOAPFault* fault = out->Fault(); if(!fault) { delete out; return NULL; }; fault->Code(code); fault->Reason(reason); return out; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/MessageAttributes.h0000644000000000000000000000012411327660051024706 xustar000000000000000027 mtime=1264541737.726961 27 atime=1513200574.818705 30 ctime=1513200659.013735423 nordugrid-arc-5.4.2/src/hed/libs/message/MessageAttributes.h0000644000175000002070000002205711327660051024761 0ustar00mockbuildmock00000000000000// MessageAttributes.h #ifndef __ARC_MESSAGE_ATTRIBUTES__ #define __ARC_MESSAGE_ATTRIBUTES__ #include #include namespace Arc { //! A typefed of a multimap for storage of message attributes. /*! This typedef is used as a shorthand for a multimap that uses strings for keys as well as values. It is used within the MesssageAttributes class for internal storage of message attributes, but is not visible externally. */ typedef std::multimap AttrMap; //! A typedef of a const_iterator for AttrMap. /*! This typedef is used as a shorthand for a const_iterator for AttrMap. It is used extensively within the MessageAttributes class as well as the AttributesIterator class, but is not visible externally. */ typedef AttrMap::const_iterator AttrConstIter; //! A typedef of an (non-const) iterator for AttrMap. /*! This typedef is used as a shorthand for a (non-const) iterator for AttrMap. It is used in one method within the MessageAttributes class, but is not visible externally. */ typedef AttrMap::iterator AttrIter; //! A const iterator class for accessing multiple values of an attribute. /*! This is an iterator class that is used when accessing multiple values of an attribute. The getAll() method of the MessageAttributes class returns an AttributeIterator object that can be used to access the values of the attribute. Typical usage is: \code MessageAttributes attributes; ... for (AttributeIterator iterator=attributes.getAll("Foo:Bar"); iterator.hasMore(); ++iterator) std::cout << *iterator << std::endl; \endcode */ class AttributeIterator { public: //! Default constructor. /*! The default constructor. Does nothing since all attributes are instances of well-behaving STL classes. */ AttributeIterator(); //! The dereference operator. /*! This operator is used to access the current value referred to by the iterator. \return A (constant reference to a) string representation of the current value. */ const std::string& operator*() const; //! The arrow operator. /*! Used to call methods for value objects (strings) conveniently. */ const std::string* operator->() const; //! The key of attribute. /*! This method returns reference to key of attribute to which iterator refers. */ const std::string& key(void) const; //! The prefix advance operator. /*! Advances the iterator to the next value. Works intuitively. \return A const reference to this iterator. */ const AttributeIterator& operator++(); //! The postfix advance operator. /*! Advances the iterator to the next value. Works intuitively. \return An iterator referring to the value referred to by this iterator before the advance. */ AttributeIterator operator++(int); //! Predicate method for iteration termination. /*! 
This method determines whether there are more values for the iterator to refer to. \return Returns true if there are more values, otherwise false. */ bool hasMore() const; protected: //! Protected constructor used by the MessageAttributes class. /*! This constructor is used to create an iterator for iteration over all values of an attribute. It is not supposed to be visible externally, but is only used from within the getAll() method of MessageAttributes class. \param begin A const_iterator pointing to the first matching key-value pair in the internal multimap of the MessageAttributes class. \param end A const_iterator pointing to the first key-value pair in the internal multimap of the MessageAttributes class where the key is larger than the key searched for. */ AttributeIterator(AttrConstIter begin, AttrConstIter end); //! A const_iterator pointing to the current key-value pair. /*! This iterator is the internal representation of the current value. It points to the corresponding key-value pair in the internal multimap of the MessageAttributes class. */ AttrConstIter current_; //! A const_iterator pointing beyond the last key-value pair. /*! A const_iterator pointing to the first key-value pair in the internal multimap of the MessageAttributes class where the key is larger than the key searched for. */ AttrConstIter end_; //! The MessageAttributes class is a friend. /*! The constructor that creates an AttributeIterator that is connected to the internal multimap of the MessageAttributes class should not be exposed to the outside, but it still needs to be accessible from the getAll() method of the MessageAttributes class. Therefore, that class is a friend. */ friend class MessageAttributes; }; //! A class for storage of attribute values. /*! This class is used to store attributes of messages. All attribute keys and their corresponding values are stored as strings. Any key or value that is not a string must thus be represented as a string during storage. Furthermore, an attribute is usually a key-value pair with a unique key, but there may also be multiple such pairs with equal keys. The key of an attribute is composed by the name of the Message Chain Component (MCC) which produce it and the name of the attribute itself with a colon (:) in between, i.e. MCC_Name:Attribute_Name. For example, the key of the "Content-Length" attribute of the HTTP MCC is thus "HTTP:Content-Length". There are also "global attributes", which may be produced by different MCCs depending on the configuration. The keys of such attributes are NOT prefixed by the name of the producing MCC. Before any new global attribute is introduced, it must be agreed upon by the core development team and added below. The global attributes decided so far are: \li \c Request-URI Identifies the service to which the message shall be sent. This attribute is produced by e.g. the HTTP MCC and used by the plexer for routing the message to the appropriate service. */ class MessageAttributes { public: //! The default constructor. /*! This is the default constructor of the MessageAttributes class. It constructs an empty object that initially contains no attributes. */ MessageAttributes(); //! Sets a unique value of an attribute. /*! This method removes any previous value of an attribute and sets the new value as the only value. \param key The key of the attribute. \param value The (new) value of the attribute. */ void set(const std::string& key, const std::string& value); //! Adds a value to an attribute. /*! 
This method adds a new value to an attribute. Any previous value will be preserved, i.e. the attribute may become multiple valued. \param key The key of the attribute. \param value The (new) value of the attribute. */ void add(const std::string& key, const std::string& value); //! Removes all attributes with a certain key. /*! This method removes all attributes that match a certain key. \param key The key of the attributes to remove. */ void removeAll(const std::string& key); //! Removes one value of an attribute. /*! This method removes a certain value from the attribute that matches a certain key. \param key The key of the attribute from which the value shall be removed. \param value The value to remove. */ void remove(const std::string& key, const std::string& value); //! Returns the number of values of an attribute. /*! Returns the number of values of an attribute that matches a certain key. \param key The key of the attribute for which to count values. \return The number of values that corresponds to the key. */ int count(const std::string& key) const; //! Returns the value of a single-valued attribute. /*! This method returns the value of a single-valued attribute. If the attribute is not single valued (i.e. there is no such attribute or it is a multiple-valued attribute) an empty string is returned. \param key The key of the attribute for which to return the value. \return The value of the attribute. */ const std::string& get(const std::string& key) const; //! Access the value(s) of an attribute. /*! This method returns an AttributeIterator that can be used to access the values of an attribute. \param key The key of the attribute for which to return the values. \return An AttributeIterator for access of the values of the attribute. */ AttributeIterator getAll(const std::string& key) const; //! Access all value and attributes. AttributeIterator getAll(void) const; protected: //! Internal storage of attributes. /*! An AttrMap (multimap) in which all attributes (key-value pairs) are stored. 
*/ AttrMap attributes_; }; } #endif nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/Message.cpp0000644000000000000000000000012313103157414023167 xustar000000000000000026 mtime=1494015756.80239 27 atime=1513200574.799705 30 ctime=1513200659.026735582 nordugrid-arc-5.4.2/src/hed/libs/message/Message.cpp0000644000175000002070000000242513103157414023240 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include namespace Arc { MessagePayload::MessagePayload(void):failure_(STATUS_OK) { } MessagePayload::~MessagePayload(void) { } MCC_Status MessagePayload::Failure(void) { return failure_; } MessageContext::MessageContext(void) { } MessageContext::~MessageContext(void) { std::map::iterator i; for(i=elements_.begin();i!=elements_.end();++i) { delete i->second; }; } void MessageContext::Add(const std::string& name,MessageContextElement* element) { MessageContextElement* old = operator[](name); if(old != element) { elements_[name]=element; if(old) delete old; } } MessageContextElement* MessageContext::operator[](const std::string& id) { std::map::iterator i; i=elements_.find(id); if(i == elements_.end()) return NULL; return i->second; } Message::Message(long msg_ptr_addr) { Message *msg = (Message *)msg_ptr_addr; auth_ = msg->auth_; auth_created_=false; attr_ = msg->attr_; attr_created_=false; ctx_ = msg->ctx_; ctx_created_=false; auth_ctx_ = msg->auth_ctx_; auth_ctx_created_=false; payload_ = msg->payload_; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/MCCLoader.cpp0000644000000000000000000000012413213471344023340 xustar000000000000000027 mtime=1512993508.704992 27 atime=1513200574.828705 30 ctime=1513200659.030735631 nordugrid-arc-5.4.2/src/hed/libs/message/MCCLoader.cpp0000644000175000002070000004470413213471344023416 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "MCCLoader.h" namespace Arc { MCCLoader::MCCLoader(Config& cfg):Loader(cfg),valid_(false) { context_ = new ChainContext(*this); valid_ = make_elements(cfg); if(!valid_) MCCLoader::logger.msg(VERBOSE, "Chain(s) configuration failed"); } MCCLoader::~MCCLoader(void) { // TODO: stop any processing on those MCCs or mark them for // self-destruction or break links first or use semaphors in // MCC destructors // Unlink all objects for(mcc_container_t::iterator mcc_i = mccs_unlinked_.begin(); mcc_i != mccs_unlinked_.end(); ++mcc_i) { MCC* mcc = mcc_i->second; if(mcc) mcc->Unlink(); } for(mcc_container_t::iterator mcc_i = mccs_.begin(); mcc_i != mccs_.end(); ++mcc_i) { MCC* mcc = mcc_i->second; if(mcc) mcc->Unlink(); } for(plexer_container_t::iterator plexer_i = plexers_.begin(); plexer_i != plexers_.end(); ++plexer_i) { Plexer* plexer = plexer_i->second; if(plexer) plexer->Unlink(); } // Destroy all objects // First unlinked MCC are destroyed because they handle spawned threads // processing request. After they are destroyed there should be no // processing left. 
for(mcc_container_t::iterator mcc_i = mccs_unlinked_.begin(); mcc_i != mccs_unlinked_.end(); mcc_i = mccs_unlinked_.begin()) { MCC* mcc = mcc_i->second; mccs_unlinked_.erase(mcc_i); if(mcc) delete mcc; } // Then ordinary MCCs and other objects for(mcc_container_t::iterator mcc_i = mccs_.begin(); mcc_i != mccs_.end(); mcc_i = mccs_.begin()) { MCC* mcc = mcc_i->second; mccs_.erase(mcc_i); if(mcc) delete mcc; } for(plexer_container_t::iterator plexer_i = plexers_.begin(); plexer_i != plexers_.end(); plexer_i = plexers_.begin()) { Plexer* plexer = plexer_i->second; plexers_.erase(plexer_i); if(plexer) delete plexer; } for(service_container_t::iterator service_i = services_.begin(); service_i != services_.end(); service_i = services_.begin()) { Service* service = service_i->second; services_.erase(service_i); if(service) delete service; } // Last are SecHandlers because now there are no objects which // could use them. for(sechandler_container_t::iterator sechandler_i = sechandlers_.begin(); sechandler_i != sechandlers_.end(); sechandler_i = sechandlers_.begin()) { ArcSec::SecHandler* sechandler = sechandler_i->second; sechandlers_.erase(sechandler_i); if(sechandler) delete sechandler; } if(context_) delete context_; } class mcc_connector_t { public: MCCLoader::mcc_container_t::iterator mcc; std::string name; std::map nexts; mcc_connector_t(MCCLoader::mcc_container_t::iterator mcc) : mcc(mcc) {}; }; std::ostream& operator<<(std::ostream& o, const mcc_connector_t& mcc) { o << mcc.name; o << "(" << mcc.mcc->first << ")"; return o; } class plexer_connector_t { public: MCCLoader::plexer_container_t::iterator plexer; std::map nexts; plexer_connector_t(MCCLoader::plexer_container_t::iterator plexer) : plexer(plexer) {}; }; class mcc_connectors_t : public std::list {}; class plexer_connectors_t : public std::list {}; static XMLNode FindElementByID(XMLNode node, const std::string& id, const std::string& name) { for(int n = 0;; ++n) { XMLNode cn = node.Child(n); if(!cn) break; if(MatchXMLName(cn, "ArcConfig")) { XMLNode result = FindElementByID(cn, id, name); if(result) return result; continue; } if(MatchXMLName(cn, "Chain")) { XMLNode result = FindElementByID(cn, id, name); if(result) return result; continue; } if(MatchXMLName(cn, name)) { if((std::string)(cn.Attribute("id")) == id) return cn; } } return XMLNode(); } ArcSec::SecHandler* MCCLoader::make_sec_handler(Config& cfg, XMLNode& node) { if(!node) { // Normally should not happen MCCLoader::logger.msg(ERROR, "SecHandler configuration is not defined"); error_description_ = "Security plugin configuration is not defined"; return NULL; } XMLNode desc_node; std::string refid = node.Attribute("refid"); if(refid.empty()) { desc_node = node; refid = (std::string)node.Attribute("id"); if(refid.empty()) { refid = "__arc_sechandler_" + tostring(sechandlers_.size()) + "__"; } } else { // Maybe it's already created MCCLoader::sechandler_container_t::iterator phandler = sechandlers_.find(refid); if(phandler != sechandlers_.end()) { return phandler->second; } // Look for it's configuration desc_node = FindElementByID(cfg, refid, "SecHandler"); } if(!desc_node) { MCCLoader::logger.msg(ERROR, "SecHandler has no configuration"); error_description_ = "Security plugin configuration is not defined"; return NULL; } std::string name = desc_node.Attribute("name"); if(name.empty()) { MCCLoader::logger.msg(ERROR, "SecHandler has no name attribute defined"); error_description_ = "Security plugin has no name defined"; return NULL; } // Create new security handler Config 
cfg_(desc_node, cfg.getFileName()); ArcSec::SecHandlerPluginArgument arg(&cfg_,context_); Plugin* plugin = factory_->get_instance(SecHandlerPluginKind, name, &arg); ArcSec::SecHandler* sechandler = plugin?dynamic_cast(plugin):NULL; if(!sechandler) { Loader::logger.msg(ERROR, "Security Handler %s(%s) could not be created", name, refid); error_description_ = "Security plugin component "+name+" could not be created"; // TODO: need a way to propagate error description from factory. } else { Loader::logger.msg(INFO, "SecHandler: %s(%s)", name, refid); sechandlers_[refid] = sechandler; } return sechandler; } MCC* MCCLoader::make_component(Config& cfg, XMLNode cn, mcc_connectors_t *mcc_connectors) { Config cfg_(cn, cfg.getFileName()); std::string name = cn.Attribute("name"); std::string id = cn.Attribute("id"); if(name.empty()) { error_description_ = "Component "+id+" has no name defined"; logger.msg(ERROR, "Component has no name attribute defined"); return NULL; } if(id.empty()) { error_description_ = "Component "+name+" has no id defined"; logger.msg(ERROR, "Component has no ID attribute defined"); return NULL; } MCCPluginArgument arg(&cfg_,context_); Plugin* plugin = factory_->get_instance(MCCPluginKind ,name, &arg); MCC* mcc = plugin?dynamic_cast(plugin):NULL; if(!mcc) { error_description_ = "Component "+name+" could not be created"; // TODO: need a way to propagate error description from factory. logger.msg(VERBOSE, "Component %s(%s) could not be created", name, id); if(plugin) delete plugin; return NULL; } // Configure security plugins XMLNode an = cn["SecHandler"]; for(int n = 0;; ++n) { XMLNode can = an[n]; if(!can) break; ArcSec::SecHandler* sechandler = make_sec_handler(cfg, can); if(!sechandler) { if(plugin) delete plugin; return NULL; }; std::string event = can.Attribute("event"); mcc->AddSecHandler(&cfg_, sechandler, event); } mcc_container_t::iterator mccp = mccs_.find(id); MCC* oldmcc = (mccp == mccs_.end())?NULL:(mccp->second); mccs_[id] = mcc; if(mcc_connectors) { // Add to chain list mcc_connector_t mcc_connector(mccs_.find(id)); for(int nn = 0;; ++nn) { XMLNode cnn = cn["next"][nn]; if(!cnn) break; std::string nid = cnn.Attribute("id"); if(nid.empty()) { logger.msg(ERROR, "Component's %s(%s) next has no ID " "attribute defined", name, id); error_description_ = "Component "+name+" has no id defined in next target"; if(plugin) delete plugin; if(oldmcc) { mccs_[id] = oldmcc; } else { mccs_.erase(id); } return NULL; } std::string label = cnn; mcc_connector.nexts[label] = nid; } mcc_connector.name = name; mcc_connectors->push_back(mcc_connector); } std::string entry = cn.Attribute("entry"); if(!entry.empty()) mccs_exposed_[entry] = mcc; return mcc; } bool MCCLoader::make_elements(Config& cfg, int level, mcc_connectors_t *mcc_connectors, plexer_connectors_t *plexer_connectors) { bool success = true; if(mcc_connectors == NULL) mcc_connectors = new mcc_connectors_t; if(plexer_connectors == NULL) plexer_connectors = new plexer_connectors_t; // 1st stage - creating all elements. 
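  // For orientation, a minimal chain description accepted by this parser
  // could look roughly like the sketch below. Illustrative example only:
  // the component/service names are placeholders, while the element and
  // attribute names ("Chain", "Component", "Plexer", "Service", "next",
  // "id", "name", "entry") follow the matching code in this function.
  //
  //   <ArcConfig>
  //     <Chain>
  //       <Component name="tcp.service" id="tcp" entry="tcp">
  //         <next id="http"/>
  //       </Component>
  //       <Component name="http.service" id="http">
  //         <next id="plexer"/>
  //       </Component>
  //       <Plexer id="plexer">
  //         <next id="echo">^/echo</next>
  //       </Plexer>
  //       <Service name="echo" id="echo"/>
  //     </Chain>
  //   </ArcConfig>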
// Configuration is parsed recursively - going deeper at ArcConfig // and Chain elements for(int n = 0;; ++n) { XMLNode cn = cfg.Child(n); if(!cn) break; Config cfg_(cn, cfg.getFileName()); if(MatchXMLName(cn, "ArcConfig")) { if(!make_elements(cfg_, level + 1, mcc_connectors, plexer_connectors)) success = false; continue; } if(MatchXMLName(cn, "Chain")) { if(!make_elements(cfg_, level + 1, mcc_connectors, plexer_connectors)) success = false; continue; } if(MatchXMLName(cn, "Component")) { // Create new MCC MCC* mcc = make_component(cfg,cn,mcc_connectors); if(!mcc) { success = false; continue; } logger.msg(DEBUG, "Loaded MCC %s(%s)", (std::string)cn.Attribute("name"), (std::string)cn.Attribute("id")); continue; } if(MatchXMLName(cn, "Plexer")) { std::string id = cn.Attribute("id"); if(id.empty()) id = "plexer"; MCCPluginArgument arg(&cfg_,context_); Plexer* plexer = new Plexer(&cfg_,&arg); plexers_[id] = plexer; plexer_connector_t plexer_connector(plexers_.find(id)); for(int nn = 0;; ++nn) { XMLNode cnn = cn["next"][nn]; if(!cnn) break; std::string nid = cnn.Attribute("id"); if(nid.empty()) { logger.msg(ERROR, "Plexer's (%s) next has no ID " "attribute defined", id); error_description_ = "MCC chain is broken in plexer "+id+" - no id of next component defined"; success = false; continue; } std::string label = cnn; plexer_connector.nexts[label] = nid; } plexer_connectors->push_back(plexer_connector); logger.msg(INFO, "Loaded Plexer %s", id); continue; } if(MatchXMLName(cn, "Service")) { std::string name = cn.Attribute("name"); std::string id = cn.Attribute("id"); if(name.empty()) { logger.msg(ERROR, "Service has no Name attribute defined"); error_description_ = "MCC chain is broken in service "+id+" - no name defined"; success = false; continue; } if(id.empty()) { logger.msg(ERROR, "Service has no ID attribute defined"); error_description_ = "MCC chain is broken in service "+name+" - no id defined"; success = false; continue; } ServicePluginArgument arg(&cfg_,context_); Plugin* plugin = factory_->get_instance(ServicePluginKind, name, &arg); Service* service = plugin?dynamic_cast(plugin):NULL; if(!service) { logger.msg(ERROR, "Service %s(%s) could not be created", name, id); error_description_ = "Service component "+name+" could not be created"; // TODO: need a way to propagate error description from factory. success = false; continue; } services_[id] = service; logger.msg(INFO, "Loaded Service %s(%s)", name, id); // Configure security plugins XMLNode an; an = cn["SecHandler"]; for(int n = 0;; ++n) { XMLNode can = an[n]; if(!can) break; ArcSec::SecHandler* sechandler = make_sec_handler(cfg, can); if(!sechandler) { success = false; continue; } std::string event = can.Attribute("event"); service->AddSecHandler(&cfg_, sechandler, event); } continue; } // Configuration processing is split to multiple functions - hence // ignoring all unknown elements. //logger.msg(WARNING, "Unknown element \"%s\" - ignoring", cn.Name()); } if(level != 0) return success; // 2nd stage - making links between elements. 
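  // Each connector recorded during the 1st stage still carries its "next"
  // targets as plain id strings. Below they are resolved against the MCC,
  // Service and Plexer containers (tried in that order) and turned into
  // Next(target,label) links; MCCs which receive at least one incoming link
  // are dropped from mccs_unlinked_. An id that cannot be resolved only
  // marks the chain as broken - links established so far are kept.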
// Making links from MCCs mccs_unlinked_ = mccs_; for(mcc_connectors_t::iterator mcc = mcc_connectors->begin(); mcc != mcc_connectors->end(); ++mcc) { for(std::map::iterator next = mcc->nexts.begin(); next != mcc->nexts.end(); next = mcc->nexts.begin()) { std::string label = next->first; std::string id = next->second; mcc_container_t::iterator mcc_l = mccs_.find(id); if(mcc_l != mccs_.end()) { // Make link MCC->MCC mcc->mcc->second->Next(mcc_l->second, label); logger.msg(DEBUG, "Linking MCC %s(%s) to MCC (%s) under %s", mcc->name, mcc->mcc->first, id, label); mcc->nexts.erase(next); mcc_container_t::iterator mcc_ul = mccs_unlinked_.find(id); if(mcc_ul != mccs_unlinked_.end()) mccs_unlinked_.erase(mcc_ul); continue; } service_container_t::iterator service_l = services_.find(id); if(service_l != services_.end()) { // Make link MCC->Service mcc->mcc->second->Next(service_l->second, label); logger.msg(INFO, "Linking MCC %s(%s) to Service (%s) under %s", mcc->name, mcc->mcc->first, id, label); mcc->nexts.erase(next); continue; } plexer_container_t::iterator plexer_l = plexers_.find(id); if(plexer_l != plexers_.end()) { // Make link MCC->Plexer mcc->mcc->second->Next(plexer_l->second, label); logger.msg(INFO, "Linking MCC %s(%s) to Plexer (%s) under %s", mcc->name, mcc->mcc->first, id, label); mcc->nexts.erase(next); continue; } logger.msg(VERBOSE, "MCC %s(%s) - next %s(%s) has no target", mcc->name, mcc->mcc->first, label, id); error_description_ = "MCC chain is broken - no "+id+" target was found for "+mcc->name+" component"; success = false; mcc->nexts.erase(next); } } // Making links from Plexers for(plexer_connectors_t::iterator plexer = plexer_connectors->begin(); plexer != plexer_connectors->end(); ++plexer) { for(std::map::iterator next = plexer->nexts.begin(); next != plexer->nexts.end(); next = plexer->nexts.begin()) { std::string label = next->first; std::string id = next->second; mcc_container_t::iterator mcc_l = mccs_.find(id); if(mcc_l != mccs_.end()) { // Make link Plexer->MCC plexer->plexer->second->Next(mcc_l->second, label); logger.msg(INFO, "Linking Plexer %s to MCC (%s) under %s", plexer->plexer->first, id, label); plexer->nexts.erase(next); mcc_container_t::iterator mcc_ul = mccs_unlinked_.find(id); if(mcc_ul != mccs_unlinked_.end()) mccs_unlinked_.erase(mcc_ul); continue; } service_container_t::iterator service_l = services_.find(id); if(service_l != services_.end()) { // Make link Plexer->Service plexer->plexer->second->Next(service_l->second, label); logger.msg(INFO, "Linking Plexer %s to Service (%s) under %s", plexer->plexer->first, id, label); plexer->nexts.erase(next); continue; } plexer_container_t::iterator plexer_l = plexers_.find(id); if(plexer_l != plexers_.end()) { // Make link Plexer->Plexer plexer->plexer->second->Next(plexer_l->second, label); logger.msg(INFO, "Linking Plexer %s to Plexer (%s) under %s", plexer->plexer->first, id, label); plexer->nexts.erase(next); continue; } logger.msg(ERROR, "Plexer (%s) - next %s(%s) has no target", plexer->plexer->first, label, id); error_description_ = "MCC chain is broken - no "+id+" target was found for "+plexer->plexer->first+" plexer"; success = false; plexer->nexts.erase(next); } } if(mcc_connectors) delete mcc_connectors; if(plexer_connectors) delete plexer_connectors; // Move all unlinked MCCs to dedicated container for(mcc_container_t::iterator mcc = mccs_unlinked_.begin(); mcc != mccs_unlinked_.end();++mcc) { mcc_container_t::iterator mcc_l = mccs_.find(mcc->first); if(mcc_l != mccs_.end()) mccs_.erase(mcc_l); 
} return success; } bool MCCLoader::ReloadElement(Config& cfg) { XMLNode cn = cfg; if(MatchXMLName(cn, "Component")) { std::string id = cn.Attribute("id"); if(id.empty()) return false; // Look for element to replace mcc_container_t::iterator mccp = mccs_.find(id); if(mccp == mccs_.end()) return false; MCC* oldmcc = mccp->second; // Replace element MCC* mcc = make_component(cfg,cn,NULL); if(!mcc) return false; // Replace references to old element // Create 'next' references // Remove old element and all references for(mccp = mccs_exposed_.begin();mccp != mccs_exposed_.end();) { if(mccp->second == oldmcc) { mccs_exposed_.erase(mccp); mccp = mccs_exposed_.begin(); } else ++mccp; } if(oldmcc) delete oldmcc; return true; } else if(MatchXMLName(cn, "Service")) { return false; } else if(MatchXMLName(cn, "Plexer")) { return false; } return false; } MCC* MCCLoader::operator[](const std::string& id) { mcc_container_t::iterator mcc = mccs_exposed_.find(id); if(mcc != mccs_exposed_.end()) return mcc->second; return NULL; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/MCC.h0000644000000000000000000000012412135423131021647 xustar000000000000000027 mtime=1366697561.553206 27 atime=1513200574.796705 30 ctime=1513200659.011735399 nordugrid-arc-5.4.2/src/hed/libs/message/MCC.h0000644000175000002070000001276212135423131021724 0ustar00mockbuildmock00000000000000#ifndef __ARC_MCC_H__ #define __ARC_MCC_H__ #include #include #include #include #include #include #include #include #include namespace Arc { /// Interface for communication between MCC, Service and Plexer objects. /** The Interface consists of the method process() which is called by the previous MCC in the chain. For memory management policies please read the description of the Message class. */ class MCCInterface: public Plugin { public: MCCInterface(PluginArgument* arg); /** Method for processing of requests and responses. This method is called by preceeding MCC in chain when a request needs to be processed. This method must call similar method of next MCC in chain unless any failure happens. Result returned by call to next MCC should be processed and passed back to previous MCC. In case of failure this method is expected to generate valid error response and return it back to previous MCC without calling the next one. \param request The request that needs to be processed. \param response A Message object that will contain the response of the request when the method returns. \return An object representing the status of the call. */ virtual MCC_Status process(Message& request, Message& response) = 0; virtual ~MCCInterface(); }; /// Message Chain Component - base class for every MCC plugin. /** This is partialy virtual class which defines interface and common functionality for every MCC plugin needed for managing of component in a chain. */ class MCC : public MCCInterface { protected: /** Set of labeled "next" components. Each implemented MCC must call process() method of corresponding MCCInterface from this set in own process() method. */ std::map next_; /** Mutex to protect access to next_. */ Glib::Mutex next_lock_; /** Returns "next" component associated with provided label. */ MCCInterface *Next(const std::string& label = ""); /** Set of labeled authentication and authorization handlers. MCC calls sequence of handlers at specific point depending on associated identifier. In most cases those are "in" and "out" for incoming and outgoing messages correspondingly. 
*/ std::map > sechandlers_; /** Executes security handlers of specified queue. Returns true if the message is authorized for further processing or if there are no security handlers which implement authorization functionality. This is a convenience method and has to be called by the implemention of the MCC. */ bool ProcessSecHandlers(Message& message, const std::string& label = "") const; /// A logger for MCCs. /** A logger intended to be the parent of loggers in the different MCCs. */ static Logger logger; public: /** Example contructor - MCC takes at least it's configuration subtree */ MCC(Config *, PluginArgument* arg); virtual ~MCC(); /** Add reference to next MCC in chain. This method is called by Loader for every potentially labeled link to next component which implements MCCInterface. If next is NULL corresponding link is removed. */ virtual void Next(MCCInterface *next, const std::string& label = ""); /** Add security components/handlers to this MCC. Security handlers are stacked into a few queues with each queue identified by its label. The queue labelled 'incoming' is executed for every 'request' message after the message is processed by the MCC on the service side and before processing on the client side. The queue labelled 'outgoing' is run for response message before it is processed by MCC algorithms on the service side and after processing on the client side. Those labels are just a matter of agreement and some MCCs may implement different queues executed at various message processing steps. */ virtual void AddSecHandler(Config *cfg, ArcSec::SecHandler *sechandler, const std::string& label = ""); /** Removing all links. Useful for destroying chains. */ virtual void Unlink(); /** Dummy Message processing method. Just a placeholder. */ virtual MCC_Status process(Message& /* request */, Message& /* response */) { return MCC_Status(); } }; class MCCConfig : public BaseConfig { public: MCCConfig() : BaseConfig() {} virtual ~MCCConfig() {} virtual XMLNode MakeConfig(XMLNode cfg) const; }; /* class SecHandlerConfig { private: XMLNode cfg_; public: SecHandlerConfig(XMLNode cfg); virtual ~SecHandlerConfig() {} virtual XMLNode MakeConfig(XMLNode cfg) const; }; */ #define MCCPluginKind ("HED:MCC") class ChainContext; class MCCPluginArgument: public PluginArgument { private: Config* config_; ChainContext* context_; public: MCCPluginArgument(Config* config,ChainContext* context):config_(config),context_(context) { }; virtual ~MCCPluginArgument(void) { }; operator Config* (void) { return config_; }; operator ChainContext* (void) { return context_; }; }; } // namespace Arc #endif /* __ARC_MCC_H__ */ nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/PayloadRaw.h0000644000000000000000000000012411205460514023313 xustar000000000000000027 mtime=1242980684.465954 27 atime=1513200574.824705 30 ctime=1513200659.008735362 nordugrid-arc-5.4.2/src/hed/libs/message/PayloadRaw.h0000644000175000002070000000716011205460514023364 0ustar00mockbuildmock00000000000000#ifndef __ARC_PAYLOADRAW_H__ #define __ARC_PAYLOADRAW_H__ #include #include "Message.h" namespace Arc { /// Random Access Payload for Message objects /** This class is a virtual interface for managing Message payload with arbitrarily accessible content. Inheriting classes are supposed to implement memory-resident or memory-mapped content made of optionally multiple chunks/buffers. Every buffer has own size and offset. This class is purely virtual. 
*/ class PayloadRawInterface: virtual public MessagePayload { public: // Avoid defining size of int - just use biggest possible typedef signed long long int Size_t; PayloadRawInterface(void) { }; virtual ~PayloadRawInterface(void) { }; /** Returns content of byte at specified position. Specified position 'pos' is treated as global one and goes through all buffers placed one after another. */ virtual char operator[](Size_t pos) const = 0; /** Get pointer to buffer content at global position 'pos'. By default to beginning of main buffer whatever that means. */ virtual char* Content(Size_t pos = -1) = 0; /** Returns logical size of whole structure. */ virtual Size_t Size(void) const = 0; /** Create new buffer at global position 'pos' of size 'size'. */ virtual char* Insert(Size_t pos = 0,Size_t size = 0) = 0; /** Create new buffer at global position 'pos' of size 'size'. Created buffer is filled with content of memory at 's'. If 'size' is negative content at 's' is expected to be null-terminated. */ virtual char* Insert(const char* s,Size_t pos = 0,Size_t size = -1) = 0; /** Returns pointer to num'th buffer */ virtual char* Buffer(unsigned int num) = 0; /** Returns length of num'th buffer */ virtual Size_t BufferSize(unsigned int num) const = 0; /** Returns position of num'th buffer */ virtual Size_t BufferPos(unsigned int num) const = 0; /** Change size of stored information. If size exceeds end of allocated buffer, buffers are not re-allocated, only logical size is extended. Buffers with location behind new size are deallocated. */ virtual bool Truncate(Size_t size) = 0; }; /* Buffer type for PayloadRaw */ typedef struct { char* data; /** pointer to buffer in memory */ int size; /** size of allocated memory */ int length; /** size of used memory - size of buffer */ bool allocated; /** true if memory has to free by destructor */ } PayloadRawBuf; /// Raw byte multi-buffer. /** This is implementation of PayloadRawInterface. Buffers are memory blocks logically placed one after another. */ class PayloadRaw: virtual public PayloadRawInterface { protected: Size_t offset_; Size_t size_; std::vector buf_; /** List of handled buffers. */ public: /** Constructor. Created object contains no buffers. */ PayloadRaw(void):offset_(0),size_(0) { }; /** Destructor. Frees allocated buffers. */ virtual ~PayloadRaw(void); virtual char operator[](Size_t pos) const; virtual char* Content(Size_t pos = -1); virtual Size_t Size(void) const; virtual char* Insert(Size_t pos = 0,Size_t size = 0); virtual char* Insert(const char* s,Size_t pos = 0,Size_t size = -1); virtual char* Buffer(unsigned int num = 0); virtual Size_t BufferSize(unsigned int num = 0) const; virtual Size_t BufferPos(unsigned int num = 0) const; virtual bool Truncate(Size_t size); }; /// Returns pointer to main memory chunk of Message payload. /** If no buffer is present or if payload is not of PayloadRawInterface type NULL is returned. 
*/ const char* ContentFromPayload(const MessagePayload& payload); } // namespace Arc #endif /* __ARC_PAYLOADRAW_H__ */ nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/SOAPEnvelope.h0000644000000000000000000000012312574532370023521 xustar000000000000000027 mtime=1441969400.372727 27 atime=1513200574.821705 29 ctime=1513200659.00773535 nordugrid-arc-5.4.2/src/hed/libs/message/SOAPEnvelope.h0000644000175000002070000001640512574532370023575 0ustar00mockbuildmock00000000000000#ifndef __ARC_SOAPENVELOP_H__ #define __ARC_SOAPENVELOP_H__ #include #include namespace Arc { class SOAPEnvelope; /// Interface to SOAP Fault message. /** SOAPFault class provides a convenience interface for accessing elements of SOAP faults. It also tries to expose single interface for both version 1.0 and 1.2 faults. This class is not intended to 'own' any information stored. It's purpose is to manipulate information which is kept under control of XMLNode or SOAPEnvelope classes. If instance does not refer to valid SOAP Fault structure all manipulation methods will have no effect. */ class SOAPFault { friend class SOAPEnvelope; private: bool ver12; /** true if SOAP version is 1.2 */ XMLNode fault; /** Fault element of SOAP */ XMLNode code; /** Code element of SOAP Fault */ XMLNode reason; /** Reason element of SOAP Fault */ XMLNode node; /** Node element of SOAP Fault */ XMLNode role; /** Role element of SOAP Fault */ XMLNode detail; /** Detail element of SOAP Fault */ public: /** Fault codes of SOAP specs */ typedef enum { undefined, unknown, VersionMismatch, MustUnderstand, Sender, /* Client in SOAP 1.0 */ Receiver, /* Server in SOAP 1.0 */ DataEncodingUnknown } SOAPFaultCode; /** Parse Fault elements of SOAP Body or any other XML tree with Fault element */ SOAPFault(XMLNode body); /** Creates Fault element inside @body SOAP Body node with specified @code and @reason. Version of Fault element is picked from SOAP Body version. */ SOAPFault(XMLNode body,SOAPFaultCode code,const char* reason); /** Creates Fault element inside @body SOAP Body node with specified @code and @reason. SOAP version of Fault element must be specified explicitly. */ SOAPFault(XMLNode body,SOAPFaultCode code,const char* reason,bool ver12); /** Returns true if instance refers to SOAP Fault */ operator bool(void) { return (bool)fault; }; /** Returns top level Fault element. This element is not automatically created. */ operator XMLNode(void) { return fault; }; /** Returns Fault Code element */ SOAPFaultCode Code(void); /** Set Fault Code element */ void Code(SOAPFaultCode code); /** Returns Fault Subcode element at various levels (0 is for Code) */ std::string Subcode(int level); /** Set Fault Subcode element at various levels (0 is for Code) to 'subcode' */ void Subcode(int level,const char* subcode); /** Returns content of Fault Reason element at various levels */ std::string Reason(int num = 0); /** Set Fault Reason content at various levels to 'reason' */ void Reason(int num,const char* reason); /** Set Fault Reason element at top level */ void Reason(const char* reason) { Reason(0,reason); }; /** Set Fault Reason element at top level */ void Reason(const std::string &reason) { Reason(0, reason.c_str()); }; /** Returns content of Fault Node element */ std::string Node(void); /** Set content of Fault Node element to 'node' */ void Node(const char* node); /** Returns content of Fault Role element */ std::string Role(void); /** Set content of Fault Role element to 'role' */ void Role(const char* role); /** Access Fault Detail element. 
If create is set to true this element is created if not present. */ XMLNode Detail(bool create = false); /** Convenience method for creating SOAP Fault message. Returns full SOAP message representing Fault with specified code and reason. */ static SOAPEnvelope* MakeSOAPFault(SOAPFaultCode code,const std::string& reason = ""); }; /// Extends XMLNode class to support structures of SOAP message. /** All XMLNode methods are exposed by inheriting from XMLNode and if used are applied to Body part of SOAP. Direct access to whole SOAP message/Envelope is not provided in order to protect internal variables - although full protection is not possible. */ class SOAPEnvelope: public XMLNode { public: typedef enum { Version_1_1, Version_1_2 } SOAPVersion; /** Create new SOAP message from textual representation of XML document. Created XML structure is owned by this instance. This constructor also sets default namespaces to default prefixes as specified below. */ SOAPEnvelope(const std::string& xml); /** Same as previous */ SOAPEnvelope(const char* xml,int len = -1); /** Create new SOAP message with specified namespaces. Created XML structure is owned by this instance. If argument fault is set to true created message is fault. */ SOAPEnvelope(const NS& ns,bool fault = false); /** Acquire XML document as SOAP message. Created XML structure is NOT owned by this instance. */ SOAPEnvelope(XMLNode root); /** Create a copy of another SOAPEnvelope object. */ SOAPEnvelope(const SOAPEnvelope& soap); ~SOAPEnvelope(void); /** Creates complete copy of SOAP. Do not use New() method of XMLNode for copying whole SOAP - use this one. */ SOAPEnvelope* New(void); /** Swaps internals of two SOAPEnvelope instances. This method is identical to XMLNode::Swap() but also takes into account all internals of SOAPEnvelope class. */ void Swap(SOAPEnvelope& soap); /** Swaps SOAP message and generic XML tree. XMLNode variable gets whole SOAP message and this instance is filled with content of analyzed XMLNode like in case of SOAPEnvelope(XMLNode) constructor. Ownerships are swapped too. */ void Swap(Arc::XMLNode& soap); /** Modify assigned namespaces. Default namespaces and prefixes are soap-enc http://schemas.xmlsoap.org/soap/encoding/ soap-env http://schemas.xmlsoap.org/soap/envelope/ xsi http://www.w3.org/2001/XMLSchema-instance xsd http://www.w3.org/2001/XMLSchema */ void Namespaces(const NS& namespaces); NS Namespaces(void); // Setialize SOAP message into XML document void GetXML(std::string& out_xml_str,bool user_friendly = false) const; /** Get SOAP header as XML node. */ XMLNode Header(void) { return header; }; /** Get SOAP body as XML node. It is not necessary to use this method because instance of this class itself represents SOAP Body. */ XMLNode Body(void) { return body; }; /** Returns true if message is Fault. */ bool IsFault(void) { return fault != NULL; }; /** Get Fault part of message. Returns NULL if message is not Fault. */ SOAPFault* Fault(void) { return fault; }; /** Makes this object a copy of another SOAPEnvelope object. */ SOAPEnvelope& operator=(const SOAPEnvelope& soap); SOAPVersion Version(void) { return ver12?Version_1_2:Version_1_1; }; protected: XMLNode envelope; /** Envelope element of SOAP and owner of XML tree */ XMLNode header; /** Header element of SOAP */ XMLNode body; /** Body element of SOAP */ private: bool ver12; /** Is true if SOAP version 1.2 is used */ SOAPFault* fault; /**Fault element of SOAP, NULL if message is not a fault. */ /** Fill instance variables parent XMLNode class. 
This method is called from constructors. */ void set(void); /** Decodes SOAP message encoded according to SOAP Encoding rules. Currently only internal href/id links are supported. */ void decode(void); void decode(XMLNode node); XMLNode findid(XMLNode parent,const std::string& id); }; } // namespace Arc #endif /* __ARC_SOAPENVELOP_H__ */ nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/Plexer.cpp0000644000000000000000000000012412135423131023037 xustar000000000000000027 mtime=1366697561.553206 27 atime=1513200574.802705 30 ctime=1513200659.024735558 nordugrid-arc-5.4.2/src/hed/libs/message/Plexer.cpp0000644000175000002070000000431612135423131023110 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif // Plexer.cpp #include "Plexer.h" namespace Arc { PlexerEntry::PlexerEntry(const RegularExpression& label, MCCInterface* mcc) : label(label), mcc(mcc) { } Plexer::Plexer(Config *cfg, PluginArgument* arg) : MCC(cfg, arg) { } Plexer::~Plexer(){ } void Plexer::Next(MCCInterface* next, const std::string& label){ std::list::iterator iter; if (next!=0) { RegularExpression regex(label); if (regex.isOk()) { mccs.push_front(PlexerEntry(regex,next)); } else { logger.msg(WARNING, "Bad label: \"%s\"", label); } } else { for (iter=mccs.begin(); iter!=mccs.end();) { if (iter->label.hasPattern(label)) { iter = mccs.erase(iter); } else { ++iter; } } } } MCC_Status Plexer::process(Message& request, Message& response){ std::string ep = request.Attributes()->get("ENDPOINT"); std::string path = getPath(ep); logger.msg(VERBOSE, "Operation on path \"%s\"",path); std::list::iterator iter; for (iter=mccs.begin(); iter!=mccs.end(); ++iter) { std::list unmatched, matched; if (iter->label.match(path, unmatched, matched)) { request.Attributes()->set("PLEXER:PATTERN",iter->label.getPattern()); request.Attributes()->set("PLEXER:EXTENSION", ""); if(unmatched.size() > 0) { request.Attributes()->set("PLEXER:EXTENSION",*(--unmatched.end())); }; return iter->mcc->process(request, response); } } logger.msg(WARNING, "No next MCC or Service at path \"%s\"",path); return MCC_Status(UNKNOWN_SERVICE_ERROR, (std::string)("MCC Plexer"), path); } //XXX: workaround because the python bindig segmentation fault Logger Plexer::logger(Logger::rootLogger,"Plexer"); std::string Plexer::getPath(std::string url){ // TODO: Need even more reliable URL detection std::string::size_type ds, ps; ds=url.find("://"); if (ds==std::string::npos) ps=url.find("/"); else ps=url.find("/", ds+3); if (ps==std::string::npos) return ""; else return url.substr(ps); } } nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/MCC.cpp0000644000000000000000000000012412135423131022202 xustar000000000000000027 mtime=1366697561.553206 27 atime=1513200574.819705 30 ctime=1513200659.023735546 nordugrid-arc-5.4.2/src/hed/libs/message/MCC.cpp0000644000175000002070000001070112135423131022246 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include namespace Arc { Logger MCC::logger(Logger::getRootLogger(), "MCC"); MCCInterface::MCCInterface(PluginArgument* arg):Plugin(arg) { } MCCInterface::~MCCInterface() { } MCC::~MCC() { } MCC::MCC(Config *, PluginArgument* arg):MCCInterface(arg) { } void MCC::Next(MCCInterface *next, const std::string& label) { Glib::Mutex::Lock lock(next_lock_); if (next == NULL) next_.erase(label); else next_[label] = next; } MCCInterface *MCC::Next(const std::string& label) { Glib::Mutex::Lock lock(next_lock_); std::map::iterator n = next_.find(label); if (n == next_.end()) return 
NULL; return n->second; } void MCC::Unlink() { Glib::Mutex::Lock lock(next_lock_); next_.clear(); } void MCC::AddSecHandler(Config *cfg, ArcSec::SecHandler *sechandler, const std::string& label) { if (sechandler) { sechandlers_[label].push_back(sechandler); // need polishing to put the SecHandlerFactory->getinstance here XMLNode cn = (*cfg)["SecHandler"]; Config cfg_(cn); } } bool MCC::ProcessSecHandlers(Message& message, const std::string& label) const { // Each MCC/Service can define security handler queues in the configuration // file, the queues have labels specified in handlers configuration 'event' // attribute. // Security handlers in one queue are called sequentially. // Each one should be configured carefully, because there can be some // relationship between them (e.g. authentication should be put in front // of authorization). // The SecHandler::Handle() only returns true/false with true meaning that // handler processed message successfuly. If SecHandler implements // authorization functionality, it returns false if message is disallowed // and true otherwise. // If any SecHandler in the handler chain produces some information which // will be used by some following handler, the information should be // stored in the attributes of message (e.g. the Identity extracted from // authentication will be used by authorization to make access control // decision). std::map >::const_iterator q = sechandlers_.find(label); if (q == sechandlers_.end()) { logger.msg(DEBUG, "No security processing/check requested for '%s'", label); return true; } for (std::list::const_iterator h = q->second.begin(); h != q->second.end(); ++h) { const ArcSec::SecHandler *handler = *h; if (!handler) continue; // Shouldn't happen. Just a sanity check. if (!(handler->Handle(&message))) { logger.msg(INFO, "Security processing/check failed"); return false; } } logger.msg(DEBUG, "Security processing/check passed"); return true; } XMLNode MCCConfig::MakeConfig(XMLNode cfg) const { XMLNode mm = BaseConfig::MakeConfig(cfg); std::list mccs; for (std::list::const_iterator path = plugin_paths.begin(); path != plugin_paths.end(); path++) { try { Glib::Dir dir(*path); for (Glib::DirIterator file = dir.begin(); file != dir.end(); file++) { if ((*file).substr(0, 6) == "libmcc") { std::string name = (*file).substr(6, (*file).find('.') - 6); if (std::find(mccs.begin(), mccs.end(), name) == mccs.end()) { mccs.push_back(name); cfg.NewChild("Plugins").NewChild("Name") = "mcc" + name; } } //Since the security handler could also be used by mcc like // tls and soap, putting the libarcshc here. Here we suppose // all of the sec handlers are put in libarcshc // TODO: Rewrite it to behave in generic way. 
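        // The libarcshc handling sketched in the commented-out block below
        // is currently disabled - only libmcc* modules are advertised
        // through the <Plugins><Name> elements added above.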
//if ((*file).substr(0, 9) == "libarcshc") { // cfg.NewChild("Plugins").NewChild("Name") = "arcshc"; //} } } catch (Glib::FileError&) {} } return mm; } /* SecHandlerConfig::SecHandlerConfig(XMLNode cfg) { cfg.New(cfg_); NS ns("cfg","http://www.nordugrid.org/schemas/ArcConfig/2007"); cfg_.Namespaces(ns); cfg_.Name("cfg:SecHandler"); } XMLNode SecHandlerConfig::MakeConfig(XMLNode cfg) const { cfg.NewChild(cfg_); return cfg; } */ } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/SecAttr.cpp0000644000000000000000000000012413103157440023150 xustar000000000000000027 mtime=1494015776.888883 27 atime=1513200574.821705 30 ctime=1513200659.029735619 nordugrid-arc-5.4.2/src/hed/libs/message/SecAttr.cpp0000644000175000002070000000533113103157440023217 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "SecAttr.h" namespace Arc { SecAttrFormat SecAttr::UNDEFINED; SecAttrFormat SecAttr::ARCAuth("arc"); SecAttrFormat SecAttr::XACML("xacml"); SecAttrFormat SecAttr::SAML("saml"); SecAttrFormat SecAttr::GACL("gacl"); bool SecAttr::equal(const SecAttr&) const { return false; } SecAttr::operator bool() const { return false; } bool SecAttr::Export(SecAttrFormat format,std::string &val) const { NS ns; XMLNode x(ns, ""); if(!Export(format,x)) return false; x.GetXML(val); return true; } bool SecAttr::Export(SecAttrFormat, XMLNode&) const { return false; } bool SecAttr::Import(SecAttrFormat format,const std::string &val) { XMLNode x(val); if(!x) return false; return Import(format,x); } bool SecAttr::Import(SecAttrFormat, XMLNode) { return false; } std::string SecAttr::get(const std::string&) const { return std::string(); } std::list SecAttr::getAll(const std::string& id) const { std::list items; std::string item = get(id); if(!item.empty()) items.push_back(item); return items; } MultiSecAttr::~MultiSecAttr() { for(std::list::iterator attr = attrs_.begin(); attr != attrs_.end(); ++attr) { delete *attr; } } MultiSecAttr::operator bool() const { return !attrs_.empty(); } bool MultiSecAttr::Export(SecAttrFormat format,XMLNode &val) const { // Name of created node to be replaced by inheriting class if(!val) { NS ns; XMLNode(ns,"MultiSecAttr").New(val); } else { val.Name("MultiSecAttr"); }; for(std::list::const_iterator a = attrs_.begin(); a!=attrs_.end();++a) { NS ns; XMLNode x(ns,""); if(!((*a)->Export(format,x))) return false; val.NewChild(x); } return true; } bool MultiSecAttr::Import(SecAttrFormat format,XMLNode val) { XMLNode x = val.Child(0); for(;(bool)x;x=x[1]) { if(!Add(format,x)) return false; } return true; } // This method to be implemented in inheriting classes // or there must be an automatic detection of registered // object types implemented. 
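  // As a consequence the default Add() below rejects every attribute, which
  // makes MultiSecAttr::Import() fail for any input that contains at least
  // one child element until a subclass supplies a real implementation.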
bool MultiSecAttr::Add(SecAttrFormat, XMLNode&) { return false; } // This implemention assumes odrered lists of attributes bool MultiSecAttr::equal(const SecAttr &b) const { try { const MultiSecAttr& b_ = dynamic_cast(b); std::list::const_iterator i_a = attrs_.begin(); std::list::const_iterator i_b = b_.attrs_.begin(); for(;;) { if((i_a == attrs_.end()) && (i_b == b_.attrs_.end())) break; if(i_a == attrs_.end()) return false; if(i_b == b_.attrs_.end()) return false; if((**i_a) != (**i_b)) return false; } return true; } catch(std::exception&) { }; return false; } } nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/PayloadRaw.cpp0000644000000000000000000000012412071010044023635 xustar000000000000000027 mtime=1357123620.186841 27 atime=1513200574.833705 30 ctime=1513200659.020735509 nordugrid-arc-5.4.2/src/hed/libs/message/PayloadRaw.cpp0000644000175000002070000001263412071010044023710 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "PayloadRaw.h" namespace Arc { PayloadRaw::~PayloadRaw(void) { for(std::vector::iterator b = buf_.begin();b!=buf_.end();++b) { if(b->allocated) free(b->data); }; } static bool BufferAtPos(const std::vector& buf_,PayloadRawInterface::Size_t pos,unsigned int& bufnum,PayloadRawInterface::Size_t& bufpos) { if(pos == -1) pos=0; if(pos < 0) return false; PayloadRawInterface::Size_t cpos = 0; for(bufnum = 0;bufnumpos) { bufpos=pos-(cpos-buf_[bufnum].length); return true; }; }; return false; } static bool BufferAtPos(std::vector& buf_,PayloadRawInterface::Size_t pos,std::vector::iterator& bufref,PayloadRawInterface::Size_t& bufpos) { if(pos == -1) pos=0; if(pos < 0) return false; PayloadRawInterface::Size_t cpos = 0; for(bufref = buf_.begin();bufref!=buf_.end();++bufref) { cpos+=bufref->length; if(cpos>pos) { bufpos=pos-(cpos-bufref->length); return true; }; }; return false; } char* PayloadRaw::Content(Size_t pos) { unsigned int bufnum; Size_t bufpos; if(!BufferAtPos(buf_,pos-offset_,bufnum,bufpos)) { failure_ = MCC_Status(GENERIC_ERROR,"RAW","Position is out of range"); return NULL; }; return buf_[bufnum].data+bufpos; } char PayloadRaw::operator[](Size_t pos) const { unsigned int bufnum; Size_t bufpos; if(!BufferAtPos(buf_,pos-offset_,bufnum,bufpos)) { // Avoid setting failure because behavior is well defined //failure_ = MCC_Status(GENERIC_ERROR,"RAW","Position is out of range"); return 0; }; return buf_[bufnum].data[bufpos]; } PayloadRaw::Size_t PayloadRaw::Size(void) const { return size_; //Size_t cpos = 0; //for(unsigned int bufnum = 0;bufnum::iterator bufref; Size_t bufpos; if(!BufferAtPos(buf_,pos-offset_,bufref,bufpos)) { bufref=buf_.end(); bufpos=0; if(buf_.size() == 0) { offset_=pos; } else { pos = 0; for(unsigned int bufnum = 0;bufnumlength - bufpos; buf.data=(char*)malloc(buf.size+1); if(!buf.data) return NULL; buf.data[buf.size]=0; memcpy(buf.data,bufref->data+bufpos,buf.size); buf.length=buf.size; buf.allocated=true; bufref->length=bufpos; bufref->data[bufref->length]=0; if(bufref->allocated) { char* b = (char*)realloc(bufref->data,bufref->length+1); if(b) { bufref->size=bufref->length; bufref->data=b; }; }; ++bufref; bufref=buf_.insert(bufref,buf); }; // Inserting between buffers buf.data=(char*)malloc(size+1); if(!buf.data) return NULL; buf.data[size]=0; buf.size=size; buf.length=size; buf.allocated=true; buf_.insert(bufref,buf); if((pos+size) > size_) size_=pos+size; return buf.data; } char* PayloadRaw::Insert(const char* s,Size_t pos,Size_t size) { if(size < 0) size=strlen(s); char* s_ = 
Insert(pos,size); if(s_) { memcpy(s_,s,size); } else { failure_ = MCC_Status(GENERIC_ERROR,"RAW","Failed to allocate new buffer"); }; return s_; } char* PayloadRaw::Buffer(unsigned int num) { if(num>=buf_.size()) { // Avoid setting failure because behavior is well defined //failure_ = MCC_Status(GENERIC_ERROR,"RAW","Index is out of range"); return NULL; }; return buf_[num].data; } PayloadRaw::Size_t PayloadRaw::BufferSize(unsigned int num) const { if(num>=buf_.size()) { // Avoid setting failure because behavior is well defined //failure_ = MCC_Status(GENERIC_ERROR,"RAW","Index is out of range"); return 0; }; return buf_[num].length; } PayloadRaw::Size_t PayloadRaw::BufferPos(unsigned int num) const { Size_t pos = offset_; std::vector::const_iterator b = buf_.begin(); for(;b!=buf_.end();++b) { if(!num) break; pos+=(b->length); }; return pos; } bool PayloadRaw::Truncate(Size_t size) { if(size_ == size) return true; // Buffer is already of right size if(size_ < size) { // Buffer needs to be extended size_=size; return true; }; if(size <= offset_) { // All buffers must be released offset_=size; for(std::vector::iterator b = buf_.begin();b!=buf_.end();) { if(b->allocated) free(b->data); b=buf_.erase(b); }; size_=size; return true; }; Size_t l = offset_; for(unsigned int bufnum = 0;bufnum::iterator b; Size_t p; if(!BufferAtPos(buf_,size-offset_,b,p)) return false; if(p != 0) { b->length=p; ++b; }; for(;b!=buf_.end();) { if(b->allocated) free(b->data); b=buf_.erase(b); }; size_=size; return true; } const char* ContentFromPayload(const MessagePayload& payload) { try { const PayloadRawInterface& buffer = dynamic_cast(payload); return ((PayloadRawInterface&)buffer).Content(); } catch(std::exception& e) { }; return ""; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/MessageAttributes.cpp0000644000000000000000000000012310706224670025243 xustar000000000000000027 mtime=1192831416.781004 27 atime=1513200574.831705 29 ctime=1513200659.02573557 nordugrid-arc-5.4.2/src/hed/libs/message/MessageAttributes.cpp0000644000175000002070000000435710706224670025322 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif // MessageAttributes.h #include "MessageAttributes.h" namespace Arc { AttributeIterator::AttributeIterator(){ } AttributeIterator::AttributeIterator(AttrConstIter begin, AttrConstIter end) : current_(begin), end_(end) { } const std::string& AttributeIterator::operator*() const { return current_->second; } const std::string* AttributeIterator::operator->() const { return &(current_->second); } const std::string& AttributeIterator::key(void) const { return current_->first; } const AttributeIterator& AttributeIterator::operator++() { ++current_; return *this; } AttributeIterator AttributeIterator::operator++(int) { AttrConstIter recent=current_++; return AttributeIterator(recent,end_); } bool AttributeIterator::hasMore() const { return current_!=end_; } MessageAttributes::MessageAttributes() { } void MessageAttributes::set(const std::string& key, const std::string& value) { removeAll(key); add(key, value); } void MessageAttributes::add(const std::string& key, const std::string& value) { attributes_.insert(make_pair(key, value)); } void MessageAttributes::removeAll(const std::string& key) { attributes_.erase(key); } void MessageAttributes::remove(const std::string& key, const std::string& value) { AttrIter begin = attributes_.lower_bound(key); AttrIter end = attributes_.upper_bound(key); for (AttrIter i=begin; i!=end; i++) if (i->second==value) 
attributes_.erase(i); } int MessageAttributes::count(const std::string& key) const { return attributes_.count(key); } const std::string& MessageAttributes::get(const std::string& key) const { static std::string emptyString=""; if (count(key)==1) return attributes_.find(key)->second; else return emptyString; // Throw an exception? } AttributeIterator MessageAttributes::getAll(const std::string& key) const { return AttributeIterator(attributes_.lower_bound(key), attributes_.upper_bound(key)); } AttributeIterator MessageAttributes::getAll(void) const { return AttributeIterator(attributes_.begin(), attributes_.end()); } } nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/Service.h0000644000000000000000000000012411730411253022647 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200574.822705 30 ctime=1513200659.012735411 nordugrid-arc-5.4.2/src/hed/libs/message/Service.h0000644000175000002070000000774711730411253022733 0ustar00mockbuildmock00000000000000#ifndef __ARC_SERVICE_H__ #define __ARC_SERVICE_H__ #include #include #include #include #include namespace Arc { /// Service - last component in a Message Chain. /** This class which defines interface and common functionality for every Service plugin. Interface is made of method process() which is called by Plexer or MCC class. There is one Service object created for every service description processed by Loader class objects. Classes derived from Service class must implement process() method of MCCInterface. It is up to developer how internal state of service is stored and communicated to other services and external utilities. Service is free to expect any type of payload passed to it and generate any payload as well. Useful types depend on MCCs in chain which leads to that service. For example if service is expected to by linked to SOAP MCC it must accept and generate messages with PayloadSOAP payload. Method process() of class derived from Service class may be called concurrently in multiple threads. Developers must take that into account and write thread-safe implementation. Simple example of service is provided in /src/tests/echo/echo.cpp of source tree. The way to write client counterpart of corresponding service is undefined yet. For example see /src/tests/echo/test.cpp . */ class Service: public MCCInterface { protected: /** Set of labelled authentication and authorization handlers. MCC calls sequence of handlers at specific point depending on associated identifier. in most aces those are "in" and "out" for incoming and outgoing messages correspondingly. */ std::map > sechandlers_; /** Logger object used to print messages generated by this class. */ static Logger logger; /** Is service valid? Services which are not valid should set this * to false in their constructor. */ bool valid; /** Executes security handlers of specified queue. For more information please see description of MCC::ProcessSecHandlers */ bool ProcessSecHandlers(Message& message,const std::string& label = "") const; public: /** Example constructor - Server takes at least its configuration subtree */ Service(Config*, PluginArgument* arg); virtual ~Service(void) { }; /** Add security components/handlers to this MCC. For more information please see description of MCC::AddSecHandler */ virtual void AddSecHandler(Config *cfg,ArcSec::SecHandler* sechandler,const std::string& label = ""); /** Service specific registration collector, used for generate service registrations. 
In implemented service this method should generate GLUE2 document with part of service description which service wishes to advertise to Information Services. */ virtual bool RegistrationCollector(XMLNode &doc); /** Service may implement own service identifier gathering method. This method return identifier of service which is used for registering it Information Services. */ virtual std::string getID() { return ""; }; /** Returns true if the Service is valid. */ operator bool() const { return valid; }; /** Returns true if the Service is not valid. */ bool operator!() const { return !valid; }; }; #define ServicePluginKind ("HED:SERVICE") class ServicePluginArgument: public PluginArgument { private: Config* config_; ChainContext* context_; public: ServicePluginArgument(Config* config,ChainContext* context):config_(config),context_(context) { }; virtual ~ServicePluginArgument(void) { }; operator Config* (void) { return config_; }; operator ChainContext* (void) { return context_; }; }; } // namespace Arc #endif /* __ARC_SERVICE_H__ */ nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/README0000644000000000000000000000012411001653037021755 xustar000000000000000027 mtime=1208440351.928622 27 atime=1513200574.826705 30 ctime=1513200659.006735338 nordugrid-arc-5.4.2/src/hed/libs/message/README0000644000175000002070000000004611001653037022022 0ustar00mockbuildmock00000000000000collection to common Message classes. nordugrid-arc-5.4.2/src/hed/libs/message/PaxHeaders.7502/MCC_Status.cpp0000644000000000000000000000012413111074340023544 xustar000000000000000027 mtime=1495562464.413611 27 atime=1513200574.793705 30 ctime=1513200659.022735533 nordugrid-arc-5.4.2/src/hed/libs/message/MCC_Status.cpp0000644000175000002070000000266213111074340023617 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif // MCC_Status.cpp #include #include namespace Arc { std::string string(StatusKind kind){ if (kind==STATUS_UNDEFINED) return "STATUS_UNDEFINED"; else if (kind==STATUS_OK) return "STATUS_OK"; else if(kind==GENERIC_ERROR) return "GENERIC_ERROR"; else if(kind==PARSING_ERROR) return "PARSING_ERROR"; else if(kind==PROTOCOL_RECOGNIZED_ERROR) return "PROTOCOL_RECOGNIZED_ERROR"; else if(kind==UNKNOWN_SERVICE_ERROR) return "UNKNOWN_SERVICE_ERROR"; else if(kind==BUSY_ERROR) return "BUSY_ERROR"; else if(kind==SESSION_CLOSE) return "SESSION_CLOSE"; else // There should be no other alternative! 
return tostring((unsigned int)kind); } MCC_Status::MCC_Status(StatusKind kind, const std::string& origin, const std::string& explanation): kind(kind), origin(origin), explanation(explanation) { } bool MCC_Status::isOk() const{ return kind==STATUS_OK; } StatusKind MCC_Status::getKind() const{ return kind; } const std::string& MCC_Status::getOrigin() const{ return origin; } const std::string& MCC_Status::getExplanation() const{ return explanation; } MCC_Status::operator std::string() const{ return origin + ": " + string(kind) + " (" + explanation + ")"; } } nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/delegation0000644000000000000000000000013213214316023021505 xustar000000000000000030 mtime=1513200659.668743434 30 atime=1513200668.721854157 30 ctime=1513200659.668743434 nordugrid-arc-5.4.2/src/hed/libs/delegation/0000755000175000002070000000000013214316023021630 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/delegation/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515023630 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200597.630984688 30 ctime=1513200659.664743385 nordugrid-arc-5.4.2/src/hed/libs/delegation/Makefile.am0000644000175000002070000000156112052416515023675 0ustar00mockbuildmock00000000000000DIST_SUBDIRS = test SUBDIRS = $(TEST_DIR) noinst_LTLIBRARIES = libarcdelegation.la #noinst_PROGRAMS = test libarcdelegation_ladir = $(pkgincludedir)/delegation libarcdelegation_la_HEADERS = DelegationInterface.h libarcdelegation_la_SOURCES = DelegationInterface.cpp libarcdelegation_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libarcdelegation_la_LIBADD = \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/ws-addressing/libwsaddressing.la \ $(OPENSSL_LIBS) #test_SOURCES = test.cpp #test_CXXFLAGS = -I$(top_srcdir)/include \ # $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) #test_LDADD = libarcdelegation.la \ # $(top_builddir)/src/hed/libs/message/libarcmessage.la \ # $(GLIBMM_LIBS) $(EXTRA_LIBS) $(OPENSSL_LIBS) nordugrid-arc-5.4.2/src/hed/libs/delegation/PaxHeaders.7502/DelegationInterface.cpp0000644000000000000000000000012313153454775026205 xustar000000000000000027 mtime=1504598525.713781 27 atime=1513200574.650703 29 ctime=1513200659.66674341 nordugrid-arc-5.4.2/src/hed/libs/delegation/DelegationInterface.cpp0000644000175000002070000021315613153454775026263 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "DelegationInterface.h" namespace Arc { #if (OPENSSL_VERSION_NUMBER < 0x10100000L) #define X509_getm_notAfter X509_get_notAfter #define X509_getm_notBefore X509_get_notBefore #define X509_set1_notAfter X509_set_notAfter #define X509_set1_notBefore X509_set_notBefore #endif #define DELEGATION_NAMESPACE "http://www.nordugrid.org/schemas/delegation" #define GDS10_NAMESPACE "http://www.gridsite.org/ns/delegation.wsdl" //#define GDS20_NAMESPACE "http://www.gridsite.org/namespaces/delegation-2" #define EMIES_NAMESPACE "http://www.eu-emi.eu/es/2010/12/delegation/types" #define EMIES_TYPES_NAMESPACE "http://www.eu-emi.eu/es/2010/12/types" //#define EMIDS_NAMESPACE "http://www.gridsite.org/namespaces/delegation-21" // GDS 2.1 was made on wire compatible with GDS 2.0 - so they use same namespace #define EMIDS_NAMESPACE 
"http://www.gridsite.org/namespaces/delegation-2" #define GLOBUS_LIMITED_PROXY_OID "1.3.6.1.4.1.3536.1.1.1.9" //#define SERIAL_RAND_BITS 64 #define SERIAL_RAND_BITS 31 static int rand_serial(ASN1_INTEGER *ai) { int ret = 0; BIGNUM *btmp = BN_new(); if(!btmp) goto error; if(!BN_pseudo_rand(btmp, SERIAL_RAND_BITS, 0, 0)) goto error; if(ai && !BN_to_ASN1_INTEGER(btmp, ai)) goto error; ret = 1; error: if(btmp) BN_free(btmp); return ret; } static bool x509_to_string(X509* cert,std::string& str) { BIO *out = BIO_new(BIO_s_mem()); if(!out) return false; if(!PEM_write_bio_X509(out,cert)) { BIO_free_all(out); return false; }; for(;;) { char s[256]; int l = BIO_read(out,s,sizeof(s)); if(l <= 0) break; str.append(s,l);; }; BIO_free_all(out); return true; } /* static bool x509_to_string(EVP_PKEY* key,std::string& str) { BIO *out = BIO_new(BIO_s_mem()); if(!out) return false; EVP_CIPHER *enc = NULL; if(!PEM_write_bio_PrivateKey(out,key,enc,NULL,0,NULL,NULL)) { BIO_free_all(out); return false; }; for(;;) { char s[256]; int l = BIO_read(out,s,sizeof(s)); if(l <= 0) break; str.append(s,l);; }; BIO_free_all(out); return true; } */ static bool x509_to_string(RSA* key,std::string& str) { BIO *out = BIO_new(BIO_s_mem()); if(!out) return false; EVP_CIPHER *enc = NULL; if(!PEM_write_bio_RSAPrivateKey(out,key,enc,NULL,0,NULL,NULL)) { BIO_free_all(out); return false; }; for(;;) { char s[256]; int l = BIO_read(out,s,sizeof(s)); if(l <= 0) break; str.append(s,l);; }; BIO_free_all(out); return true; } static int passphrase_callback(char* buf, int size, int, void *arg) { std::istream* in = (std::istream*)arg; if(in == &std::cin) std::cout<<"Enter passphrase for your private key: "; buf[0]=0; in->getline(buf,size); //if(!(*in)) { // if(in == &std::cin) std::cerr<< "Failed to read passphrase from stdin"<type == V_ASN1_UTCTIME) return Time(std::string("20")+((char*)(s->data))); if(s->type == V_ASN1_GENERALIZEDTIME) return Time(std::string((char*)(s->data))); } return Time(Time::UNDEFINED); } static bool X509_add_ext_by_nid(X509 *cert,int nid,char *value,int pos) { X509_EXTENSION* ext = X509V3_EXT_conf_nid(NULL, NULL, nid, value); if(!ext) return false; X509_add_ext(cert,ext,pos); X509_EXTENSION_free(ext); return true; } static std::string::size_type find_line(const std::string& val, const char* token, std::string::size_type p = std::string::npos) { std::string::size_type l = ::strlen(token); if(p == std::string::npos) { p = val.find(token); } else { p = val.find(token,p); }; if(p == std::string::npos) return p; if((p > 0) && (val[p-1] != '\r') && (val[p-1] != '\n')) return std::string::npos; if(((p+l) < val.length()) && (val[p+l] != '\r') && (val[p+l] != '\n')) return std::string::npos; return p; } static bool strip_PEM(std::string& val, const char* ts, const char* te) { std::string::size_type ps = find_line(val,ts); if(ps == std::string::npos) return false; ps += ::strlen(ts); ps = val.find_first_not_of("\r\n",ps); if(ps == std::string::npos) return false; std::string::size_type pe = find_line(val,te,ps); if(pe == std::string::npos) return false; if(pe == 0) return false; pe = val.find_last_not_of("\r\n",pe-1); if(pe == std::string::npos) return false; if(pe < ps) return false; val = val.substr(ps,pe-ps+1); return true; } static void wrap_PEM(std::string& val, const char* ts, const char* te) { val = std::string(ts)+"\n"+trim(val,"\r\n")+"\n"+te; } static bool strip_PEM_request(std::string& val) { const char* ts = "-----BEGIN CERTIFICATE REQUEST-----"; const char* te = "-----END CERTIFICATE REQUEST-----"; return 
strip_PEM(val, ts, te); } static bool strip_PEM_cert(std::string& val) { const char* ts = "-----BEGIN CERTIFICATE-----"; const char* te = "-----END CERTIFICATE-----"; return strip_PEM(val, ts, te); } static void wrap_PEM_request(std::string& val) { const char* ts = "-----BEGIN CERTIFICATE REQUEST-----"; const char* te = "-----END CERTIFICATE REQUEST-----"; wrap_PEM(val, ts, te); } static void wrap_PEM_cert(std::string& val) { const char* ts = "-----BEGIN CERTIFICATE-----"; const char* te = "-----END CERTIFICATE-----"; wrap_PEM(val, ts, te); } class cred_info_t { public: Arc::Time valid_from; Arc::Time valid_till; std::string identity; std::string ca; unsigned int deleg_depth; unsigned int strength; }; static bool get_cred_info(const std::string& str,cred_info_t& info) { // It shold use Credential class. But so far keeping dependencies simple. bool r = false; X509* cert = NULL; STACK_OF(X509)* cert_sk = NULL; if(string_to_x509(str,cert,cert_sk) && cert && cert_sk) { info.valid_from=Time(Time::UNDEFINED); info.valid_till=Time(Time::UNDEFINED); info.deleg_depth=0; info.strength=0; X509* c = cert; for(int idx = 0;;++idx) { char* buf = X509_NAME_oneline(X509_get_issuer_name(c),NULL,0); if(buf) { info.ca = buf; OPENSSL_free(buf); buf = NULL; } else { info.ca = ""; } buf = X509_NAME_oneline(X509_get_subject_name(c),NULL,0); if(buf) { info.identity=buf; OPENSSL_free(buf); buf = NULL; } else { info.identity=""; } Time from = asn1_to_time(X509_getm_notBefore(c)); Time till = asn1_to_time(X509_getm_notAfter(c)); if(from != Time(Time::UNDEFINED)) { if((info.valid_from == Time(Time::UNDEFINED)) || (from > info.valid_from)) { info.valid_from = from; }; }; if(till != Time(Time::UNDEFINED)) { if((info.valid_till == Time(Time::UNDEFINED)) || (till < info.valid_till)) { info.valid_till = till; }; }; if(X509_get_ext_by_NID(cert,NID_proxyCertInfo,-1) < 0) break; if(idx >= sk_X509_num(cert_sk)) break; c = sk_X509_value(cert_sk,idx); }; r = true; }; if(cert) X509_free(cert); if(cert_sk) { for(int i = 0;iproxyPolicy && pci->proxyPolicy->policyLanguage) { int const bufSize = 255; char* buf = new char[bufSize+1]; int l = OBJ_obj2txt(buf,bufSize,pci->proxyPolicy->policyLanguage,1); if(l > 0) { if(l > bufSize) l=bufSize; buf[l] = 0; if(strcmp(GLOBUS_LIMITED_PROXY_OID,buf) == 0) { // Gross hack for globus. If Globus marks own proxy as limited // it expects every derived proxy to be limited or at least // independent. Independent proxies has little sense in Grid // world. So here we make our proxy globus-limited to allow // it to be used with globus code. 
obj=OBJ_txt2obj(GLOBUS_LIMITED_PROXY_OID,1); }; }; delete[] buf; }; PROXY_CERT_INFO_EXTENSION_free(pci); }; if(!obj) { obj=OBJ_nid2obj(NID_id_ppl_inheritAll); // Unrestricted proxy }; if(!obj) goto err; proxy_policy.policyLanguage=obj; }; if(X509_add1_ext_i2d(cert,NID_proxyCertInfo,&proxy_info,1,X509V3_ADD_REPLACE) != 1) goto err; if(policy_string) ASN1_OCTET_STRING_free(policy_string); policy_string=NULL; ASN1_OBJECT_free(obj); obj=NULL; /* PROXY_CERT_INFO_EXTENSION *pci = X509_get_ext_d2i(x, NID_proxyCertInfo, NULL, NULL); typedef struct PROXY_CERT_INFO_EXTENSION_st { ASN1_INTEGER *pcPathLengthConstraint; PROXY_POLICY *proxyPolicy; } PROXY_CERT_INFO_EXTENSION; typedef struct PROXY_POLICY_st { ASN1_OBJECT *policyLanguage; ASN1_OCTET_STRING *policy; } PROXY_POLICY; */ subject=X509_get_subject_name((X509*)cert_); if(!subject) goto err; subject=X509_NAME_dup(subject); if(!subject) goto err; if(!X509_set_issuer_name(cert,subject)) goto err; if(!X509_NAME_add_entry_by_NID(subject,NID_commonName,MBSTRING_ASC,(unsigned char*)(proxy_cn.c_str()),proxy_cn.length(),-1,0)) goto err; if(!X509_set_subject_name(cert,subject)) goto err; X509_NAME_free(subject); subject=NULL; if(!(restrictions_["validityStart"].empty())) { validity_start=Time(restrictions_["validityStart"]).GetTime(); validity_start_adjustment = 0; }; if(!(restrictions_["validityEnd"].empty())) { validity_end=Time(restrictions_["validityEnd"]).GetTime(); } else if(!(restrictions_["validityPeriod"].empty())) { validity_end=validity_start+Period(restrictions_["validityPeriod"]).GetPeriod(); }; validity_start -= validity_start_adjustment; //Set "notBefore" if( X509_cmp_time(X509_getm_notBefore((X509*)cert_), &validity_start) < 0) { X509_time_adj(X509_getm_notBefore(cert), 0L, &validity_start); } else { X509_set1_notBefore(cert, X509_getm_notBefore((X509*)cert_)); } //Set "not After" if(validity_end == (time_t)(-1)) { X509_set1_notAfter(cert,X509_getm_notAfter((X509*)cert_)); } else { X509_gmtime_adj(X509_getm_notAfter(cert), (validity_end-validity_start)); }; X509_set_pubkey(cert,pkey); EVP_PKEY_free(pkey); pkey=NULL; if(!X509_sign(cert,(EVP_PKEY*)key_,digest)) goto err; /* { int pci_NID = NID_undef; ASN1_OBJECT * extension_oid = NULL; int nid; PROXY_CERT_INFO_EXTENSION* proxy_cert_info; X509_EXTENSION * extension; pci_NID = OBJ_sn2nid(SN_proxyCertInfo); for(i=0;icount("SOAP:ACTION") > 0)) { header.Action(attributes_in->get("SOAP:ACTION")); header.To(attributes_in->get("SOAP:ENDPOINT")); } req.Attributes(attributes_in); req.Context(context); req.Payload(in); resp.Attributes(attributes_out); resp.Context(context); MCC_Status r = interface.process(req,resp); if(r != STATUS_OK) return NULL; if(!resp.Payload()) return NULL; PayloadSOAP* resp_soap = NULL; try { resp_soap=dynamic_cast(resp.Payload()); } catch(std::exception& e) { }; if(!resp_soap) { delete resp.Payload(); return NULL; }; resp.Payload(NULL); return resp_soap; } bool DelegationProviderSOAP::DelegateCredentialsInit(MCCInterface& interface,MessageAttributes* attributes_in,MessageAttributes* attributes_out,MessageContext* context,DelegationProviderSOAP::ServiceType stype) { if(stype == DelegationProviderSOAP::ARCDelegation) { NS ns; ns["deleg"]=DELEGATION_NAMESPACE; PayloadSOAP req_soap(ns); req_soap.NewChild("deleg:DelegateCredentialsInit"); PayloadSOAP* resp_soap = do_process(interface,attributes_in,attributes_out,context,&req_soap); if(!resp_soap) return false; XMLNode token = (*resp_soap)["DelegateCredentialsInitResponse"]["TokenRequest"]; if(!token) { delete resp_soap; return 
false; }; if(((std::string)(token.Attribute("Format"))) != "x509") { delete resp_soap; return false; }; id_=(std::string)(token["Id"]); request_=(std::string)(token["Value"]); delete resp_soap; if(id_.empty() || request_.empty()) return false; return true; } else if((stype == DelegationProviderSOAP::GDS10) || (stype == DelegationProviderSOAP::GDS10RENEW)) { // Not implemented yet due to problems with id /* } else if((stype == DelegationProviderSOAP::GDS20) || (stype == DelegationProviderSOAP::GDS20RENEW)) { NS ns; ns["deleg"]=GDS20_NAMESPACE; PayloadSOAP req_soap(ns); req_soap.NewChild("deleg:getNewProxyReq"); PayloadSOAP* resp_soap = do_process(interface,attributes_in,attributes_out,context,&req_soap); if(!resp_soap) return false; XMLNode token = (*resp_soap)["getNewProxyReqResponse"]["NewProxyReq"]; if(!token) { delete resp_soap; return false; }; id_=(std::string)(token["delegationID"]); request_=(std::string)(token["proxyRequest"]); delete resp_soap; if(id_.empty() || request_.empty()) return false; return true; */ } else if((stype == DelegationProviderSOAP::GDS20) || (stype == DelegationProviderSOAP::GDS20RENEW) || (stype == DelegationProviderSOAP::EMIDS) || (stype == DelegationProviderSOAP::EMIDSRENEW)) { NS ns; ns["deleg"]=EMIDS_NAMESPACE; PayloadSOAP req_soap(ns); if((!id_.empty()) && ((stype == DelegationProviderSOAP::GDS20RENEW) || (stype == DelegationProviderSOAP::EMIDSRENEW))) { req_soap.NewChild("deleg:renewProxyReq").NewChild("deleg:delegationID") = id_; PayloadSOAP* resp_soap = do_process(interface,attributes_in,attributes_out,context,&req_soap); if(!resp_soap) return false; XMLNode token = (*resp_soap)["renewProxyReqResponse"]; if(!token) { delete resp_soap; return false; }; request_=(std::string)(token["renewProxyReqReturn"]); delete resp_soap; } else { req_soap.NewChild("deleg:getNewProxyReq"); PayloadSOAP* resp_soap = do_process(interface,attributes_in,attributes_out,context,&req_soap); if(!resp_soap) return false; XMLNode token = (*resp_soap)["getNewProxyReqResponse"]; if(!token) { delete resp_soap; return false; }; id_=(std::string)(token["delegationID"]); request_=(std::string)(token["proxyRequest"]); delete resp_soap; } if(id_.empty() || request_.empty()) return false; return true; } else if((stype == DelegationProviderSOAP::EMIES)) { NS ns; ns["deleg"]=EMIES_NAMESPACE; ns["estypes"]=EMIES_TYPES_NAMESPACE; PayloadSOAP req_soap(ns); XMLNode op = req_soap.NewChild("deleg:InitDelegation"); op.NewChild("deleg:CredentialType") = "RFC3820"; //op.NewChild("deleg:RenewalID") = ""; PayloadSOAP* resp_soap = do_process(interface,attributes_in,attributes_out,context,&req_soap); if(!resp_soap) return false; XMLNode token = (*resp_soap)["InitDelegationResponse"]; if(!token) { delete resp_soap; return false; }; id_=(std::string)(token["DelegationID"]); request_=(std::string)(token["CSR"]); delete resp_soap; if(id_.empty() || request_.empty()) return false; //wrap_PEM_request(request_); return true; }; return false; } bool DelegationProviderSOAP::UpdateCredentials(MCCInterface& interface,MessageContext* context,const DelegationRestrictions& /* restrictions */,DelegationProviderSOAP::ServiceType stype) { MessageAttributes attributes_in; MessageAttributes attributes_out; return UpdateCredentials(interface,&attributes_in,&attributes_out,context,DelegationRestrictions(),stype); } bool DelegationProviderSOAP::UpdateCredentials(MCCInterface& interface,MessageAttributes* attributes_in,MessageAttributes* attributes_out,MessageContext* context,const DelegationRestrictions& 
restrictions,DelegationProviderSOAP::ServiceType stype) { if(id_.empty()) return false; if(request_.empty()) return false; if(stype == DelegationProviderSOAP::ARCDelegation) { std::string delegation = Delegate(request_,restrictions); if(delegation.empty()) return false; NS ns; ns["deleg"]=DELEGATION_NAMESPACE; PayloadSOAP req_soap(ns); XMLNode token = req_soap.NewChild("deleg:UpdateCredentials").NewChild("deleg:DelegatedToken"); token.NewAttribute("deleg:Format")="x509"; token.NewChild("deleg:Id")=id_; token.NewChild("deleg:Value")=delegation; PayloadSOAP* resp_soap = do_process(interface,attributes_in,attributes_out,context,&req_soap); if(!resp_soap) return false; if(!(*resp_soap)["UpdateCredentialsResponse"]) { delete resp_soap; return false; }; delete resp_soap; return true; } else if((stype == DelegationProviderSOAP::GDS10) || (stype == DelegationProviderSOAP::GDS10RENEW)) { // No implemented yet due to problems with id /* } else if((stype == DelegationProviderSOAP::GDS20) || (stype == DelegationProviderSOAP::GDS20RENEW)) { std::string delegation = Delegate(request_,restrictions); if(delegation.empty()) return false; NS ns; ns["deleg"]=GDS20_NAMESPACE; PayloadSOAP req_soap(ns); XMLNode token = req_soap.NewChild("deleg:putProxy"); token.NewChild("deleg:delegationID")=id_; token.NewChild("deleg:proxy")=delegation; PayloadSOAP* resp_soap = do_process(interface,attributes_in,attributes_out,context,&req_soap); if(!resp_soap) return false; if(!(*resp_soap)["putProxyResponse"]) { delete resp_soap; return false; }; delete resp_soap; return true; */ } else if((stype == DelegationProviderSOAP::GDS20) || (stype == DelegationProviderSOAP::GDS20RENEW) || (stype == DelegationProviderSOAP::EMIDS) || (stype == DelegationProviderSOAP::EMIDSRENEW)) { std::string delegation = Delegate(request_,restrictions); //strip_PEM_cert(delegation); if(delegation.empty()) return false; NS ns; ns["deleg"]=EMIDS_NAMESPACE; PayloadSOAP req_soap(ns); XMLNode token = req_soap.NewChild("deleg:putProxy"); token.NewChild("delegationID")=id_; // unqualified token.NewChild("proxy")=delegation; // unqualified PayloadSOAP* resp_soap = do_process(interface,attributes_in,attributes_out,context,&req_soap); if(!resp_soap) return false; if(resp_soap->Size() > 0) { delete resp_soap; return false; }; delete resp_soap; return true; } else if((stype == DelegationProviderSOAP::EMIES)) { std::string delegation = Delegate(request_,restrictions); //if((!strip_PEM_cert(delegation)) || (delegation.empty())) return false; if(delegation.empty()) return false; NS ns; ns["deleg"]=EMIES_NAMESPACE; ns["estypes"]=EMIES_TYPES_NAMESPACE; PayloadSOAP req_soap(ns); XMLNode token = req_soap.NewChild("deleg:PutDelegation"); //token.NewChild("deleg:CredentialType")="RFC3820"; token.NewChild("deleg:DelegationId")=id_; token.NewChild("deleg:Credential")=delegation; PayloadSOAP* resp_soap = do_process(interface,attributes_in,attributes_out,context,&req_soap); if(!resp_soap) return false; if((std::string)((*resp_soap)["PutDelegationResponse"]) != "SUCCESS") { delete resp_soap; return false; }; delete resp_soap; return true; }; return false; } bool DelegationProviderSOAP::DelegatedToken(XMLNode parent) { if(id_.empty()) return false; if(request_.empty()) return false; std::string delegation = Delegate(request_); if(delegation.empty()) return false; NS ns; ns["deleg"]=DELEGATION_NAMESPACE; parent.Namespaces(ns); XMLNode token = parent.NewChild("deleg:DelegatedToken"); token.NewAttribute("deleg:Format")="x509"; token.NewChild("deleg:Id")=id_; 
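// The token value below carries the proxy credential that Delegate() just
// produced from the stored certificate request (request_); the Id ties it
// back to the identifier issued during DelegateCredentialsInit().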
token.NewChild("deleg:Value")=delegation; return true; } // --------------------------------------------------------------------------------- class DelegationContainerSOAP::Consumer { public: DelegationConsumerSOAP* deleg; unsigned int usage_count; unsigned int acquired; bool to_remove; time_t last_used; std::string client_id; DelegationContainerSOAP::ConsumerIterator previous; DelegationContainerSOAP::ConsumerIterator next; Consumer(void): deleg(NULL),usage_count(0),acquired(0),to_remove(false),last_used(time(NULL)) { }; Consumer(DelegationConsumerSOAP* d): deleg(d),usage_count(0),acquired(0),to_remove(false),last_used(time(NULL)) { }; Consumer& operator=(DelegationConsumerSOAP* d) { deleg=d; usage_count=0; acquired=0; to_remove=false; last_used=time(NULL); return *this; }; }; #define DELEGFAULT(out) { \ for(XMLNode old = out.Child();(bool)old;old = out.Child()) old.Destroy(); \ SOAPFault((out),SOAPFault::Receiver,failure_.c_str()); \ } #define GDS10FAULT(out) { \ for(XMLNode old = out.Child();(bool)old;old = out.Child()) old.Destroy(); \ SOAPFault((out),SOAPFault::Receiver,failure_.c_str()); \ } #define GDS20FAULT(out) { \ for(XMLNode old = out.Child();(bool)old;old = out.Child()) old.Destroy(); \ XMLNode r = SOAPFault((out),SOAPFault::Receiver,"").Detail(true); \ XMLNode ex = r.NewChild("DelegationException"); \ ex.Namespaces(ns); ex.NewChild("msg") = (failure_); \ } #define EMIDSFAULT(out) { \ for(XMLNode old = out.Child();(bool)old;old = out.Child()) old.Destroy(); \ XMLNode r = SOAPFault((out),SOAPFault::Receiver,"").Detail(true); \ XMLNode ex = r.NewChild("deleg:DelegationException",ns); \ ex.NewChild("msg") = (failure_); \ } // InternalServiceDelegationFault #define EMIESFAULT(out,msg) { \ for(XMLNode old = out.Child();(bool)old;old = out.Child()) old.Destroy(); \ XMLNode r = SOAPFault((out),SOAPFault::Receiver,"").Detail(true); \ XMLNode ex = r.NewChild("InternalServiceDelegationFault"); \ ex.Namespaces(ns); \ ex.NewChild("estypes:Message") = (msg); \ ex.NewChild("estypes:Timestamp") = Time().str(ISOTime); \ /*ex.NewChild("estypes:Description") = "";*/ \ /*ex.NewChild("estypes:FailureCode") = "0";*/ \ } // UnknownDelegationIDFault #define EMIESIDFAULT(out,msg) { \ XMLNode r = SOAPFault((out),SOAPFault::Receiver,"").Detail(true); \ XMLNode ex = r.NewChild("UnknownDelegationIDFault"); \ ex.Namespaces(ns); \ ex.NewChild("Message") = (msg); \ ex.NewChild("Timestamp") = Time().str(ISOTime); \ /*ex.NewChild("Description") = "";*/ \ /*ex.NewChild("FailureCode") = "0";*/ \ } // InternalBaseFault // AccessControlFault DelegationContainerSOAP::DelegationContainerSOAP(void) { max_size_=0; // unlimited size of container max_duration_=30; // 30 seconds for delegation max_usage_=2; // allow 1 failure context_lock_=false; consumers_first_=consumers_.end(); consumers_last_=consumers_.end(); } DelegationContainerSOAP::~DelegationContainerSOAP(void) { lock_.lock(); ConsumerIterator i = consumers_.begin(); for(;i!=consumers_.end();++i) { if(i->second->deleg) delete i->second->deleg; delete i->second; }; lock_.unlock(); } DelegationConsumerSOAP* DelegationContainerSOAP::AddConsumer(std::string& id,const std::string& client) { lock_.lock(); if(id.empty()) { for(int tries = 0;tries<1000;++tries) { GUID(id); ConsumerIterator i = consumers_.find(id); if(i == consumers_.end()) break; id.resize(0); }; if(id.empty()) { failure_ = "Failed to generate unique identifier"; lock_.unlock(); return NULL; }; } else { ConsumerIterator i = consumers_.find(id); if(i != consumers_.end()) { failure_ = "Requested 
identifier already in use"; lock_.unlock(); return NULL; }; }; Consumer* c = new Consumer(); c->deleg=new DelegationConsumerSOAP(); c->client_id=client; c->previous=consumers_.end(); c->next=consumers_first_; ConsumerIterator i = consumers_.insert(consumers_.begin(),make_pair(id,c)); if(consumers_first_ != consumers_.end()) consumers_first_->second->previous=i; consumers_first_=i; if(consumers_last_ == consumers_.end()) consumers_last_=i; i->second->acquired = 1; DelegationConsumerSOAP* cs = i->second->deleg; lock_.unlock(); return cs; } bool DelegationContainerSOAP::TouchConsumer(DelegationConsumerSOAP* c,const std::string& /*credentials*/) { Glib::Mutex::Lock lock(lock_); ConsumerIterator i = find(c); if(i == consumers_.end()) { failure_ = "Delegation not found"; return false; }; i->second->last_used=time(NULL); if(((++(i->second->usage_count)) > max_usage_) && (max_usage_ > 0)) { i->second->to_remove=true; } else { i->second->to_remove=false; }; if(i == consumers_first_) return true; ConsumerIterator previous = i->second->previous; ConsumerIterator next = i->second->next; if(previous != consumers_.end()) previous->second->next=next; if(next != consumers_.end()) next->second->previous=previous; i->second->previous=consumers_.end(); i->second->next=consumers_first_; if(consumers_first_ != consumers_.end()) consumers_first_->second->previous=i; consumers_first_=i; return true; } bool DelegationContainerSOAP::QueryConsumer(DelegationConsumerSOAP* c,std::string& credentials) { Glib::Mutex::Lock lock(lock_); ConsumerIterator i = find(c); if(i == consumers_.end()) { failure_ = "Delegation not found"; return false; }; if(i->second->deleg) i->second->deleg->Backup(credentials); // only key is available return true; } void DelegationContainerSOAP::ReleaseConsumer(DelegationConsumerSOAP* c) { lock_.lock(); ConsumerIterator i = find(c); if(i == consumers_.end()) { lock_.unlock(); return; }; if(i->second->acquired > 0) --(i->second->acquired); remove(i); lock_.unlock(); return; } void DelegationContainerSOAP::RemoveConsumer(DelegationConsumerSOAP* c) { lock_.lock(); ConsumerIterator i = find(c); if(i == consumers_.end()) { lock_.unlock(); return; }; if(i->second->acquired > 0) --(i->second->acquired); i->second->to_remove=true; remove(i); lock_.unlock(); return; } DelegationContainerSOAP::ConsumerIterator DelegationContainerSOAP::find(DelegationConsumerSOAP* c) { ConsumerIterator i = consumers_first_; for(;i!=consumers_.end();i=i->second->next) { if(i->second->deleg == c) break; }; return i; } bool DelegationContainerSOAP::remove(ConsumerIterator i) { if(i->second->acquired > 0) return false; if(!i->second->to_remove) return false; ConsumerIterator previous = i->second->previous; ConsumerIterator next = i->second->next; if(previous != consumers_.end()) previous->second->next=next; if(next != consumers_.end()) next->second->previous=previous; if(consumers_first_ == i) consumers_first_=next; if(consumers_last_ == i) consumers_last_=previous; if(i->second->deleg) delete i->second->deleg; delete i->second; consumers_.erase(i); return true; } void DelegationContainerSOAP::CheckConsumers(void) { if(max_size_ > 0) { lock_.lock(); ConsumerIterator i = consumers_last_; unsigned int count = consumers_.size(); while(count > max_size_) { if(i == consumers_.end()) break; ConsumerIterator prev = i->second->previous; i->second->to_remove=true; remove(i); i=prev; --count; }; lock_.unlock(); }; if(max_duration_ > 0) { lock_.lock(); time_t t = time(NULL); for(ConsumerIterator i = consumers_last_;i!=consumers_.end();) 
{ ConsumerIterator next = i->second->next; if(((unsigned int)(t - i->second->last_used)) > max_duration_) { i->second->to_remove=true; remove(i); i=next; } else { break; }; }; lock_.unlock(); }; return; } bool DelegationContainerSOAP::DelegateCredentialsInit(const SOAPEnvelope& in,SOAPEnvelope& out,const std::string& client) { std::string id; DelegationConsumerSOAP* consumer = AddConsumer(id,client); if(!consumer) { DELEGFAULT(out); return true; }; if(!(consumer->DelegateCredentialsInit(id,in,out))) { RemoveConsumer(consumer); failure_ = "Failed to generate credentials request"; DELEGFAULT(out); return true; }; ReleaseConsumer(consumer); CheckConsumers(); return true; } bool DelegationContainerSOAP::UpdateCredentials(std::string& credentials,const SOAPEnvelope& in,SOAPEnvelope& out,const std::string& client) { std::string identity; return UpdateCredentials(credentials,identity,in,out,client); } #define ClientAuthorized(consumer,client) \ ( ((consumer)->client_id.empty()) || ((consumer)->client_id == (client)) ) DelegationConsumerSOAP* DelegationContainerSOAP::FindConsumer(const std::string& id,const std::string& client) { Glib::Mutex::Lock lock(lock_); ConsumerIterator i = consumers_.find(id); if(i == consumers_.end()) { failure_ = "Identifier not found"; return NULL; }; if(!(i->second->deleg)) { failure_ = "Identifier has no delegation associated"; return NULL; }; if(!ClientAuthorized(i->second,client)) { failure_ = "Client not authorized for this identifier"; return NULL; }; ++(i->second->acquired); DelegationConsumerSOAP* cs = i->second->deleg; return cs; } bool DelegationContainerSOAP::UpdateCredentials(std::string& credentials,std::string& identity, const SOAPEnvelope& in,SOAPEnvelope& out,const std::string& client) { std::string id = (std::string)(const_cast(in)["UpdateCredentials"]["DelegatedToken"]["Id"]); if(id.empty()) { failure_ = "Credentials identifier is missing"; DELEGFAULT(out); return true; }; DelegationConsumerSOAP* c = FindConsumer(id,client); if(!c) { DELEGFAULT(out); return true; }; if(!c->UpdateCredentials(credentials,identity,in,out)) { ReleaseConsumer(c); failure_ = "Failed to acquire credentials"; DELEGFAULT(out); return true; }; if(!TouchConsumer(c,credentials)) { ReleaseConsumer(c); DELEGFAULT(out); return true; }; ReleaseConsumer(c); return true; } bool DelegationContainerSOAP::DelegatedToken(std::string& credentials,XMLNode token,const std::string& client) { std::string identity; return DelegatedToken(credentials,identity,token,client); } bool DelegationContainerSOAP::DelegatedToken(std::string& credentials,std::string& identity,XMLNode token,const std::string& client) { std::string id = (std::string)(token["Id"]); if(id.empty()) return false; DelegationConsumerSOAP* c = FindConsumer(id,client); if(!c) return false; bool r = c->DelegatedToken(credentials,identity,token); if(!TouchConsumer(c,credentials)) r = false; ReleaseConsumer(c); return r; } bool DelegationContainerSOAP::Process(const SOAPEnvelope& in,SOAPEnvelope& out,const std::string& client) { std::string credentials; return Process(credentials,in,out,client); } bool DelegationContainerSOAP::Process(std::string& credentials,const SOAPEnvelope& in,SOAPEnvelope& out,const std::string& client) { credentials.resize(0); XMLNode op = ((SOAPEnvelope&)in).Child(0); if(!op) return false; std::string op_ns = op.Namespace(); std::string op_name = op.Name(); if(op_ns == DELEGATION_NAMESPACE) { // ARC Delegation if(op_name == "DelegateCredentialsInit") { return DelegateCredentialsInit(in,out,client); } else 
if(op_name == "UpdateCredentials") { return UpdateCredentials(credentials,in,out,client); }; } else if(op_ns == GDS10_NAMESPACE) { // Original GDS NS ns("",GDS10_NAMESPACE); if(op_name == "getProxyReq") { Arc::XMLNode r = out.NewChild("getProxyReqResponse",ns); std::string id = op["delegationID"]; if(id.empty()) { failure_ = "No identifier specified"; GDS10FAULT(out); return true; }; // check if new id or id belongs to this client bool found = true; DelegationConsumerSOAP* c = FindConsumer(id,client); if(!c) { found=false; if(!(c = AddConsumer(id,client))) { GDS10FAULT(out); return true; }; }; std::string x509_request; c->Request(x509_request); if(x509_request.empty()) { if(found) ReleaseConsumer(c); else RemoveConsumer(c); failure_ = "Failed to generate request"; GDS10FAULT(out); return true; }; ReleaseConsumer(c); r.NewChild("request") = x509_request; return true; } else if(op_name == "putProxy") { Arc::XMLNode r = out.NewChild("putProxyResponse",ns); std::string id = op["delegationID"]; std::string cred = op["proxy"]; if(id.empty()) { failure_ = "Identifier is missing"; GDS10FAULT(out); return true; }; if(cred.empty()) { failure_ = "proxy is missing"; GDS10FAULT(out); return true; }; DelegationConsumerSOAP* c = FindConsumer(id,client); if(!c) { GDS10FAULT(out); return true; }; if(!c->Acquire(cred)) { ReleaseConsumer(c); failure_ = "Failed to acquire credentials"; GDS10FAULT(out); return true; }; if(!TouchConsumer(c,cred)) { ReleaseConsumer(c); GDS10FAULT(out); return true; }; ReleaseConsumer(c); credentials = cred; return true; }; /* } else if(op_ns == GDS20_NAMESPACE) { // Glite GDS NS ns("",GDS20_NAMESPACE); if(op_name == "getVersion") { Arc::XMLNode r = out.NewChild("getVersionResponse",ns); r.NewChild("getVersionReturn")="0"; return true; } else if(op_name == "getInterfaceVersion") { Arc::XMLNode r = out.NewChild("getInterfaceVersionResponse",ns); r.NewChild("getInterfaceVersionReturn")="2"; return true; } else if(op_name == "getServiceMetadata") { //Arc::XMLNode r = out.NewChild("getServiceMetadataResponse",ns); GDS20FAULT(out,"Service has no metadata"); return true; } else if(op_name == "getProxyReq") { Arc::XMLNode r = out.NewChild("getProxyReqResponse",ns); std::string id = op["delegationID"]; if(id.empty()) { GDS20FAULT(out,"No identifier specified"); return true; }; // check if new id or id belongs to this client bool found = true; DelegationConsumerSOAP* c = FindConsumer(id,client); if(!c) { found = false; if(!(c = AddConsumer(id,client))) { GDS20FAULT(out,"Wrong identifier"); // ? 
return true; }; }; std::string x509_request; c->Request(x509_request); if(x509_request.empty()) { if(found) ReleaseConsumer(c); else RemoveConsumer(c); GDS20FAULT(out,"Failed to generate request"); return true; }; ReleaseConsumer(c); r.NewChild("getProxyReqReturn") = x509_request; return true; } else if(op_name == "getNewProxyReq") { Arc::XMLNode r = out.NewChild("getNewProxyReqResponse",ns); std::string id; DelegationConsumerSOAP* c = AddConsumer(id,client); if(!c) { GDS20FAULT(out,"Failed to generate identifier"); return true; }; std::string x509_request; c->Request(x509_request); if(x509_request.empty()) { RemoveConsumer(c); GDS20FAULT(out,"Failed to generate request"); return true; }; ReleaseConsumer(c); CheckConsumers(); Arc::XMLNode ret = r.NewChild("NewProxyReq"); ret.NewChild("proxyRequest") = x509_request; ret.NewChild("delegationID") = id; return true; } else if(op_name == "putProxy") { Arc::XMLNode r = out.NewChild("putProxyResponse",ns); std::string id = op["delegationID"]; std::string cred = op["proxy"]; if(id.empty()) { GDS20FAULT(out,"Identifier is missing"); return true; }; if(cred.empty()) { GDS20FAULT(out,"proxy is missing"); return true; }; DelegationConsumerSOAP* c = FindConsumer(id,client); if(!c) { GDS20FAULT(out,"Failed to find identifier"); return true; }; if(!c->Acquire(cred)) { ReleaseConsumer(c); GDS20FAULT(out,"Failed to acquire credentials"); return true; }; if(!TouchConsumer(c,cred)) { ReleaseConsumer(c); GDS20FAULT(out,"Failed to process credentials"); return true; }; ReleaseConsumer(c); credentials = cred; return true; } else if(op_name == "renewProxyReq") { Arc::XMLNode r = out.NewChild("renewProxyReqResponse",ns); std::string id = op["delegationID"]; if(id.empty()) { GDS20FAULT(out,"No identifier specified"); return true; }; // check if new id or id belongs to this client bool found = true; DelegationConsumerSOAP* c = FindConsumer(id,client); if(!c) { found=false; // Probably it is wrong to create new delegation if // client explicitly requests to renew. //if(!(c = AddConsumer(id,client))) { GDS20FAULT(out,"Wrong identifier"); // ? 
return true; //}; }; std::string x509_request; c->Request(x509_request); if(x509_request.empty()) { if(found) ReleaseConsumer(c); else RemoveConsumer(c); GDS20FAULT(out,"Failed to generate request"); return true; }; ReleaseConsumer(c); r.NewChild("renewProxyReqReturn") = x509_request; return true; } else if(op_name == "getTerminationTime") { Arc::XMLNode r = out.NewChild("getTerminationTimeResponse",ns); std::string id = op["delegationID"]; if(id.empty()) { GDS20FAULT(out,"No identifier specified"); return true; }; DelegationConsumerSOAP* c = FindConsumer(id,client); if(!c) { GDS20FAULT(out,"Wrong identifier"); return true; }; std::string credentials; if((!QueryConsumer(c,credentials)) || credentials.empty()) { ReleaseConsumer(c); GDS20FAULT(out,"Delegated credentials missing"); return true; }; ReleaseConsumer(c); cred_info_t info; if(!get_cred_info(credentials,info)) { GDS20FAULT(out,"Delegated credentials missing"); return true; }; if(info.valid_till == Time(Time::UNDEFINED)) info.valid_till = Time(); r.NewChild("getTerminationTimeReturn") = info.valid_till.str(ISOTime); return true; } else if(op_name == "destroy") { Arc::XMLNode r = out.NewChild("destroyResponse",ns); std::string id = op["delegationID"]; if(id.empty()) { GDS20FAULT(out,"No identifier specified"); return true; }; DelegationConsumerSOAP* c = FindConsumer(id,client); if(c) RemoveConsumer(c); return true; }; */ } else if(op_ns == EMIDS_NAMESPACE) { // EMI GDS == gLite GDS NS ns("deleg",EMIDS_NAMESPACE); if(op_name == "getVersion") { // getVersion // // getVersionResponse // getVersionReturn // DelegationException Arc::XMLNode r = out.NewChild("deleg:getVersionResponse",ns); r.NewChild("getVersionReturn")="0"; return true; } else if(op_name == "getInterfaceVersion") { // getInterfaceVersion // // getInterfaceVersionResponse // getInterfaceVersionReturn // DelegationException Arc::XMLNode r = out.NewChild("deleg:getInterfaceVersionResponse",ns); r.NewChild("getInterfaceVersionReturn")="2.1"; return true; } else if(op_name == "getServiceMetadata") { // getServiceMetadata // key // // getServiceMetadataResponse // getServiceMetadataReturn // DelegationException //Arc::XMLNode r = out.NewChild("getServiceMetadataResponse"); //r.Namespaces(ns); failure_ = "Service has no metadata"; EMIDSFAULT(out); return true; } else if(op_name == "getProxyReq") { // getProxyReq // delegationID // // getProxyReqResponse // getProxyReqReturn // DelegationException Arc::XMLNode r = out.NewChild("deleg:getProxyReqResponse",ns); std::string id = op["delegationID"]; // check if new id or id belongs to this client bool found = true; DelegationConsumerSOAP* c = id.empty()?NULL:FindConsumer(id,client); if(!c) { found = false; if(!(c = AddConsumer(id,client))) { EMIDSFAULT(out); return true; }; }; std::string x509_request; c->Request(x509_request); if(x509_request.empty()) { if(found) ReleaseConsumer(c); else RemoveConsumer(c); failure_ = "Failed to generate request"; EMIDSFAULT(out); return true; }; ReleaseConsumer(c); r.NewChild("getProxyReqReturn") = x509_request; return true; } else if(op_name == "getNewProxyReq") { // getNewProxyReq // // getNewProxyReqResponse // proxyRequest // delegationID // DelegationException Arc::XMLNode r = out.NewChild("deleg:getNewProxyReqResponse",ns); std::string id; DelegationConsumerSOAP* c = AddConsumer(id,client); if(!c) { EMIDSFAULT(out); return true; }; std::string x509_request; c->Request(x509_request); if(x509_request.empty()) { RemoveConsumer(c); failure_ = "Failed to generate request"; EMIDSFAULT(out); return 
true; }; ReleaseConsumer(c); CheckConsumers(); r.NewChild("proxyRequest") = x509_request; r.NewChild("delegationID") = id; return true; } else if(op_name == "putProxy") { // putProxy // delegationID // proxy // // DelegationException std::string id = op["delegationID"]; std::string cred = op["proxy"]; if(id.empty()) { failure_ = "Identifier is missing"; EMIDSFAULT(out); return true; }; if(cred.empty()) { failure_ = "proxy is missing"; EMIDSFAULT(out); return true; }; DelegationConsumerSOAP* c = FindConsumer(id,client); if(!c) { EMIDSFAULT(out); return true; }; if(!c->Acquire(cred)) { ReleaseConsumer(c); failure_ = "Failed to acquire credentials"; EMIDSFAULT(out); return true; }; if(!TouchConsumer(c,cred)) { ReleaseConsumer(c); EMIDSFAULT(out); return true; }; ReleaseConsumer(c); credentials = cred; return true; } else if(op_name == "renewProxyReq") { // renewProxyReq // delegationID // // renewProxyReqResponse // renewProxyReqReturn // DelegationException Arc::XMLNode r = out.NewChild("deleg:renewProxyReqResponse",ns); std::string id = op["delegationID"]; if(id.empty()) { failure_ = "No identifier specified"; EMIDSFAULT(out); return true; }; // check if new id or id belongs to this client bool found = true; DelegationConsumerSOAP* c = FindConsumer(id,client); if(!c) { found=false; if(!(c = AddConsumer(id,client))) { EMIDSFAULT(out); return true; }; }; std::string x509_request; c->Request(x509_request); if(x509_request.empty()) { if(found) ReleaseConsumer(c); else RemoveConsumer(c); failure_ = "Failed to generate request"; EMIDSFAULT(out); return true; }; ReleaseConsumer(c); r.NewChild("renewProxyReqReturn") = x509_request; return true; } else if(op_name == "getTerminationTime") { // getTerminationTime // delegationID // // getTerminationTimeResponse // getTerminationTimeReturn (dateTime) // DelegationException Arc::XMLNode r = out.NewChild("deleg:getTerminationTimeResponse",ns); std::string id = op["delegationID"]; if(id.empty()) { failure_ = "No identifier specified"; EMIDSFAULT(out); return true; }; DelegationConsumerSOAP* c = FindConsumer(id,client); if(!c) { EMIDSFAULT(out); return true; }; std::string credentials; if((!QueryConsumer(c,credentials)) || credentials.empty()) { ReleaseConsumer(c); if(failure_.empty()) failure_ = "Delegated credentials missing"; EMIDSFAULT(out); return true; }; ReleaseConsumer(c); cred_info_t info; if(!get_cred_info(credentials,info)) { failure_ = "Delegated credentials missing"; EMIDSFAULT(out); return true; }; if(info.valid_till == Time(Time::UNDEFINED)) info.valid_till = Time(); r.NewChild("getTerminationTimeReturn") = info.valid_till.str(ISOTime); return true; } else if(op_name == "destroy") { // destroy // delegationID // // DelegationException Arc::XMLNode r = out.NewChild("deleg:destroyResponse",ns); std::string id = op["delegationID"]; if(id.empty()) { failure_ = "No identifier specified"; EMIDSFAULT(out); return true; }; DelegationConsumerSOAP* c = FindConsumer(id,client); if(c) RemoveConsumer(c); return true; }; } else if(op_ns == EMIES_NAMESPACE) { // EMI Execution Service own delegation interface NS ns("",EMIES_NAMESPACE); ns["estypes"] = EMIES_TYPES_NAMESPACE; if(op_name == "InitDelegation") { // InitDelegation // CredentialType [RFC3820] // RenewalID 0- // InitDelegationLifetime 0- // InitDelegationResponse // DelegationID // CSR // InternalServiceDelegationFault // AccessControlFault // InternalBaseFault // Need UnknownDelegationIDFault for reporting bad RenewalID Arc::XMLNode r = out.NewChild("InitDelegationResponse",ns); 
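// Only the RFC 3820 proxy credential type is supported here; any other
// CredentialType requested by the client is rejected just below with an
// InternalServiceDelegationFault.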
if((std::string)op["CredentialType"] != "RFC3820") { EMIESFAULT(out,"Unsupported credential type requested"); return true; } unsigned long long int lifetime = 0; if((bool)op["InitDelegationLifetime"]) { if(!stringto((std::string)op["InitDelegationLifetime"],lifetime)) { EMIESFAULT(out,"Unsupported credential lifetime requested"); return true; } } std::string id = op["RenewalID"]; DelegationConsumerSOAP* c = NULL; bool renew = false; if(!id.empty()) { c = FindConsumer(id,client); if(!c) { EMIESFAULT(out,"Failed to find identifier"); return true; }; renew = true; } else { c = AddConsumer(id,client); if(!c) { EMIESFAULT(out,"Failed to generate identifier"); return true; }; }; std::string x509_request; // TODO: use lifetime c->Request(x509_request); //if((!strip_PEM_request(x509_request)) || (x509_request.empty())) { if(x509_request.empty()) { if(renew) ReleaseConsumer(c); else RemoveConsumer(c); EMIESFAULT(out,"Failed to generate request"); return true; }; ReleaseConsumer(c); CheckConsumers(); r.NewChild("DelegationID") = id; r.NewChild("CSR") = x509_request; return true; } else if(op_name == "PutDelegation") { // PutDelegation // DelegationID // Credential // PutDelegationResponse [SUCCESS] // UnknownDelegationIDFault // AccessControlFault // InternalBaseFault Arc::XMLNode r = out.NewChild("PutDelegationResponse",ns); std::string id = op["DelegationId"]; std::string cred = op["Credential"]; if(id.empty()) { EMIESFAULT(out,"Identifier is missing"); return true; }; if(cred.empty()) { EMIESFAULT(out,"Delegated credential is missing"); return true; }; //wrap_PEM_cert(cred); DelegationConsumerSOAP* c = FindConsumer(id,client); if(!c) { EMIESIDFAULT(out,"Failed to find identifier"); return true; }; if(!c->Acquire(cred)) { ReleaseConsumer(c); EMIESFAULT(out,"Failed to acquire credentials"); return true; }; if(!TouchConsumer(c,cred)) { ReleaseConsumer(c); EMIESFAULT(out,"Failed to process credentials"); return true; }; ReleaseConsumer(c); credentials = cred; r = "SUCCESS"; return true; } else if(op_name == "GetDelegationInfo") { // GetDelegationInfo // DelegationID // GetDelegationInfoResponse // Lifetime // Issuer 0- // Subject 0- // UnknownDelegationIDFault // AccessControlFault // InternalBaseFault Arc::XMLNode r = out.NewChild("GetDelegationInfoResponse",ns); std::string id = op["DelegationID"]; if(id.empty()) { EMIESFAULT(out,"Identifier is missing"); return true; }; DelegationConsumerSOAP* c = FindConsumer(id,client); if(!c) { EMIESIDFAULT(out,"Wrong identifier"); return true; }; std::string credentials; if((!QueryConsumer(c,credentials)) || credentials.empty()) { ReleaseConsumer(c); EMIESFAULT(out,"Delegated credentials missing"); return true; }; ReleaseConsumer(c); cred_info_t info; if(!get_cred_info(credentials,info)) { EMIESFAULT(out,"Delegated credentials missing"); return true; }; if(info.valid_till == Time(Time::UNDEFINED)) info.valid_till = Time(); r.NewChild("Lifetime") = info.valid_till.str(ISOTime); r.NewChild("Issuer") = info.ca; r.NewChild("Subject") = info.identity; return true; }; }; return false; } bool DelegationContainerSOAP::MatchNamespace(const SOAPEnvelope& in) { XMLNode op = ((SOAPEnvelope&)in).Child(0); if(!op) return false; std::string op_ns = op.Namespace(); return ((op_ns == DELEGATION_NAMESPACE) || (op_ns == GDS10_NAMESPACE) || /*(op_ns == GDS20_NAMESPACE) ||*/ (op_ns == EMIDS_NAMESPACE) || (op_ns == EMIES_NAMESPACE)); } std::string DelegationContainerSOAP::GetFailure(void) { return failure_; } } // namespace Arc 
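That completes DelegationInterface.cpp. For orientation, a minimal service-side usage sketch follows; it is not part of the ARC source. The container object, the SOAP request/response envelopes and the client DN are assumed to be supplied by the hosting framework, and the include paths are assumptions inferred from the $(pkgincludedir)/delegation install rule in this directory's Makefile.am; only MatchNamespace(), Process() and GetFailure() are taken from the code above.

// Hedged sketch: dispatch an incoming SOAP operation to DelegationContainerSOAP.
#include <string>
#include <arc/message/SOAPEnvelope.h>           // assumed install location
#include <arc/delegation/DelegationInterface.h> // assumed install location

bool HandleDelegationRequest(Arc::DelegationContainerSOAP& container,
                             const Arc::SOAPEnvelope& request,
                             Arc::SOAPEnvelope& response,
                             const std::string& client_dn,
                             std::string& delegated_proxy) {
  // Only operations in one of the supported namespaces are handled here:
  // ARC delegation, GDS 1.0, EMI GDS 2.x and the EMI-ES delegation dialect.
  if(!container.MatchNamespace(request)) return false;
  // Process() builds the SOAP response itself, including fault elements.
  // For putProxy/PutDelegation-style operations it also returns the
  // delegated proxy in delegated_proxy so the caller can store it.
  if(!container.Process(delegated_proxy, request, response, client_dn)) {
    // false means the operation was not recognized; the last recorded
    // error text, if any, is available from GetFailure().
    std::string reason = container.GetFailure();
    (void)reason; // a real service would log this
    return false;
  }
  return true;
}

The client-side counterpart in the same file follows the matching two-step pattern: DelegationProviderSOAP::DelegateCredentialsInit() obtains an identifier and a certificate request from the service, and UpdateCredentials() sends back the proxy signed by Delegate().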
nordugrid-arc-5.4.2/src/hed/libs/delegation/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315725023637 xustar000000000000000030 mtime=1513200597.676985251 30 atime=1513200648.395605559 30 ctime=1513200659.665743398 nordugrid-arc-5.4.2/src/hed/libs/delegation/Makefile.in0000644000175000002070000007473513214315725023725 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/libs/delegation DIST_COMMON = README $(libarcdelegation_la_HEADERS) \ $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) am__DEPENDENCIES_1 = libarcdelegation_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/ws-addressing/libwsaddressing.la \ $(am__DEPENDENCIES_1) am_libarcdelegation_la_OBJECTS = \ libarcdelegation_la-DelegationInterface.lo libarcdelegation_la_OBJECTS = $(am_libarcdelegation_la_OBJECTS) libarcdelegation_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarcdelegation_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ 
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcdelegation_la_SOURCES) DIST_SOURCES = $(libarcdelegation_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libarcdelegation_ladir)" HEADERS = $(libarcdelegation_la_HEADERS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ 
ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = 
@LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = 
@oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ DIST_SUBDIRS = test SUBDIRS = $(TEST_DIR) noinst_LTLIBRARIES = libarcdelegation.la #noinst_PROGRAMS = test libarcdelegation_ladir = $(pkgincludedir)/delegation libarcdelegation_la_HEADERS = DelegationInterface.h libarcdelegation_la_SOURCES = DelegationInterface.cpp libarcdelegation_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libarcdelegation_la_LIBADD = \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/ws-addressing/libwsaddressing.la \ $(OPENSSL_LIBS) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/delegation/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/delegation/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcdelegation.la: $(libarcdelegation_la_OBJECTS) $(libarcdelegation_la_DEPENDENCIES) $(libarcdelegation_la_LINK) $(libarcdelegation_la_OBJECTS) $(libarcdelegation_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdelegation_la-DelegationInterface.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcdelegation_la-DelegationInterface.lo: DelegationInterface.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdelegation_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdelegation_la-DelegationInterface.lo -MD -MP -MF $(DEPDIR)/libarcdelegation_la-DelegationInterface.Tpo -c -o libarcdelegation_la-DelegationInterface.lo `test -f 'DelegationInterface.cpp' || echo '$(srcdir)/'`DelegationInterface.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcdelegation_la-DelegationInterface.Tpo $(DEPDIR)/libarcdelegation_la-DelegationInterface.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DelegationInterface.cpp' object='libarcdelegation_la-DelegationInterface.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdelegation_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdelegation_la-DelegationInterface.lo `test -f 'DelegationInterface.cpp' || echo '$(srcdir)/'`DelegationInterface.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libarcdelegation_laHEADERS: $(libarcdelegation_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarcdelegation_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarcdelegation_ladir)" @list='$(libarcdelegation_la_HEADERS)'; test -n "$(libarcdelegation_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarcdelegation_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarcdelegation_ladir)" || exit $$?; \ done uninstall-libarcdelegation_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarcdelegation_la_HEADERS)'; test -n "$(libarcdelegation_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarcdelegation_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libarcdelegation_ladir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . 
|| ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(HEADERS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(libarcdelegation_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-libarcdelegation_laHEADERS install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-libarcdelegation_laHEADERS .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-noinstLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am \ install-libarcdelegation_laHEADERS install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs installdirs-am \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags tags-recursive uninstall uninstall-am \ uninstall-libarcdelegation_laHEADERS #test_SOURCES = test.cpp #test_CXXFLAGS = -I$(top_srcdir)/include \ # $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) #test_LDADD = libarcdelegation.la \ # $(top_builddir)/src/hed/libs/message/libarcmessage.la \ # $(GLIBMM_LIBS) $(EXTRA_LIBS) $(OPENSSL_LIBS) # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/delegation/PaxHeaders.7502/test0000644000000000000000000000013213214316023022464 xustar000000000000000030 mtime=1513200659.698743801 30 atime=1513200668.721854157 30 ctime=1513200659.698743801 nordugrid-arc-5.4.2/src/hed/libs/delegation/test/0000755000175000002070000000000013214316023022607 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/delegation/test/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515024607 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200597.692985447 30 ctime=1513200659.696743777 nordugrid-arc-5.4.2/src/hed/libs/delegation/test/Makefile.am0000644000175000002070000000061312052416515024651 0ustar00mockbuildmock00000000000000TESTS = DelegationInterfaceTest check_PROGRAMS = $(TESTS) DelegationInterfaceTest_SOURCES = $(top_srcdir)/src/Test.cpp \ DelegationInterfaceTest.cpp DelegationInterfaceTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) DelegationInterfaceTest_LDADD = \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(CPPUNIT_LIBS) nordugrid-arc-5.4.2/src/hed/libs/delegation/test/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315725024616 xustar000000000000000030 mtime=1513200597.737985997 30 atime=1513200648.410605743 30 ctime=1513200659.697743789 nordugrid-arc-5.4.2/src/hed/libs/delegation/test/Makefile.in0000644000175000002070000007143413214315725024675 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
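# Illustrative note (not generated by automake; the path below is assumed to be
# a configured build tree): with the Makefile.am above, the standard automake
# check flow builds DelegationInterfaceTest, because it is listed in
# check_PROGRAMS, and then runs it, because it is also listed in TESTS. So the
# unit test can typically be exercised from the top build directory with e.g.:
#
#   make -C src/hed/libs/delegation/test check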
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ TESTS = DelegationInterfaceTest$(EXEEXT) check_PROGRAMS = $(am__EXEEXT_1) subdir = src/hed/libs/delegation/test DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__EXEEXT_1 = DelegationInterfaceTest$(EXEEXT) am_DelegationInterfaceTest_OBJECTS = \ DelegationInterfaceTest-Test.$(OBJEXT) \ DelegationInterfaceTest-DelegationInterfaceTest.$(OBJEXT) DelegationInterfaceTest_OBJECTS = \ $(am_DelegationInterfaceTest_OBJECTS) am__DEPENDENCIES_1 = DelegationInterfaceTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(am__DEPENDENCIES_1) DelegationInterfaceTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(DelegationInterfaceTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(DelegationInterfaceTest_SOURCES) DIST_SOURCES = $(DelegationInterfaceTest_SOURCES) ETAGS = etags CTAGS = ctags am__tty_colors = \ red=; grn=; lgn=; blu=; std= DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = 
@ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = 
@LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ 
jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ DelegationInterfaceTest_SOURCES = $(top_srcdir)/src/Test.cpp \ DelegationInterfaceTest.cpp DelegationInterfaceTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) DelegationInterfaceTest_LDADD = \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(CPPUNIT_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/delegation/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/delegation/test/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-checkPROGRAMS: @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list DelegationInterfaceTest$(EXEEXT): $(DelegationInterfaceTest_OBJECTS) $(DelegationInterfaceTest_DEPENDENCIES) @rm -f DelegationInterfaceTest$(EXEEXT) $(DelegationInterfaceTest_LINK) $(DelegationInterfaceTest_OBJECTS) $(DelegationInterfaceTest_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DelegationInterfaceTest-DelegationInterfaceTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DelegationInterfaceTest-Test.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< DelegationInterfaceTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DelegationInterfaceTest_CXXFLAGS) $(CXXFLAGS) -MT DelegationInterfaceTest-Test.o -MD -MP -MF $(DEPDIR)/DelegationInterfaceTest-Test.Tpo -c -o DelegationInterfaceTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/DelegationInterfaceTest-Test.Tpo $(DEPDIR)/DelegationInterfaceTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='DelegationInterfaceTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DelegationInterfaceTest_CXXFLAGS) 
$(CXXFLAGS) -c -o DelegationInterfaceTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp DelegationInterfaceTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DelegationInterfaceTest_CXXFLAGS) $(CXXFLAGS) -MT DelegationInterfaceTest-Test.obj -MD -MP -MF $(DEPDIR)/DelegationInterfaceTest-Test.Tpo -c -o DelegationInterfaceTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/DelegationInterfaceTest-Test.Tpo $(DEPDIR)/DelegationInterfaceTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='DelegationInterfaceTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DelegationInterfaceTest_CXXFLAGS) $(CXXFLAGS) -c -o DelegationInterfaceTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` DelegationInterfaceTest-DelegationInterfaceTest.o: DelegationInterfaceTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DelegationInterfaceTest_CXXFLAGS) $(CXXFLAGS) -MT DelegationInterfaceTest-DelegationInterfaceTest.o -MD -MP -MF $(DEPDIR)/DelegationInterfaceTest-DelegationInterfaceTest.Tpo -c -o DelegationInterfaceTest-DelegationInterfaceTest.o `test -f 'DelegationInterfaceTest.cpp' || echo '$(srcdir)/'`DelegationInterfaceTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/DelegationInterfaceTest-DelegationInterfaceTest.Tpo $(DEPDIR)/DelegationInterfaceTest-DelegationInterfaceTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DelegationInterfaceTest.cpp' object='DelegationInterfaceTest-DelegationInterfaceTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DelegationInterfaceTest_CXXFLAGS) $(CXXFLAGS) -c -o DelegationInterfaceTest-DelegationInterfaceTest.o `test -f 'DelegationInterfaceTest.cpp' || echo '$(srcdir)/'`DelegationInterfaceTest.cpp DelegationInterfaceTest-DelegationInterfaceTest.obj: DelegationInterfaceTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DelegationInterfaceTest_CXXFLAGS) $(CXXFLAGS) -MT DelegationInterfaceTest-DelegationInterfaceTest.obj -MD -MP -MF $(DEPDIR)/DelegationInterfaceTest-DelegationInterfaceTest.Tpo -c -o DelegationInterfaceTest-DelegationInterfaceTest.obj `if test -f 'DelegationInterfaceTest.cpp'; then $(CYGPATH_W) 'DelegationInterfaceTest.cpp'; else $(CYGPATH_W) '$(srcdir)/DelegationInterfaceTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/DelegationInterfaceTest-DelegationInterfaceTest.Tpo $(DEPDIR)/DelegationInterfaceTest-DelegationInterfaceTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DelegationInterfaceTest.cpp' object='DelegationInterfaceTest-DelegationInterfaceTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(DelegationInterfaceTest_CXXFLAGS) $(CXXFLAGS) -c -o DelegationInterfaceTest-DelegationInterfaceTest.obj `if test -f 'DelegationInterfaceTest.cpp'; then $(CYGPATH_W) 'DelegationInterfaceTest.cpp'; else $(CYGPATH_W) '$(srcdir)/DelegationInterfaceTest.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-checkPROGRAMS clean-generic clean-libtool \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-TESTS check-am clean \ clean-checkPROGRAMS clean-generic clean-libtool ctags \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/delegation/test/PaxHeaders.7502/DelegationInterfaceTest.cpp0000644000000000000000000000012413065017103030003 xustar000000000000000027 mtime=1490296387.698578 27 atime=1513200574.653703 30 ctime=1513200659.698743801 nordugrid-arc-5.4.2/src/hed/libs/delegation/test/DelegationInterfaceTest.cpp0000644000175000002070000001226113065017103030052 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include class DelegationInterfaceTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(DelegationInterfaceTest); CPPUNIT_TEST(TestDelegationInterfaceDELEGATEARC); CPPUNIT_TEST(TestDelegationInterfaceDELEGATEGDS20); CPPUNIT_TEST(TestDelegationInterfaceDELEGATEEMIES); CPPUNIT_TEST(TestDelegationInterfaceDELEGATEEMIDS); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void TestDelegationInterfaceDELEGATEARC(); void TestDelegationInterfaceDELEGATEGDS20(); void TestDelegationInterfaceDELEGATEEMIES(); void TestDelegationInterfaceDELEGATEEMIDS(); private: std::string credentials; }; class DirectMCC: public Arc::MCCInterface { private: Arc::DelegationContainerSOAP& container_; public: DirectMCC(Arc::DelegationContainerSOAP& container,Arc::PluginArgument* parg):Arc::MCCInterface(parg),container_(container) {}; Arc::MCC_Status process(Arc::Message& in,Arc::Message& out); }; Arc::MCC_Status DirectMCC::process(Arc::Message& in,Arc::Message& out) { if(!in.Payload()) return Arc::MCC_Status(); Arc::PayloadSOAP* in_payload = NULL; try { in_payload = dynamic_cast(in.Payload()); } catch(std::exception& e) { }; if(!in_payload) return Arc::MCC_Status(); Arc::MCC_Status r; Arc::NS ns; Arc::PayloadSOAP* out_payload = new Arc::PayloadSOAP(ns); out.Payload(out_payload); if(!container_.Process(*in_payload,*out_payload,"")) return Arc::MCC_Status(); // if(cred.empty()) return Arc::MCC_Status(); return Arc::MCC_Status(Arc::STATUS_OK); } void DelegationInterfaceTest::setUp() { credentials.assign("\ -----BEGIN CERTIFICATE-----\n\ MIIBxjCCAS8CARAwDQYJKoZIhvcNAQEFBQAwKzENMAsGA1UEChMER3JpZDENMAsG\n\ A1UEChMEVGVzdDELMAkGA1UEAxMCQ0EwHhcNMDkwMTA1MTExNzQ2WhcNMDkwMjA0\n\ MTExNzQ2WjAsMQswCQYDVQQGEwJYWDEOMAwGA1UEChMFRHVtbXkxDTALBgNVBAMT\n\ BFRlc3QwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMPRUsusZTJG5tph8EUv\n\ s8Lvsv8+JRyoMuNhxcg5sy2MtxKvs1LBG8uBIeiI5vDHEyaA+kM3RP6/RvBD9Uru\n\ /qZRkmMlYwWDNyhU2Ft/7//M8jVIXl8pagWEwEAFwUPcdBX5OSPe5GFpeJnGtyWu\n\ 0vLTrxDammqIDtdyrJM8c8AvAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAMNxlpMJo\n\ vo/2Mdwib+DLLyALm7HT0PbAFupj+QIyAntqqqQKaQqZwD4WeQf4jk2Vx9gGzFLV\n\ gEt3slFV2uxcuLf8BNQdPcv5rFwvwYu5AlExVZDUCQ06oR+RWiktekDWTAsx/PEt\n\ AjVVi0njg0Iev5AN7zWqxCOPjSW2yePNzCE=\n\ -----END CERTIFICATE-----\n\ -----BEGIN RSA PRIVATE KEY-----\n\ MIICXQIBAAKBgQDD0VLLrGUyRubaYfBFL7PC77L/PiUcqDLjYcXIObMtjLcSr7NS\n\ wRvLgSHoiObwxxMmgPpDN0T+v0bwQ/VK7v6mUZJjJWMFgzcoVNhbf+//zPI1SF5f\n\ KWoFhMBABcFD3HQV+Tkj3uRhaXiZxrclrtLy068Q2ppqiA7XcqyTPHPALwIDAQAB\n\ AoGAMuSPeUH4FyYYT7/Om5y3Qr3brrzvFlCc0T4TEmP0rqz409F4cNShrbWFI5OZ\n\ OhDzaDlzUc7mjrMV89IlyXDuG8WJJApCvd5fkZcigxa+cmrcGKRO/BOq5Zit0yKM\n\ ebE9csJKfj5WeXyjtQSWmAXlJJ5Y9bKO+PuVUaav5V/W/QkCQQDi33mOaf99o1o1\n\ jjnSUl5TgbqAtE4LXgnOgVl+Sazq3dVHBBhaFTzFYfa57YlvN8i6nYF8SfegpFJa\n\ Pt7BdSFlAkEA3PUrZgZDQDdrIFrqk12tW7P4YGqPkSjGrbuLTwGxhTiWhPL7Tej7\n\ Up/z8zpahDbGEXhNUgKKw0AOwHtZ2wssAwJBAJyPr2jyCRgApH4U2h4kLWffPH8Y\n\ 7kq16HqTlNirqyKhV08cqllwEnH7+rGwFImlq2Xsz7Cfsr0u6I3SmRJT7GkCQQCJ\n\ v8q7gyH/8cy2Uhe1oYwHBI7OxQAV9f7OpoH10k9yh1HHNpgW/S1ZWGDEfNebX25h\n\ 
y8cgXndVvCS2OPBOz4szAkBWx+6KgpQ+Xdx5Jv7IoGRdE9GdIGMtTaHOnUxSsdlj\n\ buEHRt+0Gp5Rod9S6w9Ppl6CphSPq5HRCo49SBBRgAWm\n\ -----END RSA PRIVATE KEY-----\n\ "); } void DelegationInterfaceTest::tearDown() { } void DelegationInterfaceTest::TestDelegationInterfaceDELEGATEARC() { Arc::DelegationContainerSOAP c; Arc::DelegationProviderSOAP p(credentials); DirectMCC m(c,NULL); Arc::MessageContext context; CPPUNIT_ASSERT((bool)p.DelegateCredentialsInit(m,&context,Arc::DelegationProviderSOAP::ARCDelegation)); CPPUNIT_ASSERT((bool)p.UpdateCredentials(m,&context,Arc::DelegationRestrictions(),Arc::DelegationProviderSOAP::ARCDelegation)); } void DelegationInterfaceTest::TestDelegationInterfaceDELEGATEGDS20() { Arc::DelegationContainerSOAP c; Arc::DelegationProviderSOAP p(credentials); DirectMCC m(c,NULL); Arc::MessageContext context; CPPUNIT_ASSERT((bool)p.DelegateCredentialsInit(m,&context,Arc::DelegationProviderSOAP::GDS20)); CPPUNIT_ASSERT((bool)p.UpdateCredentials(m,&context,Arc::DelegationRestrictions(),Arc::DelegationProviderSOAP::GDS20)); } void DelegationInterfaceTest::TestDelegationInterfaceDELEGATEEMIES() { Arc::DelegationContainerSOAP c; Arc::DelegationProviderSOAP p(credentials); DirectMCC m(c,NULL); Arc::MessageContext context; CPPUNIT_ASSERT((bool)p.DelegateCredentialsInit(m,&context,Arc::DelegationProviderSOAP::EMIES)); CPPUNIT_ASSERT((bool)p.UpdateCredentials(m,&context,Arc::DelegationRestrictions(),Arc::DelegationProviderSOAP::EMIES)); } void DelegationInterfaceTest::TestDelegationInterfaceDELEGATEEMIDS() { Arc::DelegationContainerSOAP c; Arc::DelegationProviderSOAP p(credentials); DirectMCC m(c,NULL); Arc::MessageContext context; CPPUNIT_ASSERT((bool)p.DelegateCredentialsInit(m,&context,Arc::DelegationProviderSOAP::EMIDS)); CPPUNIT_ASSERT((bool)p.UpdateCredentials(m,&context,Arc::DelegationRestrictions(),Arc::DelegationProviderSOAP::EMIDS)); } CPPUNIT_TEST_SUITE_REGISTRATION(DelegationInterfaceTest); nordugrid-arc-5.4.2/src/hed/libs/delegation/PaxHeaders.7502/DelegationInterface.h0000644000000000000000000000012412306304733025636 xustar000000000000000027 mtime=1394182619.331424 27 atime=1513200574.642703 30 ctime=1513200659.663743373 nordugrid-arc-5.4.2/src/hed/libs/delegation/DelegationInterface.h0000644000175000002070000002767012306304733025717 0ustar00mockbuildmock00000000000000#ifndef __ARC_DELEGATIONINTERFACE_H__ #define __ARC_DELEGATIONINTERFACE_H__ #include #include #include #include #include #include #include namespace Arc { typedef std::map DelegationRestrictions; /** A consumer of delegated X509 credentials. During delegation procedure this class acquires delegated credentials aka proxy - certificate, private key and chain of previous certificates. Delegation procedure consists of calling Request() method for generating certificate request followed by call to Acquire() method for making complete credentials from certificate chain. 
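    An illustrative sketch only (not part of the installed interface): a full
    round trip between a consumer and a provider. The variable names are
    hypothetical, and 'credentials' is assumed to already hold the delegating
    party's PEM-encoded certificate, private key and chain as described for
    DelegationProvider below.

      Arc::DelegationConsumer consumer;                // generates a fresh private key
      std::string request;
      if(!consumer.Request(request)) { ... }           // PEM-encoded X509 certificate request
      Arc::DelegationProvider provider(credentials);   // signer of the delegated credentials
      std::string proxy = provider.Delegate(request);  // signed proxy chain, without private key
      if(!consumer.Acquire(proxy)) { ... }             // 'proxy' now holds the complete delegated credentials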
*/ class DelegationConsumer { protected: void* key_; /** Private key */ bool Generate(void); /** Creates private key */ void LogError(void); public: /** Creates object with new private key */ DelegationConsumer(void); /** Creates object with provided private key */ DelegationConsumer(const std::string& content); ~DelegationConsumer(void); operator bool(void) { return key_ != NULL; }; bool operator!(void) { return key_ == NULL; }; /** Return identifier of this object - not implemented */ const std::string& ID(void); /** Stores content of this object into a string */ bool Backup(std::string& content); /** Restores content of object from string */ bool Restore(const std::string& content); /** Make X509 certificate request from internal private key */ bool Request(std::string& content); /** Adds private key into certificates chain in 'content'. On exit content contains complete delegated credentials. */ bool Acquire(std::string& content); /** Includes the functionality of Acquire(content) plus extracting the credential identity. */ bool Acquire(std::string& content,std::string& identity); }; /** A provider of delegated credentials. During delegation procedure this class generates new credential to be used in proxy/delegated credential. */ class DelegationProvider { void* key_; /** Private key used to sign delegated certificate */ void* cert_; /** Public key/certificate corresponding to public key */ void* chain_; /** Chain of other certificates needed to verify 'cert_' if any */ void LogError(void); void CleanError(void); public: /** Creates instance from provided credentials. Credentials are used to sign delegated credentials. Arguments should contain PEM-encoded certificate, private key and optionally certificates chain. */ DelegationProvider(const std::string& credentials); /** Creates instance from provided credentials. Credentials are used to sign delegated credentials. Arguments should contain filesystem path to PEM-encoded certificate and private key. Optionally cert_file may contain certificates chain. */ DelegationProvider(const std::string& cert_file,const std::string& key_file,std::istream* inpwd = NULL); ~DelegationProvider(void); operator bool(void) { return key_ != NULL; }; bool operator!(void) { return key_ == NULL; }; /** Perform delegation. Takes X509 certificate request and creates proxy credentials excluding private key. Result is then to be fed into DelegationConsumer::Acquire */ std::string Delegate(const std::string& request,const DelegationRestrictions& restrictions = DelegationRestrictions()); }; /** This class extends DelegationConsumer to support SOAP message exchange. Implements WS interface http://www.nordugrid.org/schemas/delegation described in delegation.wsdl. */ class DelegationConsumerSOAP: public DelegationConsumer { protected: public: /** Creates object with new private key */ DelegationConsumerSOAP(void); /** Creates object with specified private key */ DelegationConsumerSOAP(const std::string& content); ~DelegationConsumerSOAP(void); /** Processes SOAP message which starts delegation. Generated message in 'out' is meant to be sent back to DelegationProviderSOAP. Argument 'id' contains identifier of procedure and is used only to produce SOAP message. */ bool DelegateCredentialsInit(const std::string& id,const SOAPEnvelope& in,SOAPEnvelope& out); /** Accepts delegated credentials. Processes 'in' SOAP message and stores full proxy credentials in 'credentials'. 'out' message is generated for sending to DelegationProviderSOAP.
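    A minimal sketch of the service-side sequence, under the assumption that
    'init_in'/'init_out' and 'update_in'/'update_out' are the SOAPEnvelope
    objects exchanged with the client and 'id' is an identifier chosen by the
    service; all of these names are illustrative, not part of the interface:

      Arc::DelegationConsumerSOAP consumer;
      consumer.DelegateCredentialsInit(id, init_in, init_out);   // first exchange: certificate request goes out
      std::string proxy;
      consumer.UpdateCredentials(proxy, update_in, update_out);  // second exchange: 'proxy' receives full credentials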
*/ bool UpdateCredentials(std::string& credentials,const SOAPEnvelope& in,SOAPEnvelope& out); /** Includes the functionality in above UpdateCredentials method; plus extracting the credential identity */ bool UpdateCredentials(std::string& credentials,std::string& identity,const SOAPEnvelope& in,SOAPEnvelope& out); /** Similar to UpdateCredentials but takes only DelegatedToken XML element */ bool DelegatedToken(std::string& credentials,XMLNode token); bool DelegatedToken(std::string& credentials,std::string& identity,XMLNode token); }; /** Extension of DelegationProvider with SOAP exchange interface. This class is also a temporary container for intermediate information used during delegation procedure. */ class DelegationProviderSOAP: public DelegationProvider { protected: /** Delegation request as returned by service accepting delegations. */ std::string request_; /** Assigned delegation identifier. */ std::string id_; public: typedef enum { ARCDelegation, GDS10, GDS10RENEW, GDS20, GDS20RENEW, EMIES, EMIDS, EMIDSRENEW } ServiceType; /** Creates instance from provided credentials. Credentials are used to sign delegated credentials. */ DelegationProviderSOAP(const std::string& credentials); /** Creates instance from provided credentials. Credentials are used to sign delegated credentials. Arguments should contain filesystem path to PEM-encoded certificate and private key. Optionally cert_file may contain certificates chain. */ DelegationProviderSOAP(const std::string& cert_file,const std::string& key_file,std::istream* inpwd = NULL); ~DelegationProviderSOAP(void); /** Performs DelegateCredentialsInit SOAP operation. As a result, the request for delegated credentials is received by this instance and stored internally. Call to UpdateCredentials should follow. */ bool DelegateCredentialsInit(MCCInterface& mcc_interface,MessageContext* context,ServiceType stype = ARCDelegation); /** Extended version of DelegateCredentialsInit(MCCInterface&,MessageContext*). Additionally takes attributes for request and response message to make fine control over message processing possible. */ bool DelegateCredentialsInit(MCCInterface& mcc_interface,MessageAttributes* attributes_in,MessageAttributes* attributes_out,MessageContext* context,ServiceType stype = ARCDelegation); /** Performs UpdateCredentials SOAP operation. This concludes delegation procedure and passes delegated credentials to DelegationConsumerSOAP instance. */ bool UpdateCredentials(MCCInterface& mcc_interface,MessageContext* context,const DelegationRestrictions& restrictions = DelegationRestrictions(),ServiceType stype = ARCDelegation); /** Extended version of UpdateCredentials(MCCInterface&,MessageContext*). Additionally takes attributes for request and response message to make fine control over message processing possible. */ bool UpdateCredentials(MCCInterface& mcc_interface,MessageAttributes* attributes_in,MessageAttributes* attributes_out,MessageContext* context,const DelegationRestrictions& restrictions = DelegationRestrictions(),ServiceType stype = ARCDelegation); /** Generates DelegatedToken element. Element is created as child of provided XML element and contains structure described in delegation.wsdl. */ bool DelegatedToken(XMLNode parent); /** Returns the identifier provided by service accepting delegated credentials. This identifier may then be used to refer to credentials stored at service. */ const std::string& ID(void) { return id_;}; /** * Assigns identifier to be used while initiating delegation procedure.
* Assigning identifier is useful only for *RENEW ServiceTypes. * * \since Added in 4.1.0. **/ void ID(const std::string& id) { id_ = id; }; }; // Implementation of the container for delegation credentials /** Manages multiple delegated credentials. Delegation consumers are created automatically with DelegateCredentialsInit method up to max_size_ and assigned unique identifier. Its methods are similar to those of DelegationConsumerSOAP with identifier included in SOAP message used to route execution to one of managed DelegationConsumerSOAP instances. */ class DelegationContainerSOAP { protected: Glib::Mutex lock_; /// Stores description of last error. Derived classes should store their errors here. std::string failure_; class Consumer; /** * \since Changed in 4.1.0. Mapped value (Consumer) changed to pointer. **/ typedef std::map ConsumerMap; typedef ConsumerMap::iterator ConsumerIterator; ConsumerMap consumers_; ConsumerIterator consumers_first_; ConsumerIterator consumers_last_; ConsumerIterator find(DelegationConsumerSOAP* c); bool remove(ConsumerIterator i); /** Max. number of delegation consumers */ int max_size_; /** Lifetime of unused delegation consumer */ int max_duration_; /** Max. times same delegation consumer may accept credentials */ int max_usage_; /** If true delegation consumer is deleted when connection context is destroyed */ bool context_lock_; // Rewritable interface to the box /** Creates new consumer object, if empty assigns id and stores in internal store */ virtual DelegationConsumerSOAP* AddConsumer(std::string& id,const std::string& client); /** Finds previously created consumer in internal store */ virtual DelegationConsumerSOAP* FindConsumer(const std::string& id,const std::string& client); /** Marks consumer as recently used and acquires new credentials */ virtual bool TouchConsumer(DelegationConsumerSOAP* c,const std::string& credentials); /** Obtain stored credentials - not all containers may provide this functionality */ virtual bool QueryConsumer(DelegationConsumerSOAP* c,std::string& credentials); /** Releases consumer obtained by call to AddConsumer() or FindConsumer() */ virtual void ReleaseConsumer(DelegationConsumerSOAP* c); /** Releases consumer obtained by call to AddConsumer() or FindConsumer() and deletes it */ virtual void RemoveConsumer(DelegationConsumerSOAP* c); /** Periodic management of stored consumers */ virtual void CheckConsumers(void); // Helper methods /** See DelegationConsumerSOAP::DelegateCredentialsInit. If 'client' is not empty then all subsequent calls involving access to generated credentials must contain same value in their 'client' arguments.
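    For example (sketch only; 'request', 'response' and 'client_id' are
    illustrative names, with 'client_id' typically derived from the transport
    level identity of the caller), a service handler may drive the container
    through its public Process/MatchNamespace methods declared below:

      Arc::DelegationContainerSOAP container;
      std::string delegated;                       // filled once the second exchange completes
      if(container.MatchNamespace(request)) {
        container.Process(delegated, request, response, client_id);
      }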
*/ bool DelegateCredentialsInit(const SOAPEnvelope& in,SOAPEnvelope& out,const std::string& client = ""); /** See DelegationConsumerSOAP::UpdateCredentials */ bool UpdateCredentials(std::string& credentials,const SOAPEnvelope& in,SOAPEnvelope& out,const std::string& client = ""); bool UpdateCredentials(std::string& credentials,std::string& identity,const SOAPEnvelope& in,SOAPEnvelope& out,const std::string& client = ""); public: DelegationContainerSOAP(void); virtual ~DelegationContainerSOAP(void); /** See DelegationConsumerSOAP::DelegatedToken */ bool DelegatedToken(std::string& credentials,XMLNode token,const std::string& client = ""); bool DelegatedToken(std::string& credentials,std::string& identity,XMLNode token,const std::string& client = ""); bool Process(const SOAPEnvelope& in,SOAPEnvelope& out,const std::string& client = ""); bool Process(std::string& credentials,const SOAPEnvelope& in,SOAPEnvelope& out,const std::string& client = ""); /** Match namespace of SOAP request against supported interfaces. Returns true if namespace is supported. */ bool MatchNamespace(const SOAPEnvelope& in); /** Returns textual description of last failure. */ std::string GetFailure(void); }; } // namespace Arc #endif /* __ARC_DELEGATIONINTERFACE_H__ */ nordugrid-arc-5.4.2/src/hed/libs/delegation/PaxHeaders.7502/README0000644000000000000000000000012411016534143022446 xustar000000000000000027 mtime=1211807843.180089 27 atime=1513200574.652703 30 ctime=1513200659.661743349 nordugrid-arc-5.4.2/src/hed/libs/delegation/README0000644000175000002070000000011411016534143022507 0ustar00mockbuildmock00000000000000This library implement client and service side of ARC delegation interface. nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/ws-addressing0000644000000000000000000000013213214316023022144 xustar000000000000000030 mtime=1513200659.520741624 30 atime=1513200668.721854157 30 ctime=1513200659.520741624 nordugrid-arc-5.4.2/src/hed/libs/ws-addressing/0000755000175000002070000000000013214316023022267 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/ws-addressing/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515024267 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200598.740998264 30 ctime=1513200659.517741588 nordugrid-arc-5.4.2/src/hed/libs/ws-addressing/Makefile.am0000644000175000002070000000132112052416515024326 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libwsaddressing.la noinst_PROGRAMS = test libwsaddressing_ladir = $(pkgincludedir)/ws-addressing libwsaddressing_la_HEADERS = WSA.h libwsaddressing_la_SOURCES = WSA.cpp libwsaddressing_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libwsaddressing_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) test_SOURCES = test.cpp test_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) test_LDADD = libwsaddressing.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) nordugrid-arc-5.4.2/src/hed/libs/ws-addressing/PaxHeaders.7502/Makefile.in0000644000000000000000000000013013214315726024275 xustar000000000000000030 mtime=1513200598.788998851 30 atime=1513200648.304604446 28 ctime=1513200659.5187416 nordugrid-arc-5.4.2/src/hed/libs/ws-addressing/Makefile.in0000644000175000002070000006627113214315726024361 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 
1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ noinst_PROGRAMS = test$(EXEEXT) subdir = src/hed/libs/ws-addressing DIST_COMMON = README $(libwsaddressing_la_HEADERS) \ $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) am__DEPENDENCIES_1 = libwsaddressing_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) am_libwsaddressing_la_OBJECTS = libwsaddressing_la-WSA.lo libwsaddressing_la_OBJECTS = $(am_libwsaddressing_la_OBJECTS) libwsaddressing_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libwsaddressing_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ PROGRAMS = $(noinst_PROGRAMS) am_test_OBJECTS = test-test.$(OBJEXT) test_OBJECTS = $(am_test_OBJECTS) test_DEPENDENCIES = libwsaddressing.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) test_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(test_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) 
$(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libwsaddressing_la_SOURCES) $(test_SOURCES) DIST_SOURCES = $(libwsaddressing_la_SOURCES) $(test_SOURCES) am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libwsaddressing_ladir)" HEADERS = $(libwsaddressing_la_HEADERS) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ 
ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = 
@PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libwsaddressing.la libwsaddressing_ladir = $(pkgincludedir)/ws-addressing libwsaddressing_la_HEADERS = WSA.h libwsaddressing_la_SOURCES = WSA.cpp libwsaddressing_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libwsaddressing_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) test_SOURCES = 
test.cpp test_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) test_LDADD = libwsaddressing.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/ws-addressing/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/ws-addressing/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libwsaddressing.la: $(libwsaddressing_la_OBJECTS) $(libwsaddressing_la_DEPENDENCIES) $(libwsaddressing_la_LINK) $(libwsaddressing_la_OBJECTS) $(libwsaddressing_la_LIBADD) $(LIBS) clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list test$(EXEEXT): $(test_OBJECTS) $(test_DEPENDENCIES) @rm -f test$(EXEEXT) $(test_LINK) $(test_OBJECTS) $(test_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libwsaddressing_la-WSA.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-test.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo 
$(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libwsaddressing_la-WSA.lo: WSA.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libwsaddressing_la_CXXFLAGS) $(CXXFLAGS) -MT libwsaddressing_la-WSA.lo -MD -MP -MF $(DEPDIR)/libwsaddressing_la-WSA.Tpo -c -o libwsaddressing_la-WSA.lo `test -f 'WSA.cpp' || echo '$(srcdir)/'`WSA.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libwsaddressing_la-WSA.Tpo $(DEPDIR)/libwsaddressing_la-WSA.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='WSA.cpp' object='libwsaddressing_la-WSA.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libwsaddressing_la_CXXFLAGS) $(CXXFLAGS) -c -o libwsaddressing_la-WSA.lo `test -f 'WSA.cpp' || echo '$(srcdir)/'`WSA.cpp test-test.o: test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -MT test-test.o -MD -MP -MF $(DEPDIR)/test-test.Tpo -c -o test-test.o `test -f 'test.cpp' || echo '$(srcdir)/'`test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test-test.Tpo $(DEPDIR)/test-test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test.cpp' object='test-test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -c -o test-test.o `test -f 'test.cpp' || echo '$(srcdir)/'`test.cpp test-test.obj: test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -MT test-test.obj -MD -MP -MF $(DEPDIR)/test-test.Tpo -c -o test-test.obj `if test -f 'test.cpp'; then $(CYGPATH_W) 'test.cpp'; else $(CYGPATH_W) '$(srcdir)/test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test-test.Tpo $(DEPDIR)/test-test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test.cpp' object='test-test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -c -o test-test.obj `if test -f 'test.cpp'; then $(CYGPATH_W) 'test.cpp'; else $(CYGPATH_W) '$(srcdir)/test.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libwsaddressing_laHEADERS: $(libwsaddressing_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libwsaddressing_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libwsaddressing_ladir)" @list='$(libwsaddressing_la_HEADERS)'; test -n "$(libwsaddressing_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libwsaddressing_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libwsaddressing_ladir)" || exit $$?; \ done uninstall-libwsaddressing_laHEADERS: @$(NORMAL_UNINSTALL) 
@list='$(libwsaddressing_la_HEADERS)'; test -n "$(libwsaddressing_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libwsaddressing_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libwsaddressing_ladir)" && rm -f $$files ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) $(HEADERS) installdirs: for dir in "$(DESTDIR)$(libwsaddressing_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ clean-noinstPROGRAMS mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-libwsaddressing_laHEADERS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-libwsaddressing_laHEADERS .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES clean-noinstPROGRAMS \ ctags distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-libwsaddressing_laHEADERS install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am \ uninstall-libwsaddressing_laHEADERS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/ws-addressing/PaxHeaders.7502/test.cpp0000644000000000000000000000012410672011347023713 xustar000000000000000027 mtime=1189614311.554858 27 atime=1513200574.580702 30 ctime=1513200659.520741624 nordugrid-arc-5.4.2/src/hed/libs/ws-addressing/test.cpp0000644000175000002070000000340110672011347023756 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "WSA.h" int main(void) { std::string xml("\ \ \ http://example.com/someuniquestring\ \ http://example.com/business/client1\ \ \ http://example.com/business/client1\ \ mailto:fabrikam@example.com\ http://example.com/fabrikam/mail/Delete\ 123456789\ ABCDEFG\ \ mailto:admin@example.com\ \ \ \ \ 42\ \ \ \ "); Arc::SOAPEnvelope soap(xml); Arc::WSAHeader header(soap); std::cout<<"To: "< #endif #include #include "WSA.h" namespace Arc { static std::string strip_spaces(const std::string& s) { std::string::size_type start = 0; for(;start=start;--end) if(!isspace(s[end])) break; return s.substr(start,end-start+1); } static void remove_empty_nodes(XMLNode& parent,const char* name) { while(true) { XMLNode to = parent[name]; if(!to) break; if(to.Size() > 0) break; if(!(((std::string)to).empty())) break; to.Destroy(); }; return; } static XMLNode get_node(XMLNode& parent,const char* name) { XMLNode n = parent[name]; if(!n) n=parent.NewChild(name); return n; } WSAEndpointReference::WSAEndpointReference(XMLNode epr) : epr_(epr) { NS ns; ns["wsa"]=WSA_NAMESPACE; epr_.Namespaces(ns); } WSAEndpointReference::WSAEndpointReference(const WSAEndpointReference& wsa) : epr_(wsa.epr_) {} WSAEndpointReference::WSAEndpointReference(const std::string&) { } WSAEndpointReference::WSAEndpointReference(void) { } WSAEndpointReference::~WSAEndpointReference(void) { remove_empty_nodes(epr_,"wsa:Address"); remove_empty_nodes(epr_,"wsa:ReferenceParameters"); remove_empty_nodes(epr_,"wsa:MetaData"); } std::string WSAEndpointReference::Address(void) const { return strip_spaces(const_cast(epr_)["wsa:Address"]); } bool WSAEndpointReference::hasAddress(void) const { return (bool)(const_cast(epr_)["wsa:Address"]); } WSAEndpointReference& WSAEndpointReference::operator=(const std::string& address) { Address(address); return *this; } void WSAEndpointReference::Address(const std::string& uri) { get_node(epr_,"wsa:Address")=uri; } XMLNode WSAEndpointReference::ReferenceParameters(void) { return get_node(epr_,"wsa:ReferenceParameters"); } XMLNode WSAEndpointReference::MetaData(void) { return get_node(epr_,"wsa:MetaData"); } WSAEndpointReference::operator XMLNode(void) { return epr_; } WSAHeader::WSAHeader(SOAPEnvelope& soap) { header_=soap.Header(); header_allocated_=false; // apply predefined namespace prefix NS ns; ns["wsa"]=WSA_NAMESPACE; header_.Namespaces(ns); } WSAHeader::WSAHeader(const std::string&): header_allocated_(false) { } WSAHeader::~WSAHeader(void) { if(!header_) return; // Scan for empty WSA element and remove them from tree remove_empty_nodes(header_,"wsa:To"); remove_empty_nodes(header_,"wsa:From"); remove_empty_nodes(header_,"wsa:ReplyTo"); remove_empty_nodes(header_,"wsa:FaultTo"); remove_empty_nodes(header_,"wsa:MessageID"); remove_empty_nodes(header_,"wsa:RelatesTo"); remove_empty_nodes(header_,"wsa:ReferenceParameters"); remove_empty_nodes(header_,"wsa:Action"); } std::string WSAHeader::To(void) const { return strip_spaces(const_cast(header_)["wsa:To"]); } bool WSAHeader::hasTo(void) const { return (bool)(const_cast(header_)["wsa:To"]); } void WSAHeader::To(const std::string& uri) { 
get_node(header_,"wsa:To")=uri; } std::string WSAHeader::Action(void) const { return strip_spaces(const_cast(header_)["wsa:Action"]); } bool WSAHeader::hasAction(void) const { return (bool)(const_cast(header_)["wsa:Action"]); } void WSAHeader::Action(const std::string& uri) { get_node(header_,"wsa:Action")=uri; } std::string WSAHeader::MessageID(void) const { return strip_spaces(const_cast(header_)["wsa:MessageID"]); } bool WSAHeader::hasMessageID(void) const { return (bool)(const_cast(header_)["wsa:MessageID"]); } void WSAHeader::MessageID(const std::string& uri) { get_node(header_,"wsa:MessageID")=uri; } std::string WSAHeader::RelatesTo(void) const { return strip_spaces(const_cast(header_)["wsa:RelatesTo"]); } bool WSAHeader::hasRelatesTo(void) const { return (bool)(const_cast(header_)["wsa:RelatesTo"]); } void WSAHeader::RelatesTo(const std::string& uri) { get_node(header_,"wsa:RelatesTo")=uri; } WSAEndpointReference WSAHeader::From(void) { return WSAEndpointReference(get_node(header_,"wsa:From")); } WSAEndpointReference WSAHeader::ReplyTo(void) { return WSAEndpointReference(get_node(header_,"wsa:ReplyTo")); } WSAEndpointReference WSAHeader::FaultTo(void) { return WSAEndpointReference(get_node(header_,"wsa:FaultTo")); } std::string WSAHeader::RelationshipType(void) const { return strip_spaces(const_cast(header_)["wsa:ReplyTo"].Attribute("wsa:RelationshipType")); } bool WSAHeader::hasRelationshipType(void) const { return (bool)(const_cast(header_)["wsa:ReplyTo"].Attribute("wsa:RelationshipType")); } void WSAHeader::RelationshipType(const std::string& uri) { XMLNode n = get_node(header_,"wsa:ReplyTo"); XMLNode a = n.Attribute("wsa:RelationshipType"); if(!a) a=n.NewAttribute("wsa:RelationshipType"); a=uri; } //XMLNode WSAHeader::ReferenceParameters(void) { // return get_node(header_,"wsa:ReferenceParameters"); //} XMLNode WSAHeader::ReferenceParameter(int num) { for(int i=0;;++i) { XMLNode n = header_.Child(i); if(!n) return n; XMLNode a = n.Attribute("wsa:IsReferenceParameter"); if(!a) continue; if(strcasecmp("true",((std::string)a).c_str()) != 0) continue; if((num--) <= 0) return n; }; return XMLNode(); // to keep compiler happy } XMLNode WSAHeader::ReferenceParameter(const std::string& name) { XMLNode n_ = header_[name]; for(int i=0;;++i) { XMLNode n = n_[i]; if(!n) return n; XMLNode a = n.Attribute("wsa:IsReferenceParameter"); if(!a) continue; if(strcasecmp("true",((std::string)a).c_str()) != 0) continue; return n; }; return XMLNode(); // to keep compiler happy } XMLNode WSAHeader::NewReferenceParameter(const std::string& name) { XMLNode n = header_.NewChild(name); XMLNode a = n.NewAttribute("wsa:IsReferenceParameter"); a="true"; return n; } bool WSAHeader::Check(SOAPEnvelope& soap) { if(soap.NamespacePrefix(WSA_NAMESPACE).empty()) return false; WSAHeader wsa(soap); if(!wsa.header_["wsa:Action"]) return false; if(!wsa.header_["wsa:To"]) return false; return true; } void WSAFaultAssign(SOAPEnvelope& message,WSAFault fid) { // TODO: Detail SOAPFault& fault = *(message.Fault()); if(&fault == NULL) return; NS ns; ns["wsa"]="http://www.w3.org/2005/08/addressing"; message.Namespaces(ns); switch(fid) { case WSAFaultInvalidAddressingHeader: case WSAFaultInvalidAddress: case WSAFaultInvalidEPR: case WSAFaultInvalidCardinality: case WSAFaultMissingAddressInEPR: case WSAFaultDuplicateMessageID: case WSAFaultActionMismatch: case WSAFaultOnlyAnonymousAddressSupported: case WSAFaultOnlyNonAnonymousAddressSupported: fault.Code(SOAPFault::Sender); fault.Subcode(1,"wsa:InvalidAddressingHeader"); 
fault.Reason(0,"A header representing a Message Addressing Property is not valid and the message cannot be processed"); switch(fid) { case WSAFaultInvalidAddress: fault.Subcode(2,"wsa:InvalidAddress"); break; case WSAFaultInvalidEPR: fault.Subcode(2,"wsa:InvalidEPR"); break; case WSAFaultInvalidCardinality: fault.Subcode(2,"wsa:InvalidCardinality"); break; case WSAFaultMissingAddressInEPR: fault.Subcode(2,"wsa:MissingAddressInEPR"); break; case WSAFaultDuplicateMessageID: fault.Subcode(2,"wsa:DuplicateMessageID"); break; case WSAFaultActionMismatch: fault.Subcode(2,"wsa:ActionMismatch"); break; case WSAFaultOnlyAnonymousAddressSupported: fault.Subcode(2,"wsa:OnlyAnonymousAddressSupported"); break; case WSAFaultOnlyNonAnonymousAddressSupported: fault.Subcode(2,"wsa:OnlyNonAnonymousAddressSupported"); break; default: break; }; break; case WSAFaultMessageAddressingHeaderRequired: fault.Code(SOAPFault::Sender); fault.Subcode(1,"wsa:MessageAddressingHeaderRequired"); fault.Reason(0,"A required header representing a Message Addressing Property is not present"); break; case WSAFaultDestinationUnreachable: fault.Code(SOAPFault::Sender); fault.Subcode(1,"wsa:DestinationUnreachable"); fault.Reason(0,"No route can be determined to reach [destination]"); break; case WSAFaultActionNotSupported: fault.Code(SOAPFault::Sender); fault.Subcode(1,"wsa:ActionNotSupported"); fault.Reason(0,"The [action] cannot be processed at the receiver"); break; case WSAFaultEndpointUnavailable: fault.Code(SOAPFault::Receiver); fault.Subcode(1,"wsa:EndpointUnavailable"); fault.Reason(0,"The endpoint is unable to process the message at this time"); break; default: break; }; } WSAFault WSAFaultExtract(SOAPEnvelope& message) { // TODO: extend XML interface to compare QNames WSAFault fid = WSAFaultNone; SOAPFault& fault = *(message.Fault()); if(&fault == NULL) return fid; //XMLNode::NS ns; //ns["wsa"]="http://www.w3.org/2005/08/addressing"; //message.Namespaces(ns); std::string prefix = message.NamespacePrefix("http://www.w3.org/2005/08/addressing"); std::string code = fault.Subcode(1); if(code.empty()) return fid; if(!prefix.empty()) { prefix=":"+prefix; if(strncasecmp(prefix.c_str(),code.c_str(),prefix.length()) != 0) return fid; code=code.substr(prefix.length()); }; fid=WSAFaultUnknown; if(strcasecmp(code.c_str(),"InvalidAddressingHeader") == 0) { fid=WSAFaultInvalidAddressingHeader; std::string subcode = fault.Subcode(2); if(!subcode.empty()) { if(!prefix.empty()) { prefix=":"+prefix; if(strncasecmp(prefix.c_str(),subcode.c_str(),prefix.length()) != 0) return fid; subcode=subcode.substr(prefix.length()); }; if(strcasecmp(subcode.c_str(),"InvalidAddress") == 0) { fid=WSAFaultInvalidAddress; } else if(strcasecmp(subcode.c_str(),"InvalidEPR") == 0) { fid=WSAFaultInvalidEPR; } else if(strcasecmp(subcode.c_str(),"InvalidCardinality") == 0) { fid=WSAFaultInvalidCardinality; } else if(strcasecmp(subcode.c_str(),"MissingAddressInEPR") == 0) { fid=WSAFaultMissingAddressInEPR; } else if(strcasecmp(subcode.c_str(),"DuplicateMessageID") == 0) { fid=WSAFaultDuplicateMessageID; } else if(strcasecmp(subcode.c_str(),"ActionMismatch") == 0) { fid=WSAFaultActionMismatch; } else if(strcasecmp(subcode.c_str(),"OnlyAnonymousAddressSupported") == 0) { fid=WSAFaultOnlyAnonymousAddressSupported; } else if(strcasecmp(subcode.c_str(),"OnlyNonAnonymousAddressSupported") == 0) { fid=WSAFaultOnlyNonAnonymousAddressSupported; }; }; } else if(strcasecmp(code.c_str(),"MessageAddressingHeaderRequired") == 0) { fid=WSAFaultMessageAddressingHeaderRequired; } 
else if(strcasecmp(code.c_str(),"DestinationUnreachable") == 0) { fid=WSAFaultDestinationUnreachable; } else if(strcasecmp(code.c_str(),"ActionNotSupported") == 0) { fid=WSAFaultActionNotSupported; } else if(strcasecmp(code.c_str(),"EndpointUnavailable") == 0) { fid=WSAFaultEndpointUnavailable; }; return fid; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/ws-addressing/PaxHeaders.7502/WSA.h0000644000000000000000000000012411656057712023044 xustar000000000000000027 mtime=1320705994.866899 27 atime=1513200574.579702 30 ctime=1513200659.516741575 nordugrid-arc-5.4.2/src/hed/libs/ws-addressing/WSA.h0000644000175000002070000001414211656057712023113 0ustar00mockbuildmock00000000000000#ifndef __ARC_WSA_H__ #define __ARC_WSA_H__ #include #include // WS-Adressing // wsa="http://www.w3.org/2005/08/addressing" namespace Arc { #define WSA_NAMESPACE "http://www.w3.org/2005/08/addressing" /// Interface for manipulation of WS-Adressing Endpoint Reference. /** It works on Endpoint Reference stored in XML tree. No information is stored in this object except reference to corresponding XML subtree. */ class WSAEndpointReference { protected: XMLNode epr_; /** Link to top level EPR XML node */ public: /** Linking to existing EPR in XML tree */ WSAEndpointReference(XMLNode epr); /** Copy constructor */ WSAEndpointReference(const WSAEndpointReference& wsa); /** Creating independent EPR - not implemented */ WSAEndpointReference(const std::string& address); /** Dummy constructor - creates invalid instance */ WSAEndpointReference(void); /** Destructor. All empty elements of EPR XML are destroyed here too */ ~WSAEndpointReference(void); /** Returns Address (URL) encoded in EPR */ std::string Address(void) const; /** Returns true if Address is defined */ bool hasAddress(void) const; /** Assigns new Address value. If EPR had no Address element it is created. */ void Address(const std::string& uri); /** Same as Address(uri) */ WSAEndpointReference& operator=(const std::string& address); /** Access to ReferenceParameters element of EPR. Obtained XML element should be manipulated directly in application-dependent way. If EPR had no ReferenceParameters element it is created. */ XMLNode ReferenceParameters(void); /** Access to MetaData element of EPR. Obtained XML element should be manipulated directly in application-dependent way. If EPR had no MetaData element it is created. */ XMLNode MetaData(void); /** Returns reference to EPR top XML node */ operator XMLNode(void); }; /// Interface for manipulation WS-Addressing information in SOAP header /** It works on Endpoint Reference stored in XML tree. No information is stored in this object except reference to corresponding XML subtree. */ class WSAHeader { protected: XMLNode header_; /** SOAP header element */ bool header_allocated_; /* not used */ //XMLNode from_; //XMLNode to_; //XMLNode replyto_; //XMLNode faultto_; public: /** Linking to a header of existing SOAP message */ WSAHeader(SOAPEnvelope& soap); /** Creating independent SOAP header - not implemented */ WSAHeader(const std::string& action); ~WSAHeader(void); /** Returns content of To element of SOAP Header. */ std::string To(void) const; /** Returns true if To element is defined. */ bool hasTo(void) const; /** Set content of To element of SOAP Header. If such element does not exist it's created. */ void To(const std::string& uri); /** Returns From element of SOAP Header. If such element does not exist it's created. Obtained element may be manipulted. 
*/ WSAEndpointReference From(void); /** Returns ReplyTo element of SOAP Header. If such element does not exist it's created. Obtained element may be manipulted. */ WSAEndpointReference ReplyTo(void); /** Returns FaultTo element of SOAP Header. If such element does not exist it's created. Obtained element may be manipulted. */ WSAEndpointReference FaultTo(void); /** Returns content of Action element of SOAP Header. */ std::string Action(void) const; /** Returns true if Action element is defined. */ bool hasAction(void) const; /** Set content of Action element of SOAP Header. If such element does not exist it's created. */ void Action(const std::string& uri); /** Returns content of MessageID element of SOAP Header. */ std::string MessageID(void) const; /** Returns true if MessageID element is defined. */ bool hasMessageID(void) const; /** Set content of MessageID element of SOAP Header. If such element does not exist it's created. */ void MessageID(const std::string& uri); /** Returns content of RelatesTo element of SOAP Header. */ std::string RelatesTo(void) const; /** Returns true if RelatesTo element is defined. */ bool hasRelatesTo(void) const; /** Set content of RelatesTo element of SOAP Header. If such element does not exist it's created. */ void RelatesTo(const std::string& uri); /** Returns content of RelationshipType element of SOAP Header. */ std::string RelationshipType(void) const; /** Returns true if RelationshipType element is defined. */ bool hasRelationshipType(void) const; /** Set content of RelationshipType element of SOAP Header. If such element does not exist it's created. */ void RelationshipType(const std::string& uri); /** Return n-th ReferenceParameter element */ XMLNode ReferenceParameter(int n); /** Returns first ReferenceParameter element with specified name */ XMLNode ReferenceParameter(const std::string& name); /** Creates new ReferenceParameter element with specified name. Returns reference to created element. */ XMLNode NewReferenceParameter(const std::string& name); /** Returns reference to SOAP Header - not implemented */ operator XMLNode(void); /** Tells if specified SOAP message has WSA header */ static bool Check(SOAPEnvelope& soap); }; /// WS-Addressing possible faults typedef enum { WSAFaultNone, /** This is not a fault */ WSAFaultUnknown, /** This is not a WS-Addressing fault */ WSAFaultInvalidAddressingHeader, WSAFaultInvalidAddress, WSAFaultInvalidEPR, WSAFaultInvalidCardinality, WSAFaultMissingAddressInEPR, WSAFaultDuplicateMessageID, WSAFaultActionMismatch, WSAFaultOnlyAnonymousAddressSupported, WSAFaultOnlyNonAnonymousAddressSupported, WSAFaultMessageAddressingHeaderRequired, WSAFaultDestinationUnreachable, WSAFaultActionNotSupported, WSAFaultEndpointUnavailable } WSAFault; /// Makes WS-Addressing fault. /** It fills SOAP Fault message with WS-Addressing fault related information. */ void WSAFaultAssign(SOAPEnvelope& mesage,WSAFault fid); /// Gets WS-addressing fault. /** Analyzes SOAP Fault message and returns WS-Addressing fault it represents. */ WSAFault WSAFaultExtract(SOAPEnvelope& message); } // namespace Arc #endif /* __ARC_WSA_H__ */ nordugrid-arc-5.4.2/src/hed/libs/ws-addressing/PaxHeaders.7502/README0000644000000000000000000000012411001653037023103 xustar000000000000000027 mtime=1208440351.928622 27 atime=1513200574.579702 30 ctime=1513200659.515741563 nordugrid-arc-5.4.2/src/hed/libs/ws-addressing/README0000644000175000002070000000004511001653037023147 0ustar00mockbuildmock00000000000000implementation of WS-Addresing spec. 
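A minimal usage sketch of the classes declared in WSA.h follows. It is illustrative only and not part of the original distribution: the include paths, the SOAP 1.2 envelope string and all URIs below are assumptions, while the WSAHeader and WSAEndpointReference calls themselves are taken from WSA.h and test.cpp.

#include <iostream>
#include <string>
// Assumed installed header locations (pkgincludedir layout); adjust as needed.
#include <arc/message/SOAPEnvelope.h>
#include <arc/ws-addressing/WSA.h>

int main(void) {
  // Start from a bare SOAP 1.2 envelope (assumed parseable by SOAPEnvelope,
  // as test.cpp constructs its envelope from an XML string in the same way).
  // WSAHeader attaches the wsa: namespace to the header when constructed.
  std::string xml(
    "<soap-env:Envelope xmlns:soap-env=\"http://www.w3.org/2003/05/soap-envelope\">"
    "<soap-env:Header/><soap-env:Body/></soap-env:Envelope>");
  Arc::SOAPEnvelope soap(xml);
  Arc::WSAHeader header(soap);

  // Setters create the corresponding wsa:* elements on demand.
  header.To("https://example.org/service");            // placeholder endpoint
  header.Action("http://example.org/some/action");     // placeholder action URI
  header.MessageID("uuid:00000000-0000-0000-0000-000000000000");
  header.ReplyTo().Address("http://www.w3.org/2005/08/addressing/anonymous");

  // Getters read the elements back; Check() reports whether the message
  // carries the mandatory wsa:To and wsa:Action headers.
  std::cout << "To: " << header.To() << std::endl;
  std::cout << "Action: " << header.Action() << std::endl;
  std::cout << "MessageID: " << header.MessageID() << std::endl;
  std::cout << "Has WS-Addressing header: "
            << (Arc::WSAHeader::Check(soap) ? "yes" : "no") << std::endl;
  return 0;
}

Such a program would be linked against libwsaddressing.la together with libarcmessage and libarccommon, as the bundled test program in this directory is.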
nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/README0000644000000000000000000000012411016534143020333 xustar000000000000000027 mtime=1211807843.180089 27 atime=1513200574.985707 30 ctime=1513200658.769732439 nordugrid-arc-5.4.2/src/hed/libs/README0000644000175000002070000000002711016534143020377 0ustar00mockbuildmock00000000000000Base libraries of HED. nordugrid-arc-5.4.2/src/hed/libs/PaxHeaders.7502/compute0000644000000000000000000000013213214316023021046 xustar000000000000000030 mtime=1513200659.818745269 30 atime=1513200668.721854157 30 ctime=1513200659.818745269 nordugrid-arc-5.4.2/src/hed/libs/compute/0000755000175000002070000000000013214316023021171 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/SubmitterPlugin.cpp0000644000000000000000000000012412675602216025001 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.772705 30 ctime=1513200659.793744963 nordugrid-arc-5.4.2/src/hed/libs/compute/SubmitterPlugin.cpp0000644000175000002070000001722012675602216025050 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace Arc { Logger SubmitterPlugin::logger(Logger::getRootLogger(), "SubmitterPlugin"); std::map SubmitterPluginLoader::interfacePluginMap; SubmissionStatus SubmitterPlugin::Submit(const std::list& jobdescs, const std::string& endpoint, EntityConsumer& jc, std::list& notSubmitted) { for (std::list::const_iterator it = jobdescs.begin(); it != jobdescs.end(); ++it) { notSubmitted.push_back(&*it); } return SubmissionStatus::NOT_IMPLEMENTED | SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; } bool SubmitterPlugin::PutFiles(const JobDescription& job, const URL& url) const { FileCache cache; DataMover mover; mover.retry(true); mover.secure(false); mover.passive(true); mover.verbose(false); for (std::list::const_iterator it = job.DataStaging.InputFiles.begin(); it != job.DataStaging.InputFiles.end(); ++it) if (!it->Sources.empty()) { const URL& src = it->Sources.front(); if (src.Protocol() == "file") { if(!url) { logger.msg(ERROR, "No stagein URL is provided"); return false; }; URL dst(url); dst.ChangePath(dst.Path() + '/' + it->Name); dst.AddOption("blocksize=1048576",false); dst.AddOption("checksum=no",false); DataHandle source(src, *usercfg); if ((!dest_handle) || (!*dest_handle) || (!(*dest_handle)->SetURL(dst))) { if(dest_handle) delete dest_handle; ((SubmitterPlugin*)this)->dest_handle = new DataHandle(dst, *usercfg); }; DataHandle& destination = *dest_handle; source->SetTries((src.Protocol() == "file")?1:3); destination->SetTries((dst.Protocol() == "file")?1:3); DataStatus res = mover.Transfer(*source, *destination, cache, URLMap(), 0, 0, 0, usercfg->Timeout()); if (!res.Passed()) { logger.msg(ERROR, "Failed uploading file %s to %s: %s", source->GetURL().fullstr(), destination->GetURL().fullstr(), std::string(res)); return false; } } } return true; } void SubmitterPlugin::AddJobDetails(const JobDescription& jobdesc, Job& job) const { if (!jobdesc.Identification.JobName.empty()) { job.Name = jobdesc.Identification.JobName; } job.LocalSubmissionTime = Arc::Time().str(UTCTime); job.ActivityOldID = jobdesc.Identification.ActivityOldID; jobdesc.UnParse(job.JobDescriptionDocument, !jobdesc.GetSourceLanguage().empty() ? jobdesc.GetSourceLanguage() : "nordugrid:jsdl"); // Assuming job description is valid. 
job.LocalInputFiles.clear(); for (std::list::const_iterator it = jobdesc.DataStaging.InputFiles.begin(); it != jobdesc.DataStaging.InputFiles.end(); it++) { if (!it->Sources.empty() && it->Sources.front().Protocol() == "file") { job.LocalInputFiles[it->Name] = it->Checksum; //CheckSumAny::FileChecksum(it->Sources.front().Path(), CheckSumAny::cksum, true); } } } bool SubmitterPlugin::Migrate(const std::string& /*jobid*/, const JobDescription& /*jobdesc*/, const ExecutionTarget& et, bool /*forcemigration*/, Job& /*job*/) { logger.msg(INFO, "Trying to migrate to %s: Migration to a %s interface is not supported.", et.ComputingEndpoint->URLString, !supportedInterfaces.empty() ? supportedInterfaces.front() : ""); return false; }; SubmitterPluginLoader::SubmitterPluginLoader() : Loader(BaseConfig().MakeConfig(Config()).Parent()) {} SubmitterPluginLoader::~SubmitterPluginLoader() { for (std::multimap::iterator it = submitters.begin(); it != submitters.end(); ++it) { delete it->second; } } void SubmitterPluginLoader::initialiseInterfacePluginMap(const UserConfig& uc) { if (!interfacePluginMap.empty()) { return; } std::list modules; PluginsFactory factory(BaseConfig().MakeConfig(Config()).Parent()); factory.scan(FinderLoader::GetLibrariesList(), modules); PluginsFactory::FilterByKind("HED:SubmitterPlugin", modules); std::list availablePlugins; for (std::list::const_iterator it = modules.begin(); it != modules.end(); ++it) { for (std::list::const_iterator it2 = it->plugins.begin(); it2 != it->plugins.end(); ++it2) { availablePlugins.push_back(it2->name); } } if (interfacePluginMap.empty()) { // Map supported interfaces to available plugins. for (std::list::iterator itT = availablePlugins.begin(); itT != availablePlugins.end(); ++itT) { SubmitterPlugin* p = load(*itT, uc); if (!p) { continue; } for (std::list::const_iterator itI = p->SupportedInterfaces().begin(); itI != p->SupportedInterfaces().end(); ++itI) { if (!itT->empty()) { // Do not allow empty interface. // If two plugins supports two identical interfaces, then only the last will appear in the map. interfacePluginMap[*itI] = *itT; } } } } } SubmitterPlugin* SubmitterPluginLoader::load(const std::string& name, const UserConfig& usercfg) { if (name.empty()) return NULL; if(!factory_->load(FinderLoader::GetLibrariesList(), "HED:SubmitterPlugin", name)) { logger.msg(ERROR, "Unable to locate the \"%s\" plugin. Please refer to installation instructions and check if package providing support for \"%s\" plugin is installed", name, name); logger.msg(DEBUG, "SubmitterPlugin plugin \"%s\" not found.", name); return NULL; } SubmitterPluginArgument arg(usercfg); SubmitterPlugin *submitter = factory_->GetInstance("HED:SubmitterPlugin", name, &arg, false); if (!submitter) { logger.msg(ERROR, "Unable to locate the \"%s\" plugin. 
Please refer to installation instructions and check if package providing support for \"%s\" plugin is installed", name, name); logger.msg(DEBUG, "SubmitterPlugin %s could not be created", name); return NULL; } submitters.insert(std::pair(name, submitter)); logger.msg(DEBUG, "Loaded SubmitterPlugin %s", name); return submitter; } SubmitterPlugin* SubmitterPluginLoader::loadByInterfaceName(const std::string& name, const UserConfig& uc) { if (interfacePluginMap.empty()) { initialiseInterfacePluginMap(uc); } std::map::const_iterator itPN = interfacePluginMap.find(name); if (itPN != interfacePluginMap.end()) { std::map::iterator itS = submitters.find(itPN->second); if (itS != submitters.end()) { itS->second->SetUserConfig(uc); return itS->second; } return load(itPN->second, uc); } return NULL; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/Makefile.am0000644000000000000000000000012713065017510023166 xustar000000000000000027 mtime=1490296648.482805 30 atime=1513200596.487970709 30 ctime=1513200659.786744877 nordugrid-arc-5.4.2/src/hed/libs/compute/Makefile.am0000644000175000002070000000644113065017510023235 0ustar00mockbuildmock00000000000000lib_LTLIBRARIES = libarccompute.la noinst_PROGRAMS = test_jobdescription test_JobInformationStorage if DBJSTORE_ENABLED HEADER_WITH_DBJSTORE = JobInformationStorageBDB.h SOURCE_WITH_DBJSTORE = JobInformationStorageBDB.cpp CXXFLAGS_WITH_DBJSTORE = $(DBCXX_CPPFLAGS) LIBADD_WITH_DBJSTORE = $(DBCXX_LIBS) else HEADER_WITH_DBJSTORE = SOURCE_WITH_DBJSTORE = CXXFLAGS_WITH_DBJSTORE = LIBADD_WITH_DBJSTORE = endif if SQLITE_ENABLED HEADER_WITH_SQLITEJSTORE = JobInformationStorageSQLite.h SOURCE_WITH_SQLITEJSTORE = JobInformationStorageSQLite.cpp CXXFLAGS_WITH_SQLITEJSTORE = $(SQLITE_CFLAGS) LIBADD_WITH_SQLITEJSTORE = $(SQLITE_LIBS) else HEADER_WITH_SQLITEJSTORE = SOURCE_WITH_SQLITEJSTORE = CXXFLAGS_WITH_SQLITEJSTORE = LIBADD_WITH_SQLITEJSTORE = endif libarccompute_ladir = $(pkgincludedir)/compute libarccompute_la_HEADERS = Broker.h BrokerPlugin.h ExecutionTarget.h \ Submitter.h SubmitterPlugin.h GLUE2Entity.h SubmissionStatus.h \ JobState.h Job.h JobControllerPlugin.h JobSupervisor.h \ JobDescription.h JobDescriptionParserPlugin.h Software.h \ GLUE2.h EndpointQueryingStatus.h TestACCControl.h \ EntityRetriever.h EntityRetrieverPlugin.h Endpoint.h \ ComputingServiceRetriever.h JobInformationStorage.h \ JobInformationStorageXML.h $(HEADER_WITH_DBJSTORE) $(HEADER_WITH_SQLITEJSTORE) libarccompute_la_SOURCES = Broker.cpp BrokerPlugin.cpp ExecutionTarget.cpp \ Submitter.cpp SubmitterPlugin.cpp \ JobState.cpp Job.cpp JobControllerPlugin.cpp JobSupervisor.cpp \ JobDescription.cpp JobDescriptionParserPlugin.cpp Software.cpp \ GLUE2.cpp EndpointQueryingStatus.cpp TestACCControl.cpp \ EntityRetriever.cpp EntityRetrieverPlugin.cpp Endpoint.cpp \ ComputingServiceRetriever.cpp \ JobInformationStorageDescriptor.cpp \ JobInformationStorageXML.cpp $(SOURCE_WITH_DBJSTORE) $(SOURCE_WITH_SQLITEJSTORE) libarccompute_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) \ $(CXXFLAGS_WITH_DBJSTORE) $(CXXFLAGS_WITH_SQLITEJSTORE) $(AM_CXXFLAGS) libarccompute_la_LIBADD = \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) 
$(GLIBMM_LIBS) $(LIBADD_WITH_DBJSTORE) $(LIBADD_WITH_SQLITEJSTORE) libarccompute_la_LDFLAGS = -version-info 3:0:0 test_jobdescription_SOURCES = test_jobdescription.cpp test_jobdescription_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) test_jobdescription_LDADD = libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(DBCXX_LIBS) test_JobInformationStorage_SOURCES = test_JobInformationStorage.cpp test_JobInformationStorage_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) test_JobInformationStorage_LDADD = libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) testjobdir = $(pkgdatadir)/test-jobs testjob_DATA = test-job-1 test-job-2 test-job-3 EXTRA_DIST = $(testjob_DATA) DIST_SUBDIRS = test examples SUBDIRS = . $(TEST_DIR) examples nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/Job.cpp0000644000000000000000000000012413167372401022353 xustar000000000000000027 mtime=1507718401.841603 27 atime=1513200574.732704 30 ctime=1513200659.795744988 nordugrid-arc-5.4.2/src/hed/libs/compute/Job.cpp0000644000175000002070000010344113167372401022423 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "Job.h" #define JXMLTOSTRING(NAME) \ if (job[ #NAME ]) {\ NAME = (std::string)job[ #NAME ];\ } #define JXMLSTRINGTO(TYPE, NAME) \ if (job[ #NAME ]) {\ TYPE temp##TYPE##NAME;\ if (stringto((std::string)job[ #NAME ], temp##TYPE##NAME)) {\ NAME = temp##TYPE##NAME;\ }\ } #define JXMLTOTIME(NAME) \ if (job[ #NAME ]) {\ Time temp##NAME((std::string)job[ #NAME ]);\ if (temp##NAME.GetTime() != -1) {\ NAME = temp##NAME;\ }\ } #define JXMLTOSTRINGLIST(NAME) \ NAME.clear();\ for (XMLNode n = job[ #NAME ]; n; ++n) {\ NAME.push_back((std::string)n);\ } #define STRINGTOXML(NAME) \ if (!(NAME).empty()) {\ node.NewChild( #NAME ) = NAME;\ } #define URLTOXML(NAME) \ if (NAME) {\ node.NewChild( #NAME ) = NAME.fullstr();\ } #define INTTOXML(NAME) \ if (NAME != -1) {\ node.NewChild( #NAME ) = tostring(NAME);\ } #define TIMETOSTRING(NAME) \ if (NAME != -1) {\ node.NewChild( #NAME ) = (NAME).str(UTCTime);\ } #define PERIODTOSTRING(NAME) \ if (NAME != -1) {\ node.NewChild( #NAME ) = (std::string)NAME;\ } #define STRINGLISTTOXML(NAME) \ for (std::list::const_iterator it = NAME.begin();\ it != NAME.end(); it++) {\ node.NewChild( #NAME ) = *it;\ } namespace Arc { Logger Job::logger(Logger::getRootLogger(), "Job"); JobControllerPluginLoader& Job::getLoader() { // For C++ it would be enough to have // static JobControllerPluginLoader loader; // But Java sometimes does not destroy objects causing // PluginsFactory destructor loop forever waiting for // plugins to exit. 
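  // A minimal generic sketch of the intentionally leaked "construct on first
  // use" idiom described in the comment above (illustration only; Registry is
  // a hypothetical type, not part of this library):
  //
  //   class Registry { /* ... */ };
  //
  //   Registry& getRegistry() {
  //     static Registry* r = NULL;    // deliberately never deleted
  //     if (!r) r = new Registry();   // created the first time it is needed
  //     return *r;                    // sidesteps static-destruction/JNI teardown ordering problems
  //   }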
static JobControllerPluginLoader* loader = NULL; if(!loader) { loader = new JobControllerPluginLoader(); } return *loader; } DataHandle* Job::data_source = NULL; DataHandle* Job::data_destination = NULL; Job::Job() : ExitCode(-1), WaitingPosition(-1), RequestedTotalWallTime(-1), RequestedTotalCPUTime(-1), RequestedSlots(-1), UsedTotalWallTime(-1), UsedTotalCPUTime(-1), UsedMainMemory(-1), LocalSubmissionTime(-1), SubmissionTime(-1), ComputingManagerSubmissionTime(-1), StartTime(-1), ComputingManagerEndTime(-1), EndTime(-1), WorkingAreaEraseTime(-1), ProxyExpirationTime(-1), CreationTime(-1), Validity(-1), jc(NULL) {} Job::~Job() {} Job::Job(const Job& j) : ExitCode(-1), WaitingPosition(-1), RequestedTotalWallTime(-1), RequestedTotalCPUTime(-1), RequestedSlots(-1), UsedTotalWallTime(-1), UsedTotalCPUTime(-1), UsedMainMemory(-1), LocalSubmissionTime(-1), SubmissionTime(-1), ComputingManagerSubmissionTime(-1), StartTime(-1), ComputingManagerEndTime(-1), EndTime(-1), WorkingAreaEraseTime(-1), ProxyExpirationTime(-1), CreationTime(-1), Validity(-1), jc(NULL) { *this = j; } Job::Job(XMLNode j) : ExitCode(-1), WaitingPosition(-1), RequestedTotalWallTime(-1), RequestedTotalCPUTime(-1), RequestedSlots(-1), UsedTotalWallTime(-1), UsedTotalCPUTime(-1), UsedMainMemory(-1), LocalSubmissionTime(-1), SubmissionTime(-1), ComputingManagerSubmissionTime(-1), StartTime(-1), ComputingManagerEndTime(-1), EndTime(-1), WorkingAreaEraseTime(-1), ProxyExpirationTime(-1), CreationTime(-1), Validity(-1) { *this = j; } Job& Job::operator=(const Job& j) { jc = j.jc; // Proposed mandatory attributes for ARC 3.0 Name = j.Name; ServiceInformationURL = j.ServiceInformationURL; ServiceInformationInterfaceName = j.ServiceInformationInterfaceName; JobStatusURL = j.JobStatusURL; JobStatusInterfaceName = j.JobStatusInterfaceName; JobManagementURL = j.JobManagementURL; JobManagementInterfaceName = j.JobManagementInterfaceName; StageInDir = j.StageInDir; StageOutDir = j.StageOutDir; SessionDir = j.SessionDir; JobID = j.JobID; Type = j.Type; IDFromEndpoint = j.IDFromEndpoint; LocalIDFromManager = j.LocalIDFromManager; JobDescription = j.JobDescription; JobDescriptionDocument = j.JobDescriptionDocument; State = j.State; RestartState = j.RestartState; ExitCode = j.ExitCode; ComputingManagerExitCode = j.ComputingManagerExitCode; Error = j.Error; WaitingPosition = j.WaitingPosition; UserDomain = j.UserDomain; Owner = j.Owner; LocalOwner = j.LocalOwner; RequestedTotalWallTime = j.RequestedTotalWallTime; RequestedTotalCPUTime = j.RequestedTotalCPUTime; RequestedSlots = j.RequestedSlots; RequestedApplicationEnvironment = j.RequestedApplicationEnvironment; StdIn = j.StdIn; StdOut = j.StdOut; StdErr = j.StdErr; LogDir = j.LogDir; ExecutionNode = j.ExecutionNode; Queue = j.Queue; UsedTotalWallTime = j.UsedTotalWallTime; UsedTotalCPUTime = j.UsedTotalCPUTime; UsedMainMemory = j.UsedMainMemory; RequestedApplicationEnvironment = j.RequestedApplicationEnvironment; RequestedSlots = j.RequestedSlots; LocalSubmissionTime = j.LocalSubmissionTime; SubmissionTime = j.SubmissionTime; ComputingManagerSubmissionTime = j.ComputingManagerSubmissionTime; StartTime = j.StartTime; ComputingManagerEndTime = j.ComputingManagerEndTime; EndTime = j.EndTime; WorkingAreaEraseTime = j.WorkingAreaEraseTime; ProxyExpirationTime = j.ProxyExpirationTime; SubmissionHost = j.SubmissionHost; SubmissionClientName = j.SubmissionClientName; CreationTime = j.CreationTime; Validity = j.Validity; OtherMessages = j.OtherMessages; ActivityOldID = j.ActivityOldID; LocalInputFiles 
= j.LocalInputFiles; DelegationID = j.DelegationID; return *this; } int Job::operator==(const Job& other) { return JobID == other.JobID; } Job& Job::operator=(XMLNode job) { jc = NULL; // Detect format: if (job["JobID"] && job["IDFromEndpoint"] && (job["ServiceInformationURL"] || job["ServiceInformationInterfaceName"] || job["JobStatusURL"] || job["JobStatusInterfaceName"] || job["JobManagementURL"] || job["JobManagementInterfaceName"]) ) { // Version >= 3.x format. JobID = (std::string)job["JobID"]; IDFromEndpoint = (std::string)job["IDFromEndpoint"]; ServiceInformationURL = URL((std::string)job["ServiceInformationURL"]); ServiceInformationInterfaceName = (std::string)job["ServiceInformationInterfaceName"]; JobStatusURL = URL((std::string)job["JobStatusURL"]); JobStatusInterfaceName = (std::string)job["JobStatusInterfaceName"]; JobManagementURL = URL((std::string)job["JobManagementURL"]); JobManagementInterfaceName = (std::string)job["JobManagementInterfaceName"]; if (job["StageInDir"]) StageInDir = URL((std::string)job["StageInDir"]); if (job["StageOutDir"]) StageOutDir = URL((std::string)job["StageOutDir"]); if (job["SessionDir"]) SessionDir = URL((std::string)job["SessionDir"]); } else if (job["JobID"] && job["Cluster"] && job["InterfaceName"] && job["IDFromEndpoint"]) { // Version 2.x format. JobID = (std::string)job["JobID"]; ServiceInformationURL = URL((std::string)job["Cluster"]); JobStatusURL = URL((std::string)job["IDFromEndpoint"]); JobManagementURL = URL(JobID); StageInDir = JobManagementURL; StageOutDir = JobManagementURL; SessionDir = JobManagementURL; const std::string path = JobManagementURL.Path(); std::size_t slashpos = path.rfind("/"); IDFromEndpoint = path.substr(slashpos+1); JobManagementURL.ChangePath(path.substr(0, slashpos)); JobManagementInterfaceName = (std::string)job["InterfaceName"]; if (JobManagementInterfaceName == "org.nordugrid.gridftpjob") { ServiceInformationInterfaceName = "org.nordugrid.ldapng"; JobStatusInterfaceName = "org.nordugrid.ldapng"; } else if (JobManagementInterfaceName == "org.ogf.bes") { ServiceInformationInterfaceName = "org.ogf.bes"; JobStatusInterfaceName = "org.ogf.bes"; } else if (JobManagementInterfaceName == "org.ogf.xbes") { ServiceInformationInterfaceName = "org.nordugrid.wsrfglue2"; JobStatusInterfaceName = "org.ogf.xbes"; } } else if (job["Cluster"] && job["InfoEndpoint"] && job["IDFromEndpoint"] && job["Flavour"]) { // Version 1.x format. 
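    // A legacy (1.x) job record is recognised by the elements read below;
    // schematically it looks like this (element values are made-up
    // placeholders, shown for illustration only):
    //
    //   <Job>
    //     <Flavour>ARC0</Flavour>
    //     <Cluster>ldap://ce.example.org:2135/...</Cluster>
    //     <InfoEndpoint>ldap://ce.example.org:2135/...</InfoEndpoint>
    //     <IDFromEndpoint>gsiftp://ce.example.org:2811/jobs/123456789</IDFromEndpoint>
    //   </Job>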
JobID = (std::string)job["IDFromEndpoint"]; ServiceInformationURL = URL((std::string)job["Cluster"]); JobStatusURL = URL((std::string)job["InfoEndpoint"]); JobManagementURL = URL(JobID); StageInDir = JobManagementURL; StageOutDir = JobManagementURL; SessionDir = JobManagementURL; const std::string path = JobManagementURL.Path(); std::size_t slashpos = path.rfind("/"); IDFromEndpoint = path.substr(slashpos+1); JobManagementURL.ChangePath(path.substr(0, slashpos)); if ((std::string)job["Flavour"] == "ARC0") { ServiceInformationInterfaceName = "org.nordugrid.ldapng"; JobStatusInterfaceName = "org.nordugrid.ldapng"; JobManagementInterfaceName = "org.nordugrid.gridftpjob"; } else if ((std::string)job["Flavour"] == "BES") { ServiceInformationInterfaceName = "org.ogf.bes"; JobStatusInterfaceName = "org.ogf.bes"; JobManagementInterfaceName = "org.ogf.bes"; } else if ((std::string)job["Flavour"] == "ARC1") { ServiceInformationInterfaceName = "org.nordugrid.wsrfglue2"; JobStatusInterfaceName = "org.nordugrid.xbes"; JobManagementInterfaceName = "org.nordugrid.xbes"; } } else { logger.msg(WARNING, "Unable to detect format of job record."); } JXMLTOSTRING(Name) if (job["JobDescription"]) { const std::string sjobdesc = job["JobDescription"]; if (job["JobDescriptionDocument"] || job["State"] || !job["LocalSubmissionTime"]) { // If the 'JobDescriptionDocument' or 'State' element is set assume that the 'JobDescription' element is the GLUE2 one. // Default is to assume it is the GLUE2 one. JobDescription = sjobdesc; } else { // If the 'LocalSubmissionTime' element is set assume that the 'JobDescription' element contains the actual job description. JobDescriptionDocument = sjobdesc; } } JXMLTOSTRING(JobDescriptionDocument) JXMLTOTIME(LocalSubmissionTime) if (job["Associations"]["ActivityOldID"]) { ActivityOldID.clear(); for (XMLNode n = job["Associations"]["ActivityOldID"]; n; ++n) { ActivityOldID.push_back((std::string)n); } } else if (job["OldJobID"]) { // Included for backwards compatibility. ActivityOldID.clear(); for (XMLNode n = job["OldJobID"]; n; ++n) { ActivityOldID.push_back((std::string)n); } } if (job["Associations"]["LocalInputFile"]) { LocalInputFiles.clear(); for (XMLNode n = job["Associations"]["LocalInputFile"]; n; ++n) { if (n["Source"] && n["CheckSum"]) { LocalInputFiles[(std::string)n["Source"]] = (std::string)n["CheckSum"]; } } } else if (job["LocalInputFiles"]["File"]) { // Included for backwards compatibility. LocalInputFiles.clear(); for (XMLNode n = job["LocalInputFiles"]["File"]; n; ++n) { if (n["Source"] && n["CheckSum"]) { LocalInputFiles[(std::string)n["Source"]] = (std::string)n["CheckSum"]; } } } if (job["Associations"]["DelegationID"]) { DelegationID.clear(); for (XMLNode n = job["Associations"]["DelegationID"]; n; ++n) { DelegationID.push_back((std::string)n); } } // Pick generic GLUE2 information SetFromXML(job); return *this; } void Job::SetFromXML(XMLNode job) { JXMLTOSTRING(Type) // TODO: find out how to treat IDFromEndpoint in case of pure GLUE2 JXMLTOSTRING(LocalIDFromManager) /* Earlier the 'JobDescription' element in a XMLNode representing a Job * object contained the actual job description, but in GLUE2 the name * 'JobDescription' specifies the job description language which was used to * describe the job. Due to the name clash we must guess what is meant when * parsing the 'JobDescription' element. */ // TODO: same for JobDescription // Parse libarccompute special state format. 
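    // The special format parsed below stores both the plugin-specific state
    // string and its mapped general state, e.g. (values illustrative only):
    //
    //   <State>
    //     <Specific>INLRMS:Q</Specific>
    //     <General>Queuing</General>
    //   </State>
    //
    // A record carrying only a single plain <State> value is handled by the
    // else-branch below, which falls back to JobState::OTHER.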
if (job["State"]["General"] && job["State"]["Specific"]) { State.state = (std::string)job["State"]["Specific"]; State.type = JobState::GetStateType((std::string)job["State"]["General"]); } // Only use the first state. ACC modules should set the state them selves. else if (job["State"] && job["State"].Size() == 0) { State.state = (std::string)job["State"]; State.type = JobState::OTHER; } if (job["RestartState"]["General"] && job["RestartState"]["Specific"]) { RestartState.state = (std::string)job["RestartState"]["Specific"]; RestartState.type = JobState::GetStateType((std::string)job["RestartState"]["General"]); } // Only use the first state. ACC modules should set the state them selves. else if (job["RestartState"] && job["RestartState"].Size() == 0) { RestartState.state = (std::string)job["RestartState"]; RestartState.type = JobState::OTHER; } JXMLSTRINGTO(int, ExitCode) JXMLTOSTRING(ComputingManagerExitCode) JXMLTOSTRINGLIST(Error) JXMLSTRINGTO(int, WaitingPosition) JXMLTOSTRING(UserDomain) JXMLTOSTRING(Owner) JXMLTOSTRING(LocalOwner) JXMLSTRINGTO(long, RequestedTotalWallTime) JXMLSTRINGTO(long, RequestedTotalCPUTime) JXMLSTRINGTO(int, RequestedSlots) JXMLTOSTRINGLIST(RequestedApplicationEnvironment) JXMLTOSTRING(StdIn) JXMLTOSTRING(StdOut) JXMLTOSTRING(StdErr) JXMLTOSTRING(LogDir) JXMLTOSTRINGLIST(ExecutionNode) JXMLTOSTRING(Queue) JXMLSTRINGTO(long, UsedTotalWallTime) JXMLSTRINGTO(long, UsedTotalCPUTime) JXMLSTRINGTO(int, UsedMainMemory) JXMLTOTIME(SubmissionTime) JXMLTOTIME(ComputingManagerSubmissionTime) JXMLTOTIME(StartTime) JXMLTOTIME(ComputingManagerEndTime) JXMLTOTIME(EndTime) JXMLTOTIME(WorkingAreaEraseTime) JXMLTOTIME(ProxyExpirationTime) JXMLTOSTRING(SubmissionHost) JXMLTOSTRING(SubmissionClientName) JXMLTOSTRINGLIST(OtherMessages) } void Job::ToXML(XMLNode node) const { // Proposed mandatory attributes for ARC 3.0 STRINGTOXML(Name) URLTOXML(ServiceInformationURL) STRINGTOXML(ServiceInformationInterfaceName) URLTOXML(JobStatusURL) STRINGTOXML(JobStatusInterfaceName) URLTOXML(JobManagementURL) STRINGTOXML(JobManagementInterfaceName) URLTOXML(StageInDir) URLTOXML(StageOutDir) URLTOXML(SessionDir) STRINGTOXML(JobID) STRINGTOXML(Type) STRINGTOXML(IDFromEndpoint) STRINGTOXML(LocalIDFromManager) STRINGTOXML(JobDescription) STRINGTOXML(JobDescriptionDocument) if (State) { node.NewChild("State"); node["State"].NewChild("Specific") = State(); node["State"].NewChild("General") = State.GetGeneralState(); } if (RestartState) { node.NewChild("RestartState"); node["RestartState"].NewChild("Specific") = RestartState(); node["RestartState"].NewChild("General") = RestartState.GetGeneralState(); } INTTOXML(ExitCode) STRINGTOXML(ComputingManagerExitCode) STRINGLISTTOXML(Error) INTTOXML(WaitingPosition) STRINGTOXML(UserDomain) STRINGTOXML(Owner) STRINGTOXML(LocalOwner) PERIODTOSTRING(RequestedTotalWallTime) PERIODTOSTRING(RequestedTotalCPUTime) INTTOXML(RequestedSlots) STRINGLISTTOXML(RequestedApplicationEnvironment) STRINGTOXML(StdIn) STRINGTOXML(StdOut) STRINGTOXML(StdErr) STRINGTOXML(LogDir) STRINGLISTTOXML(ExecutionNode) STRINGTOXML(Queue) PERIODTOSTRING(UsedTotalWallTime) PERIODTOSTRING(UsedTotalCPUTime) INTTOXML(UsedMainMemory) TIMETOSTRING(LocalSubmissionTime) TIMETOSTRING(SubmissionTime) TIMETOSTRING(ComputingManagerSubmissionTime) TIMETOSTRING(StartTime) TIMETOSTRING(ComputingManagerEndTime) TIMETOSTRING(EndTime) TIMETOSTRING(WorkingAreaEraseTime) TIMETOSTRING(ProxyExpirationTime) STRINGTOXML(SubmissionHost) STRINGTOXML(SubmissionClientName) STRINGLISTTOXML(OtherMessages) if 
((ActivityOldID.size() > 0 || LocalInputFiles.size() > 0 || DelegationID.size() > 0) && !node["Associations"]) { node.NewChild("Associations"); } for (std::list::const_iterator it = ActivityOldID.begin(); it != ActivityOldID.end(); it++) { node["Associations"].NewChild("ActivityOldID") = *it; } for (std::map::const_iterator it = LocalInputFiles.begin(); it != LocalInputFiles.end(); it++) { XMLNode lif = node["Associations"].NewChild("LocalInputFile"); lif.NewChild("Source") = it->first; lif.NewChild("CheckSum") = it->second; } for (std::list::const_iterator it = DelegationID.begin(); it != DelegationID.end(); it++) { node["Associations"].NewChild("DelegationID") = *it; } } void Job::SaveToStream(std::ostream& out, bool longlist) const { out << IString("Job: %s", JobID) << std::endl; if (!Name.empty()) out << IString(" Name: %s", Name) << std::endl; out << IString(" State: %s", State.GetGeneralState()) << std::endl; if (longlist && !State().empty()) { out << IString(" Specific state: %s", State.GetSpecificState()) << std::endl; } if (State == JobState::QUEUING && WaitingPosition != -1) { out << IString(" Waiting Position: %d", WaitingPosition) << std::endl; } if (ExitCode != -1) out << IString(" Exit Code: %d", ExitCode) << std::endl; if (!Error.empty()) { for (std::list::const_iterator it = Error.begin(); it != Error.end(); it++) out << IString(" Job Error: %s", *it) << std::endl; } if (longlist) { if (!Owner.empty()) out << IString(" Owner: %s", Owner) << std::endl; if (!OtherMessages.empty()) for (std::list::const_iterator it = OtherMessages.begin(); it != OtherMessages.end(); it++) out << IString(" Other Messages: %s", *it) << std::endl; if (!Queue.empty()) out << IString(" Queue: %s", Queue) << std::endl; if (RequestedSlots != -1) out << IString(" Requested Slots: %d", RequestedSlots) << std::endl; if (WaitingPosition != -1) out << IString(" Waiting Position: %d", WaitingPosition) << std::endl; if (!StdIn.empty()) out << IString(" Stdin: %s", StdIn) << std::endl; if (!StdOut.empty()) out << IString(" Stdout: %s", StdOut) << std::endl; if (!StdErr.empty()) out << IString(" Stderr: %s", StdErr) << std::endl; if (!LogDir.empty()) out << IString(" Computing Service Log Directory: %s", LogDir) << std::endl; if (SubmissionTime != -1) out << IString(" Submitted: %s", (std::string)SubmissionTime) << std::endl; if (EndTime != -1) out << IString(" End Time: %s", (std::string)EndTime) << std::endl; if (!SubmissionHost.empty()) out << IString(" Submitted from: %s", SubmissionHost) << std::endl; if (!SubmissionClientName.empty()) out << IString(" Submitting client: %s", SubmissionClientName) << std::endl; if (RequestedTotalCPUTime != -1) out << IString(" Requested CPU Time: %s", RequestedTotalCPUTime.istr()) << std::endl; if (UsedTotalCPUTime != -1) out << IString(" Used CPU Time: %s", UsedTotalCPUTime.istr()) << std::endl; if (UsedTotalWallTime != -1) out << IString(" Used Wall Time: %s", UsedTotalWallTime.istr()) << std::endl; if (UsedMainMemory != -1) out << IString(" Used Memory: %d", UsedMainMemory) << std::endl; if (WorkingAreaEraseTime != -1) out << IString((State == JobState::DELETED) ? 
istring(" Results were deleted: %s") : istring(" Results must be retrieved before: %s"), (std::string)WorkingAreaEraseTime) << std::endl; if (ProxyExpirationTime != -1) out << IString(" Proxy valid until: %s", (std::string)ProxyExpirationTime) << std::endl; if (CreationTime != -1) out << IString(" Entry valid from: %s", (std::string)CreationTime) << std::endl; if (Validity != -1) out << IString(" Entry valid for: %s", Validity.istr()) << std::endl; if (!ActivityOldID.empty()) { out << IString(" Old job IDs:") << std::endl; for (std::list::const_iterator it = ActivityOldID.begin(); it != ActivityOldID.end(); ++it) { out << " " << *it << std::endl; } } // Proposed mandatory attributes for ARC 3.0 out << IString(" ID on service: %s", IDFromEndpoint) << std::endl; out << IString(" Service information URL: %s (%s)", ServiceInformationURL.fullstr(), ServiceInformationInterfaceName) << std::endl; out << IString(" Job status URL: %s (%s)", JobStatusURL.fullstr(), JobStatusInterfaceName) << std::endl; out << IString(" Job management URL: %s (%s)", JobManagementURL.fullstr(), JobManagementInterfaceName) << std::endl; if (StageInDir) out << IString(" Stagein directory URL: %s", StageInDir.fullstr()) << std::endl; if (StageOutDir) out << IString(" Stageout directory URL: %s", StageOutDir.fullstr()) << std::endl; if (SessionDir) out << IString(" Session directory URL: %s", SessionDir.fullstr()) << std::endl; if (!DelegationID.empty()) { out << IString(" Delegation IDs:") << std::endl; for (std::list::const_iterator it = DelegationID.begin(); it != DelegationID.end(); ++it) { out << " " << *it << std::endl; } } } out << std::endl; } // end Print bool Job::PrepareHandler(const UserConfig& uc) { if (jc != NULL) return true; // If InterfaceName is not specified then all JobControllerPlugin classes should be tried. if (JobManagementInterfaceName.empty()) { logger.msg(VERBOSE, "Unable to handle job (%s), no interface specified.", JobID); } jc = getLoader().loadByInterfaceName(JobManagementInterfaceName, uc); if (!jc) { logger.msg(VERBOSE, "Unable to handle job (%s), no plugin associated with the specified interface (%s)", JobID, JobManagementInterfaceName); return false; } return true; } bool Job::Update() { if (!jc) return false; std::list jobs(1, this); jc->UpdateJobs(jobs); return true; } bool Job::Clean() { return jc ? jc->CleanJobs(std::list(1, this)) : false; } bool Job::Cancel() { return jc ? jc->CancelJobs(std::list(1, this)) : false; } bool Job::Resume() { return jc ? jc->ResumeJobs(std::list(1, this)) : false; } bool Job::Renew() { return jc ? jc->RenewJobs(std::list(1, this)) : false; } bool Job::GetURLToResource(ResourceType resource, URL& url) const { return jc ? jc->GetURLToJobResource(*this, resource, url) : false; } bool Job::Retrieve(const UserConfig& uc, const URL& destination, bool force) const { if (!destination) { logger.msg(ERROR, "Invalid download destination path specified (%s)", destination.fullstr()); return false; } if (jc == NULL) { logger.msg(DEBUG, "Unable to download job (%s), no JobControllerPlugin plugin was set to handle the job.", JobID); return false; } logger.msg(VERBOSE, "Downloading job: %s", JobID); URL src, dst(destination); if (!jc->GetURLToJobResource(*this, STAGEOUTDIR, src)) { logger.msg(ERROR, "Cant retrieve job files for job (%s) - unable to determine URL of stage out directory", JobID); return false; } if (!src) { logger.msg(ERROR, "Invalid stage out path specified (%s)", src.fullstr()); return false; } // TODO: can destination be remote? 
if (!force && Glib::file_test(dst.Path(), Glib::FILE_TEST_EXISTS)) { logger.msg(WARNING, "%s directory exist! Skipping job.", dst.Path()); return false; } std::list files; if (!ListFilesRecursive(uc, src, files)) { logger.msg(ERROR, "Unable to retrieve list of job files to download for job %s", JobID); return false; } if (files.empty()) { logger.msg(WARNING, "No files to retrieve for job %s", JobID); return true; } // We must make it sure it is directory and it exists if (!DirCreate(dst.Path(), S_IRWXU, true)) { logger.msg(WARNING, "Failed to create directory %s! Skipping job.", dst.Path()); return false; } const std::string srcpath = src.Path() + (src.Path().empty() || *src.Path().rbegin() != '/' ? "/" : ""); const std::string dstpath = dst.Path() + (dst.Path().empty() || *dst.Path().rbegin() != G_DIR_SEPARATOR ? G_DIR_SEPARATOR_S : ""); bool ok = true; for (std::list::const_iterator it = files.begin(); it != files.end(); ++it) { src.ChangePath(srcpath + *it); dst.ChangePath(dstpath + *it); if (Glib::file_test(dst.Path(), Glib::FILE_TEST_EXISTS)) { if (!force) { logger.msg(ERROR, "Failed downloading %s to %s, destination already exist", src.str(), dst.Path()); ok = false; continue; } if (!FileDelete(dst.Path())) { logger.msg(ERROR, "Failed downloading %s to %s, unable to remove existing destination", src.str(), dst.Path()); ok = false; continue; } } if (!CopyJobFile(uc, src, dst)) { logger.msg(INFO, "Failed downloading %s to %s", src.str(), dst.Path()); ok = false; } } return ok; } bool Job::ListFilesRecursive(const UserConfig& uc, const URL& dir, std::list& files, const std::string& prefix) { std::list outputfiles; DataHandle handle(dir, uc); if (!handle) { logger.msg(INFO, "Unable to list files at %s", dir.str()); return false; } if(!handle->List(outputfiles, (Arc::DataPoint::DataPointInfoType) (DataPoint::INFO_TYPE_NAME | DataPoint::INFO_TYPE_TYPE))) { logger.msg(INFO, "Unable to list files at %s", dir.str()); return false; } for (std::list::iterator i = outputfiles.begin(); i != outputfiles.end(); i++) { if (i->GetName() == ".." 
|| i->GetName() == ".") { continue; } if (i->GetType() == FileInfo::file_type_unknown || i->GetType() == FileInfo::file_type_file) { files.push_back(prefix + i->GetName()); } else if (i->GetType() == FileInfo::file_type_dir) { std::string path = dir.Path(); if (path[path.size() - 1] != '/') { path += "/"; } URL tmpdir(dir); tmpdir.ChangePath(path + i->GetName()); std::string dirname = prefix + i->GetName(); if (dirname[dirname.size() - 1] != '/') { dirname += "/"; } if (!ListFilesRecursive(uc, tmpdir, files, dirname)) { return false; } } } return true; } bool Job::CopyJobFile(const UserConfig& uc, const URL& src, const URL& dst) { DataMover mover; mover.retry(true); mover.secure(false); mover.passive(true); mover.verbose(false); logger.msg(VERBOSE, "Now copying (from -> to)"); logger.msg(VERBOSE, " %s -> %s", src.str(), dst.str()); URL src_(src); URL dst_(dst); src_.AddOption("checksum=no"); dst_.AddOption("checksum=no"); src_.AddOption("blocksize=1048576",false); dst_.AddOption("blocksize=1048576",false); if ((!data_source) || (!*data_source) || (!(*data_source)->SetURL(src_))) { if(data_source) delete data_source; data_source = new DataHandle(src_, uc); } DataHandle& source = *data_source; if (!source) { logger.msg(ERROR, "Unable to initialise connection to source: %s", src.str()); return false; } if ((!data_destination) || (!*data_destination) || (!(*data_destination)->SetURL(dst_))) { if(data_destination) delete data_destination; data_destination = new DataHandle(dst_, uc); } DataHandle& destination = *data_destination; if (!destination) { logger.msg(ERROR, "Unable to initialise connection to destination: %s", dst.str()); return false; } // Set desired number of retries. Also resets any lost // tries from previous files. source->SetTries((src.Protocol() == "file")?1:3); destination->SetTries((dst.Protocol() == "file")?1:3); // Turn off all features we do not need source->SetAdditionalChecks(false); destination->SetAdditionalChecks(false); FileCache cache; DataStatus res = mover.Transfer(*source, *destination, cache, URLMap(), 0, 0, 0, uc.Timeout()); if (!res.Passed()) { logger.msg(ERROR, "File download failed: %s", std::string(res)); // Reset connection because one can't be sure how failure // affects server and/or connection state. // TODO: Investigate/define DMC behavior in such case. 
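    // In outline, each file is copied with the pattern used above (sketch only,
    // simplified; srcUrl/dstUrl stand for the per-file URLs built by Retrieve()):
    //
    //   Arc::DataHandle source(srcUrl, uc), destination(dstUrl, uc);  // protocol-specific handles
    //   Arc::DataMover mover;
    //   Arc::FileCache cache;                                         // unused here, required by the API
    //   Arc::DataStatus st = mover.Transfer(*source, *destination, cache,
    //                                       Arc::URLMap(), 0, 0, 0, uc.Timeout());
    //
    // When the transfer fails, the cached handles are discarded below because
    // the state of the connection after an error is undefined (see the TODO above).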
delete data_source; data_source = NULL; delete data_destination; data_destination = NULL; return false; } return true; } bool Job::ReadJobIDsFromFile(const std::string& filename, std::list& jobids, unsigned nTries, unsigned tryInterval) { if (!Glib::file_test(filename, Glib::FILE_TEST_IS_REGULAR)) return false; FileLock lock(filename); for (int tries = (int)nTries; tries > 0; --tries) { if (lock.acquire()) { std::ifstream is(filename.c_str()); if (!is.good()) { is.close(); lock.release(); return false; } std::string line; while (std::getline(is, line)) { line = Arc::trim(line, " \t"); if (!line.empty() && line[0] != '#') { jobids.push_back(line); } } is.close(); lock.release(); return true; } if (tries == 6) { logger.msg(WARNING, "Waiting for lock on file %s", filename); } Glib::usleep(tryInterval); } return false; } bool Job::WriteJobIDToFile(const std::string& jobid, const std::string& filename, unsigned nTries, unsigned tryInterval) { if (Glib::file_test(filename, Glib::FILE_TEST_IS_DIR)) return false; FileLock lock(filename); for (int tries = (int)nTries; tries > 0; --tries) { if (lock.acquire()) { std::ofstream os(filename.c_str(), std::ios::app); if (!os.good()) { os.close(); lock.release(); return false; } os << jobid << std::endl; bool good = os.good(); os.close(); lock.release(); return good; } if (tries == 6) { logger.msg(WARNING, "Waiting for lock on file %s", filename); } Glib::usleep(tryInterval); } return false; } bool Job::WriteJobIDsToFile(const std::list& jobids, const std::string& filename, unsigned nTries, unsigned tryInterval) { if (Glib::file_test(filename, Glib::FILE_TEST_IS_DIR)) return false; FileLock lock(filename); for (int tries = (int)nTries; tries > 0; --tries) { if (lock.acquire()) { std::ofstream os(filename.c_str(), std::ios::app); if (!os.good()) { os.close(); lock.release(); return false; } for (std::list::const_iterator it = jobids.begin(); it != jobids.end(); ++it) { os << *it << std::endl; } bool good = os.good(); os.close(); lock.release(); return good; } if (tries == 6) { logger.msg(WARNING, "Waiting for lock on file %s", filename); } Glib::usleep(tryInterval); } return false; } bool Job::WriteJobIDsToFile(const std::list& jobs, const std::string& filename, unsigned nTries, unsigned tryInterval) { if (Glib::file_test(filename, Glib::FILE_TEST_IS_DIR)) return false; FileLock lock(filename); for (int tries = (int)nTries; tries > 0; --tries) { if (lock.acquire()) { std::ofstream os(filename.c_str(), std::ios::app); if (!os.good()) { os.close(); lock.release(); return false; } for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { os << it->JobID << std::endl; } bool good = os.good(); os.close(); lock.release(); return good; } if (tries == 6) { logger.msg(WARNING, "Waiting for lock on file %s", filename); } Glib::usleep(tryInterval); } return false; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/SubmissionStatus.h0000644000000000000000000000012412072651017024642 xustar000000000000000027 mtime=1357599247.730872 27 atime=1513200574.717704 30 ctime=1513200659.764744609 nordugrid-arc-5.4.2/src/hed/libs/compute/SubmissionStatus.h0000644000175000002070000000612712072651017024715 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_SUBMISSIONSTATUS_H__ #define __ARC_SUBMISSIONSTATUS_H__ namespace Arc { /** * \ingroup compute * \headerfile SubmissionStatus.h arc/compute/SubmissionStatus.h */ class SubmissionStatus { private: static const unsigned int maxValue = (1 << 8) - 1; public: 
enum SubmissionStatusType { NONE = 0, NOT_IMPLEMENTED = 1 << 0, NO_SERVICES = 1 << 1, ENDPOINT_NOT_QUERIED = 1 << 2, BROKER_PLUGIN_NOT_LOADED = 1 << 3, DESCRIPTION_NOT_SUBMITTED = 1 << 4, SUBMITTER_PLUGIN_NOT_LOADED = 1 << 5, AUTHENTICATION_ERROR = 1 << 6, ERROR_FROM_ENDPOINT = 1 << 7 }; SubmissionStatus() : status(NONE) {} SubmissionStatus(const SubmissionStatus& s) : status(s.status) {} SubmissionStatus(SubmissionStatusType s) : status(s) {} SubmissionStatus(unsigned int s) : status(s & maxValue) {} SubmissionStatus& operator|=(SubmissionStatusType s) { status |= s; return *this; } SubmissionStatus& operator|=(const SubmissionStatus& s) { status |= s.status; return *this; } SubmissionStatus& operator|=(unsigned int s) { status |= (s & maxValue); return *this; } SubmissionStatus operator|(SubmissionStatusType s) const { return (status | s); } SubmissionStatus operator|(const SubmissionStatus& s) const { return (status | s.status); } SubmissionStatus operator|(unsigned int s) const { return (status | (s & maxValue)); } SubmissionStatus& operator&=(SubmissionStatusType s) { status &= s; return *this; } SubmissionStatus& operator&=(const SubmissionStatus& s) { status &= s.status; return *this; } SubmissionStatus& operator&=(unsigned int s) { status &= s; return *this; } SubmissionStatus operator&(SubmissionStatusType s) const { return (status & s); } SubmissionStatus operator&(const SubmissionStatus& s) const { return (status & s.status); } SubmissionStatus operator&(unsigned int s) const { return (status & s); } SubmissionStatus& operator=(SubmissionStatusType s) { status = s; return *this; } SubmissionStatus& operator=(unsigned int s) { status = (s & maxValue); return *this; } operator bool() const { return status == NONE; } bool operator==(const SubmissionStatus& s) const { return status == s.status; } bool operator==(SubmissionStatusType s) const { return status == (unsigned int)s; } bool operator==(unsigned int s) const { return status == s; } friend bool operator==(SubmissionStatusType a, const SubmissionStatus& b); bool operator!=(const SubmissionStatus& s) const { return !operator==(s); } bool operator!=(SubmissionStatusType s) const { return !operator==(s); } bool operator!=(unsigned int s) const { return !operator==(s); } bool isSet(SubmissionStatusType s) const { return (s & status) == (unsigned int)s; } void unset(SubmissionStatusType s) { status &= (~s); } private: unsigned int status; }; inline bool operator==(SubmissionStatus::SubmissionStatusType a, const SubmissionStatus& b) { return (unsigned int)a == b.status; } } #endif // __ARC_SUBMISSIONSTATUS_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/JobSupervisor.cpp0000644000000000000000000000012412771222411024450 xustar000000000000000027 mtime=1474635017.400882 27 atime=1513200574.673703 30 ctime=1513200659.797745012 nordugrid-arc-5.4.2/src/hed/libs/compute/JobSupervisor.cpp0000644000175000002070000005455512771222411024533 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "JobSupervisor.h" namespace Arc { Logger JobSupervisor::logger(Logger::getRootLogger(), "JobSupervisor"); JobSupervisor::JobSupervisor(const UserConfig& usercfg, const std::list& jobs) : usercfg(usercfg) { for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { AddJob(*it); } } bool JobSupervisor::AddJob(const Job& job) { if (job.JobID.empty()) { logger.msg(VERBOSE, "Ignoring 
job, the job ID is empty"); return false; } if (job.JobManagementInterfaceName.empty()) { logger.msg(VERBOSE, "Ignoring job (%s), the management interface name is unknown", job.JobID); return false; } if (!job.JobManagementURL) { logger.msg(VERBOSE, "Ignoring job (%s), the job management URL is unknown", job.JobID); return false; } if (job.JobStatusInterfaceName.empty()) { logger.msg(VERBOSE, "Ignoring job (%s), the status interface name is unknown", job.JobID); return false; } if (!job.JobStatusURL) { logger.msg(VERBOSE, "Ignoring job (%s), the job status URL is unknown", job.JobID); return false; } std::map::iterator currentJC = loadedJCs.find(job.JobManagementInterfaceName); if (currentJC == loadedJCs.end()) { JobControllerPlugin *jc = Job::getLoader().loadByInterfaceName(job.JobManagementInterfaceName, usercfg); currentJC = loadedJCs.insert(std::pair(job.JobManagementInterfaceName, jc)).first; if (!jc) { logger.msg(VERBOSE, "Ignoring job (%s), unable to load JobControllerPlugin for %s", job.JobID, job.JobManagementInterfaceName); return false; } jcJobMap[jc] = std::pair< std::list, std::list >(); } else if (!currentJC->second) { // Already tried to load JobControllerPlugin, and it failed. logger.msg(VERBOSE, "Ignoring job (%s), already tried and were unable to load JobControllerPlugin", job.JobID); return false; } // Because jobs are identified by ID there should probably be // protection against duplicate job ID jobs.push_back(job); jobs.back().jc = currentJC->second; jcJobMap[currentJC->second].first.push_back(&jobs.back()); return true; } void JobSupervisor::SelectValid() { processed.clear(); notprocessed.clear(); for (JobSelectionMap::iterator it = jcJobMap.begin(); it != jcJobMap.end(); ++it) { for (std::list::iterator itJ = it->second.first.begin(); itJ != it->second.first.end();) { if (!(*itJ)->State) { notprocessed.push_back((*itJ)->JobID); it->second.second.push_back(*itJ); itJ = it->second.first.erase(itJ); } else { processed.push_back((*itJ)->JobID); ++itJ; } } } } void JobSupervisor::SelectByStatus(const std::list& status) { processed.clear(); notprocessed.clear(); if (status.empty()) { return; } for (JobSelectionMap::iterator it = jcJobMap.begin(); it != jcJobMap.end(); ++it) { for (std::list::iterator itJ = it->second.first.begin(); itJ != it->second.first.end();) { if (std::find(status.begin(), status.end(), (*itJ)->State()) == status.end() && std::find(status.begin(), status.end(), (*itJ)->State.GetGeneralState()) == status.end()) { notprocessed.push_back((*itJ)->JobID); it->second.second.push_back(*itJ); itJ = it->second.first.erase(itJ); } else { processed.push_back((*itJ)->JobID); ++itJ; } } } } void JobSupervisor::SelectByID(const std::list& ids) { processed.clear(); notprocessed.clear(); if (ids.empty()) { return; } for (JobSelectionMap::iterator it = jcJobMap.begin(); it != jcJobMap.end(); ++it) { for (std::list::iterator itJ = it->second.first.begin(); itJ != it->second.first.end();) { if (std::find(ids.begin(), ids.end(), (*itJ)->JobID) == ids.end()) { notprocessed.push_back((*itJ)->JobID); it->second.second.push_back(*itJ); itJ = it->second.first.erase(itJ); } else { processed.push_back((*itJ)->JobID); ++itJ; } } } } void JobSupervisor::Select(const JobSelector& selector) { processed.clear(); notprocessed.clear(); for (JobSelectionMap::iterator it = jcJobMap.begin(); it != jcJobMap.end(); ++it) { for (std::list::iterator itJ = it->second.first.begin(); itJ != it->second.first.end();) { if (!selector.Select(**itJ)) { notprocessed.push_back((*itJ)->JobID); 
it->second.second.push_back(*itJ); itJ = it->second.first.erase(itJ); } else { processed.push_back((*itJ)->JobID); ++itJ; } } } } void JobSupervisor::ClearSelection() { for (JobSelectionMap::iterator it = jcJobMap.begin(); it != jcJobMap.end(); ++it) { it->second.first.clear(); it->second.second.clear(); } for (std::list::iterator itJ = jobs.begin(); itJ != jobs.end(); ++itJ) { jcJobMap[itJ->jc].first.push_back(&*itJ); } } void JobSupervisor::Update() { for (JobSelectionMap::iterator it = jcJobMap.begin(); it != jcJobMap.end(); ++it) { it->first->UpdateJobs(it->second.first, processed, notprocessed); } } std::list JobSupervisor::GetSelectedJobs() const { std::list selectedJobs; for (JobSelectionMap::const_iterator it = jcJobMap.begin(); it != jcJobMap.end(); ++it) { for (std::list::const_iterator itJ = it->second.first.begin(); itJ != it->second.first.end(); ++itJ) { selectedJobs.push_back(**itJ); } } return selectedJobs; } bool JobSupervisor::Retrieve(const std::string& downloaddirprefix, bool usejobname, bool force, std::list& downloaddirectories) { notprocessed.clear(); processed.clear(); bool ok = true; for (JobSelectionMap::iterator it = jcJobMap.begin(); it != jcJobMap.end(); ++it) { for (std::list::iterator itJ = it->second.first.begin(); itJ != it->second.first.end();) { if (!(*itJ)->State || (*itJ)->State == JobState::DELETED || !(*itJ)->State.IsFinished()) { notprocessed.push_back((*itJ)->JobID); it->second.second.push_back(*itJ); itJ = it->second.first.erase(itJ); continue; } std::string downloaddirname; if (usejobname && !(*itJ)->Name.empty()) { downloaddirname = (*itJ)->Name; } else { std::string path = URL((*itJ)->JobID).Path(); std::string::size_type pos = path.rfind('/'); downloaddirname = path.substr(pos + 1); } URL downloaddir; if (!downloaddirprefix.empty()) { downloaddir = downloaddirprefix; if (downloaddir.Protocol() == "file") { downloaddir.ChangePath(downloaddir.Path() + G_DIR_SEPARATOR_S + downloaddirname); } else { downloaddir.ChangePath(downloaddir.Path() + "/" + downloaddirname); } } else { downloaddir = downloaddirname; } if (!(*itJ)->Retrieve(usercfg, downloaddir, force)) { ok = false; notprocessed.push_back((*itJ)->JobID); it->second.second.push_back(*itJ); itJ = it->second.first.erase(itJ); } else { processed.push_back((*itJ)->JobID); if (downloaddir.Protocol() == "file") { if (Glib::file_test(downloaddir.Path(), Glib::FILE_TEST_IS_DIR)) { std::string cwd = URL(".").Path(); cwd.resize(cwd.size()-1); if (downloaddir.Path().substr(0, cwd.size()) == cwd) { downloaddirectories.push_back(downloaddir.Path().substr(cwd.size())); } else { downloaddirectories.push_back(downloaddir.Path()); } } } else { downloaddirectories.push_back(downloaddir.str()); } ++itJ; } } } return ok; } bool JobSupervisor::Renew() { notprocessed.clear(); processed.clear(); bool ok = true; for (JobSelectionMap::iterator it = jcJobMap.begin(); it != jcJobMap.end(); ++it) { for (std::list::iterator itJ = it->second.first.begin(); itJ != it->second.first.end();) { if (!(*itJ)->State || (*itJ)->State == JobState::FINISHED || (*itJ)->State == JobState::KILLED || (*itJ)->State == JobState::DELETED) { notprocessed.push_back((*itJ)->JobID); it->second.second.push_back(*itJ); itJ = it->second.first.erase(itJ); continue; } if (!it->first->RenewJobs(std::list(1, *itJ), processed, notprocessed)) { ok = false; it->second.second.push_back(*itJ); itJ = it->second.first.erase(itJ); } else { ++itJ; } } } return ok; } bool JobSupervisor::Resume() { notprocessed.clear(); processed.clear(); bool ok = true; for 
(JobSelectionMap::iterator it = jcJobMap.begin(); it != jcJobMap.end(); ++it) { for (std::list::iterator itJ = it->second.first.begin(); itJ != it->second.first.end();) { if (!(*itJ)->State || (*itJ)->State == JobState::FINISHED || (*itJ)->State == JobState::KILLED || (*itJ)->State == JobState::DELETED) { notprocessed.push_back((*itJ)->JobID); it->second.second.push_back(*itJ); itJ = it->second.first.erase(itJ); continue; } if (!it->first->ResumeJobs(std::list(1, *itJ), processed, notprocessed)) { ok = false; it->second.second.push_back(*itJ); itJ = it->second.first.erase(itJ); } else { ++itJ; } } } return ok; } bool JobSupervisor::Resubmit(int destination, const std::list& services, std::list& resubmittedJobs, const std::list& rejectedURLs) { notprocessed.clear(); processed.clear(); bool ok = true; std::list< std::list::iterator > resubmittableJobs; for (JobSelectionMap::iterator it = jcJobMap.begin(); it != jcJobMap.end(); ++it) { for (std::list::iterator itJ = it->second.first.begin(); itJ != it->second.first.end();) { // If job description is not set, then try to fetch it from execution service. if ((*itJ)->JobDescriptionDocument.empty() && !it->first->GetJobDescription(**itJ, (*itJ)->JobDescriptionDocument)) { notprocessed.push_back((*itJ)->JobID); ok = false; it->second.second.push_back(*itJ); itJ = it->second.first.erase(itJ); continue; } // Verify checksums of local input files if (!(*itJ)->LocalInputFiles.empty()) { std::map::iterator itF = (*itJ)->LocalInputFiles.begin(); for (; itF != (*itJ)->LocalInputFiles.end(); ++itF) { if (itF->second != CheckSumAny::FileChecksum(itF->first, CheckSumAny::cksum, true)) { break; } } if (itF != (*itJ)->LocalInputFiles.end()) { notprocessed.push_back((*itJ)->JobID); ok = false; it->second.second.push_back(*itJ); itJ = it->second.first.erase(itJ); continue; } } resubmittableJobs.push_back(itJ); ++itJ; } } if (resubmittableJobs.empty()) { return ok; } UserConfig resubmitUsercfg = usercfg; // UserConfig object might need to be modified. Broker broker(resubmitUsercfg, resubmitUsercfg.Broker().first); if (!broker.isValid(false)) { logger.msg(ERROR, "Job resubmission failed: Unable to load broker (%s)", resubmitUsercfg.Broker().first); for (std::list< std::list::iterator >::iterator itJ = resubmittableJobs.begin(); itJ != resubmittableJobs.end(); ++itJ) { notprocessed.push_back((**itJ)->JobID); jcJobMap[(**itJ)->jc].second.push_back(**itJ); jcJobMap[(**itJ)->jc].first.erase(*itJ); } return false; } ComputingServiceRetriever* csr = NULL; if (destination != 1) { // Jobs should not go to same target, making a general information gathering. 
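      // The 'destination' argument selects the resubmission policy:
      //   1 - resubmit to the same target (its information is queried per job further down),
      //   2 - resubmit to any target except the original one,
      //   any other value - resubmit to any suitable target.
      // A typical call, as a sketch ('supervisor' is a JobSupervisor instance;
      // the container element types shown are assumptions based on the
      // surrounding code, and the lists may be left empty):
      //
      //   std::list<Arc::Endpoint> services;       // candidate submission endpoints
      //   std::list<Arc::Job> resubmitted;         // filled with the newly submitted jobs
      //   std::list<std::string> rejected;         // endpoints to skip
      //   bool ok = supervisor.Resubmit(0, services, resubmitted, rejected);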
csr = new ComputingServiceRetriever(resubmitUsercfg, services, rejectedURLs); csr->wait(); if (csr->empty()) { logger.msg(ERROR, "Job resubmission aborted because no resource returned any information"); delete csr; for (std::list< std::list::iterator >::iterator itJ = resubmittableJobs.begin(); itJ != resubmittableJobs.end(); ++itJ) { notprocessed.push_back((**itJ)->JobID); jcJobMap[(**itJ)->jc].second.push_back(**itJ); jcJobMap[(**itJ)->jc].first.erase(*itJ); } return false; } } Submitter s(resubmitUsercfg); for (std::list< std::list::iterator >::iterator itJ = resubmittableJobs.begin(); itJ != resubmittableJobs.end(); ++itJ) { resubmittedJobs.push_back(Job()); std::list jobdescs; if (!JobDescription::Parse((**itJ)->JobDescriptionDocument, jobdescs) || jobdescs.empty()) { std::cout << (**itJ)->JobDescriptionDocument << std::endl; logger.msg(ERROR, "Unable to resubmit job (%s), unable to parse obtained job description", (**itJ)->JobID); resubmittedJobs.pop_back(); notprocessed.push_back((**itJ)->JobID); ok = false; jcJobMap[(**itJ)->jc].second.push_back(**itJ); jcJobMap[(**itJ)->jc].first.erase(*itJ); continue; } jobdescs.front().Identification.ActivityOldID = (**itJ)->ActivityOldID; jobdescs.front().Identification.ActivityOldID.push_back((**itJ)->JobID); // remove the queuename which was added during the original submission of the job jobdescs.front().Resources.QueueName = ""; std::list rejectEndpoints; if (destination == 1) { // Jobs should be resubmitted to same target. std::list sametarget(1, Endpoint((**itJ)->ServiceInformationURL.fullstr())); sametarget.front().Capability.insert(Endpoint::GetStringForCapability(Endpoint::COMPUTINGINFO)); csr = new ComputingServiceRetriever(resubmitUsercfg, sametarget, rejectedURLs); csr->wait(); if (csr->empty()) { logger.msg(ERROR, "Unable to resubmit job (%s), target information retrieval failed for target: %s", (**itJ)->JobID, (**itJ)->ServiceInformationURL.str()); delete csr; resubmittedJobs.pop_back(); notprocessed.push_back((**itJ)->JobID); jcJobMap[(**itJ)->jc].second.push_back(**itJ); jcJobMap[(**itJ)->jc].first.erase(*itJ); continue; } } else if (destination == 2) { // Jobs should NOT be resubmitted to same target. rejectEndpoints.push_back((**itJ)->ServiceInformationURL); } ExecutionTargetSorter ets(broker, jobdescs.front(), *csr, rejectEndpoints); for (ets.reset(); !ets.endOfList(); ets.next()) { if (s.Submit(*ets, jobdescs.front(), resubmittedJobs.back())) { ets.registerJobSubmission(); processed.push_back((**itJ)->JobID); break; } } if (ets.endOfList()) { resubmittedJobs.pop_back(); notprocessed.push_back((**itJ)->JobID); ok = false; logger.msg(ERROR, "Unable to resubmit job (%s), no targets applicable for submission", (**itJ)->JobID); jcJobMap[(**itJ)->jc].second.push_back(**itJ); jcJobMap[(**itJ)->jc].first.erase(*itJ); } if (destination == 1) { delete csr; } } if (destination != 1) { delete csr; } return ok; } bool JobSupervisor::Migrate(bool forcemigration, const std::list& services, std::list& migratedJobs, const std::list& rejectedURLs) { bool ok = true; std::list< std::list::iterator > migratableJobs; for (JobSelectionMap::iterator it = jcJobMap.begin(); ++it != jcJobMap.end(); ++it) { for (std::list::iterator itJ = it->second.first.begin(); itJ != it->second.first.end();) { if ((*itJ)->State != JobState::QUEUING) { notprocessed.push_back((*itJ)->JobID); ok = false; it->second.second.push_back(*itJ); itJ = it->second.first.erase(itJ); continue; } // If job description is not set, then try to fetch it from execution service. 
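      // GetJobDescription() asks the JobControllerPlugin to fetch the original
      // job description from the service when no local copy exists; without a
      // description the job cannot be migrated. Driving Migrate() itself looks
      // much like Resubmit() (sketch only; 'supervisor' is a JobSupervisor and
      // the container element types are assumed as in the Resubmit() sketch above):
      //
      //   std::list<Arc::Endpoint> services;
      //   std::list<Arc::Job> migrated;
      //   std::list<std::string> rejected;
      //   bool ok = supervisor.Migrate(false, services, migrated, rejected);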
if ((*itJ)->JobDescriptionDocument.empty() && !it->first->GetJobDescription((**itJ), (*itJ)->JobDescriptionDocument)) { logger.msg(ERROR, "Unable to migrate job (%s), job description could not be retrieved remotely", (*itJ)->JobID); notprocessed.push_back((*itJ)->JobID); ok = false; it->second.second.push_back(*itJ); itJ = it->second.first.erase(itJ); continue; } migratableJobs.push_back(itJ); ++itJ; } } if (migratableJobs.empty()) { return ok; } ComputingServiceRetriever csr(usercfg, services, rejectedURLs); csr.wait(); if (csr.empty()) { logger.msg(ERROR, "Job migration aborted, no resource returned any information"); for (std::list< std::list::iterator >::const_iterator itJ = migratableJobs.begin(); itJ != migratableJobs.end(); ++itJ) { notprocessed.push_back((**itJ)->JobID); jcJobMap[(**itJ)->jc].second.push_back(**itJ); jcJobMap[(**itJ)->jc].first.erase(*itJ); } return false; } Broker broker(usercfg, usercfg.Broker().first); if (!broker.isValid(false)) { logger.msg(ERROR, "Job migration aborted, unable to load broker (%s)", usercfg.Broker().first); for (std::list< std::list::iterator >::const_iterator itJ = migratableJobs.begin(); itJ != migratableJobs.end(); ++itJ) { notprocessed.push_back((**itJ)->JobID); jcJobMap[(**itJ)->jc].second.push_back(**itJ); jcJobMap[(**itJ)->jc].first.erase(*itJ); } return false; } SubmitterPluginLoader *spl = NULL; for (std::list< std::list::iterator >::iterator itJ = migratableJobs.begin(); itJ != migratableJobs.end(); ++itJ) { std::list jobdescs; if (!JobDescription::Parse((**itJ)->JobDescriptionDocument, jobdescs) || jobdescs.empty()) { logger.msg(ERROR, "Unable to migrate job (%s), unable to parse obtained job description", (**itJ)->JobID); notprocessed.push_back((**itJ)->JobID); jcJobMap[(**itJ)->jc].second.push_back(**itJ); jcJobMap[(**itJ)->jc].first.erase(*itJ); continue; } jobdescs.front().Identification.ActivityOldID = (**itJ)->ActivityOldID; jobdescs.front().Identification.ActivityOldID.push_back((**itJ)->JobID); // remove the queuename which was added during the original submission of the job jobdescs.front().Resources.QueueName = ""; migratedJobs.push_back(Job()); ExecutionTargetSorter ets(broker, jobdescs.front(), csr); for (ets.reset(); !ets.endOfList(); ets.next()) { if (spl == NULL) { spl = new SubmitterPluginLoader(); } SubmitterPlugin* sp = spl->loadByInterfaceName(ets->ComputingEndpoint->InterfaceName, usercfg); if (sp == NULL) { logger.msg(INFO, "Unable to load submission plugin for %s interface", ets->ComputingEndpoint->InterfaceName); continue; } if (sp->Migrate((**itJ)->JobID, jobdescs.front(), *ets, forcemigration, migratedJobs.back())) { ets.registerJobSubmission(); break; } } if (ets.endOfList()) { logger.msg(ERROR, "Job migration failed for job (%s), no applicable targets", (**itJ)->JobID); ok = false; migratedJobs.pop_back(); notprocessed.push_back((**itJ)->JobID); jcJobMap[(**itJ)->jc].second.push_back(**itJ); jcJobMap[(**itJ)->jc].first.erase(*itJ); } else { processed.push_back((**itJ)->JobID); } } if (spl) { delete spl; } return ok; } bool JobSupervisor::Cancel() { notprocessed.clear(); processed.clear(); bool ok = true; for (JobSelectionMap::iterator it = jcJobMap.begin(); it != jcJobMap.end(); ++it) { for (std::list::iterator itJ = it->second.first.begin(); itJ != it->second.first.end();) { if (!(*itJ)->State || (*itJ)->State == JobState::DELETED || (*itJ)->State.IsFinished()) { notprocessed.push_back((*itJ)->JobID); it->second.second.push_back(*itJ); itJ = it->second.first.erase(itJ); continue; } if 
(!it->first->CancelJobs(std::list(1, *itJ), processed, notprocessed)) { ok = false; it->second.second.push_back(*itJ); itJ = it->second.first.erase(itJ); } else { ++itJ; } } } return ok; } bool JobSupervisor::Clean() { notprocessed.clear(); processed.clear(); bool ok = true; for (JobSelectionMap::iterator it = jcJobMap.begin(); it != jcJobMap.end(); ++it) { for (std::list::iterator itJ = it->second.first.begin(); itJ != it->second.first.end();) { if (!(*itJ)->State || !(*itJ)->State.IsFinished()) { notprocessed.push_back((*itJ)->JobID); it->second.second.push_back(*itJ); itJ = it->second.first.erase(itJ); continue; } if (!it->first->CleanJobs(std::list(1, *itJ), processed, notprocessed)) { ok = false; it->second.second.push_back(*itJ); itJ = it->second.first.erase(itJ); } else { ++itJ; } } } return ok; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/test-job-30000644000000000000000000000012412045235201022735 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.670703 30 ctime=1513200659.816745244 nordugrid-arc-5.4.2/src/hed/libs/compute/test-job-30000644000175000002070000000037212045235201023004 0ustar00mockbuildmock00000000000000&(executable = '/bin/cp') (arguments = 'in.html' 'out.html') (stdout = 'stdout') (stderr = 'stderr') (inputfiles = ('in.html' 'http://www.nordugrid.org/data/in.html') ) (outputfiles = ('out.html' '') ) (jobname = 'arctest3') (gmlog = 'gmlog') nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/JobInformationStorageDescriptor.cpp0000644000000000000000000000012413065017510030137 xustar000000000000000027 mtime=1490296648.482805 27 atime=1513200574.716704 30 ctime=1513200659.810745171 nordugrid-arc-5.4.2/src/hed/libs/compute/JobInformationStorageDescriptor.cpp0000644000175000002070000000115213065017510030203 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include "JobInformationStorageXML.h" #ifdef DBJSTORE_ENABLED #include "JobInformationStorageBDB.h" #endif #ifdef HAVE_SQLITE #include "JobInformationStorageSQLite.h" #endif namespace Arc { JobInformationStorageDescriptor JobInformationStorage::AVAILABLE_TYPES[] = { #ifdef DBJSTORE_ENABLED { "BDB", &JobInformationStorageBDB::Instance }, #endif #ifdef HAVE_SQLITE { "SQLITE", &JobInformationStorageSQLite::Instance }, #endif { "XML", &JobInformationStorageXML::Instance }, { NULL, NULL } }; } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315724023176 xustar000000000000000030 mtime=1513200596.593972005 30 atime=1513200647.797598245 29 ctime=1513200659.78774489 nordugrid-arc-5.4.2/src/hed/libs/compute/Makefile.in0000644000175000002070000021755013214315724023257 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ noinst_PROGRAMS = test_jobdescription$(EXEEXT) \ test_JobInformationStorage$(EXEEXT) subdir = src/hed/libs/compute DIST_COMMON = README $(am__libarccompute_la_HEADERS_DIST) \ $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(testjobdir)" \ "$(DESTDIR)$(libarccompute_ladir)" LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = @DBJSTORE_ENABLED_TRUE@am__DEPENDENCIES_2 = $(am__DEPENDENCIES_1) @SQLITE_ENABLED_TRUE@am__DEPENDENCIES_3 = $(am__DEPENDENCIES_1) libarccompute_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_2) $(am__DEPENDENCIES_3) am__libarccompute_la_SOURCES_DIST = Broker.cpp BrokerPlugin.cpp \ ExecutionTarget.cpp Submitter.cpp SubmitterPlugin.cpp \ JobState.cpp Job.cpp JobControllerPlugin.cpp JobSupervisor.cpp \ JobDescription.cpp JobDescriptionParserPlugin.cpp Software.cpp \ GLUE2.cpp 
EndpointQueryingStatus.cpp TestACCControl.cpp \ EntityRetriever.cpp EntityRetrieverPlugin.cpp Endpoint.cpp \ ComputingServiceRetriever.cpp \ JobInformationStorageDescriptor.cpp \ JobInformationStorageXML.cpp JobInformationStorageBDB.cpp \ JobInformationStorageSQLite.cpp @DBJSTORE_ENABLED_TRUE@am__objects_1 = libarccompute_la-JobInformationStorageBDB.lo @SQLITE_ENABLED_TRUE@am__objects_2 = libarccompute_la-JobInformationStorageSQLite.lo am_libarccompute_la_OBJECTS = libarccompute_la-Broker.lo \ libarccompute_la-BrokerPlugin.lo \ libarccompute_la-ExecutionTarget.lo \ libarccompute_la-Submitter.lo \ libarccompute_la-SubmitterPlugin.lo \ libarccompute_la-JobState.lo libarccompute_la-Job.lo \ libarccompute_la-JobControllerPlugin.lo \ libarccompute_la-JobSupervisor.lo \ libarccompute_la-JobDescription.lo \ libarccompute_la-JobDescriptionParserPlugin.lo \ libarccompute_la-Software.lo libarccompute_la-GLUE2.lo \ libarccompute_la-EndpointQueryingStatus.lo \ libarccompute_la-TestACCControl.lo \ libarccompute_la-EntityRetriever.lo \ libarccompute_la-EntityRetrieverPlugin.lo \ libarccompute_la-Endpoint.lo \ libarccompute_la-ComputingServiceRetriever.lo \ libarccompute_la-JobInformationStorageDescriptor.lo \ libarccompute_la-JobInformationStorageXML.lo $(am__objects_1) \ $(am__objects_2) libarccompute_la_OBJECTS = $(am_libarccompute_la_OBJECTS) libarccompute_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) \ $(libarccompute_la_LDFLAGS) $(LDFLAGS) -o $@ PROGRAMS = $(noinst_PROGRAMS) am_test_JobInformationStorage_OBJECTS = test_JobInformationStorage-test_JobInformationStorage.$(OBJEXT) test_JobInformationStorage_OBJECTS = \ $(am_test_JobInformationStorage_OBJECTS) test_JobInformationStorage_DEPENDENCIES = libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) test_JobInformationStorage_LINK = $(LIBTOOL) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(test_JobInformationStorage_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_test_jobdescription_OBJECTS = \ test_jobdescription-test_jobdescription.$(OBJEXT) test_jobdescription_OBJECTS = $(am_test_jobdescription_OBJECTS) test_jobdescription_DEPENDENCIES = libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) test_jobdescription_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(test_jobdescription_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarccompute_la_SOURCES) \ $(test_JobInformationStorage_SOURCES) \ $(test_jobdescription_SOURCES) DIST_SOURCES = $(am__libarccompute_la_SOURCES_DIST) \ $(test_JobInformationStorage_SOURCES) \ $(test_jobdescription_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive 
info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive DATA = $(testjob_DATA) am__libarccompute_la_HEADERS_DIST = Broker.h BrokerPlugin.h \ ExecutionTarget.h Submitter.h SubmitterPlugin.h GLUE2Entity.h \ SubmissionStatus.h JobState.h Job.h JobControllerPlugin.h \ JobSupervisor.h JobDescription.h JobDescriptionParserPlugin.h \ Software.h GLUE2.h EndpointQueryingStatus.h TestACCControl.h \ EntityRetriever.h EntityRetrieverPlugin.h Endpoint.h \ ComputingServiceRetriever.h JobInformationStorage.h \ JobInformationStorageXML.h JobInformationStorageBDB.h \ JobInformationStorageSQLite.h HEADERS = $(libarccompute_la_HEADERS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ 
CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = 
@PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ 
ws_monitor_prefix = @ws_monitor_prefix@ lib_LTLIBRARIES = libarccompute.la @DBJSTORE_ENABLED_FALSE@HEADER_WITH_DBJSTORE = @DBJSTORE_ENABLED_TRUE@HEADER_WITH_DBJSTORE = JobInformationStorageBDB.h @DBJSTORE_ENABLED_FALSE@SOURCE_WITH_DBJSTORE = @DBJSTORE_ENABLED_TRUE@SOURCE_WITH_DBJSTORE = JobInformationStorageBDB.cpp @DBJSTORE_ENABLED_FALSE@CXXFLAGS_WITH_DBJSTORE = @DBJSTORE_ENABLED_TRUE@CXXFLAGS_WITH_DBJSTORE = $(DBCXX_CPPFLAGS) @DBJSTORE_ENABLED_FALSE@LIBADD_WITH_DBJSTORE = @DBJSTORE_ENABLED_TRUE@LIBADD_WITH_DBJSTORE = $(DBCXX_LIBS) @SQLITE_ENABLED_FALSE@HEADER_WITH_SQLITEJSTORE = @SQLITE_ENABLED_TRUE@HEADER_WITH_SQLITEJSTORE = JobInformationStorageSQLite.h @SQLITE_ENABLED_FALSE@SOURCE_WITH_SQLITEJSTORE = @SQLITE_ENABLED_TRUE@SOURCE_WITH_SQLITEJSTORE = JobInformationStorageSQLite.cpp @SQLITE_ENABLED_FALSE@CXXFLAGS_WITH_SQLITEJSTORE = @SQLITE_ENABLED_TRUE@CXXFLAGS_WITH_SQLITEJSTORE = $(SQLITE_CFLAGS) @SQLITE_ENABLED_FALSE@LIBADD_WITH_SQLITEJSTORE = @SQLITE_ENABLED_TRUE@LIBADD_WITH_SQLITEJSTORE = $(SQLITE_LIBS) libarccompute_ladir = $(pkgincludedir)/compute libarccompute_la_HEADERS = Broker.h BrokerPlugin.h ExecutionTarget.h \ Submitter.h SubmitterPlugin.h GLUE2Entity.h SubmissionStatus.h \ JobState.h Job.h JobControllerPlugin.h JobSupervisor.h \ JobDescription.h JobDescriptionParserPlugin.h Software.h \ GLUE2.h EndpointQueryingStatus.h TestACCControl.h \ EntityRetriever.h EntityRetrieverPlugin.h Endpoint.h \ ComputingServiceRetriever.h JobInformationStorage.h \ JobInformationStorageXML.h $(HEADER_WITH_DBJSTORE) $(HEADER_WITH_SQLITEJSTORE) libarccompute_la_SOURCES = Broker.cpp BrokerPlugin.cpp ExecutionTarget.cpp \ Submitter.cpp SubmitterPlugin.cpp \ JobState.cpp Job.cpp JobControllerPlugin.cpp JobSupervisor.cpp \ JobDescription.cpp JobDescriptionParserPlugin.cpp Software.cpp \ GLUE2.cpp EndpointQueryingStatus.cpp TestACCControl.cpp \ EntityRetriever.cpp EntityRetrieverPlugin.cpp Endpoint.cpp \ ComputingServiceRetriever.cpp \ JobInformationStorageDescriptor.cpp \ JobInformationStorageXML.cpp $(SOURCE_WITH_DBJSTORE) $(SOURCE_WITH_SQLITEJSTORE) libarccompute_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) \ $(CXXFLAGS_WITH_DBJSTORE) $(CXXFLAGS_WITH_SQLITEJSTORE) $(AM_CXXFLAGS) libarccompute_la_LIBADD = \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(LIBADD_WITH_DBJSTORE) $(LIBADD_WITH_SQLITEJSTORE) libarccompute_la_LDFLAGS = -version-info 3:0:0 test_jobdescription_SOURCES = test_jobdescription.cpp test_jobdescription_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) test_jobdescription_LDADD = libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(DBCXX_LIBS) test_JobInformationStorage_SOURCES = test_JobInformationStorage.cpp test_JobInformationStorage_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) test_JobInformationStorage_LDADD = libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) testjobdir = $(pkgdatadir)/test-jobs testjob_DATA = test-job-1 test-job-2 test-job-3 EXTRA_DIST = $(testjob_DATA) DIST_SUBDIRS = test examples SUBDIRS = . 
$(TEST_DIR) examples all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/compute/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/compute/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarccompute.la: $(libarccompute_la_OBJECTS) $(libarccompute_la_DEPENDENCIES) $(libarccompute_la_LINK) -rpath $(libdir) $(libarccompute_la_OBJECTS) $(libarccompute_la_LIBADD) $(LIBS) clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list test_JobInformationStorage$(EXEEXT): $(test_JobInformationStorage_OBJECTS) $(test_JobInformationStorage_DEPENDENCIES) @rm -f test_JobInformationStorage$(EXEEXT) $(test_JobInformationStorage_LINK) $(test_JobInformationStorage_OBJECTS) $(test_JobInformationStorage_LDADD) $(LIBS) test_jobdescription$(EXEEXT): $(test_jobdescription_OBJECTS) $(test_jobdescription_DEPENDENCIES) @rm -f test_jobdescription$(EXEEXT) $(test_jobdescription_LINK) $(test_jobdescription_OBJECTS) $(test_jobdescription_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/libarccompute_la-Broker.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-BrokerPlugin.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-ComputingServiceRetriever.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-Endpoint.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-EndpointQueryingStatus.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-EntityRetriever.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-EntityRetrieverPlugin.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-ExecutionTarget.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-GLUE2.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-Job.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-JobControllerPlugin.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-JobDescription.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-JobDescriptionParserPlugin.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-JobInformationStorageBDB.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-JobInformationStorageDescriptor.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-JobInformationStorageSQLite.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-JobInformationStorageXML.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-JobState.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-JobSupervisor.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-Software.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-Submitter.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-SubmitterPlugin.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarccompute_la-TestACCControl.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_JobInformationStorage-test_JobInformationStorage.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_jobdescription-test_jobdescription.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) 
$(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarccompute_la-Broker.lo: Broker.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-Broker.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-Broker.Tpo -c -o libarccompute_la-Broker.lo `test -f 'Broker.cpp' || echo '$(srcdir)/'`Broker.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-Broker.Tpo $(DEPDIR)/libarccompute_la-Broker.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Broker.cpp' object='libarccompute_la-Broker.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-Broker.lo `test -f 'Broker.cpp' || echo '$(srcdir)/'`Broker.cpp libarccompute_la-BrokerPlugin.lo: BrokerPlugin.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-BrokerPlugin.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-BrokerPlugin.Tpo -c -o libarccompute_la-BrokerPlugin.lo `test -f 'BrokerPlugin.cpp' || echo '$(srcdir)/'`BrokerPlugin.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-BrokerPlugin.Tpo $(DEPDIR)/libarccompute_la-BrokerPlugin.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BrokerPlugin.cpp' object='libarccompute_la-BrokerPlugin.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-BrokerPlugin.lo `test -f 'BrokerPlugin.cpp' || echo '$(srcdir)/'`BrokerPlugin.cpp libarccompute_la-ExecutionTarget.lo: ExecutionTarget.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-ExecutionTarget.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-ExecutionTarget.Tpo -c -o libarccompute_la-ExecutionTarget.lo `test -f 'ExecutionTarget.cpp' || echo '$(srcdir)/'`ExecutionTarget.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-ExecutionTarget.Tpo $(DEPDIR)/libarccompute_la-ExecutionTarget.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ExecutionTarget.cpp' object='libarccompute_la-ExecutionTarget.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-ExecutionTarget.lo `test -f 'ExecutionTarget.cpp' || echo '$(srcdir)/'`ExecutionTarget.cpp libarccompute_la-Submitter.lo: Submitter.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-Submitter.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-Submitter.Tpo -c -o libarccompute_la-Submitter.lo `test -f 'Submitter.cpp' || echo '$(srcdir)/'`Submitter.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-Submitter.Tpo $(DEPDIR)/libarccompute_la-Submitter.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Submitter.cpp' object='libarccompute_la-Submitter.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-Submitter.lo `test -f 'Submitter.cpp' || echo '$(srcdir)/'`Submitter.cpp libarccompute_la-SubmitterPlugin.lo: SubmitterPlugin.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-SubmitterPlugin.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-SubmitterPlugin.Tpo -c -o libarccompute_la-SubmitterPlugin.lo `test -f 'SubmitterPlugin.cpp' || echo '$(srcdir)/'`SubmitterPlugin.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-SubmitterPlugin.Tpo $(DEPDIR)/libarccompute_la-SubmitterPlugin.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SubmitterPlugin.cpp' object='libarccompute_la-SubmitterPlugin.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-SubmitterPlugin.lo `test -f 'SubmitterPlugin.cpp' || echo '$(srcdir)/'`SubmitterPlugin.cpp libarccompute_la-JobState.lo: JobState.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-JobState.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-JobState.Tpo -c -o libarccompute_la-JobState.lo `test -f 'JobState.cpp' || echo '$(srcdir)/'`JobState.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-JobState.Tpo $(DEPDIR)/libarccompute_la-JobState.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobState.cpp' object='libarccompute_la-JobState.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-JobState.lo `test -f 'JobState.cpp' || echo '$(srcdir)/'`JobState.cpp libarccompute_la-Job.lo: Job.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-Job.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-Job.Tpo -c -o libarccompute_la-Job.lo `test -f 'Job.cpp' || echo '$(srcdir)/'`Job.cpp 
@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-Job.Tpo $(DEPDIR)/libarccompute_la-Job.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Job.cpp' object='libarccompute_la-Job.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-Job.lo `test -f 'Job.cpp' || echo '$(srcdir)/'`Job.cpp libarccompute_la-JobControllerPlugin.lo: JobControllerPlugin.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-JobControllerPlugin.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-JobControllerPlugin.Tpo -c -o libarccompute_la-JobControllerPlugin.lo `test -f 'JobControllerPlugin.cpp' || echo '$(srcdir)/'`JobControllerPlugin.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-JobControllerPlugin.Tpo $(DEPDIR)/libarccompute_la-JobControllerPlugin.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobControllerPlugin.cpp' object='libarccompute_la-JobControllerPlugin.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-JobControllerPlugin.lo `test -f 'JobControllerPlugin.cpp' || echo '$(srcdir)/'`JobControllerPlugin.cpp libarccompute_la-JobSupervisor.lo: JobSupervisor.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-JobSupervisor.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-JobSupervisor.Tpo -c -o libarccompute_la-JobSupervisor.lo `test -f 'JobSupervisor.cpp' || echo '$(srcdir)/'`JobSupervisor.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-JobSupervisor.Tpo $(DEPDIR)/libarccompute_la-JobSupervisor.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobSupervisor.cpp' object='libarccompute_la-JobSupervisor.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-JobSupervisor.lo `test -f 'JobSupervisor.cpp' || echo '$(srcdir)/'`JobSupervisor.cpp libarccompute_la-JobDescription.lo: JobDescription.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-JobDescription.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-JobDescription.Tpo -c -o libarccompute_la-JobDescription.lo `test -f 'JobDescription.cpp' || echo '$(srcdir)/'`JobDescription.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-JobDescription.Tpo $(DEPDIR)/libarccompute_la-JobDescription.Plo 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobDescription.cpp' object='libarccompute_la-JobDescription.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-JobDescription.lo `test -f 'JobDescription.cpp' || echo '$(srcdir)/'`JobDescription.cpp libarccompute_la-JobDescriptionParserPlugin.lo: JobDescriptionParserPlugin.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-JobDescriptionParserPlugin.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-JobDescriptionParserPlugin.Tpo -c -o libarccompute_la-JobDescriptionParserPlugin.lo `test -f 'JobDescriptionParserPlugin.cpp' || echo '$(srcdir)/'`JobDescriptionParserPlugin.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-JobDescriptionParserPlugin.Tpo $(DEPDIR)/libarccompute_la-JobDescriptionParserPlugin.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobDescriptionParserPlugin.cpp' object='libarccompute_la-JobDescriptionParserPlugin.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-JobDescriptionParserPlugin.lo `test -f 'JobDescriptionParserPlugin.cpp' || echo '$(srcdir)/'`JobDescriptionParserPlugin.cpp libarccompute_la-Software.lo: Software.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-Software.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-Software.Tpo -c -o libarccompute_la-Software.lo `test -f 'Software.cpp' || echo '$(srcdir)/'`Software.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-Software.Tpo $(DEPDIR)/libarccompute_la-Software.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Software.cpp' object='libarccompute_la-Software.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-Software.lo `test -f 'Software.cpp' || echo '$(srcdir)/'`Software.cpp libarccompute_la-GLUE2.lo: GLUE2.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-GLUE2.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-GLUE2.Tpo -c -o libarccompute_la-GLUE2.lo `test -f 'GLUE2.cpp' || echo '$(srcdir)/'`GLUE2.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-GLUE2.Tpo $(DEPDIR)/libarccompute_la-GLUE2.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='GLUE2.cpp' object='libarccompute_la-GLUE2.lo' libtool=yes 
@AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-GLUE2.lo `test -f 'GLUE2.cpp' || echo '$(srcdir)/'`GLUE2.cpp libarccompute_la-EndpointQueryingStatus.lo: EndpointQueryingStatus.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-EndpointQueryingStatus.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-EndpointQueryingStatus.Tpo -c -o libarccompute_la-EndpointQueryingStatus.lo `test -f 'EndpointQueryingStatus.cpp' || echo '$(srcdir)/'`EndpointQueryingStatus.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-EndpointQueryingStatus.Tpo $(DEPDIR)/libarccompute_la-EndpointQueryingStatus.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='EndpointQueryingStatus.cpp' object='libarccompute_la-EndpointQueryingStatus.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-EndpointQueryingStatus.lo `test -f 'EndpointQueryingStatus.cpp' || echo '$(srcdir)/'`EndpointQueryingStatus.cpp libarccompute_la-TestACCControl.lo: TestACCControl.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-TestACCControl.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-TestACCControl.Tpo -c -o libarccompute_la-TestACCControl.lo `test -f 'TestACCControl.cpp' || echo '$(srcdir)/'`TestACCControl.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-TestACCControl.Tpo $(DEPDIR)/libarccompute_la-TestACCControl.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='TestACCControl.cpp' object='libarccompute_la-TestACCControl.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-TestACCControl.lo `test -f 'TestACCControl.cpp' || echo '$(srcdir)/'`TestACCControl.cpp libarccompute_la-EntityRetriever.lo: EntityRetriever.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-EntityRetriever.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-EntityRetriever.Tpo -c -o libarccompute_la-EntityRetriever.lo `test -f 'EntityRetriever.cpp' || echo '$(srcdir)/'`EntityRetriever.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-EntityRetriever.Tpo $(DEPDIR)/libarccompute_la-EntityRetriever.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='EntityRetriever.cpp' object='libarccompute_la-EntityRetriever.lo' libtool=yes @AMDEPBACKSLASH@ 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-EntityRetriever.lo `test -f 'EntityRetriever.cpp' || echo '$(srcdir)/'`EntityRetriever.cpp libarccompute_la-EntityRetrieverPlugin.lo: EntityRetrieverPlugin.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-EntityRetrieverPlugin.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-EntityRetrieverPlugin.Tpo -c -o libarccompute_la-EntityRetrieverPlugin.lo `test -f 'EntityRetrieverPlugin.cpp' || echo '$(srcdir)/'`EntityRetrieverPlugin.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-EntityRetrieverPlugin.Tpo $(DEPDIR)/libarccompute_la-EntityRetrieverPlugin.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='EntityRetrieverPlugin.cpp' object='libarccompute_la-EntityRetrieverPlugin.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-EntityRetrieverPlugin.lo `test -f 'EntityRetrieverPlugin.cpp' || echo '$(srcdir)/'`EntityRetrieverPlugin.cpp libarccompute_la-Endpoint.lo: Endpoint.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-Endpoint.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-Endpoint.Tpo -c -o libarccompute_la-Endpoint.lo `test -f 'Endpoint.cpp' || echo '$(srcdir)/'`Endpoint.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-Endpoint.Tpo $(DEPDIR)/libarccompute_la-Endpoint.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Endpoint.cpp' object='libarccompute_la-Endpoint.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-Endpoint.lo `test -f 'Endpoint.cpp' || echo '$(srcdir)/'`Endpoint.cpp libarccompute_la-ComputingServiceRetriever.lo: ComputingServiceRetriever.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-ComputingServiceRetriever.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-ComputingServiceRetriever.Tpo -c -o libarccompute_la-ComputingServiceRetriever.lo `test -f 'ComputingServiceRetriever.cpp' || echo '$(srcdir)/'`ComputingServiceRetriever.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-ComputingServiceRetriever.Tpo $(DEPDIR)/libarccompute_la-ComputingServiceRetriever.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ComputingServiceRetriever.cpp' object='libarccompute_la-ComputingServiceRetriever.lo' 
libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-ComputingServiceRetriever.lo `test -f 'ComputingServiceRetriever.cpp' || echo '$(srcdir)/'`ComputingServiceRetriever.cpp libarccompute_la-JobInformationStorageDescriptor.lo: JobInformationStorageDescriptor.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-JobInformationStorageDescriptor.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-JobInformationStorageDescriptor.Tpo -c -o libarccompute_la-JobInformationStorageDescriptor.lo `test -f 'JobInformationStorageDescriptor.cpp' || echo '$(srcdir)/'`JobInformationStorageDescriptor.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-JobInformationStorageDescriptor.Tpo $(DEPDIR)/libarccompute_la-JobInformationStorageDescriptor.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobInformationStorageDescriptor.cpp' object='libarccompute_la-JobInformationStorageDescriptor.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-JobInformationStorageDescriptor.lo `test -f 'JobInformationStorageDescriptor.cpp' || echo '$(srcdir)/'`JobInformationStorageDescriptor.cpp libarccompute_la-JobInformationStorageXML.lo: JobInformationStorageXML.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-JobInformationStorageXML.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-JobInformationStorageXML.Tpo -c -o libarccompute_la-JobInformationStorageXML.lo `test -f 'JobInformationStorageXML.cpp' || echo '$(srcdir)/'`JobInformationStorageXML.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-JobInformationStorageXML.Tpo $(DEPDIR)/libarccompute_la-JobInformationStorageXML.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobInformationStorageXML.cpp' object='libarccompute_la-JobInformationStorageXML.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-JobInformationStorageXML.lo `test -f 'JobInformationStorageXML.cpp' || echo '$(srcdir)/'`JobInformationStorageXML.cpp libarccompute_la-JobInformationStorageBDB.lo: JobInformationStorageBDB.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-JobInformationStorageBDB.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-JobInformationStorageBDB.Tpo -c -o 
libarccompute_la-JobInformationStorageBDB.lo `test -f 'JobInformationStorageBDB.cpp' || echo '$(srcdir)/'`JobInformationStorageBDB.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-JobInformationStorageBDB.Tpo $(DEPDIR)/libarccompute_la-JobInformationStorageBDB.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobInformationStorageBDB.cpp' object='libarccompute_la-JobInformationStorageBDB.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-JobInformationStorageBDB.lo `test -f 'JobInformationStorageBDB.cpp' || echo '$(srcdir)/'`JobInformationStorageBDB.cpp libarccompute_la-JobInformationStorageSQLite.lo: JobInformationStorageSQLite.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -MT libarccompute_la-JobInformationStorageSQLite.lo -MD -MP -MF $(DEPDIR)/libarccompute_la-JobInformationStorageSQLite.Tpo -c -o libarccompute_la-JobInformationStorageSQLite.lo `test -f 'JobInformationStorageSQLite.cpp' || echo '$(srcdir)/'`JobInformationStorageSQLite.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarccompute_la-JobInformationStorageSQLite.Tpo $(DEPDIR)/libarccompute_la-JobInformationStorageSQLite.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobInformationStorageSQLite.cpp' object='libarccompute_la-JobInformationStorageSQLite.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarccompute_la_CXXFLAGS) $(CXXFLAGS) -c -o libarccompute_la-JobInformationStorageSQLite.lo `test -f 'JobInformationStorageSQLite.cpp' || echo '$(srcdir)/'`JobInformationStorageSQLite.cpp test_JobInformationStorage-test_JobInformationStorage.o: test_JobInformationStorage.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_JobInformationStorage_CXXFLAGS) $(CXXFLAGS) -MT test_JobInformationStorage-test_JobInformationStorage.o -MD -MP -MF $(DEPDIR)/test_JobInformationStorage-test_JobInformationStorage.Tpo -c -o test_JobInformationStorage-test_JobInformationStorage.o `test -f 'test_JobInformationStorage.cpp' || echo '$(srcdir)/'`test_JobInformationStorage.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_JobInformationStorage-test_JobInformationStorage.Tpo $(DEPDIR)/test_JobInformationStorage-test_JobInformationStorage.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_JobInformationStorage.cpp' object='test_JobInformationStorage-test_JobInformationStorage.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_JobInformationStorage_CXXFLAGS) $(CXXFLAGS) -c -o test_JobInformationStorage-test_JobInformationStorage.o `test -f 'test_JobInformationStorage.cpp' || echo '$(srcdir)/'`test_JobInformationStorage.cpp test_JobInformationStorage-test_JobInformationStorage.obj: 
test_JobInformationStorage.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_JobInformationStorage_CXXFLAGS) $(CXXFLAGS) -MT test_JobInformationStorage-test_JobInformationStorage.obj -MD -MP -MF $(DEPDIR)/test_JobInformationStorage-test_JobInformationStorage.Tpo -c -o test_JobInformationStorage-test_JobInformationStorage.obj `if test -f 'test_JobInformationStorage.cpp'; then $(CYGPATH_W) 'test_JobInformationStorage.cpp'; else $(CYGPATH_W) '$(srcdir)/test_JobInformationStorage.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_JobInformationStorage-test_JobInformationStorage.Tpo $(DEPDIR)/test_JobInformationStorage-test_JobInformationStorage.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_JobInformationStorage.cpp' object='test_JobInformationStorage-test_JobInformationStorage.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_JobInformationStorage_CXXFLAGS) $(CXXFLAGS) -c -o test_JobInformationStorage-test_JobInformationStorage.obj `if test -f 'test_JobInformationStorage.cpp'; then $(CYGPATH_W) 'test_JobInformationStorage.cpp'; else $(CYGPATH_W) '$(srcdir)/test_JobInformationStorage.cpp'; fi` test_jobdescription-test_jobdescription.o: test_jobdescription.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_jobdescription_CXXFLAGS) $(CXXFLAGS) -MT test_jobdescription-test_jobdescription.o -MD -MP -MF $(DEPDIR)/test_jobdescription-test_jobdescription.Tpo -c -o test_jobdescription-test_jobdescription.o `test -f 'test_jobdescription.cpp' || echo '$(srcdir)/'`test_jobdescription.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_jobdescription-test_jobdescription.Tpo $(DEPDIR)/test_jobdescription-test_jobdescription.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_jobdescription.cpp' object='test_jobdescription-test_jobdescription.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_jobdescription_CXXFLAGS) $(CXXFLAGS) -c -o test_jobdescription-test_jobdescription.o `test -f 'test_jobdescription.cpp' || echo '$(srcdir)/'`test_jobdescription.cpp test_jobdescription-test_jobdescription.obj: test_jobdescription.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_jobdescription_CXXFLAGS) $(CXXFLAGS) -MT test_jobdescription-test_jobdescription.obj -MD -MP -MF $(DEPDIR)/test_jobdescription-test_jobdescription.Tpo -c -o test_jobdescription-test_jobdescription.obj `if test -f 'test_jobdescription.cpp'; then $(CYGPATH_W) 'test_jobdescription.cpp'; else $(CYGPATH_W) '$(srcdir)/test_jobdescription.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test_jobdescription-test_jobdescription.Tpo $(DEPDIR)/test_jobdescription-test_jobdescription.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test_jobdescription.cpp' object='test_jobdescription-test_jobdescription.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_jobdescription_CXXFLAGS) $(CXXFLAGS) -c -o test_jobdescription-test_jobdescription.obj `if test -f 
'test_jobdescription.cpp'; then $(CYGPATH_W) 'test_jobdescription.cpp'; else $(CYGPATH_W) '$(srcdir)/test_jobdescription.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-testjobDATA: $(testjob_DATA) @$(NORMAL_INSTALL) test -z "$(testjobdir)" || $(MKDIR_P) "$(DESTDIR)$(testjobdir)" @list='$(testjob_DATA)'; test -n "$(testjobdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(testjobdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(testjobdir)" || exit $$?; \ done uninstall-testjobDATA: @$(NORMAL_UNINSTALL) @list='$(testjob_DATA)'; test -n "$(testjobdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(testjobdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(testjobdir)" && rm -f $$files install-libarccompute_laHEADERS: $(libarccompute_la_HEADERS) @$(NORMAL_INSTALL) test -z "$(libarccompute_ladir)" || $(MKDIR_P) "$(DESTDIR)$(libarccompute_ladir)" @list='$(libarccompute_la_HEADERS)'; test -n "$(libarccompute_ladir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarccompute_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarccompute_ladir)" || exit $$?; \ done uninstall-libarccompute_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarccompute_la_HEADERS)'; test -n "$(libarccompute_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(libarccompute_ladir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(libarccompute_ladir)" && rm -f $$files # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
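# As an illustration of option (2) above (an example invocation, not one
# mandated by this package): a variable such as CXXFLAGS can be overridden for
# a single build without editing config.status or any generated Makefile, e.g.
#
#   make CXXFLAGS='-g -O0'
#
# because a command-line assignment takes precedence over the values that
# config.status substituted into this Makefile.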
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) $(DATA) $(HEADERS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(testjobdir)" "$(DESTDIR)$(libarccompute_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ clean-noinstPROGRAMS mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-libarccompute_laHEADERS install-testjobDATA install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-libLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-libLTLIBRARIES \ uninstall-libarccompute_laHEADERS uninstall-testjobDATA .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic \ clean-libLTLIBRARIES clean-libtool clean-noinstPROGRAMS ctags \ ctags-recursive distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-libLTLIBRARIES \ install-libarccompute_laHEADERS install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ install-testjobDATA installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am uninstall-libLTLIBRARIES \ uninstall-libarccompute_laHEADERS uninstall-testjobDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/test_JobInformationStorage.cpp0000644000000000000000000000012412301125744027140 xustar000000000000000027 mtime=1392815076.546455 27 atime=1513200574.719704 30 ctime=1513200659.813745208 nordugrid-arc-5.4.2/src/hed/libs/compute/test_JobInformationStorage.cpp0000644000175000002070000002252312301125744027211 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "JobInformationStorageXML.h" #ifdef DBJSTORE_ENABLED #include "JobInformationStorageBDB.h" #endif int main(int argc, char **argv) { Arc::LogStream logcerr(std::cerr); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::OptionParser options("", "", ""); int nJobs = -1; options.AddOption('N', "NJobs", "number of jobs to write/read to/from storage", "n", nJobs); int bunchSize = 50000; options.AddOption('B', "bunchSize", "size of bunches of job objects to pass to JobInformationStorage object methods", "n", bunchSize); std::string action = "write"; options.AddOption('a', "action", "Action to perform: write, append, appendreturnnew, read, readall, remove", "action", action); std::string filename = ""; options.AddOption('f', "filename", "", "", filename); std::string typeS = ""; options.AddOption('t', "type", "Type of storage back-end to use (BDB or XML)", "type", typeS); std::string hostname = "test.nordugrid.org"; options.AddOption(0, "hostname", "", "", hostname); std::list endpoints; options.AddOption(0, "endpoint", "", "", endpoints); std::list rejectEndpoints; options.AddOption(0, "rejectEndpoint", "Reject jobs with JobManagementURL matching specified endpoints (matching algorithm: URL::StringMatches)", "reject", rejectEndpoints); std::string jobidinfile; options.AddOption('i', "jobids-from-file", "a file containing a list of job IDs", "filename", jobidinfile); std::string jobidoutfile; options.AddOption('o', "jobids-to-file", "the IDs of jobs will be appended to this file", "filename", jobidoutfile); std::string debug; options.AddOption('d', "debug", "FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG", "debuglevel", debug); options.Parse(argc, argv); if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug)); if (filename.empty()) { std::cerr << "ERROR: No filename specified." << std::endl; return 1; } Arc::JobInformationStorage** jisPointer = NULL; if (typeS == "XML") { Arc::JobInformationStorageXML *jisXML = new Arc::JobInformationStorageXML(filename); jisPointer = (Arc::JobInformationStorage**)&jisXML; } #ifdef DBJSTORE_ENABLED else if (typeS == "BDB") { Arc::JobInformationStorageBDB *jisDB4 = new Arc::JobInformationStorageBDB(filename); jisPointer = (Arc::JobInformationStorage**)&jisDB4; } #endif else { std::cerr << "ERROR: Unable to determine storage back-end to use." 
<< std::endl; return 1; } Arc::JobInformationStorage& jis = **jisPointer; Arc::Period timing; if (action == "write" || action == "append") { if (nJobs <= 0) { std::cerr << "ERROR: Invalid number of jobs specified (nJobs = " << nJobs << ")" << std::endl; goto error; } if (bunchSize <= 0) { std::cerr << "ERROR: Invalid bunch size (bunchSize = " << bunchSize << ")" << std::endl; goto error; } if (action == "write") { remove(filename.c_str()); } else { Arc::FileCopy(filename, filename + ".orig"); } Arc::Job j; j.ServiceInformationInterfaceName = "org.nordugrid.test"; j.JobStatusInterfaceName = "org.nordugrid.test"; j.JobManagementInterfaceName = "org.nordugrid.test"; j.JobDescriptionDocument = "&( executable = \"/bin/echo\" )( arguments = \"Hello World\" )( stdout = \"std.out\" )( stderr = \"std.out\" )( cputime = \"PT1M\" )( outputfiles = ( \"std.out\" \"\" ) ( \"std.out\" \"\" ) )( queue = \"gridlong\" )( jobname = \"Hello World\" )( clientsoftware = \"libarccompute-trunk\" )( clientxrsl = \"&( executable = \"\"/bin/echo\"\" )( arguments = \"\"Hello World\"\" )( stdout = \"\"std.out\"\" )( join = \"\"yes\"\" )( cputime = \"\"1\"\" )( jobname = \"\"Hello World\"\" )\" )( hostname = \"x220-skou\" )( savestate = \"yes\" )"; srand(Arc::Time().GetTimeNanoseconds()); for (int m = 0; m <= nJobs/bunchSize; ++m) { std::list jobs; const int bunchEnd = (m != nJobs/bunchSize)*(m+1)*bunchSize + (m == nJobs/bunchSize)*(nJobs%bunchSize); for (int n = m*bunchSize; n < bunchEnd; ++n) { j.Name = "Job " + Arc::tostring(n); j.IDFromEndpoint = Arc::tostring(rand())+Arc::tostring(n)+Arc::tostring(rand()); j.JobID = "http://" + hostname + "/" + j.IDFromEndpoint; j.ServiceInformationURL = Arc::URL("http://" + hostname + "/serviceinfo"); j.JobStatusURL = Arc::URL("http://" + hostname + "/jobstatus"); j.JobManagementURL = Arc::URL("http://" + hostname + "/jobmanagement"); j.StageInDir = Arc::URL("http://" + hostname + "/stagein/" + j.IDFromEndpoint); j.StageOutDir = Arc::URL("http://" + hostname + "/stageout/" + j.IDFromEndpoint); j.SessionDir = Arc::URL("http://" + hostname + "/session/" + j.IDFromEndpoint); j.LocalSubmissionTime = Arc::Time(); jobs.push_back(j); } if (!jobidoutfile.empty()) { Arc::Job::WriteJobIDsToFile(jobs, jobidoutfile); } Arc::Time tBefore; jis.Write(jobs); timing += Arc::Time()-tBefore; } } else if (action == "appendreturnnew") { if (nJobs <= 0) { std::cerr << "ERROR: Invalid number of jobs specified (nJobs = " << nJobs << ")" << std::endl; goto error; } if (bunchSize <= 0) { std::cerr << "ERROR: Invalid bunch size (bunchSize = " << bunchSize << ")" << std::endl; goto error; } Arc::FileCopy(filename, "append-" + filename); filename = "append-" + filename; Arc::Job j; j.ServiceInformationInterfaceName = "org.nordugrid.test"; j.JobStatusInterfaceName = "org.nordugrid.test"; j.JobManagementInterfaceName = "org.nordugrid.test"; j.JobDescriptionDocument = "&( executable = \"/bin/echo\" )( arguments = \"Hello World\" )( stdout = \"std.out\" )( stderr = \"std.out\" )( cputime = \"PT1M\" )( outputfiles = ( \"std.out\" \"\" ) ( \"std.out\" \"\" ) )( queue = \"gridlong\" )( jobname = \"Hello World\" )( clientsoftware = \"libarccompute-trunk\" )( clientxrsl = \"&( executable = \"\"/bin/echo\"\" )( arguments = \"\"Hello World\"\" )( stdout = \"\"std.out\"\" )( join = \"\"yes\"\" )( cputime = \"\"1\"\" )( jobname = \"\"Hello World\"\" )\" )( hostname = \"x220-skou\" )( savestate = \"yes\" )"; std::list identifiers; Arc::Job::ReadJobIDsFromFile(jobidinfile, identifiers); for (int m = 0; m <= 
nJobs/bunchSize; ++m) { std::list jobs; std::set prunedServices; const int bunchEnd = (m != nJobs/bunchSize)*(m+1)*bunchSize + (m == nJobs/bunchSize)*(nJobs%bunchSize); for (int n = m*bunchSize; n < bunchEnd; ++n) { std::string jobHostName = hostname; if (!identifiers.empty()) { Arc::URL temp(identifiers.front()); identifiers.pop_front(); jobHostName = temp.Host(); j.IDFromEndpoint = temp.Path(); } else { j.IDFromEndpoint = Arc::tostring(rand())+Arc::tostring(rand()); } prunedServices.insert(jobHostName); j.Name = "Job " + Arc::tostring(n); j.JobID = "http://" + jobHostName + "/" + j.IDFromEndpoint; j.ServiceInformationURL = Arc::URL("http://" + jobHostName + "/serviceinfo"); j.JobStatusURL = Arc::URL("http://" + jobHostName + "/jobstatus"); j.JobManagementURL = Arc::URL("http://" + jobHostName + "/jobmanagement"); j.StageInDir = Arc::URL("http://" + jobHostName + "/stagein/" + j.IDFromEndpoint); j.StageOutDir = Arc::URL("http://" + jobHostName + "/stageout/" + j.IDFromEndpoint); j.SessionDir = Arc::URL("http://" + jobHostName + "/session/" + j.IDFromEndpoint); j.LocalSubmissionTime = Arc::Time(); jobs.push_back(j); } std::list newJobs; Arc::Time tBefore; jis.Write(jobs, prunedServices, newJobs); timing += Arc::Time()-tBefore; } } else if (action == "readall") { std::list jobs; Arc::Time tBefore; jis.ReadAll(jobs, rejectEndpoints); timing += Arc::Time()-tBefore; } else if (action == "read") { std::list jobs; std::list identifiers; Arc::Job::ReadJobIDsFromFile(jobidinfile, identifiers); Arc::Time tBefore; jis.Read(jobs, identifiers, endpoints, rejectEndpoints); timing += Arc::Time()-tBefore; } else if (action == "remove") { std::list identifiers; Arc::Job::ReadJobIDsFromFile(jobidinfile, identifiers); Arc::Time tBefore; jis.Remove(identifiers); timing += Arc::Time()-tBefore; } else { std::cerr << "ERROR: Invalid action specified (action = \"" << action << "\")" << std::endl; delete *jisPointer; return 1; } delete *jisPointer; { int nanosecs = timing.GetPeriodNanoseconds(); std::string zerosToPrefix = ""; for (int i = 100000000; i > 1; i /= 10) { if (nanosecs / i == 0) zerosToPrefix += "0"; else break; } std::cout << timing.GetPeriod() << "." << zerosToPrefix << nanosecs/1000 << std::endl; } return 0; error: delete *jisPointer; return 1; } nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/Software.cpp0000644000000000000000000000012412574532370023437 xustar000000000000000027 mtime=1441969400.372727 27 atime=1513200574.754704 30 ctime=1513200659.801745061 nordugrid-arc-5.4.2/src/hed/libs/compute/Software.cpp0000644000175000002070000002144612574532370023513 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "Software.h" namespace Arc { Logger Software::logger(Logger::getRootLogger(), "Software"); Logger SoftwareRequirement::logger(Logger::getRootLogger(), "SoftwareRequirement"); const std::string Software::VERSIONTOKENS = "-."; Software::Software(const std::string& name_version) : family(""), version("") { std::size_t pos = 0; while (pos != std::string::npos) { // Look for dashes in the input string. pos = name_version.find_first_of("-", pos); if (pos != std::string::npos) { // 'name' and 'version' is defined to be separated at the first dash which // is followed by a digit. 
if (isdigit(name_version[pos+1])) { name = name_version.substr(0, pos); version = name_version.substr(pos+1); tokenize(version, tokenizedVersion, VERSIONTOKENS); return; } pos++; } } // If no version part is found from the input string set the input to be the name. name = name_version; } Software::Software(const std::string& name, const std::string& version) : family(""), name(name), version(version) { tokenize(version, tokenizedVersion, VERSIONTOKENS); } Software::Software(const std::string& family, const std::string& name, const std::string& version) : family(family), name(name), version(version) { tokenize(version, tokenizedVersion, VERSIONTOKENS); } std::string Software::operator()() const { if (empty()) return ""; if (family.empty() && version.empty()) return name; if (family.empty()) return name + "-" + version; if (version.empty()) return family + "-" + name; return family + "-" + name + "-" + version; } bool Software::operator>(const Software& sv) const { if (family != sv.family || name != sv.name || version.empty() || version == sv.version) { logger.msg(VERBOSE, "%s > %s => false", (std::string)*this, (std::string)sv); return false; } if (sv.version.empty()) { logger.msg(VERBOSE, "%s > %s => true", (std::string)*this, (std::string)sv); return true; } int lhsInt, rhsInt; std::list::const_iterator lhsIt, rhsIt; for (lhsIt = tokenizedVersion.begin(), rhsIt = sv.tokenizedVersion.begin(); lhsIt != tokenizedVersion.end() && rhsIt != sv.tokenizedVersion.end(); lhsIt++, rhsIt++) { if (*lhsIt == *rhsIt) continue; if (stringto(*lhsIt, lhsInt) && stringto(*rhsIt, rhsInt)) { if (lhsInt > rhsInt) { logger.msg(VERBOSE, "%s > %s => true", (std::string)*this, (std::string)sv); return true; } if (lhsInt == rhsInt) continue; } else { logger.msg(VERBOSE, "%s > %s => false: %s contains non numbers in the version part.", (std::string)*this, (std::string)sv, (!stringto(*lhsIt, lhsInt) ? (std::string)*this : (std::string)sv)); return false; } logger.msg(VERBOSE, "%s > %s => false", (std::string)*this, (std::string)sv); return false; } if (sv.tokenizedVersion.size() != tokenizedVersion.size()) { // Left side contains extra tokens. These must only contain numbers. for (; lhsIt != tokenizedVersion.end(); lhsIt++) { if (!stringto(*lhsIt, lhsInt)) { // Try to convert ot an integer. 
logger.msg(VERBOSE, "%s > %s => false: %s contains non numbers in the version part.", (std::string)*this, (std::string)sv, (std::string)*this); return false; } if (lhsInt != 0) { logger.msg(VERBOSE, "%s > %s => true", (std::string)*this, (std::string)sv); return true; } } } logger.msg(VERBOSE, "%s > %s => false", (std::string)*this, (std::string)sv); return false; } std::string Software::toString(ComparisonOperator co) { if (co == &Software::operator==) return "=="; if (co == &Software::operator<) return "<"; if (co == &Software::operator>) return ">"; if (co == &Software::operator<=) return "<="; if (co == &Software::operator>=) return ">="; return "!="; } Software::ComparisonOperator Software::convert(const Software::ComparisonOperatorEnum& co) { switch (co) { case Software::EQUAL: return &Software::operator==; case Software::NOTEQUAL: return &Software::operator!=; case Software::GREATERTHAN: return &Software::operator>; case Software::LESSTHAN: return &Software::operator<; case Software::GREATERTHANOREQUAL: return &Software::operator>=; case Software::LESSTHANOREQUAL: return &Software::operator<=; }; return &Software::operator!=; // Avoid compilation warning } SoftwareRequirement::SoftwareRequirement(const Software& sw, Software::ComparisonOperatorEnum co) : softwareList(1, sw), comparisonOperatorList(1, Software::convert(co)) {} SoftwareRequirement::SoftwareRequirement(const Software& sw, Software::ComparisonOperator swComOp) : softwareList(1, sw), comparisonOperatorList(1, swComOp) {} SoftwareRequirement& SoftwareRequirement::operator=(const SoftwareRequirement& sr) { softwareList = sr.softwareList; comparisonOperatorList = sr.comparisonOperatorList; return *this; } void SoftwareRequirement::add(const Software& sw, Software::ComparisonOperator swComOp) { if (!sw.empty()) { softwareList.push_back(sw); comparisonOperatorList.push_back(swComOp); } } void SoftwareRequirement::add(const Software& sw, Software::ComparisonOperatorEnum co) { add(sw, Software::convert(co)); } bool SoftwareRequirement::isSatisfied(const std::list& swList) const { return isSatisfiedSelect(reinterpret_cast< const std::list& >(swList)); } bool SoftwareRequirement::isSatisfiedSelect(const std::list& swList, SoftwareRequirement* sr) const { // Compare Software objects in the 'versions' list with those in 'swList'. std::list::const_iterator itSW = softwareList.begin(); std::list::const_iterator itSWC = comparisonOperatorList.begin(); for (; itSW != softwareList.end() && itSWC != comparisonOperatorList.end(); itSW++, itSWC++) { Software * currentSelectedSoftware = NULL; // Pointer to the current selected software from the argument list. // Loop over 'swList'. std::list::const_iterator itSWList = swList.begin(); for (; itSWList != swList.end(); itSWList++) { if (((*itSWList).**itSWC)(*itSW)) { // One of the requirements satisfied. if (*itSWC == &Software::operator!=) { continue; } if (sr != NULL) { if (currentSelectedSoftware == NULL) { // First software to satisfy requirement. Push it to the selected software. sr->softwareList.push_back(*itSWList); sr->comparisonOperatorList.push_back(&Software::operator ==); } else if (*currentSelectedSoftware < *itSWList) { // Select the software with the highest version still satisfying the requirement. 
sr->softwareList.back() = *itSWList; } currentSelectedSoftware = &sr->softwareList.back(); } else { break; } } else if (*itSWC == &Software::operator!=) { logger.msg(VERBOSE, "Requirement \"%s %s\" NOT satisfied.", Software::toString(*itSWC), (std::string)*itSW); return false; } } if (*itSWC == &Software::operator!=) { logger.msg(VERBOSE, "Requirement \"%s %s\" satisfied.", Software::toString(*itSWC), (std::string)*itSW); continue; } if (itSWList == swList.end() && currentSelectedSoftware == NULL) { logger.msg(VERBOSE, "Requirement \"%s %s\" NOT satisfied.", Software::toString(*itSWC), (std::string)*itSW); return false; } logger.msg(VERBOSE, "Requirement \"%s %s\" satisfied by \"%s\".", Software::toString(*itSWC), (std::string)*itSW, (std::string)(currentSelectedSoftware == NULL ? *itSWList : *currentSelectedSoftware)); // Keep options from requirement if(currentSelectedSoftware != NULL) currentSelectedSoftware->addOptions(itSW->getOptions()); } logger.msg(VERBOSE, "All requirements satisfied."); return true; } bool SoftwareRequirement::selectSoftware(const std::list& swList) { SoftwareRequirement* sr = new SoftwareRequirement(); bool status = isSatisfiedSelect(swList, sr); if (status) { *this = *sr; } delete sr; return status; } bool SoftwareRequirement::selectSoftware(const std::list& swList) { return selectSoftware(reinterpret_cast< const std::list& >(swList)); } bool SoftwareRequirement::isResolved() const { for (std::list::const_iterator it = comparisonOperatorList.begin(); it != comparisonOperatorList.end(); it++) { if (*it != &Software::operator==) { return false; } } return true; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/JobControllerPlugin.h0000644000000000000000000000012413165644550025250 xustar000000000000000027 mtime=1507281256.705161 27 atime=1513200574.716704 30 ctime=1513200659.768744657 nordugrid-arc-5.4.2/src/hed/libs/compute/JobControllerPlugin.h0000644000175000002070000001110513165644550025313 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOBCONTROLLER_H__ #define __ARC_JOBCONTROLLER_H__ /** \file * \brief Plugin, loader and argument classes for job controller specialisation. 
*/ #include #include #include #include #include #include #include namespace Arc { class Broker; class Logger; class UserConfig; /** * \ingroup accplugins * \headerfile JobControllerPlugin.h arc/compute/JobControllerPlugin.h */ class JobControllerPlugin : public Plugin { protected: JobControllerPlugin(const UserConfig& usercfg, PluginArgument* parg) : Plugin(parg), usercfg(&usercfg) {} public: virtual ~JobControllerPlugin() {} virtual void UpdateJobs(std::list& jobs, bool isGrouped = false) const; virtual void UpdateJobs(std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const = 0; virtual bool CleanJobs(const std::list& jobs, bool isGrouped = false) const; virtual bool CleanJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const = 0; virtual bool CancelJobs(const std::list& jobs, bool isGrouped = false) const; virtual bool CancelJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const = 0; virtual bool RenewJobs(const std::list& jobs, bool isGrouped = false) const; virtual bool RenewJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const = 0; virtual bool ResumeJobs(const std::list& jobs, bool isGrouped = false) const; virtual bool ResumeJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const = 0; virtual bool GetJobDescription(const Job& job, std::string& desc_str) const = 0; virtual bool GetURLToJobResource(const Job& job, Job::ResourceType resource, URL& url) const = 0; virtual std::string GetGroupID() const { return ""; } virtual const std::list& SupportedInterfaces() const { return supportedInterfaces; }; /** * \since Added in 5.1.0 **/ virtual void SetUserConfig(const UserConfig& uc) { usercfg = &uc; } protected: /** * UserConfig object not owned by this class, and relies on its existence * throughout lifetime of objects from this class. Must not be deleted by * this class. Pointers to this object must not be exposed publicly. **/ const UserConfig* usercfg; std::list supportedInterfaces; static Logger logger; }; /** Class responsible for loading JobControllerPlugin plugins * The JobControllerPlugin objects returned by a JobControllerPluginLoader * must not be used after the JobControllerPluginLoader goes out of scope. * * \ingroup accplugins * \headerfile JobControllerPlugin.h arc/compute/JobControllerPlugin.h */ class JobControllerPluginLoader : public Loader { public: /** Constructor * Creates a new JobControllerPluginLoader. */ JobControllerPluginLoader(); /** Destructor * Calling the destructor destroys all JobControllerPlugins loaded * by the JobControllerPluginLoader instance. */ ~JobControllerPluginLoader(); /** Load a new JobControllerPlugin * \param name The name of the JobControllerPlugin to load. * \param uc The UserConfig object for the new JobControllerPlugin. * \return A pointer to the new JobControllerPlugin (NULL on error). 
*/ JobControllerPlugin* load(const std::string& name, const UserConfig& uc); JobControllerPlugin* loadByInterfaceName(const std::string& name, const UserConfig& uc); private: void initialiseInterfacePluginMap(const UserConfig& uc); std::multimap jobcontrollers; static std::map interfacePluginMap; }; /** * \ingroup accplugins * \headerfile JobControllerPlugin.h arc/compute/JobControllerPlugin.h */ class JobControllerPluginArgument : public PluginArgument { public: JobControllerPluginArgument(const UserConfig& usercfg) : usercfg(usercfg) {} ~JobControllerPluginArgument() {} operator const UserConfig&() { return usercfg; } private: const UserConfig& usercfg; }; } // namespace Arc #endif // __ARC_JOBCONTROLLER_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/GLUE2.h0000644000000000000000000000012412045235201022112 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.693704 30 ctime=1513200659.773744719 nordugrid-arc-5.4.2/src/hed/libs/compute/GLUE2.h0000644000175000002070000000215112045235201022156 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_GLUE2_H__ #define __ARC_GLUE2_H__ #include #include #include namespace Arc { /// GLUE2 parser /** * This class parses GLUE2 infromation rendeed in XML and transfers * information into various classes representing different types * of objects which GLUE2 information model can describe. * This parser uses GLUE Specification v. 2.0 (GFD-R-P.147). */ class GLUE2 { public: /** * Parses ComputingService elements of GLUE2 into ComputingServiceType objects. * The glue2tree is either XML tree representing ComputingService object * directly or ComputingService objects are immediate children of it. * On exit targets contains ComputingServiceType objects found inside glue2tree. * If targets contained any objects on entry those are not destroyed. 
* * @param glue2tree * @param targets */ static void ParseExecutionTargets(XMLNode glue2tree, std::list& targets); private: static Logger logger; }; } #endif // __ARC_GLUE2_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/test0000644000000000000000000000013213214316023022025 xustar000000000000000030 mtime=1513200659.861745795 30 atime=1513200668.721854157 30 ctime=1513200659.861745795 nordugrid-arc-5.4.2/src/hed/libs/compute/test/0000755000175000002070000000000013214316023022150 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/compute/test/PaxHeaders.7502/TargetInformationRetrieverTest.cpp0000644000000000000000000000012412045235201030772 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.701704 30 ctime=1513200659.861745795 nordugrid-arc-5.4.2/src/hed/libs/compute/test/TargetInformationRetrieverTest.cpp0000644000175000002070000000560212045235201031042 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include //static Arc::Logger testLogger(Arc::Logger::getRootLogger(), "TargetInformationRetrieverTest"); class TargetInformationRetrieverTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(TargetInformationRetrieverTest); CPPUNIT_TEST(PluginLoading); CPPUNIT_TEST(QueryTest); CPPUNIT_TEST(GettingStatusFromUnspecifiedCE); CPPUNIT_TEST_SUITE_END(); public: TargetInformationRetrieverTest() {}; void setUp() {} void tearDown() { Arc::ThreadInitializer().waitExit(); } void PluginLoading(); void QueryTest(); void GettingStatusFromUnspecifiedCE(); }; void TargetInformationRetrieverTest::PluginLoading() { Arc::TargetInformationRetrieverPluginLoader l; Arc::TargetInformationRetrieverPlugin* p = (Arc::TargetInformationRetrieverPlugin*)l.load("TEST"); CPPUNIT_ASSERT(p != NULL); } void TargetInformationRetrieverTest::QueryTest() { Arc::EndpointQueryingStatus sInitial(Arc::EndpointQueryingStatus::SUCCESSFUL); Arc::TargetInformationRetrieverPluginTESTControl::delay = 1; Arc::TargetInformationRetrieverPluginTESTControl::status = sInitial; Arc::TargetInformationRetrieverPluginLoader l; Arc::TargetInformationRetrieverPlugin* p = (Arc::TargetInformationRetrieverPlugin*)l.load("TEST"); CPPUNIT_ASSERT(p != NULL); Arc::UserConfig uc; Arc::Endpoint endpoint; std::list csList; Arc::EndpointQueryingStatus sReturned = p->Query(uc, endpoint, csList, Arc::EndpointQueryOptions()); CPPUNIT_ASSERT(sReturned == Arc::EndpointQueryingStatus::SUCCESSFUL); } void TargetInformationRetrieverTest::GettingStatusFromUnspecifiedCE() { // Arc::Logger logger(Arc::Logger::getRootLogger(), "TIRTest"); // Arc::LogStream logcerr(std::cerr); // logcerr.setFormat(Arc::ShortFormat); // Arc::Logger::getRootLogger().addDestination(logcerr); // Arc::Logger::getRootLogger().setThreshold(Arc::DEBUG); Arc::EndpointQueryingStatus sInitial(Arc::EndpointQueryingStatus::SUCCESSFUL); Arc::TargetInformationRetrieverPluginTESTControl::delay = 0; Arc::TargetInformationRetrieverPluginTESTControl::status = sInitial; Arc::UserConfig uc; Arc::TargetInformationRetriever retriever(uc, Arc::EndpointQueryOptions()); Arc::Endpoint ce("test.nordugrid.org", Arc::Endpoint::COMPUTINGINFO); retriever.addEndpoint(ce); retriever.wait(); Arc::EndpointQueryingStatus status = retriever.getStatusOfEndpoint(ce); // Arc::Logger::getRootLogger().removeDestinations(); CPPUNIT_ASSERT(status == Arc::EndpointQueryingStatus::SUCCESSFUL); } CPPUNIT_TEST_SUITE_REGISTRATION(TargetInformationRetrieverTest); 
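// Illustrative sketch (not part of the original archive): a minimal call to the
// GLUE2 parser declared in GLUE2.h above. The include paths and the header that
// provides ComputingServiceType are assumptions; only GLUE2::ParseExecutionTargets
// and its ComputingServiceType output follow from the declaration and its
// documentation shown earlier.
#include <string>
#include <list>
#include <arc/XMLNode.h>                    // assumed location of Arc::XMLNode
#include <arc/compute/ExecutionTarget.h>    // assumed location of Arc::ComputingServiceType
#include <arc/compute/GLUE2.h>              // assumed install path of the header above

void ParseGlue2Rendering(const std::string& glue2xml) {
  Arc::XMLNode root(glue2xml);                    // XML rendering holding ComputingService element(s)
  std::list<Arc::ComputingServiceType> services;  // parsed services are appended; pre-existing entries are kept
  Arc::GLUE2::ParseExecutionTargets(root, services);
}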
nordugrid-arc-5.4.2/src/hed/libs/compute/test/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712207404076024151 xustar000000000000000027 mtime=1377699902.545279 30 atime=1513200596.671972959 30 ctime=1513200659.844745587 nordugrid-arc-5.4.2/src/hed/libs/compute/test/Makefile.am0000644000175000002070000002026312207404076024216 0ustar00mockbuildmock00000000000000if DBJSTORE_ENABLED CXXFLAGS_WITH_DBJSTORE = $(DBCXX_CPPFLAGS) LIBADD_WITH_DBJSTORE = $(DBCXX_LIBS) else CXXFLAGS_WITH_DBJSTORE = LIBADD_WITH_DBJSTORE = endif TESTS = SoftwareTest JobTest JobInformationStorageTest JobStateTest \ BrokerTest JobDescriptionTest JobControllerPluginTest \ JobDescriptionParserPluginTest SubmitterTest SubmitterPluginTest \ JobSupervisorTest TargetInformationRetrieverTest \ ServiceEndpointRetrieverTest JobListRetrieverTest ExecutionTargetTest \ ComputingServiceUniqTest SubmissionStatusTest check_PROGRAMS = $(TESTS) TESTS_ENVIRONMENT = env ARC_PLUGIN_PATH=$(top_builddir)/src/hed/acc/TEST/.libs SoftwareTest_SOURCES = $(top_srcdir)/src/Test.cpp SoftwareTest.cpp SoftwareTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) SoftwareTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) JobTest_SOURCES = $(top_srcdir)/src/Test.cpp JobTest.cpp JobTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) \ $(CXXFLAGS_WITH_DBJSTORE) $(AM_CXXFLAGS) JobTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(LIBADD_WITH_DBJSTORE) JobInformationStorageTest_SOURCES = $(top_srcdir)/src/Test.cpp \ JobInformationStorageTest.cpp JobInformationStorageTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) JobInformationStorageTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(DBCXX_LIBS) JobDescriptionTest_SOURCES = $(top_srcdir)/src/Test.cpp JobDescriptionTest.cpp JobDescriptionTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) JobDescriptionTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) JobStateTest_SOURCES = $(top_srcdir)/src/Test.cpp JobStateTest.cpp JobStateTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) JobStateTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) BrokerTest_SOURCES = $(top_srcdir)/src/Test.cpp BrokerTest.cpp BrokerTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) BrokerTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) JobControllerPluginTest_SOURCES = $(top_srcdir)/src/Test.cpp JobControllerPluginTest.cpp JobControllerPluginTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) 
$(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) JobControllerPluginTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) JobSupervisorTest_SOURCES = $(top_srcdir)/src/Test.cpp \ JobSupervisorTest.cpp JobSupervisorTest_CXXFLAGS = -I$(top_srcdir)/include\ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) JobSupervisorTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) JobDescriptionParserPluginTest_SOURCES = $(top_srcdir)/src/Test.cpp \ JobDescriptionParserPluginTest.cpp JobDescriptionParserPluginTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) JobDescriptionParserPluginTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) SubmitterTest_SOURCES = $(top_srcdir)/src/Test.cpp SubmitterTest.cpp SubmitterTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) SubmitterTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) SubmitterPluginTest_SOURCES = $(top_srcdir)/src/Test.cpp SubmitterPluginTest.cpp SubmitterPluginTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) SubmitterPluginTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) ServiceEndpointRetrieverTest_SOURCES = $(top_srcdir)/src/Test.cpp \ ServiceEndpointRetrieverTest.cpp ServiceEndpointRetrieverTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) ServiceEndpointRetrieverTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) TargetInformationRetrieverTest_SOURCES = $(top_srcdir)/src/Test.cpp \ TargetInformationRetrieverTest.cpp TargetInformationRetrieverTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) TargetInformationRetrieverTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) JobListRetrieverTest_SOURCES = $(top_srcdir)/src/Test.cpp \ JobListRetrieverTest.cpp JobListRetrieverTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) JobListRetrieverTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) ExecutionTargetTest_SOURCES = $(top_srcdir)/src/Test.cpp \ ExecutionTargetTest.cpp ExecutionTargetTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) ExecutionTargetTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ 
$(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) ComputingServiceUniqTest_SOURCES = $(top_srcdir)/src/Test.cpp \ ComputingServiceUniqTest.cpp ComputingServiceUniqTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) ComputingServiceUniqTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) SubmissionStatusTest_SOURCES = $(top_srcdir)/src/Test.cpp \ SubmissionStatusTest.cpp SubmissionStatusTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) SubmissionStatusTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) #~ EndpointTest_SOURCES = $(top_srcdir)/src/Test.cpp \ #~ EndpointTest.cpp #~ EndpointTest_CXXFLAGS = -I$(top_srcdir)/include \ #~ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) #~ EndpointTest_LDADD = \ #~ $(top_builddir)/src/hed/libs/compute/libarcclient.la \ #~ $(top_builddir)/src/hed/libs/common/libarccommon.la \ #~ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ #~ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) nordugrid-arc-5.4.2/src/hed/libs/compute/test/PaxHeaders.7502/JobInformationStorageTest.cpp0000644000000000000000000000012412243111545027717 xustar000000000000000027 mtime=1384944485.488495 27 atime=1513200574.707704 30 ctime=1513200659.851745673 nordugrid-arc-5.4.2/src/hed/libs/compute/test/JobInformationStorageTest.cpp0000644000175000002070000005405412243111545027774 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #define JISTEST_ASSERT(CONDITION, JISTYPE) \ CPPUNIT_ASSERT_MESSAGE("failed for JobInformationStorage" + JISTYPE, \ CONDITION) #define JISTEST_ASSERT_EQUAL(EXPECTED, ACTUAL, JISTYPE) \ CPPUNIT_ASSERT_EQUAL_MESSAGE("failed for JobInformationStorage" + JISTYPE, \ (EXPECTED), (ACTUAL)) class JobInformationStorageTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(JobInformationStorageTest); CPPUNIT_TEST(GeneralTest); CPPUNIT_TEST(ReadJobsTest); CPPUNIT_TEST_SUITE_END(); public: JobInformationStorageTest(); void setUp(); void tearDown() { Arc::DirDelete(tmpdir, true); } void GeneralTest(); void ReadJobsTest(); private: Arc::XMLNode xmlJob; std::string tmpdir; std::string tmpfile; }; JobInformationStorageTest::JobInformationStorageTest() : xmlJob(Arc::XMLNode("" "https://testbed-emi4.grid.upjs.sk:60000/arex" "org.ogf.glue.emies.resourceinfo" "https://testbed-emi4.grid.upjs.sk:60000/arex" "org.ogf.glue.emies.activitymanagement" "https://testbed-emi4.grid.upjs.sk:60000/arex" "org.ogf.glue.emies.activitymanagement" "https://testbed-emi4.grid.upjs.sk:60000/arex/HiqNDmAiivgnIfnhppWRvMapABFKDmABFKDmQhJKDmCBFKDmmdhHxm" "HiqNDmAiivgnIfnhppWRvMapABFKDmABFKDmQhJKDmCBFKDmmdhHxm" "https://testbed-emi4.grid.upjs.sk:60000/arex" "https://testbed-emi4.grid.upjs.sk:60000/arex" "https://testbed-emi4.grid.upjs.sk:60000/arex" "mc08.1050.J7" "single" "345.ce01" "nordugrid:xrsl" 
"&(executable=\"helloworld.sh\")(arguments=\"random.dat\")(inputfiles=(\"helloworld.sh\")(\"random.dat\"))(stdout=\"helloworld.out\")(join=\"yes\")" "bes:failed" "nordugrid:FAILED" "bes:running" "nordugrid:FINISHING" "0" "0" "Uploading timed out" "Failed stage-out" "0" "vo:atlas" "CONFIDENTIAL" "grid02" "5000" "20000" "4" "ENV/JAVA/JRE-1.6.0" "APPS/HEP/ATLAS-14.2.23.4" "input.dat" "job.out" "err.out" "celog" "wn043" "wn056" "pbs-short" "2893" "12340" "4453" "2008-04-21T10:04:36Z" "2008-04-21T10:05:12Z" "2008-04-20T06:05:12Z" "2008-04-20T06:45:12Z" "2008-04-20T10:05:12Z" "2008-04-20T10:15:12Z" "2008-04-24T10:05:12Z" "2008-04-30T10:05:12Z" "pc4.niif.hu:3432" "nordugrid-arc-0.94" "Cached input file is outdated; downloading again" "User proxy has expired" "" "https://example-ce.com:443/arex/765234" "https://helloworld-ce.com:12345/arex/543678" "" "helloworld.sh" "c0489bec6f7f4454d6cfe1b0a07ad5b8" "" "" "random.dat" "e52b14b10b967d9135c198fd11b9b8bc" "" "" "")) {} void JobInformationStorageTest::setUp() { Arc::TmpDirCreate(tmpdir); tmpfile = Glib::build_filename(tmpdir, "jobs.dat"); } void JobInformationStorageTest::GeneralTest() { Arc::JobInformationStorage* jis = NULL; for (int i = 0; Arc::JobInformationStorage::AVAILABLE_TYPES[i].name != NULL; ++i) { const std::string jisType = Arc::JobInformationStorage::AVAILABLE_TYPES[i].name; jis = (Arc::JobInformationStorage::AVAILABLE_TYPES[i].instance)(tmpfile); JISTEST_ASSERT(jis != NULL, jisType); JISTEST_ASSERT(jis->IsValid(), jisType); std::list inJobs, outJobs; inJobs.push_back(xmlJob); inJobs.back().Name = "Job0"; inJobs.back().JobID = "https://ce00.niif.hu:60000/arex/job0"; inJobs.back().ServiceInformationURL = Arc::URL("https://info00.niif.hu:2135/aris"); inJobs.push_back(xmlJob); inJobs.back().Name = "Job1"; inJobs.back().JobID = "https://ce01.niif.hu:60000/arex/job1"; inJobs.back().ServiceInformationURL = Arc::URL("https://info01.niif.hu:2135/aris"); inJobs.push_back(xmlJob); inJobs.back().Name = "Job2"; inJobs.back().JobID = "https://ce01.niif.hu:60000/arex/job2"; inJobs.back().ServiceInformationURL = Arc::URL("https://info01.niif.hu:2135/aris"); inJobs.push_back(xmlJob); inJobs.back().Name = "Job3"; inJobs.back().JobID = "https://ce01.niif.hu:60000/arex/job3"; inJobs.back().ServiceInformationURL = Arc::URL("https://info01.niif.hu:2135/aris"); inJobs.push_back(xmlJob); inJobs.back().Name = "Other Job"; inJobs.back().JobID = "https://ce-other.niif.hu:60000/arex/other-job"; inJobs.back().ServiceInformationURL = Arc::URL("https://info-other.niif.hu:2135/aris"); // Write and read jobs. 
JISTEST_ASSERT(jis->Clean(), jisType); JISTEST_ASSERT(jis->Write(inJobs), jisType); JISTEST_ASSERT(jis->ReadAll(outJobs), jisType); JISTEST_ASSERT_EQUAL(5, (int)outJobs.size(), jisType); { std::set jobNames; jobNames.insert("Job0"); jobNames.insert("Job1"); jobNames.insert("Job2"); jobNames.insert("Job3"); jobNames.insert("Other Job"); for (std::list::const_iterator itJ = outJobs.begin(); itJ != outJobs.end(); ++itJ) { CPPUNIT_ASSERT_EQUAL_MESSAGE("JobInformationStorage" + jisType + ": Job with name \"" + itJ->Name + "\" was unexpected" , 1, (int)jobNames.erase(itJ->Name)); } } inJobs.clear(); std::set prunedServices; prunedServices.insert("info01.niif.hu"); prunedServices.insert("info02.niif.hu"); inJobs.push_back(xmlJob); inJobs.back().Name = "Job4"; inJobs.back().JobID = "https://ce02.niif.hu:60000/arex/job4"; inJobs.back().ServiceInformationURL = Arc::URL("https://info02.niif.hu:2135/aris"); inJobs.push_back(xmlJob); inJobs.back().Name = "Job2"; inJobs.back().JobID = "https://ce01.niif.hu:60000/arex/job2"; inJobs.back().ServiceInformationURL = Arc::URL("https://info01.niif.hu:2135/aris"); // Check that pointers to new jobs are added to the list, and that jobs on services specified to be pruned are removed. std::list newJobs; JISTEST_ASSERT(jis->Write(inJobs, prunedServices, newJobs), jisType); JISTEST_ASSERT_EQUAL(1, (int)newJobs.size(), jisType); JISTEST_ASSERT_EQUAL((std::string)"Job4", newJobs.front()->Name, jisType); JISTEST_ASSERT_EQUAL((std::string)"https://ce02.niif.hu:60000/arex/job4", newJobs.front()->JobID, jisType); JISTEST_ASSERT(jis->ReadAll(outJobs), jisType); JISTEST_ASSERT_EQUAL(4, (int)outJobs.size(), jisType); { std::set jobNames; jobNames.insert("Job0"); jobNames.insert("Job2"); jobNames.insert("Job4"); jobNames.insert("Other Job"); for (std::list::const_iterator itJ = outJobs.begin(); itJ != outJobs.end(); ++itJ) { CPPUNIT_ASSERT_EQUAL_MESSAGE("JobInformationStorage" + jisType + ": Job with name \"" + itJ->Name + "\" was unexpected" , 1, (int)jobNames.erase(itJ->Name)); } } // Check whether file is truncated. JISTEST_ASSERT(jis->Clean(), jisType); JISTEST_ASSERT(jis->Write(inJobs), jisType); JISTEST_ASSERT(jis->ReadAll(outJobs), jisType); JISTEST_ASSERT_EQUAL(2, (int)outJobs.size(), jisType); if ("https://ce02.niif.hu:60000/arex/job4" == outJobs.front().JobID) { JISTEST_ASSERT_EQUAL((std::string)"Job4", outJobs.front().Name, jisType); JISTEST_ASSERT_EQUAL((std::string)"Job2", outJobs.back().Name, jisType); JISTEST_ASSERT_EQUAL((std::string)"https://ce01.niif.hu:60000/arex/job2", outJobs.back().JobID, jisType); } else if ("https://ce01.niif.hu:60000/arex/job2" == outJobs.front().JobID) { JISTEST_ASSERT_EQUAL((std::string)"Job2", outJobs.front().Name, jisType); JISTEST_ASSERT_EQUAL((std::string)"Job4", outJobs.back().Name, jisType); JISTEST_ASSERT_EQUAL((std::string)"https://ce02.niif.hu:60000/arex/job4", outJobs.back().JobID, jisType); } else { CPPUNIT_FAIL(( "JobInformationStorage" + jisType + "\n" "- Expected: \"https://ce01.niif.hu:60000/arex/job2\" or \"https://ce02.niif.hu:60000/arex/job4\"\n" "- Actual: \"" + outJobs.front().JobID + "\"").c_str()); } inJobs.push_back(xmlJob); inJobs.back().Name = "Job5"; inJobs.back().JobID = "https://ce01.niif.hu:60000/arex/job5"; inJobs.push_back(xmlJob); inJobs.back().Name = "Job6"; inJobs.back().JobID = "https://ce01.niif.hu:60000/arex/job6"; inJobs.push_back(inJobs.back()); inJobs.back().Name = "Job6New"; // Duplicate jobs will be overwritten. 
JISTEST_ASSERT(jis->Clean(), jisType); JISTEST_ASSERT(jis->Write(inJobs), jisType); JISTEST_ASSERT(jis->ReadAll(outJobs), jisType); JISTEST_ASSERT_EQUAL(4, (int)outJobs.size(), jisType); bool job6NewExists = false; for (std::list::const_iterator itJob = outJobs.begin(); itJob != outJobs.end(); ++itJob) { JISTEST_ASSERT(itJob->Name != "Job6", jisType); if (itJob->Name == "Job6New") job6NewExists = true; } JISTEST_ASSERT(job6NewExists, jisType); // Truncate file. JISTEST_ASSERT(jis->Clean(), jisType); newJobs.clear(); JISTEST_ASSERT(jis->Write(inJobs, std::set(), newJobs), jisType); JISTEST_ASSERT_EQUAL(4, (int)newJobs.size(), jisType); JISTEST_ASSERT(jis->ReadAll(outJobs), jisType); JISTEST_ASSERT_EQUAL(4, (int)outJobs.size(), jisType); job6NewExists = false; for (std::list::const_iterator itJob = outJobs.begin(); itJob != outJobs.end(); ++itJob) { JISTEST_ASSERT(itJob->Name != "Job6", jisType); if (itJob->Name == "Job6New") job6NewExists = true; } JISTEST_ASSERT(job6NewExists, jisType); inJobs.pop_back(); // Adding more jobs to file. JISTEST_ASSERT(jis->Clean(), jisType); JISTEST_ASSERT(jis->Write(inJobs), jisType); JISTEST_ASSERT(jis->ReadAll(outJobs), jisType); JISTEST_ASSERT_EQUAL(4, (int)outJobs.size(), jisType); std::list toberemoved; toberemoved.push_back("https://ce02.niif.hu:60000/arex/job4"); toberemoved.push_back("https://ce01.niif.hu:60000/arex/job5"); // Check whether jobs are removed correctly. JISTEST_ASSERT(jis->Remove(toberemoved), jisType); JISTEST_ASSERT(jis->ReadAll(outJobs), jisType); JISTEST_ASSERT_EQUAL(2, (int)outJobs.size(), jisType); JISTEST_ASSERT_EQUAL((std::string)"Job2", outJobs.front().Name, jisType); JISTEST_ASSERT_EQUAL((std::string)"https://ce01.niif.hu:60000/arex/job2", outJobs.front().JobID, jisType); JISTEST_ASSERT_EQUAL((std::string)"Job6", outJobs.back().Name, jisType); JISTEST_ASSERT_EQUAL((std::string)"https://ce01.niif.hu:60000/arex/job6", outJobs.back().JobID, jisType); remove(tmpfile.c_str()); delete jis; } } void JobInformationStorageTest::ReadJobsTest() { Arc::JobInformationStorage* jis = NULL; for (int i = 0; Arc::JobInformationStorage::AVAILABLE_TYPES[i].name != NULL; ++i) { const std::string jisType = Arc::JobInformationStorage::AVAILABLE_TYPES[i].name; jis = (Arc::JobInformationStorage::AVAILABLE_TYPES[i].instance)(tmpfile); JISTEST_ASSERT(jis != NULL, jisType); JISTEST_ASSERT(jis->IsValid(), jisType); std::list inJobs, outJobs; // Check if jobs are read when specified by the jobIdentifiers argument. // Also check that the jobIdentifiers list is modified according to found jobs. 
{ inJobs.push_back(Arc::Job()); inJobs.back().Name = "foo-job-1"; inJobs.back().JobID = "https://ce.grid.org/1234567890-foo-job-1"; inJobs.back().IDFromEndpoint = "1234567890-foo-job-1"; inJobs.back().ServiceInformationURL = Arc::URL("https://ce.grid.org/"); inJobs.back().ServiceInformationInterfaceName = "org.nordugrid.test"; inJobs.back().JobStatusURL = Arc::URL("https://ce.grid.org/"); inJobs.back().JobStatusInterfaceName = "org.nordugrid.test"; inJobs.back().JobManagementURL = Arc::URL("https://ce.grid.org/"); inJobs.back().JobManagementInterfaceName = "org.nordugrid.test"; inJobs.push_back(Arc::Job()); inJobs.back().Name = "foo-job-2"; inJobs.back().JobID = "https://ce.grid.org/1234567890-foo-job-2"; inJobs.back().IDFromEndpoint = "1234567890-foo-job-2"; inJobs.back().ServiceInformationURL = Arc::URL("https://ce.grid.org/"); inJobs.back().ServiceInformationInterfaceName = "org.nordugrid.test"; inJobs.back().JobStatusURL = Arc::URL("https://ce.grid.org/"); inJobs.back().JobStatusInterfaceName = "org.nordugrid.test"; inJobs.back().JobManagementURL = Arc::URL("https://ce.grid.org/"); inJobs.back().JobManagementInterfaceName = "org.nordugrid.test"; inJobs.push_back(Arc::Job()); inJobs.back().Name = "foo-job-2"; inJobs.back().JobID = "https://ce.grid.org/0987654321-foo-job-2"; inJobs.back().IDFromEndpoint = "0987654321-foo-job-2"; inJobs.back().ServiceInformationURL = Arc::URL("https://ce.grid.org/"); inJobs.back().ServiceInformationInterfaceName = "org.nordugrid.test"; inJobs.back().JobStatusURL = Arc::URL("https://ce.grid.org/"); inJobs.back().JobStatusInterfaceName = "org.nordugrid.test"; inJobs.back().JobManagementURL = Arc::URL("https://ce.grid.org/"); inJobs.back().JobManagementInterfaceName = "org.nordugrid.test"; inJobs.push_back(Arc::Job()); inJobs.back().Name = "foo-job-3"; inJobs.back().JobID = "https://ce.grid.org/1234567890-foo-job-3"; inJobs.back().IDFromEndpoint = "1234567890-foo-job-3"; inJobs.back().ServiceInformationURL = Arc::URL("https://ce.grid.org/"); inJobs.back().ServiceInformationInterfaceName = "org.nordugrid.test"; inJobs.back().JobStatusURL = Arc::URL("https://ce.grid.org/"); inJobs.back().JobStatusInterfaceName = "org.nordugrid.test"; inJobs.back().JobManagementURL = Arc::URL("https://ce.grid.org/"); inJobs.back().JobManagementInterfaceName = "org.nordugrid.test"; JISTEST_ASSERT(jis->Clean(), jisType); JISTEST_ASSERT(jis->Write(inJobs), jisType); std::list jobIdentifiers; jobIdentifiers.push_back("https://ce.grid.org/1234567890-foo-job-1"); // Having the same identifier twice should only result in one Job object being added to the list. 
      jobIdentifiers.push_back("https://ce.grid.org/1234567890-foo-job-1");
      jobIdentifiers.push_back("foo-job-2");
      jobIdentifiers.push_back("nonexistent-job");

      JISTEST_ASSERT(jis->Read(outJobs, jobIdentifiers), jisType);
      JISTEST_ASSERT_EQUAL(3, (int)outJobs.size(), jisType);
      std::list<Arc::Job>::const_iterator itJ = outJobs.begin();
      JISTEST_ASSERT_EQUAL((std::string)"foo-job-1", itJ->Name, jisType);
      JISTEST_ASSERT_EQUAL((std::string)"https://ce.grid.org/1234567890-foo-job-1", itJ->JobID, jisType);
      ++itJ;
      JISTEST_ASSERT_EQUAL((std::string)"foo-job-2", itJ->Name, jisType);
      if ("https://ce.grid.org/1234567890-foo-job-2" == itJ->JobID) {
        ++itJ;
        JISTEST_ASSERT_EQUAL((std::string)"foo-job-2", itJ->Name, jisType);
        JISTEST_ASSERT_EQUAL((std::string)"https://ce.grid.org/0987654321-foo-job-2", itJ->JobID, jisType);
      }
      else if ("https://ce.grid.org/0987654321-foo-job-2" == itJ->JobID) {
        ++itJ;
        JISTEST_ASSERT_EQUAL((std::string)"foo-job-2", itJ->Name, jisType);
        JISTEST_ASSERT_EQUAL((std::string)"https://ce.grid.org/1234567890-foo-job-2", itJ->JobID, jisType);
      }
      else {
        CPPUNIT_FAIL(("JobInformationStorage" + jisType + "\n"
                      "- Expected: \"https://ce.grid.org/1234567890-foo-job-2\" or \"https://ce.grid.org/0987654321-foo-job-2\"\n"
                      "- Actual: \"" + itJ->JobID + "\"").c_str());
      }

      JISTEST_ASSERT_EQUAL(1, (int)jobIdentifiers.size(), jisType);
      JISTEST_ASSERT_EQUAL((std::string)"nonexistent-job", jobIdentifiers.front(), jisType);
    }

    // Check if jobs are read when specified by the endpoints argument.
    // Also check if jobs are read when specified by the rejectEndpoints argument.
    {
      inJobs.clear();

      inJobs.push_back(Arc::Job());
      inJobs.back().Name = "foo-job-1";
      inJobs.back().JobID = "https://ce1.grid.org/1234567890-foo-job-1";
      inJobs.back().IDFromEndpoint = "1234567890-foo-job-1";
      inJobs.back().ServiceInformationURL = Arc::URL("https://ce1.grid.org/");
      inJobs.back().ServiceInformationInterfaceName = "org.nordugrid.test";
      inJobs.back().JobStatusURL = Arc::URL("https://ce1.grid.org/");
      inJobs.back().JobStatusInterfaceName = "org.nordugrid.test";
      inJobs.back().JobManagementURL = Arc::URL("https://ce1.grid.org/");
      inJobs.back().JobManagementInterfaceName = "org.nordugrid.test";

      inJobs.push_back(Arc::Job());
      inJobs.back().Name = "foo-job-2";
      inJobs.back().JobID = "https://ce2.grid.org/1234567890-foo-job-2";
      inJobs.back().IDFromEndpoint = "1234567890-foo-job-2";
      inJobs.back().ServiceInformationURL = Arc::URL("https://ce2.grid.org/");
      inJobs.back().ServiceInformationInterfaceName = "org.nordugrid.test";
      inJobs.back().JobStatusURL = Arc::URL("https://ce2.grid.org/");
      inJobs.back().JobStatusInterfaceName = "org.nordugrid.test";
      inJobs.back().JobManagementURL = Arc::URL("https://ce2.grid.org/");
      inJobs.back().JobManagementInterfaceName = "org.nordugrid.test";

      inJobs.push_back(Arc::Job());
      inJobs.back().Name = "foo-job-3";
      inJobs.back().JobID = "https://ce2.grid.org/1234567890-foo-job-3";
      inJobs.back().IDFromEndpoint = "1234567890-foo-job-3";
      inJobs.back().ServiceInformationURL = Arc::URL("https://ce2.grid.org/");
      inJobs.back().ServiceInformationInterfaceName = "org.nordugrid.test";
      inJobs.back().JobStatusURL = Arc::URL("https://ce2.grid.org/");
      inJobs.back().JobStatusInterfaceName = "org.nordugrid.test";
      inJobs.back().JobManagementURL = Arc::URL("https://ce2.grid.org/");
      inJobs.back().JobManagementInterfaceName = "org.nordugrid.test";

      inJobs.push_back(Arc::Job());
      inJobs.back().Name = "foo-job-4";
      inJobs.back().JobID = "https://ce3.grid.org/1234567890-foo-job-4";
      inJobs.back().IDFromEndpoint = "1234567890-foo-job-4";
inJobs.back().ServiceInformationURL = Arc::URL("https://ce3.grid.org/"); inJobs.back().ServiceInformationInterfaceName = "org.nordugrid.test"; inJobs.back().JobStatusURL = Arc::URL("https://ce3.grid.org/"); inJobs.back().JobStatusInterfaceName = "org.nordugrid.test"; inJobs.back().JobManagementURL = Arc::URL("https://ce3.grid.org/"); inJobs.back().JobManagementInterfaceName = "org.nordugrid.test"; JISTEST_ASSERT(jis->Clean(), jisType); JISTEST_ASSERT(jis->Write(inJobs), jisType); std::list jobIdentifiers, endpoints, rejectEndpoints; endpoints.push_back("ce2.grid.org"); JISTEST_ASSERT(jis->Read(outJobs, jobIdentifiers, endpoints), jisType); JISTEST_ASSERT_EQUAL(2, (int)outJobs.size(), jisType); JISTEST_ASSERT_EQUAL((std::string)"foo-job-2", outJobs.front().Name, jisType); JISTEST_ASSERT_EQUAL((std::string)"https://ce2.grid.org/1234567890-foo-job-2", outJobs.front().JobID, jisType); JISTEST_ASSERT_EQUAL((std::string)"foo-job-3", outJobs.back().Name, jisType); JISTEST_ASSERT_EQUAL((std::string)"https://ce2.grid.org/1234567890-foo-job-3", outJobs.back().JobID, jisType); outJobs.clear(); rejectEndpoints.push_back("ce2.grid.org"); JISTEST_ASSERT(jis->ReadAll(outJobs, rejectEndpoints), jisType); JISTEST_ASSERT_EQUAL(2, (int)outJobs.size(), jisType); JISTEST_ASSERT_EQUAL((std::string)"foo-job-1", outJobs.front().Name, jisType); JISTEST_ASSERT_EQUAL((std::string)"https://ce1.grid.org/1234567890-foo-job-1", outJobs.front().JobID, jisType); JISTEST_ASSERT_EQUAL((std::string)"foo-job-4", outJobs.back().Name, jisType); JISTEST_ASSERT_EQUAL((std::string)"https://ce3.grid.org/1234567890-foo-job-4", outJobs.back().JobID, jisType); } remove(tmpfile.c_str()); delete jis; } } CPPUNIT_TEST_SUITE_REGISTRATION(JobInformationStorageTest); nordugrid-arc-5.4.2/src/hed/libs/compute/test/PaxHeaders.7502/SoftwareTest.cpp0000644000000000000000000000012412057627135025256 xustar000000000000000027 mtime=1354706525.201394 27 atime=1513200574.704704 30 ctime=1513200659.857745746 nordugrid-arc-5.4.2/src/hed/libs/compute/test/SoftwareTest.cpp0000644000175000002070000003154612057627135025334 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #define SV Arc::Software #define SR Arc::SoftwareRequirement class SoftwareTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(SoftwareTest); CPPUNIT_TEST(EqualityTest); CPPUNIT_TEST(ComparisonTest); CPPUNIT_TEST(BasicRequirementsTest); CPPUNIT_TEST(RequirementsAndTest); CPPUNIT_TEST(RequirementsAssignmentTest); CPPUNIT_TEST(RequirementsNotTest); CPPUNIT_TEST(RequirementsGreaterThanTest); CPPUNIT_TEST(RequirementsGreaterThanOrEqualTest); CPPUNIT_TEST(RequirementsLessThanTest); CPPUNIT_TEST(RequirementsLessThanOrEqualTest); CPPUNIT_TEST(ApplicationEnvironmentCastTest); CPPUNIT_TEST_SUITE_END(); public: SoftwareTest() {} void setUp() {} void tearDown() { Arc::ThreadInitializer().waitExit(); } void EqualityTest(); void ComparisonTest(); void BasicRequirementsTest(); void RequirementsAndTest(); void RequirementsAssignmentTest(); void RequirementsNotTest(); void RequirementsGreaterThanTest(); void RequirementsGreaterThanOrEqualTest(); void RequirementsLessThanTest(); void RequirementsLessThanOrEqualTest(); void ApplicationEnvironmentCastTest(); private: std::list versions; }; void SoftwareTest::EqualityTest() { CPPUNIT_ASSERT(SV("XX") == SV("XX")); CPPUNIT_ASSERT(!(SV("XX") == SV("YY"))); CPPUNIT_ASSERT(SV("XX-YY") == SV("XX-YY")); CPPUNIT_ASSERT(SV("XX-YY-1.3") == SV("XX-YY-1.3")); CPPUNIT_ASSERT(SV("XX-YY-1.3") == 
SV("XX-YY", "1.3")); CPPUNIT_ASSERT(!(SV("XX-YY-1.3") == SV("XX-YY"))); CPPUNIT_ASSERT(!(SV("XX-YY-1.3") == SV("XX-YY-1.2"))); } void SoftwareTest::ComparisonTest() { CPPUNIT_ASSERT(!(SV("XX-YY") < SV("XX-YY-ZZ"))); CPPUNIT_ASSERT(!(SV("XX-YY") > SV("XX-YY-ZZ"))); CPPUNIT_ASSERT(!(SV("XX-YY-ZZ") < SV("XX-YY"))); CPPUNIT_ASSERT(!(SV("XX-YY-ZZ") > SV("XX-YY"))); CPPUNIT_ASSERT(!(SV("XX-YY-1.2.3.ZZ.4") > SV("XX-YY-1.2.3.ZZ.4"))); CPPUNIT_ASSERT(!(SV("XX-YY-1.2.3.ZZ.4") < SV("XX-YY-1.2.3.ZZ.4"))); CPPUNIT_ASSERT(SV("XX-YY-1.4.3.ZZ.4") > SV("XX-YY-1.2.3.ZZ.4")); CPPUNIT_ASSERT(SV("XX-YY-1.2.3.ZZ.4") < SV("XX-YY-1.4.3.ZZ.4")); CPPUNIT_ASSERT(SV("XX-YY-1.2.3.ZZ.4") < SV("XX-YY-1.2.3.ZZ.10")); CPPUNIT_ASSERT(SV("XX-YY-1.3.3.ZZ.4") > SV("XX-YY-1.2.7.ZZ.4")); CPPUNIT_ASSERT(!(SV("XX-YY-1.2.3.ZZ1.4") > SV("XX-YY-1.2.3.ZZ2.4"))); CPPUNIT_ASSERT(!(SV("XX-YY-1.2.3.ZZ1.4") < SV("XX-YY-1.2.3.ZZ2.4"))); CPPUNIT_ASSERT(SV("XX-YY-1.2.3") < SV("XX-YY-1-2-3-4")); CPPUNIT_ASSERT(SV("XX-YY-1.2.3.4") > SV("XX-YY-1-2-3")); CPPUNIT_ASSERT(!(SV("XX-YY-1.2.3.4") < SV("XX-YY-1-2-3"))); CPPUNIT_ASSERT(!(SV("XX-YY-1.2.3") > SV("XX-YY-1-2-3-4"))); CPPUNIT_ASSERT(SV("XX-YY-1.2") < SV("XX-YY-1.2.3")); CPPUNIT_ASSERT(SV("XX-YY-1.2.3") > SV("XX-YY-1.2")); CPPUNIT_ASSERT(!(SV("XX-YY-1.2.ZZ") > SV("XX-YY-1.2"))); CPPUNIT_ASSERT(!(SV("XX-YY-1.2.3") < SV("XX-YY-1.2.ZZ"))); CPPUNIT_ASSERT(SV("XX-YY-1.3") > SV("XX-YY-1.2.3")); CPPUNIT_ASSERT(SV("XX-YY-1.2.3") < SV("XX-YY-1.3")); CPPUNIT_ASSERT(SV("XX-YY-1.2.3") > SV("XX-YY-1.2")); CPPUNIT_ASSERT(SV("XX-YY-1.2") < SV("XX-YY-1.2.3")); CPPUNIT_ASSERT(!(SV("XX-YY-1.2") > SV("XX-YY-1.2.3"))); CPPUNIT_ASSERT(!(SV("XX-YY-1.2.3") < SV("XX-YY-1.2"))); CPPUNIT_ASSERT(!(SV("XX-YY-1.2") < SV("XX-YY-1.2.ZZ"))); CPPUNIT_ASSERT(!(SV("XX-YY-1.2") > SV("XX-YY-1.2.ZZ"))); CPPUNIT_ASSERT(!(SV("XX-YY-1.2.ZZ") < SV("XX-YY-1.2"))); CPPUNIT_ASSERT(!(SV("XX-YY-1.2.ZZ") > SV("XX-YY-1.2"))); CPPUNIT_ASSERT(SV("XX-YY-1.3.2") > SV("XX-YY-1.2.3")); CPPUNIT_ASSERT(!(SV("XX-YY-1.3.2") < SV("XX-YY-1.2.3"))); CPPUNIT_ASSERT(SV("XX-YY-1.2.3") < SV("XX-YY-1.3.2")); CPPUNIT_ASSERT(!(SV("XX-YY-1.2.3") > SV("XX-YY-1.3.2"))); } void SoftwareTest::BasicRequirementsTest() { versions.push_back(SV("A-1.03")); CPPUNIT_ASSERT(SR(versions.back()).isSatisfied(versions)); CPPUNIT_ASSERT(!SR(versions.back(), &SV::operator!=).isSatisfied(versions)); CPPUNIT_ASSERT(SR(versions.back(), &SV::operator<=).isSatisfied(versions)); CPPUNIT_ASSERT(SR(versions.back(), &SV::operator>=).isSatisfied(versions)); CPPUNIT_ASSERT(!SR(versions.back(), &SV::operator> ).isSatisfied(versions)); CPPUNIT_ASSERT(!SR(versions.back(), &SV::operator< ).isSatisfied(versions)); CPPUNIT_ASSERT(!SR(SV("A-1.5")).isSatisfied(versions)); CPPUNIT_ASSERT(SR(SV("A-1.5"), &SV::operator !=).isSatisfied(versions)); CPPUNIT_ASSERT(SR(versions.back(), &SV::operator==).isSatisfied(versions)); CPPUNIT_ASSERT(!SR(versions.back(), &SV::operator!=).isSatisfied(versions)); CPPUNIT_ASSERT(SR(versions.back(), &SV::operator<=).isSatisfied(versions)); CPPUNIT_ASSERT(SR(versions.back(), &SV::operator>=).isSatisfied(versions)); CPPUNIT_ASSERT(!SR(versions.back(), &SV::operator>).isSatisfied(versions)); CPPUNIT_ASSERT(!SR(versions.back(), &SV::operator<).isSatisfied(versions)); CPPUNIT_ASSERT(!SR(SV("A-1.5"), &SV::operator==).isSatisfied(versions)); CPPUNIT_ASSERT(SR(SV("A-1.5"), &SV::operator!=).isSatisfied(versions)); } void SoftwareTest::RequirementsAndTest() { SR sr; sr.add(SV("A-1.03"), SV::EQUAL); sr.add(SV("B-2.12"), SV::EQUAL); versions.push_back(SV("A-1.03")); 
versions.push_back(SV("B-2.12")); CPPUNIT_ASSERT(sr.isSatisfied(versions)); CPPUNIT_ASSERT(sr.selectSoftware(versions)); CPPUNIT_ASSERT_EQUAL(2, (int) sr.getSoftwareList().size()); CPPUNIT_ASSERT(sr.isResolved()); versions.clear(); } void SoftwareTest::RequirementsAssignmentTest() { SR sr; versions.push_back(SV("A-1.3")); versions.push_back(SV("A-1.4")); versions.push_back(SV("A-1.5")); versions.push_back(SV("A-2.1")); versions.push_back(SV("A-2.2")); versions.push_back(SV("A-2.3")); sr.add(SV("A-1.3"), SV::EQUAL); CPPUNIT_ASSERT(sr.isSatisfied(versions)); CPPUNIT_ASSERT(sr.selectSoftware(versions)); CPPUNIT_ASSERT_EQUAL(1, (int) sr.getSoftwareList().size()); CPPUNIT_ASSERT_EQUAL(SV("A-1.3"), sr.getSoftwareList().back()); CPPUNIT_ASSERT(sr.isResolved()); sr.clear(); sr.add(SV("A-1.2"), SV::EQUAL); CPPUNIT_ASSERT(!sr.isSatisfied(versions)); CPPUNIT_ASSERT(!sr.selectSoftware(versions)); sr.clear(); sr.add(SV("A-1.3"), SV::EQUAL); sr.add(SV("A-2.4"), SV::EQUAL); CPPUNIT_ASSERT(!sr.isSatisfied(versions)); CPPUNIT_ASSERT(!sr.selectSoftware(versions)); CPPUNIT_ASSERT_EQUAL(2, (int) sr.getSoftwareList().size()); CPPUNIT_ASSERT_EQUAL(SV("A-1.3"), sr.getSoftwareList().front()); CPPUNIT_ASSERT_EQUAL(SV("A-2.4"), sr.getSoftwareList().back()); sr.clear(); } void SoftwareTest::RequirementsNotTest() { SR sr; sr.add(SV("A-1.2"), &SV::operator!=); versions.push_back(SV("A-1.3")); versions.push_back(SV("B-4.3")); CPPUNIT_ASSERT(sr.isSatisfied(versions)); CPPUNIT_ASSERT(sr.selectSoftware(versions)); CPPUNIT_ASSERT(sr.isResolved()); sr.add(SV("A-1.2"), &SV::operator!=); versions.push_back(SV("A-1.2")); CPPUNIT_ASSERT(!sr.isSatisfied(versions)); CPPUNIT_ASSERT(!sr.selectSoftware(versions)); CPPUNIT_ASSERT(!sr.isResolved()); } void SoftwareTest::RequirementsGreaterThanTest() { SR sr; sr.add(SV("A-1.3"), &Arc::Software::operator>); // A-1.2 > A-1.3 => false. versions.push_back(SV("A-1.2")); CPPUNIT_ASSERT(!sr.isSatisfied(versions)); CPPUNIT_ASSERT(!sr.selectSoftware(versions)); CPPUNIT_ASSERT(!sr.isResolved()); // {A-1.2 , A-1.3} > A-1.3 => false. versions.push_back(SV("A-1.3")); CPPUNIT_ASSERT(!sr.isSatisfied(versions)); CPPUNIT_ASSERT(!sr.selectSoftware(versions)); CPPUNIT_ASSERT(!sr.isResolved()); // {A-1.2 , A-1.3 , A-1.4} > A-1.3 => true. versions.push_back(SV("A-1.4")); CPPUNIT_ASSERT(sr.isSatisfied(versions)); CPPUNIT_ASSERT(sr.selectSoftware(versions)); // A-1.4 should be selected. CPPUNIT_ASSERT_EQUAL(1, (int) sr.getSoftwareList().size()); CPPUNIT_ASSERT_EQUAL(SV("A-1.4"), sr.getSoftwareList().front()); CPPUNIT_ASSERT(sr.isResolved()); sr.clear(); sr.add(SV("A-1.3"), &Arc::Software::operator>); // {A-1.2 , A-1.3 , A-1.4, A-1.5} > A-1.3 => true. versions.push_back(SV("A-1.5")); CPPUNIT_ASSERT(sr.isSatisfied(versions)); CPPUNIT_ASSERT(sr.selectSoftware(versions)); // A-1.5 should be selected. CPPUNIT_ASSERT_EQUAL(1, (int) sr.getSoftwareList().size()); CPPUNIT_ASSERT_EQUAL(SV("A-1.5"), sr.getSoftwareList().front()); CPPUNIT_ASSERT(sr.isResolved()); sr.clear(); sr.add(SV("A-1"), &Arc::Software::operator>); // {A-1.2 , A-1.3 , A-1.4, A-1.5} > A => true. CPPUNIT_ASSERT(sr.isSatisfied(versions)); CPPUNIT_ASSERT(sr.selectSoftware(versions)); // A-1.5 should be selected. CPPUNIT_ASSERT_EQUAL(1, (int) sr.getSoftwareList().size()); CPPUNIT_ASSERT_EQUAL(SV("A-1.5"), sr.getSoftwareList().front()); CPPUNIT_ASSERT(sr.isResolved()); sr.clear(); sr.add(SV("A"), &Arc::Software::operator>); // {A-1.2 , A-1.3 , A-1.4, A-1.5} > A => true. 
CPPUNIT_ASSERT(sr.isSatisfied(versions)); CPPUNIT_ASSERT(sr.selectSoftware(versions)); // A-1.5 should be selected. CPPUNIT_ASSERT_EQUAL(1, (int) sr.getSoftwareList().size()); CPPUNIT_ASSERT_EQUAL(SV("A-1.5"), sr.getSoftwareList().front()); CPPUNIT_ASSERT(sr.isResolved()); } void SoftwareTest::RequirementsGreaterThanOrEqualTest() { SR sr; sr.add(SV("A-1.3"), &Arc::Software::operator>=); // A-1.2 >= A-1.3 => false. versions.push_back(SV("A-1.2")); CPPUNIT_ASSERT(!sr.isSatisfied(versions)); CPPUNIT_ASSERT(!sr.selectSoftware(versions)); CPPUNIT_ASSERT(!sr.isResolved()); // {A-1.2 , A-1.3} >= A-1.3 => true. versions.push_back(SV("A-1.3")); CPPUNIT_ASSERT(sr.isSatisfied(versions)); CPPUNIT_ASSERT(sr.selectSoftware(versions)); // A-1.3 should be selected. CPPUNIT_ASSERT_EQUAL(1, (int) sr.getSoftwareList().size()); CPPUNIT_ASSERT_EQUAL(SV("A-1.3"), sr.getSoftwareList().front()); CPPUNIT_ASSERT(sr.isResolved()); sr.clear(); sr.add(SV("A-1.3"), &Arc::Software::operator>=); // {A-1.2 , A-1.3 , A-1.4} >= A-1.3 => true. versions.push_back(SV("A-1.4")); CPPUNIT_ASSERT(sr.isSatisfied(versions)); CPPUNIT_ASSERT(sr.selectSoftware(versions)); // A-1.4 should be selected. CPPUNIT_ASSERT_EQUAL(1, (int) sr.getSoftwareList().size()); CPPUNIT_ASSERT_EQUAL(SV("A-1.4"), sr.getSoftwareList().front()); CPPUNIT_ASSERT(sr.isResolved()); } void SoftwareTest::RequirementsLessThanTest() { SR sr; sr.add(SV("A-1.3"), &Arc::Software::operator<); // A-1.4 < A-1.3 => false. versions.push_back(SV("A-1.4")); CPPUNIT_ASSERT(!sr.isSatisfied(versions)); CPPUNIT_ASSERT(!sr.selectSoftware(versions)); CPPUNIT_ASSERT(!sr.isResolved()); // {A-1.4 , A-1.3} < A-1.3 => false. versions.push_back(SV("A-1.3")); CPPUNIT_ASSERT(!sr.isSatisfied(versions)); CPPUNIT_ASSERT(!sr.selectSoftware(versions)); CPPUNIT_ASSERT(!sr.isResolved()); // {A-1.4 , A-1.3 , A-1.2} < A-1.3 => true. versions.push_back(SV("A-1.2")); CPPUNIT_ASSERT(sr.isSatisfied(versions)); CPPUNIT_ASSERT(sr.selectSoftware(versions)); // A-1.2 should be selected. CPPUNIT_ASSERT_EQUAL(1, (int) sr.getSoftwareList().size()); CPPUNIT_ASSERT_EQUAL(SV("A-1.2"), sr.getSoftwareList().front()); CPPUNIT_ASSERT(sr.isResolved()); sr.clear(); sr.add(SV("A-1.3"), &Arc::Software::operator<); // {A-1.4 , A-1.3 , A-1.2, A-1.1} < A-1.3 => true. versions.push_back(SV("A-1.1")); CPPUNIT_ASSERT(sr.isSatisfied(versions)); CPPUNIT_ASSERT(sr.selectSoftware(versions)); // A-1.2 should be selected. CPPUNIT_ASSERT_EQUAL(1, (int) sr.getSoftwareList().size()); CPPUNIT_ASSERT_EQUAL(SV("A-1.2"), sr.getSoftwareList().front()); CPPUNIT_ASSERT(sr.isResolved()); } void SoftwareTest::RequirementsLessThanOrEqualTest() { SR sr; sr.add(SV("A-1.3"), &Arc::Software::operator<=); // A-1.4 <= A-1.3 => false. versions.push_back(SV("A-1.4")); CPPUNIT_ASSERT(!sr.isSatisfied(versions)); CPPUNIT_ASSERT(!sr.selectSoftware(versions)); CPPUNIT_ASSERT(!sr.isResolved()); // {A-1.4 , A-1.3} <= A-1.3 => true. versions.push_back(SV("A-1.3")); CPPUNIT_ASSERT(sr.isSatisfied(versions)); CPPUNIT_ASSERT(sr.selectSoftware(versions)); // A-1.3 should be selected. CPPUNIT_ASSERT_EQUAL(1, (int) sr.getSoftwareList().size()); CPPUNIT_ASSERT_EQUAL(SV("A-1.3"), sr.getSoftwareList().front()); CPPUNIT_ASSERT(sr.isResolved()); sr.clear(); sr.add(SV("A-1.3"), &Arc::Software::operator<=); // {A-1.4 , A-1.3 , A-1.2} <= A-1.3 => true. versions.push_back(SV("A-1.2")); CPPUNIT_ASSERT(sr.isSatisfied(versions)); CPPUNIT_ASSERT(sr.selectSoftware(versions)); // A-1.3 should be selected. 
CPPUNIT_ASSERT_EQUAL(1, (int) sr.getSoftwareList().size()); CPPUNIT_ASSERT_EQUAL(SV("A-1.3"), sr.getSoftwareList().front()); CPPUNIT_ASSERT(sr.isResolved()); } void SoftwareTest::ApplicationEnvironmentCastTest() { std::list appEnvs(1, Arc::ApplicationEnvironment("TEST", "1.0")); const std::list* sw = reinterpret_cast< const std::list* >(&appEnvs); CPPUNIT_ASSERT_EQUAL(static_cast(appEnvs.front()), sw->front()); } CPPUNIT_TEST_SUITE_REGISTRATION(SoftwareTest); nordugrid-arc-5.4.2/src/hed/libs/compute/test/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315724024156 xustar000000000000000030 mtime=1513200596.810974659 30 atime=1513200647.814598453 30 ctime=1513200659.845745599 nordugrid-arc-5.4.2/src/hed/libs/compute/test/Makefile.in0000644000175000002070000037101113214315724024227 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ TESTS = SoftwareTest$(EXEEXT) JobTest$(EXEEXT) \ JobInformationStorageTest$(EXEEXT) JobStateTest$(EXEEXT) \ BrokerTest$(EXEEXT) JobDescriptionTest$(EXEEXT) \ JobControllerPluginTest$(EXEEXT) \ JobDescriptionParserPluginTest$(EXEEXT) SubmitterTest$(EXEEXT) \ SubmitterPluginTest$(EXEEXT) JobSupervisorTest$(EXEEXT) \ TargetInformationRetrieverTest$(EXEEXT) \ ServiceEndpointRetrieverTest$(EXEEXT) \ JobListRetrieverTest$(EXEEXT) ExecutionTargetTest$(EXEEXT) \ ComputingServiceUniqTest$(EXEEXT) \ SubmissionStatusTest$(EXEEXT) check_PROGRAMS = $(am__EXEEXT_1) subdir = src/hed/libs/compute/test DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__EXEEXT_1 = 
SoftwareTest$(EXEEXT) JobTest$(EXEEXT) \ JobInformationStorageTest$(EXEEXT) JobStateTest$(EXEEXT) \ BrokerTest$(EXEEXT) JobDescriptionTest$(EXEEXT) \ JobControllerPluginTest$(EXEEXT) \ JobDescriptionParserPluginTest$(EXEEXT) SubmitterTest$(EXEEXT) \ SubmitterPluginTest$(EXEEXT) JobSupervisorTest$(EXEEXT) \ TargetInformationRetrieverTest$(EXEEXT) \ ServiceEndpointRetrieverTest$(EXEEXT) \ JobListRetrieverTest$(EXEEXT) ExecutionTargetTest$(EXEEXT) \ ComputingServiceUniqTest$(EXEEXT) \ SubmissionStatusTest$(EXEEXT) am_BrokerTest_OBJECTS = BrokerTest-Test.$(OBJEXT) \ BrokerTest-BrokerTest.$(OBJEXT) BrokerTest_OBJECTS = $(am_BrokerTest_OBJECTS) am__DEPENDENCIES_1 = BrokerTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) BrokerTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(BrokerTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_ComputingServiceUniqTest_OBJECTS = \ ComputingServiceUniqTest-Test.$(OBJEXT) \ ComputingServiceUniqTest-ComputingServiceUniqTest.$(OBJEXT) ComputingServiceUniqTest_OBJECTS = \ $(am_ComputingServiceUniqTest_OBJECTS) ComputingServiceUniqTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) ComputingServiceUniqTest_LINK = $(LIBTOOL) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(ComputingServiceUniqTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_ExecutionTargetTest_OBJECTS = ExecutionTargetTest-Test.$(OBJEXT) \ ExecutionTargetTest-ExecutionTargetTest.$(OBJEXT) ExecutionTargetTest_OBJECTS = $(am_ExecutionTargetTest_OBJECTS) ExecutionTargetTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) ExecutionTargetTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(ExecutionTargetTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_JobControllerPluginTest_OBJECTS = \ JobControllerPluginTest-Test.$(OBJEXT) \ JobControllerPluginTest-JobControllerPluginTest.$(OBJEXT) JobControllerPluginTest_OBJECTS = \ $(am_JobControllerPluginTest_OBJECTS) JobControllerPluginTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) JobControllerPluginTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(JobControllerPluginTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_JobDescriptionParserPluginTest_OBJECTS = \ JobDescriptionParserPluginTest-Test.$(OBJEXT) \ JobDescriptionParserPluginTest-JobDescriptionParserPluginTest.$(OBJEXT) JobDescriptionParserPluginTest_OBJECTS = \ $(am_JobDescriptionParserPluginTest_OBJECTS) JobDescriptionParserPluginTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) 
JobDescriptionParserPluginTest_LINK = $(LIBTOOL) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(JobDescriptionParserPluginTest_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_JobDescriptionTest_OBJECTS = JobDescriptionTest-Test.$(OBJEXT) \ JobDescriptionTest-JobDescriptionTest.$(OBJEXT) JobDescriptionTest_OBJECTS = $(am_JobDescriptionTest_OBJECTS) JobDescriptionTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) JobDescriptionTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(JobDescriptionTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_JobInformationStorageTest_OBJECTS = \ JobInformationStorageTest-Test.$(OBJEXT) \ JobInformationStorageTest-JobInformationStorageTest.$(OBJEXT) JobInformationStorageTest_OBJECTS = \ $(am_JobInformationStorageTest_OBJECTS) JobInformationStorageTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) JobInformationStorageTest_LINK = $(LIBTOOL) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(JobInformationStorageTest_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_JobListRetrieverTest_OBJECTS = JobListRetrieverTest-Test.$(OBJEXT) \ JobListRetrieverTest-JobListRetrieverTest.$(OBJEXT) JobListRetrieverTest_OBJECTS = $(am_JobListRetrieverTest_OBJECTS) JobListRetrieverTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) JobListRetrieverTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(JobListRetrieverTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_JobStateTest_OBJECTS = JobStateTest-Test.$(OBJEXT) \ JobStateTest-JobStateTest.$(OBJEXT) JobStateTest_OBJECTS = $(am_JobStateTest_OBJECTS) JobStateTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) JobStateTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(JobStateTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_JobSupervisorTest_OBJECTS = JobSupervisorTest-Test.$(OBJEXT) \ JobSupervisorTest-JobSupervisorTest.$(OBJEXT) JobSupervisorTest_OBJECTS = $(am_JobSupervisorTest_OBJECTS) JobSupervisorTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) JobSupervisorTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(JobSupervisorTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_JobTest_OBJECTS = JobTest-Test.$(OBJEXT) JobTest-JobTest.$(OBJEXT) JobTest_OBJECTS = $(am_JobTest_OBJECTS) @DBJSTORE_ENABLED_TRUE@am__DEPENDENCIES_2 = $(am__DEPENDENCIES_1) JobTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ 
$(am__DEPENDENCIES_1) $(am__DEPENDENCIES_2) JobTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(JobTest_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_ServiceEndpointRetrieverTest_OBJECTS = \ ServiceEndpointRetrieverTest-Test.$(OBJEXT) \ ServiceEndpointRetrieverTest-ServiceEndpointRetrieverTest.$(OBJEXT) ServiceEndpointRetrieverTest_OBJECTS = \ $(am_ServiceEndpointRetrieverTest_OBJECTS) ServiceEndpointRetrieverTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) ServiceEndpointRetrieverTest_LINK = $(LIBTOOL) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(ServiceEndpointRetrieverTest_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_SoftwareTest_OBJECTS = SoftwareTest-Test.$(OBJEXT) \ SoftwareTest-SoftwareTest.$(OBJEXT) SoftwareTest_OBJECTS = $(am_SoftwareTest_OBJECTS) SoftwareTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) SoftwareTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(SoftwareTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_SubmissionStatusTest_OBJECTS = SubmissionStatusTest-Test.$(OBJEXT) \ SubmissionStatusTest-SubmissionStatusTest.$(OBJEXT) SubmissionStatusTest_OBJECTS = $(am_SubmissionStatusTest_OBJECTS) SubmissionStatusTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) SubmissionStatusTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(SubmissionStatusTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_SubmitterPluginTest_OBJECTS = SubmitterPluginTest-Test.$(OBJEXT) \ SubmitterPluginTest-SubmitterPluginTest.$(OBJEXT) SubmitterPluginTest_OBJECTS = $(am_SubmitterPluginTest_OBJECTS) SubmitterPluginTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) SubmitterPluginTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(SubmitterPluginTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_SubmitterTest_OBJECTS = SubmitterTest-Test.$(OBJEXT) \ SubmitterTest-SubmitterTest.$(OBJEXT) SubmitterTest_OBJECTS = $(am_SubmitterTest_OBJECTS) SubmitterTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) SubmitterTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(SubmitterTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_TargetInformationRetrieverTest_OBJECTS = \ TargetInformationRetrieverTest-Test.$(OBJEXT) \ TargetInformationRetrieverTest-TargetInformationRetrieverTest.$(OBJEXT) TargetInformationRetrieverTest_OBJECTS = \ $(am_TargetInformationRetrieverTest_OBJECTS) TargetInformationRetrieverTest_DEPENDENCIES = \ 
$(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) TargetInformationRetrieverTest_LINK = $(LIBTOOL) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(TargetInformationRetrieverTest_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(BrokerTest_SOURCES) $(ComputingServiceUniqTest_SOURCES) \ $(ExecutionTargetTest_SOURCES) \ $(JobControllerPluginTest_SOURCES) \ $(JobDescriptionParserPluginTest_SOURCES) \ $(JobDescriptionTest_SOURCES) \ $(JobInformationStorageTest_SOURCES) \ $(JobListRetrieverTest_SOURCES) $(JobStateTest_SOURCES) \ $(JobSupervisorTest_SOURCES) $(JobTest_SOURCES) \ $(ServiceEndpointRetrieverTest_SOURCES) \ $(SoftwareTest_SOURCES) $(SubmissionStatusTest_SOURCES) \ $(SubmitterPluginTest_SOURCES) $(SubmitterTest_SOURCES) \ $(TargetInformationRetrieverTest_SOURCES) DIST_SOURCES = $(BrokerTest_SOURCES) \ $(ComputingServiceUniqTest_SOURCES) \ $(ExecutionTargetTest_SOURCES) \ $(JobControllerPluginTest_SOURCES) \ $(JobDescriptionParserPluginTest_SOURCES) \ $(JobDescriptionTest_SOURCES) \ $(JobInformationStorageTest_SOURCES) \ $(JobListRetrieverTest_SOURCES) $(JobStateTest_SOURCES) \ $(JobSupervisorTest_SOURCES) $(JobTest_SOURCES) \ $(ServiceEndpointRetrieverTest_SOURCES) \ $(SoftwareTest_SOURCES) $(SubmissionStatusTest_SOURCES) \ $(SubmitterPluginTest_SOURCES) $(SubmitterTest_SOURCES) \ $(TargetInformationRetrieverTest_SOURCES) ETAGS = etags CTAGS = ctags am__tty_colors = \ red=; grn=; lgn=; blu=; std= DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ 
ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = 
@MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = 
@prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @DBJSTORE_ENABLED_FALSE@CXXFLAGS_WITH_DBJSTORE = @DBJSTORE_ENABLED_TRUE@CXXFLAGS_WITH_DBJSTORE = $(DBCXX_CPPFLAGS) @DBJSTORE_ENABLED_FALSE@LIBADD_WITH_DBJSTORE = @DBJSTORE_ENABLED_TRUE@LIBADD_WITH_DBJSTORE = $(DBCXX_LIBS) TESTS_ENVIRONMENT = env ARC_PLUGIN_PATH=$(top_builddir)/src/hed/acc/TEST/.libs SoftwareTest_SOURCES = $(top_srcdir)/src/Test.cpp SoftwareTest.cpp SoftwareTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) SoftwareTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) JobTest_SOURCES = $(top_srcdir)/src/Test.cpp JobTest.cpp JobTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) \ $(CXXFLAGS_WITH_DBJSTORE) $(AM_CXXFLAGS) JobTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(LIBADD_WITH_DBJSTORE) JobInformationStorageTest_SOURCES = $(top_srcdir)/src/Test.cpp \ JobInformationStorageTest.cpp JobInformationStorageTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(DBCXX_CPPFLAGS) $(AM_CXXFLAGS) JobInformationStorageTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(DBCXX_LIBS) JobDescriptionTest_SOURCES = $(top_srcdir)/src/Test.cpp JobDescriptionTest.cpp JobDescriptionTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) JobDescriptionTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) JobStateTest_SOURCES = $(top_srcdir)/src/Test.cpp JobStateTest.cpp JobStateTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) JobStateTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) BrokerTest_SOURCES = $(top_srcdir)/src/Test.cpp BrokerTest.cpp BrokerTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) BrokerTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) JobControllerPluginTest_SOURCES = $(top_srcdir)/src/Test.cpp JobControllerPluginTest.cpp JobControllerPluginTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) JobControllerPluginTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) 
JobSupervisorTest_SOURCES = $(top_srcdir)/src/Test.cpp \ JobSupervisorTest.cpp JobSupervisorTest_CXXFLAGS = -I$(top_srcdir)/include\ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) JobSupervisorTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) JobDescriptionParserPluginTest_SOURCES = $(top_srcdir)/src/Test.cpp \ JobDescriptionParserPluginTest.cpp JobDescriptionParserPluginTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) JobDescriptionParserPluginTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) SubmitterTest_SOURCES = $(top_srcdir)/src/Test.cpp SubmitterTest.cpp SubmitterTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) SubmitterTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) SubmitterPluginTest_SOURCES = $(top_srcdir)/src/Test.cpp SubmitterPluginTest.cpp SubmitterPluginTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) SubmitterPluginTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) ServiceEndpointRetrieverTest_SOURCES = $(top_srcdir)/src/Test.cpp \ ServiceEndpointRetrieverTest.cpp ServiceEndpointRetrieverTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) ServiceEndpointRetrieverTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) TargetInformationRetrieverTest_SOURCES = $(top_srcdir)/src/Test.cpp \ TargetInformationRetrieverTest.cpp TargetInformationRetrieverTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) TargetInformationRetrieverTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) JobListRetrieverTest_SOURCES = $(top_srcdir)/src/Test.cpp \ JobListRetrieverTest.cpp JobListRetrieverTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) JobListRetrieverTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) ExecutionTargetTest_SOURCES = $(top_srcdir)/src/Test.cpp \ ExecutionTargetTest.cpp ExecutionTargetTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) ExecutionTargetTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) ComputingServiceUniqTest_SOURCES = $(top_srcdir)/src/Test.cpp \ 
ComputingServiceUniqTest.cpp ComputingServiceUniqTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) ComputingServiceUniqTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) SubmissionStatusTest_SOURCES = $(top_srcdir)/src/Test.cpp \ SubmissionStatusTest.cpp SubmissionStatusTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) SubmissionStatusTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/libs/compute/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/compute/test/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-checkPROGRAMS: @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list BrokerTest$(EXEEXT): $(BrokerTest_OBJECTS) $(BrokerTest_DEPENDENCIES) @rm -f BrokerTest$(EXEEXT) $(BrokerTest_LINK) $(BrokerTest_OBJECTS) $(BrokerTest_LDADD) $(LIBS) ComputingServiceUniqTest$(EXEEXT): $(ComputingServiceUniqTest_OBJECTS) $(ComputingServiceUniqTest_DEPENDENCIES) @rm -f ComputingServiceUniqTest$(EXEEXT) $(ComputingServiceUniqTest_LINK) $(ComputingServiceUniqTest_OBJECTS) $(ComputingServiceUniqTest_LDADD) $(LIBS) ExecutionTargetTest$(EXEEXT): $(ExecutionTargetTest_OBJECTS) $(ExecutionTargetTest_DEPENDENCIES) @rm -f ExecutionTargetTest$(EXEEXT) $(ExecutionTargetTest_LINK) $(ExecutionTargetTest_OBJECTS) $(ExecutionTargetTest_LDADD) $(LIBS) JobControllerPluginTest$(EXEEXT): $(JobControllerPluginTest_OBJECTS) $(JobControllerPluginTest_DEPENDENCIES) @rm -f JobControllerPluginTest$(EXEEXT) $(JobControllerPluginTest_LINK) $(JobControllerPluginTest_OBJECTS) $(JobControllerPluginTest_LDADD) $(LIBS) JobDescriptionParserPluginTest$(EXEEXT): $(JobDescriptionParserPluginTest_OBJECTS) $(JobDescriptionParserPluginTest_DEPENDENCIES) @rm -f JobDescriptionParserPluginTest$(EXEEXT) $(JobDescriptionParserPluginTest_LINK) $(JobDescriptionParserPluginTest_OBJECTS) 
$(JobDescriptionParserPluginTest_LDADD) $(LIBS) JobDescriptionTest$(EXEEXT): $(JobDescriptionTest_OBJECTS) $(JobDescriptionTest_DEPENDENCIES) @rm -f JobDescriptionTest$(EXEEXT) $(JobDescriptionTest_LINK) $(JobDescriptionTest_OBJECTS) $(JobDescriptionTest_LDADD) $(LIBS) JobInformationStorageTest$(EXEEXT): $(JobInformationStorageTest_OBJECTS) $(JobInformationStorageTest_DEPENDENCIES) @rm -f JobInformationStorageTest$(EXEEXT) $(JobInformationStorageTest_LINK) $(JobInformationStorageTest_OBJECTS) $(JobInformationStorageTest_LDADD) $(LIBS) JobListRetrieverTest$(EXEEXT): $(JobListRetrieverTest_OBJECTS) $(JobListRetrieverTest_DEPENDENCIES) @rm -f JobListRetrieverTest$(EXEEXT) $(JobListRetrieverTest_LINK) $(JobListRetrieverTest_OBJECTS) $(JobListRetrieverTest_LDADD) $(LIBS) JobStateTest$(EXEEXT): $(JobStateTest_OBJECTS) $(JobStateTest_DEPENDENCIES) @rm -f JobStateTest$(EXEEXT) $(JobStateTest_LINK) $(JobStateTest_OBJECTS) $(JobStateTest_LDADD) $(LIBS) JobSupervisorTest$(EXEEXT): $(JobSupervisorTest_OBJECTS) $(JobSupervisorTest_DEPENDENCIES) @rm -f JobSupervisorTest$(EXEEXT) $(JobSupervisorTest_LINK) $(JobSupervisorTest_OBJECTS) $(JobSupervisorTest_LDADD) $(LIBS) JobTest$(EXEEXT): $(JobTest_OBJECTS) $(JobTest_DEPENDENCIES) @rm -f JobTest$(EXEEXT) $(JobTest_LINK) $(JobTest_OBJECTS) $(JobTest_LDADD) $(LIBS) ServiceEndpointRetrieverTest$(EXEEXT): $(ServiceEndpointRetrieverTest_OBJECTS) $(ServiceEndpointRetrieverTest_DEPENDENCIES) @rm -f ServiceEndpointRetrieverTest$(EXEEXT) $(ServiceEndpointRetrieverTest_LINK) $(ServiceEndpointRetrieverTest_OBJECTS) $(ServiceEndpointRetrieverTest_LDADD) $(LIBS) SoftwareTest$(EXEEXT): $(SoftwareTest_OBJECTS) $(SoftwareTest_DEPENDENCIES) @rm -f SoftwareTest$(EXEEXT) $(SoftwareTest_LINK) $(SoftwareTest_OBJECTS) $(SoftwareTest_LDADD) $(LIBS) SubmissionStatusTest$(EXEEXT): $(SubmissionStatusTest_OBJECTS) $(SubmissionStatusTest_DEPENDENCIES) @rm -f SubmissionStatusTest$(EXEEXT) $(SubmissionStatusTest_LINK) $(SubmissionStatusTest_OBJECTS) $(SubmissionStatusTest_LDADD) $(LIBS) SubmitterPluginTest$(EXEEXT): $(SubmitterPluginTest_OBJECTS) $(SubmitterPluginTest_DEPENDENCIES) @rm -f SubmitterPluginTest$(EXEEXT) $(SubmitterPluginTest_LINK) $(SubmitterPluginTest_OBJECTS) $(SubmitterPluginTest_LDADD) $(LIBS) SubmitterTest$(EXEEXT): $(SubmitterTest_OBJECTS) $(SubmitterTest_DEPENDENCIES) @rm -f SubmitterTest$(EXEEXT) $(SubmitterTest_LINK) $(SubmitterTest_OBJECTS) $(SubmitterTest_LDADD) $(LIBS) TargetInformationRetrieverTest$(EXEEXT): $(TargetInformationRetrieverTest_OBJECTS) $(TargetInformationRetrieverTest_DEPENDENCIES) @rm -f TargetInformationRetrieverTest$(EXEEXT) $(TargetInformationRetrieverTest_LINK) $(TargetInformationRetrieverTest_OBJECTS) $(TargetInformationRetrieverTest_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/BrokerTest-BrokerTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/BrokerTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ComputingServiceUniqTest-ComputingServiceUniqTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ComputingServiceUniqTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ExecutionTargetTest-ExecutionTargetTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ExecutionTargetTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JobControllerPluginTest-JobControllerPluginTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/JobControllerPluginTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JobDescriptionParserPluginTest-JobDescriptionParserPluginTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JobDescriptionParserPluginTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JobDescriptionTest-JobDescriptionTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JobDescriptionTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JobInformationStorageTest-JobInformationStorageTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JobInformationStorageTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JobListRetrieverTest-JobListRetrieverTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JobListRetrieverTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JobStateTest-JobStateTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JobStateTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JobSupervisorTest-JobSupervisorTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JobSupervisorTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JobTest-JobTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JobTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ServiceEndpointRetrieverTest-ServiceEndpointRetrieverTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ServiceEndpointRetrieverTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SoftwareTest-SoftwareTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SoftwareTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SubmissionStatusTest-SubmissionStatusTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SubmissionStatusTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SubmitterPluginTest-SubmitterPluginTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SubmitterPluginTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SubmitterTest-SubmitterTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SubmitterTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TargetInformationRetrieverTest-TargetInformationRetrieverTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TargetInformationRetrieverTest-Test.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< BrokerTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(BrokerTest_CXXFLAGS) $(CXXFLAGS) -MT BrokerTest-Test.o -MD -MP -MF $(DEPDIR)/BrokerTest-Test.Tpo -c -o BrokerTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/BrokerTest-Test.Tpo $(DEPDIR)/BrokerTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='BrokerTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(BrokerTest_CXXFLAGS) $(CXXFLAGS) -c -o BrokerTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp BrokerTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(BrokerTest_CXXFLAGS) $(CXXFLAGS) -MT BrokerTest-Test.obj -MD -MP -MF $(DEPDIR)/BrokerTest-Test.Tpo -c -o BrokerTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/BrokerTest-Test.Tpo $(DEPDIR)/BrokerTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='BrokerTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(BrokerTest_CXXFLAGS) $(CXXFLAGS) -c -o BrokerTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` BrokerTest-BrokerTest.o: BrokerTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(BrokerTest_CXXFLAGS) $(CXXFLAGS) -MT BrokerTest-BrokerTest.o -MD -MP -MF $(DEPDIR)/BrokerTest-BrokerTest.Tpo -c -o BrokerTest-BrokerTest.o `test -f 'BrokerTest.cpp' || echo '$(srcdir)/'`BrokerTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/BrokerTest-BrokerTest.Tpo $(DEPDIR)/BrokerTest-BrokerTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BrokerTest.cpp' object='BrokerTest-BrokerTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(BrokerTest_CXXFLAGS) $(CXXFLAGS) -c -o BrokerTest-BrokerTest.o `test -f 'BrokerTest.cpp' || echo '$(srcdir)/'`BrokerTest.cpp BrokerTest-BrokerTest.obj: BrokerTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(BrokerTest_CXXFLAGS) $(CXXFLAGS) -MT BrokerTest-BrokerTest.obj -MD -MP -MF $(DEPDIR)/BrokerTest-BrokerTest.Tpo -c -o BrokerTest-BrokerTest.obj `if test -f 'BrokerTest.cpp'; then $(CYGPATH_W) 'BrokerTest.cpp'; else $(CYGPATH_W) '$(srcdir)/BrokerTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/BrokerTest-BrokerTest.Tpo 
$(DEPDIR)/BrokerTest-BrokerTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BrokerTest.cpp' object='BrokerTest-BrokerTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(BrokerTest_CXXFLAGS) $(CXXFLAGS) -c -o BrokerTest-BrokerTest.obj `if test -f 'BrokerTest.cpp'; then $(CYGPATH_W) 'BrokerTest.cpp'; else $(CYGPATH_W) '$(srcdir)/BrokerTest.cpp'; fi` ComputingServiceUniqTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ComputingServiceUniqTest_CXXFLAGS) $(CXXFLAGS) -MT ComputingServiceUniqTest-Test.o -MD -MP -MF $(DEPDIR)/ComputingServiceUniqTest-Test.Tpo -c -o ComputingServiceUniqTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ComputingServiceUniqTest-Test.Tpo $(DEPDIR)/ComputingServiceUniqTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ComputingServiceUniqTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ComputingServiceUniqTest_CXXFLAGS) $(CXXFLAGS) -c -o ComputingServiceUniqTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp ComputingServiceUniqTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ComputingServiceUniqTest_CXXFLAGS) $(CXXFLAGS) -MT ComputingServiceUniqTest-Test.obj -MD -MP -MF $(DEPDIR)/ComputingServiceUniqTest-Test.Tpo -c -o ComputingServiceUniqTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ComputingServiceUniqTest-Test.Tpo $(DEPDIR)/ComputingServiceUniqTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ComputingServiceUniqTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ComputingServiceUniqTest_CXXFLAGS) $(CXXFLAGS) -c -o ComputingServiceUniqTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` ComputingServiceUniqTest-ComputingServiceUniqTest.o: ComputingServiceUniqTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ComputingServiceUniqTest_CXXFLAGS) $(CXXFLAGS) -MT ComputingServiceUniqTest-ComputingServiceUniqTest.o -MD -MP -MF $(DEPDIR)/ComputingServiceUniqTest-ComputingServiceUniqTest.Tpo -c -o ComputingServiceUniqTest-ComputingServiceUniqTest.o `test -f 'ComputingServiceUniqTest.cpp' || echo '$(srcdir)/'`ComputingServiceUniqTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ComputingServiceUniqTest-ComputingServiceUniqTest.Tpo $(DEPDIR)/ComputingServiceUniqTest-ComputingServiceUniqTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ComputingServiceUniqTest.cpp' 
object='ComputingServiceUniqTest-ComputingServiceUniqTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ComputingServiceUniqTest_CXXFLAGS) $(CXXFLAGS) -c -o ComputingServiceUniqTest-ComputingServiceUniqTest.o `test -f 'ComputingServiceUniqTest.cpp' || echo '$(srcdir)/'`ComputingServiceUniqTest.cpp ComputingServiceUniqTest-ComputingServiceUniqTest.obj: ComputingServiceUniqTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ComputingServiceUniqTest_CXXFLAGS) $(CXXFLAGS) -MT ComputingServiceUniqTest-ComputingServiceUniqTest.obj -MD -MP -MF $(DEPDIR)/ComputingServiceUniqTest-ComputingServiceUniqTest.Tpo -c -o ComputingServiceUniqTest-ComputingServiceUniqTest.obj `if test -f 'ComputingServiceUniqTest.cpp'; then $(CYGPATH_W) 'ComputingServiceUniqTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ComputingServiceUniqTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ComputingServiceUniqTest-ComputingServiceUniqTest.Tpo $(DEPDIR)/ComputingServiceUniqTest-ComputingServiceUniqTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ComputingServiceUniqTest.cpp' object='ComputingServiceUniqTest-ComputingServiceUniqTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ComputingServiceUniqTest_CXXFLAGS) $(CXXFLAGS) -c -o ComputingServiceUniqTest-ComputingServiceUniqTest.obj `if test -f 'ComputingServiceUniqTest.cpp'; then $(CYGPATH_W) 'ComputingServiceUniqTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ComputingServiceUniqTest.cpp'; fi` ExecutionTargetTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ExecutionTargetTest_CXXFLAGS) $(CXXFLAGS) -MT ExecutionTargetTest-Test.o -MD -MP -MF $(DEPDIR)/ExecutionTargetTest-Test.Tpo -c -o ExecutionTargetTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ExecutionTargetTest-Test.Tpo $(DEPDIR)/ExecutionTargetTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ExecutionTargetTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ExecutionTargetTest_CXXFLAGS) $(CXXFLAGS) -c -o ExecutionTargetTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp ExecutionTargetTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ExecutionTargetTest_CXXFLAGS) $(CXXFLAGS) -MT ExecutionTargetTest-Test.obj -MD -MP -MF $(DEPDIR)/ExecutionTargetTest-Test.Tpo -c -o ExecutionTargetTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ExecutionTargetTest-Test.Tpo $(DEPDIR)/ExecutionTargetTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ExecutionTargetTest-Test.obj' libtool=no @AMDEPBACKSLASH@ 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ExecutionTargetTest_CXXFLAGS) $(CXXFLAGS) -c -o ExecutionTargetTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` ExecutionTargetTest-ExecutionTargetTest.o: ExecutionTargetTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ExecutionTargetTest_CXXFLAGS) $(CXXFLAGS) -MT ExecutionTargetTest-ExecutionTargetTest.o -MD -MP -MF $(DEPDIR)/ExecutionTargetTest-ExecutionTargetTest.Tpo -c -o ExecutionTargetTest-ExecutionTargetTest.o `test -f 'ExecutionTargetTest.cpp' || echo '$(srcdir)/'`ExecutionTargetTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ExecutionTargetTest-ExecutionTargetTest.Tpo $(DEPDIR)/ExecutionTargetTest-ExecutionTargetTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ExecutionTargetTest.cpp' object='ExecutionTargetTest-ExecutionTargetTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ExecutionTargetTest_CXXFLAGS) $(CXXFLAGS) -c -o ExecutionTargetTest-ExecutionTargetTest.o `test -f 'ExecutionTargetTest.cpp' || echo '$(srcdir)/'`ExecutionTargetTest.cpp ExecutionTargetTest-ExecutionTargetTest.obj: ExecutionTargetTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ExecutionTargetTest_CXXFLAGS) $(CXXFLAGS) -MT ExecutionTargetTest-ExecutionTargetTest.obj -MD -MP -MF $(DEPDIR)/ExecutionTargetTest-ExecutionTargetTest.Tpo -c -o ExecutionTargetTest-ExecutionTargetTest.obj `if test -f 'ExecutionTargetTest.cpp'; then $(CYGPATH_W) 'ExecutionTargetTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ExecutionTargetTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ExecutionTargetTest-ExecutionTargetTest.Tpo $(DEPDIR)/ExecutionTargetTest-ExecutionTargetTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ExecutionTargetTest.cpp' object='ExecutionTargetTest-ExecutionTargetTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ExecutionTargetTest_CXXFLAGS) $(CXXFLAGS) -c -o ExecutionTargetTest-ExecutionTargetTest.obj `if test -f 'ExecutionTargetTest.cpp'; then $(CYGPATH_W) 'ExecutionTargetTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ExecutionTargetTest.cpp'; fi` JobControllerPluginTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobControllerPluginTest_CXXFLAGS) $(CXXFLAGS) -MT JobControllerPluginTest-Test.o -MD -MP -MF $(DEPDIR)/JobControllerPluginTest-Test.Tpo -c -o JobControllerPluginTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobControllerPluginTest-Test.Tpo $(DEPDIR)/JobControllerPluginTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='JobControllerPluginTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobControllerPluginTest_CXXFLAGS) $(CXXFLAGS) -c -o JobControllerPluginTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp JobControllerPluginTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobControllerPluginTest_CXXFLAGS) $(CXXFLAGS) -MT JobControllerPluginTest-Test.obj -MD -MP -MF $(DEPDIR)/JobControllerPluginTest-Test.Tpo -c -o JobControllerPluginTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobControllerPluginTest-Test.Tpo $(DEPDIR)/JobControllerPluginTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='JobControllerPluginTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobControllerPluginTest_CXXFLAGS) $(CXXFLAGS) -c -o JobControllerPluginTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` JobControllerPluginTest-JobControllerPluginTest.o: JobControllerPluginTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobControllerPluginTest_CXXFLAGS) $(CXXFLAGS) -MT JobControllerPluginTest-JobControllerPluginTest.o -MD -MP -MF $(DEPDIR)/JobControllerPluginTest-JobControllerPluginTest.Tpo -c -o JobControllerPluginTest-JobControllerPluginTest.o `test -f 'JobControllerPluginTest.cpp' || echo '$(srcdir)/'`JobControllerPluginTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobControllerPluginTest-JobControllerPluginTest.Tpo $(DEPDIR)/JobControllerPluginTest-JobControllerPluginTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobControllerPluginTest.cpp' object='JobControllerPluginTest-JobControllerPluginTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobControllerPluginTest_CXXFLAGS) $(CXXFLAGS) -c -o JobControllerPluginTest-JobControllerPluginTest.o `test -f 'JobControllerPluginTest.cpp' || echo '$(srcdir)/'`JobControllerPluginTest.cpp JobControllerPluginTest-JobControllerPluginTest.obj: JobControllerPluginTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobControllerPluginTest_CXXFLAGS) $(CXXFLAGS) -MT JobControllerPluginTest-JobControllerPluginTest.obj -MD -MP -MF $(DEPDIR)/JobControllerPluginTest-JobControllerPluginTest.Tpo -c -o JobControllerPluginTest-JobControllerPluginTest.obj `if test -f 'JobControllerPluginTest.cpp'; then $(CYGPATH_W) 'JobControllerPluginTest.cpp'; else $(CYGPATH_W) '$(srcdir)/JobControllerPluginTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobControllerPluginTest-JobControllerPluginTest.Tpo $(DEPDIR)/JobControllerPluginTest-JobControllerPluginTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobControllerPluginTest.cpp' object='JobControllerPluginTest-JobControllerPluginTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) 
$(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobControllerPluginTest_CXXFLAGS) $(CXXFLAGS) -c -o JobControllerPluginTest-JobControllerPluginTest.obj `if test -f 'JobControllerPluginTest.cpp'; then $(CYGPATH_W) 'JobControllerPluginTest.cpp'; else $(CYGPATH_W) '$(srcdir)/JobControllerPluginTest.cpp'; fi` JobDescriptionParserPluginTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobDescriptionParserPluginTest_CXXFLAGS) $(CXXFLAGS) -MT JobDescriptionParserPluginTest-Test.o -MD -MP -MF $(DEPDIR)/JobDescriptionParserPluginTest-Test.Tpo -c -o JobDescriptionParserPluginTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobDescriptionParserPluginTest-Test.Tpo $(DEPDIR)/JobDescriptionParserPluginTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='JobDescriptionParserPluginTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobDescriptionParserPluginTest_CXXFLAGS) $(CXXFLAGS) -c -o JobDescriptionParserPluginTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp JobDescriptionParserPluginTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobDescriptionParserPluginTest_CXXFLAGS) $(CXXFLAGS) -MT JobDescriptionParserPluginTest-Test.obj -MD -MP -MF $(DEPDIR)/JobDescriptionParserPluginTest-Test.Tpo -c -o JobDescriptionParserPluginTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobDescriptionParserPluginTest-Test.Tpo $(DEPDIR)/JobDescriptionParserPluginTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='JobDescriptionParserPluginTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobDescriptionParserPluginTest_CXXFLAGS) $(CXXFLAGS) -c -o JobDescriptionParserPluginTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` JobDescriptionParserPluginTest-JobDescriptionParserPluginTest.o: JobDescriptionParserPluginTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobDescriptionParserPluginTest_CXXFLAGS) $(CXXFLAGS) -MT JobDescriptionParserPluginTest-JobDescriptionParserPluginTest.o -MD -MP -MF $(DEPDIR)/JobDescriptionParserPluginTest-JobDescriptionParserPluginTest.Tpo -c -o JobDescriptionParserPluginTest-JobDescriptionParserPluginTest.o `test -f 'JobDescriptionParserPluginTest.cpp' || echo '$(srcdir)/'`JobDescriptionParserPluginTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobDescriptionParserPluginTest-JobDescriptionParserPluginTest.Tpo $(DEPDIR)/JobDescriptionParserPluginTest-JobDescriptionParserPluginTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ 
source='JobDescriptionParserPluginTest.cpp' object='JobDescriptionParserPluginTest-JobDescriptionParserPluginTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobDescriptionParserPluginTest_CXXFLAGS) $(CXXFLAGS) -c -o JobDescriptionParserPluginTest-JobDescriptionParserPluginTest.o `test -f 'JobDescriptionParserPluginTest.cpp' || echo '$(srcdir)/'`JobDescriptionParserPluginTest.cpp JobDescriptionParserPluginTest-JobDescriptionParserPluginTest.obj: JobDescriptionParserPluginTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobDescriptionParserPluginTest_CXXFLAGS) $(CXXFLAGS) -MT JobDescriptionParserPluginTest-JobDescriptionParserPluginTest.obj -MD -MP -MF $(DEPDIR)/JobDescriptionParserPluginTest-JobDescriptionParserPluginTest.Tpo -c -o JobDescriptionParserPluginTest-JobDescriptionParserPluginTest.obj `if test -f 'JobDescriptionParserPluginTest.cpp'; then $(CYGPATH_W) 'JobDescriptionParserPluginTest.cpp'; else $(CYGPATH_W) '$(srcdir)/JobDescriptionParserPluginTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobDescriptionParserPluginTest-JobDescriptionParserPluginTest.Tpo $(DEPDIR)/JobDescriptionParserPluginTest-JobDescriptionParserPluginTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobDescriptionParserPluginTest.cpp' object='JobDescriptionParserPluginTest-JobDescriptionParserPluginTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobDescriptionParserPluginTest_CXXFLAGS) $(CXXFLAGS) -c -o JobDescriptionParserPluginTest-JobDescriptionParserPluginTest.obj `if test -f 'JobDescriptionParserPluginTest.cpp'; then $(CYGPATH_W) 'JobDescriptionParserPluginTest.cpp'; else $(CYGPATH_W) '$(srcdir)/JobDescriptionParserPluginTest.cpp'; fi` JobDescriptionTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobDescriptionTest_CXXFLAGS) $(CXXFLAGS) -MT JobDescriptionTest-Test.o -MD -MP -MF $(DEPDIR)/JobDescriptionTest-Test.Tpo -c -o JobDescriptionTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobDescriptionTest-Test.Tpo $(DEPDIR)/JobDescriptionTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='JobDescriptionTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobDescriptionTest_CXXFLAGS) $(CXXFLAGS) -c -o JobDescriptionTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp JobDescriptionTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobDescriptionTest_CXXFLAGS) $(CXXFLAGS) -MT JobDescriptionTest-Test.obj -MD -MP -MF $(DEPDIR)/JobDescriptionTest-Test.Tpo -c -o JobDescriptionTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ 
$(am__mv) $(DEPDIR)/JobDescriptionTest-Test.Tpo $(DEPDIR)/JobDescriptionTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='JobDescriptionTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobDescriptionTest_CXXFLAGS) $(CXXFLAGS) -c -o JobDescriptionTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` JobDescriptionTest-JobDescriptionTest.o: JobDescriptionTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobDescriptionTest_CXXFLAGS) $(CXXFLAGS) -MT JobDescriptionTest-JobDescriptionTest.o -MD -MP -MF $(DEPDIR)/JobDescriptionTest-JobDescriptionTest.Tpo -c -o JobDescriptionTest-JobDescriptionTest.o `test -f 'JobDescriptionTest.cpp' || echo '$(srcdir)/'`JobDescriptionTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobDescriptionTest-JobDescriptionTest.Tpo $(DEPDIR)/JobDescriptionTest-JobDescriptionTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobDescriptionTest.cpp' object='JobDescriptionTest-JobDescriptionTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobDescriptionTest_CXXFLAGS) $(CXXFLAGS) -c -o JobDescriptionTest-JobDescriptionTest.o `test -f 'JobDescriptionTest.cpp' || echo '$(srcdir)/'`JobDescriptionTest.cpp JobDescriptionTest-JobDescriptionTest.obj: JobDescriptionTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobDescriptionTest_CXXFLAGS) $(CXXFLAGS) -MT JobDescriptionTest-JobDescriptionTest.obj -MD -MP -MF $(DEPDIR)/JobDescriptionTest-JobDescriptionTest.Tpo -c -o JobDescriptionTest-JobDescriptionTest.obj `if test -f 'JobDescriptionTest.cpp'; then $(CYGPATH_W) 'JobDescriptionTest.cpp'; else $(CYGPATH_W) '$(srcdir)/JobDescriptionTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobDescriptionTest-JobDescriptionTest.Tpo $(DEPDIR)/JobDescriptionTest-JobDescriptionTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobDescriptionTest.cpp' object='JobDescriptionTest-JobDescriptionTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobDescriptionTest_CXXFLAGS) $(CXXFLAGS) -c -o JobDescriptionTest-JobDescriptionTest.obj `if test -f 'JobDescriptionTest.cpp'; then $(CYGPATH_W) 'JobDescriptionTest.cpp'; else $(CYGPATH_W) '$(srcdir)/JobDescriptionTest.cpp'; fi` JobInformationStorageTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobInformationStorageTest_CXXFLAGS) $(CXXFLAGS) -MT JobInformationStorageTest-Test.o -MD -MP -MF $(DEPDIR)/JobInformationStorageTest-Test.Tpo -c -o JobInformationStorageTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobInformationStorageTest-Test.Tpo $(DEPDIR)/JobInformationStorageTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' 
object='JobInformationStorageTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobInformationStorageTest_CXXFLAGS) $(CXXFLAGS) -c -o JobInformationStorageTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp JobInformationStorageTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobInformationStorageTest_CXXFLAGS) $(CXXFLAGS) -MT JobInformationStorageTest-Test.obj -MD -MP -MF $(DEPDIR)/JobInformationStorageTest-Test.Tpo -c -o JobInformationStorageTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobInformationStorageTest-Test.Tpo $(DEPDIR)/JobInformationStorageTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='JobInformationStorageTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobInformationStorageTest_CXXFLAGS) $(CXXFLAGS) -c -o JobInformationStorageTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` JobInformationStorageTest-JobInformationStorageTest.o: JobInformationStorageTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobInformationStorageTest_CXXFLAGS) $(CXXFLAGS) -MT JobInformationStorageTest-JobInformationStorageTest.o -MD -MP -MF $(DEPDIR)/JobInformationStorageTest-JobInformationStorageTest.Tpo -c -o JobInformationStorageTest-JobInformationStorageTest.o `test -f 'JobInformationStorageTest.cpp' || echo '$(srcdir)/'`JobInformationStorageTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobInformationStorageTest-JobInformationStorageTest.Tpo $(DEPDIR)/JobInformationStorageTest-JobInformationStorageTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobInformationStorageTest.cpp' object='JobInformationStorageTest-JobInformationStorageTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobInformationStorageTest_CXXFLAGS) $(CXXFLAGS) -c -o JobInformationStorageTest-JobInformationStorageTest.o `test -f 'JobInformationStorageTest.cpp' || echo '$(srcdir)/'`JobInformationStorageTest.cpp JobInformationStorageTest-JobInformationStorageTest.obj: JobInformationStorageTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobInformationStorageTest_CXXFLAGS) $(CXXFLAGS) -MT JobInformationStorageTest-JobInformationStorageTest.obj -MD -MP -MF $(DEPDIR)/JobInformationStorageTest-JobInformationStorageTest.Tpo -c -o JobInformationStorageTest-JobInformationStorageTest.obj `if test -f 'JobInformationStorageTest.cpp'; then $(CYGPATH_W) 'JobInformationStorageTest.cpp'; else $(CYGPATH_W) '$(srcdir)/JobInformationStorageTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) 
$(DEPDIR)/JobInformationStorageTest-JobInformationStorageTest.Tpo $(DEPDIR)/JobInformationStorageTest-JobInformationStorageTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobInformationStorageTest.cpp' object='JobInformationStorageTest-JobInformationStorageTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobInformationStorageTest_CXXFLAGS) $(CXXFLAGS) -c -o JobInformationStorageTest-JobInformationStorageTest.obj `if test -f 'JobInformationStorageTest.cpp'; then $(CYGPATH_W) 'JobInformationStorageTest.cpp'; else $(CYGPATH_W) '$(srcdir)/JobInformationStorageTest.cpp'; fi` JobListRetrieverTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobListRetrieverTest_CXXFLAGS) $(CXXFLAGS) -MT JobListRetrieverTest-Test.o -MD -MP -MF $(DEPDIR)/JobListRetrieverTest-Test.Tpo -c -o JobListRetrieverTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobListRetrieverTest-Test.Tpo $(DEPDIR)/JobListRetrieverTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='JobListRetrieverTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobListRetrieverTest_CXXFLAGS) $(CXXFLAGS) -c -o JobListRetrieverTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp JobListRetrieverTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobListRetrieverTest_CXXFLAGS) $(CXXFLAGS) -MT JobListRetrieverTest-Test.obj -MD -MP -MF $(DEPDIR)/JobListRetrieverTest-Test.Tpo -c -o JobListRetrieverTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobListRetrieverTest-Test.Tpo $(DEPDIR)/JobListRetrieverTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='JobListRetrieverTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobListRetrieverTest_CXXFLAGS) $(CXXFLAGS) -c -o JobListRetrieverTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` JobListRetrieverTest-JobListRetrieverTest.o: JobListRetrieverTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobListRetrieverTest_CXXFLAGS) $(CXXFLAGS) -MT JobListRetrieverTest-JobListRetrieverTest.o -MD -MP -MF $(DEPDIR)/JobListRetrieverTest-JobListRetrieverTest.Tpo -c -o JobListRetrieverTest-JobListRetrieverTest.o `test -f 'JobListRetrieverTest.cpp' || echo '$(srcdir)/'`JobListRetrieverTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobListRetrieverTest-JobListRetrieverTest.Tpo $(DEPDIR)/JobListRetrieverTest-JobListRetrieverTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ 
source='JobListRetrieverTest.cpp' object='JobListRetrieverTest-JobListRetrieverTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobListRetrieverTest_CXXFLAGS) $(CXXFLAGS) -c -o JobListRetrieverTest-JobListRetrieverTest.o `test -f 'JobListRetrieverTest.cpp' || echo '$(srcdir)/'`JobListRetrieverTest.cpp JobListRetrieverTest-JobListRetrieverTest.obj: JobListRetrieverTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobListRetrieverTest_CXXFLAGS) $(CXXFLAGS) -MT JobListRetrieverTest-JobListRetrieverTest.obj -MD -MP -MF $(DEPDIR)/JobListRetrieverTest-JobListRetrieverTest.Tpo -c -o JobListRetrieverTest-JobListRetrieverTest.obj `if test -f 'JobListRetrieverTest.cpp'; then $(CYGPATH_W) 'JobListRetrieverTest.cpp'; else $(CYGPATH_W) '$(srcdir)/JobListRetrieverTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobListRetrieverTest-JobListRetrieverTest.Tpo $(DEPDIR)/JobListRetrieverTest-JobListRetrieverTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobListRetrieverTest.cpp' object='JobListRetrieverTest-JobListRetrieverTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobListRetrieverTest_CXXFLAGS) $(CXXFLAGS) -c -o JobListRetrieverTest-JobListRetrieverTest.obj `if test -f 'JobListRetrieverTest.cpp'; then $(CYGPATH_W) 'JobListRetrieverTest.cpp'; else $(CYGPATH_W) '$(srcdir)/JobListRetrieverTest.cpp'; fi` JobStateTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobStateTest_CXXFLAGS) $(CXXFLAGS) -MT JobStateTest-Test.o -MD -MP -MF $(DEPDIR)/JobStateTest-Test.Tpo -c -o JobStateTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobStateTest-Test.Tpo $(DEPDIR)/JobStateTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='JobStateTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobStateTest_CXXFLAGS) $(CXXFLAGS) -c -o JobStateTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp JobStateTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobStateTest_CXXFLAGS) $(CXXFLAGS) -MT JobStateTest-Test.obj -MD -MP -MF $(DEPDIR)/JobStateTest-Test.Tpo -c -o JobStateTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobStateTest-Test.Tpo $(DEPDIR)/JobStateTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='JobStateTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobStateTest_CXXFLAGS) $(CXXFLAGS) -c 
-o JobStateTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` JobStateTest-JobStateTest.o: JobStateTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobStateTest_CXXFLAGS) $(CXXFLAGS) -MT JobStateTest-JobStateTest.o -MD -MP -MF $(DEPDIR)/JobStateTest-JobStateTest.Tpo -c -o JobStateTest-JobStateTest.o `test -f 'JobStateTest.cpp' || echo '$(srcdir)/'`JobStateTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobStateTest-JobStateTest.Tpo $(DEPDIR)/JobStateTest-JobStateTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobStateTest.cpp' object='JobStateTest-JobStateTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobStateTest_CXXFLAGS) $(CXXFLAGS) -c -o JobStateTest-JobStateTest.o `test -f 'JobStateTest.cpp' || echo '$(srcdir)/'`JobStateTest.cpp JobStateTest-JobStateTest.obj: JobStateTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobStateTest_CXXFLAGS) $(CXXFLAGS) -MT JobStateTest-JobStateTest.obj -MD -MP -MF $(DEPDIR)/JobStateTest-JobStateTest.Tpo -c -o JobStateTest-JobStateTest.obj `if test -f 'JobStateTest.cpp'; then $(CYGPATH_W) 'JobStateTest.cpp'; else $(CYGPATH_W) '$(srcdir)/JobStateTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobStateTest-JobStateTest.Tpo $(DEPDIR)/JobStateTest-JobStateTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobStateTest.cpp' object='JobStateTest-JobStateTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobStateTest_CXXFLAGS) $(CXXFLAGS) -c -o JobStateTest-JobStateTest.obj `if test -f 'JobStateTest.cpp'; then $(CYGPATH_W) 'JobStateTest.cpp'; else $(CYGPATH_W) '$(srcdir)/JobStateTest.cpp'; fi` JobSupervisorTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobSupervisorTest_CXXFLAGS) $(CXXFLAGS) -MT JobSupervisorTest-Test.o -MD -MP -MF $(DEPDIR)/JobSupervisorTest-Test.Tpo -c -o JobSupervisorTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobSupervisorTest-Test.Tpo $(DEPDIR)/JobSupervisorTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='JobSupervisorTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobSupervisorTest_CXXFLAGS) $(CXXFLAGS) -c -o JobSupervisorTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp JobSupervisorTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobSupervisorTest_CXXFLAGS) $(CXXFLAGS) -MT JobSupervisorTest-Test.obj -MD -MP -MF $(DEPDIR)/JobSupervisorTest-Test.Tpo -c -o JobSupervisorTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) 
'$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobSupervisorTest-Test.Tpo $(DEPDIR)/JobSupervisorTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='JobSupervisorTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobSupervisorTest_CXXFLAGS) $(CXXFLAGS) -c -o JobSupervisorTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` JobSupervisorTest-JobSupervisorTest.o: JobSupervisorTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobSupervisorTest_CXXFLAGS) $(CXXFLAGS) -MT JobSupervisorTest-JobSupervisorTest.o -MD -MP -MF $(DEPDIR)/JobSupervisorTest-JobSupervisorTest.Tpo -c -o JobSupervisorTest-JobSupervisorTest.o `test -f 'JobSupervisorTest.cpp' || echo '$(srcdir)/'`JobSupervisorTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobSupervisorTest-JobSupervisorTest.Tpo $(DEPDIR)/JobSupervisorTest-JobSupervisorTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobSupervisorTest.cpp' object='JobSupervisorTest-JobSupervisorTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobSupervisorTest_CXXFLAGS) $(CXXFLAGS) -c -o JobSupervisorTest-JobSupervisorTest.o `test -f 'JobSupervisorTest.cpp' || echo '$(srcdir)/'`JobSupervisorTest.cpp JobSupervisorTest-JobSupervisorTest.obj: JobSupervisorTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobSupervisorTest_CXXFLAGS) $(CXXFLAGS) -MT JobSupervisorTest-JobSupervisorTest.obj -MD -MP -MF $(DEPDIR)/JobSupervisorTest-JobSupervisorTest.Tpo -c -o JobSupervisorTest-JobSupervisorTest.obj `if test -f 'JobSupervisorTest.cpp'; then $(CYGPATH_W) 'JobSupervisorTest.cpp'; else $(CYGPATH_W) '$(srcdir)/JobSupervisorTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobSupervisorTest-JobSupervisorTest.Tpo $(DEPDIR)/JobSupervisorTest-JobSupervisorTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobSupervisorTest.cpp' object='JobSupervisorTest-JobSupervisorTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobSupervisorTest_CXXFLAGS) $(CXXFLAGS) -c -o JobSupervisorTest-JobSupervisorTest.obj `if test -f 'JobSupervisorTest.cpp'; then $(CYGPATH_W) 'JobSupervisorTest.cpp'; else $(CYGPATH_W) '$(srcdir)/JobSupervisorTest.cpp'; fi` JobTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobTest_CXXFLAGS) $(CXXFLAGS) -MT JobTest-Test.o -MD -MP -MF $(DEPDIR)/JobTest-Test.Tpo -c -o JobTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobTest-Test.Tpo $(DEPDIR)/JobTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='JobTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) 
@AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobTest_CXXFLAGS) $(CXXFLAGS) -c -o JobTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp JobTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobTest_CXXFLAGS) $(CXXFLAGS) -MT JobTest-Test.obj -MD -MP -MF $(DEPDIR)/JobTest-Test.Tpo -c -o JobTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobTest-Test.Tpo $(DEPDIR)/JobTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='JobTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobTest_CXXFLAGS) $(CXXFLAGS) -c -o JobTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` JobTest-JobTest.o: JobTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobTest_CXXFLAGS) $(CXXFLAGS) -MT JobTest-JobTest.o -MD -MP -MF $(DEPDIR)/JobTest-JobTest.Tpo -c -o JobTest-JobTest.o `test -f 'JobTest.cpp' || echo '$(srcdir)/'`JobTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobTest-JobTest.Tpo $(DEPDIR)/JobTest-JobTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobTest.cpp' object='JobTest-JobTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobTest_CXXFLAGS) $(CXXFLAGS) -c -o JobTest-JobTest.o `test -f 'JobTest.cpp' || echo '$(srcdir)/'`JobTest.cpp JobTest-JobTest.obj: JobTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobTest_CXXFLAGS) $(CXXFLAGS) -MT JobTest-JobTest.obj -MD -MP -MF $(DEPDIR)/JobTest-JobTest.Tpo -c -o JobTest-JobTest.obj `if test -f 'JobTest.cpp'; then $(CYGPATH_W) 'JobTest.cpp'; else $(CYGPATH_W) '$(srcdir)/JobTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JobTest-JobTest.Tpo $(DEPDIR)/JobTest-JobTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobTest.cpp' object='JobTest-JobTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JobTest_CXXFLAGS) $(CXXFLAGS) -c -o JobTest-JobTest.obj `if test -f 'JobTest.cpp'; then $(CYGPATH_W) 'JobTest.cpp'; else $(CYGPATH_W) '$(srcdir)/JobTest.cpp'; fi` ServiceEndpointRetrieverTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ServiceEndpointRetrieverTest_CXXFLAGS) $(CXXFLAGS) -MT ServiceEndpointRetrieverTest-Test.o -MD -MP -MF $(DEPDIR)/ServiceEndpointRetrieverTest-Test.Tpo -c -o ServiceEndpointRetrieverTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ServiceEndpointRetrieverTest-Test.Tpo 
$(DEPDIR)/ServiceEndpointRetrieverTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ServiceEndpointRetrieverTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ServiceEndpointRetrieverTest_CXXFLAGS) $(CXXFLAGS) -c -o ServiceEndpointRetrieverTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp ServiceEndpointRetrieverTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ServiceEndpointRetrieverTest_CXXFLAGS) $(CXXFLAGS) -MT ServiceEndpointRetrieverTest-Test.obj -MD -MP -MF $(DEPDIR)/ServiceEndpointRetrieverTest-Test.Tpo -c -o ServiceEndpointRetrieverTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ServiceEndpointRetrieverTest-Test.Tpo $(DEPDIR)/ServiceEndpointRetrieverTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ServiceEndpointRetrieverTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ServiceEndpointRetrieverTest_CXXFLAGS) $(CXXFLAGS) -c -o ServiceEndpointRetrieverTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` ServiceEndpointRetrieverTest-ServiceEndpointRetrieverTest.o: ServiceEndpointRetrieverTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ServiceEndpointRetrieverTest_CXXFLAGS) $(CXXFLAGS) -MT ServiceEndpointRetrieverTest-ServiceEndpointRetrieverTest.o -MD -MP -MF $(DEPDIR)/ServiceEndpointRetrieverTest-ServiceEndpointRetrieverTest.Tpo -c -o ServiceEndpointRetrieverTest-ServiceEndpointRetrieverTest.o `test -f 'ServiceEndpointRetrieverTest.cpp' || echo '$(srcdir)/'`ServiceEndpointRetrieverTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ServiceEndpointRetrieverTest-ServiceEndpointRetrieverTest.Tpo $(DEPDIR)/ServiceEndpointRetrieverTest-ServiceEndpointRetrieverTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ServiceEndpointRetrieverTest.cpp' object='ServiceEndpointRetrieverTest-ServiceEndpointRetrieverTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ServiceEndpointRetrieverTest_CXXFLAGS) $(CXXFLAGS) -c -o ServiceEndpointRetrieverTest-ServiceEndpointRetrieverTest.o `test -f 'ServiceEndpointRetrieverTest.cpp' || echo '$(srcdir)/'`ServiceEndpointRetrieverTest.cpp ServiceEndpointRetrieverTest-ServiceEndpointRetrieverTest.obj: ServiceEndpointRetrieverTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ServiceEndpointRetrieverTest_CXXFLAGS) $(CXXFLAGS) -MT ServiceEndpointRetrieverTest-ServiceEndpointRetrieverTest.obj -MD -MP -MF $(DEPDIR)/ServiceEndpointRetrieverTest-ServiceEndpointRetrieverTest.Tpo -c -o 
ServiceEndpointRetrieverTest-ServiceEndpointRetrieverTest.obj `if test -f 'ServiceEndpointRetrieverTest.cpp'; then $(CYGPATH_W) 'ServiceEndpointRetrieverTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ServiceEndpointRetrieverTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ServiceEndpointRetrieverTest-ServiceEndpointRetrieverTest.Tpo $(DEPDIR)/ServiceEndpointRetrieverTest-ServiceEndpointRetrieverTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ServiceEndpointRetrieverTest.cpp' object='ServiceEndpointRetrieverTest-ServiceEndpointRetrieverTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ServiceEndpointRetrieverTest_CXXFLAGS) $(CXXFLAGS) -c -o ServiceEndpointRetrieverTest-ServiceEndpointRetrieverTest.obj `if test -f 'ServiceEndpointRetrieverTest.cpp'; then $(CYGPATH_W) 'ServiceEndpointRetrieverTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ServiceEndpointRetrieverTest.cpp'; fi` SoftwareTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SoftwareTest_CXXFLAGS) $(CXXFLAGS) -MT SoftwareTest-Test.o -MD -MP -MF $(DEPDIR)/SoftwareTest-Test.Tpo -c -o SoftwareTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/SoftwareTest-Test.Tpo $(DEPDIR)/SoftwareTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='SoftwareTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SoftwareTest_CXXFLAGS) $(CXXFLAGS) -c -o SoftwareTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp SoftwareTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SoftwareTest_CXXFLAGS) $(CXXFLAGS) -MT SoftwareTest-Test.obj -MD -MP -MF $(DEPDIR)/SoftwareTest-Test.Tpo -c -o SoftwareTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/SoftwareTest-Test.Tpo $(DEPDIR)/SoftwareTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='SoftwareTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SoftwareTest_CXXFLAGS) $(CXXFLAGS) -c -o SoftwareTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` SoftwareTest-SoftwareTest.o: SoftwareTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SoftwareTest_CXXFLAGS) $(CXXFLAGS) -MT SoftwareTest-SoftwareTest.o -MD -MP -MF $(DEPDIR)/SoftwareTest-SoftwareTest.Tpo -c -o SoftwareTest-SoftwareTest.o `test -f 'SoftwareTest.cpp' || echo '$(srcdir)/'`SoftwareTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/SoftwareTest-SoftwareTest.Tpo $(DEPDIR)/SoftwareTest-SoftwareTest.Po 
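# Because each check program sets its own per-target CXXFLAGS, Automake compiles
# the shared $(top_srcdir)/src/Test.cpp driver once per program under a renamed
# object (BrokerTest-Test.o, SoftwareTest-Test.o, ...) instead of reusing a
# single Test.o built with one flag set.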
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SoftwareTest.cpp' object='SoftwareTest-SoftwareTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SoftwareTest_CXXFLAGS) $(CXXFLAGS) -c -o SoftwareTest-SoftwareTest.o `test -f 'SoftwareTest.cpp' || echo '$(srcdir)/'`SoftwareTest.cpp SoftwareTest-SoftwareTest.obj: SoftwareTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SoftwareTest_CXXFLAGS) $(CXXFLAGS) -MT SoftwareTest-SoftwareTest.obj -MD -MP -MF $(DEPDIR)/SoftwareTest-SoftwareTest.Tpo -c -o SoftwareTest-SoftwareTest.obj `if test -f 'SoftwareTest.cpp'; then $(CYGPATH_W) 'SoftwareTest.cpp'; else $(CYGPATH_W) '$(srcdir)/SoftwareTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/SoftwareTest-SoftwareTest.Tpo $(DEPDIR)/SoftwareTest-SoftwareTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SoftwareTest.cpp' object='SoftwareTest-SoftwareTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SoftwareTest_CXXFLAGS) $(CXXFLAGS) -c -o SoftwareTest-SoftwareTest.obj `if test -f 'SoftwareTest.cpp'; then $(CYGPATH_W) 'SoftwareTest.cpp'; else $(CYGPATH_W) '$(srcdir)/SoftwareTest.cpp'; fi` SubmissionStatusTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmissionStatusTest_CXXFLAGS) $(CXXFLAGS) -MT SubmissionStatusTest-Test.o -MD -MP -MF $(DEPDIR)/SubmissionStatusTest-Test.Tpo -c -o SubmissionStatusTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/SubmissionStatusTest-Test.Tpo $(DEPDIR)/SubmissionStatusTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='SubmissionStatusTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmissionStatusTest_CXXFLAGS) $(CXXFLAGS) -c -o SubmissionStatusTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp SubmissionStatusTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmissionStatusTest_CXXFLAGS) $(CXXFLAGS) -MT SubmissionStatusTest-Test.obj -MD -MP -MF $(DEPDIR)/SubmissionStatusTest-Test.Tpo -c -o SubmissionStatusTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/SubmissionStatusTest-Test.Tpo $(DEPDIR)/SubmissionStatusTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='SubmissionStatusTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmissionStatusTest_CXXFLAGS) $(CXXFLAGS) -c -o SubmissionStatusTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then 
$(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` SubmissionStatusTest-SubmissionStatusTest.o: SubmissionStatusTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmissionStatusTest_CXXFLAGS) $(CXXFLAGS) -MT SubmissionStatusTest-SubmissionStatusTest.o -MD -MP -MF $(DEPDIR)/SubmissionStatusTest-SubmissionStatusTest.Tpo -c -o SubmissionStatusTest-SubmissionStatusTest.o `test -f 'SubmissionStatusTest.cpp' || echo '$(srcdir)/'`SubmissionStatusTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/SubmissionStatusTest-SubmissionStatusTest.Tpo $(DEPDIR)/SubmissionStatusTest-SubmissionStatusTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SubmissionStatusTest.cpp' object='SubmissionStatusTest-SubmissionStatusTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmissionStatusTest_CXXFLAGS) $(CXXFLAGS) -c -o SubmissionStatusTest-SubmissionStatusTest.o `test -f 'SubmissionStatusTest.cpp' || echo '$(srcdir)/'`SubmissionStatusTest.cpp SubmissionStatusTest-SubmissionStatusTest.obj: SubmissionStatusTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmissionStatusTest_CXXFLAGS) $(CXXFLAGS) -MT SubmissionStatusTest-SubmissionStatusTest.obj -MD -MP -MF $(DEPDIR)/SubmissionStatusTest-SubmissionStatusTest.Tpo -c -o SubmissionStatusTest-SubmissionStatusTest.obj `if test -f 'SubmissionStatusTest.cpp'; then $(CYGPATH_W) 'SubmissionStatusTest.cpp'; else $(CYGPATH_W) '$(srcdir)/SubmissionStatusTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/SubmissionStatusTest-SubmissionStatusTest.Tpo $(DEPDIR)/SubmissionStatusTest-SubmissionStatusTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SubmissionStatusTest.cpp' object='SubmissionStatusTest-SubmissionStatusTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmissionStatusTest_CXXFLAGS) $(CXXFLAGS) -c -o SubmissionStatusTest-SubmissionStatusTest.obj `if test -f 'SubmissionStatusTest.cpp'; then $(CYGPATH_W) 'SubmissionStatusTest.cpp'; else $(CYGPATH_W) '$(srcdir)/SubmissionStatusTest.cpp'; fi` SubmitterPluginTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmitterPluginTest_CXXFLAGS) $(CXXFLAGS) -MT SubmitterPluginTest-Test.o -MD -MP -MF $(DEPDIR)/SubmitterPluginTest-Test.Tpo -c -o SubmitterPluginTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/SubmitterPluginTest-Test.Tpo $(DEPDIR)/SubmitterPluginTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='SubmitterPluginTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmitterPluginTest_CXXFLAGS) $(CXXFLAGS) -c -o SubmitterPluginTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp SubmitterPluginTest-Test.obj: $(top_srcdir)/src/Test.cpp 
@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmitterPluginTest_CXXFLAGS) $(CXXFLAGS) -MT SubmitterPluginTest-Test.obj -MD -MP -MF $(DEPDIR)/SubmitterPluginTest-Test.Tpo -c -o SubmitterPluginTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/SubmitterPluginTest-Test.Tpo $(DEPDIR)/SubmitterPluginTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='SubmitterPluginTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmitterPluginTest_CXXFLAGS) $(CXXFLAGS) -c -o SubmitterPluginTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` SubmitterPluginTest-SubmitterPluginTest.o: SubmitterPluginTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmitterPluginTest_CXXFLAGS) $(CXXFLAGS) -MT SubmitterPluginTest-SubmitterPluginTest.o -MD -MP -MF $(DEPDIR)/SubmitterPluginTest-SubmitterPluginTest.Tpo -c -o SubmitterPluginTest-SubmitterPluginTest.o `test -f 'SubmitterPluginTest.cpp' || echo '$(srcdir)/'`SubmitterPluginTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/SubmitterPluginTest-SubmitterPluginTest.Tpo $(DEPDIR)/SubmitterPluginTest-SubmitterPluginTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SubmitterPluginTest.cpp' object='SubmitterPluginTest-SubmitterPluginTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmitterPluginTest_CXXFLAGS) $(CXXFLAGS) -c -o SubmitterPluginTest-SubmitterPluginTest.o `test -f 'SubmitterPluginTest.cpp' || echo '$(srcdir)/'`SubmitterPluginTest.cpp SubmitterPluginTest-SubmitterPluginTest.obj: SubmitterPluginTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmitterPluginTest_CXXFLAGS) $(CXXFLAGS) -MT SubmitterPluginTest-SubmitterPluginTest.obj -MD -MP -MF $(DEPDIR)/SubmitterPluginTest-SubmitterPluginTest.Tpo -c -o SubmitterPluginTest-SubmitterPluginTest.obj `if test -f 'SubmitterPluginTest.cpp'; then $(CYGPATH_W) 'SubmitterPluginTest.cpp'; else $(CYGPATH_W) '$(srcdir)/SubmitterPluginTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/SubmitterPluginTest-SubmitterPluginTest.Tpo $(DEPDIR)/SubmitterPluginTest-SubmitterPluginTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SubmitterPluginTest.cpp' object='SubmitterPluginTest-SubmitterPluginTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmitterPluginTest_CXXFLAGS) $(CXXFLAGS) -c -o SubmitterPluginTest-SubmitterPluginTest.obj `if test -f 'SubmitterPluginTest.cpp'; then $(CYGPATH_W) 'SubmitterPluginTest.cpp'; else $(CYGPATH_W) '$(srcdir)/SubmitterPluginTest.cpp'; fi` SubmitterTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(SubmitterTest_CXXFLAGS) $(CXXFLAGS) -MT SubmitterTest-Test.o -MD -MP -MF $(DEPDIR)/SubmitterTest-Test.Tpo -c -o SubmitterTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/SubmitterTest-Test.Tpo $(DEPDIR)/SubmitterTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='SubmitterTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmitterTest_CXXFLAGS) $(CXXFLAGS) -c -o SubmitterTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp SubmitterTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmitterTest_CXXFLAGS) $(CXXFLAGS) -MT SubmitterTest-Test.obj -MD -MP -MF $(DEPDIR)/SubmitterTest-Test.Tpo -c -o SubmitterTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/SubmitterTest-Test.Tpo $(DEPDIR)/SubmitterTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='SubmitterTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmitterTest_CXXFLAGS) $(CXXFLAGS) -c -o SubmitterTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` SubmitterTest-SubmitterTest.o: SubmitterTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmitterTest_CXXFLAGS) $(CXXFLAGS) -MT SubmitterTest-SubmitterTest.o -MD -MP -MF $(DEPDIR)/SubmitterTest-SubmitterTest.Tpo -c -o SubmitterTest-SubmitterTest.o `test -f 'SubmitterTest.cpp' || echo '$(srcdir)/'`SubmitterTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/SubmitterTest-SubmitterTest.Tpo $(DEPDIR)/SubmitterTest-SubmitterTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SubmitterTest.cpp' object='SubmitterTest-SubmitterTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmitterTest_CXXFLAGS) $(CXXFLAGS) -c -o SubmitterTest-SubmitterTest.o `test -f 'SubmitterTest.cpp' || echo '$(srcdir)/'`SubmitterTest.cpp SubmitterTest-SubmitterTest.obj: SubmitterTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmitterTest_CXXFLAGS) $(CXXFLAGS) -MT SubmitterTest-SubmitterTest.obj -MD -MP -MF $(DEPDIR)/SubmitterTest-SubmitterTest.Tpo -c -o SubmitterTest-SubmitterTest.obj `if test -f 'SubmitterTest.cpp'; then $(CYGPATH_W) 'SubmitterTest.cpp'; else $(CYGPATH_W) '$(srcdir)/SubmitterTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/SubmitterTest-SubmitterTest.Tpo $(DEPDIR)/SubmitterTest-SubmitterTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SubmitterTest.cpp' object='SubmitterTest-SubmitterTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) 
$(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(SubmitterTest_CXXFLAGS) $(CXXFLAGS) -c -o SubmitterTest-SubmitterTest.obj `if test -f 'SubmitterTest.cpp'; then $(CYGPATH_W) 'SubmitterTest.cpp'; else $(CYGPATH_W) '$(srcdir)/SubmitterTest.cpp'; fi` TargetInformationRetrieverTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(TargetInformationRetrieverTest_CXXFLAGS) $(CXXFLAGS) -MT TargetInformationRetrieverTest-Test.o -MD -MP -MF $(DEPDIR)/TargetInformationRetrieverTest-Test.Tpo -c -o TargetInformationRetrieverTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/TargetInformationRetrieverTest-Test.Tpo $(DEPDIR)/TargetInformationRetrieverTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='TargetInformationRetrieverTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(TargetInformationRetrieverTest_CXXFLAGS) $(CXXFLAGS) -c -o TargetInformationRetrieverTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp TargetInformationRetrieverTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(TargetInformationRetrieverTest_CXXFLAGS) $(CXXFLAGS) -MT TargetInformationRetrieverTest-Test.obj -MD -MP -MF $(DEPDIR)/TargetInformationRetrieverTest-Test.Tpo -c -o TargetInformationRetrieverTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/TargetInformationRetrieverTest-Test.Tpo $(DEPDIR)/TargetInformationRetrieverTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='TargetInformationRetrieverTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(TargetInformationRetrieverTest_CXXFLAGS) $(CXXFLAGS) -c -o TargetInformationRetrieverTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` TargetInformationRetrieverTest-TargetInformationRetrieverTest.o: TargetInformationRetrieverTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(TargetInformationRetrieverTest_CXXFLAGS) $(CXXFLAGS) -MT TargetInformationRetrieverTest-TargetInformationRetrieverTest.o -MD -MP -MF $(DEPDIR)/TargetInformationRetrieverTest-TargetInformationRetrieverTest.Tpo -c -o TargetInformationRetrieverTest-TargetInformationRetrieverTest.o `test -f 'TargetInformationRetrieverTest.cpp' || echo '$(srcdir)/'`TargetInformationRetrieverTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/TargetInformationRetrieverTest-TargetInformationRetrieverTest.Tpo $(DEPDIR)/TargetInformationRetrieverTest-TargetInformationRetrieverTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='TargetInformationRetrieverTest.cpp' 
object='TargetInformationRetrieverTest-TargetInformationRetrieverTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(TargetInformationRetrieverTest_CXXFLAGS) $(CXXFLAGS) -c -o TargetInformationRetrieverTest-TargetInformationRetrieverTest.o `test -f 'TargetInformationRetrieverTest.cpp' || echo '$(srcdir)/'`TargetInformationRetrieverTest.cpp TargetInformationRetrieverTest-TargetInformationRetrieverTest.obj: TargetInformationRetrieverTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(TargetInformationRetrieverTest_CXXFLAGS) $(CXXFLAGS) -MT TargetInformationRetrieverTest-TargetInformationRetrieverTest.obj -MD -MP -MF $(DEPDIR)/TargetInformationRetrieverTest-TargetInformationRetrieverTest.Tpo -c -o TargetInformationRetrieverTest-TargetInformationRetrieverTest.obj `if test -f 'TargetInformationRetrieverTest.cpp'; then $(CYGPATH_W) 'TargetInformationRetrieverTest.cpp'; else $(CYGPATH_W) '$(srcdir)/TargetInformationRetrieverTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/TargetInformationRetrieverTest-TargetInformationRetrieverTest.Tpo $(DEPDIR)/TargetInformationRetrieverTest-TargetInformationRetrieverTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='TargetInformationRetrieverTest.cpp' object='TargetInformationRetrieverTest-TargetInformationRetrieverTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(TargetInformationRetrieverTest_CXXFLAGS) $(CXXFLAGS) -c -o TargetInformationRetrieverTest-TargetInformationRetrieverTest.obj `if test -f 'TargetInformationRetrieverTest.cpp'; then $(CYGPATH_W) 'TargetInformationRetrieverTest.cpp'; else $(CYGPATH_W) '$(srcdir)/TargetInformationRetrieverTest.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: 
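# Like the ID, TAGS and CTAGS rules above, GTAGS builds a source cross-reference database,
# here with GNU GLOBAL's gtags run from $(top_srcdir) against the current build directory.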
here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? -ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-checkPROGRAMS clean-generic clean-libtool \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-TESTS check-am clean \ clean-checkPROGRAMS clean-generic clean-libtool ctags \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am #~ EndpointTest_SOURCES = $(top_srcdir)/src/Test.cpp \ # #~ EndpointTest.cpp #~ EndpointTest_CXXFLAGS = -I$(top_srcdir)/include \ # #~ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) #~ EndpointTest_LDADD = \ # #~ $(top_builddir)/src/hed/libs/compute/libarcclient.la \ # #~ $(top_builddir)/src/hed/libs/common/libarccommon.la \ # #~ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ # #~ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
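# (.NOEXPORT is the special target automake appends for that purpose: merely mentioning it
# switches those old GNU make releases back to not exporting variables by default.)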
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/compute/test/PaxHeaders.7502/JobControllerPluginTest.cpp0000644000000000000000000000012412045235201027403 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.704704 30 ctime=1513200659.849745648 nordugrid-arc-5.4.2/src/hed/libs/compute/test/JobControllerPluginTest.cpp0000644000175000002070000000234612045235201027455 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include class JobControllerPluginTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(JobControllerPluginTest); CPPUNIT_TEST(LoadTest); CPPUNIT_TEST_SUITE_END(); public: JobControllerPluginTest(); ~JobControllerPluginTest() { delete jcl; } void setUp() {} void tearDown() { Arc::ThreadInitializer().waitExit(); } void LoadTest(); private: Arc::JobControllerPlugin *jc; Arc::JobControllerPluginLoader *jcl; Arc::UserConfig usercfg; }; JobControllerPluginTest::JobControllerPluginTest() : jc(NULL), usercfg(Arc::initializeCredentialsType(Arc::initializeCredentialsType::SkipCredentials)) { jcl = new Arc::JobControllerPluginLoader(); } void JobControllerPluginTest::LoadTest() { jc = jcl->load("", usercfg); CPPUNIT_ASSERT(jc == NULL); jc = jcl->load("NON-EXISTENT", usercfg); CPPUNIT_ASSERT(jc == NULL); jc = jcl->load("TEST", usercfg); CPPUNIT_ASSERT(jc != NULL); } CPPUNIT_TEST_SUITE_REGISTRATION(JobControllerPluginTest); nordugrid-arc-5.4.2/src/hed/libs/compute/test/PaxHeaders.7502/ComputingServiceUniqTest.cpp0000644000000000000000000000012412046507221027576 xustar000000000000000027 mtime=1352306321.356184 27 atime=1513200574.701704 30 ctime=1513200659.848745636 nordugrid-arc-5.4.2/src/hed/libs/compute/test/ComputingServiceUniqTest.cpp0000644000175000002070000000441712046507221027651 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include class ComputingServiceUniqTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(ComputingServiceUniqTest); CPPUNIT_TEST(KeepServicesWithDifferentID); CPPUNIT_TEST(KeepOnlyOneServiceWithTheSameID); CPPUNIT_TEST(KeepHigherPriorityService); CPPUNIT_TEST_SUITE_END(); public: ComputingServiceUniqTest() {}; void setUp() {} void tearDown() {} void KeepServicesWithDifferentID(); void KeepOnlyOneServiceWithTheSameID(); void KeepHigherPriorityService(); }; void ComputingServiceUniqTest::KeepServicesWithDifferentID() { Arc::ComputingServiceType cs1; cs1->ID = "ID1"; Arc::ComputingServiceType cs2; cs2->ID = "ID2"; Arc::ComputingServiceUniq csu; csu.addEntity(cs1); csu.addEntity(cs2); std::list services = csu.getServices(); CPPUNIT_ASSERT(services.size() == 2); CPPUNIT_ASSERT(services.front()->ID == "ID1"); CPPUNIT_ASSERT(services.back()->ID == "ID2"); } void ComputingServiceUniqTest::KeepOnlyOneServiceWithTheSameID() { Arc::ComputingServiceType cs1; cs1->ID = "ID"; Arc::ComputingServiceType cs2; cs2->ID = "ID"; Arc::ComputingServiceUniq csu; csu.addEntity(cs1); csu.addEntity(cs2); std::list services = csu.getServices(); CPPUNIT_ASSERT(services.size() == 1); CPPUNIT_ASSERT(services.front()->ID == "ID"); } void ComputingServiceUniqTest::KeepHigherPriorityService() { Arc::ComputingServiceType cs1; cs1->ID = "ID"; cs1->Type = "lower"; Arc::Endpoint origin1; origin1.InterfaceName = "org.nordugrid.ldapng"; cs1->InformationOriginEndpoint = origin1; Arc::ComputingServiceType cs2; cs2->ID = "ID"; cs2->Type = "higher"; Arc::Endpoint origin2; origin2.InterfaceName = "org.nordugrid.ldapglue2"; cs2->InformationOriginEndpoint = 
origin2; Arc::ComputingServiceUniq csu; csu.addEntity(cs1); csu.addEntity(cs2); std::list services = csu.getServices(); CPPUNIT_ASSERT(services.size() == 1); CPPUNIT_ASSERT(services.front()->Type == "higher"); } CPPUNIT_TEST_SUITE_REGISTRATION(ComputingServiceUniqTest); nordugrid-arc-5.4.2/src/hed/libs/compute/test/PaxHeaders.7502/JobSupervisorTest.cpp0000644000000000000000000000012413065017474026277 xustar000000000000000027 mtime=1490296636.623386 27 atime=1513200574.711704 30 ctime=1513200659.854745709 nordugrid-arc-5.4.2/src/hed/libs/compute/test/JobSupervisorTest.cpp0000644000175000002070000002357713065017474026362 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include class JobSupervisorTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(JobSupervisorTest); CPPUNIT_TEST(TestConstructor); CPPUNIT_TEST(TestAddJob); CPPUNIT_TEST(TestResubmit); CPPUNIT_TEST(TestCancel); CPPUNIT_TEST(TestClean); CPPUNIT_TEST(TestSelector); CPPUNIT_TEST_SUITE_END(); public: JobSupervisorTest(); ~JobSupervisorTest() {} void setUp() {} void tearDown() { Arc::ThreadInitializer().waitExit(); } void TestConstructor(); void TestAddJob(); void TestResubmit(); void TestCancel(); void TestClean(); void TestSelector(); private: Arc::UserConfig usercfg; Arc::JobSupervisor *js; Arc::Job j; }; class ThreeDaysOldJobSelector : public Arc::JobSelector { public: ThreeDaysOldJobSelector() { now = Arc::Time(); three_days = Arc::Period(60*60*24*3); } bool Select(const Arc::Job& job) const { return (now - job.EndTime) >= three_days; } private: Arc::Time now; Arc::Period three_days; }; JobSupervisorTest::JobSupervisorTest() : usercfg(Arc::initializeCredentialsType(Arc::initializeCredentialsType::SkipCredentials)), js(NULL) { j.JobStatusURL = Arc::URL("http://test.nordugrid.org"); j.JobStatusInterfaceName = "org.nordugrid.test"; j.JobManagementURL = Arc::URL("http://test.nordugrid.org"); j.JobManagementInterfaceName = "org.nordugrid.test"; } void JobSupervisorTest::TestConstructor() { std::list jobs; std::string id1 = "http://test.nordugrid.org/1234567890test1"; std::string id2 = "http://test.nordugrid.org/1234567890test2"; j.JobID = id1; jobs.push_back(j); j.JobID = id2; jobs.push_back(j); js = new Arc::JobSupervisor(usercfg, jobs); CPPUNIT_ASSERT(!js->GetAllJobs().empty()); jobs = js->GetAllJobs(); // JobSupervisor should contain 2 jobs. 
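// GetAllJobs() is expected to hand the jobs back in insertion order: id1 first, id2 last.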
CPPUNIT_ASSERT_EQUAL(2, (int)jobs.size()); CPPUNIT_ASSERT_EQUAL(id1, jobs.front().JobID); CPPUNIT_ASSERT_EQUAL(id2, jobs.back().JobID); delete js; } void JobSupervisorTest::TestAddJob() { js = new Arc::JobSupervisor(usercfg, std::list()); CPPUNIT_ASSERT(js->GetAllJobs().empty()); j.JobID = "http://test.nordugrid.org/1234567890test1"; CPPUNIT_ASSERT(js->AddJob(j)); CPPUNIT_ASSERT(!js->GetAllJobs().empty()); j.JobManagementInterfaceName = ""; CPPUNIT_ASSERT(!js->AddJob(j)); CPPUNIT_ASSERT_EQUAL(1, (int)js->GetAllJobs().size()); j.JobManagementInterfaceName = "non.existent.interface"; CPPUNIT_ASSERT(!js->AddJob(j)); CPPUNIT_ASSERT_EQUAL(1, (int)js->GetAllJobs().size()); delete js; } void JobSupervisorTest::TestResubmit() { std::list jobs; std::string id1("http://test.nordugrid.org/1234567890test1"), id2("http://test.nordugrid.org/1234567890test2"), id3("http://test.nordugrid.org/1234567890test3"); j.State = Arc::JobStateTEST(Arc::JobState::FAILED); j.JobID = id1; j.JobDescriptionDocument = "CONTENT"; jobs.push_back(j); j.State = Arc::JobStateTEST(Arc::JobState::RUNNING); j.JobID = id1; j.JobDescriptionDocument = "CONTENT"; jobs.push_back(j); usercfg.Broker("TEST"); Arc::ComputingServiceType cs; Arc::ComputingEndpointType ce; ce->URLString = "http://test2.nordugrid.org"; ce->InterfaceName = "org.nordugrid.test"; ce->Capability.insert(Arc::Endpoint::GetStringForCapability(Arc::Endpoint::JOBSUBMIT)); ce->HealthState = "ok"; cs.ComputingEndpoint.insert(std::pair(0, ce)); cs.ComputingShare.insert(std::pair(0, Arc::ComputingShareType())); Arc::ComputingManagerType cm; cm.ExecutionEnvironment.insert(std::pair(0, Arc::ExecutionEnvironmentType())); cs.ComputingManager.insert(std::pair(0, cm)); Arc::TargetInformationRetrieverPluginTESTControl::targets.push_back(cs); Arc::TargetInformationRetrieverPluginTESTControl::status = Arc::EndpointQueryingStatus::SUCCESSFUL; Arc::BrokerPluginTestACCControl::match = true; js = new Arc::JobSupervisor(usercfg, jobs); std::list services(1, Arc::Endpoint("http://test2.nordugrid.org", Arc::Endpoint::COMPUTINGINFO, "org.nordugrid.tirtest")); std::list resubmitted; CPPUNIT_ASSERT(js->Resubmit(0, services, resubmitted)); CPPUNIT_ASSERT_EQUAL(2, (int)resubmitted.size()); delete js; } void JobSupervisorTest::TestCancel() { std::list jobs; std::string id1 = "http://test.nordugrid.org/1234567890test1"; std::string id2 = "http://test.nordugrid.org/1234567890test2"; std::string id3 = "http://test.nordugrid.org/1234567890test3"; std::string id4 = "http://test.nordugrid.org/1234567890test4"; j.State = Arc::JobStateTEST(Arc::JobState::RUNNING); j.JobID = id1; jobs.push_back(j); j.State = Arc::JobStateTEST(Arc::JobState::FINISHED); j.JobID = id2; jobs.push_back(j); j.State = Arc::JobStateTEST(Arc::JobState::UNDEFINED); j.JobID = id3; jobs.push_back(j); js = new Arc::JobSupervisor(usercfg, jobs); Arc::JobControllerPluginTestACCControl::cancelStatus = true; CPPUNIT_ASSERT(js->Cancel()); CPPUNIT_ASSERT_EQUAL(1, (int)js->GetIDsProcessed().size()); CPPUNIT_ASSERT_EQUAL(id1, js->GetIDsProcessed().front()); CPPUNIT_ASSERT_EQUAL(2, (int)js->GetIDsNotProcessed().size()); CPPUNIT_ASSERT_EQUAL(id2, js->GetIDsNotProcessed().front()); CPPUNIT_ASSERT_EQUAL(id3, js->GetIDsNotProcessed().back()); js->ClearSelection(); Arc::JobControllerPluginTestACCControl::cancelStatus = false; CPPUNIT_ASSERT(!js->Cancel()); CPPUNIT_ASSERT_EQUAL(0, (int)js->GetIDsProcessed().size()); CPPUNIT_ASSERT_EQUAL(3, (int)js->GetIDsNotProcessed().size()); CPPUNIT_ASSERT_EQUAL(id1, js->GetIDsNotProcessed().front()); 
CPPUNIT_ASSERT_EQUAL(id3, js->GetIDsNotProcessed().back()); js->ClearSelection(); j.State = Arc::JobStateTEST(Arc::JobState::ACCEPTED, "Accepted"); j.JobID = id4; CPPUNIT_ASSERT(js->AddJob(j)); std::list status; status.push_back("Accepted"); Arc::JobControllerPluginTestACCControl::cancelStatus = true; js->SelectByStatus(status); CPPUNIT_ASSERT(js->Cancel()); CPPUNIT_ASSERT_EQUAL(1, (int)js->GetIDsProcessed().size()); CPPUNIT_ASSERT_EQUAL(id4, js->GetIDsProcessed().front()); CPPUNIT_ASSERT_EQUAL(0, (int)js->GetIDsNotProcessed().size()); js->ClearSelection(); Arc::JobControllerPluginTestACCControl::cancelStatus = false; js->SelectByStatus(status); CPPUNIT_ASSERT(!js->Cancel()); CPPUNIT_ASSERT_EQUAL(0, (int)js->GetIDsProcessed().size()); CPPUNIT_ASSERT_EQUAL(1, (int)js->GetIDsNotProcessed().size()); CPPUNIT_ASSERT_EQUAL(id4, js->GetIDsNotProcessed().front()); js->ClearSelection(); delete js; } void JobSupervisorTest::TestClean() { std::list jobs; std::string id1 = "http://test.nordugrid.org/1234567890test1"; std::string id2 = "http://test.nordugrid.org/1234567890test2"; j.State = Arc::JobStateTEST(Arc::JobState::FINISHED, "Finished"); j.JobID = id1; jobs.push_back(j); j.State = Arc::JobStateTEST(Arc::JobState::UNDEFINED); j.JobID = id2; jobs.push_back(j); js = new Arc::JobSupervisor(usercfg, jobs); CPPUNIT_ASSERT_EQUAL(2, (int)js->GetAllJobs().size()); Arc::JobControllerPluginTestACCControl::cleanStatus = true; CPPUNIT_ASSERT(js->Clean()); CPPUNIT_ASSERT_EQUAL(1, (int)js->GetIDsProcessed().size()); CPPUNIT_ASSERT_EQUAL(id1, js->GetIDsProcessed().front()); CPPUNIT_ASSERT_EQUAL(1, (int)js->GetIDsNotProcessed().size()); CPPUNIT_ASSERT_EQUAL(id2, js->GetIDsNotProcessed().back()); js->ClearSelection(); Arc::JobControllerPluginTestACCControl::cleanStatus = false; CPPUNIT_ASSERT(!js->Clean()); CPPUNIT_ASSERT_EQUAL(0, (int)js->GetIDsProcessed().size()); CPPUNIT_ASSERT_EQUAL(2, (int)js->GetIDsNotProcessed().size()); CPPUNIT_ASSERT_EQUAL(id1, js->GetIDsNotProcessed().front()); CPPUNIT_ASSERT_EQUAL(id2, js->GetIDsNotProcessed().back()); js->ClearSelection(); std::list status; status.push_back("Finished"); Arc::JobControllerPluginTestACCControl::cleanStatus = true; js->SelectByStatus(status); CPPUNIT_ASSERT(js->Clean()); CPPUNIT_ASSERT_EQUAL(1, (int)js->GetIDsProcessed().size()); CPPUNIT_ASSERT_EQUAL(id1, js->GetIDsProcessed().front()); CPPUNIT_ASSERT_EQUAL(0, (int)js->GetIDsNotProcessed().size()); js->ClearSelection(); Arc::JobControllerPluginTestACCControl::cleanStatus = false; js->SelectByStatus(status); CPPUNIT_ASSERT(!js->Clean()); CPPUNIT_ASSERT_EQUAL(0, (int)js->GetIDsProcessed().size()); CPPUNIT_ASSERT_EQUAL(1, (int)js->GetIDsNotProcessed().size()); CPPUNIT_ASSERT_EQUAL(id1, js->GetIDsNotProcessed().front()); delete js; } void JobSupervisorTest::TestSelector() { js = new Arc::JobSupervisor(usercfg); j.JobID = "test-job-1-day-old"; j.EndTime = Arc::Time()-Arc::Period("P1D"); js->AddJob(j); j.JobID = "test-job-2-days-old"; j.EndTime = Arc::Time()-Arc::Period("P2D"); js->AddJob(j); j.JobID = "test-job-3-days-old"; j.EndTime = Arc::Time()-Arc::Period("P3D"); js->AddJob(j); j.JobID = "test-job-4-days-old"; j.EndTime = Arc::Time()-Arc::Period("P4D"); js->AddJob(j); CPPUNIT_ASSERT_EQUAL(4, (int)js->GetAllJobs().size()); ThreeDaysOldJobSelector selector; js->Select(selector); std::list selectedJobs = js->GetSelectedJobs(); CPPUNIT_ASSERT_EQUAL(2, (int)js->GetSelectedJobs().size()); for (std::list::iterator itJ = selectedJobs.begin(); itJ != selectedJobs.end(); ++itJ) { CPPUNIT_ASSERT(itJ->JobID == 
"test-job-3-days-old" || itJ->JobID == "test-job-4-days-old"); CPPUNIT_ASSERT(itJ->EndTime <= (Arc::Time()-Arc::Period("P3D"))); } delete js; } CPPUNIT_TEST_SUITE_REGISTRATION(JobSupervisorTest); nordugrid-arc-5.4.2/src/hed/libs/compute/test/PaxHeaders.7502/JobListRetrieverTest.cpp0000644000000000000000000000012412045235201026704 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.706704 30 ctime=1513200659.852745685 nordugrid-arc-5.4.2/src/hed/libs/compute/test/JobListRetrieverTest.cpp0000644000175000002070000000315212045235201026752 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include //static Arc::Logger testLogger(Arc::Logger::getRootLogger(), "JobListRetrieverTest"); class JobListRetrieverTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(JobListRetrieverTest); CPPUNIT_TEST(PluginLoading); CPPUNIT_TEST(QueryTest); CPPUNIT_TEST_SUITE_END(); public: JobListRetrieverTest() {}; void setUp() {} void tearDown() { Arc::ThreadInitializer().waitExit(); } void PluginLoading(); void QueryTest(); }; void JobListRetrieverTest::PluginLoading() { Arc::JobListRetrieverPluginLoader l; Arc::JobListRetrieverPlugin* p = (Arc::JobListRetrieverPlugin*)l.load("TEST"); CPPUNIT_ASSERT(p != NULL); } void JobListRetrieverTest::QueryTest() { Arc::EndpointQueryingStatus sInitial(Arc::EndpointQueryingStatus::SUCCESSFUL); Arc::JobListRetrieverPluginTESTControl::delay = 1; Arc::JobListRetrieverPluginTESTControl::status = sInitial; Arc::JobListRetrieverPluginLoader l; Arc::JobListRetrieverPlugin* p = (Arc::JobListRetrieverPlugin*)l.load("TEST"); CPPUNIT_ASSERT(p != NULL); Arc::UserConfig uc; Arc::Endpoint endpoint; std::list jobs; Arc::EndpointQueryingStatus sReturned = p->Query(uc, endpoint, jobs, Arc::EndpointQueryOptions()); CPPUNIT_ASSERT(sReturned == Arc::EndpointQueryingStatus::SUCCESSFUL); } CPPUNIT_TEST_SUITE_REGISTRATION(JobListRetrieverTest); nordugrid-arc-5.4.2/src/hed/libs/compute/test/PaxHeaders.7502/SubmissionStatusTest.cpp0000644000000000000000000000012412057323177027022 xustar000000000000000027 mtime=1354606207.055931 27 atime=1513200574.706704 30 ctime=1513200659.858745758 nordugrid-arc-5.4.2/src/hed/libs/compute/test/SubmissionStatusTest.cpp0000644000175000002070000001026412057323177027072 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include class SubmissionStatusTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(SubmissionStatusTest); CPPUNIT_TEST(BasicTest); CPPUNIT_TEST(OrTest); CPPUNIT_TEST(AndTest); CPPUNIT_TEST(UnsetTest); CPPUNIT_TEST_SUITE_END(); public: SubmissionStatusTest() {} void setUp() {} void tearDown() {} void BasicTest(); void OrTest(); void AndTest(); void UnsetTest(); }; void SubmissionStatusTest::BasicTest() { { Arc::SubmissionStatus s; CPPUNIT_ASSERT(s); } { Arc::SubmissionStatus s(Arc::SubmissionStatus::NOT_IMPLEMENTED); CPPUNIT_ASSERT(!s); CPPUNIT_ASSERT(s == Arc::SubmissionStatus::NOT_IMPLEMENTED); CPPUNIT_ASSERT(Arc::SubmissionStatus::NOT_IMPLEMENTED == s); } { Arc::SubmissionStatus s((unsigned int)Arc::SubmissionStatus::NOT_IMPLEMENTED); CPPUNIT_ASSERT(!s); CPPUNIT_ASSERT(Arc::SubmissionStatus::NOT_IMPLEMENTED == s); } } void SubmissionStatusTest::OrTest() { { Arc::SubmissionStatus s(Arc::SubmissionStatus::NOT_IMPLEMENTED); CPPUNIT_ASSERT(s == Arc::SubmissionStatus::NOT_IMPLEMENTED); s = s | Arc::SubmissionStatus::NO_SERVICES; CPPUNIT_ASSERT(s != Arc::SubmissionStatus::NOT_IMPLEMENTED); 
CPPUNIT_ASSERT(s.isSet(Arc::SubmissionStatus::NOT_IMPLEMENTED)); CPPUNIT_ASSERT(s.isSet(Arc::SubmissionStatus::NO_SERVICES)); CPPUNIT_ASSERT(s == (unsigned int)(Arc::SubmissionStatus::NOT_IMPLEMENTED | Arc::SubmissionStatus::NO_SERVICES)); } { Arc::SubmissionStatus s(Arc::SubmissionStatus::NOT_IMPLEMENTED); CPPUNIT_ASSERT(s == Arc::SubmissionStatus::NOT_IMPLEMENTED); s |= Arc::SubmissionStatus::NO_SERVICES; CPPUNIT_ASSERT(s != Arc::SubmissionStatus::NOT_IMPLEMENTED); CPPUNIT_ASSERT(s.isSet(Arc::SubmissionStatus::NOT_IMPLEMENTED)); CPPUNIT_ASSERT(s.isSet(Arc::SubmissionStatus::NO_SERVICES)); CPPUNIT_ASSERT(s == (unsigned int)(Arc::SubmissionStatus::NOT_IMPLEMENTED | Arc::SubmissionStatus::NO_SERVICES)); } } void SubmissionStatusTest::AndTest() { { Arc::SubmissionStatus s((unsigned int)(Arc::SubmissionStatus::NOT_IMPLEMENTED | Arc::SubmissionStatus::NO_SERVICES)); CPPUNIT_ASSERT(s == (unsigned int)(Arc::SubmissionStatus::NOT_IMPLEMENTED | Arc::SubmissionStatus::NO_SERVICES)); s = s & Arc::SubmissionStatus::NO_SERVICES; CPPUNIT_ASSERT(!s.isSet(Arc::SubmissionStatus::NOT_IMPLEMENTED)); CPPUNIT_ASSERT(s == Arc::SubmissionStatus::NO_SERVICES); CPPUNIT_ASSERT(s.isSet(Arc::SubmissionStatus::NO_SERVICES)); } { Arc::SubmissionStatus s((unsigned int)(Arc::SubmissionStatus::NOT_IMPLEMENTED | Arc::SubmissionStatus::NO_SERVICES)); CPPUNIT_ASSERT(s == (unsigned int)(Arc::SubmissionStatus::NOT_IMPLEMENTED | Arc::SubmissionStatus::NO_SERVICES)); s &= Arc::SubmissionStatus::NO_SERVICES; CPPUNIT_ASSERT(!s.isSet(Arc::SubmissionStatus::NOT_IMPLEMENTED)); CPPUNIT_ASSERT(s == Arc::SubmissionStatus::NO_SERVICES); CPPUNIT_ASSERT(s.isSet(Arc::SubmissionStatus::NO_SERVICES)); } } void SubmissionStatusTest::UnsetTest() { { Arc::SubmissionStatus s(Arc::SubmissionStatus::NOT_IMPLEMENTED); CPPUNIT_ASSERT(!s); CPPUNIT_ASSERT(s == Arc::SubmissionStatus::NOT_IMPLEMENTED); CPPUNIT_ASSERT(s.isSet(Arc::SubmissionStatus::NOT_IMPLEMENTED)); s.unset(Arc::SubmissionStatus::NOT_IMPLEMENTED); CPPUNIT_ASSERT(s); CPPUNIT_ASSERT(s != Arc::SubmissionStatus::NOT_IMPLEMENTED); CPPUNIT_ASSERT(!s.isSet(Arc::SubmissionStatus::NOT_IMPLEMENTED)); } { Arc::SubmissionStatus s((unsigned int)(Arc::SubmissionStatus::NOT_IMPLEMENTED | Arc::SubmissionStatus::NO_SERVICES)); CPPUNIT_ASSERT(!s); CPPUNIT_ASSERT(s.isSet(Arc::SubmissionStatus::NOT_IMPLEMENTED)); CPPUNIT_ASSERT(s.isSet(Arc::SubmissionStatus::NO_SERVICES)); s.unset(Arc::SubmissionStatus::NOT_IMPLEMENTED); CPPUNIT_ASSERT(!s); CPPUNIT_ASSERT(s == Arc::SubmissionStatus::NO_SERVICES); CPPUNIT_ASSERT(!s.isSet(Arc::SubmissionStatus::NOT_IMPLEMENTED)); CPPUNIT_ASSERT(s.isSet(Arc::SubmissionStatus::NO_SERVICES)); } } CPPUNIT_TEST_SUITE_REGISTRATION(SubmissionStatusTest); nordugrid-arc-5.4.2/src/hed/libs/compute/test/PaxHeaders.7502/SubmitterTest.cpp0000644000000000000000000000012412051675267025445 xustar000000000000000027 mtime=1353153207.099019 27 atime=1513200574.713704 30 ctime=1513200659.860745783 nordugrid-arc-5.4.2/src/hed/libs/compute/test/SubmitterTest.cpp0000644000175000002070000000374012051675267025516 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include class SubmitterTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(SubmitterTest); CPPUNIT_TEST(SubmissionToExecutionTargetTest); CPPUNIT_TEST(SubmissionToExecutionTargetWithConsumerTest); CPPUNIT_TEST_SUITE_END(); public: SubmitterTest(); void setUp() {} void tearDown() { Arc::ThreadInitializer().waitExit(); } void SubmissionToExecutionTargetTest(); void 
SubmissionToExecutionTargetWithConsumerTest(); private: Arc::UserConfig usercfg; }; SubmitterTest::SubmitterTest() : usercfg(Arc::initializeCredentialsType(Arc::initializeCredentialsType::SkipCredentials)) {} void SubmitterTest::SubmissionToExecutionTargetTest() { Arc::Submitter submitter(usercfg); // Prepare to job which will be returned by the test ACC Arc::Job testJob; testJob.JobID = "http://test.nordugrid.org/testjob"; Arc::SubmitterPluginTestACCControl::submitJob = testJob; Arc::JobDescription desc; Arc::ExecutionTarget et; et.ComputingEndpoint->InterfaceName = "org.nordugrid.test"; Arc::Job job; submitter.Submit(et, desc, job); CPPUNIT_ASSERT(job == testJob); } void SubmitterTest::SubmissionToExecutionTargetWithConsumerTest() { Arc::Submitter submitter(usercfg); // Prepare to job which will be returned by the test ACC Arc::Job testJob; testJob.JobID = "http://test.nordugrid.org/testjob"; Arc::SubmitterPluginTestACCControl::submitJob = testJob; Arc::JobDescription desc; Arc::ExecutionTarget et; et.ComputingEndpoint->InterfaceName = "org.nordugrid.test"; Arc::EntityContainer container; submitter.addConsumer(container); submitter.Submit(et, desc); CPPUNIT_ASSERT_EQUAL(1, (int)container.size()); CPPUNIT_ASSERT(container.front() == testJob); } CPPUNIT_TEST_SUITE_REGISTRATION(SubmitterTest); nordugrid-arc-5.4.2/src/hed/libs/compute/test/PaxHeaders.7502/ServiceEndpointRetrieverTest.cpp0000644000000000000000000000012412051006754030445 xustar000000000000000027 mtime=1352928748.264998 27 atime=1513200574.704704 30 ctime=1513200659.856745734 nordugrid-arc-5.4.2/src/hed/libs/compute/test/ServiceEndpointRetrieverTest.cpp0000644000175000002070000000743312051006754030521 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include class ServiceEndpointRetrieverTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(ServiceEndpointRetrieverTest); CPPUNIT_TEST(PluginLoading); CPPUNIT_TEST(QueryTest); CPPUNIT_TEST(BasicServiceRetrieverTest); CPPUNIT_TEST(SuspendedEndpointTest); CPPUNIT_TEST_SUITE_END(); public: ServiceEndpointRetrieverTest() {}; void setUp() {} void tearDown() {} void PluginLoading(); void QueryTest(); void BasicServiceRetrieverTest(); void SuspendedEndpointTest(); }; void ServiceEndpointRetrieverTest::PluginLoading() { Arc::ServiceEndpointRetrieverPluginLoader l; Arc::ServiceEndpointRetrieverPlugin* p = (Arc::ServiceEndpointRetrieverPlugin*)l.load("TEST"); CPPUNIT_ASSERT(p != NULL); } void ServiceEndpointRetrieverTest::QueryTest() { Arc::EndpointQueryingStatus sInitial(Arc::EndpointQueryingStatus::SUCCESSFUL); Arc::ServiceEndpointRetrieverPluginTESTControl::status.push_back(sInitial); Arc::ServiceEndpointRetrieverPluginLoader l; Arc::ServiceEndpointRetrieverPlugin* p = (Arc::ServiceEndpointRetrieverPlugin*)l.load("TEST"); CPPUNIT_ASSERT(p != NULL); Arc::UserConfig uc; Arc::Endpoint registry; std::list endpoints; Arc::EndpointQueryingStatus sReturned = p->Query(uc, registry, endpoints, Arc::EndpointQueryOptions()); CPPUNIT_ASSERT(sReturned == Arc::EndpointQueryingStatus::SUCCESSFUL); } void ServiceEndpointRetrieverTest::BasicServiceRetrieverTest() { Arc::UserConfig uc; Arc::ServiceEndpointRetriever retriever(uc); Arc::EntityContainer container; retriever.addConsumer(container); CPPUNIT_ASSERT(container.empty()); Arc::ServiceEndpointRetrieverPluginTESTControl::endpoints.push_back(std::list(1, Arc::Endpoint())); Arc::ServiceEndpointRetrieverPluginTESTControl::status.push_back(Arc::EndpointQueryingStatus::SUCCESSFUL); Arc::Endpoint 
registry("test.nordugrid.org", Arc::Endpoint::REGISTRY, "org.nordugrid.sertest"); retriever.addEndpoint(registry); retriever.wait(); CPPUNIT_ASSERT_EQUAL(1, (int)container.size()); } void ServiceEndpointRetrieverTest::SuspendedEndpointTest() { Arc::UserConfig uc; Arc::ServiceEndpointRetriever retriever(uc); Arc::SimpleCondition c; Arc::ServiceEndpointRetrieverPluginTESTControl::condition.push_back(&c); // Block the first instance of the ServiceEndpointRetrieverPluginTEST::Query method Arc::ServiceEndpointRetrieverPluginTESTControl::status.push_back(Arc::EndpointQueryingStatus::FAILED); // First invocation should fail Arc::ServiceEndpointRetrieverPluginTESTControl::status.push_back(Arc::EndpointQueryingStatus::SUCCESSFUL); // Second should succeed Arc::Endpoint e1("test1.nordugrid.org", Arc::Endpoint::REGISTRY, "org.nordugrid.sertest"); e1.ServiceID = "1234567890"; Arc::Endpoint e2("test2.nordugrid.org", Arc::Endpoint::REGISTRY, "org.nordugrid.sertest"); e2.ServiceID = "1234567890"; retriever.addEndpoint(e1); retriever.addEndpoint(e2); CPPUNIT_ASSERT(Arc::EndpointQueryingStatus(Arc::EndpointQueryingStatus::STARTED) == retriever.getStatusOfEndpoint(e1)); CPPUNIT_ASSERT(Arc::EndpointQueryingStatus(Arc::EndpointQueryingStatus::SUSPENDED_NOTREQUIRED) == retriever.getStatusOfEndpoint(e2)); c.signal(); // Remove block on the first query invocation retriever.wait(); CPPUNIT_ASSERT(Arc::EndpointQueryingStatus(Arc::EndpointQueryingStatus::FAILED) == retriever.getStatusOfEndpoint(e1)); CPPUNIT_ASSERT(Arc::EndpointQueryingStatus(Arc::EndpointQueryingStatus::SUCCESSFUL) == retriever.getStatusOfEndpoint(e2)); } CPPUNIT_TEST_SUITE_REGISTRATION(ServiceEndpointRetrieverTest); nordugrid-arc-5.4.2/src/hed/libs/compute/test/PaxHeaders.7502/JobStateTest.cpp0000644000000000000000000000012412045235201025161 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.711704 30 ctime=1513200659.853745697 nordugrid-arc-5.4.2/src/hed/libs/compute/test/JobStateTest.cpp0000644000175000002070000000566212045235201025237 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include class JobStateTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(JobStateTest); CPPUNIT_TEST(GetStateTypeTest); CPPUNIT_TEST(GetGeneralStateTest); CPPUNIT_TEST_SUITE_END(); public: JobStateTest() {} void setUp() {} void tearDown() {} void GetStateTypeTest(); void GetGeneralStateTest(); }; void JobStateTest::GetStateTypeTest() { CPPUNIT_ASSERT_EQUAL(Arc::JobState::ACCEPTED, Arc::JobState::GetStateType("Accepted")); CPPUNIT_ASSERT_EQUAL(Arc::JobState::PREPARING, Arc::JobState::GetStateType("Preparing")); CPPUNIT_ASSERT_EQUAL(Arc::JobState::SUBMITTING, Arc::JobState::GetStateType("Submitting")); CPPUNIT_ASSERT_EQUAL(Arc::JobState::HOLD, Arc::JobState::GetStateType("Hold")); CPPUNIT_ASSERT_EQUAL(Arc::JobState::QUEUING, Arc::JobState::GetStateType("Queuing")); CPPUNIT_ASSERT_EQUAL(Arc::JobState::RUNNING, Arc::JobState::GetStateType("Running")); CPPUNIT_ASSERT_EQUAL(Arc::JobState::FINISHING, Arc::JobState::GetStateType("Finishing")); CPPUNIT_ASSERT_EQUAL(Arc::JobState::FINISHED, Arc::JobState::GetStateType("Finished")); CPPUNIT_ASSERT_EQUAL(Arc::JobState::KILLED, Arc::JobState::GetStateType("Killed")); CPPUNIT_ASSERT_EQUAL(Arc::JobState::DELETED, Arc::JobState::GetStateType("Deleted")); CPPUNIT_ASSERT_EQUAL(Arc::JobState::OTHER, Arc::JobState::GetStateType("Other")); CPPUNIT_ASSERT_EQUAL(Arc::JobState::UNDEFINED, Arc::JobState::GetStateType("UnknownState")); } void 
JobStateTest::GetGeneralStateTest() { CPPUNIT_ASSERT_EQUAL((std::string)"Accepted", Arc::JobState::StateTypeString[Arc::JobState::ACCEPTED]); CPPUNIT_ASSERT_EQUAL((std::string)"Preparing", Arc::JobState::StateTypeString[Arc::JobState::PREPARING]); CPPUNIT_ASSERT_EQUAL((std::string)"Submitting", Arc::JobState::StateTypeString[Arc::JobState::SUBMITTING]); CPPUNIT_ASSERT_EQUAL((std::string)"Hold", Arc::JobState::StateTypeString[Arc::JobState::HOLD]); CPPUNIT_ASSERT_EQUAL((std::string)"Queuing", Arc::JobState::StateTypeString[Arc::JobState::QUEUING]); CPPUNIT_ASSERT_EQUAL((std::string)"Running", Arc::JobState::StateTypeString[Arc::JobState::RUNNING]); CPPUNIT_ASSERT_EQUAL((std::string)"Finishing", Arc::JobState::StateTypeString[Arc::JobState::FINISHING]); CPPUNIT_ASSERT_EQUAL((std::string)"Finished", Arc::JobState::StateTypeString[Arc::JobState::FINISHED]); CPPUNIT_ASSERT_EQUAL((std::string)"Killed", Arc::JobState::StateTypeString[Arc::JobState::KILLED]); CPPUNIT_ASSERT_EQUAL((std::string)"Deleted", Arc::JobState::StateTypeString[Arc::JobState::DELETED]); CPPUNIT_ASSERT_EQUAL((std::string)"Other", Arc::JobState::StateTypeString[Arc::JobState::OTHER]); CPPUNIT_ASSERT_EQUAL((std::string)"Undefined", Arc::JobState::StateTypeString[Arc::JobState::UNDEFINED]); } CPPUNIT_TEST_SUITE_REGISTRATION(JobStateTest); nordugrid-arc-5.4.2/src/hed/libs/compute/test/PaxHeaders.7502/SubmitterPluginTest.cpp0000644000000000000000000000012312045235201026602 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.706704 29 ctime=1513200659.85974577 nordugrid-arc-5.4.2/src/hed/libs/compute/test/SubmitterPluginTest.cpp0000644000175000002070000000225012045235201026647 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include class SubmitterPluginTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(SubmitterPluginTest); CPPUNIT_TEST(LoadTest); CPPUNIT_TEST_SUITE_END(); public: SubmitterPluginTest(); ~SubmitterPluginTest() { delete sl; } void setUp() {} void tearDown() { Arc::ThreadInitializer().waitExit(); } void LoadTest(); private: Arc::SubmitterPlugin *s; Arc::SubmitterPluginLoader *sl; Arc::UserConfig usercfg; }; SubmitterPluginTest::SubmitterPluginTest() : s(NULL), usercfg(Arc::initializeCredentialsType(Arc::initializeCredentialsType::SkipCredentials)) { sl = new Arc::SubmitterPluginLoader(); } void SubmitterPluginTest::LoadTest() { s = sl->load("", usercfg); CPPUNIT_ASSERT(s == NULL); s = sl->load("NON-EXISTENT", usercfg); CPPUNIT_ASSERT(s == NULL); s = sl->load("TEST", usercfg); CPPUNIT_ASSERT(s != NULL); } CPPUNIT_TEST_SUITE_REGISTRATION(SubmitterPluginTest); nordugrid-arc-5.4.2/src/hed/libs/compute/test/PaxHeaders.7502/JobDescriptionTest.cpp0000644000000000000000000000012412045235201026364 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.711704 30 ctime=1513200659.851745673 nordugrid-arc-5.4.2/src/hed/libs/compute/test/JobDescriptionTest.cpp0000644000175000002070000001627312045235201026442 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include class JobDescriptionTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(JobDescriptionTest); CPPUNIT_TEST(TestAlternative); CPPUNIT_TEST(PrepareTest); CPPUNIT_TEST_SUITE_END(); public: JobDescriptionTest() {}; void setUp() {} void tearDown() {} void TestAlternative(); void PrepareTest(); }; void JobDescriptionTest::TestAlternative() { { Arc::JobDescription j; j.Application.Executable.Path = 
"/bin/exe"; { Arc::JobDescription altJ; altJ.Application.Executable.Path = "/bin/alt1exe"; j.AddAlternative(altJ); } { Arc::JobDescription altJ; altJ.Application.Executable.Path = "/bin/alt2exe"; { Arc::JobDescription altaltJ; altaltJ.Application.Executable.Path = "/bin/alt3exe"; altJ.AddAlternative(altaltJ); } j.AddAlternative(altJ); } CPPUNIT_ASSERT_EQUAL(3, (int)j.GetAlternatives().size()); CPPUNIT_ASSERT_EQUAL((std::string)"/bin/exe", j.Application.Executable.Path); CPPUNIT_ASSERT(j.UseAlternative()); CPPUNIT_ASSERT_EQUAL(3, (int)j.GetAlternatives().size()); CPPUNIT_ASSERT_EQUAL((std::string)"/bin/alt1exe", j.Application.Executable.Path); CPPUNIT_ASSERT(j.UseAlternative()); CPPUNIT_ASSERT_EQUAL(3, (int)j.GetAlternatives().size()); CPPUNIT_ASSERT_EQUAL((std::string)"/bin/alt2exe", j.Application.Executable.Path); CPPUNIT_ASSERT(j.UseAlternative()); CPPUNIT_ASSERT_EQUAL((std::string)"/bin/alt3exe", j.Application.Executable.Path); CPPUNIT_ASSERT(!j.UseAlternative()); CPPUNIT_ASSERT_EQUAL(3, (int)j.GetAlternatives().size()); j.UseOriginal(); CPPUNIT_ASSERT_EQUAL((std::string)"/bin/exe", j.Application.Executable.Path); CPPUNIT_ASSERT_EQUAL(3, (int)j.GetAlternatives().size()); } } void JobDescriptionTest::PrepareTest() { { Arc::JobDescription jd; jd.Application.Executable.Path = "/hello/world"; CPPUNIT_ASSERT(jd.Prepare(Arc::ExecutionTarget())); } { const std::string exec = "PrepareJobDescriptionTest-executable"; remove(exec.c_str()); Arc::JobDescription jd; jd.Application.Executable.Path = exec; CPPUNIT_ASSERT(!jd.Prepare(Arc::ExecutionTarget())); std::ofstream f(exec.c_str(), std::ifstream::trunc); f << exec; f.close(); CPPUNIT_ASSERT(jd.Prepare(Arc::ExecutionTarget())); CPPUNIT_ASSERT_EQUAL(1, (int)jd.DataStaging.InputFiles.size()); CPPUNIT_ASSERT_EQUAL(jd.Application.Executable.Path, jd.DataStaging.InputFiles.front().Name); remove(exec.c_str()); } { const std::string input = "PrepareJobDescriptionTest-input"; remove(input.c_str()); Arc::JobDescription jd; jd.Application.Input = input; CPPUNIT_ASSERT(!jd.Prepare(Arc::ExecutionTarget())); std::ofstream f(input.c_str(), std::ifstream::trunc); f << input; f.close(); CPPUNIT_ASSERT(jd.Prepare(Arc::ExecutionTarget())); CPPUNIT_ASSERT_EQUAL(1, (int)jd.DataStaging.InputFiles.size()); CPPUNIT_ASSERT_EQUAL(jd.Application.Input, jd.DataStaging.InputFiles.front().Name); remove(input.c_str()); } { Arc::JobDescription jd; jd.Application.Output = "PrepareJobDescriptionTest-output"; CPPUNIT_ASSERT(jd.Prepare(Arc::ExecutionTarget())); CPPUNIT_ASSERT_EQUAL(1, (int)jd.DataStaging.OutputFiles.size()); CPPUNIT_ASSERT_EQUAL(jd.Application.Output, jd.DataStaging.OutputFiles.front().Name); } { Arc::JobDescription jd; jd.Application.Error = "PrepareJobDescriptionTest-error"; CPPUNIT_ASSERT(jd.Prepare(Arc::ExecutionTarget())); CPPUNIT_ASSERT_EQUAL(1, (int)jd.DataStaging.OutputFiles.size()); CPPUNIT_ASSERT_EQUAL(jd.Application.Error, jd.DataStaging.OutputFiles.front().Name); } { Arc::JobDescription jd; jd.Application.LogDir = "PrepareJobDescriptionTest-logdir"; CPPUNIT_ASSERT(jd.Prepare(Arc::ExecutionTarget())); CPPUNIT_ASSERT_EQUAL(1, (int)jd.DataStaging.OutputFiles.size()); CPPUNIT_ASSERT_EQUAL(jd.Application.LogDir, jd.DataStaging.OutputFiles.front().Name); } { Arc::JobDescription jd; Arc::ExecutionTarget et; et.ApplicationEnvironments->push_back(Arc::ApplicationEnvironment("SOFTWARE/HELLOWORLD-1.0.0")); jd.Resources.RunTimeEnvironment.add(Arc::Software("SOFTWARE/HELLOWORLD-1.0.0"), Arc::Software::GREATERTHAN); CPPUNIT_ASSERT(!jd.Prepare(et)); 
et.ApplicationEnvironments->push_back(Arc::ApplicationEnvironment("SOFTWARE/HELLOWORLD-2.0.0")); CPPUNIT_ASSERT(jd.Prepare(et)); CPPUNIT_ASSERT_EQUAL(1, (int)jd.Resources.RunTimeEnvironment.getSoftwareList().size()); CPPUNIT_ASSERT_EQUAL((std::string)"SOFTWARE/HELLOWORLD", jd.Resources.RunTimeEnvironment.getSoftwareList().front().getName()); CPPUNIT_ASSERT_EQUAL((std::string)"2.0.0", jd.Resources.RunTimeEnvironment.getSoftwareList().front().getVersion()); CPPUNIT_ASSERT_EQUAL(1, (int)jd.Resources.RunTimeEnvironment.getComparisonOperatorList().size()); CPPUNIT_ASSERT_EQUAL((std::string)"==", Arc::Software::toString(jd.Resources.RunTimeEnvironment.getComparisonOperatorList().front())); } { Arc::JobDescription jd; Arc::ExecutionTarget et; et.ComputingEndpoint->Implementation = Arc::Software("MIDDLEWARE/ABC-1.2.3"); jd.Resources.CEType.add(Arc::Software("MIDDLEWARE/ABC-2.0.0"), Arc::Software::GREATERTHANOREQUAL); CPPUNIT_ASSERT(!jd.Prepare(et)); jd.Resources.CEType.clear(); jd.Resources.CEType.add(Arc::Software("MIDDLEWARE/ABC-1.2.3"), Arc::Software::GREATERTHANOREQUAL); CPPUNIT_ASSERT(jd.Prepare(et)); CPPUNIT_ASSERT_EQUAL(1, (int)jd.Resources.CEType.getSoftwareList().size()); CPPUNIT_ASSERT_EQUAL((std::string)"MIDDLEWARE/ABC", jd.Resources.CEType.getSoftwareList().front().getName()); CPPUNIT_ASSERT_EQUAL((std::string)"1.2.3", jd.Resources.CEType.getSoftwareList().front().getVersion()); CPPUNIT_ASSERT_EQUAL(1, (int)jd.Resources.CEType.getComparisonOperatorList().size()); CPPUNIT_ASSERT_EQUAL((std::string)"==", Arc::Software::toString(jd.Resources.CEType.getComparisonOperatorList().front())); } { Arc::JobDescription jd; Arc::ExecutionTarget et; et.ExecutionEnvironment->OperatingSystem = Arc::Software("OPERATINGSYSTEM/COW-2.2.2"); jd.Resources.OperatingSystem.add(Arc::Software("OPERATINGSYSTEM/COW-2.0.0"), Arc::Software::LESSTHAN); CPPUNIT_ASSERT(!jd.Prepare(et)); jd.Resources.OperatingSystem.clear(); jd.Resources.OperatingSystem.add(Arc::Software("OPERATINGSYSTEM/COW-3.0.0"), Arc::Software::LESSTHAN); CPPUNIT_ASSERT(jd.Prepare(et)); CPPUNIT_ASSERT_EQUAL(1, (int)jd.Resources.OperatingSystem.getSoftwareList().size()); CPPUNIT_ASSERT_EQUAL((std::string)"OPERATINGSYSTEM/COW", jd.Resources.OperatingSystem.getSoftwareList().front().getName()); CPPUNIT_ASSERT_EQUAL((std::string)"2.2.2", jd.Resources.OperatingSystem.getSoftwareList().front().getVersion()); CPPUNIT_ASSERT_EQUAL(1, (int)jd.Resources.OperatingSystem.getComparisonOperatorList().size()); CPPUNIT_ASSERT_EQUAL((std::string)"==", Arc::Software::toString(jd.Resources.OperatingSystem.getComparisonOperatorList().front())); } } CPPUNIT_TEST_SUITE_REGISTRATION(JobDescriptionTest); nordugrid-arc-5.4.2/src/hed/libs/compute/test/PaxHeaders.7502/JobDescriptionParserPluginTest.cpp0000644000000000000000000000012312072511126030722 xustar000000000000000027 mtime=1357550166.724559 27 atime=1513200574.709704 29 ctime=1513200659.85074566 nordugrid-arc-5.4.2/src/hed/libs/compute/test/JobDescriptionParserPluginTest.cpp0000644000175000002070000000215612072511126030774 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include class JobDescriptionParserPluginTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(JobDescriptionParserPluginTest); CPPUNIT_TEST(LoadTest); CPPUNIT_TEST_SUITE_END(); public: JobDescriptionParserPluginTest(); ~JobDescriptionParserPluginTest() { delete jdpl; } void setUp() {} void tearDown() { Arc::ThreadInitializer().waitExit(); } void LoadTest(); private: 
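  // Note: only the loader is deleted in the destructor above; the plugin object
  // returned by load() is owned by the loader and must not be used after the
  // loader is destroyed (see the JobDescriptionParserPluginLoader documentation).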
Arc::JobDescriptionParserPlugin *jdp; Arc::JobDescriptionParserPluginLoader *jdpl; }; JobDescriptionParserPluginTest::JobDescriptionParserPluginTest() : jdp(NULL) { jdpl = new Arc::JobDescriptionParserPluginLoader(); } void JobDescriptionParserPluginTest::LoadTest() { jdp = jdpl->load(""); CPPUNIT_ASSERT(jdp == NULL); jdp = jdpl->load("NON-EXISTENT"); CPPUNIT_ASSERT(jdp == NULL); jdp = jdpl->load("TEST"); CPPUNIT_ASSERT(jdp != NULL); } CPPUNIT_TEST_SUITE_REGISTRATION(JobDescriptionParserPluginTest); nordugrid-arc-5.4.2/src/hed/libs/compute/test/PaxHeaders.7502/BrokerTest.cpp0000644000000000000000000000012412045235201024672 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.701704 30 ctime=1513200659.847745624 nordugrid-arc-5.4.2/src/hed/libs/compute/test/BrokerTest.cpp0000644000175000002070000002371512045235201024747 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include static Arc::Logger testLogger(Arc::Logger::getRootLogger(), "BrokerTest"); class BrokerTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(BrokerTest); CPPUNIT_TEST(LoadTest); CPPUNIT_TEST(QueueTest); CPPUNIT_TEST(CPUWallTimeTest); CPPUNIT_TEST(BenckmarkCPUWallTimeTest); CPPUNIT_TEST(RegresssionTestMultipleDifferentJobDescriptions); CPPUNIT_TEST(RejectTargetsTest); CPPUNIT_TEST_SUITE_END(); public: BrokerTest(); void setUp(); void tearDown(); void LoadTest(); void QueueTest(); void CPUWallTimeTest(); void BenckmarkCPUWallTimeTest(); void RegresssionTestMultipleDifferentJobDescriptions(); void RejectTargetsTest(); private: const Arc::UserConfig usercfg; std::list etl; Arc::JobDescription job; }; BrokerTest::BrokerTest() : usercfg(Arc::initializeCredentialsType(Arc::initializeCredentialsType::SkipCredentials)), etl(1, Arc::ExecutionTarget()) { } void BrokerTest::setUp() { Arc::BrokerPluginTestACCControl::match = true; etl.front().ComputingEndpoint->URLString = "http://localhost/test"; etl.front().ComputingEndpoint->HealthState = "ok"; } void BrokerTest::tearDown() { Arc::ThreadInitializer().waitExit(); } void BrokerTest::LoadTest() { Arc::BrokerPluginLoader bpl; Arc::BrokerPlugin *b = bpl.load(usercfg, "NON-EXISTENT"); CPPUNIT_ASSERT(b == NULL); b = bpl.load(usercfg, "TEST"); CPPUNIT_ASSERT(b != NULL); } void BrokerTest::QueueTest() { job.Resources.QueueName = "q1"; Arc::Broker b(usercfg, job, "TEST"); CPPUNIT_ASSERT(b.isValid()); CPPUNIT_ASSERT(!b.match(etl.front())); etl.front().ComputingShare->Name = "q1"; CPPUNIT_ASSERT(b.match(etl.front())); job.Resources.QueueName = "q2"; CPPUNIT_ASSERT(!b.match(etl.front())); job.Resources.QueueName = ""; job.OtherAttributes["nordugrid:broker;reject_queue"] = "q1"; CPPUNIT_ASSERT(!b.match(etl.front())); job.OtherAttributes["nordugrid:broker;reject_queue"] = "q2"; CPPUNIT_ASSERT(b.match(etl.front())); etl.front().ComputingShare->Name = ""; CPPUNIT_ASSERT(!b.match(etl.front())); job.OtherAttributes.erase("nordugrid:broker;reject_queue"); } void BrokerTest::CPUWallTimeTest() { Arc::Broker b(usercfg, job, "TEST"); CPPUNIT_ASSERT(b.isValid()); etl.front().ComputingShare->MaxCPUTime = 100; job.Resources.IndividualCPUTime.range.max = 110; CPPUNIT_ASSERT(!b.match(etl.front())); job.Resources.IndividualCPUTime.range.max = 100; CPPUNIT_ASSERT(b.match(etl.front())); job.Resources.IndividualCPUTime.range.max = 90; CPPUNIT_ASSERT(b.match(etl.front())); etl.front().ComputingShare->MaxCPUTime = -1; job.Resources.IndividualCPUTime.range.max = -1; 
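  // Setting the share limits and the job requests back to -1 clears them before
  // the next sub-case; the same max/min pattern is then repeated below for
  // MinCPUTime, MaxWallTime and MinWallTime.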
etl.front().ComputingShare->MinCPUTime = 10; job.Resources.IndividualCPUTime.range.min = 5; CPPUNIT_ASSERT(!b.match(etl.front())); job.Resources.IndividualCPUTime.range.min = 10; CPPUNIT_ASSERT(b.match(etl.front())); job.Resources.IndividualCPUTime.range.min = 15; CPPUNIT_ASSERT(b.match(etl.front())); etl.front().ComputingShare->MinCPUTime = -1; job.Resources.IndividualCPUTime.range.min = -1; etl.front().ComputingShare->MaxWallTime = 100; job.Resources.IndividualWallTime.range.max = 110; CPPUNIT_ASSERT(!b.match(etl.front())); job.Resources.IndividualWallTime.range.max = 100; CPPUNIT_ASSERT(b.match(etl.front())); job.Resources.IndividualWallTime.range.max = 90; CPPUNIT_ASSERT(b.match(etl.front())); etl.front().ComputingShare->MaxWallTime = -1; job.Resources.IndividualWallTime.range.max = -1; etl.front().ComputingShare->MinWallTime = 10; job.Resources.IndividualWallTime.range.min = 5; CPPUNIT_ASSERT(!b.match(etl.front())); job.Resources.IndividualWallTime.range.min = 10; CPPUNIT_ASSERT(b.match(etl.front())); job.Resources.IndividualWallTime.range.min = 15; CPPUNIT_ASSERT(b.match(etl.front())); etl.front().ComputingShare->MinWallTime = -1; job.Resources.IndividualWallTime.range.min = -1; } void BrokerTest::BenckmarkCPUWallTimeTest() { Arc::Broker b(usercfg, job, "TEST"); CPPUNIT_ASSERT(b.isValid()); (*etl.front().Benchmarks)["TestBenchmark"] = 100.; job.Resources.IndividualCPUTime.benchmark = std::pair("TestBenchmark", 50.); etl.front().ComputingShare->MaxCPUTime = 100; job.Resources.IndividualCPUTime.range.max = 210; CPPUNIT_ASSERT(!b.match(etl.front())); job.Resources.IndividualCPUTime.range.max = 200; CPPUNIT_ASSERT(b.match(etl.front())); job.Resources.IndividualCPUTime.range.max = 190; CPPUNIT_ASSERT(b.match(etl.front())); etl.front().ComputingShare->MaxCPUTime = -1; job.Resources.IndividualCPUTime.range.max = -1; etl.front().ComputingShare->MinCPUTime = 10; job.Resources.IndividualCPUTime.range.min = 10; CPPUNIT_ASSERT(!b.match(etl.front())); job.Resources.IndividualCPUTime.range.min = 20; CPPUNIT_ASSERT(b.match(etl.front())); job.Resources.IndividualCPUTime.range.min = 30; CPPUNIT_ASSERT(b.match(etl.front())); etl.front().ComputingShare->MinCPUTime = -1; job.Resources.IndividualCPUTime.range.min = -1; job.Resources.IndividualCPUTime.benchmark = std::pair("", -1.); job.Resources.IndividualWallTime.benchmark = std::pair("TestBenchmark", 50.); etl.front().ComputingShare->MaxWallTime = 100; job.Resources.IndividualWallTime.range.max = 210; CPPUNIT_ASSERT(!b.match(etl.front())); job.Resources.IndividualWallTime.range.max = 200; CPPUNIT_ASSERT(b.match(etl.front())); job.Resources.IndividualWallTime.range.max = 190; CPPUNIT_ASSERT(b.match(etl.front())); etl.front().ComputingShare->MaxWallTime = -1; job.Resources.IndividualWallTime.range.max = -1; etl.front().ComputingShare->MinWallTime = 10; job.Resources.IndividualWallTime.range.min = 10; CPPUNIT_ASSERT(!b.match(etl.front())); job.Resources.IndividualWallTime.range.min = 20; CPPUNIT_ASSERT(b.match(etl.front())); job.Resources.IndividualWallTime.range.min = 30; CPPUNIT_ASSERT(b.match(etl.front())); etl.front().ComputingShare->MinWallTime = -1; job.Resources.IndividualWallTime.range.min = -1; job.Resources.IndividualWallTime.benchmark = std::pair("", -1.); etl.front().ExecutionEnvironment->CPUClockSpeed = 2500; job.Resources.IndividualCPUTime.benchmark = std::pair("clock rate", 1000.); etl.front().ComputingShare->MaxCPUTime = 100; job.Resources.IndividualCPUTime.range.max = 300; CPPUNIT_ASSERT(!b.match(etl.front())); 
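  // The surrounding assertions suggest the requested CPU time is scaled by the
  // clock-rate ratio (2500/1000 = 2.5), so with MaxCPUTime = 100 the effective
  // limit is 250: 300 is rejected, while 250 and 200 (below) are accepted.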
job.Resources.IndividualCPUTime.range.max = 250; CPPUNIT_ASSERT(b.match(etl.front())); job.Resources.IndividualCPUTime.range.max = 200; CPPUNIT_ASSERT(b.match(etl.front())); etl.front().ExecutionEnvironment->CPUClockSpeed = -1; etl.front().ComputingShare->MaxCPUTime = -1; job.Resources.IndividualCPUTime.range.max = -1; job.Resources.IndividualCPUTime.benchmark = std::pair("", -1.); } void BrokerTest::RegresssionTestMultipleDifferentJobDescriptions() { job.Resources.QueueName = "front"; Arc::Broker b(usercfg, job, "TEST"); CPPUNIT_ASSERT(b.isValid()); /* When prefiltered by the broker, each JobDescription object "correspond" to * a (list of) ExecutionTarget object(s). */ Arc::ExecutionTargetSorter ets(b); Arc::ExecutionTarget aET, bET; aET.ComputingEndpoint->URLString = "http://localhost/test"; aET.ComputingEndpoint->HealthState = "ok"; aET.ComputingShare->Name = "front"; ets.addEntity(aET); bET.ComputingEndpoint->URLString = "http://localhost/test"; bET.ComputingEndpoint->HealthState = "ok"; bET.ComputingShare->Name = "back"; ets.addEntity(bET); ets.reset(); CPPUNIT_ASSERT_EQUAL(1, (int)ets.getMatchingTargets().size()); CPPUNIT_ASSERT_EQUAL((std::string)"front", ets->ComputingShare->Name); job.Resources.QueueName = "back"; ets.set(job); CPPUNIT_ASSERT_EQUAL(1, (int)ets.getMatchingTargets().size()); CPPUNIT_ASSERT_EQUAL((std::string)"back", ets->ComputingShare->Name); } void BrokerTest::RejectTargetsTest() { job.Application.Executable.Path = "executable"; Arc::Broker b(usercfg, job, "TEST"); CPPUNIT_ASSERT(b.isValid()); { // Rejecting no targets. Arc::ExecutionTarget target; target.ComputingEndpoint->HealthState = "ok"; Arc::ExecutionTargetSorter ets(b); target.ComputingEndpoint->URLString = "http://localhost/test1"; ets.addEntity(target); target.ComputingEndpoint->URLString = "http://localhost/test2"; ets.addEntity(target); CPPUNIT_ASSERT_EQUAL(2, (int)ets.getMatchingTargets().size()); } { // Reject test1 target. std::list rejectTargets; rejectTargets.push_back(Arc::URL("http://localhost/test1")); Arc::ExecutionTarget aET, bET; aET.ComputingEndpoint->HealthState = "ok"; aET.ComputingEndpoint->URLString = "http://localhost/test1"; bET.ComputingEndpoint->HealthState = "ok"; bET.ComputingEndpoint->URLString = "http://localhost/test2"; Arc::ExecutionTargetSorter ets(b, rejectTargets); ets.addEntity(aET); ets.addEntity(bET); ets.reset(); CPPUNIT_ASSERT_EQUAL(1, (int)ets.getMatchingTargets().size()); CPPUNIT_ASSERT_EQUAL((std::string)"http://localhost/test2", ets->ComputingEndpoint->URLString); } { // Reject both targets. 
std::list rejectTargets; rejectTargets.push_back(Arc::URL("http://localhost/test1")); rejectTargets.push_back(Arc::URL("http://localhost/test2")); Arc::ExecutionTarget aET, bET; aET.ComputingEndpoint->HealthState = "ok"; aET.ComputingEndpoint->URLString = "http://localhost/test1"; bET.ComputingEndpoint->HealthState = "ok"; bET.ComputingEndpoint->URLString = "http://localhost/test2"; Arc::ExecutionTargetSorter ets(b, rejectTargets); ets.addEntity(aET); ets.addEntity(bET); ets.reset(); CPPUNIT_ASSERT_EQUAL(0, (int)ets.getMatchingTargets().size()); } } CPPUNIT_TEST_SUITE_REGISTRATION(BrokerTest); nordugrid-arc-5.4.2/src/hed/libs/compute/test/PaxHeaders.7502/JobTest.cpp0000644000000000000000000000012412207404076024170 xustar000000000000000027 mtime=1377699902.545279 27 atime=1513200574.709704 30 ctime=1513200659.855745722 nordugrid-arc-5.4.2/src/hed/libs/compute/test/JobTest.cpp0000644000175000002070000006406212207404076024245 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include class JobTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(JobTest); CPPUNIT_TEST(XMLToJobTest); CPPUNIT_TEST(JobToXMLTest); CPPUNIT_TEST(XMLToJobStateTest); CPPUNIT_TEST(VersionTwoFormatTest); CPPUNIT_TEST(VersionOneFormatTest); CPPUNIT_TEST_SUITE_END(); public: JobTest(); void setUp() {} void tearDown() {} void XMLToJobTest(); void JobToXMLTest(); void XMLToJobStateTest(); void VersionTwoFormatTest(); void VersionOneFormatTest(); private: Arc::XMLNode xmlJob; }; JobTest::JobTest() : xmlJob(Arc::XMLNode("" "https://testbed-emi4.grid.upjs.sk:60000/arex" "org.ogf.glue.emies.resourceinfo" "https://testbed-emi4.grid.upjs.sk:60000/arex" "org.ogf.glue.emies.activitymanagement" "https://testbed-emi4.grid.upjs.sk:60000/arex" "org.ogf.glue.emies.activitymanagement" "https://testbed-emi4.grid.upjs.sk:60000/arex/HiqNDmAiivgnIfnhppWRvMapABFKDmABFKDmQhJKDmCBFKDmmdhHxm" "HiqNDmAiivgnIfnhppWRvMapABFKDmABFKDmQhJKDmCBFKDmmdhHxm" "https://testbed-emi4.grid.upjs.sk:60000/arex" "https://testbed-emi4.grid.upjs.sk:60000/arex" "https://testbed-emi4.grid.upjs.sk:60000/arex" "mc08.1050.J7" "single" "345.ce01" "nordugrid:xrsl" "&(executable=\"helloworld.sh\")(arguments=\"random.dat\")(inputfiles=(\"helloworld.sh\")(\"random.dat\"))(stdout=\"helloworld.out\")(join=\"yes\")" "bes:failed" "nordugrid:FAILED" "bes:running" "nordugrid:FINISHING" "0" "0" "Uploading timed out" "Failed stage-out" "0" "vo:atlas" "CONFIDENTIAL" "grid02" "5000" "20000" "4" "ENV/JAVA/JRE-1.6.0" "APPS/HEP/ATLAS-14.2.23.4" "input.dat" "job.out" "err.out" "celog" "wn043" "wn056" "pbs-short" "2893" "12340" "4453" "2008-04-21T10:04:36Z" "2008-04-21T10:05:12Z" "2008-04-20T06:05:12Z" "2008-04-20T06:45:12Z" "2008-04-20T10:05:12Z" "2008-04-20T10:15:12Z" "2008-04-24T10:05:12Z" "2008-04-30T10:05:12Z" "pc4.niif.hu:3432" "nordugrid-arc-0.94" "Cached input file is outdated; downloading again" "User proxy has expired" "" "https://example-ce.com:443/arex/765234" "https://helloworld-ce.com:12345/arex/543678" "" "helloworld.sh" "c0489bec6f7f4454d6cfe1b0a07ad5b8" "" "" "random.dat" "e52b14b10b967d9135c198fd11b9b8bc" "" "" "")) {} void JobTest::XMLToJobTest() { Arc::Job job; job = xmlJob; CPPUNIT_ASSERT_EQUAL((std::string)"https://testbed-emi4.grid.upjs.sk:60000/arex/HiqNDmAiivgnIfnhppWRvMapABFKDmABFKDmQhJKDmCBFKDmmdhHxm", job.JobID); CPPUNIT_ASSERT_EQUAL((std::string)"mc08.1050.J7", job.Name); CPPUNIT_ASSERT_EQUAL(Arc::URL("https://testbed-emi4.grid.upjs.sk:60000/arex"), job.ServiceInformationURL); 
CPPUNIT_ASSERT_EQUAL((std::string)"org.ogf.glue.emies.resourceinfo", job.ServiceInformationInterfaceName); CPPUNIT_ASSERT_EQUAL(Arc::URL("https://testbed-emi4.grid.upjs.sk:60000/arex"), job.JobStatusURL); CPPUNIT_ASSERT_EQUAL((std::string)"org.ogf.glue.emies.activitymanagement", job.JobStatusInterfaceName); CPPUNIT_ASSERT_EQUAL(Arc::URL("https://testbed-emi4.grid.upjs.sk:60000/arex"), job.JobManagementURL); CPPUNIT_ASSERT_EQUAL((std::string)"org.ogf.glue.emies.activitymanagement", job.JobManagementInterfaceName); CPPUNIT_ASSERT_EQUAL(Arc::URL("https://testbed-emi4.grid.upjs.sk:60000/arex"), job.StageInDir); CPPUNIT_ASSERT_EQUAL(Arc::URL("https://testbed-emi4.grid.upjs.sk:60000/arex"), job.StageOutDir); CPPUNIT_ASSERT_EQUAL(Arc::URL("https://testbed-emi4.grid.upjs.sk:60000/arex"), job.SessionDir); CPPUNIT_ASSERT_EQUAL((std::string)"single", job.Type); CPPUNIT_ASSERT_EQUAL((std::string)"345.ce01", job.LocalIDFromManager); CPPUNIT_ASSERT_EQUAL((std::string)"nordugrid:xrsl", job.JobDescription); CPPUNIT_ASSERT_EQUAL((std::string)"&(executable=\"helloworld.sh\")(arguments=\"random.dat\")(inputfiles=(\"helloworld.sh\")(\"random.dat\"))(stdout=\"helloworld.out\")(join=\"yes\")", job.JobDescriptionDocument); CPPUNIT_ASSERT(job.State == Arc::JobState::OTHER); CPPUNIT_ASSERT_EQUAL((std::string)"bes:failed", job.State()); CPPUNIT_ASSERT(job.RestartState == Arc::JobState::OTHER); CPPUNIT_ASSERT_EQUAL((std::string)"bes:running", job.RestartState()); CPPUNIT_ASSERT_EQUAL(0, job.ExitCode); CPPUNIT_ASSERT_EQUAL((std::string)"0", job.ComputingManagerExitCode); CPPUNIT_ASSERT_EQUAL(2, (int)job.Error.size()); CPPUNIT_ASSERT_EQUAL((std::string)"Uploading timed out", job.Error.front()); CPPUNIT_ASSERT_EQUAL((std::string)"Failed stage-out", job.Error.back()); CPPUNIT_ASSERT_EQUAL(0, job.WaitingPosition); CPPUNIT_ASSERT_EQUAL((std::string)"vo:atlas", job.UserDomain); CPPUNIT_ASSERT_EQUAL((std::string)"CONFIDENTIAL", job.Owner); CPPUNIT_ASSERT_EQUAL((std::string)"grid02", job.LocalOwner); CPPUNIT_ASSERT_EQUAL(Arc::Period(5000), job.RequestedTotalWallTime); CPPUNIT_ASSERT_EQUAL(Arc::Period(20000), job.RequestedTotalCPUTime); CPPUNIT_ASSERT_EQUAL(4, job.RequestedSlots); CPPUNIT_ASSERT_EQUAL(2, (int)job.RequestedApplicationEnvironment.size()); CPPUNIT_ASSERT_EQUAL((std::string)"ENV/JAVA/JRE-1.6.0", job.RequestedApplicationEnvironment.front()); CPPUNIT_ASSERT_EQUAL((std::string)"APPS/HEP/ATLAS-14.2.23.4", job.RequestedApplicationEnvironment.back()); CPPUNIT_ASSERT_EQUAL((std::string)"input.dat", job.StdIn); CPPUNIT_ASSERT_EQUAL((std::string)"job.out", job.StdOut); CPPUNIT_ASSERT_EQUAL((std::string)"err.out", job.StdErr); CPPUNIT_ASSERT_EQUAL((std::string)"celog", job.LogDir); CPPUNIT_ASSERT_EQUAL(2, (int)job.ExecutionNode.size()); CPPUNIT_ASSERT_EQUAL((std::string)"wn043", job.ExecutionNode.front()); CPPUNIT_ASSERT_EQUAL((std::string)"wn056", job.ExecutionNode.back()); CPPUNIT_ASSERT_EQUAL((std::string)"pbs-short", job.Queue); CPPUNIT_ASSERT_EQUAL(Arc::Period(2893), job.UsedTotalWallTime); CPPUNIT_ASSERT_EQUAL(Arc::Period(12340), job.UsedTotalCPUTime); CPPUNIT_ASSERT_EQUAL(4453, job.UsedMainMemory); CPPUNIT_ASSERT_EQUAL(Arc::Time("2008-04-21T10:04:36Z"), job.LocalSubmissionTime); CPPUNIT_ASSERT_EQUAL(Arc::Time("2008-04-21T10:05:12Z"), job.SubmissionTime); CPPUNIT_ASSERT_EQUAL(Arc::Time("2008-04-20T06:05:12Z"), job.ComputingManagerSubmissionTime); CPPUNIT_ASSERT_EQUAL(Arc::Time("2008-04-20T06:45:12Z"), job.StartTime); CPPUNIT_ASSERT_EQUAL(Arc::Time("2008-04-20T10:05:12Z"), job.ComputingManagerEndTime); 
CPPUNIT_ASSERT_EQUAL(Arc::Time("2008-04-20T10:15:12Z"), job.EndTime); CPPUNIT_ASSERT_EQUAL(Arc::Time("2008-04-24T10:05:12Z"), job.WorkingAreaEraseTime); CPPUNIT_ASSERT_EQUAL(Arc::Time("2008-04-30T10:05:12Z"), job.ProxyExpirationTime); CPPUNIT_ASSERT_EQUAL((std::string)"pc4.niif.hu:3432", job.SubmissionHost); CPPUNIT_ASSERT_EQUAL((std::string)"nordugrid-arc-0.94", job.SubmissionClientName); CPPUNIT_ASSERT_EQUAL(2, (int)job.OtherMessages.size()); CPPUNIT_ASSERT_EQUAL((std::string)"Cached input file is outdated; downloading again", job.OtherMessages.front()); CPPUNIT_ASSERT_EQUAL((std::string)"User proxy has expired", job.OtherMessages.back()); CPPUNIT_ASSERT_EQUAL(2, (int)job.ActivityOldID.size()); CPPUNIT_ASSERT_EQUAL((std::string)"https://example-ce.com:443/arex/765234", job.ActivityOldID.front()); CPPUNIT_ASSERT_EQUAL((std::string)"https://helloworld-ce.com:12345/arex/543678", job.ActivityOldID.back()); CPPUNIT_ASSERT_EQUAL(2, (int)job.LocalInputFiles.size()); std::map::const_iterator itFiles = job.LocalInputFiles.begin(); CPPUNIT_ASSERT_EQUAL((std::string)"helloworld.sh", itFiles->first); CPPUNIT_ASSERT_EQUAL((std::string)"c0489bec6f7f4454d6cfe1b0a07ad5b8", itFiles->second); itFiles++; CPPUNIT_ASSERT_EQUAL((std::string)"random.dat", itFiles->first); CPPUNIT_ASSERT_EQUAL((std::string)"e52b14b10b967d9135c198fd11b9b8bc", itFiles->second); } void JobTest::JobToXMLTest() { Arc::Job job; job = xmlJob; Arc::XMLNode xmlOut(""); job.ToXML(xmlOut); CPPUNIT_ASSERT_EQUAL((std::string)"https://testbed-emi4.grid.upjs.sk:60000/arex/HiqNDmAiivgnIfnhppWRvMapABFKDmABFKDmQhJKDmCBFKDmmdhHxm", (std::string)xmlOut["JobID"]); xmlOut["JobID"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"mc08.1050.J7", (std::string)xmlOut["Name"]); xmlOut["Name"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"https://testbed-emi4.grid.upjs.sk:60000/arex", (std::string)xmlOut["ServiceInformationURL"]); xmlOut["ServiceInformationURL"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"org.ogf.glue.emies.resourceinfo", (std::string)xmlOut["ServiceInformationInterfaceName"]); xmlOut["ServiceInformationInterfaceName"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"https://testbed-emi4.grid.upjs.sk:60000/arex", (std::string)xmlOut["JobStatusURL"]); xmlOut["JobStatusURL"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"org.ogf.glue.emies.activitymanagement", (std::string)xmlOut["JobStatusInterfaceName"]); xmlOut["JobStatusInterfaceName"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"https://testbed-emi4.grid.upjs.sk:60000/arex", (std::string)xmlOut["JobManagementURL"]); xmlOut["JobManagementURL"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"org.ogf.glue.emies.activitymanagement", (std::string)xmlOut["JobManagementInterfaceName"]); xmlOut["JobManagementInterfaceName"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"https://testbed-emi4.grid.upjs.sk:60000/arex", (std::string)xmlOut["StageInDir"]); xmlOut["StageInDir"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"https://testbed-emi4.grid.upjs.sk:60000/arex", (std::string)xmlOut["StageOutDir"]); xmlOut["StageOutDir"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"https://testbed-emi4.grid.upjs.sk:60000/arex", (std::string)xmlOut["SessionDir"]); xmlOut["SessionDir"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"single", (std::string)xmlOut["Type"]); xmlOut["Type"].Destroy(); CPPUNIT_ASSERT(xmlOut["IDFromEndpoint"]); xmlOut["IDFromEndpoint"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"345.ce01", (std::string)xmlOut["LocalIDFromManager"]); xmlOut["LocalIDFromManager"].Destroy(); 
CPPUNIT_ASSERT_EQUAL((std::string)"nordugrid:xrsl", (std::string)xmlOut["JobDescription"]); xmlOut["JobDescription"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"&(executable=\"helloworld.sh\")(arguments=\"random.dat\")(inputfiles=(\"helloworld.sh\")(\"random.dat\"))(stdout=\"helloworld.out\")(join=\"yes\")", (std::string)xmlOut["JobDescriptionDocument"]); xmlOut["JobDescriptionDocument"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"Other", (std::string)xmlOut["State"]["General"]); xmlOut["State"]["General"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"bes:failed", (std::string)xmlOut["State"]["Specific"]); xmlOut["State"]["Specific"].Destroy(); CPPUNIT_ASSERT_EQUAL(0, xmlOut["State"].Size()); xmlOut["State"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"Other", (std::string)xmlOut["RestartState"]["General"]); xmlOut["RestartState"]["General"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"bes:running", (std::string)xmlOut["RestartState"]["Specific"]); xmlOut["RestartState"]["Specific"].Destroy(); CPPUNIT_ASSERT_EQUAL(0, xmlOut["RestartState"].Size()); xmlOut["RestartState"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"0", (std::string)xmlOut["ExitCode"]); xmlOut["ExitCode"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"0", (std::string)xmlOut["ComputingManagerExitCode"]); xmlOut["ComputingManagerExitCode"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"Uploading timed out", (std::string)xmlOut["Error"]); xmlOut["Error"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"Failed stage-out", (std::string)xmlOut["Error"]); xmlOut["Error"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"0", (std::string)xmlOut["WaitingPosition"]); xmlOut["WaitingPosition"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"vo:atlas", (std::string)xmlOut["UserDomain"]); xmlOut["UserDomain"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"CONFIDENTIAL", (std::string)xmlOut["Owner"]); xmlOut["Owner"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"grid02", (std::string)xmlOut["LocalOwner"]); xmlOut["LocalOwner"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"PT1H23M20S", (std::string)xmlOut["RequestedTotalWallTime"]); xmlOut["RequestedTotalWallTime"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"PT5H33M20S", (std::string)xmlOut["RequestedTotalCPUTime"]); xmlOut["RequestedTotalCPUTime"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"4", (std::string)xmlOut["RequestedSlots"]); xmlOut["RequestedSlots"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"ENV/JAVA/JRE-1.6.0", (std::string)xmlOut["RequestedApplicationEnvironment"]); xmlOut["RequestedApplicationEnvironment"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"APPS/HEP/ATLAS-14.2.23.4", (std::string)xmlOut["RequestedApplicationEnvironment"]); xmlOut["RequestedApplicationEnvironment"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"input.dat", (std::string)xmlOut["StdIn"]); xmlOut["StdIn"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"job.out", (std::string)xmlOut["StdOut"]); xmlOut["StdOut"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"err.out", (std::string)xmlOut["StdErr"]); xmlOut["StdErr"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"celog", (std::string)xmlOut["LogDir"]); xmlOut["LogDir"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"wn043", (std::string)xmlOut["ExecutionNode"]); xmlOut["ExecutionNode"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"wn056", (std::string)xmlOut["ExecutionNode"]); xmlOut["ExecutionNode"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"pbs-short", (std::string)xmlOut["Queue"]); xmlOut["Queue"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"PT48M13S", 
(std::string)xmlOut["UsedTotalWallTime"]); xmlOut["UsedTotalWallTime"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"PT3H25M40S", (std::string)xmlOut["UsedTotalCPUTime"]); xmlOut["UsedTotalCPUTime"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"4453", (std::string)xmlOut["UsedMainMemory"]); xmlOut["UsedMainMemory"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"2008-04-21T10:04:36Z", (std::string)xmlOut["LocalSubmissionTime"]); xmlOut["LocalSubmissionTime"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"2008-04-21T10:05:12Z", (std::string)xmlOut["SubmissionTime"]); xmlOut["SubmissionTime"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"2008-04-20T06:05:12Z", (std::string)xmlOut["ComputingManagerSubmissionTime"]); xmlOut["ComputingManagerSubmissionTime"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"2008-04-20T06:45:12Z", (std::string)xmlOut["StartTime"]); xmlOut["StartTime"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"2008-04-20T10:05:12Z", (std::string)xmlOut["ComputingManagerEndTime"]); xmlOut["ComputingManagerEndTime"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"2008-04-20T10:15:12Z", (std::string)xmlOut["EndTime"]); xmlOut["EndTime"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"2008-04-24T10:05:12Z", (std::string)xmlOut["WorkingAreaEraseTime"]); xmlOut["WorkingAreaEraseTime"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"2008-04-30T10:05:12Z", (std::string)xmlOut["ProxyExpirationTime"]); xmlOut["ProxyExpirationTime"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"pc4.niif.hu:3432", (std::string)xmlOut["SubmissionHost"]); xmlOut["SubmissionHost"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"nordugrid-arc-0.94", (std::string)xmlOut["SubmissionClientName"]); xmlOut["SubmissionClientName"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"Cached input file is outdated; downloading again", (std::string)xmlOut["OtherMessages"]); xmlOut["OtherMessages"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"User proxy has expired", (std::string)xmlOut["OtherMessages"]); xmlOut["OtherMessages"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"https://example-ce.com:443/arex/765234", (std::string)xmlOut["Associations"]["ActivityOldID"]); xmlOut["Associations"]["ActivityOldID"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"https://helloworld-ce.com:12345/arex/543678", (std::string)xmlOut["Associations"]["ActivityOldID"]); xmlOut["Associations"]["ActivityOldID"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"helloworld.sh", (std::string)xmlOut["Associations"]["LocalInputFile"]["Source"]); xmlOut["Associations"]["LocalInputFile"]["Source"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"c0489bec6f7f4454d6cfe1b0a07ad5b8", (std::string)xmlOut["Associations"]["LocalInputFile"]["CheckSum"]); xmlOut["Associations"]["LocalInputFile"]["CheckSum"].Destroy(); CPPUNIT_ASSERT_EQUAL(0, xmlOut["Associations"]["LocalInputFile"].Size()); xmlOut["Associations"]["LocalInputFile"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"random.dat", (std::string)xmlOut["Associations"]["LocalInputFile"]["Source"]); xmlOut["Associations"]["LocalInputFile"]["Source"].Destroy(); CPPUNIT_ASSERT_EQUAL((std::string)"e52b14b10b967d9135c198fd11b9b8bc", (std::string)xmlOut["Associations"]["LocalInputFile"]["CheckSum"]); xmlOut["Associations"]["LocalInputFile"]["CheckSum"].Destroy(); CPPUNIT_ASSERT_EQUAL(0, xmlOut["Associations"]["LocalInputFile"].Size()); xmlOut["Associations"]["LocalInputFile"].Destroy(); CPPUNIT_ASSERT_EQUAL(0, xmlOut["Associations"].Size()); xmlOut["Associations"].Destroy(); CPPUNIT_ASSERT_EQUAL(0, xmlOut.Size()); Arc::Job emptyJob; 
emptyJob.ToXML(xmlOut); CPPUNIT_ASSERT_EQUAL(0, xmlOut.Size()); } void JobTest::XMLToJobStateTest() { Arc::XMLNode xml( "" "" "Preparing" "PREPARING" "" "" ); Arc::Job job; job = xml; CPPUNIT_ASSERT_EQUAL((std::string)"PREPARING", job.State()); CPPUNIT_ASSERT_EQUAL((std::string)"Preparing", job.State.GetGeneralState()); } void JobTest::VersionTwoFormatTest() { Arc::XMLNode xml( "" "gsiftp://example-ce.nordugrid.org:2811/jobs/3456789101112" "ldap://example-ce.nordugrid.org:2135/Mds-Vo-name=local,o=Grid??base?(objectClass=*)" "org.nordugrid.gridftpjob" "ldap://example-ce.nordugrid.org:2135/Mds-Vo-name=local,o=Grid??sub?(nordugrid-job-globalid=gsiftp://example-ce.nordugrid.org:2811/jobs/3456789101112)" "2010-09-24 16:17:46" "&(executable=\"helloworld.sh\")(arguments=\"random.dat\")(inputfiles=(\"helloworld.sh\")(\"random.dat\"))(stdout=\"helloworld.out\")(join=\"yes\")" "gsiftp://example-ce.nordugrid.org:2811/jobs/765234" "https://helloworld-ce.nordugrid.org:12345/arex/543678" "" "" "helloworld.sh" "c0489bec6f7f4454d6cfe1b0a07ad5b8" "" "" "random.dat" "e52b14b10b967d9135c198fd11b9b8bc" "" "" "" ); Arc::Job job; job = xml; CPPUNIT_ASSERT_EQUAL((std::string)"gsiftp://example-ce.nordugrid.org:2811/jobs/3456789101112", job.JobID); CPPUNIT_ASSERT_EQUAL(Arc::URL("ldap://example-ce.nordugrid.org:2135/Mds-Vo-name=local,o=Grid??base?(objectClass=*)"), job.ServiceInformationURL); CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.ldapng", job.ServiceInformationInterfaceName); CPPUNIT_ASSERT_EQUAL(Arc::URL("ldap://example-ce.nordugrid.org:2135/Mds-Vo-name=local,o=Grid??sub?(nordugrid-job-globalid=gsiftp://example-ce.nordugrid.org:2811/jobs/3456789101112)"), job.JobStatusURL); CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.ldapng", job.JobStatusInterfaceName); CPPUNIT_ASSERT_EQUAL(Arc::URL("gsiftp://example-ce.nordugrid.org:2811/jobs"), job.JobManagementURL); CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.gridftpjob", job.JobManagementInterfaceName); CPPUNIT_ASSERT_EQUAL(Arc::URL("gsiftp://example-ce.nordugrid.org:2811/jobs/3456789101112"), job.StageInDir); CPPUNIT_ASSERT_EQUAL(Arc::URL("gsiftp://example-ce.nordugrid.org:2811/jobs/3456789101112"), job.StageOutDir); CPPUNIT_ASSERT_EQUAL(Arc::URL("gsiftp://example-ce.nordugrid.org:2811/jobs/3456789101112"), job.SessionDir); CPPUNIT_ASSERT_EQUAL((std::string)"3456789101112", job.IDFromEndpoint); CPPUNIT_ASSERT_EQUAL(Arc::Time("2010-09-24 16:17:46"), job.LocalSubmissionTime); CPPUNIT_ASSERT_EQUAL((std::string)"&(executable=\"helloworld.sh\")(arguments=\"random.dat\")(inputfiles=(\"helloworld.sh\")(\"random.dat\"))(stdout=\"helloworld.out\")(join=\"yes\")", job.JobDescriptionDocument); CPPUNIT_ASSERT_EQUAL(2, (int)job.ActivityOldID.size()); CPPUNIT_ASSERT_EQUAL((std::string)"gsiftp://example-ce.nordugrid.org:2811/jobs/765234", job.ActivityOldID.front()); CPPUNIT_ASSERT_EQUAL((std::string)"https://helloworld-ce.nordugrid.org:12345/arex/543678", job.ActivityOldID.back()); CPPUNIT_ASSERT_EQUAL(2, (int)job.LocalInputFiles.size()); std::map::const_iterator itFiles = job.LocalInputFiles.begin(); CPPUNIT_ASSERT_EQUAL((std::string)"helloworld.sh", itFiles->first); CPPUNIT_ASSERT_EQUAL((std::string)"c0489bec6f7f4454d6cfe1b0a07ad5b8", itFiles->second); itFiles++; CPPUNIT_ASSERT_EQUAL((std::string)"random.dat", itFiles->first); CPPUNIT_ASSERT_EQUAL((std::string)"e52b14b10b967d9135c198fd11b9b8bc", itFiles->second); } void JobTest::VersionOneFormatTest() { Arc::XMLNode xml( "" "gsiftp://grid.example.com:2811/jobs/1234567890" "ARC0" 
"ldap://grid.example.com:2135/Mds-Vo-name=local, o=Grid??sub?(|(objectclass=nordugrid-cluster)(objectclass=nordugrid-queue)(nordugrid-authuser-sn=somedn))" "ldap://grid.example.com:2135/Mds-Vo-name=local, o=Grid??sub?(nordugrid-job-globalid=gsiftp:\\2f\\2fgrid.example.com:2811\\2fjobs\\2f1234567890)" "2010-09-24 16:17:46" "&(executable=\"helloworld.sh\")(arguments=\"random.dat\")(inputfiles=(\"helloworld.sh\")(\"random.dat\"))(stdout=\"helloworld.out\")(join=\"yes\")" "https://example-ce.com:443/arex/765234" "https://helloworld-ce.com:12345/arex/543678" "" "" "helloworld.sh" "c0489bec6f7f4454d6cfe1b0a07ad5b8" "" "" "random.dat" "e52b14b10b967d9135c198fd11b9b8bc" "" "" "" ); Arc::Job job; job = xml; CPPUNIT_ASSERT_EQUAL((std::string)"gsiftp://grid.example.com:2811/jobs/1234567890", job.JobID); CPPUNIT_ASSERT_EQUAL((std::string)"org.nordugrid.gridftpjob", job.JobManagementInterfaceName); CPPUNIT_ASSERT_EQUAL(Arc::URL("ldap://grid.example.com:2135/Mds-Vo-name=local, o=Grid??sub?(|(objectclass=nordugrid-cluster)(objectclass=nordugrid-queue)(nordugrid-authuser-sn=somedn))").fullstr(), job.ServiceInformationURL.fullstr()); CPPUNIT_ASSERT_EQUAL(Arc::Time("2010-09-24 16:17:46"), job.LocalSubmissionTime); CPPUNIT_ASSERT_EQUAL((std::string)"&(executable=\"helloworld.sh\")(arguments=\"random.dat\")(inputfiles=(\"helloworld.sh\")(\"random.dat\"))(stdout=\"helloworld.out\")(join=\"yes\")", job.JobDescriptionDocument); CPPUNIT_ASSERT_EQUAL(2, (int)job.ActivityOldID.size()); CPPUNIT_ASSERT_EQUAL((std::string)"https://example-ce.com:443/arex/765234", job.ActivityOldID.front()); CPPUNIT_ASSERT_EQUAL((std::string)"https://helloworld-ce.com:12345/arex/543678", job.ActivityOldID.back()); CPPUNIT_ASSERT_EQUAL(2, (int)job.LocalInputFiles.size()); std::map::const_iterator itFiles = job.LocalInputFiles.begin(); CPPUNIT_ASSERT_EQUAL((std::string)"helloworld.sh", itFiles->first); CPPUNIT_ASSERT_EQUAL((std::string)"c0489bec6f7f4454d6cfe1b0a07ad5b8", itFiles->second); itFiles++; CPPUNIT_ASSERT_EQUAL((std::string)"random.dat", itFiles->first); CPPUNIT_ASSERT_EQUAL((std::string)"e52b14b10b967d9135c198fd11b9b8bc", itFiles->second); } CPPUNIT_TEST_SUITE_REGISTRATION(JobTest); nordugrid-arc-5.4.2/src/hed/libs/compute/test/PaxHeaders.7502/ExecutionTargetTest.cpp0000644000000000000000000000012412045235201026560 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.709704 30 ctime=1513200659.849745648 nordugrid-arc-5.4.2/src/hed/libs/compute/test/ExecutionTargetTest.cpp0000644000175000002070000000305412045235201026627 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include static Arc::Logger testLogger(Arc::Logger::getRootLogger(), "ExecutionTargetTest"); class ExecutionTargetTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(ExecutionTargetTest); CPPUNIT_TEST(RegisterJobsubmissionTest); CPPUNIT_TEST_SUITE_END(); public: ExecutionTargetTest() {} void setUp(); void tearDown() {} void RegisterJobsubmissionTest(); private: Arc::ExecutionTarget et; Arc::JobDescription job; }; void ExecutionTargetTest::setUp() { et.ComputingEndpoint->URLString = "http://localhost/test"; et.ComputingEndpoint->HealthState = "ok"; } void ExecutionTargetTest::RegisterJobsubmissionTest() { job.Resources.SlotRequirement.NumberOfSlots = 4; et.ComputingManager->TotalSlots = 100; et.ComputingShare->MaxSlotsPerJob = 5; et.ComputingShare->FreeSlots = 7; et.ComputingShare->UsedSlots = 10; et.ComputingShare->WaitingJobs = 0; et.RegisterJobSubmission(job); CPPUNIT_ASSERT_EQUAL(3, 
et.ComputingShare->FreeSlots); CPPUNIT_ASSERT_EQUAL(14, et.ComputingShare->UsedSlots); CPPUNIT_ASSERT_EQUAL(0, et.ComputingShare->WaitingJobs); et.RegisterJobSubmission(job); CPPUNIT_ASSERT_EQUAL(3, et.ComputingShare->FreeSlots); CPPUNIT_ASSERT_EQUAL(14, et.ComputingShare->UsedSlots); CPPUNIT_ASSERT_EQUAL(4, et.ComputingShare->WaitingJobs); } CPPUNIT_TEST_SUITE_REGISTRATION(ExecutionTargetTest); nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/JobDescriptionParserPlugin.h0000644000000000000000000000012412676705167026575 xustar000000000000000027 mtime=1459325559.484726 27 atime=1513200574.740704 30 ctime=1513200659.771744694 nordugrid-arc-5.4.2/src/hed/libs/compute/JobDescriptionParserPlugin.h0000644000175000002070000002726612676705167026657 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOBDESCRIPTIONPARSERPLUGIN_H__ #define __ARC_JOBDESCRIPTIONPARSERPLUGIN_H__ /** \file * \brief Plugin, loader and argument classes for job description parser specialisation. */ #include #include #include #include #include #include namespace Arc { class JobDescription; class Logger; /** * \ingroup accplugins * \headerfile JobDescriptionParserPlugin.h arc/compute/JobDescriptionParserPlugin.h */ /** * \since Added in 5.1.0 **/ class JobDescriptionParsingError { public: JobDescriptionParsingError() {} JobDescriptionParsingError(const std::string& message, const std::pair& line_pos = std::make_pair(0, 0), const std::string& failing_code = "") : message(message), failing_code(failing_code), line_pos(line_pos) {} ~JobDescriptionParsingError() {} std::string message; std::string failing_code; std::pair line_pos; }; class JobDescriptionParserPluginResult { public: typedef enum { Success, Failure, /**< Parsing failed **/ WrongLanguage } Result; JobDescriptionParserPluginResult(void):v_(Success) { }; JobDescriptionParserPluginResult(bool v):v_(v?Success:Failure) { }; JobDescriptionParserPluginResult(Result v):v_(v) { }; operator bool(void) { return (v_ == Success); }; bool operator!(void) { return (v_ != Success); }; bool operator==(bool v) { return ((v_ == Success) == v); }; bool operator==(Result v) { return (v_ == v); }; /** * \since Added in 5.1.0 **/ bool HasErrors() const { return !errors_.empty(); }; /** * \since Added in 5.1.0 **/ const std::list& GetErrors() const { return errors_; }; /** * \since Added in 5.1.0 **/ void AddError(const JobDescriptionParsingError& error) { errors_.push_back(error); }; /** * \since Added in 5.1.0 **/ void AddError(const IString& msg, const std::pair& location = std::make_pair(0, 0), const std::string& failing_code = "") { errors_.push_back(JobDescriptionParsingError(msg.str(), location, failing_code)); } /** * \since Added in 5.1.0 **/ void SetSuccess() { v_ = Success; }; /** * \since Added in 5.1.0 **/ void SetFailure() { v_ = Failure; }; /** * \since Added in 5.1.0 **/ void SetWrongLanguage() {v_ = WrongLanguage; }; private: Result v_; std::list errors_; }; /// Abstract plugin class for different parsers /** * The JobDescriptionParserPlugin class is abstract which provide an interface * for job description parsers. A job description parser should inherit this * class and overwrite the JobDescriptionParserPlugin::Parse and * JobDescriptionParserPlugin::UnParse methods. The inheriating class should * add the job description languages that it supports to the * 'supportedLanguages' member, formatted according to the GLUE2 * JobDescription_t type (GFD-R-P.147). 
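 * As a purely illustrative sketch (the class name, the language string and the
 * plugin registration details are hypothetical, not part of this API), a minimal
 * specialisation could look roughly as follows:
 * \code
 * class MyParserPlugin : public Arc::JobDescriptionParserPlugin {
 * public:
 *   MyParserPlugin(Arc::PluginArgument* parg) : Arc::JobDescriptionParserPlugin(parg) {
 *     supportedLanguages.push_back("x-example:mylanguage"); // hypothetical language name
 *   }
 *
 *   virtual Arc::JobDescriptionParserPluginResult Parse(const std::string& source,
 *       std::list<Arc::JobDescription>& jobdescs,
 *       const std::string& language = "", const std::string& dialect = "") const {
 *     if (!language.empty() && !IsLanguageSupported(language)) {
 *       return Arc::JobDescriptionParserPluginResult::WrongLanguage;
 *     }
 *     // ... parse 'source' and, only on success, append the resulting
 *     // JobDescription object(s) to 'jobdescs' ...
 *     return Arc::JobDescriptionParserPluginResult::Success;
 *   }
 *
 *   virtual Arc::JobDescriptionParserPluginResult Assemble(const Arc::JobDescription& job,
 *       std::string& output, const std::string& language, const std::string& dialect = "") const {
 *     if (!IsLanguageSupported(language)) {
 *       return Arc::JobDescriptionParserPluginResult::WrongLanguage;
 *     }
 *     // ... render 'job' into 'output' in the supported language ...
 *     return Arc::JobDescriptionParserPluginResult::Success;
 *   }
 * };
 * \endcode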
The created job description parser * will then be available to the JobDescription::Parse, * JobDescription::ParseFromFile and JobDescription::Assemble methods, adding * the ability to parse and assemble job descriptions in the specified * languages. * * Using the methods in JobDescription class for parsing job descriptions is * recommended, however it is also possible to use parser plugins directly, * which can be done by loading them using the * JobDescriptionParserPluginLoader class. * * Since this class inheriates from the Plugin class, inheriating classes * should be compiled as a loadable module. See xxx for information on * creating loadable modules for ARC. * * \ingroup accplugins * \headerfile JobDescriptionParserPlugin.h arc/compute/JobDescriptionParserPlugin.h */ class JobDescriptionParserPlugin : public Plugin { public: virtual ~JobDescriptionParserPlugin(); /// Parse string into JobDescription objects /** * Parses the string argument \p source into JobDescription objects. If the * \p language argument is specified the method will only parse the string * if it supports that language - a JobDescriptionParserPluginResult object * with status \ref JobDescriptionParserPluginResult::WrongLanguage "WrongLanguage" * is returned if language is not supported. Similar for the \p dialect * argument, if specified, string is only parsed if that dialect is known * by parser. * If the \p language argument is not specified an attempt at parsing * \p source using any of the supported languages is tried. * If parsing is successful the generated JobDescription objects is appended * to the \p jobdescs argument. If parsing is unsuccessful the \p jobdescs * argument is untouched, and details of the failure is returned. * * Inheriating classes must extend this method. The extended method should * parse the \p source argument string into a JobDescription object, possibly * into multiple objects. Some languages can contain multiple alternative * views, in such cases alternatives should be added using the * JobDescription::AddAlternative method. Only if parsing is successful * should the generated JobDescription objects be added to the \p jobdescs * argument. Note: The only allowed modification of the \p jobdescs list is * adding elements. If the \p language argument is specified parsing should * only be initiated if the specified language is among the supported * ones, if that is not the case \ref JobDescriptionParserPluginResult::WrongLanguage "WrongLanguage" * should be returned. * If the \p language argument * For some languages different dialects exist (e.g. user- and GM- side xRSL, * JSDL, JSDL-POSIX), and if the \p dialect argument is specified the * parsing must strictly conform to that dialect. If the dialect is unknown * \ref JobDescriptionParserPluginResult::WrongLanguage "WrongLanguage" * should be returned. * * \param source should contain a representation of job description as a * string. * \param jobdescs a reference to a list of JobDescription object which * parsed job descriptions should be appended to. * \param language if specified parse in specified language (if not supported * \ref JobDescriptionParserPluginResult::WrongLanguage "WrongLanguage" is * returned). * \param dialect if specified parsing conforms strictly to specified * dialect. * \return A JobDescriptionParserPluginResult is returned indicating outcome * of parsing. 
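 * A hedged usage sketch (the plugin name below is hypothetical; which languages a
 * given plugin accepts depends on that plugin):
 * \code
 * Arc::JobDescriptionParserPluginLoader loader;
 * Arc::JobDescriptionParserPlugin* parser = loader.load("SomeParserPlugin"); // hypothetical name
 * std::string sourceString = "..."; // job description text in a language the plugin supports
 * std::list<Arc::JobDescription> jobdescs;
 * if (parser != NULL && parser->Parse(sourceString, jobdescs, "nordugrid:xrsl")) {
 *   // On success the parsed JobDescription object(s) have been appended to jobdescs.
 * }
 * \endcode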
* \see JobDescriptionParserPlugin::Assemble * \see JobDescriptionParserPluginResult **/ virtual JobDescriptionParserPluginResult Parse(const std::string& source, std::list& jobdescs, const std::string& language = "", const std::string& dialect = "") const = 0; /// Assemble job description into string /** * \since Added in 5.1.0 **/ virtual JobDescriptionParserPluginResult Assemble(const JobDescription& job, std::string& output, const std::string& language, const std::string& dialect = "") const = 0; /// [DEPRECATED] Assemble job description into string /** * \deprecated Deprecated as of 5.1.0, use the * JobDescriptionParserPlugin::Assemble method instead - expected to be * removed in 6.0.0. **/ virtual JobDescriptionParserPluginResult UnParse(const JobDescription& job, std::string& output, const std::string& language, const std::string& dialect = "") const { return Assemble(job, output, language, dialect); }; /// Get supported job description languages /** * \return A list of job description languages supported by this parser is * returned. **/ const std::list& GetSupportedLanguages() const { return supportedLanguages; } /// Check if language is supported /** * \param language a string formatted according to the GLUE2 * JobDescription_t type (GFD-R-P.147), e.g. nordugrid:xrsl. * \return \c true is returned if specified language is supported. **/ bool IsLanguageSupported(const std::string& language) const { return std::find(supportedLanguages.begin(), supportedLanguages.end(), language) != supportedLanguages.end(); } /// [DEPRECATED] Get parsing error /** * \deprecated Deprecated as of 5.1.0 - expected to be removed in 6.0.0. **/ const std::string& GetError(void) { return error; }; protected: JobDescriptionParserPlugin(PluginArgument* parg); /// [DEPRECATED] Get reference to sourceLanguage member /** * \deprecated Deprecated as of 5.1.0 - expected to be removed in 6.0.0. **/ std::string& SourceLanguage(JobDescription& j) const; /// List of supported job description languages /** * Inheriating classes should add languages supported to this list in * the constructor. **/ std::list supportedLanguages; /// [DEPRECATED] Parsing error /** * \deprecated Deprecated as of 5.1.0 - expected to be removed in 6.0.0. **/ mutable std::string error; static Logger logger; }; /** Class responsible for loading JobDescriptionParserPlugin plugins * The JobDescriptionParserPlugin objects returned by a * JobDescriptionParserPluginLoader must not be used after the * JobDescriptionParserPluginLoader goes out of scope. * * \ingroup accplugins * \headerfile JobDescriptionParserPlugin.h arc/compute/JobDescriptionParserPlugin.h */ class JobDescriptionParserPluginLoader : public Loader { public: /** Constructor * Creates a new JobDescriptionParserPluginLoader. */ JobDescriptionParserPluginLoader(); /** Destructor * Calling the destructor destroys all JobDescriptionParserPlugin object * loaded by the JobDescriptionParserPluginLoader instance. */ ~JobDescriptionParserPluginLoader(); /** Load a new JobDescriptionParserPlugin * \param name The name of the JobDescriptionParserPlugin to load. * \return A pointer to the new JobDescriptionParserPlugin (NULL on error). */ JobDescriptionParserPlugin* load(const std::string& name); /** Retrieve the list of loaded JobDescriptionParserPlugin objects. * \return A reference to the list of JobDescriptionParserPlugin objects. 
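 * An illustrative sketch of visiting parsers through the iterator interface
 * declared below (assumed usage, not a guaranteed pattern):
 * \code
 * Arc::JobDescriptionParserPluginLoader loader;
 * for (Arc::JobDescriptionParserPluginLoader::iterator it = loader.GetIterator(); it; ++it) {
 *   // *it refers to a JobDescriptionParserPlugin owned by 'loader'.
 * }
 * \endcode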
*/ const std::list& GetJobDescriptionParserPlugins() const { return jdps; } class iterator { private: iterator(JobDescriptionParserPluginLoader& jdpl); iterator& operator=(const iterator& it) { return *this; } public: ~iterator() {} //iterator& operator=(const iterator& it) { current = it.current; jdpl = it.jdpl; return *this; } JobDescriptionParserPlugin& operator*() { return **current; } const JobDescriptionParserPlugin& operator*() const { return **current; } JobDescriptionParserPlugin* operator->() { return *current; } const JobDescriptionParserPlugin* operator->() const { return *current; } iterator& operator++(); operator bool() { return !jdpl->jdpDescs.empty() || current != jdpl->jdps.end(); } friend class JobDescriptionParserPluginLoader; private: void LoadNext(); std::list::iterator current; JobDescriptionParserPluginLoader* jdpl; }; iterator GetIterator() { return iterator(*this); } private: std::list jdps; std::list jdpDescs; void scan(); bool scaningDone; }; } // namespace Arc #endif // __ARC_JOBDESCRIPTIONPARSERPLUGIN_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/EntityRetrieverPlugin.cpp0000644000000000000000000000012412230001413026142 xustar000000000000000027 mtime=1382023947.923184 27 atime=1513200574.750704 30 ctime=1513200659.807745134 nordugrid-arc-5.4.2/src/hed/libs/compute/EntityRetrieverPlugin.cpp0000644000175000002070000000723112230001413026212 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include "EntityRetrieverPlugin.h" namespace Arc { template<> Logger EntityRetrieverPluginLoader::logger(Logger::getRootLogger(), "ServiceEndpointRetrieverPluginLoader"); template<> Logger EntityRetrieverPluginLoader::logger(Logger::getRootLogger(), "TargetInformationRetrieverPluginLoader"); template<> Logger EntityRetrieverPluginLoader::logger(Logger::getRootLogger(), "JobListRetrieverPluginLoader"); template<> const std::string EntityRetrieverPlugin::kind("HED:ServiceEndpointRetrieverPlugin"); template<> const std::string EntityRetrieverPlugin::kind("HED:TargetInformationRetrieverPlugin"); template<> const std::string EntityRetrieverPlugin::kind("HED:JobListRetrieverPlugin"); template EntityRetrieverPluginLoader::~EntityRetrieverPluginLoader() { for (typename std::map *>::iterator it = plugins.begin(); it != plugins.end(); it++) { delete it->second; } } template EntityRetrieverPlugin* EntityRetrieverPluginLoader::load(const std::string& name) { if (plugins.find(name) != plugins.end()) { logger.msg(DEBUG, "Found %s %s (it was loaded already)", EntityRetrieverPlugin::kind, name); return plugins[name]; } if (name.empty()) { return NULL; } if(!factory_->load(FinderLoader::GetLibrariesList(), EntityRetrieverPlugin::kind, name)) { logger.msg(ERROR, "Unable to locate the \"%s\" plugin. Please refer to installation instructions and check if package providing support for %s plugin is installed", name, name); logger.msg(DEBUG, "%s plugin \"%s\" not found.", EntityRetrieverPlugin::kind, name, name); return NULL; } EntityRetrieverPlugin *p = factory_->GetInstance< EntityRetrieverPlugin >(EntityRetrieverPlugin::kind, name, NULL, false); if (!p) { logger.msg(ERROR, "Unable to locate the \"%s\" plugin. 
Please refer to installation instructions and check if package providing support for \"%s\" plugin is installed", name, name); logger.msg(DEBUG, "%s %s could not be created.", EntityRetrieverPlugin::kind, name, name); return NULL; } plugins[name] = p; logger.msg(DEBUG, "Loaded %s %s", EntityRetrieverPlugin::kind, name); return p; } template std::list EntityRetrieverPluginLoader::getListOfPlugins() { std::list modules; PluginsFactory factory(BaseConfig().MakeConfig(Config()).Parent()); factory.scan(FinderLoader::GetLibrariesList(), modules); PluginsFactory::FilterByKind(EntityRetrieverPlugin::kind, modules); std::list names; for (std::list::const_iterator it = modules.begin(); it != modules.end(); it++) { for (std::list::const_iterator it2 = it->plugins.begin(); it2 != it->plugins.end(); it2++) { names.push_back(it2->name); } } return names; } ServiceEndpointRetrieverPlugin::ServiceEndpointRetrieverPlugin(PluginArgument* parg): EntityRetrieverPlugin(parg) {}; TargetInformationRetrieverPlugin::TargetInformationRetrieverPlugin(PluginArgument* parg): EntityRetrieverPlugin(parg) {}; JobListRetrieverPlugin::JobListRetrieverPlugin(PluginArgument* parg): EntityRetrieverPlugin(parg) {}; template class EntityRetrieverPluginLoader; template class EntityRetrieverPluginLoader; template class EntityRetrieverPluginLoader; } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/test-job-20000644000000000000000000000012412114365426022746 xustar000000000000000027 mtime=1362225942.176287 27 atime=1513200574.669703 30 ctime=1513200659.816745244 nordugrid-arc-5.4.2/src/hed/libs/compute/test-job-20000644000175000002070000000015712114365426023016 0ustar00mockbuildmock00000000000000&(executable = '/usr/bin/env') (jobname = 'arctest2') (stdout = 'stdout') (join = 'yes') (gmlog = 'gmlog') nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/JobInformationStorage.h0000644000000000000000000000012412306304733025550 xustar000000000000000027 mtime=1394182619.331424 27 atime=1513200574.732704 30 ctime=1513200659.783744841 nordugrid-arc-5.4.2/src/hed/libs/compute/JobInformationStorage.h0000644000175000002070000002345712306304733025630 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOBINFORMATIONSTORAGE_H__ #define __ARC_JOBINFORMATIONSTORAGE_H__ #include #include #include namespace Arc { class JobInformationStorage; typedef struct { const char *name; JobInformationStorage* (*instance)(const std::string&); } JobInformationStorageDescriptor; /// Abstract class for storing job information /** * This abstract class provides an interface which can be used to store job * information, which can then later be used to initialise Job objects from. * * \note This class is abstract. All functionality is provided by specialised * child classes. * * \headerfile Job.h arc/compute/Job.h * \ingroup compute **/ class JobInformationStorage { public: /// Constructor /** * Construct a JobInformationStorage object with name \c name. The name * could be a file name or maybe a database, that is implemention specific. * The \c nTries argument specifies the number times a lock on the storage * should be tried obtained for each method invocation. The constructor it * self should not acquire a lock through-out the object lifetime. * \c tryInterval is the waiting period in micro seconds between each * locking attemp. * * @param name name of the storage. * @param nTries specifies the maximal number of times try to acquire a * lock on storage to read from. 
* @param tryInterval specifies the interval (in micro seconds) between each * attempt to acquire a lock. **/ JobInformationStorage(const std::string& name, unsigned nTries = 10, unsigned tryInterval = 500000) : name(name), nTries(nTries), tryInterval(tryInterval), isValid(false) {} virtual ~JobInformationStorage() {} /// Check if storage is valid /** * @return true if storage is valid. **/ bool IsValid() const { return isValid; } /// Check if storage exists /** * @return true if storage already exist. **/ bool IsStorageExisting() const { return isStorageExisting; } /// Read all jobs from storage /** * Read all jobs contained in storage, except those managed by a service at * an endpoint which matches any of those in the \c rejectEndpoints list * parameter. The read jobs are added to the list of Job objects referenced * by the \c jobs parameter. The algorithm used for matching should be * equivalent to that used in the URL::StringMatches method. * * \note This method is abstract and an implementation must be provided by * specialised classes. * * @param jobs is a reference to a list of Job objects, which will be filled * with the jobs read from storage (cleared before use). * @param rejectEndpoints is a list of strings specifying endpoints for * which Job objects with JobManagementURL matching any of those endpoints * will not be part of the retrieved jobs. The algorithm used for matching * should be equivalent to that used in the URL::StringMatches method. * @return \c true is returned if all jobs contained in the storage was * retrieved (except those rejected, if any), otherwise false. **/ virtual bool ReadAll(std::list& jobs, const std::list& rejectEndpoints = std::list()) = 0; /// Read specified jobs /** * Read jobs specified by job identifiers and/or endpoints from storage. * Only jobs which has a JobID or a Name attribute matching any of the items * in the \c identifiers list parameter, and also jobs for which the * \c JobManagementURL attribute matches any of those endpoints specified in * the \c endpoints list parameter, will be added to the * list of Job objects reference to by the \c jobs parameter, except those * jobs for which the \c JobManagementURL attribute matches any of those * endpoints specified in the \c rejectEndpoints list parameter. Identifiers * specified in the \c jobIdentifiers list parameter which matches a job in * the storage will be removed from the referenced list. The algorithm used * for matching should be equivalent to that used in the URL::StringMatches * method. * * \note This method is abstract and an implementation must be provided by * specialised classes. * * @param jobs reference to list of Job objects which will be filled with * matching jobs. * @param jobIdentifiers specifies the job IDs and names of jobs to be added * to the job list. Entries in this list is removed if they match a job * from the storage. * @param endpoints is a list of strings specifying endpoints for * which Job objects with the JobManagementURL attribute matching any of * those endpoints will added to the job list. The algorithm used for * matching should be equivalent to that used in the URL::StringMatches * method. * @param rejectEndpoints is a list of strings specifying endpoints for * which Job objects with the JobManagementURL attribute matching any of * those endpoints will not be part of the retrieved jobs. The algorithm * used for matching should be equivalent to that used in the * URL::StringMatches method. 
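     * A hedged example (the 'storage' pointer is assumed to refer to a concrete
     * JobInformationStorage implementation created elsewhere):
     * \code
     * std::list<Arc::Job> jobs;
     * std::list<std::string> rejectEndpoints;
     * rejectEndpoints.push_back("gsiftp://example-ce.nordugrid.org:2811/jobs"); // example pattern
     * if (storage->ReadAll(jobs, rejectEndpoints)) {
     *   // 'jobs' now holds all stored jobs except those whose JobManagementURL
     *   // matches the rejected endpoint.
     * }
     * \endcode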
* @return \c false is returned in case a job failed to be read from * storage, otherwise \c true is returned. This method will also return in * case an identifier does not match any jobs in the storage. **/ virtual bool Read(std::list& jobs, std::list& jobIdentifiers, const std::list& endpoints = std::list(), const std::list& rejectEndpoints = std::list()) = 0; /// Write jobs /** * Add jobs to storage. If there already exist a job with a specific job ID * in the storage, and a job with the same job ID is tried added to the * storage then the existing job will be overwritten. * * A specialised implementaion does not necessarily need to be provided. If * not provided Write(const std::list&, std::set&, std::list&) * will be used. * * @param jobs is the list of Job objects which should be added to the * storage. * @return \c true is returned if all jobs in the \c jobs list are written * to to storage, otherwise \c false is returned. * @see Write(const std::list&, std::set&, std::list&) */ virtual bool Write(const std::list& jobs) { std::list newJobs; std::set prunedServices; return Write(jobs, prunedServices, newJobs); } /// Write jobs /** * Add jobs to storage. If there already exist a job with a specific job ID * in the storage, and a job with the same job ID is tried added to the * storage then the existing job will be overwritten. For jobs in the * storage with a ServiceEndpointURL attribute where the host name is equal * to any of the entries in the set referenced by the \c prunedServices * parameter, is removed from the storage, if they are not among the list of * jobs referenced by the \c jobs parameter. A pointer to jobs in the job * list (\c jobs) which does not already exist in the storage will be added * to the list of Job object pointers referenced by the \c newJobs * parameter. * * \note This method is abstract and an implementation must be provided by * specialised classes. * * @param jobs is the list of Job objects which should be added to the * storage. * @param prunedServices is a set of host names of services whose jobs * should be removed if not replaced. This is typically the list of * host names for which at least one endpoint was successfully queried. * By passing an empty set, all existing jobs are kept, even if jobs are * outdated. * @param newJobs is a reference to a list of pointers to Job objects which * are not duplicates. * @return \c true is returned if all jobs in the \c jobs list are written * to to storage, otherwise \c false is returned. **/ virtual bool Write(const std::list& jobs, const std::set& prunedServices, std::list& newJobs) = 0; /// Clean storage /** * Invoking this method causes the storage to be cleaned of any jobs it * holds. * * \note This method is abstract and an implementation must be provided by * specialised classes. * * @return \c true is returned if the storage was successfully cleaned, * otherwise \c false is returned. **/ virtual bool Clean() = 0; /// Remove jobs /** * The jobs with matching job IDs (Job::JobID attribute) as specified with * the list of job IDs (\c jobids parameter) will be remove from the * storage. * * \note This method is abstract and an implementation must be provided by * specialised classes. * * @param jobids list job IDs for which matching jobs should be remove from * storage. * @return \c is returned if any of the matching jobs failed to be removed * from the storage, otherwise \c true is returned. **/ virtual bool Remove(const std::list& jobids) = 0; /// Get name /** * @return Returns the name of the storage. 
**/ const std::string& GetName() const { return name; } static JobInformationStorageDescriptor AVAILABLE_TYPES[]; protected: const std::string name; unsigned nTries; unsigned tryInterval; /** * \since Added in 4.0.0. **/ bool isValid; /** * \since Added in 4.0.0. **/ bool isStorageExisting; }; } // namespace Arc #endif // __ARC_JOBINFORMATIONSTORAGE_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/ExecutionTarget.cpp0000644000000000000000000000012313024224727024751 xustar000000000000000026 mtime=1481714135.79494 27 atime=1513200574.734704 30 ctime=1513200659.791744939 nordugrid-arc-5.4.2/src/hed/libs/compute/ExecutionTarget.cpp0000644000175000002070000007555313024224727025036 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include namespace Arc { Logger ExecutionTarget::logger(Logger::getRootLogger(), "ExecutionTarget"); Logger ComputingServiceType::logger(Logger::getRootLogger(), "ComputingServiceType"); template void ComputingServiceType::GetExecutionTargets(T& container) const { // TODO: Currently assuming only one ComputingManager and one ExecutionEnvironment. CountedPointer computingManager( ComputingManager.empty()? new ComputingManagerAttributes: ComputingManager.begin()->second.Attributes); CountedPointer executionEnvironment( (ComputingManager.empty() || ComputingManager.begin()->second.ExecutionEnvironment.empty())? new ExecutionEnvironmentAttributes: ComputingManager.begin()->second.ExecutionEnvironment.begin()->second.Attributes); CountedPointer< std::map > benchmarks( ComputingManager.empty()? new std::map: ComputingManager.begin()->second.Benchmarks); CountedPointer< std::list > applicationEnvironments( ComputingManager.empty()? new std::list: ComputingManager.begin()->second.ApplicationEnvironments); for (std::map::const_iterator itCE = ComputingEndpoint.begin(); itCE != ComputingEndpoint.end(); ++itCE) { if (!itCE->second->Capability.count(Endpoint::GetStringForCapability(Endpoint::JOBSUBMIT)) && !itCE->second->Capability.count(Endpoint::GetStringForCapability(Endpoint::JOBCREATION))) { continue; } if (!Attributes->InformationOriginEndpoint.RequestedSubmissionInterfaceName.empty()) { // If this endpoint has a non-preferred job interface, we skip it if (itCE->second->InterfaceName != Attributes->InformationOriginEndpoint.RequestedSubmissionInterfaceName) { logger.msg(INFO, "Skipping ComputingEndpoint '%s', because it has '%s' interface instead of the requested '%s'.", itCE->second->URLString, itCE->second->InterfaceName, Attributes->InformationOriginEndpoint.RequestedSubmissionInterfaceName); continue; } } // Create list of other endpoints. std::list< CountedPointer > OtherEndpoints; for (std::map::const_iterator itOE = ComputingEndpoint.begin(); itOE != ComputingEndpoint.end(); ++itOE) { if (itOE == itCE) { // Dont include the current endpoint in the list of other endpoints. 
continue; } OtherEndpoints.push_back(itOE->second.Attributes); } if (!itCE->second.ComputingShareIDs.empty()) { for (std::set::const_iterator itCSIDs = itCE->second.ComputingShareIDs.begin(); itCSIDs != itCE->second.ComputingShareIDs.end(); ++itCSIDs) { std::map::const_iterator itCS = ComputingShare.find(*itCSIDs); if (itCS != ComputingShare.end()) { // Create list of mapping policies std::list< CountedPointer > MappingPolicies; for (std::map::const_iterator itMP = itCS->second.MappingPolicy.begin(); itMP != itCS->second.MappingPolicy.end(); ++itMP) { MappingPolicies.push_back(itMP->second.Attributes); } AddExecutionTarget(container, ExecutionTarget(Location.Attributes, AdminDomain.Attributes, Attributes, itCE->second.Attributes, OtherEndpoints, itCS->second.Attributes, MappingPolicies, computingManager, executionEnvironment, benchmarks, applicationEnvironments)); } } } else if (!ComputingShare.empty()) { for (std::map::const_iterator itCS = ComputingShare.begin(); itCS != ComputingShare.end(); ++itCS) { // Create list of mapping policies std::list< CountedPointer > MappingPolicies; for (std::map::const_iterator itMP = itCS->second.MappingPolicy.begin(); itMP != itCS->second.MappingPolicy.end(); ++itMP) { MappingPolicies.push_back(itMP->second.Attributes); } AddExecutionTarget(container, ExecutionTarget(Location.Attributes, AdminDomain.Attributes, Attributes, itCE->second.Attributes, OtherEndpoints, itCS->second.Attributes, MappingPolicies, computingManager, executionEnvironment, benchmarks, applicationEnvironments)); } } else { // No ComputingShares and no associations. Either it is not computing service // or it does not bother to specify its share or does not split resources // by shares. // Check if it is computing endpoint at all for (std::set::const_iterator itCap = itCE->second.Attributes->Capability.begin(); itCap != itCE->second.Attributes->Capability.end(); ++itCap) { if((*itCap == "executionmanagement.jobcreation") || (*itCap == "executionmanagement.jobexecution")) { // Creating generic target CountedPointer computingShare(new ComputingShareAttributes); std::list< CountedPointer > MappingPolicies; AddExecutionTarget(container, ExecutionTarget(Location.Attributes, AdminDomain.Attributes, Attributes, itCE->second.Attributes, OtherEndpoints, computingShare, MappingPolicies, computingManager, executionEnvironment, benchmarks, applicationEnvironments)); break; } } } } } template void ComputingServiceType::GetExecutionTargets< std::list >(std::list&) const; template void ComputingServiceType::AddExecutionTarget(T&, const ExecutionTarget&) const {} template<> void ComputingServiceType::AddExecutionTarget< std::list >(std::list& etList, const ExecutionTarget& et) const { etList.push_back(et); } SubmissionStatus ExecutionTarget::Submit(const UserConfig& ucfg, const JobDescription& jobdesc, Job& job) const { return Submitter(ucfg).Submit(*this, jobdesc, job); } void ExecutionTarget::GetExecutionTargets(const std::list& csList, std::list& etList) { for (std::list::const_iterator it = csList.begin(); it != csList.end(); ++it) { it->GetExecutionTargets(etList); } } void ExecutionTarget::RegisterJobSubmission(const JobDescription& jobdesc) const { //WorkingAreaFree if (jobdesc.Resources.DiskSpaceRequirement.DiskSpace) { ComputingManager->WorkingAreaFree -= (int)(jobdesc.Resources.DiskSpaceRequirement.DiskSpace / 1024); if (ComputingManager->WorkingAreaFree < 0) ComputingManager->WorkingAreaFree = 0; } // FreeSlotsWithDuration if (!ComputingShare->FreeSlotsWithDuration.empty()) { 
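      // Local bookkeeping: the block below reduces the free-slot count in the
      // duration bucket matching the job's requested total CPU time and
      // propagates that reduction to the remaining buckets, so this cached
      // ExecutionTarget stays roughly consistent across repeated submissions.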
std::map::iterator cpuit, cpuit2; cpuit = ComputingShare->FreeSlotsWithDuration.lower_bound((unsigned int)jobdesc.Resources.TotalCPUTime.range); if (cpuit != ComputingShare->FreeSlotsWithDuration.end()) { if (jobdesc.Resources.SlotRequirement.NumberOfSlots >= cpuit->second) cpuit->second = 0; else for (cpuit2 = ComputingShare->FreeSlotsWithDuration.begin(); cpuit2 != ComputingShare->FreeSlotsWithDuration.end(); cpuit2++) { if (cpuit2->first <= cpuit->first) cpuit2->second -= jobdesc.Resources.SlotRequirement.NumberOfSlots; else if (cpuit2->second >= cpuit->second) { cpuit2->second = cpuit->second; Period oldkey = cpuit->first; cpuit++; ComputingShare->FreeSlotsWithDuration.erase(oldkey); } } if (cpuit->second == 0) ComputingShare->FreeSlotsWithDuration.erase(cpuit->first); if (ComputingShare->FreeSlotsWithDuration.empty()) { if (ComputingShare->MaxWallTime != -1) ComputingShare->FreeSlotsWithDuration[ComputingShare->MaxWallTime] = 0; else ComputingShare->FreeSlotsWithDuration[LONG_MAX] = 0; } } } //FreeSlots, UsedSlots, WaitingJobs if (ComputingShare->FreeSlots >= abs(jobdesc.Resources.SlotRequirement.NumberOfSlots)) { //The job will start directly ComputingShare->FreeSlots -= abs(jobdesc.Resources.SlotRequirement.NumberOfSlots); if (ComputingShare->UsedSlots != -1) ComputingShare->UsedSlots += abs(jobdesc.Resources.SlotRequirement.NumberOfSlots); } else if (ComputingShare->WaitingJobs != -1) //The job will enter the queue (or the cluster doesn't report FreeSlots) ComputingShare->WaitingJobs += abs(jobdesc.Resources.SlotRequirement.NumberOfSlots); return; } class Indenter : public std::streambuf { private: std::streambuf* dest; bool isAtStartOfLine; std::string indent; std::ostream* owner; protected: virtual int overflow(int ch) { if (isAtStartOfLine && ch != '\n') dest->sputn(indent.data(), indent.size()); isAtStartOfLine = (ch == '\n'); return dest->sputc(ch); } public: explicit Indenter(std::streambuf* dest, unsigned indentSize = 2) : dest(dest), isAtStartOfLine(true), indent(indentSize, ' '), owner(NULL) {} explicit Indenter(std::ostringstream& dest, unsigned indentSize = 2) : dest(dest.rdbuf()), isAtStartOfLine(true), indent(indentSize, ' '), owner(&dest) { owner->rdbuf(this); } explicit Indenter(std::ostream& dest, unsigned indentSize = 2) : dest(dest.rdbuf()), isAtStartOfLine(true), indent(indentSize, ' '), owner(&dest) { owner->rdbuf(this); } virtual ~Indenter() { if (owner != NULL) owner->rdbuf(dest); } }; std::ostream& operator<<(std::ostream& out, const LocationAttributes& l) { if (!l.Address.empty()) out << IString("Address: %s", l.Address) << std::endl; if (!l.Place.empty()) out << IString("Place: %s", l.Place) << std::endl; if (!l.Country.empty()) out << IString("Country: %s", l.Country) << std::endl; if (!l.PostCode.empty()) out << IString("Postal code: %s", l.PostCode) << std::endl; if (l.Latitude > 0) out << IString("Latitude: %f", l.Latitude) << std::endl; if (l.Longitude > 0) out << IString("Longitude: %f", l.Longitude) << std::endl; return out; } std::ostream& operator<<(std::ostream& out, const AdminDomainAttributes& ad) { if (!ad.Owner.empty()) { out << IString("Owner: %s", ad.Owner) << std::endl; } return out; } std::ostream& operator<<(std::ostream& out, const ComputingServiceAttributes& cs) { if (!cs.Name.empty()) out << IString("Name: %s", cs.Name) << '\n'; if (!cs.ID.empty()) out << IString("ID: %s", cs.ID) << '\n'; if (!cs.Type.empty()) out << IString("Type: %s", cs.Type) << '\n'; return out; } std::ostream& operator<<(std::ostream& out, const 
ComputingEndpointAttributes& ce) { if (!ce.URLString.empty()) out << IString("URL: %s", ce.URLString) << std::endl; if (!ce.InterfaceName.empty()) out << IString("Interface: %s", ce.InterfaceName) << std::endl; if (!ce.InterfaceVersion.empty()) { out << IString("Interface versions:") << std::endl; for (std::list::const_iterator it = ce.InterfaceVersion.begin(); it != ce.InterfaceVersion.end(); ++it) out << " " << *it << std::endl; } if (!ce.InterfaceExtension.empty()) { out << IString("Interface extensions:") << std::endl; for (std::list::const_iterator it = ce.InterfaceExtension.begin(); it != ce.InterfaceExtension.end(); ++it) out << " " << *it << std::endl; } if (!ce.Capability.empty()) { out << IString("Capabilities:") << std::endl; for (std::set::const_iterator it = ce.Capability.begin(); it != ce.Capability.end(); ++it) out << " " << *it << std::endl; } if (!ce.Technology.empty()) out << IString("Technology: %s", ce.Technology) << std::endl; if (!ce.SupportedProfile.empty()) { out << IString("Supported Profiles:") << std::endl; for (std::list::const_iterator it = ce.SupportedProfile.begin(); it != ce.SupportedProfile.end(); ++it) out << " " << *it << std::endl; } if (!ce.Implementor.empty()) out << IString("Implementor: %s", ce.Implementor) << std::endl; if (!ce.Implementation().empty()) out << IString("Implementation name: %s", (std::string)ce.Implementation) << std::endl; if (!ce.QualityLevel.empty()) out << IString("Quality level: %s", ce.QualityLevel) << std::endl; if (!ce.HealthState.empty()) out << IString("Health state: %s", ce.HealthState) << std::endl; if (!ce.HealthStateInfo.empty()) out << IString("Health state info: %s", ce.HealthStateInfo) << std::endl; if (!ce.ServingState.empty()) out << IString("Serving state: %s", ce.ServingState) << std::endl; if (!ce.IssuerCA.empty()) out << IString("Issuer CA: %s", ce.IssuerCA) << std::endl; if (!ce.TrustedCA.empty()) { out << IString("Trusted CAs:") << std::endl; for (std::list::const_iterator it = ce.TrustedCA.begin(); it != ce.TrustedCA.end(); ++it) out << " " << *it << std::endl; } if (ce.DowntimeStarts > -1) out << IString("Downtime starts: %s", ce.DowntimeStarts.str())<< std::endl; if (ce.DowntimeEnds > -1) out << IString("Downtime ends: %s", ce.DowntimeEnds.str()) << std::endl; if (!ce.Staging.empty()) out << IString("Staging: %s", ce.Staging) << std::endl; if (!ce.JobDescriptions.empty()) { out << IString("Job descriptions:") << std::endl; for (std::list::const_iterator it = ce.JobDescriptions.begin(); it != ce.JobDescriptions.end(); ++it) out << " " << *it << std::endl; } return out; } std::ostream& operator<<(std::ostream& out, const MappingPolicyAttributes& mp) { // ID not printed. std::string scheme = mp.Scheme.empty() ? 
"basic" : mp.Scheme; out << IString("Scheme: %s", scheme) << std::endl; for (std::list::const_iterator itR = mp.Rule.begin(); itR != mp.Rule.end(); ++itR) { out << IString("Rule: %s", *itR) << std::endl; } return out; } std::ostream& operator<<(std::ostream& out, const ComputingShareAttributes& cs) { // Following attributes are not printed: // Period MaxTotalCPUTime; // Period MaxTotalWallTime; // not in current Glue2 draft // std::string ID; if (!cs.Name.empty()) out << IString("Name: %s", cs.Name) << std::endl; if (!cs.MappingQueue.empty()) out << IString("Mapping queue: %s", cs.MappingQueue) << std::endl; if (cs.MaxWallTime > -1) out << IString("Max wall-time: %s", cs.MaxWallTime.istr()) << std::endl; if (cs.MaxTotalWallTime > -1) out << IString("Max total wall-time: %s", cs.MaxTotalWallTime.istr()) << std::endl; if (cs.MinWallTime > -1) out << IString("Min wall-time: %s", cs.MinWallTime.istr()) << std::endl; if (cs.DefaultWallTime > -1) out << IString("Default wall-time: %s", cs.DefaultWallTime.istr()) << std::endl; if (cs.MaxCPUTime > -1) out << IString("Max CPU time: %s", cs.MaxCPUTime.istr()) << std::endl; if (cs.MinCPUTime > -1) out << IString("Min CPU time: %s", cs.MinCPUTime.istr()) << std::endl; if (cs.DefaultCPUTime > -1) out << IString("Default CPU time: %s", cs.DefaultCPUTime.istr()) << std::endl; if (cs.MaxTotalJobs > -1) out << IString("Max total jobs: %i", cs.MaxTotalJobs) << std::endl; if (cs.MaxRunningJobs > -1) out << IString("Max running jobs: %i", cs.MaxRunningJobs) << std::endl; if (cs.MaxWaitingJobs > -1) out << IString("Max waiting jobs: %i", cs.MaxWaitingJobs) << std::endl; if (cs.MaxPreLRMSWaitingJobs > -1) out << IString("Max pre-LRMS waiting jobs: %i", cs.MaxPreLRMSWaitingJobs) << std::endl; if (cs.MaxUserRunningJobs > -1) out << IString("Max user running jobs: %i", cs.MaxUserRunningJobs) << std::endl; if (cs.MaxSlotsPerJob > -1) out << IString("Max slots per job: %i", cs.MaxSlotsPerJob) << std::endl; if (cs.MaxStageInStreams > -1) out << IString("Max stage in streams: %i", cs.MaxStageInStreams) << std::endl; if (cs.MaxStageOutStreams > -1) out << IString("Max stage out streams: %i", cs.MaxStageOutStreams) << std::endl; if (!cs.SchedulingPolicy.empty()) out << IString("Scheduling policy: %s", cs.SchedulingPolicy) << std::endl; if (cs.MaxMainMemory > -1) out << IString("Max memory: %i", cs.MaxMainMemory) << std::endl; if (cs.MaxVirtualMemory > -1) out << IString("Max virtual memory: %i", cs.MaxVirtualMemory) << std::endl; if (cs.MaxDiskSpace > -1) out << IString("Max disk space: %i", cs.MaxDiskSpace) << std::endl; if (cs.DefaultStorageService) out << IString("Default Storage Service: %s", cs.DefaultStorageService.str()) << std::endl; if (cs.Preemption) out << IString("Supports preemption") << std::endl; else out << IString("Doesn't support preemption") << std::endl; if (cs.TotalJobs > -1) out << IString("Total jobs: %i", cs.TotalJobs) << std::endl; if (cs.RunningJobs > -1) out << IString("Running jobs: %i", cs.RunningJobs) << std::endl; if (cs.LocalRunningJobs > -1) out << IString("Local running jobs: %i", cs.LocalRunningJobs) << std::endl; if (cs.WaitingJobs > -1) out << IString("Waiting jobs: %i", cs.WaitingJobs) << std::endl; if (cs.LocalWaitingJobs > -1) out << IString("Local waiting jobs: %i", cs.LocalWaitingJobs) << std::endl; if (cs.SuspendedJobs > -1) out << IString("Suspended jobs: %i", cs.SuspendedJobs) << std::endl; if (cs.LocalSuspendedJobs > -1) out << IString("Local suspended jobs: %i", cs.LocalSuspendedJobs) << std::endl; if (cs.StagingJobs > -1) 
out << IString("Staging jobs: %i", cs.StagingJobs) << std::endl; if (cs.PreLRMSWaitingJobs > -1) out << IString("Pre-LRMS waiting jobs: %i", cs.PreLRMSWaitingJobs) << std::endl; if (cs.EstimatedAverageWaitingTime > -1) out << IString("Estimated average waiting time: %s", cs.EstimatedAverageWaitingTime.istr()) << std::endl; if (cs.EstimatedWorstWaitingTime > -1) out << IString("Estimated worst waiting time: %s", cs.EstimatedWorstWaitingTime.istr()) << std::endl; if (cs.FreeSlots > -1) out << IString("Free slots: %i", cs.FreeSlots) << std::endl; if (!cs.FreeSlotsWithDuration.empty()) { out << IString("Free slots grouped according to time limits (limit: free slots):") << std::endl; for (std::map::const_iterator it = cs.FreeSlotsWithDuration.begin(); it != cs.FreeSlotsWithDuration.end(); ++it) { if (it->first != Period(LONG_MAX)) out << IString(" %s: %i", it->first.istr(), it->second) << std::endl; else out << IString(" unspecified: %i", it->second) << std::endl; } } if (cs.UsedSlots > -1) out << IString("Used slots: %i", cs.UsedSlots) << std::endl; if (cs.RequestedSlots > -1) out << IString("Requested slots: %i", cs.RequestedSlots) << std::endl; if (!cs.ReservationPolicy.empty()) out << IString("Reservation policy: %s", cs.ReservationPolicy) << std::endl; return out; } std::ostream& operator<<(std::ostream& out, const ComputingManagerAttributes& cm) { if (!cm.ProductName.empty()) { out << IString("Resource manager: %s", cm.ProductName); if (!cm.ProductVersion.empty()) out << IString(" (%s)", cm.ProductVersion); out << std::endl; } if (cm.TotalPhysicalCPUs > -1) out << IString("Total physical CPUs: %i", cm.TotalPhysicalCPUs) << std::endl; if (cm.TotalLogicalCPUs > -1) out << IString("Total logical CPUs: %i", cm.TotalLogicalCPUs) << std::endl; if (cm.TotalSlots > -1) out << IString("Total slots: %i", cm.TotalSlots) << std::endl; if (cm.Reservation) out << IString("Supports advance reservations") << std::endl; else out << IString("Doesn't support advance reservations") << std::endl; if (cm.BulkSubmission) out << IString("Supports bulk submission") << std::endl; else out << IString("Doesn't support bulk Submission") << std::endl; if (cm.Homogeneous) out << IString("Homogeneous resource") << std::endl; else out << IString("Non-homogeneous resource") << std::endl; if (!cm.NetworkInfo.empty()) { out << IString("Network information:") << std::endl; for (std::list::const_iterator it = cm.NetworkInfo.begin(); it != cm.NetworkInfo.end(); ++it) out << " " << *it << std::endl; } if (cm.WorkingAreaShared) out << IString("Working area is shared among jobs") << std::endl; else out << IString("Working area is not shared among jobs") << std::endl; if (cm.WorkingAreaTotal > -1) out << IString("Working area total size: %i GB", cm.WorkingAreaTotal) << std::endl; if (cm.WorkingAreaFree > -1) out << IString("Working area free size: %i GB", cm.WorkingAreaFree) << std::endl; if (cm.WorkingAreaLifeTime > -1) out << IString("Working area life time: %s", cm.WorkingAreaLifeTime.istr()) << std::endl; if (cm.CacheTotal > -1) out << IString("Cache area total size: %i GB", cm.CacheTotal) << std::endl; if (cm.CacheFree > -1) out << IString("Cache area free size: %i GB", cm.CacheFree) << std::endl; return out; } std::ostream& operator<<(std::ostream& out, const ExecutionEnvironmentAttributes& ee) { if (!ee.Platform.empty()) out << IString("Platform: %s", ee.Platform) << std::endl; if (ee.ConnectivityIn) out << IString("Execution environment supports inbound connections") << std::endl; else out << IString("Execution environment 
does not support inbound connections") << std::endl; if (ee.ConnectivityOut) out << IString("Execution environment supports outbound connections") << std::endl; else out << IString("Execution environment does not support outbound connections") << std::endl; if (ee.VirtualMachine) out << IString("Execution environment is a virtual machine") << std::endl; else out << IString("Execution environment is a physical machine") << std::endl; if (!ee.CPUVendor.empty()) out << IString("CPU vendor: %s", ee.CPUVendor) << std::endl; if (!ee.CPUModel.empty()) out << IString("CPU model: %s", ee.CPUModel) << std::endl; if (!ee.CPUVersion.empty()) out << IString("CPU version: %s", ee.CPUVersion) << std::endl; if (ee.CPUClockSpeed > -1) out << IString("CPU clock speed: %i", ee.CPUClockSpeed) << std::endl; if (ee.MainMemorySize > -1) out << IString("Main memory size: %i", ee.MainMemorySize) << std::endl; if (!ee.OperatingSystem.getFamily().empty()) out << IString("OS family: %s", ee.OperatingSystem.getFamily()) << std::endl; if (!ee.OperatingSystem.getName().empty()) out << IString("OS name: %s", ee.OperatingSystem.getName()) << std::endl; if (!ee.OperatingSystem.getVersion().empty()) out << IString("OS version: %s", ee.OperatingSystem.getVersion()) << std::endl; return out; } std::ostream& operator<<(std::ostream& out, const ComputingServiceType& cst) { std::ostringstream buffer; out << IString("Computing service:") << std::endl; Indenter iOut(out); buffer << *cst; if (buffer.tellp() > 0) { out << buffer.str(); } buffer.str(""); buffer << *cst.Location; if (buffer.tellp() > 0) { out << buffer.str(); } buffer.str(""); buffer << *cst.AdminDomain; if (buffer.tellp() > 0) { out << buffer.str(); } buffer.str(""); if (!cst.ComputingEndpoint.empty()) { out << std::endl; if (cst.ComputingEndpoint.size() > 1) { out << IString("%d Endpoints", cst.ComputingEndpoint.size()) << std::endl; } for (std::map::const_iterator it = cst.ComputingEndpoint.begin(); it != cst.ComputingEndpoint.end(); ++it) { if (it != cst.ComputingEndpoint.begin()) out << std::endl; out << IString("Endpoint Information:") << std::endl; std::ostringstream endpointBuffer; Indenter iEndpoint(endpointBuffer); endpointBuffer << (*it->second); out << endpointBuffer.str(); } } if (!cst.ComputingManager.empty()) { out << std::endl; if (cst.ComputingManager.size() > 1) { out << IString("%d Batch Systems", cst.ComputingManager.size()) << std::endl; } for (std::map::const_iterator it = cst.ComputingManager.begin(); it != cst.ComputingManager.end(); ++it) { if (it != cst.ComputingManager.begin()) out << std::endl; out << IString("Batch System Information:") << std::endl; std::ostringstream managerBuffer; Indenter iManager(managerBuffer); managerBuffer << (*it->second); if (!it->second.ApplicationEnvironments->empty()) { managerBuffer << IString("Installed application environments:") << std::endl; for (std::list::const_iterator itAE = it->second.ApplicationEnvironments->begin(); itAE != it->second.ApplicationEnvironments->end(); ++itAE) { managerBuffer << " " << *itAE << std::endl; } } out << managerBuffer.str(); } } if (!cst.ComputingShare.empty()) { out << std::endl; if (cst.ComputingShare.size() > 1) { out << IString("%d Shares", cst.ComputingShare.size()) << std::endl; } for (std::map::const_iterator itCH = cst.ComputingShare.begin(); itCH != cst.ComputingShare.end(); ++itCH) { if (itCH != cst.ComputingShare.begin()) out << std::endl; out << IString("Share Information:") << std::endl; std::ostringstream queueBuffer; Indenter iQueue(queueBuffer); queueBuffer 
<< (*itCH->second); if (itCH->second.MappingPolicy.size() > 1) { queueBuffer << IString("%d mapping policies", itCH->second.MappingPolicy.size()); } for (std::map::const_iterator itMP = itCH->second.MappingPolicy.begin(); itMP != itCH->second.MappingPolicy.end(); ++itMP) { queueBuffer << IString("Mapping policy:") << std::endl; std::ostringstream mpBuffer; Indenter indMP(mpBuffer); mpBuffer << (*itMP->second); queueBuffer << mpBuffer.str(); } out << queueBuffer.str(); } } return out; } std::ostream& operator<<(std::ostream& out, const ExecutionTarget& et) { out << IString("Execution Target on Computing Service: %s", et.ComputingService->Name) << std::endl; if (!et.ComputingEndpoint->URLString.empty()) out << IString(" Computing endpoint URL: %s", et.ComputingEndpoint->URLString) << std::endl; if (!et.ComputingEndpoint->InterfaceName.empty()) out << IString(" Computing endpoint interface name: %s", et.ComputingEndpoint->InterfaceName) << std::endl; if (!et.ComputingShare->Name.empty()) { out << IString(" Queue: %s", et.ComputingShare->Name) << std::endl; } if (!et.ComputingShare->MappingQueue.empty()) { out << IString(" Mapping queue: %s", et.ComputingShare->MappingQueue) << std::endl; } if (!et.ComputingEndpoint->HealthState.empty()){ out << IString(" Health state: %s", et.ComputingEndpoint->HealthState) << std::endl; } out << std::endl << *et.Location; out << std::endl << *et.AdminDomain << std::endl; out << IString("Service information:") << std::endl << *et.ComputingService; out << std::endl; out << *et.ComputingEndpoint; if (!et.ApplicationEnvironments->empty()) { out << IString(" Installed application environments:") << std::endl; for (std::list::const_iterator it = et.ApplicationEnvironments->begin(); it != et.ApplicationEnvironments->end(); ++it) { out << " " << *it << std::endl; } } out << IString("Batch system information:"); out << *et.ComputingManager; out << IString("Queue information:"); out << *et.ComputingShare; out << std::endl << *et.ExecutionEnvironment; // Benchmarks if (!et.Benchmarks->empty()) { out << IString(" Benchmark information:") << std::endl; for (std::map::const_iterator it = et.Benchmarks->begin(); it != et.Benchmarks->end(); ++it) out << " " << it->first << ": " << it->second << std::endl; } out << std::endl; return out; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/GLUE2Entity.h0000644000000000000000000000012412072552041023313 xustar000000000000000027 mtime=1357567009.530851 27 atime=1513200574.742704 30 ctime=1513200659.763744596 nordugrid-arc-5.4.2/src/hed/libs/compute/GLUE2Entity.h0000644000175000002070000000132412072552041023360 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_GLUE2ENTITY_H__ #define __ARC_GLUE2ENTITY_H__ /** \file * \brief template class for %GLUE2 entities. 
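 *
 * The GLUE2Entity template wrapper defined in this header exposes its
 * attribute structure through operator-> and operator*. A minimal
 * illustrative sketch, assuming ComputingServiceType (declared in
 * ExecutionTarget.h) derives from GLUE2Entity with
 * ComputingServiceAttributes as template argument:
 * \code
 * Arc::ComputingServiceType service;
 * std::string name = service->Name;   // shorthand for (*service.Attributes).Name
 * Arc::ComputingServiceAttributes& attrs = *service;
 * \endcode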
*/ #include namespace Arc { /** * \ingroup resourceinfo * \headerfile ExecutionTarget.h arc/compute/ExecutionTarget.h */ template class GLUE2Entity { public: GLUE2Entity() : Attributes(new T) {} T * operator->() { return &(*Attributes); } T const * operator->() const { return &(*Attributes); } T & operator*() { return *Attributes; } T const & operator*() const { return *Attributes; } CountedPointer Attributes; }; } // namespace Arc #endif // __ARC_GLUE2ENTITY_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/JobDescription.h0000644000000000000000000000012413124220447024217 xustar000000000000000027 mtime=1498489127.890866 27 atime=1513200574.750704 30 ctime=1513200659.770744682 nordugrid-arc-5.4.2/src/hed/libs/compute/JobDescription.h0000644000175000002070000010100713124220447024263 0ustar00mockbuildmock00000000000000#ifndef __ARC_JOBDESCRIPTION_H__ #define __ARC_JOBDESCRIPTION_H__ /** \file * \brief Classes related to creating JobDescription objects. */ #include #include #include #include #include #include #include namespace Arc { /** * \defgroup jobdescription JobDescription related classes * This list of classes is used to make up the structure of the JobDescription * class. * \ingroup compute */ /** * \mapdef jobdescription_attributes_mapping Job description attributes mapping * On this page the mapping of job description attributes of different * job description languages to those defined by the libarccompute library in * the \ref jobdescription "JobDescription" group is documented. **/ class JobDescriptionParserPluginLoader; class ExecutionTarget; /** * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h */ template class OptIn { public: OptIn() : optIn(false) {} OptIn(const T& t) : v(t), optIn(false) {} OptIn(const T& t, bool o) : v(t), optIn(o) {} OptIn(const OptIn& ot) : v(ot.v), optIn(ot.optIn) {} OptIn& operator=(const OptIn& ot) { v = ot.v; optIn = ot.optIn; return *this; } OptIn& operator=(const T& t) { v = t; optIn = false; return *this; } operator T() const { return v; } T v; bool optIn; }; /** * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h */ template class Range { public: Range() : min(0), max(0) {} Range(const T& t) : min(t), max(t) {} operator T(void) const { return max; } Range& operator=(const Range& t) { min = t.min; max = t.max; return *this; }; Range& operator=(const T& t) { max = t; return *this; }; T min; T max; }; /** * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h */ template class ScalableTime { public: ScalableTime() : benchmark("", -1.) {} ScalableTime(const T& t) : range(t) {} std::pair benchmark; Range range; }; /** * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h */ template<> class ScalableTime { public: ScalableTime() : benchmark("", -1.) {} ScalableTime(const int& t) : range(t) {} std::pair benchmark; Range range; int scaleMin(double s) const { return (int)(range.min*benchmark.second/s); } int scaleMax(double s) const { return (int)(range.max*benchmark.second/s); } }; /// Job identification /** * This class serves to provide human readable information about a job * description. Some of this information might also be passed to the * execution service for providing information about the job created from * this job description. An object of this class is part of the * JobDescription class as the Identification public member. 
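 *
 * A small illustrative sketch (the values are arbitrary, and desc is assumed
 * to be an Arc::JobDescription):
 * \code
 * desc.Identification.JobName = "arctest1";
 * desc.Identification.Description = "Prime number test job";
 * desc.Identification.Annotation.push_back("example-run");
 * \endcode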
* * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h **/ class JobIdentificationType { public: JobIdentificationType() : JobName(""), Description(""), Type("") {} /// Name of job /** * The JobName string is used to specify a name of the job description, and * it will most likely also be the name given to the job when created at the * execution service. * * \mapdefattr JobName Arc::JobIdentificationType **/ std::string JobName; /// Human readable description /** * The Description string can be used to provide a human readable * description of e.g. the task which should be performed when processing * the job description. * * \mapdefattr Description Arc::JobIdentificationType **/ std::string Description; /// Job type /** * The Type string specifies a classification of the activity in * compliance with GLUE2. The possible values should follow those defined in * the ComputingActivityType_t enumeration of GLUE2. * * \mapdefattr JobIdentificationType::Type Arc **/ std::string Type; /// Annotation /** * The Annotation list is used for human readable comments, tags for free * grouping or identifying different activities. * * \mapdefattr Annotation Arc::JobIdentificationType **/ std::list Annotation; /// ID of old activity /** * The ActivityOldID object is used to store a list of IDs corresponding to * activities which were performed from this job description. This * information is not intended to used by the execution service, but rather * used for keeping track of activities, e.g. when doing a job resubmission * the old activity ID is appended to this list. * * \mapdefattr ActivityOldID Arc::JobIdentificationType **/ std::list ActivityOldID; }; /// Executable /** * The ExecutableType class is used to specify path to an executable, * arguments to pass to it when invoked and the exit code for successful * execution. * * \note The Name string member has been renamed to Path. * * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h **/ class ExecutableType { public: ExecutableType() : Path(""), SuccessExitCode(false, 0) {} /// Path to executable /** * The Path string should specify the path to an executable. Note that some * implementations might only accept a relative path, while others might * also accept a absolute one. * * \mapdefattr ExecutableType::Path Arc **/ std::string Path; /// List of arguments to executable /** * The Argument list is used to specify arguments which should be passed to * the executable upon invocation. * * \mapdefattr ExecutableType::Argument Arc **/ std::list Argument; /// Exit code at successful execution /** * The SuccessExitCode pair is used to specify the exit code returned by the * executable in case of successful execution. For some scenarios the exit * code returned by the executable should be ignored, which is specified by * setting the first member of this object to false. If the exit code should * be used for validation at the execution service, the first member of pair * must be set to true, while the second member should be the exit code returned * at successful execution. * * \mapdefattr ExecutableType::SuccessExitCode Arc **/ std::pair SuccessExitCode; }; /// Remote logging /** * This class is used to specify a service which should be used to report * logging information to, such as job resource usage. 
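 *
 * For illustration only (the service URL below is a placeholder, and desc is
 * assumed to be an Arc::JobDescription):
 * \code
 * Arc::RemoteLoggingType rl;
 * rl.ServiceType = "SGAS";
 * rl.Location = Arc::URL("https://accounting.example.org:8443/logger");
 * rl.optional = true;   // the job may still run if the logger is unavailable
 * desc.Application.RemoteLogging.push_back(rl);
 * \endcode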
* * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h **/ class RemoteLoggingType { public: RemoteLoggingType() : optional(false) {} /// Type of logging service /** * The ServiceType string specifies the type of logging service. Some * examples are "SGAS" (http://www.sgas.se) and "APEL" * (https://wiki.egi.eu/wiki/APEL), however please refer to the particular * execution service for a list of supported logging service types. * * \mapdefattr RemoteLoggingType::ServiceType Arc **/ std::string ServiceType; /// URL of logging service /** * The Location URL specifies the URL of the service which job logging * information should be sent to. * * \mapdefattr RemoteLoggingType::Location Arc **/ URL Location; /// Requirement satisfaction switch /** * The optional boolean specifies whether the requirement specified in the * particular object is mandatory for job execution, or whether it be * ignored. * * \mapdefattr RemoteLoggingType::optional Arc **/ bool optional; }; /** * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h */ class NotificationType { public: NotificationType() {} /** * \mapdefattr NotificationType::Email Arc **/ std::string Email; /** * \mapdefattr NotificationType::States Arc **/ std::list States; }; /** * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h */ class ApplicationType { public: ApplicationType() : Rerun(-1), ExpirationTime(-1), ProcessingStartTime(-1), Priority (-1), DryRun(false) {} ApplicationType& operator=(const ApplicationType&); /// Main executable to be run /** * The Executable object specifies the main executable which should be run * by the job created by the job description enclosing this object. Note * that in some job description languages specifying a main executable is * not essential. **/ ExecutableType Executable; /// Standard input /** * The Input string specifies the relative path to the job session directory * of the file to be used for standard input for the job. * * \mapdefattr Input Arc::ApplicationType **/ std::string Input; /// Standard output /** * The Output string specifies the relative path to the job session * directory of the file which standard output of the job should be written * to. * * \mapdefattr Output Arc::ApplicationType **/ std::string Output; /// Standard error /** * The Error string specifies the relative path to the job session directory * of the file which standard error of the job should be written to. * * \mapdefattr Error Arc::ApplicationType **/ std::string Error; /** * \mapdefattr Environment Arc::ApplicationType **/ std::list< std::pair > Environment; /// Executables to be run before the main executable /** * The PreExecutable object specifies a number of executables which should * be executed before invoking the main application, where the main * application is either the main executable (Executable) or the specified * run time environment (RunTimeEnvironment in the ResourcesType class). * * \mapdefattr PreExecutable Arc::ApplicationType **/ std::list PreExecutable; /// Executables to be run after the main executable /** * The PostExecutable object specifies a number of executables which should * be executed after invoking the main application, where the main * application is either the main executable (Executable) or the specified * run time environment (RunTimeEnvironment in the ResourcesType class). 
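 *
 * A hedged sketch of adding a post-processing step (the script name and
 * argument are made up; desc is assumed to be an Arc::JobDescription):
 * \code
 * Arc::ExecutableType post;
 * post.Path = "collect-results.sh";
 * post.Argument.push_back("--compress");
 * desc.Application.PostExecutable.push_back(post);
 * \endcode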
* * \mapdefattr PostExecutable Arc::ApplicationType **/ std::list PostExecutable; /// Name of logging directory /** * The LogDir string specifies the name of the logging directory at the * execution service which should be used to access log files for the job. * * \mapdefattr LogDir Arc::ApplicationType **/ std::string LogDir; /// Remote logging services /** * The RemoteLogging list specifies the services to use for logging job * information. See the RemoteLoggingType class for more details. **/ std::list RemoteLogging; /** * \mapdefattr Rerun Arc::ApplicationType **/ int Rerun; /** * \mapdefattr ExpirationTime Arc::ApplicationType **/ Time ExpirationTime; /** * \mapdefattr ProcessingStartTime Arc::ApplicationType **/ Time ProcessingStartTime; /** * \mapdefattr Priority Arc::ApplicationType **/ int Priority; std::list Notification; /** * \mapdefattr CredentialService Arc::ApplicationType **/ std::list CredentialService; /** * \mapdefattr AccessControl Arc::ApplicationType **/ XMLNode AccessControl; /** * \mapdefattr DryRun Arc::ApplicationType **/ bool DryRun; }; /** * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h */ class SlotRequirementType { public: SlotRequirementType() : NumberOfSlots(-1), SlotsPerHost(-1), ExclusiveExecution(EE_DEFAULT) {} /** * \mapdefattr NumberOfSlots Arc::SlotRequirementType **/ int NumberOfSlots; // Range? /** * \mapdefattr SlotsPerHost Arc::SlotRequirementType **/ int SlotsPerHost; // Range? /** * \mapdefattr ExclusiveExecution Arc::SlotRequirementType **/ enum ExclusiveExecutionType { EE_DEFAULT, EE_TRUE, EE_FALSE } ExclusiveExecution; }; /** * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h */ class DiskSpaceRequirementType { public: DiskSpaceRequirementType() : DiskSpace(-1), CacheDiskSpace(-1), SessionDiskSpace(-1) {} /** Specifies the required size of disk space which must be available to * the job in mega-bytes (MB). A negative value undefines this attribute * * \mapdefattr DiskSpace Arc::DiskSpaceRequirementType **/ Range DiskSpace; /** Specifies the required size of cache which must be available * to the job in mega-bytes (MB). A negative value undefines this * attribute * * \mapdefattr CacheDiskSpace Arc::DiskSpaceRequirementType */ int CacheDiskSpace; /** Specifies the required size of job session disk space which must be * available to the job in mega-byte (MB). A negative value undefines * this attribute. * * \mapdefattr SessionDiskSpace Arc::DiskSpaceRequirementType */ int SessionDiskSpace; }; /** * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h */ enum SessionDirectoryAccessMode { SDAM_NONE = 0, SDAM_RO = 1, SDAM_RW = 2 }; /** * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h */ enum NodeAccessType { NAT_NONE = 0, NAT_INBOUND = 1, NAT_OUTBOUND = 2, NAT_INOUTBOUND = 3 }; /** * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h */ class ParallelEnvironmentType { public: ParallelEnvironmentType() : ProcessesPerSlot(-1), ThreadsPerProcess(-1) {} /** * \mapdefattr ParallelEnvironmentType::Type Arc **/ std::string Type; /** * \mapdefattr ParallelEnvironmentType::Version Arc **/ std::string Version; /** * \mapdefattr ParallelEnvironmentType::ProcessesPerSlot Arc **/ int ProcessesPerSlot; // Range? /** * \mapdefattr ParallelEnvironmentType::ThreadsPerProcess Arc **/ int ThreadsPerProcess; // Range? 
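    // Illustration only (values are arbitrary): an MPI-style request might set
    //   Type = "MPI"; ProcessesPerSlot = 1; ThreadsPerProcess = 4;
    // alongside SlotRequirement.NumberOfSlots in the enclosing ResourcesType.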
/** * \mapdefattr ParallelEnvironmentType::Options Arc **/ std::multimap Options; }; /// Resource requirements structure /** * * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h */ class ResourcesType { public: ResourcesType() : IndividualPhysicalMemory(-1), IndividualVirtualMemory(-1), SessionLifeTime(-1), SessionDirectoryAccess(SDAM_NONE), IndividualCPUTime(-1), TotalCPUTime(-1), IndividualWallTime(-1), TotalWallTime(IndividualWallTime), NodeAccess(NAT_NONE) {} ResourcesType& operator=(const ResourcesType&); /// Specifies operating system which apllication should be executed at /** * \mapdefattr OperatingSystem Arc::ResourcesType **/ SoftwareRequirement OperatingSystem; /** * \mapdefattr Platform Arc::ResourcesType **/ std::string Platform; /** * \mapdefattr NetworkInfo Arc::ResourcesType **/ std::string NetworkInfo; /// Specifies amount of requested RAM in MB /** * \mapdefattr IndividualPhysicalMemory Arc::ResourcesType **/ Range IndividualPhysicalMemory; /// Specifies amount of requested virtual memory in MB /** * \mapdefattr IndividualVirtualMemory Arc::ResourcesType **/ Range IndividualVirtualMemory; DiskSpaceRequirementType DiskSpaceRequirement; /** * \mapdefattr SessionLifeTime Arc::ResourcesType **/ Period SessionLifeTime; /** * \mapdefattr SessionDirectoryAccess Arc::ResourcesType **/ SessionDirectoryAccessMode SessionDirectoryAccess; /** * \mapdefattr IndividualCPUTime Arc::ResourcesType **/ ScalableTime IndividualCPUTime; /** * \mapdefattr TotalCPUTime Arc::ResourcesType **/ ScalableTime TotalCPUTime; /** * \mapdefattr IndividualWallTime Arc::ResourcesType **/ ScalableTime IndividualWallTime; ScalableTime& TotalWallTime; /** * \mapdefattr NodeAccess Arc::ResourcesType **/ NodeAccessType NodeAccess; /// CE Middleware /** * Specifies the middleware which the CE should use. * * \mapdefattr CEType Arc::ResourcesType **/ SoftwareRequirement CEType; SlotRequirementType SlotRequirement; ParallelEnvironmentType ParallelEnvironment; /** * \mapdefattr Coprocessor Arc::ResourcesType **/ OptIn Coprocessor; /// Name of queue to use /** * \mapdefattr QueueName Arc::ResourcesType **/ std::string QueueName; /// Runtime environment /** * Specifies which runtime environments should be available to the * application. * * \mapdefattr RunTimeEnvironment Arc::ResourcesType **/ SoftwareRequirement RunTimeEnvironment; }; /** * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h */ class SourceType: public URL { public: SourceType() {}; SourceType(const URL& u):URL(u) {}; SourceType(const std::string& s):URL(s) {}; SourceType& operator=(const URL& u) { URL::operator=(u); return *this; }; SourceType& operator=(const std::string& s) { URL::operator=(s); return *this; }; std::string DelegationID; }; /// Represent an output file destination /** * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h */ class TargetType: public URL { public: /// Default constructor /** * Creation flag is set to CFE_DEFAULT, UserIfFailure is set to false, * UserIfCancel is set to false, UserIfSuccess is set to true and * DelegationID is empty. Default URL constructor is used. **/ TargetType() : CreationFlag(CFE_DEFAULT), UseIfFailure(false), UseIfCancel(false), UseIfSuccess(true) {}; /// Constructor destination from URL /** * Uses same defaults as TargetType(). Passes argument to URL constructor. 
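 *
 * Illustrative only (the destination URL is a placeholder):
 * \code
 * Arc::TargetType target(Arc::URL("gsiftp://se.example.org/outputs/result.dat"));
 * target.UseIfFailure = true;   // also stage the output here if the job fails
 * \endcode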
**/ TargetType(const URL& u) : URL(u), CreationFlag(CFE_DEFAULT), UseIfFailure(false), UseIfCancel(false), UseIfSuccess(true) {}; /// Constructor destination from string /** * Uses same defaults as TargetType(). Passes argument to URL constructor. **/ TargetType(const std::string& s) : URL(s), CreationFlag(CFE_DEFAULT), UseIfFailure(false), UseIfCancel(false), UseIfSuccess(true) {}; /// Delegation ID to use /** * Specifies the delegation ID to use when accessing this destination. **/ std::string DelegationID; enum CreationFlagEnumeration { CFE_DEFAULT, /**< Default action should be used (default w.r.t. the service).*/ CFE_OVERWRITE, /**< Overwrite an existing file. */ CFE_APPEND, /**< Append file to an possible existing file. */ CFE_DONTOVERWRITE /**< Don't overwrite an existing file. */ }; /// Output file creation flag /** * Specifies what action should be taken when creating output file at this * destination. **/ CreationFlagEnumeration CreationFlag; /// Action in case job failed /** * Specifies whether this destination should used in case job failed * (JobState::FAILED). **/ bool UseIfFailure; /// Action in case job was cancelled /** * Specifies whether this destination should be used in case job was * cancelled (JobState::KILLED). **/ bool UseIfCancel; /// Action in case job succeeded /** * Specifies whether this destination should be used in case job succeeded * (JobState::FINISHED). **/ bool UseIfSuccess; }; /** * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h */ class InputFileType { public: InputFileType() : Name(""), IsExecutable(false), FileSize(-1) {}; std::string Name; /** * Testing * * \mapdefattr InputFileType::IsExecutable Arc **/ bool IsExecutable; long FileSize; /// CRC32 checksum of file /** * The Checksum attribute specifies the textural representation of CRC32 * checksum of file in base 10. **/ std::string Checksum; std::list Sources; }; /// An output file /** * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h */ class OutputFileType { public: OutputFileType() : Name("") {}; /// Name of output file std::string Name; /// List of destinations for which the output file should be copied std::list Targets; }; /// Simple structure for in- and output files. /** * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h */ class DataStagingType { public: DataStagingType() {}; /// List of inputfiles std::list InputFiles; /// List of outputfiles std::list OutputFiles; /// Delegation for all data staging operations std::string DelegationID; }; /// Job description parsing or assembly result /** * This structure holds boolean value and a string. The boolean indicates * whether parsing or assembling a job description was succesful, while the * string gives an explanation in human readable text. * * \ingroup compute * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h */ class JobDescriptionResult { public: JobDescriptionResult(bool r):res(r) { }; JobDescriptionResult(bool r, const std::string& s):res(r),desc(s) { }; /// Get result as bool operator bool(void) { return res; }; /// Get negated result bool operator!(void) { return !res; }; /// Get explanation of result as string const std::string& str(void) { return desc; }; private: bool res; std::string desc; }; /** * The JobDescription class is the internal representation of a job description * in the ARC-lib. 
It is structured into a number of other classes/objects which * should strictly follow the description given in the job description document * . * * The class consist of a parsing method JobDescription::Parse which tries to * parse the passed source using a number of different parsers. The parser * method is complemented by the JobDescription::UnParse method, a method to * generate a job description document in one of the supported formats. * Additionally the internal representation is contained in public members which * makes it directly accessible and modifiable from outside the scope of the * class. * * \ingroup compute * \ingroup jobdescription * \headerfile JobDescription.h arc/compute/JobDescription.h */ class JobDescription { public: friend class JobDescriptionParserPlugin; JobDescription() : alternatives(), current(alternatives.begin()) {}; JobDescription(const JobDescription& j, bool withAlternatives = true); // Language wrapper constructor JobDescription(const long int& ptraddr); ~JobDescription() {} /// Testing JobDescription& operator=(const JobDescription& j); /// Add alternative job description /** * \param[in] j JobDescription object to add as an alternative job * description to this. **/ void AddAlternative(const JobDescription& j); /// Has alternative job descriptions /** * \return true if this job description has any alternative job descriptions **/ bool HasAlternatives() const { return !alternatives.empty(); } /// Get list of alternative job descriptions /** * \return A const reference to the list of alternative job descriptions is * returned. **/ const std::list& GetAlternatives() const { return alternatives; } /// Get list of alternative job descriptions (modifiable) /** * \return A reference to the list of alternative job descriptions is * returned. **/ std::list& GetAlternatives() { return alternatives; } /// Get a copy of the list of alternative job descriptions /** * \return A copy of the list of alternative job descriptions is returned. **/ std::list GetAlternativesCopy() const { return alternatives; } /// Fill next alternative into this object /** * The next alternative job description is filled into this object. * * \return If there is a next alternative then it is filled into this object * and true is returned. If there is no next false is returned. **/ bool UseAlternative(); void UseOriginal(); void RemoveAlternatives(); /// Parse string into JobDescription objects /** * The passed string will be tried parsed into the list of JobDescription * objects. The available specialized JobDesciptionParser classes will be * tried one by one, parsing the string, and if one succeeds the list of * JobDescription objects is filled with the parsed contents and true is * returned, otherwise false is returned. If no language specified, each * JobDescriptionParserPlugin will try all its supported languages. On the other * hand if a language is specified, only the JobDescriptionParserPlugin supporting * that language will be tried. A dialect can also be specified, which only * has an effect on the parsing if the JobDescriptionParserPlugin supports that * dialect. * * @param source * @param jobdescs * @param language * @param dialect * @return true if the passed string can be parsed successfully by any of * the available parsers. 
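 *
 * A minimal usage sketch (the xRSL string is only an example):
 * \code
 * std::list<Arc::JobDescription> jobdescs;
 * std::string src = "&(executable=/usr/bin/env)(jobname='arctest2')";
 * Arc::JobDescriptionResult res = Arc::JobDescription::Parse(src, jobdescs);
 * if (!res) {
 *   std::cerr << res.str() << std::endl;   // human readable explanation
 * }
 * \endcode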
**/ static JobDescriptionResult Parse(const std::string& source, std::list& jobdescs, const std::string& language = "", const std::string& dialect = ""); static JobDescriptionResult ParseFromFile(const std::string& filename, std::list& jobdescs, const std::string& language = "", const std::string& dialect = ""); /// Output contents in the specified language /** * * @param product * @param language * @param dialect * @return **/ JobDescriptionResult UnParse(std::string& product, std::string language, const std::string& dialect = "") const; /// Get input source language /** * If this object was created by a JobDescriptionParserPlugin, then this method * returns a string which indicates the job description language of the * parsed source. If not created by a JobDescripionParser the string * returned is empty. * * @return const std::string& source langauge of parsed input source. **/ const std::string& GetSourceLanguage() const { return sourceLanguage; } /// Print job description to a std::ostream object. /** * The job description will be written to the passed std::ostream object * out in the format indicated by the format parameter. The format parameter * should specify the format of one of the job description languages * supported by the library. Or by specifying the special "user" or * "userlong" format the job description will be written as a * attribute/value pair list with respectively less or more attributes. * * The mote * * @return true if writing the job description to the out object succeeds, * otherwise false. * @param out a std::ostream reference specifying the ostream to write the * job description to. * @param format specifies the format the job description should written in. */ JobDescriptionResult SaveToStream(std::ostream& out, const std::string& format) const; /// Prepare for submission to target /** * The Prepare method, is used to check and adapt the JobDescription object * to the passed ExecutionTarget object before submitting the job * description to the target. This method is normally called by SubmitterPlugin * plugin classes, before submitting the job description. * First the method checks the DataStaging.InputFiles list, for identical * file names, and non-existent local input files. If any of such files are * found, the method returns false. Then if the Application.Executable and * Application.Input objects are specified as local input files, and they * are not among the files in the DataStaging.InputFiles list a existence * check will be done and if not found, false will be returned, otherwise * they will be added to the list. Likewise if the Application.Output, * Application.Error and Application.LogDir attributes have been specified, * and is not among the files in the DataStaging.OutputFiles list, they will * be added to this list. * After the file check, the Resources.RunTimeEnvironment, Resources.CEType * and Resources.OperatingSystem SoftwareRequirement objects are * respectively resolved against the * ExecutionTarget::ApplicationEnvironments, ExecutionTarget::Implementation * and ExecutionTarget::OperatingSystem Software objects using the * SoftwareRequirement::selectSoftware method. If that method returns false * i.e. unable to resolve the requirements false will be returned. * After resolving software requirements, the value of the * Resources.QueueName attribute will be set to that of the * ExecutionTarget::ComputingShareName attribute, and then true is returned. 
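 *
 * A typical call sequence before submission might look as follows (csList is
 * assumed to be a std::list of Arc::ComputingServiceType obtained from
 * resource discovery, and desc is the job description to adapt):
 * \code
 * std::list<Arc::ExecutionTarget> targets;
 * Arc::ExecutionTarget::GetExecutionTargets(csList, targets);
 * if (!targets.empty() && desc.Prepare(targets.front())) {
 *   // desc now has resolved software requirements and a queue name
 * }
 * \endcode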
* * @param et ExecutionTarget object which to resolve software requirements * against, and to pick up queue name from. * @return false is returned is file checks fails, or if unable to resolve * software requirements. */ bool Prepare(const ExecutionTarget& et) { return Prepare(&et); } bool Prepare() { return Prepare(NULL); } static bool GetTestJob(int testid, JobDescription& jobdescription); /// Structure for identification /** * This member object stores information which can be used to identify the * job description. **/ JobIdentificationType Identification; /// Structure for apllication options /** * All options relating to the application is stored in this structure. **/ ApplicationType Application; /// Structure for resource requirements /** * This structure specifies requirements which should be satisfied before * application can be started. **/ ResourcesType Resources; /// Structure for data staging /** * Input files requirements, and destinations for output files can be * specified using this structure. **/ DataStagingType DataStaging; /// Holds attributes not fitting into this class /** * This member is used by JobDescriptionParserPlugin classes to store * attribute/value pairs not fitting into attributes stored in this class. * The form of the attribute (the key in the map) should be as follows: \verbatim ; \endverbatim * E.g.: "nordugrid:xrsl;hostname". * * \mapdefattr OtherAttributes Arc::JobDescription **/ std::map OtherAttributes; private: bool Prepare(const ExecutionTarget* et); void Set(const JobDescription& j); std::string sourceLanguage; std::list alternatives; std::list::iterator current; static Glib::Mutex jdpl_lock; static JobDescriptionParserPluginLoader *jdpl; static Logger logger; }; } // namespace Arc #endif // __ARC_JOBDESCRIPTION_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/test-job-10000644000000000000000000000012413111110573022732 xustar000000000000000027 mtime=1495568763.299499 27 atime=1513200574.669703 30 ctime=1513200659.815745232 nordugrid-arc-5.4.2/src/hed/libs/compute/test-job-10000644000175000002070000000064613111110573023005 0ustar00mockbuildmock00000000000000&(executable=run.sh) (arguments=%d) (inputfiles= ('run.sh' 'http://www.nordugrid.org;cache=no/data/run.sh') ('Makefile' 'http://download.nordugrid.org;cache=no/applications/test/Makefile') ('prime.cpp' 'http://download.nordugrid.org;cache=no/applications/test/prime.cpp') ) (stderr=primenumbers) (outputfiles= ('primenumbers' '') ) (jobname='arctest1') (stdout=stdout) (gmlog=gmlog) (CPUTime=%d) nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/EntityRetriever.cpp0000644000000000000000000000012312574532370025010 xustar000000000000000027 mtime=1441969400.372727 27 atime=1513200574.654703 29 ctime=1513200659.80574511 nordugrid-arc-5.4.2/src/hed/libs/compute/EntityRetriever.cpp0000644000175000002070000005457512574532370025076 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include "EntityRetriever.h" namespace Arc { template<> Logger EntityRetriever::logger(Logger::getRootLogger(), "ServiceEndpointRetriever"); template<> Logger EntityRetriever::logger(Logger::getRootLogger(), "TargetInformationRetriever"); template<> Logger EntityRetriever::logger(Logger::getRootLogger(), "JobListRetriever"); template EntityRetriever::EntityRetriever(const UserConfig& uc, const EndpointQueryOptions& options) : common(new Common(this, uc)), result(), statuses(&Endpoint::ServiceIDCompare), uc(uc), options(options), 
need_all_results(false) { // Used for holding names of all available plugins. std::list availablePlugins = common->getListOfPlugins(); // Map supported interfaces to available plugins. for (std::list::iterator itT = availablePlugins.begin(); itT != availablePlugins.end();) { EntityRetrieverPlugin* p = common->load(*itT); if (!p) { itT = availablePlugins.erase(itT); continue; } const std::list& interfaceNames = p->SupportedInterfaces(); if (interfaceNames.empty()) { // This plugin does not support any interfaces, skip it logger.msg(DEBUG, "The plugin %s does not support any interfaces, skipping it.", *itT); itT = availablePlugins.erase(itT); continue; } else if (interfaceNames.front().empty()) { logger.msg(DEBUG, "The first supported interface of the plugin %s is an empty string, skipping the plugin.", *itT); itT = availablePlugins.erase(itT); continue; } for (std::list::const_iterator itI = p->SupportedInterfaces().begin(); itI != p->SupportedInterfaces().end(); ++itI) { // If two plugins supports two identical interfaces, then only the last will appear in the map. interfacePluginMap[*itI] = *itT; } ++itT; } common->setAvailablePlugins(availablePlugins); } template void EntityRetriever::removeConsumer(const EntityConsumer& consumer) { consumerLock.lock(); typename std::list< EntityConsumer* >::iterator it = std::find(consumers.begin(), consumers.end(), &consumer); if (it != consumers.end()) { consumers.erase(it); } consumerLock.unlock(); } template void EntityRetriever::addEndpoint(const Endpoint& endpoint) { std::map::const_iterator itPluginName = interfacePluginMap.end(); if (!endpoint.InterfaceName.empty()) { itPluginName = interfacePluginMap.find(endpoint.InterfaceName); if (itPluginName == interfacePluginMap.end()) { //logger.msg(DEBUG, "Unable to find TargetInformationRetrieverPlugin plugin to query interface \"%s\" on \"%s\"", endpoint.InterfaceName, endpoint.URLString); setStatusOfEndpoint(endpoint, EndpointQueryingStatus::NOPLUGIN); return; } } /* Check if endpoint belongs to service which is being or has been queried. * If that is the case, then only start querying this endpoint if it is * a preferred one, and the other endpoint which is querying or has queried * the service is NOT a preferred one. */ // Compare by Endpoint::ServiceID if it is set. if (!endpoint.ServiceID.empty()) { Endpoint endpointServiceID; endpointServiceID.ServiceID = endpoint.ServiceID; bool isPreferredInterface = options.getPreferredInterfaceNames().count(endpoint.InterfaceName); logger.msg(DEBUG, "Interface on endpoint (%s) %s.", endpoint.str(), isPreferredInterface ? "IS preferred" : "is NOT preferred"); statusLock.lock(); // First check if endpoint is already registered. 
if (statuses.find(endpoint) != statuses.end()) { // Ignore endpoint, it has already been registered logger.msg(DEBUG, "Ignoring endpoint (%s), it is already registered in retriever.", endpoint.str()); statusLock.unlock(); return; } std::pair servicesIT = Endpoint::getServiceEndpoints(endpointServiceID, statuses); for (EndpointStatusMap::const_iterator it = servicesIT.first; it != servicesIT.second; ++it) { logger.msg(DEBUG, "Service Loop: Endpoint %s", it->first.str()); if (it->second == EndpointQueryingStatus::STARTED || it->second == EndpointQueryingStatus::SUCCESSFUL) { logger.msg(DEBUG, " This endpoint (%s) is STARTED or SUCCESSFUL", it->first.str()); if (!isPreferredInterface || options.getPreferredInterfaceNames().count(it->first.InterfaceName)) { // This interface is not a preferred one, so put this interface on SUSPENDED_NOTREQUIRED. logger.msg(DEBUG, "Suspending querying of endpoint (%s) since the service at the endpoint is already being queried, or has been queried.", endpoint.str()); statuses[endpoint] = EndpointQueryingStatus::SUSPENDED_NOTREQUIRED; statusLock.unlock(); return; } } else { logger.msg(DEBUG, " Status of endpoint (%s) is %s", it->first.str(), it->second.str()); } } logger.msg(DEBUG, "Setting status (STARTED) for endpoint: %s", endpoint.str()); statuses[endpoint] = EndpointQueryingStatus::STARTED; statusLock.unlock(); } // Set the status of the endpoint to STARTED only if it was not registered already (overwrite = false) else if (!setStatusOfEndpoint(endpoint, EndpointQueryingStatus::STARTED, false)) { // Not able to set the status (because the endpoint was already registered) logger.msg(DEBUG, "Ignoring endpoint (%s), it is already registered in retriever.", endpoint.str()); return; } // common will be copied into the thread arg, // which means that all threads will have a new // instance of the ThreadedPointer pointing to the same object ThreadArg *arg = new ThreadArg(common, result, endpoint, options); if (itPluginName != interfacePluginMap.end()) { arg->pluginName = itPluginName->second; } logger.msg(DEBUG, "Starting thread to query the endpoint on %s", arg->endpoint.str()); if (!CreateThreadFunction(&queryEndpoint, arg)) { logger.msg(ERROR, "Failed to start querying the endpoint on %s", arg->endpoint.str() + " (unable to create thread)"); setStatusOfEndpoint(endpoint, EndpointQueryingStatus::FAILED); delete arg; } } template void EntityRetriever::addEntity(const T& entity) { consumerLock.lock(); for (typename std::list< EntityConsumer* >::iterator it = consumers.begin(); it != consumers.end(); it++) { (*it)->addEntity(entity); } consumerLock.unlock(); } template<> void EntityRetriever::addEntity(const Endpoint& endpoint) { // Check if the service is among the rejected ones const std::list& rejectedServices = options.getRejectedServices(); URL url(endpoint.URLString); for (std::list::const_iterator it = rejectedServices.begin(); it != rejectedServices.end(); it++) { if (url.StringMatches(*it)) { return; } } if (options.recursiveEnabled() && endpoint.HasCapability(Endpoint::REGISTRY)) { Endpoint registry(endpoint); logger.msg(DEBUG, "Found a registry, will query it recursively: %s", registry.str()); EntityRetriever::addEndpoint(registry); } bool match = false; for (std::list::const_iterator it = options.getCapabilityFilter().begin(); it != options.getCapabilityFilter().end(); it++) { if (std::find(endpoint.Capability.begin(), endpoint.Capability.end(), *it) != endpoint.Capability.end()) { match = true; break; } } if (options.getCapabilityFilter().empty() || 
match) { consumerLock.lock(); for (std::list*>::iterator it = consumers.begin(); it != consumers.end(); it++) { (*it)->addEntity(endpoint); } consumerLock.unlock(); } } template EndpointQueryingStatus EntityRetriever::getStatusOfEndpoint(const Endpoint& endpoint) const { statusLock.lock(); EndpointQueryingStatus status(EndpointQueryingStatus::UNKNOWN); typename EndpointStatusMap::const_iterator it = statuses.find(endpoint); if (it != statuses.end()) { status = it->second; } statusLock.unlock(); return status; } template bool EntityRetriever::setStatusOfEndpoint(const Endpoint& endpoint, const EndpointQueryingStatus& status, bool overwrite) { statusLock.lock(); bool wasSet = false; if (overwrite || (statuses.find(endpoint) == statuses.end())) { logger.msg(DEBUG, "Setting status (%s) for endpoint: %s", status.str(), endpoint.str()); statuses[endpoint] = status; wasSet = true; } statusLock.unlock(); return wasSet; }; template void EntityRetriever::getServicesWithStatus(const EndpointQueryingStatus& status, std::set& result) { statusLock.lock(); for (EndpointStatusMap::const_iterator it = statuses.begin(); it != statuses.end(); ++it) { if (it->second == status) result.insert(it->first.getServiceName()); } statusLock.unlock(); } template void EntityRetriever::checkSuspendedAndStart(const Endpoint& e) { logger.msg(DEBUG, "Checking for suspended endpoints which should be started."); Endpoint const * suspended = NULL, *startedOrDone = NULL; statusLock.lock(); std::pair endpointsIT = Endpoint::getServiceEndpoints(e, statuses); for (EndpointStatusMap::const_iterator it = endpointsIT.first; it != endpointsIT.second; ++it) { logger.msg(DEBUG, " Status of endpoint (%s) is %s", it->first.str(), it->second.str()); switch (it->second.getStatus()) { case EndpointQueryingStatus::STARTED: case EndpointQueryingStatus::SUCCESSFUL: logger.msg(DEBUG, "Found started or successful endpoint (%s)", it->first.str()); if (options.getPreferredInterfaceNames().count(it->first.InterfaceName)) { // Preferred interface is running or done. Dont start a suspended one. statusLock.unlock(); return; } if (startedOrDone == NULL) { startedOrDone = &it->first; } break; case EndpointQueryingStatus::SUSPENDED_NOTREQUIRED: if (suspended == NULL || options.getPreferredInterfaceNames().count(it->first.InterfaceName)) { logger.msg(DEBUG, "Found suspended endpoint (%s)", it->first.str()); suspended = &it->first; } break; default: break; } } statusLock.unlock(); if (suspended != NULL && (startedOrDone == NULL || options.getPreferredInterfaceNames().count(suspended->InterfaceName))) { logger.msg(DEBUG, "Trying to start suspended endpoint (%s)", suspended->str()); std::map::const_iterator itPluginName = interfacePluginMap.end(); if (!suspended->InterfaceName.empty()) { itPluginName = interfacePluginMap.find(suspended->InterfaceName); if (itPluginName == interfacePluginMap.end()) { //logger.msg(DEBUG, "Unable to find TargetInformationRetrieverPlugin plugin to query interface \"%s\" on \"%s\"", suspended->InterfaceName, suspended->URLString); setStatusOfEndpoint(*suspended, EndpointQueryingStatus::NOPLUGIN); // Starting this suspended endpoint failed, check if there are another one and start that one. 
checkSuspendedAndStart(*suspended); return; } } // common will be copied into the thread arg, // which means that all threads will have a new // instance of the ThreadedPointer pointing to the same object ThreadArg *arg = new ThreadArg(common, result, *suspended, options); if (itPluginName != interfacePluginMap.end()) { arg->pluginName = itPluginName->second; } logger.msg(DEBUG, "Starting querying of suspended endpoint (%s) - no other endpoints for this service is being queried or has been queried successfully.", suspended->str()); statusLock.lock(); statuses[*suspended] = EndpointQueryingStatus::STARTED; statusLock.unlock(); if (!CreateThreadFunction(&queryEndpoint, arg)) { logger.msg(ERROR, "Failed to start querying the endpoint on %s", arg->endpoint.str() + " (unable to create thread)"); setStatusOfEndpoint(*suspended, EndpointQueryingStatus::FAILED); delete arg; checkSuspendedAndStart(*suspended); } } } /* Overview of how the queryEndpoint algorithm works * The queryEndpoint method is meant to be run in a separate thread, spawned * by the addEndpoint method. Furthermore it is designed to call it self, when * the Endpoint has no interface specified, in order to check all supported * interfaces. Since the method is static, common data is reached through a * Common class object, wrapped and protected by the ThreadedPointer template * class. * * Taking as starting point when the addEndpoint method calls the * queryEndpoint method these are the steps the queryEndpoint method goes * through: * 1. Checks whether the Endpoint has already been queried, or is in the * process of being queried. If that is the case, it * stops. Otherwise branch a is followed if the Endpoint has a interface * specified, if not then branch b is followed. * 2a. The plugin corresponding to the chosen interface is loaded and its * Query method is called passing a container used to store the result of * query. This call blocks. * 3a. When the Query call finishes the container is iterated, and each item * in the container is passed on to the EntityRetriever::addEntity method, * then status returned from the Query method is registered with the * EntityRetriever object through the setStatusOfEndpoint method, and if * status is successful the common Result object is set to be successful. * Then the method returns. * 2b. The list of available plugins is looped, and plugins are loaded. If * they fail loading the loop continues. Then it is determined whether the * specific plugin supports a preferred interface as specified in the * EndpointQueryOptions object, or not. In the end of the loop a new * thread is spawned calling this method itself, but with an Endpoint with * specified interface, thus in that call going through branch a. * 3b. After the loop, the wait method is called on the preferred Result * object, thus waiting for the querying of the preferred interfaces to * succeed. If any of these succeeded successfully then the status of the * Endpoint object with unspecified interface in status-map is set * accordingly. If not then the wait method on the other Result object is * called, and a similiar check and set is one. Then the method returns. 
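 *
 * For orientation only, a hedged sketch of how this machinery is typically
 * driven from the outside (usercfg and the endpoint URL are placeholders;
 * ServiceEndpointRetriever is the Endpoint instantiation of this template):
 *
 *   Arc::EntityContainer<Arc::Endpoint> container;
 *   Arc::EndpointQueryOptions<Arc::Endpoint> options;
 *   Arc::ServiceEndpointRetriever retriever(usercfg, options);
 *   retriever.addConsumer(container);
 *   // No interface specified, so querying follows branch b described above.
 *   retriever.addEndpoint(Arc::Endpoint("index.example.org", Arc::Endpoint::REGISTRY));
 *   retriever.wait();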
*/ template void EntityRetriever::queryEndpoint(void *arg) { AutoPointer a((ThreadArg*)arg); ThreadedPointer& common = a->common; if(!common->lockSharedIfValid()) return; bool need_all = (*common)->need_all_results; common->unlockShared(); // If the thread was able to set the status, then this is the first (and only) thread querying this endpoint if (!a->pluginName.empty()) { // If the plugin was already selected EntityRetrieverPlugin* plugin = common->load(a->pluginName); if (!plugin) { if(!common->lockSharedIfValid()) return; (*common)->setStatusOfEndpoint(a->endpoint, EndpointQueryingStatus::FAILED); common->unlockShared(); return; } logger.msg(DEBUG, "Calling plugin %s to query endpoint on %s", a->pluginName, a->endpoint.str()); // Do the actual querying against service. std::list entities; EndpointQueryingStatus status = plugin->Query(*common, a->endpoint, entities, a->options); // Add obtained results from querying to registered consumers (addEntity method) for (typename std::list::const_iterator it = entities.begin(); it != entities.end(); ++it) { if(!common->lockSharedIfValid()) return; (*common)->addEntity(*it); common->unlockShared(); } if(!common->lockSharedIfValid()) return; (*common)->setStatusOfEndpoint(a->endpoint, status); if (!status) { // Note: The checkSuspendedAndStart operation might take lot of time, consequently making the common object inaccessible in this time. (*common)->checkSuspendedAndStart(a->endpoint); } common->unlockShared(); if (status) a->result.setSuccess(); // Successful query } else { // If there was no plugin selected for this endpoint, this will try all possibility logger.msg(DEBUG, "The interface of this endpoint (%s) is unspecified, will try all possible plugins", a->endpoint.str()); // A list for collecting the new endpoints which will be created by copying the original one // and setting the InterfaceName for each possible plugins std::list preferredEndpoints; std::list otherEndpoints; const std::set& preferredInterfaceNames = a->options.getPreferredInterfaceNames(); // A new result object is created for the sub-threads, "true" means we only want to wait for the first successful query Result preferredResult(!need_all); Result otherResult(!need_all); for (std::list::const_iterator it = common->getAvailablePlugins().begin(); it != common->getAvailablePlugins().end(); ++it) { EntityRetrieverPlugin* plugin = common->load(*it); if (!plugin) { // Should not happen since all available plugins was already loaded in the constructor. // Problem loading the plugin, skip it logger.msg(DEBUG, "Problem loading plugin %s, skipping it.", *it); continue; } if (plugin->isEndpointNotSupported(a->endpoint)) { logger.msg(DEBUG, "The endpoint (%s) is not supported by this plugin (%s)", a->endpoint.URLString, *it); continue; } // Create a new endpoint with the same endpoint and a specified interface Endpoint endpoint = a->endpoint; ThreadArg* newArg = NULL; // Set interface std::list::const_iterator itSI = plugin->SupportedInterfaces().begin(); for (; itSI != plugin->SupportedInterfaces().end(); ++itSI) { if (preferredInterfaceNames.count(*itSI)) { endpoint.InterfaceName = *itSI; // TODO: *itSI must not be empty. 
preferredEndpoints.push_back(endpoint); newArg = new ThreadArg(*a, preferredResult); break; } } if (itSI == plugin->SupportedInterfaces().end()) { // We will use the first interfaceName this plugin supports endpoint.InterfaceName = plugin->SupportedInterfaces().front(); logger.msg(DEBUG, "New endpoint is created (%s) from the one with the unspecified interface (%s)", endpoint.str(), a->endpoint.str()); otherEndpoints.push_back(endpoint); newArg = new ThreadArg(*a, otherResult); } // Set the status of the endpoint to STARTED only if it was not registered already (overwrite = false) if(!common->lockSharedIfValid()) return; bool set = (*common)->setStatusOfEndpoint(endpoint, EndpointQueryingStatus::STARTED); common->unlockShared(); if (!set) { // Not able to set the status (because the endpoint was already registered) logger.msg(DEBUG, "Ignoring endpoint (%s), it is already registered in retriever.", endpoint.str()); return; } // Make new argument by copying old one with result report object replaced newArg->endpoint = endpoint; newArg->pluginName = *it; logger.msg(DEBUG, "Starting sub-thread to query the endpoint on %s", endpoint.str()); if (!CreateThreadFunction(&queryEndpoint, newArg)) { logger.msg(ERROR, "Failed to start querying the endpoint on %s (unable to create sub-thread)", endpoint.str()); delete newArg; if(!common->lockSharedIfValid()) return; (*common)->setStatusOfEndpoint(endpoint, EndpointQueryingStatus::FAILED); common->unlockShared(); continue; } } /* We wait for the preferred result object. The wait returns in two cases: * 1. one sub-thread was succesful * 2. all the sub-threads failed * Now check which case happens. */ preferredResult.wait(); if(!common->lockSharedIfValid()) return; EndpointQueryingStatus status; for (typename std::list::const_iterator it = preferredEndpoints.begin(); it != preferredEndpoints.end(); it++) { status = (*common)->getStatusOfEndpoint(*it); if (status) { break; } } // Set the status of the original endpoint (the one without the specified interface) if (!status) { // Wait for the other threads, maybe they were successful otherResult.wait(); typename std::list::const_iterator it = otherEndpoints.begin(); for (; it != otherEndpoints.end(); ++it) { status = (*common)->getStatusOfEndpoint(*it); if (status) { break; } } if (it == otherEndpoints.end()) { /* TODO: In case of failure of all plugins, a clever and * helpful message should be set. Maybe an algorithm for * picking the most suitable failure message among the used * plugins. */ status = EndpointQueryingStatus::FAILED; } } (*common)->setStatusOfEndpoint(a->endpoint, status); if (!status) { // Note: The checkSuspendedAndStart operation might take lot of time, consequently making the common object inaccessible in this time. 
(*common)->checkSuspendedAndStart(a->endpoint); } common->unlockShared(); } } template class EntityRetriever; template class EntityRetriever; template class EntityRetriever; } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/Submitter.cpp0000644000000000000000000000012412176753047023627 xustar000000000000000027 mtime=1375458855.018764 27 atime=1513200574.688704 30 ctime=1513200659.792744951 nordugrid-arc-5.4.2/src/hed/libs/compute/Submitter.cpp0000644000175000002070000003565612176753047023713 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include namespace Arc { SubmitterPluginLoader& Submitter::getLoader() { // For C++ it would be enough to have // static SubmitterPluginLoader loader; // But Java sometimes does not destroy objects causing // PluginsFactory destructor loop forever waiting for // plugins to exit. static SubmitterPluginLoader* loader = NULL; if(!loader) { loader = new SubmitterPluginLoader(); } return *loader; } Logger Submitter::logger(Logger::getRootLogger(), "Submitter"); std::string EndpointSubmissionStatus::str(EndpointSubmissionStatusType s) { if (s == UNKNOWN) return "UNKNOWN"; else if (s == NOPLUGIN) return "NOPLUGIN"; else if (s == SUCCESSFUL) return "SUCCESSFUL"; else return ""; // There should be no other alternative! } void Submitter::removeConsumer(EntityConsumer& jc) { std::list*>::iterator it = std::find(consumers.begin(), consumers.end(), &jc); if (it != consumers.end()) { consumers.erase(it); } } class JobConsumerSingle : public EntityConsumer { public: JobConsumerSingle(Job& j) : _j(j) {} void addEntity(const Job& j) { _j = j; } private: Job& _j; }; class JobConsumerList : public EntityConsumer { public: JobConsumerList(std::list& joblist) : joblist(joblist) {} void addEntity(const Job& j) { joblist.push_back(j); } private: std::list& joblist; }; SubmissionStatus Submitter::Submit(const Endpoint& endpoint, const JobDescription& desc, Job& job) { JobConsumerSingle jcs(job); addConsumer(jcs); SubmissionStatus ok = Submit(endpoint, std::list(1, desc)); removeConsumer(jcs); return ok; } SubmissionStatus Submitter::Submit(const Endpoint& endpoint, const std::list& descs, std::list& jobs) { JobConsumerList jcl(jobs); addConsumer(jcl); SubmissionStatus ok = Submit(endpoint, descs); removeConsumer(jcl); return ok; } SubmissionStatus Submitter::Submit(const Endpoint& endpoint, const std::list& descs) { ClearAll(); return SubmitNoClear(endpoint, descs); } SubmissionStatus Submitter::SubmitNoClear(const Endpoint& endpoint, const std::list& descs) { ConsumerWrapper cw(*this); logger.msg(DEBUG, "Trying to submit directly to endpoint (%s)", endpoint.URLString); SubmissionStatus retval; if (!endpoint.InterfaceName.empty()) { logger.msg(DEBUG, "Interface (%s) specified, submitting only to that interface", endpoint.InterfaceName); SubmitterPlugin *sp = getLoader().loadByInterfaceName(endpoint.InterfaceName, uc); if (sp == NULL) { submissionStatusMap[endpoint] = EndpointSubmissionStatus::NOPLUGIN; retval |= SubmissionStatus::SUBMITTER_PLUGIN_NOT_LOADED; retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; for (std::list::const_iterator itJ = descs.begin(); itJ != descs.end(); ++itJ) { notsubmitted.push_back(&*itJ); } return retval; } return sp->Submit(descs, endpoint.URLString, cw, notsubmitted); } logger.msg(DEBUG, "Trying all available interfaces", endpoint.URLString); // InterfaceName is empty -> Try all interfaces. 
getLoader().initialiseInterfacePluginMap(uc); const std::map& interfacePluginMap = getLoader().getInterfacePluginMap(); for (std::map::const_iterator it = interfacePluginMap.begin(); it != interfacePluginMap.end(); ++it) { logger.msg(DEBUG, "Trying to submit endpoint (%s) using interface (%s) with plugin (%s).", endpoint.URLString, it->first, it->second); SubmitterPlugin *sp = getLoader().load(it->second, uc); if (sp == NULL) { logger.msg(DEBUG, "Unable to load plugin (%s) for interface (%s) when trying to submit job description.", it->second, it->first); continue; } std::list isNotSubmitted; retval = sp->Submit(descs, endpoint.URLString, cw, isNotSubmitted); /* If submission attempt above managed to submit one or multiple * descriptions then dont try other plugins. */ if (retval || (retval.isSet(SubmissionStatus::DESCRIPTION_NOT_SUBMITTED) && descs.size() != isNotSubmitted.size())) { notsubmitted.insert(notsubmitted.end(), isNotSubmitted.begin(), isNotSubmitted.end()); return retval; } } logger.msg(DEBUG, "No more interfaces to try for endpoint %s.", endpoint.URLString); for (std::list::const_iterator it = descs.begin(); it != descs.end(); ++it) { notsubmitted.push_back(&*it); } if (!notsubmitted.empty()) { retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; } return retval; } SubmissionStatus Submitter::Submit(const std::list& endpoints, const std::list& descs, std::list& jobs) { JobConsumerList jcl(jobs); addConsumer(jcl); SubmissionStatus ok = Submit(endpoints, descs); removeConsumer(jcl); return ok; } SubmissionStatus Submitter::Submit(const std::list& endpoints, const std::list& descs) { ClearAll(); std::list descs_to_submit = descs; std::list descs_to_submit_ptr; for (std::list::const_iterator itJ = descs.begin(); itJ != descs.end(); ++itJ) { descs_to_submit_ptr.push_back(&*itJ); } SubmissionStatus ok; for (std::list::const_iterator it = endpoints.begin(); it != endpoints.end(); ++it) { ClearNotSubmittedDescriptions(); ok |= Submit(*it, descs_to_submit); if (!ok.isSet(SubmissionStatus::DESCRIPTION_NOT_SUBMITTED)) { return ok; } /* Remove DESCRIPTION_NOT_SUBMITTED status from ok, since we are trying * multiple endpoints. Set DESCRIPTION_NOT_SUBMITTED status if some * descriptions was not submitted at end of loop. */ ok.unset(SubmissionStatus::DESCRIPTION_NOT_SUBMITTED); /* The private member notsubmitted should contain pointers to the * descriptions not submitted. So now the descriptions which _was_ * submitted should be removed from the descs_to_submit list. The not * submitted ones could just be appended to the descs_to_submit list * (that involves copying) and then remove original part of the * descs_to_submit entries. Another way is to go through each entry and * check whether it was submitted or not, and then only keep the submitted * ones. The latter algorithm is implemented below. */ std::list::iterator itJ = descs_to_submit.begin(); std::list::iterator itJPtr = descs_to_submit_ptr.begin(); for (; itJ != descs_to_submit.end();) { std::list::iterator itNotS = notsubmitted.begin(); for (; itNotS != notsubmitted.end(); ++itNotS) if (*itNotS == &*itJ) break; if (itNotS == notsubmitted.end()) { // No match found - job description was submitted. // Remove entry from descs_to_submit list, now that it was submitted. itJ = descs_to_submit.erase(itJ); itJPtr = descs_to_submit_ptr.erase(itJPtr); } else { // Match found - job not submitted. // Remove entry from notsubmitted list, now that entry was found. 
notsubmitted.erase(itNotS); ++itJ; } } } for (std::list::const_iterator itJ = descs_to_submit_ptr.begin(); itJ != descs_to_submit_ptr.end(); ++itJ) { notsubmitted.push_back(*itJ); } if (!notsubmitted.empty()) { ok |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; } return ok; } SubmissionStatus Submitter::Submit(const ExecutionTarget& et, const JobDescription& desc, Job& job) { JobConsumerSingle jcs(job); addConsumer(jcs); SubmissionStatus ok = Submit(et, std::list(1, desc)); removeConsumer(jcs); return ok; } SubmissionStatus Submitter::Submit(const ExecutionTarget& et, const std::list& descs, std::list& jobs) { JobConsumerList jcl(jobs); addConsumer(jcl); SubmissionStatus ok = Submit(et, descs); removeConsumer(jcl); return ok; } SubmissionStatus Submitter::Submit(const ExecutionTarget& et, const std::list& descs) { ClearAll(); ConsumerWrapper cw(*this); SubmitterPlugin *sp = getLoader().loadByInterfaceName(et.ComputingEndpoint->InterfaceName, uc); if (sp == NULL) { for (std::list::const_iterator it = descs.begin(); it != descs.end(); ++it) { notsubmitted.push_back(&*it); } return SubmissionStatus::SUBMITTER_PLUGIN_NOT_LOADED; } return sp->Submit(descs, et, cw, notsubmitted); } SubmissionStatus Submitter::BrokeredSubmit(const std::list& endpoints, const std::list& descs, std::list& jobs, const std::list& requestedSubmissionInterfaces) { JobConsumerList jcl(jobs); addConsumer(jcl); SubmissionStatus ok = BrokeredSubmit(endpoints, descs); removeConsumer(jcl); return ok; } SubmissionStatus Submitter::BrokeredSubmit(const std::list& endpoints, const std::list& descs, const std::list& requestedSubmissionInterfaces) { std::list endpointObjects; for (std::list::const_iterator it = endpoints.begin(); it != endpoints.end(); ++it) { endpointObjects.push_back(Endpoint(*it, Endpoint::UNSPECIFIED)); } return BrokeredSubmit(endpointObjects, descs, requestedSubmissionInterfaces); } SubmissionStatus Submitter::BrokeredSubmit(const std::list& endpoints, const std::list& descs, std::list& jobs, const std::list& requestedSubmissionInterfaces) { JobConsumerList jcl(jobs); addConsumer(jcl); SubmissionStatus ok = BrokeredSubmit(endpoints, descs, requestedSubmissionInterfaces); removeConsumer(jcl); return ok; } static bool match_submission_interface(const ExecutionTarget& target, const std::list& requestedSubmissionInterfaces) { if (requestedSubmissionInterfaces.empty()) return true; for (std::list::const_iterator iname = requestedSubmissionInterfaces.begin(); iname != requestedSubmissionInterfaces.end(); ++iname) { if (*iname == target.ComputingEndpoint->InterfaceName) return true; } return false; } SubmissionStatus Submitter::BrokeredSubmit(const std::list& endpoints, const std::list& descs, const std::list& requestedSubmissionInterfaces) { ClearAll(); std::set preferredInterfaceNames; if (uc.InfoInterface().empty()) { // Maybe defaults should be moved somewhere else. 
preferredInterfaceNames.insert("org.nordugrid.ldapglue2"); } else { preferredInterfaceNames.insert(uc.InfoInterface()); } Arc::ComputingServiceUniq csu; Arc::ComputingServiceRetriever csr(uc, std::list(), uc.RejectDiscoveryURLs(), preferredInterfaceNames); csr.addConsumer(csu); for (std::list::const_iterator it = endpoints.begin(); it != endpoints.end(); it++) { csr.addEndpoint(*it); } csr.wait(); std::list services = csu.getServices(); queryingStatusMap = csr.getAllStatuses(); if (services.empty()) { for (std::list::const_iterator it = descs.begin(); it != descs.end(); ++it) { notsubmitted.push_back(&*it); } return (SubmissionStatus::NO_SERVICES | SubmissionStatus::DESCRIPTION_NOT_SUBMITTED); } Broker broker(uc, uc.Broker().first); if (!broker.isValid(false)) { // Only check if BrokerPlugin was loaded. for (std::list::const_iterator it = descs.begin(); it != descs.end(); ++it) { notsubmitted.push_back(&*it); } return (SubmissionStatus::BROKER_PLUGIN_NOT_LOADED | SubmissionStatus::DESCRIPTION_NOT_SUBMITTED); } SubmissionStatus retval; ConsumerWrapper cw(*this); ExecutionTargetSorter ets(broker, services); std::list::const_iterator itJAlt; // Iterator to use for alternative job descriptions. for (std::list::const_iterator itJ = descs.begin(); itJ != descs.end(); ++itJ) { bool descriptionSubmitted = false; const JobDescription* currentJobDesc = &*itJ; do { ets.set(*currentJobDesc); for (; !ets.endOfList(); ets.next()) { if(!match_submission_interface(*ets, requestedSubmissionInterfaces)) { logger.msg(VERBOSE, "Target %s does not match requested interface(s).", ets->ComputingEndpoint->URLString); continue; } SubmitterPlugin *sp = getLoader().loadByInterfaceName(ets->ComputingEndpoint->InterfaceName, uc); if (sp == NULL) { submissionStatusMap[Endpoint(*ets)] = EndpointSubmissionStatus(EndpointSubmissionStatus::NOPLUGIN); retval |= SubmissionStatus::SUBMITTER_PLUGIN_NOT_LOADED; continue; } SubmissionStatus submitStatus = sp->Submit(*currentJobDesc, *ets, cw); if (submitStatus) { submissionStatusMap[Endpoint(*ets)] = EndpointSubmissionStatus(EndpointSubmissionStatus::SUCCESSFUL); descriptionSubmitted = true; ets->RegisterJobSubmission(*currentJobDesc); break; } /* TODO: Set detailed status of endpoint, in case a general error is * encountered, i.e. not specific to the job description, so subsequent * submissions of job descriptions can check if a particular endpoint * should be avoided. *submissionStatusMap[Endpoint(*itET)] = submitStatus; // Currently 'submitStatus' is only a bool, improving the detail level of it would be helpful at this point. */ } if (!descriptionSubmitted && itJ->HasAlternatives()) { // Alternative job descriptions. 
if (currentJobDesc == &*itJ) { itJAlt = itJ->GetAlternatives().begin(); } else { ++itJAlt; } currentJobDesc = &*itJAlt; } } while (!descriptionSubmitted && itJ->HasAlternatives() && itJAlt != itJ->GetAlternatives().end()); if (!descriptionSubmitted) { notsubmitted.push_back(&*itJ); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; } } return retval; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/ComputingServiceRetriever.h0000644000000000000000000000012412124602004026447 xustar000000000000000027 mtime=1364395012.873263 27 atime=1513200574.750704 30 ctime=1513200659.781744816 nordugrid-arc-5.4.2/src/hed/libs/compute/ComputingServiceRetriever.h0000644000175000002070000001326612124602004026524 0ustar00mockbuildmock00000000000000#ifndef __ARC_COMPUTINGSERVICERETRIEVER_H__ #define __ARC_COMPUTINGSERVICERETRIEVER_H__ #include #include #include #include #include #include namespace Arc { /** * \ingroup compute * \headerfile ComputingServiceRetriever.h arc/compute/ComputingServiceRetriever.h */ class ComputingServiceUniq : public EntityConsumer { public: void addEntity(const ComputingServiceType& service); std::list getServices() { return services; } private: std::list services; static Logger logger; }; /// Retrieves information about computing elements by querying service registries and CE information systems /** * The ComputingServiceRetriever queries service registries and local * information systems of computing elements, creates ComputingServiceType * objects from the retrieved information and besides storing those objects * also sends them to all the registered consumers. * * \ingroup compute * \headerfile ComputingServiceRetriever.h arc/compute/ComputingServiceRetriever.h */ class ComputingServiceRetriever : public EntityContainer, public EntityConsumer { // The order of inheritance is important for Swig. public: /// Creates a ComputingServiceRetriever with a list of services to query /** * \param[in] uc the UserConfig object containing the credentails to use for * connecting services * \param[in] services a list of Endpoint objects containing the services * (registries or CEs) to query * \param[in] rejectedServices if the URL of a service matches an element in * this list, the service will not be queried * \param[in] preferredInterfaceNames when an Endpoint does not have it's * %GLUE2 interface name specified the class will try interfaces specified * here first, and if they return no results, then all the other possible * interfaces are tried * \param[in] capabilityFilter only those ComputingServiceType objects will * be sent to the consumer which has at least one of the capabilities provided * here */ ComputingServiceRetriever( const UserConfig& uc, const std::list& services = std::list(), const std::list& rejectedServices = std::list(), const std::set& preferredInterfaceNames = std::set(), const std::list& capabilityFilter = std::list(1, Endpoint::GetStringForCapability(Arc::Endpoint::COMPUTINGINFO)) ); /// Waits for all the results to arrive /** * This method call will only return when all the results have arrived.. */ void wait() { ser.wait(); tir.wait(); } /// Adds a new service (registry or computing element) to query /** * Depending on the type of the service, it will be added to the internal * ServiceEndpointRetriever (if it's a registry) or the internal * TargetInformationRetriever (if it's a computing element). 
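   *
   * A minimal sketch of the intended usage (the registry URL is a placeholder
   * and usercfg is assumed to be an already initialised Arc::UserConfig):
   * \code
   * Arc::ComputingServiceRetriever csr(usercfg);
   * csr.addEndpoint(Arc::Endpoint("https://registry.example.org", Arc::Endpoint::REGISTRY));
   * csr.wait();
   * std::list<Arc::ExecutionTarget> targets;
   * csr.GetExecutionTargets(targets);
   * \endcode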
* \param[in] service an Endpoint refering to a service to query */ void addEndpoint(const Endpoint& service); /// Adds a new service to query (used by the internal ServiceEndpointRetriever) /** * The internal ServiceEndpointRetriever queries the service registries and * feeds the results back to the ComputingServiceRetriever through this * method, so the ComputingServiceRetriever can recursively query the found * resources. * \param[in] service an Endpoint refering to a service to query */ void addEntity(const Endpoint& service) { addEndpoint(service); } /// Add a consumer to the ComputingServiceRetriever which will get the results /** * All the consumers will receive all the retrieved ComputingServiceType * objects one by one. * \param[in] c one consumer of the type EntityConsumer * capable of accepting ComputingServiceType objects */ void addConsumer(EntityConsumer& addConsumer_consumer /* The name 'addConsumer_consumer' is important for Swig when matching methods */) { tir.addConsumer(addConsumer_consumer); }; /// Remove a previously added consumer from this ComputingServiceRetriever /** * The removed consumer will not get any more result objects * \param[in] c the consumer to be removed */ void removeConsumer(const EntityConsumer& removeConsumer_consumer /* The name 'removeConsumer_consumer' is important for Swig when matching methods */) { tir.removeConsumer(removeConsumer_consumer); } /// Convenience method to generate ExectionTarget objects from the resulted ComputingServiceType objects /** * Calls the class method ExectuonTarget::GetExecutionTargets with the list * of retrieved ComputerServiceType objects. * \param[out] etList the generated ExecutionTargets will be put into this * list */ void GetExecutionTargets(std::list& etList) { ExecutionTarget::GetExecutionTargets(*this, etList); } /// Get status of all the queried Endpoint objects /** * This method returns a copy of the internal status map, and thus is only * a snapshot. If you want the final status map, make sure to invoke the * ComputingServiceRetriever::wait method before this one. * \return a map with Endpoint objects as keys and status objects as values. **/ EndpointStatusMap getAllStatuses() const { EndpointStatusMap s = ser.getAllStatuses(), t = tir.getAllStatuses(); s.insert(t.begin(), t.end()); return s; } private: ServiceEndpointRetriever ser; TargetInformationRetriever tir; static Logger logger; }; } // namespace Arc #endif // __ARC_COMPUTINGSERVICERETRIEVER_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/EndpointQueryingStatus.h0000644000000000000000000000012412073000274026005 xustar000000000000000027 mtime=1357643964.270785 27 atime=1513200574.738704 30 ctime=1513200659.775744743 nordugrid-arc-5.4.2/src/hed/libs/compute/EndpointQueryingStatus.h0000644000175000002070000001011312073000274026046 0ustar00mockbuildmock00000000000000#ifndef __ARC_ENDPOINTQUERYINGSTATUS_H__ #define __ARC_ENDPOINTQUERYINGSTATUS_H__ #include namespace Arc { /// Represents the status in the EntityRetriever of the query process of an Endpoint (service registry, computing element). /** * An object of this class is returned by the instances of the EntityRetriever * (e.g. #ServiceEndpointRetriever, #TargetInformationRetriever, * #JobListRetriever) representing the state of the process of querying an * Endpoint. 
It contains an #EndpointQueryingStatusType enum (#getStatus), and * a description string (#getDescription) * * \ingroup compute * \headerfile EndpointQueryingStatus.h arc/compute/EndpointQueryingStatus.h */ class EndpointQueryingStatus { public: /** The possible states: */ enum EndpointQueryingStatusType { UNKNOWN, /**< the state is unknown */ SUSPENDED_NOTREQUIRED, /**< Querying of the endpoint is suspended since querying it is not required. */ STARTED, /**< the query process was started */ FAILED, /**< the query process failed */ NOPLUGIN, /**< there is no plugin for the given Endpoint InterfaceName (so the query process was not even started) */ NOINFORETURNED, /**< query was successful but the response didn't contain entity information */ SUCCESSFUL /**< the query process was successful */ }; /** String representation of the states in the enum #EndpointQueryingStatusType */ static std::string str(EndpointQueryingStatusType status); /** A new EndpointQueryingStatus is created with #UNKNOWN status and with an empty description by default */ EndpointQueryingStatus(EndpointQueryingStatusType status = UNKNOWN, const std::string& description = "") : status(status), description(description) {}; /** This EndpointQueryingStatus object equals to an enum #EndpointQueryingStatusType if it contains the same state */ bool operator==(EndpointQueryingStatusType s) const { return status == s; }; /** This EndpointQueryingStatus object equals to another EndpointQueryingStatus object, if their state equals. The description doesn't matter. */ bool operator==(const EndpointQueryingStatus& s) const { return status == s.status; }; /** Inequality. \see operator==(EndpointQueryingStatusType) */ bool operator!=(EndpointQueryingStatusType s) const { return status != s; }; /** Inequality. \see operator==(const EndpointQueryingStatus&) */ bool operator!=(const EndpointQueryingStatus& s) const { return status != s.status; }; /** \return true if the status is not successful */ bool operator!() const { return status != SUCCESSFUL; }; /** \return true if the status is successful */ operator bool() const { return status == SUCCESSFUL; }; /** Setting the EndpointQueryingStatus object's state \param[in] s the new enum #EndpointQueryingStatusType status */ EndpointQueryingStatus& operator=(EndpointQueryingStatusType s) { status = s; return *this; }; /** Copying the EndpointQueryingStatus object into this one. 
\param[in] s the EndpointQueryingStatus object whose status and description will be copied into this object */ EndpointQueryingStatus& operator=(const EndpointQueryingStatus& s) { status = s.status; description = s.description; return *this; }; /** Return the enum #EndpointQueryingStatusType contained within this EndpointQueryingStatus object */ EndpointQueryingStatusType getStatus() const { return status; }; /** Return the description string contained within this EndpointQueryingStatus object */ const std::string& getDescription() const { return description; }; /** String representation of the EndpointQueryingStatus object, which is currently simply the string representation of the enum #EndpointQueryingStatusType */ std::string str() const { return str(status); }; friend bool operator==(EndpointQueryingStatusType, const EndpointQueryingStatus&); private: EndpointQueryingStatusType status; std::string description; }; inline bool operator==(EndpointQueryingStatus::EndpointQueryingStatusType eqst, const EndpointQueryingStatus& eqs) { return eqs == eqst; } } // namespace Arc #endif // __ARC_ENDPOINTQUERYINGSTATUS_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/Broker.h0000644000000000000000000000012412302603467022531 xustar000000000000000027 mtime=1393231671.516332 27 atime=1513200574.752704 30 ctime=1513200659.757744523 nordugrid-arc-5.4.2/src/hed/libs/compute/Broker.h0000644000175000002070000003727412302603467022613 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_BROKER_H__ #define __ARC_BROKER_H__ #include #include #include #include #include #include #include #include namespace Arc { class ExecutionTarget; class Logger; class URL; class UserConfig; /// A Broker filters and ranks acceptable targets for job submission. /** * This class is the high-level interface to brokers. It takes care of * loading at runtime the specific BrokerPlugin type which matches and ranks * ExecutionTargets according to specific criteria, for example queue length * or CPU benchmark. The type of BrokerPlugin to use is specified in the * constructor. * * The usual workflow is to call set() for the Broker to obtain the * parameters or constraints from the job that it is interested in, then * match() for each ExecutionTarget to filter targets. operator() can then be * used to sort the targets and is equivalent to * ExecutionTarget.operator<(ExecutionTarget). * * ExecutionTargetSorter can be used as a wrapper around Broker to avoid * calling Broker directly. * * \ingroup compute * \headerfile Broker.h arc/compute/Broker.h */ class Broker { public: /// Construct a new broker and load the BrokerPlugin of the given type. /** * \param uc UserConfig, passed to the BrokerPlugin. * \param name Name of the BrokerPlugin type to use. If empty then targets * are matched using genericMatch() but are not sorted. */ Broker(const UserConfig& uc, const std::string& name = ""); /// Construct a new broker of the given type and use the given JobDescription. /** * \param uc UserConfig, passed to the BrokerPlugin. * \param j set(j) is called from this constructor. * \param name Name of the BrokerPlugin type to use. If empty then targets * are matched using genericMatch() but are not sorted. */ Broker(const UserConfig& uc, const JobDescription& j, const std::string& name = ""); /// Copy constructor. BrokerPlugin copying is handled automatically. Broker(const Broker& b); /// Destructor. BrokerPlugin unloading is handled automatically. ~Broker(); /// Assignment operator. 
BrokerPlugin copying is handled automatically. Broker& operator=(const Broker& b); /// Used to sort targets. Returns true if lhs[\ref genericMatch_Note1 "1"] | in | \ref ComputingEndpointAttributes::TrustedCA "TrustedCA" [\ref genericMatch_Note2 "2"] \ref JobDescription.OtherAttributes "OtherAttributes"["nordugrid:broker;reject_queue"] | != | \ref ComputingShareAttributes.Name "ComputingShare.Name" \ref ResourcesType.QueueName "QueueName" | == | \ref ComputingShareAttributes.Name "ComputingShare.Name" \ref ApplicationType.ProcessingStartTime "ProcessingStartTime" | < | \ref ComputingEndpointAttributes.DowntimeStarts "DowntimeStarts" \ref ApplicationType.ProcessingStartTime "ProcessingStartTime" | > | \ref ComputingEndpointAttributes.DowntimeEnds "DowntimeEnds" --- | | \ref lower "lower"(\ref ComputingEndpointAttributes::HealthState "HealthState") == "ok" \ref ResourcesType.CEType "CEType" | \ref SoftwareRequirement.isSatisfied "isSatisfied" | \ref ComputingEndpointAttributes.Implementation "Implementation" \ref ResourcesType.IndividualWallTime "IndividualWallTime".max | <= | \ref ComputingShareAttributes.MaxWallTime "MaxWallTime" \ref ResourcesType.IndividualWallTime "IndividualWallTime".min | >= | \ref ComputingShareAttributes.MinWallTime "MinWallTime" \ref ResourcesType.IndividualCPUTime "IndividualCPUTime".max | <= | \ref ComputingShareAttributes.MaxCPUTime "MaxCPUTime" \ref ResourcesType.IndividualCPUTime "IndividualCPUTime".min | >= | \ref ComputingShareAttributes.MinCPUTime "MinCPUTime" \ref ResourcesType.TotalCPUTime "TotalCPUTime" | <= | \ref ComputingShareAttributes.MaxTotalCPUTime "MaxTotalCPUTime" [\ref genericMatch_Note3 "3"] \ref ResourcesType.TotalCPUTime "TotalCPUTime" / \ref SlotRequirementType.NumberOfSlots "NumberOfSlots" | <= | \ref ComputingShareAttributes.MaxCPUTime "MaxCPUTime" [\ref genericMatch_Note4 "4"] \ref ResourcesType.TotalCPUTime "TotalCPUTime" / \ref SlotRequirementType.NumberOfSlots "NumberOfSlots" | >= | \ref ComputingShareAttributes.MinCPUTime "MinCPUTime" \ref ResourcesType.IndividualPhysicalMemory "IndividualPhysicalMemory" | <= | \ref ExecutionEnvironmentAttributes.MainMemorySize "MainMemorySize" [\ref genericMatch_Note5 "5"] \ref ResourcesType.IndividualPhysicalMemory "IndividualPhysicalMemory" | <= | \ref ComputingShareAttributes.MaxMainMemory "MaxMainMemory" [\ref genericMatch_Note6 "6"] \ref ResourcesType.IndividualVirtualMemory "IndividualVirtualMemory" | <= | \ref ComputingShareAttributes.MaxVirtualMemory "MaxVirtualMemory" \ref ResourcesType.Platform "Platform" | == | \ref ExecutionEnvironmentAttributes.Platform "Platform" \ref ResourcesType.OperatingSystem "OperatingSystem" | \ref SoftwareRequirement.isSatisfied "isSatisfied" | \ref ExecutionEnvironmentAttributes.OperatingSystem "OperatingSystem" \ref ResourcesType.RunTimeEnvironment "RunTimeEnvironment" | \ref SoftwareRequirement.isSatisfied "isSatisfied" | ApplicationEnvironment \ref ResourcesType.NetworkInfo "NetworkInfo" | in | \ref ComputingManagerAttributes.NetworkInfo "NetworkInfo" \ref DiskSpaceRequirementType.SessionDiskSpace "SessionDiskSpace" | <= | 1024*\ref ComputingShareAttributes.MaxDiskSpace "MaxDiskSpace" [\ref genericMatch_Note7 "7"] \ref DiskSpaceRequirementType.SessionDiskSpace "SessionDiskSpace" | <= | 1024*\ref ComputingShareAttributes.WorkingAreaFree "WorkingAreaFree" [\ref genericMatch_Note7 "7"] \ref DiskSpaceRequirementType.DiskSpace "DiskSpace" | <= | 1024*\ref ComputingShareAttributes.MaxDiskSpace "MaxDiskSpace" [\ref genericMatch_Note8 "8"] \ref 
DiskSpaceRequirementType.DiskSpace "DiskSpace" | <= | 1024*\ref ComputingManagerAttributes.WorkingAreaFree "WorkingAreaFree" [\ref genericMatch_Note8 "8"] \ref DiskSpaceRequirementType.CacheDiskSpace "CacheDiskSpace" | <= | 1024*\ref ComputingManagerAttributes.CacheTotal "CacheTotal" \ref SlotRequirementType.NumberOfSlots "NumberOfSlots" | <= | \ref ComputingManagerAttributes.TotalSlots "TotalSlots" [\ref genericMatch_Note9 "9"] \ref SlotRequirementType.NumberOfSlots "NumberOfSlots" | <= | \ref ComputingShareAttributes.MaxSlotsPerJob "MaxSlotsPerJob" [\ref genericMatch_Note9 "9"] \ref ResourcesType.SessionLifeTime "SessionLifeTime" | <= | \ref ComputingManagerAttributes.WorkingAreaLifeTime "WorkingAreaLifeTime" \ref ResourcesType.NodeAccess "NodeAccess" is NAT_INBOUND OR NAT_INOUTBOUND | AND | \ref ExecutionEnvironmentAttributes.ConnectivityIn "ConnectivityIn" is true \ref ResourcesType.NodeAccess "NodeAccess" is NAT_OUTBOUND OR NAT_INOUTBOUND | AND | \ref ExecutionEnvironmentAttributes.ConnectivityOut "ConnectivityOut" is true * * \b Notes: * 1. \anchor genericMatch_Note1 Credential object is not part of * JobDescription object, but is obtained from the passed UserConfig * object. * 2. \anchor genericMatch_Note2 Check is only made if * \ref ComputingEndpointAttributes::TrustedCA "TrustedCA" list is not * empty. * 3. \anchor genericMatch_Note3 If * \ref ComputingShareAttributes::MaxTotalCPUTime "MaxTotalCPUTime" is not * set, the next check in the table is made. * 4. \anchor genericMatch_Note4 Check is only done if * \ref ComputingShareAttributes::MaxTotalCPUTime "MaxTotalCPUTime" is not * set. * 5. \anchor genericMatch_Note5 If * \ref ExecutionEnvironmentAttributes::MainMemorySize "MainMemorySize" is * not set, the next check in the table is made. * 6. \anchor genericMatch_Note6 Check is only done if * \ref ExecutionEnvironmentAttributes::MainMemorySize "MainMemorySize" is * not set. * 7. \anchor genericMatch_Note7 Check doesn't fail if * \ref ComputingShareAttributes.MaxDiskSpace "MaxDiskSpace" or * \ref ComputingShareAttributes.MaxMainMemory "MaxMainMemory" * respectively is not set. Both attributes must be unspecified, and * \ref DiskSpaceRequirementType.SessionDiskSpace "SessionDiskSpace" * be specified before the checks fails. * 8. \anchor genericMatch_Note8 Check doesn't fail if * \ref ComputingShareAttributes.MaxDiskSpace "MaxDiskSpace" or * \ref ComputingShareAttributes.MaxMainMemory "MaxMainMemory" * respectively is not set. Both attributes must be unspecified, and * \ref DiskSpaceRequirementType.DiskSpace "DiskSpace" * be specified before the checks fails. * 9. \anchor genericMatch_Note9 Check doesn't fail if * \ref ComputingManagerAttributes.TotalSlots "TotalSlots" or * \ref ComputingShareAttributes.MaxSlotsPerJob "MaxSlotsPerJob" * respectively is not set. Both attributes must be unspecified, and * \ref SlotRequirementType.NumberOfSlots "NumberOfSlots" * be specified before the checks fails. * * \return True if target matches job description. */ static bool genericMatch(const ExecutionTarget& et, const JobDescription& j, const Arc::UserConfig&); /// Returns true if the BrokerPlugin loaded by this Broker is valid. /** * \param alsoCheckJobDescription Also check if JobDescription is valid. */ bool isValid(bool alsoCheckJobDescription = true) const; /// Set the JobDescription to use during brokering. void set(const JobDescription& _j) const; /// Get the JobDescription set by set(). 
const JobDescription& getJobDescription() const { return *j; } private: const UserConfig& uc; mutable const JobDescription* j; std::string proxyDN; std::string proxyIssuerCA; CountedPointer p; static BrokerPluginLoader& getLoader(); static Logger logger; }; /// Wrapper around Broker functionality. /** * This class can be used instead of calling Broker methods directly. It * automatically takes care of matching and sorting ExecutionTargets. It can * be thought of as an iterator over the list of sorted targets and supports * some iterator-style methods such as next(), operator-> and operator*. * \ingroup compute * \headerfile Broker.h arc/compute/Broker.h */ class ExecutionTargetSorter : public EntityConsumer { public: /// Basic constructor. ExecutionTargetSorter(const Broker& b, const std::list& rejectEndpoints = std::list()) : b(&b), rejectEndpoints(rejectEndpoints), current(targets.first.begin()) {} /// Constructor passing JobDescription. ExecutionTargetSorter(const Broker& b, const JobDescription& j, const std::list& rejectEndpoints = std::list()) : b(&b), rejectEndpoints(rejectEndpoints), current(targets.first.begin()) { set(j); } /// Constructor passing list of targets. ExecutionTargetSorter(const Broker& b, const std::list& csList, const std::list& rejectEndpoints = std::list()) : b(&b), rejectEndpoints(rejectEndpoints), current(targets.first.begin()) { addEntities(csList); } /// Constructor passing JobDescription and list of targets. ExecutionTargetSorter(const Broker& b, const JobDescription& j, const std::list& csList, const std::list& rejectEndpoints = std::list()) : b(&b), rejectEndpoints(rejectEndpoints), current(targets.first.begin()) { set(j); addEntities(csList); } virtual ~ExecutionTargetSorter() {} /// Add an ExecutionTarget and rank it according to the Broker. void addEntity(const ExecutionTarget& et); /// Add an ComputingServiceType and rank it according to the Broker. void addEntity(const ComputingServiceType& cs); /// Add a list of ComputingServiceTypes and rank them according to the Broker. void addEntities(const std::list&); /// Reset to the first target in the ranked list. void reset() { current = targets.first.begin(); } /// Advance to the next target. Returns false if the current target is the last one. bool next() { if (!endOfList()) { ++current; }; return !endOfList(); } /// Returns true if current target is last in the list. bool endOfList() const { return current == targets.first.end(); } /// Returns current target. const ExecutionTarget& operator*() const { return *current; } /// Returns current target. const ExecutionTarget& getCurrentTarget() const { return *current; } /// Returns pointer to current target. const ExecutionTarget* operator->() const { return &*current; } /// Get sorted list of matching targets. const std::list& getMatchingTargets() const { return targets.first; } /// Get list of non-matching targets. const std::list& getNonMatchingTargets() const { return targets.second; } /// Clear lists of targets. void clear() { targets.first.clear(); targets.second.clear(); } /// Register that job was submitted to current target. /** * When brokering many jobs at once this method can be called after each * job submission to update the information held about the target it was * submitted to, such as number of free slots or free disk space. 
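   *
   * A hedged sketch of such a loop, showing where registerJobSubmission()
   * fits in (usercfg, descs and the sorter are assumed to be set up
   * elsewhere; error handling is omitted):
   * \code
   * Arc::Submitter submitter(usercfg);
   * for (std::list<Arc::JobDescription>::const_iterator itJ = descs.begin();
   *      itJ != descs.end(); ++itJ) {
   *   sorter.set(*itJ);                      // re-rank the targets for this job
   *   for (; !sorter.endOfList(); sorter.next()) {
   *     Arc::Job job;
   *     if (submitter.Submit(*sorter, *itJ, job)) {
   *       sorter.registerJobSubmission();    // update free slot/disk estimates
   *       break;
   *     }
   *   }
   * }
   * \endcode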
*/ void registerJobSubmission(); /// Set a new Broker and recreate the ranked list of targets, void set(const Broker& newBroker) { b = &newBroker; sort(); } /// Set a new job description and recreate the ranked list of targets, void set(const JobDescription& j) { b->set(j); sort(); } /// Set a list of endpoints to reject when matching. void setRejectEndpoints(const std::list& newRejectEndpoints) { rejectEndpoints = newRejectEndpoints; } private: void sort(); void insert(const ExecutionTarget& et); bool reject(const ExecutionTarget& et); const Broker* b; std::list rejectEndpoints; // Map of ExecutionTargets. first: matching; second: unsuitable. std::pair< std::list, std::list > targets; std::list::iterator current; static Logger logger; }; } // namespace Arc #endif // __ARC_BROKER_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/BrokerPlugin.cpp0000644000000000000000000000012412072545154024245 xustar000000000000000027 mtime=1357564524.902889 27 atime=1513200574.653703 30 ctime=1513200659.790744926 nordugrid-arc-5.4.2/src/hed/libs/compute/BrokerPlugin.cpp0000644000175000002070000000550412072545154024316 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include "BrokerPlugin.h" namespace Arc { Logger BrokerPlugin::logger(Logger::getRootLogger(), "BrokerPlugin"); bool BrokerPlugin::operator() (const ExecutionTarget&, const ExecutionTarget&) const { return true; } bool BrokerPlugin::match(const ExecutionTarget& et) const { if(!j) return false; return Broker::genericMatch(et,*j,uc); } void BrokerPlugin::set(const JobDescription& _j) const { j = &_j; } BrokerPluginLoader& Broker::getLoader() { // For C++ it would be enough to have // static BrokerPluginLoader loader; // But Java sometimes does not destroy objects causing // PluginsFactory destructor loop forever waiting for // plugins to exit. 
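    // Hence the loader below is created on first use and intentionally never
    // deleted (a deliberately leaked singleton), so no plugin destructor has
    // to run at shutdown.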
static BrokerPluginLoader* loader = NULL; if(!loader) { loader = new BrokerPluginLoader(); } return *loader; } BrokerPluginLoader::BrokerPluginLoader() : Loader(BaseConfig().MakeConfig(Config()).Parent()) {} BrokerPluginLoader::~BrokerPluginLoader() { for (std::list::iterator it = plugins.begin(); it != plugins.end(); ++it) { delete *it; } } BrokerPlugin* BrokerPluginLoader::load(const UserConfig& uc, const std::string& name, bool keep_ownerskip) { return load(uc, NULL, name, keep_ownerskip); } BrokerPlugin* BrokerPluginLoader::load(const UserConfig& uc, const JobDescription& j, const std::string& name, bool keep_ownerskip) { return load(uc, &j, name, keep_ownerskip); } BrokerPlugin* BrokerPluginLoader::copy(const BrokerPlugin* p, bool keep_ownerskip) { if (p) { BrokerPlugin* bp = new BrokerPlugin(*p); if (bp) { if (keep_ownerskip) { plugins.push_back(bp); }; return bp; }; }; return NULL; } BrokerPlugin* BrokerPluginLoader::load(const UserConfig& uc, const JobDescription* j, const std::string& name, bool keep_ownership) { BrokerPluginArgument arg(uc); if (name.empty()) { BrokerPlugin* p = new BrokerPlugin(&arg); if (!p) { return NULL; } if (j) { p->set(*j); } if (keep_ownership) { plugins.push_back(p); } return p; } if(!factory_->load(FinderLoader::GetLibrariesList(), "HED:BrokerPlugin", name)) { logger.msg(DEBUG, "Broker plugin \"%s\" not found.", name); return NULL; } BrokerPlugin *p = factory_->GetInstance("HED:BrokerPlugin", name, &arg, false); if (!p) { logger.msg(DEBUG, "Unable to load BrokerPlugin (%s)", name); return NULL; } if (j) { p->set(*j); } if (keep_ownership) { plugins.push_back(p); } logger.msg(INFO, "Broker %s loaded", name); return p; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/JobState.cpp0000644000000000000000000000012412153630075023352 xustar000000000000000027 mtime=1370435645.386876 27 atime=1513200574.670703 30 ctime=1513200659.794744975 nordugrid-arc-5.4.2/src/hed/libs/compute/JobState.cpp0000644000175000002070000000205212153630075023416 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "JobState.h" namespace Arc { #ifdef JOBSTATE_X #undef JOBSTATE_X #endif const std::string JobState::StateTypeString[] = { "Undefined", "Accepted", "Preparing", "Submitting", "Hold", "Queuing", "Running", "Finishing", "Finished", "Killed", "Failed", "Deleted", "Other" }; JobState::StateType JobState::GetStateType(const std::string& stateStr) { if (stateStr == "Accepted") { return ACCEPTED; } if (stateStr == "Preparing") { return PREPARING; } if (stateStr == "Submitting") { return SUBMITTING; } if (stateStr == "Hold") { return HOLD; } if (stateStr == "Queuing") { return QUEUING; } if (stateStr == "Running") { return RUNNING; } if (stateStr == "Finishing") { return FINISHING; } if (stateStr == "Finished") { return FINISHED; } if (stateStr == "Killed") { return KILLED; } if (stateStr == "Failed") { return FAILED; } if (stateStr == "Deleted") { return DELETED; } if (stateStr == "Other") { return OTHER; } return UNDEFINED; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/EndpointQueryingStatus.cpp0000644000000000000000000000012412051006754026345 xustar000000000000000027 mtime=1352928748.264998 27 atime=1513200574.670703 30 ctime=1513200659.803745085 nordugrid-arc-5.4.2/src/hed/libs/compute/EndpointQueryingStatus.cpp0000644000175000002070000000120012051006754026403 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include 
"EndpointQueryingStatus.h" namespace Arc { std::string EndpointQueryingStatus::str(EndpointQueryingStatusType s) { if (s == UNKNOWN) return "UNKNOWN"; else if (s == STARTED) return "STARTED"; else if (s == FAILED) return "FAILED"; else if (s == NOPLUGIN) return "NOPLUGIN"; else if (s == SUCCESSFUL) return "SUCCESSFUL"; else if (s == SUSPENDED_NOTREQUIRED) return "SUSPENDED_NOTREQUIRED"; else return ""; // There should be no other alternative! } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/SubmitterPlugin.h0000644000000000000000000000012413165644550024450 xustar000000000000000027 mtime=1507281256.705161 27 atime=1513200574.716704 30 ctime=1513200659.762744584 nordugrid-arc-5.4.2/src/hed/libs/compute/SubmitterPlugin.h0000644000175000002070000001450113165644550024516 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_SUBMITTERPLUGIN_H__ #define __ARC_SUBMITTERPLUGIN_H__ /** \file * \brief Plugin, loader and argument classes for submitter specialisation. */ #include #include #include #include #include #include #include #include #include #include #include namespace Arc { /** * \defgroup accplugins Plugin related classes for compute specialisations * \ingroup compute */ class Config; class ExecutionTarget; class JobDescription; class Logger; class UserConfig; /// Base class for the SubmitterPlugins /** * SubmitterPlugin is the base class for Grid middleware specialized * SubmitterPlugin objects. The class submits job(s) to the computing * resource it represents and uploads (needed by the job) local * input files. * * \headerfile SubmitterPlugin.h arc/compute/SubmitterPlugin.h */ class SubmitterPlugin : public Plugin { protected: SubmitterPlugin(const UserConfig& usercfg, PluginArgument* parg) : Plugin(parg), usercfg(&usercfg), dest_handle(NULL) {} public: virtual ~SubmitterPlugin() { delete dest_handle; } /// Submit a single job description /** * Convenience method for submitting single job description, it simply calls * the SubmitterPlugin::Submit method taking a list of job descriptions. * \param j JobDescription object to be submitted. * \param et ExecutionTarget to submit the job description to. * \param jc callback object used to add Job object of newly submitted job * to. * \return a bool indicating whether job submission suceeded or not. **/ virtual SubmissionStatus Submit(const JobDescription& j, const ExecutionTarget& et, EntityConsumer& jc) { std::list ns; return Submit(std::list(1, j), et, jc, ns); } /// Submit job /** * This virtual method should be overridden by plugins which should * be capable of submitting jobs, defined in the JobDescription * jobdesc, to the ExecutionTarget et. The protected convenience * method AddJob can be used to save job information. * This method should return the URL of the submitted job. In case * submission fails an empty URL should be returned. */ virtual SubmissionStatus Submit(const std::list& jobdesc, const ExecutionTarget& et, EntityConsumer& jc, std::list& notSubmitted) = 0; virtual SubmissionStatus Submit(const std::list& jobdesc, const std::string& endpoint, EntityConsumer& jc, std::list& notSubmitted); /// Migrate job /** * This virtual method should be overridden by plugins which should * be capable of migrating jobs. The active job which should be * migrated is pointed to by the URL jobid, and is represented by * the JobDescription jobdesc. The forcemigration boolean specifies * if the migration should succeed if the active job cannot be * terminated. 
The protected method AddJob can be used to save job * information. * This method should return the URL of the migrated job. In case * migration fails an empty URL should be returned. */ virtual bool Migrate(const std::string& jobid, const JobDescription& jobdesc, const ExecutionTarget& et, bool forcemigration, Job& job); virtual const std::list& SupportedInterfaces() const { return supportedInterfaces; }; /** * \since Added in 5.1.0 **/ virtual void SetUserConfig(const UserConfig& uc) { usercfg = &uc; } protected: bool PutFiles(const JobDescription& jobdesc, const URL& url) const; void AddJobDetails(const JobDescription& jobdesc, Job& job) const; /** * UserConfig object not owned by this class, and relies on its existence * throughout lifetime of objects from this class. Must not be deleted by * this class. Pointers to this object must not be exposed publicly. **/ const UserConfig* usercfg; std::list supportedInterfaces; DataHandle* dest_handle; static Logger logger; }; /** Class responsible for loading SubmitterPlugin plugins * The SubmitterPlugin objects returned by a SubmitterPluginLoader * must not be used after the SubmitterPluginLoader is destroyed. * * \ingroup accplugins * \headerfile SubmitterPlugin.h arc/compute/SubmitterPlugin.h */ class SubmitterPluginLoader : public Loader { public: /** Constructor * Creates a new SubmitterPluginLoader. */ SubmitterPluginLoader(); /** Destructor * Calling the destructor destroys all SubmitterPlugins loaded * by the SubmitterPluginLoader instance. */ ~SubmitterPluginLoader(); /** Load a new SubmitterPlugin * \param name The name of the SubmitterPlugin to load. * \param usercfg The UserConfig object for the new SubmitterPlugin. * \return A pointer to the new SubmitterPlugin (NULL on error). */ SubmitterPlugin* load(const std::string& name, const UserConfig& usercfg); SubmitterPlugin* loadByInterfaceName(const std::string& name, const UserConfig& usercfg); void initialiseInterfacePluginMap(const UserConfig& uc); const std::map& getInterfacePluginMap() const { return interfacePluginMap; } private: std::multimap submitters; static std::map interfacePluginMap; }; /** * \ingroup accplugins * \headerfile SubmitterPlugin.h arc/compute/SubmitterPlugin.h */ class SubmitterPluginArgument : public PluginArgument { public: SubmitterPluginArgument(const UserConfig& usercfg) : usercfg(usercfg) {} ~SubmitterPluginArgument() {} operator const UserConfig&() { return usercfg; } private: const UserConfig& usercfg; }; } // namespace Arc #endif // __ARC_SUBMITTERPLUGIN_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/JobInformationStorageXML.h0000644000000000000000000000012312207646107026134 xustar000000000000000026 mtime=1377782855.24684 27 atime=1513200574.754704 30 ctime=1513200659.784744853 nordugrid-arc-5.4.2/src/hed/libs/compute/JobInformationStorageXML.h0000644000175000002070000000261312207646107026204 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOBINFORMATIONSTORAGEXML_H__ #define __ARC_JOBINFORMATIONSTORAGEXML_H__ #include #include "JobInformationStorage.h" namespace Arc { class JobInformationStorageXML : public JobInformationStorage { public: JobInformationStorageXML(const std::string& name, unsigned nTries = 10, unsigned tryInterval = 500000); virtual ~JobInformationStorageXML() {} static JobInformationStorage* Instance(const std::string& name) { return new JobInformationStorageXML(name); } bool ReadAll(std::list& jobs, const std::list& rejectEndpoints = std::list()); bool Read(std::list& jobs, std::list& 
jobIdentifiers, const std::list& endpoints = std::list(), const std::list& rejectEndpoints = std::list()); bool Write(const std::list& jobs) { std::list newJobs; std::set prunedServices; return Write(jobs, prunedServices, newJobs); } bool Write(const std::list& jobs, const std::set& prunedServices, std::list& newJobs); bool Clean(); bool Remove(const std::list& jobids); private: Config jobstorage; static Logger logger; }; } // namespace Arc #endif // __ARC_JOBINFORMATIONSTORAGEXML_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/Broker.cpp0000644000000000000000000000012313024224727023063 xustar000000000000000026 mtime=1481714135.79494 27 atime=1513200574.693704 30 ctime=1513200659.788744902 nordugrid-arc-5.4.2/src/hed/libs/compute/Broker.cpp0000644000175000002070000006653713024224727023152 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include namespace Arc { Logger Broker::logger(Logger::getRootLogger(), "Broker"); Logger ExecutionTargetSorter::logger(Logger::getRootLogger(), "ExecutionTargetSorter"); Broker::Broker(const UserConfig& uc, const JobDescription& j, const std::string& name) : uc(uc), j(&j), p(getLoader().load(uc, j, name, false)) { Credential credential(uc); proxyDN = credential.GetDN(); proxyIssuerCA = credential.GetCAName(); } Broker::Broker(const UserConfig& uc, const std::string& name) : uc(uc), j(NULL), p(getLoader().load(uc, name, false)) { Credential credential(uc); proxyDN = credential.GetDN(); proxyIssuerCA = credential.GetCAName(); } Broker::Broker(const Broker& b) : uc(b.uc), j(b.j), proxyDN(b.proxyDN), proxyIssuerCA(b.proxyIssuerCA), p(b.p) { p = getLoader().copy(p.Ptr(), false); } Broker::~Broker() { } Broker& Broker::operator=(const Broker& b) { j = b.j; proxyDN = b.proxyDN; proxyIssuerCA = b.proxyIssuerCA; p = getLoader().copy(p.Ptr(), false); return *this; } bool Broker::operator() (const ExecutionTarget& lhs, const ExecutionTarget& rhs) const { return (bool)p?(*p)(lhs, rhs):true; } bool Broker::isValid(bool alsoCheckJobDescription) const { return (bool)p && (!alsoCheckJobDescription || j != NULL); } void Broker::set(const JobDescription& _j) const { if ((bool)p) { j = &_j; p->set(_j); }; } bool Broker::match(const ExecutionTarget& t) const { logger.msg(VERBOSE, "Performing matchmaking against target (%s).", t.ComputingEndpoint->URLString); bool plugin_match = false; if(!p) { if(j) plugin_match = Broker::genericMatch(t,*j,uc); } else { plugin_match = p->match(t); } if (plugin_match) { logger.msg(VERBOSE, "Matchmaking, ExecutionTarget: %s matches job description", t.ComputingEndpoint->URLString); } return plugin_match; } bool decodeDN(std::string in, std::list& out) { in = trim(in," "); if(in[0] == '/') { // /N1=V1/N2=V2 kind std::string::size_type pos = 0; while(true) { std::string item; pos = get_token(item, in, pos, "/"); if((pos == std::string::npos) && item.empty()) break; std::string::size_type p = item.find('='); if(p == std::string::npos) { // Most probably this belongs to previous item if(out.size() > 0) { *out.begin() += "/"+item; } else { out.push_front(item); } } else { out.push_front(item); } } } else { // N2=V2,N1=V1 kind tokenize(in,out,","); } for(std::list::iterator item = out.begin(); item != out.end(); ++item) { std::string::size_type p = item->find('='); if(p != std::string::npos) { *item = trim(item->substr(0,p)," ") + "=" + trim(item->substr(p+1)," "); } else { *item = trim(*item," "); } } return true; } 
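  // Example (hypothetical DN): decodeDN() maps both the slash form
  // "/C=XX/O=Example/CN=Some Name" and the comma form
  // "CN=Some Name,O=Example,C=XX" to the same most-specific-first list
  // {"CN=Some Name", "O=Example", "C=XX"}, so compareDN() below treats the
  // two encodings of a DN as equal.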
static bool compareDN(const std::list& dn1, const std::list& dn2) { if(dn1.size() != dn2.size()) return false; std::list::const_iterator d1 = dn1.begin(); std::list::const_iterator d2 = dn2.begin(); while(d1 != dn1.end()) { if(*d1 != *d2) return false; ++d1; ++d2; } return true; } static bool compareDN(const std::string& dn1, const std::string& dn2) { std::list dnl1; std::list dnl2; if(!decodeDN(dn1,dnl1)) return false; if(!decodeDN(dn2,dnl2)) return false; return compareDN(dnl1,dnl2); } static std::list::iterator findDN(std::list::iterator first, std::list::iterator last, const std::string& item) { for(;first != last;++first) { if(compareDN(*first,item)) break; } return first; } bool Broker::genericMatch(const ExecutionTarget& t, const JobDescription& j, const UserConfig& uc) { // Maybe Credential can be passed to plugins through one more set() Credential credential(uc); std::string proxyDN = credential.GetDN(); std::string proxyIssuerCA = credential.GetCAName(); if ( !(t.ComputingEndpoint->TrustedCA.empty()) && (findDN(t.ComputingEndpoint->TrustedCA.begin(), t.ComputingEndpoint->TrustedCA.end(), proxyIssuerCA) == t.ComputingEndpoint->TrustedCA.end()) ){ logger.msg(VERBOSE, "The CA issuer (%s) of the credentials (%s) is not trusted by the target (%s).", proxyIssuerCA, proxyDN, t.ComputingEndpoint->URLString); return false; } std::map::const_iterator itAtt; if ((itAtt = j.OtherAttributes.find("nordugrid:broker;reject_queue")) != j.OtherAttributes.end()) { if (t.ComputingShare->MappingQueue.empty()) { if (t.ComputingShare->Name.empty()) { logger.msg(VERBOSE, "ComputingShareName of ExecutionTarget (%s) is not defined", t.ComputingEndpoint->URLString); return false; } if (t.ComputingShare->Name == itAtt->second) { logger.msg(VERBOSE, "ComputingShare (%s) explicitly rejected", itAtt->second); return false; } } else { if (t.ComputingShare->MappingQueue == itAtt->second) { logger.msg(VERBOSE, "ComputingShare (%s) explicitly rejected", itAtt->second); return false; } } } if (!j.Resources.QueueName.empty()) { if (t.ComputingShare->MappingQueue.empty()) { if (t.ComputingShare->Name.empty()) { logger.msg(VERBOSE, "ComputingShareName of ExecutionTarget (%s) is not defined", t.ComputingEndpoint->URLString); return false; } if (t.ComputingShare->Name != j.Resources.QueueName) { logger.msg(VERBOSE, "ComputingShare (%s) does not match selected queue (%s)", t.ComputingShare->Name, j.Resources.QueueName); return false; } } else { if (t.ComputingShare->MappingQueue != j.Resources.QueueName) { logger.msg(VERBOSE, "ComputingShare (%s) does not match selected queue (%s)", t.ComputingShare->MappingQueue, j.Resources.QueueName); return false; } } } if ((int)j.Application.ProcessingStartTime.GetTime() != -1) { if ((int)t.ComputingEndpoint->DowntimeStarts.GetTime() != -1 && (int)t.ComputingEndpoint->DowntimeEnds.GetTime() != -1) { if (t.ComputingEndpoint->DowntimeStarts <= j.Application.ProcessingStartTime && j.Application.ProcessingStartTime <= t.ComputingEndpoint->DowntimeEnds) { logger.msg(VERBOSE, "ProcessingStartTime (%s) specified in job description is inside the targets downtime period [ %s - %s ].", (std::string)j.Application.ProcessingStartTime, (std::string)t.ComputingEndpoint->DowntimeStarts, (std::string)t.ComputingEndpoint->DowntimeEnds); return false; } } else logger.msg(WARNING, "The downtime of the target (%s) is not published. 
Keeping target.", t.ComputingEndpoint->URLString); } if (!t.ComputingEndpoint->HealthState.empty()) { if (lower(t.ComputingEndpoint->HealthState) != "ok") { // Enumeration for healthstate: ok, critical, other, unknown, warning logger.msg(VERBOSE, "HealthState of ExecutionTarget (%s) is not OK (%s)", t.ComputingEndpoint->URLString, t.ComputingEndpoint->HealthState); return false; } } else { logger.msg(VERBOSE, "Matchmaking, ExecutionTarget: %s, HealthState is not defined", t.ComputingEndpoint->URLString); return false; } if (!j.Resources.CEType.empty()) { if (!t.ComputingEndpoint->Implementation().empty()) { if (!j.Resources.CEType.isSatisfied(t.ComputingEndpoint->Implementation)) { logger.msg(VERBOSE, "Matchmaking, Computing endpoint requirement not satisfied. ExecutionTarget: %s", (std::string)t.ComputingEndpoint->Implementation); return false; } } else { logger.msg(VERBOSE, "Matchmaking, ExecutionTarget: %s, ImplementationName is not defined", t.ComputingEndpoint->URLString); return false; } } { typedef std::pair EtTimePair; EtTimePair etTime[] = {EtTimePair("MaxWallTime", (int)t.ComputingShare->MaxWallTime.GetPeriod()), EtTimePair("MinWallTime", (int)t.ComputingShare->MinWallTime.GetPeriod()), EtTimePair("MaxCPUTime", (int)t.ComputingShare->MaxCPUTime.GetPeriod()), EtTimePair("MinCPUTime", (int)t.ComputingShare->MinCPUTime.GetPeriod())}; typedef std::pair*> JobTimePair; JobTimePair jobTime[] = {JobTimePair("IndividualWallTime", &j.Resources.IndividualWallTime), JobTimePair("IndividualCPUTime", &j.Resources.IndividualCPUTime)}; int i = 0; for (; i < 4; i++) { JobTimePair *jTime = &jobTime[i/2]; if (((i%2 == 0) && (jTime->second->range.max != -1)) || ((i%2 == 1) && (jTime->second->range.min != -1))) { if (etTime[i].second != -1) { if (jTime->second->benchmark.first.empty()) { // No benchmark defined, do not scale. if (((i%2 == 0) && (jTime->second->range.max > etTime[i].second)) || ((i%2 == 1) && (jTime->second->range.min < etTime[i].second))) { logger.msg(VERBOSE, "Matchmaking, %s (%d) is %s than %s (%d) published by the ExecutionTarget.", jTime->first, (i%2 == 0 ? jTime->second->range.max : jTime->second->range.min), (i%2 == 0 ? "greater" : "less"), etTime[i].first, etTime[i].second); return false; } } else { // Benchmark defined => scale using benchmark. double targetBenchmark = -1.; for (std::map::const_iterator itTBench = t.Benchmarks->begin(); itTBench != t.Benchmarks->end(); itTBench++) { if (lower(jTime->second->benchmark.first) == lower(itTBench->first)) { targetBenchmark = itTBench->second; break; } } // Make it possible to scale according to clock rate. if (targetBenchmark <= 0. && lower(jTime->second->benchmark.first) == "clock rate") { targetBenchmark = (t.ExecutionEnvironment->CPUClockSpeed > 0. ? (double)t.ExecutionEnvironment->CPUClockSpeed : 1000.); } if (targetBenchmark > 0.) { if (((i%2 == 0) && (jTime->second->scaleMax(targetBenchmark) > etTime[i].second)) || ((i%2 == 1) && (jTime->second->scaleMin(targetBenchmark) < etTime[i].second))) { logger.msg(VERBOSE, "Matchmaking, The %s scaled %s (%d) is %s than the %s (%d) published by the ExecutionTarget.", jTime->second->benchmark.first, jTime->first, (i%2 == 0 ? jTime->second->scaleMax(targetBenchmark) : jTime->second->scaleMin(targetBenchmark)), (i%2 == 0 ? 
"greater" : "less"), etTime[i].first, etTime[i].second); return false; } } else { logger.msg(VERBOSE, "Matchmaking, Benchmark %s is not published by the ExecutionTarget.", jTime->second->benchmark.first); return false; } } } // Do not drop target if it does not publish attribute. } } } { const int totalcputime = (j.Resources.TotalCPUTime.range.min > 0 ? j.Resources.TotalCPUTime.range.min : j.Resources.TotalCPUTime.range.max); if (totalcputime != -1) { if (t.ComputingShare->MaxTotalCPUTime.GetPeriod() != -1) { if (t.ComputingShare->MaxTotalCPUTime.GetPeriod() < j.Resources.TotalCPUTime.range.min) { logger.msg(VERBOSE, "Matchmaking, MaxTotalCPUTime problem, ExecutionTarget: %d (MaxTotalCPUTime), JobDescription: %d (TotalCPUTime)", t.ComputingShare->MaxTotalCPUTime.GetPeriod(), totalcputime); return false; } } else if (t.ComputingShare->MaxCPUTime.GetPeriod() != -1) { const int slots = (j.Resources.SlotRequirement.NumberOfSlots > 0 ? j.Resources.SlotRequirement.NumberOfSlots : 1); if (t.ComputingShare->MaxCPUTime.GetPeriod() < totalcputime/slots) { logger.msg(VERBOSE, "Matchmaking, MaxCPUTime problem, ExecutionTarget: %d (MaxCPUTime), JobDescription: %d (TotalCPUTime/NumberOfSlots)", t.ComputingShare->MaxCPUTime.GetPeriod(), totalcputime/slots); return false; } } else { logger.msg(VERBOSE, "Matchmaking, ExecutionTarget: %s, MaxTotalCPUTime or MaxCPUTime not defined, assuming no CPU time limit", t.ComputingEndpoint->URLString); } // There is no MinTotalCPUTime if (t.ComputingShare->MinCPUTime.GetPeriod() != -1) { const int slots = (j.Resources.SlotRequirement.NumberOfSlots > 0 ? j.Resources.SlotRequirement.NumberOfSlots : 1); if (t.ComputingShare->MinCPUTime.GetPeriod() > totalcputime/slots) { logger.msg(VERBOSE, "Matchmaking, MinCPUTime problem, ExecutionTarget: %d (MinCPUTime), JobDescription: %d (TotalCPUTime/NumberOfSlots)", t.ComputingShare->MinCPUTime.GetPeriod(), totalcputime/slots); return false; } } else { logger.msg(VERBOSE, "Matchmaking, ExecutionTarget: %s, MinCPUTime not defined, assuming no CPU time limit", t.ComputingEndpoint->URLString); } } } if (j.Resources.IndividualPhysicalMemory != -1) { if (t.ExecutionEnvironment->MainMemorySize != -1) { // Example: 678 if (t.ExecutionEnvironment->MainMemorySize < j.Resources.IndividualPhysicalMemory) { logger.msg(VERBOSE, "Matchmaking, MainMemorySize problem, ExecutionTarget: %d (MainMemorySize), JobDescription: %d (IndividualPhysicalMemory)", t.ExecutionEnvironment->MainMemorySize, j.Resources.IndividualPhysicalMemory.max); return false; } } else if (t.ComputingShare->MaxMainMemory != -1) { // Example: 678 if (t.ComputingShare->MaxMainMemory < j.Resources.IndividualPhysicalMemory) { logger.msg(VERBOSE, "Matchmaking, MaxMainMemory problem, ExecutionTarget: %d (MaxMainMemory), JobDescription: %d (IndividualPhysicalMemory)", t.ComputingShare->MaxMainMemory, j.Resources.IndividualPhysicalMemory.max); return false; } } else { logger.msg(VERBOSE, "Matchmaking, ExecutionTarget: %s, MaxMainMemory and MainMemorySize are not defined", t.ComputingEndpoint->URLString); return false; } } if (j.Resources.IndividualVirtualMemory != -1) { if (t.ComputingShare->MaxVirtualMemory != -1) { // Example: 678 if (t.ComputingShare->MaxVirtualMemory < j.Resources.IndividualVirtualMemory) { logger.msg(VERBOSE, "Matchmaking, MaxVirtualMemory problem, ExecutionTarget: %d (MaxVirtualMemory), JobDescription: %d (IndividualVirtualMemory)", t.ComputingShare->MaxVirtualMemory, j.Resources.IndividualVirtualMemory.max); return false; } } else { logger.msg(VERBOSE, 
"Matchmaking, ExecutionTarget: %s, MaxVirtualMemory is not defined", t.ComputingEndpoint->URLString); return false; } } if (!j.Resources.Platform.empty()) { if (!t.ExecutionEnvironment->Platform.empty()) { // Example: i386 if (t.ExecutionEnvironment->Platform != j.Resources.Platform) { logger.msg(VERBOSE, "Matchmaking, Platform problem, ExecutionTarget: %s (Platform) JobDescription: %s (Platform)", t.ExecutionEnvironment->Platform, j.Resources.Platform); return false; } } else { logger.msg(VERBOSE, "Matchmaking, ExecutionTarget: %s, Platform is not defined", t.ComputingEndpoint->URLString); return false; } } if (!j.Resources.OperatingSystem.empty()) { if (!t.ExecutionEnvironment->OperatingSystem.empty()) { if (!j.Resources.OperatingSystem.isSatisfied(t.ExecutionEnvironment->OperatingSystem)) { logger.msg(VERBOSE, "Matchmaking, ExecutionTarget: %s, OperatingSystem requirements not satisfied", t.ComputingEndpoint->URLString); return false; } } else { logger.msg(VERBOSE, "Matchmaking, ExecutionTarget: %s, OperatingSystem is not defined", t.ComputingEndpoint->URLString); return false; } } if (!j.Resources.RunTimeEnvironment.empty()) { if (!t.ApplicationEnvironments->empty()) { if (!j.Resources.RunTimeEnvironment.isSatisfied(*t.ApplicationEnvironments)) { logger.msg(VERBOSE, "Matchmaking, ExecutionTarget: %s, RunTimeEnvironment requirements not satisfied", t.ComputingEndpoint->URLString); return false; } } else { logger.msg(VERBOSE, "Matchmaking, ExecutionTarget: %s, ApplicationEnvironments not defined", t.ComputingEndpoint->URLString); return false; } } if (!j.Resources.NetworkInfo.empty()) if (!t.ComputingManager->NetworkInfo.empty()) { // Example: infiniband if (std::find(t.ComputingManager->NetworkInfo.begin(), t.ComputingManager->NetworkInfo.end(), j.Resources.NetworkInfo) == t.ComputingManager->NetworkInfo.end()) { logger.msg(VERBOSE, "Matchmaking, NetworkInfo demand not fulfilled, ExecutionTarget do not support %s, specified in the JobDescription.", j.Resources.NetworkInfo); return false; } else { logger.msg(VERBOSE, "Matchmaking, ExecutionTarget: %s, NetworkInfo is not defined", t.ComputingEndpoint->URLString); return false; } } if (j.Resources.DiskSpaceRequirement.SessionDiskSpace > -1) { if (t.ComputingShare->MaxDiskSpace > -1) { // Example: 5656 if (t.ComputingShare->MaxDiskSpace*1024 < j.Resources.DiskSpaceRequirement.SessionDiskSpace) { logger.msg(VERBOSE, "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); JobDescription: %d MB (SessionDiskSpace)", t.ComputingShare->MaxDiskSpace*1024, j.Resources.DiskSpaceRequirement.SessionDiskSpace); return false; } } if (t.ComputingManager->WorkingAreaFree > -1) { // Example: 5656 if (t.ComputingManager->WorkingAreaFree*1024 < j.Resources.DiskSpaceRequirement.SessionDiskSpace) { logger.msg(VERBOSE, "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB (WorkingAreaFree); JobDescription: %d MB (SessionDiskSpace)", t.ComputingManager->WorkingAreaFree*1024, j.Resources.DiskSpaceRequirement.SessionDiskSpace); return false; } } if (t.ComputingShare->MaxDiskSpace <= -1 && t.ComputingManager->WorkingAreaFree <= -1) { logger.msg(VERBOSE, "Matchmaking, ExecutionTarget: %s, MaxDiskSpace and WorkingAreaFree are not defined", t.ComputingEndpoint->URLString); return false; } } if (j.Resources.DiskSpaceRequirement.DiskSpace.max > -1) { if (t.ComputingShare->MaxDiskSpace > -1) { // Example: 5656 if (t.ComputingShare->MaxDiskSpace*1024 < j.Resources.DiskSpaceRequirement.DiskSpace.max) { logger.msg(VERBOSE, "Matchmaking, 
MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); JobDescription: %d MB (DiskSpace)", t.ComputingShare->MaxDiskSpace*1024, j.Resources.DiskSpaceRequirement.DiskSpace.max); return false; } } if (t.ComputingManager->WorkingAreaFree > -1) { // Example: 5656 if (t.ComputingManager->WorkingAreaFree*1024 < j.Resources.DiskSpaceRequirement.DiskSpace.max) { logger.msg(VERBOSE, "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB (WorkingAreaFree); JobDescription: %d MB (DiskSpace)", t.ComputingManager->WorkingAreaFree*1024, j.Resources.DiskSpaceRequirement.DiskSpace.max); return false; } } if (t.ComputingManager->WorkingAreaFree <= -1 && t.ComputingShare->MaxDiskSpace <= -1) { logger.msg(VERBOSE, "Matchmaking, ExecutionTarget: %s, MaxDiskSpace and WorkingAreaFree are not defined", t.ComputingEndpoint->URLString); return false; } } if (j.Resources.DiskSpaceRequirement.CacheDiskSpace > -1) { if (t.ComputingManager->CacheTotal > -1) { // Example: 5656 if (t.ComputingManager->CacheTotal*1024 < j.Resources.DiskSpaceRequirement.CacheDiskSpace) { logger.msg(VERBOSE, "Matchmaking, CacheTotal problem, ExecutionTarget: %d MB (CacheTotal); JobDescription: %d MB (CacheDiskSpace)", t.ComputingManager->CacheTotal*1024, j.Resources.DiskSpaceRequirement.CacheDiskSpace); return false; } } else { logger.msg(VERBOSE, "Matchmaking, ExecutionTarget: %s, CacheTotal is not defined", t.ComputingEndpoint->URLString); return false; } } if (j.Resources.SlotRequirement.NumberOfSlots != -1) { if (t.ComputingManager->TotalSlots != -1) { // Example: 5656 if (t.ComputingManager->TotalSlots < j.Resources.SlotRequirement.NumberOfSlots) { logger.msg(VERBOSE, "Matchmaking, TotalSlots problem, ExecutionTarget: %d (TotalSlots) JobDescription: %d (NumberOfProcesses)", t.ComputingManager->TotalSlots, j.Resources.SlotRequirement.NumberOfSlots); return false; } } if (t.ComputingShare->MaxSlotsPerJob != -1) { // Example: 5656 if (t.ComputingShare->MaxSlotsPerJob < j.Resources.SlotRequirement.NumberOfSlots) { logger.msg(VERBOSE, "Matchmaking, MaxSlotsPerJob problem, ExecutionTarget: %d (MaxSlotsPerJob) JobDescription: %d (NumberOfProcesses)", t.ComputingShare->MaxSlotsPerJob, j.Resources.SlotRequirement.NumberOfSlots); return false; } } if (t.ComputingManager->TotalSlots == -1 && t.ComputingShare->MaxSlotsPerJob == -1) { logger.msg(VERBOSE, "Matchmaking, ExecutionTarget: %s, TotalSlots and MaxSlotsPerJob are not defined", t.ComputingEndpoint->URLString); return false; } } if ((int)j.Resources.SessionLifeTime.GetPeriod() != -1) { if ((int)t.ComputingManager->WorkingAreaLifeTime.GetPeriod() != -1) { // Example: 123 if (t.ComputingManager->WorkingAreaLifeTime < j.Resources.SessionLifeTime) { logger.msg(VERBOSE, "Matchmaking, WorkingAreaLifeTime problem, ExecutionTarget: %s (WorkingAreaLifeTime) JobDescription: %s (SessionLifeTime)", (std::string)t.ComputingManager->WorkingAreaLifeTime, (std::string)j.Resources.SessionLifeTime); return false; } } else { logger.msg(VERBOSE, "Matchmaking, ExecutionTarget: %s, WorkingAreaLifeTime is not defined", t.ComputingEndpoint->URLString); return false; } } if ((j.Resources.NodeAccess == NAT_INBOUND || j.Resources.NodeAccess == NAT_INOUTBOUND) && !t.ExecutionEnvironment->ConnectivityIn) { // Example: false (boolean) logger.msg(VERBOSE, "Matchmaking, ConnectivityIn problem, ExecutionTarget: %s (ConnectivityIn) JobDescription: %s (InBound)", (j.Resources.NodeAccess == NAT_INBOUND ? "INBOUND" : "INOUTBOUND"), (t.ExecutionEnvironment->ConnectivityIn ? 
"true" : "false")); return false; } if ((j.Resources.NodeAccess == NAT_OUTBOUND || j.Resources.NodeAccess == NAT_INOUTBOUND) && !t.ExecutionEnvironment->ConnectivityOut) { // Example: false (boolean) logger.msg(VERBOSE, "Matchmaking, ConnectivityOut problem, ExecutionTarget: %s (ConnectivityOut) JobDescription: %s (OutBound)", (j.Resources.NodeAccess == NAT_OUTBOUND ? "OUTBOUND" : "INOUTBOUND"), (t.ExecutionEnvironment->ConnectivityIn ? "true" : "false")); return false; } return true; } void ExecutionTargetSorter::addEntities(const std::list& csList) { for (std::list::const_iterator it = csList.begin(); it != csList.end(); ++it) { addEntity(*it); } } void ExecutionTargetSorter::addEntity(const ComputingServiceType& cs) { /* Get ExecutionTarget objects with * ComputingServiceType::GetExecutionTargets method, but first save iterator * to element before end(). Check if the new ExecutionTarget objects matches * and if so insert them in the correct location. */ std::list::iterator it = --targets.second.end(); cs.GetExecutionTargets(targets.second); // Adds ExecutionTarget objects to end of targets.second list. if (b == NULL || !b->isValid()) { logger.msg(DEBUG, "Unable to sort added jobs. The BrokerPlugin plugin has not been loaded."); return; } for (++it; it != targets.second.end();) { if (!reject(*it) && b->match(*it)) { insert(*it); it = targets.second.erase(it); } else { ++it; } } } void ExecutionTargetSorter::addEntity(const ExecutionTarget& et) { if (b == NULL || !b->isValid()) { logger.msg(DEBUG, "Unable to match target, marking it as not matching. Broker not valid."); targets.second.push_back(et); return; } if (!reject(et) && b->match(et)) { insert(et); } else { targets.second.push_back(et); } } void ExecutionTargetSorter::insert(const ExecutionTarget& et) { std::list::iterator insertPosition = targets.first.begin(); for (; insertPosition != targets.first.end(); ++insertPosition) { if ((*b)(et, *insertPosition)) { break; } } targets.first.insert(insertPosition, et); } bool ExecutionTargetSorter::reject(const ExecutionTarget& et) { for (std::list::const_iterator it = rejectEndpoints.begin(); it != rejectEndpoints.end(); ++it) { if (it->StringMatches(et.ComputingEndpoint->URLString)) return true; } return false; } void ExecutionTargetSorter::sort() { targets.second.insert(targets.second.end(), targets.first.begin(), targets.first.end()); targets.first.clear(); if (b == NULL || !b->isValid()) { reset(); logger.msg(DEBUG, "Unable to sort ExecutionTarget objects - Invalid Broker object."); return; } for (std::list::iterator it = targets.second.begin(); it != targets.second.end();) { if (!reject(*it) && b->match(*it)) { insert(*it); it = targets.second.erase(it); } else { ++it; } } reset(); } void ExecutionTargetSorter::registerJobSubmission() { if (endOfList()) { return; } if (b == NULL || !b->isValid()) { logger.msg(DEBUG, "Unable to register job submission. 
Can't get JobDescription object from Broker, Broker is invalid."); return; } current->RegisterJobSubmission(b->getJobDescription()); targets.second.push_back(*current); targets.first.erase(current); if (b->match(targets.second.back())) { insert(targets.second.back()); targets.second.pop_back(); } reset(); } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/ExecutionTarget.h0000644000000000000000000000012412675602216024423 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.691704 30 ctime=1513200659.759744547 nordugrid-arc-5.4.2/src/hed/libs/compute/ExecutionTarget.h0000644000175000002070000005277312675602216024506 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_EXECUTIONTARGET_H__ #define __ARC_EXECUTIONTARGET_H__ /** \file * \brief Structures holding resource information. */ #include #include #include #include #include #include #include #include #include #include #include #include namespace Arc { class Job; class Logger; class SubmitterPlugin; class UserConfig; /* * == Design considerations == * In order to resemble the GLUE2 structure, the vital GLUE2 classes seen from * ARC perspective have a analogue below. However when doing match making, it * is desirable to have a complete and flat layout of a single submission * target configuration. E.g. two ComputingShares in a ComputingService gives * two configurations. Add different submission endpoints and the number of * configurations goes up... In order not to duplicate resource info instances * CountedPointer class is used indirectly in the GLUE2 master class * ComputingServiceType through the GLUE2Entity class, and directly in the * flat ExecutionTarget class (submission target configuration). */ /** * \defgroup resourceinfo Structures holding resource information * \ingroup compute * The listed structures are all used for holding resource information when * doing resource discovery and those structures are read when doing match * making. * * \ingroup compute */ /// ApplicationEnvironment /** * The ApplicationEnviroment is closely related to the definition given in * GLUE2. By extending the Software class the two GLUE2 attributes AppName and * AppVersion are mapped to two private members. However these can be obtained * through the inheriated member methods getName and getVersion. * * GLUE2 description: * A description of installed application software or software environment * characteristics available within one or more Execution Environments. 
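 * During matchmaking (see Broker::genericMatch) the RunTimeEnvironment
 * requirements of a job description are evaluated against the list of
 * ApplicationEnvironment objects published by the target.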
* * \ingroup resourceinfo * \headerfile ExecutionTarget.h arc/compute/ExecutionTarget.h */ class ApplicationEnvironment : public Software { public: ApplicationEnvironment() : FreeSlots(-1), FreeJobs(-1), FreeUserSeats(-1) {} ApplicationEnvironment(const std::string& Name) : Software(Name), FreeSlots(-1), FreeJobs(-1), FreeUserSeats(-1) {} ApplicationEnvironment(const std::string& Name, const std::string& Version) : Software(Name, Version), FreeSlots(-1), FreeJobs(-1), FreeUserSeats(-1) {} ApplicationEnvironment& operator=(const Software& sv) { Software::operator=(sv); return *this; } std::string State; int FreeSlots; int FreeJobs; int FreeUserSeats; }; /** * \ingroup resourceinfo * \headerfile ExecutionTarget.h arc/compute/ExecutionTarget.h */ class LocationAttributes { public: LocationAttributes() : Latitude(0), Longitude(0) {} std::string Address; std::string Place; std::string Country; std::string PostCode; float Latitude; float Longitude; friend std::ostream& operator<<(std::ostream& out, const LocationAttributes& l); }; /** * \ingroup resourceinfo * \headerfile ExecutionTarget.h arc/compute/ExecutionTarget.h */ class AdminDomainAttributes { public: std::string Name; std::string Owner; friend std::ostream& operator<<(std::ostream& out, const AdminDomainAttributes& ad); }; /** * \ingroup resourceinfo * \headerfile ExecutionTarget.h arc/compute/ExecutionTarget.h */ class ExecutionEnvironmentAttributes { public: ExecutionEnvironmentAttributes() : VirtualMachine(false), CPUClockSpeed(-1), MainMemorySize(-1), ConnectivityIn(false), ConnectivityOut(false) {} std::string ID; std::string Platform; bool VirtualMachine; std::string CPUVendor; std::string CPUModel; std::string CPUVersion; int CPUClockSpeed; int MainMemorySize; /// OperatingSystem /** * The OperatingSystem member is not present in GLUE2 but contains the three * GLUE2 attributes OSFamily, OSName and OSVersion. * - OSFamily OSFamily_t 1 * * The general family to which the Execution Environment operating * * system belongs. * - OSName OSName_t 0..1 * * The specific name of the operating sytem * - OSVersion String 0..1 * * The version of the operating system, as defined by the vendor. */ Software OperatingSystem; bool ConnectivityIn; bool ConnectivityOut; friend std::ostream& operator<<(std::ostream&, const ExecutionEnvironmentAttributes&); }; /** * The MappingPolicyAttribtues class maps the vital attributes of the GLUE2 * MappingPolicy class. MappingPolicy can e.g. reflect VO mapping to LRMS * queues (ComputingShare). The ComputingShareType class contains a list of * references to MappingPolicyAttributes objects. * * \since Added in 5.1.0 * \ingroup resourceinfo * \headerfile ExecutionTarget.h arc/compute/ExecutionTarget.h */ class MappingPolicyAttributes { public: MappingPolicyAttributes() {} /// A global unique ID std::string ID; /// Scheme adopted to define the policy rules std::string Scheme; /** * List of policy rules. E.g. 
exact match of DN or VO: * 'dn:/C=XX/O=YYYY/OU=Personal Certificate/L=ZZZZ/CN=NAME SURNAME' or 'vo:/vo_a' **/ std::list Rule; friend std::ostream& operator<<(std::ostream&, const MappingPolicyAttributes&); }; /** * \ingroup resourceinfo * \headerfile ExecutionTarget.h arc/compute/ExecutionTarget.h */ class ComputingManagerAttributes { public: ComputingManagerAttributes() : Reservation(false), BulkSubmission(false), TotalPhysicalCPUs(-1), TotalLogicalCPUs(-1), TotalSlots(-1), Homogeneous(true), WorkingAreaShared(true), WorkingAreaTotal(-1), WorkingAreaFree(-1), WorkingAreaLifeTime(-1), CacheTotal(-1), CacheFree(-1) {} std::string ID; std::string ProductName; std::string ProductVersion; bool Reservation; bool BulkSubmission; int TotalPhysicalCPUs; int TotalLogicalCPUs; int TotalSlots; bool Homogeneous; std::list NetworkInfo; bool WorkingAreaShared; int WorkingAreaTotal; int WorkingAreaFree; Period WorkingAreaLifeTime; int CacheTotal; int CacheFree; friend std::ostream& operator<<(std::ostream&, const ComputingManagerAttributes&); }; /** * \ingroup resourceinfo * \headerfile ExecutionTarget.h arc/compute/ExecutionTarget.h */ class ComputingShareAttributes { public: ComputingShareAttributes() : MaxWallTime(-1), MaxTotalWallTime(-1), MinWallTime(-1), DefaultWallTime(-1), MaxCPUTime(-1), MaxTotalCPUTime(-1), MinCPUTime(-1), DefaultCPUTime(-1), MaxTotalJobs(-1), MaxRunningJobs(-1), MaxWaitingJobs(-1), MaxPreLRMSWaitingJobs(-1), MaxUserRunningJobs(-1), MaxSlotsPerJob(-1), MaxStageInStreams(-1), MaxStageOutStreams(-1), MaxMainMemory(-1), MaxVirtualMemory(-1), MaxDiskSpace(-1), Preemption(false), TotalJobs(-1), RunningJobs(-1), LocalRunningJobs(-1), WaitingJobs(-1), LocalWaitingJobs(-1), SuspendedJobs(-1), LocalSuspendedJobs(-1), StagingJobs(-1), PreLRMSWaitingJobs(-1), EstimatedAverageWaitingTime(-1), EstimatedWorstWaitingTime(-1), FreeSlots(-1), UsedSlots(-1), RequestedSlots(-1) {} std::string ID; /// Name String 0..1 /** * Human-readable name. * This variable represents the ComputingShare.Name attribute of GLUE2. **/ std::string Name; std::string MappingQueue; Period MaxWallTime; Period MaxTotalWallTime; // not in current Glue2 draft Period MinWallTime; Period DefaultWallTime; Period MaxCPUTime; Period MaxTotalCPUTime; Period MinCPUTime; Period DefaultCPUTime; int MaxTotalJobs; int MaxRunningJobs; int MaxWaitingJobs; int MaxPreLRMSWaitingJobs; int MaxUserRunningJobs; int MaxSlotsPerJob; int MaxStageInStreams; int MaxStageOutStreams; std::string SchedulingPolicy; /// MaxMainMemory UInt64 0..1 MB /** * The maximum physical RAM that a job is allowed to use; if the limit is * hit, then the LRMS MAY kill the job. * A negative value specifies that this member is undefined. */ int MaxMainMemory; /// MaxVirtualMemory UInt64 0..1 MB /** * The maximum total memory size (RAM plus swap) that a job is allowed to * use; if the limit is hit, then the LRMS MAY kill the job. * A negative value specifies that this member is undefined. */ int MaxVirtualMemory; /// MaxDiskSpace UInt64 0..1 GB /** * The maximum disk space that a job is allowed use in the working; if the * limit is hit, then the LRMS MAY kill the job. * A negative value specifies that this member is undefined. 
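 * Note that this value is expressed in GB, while the disk space requirements
 * of a job description are expressed in MB; the matchmaker
 * (Broker::genericMatch) therefore multiplies this value by 1024 before
 * comparing.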
*/ int MaxDiskSpace; URL DefaultStorageService; bool Preemption; int TotalJobs; int RunningJobs; int LocalRunningJobs; int WaitingJobs; int LocalWaitingJobs; int SuspendedJobs; int LocalSuspendedJobs; int StagingJobs; int PreLRMSWaitingJobs; Period EstimatedAverageWaitingTime; Period EstimatedWorstWaitingTime; int FreeSlots; /// FreeSlotsWithDuration std::map /** * This attribute express the number of free slots with their time limits. * The keys in the std::map are the time limit (Period) for the number of * free slots stored as the value (int). If no time limit has been specified * for a set of free slots then the key will equal Period(LONG_MAX). */ std::map FreeSlotsWithDuration; int UsedSlots; int RequestedSlots; std::string ReservationPolicy; friend std::ostream& operator<<(std::ostream&, const ComputingShareAttributes&); }; /** * \ingroup resourceinfo * \headerfile ExecutionTarget.h arc/compute/ExecutionTarget.h */ class ComputingEndpointAttributes { public: ComputingEndpointAttributes() : DowntimeStarts(-1), DowntimeEnds(-1), TotalJobs(-1), RunningJobs(-1), WaitingJobs(-1), StagingJobs(-1), SuspendedJobs(-1), PreLRMSWaitingJobs(-1) {} std::string ID; std::string URLString; std::string InterfaceName; std::string HealthState; std::string HealthStateInfo; std::string QualityLevel; std::set Capability; std::string Technology; std::list InterfaceVersion; std::list InterfaceExtension; std::list SupportedProfile; std::string Implementor; Software Implementation; std::string ServingState; std::string IssuerCA; std::list TrustedCA; Time DowntimeStarts; Time DowntimeEnds; std::string Staging; int TotalJobs; int RunningJobs; int WaitingJobs; int StagingJobs; int SuspendedJobs; int PreLRMSWaitingJobs; // This is singular in the GLUE2 doc: JobDescription std::list JobDescriptions; friend std::ostream& operator<<(std::ostream&, const ComputingEndpointAttributes&); }; /** * \ingroup resourceinfo * \headerfile ExecutionTarget.h arc/compute/ExecutionTarget.h */ class ComputingServiceAttributes { public: ComputingServiceAttributes() : TotalJobs(-1), RunningJobs(-1), WaitingJobs(-1), StagingJobs(-1), SuspendedJobs(-1), PreLRMSWaitingJobs(-1) {} std::string ID; std::string Name; std::string Type; std::set Capability; std::string QualityLevel; int TotalJobs; int RunningJobs; int WaitingJobs; int StagingJobs; int SuspendedJobs; int PreLRMSWaitingJobs; // Other Endpoint InformationOriginEndpoint; // this ComputingService was generated while this Endpoint was queried friend std::ostream& operator<<(std::ostream& out, const ComputingServiceAttributes& cs); }; /** * \ingroup resourceinfo * \headerfile ExecutionTarget.h arc/compute/ExecutionTarget.h */ class LocationType : public GLUE2Entity {}; /** * \ingroup resourceinfo * \headerfile ExecutionTarget.h arc/compute/ExecutionTarget.h */ class AdminDomainType : public GLUE2Entity {}; /** * \ingroup resourceinfo * \headerfile ExecutionTarget.h arc/compute/ExecutionTarget.h */ class ExecutionEnvironmentType : public GLUE2Entity {}; /** * Wrapper class for the MappingPolicyAttribtues class. 
* * \since Added in 5.1.0 * \ingroup resourceinfo * \headerfile ExecutionTarget.h arc/compute/ExecutionTarget.h **/ class MappingPolicyType : public GLUE2Entity {}; /** * \ingroup resourceinfo * \headerfile ExecutionTarget.h arc/compute/ExecutionTarget.h */ class ComputingManagerType : public GLUE2Entity { public: ComputingManagerType() : Benchmarks(new std::map), ApplicationEnvironments(new std::list) {} // TODO: Currently using int as key, use std::string instead for holding ID. std::map ExecutionEnvironment; CountedPointer< std::map > Benchmarks; /// ApplicationEnvironments /** * The ApplicationEnvironments member is a list of * ApplicationEnvironment's, defined in section 6.7 GLUE2. */ CountedPointer< std::list > ApplicationEnvironments; }; /** * \ingroup resourceinfo * \headerfile ExecutionTarget.h arc/compute/ExecutionTarget.h */ class ComputingShareType : public GLUE2Entity { public: // TODO: Currently using int, use std::string instead for holding ID. std::set ComputingEndpointIDs; /** * \since Added in 5.1.0 **/ std::map MappingPolicy; }; /** * \ingroup resourceinfo * \headerfile ExecutionTarget.h arc/compute/ExecutionTarget.h */ class ComputingEndpointType : public GLUE2Entity { public: // TODO: Currently using int, use std::string instead for holding ID. std::set ComputingShareIDs; }; /** * \ingroup compute * \ingroup resourceinfo * \headerfile ExecutionTarget.h arc/compute/ExecutionTarget.h */ class ComputingServiceType : public GLUE2Entity { public: template void GetExecutionTargets(T& container) const; LocationType Location; AdminDomainType AdminDomain; // TODO: Currently using int as key, use std::string instead for holding ID. std::map ComputingEndpoint; std::map ComputingShare; std::map ComputingManager; friend std::ostream& operator<<(std::ostream&, const ComputingServiceType&); private: template void AddExecutionTarget(T& container, const ExecutionTarget& et) const; static Logger logger; }; // to avoid "explicit specialization before instantiation" errors template <> void ComputingServiceType::AddExecutionTarget< std::list >(std::list& etList, const ExecutionTarget& et) const; /// ExecutionTarget /** * This class describe a target which accept computing jobs. All of the * members contained in this class, with a few exceptions, are directly * linked to attributes defined in the GLUE Specification v. 2.0 * (GFD-R-P.147). * * \ingroup compute * \ingroup resourceinfo * \headerfile ExecutionTarget.h arc/compute/ExecutionTarget.h */ class ExecutionTarget { public: /// Create an ExecutionTarget /** * Default constructor to create an ExecutionTarget. Takes no * arguments. **/ ExecutionTarget() : Location(new LocationAttributes()), AdminDomain(new AdminDomainAttributes()), ComputingService(new ComputingServiceAttributes()), ComputingEndpoint(new ComputingEndpointAttributes()), ComputingShare(new ComputingShareAttributes()), ComputingManager(new ComputingManagerAttributes()), ExecutionEnvironment(new ExecutionEnvironmentAttributes()), Benchmarks(new std::map()), ApplicationEnvironments(new std::list()) {}; /// Create an ExecutionTarget /** * Copy constructor. * * @param t ExecutionTarget to copy. 
**/ ExecutionTarget(const ExecutionTarget& t) : Location(t.Location), AdminDomain(t.AdminDomain), ComputingService(t.ComputingService), ComputingEndpoint(t.ComputingEndpoint), OtherEndpoints(t.OtherEndpoints), ComputingShare(t.ComputingShare), MappingPolicies(t.MappingPolicies), ComputingManager(t.ComputingManager), ExecutionEnvironment(t.ExecutionEnvironment), Benchmarks(t.Benchmarks), ApplicationEnvironments(t.ApplicationEnvironments) {} /** * \since Changed in 5.1.0. List of MappingPolicyAttributes objects must * also be passed. **/ ExecutionTarget(const CountedPointer& l, const CountedPointer& a, const CountedPointer& cse, const CountedPointer& ce, const std::list< CountedPointer >& oe, const CountedPointer& csh, const std::list< CountedPointer >& mp, const CountedPointer& cm, const CountedPointer& ee, const CountedPointer< std::map >& b, const CountedPointer< std::list >& ae) : Location(l), AdminDomain(a), ComputingService(cse), ComputingEndpoint(ce), OtherEndpoints(oe), ComputingShare(csh), MappingPolicies(mp), ComputingManager(cm), ExecutionEnvironment(ee), Benchmarks(b), ApplicationEnvironments(ae) {} /// Create an ExecutionTarget /** * Copy constructor? Needed from Python? * * @param addrptr * **/ ExecutionTarget(long int addrptr) : Location((*(ExecutionTarget*)addrptr).Location), AdminDomain((*(ExecutionTarget*)addrptr).AdminDomain), ComputingService((*(ExecutionTarget*)addrptr).ComputingService), ComputingEndpoint((*(ExecutionTarget*)addrptr).ComputingEndpoint), OtherEndpoints((*(ExecutionTarget*)addrptr).OtherEndpoints), ComputingShare((*(ExecutionTarget*)addrptr).ComputingShare), MappingPolicies((*(ExecutionTarget*)addrptr).MappingPolicies), ComputingManager((*(ExecutionTarget*)addrptr).ComputingManager), ExecutionEnvironment((*(ExecutionTarget*)addrptr).ExecutionEnvironment), Benchmarks((*(ExecutionTarget*)addrptr).Benchmarks), ApplicationEnvironments((*(ExecutionTarget*)addrptr).ApplicationEnvironments) {} ExecutionTarget& operator=(const ExecutionTarget& et) { Location = et.Location; AdminDomain = et.AdminDomain; ComputingService = et.ComputingService; ComputingEndpoint = et.ComputingEndpoint; ComputingEndpoint = et.ComputingEndpoint; ComputingShare = et.ComputingShare; MappingPolicies = et.MappingPolicies; ComputingManager = et.ComputingManager; Benchmarks = et.Benchmarks; ExecutionEnvironment = et.ExecutionEnvironment; ApplicationEnvironments = et.ApplicationEnvironments; return *this; } ~ExecutionTarget() {}; SubmissionStatus Submit(const UserConfig& ucfg, const JobDescription& jobdesc, Job& job) const; /// Update ExecutionTarget after succesful job submission /** * Method to update the ExecutionTarget after a job successfully * has been submitted to the computing resource it * represents. E.g. if a job is sent to the computing resource and * is expected to enter the queue, then the WaitingJobs attribute * is incremented with 1. * * @param jobdesc contains all information about the job * submitted. **/ void RegisterJobSubmission(const JobDescription& jobdesc) const; /// Print the ExecutionTarget information /** * Method to print the ExecutionTarget attributes to a std::ostream object. * * @param out the std::ostream to print the attributes to. * @param et ExecutionTarget from which to obtain information * @return the input ostream object is returned. 
**/ friend std::ostream& operator<<(std::ostream& out, const ExecutionTarget& et); static void GetExecutionTargets(const std::list& csList, std::list& etList); CountedPointer Location; CountedPointer AdminDomain; CountedPointer ComputingService; CountedPointer ComputingEndpoint; std::list< CountedPointer > OtherEndpoints; CountedPointer ComputingShare; std::list< CountedPointer > MappingPolicies; CountedPointer ComputingManager; CountedPointer ExecutionEnvironment; CountedPointer< std::map > Benchmarks; CountedPointer< std::list > ApplicationEnvironments; private: static Logger logger; }; } // namespace Arc #endif // __ARC_EXECUTIONTARGET_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/JobInformationStorageBDB.h0000644000000000000000000000012412600456737026071 xustar000000000000000027 mtime=1442995679.066184 27 atime=1513200574.746704 30 ctime=1513200659.784744853 nordugrid-arc-5.4.2/src/hed/libs/compute/JobInformationStorageBDB.h0000644000175000002070000000467012600456737026145 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOBINFORMATIONSTORAGEBDB_H__ #define __ARC_JOBINFORMATIONSTORAGEBDB_H__ #include #include "JobInformationStorage.h" namespace Arc { class JobInformationStorageBDB : public JobInformationStorage { public: JobInformationStorageBDB(const std::string& name, unsigned nTries = 10, unsigned tryInterval = 500000); virtual ~JobInformationStorageBDB() {} static JobInformationStorage* Instance(const std::string& name) { return new JobInformationStorageBDB(name); } bool ReadAll(std::list& jobs, const std::list& rejectEndpoints = std::list()); bool Read(std::list& jobs, std::list& jobIdentifiers, const std::list& endpoints = std::list(), const std::list& rejectEndpoints = std::list()); bool Write(const std::list& jobs); bool Write(const std::list& jobs, const std::set& prunedServices, std::list& newJobs); bool Clean(); bool Remove(const std::list& jobids); private: static void logErrorMessage(int err); static Logger logger; class JobDB { public: JobDB(const std::string&, u_int32_t = DB_RDONLY); ~JobDB(); void tearDown(); #if ((DB_VERSION_MAJOR > 4)||(DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 3)) static void handleError(const DbEnv *dbenv, const char *errpfx, const char *msg); #else static void handleError(const char *errpfx, char *msg); #endif Db* operator->() { return jobDB; } Db* viaNameKeys() { return nameSecondaryKeyDB; } Db* viaEndpointKeys() { return endpointSecondaryKeyDB; } Db* viaServiceInfoKeys() { return serviceInfoSecondaryKeyDB; } DbEnv *dbEnv; Db *jobDB; Db *endpointSecondaryKeyDB; Db *nameSecondaryKeyDB; Db *serviceInfoSecondaryKeyDB; private: std::string tmpdir; }; class BDBException { public: BDBException(const std::string& msg, int ret, bool writeLogMessage = true) throw(); ~BDBException() throw() {} const std::string& getMessage() const throw() { return message; } int getReturnValue() const throw() { return returnvalue; } private: std::string message; int returnvalue; }; }; } // namespace Arc #endif // __ARC_JOBINFORMATIONSTORAGEBDB_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/GLUE2.cpp0000644000000000000000000000012312136222434022451 xustar000000000000000026 mtime=1366893852.84965 27 atime=1513200574.738704 30 ctime=1513200659.802745073 nordugrid-arc-5.4.2/src/hed/libs/compute/GLUE2.cpp0000644000175000002070000005426412136222434022532 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include "GLUE2.h" namespace Arc { Logger 
GLUE2::logger(Logger::getRootLogger(), "GLUE2"); static bool CheckConformingDN(const std::string& dn) { // /name=value/name/... // Implementing very simplified check std::string::size_type pos = 0; if((dn[pos] != '/') && (dn.find(',') == std::string::npos)) return false; //while(pos < dn.length()) { // std::string::size_type ppos = dn.find('/',pos+1); // if(ppos == std::string::npos) ppos = dn.length(); // std::string item = dn.substr(pso+1,ppos-pos-1); // std::string::size_type epos = item.find('='); //} return true; } void GLUE2::ParseExecutionTargets(XMLNode glue2tree, std::list& targets) { XMLNode GLUEService = glue2tree; if(GLUEService.Name() != "ComputingService") { GLUEService = glue2tree["ComputingService"]; } for (; GLUEService; ++GLUEService) { ComputingServiceType cs; if (GLUEService["ID"]) { cs->ID = (std::string)GLUEService["ID"]; } if (GLUEService["Name"]) { cs->Name = (std::string)GLUEService["Name"]; } if (GLUEService["Capability"]) { for (XMLNode n = GLUEService["Capability"]; n; ++n) { cs->Capability.insert((std::string)n); } } if (GLUEService["Type"]) { cs->Type = (std::string)GLUEService["Type"]; } else { logger.msg(VERBOSE, "The Service doesn't advertise its Type."); } if (GLUEService["QualityLevel"]) { cs->QualityLevel = (std::string)GLUEService["QualityLevel"]; } else { logger.msg(VERBOSE, "The ComputingService doesn't advertise its Quality Level."); } if (GLUEService["TotalJobs"]) { cs->TotalJobs = stringtoi((std::string)GLUEService["TotalJobs"]); } if (GLUEService["RunningJobs"]) { cs->RunningJobs = stringtoi((std::string)GLUEService["RunningJobs"]); } if (GLUEService["WaitingJobs"]) { cs->WaitingJobs = stringtoi((std::string)GLUEService["WaitingJobs"]); } if (GLUEService["StagingJobs"]) { cs->StagingJobs = stringtoi((std::string)GLUEService["StagingJobs"]); } if (GLUEService["SuspendedJobs"]) { cs->SuspendedJobs = stringtoi((std::string)GLUEService["SuspendedJobs"]); } if (GLUEService["PreLRMSWaitingJobs"]) { cs->PreLRMSWaitingJobs = stringtoi((std::string)GLUEService["PreLRMSWaitingJobs"]); } // The GLUE2 specification does not have attribute ComputingService.LocalRunningJobs //if (GLUEService["LocalRunningJobs"]) { // cs->LocalRunningJobs = stringtoi((std::string)GLUEService["LocalRunningJobs"]); //} // The GLUE2 specification does not have attribute ComputingService.LocalWaitingJobs //if (GLUEService["LocalWaitingJobs"]) { // cs->LocalWaitingJobs = stringtoi((std::string)GLUEService["LocalWaitingJobs"]); //} // The GLUE2 specification does not have attribute ComputingService.LocalSuspendedJobs //if (GLUEService["LocalSuspendedJobs"]) { // cs->LocalWaitingJobs = stringtoi((std::string)GLUEService["LocalSuspendedJobs"]); //} XMLNode xmlCENode = GLUEService["ComputingEndpoint"]; int endpointID = 0; for(;(bool)xmlCENode;++xmlCENode) { ComputingEndpointType ComputingEndpoint; if (xmlCENode["URL"]) { ComputingEndpoint->URLString = (std::string)xmlCENode["URL"]; } else { logger.msg(VERBOSE, "The ComputingEndpoint has no URL."); } if (xmlCENode["HealthState"]) { ComputingEndpoint->HealthState = (std::string)xmlCENode["HealthState"]; } else { logger.msg(VERBOSE, "The Service advertises no Health State."); } if (xmlCENode["HealthStateInfo"]) { ComputingEndpoint->HealthStateInfo = (std::string)xmlCENode["HealthStateInfo"]; } if (xmlCENode["Capability"]) { for (XMLNode n = xmlCENode["Capability"]; n; ++n) { ComputingEndpoint->Capability.insert((std::string)n); } } if (xmlCENode["QualityLevel"]) { ComputingEndpoint->QualityLevel = (std::string)xmlCENode["QualityLevel"]; } 
else { logger.msg(VERBOSE, "The ComputingEndpoint doesn't advertise its Quality Level."); } if (xmlCENode["Technology"]) { ComputingEndpoint->Technology = (std::string)xmlCENode["Technology"]; } if (xmlCENode["InterfaceName"]) { ComputingEndpoint->InterfaceName = lower((std::string)xmlCENode["InterfaceName"]); } else if (xmlCENode["Interface"]) { // No such attribute according to GLUE2 document. Legacy/backward compatibility? ComputingEndpoint->InterfaceName = lower((std::string)xmlCENode["Interface"]); } else { logger.msg(VERBOSE, "The ComputingService doesn't advertise its Interface."); } if (xmlCENode["InterfaceVersion"]) { for (XMLNode n = xmlCENode["InterfaceVersion"]; n; ++n) { ComputingEndpoint->InterfaceVersion.push_back((std::string)n); } } if (xmlCENode["InterfaceExtension"]) { for (XMLNode n = xmlCENode["InterfaceExtension"]; n; ++n) { ComputingEndpoint->InterfaceExtension.push_back((std::string)n); } } if (xmlCENode["SupportedProfile"]) { for (XMLNode n = xmlCENode["SupportedProfile"]; n; ++n) { ComputingEndpoint->SupportedProfile.push_back((std::string)n); } } if (xmlCENode["Implementor"]) { ComputingEndpoint->Implementor = (std::string)xmlCENode["Implementor"]; } if (xmlCENode["ImplementationName"]) { if (xmlCENode["ImplementationVersion"]) { ComputingEndpoint->Implementation = Software((std::string)xmlCENode["ImplementationName"], (std::string)xmlCENode["ImplementationVersion"]); } else { ComputingEndpoint->Implementation = Software((std::string)xmlCENode["ImplementationName"]); } } if (xmlCENode["ServingState"]) { ComputingEndpoint->ServingState = (std::string)xmlCENode["ServingState"]; } else { logger.msg(VERBOSE, "The ComputingEndpoint doesn't advertise its Serving State."); } if (xmlCENode["IssuerCA"]) { ComputingEndpoint->IssuerCA = (std::string)xmlCENode["IssuerCA"]; } if (xmlCENode["TrustedCA"]) { XMLNode n = xmlCENode["TrustedCA"]; while (n) { // Workaround to drop non-conforming records generated by EGI services std::string subject = (std::string)n; if(CheckConformingDN(subject)) { ComputingEndpoint->TrustedCA.push_back(subject); } ++n; //The increment operator works in an unusual manner (returns void) } } if (xmlCENode["DowntimeStart"]) { ComputingEndpoint->DowntimeStarts = (std::string)xmlCENode["DowntimeStart"]; } if (xmlCENode["DowntimeEnd"]) { ComputingEndpoint->DowntimeEnds = (std::string)xmlCENode["DowntimeEnd"]; } if (xmlCENode["Staging"]) { ComputingEndpoint->Staging = (std::string)xmlCENode["Staging"]; } if (xmlCENode["JobDescription"]) { for (XMLNode n = xmlCENode["JobDescription"]; n; ++n) { ComputingEndpoint->JobDescriptions.push_back((std::string)n); } } if (xmlCENode["TotalJobs"]) { ComputingEndpoint->TotalJobs = stringtoi((std::string)xmlCENode["TotalJobs"]); } if (xmlCENode["RunningJobs"]) { ComputingEndpoint->RunningJobs = stringtoi((std::string)xmlCENode["RunningJobs"]); } if (xmlCENode["WaitingJobs"]) { ComputingEndpoint->WaitingJobs = stringtoi((std::string)xmlCENode["WaitingJobs"]); } if (xmlCENode["StagingJobs"]) { ComputingEndpoint->StagingJobs = stringtoi((std::string)xmlCENode["StagingJobs"]); } if (xmlCENode["SuspendedJobs"]) { ComputingEndpoint->SuspendedJobs = stringtoi((std::string)xmlCENode["SuspendedJobs"]); } if (xmlCENode["PreLRMSWaitingJobs"]) { ComputingEndpoint->PreLRMSWaitingJobs = stringtoi((std::string)xmlCENode["PreLRMSWaitingJobs"]); } // The GLUE2 specification does not have attribute ComputingEndpoint.LocalRunningJobs //if (xmlCENode["LocalRunningJobs"]) { // ComputingEndpoint->LocalRunningJobs = 
stringtoi((std::string)xmlCENode["LocalRunningJobs"]); //} // The GLUE2 specification does not have attribute ComputingEndpoint.LocalWaitingJobs //if (xmlCENode["LocalWaitingJobs"]) { // ComputingEndpoint->LocalWaitingJobs = stringtoi((std::string)xmlCENode["LocalWaitingJobs"]); //} // The GLUE2 specification does not have attribute ComputingEndpoint.LocalSuspendedJobs //if (xmlCENode["LocalSuspendedJobs"]) { // ComputingEndpoint->LocalSuspendedJobs = stringtoi((std::string)xmlCENode["LocalSuspendedJobs"]); //} cs.ComputingEndpoint.insert(std::pair(endpointID++, ComputingEndpoint)); } XMLNode xComputingShare = GLUEService["ComputingShare"]; int shareID = 0; for (;(bool)xComputingShare;++xComputingShare) { ComputingShareType ComputingShare; if (xComputingShare["FreeSlots"]) { ComputingShare->FreeSlots = stringtoi((std::string)xComputingShare["FreeSlots"]); } if (xComputingShare["FreeSlotsWithDuration"]) { // Format: ns[:t] [ns[:t]]..., where ns is number of slots and t is the duration. ComputingShare->FreeSlotsWithDuration.clear(); const std::string fswdValue = (std::string)xComputingShare["FreeSlotsWithDuration"]; std::list fswdList; tokenize(fswdValue, fswdList); for (std::list::iterator it = fswdList.begin(); it != fswdList.end(); it++) { std::list fswdPair; tokenize(*it, fswdPair, ":"); long duration = LONG_MAX; int freeSlots = 0; if (fswdPair.size() > 2 || !stringto(fswdPair.front(), freeSlots) || (fswdPair.size() == 2 && !stringto(fswdPair.back(), duration)) ) { logger.msg(VERBOSE, "The \"FreeSlotsWithDuration\" attribute published by \"%s\" is wrongly formatted. Ignoring it."); logger.msg(DEBUG, "Wrong format of the \"FreeSlotsWithDuration\" = \"%s\" (\"%s\")", fswdValue, *it); continue; } ComputingShare->FreeSlotsWithDuration[Period(duration)] = freeSlots; } } if (xComputingShare["UsedSlots"]) { ComputingShare->UsedSlots = stringtoi((std::string)xComputingShare["UsedSlots"]); } if (xComputingShare["RequestedSlots"]) { ComputingShare->RequestedSlots = stringtoi((std::string)xComputingShare["RequestedSlots"]); } if (xComputingShare["Name"]) { ComputingShare->Name = (std::string)xComputingShare["Name"]; } if (xComputingShare["MaxWallTime"]) { ComputingShare->MaxWallTime = (std::string)xComputingShare["MaxWallTime"]; } if (xComputingShare["MaxTotalWallTime"]) { ComputingShare->MaxTotalWallTime = (std::string)xComputingShare["MaxTotalWallTime"]; } if (xComputingShare["MinWallTime"]) { ComputingShare->MinWallTime = (std::string)xComputingShare["MinWallTime"]; } if (xComputingShare["DefaultWallTime"]) { ComputingShare->DefaultWallTime = (std::string)xComputingShare["DefaultWallTime"]; } if (xComputingShare["MaxCPUTime"]) { ComputingShare->MaxCPUTime = (std::string)xComputingShare["MaxCPUTime"]; } if (xComputingShare["MaxTotalCPUTime"]) { ComputingShare->MaxTotalCPUTime = (std::string)xComputingShare["MaxTotalCPUTime"]; } if (xComputingShare["MinCPUTime"]) { ComputingShare->MinCPUTime = (std::string)xComputingShare["MinCPUTime"]; } if (xComputingShare["DefaultCPUTime"]) { ComputingShare->DefaultCPUTime = (std::string)xComputingShare["DefaultCPUTime"]; } if (xComputingShare["MaxTotalJobs"]) { ComputingShare->MaxTotalJobs = stringtoi((std::string)xComputingShare["MaxTotalJobs"]); } if (xComputingShare["MaxRunningJobs"]) { ComputingShare->MaxRunningJobs = stringtoi((std::string)xComputingShare["MaxRunningJobs"]); } if (xComputingShare["MaxWaitingJobs"]) { ComputingShare->MaxWaitingJobs = stringtoi((std::string)xComputingShare["MaxWaitingJobs"]); } if (xComputingShare["MaxPreLRMSWaitingJobs"]) 
{ ComputingShare->MaxPreLRMSWaitingJobs = stringtoi((std::string)xComputingShare["MaxPreLRMSWaitingJobs"]); } if (xComputingShare["MaxUserRunningJobs"]) { ComputingShare->MaxUserRunningJobs = stringtoi((std::string)xComputingShare["MaxUserRunningJobs"]); } if (xComputingShare["MaxSlotsPerJob"]) { ComputingShare->MaxSlotsPerJob = stringtoi((std::string)xComputingShare["MaxSlotsPerJob"]); } if (xComputingShare["MaxStageInStreams"]) { ComputingShare->MaxStageInStreams = stringtoi((std::string)xComputingShare["MaxStageInStreams"]); } if (xComputingShare["MaxStageOutStreams"]) { ComputingShare->MaxStageOutStreams = stringtoi((std::string)xComputingShare["MaxStageOutStreams"]); } if (xComputingShare["SchedulingPolicy"]) { ComputingShare->SchedulingPolicy = (std::string)xComputingShare["SchedulingPolicy"]; } if (xComputingShare["MaxMainMemory"]) { ComputingShare->MaxMainMemory = stringtoi((std::string)xComputingShare["MaxMainMemory"]); } if (xComputingShare["MaxVirtualMemory"]) { ComputingShare->MaxVirtualMemory = stringtoi((std::string)xComputingShare["MaxVirtualMemory"]); } if (xComputingShare["MaxDiskSpace"]) { ComputingShare->MaxDiskSpace = stringtoi((std::string)xComputingShare["MaxDiskSpace"]); } if (xComputingShare["DefaultStorageService"]) { ComputingShare->DefaultStorageService = (std::string)xComputingShare["DefaultStorageService"]; } if (xComputingShare["Preemption"]) { ComputingShare->Preemption = ((std::string)xComputingShare["Preemption"] == "true") ? true : false; } if (xComputingShare["EstimatedAverageWaitingTime"]) { ComputingShare->EstimatedAverageWaitingTime = (std::string)xComputingShare["EstimatedAverageWaitingTime"]; } if (xComputingShare["EstimatedWorstWaitingTime"]) { ComputingShare->EstimatedWorstWaitingTime = stringtoi((std::string)xComputingShare["EstimatedWorstWaitingTime"]); } if (xComputingShare["ReservationPolicy"]) { ComputingShare->ReservationPolicy = stringtoi((std::string)xComputingShare["ReservationPolicy"]); } cs.ComputingShare.insert(std::pair(shareID++, ComputingShare)); } /* * A ComputingShare is linked to multiple ExecutionEnvironments. * Due to bug 2101 multiple ExecutionEnvironments per ComputingShare * will be ignored. The ExecutionEnvironment information will only be * stored if there is one ExecutionEnvironment associated with a * ComputingShare. */ /* * TODO: Store ExecutionEnvironment information in the list of * ExecutionEnvironmentType objects and issue a warning when the * resources published in multiple ExecutionEnvironment are * requested in a job description document. 
*/ int managerID = 0; for (XMLNode xComputingManager = GLUEService["ComputingManager"]; (bool)xComputingManager; ++xComputingManager) { ComputingManagerType ComputingManager; if (xComputingManager["ProductName"]) { ComputingManager->ProductName = (std::string)xComputingManager["ProductName"]; } // The GlUE2 specification does not have attribute ComputingManager.Type //if (xComputingManager["Type"]) { // ComputingManager->Type = (std::string)xComputingManager["Type"]; //} if (xComputingManager["ProductVersion"]) { ComputingManager->ProductVersion = (std::string)xComputingManager["ProductVersion"]; } if (xComputingManager["Reservation"]) { ComputingManager->Reservation = ((std::string)xComputingManager["Reservation"] == "true"); } if (xComputingManager["BulkSubmission"]) { ComputingManager->BulkSubmission = ((std::string)xComputingManager["BulkSubmission"] == "true"); } if (xComputingManager["TotalPhysicalCPUs"]) { ComputingManager->TotalPhysicalCPUs = stringtoi((std::string)xComputingManager["TotalPhysicalCPUs"]); } if (xComputingManager["TotalLogicalCPUs"]) { ComputingManager->TotalLogicalCPUs = stringtoi((std::string)xComputingManager["TotalLogicalCPUs"]); } if (xComputingManager["TotalSlots"]) { ComputingManager->TotalSlots = stringtoi((std::string)xComputingManager["TotalSlots"]); } if (xComputingManager["Homogeneous"]) { ComputingManager->Homogeneous = ((std::string)xComputingManager["Homogeneous"] == "true"); } if (xComputingManager["NetworkInfo"]) { for (XMLNode n = xComputingManager["NetworkInfo"]; n; ++n) { ComputingManager->NetworkInfo.push_back((std::string)n); } } if (xComputingManager["WorkingAreaShared"]) { ComputingManager->WorkingAreaShared = ((std::string)xComputingManager["WorkingAreaShared"] == "true"); } if (xComputingManager["WorkingAreaFree"]) { ComputingManager->WorkingAreaFree = stringtoi((std::string)xComputingManager["WorkingAreaFree"]); } if (xComputingManager["WorkingAreaTotal"]) { ComputingManager->WorkingAreaTotal = stringtoi((std::string)xComputingManager["WorkingAreaTotal"]); } if (xComputingManager["WorkingAreaLifeTime"]) { ComputingManager->WorkingAreaLifeTime = (std::string)xComputingManager["WorkingAreaLifeTime"]; } if (xComputingManager["CacheFree"]) { ComputingManager->CacheFree = stringtoi((std::string)xComputingManager["CacheFree"]); } if (xComputingManager["CacheTotal"]) { ComputingManager->CacheTotal = stringtoi((std::string)xComputingManager["CacheTotal"]); } for (XMLNode n = xComputingManager["Benchmark"]; n; ++n) { double value; if (n["Type"] && n["Value"] && stringto((std::string)n["Value"], value)) { (*ComputingManager.Benchmarks)[(std::string)n["Type"]] = value; } else { logger.msg(VERBOSE, "Couldn't parse benchmark XML:\n%s", (std::string)n); continue; } } for (XMLNode n = xComputingManager["ApplicationEnvironments"]["ApplicationEnvironment"]; n; ++n) { ApplicationEnvironment ae((std::string)n["AppName"], (std::string)n["AppVersion"]); ae.State = (std::string)n["State"]; if (n["FreeSlots"]) { ae.FreeSlots = stringtoi((std::string)n["FreeSlots"]); } //else { // ae.FreeSlots = ComputingShare->FreeSlots; // Non compatible??, i.e. a ComputingShare is unrelated to the ApplicationEnvironment. 
//} if (n["FreeJobs"]) { ae.FreeJobs = stringtoi((std::string)n["FreeJobs"]); } else { ae.FreeJobs = -1; } if (n["FreeUserSeats"]) { ae.FreeUserSeats = stringtoi((std::string)n["FreeUserSeats"]); } else { ae.FreeUserSeats = -1; } ComputingManager.ApplicationEnvironments->push_back(ae); } int eeID = 0; for (XMLNode xExecutionEnvironment = xComputingManager["ExecutionEnvironments"]["ExecutionEnvironment"]; (bool)xExecutionEnvironment; ++xExecutionEnvironment) { ExecutionEnvironmentType ExecutionEnvironment; if (xExecutionEnvironment["Platform"]) { ExecutionEnvironment->Platform = (std::string)xExecutionEnvironment["Platform"]; } if (xExecutionEnvironment["MainMemorySize"]) { ExecutionEnvironment->MainMemorySize = stringtoi((std::string)xExecutionEnvironment["MainMemorySize"]); } if (xExecutionEnvironment["OSName"]) { if (xExecutionEnvironment["OSVersion"]) { if (xExecutionEnvironment["OSFamily"]) { ExecutionEnvironment->OperatingSystem = Software((std::string)xExecutionEnvironment["OSFamily"], (std::string)xExecutionEnvironment["OSName"], (std::string)xExecutionEnvironment["OSVersion"]); } else { ExecutionEnvironment->OperatingSystem = Software((std::string)xExecutionEnvironment["OSName"], (std::string)xExecutionEnvironment["OSVersion"]); } } else { ExecutionEnvironment->OperatingSystem = Software((std::string)xExecutionEnvironment["OSName"]); } } if (xExecutionEnvironment["ConnectivityIn"]) { ExecutionEnvironment->ConnectivityIn = (lower((std::string)xExecutionEnvironment["ConnectivityIn"]) == "true"); } if (xExecutionEnvironment["ConnectivityOut"]) { ExecutionEnvironment->ConnectivityOut = (lower((std::string)xExecutionEnvironment["ConnectivityOut"]) == "true"); } ComputingManager.ExecutionEnvironment.insert(std::pair(eeID++, ExecutionEnvironment)); } cs.ComputingManager.insert(std::pair(managerID++, ComputingManager)); } targets.push_back(cs); } } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/JobControllerPlugin.cpp0000644000000000000000000000012112675602216025576 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.772705 27 ctime=1513200659.796745 nordugrid-arc-5.4.2/src/hed/libs/compute/JobControllerPlugin.cpp0000644000175000002070000001223112675602216025645 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include "JobControllerPlugin.h" namespace Arc { Logger JobControllerPlugin::logger(Logger::getRootLogger(), "JobControllerPlugin"); std::map JobControllerPluginLoader::interfacePluginMap; void JobControllerPlugin::UpdateJobs(std::list& jobs, bool isGrouped) const { std::list idsProcessed, idsNotProcessed; return UpdateJobs(jobs, idsProcessed, idsNotProcessed, isGrouped); }; bool JobControllerPlugin::CleanJobs(const std::list& jobs, bool isGrouped) const { std::list idsProcessed, idsNotProcessed; return CleanJobs(jobs, idsProcessed, idsNotProcessed, isGrouped); } bool JobControllerPlugin::CancelJobs(const std::list& jobs, bool isGrouped) const { std::list idsProcessed, idsNotProcessed; return CancelJobs(jobs, idsProcessed, idsNotProcessed, isGrouped); } bool JobControllerPlugin::RenewJobs(const std::list& jobs, bool isGrouped) const { std::list idsProcessed, idsNotProcessed; return RenewJobs(jobs, idsProcessed, idsNotProcessed, isGrouped); } bool JobControllerPlugin::ResumeJobs(const std::list& jobs, bool isGrouped) const { std::list idsProcessed, idsNotProcessed; 
return ResumeJobs(jobs, idsProcessed, idsNotProcessed, isGrouped); } JobControllerPluginLoader::JobControllerPluginLoader() : Loader(BaseConfig().MakeConfig(Config()).Parent()) {} JobControllerPluginLoader::~JobControllerPluginLoader() { for (std::multimap::iterator it = jobcontrollers.begin(); it != jobcontrollers.end(); it++) delete it->second; } void JobControllerPluginLoader::initialiseInterfacePluginMap(const UserConfig& uc) { std::list modules; PluginsFactory factory(BaseConfig().MakeConfig(Config()).Parent()); factory.scan(FinderLoader::GetLibrariesList(), modules); PluginsFactory::FilterByKind("HED:JobControllerPlugin", modules); std::list availablePlugins; for (std::list::const_iterator it = modules.begin(); it != modules.end(); ++it) { for (std::list::const_iterator it2 = it->plugins.begin(); it2 != it->plugins.end(); ++it2) { availablePlugins.push_back(it2->name); } } if (interfacePluginMap.empty()) { // Map supported interfaces to available plugins. for (std::list::iterator itT = availablePlugins.begin(); itT != availablePlugins.end(); ++itT) { JobControllerPlugin* p = load(*itT, uc); if (!p) { continue; } for (std::list::const_iterator itI = p->SupportedInterfaces().begin(); itI != p->SupportedInterfaces().end(); ++itI) { if (!itT->empty()) { // Do not allow empty interface. // If two plugins supports two identical interfaces, then only the last will appear in the map. interfacePluginMap[*itI] = *itT; } } } } } JobControllerPlugin* JobControllerPluginLoader::load(const std::string& name, const UserConfig& uc) { if (name.empty()) return NULL; if(!factory_->load(FinderLoader::GetLibrariesList(), "HED:JobControllerPlugin", name)) { logger.msg(ERROR, "Unable to locate the \"%s\" plugin. Please refer to installation instructions and check if package providing support for \"%s\" plugin is installed", name, name); logger.msg(DEBUG, "JobControllerPlugin plugin \"%s\" not found.", name); return NULL; } JobControllerPluginArgument arg(uc); JobControllerPlugin *jobcontroller = factory_->GetInstance("HED:JobControllerPlugin", name, &arg, false); if (!jobcontroller) { logger.msg(ERROR, "Unable to locate the \"%s\" plugin. 
Please refer to installation instructions and check if package providing support for \"%s\" plugin is installed", name, name); logger.msg(DEBUG, "JobControllerPlugin %s could not be created", name); return NULL; } jobcontrollers.insert(std::pair(name, jobcontroller)); logger.msg(DEBUG, "Loaded JobControllerPlugin %s", name); return jobcontroller; } JobControllerPlugin* JobControllerPluginLoader::loadByInterfaceName(const std::string& name, const UserConfig& uc) { if (interfacePluginMap.empty()) { initialiseInterfacePluginMap(uc); } std::map::const_iterator itPN = interfacePluginMap.find(name); if (itPN != interfacePluginMap.end()) { std::map::iterator itJC = jobcontrollers.find(itPN->second); if (itJC != jobcontrollers.end()) { itJC->second->SetUserConfig(uc); return itJC->second; } return load(itPN->second, uc); } return NULL; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/ComputingServiceRetriever.cpp0000644000000000000000000000012412051001721027000 xustar000000000000000027 mtime=1352926161.488223 27 atime=1513200574.688704 30 ctime=1513200659.809745159 nordugrid-arc-5.4.2/src/hed/libs/compute/ComputingServiceRetriever.cpp0000644000175000002070000000640312051001721027050 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include "ComputingServiceRetriever.h" namespace Arc { Logger ComputingServiceRetriever::logger(Logger::getRootLogger(), "ComputingServiceRetriever"); Logger ComputingServiceUniq::logger(Logger::getRootLogger(), "ComputingServiceUniq"); void ComputingServiceUniq::addEntity(const ComputingServiceType& service) { if (!service->ID.empty()) { // We check all the previously added services for(std::list::iterator it = services.begin(); it != services.end(); it++) { // We take the first one which has the same ID if ((*it)->ID == service->ID) { std::map priority; priority["org.nordugrid.ldapglue2"] = 3; priority["org.ogf.glue.emies.resourceinfo"] = 2; priority["org.nordugrid.wsrfglue2"] = 1; // If the new service has higher priority, we replace the previous one with the same ID, otherwise we ignore it if (priority[service->InformationOriginEndpoint.InterfaceName] > priority[(*it)->InformationOriginEndpoint.InterfaceName]) { logger.msg(DEBUG, "Uniq is replacing service coming from %s with service coming from %s", (*it)->InformationOriginEndpoint.str(), service->InformationOriginEndpoint.str()); (*it) = service; return; } else { logger.msg(DEBUG, "Uniq is ignoring service coming from %s", service->InformationOriginEndpoint.str()); return; } } } } // If none of the previously added services have the same ID, then we add this one as a new service logger.msg(DEBUG, "Uniq is adding service coming from %s", service->InformationOriginEndpoint.str()); services.push_back(service); } ComputingServiceRetriever::ComputingServiceRetriever( const UserConfig& uc, const std::list& services, const std::list& rejectedServices, const std::set& preferredInterfaceNames, const std::list& capabilityFilter ) : ser(uc, EndpointQueryOptions(true, capabilityFilter, rejectedServices)), tir(uc, EndpointQueryOptions(preferredInterfaceNames)) { ser.addConsumer(*this); tir.addConsumer(*this); for (std::list::const_iterator it = services.begin(); it != services.end(); it++) { addEndpoint(*it); } } void ComputingServiceRetriever::addEndpoint(const Endpoint& service) { // If we got a computing element info endpoint, then we pass it to the TIR if (service.HasCapability(Endpoint::COMPUTINGINFO)) { logger.msg(DEBUG, "Adding endpoint 
(%s) to TargetInformationRetriever", service.URLString); tir.addEndpoint(service); } else if (service.HasCapability(Endpoint::REGISTRY)) { logger.msg(DEBUG, "Adding endpoint (%s) to ServiceEndpointRetriever", service.URLString); ser.addEndpoint(service); } else if (service.HasCapability(Endpoint::UNSPECIFIED)) { // Try adding endpoint to both. logger.msg(DEBUG, "Adding endpoint (%s) to both ServiceEndpointRetriever and TargetInformationRetriever", service.URLString); tir.addEndpoint(service); ser.addEndpoint(service); } } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/JobInformationStorageSQLite.cpp0000644000000000000000000000012313065017622027165 xustar000000000000000026 mtime=1490296722.42801 27 atime=1513200574.698704 30 ctime=1513200659.813745208 nordugrid-arc-5.4.2/src/hed/libs/compute/JobInformationStorageSQLite.cpp0000644000175000002070000004332713065017622027244 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "JobInformationStorageSQLite.h" namespace Arc { Logger JobInformationStorageSQLite::logger(Logger::getRootLogger(), "JobInformationStorageSQLite"); static const std::string sql_special_chars("'#\r\n\b\0",6); static const char sql_escape_char('%'); static const Arc::escape_type sql_escape_type(Arc::escape_hex); inline static std::string sql_escape(const std::string& str) { return Arc::escape_chars(str, sql_special_chars, sql_escape_char, false, sql_escape_type); } inline static std::string sql_unescape(const std::string& str) { return Arc::unescape_chars(str, sql_escape_char,sql_escape_type); } int sqlite3_exec_nobusy(sqlite3* db, const char *sql, int (*callback)(void*,int,char**,char**), void *arg, char **errmsg) { int err; while((err = sqlite3_exec(db, sql, callback, arg, errmsg)) == SQLITE_BUSY) { // Access to database is designed in such way that it should not block for long time. // So it should be safe to simply wait for lock to be released without any timeout. 
struct timespec delay = { 0, 10000000 }; // 0.01s - should be enough for most cases (void)::nanosleep(&delay, NULL); }; return err; } JobInformationStorageSQLite::JobDB::JobDB(const std::string& name, bool create): jobDB(NULL) { int err; int flags = SQLITE_OPEN_READWRITE; // it will open read-only if access is protected if(create) flags |= SQLITE_OPEN_CREATE; while((err = sqlite3_open_v2(name.c_str(), &jobDB, flags, NULL)) == SQLITE_BUSY) { // In case something prevents database from open right now - retry if(jobDB) (void)sqlite3_close(jobDB); jobDB = NULL; struct timespec delay = { 0, 10000000 }; // 0.01s - should be enough for most cases (void)::nanosleep(&delay, NULL); } if(err != SQLITE_OK) { handleError(NULL, err); tearDown(); throw SQLiteException(IString("Unable to create data base (%s)", name).str(), err); } if(create) { err = sqlite3_exec_nobusy(jobDB, "CREATE TABLE IF NOT EXISTS jobs(" "id, idfromendpoint, name, statusinterface, statusurl, " "managementinterfacename, managementurl, " "serviceinformationinterfacename, serviceinformationurl, serviceinformationhost, " "sessiondir, stageindir, stageoutdir, " "descriptiondocument, localsubmissiontime, delegationid, UNIQUE(id))", NULL, NULL, NULL); if(err != SQLITE_OK) { handleError(NULL, err); tearDown(); throw SQLiteException(IString("Unable to create jobs table in data base (%s)", name).str(), err); } err = sqlite3_exec_nobusy(jobDB, "CREATE INDEX IF NOT EXISTS serviceinformationhost ON jobs(serviceinformationhost)", NULL, NULL, NULL); if(err != SQLITE_OK) { handleError(NULL, err); tearDown(); throw SQLiteException(IString("Unable to create index for jobs table in data base (%s)", name).str(), err); } } else { // SQLite opens database in lazy way. But we still want to know if it is good database. err = sqlite3_exec_nobusy(jobDB, "PRAGMA schema_version;", NULL, NULL, NULL); if(err != SQLITE_OK) { handleError(NULL, err); tearDown(); throw SQLiteException(IString("Failed checking database (%s)", name).str(), err); } } JobInformationStorageSQLite::logger.msg(DEBUG, "Job database created successfully (%s)", name); } void JobInformationStorageSQLite::JobDB::tearDown() { if (jobDB) { (void)sqlite3_close(jobDB); jobDB = NULL; } } JobInformationStorageSQLite::JobDB::~JobDB() { tearDown(); } void JobInformationStorageSQLite::JobDB::handleError(const char* errpfx, int err) { #ifdef HAVE_SQLITE3_ERRSTR std::string msg = sqlite3_errstr(err); #else std::string msg = "error code "+Arc::tostring(err); #endif if (errpfx) { JobInformationStorageSQLite::logger.msg(DEBUG, "Error from SQLite: %s: %s", errpfx, msg); } else { JobInformationStorageSQLite::logger.msg(DEBUG, "Error from SQLite: %s", msg); } } JobInformationStorageSQLite::SQLiteException::SQLiteException(const std::string& msg, int ret, bool writeLogMessage) throw() : message(msg), returnvalue(ret) { if (writeLogMessage) { JobInformationStorageSQLite::logger.msg(VERBOSE, msg); JobInformationStorageSQLite::logErrorMessage(ret); } } JobInformationStorageSQLite::JobInformationStorageSQLite(const std::string& name, unsigned nTries, unsigned tryInterval) : JobInformationStorage(name, nTries, tryInterval) { isValid = false; isStorageExisting = false; if (!Glib::file_test(name, Glib::FILE_TEST_EXISTS)) { const std::string joblistdir = Glib::path_get_dirname(name); // Check if the parent directory exist. 
if (!Glib::file_test(joblistdir, Glib::FILE_TEST_EXISTS)) { logger.msg(ERROR, "Job list file cannot be created: The parent directory (%s) doesn't exist.", joblistdir); return; } else if (!Glib::file_test(joblistdir, Glib::FILE_TEST_IS_DIR)) { logger.msg(ERROR, "Job list file cannot be created: %s is not a directory", joblistdir); return; } isValid = true; return; } else if (!Glib::file_test(name, Glib::FILE_TEST_IS_REGULAR)) { logger.msg(ERROR, "Job list file (%s) is not a regular file", name); return; } try { JobDB db(name); } catch (const SQLiteException& e) { isValid = false; return; } isStorageExisting = isValid = true; } struct ListJobsCallbackArg { std::list& ids; ListJobsCallbackArg(std::list& ids):ids(ids) { }; }; static int ListJobsCallback(void* arg, int colnum, char** texts, char** names) { ListJobsCallbackArg& carg = *reinterpret_cast(arg); for(int n = 0; n < colnum; ++n) { if(names[n] && texts[n]) { if(strcmp(names[n], "id") == 0) { carg.ids.push_back(texts[n]); } } } return 0; } bool JobInformationStorageSQLite::Write(const std::list& jobs, const std::set& prunedServices, std::list& newJobs) { std::string empty_string; if (!isValid) { return false; } if (jobs.empty()) return true; try { JobDB db(name, true); // Identify jobs to remove std::list prunedIds; ListJobsCallbackArg prunedArg(prunedIds); for (std::set::const_iterator itPruned = prunedServices.begin(); itPruned != prunedServices.end(); ++itPruned) { std::string sqlcmd = "SELECT id FROM jobs WHERE (serviceinformationhost = '" + *itPruned + "')"; (void)sqlite3_exec_nobusy(db.handle(), sqlcmd.c_str(), &ListJobsCallback, &prunedArg, NULL); } // Filter out jobs to be modified if(!jobs.empty()) { for(std::list::iterator itId = prunedIds.begin(); itId != prunedIds.end();) { bool found = false; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { if(it->JobID == *itId) { found = true; break; } } if(found) { itId = prunedIds.erase(itId); } else { ++itId; } } } // Remove identified jobs for(std::list::iterator itId = prunedIds.begin(); itId != prunedIds.end(); ++itId) { std::string sqlcmd = "DELETE FROM jobs WHERE (id = '" + *itId + "')"; (void)sqlite3_exec_nobusy(db.handle(), sqlcmd.c_str(), &ListJobsCallback, &prunedArg, NULL); } // Add new jobs for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { std::string sqlvalues = "jobs(" "id, idfromendpoint, name, statusinterface, statusurl, " "managementinterfacename, managementurl, " "serviceinformationinterfacename, serviceinformationurl, serviceinformationhost, " "sessiondir, stageindir, stageoutdir, " "descriptiondocument, localsubmissiontime, delegationid " ") VALUES ('"+ sql_escape(it->JobID)+"', '"+ sql_escape(it->IDFromEndpoint)+"', '"+ sql_escape(it->Name)+"', '"+ sql_escape(it->JobStatusInterfaceName)+"', '"+ sql_escape(it->JobStatusURL.fullstr())+"', '"+ sql_escape(it->JobManagementInterfaceName)+"', '"+ sql_escape(it->JobManagementURL.fullstr())+"', '"+ sql_escape(it->ServiceInformationInterfaceName)+"', '"+ sql_escape(it->ServiceInformationURL.fullstr())+"', '"+ sql_escape(it->ServiceInformationURL.Host())+"', '"+ sql_escape(it->SessionDir.fullstr())+"', '"+ sql_escape(it->StageInDir.fullstr())+"', '"+ sql_escape(it->StageOutDir.fullstr())+"', '"+ sql_escape(it->JobDescriptionDocument)+"', '"+ sql_escape(tostring(it->LocalSubmissionTime.GetTime()))+"', '"+ sql_escape(it->DelegationID.size()>0?*(it->DelegationID.begin()):empty_string)+"')"; bool new_job = true; int err = sqlite3_exec_nobusy(db.handle(), ("INSERT OR IGNORE INTO 
" + sqlvalues).c_str(), NULL, NULL, NULL); if(err != SQLITE_OK) { logger.msg(VERBOSE, "Unable to write records into job database (%s): Id \"%s\"", name, it->JobID); logErrorMessage(err); return false; } if(sqlite3_changes(db.handle()) == 0) { err = sqlite3_exec_nobusy(db.handle(), ("REPLACE INTO " + sqlvalues).c_str(), NULL, NULL, NULL); if(err != SQLITE_OK) { logger.msg(VERBOSE, "Unable to write records into job database (%s): Id \"%s\"", name, it->JobID); logErrorMessage(err); return false; } new_job = false; } if(sqlite3_changes(db.handle()) != 1) { logger.msg(VERBOSE, "Unable to write records into job database (%s): Id \"%s\"", name, it->JobID); logErrorMessage(err); return false; } if(new_job) newJobs.push_back(&(*it)); } } catch (const SQLiteException& e) { return false; } return true; } struct ReadJobsCallbackArg { std::list& jobs; std::list* jobIdentifiers; const std::list* endpoints; const std::list* rejectEndpoints; std::list jobIdentifiersMatched; ReadJobsCallbackArg(std::list& jobs, std::list* jobIdentifiers, const std::list* endpoints, const std::list* rejectEndpoints): jobs(jobs), jobIdentifiers(jobIdentifiers), endpoints(endpoints), rejectEndpoints(rejectEndpoints) {}; }; static int ReadJobsCallback(void* arg, int colnum, char** texts, char** names) { ReadJobsCallbackArg& carg = *reinterpret_cast(arg); carg.jobs.push_back(Job()); bool accept = false; bool drop = false; for(int n = 0; n < colnum; ++n) { if(names[n] && texts[n]) { if(strcmp(names[n], "id") == 0) { carg.jobs.back().JobID = sql_unescape(texts[n]); if(carg.jobIdentifiers) { for(std::list::iterator it = carg.jobIdentifiers->begin(); it != carg.jobIdentifiers->end(); ++it) { if(*it == carg.jobs.back().JobID) { accept = true; carg.jobIdentifiersMatched.push_back(*it); break; } } } else { accept = true; } } else if(strcmp(names[n], "idfromendpoint") == 0) { carg.jobs.back().IDFromEndpoint = sql_unescape(texts[n]); } else if(strcmp(names[n], "name") == 0) { carg.jobs.back().Name = sql_unescape(texts[n]); if(carg.jobIdentifiers) { for(std::list::iterator it = carg.jobIdentifiers->begin(); it != carg.jobIdentifiers->end(); ++it) { if(*it == carg.jobs.back().Name) { accept = true; carg.jobIdentifiersMatched.push_back(*it); break; } } } else { accept = true; } } else if(strcmp(names[n], "statusinterface") == 0) { carg.jobs.back().JobStatusInterfaceName = sql_unescape(texts[n]); } else if(strcmp(names[n], "statusurl") == 0) { carg.jobs.back().JobStatusURL = sql_unescape(texts[n]); } else if(strcmp(names[n], "managementinterfacename") == 0) { carg.jobs.back().JobManagementInterfaceName = sql_unescape(texts[n]); } else if(strcmp(names[n], "managementurl") == 0) { carg.jobs.back().JobManagementURL = sql_unescape(texts[n]); if(carg.rejectEndpoints) { for (std::list::const_iterator it = carg.rejectEndpoints->begin(); it != carg.rejectEndpoints->end(); ++it) { if (carg.jobs.back().JobManagementURL.StringMatches(*it)) { drop = true; break; } } } if(carg.endpoints) { for (std::list::const_iterator it = carg.endpoints->begin(); it != carg.endpoints->end(); ++it) { if (carg.jobs.back().JobManagementURL.StringMatches(*it)) { accept = true; break; } } } } else if(strcmp(names[n], "serviceinformationinterfacename") == 0) { carg.jobs.back().ServiceInformationInterfaceName = sql_unescape(texts[n]); } else if(strcmp(names[n], "serviceinformationurl") == 0) { carg.jobs.back().ServiceInformationURL = sql_unescape(texts[n]); } else if(strcmp(names[n], "sessiondir") == 0) { carg.jobs.back().SessionDir = sql_unescape(texts[n]); } else 
if(strcmp(names[n], "stageindir") == 0) { carg.jobs.back().StageInDir = sql_unescape(texts[n]); } else if(strcmp(names[n], "stageoutdir") == 0) { carg.jobs.back().StageOutDir = sql_unescape(texts[n]); } else if(strcmp(names[n], "descriptiondocument") == 0) { carg.jobs.back().JobDescriptionDocument = sql_unescape(texts[n]); } else if(strcmp(names[n], "localsubmissiontime") == 0) { carg.jobs.back().LocalSubmissionTime.SetTime(stringtoi(sql_unescape(texts[n]))); } else if(strcmp(names[n], "delegationid") == 0) { carg.jobs.back().DelegationID.push_back(sql_unescape(texts[n])); } } } if(drop || !accept) { carg.jobs.pop_back(); } return 0; } bool JobInformationStorageSQLite::ReadAll(std::list& jobs, const std::list& rejectEndpoints) { if (!isValid) { return false; } jobs.clear(); try { int ret; JobDB db(name); std::string sqlcmd = "SELECT * FROM jobs"; ReadJobsCallbackArg carg(jobs, NULL, NULL, &rejectEndpoints); int err = sqlite3_exec_nobusy(db.handle(), sqlcmd.c_str(), &ReadJobsCallback, &carg, NULL); if(err != SQLITE_OK) { // handle error ?? return false; } } catch (const SQLiteException& e) { return false; } return true; } bool JobInformationStorageSQLite::Read(std::list& jobs, std::list& jobIdentifiers, const std::list& endpoints, const std::list& rejectEndpoints) { if (!isValid) { return false; } jobs.clear(); try { int ret; JobDB db(name); std::string sqlcmd = "SELECT * FROM jobs"; ReadJobsCallbackArg carg(jobs, &jobIdentifiers, &endpoints, &rejectEndpoints); int err = sqlite3_exec_nobusy(db.handle(), sqlcmd.c_str(), &ReadJobsCallback, &carg, NULL); if(err != SQLITE_OK) { // handle error ?? return false; } carg.jobIdentifiersMatched.sort(); carg.jobIdentifiersMatched.unique(); for(std::list::iterator itMatched = carg.jobIdentifiersMatched.begin(); itMatched != carg.jobIdentifiersMatched.end(); ++itMatched) { jobIdentifiers.remove(*itMatched); } } catch (const SQLiteException& e) { return false; } return true; } bool JobInformationStorageSQLite::Clean() { if (!isValid) { return false; } if (remove(name.c_str()) != 0) { if (errno == ENOENT) return true; // No such file. DB already cleaned. 
logger.msg(VERBOSE, "Unable to truncate job database (%s)", name); perror("Error"); return false; } return true; } bool JobInformationStorageSQLite::Remove(const std::list& jobids) { if (!isValid) { return false; } try { JobDB db(name, true); for (std::list::const_iterator it = jobids.begin(); it != jobids.end(); ++it) { std::string sqlcmd = "DELETE FROM jobs WHERE (id = '"+sql_escape(*it)+"')"; int err = sqlite3_exec_nobusy(db.handle(), sqlcmd.c_str(), NULL, NULL, NULL); if(err != SQLITE_OK) { } else if(sqlite3_changes(db.handle()) < 1) { } } } catch (const SQLiteException& e) { return false; } return true; } void JobInformationStorageSQLite::logErrorMessage(int err) { switch (err) { default: logger.msg(DEBUG, "Unable to determine error (%d)", err); } } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/test_jobdescription.cpp0000644000000000000000000000012312301125744025710 xustar000000000000000027 mtime=1392815076.546455 27 atime=1513200574.676703 29 ctime=1513200659.81474522 nordugrid-arc-5.4.2/src/hed/libs/compute/test_jobdescription.cpp0000644000175000002070000000773012301125744025765 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include int main(int argc, char **argv) { Arc::LogStream logcerr(std::cerr); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::OptionParser options(istring("[job description ...]"), istring("This tiny tool can be used for testing " "the JobDescription's conversion abilities."), istring("The job description also can be a file or a string in JDL, POSIX JSDL, JSDL, or XRSL format.")); std::string requested_format = ""; options.AddOption('f', "format", istring("define the requested format (nordugrid:jsdl, egee:jdl, nordugrid:xrsl, emies:adl)"), istring("format"), requested_format); bool show_original_description = false; options.AddOption('o', "original", istring("show the original job description"), show_original_description); std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); std::list descriptions = options.Parse(argc, argv); if (descriptions.empty()) { std::cout << Arc::IString("Use --help option for detailed usage information") << std::endl; return 1; } if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug)); std::cout << Arc::IString(" [ JobDescription tester ] ") << std::endl; for (std::list::iterator it = descriptions.begin(); it != descriptions.end(); it++) { struct stat stFileInfo; int intStat; std::string original_description; // Attempt to get the file attributes intStat = stat((*it).c_str(), &stFileInfo); if (intStat == 0) { // We were able to get the file attributes // so the file obviously exists. 
std::ifstream ifs; std::string buffer; ifs.open((*it).c_str(), std::ios::in); while (std::getline(ifs, buffer)) original_description += buffer + "\n"; } else original_description = (*it); if (requested_format == "egee:jdl" || requested_format == "nordugrid:jsdl" || requested_format == "nordugrid:xrsl" || requested_format == "emies:adl" || requested_format == "") { if (show_original_description) { std::cout << std::endl << Arc::IString(" [ Parsing the original text ] ") << std::endl << std::endl; std::cout << original_description << std::endl; } std::list jds; if (!Arc::JobDescription::Parse(original_description, jds) || jds.empty()) { std::cout << Arc::IString("Unable to parse.") << std::endl; return 1; } std::string jobdesc; if (requested_format == "") { jds.front().SaveToStream(std::cout, "userlong"); jds.front().UnParse(jobdesc, "egee:jdl"); std::cout << std::endl << Arc::IString(" [ egee:jdl ] ") << std::endl << jobdesc << std::endl; jds.front().UnParse(jobdesc, "emies:adl"); std::cout << std::endl << Arc::IString(" [ emies:adl ] ") << std::endl << jobdesc << std::endl; jds.front().UnParse(jobdesc, "nordugrid:jsdl"); std::cout << std::endl << Arc::IString(" [ nordugrid:jsdl ] ") << std::endl << jobdesc << std::endl; jds.front().UnParse(jobdesc, "nordugrid:xrsl"); std::cout << std::endl << Arc::IString(" [ nordugrid:xrsl ] ") << std::endl << jobdesc << std::endl; } else { jds.front().UnParse(jobdesc, requested_format); std::cout << std::endl << " [ " << requested_format << " ] " << std::endl << jobdesc << std::endl; } } } return 0; } nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/Job.h0000644000000000000000000000012412306304733022015 xustar000000000000000027 mtime=1394182619.331424 27 atime=1513200574.690704 30 ctime=1513200659.766744633 nordugrid-arc-5.4.2/src/hed/libs/compute/Job.h0000644000175000002070000002320012306304733022057 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOB_H__ #define __ARC_JOB_H__ #include #include #include #include #include namespace Arc { class DataHandle; class JobControllerPlugin; class JobControllerPluginLoader; class JobSupervisor; class Logger; class UserConfig; class XMLNode; /// Job /** * This class describe a Grid job. The class contains public accessible * member attributes and methods for dealing with a Grid job. Most of the * member attributes contained in this class are directly linked to the * ComputingActivity defined in the GLUE Specification v. 2.0 (GFD-R-P.147). * * \ingroup compute * \headerfile Job.h arc/compute/Job.h */ class Job { friend class JobSupervisor; public: /// Create a Job object /** * Default constructor. Takes no arguments. */ Job(); ~Job(); Job(const Job& job); Job(XMLNode job); // Proposed mandatory attributes for ARC 3.0 std::string JobID; std::string Name; URL ServiceInformationURL; std::string ServiceInformationInterfaceName; URL JobStatusURL; std::string JobStatusInterfaceName; URL JobManagementURL; std::string JobManagementInterfaceName; URL StageInDir; URL StageOutDir; URL SessionDir; std::string Type; std::string IDFromEndpoint; std::string LocalIDFromManager; /// Language of job description describing job /** * Equivalent to the GLUE2 ComputingActivity entity JobDescription (open * enumeration), which here is represented by a string. */ std::string JobDescription; /// Job description document describing job /** * No GLUE2 entity equivalent. Should hold the job description document * which was submitted to the computing service for this job. 
*/ std::string JobDescriptionDocument; JobState State; JobState RestartState; int ExitCode; std::string ComputingManagerExitCode; std::list Error; int WaitingPosition; std::string UserDomain; std::string Owner; std::string LocalOwner; Period RequestedTotalWallTime; Period RequestedTotalCPUTime; int RequestedSlots; std::list RequestedApplicationEnvironment; std::string StdIn; std::string StdOut; std::string StdErr; std::string LogDir; std::list ExecutionNode; std::string Queue; Period UsedTotalWallTime; Period UsedTotalCPUTime; int UsedMainMemory; Time LocalSubmissionTime; Time SubmissionTime; Time ComputingManagerSubmissionTime; Time StartTime; Time ComputingManagerEndTime; Time EndTime; Time WorkingAreaEraseTime; Time ProxyExpirationTime; std::string SubmissionHost; std::string SubmissionClientName; Time CreationTime; Period Validity; std::list OtherMessages; //Associations std::list ActivityOldID; std::map LocalInputFiles; /** * This member is not a part of GLUE2. * \since Added in 4.1.0. **/ std::list DelegationID; enum ResourceType { STDIN, STDOUT, STDERR, STAGEINDIR, STAGEOUTDIR, SESSIONDIR, JOBLOG, JOBDESCRIPTION }; /// Write job information to a std::ostream object /** * This method will write job information to the passed std::ostream object. * The longlist boolean specifies whether more (true) or less (false) * information should be printed. * * @param out is the std::ostream object to print the attributes to. * @param longlist is a boolean for switching on long listing (more * details). **/ void SaveToStream(std::ostream& out, bool longlist) const; /// Set Job attributes from an XMLNode /** * The attributes of the Job object are set to the values specified in the * XMLNode. The XMLNode should be a ComputingActivity type using the GLUE2 * XML hierarchical rendering, see * http://forge.gridforum.org/sf/wiki/do/viewPage/projects.glue-wg/wiki/GLUE2XMLSchema * for more information. Note that associations are not parsed. * * @param job is an XMLNode of GLUE2 ComputingActivity type. * @see ToXML **/ Job& operator=(XMLNode job); Job& operator=(const Job& job); int operator==(const Job& other); /// Set Job attributes from an XMLNode representing a GLUE2 ComputingActivity /** * Because the job XML representation follows the GLUE2 model, this method is * similar to operator=(XMLNode). However, it only covers job attributes which * are part of the GLUE2 ComputingActivity. It also treats the Job object as * being extended with the information provided by the XMLNode. In contrast, operator=(XMLNode) * fully reinitializes the Job, hence removing any associations to other objects. **/ void SetFromXML(XMLNode job); /// Add job information to an XMLNode /** * Child nodes of GLUE ComputingActivity type containing job information of * this object will be added to the passed XMLNode. * * @param job is the XMLNode to add job information to in the form of * GLUE2 ComputingActivity type child nodes.
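 *
 * A minimal round-trip sketch (illustrative only; it assumes an existing
 * Job object named job):
 * \code
 * Arc::XMLNode xml("<ComputingActivity/>");
 * job.ToXML(xml);           // add the job attributes as GLUE2 child nodes
 * Arc::Job restored;
 * restored.SetFromXML(xml); // merge the attributes back into a Job object
 * \endcode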
* @see operator= **/ void ToXML(XMLNode job) const; bool PrepareHandler(const UserConfig& uc); bool Update(); bool Clean(); bool Cancel(); bool Resume(); bool Renew(); bool GetURLToResource(ResourceType resource, URL& url) const; bool Retrieve(const UserConfig& uc, const URL& destination, bool force) const; static bool CopyJobFile(const UserConfig& uc, const URL& src, const URL& dst); static bool ListFilesRecursive(const UserConfig& uc, const URL& dir, std::list& files) { files.clear(); return ListFilesRecursive(uc, dir, files, ""); } static bool CompareJobID(const Job& a, const Job& b) { return a.JobID.compare(b.JobID) < 0; } static bool CompareSubmissionTime(const Job& a, const Job& b) { return a.SubmissionTime < b.SubmissionTime; } static bool CompareJobName(const Job& a, const Job& b) { return a.Name.compare(b.Name) < 0; } /// Read a list of Job IDs from a file, and append them to a list /** * This static method will read job IDs from the given file, and append the * strings to the string list given as parameter. File locking will be done * as described for the ReadAllJobsFromFile method. It returns false if the * file was not readable, true otherwise, even if there were no IDs in the * file. The lines of the file will be trimmed, and lines starting with # * will be ignored. * * @param filename is the filename of the jobidfile * @param jobids is a list of strings, to which the IDs read from the file * will be appended * @param nTries specifies the maximal number of times the method will try * to acquire a lock on file to read. * @param tryInterval specifies the interval (in micro seconds) between each * attempt to acquire a lock. * @return true in case of success, otherwise false. **/ static bool ReadJobIDsFromFile(const std::string& filename, std::list& jobids, unsigned nTries = 10, unsigned tryInterval = 500000); /// Append a jobID to a file /** * This static method will put the ID represented by a URL object, and * append it to the given file. File locking will be done as described for * the ReadAllJobsFromFile method. It returns false if the file is not * writable, true otherwise. * * @param jobid is a jobID as a URL object * @param filename is the filename of the jobidfile, where the jobID will be * appended * @param nTries specifies the maximal number of times the method will try * to acquire a lock on file to read. * @param tryInterval specifies the interval (in micro seconds) between each * attempt to acquire a lock. * @return true in case of success, otherwise false. **/ static bool WriteJobIDToFile(const std::string& jobid, const std::string& filename, unsigned nTries = 10, unsigned tryInterval = 500000); /// Append list of URLs to a file /** * This static method will put the ID given as a string, and append it to * the given file. File locking will be done as described for the * ReadAllJobsFromFile method. It returns false if the file was not * writable, true otherwise. * * @param jobids is a list of URL objects to be written to file * @param filename is the filename of file, where the URL objects will be * appended to. * @param nTries specifies the maximal number of times the method will try * to acquire a lock on file to read. * @param tryInterval specifies the interval (in micro seconds) between each * attempt to acquire a lock. * @return true in case of success, otherwise false. 
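 *
 * A minimal sketch of the job ID file helpers documented above
 * (illustrative only; the job ID and file name are made-up examples):
 * \code
 * std::list<std::string> ids;
 * if (Arc::Job::WriteJobIDToFile("https://example.org/arex/123456", "jobs.id") &&
 *     Arc::Job::ReadJobIDsFromFile("jobs.id", ids)) {
 *   // ids now contains all IDs stored in the file, including the one above
 * }
 * \endcode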
**/ static bool WriteJobIDsToFile(const std::list& jobids, const std::string& filename, unsigned nTries = 10, unsigned tryInterval = 500000); static bool WriteJobIDsToFile(const std::list& jobs, const std::string& filename, unsigned nTries = 10, unsigned tryInterval = 500000); private: static bool ListFilesRecursive(const UserConfig& uc, const URL& dir, std::list& files, const std::string& prefix); JobControllerPlugin* jc; static JobControllerPluginLoader& getLoader(); // Objects might be pointing to allocated memory upon termination, leave it as garbage. static DataHandle *data_source, *data_destination; static Logger logger; }; } // namespace Arc #endif // __ARC_JOB_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/examples0000644000000000000000000000013213214316023022664 xustar000000000000000030 mtime=1513200659.882746052 30 atime=1513200668.721854157 30 ctime=1513200659.882746052 nordugrid-arc-5.4.2/src/hed/libs/compute/examples/0000755000175000002070000000000013214316023023007 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/libs/compute/examples/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712771224504025012 xustar000000000000000027 mtime=1474636100.231249 30 atime=1513200596.609972201 30 ctime=1513200659.879746015 nordugrid-arc-5.4.2/src/hed/libs/compute/examples/Makefile.am0000644000175000002070000000136612771224504025062 0ustar00mockbuildmock00000000000000check_PROGRAMS = basic_job_submission job_selector basic_job_submission_SOURCES = basic_job_submission.cpp basic_job_submission_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) basic_job_submission_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarccompute.la $(GLIBMM_LIBS) job_selector_SOURCES = job_selector.cpp job_selector_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) job_selector_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarccompute.la $(GLIBMM_LIBS) exampledir = $(pkgdatadir)/examples/sdk example_DATA = basic_job_submission.cpp job_selector.cpp EXTRA_DIST = helloworld.xrsl nordugrid-arc-5.4.2/src/hed/libs/compute/examples/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315724025015 xustar000000000000000030 mtime=1513200596.656972776 30 atime=1513200647.832598674 30 ctime=1513200659.880746027 nordugrid-arc-5.4.2/src/hed/libs/compute/examples/Makefile.in0000644000175000002070000007071113214315724025071 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ check_PROGRAMS = basic_job_submission$(EXEEXT) job_selector$(EXEEXT) subdir = src/hed/libs/compute/examples DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am_basic_job_submission_OBJECTS = \ basic_job_submission-basic_job_submission.$(OBJEXT) basic_job_submission_OBJECTS = $(am_basic_job_submission_OBJECTS) am__DEPENDENCIES_1 = basic_job_submission_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarccompute.la $(am__DEPENDENCIES_1) basic_job_submission_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(basic_job_submission_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_job_selector_OBJECTS = job_selector-job_selector.$(OBJEXT) job_selector_OBJECTS = $(am_job_selector_OBJECTS) job_selector_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarccompute.la $(am__DEPENDENCIES_1) job_selector_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(job_selector_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(basic_job_submission_SOURCES) $(job_selector_SOURCES) DIST_SOURCES = $(basic_job_submission_SOURCES) $(job_selector_SOURCES) am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 
's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(exampledir)" DATA = $(example_DATA) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ 
GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ 
XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ basic_job_submission_SOURCES = basic_job_submission.cpp basic_job_submission_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) basic_job_submission_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarccompute.la $(GLIBMM_LIBS) job_selector_SOURCES = job_selector.cpp job_selector_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) job_selector_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../libarccompute.la $(GLIBMM_LIBS) exampledir = $(pkgdatadir)/examples/sdk example_DATA = basic_job_submission.cpp job_selector.cpp EXTRA_DIST = helloworld.xrsl all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign 
src/hed/libs/compute/examples/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/libs/compute/examples/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-checkPROGRAMS: @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list basic_job_submission$(EXEEXT): $(basic_job_submission_OBJECTS) $(basic_job_submission_DEPENDENCIES) @rm -f basic_job_submission$(EXEEXT) $(basic_job_submission_LINK) $(basic_job_submission_OBJECTS) $(basic_job_submission_LDADD) $(LIBS) job_selector$(EXEEXT): $(job_selector_OBJECTS) $(job_selector_DEPENDENCIES) @rm -f job_selector$(EXEEXT) $(job_selector_LINK) $(job_selector_OBJECTS) $(job_selector_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/basic_job_submission-basic_job_submission.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/job_selector-job_selector.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< basic_job_submission-basic_job_submission.o: basic_job_submission.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(basic_job_submission_CXXFLAGS) $(CXXFLAGS) -MT basic_job_submission-basic_job_submission.o -MD -MP -MF $(DEPDIR)/basic_job_submission-basic_job_submission.Tpo -c -o basic_job_submission-basic_job_submission.o `test -f 'basic_job_submission.cpp' || echo '$(srcdir)/'`basic_job_submission.cpp @am__fastdepCXX_TRUE@ $(am__mv) 
$(DEPDIR)/basic_job_submission-basic_job_submission.Tpo $(DEPDIR)/basic_job_submission-basic_job_submission.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='basic_job_submission.cpp' object='basic_job_submission-basic_job_submission.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(basic_job_submission_CXXFLAGS) $(CXXFLAGS) -c -o basic_job_submission-basic_job_submission.o `test -f 'basic_job_submission.cpp' || echo '$(srcdir)/'`basic_job_submission.cpp basic_job_submission-basic_job_submission.obj: basic_job_submission.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(basic_job_submission_CXXFLAGS) $(CXXFLAGS) -MT basic_job_submission-basic_job_submission.obj -MD -MP -MF $(DEPDIR)/basic_job_submission-basic_job_submission.Tpo -c -o basic_job_submission-basic_job_submission.obj `if test -f 'basic_job_submission.cpp'; then $(CYGPATH_W) 'basic_job_submission.cpp'; else $(CYGPATH_W) '$(srcdir)/basic_job_submission.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/basic_job_submission-basic_job_submission.Tpo $(DEPDIR)/basic_job_submission-basic_job_submission.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='basic_job_submission.cpp' object='basic_job_submission-basic_job_submission.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(basic_job_submission_CXXFLAGS) $(CXXFLAGS) -c -o basic_job_submission-basic_job_submission.obj `if test -f 'basic_job_submission.cpp'; then $(CYGPATH_W) 'basic_job_submission.cpp'; else $(CYGPATH_W) '$(srcdir)/basic_job_submission.cpp'; fi` job_selector-job_selector.o: job_selector.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(job_selector_CXXFLAGS) $(CXXFLAGS) -MT job_selector-job_selector.o -MD -MP -MF $(DEPDIR)/job_selector-job_selector.Tpo -c -o job_selector-job_selector.o `test -f 'job_selector.cpp' || echo '$(srcdir)/'`job_selector.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/job_selector-job_selector.Tpo $(DEPDIR)/job_selector-job_selector.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='job_selector.cpp' object='job_selector-job_selector.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(job_selector_CXXFLAGS) $(CXXFLAGS) -c -o job_selector-job_selector.o `test -f 'job_selector.cpp' || echo '$(srcdir)/'`job_selector.cpp job_selector-job_selector.obj: job_selector.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(job_selector_CXXFLAGS) $(CXXFLAGS) -MT job_selector-job_selector.obj -MD -MP -MF $(DEPDIR)/job_selector-job_selector.Tpo -c -o job_selector-job_selector.obj `if test -f 'job_selector.cpp'; then $(CYGPATH_W) 'job_selector.cpp'; else $(CYGPATH_W) '$(srcdir)/job_selector.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/job_selector-job_selector.Tpo $(DEPDIR)/job_selector-job_selector.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='job_selector.cpp' object='job_selector-job_selector.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) 
$(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(job_selector_CXXFLAGS) $(CXXFLAGS) -c -o job_selector-job_selector.obj `if test -f 'job_selector.cpp'; then $(CYGPATH_W) 'job_selector.cpp'; else $(CYGPATH_W) '$(srcdir)/job_selector.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-exampleDATA: $(example_DATA) @$(NORMAL_INSTALL) test -z "$(exampledir)" || $(MKDIR_P) "$(DESTDIR)$(exampledir)" @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(exampledir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(exampledir)" || exit $$?; \ done uninstall-exampleDATA: @$(NORMAL_UNINSTALL) @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(exampledir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(exampledir)" && rm -f $$files ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(exampledir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-checkPROGRAMS clean-generic clean-libtool \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-exampleDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-exampleDATA .MAKE: check-am install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean \ clean-checkPROGRAMS clean-generic clean-libtool ctags \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exampleDATA \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-exampleDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/libs/compute/examples/PaxHeaders.7502/job_selector.cpp0000644000000000000000000000012412771222411026124 xustar000000000000000027 mtime=1474635017.400882 27 atime=1513200574.759704 30 ctime=1513200659.882746052 nordugrid-arc-5.4.2/src/hed/libs/compute/examples/job_selector.cpp0000644000175000002070000000430312771222411026171 0ustar00mockbuildmock00000000000000#include <iostream>
#include <arc/Logger.h>
#include <arc/URL.h>
#include <arc/UserConfig.h>
#include <arc/compute/Job.h>
#include <arc/compute/JobSupervisor.h>

/*
 * Create a JobSelector class in order to specify a custom selection to be used
 * with the JobSupervisor class.
 */

// Extend the Arc::JobSelector class and override the Select method.
class ThreeDaysOldJobSelector : public Arc::JobSelector {
public:
  ThreeDaysOldJobSelector() {
    now = Arc::Time();
    three_days = Arc::Period(60*60*24*3);
    //three_days = Arc::Period("P3D") // ISO duration
    //three_days = Arc::Period(3*Arc::Time.DAY)
  }

  // The Select method receives an Arc::Job instance and must return a
  // boolean, indicating whether the job should be selected or rejected.
  // All attributes of the Arc::Job object can be used in this method.
  bool Select(const Arc::Job& job) const {
    return (now - job.EndTime) > three_days;
  }

private:
  Arc::Time now;
  Arc::Period three_days;
};

int main(int argc, char** argv) {
  Arc::UserConfig uc;

  Arc::LogStream logcerr(std::cerr);
  Arc::Logger::getRootLogger().addDestination(logcerr);
  Arc::Logger::getRootLogger().setThreshold(Arc::VERBOSE);

  Arc::Job j;
  j.JobManagementInterfaceName = "org.ogf.glue.emies.activitymanagement";
  j.JobManagementURL = Arc::URL("https://localhost");
  j.JobStatusInterfaceName = "org.ogf.glue.emies.activitymanagement";
  j.JobStatusURL = Arc::URL("https://localhost");

  Arc::JobSupervisor js(uc);

  j.JobID = "test-job-1-day-old";
  j.EndTime = Arc::Time()-Arc::Period("P1D");
  js.AddJob(j);

  j.JobID = "test-job-2-days-old";
  j.EndTime = Arc::Time()-Arc::Period("P2D");
  js.AddJob(j);

  j.JobID = "test-job-3-days-old";
  j.EndTime = Arc::Time()-Arc::Period("P3D");
  js.AddJob(j);

  j.JobID = "test-job-4-days-old";
  j.EndTime = Arc::Time()-Arc::Period("P4D");
  js.AddJob(j);

  ThreeDaysOldJobSelector selector;
  js.Select(selector);

  std::list<Arc::Job> selectedJobs = js.GetSelectedJobs();
  for (std::list<Arc::Job>::iterator itJ = selectedJobs.begin();
       itJ != selectedJobs.end(); ++itJ) {
    std::cout << itJ->JobID << std::endl;
  }

  // Perform operations on the selected jobs.
  // E.g.:
  //js.Clean()

  return 0;
}
nordugrid-arc-5.4.2/src/hed/libs/compute/examples/PaxHeaders.7502/helloworld.xrsl0000644000000000000000000000012412721014565026037 xustar000000000000000027 mtime=1464080757.755156 27 atime=1513200574.759704 30 ctime=1513200659.882746052 nordugrid-arc-5.4.2/src/hed/libs/compute/examples/helloworld.xrsl0000644000175000002070000000042312721014565026103 0ustar00mockbuildmock00000000000000& (executable = "/bin/echo") (arguments = "Hello World") (stdout = "std.out") (join = "yes")(* Join stdout and stderr into stdout *) (cputime = 1) (jobname = "Hello World") (gmlog = "joblog")(* Grid manager log files should be included when retrieving job results (arcget) *) nordugrid-arc-5.4.2/src/hed/libs/compute/examples/PaxHeaders.7502/basic_job_submission.cpp0000644000000000000000000000012312721014565027643 xustar000000000000000027 mtime=1464080757.755156 27 atime=1513200574.759704 29 ctime=1513200659.88174604 nordugrid-arc-5.4.2/src/hed/libs/compute/examples/basic_job_submission.cpp0000644000175000002070000000420512721014565027712 0ustar00mockbuildmock00000000000000#include <arc/compute/Endpoint.h>
#include <arc/compute/Job.h>
#include <arc/compute/JobDescription.h>
#include <arc/compute/JobInformationStorageXML.h>
#include <arc/compute/Submitter.h>
#include <arc/Logger.h>
#include <arc/UserConfig.h>

int main() {
  // Set up logging to stderr with level VERBOSE (a lot of output will be shown)
  Arc::LogStream logcerr(std::cerr);
  logcerr.setFormat(Arc::ShortFormat);
  Arc::Logger::getRootLogger().addDestination(logcerr);
  Arc::Logger::getRootLogger().setThreshold(Arc::VERBOSE);
  Arc::Logger logger(Arc::Logger::getRootLogger(), "jobsubmit");

  // UserConfig contains information on credentials and default services to use.
  // This form of the constructor is necessary to initialise the local job list.
  Arc::UserConfig usercfg("", "");

  // Simple job description which outputs hostname to stdout
  std::string jobdesc("&(executable=/bin/hostname)(stdout=stdout)");

  // Parse job description
  std::list<Arc::JobDescription> jobdescs;
  if (!Arc::JobDescription::Parse(jobdesc, jobdescs)) {
    logger.msg(Arc::ERROR, "Invalid job description");
    return 1;
  }

  /*
   * Use 'Arc::JobDescription::ParseFromFile("helloworld.xrsl", jobdescs)'
   * to parse job description from file.
   */

  // Use top-level NorduGrid information index to find resources
  Arc::Endpoint index("ldap://index1.nordugrid.org:2135/Mds-Vo-name=NorduGrid,o=grid",
                      Arc::Endpoint::REGISTRY, "org.nordugrid.ldapegiis");
  std::list<Arc::Endpoint> services(1, index);

  // Do the submission
  std::list<Arc::Job> jobs;
  Arc::Submitter submitter(usercfg);
  if (submitter.BrokeredSubmit(services, jobdescs, jobs) != Arc::SubmissionStatus::NONE) {
    logger.msg(Arc::ERROR, "Failed to submit job");
    return 1;
  }

  // Write information on submitted job to local job list (~/.arc/jobs.xml)
  Arc::JobInformationStorageXML jobList(usercfg.JobListFile());
  if (!jobList.Write(jobs)) {
    logger.msg(Arc::WARNING, "Failed to write to local job list %s", usercfg.JobListFile());
  }

  // Job submitted ok
  std::cout << "Job submitted with job id " << jobs.front().JobID << std::endl;

  return 0;
}
nordugrid-arc-5.4.2/src/hed/libs/compute/examples/PaxHeaders.7502/README0000644000000000000000000000012412110726523023626 xustar000000000000000027 mtime=1361292627.682019 27 atime=1513200574.759704 30 ctime=1513200659.878746003 nordugrid-arc-5.4.2/src/hed/libs/compute/examples/README0000644000175000002070000000005312110726523023671 0ustar00mockbuildmock00000000000000Examples of how to use the compute library.
nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/JobSupervisor.h0000644000000000000000000000012312771222411024114 xustar000000000000000027 mtime=1474635017.400882 27 atime=1513200574.743704 29 ctime=1513200659.76974467 nordugrid-arc-5.4.2/src/hed/libs/compute/JobSupervisor.h0000644000175000002070000004716712771222411024175 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*-

#ifndef __ARC_JOBSUPERVISOR_H__
#define __ARC_JOBSUPERVISOR_H__

#include <list>
#include <string>

#include <arc/URL.h>
#include <arc/compute/EntityRetriever.h>
#include <arc/compute/JobControllerPlugin.h>

namespace Arc {

  class Logger;
  class Endpoint;
  class UserConfig;

  /// Abstract class used for selecting jobs with JobSupervisor
  /**
   * Only the Select method is meant to be extended.
   * \since Added in 5.1.0.
   **/
  class JobSelector {
  public:
    JobSelector() {}
    virtual ~JobSelector() {}

    /// Indicate whether a job should be selected or not
    /**
     * This method should be extended and should indicate whether the passed job
     * should be selected or not by returning a boolean.
     * @param job A Job object which should either be selected or rejected.
     * @return true if the passed job should be selected, otherwise false is
     * returned.
     **/
    virtual bool Select(const Job& job) const = 0;
  };

  /// JobSupervisor class
  /**
   * The JobSupervisor class is a tool for loading JobControllerPlugin plugins
   * for managing Grid jobs.
   *
   * \ingroup compute
   * \headerfile JobSupervisor.h arc/compute/JobSupervisor.h
   **/
  class JobSupervisor : public EntityConsumer<Job> {
  public:
    /// Create a JobSupervisor
    /**
     * The list of Job objects passed to the constructor will be managed by this
     * JobSupervisor, through the JobControllerPlugin class. It is important that the
     * InterfaceName member of each Job object is set and names an interface
     * supported by one of the available JobControllerPlugin plugins. The
     * JobControllerPlugin plugin will be loaded using the JobControllerPluginLoader class,
     * loading a plugin of type "HED:JobControllerPlugin" which supports the particular
     * interface, and a reference to the UserConfig object usercfg will
     * be passed to the plugin. Additionally a reference to the UserConfig
     * object usercfg will be stored, thus usercfg must exist throughout the
     * scope of the created object. If the InterfaceName member of a Job object is
     * unset, a VERBOSE log message will be reported and that Job object will
     * be ignored.
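     *
     * A minimal construction sketch (the job list is assumed to have been
     * populated elsewhere, for instance read from a local job list file):
     * \code
     * Arc::UserConfig uc("", "");
     * std::list<Arc::Job> jobs;          // assumed to be filled in beforehand
     * Arc::JobSupervisor js(uc, jobs);   // uc must outlive js
     * js.Update();                       // refresh information for the managed jobs
     * \endcode
     *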
If the JobControllerPlugin plugin for a given interface cannot be * loaded, a WARNING log message will be reported and any Job object requesting * that interface will be ignored. If loading of a specific plugin failed, * that plugin will not be tried loaded for subsequent Job objects * requiring that plugin. * Job objects will be added to the corresponding JobControllerPlugin plugin, if * loaded successfully. * * @param usercfg UserConfig object to pass to JobControllerPlugin plugins and to * use in member methods. * @param jobs List of Job objects which will be managed by the created * object. **/ JobSupervisor(const UserConfig& usercfg, const std::list& jobs = std::list()); ~JobSupervisor() {} /// Add job /** * Add Job object to this JobSupervisor for job management. The Job * object will be passed to the corresponding specialized JobControllerPlugin. * * @param job Job object to add for job management * @return true is returned if the passed Job object was added to the * underlying JobControllerPlugin, otherwise false is returned and a log * message emitted with the reason. **/ bool AddJob(const Job& job); void addEntity(const Job& job) { AddJob(job); } /// Update job information /** * When invoking this method the job information for the jobs managed by * this JobSupervisor will be updated. Internally, for each loaded * JobControllerPlugin the JobControllerPlugin::UpdateJobs method will be * called, which will be responsible for updating job information. **/ void Update(); /// Retrieve job output files /** * This method retrieves output files of jobs managed by this JobSupervisor. * * For each of the selected jobs, the job files will be downloaded to a * directory named either as the last part of the job ID or the job name, * which is determined by the 'usejobname' argument. The download * directories will be located in the directory specified by the * 'downloaddirprefix' argument, as either a relative or absolute path. If * the 'force' argument is set to 'true', and a download directory for a * given job already exist it will be overwritten, otherwise files for * that job will not be downloaded. This method calls the * JobControllerPlugin::GetJob method in order to download jobs, and if a * job is successfully retrieved and a corresponding directory exist on * disk, the path to the directory will be appended to the * 'downloaddirectories' list. If all jobs are successfully retrieved this * method returns true, otherwise false. * * @param downloaddirprefix specifies the path to in which job download * directories will be located. * @param usejobname specifies whether to use the job name or job ID as * directory name to store job output files in. * @param force indicates whether existing job directories should be * overwritten or not. * @param downloaddirectories filled with a list of directories to which * jobs were downloaded. * @see JobControllerPlugin::RetrieveJob. * @return true if all jobs are successfully retrieved, otherwise false. * \since Changed in 4.1.0. The path to download directory is only appended * to the 'downloaddirectories' list if the directory exist. **/ bool Retrieve(const std::string& downloaddirprefix, bool usejobname, bool force, std::list& downloaddirectories); /// Renew job credentials /** * This method will renew credentials of jobs managed by this JobSupervisor. 
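 *
 * A brief usage sketch (assuming \c js is a JobSupervisor already populated
 * with jobs):
 * \code
 * if (!js.Renew()) {
 *   // Renewal failed for some jobs; their IDs can be inspected.
 *   std::list<Arc::URL> failed = js.GetIDsNotProcessed();
 * }
 * \endcode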
* * Before identifying jobs for which to renew credentials, the * JobControllerPlugin::UpdateJobs method is called for each loaded * JobControllerPlugin in order to retrieve the most up to date job * information. * * Since jobs in the JobState::DELETED, JobState::FINISHED or * JobState::KILLED states is in a terminal state credentials for those * jobs will not be renewed. Also jobs in the JobState::UNDEFINED state * will not get their credentials renewed, since job information is not * available. The JobState::FAILED state is also a terminal state, but * since jobs in this state can be restarted, credentials for such jobs * can be renewed. If the status-filter is non-empty, a renewal of * credentials will be done for jobs with a general or specific state * (see JobState) identical to any of the entries in the status-filter, * excluding the already filtered states as mentioned above. * * For each job for which to renew credentials, the specialized * JobControllerPlugin::RenewJob method is called and is responsible for * renewing the credentials for the given job. If the method fails to * renew any job credentials, this method will return false (otherwise * true), and the job ID (IDFromEndpoint) of such jobs is appended to the * notrenewed list. The job ID of successfully renewed jobs will be appended * to the passed renewed list. * * @return false if any call to JobControllerPlugin::RenewJob fails, true * otherwise. * @see JobControllerPlugin::RenewJob. **/ bool Renew(); /// Resume jobs by status /** * This method resumes jobs managed by this JobSupervisor. * * Before identifying jobs to resume, the * JobControllerPlugin::UpdateJobs method is called for each loaded * JobControllerPlugin in order to retrieve the most up to date job * information. * * Since jobs in the JobState::DELETED, JobState::FINISHED or * JobState::KILLED states is in a terminal state credentials for those * jobs will not be renewed. Also jobs in the JobState::UNDEFINED state * will not be resumed, since job information is not available. The * JobState::FAILED state is also a terminal state, but jobs in this * state are allowed to be restarted. If the status-filter is non-empty, * only jobs with a general or specific state (see JobState) identical * to any of the entries in the status-filter will be resumed, excluding * the already filtered states as mentioned above. * * For each job to resume, the specialized JobControllerPlugin::ResumeJob * method is called and is responsible for resuming the particular job. * If the method fails to resume a job, this method will return false, * otherwise true is returned. The job ID of successfully resumed jobs * will be appended to the passed resumedJobs list. * * @return false if any call to JobControllerPlugin::ResumeJob fails, true * otherwise. * @see JobControllerPlugin::ResumeJob. **/ bool Resume(); /// Resubmit jobs /** * Jobs managed by this JobSupervisor will be resubmitted when invoking this * method, that is the job description of a job will be tried obtained, and * if successful a new job will be submitted. * * Before identifying jobs to be resubmitted, the * JobControllerPlugin::UpdateJobs method is called for each loaded * JobControllerPlugin in order to retrieve the most up to date job information. * If an empty status-filter is specified, all jobs managed by this * JobSupervisor will be considered for resubmission, except jobs in the * undefined state (see JobState). 
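 * As an illustration only (a hedged sketch; \c js and the endpoint list are
 * assumed to be prepared already):
 * \code
 * std::list<Arc::Endpoint> services;     // candidate submission services
 * std::list<Arc::Job> resubmitted;
 * // destination: 1 = same target, 2 = different target, any other value = any target
 * if (!js.Resubmit(0, services, resubmitted)) {
 *   // Some jobs could not be resubmitted; see GetIDsNotProcessed().
 * }
 * \endcode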
If the status-filter is not empty, then * only jobs with a general or specific state (see JobState) identical to * any of the entries in the status-filter will be considered, except jobs * in the undefined state. Jobs for which a job description cannot be * obtained and successfully parsed will not be considered and an ERROR log * message is reported, and the IDFromEndpoint URL is appended to the * notresubmitted list. Job descriptions will be tried obtained either * from Job object itself, or fetching them remotely. Furthermore if a Job * object has the LocalInputFiles object set, then the checksum of each of * the local input files specified in that object (key) will be calculated * and verified to match the checksum LocalInputFiles object (value). If * checksums are not matching the job will be filtered, and an ERROR log * message is reported and the IDFromEndpoint URL is appended to the * notresubmitted list. If no job have been identified for resubmission, * false will be returned if ERRORs were reported, otherwise true is * returned. * * The destination for jobs is partly determined by the destination * parameter. If a value of 1 is specified a job will only be targeted to * the execution service (ES) on which it reside. A value of 2 indicates * that a job should not be targeted to the ES it currently reside. * Specifying any other value will target any ES. The ESs which can be * targeted are those specified in the UserConfig object of this class, as * selected services. Before initiating any job submission, resource * discovery and broker loading is carried out using the TargetGenerator and * Broker classes, initialised by the UserConfig object of this class. If * Broker loading fails, or no ExecutionTargets are found, an ERROR log * message is reported and all IDFromEndpoint URLs for job considered for * resubmission will be appended to the notresubmitted list and then false * will be returned. * * When the above checks have been carried out successfully, then the * Broker::Submit method will be invoked for each considered for * resubmission. If it fails the IDFromEndpoint URL for the job is appended * to the notresubmitted list, and an ERROR is reported. If submission * succeeds the new job represented by a Job object will be appended to the * resubmittedJobs list - it will not be added to this JobSupervisor. The * method returns false if ERRORs were reported otherwise true is returned. * * @param destination specifies how target destination should be determined * (1 = same target, 2 = not same, any other = any target). * @param services possible destinations for the resubmission * @param resubmittedJobs list of Job objects which resubmitted jobs will be * appended to. * @param rejectedURLs list of services which should be rejected * @return false if any error is encountered, otherwise true. **/ bool Resubmit(int destination, const std::list& services, std::list& resubmittedJobs, const std::list& rejectedURLs = std::list()); /// Migrate jobs /** * Jobs managed by this JobSupervisor will be migrated when invoking this * method, that is the job description of a job will be tried obtained, and * if successful a job migration request will be sent, based on that job * description. * * Before identifying jobs to be migrated, the * JobControllerPlugin::UpdateJobs method is called for each loaded * JobControllerPlugin in order to retrieve the most up to date job information. 
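 * As an illustration only (a hedged sketch; \c js and the endpoint list are
 * assumed to be prepared already):
 * \code
 * std::list<Arc::Endpoint> services;   // candidate execution services
 * std::list<Arc::Job> migrated;
 * if (!js.Migrate(false, services, migrated)) {   // false: do not force migration
 *   // Some jobs were not migrated; see GetIDsNotProcessed().
 * }
 * \endcode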
* Only jobs for which the State member of the Job object has the value * JobState::QUEUEING, will be considered for migration. Furthermore the job * description must be obtained (either locally or remote) and successfully * parsed in order for a job to be migrated. If the job description cannot * be obtained or parsed an ERROR log message is reported, and the * IDFromEndpoint URL of the Job object is appended to the notmigrated list. * If no jobs have been identified for migration, false will be returned in * case ERRORs were reported, otherwise true is returned. * * The execution services which can be targeted for migration are those * specified in the UserConfig object of this class, as selected services. * Before initiating any job migration request, resource discovery and * broker* loading is carried out using the TargetGenerator and Broker * classes, initialised by the UserConfig object of this class. If Broker * loading fails, or no ExecutionTargets are found, an ERROR log message is * reported and all IDFromEndpoint URLs for job considered for migration * will be appended to the notmigrated list and then false will be returned. * * When the above checks have been carried out successfully, the following * is done for each job considered for migration. The ActivityOldID member * of the Identification member in the job description will be set to that * of the Job object, and the IDFromEndpoint URL will be appended to * ActivityOldID member of the job description. After that the Broker object * will be used to find a suitable ExecutionTarget object, and if found a * migrate request will tried sent using the ExecutionTarget::Migrate * method, passing the UserConfig object of this class. The passed * forcemigration boolean indicates whether the migration request at the * service side should ignore failures in cancelling the existing queuing * job. If the request succeeds, the corresponding new Job object is * appended to the migratedJobs list. If no suitable ExecutionTarget objects * are found an ERROR log message is reported and the IDFromEndpoint URL of * the Job object is appended to the notmigrated list. When all jobs have * been processed, false is returned if any ERRORs were reported, otherwise * true. * * @param forcemigration indicates whether migration should succeed if * service fails to cancel the existing queuing job. * @param services possible destinations for the migration * @param migratedJobs list of Job objects which migrated jobs will be * appended to. * @param rejectedURLs list of services which should be rejected * @return false if any error is encountered, otherwise true. **/ bool Migrate(bool forcemigration, const std::list& services, std::list& migratedJobs, const std::list& rejectedURLs = std::list()); /// Cancel jobs /** * This method cancels jobs managed by this JobSupervisor. * * Before identifying jobs to cancel, the JobControllerPlugin::UpdateJobs * method is called for each loaded JobControllerPlugin in order to retrieve * the most up to date job information. * * Since jobs in the JobState::DELETED, JobState::FINISHED, * JobState::KILLED or JobState::FAILED states is already in a terminal * state, a cancel request will not be send for those. Also no * request will be send for jobs in the JobState::UNDEFINED state, since job * information is not available. 
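 * A minimal usage sketch (assuming \c js already holds jobs; the state name is
 * just an example value):
 * \code
 * std::list<std::string> states(1, "Queuing");
 * js.SelectByStatus(states);   // optional: restrict the selection by state
 * if (!js.Cancel()) {
 *   // Some cancel requests failed; see GetIDsNotProcessed().
 * }
 * \endcode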
If the status-filter is non-empty, a * cancel request will only be send to jobs with a general or specific state * (see JobState) identical to any of the entries in the status-filter, * excluding the states mentioned above. * * For each job to be cancelled, the specialized JobControllerPlugin::CancelJob * method is called and is responsible for cancelling the given job. If the * method fails to cancel a job, this method will return false (otherwise * true), and the job ID (IDFromEndpoint) of such jobs is appended to the * notcancelled list. The job ID of successfully cancelled jobs will be * appended to the passed cancelled list. * * @return false if any call to JobControllerPlugin::CancelJob failed, true * otherwise. * @see JobControllerPlugin::CancelJob. **/ bool Cancel(); /// Clean jobs /** * This method removes from services jobs managed by this JobSupervisor. * Before cleaning jobs, the JobController::GetInformation method is called in * order to update job information, and that jobs are selected by job status * instead of by job IDs. The status list argument should contain states * for which cleaning of job in any of those states should be carried out. * The states are compared using both the JobState::operator() and * JobState::GetGeneralState() methods. If the status list is empty, all * jobs will be selected for cleaning. * * @return false if calls to JobControllerPlugin::CleanJob fails, true otherwise. **/ bool Clean(); const std::list& GetAllJobs() const { return jobs; } std::list GetSelectedJobs() const; void SelectValid(); void SelectByStatus(const std::list& status); void SelectByID(const std::list& ids); /// Select jobs based on custom selector /** * Used to do more advanced job selections. NOTE: operations will only be * done on selected jobs. * \param js A JobSelector object which will be used for selecting jobs. * \see Arc::JobSelector * \since Added in 5.1.0. **/ void Select(const JobSelector& js); void ClearSelection(); const std::list& GetIDsProcessed() const { return processed; } const std::list& GetIDsNotProcessed() const { return notprocessed; } private: const UserConfig& usercfg; std::list jobs; // Selected and non-selected jobs. 
typedef std::map, std::list > > JobSelectionMap; JobSelectionMap jcJobMap; std::map loadedJCs; std::list processed, notprocessed; JobControllerPluginLoader loader; static Logger logger; }; } //namespace ARC #endif // __ARC_JOBSUPERVISOR_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/JobInformationStorageSQLite.h0000644000000000000000000000012413065017510026627 xustar000000000000000027 mtime=1490296648.482805 27 atime=1513200574.755704 30 ctime=1513200659.785744865 nordugrid-arc-5.4.2/src/hed/libs/compute/JobInformationStorageSQLite.h0000644000175000002070000000364513065017510026704 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOBINFORMATIONSTORAGESQLITE_H__ #define __ARC_JOBINFORMATIONSTORAGESQLITE_H__ #include #include "JobInformationStorage.h" namespace Arc { class JobInformationStorageSQLite : public JobInformationStorage { public: JobInformationStorageSQLite(const std::string& name, unsigned nTries = 10, unsigned tryInterval = 500000); virtual ~JobInformationStorageSQLite() {} static JobInformationStorage* Instance(const std::string& name) { return new JobInformationStorageSQLite(name); } bool ReadAll(std::list& jobs, const std::list& rejectEndpoints = std::list()); bool Read(std::list& jobs, std::list& jobIdentifiers, const std::list& endpoints = std::list(), const std::list& rejectEndpoints = std::list()); bool Write(const std::list& jobs, const std::set& prunedServices, std::list& newJobs); bool Clean(); bool Remove(const std::list& jobids); private: static void logErrorMessage(int err); static Logger logger; class JobDB { public: JobDB(const std::string& name, bool create = false); ~JobDB(); sqlite3* handle() { return jobDB; } private: void tearDown(); void handleError(const char* errpfx, int err); sqlite3* jobDB; }; class SQLiteException { public: SQLiteException(const std::string& msg, int ret, bool writeLogMessage = true) throw(); ~SQLiteException() throw() {} const std::string& getMessage() const throw() { return message; } int getReturnValue() const throw() { return returnvalue; } private: std::string message; int returnvalue; }; }; } // namespace Arc #endif // __ARC_JOBINFORMATIONSTORAGESQLITE_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/Submitter.h0000644000000000000000000000012412141732267023265 xustar000000000000000027 mtime=1367848119.508434 27 atime=1513200574.746704 30 ctime=1513200659.761744572 nordugrid-arc-5.4.2/src/hed/libs/compute/Submitter.h0000644000175000002070000007215012141732267023337 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_SUBMITTER_H__ #define __ARC_SUBMITTER_H__ #include #include #include #include #include #include #include #include namespace Arc { /** * \defgroup compute ARC Compute Library (libarccompute) * * libarccompute is a library for discovering, quering, matching and ranking, * submitting jobs to and managing jobs on Grid resources, as well as parsing * and assembling job descriptions. It features a uniform high-level interface * to a wide range of Service Registries, Information Systems and Computing * Services. With this interface, registries can be queried for service * endpoints, information systems can be queried for detailed resource and job * information, and jobs can be submitted to and managed in a Grid * environment. The library doesn't offer specific interfaces to different * services, instead it tries to provide a uniform interface to different kind * of services. 
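 *
 * As a rough orientation, the typical flow through the library can be
 * condensed into the following sketch (the registry endpoint and the trivial
 * job description mirror the basic submission example referenced below):
 * \code
 * Arc::UserConfig uc("", "");
 * std::list<Arc::JobDescription> descs;
 * Arc::JobDescription::Parse("&(executable=/bin/hostname)(stdout=stdout)", descs);
 * Arc::Endpoint index("ldap://index1.nordugrid.org:2135/Mds-Vo-name=NorduGrid,o=grid",
 *                     Arc::Endpoint::REGISTRY, "org.nordugrid.ldapegiis");
 * std::list<Arc::Endpoint> services(1, index);
 * std::list<Arc::Job> jobs;
 * Arc::Submitter submitter(uc);
 * submitter.BrokeredSubmit(services, descs, jobs);  // discover, rank and submit
 * Arc::JobSupervisor supervisor(uc, jobs);          // manage the submitted jobs
 * supervisor.Update();
 * \endcode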
* * An introduction on how to use the library to query services for information * is given in the description of the EntityRetriever class. How to use the * library for submitting jobs is described at the Submitter class reference * page. How to manage jobs with the library is described at the JobSupervisor * class reference page. * * The library uses ARC's dynamic plugin mechanism to load plugins for * specific services and features only when required at runtime. These plugins * for the libarccompute library are called ARC Compute Components (ACCs). * Each of the classes listed below will transparently load any required ACC * plugins at runtime when needed. If preferred ACC plugins can also be used * and loaded manually, which is described in more detail \ref accplugins * "here". * * Support for a custom service (info-system, registry or compute), a ranking * algorithm and/or a job description parsing/assembling algorithm is exactly * what is defined as a ACC, and it can easily be added to be accessible to * the libarccompute library. More details about creating such a plugin can be * found \ref accplugins "here". * * With the default NorduGrid ARC plugins installed the librarccompute library * supports the following services and specifications: * * Computing Services: * * EMI ES * * BES (+ ARC BES extension) * * CREAM * * GridFTPJob interface (requires the nordugrid-arc-plugins-globus package) * * Registry and Index Services: * * EMIR * * EGIIS * * Top BDII * * Local Information Schemes: * * %GLUE2 (through LDAP and EMI ES) * * NorduGrid schema (through LDAP) * * GLUE1 (through LDAP) * * Matchmaking and Ranking Algorithms: * * Benchmark * * Fastest queue * * ACIX * * Random * * Python interface for custom broker * * %Job description languages: * * EMI ADL * * xRSL * * JDL * * JSDL (+ Posix and HPC-P extensions) * * \page group__compute ARC Compute Library (libarccompute) */ /** * \ingroup compute * \headerfile Submitter.h arc/compute/Submitter.h */ class EndpointSubmissionStatus { public: /** The possible states: */ enum EndpointSubmissionStatusType { UNKNOWN, NOPLUGIN, SUCCESSFUL }; /** String representation of the states in the enum #EndpointSubmissionStatusType */ static std::string str(EndpointSubmissionStatusType status); /** A new EndpointSubmissionStatus is created with UNKNOWN status and with an empty description by default */ EndpointSubmissionStatus(EndpointSubmissionStatusType status = UNKNOWN, const std::string& description = "") : status(status), description(description) {}; /** This EndpointSubmissionStatus object equals to an enum #EndpointSubmissionStatusType if it contains the same state */ bool operator==(EndpointSubmissionStatusType s) const { return status == s; }; /** This EndpointSubmissionStatus object equals to another EndpointQueryingStatus object, if their state equals. The description doesn't matter. */ bool operator==(const EndpointSubmissionStatus& s) const { return status == s.status; }; /** Inequality. \see operator==(EndpointSubmissionStatus) */ bool operator!=(EndpointSubmissionStatusType s) const { return status != s; }; /** Inequality. 
\see operator==(const EndpointSubmissionStatus&) */ bool operator!=(const EndpointSubmissionStatus& s) const { return status != s.status; }; /** \return true if the status is not successful */ bool operator!() const { return status != SUCCESSFUL; }; /** \return true if the status is successful */ operator bool() const { return status == SUCCESSFUL; }; /** Setting the EndpointSubmissionStatus object's state \param[in] s the new enum #EndpointSubmissionStatusType status */ EndpointSubmissionStatus& operator=(EndpointSubmissionStatusType s) { status = s; return *this; }; /** Copying the EndpointSubmissionStatus object into this one. \param[in] s the EndpointSubmissionStatus object whose status and description will be copied into this object */ EndpointSubmissionStatus& operator=(const EndpointSubmissionStatus& s) { status = s.status; description = s.description; return *this; }; /** Return the enum #EndpointSubmissionStatusType contained within this EndpointSubmissionStatus object */ EndpointSubmissionStatusType getStatus() const { return status; }; /** Return the description string contained within this EndpointSubmissionStatus object */ const std::string& getDescription() const { return description; }; /** String representation of the EndpointSubmissionStatus object, which is currently simply the string representation of the enum #EndpointSubmissionStatusType */ std::string str() const { return str(status); }; private: EndpointSubmissionStatusType status; std::string description; }; class SubmissionStatus; /// Class for submitting jobs. /** * As the name indicates this class is used for submitting jobs. It has a * number of different submit methods which can be used directly for different * job submission purposes. The class it self can be considered as a frontend * to the SubmitterPlugin class, an abstract class which is extended by * specialised plugins providing the actual coupling to a particular type of * computing service. As a frontend, this class also takes care of loading * the specialised plugins and choosing the right plugin for a given computing * service, however that can also be done manually with the * SubmitterPluginLoader class. In order to use the Submitter class a * reference to a UserConfig object must be provided, it should exist * throughout the lifetime of the created Submitter object, and the UserConfig * object should contain configuration details such as the path to user * credentials. * * Generally there are two types of submit methods. One which doesn't * accept a reference to a Job or list of Job objects, and one which does. * This is because the Submitter class is able to pass submitted Job objects * to consumer objects. Registering a consumer object is done using the * \ref Submitter::addConsumer "addConsumer" method passing a reference to an * EntityConsumer object. An * example of such a consumer is the JobSupervisor class. Multiple consumers * can be registered for the same Submitter object. Every submit method will * then pass submitted Job objects to the registered consumer objects. A * registered consumer can be removed using the * \ref Submitter::removeConsumer "removeConsumer" method. * * For submitting a Grid job one should use one of the * \ref Submitter::BrokeredSubmit "BrokeredSubmit" methods. They accept a list * of job descriptions and a list of information system endpoints for which * computing services are discovered and matched to the job descriptions. 
* Jobs are then submitted to the matching services in the order ranked by the * \ref Broker "algorithm" specified in the UserConfig object. * * Another way of submitting a job is by using the * \ref Submitter::Submit "Submit" methods. These methods accepts submission * endpoints or ExecutionTarget objects. Using these methods will not do * any client side checks whether the computing service resources pointed to * by the submission endpoint (or ExecutionTarget) really matches the * specified job description(s). * * Common for both ways of submitting jobs is that they both return a * SubmissionStatus object indicating the outcome of the submission attemp(s). * If the returned status object indicates failures, further examination can * be carried out by using the * \ref Submitter::GetDescriptionsNotSubmitted "GetDescriptionsNotSubmitted", * \ref Submitter::GetEndpointQueryingStatuses "GetEndpointQueryingStatuses" * and/or \ref Submitter::GetEndpointSubmissionStatuses "GetEndpointSubmissionStatuses" * methods. Note that on each invocation of any of the submit methods the * state from a previous submission attemp will be cleared, thus the just * mentioned methods should be used just after an attempted submission fails. * * An example of submitting a Grid job using C++ is shown below: * \include basic_job_submission.cpp * This code can be compiled with * \code * g++ -o submit -I/usr/include/libxml2 `pkg-config --cflags glibmm-2.4` -l arccompute submit.cpp * \endcode * Same example using Python shown below: * \include basic_job_submission.py * * And same example using Java: * \include BasicJobSubmission.java * * \since Added in 2.0.0 * \ingroup compute * \headerfile Submitter.h arc/compute/Submitter.h */ class Submitter { public: /// Create a Submitter object /** * Stores a reference to the passed UserConfig object which will be used * for obtaining among others location of user credentials. * \note The UserConfig object must exist throughout the life time of the * created Submitter object. */ Submitter(const UserConfig& uc) : uc(uc) {} ~Submitter() {} // === Using the consumer concept as in the EntityRetriever === /// Add a Job consumer /** * Add a consumer object which will be called every time a job is submitted. * * Example use of consumer concept: * \param[in] addConsumer_consumer consumer object receiving newly submitted jobs. * \see removeConsumer */ void addConsumer(EntityConsumer& addConsumer_consumer /* The name 'addConsumer_consumer' is important for Swig when matching methods */) { consumers.push_back(&addConsumer_consumer); } /// Remove a previous added consumer object. /** * \param[in] removeConsumer_consumer consumer object which should be removed. * \see addConsumer */ void removeConsumer(EntityConsumer& removeConsumer_consumer /* The name 'removeConsumer_consumer' is important for Swig when matching methods */); // === // === No brokering === /// Submit job to endpoint /** * Submit a job described by the passed JobDescription object to the * specified submission endpoint of a computing service. The method will * load the specialised \ref SubmitterPlugin "submitter plugin" which * corresponds to the specified \ref Endpoint::InterfaceName "interface name". * If no such plugin is found submission is unsuccessful. If however the * the interface name is unspecified (empty), then all available submitter * plugins will be tried. If submission is successful, the submitted job * will be added to the registered consumer object. 
If unsuccessful, more * details can be obtained from the returned SubmissionStatus object, or by * using the \ref GetDescriptionsNotSubmitted, * \ref GetEndpointQueryingStatuses and \ref GetEndpointSubmissionStatuses. * * \param[in] endpoint the endpoint to which job should be submitted. * \param[in] desc the JobDescription object describing the job to be * submitted. * \return A SubmissionStatus object is returned indicating the status of * submission. * \see addConsumer * \see GetDescriptionsNotSubmitted * \see GetEndpointQueryingStatuses * \see GetEndpointSubmissionStatuses * \since Added in 3.0.0 */ SubmissionStatus Submit(const Endpoint& endpoint, const JobDescription& desc) { return Submit(endpoint, std::list(1, desc)); } /// Submit job to endpoint /** * Identical to Submit(const Endpoint&, const JobDescription&), with the * addition that the Job object passed as reference will also be filled with * job information if submission was successful. * * \param[out] job a reference to a Job object which will be filled with * job details if submission was successful. * \see Submit(const Endpoint&, const JobDescription&) for detailed * description. * \since Added in 3.0.0 */ SubmissionStatus Submit(const Endpoint& endpoint, const JobDescription& desc, Job& job); /// Submit jobs to endpoint /** * Identical to Submit(const Endpoint&, const JobDescription&), except that * this method submits multiple jobs to the same endpoint. Submitted jobs will * be added to the registered consumer. * * \see Submit(const Endpoint&, const JobDescription&) * \since Added in 3.0.0 */ SubmissionStatus Submit(const Endpoint& endpoint, const std::list& descs); /// Submit jobs to endpoint /** * Identical to Submit(const Endpoint&, const JobDescription&), with the * addition that the list of Job objects passed as reference will be filled with * the submitted jobs, and that multiple jobs are submitted to the same * endpoint. * * \see Submit(const Endpoint&, const JobDescription&) * \since Added in 3.0.0 */ SubmissionStatus Submit(const Endpoint& endpoint, const std::list& descs, std::list& jobs); /// Submit jobs to any endpoints /** * Submit multiple jobs to a list of submission endpoints to computing * services. For each JobDescription object submission is tried against the * list of submission endpoints in order. If submission to an endpoint fails * the next in the list is tried - no ranking of endpoints will be done. * Also note that a job is only submitted once, and not to multiple * computing services. Submitted Job objects are passed to the registered * consumer objects. * * \return A SubmissionStatus object is returned which indicates the * outcome of the submission. * \see addConsumer * \see GetDescriptionsNotSubmitted * \see GetEndpointQueryingStatuses * \see GetEndpointSubmissionStatuses * \since Added in 3.0.0 */ SubmissionStatus Submit(const std::list& endpoint, const std::list& descs); /// Submit jobs to any endpoints /** * Identical to Submit(const Endpoint&, const std::list&, std::list&) * with the addition that submitted jobs are also added to the passed list * of Job objects. * * \see Submit(const Endpoint&, const std::list&, std::list&) * \since Added in 3.0.0 */ SubmissionStatus Submit(const std::list& endpoint, const std::list& descs, std::list& jobs); // ==== Submission to single configuration (adaptation of job description) ==== // ===== Single job ===== /// Submit job to ExecutionTarget (computing service) /** * Submit a job to a computing service, represented by an ExecutionTarget * object. 
This is useful when resource discovery is carried out manually, * not using the \ref BrokeredSubmit methods, but using the * \ref ComputingServiceRetriever class. The * \ref SubmitterPlugin "submitter plugin" corresponding to the * \ref ComputingEndpointAttributes::InterfaceName "interface name" will be * loaded. If that plugin cannot be loaded, submission will be unsuccessful. * When loaded the ExecutionTarget and JobDescription object will be passed * to the \ref SubmitterPlugin::Submit(const std::list&, const ExecutionTarget&, EntityConsumer&, std::list&) "SubmitterPlugin::Submit" * method of the loaded plugin and the status of that method is returned. * * \param[in] et the ExecutionTarget to which job should be submitted. * \param[in] desc the JobDescription object describing the job to be * submitted. * \return A SubmissionStatus object is returned indicating the status of * submission. * \see addConsumer * \see GetDescriptionsNotSubmitted * \see GetEndpointQueryingStatuses * \see GetEndpointSubmissionStatuses */ SubmissionStatus Submit(const ExecutionTarget& et, const JobDescription& desc) { return Submit(et, std::list(1, desc)); } /// Submit job to ExecutionTarget (computing service) /** * Identical to Submit(const ExecutionTarget&, const JobDescription&), with * the addition that the Job object passed as reference will also be filled * with job information if submission was successful. * * \param[in] et the ExecutionTarget to which job should be submitted. * \param[in] desc the JobDescription object describing the job to be * submitted. * \return A SubmissionStatus object is returned indicating the status of * submission. * \see Submit(const ExecutionTarget&, const JobDescription&) */ SubmissionStatus Submit(const ExecutionTarget& et, const JobDescription& desc, Job& job); // ===== // ===== Multiple jobs ===== /// Submit jobs to ExecutionTarget (computing service) /** * Identical to Submit(const ExecutionTarget&, const JobDescription&), * except that this method submits multiple jobs to the same computing * service. Submitted jobs will be added to the registered consumer. * * \param[in] et the ExecutionTarget to which job should be submitted. * \param[in] desc the JobDescription object describing the job to be * submitted. * \return A SubmissionStatus object is returned indicating the status of * submission. * \see Submit(const ExecutionTarget&, const JobDescription&) */ SubmissionStatus Submit(const ExecutionTarget& et, const std::list& descs); /// Submit jobs to ExecutionTarget (computing service) /** * Identical to Submit(const ExecutionTarget&, const JobDescription&) * with the addition that this method submits multiple jobs to the same * computing service, and that submitted jobs are also added to the passed * list of Job objects. * * \param[in] et the ExecutionTarget to which job should be submitted. * \param[in] desc the JobDescription object describing the job to be * submitted. * \return A SubmissionStatus object is returned indicating the status of * submission. 
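 *
 * A minimal usage sketch, not taken from the ARC sources: it assumes 'uc' is an
 * already configured UserConfig, 'et' is an ExecutionTarget obtained from earlier
 * resource discovery, and 'descs' is an already filled list of parsed
 * JobDescription objects.
 * \code
 * Arc::Submitter submitter(uc);
 * std::list<Arc::Job> jobs;
 * Arc::SubmissionStatus status = submitter.Submit(et, descs, jobs);
 * // If 'status' indicates failures, consult GetDescriptionsNotSubmitted(),
 * // GetEndpointQueryingStatuses() and GetEndpointSubmissionStatuses().
 * \endcode
 *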
* \see Submit(const ExecutionTarget&, const JobDescription&) */ SubmissionStatus Submit(const ExecutionTarget& et, const std::list& descs, std::list& jobs); // ===== // ==== // === // === Brokering with service discovery (multiple endpoints) === // ==== Using provided JobDescription objects for brokering ==== /// Submit jobs to matching and ranked computing services /** * The passed job descriptions will be submitted to any of the matching * computing services in ranked order, which have been discovered using the * provided information endpoints. * * First, all previously set statuses will be cleared by invoking the * \ref Submitter::ClearAll "ClearAll" method. Then resource discovery is * invoked using the ComputingServiceRetriever class to query the provided * information endpoints, then for each JobDescription object the discovered * computing services are matched against the job description, and the * matching computing services are ranked according to the broker algorithm * (specified in the UserConfig object of the Submitter). If any * requested submission interfaces have been specified in the optional * parameter 'requestedSubmissionInterfaces', then computing services which * don't have a matching submission interface will be ignored. Lastly, * submission of the job description is tried against the computing services in the * ranked order, and upon the first successful submission a corresponding * Job object is propagated to the registered consumers, and then the next * job description is processed. If a job description cannot be submitted, a * pointer to it will be added to an internal list, which afterwards can be * obtained using the \ref GetDescriptionsNotSubmitted method. If any * problems were encountered during submission, more details can be obtained * from the returned SubmissionStatus object, or by using the * \ref GetDescriptionsNotSubmitted, \ref GetEndpointQueryingStatuses * and \ref GetEndpointSubmissionStatuses. * * \param[in] endpoints the information endpoints which will be used to * initiate resource discovery. * \param[in] descs the JobDescription objects describing the jobs to be * submitted. * \param[in] requestedSubmissionInterfaces an optional list of submission * interfaces to use for submission. * \return A SubmissionStatus object is returned indicating the status of * the submissions. * \see addConsumer * \see GetDescriptionsNotSubmitted * \see GetEndpointQueryingStatuses * \see GetEndpointSubmissionStatuses * \since Added in 3.0.0 */ SubmissionStatus BrokeredSubmit(const std::list& endpoints, const std::list& descs, const std::list& requestedSubmissionInterfaces = std::list()); /// Submit jobs to matching and ranked computing services /** * Identical to \ref BrokeredSubmit(const std::list& endpoints, const std::list& descs, const std::list& requestedSubmissionInterfaces) "BrokeredSubmit" * except that submitted jobs are added to the referenced list of Job * objects. * * \param[in] endpoints the information endpoints which will be used to * initiate resource discovery. * \param[in] descs the JobDescription objects describing the jobs to be * submitted. * \param[in] jobs reference to a list of Job objects for which to add * submitted jobs. * \param[in] requestedSubmissionInterfaces an optional list of submission * interfaces to use for submission. * \return A SubmissionStatus object is returned indicating the status of * the submissions. 
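 *
 * A minimal sketch of calling this overload (not taken from the ARC sources):
 * the registry URL is hypothetical, 'uc' is assumed to be an already configured
 * UserConfig, and 'descs' is assumed to already hold parsed JobDescription
 * objects (e.g. produced with JobDescription::Parse).
 * \code
 * Arc::Submitter submitter(uc);
 * std::list<std::string> endpoints;
 * endpoints.push_back("ldap://index1.example.org:2135/Mds-Vo-name=NorduGrid,o=grid"); // hypothetical registry
 * std::list<Arc::JobDescription> descs; // assumed to be filled beforehand
 * std::list<Arc::Job> jobs;
 * Arc::SubmissionStatus status = submitter.BrokeredSubmit(endpoints, descs, jobs);
 * // Any failures can afterwards be examined via GetDescriptionsNotSubmitted()
 * // and the two endpoint status map methods.
 * \endcode
 *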
* \see addConsumer * \see GetDescriptionsNotSubmitted * \see GetEndpointQueryingStatuses * \see GetEndpointSubmissionStatuses * \since Added in 3.0.0 */ SubmissionStatus BrokeredSubmit(const std::list& endpoints, const std::list& descs, std::list& jobs, const std::list& requestedSubmissionInterfaces = std::list()); /// Submit jobs to matching and ranked computing services /** * Identical to \ref BrokeredSubmit(const std::list& endpoints, const std::list& descs, const std::list& requestedSubmissionInterfaces) "BrokeredSubmit" * except that the endpoints are not strings but Endpoint objects, which * can be used to provide detailed information about endpoints making * resource discovery more performant. * * \param[in] endpoints the information endpoints which will be used to * initiate resource discovery. * \param[in] descs the JobDescription objects describing the jobs to be * submitted. * \param[in] requestedSubmissionInterfaces an optional list of submission * interfaces to use for submission. * \return A SubmissionStatus object is returned indicating the status of * the submissions. * \see addConsumer * \see GetDescriptionsNotSubmitted * \see GetEndpointQueryingStatuses * \see GetEndpointSubmissionStatuses * \since Added in 3.0.0 */ SubmissionStatus BrokeredSubmit(const std::list& endpoints, const std::list& descs, const std::list& requestedSubmissionInterfaces = std::list()); /// Submit jobs to matching and ranked computing services /** * Identical to \ref BrokeredSubmit(const std::list& endpoints, const std::list& descs, const std::list& requestedSubmissionInterfaces) "BrokeredSubmit" * except that submitted jobs are added to the referenced list of Job * objects and that the endpoints are not strings but Endpoint objects, * which can be used to provide detailed information about endpoints making * resource discovery more performant. * * \param[in] endpoints the information endpoints which will be used to * initiate resource discovery. * \param[in] descs the JobDescription objects describing the jobs to be * submitted. * \param[in] jobs reference to a list of Job obects for which to add * submitted jobs. * \param[in] requestedSubmissionInterfaces an optional list of submission * interfaces to use for submission. * \return A SubmissionStatus object is returned indicating the status of * the submissions. * \see addConsumer * \see GetDescriptionsNotSubmitted * \see GetEndpointQueryingStatuses * \see GetEndpointSubmissionStatuses * \since Added in 3.0.0 */ SubmissionStatus BrokeredSubmit(const std::list& endpoints, const std::list& descs, std::list& jobs, const std::list& requestedSubmissionInterfaces = std::list()); // ==== // === // === Methods for handling errors === /// Get job descriptions not submitted /** * \return A reference to the list of the not submitted job descriptions is * returned. */ const std::list& GetDescriptionsNotSubmitted() const { return notsubmitted; } /// Clear list of not submitted job descriptions void ClearNotSubmittedDescriptions() { notsubmitted.clear(); } /// Get status map for queried endpoints /** * The returned map contains EndpointQueryingStatus objects of all the * information endpoints which were queried during resource discovery. * * \return A reference to the status map of queried endpoints. * \since Added in 3.0.0 */ const EndpointStatusMap& GetEndpointQueryingStatuses() const { return queryingStatusMap; } /// Clear endpoint status querying map. 
/** * \since Added in 3.0.0 */ void ClearEndpointQueryingStatuses() { queryingStatusMap.clear(); } /// Get submission status map /** * The returned map contains EndpointSubmissionStatus objects for all the * submission endpoints which were tried for job submission. * * \return A reference to the submission status map. * \since Added in 3.0.0 */ const std::map& GetEndpointSubmissionStatuses() const { return submissionStatusMap; } /// Clear submission status map /** * \since Added in 3.0.0 */ void ClearEndpointSubmissionStatuses() { submissionStatusMap.clear(); } /// Clear all status maps /** * Convenience method which calls \ref ClearEndpointQueryingStatuses and * \ref ClearEndpointSubmissionStatuses. * \since Added in 3.0.0 */ void ClearAllStatuses() { queryingStatusMap.clear(); submissionStatusMap.clear(); } /// Clear all /** * Convenience method which calls \ref ClearNotSubmittedDescriptions and * \ref ClearEndpointQueryingStatuses and * \ref ClearEndpointSubmissionStatuses. * * \since Added in 3.0.0 */ void ClearAll() { notsubmitted.clear(); queryingStatusMap.clear(); submissionStatusMap.clear(); } // === private: class ConsumerWrapper : public EntityConsumer { public: ConsumerWrapper(Submitter& s) : s(s) {} void addEntity(const Job& j) { for (std::list*>::iterator it = s.consumers.begin(); it != s.consumers.end(); ++it) { (*it)->addEntity(j); } } private: Submitter& s; }; SubmissionStatus SubmitNoClear(const Endpoint& endpoint, const std::list& descs); const UserConfig& uc; EndpointStatusMap queryingStatusMap; std::map submissionStatusMap; std::list notsubmitted; std::list*> consumers; static SubmitterPluginLoader& getLoader(); static Logger logger; }; } #endif // __ARC_SUBMITTER_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/Endpoint.cpp0000644000000000000000000000012412051006754023415 xustar000000000000000027 mtime=1352928748.264998 27 atime=1513200574.742704 30 ctime=1513200659.808745147 nordugrid-arc-5.4.2/src/hed/libs/compute/Endpoint.cpp0000644000175000002070000000702412051006754023465 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "Endpoint.h" namespace Arc { EndpointStatusMap::EndpointStatusMap() : std::map(&Endpoint::ServiceIDCompare) {} Endpoint::Endpoint(const ExecutionTarget& e, const std::string& rsi) : URLString(e.ComputingEndpoint->URLString), InterfaceName(e.ComputingEndpoint->InterfaceName), HealthState(e.ComputingEndpoint->HealthState), HealthStateInfo(e.ComputingEndpoint->HealthStateInfo), QualityLevel(e.ComputingEndpoint->QualityLevel), Capability(e.ComputingEndpoint->Capability), RequestedSubmissionInterfaceName(rsi) {} Endpoint::Endpoint(const ComputingEndpointAttributes& cea, const std::string& rsi) : URLString(cea.URLString), InterfaceName(cea.InterfaceName), HealthState(cea.HealthState), HealthStateInfo(cea.HealthStateInfo), QualityLevel(cea.QualityLevel), Capability(cea.Capability), RequestedSubmissionInterfaceName(rsi) {} Endpoint& Endpoint::operator=(const ConfigEndpoint& e) { URLString = e.URLString; InterfaceName = e.InterfaceName; RequestedSubmissionInterfaceName = e.RequestedSubmissionInterfaceName; Capability.clear(); switch (e.type) { case ConfigEndpoint::REGISTRY: Capability.insert(GetStringForCapability(Endpoint::REGISTRY)); break; case ConfigEndpoint::COMPUTINGINFO: Capability.insert(GetStringForCapability(Endpoint::COMPUTINGINFO)); break; case ConfigEndpoint::ANY: break; } return *this; } bool 
Endpoint::HasCapability(Endpoint::CapabilityEnum cap) const { return HasCapability(GetStringForCapability(cap)); } bool Endpoint::HasCapability(const std::string& capability) const { return Capability.count(capability); } std::string Endpoint::getServiceName() const { if (URLString.find("://") == std::string::npos) return URLString; else { URL url(URLString); if (url.Host().empty()) return URLString; else return url.Host(); } } std::string Endpoint::str() const { std::string capabilities = ""; if (!Capability.empty()) { capabilities = ", capabilities:"; for (std::set::const_iterator it = Capability.begin(); it != Capability.end(); it++) { capabilities = capabilities + " " + *it; } } std::string interfaceNameToPrint = ""; if (!InterfaceName.empty()) { interfaceNameToPrint = InterfaceName; } return URLString + " (" + interfaceNameToPrint + capabilities + ")"; } bool Endpoint::operator<(const Endpoint& other) const { return str() < other.str(); } bool Endpoint::ServiceIDCompare(const Endpoint& a, const Endpoint& b) { if (a.ServiceID != b.ServiceID) { return a.ServiceID < b.ServiceID; } if (a.URLString != b.URLString) { return a.URLString < b.URLString; } return a.InterfaceName < b.InterfaceName; } std::pair Endpoint::getServiceEndpoints(const Endpoint& e, const EndpointStatusMap& m) { Endpoint _e(e); EndpointStatusMap::const_iterator first = m.lower_bound(_e); _e.ServiceID[_e.ServiceID.size()-1]++; EndpointStatusMap::const_iterator second = m.upper_bound(_e); return make_pair(first, second); } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/TestACCControl.cpp0000644000000000000000000000012412072511126024421 xustar000000000000000027 mtime=1357550166.724559 27 atime=1513200574.698704 30 ctime=1513200659.804745098 nordugrid-arc-5.4.2/src/hed/libs/compute/TestACCControl.cpp0000644000175000002070000000400212072511126024462 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "TestACCControl.h" namespace Arc { bool BrokerPluginTestACCControl::match = false; bool BrokerPluginTestACCControl::less = false; bool JobDescriptionParserPluginTestACCControl::parseStatus = true; std::list JobDescriptionParserPluginTestACCControl::parsedJobDescriptions(1, JobDescription()); bool JobDescriptionParserPluginTestACCControl::unparseStatus = true; std::string JobDescriptionParserPluginTestACCControl::unparsedString = ""; bool JobControllerPluginTestACCControl::cleanStatus = true; bool JobControllerPluginTestACCControl::cancelStatus = true; bool JobControllerPluginTestACCControl::renewStatus = true; bool JobControllerPluginTestACCControl::resumeStatus = true; bool JobControllerPluginTestACCControl::getJobDescriptionStatus = true; std::string JobControllerPluginTestACCControl::getJobDescriptionString = ""; bool JobControllerPluginTestACCControl::resourceExist = true; URL JobControllerPluginTestACCControl::resourceURL = URL(); URL JobControllerPluginTestACCControl::createURL = URL(); SubmissionStatus SubmitterPluginTestACCControl::submitStatus; bool SubmitterPluginTestACCControl::migrateStatus = true; bool SubmitterPluginTestACCControl::modifyStatus = true; Job SubmitterPluginTestACCControl::submitJob = Job(); Job SubmitterPluginTestACCControl::migrateJob = Job(); float TargetInformationRetrieverPluginTESTControl::delay = 0; std::list TargetInformationRetrieverPluginTESTControl::targets; EndpointQueryingStatus TargetInformationRetrieverPluginTESTControl::status; std::list ServiceEndpointRetrieverPluginTESTControl::condition; std::list 
ServiceEndpointRetrieverPluginTESTControl::status; std::list< std::list > ServiceEndpointRetrieverPluginTESTControl::endpoints; float JobListRetrieverPluginTESTControl::delay = 0; EndpointQueryingStatus JobListRetrieverPluginTESTControl::status; std::list JobListRetrieverPluginTESTControl::jobs = std::list(); } nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/JobDescription.cpp0000644000000000000000000000012313024224727024555 xustar000000000000000026 mtime=1481714135.79494 27 atime=1513200574.676703 30 ctime=1513200659.798745024 nordugrid-arc-5.4.2/src/hed/libs/compute/JobDescription.cpp0000644000175000002070000007035013024224727024630 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "JobDescription.h" #define INTPRINT(OUT, X, Y) if ((X) > -1) \ OUT << IString(#Y ": %d", X) << std::endl; #define STRPRINT(OUT, X, Y) if (!(X).empty()) \ OUT << IString(#Y ": %s", X) << std::endl; namespace Arc { Logger JobDescription::logger(Logger::getRootLogger(), "JobDescription"); // Maybe this mutex could go to JobDescriptionParserPluginLoader. That would make // it transparent. On another hand JobDescriptionParserPluginLoader must not know // how it is used. Glib::Mutex JobDescription::jdpl_lock; // TODO: JobDescriptionParserPluginLoader need to be freed when not used any more. JobDescriptionParserPluginLoader *JobDescription::jdpl = NULL; JobDescription::JobDescription(const long int& ptraddr) { *this = *((JobDescription*)ptraddr); } JobDescription::JobDescription(const JobDescription& j, bool withAlternatives) { if (withAlternatives) { *this = j; } else { RemoveAlternatives(); Set(j); } } ApplicationType& ApplicationType::operator=(const ApplicationType& at) { Executable = at.Executable; Input = at.Input; Output = at.Output; Error = at.Error; Environment = at.Environment; PreExecutable = at.PreExecutable; PostExecutable = at.PostExecutable; LogDir = at.LogDir; RemoteLogging = at.RemoteLogging; Rerun = at.Rerun; ExpirationTime = at.ExpirationTime; ProcessingStartTime = at.ProcessingStartTime; Priority = at.Priority; Notification = at.Notification; CredentialService = at.CredentialService; at.AccessControl.New(AccessControl); DryRun = at.DryRun; return *this; } ResourcesType& ResourcesType::operator=(const ResourcesType& rt) { OperatingSystem = rt.OperatingSystem; Platform = rt.Platform; NetworkInfo = rt.NetworkInfo; IndividualPhysicalMemory = rt.IndividualPhysicalMemory; IndividualVirtualMemory = rt.IndividualVirtualMemory; DiskSpaceRequirement = rt.DiskSpaceRequirement; SessionLifeTime = rt.SessionLifeTime; SessionDirectoryAccess = rt.SessionDirectoryAccess; IndividualCPUTime = rt.IndividualCPUTime; TotalCPUTime = rt.TotalCPUTime; IndividualWallTime = rt.IndividualWallTime; NodeAccess = rt.NodeAccess; CEType = rt.CEType; SlotRequirement = rt.SlotRequirement; ParallelEnvironment = rt.ParallelEnvironment; Coprocessor = rt.Coprocessor; QueueName = rt.QueueName; RunTimeEnvironment = rt.RunTimeEnvironment; return *this; } void JobDescription::Set(const JobDescription& j) { Identification = j.Identification; Application = j.Application; Resources = j.Resources; DataStaging = j.DataStaging; OtherAttributes = j.OtherAttributes; sourceLanguage = j.sourceLanguage; } void JobDescription::RemoveAlternatives() { alternatives.clear(); current = alternatives.begin(); } JobDescription& JobDescription::operator=(const JobDescription& j) { RemoveAlternatives(); 
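// The alternatives of this object have just been dropped; now copy the primary
// description, then clone j's alternatives below, advancing 'current' so that it
// refers to the same element as j.current.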
Set(j); if (!j.alternatives.empty()) { alternatives = j.alternatives; current = alternatives.begin(); for (std::list::const_iterator it = j.alternatives.begin(); it != j.current && it != j.alternatives.end(); it++) { current++; // Increase iterator so it points to same object as in j. } } return *this; } JobDescriptionResult JobDescription::SaveToStream(std::ostream& out, const std::string& format) const { if (format != "user" && format != "userlong") { std::string outjobdesc; if (!UnParse(outjobdesc, format)) { return false; } out << outjobdesc; return true; } STRPRINT(out, Application.Executable.Path, Executable); if (Application.DryRun) { out << istring(" --- DRY RUN --- ") << std::endl; } STRPRINT(out, Application.LogDir, Log Directory) STRPRINT(out, Identification.JobName, JobName) STRPRINT(out, Identification.Description, Description) if (format == "userlong") { if (!Identification.Annotation.empty()) { std::list::const_iterator iter = Identification.Annotation.begin(); for (; iter != Identification.Annotation.end(); iter++) out << IString(" Annotation: %s", *iter) << std::endl; } if (!Identification.ActivityOldID.empty()) { std::list::const_iterator iter = Identification.ActivityOldID.begin(); for (; iter != Identification.ActivityOldID.end(); iter++) out << IString(" Old activity ID: %s", *iter) << std::endl; } if (!Application.Executable.Argument.empty()) { std::list::const_iterator iter = Application.Executable.Argument.begin(); for (; iter != Application.Executable.Argument.end(); iter++) out << IString(" Argument: %s", *iter) << std::endl; } STRPRINT(out, Application.Input, Input) STRPRINT(out, Application.Output, Output) STRPRINT(out, Application.Error, Error) if (!Application.RemoteLogging.empty()) { std::list::const_iterator iter = Application.RemoteLogging.begin(); for (; iter != Application.RemoteLogging.end(); iter++) { if (iter->optional) { out << IString(" RemoteLogging (optional): %s (%s)", iter->Location.fullstr(), iter->ServiceType) << std::endl; } else { out << IString(" RemoteLogging: %s (%s)", iter->Location.fullstr(), iter->ServiceType) << std::endl; } } } if (!Application.Environment.empty()) { std::list< std::pair >::const_iterator iter = Application.Environment.begin(); for (; iter != Application.Environment.end(); iter++) { out << IString(" Environment.name: %s", iter->first) << std::endl; out << IString(" Environment: %s", iter->second) << std::endl; } } INTPRINT(out, Application.Rerun, Rerun) if (!Application.PreExecutable.empty()) { std::list::const_iterator itPreEx = Application.PreExecutable.begin(); for (; itPreEx != Application.PreExecutable.end(); ++itPreEx) { STRPRINT(out, itPreEx->Path, PreExecutable) if (!itPreEx->Argument.empty()) { std::list::const_iterator iter = itPreEx->Argument.begin(); for (; iter != itPreEx->Argument.end(); ++iter) out << IString(" PreExecutable.Argument: %s", *iter) << std::endl; } if (itPreEx->SuccessExitCode.first) { out << IString(" Exit code for successful execution: %d", itPreEx->SuccessExitCode.second) << std::endl; } else { out << IString(" No exit code for successful execution specified.") << std::endl; } } } if (!Application.PostExecutable.empty()) { std::list::const_iterator itPostEx = Application.PostExecutable.begin(); for (; itPostEx != Application.PostExecutable.end(); ++itPostEx) { STRPRINT(out, itPostEx->Path, PostExecutable) if (!itPostEx->Argument.empty()) { std::list::const_iterator iter = itPostEx->Argument.begin(); for (; iter != itPostEx->Argument.end(); ++iter) out << IString(" 
PostExecutable.Argument: %s", *iter) << std::endl; } if (itPostEx->SuccessExitCode.first) { out << IString(" Exit code for successful execution: %d", itPostEx->SuccessExitCode.second) << std::endl; } else { out << IString(" No exit code for successful execution specified.") << std::endl; } } } INTPRINT(out, Resources.SessionLifeTime.GetPeriod(), SessionLifeTime) if (bool(Application.AccessControl)) { std::string str; Application.AccessControl.GetXML(str, true); out << IString(" Access control: %s", str) << std::endl; } if (Application.ProcessingStartTime.GetTime() > 0) out << IString(" Processing start time: %s", Application.ProcessingStartTime.str()) << std::endl; if (Application.Notification.size() > 0) { out << IString(" Notify:") << std::endl; for (std::list::const_iterator it = Application.Notification.begin(); it != Application.Notification.end(); it++) { for (std::list::const_iterator it2 = it->States.begin(); it2 != it->States.end(); it2++) { out << " " << *it2; } out << ": " << it->Email << std::endl; } } if (!Application.CredentialService.empty()) { std::list::const_iterator iter = Application.CredentialService.begin(); for (; iter != Application.CredentialService.end(); iter++) out << IString(" Credential service: %s", iter->str()) << std::endl; } INTPRINT(out, Resources.TotalCPUTime.range.max, TotalCPUTime) INTPRINT(out, Resources.IndividualCPUTime.range.max, IndividualCPUTime) INTPRINT(out, Resources.IndividualWallTime.range.max, IndividualWallTime) STRPRINT(out, Resources.NetworkInfo, NetworkInfo) if (!Resources.OperatingSystem.empty()) { out << IString(" Operating system requirements:") << std::endl; std::list::const_iterator itOS = Resources.OperatingSystem.getSoftwareList().begin(); std::list::const_iterator itCO = Resources.OperatingSystem.getComparisonOperatorList().begin(); for (; itOS != Resources.OperatingSystem.getSoftwareList().end(); itOS++, itCO++) { if (*itCO != &Software::operator==) out << Software::toString(*itCO) << " "; out << *itOS << std::endl; } } STRPRINT(out, Resources.Platform, Platform) INTPRINT(out, Resources.IndividualPhysicalMemory.max, IndividualPhysicalMemory) INTPRINT(out, Resources.IndividualVirtualMemory.max, IndividualVirtualMemory) INTPRINT(out, Resources.DiskSpaceRequirement.DiskSpace.max, DiskSpace [MB]) INTPRINT(out, Resources.DiskSpaceRequirement.CacheDiskSpace, CacheDiskSpace [MB]) INTPRINT(out, Resources.DiskSpaceRequirement.SessionDiskSpace, SessionDiskSpace [MB]) STRPRINT(out, Resources.QueueName, QueueName) if (!Resources.CEType.empty()) { out << IString(" Computing endpoint requirements:") << std::endl; std::list::const_iterator itCE = Resources.CEType.getSoftwareList().begin(); std::list::const_iterator itCO = Resources.CEType.getComparisonOperatorList().begin(); for (; itCE != Resources.CEType.getSoftwareList().end(); itCE++, itCO++) { if (*itCO != &Software::operator==) out << Software::toString(*itCO) << " "; out << *itCE << std::endl; } } switch (Resources.NodeAccess) { case NAT_NONE: break; case NAT_INBOUND: out << IString(" Node access: inbound") << std::endl; break; case NAT_OUTBOUND: out << IString(" Node access: outbound") << std::endl; break; case NAT_INOUTBOUND: out << IString(" Node access: inbound and outbound") << std::endl; break; } INTPRINT(out, Resources.SlotRequirement.NumberOfSlots, NumberOfSlots) INTPRINT(out, Resources.SlotRequirement.SlotsPerHost, SlotsPerHost) switch (Resources.SlotRequirement.ExclusiveExecution) { case SlotRequirementType::EE_DEFAULT: break; case SlotRequirementType::EE_TRUE: out << 
IString(" Job requires exclusive execution") << std::endl; break; case SlotRequirementType::EE_FALSE: out << IString(" Job does not require exclusive execution") << std::endl; break; } if (!Resources.RunTimeEnvironment.empty()) { out << IString(" Run time environment requirements:") << std::endl; std::list::const_iterator itSW = Resources.RunTimeEnvironment.getSoftwareList().begin(); std::list::const_iterator itCO = Resources.RunTimeEnvironment.getComparisonOperatorList().begin(); for (; itSW != Resources.RunTimeEnvironment.getSoftwareList().end(); itSW++, itCO++) { if (*itCO != &Software::operator==) out << Software::toString(*itCO) << " "; out << *itSW << std::endl; } } if (!DataStaging.InputFiles.empty()) { std::list::const_iterator iter = DataStaging.InputFiles.begin(); for (; iter != DataStaging.InputFiles.end(); ++iter) { out << IString(" Inputfile element:") << std::endl; out << IString(" Name: %s", iter->Name) << std::endl; if (iter->IsExecutable) { out << IString(" Is executable: true") << std::endl; } std::list::const_iterator itSource = iter->Sources.begin(); for (; itSource != iter->Sources.end(); ++itSource) { out << IString(" Sources: %s", itSource->str()) << std::endl; if (!itSource->DelegationID.empty()) { out << IString(" Sources.DelegationID: %s", itSource->DelegationID) << std::endl; } for (std::multimap::const_iterator itOptions = itSource->Options().begin(); itOptions != itSource->Options().end(); ++itOptions) { out << IString(" Sources.Options: %s = %s", itOptions->first, itOptions->second) << std::endl; } } } } if (!DataStaging.OutputFiles.empty()) { std::list::const_iterator iter = DataStaging.OutputFiles.begin(); for (; iter != DataStaging.OutputFiles.end(); ++iter) { out << IString(" Outputfile element:") << std::endl; out << IString(" Name: %s", iter->Name) << std::endl; std::list::const_iterator itTarget = iter->Targets.begin(); for (; itTarget != iter->Targets.end(); ++itTarget) { out << IString(" Targets: %s", itTarget->str()) << std::endl; if (!itTarget->DelegationID.empty()) { out << IString(" Targets.DelegationID: %s", itTarget->DelegationID) << std::endl; } for (std::multimap::const_iterator itOptions = itTarget->Options().begin(); itOptions != itTarget->Options().end(); ++itOptions) { out << IString(" Targets.Options: %s = %s", itOptions->first, itOptions->second) << std::endl; } } } } if (!DataStaging.DelegationID.empty()) { out << IString(" DelegationID element: %s", DataStaging.DelegationID) << std::endl; } } if (!OtherAttributes.empty()) { std::map::const_iterator it; for (it = OtherAttributes.begin(); it != OtherAttributes.end(); it++) out << IString(" Other attributes: [%s], %s", it->first, it->second) << std::endl; } out << std::endl; return true; } // end of Print void JobDescription::UseOriginal() { if (!alternatives.empty()) { std::list::iterator it = alternatives.insert(current, *this); // Insert this before current position. it->RemoveAlternatives(); // No nested alternatives. Set(alternatives.front()); // Set this to first element. alternatives.pop_front(); // Remove this from list. current = alternatives.begin(); // Set current to first element. } } bool JobDescription::UseAlternative() { if (!alternatives.empty() && current != alternatives.end()) { std::list::iterator it = alternatives.insert(current, *this); // Insert this before current position. it->RemoveAlternatives(); // No nested alternatives. Set(*current); // Set this to current. current = alternatives.erase(current); // Remove this from list. 
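// erase() returns the iterator following the removed element, so 'current'
// now refers to the next alternative (or to end() when none are left).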
return true; } // There is no alternative JobDescription objects or end of list. return false; } void JobDescription::AddAlternative(const JobDescription& j) { alternatives.push_back(j); if (current == alternatives.end()) { current--; // If at end of list, set current to newly added jobdescription. } if (!j.alternatives.empty()) { alternatives.back().RemoveAlternatives(); // No nested alternatives. alternatives.insert(alternatives.end(), j.alternatives.begin(), j.alternatives.end()); } } JobDescriptionResult JobDescription::ParseFromFile(const std::string& filename, std::list& jobdescs, const std::string& language, const std::string& dialect) { std::ifstream descriptionfile(filename.c_str()); if (!descriptionfile) { return JobDescriptionResult(false, "Can not open job description file: " + filename); } descriptionfile.seekg(0, std::ios::end); std::streamsize length = descriptionfile.tellg(); descriptionfile.seekg(0, std::ios::beg); char *buffer = new char[length + 1]; descriptionfile.read(buffer, length); descriptionfile.close(); buffer[length] = '\0'; JobDescriptionResult r = Parse((std::string)buffer, jobdescs); delete[] buffer; return r; } JobDescriptionResult JobDescription::Parse(const std::string& source, std::list& jobdescs, const std::string& language, const std::string& dialect) { if (source.empty()) { logger.msg(ERROR, "Empty job description source string"); return false; } jdpl_lock.lock(); if (!jdpl) { jdpl = new JobDescriptionParserPluginLoader(); } std::list< std::pair > results; bool has_parsers = false; bool has_languages = false; for (JobDescriptionParserPluginLoader::iterator it = jdpl->GetIterator(); it; ++it) { // Releasing lock because we can't know how long parsing will take // But for current implementations of parsers it is not specified // if their Parse/Unparse methods can be called concurently. has_parsers = true; if (language.empty() || it->IsLanguageSupported(language)) { has_languages = true; JobDescriptionParserPluginResult result = it->Parse(source, jobdescs, language, dialect); if (result) { jdpl_lock.unlock(); return JobDescriptionResult(true); } results.push_back(std::make_pair(!it->GetSupportedLanguages().empty() ? 
it->GetSupportedLanguages().front() : "", result)); } } jdpl_lock.unlock(); std::string parse_error; if(!has_parsers) { parse_error = IString("No job description parsers available").str(); } else if(!has_languages) { parse_error = IString("No job description parsers suitable for handling '%s' language are available", language).str(); } else { for (std::list< std::pair >::iterator itRes = results.begin(); itRes != results.end(); ++itRes) { if (itRes->second == JobDescriptionParserPluginResult::Failure) { if (!parse_error.empty()) { parse_error += "\n"; } parse_error += IString("%s parsing error", itRes->first).str(); if (itRes->second.HasErrors()) { parse_error += ":"; } for (std::list::const_iterator itErr = itRes->second.GetErrors().begin(); itErr != itRes->second.GetErrors().end(); ++itErr) { parse_error += "\n"; if (itErr->line_pos.first > 0 && itErr->line_pos.second > 0) { parse_error += inttostr(itErr->line_pos.first) + ":" + inttostr(itErr->line_pos.second) + ": "; } parse_error += itErr->message; } } } } if(parse_error.empty()) { parse_error = IString("No job description parser was able to interpret job description").str(); } return JobDescriptionResult(false, parse_error); } JobDescriptionResult JobDescription::UnParse(std::string& product, std::string language, const std::string& dialect) const { if (language.empty()) { language = sourceLanguage; if (language.empty()) { logger.msg(ERROR, "Job description language is not specified, unable to output description."); return false; } } jdpl_lock.lock(); if (!jdpl) { jdpl = new JobDescriptionParserPluginLoader(); } for (JobDescriptionParserPluginLoader::iterator it = jdpl->GetIterator(); it; ++it) { if (it->IsLanguageSupported(language)) { logger.msg(VERBOSE, "Generating %s job description output", language); bool r = it->UnParse(*this, product, language, dialect); std::string unparse_error = it->GetError(); JobDescriptionResult res(r,unparse_error); /* TOOD: This log message for some reason causes a race * condition in globus on certain platforms. It has * currently only been observed during job submission on * Ubuntu 11.10 64bit, in which case job submission fails. *if (!r) logger.msg(VERBOSE, "Generating %s job description output failed: %s", language, unparse_error); */ jdpl_lock.unlock(); return res; } } jdpl_lock.unlock(); logger.msg(ERROR, "Language (%s) not recognized by any job description parsers.", language); return JobDescriptionResult(false,"Language not recognized"); } bool JobDescription::Prepare(const ExecutionTarget* et) { // Check for identical file names. // Check if executable and input is contained in the file list. 
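// What follows: detect input files with duplicate names, implicitly add the
// executable and stdin as input files when they are local and not yet listed,
// stat local input files (recording size and, for small files, a checksum),
// implicitly add stdout/stderr/the log directory as output files, and finally
// adapt the software requirements and queue name to the selected
// ExecutionTarget, if one was given.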
bool executableIsAdded(false), inputIsAdded(false), outputIsAdded(false), errorIsAdded(false), logDirIsAdded(false); for (std::list::iterator it1 = DataStaging.InputFiles.begin(); it1 != DataStaging.InputFiles.end(); ++it1) { std::list::const_iterator it2 = it1; for (++it2; it2 != DataStaging.InputFiles.end(); ++it2) { if (it1->Name == it2->Name && !it1->Sources.empty() && !it2->Sources.empty()) { logger.msg(ERROR, "Two input files have identical name '%s'.", it1->Name); return false; } } /* if (!it1->Sources.empty() && it1->Sources.front().Protocol() == "file" && !Glib::file_test(it1->Sources.front().Path(), Glib::FILE_TEST_EXISTS)) { logger.msg(ERROR, "Cannot stat local input file '%s'", it1->Sources.front().Path()); return false; } */ executableIsAdded |= (it1->Name == Application.Executable.Path); inputIsAdded |= (it1->Name == Application.Input); } if (!Application.Executable.Path.empty() && !executableIsAdded && !Glib::path_is_absolute(Application.Executable.Path)) { if (!Glib::file_test(Application.Executable.Path, Glib::FILE_TEST_EXISTS)) { logger.msg(ERROR, "Cannot stat local input file '%s'", Application.Executable.Path); return false; } DataStaging.InputFiles.push_back(InputFileType()); InputFileType& file = DataStaging.InputFiles.back(); file.Name = Application.Executable.Path; file.Sources.push_back(URL(file.Name)); file.IsExecutable = true; } if (!Application.Input.empty() && !inputIsAdded && !Glib::path_is_absolute(Application.Input)) { if (!Glib::file_test(Application.Input, Glib::FILE_TEST_EXISTS)) { logger.msg(ERROR, "Cannot stat local input file '%s'", Application.Input); return false; } DataStaging.InputFiles.push_back(InputFileType()); InputFileType& file = DataStaging.InputFiles.back(); file.Name = Application.Input; file.Sources.push_back(URL(file.Name)); file.IsExecutable = false; } for (std::list::iterator it1 = DataStaging.InputFiles.begin(); it1 != DataStaging.InputFiles.end(); ++it1) { if (it1->Name.empty()) continue; // undefined input fule if (it1->Sources.empty() || (it1->Sources.front().Protocol() == "file")) { std::string path = it1->Name; if (!it1->Sources.empty()) path = it1->Sources.front().Path(); // Local file // Check presence struct stat st; if (!FileStat(path,&st,true) || !S_ISREG(st.st_mode)) { logger.msg(ERROR, "Cannot find local input file '%s' (%s)", path, it1->Name); return false; } // Collect information about file if (it1->FileSize < 0) it1->FileSize = st.st_size; // TODO: if FileSize defined compare? 
if (it1->Checksum.empty() && (st.st_size <= 65536)) { // Checksum is only done for reasonably small files // Checkum type is chosen for xRSL it1->Checksum = CheckSumAny::FileChecksum(path,CheckSumAny::cksum,true); } } } for (std::list::iterator it1 = DataStaging.OutputFiles.begin(); it1 != DataStaging.OutputFiles.end(); ++it1) { outputIsAdded |= (it1->Name == Application.Output); errorIsAdded |= (it1->Name == Application.Error); logDirIsAdded |= (it1->Name == Application.LogDir); } if (!Application.Output.empty() && !outputIsAdded) { DataStaging.OutputFiles.push_back(OutputFileType()); OutputFileType& file = DataStaging.OutputFiles.back(); file.Name = Application.Output; } if (!Application.Error.empty() && !errorIsAdded) { DataStaging.OutputFiles.push_back(OutputFileType()); OutputFileType& file = DataStaging.OutputFiles.back(); file.Name = Application.Error; } if (!Application.LogDir.empty() && !logDirIsAdded) { DataStaging.OutputFiles.push_back(OutputFileType()); OutputFileType& file = DataStaging.OutputFiles.back(); file.Name = Application.LogDir; } if (et != NULL) { if (!Resources.RunTimeEnvironment.empty() && !Resources.RunTimeEnvironment.selectSoftware(*et->ApplicationEnvironments)) { // This error should never happen since RTE is checked in the Broker. logger.msg(VERBOSE, "Unable to select runtime environment"); return false; } if (!Resources.CEType.empty() && !Resources.CEType.selectSoftware(et->ComputingEndpoint->Implementation)) { // This error should never happen since Middleware is checked in the Broker. logger.msg(VERBOSE, "Unable to select middleware"); return false; } if (!Resources.OperatingSystem.empty() && !Resources.OperatingSystem.selectSoftware(et->ExecutionEnvironment->OperatingSystem)) { // This error should never happen since OS is checked in the Broker. 
logger.msg(VERBOSE, "Unable to select operating system."); return false; } // Set queue name to the selected ExecutionTarget if(et->ComputingShare->MappingQueue.empty()) { Resources.QueueName = et->ComputingShare->Name; } else { Resources.QueueName = et->ComputingShare->MappingQueue; } } return true; } bool JobDescription::GetTestJob(int testid, JobDescription& jobdescription) { const std::string testJobFileName(ArcLocation::Get() + G_DIR_SEPARATOR_S PKGDATASUBDIR G_DIR_SEPARATOR_S "test-jobs" G_DIR_SEPARATOR_S "test-job-" + tostring(testid)); std::ifstream testJobFile(testJobFileName.c_str()); if (!Glib::file_test(testJobFileName, Glib::FILE_TEST_IS_REGULAR) || !testJobFile) { logger.msg(ERROR, "No test-job with ID %d found.", testid); return false; } std::string description; testJobFile.seekg(0, std::ios::end); description.reserve(testJobFile.tellg()); testJobFile.seekg(0, std::ios::beg); description.assign((std::istreambuf_iterator(testJobFile)), std::istreambuf_iterator()); std::list jobdescs; if (!JobDescription::Parse(description, jobdescs, "nordugrid:xrsl")) { logger.msg(ERROR, "Test was defined with ID %d, but some error occurred during parsing it.", testid); return false; } if (jobdescs.empty()) { logger.msg(ERROR, "No jobdescription resulted at %d test", testid); return false; } jobdescription = (*(jobdescs.begin())); return true; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/EntityRetriever.h0000644000000000000000000000012413213442403024442 xustar000000000000000027 mtime=1512981763.893816 27 atime=1513200574.720704 30 ctime=1513200659.777744767 nordugrid-arc-5.4.2/src/hed/libs/compute/EntityRetriever.h0000644000175000002070000004245513213442403024521 0ustar00mockbuildmock00000000000000#ifndef __ARC_ENTITYRETRIEVER_H__ #define __ARC_ENTITYRETRIEVER_H__ #include #include #include #include #include #include #include #include #include #include namespace Arc { class Logger; class SharedMutex; class SimpleCondition; class SimpleCounter; template<> class ThreadedPointer; class UserConfig; /// A general concept of an object which can consume entities use by the retrievers to return results /** * A class which wants to receive results from Retrievers, needs to subclass * this class, and implement the #addEntity method. * * \ingroup compute * \headerfile EntityRetriever.h arc/compute/EntityRetriever.h */ template class EntityConsumer { public: EntityConsumer() {} virtual ~EntityConsumer() {} /// Send an entity to this consumer /** * This is the method which will be called by the retrievers when a new result is available. */ virtual void addEntity(const T&) = 0; }; /// An entity consumer class storing all the consumed entities in a list. /** * This class is a concrete subclass of the EntityConsumer abstract class, * it also inherits from std::list, and implements the #addEntity method * in a way, that it stores all the consumed entities in the list (in itself). * * The retrievers return their results through entity consumer objects, * so this container object can be used in those places, and then the results * can be found in the container, which can be treated as a standard list. * * \ingroup compute * \headerfile EntityRetriever.h arc/compute/EntityRetriever.h */ template class EntityContainer : public std::list, public EntityConsumer { // The order of inheritance is important for Swig. public: EntityContainer() {} virtual ~EntityContainer() {} /// All the consumed entities are pushed to the list. 
/** * Because the EntityContainer is a standard list, it can push the entities in itself. */ virtual void addEntity(const T& t) { this->push_back(t); } }; /// Queries Endpoint objects (using plugins in parallel) and sends the found entities to consumers /** * The EntityRetriever is a template class which queries Endpoint objects and * returns entities of the template type T. The query is done by plugins * (capable of retrieving type T objects from Endpoint objects), and the * results are sent to the registered EntityConsumer objects (capable of * consuming type T objects). * * When an Endpoint is added to the EntityRetriever, a new thread is started which * queries the given Endpoint. Each plugin is capable of querying Endpoint * objects with given interfaces (which is indicated with the InterfaceName * attribute of the Endpoint). If the Endpoint has the InterfaceName specified, * then the plugin capable of querying that interface will be selected. If the * InterfaceName of the Endpoint is not specified, all the available plugins * will be considered. If there is a preferred list of interfaces, then first * the plugins supporting those interfaces will be tried, and if there are no * preferred interfaces, or the preferred ones did not give any result, then * all the plugins will be tried. All this happens parallel in separate threads. * Currently there are three instance classes: * \li the #ServiceEndpointRetriever queries service registries and returns new * Endpoint objects * \li the #TargetInformationRetriever queries computing elements and returns * reource information in form of ComputingServiceType objects * \li the #JobListRetriever queries computing elements and returns information * about the jobs at the computing element in form of Job objects * * To start querying, a new EntityRetriever needs to be created with the user's * credentials in the UserConfig object, then one or more consumers needs to be * added with the #addConsumer method (e.g. an EntityContainer of the given T * type), then the Endpoints need to be added one by one with the #addEndpoint * method. Then the #wait method can be called to wait for all the results to * arrive, after which we can be sure that all the retrieved entities are passed * to the registered consumer objects. If we registered an EntityContainer, then * we can get all the results from the container, using it as a standard list. * * It is possible to specify options in the constructor, which in case of the * #TargetInformationRetriever and the #JobListRetriever classes is * an EndpointQueryOptions object containing a list of preferred InterfaceNames. * When an Endpoint has not InterfaceName specified, these preferred * InterfaceNames will be tried first. The #ServiceEndpointRetriever has * different options though: * the EndpointQueryOptions object does not contain a preferred list * of InterfaceNames. It has a flag for recursivity instead and string lists for * filtering services by capability and rejecting them by URL. 
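 *
 * As an illustration of these options, a sketch only: the argument order of the
 * EndpointQueryOptions constructor shown here is an assumption, and the
 * capability string and rejected URL are merely examples.
 * \code
std::list<std::string> capabilities;
capabilities.push_back("information.discovery.resource"); // keep only endpoints advertising this capability
std::list<std::string> rejectedURLs;
rejectedURLs.push_back("ldap://down.example.org:2135"); // never contact this URL
// first argument: query recursively (also follow registries found in registries)
Arc::EndpointQueryOptions<Arc::Endpoint> options(true, capabilities, rejectedURLs);
Arc::ServiceEndpointRetriever retriever(uc, options); // 'uc' as in the examples below
 * \endcode
 *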
* * \see ComputingServiceRetriever which combines the #ServiceEndpointRetriever * and the #TargetInformationRetriever to query both the service registries and * the computing elements * * #ServiceEndpointRetriever example: * \code Arc::UserConfig uc; // create the retriever with no options Arc::ServiceEndpointRetriever retriever(uc); // create a container which will store the results Arc::EntityContainer container; // add the container to the retriever retriever.addConsumer(container); // create an endpoint which will be queried Arc::Endpoint registry("test.nordugrid.org", Arc::Endpoint::REGISTRY); // start querying the endpoint retriever.addEndpoint(registry); // wait for the querying process to finish retriever.wait(); // get the status of the query Arc::EndpointQueryingStatus status = retriever.getStatusOfEndpoint(registry); \endcode * * After #wait returns, container contains all the services found in the * registry "test.nordugrid.org". * * #TargetInformationRetriever example: * \code Arc::UserConfig uc; // create the retriever with no options Arc::TargetInformationRetriever retriever(uc); // create a container which will store the results Arc::EntityContainer container; // add the container to the retriever retriever.addConsumer(container); // create an endpoint which will be queried Arc::Endpoint ce("test.nordugrid.org", Arc::Endpoint::COMPUTINGINFO); // start querying the endpoint retriever.addEndpoint(ce); // wait for the querying process to finish retriever.wait(); // get the status of the query Arc::EndpointQueryingStatus status = retriever.getStatusOfEndpoint(ce); \endcode * * After #wait returns, container contains the ComputingServiceType object which * has the full %GLUE2 information about the computing element. * * \ingroup compute * \headerfile EntityRetriever.h arc/compute/EntityRetriever.h */ template class EntityRetriever : public EntityConsumer { public: /// Needs the credentials of the user and can have some options /** Creating the EntityRetriever does not start any querying yet. \param uc UserConfig with the user's credentials \param options contain type T specific querying options */ EntityRetriever(const UserConfig& uc, const EndpointQueryOptions& options = EndpointQueryOptions()); ~EntityRetriever() { common->deactivate(); } /** This method blocks until all the results arrive. */ void wait() const { result.wait(); }; //void waitForAll() const; // TODO: Make it possible to be nice and wait for all threads to finish. /** Check if the query is finished. \return true if the query is finished, all the results were delivered to the consumers. */ bool isDone() const { return result.wait(0); }; /** Register a new consumer which will receive results from now on. \param[in] addConsumer_consumer is a consumer object capable of consuming type T objects */ void addConsumer(EntityConsumer& addConsumer_consumer /* The name 'addConsumer_consumer' is important for Swig when matching methods */) { consumerLock.lock(); consumers.push_back(&addConsumer_consumer); consumerLock.unlock(); }; /** Remove a previously registered consumer \param[in] removeConsumer_consumer is the consumer object */ void removeConsumer(const EntityConsumer& removeConsumer_consumer /* The name 'removeConsumer_consumer' is important for Swig when matching methods */); /// Get the status of the query process of a given Endpoint. /** \param[in] endpoint is the Endpoint whose status we want to know. 
\return an EndpointQueryingStatus object containing the status of the query */ EndpointQueryingStatus getStatusOfEndpoint(const Endpoint& endpoint) const; /// Get status of all the queried Endpoint objects /** * This method returns a copy of the internal status map, and thus is only * a snapshot. If you want the final status map, make sure to invoke the * EntityRetriever::wait method before this one. * \return a map with Endpoint objects as keys and status objects as values. **/ EndpointStatusMap getAllStatuses() const { statusLock.lock(); EndpointStatusMap s = statuses; statusLock.unlock(); return s; } /// Set the status of the query process of a given Endpoint. /** This method should only be used by the plugins when they finished querying an Endpoint. \param[in] endpoint is the Endpoint whose status we want to set \param[in] status is the EndpointQueryStatus object containing the status \param[in] overwrite indicates if a previous status should be overwritten, if not, then in case of an existing status the method returns false \return true if the new status was set, false if it was not set (e.g. because overwrite was false, and the status was already set previously) */ bool setStatusOfEndpoint(const Endpoint& endpoint, const EndpointQueryingStatus& status, bool overwrite = true); /// Insert into \a results the endpoint.ServiceName() of each endpoint with the given status. /** \param[in] status is the status of the desired endpoints \param[in,out] result is a set into which the matching endpoint service names are inserted */ void getServicesWithStatus(const EndpointQueryingStatus& status, std::set& result); /// Clear statuses of registered endpoints /** * The status map of registered endpoints will be cleared when calling this * method. That can be useful if an already registered endpoint need to be * queried again. */ void clearEndpointStatuses() { statusLock.lock(); statuses.clear(); statusLock.unlock(); return; } /// Remove a particular registered endpoint /** * The specified endpoint will be removed from the status map of registered * endpoints. * \param e endpoint to remove from status map. * \return true is returned if endpoint is found in the map, otherwise false * is returned. */ bool removeEndpoint(const Endpoint& e) { statusLock.lock(); EndpointStatusMap::iterator it = statuses.find(e); if (it != statuses.end()) { statuses.erase(it); statusLock.unlock(); return true; }; statusLock.unlock(); return false; } /** This method should only be used by the plugins when they return their results. This will send the results to all the registered consumers. In the case of the #ServiceEndpointRetriever, the retrieved entities are actually Endpoint objects, and the #ServiceEndpointRetriever does more work here depending on the options set in EndpointQueryOptions: - if the URL of a retrieved Endpoint is on the rejected list, the Endpoint is not sent to the consumers - if recursivity is turned on, and the retrieved Endpoint is a service registry, then it is sent to the #addEntity method for querying - if the retrieved Endpoint does not have at least one of the capabilities provided in the capability filter, then the Endpoint is not sent to the consumers \param[in] entity is the type T object retrieved from the endpoints */ virtual void addEntity(const T& entity); /// Starts querying an Endpoint /** This method is used to start querying an Endpoint. It starts the query process in a separate thread, and returns immediately. 
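 *
 * Since the results arrive asynchronously, a caller that does not want to
 * collect them in an EntityContainer can implement its own consumer instead.
 * The following is only a sketch (the class name PrintingConsumer is made up
 * here, it assumes that EntityConsumer<Endpoint> only requires addEntity to be
 * implemented, and addEntity may be called concurrently from several query threads):
 * \code
 class PrintingConsumer : public Arc::EntityConsumer<Arc::Endpoint> {
 public:
   // called by the retriever for every Endpoint found
   virtual void addEntity(const Arc::Endpoint& e) { std::cout << e.str() << std::endl; }
 };

 Arc::UserConfig uc;
 Arc::ServiceEndpointRetriever retriever(uc);
 PrintingConsumer consumer;
 retriever.addConsumer(consumer);
 retriever.addEndpoint(Arc::Endpoint("test.nordugrid.org", Arc::Endpoint::REGISTRY));
 retriever.wait();
 \endcode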
\param[in] endpoint is the Endpoint to query */ virtual void addEndpoint(const Endpoint& endpoint);
/// Sets whether to wait for all queries
/** This method specifies whether the whole query must wait for all individual queries to the same endpoint to finish. By default it waits for the first successful one. But in some cases it may be needed to obtain results from all available interfaces because they may be different. */ void needAllResults(bool all_results = true) { need_all_results = all_results; }
protected: static void queryEndpoint(void *arg_); void checkSuspendedAndStart(const Endpoint& e);
// Common configuration part
class Common : public EntityRetrieverPluginLoader { public: Common(EntityRetriever* t, const UserConfig& u) : EntityRetrieverPluginLoader(), mutex(), t(t), uc(u) {}; void deactivate(void) { mutex.lockExclusive(); t = NULL; mutex.unlockExclusive(); } bool lockExclusiveIfValid(void) { mutex.lockExclusive(); if(t) return true; mutex.unlockExclusive(); return false; } void unlockExclusive(void) { mutex.unlockExclusive(); } bool lockSharedIfValid(void) { mutex.lockShared(); if(t) return true; mutex.unlockShared(); return false; } void unlockShared(void) { mutex.unlockShared(); } operator const UserConfig&(void) const { return uc; } const std::list& getAvailablePlugins(void) const { return availablePlugins; } void setAvailablePlugins(const std::list& newAvailablePlugins) { availablePlugins = newAvailablePlugins; } EntityRetriever* operator->(void) { return t; } EntityRetriever* operator*(void) { return t; } private: SharedMutex mutex; EntityRetriever* t; const UserConfig uc; std::list availablePlugins; }; ThreadedPointer common;
// Represents completeness of queries run in threads.
// Different implementations are meant for waiting for either one or all threads.
// TODO: the counter is duplicated in this implementation. It may be simplified
// either by using the counter of ThreadedPointer or by implementing part of
// ThreadedPointer directly. 
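// Rough sketch of the intended Result usage, based on the copy and destructor
// semantics below (an illustration only, not a verbatim excerpt of the code):
//
//   Result overall(true);        // true: one successful query is enough
//   // for every query thread started:
//   Result perQuery(overall);    // the copy increments the shared counter
//   ...
//   perQuery.setSuccess();       // mark this individual query as successful
//   // when perQuery is destroyed it either decrements the counter, or resets
//   // it to 0 because one success is enough; overall.wait() returns once the
//   // counter reaches 0.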
class Result: private ThreadedPointer { public: // Creates initial instance Result(bool one_success = false): ThreadedPointer(new SimpleCounter), success(false),need_one_success(one_success) { }; // Creates new reference representing query - increments counter Result(const Result& r): ThreadedPointer(r), success(false),need_one_success(r.need_one_success) { Ptr()->inc(); }; // Query finished - decrement or reset counter (if one result is enough) ~Result(void) { if(need_one_success && success) { Ptr()->set(0); } else { Ptr()->dec(); }; }; // Mark this result as successful (failure by default) void setSuccess(void) { success = true; }; // Wait for queries to finish bool wait(int t = -1) const { return Ptr()->wait(t); }; private: bool success; bool need_one_success; }; Result result; class ThreadArg { public: ThreadArg(const ThreadedPointer& common, Result& result, const Endpoint& endpoint, const EndpointQueryOptions& options) : common(common), result(result), endpoint(endpoint), options(options) {}; ThreadArg(const ThreadArg& v, Result& result) : common(v.common), result(result), endpoint(v.endpoint), pluginName(v.pluginName), options(v.options) {}; // Objects for communication with caller ThreadedPointer common; Result result; // Per-thread parameters Endpoint endpoint; std::string pluginName; EndpointQueryOptions options; }; EndpointStatusMap statuses; static Logger logger; const UserConfig& uc; std::list< EntityConsumer* > consumers; const EndpointQueryOptions options; mutable SimpleCondition consumerLock; mutable SimpleCondition statusLock; std::map interfacePluginMap; bool need_all_results; }; /// The ServiceEndpointRetriever is an EntityRetriever retrieving Endpoint objects. /** * It queries service registries to get endpoints of registered services. * * \ingroup compute * \headerfile EntityRetriever.h arc/compute/EntityRetriever.h */ typedef EntityRetriever ServiceEndpointRetriever; /// The TargetInformationRetriever is an EntityRetriever retrieving ComputingServiceType objects. /** * It queries computing elements to get the full GLUE2 information about the resource. * * \ingroup compute * \headerfile EntityRetriever.h arc/compute/EntityRetriever.h */ typedef EntityRetriever TargetInformationRetriever; /// The JobListRetriever is an EntityRetriever retrieving Job objects. /** * It queries computing elements to get the list of jobs residing on the resource. 
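 *
 * Usage follows the same pattern as the retriever examples above; a minimal
 * sketch (the hostname is a placeholder):
 * \code
 Arc::UserConfig uc;
 Arc::JobListRetriever retriever(uc);
 Arc::EntityContainer<Arc::Job> container;
 retriever.addConsumer(container);
 // a computing element whose information system lists the jobs residing on it
 Arc::Endpoint ce("test.nordugrid.org", Arc::Endpoint::JOBLIST);
 retriever.addEndpoint(ce);
 retriever.wait();
 // container now holds the retrieved Job objects and can be used as a standard list
 \endcode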
* * \ingroup compute * \headerfile EntityRetriever.h arc/compute/EntityRetriever.h */ typedef EntityRetriever JobListRetriever; } // namespace Arc #endif // __ARC_ENTITYRETRIEVER_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/JobInformationStorageXML.cpp0000644000000000000000000000012312207410330026452 xustar000000000000000026 mtime=1377702104.45933 27 atime=1513200574.696704 30 ctime=1513200659.811745183 nordugrid-arc-5.4.2/src/hed/libs/compute/JobInformationStorageXML.cpp0000644000175000002070000002066112207410330026525 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "JobInformationStorageXML.h" namespace Arc { Logger JobInformationStorageXML::logger(Logger::getRootLogger(), "JobInformationStorageXML"); JobInformationStorageXML::JobInformationStorageXML(const std::string& name, unsigned nTries, unsigned tryInterval) : JobInformationStorage(name, nTries, tryInterval) { isValid = false; isStorageExisting = false; if (!Glib::file_test(name, Glib::FILE_TEST_EXISTS)) { const std::string joblistdir = Glib::path_get_dirname(name); // Check if the parent directory exist. if (!Glib::file_test(joblistdir, Glib::FILE_TEST_EXISTS)) { logger.msg(ERROR, "Job list file cannot be created: The parent directory (%s) doesn't exist.", joblistdir); return; } else if (!Glib::file_test(joblistdir, Glib::FILE_TEST_IS_DIR)) { logger.msg(ERROR, "Job list file cannot be created: %s is not a directory", joblistdir); return; } isValid = true; return; } else if (!Glib::file_test(name, Glib::FILE_TEST_IS_REGULAR)) { logger.msg(ERROR, "Job list file (%s) is not a regular file", name); return; } FileLock lock(name); for (int tries = (int)nTries; tries > 0; --tries) { if (lock.acquire()) { isStorageExisting = isValid = jobstorage.ReadFromFile(name); lock.release(); break; } if (tries == 6) { logger.msg(VERBOSE, "Waiting for lock on job list file %s", name); } Glib::usleep(tryInterval); } } bool JobInformationStorageXML::ReadAll(std::list& jobs, const std::list& rEndpoints) { if (!isValid) { return false; } jobs.clear(); XMLNodeList xmljobs = jobstorage.Path("Job"); for (XMLNodeList::iterator xit = xmljobs.begin(); xit != xmljobs.end(); ++xit) { jobs.push_back(*xit); for (std::list::const_iterator rEIt = rEndpoints.begin(); rEIt != rEndpoints.end(); ++rEIt) { if (jobs.back().JobManagementURL.StringMatches(*rEIt)) { jobs.pop_back(); break; } } } return true; } bool JobInformationStorageXML::Read(std::list& jobs, std::list& jobIdentifiers, const std::list& endpoints, const std::list& rEndpoints) { if (!ReadAll(jobs, rEndpoints)) { return false; } std::list jobIdentifiersCopy = jobIdentifiers; for (std::list::iterator itJ = jobs.begin(); itJ != jobs.end();) { // Check if the job (itJ) is selected by the job identifies, either by job ID or Name. std::list::iterator itJIdentifier = jobIdentifiers.begin(); for (;itJIdentifier != jobIdentifiers.end(); ++itJIdentifier) { if ((!itJ->Name.empty() && itJ->Name == *itJIdentifier) || (itJ->JobID == *itJIdentifier)) { break; } } if (itJIdentifier != jobIdentifiers.end()) { // Job explicitly specified. Remove id from the copy list, in order to keep track of used identifiers. std::list::iterator itJIdentifierCopy; while ((itJIdentifierCopy = std::find(jobIdentifiersCopy.begin(), jobIdentifiersCopy.end(), *itJIdentifier)) != jobIdentifiersCopy.end()) { jobIdentifiersCopy.erase(itJIdentifierCopy); } ++itJ; continue; } // Check if the job (itJ) is selected by endpoints. 
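    // A job is kept if it was matched above by job ID or name, or if its
    // JobManagementURL matches one of the given endpoint patterns below; jobs
    // matching neither criterion are erased, and identifiers that matched at
    // least one job are removed from the caller's identifier list at the end.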
std::list::const_iterator itC = endpoints.begin(); for (; itC != endpoints.end(); ++itC) { if (itJ->JobManagementURL.StringMatches(*itC)) { break; } } if (itC != endpoints.end()) { // Cluster on which job reside is explicitly specified. ++itJ; continue; } // Job is not selected - remove it. itJ = jobs.erase(itJ); } jobIdentifiers = jobIdentifiersCopy; return true; } bool JobInformationStorageXML::Clean() { if (!isValid) { return false; } if (remove(name.c_str()) != 0) { if (errno == ENOENT) { jobstorage.Destroy(); return true; // No such file. DB already cleaned. } logger.msg(VERBOSE, "Unable to truncate job database (%s)", name); perror("Error"); return false; } jobstorage.Destroy(); return true; } bool JobInformationStorageXML::Write(const std::list& jobs, const std::set& prunedServices, std::list& newJobs) { if (!isValid) { return false; } if (!jobstorage) { Config().Swap(jobstorage); } // Use std::map to store job IDs to be searched for duplicates. std::map jobIDXMLMap; std::map jobsToRemove; for (Arc::XMLNode j = jobstorage["Job"]; j; ++j) { if (!((std::string)j["JobID"]).empty()) { std::string serviceName = URL(j["ServiceInformationURL"]).Host(); if (!serviceName.empty() && prunedServices.count(serviceName)) { logger.msg(DEBUG, "Will remove %s on service %s.", ((std::string)j["JobID"]).c_str(), serviceName); jobsToRemove[(std::string)j["JobID"]] = j; } else { jobIDXMLMap[(std::string)j["JobID"]] = j; } } } // Remove jobs which belong to our list of endpoints to prune. for (std::map::iterator it = jobsToRemove.begin(); it != jobsToRemove.end(); ++it) { it->second.Destroy(); } std::map newJobsMap; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { std::map::iterator itJobXML = jobIDXMLMap.find(it->JobID); if (itJobXML == jobIDXMLMap.end()) { XMLNode xJob = jobstorage.NewChild("Job"); it->ToXML(xJob); jobIDXMLMap[it->JobID] = xJob; std::map::iterator itRemovedJobs = jobsToRemove.find(it->JobID); if (itRemovedJobs == jobsToRemove.end()) { newJobsMap[it->JobID] = &(*it); } } else { // Duplicate found, replace it. itJobXML->second.Replace(XMLNode(NS(), "Job")); it->ToXML(itJobXML->second); // Only add to newJobsMap if this is a new job, i.e. not previous present in jobfile. std::map::iterator itNewJobsMap = newJobsMap.find(it->JobID); if (itNewJobsMap != newJobsMap.end()) { itNewJobsMap->second = &(*it); } } } // Add pointers to new Job objects to the newJobs list. for (std::map::const_iterator it = newJobsMap.begin(); it != newJobsMap.end(); ++it) { newJobs.push_back(it->second); } FileLock lock(name); for (int tries = nTries; tries > 0; --tries) { if (lock.acquire()) { if (!jobstorage.SaveToFile(name)) { lock.release(); return false; } lock.release(); return true; } if (tries == 6) { logger.msg(WARNING, "Waiting for lock on job list file %s", name); } Glib::usleep(tryInterval); } return false; } bool JobInformationStorageXML::Remove(const std::list& jobids) { if (!isValid) { return false; } if (jobids.empty()) { return true; } XMLNodeList xmlJobs = jobstorage.Path("Job"); for (std::list::const_iterator it = jobids.begin(); it != jobids.end(); ++it) { for (XMLNodeList::iterator xJIt = xmlJobs.begin(); xJIt != xmlJobs.end(); ++xJIt) { if ((*xJIt)["JobID"] == *it || (*xJIt)["IDFromEndpoint"] == *it // Included for backwards compatibility. ) { xJIt->Destroy(); // Do not break, since for some reason there might be multiple identical jobs in the file. 
} } } FileLock lock(name); for (int tries = nTries; tries > 0; --tries) { if (lock.acquire()) { if (!jobstorage.SaveToFile(name)) { lock.release(); return false; } lock.release(); return true; } if (tries == 6) { logger.msg(VERBOSE, "Waiting for lock on job list file %s", name); } Glib::usleep(tryInterval); } return false; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/JobState.h0000644000000000000000000000012412306304733023016 xustar000000000000000027 mtime=1394182619.331424 27 atime=1513200574.740704 30 ctime=1513200659.765744621 nordugrid-arc-5.4.2/src/hed/libs/compute/JobState.h0000644000175000002070000001441012306304733023063 0ustar00mockbuildmock00000000000000#ifndef __ARC_JOBSTATE_H__ #define __ARC_JOBSTATE_H__ #include #include #ifdef JOBSTATE_TABLE #undef JOBSTATE_TABLE #endif #ifdef JOBSTATE_X #undef JOBSTATE_X #endif namespace Arc { /** libarccompute state model. * The class comprise the general state model used by the libarccompute * library. A JobState object has two attributes: A native state as a string, * and a enum value specifying the mapping to the state model. Each job * management extension (JobControllerPlugin specialisation), likely a * implementation against a computing service, should define a mapping of the * native job states to those in the libarccompute state model, which should * then be used when constructing a JobState object for that specific * extension. In that way both the general and the specific state is * available. * * A derived class should consist of a constructor and a mapping function (a * JobStateMap) which maps a * std::string to a JobState:StateType. An example of a constructor in a * plugin could be: * JobStatePlugin::JobStatePluging(const std::string& state) : JobState(state, &pluginStateMap) {} * where &pluginStateMap is a reference to the JobStateMap defined by the * derived class. * * Documentation for mapping of job states for different computing services to * those defined in this class can be found \subpage job_state_mapping "here". * * \ingroup compute * \headerfile JobState.h arc/compute/JobState.h */ class JobState { public: /** \enum StateType * \brief Possible job states in libarccompute * * The possible job states usable in the libarccompute library with a short * description is listed below: * * \mapdef job_state_mapping Job state mapping * On this page the mapping of job state attributes of different * computing services to those defined by the libarccompute library in the * \ref Arc::JobState "JobState" class is documented. **/ enum StateType { /** %Job state could not be resolved. * \mapdefattr UNDEFINED Arc::JobState **/ UNDEFINED, /** %Job was accepted by the computing service. * \mapdefattr ACCEPTED Arc::JobState */ ACCEPTED, /** %Job is being prepared by the computing service. * \mapdefattr PREPARING Arc::JobState **/ PREPARING, /** %Job is being submitted to a computing share. * \mapdefattr SUBMITTING Arc::JobState **/ SUBMITTING, /** %Job is put on hold. * \mapdefattr HOLD Arc::JobState **/ HOLD, /** %Job is on computing share waiting to run. * \mapdefattr QUEUING Arc::JobState **/ QUEUING, /** %Job is running on computing share. * \mapdefattr RUNNING Arc::JobState **/ RUNNING, /** %Job is finishing. * \mapdefattr FINISHING Arc::JobState **/ FINISHING, /** %Job has finished. * \mapdefattr FINISHED Arc::JobState **/ FINISHED, /** %Job has been killed. * \mapdefattr KILLED Arc::JobState **/ KILLED, /** %Job failed. * \mapdefattr FAILED Arc::JobState **/ FAILED, /** %Job have been deleted. 
* \mapdefattr DELETED Arc::JobState **/ DELETED, /** Any job state which does not fit the above states. * \mapdefattr OTHER Arc::JobState **/ OTHER }; static const std::string StateTypeString[]; JobState() : ssf(FormatSpecificState), type(UNDEFINED) {} /// Construct state from string /** * \since Added in 4.0.0. **/ JobState(const std::string& jobstate) : ssf(FormatSpecificState), state(jobstate), type(GetStateType(jobstate)) {} JobState& operator=(const JobState& js) { type = js.type; state = js.state; ssf = js.ssf; return *this; } operator bool() const { return type != UNDEFINED; } operator StateType() const { return type; } bool operator!() const { return type == UNDEFINED; } bool operator==(const StateType& st) const { return type == st; } bool operator!=(const StateType& st) const { return type != st; } /// Check if state is finished /** * @return true is returned if the StateType is equal to FINISHED, KILLED, * FAILED or DELETED, otherwise false is returned. **/ bool IsFinished() const { return type == FINISHED || type == KILLED || type == FAILED || type == DELETED; } /// Unformatted specific job state /** * Get the unformatted specific job state as returned by the CE. * * @return job state as returned by CE * @see GetSpecificState * @see GetGeneralState **/ const std::string& operator()() const { return state; } /// General string representation of job state /** * Get the string representation of the job state as mapped to the * libarccompute job state model. * * @return string representing general job state * @see enum StateType * @see GetSpecificState **/ const std::string& GetGeneralState() const { return StateTypeString[type]; } /// Specific string representation of job state /** * Get the string representation of the job state as returned by * the CE service possibly formatted to a human readable string. 
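 *
 * For example (a sketch; the specific state strings shown are only
 * illustrations, they depend entirely on the computing element and its plugin):
 * \code
 Arc::JobState state = job.State; // assuming 'job' is an Arc::Job object
 std::cout << state.GetGeneralState() << std::endl;  // mapped state, e.g. "Running"
 std::cout << state.GetSpecificState() << std::endl; // CE specific state, e.g. "INLRMS:R"
 std::cout << state() << std::endl;                  // unformatted CE state
 if (state.IsFinished()) {
   // state is FINISHED, KILLED, FAILED or DELETED
 }
 \endcode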
* * @return string representing specific, possibly formatted, job state * @see GetGeneralState * @see operator() **/ std::string GetSpecificState() const { return ssf(state); } static StateType GetStateType(const std::string& state); friend class Job; protected: typedef std::string (*SpecificStateFormater)(const std::string&); SpecificStateFormater ssf; static std::string FormatSpecificState(const std::string& state) { return state; } JobState(const std::string& state, JobState::StateType (*map)(const std::string&), SpecificStateFormater ssf = FormatSpecificState) : ssf(ssf), state(state), type((*map)(state)) {}; std::string state; StateType type; }; /** * \ingroup compute * \headerfile JobState.h arc/compute/JobState.h */ typedef JobState::StateType (*JobStateMap)(const std::string&); } #endif // __ARC_JOBSTATE_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/JobInformationStorageBDB.cpp0000644000000000000000000000012413065017544026417 xustar000000000000000027 mtime=1490296676.252136 27 atime=1513200574.674703 30 ctime=1513200659.812745196 nordugrid-arc-5.4.2/src/hed/libs/compute/JobInformationStorageBDB.cpp0000644000175000002070000005244013065017544026471 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "JobInformationStorageBDB.h" namespace Arc { Logger JobInformationStorageBDB::logger(Logger::getRootLogger(), "JobInformationStorageBDB"); static void* store_string(const std::string& str, void* buf) { uint32_t l = str.length(); unsigned char* p = (unsigned char*)buf; *p = (unsigned char)l; l >>= 8; ++p; *p = (unsigned char)l; l >>= 8; ++p; *p = (unsigned char)l; l >>= 8; ++p; *p = (unsigned char)l; l >>= 8; ++p; ::memcpy(p,str.c_str(),str.length()); p += str.length(); return (void*)p; } static void* parse_string(std::string& str, const void* buf, uint32_t& size) { uint32_t l = 0; const unsigned char* p = (unsigned char*)buf; if(size < 4) { p += size; size = 0; return (void*)p; }; l |= ((uint32_t)(*p)) << 0; ++p; --size; l |= ((uint32_t)(*p)) << 8; ++p; --size; l |= ((uint32_t)(*p)) << 16; ++p; --size; l |= ((uint32_t)(*p)) << 24; ++p; --size; if(l > size) l = size; // TODO: sanity check str.assign((const char*)p,l); p += l; size -= l; return (void*)p; } static void serialiseJob(const Job& j, Dbt& data) { const std::string version = "3.0.1"; const std::string empty_string; const std::string dataItems[] = {version, j.IDFromEndpoint, j.Name, j.JobStatusInterfaceName, j.JobStatusURL.fullstr(), j.JobManagementInterfaceName, j.JobManagementURL.fullstr(), j.ServiceInformationInterfaceName, j.ServiceInformationURL.fullstr(), j.SessionDir.fullstr(), j.StageInDir.fullstr(), j.StageOutDir.fullstr(), j.JobDescriptionDocument, tostring(j.LocalSubmissionTime.GetTime()), j.DelegationID.size()>0?*j.DelegationID.begin():empty_string}; const unsigned nItems = sizeof(dataItems)/sizeof(dataItems[0]); data.set_data(NULL); data.set_size(0); uint32_t l = 0; for (unsigned i = 0; i < nItems; ++i) l += 4 + dataItems[i].length(); void* d = (void*)::malloc(l); if(!d) return; data.set_data(d); data.set_size(l); for (unsigned i = 0; i < nItems; ++i) d = store_string(dataItems[i], d); } static void deserialiseJob(Job& j, const Dbt& data) { uint32_t size = 0; void* d = NULL; d = (void*)data.get_data(); size = (uint32_t)data.get_size(); std::string version; d = parse_string(version, d, size); if ((version == "3.0.0") || (version == "3.0.1")) { /* Order of items in record. 
Version 3.0.0 {version, j.IDFromEndpoint, j.Name, j.JobStatusInterfaceName, j.JobStatusURL.fullstr(), j.JobManagementInterfaceName, j.JobManagementURL.fullstr(), j.ServiceInformationInterfaceName, j.ServiceInformationURL.fullstr(), j.SessionDir.fullstr(), j.StageInDir.fullstr(), j.StageOutDir.fullstr(), j.JobDescriptionDocument, tostring(j.LocalSubmissionTime.GetTime())}; Version 3.0.1 ..., j.DelegationID} */ std::string s; d = parse_string(j.IDFromEndpoint, d, size); d = parse_string(j.Name, d, size); d = parse_string(j.JobStatusInterfaceName, d, size); d = parse_string(s, d, size); j.JobStatusURL = URL(s); d = parse_string(j.JobManagementInterfaceName, d, size); d = parse_string(s, d, size); j.JobManagementURL = URL(s); d = parse_string(j.ServiceInformationInterfaceName, d, size); d = parse_string(s, d, size); j.ServiceInformationURL = URL(s); d = parse_string(s, d, size); j.SessionDir = URL(s); d = parse_string(s, d, size); j.StageInDir = URL(s); d = parse_string(s, d, size); j.StageOutDir = URL(s); d = parse_string(j.JobDescriptionDocument, d, size); d = parse_string(s, d, size); j.LocalSubmissionTime.SetTime(stringtoi(s)); j.DelegationID.clear(); if (version == "3.0.1") { d = parse_string(s, d, size); if(!s.empty()) j.DelegationID.push_back(s); } } } static void deserialiseNthJobAttribute(std::string& attr, const Dbt& data, unsigned n) { uint32_t size = 0; void* d = NULL; d = (void*)data.get_data(); size = (uint32_t)data.get_size(); std::string version; d = parse_string(version, d, size); if ((version == "3.0.0") || (version == "3.0.1")) { for (unsigned i = 0; i < n-1; ++i) { d = parse_string(attr, d, size); } } } static int getNameKey(Db *secondary, const Dbt *key, const Dbt *data, Dbt *result) { std::string name; // 3rd attribute in job record is job name. deserialiseNthJobAttribute(name, *data, 3); result->set_flags(DB_DBT_APPMALLOC); result->set_size(name.size()); result->set_data(strdup(name.c_str())); return 0; } static int getEndpointKey(Db *secondary, const Dbt *key, const Dbt *data, Dbt *result) { std::string endpointS; // 7th attribute in job record is job management URL. deserialiseNthJobAttribute(endpointS, *data, 7); endpointS = URL(endpointS).Host(); if (endpointS.empty()) { return DB_DONOTINDEX; } result->set_flags(DB_DBT_APPMALLOC); result->set_size(endpointS.size()); result->set_data(strdup(endpointS.c_str())); return 0; } static int getServiceInfoHostnameKey(Db *secondary, const Dbt *key, const Dbt *data, Dbt *result) { std::string endpointS; // 9th attribute in job record is service information URL. deserialiseNthJobAttribute(endpointS, *data, 9); endpointS = URL(endpointS).Host(); if (endpointS.empty()) { return DB_DONOTINDEX; } result->set_flags(DB_DBT_APPMALLOC); result->set_size(endpointS.size()); result->set_data(strdup(endpointS.c_str())); return 0; } JobInformationStorageBDB::JobDB::JobDB(const std::string& name, u_int32_t flags) : dbEnv(NULL), jobDB(NULL), endpointSecondaryKeyDB(NULL), nameSecondaryKeyDB(NULL), serviceInfoSecondaryKeyDB(NULL) { int ret; const DBTYPE type = (flags == DB_CREATE ? 
DB_BTREE : DB_UNKNOWN); std::string basepath = ""; if (!TmpDirCreate(tmpdir)) { throw BDBException(IString("Unable to create temporary directory").str(), 1); } dbEnv = new DbEnv(DB_CXX_NO_EXCEPTIONS); dbEnv->set_errcall(&handleError); if ((ret = dbEnv->open(tmpdir.c_str(), DB_CREATE | DB_INIT_CDB | DB_INIT_MPOOL, 0)) != 0) { tearDown(); throw BDBException(IString("Unable to create data base environment (%s)", name).str(), ret); } jobDB = new Db(dbEnv, DB_CXX_NO_EXCEPTIONS); nameSecondaryKeyDB = new Db(dbEnv, DB_CXX_NO_EXCEPTIONS); endpointSecondaryKeyDB = new Db(dbEnv, DB_CXX_NO_EXCEPTIONS); serviceInfoSecondaryKeyDB = new Db(dbEnv, DB_CXX_NO_EXCEPTIONS); if ((ret = nameSecondaryKeyDB->set_flags(DB_DUPSORT)) != 0) { tearDown(); throw BDBException(IString("Unable to set duplicate flags for secondary key DB (%s)", name).str(), ret); } if ((ret = endpointSecondaryKeyDB->set_flags(DB_DUPSORT)) != 0) { tearDown(); throw BDBException(IString("Unable to set duplicate flags for secondary key DB (%s)", name).str(), ret); } if ((ret = serviceInfoSecondaryKeyDB->set_flags(DB_DUPSORT)) != 0) { tearDown(); throw BDBException(IString("Unable to set duplicate flags for secondary key DB (%s)", name).str(), ret); } std::string absPathToDB = URL(name).Path(); if ((ret = jobDB->open(NULL, absPathToDB.c_str(), "job_records", type, flags, 0)) != 0) { tearDown(); throw BDBException(IString("Unable to create job database (%s)", name).str(), ret); } if ((ret = nameSecondaryKeyDB->open(NULL, absPathToDB.c_str(), "name_keys", type, flags, 0)) != 0) { tearDown(); throw BDBException(IString("Unable to create DB for secondary name keys (%s)", name).str(), ret); } if ((ret = endpointSecondaryKeyDB->open(NULL, absPathToDB.c_str(), "endpoint_keys", type, flags, 0)) != 0) { tearDown(); throw BDBException(IString("Unable to create DB for secondary endpoint keys (%s)", name).str(), ret); } if ((ret = serviceInfoSecondaryKeyDB->open(NULL, absPathToDB.c_str(), "serviceinfo_keys", type, flags, 0)) != 0) { tearDown(); throw BDBException(IString("Unable to create DB for secondary service info keys (%s)", name).str(), ret); } if ((ret = jobDB->associate(NULL, nameSecondaryKeyDB, (flags != DB_RDONLY ? getNameKey : NULL), 0)) != 0) { tearDown(); throw BDBException(IString("Unable to associate secondary DB with primary DB (%s)", name).str(), ret); } if ((ret = jobDB->associate(NULL, endpointSecondaryKeyDB, (flags != DB_RDONLY ? getEndpointKey : NULL), 0)) != 0) { tearDown(); throw BDBException(IString("Unable to associate secondary DB with primary DB (%s)", name).str(), ret); } if ((ret = jobDB->associate(NULL, serviceInfoSecondaryKeyDB, (flags != DB_RDONLY ? 
getServiceInfoHostnameKey : NULL), 0)) != 0) { tearDown(); throw BDBException(IString("Unable to associate secondary DB with primary DB (%s)", name).str(), ret); } JobInformationStorageBDB::logger.msg(DEBUG, "Job database created successfully (%s)", name); } void JobInformationStorageBDB::JobDB::tearDown() { if (nameSecondaryKeyDB) { nameSecondaryKeyDB->close(0); } if (endpointSecondaryKeyDB) { endpointSecondaryKeyDB->close(0); } if (serviceInfoSecondaryKeyDB) { serviceInfoSecondaryKeyDB->close(0); } if (jobDB) { jobDB->close(0); } if (dbEnv) { dbEnv->close(0); } delete endpointSecondaryKeyDB; endpointSecondaryKeyDB = NULL; delete nameSecondaryKeyDB; nameSecondaryKeyDB = NULL; delete serviceInfoSecondaryKeyDB; serviceInfoSecondaryKeyDB = NULL; delete jobDB; jobDB = NULL; delete dbEnv; dbEnv = NULL; dbEnv = new DbEnv(DB_CXX_NO_EXCEPTIONS); dbEnv->remove(tmpdir.c_str(), 0); DirDelete(tmpdir, true); delete dbEnv; dbEnv = NULL; } JobInformationStorageBDB::JobDB::~JobDB() { tearDown(); } #if ((DB_VERSION_MAJOR > 4)||(DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 3)) void JobInformationStorageBDB::JobDB::handleError(const DbEnv *dbenv, const char *errpfx, const char *msg) { #else void JobInformationStorageBDB::JobDB::handleError(const char *errpfx, char *msg) { #endif if (errpfx) { JobInformationStorageBDB::logger.msg(DEBUG, "Error from BDB: %s: %s", errpfx, msg); } else { JobInformationStorageBDB::logger.msg(DEBUG, "Error from BDB: %s", msg); } } JobInformationStorageBDB::BDBException::BDBException(const std::string& msg, int ret, bool writeLogMessage) throw() : message(msg), returnvalue(ret) { if (writeLogMessage) { JobInformationStorageBDB::logger.msg(VERBOSE, msg); JobInformationStorageBDB::logErrorMessage(ret); } } JobInformationStorageBDB::JobInformationStorageBDB(const std::string& name, unsigned nTries, unsigned tryInterval) : JobInformationStorage(name, nTries, tryInterval) { isValid = false; isStorageExisting = false; if (!Glib::file_test(name, Glib::FILE_TEST_EXISTS)) { const std::string joblistdir = Glib::path_get_dirname(name); // Check if the parent directory exist. 
if (!Glib::file_test(joblistdir, Glib::FILE_TEST_EXISTS)) { logger.msg(ERROR, "Job list file cannot be created: The parent directory (%s) doesn't exist.", joblistdir); return; } else if (!Glib::file_test(joblistdir, Glib::FILE_TEST_IS_DIR)) { logger.msg(ERROR, "Job list file cannot be created: %s is not a directory", joblistdir); return; } isValid = true; return; } else if (!Glib::file_test(name, Glib::FILE_TEST_IS_REGULAR)) { logger.msg(ERROR, "Job list file (%s) is not a regular file", name); return; } try { JobDB db(name); } catch (const BDBException& e) { isValid = false; return; } isStorageExisting = isValid = true; } bool JobInformationStorageBDB::Write(const std::list& jobs) { if (!isValid) { return false; } if (jobs.empty()) return true; try { JobDB db(name, DB_CREATE); int ret; std::list::const_iterator it = jobs.begin(); void* pdata = NULL; Dbt key, data; { InterruptGuard guard; do { ::free(pdata); key.set_size(it->JobID.size()); key.set_data((char*)it->JobID.c_str()); serialiseJob(*it, data); pdata = data.get_data(); } while ((ret = db->put(NULL, &key, &data, 0)) == 0 && ++it != jobs.end()); ::free(pdata); }; if (ret != 0) { logger.msg(VERBOSE, "Unable to write key/value pair to job database (%s): Key \"%s\"", name, (char*)key.get_data()); logErrorMessage(ret); return false; } } catch (const BDBException& e) { return false; } return true; } bool JobInformationStorageBDB::Write(const std::list& jobs, const std::set& prunedServices, std::list& newJobs) { if (!isValid) { return false; } if (jobs.empty()) return true; try { JobDB db(name, DB_CREATE); int ret = 0; std::set idsOfPrunedJobs; Dbc *cursor = NULL; if ((ret = db.viaServiceInfoKeys()->cursor(NULL, &cursor, DB_WRITECURSOR)) != 0) return false; for (std::set::const_iterator itPruned = prunedServices.begin(); itPruned != prunedServices.end(); ++itPruned) { Dbt key((char *)itPruned->c_str(), itPruned->size()), pkey, data; if (cursor->pget(&key, &pkey, &data, DB_SET) != 0) continue; do { idsOfPrunedJobs.insert(std::string((char *)pkey.get_data(), pkey.get_size())); cursor->del(0); } while (cursor->pget(&key, &pkey, &data, DB_NEXT_DUP) == 0); } cursor->close(); std::list::const_iterator it = jobs.begin(); void* pdata = NULL; Dbt key, data; bool jobWasPruned; { InterruptGuard guard; do { ::free(pdata); key.set_size(it->JobID.size()); key.set_data((char*)it->JobID.c_str()); serialiseJob(*it, data); pdata = data.get_data(); jobWasPruned = (idsOfPrunedJobs.count(it->JobID) != 0); if (!jobWasPruned) { // Check if job already exist. 
Dbt existingData; if (db->get(NULL, &key, &existingData, 0) == DB_NOTFOUND) { newJobs.push_back(&*it); } } } while (((ret = db->put(NULL, &key, &data, 0)) == 0 && ++it != jobs.end())); ::free(pdata); }; if (ret != 0) { logger.msg(VERBOSE, "Unable to write key/value pair to job database (%s): Key \"%s\"", name, (char*)key.get_data()); logErrorMessage(ret); return false; } } catch (const BDBException& e) { return false; } return true; } bool JobInformationStorageBDB::ReadAll(std::list& jobs, const std::list& rejectEndpoints) { if (!isValid) { return false; } jobs.clear(); try { int ret; JobDB db(name); Dbc *cursor; if ((ret = db->cursor(NULL, &cursor, 0)) != 0) { //dbp->err(dbp, ret, "DB->cursor"); return false; } Dbt key, data; while ((ret = cursor->get(&key, &data, DB_NEXT)) == 0) { jobs.push_back(Job()); jobs.back().JobID = std::string((char *)key.get_data(), key.get_size()); deserialiseJob(jobs.back(), data); for (std::list::const_iterator it = rejectEndpoints.begin(); it != rejectEndpoints.end(); ++it) { if (jobs.back().JobManagementURL.StringMatches(*it)) { jobs.pop_back(); break; } } } cursor->close(); if (ret != DB_NOTFOUND) { //dbp->err(dbp, ret, "DBcursor->get"); return false; } } catch (const BDBException& e) { return false; } return true; } static void addJobFromDB(const Dbt& key, const Dbt& data, std::list& jobs, std::set& idsOfAddedJobs, const std::list& rejectEndpoints) { jobs.push_back(Job()); jobs.back().JobID.assign((char *)key.get_data(), key.get_size()); deserialiseJob(jobs.back(), data); if (idsOfAddedJobs.count(jobs.back().JobID) != 0) { // Look for duplicates and remove them. jobs.pop_back(); return; } idsOfAddedJobs.insert(jobs.back().JobID); for (std::list::const_iterator it = rejectEndpoints.begin(); it != rejectEndpoints.end(); ++it) { if (jobs.back().JobManagementURL.StringMatches(*it)) { idsOfAddedJobs.erase(jobs.back().JobID); jobs.pop_back(); return; } } } static bool addJobsFromDuplicateKeys(Db& db, Dbt& key, std::list& jobs, std::set& idsOfAddedJobs, const std::list& rejectEndpoints) { int ret; Dbt pkey, data; Dbc *cursor; if ((ret = db.cursor(NULL, &cursor, 0)) != 0) { //dbp->err(dbp, ret, "DB->cursor"); return false; } ret = cursor->pget(&key, &pkey, &data, DB_SET); if (ret != 0) return false; addJobFromDB(pkey, data, jobs, idsOfAddedJobs, rejectEndpoints); while ((ret = cursor->pget(&key, &pkey, &data, DB_NEXT_DUP)) == 0) { addJobFromDB(pkey, data, jobs, idsOfAddedJobs, rejectEndpoints); } return true; } bool JobInformationStorageBDB::Read(std::list& jobs, std::list& jobIdentifiers, const std::list& endpoints, const std::list& rejectEndpoints) { if (!isValid) { return false; } jobs.clear(); try { JobDB db(name); int ret; std::set idsOfAddedJobs; for (std::list::iterator it = jobIdentifiers.begin(); it != jobIdentifiers.end();) { if (it->empty()) continue; Dbt key((char *)it->c_str(), it->size()), data; ret = db->get(NULL, &key, &data, 0); if (ret == DB_NOTFOUND) { if (addJobsFromDuplicateKeys(*db.viaNameKeys(), key, jobs, idsOfAddedJobs, rejectEndpoints)) { it = jobIdentifiers.erase(it); } else { ++it; } continue; } addJobFromDB(key, data, jobs, idsOfAddedJobs, rejectEndpoints); it = jobIdentifiers.erase(it); } if (endpoints.empty()) return true; Dbc *cursor; if ((ret = db.viaEndpointKeys()->cursor(NULL, &cursor, 0)) != 0) return false; for (std::list::const_iterator it = endpoints.begin(); it != endpoints.end(); ++it) { // Extract hostname from iterator. URL u(*it); if (u.Protocol() == "file") { u = URL("http://" + *it); // Only need to extract hostname. 
Prefix with "http://". } if (u.Host().empty()) continue; Dbt key((char *)u.Host().c_str(), u.Host().size()), pkey, data; ret = cursor->pget(&key, &pkey, &data, DB_SET); if (ret != 0) { continue; } std::string tmpEndpoint; deserialiseNthJobAttribute(tmpEndpoint, data, 7); URL jobManagementURL(tmpEndpoint); if (jobManagementURL.StringMatches(*it)) { addJobFromDB(pkey, data, jobs, idsOfAddedJobs, rejectEndpoints); } while ((ret = cursor->pget(&key, &pkey, &data, DB_NEXT_DUP)) == 0) { deserialiseNthJobAttribute(tmpEndpoint, data, 7); URL jobManagementURL(tmpEndpoint); if (jobManagementURL.StringMatches(*it)) { addJobFromDB(pkey, data, jobs, idsOfAddedJobs, rejectEndpoints); } } } } catch (const BDBException& e) { return false; } return true; } bool JobInformationStorageBDB::Clean() { if (!isValid) { return false; } if (remove(name.c_str()) != 0) { if (errno == ENOENT) return true; // No such file. DB already cleaned. logger.msg(VERBOSE, "Unable to truncate job database (%s)", name); perror("Error"); return false; } return true; } bool JobInformationStorageBDB::Remove(const std::list& jobids) { if (!isValid) { return false; } try { InterruptGuard guard; JobDB db(name, DB_CREATE); for (std::list::const_iterator it = jobids.begin(); it != jobids.end(); ++it) { Dbt key((char *)it->c_str(), it->size()); db->del(NULL, &key, 0); } } catch (const BDBException& e) { return false; } return true; } void JobInformationStorageBDB::logErrorMessage(int err) { switch (err) { case ENOENT: logger.msg(DEBUG, "ENOENT: The file or directory does not exist, Or a nonexistent re_source file was specified."); break; case DB_OLD_VERSION: logger.msg(DEBUG, "DB_OLD_VERSION: The database cannot be opened without being first upgraded."); break; case EEXIST: logger.msg(DEBUG, "EEXIST: DB_CREATE and DB_EXCL were specified and the database exists."); case EINVAL: logger.msg(DEBUG, "EINVAL"); break; default: logger.msg(DEBUG, "Unable to determine error (%d)", err); } } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/Endpoint.h0000644000000000000000000000012412120575521023062 xustar000000000000000027 mtime=1363344209.220098 27 atime=1513200574.695704 30 ctime=1513200659.780744804 nordugrid-arc-5.4.2/src/hed/libs/compute/Endpoint.h0000644000175000002070000002433212120575521023133 0ustar00mockbuildmock00000000000000#ifndef __ARC_ENDPOINT_H__ #define __ARC_ENDPOINT_H__ #include #include #include #include #include #include namespace Arc { class ConfigEndpoint; class ExecutionTarget; class Endpoint; class EndpointQueryingStatus; class ComputingEndpointAttributes; /// Key comparison object definition for Endpoint objects /** * \since Added in 3.0.0. * \ingroup compute * \headerfile Endpoint.h arc/compute/Endpoint.h */ typedef bool (*EndpointCompareFn)(const Endpoint&, const Endpoint&); /// Status map for Endpoint objects. /** * Wrapper class inheriting from std::map providing no extra functionality than * that of std::map. It is needed due to limitations in the language wrapping * software (SWIG) that can't handle more than 2 template arguments. * * \since Added in 3.0.0. 
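 *
 * A typical use is iterating over the map returned by
 * EntityRetriever::getAllStatuses after EntityRetriever::wait, for example
 * (a sketch; the comparison against the SUCCESSFUL value of
 * EndpointQueryingStatus is an assumption about that class' interface):
 * \code
 Arc::EndpointStatusMap statuses = retriever.getAllStatuses();
 for (Arc::EndpointStatusMap::const_iterator it = statuses.begin();
      it != statuses.end(); ++it) {
   bool ok = (it->second == Arc::EndpointQueryingStatus::SUCCESSFUL);
   std::cout << it->first.str() << (ok ? " succeeded" : " failed") << std::endl;
 }
 \endcode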
* \ingroup compute * \headerfile Endpoint.h arc/compute/Endpoint.h */ class EndpointStatusMap : public std::map { public: /// Creates a std::map with the key comparison object set to Endpoint::ServiceIDCompare EndpointStatusMap(); /// Creates a std::map using \c fn as key comparison object EndpointStatusMap(EndpointCompareFn fn) : std::map(fn) {} /// Copy constructor EndpointStatusMap(const EndpointStatusMap& m) : std::map(m) {} ~EndpointStatusMap() {} }; /// Represents an endpoint of a service with a given interface type and capabilities /** * This class similar in structure to the %Endpoint entity in the %GLUE2 * specification. The type of the interface is described by a string called * InterfaceName (from the %GLUE2 specification). An Endpoint object must have a * URL, and it is quite useless without capabilities (the system has to know if * an Endpoint is a service registry or a computing element), but the * InterfaceName is optional. * * The Endpoint object also contains information about the health state and * quality level of the endpoint, and optionally the requested submission * interface name, which will be used later if a job will be submitted to a * computing element related to this endpoint. * * \see CapabilityEnum where the capabilities are listed. * \since Added in 2.0.0. * \ingroup compute * \headerfile Endpoint.h arc/compute/Endpoint.h */ class Endpoint { public: /// Values for classifying capabilities of services enum CapabilityEnum { /// Service registry capable of returning endpoints REGISTRY, /// Local information system of a computing element capable of returning information about the resource COMPUTINGINFO, /// Local information system of a computing element capable of returning the list of jobs on the resource JOBLIST, /// Interface of a computing element where jobs can be submitted JOBSUBMIT, /// Interface of a computing element where jobs can be created /** * \since Added in 3.0.0. **/ JOBCREATION, /// Interface of a computing element where jobs can be managed JOBMANAGEMENT, /// Unspecified capability UNSPECIFIED }; /// Get string representation of #CapabilityEnum. /** * \return The %GLUE2 capability string associated with the passed * #CapabilityEnum value is returned. 
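 * For example:
 * \code
 std::string cap = Arc::Endpoint::GetStringForCapability(Arc::Endpoint::REGISTRY);
 // cap is now "information.discovery.registry"
 \endcode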
**/ static std::string GetStringForCapability(Endpoint::CapabilityEnum cap) { if (cap == Endpoint::REGISTRY) return "information.discovery.registry"; if (cap == Endpoint::COMPUTINGINFO) return "information.discovery.resource"; if (cap == Endpoint::JOBLIST) return "information.discovery.resource"; if (cap == Endpoint::JOBSUBMIT) return "executionmanagement.jobexecution"; if (cap == Endpoint::JOBCREATION) return "executionmanagement.jobcreation"; if (cap == Endpoint::JOBMANAGEMENT) return "executionmanagement.jobmanager"; return ""; } /// Create a new Endpoint with a list of capability strings /** \param[in] URLString is a string representing the URL of the endpoint \param[in] Capability is a list of capability strings specifying the capabilities of the service \param[in] InterfaceName is a string specifying the type of the interface of the service */ Endpoint(const std::string& URLString = "", const std::set& Capability = std::set(), const std::string& InterfaceName = "") : URLString(URLString), InterfaceName(InterfaceName), Capability(Capability) {} /// Create a new Endpoint with a single capability specified by the #CapabilityEnum /** \param[in] URLString is a string representing the URL of the endpoint \param[in] cap is a #CapabilityEnum specifying the single capability of the endpoint \param[in] InterfaceName is an optional string specifying the type of the interface */ Endpoint(const std::string& URLString, const Endpoint::CapabilityEnum cap, const std::string& InterfaceName = "") : URLString(URLString), InterfaceName(InterfaceName), Capability() { Capability.insert(GetStringForCapability(cap)); } /// Create new Endpoint from ExecutionTarget object /** * \param e ExecutionTarget object to create new Endpoint from. * \param rsi string specifying the requested submission interface if any. * Default value is the empty string. * \since Added in 3.0.0. **/ Endpoint(const ExecutionTarget& e, const std::string& rsi = ""); /// Create new Endpoint from ComputingEndpointAttributes object /** * \param cea ComputingEndpointAttributes object to create new Endpoint from. * \param rsi string specifying the requested submission interface if any. * Default value is the empty string. * \since Added in 3.0.0. **/ Endpoint(const ComputingEndpointAttributes& cea, const std::string& rsi = ""); /// Create a new Endpoint from a ConfigEndpoint /** * The ConfigEndpoint::URLString, ConfigEndpoint::InterfaceName and the * ConfigEndpoint::RequestedSubmissionInterfaceName attributes will be copied * from the ConfigEndpoint, and if the type of the ConfigEndpoint is #REGISTRY * or #COMPUTINGINFO, the given capability will be added to the new Endpoint * object. * * \param[in] endpoint is the ConfigEndpoint object which will be converted to * an Endpoint **/ Endpoint(const ConfigEndpoint& endpoint) { *this = endpoint; } /// Check for capability /** * Checks if the Endpoint has the given capability specified by a * #CapabilityEnum value. * * \param[in] cap is the specified #CapabilityEnum * \return true if the Endpoint has the given capability */ bool HasCapability(Endpoint::CapabilityEnum cap) const; /// Check for capability /** * Checks if the Endpoint has the given capability specified by a string. * * \param[in] cap is a string specifying a capability. * \return true if the Endpoint has the given capability. 
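 * For example (a sketch):
 * \code
 Arc::Endpoint registry("test.nordugrid.org", Arc::Endpoint::REGISTRY);
 bool isRegistry = registry.HasCapability("information.discovery.registry"); // true
 \endcode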
*/ bool HasCapability(const std::string& cap) const; /// Get string representation of this object /** * \return String formatted as: * \verbatim ([, capabilities: ]) \endverbatim * where if #InterfaceName is empty, "" is used. **/ std::string str() const; /// Get name of service from #URLString attribute /** * \return If #URLString contains "://", a URL object will be created from it * and if the host part of it is non empty it is returned, otherwise * #URLString is returned. * \since Added in 3.0.0. **/ std::string getServiceName() const; /// Key comparison method /** * Compare passed Endpoint object with this by value returned by #str(). * * \param[in] other Endpoint object to compare with. * \return The result of lexicographically less between this object (lhs) and * other (rhs) compared using value returned by #str() method, is returned. **/ bool operator<(const Endpoint& other) const; /// Key comparison function for comparing Endpoint objects. /** * Compare endpoints by #ServiceID, #URLString and #InterfaceName in that * order. The attributes are compared lexicographically. * * \return If the ServiceID attributes are unequal lexicographically less * between the ServiceID attributes of a and b is returned. If they equal then * same procedure is done with the URLString attribute, if they equal * lexicographically less between the InterfaceName attributes of a and b is * returned. * * \since Added in 3.0.0. **/ static bool ServiceIDCompare(const Endpoint& a, const Endpoint& b); /// Set from a ConfigEndpoint object /** * \return \c *this is returned. **/ Endpoint& operator=(const ConfigEndpoint& e); /// The string representation of the URL of the Endpoint std::string URLString; /// The type of the interface (%GLUE2 InterfaceName) std::string InterfaceName; /// %GLUE2 HealthState std::string HealthState; /// %GLUE2 HealthStateInfo std::string HealthStateInfo; /// %GLUE2 QualityLevel std::string QualityLevel; /// Set of %GLUE2 Capability strings std::set Capability; /// A %GLUE2 InterfaceName requesting an InterfaceName used for job submission. /** * If a user specifies an InterfaceName for submitting jobs, that information * will be stored here and will be used when collecting information about the * computing element. Only those job submission interfaces will be considered * which has this requested InterfaceName. **/ std::string RequestedSubmissionInterfaceName; /// ID of service this Endpoint belongs to /** * \since Added in 3.0.0. **/ std::string ServiceID; /// Get bounds in EndpointStatusMap corresponding to Endpoint /** * \param[in] endpoint An Endpoint object for which the bounds of equivalent * Endpoint objects in the EndpointStatusMap should be found. * \param[in] statusMap See description above. * \return The lower and upper bound of the equivalent to the passed Endpoint * object is returned as a pair (lower, upper). 
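 * As a sketch (assuming the returned pair holds EndpointStatusMap
 * const_iterator values, which should be verified against the declaration
 * below):
 * \code
 std::pair<Arc::EndpointStatusMap::const_iterator,
           Arc::EndpointStatusMap::const_iterator> range =
   Arc::Endpoint::getServiceEndpoints(endpoint, statusMap);
 for (Arc::EndpointStatusMap::const_iterator it = range.first; it != range.second; ++it) {
   // every entry in [range.first, range.second) belongs to the same service as 'endpoint'
 }
 \endcode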
**/ static std::pair getServiceEndpoints(const Endpoint&, const EndpointStatusMap&); }; } // namespace Arc #endif // __ARC_ENDPOINT_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/BrokerPlugin.h0000644000000000000000000000012412107432734023710 xustar000000000000000027 mtime=1360934364.735007 27 atime=1513200574.720704 30 ctime=1513200659.758744535 nordugrid-arc-5.4.2/src/hed/libs/compute/BrokerPlugin.h0000644000175000002070000000636012107432734023762 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_BROKERPLUGIN_H__ #define __ARC_BROKERPLUGIN_H__ /** \file * \brief Plugin, loader and argument classes for broker specialisation. */ #include #include namespace Arc { class ExecutionTarget; class JobDescription; class Logger; class URL; class UserConfig; /// Internal class representing arguments passed to BrokerPlugin. /** * \ingroup accplugins * \headerfile BrokerPlugin.h arc/compute/BrokerPlugin.h */ class BrokerPluginArgument : public PluginArgument { public: BrokerPluginArgument(const UserConfig& uc) : uc(uc) {} ~BrokerPluginArgument() {} operator const UserConfig&() const { return uc; } private: const UserConfig& uc; }; /// Base class for BrokerPlugins implementing different brokering algorithms. /** * Sub-classes implement their own version of a brokering algorithm based on * certain attributes of the job or targets. match() is called for each * ExecutionTarget and sub-classes should in general first call * BrokerPlugin::match(), which calls Broker::genericMatch(), to check that * basic requirements are satisfied, and then do their own additional checks. * In order for the targets to be ranked using operator() the sub-class * should store appropriate data about each target during match(). * \ingroup accplugins * \headerfile BrokerPlugin.h arc/compute/BrokerPlugin.h */ class BrokerPlugin : public Plugin { public: /// Should never be called directly - instead use BrokerPluginLoader.load(). BrokerPlugin(BrokerPluginArgument* arg) : Plugin(arg), uc(*arg), j(NULL) {} /// Sorting operator - returns true if lhs a better target than rhs. virtual bool operator() (const ExecutionTarget& lhs, const ExecutionTarget& rhs) const; /// Returns true if the target is acceptable for the BrokerPlugin. virtual bool match(const ExecutionTarget& et) const; /// Set the JobDescription to be used for brokering. virtual void set(const JobDescription& _j) const; protected: const UserConfig& uc; mutable const JobDescription* j; static Logger logger; }; /// Handles loading of the required BrokerPlugin plugin. /** * \ingroup accplugins * \headerfile BrokerPlugin.h arc/compute/BrokerPlugin.h */ class BrokerPluginLoader : public Loader { public: /// Load the base configuration of plugin locations etc. BrokerPluginLoader(); /// If keep_ownership in load() is true then BrokerPlugin objects are deleted. ~BrokerPluginLoader(); /// Load the BrokerPlugin with the given name. BrokerPlugin* load(const UserConfig& uc, const std::string& name = "", bool keep_ownerskip = true); /// Load the BrokerPlugin with the given name and set the JobDescription in it. BrokerPlugin* load(const UserConfig& uc, const JobDescription& j, const std::string& name = "", bool keep_ownerskip = true); /// Copy a BrokerPlugin. 
BrokerPlugin* copy(const BrokerPlugin* p, bool keep_ownerskip = true); private: BrokerPlugin* load(const UserConfig& uc, const JobDescription* j, const std::string& name, bool keep_ownerskip); std::list plugins; }; } // namespace Arc #endif // __ARC_BROKERPLUGIN_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/JobDescriptionParserPlugin.cpp0000644000000000000000000000012412675602216027116 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.670703 30 ctime=1513200659.800745049 nordugrid-arc-5.4.2/src/hed/libs/compute/JobDescriptionParserPlugin.cpp0000644000175000002070000000576012675602216027173 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "JobDescriptionParserPlugin.h" namespace Arc { Logger JobDescriptionParserPlugin::logger(Logger::getRootLogger(), "JobDescriptionParserPlugin"); JobDescriptionParserPlugin::JobDescriptionParserPlugin(PluginArgument* parg): Plugin(parg) {} JobDescriptionParserPlugin::~JobDescriptionParserPlugin() {} std::string& JobDescriptionParserPlugin::SourceLanguage(JobDescription& j) const { return j.sourceLanguage; } JobDescriptionParserPluginLoader::JobDescriptionParserPluginLoader() : Loader(BaseConfig().MakeConfig(Config()).Parent()), scaningDone(false) {} JobDescriptionParserPluginLoader::~JobDescriptionParserPluginLoader() { for (std::list::iterator it = jdps.begin(); it != jdps.end(); it++) delete *it; } void JobDescriptionParserPluginLoader::scan() { factory_->scan(FinderLoader::GetLibrariesList(), jdpDescs); PluginsFactory::FilterByKind("HED:JobDescriptionParserPlugin", jdpDescs); scaningDone = true; } JobDescriptionParserPlugin* JobDescriptionParserPluginLoader::load(const std::string& name) { if (name.empty()) { return NULL; } if (!scaningDone) { scan(); } if(!factory_->load(FinderLoader::GetLibrariesList(), "HED:JobDescriptionParserPlugin", name)) { logger.msg(ERROR, "JobDescriptionParserPlugin plugin \"%s\" not found.", name); return NULL; } JobDescriptionParserPlugin *jdp = factory_->GetInstance("HED:JobDescriptionParserPlugin", name, NULL, false); if (!jdp) { logger.msg(ERROR, "JobDescriptionParserPlugin %s could not be created", name); return NULL; } jdps.push_back(jdp); logger.msg(DEBUG, "Loaded JobDescriptionParserPlugin %s", name); return jdp; } JobDescriptionParserPluginLoader::iterator::iterator(JobDescriptionParserPluginLoader& jdpl) : jdpl(&jdpl) { LoadNext(); current = this->jdpl->jdps.begin(); } JobDescriptionParserPluginLoader::iterator& JobDescriptionParserPluginLoader::iterator::operator++() { LoadNext(); current++; return *this; } void JobDescriptionParserPluginLoader::iterator::LoadNext() { if (!jdpl->scaningDone) { jdpl->scan(); } while (!jdpl->jdpDescs.empty()) { JobDescriptionParserPlugin* loadedJDPL = NULL; while (!jdpl->jdpDescs.front().plugins.empty()) { loadedJDPL = jdpl->load(jdpl->jdpDescs.front().plugins.front().name); jdpl->jdpDescs.front().plugins.pop_front(); if (loadedJDPL != NULL) { break; } } if (jdpl->jdpDescs.front().plugins.empty()) { jdpl->jdpDescs.pop_front(); } if (loadedJDPL != NULL) { break; } } } } // namespace Arc nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/Software.h0000644000000000000000000000012412072550452023076 xustar000000000000000027 mtime=1357566250.399851 27 atime=1513200574.713704 30 ctime=1513200659.772744706 nordugrid-arc-5.4.2/src/hed/libs/compute/Software.h0000644000175000002070000005527112072550452023155 0ustar00mockbuildmock00000000000000#ifndef 
__ARC_SOFTWAREVERSION_H__ #define __ARC_SOFTWAREVERSION_H__ /** \file * \brief Software and SoftwareRequirement classes. */ #include #include #include #include #include namespace Arc { class ApplicationEnvironment; /// Used to represent software (names and version) and comparison. /** * The Software class is used to represent the name of a piece of * software internally. Generally software are identified by a name * and possibly a version number. Some software can also be * categorized by type or family (compilers, operating system, etc.). * A software object can be compared to other software objects using * the comparison operators contained in this class. * The basic usage of this class is to test if some specified software * requirement (SoftwareRequirement) are fulfilled, by using the * comparability of the class. * * Internally the Software object is represented by a family and name * identifier, and the software version is tokenized at the characters * defined in VERSIONTOKENS, and stored as a list of tokens. * * \ingroup jobdescription * \headerfile Software.h arc/compute/Software.h */ class Software { public: /// Definition of a comparison operator method pointer. /** * This \c typedef defines a comparison operator method pointer. * * @see #operator==, * @see #operator!=, * @see #operator>, * @see #operator<, * @see #operator>=, * @see #operator<=, * @see ComparisonOperatorEnum. **/ typedef bool (Software::*ComparisonOperator)(const Software&) const; /// Dummy constructor. /** * This constructor creates a empty object. **/ Software() : family(""), name(""), version("") {}; /// Create a Software object. /** * Create a Software object from a single string composed of a name * and a version part. The created object will contain a empty * family part. The name and version part of the string will be * split at the first occurence of a dash (-) which is followed by a * digit (0-9). If the string does not contain such a pattern, the * passed string will be taken to be the name and version will be * empty. * * @param name_version should be a string composed of the name and * version of the software to represent. */ Software(const std::string& name_version); /// Create a Software object. /** * Create a Software object with the specified name and version. * The family part will be left empty. * * @param name the software name to represent. * @param version the software version to represent. **/ Software(const std::string& name, const std::string& version); /// Create a Software object. /** * Create a Software object with the specified family, name and * version. * * @param family the software family to represent. * @param name the software name to represent. * @param version the software version to represent. */ Software(const std::string& family, const std::string& name, const std::string& version); /// Comparison operator enum /** * The #ComparisonOperatorEnum enumeration is a 1-1 correspondance * between the defined comparison method operators * (Software::ComparisonOperator), and can be used in circumstances * where method pointers are not supported. **/ enum ComparisonOperatorEnum { NOTEQUAL = 0, /**< see #operator!= */ EQUAL = 1, /**< see #operator== */ GREATERTHAN = 2, /**< see #operator> */ LESSTHAN = 3, /**< see #operator< */ GREATERTHANOREQUAL = 4, /**< see #operator>= */ LESSTHANOREQUAL = 5 /**< see #operator<= */ }; /// Convert a #ComparisonOperatorEnum value to a comparison method pointer. 
/** * The passed #ComparisonOperatorEnum will be converted to a * comparison method pointer defined by the * Software::ComparisonOperator typedef. * * This static method is not defined in language bindings created * with Swig, since method pointers are not supported by Swig. * * @param co a #ComparisonOperatorEnum value. * @return A method pointer to a comparison method is returned. **/ static ComparisonOperator convert(const ComparisonOperatorEnum& co); /// Indicates whether the object is empty. /** * @return \c true if the name of this object is empty, otherwise * \c false. **/ bool empty() const { return name.empty(); } /// Equality operator. /** * Two Software objects are equal only if they are of the same family, * have the same name and is of same version. This operator can also * be represented by the Software::EQUAL #ComparisonOperatorEnum * value. * * @param sw is the RHS Software object. * @return \c true when the two objects equals, otherwise \c false. **/ bool operator==(const Software& sw) const { return family == sw.family && name == sw.name && version == sw.version; } /// Inequality operator. /** * The behaviour of the inequality operator is just opposite that of the * equality operator (operator==()). * * @param sw is the RHS Software object. * @return \c true when the two objects are inequal, otherwise * \c false. **/ bool operator!=(const Software& sw) const { return !operator==(sw); } /// Greater-than operator. /** * For the LHS object to be greater than the RHS object they must * first share the same family and name. If the version of the LHS is * empty or the LHS and RHS versions equal then LHS is not greater than * RHS. If the LHS version is not empty while the RHS is then LHS is * greater than RHS. If both versions are non empty and not equal then, * the first version token of each object is compared and if they are * identical, the two next version tokens will be compared. If not * identical, the two tokens will be parsed as integers, and if parsing * fails the LHS is not greater than the RHS. If parsing succeeds and * the integers equals, the two next tokens will be compared, otherwise * the comparison is resolved by the integer comparison. * * If the LHS contains more version tokens than the RHS, and the * comparison have not been resolved at the point of equal number of * tokens, then if the additional tokens contains a token which * cannot be parsed to a integer the LHS is not greater than the * RHS. If the parsed integer is not 0 then the LHS is greater than * the RHS. If the rest of the additional tokens are 0, the LHS is * not greater than the RHS. * * If the RHS contains more version tokens than the LHS and comparison * have not been resolved at the point of equal number of tokens, or * simply if comparison have not been resolved at the point of equal * number of tokens, then the LHS is not greater than the RHS. * * @param sw is the RHS object. * @return \c true if the LHS is greater than the RHS, otherwise * \c false. **/ bool operator> (const Software& sw) const; /// Less-than operator. /** * The behaviour of this less-than operator is equivalent to the * greater-than operator (operator>()) with the LHS and RHS swapped. * * @param sw is the RHS object. * @return \c true if the LHS is less than the RHS, otherwise * \c false. * @see operator>(). **/ bool operator< (const Software& sw) const { return sw.operator>(*this); } /// Greater-than or equal operator. 
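    /* Worked example of the comparison semantics described for operator>()
     * above (added for illustration only; the names and versions are
     * hypothetical).
     * \code
     * #include <arc/compute/Software.h>
     * #include <cassert>
     *
     * int main() {
     *   // Version tokens are compared numerically, so 1.10 is greater than 1.9.
     *   assert(Arc::Software("app", "1.10") > Arc::Software("app", "1.9"));
     *   // Additional trailing tokens equal to 0 do not make the LHS greater.
     *   assert(!(Arc::Software("app", "1.2.0") > Arc::Software("app", "1.2")));
     *   // Objects with different names never compare greater than each other.
     *   assert(!(Arc::Software("app", "2.0") > Arc::Software("other", "1.0")));
     *   return 0;
     * }
     * \endcode
     */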
/** * The LHS object is greater than or equal to the RHS object if * the LHS equal the RHS (operator==()) or if the LHS is greater * than the RHS (operator>()). * * @param sw is the RHS object. * @return \c true if the LHS is greated than or equal the RHS, * otherwise \c false. * @see operator==(), * @see operator>(). **/ bool operator>=(const Software& sw) const { return (*this == sw ? true : *this > sw); } /// Less-than or equal operator. /** * The LHS object is greater than or equal to the RHS object if * the LHS equal the RHS (operator==()) or if the LHS is greater * than the RHS (operator>()). * * @param sw is the RHS object. * @return \c true if the LHS is less than or equal the RHS, * otherwise \c false. * @see operator==(), * @see operator<(). **/ bool operator<=(const Software& sw) const { return (*this == sw ? true : *this < sw); } /// Write Software string representation to a std::ostream. /** * Write the string representation of a Software object to a * std::ostream. * * @param out is a std::ostream to write the string representation * of the Software object to. * @param sw is the Software object to write to the std::ostream. * @return The passed std::ostream \a out is returned. **/ friend std::ostream& operator<<(std::ostream& out, const Software& sw) { out << sw(); return out; } /// Get string representation. /** * Returns the string representation of this object, which is * 'family'-'name'-'version'. * * @return The string representation of this object is returned. * @see operator std::string(). **/ std::string operator()() const; /// Cast to string /** * This casting operator behaves exactly as #operator()() does. The * cast is used like (std::string) . * * @see #operator()(). **/ operator std::string(void) const { return operator()(); } /// Get family. /** * @return The family the represented software belongs to is * returned. **/ const std::string& getFamily() const { return family; } /// Get name. /** * @return The name of the represented software is returned. **/ const std::string& getName() const { return name; } /// Get version. /** * @return The version of the represented software is returned. **/ const std::string& getVersion() const { return version; } const std::list& getOptions() const { return option; } void addOption(const std::string& opt) { option.push_back(opt); } void addOptions(const std::list& opts) { option.insert(option.end(),opts.begin(),opts.end()); } /// Convert Software::ComparisonOperator to a string. /** * This method is not available in language bindings created by * Swig, since method pointers are not supported by Swig. * * @param co is a Software::ComparisonOperator. * @return The string representation of the passed * Software::ComparisonOperator is returned. **/ static std::string toString(ComparisonOperator co); /// Tokens used to split version string. /** * This string constant specifies which tokens will be used to split * the version string. * **/ static const std::string VERSIONTOKENS; private: std::string family; std::string name; std::string version; std::list tokenizedVersion; std::list option; static Logger logger; }; /// Class used to express and resolve version requirements on software. /** * A requirement in this class is defined as a pair composed of a * Software object and either a Software::ComparisonOperator method * pointer or equally a Software::ComparisonOperatorEnum enum value. 
* A SoftwareRequirement object can contain multiple of such * requirements, and then it can specified if all these requirements * should be satisfied, or if it is enough to satisfy only one of * them. The requirements can be satisfied by a single Software object * or a list of either Software or ApplicationEnvironment objects, by * using the method isSatisfied(). This class also contain a number of * methods (selectSoftware()) to select Software objects which are * satisfying the requirements, and in this way resolving * requirements. * * \ingroup jobdescription * \headerfile Software.h arc/compute/Software.h **/ class SoftwareRequirement { public: /// Create a empty SoftwareRequirement object. /** * The created SoftwareRequirement object will contain no * requirements. **/ SoftwareRequirement() {} /// Create a SoftwareRequirement object. /** * The created SoftwareRequirement object will contain one * requirement specified by the Software object \a sw, and the * Software::ComparisonOperator \a swComOp. * * This constructor is not available in language bindings created by * Swig, since method pointers are not supported by Swig, see * SoftwareRequirement(const Software&, Software::ComparisonOperatorEnum) * instead. * * @param sw is the Software object of the requirement to add. * @param swComOp is the Software::ComparisonOperator of the * requirement to add. **/ SoftwareRequirement(const Software& sw, Software::ComparisonOperator swComOp); /// Create a SoftwareRequirement object. /** * The created SoftwareRequirement object will contain one * requirement specified by the Software object \a sw, and the * Software::ComparisonOperatorEnum \a co. * * @param sw is the Software object of the requirement to add. * @param co is the Software::ComparisonOperatorEnum of the * requirement to add. **/ SoftwareRequirement(const Software& sw, Software::ComparisonOperatorEnum co = Software::EQUAL); /// Assignment operator. /** * Set this object equal to that of the passed SoftwareRequirement * object \a sr. * * @param sr is the SoftwareRequirement object to set object equal * to. **/ SoftwareRequirement& operator=(const SoftwareRequirement& sr); /// Copy constructor /** * Create a SoftwareRequirement object from another * SoftwareRequirement object. * * @param sr is the SoftwareRequirement object to make a copy of. **/ SoftwareRequirement(const SoftwareRequirement& sr) { *this = sr; } /// Add a Software object a corresponding comparion operator to this object. /** * Adds software name and version to list of requirements and * associates the comparison operator with it (equality by default). * * This method is not available in language bindings created by * Swig, since method pointers are not supported by Swig, see * add(const Software&, Software::ComparisonOperatorEnum) instead. * * @param sw is the Software object to add as part of a requirement. * @param swComOp is the Software::ComparisonOperator method pointer * to add as part of a requirement, the default operator * will be Software::operator==(). **/ void add(const Software& sw, Software::ComparisonOperator swComOp); /// Add a Software object a corresponding comparion operator to this object. /** * Adds software name and version to list of requirements and * associates the comparison operator with it (equality by default). * * @param sw is the Software object to add as part of a requirement. * @param co is the Software::ComparisonOperatorEnum value * to add as part of a requirement, the default enum will be * Software::EQUAL. 
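   *
   * A minimal usage sketch (for illustration only; the software names and
   * versions are hypothetical):
   * \code
   * Arc::SoftwareRequirement req;
   * req.add(Arc::Software("gcc", "4.4"), Arc::Software::GREATERTHANOREQUAL);
   *
   * std::list<Arc::Software> available;
   * available.push_back(Arc::Software("gcc", "4.6.1"));
   * bool ok = req.isSatisfied(available); // 4.6.1 >= 4.4, so the requirement holds
   * \endcode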
**/ void add(const Software& sw, Software::ComparisonOperatorEnum co); /// Test if requirements are satisfied. /** * Returns \c true if the requirements are satisfied by the * specified Software \a sw, otherwise \c false is returned. * * @param sw is the Software which should satisfy the requirements. * @return \c true if requirements are satisfied, otherwise * \c false. * @see isSatisfied(const std::list&) const, * @see isSatisfied(const std::list&) const, * @see selectSoftware(const Software&), * @see isResolved() const. **/ bool isSatisfied(const Software& sw) const { return isSatisfied(std::list(1, sw)); } /// Test if requirements are satisfied. /** * Returns \c true if stored requirements are satisfied by * software specified in \a swList, otherwise \c false is returned. * * Note that if all requirements must be satisfied and multiple * requirements exist having identical name and family all these * requirements should be satisfied by a single Software object. * * @param swList is the list of Software objects which should be * used to try satisfy the requirements. * @return \c true if requirements are satisfied, otherwise * \c false. * @see isSatisfied(const Software&) const, * @see isSatisfied(const std::list&) const, * @see selectSoftware(const std::list&), * @see isResolved() const. **/ bool isSatisfied(const std::list& swList) const { return isSatisfiedSelect(swList); } /// Test if requirements are satisfied. /** * This method behaves in exactly the same way as the * isSatisfied(const Software&) const method does. * * @param swList is the list of ApplicationEnvironment objects which * should be used to try satisfy the requirements. * @return \c true if requirements are satisfied, otherwise * \c false. * @see isSatisfied(const Software&) const, * @see isSatisfied(const std::list&) const, * @see selectSoftware(const std::list&), * @see isResolved() const. **/ bool isSatisfied(const std::list& swList) const; /// Select software. /** * If the passed Software \a sw do not satisfy the requirements * \c false is returned and this object is not modified. If however * the Software object \a sw do satisfy the requirements \c true is * returned and the requirements are set to equal the \a sw Software * object. * * @param sw is the Software object used to satisfy requirements. * @return \c true if requirements are satisfied, otherwise * \c false. * @see selectSoftware(const std::list&), * @see selectSoftware(const std::list&), * @see isSatisfied(const Software&) const, * @see isResolved() const. **/ bool selectSoftware(const Software& sw) { return selectSoftware(std::list(1, sw)); } /// Select software. /** * If the passed list of Software objects \a swList do not satisfy * the requirements \c false is returned and this object is not * modified. If however the list of Software objects \a swList do * satisfy the requirements \c true is returned and the Software * objects satisfying the requirements will replace these with the * equality operator (Software::operator==) used as the comparator * for the new requirements. * * Note that if all requirements must be satisfied and multiple * requirements exist having identical name and family all these * requirements should be satisfied by a single Software object and * it will replace all these requirements. * * @param swList is a list of Software objects used to satisfy * requirements. * @return \c true if requirements are satisfied, otherwise * \c false. 
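   *
   * A hedged usage sketch (illustrative names and versions only):
   * \code
   * Arc::SoftwareRequirement req(Arc::Software("gcc", "4.4"),
   *                              Arc::Software::GREATERTHANOREQUAL);
   * std::list<Arc::Software> available;
   * available.push_back(Arc::Software("gcc", "4.6.1"));
   * available.push_back(Arc::Software("gcc", "4.1.2"));
   * if (req.selectSoftware(available)) {
   *   // The requirement now refers to a concrete version from the list,
   *   // compared with the equality operator, so it should be resolved.
   *   bool resolved = req.isResolved();
   * }
   * \endcode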
* @see selectSoftware(const Software&), * @see selectSoftware(const std::list&), * @see isSatisfied(const std::list&) const, * @see isResolved() const. **/ bool selectSoftware(const std::list& swList); /// Select software. /** * This method behaves exactly as the * selectSoftware(const std::list&) method does. * * @param swList is a list of ApplicationEnvironment objects used to * satisfy requirements. * @return \c true if requirements are satisfied, otherwise * \c false. * @see selectSoftware(const Software&), * @see selectSoftware(const std::list&), * @see isSatisfied(const std::list&) const, * @see isResolved() const. **/ bool selectSoftware(const std::list& swList); /// Indicates whether requirements have been resolved or not. /** * If specified that only one requirement has to be satisfied, then * for this object to be resolved it can only contain one * requirement and it has use the equal operator * (Software::operator==). * * If specified that all requirements has to be satisfied, then for * this object to be resolved each requirement must have a Software * object with a unique family/name composition, i.e. no other * requirements have a Software object with the same family/name * composition, and each requirement must use the equal operator * (Software::operator==). * * If this object has been resolved then \c true is returned when * invoking this method, otherwise \c false is returned. * * @return \c true if this object have been resolved, otherwise * \c false. **/ bool isResolved() const; /// Test if the object is empty. /** * @return \c true if this object do no contain any requirements, * otherwise \c false. **/ bool empty() const { return softwareList.empty(); } /// Clear the object. /** * The requirements in this object will be cleared when invoking * this method. **/ void clear() { softwareList.clear(); comparisonOperatorList.clear(); } /// Get list of Software objects. /** * @return The list of internally stored Software objects is * returned. * @see Software, * @see getComparisonOperatorList. **/ const std::list& getSoftwareList() const { return softwareList; } /// Get list of comparison operators. /** * @return The list of internally stored comparison operators is * returned. * @see Software::ComparisonOperator, * @see getSoftwareList. **/ const std::list& getComparisonOperatorList() const { return comparisonOperatorList; } private: std::list softwareList; std::list comparisonOperatorList; bool isSatisfiedSelect(const std::list&, SoftwareRequirement* = NULL) const; static Logger logger; }; } // namespace Arc #endif // __ARC_SOFTWAREVERSION_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/TestACCControl.h0000644000000000000000000000012412165537061024077 xustar000000000000000027 mtime=1373027889.931476 27 atime=1513200574.755704 30 ctime=1513200659.776744755 nordugrid-arc-5.4.2/src/hed/libs/compute/TestACCControl.h0000644000175000002070000000711612165537061024151 0ustar00mockbuildmock00000000000000#ifndef __ARC_TESTACCCONTROL_H__ #define __ARC_TESTACCCONTROL_H__ /** \file * \brief Classes for controlling output of compute test plugins. */ #include #include #include #include #include #include #include #include #include #include namespace Arc { /** * \defgroup testacccontrol Classes for controlling output of compute test plugins * The listed classes are used for controlling the behaviour of the test * plugins. A test plugin can be used for simulating, testing and checking how * the compute library behaves and react to different inputs from plugins. 
Also * the test plugins doesn't require a network connection in order to function. * * Compute test plugins are available for the following plugin types: * \li BrokerPlugin * \li JobControllerPlugin * \li JobDescriptionParserPlugin * \li SubmitterPlugin * \li ServiceEndpointRetrieverPlugin * \li TargetInformationRetrieverPlugin * \li JobListRetrieverPlugin * * They can be loaded by using the associated plugin loader class. * * \ingroup compute */ /** * \ingroup testacccontrol * \headerfile TestACCControl.h arc/compute/TestACCControl.h */ class BrokerPluginTestACCControl { public: static bool match; static bool less; }; /** * \ingroup testacccontrol * \headerfile TestACCControl.h arc/compute/TestACCControl.h */ class JobDescriptionParserPluginTestACCControl { public: static bool parseStatus; static bool unparseStatus; static std::list parsedJobDescriptions; static std::string unparsedString; }; /** * \ingroup testacccontrol * \headerfile TestACCControl.h arc/compute/TestACCControl.h */ class JobControllerPluginTestACCControl { public: static bool cleanStatus; static bool cancelStatus; static bool renewStatus; static bool resumeStatus; static bool getJobDescriptionStatus; static std::string getJobDescriptionString; static bool resourceExist; static URL resourceURL; static URL createURL; }; /** * \ingroup testacccontrol * \headerfile TestACCControl.h arc/compute/TestACCControl.h */ class SubmitterPluginTestACCControl { public: static SubmissionStatus submitStatus; static bool migrateStatus; static bool modifyStatus; static Job submitJob; static Job migrateJob; }; /** * \ingroup testacccontrol * \headerfile TestACCControl.h arc/compute/TestACCControl.h */ class JobStateTEST : public JobState { public: JobStateTEST(JobState::StateType type_, const std::string& state_ = "TestState") { type = type_; state = state_; } }; /** * \ingroup testacccontrol * \headerfile TestACCControl.h arc/compute/TestACCControl.h */ class JobListRetrieverPluginTESTControl { public: static float delay; static std::list jobs; static EndpointQueryingStatus status; }; /** * \ingroup testacccontrol * \headerfile TestACCControl.h arc/compute/TestACCControl.h */ class ServiceEndpointRetrieverPluginTESTControl { public: static std::list condition; static std::list status; static std::list< std::list > endpoints; }; /** * \ingroup testacccontrol * \headerfile TestACCControl.h arc/compute/TestACCControl.h */ class TargetInformationRetrieverPluginTESTControl { public: static float delay; static std::list targets; static EndpointQueryingStatus status; }; } #endif // __ARC_TESTACCCONTROL_H__ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/README0000644000000000000000000000012412064073334022013 xustar000000000000000027 mtime=1355839196.464875 27 atime=1513200574.737704 30 ctime=1513200659.755744498 nordugrid-arc-5.4.2/src/hed/libs/compute/README0000644000175000002070000000145612064073334022066 0ustar00mockbuildmock00000000000000libarccompute - library for writing command line interface (client) This folder contains the framework for building command line interfaces. 
The following classes/files are present: o ClientInterface - Sets up client side mcc chain o Broker - Base class for Broker o ExecutionTarget - Generic execution target (flattened glue2 structure) o JobControllerPlugin - Base class for specialization (see src/hed/acc) o Job - Generic representation of a job o JobSupervisor - Responsible for loading JobControllerPlugins o Submitter - Base class for specialization (see src/hed/acc) Several of the above listed classes are base classes for ARC Client Components (ACCs) which are available in: src/hed/acc For command line interfaces built upon functionality contained within this library see src/clients/compute/ nordugrid-arc-5.4.2/src/hed/libs/compute/PaxHeaders.7502/EntityRetrieverPlugin.h0000644000000000000000000000012412106452655025633 xustar000000000000000027 mtime=1360680365.067388 27 atime=1513200574.700704 30 ctime=1513200659.779744792 nordugrid-arc-5.4.2/src/hed/libs/compute/EntityRetrieverPlugin.h0000644000175000002070000001334612106452655025707 0ustar00mockbuildmock00000000000000#ifndef __ARC_ENTITYRETRIEVERPLUGIN_H__ #define __ARC_ENTITYRETRIEVERPLUGIN_H__ /** \file * \brief Plugin, loader and argument classes for EntityRetriever specialisation. */ #include #include #include #include #include #include #include #include #include #include namespace Arc { /// Options controlling the query process template class EndpointQueryOptions { public: /// Options for querying Endpoint objects /** When an Endpoint does not have its interface name specified, all the supported interfaces can be tried. If preferred interface names are provided here, those will be tried first. \param[in] preferredInterfaceNames a list of the preferred InterfaceName strings \see EndpointQueryOptions the EntityRetriever (a.k.a. #ServiceEndpointRetriever) needs different options */ EndpointQueryOptions(const std::set& preferredInterfaceNames = std::set()) : preferredInterfaceNames(preferredInterfaceNames) {} const std::set& getPreferredInterfaceNames() const { return preferredInterfaceNames; } private: std::set preferredInterfaceNames; }; /// The EntityRetriever (a.k.a. #ServiceEndpointRetriever) needs different options template<> class EndpointQueryOptions { public: /// Options for recursivity, filtering of capabilities and rejecting services /** \param[in] recursive Recursive query means that if a service registry is discovered that will be also queried for additional services \param[in] capabilityFilter Only those services will be discovered which has at least one capability from this list. \param[in] rejectedServices If a service's URL contains any item from this list, the services will be not returned among the results. 
\param[in] preferredInterfaceNames Set of preferred interface names */ EndpointQueryOptions(bool recursive = false, const std::list& capabilityFilter = std::list(), const std::list& rejectedServices = std::list(), const std::set& preferredInterfaceNames = std::set() ) : recursive(recursive), capabilityFilter(capabilityFilter), rejectedServices(rejectedServices), preferredInterfaceNames(preferredInterfaceNames) {} bool recursiveEnabled() const { return recursive; } const std::list& getCapabilityFilter() const { return capabilityFilter; } const std::list& getRejectedServices() const { return rejectedServices; } const std::set& getPreferredInterfaceNames() const { return preferredInterfaceNames; } private: bool recursive; std::list capabilityFilter; std::list rejectedServices; std::set preferredInterfaceNames; }; /** * \ingroup accplugins * \headerfile EntityRetriever.h arc/compute/EntityRetriever.h */ template class EntityRetrieverPlugin : public Plugin { protected: EntityRetrieverPlugin(PluginArgument* parg): Plugin(parg) {}; public: virtual const std::list& SupportedInterfaces() const { return supportedInterfaces; }; virtual bool isEndpointNotSupported(const Endpoint&) const = 0; virtual EndpointQueryingStatus Query(const UserConfig&, const Endpoint& rEndpoint, std::list&, const EndpointQueryOptions& options) const = 0; static const std::string kind; protected: std::list supportedInterfaces; }; /** * \ingroup accplugins * \headerfile EntityRetriever.h arc/compute/EntityRetriever.h */ template class EntityRetrieverPluginLoader : public Loader { public: EntityRetrieverPluginLoader() : Loader(BaseConfig().MakeConfig(Config()).Parent()) {} ~EntityRetrieverPluginLoader(); EntityRetrieverPlugin* load(const std::string& name); static std::list getListOfPlugins(); const std::map *>& GetTargetInformationRetrieverPlugins() const { return plugins; } protected: std::map *> plugins; static Logger logger; }; /** * \ingroup accplugins * \headerfile EntityRetriever.h arc/compute/EntityRetriever.h */ class ServiceEndpointRetrieverPlugin : public EntityRetrieverPlugin { protected: ServiceEndpointRetrieverPlugin(PluginArgument* parg); virtual ~ServiceEndpointRetrieverPlugin() {} }; /** * \ingroup accplugins * \headerfile EntityRetriever.h arc/compute/EntityRetriever.h */ class TargetInformationRetrieverPlugin : public EntityRetrieverPlugin { protected: TargetInformationRetrieverPlugin(PluginArgument* parg); virtual ~TargetInformationRetrieverPlugin() {} }; /** * \ingroup accplugins * \headerfile EntityRetriever.h arc/compute/EntityRetriever.h */ class JobListRetrieverPlugin : public EntityRetrieverPlugin { protected: JobListRetrieverPlugin(PluginArgument* parg); virtual ~JobListRetrieverPlugin() {} }; /** * \ingroup accplugins * \headerfile EntityRetriever.h arc/compute/EntityRetriever.h */ typedef EntityRetrieverPluginLoader ServiceEndpointRetrieverPluginLoader; /** * \ingroup accplugins * \headerfile EntityRetriever.h arc/compute/EntityRetriever.h */ typedef EntityRetrieverPluginLoader TargetInformationRetrieverPluginLoader; /** * \ingroup accplugins * \headerfile EntityRetriever.h arc/compute/EntityRetriever.h */ typedef EntityRetrieverPluginLoader JobListRetrieverPluginLoader; } // namespace Arc #endif // __ARC_ENTITYRETRIEVERPLUGIN_H__ nordugrid-arc-5.4.2/src/hed/PaxHeaders.7502/acc0000644000000000000000000000013213214316024017170 xustar000000000000000030 mtime=1513200660.080748473 30 atime=1513200668.721854157 30 ctime=1513200660.080748473 
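A short usage sketch for the EndpointQueryOptions specialisation documented in EntityRetrieverPlugin.h above (added for illustration only; the capability string and registry URL are hypothetical values, not library defaults):

#include <arc/compute/Endpoint.h>
#include <arc/compute/EntityRetrieverPlugin.h>
#include <list>
#include <string>

int main() {
  std::list<std::string> capabilities;
  capabilities.push_back("executionmanagement.jobexecution"); // keep only services with this capability
  std::list<std::string> rejected;
  rejected.push_back("https://registry.example.org");         // skip services whose URL contains this
  // recursive = true: service registries discovered during the query are themselves queried
  Arc::EndpointQueryOptions<Arc::Endpoint> options(true, capabilities, rejected);
  return 0;
}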
nordugrid-arc-5.4.2/src/hed/acc/0000755000175000002070000000000013214316024017313 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/acc/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712534011142021302 xustar000000000000000027 mtime=1433408098.604951 30 atime=1513200594.439945661 30 ctime=1513200660.071748363 nordugrid-arc-5.4.2/src/hed/acc/Makefile.am0000644000175000002070000000067512534011142021354 0ustar00mockbuildmock00000000000000if GRIDFTP_ENABLED ARC0 = ARC0 else ARC0 = endif if PYTHON_SWIG_ENABLED PYTHONBROKER = PythonBroker else PYTHONBROKER = endif if UNICORE_ENABLED UNICORE = UNICORE else UNICORE = endif if EMIES_ENABLED EMIES = EMIES else EMIES = endif SUBDIRS = TEST $(ARC0) ARC1 $(EMIES) CREAM Broker $(PYTHONBROKER) $(UNICORE) JobDescriptionParser SER ldap DIST_SUBDIRS = TEST ARC0 ARC1 EMIES CREAM Broker PythonBroker UNICORE JobDescriptionParser SER ldap nordugrid-arc-5.4.2/src/hed/acc/PaxHeaders.7502/TEST0000644000000000000000000000013213214316024017747 xustar000000000000000030 mtime=1513200660.103748755 30 atime=1513200668.721854157 30 ctime=1513200660.103748755 nordugrid-arc-5.4.2/src/hed/acc/TEST/0000755000175000002070000000000013214316024020072 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/acc/TEST/PaxHeaders.7502/BrokerPluginTestACC.h0000644000000000000000000000012412045235201023746 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.322699 30 ctime=1513200660.099748706 nordugrid-arc-5.4.2/src/hed/acc/TEST/BrokerPluginTestACC.h0000644000175000002070000000157412045235201024022 0ustar00mockbuildmock00000000000000#ifndef __ARC_BROKERPLUGINTESTACC_H__ #define __ARC_BROKERPLUGINTESTACC_H__ #include #include #include #include #include namespace Arc { class BrokerPluginTestACC : public BrokerPlugin { private: BrokerPluginTestACC(BrokerPluginArgument* parg) : BrokerPlugin(parg) {} public: ~BrokerPluginTestACC() {} virtual bool operator()(const ExecutionTarget&, const ExecutionTarget&) const { return BrokerPluginTestACCControl::less; } virtual bool match(const ExecutionTarget& t) const { return BrokerPlugin::match(t)?BrokerPluginTestACCControl::match:false; } static Plugin* GetInstance(PluginArgument *arg) { BrokerPluginArgument *bparg = dynamic_cast(arg); return bparg ? 
new BrokerPluginTestACC(bparg) : NULL; } }; } #endif // __ARC_BROKERPLUGINTESTACC_H__ nordugrid-arc-5.4.2/src/hed/acc/TEST/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712072511126022065 xustar000000000000000027 mtime=1357550166.724559 30 atime=1513200594.612947777 30 ctime=1513200660.094748645 nordugrid-arc-5.4.2/src/hed/acc/TEST/Makefile.am0000644000175000002070000000177312072511126022137 0ustar00mockbuildmock00000000000000pkglibtestdir = $(pkglibdir)/test pkglibtest_LTLIBRARIES = libaccTEST.la libaccTEST_la_SOURCES = \ JobControllerPluginTestACC.cpp JobControllerPluginTestACC.h \ JobDescriptionParserPluginTestACC.cpp JobDescriptionParserPluginTestACC.h \ SubmitterPluginTestACC.cpp SubmitterPluginTestACC.h \ BrokerPluginTestACC.h \ TargetInformationRetrieverPluginTEST.cpp TargetInformationRetrieverPluginTEST.h \ ServiceEndpointRetrieverPluginTEST.cpp ServiceEndpointRetrieverPluginTEST.h \ JobListRetrieverPluginTEST.cpp JobListRetrieverPluginTEST.h \ TestACCPluginDescriptors.cpp libaccTEST_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libaccTEST_la_LIBADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) libaccTEST_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/acc/TEST/PaxHeaders.7502/TestACCPluginDescriptors.cpp0000644000000000000000000000012412675602216025373 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.325699 30 ctime=1513200660.103748755 nordugrid-arc-5.4.2/src/hed/acc/TEST/TestACCPluginDescriptors.cpp0000644000175000002070000000232612675602216025443 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "BrokerPluginTestACC.h" #include "JobControllerPluginTestACC.h" #include "JobDescriptionParserPluginTestACC.h" #include "SubmitterPluginTestACC.h" #include "TargetInformationRetrieverPluginTEST.h" #include "ServiceEndpointRetrieverPluginTEST.h" #include "JobListRetrieverPluginTEST.h" extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "TEST", "HED:BrokerPlugin", "", 0, &Arc::BrokerPluginTestACC::GetInstance }, { "TEST", "HED:JobControllerPlugin", "", 0, &Arc::JobControllerPluginTestACC::GetInstance }, { "TEST", "HED:JobDescriptionParserPlugin", "", 0, &Arc::JobDescriptionParserPluginTestACC::GetInstance }, { "TEST", "HED:SubmitterPlugin", "", 0, &Arc::SubmitterPluginTestACC::GetInstance }, { "TEST", "HED:TargetInformationRetrieverPlugin", "TargetInformationRetriever test plugin", 0, &Arc::TargetInformationRetrieverPluginTEST::Instance }, { "TEST", "HED:ServiceEndpointRetrieverPlugin", "ServiceEndpointRetriever test plugin", 0, &Arc::ServiceEndpointRetrieverPluginTEST::Instance }, { "TEST", "HED:JobListRetrieverPlugin", "JobListRetriever test plugin", 0, &Arc::JobListRetrieverPluginTEST::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/acc/TEST/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315722022074 xustar000000000000000030 mtime=1513200594.667948449 29 atime=1513200648.96061247 30 ctime=1513200660.095748657 nordugrid-arc-5.4.2/src/hed/acc/TEST/Makefile.in0000644000175000002070000010473713214315722022157 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. 
# @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/acc/TEST DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibtestdir)" LTLIBRARIES = $(pkglibtest_LTLIBRARIES) am__DEPENDENCIES_1 = libaccTEST_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libaccTEST_la_OBJECTS = \ libaccTEST_la-JobControllerPluginTestACC.lo \ libaccTEST_la-JobDescriptionParserPluginTestACC.lo \ libaccTEST_la-SubmitterPluginTestACC.lo 
\ libaccTEST_la-TargetInformationRetrieverPluginTEST.lo \ libaccTEST_la-ServiceEndpointRetrieverPluginTEST.lo \ libaccTEST_la-JobListRetrieverPluginTEST.lo \ libaccTEST_la-TestACCPluginDescriptors.lo libaccTEST_la_OBJECTS = $(am_libaccTEST_la_OBJECTS) libaccTEST_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libaccTEST_la_CXXFLAGS) \ $(CXXFLAGS) $(libaccTEST_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libaccTEST_la_SOURCES) DIST_SOURCES = $(libaccTEST_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = 
@DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = 
@PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglibtestdir = $(pkglibdir)/test pkglibtest_LTLIBRARIES = libaccTEST.la libaccTEST_la_SOURCES = \ JobControllerPluginTestACC.cpp JobControllerPluginTestACC.h \ JobDescriptionParserPluginTestACC.cpp 
JobDescriptionParserPluginTestACC.h \ SubmitterPluginTestACC.cpp SubmitterPluginTestACC.h \ BrokerPluginTestACC.h \ TargetInformationRetrieverPluginTEST.cpp TargetInformationRetrieverPluginTEST.h \ ServiceEndpointRetrieverPluginTEST.cpp ServiceEndpointRetrieverPluginTEST.h \ JobListRetrieverPluginTEST.cpp JobListRetrieverPluginTEST.h \ TestACCPluginDescriptors.cpp libaccTEST_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libaccTEST_la_LIBADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) libaccTEST_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/acc/TEST/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/acc/TEST/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibtestLTLIBRARIES: $(pkglibtest_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibtestdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibtestdir)" @list='$(pkglibtest_LTLIBRARIES)'; test -n "$(pkglibtestdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibtestdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibtestdir)"; \ } uninstall-pkglibtestLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglibtest_LTLIBRARIES)'; test -n "$(pkglibtestdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibtestdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibtestdir)/$$f"; \ done clean-pkglibtestLTLIBRARIES: -test -z "$(pkglibtest_LTLIBRARIES)" || rm -f $(pkglibtest_LTLIBRARIES) @list='$(pkglibtest_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libaccTEST.la: $(libaccTEST_la_OBJECTS) $(libaccTEST_la_DEPENDENCIES) $(libaccTEST_la_LINK) -rpath $(pkglibtestdir) $(libaccTEST_la_OBJECTS) $(libaccTEST_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f 
*.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccTEST_la-JobControllerPluginTestACC.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccTEST_la-JobDescriptionParserPluginTestACC.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccTEST_la-JobListRetrieverPluginTEST.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccTEST_la-ServiceEndpointRetrieverPluginTEST.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccTEST_la-SubmitterPluginTestACC.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccTEST_la-TargetInformationRetrieverPluginTEST.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccTEST_la-TestACCPluginDescriptors.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libaccTEST_la-JobControllerPluginTestACC.lo: JobControllerPluginTestACC.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccTEST_la_CXXFLAGS) $(CXXFLAGS) -MT libaccTEST_la-JobControllerPluginTestACC.lo -MD -MP -MF $(DEPDIR)/libaccTEST_la-JobControllerPluginTestACC.Tpo -c -o libaccTEST_la-JobControllerPluginTestACC.lo `test -f 'JobControllerPluginTestACC.cpp' || echo '$(srcdir)/'`JobControllerPluginTestACC.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccTEST_la-JobControllerPluginTestACC.Tpo $(DEPDIR)/libaccTEST_la-JobControllerPluginTestACC.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobControllerPluginTestACC.cpp' object='libaccTEST_la-JobControllerPluginTestACC.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccTEST_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccTEST_la-JobControllerPluginTestACC.lo `test -f 'JobControllerPluginTestACC.cpp' || echo '$(srcdir)/'`JobControllerPluginTestACC.cpp libaccTEST_la-JobDescriptionParserPluginTestACC.lo: JobDescriptionParserPluginTestACC.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(libaccTEST_la_CXXFLAGS) $(CXXFLAGS) -MT libaccTEST_la-JobDescriptionParserPluginTestACC.lo -MD -MP -MF $(DEPDIR)/libaccTEST_la-JobDescriptionParserPluginTestACC.Tpo -c -o libaccTEST_la-JobDescriptionParserPluginTestACC.lo `test -f 'JobDescriptionParserPluginTestACC.cpp' || echo '$(srcdir)/'`JobDescriptionParserPluginTestACC.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccTEST_la-JobDescriptionParserPluginTestACC.Tpo $(DEPDIR)/libaccTEST_la-JobDescriptionParserPluginTestACC.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobDescriptionParserPluginTestACC.cpp' object='libaccTEST_la-JobDescriptionParserPluginTestACC.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccTEST_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccTEST_la-JobDescriptionParserPluginTestACC.lo `test -f 'JobDescriptionParserPluginTestACC.cpp' || echo '$(srcdir)/'`JobDescriptionParserPluginTestACC.cpp libaccTEST_la-SubmitterPluginTestACC.lo: SubmitterPluginTestACC.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccTEST_la_CXXFLAGS) $(CXXFLAGS) -MT libaccTEST_la-SubmitterPluginTestACC.lo -MD -MP -MF $(DEPDIR)/libaccTEST_la-SubmitterPluginTestACC.Tpo -c -o libaccTEST_la-SubmitterPluginTestACC.lo `test -f 'SubmitterPluginTestACC.cpp' || echo '$(srcdir)/'`SubmitterPluginTestACC.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccTEST_la-SubmitterPluginTestACC.Tpo $(DEPDIR)/libaccTEST_la-SubmitterPluginTestACC.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SubmitterPluginTestACC.cpp' object='libaccTEST_la-SubmitterPluginTestACC.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccTEST_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccTEST_la-SubmitterPluginTestACC.lo `test -f 'SubmitterPluginTestACC.cpp' || echo '$(srcdir)/'`SubmitterPluginTestACC.cpp libaccTEST_la-TargetInformationRetrieverPluginTEST.lo: TargetInformationRetrieverPluginTEST.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccTEST_la_CXXFLAGS) $(CXXFLAGS) -MT libaccTEST_la-TargetInformationRetrieverPluginTEST.lo -MD -MP -MF $(DEPDIR)/libaccTEST_la-TargetInformationRetrieverPluginTEST.Tpo -c -o libaccTEST_la-TargetInformationRetrieverPluginTEST.lo `test -f 'TargetInformationRetrieverPluginTEST.cpp' || echo '$(srcdir)/'`TargetInformationRetrieverPluginTEST.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccTEST_la-TargetInformationRetrieverPluginTEST.Tpo $(DEPDIR)/libaccTEST_la-TargetInformationRetrieverPluginTEST.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='TargetInformationRetrieverPluginTEST.cpp' object='libaccTEST_la-TargetInformationRetrieverPluginTEST.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) 
$(CPPFLAGS) $(libaccTEST_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccTEST_la-TargetInformationRetrieverPluginTEST.lo `test -f 'TargetInformationRetrieverPluginTEST.cpp' || echo '$(srcdir)/'`TargetInformationRetrieverPluginTEST.cpp libaccTEST_la-ServiceEndpointRetrieverPluginTEST.lo: ServiceEndpointRetrieverPluginTEST.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccTEST_la_CXXFLAGS) $(CXXFLAGS) -MT libaccTEST_la-ServiceEndpointRetrieverPluginTEST.lo -MD -MP -MF $(DEPDIR)/libaccTEST_la-ServiceEndpointRetrieverPluginTEST.Tpo -c -o libaccTEST_la-ServiceEndpointRetrieverPluginTEST.lo `test -f 'ServiceEndpointRetrieverPluginTEST.cpp' || echo '$(srcdir)/'`ServiceEndpointRetrieverPluginTEST.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccTEST_la-ServiceEndpointRetrieverPluginTEST.Tpo $(DEPDIR)/libaccTEST_la-ServiceEndpointRetrieverPluginTEST.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ServiceEndpointRetrieverPluginTEST.cpp' object='libaccTEST_la-ServiceEndpointRetrieverPluginTEST.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccTEST_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccTEST_la-ServiceEndpointRetrieverPluginTEST.lo `test -f 'ServiceEndpointRetrieverPluginTEST.cpp' || echo '$(srcdir)/'`ServiceEndpointRetrieverPluginTEST.cpp libaccTEST_la-JobListRetrieverPluginTEST.lo: JobListRetrieverPluginTEST.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccTEST_la_CXXFLAGS) $(CXXFLAGS) -MT libaccTEST_la-JobListRetrieverPluginTEST.lo -MD -MP -MF $(DEPDIR)/libaccTEST_la-JobListRetrieverPluginTEST.Tpo -c -o libaccTEST_la-JobListRetrieverPluginTEST.lo `test -f 'JobListRetrieverPluginTEST.cpp' || echo '$(srcdir)/'`JobListRetrieverPluginTEST.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccTEST_la-JobListRetrieverPluginTEST.Tpo $(DEPDIR)/libaccTEST_la-JobListRetrieverPluginTEST.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobListRetrieverPluginTEST.cpp' object='libaccTEST_la-JobListRetrieverPluginTEST.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccTEST_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccTEST_la-JobListRetrieverPluginTEST.lo `test -f 'JobListRetrieverPluginTEST.cpp' || echo '$(srcdir)/'`JobListRetrieverPluginTEST.cpp libaccTEST_la-TestACCPluginDescriptors.lo: TestACCPluginDescriptors.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccTEST_la_CXXFLAGS) $(CXXFLAGS) -MT libaccTEST_la-TestACCPluginDescriptors.lo -MD -MP -MF $(DEPDIR)/libaccTEST_la-TestACCPluginDescriptors.Tpo -c -o libaccTEST_la-TestACCPluginDescriptors.lo `test -f 'TestACCPluginDescriptors.cpp' || echo '$(srcdir)/'`TestACCPluginDescriptors.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccTEST_la-TestACCPluginDescriptors.Tpo 
$(DEPDIR)/libaccTEST_la-TestACCPluginDescriptors.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='TestACCPluginDescriptors.cpp' object='libaccTEST_la-TestACCPluginDescriptors.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccTEST_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccTEST_la-TestACCPluginDescriptors.lo `test -f 'TestACCPluginDescriptors.cpp' || echo '$(srcdir)/'`TestACCPluginDescriptors.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibtestdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibtestLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-pkglibtestLTLIBRARIES install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibtestLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibtestLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibtestLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibtestLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/acc/TEST/PaxHeaders.7502/JobListRetrieverPluginTEST.h0000644000000000000000000000012412045235201025311 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.322699 30 ctime=1513200660.103748755 nordugrid-arc-5.4.2/src/hed/acc/TEST/JobListRetrieverPluginTEST.h0000644000175000002070000000144512045235201025362 0ustar00mockbuildmock00000000000000#ifndef __ARC_JOBLISTRETRIEVERPLUGINTEST_H__ #define __ARC_JOBLISTRETRIEVERPLUGINTEST_H__ #include namespace Arc { class JobListRetrieverPluginTEST : public JobListRetrieverPlugin { public: JobListRetrieverPluginTEST(PluginArgument* parg): JobListRetrieverPlugin(parg) { supportedInterfaces.push_back("org.nordugrid.jlrtest"); } ~JobListRetrieverPluginTEST() {} static Plugin* Instance(PluginArgument *arg) { return new JobListRetrieverPluginTEST(arg); } virtual EndpointQueryingStatus Query(const UserConfig&, const Endpoint&, std::list&, const EndpointQueryOptions&) const; virtual bool isEndpointNotSupported(const Endpoint& endpoint) const { return endpoint.URLString.empty(); } }; } #endif // __ARC_JOBLISTRETRIEVERPLUGINTEST_H__ nordugrid-arc-5.4.2/src/hed/acc/TEST/PaxHeaders.7502/JobControllerPluginTestACC.cpp0000644000000000000000000000012412051675267025654 xustar000000000000000027 mtime=1353153207.099019 27 atime=1513200574.323699 30 ctime=1513200660.095748657 nordugrid-arc-5.4.2/src/hed/acc/TEST/JobControllerPluginTestACC.cpp0000644000175000002070000000533212051675267025724 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include "JobControllerPluginTestACC.h" namespace Arc { Plugin* JobControllerPluginTestACC::GetInstance(PluginArgument *arg) { JobControllerPluginArgument *jcarg = dynamic_cast(arg); if (!jcarg) { return NULL; } return new JobControllerPluginTestACC(*jcarg,arg); } void JobControllerPluginTestACC::UpdateJobs(std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { IDsProcessed.push_back((*it)->JobID); } } bool JobControllerPluginTestACC::CleanJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { if (JobControllerPluginTestACCControl::cleanStatus) { IDsProcessed.push_back((*it)->JobID); } else { IDsNotProcessed.push_back((*it)->JobID); } } return JobControllerPluginTestACCControl::cleanStatus; } bool JobControllerPluginTestACC::CancelJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { if (JobControllerPluginTestACCControl::cancelStatus) { IDsProcessed.push_back((*it)->JobID); } else { IDsNotProcessed.push_back((*it)->JobID); } } return JobControllerPluginTestACCControl::cancelStatus; } bool JobControllerPluginTestACC::RenewJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { if (JobControllerPluginTestACCControl::renewStatus) { IDsProcessed.push_back((*it)->JobID); } else { IDsNotProcessed.push_back((*it)->JobID); } } return JobControllerPluginTestACCControl::renewStatus; } bool JobControllerPluginTestACC::ResumeJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { for 
(std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { if (JobControllerPluginTestACCControl::resumeStatus) { IDsProcessed.push_back((*it)->JobID); } else { IDsNotProcessed.push_back((*it)->JobID); } } return JobControllerPluginTestACCControl::resumeStatus; } } nordugrid-arc-5.4.2/src/hed/acc/TEST/PaxHeaders.7502/TargetInformationRetrieverPluginTEST.cpp0000644000000000000000000000012412045235201027732 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.322699 30 ctime=1513200660.100748718 nordugrid-arc-5.4.2/src/hed/acc/TEST/TargetInformationRetrieverPluginTEST.cpp0000644000175000002070000000203312045235201027775 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "TargetInformationRetrieverPluginTEST.h" namespace Arc { Plugin* TargetInformationRetrieverPluginTEST::Instance(PluginArgument* arg) { return new TargetInformationRetrieverPluginTEST(arg); } EndpointQueryingStatus TargetInformationRetrieverPluginTEST::Query(const UserConfig& userconfig, const Endpoint& endpoint, std::list& csList, const EndpointQueryOptions&) const { Glib::usleep(TargetInformationRetrieverPluginTESTControl::delay*1000000); csList = TargetInformationRetrieverPluginTESTControl::targets; return TargetInformationRetrieverPluginTESTControl::status; }; } nordugrid-arc-5.4.2/src/hed/acc/TEST/PaxHeaders.7502/JobDescriptionParserPluginTestACC.cpp0000644000000000000000000000012412072511126027153 xustar000000000000000027 mtime=1357550166.724559 27 atime=1513200574.322699 30 ctime=1513200660.096748669 nordugrid-arc-5.4.2/src/hed/acc/TEST/JobDescriptionParserPluginTestACC.cpp0000644000175000002070000000043412072511126027221 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include "JobDescriptionParserPluginTestACC.h" namespace Arc { Plugin* JobDescriptionParserPluginTestACC::GetInstance(PluginArgument *arg) { return new JobDescriptionParserPluginTestACC(arg); } } nordugrid-arc-5.4.2/src/hed/acc/TEST/PaxHeaders.7502/ServiceEndpointRetrieverPluginTEST.cpp0000644000000000000000000000012312050773462027412 xustar000000000000000027 mtime=1352922930.982058 27 atime=1513200574.319699 29 ctime=1513200660.10174873 nordugrid-arc-5.4.2/src/hed/acc/TEST/ServiceEndpointRetrieverPluginTEST.cpp0000644000175000002070000000272012050773462027461 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include "ServiceEndpointRetrieverPluginTEST.h" namespace Arc { Plugin* ServiceEndpointRetrieverPluginTEST::Instance(PluginArgument* arg) { return new ServiceEndpointRetrieverPluginTEST(arg); } EndpointQueryingStatus ServiceEndpointRetrieverPluginTEST::Query(const UserConfig& userconfig, const Endpoint& registry, std::list& endpoints, const EndpointQueryOptions&) const { if (!ServiceEndpointRetrieverPluginTESTControl::condition.empty()) { SimpleCondition* c = ServiceEndpointRetrieverPluginTESTControl::condition.front(); ServiceEndpointRetrieverPluginTESTControl::condition.pop_front(); if (c != NULL) { c->wait(); } } if (!ServiceEndpointRetrieverPluginTESTControl::endpoints.empty()) { endpoints = ServiceEndpointRetrieverPluginTESTControl::endpoints.front(); ServiceEndpointRetrieverPluginTESTControl::endpoints.pop_front(); } if (!ServiceEndpointRetrieverPluginTESTControl::status.empty()) { EndpointQueryingStatus s = ServiceEndpointRetrieverPluginTESTControl::status.front(); 
ServiceEndpointRetrieverPluginTESTControl::status.pop_front(); return s; } return EndpointQueryingStatus::UNKNOWN; }; } nordugrid-arc-5.4.2/src/hed/acc/TEST/PaxHeaders.7502/ServiceEndpointRetrieverPluginTEST.h0000644000000000000000000000012412045235201027044 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.320699 30 ctime=1513200660.102748742 nordugrid-arc-5.4.2/src/hed/acc/TEST/ServiceEndpointRetrieverPluginTEST.h0000644000175000002070000000167312045235201027120 0ustar00mockbuildmock00000000000000#ifndef __ARC_SERVICEENDPOINTRETRIEVERPLUGINTEST_H__ #define __ARC_SERVICEENDPOINTRETRIEVERPLUGINTEST_H__ #include namespace Arc { class ServiceEndpointRetrieverPluginTEST : public ServiceEndpointRetrieverPlugin { protected: ServiceEndpointRetrieverPluginTEST(PluginArgument* parg): ServiceEndpointRetrieverPlugin(parg) { supportedInterfaces.push_back("org.nordugrid.sertest"); } public: virtual EndpointQueryingStatus Query(const UserConfig& userconfig, const Endpoint& registry, std::list& endpoints, const EndpointQueryOptions& options) const; static Plugin* Instance(PluginArgument *arg); virtual bool isEndpointNotSupported(const Endpoint& endpoint) const { return endpoint.URLString.empty(); } }; } #endif // __ARC_SERVICEENDPOINTRETRIEVERPLUGINTEST_H__ nordugrid-arc-5.4.2/src/hed/acc/TEST/PaxHeaders.7502/SubmitterPluginTestACC.cpp0000644000000000000000000000012412060134103025027 xustar000000000000000027 mtime=1354807363.254404 27 atime=1513200574.323699 30 ctime=1513200660.098748693 nordugrid-arc-5.4.2/src/hed/acc/TEST/SubmitterPluginTestACC.cpp0000644000175000002070000000330512060134103025075 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include "SubmitterPluginTestACC.h" namespace Arc { SubmissionStatus SubmitterPluginTestACC::Submit(const std::list& jobdescs, const ExecutionTarget& et, EntityConsumer& jc, std::list& notSubmitted) { SubmissionStatus retval = SubmitterPluginTestACCControl::submitStatus; if (SubmitterPluginTestACCControl::submitStatus) { jc.addEntity(SubmitterPluginTestACCControl::submitJob); } else for (std::list::const_iterator it = jobdescs.begin(); it != jobdescs.end(); ++it) { notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; } return retval; } SubmissionStatus SubmitterPluginTestACC::Submit(const std::list& jobdescs, const std::string& endpoint, EntityConsumer& jc, std::list& notSubmitted) { SubmissionStatus retval = SubmitterPluginTestACCControl::submitStatus; if (retval) { jc.addEntity(SubmitterPluginTestACCControl::submitJob); } else for (std::list::const_iterator it = jobdescs.begin(); it != jobdescs.end(); ++it) { notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; } return retval; } } nordugrid-arc-5.4.2/src/hed/acc/TEST/PaxHeaders.7502/JobControllerPluginTestACC.h0000644000000000000000000000012412051675267025321 xustar000000000000000027 mtime=1353153207.099019 27 atime=1513200574.319699 30 ctime=1513200660.096748669 nordugrid-arc-5.4.2/src/hed/acc/TEST/JobControllerPluginTestACC.h0000644000175000002070000000415112051675267025367 0ustar00mockbuildmock00000000000000#ifndef __ARC_JOBCONTROLLERTESTACC_H__ #define __ARC_JOBCONTROLLERTESTACC_H__ #include #include #include #include #include #include #include namespace Arc { class JobControllerPluginTestACC : public JobControllerPlugin { public: JobControllerPluginTestACC(const UserConfig& usercfg, PluginArgument* parg) : JobControllerPlugin(usercfg, parg) { 
supportedInterfaces.push_back("org.nordugrid.test"); } ~JobControllerPluginTestACC() {} virtual void UpdateJobs(std::list&, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool CleanJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool CancelJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool RenewJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool ResumeJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool GetURLToJobResource(const Job& job, Job::ResourceType resource, URL& url) const { url = JobControllerPluginTestACCControl::resourceURL; return JobControllerPluginTestACCControl::resourceExist; } virtual bool GetJobDescription(const Job& job, std::string& desc_str) const { desc_str = JobControllerPluginTestACCControl::getJobDescriptionString; return JobControllerPluginTestACCControl::getJobDescriptionStatus; } virtual URL CreateURL(std::string service, ServiceType st) const { return JobControllerPluginTestACCControl::createURL; } virtual bool isEndpointNotSupported(const std::string& endpoint) const { return endpoint.empty(); } static Plugin* GetInstance(PluginArgument *arg); }; } #endif // __ARC_JOBCONTROLLERTESTACC_H__ nordugrid-arc-5.4.2/src/hed/acc/TEST/PaxHeaders.7502/JobListRetrieverPluginTEST.cpp0000644000000000000000000000012412045235201025644 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.320699 30 ctime=1513200660.102748742 nordugrid-arc-5.4.2/src/hed/acc/TEST/JobListRetrieverPluginTEST.cpp0000644000175000002070000000126012045235201025710 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include "JobListRetrieverPluginTEST.h" namespace Arc { EndpointQueryingStatus JobListRetrieverPluginTEST::Query(const UserConfig&, const Endpoint&, std::list& jobs, const EndpointQueryOptions&) const { Glib::usleep(JobListRetrieverPluginTESTControl::delay*1000000); jobs = JobListRetrieverPluginTESTControl::jobs; return JobListRetrieverPluginTESTControl::status; }; } nordugrid-arc-5.4.2/src/hed/acc/TEST/PaxHeaders.7502/SubmitterPluginTestACC.h0000644000000000000000000000012412060134103024474 xustar000000000000000027 mtime=1354807363.254404 27 atime=1513200574.319699 30 ctime=1513200660.098748693 nordugrid-arc-5.4.2/src/hed/acc/TEST/SubmitterPluginTestACC.h0000644000175000002070000000312512060134103024542 0ustar00mockbuildmock00000000000000#ifndef __ARC_SUBMITTERPLUGINTESTACC_H__ #define __ARC_SUBMITTERPLUGINTESTACC_H__ #include #include #include #include #include namespace Arc { class ExecutionTarget; class Job; class JobDescription; class SubmissionStatus; class URL; class SubmitterPluginTestACC : public SubmitterPlugin { public: SubmitterPluginTestACC(const UserConfig& usercfg, PluginArgument* parg) : SubmitterPlugin(usercfg, parg) { supportedInterfaces.push_back("org.nordugrid.test"); } ~SubmitterPluginTestACC() {} static Plugin* GetInstance(PluginArgument *arg) { SubmitterPluginArgument *jcarg = dynamic_cast(arg); return jcarg ? 
new SubmitterPluginTestACC(*jcarg,arg) : NULL; } virtual bool isEndpointNotSupported(const std::string& endpoint) const { return endpoint.empty(); } virtual SubmissionStatus Submit(const std::list& jobdescs, const std::string& endpoint, EntityConsumer& jc, std::list& notSubmitted); virtual SubmissionStatus Submit(const std::list& jobdescs, const ExecutionTarget& et, EntityConsumer& jc, std::list& notSubmitted); virtual bool Migrate(const URL& /*jobid*/, const JobDescription& /*jobdesc*/, const ExecutionTarget& /*et*/, bool /*forcemigration*/, Job& job) { job = SubmitterPluginTestACCControl::migrateJob; return SubmitterPluginTestACCControl::migrateStatus; } }; } #endif // __ARC_SUBMITTERPLUGINTESTACC_H__ nordugrid-arc-5.4.2/src/hed/acc/TEST/PaxHeaders.7502/JobDescriptionParserPluginTestACC.h0000644000000000000000000000012412675602216026632 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.325699 30 ctime=1513200660.097748681 nordugrid-arc-5.4.2/src/hed/acc/TEST/JobDescriptionParserPluginTestACC.h0000644000175000002070000000233612675602216026703 0ustar00mockbuildmock00000000000000#ifndef __ARC_JOBDESCRIPTIONPARSERTESTACC_H__ #define __ARC_JOBDESCRIPTIONPARSERTESTACC_H__ #include #include #include namespace Arc { class JobDescriptionParserPluginTestACC : public JobDescriptionParserPlugin { private: JobDescriptionParserPluginTestACC(PluginArgument* parg) : JobDescriptionParserPlugin(parg) {} public: ~JobDescriptionParserPluginTestACC() {} static Plugin* GetInstance(PluginArgument *arg); virtual JobDescriptionParserPluginResult Parse(const std::string& /*source*/, std::list& jobdescs, const std::string& /*language = ""*/, const std::string& /*dialect = ""*/) const { jobdescs = JobDescriptionParserPluginTestACCControl::parsedJobDescriptions; return JobDescriptionParserPluginTestACCControl::parseStatus; } virtual JobDescriptionParserPluginResult Assemble(const JobDescription& /*job*/, std::string& output, const std::string& /*language*/, const std::string& /*dialect = ""*/) const { output = JobDescriptionParserPluginTestACCControl::unparsedString; return JobDescriptionParserPluginTestACCControl::unparseStatus; } }; } #endif // __ARC_JOBDESCRIPTIONPARSERTESTACC_H__ nordugrid-arc-5.4.2/src/hed/acc/TEST/PaxHeaders.7502/TargetInformationRetrieverPluginTEST.h0000644000000000000000000000012412045235201027377 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.319699 30 ctime=1513200660.100748718 nordugrid-arc-5.4.2/src/hed/acc/TEST/TargetInformationRetrieverPluginTEST.h0000644000175000002070000000176712045235201027457 0ustar00mockbuildmock00000000000000#ifndef __ARC_TARGETINFORMATIONRETRIEVERPLUGINTEST_H__ #define __ARC_TARGETINFORMATIONRETRIEVERPLUGINTEST_H__ #include #include namespace Arc { class TargetInformationRetrieverPluginTEST : public TargetInformationRetrieverPlugin { protected: TargetInformationRetrieverPluginTEST(PluginArgument* parg): TargetInformationRetrieverPlugin(parg) { supportedInterfaces.push_back("org.nordugrid.tirtest"); } public: virtual EndpointQueryingStatus Query(const UserConfig& userconfig, const Endpoint& registry, std::list& endpoints, const EndpointQueryOptions&) const; static Plugin* Instance(PluginArgument *arg); virtual bool isEndpointNotSupported(const Endpoint& endpoint) const { return endpoint.URLString.empty(); } }; } #endif // __ARC_TARGETINFORMATIONRETRIEVERPLUGINTEST_H__ nordugrid-arc-5.4.2/src/hed/acc/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315722021316 xustar000000000000000030 
mtime=1513200594.472946064 30 atime=1513200648.695609228 30 ctime=1513200660.072748375 nordugrid-arc-5.4.2/src/hed/acc/Makefile.in0000644000175000002070000005637213214315722021401 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/acc DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test 
"$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = 
@GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = 
@arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @GRIDFTP_ENABLED_FALSE@ARC0 = @GRIDFTP_ENABLED_TRUE@ARC0 = ARC0 @PYTHON_SWIG_ENABLED_FALSE@PYTHONBROKER = @PYTHON_SWIG_ENABLED_TRUE@PYTHONBROKER = PythonBroker @UNICORE_ENABLED_FALSE@UNICORE = @UNICORE_ENABLED_TRUE@UNICORE = UNICORE @EMIES_ENABLED_FALSE@EMIES = @EMIES_ENABLED_TRUE@EMIES = EMIES SUBDIRS = TEST $(ARC0) ARC1 $(EMIES) CREAM Broker $(PYTHONBROKER) $(UNICORE) JobDescriptionParser SER ldap DIST_SUBDIRS = TEST ARC0 ARC1 EMIES CREAM Broker PythonBroker UNICORE JobDescriptionParser SER ldap all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/acc/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/acc/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
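# Illustrative usage sketch (assumes a configured build tree; directory names
# are taken from the SUBDIRS list above and the flags are examples only).
# The recursive targets defined above simply descend into each SUBDIR and
# re-run make there, and, as the comment earlier in this file notes, make
# variables are overridden on the command line rather than by editing this
# generated file:
#   make all                     # from src/hed/acc: builds TEST, ARC1, Broker, ... recursively
#   (cd TEST && make)            # build a single plugin directory on its own
#   make CXXFLAGS='-O2 -g' all   # command-line overrides are passed down to sub-makes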
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/acc/PaxHeaders.7502/EMIES0000644000000000000000000000013213214316024020032 xustar000000000000000030 mtime=1513200660.209750051 30 atime=1513200668.721854157 30 ctime=1513200660.209750051 nordugrid-arc-5.4.2/src/hed/acc/EMIES/0000755000175000002070000000000013214316024020155 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/acc/EMIES/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712077521006022153 xustar000000000000000027 mtime=1358864902.003893 30 atime=1513200594.144942053 30 ctime=1513200660.194749868 nordugrid-arc-5.4.2/src/hed/acc/EMIES/Makefile.am0000644000175000002070000000330512077521006022216 0ustar00mockbuildmock00000000000000SUBDIRS = schema pkglib_LTLIBRARIES = libaccEMIES.la bin_PROGRAMS = arcemiestest man_MANS = arcemiestest.1 libaccEMIES_la_SOURCES = EMIESClient.cpp EMIESClient.h \ JobStateEMIES.cpp JobStateEMIES.h \ SubmitterPluginEMIES.cpp SubmitterPluginEMIES.h \ JobControllerPluginEMIES.cpp JobControllerPluginEMIES.h \ JobListRetrieverPluginEMIES.cpp JobListRetrieverPluginEMIES.h \ TargetInformationRetrieverPluginEMIES.cpp TargetInformationRetrieverPluginEMIES.h \ DescriptorsEMIES.cpp libaccEMIES_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libaccEMIES_la_LIBADD = \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la\ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libaccEMIES_la_LDFLAGS = -no-undefined -avoid-version -module # $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ # $(top_builddir)/src/hed/libs/data/libarcdata.la \ # $(top_builddir)/src/hed/libs/ws/libarcws.la \ # $(top_builddir)/src/hed/libs/loader/libarcloader.la \ # arcemiestest_SOURCES = TestEMIESClient.cpp EMIESClient.cpp JobStateEMIES.cpp arcemiestest_CXXFLAGS = -I$(top_srcdir)/include $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) arcemiestest_LDADD = \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) #DIST_SUBDIRS = test #SUBDIRS = $(TEST_DIR) nordugrid-arc-5.4.2/src/hed/acc/EMIES/PaxHeaders.7502/TargetInformationRetrieverPluginEMIES.h0000644000000000000000000000012212103757741027560 xustar000000000000000027 mtime=1359994849.573367 25 atime=1513200574.3577 30 ctime=1513200660.206750014 nordugrid-arc-5.4.2/src/hed/acc/EMIES/TargetInformationRetrieverPluginEMIES.h0000644000175000002070000000232112103757741027625 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_TARGETINFORMATIONRETRIEVEREMIES_H__ #define __ARC_TARGETINFORMATIONRETRIEVEREMIES_H__ #include #include namespace Arc { class Logger; class EndpointQueryingStatus; class ExecutionTarget; class URL; class UserConfig; class XMLNode; class TargetInformationRetrieverPluginEMIES: public TargetInformationRetrieverPlugin { public: TargetInformationRetrieverPluginEMIES(PluginArgument* parg): TargetInformationRetrieverPlugin(parg) { supportedInterfaces.push_back("org.ogf.glue.emies.resourceinfo"); }; ~TargetInformationRetrieverPluginEMIES() {}; static Plugin* Instance(PluginArgument *arg) { return new TargetInformationRetrieverPluginEMIES(arg); }; virtual EndpointQueryingStatus 
Query(const UserConfig&, const Endpoint&, std::list&, const EndpointQueryOptions&) const; virtual bool isEndpointNotSupported(const Endpoint&) const; static void ExtractTargets(const URL&, XMLNode response, std::list&); private: static Logger logger; }; } // namespace Arc #endif // __ARC_TARGETINFORMATIONRETRIEVEREMIES_H__ nordugrid-arc-5.4.2/src/hed/acc/EMIES/PaxHeaders.7502/JobControllerPluginEMIES.h0000644000000000000000000000012213165644550025014 xustar000000000000000027 mtime=1507281256.705161 25 atime=1513200574.3727 30 ctime=1513200660.202749966 nordugrid-arc-5.4.2/src/hed/acc/EMIES/JobControllerPluginEMIES.h0000644000175000002070000000374513165644550025074 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOBCONTROLLEREMIES_H__ #define __ARC_JOBCONTROLLEREMIES_H__ #include namespace Arc { class URL; class JobControllerPluginEMIES : public JobControllerPlugin { public: JobControllerPluginEMIES(const UserConfig& usercfg, PluginArgument* parg) : JobControllerPlugin(usercfg, parg),clients(usercfg) { supportedInterfaces.push_back("org.ogf.glue.emies.activitymanagement"); } virtual ~JobControllerPluginEMIES() {} virtual void SetUserConfig(const UserConfig& uc); static Plugin* Instance(PluginArgument *arg) { JobControllerPluginArgument *jcarg = dynamic_cast(arg); return jcarg ? new JobControllerPluginEMIES(*jcarg, arg) : NULL; } virtual bool isEndpointNotSupported(const std::string& endpoint) const; virtual void UpdateJobs(std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool CleanJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool CancelJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool RenewJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool ResumeJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool GetURLToJobResource(const Job& job, Job::ResourceType resource, URL& url) const; virtual bool GetJobDescription(const Job& job, std::string& desc_str) const; private: mutable EMIESClients clients; static Logger logger; }; } // namespace Arc #endif // __ARC_JOBCONTROLLEREMIES_H__ nordugrid-arc-5.4.2/src/hed/acc/EMIES/PaxHeaders.7502/EMIESClient.cpp0000644000000000000000000000012213213471767022632 xustar000000000000000027 mtime=1512993783.598054 25 atime=1513200574.3587 30 ctime=1513200660.197749904 nordugrid-arc-5.4.2/src/hed/acc/EMIES/EMIESClient.cpp0000644000175000002070000014515413213471767022713 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "JobStateEMIES.h" #include "EMIESClient.h" #ifdef CPPUNITTEST #include "../../libs/communication/test/SimulatorClasses.h" #define DelegationProviderSOAP DelegationProviderSOAPTest #endif static const std::string ES_TYPES_NPREFIX("estypes"); static const std::string ES_TYPES_NAMESPACE("http://www.eu-emi.eu/es/2010/12/types"); static const std::string ES_CREATE_NPREFIX("escreate"); static const std::string ES_CREATE_NAMESPACE("http://www.eu-emi.eu/es/2010/12/creation/types"); static const std::string ES_DELEG_NPREFIX("esdeleg"); static const std::string ES_DELEG_NAMESPACE("http://www.eu-emi.eu/es/2010/12/delegation/types"); static const std::string 
ES_RINFO_NPREFIX("esrinfo"); static const std::string ES_RINFO_NAMESPACE("http://www.eu-emi.eu/es/2010/12/resourceinfo/types"); static const std::string ES_MANAG_NPREFIX("esmanag"); static const std::string ES_MANAG_NAMESPACE("http://www.eu-emi.eu/es/2010/12/activitymanagement/types"); static const std::string ES_AINFO_NPREFIX("esainfo"); static const std::string ES_AINFO_NAMESPACE("http://www.eu-emi.eu/es/2010/12/activity/types"); static const std::string ES_ADL_NPREFIX("esadl"); static const std::string ES_ADL_NAMESPACE("http://www.eu-emi.eu/es/2010/12/adl"); static const std::string GLUE2_NPREFIX("glue2"); static const std::string GLUE2_NAMESPACE("http://schemas.ogf.org/glue/2009/03/spec/2/0"); static const std::string GLUE2PRE_NPREFIX("glue2pre"); static const std::string GLUE2PRE_NAMESPACE("http://schemas.ogf.org/glue/2008/05/spec_2.0_d41_r01"); static const std::string GLUE2D_NPREFIX("glue2d"); static const std::string GLUE2D_NAMESPACE("http://schemas.ogf.org/glue/2009/03/spec_2.0_r1"); namespace Arc { Logger EMIESClient::logger(Logger::rootLogger, "EMI ES Client"); static void set_namespaces(NS& ns) { ns[ES_TYPES_NPREFIX] = ES_TYPES_NAMESPACE; ns[ES_CREATE_NPREFIX] = ES_CREATE_NAMESPACE; ns[ES_DELEG_NPREFIX] = ES_DELEG_NAMESPACE; ns[ES_RINFO_NPREFIX] = ES_RINFO_NAMESPACE; ns[ES_MANAG_NPREFIX] = ES_MANAG_NAMESPACE; ns[ES_AINFO_NPREFIX] = ES_AINFO_NAMESPACE; ns[ES_ADL_NPREFIX] = ES_ADL_NAMESPACE; ns[GLUE2_NPREFIX] = GLUE2_NAMESPACE; ns[GLUE2PRE_NPREFIX] = GLUE2PRE_NAMESPACE; ns[GLUE2D_NPREFIX] = GLUE2D_NAMESPACE; ns["jsdl"]="http://schemas.ggf.org/jsdl/2005/11/jsdl"; // TODO: move to EMI ES lang. } EMIESClient::EMIESClient(const URL& url, const MCCConfig& cfg, int timeout) : client(NULL), rurl(url), cfg(cfg), timeout(timeout), soapfault(false) { logger.msg(DEBUG, "Creating an EMI ES client"); client = new ClientSOAP(cfg, url, timeout); if (!client) logger.msg(VERBOSE, "Unable to create SOAP client used by EMIESClient."); set_namespaces(ns); } EMIESClient::~EMIESClient() { if(client) delete client; } std::string EMIESClient::delegation(const std::string& renew_id) { std::string id = dodelegation(renew_id); if(!id.empty()) return id; delete client; client = NULL; if(!reconnect()) return id; return dodelegation(renew_id); } std::string EMIESClient::dodelegation(const std::string& renew_id) { DelegationProviderSOAP* deleg; if (!cfg.credential.empty()) { deleg = new DelegationProviderSOAP(cfg.credential); } else { const std::string& cert = (!cfg.proxy.empty() ? cfg.proxy : cfg.cert); const std::string& key = (!cfg.proxy.empty() ? 
cfg.proxy : cfg.key); if (key.empty() || cert.empty()) { lfailure = "Failed locating credentials for delegating."; return ""; } deleg = new DelegationProviderSOAP(cert, key); } if(!client->Load()) { lfailure = "Failed to initiate client connection."; delete deleg; return ""; } MCC* entry = client->GetEntry(); if(!entry) { lfailure = "Client connection has no entry point."; delete deleg; return ""; } if(!renew_id.empty()) deleg->ID(renew_id); logger.msg(VERBOSE, "Initiating delegation procedure"); MessageAttributes attrout; MessageAttributes attrin; attrout.set("SOAP:ENDPOINT",rurl.str()); if (!deleg->DelegateCredentialsInit(*entry,&attrout,&attrin,&(client->GetContext()), (renew_id.empty()?DelegationProviderSOAP::EMIDS:DelegationProviderSOAP::EMIDSRENEW))) { lfailure = "Failed to initiate delegation credentials"; delete deleg; return ""; } std::string delegation_id = deleg->ID(); if(delegation_id.empty()) { lfailure = "Failed to obtain delegation identifier"; delete deleg; return ""; }; if (!deleg->UpdateCredentials(*entry,&(client->GetContext()),DelegationRestrictions(),DelegationProviderSOAP::EMIDS)) { lfailure = "Failed to pass delegated credentials"; delete deleg; return ""; } delete deleg; return delegation_id; } bool EMIESClient::reconnect(void) { delete client; client = NULL; logger.msg(DEBUG, "Re-creating an EMI ES client"); client = new ClientSOAP(cfg, rurl, timeout); if (!client) { lfailure = "Unable to create SOAP client used by EMIESClient."; return false; } set_namespaces(ns); return true; } bool EMIESClient::process(PayloadSOAP& req, XMLNode& response, bool retry) { soapfault = false; if (!client) { lfailure = "EMIESClient was not created properly."; return false; } logger.msg(VERBOSE, "Processing a %s request", req.Child(0).FullName()); std::string action = req.Child(0).Name(); PayloadSOAP* resp = NULL; if (!client->process(&req, &resp)) { logger.msg(VERBOSE, "%s request failed", req.Child(0).FullName()); lfailure = "Failed processing request"; delete client; client = NULL; if(!retry) return false; if(!reconnect()) return false; return process(req,response,false); } if (resp == NULL) { logger.msg(VERBOSE, "No response from %s", rurl.str()); lfailure = "No response received"; delete client; client = NULL; if(!retry) return false; if(!reconnect()) return false; return process(req,response,false); } if (resp->IsFault()) { logger.msg(VERBOSE, "%s request to %s failed with response: %s", req.Child(0).FullName(), rurl.str(), resp->Fault()->Reason()); lfailure = "Fault response received: "+resp->Fault()->Reason(); soapfault = true; // Trying to check if it is EMI ES fault if(resp->Fault()->Code() != SOAPFault::Receiver) retry = false; // Check if fault is EMI ES fault XMLNode soapFaultDetail = resp->Fault()->Detail(); if (EMIESFault::isEMIESFault(soapFaultDetail)) { soapFaultDetail.New(response); delete resp; return false; } { std::string s; resp->GetXML(s); logger.msg(DEBUG, "XML response: %s", s); }; delete resp; delete client; client = NULL; if(!retry) return false; if(!reconnect()) return false; return process(req,response,false); } if (!(*resp)[action + "Response"]) { logger.msg(VERBOSE, "%s request to %s failed. 
Unexpected response: %s.", action, rurl.str(), resp->Child(0).Name()); lfailure = "Unexpected response received"; delete resp; return false; } // TODO: switch instead of copy (*resp)[action + "Response"].New(response); delete resp; return true; } bool EMIESClient::submit(XMLNode jobdesc, EMIESResponse** response, const std::string delegation_id) { std::string action = "CreateActivity"; logger.msg(VERBOSE, "Creating and sending job submit request to %s", rurl.str()); // Create job request /* escreate:CreateActivity esadl:ActivityDescription escreate:CreateActivityResponse escreate:ActivityCreationResponse estypes:ActivityID estypes:ActivityMgmtEndpointURL estypes:ResourceInfoEndpointURL estypes:ActivityStatus escreate:ETNSC escreate:StageInDirectory URL escreate:SessionDirectory URL escreate:StageOutDirectory URL or estypes:InternalBaseFault estypes:AccessControlFault escreate:InvalidActivityDescriptionFault escreate:InvalidActivityDescriptionSemanticFault escreate:UnsupportedCapabilityFault */ PayloadSOAP req(ns); XMLNode op = req.NewChild("escreate:" + action); XMLNode act_doc = op.NewChild(jobdesc); act_doc.Name("esadl:ActivityDescription"); // In case it had different top element if(!delegation_id.empty()) { // Inserting delegation id into job desription - ADL specific XMLNodeList sources = op.Path("ActivityDescription/DataStaging/InputFile/Source"); for(XMLNodeList::iterator item = sources.begin();item!=sources.end();++item) { item->NewChild("esadl:DelegationID") = delegation_id; }; XMLNodeList targets = op.Path("ActivityDescription/DataStaging/OutputFile/Target"); for(XMLNodeList::iterator item = targets.begin();item!=targets.end();++item) { item->NewChild("esadl:DelegationID") = delegation_id; }; if(!op["ActivityDescription"]["DataStaging"]["DelegationID"]) { /* XMLNodeList outputs = op.Path("ActivityDescription/DataStaging/OutputFile"); for(XMLNodeList::iterator item = outputs.begin();item!=outputs.end();++item) { if(!((*item)["Target"])) { if(((std::string)(*item)["Name"])[0] == '@') { // Hack for ARC implementation - names starting from @ represent // lists of files and also may need delegated credentials. 
op["ActivityDescription"]["DataStaging"].NewChild("nordugrid-adl:DelegationID") = delegation_id; break; }; }; }; */ // If delegation provided always pass it if(!op["ActivityDescription"]["DataStaging"]) op["ActivityDescription"].NewChild("esadl:DataStaging"); op["ActivityDescription"]["DataStaging"].NewChild("nordugrid-adl:DelegationID") = delegation_id; }; }; { std::string s; act_doc.GetXML(s); logger.msg(DEBUG, "Job description to be sent: %s", s); }; XMLNode xmlResponse; if (!process(req, xmlResponse)) { if (EMIESFault::isEMIESFault(xmlResponse)) { EMIESFault *fault = new EMIESFault; *fault = xmlResponse; *response = fault; } else { *response = new UnexpectedError(lfailure); } return false; } xmlResponse.Namespaces(ns); XMLNode item = xmlResponse.Child(0); if(!MatchXMLName(item,"escreate:ActivityCreationResponse")) { lfailure = "Response is not ActivityCreationResponse"; *response = new UnexpectedError(lfailure); return false; } EMIESFault *fault = new EMIESFault; *fault = item; if(*fault) { lfailure = "Service responded with fault: "+fault->message+" - "+fault->description; *response = fault; return false; }; delete fault; EMIESJob *job = new EMIESJob; *job = item; if(!(*job)) { lfailure = "Response is not valid ActivityCreationResponse"; *response = new UnexpectedError(lfailure); delete job; return false; }; *response = job; return true; } bool EMIESClient::submit(const XMLNodeList& jobdescs, std::list& responses, const std::string delegation_id) { std::string action = "CreateActivity"; logger.msg(VERBOSE, "Creating and sending job submit request to %s", rurl.str()); // Create job request /* escreate:CreateActivity esadl:ActivityDescription escreate:CreateActivityResponse escreate:ActivityCreationResponse estypes:ActivityID estypes:ActivityMgmtEndpointURL estypes:ResourceInfoEndpointURL estypes:ActivityStatus escreate:ETNSC escreate:StageInDirectory URL escreate:SessionDirectory URL escreate:StageOutDirectory URL or estypes:InternalBaseFault estypes:AccessControlFault escreate:InvalidActivityDescriptionFault escreate:InvalidActivityDescriptionSemanticFault escreate:UnsupportedCapabilityFault */ bool noFailures = true; int limit = 1000000; // 1 M - Safety XMLNodeList::const_iterator itSubmit = jobdescs.begin(), itLastProcessedEnd = jobdescs.begin(); while (itSubmit != jobdescs.end() && limit > 0) { PayloadSOAP req(ns); XMLNode op = req.NewChild("escreate:" + action); for (int i = 0; itSubmit != jobdescs.end() && i < limit; ++itSubmit, ++i) { XMLNode act_doc = op.NewChild(*itSubmit); act_doc.Name("esadl:ActivityDescription"); // In case it had different top element if(!delegation_id.empty()) { // Inserting delegation id into job desription - ADL specific XMLNodeList sources = act_doc.Path("DataStaging/InputFile/Source"); for(XMLNodeList::iterator item = sources.begin();item!=sources.end();++item) { XMLNode delegNode = (*item)["esadl:DelegationID"]; if (!(bool)delegNode) { delegNode = item->NewChild("esadl:DelegationID"); } delegNode = delegation_id; }; XMLNodeList targets = act_doc.Path("DataStaging/OutputFile/Target"); for(XMLNodeList::iterator item = targets.begin();item!=targets.end();++item) { XMLNode delegNode = (*item)["esadl:DelegationID"]; if (!(bool)delegNode) { delegNode = item->NewChild("esadl:DelegationID"); } delegNode = delegation_id; }; }; { std::string s; itSubmit->GetXML(s); logger.msg(DEBUG, "Job description to be sent: %s", s); }; } XMLNode xmlResponse; if (!process(req, xmlResponse)) { if (EMIESFault::isEMIESFault(xmlResponse)) { EMIESFault *fault = new EMIESFault; 
*fault = xmlResponse; if (fault->type == "VectorLimitExceededFault") { if (fault->limit < limit) { logger.msg(VERBOSE, "New limit for vector queries returned by EMI ES service: %d", fault->limit); itSubmit = itLastProcessedEnd; limit = fault->limit; delete fault; continue; } // Bail out if response is a limit higher than the current. logger.msg(DEBUG, "Error: Service returned a limit higher or equal to current limit (current: %d; returned: %d)", limit, fault->limit); delete fault; responses.push_back(new UnexpectedError("Service returned a limit higher or equal to current limit")); return false; } responses.push_back(fault); return false; } responses.push_back(new UnexpectedError(lfailure)); return false; } xmlResponse.Namespaces(ns); for (XMLNode n = xmlResponse["escreate:ActivityCreationResponse"]; (bool)n; ++n) { EMIESJob *j = new EMIESJob; *j = n; if (*j) { responses.push_back(j); continue; } delete j; noFailures = false; EMIESFault *fault = new EMIESFault; *fault = n; if (*fault) { responses.push_back(fault); continue; } delete fault; responses.push_back(new UnexpectedError("Invalid ActivityCreationResponse: It is neither a new activity or a fault")); } itLastProcessedEnd = itSubmit; } return noFailures; } bool EMIESClient::stat(const EMIESJob& job, EMIESJobState& state) { XMLNode st; if(!stat(job,st)) return false; state = st; if(!state) { lfailure = "Response does not contain valid ActivityStatus"; return false; }; return true; } bool EMIESClient::stat(const EMIESJob& job, XMLNode& state) { /* esainfo:GetActivityStatus estypes:ActivityID esainfo:GetActivityStatusResponse esainfo:ActivityStatusItem estypes:ActivityID estypes:ActivityStatus or estypes:InternalBaseFault AccessControlFault ActivityNotFoundFault UnableToRetrieveStatusFault OperationNotPossibleFault OperationNotAllowedFault */ std::string action = "GetActivityStatus"; logger.msg(VERBOSE, "Creating and sending job information query request to %s", rurl.str()); PayloadSOAP req(ns); req.NewChild("esainfo:" + action).NewChild("estypes:ActivityID") = job.id; XMLNode response; if (!process(req, response)) return false; response.Namespaces(ns); XMLNode item = response.Child(0); if(!MatchXMLName(item,"esainfo:ActivityStatusItem")) { lfailure = "Response is not ActivityStatusItem"; return false; }; if((std::string)(item["estypes:ActivityID"]) != job.id) { lfailure = "Response contains wrong or not ActivityID"; return false; }; EMIESFault fault; fault = item; if(fault) { lfailure = "Service responded with fault: "+fault.message+" - "+fault.description; return false; }; XMLNode status = item["estypes:ActivityStatus"]; if(!status) { lfailure = "Response does not contain ActivityStatus"; return false; }; status.New(state); return true; } bool EMIESClient::info(EMIESJob& job, XMLNode& info) { std::string action = "GetActivityInfo"; logger.msg(VERBOSE, "Creating and sending job information query request to %s", rurl.str()); PayloadSOAP req(ns); req.NewChild("esainfo:" + action).NewChild("estypes:ActivityID") = job.id; XMLNode response; if (!process(req, response)) return false; response.Namespaces(ns); XMLNode item = response.Child(0); if(!MatchXMLName(item,"esainfo:ActivityInfoItem")) { lfailure = "Response is not ActivityInfoItem"; return false; }; if((std::string)(item["estypes:ActivityID"]) != job.id) { lfailure = "Response contains wrong or not ActivityID"; return false; }; EMIESFault fault; fault = item; if(fault) { lfailure = "Service responded with fault: "+fault.message+" - "+fault.description; return false; }; XMLNode infodoc 
= item["esainfo:ActivityInfoDocument"]; if(!infodoc) { lfailure = "Response does not contain ActivityInfoDocument"; return false; }; infodoc.New(info); return true; } template void EMIESClient::info(const std::list& jobs, std::list& responses) { /* esainfo:GetActivityInfo estypes:ActivityID esainfo:AttributeName (xsd:QName) esainfo:GetActivityInfoResponse esainfo:ActivityInfoItem estypes:ActivityID esainfo:ActivityInfoDocument (glue:ComputingActivity_t) or esainfo:AttributeInfoItem or estypes:InternalBaseFault AccessControlFault ActivityNotFoundFault UnknownAttributeFault UnableToRetrieveStatusFault OperationNotPossibleFault OperationNotAllowedFault */ std::string action = "GetActivityInfo"; logger.msg(VERBOSE, "Creating and sending job information query request to %s", rurl.str()); int limit = 1000000; // 1 M - Safety typename std::list::const_iterator itRequested = jobs.begin(), itLastProcessedEnd = jobs.begin(); while (itRequested != jobs.end() && limit > 0) { PayloadSOAP req(ns); XMLNode actionNode = req.NewChild("esainfo:" + action); for (int i = 0; itRequested != jobs.end() && i < limit; ++itRequested, ++i) { actionNode.NewChild("estypes:ActivityID") = EMIESJob::getIDFromJob(*itRequested); } XMLNode xmlResponse; if (!process(req, xmlResponse)) { if (EMIESFault::isEMIESFault(xmlResponse)) { EMIESFault *f = new EMIESFault(); *f = xmlResponse; if (f->type == "VectorLimitExceededFault") { if (f->limit < limit) { logger.msg(VERBOSE, "New limit for vector queries returned by EMI ES service: %d", f->limit); itRequested = itLastProcessedEnd; limit = f->limit; delete f; continue; } // Bail out if response is a limit higher than the current. logger.msg(DEBUG, "Error: Service returned a limit higher or equal to current limit (current: %d; returned: %d)", limit, f->limit); delete f; responses.push_back(new UnexpectedError("Service returned a limit higher or equal to current limit")); return; } responses.push_back(f); return; } responses.push_back(new UnexpectedError(lfailure)); return; } for (XMLNode n = xmlResponse["esainfo:ActivityInfoItem"]; (bool)n; ++n) { if ((bool)n["esainfo:ActivityInfoDocument"]) { responses.push_back(new EMIESJobInfo(n)); } else { EMIESFault *f = new EMIESFault(); *f = n; if (*f) { responses.push_back(f); } else { delete f; responses.push_back(new UnexpectedError("An ActivityInfoDocument or EMI ES fault element was expected")); } } } itLastProcessedEnd = itRequested; } } template void EMIESClient::info(const std::list&, std::list&); template void EMIESClient::info(const std::list&, std::list&); bool EMIESClient::info(EMIESJob& job, Job& arcjob) { /* esainfo:GetActivityInfo estypes:ActivityID esainfo:AttributeName (xsd:QName) esainfo:GetActivityInfoResponse esainfo:ActivityInfoItem estypes:ActivityID esainfo:ActivityInfoDocument (glue:ComputingActivity_t) or esainfo:AttributeInfoItem or estypes:InternalBaseFault AccessControlFault ActivityNotFoundFault UnknownAttributeFault UnableToRetrieveStatusFault OperationNotPossibleFault OperationNotAllowedFault */ XMLNode infodoc; if (!info(job, infodoc)) return false; // Processing generic GLUE2 information arcjob.SetFromXML(infodoc); // Looking for EMI ES specific state XMLNode state = infodoc["State"]; EMIESJobState st; for(;(bool)state;++state) st = (std::string)state; if(st) arcjob.State = JobStateEMIES(st); EMIESJobState rst; XMLNode rstate = infodoc["RestartState"]; for(;(bool)rstate;++rstate) rst = (std::string)rstate; arcjob.RestartState = JobStateEMIES(rst); XMLNode ext; ext = infodoc["esainfo:StageInDirectory"]; 
for(;(bool)ext;++ext) job.stagein.push_back((std::string)ext); ext = infodoc["esainfo:StageOutDirectory"]; for(;(bool)ext;++ext) job.stageout.push_back((std::string)ext); ext = infodoc["esainfo:SessionDirectory"]; for(;(bool)ext;++ext) job.session.push_back((std::string)ext); XMLNode exts = infodoc["Extensions"]; if((bool)exts) { ext = exts["Extension"]; for(;(bool)ext;++ext) { if(ext["LocalID"] == "urn:delegid:nordugrid.org") { arcjob.DelegationID.push_back((std::string)ext["Value"]); }; }; }; // Making EMI ES specific job id // URL-izing job id arcjob.JobID = job.manager.str() + "/" + job.id; //if(!arcjob) return false; return true; } static bool add_urls(std::list& urls, XMLNode source, const URL& match) { bool matched = false; for(;(bool)source;++source) { URL url((std::string)source); if(!url) continue; if(match && (match == url)) matched = true; urls.push_back(url); }; return matched; } bool EMIESClient::sstat(std::list& activitycreation, std::list& activitymanagememt, std::list& activityinfo, std::list& resourceinfo, std::list& delegation) { activitycreation.clear(); activitymanagememt.clear(); activityinfo.clear(); resourceinfo.clear(); delegation.clear(); XMLNode info; if(!sstat(info)) return false; XMLNode service = info["ComputingService"]; for(;(bool)service;++service) { bool service_matched = false; XMLNode endpoint = service["ComputingEndpoint"]; for(;(bool)endpoint;++endpoint) { XMLNode name = endpoint["InterfaceName"]; for(;(bool)name;++name) { std::string iname = (std::string)name; if(iname == "org.ogf.glue.emies.activitycreation") { add_urls(activitycreation,endpoint["URL"],URL()); } else if(iname == "org.ogf.glue.emies.activitymanagememt") { add_urls(activitymanagememt,endpoint["URL"],URL()); } else if(iname == "org.ogf.glue.emies.activityinfo") { add_urls(activityinfo,endpoint["URL"],URL()); } else if(iname == "org.ogf.glue.emies.resourceinfo") { if(add_urls(resourceinfo,endpoint["URL"],rurl)) { service_matched = true; }; } else if(iname == "org.ogf.glue.emies.delegation") { add_urls(delegation,endpoint["URL"],URL()); }; }; }; if(service_matched) return true; activitycreation.clear(); activitymanagememt.clear(); activityinfo.clear(); resourceinfo.clear(); delegation.clear(); }; return false; } bool EMIESClient::sstat(XMLNode& response, bool nsapply) { /* esrinfo:GetResourceInfo esrinfo:GetResourceInfoResponse esrinfo:Services glue:ComputingService glue:Service */ std::string action = "GetResourceInfo"; logger.msg(VERBOSE, "Creating and sending service information request to %s", rurl.str()); PayloadSOAP req(ns); XMLNode op = req.NewChild("esrinfo:" + action); XMLNode res; if (!process(req, res)) return false; if(nsapply) res.Namespaces(ns); XMLNode services = res["Services"]; if(!services) { lfailure = "Missing Services in response"; return false; } services.Move(response); //XMLNode service = services["ComputingService"]; //if(!service) { // lfailure = "Missing ComputingService in response"; // return false; //} //XMLNode manager = services["Service"]; //if(!manager) { // lfailure = "Missing Service in response"; // return false; //} // Converting elements to glue2 namespace so it canbe used by any glue2 parser /* std::string prefix; for(int n = 0;;++n) { XMLNode c = service.Child(n); if((c.Prefix() == "glue2") || (c.Prefix() == "glue2pre") || (c.Prefix() == "glue2d")) { prefix=c.Prefix(); break; }; }; if(prefix.empty()) for(int n = 0;;++n) { XMLNode c = manager.Child(n); if((c.Prefix() == "glue2") || (c.Prefix() == "glue2pre") || (c.Prefix() == "glue2d")) { 
prefix=c.Prefix(); break; }; }; if(prefix.empty()) prefix="glue2"; service.Name(prefix+":ComputingService"); manager.Name(prefix+":ActivityManager"); */ return true; } bool EMIESClient::squery(const std::string& query, XMLNodeContainer& response, bool nsapply) { /* esrinfo:QueryResourceInfo esrinfo:QueryDialect esrinfo:QueryExpression esrinfo:QueryResourceInfoResponse esrinfo:QueryResourceInfoItem */ std::string action = "QueryResourceInfo"; logger.msg(VERBOSE, "Creating and sending service information query request to %s", rurl.str()); PayloadSOAP req(ns); XMLNode op = req.NewChild("esrinfo:" + action); op.NewChild("esrinfo:QueryDialect") = "XPATH 1.0"; XMLNode exp = op.NewChild("esrinfo:QueryExpression") = query; XMLNode res; if (!process(req, res)) { if(!soapfault) return false; // If service does not like how expression is presented, try another way if(!client) { if(!reconnect()) return false; } exp = ""; exp.NewChild("query") = query; if (!process(req, res)) { return false; } } if(nsapply) res.Namespaces(ns); XMLNode item = res["QueryResourceInfoItem"]; for(;item;++item) { response.AddNew(item); } return true; } bool EMIESClient::kill(const EMIESJob& job) { /* esmanag:CancelActivity estypes:ActivityID esmanag:CancelActivityResponse esmanag:CancelActivityResponseItem estypes:ActivityID esmang:EstimatedTime (xsd:unsignedLong) or estypes:InternalBaseFault OperationNotPossibleFault OperationNotAllowedFault ActivityNotFoundFault AccessControlFault */ std::string action = "CancelActivity"; logger.msg(VERBOSE, "Creating and sending job clean request to %s", rurl.str()); return dosimple(action,job.id); } bool EMIESClient::clean(const EMIESJob& job) { /* esmanag:WipeActivity estypes:ActivityID esmanag:WipeActivityResponse esmanag:WipeActivityResponseItem estypes:ActivityID esmang:EstimatedTime (xsd:unsignedLong) or estypes:InternalBaseFault OperationNotPossibleFault OperationNotAllowedFault ActivityNotFoundFault AccessControlFault */ std::string action = "WipeActivity"; logger.msg(VERBOSE, "Creating and sending job clean request to %s", rurl.str()); return dosimple(action,job.id); } bool EMIESClient::suspend(const EMIESJob& job) { /* esmanag:PauseActivity estypes:ActivityID esmanag:PauseActivityResponse esmanag:PauseActivityResponseItem estypes:ActivityID esmang:EstimatedTime (xsd:unsignedLong) or estypes:InternalBaseFault OperationNotPossibleFault OperationNotAllowedFault ActivityNotFoundFault AccessControlFault */ std::string action = "PauseActivity"; logger.msg(VERBOSE, "Creating and sending job suspend request to %s", rurl.str()); return dosimple(action,job.id); } bool EMIESClient::resume(const EMIESJob& job) { /* esmanag:ResumeActivity estypes:ActivityID esmanag:ResumeActivityResponse esmanag:ResumeActivityResponseItem estypes:ActivityID esmang:EstimatedTime (xsd:unsignedLong) or estypes:InternalBaseFault OperationNotPossibleFault OperationNotAllowedFault ActivityNotFoundFault AccessControlFault */ std::string action = "ResumeActivity"; logger.msg(VERBOSE, "Creating and sending job resume request to %s", rurl.str()); return dosimple(action,job.id); } bool EMIESClient::restart(const EMIESJob& job) { /* esmanag:RestartActivity estypes:ActivityID esmanag:RestartActivityResponse esmanag:RestartActivityResponseItem estypes:ActivityID esmang:EstimatedTime (xsd:unsignedLong) or estypes:InternalBaseFault OperationNotPossibleFault OperationNotAllowedFault ActivityNotFoundFault AccessControlFault */ std::string action = "RestartActivity"; logger.msg(VERBOSE, "Creating and sending job restart 
request to %s", rurl.str()); return dosimple(action,job.id); } bool EMIESClient::dosimple(const std::string& action, const std::string& id) { PayloadSOAP req(ns); XMLNode op = req.NewChild("esmanag:" + action); op.NewChild("estypes:ActivityID") = id; // Send request XMLNode response; if (!process(req, response)) return false; response.Namespaces(ns); XMLNode item = response[action+"ResponseItem"]; if(!item) { lfailure = "Response does not contain "+action+"ResponseItem"; return false; }; if((std::string)item["ActivityID"] != id) { lfailure = "Response contains wrong or not ActivityID"; return false; }; EMIESFault fault; fault = item; if(fault) { lfailure = "Service responded with fault: "+fault.message+" - "+fault.description; return false; }; if((bool)item["EstimatedTime"]) { // time till operation is complete // TODO: do something for non-0 time. Maybe pull status. } return true; } bool EMIESClient::notify(const EMIESJob& job) { /* esmanag:NotifyService esmanag:NotifyRequestItem estypes:ActivityID esmanag:NotifyMessage client-datapull-done client-datapush-done esmanag:NotifyServiceResponse esmang:NotifyResponseItem" estypes:ActivityID Acknowledgement or estypes:InternalBaseFault OperationNotPossibleFault OperationNotAllowedFault InternalNotificationFault ActivityNotFoundFault AccessControlFault */ std::string action = "NotifyService"; logger.msg(VERBOSE, "Creating and sending job notify request to %s", rurl.str()); PayloadSOAP req(ns); XMLNode op = req.NewChild("esmanag:" + action); XMLNode ritem = op.NewChild("esmanag:NotifyRequestItem"); ritem.NewChild("estypes:ActivityID") = job.id; ritem.NewChild("esmanag:NotifyMessage") = "client-datapush-done"; // Send request XMLNode response; if (!process(req, response)) return false; response.Namespaces(ns); XMLNode item = response["NotifyResponseItem"]; if(!item) { lfailure = "Response does not contain NotifyResponseItem"; return false; }; if((std::string)item["ActivityID"] != job.id) { lfailure = "Response contains wrong or not ActivityID"; return false; }; EMIESFault fault; fault = item; if(fault) { lfailure = "Service responded with fault: "+fault.message+" - "+fault.description; return false; }; //if(!item["Acknowledgement"]) { // lfailure = "Response does not contain Acknowledgement"; // return false; //}; return true; } bool EMIESClient::notify(const std::list jobs, std::list& responses) { /* esmanag:NotifyService esmanag:NotifyRequestItem estypes:ActivityID esmanag:NotifyMessage client-datapull-done client-datapush-done esmanag:NotifyServiceResponse esmang:NotifyResponseItem" estypes:ActivityID Acknowledgement or estypes:InternalBaseFault OperationNotPossibleFault OperationNotAllowedFault InternalNotificationFault ActivityNotFoundFault AccessControlFault */ std::string action = "NotifyService"; logger.msg(VERBOSE, "Creating and sending notify request to %s", rurl.str()); bool retval = true; int limit = 1000000; // 1 M - Safety std::list::const_iterator itRequested = jobs.begin(), itLastProcessedEnd = jobs.begin(); while (itRequested != jobs.end() && limit > 0) { PayloadSOAP req(ns); XMLNode op = req.NewChild("esmanag:" + action); for (int i = 0; itRequested != jobs.end() && i < limit; ++itRequested, ++i) { XMLNode ritem = op.NewChild("esmanag:NotifyRequestItem"); ritem.NewChild("estypes:ActivityID") = (**itRequested).id; ritem.NewChild("esmanag:NotifyMessage") = "client-datapush-done"; } // Send request XMLNode xmlResponse; if (!process(req, xmlResponse)) { if (EMIESFault::isEMIESFault(xmlResponse)) { EMIESFault *f = new EMIESFault(); *f 
= xmlResponse; if (f->type == "VectorLimitExceededFault") { if (f->limit < limit) { logger.msg(VERBOSE, "New limit for vector queries returned by EMI ES service: %d", f->limit); itRequested = itLastProcessedEnd; limit = f->limit; delete f; continue; } // Bail out if response is a limit higher than the current. logger.msg(DEBUG, "Error: Service returned a limit higher or equal to current limit (current: %d; returned: %d)", limit, f->limit); delete f; responses.push_back(new UnexpectedError("Service returned a limit higher or equal to current limit")); return false; } responses.push_back(f); return false; } responses.push_back(new UnexpectedError(lfailure)); return false; } xmlResponse.Namespaces(ns); for (XMLNode n = xmlResponse["NotifyResponseItem"]; (bool)n; ++n) { if(!(bool)n["ActivityID"]) { responses.push_back(new UnexpectedError("NotifyResponseItem element contained no ActivityID element")); retval = false; continue; }; if(EMIESFault::isEMIESFault(n)) { EMIESFault *fault = new EMIESFault; *fault = n; responses.push_back(fault); retval = false; continue; }; responses.push_back(new EMIESAcknowledgement((std::string)n["ActivityID"])); // TODO: Use of Acknowledgement element is unclear from current specification (v1.16). //if(!item["Acknowledgement"]) { // lfailure = "Response does not contain Acknowledgement"; // return false; //}; } itLastProcessedEnd = itRequested; } return retval; } bool EMIESClient::list(std::list& jobs) { /* esainfo:ListActivities esainfo:FromDate (xsd:dateTime) 0-1 esainfo:ToDate (xsd:dateTime) 0-1 esaonfo:Limit 0-1 esainfo:ActivityStatus esainfo:Status 0- esainfo:Attribute 0- esainfo:ListActivitiesResponse esmain:ActivityID 0- truncated (attribute) - false InvalidTimeIntervalFault AccessControlFault InternalBaseFault */ std::string action = "ListActivities"; logger.msg(VERBOSE, "Creating and sending job list request to %s", rurl.str()); PayloadSOAP req(ns); XMLNode op = req.NewChild("esainfo:" + action); // Send request XMLNode response; if (!process(req, response)) return false; response.Namespaces(ns); XMLNode id = response["ActivityID"]; for(;(bool)id;++id) { EMIESJob job; job.id = (std::string)id; jobs.push_back(job); } return true; } EMIESJobState& EMIESJobState::operator=(const std::string& st) { // From GLUE2 states //state.clear(); //attributes.clear(); //timestamp = Time(); //description.clear(); if(::strncmp("emies:",st.c_str(),6) == 0) { state = st.substr(6); } else if(::strncmp("emiesattr:",st.c_str(),10) == 0) { attributes.push_back(st.substr(10)); } return *this; } EMIESJobState& EMIESJobState::operator=(XMLNode st) { /* estypes:ActivityStatus estypes:Status accepted preprocessing processing processing-accepting processing-queued processing-running postprocessing terminal estypes:Attribute validating server-paused client-paused client-stagein-possible client-stageout-possible provisioning deprovisioning server-stagein server-stageout batch-suspend app-running preprocessing-cancel processing-cancel postprocessing-cancel validation-failure preprocessing-failure processing-failure postprocessing-failure app-failure expired estypes:Timestamp (xsd:dateTime) estypes:Description */ state.clear(); attributes.clear(); timestamp = Time(); description.clear(); if(st.Name() == "ActivityStatus") { state = (std::string)st["Status"]; if(!state.empty()) { XMLNode attr = st["Attribute"]; for(;(bool)attr;++attr) { attributes.push_back((std::string)attr); } if((bool)st["Timestamp"]) timestamp = (std::string)st["Timestamp"]; description = (std::string)st["Description"]; 
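/* Parsing sketch for the ActivityStatus form handled just above (element values
   are examples; it assumes Arc::XMLNode's XML-string constructor, which is the
   same string construction used elsewhere in this file):

   Arc::XMLNode st("<ActivityStatus>"
                     "<Status>processing</Status>"
                     "<Attribute>server-stagein</Attribute>"
                   "</ActivityStatus>");
   Arc::EMIESJobState s;
   s = st;                                            // uses the operator defined here
   bool staging = s.HasAttribute("server-stagein");   // true for this document
*/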
} } return *this; } std::string EMIESJobState::ToXML(void) const { XMLNode xml(""); xml.NewChild("Status") = state; for(std::list::const_iterator attr = attributes.begin(); attr != attributes.end();++attr) { xml.NewChild("Attribute") = *attr; }; std::string str; xml.GetXML(str); return str; } bool EMIESJobState::operator!(void) { return state.empty(); } EMIESJobState::operator bool(void) { return !(state.empty()); } bool EMIESJobState::HasAttribute(const std::string& attr) const { for(std::list::const_iterator a = attributes.begin(); a != attributes.end();++a) { if(attr == *a) return true; } return false; } EMIESJob& EMIESJob::operator=(XMLNode job) { /* estypes:ActivityID estypes:ActivityMgmtEndpointURL estypes:ResourceInfoEndpointURL escreate:StageInDirectory escreate:SessionDirectory escreate:StageOutDirectory */ stagein.clear(); session.clear(); stageout.clear(); delegation_id.clear(); id = (std::string)job["ActivityID"]; manager = (std::string)job["ActivityMgmtEndpointURL"]; resource = (std::string)job["ResourceInfoEndpointURL"]; state = job["ActivityStatus"]; for(XMLNode u = job["StageInDirectory"]["URL"];(bool)u;++u) stagein.push_back((std::string)u); for(XMLNode u = job["SessionDirectory"]["URL"];(bool)u;++u) session.push_back((std::string)u); for(XMLNode u = job["StageOutDirectory"]["URL"];(bool)u;++u) stageout.push_back((std::string)u); return *this; } EMIESJob& EMIESJob::operator=(const Job& job) { stagein.clear(); session.clear(); stageout.clear(); if (job.StageInDir) stagein.push_back(job.StageInDir); if (job.StageOutDir) stageout.push_back(job.StageOutDir); if (job.SessionDir) session.push_back(job.SessionDir); id = getIDFromJob(job); manager = job.JobManagementURL; resource = job.ServiceInformationURL; delegation_id = job.DelegationID.empty()?std::string(""):*job.DelegationID.begin(); // State information is not transfered from Job object. Currently not needed. return *this; } std::string EMIESJob::getIDFromJob(const Job& job) { XMLNode IDFromEndpointXML(job.IDFromEndpoint); if (IDFromEndpointXML) { return (std::string)IDFromEndpointXML["ReferenceParameters"]["CustomID"]; } return job.IDFromEndpoint; } std::string EMIESJob::getIDFromJob(const Job* job) { if (!job) return ""; return getIDFromJob(*job); } std::string EMIESJob::getIDFromJob(const EMIESJob& job) { return job.id; } void EMIESJob::toJob(Job& j) const { // Proposed mandatory attributes for ARC 3.0 j.JobID = manager.str() + "/" + id; j.ServiceInformationURL = resource; j.ServiceInformationInterfaceName = "org.ogf.glue.emies.resourceinfo"; j.JobStatusURL = manager; j.JobStatusInterfaceName = "org.ogf.glue.emies.activitymanagement"; j.JobManagementURL = manager; j.JobManagementInterfaceName = "org.ogf.glue.emies.activitymanagement"; j.IDFromEndpoint = id; if (!stagein.empty()) j.StageInDir = stagein.front(); if (!stageout.empty()) j.StageInDir = stageout.front(); if (!session.empty()) j.StageInDir = session.front(); j.DelegationID.clear(); if(!delegation_id.empty()) j.DelegationID.push_back(delegation_id); } std::string EMIESJob::toXML() const { /* estypes:ActivityID estypes:ActivityMgmtEndpointURL estypes:ResourceInfoEndpointURL escreate:StageInDirectory escreate:SessionDirectory escreate:StageOutDirectory */ // TODO: Add namespace; Currently it is not needed because // this XML used only internally. 
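/* Illustrative shape of the string built below (identifiers and URLs are invented
   for the example; only elements actually emitted by this method are shown):

     <ActivityID>abc123</ActivityID>
     <ActivityMgmtEndpointURL>https://ce.example.org:443/arex</ActivityMgmtEndpointURL>
     <ResourceInfoEndpointURL>https://ce.example.org:443/arex</ResourceInfoEndpointURL>
     <StageInDirectory>
       <URL>https://ce.example.org:443/arex/abc123</URL>
     </StageInDirectory>
     <SessionDirectory>
       <URL>https://ce.example.org:443/arex/abc123</URL>
     </SessionDirectory>
     <StageOutDirectory>
       <URL>https://ce.example.org:443/arex/abc123</URL>
     </StageOutDirectory>
*/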
XMLNode item(""); item.NewChild("ActivityID") = id; item.NewChild("ActivityMgmtEndpointURL") = manager.fullstr(); item.NewChild("ResourceInfoEndpointURL") = resource.fullstr(); if(!stagein.empty()) { XMLNode si = item.NewChild("StageInDirectory"); for(std::list::const_iterator s = stagein.begin();s!=stagein.end();++s) { si.NewChild("URL") = s->fullstr(); } } if(!session.empty()) { XMLNode si = item.NewChild("SessionDirectory"); for(std::list::const_iterator s = session.begin();s!=session.end();++s) { si.NewChild("URL") = s->fullstr(); } } if(!stageout.empty()) { XMLNode si = item.NewChild("StageOutDirectory"); for(std::list::const_iterator s = stageout.begin();s!=stageout.end();++s) { si.NewChild("URL") = s->fullstr(); } } std::string str; item.GetXML(str); return str; } bool EMIESJob::operator!(void) { return id.empty() || !state; } EMIESJob::operator bool(void) { return !id.empty() && (bool)state; } void EMIESJobInfo::toJob(Job& j) const { XMLNode aid = jobInfo["ActivityInfoDocument"]; // Processing generic GLUE2 information j.SetFromXML(aid); // Looking for EMI ES specific state XMLNode state = aid["State"]; EMIESJobState st; for(;(bool)state;++state) st = (std::string)state; if(st) j.State = JobStateEMIES(st); EMIESJobState rst; XMLNode rstate = aid["RestartState"]; for(;(bool)rstate;++rstate) rst = (std::string)rstate; j.RestartState = JobStateEMIES(rst); if (aid["esainfo:StageInDirectory"]) { j.StageInDir = (std::string)aid["esainfo:StageInDirectory"]; } if (aid["esainfo:StageOutDirectory"]) { j.StageOutDir = (std::string)aid["esainfo:StageOutDirectory"]; } if (aid["esainfo:SessionDirectory"]) { j.SessionDir = (std::string)aid["esainfo:SessionDirectory"]; } if (aid["Extensions"]) { for(XMLNode ext = aid["Extensions"]["Extension"];(bool)ext;++ext) { if (ext["LocalID"] == "urn:delegid:nordugrid.org") { j.DelegationID.push_back((std::string)ext["Value"]); } } } // Making EMI ES specific job id // URL-izing job id j.JobID = j.JobManagementURL.str() + "/" + (std::string)jobInfo["ActivityID"]; // TODO: Optimize? 
} std::string EMIESJobInfo::getSubmittedVia() const { for (XMLNode n = jobInfo["ActivityInfoDocument"]["OtherInfo"]; (bool)n; ++n) { const std::string submittedvia = "SubmittedVia="; if (((std::string)n).substr(0, submittedvia.length()) == submittedvia) { return ((std::string)n).substr(submittedvia.length()); } } return ""; } bool EMIESFault::isEMIESFault(XMLNode item) { std::string name; return isEMIESFault(item, name); } bool EMIESFault::isEMIESFault(XMLNode item, std::string& name) { XMLNode fault; if((fault = item["InternalBaseFault"]) || (fault = item["VectorLimitExceededFault"]) || (fault = item["AccessControlFault"]) || (fault = item["InvalidActivityDescriptionFault"]) || (fault = item["InvalidActivityDescriptionSemanticFault"]) || (fault = item["UnsupportedCapabilityFault"]) || (fault = item["ActivityNotFoundFault"]) || (fault = item["UnableToRetrieveStatusFault"]) || (fault = item["OperationNotPossibleFault"]) || (fault = item["OperationNotAllowedFault"]) || (fault = item["UnknownAttributeFault"]) || (fault = item["InternalNotificationFault"]) || (fault = item["InvalidActivityStateFault"]) || (fault = item["InvalidParameterFault"]) || (fault = item["NotSupportedQueryDialectFault"]) || (fault = item["NotValidQueryStatementFault"]) || (fault = item["UnknownQueryFault"]) || (fault = item["InternalResourceInfoFault"]) || (fault = item["ResourceInfoNotFoundFault"])) { name = fault.Name(); return true; } return false; } EMIESFault& EMIESFault::operator=(XMLNode item) { type = ""; message = ""; description = ""; activityID = ""; timestamp = Time(0); code = 0; limit = 0; if (isEMIESFault(item, type)) { XMLNode fault = item[type]; description = (std::string)fault["Description"]; message = (std::string)fault["Message"]; if((bool)fault["FailureCode"]) strtoint((std::string)fault["FailureCode"],code); if((bool)fault["Timestamp"]) timestamp = (std::string)fault["Timestamp"]; if((bool)item["ActivityID"]) activityID = (std::string)item["ActivityID"]; // ActivityID is located at same level as fault. if(type == "VectorLimitExceededFault" && ((!fault["ServerLimit"]) || (!stringto((std::string)fault["ServerLimit"], limit)))) { type = "MalformedFaultError"; if (!message.empty()) { message = " [Original message: " + message + "]"; } message = "ServerLimit element of VectorLimitExceededFault is malformed: ServerLimit is \"" + (std::string)fault["ServerLimit"] + "\"." + message; } } return *this; } bool EMIESFault::operator!(void) { return type.empty(); } EMIESFault::operator bool(void) { return !type.empty(); } // ----------------------------------------------------------------------------- // TODO: does it need locking? EMIESClients::EMIESClients(const UserConfig& usercfg):usercfg_(&usercfg) { } EMIESClients::~EMIESClients(void) { while(true) { std::multimap::iterator it = clients_.begin(); if(it == clients_.end()) break; delete it->second; clients_.erase(it); } } EMIESClient* EMIESClients::acquire(const URL& url) { std::multimap::iterator it = clients_.find(url); if ( it != clients_.end() ) { // If EMIESClient is already existing for the // given URL then return with that EMIESClient* client = it->second; clients_.erase(it); return client; } // Else create a new one and return with that MCCConfig cfg; if(usercfg_) usercfg_->ApplyToConfig(cfg); EMIESClient* client = new EMIESClient(url, cfg, usercfg_?usercfg_->Timeout():0); return client; } void EMIESClients::release(EMIESClient* client) { if(!client) return; if(!*client) { delete client; return; } // TODO: maybe strip path from URL? 
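/* Typical acquire/release cycle for this small connection pool (the endpoint URL
   is an example; error handling and the choice of EMI ES operation are arbitrary):

   Arc::EMIESClients pool(usercfg);                   // usercfg: an Arc::UserConfig
   Arc::EMIESClient* c = pool.acquire(Arc::URL("https://ce.example.org:443/arex"));
   std::list<Arc::EMIESJob> jobs;
   bool ok = c->list(jobs);                           // any EMIESClient operation
   pool.release(c);                                   // hand the client back for reuse
*/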
clients_.insert(std::pair(client->url(),client)); } void EMIESClients::SetUserConfig(const UserConfig& uc) { // Changing user configuration may change identity. // Hence all open connections become invalid. usercfg_ = &uc; while(true) { std::multimap::iterator it = clients_.begin(); if(it == clients_.end()) break; delete it->second; clients_.erase(it); } } } nordugrid-arc-5.4.2/src/hed/acc/EMIES/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315722022157 xustar000000000000000030 mtime=1513200594.211942872 30 atime=1513200648.758609999 29 ctime=1513200660.19574988 nordugrid-arc-5.4.2/src/hed/acc/EMIES/Makefile.in0000644000175000002070000014667113214315722022245 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ bin_PROGRAMS = arcemiestest$(EXEEXT) subdir = src/hed/acc/EMIES DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/arcemiestest.1.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = arcemiestest.1 CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" 
} { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(bindir)" \ "$(DESTDIR)$(man1dir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libaccEMIES_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libaccEMIES_la_OBJECTS = libaccEMIES_la-EMIESClient.lo \ libaccEMIES_la-JobStateEMIES.lo \ libaccEMIES_la-SubmitterPluginEMIES.lo \ libaccEMIES_la-JobControllerPluginEMIES.lo \ libaccEMIES_la-JobListRetrieverPluginEMIES.lo \ libaccEMIES_la-TargetInformationRetrieverPluginEMIES.lo \ libaccEMIES_la-DescriptorsEMIES.lo libaccEMIES_la_OBJECTS = $(am_libaccEMIES_la_OBJECTS) libaccEMIES_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libaccEMIES_la_CXXFLAGS) $(CXXFLAGS) \ $(libaccEMIES_la_LDFLAGS) $(LDFLAGS) -o $@ PROGRAMS = $(bin_PROGRAMS) am_arcemiestest_OBJECTS = arcemiestest-TestEMIESClient.$(OBJEXT) \ arcemiestest-EMIESClient.$(OBJEXT) \ arcemiestest-JobStateEMIES.$(OBJEXT) arcemiestest_OBJECTS = $(am_arcemiestest_OBJECTS) arcemiestest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) arcemiestest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcemiestest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libaccEMIES_la_SOURCES) $(arcemiestest_SOURCES) DIST_SOURCES = $(libaccEMIES_la_SOURCES) $(arcemiestest_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive 
installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive man1dir = $(mandir)/man1 NROFF = nroff MANS = $(man_MANS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ 
GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ 
SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = schema pkglib_LTLIBRARIES = libaccEMIES.la man_MANS = arcemiestest.1 libaccEMIES_la_SOURCES = EMIESClient.cpp EMIESClient.h \ JobStateEMIES.cpp JobStateEMIES.h \ SubmitterPluginEMIES.cpp SubmitterPluginEMIES.h \ JobControllerPluginEMIES.cpp JobControllerPluginEMIES.h \ JobListRetrieverPluginEMIES.cpp JobListRetrieverPluginEMIES.h \ TargetInformationRetrieverPluginEMIES.cpp TargetInformationRetrieverPluginEMIES.h \ DescriptorsEMIES.cpp libaccEMIES_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libaccEMIES_la_LIBADD = \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ 
$(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la\ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libaccEMIES_la_LDFLAGS = -no-undefined -avoid-version -module # $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ # $(top_builddir)/src/hed/libs/data/libarcdata.la \ # $(top_builddir)/src/hed/libs/ws/libarcws.la \ # $(top_builddir)/src/hed/libs/loader/libarcloader.la \ # arcemiestest_SOURCES = TestEMIESClient.cpp EMIESClient.cpp JobStateEMIES.cpp arcemiestest_CXXFLAGS = -I$(top_srcdir)/include $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) arcemiestest_LDADD = \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/acc/EMIES/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/acc/EMIES/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): arcemiestest.1: $(top_builddir)/config.status $(srcdir)/arcemiestest.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm 
-f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libaccEMIES.la: $(libaccEMIES_la_OBJECTS) $(libaccEMIES_la_DEPENDENCIES) $(libaccEMIES_la_LINK) -rpath $(pkglibdir) $(libaccEMIES_la_OBJECTS) $(libaccEMIES_la_LIBADD) $(LIBS) install-binPROGRAMS: $(bin_PROGRAMS) @$(NORMAL_INSTALL) test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)" @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p || test -f $$p1; \ then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-binPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(bindir)" && rm -f $$files clean-binPROGRAMS: @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list arcemiestest$(EXEEXT): $(arcemiestest_OBJECTS) $(arcemiestest_DEPENDENCIES) @rm -f arcemiestest$(EXEEXT) $(arcemiestest_LINK) $(arcemiestest_OBJECTS) $(arcemiestest_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcemiestest-EMIESClient.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcemiestest-JobStateEMIES.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcemiestest-TestEMIESClient.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccEMIES_la-DescriptorsEMIES.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccEMIES_la-EMIESClient.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccEMIES_la-JobControllerPluginEMIES.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccEMIES_la-JobListRetrieverPluginEMIES.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccEMIES_la-JobStateEMIES.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccEMIES_la-SubmitterPluginEMIES.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccEMIES_la-TargetInformationRetrieverPluginEMIES.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) 
$(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libaccEMIES_la-EMIESClient.lo: EMIESClient.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccEMIES_la_CXXFLAGS) $(CXXFLAGS) -MT libaccEMIES_la-EMIESClient.lo -MD -MP -MF $(DEPDIR)/libaccEMIES_la-EMIESClient.Tpo -c -o libaccEMIES_la-EMIESClient.lo `test -f 'EMIESClient.cpp' || echo '$(srcdir)/'`EMIESClient.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccEMIES_la-EMIESClient.Tpo $(DEPDIR)/libaccEMIES_la-EMIESClient.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='EMIESClient.cpp' object='libaccEMIES_la-EMIESClient.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccEMIES_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccEMIES_la-EMIESClient.lo `test -f 'EMIESClient.cpp' || echo '$(srcdir)/'`EMIESClient.cpp libaccEMIES_la-JobStateEMIES.lo: JobStateEMIES.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccEMIES_la_CXXFLAGS) $(CXXFLAGS) -MT libaccEMIES_la-JobStateEMIES.lo -MD -MP -MF $(DEPDIR)/libaccEMIES_la-JobStateEMIES.Tpo -c -o libaccEMIES_la-JobStateEMIES.lo `test -f 'JobStateEMIES.cpp' || echo '$(srcdir)/'`JobStateEMIES.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccEMIES_la-JobStateEMIES.Tpo $(DEPDIR)/libaccEMIES_la-JobStateEMIES.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobStateEMIES.cpp' object='libaccEMIES_la-JobStateEMIES.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccEMIES_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccEMIES_la-JobStateEMIES.lo `test -f 'JobStateEMIES.cpp' || echo '$(srcdir)/'`JobStateEMIES.cpp libaccEMIES_la-SubmitterPluginEMIES.lo: SubmitterPluginEMIES.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccEMIES_la_CXXFLAGS) $(CXXFLAGS) -MT libaccEMIES_la-SubmitterPluginEMIES.lo -MD -MP -MF $(DEPDIR)/libaccEMIES_la-SubmitterPluginEMIES.Tpo -c -o libaccEMIES_la-SubmitterPluginEMIES.lo `test -f 
'SubmitterPluginEMIES.cpp' || echo '$(srcdir)/'`SubmitterPluginEMIES.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccEMIES_la-SubmitterPluginEMIES.Tpo $(DEPDIR)/libaccEMIES_la-SubmitterPluginEMIES.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SubmitterPluginEMIES.cpp' object='libaccEMIES_la-SubmitterPluginEMIES.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccEMIES_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccEMIES_la-SubmitterPluginEMIES.lo `test -f 'SubmitterPluginEMIES.cpp' || echo '$(srcdir)/'`SubmitterPluginEMIES.cpp libaccEMIES_la-JobControllerPluginEMIES.lo: JobControllerPluginEMIES.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccEMIES_la_CXXFLAGS) $(CXXFLAGS) -MT libaccEMIES_la-JobControllerPluginEMIES.lo -MD -MP -MF $(DEPDIR)/libaccEMIES_la-JobControllerPluginEMIES.Tpo -c -o libaccEMIES_la-JobControllerPluginEMIES.lo `test -f 'JobControllerPluginEMIES.cpp' || echo '$(srcdir)/'`JobControllerPluginEMIES.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccEMIES_la-JobControllerPluginEMIES.Tpo $(DEPDIR)/libaccEMIES_la-JobControllerPluginEMIES.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobControllerPluginEMIES.cpp' object='libaccEMIES_la-JobControllerPluginEMIES.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccEMIES_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccEMIES_la-JobControllerPluginEMIES.lo `test -f 'JobControllerPluginEMIES.cpp' || echo '$(srcdir)/'`JobControllerPluginEMIES.cpp libaccEMIES_la-JobListRetrieverPluginEMIES.lo: JobListRetrieverPluginEMIES.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccEMIES_la_CXXFLAGS) $(CXXFLAGS) -MT libaccEMIES_la-JobListRetrieverPluginEMIES.lo -MD -MP -MF $(DEPDIR)/libaccEMIES_la-JobListRetrieverPluginEMIES.Tpo -c -o libaccEMIES_la-JobListRetrieverPluginEMIES.lo `test -f 'JobListRetrieverPluginEMIES.cpp' || echo '$(srcdir)/'`JobListRetrieverPluginEMIES.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccEMIES_la-JobListRetrieverPluginEMIES.Tpo $(DEPDIR)/libaccEMIES_la-JobListRetrieverPluginEMIES.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobListRetrieverPluginEMIES.cpp' object='libaccEMIES_la-JobListRetrieverPluginEMIES.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccEMIES_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccEMIES_la-JobListRetrieverPluginEMIES.lo `test -f 'JobListRetrieverPluginEMIES.cpp' || echo '$(srcdir)/'`JobListRetrieverPluginEMIES.cpp libaccEMIES_la-TargetInformationRetrieverPluginEMIES.lo: TargetInformationRetrieverPluginEMIES.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) 
$(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccEMIES_la_CXXFLAGS) $(CXXFLAGS) -MT libaccEMIES_la-TargetInformationRetrieverPluginEMIES.lo -MD -MP -MF $(DEPDIR)/libaccEMIES_la-TargetInformationRetrieverPluginEMIES.Tpo -c -o libaccEMIES_la-TargetInformationRetrieverPluginEMIES.lo `test -f 'TargetInformationRetrieverPluginEMIES.cpp' || echo '$(srcdir)/'`TargetInformationRetrieverPluginEMIES.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccEMIES_la-TargetInformationRetrieverPluginEMIES.Tpo $(DEPDIR)/libaccEMIES_la-TargetInformationRetrieverPluginEMIES.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='TargetInformationRetrieverPluginEMIES.cpp' object='libaccEMIES_la-TargetInformationRetrieverPluginEMIES.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccEMIES_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccEMIES_la-TargetInformationRetrieverPluginEMIES.lo `test -f 'TargetInformationRetrieverPluginEMIES.cpp' || echo '$(srcdir)/'`TargetInformationRetrieverPluginEMIES.cpp libaccEMIES_la-DescriptorsEMIES.lo: DescriptorsEMIES.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccEMIES_la_CXXFLAGS) $(CXXFLAGS) -MT libaccEMIES_la-DescriptorsEMIES.lo -MD -MP -MF $(DEPDIR)/libaccEMIES_la-DescriptorsEMIES.Tpo -c -o libaccEMIES_la-DescriptorsEMIES.lo `test -f 'DescriptorsEMIES.cpp' || echo '$(srcdir)/'`DescriptorsEMIES.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccEMIES_la-DescriptorsEMIES.Tpo $(DEPDIR)/libaccEMIES_la-DescriptorsEMIES.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DescriptorsEMIES.cpp' object='libaccEMIES_la-DescriptorsEMIES.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccEMIES_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccEMIES_la-DescriptorsEMIES.lo `test -f 'DescriptorsEMIES.cpp' || echo '$(srcdir)/'`DescriptorsEMIES.cpp arcemiestest-TestEMIESClient.o: TestEMIESClient.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcemiestest_CXXFLAGS) $(CXXFLAGS) -MT arcemiestest-TestEMIESClient.o -MD -MP -MF $(DEPDIR)/arcemiestest-TestEMIESClient.Tpo -c -o arcemiestest-TestEMIESClient.o `test -f 'TestEMIESClient.cpp' || echo '$(srcdir)/'`TestEMIESClient.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcemiestest-TestEMIESClient.Tpo $(DEPDIR)/arcemiestest-TestEMIESClient.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='TestEMIESClient.cpp' object='arcemiestest-TestEMIESClient.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcemiestest_CXXFLAGS) $(CXXFLAGS) -c -o arcemiestest-TestEMIESClient.o `test -f 'TestEMIESClient.cpp' || echo '$(srcdir)/'`TestEMIESClient.cpp arcemiestest-TestEMIESClient.obj: TestEMIESClient.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcemiestest_CXXFLAGS) 
$(CXXFLAGS) -MT arcemiestest-TestEMIESClient.obj -MD -MP -MF $(DEPDIR)/arcemiestest-TestEMIESClient.Tpo -c -o arcemiestest-TestEMIESClient.obj `if test -f 'TestEMIESClient.cpp'; then $(CYGPATH_W) 'TestEMIESClient.cpp'; else $(CYGPATH_W) '$(srcdir)/TestEMIESClient.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcemiestest-TestEMIESClient.Tpo $(DEPDIR)/arcemiestest-TestEMIESClient.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='TestEMIESClient.cpp' object='arcemiestest-TestEMIESClient.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcemiestest_CXXFLAGS) $(CXXFLAGS) -c -o arcemiestest-TestEMIESClient.obj `if test -f 'TestEMIESClient.cpp'; then $(CYGPATH_W) 'TestEMIESClient.cpp'; else $(CYGPATH_W) '$(srcdir)/TestEMIESClient.cpp'; fi` arcemiestest-EMIESClient.o: EMIESClient.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcemiestest_CXXFLAGS) $(CXXFLAGS) -MT arcemiestest-EMIESClient.o -MD -MP -MF $(DEPDIR)/arcemiestest-EMIESClient.Tpo -c -o arcemiestest-EMIESClient.o `test -f 'EMIESClient.cpp' || echo '$(srcdir)/'`EMIESClient.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcemiestest-EMIESClient.Tpo $(DEPDIR)/arcemiestest-EMIESClient.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='EMIESClient.cpp' object='arcemiestest-EMIESClient.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcemiestest_CXXFLAGS) $(CXXFLAGS) -c -o arcemiestest-EMIESClient.o `test -f 'EMIESClient.cpp' || echo '$(srcdir)/'`EMIESClient.cpp arcemiestest-EMIESClient.obj: EMIESClient.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcemiestest_CXXFLAGS) $(CXXFLAGS) -MT arcemiestest-EMIESClient.obj -MD -MP -MF $(DEPDIR)/arcemiestest-EMIESClient.Tpo -c -o arcemiestest-EMIESClient.obj `if test -f 'EMIESClient.cpp'; then $(CYGPATH_W) 'EMIESClient.cpp'; else $(CYGPATH_W) '$(srcdir)/EMIESClient.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcemiestest-EMIESClient.Tpo $(DEPDIR)/arcemiestest-EMIESClient.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='EMIESClient.cpp' object='arcemiestest-EMIESClient.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcemiestest_CXXFLAGS) $(CXXFLAGS) -c -o arcemiestest-EMIESClient.obj `if test -f 'EMIESClient.cpp'; then $(CYGPATH_W) 'EMIESClient.cpp'; else $(CYGPATH_W) '$(srcdir)/EMIESClient.cpp'; fi` arcemiestest-JobStateEMIES.o: JobStateEMIES.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcemiestest_CXXFLAGS) $(CXXFLAGS) -MT arcemiestest-JobStateEMIES.o -MD -MP -MF $(DEPDIR)/arcemiestest-JobStateEMIES.Tpo -c -o arcemiestest-JobStateEMIES.o `test -f 'JobStateEMIES.cpp' || echo '$(srcdir)/'`JobStateEMIES.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcemiestest-JobStateEMIES.Tpo $(DEPDIR)/arcemiestest-JobStateEMIES.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobStateEMIES.cpp' object='arcemiestest-JobStateEMIES.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) 
$(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcemiestest_CXXFLAGS) $(CXXFLAGS) -c -o arcemiestest-JobStateEMIES.o `test -f 'JobStateEMIES.cpp' || echo '$(srcdir)/'`JobStateEMIES.cpp arcemiestest-JobStateEMIES.obj: JobStateEMIES.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcemiestest_CXXFLAGS) $(CXXFLAGS) -MT arcemiestest-JobStateEMIES.obj -MD -MP -MF $(DEPDIR)/arcemiestest-JobStateEMIES.Tpo -c -o arcemiestest-JobStateEMIES.obj `if test -f 'JobStateEMIES.cpp'; then $(CYGPATH_W) 'JobStateEMIES.cpp'; else $(CYGPATH_W) '$(srcdir)/JobStateEMIES.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arcemiestest-JobStateEMIES.Tpo $(DEPDIR)/arcemiestest-JobStateEMIES.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobStateEMIES.cpp' object='arcemiestest-JobStateEMIES.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcemiestest_CXXFLAGS) $(CXXFLAGS) -c -o arcemiestest-JobStateEMIES.obj `if test -f 'JobStateEMIES.cpp'; then $(CYGPATH_W) 'JobStateEMIES.cpp'; else $(CYGPATH_W) '$(srcdir)/JobStateEMIES.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man1: $(man_MANS) @$(NORMAL_INSTALL) test -z "$(man1dir)" || $(MKDIR_P) "$(DESTDIR)$(man1dir)" @list=''; test -n "$(man1dir)" || exit 0; \ { for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \ done; } uninstall-man1: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man1dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ test -z "$$files" || { \ echo " ( cd '$(DESTDIR)$(man1dir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(man1dir)" && rm -f $$files; } # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
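# For example (an illustrative invocation only; the flag value is not part of
# this package's defaults), a variable that is not fixed by `config.status'
# can be overridden for a single run with
#   make CXXFLAGS='-g -O0'
# which takes precedence over the value assigned in the generated Makefiles.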
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @list='$(MANS)'; if test -n "$$list"; then \ list=`for p in $$list; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \ if test -n "$$list" && \ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) $(MANS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-binPROGRAMS clean-generic clean-libtool \ clean-pkglibLTLIBRARIES mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-man install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-binPROGRAMS install-pkglibLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-man1 install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-binPROGRAMS uninstall-man \ uninstall-pkglibLTLIBRARIES uninstall-man: uninstall-man1 .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-binPROGRAMS \ clean-generic clean-libtool clean-pkglibLTLIBRARIES ctags \ ctags-recursive distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-binPROGRAMS \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-man1 \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am \ uninstall-binPROGRAMS uninstall-man uninstall-man1 \ uninstall-pkglibLTLIBRARIES #DIST_SUBDIRS = test #SUBDIRS = $(TEST_DIR) # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/acc/EMIES/PaxHeaders.7502/EMIESClient.h0000644000000000000000000000012213165644550022275 xustar000000000000000027 mtime=1507281256.705161 25 atime=1513200574.3637 30 ctime=1513200660.197749904 nordugrid-arc-5.4.2/src/hed/acc/EMIES/EMIESClient.h0000644000175000002070000002370213165644550022350 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __EMIES_CLIENT__ #define __EMIES_CLIENT__ #include #include #include #include #include #include #include #include /* #include #include #include */ namespace Arc { #ifdef CPPUNITTEST #define ClientSOAP ClientSOAPTest #define private public #endif class ClientSOAP; class Config; class Logger; class MCCConfig; class PayloadSOAP; class Job; class EMIESJobState { public: /* accepted preprocessing processing processing-accepting processing-queued processing-running postprocessing terminal */ std::string state; /* validating server-paused client-paused client-stagein-possible client-stageout-possible provisioning deprovisioning server-stagein server-stageout batch-suspend app-running preprocessing-cancel processing-cancel postprocessing-cancel validation-failure preprocessing-failure processing-failure postprocessing-failure app-failure expired */ std::list attributes; std::string description; Arc::Time timestamp; EMIESJobState& operator=(XMLNode state); EMIESJobState& operator=(const std::string& state); bool operator!(void); operator bool(void); bool HasAttribute(const std::string& attr) const; std::string ToXML(void) const; }; class EMIESResponse { public: EMIESResponse(const EMIESResponse& r) {} virtual ~EMIESResponse() {} protected: EMIESResponse() {} }; class EMIESJob : public EMIESResponse { public: std::string id; URL manager; URL resource; std::list stagein; std::list session; std::list stageout; EMIESJobState state; std::string delegation_id; EMIESJob& operator=(XMLNode job); EMIESJob& operator=(const Job& job); EMIESJob& operator=(const std::string& s) { XMLNode n(s); return operator=(n); } std::string toXML(void) const; void toJob(Job& j) const; bool operator!(void); operator bool(void); static std::string getIDFromJob(const Job*); static std::string getIDFromJob(const Job&); static std::string getIDFromJob(const EMIESJob&); }; class EMIESJobInfo : public EMIESResponse { public: EMIESJobInfo(XMLNode item) : EMIESResponse() { item.New(jobInfo); } EMIESJobInfo(const EMIESJobInfo& ji) : EMIESResponse() {} void toJob(Job&) const; std::string getActivityID() const { return (std::string)jobInfo["ActivityID"]; } std::string getSubmittedVia() const; private: XMLNode jobInfo; }; class EMIESAcknowledgement : public EMIESResponse { public: EMIESAcknowledgement(const std::string& id) : EMIESResponse(), activityID(id) {} EMIESAcknowledgement(const EMIESAcknowledgement& a) : EMIESResponse(), activityID(a.activityID) {} std::string activityID; }; class EMIESFault : public EMIESResponse { public: EMIESFault() : code(0), limit(-1) {} std::string type; std::string message; std::string description; std::string activityID; Time timestamp; int code; int limit; EMIESFault& operator=(XMLNode item); bool operator!(void); operator bool(void); static bool isEMIESFault(XMLNode item); static bool isEMIESFault(XMLNode item, std::string& name); }; class UnexpectedError : public EMIESResponse { public: UnexpectedError(const std::string& message) : EMIESResponse(), message(message) {} UnexpectedError(const UnexpectedError& ue) : EMIESResponse(), message(ue.message) {} const std::string message; }; //! 
A client class for the EMI ES service. /*! This class is a client for the EMI ES service (European Middleware Initiative Execution Service). It provides methods for a selected set of operations on an EMI ES service: - Job submission - Job status queries - Job termination */ class EMIESClient { public: //! The constructor for the EMIESClient class. /*! This is the constructor for the EMIESClient class. It creates an EMI ES client that corresponds to a specific EMI ES service. @param url The URL of the EMI ES service. @param cfg An MCC configuration object. */ EMIESClient(const URL& url, const MCCConfig& cfg, int timeout); //! The destructor. /*! This is the destructor. It does what destructors usually do, cleans up... */ ~EMIESClient(); operator bool(void) { return (client != NULL); } bool operator!(void) { return (client == NULL); } //! Submit a job. /*! This method submits a job to the EMI ES service corresponding to this client instance. It does not do data staging. @param jobdesc A string containing the job description. @param job The container for attributes identifying the submitted job. @param state The current state of the submitted job. @return true on success */ bool submit(XMLNode jobdesc, EMIESResponse** response, const std::string delegation_id = ""); bool submit(const XMLNodeList& jobdescs, std::list& responses, const std::string delegation_id = ""); //! Query the status of a job. /*! This method queries the EMI ES service about the status of a job. @param job The Job identifier of the job. @param state The state of the job. @return true on success */ bool stat(const EMIESJob& job, XMLNode& state); bool stat(const EMIESJob& job, EMIESJobState& state); bool info(EMIESJob& job, XMLNode &state); bool info(EMIESJob& job, Job& info); template void info(const std::list& jobs, std::list& responses); //! Terminates a job. /*! This method sends a request to the EMI ES service to terminate a job. @param job The Job identifier of the job to terminate. @return true on success */ bool kill(const EMIESJob& job); //! Removes a job. /*! This method sends a request to the EMI ES service to remove a job from its pool. If the job is running it will not be killed by the service and the service will return an error. @param jobid The Job identifier of the job to remove. @return true on success */ bool clean(const EMIESJob& job); //! Suspends a job. /*! This method sends a request to the EMI ES service to suspend a job's execution if possible. @param jobid The Job identifier of the job to suspend. @return true on success */ bool suspend(const EMIESJob& job); //! Resumes a job. /*! This method sends a request to the EMI ES service to resume a job's execution if it was suspended by client request. @param jobid The Job identifier of the job to resume. @return true on success */ bool resume(const EMIESJob& job); //! Restarts a job. /*! This method sends a request to the EMI ES service to restart processing of a job after failure if possible. @param jobid The Job identifier of the job to restart. @return true on success */ bool restart(const EMIESJob& job); bool notify(const EMIESJob& job); bool notify(const std::list jobs, std::list& responses); //! Request the status of a service. /*! This method requests the status of the EMI ES service. @param status The XML document representing status of the service. @return true on success */ bool sstat(XMLNode& status, bool nsapply = true); //! Query the status of a service. /*! This method queries the EMI ES service about its status using XPath 1.0.
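      A minimal usage sketch (illustrative only; the client object and the exact query string are assumptions, the query pattern follows the one exercised by the arcemiestest ivalidate command):
      @code
        XMLNodeContainer items;
        if (client.squery("/Services/ComputingService", items)) {
          for (int n = 0; n < items.Size(); ++n) {
            // each returned item is expected to hold a GLUE2 ComputingService element
          }
        }
      @endcode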
@param status The XML document representing status of the service. @return true on success */ bool squery(const std::string& query, XMLNodeContainer& items, bool nsapply = true); //! Query the endpoints of a service. /*! This method queries the EMI ES service about its avaialble endpoints. @return true on success */ bool sstat(std::list& activitycreation, std::list& activitymanagememt, std::list& activityinfo, std::list& resourceinfo, std::list& delegation); //! List jobs on a service. /*! This method queries the EMI ES service about current list of jobs. @param status The XML document representing status of the service. @return true on success */ bool list(std::list& jobs); ClientSOAP* SOAP(void) { return client; } const URL& url(void) { return rurl; } const std::string& failure(void) { return lfailure; } std::string delegation(const std::string& renew_id = ""); private: bool process(PayloadSOAP& req, XMLNode& response, bool retry = true); void process_with_vector_limit(PayloadSOAP& req, XMLNode& response); std::string dodelegation(const std::string& renew_id); bool reconnect(void); bool dosimple(const std::string& action, const std::string& id); ClientSOAP *client; //! Namespaces. /*! A map containing namespaces. */ NS ns; URL rurl; const MCCConfig cfg; int timeout; std::string lfailure; bool soapfault; //! A logger for the A-REX client. /*! This is a logger to which all logging messages from the EMI ES client are sent. */ static Logger logger; }; class EMIESClients { std::multimap clients_; const UserConfig* usercfg_; public: EMIESClients(const UserConfig& usercfg); ~EMIESClients(void); EMIESClient* acquire(const URL& url); void release(EMIESClient* client); void SetUserConfig(const UserConfig& uc); }; } #endif nordugrid-arc-5.4.2/src/hed/acc/EMIES/PaxHeaders.7502/JobControllerPluginEMIES.cpp0000644000000000000000000000012213165644550025347 xustar000000000000000027 mtime=1507281256.705161 25 atime=1513200574.3697 30 ctime=1513200660.202749966 nordugrid-arc-5.4.2/src/hed/acc/EMIES/JobControllerPluginEMIES.cpp0000644000175000002070000002576613165644550025436 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include "EMIESClient.h" #include "JobStateEMIES.h" #include "JobControllerPluginEMIES.h" namespace Arc { Logger JobControllerPluginEMIES::logger(Logger::getRootLogger(), "JobControllerPlugin.EMIES"); void JobControllerPluginEMIES::SetUserConfig(const UserConfig& uc) { JobControllerPlugin::SetUserConfig(uc); clients.SetUserConfig(uc); } bool JobControllerPluginEMIES::isEndpointNotSupported(const std::string& endpoint) const { const std::string::size_type pos = endpoint.find("://"); return pos != std::string::npos && lower(endpoint.substr(0, pos)) != "http" && lower(endpoint.substr(0, pos)) != "https"; } void JobControllerPluginEMIES::UpdateJobs(std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { if (jobs.empty()) return; std::map > groupedJobs; if (!isGrouped) { // Group jobs per host. 
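      // groupedJobs maps the JobManagementURL string of each job to the list of
      // Job pointers handled by that endpoint, so that the loop over the groups
      // below can acquire a single EMIESClient per host and fetch information
      // for all of its jobs in one info() call.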
for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { std::map >::iterator entry = groupedJobs.find((**it).JobManagementURL.str()); if (entry == groupedJobs.end()) { groupedJobs.insert(make_pair((**it).JobManagementURL.str(), std::list(1, *it))); } else { entry->second.push_back(*it); } } } else { groupedJobs.insert(make_pair(jobs.front()->JobManagementURL.str(), jobs)); } for (std::map >::iterator it = groupedJobs.begin(); it != groupedJobs.end(); ++it) { std::list responses; AutoPointer ac(clients.acquire(it->first)); ac->info(it->second, responses); for (std::list::iterator itJ = it->second.begin(); itJ != it->second.end(); ++itJ) { std::list::iterator itR = responses.begin(); for (; itR != responses.end(); ++itR) { EMIESJobInfo *j = dynamic_cast(*itR); if (j) { if (EMIESJob::getIDFromJob(**itJ) == j->getActivityID()) { j->toJob(**itJ); delete *itR; break; } } // TODO: Handle ERROR. // TODO: Log warning: //logger.msg(WARNING, "Job information not found in the information system: %s", (*it)->JobID); } if (itR != responses.end()) { IDsProcessed.push_back((**itJ).JobID); responses.erase(itR); } else { IDsNotProcessed.push_back((**itJ).JobID); } } for (std::list::iterator itR = responses.begin(); itR != responses.end(); ++itR) { delete *itR; } clients.release(ac.Release()); } } bool JobControllerPluginEMIES::CleanJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { MCCConfig cfg; usercfg->ApplyToConfig(cfg); bool ok = true; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { Job& job = **it; EMIESJob ejob; ejob = job; AutoPointer ac(clients.acquire(ejob.manager)); if (!ac->clean(ejob)) { ok = false; IDsNotProcessed.push_back(job.JobID); clients.release(ac.Release()); continue; } IDsProcessed.push_back(job.JobID); clients.release(ac.Release()); } return ok; } bool JobControllerPluginEMIES::CancelJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { MCCConfig cfg; usercfg->ApplyToConfig(cfg); bool ok = true; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { Job& job = **it; EMIESJob ejob; ejob = job; AutoPointer ac(clients.acquire(ejob.manager)); if(!ac->kill(ejob)) { ok = false; IDsNotProcessed.push_back((*it)->JobID); clients.release(ac.Release()); continue; } // Force assign terminal state so job is cleaned afterwards (*it)->State = JobStateEMIES((std::string)"emies:terminal"); IDsProcessed.push_back((*it)->JobID); clients.release(ac.Release()); } return ok; } bool JobControllerPluginEMIES::RenewJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { MCCConfig cfg; usercfg->ApplyToConfig(cfg); bool ok = true; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { // 1. Fetch/find delegation ids for each job if((*it)->DelegationID.empty()) { logger.msg(INFO, "Job %s has no delegation associated. Can't renew such job.", (*it)->JobID); IDsNotProcessed.push_back((*it)->JobID); continue; } // 2. Leave only unique IDs - not needed yet because current code uses // different delegations for each job. // 3. 
Renew credentials for every ID Job& job = **it; EMIESJob ejob; ejob = job; AutoPointer ac(clients.acquire(ejob.manager)); std::list::const_iterator did = (*it)->DelegationID.begin(); for(;did != (*it)->DelegationID.end();++did) { if(ac->delegation(*did).empty()) { logger.msg(INFO, "Job %s failed to renew delegation %s - %s.", (*it)->JobID, *did, ac->failure()); break; } } if(did != (*it)->DelegationID.end()) { IDsNotProcessed.push_back((*it)->JobID); continue; } IDsProcessed.push_back((*it)->JobID); clients.release(ac.Release()); } return false; } bool JobControllerPluginEMIES::ResumeJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { MCCConfig cfg; usercfg->ApplyToConfig(cfg); bool ok = true; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { Job& job = **it; if (!job.RestartState) { logger.msg(INFO, "Job %s does not report a resumable state", job.JobID); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } logger.msg(VERBOSE, "Resuming job: %s at state: %s (%s)", job.JobID, job.RestartState.GetGeneralState(), job.RestartState()); EMIESJob ejob; ejob = job; AutoPointer ac(clients.acquire(ejob.manager)); if(!ac->restart(ejob)) { ok = false; IDsNotProcessed.push_back((*it)->JobID); clients.release(ac.Release()); continue; } IDsProcessed.push_back((*it)->JobID); clients.release(ac.Release()); logger.msg(VERBOSE, "Job resuming successful"); } return ok; } bool JobControllerPluginEMIES::GetURLToJobResource(const Job& job, Job::ResourceType resource, URL& url) const { if (resource == Job::JOBDESCRIPTION) { return false; } // Obtain information about staging urls EMIESJob ejob; ejob = job; URL stagein; URL stageout; URL session; // TODO: currently using first valid URL. Need support for multiple. 
for(std::list::iterator s = ejob.stagein.begin();s!=ejob.stagein.end();++s) { if(*s) { stagein = *s; break; } } for(std::list::iterator s = ejob.stageout.begin();s!=ejob.stageout.end();++s) { if(*s) { stageout = *s; break; } } for(std::list::iterator s = ejob.session.begin();s!=ejob.session.end();++s) { if(*s) { session = *s; break; } } if ((resource != Job::STAGEINDIR || !stagein) && (resource != Job::STAGEOUTDIR || !stageout) && (resource != Job::SESSIONDIR || !session)) { // If there is no needed URL provided try to fetch it from server MCCConfig cfg; usercfg->ApplyToConfig(cfg); Job tjob; AutoPointer ac(clients.acquire(ejob.manager)); if (!ac->info(ejob, tjob)) { clients.release(ac.Release()); logger.msg(INFO, "Failed retrieving information for job: %s", job.JobID); return false; } for(std::list::iterator s = ejob.stagein.begin();s!=ejob.stagein.end();++s) { if(*s) { stagein = *s; break; } } for(std::list::iterator s = ejob.stageout.begin();s!=ejob.stageout.end();++s) { if(*s) { stageout = *s; break; } } for(std::list::iterator s = ejob.session.begin();s!=ejob.session.end();++s) { if(*s) { session = *s; break; } } // Choose url by state // TODO: maybe this method should somehow know what is purpose of URL // TODO: state attributes would be more suitable // TODO: library need to be etended to allow for multiple URLs if((tjob.State == JobState::ACCEPTED) || (tjob.State == JobState::PREPARING)) { url = stagein; } else if((tjob.State == JobState::DELETED) || (tjob.State == JobState::FAILED) || (tjob.State == JobState::KILLED) || (tjob.State == JobState::FINISHED) || (tjob.State == JobState::FINISHING)) { url = stageout; } else { url = session; } // If no url found by state still try to get something if(!url) { if(session) url = session; if(stagein) url = stagein; if(stageout) url = stageout; } clients.release(ac.Release()); } switch (resource) { case Job::STDIN: url.ChangePath(url.Path() + '/' + job.StdIn); break; case Job::STDOUT: url.ChangePath(url.Path() + '/' + job.StdOut); break; case Job::STDERR: url.ChangePath(url.Path() + '/' + job.StdErr); break; case Job::JOBLOG: url.ChangePath(url.Path() + "/" + job.LogDir + "/errors"); break; case Job::STAGEINDIR: if(stagein) url = stagein; break; case Job::STAGEOUTDIR: if(stageout) url = stageout; break; case Job::SESSIONDIR: if(session) url = session; break; default: break; } if(url && ((url.Protocol() == "https") || (url.Protocol() == "http"))) { url.AddOption("threads=2",false); url.AddOption("encryption=optional",false); // url.AddOption("httpputpartial=yes",false); - TODO: use for A-REX } return true; } bool JobControllerPluginEMIES::GetJobDescription(const Job& /* job */, std::string& /* desc_str */) const { logger.msg(INFO, "Retrieving job description of EMI ES jobs is not supported"); return false; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/EMIES/PaxHeaders.7502/TestEMIESClient.cpp0000644000000000000000000000012212177021676023470 xustar000000000000000027 mtime=1375478718.696768 25 atime=1513200574.3607 30 ctime=1513200660.208750039 nordugrid-arc-5.4.2/src/hed/acc/EMIES/TestEMIESClient.cpp0000644000175000002070000002736012177021676023547 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "EMIESClient.h" /* bool stat(const EMIESJob& job, XMLNode& state); bool info(const EMIESJob& job, Job& info); bool suspend(const EMIESJob& job); bool resume(const EMIESJob& job); bool restart(const EMIESJob& job); */ using namespace Arc; static Logger 
logger(Logger::getRootLogger(), "emiestest"); #define GLUE2_NAMESPACE "http://schemas.ogf.org/glue/2009/03/spec_2.0_r1" static void usage(void) { std::cout<<"arcemiestest sstat - retrieve service description"< - submit job to service"< - check state of job"< - obtain extended description of job"< - remove job from service"< - cancel job processing/execution"< - list jobs available at service"< - retrieve service description using all available methods and validate results"<::const_iterator s = job.stagein.begin();s!=job.stagein.end();++s) { std::cout<<"stagein: "<fullstr()<::const_iterator s = job.session.begin();s!=job.session.end();++s) { std::cout<<"session: "<fullstr()<::const_iterator s = job.stageout.begin();s!=job.stageout.end();++s) { std::cout<<"stageout: "<fullstr()<::const_iterator attr = state.attributes.begin(); attr != state.attributes.end();++attr) { std::cout<<"attribute: "<<*attr<& jobs) { for(std::list::const_iterator job = jobs.begin();job != jobs.end();++job) { std::cout<<"ID: "<id<(response); if(!job) { delete response; logger.msg(ERROR,"Submission failed"); return 1; }; print(*job); print(job->state); delete response; } else if(command == "stat") { EMIESJob job; FillJob(job,argc,argv); EMIESJobState state; if(!ac.stat(job,state)) { logger.msg(ERROR,"Obtaining status failed"); return 1; }; print(job); print(state); } else if(command == "info") { EMIESJob job; FillJob(job,argc,argv); Job info; if(!ac.info(job,info)) { logger.msg(ERROR,"Obtaining information failed"); return 1; }; print(job); for(std::list::const_iterator s = job.stagein.begin();s!=job.stagein.end();++s) { std::cout<<"stagein: "<fullstr()<::const_iterator s = job.session.begin();s!=job.session.end();++s) { std::cout<<"session: "<fullstr()<::const_iterator s = job.stageout.begin();s!=job.stageout.end();++s) { std::cout<<"stageout: "<fullstr()< jobs; if(!ac.list(jobs)) { logger.msg(ERROR,"List failed"); return 1; } print(jobs); } else if(command == "ivalidate") { std::map interfaces; interfaces["org.ogf.glue.emies.activitycreation"] = ""; interfaces["org.ogf.glue.emies.activitymanagement"] = ""; interfaces["org.ogf.glue.emies.activityinfo"] = ""; interfaces["org.ogf.glue.emies.resourceinfo"] = ""; interfaces["org.ogf.glue.emies.delegation"] = ""; logger.msg(INFO,"Fetching resource description from %s",url.str()); XMLNode info; if(!ac.sstat(info,false)) { logger.msg(ERROR,"Failed to obtain resource description: %s",ac.failure()); return 1; }; int n = 0; int cnum1 = 0; for(;;++n) { XMLNode node = info.Child(n); if(!node) break; if(node.Namespace() != GLUE2_NAMESPACE) { logger.msg(ERROR,"Resource description contains unexpected element: %s:%s",node.Namespace(),node.Name()); return 1; }; if((node.Name() != "ComputingService") && (node.Name() != "Service")) { logger.msg(ERROR,"Resource description contains unexpected element: %s:%s",node.Namespace(),node.Name()); return 1; }; std::string errstr; std::string glue2_schema = ArcLocation::GetDataDir()+G_DIR_SEPARATOR_S+"schema"+G_DIR_SEPARATOR_S+"GLUE2.xsd"; if(!node.Validate(glue2_schema,errstr)) { logger.msg(ERROR,"Resource description validation according to GLUE2 schema failed: "); for(std::string::size_type p = 0;;) { if(p >= errstr.length()) break; std::string::size_type e = errstr.find('\n',p); if(e == std::string::npos) e = errstr.length(); logger.msg(ERROR,"%s", errstr.substr(p,e-p)); p = e + 1; }; return 1; }; if(node.Name() == "ComputingService") { ++cnum1; XMLNode endpoint = node["ComputingEndpoint"]; for(;(bool)endpoint;++endpoint) { XMLNode name = 
endpoint["InterfaceName"]; for(;(bool)name;++name) { std::string iname = (std::string)name; if((bool)endpoint["URL"]) { if(interfaces.find(iname) != interfaces.end()) interfaces[iname] = (std::string)endpoint["URL"]; }; }; }; }; }; if(n == 0) { logger.msg(ERROR,"Resource description is empty"); return 1; }; int inum = 0; for(std::map::iterator i = interfaces.begin(); i != interfaces.end(); ++i) { if(!i->second.empty()) { logger.msg(INFO,"Resource description provides URL for interface %s: %s",i->first,i->second); ++inum; }; }; if(inum == 0) { logger.msg(ERROR,"Resource description provides no URLs for interfaces"); return 1; }; logger.msg(INFO,"Resource description validation passed"); int depth = 1; logger.msg(INFO,"Requesting ComputingService elements of resource description at %s",url.str()); XMLNodeContainer items; bool query_passed = false; bool all_elements = false; if(!query_passed) { logger.msg(INFO,"Performing /Services/ComputingService query"); if(!ac.squery("/Services/ComputingService",items,false)) { logger.msg(INFO,"Failed to obtain resource description: %s",ac.failure()); } else if(items.Size() <= 0) { logger.msg(INFO,"Query returned no elements."); } else { query_passed = true; }; }; if(!query_passed) { logger.msg(INFO,"Performing /ComputingService query"); if(!ac.squery("/ComputingService",items,false)) { logger.msg(INFO,"Failed to obtain resource description: %s",ac.failure()); } else if(items.Size() <= 0) { logger.msg(INFO,"Query returned no elements."); } else { query_passed = true; }; }; if(!query_passed) { all_elements = true; logger.msg(INFO,"Performing /* query"); if(!ac.squery("/*",items,false)) { logger.msg(INFO,"Failed to obtain resource description: %s",ac.failure()); } else if(items.Size() <= 0) { logger.msg(INFO,"Query returned no elements."); } else { query_passed = true; }; }; if(!query_passed) { logger.msg(ERROR,"All queries failed"); return 1; }; // In current implementation we can have different response // 1. ComputingService elements inside every Item element (ARC) // 2. Content of ComputingService elements inside every Item element (UNICORE) // 3. All elements inside every Item element // 4. Content of all elements inside every Item element int cnum2 = 0; for(int n = 0; n < items.Size(); ++n) { if((items[n].Size() > 0) && (items[n]["ComputingService"])) { // Case 1 and 3. for(int nn = 0; nn < items[n].Size(); ++nn) { if((all_elements) && (items[n].Name() != "ComputingService")) continue; // case 3 if(!CheckComputingService(items[n].Child(nn))) return 1; ++cnum2; }; } else { // Assuming 2 and 4. Because 4 can't be reliably recognised // just assume it never happens. 
XMLNode result; NS ns("glue2arc",GLUE2_NAMESPACE); items[n].New(result); result.Namespaces(ns,true,0); result.Name(result.NamespacePrefix(GLUE2_NAMESPACE)+":ComputingService"); if(!CheckComputingService(result)) return 1; ++cnum2; }; }; if(cnum1 != cnum2) { logger.msg(ERROR,"Number of ComputingService elements obtained from full document and XPath qury do not match: %d != %d",cnum1,cnum2); return 1; }; logger.msg(INFO,"Resource description query validation passed"); } else { logger.msg(ERROR,"Unsupported command: %s",command); return 1; } return 0; } nordugrid-arc-5.4.2/src/hed/acc/EMIES/PaxHeaders.7502/arcemiestest.1.in0000644000000000000000000000012712123705613023276 xustar000000000000000027 mtime=1364167563.653962 30 atime=1513200648.776610219 30 ctime=1513200660.196749892 nordugrid-arc-5.4.2/src/hed/acc/EMIES/arcemiestest.1.in0000644000175000002070000000554012123705613023344 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARCEMIESTEST 1 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid Users Manual" .SH NAME arcemiestest \- perform simple operations on EMI ES service .SH DESCRIPTION The .B arcemiestest command performs simple operations on server with EMI Execution Service (ES) interface and is meant for testing interoperability of ARC EMI ES client implementation and corresponding service. Please see description of EMI ES at .B http://twiki.cern.ch/twiki/bin/view/EMI/EmiExecutionService for detailed information about functionality of the service. .SH SYNOPSIS .B arcemiestest command URL [arguments] .SH COMMANDS .IP "\fBsstat URL\fR takes URL of service implementing ResourceInfo capability, retrieves service description .IP "\fBsubmit URL filename\fR takes URL of service implementing ActivityCreation capability and file containing ADL activity description, sublits activity to service .IP "\fBstat URL ID\fR takes URL of service implementing ActivityManagement capability and identifier of activity as returned by submit command, retireves state of activity .IP "\fBinfo\fR takes URL of service implementing ActivityManagement capability and identifier of activity as returned by submit command, retirieves extended description of the activity .IP "\fBclean\fR takes URL of service implementing ActivityManagement capability and identifier of activity as returned by submit command, removes the activity from the service .IP "\fBkill\fR takes URL of service implementing ActivityManagement capability and identifier of activity as returned by submit command, cancels execution of the activity at the service .IP "\fBlist\fR takes URL of service implementing ActivityInfo capability, retrieves identifiers of available activities .IP "\fBivalidate\fR takes URL of service implementing ResourceInfo capability, retrieves full and partial service descriptions and validates results. .SH FILES .TP .B $(ARC_LOCATION)/share/arc/schema/GLUE2.xsd The schema of GLUE2 service description is used to validate response from service. .SH ENVIRONMENT VARIABLES .TP .B X509_USER_PROXY The location of the user's Grid proxy file. Shouldn't be set unless the proxy is in a non-standard location. .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. 
.SH EXAMPLE arcemiestest ivalidate https://testbed.eu-emi.eu/emies .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org .SH SEE ALSO .BR arcproxy (1), .BR arcinfo (1), .BR arcsub (1), .BR arcstat (1) nordugrid-arc-5.4.2/src/hed/acc/EMIES/PaxHeaders.7502/TargetInformationRetrieverPluginEMIES.cpp0000644000000000000000000000012212053372654030113 xustar000000000000000027 mtime=1353577900.171111 25 atime=1513200574.3737 30 ctime=1513200660.205750002 nordugrid-arc-5.4.2/src/hed/acc/EMIES/TargetInformationRetrieverPluginEMIES.cpp0000644000175000002070000000603012053372654030161 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include "JobStateEMIES.h" #include "EMIESClient.h" #include "TargetInformationRetrieverPluginEMIES.h" namespace Arc { Logger TargetInformationRetrieverPluginEMIES::logger(Logger::getRootLogger(), "TargetInformationRetrieverPlugin.EMIES"); bool TargetInformationRetrieverPluginEMIES::isEndpointNotSupported(const Endpoint& endpoint) const { const std::string::size_type pos = endpoint.URLString.find("://"); if (pos != std::string::npos) { const std::string proto = lower(endpoint.URLString.substr(0, pos)); return ((proto != "http") && (proto != "https")); } return false; } static URL CreateURL(std::string service) { std::string::size_type pos1 = service.find("://"); if (pos1 == std::string::npos) { service = "https://" + service; } else { std::string proto = lower(service.substr(0,pos1)); if((proto != "http") && (proto != "https")) return URL(); } return service; } EndpointQueryingStatus TargetInformationRetrieverPluginEMIES::Query(const UserConfig& uc, const Endpoint& cie, std::list& csList, const EndpointQueryOptions&) const { EndpointQueryingStatus s(EndpointQueryingStatus::FAILED); URL url(CreateURL(cie.URLString)); if (!url) { return s; } logger.msg(DEBUG, "Collecting EMI-ES GLUE2 computing info endpoint information."); MCCConfig cfg; uc.ApplyToConfig(cfg); EMIESClient ac(url, cfg, uc.Timeout()); XMLNode servicesQueryResponse; if (!ac.sstat(servicesQueryResponse)) { return s; } ExtractTargets(url, servicesQueryResponse, csList); for (std::list::iterator it = csList.begin(); it != csList.end(); it++) { (*it)->InformationOriginEndpoint = cie; } if (!csList.empty()) s = EndpointQueryingStatus::SUCCESSFUL; return s; } void TargetInformationRetrieverPluginEMIES::ExtractTargets(const URL& url, XMLNode response, std::list& csList) { logger.msg(VERBOSE, "Generating EMIES targets"); GLUE2::ParseExecutionTargets(response, csList); for(std::list::iterator cs = csList.begin(); cs != csList.end(); ++cs) { for (std::map::iterator ce = cs->ComputingEndpoint.begin(); ce != cs->ComputingEndpoint.end(); ++ce) { if(ce->second->URLString.empty()) ce->second->URLString = url.str(); if(ce->second->InterfaceName.empty()) ce->second->InterfaceName = "org.ogf.glue.emies.activitycreation"; } if(cs->AdminDomain->Name.empty()) cs->AdminDomain->Name = url.Host(); logger.msg(VERBOSE, "Generated EMIES target: %s", cs->AdminDomain->Name); } } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/EMIES/PaxHeaders.7502/SubmitterPluginEMIES.h0000644000000000000000000000012213165644550024214 xustar000000000000000027 mtime=1507281256.705161 25 atime=1513200574.3737 30 
ctime=1513200660.201749953 nordugrid-arc-5.4.2/src/hed/acc/EMIES/SubmitterPluginEMIES.h0000644000175000002070000000302413165644550024262 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_SUBMITTERPLUGINEMIES_H__ #define __ARC_SUBMITTERPLUGINEMIES_H__ #include #include #include #include #include #include "EMIESClient.h" namespace Arc { class SubmissionStatus; class SubmitterPluginEMIES : public SubmitterPlugin { public: SubmitterPluginEMIES(const UserConfig& usercfg, PluginArgument* parg); virtual ~SubmitterPluginEMIES(); virtual void SetUserConfig(const UserConfig& uc); static Plugin* Instance(PluginArgument *arg) { SubmitterPluginArgument *subarg = dynamic_cast(arg); return subarg ? new SubmitterPluginEMIES(*subarg, arg) : NULL; } virtual bool isEndpointNotSupported(const std::string& endpoint) const; virtual SubmissionStatus Submit(const std::list& jobdescs, const std::string& endpoint, EntityConsumer& jc, std::list& notSubmitted); virtual SubmissionStatus Submit(const std::list& jobdescs, const ExecutionTarget& et, EntityConsumer& jc, std::list& notSubmitted); private: EMIESClients clients; bool getDelegationID(const URL& durl, std::string& delegation_id); bool submit(const JobDescription& preparedjobdesc, const URL& url, const URL& iurl, URL durl, EMIESJob& jobid); static Logger logger; }; } // namespace Arc #endif // __ARC_SUBMITTERPLUGINEMIES_H__ nordugrid-arc-5.4.2/src/hed/acc/EMIES/PaxHeaders.7502/JobListRetrieverPluginEMIES.h0000644000000000000000000000012112103757741025471 xustar000000000000000027 mtime=1359994849.573367 25 atime=1513200574.3607 29 ctime=1513200660.20474999 nordugrid-arc-5.4.2/src/hed/acc/EMIES/JobListRetrieverPluginEMIES.h0000644000175000002070000000164412103757741025546 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOBLISTRETRIEVERPLUGINEMIES_H__ #define __ARC_JOBLISTRETRIEVERPLUGINEMIES_H__ #include #include namespace Arc { class Logger; class JobListRetrieverPluginEMIES : public JobListRetrieverPlugin { public: JobListRetrieverPluginEMIES(PluginArgument* parg): JobListRetrieverPlugin(parg) { supportedInterfaces.push_back("org.ogf.glue.emies.resourceinfo"); } virtual ~JobListRetrieverPluginEMIES() {} static Plugin* Instance(PluginArgument *arg) { return new JobListRetrieverPluginEMIES(arg); } virtual EndpointQueryingStatus Query(const UserConfig&, const Endpoint&, std::list&, const EndpointQueryOptions&) const; virtual bool isEndpointNotSupported(const Endpoint&) const; private: static Logger logger; }; } // namespace Arc #endif // __ARC_JOBLISTRETRIEVERPLUGINEMIES_H__ nordugrid-arc-5.4.2/src/hed/acc/EMIES/PaxHeaders.7502/JobStateEMIES.cpp0000644000000000000000000000012212153630075023116 xustar000000000000000027 mtime=1370435645.386876 25 atime=1513200574.3577 30 ctime=1513200660.198749917 nordugrid-arc-5.4.2/src/hed/acc/EMIES/JobStateEMIES.cpp0000644000175000002070000000777612153630075023206 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "JobStateEMIES.h" namespace Arc { std::string JobStateEMIES::FormatSpecificState(const std::string& state) { EMIESJobState st_; st_ = XMLNode(state); // Return format: [:[,[...]]] std::string attributes; if (!st_.attributes.empty()) { std::list::const_iterator it = st_.attributes.begin(); attributes = ":" + *it++; for (; it != st_.attributes.end(); ++it) { attributes += "," + *it; } } return st_.state + attributes; } JobState::StateType JobStateEMIES::StateMapS(const std::string& st) { EMIESJobState st_; st_ = st; 
return StateMapInt(st_); } JobState::StateType JobStateEMIES::StateMapX(const std::string& st) { EMIESJobState st_; st_ = XMLNode(st); return StateMapInt(st_); } JobState::StateType JobStateEMIES::StateMapInt(const EMIESJobState& st) { /// \mapname EMIES EMI ES /// \mapnote EMI ES states contain a state name and zero or more state attributes. For this mapping the notation:
    <state>:{*|<attribute>}
    is used, where '*' applies to all attributes except those already specify for a particular state name. /// \mapattr accepted:* -> ACCEPTED if(st.state == EMIES_STATE_ACCEPTED_S) { return JobState::ACCEPTED; } /// \mapattr preprocessing:* -> ACCEPTED else if(st.state == EMIES_STATE_PREPROCESSING_S) { if(st.HasAttribute(EMIES_SATTR_CLIENT_STAGEIN_POSSIBLE_S)) return JobState::PREPARING; return JobState::ACCEPTED; } /// \mapattr processing:* -> QUEUING else if(st.state == EMIES_STATE_PROCESSING_S) { return JobState::QUEUING; } /// \mapattr processing-accepting:* -> SUBMITTING else if(st.state == EMIES_STATE_PROCESSING_ACCEPTING_S) { return JobState::SUBMITTING; } /// \mapattr processing-queued:* -> QUEUING else if(st.state == EMIES_STATE_PROCESSING_QUEUED_S) { return JobState::QUEUING; } /// \mapattr processing-running:* -> RUNNING else if(st.state == EMIES_STATE_PROCESSING_RUNNING_S) { return JobState::RUNNING; } /// \mapattr postprocessing:client-stageout-possible -> FINISHING /// \mapattr postprocessing:* -> OTHER else if(st.state == EMIES_STATE_POSTPROCESSING_S) { if(st.HasAttribute(EMIES_SATTR_CLIENT_STAGEOUT_POSSIBLE_S)) return JobState::FINISHING; return JobState::OTHER; } /// \mapattr terminal:preprocessing-cancel -> FAILED /// \mapattr terminal:processing-cancel -> FAILED /// \mapattr terminal:postprocessing-cancel -> FAILED /// \mapattr terminal:validation-failure -> FAILED /// \mapattr terminal:preprocessing-failure -> FAILED /// \mapattr terminal:processing-failure -> FAILED /// \mapattr terminal:postprocessing-failure -> FAILED /// \mapattr terminal:app-failure -> FAILED /// \mapattr terminal:expired -> DELETED /// \mapattr terminal:* -> FINISHED else if(st.state == EMIES_STATE_TERMINAL_S) { if(st.HasAttribute(EMIES_SATTR_PREPROCESSING_CANCEL_S)) return JobState::FAILED; if(st.HasAttribute(EMIES_SATTR_PROCESSING_CANCEL_S)) return JobState::FAILED; if(st.HasAttribute(EMIES_SATTR_POSTPROCESSING_CANCEL_S)) return JobState::FAILED; if(st.HasAttribute(EMIES_SATTR_VALIDATION_FAILURE_S)) return JobState::FAILED; if(st.HasAttribute(EMIES_SATTR_PREPROCESSING_FAILURE_S)) return JobState::FAILED; if(st.HasAttribute(EMIES_SATTR_PROCESSING_FAILURE_S)) return JobState::FAILED; if(st.HasAttribute(EMIES_SATTR_POSTPROCESSING_FAILURE_S)) return JobState::FAILED; if(st.HasAttribute(EMIES_SATTR_APP_FAILURE_S)) return JobState::FAILED; if(st.HasAttribute(EMIES_SATTR_EXPIRED_S)) return JobState::DELETED; return JobState::FINISHED; } /// \mapattr "":* -> UNDEFINED else if(st.state == "") { return JobState::UNDEFINED; } /// \mapattr Any other state -> OTHER return JobState::OTHER; } } nordugrid-arc-5.4.2/src/hed/acc/EMIES/PaxHeaders.7502/JobStateEMIES.h0000644000000000000000000000012212045235201022553 xustar000000000000000027 mtime=1351957121.246634 25 atime=1513200574.3607 30 ctime=1513200660.199749929 nordugrid-arc-5.4.2/src/hed/acc/EMIES/JobStateEMIES.h0000644000175000002070000000406212045235201022624 0ustar00mockbuildmock00000000000000#ifndef __ARC_JOBSTATEEMIES_H__ #define __ARC_JOBSTATEEMIES_H__ #include #include "EMIESClient.h" namespace Arc { #define EMIES_STATE_ACCEPTED_S "accepted" #define EMIES_STATE_PREPROCESSING_S "preprocessing" #define EMIES_STATE_PROCESSING_S "processing" #define EMIES_STATE_PROCESSING_ACCEPTING_S "processing-accepting" #define EMIES_STATE_PROCESSING_QUEUED_S "processing-queued" #define EMIES_STATE_PROCESSING_RUNNING_S "processing-running" #define EMIES_STATE_POSTPROCESSING_S "postprocessing" #define EMIES_STATE_TERMINAL_S "terminal" #define 
EMIES_SATTR_CLIENT_STAGEIN_POSSIBLE_S "client-stagein-possible" #define EMIES_SATTR_CLIENT_STAGEOUT_POSSIBLE_S "client-stageout-possible" #define EMIES_SATTR_PREPROCESSING_CANCEL_S "preprocessing-cancel" #define EMIES_SATTR_PREPROCESSING_FAILURE_S "preprocessing-failure" #define EMIES_SATTR_PROCESSING_CANCEL_S "processing-cancel" #define EMIES_SATTR_PROCESSING_FAILURE_S "processing-failure" #define EMIES_SATTR_POSTPROCESSING_CANCEL_S "postprocessing-cancel" #define EMIES_SATTR_POSTPROCESSING_FAILURE_S "postprocessing-failure" #define EMIES_SATTR_VALIDATION_FAILURE_S "validation-failure" #define EMIES_SATTR_APP_FAILURE_S "app-failure" #define EMIES_SATTR_EXPIRED_S "expired" class JobStateEMIES : public JobState { public: JobStateEMIES(const std::string& state): JobState(state, &StateMapS, FormatSpecificState) {} // TODO: extremely suboptimal JobStateEMIES(XMLNode state): JobState(xml_to_string(state), &StateMapX, FormatSpecificState) {} JobStateEMIES(const EMIESJobState& state): JobState(state.ToXML(), &StateMapX, FormatSpecificState) {} static JobState::StateType StateMapS(const std::string& state); static JobState::StateType StateMapX(const std::string& state); static JobState::StateType StateMapInt(const EMIESJobState& st); static std::string FormatSpecificState(const std::string& state); private: std::string xml_to_string(XMLNode xml) { std::string s; xml.GetXML(s); return s; }; }; } #endif // __ARC_JOBSTATEEMIES_H__ nordugrid-arc-5.4.2/src/hed/acc/EMIES/PaxHeaders.7502/SubmitterPluginEMIES.cpp0000644000000000000000000000012213213471767024551 xustar000000000000000027 mtime=1512993783.598054 25 atime=1513200574.3697 30 ctime=1513200660.200749941 nordugrid-arc-5.4.2/src/hed/acc/EMIES/SubmitterPluginEMIES.cpp0000644000175000002070000005216413213471767024630 0ustar00mockbuildmock00000000000000 // -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include "SubmitterPluginEMIES.h" #include "EMIESClient.h" #include "JobStateEMIES.h" namespace Arc { Logger SubmitterPluginEMIES::logger(Logger::getRootLogger(), "SubmitterPlugin.EMIES"); SubmitterPluginEMIES::SubmitterPluginEMIES(const UserConfig& usercfg, PluginArgument* parg) : SubmitterPlugin(usercfg, parg),clients(usercfg) { supportedInterfaces.push_back("org.ogf.glue.emies.activitycreation"); } SubmitterPluginEMIES::~SubmitterPluginEMIES() { } void SubmitterPluginEMIES::SetUserConfig(const UserConfig& uc) { SubmitterPlugin::SetUserConfig(uc); clients.SetUserConfig(uc); } bool SubmitterPluginEMIES::isEndpointNotSupported(const std::string& endpoint) const { const std::string::size_type pos = endpoint.find("://"); return pos != std::string::npos && lower(endpoint.substr(0, pos)) != "http" && lower(endpoint.substr(0, pos)) != "https"; } bool SubmitterPluginEMIES::getDelegationID(const URL& durl, std::string& delegation_id) { if(!durl) { logger.msg(INFO, "Failed to delegate credentials to server - no delegation interface found"); return false; } AutoPointer ac(clients.acquire(durl)); delegation_id = ac->delegation(); if(delegation_id.empty()) { logger.msg(INFO, "Failed to delegate credentials to server - %s",ac->failure()); return false; } clients.release(ac.Release()); return true; } SubmissionStatus SubmitterPluginEMIES::Submit(const std::list& jobdescs, const std::string& endpoint, EntityConsumer& jc, std::list& notSubmitted) { // TODO: this is multi step process. So having retries would be nice. 
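// Overall flow implemented below (simplified sketch; the real code interleaves these
// steps per job and records failures in notSubmitted):
//   1. preparedjobdesc.Prepare() + UnParse(jstr, "emies:adl")  - build the EMI ES ADL document
//   2. getDelegationID(url, delegation_id)                     - once, if any job needs delegation
//   3. ac->submit(products, responses, delegation_id)          - batch submission
//   4. poll ac->stat() until client-stagein-possible, then PutFiles() the local inputs
//   5. mac->notify(jobsToNotify, responses)                    - tell the manager that uploads are done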
// TODO: If delegation interface is not on same endpoint as submission interface this method is faulty. URL url((endpoint.find("://") == std::string::npos ? "https://" : "") + endpoint, false, 443); SubmissionStatus retval; bool need_delegation = true; // false - Force delegation always for jobs which use during execution std::string delegation_id; std::list have_uploads; XMLNodeList products; for (std::list::const_iterator itJ = jobdescs.begin(); itJ != jobdescs.end(); ++itJ) { JobDescription preparedjobdesc(*itJ); if (!preparedjobdesc.Prepare()) { logger.msg(INFO, "Failed preparing job description"); notSubmitted.push_back(&*itJ); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; continue; } { std::string jstr; JobDescriptionResult ures = preparedjobdesc.UnParse(jstr, "emies:adl"); if (!ures) { logger.msg(INFO, "Unable to submit job. Job description is not valid in the %s format: %s", "emies:adl", ures.str()); notSubmitted.push_back(&*itJ); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; continue; } products.push_back(XMLNode()); XMLNode(jstr).Move(products.back()); if(!products.back()) { logger.msg(INFO, "Unable to submit job. Job description is not valid XML"); notSubmitted.push_back(&*itJ); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; products.pop_back(); continue; } } have_uploads.push_back(false); for(std::list::const_iterator itIF = itJ->DataStaging.InputFiles.begin(); itIF != itJ->DataStaging.InputFiles.end() && (!need_delegation || !have_uploads.back()); ++itIF) { have_uploads.back() = have_uploads.back() || (!itIF->Sources.empty() && (itIF->Sources.front().Protocol() == "file")); need_delegation = need_delegation || (!itIF->Sources.empty() && (itIF->Sources.front().Protocol() != "file")); } for(std::list::const_iterator itOF = itJ->DataStaging.OutputFiles.begin(); itOF != itJ->DataStaging.OutputFiles.end() && !need_delegation; ++itOF) { need_delegation = !itOF->Targets.empty(); need_delegation |= (itOF->Name[0] == '@'); // ARC specific - dynamic list of output files } if (need_delegation && delegation_id.empty()) { // Assume that delegation interface is on same machine as submission interface. 
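// Delegation is set up lazily: the first job that needs it (remote input sources,
// remote output targets or an ARC dynamic output list) triggers getDelegationID(),
// and the obtained delegation_id is reused for the rest of the batch.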
if (!getDelegationID(url, delegation_id)) { notSubmitted.push_back(&*itJ); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; products.pop_back(); have_uploads.pop_back(); continue; } } if(have_uploads.back()) { // At least CREAM expects to have ClientDataPush for any input file std::string prefix = products.back().Prefix(); Arc::XMLNode stage = products.back()["DataStaging"]; Arc::XMLNode flag = stage["ClientDataPush"]; // Following 2 are for satisfying inner paranoic feeling if(!stage) stage = products.back().NewChild(prefix+":DataStaging"); if(!flag) flag = stage.NewChild(prefix+":ClientDataPush",0,true); flag = "true"; } } AutoPointer ac(clients.acquire(url)); std::list responses; ac->submit(products, responses, delegation_id); std::list::iterator itHU = have_uploads.begin(); std::list::iterator itR = responses.begin(); std::list::const_iterator itJ = jobdescs.begin(); std::list jobsToNotify; std::list jobDescriptionsOfJobsToNotify; for (; itR != responses.end() && itHU != have_uploads.end() && itJ != jobdescs.end(); ++itJ, ++itR) { EMIESJob *j = dynamic_cast(*itR); if (j) { if (!(*j)) { logger.msg(INFO, "No valid job identifier returned by EMI ES"); notSubmitted.push_back(&*itJ); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; delete *itR; *itR = NULL; continue; } if(!j->manager) { j->manager = url; } if(j->delegation_id.empty()) { j->delegation_id = delegation_id; } JobDescription preparedjobdesc(*itJ); preparedjobdesc.Prepare(); bool job_ok = true; // Check if we have anything to upload. Otherwise there is no need to wait. if(*itHU) { // Wait for job to go into proper state for(;;) { // TODO: implement timeout if(j->state.HasAttribute(EMIES_SATTR_CLIENT_STAGEIN_POSSIBLE_S)) break; if(j->state.state == EMIES_STATE_TERMINAL_S) { logger.msg(INFO, "Job failed on service side"); job_ok = false; break; } // If service jumped over stageable state client probably does not // have to send anything. 
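// The check below breaks out once the job has left the accepted/preprocessing states;
// until then the state is re-polled via ac->stat() every 5 seconds (no overall timeout
// yet, see the TODO above).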
if((j->state.state != EMIES_STATE_ACCEPTED_S) && (j->state.state != EMIES_STATE_PREPROCESSING_S)) break; sleep(5); if(!ac->stat(*j, j->state)) { logger.msg(INFO, "Failed to obtain state of job"); job_ok = false; break; } } if (!job_ok) { notSubmitted.push_back(&*itJ); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; delete *itR; *itR = NULL; continue; } } if(*itHU) { if(!j->state.HasAttribute(EMIES_SATTR_CLIENT_STAGEIN_POSSIBLE_S)) { logger.msg(INFO, "Failed to wait for job to allow stage in"); notSubmitted.push_back(&*itJ); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; delete *itR; *itR = NULL; continue; } if(j->stagein.empty()) { // Try to obtain it from job info Job tjob; if((!ac->info(*j, tjob)) || (j->stagein.empty())) { job_ok = false; } else { job_ok = false; for(std::list::iterator stagein = j->stagein.begin(); stagein != j->stagein.end();++stagein) { if(*stagein) { job_ok = true; break; } } } if(!job_ok) { logger.msg(INFO, "Failed to obtain valid stagein URL for input files"); notSubmitted.push_back(&*itJ); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; delete *itR; *itR = NULL; continue; } } job_ok = false; for(std::list::iterator stagein = j->stagein.begin(); stagein != j->stagein.end();++stagein) { if(!*stagein) continue; // Enhance file upload performance by tuning URL if((stagein->Protocol() == "https") || (stagein->Protocol() == "http")) { stagein->AddOption("threads=2",false); stagein->AddOption("encryption=optional",false); // stagein->AddOption("httpputpartial=yes",false); - TODO: use for A-REX } stagein->AddOption("checksum=no",false); if (!PutFiles(preparedjobdesc, *stagein)) { logger.msg(INFO, "Failed uploading local input files to %s",stagein->str()); } else { job_ok = true; } } if (!job_ok) { notSubmitted.push_back(&*itJ); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; delete *itR; *itR = NULL; continue; } } clients.release(ac.Release()); jobsToNotify.push_back(j); jobDescriptionsOfJobsToNotify.push_back(&*itJ); ++itHU; continue; } EMIESFault *f = dynamic_cast(*itR); if (f) { logger.msg(INFO, "Failed to submit job description: EMIESFault(%s , %s)", f->message, f->description); notSubmitted.push_back(&*itJ); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; delete *itR; *itR = NULL; itHU = have_uploads.erase(itHU); continue; } UnexpectedError *ue = dynamic_cast(*itR); if (ue) { logger.msg(INFO, "Failed to submit job description: UnexpectedError(%s)", ue->message); notSubmitted.push_back(&*itJ); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; delete *itR; *itR = NULL; itHU = have_uploads.erase(itHU); continue; } } for (; itJ != jobdescs.end(); ++itJ) { notSubmitted.push_back(&*itJ); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; } // Safety for (; itR != responses.end(); ++itR) { delete *itR; *itR = NULL; } responses.clear(); if (jobsToNotify.empty()) return retval; // It is not clear how service is implemented. So notifying should not harm. // Notification must be sent to manager URL. // Assumption: Jobs is managed by the same manager. 
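// A single client is acquired for the manager URL taken from the first job and notify()
// is issued for the whole batch; acknowledgements are then matched back to the jobs by
// their position in the response list.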
AutoPointer mac(clients.acquire(jobsToNotify.front()->manager)); mac->notify(jobsToNotify, responses); clients.release(mac.Release()); itR = responses.begin(); itHU = have_uploads.begin(); std::list::iterator itJob = jobsToNotify.begin(); std::list::const_iterator itJPtr = jobDescriptionsOfJobsToNotify.begin(); for (; itR != responses.end() && itJPtr != jobDescriptionsOfJobsToNotify.end() && itJob != jobsToNotify.end() && itHU != have_uploads.end(); ++itR, ++itJPtr, ++itJob, ++itHU) { EMIESAcknowledgement *ack = dynamic_cast(*itR); if (!ack) { logger.msg(VERBOSE, "Failed to notify service"); // TODO: exact logic still requires clarification of specs. // TODO: Maybe job should be killed in this case? // So far assume if there are no files to upload // activity can survive without notification. if(*itHU) { notSubmitted.push_back(*itJPtr); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; delete *itR; *itR = NULL; continue; } } JobDescription preparedjobdesc(**itJPtr); preparedjobdesc.Prepare(); Job job; (**itJob).toJob(job); AddJobDetails(preparedjobdesc, job); jc.addEntity(job); delete *itR; *itR = NULL; } return retval; } SubmissionStatus SubmitterPluginEMIES::Submit(const std::list& jobdescs, const ExecutionTarget& et, EntityConsumer& jc, std::list& notSubmitted) { // TODO: this is multi step process. So having retries would be nice. // Submission to EMI ES involves delegation. Delegation may happen through // separate service. Currently existing framework does not provide possibility // to collect this information. So service is re-queried again here. URL iurl; iurl = et.ComputingService->InformationOriginEndpoint.URLString; URL durl; for (std::list< CountedPointer >::const_iterator it = et.OtherEndpoints.begin(); it != et.OtherEndpoints.end(); it++) { if ((*it)->InterfaceName == "org.ogf.glue.emies.delegation") { durl = URL((*it)->URLString); } } URL url(et.ComputingEndpoint->URLString); SubmissionStatus retval; for (std::list::const_iterator it = jobdescs.begin(); it != jobdescs.end(); ++it) { JobDescription preparedjobdesc(*it); if (!preparedjobdesc.Prepare(et)) { logger.msg(INFO, "Failed preparing job description to target resources"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; continue; } EMIESJob jobid; if(!SubmitterPluginEMIES::submit(preparedjobdesc,url,iurl,durl,jobid)) { notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } Job j; jobid.toJob(j); AddJobDetails(preparedjobdesc, j); jc.addEntity(j); } return retval; } bool SubmitterPluginEMIES::submit(const JobDescription& preparedjobdesc, const URL& url, const URL& iurl, URL durl, EMIESJob& jobid) { bool job_ok = true; Arc::XMLNode product; { std::string jstr; JobDescriptionResult ures = preparedjobdesc.UnParse(jstr, "emies:adl"); if (!ures) { logger.msg(INFO, "Unable to submit job. Job description is not valid in the %s format: %s", "emies:adl", ures.str()); return false; } XMLNode(jstr).Move(product); if(!product) { logger.msg(INFO, "Unable to submit job. 
Job description is not valid XML"); return false; } } bool have_uploads = false; bool need_delegation = true; // false - Force delegation always for jobs which use during execution for(std::list::const_iterator itIF = preparedjobdesc.DataStaging.InputFiles.begin(); itIF != preparedjobdesc.DataStaging.InputFiles.end(); ++itIF) { if(need_delegation && have_uploads) break; if(!itIF->Sources.empty()) { if(itIF->Sources.front().Protocol() == "file") { have_uploads = true; } else { need_delegation = true; } } } for(std::list::const_iterator itOF = preparedjobdesc.DataStaging.OutputFiles.begin(); itOF != preparedjobdesc.DataStaging.OutputFiles.end(); ++itOF) { if(need_delegation) break; if(!itOF->Targets.empty()) { need_delegation = true; } else if(itOF->Name[0] == '@') { // ARC specific - dynamic list of output files need_delegation = true; } } if(have_uploads) { // At least CREAM expects to have ClientDataPush for any input file std::string prefix = product.Prefix(); Arc::XMLNode stage = product["DataStaging"]; Arc::XMLNode flag = stage["ClientDataPush"]; // Following 2 are for satisfying inner paranoic feeling if(!stage) stage = product.NewChild(prefix+":DataStaging"); if(!flag) flag = stage.NewChild(prefix+":ClientDataPush",0,true); flag = "true"; } if(iurl && !durl && need_delegation) { AutoPointer ac(clients.acquire(iurl)); std::list activitycreation; std::list activitymanagememt; std::list activityinfo; std::list resourceinfo; std::list delegation; if(ac->sstat(activitycreation,activitymanagememt,activityinfo,resourceinfo,delegation)) { for(std::list::iterator d = delegation.begin(); d !=delegation.end(); ++d) { if(d->Protocol() == "https") { // http? durl = *d; break; } } } clients.release(ac.Release()); } std::string delegation_id; if(need_delegation) { if(!getDelegationID(durl, delegation_id)) return false; } EMIESResponse *response = NULL; AutoPointer ac(clients.acquire(url)); if (!ac->submit(product, &response, delegation_id)) { delete response; logger.msg(INFO, "Failed to submit job description: %s", ac->failure()); return false; } EMIESJob* jobid_ptr = dynamic_cast(response); if (!jobid_ptr) { delete response; logger.msg(INFO, "No valid job identifier returned by EMI ES"); return false; } jobid = *jobid_ptr; delete response; if(!jobid.manager) jobid.manager = url; if(jobid.delegation_id.empty()) jobid.delegation_id = delegation_id; // Check if we have anything to upload. Otherwise there is no need to wait. if(have_uploads) { // Wait for job to go into proper state for(;;) { // TODO: implement timeout if(jobid.state.HasAttribute(EMIES_SATTR_CLIENT_STAGEIN_POSSIBLE_S)) break; if(jobid.state.state == EMIES_STATE_TERMINAL_S) { logger.msg(INFO, "Job failed on service side"); job_ok = false; break; } // If service jumped over stageable state client probably does not // have to send anything. 
if((jobid.state.state != EMIES_STATE_ACCEPTED_S) && (jobid.state.state != EMIES_STATE_PREPROCESSING_S)) break; sleep(5); if(!ac->stat(jobid, jobid.state)) { logger.msg(INFO, "Failed to obtain state of job"); job_ok = false; break; } } if (!job_ok) { return false; } } if(have_uploads) { if(!jobid.state.HasAttribute(EMIES_SATTR_CLIENT_STAGEIN_POSSIBLE_S)) { logger.msg(INFO, "Failed to wait for job to allow stage in"); return false; } if(jobid.stagein.empty()) { // Try to obtain it from job info Job tjob; if((!ac->info(jobid, tjob)) || (jobid.stagein.empty())) { job_ok = false; } else { job_ok = false; for(std::list::iterator stagein = jobid.stagein.begin(); stagein != jobid.stagein.end();++stagein) { if(*stagein) { job_ok = true; break; } } } if(!job_ok) { logger.msg(INFO, "Failed to obtain valid stagein URL for input files"); return false; } } job_ok = false; for(std::list::iterator stagein = jobid.stagein.begin(); stagein != jobid.stagein.end();++stagein) { if(!*stagein) continue; // Enhance file upload performance by tuning URL if((stagein->Protocol() == "https") || (stagein->Protocol() == "http")) { stagein->AddOption("threads=2",false); stagein->AddOption("encryption=optional",false); // stagein->AddOption("httpputpartial=yes",false); - TODO: use for A-REX } stagein->AddOption("checksum=no",false); if (!PutFiles(preparedjobdesc, *stagein)) { logger.msg(INFO, "Failed uploading local input files to %s",stagein->str()); } else { job_ok = true; } } if (!job_ok) { return false; } } clients.release(ac.Release()); // It is not clear how service is implemented. So notifying should not harm. // Notification must be sent to manager URL. AutoPointer mac(clients.acquire(jobid.manager)); if (!mac->notify(jobid)) { logger.msg(INFO, "Failed to notify service"); // TODO: exact logic still requires clarification of specs. // TODO: Maybe job should be killed in this case? // So far assume if there are no files to upload // activity can survive without notification. 
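// I.e. a failed notification is fatal only when local input files were actually
// uploaded (have_uploads); otherwise the submission is still reported as successful.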
if(have_uploads) return false; } else { clients.release(mac.Release()); } return true; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/EMIES/PaxHeaders.7502/DescriptorsEMIES.cpp0000644000000000000000000000012212675602216023711 xustar000000000000000027 mtime=1459029134.924374 25 atime=1513200574.3727 30 ctime=1513200660.207750027 nordugrid-arc-5.4.2/src/hed/acc/EMIES/DescriptorsEMIES.cpp0000644000175000002070000000161312675602216023761 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include "SubmitterPluginEMIES.h" #include "JobControllerPluginEMIES.h" #include "JobListRetrieverPluginEMIES.h" #include "TargetInformationRetrieverPluginEMIES.h" extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "EMIES", "HED:SubmitterPlugin", "EMI-ES conforming execution service", 0, &Arc::SubmitterPluginEMIES::Instance }, { "EMIES", "HED:JobControllerPlugin", "EMI-ES conforming execution service", 0, &Arc::JobControllerPluginEMIES::Instance }, { "EMIES", "HED:TargetInformationRetrieverPlugin", "EMI-ES conforming execution service", 0, &Arc::TargetInformationRetrieverPluginEMIES::Instance }, { "EMIES", "HED:JobListRetrieverPlugin", "EMI-ES conforming execution service", 0, &Arc::JobListRetrieverPluginEMIES::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/acc/EMIES/PaxHeaders.7502/schema0000644000000000000000000000013213214316024021272 xustar000000000000000030 mtime=1513200660.228750284 30 atime=1513200668.721854157 30 ctime=1513200660.228750284 nordugrid-arc-5.4.2/src/hed/acc/EMIES/schema/0000755000175000002070000000000013214316024021415 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/acc/EMIES/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712077521006023413 xustar000000000000000027 mtime=1358864902.003893 30 atime=1513200594.227943068 30 ctime=1513200660.226750259 nordugrid-arc-5.4.2/src/hed/acc/EMIES/schema/Makefile.am0000644000175000002070000000013612077521006023455 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = GLUE2.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/acc/EMIES/schema/PaxHeaders.7502/GLUE2.xsd0000644000000000000000000000012212077521006022710 xustar000000000000000027 mtime=1358864902.003893 25 atime=1513200574.3667 30 ctime=1513200660.228750284 nordugrid-arc-5.4.2/src/hed/acc/EMIES/schema/GLUE2.xsd0000644000175000002070000022450112077521006022763 0ustar00mockbuildmock00000000000000 An abstracted, logical view of software and hardware components that participate in the creation of a storage capability in a Grid environment. A Storage Service exposes zero or more Endpoints having well-defined interfaces, zero or more Storage Shares and zero or more Storage Managers and the related Data Stores. The Storage Service also offers zero or more Storage Access Protocols, and provides summary information about the overall amount of storage by means of the Storage Service Capacity. The Storage Service is autonomous and denotes a weak aggregation among Storage Endpoints, Storage Shares, Storage Managers, Storage Access Protocols and Storage Service Capacities. The Storage Service enables the identification of the entire set of entities providing storage functionality with a persistent name. A storage service exposes zero or more storage endpoints. A storage service serves zero or more storage shares. 
Description of the size and usage of a homogenous storage extent; the storage extent is aggregated at the storage service level by type. A type of protocol available to access the underlying storage extents. An Endpoint usable for managing Storage Shares or for accessing data stored in them; it MAY also be used to expose complementary capabilities which form part of the overall Storage Service. A utilization target for a set of extents in Data Stores, defined by a set of configuration parameters and policies and characterized by status information. A description of the size and usage of a homogenous storage extent available to a Storage Share. The primary software component locally managing one or more Data Stores. It MAY also be used to describe aggregated information about the managed resources. An abstract description of a sufficiently homogeneous storage device providing a storage extent, managed by a local software component (Storage Manager), part of a Storage Service, reachable via zero or more Endpoints and having zero or more Shares defined on it. A Data Store refers to a category of storage with summary information on the storage capacity. A description of the network link quality between a Storage Service and a computing service, and/or of a potentially dedicated access protocol that the Computing Service may use to access the Storage Service. nordugrid-arc-5.4.2/src/hed/acc/EMIES/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315722023420 xustar000000000000000030 mtime=1513200594.258943447 30 atime=1513200648.791610403 30 ctime=1513200660.227750271 nordugrid-arc-5.4.2/src/hed/acc/EMIES/schema/Makefile.in0000644000175000002070000004351013214315722023471 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/acc/EMIES/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = 
@ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = 
@LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ 
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = GLUE2.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/acc/EMIES/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/acc/EMIES/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 
's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/acc/EMIES/PaxHeaders.7502/JobListRetrieverPluginEMIES.cpp0000644000000000000000000000012212205134174026015 xustar000000000000000027 mtime=1377089660.567375 25 atime=1513200574.3587 30 ctime=1513200660.203749978 nordugrid-arc-5.4.2/src/hed/acc/EMIES/JobListRetrieverPluginEMIES.cpp0000644000175000002070000000521712205134174026071 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "JobStateEMIES.h" #include "EMIESClient.h" #include "JobListRetrieverPluginEMIES.h" namespace Arc { Logger JobListRetrieverPluginEMIES::logger(Logger::getRootLogger(), "JobListRetrieverPlugin.EMIES"); bool JobListRetrieverPluginEMIES::isEndpointNotSupported(const Endpoint& endpoint) const { const std::string::size_type pos = endpoint.URLString.find("://"); if (pos != std::string::npos) { const std::string proto = lower(endpoint.URLString.substr(0, pos)); return ((proto != "http") && (proto != "https")); } return false; } static URL CreateURL(std::string service) { std::string::size_type pos1 = service.find("://"); if (pos1 == std::string::npos) { service = "https://" + service; } else { std::string proto = lower(service.substr(0,pos1)); if((proto != "http") && (proto != "https")) return URL(); } // Default port other than 443? // Default path? return service; } EndpointQueryingStatus JobListRetrieverPluginEMIES::Query(const UserConfig& uc, const Endpoint& endpoint, std::list& jobs, const EndpointQueryOptions&) const { EndpointQueryingStatus s(EndpointQueryingStatus::FAILED); URL url(CreateURL(endpoint.URLString)); if (!url) { return s; } MCCConfig cfg; uc.ApplyToConfig(cfg); EMIESClient ac(url, cfg, uc.Timeout()); std::list jobids; if (!ac.list(jobids)) { return s; } logger.msg(DEBUG, "Listing jobs succeeded, %d jobs found", jobids.size()); std::list responses; ac.info(jobids, responses); std::list::iterator itID = jobids.begin(); std::list::iterator itR = responses.begin(); for(; itR != responses.end() && itID != jobids.end(); ++itR, ++itID) { EMIESJobInfo* jInfo = dynamic_cast(*itR); if (!jInfo) { // TODO: Handle ERROR continue; } std::string submittedVia = jInfo->getSubmittedVia(); if (submittedVia != "org.ogf.glue.emies.activitycreation") { logger.msg(DEBUG, "Skipping retrieved job (%s) because it was submitted via another interface (%s).", url.fullstr() + "/" + itID->id, submittedVia); continue; } Job j; if(!itID->manager) itID->manager = url; itID->toJob(j); jInfo->toJob(j); jobs.push_back(j); }; s = EndpointQueryingStatus::SUCCESSFUL; return s; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/PaxHeaders.7502/UNICORE0000644000000000000000000000013213214316024020274 xustar000000000000000030 mtime=1513200660.368751996 30 atime=1513200668.721854157 30 ctime=1513200660.368751996 nordugrid-arc-5.4.2/src/hed/acc/UNICORE/0000755000175000002070000000000013214316024020417 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/acc/UNICORE/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515022416 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200594.682948633 30 ctime=1513200660.359751886 nordugrid-arc-5.4.2/src/hed/acc/UNICORE/Makefile.am0000644000175000002070000000200512052416515022455 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libaccUNICORE.la libaccUNICORE_la_SOURCES = \ SubmitterPluginUNICORE.cpp SubmitterPluginUNICORE.h \ JobControllerPluginUNICORE.cpp JobControllerPluginUNICORE.h \ UNICOREClient.cpp UNICOREClient.h \ 
DescriptorsUNICORE.cpp \ JobStateUNICORE.cpp JobStateUNICORE.h libaccUNICORE_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libaccUNICORE_la_LIBADD = \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la\ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libaccUNICORE_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/acc/UNICORE/PaxHeaders.7502/JobStateUNICORE.cpp0000644000000000000000000000012111337003616023617 xustar000000000000000026 mtime=1266419598.59158 25 atime=1513200574.3857 30 ctime=1513200660.367751983 nordugrid-arc-5.4.2/src/hed/acc/UNICORE/JobStateUNICORE.cpp0000644000175000002070000000310311337003616023664 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "JobStateUNICORE.h" namespace Arc { JobState::StateType JobStateUNICORE::StateMap(const std::string& state) { if (Arc::lower(state) == "accepted") return JobState::ACCEPTED; else if (Arc::lower(state) == "queued") return JobState::QUEUING; else if (Arc::lower(state) == "running") return JobState::RUNNING; else if (Arc::lower(state) == "finished") return JobState::FINISHED; else if (Arc::lower(state) == "failed") return JobState::FAILED; else if (state == "") return JobState::UNDEFINED; else return JobState::OTHER; } } /* 113 114 115 116 117 118 119 120 UNICORE shows the following job states: * STAGINGIN - the server is staging in data from remote sites into the job directory * READY - job is ready to be started * QUEUED - job is waiting in the batch queue * RUNNING - job is running * STAGINGOUT - execution has finished, and the server is staging out data to remote sites * SUCCESSFUL - all finished, no errors occured * FAILED - errors occured in the execution and/or data staging phases * UNDEFINED - this state formally exists, but is not seen on clients */ nordugrid-arc-5.4.2/src/hed/acc/UNICORE/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315722022422 xustar000000000000000030 mtime=1513200594.733949256 30 atime=1513200648.825610819 30 ctime=1513200660.360751898 nordugrid-arc-5.4.2/src/hed/acc/UNICORE/Makefile.in0000644000175000002070000007633713214315722022510 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
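The JobStateUNICORE::StateMap implementation shown above folds UNICORE state strings into the generic Arc::JobState categories; the states listed in its trailing comment that have no explicit branch (STAGINGIN, READY, STAGINGOUT, SUCCESSFUL) therefore fall through to JobState::OTHER, and an empty string yields UNDEFINED. The following is a minimal illustrative sketch, not part of the package, assuming only the JobStateUNICORE.h header shown later in this archive and linking against the ARC compute library:

// Illustrative only -- not part of the nordugrid-arc sources.
// Exercises the public static JobStateUNICORE::StateMap declared in
// JobStateUNICORE.h. Matching is case-insensitive via Arc::lower(), so
// "QUEUED" maps to QUEUING, unlisted states such as "STAGINGOUT" fall
// through to OTHER, and "" maps to UNDEFINED.
#include <iostream>
#include <string>
#include "JobStateUNICORE.h"

int main() {
  const char* samples[] = { "QUEUED", "Running", "failed", "STAGINGOUT", "" };
  for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i) {
    Arc::JobState::StateType t = Arc::JobStateUNICORE::StateMap(samples[i]);
    std::cout << "'" << samples[i] << "' -> " << static_cast<int>(t) << std::endl;
  }
  return 0;
}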
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/acc/UNICORE DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libaccUNICORE_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libaccUNICORE_la_OBJECTS = \ libaccUNICORE_la-SubmitterPluginUNICORE.lo \ libaccUNICORE_la-JobControllerPluginUNICORE.lo \ libaccUNICORE_la-UNICOREClient.lo \ libaccUNICORE_la-DescriptorsUNICORE.lo \ libaccUNICORE_la-JobStateUNICORE.lo libaccUNICORE_la_OBJECTS = $(am_libaccUNICORE_la_OBJECTS) libaccUNICORE_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libaccUNICORE_la_CXXFLAGS) $(CXXFLAGS) \ 
$(libaccUNICORE_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libaccUNICORE_la_SOURCES) DIST_SOURCES = $(libaccUNICORE_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ 
GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = 
@S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libaccUNICORE.la libaccUNICORE_la_SOURCES = \ SubmitterPluginUNICORE.cpp SubmitterPluginUNICORE.h \ JobControllerPluginUNICORE.cpp JobControllerPluginUNICORE.h \ UNICOREClient.cpp UNICOREClient.h \ DescriptorsUNICORE.cpp \ JobStateUNICORE.cpp JobStateUNICORE.h libaccUNICORE_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libaccUNICORE_la_LIBADD = \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ 
$(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la\ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libaccUNICORE_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/acc/UNICORE/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/acc/UNICORE/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libaccUNICORE.la: $(libaccUNICORE_la_OBJECTS) $(libaccUNICORE_la_DEPENDENCIES) $(libaccUNICORE_la_LINK) -rpath $(pkglibdir) $(libaccUNICORE_la_OBJECTS) $(libaccUNICORE_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccUNICORE_la-DescriptorsUNICORE.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccUNICORE_la-JobControllerPluginUNICORE.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccUNICORE_la-JobStateUNICORE.Plo@am__quote@ @AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/libaccUNICORE_la-SubmitterPluginUNICORE.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccUNICORE_la-UNICOREClient.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libaccUNICORE_la-SubmitterPluginUNICORE.lo: SubmitterPluginUNICORE.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccUNICORE_la_CXXFLAGS) $(CXXFLAGS) -MT libaccUNICORE_la-SubmitterPluginUNICORE.lo -MD -MP -MF $(DEPDIR)/libaccUNICORE_la-SubmitterPluginUNICORE.Tpo -c -o libaccUNICORE_la-SubmitterPluginUNICORE.lo `test -f 'SubmitterPluginUNICORE.cpp' || echo '$(srcdir)/'`SubmitterPluginUNICORE.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccUNICORE_la-SubmitterPluginUNICORE.Tpo $(DEPDIR)/libaccUNICORE_la-SubmitterPluginUNICORE.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SubmitterPluginUNICORE.cpp' object='libaccUNICORE_la-SubmitterPluginUNICORE.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccUNICORE_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccUNICORE_la-SubmitterPluginUNICORE.lo `test -f 'SubmitterPluginUNICORE.cpp' || echo '$(srcdir)/'`SubmitterPluginUNICORE.cpp libaccUNICORE_la-JobControllerPluginUNICORE.lo: JobControllerPluginUNICORE.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccUNICORE_la_CXXFLAGS) $(CXXFLAGS) -MT libaccUNICORE_la-JobControllerPluginUNICORE.lo -MD -MP -MF $(DEPDIR)/libaccUNICORE_la-JobControllerPluginUNICORE.Tpo -c -o libaccUNICORE_la-JobControllerPluginUNICORE.lo `test -f 'JobControllerPluginUNICORE.cpp' || echo '$(srcdir)/'`JobControllerPluginUNICORE.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccUNICORE_la-JobControllerPluginUNICORE.Tpo $(DEPDIR)/libaccUNICORE_la-JobControllerPluginUNICORE.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobControllerPluginUNICORE.cpp' object='libaccUNICORE_la-JobControllerPluginUNICORE.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) 
$(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccUNICORE_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccUNICORE_la-JobControllerPluginUNICORE.lo `test -f 'JobControllerPluginUNICORE.cpp' || echo '$(srcdir)/'`JobControllerPluginUNICORE.cpp libaccUNICORE_la-UNICOREClient.lo: UNICOREClient.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccUNICORE_la_CXXFLAGS) $(CXXFLAGS) -MT libaccUNICORE_la-UNICOREClient.lo -MD -MP -MF $(DEPDIR)/libaccUNICORE_la-UNICOREClient.Tpo -c -o libaccUNICORE_la-UNICOREClient.lo `test -f 'UNICOREClient.cpp' || echo '$(srcdir)/'`UNICOREClient.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccUNICORE_la-UNICOREClient.Tpo $(DEPDIR)/libaccUNICORE_la-UNICOREClient.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='UNICOREClient.cpp' object='libaccUNICORE_la-UNICOREClient.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccUNICORE_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccUNICORE_la-UNICOREClient.lo `test -f 'UNICOREClient.cpp' || echo '$(srcdir)/'`UNICOREClient.cpp libaccUNICORE_la-DescriptorsUNICORE.lo: DescriptorsUNICORE.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccUNICORE_la_CXXFLAGS) $(CXXFLAGS) -MT libaccUNICORE_la-DescriptorsUNICORE.lo -MD -MP -MF $(DEPDIR)/libaccUNICORE_la-DescriptorsUNICORE.Tpo -c -o libaccUNICORE_la-DescriptorsUNICORE.lo `test -f 'DescriptorsUNICORE.cpp' || echo '$(srcdir)/'`DescriptorsUNICORE.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccUNICORE_la-DescriptorsUNICORE.Tpo $(DEPDIR)/libaccUNICORE_la-DescriptorsUNICORE.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DescriptorsUNICORE.cpp' object='libaccUNICORE_la-DescriptorsUNICORE.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccUNICORE_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccUNICORE_la-DescriptorsUNICORE.lo `test -f 'DescriptorsUNICORE.cpp' || echo '$(srcdir)/'`DescriptorsUNICORE.cpp libaccUNICORE_la-JobStateUNICORE.lo: JobStateUNICORE.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccUNICORE_la_CXXFLAGS) $(CXXFLAGS) -MT libaccUNICORE_la-JobStateUNICORE.lo -MD -MP -MF $(DEPDIR)/libaccUNICORE_la-JobStateUNICORE.Tpo -c -o libaccUNICORE_la-JobStateUNICORE.lo `test -f 'JobStateUNICORE.cpp' || echo '$(srcdir)/'`JobStateUNICORE.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccUNICORE_la-JobStateUNICORE.Tpo $(DEPDIR)/libaccUNICORE_la-JobStateUNICORE.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobStateUNICORE.cpp' object='libaccUNICORE_la-JobStateUNICORE.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) 
@AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccUNICORE_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccUNICORE_la-JobStateUNICORE.lo `test -f 'JobStateUNICORE.cpp' || echo '$(srcdir)/'`JobStateUNICORE.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/acc/UNICORE/PaxHeaders.7502/JobStateUNICORE.h0000644000000000000000000000012212045235201023257 xustar000000000000000027 mtime=1351957121.246634 25 atime=1513200574.3917 30 ctime=1513200660.368751996 nordugrid-arc-5.4.2/src/hed/acc/UNICORE/JobStateUNICORE.h0000644000175000002070000000056712045235201023336 0ustar00mockbuildmock00000000000000#ifndef __ARC_JOBSTATEUNICORE_H__ #define __ARC_JOBSTATEUNICORE_H__ #include namespace Arc { class JobStateUNICORE : public JobState { public: JobStateUNICORE(const std::string& state) : JobState(state, &StateMap) {} static JobState::StateType StateMap(const std::string& state); }; } #endif // __ARC_JOBSTATEUNICORE_H__ nordugrid-arc-5.4.2/src/hed/acc/UNICORE/PaxHeaders.7502/DescriptorsUNICORE.cpp0000644000000000000000000000012212675602216024415 xustar000000000000000027 mtime=1459029134.924374 25 atime=1513200574.3827 30 ctime=1513200660.367751983 nordugrid-arc-5.4.2/src/hed/acc/UNICORE/DescriptorsUNICORE.cpp0000644000175000002070000000067512675602216024474 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include "JobControllerPluginUNICORE.h" #include "SubmitterPluginUNICORE.h" extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "UNICORE", "HED:SubmitterPlugin", NULL, 0, &Arc::SubmitterPluginUNICORE::Instance }, { "UNICORE", "HED:JobControllerPlugin", NULL, 0, &Arc::JobControllerPluginUNICORE::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/acc/UNICORE/PaxHeaders.7502/SubmitterPluginUNICORE.h0000644000000000000000000000012212052377001024704 xustar000000000000000027 mtime=1353317889.219113 25 atime=1513200574.3827 30 ctime=1513200660.362751922 nordugrid-arc-5.4.2/src/hed/acc/UNICORE/SubmitterPluginUNICORE.h0000644000175000002070000000217112052377001024754 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_SUBMITTERPLUGINUNICORE_H__ #define __ARC_SUBMITTERPLUGINUNICORE_H__ #include #include #include #include namespace Arc { class SubmissionStatus; class SubmitterPluginUNICORE : public SubmitterPlugin { public: SubmitterPluginUNICORE(const UserConfig& usercfg, PluginArgument* parg) : SubmitterPlugin(usercfg, parg) { supportedInterfaces.push_back("org.ogf.bes"); } ~SubmitterPluginUNICORE() {} static Plugin* Instance(PluginArgument *arg) { SubmitterPluginArgument *subarg = dynamic_cast(arg); return subarg ? 
new SubmitterPluginUNICORE(*subarg, arg) : NULL; } virtual bool isEndpointNotSupported(const std::string& endpoint) const; virtual SubmissionStatus Submit(const std::list& jobdesc, const ExecutionTarget& et, EntityConsumer& jc, std::list& notSubmitted); private: static Logger logger; }; } // namespace Arc #endif // __ARC_SUBMITTERPLUGINUNICORE_H__ nordugrid-arc-5.4.2/src/hed/acc/UNICORE/PaxHeaders.7502/JobControllerPluginUNICORE.h0000644000000000000000000000012212051675267025522 xustar000000000000000027 mtime=1353153207.099019 25 atime=1513200574.3827 30 ctime=1513200660.364751947 nordugrid-arc-5.4.2/src/hed/acc/UNICORE/JobControllerPluginUNICORE.h0000644000175000002070000000357212051675267025600 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOBCONTROLLERUNICORE_H__ #define __ARC_JOBCONTROLLERUNICORE_H__ #include namespace Arc { class URL; class JobControllerPluginUNICORE : public JobControllerPlugin { public: JobControllerPluginUNICORE(const UserConfig& usercfg, PluginArgument* parg) : JobControllerPlugin(usercfg, parg) { supportedInterfaces.push_back("org.unicore.xbes"); } ~JobControllerPluginUNICORE() {} static Plugin* Instance(PluginArgument *arg) { JobControllerPluginArgument *jcarg = dynamic_cast(arg); return jcarg ? new JobControllerPluginUNICORE(*jcarg, arg) : NULL; } virtual bool isEndpointNotSupported(const std::string& endpoint) const; virtual void UpdateJobs(std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool CleanJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool CancelJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool RenewJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool ResumeJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool GetURLToJobResource(const Job& job, Job::ResourceType resource, URL& url) const { return false; } virtual bool GetJobDescription(const Job& job, std::string& desc_str) const; private: static Logger logger; }; } // namespace Arc #endif // __ARC_JOBCONTROLLERUNICORE_H__ nordugrid-arc-5.4.2/src/hed/acc/UNICORE/PaxHeaders.7502/SubmitterPluginUNICORE.cpp0000644000000000000000000000012112701730002025231 xustar000000000000000027 mtime=1460121602.429396 25 atime=1513200574.3877 29 ctime=1513200660.36175191 nordugrid-arc-5.4.2/src/hed/acc/UNICORE/SubmitterPluginUNICORE.cpp0000644000175000002070000000331212701730002025300 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "SubmitterPluginUNICORE.h" #include "UNICOREClient.h" namespace Arc { Logger SubmitterPluginUNICORE::logger(Logger::getRootLogger(), "SubmitterPlugin.UNICORE"); bool SubmitterPluginUNICORE::isEndpointNotSupported(const std::string& endpoint) const { const std::string::size_type pos = endpoint.find("://"); return pos != std::string::npos && lower(endpoint.substr(0, pos)) != "http" && lower(endpoint.substr(0, pos)) != "https"; } SubmissionStatus SubmitterPluginUNICORE::Submit(const std::list& jobdescs, const ExecutionTarget& et, EntityConsumer& jc, std::list& notSubmitted) { MCCConfig cfg; usercfg->ApplyToConfig(cfg); SubmissionStatus retval; for 
(std::list::const_iterator it = jobdescs.begin(); it != jobdescs.end(); ++it) { UNICOREClient uc(URL(et.ComputingEndpoint->URLString), cfg, usercfg->Timeout()); XMLNode id; if (!uc.submit(*it, id)){ retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; notSubmitted.push_back(&*it); continue; } Job j; id.GetDoc(j.IDFromEndpoint); j.JobID = (std::string)id["Address"]; AddJobDetails(*it, j); jc.addEntity(j); } return retval; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/UNICORE/PaxHeaders.7502/UNICOREClient.cpp0000644000000000000000000000012213213445240023321 xustar000000000000000027 mtime=1512983200.815191 25 atime=1513200574.3877 30 ctime=1513200660.365751959 nordugrid-arc-5.4.2/src/hed/acc/UNICORE/UNICOREClient.cpp0000644000175000002070000005646513213445240023410 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "UNICOREClient.h" namespace Arc { static const std::string BES_FACTORY_ACTIONS_BASE_URL("http://schemas.ggf.org/bes/2006/08/bes-factory/BESFactoryPortType/"); static const std::string BES_MANAGEMENT_ACTIONS_BASE_URL("http://schemas.ggf.org/bes/2006/08/bes-management/BESManagementPortType/"); static XMLNode find_xml_node(XMLNode node, const std::string& el_name, const std::string& attr_name, const std::string& attr_value) { if (MatchXMLName(node, el_name) && (((std::string)node.Attribute(attr_name)) == attr_value)) return node; XMLNode cn = node[el_name]; while (cn) { XMLNode fn = find_xml_node(cn, el_name, attr_name, attr_value); if (fn) return fn; cn = cn[1]; } return XMLNode(); } Logger UNICOREClient::logger(Logger::rootLogger, "UNICORE-Client"); static void set_UNICORE_namespaces(NS& ns) { ns["bes-factory"] = "http://schemas.ggf.org/bes/2006/08/bes-factory"; ns["wsa"] = "http://www.w3.org/2005/08/addressing"; ns["jsdl"] = "http://schemas.ggf.org/jsdl/2005/11/jsdl"; ns["jsdl-posix"] = "http://schemas.ggf.org/jsdl/2005/11/jsdl-posix"; ns["jsdl-hpcpa"] = "http://schemas.ggf.org/jsdl/2006/07/jsdl-hpcpa"; ns["ns0"] = "urn:oasis:names:tc:SAML:2.0:assertion"; ns["rp"] = "http://docs.oasis-open.org/wsrf/rp-2"; ns["u6"] = "http://www.unicore.eu/unicore6"; ns["jms"] = "http://unigrids.org/2006/04/services/jms"; } static void set_bes_factory_action(SOAPEnvelope& soap, const char *op) { WSAHeader(soap).Action(BES_FACTORY_ACTIONS_BASE_URL + op); } // static void set_bes_management_action(SOAPEnvelope& soap,const char* op) { // WSAHeader(soap).Action(BES_MANAGEMENT_ACTIONS_BASE_URL+op); // } UNICOREClient::UNICOREClient(const URL& url, const MCCConfig& cfg, int timeout) : client_config(cfg), client_loader(NULL), client(NULL), client_entry(NULL) { logger.msg(INFO, "Creating a UNICORE client"); MCCConfig client_cfg(cfg); proxyPath = cfg.proxy; if (false) { //future test if proxy should be used or not client_cfg.AddProxy(""); } client = new ClientSOAP(client_cfg, url, timeout); rurl = url; set_UNICORE_namespaces(unicore_ns); } UNICOREClient::~UNICOREClient() { if (client_loader) delete client_loader; if (client) delete client; } bool UNICOREClient::submit(const JobDescription& jobdesc, XMLNode& id, bool delegate) { std::string faultstring; logger.msg(INFO, "Creating and sending request"); // Create job request /* bes-factory:CreateActivity bes-factory:ActivityDocument jsdl:JobDefinition */ PayloadSOAP req(unicore_ns); XMLNode op = req.NewChild("bes-factory:CreateActivity"); XMLNode act_doc = 
op.NewChild("bes-factory:ActivityDocument"); set_bes_factory_action(req, "CreateActivity"); WSAHeader(req).To(rurl.str()); //XMLNode proxyHeader = req.Header().NewChild("u6:Proxy"); if (true) { std::string pem_str; std::ifstream proxy_file(proxyPath.c_str()/*, ifstream::in*/); std::getline(proxy_file, pem_str, 0); req.Header().NewChild("u6:Proxy") = pem_str; //std::cout << "\n----\n" << "pem_str = " << pem_str << "\n----\n"; //debug code, remove! } //std::string jsdl_str; //std::getline(jsdl_file, jsdl_str, 0); std::string jsdl_str; if (!jobdesc.UnParse(jsdl_str, "nordugrid:jsdl")) { logger.msg(INFO, "Unable to submit job. Job description is not valid in the %s format", "nordugrid:jsdl"); return false; } XMLNode jsdl_doc = act_doc.NewChild(XMLNode(jsdl_str)); //std::cout << "\n----\n" << jsdl_str << "\n----\n"; //Debug line to verify the activity document jsdl_doc.Namespaces(unicore_ns); // Unify namespaces PayloadSOAP *resp = NULL; XMLNode ds = act_doc["jsdl:JobDefinition"]["jsdl:JobDescription"]["jsdl:DataStaging"]; for (; (bool)ds; ds = ds[1]) { // FilesystemName - ignore // CreationFlag - ignore // DeleteOnTermination - ignore XMLNode source = ds["jsdl:Source"]; XMLNode target = ds["jsdl:Target"]; if ((bool)source) { std::string s_name = ds["jsdl:FileName"]; if (!s_name.empty()) { XMLNode x_url = source["jsdl:URI"]; std::string s_url = x_url; if (s_url.empty()) s_url = "./" + s_name; else { URL u_url(s_url); if (!u_url) { if (s_url[0] != '/') s_url = "./" + s_url; } else { if (u_url.Protocol() == "file") { s_url = u_url.Path(); if (s_url[0] != '/') s_url = "./" + s_url; } else s_url.resize(0); } } if (!s_url.empty()) x_url.Destroy(); } } } act_doc.GetXML(jsdl_str); logger.msg(DEBUG, "Job description to be sent: %s", jsdl_str); // Try to figure out which credentials are used // TODO: Method used is unstable beacuse it assumes some predefined // structure of configuration file. Maybe there should be some // special methods of ClientTCP class introduced. 
std::string deleg_cert; std::string deleg_key; if (delegate) { client->Load(); // Make sure chain is ready XMLNode tls_cfg = find_xml_node((client->GetConfig())["Chain"], "Component", "name", "tls.client"); if (tls_cfg) { deleg_cert = (std::string)(tls_cfg["ProxyPath"]); if (deleg_cert.empty()) { deleg_cert = (std::string)(tls_cfg["CertificatePath"]); deleg_key = (std::string)(tls_cfg["KeyPath"]); } else deleg_key = deleg_cert; } if (deleg_cert.empty() || deleg_key.empty()) { logger.msg(ERROR, "Failed to find delegation credentials in " "client configuration"); return false; } } // Send job request + delegation if (client) { if (delegate) { DelegationProviderSOAP deleg(deleg_cert, deleg_key); logger.msg(INFO, "Initiating delegation procedure"); if (!deleg.DelegateCredentialsInit(*(client->GetEntry()), &(client->GetContext()))) { logger.msg(ERROR, "Failed to initiate delegation"); return false; } deleg.DelegatedToken(op); } MCC_Status status = client->process("http://schemas.ggf.org/bes/2006/08/bes-factory/" "BESFactoryPortType/CreateActivity", &req, &resp); if (!status) { logger.msg(ERROR, "Submission request failed"); return false; } if (resp == NULL) { logger.msg(VERBOSE, "There was no SOAP response"); return false; } } else if (client_entry) { Message reqmsg; Message repmsg; MessageAttributes attributes_req; attributes_req.set("SOAP:ACTION", "http://schemas.ggf.org/bes/2006/08/" "bes-factory/BESFactoryPortType/CreateActivity"); MessageAttributes attributes_rep; MessageContext context; if (delegate) { DelegationProviderSOAP deleg(deleg_cert, deleg_key); logger.msg(INFO, "Initiating delegation procedure"); if (!deleg.DelegateCredentialsInit(*client_entry, &context)) { logger.msg(ERROR, "Failed to initiate delegation"); return false; } deleg.DelegatedToken(op); } reqmsg.Payload(&req); reqmsg.Attributes(&attributes_req); reqmsg.Context(&context); repmsg.Attributes(&attributes_rep); repmsg.Context(&context); MCC_Status status = client_entry->process(reqmsg, repmsg); if (!status) { logger.msg(ERROR, "Submission request failed"); return false; } logger.msg(INFO, "Submission request succeed"); if (repmsg.Payload() == NULL) { logger.msg(VERBOSE, "There was no response to a submission request"); return false; } try { resp = dynamic_cast(repmsg.Payload()); } catch (std::exception&) {} if (resp == NULL) { logger.msg(ERROR, "A response to a submission request was not " "a SOAP message"); delete repmsg.Payload(); return false; } } else { logger.msg(ERROR, "There is no connection chain configured"); return false; } //XMLNode id; SOAPFault fs(*resp); if (!fs) { (*resp)["CreateActivityResponse"]["ActivityIdentifier"].New(id); //id.GetDoc(jobid); //std::cout << "\n---\nActivityIdentifier:\n" << (std::string)((*resp)["CreateActivityResponse"]["ActivityIdentifier"]) << "\n---\n";//debug code delete resp; UNICOREClient luc((std::string)id["Address"], client_config); //local unicore client //std::cout << "\n---\nid element containing (?) 
Job Address:\n" << (std::string)id << "\n---\n";//debug code return luc.uasStartJob(); //return true; } else { faultstring = fs.Reason(); std::string s; resp->GetXML(s); delete resp; logger.msg(DEBUG, "Submission returned failure: %s", s); logger.msg(ERROR, "Submission failed, service returned: %s", faultstring); return false; } } bool UNICOREClient::uasStartJob(){ std::string state, faultstring; logger.msg(INFO, "Creating and sending a start job request"); PayloadSOAP req(unicore_ns); XMLNode SOAPMethod = req.NewChild("jms:Start"); WSAHeader(req).To(rurl.str()); WSAHeader(req).Action("http://schemas.ggf.org/bes/2006/08/bes-activity/BESActivityPortType/StartRequest"); // Send status request PayloadSOAP *resp = NULL; if (client) { MCC_Status status = client->process("http://schemas.ggf.org/bes/2006/08/bes-activity/BESActivityPortType/StartRequest", &req, &resp); if (resp == NULL) { logger.msg(VERBOSE, "There was no SOAP response"); return false; } } else if (client_entry) { Message reqmsg; Message repmsg; MessageAttributes attributes_req; attributes_req.set("SOAP:ACTION", "http://schemas.ggf.org/bes/2006/08/bes-activity/BESActivityPortType/StartRequest"); MessageAttributes attributes_rep; MessageContext context; reqmsg.Payload(&req); reqmsg.Attributes(&attributes_req); reqmsg.Context(&context); repmsg.Attributes(&attributes_rep); repmsg.Context(&context); MCC_Status status = client_entry->process(reqmsg, repmsg); if (!status) { logger.msg(ERROR, "A start job request failed"); return false; } logger.msg(INFO, "A start job request succeeded"); if (repmsg.Payload() == NULL) { logger.msg(VERBOSE, "There was no response to a start job request"); return false; } try { resp = dynamic_cast(repmsg.Payload()); } catch (std::exception&) {} if (resp == NULL) { logger.msg(ERROR, "The response of a start job request was " "not a SOAP message"); delete repmsg.Payload(); return false; } } else { logger.msg(ERROR, "There is no connection chain configured"); return false; } SOAPFault fs(*resp); if (!fs) { return true; } else { faultstring = fs.Reason(); std::string s; resp->GetXML(s); delete resp; logger.msg(DEBUG, "Submission returned failure: %s", s); logger.msg(ERROR, "Submission failed, service returned: %s", faultstring); return false; } } bool UNICOREClient::stat(const std::string& jobid, std::string& status) { std::string state, substate, faultstring; logger.msg(INFO, "Creating and sending a status request"); PayloadSOAP req(unicore_ns); XMLNode jobref = req.NewChild("bes-factory:GetActivityStatuses"). 
NewChild(XMLNode(jobid)); set_bes_factory_action(req, "GetActivityStatuses"); WSAHeader(req).To(rurl.str()); // Send status request PayloadSOAP *resp = NULL; if (client) { MCC_Status status = client->process("http://schemas.ggf.org/bes/2006/08/bes-factory/" "BESFactoryPortType/GetActivityStatuses", &req, &resp); if (resp == NULL) { logger.msg(VERBOSE, "There was no SOAP response"); return false; } } else if (client_entry) { Message reqmsg; Message repmsg; MessageAttributes attributes_req; attributes_req.set("SOAP:ACTION", "http://schemas.ggf.org/bes/2006/08/" "bes-factory/BESFactoryPortType/GetActivityStatuses"); MessageAttributes attributes_rep; MessageContext context; reqmsg.Payload(&req); reqmsg.Attributes(&attributes_req); reqmsg.Context(&context); repmsg.Attributes(&attributes_rep); repmsg.Context(&context); MCC_Status status = client_entry->process(reqmsg, repmsg); if (!status) { logger.msg(ERROR, "A status request failed"); return false; } logger.msg(INFO, "A status request succeed"); if (repmsg.Payload() == NULL) { logger.msg(VERBOSE, "There was no response to a status request"); return false; } try { resp = dynamic_cast(repmsg.Payload()); } catch (std::exception&) {} if (resp == NULL) { logger.msg(ERROR, "The response of a status request was not a SOAP message"); delete repmsg.Payload(); return false; } } else { logger.msg(ERROR, "There is no connection chain configured"); return false; } XMLNode st, fs; (*resp)["GetActivityStatusesResponse"]["Response"] ["ActivityStatus"].New(st); state = (std::string)st.Attribute("state"); XMLNode sst; (*resp)["GetActivityStatusesResponse"]["Response"] ["ActivityStatus"]["state"].New(sst); substate = (std::string)sst; (*resp)["Fault"]["faultstring"].New(fs); faultstring = (std::string)fs; delete resp; if (faultstring != "") { logger.msg(ERROR, faultstring); return false; } else if (state == "") { logger.msg(ERROR, "The job status could not be retrieved"); return false; } else { status = state + "/" + substate; return true; } } bool UNICOREClient::listTargetSystemFactories(std::list< std::pair >& tsf) { logger.msg(INFO, "Creating and sending an index service query"); PayloadSOAP req(unicore_ns); XMLNode query = req.NewChild("rp:QueryResourceProperties"); XMLNode exp = query.NewChild("rp:QueryExpression"); exp.NewAttribute("Dialect") = "http://www.w3.org/TR/1999/REC-xpath-19991116"; exp = "//*"; PayloadSOAP *resp = NULL; client->process("http://docs.oasis-open.org/wsrf/rpw-2" "/QueryResourceProperties/QueryResourcePropertiesRequest", &req, &resp); if (resp == NULL) { logger.msg(VERBOSE, "There was no SOAP response"); return false; } XMLNodeList memberServices = resp->Body().Path("QueryResourcePropertiesResponse/Entry/MemberServiceEPR"); for (XMLNodeList::iterator it = memberServices.begin(); it != memberServices.end(); it++) { if (((std::string)(*it)["Metadata"]["InterfaceName"]).find("BESFactoryPortType") != std::string::npos) { // it.Metadata.InterfaceName should contain 'BESFactoryPortType'... 
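        // Illustrative note (not part of the original source): a matching
        // registry entry is expected to look roughly like
        //
        //   <Entry>
        //     <MemberServiceEPR>
        //       <Address>https://unicore.example.org:8080/SITE/services/BESFactory?res=default_bes_factory</Address>
        //       <Metadata>
        //         <InterfaceName>...BESFactoryPortType</InterfaceName>
        //       </Metadata>
        //     </MemberServiceEPR>
        //   </Entry>
        //
        // Only entries whose InterfaceName mentions BESFactoryPortType are
        // kept, and their Address becomes a target system factory URL.
        // The URL above is a placeholder.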
tsf.push_back(std::pair(URL((std::string)(*it)["Address"]), COMPUTING)); } } return true; } bool UNICOREClient::sstat(std::string& status) { std::string state, faultstring; logger.msg(INFO, "Creating and sending a service status request"); PayloadSOAP req(unicore_ns); XMLNode jobref = req.NewChild("bes-factory:GetFactoryAttributesDocument"); set_bes_factory_action(req, "GetFactoryAttributesDocument"); WSAHeader(req).To(rurl.str()); // Send status request PayloadSOAP *resp = NULL; if (client) { MCC_Status status = client->process("http://schemas.ggf.org/bes/2006/08/bes-factory/" "BESFactoryPortType/GetFactoryAttributesDocument", &req, &resp); if (resp == NULL) { logger.msg(VERBOSE, "There was no SOAP response"); return false; } } else if (client_entry) { Message reqmsg; Message repmsg; MessageAttributes attributes_req; attributes_req.set("SOAP:ACTION", "http://schemas.ggf.org/bes/2006/08/" "bes-factory/BESFactoryPortType/" "GetFactoryAttributesDocument"); MessageAttributes attributes_rep; MessageContext context; reqmsg.Payload(&req); reqmsg.Attributes(&attributes_req); reqmsg.Context(&context); repmsg.Attributes(&attributes_rep); repmsg.Context(&context); MCC_Status status = client_entry->process(reqmsg, repmsg); if (!status) { logger.msg(ERROR, "A service status request failed"); return false; } logger.msg(INFO, "A service status request succeeded"); if (repmsg.Payload() == NULL) { logger.msg(VERBOSE, "There was no response to a service status request"); return false; } try { resp = dynamic_cast(repmsg.Payload()); } catch (std::exception&) {} if (resp == NULL) { logger.msg(ERROR, "The response of a service status request was " "not a SOAP message"); delete repmsg.Payload(); return false; } } else { logger.msg(ERROR, "There is no connection chain configured"); return false; } XMLNode st; logger.msg(DEBUG, "Response:\n%s", (std::string)(*resp)); (*resp)["GetFactoryAttributesDocumentResponse"] ["FactoryResourceAttributesDocument"].New(st); st.GetDoc(state, true); delete resp; if (state == "") { logger.msg(ERROR, "The service status could not be retrieved"); return false; } else { status = state; return true; } } bool UNICOREClient::kill(const std::string& jobid) { std::string result, faultstring; logger.msg(INFO, "Creating and sending request to terminate a job"); PayloadSOAP req(unicore_ns); XMLNode jobref = req.NewChild("bes-factory:TerminateActivities"). 
NewChild(XMLNode(jobid)); set_bes_factory_action(req, "TerminateActivities"); WSAHeader(req).To(rurl.str()); // Send kill request PayloadSOAP *resp = NULL; if (client) { MCC_Status status = client->process("http://schemas.ggf.org/bes/2006/08/bes-factory/" "BESFactoryPortType/TerminateActivities", &req, &resp); if (resp == NULL) { logger.msg(VERBOSE, "There was no SOAP response"); return false; } } else if (client_entry) { Message reqmsg; Message repmsg; MessageAttributes attributes_req; attributes_req.set("SOAP:ACTION", "http://schemas.ggf.org/bes/2006/08/" "bes-factory/BESFactoryPortType/TerminateActivities"); MessageAttributes attributes_rep; MessageContext context; reqmsg.Payload(&req); reqmsg.Attributes(&attributes_req); reqmsg.Context(&context); repmsg.Attributes(&attributes_rep); repmsg.Context(&context); MCC_Status status = client_entry->process(reqmsg, repmsg); if (!status) { logger.msg(ERROR, "A job termination request failed"); return false; } logger.msg(INFO, "A job termination request succeed"); if (repmsg.Payload() == NULL) { logger.msg(VERBOSE, "There was no response to a job termination request"); return false; } try { resp = dynamic_cast(repmsg.Payload()); } catch (std::exception&) {} if (resp == NULL) { logger.msg(ERROR, "The response of a job termination request was " "not a SOAP message"); delete repmsg.Payload(); return false; } } else { logger.msg(ERROR, "There is no connection chain configured"); return false; } XMLNode cancelled, fs; (*resp)["TerminateActivitiesResponse"] ["Response"]["Cancelled"].New(cancelled); result = (std::string)cancelled; (*resp)["Fault"]["faultstring"].New(fs); faultstring = (std::string)fs; delete resp; if (faultstring != "") { logger.msg(ERROR, faultstring); return false; } if (result != "true") { logger.msg(ERROR, "Job termination failed"); return false; } return true; } bool UNICOREClient::clean(const std::string& jobid) { std::string result, faultstring; logger.msg(INFO, "Creating and sending request to terminate a job"); PayloadSOAP req(unicore_ns); XMLNode op = req.NewChild("a-rex:ChangeActivityStatus"); XMLNode jobref = op.NewChild(XMLNode(jobid)); XMLNode jobstate = op.NewChild("a-rex:NewStatus"); jobstate.NewAttribute("bes-factory:state") = "Finished"; jobstate.NewChild("a-rex:state") = "Deleted"; // Send clean request PayloadSOAP *resp = NULL; if (client) { MCC_Status status = client->process("", &req, &resp); if (resp == NULL) { logger.msg(VERBOSE, "There was no SOAP response"); return false; } } else if (client_entry) { Message reqmsg; Message repmsg; MessageAttributes attributes_req; MessageAttributes attributes_rep; MessageContext context; reqmsg.Payload(&req); reqmsg.Attributes(&attributes_req); reqmsg.Context(&context); repmsg.Attributes(&attributes_rep); repmsg.Context(&context); MCC_Status status = client_entry->process(reqmsg, repmsg); if (!status) { logger.msg(ERROR, "A job cleaning request failed"); return false; } logger.msg(INFO, "A job cleaning request succeed"); if (repmsg.Payload() == NULL) { logger.msg(VERBOSE, "There was no response to a job cleaning request"); return false; } try { resp = dynamic_cast(repmsg.Payload()); } catch (std::exception&) {} if (resp == NULL) { logger.msg(ERROR, "The response of a job cleaning request was not " "a SOAP message"); delete repmsg.Payload(); return false; } } else { logger.msg(ERROR, "There is no connection chain configured"); return false; } if (!((*resp)["ChangeActivityStatusResponse"])) { XMLNode fs; (*resp)["Fault"]["faultstring"].New(fs); faultstring = (std::string)fs; if 
(faultstring != "") { delete resp; logger.msg(ERROR, faultstring); return false; } if (result != "true") { delete resp; logger.msg(ERROR, "Job termination failed"); return false; } } delete resp; return true; } } nordugrid-arc-5.4.2/src/hed/acc/UNICORE/PaxHeaders.7502/UNICOREClient.h0000644000000000000000000000012212045235201022762 xustar000000000000000027 mtime=1351957121.246634 25 atime=1513200574.3847 30 ctime=1513200660.366751971 nordugrid-arc-5.4.2/src/hed/acc/UNICORE/UNICOREClient.h0000644000175000002070000001025512045235201023034 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __UNICORE_CLIENT__ #define __UNICORE_CLIENT__ #include #include #include #include #include namespace Arc { class ClientSOAP; class Config; class Loader; //class Logger; class MCC; class MCCConfig; //! A client class for the UNICORE service. /*! This class implements client functionality for the UNICORE service. It provides methods for three operations on an UNICORE service: - Job submission - Job status queries - Job termination Internal requirement: for correct behaviour instances of this class should not further instanciate it. */ class UNICOREClient { public: //! The constructor for the UNICOREClient class. /*! This is the constructor for the UNICOREClient class. It creates an UNICORE client that corresponds to a specific UNICORE service. @param url The URL of the UNICORE service. @param cfg An MCC configuration object. @param timeout Timeout of the connection to the client. The default value is 20000 milliseconds based on this document: http://www.unicore.eu/documentation/manuals/unicore6/files/RichClient.pdf */ UNICOREClient(const URL& url, const MCCConfig& cfg, int timeout=20); //! The destructor. ~UNICOREClient(); //! Submit a job. /*! This method submits a job to the UNICORE service corresponding to this client instance. @param jsdl_file An input stream from which the JSDL file for the job can be read. @param jobid The Job ID of the the submitted job. @return true on success */ bool submit(const JobDescription& jobdesc, XMLNode& id, bool delegate = false); //! Query the status of a job. /*! This method queries the UNICORE service about the status of a job. @param jobid The Job ID of the job. @param status The status of the job. @return true on success */ bool stat(const std::string& jobid, std::string& status); //! Terminates a job. /*! This method sends a request to the UNICORE service to terminate a job. @param jobid The Job ID of the job to terminate. @return true on success */ bool kill(const std::string& jobid); //! Removes a job. /*! This method sends a request to the UNICORE service to remove a job from it's pool. If job is running it will be killed by service as well. @param jobid The Job ID of the job to remove. @return true on success */ bool clean(const std::string& jobid); //! Query the status of a service. /*! This method queries the UNICORE service about it's status. @param status The XML document representing status of the service. @return true on success */ bool sstat(std::string& status); /*! This method queries the UNICORE registry about BES compliant execution services. @param tsf A list of different execution services returned from the registry. This variable will be overwritten by the method. @return true on success */ bool listTargetSystemFactories(std::list< std::pair >& tsf); ClientSOAP* SOAP(void) { return client; } private: bool uasStartJob(); //! The configuration. /*! 
A configuration object containing information about how to set up this UNICORE client. */ MCCConfig client_config; //! The loader. /*! A loader object that loads and connects the appropriate components according to the configuration object. */ Loader *client_loader; ClientSOAP *client; //! The entry into the client message chain. /*! This is a pointer to the message chain components (MCC) where messages sent from this client enters the message chain. */ MCC *client_entry; //! Namespaces. /*! A map containing namespaces. */ NS unicore_ns; URL rurl; std::string proxyPath; //! A logger for the UNICORE client. /*! This is a logger to which all logging messages from the UNICORE client are sent. */ static Logger logger; }; } #endif nordugrid-arc-5.4.2/src/hed/acc/UNICORE/PaxHeaders.7502/JobControllerPluginUNICORE.cpp0000644000000000000000000000012212701730002026032 xustar000000000000000027 mtime=1460121602.429396 25 atime=1513200574.3857 30 ctime=1513200660.363751935 nordugrid-arc-5.4.2/src/hed/acc/UNICORE/JobControllerPluginUNICORE.cpp0000644000175000002070000001503112701730002026101 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "UNICOREClient.h" #include "JobStateUNICORE.h" #include "JobControllerPluginUNICORE.h" namespace Arc { Logger JobControllerPluginUNICORE::logger(Logger::getRootLogger(), "JobControllerPlugin.UNICORE"); bool JobControllerPluginUNICORE::isEndpointNotSupported(const std::string& endpoint) const { const std::string::size_type pos = endpoint.find("://"); return pos != std::string::npos && lower(endpoint.substr(0, pos)) != "http" && lower(endpoint.substr(0, pos)) != "https"; } void JobControllerPluginUNICORE::UpdateJobs(std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { MCCConfig cfg; usercfg->ApplyToConfig(cfg); for (std::list::iterator it = jobs.begin(); it != jobs.end(); ++it) { URL url((*it)->JobStatusURL); XMLNode id((*it)->IDFromEndpoint); ClientSOAP client(cfg, url, usercfg->Timeout()); logger.msg(INFO, "Creating and sending a status request"); NS ns; ns["bes-factory"] = "http://schemas.ggf.org/bes/2006/08/bes-factory"; ns["wsa"] = "http://www.w3.org/2005/08/addressing"; ns["jsdl"] = "http://schemas.ggf.org/jsdl/2005/11/jsdl"; PayloadSOAP req(ns); XMLNode jobref = req.NewChild("bes-factory:GetActivityStatuses"). 
NewChild(id); WSAHeader(req).Action("http://schemas.ggf.org/bes/2006/08/bes-factory/BESFactoryPortType/GetActivityStatuses"); WSAHeader(req).To(url.str()); // Send status request PayloadSOAP *resp = NULL; MCC_Status status = client.process("http://schemas.ggf.org/bes/2006/08/bes-factory/" "BESFactoryPortType/GetActivityStatuses", &req, &resp); if (resp == NULL) { logger.msg(VERBOSE, "There was no SOAP response"); IDsNotProcessed.push_back((*it)->JobID); continue; } XMLNode st, fs; (*resp)["GetActivityStatusesResponse"]["Response"] ["ActivityStatus"].New(st); std::string state = (std::string)st.Attribute("state"); (*resp)["Fault"]["faultstring"].New(fs); std::string faultstring = (std::string)fs; // delete resp; if (!faultstring.empty()) { logger.msg(ERROR, faultstring); IDsNotProcessed.push_back((*it)->JobID); continue; } if (state.empty()) { logger.msg(ERROR, "Failed retrieving job status information"); IDsNotProcessed.push_back((*it)->JobID); continue; } IDsProcessed.push_back((*it)->JobID); (*it)->State = JobStateUNICORE(state); } } bool JobControllerPluginUNICORE::CleanJobs(const std::list& jobs, std::list&, std::list& IDsNotProcessed, bool) const { // MCCConfig cfg; // usercfg->ApplyToConfig(cfg); // PathIterator pi(job.JobID.Path(), true); // URL url(job.JobID); // url.ChangePath(*pi); // AREXClient ac(url, cfg); // NS ns; // ns["a-rex"] = "http://www.nordugrid.org/schemas/a-rex"; // ns["bes-factory"] = "http://schemas.ggf.org/bes/2006/08/bes-factory"; // ns["wsa"] = "http://www.w3.org/2005/08/addressing"; // ns["jsdl"] = "http://schemas.ggf.org/jsdl/2005/11/jsdl"; // ns["jsdl-posix"] = "http://schemas.ggf.org/jsdl/2005/11/jsdl-posix"; // ns["jsdl-arc"] = "http://www.nordugrid.org/ws/schemas/jsdl-arc"; // ns["jsdl-hpcpa"] = "http://schemas.ggf.org/jsdl/2006/07/jsdl-hpcpa"; // XMLNode id(ns, "ActivityIdentifier"); // id.NewChild("wsa:Address") = url.str(); // id.NewChild("wsa:ReferenceParameters").NewChild("a-rex:JobID") = pi.Rest(); // std::string idstr; // id.GetXML(idstr); // return ac.clean(idstr); for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { logger.msg(INFO, "Cleaning of UNICORE jobs is not supported"); IDsNotProcessed.push_back((*it)->JobID); } return false; } bool JobControllerPluginUNICORE::CancelJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { // MCCConfig cfg; // usercfg->ApplyToConfig(cfg); // PathIterator pi(job.JobID.Path(), true); // URL url(job.JobID); // url.ChangePath(*pi); // AREXClient ac(url, cfg); // NS ns; // ns["a-rex"] = "http://www.nordugrid.org/schemas/a-rex"; // ns["bes-factory"] = "http://schemas.ggf.org/bes/2006/08/bes-factory"; // ns["wsa"] = "http://www.w3.org/2005/08/addressing"; // ns["jsdl"] = "http://schemas.ggf.org/jsdl/2005/11/jsdl"; // ns["jsdl-posix"] = "http://schemas.ggf.org/jsdl/2005/11/jsdl-posix"; // ns["jsdl-arc"] = "http://www.nordugrid.org/ws/schemas/jsdl-arc"; // ns["jsdl-hpcpa"] = "http://schemas.ggf.org/jsdl/2006/07/jsdl-hpcpa"; // XMLNode id(ns, "ActivityIdentifier"); // id.NewChild("wsa:Address") = url.str(); // id.NewChild("wsa:ReferenceParameters").NewChild("a-rex:JobID") = pi.Rest(); // std::string idstr; // id.GetXML(idstr); // return ac.kill(idstr); for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { logger.msg(INFO, "Canceling of UNICORE jobs is not supported"); IDsNotProcessed.push_back((*it)->JobID); } return false; } bool JobControllerPluginUNICORE::RenewJobs(const std::list& jobs, std::list&, std::list& 
IDsNotProcessed, bool) const { for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { logger.msg(ERROR, "Renewal of UNICORE jobs is not supported"); IDsNotProcessed.push_back((*it)->JobID); } return false; } bool JobControllerPluginUNICORE::ResumeJobs(const std::list& jobs, std::list&, std::list& IDsNotProcessed, bool) const { for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { logger.msg(ERROR, "Resumation of UNICORE jobs is not supported"); IDsNotProcessed.push_back((*it)->JobID); } return false; } bool JobControllerPluginUNICORE::GetJobDescription(const Job& /* job */, std::string& /* desc_str */) const { return false; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/PaxHeaders.7502/PythonBroker0000644000000000000000000000013213214316024021616 xustar000000000000000030 mtime=1513200660.341751666 30 atime=1513200668.721854157 30 ctime=1513200660.341751666 nordugrid-arc-5.4.2/src/hed/acc/PythonBroker/0000755000175000002070000000000013214316024021741 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/acc/PythonBroker/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712107135420023732 xustar000000000000000027 mtime=1360837392.184106 30 atime=1513200594.487946248 30 ctime=1513200660.337751616 nordugrid-arc-5.4.2/src/hed/acc/PythonBroker/Makefile.am0000644000175000002070000000126012107135420023773 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libaccPythonBroker.la libaccPythonBroker_la_SOURCES = PythonBrokerPlugin.cpp PythonBrokerPlugin.h libaccPythonBroker_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(PYTHON_CFLAGS) $(AM_CXXFLAGS) libaccPythonBroker_la_LIBADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(PYTHON_LIBS) libaccPythonBroker_la_LDFLAGS = -no-undefined -avoid-version -module exampledir = $(pkgdatadir)/examples/PythonBroker example_DATA = SampleBroker.py ACIXBroker.py EXTRA_DIST = $(example_DATA) nordugrid-arc-5.4.2/src/hed/acc/PythonBroker/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315722023743 xustar000000000000000030 mtime=1513200594.532946798 29 atime=1513200648.87561143 30 ctime=1513200660.338751629 nordugrid-arc-5.4.2/src/hed/acc/PythonBroker/Makefile.in0000644000175000002070000006453713214315722024031 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/acc/PythonBroker DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(exampledir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libaccPythonBroker_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) am_libaccPythonBroker_la_OBJECTS = \ libaccPythonBroker_la-PythonBrokerPlugin.lo libaccPythonBroker_la_OBJECTS = $(am_libaccPythonBroker_la_OBJECTS) libaccPythonBroker_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libaccPythonBroker_la_CXXFLAGS) $(CXXFLAGS) \ $(libaccPythonBroker_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libaccPythonBroker_la_SOURCES) DIST_SOURCES = $(libaccPythonBroker_la_SOURCES) DATA = $(example_DATA) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ 
GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES 
= @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libaccPythonBroker.la libaccPythonBroker_la_SOURCES = PythonBrokerPlugin.cpp PythonBrokerPlugin.h libaccPythonBroker_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(PYTHON_CFLAGS) $(AM_CXXFLAGS) libaccPythonBroker_la_LIBADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(PYTHON_LIBS) libaccPythonBroker_la_LDFLAGS = -no-undefined -avoid-version -module exampledir = $(pkgdatadir)/examples/PythonBroker example_DATA = SampleBroker.py ACIXBroker.py EXTRA_DIST = $(example_DATA) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ 
exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/acc/PythonBroker/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/acc/PythonBroker/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libaccPythonBroker.la: $(libaccPythonBroker_la_OBJECTS) $(libaccPythonBroker_la_DEPENDENCIES) $(libaccPythonBroker_la_LINK) -rpath $(pkglibdir) $(libaccPythonBroker_la_OBJECTS) $(libaccPythonBroker_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccPythonBroker_la-PythonBrokerPlugin.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo 
$(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libaccPythonBroker_la-PythonBrokerPlugin.lo: PythonBrokerPlugin.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccPythonBroker_la_CXXFLAGS) $(CXXFLAGS) -MT libaccPythonBroker_la-PythonBrokerPlugin.lo -MD -MP -MF $(DEPDIR)/libaccPythonBroker_la-PythonBrokerPlugin.Tpo -c -o libaccPythonBroker_la-PythonBrokerPlugin.lo `test -f 'PythonBrokerPlugin.cpp' || echo '$(srcdir)/'`PythonBrokerPlugin.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccPythonBroker_la-PythonBrokerPlugin.Tpo $(DEPDIR)/libaccPythonBroker_la-PythonBrokerPlugin.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='PythonBrokerPlugin.cpp' object='libaccPythonBroker_la-PythonBrokerPlugin.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccPythonBroker_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccPythonBroker_la-PythonBrokerPlugin.lo `test -f 'PythonBrokerPlugin.cpp' || echo '$(srcdir)/'`PythonBrokerPlugin.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-exampleDATA: $(example_DATA) @$(NORMAL_INSTALL) test -z "$(exampledir)" || $(MKDIR_P) "$(DESTDIR)$(exampledir)" @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(exampledir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(exampledir)" || exit $$?; \ done uninstall-exampleDATA: @$(NORMAL_UNINSTALL) @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(exampledir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(exampledir)" && rm -f $$files ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ 
done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(exampledir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-exampleDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-exampleDATA uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exampleDATA install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-pkglibLTLIBRARIES install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am \ uninstall-exampleDATA uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/acc/PythonBroker/PaxHeaders.7502/PythonBrokerPlugin.cpp0000644000000000000000000000012212675602216026216 xustar000000000000000027 mtime=1459029134.924374 25 atime=1513200574.4237 30 ctime=1513200660.339751641 nordugrid-arc-5.4.2/src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp0000644000175000002070000002752112675602216026274 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif // Include order is like this to avoid blind redefinition of _POSIX_C_SOURCE // in Python.h. In features.h included from ExecutionTarget.h there is // protection against redefinition. #include "PythonBrokerPlugin.h" #include extern "C" { typedef struct { PyObject_HEAD void *ptr; // There are more members in this stuct, but they are not needed here... 
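  // (Added note, an assumption based on the struct's name and shape:) this is a
  // minimal mirror of SWIG's proxy-object layout, where 'ptr' holds the wrapped
  // C++ pointer; only that first member is of interest to this plugin.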
} PySwigObject; } namespace Arc { Logger PythonBrokerPlugin::logger(Logger::getRootLogger(), "Broker.PythonBrokerPlugin"); PyThreadState* PythonBrokerPlugin::tstate = NULL; int PythonBrokerPlugin::refcount = 0; Glib::Mutex PythonBrokerPlugin::lock; class PythonLock { public: PythonLock() : gstate(PyGILState_Ensure()) {} ~PythonLock() { PyGILState_Release(gstate); } private: PyGILState_STATE gstate; }; class PyObjectP { public: PyObjectP(PyObject *obj) : obj(obj) {} ~PyObjectP() { if (obj) { Py_DECREF(obj); } } operator bool() { return obj; } bool operator!() { return !obj; } operator PyObject*() { return obj; } private: PyObject *obj; }; Plugin* PythonBrokerPlugin::Instance(PluginArgument *arg) { BrokerPluginArgument *brokerarg = dynamic_cast(arg); if (!brokerarg) return NULL; lock.lock(); // Initialize the Python Interpreter if (!Py_IsInitialized()) { #ifdef HAVE_PYTHON_INITIALIZE_EX Py_InitializeEx(0); // Python does not handle signals #endif PyEval_InitThreads(); // Main thread created and lock acquired tstate = PyThreadState_Get(); // Get current thread if (!tstate) { logger.msg(ERROR, "Failed to initialize main Python thread"); return NULL; } } else { if (!tstate) { logger.msg(ERROR, "Main Python thread was not initialized"); return NULL; } PyEval_AcquireThread(tstate); } refcount++; lock.unlock(); logger.msg(DEBUG, "Loading Python broker (%i)", refcount); PythonBrokerPlugin *broker = new PythonBrokerPlugin(brokerarg); PyEval_ReleaseThread(tstate); // Release current thread if (!broker->valid) { delete broker; return NULL; } return broker; } PythonBrokerPlugin::PythonBrokerPlugin(BrokerPluginArgument* parg) : BrokerPlugin(parg), arc_module(NULL), arc_userconfig_klass(NULL), arc_jobrepr_klass(NULL), arc_xtarget_klass(NULL), module(NULL), klass(NULL), object(NULL), valid(false) { if (!tstate) { logger.msg(ERROR, "Main Python thread is not initialized"); return; } logger.msg(VERBOSE, "PythonBroker init"); std::string args = uc.Broker().second; std::string::size_type pos = args.find(':'); if (pos != std::string::npos) args.resize(pos); pos = args.rfind('.'); if (pos == std::string::npos) { logger.msg(ERROR, "Invalid class name. 
The broker argument for the PythonBroker should be\n" " Filename.Class.args (args is optional), for example SampleBroker.MyBroker"); return; } std::string module_name = args.substr(0, pos); std::string class_name = args.substr(pos + 1); logger.msg(VERBOSE, "Class name: %s", class_name); logger.msg(VERBOSE, "Module name: %s", module_name); // Import arc python module #if PY_MAJOR_VERSION >= 3 PyObjectP py_arc_module_name = PyUnicode_FromString("arc"); #else PyObjectP py_arc_module_name = PyString_FromString("arc"); #endif if (!py_arc_module_name) { logger.msg(ERROR, "Cannot convert ARC module name to Python string"); if (PyErr_Occurred()) PyErr_Print(); return; } arc_module = PyImport_Import(py_arc_module_name); if (!arc_module) { logger.msg(ERROR, "Cannot import ARC module"); if (PyErr_Occurred()) PyErr_Print(); return; } // Get dictionary of arc module content (borrowed reference) PyObject *arc_dict = PyModule_GetDict(arc_module); if (!arc_dict) { logger.msg(ERROR, "Cannot get dictionary of ARC module"); if (PyErr_Occurred()) PyErr_Print(); return; } // Get the Config class (borrowed reference) arc_userconfig_klass = PyDict_GetItemString(arc_dict, "UserConfig"); if (!arc_userconfig_klass) { logger.msg(ERROR, "Cannot find ARC UserConfig class"); if (PyErr_Occurred()) PyErr_Print(); return; } // check is it really a class if (!PyCallable_Check(arc_userconfig_klass)) { logger.msg(ERROR, "UserConfig class is not an object"); return; } // Get the JobDescription class (borrowed reference) arc_jobrepr_klass = PyDict_GetItemString(arc_dict, "JobDescription"); if (!arc_jobrepr_klass) { logger.msg(ERROR, "Cannot find ARC JobDescription class"); if (PyErr_Occurred()) PyErr_Print(); return; } // check is it really a class if (!PyCallable_Check(arc_jobrepr_klass)) { logger.msg(ERROR, "JobDescription class is not an object"); return; } // Get the ExecutionTarget class (borrowed reference) arc_xtarget_klass = PyDict_GetItemString(arc_dict, "ExecutionTarget"); if (!arc_xtarget_klass) { logger.msg(ERROR, "Cannot find ARC ExecutionTarget class"); if (PyErr_Occurred()) PyErr_Print(); return; } // check is it really a class if (!PyCallable_Check(arc_xtarget_klass)) { logger.msg(ERROR, "ExecutionTarget class is not an object"); return; } // Import custom broker module #if PY_MAJOR_VERSION >= 3 PyObjectP py_module_name = PyUnicode_FromString(module_name.c_str()); #else PyObjectP py_module_name = PyString_FromString(module_name.c_str()); #endif if (!py_module_name) { logger.msg(ERROR, "Cannot convert module name to Python string"); if (PyErr_Occurred()) PyErr_Print(); return; } module = PyImport_Import(py_module_name); if (!module) { logger.msg(ERROR, "Cannot import module"); if (PyErr_Occurred()) PyErr_Print(); return; } // Get dictionary of module content (borrowed reference) PyObject *dict = PyModule_GetDict(module); if (!dict) { logger.msg(ERROR, "Cannot get dictionary of custom broker module"); if (PyErr_Occurred()) PyErr_Print(); return; } // Get the class (borrowed reference) klass = PyDict_GetItemString(dict, (char*)class_name.c_str()); if (!klass) { logger.msg(ERROR, "Cannot find custom broker class"); if (PyErr_Occurred()) PyErr_Print(); return; } // check is it really a class if (!PyCallable_Check(klass)) { logger.msg(ERROR, "%s class is not an object", class_name); return; } PyObjectP ucarg = Py_BuildValue("(l)", (long int)&uc); if (!ucarg) { logger.msg(ERROR, "Cannot create UserConfig argument"); if (PyErr_Occurred()) PyErr_Print(); return; } PyObject *py_uc = 
PyObject_CallObject(arc_userconfig_klass, ucarg); if (!py_uc) { logger.msg(ERROR, "Cannot convert UserConfig to Python object"); if (PyErr_Occurred()) PyErr_Print(); return; } PyObjectP arg = Py_BuildValue("(O)", py_uc); if (!arg) { logger.msg(ERROR, "Cannot create argument of the constructor"); if (PyErr_Occurred()) PyErr_Print(); return; } // create instance of class object = PyObject_CallObject(klass, arg); if (!object) { logger.msg(ERROR, "Cannot create instance of Python class"); if (PyErr_Occurred()) PyErr_Print(); return; } logger.msg(VERBOSE, "Python broker constructor called (%d)", refcount); valid = true; } PythonBrokerPlugin::~PythonBrokerPlugin() { if (module) { Py_DECREF(module); } if (arc_module) { Py_DECREF(arc_module); } lock.lock(); refcount--; // Finish the Python Interpreter if (refcount == 0) { PyEval_AcquireThread(tstate); Py_Finalize(); } lock.unlock(); logger.msg(VERBOSE, "Python broker destructor called (%d)", refcount); } bool PythonBrokerPlugin::operator()(const ExecutionTarget& lhs, const ExecutionTarget& rhs) const { PythonLock pylock; // Convert ExecutionTarget object to python object PyObjectP arg_lhs = Py_BuildValue("(l)", &lhs); if (!arg_lhs) { logger.msg(ERROR, "Cannot create ExecutionTarget argument"); if (PyErr_Occurred()) PyErr_Print(); return false; } PyObjectP py_lhs = PyObject_CallObject(arc_xtarget_klass, arg_lhs); if (!py_lhs) { logger.msg(ERROR, "Cannot convert ExecutionTarget (%s) to python object", lhs.ComputingEndpoint->URLString); if (PyErr_Occurred()) PyErr_Print(); return false; } PyObjectP arg_rhs = Py_BuildValue("(l)", &rhs); if (!arg_rhs) { logger.msg(ERROR, "Cannot create ExecutionTarget argument"); if (PyErr_Occurred()) PyErr_Print(); return false; } PyObjectP py_rhs = PyObject_CallObject(arc_xtarget_klass, arg_rhs); if (!py_rhs) { logger.msg(ERROR, "Cannot convert ExecutionTarget (%s) to python object", rhs.ComputingEndpoint->URLString); if (PyErr_Occurred()) PyErr_Print(); return false; } PyObjectP py_status = PyObject_CallMethod(object, (char*)"lessthan", (char*)"(OO)", (PyObject*)py_lhs, (PyObject*)py_rhs); if (!py_status) { if (PyErr_Occurred()) PyErr_Print(); return false; } return PyBool_Check((PyObject*)py_status) && PyObject_IsTrue((PyObject*)py_status); } bool PythonBrokerPlugin::match(const ExecutionTarget& et) const { PythonLock pylock; PyObjectP arg = Py_BuildValue("(l)", &et); if (!arg) { logger.msg(ERROR, "Cannot create ExecutionTarget argument"); if (PyErr_Occurred()) PyErr_Print(); return false; } PyObjectP py_xtarget = PyObject_CallObject(arc_xtarget_klass, arg); if (!py_xtarget) { logger.msg(ERROR, "Cannot convert ExecutionTarget (%s) to python object", et.ComputingEndpoint->URLString); if (PyErr_Occurred()) PyErr_Print(); return false; } PyObjectP py_status = PyObject_CallMethod(object, (char*)"match", (char*)"(O)", (PyObject*)py_xtarget); if (!py_status) { if (PyErr_Occurred()) PyErr_Print(); return false; } return PyBool_Check((PyObject*)py_status) && PyObject_IsTrue((PyObject*)py_status); } void PythonBrokerPlugin::set(const JobDescription& j) const { PythonLock pylock; PyObjectP arg = Py_BuildValue("(l)", &j); if (!arg) { logger.msg(ERROR, "Cannot create JobDescription argument"); if (PyErr_Occurred()) PyErr_Print(); return; } PyObjectP py_jobdesc = PyObject_CallObject(arc_jobrepr_klass, arg); if (!py_jobdesc) { logger.msg(ERROR, "Cannot convert JobDescription to python object"); if (PyErr_Occurred()) PyErr_Print(); return; } PyObjectP py_status = PyObject_CallMethod(object, (char*)"set", (char*)"(O)", 
(PyObject*)py_jobdesc); if (!py_status) { if (PyErr_Occurred()) PyErr_Print(); return; } return; } } // namespace Arc extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "PythonBroker", "HED:BrokerPlugin", istring("Do sorting using user created python broker"), 0, &Arc::PythonBrokerPlugin::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/acc/PythonBroker/PaxHeaders.7502/ACIXBroker.py0000644000000000000000000000012212107452206024140 xustar000000000000000027 mtime=1360942214.556052 25 atime=1513200574.4237 30 ctime=1513200660.341751666 nordugrid-arc-5.4.2/src/hed/acc/PythonBroker/ACIXBroker.py0000644000175000002070000002002012107452206024201 0ustar00mockbuildmock00000000000000''' Broker using ACIX information. This broker queries ACIX for cached locations of input files specified in the job. It then matches those locations against execution targets and ranks the targets by the number of cached files they have. Implements the following methods from BrokerPlugin: - bool operator() (const ExecutionTarget&, const ExecutionTarget&) const; - Used for sorting targets, here the method is lessthan() - bool match(const ExecutionTarget& et) const; - Returns whether the target matches - void set(const JobDescription& _j) const; - Set the job description which is to be brokered Invoke the broker with: arcsub -b PythonBroker:ACIXBroker.ACIXBroker:CacheIndexURL e.g. arcsub -b PythonBroker:ACIXBroker.ACIXBroker:https://cacheindex.ndgf.org:6443/data/index or by setting in client.conf: [common] brokername=PythonBroker brokerarguments=ACIXBroker.ACIXBroker:https://cacheindex.ndgf.org:6443/data/index The PYTHONPATH must contain the path to this file, with a default installation this is /usr/share/arc/examples/PythonBroker The log level of this broker can only be set in client.conf, because that is the only way that verbosity is set in the UserConfig object, eg verbosity=DEBUG The default level is WARNING. ''' import random import httplib import re import logging # Check if we can use json module (python >= 2.6 only) try: import json except ImportError: json = False import arc class ACIXBroker: def __init__(self, usercfg): ''' Set up internal fields and get information from UserConfig. ''' self.inputfiles = [] # list of remote input files to check self.cachelocations = {} # dict of files to cache locations (hostnames) self.targetranking = {} # dict of hostname to ranking (no of cached files) self.cacheindex = '' # URL of ACIX index service self.uc = usercfg # UserConfig object loglevel = 'WARNING' if usercfg.Verbosity(): loglevel = usercfg.Verbosity().upper() # Map ARC to python log levels if loglevel == 'FATAL': loglevel = 'CRITICAL' if loglevel == 'VERBOSE': loglevel = 'INFO' logging.basicConfig(format='%(levelname)s: ACIXBroker: %(message)s', level=getattr(logging, loglevel.upper())) brokerarg = usercfg.Broker()[1] if brokerarg.find(':') != -1: self.cacheindex = brokerarg[brokerarg.find(':') + 1:] logging.info('cache index: %s', self.cacheindex) # Check ACIX URL is valid (procotol, host, port, path) = self.splitURL(self.cacheindex) if not host or not path: logging.error('Invalid URL for ACIX index: %s', self.cacheindex) self.cacheindex = '' def match(self, target): ''' Check the number of cache files at target. All targets which match the job description are acceptable even if no files are cached. We assume only one A-REX per hostname, so multiple interfaces at the same hostname are ignored. 
''' (procotol, host, port, path) = self.splitURL(target.ComputingEndpoint.URLString) if not host or host in self.targetranking: return True # First do generic matching if not arc.Broker.genericMatch(target, self.job, self.uc): return False cached = 0 for file in self.cachelocations: if host in self.cachelocations[file]: cached += 1 self.targetranking[host] = cached logging.debug('host: %s, cached files: %i', host, cached) # Accept target in all cases even if no files are cached return True def set(self, jobdescription): ''' Extract the input files from the job description and call ACIX to find cached locations. ''' self.job = jobdescription if not self.job or not self.cacheindex: return self.getInputFiles(self.job.DataStaging.InputFiles) if not self.inputfiles: return self.queryACIX(0) def lessthan(self, lhs, rhs): ''' Used to sort targets ''' (lprocotol, lhost, lport, lpath) = self.splitURL(lhs.ComputingEndpoint.URLString) (rprocotol, rhost, rport, rpath) = self.splitURL(rhs.ComputingEndpoint.URLString) if not lhost or not rhost or lhost not in self.targetranking or rhost not in self.targetranking: return random.randint(0, 1) return self.targetranking[lhost] > self.targetranking[rhost] # Internal methods def getInputFiles(self, inputfilelist): ''' Extract input files and add to our list. ''' for inputfile in inputfilelist: # remote input files only if inputfile.Sources and inputfile.Sources[0].Protocol() != 'file': # Some job desc languages allow multiple sources - for now choose the first url = inputfile.Sources[0] # Use the same string representation of URL as the cache code # does, including making sure LFC guids are used properly canonic_url = url.plainstr() if url.MetaDataOption("guid"): canonic_url += ":guid=" + url.MetaDataOption("guid") logging.debug('input file: %s', canonic_url) self.inputfiles.append(canonic_url) def queryACIX(self, index): ''' Call ACIX index to get cached locations of self.inputfiles[index:]. It seems like ACIX has a limit of 64k character URLs, so if we exceed that then call recursively. ''' maxACIXurl = 60000 (procotol, host, port, path) = self.splitURL(self.cacheindex) # add URLs to path path += '?url=' + self.inputfiles[index] index += 1 for file in self.inputfiles[index:]: path += ',' + file index += 1 if len(path) > maxACIXurl and index != len(self.inputfiles): logging.debug('URL length (%i) for ACIX query exceeds maximum (%i), will call in batches', len(path), maxACIXurl) self.queryACIX(index) break conn = httplib.HTTPSConnection(host, port) try: conn.request('GET', path) except Exception, e: logging.error('Error connecting to service at %s: %s', host, str(e)) return try: resp = conn.getresponse() except httplib.HTTPException, e: logging.error('Bad response from ACIX: %s', str(e)) return logging.info('ACIX returned %s %s', resp.status, resp.reason) data = resp.read() conn.close() logging.debug('ACIX response: %s', data) if json: try: self.cachelocations.update(json.loads(data)) except ValueError: logging.error('Unexpected response from ACIX: %s', data) else: # Using eval() is unsafe but it will only be used on old OSes # (like RHEL5). At least check response looks like a python dictionary if data[0] != '{' or data[-1] != '}': logging.error('Unexpected response from ACIX: %s', data) else: self.cachelocations.update(eval(data)) def splitURL(self, url): """ Split url into (protocol, host, port, path) and return this tuple. 
""" match = re.match('(\w*)://([^/?#:]*):?(\d*)/?(.*)', url) if match is None: logging.warning('URL %s is malformed', url) return ('', '', 0, '') port_s = match.group(3) if port_s: port = int(port_s) else: port = None urltuple = (match.group(1), match.group(2), port, '/'+match.group(4)) return urltuple nordugrid-arc-5.4.2/src/hed/acc/PythonBroker/PaxHeaders.7502/PythonBrokerPlugin.h0000644000000000000000000000012212106724427025661 xustar000000000000000027 mtime=1360767255.383414 25 atime=1513200574.4207 30 ctime=1513200660.340751653 nordugrid-arc-5.4.2/src/hed/acc/PythonBroker/PythonBrokerPlugin.h0000644000175000002070000000171712106724427025736 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_PYTHONBROKERPLUGIN_H__ #define __ARC_PYTHONBROKERPLUGIN_H__ #include #include #include namespace Arc { class PythonBrokerPlugin : public BrokerPlugin { public: PythonBrokerPlugin(BrokerPluginArgument* parg); virtual ~PythonBrokerPlugin(); static Plugin* Instance(PluginArgument *arg); bool operator()(const ExecutionTarget&, const ExecutionTarget&) const; bool match(const ExecutionTarget&) const; void set(const JobDescription&) const; private: PyObject *arc_module; PyObject *arc_userconfig_klass; PyObject *arc_jobrepr_klass; PyObject *arc_xtarget_klass; PyObject *module; PyObject *klass; PyObject *object; bool valid; static Logger logger; static PyThreadState *tstate; static int refcount; static Glib::Mutex lock; }; } // namespace Arc #endif // __ARC_PYTHONBROKERPLUGIN_H__ nordugrid-arc-5.4.2/src/hed/acc/PythonBroker/PaxHeaders.7502/SampleBroker.py0000644000000000000000000000012212107452206024635 xustar000000000000000027 mtime=1360942214.556052 25 atime=1513200574.4237 30 ctime=1513200660.341751666 nordugrid-arc-5.4.2/src/hed/acc/PythonBroker/SampleBroker.py0000644000175000002070000000407012107452206024705 0ustar00mockbuildmock00000000000000''' Example implementation of a custom broker written in python. Invoke the broker with: arcsub -b PythonBroker:SampleBroker.MyBroker:brokerargs ''' import arc import random class MyBroker: def __init__(self, usercfg): # Extract some useful information from the broker configuration self.uc = usercfg self.proxypath = usercfg.ProxyPath() self.certificatepath = usercfg.CertificatePath() self.keypath = usercfg.KeyPath() self.cacertificatesdir = usercfg.CACertificatesDirectory() pos = usercfg.Broker()[1].find(':') if pos > 0: self.args = usercfg.Broker()[1][pos + 1:] else: self.args = "" def set(self, job): # Either set the job description as a member object, or extract # the relevant information. For generic matchmaking a job member is # needed. self.job = job # Only printing below for clarity. 
print 'JobName:', job.Identification.JobName print 'Executable:', job.Application.Executable.Path for i in range(job.Application.Executable.Argument.size()): print 'Argument', i, ':', job.Application.Executable.Argument[i] def match(self, target): # Some printouts - only as an example print 'Proxy Path:', self.proxypath print 'Certificate Path:', self.certificatepath print 'Key Path:', self.keypath print 'CA Certificates Dir:', self.cacertificatesdir print 'Broker arguments:', self.args # Broker implementation starts here print 'Targets before brokering:' print target.ComputingEndpoint.URLString # Do generic matchmaking if not arc.Broker.genericMatch(target, self.job, self.uc): print 'Target', target.ComputingEndpoint.URLString, 'rejected' return False # Accept target print 'Target', target.ComputingEndpoint.URLString, 'accepted' return True def lessthan(self, lhs, rhs): print 'Randomizing...' return random.randint(0, 1) nordugrid-arc-5.4.2/src/hed/acc/PythonBroker/PaxHeaders.7502/README0000644000000000000000000000012211145254320022553 xustar000000000000000027 mtime=1234524368.521341 25 atime=1513200574.4237 30 ctime=1513200660.336751604 nordugrid-arc-5.4.2/src/hed/acc/PythonBroker/README0000644000175000002070000000021511145254320022620 0ustar00mockbuildmock00000000000000PythonBroker This broker allows users to write their customized broker in python. To use this broker the user should write a python class. nordugrid-arc-5.4.2/src/hed/acc/PaxHeaders.7502/ARC10000644000000000000000000000013213214316024017656 xustar000000000000000030 mtime=1513200660.158749427 30 atime=1513200668.721854157 30 ctime=1513200660.158749427 nordugrid-arc-5.4.2/src/hed/acc/ARC1/0000755000175000002070000000000013214316024020001 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712053177163022004 xustar000000000000000027 mtime=1353514611.374025 30 atime=1513200593.796937796 30 ctime=1513200660.141749219 nordugrid-arc-5.4.2/src/hed/acc/ARC1/Makefile.am0000644000175000002070000000556312053177163022057 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libaccARC1.la #noinst_LTLIBRARIES = libarexclient.la noinst_LTLIBRARIES = libaccJLRWSRFBES.la libaccARC1_la_SOURCES = SubmitterPluginARC1.cpp SubmitterPluginARC1.h \ JobListRetrieverPluginARC1.cpp JobListRetrieverPluginARC1.h \ JobControllerPluginARC1.cpp JobControllerPluginARC1.h \ AREXClient.cpp AREXClient.h \ DescriptorsARC1.cpp JobStateARC1.h JobStateARC1.cpp \ TargetInformationRetrieverPluginWSRFGLUE2.cpp TargetInformationRetrieverPluginWSRFGLUE2.h \ TargetInformationRetrieverPluginBES.cpp TargetInformationRetrieverPluginBES.h \ JobControllerPluginBES.cpp JobControllerPluginBES.h \ JobStateBES.cpp JobStateBES.h libaccARC1_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libaccARC1_la_LIBADD = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la\ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libaccARC1_la_LDFLAGS = -no-undefined -avoid-version -module libaccJLRWSRFBES_la_SOURCES = \ 
JobListRetrieverPluginWSRFBES.cpp JobListRetrieverPluginWSRFBES.h libaccJLRWSRFBES_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libaccJLRWSRFBES_la_LIBADD = \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la\ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libaccJLRWSRFBES_la_LDFLAGS = -no-undefined -avoid-version -module #libarexclient_la_SOURCES = AREXClient.cpp AREXClient.h #libarexclient_la_CXXFLAGS = -I$(top_srcdir)/include \ # $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) #libarexclient_la_LIBADD = \ # $(top_builddir)/src/hed/libs/ws/libarcws.la \ # $(top_builddir)/src/hed/libs/compute/libarccompute.la \ # $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ # $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ # $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ # $(top_builddir)/src/hed/libs/message/libarcmessage.la \ # $(top_builddir)/src/hed/libs/common/libarccommon.la \ # $(LIBXML2_LIBS) $(GLIBMM_LIBS) DIST_SUBDIRS = test SUBDIRS = $(TEST_DIR) nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/JobControllerPluginBES.h0000644000000000000000000000012213165644550024407 xustar000000000000000027 mtime=1507281256.705161 25 atime=1513200574.4157 30 ctime=1513200660.154749378 nordugrid-arc-5.4.2/src/hed/acc/ARC1/JobControllerPluginBES.h0000644000175000002070000000372313165644550024463 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOBCONTROLLERBES_H__ #define __ARC_JOBCONTROLLERBES_H__ #include namespace Arc { class JobControllerPluginBES : public JobControllerPlugin { public: JobControllerPluginBES(const UserConfig& usercfg, PluginArgument* parg) : JobControllerPlugin(usercfg, parg) { supportedInterfaces.push_back("org.ogf.bes"); } virtual ~JobControllerPluginBES() {} virtual void SetUserConfig(const UserConfig& uc); static Plugin* Instance(PluginArgument *arg) { JobControllerPluginArgument *jcarg = dynamic_cast(arg); return jcarg ? 
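      // Added note: jcarg is NULL when the PluginArgument is not a
      // JobControllerPluginArgument, in which case NULL is returned here
      // instead of a new plugin instance.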
new JobControllerPluginBES(*jcarg, arg) : NULL; } virtual bool isEndpointNotSupported(const std::string& endpoint) const; virtual void UpdateJobs(std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool CleanJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool CancelJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool RenewJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool ResumeJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool GetURLToJobResource(const Job& job, Job::ResourceType resource, URL& url) const { return false; } virtual bool GetJobDescription(const Job& job, std::string& desc_str) const; virtual URL CreateURL(std::string service, ServiceType st) const; private: static Logger logger; }; } // namespace Arc #endif // __ARC_JOBCONTROLLERBES_H__ nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/JobControllerPluginARC1.h0000644000000000000000000000012213165644550024464 xustar000000000000000027 mtime=1507281256.705161 25 atime=1513200574.4077 30 ctime=1513200660.146749281 nordugrid-arc-5.4.2/src/hed/acc/ARC1/JobControllerPluginARC1.h0000644000175000002070000000376613165644550024547 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOBCONTROLLERARC1_H__ #define __ARC_JOBCONTROLLERARC1_H__ #include #include "AREXClient.h" namespace Arc { class JobControllerPluginARC1 : public JobControllerPlugin { public: JobControllerPluginARC1(const UserConfig& usercfg, PluginArgument* parg) : JobControllerPlugin(usercfg, parg),clients(usercfg) { supportedInterfaces.push_back("org.nordugrid.xbes"); } virtual ~JobControllerPluginARC1() {} virtual void SetUserConfig(const UserConfig& uc); static Plugin* Instance(PluginArgument *arg) { JobControllerPluginArgument *jcarg = dynamic_cast(arg); return jcarg ? 
new JobControllerPluginARC1(*jcarg, arg) : NULL; } bool isEndpointNotSupported(const std::string& endpoint) const; virtual void UpdateJobs(std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool CleanJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool CancelJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool RenewJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool ResumeJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool GetURLToJobResource(const Job& job, Job::ResourceType resource, URL& url) const; virtual bool GetJobDescription(const Job& job, std::string& desc_str) const; private: static URL GetAddressOfResource(const Job& job); static Logger logger; mutable AREXClients clients; }; } // namespace Arc #endif // __ARC_JOBCONTROLLERARC1_H__ nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315721022003 xustar000000000000000030 mtime=1513200593.863938616 30 atime=1513200648.728609632 30 ctime=1513200660.142749232 nordugrid-arc-5.4.2/src/hed/acc/ARC1/Makefile.in0000644000175000002070000014005613214315721022057 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/acc/ARC1 DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(noinst_LTLIBRARIES) $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libaccARC1_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libaccARC1_la_OBJECTS = libaccARC1_la-SubmitterPluginARC1.lo \ libaccARC1_la-JobListRetrieverPluginARC1.lo \ libaccARC1_la-JobControllerPluginARC1.lo \ libaccARC1_la-AREXClient.lo libaccARC1_la-DescriptorsARC1.lo \ libaccARC1_la-JobStateARC1.lo \ libaccARC1_la-TargetInformationRetrieverPluginWSRFGLUE2.lo \ libaccARC1_la-TargetInformationRetrieverPluginBES.lo \ libaccARC1_la-JobControllerPluginBES.lo \ 
libaccARC1_la-JobStateBES.lo libaccARC1_la_OBJECTS = $(am_libaccARC1_la_OBJECTS) libaccARC1_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libaccARC1_la_CXXFLAGS) \ $(CXXFLAGS) $(libaccARC1_la_LDFLAGS) $(LDFLAGS) -o $@ libaccJLRWSRFBES_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libaccJLRWSRFBES_la_OBJECTS = \ libaccJLRWSRFBES_la-JobListRetrieverPluginWSRFBES.lo libaccJLRWSRFBES_la_OBJECTS = $(am_libaccJLRWSRFBES_la_OBJECTS) libaccJLRWSRFBES_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libaccJLRWSRFBES_la_CXXFLAGS) $(CXXFLAGS) \ $(libaccJLRWSRFBES_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libaccARC1_la_SOURCES) $(libaccJLRWSRFBES_la_SOURCES) DIST_SOURCES = $(libaccARC1_la_SOURCES) $(libaccJLRWSRFBES_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e 
"$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ 
GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = 
@am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libaccARC1.la #noinst_LTLIBRARIES = libarexclient.la noinst_LTLIBRARIES = libaccJLRWSRFBES.la libaccARC1_la_SOURCES = SubmitterPluginARC1.cpp SubmitterPluginARC1.h \ JobListRetrieverPluginARC1.cpp JobListRetrieverPluginARC1.h \ JobControllerPluginARC1.cpp JobControllerPluginARC1.h \ AREXClient.cpp AREXClient.h \ DescriptorsARC1.cpp JobStateARC1.h JobStateARC1.cpp \ TargetInformationRetrieverPluginWSRFGLUE2.cpp TargetInformationRetrieverPluginWSRFGLUE2.h \ TargetInformationRetrieverPluginBES.cpp TargetInformationRetrieverPluginBES.h \ JobControllerPluginBES.cpp JobControllerPluginBES.h \ JobStateBES.cpp JobStateBES.h libaccARC1_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libaccARC1_la_LIBADD = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la\ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libaccARC1_la_LDFLAGS = -no-undefined -avoid-version -module libaccJLRWSRFBES_la_SOURCES = \ JobListRetrieverPluginWSRFBES.cpp JobListRetrieverPluginWSRFBES.h libaccJLRWSRFBES_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libaccJLRWSRFBES_la_LIBADD = 
\ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la\ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libaccJLRWSRFBES_la_LDFLAGS = -no-undefined -avoid-version -module #libarexclient_la_SOURCES = AREXClient.cpp AREXClient.h #libarexclient_la_CXXFLAGS = -I$(top_srcdir)/include \ # $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) #libarexclient_la_LIBADD = \ # $(top_builddir)/src/hed/libs/ws/libarcws.la \ # $(top_builddir)/src/hed/libs/compute/libarccompute.la \ # $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ # $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ # $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ # $(top_builddir)/src/hed/libs/message/libarcmessage.la \ # $(top_builddir)/src/hed/libs/common/libarccommon.la \ # $(LIBXML2_LIBS) $(GLIBMM_LIBS) DIST_SUBDIRS = test SUBDIRS = $(TEST_DIR) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/acc/ARC1/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/acc/ARC1/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libaccARC1.la: $(libaccARC1_la_OBJECTS) $(libaccARC1_la_DEPENDENCIES) $(libaccARC1_la_LINK) -rpath $(pkglibdir) $(libaccARC1_la_OBJECTS) $(libaccARC1_la_LIBADD) $(LIBS) libaccJLRWSRFBES.la: $(libaccJLRWSRFBES_la_OBJECTS) $(libaccJLRWSRFBES_la_DEPENDENCIES) $(libaccJLRWSRFBES_la_LINK) $(libaccJLRWSRFBES_la_OBJECTS) $(libaccJLRWSRFBES_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccARC1_la-AREXClient.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccARC1_la-DescriptorsARC1.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccARC1_la-JobControllerPluginARC1.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccARC1_la-JobControllerPluginBES.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccARC1_la-JobListRetrieverPluginARC1.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccARC1_la-JobStateARC1.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccARC1_la-JobStateBES.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccARC1_la-SubmitterPluginARC1.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccARC1_la-TargetInformationRetrieverPluginBES.Plo@am__quote@ @AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/libaccARC1_la-TargetInformationRetrieverPluginWSRFGLUE2.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccJLRWSRFBES_la-JobListRetrieverPluginWSRFBES.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libaccARC1_la-SubmitterPluginARC1.lo: SubmitterPluginARC1.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -MT libaccARC1_la-SubmitterPluginARC1.lo -MD -MP -MF $(DEPDIR)/libaccARC1_la-SubmitterPluginARC1.Tpo -c -o libaccARC1_la-SubmitterPluginARC1.lo `test -f 'SubmitterPluginARC1.cpp' || echo '$(srcdir)/'`SubmitterPluginARC1.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccARC1_la-SubmitterPluginARC1.Tpo $(DEPDIR)/libaccARC1_la-SubmitterPluginARC1.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SubmitterPluginARC1.cpp' object='libaccARC1_la-SubmitterPluginARC1.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccARC1_la-SubmitterPluginARC1.lo `test -f 'SubmitterPluginARC1.cpp' || echo '$(srcdir)/'`SubmitterPluginARC1.cpp libaccARC1_la-JobListRetrieverPluginARC1.lo: JobListRetrieverPluginARC1.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -MT libaccARC1_la-JobListRetrieverPluginARC1.lo -MD -MP -MF $(DEPDIR)/libaccARC1_la-JobListRetrieverPluginARC1.Tpo -c -o libaccARC1_la-JobListRetrieverPluginARC1.lo `test -f 'JobListRetrieverPluginARC1.cpp' || echo '$(srcdir)/'`JobListRetrieverPluginARC1.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccARC1_la-JobListRetrieverPluginARC1.Tpo $(DEPDIR)/libaccARC1_la-JobListRetrieverPluginARC1.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobListRetrieverPluginARC1.cpp' object='libaccARC1_la-JobListRetrieverPluginARC1.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ 
$(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccARC1_la-JobListRetrieverPluginARC1.lo `test -f 'JobListRetrieverPluginARC1.cpp' || echo '$(srcdir)/'`JobListRetrieverPluginARC1.cpp libaccARC1_la-JobControllerPluginARC1.lo: JobControllerPluginARC1.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -MT libaccARC1_la-JobControllerPluginARC1.lo -MD -MP -MF $(DEPDIR)/libaccARC1_la-JobControllerPluginARC1.Tpo -c -o libaccARC1_la-JobControllerPluginARC1.lo `test -f 'JobControllerPluginARC1.cpp' || echo '$(srcdir)/'`JobControllerPluginARC1.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccARC1_la-JobControllerPluginARC1.Tpo $(DEPDIR)/libaccARC1_la-JobControllerPluginARC1.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobControllerPluginARC1.cpp' object='libaccARC1_la-JobControllerPluginARC1.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccARC1_la-JobControllerPluginARC1.lo `test -f 'JobControllerPluginARC1.cpp' || echo '$(srcdir)/'`JobControllerPluginARC1.cpp libaccARC1_la-AREXClient.lo: AREXClient.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -MT libaccARC1_la-AREXClient.lo -MD -MP -MF $(DEPDIR)/libaccARC1_la-AREXClient.Tpo -c -o libaccARC1_la-AREXClient.lo `test -f 'AREXClient.cpp' || echo '$(srcdir)/'`AREXClient.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccARC1_la-AREXClient.Tpo $(DEPDIR)/libaccARC1_la-AREXClient.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='AREXClient.cpp' object='libaccARC1_la-AREXClient.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccARC1_la-AREXClient.lo `test -f 'AREXClient.cpp' || echo '$(srcdir)/'`AREXClient.cpp libaccARC1_la-DescriptorsARC1.lo: DescriptorsARC1.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -MT libaccARC1_la-DescriptorsARC1.lo -MD -MP -MF $(DEPDIR)/libaccARC1_la-DescriptorsARC1.Tpo -c -o libaccARC1_la-DescriptorsARC1.lo `test -f 'DescriptorsARC1.cpp' || echo '$(srcdir)/'`DescriptorsARC1.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccARC1_la-DescriptorsARC1.Tpo $(DEPDIR)/libaccARC1_la-DescriptorsARC1.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DescriptorsARC1.cpp' object='libaccARC1_la-DescriptorsARC1.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccARC1_la-DescriptorsARC1.lo `test -f 'DescriptorsARC1.cpp' || echo '$(srcdir)/'`DescriptorsARC1.cpp libaccARC1_la-JobStateARC1.lo: JobStateARC1.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -MT libaccARC1_la-JobStateARC1.lo -MD -MP -MF $(DEPDIR)/libaccARC1_la-JobStateARC1.Tpo -c -o libaccARC1_la-JobStateARC1.lo `test -f 'JobStateARC1.cpp' || echo '$(srcdir)/'`JobStateARC1.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccARC1_la-JobStateARC1.Tpo $(DEPDIR)/libaccARC1_la-JobStateARC1.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobStateARC1.cpp' object='libaccARC1_la-JobStateARC1.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccARC1_la-JobStateARC1.lo `test -f 'JobStateARC1.cpp' || echo '$(srcdir)/'`JobStateARC1.cpp libaccARC1_la-TargetInformationRetrieverPluginWSRFGLUE2.lo: TargetInformationRetrieverPluginWSRFGLUE2.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -MT libaccARC1_la-TargetInformationRetrieverPluginWSRFGLUE2.lo -MD -MP -MF $(DEPDIR)/libaccARC1_la-TargetInformationRetrieverPluginWSRFGLUE2.Tpo -c -o libaccARC1_la-TargetInformationRetrieverPluginWSRFGLUE2.lo `test -f 'TargetInformationRetrieverPluginWSRFGLUE2.cpp' || echo '$(srcdir)/'`TargetInformationRetrieverPluginWSRFGLUE2.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccARC1_la-TargetInformationRetrieverPluginWSRFGLUE2.Tpo $(DEPDIR)/libaccARC1_la-TargetInformationRetrieverPluginWSRFGLUE2.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='TargetInformationRetrieverPluginWSRFGLUE2.cpp' object='libaccARC1_la-TargetInformationRetrieverPluginWSRFGLUE2.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccARC1_la-TargetInformationRetrieverPluginWSRFGLUE2.lo `test -f 'TargetInformationRetrieverPluginWSRFGLUE2.cpp' || echo '$(srcdir)/'`TargetInformationRetrieverPluginWSRFGLUE2.cpp libaccARC1_la-TargetInformationRetrieverPluginBES.lo: TargetInformationRetrieverPluginBES.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -MT libaccARC1_la-TargetInformationRetrieverPluginBES.lo -MD -MP -MF $(DEPDIR)/libaccARC1_la-TargetInformationRetrieverPluginBES.Tpo -c -o libaccARC1_la-TargetInformationRetrieverPluginBES.lo `test -f 'TargetInformationRetrieverPluginBES.cpp' || echo '$(srcdir)/'`TargetInformationRetrieverPluginBES.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccARC1_la-TargetInformationRetrieverPluginBES.Tpo $(DEPDIR)/libaccARC1_la-TargetInformationRetrieverPluginBES.Plo 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='TargetInformationRetrieverPluginBES.cpp' object='libaccARC1_la-TargetInformationRetrieverPluginBES.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccARC1_la-TargetInformationRetrieverPluginBES.lo `test -f 'TargetInformationRetrieverPluginBES.cpp' || echo '$(srcdir)/'`TargetInformationRetrieverPluginBES.cpp libaccARC1_la-JobControllerPluginBES.lo: JobControllerPluginBES.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -MT libaccARC1_la-JobControllerPluginBES.lo -MD -MP -MF $(DEPDIR)/libaccARC1_la-JobControllerPluginBES.Tpo -c -o libaccARC1_la-JobControllerPluginBES.lo `test -f 'JobControllerPluginBES.cpp' || echo '$(srcdir)/'`JobControllerPluginBES.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccARC1_la-JobControllerPluginBES.Tpo $(DEPDIR)/libaccARC1_la-JobControllerPluginBES.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobControllerPluginBES.cpp' object='libaccARC1_la-JobControllerPluginBES.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccARC1_la-JobControllerPluginBES.lo `test -f 'JobControllerPluginBES.cpp' || echo '$(srcdir)/'`JobControllerPluginBES.cpp libaccARC1_la-JobStateBES.lo: JobStateBES.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -MT libaccARC1_la-JobStateBES.lo -MD -MP -MF $(DEPDIR)/libaccARC1_la-JobStateBES.Tpo -c -o libaccARC1_la-JobStateBES.lo `test -f 'JobStateBES.cpp' || echo '$(srcdir)/'`JobStateBES.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccARC1_la-JobStateBES.Tpo $(DEPDIR)/libaccARC1_la-JobStateBES.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobStateBES.cpp' object='libaccARC1_la-JobStateBES.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC1_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccARC1_la-JobStateBES.lo `test -f 'JobStateBES.cpp' || echo '$(srcdir)/'`JobStateBES.cpp libaccJLRWSRFBES_la-JobListRetrieverPluginWSRFBES.lo: JobListRetrieverPluginWSRFBES.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccJLRWSRFBES_la_CXXFLAGS) $(CXXFLAGS) -MT libaccJLRWSRFBES_la-JobListRetrieverPluginWSRFBES.lo -MD -MP -MF $(DEPDIR)/libaccJLRWSRFBES_la-JobListRetrieverPluginWSRFBES.Tpo -c -o libaccJLRWSRFBES_la-JobListRetrieverPluginWSRFBES.lo `test -f 'JobListRetrieverPluginWSRFBES.cpp' || echo '$(srcdir)/'`JobListRetrieverPluginWSRFBES.cpp 
@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccJLRWSRFBES_la-JobListRetrieverPluginWSRFBES.Tpo $(DEPDIR)/libaccJLRWSRFBES_la-JobListRetrieverPluginWSRFBES.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobListRetrieverPluginWSRFBES.cpp' object='libaccJLRWSRFBES_la-JobListRetrieverPluginWSRFBES.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccJLRWSRFBES_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccJLRWSRFBES_la-JobListRetrieverPluginWSRFBES.lo `test -f 'JobListRetrieverPluginWSRFBES.cpp' || echo '$(srcdir)/'`JobListRetrieverPluginWSRFBES.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . 
|| ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ clean-pkglibLTLIBRARIES mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-noinstLTLIBRARIES clean-pkglibLTLIBRARIES ctags \ ctags-recursive distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-pkglibLTLIBRARIES install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/JobListRetrieverPluginARC1.cpp0000644000000000000000000000012212104277016025466 xustar000000000000000027 mtime=1360100878.840863 25 atime=1513200574.4127 30 ctime=1513200660.144749256 nordugrid-arc-5.4.2/src/hed/acc/ARC1/JobListRetrieverPluginARC1.cpp0000644000175000002070000000571712104277016025547 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "JobListRetrieverPluginARC1.h" namespace Arc { Logger JobListRetrieverPluginARC1::logger(Logger::getRootLogger(), "JobListRetrieverPlugin.WSRFGLUE2"); bool JobListRetrieverPluginARC1::isEndpointNotSupported(const Endpoint& endpoint) const { const std::string::size_type pos = endpoint.URLString.find("://"); if (pos != std::string::npos) { const std::string proto = lower(endpoint.URLString.substr(0, pos)); return ((proto != "http") && (proto != "https")); } return false; } static URL CreateURL(std::string service) { std::string::size_type pos1 = service.find("://"); if (pos1 == std::string::npos) { service = "https://" + service; } else { std::string proto = lower(service.substr(0,pos1)); if((proto != "http") && (proto != "https")) return URL(); } // Default port other than 443? // Default path? 
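    // Added note: a service string without "://" has already been prefixed
    // with https:// above, so a bare host name such as the hypothetical
    // "example.org" reaches this point as "https://example.org".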
return service; } EndpointQueryingStatus JobListRetrieverPluginARC1::Query(const UserConfig& uc, const Endpoint& endpoint, std::list& jobs, const EndpointQueryOptions&) const { EndpointQueryingStatus s(EndpointQueryingStatus::FAILED); URL url(CreateURL(endpoint.URLString)); if (!url) { return s; } logger.msg(DEBUG, "Collecting Job (A-REX jobs) information."); DataHandle dir_url(url, uc); if (!dir_url) { logger.msg(INFO, "Failed retrieving job IDs: Unsupported url (%s) given", url.str()); return s; } dir_url->SetSecure(false); std::list files; if (!dir_url->List(files, DataPoint::INFO_TYPE_NAME)) { if (files.empty()) { logger.msg(INFO, "Failed retrieving job IDs"); return s; } logger.msg(VERBOSE, "Error encoutered during job ID retrieval. All job IDs might not have been retrieved"); } for (std::list::const_iterator file = files.begin(); file != files.end(); file++) { Job j; URL jobIDURL = url; jobIDURL.ChangePath(jobIDURL.Path() + "/" + file->GetName()); // Proposed mandatory attributes for ARC 3.0 j.JobID = jobIDURL.fullstr(); j.ServiceInformationURL = url; j.ServiceInformationInterfaceName = "org.nordugrid.wsrfglue2"; j.JobStatusURL = url; j.JobStatusInterfaceName = "org.nordugrid.xbes"; j.JobManagementURL = url; j.JobManagementInterfaceName = "org.nordugrid.xbes"; j.IDFromEndpoint = file->GetName(); j.StageInDir = jobIDURL; j.StageOutDir = jobIDURL; j.SessionDir = jobIDURL; jobs.push_back(j); } // TODO: Because listing/obtaining content is too generic operation // maybe it is unsafe to claim that operation suceeded if nothing // was retrieved. s = EndpointQueryingStatus::SUCCESSFUL; return s; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/SubmitterPluginARC1.h0000644000000000000000000000012213165644550023664 xustar000000000000000027 mtime=1507281256.705161 25 atime=1513200574.4067 30 ctime=1513200660.144749256 nordugrid-arc-5.4.2/src/hed/acc/ARC1/SubmitterPluginARC1.h0000644000175000002070000000324113165644550023733 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_SUBMITTERPLUGINARC1_H__ #define __ARC_SUBMITTERPLUGINARC1_H__ #include #include #include #include #include #include "AREXClient.h" namespace Arc { class SubmissionStatus; class SubmitterPluginARC1 : public SubmitterPlugin { public: SubmitterPluginARC1(const UserConfig& usercfg, PluginArgument* parg) : SubmitterPlugin(usercfg, parg), clients(usercfg) { supportedInterfaces.push_back("org.ogf.bes"); } ~SubmitterPluginARC1() { /*deleteAllClients();*/ } virtual void SetUserConfig(const UserConfig& uc); static Plugin* Instance(PluginArgument *arg) { SubmitterPluginArgument *subarg = dynamic_cast(arg); return subarg ? 
new SubmitterPluginARC1(*subarg, arg) : NULL; } bool isEndpointNotSupported(const std::string& endpoint) const; virtual SubmissionStatus Submit(const std::list& jobdescs, const std::string& endpoint, EntityConsumer& jc, std::list& notSubmitted); virtual SubmissionStatus Submit(const std::list& jobdescs, const ExecutionTarget& et, EntityConsumer& jc, std::list& notSubmitted); virtual bool Migrate(const std::string& jobid, const JobDescription& jobdesc, const ExecutionTarget& et, bool forcemigration, Job& job); private: // Centralized AREXClient handling AREXClients clients; static Logger logger; }; } // namespace Arc #endif // __ARC_SUBMITTERPLUGINARC1_H__ nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/test0000644000000000000000000000013013214316024020633 xustar000000000000000029 mtime=1513200660.17774966 30 atime=1513200668.721854157 29 ctime=1513200660.17774966 nordugrid-arc-5.4.2/src/hed/acc/ARC1/test/0000755000175000002070000000000013214316024020760 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/acc/ARC1/test/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712061402323022747 xustar000000000000000027 mtime=1355154643.351662 30 atime=1513200593.879938811 30 ctime=1513200660.175749635 nordugrid-arc-5.4.2/src/hed/acc/ARC1/test/Makefile.am0000644000175000002070000000162612061402323023016 0ustar00mockbuildmock00000000000000TESTS = AREXClientTest check_PROGRAMS = $(TESTS) AREXClientTest_SOURCES = $(top_srcdir)/src/Test.cpp \ $(top_srcdir)/src/hed/libs/communication/test/SimulatorClasses.h \ $(top_srcdir)/src/hed/libs/communication/test/SimulatorClasses.cpp \ AREXClientTest.cpp $(srcdir)/../AREXClient.cpp \ $(srcdir)/../JobStateARC1.cpp $(srcdir)/../JobStateBES.cpp AREXClientTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) -DCPPUNITTEST $(AM_CXXFLAGS) AREXClientTest_LDADD = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) nordugrid-arc-5.4.2/src/hed/acc/ARC1/test/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315721022762 xustar000000000000000030 mtime=1513200593.933939472 30 atime=1513200648.744609828 30 ctime=1513200660.175749635 nordugrid-arc-5.4.2/src/hed/acc/ARC1/test/Makefile.in0000644000175000002070000011551213214315721023035 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ TESTS = AREXClientTest$(EXEEXT) check_PROGRAMS = $(am__EXEEXT_1) subdir = src/hed/acc/ARC1/test DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__EXEEXT_1 = AREXClientTest$(EXEEXT) am_AREXClientTest_OBJECTS = AREXClientTest-Test.$(OBJEXT) \ AREXClientTest-SimulatorClasses.$(OBJEXT) \ AREXClientTest-AREXClientTest.$(OBJEXT) \ AREXClientTest-AREXClient.$(OBJEXT) \ AREXClientTest-JobStateARC1.$(OBJEXT) \ AREXClientTest-JobStateBES.$(OBJEXT) AREXClientTest_OBJECTS = $(am_AREXClientTest_OBJECTS) am__DEPENDENCIES_1 = AREXClientTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) AREXClientTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) 
$(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(AREXClientTest_SOURCES) DIST_SOURCES = $(AREXClientTest_SOURCES) ETAGS = etags CTAGS = ctags am__tty_colors = \ red=; grn=; lgn=; blu=; std= DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = 
@GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ 
am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ AREXClientTest_SOURCES = $(top_srcdir)/src/Test.cpp \ $(top_srcdir)/src/hed/libs/communication/test/SimulatorClasses.h \ $(top_srcdir)/src/hed/libs/communication/test/SimulatorClasses.cpp \ AREXClientTest.cpp $(srcdir)/../AREXClient.cpp \ $(srcdir)/../JobStateARC1.cpp $(srcdir)/../JobStateBES.cpp AREXClientTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) -DCPPUNITTEST $(AM_CXXFLAGS) AREXClientTest_LDADD = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/acc/ARC1/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/acc/ARC1/test/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-checkPROGRAMS: @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list AREXClientTest$(EXEEXT): $(AREXClientTest_OBJECTS) $(AREXClientTest_DEPENDENCIES) @rm -f AREXClientTest$(EXEEXT) $(AREXClientTest_LINK) $(AREXClientTest_OBJECTS) $(AREXClientTest_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/AREXClientTest-AREXClient.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/AREXClientTest-AREXClientTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/AREXClientTest-JobStateARC1.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/AREXClientTest-JobStateBES.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/AREXClientTest-SimulatorClasses.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/AREXClientTest-Test.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< AREXClientTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -MT AREXClientTest-Test.o -MD -MP -MF $(DEPDIR)/AREXClientTest-Test.Tpo -c -o AREXClientTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/AREXClientTest-Test.Tpo $(DEPDIR)/AREXClientTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='AREXClientTest-Test.o' libtool=no @AMDEPBACKSLASH@ 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -c -o AREXClientTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp AREXClientTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -MT AREXClientTest-Test.obj -MD -MP -MF $(DEPDIR)/AREXClientTest-Test.Tpo -c -o AREXClientTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/AREXClientTest-Test.Tpo $(DEPDIR)/AREXClientTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='AREXClientTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -c -o AREXClientTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` AREXClientTest-SimulatorClasses.o: $(top_srcdir)/src/hed/libs/communication/test/SimulatorClasses.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -MT AREXClientTest-SimulatorClasses.o -MD -MP -MF $(DEPDIR)/AREXClientTest-SimulatorClasses.Tpo -c -o AREXClientTest-SimulatorClasses.o `test -f '$(top_srcdir)/src/hed/libs/communication/test/SimulatorClasses.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/hed/libs/communication/test/SimulatorClasses.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/AREXClientTest-SimulatorClasses.Tpo $(DEPDIR)/AREXClientTest-SimulatorClasses.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/hed/libs/communication/test/SimulatorClasses.cpp' object='AREXClientTest-SimulatorClasses.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -c -o AREXClientTest-SimulatorClasses.o `test -f '$(top_srcdir)/src/hed/libs/communication/test/SimulatorClasses.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/hed/libs/communication/test/SimulatorClasses.cpp AREXClientTest-SimulatorClasses.obj: $(top_srcdir)/src/hed/libs/communication/test/SimulatorClasses.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -MT AREXClientTest-SimulatorClasses.obj -MD -MP -MF $(DEPDIR)/AREXClientTest-SimulatorClasses.Tpo -c -o AREXClientTest-SimulatorClasses.obj `if test -f '$(top_srcdir)/src/hed/libs/communication/test/SimulatorClasses.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/hed/libs/communication/test/SimulatorClasses.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/hed/libs/communication/test/SimulatorClasses.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/AREXClientTest-SimulatorClasses.Tpo $(DEPDIR)/AREXClientTest-SimulatorClasses.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ 
source='$(top_srcdir)/src/hed/libs/communication/test/SimulatorClasses.cpp' object='AREXClientTest-SimulatorClasses.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -c -o AREXClientTest-SimulatorClasses.obj `if test -f '$(top_srcdir)/src/hed/libs/communication/test/SimulatorClasses.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/hed/libs/communication/test/SimulatorClasses.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/hed/libs/communication/test/SimulatorClasses.cpp'; fi` AREXClientTest-AREXClientTest.o: AREXClientTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -MT AREXClientTest-AREXClientTest.o -MD -MP -MF $(DEPDIR)/AREXClientTest-AREXClientTest.Tpo -c -o AREXClientTest-AREXClientTest.o `test -f 'AREXClientTest.cpp' || echo '$(srcdir)/'`AREXClientTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/AREXClientTest-AREXClientTest.Tpo $(DEPDIR)/AREXClientTest-AREXClientTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='AREXClientTest.cpp' object='AREXClientTest-AREXClientTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -c -o AREXClientTest-AREXClientTest.o `test -f 'AREXClientTest.cpp' || echo '$(srcdir)/'`AREXClientTest.cpp AREXClientTest-AREXClientTest.obj: AREXClientTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -MT AREXClientTest-AREXClientTest.obj -MD -MP -MF $(DEPDIR)/AREXClientTest-AREXClientTest.Tpo -c -o AREXClientTest-AREXClientTest.obj `if test -f 'AREXClientTest.cpp'; then $(CYGPATH_W) 'AREXClientTest.cpp'; else $(CYGPATH_W) '$(srcdir)/AREXClientTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/AREXClientTest-AREXClientTest.Tpo $(DEPDIR)/AREXClientTest-AREXClientTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='AREXClientTest.cpp' object='AREXClientTest-AREXClientTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -c -o AREXClientTest-AREXClientTest.obj `if test -f 'AREXClientTest.cpp'; then $(CYGPATH_W) 'AREXClientTest.cpp'; else $(CYGPATH_W) '$(srcdir)/AREXClientTest.cpp'; fi` AREXClientTest-AREXClient.o: $(srcdir)/../AREXClient.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -MT AREXClientTest-AREXClient.o -MD -MP -MF $(DEPDIR)/AREXClientTest-AREXClient.Tpo -c -o AREXClientTest-AREXClient.o `test -f '$(srcdir)/../AREXClient.cpp' || echo '$(srcdir)/'`$(srcdir)/../AREXClient.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/AREXClientTest-AREXClient.Tpo $(DEPDIR)/AREXClientTest-AREXClient.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(srcdir)/../AREXClient.cpp' object='AREXClientTest-AREXClient.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -c -o AREXClientTest-AREXClient.o `test -f '$(srcdir)/../AREXClient.cpp' || echo '$(srcdir)/'`$(srcdir)/../AREXClient.cpp AREXClientTest-AREXClient.obj: $(srcdir)/../AREXClient.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -MT AREXClientTest-AREXClient.obj -MD -MP -MF $(DEPDIR)/AREXClientTest-AREXClient.Tpo -c -o AREXClientTest-AREXClient.obj `if test -f '$(srcdir)/../AREXClient.cpp'; then $(CYGPATH_W) '$(srcdir)/../AREXClient.cpp'; else $(CYGPATH_W) '$(srcdir)/$(srcdir)/../AREXClient.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/AREXClientTest-AREXClient.Tpo $(DEPDIR)/AREXClientTest-AREXClient.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(srcdir)/../AREXClient.cpp' object='AREXClientTest-AREXClient.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -c -o AREXClientTest-AREXClient.obj `if test -f '$(srcdir)/../AREXClient.cpp'; then $(CYGPATH_W) '$(srcdir)/../AREXClient.cpp'; else $(CYGPATH_W) '$(srcdir)/$(srcdir)/../AREXClient.cpp'; fi` AREXClientTest-JobStateARC1.o: $(srcdir)/../JobStateARC1.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -MT AREXClientTest-JobStateARC1.o -MD -MP -MF $(DEPDIR)/AREXClientTest-JobStateARC1.Tpo -c -o AREXClientTest-JobStateARC1.o `test -f '$(srcdir)/../JobStateARC1.cpp' || echo '$(srcdir)/'`$(srcdir)/../JobStateARC1.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/AREXClientTest-JobStateARC1.Tpo $(DEPDIR)/AREXClientTest-JobStateARC1.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(srcdir)/../JobStateARC1.cpp' object='AREXClientTest-JobStateARC1.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -c -o AREXClientTest-JobStateARC1.o `test -f '$(srcdir)/../JobStateARC1.cpp' || echo '$(srcdir)/'`$(srcdir)/../JobStateARC1.cpp AREXClientTest-JobStateARC1.obj: $(srcdir)/../JobStateARC1.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -MT AREXClientTest-JobStateARC1.obj -MD -MP -MF $(DEPDIR)/AREXClientTest-JobStateARC1.Tpo -c -o AREXClientTest-JobStateARC1.obj `if test -f '$(srcdir)/../JobStateARC1.cpp'; then $(CYGPATH_W) '$(srcdir)/../JobStateARC1.cpp'; else $(CYGPATH_W) '$(srcdir)/$(srcdir)/../JobStateARC1.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/AREXClientTest-JobStateARC1.Tpo $(DEPDIR)/AREXClientTest-JobStateARC1.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(srcdir)/../JobStateARC1.cpp' object='AREXClientTest-JobStateARC1.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -c -o AREXClientTest-JobStateARC1.obj `if test -f '$(srcdir)/../JobStateARC1.cpp'; then $(CYGPATH_W) '$(srcdir)/../JobStateARC1.cpp'; else $(CYGPATH_W) 
'$(srcdir)/$(srcdir)/../JobStateARC1.cpp'; fi` AREXClientTest-JobStateBES.o: $(srcdir)/../JobStateBES.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -MT AREXClientTest-JobStateBES.o -MD -MP -MF $(DEPDIR)/AREXClientTest-JobStateBES.Tpo -c -o AREXClientTest-JobStateBES.o `test -f '$(srcdir)/../JobStateBES.cpp' || echo '$(srcdir)/'`$(srcdir)/../JobStateBES.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/AREXClientTest-JobStateBES.Tpo $(DEPDIR)/AREXClientTest-JobStateBES.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(srcdir)/../JobStateBES.cpp' object='AREXClientTest-JobStateBES.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -c -o AREXClientTest-JobStateBES.o `test -f '$(srcdir)/../JobStateBES.cpp' || echo '$(srcdir)/'`$(srcdir)/../JobStateBES.cpp AREXClientTest-JobStateBES.obj: $(srcdir)/../JobStateBES.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -MT AREXClientTest-JobStateBES.obj -MD -MP -MF $(DEPDIR)/AREXClientTest-JobStateBES.Tpo -c -o AREXClientTest-JobStateBES.obj `if test -f '$(srcdir)/../JobStateBES.cpp'; then $(CYGPATH_W) '$(srcdir)/../JobStateBES.cpp'; else $(CYGPATH_W) '$(srcdir)/$(srcdir)/../JobStateBES.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/AREXClientTest-JobStateBES.Tpo $(DEPDIR)/AREXClientTest-JobStateBES.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(srcdir)/../JobStateBES.cpp' object='AREXClientTest-JobStateBES.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AREXClientTest_CXXFLAGS) $(CXXFLAGS) -c -o AREXClientTest-JobStateBES.obj `if test -f '$(srcdir)/../JobStateBES.cpp'; then $(CYGPATH_W) '$(srcdir)/../JobStateBES.cpp'; else $(CYGPATH_W) '$(srcdir)/$(srcdir)/../JobStateBES.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } 
\ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? -ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-checkPROGRAMS clean-generic clean-libtool \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-TESTS check-am clean \ clean-checkPROGRAMS clean-generic clean-libtool ctags \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/acc/ARC1/test/PaxHeaders.7502/AREXClientTest.cpp0000644000000000000000000000012112223776751024172 xustar000000000000000027 mtime=1380974057.045771 25 atime=1513200574.4207 29 ctime=1513200660.17774966 nordugrid-arc-5.4.2/src/hed/acc/ARC1/test/AREXClientTest.cpp0000644000175000002070000003404012223776751024243 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif /* This tests are good for the ArexClient's 19411 revision. */ #include // This define is needed to have maximal values for types with fixed size #define __STDC_LIMIT_MACROS #include #include #include #include "../JobStateARC1.h" #include "../AREXClient.h" #include "../../../libs/communication/test/SimulatorClasses.h" #define BES_FACTORY_ACTIONS_BASE_URL "http://schemas.ggf.org/bes/2006/08/bes-factory/BESFactoryPortType/" Arc::MCCConfig config; class AREXClientTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(AREXClientTest); CPPUNIT_TEST(ProcessTest); CPPUNIT_TEST(SubmitTest); CPPUNIT_TEST(SubmitTestwithDelegation); CPPUNIT_TEST(StatTest); CPPUNIT_TEST(ServiceStatTest); CPPUNIT_TEST(ListServicesFromISISTest); CPPUNIT_TEST(KillTest); CPPUNIT_TEST(CleanTest); CPPUNIT_TEST(GetdescTest); CPPUNIT_TEST(MigrateTest); CPPUNIT_TEST(ResumeTest); CPPUNIT_TEST(CreateActivityIdentifierTest); CPPUNIT_TEST_SUITE_END(); public: AREXClientTest() : ac(Arc::URL("test://AREXClientTest.now"), config, -1, true) { config.AddProxy("Proxy value");} void setUp() {} void tearDown() {} void ProcessTest(); void SubmitTest(); void SubmitTestwithDelegation(); void StatTest(); void ServiceStatTest(); void ListServicesFromISISTest(); void KillTest(); void CleanTest(); void GetdescTest(); void MigrateTest(); void ResumeTest(); void CreateActivityIdentifierTest(); private: Arc::AREXClient ac; }; void AREXClientTest::ProcessTest() { const std::string value = "Test response value"; const std::string node = "Response"; Arc::ClientSOAPTest::response = new Arc::PayloadSOAP(Arc::NS("", "http://www.nordugrid.org/schemas/isis/2007/06")); Arc::ClientSOAPTest::response->NewChild(node) = value; Arc::ClientSOAPTest::status = Arc::STATUS_OK; //Running Check Arc::PayloadSOAP req(Arc::NS("", "http://www.nordugrid.org/schemas/isis/2007/06")); req.NewChild("TestNode") = "Test request value"; bool delegate(false); Arc::XMLNode resp; CPPUNIT_ASSERT(ac.process(req,delegate,resp)); //Response Check CPPUNIT_ASSERT(resp); CPPUNIT_ASSERT_EQUAL(node, resp.Name()); CPPUNIT_ASSERT_EQUAL(value, (std::string)resp); } void AREXClientTest::SubmitTest() { const std::string jobdesc = ""; std::string jobid = ""; const std::string value = "Test value"; const std::string query = "CreateActivityResponse"; Arc::ClientSOAPTest::response = new Arc::PayloadSOAP(Arc::NS("", "http://www.nordugrid.org/schemas/isis/2007/06")); Arc::ClientSOAPTest::response->NewChild(query).NewChild("ActivityIdentifier") = value; Arc::ClientSOAPTest::status = Arc::STATUS_OK; //Running check CPPUNIT_ASSERT(ac.submit(jobdesc, jobid, false)); //Response Check CPPUNIT_ASSERT_EQUAL((std::string)"\nTest value\n", jobid); CPPUNIT_ASSERT(Arc::ClientSOAPTest::request["CreateActivity"]); CPPUNIT_ASSERT(Arc::ClientSOAPTest::request["CreateActivity"]["ActivityDocument"]); CPPUNIT_ASSERT_EQUAL(jobdesc, (std::string)Arc::ClientSOAPTest::request["CreateActivity"]["ActivityDocument"]); } void AREXClientTest::SubmitTestwithDelegation() { const std::string jobdesc = ""; std::string jobid = ""; const std::string value = "Test value"; const std::string query = 
"CreateActivityResponse"; Arc::ClientSOAPTest::response = new Arc::PayloadSOAP(Arc::NS("", "http://www.nordugrid.org/schemas/isis/2007/06")); Arc::ClientSOAPTest::response->NewChild(query).NewChild("ActivityIdentifier") = value; Arc::ClientSOAPTest::status = Arc::STATUS_OK; //Running Check CPPUNIT_ASSERT(ac.submit(jobdesc, jobid, true)); //Response Check CPPUNIT_ASSERT_EQUAL((std::string)"\nTest value\n", jobid); CPPUNIT_ASSERT(Arc::ClientSOAPTest::request["CreateActivity"]); CPPUNIT_ASSERT(Arc::ClientSOAPTest::request["CreateActivity"]["ActivityDocument"]); CPPUNIT_ASSERT_EQUAL(jobdesc, (std::string)Arc::ClientSOAPTest::request["CreateActivity"]["ActivityDocument"]); //Delegation part const std::string id = "id"; const std::string dvalue = "delegation"; const std::string attribute = "x509"; Arc::XMLNode delegation = Arc::ClientSOAPTest::request["CreateActivity"]["DelegatedToken"]; CPPUNIT_ASSERT(delegation); CPPUNIT_ASSERT_EQUAL(attribute, (std::string)delegation.Attribute("Format")); CPPUNIT_ASSERT(delegation["Id"]); CPPUNIT_ASSERT_EQUAL(id, (std::string)delegation["Id"]); CPPUNIT_ASSERT(delegation["Value"]); CPPUNIT_ASSERT_EQUAL(dvalue, (std::string)delegation["Value"]); } void AREXClientTest::StatTest() { const std::string node = "GetActivityStatusesResponse"; Arc::ClientSOAPTest::response = new Arc::PayloadSOAP(Arc::NS("a-rex", "http://www.nordugrid.org/schemas/a-rex")); Arc::ClientSOAPTest::response->NewChild(node).NewChild("Response").NewChild("ActivityStatus").NewAttribute("state") = "Executing"; (*Arc::ClientSOAPTest::response)[node]["Response"]["ActivityStatus"].NewChild("a-rex:State") = "Accepted"; Arc::ClientSOAPTest::status = Arc::STATUS_OK; //Running Check std::string jobid = "my_test_jobID_12345"; Arc::Job job; //Response Check CPPUNIT_ASSERT(ac.stat(jobid, job)); CPPUNIT_ASSERT_EQUAL((std::string)"Accepted", job.State()); CPPUNIT_ASSERT(job.State == Arc::JobState::ACCEPTED); // If LRMSState element is defined it takes precedence. Arc::ClientSOAPTest::response = new Arc::PayloadSOAP(Arc::NS("a-rex", "http://www.nordugrid.org/schemas/a-rex")); Arc::ClientSOAPTest::response->NewChild(node).NewChild("Response").NewChild("ActivityStatus").NewAttribute("state") = "Executing"; (*Arc::ClientSOAPTest::response)[node]["Response"]["ActivityStatus"].NewChild("a-rex:State") = "Executing"; (*Arc::ClientSOAPTest::response)[node]["Response"]["ActivityStatus"].NewChild("LRMSState") = "R"; CPPUNIT_ASSERT(ac.stat(jobid, job)); CPPUNIT_ASSERT_EQUAL((std::string)"INLRMS:R", job.State()); CPPUNIT_ASSERT(Arc::JobState::RUNNING == job.State); // If glue:State element is defined, it takes precedence. 
std::map ns; ns["a-rex"] = "http://www.nordugrid.org/schemas/a-rex"; ns["glue"] = "http://www.nordugrid.org/schemas/glue"; Arc::ClientSOAPTest::response = new Arc::PayloadSOAP(ns); Arc::ClientSOAPTest::response->NewChild(node).NewChild("Response").NewChild("ActivityStatus").NewAttribute("state") = "Executing"; (*Arc::ClientSOAPTest::response)[node]["Response"]["ActivityStatus"].NewChild("a-rex:State") = "Executing"; (*Arc::ClientSOAPTest::response)[node]["Response"]["ActivityStatus"].NewChild("LRMSState") = "R"; (*Arc::ClientSOAPTest::response)[node]["Response"]["ActivityStatus"].NewChild("glue:State") = "INLRMS:Q"; CPPUNIT_ASSERT(ac.stat(jobid, job)); CPPUNIT_ASSERT_EQUAL((std::string)"INLRMS:Q", job.State()); CPPUNIT_ASSERT(job.State == Arc::JobState::QUEUING); } void AREXClientTest::ServiceStatTest() { const std::string value = "Test value"; const std::string query = "QueryResourcePropertiesResponse"; Arc::ClientSOAPTest::response = new Arc::PayloadSOAP(Arc::NS("", "http://www.nordugrid.org/schemas/isis/2007/06")); Arc::ClientSOAPTest::response->NewChild(query) = value; Arc::ClientSOAPTest::status = Arc::STATUS_OK; //Running Check Arc::XMLNode status; CPPUNIT_ASSERT(ac.sstat(status)); //Response Check CPPUNIT_ASSERT(status); CPPUNIT_ASSERT_EQUAL(query, status.Name()); CPPUNIT_ASSERT_EQUAL(value, (std::string)status); } void AREXClientTest::ListServicesFromISISTest() { std::string first_value = "test://AREXClientURL1.now"; std::string third_value = "test://AREXClientURL3.now"; const std::string node = "QueryResponse"; Arc::ClientSOAPTest::response = new Arc::PayloadSOAP(Arc::NS("isis", "http://www.nordugrid.org/schemas/isis/2007/06")); Arc::XMLNode root = Arc::ClientSOAPTest::response->NewChild(node); //1st entry Arc::XMLNode entry1 = root.NewChild("RegEntry"); entry1.NewChild("SrcAdv").NewChild("Type") = "org.nordugrid.execution.arex"; entry1["SrcAdv"].NewChild("EPR").NewChild("Address") = first_value; //2nd entry Arc::XMLNode entry2 = root.NewChild("RegEntry"); entry2.NewChild("SrcAdv").NewChild("Type") = "org.nordugrid.infosys.isis"; entry2.NewChild("MetaSrcAdv").NewChild("ServiceID") = "Test service ID"; //3rd entry Arc::XMLNode entry3 = root.NewChild("RegEntry"); entry3.NewChild("SrcAdv").NewChild("Type") = "org.nordugrid.execution.arex"; entry3["SrcAdv"].NewChild("EPR").NewChild("Address") = third_value; Arc::ClientSOAPTest::status = Arc::STATUS_OK; //Running Check std::list< std::pair > services; CPPUNIT_ASSERT(ac.listServicesFromISIS(services)); //Response Check CPPUNIT_ASSERT_EQUAL(services.front().first, Arc::URL(first_value)); CPPUNIT_ASSERT_EQUAL(services.front().second, Arc::COMPUTING); CPPUNIT_ASSERT_EQUAL(services.back().first, Arc::URL(third_value)); CPPUNIT_ASSERT_EQUAL(services.back().second, Arc::COMPUTING); } void AREXClientTest::KillTest() { const std::string node = "TerminateActivitiesResponse"; Arc::ClientSOAPTest::response = new Arc::PayloadSOAP(Arc::NS("a-rex", "http://www.nordugrid.org/schemas/a-rex")); Arc::ClientSOAPTest::response->NewChild(node).NewChild("Response").NewChild("Terminated") = "true"; Arc::ClientSOAPTest::status = Arc::STATUS_OK; //Running Check std::string jobid; CPPUNIT_ASSERT(ac.kill(jobid)); } void AREXClientTest::CleanTest() { const std::string value = "Test value"; const std::string node = "ChangeActivityStatusResponse"; Arc::ClientSOAPTest::response = new Arc::PayloadSOAP(Arc::NS("a-rex", "http://www.nordugrid.org/schemas/a-rex")); Arc::ClientSOAPTest::response->NewChild(node).NewChild("Response") = value; Arc::ClientSOAPTest::status = 
Arc::STATUS_OK; //Running Check std::string jobid; CPPUNIT_ASSERT(ac.clean(jobid)); } void AREXClientTest::GetdescTest() { const std::string value = "Test value"; const std::string testnode = "TestNode"; const std::string node = "GetActivityDocumentsResponse"; Arc::ClientSOAPTest::response = new Arc::PayloadSOAP(Arc::NS("a-rex", "http://www.nordugrid.org/schemas/a-rex")); Arc::ClientSOAPTest::response->NewChild(node).NewChild("Response").NewChild("JobDefinition").NewChild(testnode) = value; Arc::ClientSOAPTest::status = Arc::STATUS_OK; //Running Check std::string jobid; std::string jobdesc; CPPUNIT_ASSERT(ac.getdesc(jobid, jobdesc)); //Response Check Arc::XMLNode xml(jobdesc); CPPUNIT_ASSERT_EQUAL((std::string)"JobDefinition", xml.Name()); CPPUNIT_ASSERT_EQUAL(testnode, xml[testnode].Name()); CPPUNIT_ASSERT_EQUAL(value, (std::string)xml[testnode]); } void AREXClientTest::MigrateTest() { const std::string value = "Test value"; const std::string testnode = "TestNode"; const std::string node = "MigrateActivityResponse"; Arc::ClientSOAPTest::response = new Arc::PayloadSOAP(Arc::NS("a-rex", "http://www.nordugrid.org/schemas/a-rex")); Arc::ClientSOAPTest::response->NewChild(node).NewChild("ActivityIdentifier").NewChild(testnode) = value; Arc::ClientSOAPTest::status = Arc::STATUS_OK; //Running Check std::string jobid; std::string jobdesc; bool forcemigration = false; std::string newjobid; bool delegate = false; CPPUNIT_ASSERT(ac.migrate(jobid, jobdesc, forcemigration, newjobid, delegate)); //Response Check Arc::XMLNode xml(newjobid); CPPUNIT_ASSERT_EQUAL((std::string)"ActivityIdentifier", xml.Name()); CPPUNIT_ASSERT_EQUAL(testnode, xml[testnode].Name()); CPPUNIT_ASSERT_EQUAL(value, (std::string)xml[testnode]); } void AREXClientTest::ResumeTest() { const std::string value = "Test value"; const std::string node = "ChangeActivityStatusResponse"; Arc::ClientSOAPTest::response = new Arc::PayloadSOAP(Arc::NS("a-rex", "http://www.nordugrid.org/schemas/a-rex")); Arc::ClientSOAPTest::response->NewChild(node).NewChild("Response") = value; Arc::ClientSOAPTest::status = Arc::STATUS_OK; //Running Check std::string jobid; CPPUNIT_ASSERT(ac.resume(jobid)); } void AREXClientTest::CreateActivityIdentifierTest() { Arc::URL url("test://AREXClientTest.now/jobid12345"); std::string a_rex = "http://www.nordugrid.org/schemas/a-rex"; std::string bes_factory = "http://schemas.ggf.org/bes/2006/08/bes-factory"; std::string wsa = "http://www.w3.org/2005/08/addressing"; std::string jsdl = "http://schemas.ggf.org/jsdl/2005/11/jsdl"; std::string jsdl_posix = "http://schemas.ggf.org/jsdl/2005/11/jsdl-posix"; std::string jsdl_arc = "http://www.nordugrid.org/ws/schemas/jsdl-arc"; std::string jsdl_hpcpa = "http://schemas.ggf.org/jsdl/2006/07/jsdl-hpcpa"; const std::string root = "ActivityIdentifier"; //Running std::string activityIdentifier; ac.createActivityIdentifier(url, activityIdentifier); Arc::XMLNode result(activityIdentifier); CPPUNIT_ASSERT_EQUAL(root, result.Name()); //Namespaces checking CPPUNIT_ASSERT_EQUAL(a_rex, result.Namespaces()["a-rex"]); CPPUNIT_ASSERT_EQUAL(bes_factory, result.Namespaces()["bes-factory"]); CPPUNIT_ASSERT_EQUAL(wsa, result.Namespaces()["wsa"]); CPPUNIT_ASSERT_EQUAL(jsdl, result.Namespaces()["jsdl"]); CPPUNIT_ASSERT_EQUAL(jsdl_posix, result.Namespaces()["jsdl-posix"]); CPPUNIT_ASSERT_EQUAL(jsdl_arc, result.Namespaces()["jsdl-arc"]); CPPUNIT_ASSERT_EQUAL(jsdl_hpcpa, result.Namespaces()["jsdl-hpcpa"]); //Values checking CPPUNIT_ASSERT_EQUAL(std::string("Address"), result["Address"].Name()); 
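  // The job URL is expected to be split across the identifier: protocol://host
  // in the Address element, and the path under ReferenceParameters/JobID
  // (asserted just below).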
CPPUNIT_ASSERT_EQUAL(url.Protocol()+"://"+url.Host(), (std::string)result["Address"]); CPPUNIT_ASSERT_EQUAL(std::string("JobID"), (result["ReferenceParameters"]["JobID"]).Name()); CPPUNIT_ASSERT_EQUAL(url.Path(), (std::string)result["ReferenceParameters"]["JobID"]); } CPPUNIT_TEST_SUITE_REGISTRATION(AREXClientTest); nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/DescriptorsARC1.cpp0000644000000000000000000000012212675602216023361 xustar000000000000000027 mtime=1459029134.924374 25 atime=1513200574.4067 30 ctime=1513200660.148749305 nordugrid-arc-5.4.2/src/hed/acc/ARC1/DescriptorsARC1.cpp0000644000175000002070000000227412675602216023435 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include "JobListRetrieverPluginARC1.h" #include "JobControllerPluginARC1.h" #include "SubmitterPluginARC1.h" #include "JobControllerPluginBES.h" #include "TargetInformationRetrieverPluginBES.h" #include "TargetInformationRetrieverPluginWSRFGLUE2.h" extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "WSRFGLUE2", "HED:JobListRetrieverPlugin", "", 0, &Arc::JobListRetrieverPluginARC1::Instance }, { "ARC1", "HED:SubmitterPlugin", "A-REX (ARC REsource coupled EXecution service)", 0, &Arc::SubmitterPluginARC1::Instance }, { "ARC1", "HED:JobControllerPlugin", "A-REX (ARC REsource coupled EXecution service)", 0, &Arc::JobControllerPluginARC1::Instance }, { "BES", "HED:JobControllerPlugin", "OGSA-BES conforming execution service", 0, &Arc::JobControllerPluginBES::Instance }, { "BES", "HED:TargetInformationRetrieverPlugin", "OGSA-BES Local Information", 0, &Arc::TargetInformationRetrieverPluginBES::Instance }, { "WSRFGLUE2", "HED:TargetInformationRetrieverPlugin", "WSRF GLUE2 Local Information", 0, &Arc::TargetInformationRetrieverPluginWSRFGLUE2::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/JobStateBES.h0000644000000000000000000000012212045235201022146 xustar000000000000000027 mtime=1351957121.246634 25 atime=1513200574.4127 30 ctime=1513200660.155749391 nordugrid-arc-5.4.2/src/hed/acc/ARC1/JobStateBES.h0000644000175000002070000000054312045235201022217 0ustar00mockbuildmock00000000000000#ifndef __ARC_JOBSTATEBES_H__ #define __ARC_JOBSTATEBES_H__ #include namespace Arc { class JobStateBES : public JobState { public: JobStateBES(const std::string& state) : JobState(state, &StateMap) {} static JobState::StateType StateMap(const std::string& state); }; } #endif // __ARC_JOBSTATEBES_H__ nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/AREXClient.cpp0000644000000000000000000000012213165644550022351 xustar000000000000000027 mtime=1507281256.705161 25 atime=1513200574.4177 30 ctime=1513200660.147749293 nordugrid-arc-5.4.2/src/hed/acc/ARC1/AREXClient.cpp0000644000175000002070000005046313165644550022430 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "JobStateARC1.h" #include "JobStateBES.h" #include "AREXClient.h" #ifdef CPPUNITTEST #include "../../libs/communication/test/SimulatorClasses.h" #define DelegationProviderSOAP DelegationProviderSOAPTest #endif #define BES_FACTORY_ACTIONS_BASE_URL "http://schemas.ggf.org/bes/2006/08/bes-factory/BESFactoryPortType/" namespace Arc { Logger AREXClient::logger(Logger::rootLogger, "A-REX-Client"); static void set_bes_namespaces(NS& ns) { ns["bes-factory"] = "http://schemas.ggf.org/bes/2006/08/bes-factory"; ns["wsa"] = 
"http://www.w3.org/2005/08/addressing"; ns["jsdl"] = "http://schemas.ggf.org/jsdl/2005/11/jsdl"; ns["jsdl-posix"] = "http://schemas.ggf.org/jsdl/2005/11/jsdl-posix"; ns["jsdl-hpcpa"] = "http://schemas.ggf.org/jsdl/2006/07/jsdl-hpcpa"; } static void set_arex_namespaces(NS& ns) { ns["a-rex"] = "http://www.nordugrid.org/schemas/a-rex"; ns["glue"] = "http://schemas.ogf.org/glue/2008/05/spec_2.0_d41_r01"; ns["glue2"] = "http://schemas.ogf.org/glue/2009/03/spec/2/0"; ns["glue3"] = "http://schemas.ogf.org/glue/2009/03/spec_2.0_r1"; ns["jsdl-arc"] = "http://www.nordugrid.org/ws/schemas/jsdl-arc"; ns["rp"] = "http://docs.oasis-open.org/wsrf/rp-2"; set_bes_namespaces(ns); } AREXClient::AREXClient(const URL& url, const MCCConfig& cfg, int timeout, bool arex_extensions) : client(NULL), rurl(url), cfg(cfg), timeout(timeout), arex_enabled(arex_extensions) { logger.msg(DEBUG, "Creating an A-REX client"); client = new ClientSOAP(cfg, url, timeout); if (!client) { logger.msg(VERBOSE, "Unable to create SOAP client used by AREXClient."); } if(arex_enabled) { set_arex_namespaces(arex_ns); } else { set_bes_namespaces(arex_ns); } } AREXClient::~AREXClient() { if (client) delete client; } bool AREXClient::delegation(XMLNode& op) { DelegationProviderSOAP* deleg; if (!cfg.credential.empty()) { deleg = new DelegationProviderSOAP(cfg.credential); } else { const std::string& cert = (!cfg.proxy.empty() ? cfg.proxy : cfg.cert); const std::string& key = (!cfg.proxy.empty() ? cfg.proxy : cfg.key); if (key.empty() || cert.empty()) { logger.msg(VERBOSE, "Failed locating credentials."); error_description = "Failed locating credentials for delegation to "+rurl.str(); return false; } deleg = new DelegationProviderSOAP(cert, key); } MCC_Status r = client->Load(); if(!r) { logger.msg(VERBOSE, "Failed initiate client connection."); error_description = "Failed initiating communication to "+rurl.str()+" - "+(std::string)r; delete deleg; return false; } MCC* entry = client->GetEntry(); if(!entry) { logger.msg(VERBOSE, "Client connection has no entry point."); error_description = "Internal error: failed to properly initiate communication object for "+rurl.str(); delete deleg; return false; } /* TODO: Enable password typing in case of cert and key. Currently when * using cert and key, one have to type password multiple times, which is * impracticable and should coordinated across execution. *DelegationProviderSOAP deleg(cert, key, (!cfg.proxy.empty() ? 
NULL : &std::cin)); */ logger.msg(VERBOSE, "Initiating delegation procedure"); if (!deleg->DelegateCredentialsInit(*entry,&(client->GetContext()))) { logger.msg(VERBOSE, "Failed to initiate delegation credentials"); error_description = "Internal error: failed to initiate delagtion at "+rurl.str(); // TODO: propagate error from DelegationProviderSOAP delete deleg; return false; } deleg->DelegatedToken(op); delete deleg; return true; } bool AREXClient::reconnect(void) { delete client; client = NULL; logger.msg(DEBUG, "Re-creating an A-REX client"); client = new ClientSOAP(cfg, rurl, timeout); //if (!client) { // logger.msg(VERBOSE, "Unable to create SOAP client used by AREXClient."); // error_description = "Internal error: unable to create object to handle connection to "+rurl.str(); // return false; //} if(arex_enabled) { set_arex_namespaces(arex_ns); } else { set_bes_namespaces(arex_ns); } return true; } bool AREXClient::process(PayloadSOAP& req, bool delegate, XMLNode& response, bool retry) { error_description = ""; if (!client) { logger.msg(VERBOSE, "AREXClient was not created properly."); // Should not happen. Happens if client = null (out of memory?) error_description = "Internal error: object is not in proper state."; return false; } logger.msg(VERBOSE, "Processing a %s request", req.Child(0).FullName()); if (delegate) { XMLNode op = req.Child(0); if(!delegation(op)) { delete client; client = NULL; // TODO: better way to check of retriable. if(!retry) return false; if(!reconnect()) return false; if(!delegation(op)) { delete client; client = NULL; return false; } } } WSAHeader header(req); header.To(rurl.str()); PayloadSOAP* resp = NULL; MCC_Status r; if (!(r = client->process(header.Action(), &req, &resp))) { error_description = (std::string)r; logger.msg(VERBOSE, "%s request failed", action); delete client; client = NULL; if(!retry) return false; if(!reconnect()) return false; return process(req,false,response,false); } if (resp == NULL) { logger.msg(VERBOSE, "No response from %s", rurl.str()); error_description = "No or malformed response received from "+rurl.str(); delete client; client = NULL; if(!retry) return false; if(!reconnect()) return false; return process(req,false,response,false); } if (resp->IsFault()) { logger.msg(VERBOSE, "%s request to %s failed with response: %s", action, rurl.str(), resp->Fault()->Reason()); error_description = "Fault received from "+rurl.str()+": "+resp->Fault()->Reason(); if(resp->Fault()->Code() != SOAPFault::Receiver) retry = false; std::string s; resp->GetXML(s); logger.msg(DEBUG, "XML response: %s", s); delete resp; delete client; client = NULL; if(!retry) return false; if(!reconnect()) return false; return process(req,false,response,false); } if (!(*resp)[action + "Response"]) { logger.msg(VERBOSE, "%s request to %s failed. 
No expected response.", action, rurl.str()); error_description = "No expected response received from "+rurl.str(); delete resp; return false; } (*resp)[action + "Response"].New(response); delete resp; return true; } bool AREXClient::submit(const std::string& jobdesc, std::string& jobid, bool delegate) { action = "CreateActivity"; logger.msg(VERBOSE, "Creating and sending submit request to %s", rurl.str()); // Create job request /* bes-factory:CreateActivity bes-factory:ActivityDocument jsdl:JobDefinition */ PayloadSOAP req(arex_ns); XMLNode op = req.NewChild("bes-factory:" + action); XMLNode act_doc = op.NewChild("bes-factory:ActivityDocument"); WSAHeader(req).Action(BES_FACTORY_ACTIONS_BASE_URL + action); act_doc.NewChild(XMLNode(jobdesc)); act_doc.Child(0).Namespaces(arex_ns); // Unify namespaces logger.msg(DEBUG, "Job description to be sent: %s", jobdesc); XMLNode response; if (!process(req, delegate, response)) return false; XMLNode xmlJobId; response["ActivityIdentifier"].New(xmlJobId); xmlJobId.GetDoc(jobid); return true; } bool AREXClient::stat(const std::string& jobid, Job& job) { std::string faultstring; logger.msg(VERBOSE, "Creating and sending job information query request to %s", rurl.str()); PayloadSOAP req(arex_ns); if(arex_enabled) { // TODO: use wsrf classes // AREX service //action = "QueryResourceProperties"; //std::string xpathquery = "//glue:Services/glue:ComputingService/glue:ComputingEndpoint/glue:ComputingActivities/glue:ComputingActivity/glue:ID[contains(.,'" + (std::string)(XMLNode(jobid)["ReferenceParameters"]["JobID"]) + "')]/.."; //req = *InformationRequest(XMLNode("" + xpathquery + "")).SOAP(); // GetActivityStatuses // ActivityIdentifier // ActivityStatusVerbosity action = "GetActivityStatuses"; XMLNode op = req.NewChild("bes-factory:" + action); op.NewChild(XMLNode(jobid)); op.NewChild("a-rex:ActivityStatusVerbosity") = "Full"; op.Namespaces(arex_ns); // Unify namespaces WSAHeader(req).Action(BES_FACTORY_ACTIONS_BASE_URL + action); } else { // Simple BES service // GetActivityStatuses // ActivityIdentifier action = "GetActivityStatuses"; XMLNode jobref = req.NewChild("bes-factory:" + action). NewChild(XMLNode(jobid)); jobref.Child(0).Namespaces(arex_ns); // Unify namespaces WSAHeader(req).Action(BES_FACTORY_ACTIONS_BASE_URL + action); } XMLNode response; if (!process(req, false, response)) return false; if (!arex_enabled) { XMLNode activity = response["Response"]["ActivityStatus"]; if(activity) { NS ns("a-rex","http://www.nordugrid.org/schemas/a-rex"); activity.Namespaces(ns); std::string state = activity.Attribute("state"); if(!state.empty()) { job.State = JobStateBES(state); } } if (!job.State) { logger.msg(VERBOSE, "Unable to retrieve status of job (%s)", job.JobID); return false; } return true; } // A-REX publishes GLUE2 information which is parsed by the Job& operator=(XMLNode). // See the ARC1ClusterInfo.pm script for details on what is actually published. //job = response["ComputingActivity"]; XMLNode activity = response["Response"]["ActivityStatus"]; if(activity) { XMLNode gactivity = activity["ComputingActivity"]; if(gactivity) { job.SetFromXML(gactivity); // Fetch the proper restart state. if (gactivity["RestartState"]) { for (XMLNode n = response["ComputingActivity"]["RestartState"]; n; ++n) { std::list gluestate; tokenize((std::string)n, gluestate, ":"); if (!gluestate.empty() && gluestate.front() == "nordugrid") { job.RestartState = JobStateARC1(((std::string)n).substr(10)); break; } } } } // Fetch the proper state. 
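/* A sketch of the ActivityStatus element the code below expects from an A-REX
 * GetActivityStatusesResponse; the values shown are the ones used in
 * AREXClientTest::StatTest, not a captured response:
 *
 *   <Response>
 *     <ActivityStatus state="Executing">
 *       <a-rex:State>Executing</a-rex:State>
 *       <LRMSState>R</LRMSState>
 *       <glue:State>INLRMS:Q</glue:State>
 *       <ComputingActivity> ... optional GLUE2 job record ... </ComputingActivity>
 *     </ActivityStatus>
 *   </Response>
 *
 * glue:State takes precedence when present; otherwise a-rex:State is used,
 * rewritten as "INLRMS:<LRMSState>" when an LRMS state is also reported. */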
if (activity["glue:State"]) { job.State = JobStateARC1((std::string)activity["glue:State"]); } else if (activity["a-rex:State"]) { if (activity["LRMSState"]) { job.State = JobStateARC1("INLRMS:" + (std::string)activity["LRMSState"]); } else { job.State = JobStateARC1((std::string)activity["a-rex:State"]); } } } if (!job.State) { logger.msg(VERBOSE, "Unable to retrieve status of job (%s)", job.JobID); } return (bool)job.State; } bool AREXClient::sstat(XMLNode& response) { if(arex_enabled) { action = "QueryResourceProperties"; logger.msg(VERBOSE, "Creating and sending service information query request to %s", rurl.str()); PayloadSOAP req(*InformationRequest(XMLNode("//glue:ComputingService | //glue2:ComputingService | //glue3:ComputingService")).SOAP()); req.Child(0).Namespaces(arex_ns); if (!process(req, false, response)) return false; } else { // GetFactoryAttributesDocument PayloadSOAP req(arex_ns); action = "GetFactoryAttributesDocument"; req.NewChild("bes-factory:" + action); WSAHeader(req).Action(BES_FACTORY_ACTIONS_BASE_URL + action); if (!process(req, false, response)) return false; } return true; } bool AREXClient::listServicesFromISIS(std::list< std::pair >& services) { if(!arex_enabled) return false; action = "Query"; logger.msg(VERBOSE, "Creating and sending ISIS information query request to %s", rurl.str()); PayloadSOAP req(NS("isis", "http://www.nordugrid.org/schemas/isis/2007/06")); req.NewChild("isis:" + action).NewChild("isis:QueryString") = "/RegEntry/SrcAdv[Type=\"org.nordugrid.execution.arex\"]"; WSAHeader(req).Action("http://www.nordugrid.org/schemas/isis/2007/06/Query/QueryRequest"); XMLNode response; if (!process(req, false, response)) return false; if (XMLNode n = response["RegEntry"]) for (; n; ++n) { if ((std::string)n["SrcAdv"]["Type"] == "org.nordugrid.execution.arex") { //This check is right now superfluos but in the future a wider query might be used services.push_back(std::pair(URL((std::string)n["SrcAdv"]["EPR"]["Address"]), COMPUTING)); } else logger.msg(DEBUG, "Service %s of type %s ignored", (std::string)n["MetaSrcAdv"]["ServiceID"], (std::string)n["SrcAdv"]["Type"]); } else logger.msg(VERBOSE, "No execution services registered in the index service"); return true; } bool AREXClient::kill(const std::string& jobid) { action = "TerminateActivities"; logger.msg(VERBOSE, "Creating and sending terminate request to %s", rurl.str()); PayloadSOAP req(arex_ns); XMLNode jobref = req.NewChild("bes-factory:" + action).NewChild(XMLNode(jobid)); WSAHeader(req).Action(BES_FACTORY_ACTIONS_BASE_URL + action); XMLNode response; if (!process(req, false, response)) return false; if ((std::string)response["Response"]["Terminated"] != "true") { logger.msg(ERROR, "Job termination failed"); return false; } return true; } bool AREXClient::clean(const std::string& jobid) { if(!arex_enabled) return false; action = "ChangeActivityStatus"; logger.msg(VERBOSE, "Creating and sending clean request to %s", rurl.str()); PayloadSOAP req(arex_ns); XMLNode op = req.NewChild("a-rex:" + action); op.NewChild(XMLNode(jobid)); XMLNode jobstate = op.NewChild("a-rex:NewStatus"); jobstate.NewAttribute("bes-factory:state") = "Finished"; jobstate.NewChild("a-rex:state") = "Deleted"; // Send clean request XMLNode response; if (!process(req, false, response)) return false; /* * It is not clear how (or if) the response should be interpreted. * Currently response contains status of job before invoking requst. It is * unclear if this is the desired behaviour. 
* See trunk/src/services/a-rex/change_activity_status.cpp ????if ((std::string)response["NewStatus"]["state"] != "Deleted") {???? logger.msg(VERBOSE, "Job cleaning failed: Wrong response???"); return false; } */ return true; } bool AREXClient::getdesc(const std::string& jobid, std::string& jobdesc) { action = "GetActivityDocuments"; logger.msg(VERBOSE, "Creating and sending job description retrieval request to %s", rurl.str()); PayloadSOAP req(arex_ns); req.NewChild("bes-factory:" + action).NewChild(XMLNode(jobid)); WSAHeader(req).Action(BES_FACTORY_ACTIONS_BASE_URL + action); XMLNode response; if (!process(req, false, response)) return false; XMLNode xmlJobDesc; response["Response"]["JobDefinition"].New(xmlJobDesc); xmlJobDesc.GetDoc(jobdesc); return true; } bool AREXClient::migrate(const std::string& jobid, const std::string& jobdesc, bool forcemigration, std::string& newjobid, bool delegate) { if(!arex_enabled) return false; action = "MigrateActivity"; logger.msg(VERBOSE, "Creating and sending job migrate request to %s", rurl.str()); // Create migrate request /* bes-factory:MigrateActivity bes-factory:ActivityIdentifier bes-factory:ActivityDocument jsdl:JobDefinition */ PayloadSOAP req(arex_ns); XMLNode op = req.NewChild("a-rex:" + action); XMLNode act_doc = op.NewChild("bes-factory:ActivityDocument"); op.NewChild(XMLNode(jobid)); op.NewChild("a-rex:ForceMigration") = (forcemigration ? "true" : "false"); act_doc.NewChild(XMLNode(jobdesc)); act_doc.Child(0).Namespaces(arex_ns); // Unify namespaces logger.msg(DEBUG, "Job description to be sent: %s", jobdesc); XMLNode response; if (!process(req, delegate, response)) return false; XMLNode xmlNewJobId; response["ActivityIdentifier"].New(xmlNewJobId); xmlNewJobId.GetDoc(newjobid); return true; } bool AREXClient::resume(const std::string& jobid) { if(!arex_enabled) return false; action = "ChangeActivityStatus"; logger.msg(VERBOSE, "Creating and sending job resume request to %s", rurl.str()); PayloadSOAP req(arex_ns); XMLNode op = req.NewChild("a-rex:" + action); op.NewChild(XMLNode(jobid)); XMLNode jobstate = op.NewChild("a-rex:NewStatus"); jobstate.NewAttribute("bes-factory:state") = "Running"; // Not supporting resume into user-defined state jobstate.NewChild("a-rex:state") = ""; XMLNode response; if (!process(req, true, response)) return false; /* * It is not clear how (or if) the response should be interpreted. * Currently response contains status of job before invoking requst. It is * unclear if this is the desired behaviour. * See trunk/src/services/a-rex/change_activity_status.cpp ????if ((std::string)response["NewStatus"]["state"] != "Running") {???? 
logger.msg(VERBOSE, "Job resuming failed: Wrong response???"); return false; } */ return true; } void AREXClient::createActivityIdentifier(const URL& jobid, std::string& activityIdentifier) { PathIterator pi(jobid.Path(), true); URL url(jobid); url.ChangePath(*pi); NS ns; ns["a-rex"] = "http://www.nordugrid.org/schemas/a-rex"; ns["bes-factory"] = "http://schemas.ggf.org/bes/2006/08/bes-factory"; ns["wsa"] = "http://www.w3.org/2005/08/addressing"; ns["jsdl"] = "http://schemas.ggf.org/jsdl/2005/11/jsdl"; ns["jsdl-posix"] = "http://schemas.ggf.org/jsdl/2005/11/jsdl-posix"; ns["jsdl-arc"] = "http://www.nordugrid.org/ws/schemas/jsdl-arc"; ns["jsdl-hpcpa"] = "http://schemas.ggf.org/jsdl/2006/07/jsdl-hpcpa"; XMLNode id(ns, "ActivityIdentifier"); id.NewChild("wsa:Address") = url.str(); id.NewChild("wsa:ReferenceParameters").NewChild("a-rex:JobID") = pi.Rest(); id.GetXML(activityIdentifier); } // ----------------------------------------------------------------------------- // TODO: does it need locking? AREXClients::AREXClients(const UserConfig& usercfg):usercfg_(&usercfg) { } AREXClients::~AREXClients(void) { std::multimap::iterator it; for (it = clients_.begin(); it != clients_.end(); ++it) { delete it->second; } } AREXClient* AREXClients::acquire(const URL& url, bool arex_features) { std::multimap::iterator it = clients_.find(url); if ( it != clients_.end() ) { // If AREXClient is already existing for the // given URL then return with that AREXClient* client = it->second; client->arexFeatures(arex_features); clients_.erase(it); return client; } // Else create a new one and return with that MCCConfig cfg; if(usercfg_) usercfg_->ApplyToConfig(cfg); AREXClient* client = new AREXClient(url, cfg, usercfg_?usercfg_->Timeout():0, arex_features); return client; } void AREXClients::release(AREXClient* client) { if(!client) return; if(!*client) { delete client; return; } // TODO: maybe strip path from URL? clients_.insert(std::pair(client->url(),client)); } void AREXClients::SetUserConfig(const UserConfig& uc) { // Changing user configuration may change identity. // Hence all open connections become invalid. 
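/* A minimal usage sketch for the AREXClients pool, mirroring how the plugins
 * in this directory drive it; serviceURL, jobURL and usercfg are assumed to be
 * provided by the caller and headers are omitted:
 *
 *   Arc::AREXClients clients(usercfg);
 *   Arc::AutoPointer<Arc::AREXClient> ac(clients.acquire(serviceURL, true));
 *   std::string idstr;
 *   Arc::AREXClient::createActivityIdentifier(jobURL, idstr);
 *   Arc::Job job;
 *   if (!ac->stat(idstr, job)) {
 *     // inspect ac->failure() for the error description
 *   }
 *   clients.release(ac.Release());   // return the connection to the pool
 */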
usercfg_ = &uc; while(true) { std::multimap::iterator it = clients_.begin(); if(it == clients_.end()) break; delete it->second; clients_.erase(it); } } } nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/JobStateARC1.h0000644000000000000000000000012212045235201022223 xustar000000000000000027 mtime=1351957121.246634 25 atime=1513200574.4147 30 ctime=1513200660.149749317 nordugrid-arc-5.4.2/src/hed/acc/ARC1/JobStateARC1.h0000644000175000002070000000055012045235201022272 0ustar00mockbuildmock00000000000000#ifndef __ARC_JOBSTATEARC1_H__ #define __ARC_JOBSTATEARC1_H__ #include namespace Arc { class JobStateARC1 : public JobState { public: JobStateARC1(const std::string& state) : JobState(state, &StateMap) {} static JobState::StateType StateMap(const std::string& state); }; } #endif // __ARC_JOBSTATEARC1_H__ nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/AREXClient.h0000644000000000000000000000012213165644550022016 xustar000000000000000027 mtime=1507281256.705161 25 atime=1513200574.4017 30 ctime=1513200660.148749305 nordugrid-arc-5.4.2/src/hed/acc/ARC1/AREXClient.h0000644000175000002070000001267213165644550022075 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __AREX_CLIENT__ #define __AREX_CLIENT__ #include #include #include #include #include #include #include namespace Arc { #ifdef CPPUNITTEST #define ClientSOAP ClientSOAPTest #define private public #endif class ClientSOAP; class Config; class Logger; class MCCConfig; class PayloadSOAP; //! A client class for the A-REX service. /*! This class is a client for the A-REX service (Arc Resource-coupled EXecution service). It provides methods for three operations on an A-REX service: - Job submission - Job status queries - Job termination */ class AREXClient { public: //! The constructor for the AREXClient class. /*! This is the constructor for the AREXClient class. It creates an A-REX client that corresponds to a specific A-REX service. @param url The URL of the A-REX service. @param cfg An MCC configuration object. */ AREXClient(const URL& url, const MCCConfig& cfg, int timeout, bool arex_features = true); //! The destructor. /*! This is the destructor. It does what destructors usually do, cleans up... */ ~AREXClient(); operator bool(void) { return (client != NULL); } bool operator!(void) { return (client == NULL); } //! Submit a job. /*! This method submits a job to the A-REX service corresponding to this client instance. @param jobdesc A string containing the job description. @param jobid The Job ID of the the submitted job. @return true on success */ bool submit(const std::string& jobdesc, std::string& jobid, bool delegate = false); //! Query the status of a job. /*! This method queries the A-REX service about the status of a job. @param jobid The Job ID of the job. @param status The status of the job. @return true on success */ // bool stat(const std::string& jobid, std::string& status); bool stat(const std::string& jobid, Job& job); //! Terminates a job. /*! This method sends a request to the A-REX service to terminate a job. @param jobid The Job ID of the job to terminate. @return true on success */ bool kill(const std::string& jobid); //! Removes a job. /*! This method sends a request to the A-REX service to remove a job from it's pool. If job is running it will be killed by service as well. @param jobid The Job ID of the job to remove. @return true on success */ bool clean(const std::string& jobid); //! Query the status of a service. /*! This method queries the A-REX service about it's status. 
@param status The XML document representing status of the service. @return true on success */ bool sstat(XMLNode& status); //! Query the description of a job. /*! This method queries the A-REX service about the description of a job. @param jobid The Job ID of the job. @param jobdesc The description of the job. @return true on success */ bool getdesc(const std::string& jobid, std::string& jobdesc); //! Migrate a job. /*! This method submits a migrate request and the corresponding job to the AREX-service. @param jobid The Job ID of the job to migrate. @param jobdesc The job description of the job to migrate. @param newjobid The Job ID of returned by the AREX-client on success. @return true on success */ bool migrate(const std::string& jobid, const std::string& jobdesc, bool forcemigration, std::string& newjobid, bool delegate = false); bool listServicesFromISIS(std::list< std::pair >& services); bool resume(const std::string& jobid); //! Create a activity identifier. /*! This is a convenience method to construct a activity identifier used in BES requests. @param jobid The URL of the job to construct the activity identifier from. @param activityIdentifier The created activity identifier will be stored in this object. */ static void createActivityIdentifier(const URL& jobid, std::string& activityIdentifier); ClientSOAP* SOAP(void) { return client; } const URL& url(void) const { return rurl; } bool delegation(XMLNode& operation); void arexFeatures(bool val) { arex_enabled = val; }; const std::string& failure(void) const { return error_description; } private: bool process(PayloadSOAP& req, bool delegate, XMLNode& response, bool retry = true); bool reconnect(void); ClientSOAP *client; //! Namespaces. /*! A map containing namespaces. */ NS arex_ns; URL rurl; const MCCConfig cfg; std::string action; int timeout; bool arex_enabled; std::string error_description; //! A logger for the A-REX client. /*! This is a logger to which all logging messages from the A-REX client are sent. 
*/ static Logger logger; }; class AREXClients { std::multimap clients_; const UserConfig* usercfg_; public: AREXClients(const UserConfig& usercfg); ~AREXClients(void); AREXClient* acquire(const URL& url, bool arex_features); void release(AREXClient* client); void SetUserConfig(const UserConfig& uc); }; } #endif nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/TargetInformationRetrieverPluginWSRFGLUE2.h0000644000000000000000000000012212045235201030045 xustar000000000000000027 mtime=1351957121.246634 25 atime=1513200574.4067 30 ctime=1513200660.151749342 nordugrid-arc-5.4.2/src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.h0000644000175000002070000000240212045235201030112 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_TARGETINFORMATIONRETRIEVERWSRFGLUE2_H__ #define __ARC_TARGETINFORMATIONRETRIEVERWSRFGLUE2_H__ #include #include namespace Arc { class ExecutionTarget; class Logger; class URL; class UserConfig; class XMLNode; class TargetInformationRetrieverPluginWSRFGLUE2 : public TargetInformationRetrieverPlugin { public: TargetInformationRetrieverPluginWSRFGLUE2(PluginArgument* parg): TargetInformationRetrieverPlugin(parg) { supportedInterfaces.push_back("org.nordugrid.wsrfglue2"); }; ~TargetInformationRetrieverPluginWSRFGLUE2() {}; static Plugin* Instance(PluginArgument *arg) { return new TargetInformationRetrieverPluginWSRFGLUE2(arg); }; virtual EndpointQueryingStatus Query(const UserConfig&, const Endpoint&, std::list&, const EndpointQueryOptions&) const; virtual bool isEndpointNotSupported(const Endpoint&) const; static void ExtractTargets(const URL&, XMLNode, std::list&); private: static bool EntryToInt(const URL& url, XMLNode entry, int& i); static Logger logger; }; } // namespace Arc #endif // __ARC_TARGETINFORMATIONRETRIEVERWSRFGLUE2_H__ nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/SubmitterPluginARC1.cpp0000644000000000000000000000012213165644550024217 xustar000000000000000027 mtime=1507281256.705161 25 atime=1513200574.4037 30 ctime=1513200660.143749244 nordugrid-arc-5.4.2/src/hed/acc/ARC1/SubmitterPluginARC1.cpp0000644000175000002070000003143613165644550024275 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include "SubmitterPluginARC1.h" #include "AREXClient.h" namespace Arc { Logger SubmitterPluginARC1::logger(Logger::getRootLogger(), "SubmitterPlugin.ARC1"); void SubmitterPluginARC1::SetUserConfig(const UserConfig& uc) { SubmitterPlugin::SetUserConfig(uc); clients.SetUserConfig(uc); } bool SubmitterPluginARC1::isEndpointNotSupported(const std::string& endpoint) const { const std::string::size_type pos = endpoint.find("://"); return pos != std::string::npos && lower(endpoint.substr(0, pos)) != "http" && lower(endpoint.substr(0, pos)) != "https"; } SubmissionStatus SubmitterPluginARC1::Submit(const std::list& jobdescs, const std::string& endpoint, EntityConsumer& jc, std::list& notSubmitted) { URL url((endpoint.find("://") == std::string::npos ? 
"https://" : "") + endpoint, false, 443, "/arex"); // TODO: Determine extended BES interface interface (A-REX WS) bool arex_features = true; //et.ComputingService->Type == "org.nordugrid.execution.arex"; AutoPointer ac(clients.acquire(url, arex_features)); SubmissionStatus retval; for (std::list::const_iterator it = jobdescs.begin(); it != jobdescs.end(); ++it) { JobDescription preparedjobdesc(*it); if (!preparedjobdesc.Prepare()) { logger.msg(INFO, "Failed to prepare job description"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; continue; } // !! TODO: For regular BES ordinary JSDL is needed - keeping nordugrid:jsdl so far std::string product; JobDescriptionResult ures = preparedjobdesc.UnParse(product, "nordugrid:jsdl"); if (!ures) { logger.msg(INFO, "Unable to submit job. Job description is not valid in the %s format: %s", "nordugrid:jsdl", ures.str()); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; continue; } std::string idFromEndpoint; if (!ac->submit(product, idFromEndpoint, arex_features && (url.Protocol() == "https"))) { notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } if (idFromEndpoint.empty()) { logger.msg(INFO, "No job identifier returned by BES service"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } XMLNode activityIdentifier(idFromEndpoint); URL jobid; if (activityIdentifier["ReferenceParameters"]["a-rex:JobID"]) { // Service seems to be A-REX. Extract job ID, and upload files. jobid = URL((std::string)(activityIdentifier["ReferenceParameters"]["JobSessionDir"])); // compensate for time between request and response on slow networks URL sessionurl = jobid; sessionurl.AddOption("threads=3",false); sessionurl.AddOption("encryption=optional",false); if(arex_features) { sessionurl.AddOption("httpputpartial=yes",false); sessionurl.AddOption("blocksize=5242880",true); } if (!PutFiles(preparedjobdesc, sessionurl)) { logger.msg(INFO, "Failed uploading local input files"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } } else { if (activityIdentifier["Address"]) { jobid = URL((std::string)activityIdentifier["Address"]); } else { jobid = url; } Time t; // Since BES doesn't specify a simple unique ID, but rather an EPR, a unique non-reproduceable (to arcsync) job ID is created below. 
jobid.ChangePath(jobid.Path() + "/BES" + tostring(t.GetTime()) + tostring(t.GetTimeNanoseconds())); } Job j; AddJobDetails(preparedjobdesc, j); // Proposed mandatory attributes for ARC 3.0 j.JobID = jobid.fullstr(); j.ServiceInformationURL = url; j.ServiceInformationInterfaceName = "org.nordugrid.wsrfglue2"; j.JobStatusURL = url; j.JobStatusInterfaceName = "org.nordugrid.xbes"; j.JobManagementURL = url; j.JobManagementInterfaceName = "org.nordugrid.xbes"; j.IDFromEndpoint = (std::string)activityIdentifier["ReferenceParameters"]["a-rex:JobID"]; jc.addEntity(j); } clients.release(ac.Release()); return retval; } SubmissionStatus SubmitterPluginARC1::Submit(const std::list& jobdescs, const ExecutionTarget& et, EntityConsumer& jc, std::list& notSubmitted) { URL url(et.ComputingEndpoint->URLString); bool arex_features = (et.ComputingService->Type == "org.nordugrid.execution.arex") || (et.ComputingService->Type == "org.nordugrid.arex"); AutoPointer ac(clients.acquire(url, arex_features)); SubmissionStatus retval; for (std::list::const_iterator it = jobdescs.begin(); it != jobdescs.end(); ++it) { JobDescription preparedjobdesc(*it); if (arex_features && !preparedjobdesc.Prepare(et)) { logger.msg(INFO, "Failed to prepare job description to target resources"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; continue; } // !! TODO: For regular BES ordinary JSDL is needed - keeping nordugrid:jsdl so far std::string product; JobDescriptionResult ures = preparedjobdesc.UnParse(product, "nordugrid:jsdl"); if (!ures) { logger.msg(INFO, "Unable to submit job. Job description is not valid in the %s format: %s", "nordugrid:jsdl", ures.str()); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; continue; } std::string idFromEndpoint; if (!ac->submit(product, idFromEndpoint, arex_features && (url.Protocol() == "https"))) { notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } if (idFromEndpoint.empty()) { logger.msg(INFO, "No job identifier returned by BES service"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } XMLNode activityIdentifier(idFromEndpoint); URL jobid; if (activityIdentifier["ReferenceParameters"]["a-rex:JobID"]) { // Service seems to be A-REX. Extract job ID, and upload files. jobid = URL((std::string)(activityIdentifier["ReferenceParameters"]["JobSessionDir"])); // compensate for time between request and response on slow networks URL sessionurl = jobid; sessionurl.AddOption("threads=3",false); sessionurl.AddOption("encryption=optional",false); if(arex_features) { sessionurl.AddOption("httpputpartial=yes",false); sessionurl.AddOption("blocksize=5242880",true); } if (!PutFiles(preparedjobdesc, sessionurl)) { logger.msg(INFO, "Failed uploading local input files"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } } else { if (activityIdentifier["Address"]) { jobid = URL((std::string)activityIdentifier["Address"]); } else { jobid = url; } Time t; // Since BES doesn't specify a simple unique ID, but rather an EPR, a unique non-reproduceable (to arcsync) job ID is created below. 
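/* An illustrative example of the synthesized BES job ID (hypothetical host and
 * timestamp; only the format follows the statement below): the endpoint path
 * is extended with "/BES", the epoch seconds and the nanosecond remainder,
 * e.g.
 *
 *   https://bes.example.org:8443/bes-factory/BES1513200658123456789
 *
 * Such IDs cannot be reconstructed later (hence "non-reproduceable" to
 * arcsync), unlike the a-rex:JobID and session directory returned by A-REX
 * and handled in the branch above. */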
jobid.ChangePath(jobid.Path() + "/BES" + tostring(t.GetTime()) + tostring(t.GetTimeNanoseconds())); } Job j; AddJobDetails(preparedjobdesc, j); // Proposed mandatory attributes for ARC 3.0 j.JobID = jobid.fullstr(); j.ServiceInformationURL = url; j.ServiceInformationInterfaceName = "org.nordugrid.wsrfglue2"; j.JobStatusURL = url; j.JobStatusInterfaceName = "org.nordugrid.xbes"; j.JobManagementURL = url; j.JobManagementInterfaceName = "org.nordugrid.xbes"; j.IDFromEndpoint = (std::string)activityIdentifier["ReferenceParameters"]["a-rex:JobID"]; j.StageInDir = jobid; j.StageOutDir = jobid; j.SessionDir = jobid; jc.addEntity(j); } clients.release(ac.Release()); return retval; } bool SubmitterPluginARC1::Migrate(const std::string& jobid, const JobDescription& jobdesc, const ExecutionTarget& et, bool forcemigration, Job& job) { URL url(et.ComputingEndpoint->URLString); AutoPointer ac(clients.acquire(url,true)); std::string idstr; AREXClient::createActivityIdentifier(jobid, idstr); JobDescription preparedjobdesc(jobdesc); // Modify the location of local files and files residing in a old session directory. for (std::list::iterator it = preparedjobdesc.DataStaging.InputFiles.begin(); it != preparedjobdesc.DataStaging.InputFiles.end(); it++) { if (!it->Sources.front() || it->Sources.front().Protocol() == "file") { it->Sources.front() = URL(jobid + "/" + it->Name); } else { // URL is valid, and not a local file. Check if the source reside at a // old job session directory. const size_t foundRSlash = it->Sources.front().str().rfind('/'); if (foundRSlash == std::string::npos) continue; const std::string uriPath = it->Sources.front().str().substr(0, foundRSlash); // Check if the input file URI is pointing to a old job session directory. for (std::list::const_iterator itAOID = preparedjobdesc.Identification.ActivityOldID.begin(); itAOID != preparedjobdesc.Identification.ActivityOldID.end(); itAOID++) { if (uriPath == *itAOID) { it->Sources.front() = URL(jobid + "/" + it->Name); break; } } } } if (!preparedjobdesc.Prepare(et)) { logger.msg(INFO, "Failed adapting job description to target resources"); clients.release(ac.Release()); return false; } // Add ActivityOldID. preparedjobdesc.Identification.ActivityOldID.push_back(jobid); std::string product; JobDescriptionResult ures = preparedjobdesc.UnParse(product, "nordugrid:jsdl"); if (!ures) { logger.msg(INFO, "Unable to migrate job. 
Job description is not valid in the %s format: %s", "nordugrid:jsdl", ures.str()); clients.release(ac.Release()); return false; } std::string sNewjobid; if (!ac->migrate(idstr, product, forcemigration, sNewjobid, url.Protocol() == "https")) { clients.release(ac.Release()); return false; } if (sNewjobid.empty()) { logger.msg(INFO, "No job identifier returned by A-REX"); clients.release(ac.Release()); return false; } XMLNode xNewjobid(sNewjobid); URL newjobid((std::string)(xNewjobid["ReferenceParameters"]["JobSessionDir"])); URL sessionurl = newjobid; sessionurl.AddOption("threads=3",false); sessionurl.AddOption("encryption=optional",false); sessionurl.AddOption("httpputpartial=yes",false); // for A-REX sessionurl.AddOption("blocksize=5242880",true); if (!PutFiles(preparedjobdesc, sessionurl)) { logger.msg(INFO, "Failed uploading local input files"); clients.release(ac.Release()); return false; } AddJobDetails(preparedjobdesc, job); // Proposed mandatory attributes for ARC 3.0 job.JobID = newjobid; job.ServiceInformationURL = url; job.ServiceInformationInterfaceName = "org.nordugrid.wsrfglue2"; job.JobStatusURL = url; job.JobStatusInterfaceName = "org.nordugrid.xbes"; job.JobManagementURL = url; job.JobManagementInterfaceName = "org.nordugrid.xbes"; job.IDFromEndpoint = (std::string)xNewjobid["ReferenceParameters"]["a-rex:JobID"]; job.StageInDir = sessionurl; job.StageOutDir = sessionurl; job.SessionDir = sessionurl; clients.release(ac.Release()); return true; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/JobListRetrieverPluginARC1.h0000644000000000000000000000012212045235201025125 xustar000000000000000027 mtime=1351957121.246634 25 atime=1513200574.4157 30 ctime=1513200660.145749268 nordugrid-arc-5.4.2/src/hed/acc/ARC1/JobListRetrieverPluginARC1.h0000644000175000002070000000164412045235201025201 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOBLISTRETRIEVERPLUGINWSRFGLUE2_H__ #define __ARC_JOBLISTRETRIEVERPLUGINWSRFGLUE2_H__ #include #include namespace Arc { class Logger; class JobListRetrieverPluginARC1 : public JobListRetrieverPlugin { public: JobListRetrieverPluginARC1(PluginArgument* parg): JobListRetrieverPlugin(parg) { supportedInterfaces.push_back("org.nordugrid.wsrfglue2"); } virtual ~JobListRetrieverPluginARC1() {} static Plugin* Instance(PluginArgument *arg) { return new JobListRetrieverPluginARC1(arg); } virtual EndpointQueryingStatus Query(const UserConfig&, const Endpoint&, std::list&, const EndpointQueryOptions&) const; virtual bool isEndpointNotSupported(const Endpoint&) const; private: static Logger logger; }; } // namespace Arc #endif // __ARC_JOBLISTRETRIEVERPLUGINWSRFGLUE2_H__ nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/TargetInformationRetrieverPluginWSRFGLUE2.cpp0000644000000000000000000000012113024224727030410 xustar000000000000000026 mtime=1481714135.79494 25 atime=1513200574.4037 30 ctime=1513200660.150749329 nordugrid-arc-5.4.2/src/hed/acc/ARC1/TargetInformationRetrieverPluginWSRFGLUE2.cpp0000644000175000002070000005346313024224727030473 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include "JobStateBES.h" #include "JobStateARC1.h" #include "AREXClient.h" #include "TargetInformationRetrieverPluginWSRFGLUE2.h" namespace Arc { Logger TargetInformationRetrieverPluginWSRFGLUE2::logger(Logger::getRootLogger(), "TargetInformationRetrieverPlugin.WSRFGLUE2"); bool 
TargetInformationRetrieverPluginWSRFGLUE2::isEndpointNotSupported(const Endpoint& endpoint) const { const std::string::size_type pos = endpoint.URLString.find("://"); if (pos != std::string::npos) { const std::string proto = lower(endpoint.URLString.substr(0, pos)); return ((proto != "http") && (proto != "https")); } return false; } static URL CreateURL(std::string service) { std::string::size_type pos1 = service.find("://"); if (pos1 == std::string::npos) { service = "https://" + service; } else { std::string proto = lower(service.substr(0,pos1)); if((proto != "http") && (proto != "https")) return URL(); } // Default port other than 443? // Default path? return service; } EndpointQueryingStatus TargetInformationRetrieverPluginWSRFGLUE2::Query(const UserConfig& uc, const Endpoint& cie, std::list& csList, const EndpointQueryOptions&) const { logger.msg(DEBUG, "Querying WSRF GLUE2 computing info endpoint."); URL url(CreateURL(cie.URLString)); if (!url) { return EndpointQueryingStatus(EndpointQueryingStatus::FAILED,"URL "+cie.URLString+" can't be processed"); } MCCConfig cfg; uc.ApplyToConfig(cfg); AREXClient ac(url, cfg, uc.Timeout(), true); // Temporary //AREXClient ac(url, cfg, uc.Timeout(), /* thrarg->flavour == "ARC1" */); // TIR equivalent XMLNode servicesQueryResponse; if (!ac.sstat(servicesQueryResponse)) { return EndpointQueryingStatus(EndpointQueryingStatus::FAILED,ac.failure()); } ExtractTargets(url, servicesQueryResponse, csList); for (std::list::iterator it = csList.begin(); it != csList.end(); it++) { (*it)->InformationOriginEndpoint = cie; } if (!csList.empty()) return EndpointQueryingStatus(EndpointQueryingStatus::SUCCESSFUL); return EndpointQueryingStatus(EndpointQueryingStatus::FAILED,"Query returned no endpoints"); } void TargetInformationRetrieverPluginWSRFGLUE2::ExtractTargets(const URL& url, XMLNode response, std::list& csList) { /* * A-REX will not return multiple ComputingService elements, but if the * response comes from an index server then there might be multiple. 
*/ for (XMLNode GLUEService = response["ComputingService"]; GLUEService; ++GLUEService) { ComputingServiceType cs; AdminDomainType& AdminDomain = cs.AdminDomain; AdminDomain->Name = url.Host(); if (GLUEService["Capability"]) { for (XMLNode n = GLUEService["Capability"]; n; ++n) { cs->Capability.insert((std::string)n); } } if (GLUEService["Type"]) { cs->Type = (std::string)GLUEService["Type"]; } else { logger.msg(VERBOSE, "The Service doesn't advertise its Type."); } if (GLUEService["QualityLevel"]) { cs->QualityLevel = (std::string)GLUEService["QualityLevel"]; } else { logger.msg(VERBOSE, "The Service doesn't advertise its Quality Level."); } EntryToInt(url, GLUEService["TotalJobs"], cs->TotalJobs); EntryToInt(url, GLUEService["RunningJobs"], cs->RunningJobs); EntryToInt(url, GLUEService["WaitingJobs"], cs->WaitingJobs); EntryToInt(url, GLUEService["PreLRMSWaitingJobs"], cs->PreLRMSWaitingJobs); // The GLUE2 specification does not have attribute ComputingService.LocalRunningJobs //if (GLUEService["LocalRunningJobs"]) { // cs->LocalRunningJobs = stringtoi((std::string)GLUEService["LocalRunningJobs"]); //} // The GLUE2 specification does not have attribute ComputingService.LocalWaitingJobs //if (GLUEService["LocalWaitingJobs"]) { // cs->LocalWaitingJobs = stringtoi((std::string)GLUEService["LocalWaitingJobs"]); //} // The GLUE2 specification does not have attribute ComputingService.LocalWaitingJobs //if (GLUEService["LocalSuspendedJobs"]) { // cs->LocalWaitingJobs = stringtoi((std::string)GLUEService["LocalSuspendedJobs"]); //} logger.msg(VERBOSE, "Generating A-REX target: %s", AdminDomain->Name); int endpointID = 0; for(XMLNode xmlCENode = GLUEService["ComputingEndpoint"]; (bool)xmlCENode; ++xmlCENode) { if ((xmlCENode["InterfaceName"] == "XBES") || (xmlCENode["InterfaceName"] == "BES") || (xmlCENode["Interface"] == "XBES") || (xmlCENode["Interface"] == "BES") || (xmlCENode["InterfaceName"] == "org.ogf.bes") || (xmlCENode["InterfaceName"] == "ogf.bes")) { // support for previous A-REX version, and fixing the InterfaceName xmlCENode["InterfaceName"] = "org.ogf.bes"; } for (XMLNode n = xmlCENode["InterfaceExtension"]; n; ++n) { if ((std::string)n == "http://www.nordugrid.org/schemas/a-rex") { // support for previous A-REX version, and fixing the InterfaceExtension n = "urn:org.nordugrid.xbes"; } } ComputingEndpointType ComputingEndpoint; if (xmlCENode["URL"]) { ComputingEndpoint->URLString = (std::string)xmlCENode["URL"]; } else { logger.msg(VERBOSE, "The ComputingEndpoint has no URL."); } if (xmlCENode["HealthState"]) { ComputingEndpoint->HealthState = (std::string)xmlCENode["HealthState"]; } else { logger.msg(VERBOSE, "The Service advertises no Health State."); } if (xmlCENode["HealthStateInfo"]) { ComputingEndpoint->HealthStateInfo = (std::string)xmlCENode["HealthStateInfo"]; } if (GLUEService["ID"]) { cs->ID = (std::string)GLUEService["ID"]; } if (GLUEService["Name"]) { cs->Name = (std::string)GLUEService["Name"]; } if (xmlCENode["Capability"]) { for (XMLNode n = xmlCENode["Capability"]; n; ++n) { ComputingEndpoint->Capability.insert((std::string)n); } } if (xmlCENode["QualityLevel"]) { ComputingEndpoint->QualityLevel = (std::string)xmlCENode["QualityLevel"]; } if (xmlCENode["Technology"]) { ComputingEndpoint->Technology = (std::string)xmlCENode["Technology"]; } if (xmlCENode["InterfaceName"]) { ComputingEndpoint->InterfaceName = lower((std::string)xmlCENode["InterfaceName"]); } else if (xmlCENode["Interface"]) { // No such attribute according to GLUE2 document. 
Legacy/backward compatibility? ComputingEndpoint->InterfaceName = lower((std::string)xmlCENode["Interface"]); } else { logger.msg(VERBOSE, "The Service doesn't advertise its Interface."); } if (xmlCENode["InterfaceVersion"]) { for (XMLNode n = xmlCENode["InterfaceVersion"]; n; ++n) { ComputingEndpoint->InterfaceVersion.push_back((std::string)n); } } if (xmlCENode["InterfaceExtension"]) { for (XMLNode n = xmlCENode["InterfaceExtension"]; n; ++n) { ComputingEndpoint->InterfaceExtension.push_back((std::string)n); } } if (xmlCENode["SupportedProfile"]) { for (XMLNode n = xmlCENode["SupportedProfile"]; n; ++n) { ComputingEndpoint->SupportedProfile.push_back((std::string)n); } } if (xmlCENode["Implementor"]) { ComputingEndpoint->Implementor = (std::string)xmlCENode["Implementor"]; } if (xmlCENode["ImplementationName"]) { if (xmlCENode["ImplementationVersion"]) { ComputingEndpoint->Implementation = Software((std::string)xmlCENode["ImplementationName"], (std::string)xmlCENode["ImplementationVersion"]); } else { ComputingEndpoint->Implementation = Software((std::string)xmlCENode["ImplementationName"]); } } if (xmlCENode["ServingState"]) { ComputingEndpoint->ServingState = (std::string)xmlCENode["ServingState"]; } else { logger.msg(VERBOSE, "The Service doesn't advertise its Serving State."); } if (xmlCENode["IssuerCA"]) { ComputingEndpoint->IssuerCA = (std::string)xmlCENode["IssuerCA"]; } if (xmlCENode["TrustedCA"]) { XMLNode n = xmlCENode["TrustedCA"]; while (n) { ComputingEndpoint->TrustedCA.push_back((std::string)n); ++n; //The increment operator works in an unusual manner (returns void) } } if (xmlCENode["DowntimeStart"]) { ComputingEndpoint->DowntimeStarts = (std::string)xmlCENode["DowntimeStart"]; } if (xmlCENode["DowntimeEnd"]) { ComputingEndpoint->DowntimeEnds = (std::string)xmlCENode["DowntimeEnd"]; } if (xmlCENode["Staging"]) { ComputingEndpoint->Staging = (std::string)xmlCENode["Staging"]; } if (xmlCENode["JobDescription"]) { for (XMLNode n = xmlCENode["JobDescription"]; n; ++n) { ComputingEndpoint->JobDescriptions.push_back((std::string)n); } } EntryToInt(url, xmlCENode["TotalJobs"], ComputingEndpoint->TotalJobs); EntryToInt(url, xmlCENode["RunningJobs"], ComputingEndpoint->RunningJobs); EntryToInt(url, xmlCENode["WaitingJobs"], ComputingEndpoint->WaitingJobs); EntryToInt(url, xmlCENode["StagingJobs"], ComputingEndpoint->StagingJobs); EntryToInt(url, xmlCENode["SuspendedJobs"], ComputingEndpoint->SuspendedJobs); EntryToInt(url, xmlCENode["PreLRMSWaitingJobs"], ComputingEndpoint->PreLRMSWaitingJobs); // The GLUE2 specification does not have attribute ComputingEndpoint.LocalRunningJobs //if (xmlCENode["LocalRunningJobs"]) { // ComputingEndpoint->LocalRunningJobs = stringtoi((std::string)xmlCENode["LocalRunningJobs"]); //} // The GLUE2 specification does not have attribute ComputingEndpoint.LocalWaitingJobs //if (xmlCENode["LocalWaitingJobs"]) { // ComputingEndpoint->LocalWaitingJobs = stringtoi((std::string)xmlCENode["LocalWaitingJobs"]); //} // The GLUE2 specification does not have attribute ComputingEndpoint.LocalSuspendedJobs //if (xmlCENode["LocalSuspendedJobs"]) { // ComputingEndpoint->LocalSuspendedJobs = stringtoi((std::string)xmlCENode["LocalSuspendedJobs"]); //} cs.ComputingEndpoint.insert(std::pair(endpointID++, ComputingEndpoint)); } int shareID = 0; for (XMLNode xmlCSNode = GLUEService["ComputingShare"]; (bool)xmlCSNode; ++xmlCSNode) { ComputingShareType ComputingShare; EntryToInt(url, xmlCSNode["FreeSlots"], ComputingShare->FreeSlots); if (xmlCSNode["FreeSlotsWithDuration"]) 
{ // Format: ns[:t] [ns[:t]]..., where ns is number of slots and t is the duration. ComputingShare->FreeSlotsWithDuration.clear(); const std::string fswdValue = (std::string)xmlCSNode["FreeSlotsWithDuration"]; std::list fswdList; tokenize(fswdValue, fswdList); for (std::list::iterator it = fswdList.begin(); it != fswdList.end(); it++) { std::list fswdPair; tokenize(*it, fswdPair, ":"); long duration = LONG_MAX; int freeSlots = 0; if (fswdPair.size() > 2 || !Arc::stringto(fswdPair.front(), freeSlots) || (fswdPair.size() == 2 && !Arc::stringto(fswdPair.back(), duration))) { logger.msg(VERBOSE, "The \"FreeSlotsWithDuration\" attribute published by \"%s\" is wrongly formatted. Ignoring it."); logger.msg(DEBUG, "Wrong format of the \"FreeSlotsWithDuration\" = \"%s\" (\"%s\")", fswdValue, *it); continue; } ComputingShare->FreeSlotsWithDuration[Period(duration)] = freeSlots; } } EntryToInt(url, xmlCSNode["UsedSlots"], ComputingShare->UsedSlots); EntryToInt(url, xmlCSNode["RequestedSlots"], ComputingShare->RequestedSlots); if (xmlCSNode["Name"]) { ComputingShare->Name = (std::string)xmlCSNode["Name"]; } if (xmlCSNode["MappingQueue"]) { ComputingShare->MappingQueue = (std::string)xmlCSNode["MappingQueue"]; } if (xmlCSNode["MaxWallTime"]) { ComputingShare->MaxWallTime = (std::string)xmlCSNode["MaxWallTime"]; } if (xmlCSNode["MaxTotalWallTime"]) { ComputingShare->MaxTotalWallTime = (std::string)xmlCSNode["MaxTotalWallTime"]; } if (xmlCSNode["MinWallTime"]) { ComputingShare->MinWallTime = (std::string)xmlCSNode["MinWallTime"]; } if (xmlCSNode["DefaultWallTime"]) { ComputingShare->DefaultWallTime = (std::string)xmlCSNode["DefaultWallTime"]; } if (xmlCSNode["MaxCPUTime"]) { ComputingShare->MaxCPUTime = (std::string)xmlCSNode["MaxCPUTime"]; } if (xmlCSNode["MaxTotalCPUTime"]) { ComputingShare->MaxTotalCPUTime = (std::string)xmlCSNode["MaxTotalCPUTime"]; } if (xmlCSNode["MinCPUTime"]) { ComputingShare->MinCPUTime = (std::string)xmlCSNode["MinCPUTime"]; } if (xmlCSNode["DefaultCPUTime"]) { ComputingShare->DefaultCPUTime = (std::string)xmlCSNode["DefaultCPUTime"]; } EntryToInt(url, xmlCSNode["MaxTotalJobs"], ComputingShare->MaxTotalJobs); EntryToInt(url, xmlCSNode["MaxRunningJobs"], ComputingShare->MaxRunningJobs); EntryToInt(url, xmlCSNode["MaxWaitingJobs"], ComputingShare->MaxWaitingJobs); EntryToInt(url, xmlCSNode["WaitingJobs"], ComputingShare->WaitingJobs); EntryToInt(url, xmlCSNode["MaxPreLRMSWaitingJobs"], ComputingShare->MaxPreLRMSWaitingJobs); EntryToInt(url, xmlCSNode["MaxUserRunningJobs"], ComputingShare->MaxUserRunningJobs); EntryToInt(url, xmlCSNode["MaxSlotsPerJob"], ComputingShare->MaxSlotsPerJob); EntryToInt(url, xmlCSNode["MaxStageInStreams"], ComputingShare->MaxStageInStreams); EntryToInt(url, xmlCSNode["MaxStageOutStreams"], ComputingShare->MaxStageOutStreams); if (xmlCSNode["SchedulingPolicy"]) { ComputingShare->SchedulingPolicy = (std::string)xmlCSNode["SchedulingPolicy"]; } EntryToInt(url, xmlCSNode["MaxMainMemory"], ComputingShare->MaxMainMemory); EntryToInt(url, xmlCSNode["MaxVirtualMemory"], ComputingShare->MaxVirtualMemory); EntryToInt(url, xmlCSNode["MaxDiskSpace"], ComputingShare->MaxDiskSpace); if (xmlCSNode["DefaultStorageService"]) { ComputingShare->DefaultStorageService = (std::string)xmlCSNode["DefaultStorageService"]; } if (xmlCSNode["Preemption"]) { ComputingShare->Preemption = ((std::string)xmlCSNode["Preemption"] == "true") ? 
true : false; } if (xmlCSNode["EstimatedAverageWaitingTime"]) { ComputingShare->EstimatedAverageWaitingTime = (std::string)xmlCSNode["EstimatedAverageWaitingTime"]; } int EstimatedWorstWaitingTime; if (EntryToInt(url, xmlCSNode["EstimatedWorstWaitingTime"], EstimatedWorstWaitingTime)) { ComputingShare->EstimatedWorstWaitingTime = EstimatedWorstWaitingTime; } if (xmlCSNode["ReservationPolicy"]) { ComputingShare->ReservationPolicy = (std::string)xmlCSNode["ReservationPolicy"]; } cs.ComputingShare.insert(std::pair(shareID++, ComputingShare)); } int managerID = 0; for (XMLNode xmlCMNode = GLUEService["ComputingManager"]; (bool)xmlCMNode; ++xmlCMNode) { ComputingManagerType ComputingManager; if (xmlCMNode["ProductName"]) { ComputingManager->ProductName = (std::string)xmlCMNode["ProductName"]; } else if (xmlCMNode["Type"]) { // is this non-standard fallback needed? ComputingManager->ProductName = (std::string)xmlCMNode["Type"]; } if (xmlCMNode["ProductVersion"]) { ComputingManager->ProductVersion = (std::string)xmlCMNode["ProductVersion"]; } if (xmlCMNode["Reservation"]) { ComputingManager->Reservation = ((std::string)xmlCMNode["Reservation"] == "true"); } if (xmlCMNode["BulkSubmission"]) { ComputingManager->BulkSubmission = ((std::string)xmlCMNode["BulkSubmission"] == "true"); } EntryToInt(url, xmlCMNode["TotalPhysicalCPUs"], ComputingManager->TotalPhysicalCPUs); EntryToInt(url, xmlCMNode["TotalLogicalCPUs"], ComputingManager->TotalLogicalCPUs); EntryToInt(url, xmlCMNode["TotalSlots"], ComputingManager->TotalSlots); if (xmlCMNode["Homogeneous"]) { ComputingManager->Homogeneous = ((std::string)xmlCMNode["Homogeneous"] == "true"); } if (xmlCMNode["NetworkInfo"]) { for (XMLNode n = xmlCMNode["NetworkInfo"]; n; ++n) { ComputingManager->NetworkInfo.push_back((std::string)n); } } if (xmlCMNode["WorkingAreaShared"]) { ComputingManager->WorkingAreaShared = ((std::string)xmlCMNode["WorkingAreaShared"] == "true"); } EntryToInt(url, xmlCMNode["WorkingAreaFree"], ComputingManager->WorkingAreaFree); EntryToInt(url, xmlCMNode["WorkingAreaTotal"], ComputingManager->WorkingAreaTotal); if (xmlCMNode["WorkingAreaLifeTime"]) { ComputingManager->WorkingAreaLifeTime = (std::string)xmlCMNode["WorkingAreaLifeTime"]; } EntryToInt(url, xmlCMNode["CacheFree"], ComputingManager->CacheFree); EntryToInt(url, xmlCMNode["CacheTotal"], ComputingManager->CacheTotal); for (XMLNode n = xmlCMNode["Benchmark"]; n; ++n) { double value; if (n["Type"] && n["Value"] && Arc::stringto((std::string)n["Value"], value)) { (*ComputingManager.Benchmarks)[(std::string)n["Type"]] = value; } else { logger.msg(VERBOSE, "Couldn't parse benchmark XML:\n%s", (std::string)n); continue; } } for (XMLNode n = xmlCMNode["ApplicationEnvironments"]["ApplicationEnvironment"]; n; ++n) { ApplicationEnvironment ae((std::string)n["AppName"], (std::string)n["AppVersion"]); ae.State = (std::string)n["State"]; EntryToInt(url, n["FreeSlots"], ae.FreeSlots); //else { // ae.FreeSlots = ComputingShare->FreeSlots; // Non compatible??, i.e. a ComputingShare is unrelated to the ApplicationEnvironment. //} EntryToInt(url, n["FreeJobs"], ae.FreeJobs); EntryToInt(url, n["FreeUserSeats"], ae.FreeUserSeats); ComputingManager.ApplicationEnvironments->push_back(ae); } /* * A ComputingShare is linked to multiple ExecutionEnvironments. * Due to bug 2101 multiple ExecutionEnvironments per ComputingShare * will be ignored. The ExecutionEnvironment information will only be * stored if there is one ExecutionEnvironment associated with a * ComputingShare. 
*/ int eeID = 0; for (XMLNode xmlEENode = xmlCMNode["ExecutionEnvironments"]["ExecutionEnvironment"]; (bool)xmlEENode; ++xmlEENode) { ExecutionEnvironmentType ExecutionEnvironment; if (xmlEENode["Platform"]) { ExecutionEnvironment->Platform = (std::string)xmlEENode["Platform"]; } EntryToInt(url, xmlEENode["MainMemorySize"], ExecutionEnvironment->MainMemorySize); if (xmlEENode["OSName"]) { if (xmlEENode["OSVersion"]) { if (xmlEENode["OSFamily"]) { ExecutionEnvironment->OperatingSystem = Software((std::string)xmlEENode["OSFamily"], (std::string)xmlEENode["OSName"], (std::string)xmlEENode["OSVersion"]); } else { ExecutionEnvironment->OperatingSystem = Software((std::string)xmlEENode["OSName"], (std::string)xmlEENode["OSVersion"]); } } else { ExecutionEnvironment->OperatingSystem = Software((std::string)xmlEENode["OSName"]); } } if (xmlEENode["ConnectivityIn"]) { ExecutionEnvironment->ConnectivityIn = (lower((std::string)xmlEENode["ConnectivityIn"]) == "true"); } if (xmlEENode["ConnectivityOut"]) { ExecutionEnvironment->ConnectivityOut = (lower((std::string)xmlEENode["ConnectivityOut"]) == "true"); } ComputingManager.ExecutionEnvironment.insert(std::pair(eeID++, ExecutionEnvironment)); } cs.ComputingManager.insert(std::pair(managerID++, ComputingManager)); } csList.push_back(cs); } } bool TargetInformationRetrieverPluginWSRFGLUE2::EntryToInt(const URL& url, XMLNode entry, int& i) { if (entry && !stringto((std::string)entry, i)) { logger.msg(INFO, "Unable to parse the %s.%s value from execution service (%s).", entry.Parent().Name(), entry.Name(), url.fullstr()); logger.msg(DEBUG, "Value of %s.%s is \"%s\"", entry.Parent().Name(), entry.Name(), (std::string)entry); return false; } return (bool)entry; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/TargetInformationRetrieverPluginBES.h0000644000000000000000000000012212045235201027136 xustar000000000000000027 mtime=1351957121.246634 25 atime=1513200574.4007 30 ctime=1513200660.152749354 nordugrid-arc-5.4.2/src/hed/acc/ARC1/TargetInformationRetrieverPluginBES.h0000644000175000002070000000202012045235201027177 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_TARGETINFORMATIONRETRIEVERBES_H__ #define __ARC_TARGETINFORMATIONRETRIEVERBES_H__ #include #include namespace Arc { class ExecutionTarget; class Logger; class UserConfig; class TargetInformationRetrieverPluginBES : public TargetInformationRetrieverPlugin { public: TargetInformationRetrieverPluginBES(PluginArgument *parg): TargetInformationRetrieverPlugin(parg) { supportedInterfaces.push_back("org.ogf.bes"); }; ~TargetInformationRetrieverPluginBES() {}; static Plugin* Instance(PluginArgument *arg) { return new TargetInformationRetrieverPluginBES(arg); }; virtual EndpointQueryingStatus Query(const UserConfig&, const Endpoint&, std::list&, const EndpointQueryOptions&) const; virtual bool isEndpointNotSupported(const Endpoint&) const; private: static Logger logger; }; } // namespace Arc #endif // __ARC_TARGETINFORMATIONRETRIEVERBES_H__ nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/JobListRetrieverPluginWSRFBES.cpp0000644000000000000000000000012212675602216026122 xustar000000000000000027 mtime=1459029134.924374 25 atime=1513200574.4117 30 ctime=1513200660.156749403 nordugrid-arc-5.4.2/src/hed/acc/ARC1/JobListRetrieverPluginWSRFBES.cpp0000644000175000002070000000270312675602216026173 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include 
"JobListRetrieverPluginWSRFBES.h" namespace Arc { Logger JobListRetrieverPluginWSRFBES::logger(Logger::getRootLogger(), "JobListRetrieverPlugin.WSRFBES"); bool JobListRetrieverPluginWSRFBES::isEndpointNotSupported(const Endpoint& endpoint) const { const std::string::size_type pos = endpoint.URLString.find("://"); if (pos != std::string::npos) { const std::string proto = lower(endpoint.URLString.substr(0, pos)); return ((proto != "http") && (proto != "https")); } return false; } static URL CreateURL(std::string service) { std::string::size_type pos1 = service.find("://"); if (pos1 == std::string::npos) { service = "https://" + service; } else { std::string proto = lower(service.substr(0,pos1)); if((proto != "http") && (proto != "https")) return URL(); } // Default port other than 443? // Default path? return service; } EndpointQueryingStatus JobListRetrieverPluginWSRFBES::Query(const UserConfig&, const Endpoint&, std::list&, const EndpointQueryOptions&) const { return EndpointQueryingStatus::FAILED; } } // namespace Arc extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "WSRFBES", "HED:JobListRetrieverPlugin", "", 0, &Arc::JobListRetrieverPluginWSRFBES::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/JobControllerPluginARC1.cpp0000644000000000000000000000012213165644550025017 xustar000000000000000027 mtime=1507281256.705161 25 atime=1513200574.4017 30 ctime=1513200660.146749281 nordugrid-arc-5.4.2/src/hed/acc/ARC1/JobControllerPluginARC1.cpp0000644000175000002070000001522313165644550025071 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "AREXClient.h" #include "JobControllerPluginARC1.h" #include "JobStateARC1.h" namespace Arc { Logger JobControllerPluginARC1::logger(Logger::getRootLogger(), "JobControllerPlugin.ARC1"); void JobControllerPluginARC1::SetUserConfig(const UserConfig& uc) { JobControllerPlugin::SetUserConfig(uc); clients.SetUserConfig(uc); } URL JobControllerPluginARC1::GetAddressOfResource(const Job& job) { return job.ServiceInformationURL; } bool JobControllerPluginARC1::isEndpointNotSupported(const std::string& endpoint) const { const std::string::size_type pos = endpoint.find("://"); return pos != std::string::npos && lower(endpoint.substr(0, pos)) != "http" && lower(endpoint.substr(0, pos)) != "https"; } void JobControllerPluginARC1::UpdateJobs(std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { for (std::list::iterator it = jobs.begin(); it != jobs.end(); ++it) { AutoPointer ac(clients.acquire(GetAddressOfResource(**it), true)); std::string idstr; AREXClient::createActivityIdentifier(URL((*it)->JobID), idstr); if (!ac->stat(idstr, **it)) { logger.msg(WARNING, "Job information not found in the information system: %s", (*it)->JobID); IDsNotProcessed.push_back((*it)->JobID); ((AREXClients&)clients).release(ac.Release()); continue; } IDsProcessed.push_back((*it)->JobID); ((AREXClients&)clients).release(ac.Release()); } } bool JobControllerPluginARC1::CleanJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { bool ok = true; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { Job& job = **it; AutoPointer ac(clients.acquire(GetAddressOfResource(job), true)); std::string idstr; AREXClient::createActivityIdentifier(URL(job.JobID), idstr); if 
(!ac->clean(idstr)) { ok = false; IDsNotProcessed.push_back(job.JobID); ((AREXClients&)clients).release(ac.Release()); continue; } IDsProcessed.push_back(job.JobID); ((AREXClients&)clients).release(ac.Release()); } return ok; } bool JobControllerPluginARC1::CancelJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { bool ok = true; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { Job& job = **it; AutoPointer ac(clients.acquire(GetAddressOfResource(job), true)); std::string idstr; AREXClient::createActivityIdentifier(URL(job.JobID), idstr); if (!ac->kill(idstr)) { ok = false; IDsNotProcessed.push_back(job.JobID); ((AREXClients&)clients).release(ac.Release()); continue; } job.State = JobStateARC1("killed"); IDsProcessed.push_back(job.JobID); ((AREXClients&)clients).release(ac.Release()); } return ok; } bool JobControllerPluginARC1::RenewJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { logger.msg(INFO, "Renewal of ARC1 jobs is not supported"); IDsNotProcessed.push_back((*it)->JobID); } return false; } bool JobControllerPluginARC1::ResumeJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { bool ok = true; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { Job& job = **it; if (!job.RestartState) { logger.msg(INFO, "Job %s does not report a resumable state", job.JobID); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } logger.msg(VERBOSE, "Resuming job: %s at state: %s (%s)", job.JobID, job.RestartState.GetGeneralState(), job.RestartState()); AutoPointer ac(clients.acquire(GetAddressOfResource(job), true)); std::string idstr; AREXClient::createActivityIdentifier(URL(job.JobID), idstr); if (!ac->resume(idstr)) { ok = false; IDsNotProcessed.push_back(job.JobID); ((AREXClients&)clients).release(ac.Release()); continue; } IDsProcessed.push_back(job.JobID); ((AREXClients&)clients).release(ac.Release()); logger.msg(VERBOSE, "Job resuming successful"); } return ok; } bool JobControllerPluginARC1::GetURLToJobResource(const Job& job, Job::ResourceType resource, URL& url) const { url = URL(job.JobID); // compensate for time between request and response on slow networks url.AddOption("threads=2",false); url.AddOption("encryption=optional",false); url.AddOption("httpputpartial=yes",false); switch (resource) { case Job::STDIN: url.ChangePath(url.Path() + '/' + job.StdIn); break; case Job::STDOUT: url.ChangePath(url.Path() + '/' + job.StdOut); break; case Job::STDERR: url.ChangePath(url.Path() + '/' + job.StdErr); break; case Job::STAGEINDIR: case Job::STAGEOUTDIR: case Job::SESSIONDIR: break; case Job::JOBLOG: case Job::JOBDESCRIPTION: std::string path = url.Path(); path.insert(path.rfind('/'), "/?logs"); url.ChangePath(path + (Job::JOBLOG ? 
"/errors" : "/description")); break; } return true; } bool JobControllerPluginARC1::GetJobDescription(const Job& job, std::string& desc_str) const { MCCConfig cfg; usercfg->ApplyToConfig(cfg); AutoPointer ac(clients.acquire(GetAddressOfResource(job), true)); std::string idstr; AREXClient::createActivityIdentifier(URL(job.JobID), idstr); if (ac->getdesc(idstr, desc_str)) { std::list descs; if (JobDescription::Parse(desc_str, descs) && !descs.empty()) { ((AREXClients&)clients).release(ac.Release()); return true; } } ((AREXClients&)clients).release(ac.Release()); logger.msg(ERROR, "Failed retrieving job description for job: %s", job.JobID); return false; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/TargetInformationRetrieverPluginBES.cpp0000644000000000000000000000012212053372654027506 xustar000000000000000027 mtime=1353577900.171111 25 atime=1513200574.4177 30 ctime=1513200660.152749354 nordugrid-arc-5.4.2/src/hed/acc/ARC1/TargetInformationRetrieverPluginBES.cpp0000644000175000002070000000464412053372654027565 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "TargetInformationRetrieverPluginBES.h" namespace Arc { Logger TargetInformationRetrieverPluginBES::logger(Logger::getRootLogger(), "TargetInformationRetrieverPlugin.BES"); bool TargetInformationRetrieverPluginBES::isEndpointNotSupported(const Endpoint& endpoint) const { const std::string::size_type pos = endpoint.URLString.find("://"); if (pos != std::string::npos) { const std::string proto = lower(endpoint.URLString.substr(0, pos)); return ((proto != "http") && (proto != "https")); } return false; } static URL CreateURL(std::string service) { std::string::size_type pos1 = service.find("://"); if (pos1 == std::string::npos) { service = "https://" + service; } else { std::string proto = lower(service.substr(0,pos1)); if((proto != "http") && (proto != "https")) return URL(); } // Default port other than 443? // Default path? return service; } EndpointQueryingStatus TargetInformationRetrieverPluginBES::Query(const UserConfig& uc, const Endpoint& cie, std::list& csList, const EndpointQueryOptions&) const { EndpointQueryingStatus s(EndpointQueryingStatus::FAILED); // Return FAILED while the implementation is not complete return s; URL url(CreateURL(cie.URLString)); if (!url) { return s; } // TODO: Need to open a remote connection in order to verify a running service. //if ( /* No service running at 'url' */ ) { // return s; //} ComputingServiceType cs; cs.AdminDomain->Name = url.Host(); ComputingEndpointType ComputingEndpoint; ComputingEndpoint->URLString = url.str(); ComputingEndpoint->InterfaceName = "org.ogf.bes"; ComputingEndpoint->Implementor = "NorduGrid"; ComputingEndpoint->HealthState = "ok"; cs.ComputingEndpoint.insert(std::pair(0, ComputingEndpoint)); // TODO: ComputingServiceType object must be filled with ComputingManager, ComputingShare and ExecutionEnvironment before it is valid. 
csList.push_back(cs); s = EndpointQueryingStatus::SUCCESSFUL; return s; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/JobStateARC1.cpp0000644000000000000000000000012212153630075022566 xustar000000000000000027 mtime=1370435645.386876 25 atime=1513200574.4127 30 ctime=1513200660.150749329 nordugrid-arc-5.4.2/src/hed/acc/ARC1/JobStateARC1.cpp0000644000175000002070000000465612153630075022650 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "JobStateARC1.h" namespace Arc { JobState::StateType JobStateARC1::StateMap(const std::string& state) { /// \mapname ARCBES ARC extended BES /// \mapnote The mapping is case-insensitive, and prefix "pending:" are ignored when performing the mapping. std::string state_ = Arc::lower(state); std::string::size_type p = state_.find("pending:"); if(p != std::string::npos) { state_.erase(p,8); } /// \mapattr accepted -> ACCEPTED if (state_ == "accepted") return JobState::ACCEPTED; /// \mapattr preparing -> PREPARING /// \mapattr prepared -> PREPARING else if (state_ == "preparing" || state_ == "prepared") // obtained through BES return JobState::PREPARING; /// \mapattr submit -> SUBMITTING /// \mapattr submitting -> SUBMITTING else if (state_ == "submit" || state_ == "submitting") // obtained through BES return JobState::SUBMITTING; /// \mapattr inlrms:q -> QUEUING else if (state_ == "inlrms:q") return JobState::QUEUING; /// \mapattr inlrms:r -> RUNNING /// \mapattr inlrms:executed -> RUNNING /// \mapattr inlrms:s -> RUNNING /// \mapattr inlrms:e -> RUNNING /// \mapattr executing -> RUNNING /// \mapattr executed -> RUNNING /// \mapattr killing -> RUNNING else if (state_ == "inlrms:r" || state_ == "inlrms:executed" || state_ == "inlrms:s" || state_ == "inlrms:e" || state_ == "executing" || // obtained through BES state_ == "executed" || // obtained through BES state_ == "killing") // obtained through BES return JobState::RUNNING; /// \mapattr finishing -> FINISHING else if (state_ == "finishing") return JobState::FINISHING; /// \mapattr finished -> FINISHED else if (state_ == "finished") return JobState::FINISHED; /// \mapattr killed -> KILLED else if (state_ == "killed") return JobState::KILLED; /// \mapattr failed -> FAILED else if (state_ == "failed") return JobState::FAILED; /// \mapattr deleted -> DELETED else if (state_ == "deleted") return JobState::DELETED; /// \mapattr "" -> UNDEFINED else if (state_ == "") return JobState::UNDEFINED; /// \mapattr Any other state -> OTHER else return JobState::OTHER; } } nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/JobStateBES.cpp0000644000000000000000000000012212153630075022511 xustar000000000000000027 mtime=1370435645.386876 25 atime=1513200574.4067 30 ctime=1513200660.154749378 nordugrid-arc-5.4.2/src/hed/acc/ARC1/JobStateBES.cpp0000644000175000002070000000154312153630075022563 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "JobStateBES.h" namespace Arc { JobState::StateType JobStateBES::StateMap(const std::string& state) { /// \mapname BES BES std::string state_ = Arc::lower(state); /// \mapattr pending -> ACCEPTED if (state_ == "pending") return JobState::ACCEPTED; /// \mapattr running -> RUNNING else if (state_ == "running") return JobState::RUNNING; /// \mapattr finished -> FINISHED else if (state_ == "finished") return JobState::FINISHED; /// \mapattr cancelled -> KILLED else if (state_ == "cancelled") return JobState::KILLED; /// \mapattr failed -> FAILED else if (state_ == "failed") return 
JobState::FAILED; /// \mapattr Any other state -> UNDEFINED else return JobState::UNDEFINED; } } nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/JobListRetrieverPluginWSRFBES.h0000644000000000000000000000012212045235201025552 xustar000000000000000027 mtime=1351957121.246634 25 atime=1513200574.4127 30 ctime=1513200660.156749403 nordugrid-arc-5.4.2/src/hed/acc/ARC1/JobListRetrieverPluginWSRFBES.h0000644000175000002070000000175212045235201025626 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOBLISTRETRIEVERPLUGINWSRFBES_H__ #define __ARC_JOBLISTRETRIEVERPLUGINWSRFBES_H__ #include #include namespace Arc { class Logger; class JobListRetrieverPluginWSRFBES : public JobListRetrieverPlugin { public: JobListRetrieverPluginWSRFBES(PluginArgument* parg): JobListRetrieverPlugin(parg) { supportedInterfaces.push_back("org.ogf.bes"); } virtual ~JobListRetrieverPluginWSRFBES() {} static Plugin* Instance(PluginArgument *arg) { return new JobListRetrieverPluginWSRFBES(arg); } virtual EndpointQueryingStatus Query(const UserConfig&, const Endpoint&, std::list&, const EndpointQueryOptions&) const; // No implementation in cpp file -- returns EndpointQueryingStatus::FAILED. virtual bool isEndpointNotSupported(const Endpoint&) const; private: static Logger logger; }; } // namespace Arc #endif // __ARC_JOBLISTRETRIEVERPLUGINWSRFBES_H__ nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/README0000644000000000000000000000012211745106052020617 xustar000000000000000027 mtime=1335135274.344052 25 atime=1513200574.4127 30 ctime=1513200660.140749207 nordugrid-arc-5.4.2/src/hed/acc/ARC1/README0000644000175000002070000000034211745106052020665 0ustar00mockbuildmock00000000000000Arc Client Component (ACC) plugins for supporting the next generation ARC middleware (ARC1) Implements the following specialized classes: o JobControllerPluginARC1 o SubmitterARC1 Additional support classes: o AREXClient nordugrid-arc-5.4.2/src/hed/acc/ARC1/PaxHeaders.7502/JobControllerPluginBES.cpp0000644000000000000000000000012213165644550024742 xustar000000000000000027 mtime=1507281256.705161 25 atime=1513200574.4147 30 ctime=1513200660.153749366 nordugrid-arc-5.4.2/src/hed/acc/ARC1/JobControllerPluginBES.cpp0000644000175000002070000001017113165644550025011 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "AREXClient.h" #include "JobControllerPluginBES.h" #include "JobStateBES.h" namespace Arc { Logger JobControllerPluginBES::logger(Logger::getRootLogger(), "JobControllerPlugin.BES"); void JobControllerPluginBES::SetUserConfig(const UserConfig& uc) { JobControllerPlugin::SetUserConfig(uc); } bool JobControllerPluginBES::isEndpointNotSupported(const std::string& endpoint) const { const std::string::size_type pos = endpoint.find("://"); return pos != std::string::npos && lower(endpoint.substr(0, pos)) != "http" && lower(endpoint.substr(0, pos)) != "https"; } void JobControllerPluginBES::UpdateJobs(std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { MCCConfig cfg; usercfg->ApplyToConfig(cfg); for (std::list::iterator it = jobs.begin(); it != jobs.end(); ++it) { AREXClient ac((*it)->JobStatusURL, cfg, usercfg->Timeout(),false); if (!ac.stat((*it)->IDFromEndpoint, **it)) { logger.msg(INFO, "Failed retrieving job status information"); IDsNotProcessed.push_back((*it)->JobID); continue; } IDsProcessed.push_back((*it)->JobID); } 
} bool JobControllerPluginBES::CleanJobs(const std::list& jobs, std::list&, std::list& IDsNotProcessed, bool) const { for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { logger.msg(INFO, "Cleaning of BES jobs is not supported"); IDsNotProcessed.push_back((*it)->JobID); } return false; } bool JobControllerPluginBES::CancelJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { MCCConfig cfg; usercfg->ApplyToConfig(cfg); bool ok = true; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { Job& job = **it; AREXClient ac(job.JobManagementURL, cfg, usercfg->Timeout(), false); if (!ac.kill(job.IDFromEndpoint)) { ok = false; IDsNotProcessed.push_back(job.JobID); continue; } job.State = JobStateBES("cancelled"); IDsProcessed.push_back(job.JobID); } return ok; } bool JobControllerPluginBES::RenewJobs(const std::list& jobs, std::list&, std::list& IDsNotProcessed, bool) const { for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { logger.msg(INFO, "Renewal of BES jobs is not supported"); IDsNotProcessed.push_back((*it)->JobID); } return false; } bool JobControllerPluginBES::ResumeJobs(const std::list& jobs, std::list&, std::list& IDsNotProcessed, bool) const { for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { logger.msg(INFO, "Resuming BES jobs is not supported"); IDsNotProcessed.push_back((*it)->JobID); } return false; } bool JobControllerPluginBES::GetJobDescription(const Job& job, std::string& desc_str) const { MCCConfig cfg; usercfg->ApplyToConfig(cfg); AREXClient ac(job.JobManagementURL, cfg, usercfg->Timeout(), false); if (ac.getdesc(job.IDFromEndpoint, desc_str)) { std::list descs; if (JobDescription::Parse(desc_str, descs) && !descs.empty()) { return true; } } logger.msg(ERROR, "Failed retrieving job description for job: %s", job.JobID); return false; } URL JobControllerPluginBES::CreateURL(std::string service, ServiceType /* st */) const { std::string::size_type pos1 = service.find("://"); if (pos1 == std::string::npos) service = "https://" + service; // Default port other than 443? // Default path? return service; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/PaxHeaders.7502/JobDescriptionParser0000644000000000000000000000013213214316024023263 xustar000000000000000030 mtime=1513200660.401752399 30 atime=1513200668.721854157 30 ctime=1513200660.401752399 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/0000755000175000002070000000000013214316024023406 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/PaxHeaders.7502/XRSLParser.cpp0000644000000000000000000000012412733561034026022 xustar000000000000000027 mtime=1466884636.002405 27 atime=1513200574.276699 30 ctime=1513200660.394752314 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/XRSLParser.cpp0000644000175000002070000020732012733561034026073 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "RSLParser.h" #include "XRSLParser.h" namespace Arc { /// \mapname xRSL xRSL (nordugrid:xrsl) /// The libarccompute library has full support for xRSL. The /// reference manual is located here. /// By default the xRSL parser expects and produces user-side RSL (see /// reference manual), however if GM-side RSL is passed as input or wanted as /// output, then the "GRIDMANAGER" dialect should be used. 
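  // Illustrative example (not part of the original source): a minimal user-side
  // xRSL description using only attributes handled by this parser; file names,
  // the URL and the time value are hypothetical. An empty second value in
  // 'inputfiles' means the file is taken from the client's working directory.
  //
  //   &(executable = "run.sh")
  //    (arguments = "input.dat")
  //    (inputfiles = ("run.sh" "") ("input.dat" "gsiftp://example.org/input.dat"))
  //    (stdout = "out.txt")
  //    (stderr = "err.txt")
  //    (cputime = "60")        // user-side RSL: interpreted as 60 minutes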
XRSLParser::XRSLParser(PluginArgument* parg) : JobDescriptionParserPlugin(parg) { supportedLanguages.push_back("nordugrid:xrsl"); } Plugin* XRSLParser::Instance(PluginArgument *arg) { return new XRSLParser(arg); } static Software::ComparisonOperator convertOperator(const RSLRelOp& op) { if (op == RSLNotEqual) return &Software::operator!=; if (op == RSLLess) return &Software::operator<; if (op == RSLGreater) return &Software::operator>; if (op == RSLLessOrEqual) return &Software::operator <=; if (op == RSLGreaterOrEqual) return &Software::operator>=; return &Software::operator==; } static RSLRelOp convertOperator(const Software::ComparisonOperator& op) { if (op == &Software::operator==) return RSLEqual; if (op == &Software::operator<) return RSLLess; if (op == &Software::operator>) return RSLGreater; if (op == &Software::operator<=) return RSLLessOrEqual; if (op == &Software::operator>=) return RSLGreaterOrEqual; return RSLNotEqual; } static std::list SplitRSL(const RSL *r) { const RSLBoolean *b; std::list l; if ((b = dynamic_cast(r)) && b->Op() == RSLMulti) for (std::list::const_iterator it = b->begin(); it != b->end(); it++) { std::list L = SplitRSL(*it); l.insert(l.end(), L.begin(), L.end()); } else l.push_back(r); return l; } /// \mapattr executables -> InputFileType::IsExecutable void XRSLParser::ParseExecutablesAttribute(JobDescription& j, JobDescriptionParserPluginResult& result) { std::map::iterator itExecsAtt = j.OtherAttributes.find("nordugrid:xrsl;executables"); if (itExecsAtt == j.OtherAttributes.end()) { return; } RSLParser rp("&(executables = " + itExecsAtt->second + ")"); const RSL* rexecs = rp.Parse(false); const RSLBoolean* bexecs; const RSLCondition* cexecs; std::list execs; if (rexecs == NULL || (bexecs = dynamic_cast(rexecs)) == NULL || (cexecs = dynamic_cast(*bexecs->begin())) == NULL) { // Should not happen. logger.msg(DEBUG, "Error parsing the internally set executables attribute."); return; } ListValue(cexecs, execs, result); for (std::list::const_iterator itExecs = execs.begin(); itExecs != execs.end(); itExecs++) { bool fileExists = false; for (std::list::iterator itFile = j.DataStaging.InputFiles.begin(); itFile != j.DataStaging.InputFiles.end(); itFile++) { if (itFile->Name == (*itExecs)) { itFile->IsExecutable = true; fileExists = true; } } if (!fileExists) { result.AddError(IString("File '%s' in the 'executables' attribute is not present in the 'inputfiles' attribute", *itExecs)); } } // executables attribute only stored for later parsing, removing it now. 
j.OtherAttributes.erase(itExecsAtt); } /// TODO \mapattr executables -> InputFileType::IsExecutable void XRSLParser::ParseFTPThreadsAttribute(JobDescription& j, JobDescriptionParserPluginResult& result) { std::map::iterator itAtt; itAtt = j.OtherAttributes.find("nordugrid:xrsl;ftpthreads"); if (itAtt == j.OtherAttributes.end()) { return; } int threads; if (!stringto(itAtt->second, threads) || threads < 1 || 10 < threads) { result.AddError(IString("The value of the ftpthreads attribute must be a number from 1 to 10")); return; } for (std::list::iterator itF = j.DataStaging.InputFiles.begin(); itF != j.DataStaging.InputFiles.end(); itF++) { for (std::list::iterator itS = itF->Sources.begin(); itS != itF->Sources.end(); itS++) { itS->AddOption("threads", itAtt->second); } } for (std::list::iterator itF = j.DataStaging.OutputFiles.begin(); itF != j.DataStaging.OutputFiles.end(); itF++) { for (std::list::iterator itT = itF->Targets.begin(); itT != itF->Targets.end(); itT++) { itT->AddOption("threads", itAtt->second); } } j.OtherAttributes.erase(itAtt); return; } /// TODO \mapattr executables -> InputFileType::IsExecutable void XRSLParser::ParseCacheAttribute(JobDescription& j, JobDescriptionParserPluginResult& result) { std::map::iterator itAtt; itAtt = j.OtherAttributes.find("nordugrid:xrsl;cache"); if (itAtt == j.OtherAttributes.end()) { return; } for (std::list::iterator itF = j.DataStaging.InputFiles.begin(); itF != j.DataStaging.InputFiles.end(); itF++) { if (!itF->IsExecutable) { for (std::list::iterator itS = itF->Sources.begin(); itS != itF->Sources.end(); itS++) { itS->AddOption("cache", itAtt->second); } } } j.OtherAttributes.erase(itAtt); return; } /// TODO \mapattr executables -> InputFileType::IsExecutable void XRSLParser::ParseJoinAttribute(JobDescription& j, JobDescriptionParserPluginResult& result) { std::map::iterator itAtt; itAtt = j.OtherAttributes.find("nordugrid:xrsl;join"); if (itAtt == j.OtherAttributes.end() || (itAtt->second != "yes" && itAtt->second != "true")) { return; } if (j.Application.Output.empty()) { result.AddError(IString("'stdout' attribute must specified when 'join' attribute is specified")); } else if (!j.Application.Error.empty()) { if (j.Application.Error != j.Application.Output) { result.AddError(IString("Attribute 'join' cannot be specified when both 'stdout' and 'stderr' attributes is specified")); } } j.Application.Error = j.Application.Output; j.OtherAttributes.erase(itAtt); } /// TODO \mapattr executables -> InputFileType::IsExecutable void XRSLParser::ParseGridTimeAttribute(JobDescription& j, JobDescriptionParserPluginResult& result) { // Must be called after the 'count' attribute has been parsed. std::map::iterator itAtt; itAtt = j.OtherAttributes.find("nordugrid:xrsl;gridtime"); if (itAtt == j.OtherAttributes.end()) { return; } if (j.Resources.TotalCPUTime.range.max != -1) { result.AddError(IString("Attributes 'gridtime' and 'cputime' cannot be specified together")); return; } if (j.Resources.IndividualWallTime.range.max != -1) { result.AddError(IString("Attributes 'gridtime' and 'walltime' cannot be specified together")); return; } j.Resources.TotalCPUTime.range = Period(itAtt->second, PeriodMinutes).GetPeriod(); j.Resources.TotalCPUTime.benchmark = std::pair("clock rate", 2800); int slots = (j.Resources.SlotRequirement.NumberOfSlots > 0 ? 
j.Resources.SlotRequirement.NumberOfSlots : 1); j.Resources.IndividualWallTime.range = Period(itAtt->second, PeriodMinutes).GetPeriod()*slots; j.Resources.IndividualWallTime.benchmark = std::pair("clock rate", 2800); j.OtherAttributes.erase(itAtt); } /// TODO \mapattr executables -> InputFileType::IsExecutable void XRSLParser::ParseCountPerNodeAttribute(JobDescription& j, JobDescriptionParserPluginResult& result) { // Must be called after the 'count' attribute has been parsed. std::map::iterator itAtt; itAtt = j.OtherAttributes.find("nordugrid:xrsl;countpernode"); if (itAtt == j.OtherAttributes.end()) return; if (j.Resources.SlotRequirement.NumberOfSlots == -1) { result.AddError(IString("When specifying 'countpernode' attribute, 'count' attribute must also be specified")); } else if (!stringto(itAtt->second, j.Resources.SlotRequirement.SlotsPerHost)) { result.AddError(IString("Value of 'countpernode' attribute must be an integer")); } } JobDescriptionParserPluginResult XRSLParser::Parse(const std::string& source, std::list& jobdescs, const std::string& language, const std::string& dialect) const { if (language != "" && !IsLanguageSupported(language)) { return JobDescriptionParserPluginResult::WrongLanguage; } std::list parsed_descriptions; RSLParser parser(source); const RSL *r = parser.Parse(); if (!r) { return parser.GetParsingResult(); // TODO: Check result. No RSL returned - is result Failure?? } std::list l = SplitRSL(r); JobDescriptionParserPluginResult result; for (std::list::iterator it = l.begin(); it != l.end(); it++) { parsed_descriptions.push_back(JobDescription()); Parse(*it, parsed_descriptions.back(), dialect, result); // Parse remaining attributes if any. ParseExecutablesAttribute(parsed_descriptions.back(), result); ParseFTPThreadsAttribute(parsed_descriptions.back(), result); ParseCacheAttribute(parsed_descriptions.back(), result); ParseCountPerNodeAttribute(parsed_descriptions.back(), result); if (dialect != "GRIDMANAGER") { ParseJoinAttribute(parsed_descriptions.back(), result); // join is a client side attribute ParseGridTimeAttribute(parsed_descriptions.back(), result); // gridtime is a client side attribute } for (std::list::iterator itJob = parsed_descriptions.back().GetAlternatives().begin(); itJob != parsed_descriptions.back().GetAlternatives().end(); itJob++) { ParseExecutablesAttribute(*itJob, result); ParseFTPThreadsAttribute(*itJob, result); ParseCacheAttribute(*itJob, result); ParseCountPerNodeAttribute(*itJob, result); if (dialect != "GRIDMANAGER") { ParseJoinAttribute(*itJob, result); // join is a client side attribute ParseGridTimeAttribute(*itJob, result); // gridtime is a client side attribute } } std::stringstream ss; ss << **it; parsed_descriptions.back().OtherAttributes["nordugrid:xrsl;clientxrsl"] = ss.str(); SourceLanguage(parsed_descriptions.back()) = (!language.empty() ? 
language : supportedLanguages.front()); for (std::list::iterator itAltJob = parsed_descriptions.back().GetAlternatives().begin(); itAltJob != parsed_descriptions.back().GetAlternatives().end(); ++itAltJob) { itAltJob->OtherAttributes["nordugrid:xrsl;clientxrsl"] = ss.str(); SourceLanguage(*itAltJob) = parsed_descriptions.back().GetSourceLanguage(); } } if (parsed_descriptions.empty()) { // Probably never happens so check is just in case of future changes result.SetFailure(); result.AddError(IString("No RSL content in job description found")); return result; } if(dialect == "GRIDMANAGER") { if (parsed_descriptions.size() > 1) { result.AddError(IString("Multi-request job description not allowed in GRIDMANAGER dialect")); } std::string action = "request"; if (parsed_descriptions.front().OtherAttributes.find("nordugrid:xrsl;action") != parsed_descriptions.front().OtherAttributes.end()) { action = parsed_descriptions.front().OtherAttributes["nordugrid:xrsl;action"]; } // action = request means real job description. // Any other action may (and currently should) have almost // empty job description. if (action == "request" && parsed_descriptions.front().Application.Executable.Path.empty()) { result.AddError(IString("No execuable path specified in GRIDMANAGER dialect")); } } else { // action is not expected in client side job request for (std::list::iterator it = parsed_descriptions.begin(); it != parsed_descriptions.end(); it++) { if (it->OtherAttributes.find("nordugrid:xrsl;action") != it->OtherAttributes.end()) { result.AddError(IString("'action' attribute not allowed in user-side job description")); } if (it->Application.Executable.Path.empty()) { result.AddError(IString("Executable path not specified ('executable' attribute)")); } } } if (result.HasErrors()) { result.SetFailure(); } if (result) { logger.msg(VERBOSE, "String successfully parsed as %s.", parsed_descriptions.front().GetSourceLanguage()); jobdescs.insert(jobdescs.end(), parsed_descriptions.begin(), parsed_descriptions.end()); } return result; } void XRSLParser::SingleValue(const RSLCondition *c, std::string& value, JobDescriptionParserPluginResult& result) { if (!value.empty()) { result.AddError(IString("Attribute '%s' multiply defined", c->Attr()), c->AttrLocation()); return; } if (c->size() != 1) { result.AddError(IString("Value of attribute '%s' expected to be single value", c->Attr()), c->AttrLocation()); return; } const RSLLiteral *n = dynamic_cast(*c->begin()); if (!n) { result.AddError(IString("Value of attribute '%s' expected to be a string", c->Attr()), c->AttrLocation()); return; } value = n->Value(); } void XRSLParser::ListValue(const RSLCondition *c, std::list& value, JobDescriptionParserPluginResult& result) { if (!value.empty()) { result.AddError(IString("Attribute '%s' multiply defined", c->Attr()), c->AttrLocation()); return; } for (std::list::const_iterator it = c->begin(); it != c->end(); it++) { const RSLLiteral *n = dynamic_cast(*it); if (!n) { result.AddError(IString("Value of attribute '%s' is not a string", c->Attr()), (**it).Location()); continue; } value.push_back(n->Value()); } } void XRSLParser::SeqListValue(const RSLCondition *c, std::list >& value, JobDescriptionParserPluginResult& result, int seqlength) { if (!value.empty()) { result.AddError(IString("Attribute '%s' multiply defined", c->Attr()), c->AttrLocation()); return; } for (std::list::const_iterator it = c->begin(); it != c->end(); it++) { const RSLSequence *s = dynamic_cast(*it); if (!s) { result.AddError(IString("Value of attribute '%s' is 
not sequence", c->Attr()), (**it).Location()); continue; } if (seqlength != -1 && int(s->size()) != seqlength) { result.AddError(IString("Value of attribute '%s' has wrong sequence length: Expected %d, found %d", c->Attr(), seqlength, int(s->size())), s->Location()); continue; } std::list l; for (std::list::const_iterator it = s->begin(); it != s->end(); it++) { const RSLLiteral *n = dynamic_cast(*it); if (!n) { result.AddError(IString("Value of attribute '%s' is not a string", c->Attr()), (**it).Location()); continue; } l.push_back(n->Value()); } value.push_back(l); } } static char StateToShortcut(const std::string& state) { if(state == "PREPARING") { return 'b'; } if(state == "INLRMS") { return 'q'; } if(state == "FINISHING") { return 'f'; } if(state == "FINISHED") { return 'e'; } if(state == "DELETED") { return 'd'; } if(state == "CANCELING") { return 'c'; } return ' '; } static std::string ShortcutToState(char state) { if(state == 'b') { return "PREPARING"; } if(state == 'q') { return "INLRMS"; } if(state == 'f') { return "FINISHING"; } if(state == 'e') { return "FINISHED"; } if(state == 'd') { return "DELETED"; } if(state == 'c') { return "CANCELING"; } return ""; } static bool AddNotificationState( NotificationType ¬ification, const std::string& states) { for (int n = 0; n<(int)states.length(); n++) { std::string state = ShortcutToState(states[n]); if (state.empty()) { return false; } for (std::list::const_iterator s = notification.States.begin(); s != notification.States.end(); s++) { if(*s == state) { // Check if state is already added. state.resize(0); break; } } if (!state.empty()) { notification.States.push_back(state); } } return true; } static bool AddNotification( std::list ¬ifications, const std::string& states, const std::string& email) { for (std::list::iterator it = notifications.begin(); it != notifications.end(); it++) { if (it->Email == email) { // If email already exist in the list add states to that entry. return AddNotificationState(*it,states); } } NotificationType notification; notification.Email = email; if (!AddNotificationState(notification,states)) { return false; } notifications.push_back(notification); return true; } void XRSLParser::Parse(const RSL *r, JobDescription& j, const std::string& dialect, JobDescriptionParserPluginResult& result) const { const RSLBoolean *b; const RSLCondition *c; if ((b = dynamic_cast(r))) { if (b->Op() == RSLAnd) { for (std::list::const_iterator it = b->begin(); it != b->end(); it++) { Parse(*it, j, dialect, result); } } else if (b->Op() == RSLOr) { if (b->size() == 0) { return; // ??? } JobDescription jcopy(j, false); Parse(*b->begin(), j, dialect, result); std::list::const_iterator it = b->begin(); for (it++; it != b->end(); it++) { JobDescription aj(jcopy); Parse(*it, aj, dialect, result); j.AddAlternative(aj); } return; } else { logger.msg(ERROR, "Unexpected RSL type"); return; // ??? 
} } else if ((c = dynamic_cast(r))) { /// \mapattr executable -> ExecutableType::Path if (c->Attr() == "executable") { SingleValue(c, j.Application.Executable.Path, result); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { SingleValue(c, it->Application.Executable.Path, result); } return; } /// \mapattr arguments -> ExecutableType::Argument if (c->Attr() == "arguments") { ListValue(c, j.Application.Executable.Argument, result); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { ListValue(c, it->Application.Executable.Argument, result); } return; } /// \mapattr stdin -> Input if (c->Attr() == "stdin") { SingleValue(c, j.Application.Input, result); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { SingleValue(c, it->Application.Input, result); } return; } /// \mapattr stdout -> Output if (c->Attr() == "stdout") { SingleValue(c, j.Application.Output, result); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { SingleValue(c, it->Application.Output, result); } return; } /// \mapattr stderr -> Error if (c->Attr() == "stderr") { SingleValue(c, j.Application.Error, result); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { SingleValue(c, it->Application.Error, result); } return; } /// TODO \mapattr inputfiles -> DataStagingType::InputFiles if (c->Attr() == "inputfiles") { std::list > ll; SeqListValue(c, ll, result); for (std::list >::iterator it = ll.begin(); it != ll.end(); ++it) { /* Each of the elements of the inputfiles attribute should have at * least two values. */ if (it->size() < 2) { result.AddError(IString("At least two values are needed for the 'inputfiles' attribute")); continue; } if (it->front().empty()) { result.AddError(IString("First value of 'inputfiles' attribute (filename) cannot be empty")); continue; } InputFileType file; file.Name = it->front(); // For USER dialect (default) the second string must be empty, a path to a file or an URL. // For GRIDMANAGER dialect the second string in the list might either be a URL or filesize.checksum. 
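          // Illustrative values (file name, size and checksum are hypothetical):
          //   user-side:   (inputfiles = ("data.dat" "gsiftp://example.org/data.dat"))
          //   GRIDMANAGER: (inputfiles = ("data.dat" "1048576.adler32:0a1b2c3d"))
          // i.e. <size>.<checksum>, split on the first '.' in the code below.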
std::list::iterator itValues = ++(it->begin()); bool is_size_and_checksum = false; if (dialect == "GRIDMANAGER") { long fileSize = -1; std::string::size_type sep = itValues->find('.'); if(stringto(itValues->substr(0,sep), fileSize)) { is_size_and_checksum = true; file.FileSize = fileSize; if(sep != std::string::npos) { file.Checksum = itValues->substr(sep+1); } } } if (!itValues->empty() && !is_size_and_checksum) { URL turl(*itValues); if (!turl) { result.AddError(IString("Invalid URL '%s' for input file '%s'", *itValues, file.Name)); continue; } URLLocation location; for (++itValues; itValues != it->end(); ++itValues) { // add any options and locations // an option applies to the URL preceding it (main or location) std::string::size_type pos = itValues->find('='); if (pos == std::string::npos) { result.AddError(IString("Invalid URL option syntax in option '%s' for input file '%s'", *itValues, file.Name)); continue; } std::string attr_name(itValues->substr(0, pos)); std::string attr_value(itValues->substr(pos+1)); if (attr_name == "location") { if (location) turl.AddLocation(location); location = URLLocation(attr_value); if (!location) { result.AddError(IString("Invalid URL: '%s' in input file '%s'", attr_value, file.Name)); continue; } } else if (location) { location.AddOption(attr_name, attr_value, true); } else { turl.AddOption(attr_name, attr_value, true); } } if (location) turl.AddLocation(location); file.Sources.push_back(turl); } else if (itValues->empty()) { file.Sources.push_back(URL(file.Name)); } file.IsExecutable = false; j.DataStaging.InputFiles.push_back(file); for (std::list::iterator itAlt = j.GetAlternatives().begin(); itAlt != j.GetAlternatives().end(); ++itAlt) { itAlt->DataStaging.InputFiles.push_back(file); } } return; } // Mapping documented above if (c->Attr() == "executables") { /* Store value in the OtherAttributes member and set it later when all * the attributes it depends on has been parsed. */ std::ostringstream os; c->List().Print(os); j.OtherAttributes["nordugrid:xrsl;executables"] = os.str(); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->OtherAttributes["nordugrid:xrsl;executables"] = os.str(); } return; } // Mapping documented above if (c->Attr() == "cache") { std::string cache; SingleValue(c, cache, result); /* Store value in the OtherAttributes member and set it later when all * the attributes it depends on has been parsed. */ j.OtherAttributes["nordugrid:xrsl;cache"] = cache; for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->OtherAttributes["nordugrid:xrsl;cache"] = cache; } return; } /// TODO \mapattr outputfiles -> DataStagingType::OutputFiles if (c->Attr() == "outputfiles") { std::list > ll; SeqListValue(c, ll, result); for (std::list >::iterator it = ll.begin(); it != ll.end(); it++) { /* Each of the elements of the outputfiles attribute should have at * least two values. 
*/ if (it->size() < 2) { result.AddError(IString("At least two values are needed for the 'outputfiles' attribute")); continue; } if (it->front().empty()) { result.AddError(IString("First value of 'outputfiles' attribute (filename) cannot be empty")); continue; } OutputFileType file; file.Name = it->front(); std::list::iterator itValues = ++(it->begin()); URL turl(*itValues); // The second string in the list (it2) might be a URL or empty if (!itValues->empty() && turl.Protocol() != "file") { if (!turl) { result.AddError(IString("Invalid URL '%s' for output file '%s'", *itValues, file.Name)); continue; } URLLocation location; for (++itValues; itValues != it->end(); ++itValues) { // add any options and locations // an option applies to the URL preceding it (main or location) std::string::size_type pos = itValues->find('='); if (pos == std::string::npos) { result.AddError(IString("Invalid URL option syntax in option '%s' for output file '%s'", *itValues, file.Name)); continue; } std::string attr_name(itValues->substr(0, pos)); std::string attr_value(itValues->substr(pos+1)); if (attr_name == "location") { if (location) turl.AddLocation(location); location = URLLocation(attr_value); if (!location) { result.AddError(IString("Invalid URL: '%s' in output file '%s'", attr_value, file.Name)); return; // ??? } } else if (location) { location.AddOption(attr_name, attr_value, true); } else { turl.AddOption(attr_name, attr_value, true); } } if (location) turl.AddLocation(location); file.Targets.push_back(turl); } j.DataStaging.OutputFiles.push_back(file); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->DataStaging.OutputFiles.push_back(file); } } return; } /// \mapattr queue -> QueueName /// TODO \mapattr queue -> JobDescription::OtherAttributes["nordugrid:broker;reject_queue"] if (c->Attr() == "queue") { std::string queueName; SingleValue(c, queueName, result); if (dialect == "GRIDMANAGER" && c->Op() != RSLEqual) { std::ostringstream sOp; sOp << c->Op(); result.AddError(IString("Invalid comparison operator '%s' used at 'queue' attribute in 'GRIDMANAGER' dialect, only \"=\" is allowed", sOp.str())); return; } if (c->Op() != RSLNotEqual && c->Op() != RSLEqual) { std::ostringstream sOp; sOp << c->Op(); result.AddError(IString("Invalid comparison operator '%s' used at 'queue' attribute, only \"!=\" or \"=\" are allowed.", sOp.str())); return; } if (c->Op() == RSLNotEqual) { j.OtherAttributes["nordugrid:broker;reject_queue"] = queueName; for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->OtherAttributes["nordugrid:broker;reject_queue"] = queueName; } } else { j.Resources.QueueName = queueName; for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->Resources.QueueName = queueName; } } return; } /// \mapattr starttime -> ProcessingStartTime if (c->Attr() == "starttime") { std::string time; SingleValue(c, time, result); j.Application.ProcessingStartTime = time; for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->Application.ProcessingStartTime = time; } return; } /// \mapattr lifetime -> SessionLifeTime if (c->Attr() == "lifetime") { std::string time; SingleValue(c, time, result); if(dialect == "GRIDMANAGER") { // No alternatives allowed for GRIDMANAGER dialect. 
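        // Unit convention for the time attributes in this block (lifetime,
        // cputime, walltime), shown with an illustrative value:
        //   user-side RSL: (cputime = "60") -> 60 minutes (PeriodMinutes)
        //   GRIDMANAGER:   (cputime = "60") -> 60 seconds (PeriodSeconds)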
j.Resources.SessionLifeTime = Period(time, PeriodSeconds); } else { j.Resources.SessionLifeTime = Period(time, PeriodMinutes); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->Resources.SessionLifeTime = Period(time, PeriodMinutes); } } return; } /// \mapattr cputime -> TotalCPUTime "With user-side RSL minutes is expected, while for GM-side RSL seconds." if (c->Attr() == "cputime") { std::string time; SingleValue(c, time, result); if(dialect == "GRIDMANAGER") { // No alternatives allowed for GRIDMANAGER dialect. j.Resources.TotalCPUTime = Period(time, PeriodSeconds).GetPeriod(); } else { j.Resources.TotalCPUTime = Period(time, PeriodMinutes).GetPeriod(); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->Resources.TotalCPUTime = Period(time, PeriodMinutes).GetPeriod(); } } return; } /// TotalWallTime is a reference to IndividualWallTime /// \mapattr walltime -> IndividualWallTime /// TODO cputime dialect/units if (c->Attr() == "walltime") { std::string time; SingleValue(c, time, result); if(dialect == "GRIDMANAGER") { // No alternatives allowed for GRIDMANAGER dialect. j.Resources.TotalWallTime = Period(time, PeriodSeconds).GetPeriod(); } else { j.Resources.TotalWallTime = Period(time, PeriodMinutes).GetPeriod(); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->Resources.TotalWallTime = Period(time, PeriodMinutes).GetPeriod(); } } return; } // Documented above. if (c->Attr() == "gridtime") { std::string time; SingleValue(c, time, result); /* Store value in the OtherAttributes member and set it later when all * the attributes it depends on has been parsed. */ j.OtherAttributes["nordugrid:xrsl;gridtime"] = time; for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->OtherAttributes["nordugrid:xrsl;gridtime"] = time; } return; } // TODO \mapattr benchmarks -> ResourcesType::TotalWallTime if (c->Attr() == "benchmarks") { std::list > bm; SeqListValue(c, bm, result, 3); double bValue; // Only the first parsable benchmark is currently supported. for (std::list< std::list >::const_iterator it = bm.begin(); it != bm.end(); it++) { std::list::const_iterator itB = it->begin(); if (!stringto(*++itB, bValue)) continue; // ??? if(dialect == "GRIDMANAGER") { j.Resources.TotalCPUTime.range = Period(*++itB, PeriodSeconds).GetPeriod(); } else { j.Resources.TotalCPUTime.range = Period(*++itB, PeriodMinutes).GetPeriod(); } j.Resources.TotalCPUTime.benchmark = std::pair(it->front(), bValue); return; // ??? } return; } /// \mapattr memory -> IndividualPhysicalMemory if (c->Attr() == "memory") { std::string mem; SingleValue(c, mem, result); j.Resources.IndividualPhysicalMemory = stringto(mem); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->Resources.IndividualPhysicalMemory = j.Resources.IndividualPhysicalMemory; } return; } /// \mapattr disk -> DiskSpace if (c->Attr() == "disk") { std::string disk; SingleValue(c, disk, result); j.Resources.DiskSpaceRequirement.DiskSpace = stringto(disk); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->Resources.DiskSpaceRequirement.DiskSpace = j.Resources.DiskSpaceRequirement.DiskSpace; } return; } // TODO: Maybe add support for RTE options. 
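    // Illustrative example (RTE name is hypothetical): the relational operator
    // is preserved via convertOperator(), so e.g.
    //   (runtimeenvironment >= "APPS/EXAMPLE-1.0")
    // requests version 1.0 or newer of that runtime environment.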
/// \mapattr runtimeenvironment -> RunTimeEnvironment if (c->Attr() == "runtimeenvironment") { std::string runtime; SingleValue(c, runtime, result); j.Resources.RunTimeEnvironment.add(Software(runtime), convertOperator(c->Op())); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->Resources.RunTimeEnvironment.add(Software(runtime), convertOperator(c->Op())); } return; } /// \mapattr middleware -> CEType // This attribute should be passed to the broker and should not be stored. if (c->Attr() == "middleware") { std::string cetype; SingleValue(c, cetype, result); j.Resources.CEType.add(Software(cetype), convertOperator(c->Op())); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->Resources.CEType.add(Software(cetype), convertOperator(c->Op())); } return; } /// \mapattr opsys -> OperatingSystem if (c->Attr() == "opsys") { std::string opsys; SingleValue(c, opsys, result); j.Resources.OperatingSystem.add(Software(opsys), convertOperator(c->Op())); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->Resources.OperatingSystem.add(Software(opsys), convertOperator(c->Op())); } return; } // Documented above. if (c->Attr() == "join") { if (dialect == "GRIDMANAGER") { // Ignore the join attribute for GM (it is a client side attribute). return; // ??? } std::string join; SingleValue(c, join, result); /* Store value in the OtherAttributes member and set it later when all * the attributes it depends on has been parsed. */ j.OtherAttributes["nordugrid:xrsl;join"] = join; for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->OtherAttributes["nordugrid:xrsl;join"] = join; } return; } /// \mapattr gmlog -> LogDir if (c->Attr() == "gmlog") { SingleValue(c, j.Application.LogDir, result); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { SingleValue(c, it->Application.LogDir, result); } return; } /// \mapattr jobname -> JobName if (c->Attr() == "jobname") { SingleValue(c, j.Identification.JobName, result); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { SingleValue(c, it->Identification.JobName, result); } return; } // Documented above. if (c->Attr() == "ftpthreads") { std::string sthreads; SingleValue(c, sthreads, result); /* Store value in the OtherAttributes member and set it later when all * the attributes it depends on has been parsed. */ j.OtherAttributes["nordugrid:xrsl;ftpthreads"] = sthreads; for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->OtherAttributes["nordugrid:xrsl;ftpthreads"] = sthreads; } return; } /// \mapattr acl -> AccessControl if (c->Attr() == "acl") { std::string acl; SingleValue(c, acl, result); XMLNode node(acl); if (!node) { logger.msg(ERROR, "The value of the acl XRSL attribute isn't valid XML."); return; // ??? } node.New(j.Application.AccessControl); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { node.New(it->Application.AccessControl); } return; } // TODO Document non existent mapping. if (c->Attr() == "cluster") { logger.msg(ERROR, "The cluster XRSL attribute is currently unsupported."); // ??? 
return; } /// TODO: \mapattr notify -> ApplicationType::Notification if (c->Attr() == "notify") { std::list l; ListValue(c, l, result); for (std::list::const_iterator notf = l.begin(); notf != l.end(); ++notf) { std::list ll; tokenize(*notf, ll, " \t"); std::list::const_iterator it = ll.begin(); std::string states = "be"; // Default value. if (it->find('@') == std::string::npos) { // The first string is state flags. if (ll.size() == 1) { // Only state flags in value. result.AddError(IString("Syntax error in 'notify' attribute value ('%s'), it must contain an email address", *notf)); continue; } states = *it; it++; } for (; it != ll.end(); it++) { if (it->find('@') == std::string::npos) { result.AddError(IString("Syntax error in 'notify' attribute value ('%s'), it must only contain email addresses after state flag(s)", *notf)); } else if (!AddNotification(j.Application.Notification, states,*it)) { result.AddError(IString("Syntax error in 'notify' attribute value ('%s'), it contains unknown state flags", *notf)); } } } for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->Application.Notification = j.Application.Notification; } return; } /// \mapattr replicacollection -> OtherAttributes /// TODO \mapattr replicacollection -> OtherAttributes["nordugrid:xrsl;replicacollection"] // Is this attribute supported? if (c->Attr() == "replicacollection") { std::string collection; SingleValue(c, collection, result); if (!URL(collection)) // ??? return; j.OtherAttributes["nordugrid:xrsl;replicacollection"] = collection; for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->OtherAttributes["nordugrid:xrsl;replicacollection"] = collection; } return; } /// \mapattr rerun -> Rerun if (c->Attr() == "rerun") { std::string rerun; SingleValue(c, rerun, result); j.Application.Rerun = stringtoi(rerun); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->Application.Rerun = j.Application.Rerun; } return; } /// \mapattr priority -> Priority if (c->Attr() == "priority") { std::string priority; SingleValue(c, priority, result); j.Application.Priority = stringtoi(priority); if (j.Application.Priority > 100) { logger.msg(VERBOSE, "priority is too large - using max value 100"); j.Application.Priority = 100; } for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->Application.Priority = j.Application.Priority; } return; } /// \mapattr architecture -> Platform if (c->Attr() == "architecture") { SingleValue(c, j.Resources.Platform, result); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { SingleValue(c, it->Resources.Platform, result); } return; } /// \mapattr nodeaccess -> NodeAccess if (c->Attr() == "nodeaccess") { std::list l; ListValue(c, l, result); for (std::list::iterator it = l.begin(); it != l.end(); it++) { if (*it == "inbound") { j.Resources.NodeAccess = ((j.Resources.NodeAccess == NAT_OUTBOUND || j.Resources.NodeAccess == NAT_INOUTBOUND) ? NAT_INOUTBOUND : NAT_INBOUND); } else if (*it == "outbound") { j.Resources.NodeAccess = ((j.Resources.NodeAccess == NAT_INBOUND || j.Resources.NodeAccess == NAT_INOUTBOUND) ? NAT_INOUTBOUND : NAT_OUTBOUND); } else { logger.msg(VERBOSE, "Invalid nodeaccess value: %s", *it); // ??? 
return; } } return; } /// \mapattr dryrun -> DryRun if (c->Attr() == "dryrun") { std::string dryrun; SingleValue(c, dryrun, result); j.Application.DryRun = (lower(dryrun) == "yes"); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->Application.DryRun = j.Application.DryRun; } return; } // Underscore, in 'rsl_substitution', is removed by normalization. if (c->Attr() == "rslsubstitution") { // Handled internally by the RSL parser return; } /// \mapattr environment -> Environment if (c->Attr() == "environment") { std::list > ll; SeqListValue(c, ll, result, 2); for (std::list >::iterator it = ll.begin(); it != ll.end(); it++) { j.Application.Environment.push_back(std::make_pair(it->front(), it->back())); } return; } /// \mapattr count -> NumberOfSlots if (c->Attr() == "count") { std::string count; SingleValue(c, count, result); if (!stringto(count, j.Resources.SlotRequirement.NumberOfSlots)) { result.AddError(IString("Value of 'count' attribute must be an integer")); return; } for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); ++it) { it->Resources.SlotRequirement.NumberOfSlots = j.Resources.SlotRequirement.NumberOfSlots; } return; } /// \mapattr countpernode -> SlotsPerHost if (c->Attr() == "countpernode") { std::string countpernode; SingleValue(c, countpernode, result); j.OtherAttributes["nordugrid:xrsl;countpernode"] = countpernode; for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); ++it) { it->OtherAttributes["nordugrid:xrsl;countpernode"] = countpernode; } return; } /// \mapattr exclusiveexecution -> ExclusiveExecution if (c->Attr() == "exclusiveexecution") { std::string ee; SingleValue(c, ee, result); ee = lower(ee); if (ee != "yes" && ee != "true" && ee != "no" && ee != "false") { result.AddError(IString("Value of 'exclusiveexecution' attribute must either be 'yes' or 'no'")); return; } j.Resources.SlotRequirement.ExclusiveExecution = (ee == "yes" || ee == "true") ? SlotRequirementType::EE_TRUE : SlotRequirementType::EE_FALSE; for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); ++it) { it->Resources.SlotRequirement.ExclusiveExecution = j.Resources.SlotRequirement.ExclusiveExecution; } return; } /// TODO: \mapattr jobreport -> RemoteLogging if (c->Attr() == "jobreport") { std::string jobreport; SingleValue(c, jobreport, result); if (!URL(jobreport)) return; // ??? j.Application.RemoteLogging.push_back(RemoteLoggingType()); j.Application.RemoteLogging.back().Location = URL(jobreport); j.Application.RemoteLogging.back().ServiceType = "SGAS"; for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->Application.RemoteLogging.push_back(j.Application.RemoteLogging.back()); } return; } /// \mapattr credentialserver -> CredentialService if (c->Attr() == "credentialserver") { std::string credentialserver; SingleValue(c, credentialserver, result); if (!URL(credentialserver)) return; // ??? j.Application.CredentialService.push_back(credentialserver); for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->Application.CredentialService.push_back(j.Application.CredentialService.back()); } return; } // GM-side attributes. 
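// The attributes below ('action', 'hostname', 'jobid', 'clientxrsl',
// 'clientsoftware', 'savestate') are produced by the client tools for the
// Grid Manager rather than written by users; they are preserved verbatim
// under OtherAttributes["nordugrid:xrsl;<name>"] so that Assemble() can
// re-emit them. Illustrative GM-dialect fragment (values are hypothetical):
//   (action=request)(hostname="submit.example.org")(clientsoftware="nordugrid-arc")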
if (c->Attr() == "action") { std::string action; SingleValue(c, action, result); if (action != "request" && action != "cancel" && action != "clean" && action != "renew" && action != "restart") { logger.msg(VERBOSE, "Invalid action value %s", action); return; // ??? } j.OtherAttributes["nordugrid:xrsl;action"] = action; for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->OtherAttributes["nordugrid:xrsl;action"] = action; } return; } if (c->Attr() == "hostname") { std::string hostname; SingleValue(c, hostname, result); j.OtherAttributes["nordugrid:xrsl;hostname"] = hostname; for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->OtherAttributes["nordugrid:xrsl;hostname"] = hostname; } return; } if (c->Attr() == "jobid") { std::string jobid; SingleValue(c, jobid, result); j.OtherAttributes["nordugrid:xrsl;jobid"] = jobid; for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->OtherAttributes["nordugrid:xrsl;jobid"] = jobid; } return; } if (c->Attr() == "clientxrsl") { std::string clientxrsl; SingleValue(c, clientxrsl, result); j.OtherAttributes["nordugrid:xrsl;clientxrsl"] = clientxrsl; for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->OtherAttributes["nordugrid:xrsl;clientxrsl"] = clientxrsl; } return; } if (c->Attr() == "clientsoftware") { std::string clientsoftware; SingleValue(c, clientsoftware, result); j.OtherAttributes["nordugrid:xrsl;clientsoftware"] = clientsoftware; for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->OtherAttributes["nordugrid:xrsl;clientsoftware"] = clientsoftware; } return; } if (c->Attr() == "savestate") { std::string savestate; SingleValue(c, savestate, result); j.OtherAttributes["nordugrid:xrsl;savestate"] = savestate; for (std::list::iterator it = j.GetAlternatives().begin(); it != j.GetAlternatives().end(); it++) { it->OtherAttributes["nordugrid:xrsl;savestate"] = savestate; } return; } // Unsupported Globus RSL attributes. if (c->Attr() == "resourcemanagercontact" || c->Attr() == "directory" || c->Attr() == "maxcputime" || c->Attr() == "maxwalltime" || c->Attr() == "maxtime" || c->Attr() == "maxmemory" || c->Attr() == "minmemory" || c->Attr() == "grammyjob" || c->Attr() == "project" || c->Attr() == "hostcount" || c->Attr() == "label" || c->Attr() == "subjobcommstype" || c->Attr() == "subjobstarttype" || c->Attr() == "filecleanup" || c->Attr() == "filestagein" || c->Attr() == "filestageinshared" || c->Attr() == "filestageout" || c->Attr() == "gasscache" || c->Attr() == "jobtype" || c->Attr() == "librarypath" || c->Attr() == "remoteiourl" || c->Attr() == "scratchdir") { logger.msg(WARNING, "The specified Globus attribute (%s) is not supported. %s ignored.", c->Attr(), c->Attr()); return; } logger.msg(VERBOSE, "Unknown XRSL attribute: %s - Ignoring it.", c->Attr()); return; } else { logger.msg(ERROR, "Unexpected RSL type"); return; // ??? } // This part will run only when the parsing is at the end of the xrsl file return; } JobDescriptionParserPluginResult XRSLParser::Assemble(const JobDescription& j, std::string& product, const std::string& language, const std::string& dialect) const { if (!IsLanguageSupported(language)) { logger.msg(DEBUG, "Wrong language requested: %s",language); return false; } // First check if the job description is valid. 
if (j.Application.Executable.Path.empty()) { logger.msg(DEBUG, "Missing executable"); return false; } RSLBoolean r(RSLAnd); /// \mapattr executable <- ExecutableType::Path if (!j.Application.Executable.Path.empty()) { RSLList *l = new RSLList(); l->Add(new RSLLiteral(j.Application.Executable.Path)); r.Add(new RSLCondition("executable", RSLEqual, l)); } /// \mapattr arguments <- ExecutableType::Argument if (!j.Application.Executable.Argument.empty()) { RSLList *l = new RSLList; for (std::list::const_iterator it = j.Application.Executable.Argument.begin(); it != j.Application.Executable.Argument.end(); it++) l->Add(new RSLLiteral(*it)); r.Add(new RSLCondition("arguments", RSLEqual, l)); } /// \mapattr stdin <- Input if (!j.Application.Input.empty()) { RSLList *l = new RSLList; l->Add(new RSLLiteral(j.Application.Input)); r.Add(new RSLCondition("stdin", RSLEqual, l)); } /// \mapattr stdout <- Output if (!j.Application.Output.empty()) { RSLList *l = new RSLList; l->Add(new RSLLiteral(j.Application.Output)); r.Add(new RSLCondition("stdout", RSLEqual, l)); } /// \mapattr stderr <- Error if (!j.Application.Error.empty()) { RSLList *l = new RSLList; l->Add(new RSLLiteral(j.Application.Error)); r.Add(new RSLCondition("stderr", RSLEqual, l)); } /// \mapattr cputime <- TotalCPUTime if (j.Resources.TotalCPUTime.range > -1) { RSLList *l = new RSLList; if(dialect == "GRIDMANAGER") { // Seconds l->Add(new RSLLiteral(tostring(j.Resources.TotalCPUTime.range))); } else { // Free format l->Add(new RSLLiteral((std::string)Period(j.Resources.TotalCPUTime.range))); } r.Add(new RSLCondition("cputime", RSLEqual, l)); } /// \mapattr walltime <- IndividualWallTime if (j.Resources.TotalWallTime.range > -1) { RSLList *l = new RSLList; if(dialect == "GRIDMANAGER") { // Seconds l->Add(new RSLLiteral(tostring(j.Resources.TotalWallTime.range))); } else { // Free format l->Add(new RSLLiteral((std::string)Period(j.Resources.TotalWallTime.range))); } r.Add(new RSLCondition("walltime", RSLEqual, l)); } /// \mapattr memory <- IndividualPhysicalMemory if (j.Resources.IndividualPhysicalMemory > -1) { RSLList *l = new RSLList; l->Add(new RSLLiteral(tostring(j.Resources.IndividualPhysicalMemory))); r.Add(new RSLCondition("memory", RSLEqual, l)); } /// \mapattr environment <- Environment if (!j.Application.Environment.empty()) { RSLList *l = new RSLList; for (std::list< std::pair >::const_iterator it = j.Application.Environment.begin(); it != j.Application.Environment.end(); it++) { RSLList *s = new RSLList; s->Add(new RSLLiteral(it->first)); s->Add(new RSLLiteral(it->second)); l->Add(new RSLSequence(s)); } r.Add(new RSLCondition("environment", RSLEqual, l)); } // TODO Document mapping. if(dialect == "GRIDMANAGER") { RSLList *l = NULL; // inputfiles // name url // name size.checksum for (std::list::const_iterator it = j.DataStaging.InputFiles.begin(); it != j.DataStaging.InputFiles.end(); it++) { RSLList *s = new RSLList; s->Add(new RSLLiteral(it->Name)); if (it->Sources.empty() || (it->Sources.front().Protocol() == "file")) { // Local file std::string fsizechecksum; if(it->FileSize != -1) fsizechecksum = tostring(it->FileSize); if (!it->Checksum.empty()) fsizechecksum = "." 
+ fsizechecksum+it->Checksum; s->Add(new RSLLiteral(fsizechecksum)); } else { s->Add(new RSLLiteral(it->Sources.front().fullstr())); } if (!l) l = new RSLList; l->Add(new RSLSequence(s)); } if (l) r.Add(new RSLCondition("inputfiles", RSLEqual, l)); l = NULL; // Executables /// \mapattr executables <- InputFileType::IsExecutable for (std::list::const_iterator it = j.DataStaging.InputFiles.begin(); it != j.DataStaging.InputFiles.end(); it++) { if (it->IsExecutable) { if (!l) l = new RSLList; l->Add(new RSLLiteral(it->Name)); } } if (l) r.Add(new RSLCondition("executables", RSLEqual, l)); l = NULL; // outputfiles // name url // name void for (std::list::const_iterator it = j.DataStaging.OutputFiles.begin(); it != j.DataStaging.OutputFiles.end(); it++) { if (it->Targets.empty() || (it->Targets.front().Protocol() == "file")) { // file to keep // normally must be no file:// here - just a protection RSLList *s = new RSLList; s->Add(new RSLLiteral(it->Name)); s->Add(new RSLLiteral("")); if (!l) l = new RSLList; l->Add(new RSLSequence(s)); } else { // file to stage RSLList *s = new RSLList; s->Add(new RSLLiteral(it->Name)); s->Add(new RSLLiteral(it->Targets.front().fullstr())); if (!l) l = new RSLList; l->Add(new RSLSequence(s)); } } if (l) r.Add(new RSLCondition("outputfiles", RSLEqual, l)); l = NULL; } else { // dialect != "GRIDMANAGER" if (!j.DataStaging.InputFiles.empty() || !j.Application.Executable.Path.empty() || !j.Application.Input.empty()) { RSLList *l = NULL; for (std::list::const_iterator it = j.DataStaging.InputFiles.begin(); it != j.DataStaging.InputFiles.end(); it++) { RSLList *s = new RSLList; s->Add(new RSLLiteral(it->Name)); if (it->Sources.empty()) { s->Add(new RSLLiteral("")); } else if (it->Sources.front().Protocol() == "file" && it->FileSize != -1) { s->Add(new RSLLiteral(it->Sources.front().Path())); } else { s->Add(new RSLLiteral(it->Sources.front().fullstr())); } if (!l) { l = new RSLList; } l->Add(new RSLSequence(s)); } if (l) { r.Add(new RSLCondition("inputfiles", RSLEqual, l)); } // Executables l = NULL; for (std::list::const_iterator it = j.DataStaging.InputFiles.begin(); it != j.DataStaging.InputFiles.end(); it++) if (it->IsExecutable) { if (!l) { l = new RSLList; } l->Add(new RSLLiteral(it->Name)); } if (l) { r.Add(new RSLCondition("executables", RSLEqual, l)); } } if (!j.DataStaging.OutputFiles.empty() || !j.Application.Output.empty() || !j.Application.Error.empty()) { RSLList *l = NULL; for (std::list::const_iterator it = j.DataStaging.OutputFiles.begin(); it != j.DataStaging.OutputFiles.end(); it++) { if (!it->Targets.empty()) { RSLList *s = new RSLList; s->Add(new RSLLiteral(it->Name)); if (!it->Targets.front() || it->Targets.front().Protocol() == "file") s->Add(new RSLLiteral("")); else { URL url(it->Targets.front()); s->Add(new RSLLiteral(url.fullstr())); } if (!l) { l = new RSLList; } l->Add(new RSLSequence(s)); } else { RSLList *s = new RSLList; s->Add(new RSLLiteral(it->Name)); s->Add(new RSLLiteral("")); if (!l) l = new RSLList; l->Add(new RSLSequence(s)); } } if (l) { r.Add(new RSLCondition("outputfiles", RSLEqual, l)); } } } // (dialect == "GRIDMANAGER") /// \mapattr queue <- QueueName if (!j.Resources.QueueName.empty()) { RSLList *l = new RSLList; l->Add(new RSLLiteral(j.Resources.QueueName)); r.Add(new RSLCondition("queue", RSLEqual, l)); } /// \mapattr rerun <- Rerun if (j.Application.Rerun != -1) { RSLList *l = new RSLList; l->Add(new RSLLiteral(tostring(j.Application.Rerun))); r.Add(new RSLCondition("rerun", RSLEqual, l)); } /// \mapattr priority <- 
Priority if (j.Application.Priority != -1) { RSLList *l = new RSLList; l->Add(new RSLLiteral(tostring(j.Application.Priority))); r.Add(new RSLCondition("priority", RSLEqual, l)); } /// TODO: dialect/units /// \mapattr lifetime <- SessionLifeTime if (j.Resources.SessionLifeTime != -1) { RSLList *l = new RSLList; if(dialect == "GRIDMANAGER") { // Seconds l->Add(new RSLLiteral(tostring(j.Resources.SessionLifeTime.GetPeriod()))); } else { // Free format l->Add(new RSLLiteral((std::string)j.Resources.SessionLifeTime)); } r.Add(new RSLCondition("lifetime", RSLEqual, l)); } /// \mapattr disk <- DiskSpace if (j.Resources.DiskSpaceRequirement.DiskSpace > -1) { RSLList *l = new RSLList; l->Add(new RSLLiteral(tostring(j.Resources.DiskSpaceRequirement.DiskSpace))); r.Add(new RSLCondition("disk", RSLEqual, l)); } /// \mapattr runtimeenvironment <- RunTimeEnvironment if (!j.Resources.RunTimeEnvironment.empty()) { std::list::const_iterator itSW = j.Resources.RunTimeEnvironment.getSoftwareList().begin(); std::list::const_iterator itCO = j.Resources.RunTimeEnvironment.getComparisonOperatorList().begin(); for (; itSW != j.Resources.RunTimeEnvironment.getSoftwareList().end(); itSW++, itCO++) { RSLList *l = new RSLList; l->Add(new RSLLiteral(*itSW)); r.Add(new RSLCondition("runtimeenvironment", convertOperator(*itCO), l)); } } /// \mapattr middleware <- CEType if (!j.Resources.CEType.empty()) { std::list::const_iterator itSW = j.Resources.CEType.getSoftwareList().begin(); std::list::const_iterator itCO = j.Resources.CEType.getComparisonOperatorList().begin(); for (; itSW != j.Resources.CEType.getSoftwareList().end(); itSW++, itCO++) { RSLList *l = new RSLList; l->Add(new RSLLiteral(*itSW)); r.Add(new RSLCondition("middleware", convertOperator(*itCO), l)); } } /// \mapattr opsys <- OperatingSystem if (!j.Resources.OperatingSystem.empty()) { std::list::const_iterator itSW = j.Resources.OperatingSystem.getSoftwareList().begin(); std::list::const_iterator itCO = j.Resources.OperatingSystem.getComparisonOperatorList().begin(); for (; itSW != j.Resources.OperatingSystem.getSoftwareList().end(); itSW++, itCO++) { RSLList *l = new RSLList; l->Add(new RSLLiteral((std::string)*itSW)); r.Add(new RSLCondition("opsys", convertOperator(*itCO), l)); } } /// \mapattr architecture <- Platform if (!j.Resources.Platform.empty()) { RSLList *l = new RSLList; l->Add(new RSLLiteral(j.Resources.Platform)); r.Add(new RSLCondition("architecture", RSLEqual, l)); } /// \mapattr count <- NumberOfSlots if (j.Resources.SlotRequirement.NumberOfSlots > -1) { RSLList *l = new RSLList; l->Add(new RSLLiteral(tostring(j.Resources.SlotRequirement.NumberOfSlots))); r.Add(new RSLCondition("count", RSLEqual, l)); } /// \mapattr countpernode <- SlotsPerHost if (j.Resources.SlotRequirement.SlotsPerHost > -1) { if (j.Resources.SlotRequirement.NumberOfSlots <= -1) { logger.msg(ERROR, "Cannot output XRSL representation: The Resources.SlotRequirement.NumberOfSlots attribute must be specified when the Resources.SlotRequirement.SlotsPerHost attribute is specified."); return false; } RSLList *l = new RSLList; l->Add(new RSLLiteral(tostring(j.Resources.SlotRequirement.SlotsPerHost))); r.Add(new RSLCondition("countpernode", RSLEqual, l)); } /// \mapattr exclusiveexecution <- ExclusiveExecution if (j.Resources.SlotRequirement.ExclusiveExecution != SlotRequirementType::EE_DEFAULT) { RSLList *l = new RSLList; l->Add(new RSLLiteral(j.Resources.SlotRequirement.ExclusiveExecution == SlotRequirementType::EE_TRUE ? 
"yes" : "no")); r.Add(new RSLCondition("exclusiveexecution", RSLEqual, l)); } /// \mapattr starttime <- ProcessingStartTime if (j.Application.ProcessingStartTime != -1) { RSLList *l = new RSLList; l->Add(new RSLLiteral(j.Application.ProcessingStartTime.str(MDSTime))); r.Add(new RSLCondition("starttime", RSLEqual, l)); } /// \mapattr gmlog <- LogDir if (!j.Application.LogDir.empty()) { RSLList *l = new RSLList; l->Add(new RSLLiteral(j.Application.LogDir)); r.Add(new RSLCondition("gmlog", RSLEqual, l)); } /// \mapattr jobname <- JobName if (!j.Identification.JobName.empty()) { RSLList *l = new RSLList; l->Add(new RSLLiteral(j.Identification.JobName)); r.Add(new RSLCondition("jobname", RSLEqual, l)); } /// \mapattr acl <- AccessControl if (j.Application.AccessControl) { RSLList *l = new RSLList; std::string acl; j.Application.AccessControl.GetXML(acl, false); l->Add(new RSLLiteral(acl)); r.Add(new RSLCondition("acl", RSLEqual, l)); } /// TODO \mapattr notify <- ApplicationType::Notification if (!j.Application.Notification.empty()) { RSLList *l = new RSLList; for (std::list::const_iterator it = j.Application.Notification.begin(); it != j.Application.Notification.end(); it++) { // Suboptimal, group emails later std::string states; for (std::list::const_iterator s = it->States.begin(); s != it->States.end(); s++) { char state = StateToShortcut(*s); if (state != ' ') states+=state; } l->Add(new RSLLiteral(states + " " + it->Email)); } r.Add(new RSLCondition("notify", RSLEqual, l)); } /// TODO \mapattr jobreport <- RemoteLogging if (!j.Application.RemoteLogging.empty()) { // Pick first SGAS remote logging service. for (std::list::const_iterator it = j.Application.RemoteLogging.begin(); it != j.Application.RemoteLogging.end(); ++it) { if (it->ServiceType == "SGAS") { // Ignoring the optional attribute. 
RSLList *l = new RSLList; l->Add(new RSLLiteral(it->Location.str())); r.Add(new RSLCondition("jobreport", RSLEqual, l)); break; } } } /// \mapattr credentialserver <- CredentialService if (!j.Application.CredentialService.empty()) { RSLList *l = new RSLList; l->Add(new RSLLiteral(j.Application.CredentialService.front().fullstr())); r.Add(new RSLCondition("credentialserver", RSLEqual, l)); } /// \mapattr dryrun <- DryRun if (j.Application.DryRun) { RSLList *l = new RSLList; l->Add(new RSLLiteral("yes")); r.Add(new RSLCondition("dryrun", RSLEqual, l)); } /// TODO \mapnote for (std::map::const_iterator it = j.OtherAttributes.begin(); it != j.OtherAttributes.end(); it++) { std::list keys; tokenize(it->first, keys, ";"); if (keys.size() != 2 || keys.front() != "nordugrid:xrsl") { continue; } if (keys.back() == "action" && dialect != "GRIDMANAGER") { // Dont put the action attribute into non GRIDMANAGER xRSL continue; } RSLList *l = new RSLList; l->Add(new RSLLiteral(it->second)); r.Add(new RSLCondition(keys.back(), RSLEqual, l)); } std::stringstream ss; ss << r; product = ss.str(); return true; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/PaxHeaders.7502/XRSLParser.h0000644000000000000000000000012412675602216025472 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.280699 30 ctime=1513200660.395752326 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/XRSLParser.h0000644000175000002070000000434212675602216025542 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_XRSLPARSER_H__ #define __ARC_XRSLPARSER_H__ #include #include #include /** XRSLParser * The XRSLParser class, derived from the JobDescriptionParserPlugin class, is a * job description parser for the Extended Resource Specification Language * (XRSL) specified in the NORDUGRID-MANUAL-4 document. 
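 *
 * A minimal, purely illustrative xRSL job description accepted by this parser
 * could look like (attribute names as handled in XRSLParser::Parse()):
 * \code
 * &(executable="/bin/echo")(arguments="hello")(stdout="out.txt")(jobname="demo")
 * \endcode
 * The authoritative attribute list and grammar are given in NORDUGRID-MANUAL-4.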
*/ namespace Arc { class RSL; class RSLCondition; class XRSLParser : public JobDescriptionParserPlugin { public: XRSLParser(PluginArgument* parg); virtual ~XRSLParser() {}; virtual JobDescriptionParserPluginResult Parse(const std::string& source, std::list& jobdescs, const std::string& language = "", const std::string& dialect = "") const; virtual JobDescriptionParserPluginResult Assemble(const JobDescription& job, std::string& product, const std::string& language, const std::string& dialect = "") const; static Plugin* Instance(PluginArgument *arg); private: void Parse(const RSL *r, JobDescription& job, const std::string& dialect, JobDescriptionParserPluginResult& result) const; static void SingleValue(const RSLCondition *c, std::string& value, JobDescriptionParserPluginResult& result); static void ListValue(const RSLCondition *c, std::list& value, JobDescriptionParserPluginResult& result); static void SeqListValue(const RSLCondition *c, std::list >& value, JobDescriptionParserPluginResult& result, int seqlength = -1); static void ParseExecutablesAttribute(JobDescription& j, JobDescriptionParserPluginResult& result); static void ParseFTPThreadsAttribute(JobDescription& j, JobDescriptionParserPluginResult& result); static void ParseCacheAttribute(JobDescription& j, JobDescriptionParserPluginResult& result); static void ParseJoinAttribute(JobDescription& j, JobDescriptionParserPluginResult& result); static void ParseGridTimeAttribute(JobDescription& j, JobDescriptionParserPluginResult& result); static void ParseCountPerNodeAttribute(JobDescription& j, JobDescriptionParserPluginResult& result); }; } // namespace Arc #endif // __ARC_XRSLPARSER_H__ nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612675602216025412 xustar000000000000000027 mtime=1459029134.924374 29 atime=1513200594.27394363 30 ctime=1513200660.387752228 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/Makefile.am0000644000175000002070000000153412675602216025460 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libaccJobDescriptionParser.la libaccJobDescriptionParser_la_SOURCES = \ ARCJSDLParser.cpp ARCJSDLParser.h \ ADLParser.cpp ADLParser.h \ JDLParser.cpp JDLParser.h \ XRSLParser.cpp XRSLParser.h \ RSLParser.cpp RSLParser.h \ XMLNodeRecover.cpp XMLNodeRecover.h \ DescriptorsJobDescriptionParser.cpp libaccJobDescriptionParser_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libaccJobDescriptionParser_la_LIBADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libaccJobDescriptionParser_la_LDFLAGS = -no-undefined -avoid-version -module DIST_SUBDIRS = test SUBDIRS = $(TEST_DIR) nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315722025410 xustar000000000000000030 mtime=1513200594.330944327 30 atime=1513200648.892611638 29 ctime=1513200660.38875224 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/Makefile.in0000644000175000002070000011672613214315722025474 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/acc/JobDescriptionParser DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libaccJobDescriptionParser_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libaccJobDescriptionParser_la_OBJECTS = \ libaccJobDescriptionParser_la-ARCJSDLParser.lo \ libaccJobDescriptionParser_la-ADLParser.lo \ libaccJobDescriptionParser_la-JDLParser.lo \ libaccJobDescriptionParser_la-XRSLParser.lo \ libaccJobDescriptionParser_la-RSLParser.lo \ 
libaccJobDescriptionParser_la-XMLNodeRecover.lo \ libaccJobDescriptionParser_la-DescriptorsJobDescriptionParser.lo libaccJobDescriptionParser_la_OBJECTS = \ $(am_libaccJobDescriptionParser_la_OBJECTS) libaccJobDescriptionParser_la_LINK = $(LIBTOOL) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libaccJobDescriptionParser_la_CXXFLAGS) $(CXXFLAGS) \ $(libaccJobDescriptionParser_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libaccJobDescriptionParser_la_SOURCES) DIST_SOURCES = $(libaccJobDescriptionParser_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ 
ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = 
@LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ 
ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libaccJobDescriptionParser.la libaccJobDescriptionParser_la_SOURCES = \ ARCJSDLParser.cpp ARCJSDLParser.h \ ADLParser.cpp ADLParser.h \ JDLParser.cpp JDLParser.h \ XRSLParser.cpp XRSLParser.h \ RSLParser.cpp RSLParser.h \ XMLNodeRecover.cpp XMLNodeRecover.h \ DescriptorsJobDescriptionParser.cpp libaccJobDescriptionParser_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libaccJobDescriptionParser_la_LIBADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libaccJobDescriptionParser_la_LDFLAGS = -no-undefined -avoid-version -module DIST_SUBDIRS = test SUBDIRS = $(TEST_DIR) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/acc/JobDescriptionParser/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/acc/JobDescriptionParser/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libaccJobDescriptionParser.la: $(libaccJobDescriptionParser_la_OBJECTS) $(libaccJobDescriptionParser_la_DEPENDENCIES) $(libaccJobDescriptionParser_la_LINK) -rpath $(pkglibdir) $(libaccJobDescriptionParser_la_OBJECTS) $(libaccJobDescriptionParser_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccJobDescriptionParser_la-ADLParser.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccJobDescriptionParser_la-ARCJSDLParser.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccJobDescriptionParser_la-DescriptorsJobDescriptionParser.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccJobDescriptionParser_la-JDLParser.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccJobDescriptionParser_la-RSLParser.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccJobDescriptionParser_la-XMLNodeRecover.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccJobDescriptionParser_la-XRSLParser.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo 
$(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libaccJobDescriptionParser_la-ARCJSDLParser.lo: ARCJSDLParser.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccJobDescriptionParser_la_CXXFLAGS) $(CXXFLAGS) -MT libaccJobDescriptionParser_la-ARCJSDLParser.lo -MD -MP -MF $(DEPDIR)/libaccJobDescriptionParser_la-ARCJSDLParser.Tpo -c -o libaccJobDescriptionParser_la-ARCJSDLParser.lo `test -f 'ARCJSDLParser.cpp' || echo '$(srcdir)/'`ARCJSDLParser.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccJobDescriptionParser_la-ARCJSDLParser.Tpo $(DEPDIR)/libaccJobDescriptionParser_la-ARCJSDLParser.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ARCJSDLParser.cpp' object='libaccJobDescriptionParser_la-ARCJSDLParser.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccJobDescriptionParser_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccJobDescriptionParser_la-ARCJSDLParser.lo `test -f 'ARCJSDLParser.cpp' || echo '$(srcdir)/'`ARCJSDLParser.cpp libaccJobDescriptionParser_la-ADLParser.lo: ADLParser.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccJobDescriptionParser_la_CXXFLAGS) $(CXXFLAGS) -MT libaccJobDescriptionParser_la-ADLParser.lo -MD -MP -MF $(DEPDIR)/libaccJobDescriptionParser_la-ADLParser.Tpo -c -o libaccJobDescriptionParser_la-ADLParser.lo `test -f 'ADLParser.cpp' || echo '$(srcdir)/'`ADLParser.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccJobDescriptionParser_la-ADLParser.Tpo $(DEPDIR)/libaccJobDescriptionParser_la-ADLParser.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ADLParser.cpp' object='libaccJobDescriptionParser_la-ADLParser.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccJobDescriptionParser_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccJobDescriptionParser_la-ADLParser.lo `test -f 'ADLParser.cpp' || echo '$(srcdir)/'`ADLParser.cpp libaccJobDescriptionParser_la-JDLParser.lo: JDLParser.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccJobDescriptionParser_la_CXXFLAGS) $(CXXFLAGS) -MT libaccJobDescriptionParser_la-JDLParser.lo -MD -MP -MF $(DEPDIR)/libaccJobDescriptionParser_la-JDLParser.Tpo -c -o 
libaccJobDescriptionParser_la-JDLParser.lo `test -f 'JDLParser.cpp' || echo '$(srcdir)/'`JDLParser.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccJobDescriptionParser_la-JDLParser.Tpo $(DEPDIR)/libaccJobDescriptionParser_la-JDLParser.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JDLParser.cpp' object='libaccJobDescriptionParser_la-JDLParser.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccJobDescriptionParser_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccJobDescriptionParser_la-JDLParser.lo `test -f 'JDLParser.cpp' || echo '$(srcdir)/'`JDLParser.cpp libaccJobDescriptionParser_la-XRSLParser.lo: XRSLParser.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccJobDescriptionParser_la_CXXFLAGS) $(CXXFLAGS) -MT libaccJobDescriptionParser_la-XRSLParser.lo -MD -MP -MF $(DEPDIR)/libaccJobDescriptionParser_la-XRSLParser.Tpo -c -o libaccJobDescriptionParser_la-XRSLParser.lo `test -f 'XRSLParser.cpp' || echo '$(srcdir)/'`XRSLParser.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccJobDescriptionParser_la-XRSLParser.Tpo $(DEPDIR)/libaccJobDescriptionParser_la-XRSLParser.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XRSLParser.cpp' object='libaccJobDescriptionParser_la-XRSLParser.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccJobDescriptionParser_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccJobDescriptionParser_la-XRSLParser.lo `test -f 'XRSLParser.cpp' || echo '$(srcdir)/'`XRSLParser.cpp libaccJobDescriptionParser_la-RSLParser.lo: RSLParser.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccJobDescriptionParser_la_CXXFLAGS) $(CXXFLAGS) -MT libaccJobDescriptionParser_la-RSLParser.lo -MD -MP -MF $(DEPDIR)/libaccJobDescriptionParser_la-RSLParser.Tpo -c -o libaccJobDescriptionParser_la-RSLParser.lo `test -f 'RSLParser.cpp' || echo '$(srcdir)/'`RSLParser.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccJobDescriptionParser_la-RSLParser.Tpo $(DEPDIR)/libaccJobDescriptionParser_la-RSLParser.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='RSLParser.cpp' object='libaccJobDescriptionParser_la-RSLParser.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccJobDescriptionParser_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccJobDescriptionParser_la-RSLParser.lo `test -f 'RSLParser.cpp' || echo '$(srcdir)/'`RSLParser.cpp libaccJobDescriptionParser_la-XMLNodeRecover.lo: XMLNodeRecover.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccJobDescriptionParser_la_CXXFLAGS) $(CXXFLAGS) -MT 
libaccJobDescriptionParser_la-XMLNodeRecover.lo -MD -MP -MF $(DEPDIR)/libaccJobDescriptionParser_la-XMLNodeRecover.Tpo -c -o libaccJobDescriptionParser_la-XMLNodeRecover.lo `test -f 'XMLNodeRecover.cpp' || echo '$(srcdir)/'`XMLNodeRecover.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccJobDescriptionParser_la-XMLNodeRecover.Tpo $(DEPDIR)/libaccJobDescriptionParser_la-XMLNodeRecover.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XMLNodeRecover.cpp' object='libaccJobDescriptionParser_la-XMLNodeRecover.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccJobDescriptionParser_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccJobDescriptionParser_la-XMLNodeRecover.lo `test -f 'XMLNodeRecover.cpp' || echo '$(srcdir)/'`XMLNodeRecover.cpp libaccJobDescriptionParser_la-DescriptorsJobDescriptionParser.lo: DescriptorsJobDescriptionParser.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccJobDescriptionParser_la_CXXFLAGS) $(CXXFLAGS) -MT libaccJobDescriptionParser_la-DescriptorsJobDescriptionParser.lo -MD -MP -MF $(DEPDIR)/libaccJobDescriptionParser_la-DescriptorsJobDescriptionParser.Tpo -c -o libaccJobDescriptionParser_la-DescriptorsJobDescriptionParser.lo `test -f 'DescriptorsJobDescriptionParser.cpp' || echo '$(srcdir)/'`DescriptorsJobDescriptionParser.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccJobDescriptionParser_la-DescriptorsJobDescriptionParser.Tpo $(DEPDIR)/libaccJobDescriptionParser_la-DescriptorsJobDescriptionParser.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DescriptorsJobDescriptionParser.cpp' object='libaccJobDescriptionParser_la-DescriptorsJobDescriptionParser.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccJobDescriptionParser_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccJobDescriptionParser_la-DescriptorsJobDescriptionParser.lo `test -f 'DescriptorsJobDescriptionParser.cpp' || echo '$(srcdir)/'`DescriptorsJobDescriptionParser.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-pkglibLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am \ uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/PaxHeaders.7502/XMLNodeRecover.cpp0000644000000000000000000000012413103157454026650 xustar000000000000000027 mtime=1494015788.187602 27 atime=1513200574.317699 30 ctime=1513200660.398752363 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/XMLNodeRecover.cpp0000644000175000002070000000433413103157454026721 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include "XMLNodeRecover.h" namespace Arc { XMLNodeRecover::XMLNodeRecover(const std::string& xml) : XMLNode() { xmlSetStructuredErrorFunc(this, &structured_error_handler); xmlDocPtr doc = xmlRecoverMemory(xml.c_str(), xml.length()); xmlSetStructuredErrorFunc(this, NULL); if (!doc) { return; } xmlNodePtr p = doc->children; for (; p; p = p->next) { if (p->type == XML_ELEMENT_NODE) break; } if (!p) { xmlFreeDoc(doc); return; } node_ = p; is_owner_ = true; } XMLNodeRecover::~XMLNodeRecover() { for (std::list::const_iterator it = errors.begin(); it != errors.end(); ++it) { if(*it) { xmlResetError(*it); delete *it; } } } void XMLNodeRecover::print_error (const xmlError& error) { std::cerr << "Domain: " << error.domain << std::endl; std::cerr << "Code: " << error.code << std::endl; std::cerr << "Message: " << error.message << std::endl; std::cerr << "Level: " << error.level << std::endl; std::cerr << "Filename: " << error.file << std::endl; std::cerr << "Line: " << error.line << std::endl; if (error.str1) std::cerr << "Additional info: " << error.str1 << std::endl; if (error.str2) std::cerr << "Additional info: " << error.str2 << std::endl; if (error.str3) std::cerr << "Additional info: " << error.str3 << std::endl; std::cerr << "Extra number: " << error.int1 << std::endl; std::cerr << "Column: " << error.int2 << std::endl; std::cerr << "Context is " << (error.ctxt == NULL ? "NULL" : "not NULL") << std::endl; std::cerr << "Node is " << (error.node == NULL ? "NULL" : "not NULL") << std::endl; } void XMLNodeRecover::structured_error_handler(void *userData, xmlErrorPtr error) { if (error == NULL) { return; } XMLNodeRecover *xml = static_cast(userData); if (xml == NULL) { return; } xmlErrorPtr new_error = new xmlError(); std::memset(new_error, 0, sizeof(xmlError)); xmlCopyError(error, new_error); xml->errors.push_back(new_error); } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/PaxHeaders.7502/ADLParser.cpp0000644000000000000000000000012413165644475025645 xustar000000000000000027 mtime=1507281213.527874 27 atime=1513200574.280699 30 ctime=1513200660.390752265 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/ADLParser.cpp0000644000175000002070000014466113165644475025726 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "XMLNodeRecover.h" #include "ADLParser.h" namespace Arc { static int bytes_to_mb(unsigned long long int value) { if(value == 0) return 0; return (int)((value - 1) / (1024*1024) + 1); } static unsigned long long int mb_to_bytes(int value) { return (unsigned long long int)(1024ull * 1024ull * (unsigned int)value); } /// \mapname ADL EMI ADL /// The libarccompute library has almost full support for EMI Activity /// Description Language (ADL) v1.16, it is described in the EMI Execution /// Service (ES) specification /// (EMI-ES-Specification_v1.16.odt).
    /// Currently the ADL parser is not able to parse:
    /// TODO ADLParser::ADLParser(PluginArgument* parg) : JobDescriptionParserPlugin(parg) { supportedLanguages.push_back("emies:adl"); } ADLParser::~ADLParser() {} Plugin* ADLParser::Instance(PluginArgument *arg) { return new ADLParser(arg); } // EMI state ARC state // ACCEPTED ACCEPTED // PREPROCESSING PREPARING // SUBMIT // PROCESSING INLRMS // PROCESSING-ACCEPTING // PROCESSING-QUEUED // PROCESSING-RUNNING // CANCELING // POSTPROCESSING FINISHING // TERMINAL FINISHED // DELETED static std::string ADLStateToInternal(const std::string& s, bool optional, Logger& logger) { if(s == "ACCEPTED") { return "ACCEPTED"; } else if(s == "PREPROCESSING") { return "PREPARING"; } else if(s == "PROCESSING") { return "INLRMS"; } else if(s == "PROCESSING-ACCEPTING") { } else if(s == "PROCESSING-QUEUED") { } else if(s == "PROCESSING-RUNNING") { } else if(s == "POSTPROCESSING") { return "FINISHING"; } else if(s == "TERMINAL") { return "FINISHED"; }; logger.msg(optional?WARNING:ERROR, "[ADLParser] Unsupported EMI ES state %s.",s); return ""; } static std::string InternalStateToADL(const std::string& s, bool optional, Logger& logger) { if(s == "ACCEPTED") { } else if(s == "") { return "ACCEPTED"; } else if(s == "PREPARING") { return "PREPROCESSING"; } else if(s == "SUBMIT") { } else if(s == "INLRMS") { return "PROCESSING"; } else if(s == "CANCELING") { } else if(s == "FINISHING") { return "POSTPROCESSING"; } else if(s == "FINISHED") { return "TERMINAL"; } else if(s == "DELETED") { } logger.msg(optional?WARNING:ERROR, "[ADLParser] Unsupported internal state %s.",s); return ""; } static bool ParseOptional(XMLNode el, bool& val, Logger& logger) { XMLNode optional = el.Attribute("optional"); if(!optional) return true; if (strtobool((std::string)optional, val)) { return true; } logger.msg(ERROR, "[ADLParser] Optional for %s elements are not supported yet.", el.Name()); return false; } static bool ParseFlag(XMLNode el, bool& val, Logger& logger) { if(!el) return true; if (strtobool((std::string)el, val)) { return true; } logger.msg(ERROR, "[ADLParser] %s element must be boolean.", el.Name()); return false; } static bool ParseFailureCode(XMLNode executable, std::pair& sec, const std::string& dialect, Logger& logger) { XMLNode failcode = executable["adl:FailIfExitCodeNotEqualTo"]; sec.first = (bool)failcode; if (!sec.first) { return true; } if(!stringto((std::string)failcode, sec.second)) { logger.msg(ERROR, "[ADLParser] Code in FailIfExitCodeNotEqualTo in %s is not valid number.",executable.Name()); return false; } return true; } static bool ParseExecutable(XMLNode executable, ExecutableType& exec, const std::string& dialect, Logger& logger) { exec.Path = (std::string)executable["adl:Path"]; for(XMLNode argument = executable["adl:Argument"]; (bool)argument;++argument) { exec.Argument.push_back((std::string)argument); } if(!ParseFailureCode(executable, exec.SuccessExitCode, dialect, logger)) { return false; } return true; } JobDescriptionParserPluginResult ADLParser::Parse(const std::string& source, std::list& jobdescs, const std::string& language, const std::string& dialect) const { JobDescriptionParserPluginResult result(JobDescriptionParserPluginResult::WrongLanguage); if (language != "" && !IsLanguageSupported(language)) { return result; } XMLNodeRecover node(source); if (node.HasErrors()) { if (node.Name() != "ActivityDescription") { return result; } else { result.SetFailure(); for (std::list::const_iterator itErr = node.GetErrors().begin(); itErr != node.GetErrors().end(); ++itErr) { JobDescriptionParsingError 
err; err.message = (**itErr).message; if (err.message[err.message.length()-1] == '\n') { err.message = err.message.substr(0, err.message.length()-1); } if ((**itErr).line > 0 && (**itErr).int2 > 0) { err.line_pos = std::pair((**itErr).line, (**itErr).int2); } result.AddError(err); } return result; } } /* ActivityDescription ActivityIdentification minOccurs=0 Name minOccurs=0 Description minOccurs=0 Type minOccurs=0 "collectionelement" "parallelelement" "single" "workflownode" Annotation minOccurs=0 maxOccurs=unbounded Application minOccurs=0 Executable minOccurs=0 Path Argument minOccurs=0 maxOccurs=unbounded FailIfExitCodeNotEqualTo minOccurs=0 Input minOccurs=0 Output minOccurs=0 Error minOccurs=0 Environment minOccurs=0 maxOccurs=unbounded Name Value PreExecutable minOccurs=0 maxOccurs=unbounded Path Argument minOccurs=0 maxOccurs=unbounded FailIfExitCodeNotEqualTo minOccurs=0 PostExecutable minOccurs=0 maxOccurs=unbounded Path Argument minOccurs=0 maxOccurs=unbounded FailIfExitCodeNotEqualTo minOccurs=0 RemoteLogging minOccurs=0 maxOccurs=unbounded ServiceType URL minOccurs=0 attr:optional=false ExpirationTime(dateTime) minOccurs=0 attr:optional=false WipeTime(dateTime) minOccurs=0 attr:optional=false Notification minOccurs=0 maxOccurs=unbounded Protocol "email" Recipient maxOccurs=unbounded OnState(esmain:PrimaryActivityStatus) minOccurs=0 maxOccurs=unbounded attr:optional Resources minOccurs=0 OperatingSystem minOccurs=0 Name aix centos debian fedoracore gentoo leopard linux-rocks mandrake redhatenterpriseas scientificlinux scientificlinuxcern slackware suse ubuntu windowsvista windowsxp Family minOccurs=0 "linux" "macosx" "solaris" "windows" Version minOccurs=0 Platform minOccurs=0 "amd64" "i386" "itanium" "powerpc" "sparc" RuntimeEnvironment minOccurs=0 maxOccurs=unbounded Name !!! 
aix centos debian fedoracore gentoo leopard linux-rocks mandrake redhatenterpriseas scientificlinux scientificlinuxcern slackware suse ubuntu windowsvista windowsxp Version minOccurs=0 Option minOccurs=0 maxOccurs=unbounded attr:optional=false ParallelEnvironment minOccurs=0 Type minOccurs=0 "MPI" "GridMPI" "IntelMPI" "LAM-MPI" "MPICH1" "MPICH2" "MPICH-GM" "MPICH-MX" "MVAPICH" "MVAPICH2" "OpenMPI" "POE" "PVM" Version minOccurs=0 ProcessesPerSlot minOccurs=0 ThreadsPerProcess minOccurs=0 Option minOccurs=0 maxOccurs=unbounded Name Value Coprocessor minOccurs=0 "CUDA" "FPGA" attr:optional NetworkInfo minOccurs=0 "100megabitethernet" "gigabitethernet" "10gigabitethernet" "infiniband" "myrinet" attr:optional NodeAccess minOccurs=0 "inbound" "outbound" "inoutbound" IndividualPhysicalMemory minOccurs=0 IndividualVirtualMemory minOccurs=0 DiskSpaceRequirement minOccurs=0 RemoteSessionAccess minOccurs=0 Benchmark minOccurs=0 BenchmarkType bogomips cfp2006 cint2006 linpack specfp2000 specint2000 BenchmarkValue attr:optional=false SlotRequirement minOccurs=0 NumberOfSlots SlotsPerHost minOccurs=0 attr:useNumberOfSlots=false ExclusiveExecution minOccurs=0 QueueName minOccurs=0 IndividualCPUTime minOccurs=0 TotalCPUTime minOccurs=0 WallTime minOccurs=0 DataStaging minOccurs=0 ClientDataPush InputFile minOccurs=0 maxOccurs=unbounded Name Source minOccurs=0 maxOccurs=unbounded URI DelegationID minOccurs=0 Option minOccurs=0 maxOccurs=unbounded Name Value IsExecutable minOccurs=0 OutputFile minOccurs=0 maxOccurs=unbounded Name Target minOccurs=0 maxOccurs=unbounded URI DelegationID minOccurs=0 Option minOccurs=0 maxOccurs=unbounded Name Value Mandatory minOccurs=0 CreationFlag minOccurs=0 "overwrite" "append" "dontOverwrite" UseIfFailure minOccurs=0 UseIfCancel minOccurs=0 UseIfSuccess minOccurs=0 */ NS ns; ns["adl"] = "http://www.eu-emi.eu/es/2010/12/adl"; ns["nordugrid-adl"] = "http://www.nordugrid.org/es/2011/12/nordugrid-adl"; node.Namespaces(ns); // ActivityDescription if(!MatchXMLName(node,"adl:ActivityDescription")) { logger.msg(VERBOSE, "[ADLParser] Root element is not ActivityDescription "); return false; } JobDescription parsed_jobdescription; XMLNode identification = node["adl:ActivityIdentification"]; XMLNode application = node["adl:Application"]; XMLNode resources = node["adl:Resources"]; XMLNode staging = node["adl:DataStaging"]; if((bool)identification) { /// \mapattr ActivityIdentification.Name -> JobName parsed_jobdescription.Identification.JobName = (std::string)identification["adl:Name"]; /// \mapattr ActivityIdentification.Description -> Description parsed_jobdescription.Identification.Description = (std::string)identification["adl:Description"]; /// \mapattr ActivityIdentification.Type -> JobIdentificationType::Type parsed_jobdescription.Identification.Type = (std::string)identification["adl:Type"]; /// \mapattr ActivityIdentification.Annotation -> Annotation for(XMLNode annotation = identification["adl:Annotation"]; (bool)annotation;++annotation) { parsed_jobdescription.Identification.Annotation.push_back((std::string)annotation); } // ARC extension: ActivityOldID // TODO: Add note about this being a ARC extension. 
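    // Illustrative sketch (element value is a placeholder, not taken from the
    // EMI ES specification) of how this ARC-specific extension appears inside
    // an ADL document, using the namespaces declared elsewhere in this parser:
    //   <ActivityIdentification xmlns="http://www.eu-emi.eu/es/2010/12/adl"
    //       xmlns:nordugrid-adl="http://www.nordugrid.org/es/2011/12/nordugrid-adl">
    //     <nordugrid-adl:ActivityOldID>urn:example:previous-activity-id</nordugrid-adl:ActivityOldID>
    //   </ActivityIdentification>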
/// \mapattr ActivityIdentification.ActivityOldID -> ActivityOldID for(XMLNode activityoldid = identification["nordugrid-adl:ActivityOldID"]; (bool)activityoldid;++activityoldid) { parsed_jobdescription.Identification.ActivityOldID.push_back((std::string)activityoldid); } } if((bool)application) { /// \mapattr Application.Executable.Path -> ExecutableType::Path /// \mapattr Application.Executable.Argument -> ExecutableType::Argument XMLNode executable = application["adl:Executable"]; if(executable && !ParseExecutable(executable, parsed_jobdescription.Application.Executable, dialect, logger)) { return false; } // ARC extensions: Rerun, Priority // TODO: Add note about this being a ARC extension. /// \mapattr Application.Rerun -> Rerun if((bool)application["nordugrid-adl:Rerun"]) parsed_jobdescription.Application.Rerun = stringtoi((std::string)application["nordugrid-adl:Rerun"]); /// \mapattr Application.Priority -> Priority if((bool)application["nordugrid-adl:Priority"]) { parsed_jobdescription.Application.Priority = stringtoi((std::string)application["nordugrid-adl:Priority"]); if (parsed_jobdescription.Application.Priority > 100) { logger.msg(VERBOSE, "[ADLParser] priority is too large - using max value 100"); parsed_jobdescription.Application.Priority = 100; } } /// \mapattr Application.Input -> Input parsed_jobdescription.Application.Input = (std::string)application["adl:Input"]; /// \mapattr Application.Output -> Output parsed_jobdescription.Application.Output = (std::string)application["adl:Output"]; /// \mapattr Application.Error -> Error parsed_jobdescription.Application.Error = (std::string)application["adl:Error"]; /// \mapattr Application.Environment -> Environment for(XMLNode environment = application["adl:Environment"]; (bool)environment;++environment) { parsed_jobdescription.Application.Environment.push_back( std::pair( (std::string)environment["adl:Name"], (std::string)environment["adl:Value"])); } /// \mapattr Application.PreExecutable -> PreExecutable for(XMLNode preexecutable = application["adl:PreExecutable"]; (bool)preexecutable;++preexecutable) { ExecutableType exec; if(!ParseExecutable(preexecutable, exec, dialect, logger)) { return false; } parsed_jobdescription.Application.PreExecutable.push_back(exec); } /// \mapattr Application.PostExecutable -> PostExecutable for(XMLNode postexecutable = application["adl:PostExecutable"]; (bool)postexecutable;++postexecutable) { ExecutableType exec; if(!ParseExecutable(postexecutable, exec, dialect, logger)) { return false; } parsed_jobdescription.Application.PostExecutable.push_back(exec); } /// \mapattr Application.LoggingDirectory -> LogDir parsed_jobdescription.Application.LogDir = (std::string)application["LoggingDirectory"]; for(XMLNode logging = application["adl:RemoteLogging"]; (bool)logging;++logging) { URL surl((std::string)logging["adl:URL"]); if(!surl) { logger.msg(ERROR, "[ADLParser] Unsupported URL %s for RemoteLogging.",(std::string)logging["adl:URL"]); return false; } parsed_jobdescription.Application.RemoteLogging.push_back(RemoteLoggingType()); parsed_jobdescription.Application.RemoteLogging.back().ServiceType = (std::string)logging["adl:ServiceType"]; parsed_jobdescription.Application.RemoteLogging.back().Location = (std::string)logging["adl:URL"]; if (!ParseOptional(logging, parsed_jobdescription.Application.RemoteLogging.back().optional, logger)) { return false; } } /// \mapattr Application.ExpirationTime -> ExpirationTime XMLNode expire = application["adl:ExpirationTime"]; if((bool)expire) { bool b; 
if(!ParseOptional(expire,b,logger)) { return false; } parsed_jobdescription.Application.ExpirationTime = (std::string)expire; if(parsed_jobdescription.Application.ExpirationTime.GetTime() == (time_t)(-1)) { logger.msg(b?WARNING:ERROR, "[ADLParser] Wrong time %s in ExpirationTime.",(std::string)expire); if(!b) { return false; } } } /// \mapattr Application.WipeTime -> SessionLifeTime XMLNode wipe = application["adl:WipeTime"]; if((bool)wipe) { parsed_jobdescription.Resources.SessionLifeTime = (std::string)wipe; // TODO: check validity. Do it after type is clarified. bool b; if(!ParseOptional(wipe,b,logger)) { return false; } } // Notification // *optional // Protocol 1-1 [email] // Recipient 1- // OnState 0- for(XMLNode notify = application["adl:Notification"]; (bool)notify;++notify) { bool b; if(!ParseOptional(expire,b,logger)) { return false; } if((std::string)notify["adl:Protocol"] != "email") { if(!b) { logger.msg(ERROR, "[ADLParser] Only email Prorocol for Notification is supported yet."); return false; } logger.msg(WARNING, "[ADLParser] Only email Prorocol for Notification is supported yet."); continue; } NotificationType n; for(XMLNode onstate = notify["adl:OnState"];(bool)onstate;++onstate) { std::string s = ADLStateToInternal((std::string)onstate,b,logger); if(s.empty()) { if(!b) { return false; } } n.States.push_back(s); } for(XMLNode rcpt = notify["adl:Recipient"];(bool)rcpt;++rcpt) { n.Email = (std::string)rcpt; parsed_jobdescription.Application.Notification.push_back(n); } } } if((bool)resources) { /// \mapattr Resources.OperatingSystem -> OperatingSystem XMLNode os = resources["adl:OperatingSystem"]; if((bool)os) { // TODO: convert from EMI ES types. So far they look similar. Software os_((std::string)os["adl:Family"],(std::string)os["adl:Name"],(std::string)os["adl:Version"]); parsed_jobdescription.Resources.OperatingSystem.add(os_, Software::EQUAL); } /// \mapattr Resources.Platform -> Platform XMLNode platform = resources["adl:Platform"]; if((bool)platform) { // TODO: convert from EMI ES types. So far they look similar. 
parsed_jobdescription.Resources.Platform = (std::string)platform; } /// \mapattr Resources.RuntimeEnvironment -> RunTimeEnvironment for(XMLNode rte = resources["adl:RuntimeEnvironment"];(bool)rte;++rte) { Software rte_("",(std::string)rte["adl:Name"],(std::string)rte["adl:Version"]); for(XMLNode o = rte["adl:Option"];(bool)o;++o) { rte_.addOption((std::string)o); } bool b; if(!ParseOptional(rte,b,logger)) { return false; } parsed_jobdescription.Resources.RunTimeEnvironment.add(rte_, Software::EQUAL); } if((bool)resources["adl:ParallelEnvironment"]) { ParallelEnvironmentType& pe = parsed_jobdescription.Resources.ParallelEnvironment; XMLNode xpe = resources["adl:ParallelEnvironment"]; /// \mapattr Resources.ParallelEnvironment.Type -> ParallelEnvironmentType::Type if ((bool)xpe["adl:Type"]) { pe.Type = (std::string)xpe["adl:Type"]; } /// \mapattr Resources.ParallelEnvironment.Version -> ParallelEnvironmentType::Version if ((bool)xpe["adl:Version"]) { pe.Version = (std::string)xpe["adl:Version"]; } /// \mapattr Resources.ParallelEnvironment.ProcessesPerSlot -> ParallelEnvironmentType::ProcessesPerSlot if (((bool)xpe["adl:ProcessesPerSlot"]) && !stringto(xpe["adl:ProcessesPerSlot"], pe.ProcessesPerSlot)) { logger.msg(ERROR, "[ADLParser] Missing or wrong value in ProcessesPerSlot."); return false; } /// \mapattr Resources.ParallelEnvironment.ThreadsPerProcess -> ParallelEnvironmentType::ThreadsPerProcess if (((bool)xpe["adl:ThreadsPerProcess"]) && !stringto(xpe["adl:ThreadsPerProcess"], pe.ThreadsPerProcess)) { logger.msg(ERROR, "[ADLParser] Missing or wrong value in ThreadsPerProcess."); return false; } /// \mapattr Resources.ParallelEnvironment.Option -> ParallelEnvironmentType::Options for (XMLNode xOption = xpe["adl:Option"]; xOption; ++xOption) { if ((!(bool)xOption["adl:Name"]) || ((std::string)xOption["adl:Name"]).empty()) { logger.msg(ERROR, "[ADLParser] Missing Name element or value in ParallelEnvironment/Option element."); return false; } pe.Options.insert(std::make_pair(xOption["adl:Name"], xOption["adl:Value"])); } } /// \mapattr Resources.Coprocessor -> Coprocessor XMLNode coprocessor = resources["adl:Coprocessor"]; if((bool)coprocessor && !((std::string)coprocessor).empty()) { parsed_jobdescription.Resources.Coprocessor = coprocessor; if (!ParseOptional(coprocessor, parsed_jobdescription.Resources.Coprocessor.optIn, logger)) { return false; } } /// \mapattr Resources.NetworkInfo -> NetworkInfo XMLNode netinfo = resources["adl:NetworkInfo"]; if(((bool)netinfo)) { logger.msg(ERROR, "[ADLParser] NetworkInfo is not supported yet."); return false; } /// \mapattr Resources.NodeAccess -> NodeAccess XMLNode nodeaccess = resources["adl:NodeAccess"]; if(nodeaccess) { std::string na = nodeaccess; if(na == "inbound") { parsed_jobdescription.Resources.NodeAccess = NAT_INBOUND; } else if(na == "outbound") { parsed_jobdescription.Resources.NodeAccess = NAT_OUTBOUND; } else if(na == "inoutbound") { parsed_jobdescription.Resources.NodeAccess = NAT_INOUTBOUND; } else { logger.msg(ERROR, "[ADLParser] NodeAccess value %s is not supported yet.",na); return false; } } XMLNode slot = resources["adl:SlotRequirement"]; if((bool)slot) { /// \mapattr Resources.SlotRequirement.NumberOfSlots -> NumberOfSlots if(!stringto(slot["adl:NumberOfSlots"],parsed_jobdescription.Resources.SlotRequirement.NumberOfSlots)) { logger.msg(ERROR, "[ADLParser] Missing or wrong value in NumberOfSlots."); return false; } if((bool)slot["adl:SlotsPerHost"]) { XMLNode use = slot["adl:SlotsPerHost"].Attribute("useNumberOfSlots"); 
if((bool)use && (((std::string)use == "true") || ((std::string)use == "1"))) { if (!(bool)slot["adl:NumberOfSlots"]) { logger.msg(ERROR, "[ADLParser] The NumberOfSlots element should be specified, when the value of useNumberOfSlots attribute of SlotsPerHost element is \"true\"."); return false; } parsed_jobdescription.Resources.SlotRequirement.SlotsPerHost = parsed_jobdescription.Resources.SlotRequirement.NumberOfSlots; } /// \mapattr Resources.SlotRequirement.SlotsPerHost -> SlotsPerHost else if(!stringto(slot["adl:SlotsPerHost"],parsed_jobdescription.Resources.SlotRequirement.SlotsPerHost)) { logger.msg(ERROR, "[ADLParser] Missing or wrong value in SlotsPerHost."); return false; } /// \mapattr Resources.SlotRequirement.ExclusiveExecution -> ExclusiveExecution if((bool)slot["adl:ExclusiveExecution"]) { const std::string ee = slot["adl:ExclusiveExecution"]; if ((ee == "true") || (ee == "1")) { parsed_jobdescription.Resources.SlotRequirement.ExclusiveExecution = SlotRequirementType::EE_TRUE; } else if ((ee == "false") || (ee == "0")) { parsed_jobdescription.Resources.SlotRequirement.ExclusiveExecution = SlotRequirementType::EE_FALSE; } else { parsed_jobdescription.Resources.SlotRequirement.ExclusiveExecution = SlotRequirementType::EE_DEFAULT; } } } } /// \mapattr Resources.QueueName -> QueueName XMLNode queue = resources["adl:QueueName"]; if((bool)queue) { parsed_jobdescription.Resources.QueueName = (std::string)queue; } /// \mapattr Resources.IndividualPhysicalMemory -> IndividualPhysicalMemory XMLNode memory; memory = resources["adl:IndividualPhysicalMemory"]; if((bool)memory) { unsigned long long int value = -1; if(!stringto((std::string)memory,value)) { logger.msg(ERROR, "[ADLParser] Missing or wrong value in IndividualPhysicalMemory."); return false; } parsed_jobdescription.Resources.IndividualPhysicalMemory.max = bytes_to_mb(value); } /// \mapattr Resources.IndividualVirtualMemory -> IndividualVirtualMemory memory = resources["adl:IndividualVirtualMemory"]; if((bool)memory) { unsigned long long int value = -1; if(!stringto((std::string)memory,value)) { logger.msg(ERROR, "[ADLParser] Missing or wrong value in IndividualVirtualMemory."); return false; } parsed_jobdescription.Resources.IndividualVirtualMemory.max = bytes_to_mb(value); } /// \mapattr Resources.DiskSpaceRequirement -> DiskSpace memory = resources["adl:DiskSpaceRequirement"]; if((bool)memory) { unsigned long long int v = 0; if((!stringto((std::string)memory,v)) || (v == 0)) { logger.msg(ERROR, "[ADLParser] Missing or wrong value in DiskSpaceRequirement."); return false; } parsed_jobdescription.Resources.DiskSpaceRequirement.DiskSpace.max = bytes_to_mb(v); } /// \mapattr Resources.RemoteSessionAccess -> SessionDirectoryAccess if((bool)resources["adl:RemoteSessionAccess"]) { bool v = false; if(!ParseFlag(resources["adl:RemoteSessionAccess"],v,logger)) { return false; } parsed_jobdescription.Resources.SessionDirectoryAccess = v?SDAM_RW:SDAM_NONE; } if((bool)resources["adl:Benchmark"]) { logger.msg(ERROR, "[ADLParser] Benchmark is not supported yet."); return false; } XMLNode time; time = resources["adl:IndividualCPUTime"]; if((bool)time) { /// \mapattr Resources.IndividualCPUTime -> IndividualCPUTime if(!stringto((std::string)time,parsed_jobdescription.Resources.IndividualCPUTime.range.max)) { logger.msg(ERROR, "[ADLParser] Missing or wrong value in IndividualCPUTime."); return false; } } time = resources["adl:TotalCPUTime"]; if((bool)time) { /// \mapattr Resources.TotalCPUTime -> TotalCPUTime 
if(!stringto((std::string)time,parsed_jobdescription.Resources.TotalCPUTime.range.max)) { logger.msg(ERROR, "[ADLParser] Missing or wrong value in TotalCPUTime."); return false; } } time = resources["adl:WallTime"]; if((bool)time) { /// \mapattr Resources.WallTime -> IndividualWallTime /// TODO: Change to IndividualWallTime if(!stringto((std::string)time,parsed_jobdescription.Resources.TotalWallTime.range.max)) { logger.msg(ERROR, "[ADLParser] Missing or wrong value in WallTime."); return false; } parsed_jobdescription.Resources.IndividualWallTime = parsed_jobdescription.Resources.TotalWallTime; } } if((bool)staging) { bool clientpush = false; if(!ParseFlag(staging["adl:ClientDataPush"],clientpush,logger)) { return false; } if (clientpush) { parsed_jobdescription.OtherAttributes["emi-adl:ClientDataPush"] = "true"; InputFileType file; // Also using unnamed file for that parsed_jobdescription.DataStaging.InputFiles.push_back(file); } for(XMLNode input = staging["adl:InputFile"];(bool)input;++input) { InputFileType file; file.Name = (std::string)input["adl:Name"]; if(file.Name.empty()) { logger.msg(ERROR, "[ADLParser] Missing or empty Name in InputFile."); return false; } std::string ex = input["adl:IsExecutable"]; file.IsExecutable = !(ex.empty() || (ex == "false") || (ex == "0")); if (!input["adl:Source"]) { file.Sources.push_back(URL(file.Name)); } else { for(XMLNode source = input["adl:Source"];(bool)source;++source) { SourceType surl((std::string)source["adl:URI"]); if(!surl) { logger.msg(ERROR, "[ADLParser] Wrong URI specified in Source - %s.",(std::string)source["adl:URI"]); return false; } if((bool)source["adl:DelegationID"]) { surl.DelegationID = (std::string)source["adl:DelegationID"]; } if((bool)source["adl:Option"]) { XMLNode option = source["adl:Option"]; for(;(bool)option;++option) { surl.AddOption(option["adl:Name"],option["adl:Value"],true); }; } file.Sources.push_back(surl); } } // TODO: FileSize and Checksum. Probably not useful for HTTP-like interfaces anyway. 
parsed_jobdescription.DataStaging.InputFiles.push_back(file); } for(XMLNode output = staging["adl:OutputFile"];(bool)output;++output) { OutputFileType file; file.Name = (std::string)output["adl:Name"]; if(file.Name.empty()) { logger.msg(ERROR, "[ADLParser] Missing or empty Name in OutputFile."); return false; } for(XMLNode target = output["adl:Target"];(bool)target;++target) { TargetType turl((std::string)target["adl:URI"]); if(!turl) { logger.msg(ERROR, "[ADLParser] Wrong URI specified in Target - %s.",(std::string)target["adl:URI"]); return false; } if((bool)target["adl:DelegationID"]) { turl.DelegationID = (std::string)target["adl:DelegationID"]; } if((bool)target["adl:Option"]) { XMLNode option = target["adl:Option"]; for(;(bool)option;++option) { std::string opt_name((std::string)option["adl:Name"]); if(opt_name == "location") { URLLocation location_url(option["adl:Value"]); if(!location_url || location_url.Protocol() == "file") { logger.msg(ERROR, "Location URI for file %s is invalid", file.Name); return false; } turl.AddLocation(location_url); } else { turl.AddOption(opt_name,option["adl:Value"],true); } } } bool mandatory = false; if(!ParseFlag(target["adl:Mandatory"],mandatory,logger)) { return false; } if((!ParseFlag(target["adl:UseIfFailure"],turl.UseIfFailure,logger)) || (!ParseFlag(target["adl:UseIfCancel"],turl.UseIfCancel,logger)) || (!ParseFlag(target["adl:UseIfSuccess"],turl.UseIfSuccess,logger))) { return false; } if((bool)target["adl:CreationFlag"]) { std::string v = target["adl:CreationFlag"]; if(v == "overwrite") { turl.CreationFlag = TargetType::CFE_OVERWRITE; } else if(v == "append") { turl.CreationFlag = TargetType::CFE_APPEND; } else if(v == "dontOverwrite") { turl.CreationFlag = TargetType::CFE_DONTOVERWRITE; } else { logger.msg(ERROR, "[ADLParser] CreationFlag value %s is not supported.",v); return false; }; } turl.AddOption("mandatory",mandatory?"true":"false",true); file.Targets.push_back(turl); } parsed_jobdescription.DataStaging.OutputFiles.push_back(file); } if((bool)staging["nordugrid-adl:DelegationID"]) { parsed_jobdescription.DataStaging.DelegationID = (std::string)staging["nordugrid-adl:DelegationID"]; } } jobdescs.push_back(parsed_jobdescription); return true; } static void generateExecutableTypeElement(XMLNode element, const ExecutableType& exec) { if(exec.Path.empty()) return; element.NewChild("Path") = exec.Path; for(std::list::const_iterator it = exec.Argument.begin(); it != exec.Argument.end(); ++it) { element.NewChild("Argument") = *it; } if (exec.SuccessExitCode.first) { element.NewChild("FailIfExitCodeNotEqualTo") = tostring(exec.SuccessExitCode.second); } } JobDescriptionParserPluginResult ADLParser::Assemble(const JobDescription& job, std::string& product, const std::string& language, const std::string& dialect) const { if (!IsLanguageSupported(language)) { return false; } NS ns; ns[""] = "http://www.eu-emi.eu/es/2010/12/adl"; ns["emiestypes"] = "http://www.eu-emi.eu/es/2010/12/types"; ns["nordugrid-adl"] = "http://www.nordugrid.org/es/2011/12/nordugrid-adl"; // ActivityDescription XMLNode description(ns, "ActivityDescription"); XMLNode identification = description.NewChild("ActivityIdentification"); XMLNode application = description.NewChild("Application"); XMLNode resources = description.NewChild("Resources"); XMLNode staging = description.NewChild("DataStaging"); // ActivityIdentification /// \mapattr ActivityIdentification.JobName <- JobName if(!job.Identification.JobName.empty()) identification.NewChild("Name") = 
job.Identification.JobName; /// \mapattr ActivityIdentification.Description <- Description if(!job.Identification.Description.empty()) identification.NewChild("Description") = job.Identification.Description; /// \mapattr ActivityIdentification.Type <- JobIdentificationType::Type if(!job.Identification.Type.empty()) identification.NewChild("Type") = job.Identification.Type; /// \mapattr ActivityIdentification.Annotation <- Annotation for (std::list::const_iterator it = job.Identification.Annotation.begin(); it != job.Identification.Annotation.end(); it++) { identification.NewChild("Annotation") = *it; } // ARC extension: ActivityOldID /// \mapattr ActivityIdentification.ActivityOldID <- ActivityOldID /// TODO: Add mapping note that this element is an extention. for (std::list::const_iterator it = job.Identification.ActivityOldID.begin(); it != job.Identification.ActivityOldID.end(); ++it) { identification.NewChild("nordugrid-adl:ActivityOldID") = *it; } // Application /// \mapattr Application.Executable.Path <- ExecutableType::Path /// \mapattr Application.Executable.Argument <- ExecutableType::Argument generateExecutableTypeElement(application.NewChild("Executable"), job.Application.Executable); /// \mapattr Application.Input <- Input if(!job.Application.Input.empty()) application.NewChild("Input") = job.Application.Input; /// \mapattr Application.Output <- Output if(!job.Application.Output.empty()) application.NewChild("Output") = job.Application.Output; /// \mapattr Application.Error <- Error if(!job.Application.Error.empty()) application.NewChild("Error") = job.Application.Error; // ARC extensions: Rerun, Priority /// TODO: Add mapping note that these elements are an extention. /// \mapattr Application.Rerun -> Rerun if(job.Application.Rerun > -1) { XMLNode rerun = application.NewChild("nordugrid-adl:Rerun"); rerun = tostring(job.Application.Rerun); } /// \mapattr Application.Priority -> Priority if(job.Application.Priority > -1) { XMLNode priority = application.NewChild("nordugrid-adl:Priority"); priority = tostring(job.Application.Priority); } /// \mapattr Application.Environment <- Environment for(std::list< std::pair >::const_iterator it = job.Application.Environment.begin(); it != job.Application.Environment.end(); it++) { XMLNode environment = application.NewChild("Environment"); environment.NewChild("Name") = it->first; environment.NewChild("Value") = it->second; } /// \mapattr Application.PreExecutable <- PreExecutable for(std::list::const_iterator it = job.Application.PreExecutable.begin(); it != job.Application.PreExecutable.end(); ++it) { generateExecutableTypeElement(application.NewChild("PreExecutable"), *it); } /// \mapattr Application.PostExecutable <- PostExecutable for(std::list::const_iterator it = job.Application.PostExecutable.begin(); it != job.Application.PostExecutable.end(); ++it) { generateExecutableTypeElement(application.NewChild("PostExecutable"), *it); } /// \mapattr Application.LoggingDirectory <- LogDir if(!job.Application.LogDir.empty()) application.NewChild("LoggingDirectory") = job.Application.LogDir; for (std::list::const_iterator it = job.Application.RemoteLogging.begin(); it != job.Application.RemoteLogging.end(); it++) { XMLNode logging = application.NewChild("RemoteLogging"); logging.NewChild("ServiceType") = it->ServiceType; logging.NewChild("URL") = it->Location.fullstr(); if(it->optional) logging.NewAttribute("optional") = "true"; } /// \mapattr Application.ExpirationTime <- ExpirationTime if(job.Application.ExpirationTime > -1) { XMLNode expire 
= application.NewChild("ExpirationTime"); expire = job.Application.ExpirationTime.str(); //if() expire.NewAttribute("optional") = "true"; } /// \mapattr Application.WipeTime <- SessionLifeTime if(job.Resources.SessionLifeTime > -1) { XMLNode wipe = application.NewChild("WipeTime"); // TODO: ask for type change from dateTime to period. wipe = (std::string)job.Resources.SessionLifeTime; //if() wipe.NewAttribute("optional") = "true"; } for (std::list::const_iterator it = job.Application.Notification.begin(); it != job.Application.Notification.end(); it++) { XMLNode notification = application.NewChild("Notification"); notification.NewChild("Protocol") = "email"; notification.NewChild("Recipient") = it->Email; for (std::list::const_iterator s = it->States.begin(); s != it->States.end(); s++) { std::string st = InternalStateToADL(*s,false,logger); if(st.empty()) continue; // return false; TODO later notification.NewChild("OnState") = st; } } // job.Application.ProcessingStartTime // job.Application.AccessControl // job.Application.CredentialService // job.Application.DryRun // Resources /// \mapattr Resources.OperatingSystem <- OperatingSystem for(std::list::const_iterator o = job.Resources.OperatingSystem.getSoftwareList().begin(); o != job.Resources.OperatingSystem.getSoftwareList().end(); ++o) { XMLNode os = resources.NewChild("OperatingSystem"); os.NewChild("Name") = o->getName(); std::string fam = o->getFamily(); if(!fam.empty()) os.NewChild("Family") = fam; os.NewChild("Version") = o->getVersion(); } /// \mapattr Resources.Platform <- Platform if(!job.Resources.Platform.empty()) { // TODO: convert to EMI ES types. So far they look same. resources.NewChild("Platform") = job.Resources.Platform; } /// \mapattr Resources.RuntimeEnvironment <- RunTimeEnvironment for(std::list::const_iterator s = job.Resources.RunTimeEnvironment.getSoftwareList().begin(); s != job.Resources.RunTimeEnvironment.getSoftwareList().end();++s) { XMLNode rte = resources.NewChild("RuntimeEnvironment"); rte.NewChild("Name") = s->getName(); rte.NewChild("Version") = s->getVersion(); for(std::list::const_iterator opt = s->getOptions().begin(); opt != s->getOptions().end(); ++opt) { rte.NewChild("Option") = *opt; } //if() rte.NewAttribute("optional") = "true"; } { XMLNode xpe(""); const ParallelEnvironmentType& pe = job.Resources.ParallelEnvironment; /// \mapattr Resources.ParallelEnvironment.Type <- ParallelEnvironmentType::Type if (!pe.Type.empty()) { xpe.NewChild("Type") = pe.Type; } /// \mapattr Resources.ParallelEnvironment.Version <- ParallelEnvironmentType::Version if (!pe.Version.empty()) { xpe.NewChild("Version") = pe.Version; } /// \mapattr Resources.ParallelEnvironment.ProcessesPerSlot <- ParallelEnvironmentType::ProcessesPerSlot if (pe.ProcessesPerSlot > -1) { xpe.NewChild("ProcessesPerSlot") = tostring(pe.ProcessesPerSlot); } /// \mapattr Resources.ParallelEnvironment.ThreadsPerProcess <- ParallelEnvironmentType::ThreadsPerProcess if (pe.ThreadsPerProcess > -1) { xpe.NewChild("ThreadsPerProcess") = tostring(pe.ThreadsPerProcess); } /// \mapattr Resources.ParallelEnvironment.Option <- ParallelEnvironmentType::Options for (std::multimap::const_iterator it = pe.Options.begin(); it != pe.Options.end(); ++it) { XMLNode xo = xpe.NewChild("Option"); xo.NewChild("Name") = it->first; xo.NewChild("Value") = it->second; } if (xpe.Size() > 0) { resources.NewChild(xpe); } } /// \mapattr Resources.Coprocessor <- Coprocessor if(!((std::string)job.Resources.Coprocessor).empty()) { XMLNode coprocessor = 
resources.NewChild("Coprocessor"); coprocessor = (std::string)job.Resources.Coprocessor; if(job.Resources.Coprocessor.optIn) coprocessor.NewAttribute("optional") = "true"; } //TODO: check values. So far they look close. /// \mapattr Resources.NetworkInfo <- NetworkInfo if(!job.Resources.NetworkInfo.empty()) { resources.NewChild("NetworkInfo") = job.Resources.NetworkInfo; } /// \mapattr Resources.NodeAccess <- NodeAccess switch(job.Resources.NodeAccess) { case NAT_INBOUND: resources.NewChild("NodeAccess") = "inbound"; break; case NAT_OUTBOUND: resources.NewChild("NodeAccess") = "outbound"; break; case NAT_INOUTBOUND: resources.NewChild("NodeAccess") = "inoutbound"; break; default: break; } /// \mapattr Resources.IndividualPhysicalMemory <- IndividualPhysicalMemory if(job.Resources.IndividualPhysicalMemory.max != -1) { resources.NewChild("IndividualPhysicalMemory") = tostring(mb_to_bytes(job.Resources.IndividualPhysicalMemory.max)); } /// \mapattr Resources.IndividualVirtualMemory <- IndividualVirtualMemory if(job.Resources.IndividualVirtualMemory.max != -1) { resources.NewChild("IndividualVirtualMemory") = tostring(mb_to_bytes(job.Resources.IndividualVirtualMemory.max)); } /// \mapattr Resources.DiskSpaceRequirement <- DiskSpace if(job.Resources.DiskSpaceRequirement.DiskSpace.max > -1) { resources.NewChild("DiskSpaceRequirement") = tostring(mb_to_bytes(job.Resources.DiskSpaceRequirement.DiskSpace.max)); } /// \mapattr Resources.RemoteSessionAccess <- SessionDirectoryAccess switch(job.Resources.SessionDirectoryAccess) { case SDAM_RW: resources.NewChild("RemoteSessionAccess") = "true"; break; case SDAM_RO: resources.NewChild("RemoteSessionAccess") = "true"; break; // approximately; TODO: Document in mapping. default: break; } //Benchmark // BenchmarkType // BenchmarkValue XMLNode slot = resources.NewChild("SlotRequirement"); /// \mapattr Resources.SlotRequirement.NumberOfSlots <- NumberOfSlots if(job.Resources.SlotRequirement.NumberOfSlots > -1) { slot.NewChild("NumberOfSlots") = tostring(job.Resources.SlotRequirement.NumberOfSlots); } /// \mapattr Resources.SlotRequirement.SlotsPerHost <- SlotsPerHost if (job.Resources.SlotRequirement.SlotsPerHost > -1) { slot.NewChild("SlotsPerHost") = tostring(job.Resources.SlotRequirement.SlotsPerHost); } /// \mapattr Resources.SlotRequirement.ExclusiveExecution <- ExclusiveExecution switch(job.Resources.SlotRequirement.ExclusiveExecution) { case SlotRequirementType::EE_TRUE: slot.NewChild("ExclusiveExecution") = "true"; break; case SlotRequirementType::EE_FALSE: slot.NewChild("ExclusiveExecution") = "false"; break; default: break; } if(slot.Size() <= 0) slot.Destroy(); /// \mapattr Resources.QueueName <- QueueName if(!job.Resources.QueueName.empty()) {; resources.NewChild("QueueName") = job.Resources.QueueName; } /// \mapattr Resources.IndividualCPUTime <- IndividualCPUTime if(job.Resources.IndividualCPUTime.range.max != -1) { resources.NewChild("IndividualCPUTime") = tostring(job.Resources.IndividualCPUTime.range.max); } /// \mapattr Resources.TotalCPUTime <- TotalCPUTime if(job.Resources.TotalCPUTime.range.max != -1) { resources.NewChild("TotalCPUTime") = tostring(job.Resources.TotalCPUTime.range.max); } /// \mapattr Resources.WallTime <- IndividualWallTime if(job.Resources.TotalWallTime.range.max != -1) { resources.NewChild("WallTime") = tostring(job.Resources.TotalWallTime.range.max); } else if(job.Resources.IndividualWallTime.range.max != -1) { resources.NewChild("WallTime") = tostring(job.Resources.IndividualWallTime.range.max); } // 
job.Resources.NetworkInfo // job.Resources.DiskSpaceRequirement.CacheDiskSpace // job.Resources.DiskSpaceRequirement.SessionDiskSpace // job.Resources.CEType // DataStaging // ClientDataPush { std::map::const_iterator it = job.OtherAttributes.find("emi-adl:ClientDataPush"); if (it != job.OtherAttributes.end() && it->second == "true") { staging.NewChild("ClientDataPush") = "true"; } else { // Other way to do that is to ask for undefined InputFile for (std::list::const_iterator it = job.DataStaging.InputFiles.begin(); it != job.DataStaging.InputFiles.end(); ++it) { if(it->Name.empty()) { staging.NewChild("ClientDataPush") = "true"; break; } } } } // InputFile for (std::list::const_iterator it = job.DataStaging.InputFiles.begin(); it != job.DataStaging.InputFiles.end(); ++it) { if(it->Name.empty()) { // Used to mark free stage in continue; //return false; } XMLNode file = staging.NewChild("InputFile"); file.NewChild("Name") = it->Name; for(std::list::const_iterator u = it->Sources.begin(); u != it->Sources.end(); ++u) { if(!*u) continue; // mandatory // It is not correct to do job description transformation // in parser. Parser should be dumb. Other solution is needed. if(u->Protocol() == "file") continue; XMLNode source = file.NewChild("Source"); source.NewChild("URI") = u->str(); const std::map& options = u->Options(); for(std::map::const_iterator o = options.begin(); o != options.end();++o) { XMLNode option = source.NewChild("Option"); option.NewChild("Name") = o->first; option.NewChild("Value") = o->second; } if (!u->DelegationID.empty()) { source.NewChild("DelegationID") = u->DelegationID; } } if(it->IsExecutable || it->Name == job.Application.Executable.Path) { file.NewChild("IsExecutable") = "true"; } // it->FileSize } // OutputFile for (std::list::const_iterator it = job.DataStaging.OutputFiles.begin(); it != job.DataStaging.OutputFiles.end(); ++it) { if(it->Name.empty()) { // mandatory return false; } XMLNode file = staging.NewChild("OutputFile"); file.NewChild("Name") = it->Name; for(std::list::const_iterator u = it->Targets.begin(); u != it->Targets.end(); ++u) { if(!*u) continue; // mandatory XMLNode target = file.NewChild("Target"); target.NewChild("URI") = u->str(); const std::map& options = u->Options(); for(std::map::const_iterator o = options.begin(); o != options.end();++o) { if(o->first == "mandatory") continue; if(o->first == "useiffailure") continue; if(o->first == "useifcancel") continue; if(o->first == "useifsuccess") continue; XMLNode option = target.NewChild("Option"); option.NewChild("Name") = o->first; option.NewChild("Value") = o->second; } const std::list& locations = u->Locations(); for(std::list::const_iterator l = locations.begin(); l != locations.end();++l) { XMLNode option = target.NewChild("Option"); option.NewChild("Name") = "location"; option.NewChild("Value") = l->fullstr(); } if (!u->DelegationID.empty()) { target.NewChild("DelegationID") = u->DelegationID; } target.NewChild("Mandatory") = u->Option("mandatory","false"); // target.NewChild("CreationFlag") = ; target.NewChild("UseIfFailure") = booltostr(u->UseIfFailure); target.NewChild("UseIfCancel") = booltostr(u->UseIfCancel); target.NewChild("UseIfSuccess") = booltostr(u->UseIfSuccess); } } if(!job.DataStaging.DelegationID.empty()) { staging.NewChild("nordugrid-adl:DelegationID") = job.DataStaging.DelegationID; } description.GetDoc(product, true); return true; } } // namespace Arc 
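The ADL memory and disk elements above are expressed in bytes by EMI ES, while the internal JobDescription ranges use megabytes; ADLParser.cpp converts between the two with the static bytes_to_mb/mb_to_bytes helpers defined at the top of the file, where bytes_to_mb rounds up so that any non-zero byte count requests at least one whole megabyte. A minimal standalone sketch of that behaviour follows: the two helpers are copied from the file, while main() and its assertions are illustrative only and not part of the library.

#include <cassert>

// Helpers copied from ADLParser.cpp: a non-zero byte count is rounded up
// to the next whole megabyte, and whole megabytes convert back exactly.
static int bytes_to_mb(unsigned long long int value) {
  if (value == 0) return 0;
  return (int)((value - 1) / (1024 * 1024) + 1);
}

static unsigned long long int mb_to_bytes(int value) {
  return (unsigned long long int)(1024ull * 1024ull * (unsigned int)value);
}

int main() {
  assert(bytes_to_mb(1) == 1);                          // even a single byte becomes 1 MB
  assert(bytes_to_mb(1024 * 1024) == 1);                // exactly 1 MiB stays 1 MB
  assert(bytes_to_mb(1024 * 1024 + 1) == 2);            // anything above rounds up
  assert(mb_to_bytes(bytes_to_mb(1)) == 1024 * 1024);   // round-trip grows to a full MB
  return 0;
}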
nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/PaxHeaders.7502/RSLParser.h0000644000000000000000000000012312676705223025344 xustar000000000000000027 mtime=1459325587.747153 27 atime=1513200574.299699 29 ctime=1513200660.39775235 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/RSLParser.h0000644000175000002070000002126612676705223025421 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_RSLPARSER_H__ #define __ARC_RSLPARSER_H__ #include #include #include #include #include #include namespace Arc { class Logger; template class SourceLocation { public: SourceLocation(const T& v) : v(v), location(std::make_pair(-1, -1)) {}; SourceLocation(const std::pair& location, const T& v) : v(v), location(location) {}; ~SourceLocation() {}; bool operator==(const T& a) const { return v == a; } bool operator!=(const T& a) const { return v != a; } SourceLocation& operator+=(const T& a) { v += a; return *this; } T v; std::pair location; // (Line number, column number). }; enum RSLBoolOp { RSLBoolError, RSLMulti, RSLAnd, RSLOr }; enum RSLRelOp { RSLRelError, RSLEqual, RSLNotEqual, RSLLess, RSLGreater, RSLLessOrEqual, RSLGreaterOrEqual }; class RSLValue { public: RSLValue(const std::pair& location = std::make_pair(-1, -1)) : location(location) {}; virtual ~RSLValue() {} const std::pair& Location() const { return location; } RSLValue* Evaluate(std::map& vars, JobDescriptionParserPluginResult& parsing_result) const; virtual void Print(std::ostream& os = std::cout) const = 0; protected: std::pair location; }; class RSLLiteral : public RSLValue { public: RSLLiteral(const SourceLocation& str) : RSLValue(str.location), str(str.v) {}; RSLLiteral(const std::string& str, const std::pair& location = std::make_pair(-1, -1)) : RSLValue(location), str(str) {}; RSLLiteral(const RSLLiteral& v) : RSLValue(v.location), str(v.str) {}; ~RSLLiteral() {}; void Print(std::ostream& os = std::cout) const; const std::string& Value() const { return str; }; private: std::string str; }; class RSLVariable : public RSLValue { public: RSLVariable(const SourceLocation& var) : RSLValue(var.location), var(var.v) {}; RSLVariable(const std::string& var, const std::pair& location = std::make_pair(-1, -1)) : RSLValue(location), var(var) {}; ~RSLVariable() {}; void Print(std::ostream& os = std::cout) const; const std::string& Var() const { return var; }; private: std::string var; }; class RSLConcat : public RSLValue { public: RSLConcat(RSLValue *left, RSLValue *right, const std::pair& location = std::make_pair(-1, -1)) : RSLValue(location), left(left), right(right) {}; ~RSLConcat(); void Print(std::ostream& os = std::cout) const; const RSLValue* Left() const { return left; } const RSLValue* Right() const { return right; } private: RSLValue *left; RSLValue *right; }; class RSLList : public RSLValue { public: RSLList(const std::pair& location = std::make_pair(-1, -1)) : RSLValue(location) {}; ~RSLList(); void Add(RSLValue *value); void Print(std::ostream& os = std::cout) const; std::list::iterator begin() { return values.begin(); } std::list::iterator end() { return values.end(); } std::list::const_iterator begin() const { return values.begin(); } std::list::const_iterator end() const { return values.end(); } std::list::size_type size() const { return values.size(); } private: std::list values; }; class RSLSequence : public RSLValue { public: RSLSequence(RSLList *seq, const std::pair& location = std::make_pair(-1, -1)) : RSLValue(location), seq(seq) {}; ~RSLSequence() { delete seq; }; void Print(std::ostream& 
os = std::cout) const; std::list::iterator begin() { return seq->begin(); } std::list::iterator end() { return seq->end(); } std::list::const_iterator begin() const { return seq->begin(); } std::list::const_iterator end() const { return seq->end(); } std::list::size_type size() const { return seq->size(); } private: RSLList *seq; }; class RSL { public: RSL() {}; virtual ~RSL() {}; RSL* Evaluate(JobDescriptionParserPluginResult& parsing_result) const; virtual void Print(std::ostream& os = std::cout) const = 0; private: RSL* Evaluate(std::map& vars, JobDescriptionParserPluginResult& parsing_result) const; }; class RSLBoolean : public RSL { public: RSLBoolean(SourceLocation op) : RSL(), op(op) {}; ~RSLBoolean(); void Add(RSL *condition); void Print(std::ostream& os = std::cout) const; RSLBoolOp Op() const { return op.v; } const std::pair& OpLocation() const { return op.location; } std::list::iterator begin() { return conditions.begin(); } std::list::iterator end() { return conditions.end(); } std::list::const_iterator begin() const { return conditions.begin(); } std::list::const_iterator end() const { return conditions.end(); } std::list::size_type size() const { return conditions.size(); } private: SourceLocation op; std::list conditions; }; class RSLCondition : public RSL { public: RSLCondition(const SourceLocation& attr, const SourceLocation& op, RSLList *values) : RSL(), attr(attr), op(op), values(values) { init(); }; RSLCondition(const std::string& attr, RSLRelOp op, RSLList *values) : RSL(), attr(SourceLocation(attr)), op(SourceLocation(op)), values(values) { init(); }; ~RSLCondition() { delete values; } void Print(std::ostream& os = std::cout) const; const std::string& Attr() const { return attr.v; } const std::pair& AttrLocation() const { return attr.location; } RSLRelOp Op() const { return op.v; } std::pair OpLocation() const { return op.location; } const RSLList& List() const { return *values; } std::list::iterator begin() { return values->begin(); } std::list::iterator end() { return values->end(); } std::list::const_iterator begin() const { return values->begin(); } std::list::const_iterator end() const { return values->end(); } std::list::size_type size() const { return values->size(); } private: void init(); SourceLocation attr; SourceLocation op; RSLList *values; }; class RSLParser { public: RSLParser(const std::string& s) : s(s), n(0), parsed(NULL), evaluated(NULL), parsing_result(JobDescriptionParserPluginResult::WrongLanguage) {}; ~RSLParser(); // The Parse method returns a pointer to an RSL object containing a // parsed representation of the rsl string given to the constructor. // The pointer is owned by the RSLParser object and should not be // deleted by the caller. The pointer is valid until the RSLParser // object goes out of scope. // If the evaluate flag is true the returned RSL object has been // evaluated to resolve RSL substitutions and evaluate concatenation // operations. // The parsing and evaluation is done on the first call to the Parse // method. Subsequent calls will simply return stored results. // If the rsl string can not be parsed or evaluated a NULL pointer // is returned. // It is possible that an rsl string can be parsed but not evaluated. 
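    // A minimal usage sketch based on the contract described above
    // (hypothetical caller code, not part of this header; error reporting
    // via GetParsingResult() is omitted):
    //   Arc::RSLParser parser(rsl_string);     // rsl_string holds some RSL text
    //   const Arc::RSL* rsl = parser.Parse();  // parsed and, by default, evaluated
    //   if (rsl) rsl->Print(std::cout);        // the parser retains ownership of rsl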
const RSL* Parse(bool evaluate = true); const JobDescriptionParserPluginResult& GetParsingResult() const { return parsing_result; }; private: void SkipWSAndComments(); SourceLocation ParseBoolOp(); SourceLocation ParseRelOp(); SourceLocation ParseString(int& status); RSLList* ParseList(); RSL* ParseRSL(); std::pair GetLinePosition(std::string::size_type pos) const; template SourceLocation toSourceLocation(const T& v, std::string::size_type offset = 1) const; const std::string s; std::string::size_type n; RSL *parsed; RSL *evaluated; JobDescriptionParserPluginResult parsing_result; std::map comments_positions; }; std::ostream& operator<<(std::ostream& os, const RSLBoolOp op); std::ostream& operator<<(std::ostream& os, const RSLRelOp op); std::ostream& operator<<(std::ostream& os, const RSLValue& value); std::ostream& operator<<(std::ostream& os, const RSL& rsl); } // namespace Arc #endif // __ARC_RSLPARSER_H__ nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/PaxHeaders.7502/ARCJSDLParser.cpp0000644000000000000000000000012313124220447026306 xustar000000000000000027 mtime=1498489127.890866 27 atime=1513200574.265698 29 ctime=1513200660.38875224 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/ARCJSDLParser.cpp0000644000175000002070000016505713124220447026372 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "XMLNodeRecover.h" #include "ARCJSDLParser.h" namespace Arc { static Range bytes_to_mb(Range value) { Range result(-1); result.min = (value.min < 0) ? -1 : ((value.min == 0) ? 0 : (int)((value.min - 1) / (1024*1024) + 1)); result.max = (value.max < 0) ? -1 : ((value.max == 0) ? 0 : (int)((value.max - 1) / (1024*1024) + 1)); return result; } static int bytes_to_mb(long long int value) { return (value < 0) ? -1 : ((value == 0) ? 
0 : (int)((value - 1) / (1024*1024) + 1)); } static Range mb_to_bytes(Range value) { Range result(-1); result.min = 1024ll * 1024ll * value.min; result.max = 1024ll * 1024ll * value.max; return result; } static long long int mb_to_bytes(int value) { return 1024ll * 1024ll * value; } #define JSDL_NAMESPACE "http://schemas.ggf.org/jsdl/2005/11/jsdl" #define JSDL_POSIX_NAMESPACE "http://schemas.ggf.org/jsdl/2005/11/jsdl-posix" #define JSDL_ARC_NAMESPACE "http://www.nordugrid.org/ws/schemas/jsdl-arc" #define JSDL_HPCPA_NAMESPACE "http://schemas.ggf.org/jsdl/2006/07/jsdl-hpcpa" ARCJSDLParser::ARCJSDLParser(PluginArgument* parg) : JobDescriptionParserPlugin(parg) { supportedLanguages.push_back("nordugrid:jsdl"); } ARCJSDLParser::~ARCJSDLParser() {} Plugin* ARCJSDLParser::Instance(PluginArgument *arg) { return new ARCJSDLParser(arg); } bool ARCJSDLParser::parseSoftware(XMLNode xmlSoftware, SoftwareRequirement& sr) const { for (int i = 0; (bool)(xmlSoftware["Software"][i]); i++) { Software::ComparisonOperator comOp = &Software::operator==; if (bool(xmlSoftware["Software"][i]["Version"].Attribute("require"))) { const std::string comOpStr = (std::string)xmlSoftware["Software"][i]["Version"].Attribute("require"); if (comOpStr == "!=" || lower(comOpStr) == "ne") comOp = &Software::operator!=; else if (comOpStr == ">" || lower(comOpStr) == "gt") comOp = &Software::operator>; else if (comOpStr == "<" || lower(comOpStr) == "lt") comOp = &Software::operator<; else if (comOpStr == ">=" || lower(comOpStr) == "ge") comOp = &Software::operator>=; else if (comOpStr == "<=" || lower(comOpStr) == "le") comOp = &Software::operator<=; else if (comOpStr != "=" && comOpStr != "==" && lower(comOpStr) != "eq") { logger.msg(ERROR, "Unknown operator '%s' in attribute require in Version element", comOpStr); return false; } } sr.add(Software(trim((std::string)xmlSoftware["Software"][i]["Name"]), trim((std::string)xmlSoftware["Software"][i]["Version"])), comOp); } return true; } void ARCJSDLParser::outputSoftware(const SoftwareRequirement& sr, XMLNode& arcJSDL) const { std::list::const_iterator itSW = sr.getSoftwareList().begin(); std::list::const_iterator itCO = sr.getComparisonOperatorList().begin(); for (; itSW != sr.getSoftwareList().end(); itSW++, itCO++) { if (itSW->empty()) continue; XMLNode xmlSoftware = arcJSDL.NewChild("arc-jsdl:Software"); if (!itSW->getFamily().empty()) xmlSoftware.NewChild("arc-jsdl:Family") = itSW->getFamily(); xmlSoftware.NewChild("arc-jsdl:Name") = itSW->getName(); if (!itSW->getVersion().empty()) { XMLNode xmlVersion = xmlSoftware.NewChild("arc-jsdl:Version"); xmlVersion = itSW->getVersion(); if (*itCO != &Software::operator ==) xmlVersion.NewAttribute("require") = Software::toString(*itCO); } } } template bool ARCJSDLParser::parseRange(XMLNode xmlRange, Range& range) const { std::map types; types["Exact"] = xmlRange.Path("Exact"); types["UpperBoundedRange"] = xmlRange.Path("UpperBoundedRange"); types["LowerBoundedRange"] = xmlRange.Path("LowerBoundedRange"); types["Range"] = xmlRange.Path("Range"); types["Range/LowerBound"] = xmlRange.Path("Range/LowerBound"); types["Range/UpperBound"] = xmlRange.Path("Range/UpperBound"); types["Min"] = xmlRange.Path("Min"); types["Max"] = xmlRange.Path("Max"); for (std::map::const_iterator it = types.begin(); it != types.end(); ++it) { if (it->second.size() > 1) { logger.msg(VERBOSE, "Multiple '%s' elements are not supported.", it->first); return false; } } XMLNodeList xmlMin, xmlMax; if(types["LowerBoundedRange"].size()) 
xmlMin.push_back(types["LowerBoundedRange"].front()); if(types["Range/LowerBound"].size()) xmlMin.push_back(types["Range/LowerBound"].front()); if(types["UpperBoundedRange"].size()) xmlMax.push_back(types["UpperBoundedRange"].front()); if(types["Range/UpperBound"].size()) xmlMax.push_back(types["Range/UpperBound"].front()); for(XMLNodeList::iterator xel = xmlMin.begin(); xel != xmlMin.end(); ++xel) { if (bool(xel->Attribute("exclusiveBound"))) { logger.msg(VERBOSE, "The 'exclusiveBound' attribute to the '%s' element is not supported.", xel->Name()); return false; } } for(XMLNodeList::iterator xel = xmlMax.begin(); xel != xmlMax.end(); ++xel) { if (bool(xel->Attribute("exclusiveBound"))) { logger.msg(VERBOSE, "The 'exclusiveBound' attribute to the '%s' element is not supported.", xel->Name()); return false; } } if (types["Exact"].size() == 1) { if (bool(types["Exact"].front().Attribute("epsilon"))) { logger.msg(VERBOSE, "The 'epsilon' attribute to the 'Exact' element is not supported."); return false; } xmlMax.push_back(types["Exact"].front()); } if(types["Min"].size()) xmlMin.push_back(types["Min"].front()); if(types["Max"].size()) xmlMax.push_back(types["Max"].front()); return parseMinMax(xmlMin, xmlMax, range); } static std::string namesToString(XMLNodeList xlist) { std::string str; for(XMLNodeList::iterator xel = xlist.begin(); xel != xlist.end(); ++xel) { if(!str.empty()) str += ", "; str += xel->Name(); } return str; } template bool ARCJSDLParser::parseMinMax(XMLNodeList xmlMin, XMLNodeList xmlMax, Range& range) const { std::pair min(false, .0), max(false, .0); for(XMLNodeList::iterator xel = xmlMax.begin(); xel != xmlMax.end(); ++xel) { double value; if(!stringto((std::string)*xel, value)) { logger.msg(VERBOSE, "Parsing error: Value of %s element can't be parsed as number", xel->Name()); return false; } if(max.first) { if(max.second != value) { logger.msg(VERBOSE, "Parsing error: Elements (%s) representing upper range have different values", namesToString(xmlMax)); return false; } } else { max.second = value; max.first = true; } } for(XMLNodeList::iterator xel = xmlMin.begin(); xel != xmlMin.end(); ++xel) { double value; if(!stringto((std::string)*xel, value)) { logger.msg(VERBOSE, "Parsing error: Value of %s element can't be parsed as number", xel->Name()); return false; } if(min.first) { if(max.second != value) { logger.msg(VERBOSE, "Parsing error: Elements (%s) representing lower range have different values", namesToString(xmlMax)); } } else { min.second = value; min.first = true; } } if (min.first && max.first && (min.second > max.second)) { logger.msg(VERBOSE, "Parsing error: Value of lower range (%s) is greater than value of upper range (%s)", namesToString(xmlMin), namesToString(xmlMax)); return false; } if (min.first) range.min = static_cast(min.second); if (max.first) range.max = static_cast(max.second); return true; } template void ARCJSDLParser::outputARCJSDLRange(const Range& range, XMLNode& arcJSDL, const T& undefValue) const { if (range.min != undefValue) { const std::string min = tostring(range.min); if (!min.empty()) arcJSDL.NewChild("arc-jsdl:Min") = min; } if (range.max != undefValue) { const std::string max = tostring(range.max); if (!max.empty()) arcJSDL.NewChild("arc-jsdl:Max") = max; } } template void ARCJSDLParser::outputJSDLRange(const Range& range, XMLNode& jsdl, const T& undefValue) const { if (range.min != undefValue) { const std::string lower = tostring(range.min); if (!lower.empty()) jsdl.NewChild("LowerBoundedRange") = lower; } if (range.max != 
undefValue) { const std::string upper = tostring(range.max); if (!upper.empty()) jsdl.NewChild("UpperBoundedRange") = upper; } } void ARCJSDLParser::parseBenchmark(XMLNode xmlBenchmark, std::pair& benchmark) const { int value; if (bool(xmlBenchmark["BenchmarkType"]) && bool(xmlBenchmark["BenchmarkValue"]) && stringto(xmlBenchmark["BenchmarkValue"], value)) benchmark = std::pair((std::string)xmlBenchmark["BenchmarkType"], value); } void ARCJSDLParser::outputBenchmark(const std::pair& benchmark, XMLNode& arcJSDL) const { if (!benchmark.first.empty()) { arcJSDL.NewChild("arc-jsdl:BenchmarkType") = benchmark.first; arcJSDL.NewChild("arc-jsdl:BenchmarkValue") = tostring(benchmark.second); } } JobDescriptionParserPluginResult ARCJSDLParser::Parse(const std::string& source, std::list& jobdescs, const std::string& language, const std::string& dialect) const { JobDescriptionParserPluginResult result(JobDescriptionParserPluginResult::WrongLanguage); if (language != "" && !IsLanguageSupported(language)) { return result; } XMLNodeRecover node(source); if (node.HasErrors()) { if (node.Name() != "JobDefinition") { return result; } else { result.SetFailure(); for (std::list::const_iterator itErr = node.GetErrors().begin(); itErr != node.GetErrors().end(); ++itErr) { JobDescriptionParsingError err; err.message = (**itErr).message; if (err.message[err.message.length()-1] == '\n') { err.message = err.message.substr(0, err.message.length()-1); } if ((**itErr).line > 0 && (**itErr).int2 > 0) { err.line_pos = std::pair((**itErr).line, (**itErr).int2); } result.AddError(err); } return result; } } JobDescription parsed_jobdescription; // The source parsing start now. XMLNode jobdescription = node["JobDescription"]; // Check if it is JSDL if((!jobdescription) || (!MatchXMLNamespace(jobdescription,JSDL_NAMESPACE))) { logger.msg(VERBOSE, "[ARCJSDLParser] Not a JSDL - missing JobDescription element"); return false; } // JobIdentification XMLNode jobidentification = node["JobDescription"]["JobIdentification"]; // std::string JobName; if (bool(jobidentification["JobName"])) parsed_jobdescription.Identification.JobName = (std::string)jobidentification["JobName"]; // std::string Description; if (bool(jobidentification["Description"])) parsed_jobdescription.Identification.Description = (std::string)jobidentification["Description"]; // JSDL compliance if (bool(jobidentification["JobProject"])) parsed_jobdescription.OtherAttributes["nordugrid:jsdl;Identification/JobProject"] = (std::string)jobidentification["JobProject"]; // std::list Annotation; for (int i = 0; (bool)(jobidentification["UserTag"][i]); i++) parsed_jobdescription.Identification.Annotation.push_back((std::string)jobidentification["UserTag"][i]); // std::list ActivityOldID; for (int i = 0; (bool)(jobidentification["ActivityOldId"][i]); i++) parsed_jobdescription.Identification.ActivityOldID.push_back((std::string)jobidentification["ActivityOldId"][i]); // end of JobIdentification // Application XMLNode xmlApplication = node["JobDescription"]["Application"]; // Look for extended application element. First look for POSIX and then HPCProfile. 
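    // (Orientation sketch, not normative.) The "extended" element is either the
    // jsdl-posix <posix-jsdl:POSIXApplication> or, if that is absent, the
    // jsdl-hpcpa <hpcpa-jsdl:HPCProfileApplication> child of Application; both
    // variants carry Executable/Argument/Input/Output/Error style children and
    // are read through the same xmlXApplication handle below.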
XMLNode xmlXApplication = xmlApplication["POSIXApplication"]; if (!xmlXApplication) xmlXApplication = xmlApplication["HPCProfileApplication"]; // ExecutableType Executable; if (bool(xmlApplication["Executable"]["Path"])) { parsed_jobdescription.Application.Executable.Path = (std::string)xmlApplication["Executable"]["Path"]; for (int i = 0; (bool)(xmlApplication["Executable"]["Argument"][i]); i++) parsed_jobdescription.Application.Executable.Argument.push_back((std::string)xmlApplication["Executable"]["Argument"]); } else if (bool(xmlXApplication["Executable"])) { parsed_jobdescription.Application.Executable.Path = (std::string)xmlXApplication["Executable"]; for (int i = 0; (bool)(xmlXApplication["Argument"][i]); i++) parsed_jobdescription.Application.Executable.Argument.push_back((std::string)xmlXApplication["Argument"][i]); } // std::string Input; if (bool(xmlApplication["Input"])) parsed_jobdescription.Application.Input = (std::string)xmlApplication["Input"]; else if (bool(xmlXApplication["Input"])) parsed_jobdescription.Application.Input = (std::string)xmlXApplication["Input"]; // std::string Output; if (bool(xmlApplication["Output"])) parsed_jobdescription.Application.Output = (std::string)xmlApplication["Output"]; else if (bool(xmlXApplication["Output"])) parsed_jobdescription.Application.Output = (std::string)xmlXApplication["Output"]; // std::string Error; if (bool(xmlApplication["Error"])) parsed_jobdescription.Application.Error = (std::string)xmlApplication["Error"]; else if (bool(xmlXApplication["Error"])) parsed_jobdescription.Application.Error = (std::string)xmlXApplication["Error"]; // std::list< std::pair > Environment; if (bool(xmlApplication["Environment"]["Name"])) { for (int i = 0; (bool)(xmlApplication["Environment"][i]); i++) { if (!((std::string)xmlApplication["Environment"][i]["Name"]).empty() && bool(xmlApplication["Environment"][i]["Value"])) parsed_jobdescription.Application.Environment.push_back(std::pair((std::string)xmlApplication["Environment"][i]["Name"], (std::string)xmlApplication["Environment"][i]["Value"])); } } else if (bool(xmlXApplication["Environment"])) { for (int i = 0; (bool)(xmlXApplication["Environment"][i]); i++) { XMLNode env = xmlXApplication["Environment"][i]; XMLNode name = env.Attribute("name"); if (!name || ((std::string)name).empty()) { logger.msg(VERBOSE, "[ARCJSDLParser] Error during the parsing: missed the name attributes of the \"%s\" Environment", (std::string)env); return false; } parsed_jobdescription.Application.Environment.push_back(std::pair(name, env)); } } // std::list PreExecutable; if (bool(xmlApplication["Prologue"]["Path"])) { if (parsed_jobdescription.Application.PreExecutable.empty()) { parsed_jobdescription.Application.PreExecutable.push_back(ExecutableType()); } parsed_jobdescription.Application.PreExecutable.front().Path = (std::string)xmlApplication["Prologue"]["Path"]; } if (bool(xmlApplication["Prologue"]["Argument"])) { if (parsed_jobdescription.Application.PreExecutable.empty()) { parsed_jobdescription.Application.PreExecutable.push_back(ExecutableType()); } for (int i = 0; (bool)(xmlApplication["Prologue"]["Argument"][i]); i++) { parsed_jobdescription.Application.PreExecutable.front().Argument.push_back((std::string)xmlApplication["Prologue"]["Argument"][i]); } } // std::list PostExecutable; if (bool(xmlApplication["Epilogue"]["Path"])) { if (parsed_jobdescription.Application.PostExecutable.empty()) { parsed_jobdescription.Application.PostExecutable.push_back(ExecutableType()); } 
parsed_jobdescription.Application.PostExecutable.front().Path = (std::string)xmlApplication["Epilogue"]["Path"]; } if (bool(xmlApplication["Epilogue"]["Argument"])) { if (parsed_jobdescription.Application.PostExecutable.empty()) { parsed_jobdescription.Application.PostExecutable.push_back(ExecutableType()); } for (int i = 0; (bool)(xmlApplication["Epilogue"]["Argument"][i]); i++) { parsed_jobdescription.Application.PostExecutable.front().Argument.push_back((std::string)xmlApplication["Epilogue"]["Argument"][i]); } } // std::string LogDir; if (bool(xmlApplication["LogDir"])) parsed_jobdescription.Application.LogDir = (std::string)xmlApplication["LogDir"]; // std::list RemoteLogging; for (int i = 0; (bool)(xmlApplication["RemoteLogging"][i]); i++) { URL url((std::string)xmlApplication["RemoteLogging"][i]); if (!url) { logger.msg(VERBOSE, "[ARCJSDLParser] RemoteLogging URL is wrongly formatted."); return false; } parsed_jobdescription.Application.RemoteLogging.push_back(RemoteLoggingType()); parsed_jobdescription.Application.RemoteLogging.back().ServiceType = "SGAS"; parsed_jobdescription.Application.RemoteLogging.back().Location = url; } // int Rerun; if (bool(xmlApplication["Rerun"])) parsed_jobdescription.Application.Rerun = stringtoi((std::string)xmlApplication["Rerun"]); // int Priority if (bool(xmlApplication["Priority"])) { parsed_jobdescription.Application.Priority = stringtoi((std::string)xmlApplication["Priority"]); if (parsed_jobdescription.Application.Priority > 100) { logger.msg(VERBOSE, "[ARCJSDLParser] priority is too large - using max value 100"); parsed_jobdescription.Application.Priority = 100; } } // Time ExpirationTime; if (bool(xmlApplication["ExpiryTime"])) parsed_jobdescription.Application.ExpirationTime = Time((std::string)xmlApplication["ExpiryTime"]); // Time ProcessingStartTime; if (bool(xmlApplication["ProcessingStartTime"])) parsed_jobdescription.Application.ProcessingStartTime = Time((std::string)xmlApplication["ProcessingStartTime"]); // XMLNode Notification; for (XMLNode n = xmlApplication["Notification"]; (bool)n; ++n) { // Accepting only supported notification types if(((bool)n["Type"]) && (n["Type"] != "Email")) continue; NotificationType notification; notification.Email = (std::string)n["Endpoint"]; for (int j = 0; bool(n["State"][j]); j++) { notification.States.push_back((std::string)n["State"][j]); } parsed_jobdescription.Application.Notification.push_back(notification); } // std::list CredentialService; for (int i = 0; (bool)(xmlApplication["CredentialService"][i]); i++) parsed_jobdescription.Application.CredentialService.push_back(URL((std::string)xmlApplication["CredentialService"][i])); // XMLNode AccessControl; if (bool(xmlApplication["AccessControl"])) xmlApplication["AccessControl"].Child().New(parsed_jobdescription.Application.AccessControl); if (bool(xmlApplication["DryRun"]) && lower((std::string)xmlApplication["DryRun"]) == "yes") { parsed_jobdescription.Application.DryRun = true; } // End of Application // Resources XMLNode resource = node["JobDescription"]["Resources"]; // SoftwareRequirement OperatingSystem; if (bool(resource["OperatingSystem"])) { if (!parseSoftware(resource["OperatingSystem"], parsed_jobdescription.Resources.OperatingSystem)) return false; if (!resource["OperatingSystem"]["Software"] && resource["OperatingSystem"]["OperatingSystemType"]["OperatingSystemName"] && resource["OperatingSystem"]["OperatingSystemVersion"]) parsed_jobdescription.Resources.OperatingSystem = 
Software((std::string)resource["OperatingSystem"]["OperatingSystemType"]["OperatingSystemName"], (std::string)resource["OperatingSystem"]["OperatingSystemVersion"]); } // std::string Platform; if (bool(resource["Platform"])) parsed_jobdescription.Resources.Platform = (std::string)resource["Platform"]; else if (bool(resource["CPUArchitecture"]["CPUArchitectureName"])) parsed_jobdescription.Resources.Platform = (std::string)resource["CPUArchitecture"]["CPUArchitectureName"]; // std::string NetworkInfo; if (bool(resource["NetworkInfo"])) parsed_jobdescription.Resources.NetworkInfo = (std::string)resource["NetworkInfo"]; else if (bool(resource["IndividualNetworkBandwidth"])) { Range bits_per_sec; if (!parseRange(resource["IndividualNetworkBandwidth"], bits_per_sec)) { return false; } const long long network = 1024 * 1024; if (bits_per_sec < 100 * network) parsed_jobdescription.Resources.NetworkInfo = "100megabitethernet"; else if (bits_per_sec < 1024 * network) parsed_jobdescription.Resources.NetworkInfo = "gigabitethernet"; else if (bits_per_sec < 10 * 1024 * network) parsed_jobdescription.Resources.NetworkInfo = "myrinet"; else parsed_jobdescription.Resources.NetworkInfo = "infiniband"; } // Range IndividualPhysicalMemory; // If the consolidated element exist parse it, else try to parse the POSIX one. if (bool(resource["IndividualPhysicalMemory"])) { Range value(-1); if (!parseRange(resource["IndividualPhysicalMemory"], value)) { return false; } parsed_jobdescription.Resources.IndividualPhysicalMemory = bytes_to_mb(value); } else if (bool(xmlXApplication["MemoryLimit"])) { long long jsdlMemoryLimit = -1; if (stringto((std::string)xmlXApplication["MemoryLimit"], jsdlMemoryLimit)) { parsed_jobdescription.Resources.IndividualPhysicalMemory.max = bytes_to_mb(jsdlMemoryLimit); } else { parsed_jobdescription.Resources.IndividualPhysicalMemory = Range(-1); } } // Range IndividualVirtualMemory; // If the consolidated element exist parse it, else try to parse the POSIX one. if (bool(resource["IndividualVirtualMemory"])) { Range value(-1); if (!parseRange(resource["IndividualVirtualMemory"], value)) { return false; } parsed_jobdescription.Resources.IndividualVirtualMemory = bytes_to_mb(value); } else if (bool(xmlXApplication["VirtualMemoryLimit"])) { long long int jsdlMemoryLimit = -1; if (stringto((std::string)xmlXApplication["VirtualMemoryLimit"], jsdlMemoryLimit)) { parsed_jobdescription.Resources.IndividualVirtualMemory.max = bytes_to_mb(jsdlMemoryLimit); } else { parsed_jobdescription.Resources.IndividualVirtualMemory = Range(-1); } } // Range IndividualCPUTime; if (bool(resource["IndividualCPUTime"]["Value"])) { if (!parseRange(resource["IndividualCPUTime"]["Value"], parsed_jobdescription.Resources.IndividualCPUTime.range)) { return false; } parseBenchmark(resource["IndividualCPUTime"], parsed_jobdescription.Resources.IndividualCPUTime.benchmark); } else if (bool(resource["IndividualCPUTime"])) { // JSDL compliance... if (!parseRange(resource["IndividualCPUTime"], parsed_jobdescription.Resources.IndividualCPUTime.range)) { return false; } } // Range TotalCPUTime; // If the consolidated element exist parse it, else try to parse the POSIX one. if (bool(resource["TotalCPUTime"]["Value"])) { if (!parseRange(resource["TotalCPUTime"]["Value"], parsed_jobdescription.Resources.TotalCPUTime.range)) { return false; } parseBenchmark(resource["TotalCPUTime"], parsed_jobdescription.Resources.TotalCPUTime.benchmark); } else if (bool(resource["TotalCPUTime"])) { // JSDL compliance... 
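      // Note: this is the fallback pattern used throughout Parse() - when the
      // arc-jsdl form with a nested Value element is absent, the element content
      // itself is treated as a plain JSDL range and no benchmark information is read.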
if (!parseRange(resource["TotalCPUTime"], parsed_jobdescription.Resources.TotalCPUTime.range)) { return false; } } else if (bool(xmlXApplication["CPUTimeLimit"])) { // POSIX compliance... if (!stringto((std::string)xmlXApplication["CPUTimeLimit"], parsed_jobdescription.Resources.TotalCPUTime.range.max)) parsed_jobdescription.Resources.TotalCPUTime.range = Range(-1); } // Range IndividualWallTime; if (bool(resource["IndividualWallTime"]["Value"])) { if (!parseRange(resource["IndividualWallTime"]["Value"], parsed_jobdescription.Resources.IndividualWallTime.range)) { return false; } parseBenchmark(resource["IndividualCPUTime"], parsed_jobdescription.Resources.IndividualWallTime.benchmark); } // Range TotalWallTime; // If the consolidated element exist parse it, else try to parse the POSIX one. if (bool(resource["TotalWallTime"]["Value"])) { if (!parseRange(resource["TotalWallTime"]["Value"], parsed_jobdescription.Resources.TotalWallTime.range)) { return false; } parseBenchmark(resource["TotalWallTime"], parsed_jobdescription.Resources.TotalWallTime.benchmark); } else if (bool(xmlXApplication["WallTimeLimit"])) { if (!stringto((std::string)xmlXApplication["WallTimeLimit"], parsed_jobdescription.Resources.TotalWallTime.range.max)) parsed_jobdescription.Resources.TotalWallTime.range = Range(-1); } // Range DiskSpace; // If the consolidated element exist parse it, else try to parse the JSDL one. if (bool(resource["DiskSpaceRequirement"]["DiskSpace"])) { Range diskspace(-1); if (!parseRange(resource["DiskSpaceRequirement"]["DiskSpace"], diskspace)) { return false; } if (diskspace.max > -1) { parsed_jobdescription.Resources.DiskSpaceRequirement.DiskSpace.max = bytes_to_mb(diskspace.max); } if (diskspace.min > -1) { parsed_jobdescription.Resources.DiskSpaceRequirement.DiskSpace.min = bytes_to_mb(diskspace.min); } } else if (bool(resource["FileSystem"]["DiskSpace"])) { Range diskspace = -1; if (!parseRange(resource["FileSystem"]["DiskSpace"], diskspace)) { return false; } if (diskspace.max > -1) { parsed_jobdescription.Resources.DiskSpaceRequirement.DiskSpace.max = bytes_to_mb(diskspace.max); } if (diskspace.min > -1) { parsed_jobdescription.Resources.DiskSpaceRequirement.DiskSpace.min = bytes_to_mb(diskspace.min); } } // int CacheDiskSpace; if (bool(resource["DiskSpaceRequirement"]["CacheDiskSpace"])) { long long int cachediskspace = -1; if (!stringto((std::string)resource["DiskSpaceRequirement"]["CacheDiskSpace"], cachediskspace)) { parsed_jobdescription.Resources.DiskSpaceRequirement.CacheDiskSpace = -1; } else if (cachediskspace > -1) { parsed_jobdescription.Resources.DiskSpaceRequirement.CacheDiskSpace = bytes_to_mb(cachediskspace); } } // int SessionDiskSpace; if (bool(resource["DiskSpaceRequirement"]["SessionDiskSpace"])) { long long int sessiondiskspace = -1; if (!stringto((std::string)resource["DiskSpaceRequirement"]["SessionDiskSpace"], sessiondiskspace)) { parsed_jobdescription.Resources.DiskSpaceRequirement.SessionDiskSpace = -1; } else if (sessiondiskspace > -1) { parsed_jobdescription.Resources.DiskSpaceRequirement.SessionDiskSpace = bytes_to_mb(sessiondiskspace); } } // Period SessionLifeTime; if (bool(resource["SessionLifeTime"])) parsed_jobdescription.Resources.SessionLifeTime = Period((std::string)resource["SessionLifeTime"]); // SoftwareRequirement CEType; if (bool(resource["CEType"])) { if (!parseSoftware(resource["CEType"], parsed_jobdescription.Resources.CEType)) { return false; } } // NodeAccessType NodeAccess; if (lower((std::string)resource["NodeAccess"]) == "inbound") 
parsed_jobdescription.Resources.NodeAccess = NAT_INBOUND; else if (lower((std::string)resource["NodeAccess"]) == "outbound") parsed_jobdescription.Resources.NodeAccess = NAT_OUTBOUND; else if (lower((std::string)resource["NodeAccess"]) == "inoutbound") parsed_jobdescription.Resources.NodeAccess = NAT_INOUTBOUND; // SlotRequirementType ExclusiveExecution if (bool(resource["ExclusiveExecution"])) { if (lower((std::string)resource["ExclusiveExecution"]) == "true") parsed_jobdescription.Resources.SlotRequirement.ExclusiveExecution = SlotRequirementType::EE_TRUE; else if (lower((std::string)resource["ExclusiveExecution"]) == "false") parsed_jobdescription.Resources.SlotRequirement.ExclusiveExecution = SlotRequirementType::EE_FALSE; else parsed_jobdescription.Resources.SlotRequirement.ExclusiveExecution = SlotRequirementType::EE_DEFAULT; } // ResourceSlotType Slots; if (bool(resource["SlotRequirement"]["NumberOfSlots"])) { if (!stringto(resource["SlotRequirement"]["NumberOfSlots"], parsed_jobdescription.Resources.SlotRequirement.NumberOfSlots)) { parsed_jobdescription.Resources.SlotRequirement.NumberOfSlots = -1; } } else if (bool(xmlXApplication["ProcessCountLimit"])) { if (!stringto((std::string)xmlXApplication["ProcessCountLimit"], parsed_jobdescription.Resources.SlotRequirement.NumberOfSlots)) { parsed_jobdescription.Resources.SlotRequirement.NumberOfSlots = -1; } } if (bool(resource["SlotRequirement"]["ThreadsPerProcesses"])) { if (!stringto(resource["SlotRequirement"]["ThreadsPerProcesses"], parsed_jobdescription.Resources.ParallelEnvironment.ThreadsPerProcess)) { parsed_jobdescription.Resources.ParallelEnvironment.ThreadsPerProcess = -1; } } else if (bool(xmlXApplication["ThreadCountLimit"])) { if (!stringto((std::string)xmlXApplication["ThreadCountLimit"], parsed_jobdescription.Resources.ParallelEnvironment.ThreadsPerProcess)) { parsed_jobdescription.Resources.ParallelEnvironment.ThreadsPerProcess = -1; } } if (bool(resource["SlotRequirement"]["ProcessPerHost"])) { if (!stringto(resource["SlotRequirement"]["ProcessPerHost"], parsed_jobdescription.Resources.SlotRequirement.SlotsPerHost)) { parsed_jobdescription.Resources.SlotRequirement.SlotsPerHost = -1; } } else if (bool(resource["TotalCPUCount"])) { Range cpuCount; if (!parseRange(resource["TotalCPUCount"], cpuCount)) { return false; } if (cpuCount.min > 0) { logger.msg(VERBOSE, "Lower bounded range is not supported for the 'TotalCPUCount' element."); return false; } parsed_jobdescription.Resources.SlotRequirement.SlotsPerHost = cpuCount.max; } // std::string SPMDVariation; //if (bool(resource["SlotRequirement"]["SPMDVariation"])) // parsed_jobdescription.Resources.SlotRequirement.SPMDVariation = (std::string)resource["Slots"]["SPMDVariation"]; // std::string QueueName; if (bool(resource["QueueName"]) || bool(resource["CandidateTarget"]["QueueName"]) // Be backwards compatible ) { XMLNode xmlQueue = (bool(resource["QueueName"]) ? resource["QueueName"] : resource["CandidateTarget"]["QueueName"]); std::string useQueue = (std::string)xmlQueue.Attribute("require"); if (!useQueue.empty() && useQueue != "eq" && useQueue != "=" && useQueue != "==" && useQueue != "ne" && useQueue != "!=") { logger.msg(ERROR, "Parsing the \"require\" attribute of the \"QueueName\" nordugrid-JSDL element failed. 
An invalid comparison operator was used, only \"ne\" or \"eq\" are allowed."); return false; } if (useQueue == "ne" || useQueue == "!=") { parsed_jobdescription.OtherAttributes["nordugrid:broker;reject_queue"] = (std::string)xmlQueue; } else { parsed_jobdescription.Resources.QueueName = (std::string)xmlQueue; } } // SoftwareRequirement RunTimeEnvironment; if (bool(resource["RunTimeEnvironment"])) { if (!parseSoftware(resource["RunTimeEnvironment"], parsed_jobdescription.Resources.RunTimeEnvironment)) { return false; } } // end of Resources // Datastaging XMLNode datastaging = node["JobDescription"]["DataStaging"]; for (int i = 0; datastaging[i]; i++) { XMLNode ds = datastaging[i]; XMLNode source = ds["Source"]; XMLNode source_uri = source["URI"]; XMLNode target = ds["Target"]; XMLNode target_uri = target["URI"]; XMLNode filenameNode = ds["FileName"]; if ((bool)filenameNode && (bool)source) { InputFileType file; file.Name = (std::string)filenameNode; if (bool(source_uri)) { URL source_url((std::string)source_uri); if (!source_url) { return false; } // add any URL options XMLNode option = source["URIOption"]; for (; (bool)option; ++option) { if (!source_url.AddOption((std::string)option, true)) { return false; } } // add URL Locations, which may have their own options XMLNode location = source["Location"]; for (; (bool)location; ++location) { XMLNode location_uri = location["URI"]; if (!location_uri) { logger.msg(ERROR, "No URI element found in Location for file %s", file.Name); return false; } URLLocation location_url((std::string)location_uri); if (!location_url || location_url.Protocol() == "file") { logger.msg(ERROR, "Location URI for file %s is invalid", file.Name); return false; } XMLNode loc_option = location["URIOption"]; for (; (bool)loc_option; ++loc_option) { if (!location_url.AddOption((std::string)loc_option, true)) { return false; } } source_url.AddLocation(location_url); } file.Sources.push_back(source_url); } else { file.Sources.push_back(URL(file.Name)); } if (ds["FileSize"]) { stringto((std::string)ds["FileSize"], file.FileSize); } if (ds["Checksum"]) { file.Checksum = (std::string)ds["Checksum"]; } file.IsExecutable = (ds["IsExecutable"] && lower(((std::string)ds["IsExecutable"])) == "true"); // DownloadToCache does not make sense for output files. 
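          // Illustrative effect (values are examples only): <DownloadToCache>no</DownloadToCache>
          // on an input file adds the option "cache=no" to that file's last source URL via
          // URL::AddOption(), so the staging layer can be told not to cache that file.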
if (ds["DownloadToCache"] && !file.Sources.empty()) { file.Sources.back().AddOption("cache", (std::string)ds["DownloadToCache"]); } parsed_jobdescription.DataStaging.InputFiles.push_back(file); } if ((bool)filenameNode && bool(target) && bool(target_uri)) { OutputFileType file; file.Name = (std::string)filenameNode; URL target_url((std::string)target_uri); if (!target_url) { return false; } // add any URL options XMLNode option = target["URIOption"]; for (; (bool)option; ++option) { if (!target_url.AddOption((std::string)option, true)) { return false; } } // add URL Locations, which may have their own options XMLNode location = target["Location"]; for (; (bool)location; ++location) { XMLNode location_uri = location["URI"]; if (!location_uri) { logger.msg(ERROR, "No URI element found in Location for file %s", file.Name); return false; } URLLocation location_url((std::string)location_uri); if (!location_url || location_url.Protocol() == "file") { logger.msg(ERROR, "Location URI for file %s is invalid", file.Name); return false; } XMLNode loc_option = location["URIOption"]; for (; (bool)loc_option; ++loc_option) { if (!location_url.AddOption((std::string)loc_option, true)) { return false; } } target_url.AddLocation(location_url); } file.Targets.push_back(target_url); parsed_jobdescription.DataStaging.OutputFiles.push_back(file); } bool deleteOnTermination = (ds["DeleteOnTermination"] && lower(((std::string)ds["DeleteOnTermination"])) == "false"); if ((bool)filenameNode && (deleteOnTermination || ((bool)target && !target_uri))) { // It is allowed by schema for uri not to be present. It is // probably safer to assume user wants that file. OutputFileType file; file.Name = (std::string)filenameNode; parsed_jobdescription.DataStaging.OutputFiles.push_back(file); } } // end of Datastaging SourceLanguage(parsed_jobdescription) = (!language.empty() ? 
language : supportedLanguages.front()); logger.msg(VERBOSE, "String successfully parsed as %s.", parsed_jobdescription.GetSourceLanguage()); jobdescs.push_back(parsed_jobdescription); return true; } JobDescriptionParserPluginResult ARCJSDLParser::Assemble(const JobDescription& job, std::string& product, const std::string& language, const std::string& dialect) const { if (!IsLanguageSupported(language)) { error = "Language is not supported"; return false; } if (job.Application.Executable.Path.empty()) { error = "The path of the application's executable is empty."; return false; } NS ns; ns[""] = JSDL_NAMESPACE; ns["posix-jsdl"] = JSDL_POSIX_NAMESPACE; ns["hpcpa-jsdl"] = JSDL_HPCPA_NAMESPACE; ns["arc-jsdl"] = JSDL_ARC_NAMESPACE; XMLNode jobdefinition(ns, "JobDefinition"); XMLNode jobdescription = jobdefinition.NewChild("JobDescription"); // JobIdentification // std::string JobName; XMLNode xmlIdentification(ns,"JobIdentification"); if (!job.Identification.JobName.empty()) xmlIdentification.NewChild("JobName") = job.Identification.JobName; // std::string Description; if (!job.Identification.Description.empty()) xmlIdentification.NewChild("Description") = job.Identification.Description; // JSDL compliance if (!job.OtherAttributes.empty()) { std::map::const_iterator jrIt = job.OtherAttributes.find("nordugrid:jsdl;Identification/JobProject"); if (jrIt != job.OtherAttributes.end()) { xmlIdentification.NewChild("JobProject") = jrIt->second; } } // std::list Annotation; for (std::list::const_iterator it = job.Identification.Annotation.begin(); it != job.Identification.Annotation.end(); it++) xmlIdentification.NewChild("arc-jsdl:UserTag") = *it; // std::list ActivityOldID; for (std::list::const_iterator it = job.Identification.ActivityOldID.begin(); it != job.Identification.ActivityOldID.end(); it++) xmlIdentification.NewChild("arc-jsdl:ActivityOldId") = *it; if (xmlIdentification.Size() > 0) jobdescription.NewChild(xmlIdentification); // end of JobIdentification // Application XMLNode xmlApplication(ns,"Application"); XMLNode xmlPApplication(NS("posix-jsdl", JSDL_POSIX_NAMESPACE), "posix-jsdl:POSIXApplication"); XMLNode xmlHApplication(NS("hpcpa-jsdl", JSDL_HPCPA_NAMESPACE), "hpcpa-jsdl:HPCProfileApplication"); // ExecutableType Executable; if (!job.Application.Executable.Path.empty()) { xmlPApplication.NewChild("posix-jsdl:Executable") = job.Application.Executable.Path; xmlHApplication.NewChild("hpcpa-jsdl:Executable") = job.Application.Executable.Path; for (std::list::const_iterator it = job.Application.Executable.Argument.begin(); it != job.Application.Executable.Argument.end(); it++) { xmlPApplication.NewChild("posix-jsdl:Argument") = *it; xmlHApplication.NewChild("hpcpa-jsdl:Argument") = *it; } } // std::string Input; if (!job.Application.Input.empty()) { xmlPApplication.NewChild("posix-jsdl:Input") = job.Application.Input; xmlHApplication.NewChild("hpcpa-jsdl:Input") = job.Application.Input; } // std::string Output; if (!job.Application.Output.empty()) { xmlPApplication.NewChild("posix-jsdl:Output") = job.Application.Output; xmlHApplication.NewChild("hpcpa-jsdl:Output") = job.Application.Output; } // std::string Error; if (!job.Application.Error.empty()) { xmlPApplication.NewChild("posix-jsdl:Error") = job.Application.Error; xmlHApplication.NewChild("hpcpa-jsdl:Error") = job.Application.Error; } // std::list< std::pair > Environment; for (std::list< std::pair >::const_iterator it = job.Application.Environment.begin(); it != job.Application.Environment.end(); it++) { XMLNode pEnvironment = 
xmlPApplication.NewChild("posix-jsdl:Environment"); XMLNode hEnvironment = xmlHApplication.NewChild("hpcpa-jsdl:Environment"); pEnvironment.NewAttribute("name") = it->first; pEnvironment = it->second; hEnvironment.NewAttribute("name") = it->first; hEnvironment = it->second; } // std::list PreExecutable; if (!job.Application.PreExecutable.empty() && !job.Application.PreExecutable.front().Path.empty()) { XMLNode prologue = xmlApplication.NewChild("arc-jsdl:Prologue"); prologue.NewChild("arc-jsdl:Path") = job.Application.PreExecutable.front().Path; for (std::list::const_iterator it = job.Application.PreExecutable.front().Argument.begin(); it != job.Application.PreExecutable.front().Argument.end(); ++it) { prologue.NewChild("arc-jsdl:Argument") = *it; } } // std::list PostExecutable; if (!job.Application.PostExecutable.empty() && !job.Application.PostExecutable.front().Path.empty()) { XMLNode epilogue = xmlApplication.NewChild("arc-jsdl:Epilogue"); epilogue.NewChild("arc-jsdl:Path") = job.Application.PostExecutable.front().Path; for (std::list::const_iterator it = job.Application.PostExecutable.front().Argument.begin(); it != job.Application.PostExecutable.front().Argument.end(); ++it) epilogue.NewChild("arc-jsdl:Argument") = *it; } // std::string LogDir; if (!job.Application.LogDir.empty()) xmlApplication.NewChild("arc-jsdl:LogDir") = job.Application.LogDir; // std::list RemoteLogging; for (std::list::const_iterator it = job.Application.RemoteLogging.begin(); it != job.Application.RemoteLogging.end(); it++) { if (it->ServiceType == "SGAS") { xmlApplication.NewChild("arc-jsdl:RemoteLogging") = it->Location.str(); } } // int Rerun; if (job.Application.Rerun > -1) xmlApplication.NewChild("arc-jsdl:Rerun") = tostring(job.Application.Rerun); // int Priority; if (job.Application.Priority > -1) xmlApplication.NewChild("arc-jsdl:Priority") = tostring(job.Application.Priority); // Time ExpirationTime; if (job.Application.ExpirationTime > -1) xmlApplication.NewChild("arc-jsdl:ExpiryTime") = job.Application.ExpirationTime.str(); // Time ProcessingStartTime; if (job.Application.ProcessingStartTime > -1) xmlApplication.NewChild("arc-jsdl:ProcessingStartTime") = job.Application.ProcessingStartTime.str(); // XMLNode Notification; for (std::list::const_iterator it = job.Application.Notification.begin(); it != job.Application.Notification.end(); it++) { XMLNode n = xmlApplication.NewChild("arc-jsdl:Notification"); n.NewChild("arc-jsdl:Type") = "Email"; n.NewChild("arc-jsdl:Endpoint") = it->Email; for (std::list::const_iterator s = it->States.begin(); s != it->States.end(); s++) { n.NewChild("arc-jsdl:State") = *s; } } // XMLNode AccessControl; if (bool(job.Application.AccessControl)) xmlApplication.NewChild("arc-jsdl:AccessControl").NewChild(job.Application.AccessControl); // std::list CredentialService; for (std::list::const_iterator it = job.Application.CredentialService.begin(); it != job.Application.CredentialService.end(); it++) xmlApplication.NewChild("arc-jsdl:CredentialService") = it->fullstr(); if (job.Application.DryRun) { xmlApplication.NewChild("arc-jsdl:DryRun") = "yes"; } // POSIX compliance... 
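    // Worked example of the unit handling here (numbers are illustrative):
    // memory ranges are kept internally in MB (1024*1024-byte units) and are
    // converted back to bytes with mb_to_bytes() before being written, so
    // IndividualPhysicalMemory.max = 2000 becomes
    // posix-jsdl:MemoryLimit = 2000 * 1024 * 1024 = 2097152000. The time and
    // count limits below are copied through unchanged. (On the Parse() side
    // bytes_to_mb() rounds up, so even a 1-byte requirement maps to 1 MB.)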
if (job.Resources.TotalWallTime.range.max != -1) xmlPApplication.NewChild("posix-jsdl:WallTimeLimit") = tostring(job.Resources.TotalWallTime.range.max); if (job.Resources.IndividualPhysicalMemory.max != -1) { xmlPApplication.NewChild("posix-jsdl:MemoryLimit") = tostring(mb_to_bytes(job.Resources.IndividualPhysicalMemory.max)); } if (job.Resources.TotalCPUTime.range.max != -1) xmlPApplication.NewChild("posix-jsdl:CPUTimeLimit") = tostring(job.Resources.TotalCPUTime.range.max); if (job.Resources.SlotRequirement.NumberOfSlots != -1) xmlPApplication.NewChild("posix-jsdl:ProcessCountLimit") = tostring(job.Resources.SlotRequirement.NumberOfSlots); if (job.Resources.IndividualVirtualMemory.max != -1) xmlPApplication.NewChild("posix-jsdl:VirtualMemoryLimit") = tostring(mb_to_bytes(job.Resources.IndividualVirtualMemory.max)); if (job.Resources.ParallelEnvironment.ThreadsPerProcess != -1) xmlPApplication.NewChild("posix-jsdl:ThreadCountLimit") = tostring(job.Resources.ParallelEnvironment.ThreadsPerProcess); if (xmlPApplication.Size() > 0) xmlApplication.NewChild(xmlPApplication); if (xmlHApplication.Size() > 0) xmlApplication.NewChild(xmlHApplication); if (xmlApplication.Size() > 0) jobdescription.NewChild(xmlApplication); // end of Application // Resources XMLNode xmlResources(ns,"Resources"); // SoftwareRequirement OperatingSystem if (!job.Resources.OperatingSystem.empty()) { XMLNode xmlOS = xmlResources.NewChild("OperatingSystem"); outputSoftware(job.Resources.OperatingSystem, xmlOS); // JSDL compliance. Only the first element in the OperatingSystem object is printed. xmlOS.NewChild("OperatingSystemType").NewChild("OperatingSystemName") = job.Resources.OperatingSystem.getSoftwareList().front().getName(); if (!job.Resources.OperatingSystem.getSoftwareList().front().getVersion().empty()) xmlOS.NewChild("OperatingSystemVersion") = job.Resources.OperatingSystem.getSoftwareList().front().getVersion(); } // std::string Platform; if (!job.Resources.Platform.empty()) { xmlResources.NewChild("arc-jsdl:Platform") = job.Resources.Platform; // JSDL compliance xmlResources.NewChild("CPUArchitecture").NewChild("CPUArchitectureName") = job.Resources.Platform; } // std::string NetworkInfo; if (!job.Resources.NetworkInfo.empty()) { xmlResources.NewChild("arc-jsdl:NetworkInfo") = job.Resources.NetworkInfo; std::string value = ""; if (job.Resources.NetworkInfo == "100megabitethernet") value = "104857600.0"; else if (job.Resources.NetworkInfo == "gigabitethernet") value = "1073741824.0"; else if (job.Resources.NetworkInfo == "myrinet") value = "2147483648.0"; else if (job.Resources.NetworkInfo == "infiniband") value = "10737418240.0"; if (value != "") xmlResources.NewChild("IndividualNetworkBandwidth").NewChild("LowerBoundedRange") = value; } // NodeAccessType NodeAccess; switch (job.Resources.NodeAccess) { case NAT_NONE: break; case NAT_INBOUND: xmlResources.NewChild("arc-jsdl:NodeAccess") = "inbound"; break; case NAT_OUTBOUND: xmlResources.NewChild("arc-jsdl:NodeAccess") = "outbound"; break; case NAT_INOUTBOUND: xmlResources.NewChild("arc-jsdl:NodeAccess") = "inoutbound"; break; } // Range IndividualPhysicalMemory; { XMLNode xmlIPM(ns,"IndividualPhysicalMemory"); outputARCJSDLRange(mb_to_bytes(job.Resources.IndividualPhysicalMemory), xmlIPM, (long long int)-1); // JSDL compliance... 
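      // Example of the duplicated output (illustrative numbers): a request of
      // min=1000, max=2000 MB is written both in the arc-jsdl form
      //   <arc-jsdl:Min>1048576000</arc-jsdl:Min><arc-jsdl:Max>2097152000</arc-jsdl:Max>
      // and, for plain JSDL consumers, as
      //   <LowerBoundedRange>1048576000</LowerBoundedRange><UpperBoundedRange>2097152000</UpperBoundedRange>
      // inside the same IndividualPhysicalMemory element.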
outputJSDLRange(mb_to_bytes(job.Resources.IndividualPhysicalMemory), xmlIPM, (long long int)-1); if (xmlIPM.Size() > 0) xmlResources.NewChild(xmlIPM); } // Range IndividualVirtualMemory; { XMLNode xmlIVM(ns,"IndividualVirtualMemory"); outputARCJSDLRange(mb_to_bytes(job.Resources.IndividualVirtualMemory), xmlIVM, (long long int)-1); outputJSDLRange(mb_to_bytes(job.Resources.IndividualVirtualMemory), xmlIVM, (long long int)-1); if (xmlIVM.Size() > 0) xmlResources.NewChild(xmlIVM); } { // Range DiskSpace; XMLNode xmlDiskSpace(ns,"arc-jsdl:DiskSpace"); XMLNode xmlFileSystem(ns,"DiskSpace"); // JDSL compliance... if (job.Resources.DiskSpaceRequirement.DiskSpace.max != -1 || job.Resources.DiskSpaceRequirement.DiskSpace.min != -1) { Range diskspace = mb_to_bytes(job.Resources.DiskSpaceRequirement.DiskSpace); outputARCJSDLRange(diskspace, xmlDiskSpace, (long long int)-1); // JSDL compliance... outputJSDLRange(diskspace, xmlFileSystem, (long long int)-1); } if (xmlDiskSpace.Size() > 0) { XMLNode dsr = xmlResources.NewChild("arc-jsdl:DiskSpaceRequirement"); dsr.NewChild(xmlDiskSpace); // int CacheDiskSpace; if (job.Resources.DiskSpaceRequirement.CacheDiskSpace > -1) { dsr.NewChild("arc-jsdl:CacheDiskSpace") = tostring(mb_to_bytes(job.Resources.DiskSpaceRequirement.CacheDiskSpace)); } // int SessionDiskSpace; if (job.Resources.DiskSpaceRequirement.SessionDiskSpace > -1) { dsr.NewChild("arc-jsdl:SessionDiskSpace") = tostring(mb_to_bytes(job.Resources.DiskSpaceRequirement.SessionDiskSpace)); } } // JSDL Compliance... if (xmlFileSystem.Size() > 0) { xmlResources.NewChild("FileSystem").NewChild(xmlFileSystem); } } // Period SessionLifeTime; if (job.Resources.SessionLifeTime > -1) xmlResources.NewChild("arc-jsdl:SessionLifeTime") = tostring(job.Resources.SessionLifeTime); // ScalableTime IndividualCPUTime; { XMLNode xmlICPUT(ns,"IndividualCPUTime"); XMLNode xmlValue = xmlICPUT.NewChild("arc-jsdl:Value"); outputARCJSDLRange(job.Resources.IndividualCPUTime.range, xmlValue, -1); if (xmlValue.Size() > 0) { outputBenchmark(job.Resources.IndividualCPUTime.benchmark, xmlICPUT); // JSDL compliance... outputJSDLRange(job.Resources.IndividualCPUTime.range, xmlICPUT, -1); xmlResources.NewChild(xmlICPUT); } } // ScalableTime TotalCPUTime; { XMLNode xmlTCPUT(ns,"TotalCPUTime"); XMLNode xmlValue = xmlTCPUT.NewChild("arc-jsdl:Value"); outputARCJSDLRange(job.Resources.TotalCPUTime.range, xmlValue, -1); if (xmlValue.Size() > 0) { outputBenchmark(job.Resources.TotalCPUTime.benchmark, xmlTCPUT); // JSDL compliance... 
outputJSDLRange(job.Resources.TotalCPUTime.range, xmlTCPUT, -1); xmlResources.NewChild(xmlTCPUT); } } // ScalableTime IndividualWallTime; { XMLNode xmlIWT(ns,"arc-jsdl:IndividualWallTime"); XMLNode xmlValue = xmlIWT.NewChild("arc-jsdl:Value"); outputARCJSDLRange(job.Resources.IndividualWallTime.range, xmlValue, -1); if (xmlValue.Size() > 0) { outputBenchmark(job.Resources.IndividualWallTime.benchmark, xmlIWT); xmlResources.NewChild(xmlIWT); } } // ScalableTime TotalWallTime; { XMLNode xmlTWT("arc-jsdl:TotalWallTime"); XMLNode xmlValue = xmlTWT.NewChild("arc-jsdl:Value"); outputARCJSDLRange(job.Resources.TotalWallTime.range, xmlValue, -1); if (xmlValue.Size() > 0) { outputBenchmark(job.Resources.TotalWallTime.benchmark, xmlTWT); xmlResources.NewChild(xmlTWT); } } // SoftwareRequirement CEType; if (!job.Resources.CEType.empty()) { XMLNode xmlCEType = xmlResources.NewChild("arc-jsdl:CEType"); outputSoftware(job.Resources.CEType, xmlCEType); } // ResourceSlotType Slots; { XMLNode xmlSlotRequirement(ns,"arc-jsdl:SlotRequirement"); // Range NumberOfSlots; if (job.Resources.SlotRequirement.NumberOfSlots > -1) { xmlSlotRequirement.NewChild("arc-jsdl:NumberOfSlots") = tostring(job.Resources.SlotRequirement.NumberOfSlots); } // int ProcessPerHost; if (job.Resources.SlotRequirement.SlotsPerHost > -1) { xmlSlotRequirement.NewChild("arc-jsdl:ProcessPerHost") = tostring(job.Resources.SlotRequirement.SlotsPerHost); xmlResources.NewChild("TotalCPUCount") = tostring(job.Resources.SlotRequirement.SlotsPerHost); } // int ThreadsPerProcess; if (job.Resources.ParallelEnvironment.ThreadsPerProcess > -1) { xmlSlotRequirement.NewChild("arc-jsdl:ThreadsPerProcesses") = tostring(job.Resources.ParallelEnvironment.ThreadsPerProcess); } if (!job.Resources.ParallelEnvironment.Type.empty()) { xmlSlotRequirement.NewChild("arc-jsdl:SPMDVariation") = job.Resources.ParallelEnvironment.Type; } if (job.Resources.SlotRequirement.ExclusiveExecution != SlotRequirementType::EE_DEFAULT) { if (job.Resources.SlotRequirement.ExclusiveExecution == SlotRequirementType::EE_TRUE) { xmlSlotRequirement.NewChild("arc-jsdl:ExclusiveExecution") = "true"; xmlResources.NewChild("ExclusiveExecution") = "true"; // JSDL } else if (job.Resources.SlotRequirement.ExclusiveExecution == SlotRequirementType::EE_FALSE) { xmlSlotRequirement.NewChild("arc-jsdl:ExclusiveExecution") = "false"; xmlResources.NewChild("ExclusiveExecution") = "false"; } } if (xmlSlotRequirement.Size() > 0) { xmlResources.NewChild(xmlSlotRequirement); } } // std::string QueueName; if (!job.Resources.QueueName.empty()) {; //logger.msg(INFO, "job.Resources.QueueName = %s", job.Resources.QueueName); xmlResources.NewChild("arc-jsdl:QueueName") = job.Resources.QueueName; // Be backwards compatible with NOX versions of A-REX. 
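      // Example of the backwards-compatible duplication (the queue name is illustrative):
      // "gridlong" would be written both as
      //   <arc-jsdl:QueueName>gridlong</arc-jsdl:QueueName>
      // and, for NOX-era A-REX services, nested as
      //   <arc-jsdl:CandidateTarget><arc-jsdl:QueueName>gridlong</arc-jsdl:QueueName></arc-jsdl:CandidateTarget>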
xmlResources.NewChild("arc-jsdl:CandidateTarget").NewChild("arc-jsdl:QueueName") = job.Resources.QueueName; } // SoftwareRequirement RunTimeEnvironment; if (!job.Resources.RunTimeEnvironment.empty()) { XMLNode xmlRTE = xmlResources.NewChild("arc-jsdl:RunTimeEnvironment"); outputSoftware(job.Resources.RunTimeEnvironment, xmlRTE); } if (xmlResources.Size() > 0) jobdescription.NewChild(xmlResources); // end of Resources // DataStaging for (std::list::const_iterator it = job.DataStaging.InputFiles.begin(); it != job.DataStaging.InputFiles.end(); ++it) { if (it->Name.empty()) { error = "The name of the input file is empty."; return false; } XMLNode datastaging = jobdescription.NewChild("DataStaging"); datastaging.NewChild("FileName") = it->Name; if (!it->Sources.empty() && it->Sources.front()) { if (it->Sources.front().Protocol() == "file") { datastaging.NewChild("Source"); } else { XMLNode xs = datastaging.NewChild("Source"); xs.NewChild("URI") = it->Sources.front().str(); // add any URL options for (std::multimap::const_iterator itOpt = it->Sources.front().Options().begin(); itOpt != it->Sources.front().Options().end(); ++itOpt) { xs.NewChild("arc-jsdl:URIOption") = itOpt->first + "=" + itOpt->second; } // add URL Locations, which may have their own options for (std::list::const_iterator itLoc = it->Sources.front().Locations().begin(); itLoc != it->Sources.front().Locations().end(); ++itLoc) { XMLNode xloc = xs.NewChild("arc-jsdl:Location"); xloc.NewChild("URI") = itLoc->str(); for (std::multimap::const_iterator itOpt = itLoc->Options().begin(); itOpt != itLoc->Options().end(); ++itOpt) { xloc.NewChild("arc-jsdl:URIOption") = itOpt->first + "=" + itOpt->second; } } } } if (it->IsExecutable) { datastaging.NewChild("arc-jsdl:IsExecutable") = "true"; } if (it->FileSize > -1) { datastaging.NewChild("arc-jsdl:FileSize") = tostring(it->FileSize); } if (!it->Checksum.empty()) { datastaging.NewChild("arc-jsdl:Checksum") = it->Checksum; } } for (std::list::const_iterator it = job.DataStaging.OutputFiles.begin(); it != job.DataStaging.OutputFiles.end(); ++it) { if (it->Name.empty()) { error = "The name of the output file is empty."; return false; } XMLNode datastaging = jobdescription.NewChild("DataStaging"); datastaging.NewChild("FileName") = it->Name; if (!it->Targets.empty() && it->Targets.front()) { XMLNode xs = datastaging.NewChild("Target"); xs.NewChild("URI") = it->Targets.front().str(); // add any URL options for (std::multimap::const_iterator itOpt = it->Targets.front().Options().begin(); itOpt != it->Targets.front().Options().end(); ++itOpt) { xs.NewChild("arc-jsdl:URIOption") = itOpt->first + "=" + itOpt->second; } // add URL Locations, which may have their own options for (std::list::const_iterator itLoc = it->Targets.front().Locations().begin(); itLoc != it->Targets.front().Locations().end(); ++itLoc) { XMLNode xloc = xs.NewChild("arc-jsdl:Location"); xloc.NewChild("URI") = itLoc->str(); for (std::multimap::const_iterator itOpt = itLoc->Options().begin(); itOpt != itLoc->Options().end(); ++itOpt) { xloc.NewChild("arc-jsdl:URIOption") = itOpt->first + "=" + itOpt->second; } } } else { datastaging.NewChild("DeleteOnTermination") = "false"; } } // End of DataStaging jobdefinition.GetDoc(product, true); return true; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/PaxHeaders.7502/test0000644000000000000000000000013213214316024024242 xustar000000000000000030 mtime=1513200660.426752705 30 atime=1513200668.721854157 30 ctime=1513200660.426752705 
nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/test/0000755000175000002070000000000013214316024024365 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/test/PaxHeaders.7502/ADLParserTest.cpp0000644000000000000000000000012412675602216027454 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.258698 30 ctime=1513200660.422752656 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/test/ADLParserTest.cpp0000644000175000002070000012141212675602216027522 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "../ADLParser.h" class ADLParserTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(ADLParserTest); CPPUNIT_TEST(JobNameTest); CPPUNIT_TEST(DescriptionTest); CPPUNIT_TEST(TypeTest); CPPUNIT_TEST(AnnotationTest); CPPUNIT_TEST(ActivityOldIDTest); CPPUNIT_TEST(ExecutableTest); CPPUNIT_TEST(InputTest); CPPUNIT_TEST(OutputTest); CPPUNIT_TEST(ErrorTest); CPPUNIT_TEST(PreExecutableTest); CPPUNIT_TEST(PostExecutableTest); CPPUNIT_TEST(LoggingDirectoryTest); CPPUNIT_TEST(RemoteLoggingTest); CPPUNIT_TEST(TestInputFileClientStageable); CPPUNIT_TEST(TestInputFileServiceStageable); CPPUNIT_TEST(TestOutputFileClientStageable); CPPUNIT_TEST(TestOutputFileServiceStageable); CPPUNIT_TEST(TestOutputFileLocationsServiceStageable); CPPUNIT_TEST_SUITE_END(); public: ADLParserTest():PARSER((Arc::PluginArgument*)NULL) {} void setUp(); void tearDown(); void JobNameTest(); void DescriptionTest(); void TypeTest(); void AnnotationTest(); void ActivityOldIDTest(); void ExecutableTest(); void InputTest(); void OutputTest(); void ErrorTest(); void PreExecutableTest(); void PostExecutableTest(); void LoggingDirectoryTest(); void RemoteLoggingTest(); void TestInputFileClientStageable(); void TestInputFileServiceStageable(); void TestOutputFileClientStageable(); void TestOutputFileServiceStageable(); void TestOutputFileLocationsServiceStageable(); private: Arc::JobDescription INJOB; std::list OUTJOBS; Arc::ADLParser PARSER; }; std::ostream& operator<<(std::ostream& os, const std::list& strings) { for (std::list::const_iterator it = strings.begin(); it != strings.end(); it++) { if (it != strings.begin()) { os << ", "; } os << "\"" << *it << "\""; } return os; } void ADLParserTest::setUp() { } void ADLParserTest::tearDown() { } void ADLParserTest::JobNameTest() { const std::string adl = "" "" "" "EMI-ADL-minimal" "" ""; CPPUNIT_ASSERT(PARSER.Parse(adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"EMI-ADL-minimal", OUTJOBS.front().Identification.JobName); std::string parsed_adl; CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), parsed_adl, "emies:adl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(parsed_adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"EMI-ADL-minimal", OUTJOBS.front().Identification.JobName); } void ADLParserTest::DescriptionTest() { const std::string adl = "" "" "" "This job description provides a full example of EMI ADL" "" ""; CPPUNIT_ASSERT(PARSER.Parse(adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"This job description provides a full example of EMI ADL", OUTJOBS.front().Identification.Description); std::string parsed_adl; CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), parsed_adl, "emies:adl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(parsed_adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); 
CPPUNIT_ASSERT_EQUAL((std::string)"This job description provides a full example of EMI ADL", OUTJOBS.front().Identification.Description); } void ADLParserTest::TypeTest() { // The job type is parsed into a std::string object, so it is not necessary to test the other values in the Type enumeration. const std::string adl = "" "" "" "single" "" ""; CPPUNIT_ASSERT(PARSER.Parse(adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"single", OUTJOBS.front().Identification.Type); std::string parsed_adl; CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), parsed_adl, "emies:adl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(parsed_adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"single", OUTJOBS.front().Identification.Type); } void ADLParserTest::AnnotationTest() { const std::string adl = "" "" "" "Full example" "EMI ADL v. 1.04" "" ""; CPPUNIT_ASSERT(PARSER.Parse(adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().Identification.Annotation.size()); CPPUNIT_ASSERT_EQUAL((std::string)"Full example", OUTJOBS.front().Identification.Annotation.front()); CPPUNIT_ASSERT_EQUAL((std::string)"EMI ADL v. 1.04", OUTJOBS.front().Identification.Annotation.back()); std::string parsed_adl; CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), parsed_adl, "emies:adl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(parsed_adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().Identification.Annotation.size()); CPPUNIT_ASSERT_EQUAL((std::string)"Full example", OUTJOBS.front().Identification.Annotation.front()); CPPUNIT_ASSERT_EQUAL((std::string)"EMI ADL v. 1.04", OUTJOBS.front().Identification.Annotation.back()); } void ADLParserTest::ActivityOldIDTest() { const std::string adl = "" "" "" "https://eu-emi.eu/emies/123456789first" "https://eu-emi.eu/emies/123456789second" "" ""; CPPUNIT_ASSERT(PARSER.Parse(adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().Identification.ActivityOldID.size()); CPPUNIT_ASSERT_EQUAL((std::string)"https://eu-emi.eu/emies/123456789first", OUTJOBS.front().Identification.ActivityOldID.front()); CPPUNIT_ASSERT_EQUAL((std::string)"https://eu-emi.eu/emies/123456789second", OUTJOBS.front().Identification.ActivityOldID.back()); std::string parsed_adl; CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), parsed_adl, "emies:adl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(parsed_adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().Identification.ActivityOldID.size()); CPPUNIT_ASSERT_EQUAL((std::string)"https://eu-emi.eu/emies/123456789first", OUTJOBS.front().Identification.ActivityOldID.front()); CPPUNIT_ASSERT_EQUAL((std::string)"https://eu-emi.eu/emies/123456789second", OUTJOBS.front().Identification.ActivityOldID.back()); } void ADLParserTest::ExecutableTest() { { const std::string adl = "" "" "" "" "my-executable" "Hello" "World" "104" "" "" ""; CPPUNIT_ASSERT(PARSER.Parse(adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"my-executable", OUTJOBS.front().Application.Executable.Path); CPPUNIT_ASSERT_EQUAL((std::string)"my-executable", OUTJOBS.front().Application.Executable.Path); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().Application.Executable.Argument.size()); CPPUNIT_ASSERT_EQUAL((std::string)"Hello", 
OUTJOBS.front().Application.Executable.Argument.front()); CPPUNIT_ASSERT_EQUAL((std::string)"World", OUTJOBS.front().Application.Executable.Argument.back()); CPPUNIT_ASSERT(OUTJOBS.front().Application.Executable.SuccessExitCode.first); CPPUNIT_ASSERT_EQUAL(104, OUTJOBS.front().Application.Executable.SuccessExitCode.second); std::string parsed_adl; CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), parsed_adl, "emies:adl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(parsed_adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"my-executable", OUTJOBS.front().Application.Executable.Path); CPPUNIT_ASSERT_EQUAL((std::string)"my-executable", OUTJOBS.front().Application.Executable.Path); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().Application.Executable.Argument.size()); CPPUNIT_ASSERT_EQUAL((std::string)"Hello", OUTJOBS.front().Application.Executable.Argument.front()); CPPUNIT_ASSERT_EQUAL((std::string)"World", OUTJOBS.front().Application.Executable.Argument.back()); CPPUNIT_ASSERT(OUTJOBS.front().Application.Executable.SuccessExitCode.first); CPPUNIT_ASSERT_EQUAL(104, OUTJOBS.front().Application.Executable.SuccessExitCode.second); } OUTJOBS.clear(); // Check if first member of SuccessExitCode is set to false. { const std::string adl = "" "" "" "" "my-executable" "Hello" "World" "" "" ""; CPPUNIT_ASSERT(PARSER.Parse(adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT(!OUTJOBS.front().Application.Executable.SuccessExitCode.first); } } void ADLParserTest::InputTest() { const std::string adl = "" "" "" "standard-emi-input" "" ""; CPPUNIT_ASSERT(PARSER.Parse(adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"standard-emi-input", OUTJOBS.front().Application.Input); std::string parsed_adl; CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), parsed_adl, "emies:adl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(parsed_adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"standard-emi-input", OUTJOBS.front().Application.Input); } void ADLParserTest::OutputTest() { const std::string adl = "" "" "" "standard-emi-output" "" ""; CPPUNIT_ASSERT(PARSER.Parse(adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"standard-emi-output", OUTJOBS.front().Application.Output); std::string parsed_adl; CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), parsed_adl, "emies:adl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(parsed_adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"standard-emi-output", OUTJOBS.front().Application.Output); } void ADLParserTest::ErrorTest() { const std::string adl = "" "" "" "standard-emi-error" "" ""; CPPUNIT_ASSERT(PARSER.Parse(adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"standard-emi-error", OUTJOBS.front().Application.Error); std::string parsed_adl; CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), parsed_adl, "emies:adl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(parsed_adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"standard-emi-error", OUTJOBS.front().Application.Error); } void ADLParserTest::PreExecutableTest() { const std::string adl = "" "" "" "" "my-first-pre-executable" "123456789" "xyz" "0" "" "" "foo/my-second-pre-executable" "1357924680" "abc" "104" "" "" ""; CPPUNIT_ASSERT(PARSER.Parse(adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, 
(int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().Application.PreExecutable.size()); CPPUNIT_ASSERT_EQUAL((std::string)"my-first-pre-executable", OUTJOBS.front().Application.PreExecutable.front().Path); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().Application.PreExecutable.front().Argument.size()); CPPUNIT_ASSERT_EQUAL((std::string)"123456789", OUTJOBS.front().Application.PreExecutable.front().Argument.front()); CPPUNIT_ASSERT_EQUAL((std::string)"xyz", OUTJOBS.front().Application.PreExecutable.front().Argument.back()); CPPUNIT_ASSERT( OUTJOBS.front().Application.PreExecutable.front().SuccessExitCode.first); CPPUNIT_ASSERT_EQUAL(0, OUTJOBS.front().Application.PreExecutable.front().SuccessExitCode.second); CPPUNIT_ASSERT_EQUAL((std::string)"foo/my-second-pre-executable", OUTJOBS.front().Application.PreExecutable.back().Path); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().Application.PreExecutable.back().Argument.size()); CPPUNIT_ASSERT_EQUAL((std::string)"1357924680", OUTJOBS.front().Application.PreExecutable.back().Argument.front()); CPPUNIT_ASSERT_EQUAL((std::string)"abc", OUTJOBS.front().Application.PreExecutable.back().Argument.back()); CPPUNIT_ASSERT( OUTJOBS.front().Application.PreExecutable.back().SuccessExitCode.first); CPPUNIT_ASSERT_EQUAL(104, OUTJOBS.front().Application.PreExecutable.back().SuccessExitCode.second); std::string parsed_adl; CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), parsed_adl, "emies:adl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(parsed_adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().Application.PreExecutable.size()); CPPUNIT_ASSERT_EQUAL((std::string)"my-first-pre-executable", OUTJOBS.front().Application.PreExecutable.front().Path); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().Application.PreExecutable.front().Argument.size()); CPPUNIT_ASSERT_EQUAL((std::string)"123456789", OUTJOBS.front().Application.PreExecutable.front().Argument.front()); CPPUNIT_ASSERT_EQUAL((std::string)"xyz", OUTJOBS.front().Application.PreExecutable.front().Argument.back()); CPPUNIT_ASSERT( OUTJOBS.front().Application.PreExecutable.front().SuccessExitCode.first); CPPUNIT_ASSERT_EQUAL(0, OUTJOBS.front().Application.PreExecutable.front().SuccessExitCode.second); CPPUNIT_ASSERT_EQUAL((std::string)"foo/my-second-pre-executable", OUTJOBS.front().Application.PreExecutable.back().Path); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().Application.PreExecutable.back().Argument.size()); CPPUNIT_ASSERT_EQUAL((std::string)"1357924680", OUTJOBS.front().Application.PreExecutable.back().Argument.front()); CPPUNIT_ASSERT_EQUAL((std::string)"abc", OUTJOBS.front().Application.PreExecutable.back().Argument.back()); CPPUNIT_ASSERT( OUTJOBS.front().Application.PreExecutable.back().SuccessExitCode.first); CPPUNIT_ASSERT_EQUAL(104, OUTJOBS.front().Application.PreExecutable.back().SuccessExitCode.second); } void ADLParserTest::PostExecutableTest() { const std::string adl = "" "" "" "" "my-first-post-executable" "987654321" "zyx" "-1" "" "" "foo/my-second-post-executable" "0864297531" "cba" "401" "" "" ""; CPPUNIT_ASSERT(PARSER.Parse(adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().Application.PostExecutable.size()); CPPUNIT_ASSERT_EQUAL((std::string)"my-first-post-executable", OUTJOBS.front().Application.PostExecutable.front().Path); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().Application.PostExecutable.front().Argument.size()); CPPUNIT_ASSERT_EQUAL((std::string)"987654321", 
OUTJOBS.front().Application.PostExecutable.front().Argument.front()); CPPUNIT_ASSERT_EQUAL((std::string)"zyx", OUTJOBS.front().Application.PostExecutable.front().Argument.back()); CPPUNIT_ASSERT( OUTJOBS.front().Application.PostExecutable.front().SuccessExitCode.first); CPPUNIT_ASSERT_EQUAL(-1, OUTJOBS.front().Application.PostExecutable.front().SuccessExitCode.second); CPPUNIT_ASSERT_EQUAL((std::string)"foo/my-second-post-executable", OUTJOBS.front().Application.PostExecutable.back().Path); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().Application.PostExecutable.back().Argument.size()); CPPUNIT_ASSERT_EQUAL((std::string)"0864297531", OUTJOBS.front().Application.PostExecutable.back().Argument.front()); CPPUNIT_ASSERT_EQUAL((std::string)"cba", OUTJOBS.front().Application.PostExecutable.back().Argument.back()); CPPUNIT_ASSERT( OUTJOBS.front().Application.PostExecutable.back().SuccessExitCode.first); CPPUNIT_ASSERT_EQUAL(401, OUTJOBS.front().Application.PostExecutable.back().SuccessExitCode.second); std::string parsed_adl; CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), parsed_adl, "emies:adl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(parsed_adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().Application.PostExecutable.size()); CPPUNIT_ASSERT_EQUAL((std::string)"my-first-post-executable", OUTJOBS.front().Application.PostExecutable.front().Path); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().Application.PostExecutable.front().Argument.size()); CPPUNIT_ASSERT_EQUAL((std::string)"987654321", OUTJOBS.front().Application.PostExecutable.front().Argument.front()); CPPUNIT_ASSERT_EQUAL((std::string)"zyx", OUTJOBS.front().Application.PostExecutable.front().Argument.back()); CPPUNIT_ASSERT( OUTJOBS.front().Application.PostExecutable.front().SuccessExitCode.first); CPPUNIT_ASSERT_EQUAL(-1, OUTJOBS.front().Application.PostExecutable.front().SuccessExitCode.second); CPPUNIT_ASSERT_EQUAL((std::string)"foo/my-second-post-executable", OUTJOBS.front().Application.PostExecutable.back().Path); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().Application.PostExecutable.back().Argument.size()); CPPUNIT_ASSERT_EQUAL((std::string)"0864297531", OUTJOBS.front().Application.PostExecutable.back().Argument.front()); CPPUNIT_ASSERT_EQUAL((std::string)"cba", OUTJOBS.front().Application.PostExecutable.back().Argument.back()); CPPUNIT_ASSERT( OUTJOBS.front().Application.PostExecutable.back().SuccessExitCode.first); CPPUNIT_ASSERT_EQUAL(401, OUTJOBS.front().Application.PostExecutable.back().SuccessExitCode.second); } void ADLParserTest::LoggingDirectoryTest() { const std::string adl = "" "" "" "job-log" "" ""; CPPUNIT_ASSERT(PARSER.Parse(adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"job-log", OUTJOBS.front().Application.LogDir); std::string parsed_adl; CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), parsed_adl, "emies:adl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(parsed_adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"job-log", OUTJOBS.front().Application.LogDir); } void ADLParserTest::RemoteLoggingTest() { const std::string adl = "" "" "" "" "SGAS" "https://sgas.eu-emi.eu/" "" "" "APEL" "https://apel.eu-emi.eu/" "" "" "FOO" "https://foo.eu-emi.eu/" "" "" ""; CPPUNIT_ASSERT(PARSER.Parse(adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(3, (int)OUTJOBS.front().Application.RemoteLogging.size()); std::list::const_iterator itRLT = 
OUTJOBS.front().Application.RemoteLogging.begin(); CPPUNIT_ASSERT_EQUAL((std::string)"SGAS", itRLT->ServiceType); CPPUNIT_ASSERT_EQUAL(Arc::URL("https://sgas.eu-emi.eu/"), itRLT->Location); CPPUNIT_ASSERT(!itRLT->optional); CPPUNIT_ASSERT(OUTJOBS.front().Application.RemoteLogging.end() != ++itRLT); CPPUNIT_ASSERT_EQUAL((std::string)"APEL", itRLT->ServiceType); CPPUNIT_ASSERT_EQUAL(Arc::URL("https://apel.eu-emi.eu/"), itRLT->Location); CPPUNIT_ASSERT(itRLT->optional); CPPUNIT_ASSERT(OUTJOBS.front().Application.RemoteLogging.end() != ++itRLT); CPPUNIT_ASSERT_EQUAL((std::string)"FOO", itRLT->ServiceType); CPPUNIT_ASSERT_EQUAL(Arc::URL("https://foo.eu-emi.eu/"), itRLT->Location); CPPUNIT_ASSERT(!itRLT->optional); CPPUNIT_ASSERT(OUTJOBS.front().Application.RemoteLogging.end() == ++itRLT); std::string parsed_adl; CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), parsed_adl, "emies:adl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(parsed_adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(3, (int)OUTJOBS.front().Application.RemoteLogging.size()); itRLT = OUTJOBS.front().Application.RemoteLogging.begin(); CPPUNIT_ASSERT_EQUAL((std::string)"SGAS", itRLT->ServiceType); CPPUNIT_ASSERT_EQUAL(Arc::URL("https://sgas.eu-emi.eu/"), itRLT->Location); CPPUNIT_ASSERT(!itRLT->optional); CPPUNIT_ASSERT(OUTJOBS.front().Application.RemoteLogging.end() != ++itRLT); CPPUNIT_ASSERT_EQUAL((std::string)"APEL", itRLT->ServiceType); CPPUNIT_ASSERT_EQUAL(Arc::URL("https://apel.eu-emi.eu/"), itRLT->Location); CPPUNIT_ASSERT(itRLT->optional); CPPUNIT_ASSERT(OUTJOBS.front().Application.RemoteLogging.end() != ++itRLT); CPPUNIT_ASSERT_EQUAL((std::string)"FOO", itRLT->ServiceType); CPPUNIT_ASSERT_EQUAL(Arc::URL("https://foo.eu-emi.eu/"), itRLT->Location); CPPUNIT_ASSERT(!itRLT->optional); CPPUNIT_ASSERT(OUTJOBS.front().Application.RemoteLogging.end() == ++itRLT); } /** Client stageable input file */ void ADLParserTest::TestInputFileClientStageable() { { const std::string adl = "" "" "" "" "my-executable" "" "" "" "" "TestInputFileClientStageable" "" "" "executable" "true" "" "" ""; CPPUNIT_ASSERT(PARSER.Parse(adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); const std::list& ifiles = OUTJOBS.front().DataStaging.InputFiles; CPPUNIT_ASSERT_EQUAL(2, (int)ifiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"TestInputFileClientStageable", ifiles.front().Name); CPPUNIT_ASSERT(!ifiles.front().IsExecutable); CPPUNIT_ASSERT_EQUAL(1, (int)ifiles.front().Sources.size()); CPPUNIT_ASSERT_EQUAL((std::string)"executable", ifiles.back().Name); CPPUNIT_ASSERT(ifiles.back().IsExecutable); CPPUNIT_ASSERT_EQUAL(1, (int)ifiles.back().Sources.size()); } { std::string tempjobdesc; CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), tempjobdesc, "emies:adl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); const std::list& ifiles = OUTJOBS.front().DataStaging.InputFiles; CPPUNIT_ASSERT_EQUAL(2, (int)ifiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"TestInputFileClientStageable", ifiles.front().Name); CPPUNIT_ASSERT(!ifiles.front().IsExecutable); CPPUNIT_ASSERT_EQUAL(1, (int)ifiles.front().Sources.size()); CPPUNIT_ASSERT_EQUAL((std::string)"executable", ifiles.back().Name); CPPUNIT_ASSERT(ifiles.back().IsExecutable); CPPUNIT_ASSERT_EQUAL(1, (int)ifiles.back().Sources.size()); } } /** Service stageable input file */ void ADLParserTest::TestInputFileServiceStageable() { { const std::string adl = "" "" "" "" "my-executable" "" "" "" "" 
"TestInputFileServiceStageable" "" "https://se.eu-emi.eu/1234567890/abcdefghij/TestInputFileServiceStageable" "0a9b8c7d6e5f4g3h2i1j" "" "" "https://se-alt.eu-emi.eu/0987654321/klmnopqrst/TestInputFileServiceStageable" "1t2s3r4q5p6o7n8m9l0k" "" "" "" "executable" "" "gsiftp://gsi-se.eu-emi.eu/5647382910/xyzuvwrstq/executable" "j1i2h3g4f5e6d7c8b9a0" "" "" "gsiftp://gsi-se-alt.eu-emi.eu/0192837465/qtsrwvuzyx/executable" "0a9b8c7d6e5f4g3h2i1j" "" "true" "" "" ""; CPPUNIT_ASSERT(PARSER.Parse(adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); const std::list& ifiles = OUTJOBS.front().DataStaging.InputFiles; CPPUNIT_ASSERT_EQUAL(2, (int)ifiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"TestInputFileServiceStageable", ifiles.front().Name); CPPUNIT_ASSERT(!ifiles.front().IsExecutable); CPPUNIT_ASSERT_EQUAL(2, (int)ifiles.front().Sources.size()); CPPUNIT_ASSERT_EQUAL(Arc::SourceType("https://se.eu-emi.eu/1234567890/abcdefghij/TestInputFileServiceStageable"), ifiles.front().Sources.front()); CPPUNIT_ASSERT_EQUAL((std::string)"0a9b8c7d6e5f4g3h2i1j", ifiles.front().Sources.front().DelegationID); CPPUNIT_ASSERT_EQUAL(Arc::SourceType("https://se-alt.eu-emi.eu/0987654321/klmnopqrst/TestInputFileServiceStageable"), ifiles.front().Sources.back()); CPPUNIT_ASSERT_EQUAL((std::string)"1t2s3r4q5p6o7n8m9l0k", ifiles.front().Sources.back().DelegationID); CPPUNIT_ASSERT_EQUAL((std::string)"executable", ifiles.back().Name); CPPUNIT_ASSERT(ifiles.back().IsExecutable); CPPUNIT_ASSERT_EQUAL(2, (int)ifiles.back().Sources.size()); CPPUNIT_ASSERT_EQUAL(Arc::SourceType("gsiftp://gsi-se.eu-emi.eu/5647382910/xyzuvwrstq/executable"), ifiles.back().Sources.front()); CPPUNIT_ASSERT_EQUAL((std::string)"j1i2h3g4f5e6d7c8b9a0", ifiles.back().Sources.front().DelegationID); CPPUNIT_ASSERT_EQUAL(Arc::SourceType("gsiftp://gsi-se-alt.eu-emi.eu/0192837465/qtsrwvuzyx/executable"), ifiles.back().Sources.back()); CPPUNIT_ASSERT_EQUAL((std::string)"0a9b8c7d6e5f4g3h2i1j", ifiles.back().Sources.back().DelegationID); } { std::string tempjobdesc; CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), tempjobdesc, "emies:adl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); const std::list& ifiles = OUTJOBS.front().DataStaging.InputFiles; CPPUNIT_ASSERT_EQUAL(2, (int)ifiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"TestInputFileServiceStageable", ifiles.front().Name); CPPUNIT_ASSERT(!ifiles.front().IsExecutable); CPPUNIT_ASSERT_EQUAL(2, (int)ifiles.front().Sources.size()); CPPUNIT_ASSERT_EQUAL(Arc::SourceType("https://se.eu-emi.eu/1234567890/abcdefghij/TestInputFileServiceStageable"), ifiles.front().Sources.front()); CPPUNIT_ASSERT_EQUAL((std::string)"0a9b8c7d6e5f4g3h2i1j", ifiles.front().Sources.front().DelegationID); CPPUNIT_ASSERT_EQUAL(Arc::SourceType("https://se-alt.eu-emi.eu/0987654321/klmnopqrst/TestInputFileServiceStageable"), ifiles.front().Sources.back()); CPPUNIT_ASSERT_EQUAL((std::string)"1t2s3r4q5p6o7n8m9l0k", ifiles.front().Sources.back().DelegationID); CPPUNIT_ASSERT_EQUAL((std::string)"executable", ifiles.back().Name); CPPUNIT_ASSERT(ifiles.back().IsExecutable); CPPUNIT_ASSERT_EQUAL(2, (int)ifiles.back().Sources.size()); CPPUNIT_ASSERT_EQUAL(Arc::SourceType("gsiftp://gsi-se.eu-emi.eu/5647382910/xyzuvwrstq/executable"), ifiles.back().Sources.front()); CPPUNIT_ASSERT_EQUAL((std::string)"j1i2h3g4f5e6d7c8b9a0", ifiles.back().Sources.front().DelegationID); CPPUNIT_ASSERT_EQUAL(Arc::SourceType("gsiftp://gsi-se-alt.eu-emi.eu/0192837465/qtsrwvuzyx/executable"), 
ifiles.back().Sources.back()); CPPUNIT_ASSERT_EQUAL((std::string)"0a9b8c7d6e5f4g3h2i1j", ifiles.back().Sources.back().DelegationID); } } /** Client stageable output file */ void ADLParserTest::TestOutputFileClientStageable() { { const std::string adl = "" "" "" "" "my-executable" "" "" "" "" "TestOutputFileClientStageable" "" "" ""; CPPUNIT_ASSERT(PARSER.Parse(adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); const std::list& ofiles = OUTJOBS.front().DataStaging.OutputFiles; CPPUNIT_ASSERT_EQUAL(1, (int)ofiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"TestOutputFileClientStageable", ofiles.front().Name); CPPUNIT_ASSERT(ofiles.front().Targets.empty()); } { std::string tempjobdesc; CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), tempjobdesc, "emies:adl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); const std::list& ofiles = OUTJOBS.front().DataStaging.OutputFiles; CPPUNIT_ASSERT_EQUAL(1, (int)ofiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"TestOutputFileClientStageable", ofiles.front().Name); CPPUNIT_ASSERT(ofiles.front().Targets.empty()); } } /** Service stageable output file */ void ADLParserTest::TestOutputFileServiceStageable() { { const std::string adl = "" "" "" "" "my-executable" "" "" "" "" "TestOutputFileServiceStageable" "" "https://se.eu-emi.eu/1234567890/abcdefghij/TestInputFileServiceStageable" "0a9b8c7d6e5f4g3h2i1j" "" "" "https://se-alt.eu-emi.eu/0987654321/klmnopqrst/TestInputFileServiceStageable" "1t2s3r4q5p6o7n8m9l0k" "true" "true" "false" "" "" "" ""; CPPUNIT_ASSERT(PARSER.Parse(adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); const std::list& ofiles = OUTJOBS.front().DataStaging.OutputFiles; CPPUNIT_ASSERT_EQUAL(1, (int)ofiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"TestOutputFileServiceStageable", ofiles.front().Name); CPPUNIT_ASSERT_EQUAL(2, (int)ofiles.front().Targets.size()); CPPUNIT_ASSERT_EQUAL(Arc::TargetType("https://se.eu-emi.eu/1234567890/abcdefghij/TestInputFileServiceStageable"), ofiles.front().Targets.front()); CPPUNIT_ASSERT_EQUAL((std::string)"0a9b8c7d6e5f4g3h2i1j", ofiles.front().Targets.front().DelegationID); CPPUNIT_ASSERT(!ofiles.front().Targets.front().UseIfFailure); CPPUNIT_ASSERT(!ofiles.front().Targets.front().UseIfCancel); CPPUNIT_ASSERT(ofiles.front().Targets.front().UseIfSuccess); CPPUNIT_ASSERT_EQUAL(Arc::TargetType("https://se-alt.eu-emi.eu/0987654321/klmnopqrst/TestInputFileServiceStageable"), ofiles.front().Targets.back()); CPPUNIT_ASSERT_EQUAL((std::string)"1t2s3r4q5p6o7n8m9l0k", ofiles.front().Targets.back().DelegationID); CPPUNIT_ASSERT(ofiles.front().Targets.back().UseIfFailure); CPPUNIT_ASSERT(ofiles.front().Targets.back().UseIfCancel); CPPUNIT_ASSERT(!ofiles.front().Targets.back().UseIfSuccess); } { std::string tempjobdesc; CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), tempjobdesc, "emies:adl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); const std::list& ofiles = OUTJOBS.front().DataStaging.OutputFiles; CPPUNIT_ASSERT_EQUAL(1, (int)ofiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"TestOutputFileServiceStageable", ofiles.front().Name); CPPUNIT_ASSERT_EQUAL(2, (int)ofiles.front().Targets.size()); CPPUNIT_ASSERT_EQUAL(Arc::TargetType("https://se.eu-emi.eu/1234567890/abcdefghij/TestInputFileServiceStageable"), ofiles.front().Targets.front()); CPPUNIT_ASSERT_EQUAL((std::string)"0a9b8c7d6e5f4g3h2i1j", ofiles.front().Targets.front().DelegationID); 
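// Descriptive comment (added): the per-target creation flags (UseIfFailure/UseIfCancel/UseIfSuccess) must also be preserved after the UnParse/Parse round trip checked below.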
CPPUNIT_ASSERT(!ofiles.front().Targets.front().UseIfFailure); CPPUNIT_ASSERT(!ofiles.front().Targets.front().UseIfCancel); CPPUNIT_ASSERT(ofiles.front().Targets.front().UseIfSuccess); CPPUNIT_ASSERT_EQUAL(Arc::TargetType("https://se-alt.eu-emi.eu/0987654321/klmnopqrst/TestInputFileServiceStageable"), ofiles.front().Targets.back()); CPPUNIT_ASSERT_EQUAL((std::string)"1t2s3r4q5p6o7n8m9l0k", ofiles.front().Targets.back().DelegationID); CPPUNIT_ASSERT(ofiles.front().Targets.back().UseIfFailure); CPPUNIT_ASSERT(ofiles.front().Targets.back().UseIfCancel); CPPUNIT_ASSERT(!ofiles.front().Targets.back().UseIfSuccess); } } /** Service stageable output file with locations and options */ void ADLParserTest::TestOutputFileLocationsServiceStageable() { { const std::string adl = "" "" "" "" "my-executable" "" "" "" "" "TestOutputFileServiceStageable" "" "lfc://lfc.eu-emi.eu:5010/1234567890/abcdefghij/TestInputFileServiceStageable" "" "location" "https://se.eu-emi.eu:443/0987654321/klmnopqrst/TestInputFileServiceStageable" "" "" "overwrite" "yes" "" "" "" "" ""; CPPUNIT_ASSERT(PARSER.Parse(adl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); const std::list& ofiles = OUTJOBS.front().DataStaging.OutputFiles; CPPUNIT_ASSERT_EQUAL(1, (int)ofiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"TestOutputFileServiceStageable", ofiles.front().Name); CPPUNIT_ASSERT_EQUAL(1, (int)ofiles.front().Targets.size()); CPPUNIT_ASSERT_EQUAL((std::string)"lfc://lfc.eu-emi.eu:5010/1234567890/abcdefghij/TestInputFileServiceStageable", ofiles.front().Targets.front().str()); CPPUNIT_ASSERT_EQUAL((std::string)"yes", ofiles.front().Targets.front().Option("overwrite")); CPPUNIT_ASSERT_EQUAL(1, (int)ofiles.front().Targets.front().Locations().size()); CPPUNIT_ASSERT_EQUAL((std::string)"https://se.eu-emi.eu:443/0987654321/klmnopqrst/TestInputFileServiceStageable", ofiles.front().Targets.front().Locations().front().str()); } { std::string tempjobdesc; CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), tempjobdesc, "emies:adl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); const std::list& ofiles = OUTJOBS.front().DataStaging.OutputFiles; CPPUNIT_ASSERT_EQUAL(1, (int)ofiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"TestOutputFileServiceStageable", ofiles.front().Name); CPPUNIT_ASSERT_EQUAL(1, (int)ofiles.front().Targets.size()); CPPUNIT_ASSERT_EQUAL((std::string)"lfc://lfc.eu-emi.eu:5010/1234567890/abcdefghij/TestInputFileServiceStageable", ofiles.front().Targets.front().str()); CPPUNIT_ASSERT_EQUAL((std::string)"yes", ofiles.front().Targets.front().Option("overwrite")); CPPUNIT_ASSERT_EQUAL(1, (int)ofiles.front().Targets.front().Locations().size()); CPPUNIT_ASSERT_EQUAL((std::string)"https://se.eu-emi.eu:443/0987654321/klmnopqrst/TestInputFileServiceStageable", ofiles.front().Targets.front().Locations().front().str()); } } CPPUNIT_TEST_SUITE_REGISTRATION(ADLParserTest); nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/test/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712675602216026372 xustar000000000000000027 mtime=1459029134.924374 30 atime=1513200594.345944511 30 ctime=1513200660.421752644 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/test/Makefile.am0000644000175000002070000000407212675602216026437 0ustar00mockbuildmock00000000000000TESTS = ADLParserTest ARCJSDLParserTest JDLParserTest XRSLParserTest check_PROGRAMS = $(TESTS) ADLParserTest_SOURCES = $(top_srcdir)/src/Test.cpp \ ADLParserTest.cpp ../ADLParser.cpp ../ADLParser.h \ 
../XMLNodeRecover.cpp ../XMLNodeRecover.h
ADLParserTest_CXXFLAGS = -I$(top_srcdir)/include \
	$(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS)
ADLParserTest_LDADD = \
	$(top_builddir)/src/hed/libs/compute/libarccompute.la \
	$(top_builddir)/src/hed/libs/common/libarccommon.la \
	$(top_builddir)/src/hed/libs/loader/libarcloader.la \
	$(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS)
ARCJSDLParserTest_SOURCES = $(top_srcdir)/src/Test.cpp \
	ARCJSDLParserTest.cpp ../ARCJSDLParser.cpp ../ARCJSDLParser.h \
	../XMLNodeRecover.cpp ../XMLNodeRecover.h
ARCJSDLParserTest_CXXFLAGS = -I$(top_srcdir)/include \
	$(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS)
ARCJSDLParserTest_LDADD = \
	$(top_builddir)/src/hed/libs/compute/libarccompute.la \
	$(top_builddir)/src/hed/libs/common/libarccommon.la \
	$(top_builddir)/src/hed/libs/loader/libarcloader.la \
	$(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS)
JDLParserTest_SOURCES = $(top_srcdir)/src/Test.cpp \
	JDLParserTest.cpp ../JDLParser.cpp ../JDLParser.h
JDLParserTest_CXXFLAGS = -I$(top_srcdir)/include \
	$(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS)
JDLParserTest_LDADD = \
	$(top_builddir)/src/hed/libs/compute/libarccompute.la \
	$(top_builddir)/src/hed/libs/common/libarccommon.la \
	$(top_builddir)/src/hed/libs/loader/libarcloader.la \
	$(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS)
XRSLParserTest_SOURCES = $(top_srcdir)/src/Test.cpp \
	XRSLParserTest.cpp ../XRSLParser.cpp ../XRSLParser.h \
	../RSLParser.cpp ../RSLParser.h
XRSLParserTest_CXXFLAGS = -I$(top_srcdir)/include \
	$(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS)
XRSLParserTest_LDADD = \
	$(top_builddir)/src/hed/libs/compute/libarccompute.la \
	$(top_builddir)/src/hed/libs/common/libarccommon.la \
	$(top_builddir)/src/hed/libs/loader/libarcloader.la \
	$(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS)
nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/test/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315722026370 xustar000000000000000030 mtime=1513200594.423945465 30 atime=1513200648.910611858 30 ctime=1513200660.422752656 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/test/Makefile.in0000644000175000002070000017060013214315722026442 0ustar00mockbuildmock00000000000000
# Makefile.in generated by automake 1.11.1 from Makefile.am.
# @configure_input@
# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
# Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ TESTS = ADLParserTest$(EXEEXT) ARCJSDLParserTest$(EXEEXT) \ JDLParserTest$(EXEEXT) XRSLParserTest$(EXEEXT) check_PROGRAMS = $(am__EXEEXT_1) subdir = src/hed/acc/JobDescriptionParser/test DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__EXEEXT_1 = ADLParserTest$(EXEEXT) ARCJSDLParserTest$(EXEEXT) \ JDLParserTest$(EXEEXT) XRSLParserTest$(EXEEXT) am_ADLParserTest_OBJECTS = ADLParserTest-Test.$(OBJEXT) \ ADLParserTest-ADLParserTest.$(OBJEXT) \ ADLParserTest-ADLParser.$(OBJEXT) \ ADLParserTest-XMLNodeRecover.$(OBJEXT) ADLParserTest_OBJECTS = $(am_ADLParserTest_OBJECTS) am__DEPENDENCIES_1 = ADLParserTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) ADLParserTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(ADLParserTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_ARCJSDLParserTest_OBJECTS = ARCJSDLParserTest-Test.$(OBJEXT) \ ARCJSDLParserTest-ARCJSDLParserTest.$(OBJEXT) \ ARCJSDLParserTest-ARCJSDLParser.$(OBJEXT) \ ARCJSDLParserTest-XMLNodeRecover.$(OBJEXT) ARCJSDLParserTest_OBJECTS = $(am_ARCJSDLParserTest_OBJECTS) ARCJSDLParserTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) ARCJSDLParserTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(ARCJSDLParserTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_JDLParserTest_OBJECTS = JDLParserTest-Test.$(OBJEXT) \ JDLParserTest-JDLParserTest.$(OBJEXT) \ JDLParserTest-JDLParser.$(OBJEXT) JDLParserTest_OBJECTS = $(am_JDLParserTest_OBJECTS) JDLParserTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(am__DEPENDENCIES_1) 
$(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) JDLParserTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(JDLParserTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_XRSLParserTest_OBJECTS = XRSLParserTest-Test.$(OBJEXT) \ XRSLParserTest-XRSLParserTest.$(OBJEXT) \ XRSLParserTest-XRSLParser.$(OBJEXT) \ XRSLParserTest-RSLParser.$(OBJEXT) XRSLParserTest_OBJECTS = $(am_XRSLParserTest_OBJECTS) XRSLParserTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) XRSLParserTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(XRSLParserTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(ADLParserTest_SOURCES) $(ARCJSDLParserTest_SOURCES) \ $(JDLParserTest_SOURCES) $(XRSLParserTest_SOURCES) DIST_SOURCES = $(ADLParserTest_SOURCES) $(ARCJSDLParserTest_SOURCES) \ $(JDLParserTest_SOURCES) $(XRSLParserTest_SOURCES) ETAGS = etags CTAGS = ctags am__tty_colors = \ red=; grn=; lgn=; blu=; std= DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS 
= @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ 
MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ 
preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ ADLParserTest_SOURCES = $(top_srcdir)/src/Test.cpp \ ADLParserTest.cpp ../ADLParser.cpp ../ADLParser.h \ ../XMLNodeRecover.cpp ../XMLNodeRecover.h ADLParserTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) ADLParserTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) ARCJSDLParserTest_SOURCES = $(top_srcdir)/src/Test.cpp \ ARCJSDLParserTest.cpp ../ARCJSDLParser.cpp ../ARCJSDLParser.h \ ../XMLNodeRecover.cpp ../XMLNodeRecover.h ARCJSDLParserTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) ARCJSDLParserTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) JDLParserTest_SOURCES = $(top_srcdir)/src/Test.cpp \ JDLParserTest.cpp ../JDLParser.cpp ../JDLParser.h JDLParserTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) JDLParserTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) XRSLParserTest_SOURCES = $(top_srcdir)/src/Test.cpp \ XRSLParserTest.cpp ../XRSLParser.cpp ../XRSLParser.h \ ../RSLParser.cpp ../RSLParser.h XRSLParserTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) XRSLParserTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/acc/JobDescriptionParser/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/acc/JobDescriptionParser/test/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-checkPROGRAMS: @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list ADLParserTest$(EXEEXT): $(ADLParserTest_OBJECTS) $(ADLParserTest_DEPENDENCIES) @rm -f ADLParserTest$(EXEEXT) $(ADLParserTest_LINK) $(ADLParserTest_OBJECTS) $(ADLParserTest_LDADD) $(LIBS) ARCJSDLParserTest$(EXEEXT): $(ARCJSDLParserTest_OBJECTS) $(ARCJSDLParserTest_DEPENDENCIES) @rm -f ARCJSDLParserTest$(EXEEXT) $(ARCJSDLParserTest_LINK) $(ARCJSDLParserTest_OBJECTS) $(ARCJSDLParserTest_LDADD) $(LIBS) JDLParserTest$(EXEEXT): $(JDLParserTest_OBJECTS) $(JDLParserTest_DEPENDENCIES) @rm -f JDLParserTest$(EXEEXT) $(JDLParserTest_LINK) $(JDLParserTest_OBJECTS) $(JDLParserTest_LDADD) $(LIBS) XRSLParserTest$(EXEEXT): $(XRSLParserTest_OBJECTS) $(XRSLParserTest_DEPENDENCIES) @rm -f XRSLParserTest$(EXEEXT) $(XRSLParserTest_LINK) $(XRSLParserTest_OBJECTS) $(XRSLParserTest_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ADLParserTest-ADLParser.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ADLParserTest-ADLParserTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ADLParserTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ADLParserTest-XMLNodeRecover.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ARCJSDLParserTest-ARCJSDLParser.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ARCJSDLParserTest-ARCJSDLParserTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ARCJSDLParserTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ARCJSDLParserTest-XMLNodeRecover.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JDLParserTest-JDLParser.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JDLParserTest-JDLParserTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JDLParserTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/XRSLParserTest-RSLParser.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/XRSLParserTest-Test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/XRSLParserTest-XRSLParser.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/XRSLParserTest-XRSLParserTest.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ 
-MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< ADLParserTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ADLParserTest_CXXFLAGS) $(CXXFLAGS) -MT ADLParserTest-Test.o -MD -MP -MF $(DEPDIR)/ADLParserTest-Test.Tpo -c -o ADLParserTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ADLParserTest-Test.Tpo $(DEPDIR)/ADLParserTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ADLParserTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ADLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o ADLParserTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp ADLParserTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ADLParserTest_CXXFLAGS) $(CXXFLAGS) -MT ADLParserTest-Test.obj -MD -MP -MF $(DEPDIR)/ADLParserTest-Test.Tpo -c -o ADLParserTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ADLParserTest-Test.Tpo $(DEPDIR)/ADLParserTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ADLParserTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ADLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o ADLParserTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` ADLParserTest-ADLParserTest.o: ADLParserTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ADLParserTest_CXXFLAGS) $(CXXFLAGS) -MT ADLParserTest-ADLParserTest.o -MD -MP -MF $(DEPDIR)/ADLParserTest-ADLParserTest.Tpo -c -o ADLParserTest-ADLParserTest.o `test -f 'ADLParserTest.cpp' || echo '$(srcdir)/'`ADLParserTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ADLParserTest-ADLParserTest.Tpo $(DEPDIR)/ADLParserTest-ADLParserTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ADLParserTest.cpp' object='ADLParserTest-ADLParserTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ADLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o ADLParserTest-ADLParserTest.o `test -f 'ADLParserTest.cpp' || echo '$(srcdir)/'`ADLParserTest.cpp ADLParserTest-ADLParserTest.obj: ADLParserTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ADLParserTest_CXXFLAGS) $(CXXFLAGS) -MT ADLParserTest-ADLParserTest.obj -MD -MP -MF $(DEPDIR)/ADLParserTest-ADLParserTest.Tpo -c -o ADLParserTest-ADLParserTest.obj `if test -f 'ADLParserTest.cpp'; then $(CYGPATH_W) 'ADLParserTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ADLParserTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ADLParserTest-ADLParserTest.Tpo $(DEPDIR)/ADLParserTest-ADLParserTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ADLParserTest.cpp' object='ADLParserTest-ADLParserTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ADLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o ADLParserTest-ADLParserTest.obj `if test -f 'ADLParserTest.cpp'; then $(CYGPATH_W) 'ADLParserTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ADLParserTest.cpp'; fi` ADLParserTest-ADLParser.o: ../ADLParser.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ADLParserTest_CXXFLAGS) $(CXXFLAGS) -MT ADLParserTest-ADLParser.o -MD -MP -MF $(DEPDIR)/ADLParserTest-ADLParser.Tpo -c -o ADLParserTest-ADLParser.o `test -f '../ADLParser.cpp' || echo '$(srcdir)/'`../ADLParser.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ADLParserTest-ADLParser.Tpo $(DEPDIR)/ADLParserTest-ADLParser.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../ADLParser.cpp' object='ADLParserTest-ADLParser.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ADLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o ADLParserTest-ADLParser.o `test -f '../ADLParser.cpp' || echo '$(srcdir)/'`../ADLParser.cpp ADLParserTest-ADLParser.obj: ../ADLParser.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ADLParserTest_CXXFLAGS) $(CXXFLAGS) -MT ADLParserTest-ADLParser.obj -MD -MP -MF $(DEPDIR)/ADLParserTest-ADLParser.Tpo -c -o ADLParserTest-ADLParser.obj `if test -f '../ADLParser.cpp'; then $(CYGPATH_W) '../ADLParser.cpp'; else $(CYGPATH_W) '$(srcdir)/../ADLParser.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ADLParserTest-ADLParser.Tpo $(DEPDIR)/ADLParserTest-ADLParser.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../ADLParser.cpp' object='ADLParserTest-ADLParser.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ADLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o ADLParserTest-ADLParser.obj `if test -f '../ADLParser.cpp'; then $(CYGPATH_W) '../ADLParser.cpp'; else $(CYGPATH_W) '$(srcdir)/../ADLParser.cpp'; fi` ADLParserTest-XMLNodeRecover.o: ../XMLNodeRecover.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ADLParserTest_CXXFLAGS) $(CXXFLAGS) -MT ADLParserTest-XMLNodeRecover.o -MD -MP -MF $(DEPDIR)/ADLParserTest-XMLNodeRecover.Tpo -c -o ADLParserTest-XMLNodeRecover.o `test -f '../XMLNodeRecover.cpp' 
|| echo '$(srcdir)/'`../XMLNodeRecover.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ADLParserTest-XMLNodeRecover.Tpo $(DEPDIR)/ADLParserTest-XMLNodeRecover.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../XMLNodeRecover.cpp' object='ADLParserTest-XMLNodeRecover.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ADLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o ADLParserTest-XMLNodeRecover.o `test -f '../XMLNodeRecover.cpp' || echo '$(srcdir)/'`../XMLNodeRecover.cpp ADLParserTest-XMLNodeRecover.obj: ../XMLNodeRecover.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ADLParserTest_CXXFLAGS) $(CXXFLAGS) -MT ADLParserTest-XMLNodeRecover.obj -MD -MP -MF $(DEPDIR)/ADLParserTest-XMLNodeRecover.Tpo -c -o ADLParserTest-XMLNodeRecover.obj `if test -f '../XMLNodeRecover.cpp'; then $(CYGPATH_W) '../XMLNodeRecover.cpp'; else $(CYGPATH_W) '$(srcdir)/../XMLNodeRecover.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ADLParserTest-XMLNodeRecover.Tpo $(DEPDIR)/ADLParserTest-XMLNodeRecover.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../XMLNodeRecover.cpp' object='ADLParserTest-XMLNodeRecover.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ADLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o ADLParserTest-XMLNodeRecover.obj `if test -f '../XMLNodeRecover.cpp'; then $(CYGPATH_W) '../XMLNodeRecover.cpp'; else $(CYGPATH_W) '$(srcdir)/../XMLNodeRecover.cpp'; fi` ARCJSDLParserTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ARCJSDLParserTest_CXXFLAGS) $(CXXFLAGS) -MT ARCJSDLParserTest-Test.o -MD -MP -MF $(DEPDIR)/ARCJSDLParserTest-Test.Tpo -c -o ARCJSDLParserTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ARCJSDLParserTest-Test.Tpo $(DEPDIR)/ARCJSDLParserTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ARCJSDLParserTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ARCJSDLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o ARCJSDLParserTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp ARCJSDLParserTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ARCJSDLParserTest_CXXFLAGS) $(CXXFLAGS) -MT ARCJSDLParserTest-Test.obj -MD -MP -MF $(DEPDIR)/ARCJSDLParserTest-Test.Tpo -c -o ARCJSDLParserTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ARCJSDLParserTest-Test.Tpo $(DEPDIR)/ARCJSDLParserTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='ARCJSDLParserTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) 
$(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ARCJSDLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o ARCJSDLParserTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` ARCJSDLParserTest-ARCJSDLParserTest.o: ARCJSDLParserTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ARCJSDLParserTest_CXXFLAGS) $(CXXFLAGS) -MT ARCJSDLParserTest-ARCJSDLParserTest.o -MD -MP -MF $(DEPDIR)/ARCJSDLParserTest-ARCJSDLParserTest.Tpo -c -o ARCJSDLParserTest-ARCJSDLParserTest.o `test -f 'ARCJSDLParserTest.cpp' || echo '$(srcdir)/'`ARCJSDLParserTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ARCJSDLParserTest-ARCJSDLParserTest.Tpo $(DEPDIR)/ARCJSDLParserTest-ARCJSDLParserTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ARCJSDLParserTest.cpp' object='ARCJSDLParserTest-ARCJSDLParserTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ARCJSDLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o ARCJSDLParserTest-ARCJSDLParserTest.o `test -f 'ARCJSDLParserTest.cpp' || echo '$(srcdir)/'`ARCJSDLParserTest.cpp ARCJSDLParserTest-ARCJSDLParserTest.obj: ARCJSDLParserTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ARCJSDLParserTest_CXXFLAGS) $(CXXFLAGS) -MT ARCJSDLParserTest-ARCJSDLParserTest.obj -MD -MP -MF $(DEPDIR)/ARCJSDLParserTest-ARCJSDLParserTest.Tpo -c -o ARCJSDLParserTest-ARCJSDLParserTest.obj `if test -f 'ARCJSDLParserTest.cpp'; then $(CYGPATH_W) 'ARCJSDLParserTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ARCJSDLParserTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ARCJSDLParserTest-ARCJSDLParserTest.Tpo $(DEPDIR)/ARCJSDLParserTest-ARCJSDLParserTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ARCJSDLParserTest.cpp' object='ARCJSDLParserTest-ARCJSDLParserTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ARCJSDLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o ARCJSDLParserTest-ARCJSDLParserTest.obj `if test -f 'ARCJSDLParserTest.cpp'; then $(CYGPATH_W) 'ARCJSDLParserTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ARCJSDLParserTest.cpp'; fi` ARCJSDLParserTest-ARCJSDLParser.o: ../ARCJSDLParser.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ARCJSDLParserTest_CXXFLAGS) $(CXXFLAGS) -MT ARCJSDLParserTest-ARCJSDLParser.o -MD -MP -MF $(DEPDIR)/ARCJSDLParserTest-ARCJSDLParser.Tpo -c -o ARCJSDLParserTest-ARCJSDLParser.o `test -f '../ARCJSDLParser.cpp' || echo '$(srcdir)/'`../ARCJSDLParser.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ARCJSDLParserTest-ARCJSDLParser.Tpo $(DEPDIR)/ARCJSDLParserTest-ARCJSDLParser.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../ARCJSDLParser.cpp' object='ARCJSDLParserTest-ARCJSDLParser.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ARCJSDLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o ARCJSDLParserTest-ARCJSDLParser.o `test -f '../ARCJSDLParser.cpp' || echo '$(srcdir)/'`../ARCJSDLParser.cpp 
ARCJSDLParserTest-ARCJSDLParser.obj: ../ARCJSDLParser.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ARCJSDLParserTest_CXXFLAGS) $(CXXFLAGS) -MT ARCJSDLParserTest-ARCJSDLParser.obj -MD -MP -MF $(DEPDIR)/ARCJSDLParserTest-ARCJSDLParser.Tpo -c -o ARCJSDLParserTest-ARCJSDLParser.obj `if test -f '../ARCJSDLParser.cpp'; then $(CYGPATH_W) '../ARCJSDLParser.cpp'; else $(CYGPATH_W) '$(srcdir)/../ARCJSDLParser.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ARCJSDLParserTest-ARCJSDLParser.Tpo $(DEPDIR)/ARCJSDLParserTest-ARCJSDLParser.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../ARCJSDLParser.cpp' object='ARCJSDLParserTest-ARCJSDLParser.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ARCJSDLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o ARCJSDLParserTest-ARCJSDLParser.obj `if test -f '../ARCJSDLParser.cpp'; then $(CYGPATH_W) '../ARCJSDLParser.cpp'; else $(CYGPATH_W) '$(srcdir)/../ARCJSDLParser.cpp'; fi` ARCJSDLParserTest-XMLNodeRecover.o: ../XMLNodeRecover.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ARCJSDLParserTest_CXXFLAGS) $(CXXFLAGS) -MT ARCJSDLParserTest-XMLNodeRecover.o -MD -MP -MF $(DEPDIR)/ARCJSDLParserTest-XMLNodeRecover.Tpo -c -o ARCJSDLParserTest-XMLNodeRecover.o `test -f '../XMLNodeRecover.cpp' || echo '$(srcdir)/'`../XMLNodeRecover.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ARCJSDLParserTest-XMLNodeRecover.Tpo $(DEPDIR)/ARCJSDLParserTest-XMLNodeRecover.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../XMLNodeRecover.cpp' object='ARCJSDLParserTest-XMLNodeRecover.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ARCJSDLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o ARCJSDLParserTest-XMLNodeRecover.o `test -f '../XMLNodeRecover.cpp' || echo '$(srcdir)/'`../XMLNodeRecover.cpp ARCJSDLParserTest-XMLNodeRecover.obj: ../XMLNodeRecover.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ARCJSDLParserTest_CXXFLAGS) $(CXXFLAGS) -MT ARCJSDLParserTest-XMLNodeRecover.obj -MD -MP -MF $(DEPDIR)/ARCJSDLParserTest-XMLNodeRecover.Tpo -c -o ARCJSDLParserTest-XMLNodeRecover.obj `if test -f '../XMLNodeRecover.cpp'; then $(CYGPATH_W) '../XMLNodeRecover.cpp'; else $(CYGPATH_W) '$(srcdir)/../XMLNodeRecover.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ARCJSDLParserTest-XMLNodeRecover.Tpo $(DEPDIR)/ARCJSDLParserTest-XMLNodeRecover.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../XMLNodeRecover.cpp' object='ARCJSDLParserTest-XMLNodeRecover.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ARCJSDLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o ARCJSDLParserTest-XMLNodeRecover.obj `if test -f '../XMLNodeRecover.cpp'; then $(CYGPATH_W) '../XMLNodeRecover.cpp'; else $(CYGPATH_W) '$(srcdir)/../XMLNodeRecover.cpp'; fi` JDLParserTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JDLParserTest_CXXFLAGS) $(CXXFLAGS) -MT JDLParserTest-Test.o -MD -MP -MF 
$(DEPDIR)/JDLParserTest-Test.Tpo -c -o JDLParserTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JDLParserTest-Test.Tpo $(DEPDIR)/JDLParserTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='JDLParserTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JDLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o JDLParserTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp JDLParserTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JDLParserTest_CXXFLAGS) $(CXXFLAGS) -MT JDLParserTest-Test.obj -MD -MP -MF $(DEPDIR)/JDLParserTest-Test.Tpo -c -o JDLParserTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JDLParserTest-Test.Tpo $(DEPDIR)/JDLParserTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='JDLParserTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JDLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o JDLParserTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` JDLParserTest-JDLParserTest.o: JDLParserTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JDLParserTest_CXXFLAGS) $(CXXFLAGS) -MT JDLParserTest-JDLParserTest.o -MD -MP -MF $(DEPDIR)/JDLParserTest-JDLParserTest.Tpo -c -o JDLParserTest-JDLParserTest.o `test -f 'JDLParserTest.cpp' || echo '$(srcdir)/'`JDLParserTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JDLParserTest-JDLParserTest.Tpo $(DEPDIR)/JDLParserTest-JDLParserTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JDLParserTest.cpp' object='JDLParserTest-JDLParserTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JDLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o JDLParserTest-JDLParserTest.o `test -f 'JDLParserTest.cpp' || echo '$(srcdir)/'`JDLParserTest.cpp JDLParserTest-JDLParserTest.obj: JDLParserTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JDLParserTest_CXXFLAGS) $(CXXFLAGS) -MT JDLParserTest-JDLParserTest.obj -MD -MP -MF $(DEPDIR)/JDLParserTest-JDLParserTest.Tpo -c -o JDLParserTest-JDLParserTest.obj `if test -f 'JDLParserTest.cpp'; then $(CYGPATH_W) 'JDLParserTest.cpp'; else $(CYGPATH_W) '$(srcdir)/JDLParserTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JDLParserTest-JDLParserTest.Tpo $(DEPDIR)/JDLParserTest-JDLParserTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JDLParserTest.cpp' object='JDLParserTest-JDLParserTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) 
$(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JDLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o JDLParserTest-JDLParserTest.obj `if test -f 'JDLParserTest.cpp'; then $(CYGPATH_W) 'JDLParserTest.cpp'; else $(CYGPATH_W) '$(srcdir)/JDLParserTest.cpp'; fi` JDLParserTest-JDLParser.o: ../JDLParser.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JDLParserTest_CXXFLAGS) $(CXXFLAGS) -MT JDLParserTest-JDLParser.o -MD -MP -MF $(DEPDIR)/JDLParserTest-JDLParser.Tpo -c -o JDLParserTest-JDLParser.o `test -f '../JDLParser.cpp' || echo '$(srcdir)/'`../JDLParser.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JDLParserTest-JDLParser.Tpo $(DEPDIR)/JDLParserTest-JDLParser.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../JDLParser.cpp' object='JDLParserTest-JDLParser.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JDLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o JDLParserTest-JDLParser.o `test -f '../JDLParser.cpp' || echo '$(srcdir)/'`../JDLParser.cpp JDLParserTest-JDLParser.obj: ../JDLParser.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JDLParserTest_CXXFLAGS) $(CXXFLAGS) -MT JDLParserTest-JDLParser.obj -MD -MP -MF $(DEPDIR)/JDLParserTest-JDLParser.Tpo -c -o JDLParserTest-JDLParser.obj `if test -f '../JDLParser.cpp'; then $(CYGPATH_W) '../JDLParser.cpp'; else $(CYGPATH_W) '$(srcdir)/../JDLParser.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/JDLParserTest-JDLParser.Tpo $(DEPDIR)/JDLParserTest-JDLParser.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../JDLParser.cpp' object='JDLParserTest-JDLParser.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(JDLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o JDLParserTest-JDLParser.obj `if test -f '../JDLParser.cpp'; then $(CYGPATH_W) '../JDLParser.cpp'; else $(CYGPATH_W) '$(srcdir)/../JDLParser.cpp'; fi` XRSLParserTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XRSLParserTest_CXXFLAGS) $(CXXFLAGS) -MT XRSLParserTest-Test.o -MD -MP -MF $(DEPDIR)/XRSLParserTest-Test.Tpo -c -o XRSLParserTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/XRSLParserTest-Test.Tpo $(DEPDIR)/XRSLParserTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='XRSLParserTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XRSLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o XRSLParserTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp XRSLParserTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XRSLParserTest_CXXFLAGS) $(CXXFLAGS) -MT XRSLParserTest-Test.obj -MD -MP -MF $(DEPDIR)/XRSLParserTest-Test.Tpo -c -o XRSLParserTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else 
$(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/XRSLParserTest-Test.Tpo $(DEPDIR)/XRSLParserTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='XRSLParserTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XRSLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o XRSLParserTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` XRSLParserTest-XRSLParserTest.o: XRSLParserTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XRSLParserTest_CXXFLAGS) $(CXXFLAGS) -MT XRSLParserTest-XRSLParserTest.o -MD -MP -MF $(DEPDIR)/XRSLParserTest-XRSLParserTest.Tpo -c -o XRSLParserTest-XRSLParserTest.o `test -f 'XRSLParserTest.cpp' || echo '$(srcdir)/'`XRSLParserTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/XRSLParserTest-XRSLParserTest.Tpo $(DEPDIR)/XRSLParserTest-XRSLParserTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XRSLParserTest.cpp' object='XRSLParserTest-XRSLParserTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XRSLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o XRSLParserTest-XRSLParserTest.o `test -f 'XRSLParserTest.cpp' || echo '$(srcdir)/'`XRSLParserTest.cpp XRSLParserTest-XRSLParserTest.obj: XRSLParserTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XRSLParserTest_CXXFLAGS) $(CXXFLAGS) -MT XRSLParserTest-XRSLParserTest.obj -MD -MP -MF $(DEPDIR)/XRSLParserTest-XRSLParserTest.Tpo -c -o XRSLParserTest-XRSLParserTest.obj `if test -f 'XRSLParserTest.cpp'; then $(CYGPATH_W) 'XRSLParserTest.cpp'; else $(CYGPATH_W) '$(srcdir)/XRSLParserTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/XRSLParserTest-XRSLParserTest.Tpo $(DEPDIR)/XRSLParserTest-XRSLParserTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XRSLParserTest.cpp' object='XRSLParserTest-XRSLParserTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XRSLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o XRSLParserTest-XRSLParserTest.obj `if test -f 'XRSLParserTest.cpp'; then $(CYGPATH_W) 'XRSLParserTest.cpp'; else $(CYGPATH_W) '$(srcdir)/XRSLParserTest.cpp'; fi` XRSLParserTest-XRSLParser.o: ../XRSLParser.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XRSLParserTest_CXXFLAGS) $(CXXFLAGS) -MT XRSLParserTest-XRSLParser.o -MD -MP -MF $(DEPDIR)/XRSLParserTest-XRSLParser.Tpo -c -o XRSLParserTest-XRSLParser.o `test -f '../XRSLParser.cpp' || echo '$(srcdir)/'`../XRSLParser.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/XRSLParserTest-XRSLParser.Tpo $(DEPDIR)/XRSLParserTest-XRSLParser.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../XRSLParser.cpp' object='XRSLParserTest-XRSLParser.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(XRSLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o XRSLParserTest-XRSLParser.o `test -f '../XRSLParser.cpp' || echo '$(srcdir)/'`../XRSLParser.cpp XRSLParserTest-XRSLParser.obj: ../XRSLParser.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XRSLParserTest_CXXFLAGS) $(CXXFLAGS) -MT XRSLParserTest-XRSLParser.obj -MD -MP -MF $(DEPDIR)/XRSLParserTest-XRSLParser.Tpo -c -o XRSLParserTest-XRSLParser.obj `if test -f '../XRSLParser.cpp'; then $(CYGPATH_W) '../XRSLParser.cpp'; else $(CYGPATH_W) '$(srcdir)/../XRSLParser.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/XRSLParserTest-XRSLParser.Tpo $(DEPDIR)/XRSLParserTest-XRSLParser.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../XRSLParser.cpp' object='XRSLParserTest-XRSLParser.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XRSLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o XRSLParserTest-XRSLParser.obj `if test -f '../XRSLParser.cpp'; then $(CYGPATH_W) '../XRSLParser.cpp'; else $(CYGPATH_W) '$(srcdir)/../XRSLParser.cpp'; fi` XRSLParserTest-RSLParser.o: ../RSLParser.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XRSLParserTest_CXXFLAGS) $(CXXFLAGS) -MT XRSLParserTest-RSLParser.o -MD -MP -MF $(DEPDIR)/XRSLParserTest-RSLParser.Tpo -c -o XRSLParserTest-RSLParser.o `test -f '../RSLParser.cpp' || echo '$(srcdir)/'`../RSLParser.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/XRSLParserTest-RSLParser.Tpo $(DEPDIR)/XRSLParserTest-RSLParser.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../RSLParser.cpp' object='XRSLParserTest-RSLParser.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XRSLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o XRSLParserTest-RSLParser.o `test -f '../RSLParser.cpp' || echo '$(srcdir)/'`../RSLParser.cpp XRSLParserTest-RSLParser.obj: ../RSLParser.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XRSLParserTest_CXXFLAGS) $(CXXFLAGS) -MT XRSLParserTest-RSLParser.obj -MD -MP -MF $(DEPDIR)/XRSLParserTest-RSLParser.Tpo -c -o XRSLParserTest-RSLParser.obj `if test -f '../RSLParser.cpp'; then $(CYGPATH_W) '../RSLParser.cpp'; else $(CYGPATH_W) '$(srcdir)/../RSLParser.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/XRSLParserTest-RSLParser.Tpo $(DEPDIR)/XRSLParserTest-RSLParser.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../RSLParser.cpp' object='XRSLParserTest-RSLParser.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(XRSLParserTest_CXXFLAGS) $(CXXFLAGS) -c -o XRSLParserTest-RSLParser.obj `if test -f '../RSLParser.cpp'; then $(CYGPATH_W) '../RSLParser.cpp'; else $(CYGPATH_W) '$(srcdir)/../RSLParser.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in 
files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-checkPROGRAMS clean-generic clean-libtool \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-TESTS check-am clean \ clean-checkPROGRAMS clean-generic clean-libtool ctags \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/test/PaxHeaders.7502/XRSLParserTest.cpp0000644000000000000000000000012412675602216027644 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.253698 30 ctime=1513200660.426752705 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/test/XRSLParserTest.cpp0000644000175000002070000014523012675602216027716 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include <config.h> #endif #include #include #include std::ostream& operator<<(std::ostream& os, const std::list<std::string>& strings); #include #include #include #include #include "../XRSLParser.h" class XRSLParserTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(XRSLParserTest); CPPUNIT_TEST(TestExecutable); CPPUNIT_TEST(TestInputOutputError); CPPUNIT_TEST(TestInputFileClientStageable); CPPUNIT_TEST(TestInputFileServiceStageable); CPPUNIT_TEST(TestOutputFileClientStageable); CPPUNIT_TEST(TestOutputFileServiceStageable); CPPUNIT_TEST(TestURIOptionsInput); CPPUNIT_TEST(TestURIOptionsOutput); CPPUNIT_TEST(TestExecutables); CPPUNIT_TEST(TestFTPThreads); CPPUNIT_TEST(TestCache); CPPUNIT_TEST(TestQueue); CPPUNIT_TEST(TestNotify); CPPUNIT_TEST(TestJoin); CPPUNIT_TEST(TestDryRun); CPPUNIT_TEST(TestGridTime); CPPUNIT_TEST(TestAccessControl); CPPUNIT_TEST(TestParallelAttributes); CPPUNIT_TEST(TestAdditionalAttributes); CPPUNIT_TEST(TestMultiRSL); CPPUNIT_TEST(TestDisjunctRSL); CPPUNIT_TEST_SUITE_END(); public: XRSLParserTest():PARSER((Arc::PluginArgument*)NULL) {} void setUp(); void tearDown(); void TestExecutable(); void TestInputOutputError(); void TestInputFileClientStageable(); void TestInputFileServiceStageable(); void TestOutputFileClientStageable(); void TestOutputFileServiceStageable(); void TestURIOptionsInput(); void TestURIOptionsOutput(); void TestExecutables(); void TestFTPThreads(); void TestCache(); void TestQueue(); void TestNotify(); void TestJoin(); void TestDryRun(); void TestGridTime(); void TestAccessControl(); void TestParallelAttributes(); void TestAdditionalAttributes(); void TestMultiRSL(); void TestDisjunctRSL(); private: Arc::JobDescription INJOB; std::list<Arc::JobDescription> OUTJOBS; Arc::XRSLParser PARSER; std::string MESSAGE; std::string xrsl; }; std::ostream& operator<<(std::ostream& os, const std::list<std::string>& strings) { for (std::list<std::string>::const_iterator it = strings.begin(); it != strings.end(); it++) { if (it != strings.begin()) { os << ", "; } os << "\"" << *it << "\""; } return os; } void XRSLParserTest::setUp() { Arc::ArcLocation::Init("./bin/app"); INJOB.Application.Executable.Path = "executable"; INJOB.Application.Executable.Argument.push_back("arg1"); INJOB.Application.Executable.Argument.push_back("arg2"); INJOB.Application.Executable.Argument.push_back("arg3"); // Needed by the XRSLParser. 
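// The XRSL parser appears to require that locally staged files referenced by a description actually exist on disk (see also the comment in TestInputOutputError), so a small placeholder file named "executable" is written next; tearDown() removes it again after each test.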
std::ofstream f("executable", std::ifstream::trunc); f << "executable"; f.close(); } void XRSLParserTest::tearDown() { remove("executable"); } void XRSLParserTest::TestExecutable() { std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "nordugrid:xrsl")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.Application.Executable.Path, OUTJOBS.front().Application.Executable.Path); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.Application.Executable.Argument, OUTJOBS.front().Application.Executable.Argument); } void XRSLParserTest::TestInputOutputError() { INJOB.Application.Input = "input-file"; // The file need to be there, otherwise the XRSLParser will fail. std::ofstream f(INJOB.Application.Input.c_str(), std::ifstream::trunc); f << INJOB.Application.Input; f.close(); INJOB.Application.Output = "output-file"; INJOB.Application.Error = "error-file"; std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "nordugrid:xrsl")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.Application.Input, OUTJOBS.front().Application.Input); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.Application.Output, OUTJOBS.front().Application.Output); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.Application.Error, OUTJOBS.front().Application.Error); remove(INJOB.Application.Input.c_str()); } /** Client stageable input file */ void XRSLParserTest::TestInputFileClientStageable() { INJOB.DataStaging.InputFiles.clear(); MESSAGE = "Error parsing TestInputFileClientStageable data staging type."; Arc::InputFileType file; file.Name = "TestInputFileClientStageable"; file.Sources.push_back(Arc::URL(file.Name)); file.FileSize = file.Name.length(); INJOB.DataStaging.InputFiles.push_back(file); // The file need to be there, otherwise the XRSLParser will fail. 
std::ofstream f(file.Name.c_str(), std::ifstream::trunc); f << file.Name; f.close(); std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "nordugrid:xrsl")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.front().DataStaging.InputFiles.size()); std::list::const_iterator it = OUTJOBS.front().DataStaging.InputFiles.begin(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Name, it->Name); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)it->Sources.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Sources.back(), it->Sources.front()); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "nordugrid:xrsl", "GRIDMANAGER")); OUTJOBS.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS, "", "GRIDMANAGER")); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.front().DataStaging.InputFiles.size()); it = OUTJOBS.front().DataStaging.InputFiles.begin(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Name, it->Name); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.FileSize, it->FileSize); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 0, (int)it->Sources.size()); // Remove source path INJOB.DataStaging.InputFiles.front().Sources.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "nordugrid:xrsl")); OUTJOBS.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS, "", "")); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.front().DataStaging.InputFiles.size()); it = OUTJOBS.front().DataStaging.InputFiles.begin(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Name, it->Name); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)it->Sources.size()); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "nordugrid:xrsl", "GRIDMANAGER")); OUTJOBS.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS, "", "GRIDMANAGER")); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.front().DataStaging.InputFiles.size()); it = OUTJOBS.front().DataStaging.InputFiles.begin(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Name, it->Name); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.FileSize, it->FileSize); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 0, (int)it->Sources.size()); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "nordugrid:xrsl", "GRIDMANAGER")); OUTJOBS.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS, "", "GRIDMANAGER")); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.front().DataStaging.InputFiles.size()); it = OUTJOBS.front().DataStaging.InputFiles.begin(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Name, it->Name); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 0, (int)it->Sources.size()); remove(file.Name.c_str()); } /** Service stageable input file */ void XRSLParserTest::TestInputFileServiceStageable() { INJOB.DataStaging.InputFiles.clear(); MESSAGE = "Error parsing TestInputFileServiceStageable data staging type."; Arc::InputFileType file; file.Name = "TestInputFileServiceStageable"; file.Sources.push_back(Arc::URL("http://example.com/" + file.Name)); INJOB.DataStaging.InputFiles.push_back(file); std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "nordugrid:xrsl")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, 
PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.front().DataStaging.InputFiles.size()); std::list::const_iterator it = OUTJOBS.front().DataStaging.InputFiles.begin(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Name, it->Name); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)it->Sources.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Sources.back(), it->Sources.front()); OUTJOBS.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "nordugrid:xrsl", "GRIDMANAGER")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS, "", "GRIDMANAGER")); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.front().DataStaging.InputFiles.size()); it = OUTJOBS.front().DataStaging.InputFiles.begin(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Name, it->Name); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)it->Sources.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Sources.back(), it->Sources.front()); } /** Client stageable output file */ void XRSLParserTest::TestOutputFileClientStageable() { INJOB.DataStaging.OutputFiles.clear(); MESSAGE = "Error parsing TestOutputFileClientStageable data staging type."; Arc::OutputFileType file; file.Name = "TestOutputFileClientStageable"; INJOB.DataStaging.OutputFiles.push_back(file); std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "nordugrid:xrsl")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.front().DataStaging.OutputFiles.size()); std::list::const_iterator it = OUTJOBS.front().DataStaging.OutputFiles.begin(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Name, it->Name); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 0, (int)it->Targets.size()); } /** Service stageable output file */ void XRSLParserTest::TestOutputFileServiceStageable() { INJOB.DataStaging.OutputFiles.clear(); MESSAGE = "Error parsing create-download data staging type."; Arc::OutputFileType file; file.Name = "7-Create-Upload"; file.Targets.push_back(Arc::URL("http://example.com/" + file.Name)); INJOB.DataStaging.OutputFiles.push_back(file); std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "nordugrid:xrsl")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.front().DataStaging.OutputFiles.size()); std::list::const_iterator it = OUTJOBS.front().DataStaging.OutputFiles.begin(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Name, it->Name); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)it->Targets.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Targets.back(), it->Targets.front()); } void XRSLParserTest::TestURIOptionsInput() { xrsl = "&(executable=/bin/true)" "(inputfiles=(\"in1\" \"gsiftp://example.com/in1\" \"threads=5\"))"; CPPUNIT_ASSERT(PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.InputFiles.size()); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.InputFiles.front().Sources.size()); CPPUNIT_ASSERT_EQUAL(std::string("gsiftp://example.com:2811/in1"), OUTJOBS.front().DataStaging.InputFiles.front().Sources.front().str()); CPPUNIT_ASSERT_EQUAL(std::string("5"), 
OUTJOBS.front().DataStaging.InputFiles.front().Sources.front().Option("threads")); xrsl = "&(executable=/bin/true)" "(inputfiles=(\"in1\" \"lfc://example.com/in1\" \"location=gsiftp://example.com/in1\" \"threads=5\"))"; OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.InputFiles.size()); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.InputFiles.front().Sources.size()); const std::list locations = OUTJOBS.front().DataStaging.InputFiles.front().Sources.front().Locations(); CPPUNIT_ASSERT_EQUAL(1, (int)locations.size()); CPPUNIT_ASSERT_EQUAL(std::string("gsiftp://example.com:2811/in1"), locations.front().str()); CPPUNIT_ASSERT_EQUAL(std::string("5"), locations.front().Option("threads")); xrsl = "&(executable=/bin/true)" "(inputfiles=(\"in1\" \"gsiftp://example.com/in1\" \"threads\"))"; CPPUNIT_ASSERT(!PARSER.Parse(xrsl, OUTJOBS)); } void XRSLParserTest::TestURIOptionsOutput() { xrsl = "&(executable=/bin/true)" "(outputfiles=(\"out1\" \"lfc://example.com/in1\" \"checksum=md5\" \"location=gsiftp://example.com/in1\" \"threads=5\" \"location=gsiftp://example2.com/in1\" \"threads=10\"))"; CPPUNIT_ASSERT(PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.OutputFiles.size()); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.OutputFiles.front().Targets.size()); CPPUNIT_ASSERT_EQUAL(std::string("md5"), OUTJOBS.front().DataStaging.OutputFiles.front().Targets.front().Option("checksum")); const std::list locations = OUTJOBS.front().DataStaging.OutputFiles.front().Targets.front().Locations(); CPPUNIT_ASSERT_EQUAL(2, (int)locations.size()); CPPUNIT_ASSERT_EQUAL(std::string("gsiftp://example.com:2811/in1"), locations.front().str()); CPPUNIT_ASSERT_EQUAL(std::string("5"), locations.front().Option("threads")); CPPUNIT_ASSERT_EQUAL(std::string("gsiftp://example2.com:2811/in1"), locations.back().str()); CPPUNIT_ASSERT_EQUAL(std::string("10"), locations.back().Option("threads")); } void XRSLParserTest::TestExecutables() { xrsl = "&(executable=/bin/true)(|(executables=\"in1\")(executables=\"in2\"))(inputfiles=(\"in1\" \"\") (\"in2\" \"\"))"; std::ofstream f("in1", std::ifstream::trunc); f << "in1"; f.close(); f.open("in2", std::ifstream::trunc); f << "in2"; f.close(); CPPUNIT_ASSERT(PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().DataStaging.InputFiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"in1", OUTJOBS.front().DataStaging.InputFiles.front().Name); CPPUNIT_ASSERT(OUTJOBS.front().DataStaging.InputFiles.front().IsExecutable); CPPUNIT_ASSERT_EQUAL((std::string)"in2", OUTJOBS.front().DataStaging.InputFiles.back().Name); CPPUNIT_ASSERT(!OUTJOBS.front().DataStaging.InputFiles.back().IsExecutable); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().GetAlternatives().size()); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().GetAlternatives().front().DataStaging.InputFiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"in1", OUTJOBS.front().GetAlternatives().front().DataStaging.InputFiles.front().Name); CPPUNIT_ASSERT(!OUTJOBS.front().GetAlternatives().front().DataStaging.InputFiles.front().IsExecutable); CPPUNIT_ASSERT_EQUAL((std::string)"in2", OUTJOBS.front().GetAlternatives().front().DataStaging.InputFiles.back().Name); 
CPPUNIT_ASSERT(OUTJOBS.front().GetAlternatives().front().DataStaging.InputFiles.back().IsExecutable); CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), xrsl, "nordugrid:xrsl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().DataStaging.InputFiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"in1", OUTJOBS.front().DataStaging.InputFiles.front().Name); CPPUNIT_ASSERT(OUTJOBS.front().DataStaging.InputFiles.front().IsExecutable); CPPUNIT_ASSERT_EQUAL((std::string)"in2", OUTJOBS.front().DataStaging.InputFiles.back().Name); CPPUNIT_ASSERT(!OUTJOBS.front().DataStaging.InputFiles.back().IsExecutable); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(0, (int)OUTJOBS.front().GetAlternatives().size()); xrsl = "&(executable=/bin/true)(|(executables=\"non-existing\"))(inputfiles=(\"in1\" \"\"))"; CPPUNIT_ASSERT(!PARSER.Parse(xrsl, OUTJOBS)); remove("in1"); remove("in2"); } void XRSLParserTest::TestCache() { xrsl = "&(executable=\"executable\")" "(inputfiles=(\"in1\" \"gsiftp://example.com/in1\") (\"in2\" \"gsiftp://example.com/in2\"))" "(|(cache=yes)(cache=copy))"; CPPUNIT_ASSERT(PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().DataStaging.InputFiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"in1", OUTJOBS.front().DataStaging.InputFiles.front().Name); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.InputFiles.front().Sources.size()); CPPUNIT_ASSERT_EQUAL((std::string)"yes", OUTJOBS.front().DataStaging.InputFiles.front().Sources.front().Option("cache")); CPPUNIT_ASSERT_EQUAL((std::string)"in2", OUTJOBS.front().DataStaging.InputFiles.back().Name); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.InputFiles.back().Sources.size()); CPPUNIT_ASSERT_EQUAL((std::string)"yes", OUTJOBS.front().DataStaging.InputFiles.back().Sources.front().Option("cache")); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().GetAlternatives().size()); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().GetAlternatives().front().DataStaging.InputFiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"in1", OUTJOBS.front().GetAlternatives().front().DataStaging.InputFiles.front().Name); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().GetAlternatives().front().DataStaging.InputFiles.front().Sources.size()); CPPUNIT_ASSERT_EQUAL((std::string)"copy", OUTJOBS.front().GetAlternatives().front().DataStaging.InputFiles.front().Sources.front().Option("cache")); CPPUNIT_ASSERT_EQUAL((std::string)"in2", OUTJOBS.front().GetAlternatives().front().DataStaging.InputFiles.back().Name); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().GetAlternatives().front().DataStaging.InputFiles.back().Sources.size()); CPPUNIT_ASSERT_EQUAL((std::string)"copy", OUTJOBS.front().GetAlternatives().front().DataStaging.InputFiles.back().Sources.front().Option("cache")); CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), xrsl, "nordugrid:xrsl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.front().DataStaging.InputFiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"in1", OUTJOBS.front().DataStaging.InputFiles.front().Name); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.InputFiles.front().Sources.size()); CPPUNIT_ASSERT_EQUAL((std::string)"yes", OUTJOBS.front().DataStaging.InputFiles.front().Sources.front().Option("cache")); CPPUNIT_ASSERT_EQUAL((std::string)"in2", 
OUTJOBS.front().DataStaging.InputFiles.back().Name); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.InputFiles.back().Sources.size()); CPPUNIT_ASSERT_EQUAL((std::string)"yes", OUTJOBS.front().DataStaging.InputFiles.back().Sources.front().Option("cache")); CPPUNIT_ASSERT_EQUAL(0, (int)OUTJOBS.front().GetAlternatives().size()); } void XRSLParserTest::TestQueue() { xrsl = "&(executable=\"executable\")" "(|(queue=q1)(queue=q2))"; CPPUNIT_ASSERT(PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"q1", OUTJOBS.front().Resources.QueueName); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().GetAlternatives().size()); CPPUNIT_ASSERT_EQUAL((std::string)"q2", OUTJOBS.front().GetAlternatives().front().Resources.QueueName); CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), xrsl, "nordugrid:xrsl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"q1", OUTJOBS.front().Resources.QueueName); CPPUNIT_ASSERT_EQUAL(0, (int)OUTJOBS.front().GetAlternatives().size()); xrsl = "&(executable=\"executable\")" "(|(queue!=q1)(queue!=q2))"; OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); std::map::const_iterator itAttribute; itAttribute = OUTJOBS.front().OtherAttributes.find("nordugrid:broker;reject_queue"); CPPUNIT_ASSERT(OUTJOBS.front().OtherAttributes.end() != itAttribute); CPPUNIT_ASSERT_EQUAL((std::string)"q1", itAttribute->second); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().GetAlternatives().size()); itAttribute = OUTJOBS.front().GetAlternatives().front().OtherAttributes.find("nordugrid:broker;reject_queue"); CPPUNIT_ASSERT(OUTJOBS.front().GetAlternatives().front().OtherAttributes.end() != itAttribute); CPPUNIT_ASSERT_EQUAL((std::string)"q2", itAttribute->second); CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), xrsl, "nordugrid:xrsl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); itAttribute = OUTJOBS.front().OtherAttributes.find("nordugrid:broker;reject_queue"); CPPUNIT_ASSERT(OUTJOBS.front().OtherAttributes.end() == itAttribute); CPPUNIT_ASSERT_EQUAL(0, (int)OUTJOBS.front().GetAlternatives().size()); } void XRSLParserTest::TestFTPThreads() { xrsl = "&(executable=\"executable\")" "(inputfiles=(\"in\" \"gsiftp://example.com/in\"))" "(outputfiles=(\"out\" \"gsiftp://example.com/out\"))" "(|(ftpthreads=5)(ftpthreads=3))"; CPPUNIT_ASSERT(PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.InputFiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"in", OUTJOBS.front().DataStaging.InputFiles.front().Name); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.InputFiles.front().Sources.size()); CPPUNIT_ASSERT_EQUAL((std::string)"5", OUTJOBS.front().DataStaging.InputFiles.front().Sources.front().Option("threads")); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.OutputFiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"out", OUTJOBS.front().DataStaging.OutputFiles.back().Name); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.OutputFiles.back().Targets.size()); CPPUNIT_ASSERT_EQUAL((std::string)"5", OUTJOBS.front().DataStaging.OutputFiles.back().Targets.front().Option("threads")); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().GetAlternatives().size()); CPPUNIT_ASSERT_EQUAL(1, 
(int)OUTJOBS.front().GetAlternatives().front().DataStaging.InputFiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"in", OUTJOBS.front().GetAlternatives().front().DataStaging.InputFiles.front().Name); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().GetAlternatives().front().DataStaging.InputFiles.front().Sources.size()); CPPUNIT_ASSERT_EQUAL((std::string)"3", OUTJOBS.front().GetAlternatives().front().DataStaging.InputFiles.front().Sources.front().Option("threads")); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().GetAlternatives().front().DataStaging.OutputFiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"out", OUTJOBS.front().GetAlternatives().front().DataStaging.OutputFiles.back().Name); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().GetAlternatives().front().DataStaging.OutputFiles.back().Targets.size()); CPPUNIT_ASSERT_EQUAL((std::string)"3", OUTJOBS.front().GetAlternatives().front().DataStaging.OutputFiles.back().Targets.front().Option("threads")); CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), xrsl, "nordugrid:xrsl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.InputFiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"in", OUTJOBS.front().DataStaging.InputFiles.front().Name); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.InputFiles.front().Sources.size()); CPPUNIT_ASSERT_EQUAL((std::string)"5", OUTJOBS.front().DataStaging.InputFiles.front().Sources.front().Option("threads")); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.OutputFiles.size()); CPPUNIT_ASSERT_EQUAL((std::string)"out", OUTJOBS.front().DataStaging.OutputFiles.back().Name); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.OutputFiles.back().Targets.size()); CPPUNIT_ASSERT_EQUAL((std::string)"5", OUTJOBS.front().DataStaging.OutputFiles.back().Targets.front().Option("threads")); CPPUNIT_ASSERT_EQUAL(0, (int)OUTJOBS.front().GetAlternatives().size()); xrsl = "&(executable=\"executable\")" "(inputfiles=(\"in\" \"gsiftp://example.com/in\"))" "(outputfiles=(\"out\" \"gsiftp://example.com/out\"))" "(ftpthreads=20)"; CPPUNIT_ASSERT(!PARSER.Parse(xrsl, OUTJOBS)); } void XRSLParserTest::TestNotify() { /** * The value of the notify attribute must take the form: * notify = [string] ... * with first string being mandatory, and following strings being optional the * string should take the form: * [b][q][f][e][d][c] user1@domain1.tld [user2@domain2.tld] ... * Thus one email address must be specified for the notify attribute to be * valid. States are optional, along with multiple email addresses. Also only * the listed states are allowed. If no states are specified the defaults (be) * will be used. **/ MESSAGE = "Error parsing the notify attribute."; // Test default option. 
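// For reference, the state letters map onto ARC job states as follows (inferred from the assertions in this test): b=PREPARING, q=INLRMS, f=FINISHING, e=FINISHED, d=DELETED, c=CANCELING; the default "be" therefore corresponds to PREPARING and FINISHED.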
xrsl = "&(executable = \"executable\")(notify = \"someone@example.com\")"; std::list tempJobDescs; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, tempJobDescs)); CPPUNIT_ASSERT_EQUAL(1, (int)tempJobDescs.size()); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(tempJobDescs.front(), xrsl, "nordugrid:xrsl")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)tempJobDescs.front().Application.Notification.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.front().Application.Notification.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"someone@example.com", tempJobDescs.front().Application.Notification.front().Email); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"someone@example.com", OUTJOBS.front().Application.Notification.front().Email); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 2, (int)tempJobDescs.front().Application.Notification.front().States.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 2, (int)OUTJOBS.front().Application.Notification.front().States.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"PREPARING", tempJobDescs.front().Application.Notification.front().States.front()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"FINISHED", tempJobDescs.front().Application.Notification.front().States.back()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"PREPARING", OUTJOBS.front().Application.Notification.front().States.front()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"FINISHED", OUTJOBS.front().Application.Notification.front().States.back()); // Test all flags. xrsl = "&(executable = \"executable\")(notify = \"bqfedc someone@example.com\")"; tempJobDescs.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, tempJobDescs)); CPPUNIT_ASSERT_EQUAL(1, (int)tempJobDescs.size()); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(tempJobDescs.front(), xrsl, "nordugrid:xrsl")); OUTJOBS.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)tempJobDescs.front().Application.Notification.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.front().Application.Notification.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"someone@example.com", tempJobDescs.front().Application.Notification.front().Email); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"someone@example.com", OUTJOBS.front().Application.Notification.front().Email); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 6, (int)tempJobDescs.front().Application.Notification.front().States.size()); { std::list::const_iterator it = tempJobDescs.front().Application.Notification.front().States.begin(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"PREPARING", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"INLRMS", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"FINISHING", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"FINISHED", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"DELETED", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"CANCELING", *it); } CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 6, (int)OUTJOBS.front().Application.Notification.front().States.size()); { std::list::const_iterator it = OUTJOBS.front().Application.Notification.front().States.begin(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"PREPARING", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"INLRMS", 
*it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"FINISHING", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"FINISHED", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"DELETED", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"CANCELING", *it); } // Test multiple entries and overlapping states. xrsl = "&(executable = \"executable\")(notify = \"bqfedc someone@example.com\" \"bqf someone@example.com anotherone@example.com\")"; tempJobDescs.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, tempJobDescs)); CPPUNIT_ASSERT_EQUAL(1, (int)tempJobDescs.size()); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(tempJobDescs.front(), xrsl, "nordugrid:xrsl")); OUTJOBS.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 2, (int)tempJobDescs.front().Application.Notification.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 2, (int)OUTJOBS.front().Application.Notification.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"someone@example.com", tempJobDescs.front().Application.Notification.front().Email); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"someone@example.com", OUTJOBS.front().Application.Notification.front().Email); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"anotherone@example.com", tempJobDescs.front().Application.Notification.back().Email); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"anotherone@example.com", OUTJOBS.front().Application.Notification.back().Email); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 6, (int)tempJobDescs.front().Application.Notification.front().States.size()); { std::list::const_iterator it = tempJobDescs.front().Application.Notification.front().States.begin(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"PREPARING", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"INLRMS", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"FINISHING", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"FINISHED", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"DELETED", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"CANCELING", *it); } CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 6, (int)OUTJOBS.front().Application.Notification.front().States.size()); { std::list::const_iterator it = OUTJOBS.front().Application.Notification.front().States.begin(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"PREPARING", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"INLRMS", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"FINISHING", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"FINISHED", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"DELETED", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"CANCELING", *it); } CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 3, (int)tempJobDescs.front().Application.Notification.back().States.size()); { std::list::const_iterator it = tempJobDescs.front().Application.Notification.front().States.begin(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"PREPARING", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"INLRMS", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"FINISHING", *it); } CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 3, (int)OUTJOBS.front().Application.Notification.back().States.size()); { std::list::const_iterator it = OUTJOBS.front().Application.Notification.front().States.begin(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"PREPARING", 
*it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"INLRMS", *it++); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"FINISHING", *it); } // Test invalid email address. xrsl = "&(executable = \"executable\")(notify = \"someoneAexample.com\")"; tempJobDescs.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, !PARSER.Parse(xrsl, tempJobDescs)); CPPUNIT_ASSERT(tempJobDescs.empty()); // Test invalid email address with state flags. xrsl = "&(executable = \"executable\")(notify = \"bqfecd someoneAexample.com\")"; tempJobDescs.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, !PARSER.Parse(xrsl, tempJobDescs)); CPPUNIT_ASSERT(tempJobDescs.empty()); // Test unknown state flags. xrsl = "&(executable = \"executable\")(notify = \"xyz someone@example.com\")"; tempJobDescs.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, !PARSER.Parse(xrsl, tempJobDescs)); CPPUNIT_ASSERT(tempJobDescs.empty()); } void XRSLParserTest::TestDryRun() { MESSAGE = "Error parsing the dryrun attribute."; xrsl = "&(|(executable = \"executable\")(executable = \"executable\"))(dryrun = \"yes\")"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_MESSAGE(MESSAGE, OUTJOBS.front().Application.DryRun); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().GetAlternatives().size()); CPPUNIT_ASSERT_MESSAGE(MESSAGE, OUTJOBS.front().GetAlternatives().front().Application.DryRun); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(OUTJOBS.front(), xrsl, "nordugrid:xrsl")); OUTJOBS.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_MESSAGE(MESSAGE, OUTJOBS.front().Application.DryRun); CPPUNIT_ASSERT_EQUAL(0, (int)OUTJOBS.front().GetAlternatives().size()); xrsl = "&(|(executable = \"executable\")(executable = \"executable\"))(dryrun = \"no\")"; OUTJOBS.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_MESSAGE(MESSAGE, !OUTJOBS.front().Application.DryRun); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().GetAlternatives().size()); CPPUNIT_ASSERT_MESSAGE(MESSAGE, !OUTJOBS.front().GetAlternatives().front().Application.DryRun); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(OUTJOBS.front(), xrsl, "nordugrid:xrsl")); OUTJOBS.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_MESSAGE(MESSAGE, !OUTJOBS.front().Application.DryRun); CPPUNIT_ASSERT_EQUAL(0, (int)OUTJOBS.front().GetAlternatives().size()); } void XRSLParserTest::TestJoin() { MESSAGE = "Error parsing the join attribute."; xrsl = "&(executable = \"executable\")(join = \"yes\")(|(stdout = \"output-file\")(stdout = \"output-file2\"))"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"output-file", OUTJOBS.front().Application.Output); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"output-file", OUTJOBS.front().Application.Error); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().GetAlternatives().size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"output-file2", OUTJOBS.front().GetAlternatives().front().Application.Output); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"output-file2", OUTJOBS.front().GetAlternatives().front().Application.Error); xrsl = "&(executable = \"executable\")(stderr = \"error-file\")(join = \"yes\")"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, !PARSER.Parse(xrsl, OUTJOBS)); xrsl 
= "&(executable = \"executable\")(stdout = \"output-file\")(stderr = \"error-file\")(join = \"yes\")"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, !PARSER.Parse(xrsl, OUTJOBS)); xrsl = "&(executable = \"executable\")(stdout = \"output-file\")(stderr = \"error-file\")(join = \"no\")"; OUTJOBS.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"output-file", OUTJOBS.front().Application.Output); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"error-file", OUTJOBS.front().Application.Error); } void XRSLParserTest::TestGridTime() { xrsl = "&(executable=/bin/echo)(gridtime=600s)"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(-1, OUTJOBS.front().Resources.TotalCPUTime.range.min); CPPUNIT_ASSERT_EQUAL(600, OUTJOBS.front().Resources.TotalCPUTime.range.max); CPPUNIT_ASSERT_EQUAL((std::string)"clock rate", OUTJOBS.front().Resources.TotalCPUTime.benchmark.first); CPPUNIT_ASSERT_EQUAL(2800., OUTJOBS.front().Resources.TotalCPUTime.benchmark.second); CPPUNIT_ASSERT_EQUAL(-1, OUTJOBS.front().Resources.TotalWallTime.range.min); CPPUNIT_ASSERT_EQUAL(600, OUTJOBS.front().Resources.TotalWallTime.range.max); CPPUNIT_ASSERT_EQUAL((std::string)"clock rate", OUTJOBS.front().Resources.TotalWallTime.benchmark.first); CPPUNIT_ASSERT_EQUAL(2800., OUTJOBS.front().Resources.TotalWallTime.benchmark.second); OUTJOBS.clear(); xrsl = "&(executable=/bin/echo)(gridtime=600s)(count=1)"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(-1, OUTJOBS.front().Resources.TotalCPUTime.range.min); CPPUNIT_ASSERT_EQUAL(600, OUTJOBS.front().Resources.TotalCPUTime.range.max); CPPUNIT_ASSERT_EQUAL((std::string)"clock rate", OUTJOBS.front().Resources.TotalCPUTime.benchmark.first); CPPUNIT_ASSERT_EQUAL(2800., OUTJOBS.front().Resources.TotalCPUTime.benchmark.second); CPPUNIT_ASSERT_EQUAL(-1, OUTJOBS.front().Resources.TotalWallTime.range.min); CPPUNIT_ASSERT_EQUAL(600, OUTJOBS.front().Resources.TotalWallTime.range.max); CPPUNIT_ASSERT_EQUAL((std::string)"clock rate", OUTJOBS.front().Resources.TotalWallTime.benchmark.first); CPPUNIT_ASSERT_EQUAL(2800., OUTJOBS.front().Resources.TotalWallTime.benchmark.second); OUTJOBS.clear(); xrsl = "&(executable=/bin/echo)(gridtime=600s)(count=5)"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(-1, OUTJOBS.front().Resources.TotalCPUTime.range.min); CPPUNIT_ASSERT_EQUAL(600, OUTJOBS.front().Resources.TotalCPUTime.range.max); CPPUNIT_ASSERT_EQUAL((std::string)"clock rate", OUTJOBS.front().Resources.TotalCPUTime.benchmark.first); CPPUNIT_ASSERT_EQUAL(2800., OUTJOBS.front().Resources.TotalCPUTime.benchmark.second); CPPUNIT_ASSERT_EQUAL(-1, OUTJOBS.front().Resources.TotalWallTime.range.min); CPPUNIT_ASSERT_EQUAL(3000, OUTJOBS.front().Resources.TotalWallTime.range.max); CPPUNIT_ASSERT_EQUAL((std::string)"clock rate", OUTJOBS.front().Resources.TotalWallTime.benchmark.first); CPPUNIT_ASSERT_EQUAL(2800., OUTJOBS.front().Resources.TotalWallTime.benchmark.second); OUTJOBS.clear(); xrsl = "&(executable=/bin/echo)(gridtime=600s)(cputime=5s)"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, !PARSER.Parse(xrsl, OUTJOBS)); xrsl = "&(executable=/bin/echo)(gridtime=600s)(walltime=42s)"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, !PARSER.Parse(xrsl, OUTJOBS)); } void XRSLParserTest::TestAccessControl() { 
xrsl = "&(executable=/bin/echo)" "(acl='" "" "" "" "" "" "" "" "" "" "" "')"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT(OUTJOBS.front().Application.AccessControl); CPPUNIT_ASSERT_EQUAL((std::string)"gacl", OUTJOBS.front().Application.AccessControl.Name()); CPPUNIT_ASSERT_EQUAL(1, OUTJOBS.front().Application.AccessControl.Size()); CPPUNIT_ASSERT_EQUAL((std::string)"entry", OUTJOBS.front().Application.AccessControl.Child().Name()); std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(OUTJOBS.front(), tempjobdesc, "nordugrid:xrsl")); OUTJOBS.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT(OUTJOBS.front().Application.AccessControl); CPPUNIT_ASSERT_EQUAL((std::string)"gacl", OUTJOBS.front().Application.AccessControl.Name()); CPPUNIT_ASSERT_EQUAL(1, OUTJOBS.front().Application.AccessControl.Size()); CPPUNIT_ASSERT_EQUAL((std::string)"entry", OUTJOBS.front().Application.AccessControl.Child().Name()); } void XRSLParserTest::TestParallelAttributes() { xrsl = "&(executable = \"/bin/echo\")" "(count = 8)" "(countpernode = 4)" "(exclusiveexecution = \"yes\")"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(8, OUTJOBS.front().Resources.SlotRequirement.NumberOfSlots); CPPUNIT_ASSERT_EQUAL(4, OUTJOBS.front().Resources.SlotRequirement.SlotsPerHost); CPPUNIT_ASSERT_EQUAL(Arc::SlotRequirementType::EE_TRUE, OUTJOBS.front().Resources.SlotRequirement.ExclusiveExecution); std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(OUTJOBS.front(), tempjobdesc, "nordugrid:xrsl")); OUTJOBS.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(8, OUTJOBS.front().Resources.SlotRequirement.NumberOfSlots); CPPUNIT_ASSERT_EQUAL(4, OUTJOBS.front().Resources.SlotRequirement.SlotsPerHost); CPPUNIT_ASSERT_EQUAL(Arc::SlotRequirementType::EE_TRUE, OUTJOBS.front().Resources.SlotRequirement.ExclusiveExecution); OUTJOBS.clear(); xrsl = "&(executable = \"/bin/echo\")" "(count = 8)" "(exclusiveexecution = \"no\")"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(8, OUTJOBS.front().Resources.SlotRequirement.NumberOfSlots); CPPUNIT_ASSERT_EQUAL(-1, OUTJOBS.front().Resources.SlotRequirement.SlotsPerHost); CPPUNIT_ASSERT_EQUAL(Arc::SlotRequirementType::EE_FALSE, OUTJOBS.front().Resources.SlotRequirement.ExclusiveExecution); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(OUTJOBS.front(), tempjobdesc, "nordugrid:xrsl")); OUTJOBS.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(8, OUTJOBS.front().Resources.SlotRequirement.NumberOfSlots); CPPUNIT_ASSERT_EQUAL(-1, OUTJOBS.front().Resources.SlotRequirement.SlotsPerHost); CPPUNIT_ASSERT_EQUAL(Arc::SlotRequirementType::EE_FALSE, OUTJOBS.front().Resources.SlotRequirement.ExclusiveExecution); OUTJOBS.clear(); xrsl = "&(executable = \"/bin/echo\")" "(count = 8)"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(8, OUTJOBS.front().Resources.SlotRequirement.NumberOfSlots); CPPUNIT_ASSERT_EQUAL(-1, OUTJOBS.front().Resources.SlotRequirement.SlotsPerHost); 
CPPUNIT_ASSERT_EQUAL(Arc::SlotRequirementType::EE_DEFAULT, OUTJOBS.front().Resources.SlotRequirement.ExclusiveExecution); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(OUTJOBS.front(), tempjobdesc, "nordugrid:xrsl")); OUTJOBS.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(8, OUTJOBS.front().Resources.SlotRequirement.NumberOfSlots); CPPUNIT_ASSERT_EQUAL(-1, OUTJOBS.front().Resources.SlotRequirement.SlotsPerHost); CPPUNIT_ASSERT_EQUAL(Arc::SlotRequirementType::EE_DEFAULT, OUTJOBS.front().Resources.SlotRequirement.ExclusiveExecution); OUTJOBS.clear(); // Test order of attributes xrsl = "&(executable = \"/bin/echo\")" "(countpernode = 4)" "(count = 8)"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(8, OUTJOBS.front().Resources.SlotRequirement.NumberOfSlots); CPPUNIT_ASSERT_EQUAL(4, OUTJOBS.front().Resources.SlotRequirement.SlotsPerHost); OUTJOBS.clear(); // Test failure cases xrsl = "&(executable = \"/bin/echo\")" "(count = \"eight\")"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, !PARSER.Parse(xrsl, OUTJOBS)); OUTJOBS.clear(); xrsl = "&(executable = \"/bin/echo\")" "(countpernode = \"four\")"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, !PARSER.Parse(xrsl, OUTJOBS)); OUTJOBS.clear(); xrsl = "&(executable = \"/bin/echo\")" "(exclusiveexecution = \"yes-thank-you\")"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, !PARSER.Parse(xrsl, OUTJOBS)); OUTJOBS.clear(); INJOB.Application.Executable.Path = "/bin/echo"; INJOB.Resources.SlotRequirement.SlotsPerHost = 6; CPPUNIT_ASSERT_MESSAGE(MESSAGE, !PARSER.UnParse(INJOB, tempjobdesc, "nordugrid:xrsl")); } void XRSLParserTest::TestAdditionalAttributes() { std::string tmpjobdesc; INJOB.OtherAttributes["nordugrid:xrsl;hostname"] = "localhost"; INJOB.OtherAttributes["nordugrid:xrsl;unknownattribute"] = "none"; INJOB.OtherAttributes["bogus:nonexisting;foo"] = "bar"; CPPUNIT_ASSERT(PARSER.UnParse(INJOB, tmpjobdesc, "nordugrid:xrsl")); CPPUNIT_ASSERT(PARSER.Parse(tmpjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); std::map::const_iterator itAttribute; itAttribute = OUTJOBS.front().OtherAttributes.find("nordugrid:xrsl;hostname"); CPPUNIT_ASSERT(OUTJOBS.front().OtherAttributes.end() != itAttribute); CPPUNIT_ASSERT_EQUAL((std::string)"localhost", itAttribute->second); itAttribute = OUTJOBS.front().OtherAttributes.find("nordugrid:xrsl;unknownattribute"); CPPUNIT_ASSERT(OUTJOBS.front().OtherAttributes.end() == itAttribute); itAttribute = OUTJOBS.front().OtherAttributes.find("bogus:nonexisting;foo"); CPPUNIT_ASSERT(OUTJOBS.front().OtherAttributes.end() == itAttribute); } void XRSLParserTest::TestMultiRSL() { xrsl = "+(&(executable= \"/bin/exe1\"))(&(executable= \"/bin/exe2\"))"; CPPUNIT_ASSERT(PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(2, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"/bin/exe1", OUTJOBS.front().Application.Executable.Path); CPPUNIT_ASSERT_EQUAL((std::string)"/bin/exe2", OUTJOBS.back().Application.Executable.Path); } void XRSLParserTest::TestDisjunctRSL() { xrsl = "&(executable=\"/bin/exe\")" "(|(|(queue=\"q1.1\")" "(|(queue=\"q1.2.1\")" "(queue=\"q1.2.2\")" ")" ")" "(queue=\"q2\")" "(queue=\"q3\")" ")" "(arguments=\"Hello world!\")"; CPPUNIT_ASSERT(PARSER.Parse(xrsl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(4, (int)OUTJOBS.front().GetAlternatives().size()); CPPUNIT_ASSERT_EQUAL((std::string)"/bin/exe", 
OUTJOBS.front().Application.Executable.Path); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().Application.Executable.Argument.size()); CPPUNIT_ASSERT_EQUAL((std::string)"Hello world!", OUTJOBS.front().Application.Executable.Argument.front()); CPPUNIT_ASSERT_EQUAL((std::string)"q1.1", OUTJOBS.front().Resources.QueueName); CPPUNIT_ASSERT(OUTJOBS.front().UseAlternative()); CPPUNIT_ASSERT_EQUAL(4, (int)OUTJOBS.front().GetAlternatives().size()); CPPUNIT_ASSERT_EQUAL((std::string)"/bin/exe", OUTJOBS.front().Application.Executable.Path); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().Application.Executable.Argument.size()); CPPUNIT_ASSERT_EQUAL((std::string)"Hello world!", OUTJOBS.front().Application.Executable.Argument.front()); CPPUNIT_ASSERT_EQUAL((std::string)"q1.2.1", OUTJOBS.front().Resources.QueueName); CPPUNIT_ASSERT(OUTJOBS.front().UseAlternative()); CPPUNIT_ASSERT_EQUAL(4, (int)OUTJOBS.front().GetAlternatives().size()); CPPUNIT_ASSERT_EQUAL((std::string)"/bin/exe", OUTJOBS.front().Application.Executable.Path); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().Application.Executable.Argument.size()); CPPUNIT_ASSERT_EQUAL((std::string)"Hello world!", OUTJOBS.front().Application.Executable.Argument.front()); CPPUNIT_ASSERT_EQUAL((std::string)"q1.2.2", OUTJOBS.front().Resources.QueueName); CPPUNIT_ASSERT(OUTJOBS.front().UseAlternative()); CPPUNIT_ASSERT_EQUAL(4, (int)OUTJOBS.front().GetAlternatives().size()); CPPUNIT_ASSERT_EQUAL((std::string)"/bin/exe", OUTJOBS.front().Application.Executable.Path); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().Application.Executable.Argument.size()); CPPUNIT_ASSERT_EQUAL((std::string)"Hello world!", OUTJOBS.front().Application.Executable.Argument.front()); CPPUNIT_ASSERT_EQUAL((std::string)"q2", OUTJOBS.front().Resources.QueueName); CPPUNIT_ASSERT(OUTJOBS.front().UseAlternative()); CPPUNIT_ASSERT_EQUAL(4, (int)OUTJOBS.front().GetAlternatives().size()); CPPUNIT_ASSERT_EQUAL((std::string)"/bin/exe", OUTJOBS.front().Application.Executable.Path); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().Application.Executable.Argument.size()); CPPUNIT_ASSERT_EQUAL((std::string)"Hello world!", OUTJOBS.front().Application.Executable.Argument.front()); CPPUNIT_ASSERT_EQUAL((std::string)"q3", OUTJOBS.front().Resources.QueueName); CPPUNIT_ASSERT(!OUTJOBS.front().UseAlternative()); OUTJOBS.front().UseOriginal(); CPPUNIT_ASSERT_EQUAL(4, (int)OUTJOBS.front().GetAlternatives().size()); CPPUNIT_ASSERT_EQUAL((std::string)"/bin/exe", OUTJOBS.front().Application.Executable.Path); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().Application.Executable.Argument.size()); CPPUNIT_ASSERT_EQUAL((std::string)"Hello world!", OUTJOBS.front().Application.Executable.Argument.front()); CPPUNIT_ASSERT_EQUAL((std::string)"q1.1", OUTJOBS.front().Resources.QueueName); } CPPUNIT_TEST_SUITE_REGISTRATION(XRSLParserTest); nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/test/PaxHeaders.7502/JDLParserTest.cpp0000644000000000000000000000012412675602216027465 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.257698 30 ctime=1513200660.425752693 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/test/JDLParserTest.cpp0000644000175000002070000002443112675602216027536 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include std::ostream& operator<<(std::ostream& os, const std::list& strings); #include #include #include #include "../JDLParser.h" class JDLParserTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(JDLParserTest); CPPUNIT_TEST(TestExecutable); 
CPPUNIT_TEST(TestInputOutputError); CPPUNIT_TEST(TestInputFileClientStageable); CPPUNIT_TEST(TestInputFileServiceStageable); CPPUNIT_TEST(TestOutputFileClientStageable); CPPUNIT_TEST(TestOutputFileServiceStageable); CPPUNIT_TEST(TestQueue); CPPUNIT_TEST(TestAdditionalAttributes); CPPUNIT_TEST_SUITE_END(); public: JDLParserTest():PARSER((Arc::PluginArgument*)NULL) {} void setUp(); void tearDown(); void TestExecutable(); void TestInputOutputError(); void TestInputFileClientStageable(); void TestInputFileServiceStageable(); void TestOutputFileClientStageable(); void TestOutputFileServiceStageable(); void TestQueue(); void TestAdditionalAttributes(); private: Arc::JobDescription INJOB; std::list OUTJOBS; Arc::JDLParser PARSER; std::string MESSAGE; }; std::ostream& operator<<(std::ostream& os, const std::list& strings) { for (std::list::const_iterator it = strings.begin(); it != strings.end(); it++) { if (it != strings.begin()) { os << ", "; } os << "\"" << *it << "\""; } return os; } void JDLParserTest::setUp() { MESSAGE = " "; INJOB.Application.Executable.Path = "executable"; INJOB.Application.Executable.Argument.push_back("arg1"); INJOB.Application.Executable.Argument.push_back("arg2"); INJOB.Application.Executable.Argument.push_back("arg3"); } void JDLParserTest::tearDown() { } void JDLParserTest::TestExecutable() { std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "egee:jdl")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.Application.Executable.Path, OUTJOBS.front().Application.Executable.Path); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.Application.Executable.Argument, OUTJOBS.front().Application.Executable.Argument); } void JDLParserTest::TestInputOutputError() { INJOB.Application.Input = "input-file"; INJOB.Application.Output = "output-file"; INJOB.Application.Error = "error-file"; std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "egee:jdl")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.Application.Input, OUTJOBS.front().Application.Input); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.Application.Output, OUTJOBS.front().Application.Output); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.Application.Error, OUTJOBS.front().Application.Error); } /** Client stageable input file */ void JDLParserTest::TestInputFileClientStageable() { INJOB.DataStaging.InputFiles.clear(); MESSAGE = "Error parsing TestInputFileClientStageable data staging type."; Arc::InputFileType file; file.Name = "TestInputFileClientStageable"; file.Sources.push_back(Arc::URL(file.Name)); INJOB.DataStaging.InputFiles.push_back(file); std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "egee:jdl")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 2, (int)OUTJOBS.front().DataStaging.InputFiles.size()); std::list::const_iterator it = OUTJOBS.front().DataStaging.InputFiles.begin(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Name, it->Name); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)it->Sources.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Sources.back(), it->Sources.front()); it++; CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 
(std::string)"executable", it->Name); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)it->Sources.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, Arc::SourceType("executable"), it->Sources.front()); } /** Service stageable input file */ void JDLParserTest::TestInputFileServiceStageable() { INJOB.DataStaging.InputFiles.clear(); MESSAGE = "Error parsing TestInputFileServiceStageable data staging type."; Arc::InputFileType file; file.Name = "TestInputFileServiceStageable"; file.Sources.push_back(Arc::URL("http://example.com/" + file.Name)); file.IsExecutable = false; INJOB.DataStaging.InputFiles.push_back(file); std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "egee:jdl")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 2, (int)OUTJOBS.front().DataStaging.InputFiles.size()); std::list::const_iterator it = OUTJOBS.front().DataStaging.InputFiles.begin(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Name, it->Name); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.IsExecutable, it->IsExecutable); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)it->Sources.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Sources.back(), it->Sources.front()); it++; CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"executable", it->Name); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)it->Sources.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, Arc::SourceType("executable"), it->Sources.front()); } /** Client stageable output file */ void JDLParserTest::TestOutputFileClientStageable() { INJOB.DataStaging.InputFiles.clear(); INJOB.DataStaging.OutputFiles.clear(); MESSAGE = "Error parsing TestOutputFileClientStageable data staging type."; Arc::OutputFileType file; file.Name = "TestOutputFileClientStageable"; INJOB.DataStaging.OutputFiles.push_back(file); std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "egee:jdl")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.front().DataStaging.InputFiles.size()); Arc::InputFileType& ifile = OUTJOBS.front().DataStaging.InputFiles.front(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"executable", ifile.Name); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)ifile.Sources.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, Arc::SourceType("executable"), ifile.Sources.front()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.front().DataStaging.OutputFiles.size()); Arc::OutputFileType& ofile = OUTJOBS.front().DataStaging.OutputFiles.front(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Name, ofile.Name); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 0, (int)ofile.Targets.size()); } /** Service stageable output file */ void JDLParserTest::TestOutputFileServiceStageable() { INJOB.DataStaging.InputFiles.clear(); INJOB.DataStaging.OutputFiles.clear(); MESSAGE = "Error parsing TestOutputFileServiceStageable data staging type."; Arc::OutputFileType file; file.Name = "TestOutputFileServiceStageable"; file.Targets.push_back(Arc::URL("http://example.com/" + file.Name)); INJOB.DataStaging.OutputFiles.push_back(file); std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "egee:jdl")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); 
CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.front().DataStaging.InputFiles.size()); Arc::InputFileType& ifile = OUTJOBS.front().DataStaging.InputFiles.front(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, (std::string)"executable", ifile.Name); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)ifile.Sources.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, Arc::SourceType("executable"), ifile.Sources.front()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.front().DataStaging.OutputFiles.size()); Arc::OutputFileType& ofile = OUTJOBS.front().DataStaging.OutputFiles.front(); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Name, ofile.Name); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)ofile.Targets.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, file.Targets.back(), ofile.Targets.front()); } void JDLParserTest::TestQueue() { std::string jdl = "[" "Executable = \"executable\";" "QueueName = \"q1\";" "]"; CPPUNIT_ASSERT(PARSER.Parse(jdl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"q1", OUTJOBS.front().Resources.QueueName); CPPUNIT_ASSERT_EQUAL(0, (int)OUTJOBS.front().GetAlternatives().size()); CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), jdl, "egee:jdl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(jdl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"q1", OUTJOBS.front().Resources.QueueName); CPPUNIT_ASSERT_EQUAL(0, (int)OUTJOBS.front().GetAlternatives().size()); } void JDLParserTest::TestAdditionalAttributes() { std::string tmpjobdesc; INJOB.OtherAttributes["egee:jdl;batchsystem"] = "test"; INJOB.OtherAttributes["egee:jdl;unknownattribute"] = "none"; INJOB.OtherAttributes["bogus:nonexisting;foo"] = "bar"; CPPUNIT_ASSERT(PARSER.UnParse(INJOB, tmpjobdesc, "egee:jdl")); CPPUNIT_ASSERT(PARSER.Parse(tmpjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); std::map::const_iterator itAttribute; itAttribute = OUTJOBS.front().OtherAttributes.find("egee:jdl;batchsystem"); CPPUNIT_ASSERT(OUTJOBS.front().OtherAttributes.end() != itAttribute); CPPUNIT_ASSERT_EQUAL((std::string)"test", itAttribute->second); itAttribute = OUTJOBS.front().OtherAttributes.find("egee:jdl;unknownattribute"); CPPUNIT_ASSERT(OUTJOBS.front().OtherAttributes.end() == itAttribute); itAttribute = OUTJOBS.front().OtherAttributes.find("bogus:nonexisting;foo"); CPPUNIT_ASSERT(OUTJOBS.front().OtherAttributes.end() == itAttribute); } CPPUNIT_TEST_SUITE_REGISTRATION(JDLParserTest); nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/test/PaxHeaders.7502/ARCJSDLParserTest.cpp0000644000000000000000000000012413124220447030126 xustar000000000000000027 mtime=1498489127.890866 27 atime=1513200574.264698 30 ctime=1513200660.424752681 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/test/ARCJSDLParserTest.cpp0000644000175000002070000010216713124220447030202 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include std::ostream& operator<<(std::ostream& os, const std::list& strings); #include #include #include #include #include "../ARCJSDLParser.h" class ARCJSDLParserTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(ARCJSDLParserTest); CPPUNIT_TEST(TestExecutable); CPPUNIT_TEST(TestInputOutputError); CPPUNIT_TEST(TestInputFileClientStageable); CPPUNIT_TEST(TestInputFileServiceStageable); CPPUNIT_TEST(TestOutputFileClientStageable); CPPUNIT_TEST(TestOutputFileServiceStageable); CPPUNIT_TEST(TestURIOptionsInput); CPPUNIT_TEST(TestURIOptionsOutput); CPPUNIT_TEST(TestQueue); CPPUNIT_TEST(TestDryRun); 
CPPUNIT_TEST(TestAccessControl); CPPUNIT_TEST(TestBasicJSDLCompliance); CPPUNIT_TEST(TestPOSIXCompliance); CPPUNIT_TEST(TestHPCCompliance); CPPUNIT_TEST(TestRangeValueType); CPPUNIT_TEST_SUITE_END(); public: ARCJSDLParserTest():PARSER((Arc::PluginArgument*)NULL) {} void setUp(); void tearDown(); void TestExecutable(); void TestInputOutputError(); void TestInputFileClientStageable(); void TestInputFileServiceStageable(); void TestOutputFileClientStageable(); void TestOutputFileServiceStageable(); void TestURIOptionsInput(); void TestURIOptionsOutput(); void TestQueue(); void TestDryRun(); void TestAccessControl(); void TestBasicJSDLCompliance(); void TestPOSIXCompliance(); void TestHPCCompliance(); void TestRangeValueType(); private: Arc::JobDescription INJOB; std::list OUTJOBS; Arc::ARCJSDLParser PARSER; std::string MESSAGE; }; std::ostream& operator<<(std::ostream& os, const std::list& strings) { for (std::list::const_iterator it = strings.begin(); it != strings.end(); it++) { if (it != strings.begin()) { os << ", "; } os << "\"" << *it << "\""; } return os; } void ARCJSDLParserTest::setUp() { INJOB.Application.Executable.Path = "executable"; INJOB.Application.Executable.Argument.push_back("arg1"); INJOB.Application.Executable.Argument.push_back("arg2"); INJOB.Application.Executable.Argument.push_back("arg3"); } void ARCJSDLParserTest::tearDown() { } void ARCJSDLParserTest::TestExecutable() { MESSAGE = "Error parsing executable related attributes."; std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "nordugrid:jsdl")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.Application.Executable.Path, OUTJOBS.front().Application.Executable.Path); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.Application.Executable.Argument, OUTJOBS.front().Application.Executable.Argument); } void ARCJSDLParserTest::TestInputOutputError() { MESSAGE = "Error parsing standard input/output/error attributes."; INJOB.Application.Input = "input-file"; INJOB.Application.Output = "output-file"; INJOB.Application.Error = "error-file"; std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "nordugrid:jsdl")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.Application.Input, OUTJOBS.front().Application.Input); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.Application.Output, OUTJOBS.front().Application.Output); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.Application.Error, OUTJOBS.front().Application.Error); } /** Client stageable input file */ void ARCJSDLParserTest::TestInputFileClientStageable() { INJOB.DataStaging.InputFiles.clear(); MESSAGE = "Error parsing TestInputFileClientStageable data staging type."; Arc::InputFileType file; file.Name = "TestInputFileClientStageable"; file.Sources.push_back(Arc::URL(file.Name)); INJOB.DataStaging.InputFiles.push_back(file); std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "nordugrid:jsdl")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_MESSAGE(MESSAGE, OUTJOBS.front().DataStaging.InputFiles.size() == 1); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.DataStaging.InputFiles.front().Name, 
OUTJOBS.front().DataStaging.InputFiles.front().Name); CPPUNIT_ASSERT_MESSAGE(MESSAGE, OUTJOBS.front().DataStaging.InputFiles.front().Sources.size() == 1 && OUTJOBS.front().DataStaging.InputFiles.front().Sources.front()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.DataStaging.InputFiles.front().Sources.front(), OUTJOBS.front().DataStaging.InputFiles.front().Sources.front()); } /** Service stageable input file */ void ARCJSDLParserTest::TestInputFileServiceStageable() { INJOB.DataStaging.InputFiles.clear(); MESSAGE = "Error parsing TestInputFileServiceStageable data staging type."; Arc::InputFileType file; file.Name = "TestInputFileServiceStageable"; file.Sources.push_back(Arc::URL("http://example.com/" + file.Name)); file.IsExecutable = false; INJOB.DataStaging.InputFiles.push_back(file); CPPUNIT_ASSERT(INJOB.DataStaging.InputFiles.size() == 1); CPPUNIT_ASSERT(INJOB.DataStaging.InputFiles.front().Sources.size() == 1 && INJOB.DataStaging.InputFiles.front().Sources.front()); std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "nordugrid:jsdl")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_MESSAGE(MESSAGE, OUTJOBS.front().DataStaging.InputFiles.size() == 1); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.DataStaging.InputFiles.front().Name, OUTJOBS.front().DataStaging.InputFiles.front().Name); CPPUNIT_ASSERT_MESSAGE(MESSAGE, OUTJOBS.front().DataStaging.InputFiles.front().Sources.size() == 1 && OUTJOBS.front().DataStaging.InputFiles.front().Sources.front()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.DataStaging.InputFiles.front().Sources.front(), OUTJOBS.front().DataStaging.InputFiles.front().Sources.front()); } /** Client stageable output file */ void ARCJSDLParserTest::TestOutputFileClientStageable() { INJOB.DataStaging.OutputFiles.clear(); MESSAGE = "Error parsing TestOutputFileClientStageable data staging type."; Arc::OutputFileType file; file.Name = "TestOutputFileClientStageable"; INJOB.DataStaging.OutputFiles.push_back(file); std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "nordugrid:jsdl")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_MESSAGE(MESSAGE, OUTJOBS.front().DataStaging.OutputFiles.size() == 1); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.DataStaging.OutputFiles.front().Name, OUTJOBS.front().DataStaging.OutputFiles.front().Name); } /** Service stageable output file */ void ARCJSDLParserTest::TestOutputFileServiceStageable() { INJOB.DataStaging.OutputFiles.clear(); MESSAGE = "Error parsing TestOutputFileServiceStageable data staging type."; Arc::OutputFileType file; file.Name = "TestOutputFileServiceStageable"; file.Targets.push_back(Arc::URL("http://example.com/" + file.Name)); INJOB.DataStaging.OutputFiles.push_back(file); std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(INJOB, tempjobdesc, "nordugrid:jsdl")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_MESSAGE(MESSAGE, OUTJOBS.front().DataStaging.OutputFiles.size() == 1); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.DataStaging.OutputFiles.front().Name, OUTJOBS.front().DataStaging.OutputFiles.front().Name); CPPUNIT_ASSERT_MESSAGE(MESSAGE, OUTJOBS.front().DataStaging.OutputFiles.front().Targets.size() == 1 && 
OUTJOBS.front().DataStaging.OutputFiles.front().Targets.front()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, INJOB.DataStaging.OutputFiles.front().Targets.front(), OUTJOBS.front().DataStaging.OutputFiles.front().Targets.front()); } void ARCJSDLParserTest::TestURIOptionsInput() { std::string jsdl = "" "" "" "" "" "executable" "" "" "" "test.file" "" "gsiftp://example.com/test.file" "threads=5" "" "" "" ""; CPPUNIT_ASSERT(PARSER.Parse(jsdl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.InputFiles.size()); const Arc::InputFileType f(OUTJOBS.front().DataStaging.InputFiles.front()); CPPUNIT_ASSERT_EQUAL(std::string("test.file"), f.Name); CPPUNIT_ASSERT_EQUAL(std::string("gsiftp://example.com:2811/test.file"), f.Sources.front().str()); CPPUNIT_ASSERT_EQUAL(std::string("5"), f.Sources.front().Option("threads")); } void ARCJSDLParserTest::TestURIOptionsOutput() { std::string jsdl = "" "" "" "" "" "executable" "" "" "" "test.file" "" "lfc://example.com/test.file" "checksum=md5" "" "gsiftp://example.com/test.file" "threads=5" "" "" "gsiftp://example2.com/test.file" "threads=10" "" "" "" "" ""; CPPUNIT_ASSERT(PARSER.Parse(jsdl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.front().DataStaging.OutputFiles.size()); const Arc::OutputFileType f(OUTJOBS.front().DataStaging.OutputFiles.front()); CPPUNIT_ASSERT_EQUAL(std::string("test.file"), f.Name); CPPUNIT_ASSERT_EQUAL(std::string("lfc://example.com:5010/test.file"), f.Targets.front().str()); CPPUNIT_ASSERT_EQUAL(std::string("md5"), f.Targets.front().Option("checksum")); const std::list locations(f.Targets.front().Locations()); CPPUNIT_ASSERT_EQUAL(2, (int)locations.size()); CPPUNIT_ASSERT_EQUAL(std::string("gsiftp://example.com:2811/test.file"), locations.front().str()); CPPUNIT_ASSERT_EQUAL(std::string("5"), locations.front().Option("threads")); CPPUNIT_ASSERT_EQUAL(std::string("gsiftp://example2.com:2811/test.file"), locations.back().str()); CPPUNIT_ASSERT_EQUAL(std::string("10"), locations.back().Option("threads")); } void ARCJSDLParserTest::TestQueue() { std::string jsdl = "" "" "" "" "" "executable" "" "" "" "q1" "" "" ""; CPPUNIT_ASSERT(PARSER.Parse(jsdl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"q1", OUTJOBS.front().Resources.QueueName); CPPUNIT_ASSERT_EQUAL(0, (int)OUTJOBS.front().GetAlternatives().size()); CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), jsdl, "nordugrid:jsdl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(jsdl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL((std::string)"q1", OUTJOBS.front().Resources.QueueName); CPPUNIT_ASSERT_EQUAL(0, (int)OUTJOBS.front().GetAlternatives().size()); jsdl = "" "" "" "" "" "executable" "" "" "" "q1" "" "" ""; OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(jsdl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); std::map::const_iterator itAttribute; itAttribute = OUTJOBS.front().OtherAttributes.find("nordugrid:broker;reject_queue"); CPPUNIT_ASSERT(OUTJOBS.front().OtherAttributes.end() != itAttribute); CPPUNIT_ASSERT_EQUAL((std::string)"q1", itAttribute->second); CPPUNIT_ASSERT_EQUAL(0, (int)OUTJOBS.front().GetAlternatives().size()); CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), jsdl, "nordugrid:jsdl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(jsdl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); itAttribute = OUTJOBS.front().OtherAttributes.find("nordugrid:broker;reject_queue"); 
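  // After the UnParse/Parse round trip the "nordugrid:broker;reject_queue"
  // hint is not expected to be preserved, so this second lookup must come
  // back empty.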
CPPUNIT_ASSERT(OUTJOBS.front().OtherAttributes.end() == itAttribute); CPPUNIT_ASSERT_EQUAL(0, (int)OUTJOBS.front().GetAlternatives().size()); } void ARCJSDLParserTest::TestDryRun() { std::string jsdl = "" "" "" "" "" "executable" "" "yes" "" "" ""; CPPUNIT_ASSERT(PARSER.Parse(jsdl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT(OUTJOBS.front().Application.DryRun); CPPUNIT_ASSERT_EQUAL(0, (int)OUTJOBS.front().GetAlternatives().size()); CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), jsdl, "nordugrid:jsdl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(jsdl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT(OUTJOBS.front().Application.DryRun); CPPUNIT_ASSERT_EQUAL(0, (int)OUTJOBS.front().GetAlternatives().size()); jsdl = "" "" "" "" "" "executable" "" "no" "" "" ""; OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(jsdl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT(!OUTJOBS.front().Application.DryRun); CPPUNIT_ASSERT_EQUAL(0, (int)OUTJOBS.front().GetAlternatives().size()); CPPUNIT_ASSERT(PARSER.UnParse(OUTJOBS.front(), jsdl, "nordugrid:jsdl")); OUTJOBS.clear(); CPPUNIT_ASSERT(PARSER.Parse(jsdl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT(!OUTJOBS.front().Application.DryRun); CPPUNIT_ASSERT_EQUAL(0, (int)OUTJOBS.front().GetAlternatives().size()); } void ARCJSDLParserTest::TestAccessControl() { std::string jsdl = "" "" "" "" "" "executable" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" ""; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(jsdl, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT(OUTJOBS.front().Application.AccessControl); CPPUNIT_ASSERT_EQUAL((std::string)"gacl", OUTJOBS.front().Application.AccessControl.Name()); CPPUNIT_ASSERT_EQUAL(1, OUTJOBS.front().Application.AccessControl.Size()); CPPUNIT_ASSERT_EQUAL((std::string)"entry", OUTJOBS.front().Application.AccessControl.Child().Name()); std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(OUTJOBS.front(), tempjobdesc, "nordugrid:jsdl")); OUTJOBS.clear(); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(tempjobdesc, OUTJOBS)); CPPUNIT_ASSERT_EQUAL(1, (int)OUTJOBS.size()); CPPUNIT_ASSERT(OUTJOBS.front().Application.AccessControl); CPPUNIT_ASSERT_EQUAL((std::string)"gacl", OUTJOBS.front().Application.AccessControl.Name()); CPPUNIT_ASSERT_EQUAL(1, OUTJOBS.front().Application.AccessControl.Size()); CPPUNIT_ASSERT_EQUAL((std::string)"entry", OUTJOBS.front().Application.AccessControl.Child().Name()); } void ARCJSDLParserTest::TestBasicJSDLCompliance() { /** Testing compliance with GFD 56 **/ MESSAGE = "Error: The parser does not comply with the JDSL specification."; const std::string jsdlStr = "" "" "" "" "" "executable" "" "" "" "" "" "128974848" // 1024*1024*123 "" "" "" "" ""; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(jsdlStr, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 123, OUTJOBS.front().Resources.DiskSpaceRequirement.DiskSpace.min); std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(OUTJOBS.front(), tempjobdesc, "nordugrid:jsdl")); Arc::XMLNode xmlArcJSDL(tempjobdesc); } void ARCJSDLParserTest::TestPOSIXCompliance() { /** Testing compliance with GFD 56 - the POSIX extension **/ MESSAGE = "Error: The parser does not comply with the JDSL POSIX specification."; const std::string posixJSDLStr = "" "" "" "" "" "executable" "arg1" "arg2" "arg3" "input" "output" "error" "value1" "value2" "value3" "50" "104857600" 
"110" "2" "524288000" "7" "" "" "" ""; INJOB.Application.Input = "input"; INJOB.Application.Output = "output"; INJOB.Application.Error = "error"; INJOB.Application.Environment.push_back(std::make_pair("var1", "value1")); INJOB.Application.Environment.push_back(std::make_pair("var2", "value2")); INJOB.Application.Environment.push_back(std::make_pair("var3", "value3")); INJOB.Resources.TotalWallTime = 50; INJOB.Resources.IndividualPhysicalMemory = 100; INJOB.Resources.TotalCPUTime = 110; INJOB.Resources.SlotRequirement.NumberOfSlots = 2; INJOB.Resources.IndividualVirtualMemory = 500; INJOB.Resources.ParallelEnvironment.ThreadsPerProcess = 7; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(posixJSDLStr, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", INJOB.Application.Executable.Path, OUTJOBS.front().Application.Executable.Path); CPPUNIT_ASSERT_MESSAGE("POSIX compliance failure", INJOB.Application.Executable.Argument == OUTJOBS.front().Application.Executable.Argument); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", INJOB.Application.Input, OUTJOBS.front().Application.Input); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", INJOB.Application.Output, OUTJOBS.front().Application.Output); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", INJOB.Application.Error, OUTJOBS.front().Application.Error); CPPUNIT_ASSERT_MESSAGE("POSIX compliance failure", INJOB.Application.Environment == OUTJOBS.front().Application.Environment); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", INJOB.Resources.TotalWallTime.range.max, OUTJOBS.front().Resources.TotalWallTime.range.max); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", INJOB.Resources.IndividualPhysicalMemory.max, OUTJOBS.front().Resources.IndividualPhysicalMemory.max); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", INJOB.Resources.TotalCPUTime.range.max, OUTJOBS.front().Resources.TotalCPUTime.range.max); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", INJOB.Resources.SlotRequirement.NumberOfSlots, OUTJOBS.front().Resources.SlotRequirement.NumberOfSlots); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", INJOB.Resources.IndividualVirtualMemory.max, OUTJOBS.front().Resources.IndividualVirtualMemory.max); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", INJOB.Resources.ParallelEnvironment.ThreadsPerProcess, OUTJOBS.front().Resources.ParallelEnvironment.ThreadsPerProcess); std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(OUTJOBS.front(), tempjobdesc, "nordugrid:jsdl")); Arc::XMLNode xmlArcJSDL(tempjobdesc); Arc::XMLNode pApp = xmlArcJSDL["JobDescription"]["Application"]["POSIXApplication"]; CPPUNIT_ASSERT_MESSAGE("POSIX compliance failure", pApp); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", INJOB.Application.Executable.Path, (std::string)pApp["Executable"]); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", (std::string)"arg1", (std::string)pApp["Argument"][0]); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", (std::string)"arg2", (std::string)pApp["Argument"][1]); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", (std::string)"arg3", (std::string)pApp["Argument"][2]); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", INJOB.Application.Input, (std::string)pApp["Input"]); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", INJOB.Application.Output, (std::string)pApp["Output"]); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", 
INJOB.Application.Error, (std::string)pApp["Error"]); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", (std::string)"var1", (std::string)pApp["Environment"][0].Attribute("name")); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", (std::string)"value1", (std::string)pApp["Environment"][0]); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", (std::string)"var2", (std::string)pApp["Environment"][1].Attribute("name")); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", (std::string)"value2", (std::string)pApp["Environment"][1]); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", (std::string)"var3", (std::string)pApp["Environment"][2].Attribute("name")); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", (std::string)"value3", (std::string)pApp["Environment"][2]); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", INJOB.Resources.TotalWallTime.range.max, Arc::stringto(pApp["WallTimeLimit"])); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", INJOB.Resources.IndividualPhysicalMemory.max, (int)(Arc::stringto(pApp["MemoryLimit"])/(1024*1024))); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", INJOB.Resources.TotalCPUTime.range.max, Arc::stringto(pApp["CPUTimeLimit"])); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", INJOB.Resources.SlotRequirement.NumberOfSlots, Arc::stringto(pApp["ProcessCountLimit"])); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", INJOB.Resources.IndividualVirtualMemory.max, Arc::stringto(pApp["VirtualMemoryLimit"])/(1024*1024)); CPPUNIT_ASSERT_EQUAL_MESSAGE("POSIX compliance failure", INJOB.Resources.ParallelEnvironment.ThreadsPerProcess, Arc::stringto(pApp["ThreadCountLimit"])); } void ARCJSDLParserTest::TestHPCCompliance() { /** Testing compliance with GFD 111 **/ MESSAGE = "Error: The parser does not comply with the JSDL HPC Profile Application Extension specification."; const std::string hpcJSDLStr = "" "" "" "" "" "executable" "arg1" "arg2" "arg3" "input" "output" "error" "value1" "value2" "value3" "50" "100" "110" "2" "500" "7" "" "" "" ""; INJOB.Application.Input = "input"; INJOB.Application.Output = "output"; INJOB.Application.Error = "error"; INJOB.Application.Environment.push_back(std::make_pair("var1", "value1")); INJOB.Application.Environment.push_back(std::make_pair("var2", "value2")); INJOB.Application.Environment.push_back(std::make_pair("var3", "value3")); CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(hpcJSDLStr, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE("HPC compliance failure", INJOB.Application.Executable.Path, OUTJOBS.front().Application.Executable.Path); CPPUNIT_ASSERT_MESSAGE("HPC compliance failure", INJOB.Application.Executable.Argument == OUTJOBS.front().Application.Executable.Argument); CPPUNIT_ASSERT_EQUAL_MESSAGE("HPC compliance failure", INJOB.Application.Input, OUTJOBS.front().Application.Input); CPPUNIT_ASSERT_EQUAL_MESSAGE("HPC compliance failure", INJOB.Application.Output, OUTJOBS.front().Application.Output); CPPUNIT_ASSERT_EQUAL_MESSAGE("HPC compliance failure", INJOB.Application.Error, OUTJOBS.front().Application.Error); CPPUNIT_ASSERT_MESSAGE("HPC compliance failure", INJOB.Application.Environment == OUTJOBS.front().Application.Environment); std::string tempjobdesc; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.UnParse(OUTJOBS.front(), tempjobdesc, "nordugrid:jsdl")); Arc::XMLNode xmlArcJSDL(tempjobdesc); Arc::XMLNode pApp = xmlArcJSDL["JobDescription"]["Application"]["HPCProfileApplication"]; 
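  // pApp is the HPCProfileApplication element produced by UnParse; the
  // assertions below check that executable, arguments, standard streams and
  // environment variables survive the round trip unchanged.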
CPPUNIT_ASSERT_MESSAGE("HPC compliance failure", pApp); CPPUNIT_ASSERT_EQUAL_MESSAGE("HPC compliance failure", INJOB.Application.Executable.Path, (std::string)pApp["Executable"]); CPPUNIT_ASSERT_EQUAL_MESSAGE("HPC compliance failure", (std::string)"arg1", (std::string)pApp["Argument"][0]); CPPUNIT_ASSERT_EQUAL_MESSAGE("HPC compliance failure", (std::string)"arg2", (std::string)pApp["Argument"][1]); CPPUNIT_ASSERT_EQUAL_MESSAGE("HPC compliance failure", (std::string)"arg3", (std::string)pApp["Argument"][2]); CPPUNIT_ASSERT_EQUAL_MESSAGE("HPC compliance failure", INJOB.Application.Input, (std::string)pApp["Input"]); CPPUNIT_ASSERT_EQUAL_MESSAGE("HPC compliance failure", INJOB.Application.Output, (std::string)pApp["Output"]); CPPUNIT_ASSERT_EQUAL_MESSAGE("HPC compliance failure", INJOB.Application.Error, (std::string)pApp["Error"]); CPPUNIT_ASSERT_EQUAL_MESSAGE("HPC compliance failure", (std::string)"var1", (std::string)pApp["Environment"][0].Attribute("name")); CPPUNIT_ASSERT_EQUAL_MESSAGE("HPC compliance failure", (std::string)"value1", (std::string)pApp["Environment"][0]); CPPUNIT_ASSERT_EQUAL_MESSAGE("HPC compliance failure", (std::string)"var2", (std::string)pApp["Environment"][1].Attribute("name")); CPPUNIT_ASSERT_EQUAL_MESSAGE("HPC compliance failure", (std::string)"value2", (std::string)pApp["Environment"][1]); CPPUNIT_ASSERT_EQUAL_MESSAGE("HPC compliance failure", (std::string)"var3", (std::string)pApp["Environment"][2].Attribute("name")); CPPUNIT_ASSERT_EQUAL_MESSAGE("HPC compliance failure", (std::string)"value3", (std::string)pApp["Environment"][2]); } void ARCJSDLParserTest::TestRangeValueType() { /** Testing compliance with the RangeValue_Type **/ MESSAGE = "Error: The parser does not comply with the JSDL RangeValue_Type type."; const std::string beforeElement = "" "" "executable" ""; const std::string afterElement =""; std::string element; element = "3600.3"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(beforeElement + element + afterElement, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 3600, OUTJOBS.front().Resources.IndividualCPUTime.range.max); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, -1, OUTJOBS.front().Resources.IndividualCPUTime.range.min); OUTJOBS.clear(); element = "134.5"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(beforeElement + element + afterElement, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, -1, OUTJOBS.front().Resources.IndividualCPUTime.range.max); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 134, OUTJOBS.front().Resources.IndividualCPUTime.range.min); OUTJOBS.clear(); element = "234.5"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(beforeElement + element + afterElement, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 234, OUTJOBS.front().Resources.IndividualCPUTime.range.max); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, -1, OUTJOBS.front().Resources.IndividualCPUTime.range.min); OUTJOBS.clear(); element = "234.5123.4"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(beforeElement + element + afterElement, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 234, OUTJOBS.front().Resources.IndividualCPUTime.range.max); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 123, OUTJOBS.front().Resources.IndividualCPUTime.range.min); OUTJOBS.clear(); element = "234.5123.4"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, PARSER.Parse(beforeElement + element + 
afterElement, OUTJOBS)); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 1, (int)OUTJOBS.size()); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 234, OUTJOBS.front().Resources.IndividualCPUTime.range.max); CPPUNIT_ASSERT_EQUAL_MESSAGE(MESSAGE, 123, OUTJOBS.front().Resources.IndividualCPUTime.range.min); OUTJOBS.clear(); element = "123.4234.5"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, !PARSER.Parse(beforeElement + element + afterElement, OUTJOBS)); OUTJOBS.clear(); element = "234.5123.4"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, !PARSER.Parse(beforeElement + element + afterElement, OUTJOBS)); OUTJOBS.clear(); element = "234.5123.4"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, !PARSER.Parse(beforeElement + element + afterElement, OUTJOBS)); OUTJOBS.clear(); element = "234.5"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, !PARSER.Parse(beforeElement + element + afterElement, OUTJOBS)); OUTJOBS.clear(); element = "234.5"; CPPUNIT_ASSERT_MESSAGE(MESSAGE, !PARSER.Parse(beforeElement + element + afterElement, OUTJOBS)); } CPPUNIT_TEST_SUITE_REGISTRATION(ARCJSDLParserTest); nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/PaxHeaders.7502/DescriptorsJobDescriptionParser0000644000000000000000000000012412675602216031614 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.296699 30 ctime=1513200660.400752387 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/DescriptorsJobDescriptionParser.cpp0000644000175000002070000000140612675602216032443 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include "ARCJSDLParser.h" #include "JDLParser.h" #include "XRSLParser.h" #include "ADLParser.h" extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "ARCJSDLParser", "HED:JobDescriptionParserPlugin", "NorduGrid ARC JSDL, POSIX-JSDL, HPCP-JSDL (nordugrid:jsdl)", 0, &Arc::ARCJSDLParser::Instance }, { "JDLParser", "HED:JobDescriptionParserPlugin", "CREAM JDL (egee:jdl)", 0, &Arc::JDLParser::Instance }, { "XRSLParser", "HED:JobDescriptionParserPlugin", "NorduGrid xRSL (nordugrid:xrsl)", 0, &Arc::XRSLParser::Instance }, { "EMIESADLParser", "HED:JobDescriptionParserPlugin", "EMI-ES ADL (emies:adl)", 0, &Arc::ADLParser::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/PaxHeaders.7502/XMLNodeRecover.h0000644000000000000000000000012412675602216026321 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.276699 30 ctime=1513200660.399752375 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/XMLNodeRecover.h0000644000175000002070000000146712675602216026376 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_XMLNODERECOVER_H__ #define __ARC_XMLNODERECOVER_H__ #include namespace Arc { class XMLNodeRecover : public XMLNode { public: XMLNodeRecover(const std::string& xml); ~XMLNodeRecover(); bool HasErrors() const { return !errors.empty(); }; const std::list& GetErrors() const { return errors; }; void print_error(const xmlError& error); static void structured_error_handler(void *userData, xmlErrorPtr error); private: XMLNodeRecover(void) {}; XMLNodeRecover(const XMLNode&) {} XMLNodeRecover(const char *xml, int len = -1) {} XMLNodeRecover(long) {} XMLNodeRecover(const NS&, const char *) {} std::list errors; }; } // namespace Arc #endif // __ARC_XMLNODERECOVER_H__ nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/PaxHeaders.7502/JDLParser.cpp0000644000000000000000000000012412733561034025643 xustar000000000000000027 mtime=1466884636.002405 27 atime=1513200574.301699 30 ctime=1513200660.392752289 
nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/JDLParser.cpp0000644000175000002070000007734012733561034025723 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "JDLParser.h" #define ADDJDLSTRING(X, Y) (!(X).empty() ? " " Y " = \"" + X + "\";\n" : "") #define ADDJDLNUMBER(X, Y) ((X) > -1 ? " " Y " = \"" + tostring(X) + "\";\n" : "") namespace Arc { /// \mapname JDL CREAM JDL JDLParser::JDLParser(PluginArgument* parg) : JobDescriptionParserPlugin(parg) { supportedLanguages.push_back("egee:jdl"); } JDLParser::~JDLParser() {} Plugin* JDLParser::Instance(PluginArgument *arg) { return new JDLParser(arg); } bool JDLParser::splitJDL(const std::string& original_string, std::list& lines) const { // Clear the return variable lines.clear(); std::string jdl_text = original_string; bool quotation = false; std::list stack; std::string actual_line; if(!jdl_text.empty()) for (int i = 0; i < (int)jdl_text.size() - 1; i++) { // Looking for control character marks the line end if (jdl_text[i] == ';' && !quotation && stack.empty()) { lines.push_back(actual_line); actual_line.clear(); continue; } else if (jdl_text[i] == ';' && !quotation && stack.back() == '{') { logger.msg(ERROR, "[JDLParser] Semicolon (;) is not allowed inside brackets, at '%s;'.", actual_line); return false; } // Determinize the quotations if (jdl_text[i] == '"') { if (!quotation) { quotation = true; } else if ((i > 0) && (jdl_text[i - 1] != '\\')) { quotation = false; } } else if (!quotation) { if (jdl_text[i] == '{' || jdl_text[i] == '[') stack.push_back(jdl_text[i]); else if (jdl_text[i] == '}') { if (stack.back() == '{') stack.pop_back(); else return false; } else if (jdl_text[i] == ']') { if (stack.back() == '[') stack.pop_back(); else return false; } } actual_line += jdl_text[i]; } return true; } std::string JDLParser::simpleJDLvalue(const std::string& attributeValue) { const std::string whitespaces(" \t\f\v\n\r"); const size_t last_pos = attributeValue.find_last_of("\""); // If the text is not between quotation marks, then return with the original form if ((last_pos == std::string::npos) || (attributeValue.substr(attributeValue.find_first_not_of(whitespaces), 1) != "\"")) { return trim(attributeValue); } else { // Else remove the marks and return with the quotation's content const size_t first_pos = attributeValue.find_first_of("\""); if(last_pos == first_pos) return trim(attributeValue); return attributeValue.substr(first_pos + 1, last_pos - first_pos - 1); } } std::list JDLParser::listJDLvalue(const std::string& attributeValue, std::pair brackets, char lineEnd) { std::list elements; unsigned long first_bracket = attributeValue.find_first_of(brackets.first); if (first_bracket == std::string::npos) { elements.push_back(simpleJDLvalue(attributeValue)); return elements; } unsigned long last_bracket = attributeValue.find_last_of(brackets.second); if (last_bracket == std::string::npos) { elements.push_back(simpleJDLvalue(attributeValue)); return elements; } std::list listElements; if (first_bracket != last_bracket) { tokenize(attributeValue.substr(first_bracket + 1, last_bracket - first_bracket - 1), listElements, tostring(lineEnd)); } for (std::list::const_iterator it = listElements.begin(); it != listElements.end(); it++) { elements.push_back(simpleJDLvalue(*it)); } return elements; } bool JDLParser::handleJDLattribute(const std::string& attributeName_, const std::string& attributeValue, JobDescription& job) const 
{ // To do the attributes name case-insensitive do them lowercase and remove the quotation marks std::string attributeName = lower(attributeName_); if (attributeName == "type") { std::string value = lower(simpleJDLvalue(attributeValue)); if (value == "job") return true; if (value == "dag") { logger.msg(VERBOSE, "[JDLParser] This kind of JDL descriptor is not supported yet: %s", value); return false; // This kind of JDL decriptor is not supported yet } if (value == "collection") { logger.msg(VERBOSE, "[JDLParser] This kind of JDL descriptor is not supported yet: %s", value); return false; // This kind of JDL decriptor is not supported yet } logger.msg(VERBOSE, "[JDLParser] Attribute named %s has unknown value: %s", attributeName, value); return false; // Unknown attribute value - error } else if (attributeName == "jobtype") return true; // Skip this attribute /// \mapattr executable -> ExecutableType::Path else if (attributeName == "executable") { job.Application.Executable.Path = simpleJDLvalue(attributeValue); return true; } /// \mapattr arguments -> ExecutableType::Argument else if (attributeName == "arguments") { tokenize(simpleJDLvalue(attributeValue), job.Application.Executable.Argument); return true; } /// \mapattr stdinput -> Input else if (attributeName == "stdinput") { job.Application.Input = simpleJDLvalue(attributeValue); return true; } /// \mapattr stdoutput -> Output else if (attributeName == "stdoutput") { job.Application.Output = simpleJDLvalue(attributeValue); return true; } /// \mapattr stderror -> Error else if (attributeName == "stderror") { job.Application.Error = simpleJDLvalue(attributeValue); return true; } else if (attributeName == "inputsandbox") { // Wait with handling this attribute till everything have been parsed job.OtherAttributes["egee:jdl;inputsandbox"] = attributeValue; return true; } else if (attributeName == "inputsandboxbaseuri") { // Wait with handling this attribute till everything have been parsed job.OtherAttributes["egee:jdl;inputsandboxbaseuri"] = attributeValue; return true; } else if (attributeName == "outputsandbox") { std::list outputfiles = listJDLvalue(attributeValue); for (std::list::const_iterator it = outputfiles.begin(); it != outputfiles.end(); it++) { OutputFileType file; file.Name = *it; file.Targets.push_back(URL(*it)); if (!file.Targets.back()) { return false; } // Initializing these variables job.DataStaging.OutputFiles.push_back(file); } return true; } else if (attributeName == "outputsandboxdesturi") { std::list value = listJDLvalue(attributeValue); std::list::iterator i = value.begin(); for (std::list::iterator it = job.DataStaging.OutputFiles.begin(); it != job.DataStaging.OutputFiles.end(); it++) { if (it->Targets.empty()) { continue; } if (i != value.end()) { URL url = *i; if (url.Protocol() == "gsiftp" && url.Host() == "localhost") { /* Specifying the local grid ftp server (local to CREAM), * is the "same", in ARC analogy, to specify the output * files being user downloadable files. Upon finished job * execution CREAM will copy outputfiles to the specified * destination, it does not support storing them at the * working directory of the job for later retrieval. Instead * the local grid ftp server to CREAM can be specified. */ it->Targets.clear(); } else { it->Targets.front() = url; } i++; } else { logger.msg(VERBOSE, "Not enough outputsandboxdesturi elements!"); return false; } } return true; } /* * The parsing of the outputsandboxbasedesturi does not work as intended. 
* Either it should be unsupported (which it is now) or else it should * be implemented correctly. else if (attributeName == "outputsandboxbasedesturi") { for (std::list::iterator it = job.Files.begin(); it != job.Files.end(); it++) if (!it->Target.empty() && !it->Target.front()) { it->Target.front() = simpleJDLvalue(attributeValue); return true; } */ /// \mapattr prologue -> PreExecutable else if (attributeName == "prologue") { if (job.Application.PreExecutable.empty()) { job.Application.PreExecutable.push_back(ExecutableType()); } job.Application.PreExecutable.front().Path = simpleJDLvalue(attributeValue); return true; } /// \mapattr prologuearguments -> PreExecutable else if (attributeName == "prologuearguments") { if (job.Application.PreExecutable.empty()) { job.Application.PreExecutable.push_back(ExecutableType()); } tokenize(simpleJDLvalue(attributeValue), job.Application.PreExecutable.front().Argument); return true; } /// \mapattr epilogue -> PostExecutable else if (attributeName == "epilogue") { if (job.Application.PostExecutable.empty()) { job.Application.PostExecutable.push_back(ExecutableType()); } job.Application.PostExecutable.front().Path = simpleJDLvalue(attributeValue); return true; } /// \mapattr epiloguearguments -> PostExecutable else if (attributeName == "epiloguearguments") { if (job.Application.PostExecutable.empty()) { job.Application.PostExecutable.push_back(ExecutableType()); } tokenize(simpleJDLvalue(attributeValue), job.Application.PostExecutable.front().Argument); return true; } /// TODO /// \mapattr allowzippedisb -> OtherAttributes else if (attributeName == "allowzippedisb") { // Not supported yet, only store it job.OtherAttributes["egee:jdl;AllowZippedISB"] = simpleJDLvalue(attributeValue); return true; } /// TOOD /// \mapattr zippedisb -> OtherAttributes else if (attributeName == "zippedisb") { // Not supported yet, only store it job.OtherAttributes["egee:jdl;ZippedISB"] = "\"" + simpleJDLvalue(attributeValue) + "\""; return true; } /// \mapattr expirytime -> ExpirationTime else if (attributeName == "expirytime") { job.Application.ExpirationTime = Time(stringtol(simpleJDLvalue(attributeValue))); return true; } /// \mapattr environment -> Environment else if (attributeName == "environment") { std::list variables = listJDLvalue(attributeValue); for (std::list::const_iterator it = variables.begin(); it != variables.end(); it++) { std::string::size_type equal_pos = it->find('='); if (equal_pos != std::string::npos) { job.Application.Environment.push_back( std::pair( trim(it->substr(0, equal_pos)), trim(it->substr(equal_pos + 1)))); } else { logger.msg(VERBOSE, "[JDLParser] Environment variable has been defined without any equals sign."); return false; } } return true; } /// TODO /// \mapattr perusalfileenable -> OtherAttributes else if (attributeName == "perusalfileenable") { // Not supported yet, only store it job.OtherAttributes["egee:jdl;PerusalFileEnable"] = simpleJDLvalue(attributeValue); return true; } /// TODO /// \mapattr perusaltimeinterval -> OtherAttributes else if (attributeName == "perusaltimeinterval") { // Not supported yet, only store it job.OtherAttributes["egee:jdl;PerusalTimeInterval"] = simpleJDLvalue(attributeValue); return true; } /// TODO /// \mapattr perusalfilesdesturi -> OtherAttributes else if (attributeName == "perusalfilesdesturi") { // Not supported yet, only store it job.OtherAttributes["egee:jdl;PerusalFilesDestURI"] = "\"" + simpleJDLvalue(attributeValue) + "\""; return true; } else if (attributeName == "inputdata") // Not 
supported yet // will be soon deprecated return true; else if (attributeName == "outputdata") // Not supported yet // will be soon deprecated return true; else if (attributeName == "storageindex") // Not supported yet // will be soon deprecated return true; else if (attributeName == "datacatalog") // Not supported yet // will be soon deprecated return true; /// TODO /// \mapattr datarequirements -> OtherAttributes else if (attributeName == "datarequirements") { // Not supported yet, only store it job.OtherAttributes["egee:jdl;DataRequirements"] = simpleJDLvalue(attributeValue); return true; } /// TODO /// \mapattr dataaccessprotocol -> OtherAttributes else if (attributeName == "dataaccessprotocol") { // Not supported yet, only store it job.OtherAttributes["egee:jdl;DataAccessProtocol"] = simpleJDLvalue(attributeValue); return true; } /// TODO /// \mapattr virtualorganisation -> OtherAttributes else if (attributeName == "virtualorganisation") { job.OtherAttributes["egee:jdl;VirtualOrganisation"] = simpleJDLvalue(attributeValue); return true; } /// \mapattr queuename -> QueueName else if (attributeName == "queuename") { job.Resources.QueueName = simpleJDLvalue(attributeValue); return true; } /// TODO /// \mapattr batchsystem -> OtherAttributes else if (attributeName == "batchsystem") { job.OtherAttributes["egee:jdl;batchsystem"] = simpleJDLvalue(attributeValue); return true; } /// \mapattr cpunumber -> NumberOfSlots else if (attributeName == "cpunumber") { if (!stringto(simpleJDLvalue(attributeValue), job.Resources.SlotRequirement.NumberOfSlots)) { job.Resources.SlotRequirement.NumberOfSlots = -1; } } /// \mapattr retrycount -> Rerun else if (attributeName == "retrycount") { const int count = stringtoi(simpleJDLvalue(attributeValue)); if (job.Application.Rerun < count) job.Application.Rerun = count; return true; } /// \mapattr shallowretrycount -> Rerun else if (attributeName == "shallowretrycount") { const int count = stringtoi(simpleJDLvalue(attributeValue)); if (job.Application.Rerun < count) job.Application.Rerun = count; return true; } /// TODO /// \mapattr lbaddress -> OtherAttributes else if (attributeName == "lbaddress") { // Not supported yet, only store it job.OtherAttributes["egee:jdl;LBAddress"] = "\"" + simpleJDLvalue(attributeValue) + "\""; return true; } /// \mapattr myproxyserver -> CredentialService else if (attributeName == "myproxyserver") { URL url(simpleJDLvalue(attributeValue)); if (!url) return false; job.Application.CredentialService.push_back(url); return true; } /// TODO /// \mapattr hlrlocation -> OtherAttributes else if (attributeName == "hlrlocation") { // Not supported yet, only store it job.OtherAttributes["egee:jdl;HLRLocation"] = "\"" + simpleJDLvalue(attributeValue) + "\""; return true; } /// TODO /// \mapattr jobprovenance -> OtherAttributes else if (attributeName == "jobprovenance") { // Not supported yet, only store it job.OtherAttributes["egee:jdl;JobProvenance"] = "\"" + simpleJDLvalue(attributeValue) + "\""; return true; } /// TODO /// \mapattr nodenumber -> OtherAttributes else if (attributeName == "nodenumber") { // Not supported yet, only store it job.OtherAttributes["egee:jdl;NodeNumber"] = simpleJDLvalue(attributeValue); return true; } else if (attributeName == "jobsteps") // Not supported yet // will be soon deprecated return true; else if (attributeName == "currentstep") // Not supported yet // will be soon deprecated return true; else if (attributeName == "jobstate") // Not supported yet // will be soon deprecated return true; /// TODO /// 
\mapattr listenerport -> OtherAttributes else if (attributeName == "listenerport") { // Not supported yet, only store it job.OtherAttributes["egee:jdl;ListenerPort"] = simpleJDLvalue(attributeValue); return true; } /// TODO /// \mapattr listenerport -> OtherAttributes else if (attributeName == "listenerhost") { // Not supported yet, only store it job.OtherAttributes["egee:jdl;ListenerHost"] = "\"" + simpleJDLvalue(attributeValue) + "\""; return true; } /// TODO /// \mapattr listenerpipename -> OtherAttributes else if (attributeName == "listenerpipename") { // Not supported yet, only store it job.OtherAttributes["egee:jdl;ListenerPipeName"] = "\"" + simpleJDLvalue(attributeValue) + "\""; return true; } /// TODO /// \mapattr requirements -> OtherAttributes else if (attributeName == "requirements") { // It's too complicated to determinize the right conditions, because the definition language is // LRMS specific. // Only store it. job.OtherAttributes["egee:jdl;Requirements"] = "\"" + simpleJDLvalue(attributeValue) + "\""; return true; } /// TODO /// \mapattr rank -> OtherAttributes else if (attributeName == "rank") { job.OtherAttributes["egee:jdl;rank"] = simpleJDLvalue(attributeValue); return true; } /// TODO /// \mapattr fuzzyrank -> OtherAttributes else if (attributeName == "fuzzyrank") { job.OtherAttributes["egee:jdl;fuzzyrank"] = simpleJDLvalue(attributeValue); return true; } /// \mapattr usertags -> Annotation else if (attributeName == "usertags") { job.Identification.Annotation = listJDLvalue(attributeValue, std::make_pair('[', ']'), ';'); return true; } /// TODO /// \mapattr outputse -> OtherAttributes else if (attributeName == "outputse") { // Not supported yet, only store it job.OtherAttributes["egee:jdl;OutputSE"] = "\"" + simpleJDLvalue(attributeValue) + "\""; return true; } logger.msg(WARNING, "[JDLParser]: Unknown attribute name: \'%s\', with value: %s", attributeName, attributeValue); return true; } std::string JDLParser::generateOutputList(const std::string& attribute, const std::list& list, std::pair brackets, char lineEnd) const { const std::string space = " "; // 13 spaces seems to be standard padding. std::ostringstream output; output << " " << attribute << " = " << brackets.first << std::endl; for (std::list::const_iterator it = list.begin(); it != list.end(); it++) { if (it != list.begin()) output << lineEnd << std::endl; output << space << "\"" << *it << "\""; } output << std::endl << space << brackets.second << ";" << std::endl; return output.str(); } bool JDLParser::ParseInputSandboxAttribute(JobDescription& j) { std::map::iterator itAtt; itAtt = j.OtherAttributes.find("egee:jdl;inputsandbox"); if (itAtt == j.OtherAttributes.end()) { return true; } std::list inputfiles = listJDLvalue(itAtt->second); bool baseuriExist = false; URL base; itAtt = j.OtherAttributes.find("egee:jdl;inputsandboxbaseuri"); if (itAtt != j.OtherAttributes.end()) { base = simpleJDLvalue(itAtt->second); baseuriExist = true; if (!base) { logger.msg(ERROR, "The inputsandboxbaseuri JDL attribute specifies an invalid URL."); return false; } } for (std::list::const_iterator it = inputfiles.begin(); it != inputfiles.end(); ++it) { InputFileType file; const std::size_t pos = it->find_last_of('/'); file.Name = (pos == std::string::npos ? 
*it : it->substr(pos+1)); if (baseuriExist && it->find("://") == std::string::npos) { file.Sources.push_back(base); if ((*it)[0] == '/') { file.Sources.back().ChangePath(*it); } else { file.Sources.back().ChangePath(file.Sources.back().Path() + '/' + *it); } } else { file.Sources.push_back(URL(*it)); } if (!file.Sources.back()) { return false; } // Initializing these variables file.IsExecutable = false; j.DataStaging.InputFiles.push_back(file); } return true; } JobDescriptionParserPluginResult JDLParser::Parse(const std::string& source, std::list& jobdescs, const std::string& language, const std::string& dialect) const { JobDescriptionParserPluginResult result(JobDescriptionParserPluginResult::WrongLanguage); if (language != "" && !IsLanguageSupported(language)) { return result; } JobDescription parsed_jobdescription; unsigned long first = source.find_first_of("["); unsigned long last = source.find_last_of("]"); if (first == std::string::npos || last == std::string::npos || first >= last) { return result; } std::string input_text = source.substr(first + 1, last - first - 1); //Remove multiline comments unsigned long comment_start = 0; while ((comment_start = input_text.find("/*", comment_start)) != std::string::npos) { input_text.erase(input_text.begin() + comment_start, input_text.begin() + input_text.find("*/", comment_start) + 2); } std::string wcpy = ""; std::list lines; tokenize(input_text, lines, "\n"); for (std::list::iterator it = lines.begin(); it != lines.end();) { // Remove empty lines const std::string trimmed_line = trim(*it); if (trimmed_line.length() == 0) it = lines.erase(it); // Remove lines starts with '#' - Comments else if (trimmed_line.length() >= 1 && trimmed_line.substr(0, 1) == "#") it = lines.erase(it); // Remove lines starts with '//' - Comments else if (trimmed_line.length() >= 2 && trimmed_line.substr(0, 2) == "//") it = lines.erase(it); else { wcpy += *it + "\n"; it++; } } if (!splitJDL(wcpy, lines)) { logger.msg(VERBOSE, "[JDLParser] Syntax error found during the split function."); return false; } if (lines.size() <= 0) { logger.msg(VERBOSE, "[JDLParser] Lines count is zero or other funny error has occurred."); return false; } for (std::list::iterator it = lines.begin(); it != lines.end(); it++) { const size_t equal_pos = it->find_first_of("="); if (equal_pos == std::string::npos) { logger.msg(VERBOSE, "[JDLParser] JDL syntax error. There is at least one equals sign missing where it would be expected."); return false; } if (!handleJDLattribute(trim(it->substr(0, equal_pos)), trim(it->substr(equal_pos + 1)), parsed_jobdescription)) { return false; } } if (!ParseInputSandboxAttribute(parsed_jobdescription)) { return false; } SourceLanguage(parsed_jobdescription) = (!language.empty() ? 
language : supportedLanguages.front()); logger.msg(VERBOSE, "String successfully parsed as %s", parsed_jobdescription.GetSourceLanguage()); jobdescs.push_back(parsed_jobdescription); return true; } JobDescriptionParserPluginResult JDLParser::Assemble(const JobDescription& job, std::string& product, const std::string& language, const std::string& dialect) const { if (!IsLanguageSupported(language)) { return false; } if (job.Application.Executable.Path.empty()) { return false; } product = "[\n Type = \"job\";\n"; /// \mapattr executable <- ExecutableType::Path product += ADDJDLSTRING(job.Application.Executable.Path, "Executable"); /// \mapattr arguments <- ExecutableType::Argument if (!job.Application.Executable.Argument.empty()) { product += " Arguments = \""; for (std::list::const_iterator it = job.Application.Executable.Argument.begin(); it != job.Application.Executable.Argument.end(); it++) { if (it != job.Application.Executable.Argument.begin()) product += " "; product += *it; } product += "\";\n"; } /// \mapattr stdinput <- Input product += ADDJDLSTRING(job.Application.Input, "StdInput"); /// \mapattr stdoutput <- Output product += ADDJDLSTRING(job.Application.Output, "StdOutput"); /// \mapattr stderror <- Error product += ADDJDLSTRING(job.Application.Error, "StdError"); /// \mapattr environment <- Environment if (!job.Application.Environment.empty()) { std::list environment; for (std::list< std::pair >::const_iterator it = job.Application.Environment.begin(); it != job.Application.Environment.end(); it++) { environment.push_back(it->first + " = " + it->second); } if (!environment.empty()) product += generateOutputList("Environment", environment); } /// \mapattr prologue <- PreExecutable /// \mapattr prologuearguments <- PreExecutable if (!job.Application.PreExecutable.empty()) { product += ADDJDLSTRING(job.Application.PreExecutable.front().Path, "Prologue"); if (!job.Application.PreExecutable.front().Argument.empty()) { product += " PrologueArguments = \""; for (std::list::const_iterator iter = job.Application.PreExecutable.front().Argument.begin(); iter != job.Application.PreExecutable.front().Argument.end(); ++iter) { if (iter != job.Application.PreExecutable.front().Argument.begin()) product += " "; product += *iter; } product += "\";\n"; } } /// \mapattr epilogue <- PostExecutable /// \mapattr epiloguearguments <- PostExecutable if (!job.Application.PostExecutable.empty()) { product += ADDJDLSTRING(job.Application.PostExecutable.front().Path, "Epilogue"); if (!job.Application.PostExecutable.front().Argument.empty()) { product += " EpilogueArguments = \""; for (std::list::const_iterator iter = job.Application.PostExecutable.front().Argument.begin(); iter != job.Application.PostExecutable.front().Argument.end(); ++iter) { if (iter != job.Application.PostExecutable.front().Argument.begin()) product += " "; product += *iter; } product += "\";\n"; } } if (!job.Application.Executable.Path.empty() || !job.DataStaging.InputFiles.empty() || !job.Application.Input.empty()) { bool addExecutable = !job.Application.Executable.Path.empty() && !Glib::path_is_absolute(job.Application.Executable.Path); bool addInput = !job.Application.Input.empty(); std::list inputSandboxList; for (std::list::const_iterator it = job.DataStaging.InputFiles.begin(); it != job.DataStaging.InputFiles.end(); it++) { /* Since JDL does not have support for multiple locations only the first * location will be added. */ if (!it->Sources.empty()) { inputSandboxList.push_back(it->Sources.front() ? 
it->Sources.front().fullstr() : it->Name); } addExecutable &= (it->Name != job.Application.Executable.Path); addInput &= (it->Name != job.Application.Input); } if (addExecutable) { inputSandboxList.push_back(job.Application.Executable.Path); } if (addInput) { inputSandboxList.push_back(job.Application.Input); } if (!inputSandboxList.empty()) { product += generateOutputList("InputSandbox", inputSandboxList); } } if (!job.DataStaging.OutputFiles.empty() || !job.Application.Output.empty() || !job.Application.Error.empty()) { bool addOutput = !job.Application.Output.empty(); bool addError = !job.Application.Error.empty(); std::list outputSandboxList; std::list outputSandboxDestURIList; for (std::list::const_iterator it = job.DataStaging.OutputFiles.begin(); it != job.DataStaging.OutputFiles.end(); it++) { outputSandboxList.push_back(it->Name); /* User downloadable files should go to the local grid ftp * server (local to CREAM). See comments on the parsing of the * outputsandboxdesturi attribute above. * Double slash (//) needed after localhost, otherwise job files are not * transfered to OSB dir. */ const std::string uri_tmp = (it->Targets.empty() || it->Targets.front().Protocol() == "file" ? "gsiftp://localhost//" + it->Name : it->Targets.front().fullstr()); outputSandboxDestURIList.push_back(uri_tmp); addOutput &= (it->Name != job.Application.Output); addError &= (it->Name != job.Application.Error); } if (addOutput) { outputSandboxList.push_back(job.Application.Output); outputSandboxDestURIList.push_back("gsiftp://localhost/" + job.Application.Output); } if (addError) { outputSandboxList.push_back(job.Application.Error); outputSandboxDestURIList.push_back("gsiftp://localhost/" + job.Application.Error); } if (!outputSandboxList.empty()) { product += generateOutputList("OutputSandbox", outputSandboxList); } if (!outputSandboxDestURIList.empty()) { product += generateOutputList("OutputSandboxDestURI", outputSandboxDestURIList); } } /// \mapattr queuename <- QueueName if (!job.Resources.QueueName.empty()) { product += " QueueName = \""; product += job.Resources.QueueName; product += "\";\n"; } /// \mapattr cpunumber <- NumberOfSlots product += ADDJDLNUMBER(job.Resources.SlotRequirement.NumberOfSlots, "CPUNumber"); /// \mapattr retrycount <- Rerun product += ADDJDLNUMBER(job.Application.Rerun, "RetryCount"); /// \mapattr shallowretrycount <- Rerun product += ADDJDLNUMBER(job.Application.Rerun, "ShallowRetryCount"); /// \mapattr expirytime <- ExpirationTime product += ADDJDLNUMBER(job.Application.ExpirationTime.GetTime(), "ExpiryTime"); /// \mapattr myproxyserver <- CredentialService if (!job.Application.CredentialService.empty() && job.Application.CredentialService.front()) { product += " MyProxyServer = \""; product += job.Application.CredentialService.front().fullstr(); product += "\";\n"; } /// \mapattr usertags <- Annotation if (!job.Identification.Annotation.empty()) product += generateOutputList("UserTags", job.Identification.Annotation, std::pair('[', ']'), ';'); if (!job.OtherAttributes.empty()) { std::map::const_iterator it; for (it = job.OtherAttributes.begin(); it != job.OtherAttributes.end(); it++) { std::list keys; tokenize(it->first, keys, ";"); if (keys.size() != 2 || keys.front() != "egee:jdl") { continue; } product += " "; product += keys.back(); product += " = \""; product += it->second; product += "\";\n"; } } product += "]"; return true; } } // namespace Arc 
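JDLParser::Parse() above accepts the bracket-delimited attribute list that Assemble() produces. The sketch below shows the shape of such a CREAM JDL description and one way the plugin class could be exercised directly; it is illustrative only — the arc/compute include path and the NULL PluginArgument used for standalone construction are assumptions, and only attributes handled by handleJDLattribute() appear in the string.

// Sketch: drive JDLParser::Parse() with a minimal CREAM JDL string.
#include <iostream>
#include <list>
#include <string>
#include <arc/compute/JobDescription.h>   // assumed install path of the compute headers
#include "JDLParser.h"

int main() {
  const std::string jdl =
    "[\n"
    "  Type = \"job\";\n"
    "  Executable = \"/bin/echo\";\n"
    "  Arguments = \"hello world\";\n"
    "  StdOutput = \"out.txt\";\n"
    "  StdError = \"err.txt\";\n"
    "  QueueName = \"grid\";\n"
    "]";

  Arc::JDLParser parser(NULL);            // illustration only: no real PluginArgument
  std::list<Arc::JobDescription> jobs;
  if (!parser.Parse(jdl, jobs) || jobs.empty()) {
    std::cerr << "JDL parsing failed" << std::endl;
    return 1;
  }
  std::cout << "Executable: " << jobs.front().Application.Executable.Path << "\n"
            << "StdOutput:  " << jobs.front().Application.Output << std::endl;
  return 0;
}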
nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/PaxHeaders.7502/RSLParser.cpp0000644000000000000000000000012313124220367025665 xustar000000000000000026 mtime=1498489079.77755 27 atime=1513200574.296699 30 ctime=1513200660.396752338 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/RSLParser.cpp0000644000175000002070000005355013124220367025743 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "RSLParser.h" namespace Arc { RSLValue* RSLValue::Evaluate(std::map& vars, JobDescriptionParserPluginResult& parsing_result) const { const RSLLiteral *n; const RSLVariable *v; const RSLConcat *c; const RSLList *l; const RSLSequence *s; if ((n = dynamic_cast(this))) return new RSLLiteral(*n); else if ((v = dynamic_cast(this))) { std::map::iterator it = vars.find(v->Var()); return new RSLLiteral((it != vars.end()) ? it->second : "", v->Location()); } else if ((c = dynamic_cast(this))) { RSLValue *left = c->Left()->Evaluate(vars, parsing_result); if (!left) { return NULL; } RSLValue *right = c->Right()->Evaluate(vars, parsing_result); if (!right) { delete left; return NULL; } RSLLiteral *nleft = dynamic_cast(left); if (!nleft) { parsing_result.SetFailure(); parsing_result.AddError(JobDescriptionParsingError(IString("Left operand for RSL concatenation does not evaluate to a literal").str(), c->Location())); delete left; delete right; return NULL; } RSLLiteral *nright = dynamic_cast(right); if (!nright) { parsing_result.SetFailure(); parsing_result.AddError(JobDescriptionParsingError(IString("Right operand for RSL concatenation does not evaluate to a literal").str(), c->Location())); delete left; delete right; return NULL; } RSLLiteral *result = new RSLLiteral(nleft->Value() + nright->Value(), left->Location()); delete left; delete right; return result; } else if ((l = dynamic_cast(this))) { RSLList *result = new RSLList(l->Location()); for (std::list::const_iterator it = l->begin(); it != l->end(); it++) { RSLValue *value = (*it)->Evaluate(vars, parsing_result); if (!value) { delete result; return NULL; } result->Add(value); } return result; } else if ((s = dynamic_cast(this))) { RSLList *result = new RSLList(s->Location()); for (std::list::const_iterator it = s->begin(); it != s->end(); it++) { RSLValue *value = (*it)->Evaluate(vars, parsing_result); if (!value) { delete result; return NULL; } result->Add(value); } return new RSLSequence(result, s->Location()); } return NULL; } void RSLLiteral::Print(std::ostream& os) const { std::string s(str); std::string::size_type pos = 0; while ((pos = s.find('"', pos)) != std::string::npos) { s.insert(pos, 1, '"'); pos += 2; } os << '"' << s << '"'; } void RSLVariable::Print(std::ostream& os) const { os << "$(" << var << ')'; } RSLConcat::~RSLConcat() { delete left; delete right; } void RSLConcat::Print(std::ostream& os) const { os << *left << " # " << *right; } RSLList::~RSLList() { for (std::list::iterator it = begin(); it != end(); it++) delete *it; } void RSLList::Add(RSLValue *value) { values.push_back(value); } void RSLList::Print(std::ostream& os) const { for (std::list::const_iterator it = begin(); it != end(); it++) { if (it != begin()) os << " "; os << **it; } } void RSLSequence::Print(std::ostream& os) const { os << "( " << *seq << " )"; } RSL* RSL::Evaluate(JobDescriptionParserPluginResult& parsing_result) const { const RSLBoolean *b = dynamic_cast(this); if (b && (b->Op() == RSLMulti)) { RSLBoolean *result = new RSLBoolean(RSLMulti); for 
(std::list::const_iterator it = b->begin(); it != b->end(); it++) { RSL *rsl = (*it)->Evaluate(parsing_result); if (!rsl) { return NULL; } result->Add(rsl); } return result; } else { std::map vars; return Evaluate(vars, parsing_result); } } RSL* RSL::Evaluate(std::map& vars, JobDescriptionParserPluginResult& parsing_result) const { const RSLBoolean *b; const RSLCondition *c; if ((b = dynamic_cast(this))) { if (b->Op() == RSLMulti) { parsing_result.SetFailure(); parsing_result.AddError(JobDescriptionParsingError(IString("Multi-request operator only allowed at top level").str(), b->OpLocation())); return NULL; } else { RSLBoolean *result = new RSLBoolean(b->Op()); std::map vars2(vars); for (std::list::const_iterator it = b->begin(); it != b->end(); it++) { RSL *rsl = (*it)->Evaluate(vars2, parsing_result); if (!rsl) { return NULL; } result->Add(rsl); } return result; } } else if ((c = dynamic_cast(this))) { RSLList *l = new RSLList(c->List().Location()); if (c->Attr() == "rslsubstitution") // Underscore, in 'rsl_substitution', is removed by normalization. for (std::list::const_iterator it = c->begin(); it != c->end(); it++) { const RSLSequence *s = dynamic_cast(*it); if (!s) { parsing_result.SetFailure(); parsing_result.AddError(JobDescriptionParsingError(IString("RSL substitution is not a sequence").str(), (**it).Location())); // TODO: The term sequence is not defined in the xRSL manual. delete l; return NULL; } if (s->size() != 2) { parsing_result.SetFailure(); parsing_result.AddError(JobDescriptionParsingError(IString("RSL substitution sequence is not of length 2").str(), (**it).Location())); delete l; return NULL; } std::list::const_iterator it2 = s->begin(); RSLValue *var = (*it2)->Evaluate(vars, parsing_result); if (!var) { delete l; return NULL; } it2++; RSLValue *val = (*it2)->Evaluate(vars, parsing_result); if (!val) { delete l; return NULL; } RSLLiteral *nvar = dynamic_cast(var); if (!nvar) { parsing_result.SetFailure(); parsing_result.AddError(JobDescriptionParsingError(IString("RSL substitution variable name does not evaluate to a literal").str(), var->Location())); delete l; delete var; delete val; return NULL; } RSLLiteral *nval = dynamic_cast(val); if (!nval) { parsing_result.SetFailure(); parsing_result.AddError(JobDescriptionParsingError(IString("RSL substitution variable value does not evaluate to a literal").str(), val->Location())); delete l; delete var; delete val; return NULL; } vars[nvar->Value()] = nval->Value(); RSLList *seq = new RSLList(l->Location()); seq->Add(var); seq->Add(val); l->Add(new RSLSequence(seq, s->Location())); } else for (std::list::const_iterator it = c->begin(); it != c->end(); it++) { RSLValue *v = (*it)->Evaluate(vars, parsing_result); if (!v) { delete l; return NULL; } l->Add(v); } return new RSLCondition(c->Attr(), c->Op(), l); } return NULL; } RSLBoolean::~RSLBoolean() { for (std::list::iterator it = begin(); it != end(); it++) delete *it; } void RSLBoolean::Add(RSL *condition) { conditions.push_back(condition); } void RSLBoolean::Print(std::ostream& os) const { os << op.v; for (std::list::const_iterator it = begin(); it != end(); it++) os << "( " << **it << " )"; } void RSLCondition::init() { // Normalize the attribute name // Does the same thing as globus_rsl_assist_attributes_canonicalize, // i.e. 
lowercase the attribute name and remove underscores this->attr.v = lower(this->attr.v); std::string::size_type pos = 0; while ((pos = this->attr.v.find('_', pos)) != std::string::npos) { this->attr.v.erase(pos, 1); } } void RSLCondition::Print(std::ostream& os) const { os << attr.v << ' ' << op.v << ' ' << *values; } RSLParser::~RSLParser() { if (parsed) delete parsed; if (evaluated) delete evaluated; } std::pair RSLParser::GetLinePosition(std::string::size_type pos) const { if (pos > s.size()) { return std::pair(-1, -1); } std::pair line_pos(1, pos); std::string::size_type nl_pos, offset = 0; while ((nl_pos = s.find_first_of('\n', offset)) < pos) { line_pos.first += 1; line_pos.second = pos-nl_pos-1; offset = nl_pos+1; } return line_pos; } template SourceLocation RSLParser::toSourceLocation(const T& v, std::string::size_type offset) const { return SourceLocation(GetLinePosition(n-offset), v); } const RSL* RSLParser::Parse(bool evaluate) { if (n == 0) { std::string::size_type pos = 0; while ((pos = s.find("(*", pos)) != std::string::npos) { std::string::size_type pos2 = s.find("*)", pos); if (pos2 == std::string::npos) { int failing_code_start = std::max(pos-10, 0); const std::string failing_code = s.substr(failing_code_start, pos-failing_code_start+12); parsing_result.AddError(JobDescriptionParsingError(IString("End of comment not found").str(), GetLinePosition(pos+2), failing_code)); return NULL; } comments_positions[pos] = pos2+2; pos = pos2+2; } parsed = ParseRSL(); if (parsed) { SkipWSAndComments(); if (n != std::string::npos) { parsing_result.SetFailure(); parsing_result.AddError(JobDescriptionParsingError(IString("Junk at end of RSL").str(), GetLinePosition(n))); delete parsed; parsed = NULL; return NULL; } } if (parsed) { evaluated = parsed->Evaluate(parsing_result); } if ((!evaluate && parsed) || (evaluate && evaluated)) { parsing_result.SetSuccess(); } } return evaluate ? 
evaluated : parsed; } void RSLParser::SkipWSAndComments() { std::string::size_type prev_n = std::string::npos; while (prev_n != n) { prev_n = n; n = s.find_first_not_of(" \t\n\v\f\r", n); std::map::const_iterator it = comments_positions.find(n); if (it != comments_positions.end()) { n = it->second; } } } SourceLocation RSLParser::ParseBoolOp() { switch (s[n]) { case '+': n++; return toSourceLocation(RSLMulti); break; case '&': n++; return toSourceLocation(RSLAnd); break; case '|': n++; return toSourceLocation(RSLOr); break; default: return toSourceLocation(RSLBoolError, 0); break; } return toSourceLocation(RSLBoolError, 0); // to keep compiler happy } SourceLocation RSLParser::ParseRelOp() { switch (s[n]) { case '=': n++; return toSourceLocation(RSLEqual); break; case '!': if (s[n + 1] == '=') { n += 2; return toSourceLocation(RSLNotEqual, 2); } return toSourceLocation(RSLRelError, 0); break; case '<': n++; if (s[n] == '=') { n++; return toSourceLocation(RSLLessOrEqual, 2); } return toSourceLocation(RSLLess); break; case '>': n++; if (s[n] == '=') { n++; return toSourceLocation(RSLGreaterOrEqual, 2); } return toSourceLocation(RSLGreater); break; default: return toSourceLocation(RSLRelError, 0); break; } return toSourceLocation(RSLRelError, 0); // to keep compiler happy } SourceLocation RSLParser::ParseString(int& status) { // status: 1 - OK, 0 - not a string, -1 - error if (s[n] == '\'') { SourceLocation str(toSourceLocation(std::string(), 0)); do { std::string::size_type pos = s.find('\'', n + 1); if (pos == std::string::npos) { parsing_result.AddError(JobDescriptionParsingError(IString("End of single quoted string not found").str(), GetLinePosition(n))); status = -1; return toSourceLocation(std::string(), 0); } str += s.substr(n + 1, pos - n - 1); n = pos + 1; if (s[n] == '\'') str += std::string("\'"); } while (s[n] == '\''); status = 1; return str; } else if (s[n] == '"') { SourceLocation str(toSourceLocation(std::string(), 0)); do { std::string::size_type pos = s.find('"', n + 1); if (pos == std::string::npos) { parsing_result.AddError(JobDescriptionParsingError(IString("End of double quoted string not found").str(), GetLinePosition(n))); status = -1; return toSourceLocation(std::string(), 0); } str += s.substr(n + 1, pos - n - 1); n = pos + 1; if (s[n] == '"') str += std::string("\""); } while (s[n] == '"'); status = 1; return str; } else if (s[n] == '^') { n++; char delim = s[n]; SourceLocation str(toSourceLocation(std::string(), 0)); do { std::string::size_type pos = s.find(delim, n + 1); if (pos == std::string::npos) { parsing_result.AddError(JobDescriptionParsingError(IString("End of user delimiter (%s) quoted string not found", delim).str(), GetLinePosition(n))); status = -1; return toSourceLocation(std::string(), 0); } str += s.substr(n + 1, pos - n - 1); n = pos + 1; if (s[n] == delim) str += std::string(1, delim); } while (s[n] == delim); status = 1; return str; } else { std::string::size_type pos = s.find_first_of("+&|()=<>!\"'^#$ \t\n\v\f\r", n); if (pos == n) { status = 0; return toSourceLocation(std::string(), 0); } SourceLocation str = s.substr(n, pos - n); str.location = GetLinePosition(n); n = pos; status = 1; return str; } } RSLList* RSLParser::ParseList() { RSLList *values = new RSLList(GetLinePosition(n)); RSLValue *left = NULL; RSLValue *right = NULL; try { int concat = 0; // 0 = No, 1 = Explicit, 2 = Implicit std::pair concatLocation; do { right = NULL; int nextconcat = 0; std::string::size_type nsave = n; SkipWSAndComments(); // TODO: Skipping comments increases 
n - not compatible with earlier approach. if (n != nsave) concat = 0; if (s[n] == '#') { concatLocation = GetLinePosition(n); n++; SkipWSAndComments(); concat = 1; } if (concat == 2) { concatLocation = GetLinePosition(n); } if (s[n] == '(') { std::pair seqLocation = GetLinePosition(n); n++; RSLList *seq = ParseList(); SkipWSAndComments(); if (s[n] != ')') { parsing_result.AddError(JobDescriptionParsingError(IString("')' expected").str(), GetLinePosition(n))); throw std::exception(); } n++; right = new RSLSequence(seq, seqLocation); } else if (s[n] == '$') { n++; SkipWSAndComments(); if (s[n] != '(') { parsing_result.AddError(JobDescriptionParsingError(IString("'(' expected").str(), GetLinePosition(n))); throw std::exception(); } n++; SkipWSAndComments(); int status; SourceLocation var = ParseString(status); if (status != 1) { parsing_result.AddError(JobDescriptionParsingError(IString("Variable name expected").str(), GetLinePosition(n))); throw std::exception(); } const std::string invalid_var_chars = "+&|()=<>!\"'^#$"; if (var.v.find_first_of(invalid_var_chars) != std::string::npos) { parsing_result.AddError(JobDescriptionParsingError(IString("Variable name (%s) contains invalid character (%s)", var.v, invalid_var_chars).str(), GetLinePosition(n))); throw std::exception(); } SkipWSAndComments(); if (s[n] != ')') { parsing_result.AddError(JobDescriptionParsingError(IString("')' expected").str(), GetLinePosition(n))); throw std::exception(); } n++; right = new RSLVariable(var); nextconcat = 2; } else { int status; SourceLocation val = ParseString(status); if (status == -1) { parsing_result.AddError(JobDescriptionParsingError(IString("Broken string").str(), GetLinePosition(n))); throw std::exception(); } right = (status == 1) ? new RSLLiteral(val) : NULL; nextconcat = right ? 2 : 0; } if (concat == 0) { if (left) values->Add(left); left = right; } else if (concat == 1) { if (!left) { parsing_result.AddError(JobDescriptionParsingError(IString("No left operand for concatenation operator").str(), GetLinePosition(n))); throw std::exception(); } if (!right) { parsing_result.AddError(JobDescriptionParsingError(IString("No right operand for concatenation operator").str(), GetLinePosition(n))); throw std::exception(); } left = new RSLConcat(left, right, concatLocation); } else if (concat == 2) { if (left) { if (right) left = new RSLConcat(left, right, concatLocation); } else left = right; } concat = nextconcat; } while (left || right); } catch (std::exception& e) { if (values) delete values; if (left) delete left; if (right) delete right; return NULL; } return values; } RSL* RSLParser::ParseRSL() { SkipWSAndComments(); SourceLocation bop(ParseBoolOp()); if (bop != RSLBoolError) { SkipWSAndComments(); RSLBoolean *b = new RSLBoolean(bop); do { if (s[n] != '(') { parsing_result.AddError(JobDescriptionParsingError(IString("'(' expected").str(), GetLinePosition(n))); delete b; return NULL; } n++; SkipWSAndComments(); RSL *rsl = ParseRSL(); if (!rsl) { delete b; return NULL; } // Something was parsed (rsl recognised) - change intermediate parsing result (default WrongLanguage) - set to Failure - if parsing succeeds result is changed at end of parsing. 
parsing_result.SetFailure(); b->Add(rsl); SkipWSAndComments(); if (s[n] != ')') { parsing_result.AddError(JobDescriptionParsingError(IString("')' expected").str(), GetLinePosition(n))); delete b; return NULL; } n++; SkipWSAndComments(); } while (n < s.size() && s[n] == '('); return b; } else { int status; SourceLocation attr(ParseString(status)); if (status != 1) { parsing_result.AddError(JobDescriptionParsingError(IString("Attribute name expected").str(), GetLinePosition(n))); return NULL; } const std::string invalid_attr_chars = "+&|()=<>!\"'^#$"; if (attr.v.find_first_of(invalid_attr_chars) != std::string::npos) { parsing_result.AddError(JobDescriptionParsingError(IString("Attribute name (%s) contains invalid character (%s)", attr.v, invalid_attr_chars).str(), GetLinePosition(n))); return NULL; } SkipWSAndComments(); SourceLocation rop = ParseRelOp(); if (rop == RSLRelError) { parsing_result.AddError(JobDescriptionParsingError(IString("Relation operator expected").str(), GetLinePosition(n))); return NULL; } SkipWSAndComments(); RSLList *values = ParseList(); if (!values) { return NULL; } RSLCondition *c = new RSLCondition(attr, rop, values); return c; } } std::ostream& operator<<(std::ostream& os, const RSLBoolOp op) { switch (op) { case RSLBoolError: return os << "This should not happen"; case RSLMulti: return os << '+'; case RSLAnd: return os << '&'; case RSLOr: return os << '|'; } return os; } std::ostream& operator<<(std::ostream& os, const RSLRelOp op) { switch (op) { case RSLRelError: return os << "This should not happen"; case RSLEqual: return os << '='; case RSLNotEqual: return os << "!="; case RSLLess: return os << '<'; case RSLGreater: return os << '>'; case RSLLessOrEqual: return os << "<="; case RSLGreaterOrEqual: return os << ">="; } return os; } std::ostream& operator<<(std::ostream& os, const RSLValue& value) { value.Print(os); return os; } std::ostream& operator<<(std::ostream& os, const RSL& rsl) { rsl.Print(os); return os; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/PaxHeaders.7502/JDLParser.h0000644000000000000000000000012412675602216025313 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.316699 30 ctime=1513200660.393752301 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/JDLParser.h0000644000175000002070000000417112675602216025363 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JDLPARSER_H__ #define __ARC_JDLPARSER_H__ #include #include #include /** JDLParser * The JDLParser class, derived from the JobDescriptionParserPlugin class, is a job * description parser for the Job Description Language (JDL) specified in CREAM * Job Description Language Attributes Specification for the EGEE middleware * (EGEE-JRA1-TEC-592336) and Job Description Language Attributes Specification * for the gLite middleware (EGEE-JRA1-TEC-590869-JDL-Attributes-v0-8). 
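 *
 * Attributes that are recognised but not yet mapped onto JobDescription fields
 * (for example Requirements, Rank or VirtualOrganisation) are preserved verbatim
 * in JobDescription::OtherAttributes under keys of the form "egee:jdl;<Name>"
 * and are re-emitted by Assemble(); attribute names that are not recognised at
 * all are logged as warnings and otherwise ignored (see handleJDLattribute()
 * in JDLParser.cpp).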
*/ namespace Arc { class JDLParser : public JobDescriptionParserPlugin { public: JDLParser(PluginArgument* parg); ~JDLParser(); JobDescriptionParserPluginResult Parse(const std::string& source, std::list& jobdescs, const std::string& language = "", const std::string& dialect = "") const; JobDescriptionParserPluginResult Assemble(const JobDescription& job, std::string& product, const std::string& language, const std::string& dialect = "") const; static Plugin* Instance(PluginArgument *arg); private: bool splitJDL(const std::string& original_string, std::list& lines) const; bool handleJDLattribute(const std::string& attributeName, const std::string& attributeValue, JobDescription& job) const; static std::string simpleJDLvalue(const std::string& attributeValue); static std::list listJDLvalue(const std::string& attributeValue, std::pair bracket = std::make_pair('{', '}'), char lineEnd = ','); std::string generateOutputList(const std::string& attribute, const std::list& list, std::pair bracket = std::make_pair('{', '}'), char lineEnd = ',') const; static bool ParseInputSandboxAttribute(JobDescription&); }; } // namespace Arc #endif // __ARC_JDLPARSER_H__ nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/PaxHeaders.7502/ARCJSDLParser.h0000644000000000000000000000012412675602216025764 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.276699 30 ctime=1513200660.389752253 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/ARCJSDLParser.h0000644000175000002070000000471612675602216026041 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_ARCJSDLPARSER_H__ #define __ARC_ARCJSDLPARSER_H__ #include #include /** ARCJSDLParser * The ARCJSDLParser class, derived from the JobDescriptionParserPlugin class, is * primarily a job description parser for the consolidated job description * language (ARCJSDL), derived from JSDL, described in the following document * . * However it is also capable of parsing regular JSDL (GFD 136), the POSIX-JSDL * extension (GFD 136) and the JSDL HPC Profile Application Extension (GFD 111 * and GFD 114). * When parsing ARCJSDL takes precedence over other non-ARCJSDL, so if a * non-ARCJSDL element specifies the same attribute as ARCJSDL, the ARCJSDL * element will be saved. * The output generated by the ARCJSDLParser::UnParse method will follow that of * the ARCJSDL document, see reference above. 
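 *
 * For illustration, a minimal POSIX-JSDL description of the kind accepted by
 * Parse() has roughly the following shape (a sketch only: namespace
 * declarations are omitted and the element nesting follows GFD 136):
 *   <JobDefinition>
 *     <JobDescription>
 *       <Application>
 *         <POSIXApplication>
 *           <Executable>/bin/echo</Executable>
 *           <Argument>hello</Argument>
 *           <Output>out.txt</Output>
 *           <Environment name="GREETING">hello</Environment>
 *         </POSIXApplication>
 *       </Application>
 *     </JobDescription>
 *   </JobDefinition>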
*/ namespace Arc { template class Range; class Software; class SoftwareRequirement; class ARCJSDLParser : public JobDescriptionParserPlugin { public: ARCJSDLParser(PluginArgument* parg); ~ARCJSDLParser(); JobDescriptionParserPluginResult Parse(const std::string& source, std::list& jobdescs, const std::string& language = "", const std::string& dialect = "") const; JobDescriptionParserPluginResult Assemble(const JobDescription& job, std::string& product, const std::string& language, const std::string& dialect = "") const; static Plugin* Instance(PluginArgument *arg); private: bool parseSoftware(XMLNode xmlSoftware, SoftwareRequirement& sr) const; void outputSoftware(const SoftwareRequirement& sr, XMLNode& xmlSoftware) const; template bool parseRange(XMLNode xmlRange, Range& range) const; template bool parseMinMax(XMLNodeList min, XMLNodeList max, Range& range) const; template void outputARCJSDLRange(const Range& range, XMLNode& arcJSDL, const T& undefValue) const; template void outputJSDLRange(const Range& range, XMLNode& jsdl, const T& undefValue) const; void parseBenchmark(XMLNode xmlBenchmark, std::pair& benchmark) const; void outputBenchmark(const std::pair& benchmark, XMLNode& xmlBenchmark) const; }; } // namespace Arc #endif // __ARC_ARCJSDLPARSER_H__ nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/PaxHeaders.7502/ADLParser.h0000644000000000000000000000012412675602216025302 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.299699 30 ctime=1513200660.391752277 nordugrid-arc-5.4.2/src/hed/acc/JobDescriptionParser/ADLParser.h0000644000175000002070000000352212675602216025351 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_ADLPARSER_H__ #define __ARC_ADLPARSER_H__ #include #include /** ARDLParser * The ARCJSDLParser class, derived from the JobDescriptionParserPlugin class, is * a job description parser for the EMI ES job description language (ADL) * described in . 
*/ namespace Arc { template class Range; class Software; class SoftwareRequirement; class ADLParser : public JobDescriptionParserPlugin { public: ADLParser(PluginArgument* parg); ~ADLParser(); JobDescriptionParserPluginResult Parse(const std::string& source, std::list& jobdescs, const std::string& language = "", const std::string& dialect = "") const; JobDescriptionParserPluginResult Assemble(const JobDescription& job, std::string& product, const std::string& language, const std::string& dialect = "") const; static Plugin* Instance(PluginArgument *arg); private: /* bool parseSoftware(XMLNode xmlSoftware, SoftwareRequirement& sr) const; void outputSoftware(const SoftwareRequirement& sr, XMLNode& xmlSoftware) const; template void parseRange(XMLNode xmlRange, Range& range, const T& undefValue) const; template Range parseRange(XMLNode xmlRange, const T& undefValue) const; template void outputARCJSDLRange(const Range& range, XMLNode& arcJSDL, const T& undefValue) const; template void outputJSDLRange(const Range& range, XMLNode& jsdl, const T& undefValue) const; void parseBenchmark(XMLNode xmlBenchmark, std::pair& benchmark) const; void outputBenchmark(const std::pair& benchmark, XMLNode& xmlBenchmark) const; */ }; } // namespace Arc #endif // __ARC_ADLPARSER_H__ nordugrid-arc-5.4.2/src/hed/acc/PaxHeaders.7502/ARC00000644000000000000000000000013213214316024017655 xustar000000000000000030 mtime=1513200660.123748999 30 atime=1513200668.721854157 30 ctime=1513200660.123748999 nordugrid-arc-5.4.2/src/hed/acc/ARC0/0000755000175000002070000000000013214316024020000 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/acc/ARC0/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712225471542022002 xustar000000000000000027 mtime=1381397346.747038 30 atime=1513200593.729936977 30 ctime=1513200660.117748926 nordugrid-arc-5.4.2/src/hed/acc/ARC0/Makefile.am0000644000175000002070000000174612225471542022054 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libaccARC0.la libaccARC0_la_SOURCES = \ SubmitterPluginARC0.cpp SubmitterPluginARC0.h \ DescriptorsARC0.cpp \ JobControllerPluginARC0.cpp JobControllerPluginARC0.h \ FTPControl.cpp FTPControl.h JobStateARC0.cpp JobStateARC0.h libaccARC0_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) \ $(GLOBUS_IO_CFLAGS) $(GLOBUS_FTP_CONTROL_CFLAGS) \ $(AM_CXXFLAGS) libaccARC0_la_LIBADD = \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/globusutils/libarcglobusutils.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(GLOBUS_FTP_CONTROL_LIBS) \ $(GLOBUS_IO_LIBS) $(GLOBUS_COMMON_LIBS) libaccARC0_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/acc/ARC0/PaxHeaders.7502/JobStateARC0.cpp0000644000000000000000000000012212314574062022566 xustar000000000000000027 mtime=1395849266.054561 25 atime=1513200574.3917 30 ctime=1513200660.123748999 nordugrid-arc-5.4.2/src/hed/acc/ARC0/JobStateARC0.cpp0000644000175000002070000000600112314574062022632 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "JobStateARC0.h" namespace Arc { JobState::StateType JobStateARC0::StateMap(const std::string& state) { std::string state_ = state; /* Infosys states (mapped from GM states): ACCEPTING ACCEPTED 
PREPARED SUBMITTING INLRMS: * KILLING EXECUTED KILLED FAILED GM states (either not mapped or somehow obtained directly): ACCEPTED PREPARING SUBMIT INLRMS CANCELING FINISHING FINISHED DELETED PENDING:* */ /// \mapname GM Grid Manager /// \mapnote Prefix "PENDING:" and spaces are ignored when mapping states. if (state_.substr(0,8) == "PENDING:") state_.erase(0,8); // remove spaces because sometimes we may have 'INLRMS: *' std::string::size_type p = 0; while((p = state_.find(' ',p)) != std::string::npos) state_.erase(p,1); /// \mapattr ACCEPTED -> ACCEPTED /// \mapattr ACCEPTING -> ACCEPTED if ((state_ == "ACCEPTED") || (state_ == "ACCEPTING")) return JobState::ACCEPTED; /// \mapattr PREPARING -> PREPARING /// \mapattr PREPARED -> PREPARING else if ((state_ == "PREPARING") || (state_ == "PREPARED")) return JobState::PREPARING; /// \mapattr SUBMIT -> SUBMITTING /// \mapattr SUBMITTING -> SUBMITTING else if ((state_ == "SUBMIT") || (state_ == "SUBMITTING")) return JobState::SUBMITTING; /// \mapattr INLRMS:Q -> QUEUING else if (state_ == "INLRMS:Q") return JobState::QUEUING; /// \mapattr INLRMS:R -> RUNNING else if (state_ == "INLRMS:R") return JobState::RUNNING; /// \mapattr INLRMS:H -> HOLD else if (state_ == "INLRMS:H") return JobState::HOLD; /// \mapattr INLRMS:S -> HOLD else if (state_ == "INLRMS:S") return JobState::HOLD; /// \mapattr INLRMS:E -> FINISHING else if (state_ == "INLRMS:E") return JobState::FINISHING; /// \mapattr INLRMS:O -> HOLD else if (state_ == "INLRMS:O") return JobState::HOLD; /// \mapattr INLRMS* -> QUEUING else if (state_.substr(0,6) == "INLRMS") return JobState::QUEUING; // expect worst ? /// \mapattr FINISHING -> FINISHING /// \mapattr KILLING -> FINISHING /// \mapattr CANCELING -> FINISHING /// \mapattr EXECUTED -> FINISHING else if ((state_ == "FINISHING") || (state_ == "KILLING") || (state_ == "CANCELING") || (state_ == "EXECUTED")) return JobState::FINISHING; /// \mapattr FINISHED -> FINISHED else if (state_ == "FINISHED") return JobState::FINISHED; /// \mapattr KILLED -> KILLED else if (state_ == "KILLED") return JobState::KILLED; /// \mapattr FAILED -> FAILED else if (state_ == "FAILED") return JobState::FAILED; /// \mapattr DELETED -> DELETED else if (state_ == "DELETED") return JobState::DELETED; /// \mapattr "" -> UNDEFINED else if (state_ == "") return JobState::UNDEFINED; /// \mapattr Any other state -> OTHER else return JobState::OTHER; } } nordugrid-arc-5.4.2/src/hed/acc/ARC0/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315721022002 xustar000000000000000030 mtime=1513200593.780937601 30 atime=1513200648.711609424 30 ctime=1513200660.117748926 nordugrid-arc-5.4.2/src/hed/acc/ARC0/Makefile.in0000644000175000002070000007527313214315721022066 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/acc/ARC0 DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libaccARC0_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/globusutils/libarcglobusutils.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) am_libaccARC0_la_OBJECTS = libaccARC0_la-SubmitterPluginARC0.lo \ libaccARC0_la-DescriptorsARC0.lo \ libaccARC0_la-JobControllerPluginARC0.lo \ libaccARC0_la-FTPControl.lo libaccARC0_la-JobStateARC0.lo libaccARC0_la_OBJECTS = $(am_libaccARC0_la_OBJECTS) libaccARC0_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libaccARC0_la_CXXFLAGS) \ $(CXXFLAGS) $(libaccARC0_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) 
$(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libaccARC0_la_SOURCES) DIST_SOURCES = $(libaccARC0_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = 
@GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ 
SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libaccARC0.la libaccARC0_la_SOURCES = \ SubmitterPluginARC0.cpp SubmitterPluginARC0.h \ DescriptorsARC0.cpp \ JobControllerPluginARC0.cpp JobControllerPluginARC0.h \ FTPControl.cpp FTPControl.h JobStateARC0.cpp JobStateARC0.h libaccARC0_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) \ $(GLOBUS_IO_CFLAGS) $(GLOBUS_FTP_CONTROL_CFLAGS) \ $(AM_CXXFLAGS) libaccARC0_la_LIBADD = \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/globusutils/libarcglobusutils.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ 
$(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(GLOBUS_FTP_CONTROL_LIBS) \ $(GLOBUS_IO_LIBS) $(GLOBUS_COMMON_LIBS) libaccARC0_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/acc/ARC0/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/acc/ARC0/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libaccARC0.la: $(libaccARC0_la_OBJECTS) $(libaccARC0_la_DEPENDENCIES) $(libaccARC0_la_LINK) -rpath $(pkglibdir) $(libaccARC0_la_OBJECTS) $(libaccARC0_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccARC0_la-DescriptorsARC0.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccARC0_la-FTPControl.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccARC0_la-JobControllerPluginARC0.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccARC0_la-JobStateARC0.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccARC0_la-SubmitterPluginARC0.Plo@am__quote@ .cpp.o: 
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libaccARC0_la-SubmitterPluginARC0.lo: SubmitterPluginARC0.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC0_la_CXXFLAGS) $(CXXFLAGS) -MT libaccARC0_la-SubmitterPluginARC0.lo -MD -MP -MF $(DEPDIR)/libaccARC0_la-SubmitterPluginARC0.Tpo -c -o libaccARC0_la-SubmitterPluginARC0.lo `test -f 'SubmitterPluginARC0.cpp' || echo '$(srcdir)/'`SubmitterPluginARC0.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccARC0_la-SubmitterPluginARC0.Tpo $(DEPDIR)/libaccARC0_la-SubmitterPluginARC0.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SubmitterPluginARC0.cpp' object='libaccARC0_la-SubmitterPluginARC0.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC0_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccARC0_la-SubmitterPluginARC0.lo `test -f 'SubmitterPluginARC0.cpp' || echo '$(srcdir)/'`SubmitterPluginARC0.cpp libaccARC0_la-DescriptorsARC0.lo: DescriptorsARC0.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC0_la_CXXFLAGS) $(CXXFLAGS) -MT libaccARC0_la-DescriptorsARC0.lo -MD -MP -MF $(DEPDIR)/libaccARC0_la-DescriptorsARC0.Tpo -c -o libaccARC0_la-DescriptorsARC0.lo `test -f 'DescriptorsARC0.cpp' || echo '$(srcdir)/'`DescriptorsARC0.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccARC0_la-DescriptorsARC0.Tpo $(DEPDIR)/libaccARC0_la-DescriptorsARC0.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DescriptorsARC0.cpp' object='libaccARC0_la-DescriptorsARC0.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC0_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccARC0_la-DescriptorsARC0.lo `test -f 'DescriptorsARC0.cpp' || echo '$(srcdir)/'`DescriptorsARC0.cpp libaccARC0_la-JobControllerPluginARC0.lo: 
JobControllerPluginARC0.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC0_la_CXXFLAGS) $(CXXFLAGS) -MT libaccARC0_la-JobControllerPluginARC0.lo -MD -MP -MF $(DEPDIR)/libaccARC0_la-JobControllerPluginARC0.Tpo -c -o libaccARC0_la-JobControllerPluginARC0.lo `test -f 'JobControllerPluginARC0.cpp' || echo '$(srcdir)/'`JobControllerPluginARC0.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccARC0_la-JobControllerPluginARC0.Tpo $(DEPDIR)/libaccARC0_la-JobControllerPluginARC0.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobControllerPluginARC0.cpp' object='libaccARC0_la-JobControllerPluginARC0.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC0_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccARC0_la-JobControllerPluginARC0.lo `test -f 'JobControllerPluginARC0.cpp' || echo '$(srcdir)/'`JobControllerPluginARC0.cpp libaccARC0_la-FTPControl.lo: FTPControl.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC0_la_CXXFLAGS) $(CXXFLAGS) -MT libaccARC0_la-FTPControl.lo -MD -MP -MF $(DEPDIR)/libaccARC0_la-FTPControl.Tpo -c -o libaccARC0_la-FTPControl.lo `test -f 'FTPControl.cpp' || echo '$(srcdir)/'`FTPControl.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccARC0_la-FTPControl.Tpo $(DEPDIR)/libaccARC0_la-FTPControl.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FTPControl.cpp' object='libaccARC0_la-FTPControl.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC0_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccARC0_la-FTPControl.lo `test -f 'FTPControl.cpp' || echo '$(srcdir)/'`FTPControl.cpp libaccARC0_la-JobStateARC0.lo: JobStateARC0.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC0_la_CXXFLAGS) $(CXXFLAGS) -MT libaccARC0_la-JobStateARC0.lo -MD -MP -MF $(DEPDIR)/libaccARC0_la-JobStateARC0.Tpo -c -o libaccARC0_la-JobStateARC0.lo `test -f 'JobStateARC0.cpp' || echo '$(srcdir)/'`JobStateARC0.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccARC0_la-JobStateARC0.Tpo $(DEPDIR)/libaccARC0_la-JobStateARC0.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobStateARC0.cpp' object='libaccARC0_la-JobStateARC0.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccARC0_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccARC0_la-JobStateARC0.lo `test -f 'JobStateARC0.cpp' || echo '$(srcdir)/'`JobStateARC0.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test 
-f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/acc/ARC0/PaxHeaders.7502/SubmitterPluginARC0.cpp0000644000000000000000000000012213024224756024211 xustar000000000000000027 mtime=1481714158.608508 25 atime=1513200574.3947 30 ctime=1513200660.118748938 nordugrid-arc-5.4.2/src/hed/acc/ARC0/SubmitterPluginARC0.cpp0000644000175000002070000003146413024224756024270 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include "SubmitterPluginARC0.h" #include "FTPControl.h" namespace Arc { // Characters to be escaped in LDAP filter according to RFC4515 static const std::string filter_esc("&|=!><~*/()"); Logger SubmitterPluginARC0::logger(Logger::getRootLogger(), "SubmitterPlugin.ARC0"); bool SubmitterPluginARC0::isEndpointNotSupported(const std::string& endpoint) const { const std::string::size_type pos = endpoint.find("://"); return pos != std::string::npos && lower(endpoint.substr(0, pos)) != "gsiftp"; } Plugin* SubmitterPluginARC0::Instance(PluginArgument *arg) { SubmitterPluginArgument *subarg = dynamic_cast(arg); if (!subarg) return NULL; Glib::Module* module = subarg->get_module(); PluginsFactory* factory = subarg->get_factory(); if(!(factory && module)) { logger.msg(ERROR, "Missing reference to factory and/or module. It is unsafe to use Globus in non-persistent mode - SubmitterPlugin for ARC0 is disabled. 
Report to developers."); return NULL; } factory->makePersistent(module); return new SubmitterPluginARC0(*subarg, arg); } SubmissionStatus SubmitterPluginARC0::Submit(const std::list& jobdescs, const std::string& endpoint, EntityConsumer& jc, std::list& notSubmitted) { FTPControl ctrl; URL url((endpoint.find("://") == std::string::npos ? "gsiftp://" : "") + endpoint, false, 2811, "/jobs"); URL infoURL("ldap://" + url.Host(), false, 2135, "/Mds-Vo-name=local,o=Grid"); SubmissionStatus retval; for (std::list::const_iterator it = jobdescs.begin(); it != jobdescs.end(); ++it) { if (!ctrl.Connect(url, *usercfg)) { logger.msg(INFO, "Submit: Failed to connect"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } if (!ctrl.SendCommand("CWD " + url.Path(), usercfg->Timeout())) { logger.msg(INFO, "Submit: Failed sending CWD command"); ctrl.Disconnect(usercfg->Timeout()); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } std::string response; if (!ctrl.SendCommand("CWD new", response, usercfg->Timeout())) { logger.msg(INFO, "Submit: Failed sending CWD new command"); ctrl.Disconnect(usercfg->Timeout()); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } std::string::size_type pos2 = response.rfind('"'); std::string::size_type pos1 = response.rfind('/', pos2 - 1); std::string jobnumber = response.substr(pos1 + 1, pos2 - pos1 - 1); JobDescription preparedjobdesc(*it); if (preparedjobdesc.OtherAttributes["nordugrid:xrsl;clientxrsl"].empty()) preparedjobdesc.UnParse(preparedjobdesc.OtherAttributes["nordugrid:xrsl;clientxrsl"], "nordugrid:xrsl"); preparedjobdesc.OtherAttributes["nordugrid:xrsl;action"] = "request"; preparedjobdesc.OtherAttributes["nordugrid:xrsl;savestate"] = "yes"; preparedjobdesc.OtherAttributes["nordugrid:xrsl;clientsoftware"] = "libarccompute-" VERSION; #ifdef HAVE_GETHOSTNAME char hostname[1024]; gethostname(hostname, 1024); preparedjobdesc.OtherAttributes["nordugrid:xrsl;hostname"] = hostname; #endif if (!preparedjobdesc.Prepare()) { logger.msg(INFO, "Failed to prepare job description."); ctrl.Disconnect(usercfg->Timeout()); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; continue; } std::string jobdescstring; JobDescriptionResult ures = preparedjobdesc.UnParse(jobdescstring, "nordugrid:xrsl", "GRIDMANAGER"); if (!ures) { logger.msg(INFO, "Unable to submit job. 
Job description is not valid in the %s format: %s", "nordugrid:xrsl", ures.str()); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; continue; } if (!ctrl.SendData(jobdescstring, "job", usercfg->Timeout())) { logger.msg(INFO, "Submit: Failed sending job description"); ctrl.Disconnect(usercfg->Timeout()); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } ctrl.Disconnect(usercfg->Timeout()); URL jobid(url); URL ContactString(url); jobid.ChangePath(jobid.Path() + '/' + jobnumber); if (!PutFiles(preparedjobdesc, jobid)) { logger.msg(INFO, "Submit: Failed uploading local input files"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } Job j; // Prepare contact url for information about this job URL infoendpoint(infoURL); infoendpoint.ChangeLDAPFilter("(nordugrid-job-globalid=" + escape_chars(jobid.str(),filter_esc,'\\',false,escape_hex) + ")"); infoendpoint.ChangeLDAPScope(URL::subtree); // Proposed mandatory attributes for ARC 3.0 j.JobID = jobid.fullstr(); j.ServiceInformationURL = infoendpoint; j.ServiceInformationURL.ChangeLDAPFilter(""); j.ServiceInformationInterfaceName = "org.nordugrid.ldapng"; j.JobStatusURL = infoendpoint; j.JobStatusURL.ChangeLDAPFilter("(nordugrid-job-globalid=" + escape_chars(jobid.str(),filter_esc,'\\',false,escape_hex) + ")"); j.JobStatusURL.ChangeLDAPScope(URL::subtree); j.JobStatusInterfaceName = "org.nordugrid.ldapng"; j.JobManagementURL = ContactString; j.JobManagementInterfaceName = "org.nordugrid.gridftpjob"; j.IDFromEndpoint = jobnumber; j.StageInDir = jobid; j.StageOutDir = jobid; j.SessionDir = jobid; AddJobDetails(preparedjobdesc, j); jc.addEntity(j); } return retval; } SubmissionStatus SubmitterPluginARC0::Submit(const std::list& jobdescs, const ExecutionTarget& et, EntityConsumer& jc, std::list& notSubmitted) { SubmissionStatus retval; // gridftp and ldapng intterfaces are bound. So for submiting to // to gridftp presence of ldapng is needed. // This will not help against misbehaving information system // because actual state of interface is not propagated to // OtherEndpoints. But it should prevent submission to sites // where ldapng is explicitly disabled. 
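// Note on the check that follows: the loop scans et.OtherEndpoints for an
// endpoint whose InterfaceName is "org.nordugrid.ldapng" and whose HealthState
// is either "ok" or unset; only when such an endpoint is found does the plugin
// proceed with gridftp submission, otherwise it returns NO_SERVICES.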
bool ldapng_interface_present = false; for (std::list< CountedPointer >::const_iterator it = et.OtherEndpoints.begin(); it != et.OtherEndpoints.end(); it++) { if (((*it)->InterfaceName == "org.nordugrid.ldapng") && (((*it)->HealthState == "ok") || ((*it)->HealthState.empty()))) { ldapng_interface_present = true; break; } } if(!ldapng_interface_present) { logger.msg(INFO, "Submit: service has no suitable information interface - need org.nordugrid.ldapng"); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::NO_SERVICES; return retval; } FTPControl ctrl; URL url(et.ComputingEndpoint->URLString); for (std::list::const_iterator it = jobdescs.begin(); it != jobdescs.end(); ++it) { if (!ctrl.Connect(url, *usercfg)) { logger.msg(INFO, "Submit: Failed to connect"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } if (!ctrl.SendCommand("CWD " + url.Path(), usercfg->Timeout())) { logger.msg(INFO, "Submit: Failed sending CWD command"); ctrl.Disconnect(usercfg->Timeout()); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } std::string response; if (!ctrl.SendCommand("CWD new", response, usercfg->Timeout())) { logger.msg(INFO, "Submit: Failed sending CWD new command"); ctrl.Disconnect(usercfg->Timeout()); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } std::string::size_type pos2 = response.rfind('"'); std::string::size_type pos1 = response.rfind('/', pos2 - 1); std::string jobnumber = response.substr(pos1 + 1, pos2 - pos1 - 1); JobDescription preparedjobdesc(*it); if (preparedjobdesc.OtherAttributes["nordugrid:xrsl;clientxrsl"].empty()) preparedjobdesc.UnParse(preparedjobdesc.OtherAttributes["nordugrid:xrsl;clientxrsl"], "nordugrid:xrsl"); preparedjobdesc.OtherAttributes["nordugrid:xrsl;action"] = "request"; preparedjobdesc.OtherAttributes["nordugrid:xrsl;savestate"] = "yes"; preparedjobdesc.OtherAttributes["nordugrid:xrsl;clientsoftware"] = "libarccompute-" VERSION; #ifdef HAVE_GETHOSTNAME char hostname[1024]; gethostname(hostname, 1024); preparedjobdesc.OtherAttributes["nordugrid:xrsl;hostname"] = hostname; #endif if (!preparedjobdesc.Prepare(et)) { logger.msg(INFO, "Failed to prepare job description to target resources."); ctrl.Disconnect(usercfg->Timeout()); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; continue; } std::string jobdescstring; JobDescriptionResult ures = preparedjobdesc.UnParse(jobdescstring, "nordugrid:xrsl", "GRIDMANAGER"); if (!ures) { logger.msg(INFO, "Unable to submit job. 
Job description is not valid in the %s format: %s", "nordugrid:xrsl", ures.str()); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; continue; } if (!ctrl.SendData(jobdescstring, "job", usercfg->Timeout())) { logger.msg(INFO, "Submit: Failed sending job description"); ctrl.Disconnect(usercfg->Timeout()); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } ctrl.Disconnect(usercfg->Timeout()); URL jobid(url); URL ContactString(url); jobid.ChangePath(jobid.Path() + '/' + jobnumber); if (!PutFiles(preparedjobdesc, jobid)) { logger.msg(INFO, "Submit: Failed uploading local input files"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } // Prepare contact url for information about this job URL infoendpoint; for (std::list< CountedPointer >::const_iterator it = et.OtherEndpoints.begin(); it != et.OtherEndpoints.end(); it++) { if (((*it)->InterfaceName == "org.nordugrid.ldapng") && (((*it)->HealthState == "ok") || ((*it)->HealthState.empty()))) { infoendpoint = URL((*it)->URLString); infoendpoint.ChangeLDAPScope(URL::subtree); break; } } if (!infoendpoint) { // Should not happen infoendpoint = URL("ldap://" + url.Host(), false, 2135, "/Mds-Vo-name=local,o=Grid"); } Job j; // Proposed mandatory attributes for ARC 3.0 j.JobID = jobid.fullstr(); j.ServiceInformationURL = infoendpoint; j.ServiceInformationURL.ChangeLDAPFilter(""); j.ServiceInformationInterfaceName = "org.nordugrid.ldapng"; j.JobStatusURL = infoendpoint; j.JobStatusURL.ChangeLDAPFilter("(nordugrid-job-globalid=" + escape_chars(jobid.str(),filter_esc,'\\',false,escape_hex) + ")"); j.JobStatusURL.ChangeLDAPScope(URL::subtree); j.JobStatusInterfaceName = "org.nordugrid.ldapng"; j.JobManagementURL = ContactString; j.JobManagementInterfaceName = "org.nordugrid.gridftpjob"; j.IDFromEndpoint = jobnumber; j.StageInDir = jobid; j.StageOutDir = jobid; j.SessionDir = jobid; AddJobDetails(preparedjobdesc, j); jc.addEntity(j); } return retval; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/ARC0/PaxHeaders.7502/SubmitterPluginARC0.h0000644000000000000000000000012112060134103023636 xustar000000000000000027 mtime=1354807363.254404 25 atime=1513200574.3977 29 ctime=1513200660.11974895 nordugrid-arc-5.4.2/src/hed/acc/ARC0/SubmitterPluginARC0.h0000644000175000002070000000211712060134103023707 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_SUBMITTERPLUGINARC0_H__ #define __ARC_SUBMITTERPLUGINARC0_H__ #ifdef WIN32 #include #endif #include namespace Arc { class Config; class SubmissionStatus; class SubmitterPluginARC0 : public SubmitterPlugin { public: SubmitterPluginARC0(const UserConfig& usercfg, PluginArgument* parg) : SubmitterPlugin(usercfg, parg) { supportedInterfaces.push_back("org.nordugrid.gridftpjob"); } ~SubmitterPluginARC0() {} static Plugin* Instance(PluginArgument *arg); bool isEndpointNotSupported(const std::string& endpoint) const; virtual SubmissionStatus Submit(const std::list& jobdescs, const std::string& endpoint, EntityConsumer& jc, std::list& notSubmitted); virtual SubmissionStatus Submit(const std::list& jobdesc, const ExecutionTarget& et, EntityConsumer& jc, std::list& notSubmitted); private: static Logger logger; }; } // namespace Arc #endif // __ARC_SUBMITTERPLUGINARC0_H__ 
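// A minimal standalone sketch of the endpoint-scheme check that both
// SubmitterPluginARC0 and JobControllerPluginARC0 implement as
// isEndpointNotSupported(): an endpoint is accepted only when it has no
// explicit scheme (gsiftp is then assumed) or when the scheme is "gsiftp".
// This is an illustration only; the helper to_lower_copy() below is a
// hypothetical stand-in for Arc::lower() and is not part of the ARC API.
#include <algorithm>
#include <cassert>
#include <cctype>
#include <string>

// Lower-case a copy of the string (stand-in for Arc::lower).
static std::string to_lower_copy(std::string s) {
  std::transform(s.begin(), s.end(), s.begin(),
                 [](unsigned char c) { return std::tolower(c); });
  return s;
}

// Mirrors the plugins' isEndpointNotSupported(): true when the endpoint
// carries an explicit scheme other than gsiftp.
static bool endpoint_not_supported(const std::string& endpoint) {
  const std::string::size_type pos = endpoint.find("://");
  return pos != std::string::npos &&
         to_lower_copy(endpoint.substr(0, pos)) != "gsiftp";
}

int main() {
  assert(!endpoint_not_supported("gsiftp://example.org:2811/jobs")); // accepted
  assert(!endpoint_not_supported("example.org"));   // no scheme: gsiftp assumed
  assert(endpoint_not_supported("https://example.org")); // rejected
  return 0;
}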
nordugrid-arc-5.4.2/src/hed/acc/ARC0/PaxHeaders.7502/JobControllerPluginARC0.cpp0000644000000000000000000000012212675602216025013 xustar000000000000000027 mtime=1459029134.924374 25 atime=1513200574.3917 30 ctime=1513200660.120748963 nordugrid-arc-5.4.2/src/hed/acc/ARC0/JobControllerPluginARC0.cpp0000644000175000002070000005013412675602216025065 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef WIN32 #include #endif #include "JobStateARC0.h" #include "JobControllerPluginARC0.h" #include "FTPControl.h" namespace Arc { Logger JobControllerPluginARC0::logger(Logger::getRootLogger(), "JobControllerPlugin.ARC0"); bool JobControllerPluginARC0::isEndpointNotSupported(const std::string& endpoint) const { const std::string::size_type pos = endpoint.find("://"); return pos != std::string::npos && lower(endpoint.substr(0, pos)) != "gsiftp"; } Plugin* JobControllerPluginARC0::Instance(PluginArgument *arg) { JobControllerPluginArgument *jcarg = dynamic_cast(arg); if (!jcarg) return NULL; Glib::Module* module = jcarg->get_module(); PluginsFactory* factory = jcarg->get_factory(); if(!(factory && module)) { logger.msg(ERROR, "Missing reference to factory and/or module. It is unsafe to use Globus in non-persistent mode - SubmitterPlugin for ARC0 is disabled. Report to developers."); return NULL; } factory->makePersistent(module); return new JobControllerPluginARC0(*jcarg, arg); } void JobControllerPluginARC0::UpdateJobs(std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { std::map > jobsbyhost; for (std::list::iterator it = jobs.begin(); it != jobs.end(); ++it) { if (!(*it)->JobStatusURL) { (*it)->JobStatusURL = (*it)->ServiceInformationURL; } URL infoEndpoint = (*it)->JobStatusURL; if (!infoEndpoint) { logger.msg(VERBOSE, "Unable to query job information (%s), invalid URL provided (%s)", (*it)->JobID, infoEndpoint.fullstr()); continue; } jobsbyhost[infoEndpoint.ConnectionURL() + infoEndpoint.Path()].push_back(*it); } for (std::map >::iterator hostit = jobsbyhost.begin(); hostit != jobsbyhost.end(); ++hostit) { std::list &jobsOnHost = hostit->second; while (!jobsOnHost.empty()) { logger.msg(DEBUG, "Jobs left to query: %d", jobsOnHost.size()); // Move the first 1000 element of jobsOnHost into batch std::list batch = std::list(); for (int i = 0; i < 1000; i++) { if (!jobsOnHost.empty()) { batch.push_back(jobsOnHost.front()); jobsOnHost.pop_front(); } } logger.msg(DEBUG, "Querying batch with %d jobs", batch.size()); URL infourl = batch.front()->JobStatusURL; // merge filters std::string filter = "(|"; for (std::list::iterator it = batch.begin(); it != batch.end(); ++it) { filter += (*it)->JobStatusURL.LDAPFilter(); } filter += ")"; infourl.ChangeLDAPFilter(filter); DataBuffer buffer; DataHandle handler(infourl, *usercfg); if (!handler) { logger.msg(INFO, "Can't create information handle - is the ARC LDAP DMC plugin available?"); return; } if (!handler->StartReading(buffer)) continue; int handle; unsigned int length; unsigned long long int offset; std::string result; while (buffer.for_write() || !buffer.eof_read()) if (buffer.for_write(handle, length, offset, true)) { result.append(buffer[handle], length); buffer.is_written(handle); } if (!handler->StopReading()) continue; XMLNode xmlresult(result); XMLNodeList jobinfolist = 
xmlresult.Path("o/Mds-Vo-name/nordugrid-cluster-name/nordugrid-queue-name/nordugrid-info-group-name/nordugrid-job-globalid"); for (std::list::iterator jit = batch.begin(); jit != batch.end(); ++jit) { XMLNodeList::iterator xit = jobinfolist.begin(); for (; xit != jobinfolist.end(); ++xit) { if ((std::string)(*xit)["nordugrid-job-globalid"] == (*jit)->JobID) { break; } } if (xit == jobinfolist.end()) { logger.msg(WARNING, "Job information not found in the information system: %s", (*jit)->JobID); if (Time() - (*jit)->LocalSubmissionTime < 90) logger.msg(WARNING, "This job was very recently " "submitted and might not yet " "have reached the information system"); IDsNotProcessed.push_back((*jit)->JobID); continue; } if ((*xit)["nordugrid-job-status"]) (*jit)->State = JobStateARC0((std::string)(*xit)["nordugrid-job-status"]); if ((*xit)["nordugrid-job-globalowner"]) (*jit)->Owner = (std::string)(*xit)["nordugrid-job-globalowner"]; if ((*xit)["nordugrid-job-execqueue"]) (*jit)->Queue = (std::string)(*xit)["nordugrid-job-execqueue"]; if ((*xit)["nordugrid-job-submissionui"]) (*jit)->SubmissionHost = (std::string)(*xit)["nordugrid-job-submissionui"]; if ((*xit)["nordugrid-job-submissiontime"]) (*jit)->SubmissionTime = (std::string)(*xit)["nordugrid-job-submissiontime"]; if ((*xit)["nordugrid-job-sessiondirerasetime"]) (*jit)->WorkingAreaEraseTime = (std::string)(*xit)["nordugrid-job-sessiondirerasetime"]; if ((*xit)["nordugrid-job-proxyexpirationtime"]) (*jit)->ProxyExpirationTime = (std::string)(*xit)["nordugrid-job-proxyexpirationtime"]; if ((*xit)["nordugrid-job-completiontime"]) (*jit)->EndTime = (std::string)(*xit)["nordugrid-job-completiontime"]; if ((*xit)["nordugrid-job-cpucount"]) (*jit)->RequestedSlots = stringtoi((*xit)["nordugrid-job-cpucount"]); if ((*xit)["nordugrid-job-usedcputime"]) (*jit)->UsedTotalCPUTime = Period((std::string)(*xit)["nordugrid-job-usedcputime"], PeriodMinutes); if ((*xit)["nordugrid-job-usedwalltime"]) (*jit)->UsedTotalWallTime = Period((std::string)(*xit)["nordugrid-job-usedwalltime"], PeriodMinutes); if ((*xit)["nordugrid-job-exitcode"]) (*jit)->ExitCode = stringtoi((*xit)["nordugrid-job-exitcode"]); if ((*xit)["Mds-validfrom"]) { (*jit)->CreationTime = (std::string)((*xit)["Mds-validfrom"]); if ((*xit)["Mds-validto"]) { Time Validto = (std::string)((*xit)["Mds-validto"]); (*jit)->Validity = Validto - (*jit)->CreationTime; } } if ((*xit)["nordugrid-job-stdout"]) (*jit)->StdOut = (std::string)((*xit)["nordugrid-job-stdout"]); if ((*xit)["nordugrid-job-stderr"]) (*jit)->StdErr = (std::string)((*xit)["nordugrid-job-stderr"]); if ((*xit)["nordugrid-job-stdin"]) (*jit)->StdIn = (std::string)((*xit)["nordugrid-job-stdin"]); if ((*xit)["nordugrid-job-reqcputime"]) (*jit)->RequestedTotalCPUTime = Period((std::string)((*xit)["nordugrid-job-reqcputime"]), PeriodMinutes); if ((*xit)["nordugrid-job-reqwalltime"]) (*jit)->RequestedTotalWallTime = Period((std::string)((*xit)["nordugrid-job-reqwalltime"]), PeriodMinutes); if ((*xit)["nordugrid-job-rerunable"]) (*jit)->RestartState = JobStateARC0((std::string)((*xit)["nordugrid-job-rerunable"])); if ((*xit)["nordugrid-job-queuerank"]) (*jit)->WaitingPosition = stringtoi((*xit)["nordugrid-job-queuerank"]); if ((*xit)["nordugrid-job-comment"]) (*jit)->OtherMessages.push_back( (std::string)((*xit)["nordugrid-job-comment"])); if ((*xit)["nordugrid-job-usedmem"]) (*jit)->UsedMainMemory = stringtoi((*xit)["nordugrid-job-usedmem"]); if ((*xit)["nordugrid-job-errors"]) for (XMLNode n = (*xit)["nordugrid-job-errors"]; n; ++n) 
(*jit)->Error.push_back((std::string)n); if ((*xit)["nordugrid-job-jobname"]) (*jit)->Name = (std::string)((*xit)["nordugrid-job-jobname"]); if ((*xit)["nordugrid-job-gmlog"]) (*jit)->LogDir = (std::string)((*xit)["nordugrid-job-gmlog"]); if ((*xit)["nordugrid-job-clientsofware"]) (*jit)->SubmissionClientName = (std::string)((*xit)["nordugrid-job-clientsoftware"]); if ((*xit)["nordugrid-job-executionnodes"]) for (XMLNode n = (*xit)["nordugrid-job-executionnodes"]; n; ++n) (*jit)->ExecutionNode.push_back((std::string)n); if ((*xit)["nordugrid-job-runtimeenvironment"]) for (XMLNode n = (*xit)["nordugrid-job-runtimeenvironment"]; n; ++n) (*jit)->RequestedApplicationEnvironment.push_back((std::string)n); jobinfolist.erase(xit); IDsProcessed.push_back((*jit)->JobID); } } } } bool JobControllerPluginARC0::CleanJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { bool ok = true; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { Job& job = **it; logger.msg(VERBOSE, "Cleaning job: %s", job.JobID); FTPControl ctrl; if (!ctrl.Connect(URL(job.JobID), *usercfg)) { logger.msg(INFO, "Failed to connect for job cleaning"); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } std::string path = URL(job.JobID).Path(); std::string::size_type pos = path.rfind('/'); std::string jobpath = path.substr(0, pos); std::string jobidnum = path.substr(pos + 1); if (!ctrl.SendCommand("CWD " + jobpath, usercfg->Timeout())) { logger.msg(INFO, "Failed sending CWD command for job cleaning"); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } if (!ctrl.SendCommand("RMD " + jobidnum, usercfg->Timeout())) { logger.msg(INFO, "Failed sending RMD command for job cleaning"); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } if (!ctrl.Disconnect(usercfg->Timeout())) { logger.msg(INFO, "Failed to disconnect after job cleaning"); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } IDsProcessed.push_back(job.JobID); logger.msg(VERBOSE, "Job cleaning successful"); } return ok; } bool JobControllerPluginARC0::CancelJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { bool ok = true; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { Job& job = **it; logger.msg(VERBOSE, "Cancelling job: %s", job.JobID); FTPControl ctrl; if (!ctrl.Connect(URL(job.JobID), *usercfg)) { logger.msg(INFO, "Failed to connect for job cancelling"); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } std::string path = URL(job.JobID).Path(); std::string::size_type pos = path.rfind('/'); std::string jobpath = path.substr(0, pos); std::string jobidnum = path.substr(pos + 1); if (!ctrl.SendCommand("CWD " + jobpath, usercfg->Timeout())) { logger.msg(INFO, "Failed sending CWD command for job cancelling"); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } if (!ctrl.SendCommand("DELE " + jobidnum, usercfg->Timeout())) { logger.msg(INFO, "Failed sending DELE command for job cancelling"); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } if (!ctrl.Disconnect(usercfg->Timeout())) { logger.msg(INFO, "Failed to disconnect after job cancelling"); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } IDsProcessed.push_back(job.JobID); job.State = JobStateARC0("KILLED"); logger.msg(VERBOSE, "Job cancelling successful"); } return ok; } bool JobControllerPluginARC0::RenewJobs(const std::list& jobs, std::list& IDsProcessed, std::list& 
IDsNotProcessed, bool isGrouped) const { bool ok = true; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { Job& job = **it; logger.msg(VERBOSE, "Renewing credentials for job: %s", job.JobID); FTPControl ctrl; if (!ctrl.Connect(URL(job.JobID), *usercfg)) { logger.msg(INFO, "Failed to connect for credential renewal"); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } std::string path = URL(job.JobID).Path(); std::string::size_type pos = path.rfind('/'); std::string jobpath = path.substr(0, pos); std::string jobidnum = path.substr(pos + 1); if (!ctrl.SendCommand("CWD " + jobpath, usercfg->Timeout())) { logger.msg(INFO, "Failed sending CWD command for credentials renewal"); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } if (!ctrl.SendCommand("CWD " + jobidnum, usercfg->Timeout())) { logger.msg(INFO, "Failed sending CWD command for credentials renewal"); ok = false; IDsNotProcessed.push_back(job.JobID); } if (!ctrl.Disconnect(usercfg->Timeout())) { logger.msg(INFO, "Failed to disconnect after credentials renewal"); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } IDsProcessed.push_back(job.JobID); logger.msg(VERBOSE, "Renewal of credentials was successful"); } return ok; } bool JobControllerPluginARC0::ResumeJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { bool ok = true; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { Job& job = **it; if (!job.RestartState) { logger.msg(INFO, "Job %s does not report a resumable state", job.JobID); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } // dump rsl into temporary file std::string urlstr = job.JobID; std::string::size_type pos = urlstr.rfind('/'); if (pos == std::string::npos || pos == 0) { logger.msg(INFO, "Illegal jobID specified (%s)", job.JobID); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } std::string jobnr = urlstr.substr(pos + 1); urlstr = urlstr.substr(0, pos) + "/new/action"; logger.msg(VERBOSE, "HER: %s", urlstr); std::string rsl("&(action=restart)(jobid=" + jobnr + ")"); std::string filename = Glib::build_filename(Glib::get_tmp_dir(), "arcresume.XXXXXX"); if (!TmpFileCreate(filename, rsl)) { logger.msg(INFO, "Could not create temporary file: %s", filename); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } // Send temporary file to cluster DataMover mover; FileCache cache; URL source_url(filename); URL dest_url(urlstr); dest_url.AddOption("checksum=no"); DataHandle source(source_url, *usercfg); DataHandle destination(dest_url, *usercfg); source->SetTries(1); destination->SetTries(1); DataStatus res = mover.Transfer(*source, *destination, cache, URLMap(), 0, 0, 0, usercfg->Timeout()); // Cleaning up tmp file source->Remove(); if (!res.Passed()) { logger.msg(INFO, "Current transfer FAILED: %s", std::string(res)); mover.Delete(*destination); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } else { logger.msg(INFO, "Current transfer complete"); } IDsProcessed.push_back(job.JobID); logger.msg(VERBOSE, "Job resuming successful"); } return ok; } bool JobControllerPluginARC0::GetURLToJobResource(const Job& job, Job::ResourceType resource, URL& url) const { url = URL(job.JobID); switch (resource) { case Job::STDIN: url.ChangePath(url.Path() + '/' + job.StdIn); break; case Job::STDOUT: url.ChangePath(url.Path() + '/' + job.StdOut); break; case Job::STDERR: url.ChangePath(url.Path() + '/' + job.StdErr); break; case Job::STAGEINDIR: case 
Job::STAGEOUTDIR: case Job::SESSIONDIR: break; case Job::JOBLOG: case Job::JOBDESCRIPTION: std::string path = url.Path(); path.insert(path.rfind('/'), "/info"); url.ChangePath(path + (Job::JOBLOG ? "/errors" : "/description")); break; } return true; } bool JobControllerPluginARC0::GetJobDescription(const Job& job, std::string& desc_str) const { std::string jobid = job.JobID; logger.msg(VERBOSE, "Trying to retrieve job description of %s from " "computing resource", jobid); std::string::size_type pos = jobid.rfind("/"); if (pos == std::string::npos) { logger.msg(INFO, "invalid jobID: %s", jobid); return false; } std::string cluster = jobid.substr(0, pos); std::string shortid = jobid.substr(pos + 1); // Transfer job description URL source_url; GetURLToJobResource(job, Job::JOBDESCRIPTION, source_url); std::string tmpfile = shortid + G_DIR_SEPARATOR_S + "description"; std::string localfile = Glib::build_filename(Glib::get_tmp_dir(), tmpfile); URL dest_url(localfile); if (!Job::CopyJobFile(*usercfg, source_url, dest_url)) { return false; } // Read job description from file std::ifstream descriptionfile(localfile.c_str()); if (!descriptionfile) { logger.msg(INFO, "Can not open job description file: %s", localfile); return false; } descriptionfile.seekg(0, std::ios::end); std::streamsize length = descriptionfile.tellg(); descriptionfile.seekg(0, std::ios::beg); char *buffer = new char[length + 1]; descriptionfile.read(buffer, length); descriptionfile.close(); buffer[length] = '\0'; desc_str = (std::string)buffer; //Cleaning up delete[] buffer; // Extracting original client xrsl pos = desc_str.find("clientxrsl"); if (pos != std::string::npos) { logger.msg(VERBOSE, "clientxrsl found"); std::string::size_type pos1 = desc_str.find("&", pos); if (pos1 == std::string::npos) { logger.msg(INFO, "could not find start of clientxrsl"); return false; } std::string::size_type pos2 = desc_str.find(")\"", pos1); if (pos2 == std::string::npos) { logger.msg(INFO, "could not find end of clientxrsl"); return false; } desc_str.erase(pos2 + 1); desc_str.erase(0, pos1); for (std::string::size_type i = 0; i != std::string::npos;) { i = desc_str.find("\"\"", i); if (i != std::string::npos) { desc_str.erase(i, 1); // let's step over the second doubleqoute in order not to reduce """" to " if (i != std::string::npos) i++; } } logger.msg(DEBUG, "Job description: %s", desc_str); } else { logger.msg(INFO, "clientxrsl not found"); return false; } std::list descs; if (!JobDescription::Parse(desc_str, descs) || descs.empty()) { logger.msg(INFO, "Invalid JobDescription: %s", desc_str); return false; } logger.msg(VERBOSE, "Valid JobDescription found"); return true; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/ARC0/PaxHeaders.7502/JobControllerPluginARC0.h0000644000000000000000000000012212051675267024464 xustar000000000000000027 mtime=1353153207.099019 25 atime=1513200574.3957 30 ctime=1513200660.121748975 nordugrid-arc-5.4.2/src/hed/acc/ARC0/JobControllerPluginARC0.h0000644000175000002070000000325312051675267024536 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOBCONTROLLERARC0_H__ #define __ARC_JOBCONTROLLERARC0_H__ #include namespace Arc { class URL; class JobControllerPluginARC0 : public JobControllerPlugin { public: JobControllerPluginARC0(const UserConfig& usercfg, PluginArgument* parg) : JobControllerPlugin(usercfg, parg) { supportedInterfaces.push_back("org.nordugrid.gridftpjob"); } ~JobControllerPluginARC0() {} static Plugin* Instance(PluginArgument *arg); bool 
isEndpointNotSupported(const std::string& endpoint) const; virtual void UpdateJobs(std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool CleanJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool CancelJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool RenewJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool ResumeJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool GetURLToJobResource(const Job& job, Job::ResourceType resource, URL& url) const; virtual bool GetJobDescription(const Job& job, std::string& desc_str) const; private: static Logger logger; }; } // namespace Arc #endif // __ARC_JOBCONTROLLERARC0_H__ nordugrid-arc-5.4.2/src/hed/acc/ARC0/PaxHeaders.7502/FTPControl.cpp0000644000000000000000000000012112243105155022430 xustar000000000000000026 mtime=1384942189.92663 25 atime=1513200574.3977 30 ctime=1513200660.121748975 nordugrid-arc-5.4.2/src/hed/acc/ARC0/FTPControl.cpp0000644000175000002070000005164312243105155022511 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "FTPControl.h" namespace Arc { static bool activated_ = false; class FTPControl::CBArg { public: Arc::SimpleCondition cond; std::string response; bool responseok; bool data; bool ctrl; bool close; CBArg(void); std::string Response(); }; FTPControl::CBArg::CBArg(void): responseok(true), data(true), ctrl(true), close(true) {} std::string FTPControl::CBArg::Response() { cond.lock(); std::string res = response; cond.unlock(); return res; } static void CloseCallback(void *arg, globus_ftp_control_handle_t*, globus_object_t* /* error */, globus_ftp_control_response_t* /* response */) { FTPControl::CBArg *cb = (FTPControl::CBArg*)arg; // TODO: handle error - if ever can happen here cb->close = true; cb->cond.signal(); } static void ControlCallback(void *arg, globus_ftp_control_handle_t*, globus_object_t *error, globus_ftp_control_response_t *response) { FTPControl::CBArg *cb = (FTPControl::CBArg*)arg; if (error != GLOBUS_SUCCESS) { cb->response = Arc::globus_object_to_string(error); cb->responseok = false; } if (response && response->response_buffer) { int len = response->response_length; while (len > 0 && (response->response_buffer[len - 1] == '\r' || response->response_buffer[len - 1] == '\n' || response->response_buffer[len - 1] == '\0')) len--; cb->cond.lock(); cb->response.assign((const char*)response->response_buffer, len); switch (response->response_class) { case GLOBUS_FTP_POSITIVE_PRELIMINARY_REPLY: case GLOBUS_FTP_POSITIVE_COMPLETION_REPLY: case GLOBUS_FTP_POSITIVE_INTERMEDIATE_REPLY: cb->responseok = true; break; default: cb->responseok = false; break; } cb->cond.unlock(); } cb->ctrl = true; cb->cond.signal(); } static void DataCloseCallback(void *arg, globus_ftp_control_handle_t*, globus_object_t *error) { ControlCallback(arg, NULL, error, NULL); } static void ConnectCallback(void *arg, globus_ftp_control_handle_t*, globus_object_t *error, globus_ftp_control_response_t *response) { ControlCallback(arg,NULL,error,response); } static void DataConnectCallback(void *arg, globus_ftp_control_handle_t*, unsigned int, globus_bool_t, globus_object_t*) { 
FTPControl::CBArg *cb = (FTPControl::CBArg*)arg; cb->data = true; cb->cond.signal(); } static void ReadWriteCallback(void *arg, globus_ftp_control_handle_t*, globus_object_t*, globus_byte_t*, globus_size_t, globus_off_t, globus_bool_t) { FTPControl::CBArg *cb = (FTPControl::CBArg*)arg; cb->data = true; cb->cond.signal(); } Logger FTPControl::logger(Logger::getRootLogger(), "FTPControl"); FTPControl::FTPControl() { connected = false; cb = new CBArg; if(!activated_) { OpenSSLInit(); #ifdef HAVE_GLOBUS_THREAD_SET_MODEL globus_thread_set_model("pthread"); #endif GlobusPrepareGSSAPI(); GlobusModuleActivate(GLOBUS_FTP_CONTROL_MODULE); activated_ = GlobusRecoverProxyOpenSSL(); } } FTPControl::~FTPControl() { Disconnect(10); // Not deactivating Globus - that may be dangerous in some cases. // Deactivation also not needeed because this plugin is persistent. //globus_module_deactivate(GLOBUS_FTP_CONTROL_MODULE); delete cb; } bool FTPControl::Connect(const URL& url, const UserConfig& uc) { int timeout = uc.Timeout(); bool timedin; GlobusResult result; result = globus_ftp_control_handle_init(&control_handle); if (!result) { logger.msg(VERBOSE, "Connect: Failed to init handle: %s", result.str()); return false; } result = globus_ftp_control_ipv6_allow(&control_handle, GLOBUS_TRUE); if (!result) { logger.msg(VERBOSE, "Failed to enable IPv6: %s", result.str()); } cb->ctrl = false; connected = true; result = globus_ftp_control_connect(&control_handle, const_cast(url.Host().c_str()), url.Port(), &ConnectCallback, cb); if (!result) { logger.msg(VERBOSE, "Connect: Failed to connect: %s", result.str()); connected = false; return false; } while (!cb->ctrl) { timedin = cb->cond.wait(timeout * 1000); if (!timedin) { logger.msg(VERBOSE, "Connect: Connecting timed out after %d ms", timeout * 1000); Disconnect(timeout); return false; } } if (!cb->responseok) { logger.msg(VERBOSE, "Connect: Failed to connect: %s", cb->Response()); Disconnect(timeout); return false; } GSSCredential handle(uc); globus_ftp_control_auth_info_t auth; result = globus_ftp_control_auth_info_init(&auth, handle, GLOBUS_TRUE, const_cast("ftp"), const_cast("user@"), GLOBUS_NULL, GLOBUS_NULL); if (!result) { logger.msg(VERBOSE, "Connect: Failed to init auth info handle: %s", result.str()); Disconnect(timeout); return false; } cb->ctrl = false; result = globus_ftp_control_authenticate(&control_handle, &auth, GLOBUS_TRUE, &ControlCallback, cb); if (!result) { logger.msg(VERBOSE, "Connect: Failed authentication: %s", result.str()); Disconnect(timeout); return false; } while (!cb->ctrl) { timedin = cb->cond.wait(timeout * 1000); if (!timedin) { logger.msg(VERBOSE, "Connect: Authentication timed out after %d ms", timeout * 1000); Disconnect(timeout); return false; } } if (!cb->responseok) { logger.msg(VERBOSE, "Connect: Failed authentication: %s", cb->Response()); Disconnect(timeout); return false; } return true; } // end Connect bool FTPControl::SendCommand(const std::string& cmd, int timeout) { bool timedin; GlobusResult result; logger.msg(DEBUG, "SendCommand: Command: %s", cmd); cb->ctrl = false; result = globus_ftp_control_send_command(&control_handle, cmd.c_str(), &ControlCallback, cb); if (!result) { logger.msg(VERBOSE, "SendCommand: Failed: %s", result.str()); return false; } while (!cb->ctrl) { timedin = cb->cond.wait(timeout * 1000); if (!timedin) { logger.msg(VERBOSE, "SendCommand: Timed out after %d ms", timeout * 1000); return false; } } if (!cb->responseok) { logger.msg(VERBOSE, "SendCommand: Failed: %s", cb->Response()); return false; } 
logger.msg(DEBUG, "SendCommand: Response: %s", cb->Response()); return true; } // end SendCommand bool FTPControl::SendCommand(const std::string& cmd, std::string& response, int timeout) { bool timedin; GlobusResult result; cb->ctrl = false; logger.msg(DEBUG, "SendCommand: Command: %s", cmd); result = globus_ftp_control_send_command(&control_handle, cmd.c_str(), &ControlCallback, cb); if (!result) { logger.msg(VERBOSE, "SendCommand: Failed: %s", result.str()); return false; } while (!cb->ctrl) { timedin = cb->cond.wait(timeout * 1000); if (!timedin) { logger.msg(VERBOSE, "SendCommand: Timed out after %d ms", timeout * 1000); return false; } } if (!cb->responseok) { logger.msg(VERBOSE, "SendCommand: Failed: %s", cb->Response()); return false; } response = cb->Response(); logger.msg(VERBOSE, "SendCommand: Response: %s", response); return true; } // end SendCommand bool FTPControl::SetupPASV(int timeout) { GlobusResult result; std::string response; globus_ftp_control_host_port_t passive_addr; passive_addr.port = 0; passive_addr.hostlen = 0; // Try EPSV first to make it work over IPv6 if (!SendCommand("EPSV", response, timeout)) { // Now try PASV. It will fail on IPv6 unless server provides IPv4 data channel. if (!SendCommand("PASV", response, timeout)) { logger.msg(VERBOSE, "SendData: Failed sending EPSV and PASV commands"); return false; } std::string::size_type pos1 = response.find('('); if (pos1 == std::string::npos) { logger.msg(VERBOSE, "SendData: Server PASV response parsing failed: %s", response); return false; } std::string::size_type pos2 = response.find(')', pos1 + 1); if (pos2 == std::string::npos) { logger.msg(VERBOSE, "SendData: Server PASV response parsing failed: %s", response); return false; } unsigned short port_low, port_high; if (sscanf(response.substr(pos1 + 1, pos2 - pos1 - 1).c_str(), "%i,%i,%i,%i,%hu,%hu", &passive_addr.host[0], &passive_addr.host[1], &passive_addr.host[2], &passive_addr.host[3], &port_high, &port_low) == 6) { passive_addr.port = 256 * port_high + port_low; passive_addr.hostlen = 4; } else { logger.msg(VERBOSE, "SendData: Server PASV response parsing failed: %s", response); return false; } } else { // Successful EPSV - response is (|||port|) // Currently more complex responses with protocol and host // are not supported. 
std::string::size_type pos1 = response.find('('); if (pos1 == std::string::npos) { logger.msg(VERBOSE, "SendData: Server EPSV response parsing failed: %s", response); return false; } std::string::size_type pos2 = response.find(')', pos1 + 1); if (pos2 == std::string::npos) { logger.msg(VERBOSE, "SendData: Server EPSV response parsing failed: %s", response); return false; } std::string sresp = response.substr(pos1 + 1, pos2 - pos1 - 1); char sep = sresp[0]; if(!sep) { logger.msg(VERBOSE, "SendData: Server EPSV response parsing failed: %s", response); return false; } char* lsep = NULL; if((sresp[1] != sep) || (sresp[2] != sep) || ((lsep = (char*)strchr(sresp.c_str()+3,sep)) == NULL)) { logger.msg(VERBOSE, "SendData: Server EPSV response parsing failed: %s", response); return false; } *lsep = 0; passive_addr.port = strtoul(sresp.c_str()+3,&lsep,10); if(passive_addr.port == 0) { logger.msg(VERBOSE, "SendData: Server EPSV response port parsing failed: %s", response); return false; } // Apply control connection address unsigned short local_port; if(!(result = globus_io_tcp_get_remote_address_ex(&(control_handle.cc_handle.io_handle), passive_addr.host,&passive_addr.hostlen,&local_port))) { std::string globus_err(result.str()); logger.msg(VERBOSE, "SendData: Failed to apply local address to data connection: %s", globus_err); return false; } } if (passive_addr.hostlen == 0) { logger.msg(VERBOSE, "SendData: Can't parse host and/or port in response to EPSV/PASV: %s", response); return false; } if (passive_addr.hostlen == 4) { logger.msg(VERBOSE, "SendData: Data channel: %d.%d.%d.%d:%d", passive_addr.host[0], passive_addr.host[1], passive_addr.host[2], passive_addr.host[3], passive_addr.port); } else { char buf[8*5]; snprintf(buf,sizeof(buf),"%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x", passive_addr.host[0]<<8 | passive_addr.host[1], passive_addr.host[2]<<8 | passive_addr.host[3], passive_addr.host[4]<<8 | passive_addr.host[5], passive_addr.host[6]<<8 | passive_addr.host[7], passive_addr.host[8]<<8 | passive_addr.host[9], passive_addr.host[10]<<8 | passive_addr.host[11], passive_addr.host[12]<<8 | passive_addr.host[13], passive_addr.host[14]<<8 | passive_addr.host[15]); buf[sizeof(buf)-1] = 0; logger.msg(VERBOSE, "SendData: Data channel: [%s]:%d", buf, passive_addr.port); } result = globus_ftp_control_local_port(&control_handle, &passive_addr); if (!result) { logger.msg(VERBOSE, "SendData: Local port failed: %s", result.str()); return false; } /* it looks like _pasv is not enough for connection - start reading immediately data_callback_status = (callback_status_t)CALLBACK_NOTREADY; if (globus_ftp_control_data_connect_read(handle, &list_conn_callback, this) != GLOBUS_SUCCESS) { logger.msg(INFO, "Failed to open data channel"); result.SetDesc("Failed to open data channel to "+urlstr); pasv_set = false; return result; } */ return true; } bool FTPControl::SendData(const std::string& data, const std::string& filename, int timeout) { bool timedin; GlobusResult result; if (!SendCommand("DCAU N", timeout)) { logger.msg(VERBOSE, "SendData: Failed sending DCAU command"); return false; } if (!SendCommand("TYPE I", timeout)) { logger.msg(VERBOSE, "SendData: Failed sending TYPE command"); return false; } if(!SetupPASV(timeout)) return false; result = globus_ftp_control_local_type(&control_handle, GLOBUS_FTP_CONTROL_TYPE_IMAGE, 0); if (!result) { logger.msg(VERBOSE, "SendData: Local type failed: %s", result.str()); return false; } cb->ctrl = false; cb->data = false; result = 
globus_ftp_control_send_command(&control_handle, ("STOR " + filename).c_str(), &ControlCallback, cb); if (!result) { logger.msg(VERBOSE, "SendData: Failed sending STOR command: %s", result.str()); return false; } result = globus_ftp_control_data_connect_write(&control_handle, &DataConnectCallback, cb); if (!result) { logger.msg(VERBOSE, "SendData: Data connect write failed: %s", result.str()); return false; } while (!cb->data) { timedin = cb->cond.wait(timeout * 1000); if (!timedin) { logger.msg(VERBOSE, "SendData: Data connect write timed out after %d ms", timeout * 1000); return false; } } while (!cb->ctrl) { timedin = cb->cond.wait(timeout * 1000); if (!timedin) { logger.msg(VERBOSE, "SendData: Data connect write timed out after %d ms", timeout * 1000); return false; } } if (!cb->responseok) { logger.msg(VERBOSE, "SendData: Data connect write failed: %s", cb->Response()); return false; } cb->data = false; cb->ctrl = false; result = globus_ftp_control_data_write(&control_handle, (globus_byte_t*)data.c_str(), data.size(), 0, GLOBUS_TRUE, &ReadWriteCallback, cb); if (!result) { logger.msg(VERBOSE, "SendData: Data write failed: %s", result.str()); return false; } while (!cb->data) { timedin = cb->cond.wait(timeout * 1000); if (!timedin) { logger.msg(VERBOSE, "SendData: Data write timed out after %d ms", timeout * 1000); return false; } } while (!cb->ctrl) { timedin = cb->cond.wait(timeout * 1000); if (!timedin) { logger.msg(VERBOSE, "SendData: Data write timed out after %d ms", timeout * 1000); return false; } } if (!cb->responseok) { logger.msg(VERBOSE, "SendData: Data write failed: %s", cb->Response()); return false; } return true; } // end SendData bool FTPControl::Disconnect(int timeout) { if(!connected) return true; connected = false; GlobusResult result; // Do all posible to stop communication cb->ctrl = false; result = globus_ftp_control_data_force_close(&control_handle, &DataCloseCallback, cb); if (!result) { // Maybe connection is already lost logger.msg(VERBOSE, "Disconnect: Failed aborting - ignoring: %s", result.str()); } else while (!cb->ctrl) { if(!cb->cond.wait(timeout * 1000)) { logger.msg(VERBOSE, "Disconnect: Data close timed out after %d ms", timeout * 1000); } } cb->ctrl = false; result = globus_ftp_control_abort(&control_handle, &ControlCallback, cb); if (!result) { // Maybe connection is already lost logger.msg(VERBOSE, "Disconnect: Failed aborting - ignoring: %s", result.str()); } else while (!cb->ctrl) { if(!cb->cond.wait(timeout * 1000)) { logger.msg(VERBOSE, "Disconnect: Abort timed out after %d ms", timeout * 1000); } } cb->ctrl = false; result = globus_ftp_control_quit(&control_handle, &ControlCallback, cb); if (!result) { // Maybe connection is already lost logger.msg(VERBOSE, "Disconnect: Failed quitting - ignoring: %s", result.str()); } else while (!cb->ctrl) { if(!cb->cond.wait(timeout * 1000)) { logger.msg(VERBOSE, "Disconnect: Quitting timed out after %d ms", timeout * 1000); } } cb->close = false; result = globus_ftp_control_force_close(&control_handle, &CloseCallback, cb); if (!result) { // Assuming only reason for failure here is that connection is // already closed logger.msg(DEBUG, "Disconnect: Failed closing - ignoring: %s", result.str()); } else while (!cb->close) { // Need to wait for callback to make sure handle destruction will work // Hopefully forced close should never take long time if(!cb->cond.wait(timeout * 1000)) { logger.msg(VERBOSE, "Disconnect: Closing timed out after %d ms", timeout * 1000); } } bool first_time = true; time_t start_time 
= time(NULL); // Waiting for stalled callbacks // If globus_ftp_control_handle_destroy is called with dc_handle // state not GLOBUS_FTP_DATA_STATE_NONE then handle is messed // and next call causes assertion. So here we are waiting for // proper state. globus_mutex_lock(&(control_handle.cc_handle.mutex)); while ((control_handle.dc_handle.state != GLOBUS_FTP_DATA_STATE_NONE) || (control_handle.cc_handle.cc_state != GLOBUS_FTP_CONTROL_UNCONNECTED)) { if(first_time) { logger.msg(VERBOSE, "Disconnect: waiting for globus handle to settle"); first_time = false; } //if((control_handle.cc_handle.cc_state == GLOBUS_FTP_CONTROL_UNCONNECTED) && // (control_handle.dc_handle.state == GLOBUS_FTP_DATA_STATE_CLOSING)) { // logger.msg(VERBOSE, "Disconnect: Tweaking Globus to think data connection is closed"); // control_handle.dc_handle.state = GLOBUS_FTP_DATA_STATE_NONE; // break; //} globus_mutex_unlock(&(control_handle.cc_handle.mutex)); cb->cond.wait(1000); globus_mutex_lock(&(control_handle.cc_handle.mutex)); // Protection against broken Globus if(((unsigned int)(time(NULL)-start_time)) > 60) { logger.msg(VERBOSE, "Disconnect: globus handle is stuck."); break; } } globus_mutex_unlock(&(control_handle.cc_handle.mutex)); if(!(result = globus_ftp_control_handle_destroy(&control_handle))) { // This situation can't be fixed because call to globus_ftp_control_handle_destroy // makes handle unusable even if it fails. logger.msg(VERBOSE, "Disconnect: Failed destroying handle: %s. Can't handle such situation.",result.str()); cb = NULL; } else if(!first_time) { logger.msg(VERBOSE, "Disconnect: handle destroyed."); } return true; } // end Disconnect } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/ARC0/PaxHeaders.7502/JobStateARC0.h0000644000000000000000000000012212045235201022221 xustar000000000000000027 mtime=1351957121.246634 25 atime=1513200574.3947 30 ctime=1513200660.123748999 nordugrid-arc-5.4.2/src/hed/acc/ARC0/JobStateARC0.h0000644000175000002070000000055012045235201022270 0ustar00mockbuildmock00000000000000#ifndef __ARC_JOBSTATEARC0_H__ #define __ARC_JOBSTATEARC0_H__ #include namespace Arc { class JobStateARC0 : public JobState { public: JobStateARC0(const std::string& state) : JobState(state, &StateMap) {} static JobState::StateType StateMap(const std::string& state); }; } #endif // __ARC_JOBSTATEARC0_H__ nordugrid-arc-5.4.2/src/hed/acc/ARC0/PaxHeaders.7502/FTPControl.h0000644000000000000000000000012112243105155022075 xustar000000000000000026 mtime=1384942189.92663 25 atime=1513200574.4007 30 ctime=1513200660.122748987 nordugrid-arc-5.4.2/src/hed/acc/ARC0/FTPControl.h0000644000175000002070000000154212243105155022147 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_FTPCONTROL_H__ #define __ARC_FTPCONTROL_H__ #include #include #include namespace Arc { class Logger; class FTPControl { public: FTPControl(); ~FTPControl(); bool Connect(const URL& url, const UserConfig& uc); bool SendCommand(const std::string& cmd, int timeout); bool SendCommand(const std::string& cmd, std::string& response, int timeout); bool SendData(const std::string& data, const std::string& filename, int timeout); bool Disconnect(int timeout); class CBArg; private: static Logger logger; globus_ftp_control_handle_t control_handle; CBArg* cb; bool connected; bool SetupPASV(int timeout); }; } // namespace Arc #endif // __ARC_FTPCONTROL_H__ nordugrid-arc-5.4.2/src/hed/acc/ARC0/PaxHeaders.7502/README0000644000000000000000000000012211745106052020616 xustar000000000000000027 mtime=1335135274.344052 25 
atime=1513200574.4007 30 ctime=1513200660.116748914 nordugrid-arc-5.4.2/src/hed/acc/ARC0/README0000644000175000002070000000031211745106052020661 0ustar00mockbuildmock00000000000000Arc Client Component (ACC) plugins for supporting ARC classic (ARC0) Implements the following specialized classes: o JobControllerPluginARC0 o SubmitterARC0 Additional support classes: o FTPControl nordugrid-arc-5.4.2/src/hed/acc/ARC0/PaxHeaders.7502/DescriptorsARC0.cpp0000644000000000000000000000012112675602216023356 xustar000000000000000027 mtime=1459029134.924374 25 atime=1513200574.3947 29 ctime=1513200660.11974895 nordugrid-arc-5.4.2/src/hed/acc/ARC0/DescriptorsARC0.cpp0000644000175000002070000000126012675602216023425 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include "JobControllerPluginARC0.h" #include "SubmitterPluginARC0.h" extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "ARC0", "HED:SubmitterPlugin", "ARCs classic Grid Manager", 0, &Arc::SubmitterPluginARC0::Instance }, { "ARC0", "HED:JobControllerPlugin", "ARCs classic Grid Manager", 0, &Arc::JobControllerPluginARC0::Instance }, { NULL, NULL, NULL, 0, NULL } }; extern "C" { void ARC_MODULE_CONSTRUCTOR_NAME(Glib::Module* module, Arc::ModuleManager* manager) { if(manager && module) { manager->makePersistent(module); }; } } nordugrid-arc-5.4.2/src/hed/acc/PaxHeaders.7502/CREAM0000644000000000000000000000013213214316024020017 xustar000000000000000030 mtime=1513200660.259750663 30 atime=1513200668.721854157 30 ctime=1513200660.259750663 nordugrid-arc-5.4.2/src/hed/acc/CREAM/0000755000175000002070000000000013214316024020142 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/acc/CREAM/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712071046365022144 xustar000000000000000027 mtime=1357139189.188243 30 atime=1513200594.076941221 30 ctime=1513200660.247750516 nordugrid-arc-5.4.2/src/hed/acc/CREAM/Makefile.am0000644000175000002070000000175012071046365022211 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libaccCREAM.la libaccCREAM_la_SOURCES = DescriptorsCREAM.cpp \ SubmitterPluginCREAM.cpp SubmitterPluginCREAM.h \ JobControllerPluginCREAM.cpp JobControllerPluginCREAM.h \ CREAMClient.cpp CREAMClient.h \ JobStateCREAM.cpp JobStateCREAM.h \ JobListRetrieverPluginWSRFCREAM.cpp JobListRetrieverPluginWSRFCREAM.h libaccCREAM_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libaccCREAM_la_LIBADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(OPENSSL_LIBS) libaccCREAM_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/acc/CREAM/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315722022145 xustar000000000000000030 mtime=1513200594.129941869 30 atime=1513200648.807610598 30 ctime=1513200660.248750528 nordugrid-arc-5.4.2/src/hed/acc/CREAM/Makefile.in0000644000175000002070000010045713214315722022222 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. 
# @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/acc/CREAM DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libaccCREAM_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) 
$(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) am_libaccCREAM_la_OBJECTS = libaccCREAM_la-DescriptorsCREAM.lo \ libaccCREAM_la-SubmitterPluginCREAM.lo \ libaccCREAM_la-JobControllerPluginCREAM.lo \ libaccCREAM_la-CREAMClient.lo libaccCREAM_la-JobStateCREAM.lo \ libaccCREAM_la-JobListRetrieverPluginWSRFCREAM.lo libaccCREAM_la_OBJECTS = $(am_libaccCREAM_la_OBJECTS) libaccCREAM_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libaccCREAM_la_CXXFLAGS) $(CXXFLAGS) \ $(libaccCREAM_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libaccCREAM_la_SOURCES) DIST_SOURCES = $(libaccCREAM_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ 
CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = 
@PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libaccCREAM.la libaccCREAM_la_SOURCES = 
DescriptorsCREAM.cpp \ SubmitterPluginCREAM.cpp SubmitterPluginCREAM.h \ JobControllerPluginCREAM.cpp JobControllerPluginCREAM.h \ CREAMClient.cpp CREAMClient.h \ JobStateCREAM.cpp JobStateCREAM.h \ JobListRetrieverPluginWSRFCREAM.cpp JobListRetrieverPluginWSRFCREAM.h libaccCREAM_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libaccCREAM_la_LIBADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(OPENSSL_LIBS) libaccCREAM_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/acc/CREAM/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/acc/CREAM/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libaccCREAM.la: $(libaccCREAM_la_OBJECTS) $(libaccCREAM_la_DEPENDENCIES) $(libaccCREAM_la_LINK) -rpath $(pkglibdir) $(libaccCREAM_la_OBJECTS) 
$(libaccCREAM_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccCREAM_la-CREAMClient.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccCREAM_la-DescriptorsCREAM.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccCREAM_la-JobControllerPluginCREAM.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccCREAM_la-JobListRetrieverPluginWSRFCREAM.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccCREAM_la-JobStateCREAM.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccCREAM_la-SubmitterPluginCREAM.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libaccCREAM_la-DescriptorsCREAM.lo: DescriptorsCREAM.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccCREAM_la_CXXFLAGS) $(CXXFLAGS) -MT libaccCREAM_la-DescriptorsCREAM.lo -MD -MP -MF $(DEPDIR)/libaccCREAM_la-DescriptorsCREAM.Tpo -c -o libaccCREAM_la-DescriptorsCREAM.lo `test -f 'DescriptorsCREAM.cpp' || echo '$(srcdir)/'`DescriptorsCREAM.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccCREAM_la-DescriptorsCREAM.Tpo $(DEPDIR)/libaccCREAM_la-DescriptorsCREAM.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DescriptorsCREAM.cpp' object='libaccCREAM_la-DescriptorsCREAM.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccCREAM_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccCREAM_la-DescriptorsCREAM.lo `test -f 'DescriptorsCREAM.cpp' || echo '$(srcdir)/'`DescriptorsCREAM.cpp libaccCREAM_la-SubmitterPluginCREAM.lo: SubmitterPluginCREAM.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccCREAM_la_CXXFLAGS) $(CXXFLAGS) -MT libaccCREAM_la-SubmitterPluginCREAM.lo -MD -MP -MF $(DEPDIR)/libaccCREAM_la-SubmitterPluginCREAM.Tpo -c -o libaccCREAM_la-SubmitterPluginCREAM.lo `test -f 'SubmitterPluginCREAM.cpp' || echo 
'$(srcdir)/'`SubmitterPluginCREAM.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccCREAM_la-SubmitterPluginCREAM.Tpo $(DEPDIR)/libaccCREAM_la-SubmitterPluginCREAM.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SubmitterPluginCREAM.cpp' object='libaccCREAM_la-SubmitterPluginCREAM.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccCREAM_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccCREAM_la-SubmitterPluginCREAM.lo `test -f 'SubmitterPluginCREAM.cpp' || echo '$(srcdir)/'`SubmitterPluginCREAM.cpp libaccCREAM_la-JobControllerPluginCREAM.lo: JobControllerPluginCREAM.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccCREAM_la_CXXFLAGS) $(CXXFLAGS) -MT libaccCREAM_la-JobControllerPluginCREAM.lo -MD -MP -MF $(DEPDIR)/libaccCREAM_la-JobControllerPluginCREAM.Tpo -c -o libaccCREAM_la-JobControllerPluginCREAM.lo `test -f 'JobControllerPluginCREAM.cpp' || echo '$(srcdir)/'`JobControllerPluginCREAM.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccCREAM_la-JobControllerPluginCREAM.Tpo $(DEPDIR)/libaccCREAM_la-JobControllerPluginCREAM.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobControllerPluginCREAM.cpp' object='libaccCREAM_la-JobControllerPluginCREAM.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccCREAM_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccCREAM_la-JobControllerPluginCREAM.lo `test -f 'JobControllerPluginCREAM.cpp' || echo '$(srcdir)/'`JobControllerPluginCREAM.cpp libaccCREAM_la-CREAMClient.lo: CREAMClient.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccCREAM_la_CXXFLAGS) $(CXXFLAGS) -MT libaccCREAM_la-CREAMClient.lo -MD -MP -MF $(DEPDIR)/libaccCREAM_la-CREAMClient.Tpo -c -o libaccCREAM_la-CREAMClient.lo `test -f 'CREAMClient.cpp' || echo '$(srcdir)/'`CREAMClient.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccCREAM_la-CREAMClient.Tpo $(DEPDIR)/libaccCREAM_la-CREAMClient.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='CREAMClient.cpp' object='libaccCREAM_la-CREAMClient.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccCREAM_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccCREAM_la-CREAMClient.lo `test -f 'CREAMClient.cpp' || echo '$(srcdir)/'`CREAMClient.cpp libaccCREAM_la-JobStateCREAM.lo: JobStateCREAM.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccCREAM_la_CXXFLAGS) $(CXXFLAGS) -MT libaccCREAM_la-JobStateCREAM.lo -MD -MP -MF $(DEPDIR)/libaccCREAM_la-JobStateCREAM.Tpo -c -o libaccCREAM_la-JobStateCREAM.lo `test -f 'JobStateCREAM.cpp' || echo '$(srcdir)/'`JobStateCREAM.cpp 
@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccCREAM_la-JobStateCREAM.Tpo $(DEPDIR)/libaccCREAM_la-JobStateCREAM.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobStateCREAM.cpp' object='libaccCREAM_la-JobStateCREAM.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccCREAM_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccCREAM_la-JobStateCREAM.lo `test -f 'JobStateCREAM.cpp' || echo '$(srcdir)/'`JobStateCREAM.cpp libaccCREAM_la-JobListRetrieverPluginWSRFCREAM.lo: JobListRetrieverPluginWSRFCREAM.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccCREAM_la_CXXFLAGS) $(CXXFLAGS) -MT libaccCREAM_la-JobListRetrieverPluginWSRFCREAM.lo -MD -MP -MF $(DEPDIR)/libaccCREAM_la-JobListRetrieverPluginWSRFCREAM.Tpo -c -o libaccCREAM_la-JobListRetrieverPluginWSRFCREAM.lo `test -f 'JobListRetrieverPluginWSRFCREAM.cpp' || echo '$(srcdir)/'`JobListRetrieverPluginWSRFCREAM.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccCREAM_la-JobListRetrieverPluginWSRFCREAM.Tpo $(DEPDIR)/libaccCREAM_la-JobListRetrieverPluginWSRFCREAM.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobListRetrieverPluginWSRFCREAM.cpp' object='libaccCREAM_la-JobListRetrieverPluginWSRFCREAM.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccCREAM_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccCREAM_la-JobListRetrieverPluginWSRFCREAM.lo `test -f 'JobListRetrieverPluginWSRFCREAM.cpp' || echo '$(srcdir)/'`JobListRetrieverPluginWSRFCREAM.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) 
$(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/acc/CREAM/PaxHeaders.7502/JobControllerPluginCREAM.cpp0000644000000000000000000000012212675602216025317 xustar000000000000000027 mtime=1459029134.924374 25 atime=1513200574.4267 30 ctime=1513200660.252750577 nordugrid-arc-5.4.2/src/hed/acc/CREAM/JobControllerPluginCREAM.cpp0000644000175000002070000001172312675602216025372 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "CREAMClient.h" #include "JobControllerPluginCREAM.h" #include "JobStateCREAM.h" namespace Arc { Logger JobControllerPluginCREAM::logger(Logger::getRootLogger(), "JobControllerPlugin.CREAM"); bool JobControllerPluginCREAM::isEndpointNotSupported(const std::string& endpoint) const { const std::string::size_type pos = endpoint.find("://"); return pos != std::string::npos && lower(endpoint.substr(0, pos)) != "http" && lower(endpoint.substr(0, pos)) != "https"; } void JobControllerPluginCREAM::UpdateJobs(std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { MCCConfig cfg; usercfg->ApplyToConfig(cfg); for (std::list::iterator it = jobs.begin(); it != jobs.end(); ++it) { Job& job = **it; CREAMClient gLiteClient(job.JobStatusURL.str() + "/CREAM2", cfg, usercfg->Timeout()); if (!gLiteClient.stat(job.IDFromEndpoint, job)) { logger.msg(WARNING, "Job information not found in the information system: %s", (*it)->JobID); IDsNotProcessed.push_back((*it)->JobID); continue; } IDsProcessed.push_back((*it)->JobID); } } bool JobControllerPluginCREAM::CleanJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { MCCConfig cfg; usercfg->ApplyToConfig(cfg); bool ok = true; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { Job& job = **it; CREAMClient gLiteClient(job.JobManagementURL.str() + "/CREAM2", cfg, usercfg->Timeout()); if (!gLiteClient.purge(job.IDFromEndpoint)) { logger.msg(INFO, "Failed cleaning job: %s", job.JobID); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } IDsProcessed.push_back(job.JobID); } return ok; } bool JobControllerPluginCREAM::CancelJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { MCCConfig cfg; usercfg->ApplyToConfig(cfg); bool ok = true; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { Job& job = **it; CREAMClient gLiteClient(job.JobManagementURL.str() + "/CREAM2", cfg, usercfg->Timeout()); if (!gLiteClient.cancel(job.IDFromEndpoint)) { logger.msg(INFO, "Failed canceling job: %s", job.JobID); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } job.State = JobStateCREAM("CANCELLED"); IDsProcessed.push_back(job.JobID); } return ok; } bool JobControllerPluginCREAM::RenewJobs(const std::list& jobs, std::list&, std::list& IDsNotProcessed, bool) const { for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { logger.msg(INFO, "Renewal of CREAM jobs is not supported"); IDsNotProcessed.push_back((*it)->JobID); } return false; } bool JobControllerPluginCREAM::ResumeJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool) const { MCCConfig cfg; usercfg->ApplyToConfig(cfg); bool ok = true; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { Job& job = **it; CREAMClient gLiteClient(job.JobManagementURL.str() + "/CREAM2", cfg, usercfg->Timeout()); if 
(!gLiteClient.cancel(job.IDFromEndpoint)) { logger.msg(INFO, "Failed resuming job: %s", job.JobID); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } IDsProcessed.push_back(job.JobID); } return ok; } bool JobControllerPluginCREAM::GetURLToJobResource(const Job& job, Job::ResourceType resource, URL& url) const { switch (resource) { case Job::STDIN: case Job::STDOUT: case Job::STDERR: return false; break; case Job::STAGEINDIR: if (!job.StageInDir) return false; url = job.StageInDir; break; case Job::STAGEOUTDIR: if (!job.StageOutDir) return false; url = job.StageOutDir; break; case Job::SESSIONDIR: case Job::JOBLOG: case Job::JOBDESCRIPTION: return false; break; } return true; } bool JobControllerPluginCREAM::GetJobDescription(const Job& j, std::string& desc) const { MCCConfig cfg; usercfg->ApplyToConfig(cfg); CREAMClient gLiteClient(j.JobManagementURL.str() + "/CREAM2", cfg, usercfg->Timeout()); if (!gLiteClient.getJobDesc(j.IDFromEndpoint, desc)) { logger.msg(INFO, "Failed retrieving job description for job: %s", j.JobID); return false; } return true; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/CREAM/PaxHeaders.7502/JobListRetrieverPluginWSRFCREAM.cpp0000644000000000000000000000012112300714551026467 xustar000000000000000027 mtime=1392744809.100789 25 atime=1513200574.4297 29 ctime=1513200660.25875065 nordugrid-arc-5.4.2/src/hed/acc/CREAM/JobListRetrieverPluginWSRFCREAM.cpp0000644000175000002070000000374012300714551026543 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "CREAMClient.h" #include "JobListRetrieverPluginWSRFCREAM.h" namespace Arc { Logger JobListRetrieverPluginWSRFCREAM::logger(Logger::getRootLogger(), "JobListRetrieverPlugin.WSRFCREAM"); bool JobListRetrieverPluginWSRFCREAM::isEndpointNotSupported(const Endpoint& endpoint) const { const std::string::size_type pos = endpoint.URLString.find("://"); return pos != std::string::npos && lower(endpoint.URLString.substr(0, pos)) != "http" && lower(endpoint.URLString.substr(0, pos)) != "https"; } EndpointQueryingStatus JobListRetrieverPluginWSRFCREAM::Query(const UserConfig& uc, const Endpoint& e, std::list& jobs, const EndpointQueryOptions&) const { EndpointQueryingStatus s(EndpointQueryingStatus::FAILED); URL url((e.URLString.find("://") == std::string::npos ? 
"https://" : "") + e.URLString, false, 8443); URL infourl("ldap://" + url.Host(), false, 2170, "/o=grid"); URL queryURL(url); queryURL.ChangePath(queryURL.Path() + "/CREAM2"); MCCConfig cfg; uc.ApplyToConfig(cfg); CREAMClient creamClient(queryURL, cfg, uc.Timeout()); std::list cJobs; if (!creamClient.listJobs(cJobs)) return s; for (std::list::const_iterator it = cJobs.begin(); it != cJobs.end(); ++it) { Job j; j.JobID = queryURL.str() + '/' + it->id; j.ServiceInformationURL = infourl; j.ServiceInformationURL.ChangeLDAPFilter(""); j.ServiceInformationInterfaceName = "org.nordugrid.ldapng"; j.JobStatusURL = url; j.JobStatusInterfaceName = "org.glite.ce.cream"; j.JobManagementURL = url; j.JobManagementInterfaceName = "org.glite.ce.cream"; j.IDFromEndpoint = it->id; jobs.push_back(j); } s = EndpointQueryingStatus::SUCCESSFUL; return s; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/CREAM/PaxHeaders.7502/CREAMClient.h0000644000000000000000000000012112071247707022244 xustar000000000000000026 mtime=1357205447.19319 25 atime=1513200574.4327 30 ctime=1513200660.255750614 nordugrid-arc-5.4.2/src/hed/acc/CREAM/CREAMClient.h0000644000175000002070000000377712071247707022332 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_CREAMCLIENT_H__ #define __ARC_CREAMCLIENT_H__ #include #include #include /** CREAMClient * The CREAMClient class is used for commnunicating with a CREAM 2 computing * service. CREAM uses WSRF technology with a WSDL specifying the allowed * operations. The particular WSDLs can be obtained from the CERN source code * repository with names 'org.glite.ce-cream2_service.wsdl' and * 'www.gridsite.org-delegation-2.0.0.wsdl', at path 'org.glite.ce.wsdl'. A * package for these WSDLs probably also exist. */ namespace Arc { class ClientSOAP; class Logger; class MCCConfig; class URL; class PayloadSOAP; class creamJobInfo { public: std::string id; std::string creamURL; std::string ISB; std::string OSB; std::string delegationID; creamJobInfo& operator=(XMLNode n); XMLNode ToXML() const; }; class CREAMClient { public: CREAMClient(const URL& url, const MCCConfig& cfg, int timeout); ~CREAMClient(); void setDelegationId(const std::string& delegId) { this->delegationId = delegId; } bool createDelegation(const std::string& delegation_id, const std::string& proxy); bool registerJob(const std::string& jdl_text, creamJobInfo& info); bool startJob(const std::string& jobid); bool stat(const std::string& jobid, Job& job); bool listJobs(std::list& info); bool getJobDesc(const std::string& jobid, std::string& desc); bool cancel(const std::string& jobid); bool purge(const std::string& jobid); bool resume(const std::string& jobid); private: bool process(PayloadSOAP& req, XMLNode& response, const std::string& actionNS = "http://glite.org/2007/11/ce/cream/"); std::string action; ClientSOAP *client; std::string cafile; std::string cadir; NS cream_ns; std::string delegationId; static Logger logger; }; } // namespace Arc #endif // __ARC_CREAMCLIENT_H__ nordugrid-arc-5.4.2/src/hed/acc/CREAM/PaxHeaders.7502/JobStateCREAM.h0000644000000000000000000000012212045235201022525 xustar000000000000000027 mtime=1351957121.246634 25 atime=1513200574.4317 30 ctime=1513200660.257750638 nordugrid-arc-5.4.2/src/hed/acc/CREAM/JobStateCREAM.h0000644000175000002070000000055512045235201022601 0ustar00mockbuildmock00000000000000#ifndef __ARC_JOBSTATECREAM_H__ #define __ARC_JOBSTATECREAM_H__ #include namespace Arc { class JobStateCREAM : public JobState { public: JobStateCREAM(const std::string& state) 
: JobState(state, &StateMap) {} static JobState::StateType StateMap(const std::string& state); }; } #endif // __ARC_JOBSTATECREAM_H__ nordugrid-arc-5.4.2/src/hed/acc/CREAM/PaxHeaders.7502/JobStateCREAM.cpp0000644000000000000000000000012212153630075023070 xustar000000000000000027 mtime=1370435645.386876 25 atime=1513200574.4267 30 ctime=1513200660.256750626 nordugrid-arc-5.4.2/src/hed/acc/CREAM/JobStateCREAM.cpp0000644000175000002070000000462312153630075023144 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "JobStateCREAM.h" namespace Arc { JobState::StateType JobStateCREAM::StateMap(const std::string& state) { /// \mapname CREAM CREAM extended BES /// \mapattr REGISTERED -> ACCEPTED if (state == "REGISTERED") return JobState::ACCEPTED; /// \mapattr PENDING -> ACCEPTED else if (state == "PENDING") return JobState::ACCEPTED; /// \mapattr RUNNING -> RUNNING else if (state == "RUNNING") return JobState::RUNNING; /// \mapattr REALLY-RUNNING -> RUNNING else if (state == "REALLY-RUNNING") return JobState::RUNNING; /// \mapattr HELD -> HOLD else if (state == "HELD") return JobState::HOLD; /// \mapattr DONE-FAILED -> FAILED else if (state == "DONE-FAILED") return JobState::FAILED; /// \mapattr DONE-OK -> FINISHED else if (state == "DONE-OK") return JobState::FINISHED; /// \mapattr ABORTED -> FAILED else if (state == "ABORTED") return JobState::FAILED; /// \mapattr CANCELLED -> KILLED else if (state == "CANCELLED") return JobState::KILLED; /// \mapattr IDLE -> QUEUING else if (state == "IDLE") return JobState::QUEUING; /// \mapattr "" -> UNDEFINED else if (state == "") return JobState::UNDEFINED; /// \mapattr Any other state -> OTHER else return JobState::OTHER; } } /* A CREAM JOB STATES Here below is provided a brief description of the meaning of each possible state a CREAM job can enter: REGISTERED: the job has been registered but it has not been started yet. PENDING the job has been started, but it has still to be submitted to the LRMS abstraction layer module (i.e. BLAH). IDLE: the job is idle in the Local Resource Management System (LRMS). RUNNING: the job wrapper, which "encompasses" the user job, is running in the LRMS. REALLY-RUNNING: the actual user job (the one specified as Executable in the job JDL) is running in the LRMS. HELD: the job is held (suspended) in the LRMS. CANCELLED: the job has been cancelled. DONE-OK: the job has successfully been executed. DONE-FAILED: the job has been executed, but some errors occurred. ABORTED: errors occurred during the "management" of the job, e.g. the submission to the LRMS abstraction layer software (BLAH) failed. UNKNOWN: the job is an unknown status. 
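As an illustration only (this usage sketch is not part of the original file), the mapping above is exercised by wrapping the raw CREAM status string in a JobStateCREAM object, exactly as CREAMClient::stat() and JobControllerPluginCREAM::CancelJobs() do. The sketch assumes nothing beyond the constructor declared in JobStateCREAM.h and the JobState operators already used elsewhere in this plugin (the boolean test in CREAMClient::stat() and the string call operator in its DONE-OK check):

    #include "JobStateCREAM.h"

    void state_mapping_sketch() {
      Arc::JobStateCREAM state("DONE-OK");   // raw status name reported by CREAM
      if (!state)                            // false for an empty/undefined status,
        return;                              // as checked in CREAMClient::stat()
      // state() returns the native CREAM string, here "DONE-OK"; StateMap above
      // translates it to the generic JobState::FINISHED.
      bool finished = (state() == "DONE-OK");
      (void)finished;                        // silence unused-variable warnings
    }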
*/ nordugrid-arc-5.4.2/src/hed/acc/CREAM/PaxHeaders.7502/JobControllerPluginCREAM.h0000644000000000000000000000012212067314257024764 xustar000000000000000027 mtime=1356699823.605136 25 atime=1513200574.4327 30 ctime=1513200660.253750589 nordugrid-arc-5.4.2/src/hed/acc/CREAM/JobControllerPluginCREAM.h0000644000175000002070000000362312067314257025037 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOBCONTROLLERCREAM_H__ #define __ARC_JOBCONTROLLERCREAM_H__ #include namespace Arc { class URL; class JobControllerPluginCREAM : public JobControllerPlugin { public: JobControllerPluginCREAM(const UserConfig& usercfg, PluginArgument* parg) : JobControllerPlugin(usercfg, parg) { supportedInterfaces.push_back("org.glite.cream"); supportedInterfaces.push_back("org.glite.ce.cream"); } ~JobControllerPluginCREAM() {} static Plugin* Instance(PluginArgument *arg) { JobControllerPluginArgument *jcarg = dynamic_cast(arg); return jcarg ? new JobControllerPluginCREAM(*jcarg, arg) : NULL; } virtual bool isEndpointNotSupported(const std::string& endpoint) const; virtual void UpdateJobs(std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool CleanJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool CancelJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool RenewJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool ResumeJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped = false) const; virtual bool GetURLToJobResource(const Job& job, Job::ResourceType resource, URL& url) const; virtual bool GetJobDescription(const Job& job, std::string& desc_str) const; private: static Logger logger; }; } // namespace Arc #endif // __ARC_JOBCONTROLLERCREAM_H__ nordugrid-arc-5.4.2/src/hed/acc/CREAM/PaxHeaders.7502/DescriptorsCREAM.cpp0000644000000000000000000000012112675602216023662 xustar000000000000000027 mtime=1459029134.924374 25 atime=1513200574.4327 29 ctime=1513200660.24975054 nordugrid-arc-5.4.2/src/hed/acc/CREAM/DescriptorsCREAM.cpp0000644000175000002070000000134412675602216023734 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include "SubmitterPluginCREAM.h" #include "JobControllerPluginCREAM.h" #include "JobListRetrieverPluginWSRFCREAM.h" extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "CREAM", "HED:SubmitterPlugin", "The Computing Resource Execution And Management service", 0, &Arc::SubmitterPluginCREAM::Instance }, { "CREAM", "HED:JobControllerPlugin", "The Computing Resource Execution And Management service", 0, &Arc::JobControllerPluginCREAM::Instance }, { "CREAM", "HED:JobListRetrieverPlugin", "The Computing Resource Execution And Management service", 0, &Arc::JobListRetrieverPluginWSRFCREAM::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/acc/CREAM/PaxHeaders.7502/CREAMClient.cpp0000644000000000000000000000012212071251770022573 xustar000000000000000027 mtime=1357206520.697831 25 atime=1513200574.4287 30 ctime=1513200660.254750601 nordugrid-arc-5.4.2/src/hed/acc/CREAM/CREAMClient.cpp0000644000175000002070000004453012071251770022650 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include 
#include #include #include #include #include "JobStateCREAM.h" #include "CREAMClient.h" #ifdef ISVALID #undef ISVALID #endif #define ISVALID(NODE) NODE && (std::string)NODE != "N/A" && (std::string)NODE != "[reserved]" namespace Arc { Logger CREAMClient::logger(Logger::rootLogger, "CREAMClient"); bool stringtoTime(const std::string& timestring, Time& time) { if (timestring == "" || timestring.length() < 15) return false; //The conversion for example: //before: 11/5/08 11:52 PM //after: 2008-11-05T23:52:00.000Z tm timestr; std::string::size_type pos = 0; if (sscanf(timestring.substr(pos, 6).c_str(), "%d/%d/%2d", ×tr.tm_mon, ×tr.tm_mday, ×tr.tm_year) == 3) pos += 6; else if (sscanf(timestring.substr(pos, 7).c_str(), "%2d/%d/%2d", ×tr.tm_mon, ×tr.tm_mday, ×tr.tm_year) == 3) pos += 7; else if (sscanf(timestring.substr(pos, 7).c_str(), "%d/%2d/%2d", ×tr.tm_mon, ×tr.tm_mday, ×tr.tm_year) == 3) pos += 7; else if (sscanf(timestring.substr(pos, 8).c_str(), "%2d/%2d/%2d", ×tr.tm_mon, ×tr.tm_mday, ×tr.tm_year) == 3) pos += 8; else return false; timestr.tm_year += 100; timestr.tm_mon--; if (timestring[pos] == 'T' || timestring[pos] == ' ') pos++; if (sscanf(timestring.substr(pos, 5).c_str(), "%2d:%2d", ×tr.tm_hour, ×tr.tm_min) == 2) pos += 5; else return false; // skip the space characters while (timestring[pos] == ' ') pos++; if (timestring.substr(pos, 2) == "PM") timestr.tm_hour += 12; time.SetTime(mktime(×tr)); return true; } static void set_cream_namespaces(NS& ns) { ns["deleg"] = "http://www.gridsite.org/namespaces/delegation-2"; ns["types"] = "http://glite.org/2007/11/ce/cream/types"; } XMLNode creamJobInfo::ToXML() const { return XMLNode("" ""+id+"" ""+creamURL+""+ (!ISB.empty() ? "CREAMInputSandboxURI" + ISB +"" : std::string()) + (!OSB.empty() ? "CREAMOutputSandboxURI" + OSB +"" : std::string()) + ""+delegationID+"" ""); } creamJobInfo& creamJobInfo::operator=(XMLNode n) { id = (std::string)n["id"]; if (n["creamURL"]) { creamURL = URL((std::string)n["creamURL"]); } for (XMLNode property = n["property"]; property; ++property) { if ((std::string)property["name"] == "CREAMInputSandboxURI") { ISB = (std::string)property["value"]; } else if ((std::string)property["name"] == "CREAMOutputSandboxURI") { OSB = (std::string)property["value"]; } } if (n["delegationID"]) { delegationID = (std::string)n["delegationID"]; } return *this; } CREAMClient::CREAMClient(const URL& url, const MCCConfig& cfg, int timeout) : client(NULL), cafile(cfg.cafile), cadir(cfg.cadir) { logger.msg(INFO, "Creating a CREAM client"); client = new ClientSOAP(cfg, url, timeout); if (!client) logger.msg(VERBOSE, "Unable to create SOAP client used by CREAMClient."); set_cream_namespaces(cream_ns); } CREAMClient::~CREAMClient() { if (client) delete client; } bool CREAMClient::process(PayloadSOAP& req, XMLNode& response, const std::string& actionNS) { if (!client) { logger.msg(VERBOSE, "CREAMClient not created properly"); return false; } PayloadSOAP *resp = NULL; if (!client->process(actionNS + action, &req, &resp)) { logger.msg(VERBOSE, "%s request failed", action); return false; } if (resp == NULL) { logger.msg(VERBOSE, "There was no SOAP response"); return false; } (*resp)[action + "Response"].New(response); delete resp; if (!response) { logger.msg(VERBOSE, "Empty response"); return false; } XMLNode fault; if (response["JobUnknownFault"]) fault = response["JobUnknownFault"]; if (response["JobStatusInvalidFault"]) fault = response["JobStatusInvalidFault"]; if (response["DelegationIdMismatchFault"]) fault = 
response["DelegationIdMismatchFault"]; if (response["DateMismatchFault"]) fault = response["DateMismatchFault"]; if (response["LeaseIdMismatchFault"]) fault = response["LeaseIdMismatchFault"]; if (response["GenericFault"]) fault = response["GenericFault"]; if (fault) { logger.msg(VERBOSE, "Request failed: %s", (std::string)(fault["Description"])); return false; } return true; } bool CREAMClient::stat(const std::string& jobid, Job& job) { logger.msg(VERBOSE, "Creating and sending a status request"); action = "JobInfo"; PayloadSOAP req(cream_ns); XMLNode xjobId = req.NewChild("types:" + action + "Request").NewChild("types:jobId"); xjobId.NewChild("types:id") = jobid; xjobId.NewChild("types:creamURL") = client->GetURL().str(); XMLNode response; if (!process(req, response)) return false; XMLNode jobInfoNode; jobInfoNode = response["result"]["jobInfo"]; XMLNode lastStatusNode = jobInfoNode.Path("status").back(); if (lastStatusNode["name"]) { job.State = JobStateCREAM((std::string)lastStatusNode["name"]); } if (lastStatusNode["failureReason"]) { job.Error.push_back((std::string)lastStatusNode["failureReason"]); } if (!job.State) { logger.msg(VERBOSE, "Unable to retrieve job status."); return false; } if (ISVALID(jobInfoNode["jobId"]["id"])) { job.IDFromEndpoint = (std::string)jobInfoNode["jobId"]["id"]; } if (ISVALID(jobInfoNode["type"])) job.Type = (std::string)jobInfoNode["type"]; if (ISVALID(jobInfoNode["JDL"])) { job.JobDescription = "egee:jdl"; job.JobDescriptionDocument = (std::string)jobInfoNode["JDL"]; std::list jds; if (JobDescription::Parse(job.JobDescriptionDocument, jds) && !jds.empty()) { if (!jds.front().Application.Input.empty()) job.StdIn = jds.front().Application.Input; if (!jds.front().Application.Output.empty()) job.StdOut = jds.front().Application.Output; if (!jds.front().Application.Error.empty()) job.StdErr = jds.front().Application.Error; if (!jds.front().Resources.QueueName.empty()) { job.Queue = jds.front().Resources.QueueName; } } } if (ISVALID(lastStatusNode["exitCode"])) job.ExitCode = stringtoi((std::string)lastStatusNode["exitCode"]); if (ISVALID(jobInfoNode["delegationProxyInfo"])) { /* Format of delegationProxyInfo node. 
[ isRFC=""; valid from="// : ()"; valid to="// : ()"; holder DN=""; holder AC issuer=""; VO=""; AC issuer=""; VOMS attributes= ] */ std::string delegationProxy = (std::string)jobInfoNode["delegationProxyInfo"]; std::size_t lBracketPos = delegationProxy.find('['), rBracketPos = delegationProxy.rfind(']'); if (lBracketPos != std::string::npos && rBracketPos != std::string::npos) { delegationProxy = trim(delegationProxy.substr(lBracketPos, rBracketPos - lBracketPos)); } std::list tDelegInfo; tokenize(delegationProxy, tDelegInfo, ";"); for (std::list::iterator it = tDelegInfo.begin(); it != tDelegInfo.end(); ++it) { std::list keyValuePair; tokenize(*it, keyValuePair, "=", "\"", "\""); if (keyValuePair.size() != 2) continue; if (lower(trim(keyValuePair.front())) == "holder dn") job.Owner = trim(keyValuePair.back(), " \""); if (lower(trim(keyValuePair.front())) == "valid to") stringtoTime(trim(keyValuePair.back(), " \""), job.ProxyExpirationTime); } } if (ISVALID(jobInfoNode["localUser"])) job.LocalOwner = (std::string)jobInfoNode["localUser"]; if (ISVALID(jobInfoNode["lastCommand"])) { int job_register_id_first = -1; int job_register_id_last = -1; int job_start_id_first = -1; int job_start_id_last = -1; int local_id = 0; while (true) { if (!jobInfoNode["lastCommand"][local_id]) break; if ((std::string)jobInfoNode["lastCommand"][local_id]["name"] == "JOB_REGISTER") { if (job_register_id_first == -1 && job_register_id_last == -1) { job_register_id_first = local_id; job_register_id_last = local_id; } else if (job_register_id_last > -1) job_register_id_last = local_id; } //end of the JOB_REGISTER if ((std::string)jobInfoNode["lastCommand"][local_id]["name"] == "JOB_START") { if (job_start_id_first == -1 && job_start_id_last == -1) { job_start_id_first = local_id; job_start_id_last = local_id; } else if (job_start_id_last > -1) job_start_id_last = local_id; } //end of the JOB_START local_id++; } //dependent on JOB_REGISTER if (job_register_id_first > -1) if (ISVALID(jobInfoNode["lastCommand"][job_register_id_first]["creationTime"])) { Time time((std::string)jobInfoNode["lastCommand"][job_register_id_first]["creationTime"]); if (time.GetTime() != -1) job.SubmissionTime = time; } if (job_register_id_last > -1) if (ISVALID(jobInfoNode["lastCommand"][job_register_id_last]["creationTime"])) { Time time((std::string)jobInfoNode["lastCommand"][job_register_id_last]["creationTime"]); if (time.GetTime() != -1) job.CreationTime = time; } //end of the JOB_REGISTER //dependent on JOB_START if (job_start_id_first > -1) { if (ISVALID(jobInfoNode["lastCommand"][job_start_id_first]["startSchedulingTime"])) { Time time((std::string)jobInfoNode["lastCommand"][job_start_id_first]["startSchedulingTime"]); if (time.GetTime() != -1) job.ComputingManagerSubmissionTime = time; } if (ISVALID(jobInfoNode["lastCommand"][job_start_id_first]["startProcessingTime"])) { Time time((std::string)jobInfoNode["lastCommand"][job_start_id_first]["startProcessingTime"]); if (time.GetTime() != -1) job.StartTime = time; } } if (job_start_id_last > -1) if (ISVALID(jobInfoNode["lastCommand"][job_start_id_last]["executionCompletedTime"])) { Time time((std::string)jobInfoNode["lastCommand"][job_start_id_last]["executionCompletedTime"]); if (time.GetTime() != -1) job.ComputingManagerEndTime = time; } //end of the JOB_START } //end of the LastCommand if (ISVALID(lastStatusNode["timestamp"]) && (job.State() == "DONE-OK" || job.State() == "DONE-FAILED")) { Time time((std::string)lastStatusNode["timestamp"]); if (time.GetTime() != -1) job.EndTime = 
time; } return true; } bool CREAMClient::cancel(const std::string& jobid) { logger.msg(VERBOSE, "Creating and sending request to terminate a job"); action = "JobCancel"; PayloadSOAP req(cream_ns); XMLNode xjobId = req.NewChild("types:" + action + "Request").NewChild("types:jobId"); xjobId.NewChild("types:id") = jobid; xjobId.NewChild("types:creamURL") = client->GetURL().str(); XMLNode response; if (!process(req, response)) return false; if (!response) { logger.msg(VERBOSE, "Empty response"); return false; } return true; } bool CREAMClient::purge(const std::string& jobid) { logger.msg(VERBOSE, "Creating and sending request to clean a job"); action = "JobPurge"; PayloadSOAP req(cream_ns); XMLNode xjobId = req.NewChild("types:" + action + "Request").NewChild("types:jobId"); xjobId.NewChild("types:id") = jobid; xjobId.NewChild("types:creamURL") = client->GetURL().str(); XMLNode response; if (!process(req, response)) return false; if (!response) { logger.msg(VERBOSE, "Empty response"); return false; } return true; } bool CREAMClient::resume(const std::string& jobid) { logger.msg(VERBOSE, "Creating and sending request to resume a job"); action = "JobResume"; PayloadSOAP req(cream_ns); XMLNode xjobId = req.NewChild("types:" + action + "Request").NewChild("types:jobId"); xjobId.NewChild("types:id") = jobid; xjobId.NewChild("types:creamURL") = client->GetURL().str(); XMLNode response; if (!process(req, response)) return false; if (!response) { logger.msg(VERBOSE, "Empty response"); return false; } return true; } bool CREAMClient::listJobs(std::list& info) { logger.msg(VERBOSE, "Creating and sending request to list jobs"); action = "JobList"; PayloadSOAP req(cream_ns); req.NewChild("types:" + action + "Request"); XMLNode response; if (!process(req, response)) return false; if (!response) { logger.msg(VERBOSE, "Empty response"); return false; } for (XMLNode n = response["result"]; n; ++n) { creamJobInfo i; i = n; info.push_back(i); } return true; } bool CREAMClient::getJobDesc(const std::string& jobid, std::string& desc) { logger.msg(VERBOSE, "Creating and sending a status request"); action = "JobInfo"; PayloadSOAP req(cream_ns); XMLNode xjobId = req.NewChild("types:" + action + "Request").NewChild("types:jobId"); xjobId.NewChild("types:id") = jobid; xjobId.NewChild("types:creamURL") = client->GetURL().str(); XMLNode response; if (!process(req, response)) return false; if (ISVALID(response["result"]["jobInfo"]["JDL"])) { desc = (std::string)response["result"]["jobInfo"]["JDL"]; return true; } return false; } bool CREAMClient::registerJob(const std::string& jdl_text, creamJobInfo& info) { logger.msg(VERBOSE, "Creating and sending job register request"); action = "JobRegister"; PayloadSOAP req(cream_ns); XMLNode act_job = req.NewChild("types:" + action + "Request").NewChild("types:jobDescriptionList"); act_job.NewChild("types:JDL") = jdl_text; if (!delegationId.empty()) act_job.NewChild("types:delegationId") = delegationId; act_job.NewChild("types:autoStart") = "false"; XMLNode response; if (!process(req, response)) return false; if (!response) { logger.msg(VERBOSE, "Empty response"); return false; } if (!response["result"]["jobId"]["id"]) { logger.msg(VERBOSE, "No job ID in response"); return false; } info = response["result"]["jobId"]; return true; } bool CREAMClient::startJob(const std::string& jobid) { logger.msg(VERBOSE, "Creating and sending job start request"); action = "JobStart"; PayloadSOAP req(cream_ns); XMLNode jobStartRequest = req.NewChild("types:" + action + "Request"); XMLNode xjobId = 
jobStartRequest.NewChild("types:jobId"); xjobId.NewChild("types:id") = jobid; xjobId.NewChild("types:creamURL") = client->GetURL().str(); XMLNode response; if (!process(req, response)) return false; if (!response) { logger.msg(VERBOSE, "Empty response"); return false; } if (!response["result"]["jobId"]["id"]) { logger.msg(VERBOSE, "No job ID in response"); return false; } return true; } bool CREAMClient::createDelegation(const std::string& delegation_id, const std::string& proxy) { logger.msg(VERBOSE, "Creating delegation"); action = "getProxyReq"; PayloadSOAP req(cream_ns); req.NewChild("deleg:" + action).NewChild("delegationID") = delegation_id; XMLNode response; if (!process(req, response, "http://www.gridsite.org/namespaces/delegation-2/")) return false; std::string proxyRequestStr = (std::string)response["getProxyReqReturn"]; if (proxyRequestStr.empty()) { logger.msg(VERBOSE, "Malformed response: missing getProxyReqReturn"); return false; } //Sign the proxy certificate Credential signer(proxy, "", cadir, cafile); std::string signedCert; // TODO: Hardcoded time shift - VERY BAD approach Time start_time = Time() - Period(300); Time end_time = signer.GetEndTime(); if(end_time < start_time) { logger.msg(VERBOSE, "Delegatable credentials expired: %s",end_time.str()); return false; } // CREAM is picky about end time of delegated credentials, so // make sure it does not exceed end time of signer Credential proxy_cred(start_time,end_time-start_time); proxy_cred.InquireRequest(proxyRequestStr); proxy_cred.SetProxyPolicy("gsi2", "", "", -1); if (!(signer.SignRequest(&proxy_cred, signedCert))) { logger.msg(VERBOSE, "Failed signing certificate request"); return false; } std::string signedOutputCert, signedOutputCertChain; signer.OutputCertificate(signedOutputCert); signer.OutputCertificateChain(signedOutputCertChain); signedCert.append(signedOutputCert).append(signedOutputCertChain); action = "putProxy"; req = PayloadSOAP(cream_ns); XMLNode putProxyRequest = req.NewChild("deleg:" + action); putProxyRequest.NewChild("delegationID") = delegation_id; putProxyRequest.NewChild("proxy") = signedCert; response = XMLNode(); if (!process(req, response, "http://www.gridsite.org/namespaces/delegation-2/")) return false; if (!response) { logger.msg(VERBOSE, "Failed putting signed delegation certificate to service"); return false; } return true; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/CREAM/PaxHeaders.7502/SubmitterPluginCREAM.h0000644000000000000000000000012212067314257024164 xustar000000000000000027 mtime=1356699823.605136 25 atime=1513200574.4327 30 ctime=1513200660.251750565 nordugrid-arc-5.4.2/src/hed/acc/CREAM/SubmitterPluginCREAM.h0000644000175000002070000000226212067314257024235 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_SUBMITTERPLUGINCREAM_H__ #define __ARC_SUBMITTERPLUGINCREAM_H__ #include namespace Arc { class SubmitterPluginCREAM : public SubmitterPlugin { public: SubmitterPluginCREAM(const UserConfig& usercfg, PluginArgument* parg) : SubmitterPlugin(usercfg, parg) { supportedInterfaces.push_back("org.glite.cream"); supportedInterfaces.push_back("org.glite.ce.cream"); } ~SubmitterPluginCREAM() {} static Plugin* Instance(PluginArgument *arg) { SubmitterPluginArgument *subarg = dynamic_cast(arg); return subarg ? 
new SubmitterPluginCREAM(*subarg, arg) : NULL; } virtual bool isEndpointNotSupported(const std::string& endpoint) const; virtual SubmissionStatus Submit(const std::list& jobdescs, const std::string& endpoint, EntityConsumer& jc, std::list& notSubmitted); virtual SubmissionStatus Submit(const std::list& jobdescs, const ExecutionTarget& et, EntityConsumer& jc, std::list& notSubmitted); }; } // namespace Arc #endif // __ARC_SUBMITTERPLUGINCREAM_H__ nordugrid-arc-5.4.2/src/hed/acc/CREAM/PaxHeaders.7502/JobListRetrieverPluginWSRFCREAM.h0000644000000000000000000000012412067314257026150 xustar000000000000000027 mtime=1356699823.605136 27 atime=1513200574.436701 30 ctime=1513200660.259750663 nordugrid-arc-5.4.2/src/hed/acc/CREAM/JobListRetrieverPluginWSRFCREAM.h0000644000175000002070000000206712067314257026222 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOBLISTRETRIEVERPLUGINWSRFCREAM_H__ #define __ARC_JOBLISTRETRIEVERPLUGINWSRFCREAM_H__ #include #include namespace Arc { class Logger; class JobListRetrieverPluginWSRFCREAM : public JobListRetrieverPlugin { public: JobListRetrieverPluginWSRFCREAM(PluginArgument* parg): JobListRetrieverPlugin(parg) { supportedInterfaces.push_back("org.glite.cream"); supportedInterfaces.push_back("org.glite.ce.cream"); } virtual ~JobListRetrieverPluginWSRFCREAM() {} static Plugin* Instance(PluginArgument *arg) { return new JobListRetrieverPluginWSRFCREAM(arg); } virtual EndpointQueryingStatus Query(const UserConfig&, const Endpoint&, std::list&, const EndpointQueryOptions&) const; // No implementation in cpp file -- returns EndpointQueryingStatus::FAILED. virtual bool isEndpointNotSupported(const Endpoint&) const; private: static Logger logger; }; } // namespace Arc #endif // __ARC_JOBLISTRETRIEVERPLUGINWSRFCREAM_H__ nordugrid-arc-5.4.2/src/hed/acc/CREAM/PaxHeaders.7502/SubmitterPluginCREAM.cpp0000644000000000000000000000012212675602216024517 xustar000000000000000027 mtime=1459029134.924374 25 atime=1513200574.4277 30 ctime=1513200660.250750552 nordugrid-arc-5.4.2/src/hed/acc/CREAM/SubmitterPluginCREAM.cpp0000644000175000002070000002054512675602216024574 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include "CREAMClient.h" #include "SubmitterPluginCREAM.h" namespace Arc { bool SubmitterPluginCREAM::isEndpointNotSupported(const std::string& endpoint) const { const std::string::size_type pos = endpoint.find("://"); return pos != std::string::npos && lower(endpoint.substr(0, pos)) != "http" && lower(endpoint.substr(0, pos)) != "https"; } SubmissionStatus SubmitterPluginCREAM::Submit(const std::list& jobdescs, const std::string& endpoint, EntityConsumer& jc, std::list& notSubmitted) { MCCConfig cfg; usercfg->ApplyToConfig(cfg); URL url((endpoint.find("://") == std::string::npos ? 
"https://" : "") + endpoint, false, 8443); URL infourl("ldap://" + url.Host(), false, 2170, "/o=grid"); SubmissionStatus retval; for (std::list::const_iterator it = jobdescs.begin(); it != jobdescs.end(); ++it) { std::string delegationid = UUID(); URL delegationurl(url); delegationurl.ChangePath(delegationurl.Path() + "/gridsite-delegation"); CREAMClient gLiteClientDelegation(delegationurl, cfg, usercfg->Timeout()); if (!gLiteClientDelegation.createDelegation(delegationid, usercfg->ProxyPath())) { logger.msg(INFO, "Failed creating signed delegation certificate"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::AUTHENTICATION_ERROR; continue; } URL submissionurl(url); submissionurl.ChangePath(submissionurl.Path() + "/CREAM2"); CREAMClient gLiteClientSubmission(submissionurl, cfg, usercfg->Timeout()); gLiteClientSubmission.setDelegationId(delegationid); JobDescription preparedjobdesc(*it); if (!preparedjobdesc.Prepare()) { logger.msg(INFO, "Failed to prepare job description to target resources"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; continue; } std::string jobdescstring; if (!preparedjobdesc.UnParse(jobdescstring, "egee:jdl")) { logger.msg(INFO, "Unable to submit job. Job description is not valid in the %s format", "egee:jdl"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; continue; } creamJobInfo jobInfo; if (!gLiteClientSubmission.registerJob(jobdescstring, jobInfo)) { logger.msg(INFO, "Failed registering job"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } if (!PutFiles(preparedjobdesc, jobInfo.ISB)) { logger.msg(INFO, "Failed uploading local input files"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } if (!gLiteClientSubmission.startJob(jobInfo.id)) { logger.msg(INFO, "Failed starting job"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } Job j; j.JobID = submissionurl.str() + '/' + jobInfo.id; j.ServiceInformationURL = infourl; j.ServiceInformationURL.ChangeLDAPFilter(""); j.ServiceInformationInterfaceName = "org.nordugrid.ldapng"; j.JobStatusURL = url; j.JobStatusInterfaceName = "org.glite.ce.cream"; j.JobManagementURL = url; j.JobManagementInterfaceName = "org.glite.ce.cream"; j.StageInDir = jobInfo.ISB; j.StageOutDir = jobInfo.OSB; AddJobDetails(preparedjobdesc, j); j.IDFromEndpoint = jobInfo.id; jc.addEntity(j); } return retval; } SubmissionStatus SubmitterPluginCREAM::Submit(const std::list& jobdescs, const ExecutionTarget& et, EntityConsumer& jc, std::list& notSubmitted) { MCCConfig cfg; usercfg->ApplyToConfig(cfg); URL url(et.ComputingEndpoint->URLString); SubmissionStatus retval; for (std::list::const_iterator it = jobdescs.begin(); it != jobdescs.end(); ++it) { std::string delegationid = UUID(); URL delegationurl(url); delegationurl.ChangePath(delegationurl.Path() + "/gridsite-delegation"); CREAMClient gLiteClientDelegation(delegationurl, cfg, usercfg->Timeout()); if (!gLiteClientDelegation.createDelegation(delegationid, usercfg->ProxyPath())) { logger.msg(INFO, "Failed creating singed delegation certificate"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= 
SubmissionStatus::AUTHENTICATION_ERROR; continue; } URL submissionurl(url); submissionurl.ChangePath(submissionurl.Path() + "/CREAM2"); CREAMClient gLiteClientSubmission(submissionurl, cfg, usercfg->Timeout()); gLiteClientSubmission.setDelegationId(delegationid); JobDescription preparedjobdesc(*it); if (!preparedjobdesc.Prepare(et)) { logger.msg(INFO, "Failed to prepare job description to target resources"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; continue; } if (preparedjobdesc.OtherAttributes.find("egee:jdl;BatchSystem") == preparedjobdesc.OtherAttributes.end()) { if (!et.ComputingManager->ProductName.empty()) { preparedjobdesc.OtherAttributes["egee:jdl;BatchSystem"] = et.ComputingManager->ProductName; } else if (!et.ComputingShare->MappingQueue.empty()) { preparedjobdesc.OtherAttributes["egee:jdl;BatchSystem"] = et.ComputingShare->MappingQueue; } } std::string jobdescstring; if (!preparedjobdesc.UnParse(jobdescstring, "egee:jdl")) { logger.msg(INFO, "Unable to submit job. Job description is not valid in the %s format", "egee:jdl"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; continue; } creamJobInfo jobInfo; if (!gLiteClientSubmission.registerJob(jobdescstring, jobInfo)) { logger.msg(INFO, "Failed registering job"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } if (!PutFiles(preparedjobdesc, jobInfo.ISB)) { logger.msg(INFO, "Failed uploading local input files"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } if (!gLiteClientSubmission.startJob(jobInfo.id)) { logger.msg(INFO, "Failed starting job"); notSubmitted.push_back(&*it); retval |= SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; retval |= SubmissionStatus::ERROR_FROM_ENDPOINT; continue; } // Prepare contact url for information about this job URL infoendpoint; for (std::list< CountedPointer >::const_iterator it = et.OtherEndpoints.begin(); it != et.OtherEndpoints.end(); it++) { if ((*it)->InterfaceName == "org.nordugrid.ldapglue1") { infoendpoint = URL((*it)->URLString); infoendpoint.ChangeLDAPScope(URL::subtree); } } Job j; j.JobID = submissionurl.str() + '/' + jobInfo.id; j.ServiceInformationURL = infoendpoint; j.ServiceInformationURL.ChangeLDAPFilter(""); j.ServiceInformationInterfaceName = "org.nordugrid.ldapglue1"; j.JobStatusURL = url; j.JobStatusInterfaceName = "org.glite.ce.cream"; j.JobManagementURL = url; j.JobManagementInterfaceName = "org.glite.ce.cream"; j.StageInDir = jobInfo.ISB; j.StageOutDir = jobInfo.OSB; AddJobDetails(preparedjobdesc, j); j.IDFromEndpoint = jobInfo.id; jc.addEntity(j); } return retval; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/CREAM/PaxHeaders.7502/README0000644000000000000000000000012411745106052020762 xustar000000000000000027 mtime=1335135274.344052 27 atime=1513200574.436701 30 ctime=1513200660.246750504 nordugrid-arc-5.4.2/src/hed/acc/CREAM/README0000644000175000002070000000040011745106052021021 0ustar00mockbuildmock00000000000000Arc Client Component (ACC) plugins for supporting the gLite middleware through the CREAM interface. 
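The plugins listed below drive the service through the CREAMClient support class; a minimal sketch (illustrative only, not a file shipped in this directory) of the delegation/register/start sequence that SubmitterPluginCREAM performs through it is given here. It assumes two already-constructed clients, one pointing at the service's gridsite-delegation path and one at its /CREAM2 path (as SubmitterPluginCREAM::Submit() sets them up), and uses only methods declared in CREAMClient.h; all names in the sketch are hypothetical.

    #include <string>
    #include "CREAMClient.h"

    // delegation_client targets <service>/gridsite-delegation and
    // submission_client targets <service>/CREAM2.
    bool submit_sketch(Arc::CREAMClient& delegation_client,
                       Arc::CREAMClient& submission_client,
                       const std::string& jdl, const std::string& proxy_path) {
      const std::string delegation_id = "example-delegation-id";  // the plugin generates a UUID here
      if (!delegation_client.createDelegation(delegation_id, proxy_path))
        return false;
      submission_client.setDelegationId(delegation_id);
      Arc::creamJobInfo info;
      if (!submission_client.registerJob(jdl, info))   // fills info.id on success
        return false;
      return submission_client.startJob(info.id);      // needed because jobs are registered with autoStart=false
    }
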
Implements the following specialized classes: o JobControllerPluginCREAM o SubmitterCREAM Additional support classes: o CREAMClient o OpenSSLFunctions nordugrid-arc-5.4.2/src/hed/acc/PaxHeaders.7502/ldap0000644000000000000000000000013213214316024020110 xustar000000000000000030 mtime=1513200660.487753451 30 atime=1513200668.721854157 30 ctime=1513200660.487753451 nordugrid-arc-5.4.2/src/hed/acc/ldap/0000755000175000002070000000000013214316024020233 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/acc/ldap/PaxHeaders.7502/TargetInformationRetrieverPluginLDAPGLUE2.h0000644000000000000000000000012412045235201030240 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.336699 30 ctime=1513200660.484753414 nordugrid-arc-5.4.2/src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE2.h0000644000175000002070000000201012045235201030276 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_TARGETINFORMATIONRETRIEVERLDAPGLUE2_H__ #define __ARC_TARGETINFORMATIONRETRIEVERLDAPGLUE2_H__ #include namespace Arc { class Logger; class TargetInformationRetrieverPluginLDAPGLUE2 : public TargetInformationRetrieverPlugin { public: TargetInformationRetrieverPluginLDAPGLUE2(PluginArgument *parg): TargetInformationRetrieverPlugin(parg) { supportedInterfaces.push_back("org.nordugrid.ldapglue2"); }; ~TargetInformationRetrieverPluginLDAPGLUE2() {}; static Plugin* Instance(PluginArgument *arg) { return new TargetInformationRetrieverPluginLDAPGLUE2(arg); }; virtual EndpointQueryingStatus Query(const UserConfig&, const Endpoint&, std::list&, const EndpointQueryOptions&) const; virtual bool isEndpointNotSupported(const Endpoint&) const; private: static Logger logger; }; } // namespace Arc #endif // __ARC_TARGETINFORMATIONRETRIEVERLDAPGLUE2_H__ nordugrid-arc-5.4.2/src/hed/acc/ldap/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712071312350022223 xustar000000000000000027 mtime=1357223144.510838 30 atime=1513200594.749949452 30 ctime=1513200660.471753255 nordugrid-arc-5.4.2/src/hed/acc/ldap/Makefile.am0000644000175000002070000000233112071312350022264 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libaccldap.la libaccldap_la_SOURCES = Descriptors.cpp \ Extractor.h \ JobListRetrieverPluginLDAPNG.cpp JobListRetrieverPluginLDAPNG.h \ JobListRetrieverPluginLDAPGLUE2.cpp JobListRetrieverPluginLDAPGLUE2.h \ TargetInformationRetrieverPluginLDAPNG.cpp TargetInformationRetrieverPluginLDAPNG.h \ TargetInformationRetrieverPluginLDAPGLUE1.cpp TargetInformationRetrieverPluginLDAPGLUE1.h \ TargetInformationRetrieverPluginLDAPGLUE2.cpp TargetInformationRetrieverPluginLDAPGLUE2.h \ ServiceEndpointRetrieverPluginEGIIS.cpp ServiceEndpointRetrieverPluginEGIIS.h \ ServiceEndpointRetrieverPluginBDII.cpp ServiceEndpointRetrieverPluginBDII.h libaccldap_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libaccldap_la_LIBADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libaccldap_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/acc/ldap/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315722022236 xustar000000000000000030 mtime=1513200594.817950284 30 
atime=1513200648.945612286 30 ctime=1513200660.472753268 nordugrid-arc-5.4.2/src/hed/acc/ldap/Makefile.in0000644000175000002070000011113413214315722022305 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/acc/ldap DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libaccldap_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ 
$(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libaccldap_la_OBJECTS = libaccldap_la-Descriptors.lo \ libaccldap_la-JobListRetrieverPluginLDAPNG.lo \ libaccldap_la-JobListRetrieverPluginLDAPGLUE2.lo \ libaccldap_la-TargetInformationRetrieverPluginLDAPNG.lo \ libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE1.lo \ libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE2.lo \ libaccldap_la-ServiceEndpointRetrieverPluginEGIIS.lo \ libaccldap_la-ServiceEndpointRetrieverPluginBDII.lo libaccldap_la_OBJECTS = $(am_libaccldap_la_OBJECTS) libaccldap_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libaccldap_la_CXXFLAGS) \ $(CXXFLAGS) $(libaccldap_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libaccldap_la_SOURCES) DIST_SOURCES = $(libaccldap_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ 
AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = 
@OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = 
@sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libaccldap.la libaccldap_la_SOURCES = Descriptors.cpp \ Extractor.h \ JobListRetrieverPluginLDAPNG.cpp JobListRetrieverPluginLDAPNG.h \ JobListRetrieverPluginLDAPGLUE2.cpp JobListRetrieverPluginLDAPGLUE2.h \ TargetInformationRetrieverPluginLDAPNG.cpp TargetInformationRetrieverPluginLDAPNG.h \ TargetInformationRetrieverPluginLDAPGLUE1.cpp TargetInformationRetrieverPluginLDAPGLUE1.h \ TargetInformationRetrieverPluginLDAPGLUE2.cpp TargetInformationRetrieverPluginLDAPGLUE2.h \ ServiceEndpointRetrieverPluginEGIIS.cpp ServiceEndpointRetrieverPluginEGIIS.h \ ServiceEndpointRetrieverPluginBDII.cpp ServiceEndpointRetrieverPluginBDII.h libaccldap_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libaccldap_la_LIBADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libaccldap_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/acc/ldap/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/acc/ldap/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libaccldap.la: $(libaccldap_la_OBJECTS) $(libaccldap_la_DEPENDENCIES) $(libaccldap_la_LINK) -rpath $(pkglibdir) $(libaccldap_la_OBJECTS) $(libaccldap_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccldap_la-Descriptors.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccldap_la-JobListRetrieverPluginLDAPGLUE2.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccldap_la-JobListRetrieverPluginLDAPNG.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccldap_la-ServiceEndpointRetrieverPluginBDII.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccldap_la-ServiceEndpointRetrieverPluginEGIIS.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE1.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE2.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccldap_la-TargetInformationRetrieverPluginLDAPNG.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` 
@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libaccldap_la-Descriptors.lo: Descriptors.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccldap_la_CXXFLAGS) $(CXXFLAGS) -MT libaccldap_la-Descriptors.lo -MD -MP -MF $(DEPDIR)/libaccldap_la-Descriptors.Tpo -c -o libaccldap_la-Descriptors.lo `test -f 'Descriptors.cpp' || echo '$(srcdir)/'`Descriptors.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccldap_la-Descriptors.Tpo $(DEPDIR)/libaccldap_la-Descriptors.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Descriptors.cpp' object='libaccldap_la-Descriptors.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccldap_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccldap_la-Descriptors.lo `test -f 'Descriptors.cpp' || echo '$(srcdir)/'`Descriptors.cpp libaccldap_la-JobListRetrieverPluginLDAPNG.lo: JobListRetrieverPluginLDAPNG.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccldap_la_CXXFLAGS) $(CXXFLAGS) -MT libaccldap_la-JobListRetrieverPluginLDAPNG.lo -MD -MP -MF $(DEPDIR)/libaccldap_la-JobListRetrieverPluginLDAPNG.Tpo -c -o libaccldap_la-JobListRetrieverPluginLDAPNG.lo `test -f 'JobListRetrieverPluginLDAPNG.cpp' || echo '$(srcdir)/'`JobListRetrieverPluginLDAPNG.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccldap_la-JobListRetrieverPluginLDAPNG.Tpo $(DEPDIR)/libaccldap_la-JobListRetrieverPluginLDAPNG.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobListRetrieverPluginLDAPNG.cpp' object='libaccldap_la-JobListRetrieverPluginLDAPNG.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccldap_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccldap_la-JobListRetrieverPluginLDAPNG.lo `test -f 'JobListRetrieverPluginLDAPNG.cpp' || echo '$(srcdir)/'`JobListRetrieverPluginLDAPNG.cpp libaccldap_la-JobListRetrieverPluginLDAPGLUE2.lo: JobListRetrieverPluginLDAPGLUE2.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccldap_la_CXXFLAGS) $(CXXFLAGS) -MT libaccldap_la-JobListRetrieverPluginLDAPGLUE2.lo -MD -MP -MF $(DEPDIR)/libaccldap_la-JobListRetrieverPluginLDAPGLUE2.Tpo -c -o 
libaccldap_la-JobListRetrieverPluginLDAPGLUE2.lo `test -f 'JobListRetrieverPluginLDAPGLUE2.cpp' || echo '$(srcdir)/'`JobListRetrieverPluginLDAPGLUE2.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccldap_la-JobListRetrieverPluginLDAPGLUE2.Tpo $(DEPDIR)/libaccldap_la-JobListRetrieverPluginLDAPGLUE2.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='JobListRetrieverPluginLDAPGLUE2.cpp' object='libaccldap_la-JobListRetrieverPluginLDAPGLUE2.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccldap_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccldap_la-JobListRetrieverPluginLDAPGLUE2.lo `test -f 'JobListRetrieverPluginLDAPGLUE2.cpp' || echo '$(srcdir)/'`JobListRetrieverPluginLDAPGLUE2.cpp libaccldap_la-TargetInformationRetrieverPluginLDAPNG.lo: TargetInformationRetrieverPluginLDAPNG.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccldap_la_CXXFLAGS) $(CXXFLAGS) -MT libaccldap_la-TargetInformationRetrieverPluginLDAPNG.lo -MD -MP -MF $(DEPDIR)/libaccldap_la-TargetInformationRetrieverPluginLDAPNG.Tpo -c -o libaccldap_la-TargetInformationRetrieverPluginLDAPNG.lo `test -f 'TargetInformationRetrieverPluginLDAPNG.cpp' || echo '$(srcdir)/'`TargetInformationRetrieverPluginLDAPNG.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccldap_la-TargetInformationRetrieverPluginLDAPNG.Tpo $(DEPDIR)/libaccldap_la-TargetInformationRetrieverPluginLDAPNG.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='TargetInformationRetrieverPluginLDAPNG.cpp' object='libaccldap_la-TargetInformationRetrieverPluginLDAPNG.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccldap_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccldap_la-TargetInformationRetrieverPluginLDAPNG.lo `test -f 'TargetInformationRetrieverPluginLDAPNG.cpp' || echo '$(srcdir)/'`TargetInformationRetrieverPluginLDAPNG.cpp libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE1.lo: TargetInformationRetrieverPluginLDAPGLUE1.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccldap_la_CXXFLAGS) $(CXXFLAGS) -MT libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE1.lo -MD -MP -MF $(DEPDIR)/libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE1.Tpo -c -o libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE1.lo `test -f 'TargetInformationRetrieverPluginLDAPGLUE1.cpp' || echo '$(srcdir)/'`TargetInformationRetrieverPluginLDAPGLUE1.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE1.Tpo $(DEPDIR)/libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE1.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='TargetInformationRetrieverPluginLDAPGLUE1.cpp' object='libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE1.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) 
$(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccldap_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE1.lo `test -f 'TargetInformationRetrieverPluginLDAPGLUE1.cpp' || echo '$(srcdir)/'`TargetInformationRetrieverPluginLDAPGLUE1.cpp libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE2.lo: TargetInformationRetrieverPluginLDAPGLUE2.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccldap_la_CXXFLAGS) $(CXXFLAGS) -MT libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE2.lo -MD -MP -MF $(DEPDIR)/libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE2.Tpo -c -o libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE2.lo `test -f 'TargetInformationRetrieverPluginLDAPGLUE2.cpp' || echo '$(srcdir)/'`TargetInformationRetrieverPluginLDAPGLUE2.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE2.Tpo $(DEPDIR)/libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE2.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='TargetInformationRetrieverPluginLDAPGLUE2.cpp' object='libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE2.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccldap_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccldap_la-TargetInformationRetrieverPluginLDAPGLUE2.lo `test -f 'TargetInformationRetrieverPluginLDAPGLUE2.cpp' || echo '$(srcdir)/'`TargetInformationRetrieverPluginLDAPGLUE2.cpp libaccldap_la-ServiceEndpointRetrieverPluginEGIIS.lo: ServiceEndpointRetrieverPluginEGIIS.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccldap_la_CXXFLAGS) $(CXXFLAGS) -MT libaccldap_la-ServiceEndpointRetrieverPluginEGIIS.lo -MD -MP -MF $(DEPDIR)/libaccldap_la-ServiceEndpointRetrieverPluginEGIIS.Tpo -c -o libaccldap_la-ServiceEndpointRetrieverPluginEGIIS.lo `test -f 'ServiceEndpointRetrieverPluginEGIIS.cpp' || echo '$(srcdir)/'`ServiceEndpointRetrieverPluginEGIIS.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccldap_la-ServiceEndpointRetrieverPluginEGIIS.Tpo $(DEPDIR)/libaccldap_la-ServiceEndpointRetrieverPluginEGIIS.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ServiceEndpointRetrieverPluginEGIIS.cpp' object='libaccldap_la-ServiceEndpointRetrieverPluginEGIIS.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccldap_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccldap_la-ServiceEndpointRetrieverPluginEGIIS.lo `test -f 'ServiceEndpointRetrieverPluginEGIIS.cpp' || echo '$(srcdir)/'`ServiceEndpointRetrieverPluginEGIIS.cpp libaccldap_la-ServiceEndpointRetrieverPluginBDII.lo: ServiceEndpointRetrieverPluginBDII.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccldap_la_CXXFLAGS) $(CXXFLAGS) -MT 
libaccldap_la-ServiceEndpointRetrieverPluginBDII.lo -MD -MP -MF $(DEPDIR)/libaccldap_la-ServiceEndpointRetrieverPluginBDII.Tpo -c -o libaccldap_la-ServiceEndpointRetrieverPluginBDII.lo `test -f 'ServiceEndpointRetrieverPluginBDII.cpp' || echo '$(srcdir)/'`ServiceEndpointRetrieverPluginBDII.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccldap_la-ServiceEndpointRetrieverPluginBDII.Tpo $(DEPDIR)/libaccldap_la-ServiceEndpointRetrieverPluginBDII.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ServiceEndpointRetrieverPluginBDII.cpp' object='libaccldap_la-ServiceEndpointRetrieverPluginBDII.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccldap_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccldap_la-ServiceEndpointRetrieverPluginBDII.lo `test -f 'ServiceEndpointRetrieverPluginBDII.cpp' || echo '$(srcdir)/'`ServiceEndpointRetrieverPluginBDII.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/acc/ldap/PaxHeaders.7502/JobListRetrieverPluginLDAPNG.cpp0000644000000000000000000000012412300714551026177 xustar000000000000000027 mtime=1392744809.100789 27 atime=1513200574.331699 30 ctime=1513200660.475753304 nordugrid-arc-5.4.2/src/hed/acc/ldap/JobListRetrieverPluginLDAPNG.cpp0000644000175000002070000001170512300714551026250 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "JobListRetrieverPluginLDAPNG.h" namespace Arc { // Characters to be escaped in LDAP filter according to RFC4515 static const std::string filter_esc("&|=!><~*/()"); Logger JobListRetrieverPluginLDAPNG::logger(Logger::getRootLogger(), "JobListRetrieverPlugin.LDAPNG"); bool JobListRetrieverPluginLDAPNG::isEndpointNotSupported(const Endpoint& endpoint) const { const std::string::size_type pos = endpoint.URLString.find("://"); return pos != std::string::npos && lower(endpoint.URLString.substr(0, pos)) != "ldap"; } EndpointQueryingStatus JobListRetrieverPluginLDAPNG::Query(const UserConfig& uc, const Endpoint& endpoint, std::list& jobs, const EndpointQueryOptions&) const { EndpointQueryingStatus s(EndpointQueryingStatus::FAILED); if (isEndpointNotSupported(endpoint)) { return s; } URL url((endpoint.URLString.find("://") == std::string::npos ? "ldap://" : "") + endpoint.URLString, false, 2135, "/Mds-Vo-name=local,o=Grid"); if (!url) { return s; } //Create credential object in order to get the user DN const std::string *certpath, *keypath; if (uc.ProxyPath().empty()) { certpath = &uc.CertificatePath(); keypath = &uc.KeyPath(); } else { certpath = &uc.ProxyPath(); keypath = &uc.ProxyPath(); } std::string emptycadir; std::string emptycafile; Credential credential(*certpath, *keypath, emptycadir, emptycafile); std::string escaped_dn = escape_chars(credential.GetIdentityName(), filter_esc, '\\', false, escape_hex); //Query GRIS for all relevant information url.ChangeLDAPScope(URL::subtree); // Applying filter. Must be done through EndpointQueryOptions. 
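  // A hedged illustration of the escaping above (the sample value is invented):
  // escape_chars() rewrites every character listed in filter_esc into its
  // backslash-hex form, as RFC4515 expects for values embedded in an LDAP
  // filter, so an identity fragment such as "CN=owner (test)" would come out
  // roughly as "CN\3downer \28test\29" before being spliced into the
  // nordugrid-job-globalowner filter below.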
url.ChangeLDAPFilter("(|(nordugrid-job-globalowner=" + escaped_dn + ")(objectClass=nordugrid-cluster))"); DataBuffer buffer; DataHandle handler(url, uc); if (!handler) { logger.msg(INFO, "Can't create information handle - is the ARC ldap DMC plugin available?"); return s; } if (!handler->StartReading(buffer)) { return s; } int handle; unsigned int length; unsigned long long int offset; std::string result; while (buffer.for_write() || !buffer.eof_read()) if (buffer.for_write(handle, length, offset, true)) { result.append(buffer[handle], length); buffer.is_written(handle); } if (!handler->StopReading()) { return s; } XMLNode xmlresult(result); XMLNodeList xContactStrings = xmlresult.XPathLookup("//nordugrid-cluster-contactstring", NS()); if (xContactStrings.empty()) { return s; } std::string ContactString = (std::string)xContactStrings.front(); XMLNodeList xJobs = xmlresult.XPathLookup("//nordugrid-job-globalid[objectClass='nordugrid-job']", NS()); for (XMLNodeList::iterator it = xJobs.begin(); it != xJobs.end(); ++it) { Job j; if ((*it)["nordugrid-job-comment"]) { std::string comment = (std::string)(*it)["nordugrid-job-comment"]; std::string submittedvia = "SubmittedVia="; if (comment.compare(0, submittedvia.length(), submittedvia) == 0) { std::string interfacename = comment.substr(submittedvia.length()); if (interfacename != "org.nordugrid.gridftpjob") { logger.msg(DEBUG, "Skipping retrieved job (%s) because it was submitted via another interface (%s).", (std::string)(*it)["nordugrid-job-globalid"], interfacename); continue; } } } if ((*it)["nordugrid-job-jobname"]) j.Name = (std::string)(*it)["nordugrid-job-jobname"]; if ((*it)["nordugrid-job-submissiontime"]) j.LocalSubmissionTime = (std::string)(*it)["nordugrid-job-submissiontime"]; URL infoEndpoint(url); infoEndpoint.ChangeLDAPFilter("(nordugrid-job-globalid=" + escape_chars((std::string)(*it)["nordugrid-job-globalid"],filter_esc,'\\',false,escape_hex) + ")"); infoEndpoint.ChangeLDAPScope(URL::subtree); // Proposed mandatory attributes for ARC 3.0 j.JobID = (std::string)(*it)["nordugrid-job-globalid"]; j.ServiceInformationURL = url; j.ServiceInformationURL.ChangeLDAPFilter(""); j.ServiceInformationInterfaceName = "org.nordugrid.ldapng"; j.JobStatusURL = infoEndpoint; j.JobStatusInterfaceName = "org.nordugrid.ldapng"; j.JobManagementURL = URL(ContactString); j.JobManagementInterfaceName = "org.nordugrid.gridftpjob"; j.IDFromEndpoint = j.JobID.substr(ContactString.length()+1); j.StageInDir = URL(j.JobID); j.StageOutDir = URL(j.JobID); j.SessionDir = URL(j.JobID); jobs.push_back(j); } s = EndpointQueryingStatus::SUCCESSFUL; return s; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/ldap/PaxHeaders.7502/TargetInformationRetrieverPluginLDAPNG.cpp0000644000000000000000000000012412356757245030307 xustar000000000000000027 mtime=1404821157.416181 27 atime=1513200574.326699 30 ctime=1513200660.479753353 nordugrid-arc-5.4.2/src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPNG.cpp0000644000175000002070000005040112356757245030354 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "TargetInformationRetrieverPluginLDAPNG.h" namespace Arc { // Characters to be escaped in LDAP filter according to RFC4515 static const std::string filter_esc("&|=!><~*/()"); Logger TargetInformationRetrieverPluginLDAPNG::logger(Logger::getRootLogger(), "TargetInformationRetrieverPlugin.LDAPNG"); bool 
TargetInformationRetrieverPluginLDAPNG::isEndpointNotSupported(const Endpoint& endpoint) const { const std::string::size_type pos = endpoint.URLString.find("://"); return pos != std::string::npos && lower(endpoint.URLString.substr(0, pos)) != "ldap"; } EndpointQueryingStatus TargetInformationRetrieverPluginLDAPNG::Query(const UserConfig& uc, const Endpoint& cie, std::list& csList, const EndpointQueryOptions&) const { EndpointQueryingStatus s(EndpointQueryingStatus::FAILED); if (isEndpointNotSupported(cie)) { return s; } //Query ARIS for all relevant information URL url((cie.URLString.find("://") == std::string::npos ? "ldap://" : "") + cie.URLString, false, 2135, "/Mds-Vo-name=local,o=Grid"); url.ChangeLDAPScope(URL::subtree); if (!url) { return s; } //Create credential object in order to get the user DN std::string emptycadir, emptycafile; Credential credential(!uc.ProxyPath().empty() ? uc.ProxyPath() : uc.CertificatePath(), "", emptycadir, emptycafile); std::string escaped_dn = escape_chars(credential.GetIdentityName(), filter_esc, '\\', false, escape_hex); url.ChangeLDAPFilter("(|(objectclass=nordugrid-cluster)" "(objectclass=nordugrid-queue)" "(nordugrid-authuser-sn=" + escaped_dn + "))"); DataBuffer buffer; DataHandle handler(url, uc); if (!handler) { logger.msg(INFO, "Can't create information handle - " "is the ARC ldap DMC plugin available?"); return s; } if (!handler->StartReading(buffer)) { return s; } int handle; unsigned int length; unsigned long long int offset; std::string result; while (buffer.for_write() || !buffer.eof_read()) if (buffer.for_write(handle, length, offset, true)) { result.append(buffer[handle], length); buffer.is_written(handle); } if (!handler->StopReading()) { return s; } XMLNode xmlresult(result); XMLNodeList cluster = xmlresult.XPathLookup("//nordugrid-cluster-name[objectClass='nordugrid-cluster']", NS()); for (XMLNodeList::iterator it = cluster.begin(); it != cluster.end(); ++it) { ComputingServiceType cs; AdminDomainType& AdminDomain = cs.AdminDomain; LocationType& Location = cs.Location; cs->InformationOriginEndpoint = cie; // Computing Service attributes if ((*it)["nordugrid-cluster-name"]) { cs->Name = (std::string)(*it)["nordugrid-cluster-name"]; } if ((*it)["nordugrid-cluster-comment"]) { std::string comment = (std::string)(*it)["nordugrid-cluster-comment"]; std::string prefix = "GLUE2ServiceID="; std::string::size_type pos = comment.find(prefix); if (pos != std::string::npos) { // assuming that if there is any space, everything after that is not part of the ID anymore std::string::size_type spacePos = comment.find(" ", pos); if (spacePos != std::string::npos) spacePos -= pos + prefix.size(); cs->ID = comment.substr(pos + prefix.size(), spacePos); } } cs->Type = "org.nordugrid.arc-classic"; // Location attributes if ((*it)["nordugrid-cluster-location"]) { Location->PostCode = (std::string)(*it)["nordugrid-cluster-location"]; } // Admin Domain attributes if ((*it)["nordugrid-cluster-aliasname"]) { AdminDomain->Name = (std::string)(*it)["nordugrid-cluster-aliasname"]; } if ((*it)["nordugrid-cluster-owner"]) { AdminDomain->Owner = (std::string)(*it)["nordugrid-cluster-owner"]; } ComputingEndpointType ComputingEndpoint; ComputingEndpoint->Capability.insert("executionmanagement.jobexecution"); ComputingEndpoint->Capability.insert("executionmanagement.jobmanager"); ComputingEndpoint->Technology = "gridftp"; ComputingEndpoint->InterfaceName = "org.nordugrid.gridftpjob"; // Computing Endpoint attributes if ((*it)["nordugrid-cluster-contactstring"]) { 
ComputingEndpoint->URLString = (std::string)(*it)["nordugrid-cluster-contactstring"]; } if ((*it)["nordugrid-cluster-middleware"]) { ComputingEndpoint->Implementation = (std::string)(*it)["nordugrid-cluster-middleware"]; } if ((*it)["nordugrid-cluster-issuerca"]) { ComputingEndpoint->IssuerCA = (std::string)(*it)["nordugrid-cluster-issuerca"]; } if ((*it)["nordugrid-cluster-trustedca"]) { for (XMLNode n = (*it)["nordugrid-cluster-trustedca"]; n; ++n) { ComputingEndpoint->TrustedCA.push_back((std::string)n); } } ComputingEndpoint->Staging = "staginginout"; ComputingEndpoint->JobDescriptions.push_back("nordugrid:xrsl"); ComputingEndpoint->JobDescriptions.push_back("ogf:jsdl:1.0"); cs.ComputingEndpoint.insert(std::pair(0, ComputingEndpoint)); ComputingEndpointType InformationEndpoint; URL InformationEndpointURL = url; InformationEndpointURL.ChangeLDAPFilter(""); InformationEndpointURL.ChangeLDAPScope(URL::base); InformationEndpoint->URLString = InformationEndpointURL.fullstr(); InformationEndpoint->InterfaceName = cie.InterfaceName; InformationEndpoint->HealthState = cie.HealthState; InformationEndpoint->HealthStateInfo = cie.HealthStateInfo; InformationEndpoint->QualityLevel = cie.QualityLevel; InformationEndpoint->Capability = cie.Capability; cs.ComputingEndpoint.insert(std::pair(1, InformationEndpoint)); int shareID = 0; for (XMLNode queue = (*it)["nordugrid-queue-name"]; (bool)queue; ++queue) { ComputingShareType ComputingShare; // Only the "best" nordugrid-queue-status is mapped to ComputingEndpoint.HealthState{,Info} if (queue["nordugrid-queue-status"] && ComputingEndpoint->HealthState != "ok") { if (((std::string)queue["nordugrid-queue-status"]).substr(0, 6) == "active") { ComputingEndpoint->HealthState = "ok"; ComputingEndpoint->HealthStateInfo = (std::string)queue["nordugrid-queue-status"]; } else if (((std::string)queue["nordugrid-queue-status"]).substr(0, 8) == "inactive") { if (ComputingEndpoint->HealthState != "critical") { ComputingEndpoint->HealthState = "critical"; ComputingEndpoint->HealthStateInfo = (std::string)queue["nordugrid-queue-status"]; } } else { ComputingEndpoint->HealthState = "other"; ComputingEndpoint->HealthStateInfo = (std::string)queue["nordugrid-queue-status"]; } } XMLNode authuser = queue["nordugrid-info-group-name"]["nordugrid-authuser-name"]; if (queue["nordugrid-queue-name"]) { ComputingShare->Name = (std::string)queue["nordugrid-queue-name"]; } if (queue["nordugrid-queue-maxwalltime"]) { ComputingShare->MaxWallTime = Period((std::string)queue["nordugrid-queue-maxwalltime"], PeriodMinutes); } if (queue["nordugrid-queue-minwalltime"]) { ComputingShare->MinWallTime = Period((std::string)queue["nordugrid-queue-minwalltime"], PeriodMinutes); } if (queue["nordugrid-queue-defaultwalltime"]) { ComputingShare->DefaultWallTime = Period((std::string)queue["nordugrid-queue-defaultwalltime"], PeriodMinutes); } if (queue["nordugrid-queue-maxtotalcputime"]) { ComputingShare->MaxTotalCPUTime = Period((std::string)queue["nordugrid-queue-maxtotalcputime"], PeriodMinutes); } if (queue["nordugrid-queue-maxcputime"]) { ComputingShare->MaxCPUTime = Period((std::string)queue["nordugrid-queue-maxcputime"], PeriodMinutes); } if (queue["nordugrid-queue-mincputime"]) { ComputingShare->MinCPUTime = Period((std::string)queue["nordugrid-queue-mincputime"], PeriodMinutes); } if (queue["nordugrid-queue-defaultcputime"]) { ComputingShare->DefaultCPUTime = Period((std::string)queue["nordugrid-queue-defaultcputime"], PeriodMinutes); } EntryToInt(url, queue["nordugrid-queue-maxrunning"], 
ComputingShare->MaxRunningJobs); EntryToInt(url, queue["nordugrid-queue-maxqueable"], ComputingShare->MaxWaitingJobs); EntryToInt(url, queue["nordugrid-queue-maxuserrun"], ComputingShare->MaxUserRunningJobs); if (queue["nordugrid-queue-schedulingpolicy"]) { ComputingShare->SchedulingPolicy = (std::string)queue["nordugrid-queue-schedulingpolicy"]; } EntryToInt(url, queue["nordugrid-queue-nodememory"], ComputingShare->MaxVirtualMemory) || EntryToInt(url, (*it)["nordugrid-cluster-nodememory"], ComputingShare->MaxVirtualMemory); EntryToInt(url, queue["nordugrid-queue-nodememory"], ComputingShare->MaxMainMemory) || EntryToInt(url, (*it)["nordugrid-cluster-nodememory"], ComputingShare->MaxMainMemory); EntryToInt(url, authuser["nordugrid-authuser-diskspace"], ComputingShare->MaxDiskSpace); if ((*it)["nordugrid-cluster-localse"]) { ComputingShare->DefaultStorageService = (std::string)(*it)["nordugrid-cluster-localse"]; } EntryToInt(url, queue["nordugrid-queue-running"], ComputingShare->RunningJobs); if (queue["nordugrid-queue-running"] && queue["nordugrid-queue-gridrunning"]) { int run = 0, gridrun = 0; stringto((std::string)queue["nordugrid-queue-running"], run); stringto((std::string)queue["nordugrid-queue-gridrunning"], gridrun); ComputingShare->LocalRunningJobs = run - gridrun; } if (queue["nordugrid-queue-gridqueued"] && queue["nordugrid-queue-localqueued"]) { int gridqueued = 0, localqueued = 0; stringto((std::string)queue["nordugrid-queue-gridqueued"], gridqueued); stringto((std::string)queue["nordugrid-queue-localqueued"], localqueued); ComputingShare->WaitingJobs = gridqueued + localqueued; } EntryToInt(url, queue["nordugrid-queue-localqueued"], ComputingShare->LocalWaitingJobs); EntryToInt(url, queue["nordugrid-queue-prelrmsqueued"], ComputingShare->PreLRMSWaitingJobs); ComputingShare->TotalJobs = ((ComputingShare->RunningJobs > 0) ? ComputingShare->RunningJobs : 0) + ((ComputingShare->WaitingJobs > 0) ? ComputingShare->WaitingJobs : 0) + ((ComputingShare->PreLRMSWaitingJobs > 0) ? ComputingShare->PreLRMSWaitingJobs : 0); if (authuser["nordugrid-authuser-freecpus"]) { std::string value = (std::string)authuser["nordugrid-authuser-freecpus"]; std::string::size_type pos = 0; do { std::string::size_type spacepos = value.find(' ', pos); std::string entry; if (spacepos == std::string::npos) entry = value.substr(pos); else entry = value.substr(pos, spacepos - pos); int num_cpus = 0; Period time; std::string::size_type colonpos = entry.find(':'); if (colonpos == std::string::npos) { stringto(entry, num_cpus); time = LONG_MAX; } else { stringto(entry.substr(0, colonpos), num_cpus); int t = 0; if (stringto(entry.substr(colonpos + 1), t)) { time = t*60; } } ComputingShare->FreeSlotsWithDuration[time] = num_cpus; pos = spacepos; if (pos != std::string::npos) pos++; } while (pos != std::string::npos); ComputingShare->FreeSlots = ComputingShare->FreeSlotsWithDuration.begin()->second; } EntryToInt(url, (*it)["nordugrid-cluser-usedcpus"], ComputingShare->UsedSlots); if (queue["nordugrid-queue-schedulingpolicy"]) { ComputingShare->ReservationPolicy = (std::string)queue["nordugrid-queue-schedulingpolicy"]; } cs.ComputingShare.insert(std::pair(shareID++, ComputingShare)); } // Mapping: One ComputingManager per nordugrid-cluster-name. 
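  // Sketch of the per-cluster mapping carried out below (attribute values are
  // invented for illustration only):
  //   nordugrid-cluster-lrms-type    "torque" -> ComputingManager->ProductName
  //   nordugrid-cluster-lrms-version "4.2.10" -> ComputingManager->ProductVersion
  //   nordugrid-cluster-totalcpus    "512"    -> TotalPhysicalCPUs, which is then
  //                                              copied to TotalLogicalCPUs and TotalSlots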
ComputingManagerType ComputingManager; // Computing Manager attributes if ((*it)["nordugrid-cluster-lrms-type"]) { ComputingManager->ProductName = (std::string)(*it)["nordugrid-cluster-lrms-type"]; } if ((*it)["nordugrid-cluster-lrms-version"]) { ComputingManager->ProductVersion = (std::string)(*it)["nordugrid-cluster-lrms-version"]; } // TODO: The nordugrid-queue-totalcpus might need to be mapped to ComputingManager CPUs, i.e. multiple to single mapping. /* if (queue["nordugrid-queue-totalcpus"]) { ComputingManager->TotalPhysicalCPUs = ComputingManager->TotalLogicalCPUs = ComputingManager->TotalSlots = stringtoi((std::string)queue["nordugrid-queue-totalcpus"]); } else */ if (EntryToInt(url, (*it)["nordugrid-cluster-totalcpus"], ComputingManager->TotalPhysicalCPUs)) { ComputingManager->TotalLogicalCPUs = ComputingManager->TotalSlots = ComputingManager->TotalPhysicalCPUs; } // TODO: nordugrid-queue-homogenity might need to mapped to ComputingManager->Homogeneous, i.e. multiple to single mapping. /* if (queue["nordugrid-queue-homogeneity"]) { ComputingManager->Homogeneous = ((std::string)queue["nordugrid-queue-homogeneity"] != "false"); } else */ if ((*it)["nordugrid-cluster-homogeneity"]) { ComputingManager->Homogeneous = ((std::string)(*it)["nordugrid-cluster-homogeneity"] != "false"); } if (EntryToInt(url, (*it)["nordugrid-cluster-sessiondir-total"], ComputingManager->WorkingAreaTotal)) { ComputingManager->WorkingAreaTotal /= 1000; } if (EntryToInt(url, (*it)["nordugrid-cluster-sessiondir-free"], ComputingManager->WorkingAreaFree)) { ComputingManager->WorkingAreaFree /= 1000; } if ((*it)["nordugrid-cluster-sessiondir-lifetime"]) { ComputingManager->WorkingAreaLifeTime = Period((std::string)(*it)["nordugrid-cluster-sessiondir-lifetime"], PeriodMinutes); } if (EntryToInt(url, (*it)["nordugrid-cluster-cache-total"], ComputingManager->CacheTotal)) { ComputingManager->CacheTotal /= 1000; } if (EntryToInt(url, (*it)["nordugrid-cluster-cache-free"], ComputingManager->CacheFree)) { ComputingManager->CacheFree /= 1000; } // Benchmarks // TODO: nordugrid-queue-benchmark might need to mapped to ComputingManager.Benchmark, i.e. multiple to single mapping. /* if (queue["nordugrid-queue-benchmark"]) { for (XMLNode n = queue["nordugrid-queue-benchmark"]; n; ++n) { std::string benchmark = (std::string)n; std::string::size_type alpha = benchmark.find_first_of("@"); std::string benchmarkname = benchmark.substr(0, alpha); double performance = stringtod(benchmark.substr(alpha + 1)); (*ComputingManager.Benchmarks)[benchmarkname] = performance; } } else */ if ((*it)["nordugrid-cluster-benchmark"]) { for (XMLNode n = (*it)["nordugrid-cluster-benchmark"]; n; ++n) { std::string benchmark = (std::string)n; std::string::size_type alpha = benchmark.find_first_of("@"); std::string benchmarkname = trim(benchmark.substr(0, alpha)); double performance = 0; stringto(trim(benchmark.substr(alpha + 1)), performance); (*ComputingManager.Benchmarks)[benchmarkname] = performance; } } // TODO: One ExecutionEnvironment per nordugrid-queue-name. Implement support for ExecutionEnvironment <-> ComputingShare associations in ComputingServiceType::GetExecutionTargets method, and in ComputingShareType. int eeID = 0; ExecutionEnvironmentType ExecutionEnvironment; // TODO: Map to multiple ExecutionEnvironment objects. 
/*if (queue["nordugrid-queue-architecture"]) { ExecutionEnvironment->Platform = (std::string)queue["nordugrid-queue-architecture"]; } else */ if ((*it)["nordugrid-cluster-architecture"]) { ExecutionEnvironment->Platform = (std::string)(*it)["nordugrid-cluster-architecture"]; } // TODO: Map to multiple ExecutionEnvironment objects. /* if (queue["nordugrid-queue-nodecpu"]) { ExecutionEnvironment->CPUVendor = (std::string)queue["nordugrid-queue-nodecpu"]; ExecutionEnvironment->CPUModel = (std::string)queue["nordugrid-queue-nodecpu"]; ExecutionEnvironment->CPUVersion = (std::string)queue["nordugrid-queue-nodecpu"]; // CPUClockSpeed = // (std::string)queue["nordugrid-queue-nodecpu"]; } else */ if ((*it)["nordugrid-cluster-nodecpu"]) { ExecutionEnvironment->CPUVendor = (std::string)(*it)["nordugrid-cluster-nodecpu"]; ExecutionEnvironment->CPUModel = (std::string)(*it)["nordugrid-cluster-nodecpu"]; ExecutionEnvironment->CPUVersion = (std::string)(*it)["nordugrid-cluster-nodecpu"]; // ExecutionEnvironment.CPUClockSpeed = // (std::string)queue["nordugrid-cluster-nodecpu"]; } // TODO: Map to multiple ExecutionEnvironment objects. /* if (queue["nordugrid-queue-nodememory"]) { ExecutionEnvironment->MainMemorySize = stringtoi((std::string)queue["nordugrid-queue-nodememory"]); } else */ EntryToInt(url, (*it)["nordugrid-cluster-nodememory"], ExecutionEnvironment->MainMemorySize); // TODO: Map to multiple ExecutionEnvironment objects. /* if (queue["nordugrid-queue-opsys"]) { ExecutionEnvironment->OperatingSystem = Software((std::string)queue["nordugrid-queue-opsys"][0], (std::string)queue["nordugrid-queue-opsys"][1]); } else */ if ((*it)["nordugrid-cluster-opsys"]) { ExecutionEnvironment->OperatingSystem = Software((std::string)(*it)["nordugrid-cluster-opsys"][0], (std::string)(*it)["nordugrid-cluster-opsys"][1]); } if ((*it)["nordugrid-cluster-nodeaccess"]) { for (XMLNode n = (*it)["nordugrid-cluster-nodeaccess"]; n; ++n) { if ((std::string)n == "inbound") { ExecutionEnvironment->ConnectivityIn = true; } else if ((std::string)n == "outbound") { ExecutionEnvironment->ConnectivityOut = true; } } } ComputingManager.ExecutionEnvironment.insert(std::pair(eeID, ExecutionEnvironment)); // Application Environments for (XMLNode n = (*it)["nordugrid-cluster-runtimeenvironment"]; n; ++n) { ApplicationEnvironment rte((std::string)n); rte.State = "UNDEFINEDVALUE"; rte.FreeSlots = -1; rte.FreeUserSeats = -1; rte.FreeJobs = -1; ComputingManager.ApplicationEnvironments->push_back(rte); } cs.ComputingManager.insert(std::pair(0, ComputingManager)); csList.push_back(cs); } if (!csList.empty()) s = EndpointQueryingStatus::SUCCESSFUL; return s; } bool TargetInformationRetrieverPluginLDAPNG::EntryToInt(const URL& url, XMLNode entry, int& i) { if (entry && !stringto((std::string)entry, i)) { logger.msg(INFO, "Unable to parse the %s.%s value from execution service (%s).", entry.Parent().Name(), entry.Name(), url.fullstr()); logger.msg(DEBUG, "Value of %s.%s is \"%s\"", entry.Parent().Name(), entry.Name(), (std::string)entry); return false; } return (bool)entry; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/ldap/PaxHeaders.7502/JobListRetrieverPluginLDAPNG.h0000644000000000000000000000012412045235201025640 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.335699 30 ctime=1513200660.476753317 nordugrid-arc-5.4.2/src/hed/acc/ldap/JobListRetrieverPluginLDAPNG.h0000644000175000002070000000164012045235201025706 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef 
__ARC_JOBLISTRETRIEVERPLUGINLDAPNG_H__ #define __ARC_JOBLISTRETRIEVERPLUGINLDAPNG_H__ #include #include namespace Arc { class Logger; class JobListRetrieverPluginLDAPNG : public JobListRetrieverPlugin { public: JobListRetrieverPluginLDAPNG(PluginArgument* parg): JobListRetrieverPlugin(parg) { supportedInterfaces.push_back("org.nordugrid.ldapng"); } virtual ~JobListRetrieverPluginLDAPNG() {} static Plugin* Instance(PluginArgument *arg) { return new JobListRetrieverPluginLDAPNG(arg); } virtual EndpointQueryingStatus Query(const UserConfig&, const Endpoint&, std::list&, const EndpointQueryOptions&) const; virtual bool isEndpointNotSupported(const Endpoint&) const; private: static Logger logger; }; } // namespace Arc #endif // __ARC_JOBLISTRETRIEVERPLUGINLDAPNG_H__ nordugrid-arc-5.4.2/src/hed/acc/ldap/PaxHeaders.7502/JobListRetrieverPluginLDAPGLUE2.h0000644000000000000000000000012412045235201026152 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.336699 30 ctime=1513200660.478753341 nordugrid-arc-5.4.2/src/hed/acc/ldap/JobListRetrieverPluginLDAPGLUE2.h0000644000175000002070000000167012045235201026223 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOBLISTRETRIEVERPLUGINLDAPGLUE2_H__ #define __ARC_JOBLISTRETRIEVERPLUGINLDAPGLUE2_H__ #include #include namespace Arc { class Logger; class JobListRetrieverPluginLDAPGLUE2 : public JobListRetrieverPlugin { public: JobListRetrieverPluginLDAPGLUE2(PluginArgument* parg): JobListRetrieverPlugin(parg) { supportedInterfaces.push_back("org.nordugrid.ldapglue2"); } virtual ~JobListRetrieverPluginLDAPGLUE2() {} static Plugin* Instance(PluginArgument *arg) { return new JobListRetrieverPluginLDAPGLUE2(arg); } virtual EndpointQueryingStatus Query(const UserConfig&, const Endpoint&, std::list&, const EndpointQueryOptions&) const; virtual bool isEndpointNotSupported(const Endpoint&) const; private: static Logger logger; }; } // namespace Arc #endif // __ARC_JOBLISTRETRIEVERPLUGINLDAPGLUE2_H__ nordugrid-arc-5.4.2/src/hed/acc/ldap/PaxHeaders.7502/Descriptors.cpp0000644000000000000000000000012312675602216023205 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.330699 29 ctime=1513200660.47375328 nordugrid-arc-5.4.2/src/hed/acc/ldap/Descriptors.cpp0000644000175000002070000000310312675602216023250 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include "JobListRetrieverPluginLDAPNG.h" #include "JobListRetrieverPluginLDAPGLUE2.h" #include "TargetInformationRetrieverPluginLDAPGLUE1.h" #include "TargetInformationRetrieverPluginLDAPGLUE2.h" #include "TargetInformationRetrieverPluginLDAPNG.h" #include "ServiceEndpointRetrieverPluginEGIIS.h" #include "ServiceEndpointRetrieverPluginBDII.h" extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "LDAPNG", "HED:JobListRetrieverPlugin", "Classic NorduGrid LDAP Job List", 0, &Arc::JobListRetrieverPluginLDAPNG::Instance }, { "LDAPGLUE2", "HED:JobListRetrieverPlugin", "GLUE2 LDAP Job List", 0, &Arc::JobListRetrieverPluginLDAPGLUE2::Instance }, { "LDAPGLUE1", "HED:TargetInformationRetrieverPlugin", "GLUE1.x LDAP Local Information", 0, &Arc::TargetInformationRetrieverPluginLDAPGLUE1::Instance }, { "LDAPGLUE2", "HED:TargetInformationRetrieverPlugin", "GLUE2 LDAP Local Information", 0, &Arc::TargetInformationRetrieverPluginLDAPGLUE2::Instance }, { "LDAPNG", "HED:TargetInformationRetrieverPlugin", "Classic NorduGrid LDAP Local Information", 0, 
&Arc::TargetInformationRetrieverPluginLDAPNG::Instance }, { "EGIIS", "HED:ServiceEndpointRetrieverPlugin", "Classic NorduGrid EGIIS Registry", 0, &Arc::ServiceEndpointRetrieverPluginEGIIS::Instance }, { "BDII", "HED:ServiceEndpointRetrieverPlugin", "BDII top and site", 0, &Arc::ServiceEndpointRetrieverPluginBDII::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/acc/ldap/PaxHeaders.7502/ServiceEndpointRetrieverPluginEGIIS.h0000644000000000000000000000012412045235201027266 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.328699 30 ctime=1513200660.485753427 nordugrid-arc-5.4.2/src/hed/acc/ldap/ServiceEndpointRetrieverPluginEGIIS.h0000644000175000002070000000215712045235201027340 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_SERVICEENDPOINTRETRIEVEREPLUGINGIIS_H__ #define __ARC_SERVICEENDPOINTRETRIEVEREPLUGINGIIS_H__ #include #include #include namespace Arc { class Logger; class ServiceEndpointRetrieverPluginEGIIS : public ServiceEndpointRetrieverPlugin { public: ServiceEndpointRetrieverPluginEGIIS(PluginArgument* parg): ServiceEndpointRetrieverPlugin(parg) { supportedInterfaces.push_back("org.nordugrid.ldapegiis"); } virtual ~ServiceEndpointRetrieverPluginEGIIS() {} static Plugin* Instance(PluginArgument* arg) { return new ServiceEndpointRetrieverPluginEGIIS(arg); } virtual EndpointQueryingStatus Query(const UserConfig& uc, const Endpoint& rEndpoint, std::list&, const EndpointQueryOptions&) const; virtual bool isEndpointNotSupported(const Endpoint&) const; private: static Logger logger; }; } // namespace Arc #endif // __ARC_SERVICEENDPOINTRETRIEVEREPLUGINGIIS_H__ nordugrid-arc-5.4.2/src/hed/acc/ldap/PaxHeaders.7502/TargetInformationRetrieverPluginLDAPGLUE1.h0000644000000000000000000000012312045235201030236 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.333699 29 ctime=1513200660.48275339 nordugrid-arc-5.4.2/src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE1.h0000644000175000002070000000201012045235201030275 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_TARGETINFORMATIONRETRIEVERLDAPGLUE1_H__ #define __ARC_TARGETINFORMATIONRETRIEVERLDAPGLUE1_H__ #include namespace Arc { class Logger; class TargetInformationRetrieverPluginLDAPGLUE1 : public TargetInformationRetrieverPlugin { public: TargetInformationRetrieverPluginLDAPGLUE1(PluginArgument *parg): TargetInformationRetrieverPlugin(parg) { supportedInterfaces.push_back("org.nordugrid.ldapglue1"); }; ~TargetInformationRetrieverPluginLDAPGLUE1() {}; static Plugin* Instance(PluginArgument *arg) { return new TargetInformationRetrieverPluginLDAPGLUE1(arg); }; virtual EndpointQueryingStatus Query(const UserConfig&, const Endpoint&, std::list&, const EndpointQueryOptions&) const; virtual bool isEndpointNotSupported(const Endpoint&) const; private: static Logger logger; }; } // namespace Arc #endif // __ARC_TARGETINFORMATIONRETRIEVERLDAPGLUE1_H__ nordugrid-arc-5.4.2/src/hed/acc/ldap/PaxHeaders.7502/TargetInformationRetrieverPluginLDAPGLUE2.cpp0000644000000000000000000000012412675602216030610 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200574.333699 30 ctime=1513200660.483753402 nordugrid-arc-5.4.2/src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE2.cpp0000644000175000002070000003713012675602216030661 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include 
"Extractor.h" #include "TargetInformationRetrieverPluginLDAPGLUE2.h" namespace Arc { Logger TargetInformationRetrieverPluginLDAPGLUE2::logger(Logger::getRootLogger(), "TargetInformationRetrieverPlugin.LDAPGLUE2"); bool TargetInformationRetrieverPluginLDAPGLUE2::isEndpointNotSupported(const Endpoint& endpoint) const { const std::string::size_type pos = endpoint.URLString.find("://"); return pos != std::string::npos && lower(endpoint.URLString.substr(0, pos)) != "ldap"; } EndpointQueryingStatus TargetInformationRetrieverPluginLDAPGLUE2::Query(const UserConfig& uc, const Endpoint& ce, std::list& csList, const EndpointQueryOptions&) const { EndpointQueryingStatus s(EndpointQueryingStatus::FAILED); if (isEndpointNotSupported(ce)) { return s; } URL url((ce.URLString.find("://") == std::string::npos ? "ldap://" : "") + ce.URLString, false, 2135, "/o=glue"); url.ChangeLDAPScope(URL::subtree); url.ChangeLDAPFilter("(&(!(GLUE2GroupID=ComputingActivities))(!(ObjectClass=GLUE2ComputingActivity)))"); if (!url) { return s; } DataBuffer buffer; DataHandle handler(url, uc); if (!handler) { logger.msg(INFO, "Can't create information handle - " "is the ARC ldap DMC plugin available?"); return s; } if (!handler->StartReading(buffer)) { return s; } int handle; unsigned int length; unsigned long long int offset; std::string result; while (buffer.for_write() || !buffer.eof_read()) if (buffer.for_write(handle, length, offset, true)) { result.append(buffer[handle], length); buffer.is_written(handle); } if (!handler->StopReading()) { return s; } XMLNode xml_document(result); Extractor document(xml_document, "", "GLUE2", &logger); std::list services = Extractor::All(document, "ComputingService"); for (std::list::iterator it = services.begin(); it != services.end(); it++) { Extractor& service = *it; ComputingServiceType cs; AdminDomainType& AdminDomain = cs.AdminDomain; LocationType& Location = cs.Location; cs->InformationOriginEndpoint = ce; // GFD.147 GLUE2 5.3 Location Extractor location = Extractor::First(service, "Location"); location.set("Address", Location->Address); location.set("Place", Location->Place); location.set("Country", Location->Country); location.set("PostCode", Location->PostCode); location.set("Latitude", Location->Latitude); location.set("Longitude", Location->Longitude); // GFD.147 GLUE2 5.5.1 Admin Domain Extractor domain = Extractor::First(document, "AdminDomain"); domain.set("EntityName", AdminDomain->Name); domain.set("Owner", AdminDomain->Owner); // GFD.147 GLUE2 6.1 Computing Service service.set("EntityName", cs->Name); service.set("ServiceType", cs->Type); service.set("ServiceID", cs->ID); service.set("ServiceQualityLevel", cs->QualityLevel); service.set("ServiceCapability", cs->Capability); service.set("ComputingServiceTotalJobs", cs->TotalJobs); service.set("ComputingServiceRunningJobs", cs->RunningJobs); service.set("ComputingServiceWaitingJobs", cs->WaitingJobs); service.set("ComputingServiceStagingJobs", cs->StagingJobs); service.set("ComputingServiceSuspendedJobs", cs->SuspendedJobs); service.set("ComputingServicePreLRMSWaitingJobs", cs->PreLRMSWaitingJobs); // GFD.147 GLUE2 6.2 ComputingEndpoint std::list endpoints = Extractor::All(service, "ComputingEndpoint"); int endpointID = 0; for (std::list::iterator ite = endpoints.begin(); ite != endpoints.end(); ++ite) { Extractor& endpoint = *ite; endpoint.prefix = "Endpoint"; ComputingEndpointType ComputingEndpoint; endpoint.set("URL", ComputingEndpoint->URLString); endpoint.set("Capability", ComputingEndpoint->Capability); 
endpoint.set("Technology", ComputingEndpoint->Technology); endpoint.set("InterfaceName", ComputingEndpoint->InterfaceName); ComputingEndpoint->InterfaceName = lower(ComputingEndpoint->InterfaceName); endpoint.set("InterfaceVersion", ComputingEndpoint->InterfaceVersion); endpoint.set("InterfaceExtension", ComputingEndpoint->InterfaceExtension); endpoint.set("SupportedProfile", ComputingEndpoint->SupportedProfile); endpoint.set("Implementor", ComputingEndpoint->Implementor); ComputingEndpoint->Implementation = Software(endpoint["ImplementationName"], endpoint["ImplementationVersion"]); endpoint.set("QualityLevel", ComputingEndpoint->QualityLevel); endpoint.set("HealthState", ComputingEndpoint->HealthState); endpoint.set("HealthStateInfo", ComputingEndpoint->HealthStateInfo); endpoint.set("ServingState", ComputingEndpoint->ServingState); endpoint.set("IssuerCA", ComputingEndpoint->IssuerCA); endpoint.set("TrustedCA", ComputingEndpoint->TrustedCA); endpoint.set("DowntimeStarts", ComputingEndpoint->DowntimeStarts); endpoint.set("DowntimeEnds", ComputingEndpoint->DowntimeEnds); endpoint.set("Staging", ComputingEndpoint->Staging); endpoint.set("JobDescription", ComputingEndpoint->JobDescriptions); cs.ComputingEndpoint.insert(std::pair(endpointID++, ComputingEndpoint)); } // GFD.147 GLUE2 5.12.2 MappingPolicy std::map MappingPolicies; // Share.ID, MappingPolicy std::list policies = Extractor::All(service, "MappingPolicy"); for (std::list::iterator itp = policies.begin(); itp != policies.end(); ++itp) { Extractor& policy = *itp; policy.prefix = "Policy"; MappingPolicyType MappingPolicy; policy.set("ID", MappingPolicy->ID); policy.set("Scheme", MappingPolicy->Scheme); policy.set("Rule", MappingPolicy->Rule); policy.prefix = "MappingPolicy"; MappingPolicies[policy.get("ShareForeignKey")] = MappingPolicy; } // GFD.147 GLUE2 6.3 Computing Share std::list shares = Extractor::All(service, "ComputingShare"); int shareID = 0; for (std::list::iterator its = shares.begin(); its != shares.end(); ++its) { Extractor& share = *its; ComputingShareType ComputingShare; share.set("EntityName", ComputingShare->Name); share.set("MappingQueue", ComputingShare->MappingQueue); share.set("MaxWallTime", ComputingShare->MaxWallTime); share.set("MaxTotalWallTime", ComputingShare->MaxTotalWallTime); share.set("MinWallTime", ComputingShare->MinWallTime); share.set("DefaultWallTime", ComputingShare->DefaultWallTime); share.set("MaxCPUTime", ComputingShare->MaxCPUTime); share.set("MaxTotalCPUTime", ComputingShare->MaxTotalCPUTime); share.set("MinCPUTime", ComputingShare->MinCPUTime); share.set("DefaultCPUTime", ComputingShare->DefaultCPUTime); share.set("MaxTotalJobs", ComputingShare->MaxTotalJobs); share.set("MaxRunningJobs", ComputingShare->MaxRunningJobs); share.set("MaxWaitingJobs", ComputingShare->MaxWaitingJobs); share.set("MaxPreLRMSWaitingJobs", ComputingShare->MaxPreLRMSWaitingJobs); share.set("MaxUserRunningJobs", ComputingShare->MaxUserRunningJobs); share.set("MaxSlotsPerJob", ComputingShare->MaxSlotsPerJob); share.set("MaxStageInStreams", ComputingShare->MaxStageInStreams); share.set("MaxStageOutStreams", ComputingShare->MaxStageOutStreams); share.set("SchedulingPolicy", ComputingShare->SchedulingPolicy); share.set("MaxMainMemory", ComputingShare->MaxMainMemory); share.set("MaxVirtualMemory", ComputingShare->MaxVirtualMemory); share.set("MaxDiskSpace", ComputingShare->MaxDiskSpace); share.set("DefaultStorageService", ComputingShare->DefaultStorageService); share.set("Preemption", ComputingShare->Preemption); 
share.set("TotalJobs", ComputingShare->TotalJobs); share.set("RunningJobs", ComputingShare->RunningJobs); share.set("LocalRunningJobs", ComputingShare->LocalRunningJobs); share.set("WaitingJobs", ComputingShare->WaitingJobs); share.set("LocalWaitingJobs", ComputingShare->LocalWaitingJobs); share.set("SuspendedJobs", ComputingShare->SuspendedJobs); share.set("LocalSuspendedJobs", ComputingShare->LocalSuspendedJobs); share.set("StagingJobs", ComputingShare->StagingJobs); share.set("PreLRMSWaitingJobs", ComputingShare->PreLRMSWaitingJobs); share.set("EstimatedAverageWaitingTime", ComputingShare->EstimatedAverageWaitingTime); share.set("EstimatedWorstWaitingTime", ComputingShare->EstimatedWorstWaitingTime); share.set("FreeSlots", ComputingShare->FreeSlots); std::string fswdValue = share["FreeSlotsWithDuration"]; if (!fswdValue.empty()) { // Format: ns[:t] [ns[:t]]..., where ns is number of slots and t is the duration. ComputingShare->FreeSlotsWithDuration.clear(); std::list fswdList; tokenize(fswdValue, fswdList); for (std::list::iterator it = fswdList.begin(); it != fswdList.end(); it++) { std::list fswdPair; tokenize(*it, fswdPair, ":"); long duration = LONG_MAX; int freeSlots = 0; if ((fswdPair.size() > 2) || (fswdPair.size() >= 1 && !stringto(fswdPair.front(), freeSlots)) || (fswdPair.size() == 2 && !stringto(fswdPair.back(), duration))) { logger.msg(VERBOSE, "The \"FreeSlotsWithDuration\" attribute is wrongly formatted. Ignoring it."); logger.msg(DEBUG, "Wrong format of the \"FreeSlotsWithDuration\" = \"%s\" (\"%s\")", fswdValue, *it); continue; } ComputingShare->FreeSlotsWithDuration[Period(duration)] = freeSlots; } } share.set("UsedSlots", ComputingShare->UsedSlots); share.set("RequestedSlots", ComputingShare->RequestedSlots); share.set("ReservationPolicy", ComputingShare->ReservationPolicy); share.prefix = "Share"; ComputingShare->ID = share.get("ID"); int policyID = 0; for (std::map::const_iterator itMP = MappingPolicies.begin(); itMP != MappingPolicies.end(); ++itMP) { if (itMP->first == ComputingShare->ID) { ComputingShare.MappingPolicy[policyID++] = itMP->second; } } cs.ComputingShare.insert(std::pair(shareID++, ComputingShare)); } // GFD.147 GLUE2 6.4 Computing Manager std::list managers = Extractor::All(service, "ComputingManager"); int managerID = 0; for (std::list::iterator itm = managers.begin(); itm != managers.end(); ++itm) { Extractor& manager = *itm; ComputingManagerType ComputingManager; manager.set("ManagerProductName", ComputingManager->ProductName); manager.set("ManagerProductVersion", ComputingManager->ProductVersion); manager.set("Reservation", ComputingManager->Reservation); manager.set("BulkSubmission", ComputingManager->BulkSubmission); manager.set("TotalPhysicalCPUs", ComputingManager->TotalPhysicalCPUs); manager.set("TotalLogicalCPUs", ComputingManager->TotalLogicalCPUs); manager.set("TotalSlots", ComputingManager->TotalSlots); manager.set("Homogeneous", ComputingManager->Homogeneous); manager.set("NetworkInfo", ComputingManager->NetworkInfo); manager.set("WorkingAreaShared", ComputingManager->WorkingAreaShared); manager.set("WorkingAreaTotal", ComputingManager->WorkingAreaTotal); manager.set("WorkingAreaFree", ComputingManager->WorkingAreaFree); manager.set("WorkingAreaLifeTime", ComputingManager->WorkingAreaLifeTime); manager.set("CacheTotal", ComputingManager->CacheTotal); manager.set("CacheFree", ComputingManager->CacheFree); // TODO: Only benchmarks belonging to this ComputingManager should be considered. 
// GFD.147 GLUE2 6.5 Benchmark std::list benchmarks = Extractor::All(service, "Benchmark"); for (std::list::iterator itb = benchmarks.begin(); itb != benchmarks.end(); ++itb) { Extractor& benchmark = *itb; std::string Type; benchmark.set("Type", Type); double Value = -1.0; benchmark.set("Value", Value); (*ComputingManager.Benchmarks)[Type] = Value; } // GFD.147 GLUE2 6.6 Execution Environment std::list execenvironments = Extractor::All(service, "ExecutionEnvironment"); int eeID = 0; for (std::list::iterator ite = execenvironments.begin(); ite != execenvironments.end(); ite++) { Extractor& environment = *ite; ExecutionEnvironmentType ExecutionEnvironment; environment.set("Platform", ExecutionEnvironment->Platform); environment.set("VirtualMachine", ExecutionEnvironment->VirtualMachine); environment.set("CPUVendor", ExecutionEnvironment->CPUVendor); environment.set("CPUModel", ExecutionEnvironment->CPUModel); environment.set("CPUVersion", ExecutionEnvironment->CPUVersion); environment.set("CPUClockSpeed", ExecutionEnvironment->CPUClockSpeed); environment.set("MainMemorySize", ExecutionEnvironment->MainMemorySize); std::string OSName = environment["OSName"]; std::string OSVersion = environment["OSVersion"]; std::string OSFamily = environment["OSFamily"]; if (!OSName.empty()) { if (!OSVersion.empty()) { if (!OSFamily.empty()) { ExecutionEnvironment->OperatingSystem = Software(OSFamily, OSName, OSVersion); } else { ExecutionEnvironment->OperatingSystem = Software(OSName, OSVersion); } } else { ExecutionEnvironment->OperatingSystem = Software(OSName); } } environment.set("ConnectivityIn", ExecutionEnvironment->ConnectivityIn); environment.set("ConnectivityOut", ExecutionEnvironment->ConnectivityOut); ComputingManager.ExecutionEnvironment.insert(std::pair(eeID++, ExecutionEnvironment)); } // GFD.147 GLUE2 6.7 Application Environment std::list appenvironments = Extractor::All(service, "ApplicationEnvironment"); ComputingManager.ApplicationEnvironments->clear(); for (std::list::iterator ita = appenvironments.begin(); ita != appenvironments.end(); ita++) { Extractor& application = *ita; ApplicationEnvironment ae(application["AppName"], application["AppVersion"]); ae.State = application["State"]; ComputingManager.ApplicationEnvironments->push_back(ae); } cs.ComputingManager.insert(std::pair(managerID++, ComputingManager)); } csList.push_back(cs); } if (!csList.empty()) s = EndpointQueryingStatus::SUCCESSFUL; return s; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/ldap/PaxHeaders.7502/ServiceEndpointRetrieverPluginEGIIS.cpp0000644000000000000000000000012412300714551027625 xustar000000000000000027 mtime=1392744809.100789 27 atime=1513200574.328699 30 ctime=1513200660.484753414 nordugrid-arc-5.4.2/src/hed/acc/ldap/ServiceEndpointRetrieverPluginEGIIS.cpp0000644000175000002070000001073512300714551027700 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "ServiceEndpointRetrieverPluginEGIIS.h" namespace Arc { Logger ServiceEndpointRetrieverPluginEGIIS::logger(Logger::getRootLogger(), "ServiceEndpointRetrieverPlugin.EGIIS"); bool ServiceEndpointRetrieverPluginEGIIS::isEndpointNotSupported(const Endpoint& endpoint) const { const std::string::size_type pos = endpoint.URLString.find("://"); return pos != std::string::npos && lower(endpoint.URLString.substr(0, pos)) != "ldap"; } EndpointQueryingStatus ServiceEndpointRetrieverPluginEGIIS::Query(const UserConfig& uc, const Endpoint& 
rEndpoint, std::list& seList, const EndpointQueryOptions&) const { EndpointQueryingStatus s(EndpointQueryingStatus::STARTED); if (isEndpointNotSupported(rEndpoint)) { return s; } URL url((rEndpoint.URLString.find("://") == std::string::npos ? "ldap://" : "") + rEndpoint.URLString, false, 2135); url.ChangeLDAPScope(URL::base); // This is not needed for EGIIS // It was needed for the original ancient Globus GIIS // There are no such installations around any more (as far as we know) // url.AddLDAPAttribute("giisregistrationstatus"); if (!url) return EndpointQueryingStatus::FAILED; DataBuffer buffer; DataHandle handler(url, uc); if (!handler) { logger.msg(INFO, "Can't create information handle - is the ARC ldap DMC plugin available?"); return EndpointQueryingStatus::FAILED; } if (!handler->StartReading(buffer)) return EndpointQueryingStatus::FAILED; int handle; unsigned int length; unsigned long long int offset; std::string result; while (buffer.for_write() || !buffer.eof_read()) if (buffer.for_write(handle, length, offset, true)) { result.append(buffer[handle], length); buffer.is_written(handle); } if (!handler->StopReading()) { s = EndpointQueryingStatus::FAILED; return s; } XMLNode xmlresult(result); XMLNodeList mdsVoNames = xmlresult.Path("o/Mds-Vo-name"); for (XMLNodeList::iterator itMds = mdsVoNames.begin(); itMds != mdsVoNames.end(); ++itMds) { for (int i = 0; i < itMds->Size(); ++i) { if ((std::string)itMds->Child(i)["Mds-Reg-status"] == "PURGED") { continue; } if (itMds->Child(i).Name() != "Mds-Vo-name" && itMds->Child(i).Name() != "nordugrid-cluster-name" && itMds->Child(i).Name() != "nordugrid-se-name") { // Unknown entry logger.msg(DEBUG, "Unknown entry in EGIIS (%s)", itMds->Child(i).Name()); continue; } if (!itMds->Child(i)["Mds-Service-type"] || !itMds->Child(i)["Mds-Service-hn"] || !itMds->Child(i)["Mds-Service-port"] || !itMds->Child(i)["Mds-Service-Ldap-suffix"]) { logger.msg(DEBUG, "Entry in EGIIS is missing one or more of the attributes 'Mds-Service-type', 'Mds-Service-hn', 'Mds-Service-port' and/or 'Mds-Service-Ldap-suffix'"); continue; } Endpoint se((std::string)itMds->Child(i)["Mds-Service-type"] + "://" + (std::string)itMds->Child(i)["Mds-Service-hn"] + ":" + (std::string)itMds->Child(i)["Mds-Service-port"] + "/" + (std::string)itMds->Child(i)["Mds-Service-Ldap-suffix"]); if (itMds->Child(i).Name() == "Mds-Vo-name") { se.Capability.insert("information.discovery.registry"); se.InterfaceName = supportedInterfaces.empty()?std::string(""):supportedInterfaces.front(); } else if (itMds->Child(i).Name() == "nordugrid-cluster-name") { se.Capability.insert("information.discovery.resource"); se.InterfaceName = "org.nordugrid.ldapng"; } else if (itMds->Child(i).Name() == "nordugrid-se-name") { se.Capability.insert("information.discovery.resource"); se.InterfaceName = "org.nordugrid.ldapng"; } seList.push_back(se); } } s = EndpointQueryingStatus::SUCCESSFUL; return s; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/ldap/PaxHeaders.7502/JobListRetrieverPluginLDAPGLUE2.cpp0000644000000000000000000000012412171221636026514 xustar000000000000000027 mtime=1373971358.977214 27 atime=1513200574.336699 30 ctime=1513200660.477753329 nordugrid-arc-5.4.2/src/hed/acc/ldap/JobListRetrieverPluginLDAPGLUE2.cpp0000644000175000002070000000531112171221636026561 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include "JobListRetrieverPluginLDAPGLUE2.h" namespace Arc { // Characters to be escaped in LDAP filter according 
to RFC4515 static const std::string filter_esc("&|=!><~*/()"); Logger JobListRetrieverPluginLDAPGLUE2::logger(Logger::getRootLogger(), "JobListRetrieverPlugin.LDAPGLUE2"); bool JobListRetrieverPluginLDAPGLUE2::isEndpointNotSupported(const Endpoint& endpoint) const { const std::string::size_type pos = endpoint.URLString.find("://"); return pos != std::string::npos && lower(endpoint.URLString.substr(0, pos)) != "ldap"; } EndpointQueryingStatus JobListRetrieverPluginLDAPGLUE2::Query(const UserConfig& uc, const Endpoint& endpoint, std::list& jobs, const EndpointQueryOptions&) const { ComputingServiceRetriever csr(uc); csr.addEndpoint(endpoint); csr.wait(); { // Check if CSR was successful. EndpointStatusMap statuses = csr.getAllStatuses(); EndpointStatusMap::const_iterator it = statuses.begin(); for (; it != statuses.end(); ++it) { if (it->second) { break; } } if (it == statuses.end()) { // csr was unsuccessful return EndpointQueryingStatus::FAILED; } } EntityContainer container; JobListRetriever jlr(uc); jlr.addConsumer(container); for (std::list::const_iterator it = csr.begin(); it != csr.end(); ++it) { for (std::map::const_iterator ite = it->ComputingEndpoint.begin(); ite != it->ComputingEndpoint.end(); ite++) { Endpoint e(*(ite->second)); if (e.HasCapability(Endpoint::JOBLIST) && e.InterfaceName != "org.nordugrid.ldapglue2" && // the wsrfglue2 job list retriever is not prepared to coexist with the others, so rather skip it e.InterfaceName != "org.nordugrid.wsrfglue2") { logger.msg(DEBUG, "Adding endpoint '%s' with interface name %s", e.URLString, e.InterfaceName); jlr.addEndpoint(e); } } } jlr.wait(); { // Check if JRL's was successful. EndpointStatusMap statuses = jlr.getAllStatuses(); EndpointStatusMap::const_iterator it = statuses.begin(); for (; it != statuses.end(); ++it) { if (it->second) { break; } } if (it == statuses.end()) { // No jlr's were successful. 
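        // Descriptive note (added): reaching this point means not a single per-endpoint
        // job-list query reported success, so the query of this endpoint fails as a whole.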
return EndpointQueryingStatus::FAILED; } } jobs.insert(jobs.end(), container.begin(), container.end()); return EndpointQueryingStatus::SUCCESSFUL; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/ldap/PaxHeaders.7502/Extractor.h0000644000000000000000000000012412067315526022325 xustar000000000000000027 mtime=1356700502.945413 27 atime=1513200574.336699 30 ctime=1513200660.474753292 nordugrid-arc-5.4.2/src/hed/acc/ldap/Extractor.h0000644000175000002070000001217312067315526022376 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_EXTRACTOR_H__ #define __ARC_EXTRACTOR_H__ #include #include #include namespace Arc { class Extractor { public: Extractor() : logger(NULL) {} Extractor(XMLNode node, const std::string& prefix = "", const std::string& type = "", Logger* logger = NULL) : node(node), prefix(prefix), type(type), logger(logger) {} std::string get(const std::string& name) const { std::string value = node[type + prefix + name]; if (value.empty()) { value = (std::string)node[type + name]; } if (logger) logger->msg(DEBUG, "Extractor[%s] (%s): %s = %s", type, prefix, name, value); return value; } std::string operator[](const std::string& name) const { return get(name); } std::string operator[](const char* name) const { return get(name); } operator bool() const { return (bool)node; } bool set(const std::string& name, std::string& string, const std::string& undefined = "") const { std::string value = get(name); if (!value.empty() && value != undefined) { string = value; return true; } return false; } bool set(const std::string& name, Period& period, const std::string& undefined = "") const { std::string value = get(name); if (!value.empty() && value != undefined) { period = Period(value); return true; } return false; } bool set(const std::string& name, Time& time, const std::string& undefined = "") const { std::string value = get(name); if (!value.empty() && value != undefined) { time = Time(value); return true; } return false; } bool set(const std::string& name, int& integer, int undefined = -1) const { const std::string value = get(name); int tempInteger; if (value.empty() || !stringto(value, tempInteger) || tempInteger == undefined) return false; integer = tempInteger; return true; } bool set(const std::string& name, float& number) { std::string value = get(name); return !value.empty() && stringto(value, number); } bool set(const std::string& name, double& number) { std::string value = get(name); return !value.empty() && stringto(value, number); } bool set(const std::string& name, URL& url) { std::string value = get(name); if (!value.empty()) { url = URL(value); return true; } else { return false; } } bool set(const std::string& name, bool& boolean) { std::string value = get(name); if (!value.empty()) { boolean = (value == "TRUE"); return true; } else { return false; } } bool set(const std::string& name, std::list& list) { XMLNodeList nodelist = node.Path(type + prefix + name); if (nodelist.empty()) { nodelist = node.Path(type + name); } if (nodelist.empty()) { return false; } list.clear(); for(XMLNodeList::iterator it = nodelist.begin(); it != nodelist.end(); it++) { std::string value = *it; list.push_back(value); if (logger) logger->msg(DEBUG, "Extractor[%s] (%s): %s contains %s", type, prefix, name, value); } return true; } bool set(const std::string& name, std::set& list) { XMLNodeList nodelist = node.Path(type + prefix + name); if (nodelist.empty()) { nodelist = node.Path(type + name); } if (nodelist.empty()) { return false; } list.clear(); 
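      // Descriptive note (added): every matching child node is collected below; using
      // std::set (rather than the std::list overload above) silently drops duplicate values.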
for(XMLNodeList::iterator it = nodelist.begin(); it != nodelist.end(); it++) { std::string value = *it; list.insert(value); if (logger) logger->msg(DEBUG, "Extractor[%s] (%s): %s contains %s", type, prefix, name, value); } return true; } static Extractor First(XMLNode& node, const std::string& objectClass, const std::string& type = "", Logger* logger = NULL) { XMLNodeList objects = node.XPathLookup("//*[objectClass='" + type + objectClass + "']", NS()); if(objects.empty()) return Extractor(); return Extractor(objects.front(), objectClass, type, logger); } static Extractor First(Extractor& e, const std::string& objectClass) { return First(e.node, objectClass, e.type, e.logger); } static std::list All(XMLNode& node, const std::string& objectClass, const std::string& type = "", Logger* logger = NULL) { std::list objects = node.XPathLookup("//*[objectClass='" + type + objectClass + "']", NS()); std::list extractors; for (std::list::iterator it = objects.begin(); it != objects.end(); ++it) { extractors.push_back(Extractor(*it, objectClass, type, logger)); } return extractors; } static std::list All(Extractor& e, const std::string& objectClass) { return All(e.node, objectClass, e.type, e.logger); } XMLNode node; std::string prefix; std::string type; Logger *logger; }; } // namespace Arc #endif // __ARC_EXTRACTOR_H__ nordugrid-arc-5.4.2/src/hed/acc/ldap/PaxHeaders.7502/TargetInformationRetrieverPluginLDAPNG.h0000644000000000000000000000012412045235201027726 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.328699 30 ctime=1513200660.480753365 nordugrid-arc-5.4.2/src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPNG.h0000644000175000002070000000222212045235201027771 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_TARGETINFORMATIONRETRIEVERLDAPNG_H__ #define __ARC_TARGETINFORMATIONRETRIEVERLDAPNG_H__ #include #include namespace Arc { class Logger; class EndpointQueryingStatus; class ExecutionTarget; class UserConfig; class TargetInformationRetrieverPluginLDAPNG : public TargetInformationRetrieverPlugin { public: TargetInformationRetrieverPluginLDAPNG(PluginArgument *parg): TargetInformationRetrieverPlugin(parg) { supportedInterfaces.push_back("org.nordugrid.ldapng"); }; ~TargetInformationRetrieverPluginLDAPNG() {}; static Plugin* Instance(PluginArgument *arg) { return new TargetInformationRetrieverPluginLDAPNG(arg); }; virtual EndpointQueryingStatus Query(const UserConfig&, const Endpoint&, std::list&, const EndpointQueryOptions&) const; virtual bool isEndpointNotSupported(const Endpoint&) const; private: static bool EntryToInt(const URL& url, XMLNode entry, int& i); static Logger logger; }; } // namespace Arc #endif // __ARC_TARGETINFORMATIONRETRIEVERLDAPNG_H__ nordugrid-arc-5.4.2/src/hed/acc/ldap/PaxHeaders.7502/ServiceEndpointRetrieverPluginBDII.h0000644000000000000000000000012412071312350027135 xustar000000000000000027 mtime=1357223144.510838 27 atime=1513200574.333699 30 ctime=1513200660.487753451 nordugrid-arc-5.4.2/src/hed/acc/ldap/ServiceEndpointRetrieverPluginBDII.h0000644000175000002070000000214612071312350027205 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_SERVICEENDPOINTRETRIEVEREPLUGINBDII_H__ #define __ARC_SERVICEENDPOINTRETRIEVEREPLUGINBDII_H__ #include #include #include namespace Arc { class Logger; class ServiceEndpointRetrieverPluginBDII : public ServiceEndpointRetrieverPlugin { public: ServiceEndpointRetrieverPluginBDII(PluginArgument* parg): ServiceEndpointRetrieverPlugin(parg) { 
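      // Descriptive note (added): this plugin announces a single supported interface,
      // the LDAP-based BDII registry interface registered below.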
supportedInterfaces.push_back("org.nordugrid.bdii"); } virtual ~ServiceEndpointRetrieverPluginBDII() {} static Plugin* Instance(PluginArgument* arg) { return new ServiceEndpointRetrieverPluginBDII(arg); } virtual EndpointQueryingStatus Query(const UserConfig& uc, const Endpoint& rEndpoint, std::list&, const EndpointQueryOptions&) const; virtual bool isEndpointNotSupported(const Endpoint&) const; private: static Logger logger; }; } // namespace Arc #endif // __ARC_SERVICEENDPOINTRETRIEVEREPLUGINBDII_H__ nordugrid-arc-5.4.2/src/hed/acc/ldap/PaxHeaders.7502/ServiceEndpointRetrieverPluginBDII.cpp0000644000000000000000000000012412300714551027474 xustar000000000000000027 mtime=1392744809.100789 27 atime=1513200574.328699 30 ctime=1513200660.486753439 nordugrid-arc-5.4.2/src/hed/acc/ldap/ServiceEndpointRetrieverPluginBDII.cpp0000644000175000002070000001067612300714551027553 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "ServiceEndpointRetrieverPluginBDII.h" namespace Arc { Logger ServiceEndpointRetrieverPluginBDII::logger(Logger::getRootLogger(), "ServiceEndpointRetrieverPlugin.BDII"); bool ServiceEndpointRetrieverPluginBDII::isEndpointNotSupported(const Endpoint& endpoint) const { const std::string::size_type pos = endpoint.URLString.find("://"); return pos != std::string::npos && lower(endpoint.URLString.substr(0, pos)) != "ldap"; } static std::string CreateBDIIResourceURL(const std::string& host) { return "ldap://" + host + ":2170/Mds-Vo-name=resource,o=grid"; } EndpointQueryingStatus ServiceEndpointRetrieverPluginBDII::Query(const UserConfig& uc, const Endpoint& rEndpoint, std::list& seList, const EndpointQueryOptions&) const { if (isEndpointNotSupported(rEndpoint)) { return EndpointQueryingStatus::FAILED; } URL url((rEndpoint.URLString.find("://") == std::string::npos ? 
"ldap://" : "") + rEndpoint.URLString, false, 2170); url.ChangeLDAPScope(URL::subtree); //url.ChangeLDAPFilter("(|(GlueServiceType=bdii_site)(GlueServiceType=bdii_top))"); if (!url) return EndpointQueryingStatus::FAILED; DataHandle handler(url, uc); DataBuffer buffer; if (!handler) { logger.msg(INFO, "Can't create information handle - is the ARC ldap DMC plugin available?"); return EndpointQueryingStatus::FAILED; } if (!handler->StartReading(buffer)) return EndpointQueryingStatus::FAILED; int handle; unsigned int length; unsigned long long int offset; std::string result; while (buffer.for_write() || !buffer.eof_read()) if (buffer.for_write(handle, length, offset, true)) { result.append(buffer[handle], length); buffer.is_written(handle); } if (!handler->StopReading()) return EndpointQueryingStatus::FAILED; XMLNode xmlresult(result); bool noServicesFound = true; XMLNodeList mdsVoNames = xmlresult.Path("o/Mds-Vo-name"); for (std::list::const_iterator itMds = mdsVoNames.begin(); itMds != mdsVoNames.end(); ++itMds) { for (XMLNode mdsVoNameSub = itMds->Get("Mds-Vo-name"); mdsVoNameSub; ++mdsVoNameSub) { for (XMLNode service = mdsVoNameSub["GlueServiceUniqueID"]; service; ++service) { if ((std::string)service["GlueServiceStatus"] != "OK") continue; const std::string serviceType = lower((std::string)service["GlueServiceType"]); Endpoint se; se.URLString = (std::string)service["GlueServiceEndpoint"]; se.HealthState = "ok"; if (serviceType == "bdii_top") { se.Capability.insert("information.discovery.registry"); se.InterfaceName = "org.nordugrid.bdii"; } else if (serviceType == "bdii_site") { se.Capability.insert("information.discovery.registry"); se.InterfaceName = "org.nordugrid.bdii"; } else if (serviceType == "org.glite.ce.cream") { logger.msg(INFO, "Adding CREAM computing service"); se.InterfaceName = "org.glite.ce.cream"; se.Capability.insert("information.lookup.job"); se.Capability.insert("executionmanagement.jobcreation"); se.Capability.insert("executionmanagement.jobdescription"); se.Capability.insert("executionmanagement.jobmanager"); // For CREAM also add resource BDII. Endpoint seInfo; seInfo.URLString = CreateBDIIResourceURL(URL(se.URLString).Host()); seInfo.InterfaceName = "org.nordugrid.ldapglue1"; seInfo.Capability.insert("information.discovery.resource"); seList.push_back(seInfo); } else { // TODO: Handle other endpoints. continue; } noServicesFound = false; seList.push_back(se); } } } return noServicesFound ? 
EndpointQueryingStatus::NOINFORETURNED : EndpointQueryingStatus::SUCCESSFUL; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/ldap/PaxHeaders.7502/TargetInformationRetrieverPluginLDAPGLUE1.cpp0000644000000000000000000000012313024224727030602 xustar000000000000000026 mtime=1481714135.79494 27 atime=1513200574.330699 30 ctime=1513200660.481753378 nordugrid-arc-5.4.2/src/hed/acc/ldap/TargetInformationRetrieverPluginLDAPGLUE1.cpp0000644000175000002070000002620013024224727030650 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include "Extractor.h" #include "TargetInformationRetrieverPluginLDAPGLUE1.h" namespace Arc { Logger TargetInformationRetrieverPluginLDAPGLUE1::logger(Logger::getRootLogger(), "TargetInformationRetrieverPlugin.LDAPGLUE1"); bool TargetInformationRetrieverPluginLDAPGLUE1::isEndpointNotSupported(const Endpoint& endpoint) const { const std::string::size_type pos = endpoint.URLString.find("://"); return pos != std::string::npos && lower(endpoint.URLString.substr(0, pos)) != "ldap"; } EndpointQueryingStatus TargetInformationRetrieverPluginLDAPGLUE1::Query(const UserConfig& uc, const Endpoint& cie, std::list& csList, const EndpointQueryOptions&) const { EndpointQueryingStatus s(EndpointQueryingStatus::FAILED); if (isEndpointNotSupported(cie)) { return s; } URL url((cie.URLString.find("://") == std::string::npos ? "ldap://" : "") + cie.URLString, false, 2170, "/Mds-Vo-name=resource,o=grid"); url.ChangeLDAPScope(URL::subtree); if (!url) { return s; } DataBuffer buffer; DataHandle handler(url, uc); if (!handler) { logger.msg(INFO, "Can't create information handle - " "is the ARC ldap DMC plugin available?"); return s; } if (!handler->StartReading(buffer)) { return s; } int handle; unsigned int length; unsigned long long int offset; std::string result; while (buffer.for_write() || !buffer.eof_read()) if (buffer.for_write(handle, length, offset, true)) { result.append(buffer[handle], length); buffer.is_written(handle); } if (!handler->StopReading()) { return s; } XMLNode xmlResult(result); XMLNodeList glueServices = xmlResult.Path("o/Mds-Vo-name/GlueServiceUniqueID"); for (XMLNodeList::iterator itS = glueServices.begin(); itS != glueServices.end(); ++itS) { // Currently only consider CREAM services. if (lower((std::string)(*itS)["GlueServiceType"]) != "org.glite.ce.cream") continue; // Only consider the first 'GlueClusterUniqueID' entry - possibly there is only one. XMLNode glueCluster = xmlResult["o"]["Mds-Vo-name"]["GlueClusterUniqueID"]; if (!glueCluster) continue; XMLNode glueCE; // Find associated CE node. for (XMLNode n = glueCluster["GlueForeignKey"]; n; ++n) { std::string glueForeignKey = (std::string)n; if (glueForeignKey.substr(0, 14) == "GlueCEUniqueID") { for (XMLNode m = xmlResult["o"]["Mds-Vo-name"]["GlueCEUniqueID"]; m; ++m) { if ((std::string)m["GlueCEUniqueID"] == glueForeignKey.substr(15)) { glueCE = m; break; } } } } if (!glueCE) continue; Extractor site(xmlResult["o"]["Mds-Vo-name"]["GlueSiteUniqueID"], "Site", "Glue", &logger); Extractor service(*itS, "Service", "Glue", &logger); Extractor cluster(glueCluster, "Cluster", "Glue", &logger); Extractor ce(glueCE, "CE", "Glue", &logger); // If credentials contains a VO, then try to find matching VOView. 
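      // Descriptive note (added): the VOMS attribute certificates in the user's credential
      // are parsed (any issuer is accepted, since vomsTrustDN matches ".*") and each VO name
      // is compared against the GlueVOViewLocalID entries of this CE; when one matches, the
      // VO-specific extractor "vo" is preferred below over the CE-wide "ce" values.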
Extractor vo(XMLNode(), "CE", "Glue", &logger); VOMSTrustList vomsTrustDN; vomsTrustDN.AddRegex(".*"); std::vector vomsAttributes; Credential cred(uc); if (parseVOMSAC(cred, uc.CACertificatesDirectory(), "", "", vomsTrustDN, vomsAttributes)) { for (std::vector::const_iterator itAC = vomsAttributes.begin(); itAC != vomsAttributes.end(); ++itAC) { for (XMLNode n = ce.node["GlueVOViewLocalID"]; n; ++n) { if ((std::string)n["GlueVOViewLocalID"] == itAC->voname) { vo.node = n; break; } } if (vo.node) break; } } ComputingServiceType cs; ComputingEndpointType ComputingEndpoint; ComputingManagerType ComputingManager; ComputingShareType ComputingShare; service.set("Status", ComputingEndpoint->HealthState); service.set("Type", ComputingEndpoint->InterfaceName); ComputingEndpoint->InterfaceName = lower(ComputingEndpoint->InterfaceName); // CREAM -> cream. ComputingEndpoint->Technology = "webservice"; // CREAM is a webservice ComputingEndpoint->Capability.insert("information.lookup.job"); ComputingEndpoint->Capability.insert("executionmanagement.jobcreation"); ComputingEndpoint->Capability.insert("executionmanagement.jobdescription"); ComputingEndpoint->Capability.insert("executionmanagement.jobmanager"); ce.set("Name", ComputingShare->Name); ce.set("Name", ComputingShare->MappingQueue); //ce.set("InfoJobManager", ComputingShare->MappingQueue); ce.set("InfoLRMSType", ComputingManager->ProductName); ce.set("InfoLRMSVersion", ComputingManager->ProductVersion); ce.set("PolicyAssignedJobSlots", ComputingManager->TotalSlots); cluster.set("Name", cs->Name); if (site) { site.set("Name", cs.AdminDomain->Name); site.set("Sponsor", cs.AdminDomain->Owner, "none"); site.set("Location", cs.Location->Place); site.set("Latitude", cs.Location->Latitude); site.set("Longitude", cs.Location->Longitude); } ce.set("InfoContactString", ComputingEndpoint->URLString); if (!ce.get("ImplementationName").empty()) { if (!ce.get("ImplementationVersion").empty()) { ComputingEndpoint->Implementation = Software(ce.get("ImplementationName"), ce.get("ImplementationVersion")); } else ComputingEndpoint->Implementation = ce.get("ImplementationName"); } if (!vo.set("StateTotalJobs", ComputingShare->TotalJobs)) ce.set("StateTotalJobs", ComputingShare->TotalJobs); if (!vo.set("StateRunningJobs", ComputingShare->RunningJobs)) ce.set("StateRunningJobs", ComputingShare->RunningJobs); if (!vo.set("StateWaitingJobs", ComputingShare->WaitingJobs, 444444)) ce.set("StateWaitingJobs", ComputingShare->WaitingJobs, 444444); if (!vo.set("PolicyMaxWallClockTime", ComputingShare->MaxWallTime)) ce.set("PolicyMaxWallClockTime", ComputingShare->MaxWallTime); if (!vo.set("PolicyMaxCPUTime", ComputingShare->MaxCPUTime)) ce.set("PolicyMaxCPUTime", ComputingShare->MaxCPUTime); if (!vo.set("PolicyMaxTotalJobs", ComputingShare->MaxTotalJobs, 999999999)) ce.set("PolicyMaxTotalJobs", ComputingShare->MaxTotalJobs, 999999999); if (!vo.set("PolicyMaxRunningJobs", ComputingShare->MaxRunningJobs, 999999999)) ce.set("PolicyMaxRunningJobs", ComputingShare->MaxRunningJobs, 999999999); if (!vo.set("PolicyMaxWaitingJobs", ComputingShare->MaxWaitingJobs, 999999999)) ce.set("PolicyMaxWaitingJobs", ComputingShare->MaxWaitingJobs, 999999999); if (!vo.set("PolicyAssignedJobSlots", ComputingShare->MaxUserRunningJobs, 999999999)) ce.set("PolicyAssignedJobSlots", ComputingShare->MaxUserRunningJobs, 999999999); if (!vo.set("PolicyMaxSlotsPerJob", ComputingShare->MaxSlotsPerJob, 999999999)) ce.set("PolicyMaxSlotsPerJob", ComputingShare->MaxSlotsPerJob, 999999999); // Only consider 
first SubCluster. Extractor subcluster(cluster.node["GlueSubClusterUniqueID"], "", "Glue", &logger); subcluster.set("HostMainMemoryRAMSize", ComputingShare->MaxMainMemory); subcluster.set("HostMainMemoryVirtualSize", ComputingShare->MaxVirtualMemory); ExecutionEnvironmentType ee; subcluster.set("HostNetworkAdapterInboundIP", ee->ConnectivityIn); subcluster.set("HostNetworkAdapterOutboundIP", ee->ConnectivityOut); subcluster.set("HostProcessorClockSpeed", ee->CPUClockSpeed); subcluster.set("HostProcessorModel", ee->CPUModel); subcluster.set("HostArchitecturePlatformType", ee->Platform); subcluster.set("HostProcessorVendor", ee->CPUVendor); subcluster.set("HostMainMemoryRAMSize", ee->MainMemorySize); //subcluster.set("HostMainMemoryVirtualSize", ee->VirtualMemorySize); // 'VirtualMemorySize': No such member in ExecutionEnvironment. //subcluster.set("SubClusterPhysicalCPUs", ee->PhysicalCPUs); // 'PhysicalCPUs': No such member in ExecutionEnvironment. //subcluster.set("SubClusterLogicalCPUs", ee->LogicalCPUs); // 'LogicalCPUs': No such member in ExecutionEnvironment. ee->OperatingSystem = Software(subcluster["HostOperatingSystemName"], subcluster["HostOperatingSystemRelease"]); std::string defaultSE = ""; if (!vo.set("InfoDefaultSE", defaultSE)) ce.set("InfoDefaultSE", defaultSE); if (!defaultSE.empty()) ComputingShare->DefaultStorageService = "gsiftp://" + defaultSE; if (!vo.set("PolicyPreemption", ComputingShare->Preemption)) ce.set("PolicyPreemption", ComputingShare->Preemption); if (!vo.set("StateStatus", ComputingEndpoint->ServingState)) ce.set("StateStatus", ComputingEndpoint->ServingState); if (!vo.set("StateEstimatedResponseTime", ComputingShare->EstimatedAverageWaitingTime, "2146660842")) { ce.set("StateEstimatedResponseTime", ComputingShare->EstimatedAverageWaitingTime, "2146660842"); } if (!vo.set("StateWorstResponseTime", ComputingShare->EstimatedWorstWaitingTime, "2146660842")) { ce.set("StateWorstResponseTime", ComputingShare->EstimatedWorstWaitingTime, "2146660842"); } vo.set("StateFreeJobSlots", ComputingShare->FreeSlots) || vo.set("StateFreeCPUs", ComputingShare->FreeSlots) || ce.set("StateFreeJobSlots", ComputingShare->FreeSlots) || ce.set("StateFreeJobCPUs", ComputingShare->FreeSlots); for (XMLNode node = subcluster.node["GlueHostApplicationSoftwareRunTimeEnvironment"]; node; ++node) { ApplicationEnvironment ae((std::string)node); ae.State = "UNDEFINEDVALUE"; ae.FreeSlots = -1; ae.FreeUserSeats = -1; ae.FreeJobs = -1; ComputingManager.ApplicationEnvironments->push_back(ae); } cs.ComputingEndpoint.insert(std::make_pair(0, ComputingEndpoint)); // Create information endpoint. 
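      // Descriptive note (added): besides the submission endpoint stored above, a second,
      // purely informational endpoint is added below (interface org.nordugrid.ldapglue1,
      // capability information.discovery.resource), but only when the CE publishes a
      // GlueInformationServiceURL.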
ComputingEndpointType infoEndpoint; if (!((std::string)ce.node["GlueInformationServiceURL"]).empty()) { infoEndpoint->URLString = (std::string)ce.node["GlueInformationServiceURL"]; infoEndpoint->InterfaceName = "org.nordugrid.ldapglue1"; infoEndpoint->Capability.insert("information.discovery.resource"); infoEndpoint->Technology = "ldap"; cs.ComputingEndpoint.insert(std::make_pair(1, infoEndpoint)); } ComputingManager.ExecutionEnvironment.insert(std::make_pair(0, ee)); cs.ComputingManager.insert(std::make_pair(0, ComputingManager)); cs.ComputingShare.insert(std::make_pair(0, ComputingShare)); csList.push_back(cs); } if (!csList.empty()) s = EndpointQueryingStatus::SUCCESSFUL; return s; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/PaxHeaders.7502/Broker0000644000000000000000000000013213214316024020414 xustar000000000000000030 mtime=1513200660.291751054 30 atime=1513200668.721854157 30 ctime=1513200660.291751054 nordugrid-arc-5.4.2/src/hed/acc/Broker/0000755000175000002070000000000013214316024020537 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/acc/Broker/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515022536 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200593.949939668 30 ctime=1513200660.278750895 nordugrid-arc-5.4.2/src/hed/acc/Broker/Makefile.am0000644000175000002070000000161012052416515022576 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libaccBroker.la libaccBroker_la_SOURCES = \ FastestQueueBrokerPlugin.cpp FastestQueueBrokerPlugin.h \ RandomBrokerPlugin.h \ DescriptorsBroker.cpp \ BenchmarkBrokerPlugin.cpp BenchmarkBrokerPlugin.h \ DataBrokerPlugin.cpp DataBrokerPlugin.h \ NullBrokerPlugin.h libaccBroker_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libaccBroker_la_LIBADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libaccBroker_la_LDFLAGS = -no-undefined -avoid-version -module DIST_SUBDIRS = test SUBDIRS = $(TEST_DIR) nordugrid-arc-5.4.2/src/hed/acc/Broker/PaxHeaders.7502/RandomBrokerPlugin.h0000644000000000000000000000012212045235201024403 xustar000000000000000027 mtime=1351957121.246634 25 atime=1513200574.3757 30 ctime=1513200660.282750944 nordugrid-arc-5.4.2/src/hed/acc/Broker/RandomBrokerPlugin.h0000644000175000002070000000160012045235201024447 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_RANDOMBROKERPLUGIN_H__ #define __ARC_RANDOMBROKERPLUGIN_H__ #include #include #include namespace Arc { class RandomBrokerPlugin : public BrokerPlugin { public: RandomBrokerPlugin(BrokerPluginArgument* parg) : BrokerPlugin(parg) { std::srand(time(NULL)); } ~RandomBrokerPlugin() {}; static Plugin* Instance(PluginArgument *arg) { BrokerPluginArgument *brokerarg = dynamic_cast(arg); return brokerarg ? 
new RandomBrokerPlugin(brokerarg) : NULL; } virtual bool match(const ExecutionTarget& et) const { return BrokerPlugin::match(et); } virtual bool operator()(const ExecutionTarget&, const ExecutionTarget&) const { return (bool)(std::rand()%2); } }; } // namespace Arc #endif // __ARC_RANDOMBROKERPLUGIN_H__ nordugrid-arc-5.4.2/src/hed/acc/Broker/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315721022541 xustar000000000000000030 mtime=1513200593.999940279 30 atime=1513200648.842611027 30 ctime=1513200660.279750907 nordugrid-arc-5.4.2/src/hed/acc/Broker/Makefile.in0000644000175000002070000010562613214315721022621 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/acc/Broker DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = 
"" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libaccBroker_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libaccBroker_la_OBJECTS = \ libaccBroker_la-FastestQueueBrokerPlugin.lo \ libaccBroker_la-DescriptorsBroker.lo \ libaccBroker_la-BenchmarkBrokerPlugin.lo \ libaccBroker_la-DataBrokerPlugin.lo libaccBroker_la_OBJECTS = $(am_libaccBroker_la_OBJECTS) libaccBroker_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libaccBroker_la_CXXFLAGS) $(CXXFLAGS) \ $(libaccBroker_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libaccBroker_la_SOURCES) DIST_SOURCES = $(libaccBroker_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ 
dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ 
GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = 
@bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libaccBroker.la libaccBroker_la_SOURCES = \ FastestQueueBrokerPlugin.cpp FastestQueueBrokerPlugin.h \ RandomBrokerPlugin.h \ DescriptorsBroker.cpp \ BenchmarkBrokerPlugin.cpp BenchmarkBrokerPlugin.h \ DataBrokerPlugin.cpp DataBrokerPlugin.h \ NullBrokerPlugin.h libaccBroker_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libaccBroker_la_LIBADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libaccBroker_la_LDFLAGS = -no-undefined -avoid-version -module DIST_SUBDIRS = test SUBDIRS = $(TEST_DIR) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/acc/Broker/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/acc/Broker/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libaccBroker.la: $(libaccBroker_la_OBJECTS) $(libaccBroker_la_DEPENDENCIES) $(libaccBroker_la_LINK) -rpath $(pkglibdir) $(libaccBroker_la_OBJECTS) $(libaccBroker_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccBroker_la-BenchmarkBrokerPlugin.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccBroker_la-DataBrokerPlugin.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccBroker_la-DescriptorsBroker.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccBroker_la-FastestQueueBrokerPlugin.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libaccBroker_la-FastestQueueBrokerPlugin.lo: FastestQueueBrokerPlugin.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccBroker_la_CXXFLAGS) $(CXXFLAGS) -MT libaccBroker_la-FastestQueueBrokerPlugin.lo -MD -MP -MF $(DEPDIR)/libaccBroker_la-FastestQueueBrokerPlugin.Tpo -c -o libaccBroker_la-FastestQueueBrokerPlugin.lo `test -f 'FastestQueueBrokerPlugin.cpp' || echo '$(srcdir)/'`FastestQueueBrokerPlugin.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccBroker_la-FastestQueueBrokerPlugin.Tpo $(DEPDIR)/libaccBroker_la-FastestQueueBrokerPlugin.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FastestQueueBrokerPlugin.cpp' object='libaccBroker_la-FastestQueueBrokerPlugin.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccBroker_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccBroker_la-FastestQueueBrokerPlugin.lo `test -f 'FastestQueueBrokerPlugin.cpp' || echo '$(srcdir)/'`FastestQueueBrokerPlugin.cpp libaccBroker_la-DescriptorsBroker.lo: DescriptorsBroker.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccBroker_la_CXXFLAGS) $(CXXFLAGS) -MT libaccBroker_la-DescriptorsBroker.lo -MD -MP -MF $(DEPDIR)/libaccBroker_la-DescriptorsBroker.Tpo -c -o libaccBroker_la-DescriptorsBroker.lo `test -f 'DescriptorsBroker.cpp' || echo '$(srcdir)/'`DescriptorsBroker.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccBroker_la-DescriptorsBroker.Tpo $(DEPDIR)/libaccBroker_la-DescriptorsBroker.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DescriptorsBroker.cpp' object='libaccBroker_la-DescriptorsBroker.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccBroker_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccBroker_la-DescriptorsBroker.lo `test -f 'DescriptorsBroker.cpp' || echo '$(srcdir)/'`DescriptorsBroker.cpp libaccBroker_la-BenchmarkBrokerPlugin.lo: BenchmarkBrokerPlugin.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccBroker_la_CXXFLAGS) $(CXXFLAGS) -MT libaccBroker_la-BenchmarkBrokerPlugin.lo -MD -MP -MF $(DEPDIR)/libaccBroker_la-BenchmarkBrokerPlugin.Tpo -c -o libaccBroker_la-BenchmarkBrokerPlugin.lo `test -f 'BenchmarkBrokerPlugin.cpp' || echo '$(srcdir)/'`BenchmarkBrokerPlugin.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccBroker_la-BenchmarkBrokerPlugin.Tpo $(DEPDIR)/libaccBroker_la-BenchmarkBrokerPlugin.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BenchmarkBrokerPlugin.cpp' object='libaccBroker_la-BenchmarkBrokerPlugin.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) 
$(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccBroker_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccBroker_la-BenchmarkBrokerPlugin.lo `test -f 'BenchmarkBrokerPlugin.cpp' || echo '$(srcdir)/'`BenchmarkBrokerPlugin.cpp libaccBroker_la-DataBrokerPlugin.lo: DataBrokerPlugin.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccBroker_la_CXXFLAGS) $(CXXFLAGS) -MT libaccBroker_la-DataBrokerPlugin.lo -MD -MP -MF $(DEPDIR)/libaccBroker_la-DataBrokerPlugin.Tpo -c -o libaccBroker_la-DataBrokerPlugin.lo `test -f 'DataBrokerPlugin.cpp' || echo '$(srcdir)/'`DataBrokerPlugin.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccBroker_la-DataBrokerPlugin.Tpo $(DEPDIR)/libaccBroker_la-DataBrokerPlugin.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataBrokerPlugin.cpp' object='libaccBroker_la-DataBrokerPlugin.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccBroker_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccBroker_la-DataBrokerPlugin.lo `test -f 'DataBrokerPlugin.cpp' || echo '$(srcdir)/'`DataBrokerPlugin.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . 
|| ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-pkglibLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am \ uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/acc/Broker/PaxHeaders.7502/test0000644000000000000000000000013213214316024021373 xustar000000000000000030 mtime=1513200660.318751384 30 atime=1513200668.721854157 30 ctime=1513200660.318751384 nordugrid-arc-5.4.2/src/hed/acc/Broker/test/0000755000175000002070000000000013214316024021516 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/acc/Broker/test/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515023515 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200594.015940475 30 ctime=1513200660.315751348 nordugrid-arc-5.4.2/src/hed/acc/Broker/test/Makefile.am0000644000175000002070000000100512052416515023553 0ustar00mockbuildmock00000000000000TESTS = BenchmarkBrokerTest check_PROGRAMS = $(TESTS) BenchmarkBrokerTest_SOURCES = $(top_srcdir)/src/Test.cpp \ BenchmarkBrokerTest.cpp BenchmarkBrokerTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) BenchmarkBrokerTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) nordugrid-arc-5.4.2/src/hed/acc/Broker/test/PaxHeaders.7502/BenchmarkBrokerTest.cpp0000644000000000000000000000012212045235201026050 xustar000000000000000027 mtime=1351957121.246634 25 atime=1513200574.3787 30 ctime=1513200660.318751384 nordugrid-arc-5.4.2/src/hed/acc/Broker/test/BenchmarkBrokerTest.cpp0000644000175000002070000000654212045235201026126 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "../BenchmarkBrokerPlugin.cpp" #include class BenchmarkBrokerTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(BenchmarkBrokerTest); CPPUNIT_TEST(TestComparePerformance); CPPUNIT_TEST_SUITE_END(); public: BenchmarkBrokerTest() : COMPARE_TEST_BENCHMARK(NULL), COMPARE_OTHER_BENCHMARK(NULL), COMPARE_ANOTHER_BENCHMARK(NULL) {} void setUp(); void tearDown(); void TestComparePerformance(); private: Arc::ExecutionTarget UNKNOWN_BENCHMARK_TARGET; Arc::ExecutionTarget SLOW_TARGET; Arc::ExecutionTarget FAST_TARGET; Arc::ExecutionTarget SUPERFAST_TARGET; Arc::ExecutionTarget OTHER_BENCHMARK_TARGET; Arc::ExecutionTarget MULTI_BENCHMARK_TARGET; Arc::BenchmarkBrokerPlugin *COMPARE_TEST_BENCHMARK; Arc::BenchmarkBrokerPlugin *COMPARE_OTHER_BENCHMARK; Arc::BenchmarkBrokerPlugin *COMPARE_ANOTHER_BENCHMARK; }; void BenchmarkBrokerTest::setUp() { (*SLOW_TARGET.Benchmarks)["test"] = 10; (*FAST_TARGET.Benchmarks)["test"] = 100; (*SUPERFAST_TARGET.Benchmarks)["test"] = 1000; (*OTHER_BENCHMARK_TARGET.Benchmarks)["other"] = 43; (*MULTI_BENCHMARK_TARGET.Benchmarks)["test"] = 42; (*MULTI_BENCHMARK_TARGET.Benchmarks)["other"] = 41; } void BenchmarkBrokerTest::tearDown() { } void BenchmarkBrokerTest::TestComparePerformance() { Arc::UserConfig uc; uc.Broker("", "test"); Arc::BrokerPluginArgument arg(uc); COMPARE_TEST_BENCHMARK = new Arc::BenchmarkBrokerPlugin(&arg); CPPUNIT_ASSERT_MESSAGE("FAST should be faster than SLOW", (*COMPARE_TEST_BENCHMARK)(FAST_TARGET, SLOW_TARGET)); CPPUNIT_ASSERT_MESSAGE("SLOW should not be faster than FAST", !(*COMPARE_TEST_BENCHMARK)(SLOW_TARGET, FAST_TARGET)); CPPUNIT_ASSERT_MESSAGE("SUPERFAST should be faster than FAST", (*COMPARE_TEST_BENCHMARK)(SUPERFAST_TARGET, FAST_TARGET)); CPPUNIT_ASSERT_MESSAGE("FAST should not be faster than SUPERFAST", !(*COMPARE_TEST_BENCHMARK)(FAST_TARGET, SUPERFAST_TARGET)); 
CPPUNIT_ASSERT_MESSAGE("FAST should be faster than MULTI_BENCHMARK", (*COMPARE_TEST_BENCHMARK)(FAST_TARGET, MULTI_BENCHMARK_TARGET)); CPPUNIT_ASSERT_MESSAGE("MULTI_BENCHMARK should not be faster than FAST", !(*COMPARE_TEST_BENCHMARK)(MULTI_BENCHMARK_TARGET, FAST_TARGET)); uc.Broker("", "other"); COMPARE_OTHER_BENCHMARK = new Arc::BenchmarkBrokerPlugin(&arg); CPPUNIT_ASSERT_MESSAGE("OTHER_BENCHMARK should be faster than MULTI_BENCHMARK if the 'other' benchmark is used", (*COMPARE_OTHER_BENCHMARK)(OTHER_BENCHMARK_TARGET, MULTI_BENCHMARK_TARGET)); CPPUNIT_ASSERT_MESSAGE("MULTI_BENCHMARK should be faster than OTHER_BENCHMARK if the 'test' benchmark is used", (*COMPARE_TEST_BENCHMARK)(MULTI_BENCHMARK_TARGET, OTHER_BENCHMARK_TARGET)); CPPUNIT_ASSERT_MESSAGE("SLOW should be faster than UNKNOWN_BENCHMARK", (*COMPARE_TEST_BENCHMARK)(SLOW_TARGET, UNKNOWN_BENCHMARK_TARGET)); uc.Broker("", "another"); COMPARE_ANOTHER_BENCHMARK = new Arc::BenchmarkBrokerPlugin(&arg); CPPUNIT_ASSERT_MESSAGE("if none of the targets has the used benchmark, this should be always false", !(*COMPARE_ANOTHER_BENCHMARK)(FAST_TARGET, SLOW_TARGET)); delete COMPARE_TEST_BENCHMARK; delete COMPARE_OTHER_BENCHMARK; delete COMPARE_ANOTHER_BENCHMARK; } CPPUNIT_TEST_SUITE_REGISTRATION(BenchmarkBrokerTest); nordugrid-arc-5.4.2/src/hed/acc/Broker/test/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315722023520 xustar000000000000000030 mtime=1513200594.060941025 30 atime=1513200648.859611234 29 ctime=1513200660.31675136 nordugrid-arc-5.4.2/src/hed/acc/Broker/test/Makefile.in0000644000175000002070000007124613214315722023601 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ TESTS = BenchmarkBrokerTest$(EXEEXT) check_PROGRAMS = $(am__EXEEXT_1) subdir = src/hed/acc/Broker/test DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__EXEEXT_1 = BenchmarkBrokerTest$(EXEEXT) am_BenchmarkBrokerTest_OBJECTS = BenchmarkBrokerTest-Test.$(OBJEXT) \ BenchmarkBrokerTest-BenchmarkBrokerTest.$(OBJEXT) BenchmarkBrokerTest_OBJECTS = $(am_BenchmarkBrokerTest_OBJECTS) am__DEPENDENCIES_1 = BenchmarkBrokerTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) BenchmarkBrokerTest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(BenchmarkBrokerTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(BenchmarkBrokerTest_SOURCES) DIST_SOURCES = $(BenchmarkBrokerTest_SOURCES) ETAGS = etags CTAGS = ctags am__tty_colors = \ red=; grn=; lgn=; blu=; std= DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ 
ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = 
@JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = 
@infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ BenchmarkBrokerTest_SOURCES = $(top_srcdir)/src/Test.cpp \ BenchmarkBrokerTest.cpp BenchmarkBrokerTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) BenchmarkBrokerTest_LDADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/acc/Broker/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/acc/Broker/test/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-checkPROGRAMS: @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list BenchmarkBrokerTest$(EXEEXT): $(BenchmarkBrokerTest_OBJECTS) $(BenchmarkBrokerTest_DEPENDENCIES) @rm -f BenchmarkBrokerTest$(EXEEXT) $(BenchmarkBrokerTest_LINK) $(BenchmarkBrokerTest_OBJECTS) $(BenchmarkBrokerTest_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/BenchmarkBrokerTest-BenchmarkBrokerTest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/BenchmarkBrokerTest-Test.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< BenchmarkBrokerTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(BenchmarkBrokerTest_CXXFLAGS) $(CXXFLAGS) -MT BenchmarkBrokerTest-Test.o -MD -MP -MF $(DEPDIR)/BenchmarkBrokerTest-Test.Tpo -c -o BenchmarkBrokerTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/BenchmarkBrokerTest-Test.Tpo $(DEPDIR)/BenchmarkBrokerTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='BenchmarkBrokerTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(BenchmarkBrokerTest_CXXFLAGS) $(CXXFLAGS) -c -o BenchmarkBrokerTest-Test.o `test -f 
'$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp BenchmarkBrokerTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(BenchmarkBrokerTest_CXXFLAGS) $(CXXFLAGS) -MT BenchmarkBrokerTest-Test.obj -MD -MP -MF $(DEPDIR)/BenchmarkBrokerTest-Test.Tpo -c -o BenchmarkBrokerTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/BenchmarkBrokerTest-Test.Tpo $(DEPDIR)/BenchmarkBrokerTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/Test.cpp' object='BenchmarkBrokerTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(BenchmarkBrokerTest_CXXFLAGS) $(CXXFLAGS) -c -o BenchmarkBrokerTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` BenchmarkBrokerTest-BenchmarkBrokerTest.o: BenchmarkBrokerTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(BenchmarkBrokerTest_CXXFLAGS) $(CXXFLAGS) -MT BenchmarkBrokerTest-BenchmarkBrokerTest.o -MD -MP -MF $(DEPDIR)/BenchmarkBrokerTest-BenchmarkBrokerTest.Tpo -c -o BenchmarkBrokerTest-BenchmarkBrokerTest.o `test -f 'BenchmarkBrokerTest.cpp' || echo '$(srcdir)/'`BenchmarkBrokerTest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/BenchmarkBrokerTest-BenchmarkBrokerTest.Tpo $(DEPDIR)/BenchmarkBrokerTest-BenchmarkBrokerTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BenchmarkBrokerTest.cpp' object='BenchmarkBrokerTest-BenchmarkBrokerTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(BenchmarkBrokerTest_CXXFLAGS) $(CXXFLAGS) -c -o BenchmarkBrokerTest-BenchmarkBrokerTest.o `test -f 'BenchmarkBrokerTest.cpp' || echo '$(srcdir)/'`BenchmarkBrokerTest.cpp BenchmarkBrokerTest-BenchmarkBrokerTest.obj: BenchmarkBrokerTest.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(BenchmarkBrokerTest_CXXFLAGS) $(CXXFLAGS) -MT BenchmarkBrokerTest-BenchmarkBrokerTest.obj -MD -MP -MF $(DEPDIR)/BenchmarkBrokerTest-BenchmarkBrokerTest.Tpo -c -o BenchmarkBrokerTest-BenchmarkBrokerTest.obj `if test -f 'BenchmarkBrokerTest.cpp'; then $(CYGPATH_W) 'BenchmarkBrokerTest.cpp'; else $(CYGPATH_W) '$(srcdir)/BenchmarkBrokerTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/BenchmarkBrokerTest-BenchmarkBrokerTest.Tpo $(DEPDIR)/BenchmarkBrokerTest-BenchmarkBrokerTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BenchmarkBrokerTest.cpp' object='BenchmarkBrokerTest-BenchmarkBrokerTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(BenchmarkBrokerTest_CXXFLAGS) $(CXXFLAGS) -c -o BenchmarkBrokerTest-BenchmarkBrokerTest.obj `if test -f 'BenchmarkBrokerTest.cpp'; then $(CYGPATH_W) 'BenchmarkBrokerTest.cpp'; else $(CYGPATH_W) '$(srcdir)/BenchmarkBrokerTest.cpp'; fi` 
mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-checkPROGRAMS clean-generic clean-libtool \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-TESTS check-am clean \ clean-checkPROGRAMS clean-generic clean-libtool ctags \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/acc/Broker/PaxHeaders.7502/BenchmarkBrokerPlugin.h0000644000000000000000000000012212045235201025055 xustar000000000000000027 mtime=1351957121.246634 25 atime=1513200574.3817 30 ctime=1513200660.285750981 nordugrid-arc-5.4.2/src/hed/acc/Broker/BenchmarkBrokerPlugin.h0000644000175000002070000000164712045235201025134 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_BENCHMARKBROKERPLUGIN_H__ #define __ARC_BENCHMARKBROKERPLUGIN_H__ #include #include #include namespace Arc { class BenchmarkBrokerPlugin : public BrokerPlugin { public: BenchmarkBrokerPlugin(BrokerPluginArgument* parg) : BrokerPlugin(parg), benchmark(!uc.Broker().second.empty() ? lower(uc.Broker().second) : "specint2000") {} ~BenchmarkBrokerPlugin() {} static Plugin* Instance(PluginArgument *arg) { BrokerPluginArgument *brokerarg = dynamic_cast(arg); return brokerarg ? new BenchmarkBrokerPlugin(brokerarg) : NULL; } virtual bool operator()(const ExecutionTarget&, const ExecutionTarget&) const; virtual bool match(const ExecutionTarget&) const; private: std::string benchmark; }; } // namespace Arc #endif // __ARC_BENCHMARKBROKERPLUGIN_H__ nordugrid-arc-5.4.2/src/hed/acc/Broker/PaxHeaders.7502/NullBrokerPlugin.h0000644000000000000000000000012112045235201024074 xustar000000000000000027 mtime=1351957121.246634 25 atime=1513200574.3817 29 ctime=1513200660.28975103 nordugrid-arc-5.4.2/src/hed/acc/Broker/NullBrokerPlugin.h0000644000175000002070000000146412045235201024151 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_NULLBROKERPLUGIN_H__ #define __ARC_NULLBROKERPLUGIN_H__ #include #include #include namespace Arc { class NullBrokerPlugin : public BrokerPlugin { public: NullBrokerPlugin(BrokerPluginArgument* parg) : BrokerPlugin(parg) {} ~NullBrokerPlugin() {} static Plugin* Instance(PluginArgument *arg) { BrokerPluginArgument *brokerarg = dynamic_cast(arg); return brokerarg ? 
new NullBrokerPlugin(brokerarg) : NULL; } virtual bool match(const ExecutionTarget& et) const { return true; } virtual bool operator()(const ExecutionTarget&, const ExecutionTarget&) const { return true; } }; } // namespace Arc #endif // __ARC_NULLBROKERPLUGIN_H__ nordugrid-arc-5.4.2/src/hed/acc/Broker/PaxHeaders.7502/BenchmarkBrokerPlugin.cpp0000644000000000000000000000012212045235201025410 xustar000000000000000027 mtime=1351957121.246634 25 atime=1513200574.3787 30 ctime=1513200660.284750968 nordugrid-arc-5.4.2/src/hed/acc/Broker/BenchmarkBrokerPlugin.cpp0000644000175000002070000000153512045235201025463 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include "BenchmarkBrokerPlugin.h" namespace Arc { bool BenchmarkBrokerPlugin::operator()(const ExecutionTarget& lhs, const ExecutionTarget& rhs) const { std::map::const_iterator itLHS = lhs.Benchmarks->find(benchmark); std::map::const_iterator itRHS = rhs.Benchmarks->find(benchmark); if (itLHS == lhs.Benchmarks->end()) { return false; } if (itRHS == rhs.Benchmarks->end()) { return true; } return itLHS->second > itRHS->second; } bool BenchmarkBrokerPlugin::match(const ExecutionTarget& et) const { if(!BrokerPlugin::match(et)) return false; return et.Benchmarks->find(benchmark) != et.Benchmarks->end(); } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/Broker/PaxHeaders.7502/DataBrokerPlugin.h0000644000000000000000000000012212106724427024047 xustar000000000000000027 mtime=1360767255.383414 25 atime=1513200574.3797 30 ctime=1513200660.288751017 nordugrid-arc-5.4.2/src/hed/acc/Broker/DataBrokerPlugin.h0000644000175000002070000000234612106724427024123 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_DATABROKERPLUGIN_H__ #define __ARC_DATABROKERPLUGIN_H__ #include #include #include #include #include namespace Arc { class DataBrokerPlugin : public BrokerPlugin { public: DataBrokerPlugin(BrokerPluginArgument* parg) : BrokerPlugin(parg), request(NULL) {} DataBrokerPlugin(const DataBrokerPlugin& dbp) : BrokerPlugin(dbp), cfg(dbp.cfg), request(dbp.request?new PayloadSOAP(*dbp.request):NULL), CacheMappingTable(dbp.CacheMappingTable) {} ~DataBrokerPlugin() { if (request) { delete request; }; }; static Plugin* Instance(PluginArgument *arg) { BrokerPluginArgument *brokerarg = dynamic_cast(arg); return brokerarg ? 
new DataBrokerPlugin(brokerarg) : NULL; } virtual bool match(const ExecutionTarget&) const; virtual bool operator()(const ExecutionTarget&, const ExecutionTarget&) const; virtual void set(const JobDescription& _j) const; protected: mutable MCCConfig cfg; mutable PayloadSOAP * request; mutable std::map CacheMappingTable; }; } // namespace Arc #endif // __ARC_DATABROKERPLUGIN_H__ nordugrid-arc-5.4.2/src/hed/acc/Broker/PaxHeaders.7502/DescriptorsBroker.cpp0000644000000000000000000000012212675602216024655 xustar000000000000000027 mtime=1459029134.924374 25 atime=1513200574.3827 30 ctime=1513200660.283750956 nordugrid-arc-5.4.2/src/hed/acc/Broker/DescriptorsBroker.cpp0000644000175000002070000000176112675602216024731 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include "FastestQueueBrokerPlugin.h" #include "RandomBrokerPlugin.h" #include "BenchmarkBrokerPlugin.h" #include "DataBrokerPlugin.h" #include "NullBrokerPlugin.h" extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "FastestQueue", "HED:BrokerPlugin", istring("Sorting according to free slots in queue"), 0, &Arc::FastestQueueBrokerPlugin::Instance }, { "Random", "HED:BrokerPlugin", istring("Random sorting"), 0, &Arc::RandomBrokerPlugin::Instance }, { "Benchmark", "HED:BrokerPlugin", istring("Sorting according to specified benchmark (default \"specint2000\")"), 0, &Arc::BenchmarkBrokerPlugin::Instance }, { "Data", "HED:BrokerPlugin", istring("Sorting according to input data availability at target"), 0, &Arc::DataBrokerPlugin::Instance }, { "Null", "HED:BrokerPlugin", istring("Performs neither sorting nor matching"), 0, &Arc::NullBrokerPlugin::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/acc/Broker/PaxHeaders.7502/FastestQueueBrokerPlugin.cpp0000644000000000000000000000012212045235201026134 xustar000000000000000027 mtime=1351957121.246634 25 atime=1513200574.3797 30 ctime=1513200660.280750919 nordugrid-arc-5.4.2/src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp0000644000175000002070000000272512045235201026211 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include "FastestQueueBrokerPlugin.h" namespace Arc { bool FastestQueueBrokerPlugin::operator()(const ExecutionTarget& lhs, const ExecutionTarget& rhs) const { if (lhs.ComputingShare->WaitingJobs == 0 && rhs.ComputingShare->WaitingJobs == 0) { return lhs.ComputingShare->FreeSlots <= rhs.ComputingShare->FreeSlots; } return lhs.ComputingShare->WaitingJobs*rhs.ComputingManager->TotalSlots <= lhs.ComputingManager->TotalSlots*rhs.ComputingShare->WaitingJobs; } bool FastestQueueBrokerPlugin::match(const ExecutionTarget& et) const { if(!BrokerPlugin::match(et)) return false; if (et.ComputingShare->WaitingJobs <= -1 || et.ComputingManager->TotalSlots <= -1 || et.ComputingShare->FreeSlots <= -1) { if (et.ComputingShare->WaitingJobs <= -1) { logger.msg(VERBOSE, "Target %s removed by FastestQueueBroker, doesn't report number of waiting jobs", et.AdminDomain->Name); } if (et.ComputingManager->TotalSlots <= -1) { logger.msg(VERBOSE, "Target %s removed by FastestQueueBroker, doesn't report number of total slots", et.AdminDomain->Name); } if (et.ComputingShare->FreeSlots <= -1) { logger.msg(VERBOSE, "Target %s removed by FastestQueueBroker, doesn't report number of free slots", et.AdminDomain->Name); } return false; } return true; } } // namespace Arc 
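The pairwise comparison in FastestQueueBrokerPlugin::operator() above ranks targets by their waiting-jobs-per-slot ratio, written as a cross-multiplication so that no division is performed. Writing W for ComputingShare->WaitingJobs and S for ComputingManager->TotalSlots (shorthand used only in this note), the test is, for positive S on both sides, equivalent to

    W_lhs * S_rhs <= S_lhs * W_rhs   <=>   W_lhs / S_lhs <= W_rhs / S_rhs

so the target with the smaller ratio of waiting jobs to total slots is preferred. When both shares report zero waiting jobs, FreeSlots are compared instead, and match() has already removed targets that do not report these numbers.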
nordugrid-arc-5.4.2/src/hed/acc/Broker/PaxHeaders.7502/FastestQueueBrokerPlugin.h0000644000000000000000000000012212045235201025601 xustar000000000000000027 mtime=1351957121.246634 25 atime=1513200574.3757 30 ctime=1513200660.281750932 nordugrid-arc-5.4.2/src/hed/acc/Broker/FastestQueueBrokerPlugin.h0000644000175000002070000000141412045235201025650 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_FASTESTQUEUEBROKERPLUGIN_H__ #define __ARC_FASTESTQUEUEBROKERPLUGIN_H__ #include namespace Arc { class FastestQueueBrokerPlugin : public BrokerPlugin { public: FastestQueueBrokerPlugin(BrokerPluginArgument* parg) : BrokerPlugin(parg) {} ~FastestQueueBrokerPlugin() {} static Plugin* Instance(PluginArgument *arg) { BrokerPluginArgument *brokerarg = dynamic_cast(arg); return brokerarg ? new FastestQueueBrokerPlugin(brokerarg) : NULL; } virtual bool operator()(const ExecutionTarget&, const ExecutionTarget&) const; virtual bool match(const ExecutionTarget&) const; }; } // namespace Arc #endif // __ARC_FASTESTQUEUEBROKERPLUGIN_H__ nordugrid-arc-5.4.2/src/hed/acc/Broker/PaxHeaders.7502/DataBrokerPlugin.cpp0000644000000000000000000000012212106724427024402 xustar000000000000000027 mtime=1360767255.383414 25 atime=1513200574.3767 30 ctime=1513200660.287751005 nordugrid-arc-5.4.2/src/hed/acc/Broker/DataBrokerPlugin.cpp0000644000175000002070000000476412106724427024464 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "DataBrokerPlugin.h" namespace Arc { void DataBrokerPlugin::set(const JobDescription& _j) const { BrokerPlugin::set(_j); if (j) { uc.ApplyToConfig(cfg); if (request) { delete request; } Arc::NS ns("a-rex", "http://www.nordugrid.org/schemas/a-rex"); request = new PayloadSOAP(ns); XMLNode req = request->NewChild("a-rex:CacheCheck").NewChild("a-rex:TheseFilesNeedToCheck"); for (std::list::const_iterator it = j->DataStaging.InputFiles.begin(); it != j->DataStaging.InputFiles.end(); ++it) { if (!it->Sources.empty()) { req.NewChild("a-rex:FileURL") = it->Sources.front().fullstr(); } } } } bool DataBrokerPlugin::operator()(const ExecutionTarget& lhs, const ExecutionTarget& rhs) const { std::map::const_iterator itLHS = CacheMappingTable.find(lhs.ComputingEndpoint->URLString); std::map::const_iterator itRHS = CacheMappingTable.find(rhs.ComputingEndpoint->URLString); // itLHS == CacheMappingTable.end() -> false, // itRHS == CacheMappingTable.end() -> true, // otherwise - itLHS->second > itRHS->second. return itLHS != CacheMappingTable.end() && (itRHS == CacheMappingTable.end() || itLHS->second > itRHS->second); } bool DataBrokerPlugin::match(const ExecutionTarget& et) const { if(!BrokerPlugin::match(et)) return false; // Remove targets which are not A-REX (>= ARC-1). 
if (et.ComputingEndpoint->Implementation < Software("ARC", "1")) { return false; } if (!request) { return false; } std::map::iterator it = CacheMappingTable.insert(std::pair(et.ComputingEndpoint->URLString, 0)).first; PayloadSOAP *response = NULL; URL url(et.ComputingEndpoint->URLString); ClientSOAP client(cfg, url, uc.Timeout()); if (!client.process(request, &response)) { return true; } if (response == NULL) { return true; } for (XMLNode ExistCount = (*response)["CacheCheckResponse"]["CacheCheckResult"]["Result"]; (bool)ExistCount; ++ExistCount) { it->second += stringto((std::string)ExistCount["FileSize"]); } delete response; return true; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/Broker/PaxHeaders.7502/README0000644000000000000000000000012212064067675021371 xustar000000000000000027 mtime=1355837373.860742 25 atime=1513200574.3767 30 ctime=1513200660.277750883 nordugrid-arc-5.4.2/src/hed/acc/Broker/README0000644000175000002070000000164412064067675021435 0ustar00mockbuildmock00000000000000Broker plugins (ACC) for performing resource brokering Specialized brokers: o Random The ExecutionTarget sorting is based on randomization; the PossibleTargets vector is shuffled. o FastestQueueBroker The sorting is based on the shortest queue, i.e. the queue where the number of waiting jobs is the lowest. o Benchmark The sorting is based on the fastest CPU. The CINT2000 (Integer Component of SPEC CPU2000) benchmark is used for this purpose; more information about this benchmark type is available at: http://www.spec.org/cpu2000/CINT2000/ o Data The main idea is that jobs should be submitted to the cluster where the data already resides. The sorting is based on checking the A-REX file cache. A-REX exposes a CacheCheck interface which can be used to query whether the files in question are present in the cache directory. See src/hed/libs/compute/Submitter.cpp for implementation examples.
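To illustrate the interface these brokers implement, here is a minimal sketch of a custom broker plugin. It is not part of this source tree: the class name ExampleBrokerPlugin is hypothetical and the include paths are assumed to be the installed ARC compute headers declaring BrokerPlugin and ExecutionTarget. The pattern mirrors the bundled plugins: a constructor taking a BrokerPluginArgument, a static Instance() factory, a match() filter and a sorting operator().

// Minimal sketch of a custom broker plugin (hypothetical example, not shipped).
#include <arc/compute/Broker.h>           // assumed header declaring BrokerPlugin
#include <arc/compute/ExecutionTarget.h>  // assumed header declaring ExecutionTarget

namespace Arc {

  class ExampleBrokerPlugin : public BrokerPlugin {
  public:
    ExampleBrokerPlugin(BrokerPluginArgument* parg) : BrokerPlugin(parg) {}
    ~ExampleBrokerPlugin() {}

    static Plugin* Instance(PluginArgument *arg) {
      BrokerPluginArgument *brokerarg = dynamic_cast<BrokerPluginArgument*>(arg);
      return brokerarg ? new ExampleBrokerPlugin(brokerarg) : NULL;
    }

    // Keep only targets which pass the generic matchmaking and report free slots.
    virtual bool match(const ExecutionTarget& et) const {
      if (!BrokerPlugin::match(et)) return false;
      return et.ComputingShare->FreeSlots > -1;
    }

    // Order targets so that those with more free slots come first.
    virtual bool operator()(const ExecutionTarget& lhs, const ExecutionTarget& rhs) const {
      return lhs.ComputingShare->FreeSlots > rhs.ComputingShare->FreeSlots;
    }
  };

} // namespace Arc

Such a plugin is then made visible to the loader through a descriptor table, in the same way DescriptorsBroker.cpp above registers the bundled brokers.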
nordugrid-arc-5.4.2/src/hed/acc/PaxHeaders.7502/SER0000644000000000000000000000013213214316024017621 xustar000000000000000030 mtime=1513200660.453753035 30 atime=1513200668.721854157 30 ctime=1513200660.453753035 nordugrid-arc-5.4.2/src/hed/acc/SER/0000755000175000002070000000000013214316024017744 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/acc/SER/PaxHeaders.7502/Makefile.am0000644000000000000000000000012613153452741021746 xustar000000000000000027 mtime=1504597473.972032 30 atime=1513200594.548946994 29 ctime=1513200660.44675295 nordugrid-arc-5.4.2/src/hed/acc/SER/Makefile.am0000644000175000002070000000162313153452741022013 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libaccSER.la if LDNS_ENABLED ARCHERY_SOURCES = ServiceEndpointRetrieverPluginARCHERY.cpp ServiceEndpointRetrieverPluginARCHERY.h else ARCHERY_SOURCES = endif libaccSER_la_SOURCES = DescriptorsSER.cpp \ ServiceEndpointRetrieverPluginEMIR.cpp ServiceEndpointRetrieverPluginEMIR.h \ $(ARCHERY_SOURCES) libaccSER_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) $(LDNS_CFLAGS) libaccSER_la_LIBADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(LDNS_LIBS) libaccSER_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/acc/SER/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315722021747 xustar000000000000000030 mtime=1513200594.596947581 30 atime=1513200648.929612091 30 ctime=1513200660.448752974 nordugrid-arc-5.4.2/src/hed/acc/SER/Makefile.in0000644000175000002070000007151613214315722022027 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/acc/SER DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libaccSER_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) am__libaccSER_la_SOURCES_DIST = DescriptorsSER.cpp \ ServiceEndpointRetrieverPluginEMIR.cpp \ ServiceEndpointRetrieverPluginEMIR.h \ ServiceEndpointRetrieverPluginARCHERY.cpp \ ServiceEndpointRetrieverPluginARCHERY.h @LDNS_ENABLED_TRUE@am__objects_1 = libaccSER_la-ServiceEndpointRetrieverPluginARCHERY.lo am_libaccSER_la_OBJECTS = libaccSER_la-DescriptorsSER.lo \ libaccSER_la-ServiceEndpointRetrieverPluginEMIR.lo \ $(am__objects_1) libaccSER_la_OBJECTS = $(am_libaccSER_la_OBJECTS) libaccSER_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libaccSER_la_CXXFLAGS) \ 
$(CXXFLAGS) $(libaccSER_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libaccSER_la_SOURCES) DIST_SOURCES = $(am__libaccSER_la_SOURCES_DIST) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ 
GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = 
@S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libaccSER.la @LDNS_ENABLED_FALSE@ARCHERY_SOURCES = @LDNS_ENABLED_TRUE@ARCHERY_SOURCES = ServiceEndpointRetrieverPluginARCHERY.cpp ServiceEndpointRetrieverPluginARCHERY.h libaccSER_la_SOURCES = DescriptorsSER.cpp \ ServiceEndpointRetrieverPluginEMIR.cpp ServiceEndpointRetrieverPluginEMIR.h \ $(ARCHERY_SOURCES) libaccSER_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) $(LDNS_CFLAGS) libaccSER_la_LIBADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ 
$(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(LDNS_LIBS) libaccSER_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/acc/SER/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/acc/SER/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libaccSER.la: $(libaccSER_la_OBJECTS) $(libaccSER_la_DEPENDENCIES) $(libaccSER_la_LINK) -rpath $(pkglibdir) $(libaccSER_la_OBJECTS) $(libaccSER_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccSER_la-DescriptorsSER.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccSER_la-ServiceEndpointRetrieverPluginARCHERY.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccSER_la-ServiceEndpointRetrieverPluginEMIR.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) 
$(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libaccSER_la-DescriptorsSER.lo: DescriptorsSER.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccSER_la_CXXFLAGS) $(CXXFLAGS) -MT libaccSER_la-DescriptorsSER.lo -MD -MP -MF $(DEPDIR)/libaccSER_la-DescriptorsSER.Tpo -c -o libaccSER_la-DescriptorsSER.lo `test -f 'DescriptorsSER.cpp' || echo '$(srcdir)/'`DescriptorsSER.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccSER_la-DescriptorsSER.Tpo $(DEPDIR)/libaccSER_la-DescriptorsSER.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DescriptorsSER.cpp' object='libaccSER_la-DescriptorsSER.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccSER_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccSER_la-DescriptorsSER.lo `test -f 'DescriptorsSER.cpp' || echo '$(srcdir)/'`DescriptorsSER.cpp libaccSER_la-ServiceEndpointRetrieverPluginEMIR.lo: ServiceEndpointRetrieverPluginEMIR.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccSER_la_CXXFLAGS) $(CXXFLAGS) -MT libaccSER_la-ServiceEndpointRetrieverPluginEMIR.lo -MD -MP -MF $(DEPDIR)/libaccSER_la-ServiceEndpointRetrieverPluginEMIR.Tpo -c -o libaccSER_la-ServiceEndpointRetrieverPluginEMIR.lo `test -f 'ServiceEndpointRetrieverPluginEMIR.cpp' || echo '$(srcdir)/'`ServiceEndpointRetrieverPluginEMIR.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccSER_la-ServiceEndpointRetrieverPluginEMIR.Tpo $(DEPDIR)/libaccSER_la-ServiceEndpointRetrieverPluginEMIR.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ServiceEndpointRetrieverPluginEMIR.cpp' object='libaccSER_la-ServiceEndpointRetrieverPluginEMIR.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccSER_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccSER_la-ServiceEndpointRetrieverPluginEMIR.lo `test -f 'ServiceEndpointRetrieverPluginEMIR.cpp' || echo 
'$(srcdir)/'`ServiceEndpointRetrieverPluginEMIR.cpp libaccSER_la-ServiceEndpointRetrieverPluginARCHERY.lo: ServiceEndpointRetrieverPluginARCHERY.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccSER_la_CXXFLAGS) $(CXXFLAGS) -MT libaccSER_la-ServiceEndpointRetrieverPluginARCHERY.lo -MD -MP -MF $(DEPDIR)/libaccSER_la-ServiceEndpointRetrieverPluginARCHERY.Tpo -c -o libaccSER_la-ServiceEndpointRetrieverPluginARCHERY.lo `test -f 'ServiceEndpointRetrieverPluginARCHERY.cpp' || echo '$(srcdir)/'`ServiceEndpointRetrieverPluginARCHERY.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libaccSER_la-ServiceEndpointRetrieverPluginARCHERY.Tpo $(DEPDIR)/libaccSER_la-ServiceEndpointRetrieverPluginARCHERY.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ServiceEndpointRetrieverPluginARCHERY.cpp' object='libaccSER_la-ServiceEndpointRetrieverPluginARCHERY.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccSER_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccSER_la-ServiceEndpointRetrieverPluginARCHERY.lo `test -f 'ServiceEndpointRetrieverPluginARCHERY.cpp' || echo '$(srcdir)/'`ServiceEndpointRetrieverPluginARCHERY.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed 
'/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/acc/SER/PaxHeaders.7502/DescriptorsSER.cpp0000644000000000000000000000012413153452741023267 xustar000000000000000027 mtime=1504597473.972032 27 atime=1513200574.249698 30 ctime=1513200660.449752986 nordugrid-arc-5.4.2/src/hed/acc/SER/DescriptorsSER.cpp0000644000175000002070000000127013153452741023334 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include "ServiceEndpointRetrieverPluginEMIR.h" #ifdef HAVE_LDNS #include "ServiceEndpointRetrieverPluginARCHERY.h" #endif extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "EMIR", "HED:ServiceEndpointRetrieverPlugin", "EMIR registry", 0, &Arc::ServiceEndpointRetrieverPluginEMIR::Instance }, #ifdef HAVE_LDNS { "ARCHERY", "HED:ServiceEndpointRetrieverPlugin", "ARC Hierarchical Endpoints DNS-based Registry", 0, &Arc::ServiceEndpointRetrieverPluginARCHERY::Instance }, #endif { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/acc/SER/PaxHeaders.7502/ServiceEndpointRetrieverPluginARCHERY.h0000644000000000000000000000012413153452741027247 xustar000000000000000027 mtime=1504597473.972032 27 atime=1513200574.249698 30 ctime=1513200660.453753035 nordugrid-arc-5.4.2/src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.h0000644000175000002070000000217613153452741027322 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_SERVICEENDPOINTRETRIEVEREPLUGINARCHERY_H__ #define __ARC_SERVICEENDPOINTRETRIEVEREPLUGINARCHERY_H__ #include #include #include namespace Arc { class Logger; class ServiceEndpointRetrieverPluginARCHERY : public ServiceEndpointRetrieverPlugin { public: ServiceEndpointRetrieverPluginARCHERY(PluginArgument* parg): ServiceEndpointRetrieverPlugin(parg) { supportedInterfaces.push_back("org.nordugrid.archery"); } virtual ~ServiceEndpointRetrieverPluginARCHERY() {} static Plugin* Instance(PluginArgument* arg) { return new ServiceEndpointRetrieverPluginARCHERY(arg); } virtual EndpointQueryingStatus Query(const UserConfig& uc, const Endpoint& rEndpoint, std::list&, const EndpointQueryOptions&) const; virtual bool isEndpointNotSupported(const Endpoint&) const; private: static Logger logger; }; } // namespace Arc #endif // __ARC_SERVICEENDPOINTRETRIEVEREPLUGINARCHERY_H__ nordugrid-arc-5.4.2/src/hed/acc/SER/PaxHeaders.7502/ServiceEndpointRetrieverPluginARCHERY.cpp0000644000000000000000000000012413153452741027602 xustar000000000000000027 mtime=1504597473.972032 27 atime=1513200574.246698 30 ctime=1513200660.452753023 nordugrid-arc-5.4.2/src/hed/acc/SER/ServiceEndpointRetrieverPluginARCHERY.cpp0000644000175000002070000001414313153452741027652 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "ServiceEndpointRetrieverPluginARCHERY.h" namespace Arc { Logger ServiceEndpointRetrieverPluginARCHERY::logger(Logger::getRootLogger(), "ServiceEndpointRetrieverPlugin.ARCHERY"); bool ServiceEndpointRetrieverPluginARCHERY::isEndpointNotSupported(const Endpoint& endpoint) const { // dns:// prefixes URL is supported if ( endpoint.URLString.substr(0,6) == "dns://" ) { return false; } // as well as domain names without any URL prefixes const std::string::size_type pos = endpoint.URLString.find("://"); return pos != std::string::npos; } EndpointQueryingStatus ServiceEndpointRetrieverPluginARCHERY::Query(const UserConfig& uc, const Endpoint& rEndpoint, std::list& seList, const EndpointQueryOptions&) const { EndpointQueryingStatus 
s(EndpointQueryingStatus::STARTED); if (isEndpointNotSupported(rEndpoint)) { return s; } ldns_rdf *domain; ldns_resolver *res; ldns_pkt *pkt; ldns_status dnsstatus; std::string domain_url; if ( rEndpoint.URLString.substr(0,6) == "dns://" ) { // strip dns:// prefix if specified domain_url = rEndpoint.URLString.substr(6); } else { // or add _archery selector if domain name is provided as is domain_url = "_archery." + rEndpoint.URLString; } // initialize DNS query domain = ldns_dname_new_frm_str(domain_url.c_str()); if (!domain) { logger.msg(DEBUG,"Cannot initialize ARCHERY domain name for query"); return EndpointQueryingStatus::FAILED; } // create a new resolver from /etc/resolv.conf dnsstatus = ldns_resolver_new_frm_file(&res, NULL); if (dnsstatus != LDNS_STATUS_OK) { logger.msg(DEBUG,"Cannot create resolver from /etc/resolv.conf"); return EndpointQueryingStatus::FAILED; } // query TXT records pkt = ldns_resolver_query(res,domain,LDNS_RR_TYPE_TXT,LDNS_RR_CLASS_IN,LDNS_RD); ldns_rdf_deep_free(domain); if (!pkt) { logger.msg(DEBUG,"Cannot query service endpoint TXT records from DNS"); return EndpointQueryingStatus::FAILED; } // parse DNS response ldns_rr_list *txt; ldns_rr *txtrr; ldns_rdf *txtdata; txt = ldns_pkt_rr_list_by_type(pkt,LDNS_RR_TYPE_TXT,LDNS_SECTION_ANSWER); if (!txt) { logger.msg(DEBUG,"Cannot parse service endpoint TXT records."); return EndpointQueryingStatus::FAILED; } // get list of resource records and loop over ldns_rr_list_sort(txt); txtrr = ldns_rr_list_pop_rr(txt); while (txtrr != NULL) { txtdata = ldns_rr_rdf(txtrr,0); // get service endpoint resource record std::string se_str(ldns_rdf2str(txtdata)); // and fetch the next one for next iteration txtrr = ldns_rr_list_pop_rr(txt); // process record // start with trimming whitespaces and qoutes const char* trimchars = " \"\t\n"; se_str.erase(se_str.find_last_not_of(trimchars) + 1); se_str.erase(0, se_str.find_first_not_of(trimchars)); // define variables to hold service endpoint properties std::string se_url; std::vector se_types; int se_status = 1; //missing status treated as 'active' // fetch key=value pairs from endpoint record std::size_t space_pos = 0, space_found = 0; while (space_found != std::string::npos) { space_found = se_str.find_first_of(" ", space_pos); std::string se_kv_str = se_str.substr(space_pos, space_found - space_pos); space_pos = space_found + 1; if (se_kv_str.empty()) { continue; } // check key=value part std::string keyeq = se_kv_str.substr(0,2); if ( keyeq == "u=" ) { se_url = se_kv_str.substr(2); } else if ( keyeq == "t=" ) { se_types.push_back(se_kv_str.substr(2)); } else if ( keyeq == "s=" ) { if ( se_kv_str.substr(2) != "1" ) { se_status = 0; } } else { logger.msg(WARNING,"Wrong service record field \"%s\" found in the \"%s\"", se_kv_str, se_str ); } } // check parsed values if ( se_url.empty() ) { logger.msg(WARNING,"Malformed ARCHERY record found (endpoint url is not defined): %s", se_str); continue; } if ( se_status ) { if ( se_types.empty() ) { logger.msg(WARNING,"Malformed ARCHERY record found (endpoint type is not defined): %s", se_str); continue; } else { for(std::vector::iterator it = se_types.begin(); it != se_types.end(); ++it) { logger.msg(INFO,"Found service endpoint %s (type %s)", se_url, *it); // register endpoint Endpoint se(se_url); // with corresponding capability if ( *it == "org.nordugrid.archery" || *it == "org.nordugrid.ldapegiis" || *it == "org.nordugrid.emir" || *it == "org.nordugrid.bdii" ) { se.Capability.insert("information.discovery.registry"); se.InterfaceName = 
supportedInterfaces.empty()?std::string(""):supportedInterfaces.front(); } else { se.Capability.insert("information.discovery.resource"); se.InterfaceName = *it; } seList.push_back(se); } } } else { logger.msg(INFO,"Status for service endpoint \"%s\" is set to inactive in ARCHERY. Skipping.", se_url); } } ldns_rr_list_deep_free(txt); ldns_pkt_free(pkt); ldns_resolver_deep_free(res); s = EndpointQueryingStatus::SUCCESSFUL; return s; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/SER/PaxHeaders.7502/ServiceEndpointRetrieverPluginEMIR.cpp0000644000000000000000000000012412300714551027232 xustar000000000000000027 mtime=1392744809.100789 27 atime=1513200574.249698 30 ctime=1513200660.450752999 nordugrid-arc-5.4.2/src/hed/acc/SER/ServiceEndpointRetrieverPluginEMIR.cpp0000644000175000002070000000765212300714551027311 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "ServiceEndpointRetrieverPluginEMIR.h" namespace Arc { Logger ServiceEndpointRetrieverPluginEMIR::logger(Logger::getRootLogger(), "ServiceEndpointRetrieverPlugin.EMIR"); bool ServiceEndpointRetrieverPluginEMIR::isEndpointNotSupported(const Endpoint& endpoint) const { const std::string::size_type pos = endpoint.URLString.find("://"); if (pos != std::string::npos) { const std::string proto = lower(endpoint.URLString.substr(0, pos)); return ((proto != "http") && (proto != "https")); } return false; } EndpointQueryingStatus ServiceEndpointRetrieverPluginEMIR::Query(const UserConfig& uc, const Endpoint& rEndpoint, std::list& seList, const EndpointQueryOptions&) const { EndpointQueryingStatus s(EndpointQueryingStatus::STARTED); int currentSkip = 0; MCCConfig cfg; uc.ApplyToConfig(cfg); if (isEndpointNotSupported(rEndpoint)) { return EndpointQueryingStatus::FAILED; } URL url((rEndpoint.URLString.find("://") == std::string::npos ? "http://" : "") + rEndpoint.URLString, false, 9126, "/services/query.xml"); if (!url) { s = EndpointQueryingStatus::FAILED; return s; } // Limitation: Max number of services the below loop will fetch, according to parameters: // ServiceEndpointRetrieverPluginEMIR::maxEntries * 100 = 500.000 (currently) for (int iAvoidInfiniteLoop = 0; iAvoidInfiniteLoop < 100; ++iAvoidInfiniteLoop) { if (maxEntries > 0) url.AddHTTPOption("limit", tostring(maxEntries), true); if (currentSkip > 0) url.AddHTTPOption("skip", tostring(currentSkip), true); // increment the starting point of the fetched DB currentSkip += maxEntries; ClientHTTP httpclient(cfg, url); httpclient.RelativeURI(true); // Set Accept HTTP header tag, in order not to receive JSON output. std::multimap acceptHeaderTag; acceptHeaderTag.insert(std::make_pair("Accept", "text/xml")); PayloadRaw http_request; PayloadRawInterface *http_response = NULL; HTTPClientInfo http_info; // send query message to the EMIRegistry MCC_Status status = httpclient.process("GET", acceptHeaderTag, &http_request, &http_info, &http_response); if (http_info.code != 200 || !status) { s = EndpointQueryingStatus::FAILED; return s; } XMLNode resp_xml(http_response->Content()); XMLNodeList services = resp_xml.Path("Service"); if (services.empty()) { delete http_response; break; // No more services - exit from loop. 
} for (XMLNodeList::const_iterator it = services.begin(); it != services.end(); ++it) { if (!(*it)["Endpoint"] || !(*it)["Endpoint"]["URL"]) { continue; } Endpoint se((std::string)(*it)["Endpoint"]["URL"]); for (XMLNode n = (*it)["Endpoint"]["Capability"]; n; ++n) { se.Capability.insert((std::string)n); } se.InterfaceName = lower((std::string)(*it)["Endpoint"]["InterfaceName"]); se.ServiceID = (std::string)(*it)["ID"]; seList.push_back(se); } logger.msg(VERBOSE, "Found %u service endpoints from the index service at %s", resp_xml.Size(), url.str()); if (http_response != NULL) { delete http_response; } } s = EndpointQueryingStatus::SUCCESSFUL; return s; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/acc/SER/PaxHeaders.7502/ServiceEndpointRetrieverPluginEMIR.h0000644000000000000000000000012412045235201026673 xustar000000000000000027 mtime=1351957121.246634 27 atime=1513200574.249698 30 ctime=1513200660.451753011 nordugrid-arc-5.4.2/src/hed/acc/SER/ServiceEndpointRetrieverPluginEMIR.h0000644000175000002070000000223712045235201026744 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_SERVICEENDPOINTRETRIEVEREPLUGINEMIR_H__ #define __ARC_SERVICEENDPOINTRETRIEVEREPLUGINEMIR_H__ #include #include #include namespace Arc { class EndpointQueryingStatus; class Logger; class ServiceEndpointRetrieverPluginEMIR : public ServiceEndpointRetrieverPlugin { public: ServiceEndpointRetrieverPluginEMIR(PluginArgument* parg): ServiceEndpointRetrieverPlugin(parg) { supportedInterfaces.push_back("org.nordugrid.emir"); maxEntries = 5000; } ~ServiceEndpointRetrieverPluginEMIR() {} static Plugin* Instance(PluginArgument* arg) { return new ServiceEndpointRetrieverPluginEMIR(arg); } virtual EndpointQueryingStatus Query(const UserConfig& uc, const Endpoint& rEndpoint, std::list&, const EndpointQueryOptions&) const; virtual bool isEndpointNotSupported(const Endpoint&) const; private: static Logger logger; int maxEntries; }; } // namespace Arc #endif // __ARC_SERVICEENDPOINTRETRIEVEREPLUGINEMIR_H__ nordugrid-arc-5.4.2/src/hed/acc/PaxHeaders.7502/README0000644000000000000000000000012412064073334020134 xustar000000000000000027 mtime=1355839196.464875 27 atime=1513200574.317699 30 ctime=1513200660.070748351 nordugrid-arc-5.4.2/src/hed/acc/README0000644000175000002070000000141512064073334020202 0ustar00mockbuildmock00000000000000Folder for ARC Client Components (ACC) ACCs are the plugins used by the client to interface to different grid flavours and to perform resource brokering. One folder exists for each supported grid flavour and one folder for specialized brokers. Each grid flavour should implement specialized classes of the following base classes (located in src/hed/libs/client): o JobControllerPlugin o Submitter The plugins are loaded by appropriate "umbrella" classes, such as JobSupervisor and TargetGenerator, which resolve which plugins (i.e. which flavour) to load depending on the command in question. Specialized brokers are loaded directly by the submission executable.
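As an illustration of how a flavour module announces its plugins to these umbrella classes, the sketch below shows a plugin descriptor table for a hypothetical "Example" flavour, modelled on the DescriptorsBroker.cpp and DescriptorsSER.cpp tables shown above. The class JobControllerPluginExample and the kind string "HED:JobControllerPlugin" are assumptions for illustration, not taken from this source tree.

// Hypothetical descriptor table (e.g. a DescriptorsExample.cpp compiled into the module).
// Each entry is (name, kind, description, version, factory); the table is NULL-terminated
// and exported under ARC_PLUGINS_TABLE_NAME so the plugin loader can discover it.
extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = {
  { "Example", "HED:JobControllerPlugin",  // kind string assumed for this sketch
    "Job management for the Example flavour", 0,
    &Arc::JobControllerPluginExample::Instance },
  { NULL, NULL, NULL, 0, NULL }
};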
See also: src/clients/compute (command line interface) src/doc/client (client documentation) nordugrid-arc-5.4.2/src/hed/PaxHeaders.7502/daemon0000644000000000000000000000013213214316025017706 xustar000000000000000030 mtime=1513200661.877770452 30 atime=1513200668.722854169 30 ctime=1513200661.877770452 nordugrid-arc-5.4.2/src/hed/daemon/0000755000175000002070000000000013214316025020031 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/daemon/PaxHeaders.7502/win320000644000000000000000000000013213214316025020650 xustar000000000000000030 mtime=1513200661.955771406 30 atime=1513200668.722854169 30 ctime=1513200661.955771406 nordugrid-arc-5.4.2/src/hed/daemon/win32/0000755000175000002070000000000013214316025020773 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/daemon/win32/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515022771 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200595.054953182 30 ctime=1513200661.952771369 nordugrid-arc-5.4.2/src/hed/daemon/win32/Makefile.am0000644000175000002070000000041612052416515023034 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libdaemon.la libdaemon_la_SOURCES = main_win32.cpp libdaemon_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libdaemon_la_LIBADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) nordugrid-arc-5.4.2/src/hed/daemon/win32/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315723022776 xustar000000000000000030 mtime=1513200595.095953684 30 atime=1513200648.542607357 30 ctime=1513200661.953771381 nordugrid-arc-5.4.2/src/hed/daemon/win32/Makefile.in0000644000175000002070000005352713214315723023060 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/daemon/win32 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) am__DEPENDENCIES_1 = libdaemon_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) am_libdaemon_la_OBJECTS = libdaemon_la-main_win32.lo libdaemon_la_OBJECTS = $(am_libdaemon_la_OBJECTS) libdaemon_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libdaemon_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libdaemon_la_SOURCES) DIST_SOURCES = $(libdaemon_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ 
ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ 
LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = 
@pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libdaemon.la libdaemon_la_SOURCES = main_win32.cpp libdaemon_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libdaemon_la_LIBADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/daemon/win32/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/daemon/win32/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libdaemon.la: $(libdaemon_la_OBJECTS) $(libdaemon_la_DEPENDENCIES) $(libdaemon_la_LINK) $(libdaemon_la_OBJECTS) $(libdaemon_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdaemon_la-main_win32.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' 
libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libdaemon_la-main_win32.lo: main_win32.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdaemon_la_CXXFLAGS) $(CXXFLAGS) -MT libdaemon_la-main_win32.lo -MD -MP -MF $(DEPDIR)/libdaemon_la-main_win32.Tpo -c -o libdaemon_la-main_win32.lo `test -f 'main_win32.cpp' || echo '$(srcdir)/'`main_win32.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdaemon_la-main_win32.Tpo $(DEPDIR)/libdaemon_la-main_win32.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='main_win32.cpp' object='libdaemon_la-main_win32.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdaemon_la_CXXFLAGS) $(CXXFLAGS) -c -o libdaemon_la-main_win32.lo `test -f 'main_win32.cpp' || echo '$(srcdir)/'`main_win32.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e 
"s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/daemon/win32/PaxHeaders.7502/main_win32.cpp0000644000000000000000000000012312301125744023401 xustar000000000000000027 mtime=1392815076.546455 26 atime=1513200575.17171 30 ctime=1513200661.955771406 nordugrid-arc-5.4.2/src/hed/daemon/win32/main_win32.cpp0000644000175000002070000001316212301125744023452 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "../options.h" Arc::Config config; Arc::MCCLoader *loader = NULL; Arc::Logger& logger=Arc::Logger::rootLogger; static bool req_shutdown = false; static void sig_shutdown(int) { if(req_shutdown) _exit(0); req_shutdown = true; } static void do_shutdown(void) { logger.msg(Arc::VERBOSE, "shutdown"); if(loader) delete loader; _exit(0); } static void glib_exception_handler() { std::cerr << "Glib exception thrown" << std::endl; try { throw; } catch(const Glib::Error& err) { std::cerr << "Glib exception caught: " << err.what() << std::endl; } catch(const std::exception &err) { std::cerr << "Exception caught: " << err.what() << std::endl; } catch(...) { std::cerr << "Unknown exception caught" << std::endl; } } static void merge_options_and_config(Arc::Config& cfg, Arc::ServerOptions& opt) { Arc::XMLNode srv = cfg["Server"]; if (!(bool)srv) { logger.msg(Arc::ERROR, "No server config part of config file"); return; } if (opt.pid_file != "") { if (!(bool)srv["PidFile"]) { srv.NewChild("PidFile")=opt.pid_file; } else { srv["PidFile"] = opt.pid_file; } } if (opt.foreground == true) { if (!(bool)srv["Foreground"]) { srv.NewChild("Foreground") = "true"; } else { srv["Foreground"] = "true"; } } } static bool is_true(Arc::XMLNode val) { std::string v = (std::string)val; if(v == "true") return true; if(v == "1") return true; return false; } static std::string init_logger(Arc::Config& cfg) { /* setup root logger */ Arc::LogFile* sd = NULL; Arc::XMLNode log = cfg["Server"]["Logger"]; Arc::XMLNode xlevel = log["Level"]; Arc::Logger::rootLogger.setThreshold(Arc::WARNING); for(;(bool)xlevel;++xlevel) { std::string domain = xlevel.Attribute("Domain"); Arc::LogLevel level = Arc::WARNING; if(!istring_to_level((std::string)xlevel, level)) { logger.msg(Arc::WARNING, "Unknown log level %s", (std::string)xlevel); } else { Arc::Logger::setThresholdForDomain(level,domain); } } std::string log_file = (log["File"] ? 
(std::string)log["File"] : Glib::build_filename(Glib::get_tmp_dir(), "arched.log")); sd = new Arc::LogFile(log_file); if((!sd) || (!(*sd))) { logger.msg(Arc::ERROR, "Failed to open log file: %s", log_file); _exit(1); } if(log["Backups"]) { int backups; if(Arc::stringto((std::string)log["Backups"], backups)) { sd->setBackups(backups); } } if(log["Maxsize"]) { int maxsize; if(Arc::stringto((std::string)log["Maxsize"], maxsize)) { sd->setMaxSize(maxsize); } } if(log["Reopen"]) { std::string reopen = (std::string)(log["Reopen"]); bool reopen_b = false; if((reopen == "true") || (reopen == "1")) reopen_b = true; sd->setReopen(reopen_b); } Arc::Logger::rootLogger.removeDestinations(); if(sd) Arc::Logger::rootLogger.addDestination(*sd); if (is_true(cfg["Server"]["Foreground"])) { logger.msg(Arc::INFO, "Start foreground"); Arc::LogStream *err = new Arc::LogStream(std::cerr); Arc::Logger::rootLogger.addDestination(*err); } return log_file; } int main(int argc, char **argv) { signal(SIGTTOU, SIG_IGN); // Set up Glib exception handler Glib::add_exception_handler(sigc::ptr_fun(&glib_exception_handler)); // Temporary stderr destination for error messages Arc::LogStream logcerr(std::cerr); Arc::Logger::getRootLogger().addDestination(logcerr); /* Create options parser */ Arc::ServerOptions options; try { std::list params = options.Parse(argc, argv); if (params.empty()) { if (options.version) { std::cout << Arc::IString("%s version %s", "arched", VERSION) << std::endl; exit(0); } /* Load and parse config file */ if(!config.parse(options.xml_config_file.c_str())) { logger.msg(Arc::ERROR, "Failed to load service configuration from file %s",options.xml_config_file); exit(1); }; if(!MatchXMLName(config,"ArcConfig")) { logger.msg(Arc::ERROR, "Configuration root element is not "); exit(1); } /* overwrite config variables by cmdline options */ merge_options_and_config(config, options); std::string pid_file = (std::string)config["Server"]["PidFile"]; /* initalize logger infrastucture */ std::string root_log_file = init_logger(config); // set signal handlers signal(SIGTERM, sig_shutdown); signal(SIGINT, sig_shutdown); // bootstrap loader = new Arc::MCCLoader(config); if(!*loader) { logger.msg(Arc::ERROR, "Failed to load service side MCCs"); } else { logger.msg(Arc::INFO, "Service side MCCs are loaded"); // sleep forever for (;!req_shutdown;) { sleep(1); } } } else { logger.msg(Arc::ERROR, "Unexpected arguments supplied"); } } catch (const Glib::Error& error) { logger.msg(Arc::ERROR, error.what()); } do_shutdown(); return 0; } nordugrid-arc-5.4.2/src/hed/daemon/PaxHeaders.7502/options.h0000644000000000000000000000012411757677564021664 xustar000000000000000027 mtime=1337950068.779165 27 atime=1513200575.169709 30 ctime=1513200661.871770378 nordugrid-arc-5.4.2/src/hed/daemon/options.h0000644000175000002070000000121311757677564021726 0ustar00mockbuildmock00000000000000#ifndef __ARC_SERVER_OPTIONS_H__ #define __ARC_SERVER_OPTIONS_H__ #include namespace Arc { class ServerOptions : public OptionParser { public: ServerOptions(); /* Command line options values */ bool foreground; #ifndef WIN32 bool watchdog; #endif bool version; std::string xml_config_file; std::string ini_config_file; std::string pid_file; std::string log_file; std::string user; std::string group; bool config_dump; std::string schema_file; #ifdef WIN32 bool install; bool uninstall; #endif }; } // namespace Arc #endif // __ARC_SERVER_OPTIONS_H__ nordugrid-arc-5.4.2/src/hed/daemon/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515022027 
xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200594.832950467 30 ctime=1513200661.866770317 nordugrid-arc-5.4.2/src/hed/daemon/Makefile.am0000644000175000002070000000137512052416515022077 0ustar00mockbuildmock00000000000000if WIN32 PLATFORM_DIR = win32 else PLATFORM_DIR = unix endif SUBDIRS = scripts $(PLATFORM_DIR) schema sbin_PROGRAMS = arched SRC_UNIX = options.cpp options.h SRC_WIN32 = options.cpp options.h LIB_UNIX = unix/libdaemon.la LIB_WIN32 = win32/libdaemon.la if WIN32 SRC = $(SRC_WIN32) LIB = $(LIB_WIN32) else SRC = $(SRC_UNIX) LIB = $(LIB_UNIX) endif SOURCES = $(SRC) LDADD = $(LIB) arched_SOURCES = $(SOURCES) arched_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) arched_LDADD = $(LDADD) \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(EXTRA_LIBS) man_MANS = arched.8 nordugrid-arc-5.4.2/src/hed/daemon/PaxHeaders.7502/options.cpp0000644000000000000000000000012411757677564022217 xustar000000000000000027 mtime=1337950068.779165 27 atime=1513200575.164709 30 ctime=1513200661.870770366 nordugrid-arc-5.4.2/src/hed/daemon/options.cpp0000644000175000002070000000244311757677564022267 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "options.h" namespace Arc { ServerOptions::ServerOptions() : OptionParser() { foreground = false; AddOption('f', "foreground", "run daemon in foreground", foreground); #ifndef WIN32 watchdog = false; AddOption('w', "watchdog", "enable watchdog to restart process", watchdog); #endif AddOption('c', "xml-config", "full path of XML config file", "path", xml_config_file); AddOption('i', "ini-config", "full path of InI config file", "path", ini_config_file); config_dump = false; AddOption('d', "config-dump", "dump generated XML config", config_dump); AddOption('p', "pid-file", "full path of pid file", "path", pid_file); AddOption('l', "log-file", "full path of log file", "path", log_file); AddOption('u', "user", "user name", "user", user); AddOption('g', "group", "group name", "group", group); AddOption('s', "schema", "full path of XML schema file", "path", schema_file); version = false; AddOption('v', "version", "print version information", version); #ifdef WIN32 install = false; AddOption('a', "install", "install windows service", install); uninstall = false; AddOption('r', "uninstall", "uninstall windows service", uninstall); #endif } } // namespace Arc nordugrid-arc-5.4.2/src/hed/daemon/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315722022033 xustar000000000000000030 mtime=1513200594.882951078 30 atime=1513200648.478606575 30 ctime=1513200661.867770329 nordugrid-arc-5.4.2/src/hed/daemon/Makefile.in0000644000175000002070000010446513214315722022113 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ sbin_PROGRAMS = arched$(EXEEXT) subdir = src/hed/daemon DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/arched.8.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = arched.8 CONFIG_CLEAN_VPATH_FILES = am__installdirs = "$(DESTDIR)$(sbindir)" "$(DESTDIR)$(man8dir)" PROGRAMS = $(sbin_PROGRAMS) am__arched_SOURCES_DIST = options.cpp options.h am__objects_1 = arched-options.$(OBJEXT) @WIN32_FALSE@am__objects_2 = $(am__objects_1) @WIN32_TRUE@am__objects_2 = $(am__objects_1) am__objects_3 = $(am__objects_2) am_arched_OBJECTS = $(am__objects_3) arched_OBJECTS = $(am_arched_OBJECTS) am__DEPENDENCIES_1 = arched_DEPENDENCIES = $(LDADD) \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) arched_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(arched_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) 
$(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DIST_SOURCES = $(am__arched_SOURCES_DIST) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' man8dir = $(mandir)/man8 NROFF = nroff MANS = $(man_MANS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = scripts unix win32 schema DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ 
ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ 
LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = 
@pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @WIN32_FALSE@PLATFORM_DIR = unix @WIN32_TRUE@PLATFORM_DIR = win32 SUBDIRS = scripts $(PLATFORM_DIR) schema SRC_UNIX = options.cpp options.h SRC_WIN32 = options.cpp options.h LIB_UNIX = unix/libdaemon.la LIB_WIN32 = win32/libdaemon.la @WIN32_FALSE@SRC = $(SRC_UNIX) @WIN32_TRUE@SRC = $(SRC_WIN32) @WIN32_FALSE@LIB = $(LIB_UNIX) @WIN32_TRUE@LIB = $(LIB_WIN32) SOURCES = $(SRC) LDADD = $(LIB) arched_SOURCES = $(SOURCES) arched_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) arched_LDADD = $(LDADD) \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(EXTRA_LIBS) man_MANS = arched.8 all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/daemon/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/daemon/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): arched.8: $(top_builddir)/config.status $(srcdir)/arched.8.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-sbinPROGRAMS: $(sbin_PROGRAMS) @$(NORMAL_INSTALL) test -z "$(sbindir)" || $(MKDIR_P) "$(DESTDIR)$(sbindir)" @list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p || test -f $$p1; \ then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(sbindir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(sbindir)$$dir" || exit $$?; \ } \ ; done uninstall-sbinPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(sbindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(sbindir)" && rm -f $$files clean-sbinPROGRAMS: @list='$(sbin_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list arched$(EXEEXT): $(arched_OBJECTS) $(arched_DEPENDENCIES) @rm -f arched$(EXEEXT) $(arched_LINK) $(arched_OBJECTS) $(arched_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arched-options.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) 
$(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< arched-options.o: options.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arched_CXXFLAGS) $(CXXFLAGS) -MT arched-options.o -MD -MP -MF $(DEPDIR)/arched-options.Tpo -c -o arched-options.o `test -f 'options.cpp' || echo '$(srcdir)/'`options.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arched-options.Tpo $(DEPDIR)/arched-options.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='options.cpp' object='arched-options.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arched_CXXFLAGS) $(CXXFLAGS) -c -o arched-options.o `test -f 'options.cpp' || echo '$(srcdir)/'`options.cpp arched-options.obj: options.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arched_CXXFLAGS) $(CXXFLAGS) -MT arched-options.obj -MD -MP -MF $(DEPDIR)/arched-options.Tpo -c -o arched-options.obj `if test -f 'options.cpp'; then $(CYGPATH_W) 'options.cpp'; else $(CYGPATH_W) '$(srcdir)/options.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arched-options.Tpo $(DEPDIR)/arched-options.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='options.cpp' object='arched-options.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arched_CXXFLAGS) $(CXXFLAGS) -c -o arched-options.obj `if test -f 'options.cpp'; then $(CYGPATH_W) 'options.cpp'; else $(CYGPATH_W) '$(srcdir)/options.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man8: $(man_MANS) @$(NORMAL_INSTALL) test -z "$(man8dir)" || $(MKDIR_P) "$(DESTDIR)$(man8dir)" @list=''; test -n "$(man8dir)" || exit 0; \ { for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.8[a-z]*$$/p'; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^8][0-9a-z]*$$,8,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man8dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man8dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man8dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man8dir)" || exit $$?; }; \ done; } uninstall-man8: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man8dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.8[a-z]*$$/p'; \ } | sed -e 
's,.*/,,;h;s,.*\.,,;s,^[^8][0-9a-z]*$$,8,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ test -z "$$files" || { \ echo " ( cd '$(DESTDIR)$(man8dir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(man8dir)" && rm -f $$files; } # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @list='$(MANS)'; if test -n "$$list"; then \ list=`for p in $$list; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \ if test -n "$$list" && \ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(PROGRAMS) $(MANS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(sbindir)" "$(DESTDIR)$(man8dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-sbinPROGRAMS \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-man install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-sbinPROGRAMS install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-man8 install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-man uninstall-sbinPROGRAMS uninstall-man: uninstall-man8 .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-sbinPROGRAMS ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-man8 install-pdf install-pdf-am install-ps \ install-ps-am install-sbinPROGRAMS install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am uninstall-man \ uninstall-man8 uninstall-sbinPROGRAMS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/daemon/PaxHeaders.7502/arched.8.in0000644000000000000000000000012711757677564021747 xustar000000000000000027 mtime=1337950068.779165 30 atime=1513200652.983661673 30 ctime=1513200661.869770354 nordugrid-arc-5.4.2/src/hed/daemon/arched.8.in0000644000175000002070000000264511757677564022020 0ustar00mockbuildmock00000000000000.\" -*- nroff -*- .TH ARCHED 8 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid System Managers Manual" .SH NAME arched \- ARC Hosting Environment Daemon .SH SYNOPSIS .B arched [\fIOPTION\fR]... .SH DESCRIPTION .\" Add any additional description here .PP arched is the ARC Hosting Environment Daemon and is responsible for... 
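.\" Illustrative invocation sketch (an added example, not original page text),
.\" using only the options documented in OPTIONS below; the XML configuration
.\" and pid-file paths are hypothetical, /etc/arc.conf is taken from FILES:
.\"   arched -c /etc/arc/service.xml -f
.\"   arched -i /etc/arc.conf -p /var/run/arched.pid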
.SH OPTIONS .TP \fB\-f\fR, \fB\-\-foreground\fR run daemon in foreground .TP \fB\-w\fR, \fB\-\-watchdog\fR enables watchdog which restarts stuck and failed arched (it works only in background mode) .TP \fB\-c\fR, \fB\-\-xml-config\fR full path of XML config file .TP \fB\-i\fR, \fB\-\-ini-config\fR full path of INI config file .TP \fB\-d\fR, \fB\-\-config-dump\fR dump XML config generated from INI one .TP \fB\-p\fR, \fB\-\-pid-file\fR full path of pid file .TP \fB\-u\fR, \fB\-\-user\fR user name to switch after starting .TP \fB\-g\fR, \fB\-\-group\fR group name to switch after starting .TP \fB\-s\fR, \fB\-\-schema\fR full path of XML schema file to be used for validating configuration .TP \fB\-v\fR, \fB\-\-version\fR output version information and exit .TP \fB\-a\fR, \fB\-\-install\fR install windows service (only works on windows) .TP \fB\-r\fR, \fB\-\-uninstall\fR uninstall windows service (only works on windows) .PP .SH REPORTING BUGS Report bugs to http://bugzilla.nordugrid.org/ .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH FILES .BR /etc/arc.conf .SH AUTHOR Aleksandr Konstantinov nordugrid-arc-5.4.2/src/hed/daemon/PaxHeaders.7502/unix0000644000000000000000000000013213214316025020671 xustar000000000000000030 mtime=1513200661.929771087 30 atime=1513200668.722854169 30 ctime=1513200661.929771087 nordugrid-arc-5.4.2/src/hed/daemon/unix/0000755000175000002070000000000013214316025021014 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/daemon/unix/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515023012 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200594.994952449 30 ctime=1513200661.924771026 nordugrid-arc-5.4.2/src/hed/daemon/unix/Makefile.am0000644000175000002070000000044112052416515023053 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libdaemon.la libdaemon_la_SOURCES = daemon.cpp main_unix.cpp daemon.h libdaemon_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libdaemon_la_LIBADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) nordugrid-arc-5.4.2/src/hed/daemon/unix/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315723023017 xustar000000000000000030 mtime=1513200595.038952987 30 atime=1513200648.525607149 30 ctime=1513200661.925771039 nordugrid-arc-5.4.2/src/hed/daemon/unix/Makefile.in0000644000175000002070000005655113214315723023101 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
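For illustration only (this example is not part of the original manual page): combining the arched options documented above, arched -f -i /etc/arc/service.ini starts the daemon in the foreground from an INI configuration, while arched -c /etc/arc/service.xml -w -p /var/run/arched.pid daemonizes from an XML configuration with the watchdog enabled and writes a PID file. The paths shown here are the built-in defaults that main_unix.cpp further below falls back to when no configuration or PID file is given on the command line.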
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/daemon/unix DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) am__DEPENDENCIES_1 = libdaemon_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) am_libdaemon_la_OBJECTS = libdaemon_la-daemon.lo \ libdaemon_la-main_unix.lo libdaemon_la_OBJECTS = $(am_libdaemon_la_OBJECTS) libdaemon_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libdaemon_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libdaemon_la_SOURCES) DIST_SOURCES = $(libdaemon_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = 
@ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = 
@JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ 
host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libdaemon.la libdaemon_la_SOURCES = daemon.cpp main_unix.cpp daemon.h libdaemon_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libdaemon_la_LIBADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/daemon/unix/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/daemon/unix/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libdaemon.la: $(libdaemon_la_OBJECTS) $(libdaemon_la_DEPENDENCIES) $(libdaemon_la_LINK) $(libdaemon_la_OBJECTS) $(libdaemon_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdaemon_la-daemon.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdaemon_la-main_unix.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libdaemon_la-daemon.lo: daemon.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdaemon_la_CXXFLAGS) $(CXXFLAGS) -MT libdaemon_la-daemon.lo -MD -MP -MF $(DEPDIR)/libdaemon_la-daemon.Tpo -c -o libdaemon_la-daemon.lo `test -f 'daemon.cpp' || echo '$(srcdir)/'`daemon.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdaemon_la-daemon.Tpo $(DEPDIR)/libdaemon_la-daemon.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='daemon.cpp' object='libdaemon_la-daemon.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdaemon_la_CXXFLAGS) $(CXXFLAGS) -c -o libdaemon_la-daemon.lo `test -f 'daemon.cpp' || echo '$(srcdir)/'`daemon.cpp libdaemon_la-main_unix.lo: 
main_unix.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdaemon_la_CXXFLAGS) $(CXXFLAGS) -MT libdaemon_la-main_unix.lo -MD -MP -MF $(DEPDIR)/libdaemon_la-main_unix.Tpo -c -o libdaemon_la-main_unix.lo `test -f 'main_unix.cpp' || echo '$(srcdir)/'`main_unix.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdaemon_la-main_unix.Tpo $(DEPDIR)/libdaemon_la-main_unix.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='main_unix.cpp' object='libdaemon_la-main_unix.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdaemon_la_CXXFLAGS) $(CXXFLAGS) -c -o libdaemon_la-main_unix.lo `test -f 'main_unix.cpp' || echo '$(srcdir)/'`main_unix.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/daemon/unix/PaxHeaders.7502/daemon.cpp0000644000000000000000000000012213107553365022727 xustar000000000000000026 mtime=1495193333.63232 26 atime=1513200575.17471 30 ctime=1513200661.927771063 nordugrid-arc-5.4.2/src/hed/daemon/unix/daemon.cpp0000644000175000002070000002041413107553365022777 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include // NOTE: On Solaris errno is not working properly if cerrno is included first #include #include #include #include #include #include #include #include #ifdef HAVE_SYS_FILE_H #include #endif /* HAVE_SYS_FILE_H */ #include #include #include "daemon.h" namespace Arc { Logger Daemon::logger(Logger::rootLogger, "Daemon"); static void init_child(const std::string& log_file) { /* clear inherited umasks */ ::umask(0022); /* * Become a session leader: setsid must succeed because child is * guaranteed not to be a process group leader (it belongs to the * process group of the parent.) * The goal is to have no controlling terminal. * As we now don't have a controlling terminal we will not receive * tty-related signals - no need to ignore them. */ setsid(); /* redirect standard input to /dev/null */ if (std::freopen("/dev/null", "r", stdin) == NULL) fclose(stdin); if(!log_file.empty()) { /* forward stdout and stderr to log file */ if (std::freopen(log_file.c_str(), "a", stdout) == NULL) fclose(stdout); if (std::freopen(log_file.c_str(), "a", stderr) == NULL) fclose(stderr); } else { if (std::freopen("/dev/null", "a", stdout) == NULL) fclose(stdout); if (std::freopen("/dev/null", "a", stderr) == NULL) fclose(stderr); } } static void init_parent(pid_t pid,const std::string& pid_file) { if(!pid_file.empty()) { /* write pid to pid file */ std::fstream pf(pid_file.c_str(), std::fstream::out); pf << pid << std::endl; pf.close(); } } Daemon::Daemon(const std::string& pid_file_, const std::string& log_file_, bool watchdog, void (*watchdog_callback)(Daemon*)) : pid_file(pid_file_),log_file(log_file_),watchdog_pid(0),watchdog_cb(watchdog_callback) { pid_t pid = ::fork(); switch(pid) { case -1: // parent fork error logger.msg(ERROR, "Daemonization fork failed: %s", StrError(errno)); exit(1); case 0: { // child while(true) { // stay in loop waiting for watchdog alarm /* And another process is left for watchdoging */ /* Watchdog need to be initialized before fork to make sure it is shared */ WatchdogListener wdl; if(watchdog) { logger.msg(WARNING, "Watchdog (re)starting application"); pid = ::fork(); } switch(pid) { case -1: // parent fork error logger.msg(ERROR, "Watchdog fork failed: %s", StrError(errno)); exit(1); case 0: // real child if(watchdog) watchdog_pid = ::getppid(); init_child(log_file); break; default: // watchdog logger.msg(WARNING, "Watchdog starting monitoring"); init_parent(pid,pid_file); bool error; int status = 0; pid_t rpid = 0; for(;;) { if(!wdl.Listen(30,error)) { if(error) _exit(1); // watchdog gone // check if child is still there rpid = ::waitpid(pid,&status,WNOHANG); if(rpid != 0) break; } else { // watchdog timeout - but check child too rpid = ::waitpid(pid,&status,WNOHANG); break; } } if(watchdog_cb) { // Refresh connection to log files logreopen(); (*watchdog_cb)(this); } /* check if child already exited */ if(rpid == pid) { /* child exited */ if(WIFSIGNALED(status)) { logger.msg(WARNING, "Watchdog detected application exit due to signal %u", WTERMSIG(status)); } else if(WIFEXITED(status)) { logger.msg(WARNING, "Watchdog detected application 
exited with code %u", WEXITSTATUS(status)); } else { logger.msg(WARNING, "Watchdog detected application exit"); } if(WIFSIGNALED(status) && ((WTERMSIG(status) == SIGSEGV) || (WTERMSIG(status) == SIGFPE) || (WTERMSIG(status) == SIGABRT) || (WTERMSIG(status) == SIGILL))) { } else { /* child either exited itself or was asked to */ logger.msg(WARNING, "Watchdog exiting because application was purposely killed or exited itself"); _exit(1); } } else if((rpid < 0) && (errno == EINTR)) { // expected error - continue waiting } else { // Child not exited and watchdog timeouted or unexpected error - kill child process logger.msg(ERROR, "Watchdog detected application timeout or error - killing process"); // TODO: more sophisticated killing //sighandler_t old_sigterm = ::signal(SIGTERM,SIG_IGN); sig_t old_sigterm = ::signal(SIGTERM,SIG_IGN); int patience = 600; // how long can we wait? Maybe configure it. ::kill(pid,SIGTERM); while((rpid = ::waitpid(pid,&status,WNOHANG)) == 0) { if(patience-- < 0) break; sleep(1); } if(rpid != pid) { logger.msg(WARNING, "Watchdog failed to wait till application exited - sending KILL"); // kill hardly if not exited yet or error was detected ::kill(pid,SIGKILL); // clean zomby patience = 300; // 5 minutes should be enough while(patience > 0) { --patience; rpid = ::waitpid(pid,&status,WNOHANG); if(rpid == pid) break; sleep(1); } if(rpid != pid) { logger.msg(WARNING, "Watchdog failed to kill application - giving up and exiting"); _exit(1); } } ::signal(SIGTERM,old_sigterm); } break; // go in loop and do another fork } if(pid == 0) break; // leave watchdog loop because it is child now } }; break; default: // original parent if(!watchdog) init_parent(pid,pid_file); /* succesful exit from parent */ _exit(0); } } Daemon::~Daemon() { // Remove pid file unlink(pid_file.c_str()); Daemon::logger.msg(INFO, "Shutdown daemon"); } void Daemon::logreopen(void) { if(!log_file.empty()) { if (std::freopen(log_file.c_str(), "a", stdout) == NULL) fclose(stdout); if (std::freopen(log_file.c_str(), "a", stderr) == NULL) fclose(stderr); } } void Daemon::shutdown(void) { if(watchdog_pid) kill(watchdog_pid,SIGTERM); } } // namespace Arc nordugrid-arc-5.4.2/src/hed/daemon/unix/PaxHeaders.7502/daemon.h0000644000000000000000000000012213107553365022374 xustar000000000000000026 mtime=1495193333.63232 26 atime=1513200575.17571 30 ctime=1513200661.929771087 nordugrid-arc-5.4.2/src/hed/daemon/unix/daemon.h0000644000175000002070000000167213107553365022451 0ustar00mockbuildmock00000000000000#ifndef __ARC_DAEMON_H__ #define __ARC_DAEMON_H__ #include #include namespace Arc { class Daemon { public: Daemon():watchdog_pid(0) {}; /** Daemonize application and optionally start watchdog. 
\param pid_file_ path to file to store PID of main process \param log_file_ path to file to direct stdout/stderr of main process \param watchdog if watchdog must be initialized \param watchdog_callback callback to be called when watchdog detects main application failure */ Daemon(const std::string &pid_file_, const std::string &log_file_, bool watchdog = false, void (*watchdog_callback)(Daemon*) = NULL); ~Daemon(); void logreopen(void); void shutdown(void); private: const std::string pid_file; const std::string log_file; static Logger logger; unsigned int watchdog_pid; void (*watchdog_cb)(Daemon*); }; } // namespace Arc #endif // __ARC_DAEMON_H__ nordugrid-arc-5.4.2/src/hed/daemon/unix/PaxHeaders.7502/main_unix.cpp0000644000000000000000000000012313124220410023431 xustar000000000000000027 mtime=1498489096.378221 26 atime=1513200575.17571 30 ctime=1513200661.928771075 nordugrid-arc-5.4.2/src/hed/daemon/unix/main_unix.cpp0000644000175000002070000003365113124220410023507 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "daemon.h" #include "../options.h" static Arc::Daemon *main_daemon = NULL; static Arc::Config config; static Arc::MCCLoader *loader = NULL; static Arc::Logger& logger = Arc::Logger::rootLogger; static int exit_code = 0; static bool req_shutdown = false; static void sig_shutdown(int) { if(req_shutdown) _exit(exit_code); req_shutdown = true; } static void do_shutdown(void) { if(main_daemon) main_daemon->shutdown(); logger.msg(Arc::VERBOSE, "shutdown"); if(loader) delete loader; if(main_daemon) delete main_daemon; logger.msg(Arc::DEBUG, "exit"); _exit(exit_code); } static std::list sighup_dests; static void sighup_handler(int) { int old_errno = errno; if(main_daemon) main_daemon->logreopen(); for (std::list::iterator dest = sighup_dests.begin(); dest != sighup_dests.end(); ++dest) { if(*dest) { (*dest)->setReopen(true); (*dest)->setReopen(false); } } errno = old_errno; } static void daemon_kick(Arc::Daemon*) { // Reopen log file(s) which may have been archived by now. sighup_handler(SIGHUP); } static void glib_exception_handler() { std::cerr << "Glib exception thrown" << std::endl; try { throw; } catch(const Glib::Error& err) { std::cerr << "Glib exception caught: " << err.what() << std::endl; } catch(const std::exception &err) { std::cerr << "Exception caught: " << err.what() << std::endl; } catch(...) 
{ std::cerr << "Unknown exception caught" << std::endl; } } static void merge_options_and_config(Arc::Config& cfg, Arc::ServerOptions& opt) { Arc::XMLNode srv = cfg["Server"]; if (!(bool)srv) { logger.msg(Arc::ERROR, "No server config part of config file"); return; } if (opt.pid_file != "") { if (!(bool)srv["PidFile"]) { srv.NewChild("PidFile")=opt.pid_file; } else { srv["PidFile"] = opt.pid_file; } } if (opt.foreground == true) { if (!(bool)srv["Foreground"]) { srv.NewChild("Foreground")="true"; } else { srv["Foreground"]="true"; } } if (opt.watchdog == true) { if (!(bool)srv["Watchdog"]) { srv.NewChild("Watchdog")="true"; } else { srv["Watchdog"]="true"; } } if (opt.user != "") { if (!(bool)srv["User"]) { srv.NewChild("User") = opt.user; } else { srv["User"] = opt.user; } } if (opt.group != "") { if (!(bool)srv["Group"]) { srv.NewChild("Group") = opt.group; } else { srv["Group"] = opt.group; } } if (!opt.log_file.empty()) { if (!(bool)srv["Logger"]["File"]) { srv.NewChild("Logger").NewChild("File") = opt.log_file; } else { srv["Logger"]["File"] = opt.log_file; } } } static bool is_true(Arc::XMLNode val) { std::string v = (std::string)val; if(v == "true") return true; if(v == "1") return true; return false; } static std::string init_logger(Arc::XMLNode log, bool foreground) { // set up root logger(s) std::list dests; Arc::XMLNode xlevel = log["Level"]; Arc::Logger::rootLogger.setThreshold(Arc::WARNING); for(;(bool)xlevel;++xlevel) { std::string domain = xlevel.Attribute("Domain"); Arc::LogLevel level = Arc::WARNING; if(!istring_to_level((std::string)xlevel, level)) { logger.msg(Arc::WARNING, "Unknown log level %s", (std::string)xlevel); } else { Arc::Logger::setThresholdForDomain(level,domain); } } std::string default_log; for (Arc::XMLNode logfile = log["File"]; logfile; ++logfile) { Arc::LogFile* l = new Arc::LogFile((std::string)logfile); if((!l) || (!(*l))) { logger.msg(Arc::ERROR, "Failed to open log file: %s", (std::string)logfile); _exit(1); } dests.push_back(l); if (default_log.empty()) default_log = (std::string)logfile; } if (dests.empty()) { Arc::LogFile* l = new Arc::LogFile("/var/log/arc/arched.log"); dests.push_back(l); default_log = "/var/log/arc/arched.log"; } int backups = -1; if (log["Backups"]) Arc::stringto((std::string)log["Backups"], backups); int maxsize = -1; if (log["Maxsize"]) Arc::stringto((std::string)log["Maxsize"], maxsize); bool reopen_b = false; if (log["Reopen"]) { std::string reopen = (std::string)(log["Reopen"]); if((reopen == "true") || (reopen == "1")) reopen_b = true; } if (!reopen_b) sighup_dests = dests; Arc::Logger::rootLogger.removeDestinations(); for (std::list::iterator i = dests.begin(); i != dests.end(); ++i) { (*i)->setBackups(backups); (*i)->setMaxSize(maxsize); (*i)->setReopen(reopen_b); Arc::Logger::rootLogger.addDestination(**i); } if (foreground) { logger.msg(Arc::INFO, "Start foreground"); Arc::LogStream *err = new Arc::LogStream(std::cerr); Arc::Logger::rootLogger.addDestination(*err); } if (reopen_b) return ""; return default_log; } static uid_t get_uid(const std::string &name) { struct passwd *ent; if (name[0] == '#') { return (atoi(&(name.c_str()[1]))); } if (!(ent = getpwnam(name.c_str()))) { std::cerr << "Bad user name" << std::endl; exit(1); } return (ent->pw_uid); } static gid_t get_gid(uid_t uid) { struct passwd *ent; if (!(ent = getpwuid(uid))) { std::cerr << "Bad user id" << std::endl; exit(1); } return (ent->pw_gid); } static gid_t get_gid(const std::string &name) { struct group *ent; if (name[0] == '#') { return 
(atoi(&(name.c_str()[1]))); } if (!(ent = getgrnam(name.c_str()))) { std::cerr << "Bad user name" << std::endl; exit(1); } return (ent->gr_gid); } static void init_config(const Arc::ServerOptions &options) { if (!options.xml_config_file.empty()) { if (Glib::file_test(options.xml_config_file, Glib::FILE_TEST_EXISTS) == false) { logger.msg(Arc::ERROR, "XML config file %s does not exist", options.xml_config_file); exit(1); } if(!config.parse(options.xml_config_file.c_str())) { logger.msg(Arc::ERROR, "Failed to load service configuration from file %s", options.xml_config_file); exit(1); } } else if (!options.ini_config_file.empty()) { if (Glib::file_test(options.ini_config_file, Glib::FILE_TEST_EXISTS) == false) { logger.msg(Arc::ERROR, "INI config file %s does not exist", options.xml_config_file); exit(1); } Arc::IniConfig ini_parser(options.ini_config_file); if (ini_parser.Evaluate(config) == false) { logger.msg(Arc::ERROR, "Error evaluating profile"); exit(1); } if (!config) { logger.msg(Arc::ERROR, "Failed to load service configuration from file %s", options.ini_config_file); exit(1); } } else { std::string ini_config_file = "/etc/arc/service.ini"; if (Glib::file_test(ini_config_file, Glib::FILE_TEST_EXISTS) == false) { std::string xml_config_file = "/etc/arc/service.xml"; if (Glib::file_test(xml_config_file, Glib::FILE_TEST_EXISTS) == false) { } if(!config.parse(xml_config_file.c_str())) { logger.msg(Arc::ERROR, "Error loading generated configuration"); exit(1); } } else { Arc::IniConfig ini_parser(ini_config_file); if (ini_parser.Evaluate(config) == false) { logger.msg(Arc::ERROR, "Error evaulating profile"); exit(1); } } if (config.Size() == 0) { logger.msg(Arc::ERROR, "Failed to load service configuration from any default config file"); exit(1); } } } int main(int argc, char **argv) { // Ignore some signals signal(SIGTTOU,SIG_IGN); signal(SIGPIPE,SIG_IGN); signal(SIGHUP,&sighup_handler); // Set up Glib exception handler Glib::add_exception_handler(sigc::ptr_fun(&glib_exception_handler)); // Debug code for setting different logging formats char const * log_time_format = ::getenv("ARC_LOGGER_TIME_FORMAT"); if(log_time_format) { if(strcmp(log_time_format,"USER") == 0) { Arc::Time::SetFormat(Arc::UserTime); } else if(strcmp(log_time_format,"USEREXT") == 0) { Arc::Time::SetFormat(Arc::UserExtTime); } else if(strcmp(log_time_format,"ELASTIC") == 0) { Arc::Time::SetFormat(Arc::ElasticTime); } else if(strcmp(log_time_format,"MDS") == 0) { Arc::Time::SetFormat(Arc::MDSTime); } else if(strcmp(log_time_format,"ASC") == 0) { Arc::Time::SetFormat(Arc::ASCTime); } else if(strcmp(log_time_format,"ISO") == 0) { Arc::Time::SetFormat(Arc::ISOTime); } else if(strcmp(log_time_format,"UTC") == 0) { Arc::Time::SetFormat(Arc::UTCTime); } else if(strcmp(log_time_format,"RFC1123") == 0) { Arc::Time::SetFormat(Arc::RFC1123Time); } else if(strcmp(log_time_format,"EPOCH") == 0) { Arc::Time::SetFormat(Arc::EpochTime); }; }; // Temporary stderr destination for error messages Arc::LogStream logcerr(std::cerr); Arc::Logger::getRootLogger().addDestination(logcerr); /* Create options parser */ Arc::ServerOptions options; if((argc>0) && (argv[0])) Arc::ArcLocation::Init(argv[0]); try { std::list params = options.Parse(argc, argv); if (params.empty()) { if (options.version) { std::cout << Arc::IString("%s version %s", "arched", VERSION) << std::endl; exit(0); } /* Load and parse config file */ init_config(options); // schema validation if (!options.schema_file.empty()) { std::string err_msg; bool ret = 
config.Validate(options.schema_file, err_msg); if (ret == false) { logger.msg(Arc::ERROR, "Schema validation error"); logger.msg(Arc::ERROR, err_msg); exit(1); } } // dump config if it was requested if (options.config_dump == true) { std::string str; config.GetXML(str, true); std::cout << Arc::strip(str) << std::endl; exit(0); } if(!MatchXMLName(config,"ArcConfig")) { logger.msg(Arc::ERROR, "Configuration root element is not "); exit(1); } /* overwrite config variables by cmdline options */ merge_options_and_config(config, options); std::string pid_file = (config["Server"]["PidFile"] ? (std::string)config["Server"]["PidFile"] : "/var/run/arched.pid"); std::string user = (std::string)config["Server"]["User"]; std::string group = (std::string)config["Server"]["Group"]; // switch user if (getuid() == 0) { // are we root? /* switch group if it is specified */ if (!group.empty()) { gid_t g = get_gid(group); if (setgid(g) != 0) { logger.msg(Arc::ERROR, "Cannot switch to group (%s)", group); exit(1); } } /* switch user if it is specified */ if (!user.empty()) { uid_t u = get_uid(user); if (group.empty()) { gid_t g = get_gid(u); if (setgid(g) != 0) { logger.msg(Arc::ERROR, "Cannot switch to primary group for user (%s)", user); exit(1); } } if (setuid(u) != 0) { logger.msg(Arc::ERROR, "Cannot switch to user (%s)", user); exit(1); } } } /* initalize logger infrastucture */ std::string root_log_file = init_logger(config["Server"]["Logger"], is_true(config["Server"]["Foreground"])); // demonize if the foreground option was not set if (!is_true((config)["Server"]["Foreground"])) { main_daemon = new Arc::Daemon(pid_file, root_log_file, is_true((config)["Server"]["Watchdog"]), &daemon_kick); } // set signal handlers signal(SIGTERM, sig_shutdown); signal(SIGINT, sig_shutdown); // bootstrap loader = new Arc::MCCLoader(config); if(!*loader) { logger.msg(Arc::ERROR, "Failed to load service side MCCs"); } else { logger.msg(Arc::INFO, "Service side MCCs are loaded"); // sleep forever for (;!req_shutdown;) { sleep(1); } } } else { logger.msg(Arc::ERROR, "Unexpected arguments supplied"); } } catch (const Glib::Error& error) { logger.msg(Arc::ERROR, error.what()); } if (!req_shutdown) exit_code = -1; do_shutdown(); return exit_code; } nordugrid-arc-5.4.2/src/hed/daemon/PaxHeaders.7502/scripts0000644000000000000000000000013213214316025021375 xustar000000000000000030 mtime=1513200661.904770782 30 atime=1513200668.722854169 30 ctime=1513200661.904770782 nordugrid-arc-5.4.2/src/hed/daemon/scripts/0000755000175000002070000000000013214316025021520 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/daemon/scripts/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712754431715023526 xustar000000000000000027 mtime=1471296461.229277 30 atime=1513200594.944951837 30 ctime=1513200661.899770721 nordugrid-arc-5.4.2/src/hed/daemon/scripts/Makefile.am0000644000175000002070000000043612754431715023573 0ustar00mockbuildmock00000000000000if SYSV_SCRIPTS_ENABLED HED_SCRIPT = arched else HED_SCRIPT = endif initd_SCRIPTS = $(HED_SCRIPT) if SYSTEMD_UNITS_ENABLED HED_UNIT = arched.service HED_UNIT_WRAPPER = arched-start else HED_UNIT = HED_UNIT_WRAPPER = endif units_DATA = $(HED_UNIT) pkgdata_SCRIPTS = $(HED_UNIT_WRAPPER) nordugrid-arc-5.4.2/src/hed/daemon/scripts/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315722023521 xustar000000000000000030 mtime=1513200594.979952265 29 atime=1513200648.49460677 30 ctime=1513200661.900770733 
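The Daemon class declared in daemon.h and driven by main_unix.cpp above can also be exercised on its own. The following is a minimal sketch, not shipped with the package: it assumes it is built inside this source tree against unix/daemon.h and linked with libarccommon (as the libdaemon_la_LIBADD line earlier does), and it mirrors what main_unix.cpp does once configuration handling is finished: construct an Arc::Daemon with a PID file, a log file, the watchdog flag and a callback that reopens logs, then sleep until asked to terminate. The file name, handler names and paths are illustrative only.

// daemon_sketch.cpp - hypothetical example, not part of nordugrid-arc.
// Assumes compilation inside this source tree (so "daemon.h" resolves to
// src/hed/daemon/unix/daemon.h) and linking against libarccommon.la.
#include <signal.h>
#include <unistd.h>
#include "daemon.h"

static Arc::Daemon* daemon_instance = NULL;

// Called in the watchdog process when it detects that the application
// failed; arched's equivalent hook (daemon_kick) reopens the log files.
static void on_watchdog(Arc::Daemon* d) {
  if (d) d->logreopen();
}

static void on_term(int) {
  // Ask the watchdog to stop monitoring so it does not restart us.
  if (daemon_instance) daemon_instance->shutdown();
  _exit(0);
}

int main() {
  // Paths are the defaults main_unix.cpp falls back to; adjust as needed.
  daemon_instance = new Arc::Daemon("/var/run/arched.pid",
                                    "/var/log/arc/arched.log",
                                    true /* watchdog */, &on_watchdog);
  // Only the final child returns from the constructor: the original parent
  // has already exited and the intermediate watchdog process keeps running.
  signal(SIGTERM, &on_term);
  for (;;) sleep(1); // a real daemon would run its service loop here
  return 0;
}

Because Daemon performs the fork/setsid work in its constructor, the code after the constructor already runs in the daemonized child, which is why main_unix.cpp, like this sketch, simply sleeps until a termination signal arrives.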
nordugrid-arc-5.4.2/src/hed/daemon/scripts/Makefile.in0000644000175000002070000005334113214315722023576 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/daemon/scripts DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/arched-start.in $(srcdir)/arched.in \ $(srcdir)/arched.service.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = arched arched.service arched-start CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(initddir)" "$(DESTDIR)$(pkgdatadir)" \ "$(DESTDIR)$(unitsdir)" SCRIPTS = $(initd_SCRIPTS) $(pkgdata_SCRIPTS) SOURCES = DIST_SOURCES = DATA = $(units_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) 
$(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = 
@GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ 
build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @SYSV_SCRIPTS_ENABLED_FALSE@HED_SCRIPT = @SYSV_SCRIPTS_ENABLED_TRUE@HED_SCRIPT = arched initd_SCRIPTS = $(HED_SCRIPT) @SYSTEMD_UNITS_ENABLED_FALSE@HED_UNIT = @SYSTEMD_UNITS_ENABLED_TRUE@HED_UNIT = arched.service @SYSTEMD_UNITS_ENABLED_FALSE@HED_UNIT_WRAPPER = @SYSTEMD_UNITS_ENABLED_TRUE@HED_UNIT_WRAPPER = arched-start units_DATA = $(HED_UNIT) pkgdata_SCRIPTS = $(HED_UNIT_WRAPPER) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/daemon/scripts/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/daemon/scripts/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): arched: $(top_builddir)/config.status $(srcdir)/arched.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arched.service: $(top_builddir)/config.status $(srcdir)/arched.service.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arched-start: $(top_builddir)/config.status $(srcdir)/arched-start.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-initdSCRIPTS: $(initd_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(initddir)" || $(MKDIR_P) "$(DESTDIR)$(initddir)" @list='$(initd_SCRIPTS)'; test -n "$(initddir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(initddir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(initddir)$$dir" || exit $$?; \ } \ ; done uninstall-initdSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(initd_SCRIPTS)'; test -n "$(initddir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(initddir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(initddir)" && rm -f $$files install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(pkgdatadir)" || $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in 
$$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkgdatadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgdatadir)" && rm -f $$files mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-unitsDATA: $(units_DATA) @$(NORMAL_INSTALL) test -z "$(unitsdir)" || $(MKDIR_P) "$(DESTDIR)$(unitsdir)" @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(unitsdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(unitsdir)" || exit $$?; \ done uninstall-unitsDATA: @$(NORMAL_UNINSTALL) @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(unitsdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(unitsdir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(initddir)" "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(unitsdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-initdSCRIPTS install-pkgdataSCRIPTS \ install-unitsDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-initdSCRIPTS uninstall-pkgdataSCRIPTS \ uninstall-unitsDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-initdSCRIPTS install-man \ install-pdf install-pdf-am install-pkgdataSCRIPTS install-ps \ install-ps-am install-strip install-unitsDATA installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \ uninstall-initdSCRIPTS uninstall-pkgdataSCRIPTS \ uninstall-unitsDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/daemon/scripts/PaxHeaders.7502/arched.in0000644000000000000000000000012611551530225023235 xustar000000000000000027 mtime=1302769813.339513 30 atime=1513200652.999661869 29 ctime=1513200661.90377077 nordugrid-arc-5.4.2/src/hed/daemon/scripts/arched.in0000755000175000002070000000731311551530225023307 0ustar00mockbuildmock00000000000000#!/bin/bash # # chkconfig: 2345 87 13 # description: ARC HED service # processname: arched ### BEGIN INIT INFO # Provides: arched # Required-Start: $local_fs $remote_fs # Required-Stop: $local_fs $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: ARC HED service # Description: ARC's Hosting Environment Daemon ### END INIT INFO # source function library if [ -f /etc/init.d/functions ]; then . /etc/init.d/functions log_success_msg() { echo -n "$@" success "$@" echo } log_warning_msg() { echo -n "$@" warning "$@" echo } log_failure_msg() { echo -n "$@" failure "$@" echo } elif [ -f /lib/lsb/init-functions ]; then . /lib/lsb/init-functions else echo "Error: Cannot source either init.d or lsb functions" exit 1 fi prog=arched RUN=yes OPTIONS="" # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/${prog} ]; then . /etc/sysconfig/${prog} elif [ -r /etc/default/${prog} ]; then . /etc/default/${prog} fi if [ "x$ARC_CONFIG" != "x" ]; then OPTIONS="-c $ARC_CONFIG" fi # ARC_LOCATION ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ !
-d "$ARC_LOCATION" ]; then log_failure_msg "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi export ARC_LOCATION if [ `id -u` = 0 ] ; then # Debian does not have /var/lock/subsys if [ -d /var/lock/subsys ]; then LOCKFILE=/var/lock/subsys/$prog else LOCKFILE=/var/lock/$prog fi if [ "x$PID_FILE" = "x" ]; then PID_FILE=/var/run/$prog.pid fi else LOCKFILE=$HOME/$prog.lock if [ "x$PID_FILE" = "x" ]; then PID_FILE=$HOME/$prog.pid fi fi RETVAL=0 start() { if [ "$RUN" != "yes" ] ; then echo "$prog disabled in configuration" return 0 fi echo -n "Starting $prog: " # Check if we are already running if [ -f $PID_FILE ]; then read pid < $PID_FILE if [ "x$pid" != "x" ]; then ps -p "$pid" -o comm 2>/dev/null | grep "^$prog$" 1>/dev/null 2>/dev/null if [ $? -eq 0 ] ; then log_success_msg "already running (pid $pid)" return 0 fi fi rm -f "$PID_FILE" "$LOCKFILE" fi CMD="$ARC_LOCATION/sbin/$prog" if [ ! -x "$CMD" ]; then log_failure_msg "Missing executable" exit 1 fi eval $CMD $OPTIONS RETVAL=$? if [ $RETVAL -eq 0 ]; then touch $LOCKFILE log_success_msg else log_failure_msg fi return $RETVAL } stop() { echo -n "Stopping $prog: " if [ -f "$PID_FILE" ]; then read pid < "$PID_FILE" if [ ! -z "$pid" ] ; then kill "$pid" RETVAL=$? if [ $RETVAL -eq 0 ]; then log_success_msg else log_failure_msg fi sleep 1 kill -9 "$pid" 1>/dev/null 2>&1 rm -f "$PID_FILE" "$LOCKFILE" else RETVAL=1 log_failure_msg "$prog shutdown - pidfile is empty" fi else RETVAL=0 log_success_msg "$prog shutdown - already stopped" fi return $RETVAL } status() { if [ -f "$PID_FILE" ]; then read pid < "$PID_FILE" if [ "$pid" != "" ]; then if ps -p "$pid" > /dev/null; then echo "$1 (pid $pid) is running..." return 0 fi echo "$1 stopped but pid file exists" return 1 fi fi if [ -f $LOCKFILE ]; then echo "$1 stopped but lockfile exists" return 2 fi echo "$1 is stopped" return 3 } restart() { stop start } case "$1" in start) start ;; stop) stop ;; status) status ;; restart | force-reload) restart ;; reload) ;; condrestart | try-restart) [ -f $LOCKFILE ] && restart || : ;; *) echo "Usage: $0 {start|stop|status|restart|force-reload|reload|condrestart|try-restart}" exit 1 ;; esac exit $? nordugrid-arc-5.4.2/src/hed/daemon/scripts/PaxHeaders.7502/arched.service.in0000644000000000000000000000012712754431715024707 xustar000000000000000027 mtime=1471296461.229277 30 atime=1513200653.015662064 30 ctime=1513200661.904770782 nordugrid-arc-5.4.2/src/hed/daemon/scripts/arched.service.in0000644000175000002070000000031012754431715024743 0ustar00mockbuildmock00000000000000[Unit] Description=ARC HED service After=local-fs.target remote-fs.target [Service] Type=forking PIDFile=/var/run/arched.pid ExecStart=@pkgdatadir@/arched-start [Install] WantedBy=multi-user.target nordugrid-arc-5.4.2/src/hed/daemon/scripts/PaxHeaders.7502/arched-start.in0000644000000000000000000000012712754431715024403 xustar000000000000000027 mtime=1471296461.229277 30 atime=1513200653.029662236 30 ctime=1513200661.901770745 nordugrid-arc-5.4.2/src/hed/daemon/scripts/arched-start.in0000755000175000002070000000171512754431715024454 0ustar00mockbuildmock00000000000000#!/bin/bash prog=arched RUN=yes OPTIONS="" # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/${prog} ]; then . /etc/sysconfig/${prog} elif [ -r /etc/default/${prog} ]; then .
/etc/default/${prog} fi if [ "x$ARC_CONFIG" != "x" ]; then OPTIONS="-c $ARC_CONFIG" fi # ARC_LOCATION ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then echo "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi export ARC_LOCATION if [ `id -u` = 0 ] ; then if [ "x$PID_FILE" = "x" ]; then PID_FILE=/var/run/$prog.pid fi else if [ "x$PID_FILE" = "x" ]; then PID_FILE=$HOME/$prog.pid fi fi if [ "$RUN" != "yes" ] ; then echo "$prog disabled in configuration" exit 0 fi CMD="$ARC_LOCATION/sbin/$prog" if [ ! -x "$CMD" ]; then echo "Missing executable" exit 1 fi exec $CMD $OPTIONS nordugrid-arc-5.4.2/src/hed/daemon/PaxHeaders.7502/schema0000644000000000000000000000013213214316025021146 xustar000000000000000030 mtime=1513200661.983771748 30 atime=1513200668.722854169 30 ctime=1513200661.983771748 nordugrid-arc-5.4.2/src/hed/daemon/schema/0000755000175000002070000000000013214316025021271 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/daemon/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711255700321023262 xustar000000000000000027 mtime=1253540049.444682 30 atime=1513200594.898951274 30 ctime=1513200661.979771699 nordugrid-arc-5.4.2/src/hed/daemon/schema/Makefile.am0000644000175000002070000000015311255700321023323 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = arcconfig.xsd base.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/daemon/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315722023273 xustar000000000000000030 mtime=1513200594.929951654 30 atime=1513200648.511606978 30 ctime=1513200661.980771711 nordugrid-arc-5.4.2/src/hed/daemon/schema/Makefile.in0000644000175000002070000004351413214315722023350 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE.
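Note on the arched scripts above: the SysV init script (arched.in) and the arched-start wrapper launched by arched.service both source /etc/sysconfig/arched (or /etc/default/arched) and honour the RUN, ARC_CONFIG, ARC_LOCATION and PID_FILE variables before starting $ARC_LOCATION/sbin/arched with "-c $ARC_CONFIG". The following shell sketch shows how a site might drive them; the sysconfig contents, the /etc/init.d location for initddir and the /etc/arc/arched.xml config path are illustrative assumptions, not values shipped by the package.

# /etc/sysconfig/arched -- hypothetical sysconfig fragment read by both scripts
RUN=yes                          # both scripts refuse to start arched unless RUN=yes
ARC_CONFIG=/etc/arc/arched.xml   # assumed config path; passed to arched as "-c $ARC_CONFIG"
#PID_FILE=/var/run/arched.pid    # default chosen by the scripts when unset and running as root

# SysV-style management, assuming initddir resolved to /etc/init.d at configure time
/etc/init.d/arched start
/etc/init.d/arched status
/etc/init.d/arched stop

# systemd-style management via the installed unit, which runs the arched-start wrapper
systemctl start arched.service
systemctl status arched.service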
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/daemon/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = 
@ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = 
@LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ 
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = arcconfig.xsd base.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/daemon/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/daemon/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | 
sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/daemon/schema/PaxHeaders.7502/arcconfig.xsd0000644000000000000000000000012411303750763023707 xustar000000000000000027 mtime=1259327987.735261 27 atime=1513200575.167709 30 ctime=1513200661.981771724 nordugrid-arc-5.4.2/src/hed/daemon/schema/arcconfig.xsd0000644000175000002070000000755611303750763023761 0ustar00mockbuildmock00000000000000 nordugrid-arc-5.4.2/src/hed/daemon/schema/PaxHeaders.7502/base.xsd0000644000000000000000000000012412137462315022665 xustar000000000000000027 mtime=1367237837.793471 27 atime=1513200575.167709 30 ctime=1513200661.983771748 nordugrid-arc-5.4.2/src/hed/daemon/schema/base.xsd0000644000175000002070000001613412137462315022737 0ustar00mockbuildmock00000000000000 This is the top level element of the ARC configuration document. Server-specific configuration options. Path to pid file. Indicates whether the server runs in foreground or daemon mode. If it runs in the foreground the log goes to standard error as well. arched will run under the specified user. If not specified, the effective user will not be changed. arched will run under the specified group. If not specified, the effective group will not be changed. Makes arched spawn an intermediate watchdog process which restarts the main process if it dies or gets stuck. The enumeration lists the supported log levels. An optional attribute called Domain makes it possible to assign a level to a specific domain. If Domain is missing or is "Arc", the level is assigned to the root logger and is inherited by all other domains unless they have a level specified explicitly. Logger configuration element. This element configures the file which should be used for storing log messages. Defines the maximal size of the logfile in bytes. Defines the maximal number of logfiles used in log rotation. Defines whether the logger opens the file before writing every record and closes it afterwards. The default behavior is to keep the file open continuously. Specifies the log level of the messages which should be logged.
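The annotation text above comes from base.xsd, which together with arcconfig.xsd is installed under $(pkgdatadir)/schema by the schema/Makefile.am shown earlier. A hedged sketch of checking a hand-written arched configuration against the installed schema files with libxml2's xmllint follows; the schema directory, the configuration path and the choice of base.xsd as the document's root schema are assumptions, not something the package guarantees.

# SCHEMA_DIR is an assumption for pkgdatadir/schema; substitute the value chosen at configure time
SCHEMA_DIR=/usr/share/arc/schema
CONFIG=/etc/arc/arched.xml        # hypothetical configuration written by the administrator
# base.xsd documents the top-level configuration element according to its annotations;
# switch to arcconfig.xsd if the root element is declared there instead
xmllint --noout --schema "$SCHEMA_DIR/base.xsd" "$CONFIG" \
  && echo "$CONFIG validates against base.xsd"
# a validated file can then be passed to arched with the -c option used by the init scripts,
# assuming arched was installed into /usr/sbin ($ARC_LOCATION/sbin in the scripts above)
/usr/sbin/arched -c "$CONFIG"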
nordugrid-arc-5.4.2/src/hed/daemon/PaxHeaders.7502/README0000644000000000000000000000012411001653037020643 xustar000000000000000027 mtime=1208440351.928622 27 atime=1513200575.165709 30 ctime=1513200661.865770305 nordugrid-arc-5.4.2/src/hed/daemon/README0000644000175000002070000000003011001653037020701 0ustar00mockbuildmock00000000000000source of arched daemon nordugrid-arc-5.4.2/src/hed/PaxHeaders.7502/dmc0000644000000000000000000000013013214316024017203 xustar000000000000000029 mtime=1513200660.74275657 30 atime=1513200668.722854169 29 ctime=1513200660.74275657 nordugrid-arc-5.4.2/src/hed/dmc/0000755000175000002070000000000013214316024017330 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/dmc/PaxHeaders.7502/acix0000644000000000000000000000013213214316024020131 xustar000000000000000030 mtime=1513200660.833757683 30 atime=1513200668.722854169 30 ctime=1513200660.833757683 nordugrid-arc-5.4.2/src/hed/dmc/acix/0000755000175000002070000000000013214316024020254 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/dmc/acix/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712267756665022300 xustar000000000000000027 mtime=1390402997.065635 30 atime=1513200595.158954454 30 ctime=1513200660.831757658 nordugrid-arc-5.4.2/src/hed/dmc/acix/Makefile.am0000644000175000002070000000135712267756665022350 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libdmcacix.la libdmcacix_la_SOURCES = DataPointACIX.cpp DataPointACIX.h libdmcacix_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) $(OPENSSL_CFLAGS) libdmcacix_la_LIBADD = \ $(top_builddir)/src/external/cJSON/libcjson.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(OPENSSL_LIBS) libdmcacix_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/dmc/acix/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315723022257 xustar000000000000000029 mtime=1513200595.20195498 30 atime=1513200649.127614512 30 ctime=1513200660.832757671 nordugrid-arc-5.4.2/src/hed/dmc/acix/Makefile.in0000644000175000002070000006256713214315723022346 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
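The acix Makefile.am above builds the ACIX data-point plugin as a libtool module (LDFLAGS -no-undefined -avoid-version -module) and installs it into $(pkglibdir) alongside the other DMCs. A small sketch of rebuilding and staging just this plugin from an already configured build tree; the staging directory and the exact installed file name are assumptions that depend on the configure options and platform.

# run from the top of a build tree that has already been configured
make -C src/hed/dmc/acix                                  # compiles DataPointACIX.cpp into libdmcacix.la
make -C src/hed/dmc/acix install DESTDIR=/tmp/arc-stage   # stage the module instead of installing system-wide
# the module lands under the package library directory; -avoid-version means no version suffix
find /tmp/arc-stage -name 'libdmcacix*'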
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/dmc/acix DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libdmcacix_la_DEPENDENCIES = \ $(top_builddir)/src/external/cJSON/libcjson.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) am_libdmcacix_la_OBJECTS = libdmcacix_la-DataPointACIX.lo libdmcacix_la_OBJECTS = $(am_libdmcacix_la_OBJECTS) libdmcacix_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libdmcacix_la_CXXFLAGS) \ $(CXXFLAGS) $(libdmcacix_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = 
$(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libdmcacix_la_SOURCES) DIST_SOURCES = $(libdmcacix_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = 
@GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = 
@TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libdmcacix.la libdmcacix_la_SOURCES = DataPointACIX.cpp DataPointACIX.h libdmcacix_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) $(OPENSSL_CFLAGS) libdmcacix_la_LIBADD = \ $(top_builddir)/src/external/cJSON/libcjson.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(OPENSSL_LIBS) libdmcacix_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case 
'$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/dmc/acix/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/dmc/acix/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libdmcacix.la: $(libdmcacix_la_OBJECTS) $(libdmcacix_la_DEPENDENCIES) $(libdmcacix_la_LINK) -rpath $(pkglibdir) $(libdmcacix_la_OBJECTS) $(libdmcacix_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdmcacix_la-DataPointACIX.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF 
$(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libdmcacix_la-DataPointACIX.lo: DataPointACIX.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcacix_la_CXXFLAGS) $(CXXFLAGS) -MT libdmcacix_la-DataPointACIX.lo -MD -MP -MF $(DEPDIR)/libdmcacix_la-DataPointACIX.Tpo -c -o libdmcacix_la-DataPointACIX.lo `test -f 'DataPointACIX.cpp' || echo '$(srcdir)/'`DataPointACIX.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdmcacix_la-DataPointACIX.Tpo $(DEPDIR)/libdmcacix_la-DataPointACIX.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataPointACIX.cpp' object='libdmcacix_la-DataPointACIX.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcacix_la_CXXFLAGS) $(CXXFLAGS) -c -o libdmcacix_la-DataPointACIX.lo `test -f 'DataPointACIX.cpp' || echo '$(srcdir)/'`DataPointACIX.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; 
\ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/dmc/acix/PaxHeaders.7502/DataPointACIX.cpp0000644000000000000000000000012312675602216023255 xustar000000000000000027 mtime=1459029134.924374 26 atime=1513200575.24471 30 ctime=1513200660.833757683 nordugrid-arc-5.4.2/src/hed/dmc/acix/DataPointACIX.cpp0000644000175000002070000003214512675602216023330 0ustar00mockbuildmock00000000000000#include #include #include #include #include "DataPointACIX.h" namespace ArcDMCACIX { using namespace Arc; Arc::Logger DataPointACIX::logger(Arc::Logger::getRootLogger(), "DataPoint.ACIX"); // Copied from DataPointHTTP. Should be put in common place static int http2errno(int http_code) { // Codes taken from RFC 2616 section 10. Only 4xx and 5xx are treated as errors switch(http_code) { case 400: case 405: case 411: case 413: case 414: case 415: case 416: case 417: return EINVAL; case 401: case 403: case 407: return EACCES; case 404: case 410: return ENOENT; case 406: case 412: return EARCRESINVAL; case 408: return ETIMEDOUT; case 409: // Conflict. Not sure about this one. case 500: case 502: case 503: case 504: return EARCSVCTMP; case 501: case 505: return EOPNOTSUPP; default: return EARCOTHER; } } DataPointACIX::DataPointACIX(const URL& url, const UserConfig& usercfg, PluginArgument* parg) : DataPointIndex(url, usercfg, parg), original_location_resolved(false) {} DataPointACIX::~DataPointACIX() {} Plugin* DataPointACIX::Instance(PluginArgument *arg) { DataPointPluginArgument *dmcarg = dynamic_cast(arg); if (!dmcarg) return NULL; if (((const URL&)(*dmcarg)).Protocol() != "acix") return NULL; // Change URL protocol to https and reconstruct URL so HTTP options are parsed std::string acix_url(((const URL&)(*dmcarg)).fullstr()); acix_url.replace(0, 4, "https"); return new DataPointACIX(URL(acix_url), *dmcarg, arg); } DataStatus DataPointACIX::Check(bool check_meta) { // If original location is set, check that if (original_location) { DataHandle h(original_location, usercfg); DataStatus r = h->Check(check_meta); if (!r) return r; SetMeta(*h); return DataStatus::Success; } // If not simply check that the file can be resolved DataStatus r = Resolve(true); if (r) return r; return DataStatus(DataStatus::CheckError, r.GetErrno(), r.GetDesc()); } DataStatus DataPointACIX::Resolve(bool source) { std::list urls(1, const_cast (this)); DataStatus r = Resolve(source, urls); if (!r) return r; if (!HaveLocations()) { logger.msg(VERBOSE, "No locations found for %s", url.str()); return DataStatus(DataStatus::ReadResolveError, ENOENT, "No valid locations found"); } return DataStatus::Success; } DataStatus DataPointACIX::Resolve(bool source, const std::list& urls) { // Contact ACIX to resolve cached replicas. Also resolve original replica // and add those locations to locations. if (!source) return DataStatus(DataStatus::WriteResolveError, ENOTSUP, "Writing to ACIX is not supported"); if (urls.empty()) return DataStatus::Success; // Resolving each original source can take a long time and exceed the DTR // processor timeout (see bug 3511). As a workaround limit the time spent // in this method to 30 mins. TODO: resolve original sources in bulk Time start_time; // Construct acix query URL, giving all urls as option. Assumes only one // URL is specified in each datapoint. 
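  // For illustration only (hypothetical host and file URLs, not taken from
  // this source): a bulk lookup for two files produces a single query such as
  //   https://cacheindex.example.org:6443/data/index?url=http://host1/file1,srm://host2/file2
  // i.e. the file URLs are joined with ',' into one "url" HTTP option of the
  // (https-converted) ACIX endpoint, so a single GET resolves cached replicas
  // for the whole batch.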
  std::list<std::string> urllist;
  for (std::list<DataPoint*>::const_iterator i = urls.begin(); i != urls.end(); ++i) {
    // This casting is needed to access url directly, as GetURL() returns
    // original_location
    DataPointACIX* dp = dynamic_cast<DataPointACIX*>(*i);
    URL lookupurl(uri_unencode(dp->url.HTTPOption("url")));
    if (!lookupurl || lookupurl.str().find(',') != std::string::npos) {
      logger.msg(ERROR, "Found none or multiple URLs (%s) in ACIX URL: %s", lookupurl.str(), dp->url.str());
      return DataStatus(DataStatus::ReadResolveError, EINVAL, "Invalid URLs specified");
    }
    urllist.push_back(lookupurl.str());
    // Now resolve original replica if any
    if (dp->original_location) {
      DataHandle origdp(dp->original_location, dp->usercfg);
      if (!origdp) {
        logger.msg(ERROR, "Cannot handle URL %s", dp->original_location.str());
        return DataStatus(DataStatus::ReadResolveError, EINVAL, "Invalid URL");
      }
      // If index resolve the location and add replicas to this datapoint
      if (origdp->IsIndex()) {
        // Check we are within the time limit
        if (Time() > Time(start_time + 1800)) {
          logger.msg(WARNING, "Could not resolve original source of %s: out of time", lookupurl.str());
        } else {
          DataStatus res = origdp->Resolve(true);
          if (!res) {
            // Just log a warning and continue. One of the main use cases of ACIX
            // is as a fallback when the original source is not available
            logger.msg(WARNING, "Could not resolve original source of %s: %s", lookupurl.str(), std::string(res));
          } else {
            // Add replicas found from resolving original replica
            for (; origdp->LocationValid(); origdp->NextLocation()) {
              dp->AddLocation(origdp->CurrentLocation(), origdp->CurrentLocation().ConnectionURL());
            }
          }
        }
      } else {
        dp->AddLocation(dp->original_location, dp->original_location.ConnectionURL());
      }
    }
    dp->original_location_resolved = true;
  }
  URL queryURL(url);
  queryURL.AddHTTPOption("url", Arc::join(urllist, ","), true);
  logger.msg(INFO, "Querying ACIX server at %s", queryURL.ConnectionURL());
  logger.msg(DEBUG, "Calling acix with query %s", queryURL.plainstr());
  std::string content;
  DataStatus res = queryACIX(content, queryURL.FullPath());
  // It should not be an error when ACIX is unavailable
  if (!res) {
    logger.msg(WARNING, "Failed to query ACIX: %s", std::string(res));
  } else {
    res = parseLocations(content, urls);
    if (!res) {
      logger.msg(WARNING, "Failed to parse ACIX response: %s", std::string(res));
    }
  }
  return DataStatus::Success;
}

DataStatus DataPointACIX::Stat(FileInfo& file, DataPoint::DataPointInfoType verb) {
  std::list<FileInfo> files;
  std::list<DataPoint*> urls(1, const_cast<DataPointACIX*> (this));
  DataStatus r = Stat(files, urls, verb);
  if (!r) {
    return r;
  }
  if (files.empty() || !files.front()) {
    return DataStatus(DataStatus::StatError, EARCRESINVAL, "No results returned");
  }
  file = files.front();
  return DataStatus::Success;
}

DataStatus DataPointACIX::Stat(std::list<FileInfo>& files, const std::list<DataPoint*>& urls, DataPointInfoType verb) {
  files.clear();
  DataStatus r = Resolve(true, urls);
  if (!r) {
    return DataStatus(DataStatus::StatError, r.GetErrno(), r.GetDesc());
  }
  for (std::list<DataPoint*>::const_iterator f = urls.begin(); f != urls.end(); ++f) {
    FileInfo info;
    if ((*f)->HaveLocations()) {
      // Only name and replicas are available
      info.SetName((*f)->GetURL().HTTPOption("url"));
      for (; (*f)->LocationValid(); (*f)->NextLocation()) {
        info.AddURL((*f)->CurrentLocation());
      }
    }
    files.push_back(info);
  }
  return DataStatus::Success;
}

DataStatus DataPointACIX::PreRegister(bool replication, bool force) {
  return DataStatus(DataStatus::PreRegisterError, ENOTSUP, "Writing to ACIX is not supported");
}

DataStatus DataPointACIX::PostRegister(bool replication) {
return DataStatus(DataStatus::PostRegisterError, ENOTSUP, "Writing to ACIX is not supported"); } DataStatus DataPointACIX::PreUnregister(bool replication) { return DataStatus(DataStatus::UnregisterError, ENOTSUP, "Deleting from ACIX is not supported"); } DataStatus DataPointACIX::Unregister(bool all) { return DataStatus(DataStatus::UnregisterError, ENOTSUP, "Deleting from ACIX is not supported"); } DataStatus DataPointACIX::List(std::list& files, DataPoint::DataPointInfoType verb) { return DataStatus(DataStatus::ListError, ENOTSUP, "Listing in ACIX is not supported"); } DataStatus DataPointACIX::CreateDirectory(bool with_parents) { return DataStatus(DataStatus::CreateDirectoryError, ENOTSUP, "Creating directories in ACIX is not supported"); } DataStatus DataPointACIX::Rename(const URL& newurl) { return DataStatus(DataStatus::RenameError, ENOTSUP, "Renaming in ACIX is not supported"); } DataStatus DataPointACIX::AddLocation(const URL& urlloc, const std::string& meta) { if (!original_location && !original_location_resolved) { original_location = URLLocation(urlloc); // Add any URL options to the acix URL for (std::map::const_iterator opt = original_location.Options().begin(); opt != original_location.Options().end(); ++opt) { url.AddOption(opt->first, opt->second); } return DataStatus::Success; } return DataPointIndex::AddLocation(urlloc, meta); } const URL& DataPointACIX::GetURL() const { if (original_location) return original_location; return url; } std::string DataPointACIX::str() const { if (original_location) return original_location.str(); return url.str(); } DataStatus DataPointACIX::queryACIX(std::string& content, const std::string& path) const { MCCConfig cfg; usercfg.ApplyToConfig(cfg); ClientHTTP client(cfg, url, usercfg.Timeout()); client.RelativeURI(true); // twisted (ACIX server) doesn't support GET with full URL HTTPClientInfo transfer_info; PayloadRaw request; PayloadRawInterface *response = NULL; MCC_Status r = client.process("GET", path, &request, &transfer_info, &response); if (!r) { return DataStatus(DataStatus::ReadResolveError, "Failed to contact server: " + r.getExplanation()); } if (transfer_info.code != 200) { return DataStatus(DataStatus::ReadResolveError, http2errno(transfer_info.code), "HTTP error when contacting server: %s" + transfer_info.reason); } PayloadStreamInterface* instream = NULL; try { instream = dynamic_cast(dynamic_cast(response)); } catch(std::exception& e) { return DataStatus(DataStatus::ReadResolveError, "Unexpected response from server"); } if (!instream) { return DataStatus(DataStatus::ReadResolveError, "Unexpected response from server"); } content.clear(); std::string buf; while (instream->Get(buf)) content += buf; logger.msg(DEBUG, "ACIX returned %s", content); return DataStatus::Success; } DataStatus DataPointACIX::parseLocations(const std::string& content, const std::list& urls) const { // parse JSON {url: [loc1, loc2,...]} cJSON *root = cJSON_Parse(content.c_str()); if (!root) { logger.msg(ERROR, "Failed to parse ACIX response: %s", content); return DataStatus(DataStatus::ReadResolveError, "Failed to parse ACIX response"); } for (std::list::const_iterator i = urls.begin(); i != urls.end(); ++i) { // This casting is needed to access url directly, as GetURL() returns // original_location DataPointACIX* dp = dynamic_cast(*i); std::string urlstr = URL(uri_unencode(dp->url.HTTPOption("url"))).str(); cJSON *urlinfo = cJSON_GetObjectItem(root, urlstr.c_str()); if (!urlinfo) { logger.msg(WARNING, "No locations for %s", urlstr); continue; } cJSON 
*locinfo = urlinfo->child; while (locinfo) { std::string loc = std::string(locinfo->valuestring); logger.msg(INFO, "%s: ACIX Location: %s", urlstr, loc); if (loc.find("://") == std::string::npos) { logger.msg(INFO, "%s: Location %s not accessible remotely, skipping", urlstr, loc); } else { URL fullloc(loc + '/' + urlstr); // Add URL options to replicas for (std::map::const_iterator opt = dp->url.CommonLocOptions().begin(); opt != dp->url.CommonLocOptions().end(); opt++) fullloc.AddOption(opt->first, opt->second, false); for (std::map::const_iterator opt = dp->url.Options().begin(); opt != dp->url.Options().end(); opt++) fullloc.AddOption(opt->first, opt->second, false); dp->AddLocation(fullloc, loc); } locinfo = locinfo->next; } if (!dp->HaveLocations()) { logger.msg(WARNING, "No locations found for %s", dp->url.str()); } } cJSON_Delete(root); return DataStatus::Success; } } // namespace ArcDMCACIX extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "acix", "HED:DMC", "ARC Cache Index", 0, &ArcDMCACIX::DataPointACIX::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/dmc/acix/PaxHeaders.7502/DataPointACIX.h0000644000000000000000000000012312272725062022720 xustar000000000000000027 mtime=1391176242.555562 26 atime=1513200575.24671 30 ctime=1513200660.833757683 nordugrid-arc-5.4.2/src/hed/dmc/acix/DataPointACIX.h0000644000175000002070000000562112272725062022772 0ustar00mockbuildmock00000000000000#ifndef __ARC_DATAPOINTACIX_H__ #define __ARC_DATAPOINTACIX_H__ #include #include #include namespace ArcDMCACIX { /** * ACIX is the ARC Cache Index. This is a service which maps file URLs to ARC * CEs where the file is cached. These CEs can be configured to expose the * cache content through the A-REX HTTP interface and thus cached files can * be downloaded from the CEs. * * ACIX is special in that it can be used in addition to the regular file * location. Since the ARC data library cannot handle two separate possible * sources for a file, in order to use the original source location and ACIX * location the original source should be added to the ACIX datapoint using * AddLocation() before calling any other methods. Resolve() resolves both * the ACIX locations and the original locations (if it is an index URL). * Then the locations of the DataPointACIX will be both the ACIX locations * and the (resolved) original location(s). Only the cache locations which * are accessible from the outside are considered. 
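 *
 * A minimal usage sketch of the AddLocation()-then-Resolve() pattern described
 * above (the host name, file URL, option layout and include paths here are
 * illustrative assumptions, not taken from this package):
 * @code
 * #include <iostream>
 * #include <arc/URL.h>
 * #include <arc/UserConfig.h>
 * #include <arc/data/DataHandle.h>
 *
 * Arc::UserConfig usercfg;
 * // The acix:// URL carries the looked-up file URL in its "url" HTTP option
 * Arc::URL acixurl("acix://cacheindex.example.org/data/index?url=http://example.org/file1");
 * Arc::DataHandle dp(acixurl, usercfg);
 * // Optionally register the original replica so Resolve() also resolves it
 * dp->AddLocation(Arc::URL("http://example.org/file1"), "http://example.org/file1");
 * if (dp->Resolve(true))
 *   for (; dp->LocationValid(); dp->NextLocation())
 *     std::cout << dp->CurrentLocation().str() << std::endl;
 * @endcode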
*/ class DataPointACIX : public Arc::DataPointIndex { public: DataPointACIX(const Arc::URL& url, const Arc::UserConfig& usercfg, Arc::PluginArgument* parg); ~DataPointACIX(); static Plugin* Instance(Arc::PluginArgument *arg); virtual Arc::DataStatus Resolve(bool source); virtual Arc::DataStatus Resolve(bool source, const std::list& urls); virtual Arc::DataStatus Check(bool check_meta); virtual Arc::DataStatus PreRegister(bool replication, bool force = false); virtual Arc::DataStatus PostRegister(bool replication); virtual Arc::DataStatus PreUnregister(bool replication); virtual Arc::DataStatus Unregister(bool all); virtual Arc::DataStatus Stat(Arc::FileInfo& file, Arc::DataPoint::DataPointInfoType verb = INFO_TYPE_ALL); virtual Arc::DataStatus Stat(std::list& files, const std::list& urls, Arc::DataPoint::DataPointInfoType verb = INFO_TYPE_ALL); virtual Arc::DataStatus List(std::list& files, Arc::DataPoint::DataPointInfoType verb = INFO_TYPE_ALL); virtual Arc::DataStatus CreateDirectory(bool with_parents=false); virtual Arc::DataStatus Rename(const Arc::URL& newurl); virtual Arc::DataStatus AddLocation(const Arc::URL& url, const std::string& meta); virtual const Arc::URL& GetURL() const; virtual std::string str() const; protected: static Arc::Logger logger; Arc::URLLocation original_location; bool original_location_resolved; private: Arc::DataStatus queryACIX(std::string& content, const std::string& path) const; Arc::DataStatus parseLocations(const std::string& content, const std::list& urls) const; }; } // namespace ArcDMCACIX #endif /* __ARC_DATAPOINTACIX_H__ */ nordugrid-arc-5.4.2/src/hed/dmc/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712675602216021335 xustar000000000000000027 mtime=1459029134.924374 30 atime=1513200595.110953867 30 ctime=1513200660.730756423 nordugrid-arc-5.4.2/src/hed/dmc/Makefile.am0000644000175000002070000000070412675602216021400 0ustar00mockbuildmock00000000000000if GRIDFTP_ENABLED GRIDFTP_SD = gridftp endif if LDAP_ENABLED LDAP_SD = ldap endif if SRM_DMC_ENABLED SRM_SD=srm endif if XROOTD_ENABLED XROOTD_SD=xrootd endif if GFAL_ENABLED GFAL_SD=gfal endif if MOCK_DMC_ENABLED MOCK_SD=mock endif if S3_DMC_ENABLED S3_SD=s3 endif SUBDIRS = http file rucio acix $(LDAP_SD) $(GRIDFTP_SD) $(SRM_SD) $(XROOTD_SD) $(GFAL_SD) $(MOCK_SD) $(S3_SD) DIST_SUBDIRS = http file rucio acix s3 ldap gridftp srm xrootd gfal mock nordugrid-arc-5.4.2/src/hed/dmc/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315723021334 xustar000000000000000030 mtime=1513200595.143954271 30 atime=1513200648.974612641 30 ctime=1513200660.732756448 nordugrid-arc-5.4.2/src/hed/dmc/Makefile.in0000644000175000002070000005626613214315723021421 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/dmc DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ 
ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ 
LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = 
@libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @GRIDFTP_ENABLED_TRUE@GRIDFTP_SD = gridftp @LDAP_ENABLED_TRUE@LDAP_SD = ldap @SRM_DMC_ENABLED_TRUE@SRM_SD = srm @XROOTD_ENABLED_TRUE@XROOTD_SD = xrootd @GFAL_ENABLED_TRUE@GFAL_SD = gfal @MOCK_DMC_ENABLED_TRUE@MOCK_SD = mock @S3_DMC_ENABLED_TRUE@S3_SD = s3 SUBDIRS = http file rucio acix $(LDAP_SD) $(GRIDFTP_SD) $(SRM_SD) $(XROOTD_SD) $(GFAL_SD) $(MOCK_SD) $(S3_SD) DIST_SUBDIRS = http file rucio acix s3 ldap gridftp srm xrootd gfal mock all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/dmc/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/dmc/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/dmc/PaxHeaders.7502/mock0000644000000000000000000000013213214316025020137 xustar000000000000000030 mtime=1513200661.005759787 30 atime=1513200668.722854169 30 ctime=1513200661.005759787 nordugrid-arc-5.4.2/src/hed/dmc/mock/0000755000175000002070000000000013214316025020262 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/dmc/mock/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712060064711022254 xustar000000000000000027 mtime=1354787273.850425 30 atime=1513200595.521958894 30 ctime=1513200661.003759762 nordugrid-arc-5.4.2/src/hed/dmc/mock/Makefile.am0000644000175000002070000000100512060064711022312 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libdmcmock.la libdmcmock_la_SOURCES = DataPointMock.cpp DataPointMock.h libdmcmock_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) libdmcmock_la_LIBADD = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libdmcmock_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/dmc/mock/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315723022264 xustar000000000000000029 mtime=1513200595.56495942 30 atime=1513200649.111614317 30 ctime=1513200661.004759774 nordugrid-arc-5.4.2/src/hed/dmc/mock/Makefile.in0000644000175000002070000006167613214315723022353 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. 
# @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/dmc/mock DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libdmcmock_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libdmcmock_la_OBJECTS = libdmcmock_la-DataPointMock.lo libdmcmock_la_OBJECTS = $(am_libdmcmock_la_OBJECTS) libdmcmock_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ 
$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libdmcmock_la_CXXFLAGS) \ $(CXXFLAGS) $(libdmcmock_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libdmcmock_la_SOURCES) DIST_SOURCES = $(libdmcmock_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = 
@EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ 
RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libdmcmock.la libdmcmock_la_SOURCES = DataPointMock.cpp DataPointMock.h libdmcmock_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) libdmcmock_la_LIBADD = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libdmcmock_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: 
.SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/dmc/mock/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/dmc/mock/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libdmcmock.la: $(libdmcmock_la_OBJECTS) $(libdmcmock_la_DEPENDENCIES) $(libdmcmock_la_LINK) -rpath $(pkglibdir) $(libdmcmock_la_OBJECTS) $(libdmcmock_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdmcmock_la-DataPointMock.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ 
@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libdmcmock_la-DataPointMock.lo: DataPointMock.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcmock_la_CXXFLAGS) $(CXXFLAGS) -MT libdmcmock_la-DataPointMock.lo -MD -MP -MF $(DEPDIR)/libdmcmock_la-DataPointMock.Tpo -c -o libdmcmock_la-DataPointMock.lo `test -f 'DataPointMock.cpp' || echo '$(srcdir)/'`DataPointMock.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdmcmock_la-DataPointMock.Tpo $(DEPDIR)/libdmcmock_la-DataPointMock.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataPointMock.cpp' object='libdmcmock_la-DataPointMock.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcmock_la_CXXFLAGS) $(CXXFLAGS) -c -o libdmcmock_la-DataPointMock.lo `test -f 'DataPointMock.cpp' || echo '$(srcdir)/'`DataPointMock.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ 
case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. 
# Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/dmc/mock/PaxHeaders.7502/DataPointMock.cpp0000644000000000000000000000012412675602216023430 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.270711 30 ctime=1513200661.004759774 nordugrid-arc-5.4.2/src/hed/dmc/mock/DataPointMock.cpp0000644000175000002070000000524412675602216023502 0ustar00mockbuildmock00000000000000#include #include #include "DataPointMock.h" #include namespace ArcDMCMock { using namespace Arc; DataPointMock::DataPointMock(const URL& url, const UserConfig& usercfg, PluginArgument* parg) : DataPointDirect(url, usercfg, parg) {} DataPointMock::~DataPointMock() {} DataStatus DataPointMock::StartReading(DataBuffer& buffer) { sleep(10); buffer.eof_read(true); if (url.Protocol() == "fail") return DataStatus::ReadStartError; return DataStatus::Success; } DataStatus DataPointMock::StartWriting(DataBuffer& buffer, DataCallback *) { sleep(10); buffer.eof_write(true); if (url.Protocol() == "fail") return DataStatus::WriteStartError; return DataStatus::Success; } DataStatus DataPointMock::StopReading() { if (url.Protocol() == "fail") return DataStatus::ReadStopError; return DataStatus::Success; } DataStatus DataPointMock::StopWriting() { if (url.Protocol() == "fail") return DataStatus::WriteStopError; return DataStatus::Success; } DataStatus DataPointMock::Check(bool) { sleep(1); if (url.Protocol() == "fail") return DataStatus::CheckError; return DataStatus::Success; } DataStatus DataPointMock::Stat(FileInfo&, DataPointInfoType) { sleep(1); if (url.Protocol() == "fail") return DataStatus::StatError; return DataStatus::Success; } DataStatus DataPointMock::List(std::list&, DataPointInfoType) { sleep(1); if (url.Protocol() == "fail") return DataStatus::ListError; return DataStatus::Success; } DataStatus DataPointMock::Remove() { sleep(1); if (url.Protocol() == "fail") return DataStatus::DeleteError; return DataStatus::Success; } DataStatus DataPointMock::CreateDirectory(bool) { sleep(1); if (url.Protocol() == "fail") return DataStatus::CreateDirectoryError; return DataStatus::Success; } DataStatus DataPointMock::Rename(const URL&) { sleep(1); if (url.Protocol() == "fail") return DataStatus::RenameError; return DataStatus::Success; } Plugin* DataPointMock::Instance(PluginArgument *arg) { DataPointPluginArgument *dmcarg = dynamic_cast(arg); if (!dmcarg) return NULL; if (((const URL &)(*dmcarg)).Protocol() != "mock" && ((const URL &)(*dmcarg)).Protocol() != "fail") return NULL; return new DataPointMock(*dmcarg, *dmcarg, dmcarg); } } // namespace ArcDMCMock extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "mock", "HED:DMC", "Dummy protocol", 0, &ArcDMCMock::DataPointMock::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/dmc/mock/PaxHeaders.7502/DataPointMock.h0000644000000000000000000000012412074027430023065 xustar000000000000000027 mtime=1357917976.740806 27 atime=1513200575.270711 30 ctime=1513200661.005759787 nordugrid-arc-5.4.2/src/hed/dmc/mock/DataPointMock.h0000644000175000002070000000267412074027430023143 0ustar00mockbuildmock00000000000000#ifndef DATAPOINTMOCK_H_ #define DATAPOINTMOCK_H_ #include namespace ArcDMCMock { using namespace Arc; /// Mock data point which does not do anything but sleep for each operation. /** * If the URL protocol is mock:// then each method returns * DataStatus::Success. If it is fail:// then each method returns an error. 
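 *
 * A minimal usage sketch (illustrative only; it assumes the generic
 * Arc::DataHandle / Arc::UserConfig API used for the other DMC plugins and is
 * not part of this header):
 *
 *   Arc::UserConfig usercfg;
 *   Arc::DataHandle h(Arc::URL("mock://host/file"), usercfg);
 *   Arc::FileInfo info;
 *   if (h) h->Stat(info);  // sleeps ~1 s, then returns DataStatus::Success
 *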
* This plugin is not built by default - to build it the option * --enable-mock-dmc must be passed to configure. */ class DataPointMock: public DataPointDirect { public: DataPointMock(const URL& url, const UserConfig& usercfg, PluginArgument* parg); virtual ~DataPointMock(); static Plugin* Instance(PluginArgument *arg); virtual DataStatus StartReading(DataBuffer& buffer); virtual DataStatus StartWriting(DataBuffer& buffer, DataCallback *space_cb = NULL); virtual DataStatus StopReading(); virtual DataStatus StopWriting(); virtual DataStatus Check(bool check_meta); virtual DataStatus Stat(FileInfo& file, DataPointInfoType verb = INFO_TYPE_ALL); virtual DataStatus List(std::list& files, DataPointInfoType verb = INFO_TYPE_ALL); virtual DataStatus Remove(); virtual DataStatus CreateDirectory(bool with_parents=false); virtual DataStatus Rename(const URL& newurl); virtual bool RequiresCredentials() const { return false; } }; } // namespace ArcDMCMock #endif /* DATAPOINTMOCK_H_ */ nordugrid-arc-5.4.2/src/hed/dmc/PaxHeaders.7502/http0000644000000000000000000000012613214316024020167 xustar000000000000000028 mtime=1513200660.7697569 30 atime=1513200668.722854169 28 ctime=1513200660.7697569 nordugrid-arc-5.4.2/src/hed/dmc/http/0000755000175000002070000000000013214316024020307 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/dmc/http/PaxHeaders.7502/DataPointHTTP.h0000644000000000000000000000012312502505032022773 xustar000000000000000027 mtime=1426754074.711351 26 atime=1513200575.21271 30 ctime=1513200660.766756863 nordugrid-arc-5.4.2/src/hed/dmc/http/DataPointHTTP.h0000644000175000002070000000501512502505032023042 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARCDMCHTTP_DATAPOINTHTTP_H__ #define __ARCDMCHTTP_DATAPOINTHTTP_H__ #include #include #include namespace ArcDMCHTTP { using namespace Arc; class ChunkControl; /** * This class allows access through HTTP to remote resources. HTTP over SSL * (HTTPS) and HTTP over GSI (HTTPG) are also supported. * * This class is a loadable module and cannot be used directly. The DataHandle * class loads modules at runtime and should be used instead of this. 
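 *
 * A minimal usage sketch (illustrative only; it assumes the Arc::DataHandle
 * API referred to above, and the URL is just a placeholder):
 *
 *   Arc::UserConfig usercfg;
 *   Arc::DataHandle h(Arc::URL("https://example.org/data/"), usercfg);
 *   std::list<Arc::FileInfo> files;
 *   if (h && h->List(files))
 *     for (std::list<Arc::FileInfo>::const_iterator f = files.begin();
 *          f != files.end(); ++f)
 *       std::cout << f->GetName() << std::endl;
 *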
*/ class DataPointHTTP : public DataPointDirect { public: DataPointHTTP(const URL& url, const UserConfig& usercfg, PluginArgument* parg); virtual ~DataPointHTTP(); static Plugin* Instance(PluginArgument *arg); virtual bool SetURL(const URL& url); virtual DataStatus Check(bool check_meta); virtual DataStatus Remove(); virtual DataStatus CreateDirectory(bool with_parents=false) { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); }; virtual DataStatus Rename(const URL& url); virtual DataStatus Stat(FileInfo& file, DataPointInfoType verb = INFO_TYPE_ALL); virtual DataStatus List(std::list& files, DataPointInfoType verb = INFO_TYPE_ALL); virtual DataStatus StartReading(DataBuffer& buffer); virtual DataStatus StartWriting(DataBuffer& buffer, DataCallback *space_cb = NULL); virtual DataStatus StopReading(); virtual DataStatus StopWriting(); virtual bool RequiresCredentials() const { return url.Protocol() != "http"; }; private: static void read_thread(void *arg); static bool read_single(void *arg); static void write_thread(void *arg); static bool write_single(void *arg); DataStatus do_stat_http(URL& curl, FileInfo& file); DataStatus do_stat_webdav(URL& curl, FileInfo& file); DataStatus do_list_webdav(URL& rurl, std::list& files, DataPointInfoType verb); ClientHTTP* acquire_client(const URL& curl); ClientHTTP* acquire_new_client(const URL& curl); void release_client(const URL& curl, ClientHTTP* client); /// Convert HTTP return code to errno int http2errno(int http_code) const; static Logger logger; bool reading; bool writing; ChunkControl *chunks; std::multimap clients; SimpleCounter transfers_started; int transfers_tofinish; Glib::Mutex transfer_lock; Glib::Mutex clients_lock; }; } // namespace Arc #endif // __ARCDMCHTTP_DATAPOINTHTTP_H__ nordugrid-arc-5.4.2/src/hed/dmc/http/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515022306 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200595.399957402 30 ctime=1513200660.763756827 nordugrid-arc-5.4.2/src/hed/dmc/http/Makefile.am0000644000175000002070000000117112052416515022350 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libdmchttp.la libdmchttp_la_SOURCES = DataPointHTTP.cpp DataPointHTTP.h StreamBuffer.cpp StreamBuffer.h libdmchttp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libdmchttp_la_LIBADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libdmchttp_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/dmc/http/PaxHeaders.7502/StreamBuffer.cpp0000644000000000000000000000012312012153377023337 xustar000000000000000027 mtime=1344853759.361249 26 atime=1513200575.21271 30 ctime=1513200660.768756888 nordugrid-arc-5.4.2/src/hed/dmc/http/StreamBuffer.cpp0000644000175000002070000000443212012153377023410 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include "StreamBuffer.h" namespace ArcDMCHTTP { using namespace Arc; StreamBuffer::StreamBuffer(DataBuffer& buffer):buffer_(buffer) { buffer_handle_ = -1; buffer_length_ = 0; buffer_offset_ = 0; current_offset_ = 0; current_size_ = 0; } StreamBuffer::~StreamBuffer(void) { if(buffer_handle_ >= 0) { buffer_.is_notwritten(buffer_handle_); buffer_handle_ = 
-1; }; } bool StreamBuffer::Get(char* buf,int& size) { if(buffer_handle_ < 0) { // buffer need to be obtained bool r = buffer_.for_write(buffer_handle_,buffer_length_,buffer_offset_,true); if(!r) return false; if(buffer_offset_ != current_offset_) { buffer_.is_notwritten(buffer_handle_); buffer_handle_ = -1; buffer_.error_write(true); return false; }; }; // buffer is already obtained unsigned long long int bufend = buffer_offset_ + buffer_length_; unsigned long long int bufsize = bufend - current_offset_; if(bufend > current_size_) current_size_ = bufend; if(bufsize > size) bufsize = size; ::memcpy(buf,buffer_[buffer_handle_],bufsize); size = bufsize; current_offset_ += bufsize; if(current_offset_ >= bufend) { buffer_.is_written(buffer_handle_); buffer_handle_ = -1; } return true; } bool StreamBuffer::Put(const char* buf,Size_t size) { // This implementation is unidirectonal (yet?) return false; } StreamBuffer::operator bool(void) { return (bool)buffer_; } bool StreamBuffer::operator!(void) { return !(bool)buffer_; } int StreamBuffer::Timeout(void) const { return -1; } void StreamBuffer::Timeout(int /*to*/) { } PayloadStreamInterface::Size_t StreamBuffer::Pos(void) const { return (PayloadStreamInterface::Size_t)current_offset_; } PayloadStreamInterface::Size_t StreamBuffer::Size(void) const { return (PayloadStreamInterface::Size_t)current_size_; } PayloadStreamInterface::Size_t StreamBuffer::Limit(void) const { return (PayloadStreamInterface::Size_t)current_size_; } void StreamBuffer::Size(PayloadStreamInterface::Size_t size) { if(size > current_size_) current_size_ = size; } } // namespace ArcDMCHTTP nordugrid-arc-5.4.2/src/hed/dmc/http/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315723022313 xustar000000000000000030 mtime=1513200595.444957952 30 atime=1513200649.017613167 30 ctime=1513200660.764756839 nordugrid-arc-5.4.2/src/hed/dmc/http/Makefile.in0000644000175000002070000006447013214315723022374 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/dmc/http DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libdmchttp_la_DEPENDENCIES = $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libdmchttp_la_OBJECTS = libdmchttp_la-DataPointHTTP.lo \ libdmchttp_la-StreamBuffer.lo libdmchttp_la_OBJECTS = $(am_libdmchttp_la_OBJECTS) libdmchttp_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libdmchttp_la_CXXFLAGS) \ $(CXXFLAGS) $(libdmchttp_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libdmchttp_la_SOURCES) DIST_SOURCES = $(libdmchttp_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = 
@GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = 
@XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libdmchttp.la libdmchttp_la_SOURCES = DataPointHTTP.cpp DataPointHTTP.h StreamBuffer.cpp StreamBuffer.h libdmchttp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libdmchttp_la_LIBADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libdmchttp_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign 
src/hed/dmc/http/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/dmc/http/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libdmchttp.la: $(libdmchttp_la_OBJECTS) $(libdmchttp_la_DEPENDENCIES) $(libdmchttp_la_LINK) -rpath $(pkglibdir) $(libdmchttp_la_OBJECTS) $(libdmchttp_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdmchttp_la-DataPointHTTP.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdmchttp_la-StreamBuffer.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' 
object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libdmchttp_la-DataPointHTTP.lo: DataPointHTTP.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmchttp_la_CXXFLAGS) $(CXXFLAGS) -MT libdmchttp_la-DataPointHTTP.lo -MD -MP -MF $(DEPDIR)/libdmchttp_la-DataPointHTTP.Tpo -c -o libdmchttp_la-DataPointHTTP.lo `test -f 'DataPointHTTP.cpp' || echo '$(srcdir)/'`DataPointHTTP.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdmchttp_la-DataPointHTTP.Tpo $(DEPDIR)/libdmchttp_la-DataPointHTTP.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataPointHTTP.cpp' object='libdmchttp_la-DataPointHTTP.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmchttp_la_CXXFLAGS) $(CXXFLAGS) -c -o libdmchttp_la-DataPointHTTP.lo `test -f 'DataPointHTTP.cpp' || echo '$(srcdir)/'`DataPointHTTP.cpp libdmchttp_la-StreamBuffer.lo: StreamBuffer.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmchttp_la_CXXFLAGS) $(CXXFLAGS) -MT libdmchttp_la-StreamBuffer.lo -MD -MP -MF $(DEPDIR)/libdmchttp_la-StreamBuffer.Tpo -c -o libdmchttp_la-StreamBuffer.lo `test -f 'StreamBuffer.cpp' || echo '$(srcdir)/'`StreamBuffer.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdmchttp_la-StreamBuffer.Tpo $(DEPDIR)/libdmchttp_la-StreamBuffer.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='StreamBuffer.cpp' object='libdmchttp_la-StreamBuffer.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmchttp_la_CXXFLAGS) $(CXXFLAGS) -c -o libdmchttp_la-StreamBuffer.lo `test -f 'StreamBuffer.cpp' || echo '$(srcdir)/'`StreamBuffer.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) 
$(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/dmc/http/PaxHeaders.7502/DataPointHTTP.cpp0000644000000000000000000000012213153453750023341 xustar000000000000000026 mtime=1504597992.39874 26 atime=1513200575.20271 30 ctime=1513200660.765756851 nordugrid-arc-5.4.2/src/hed/dmc/http/DataPointHTTP.cpp0000644000175000002070000015652413153453750023425 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #define __STDC_LIMIT_MACROS #ifdef HAVE_STDINT_H #include #endif #include #include #include #include #include #include #include #include #include #include "StreamBuffer.h" #include "DataPointHTTP.h" namespace ArcDMCHTTP { using namespace Arc; // TODO: filling failure_code in multiple threads is potentailly dangerous // because it is accessible through public method. Maybe it would be // safer to fill it in StopReading/StopWriting Logger DataPointHTTP::logger(Logger::getRootLogger(), "DataPoint.HTTP"); typedef struct { DataPointHTTP *point; } HTTPInfo_t; class ChunkControl { private: typedef struct { uint64_t start; uint64_t end; } chunk_t; std::list chunks_; Glib::Mutex lock_; public: ChunkControl(uint64_t size = UINT64_MAX); ~ChunkControl(); // Get chunk to be transferred. On input 'length' // contains maximal acceptable chunk size. bool Get(uint64_t& start, uint64_t& length); // Report chunk transferred. It may be _different_ // from one obtained through Get(). void Claim(uint64_t start, uint64_t length); void Claim(uint64_t length); // Report chunk not transferred. It must be // _same_ as one obtained by Get(). 
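  // An illustrative call sequence (not code from this file; the variable
  // names and sizes are arbitrary):
  //   uint64_t start, len = 1024 * 1024;   // accept at most 1 MiB
  //   if (chunk_control.Get(start, len)) { // reserve [start, start+len)
  //     // ... transfer that range; then either
  //     chunk_control.Claim(start, len);   //   mark it transferred, or
  //     chunk_control.Unclaim(start, len); //   return it for a retry
  //   }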
void Unclaim(uint64_t start, uint64_t length); }; class PayloadMemConst : public PayloadRawInterface { private: char *buffer_; uint64_t begin_; uint64_t end_; uint64_t size_; public: PayloadMemConst(void *buffer, uint64_t offset, unsigned int length, uint64_t size = 0) : buffer_((char*)buffer), begin_(offset), end_(offset + length), size_(size) {} virtual ~PayloadMemConst() {} virtual char operator[](Size_t pos) const { if (!buffer_) return 0; if (pos < begin_) return 0; if (pos >= end_) return 0; return buffer_[pos - begin_]; } virtual char* Content(Size_t pos = -1) { if (!buffer_) return NULL; if (pos < begin_) return NULL; if (pos >= end_) return NULL; return buffer_ + (pos - begin_); } virtual Size_t Size() const { return size_; } virtual char* Insert(Size_t /* pos */ = 0, Size_t /* size */ = 0) { return NULL; } virtual char* Insert(const char* /* s */, Size_t /* pos */ = 0, Size_t /* size */ = 0) { return NULL; } virtual char* Buffer(unsigned int num) { if (num != 0) return NULL; return buffer_; } virtual Size_t BufferSize(unsigned int num) const { if (!buffer_) return 0; if (num != 0) return 0; return end_ - begin_; } virtual Size_t BufferPos(unsigned int num) const { if (!buffer_) return 0; if (num != 0) return 0; return begin_; } virtual bool Truncate(Size_t /* size */) { return false; } }; ChunkControl::ChunkControl(uint64_t size) { chunk_t chunk = { 0, size }; chunks_.push_back(chunk); } ChunkControl::~ChunkControl() {} bool ChunkControl::Get(uint64_t& start, uint64_t& length) { if (length == 0) return false; lock_.lock(); std::list::iterator c = chunks_.begin(); if (c == chunks_.end()) { lock_.unlock(); return false; } start = c->start; uint64_t l = (c->end) - (c->start); if (l <= length) { length = l; chunks_.erase(c); } else { c->start += length; } lock_.unlock(); return true; } void ChunkControl::Claim(uint64_t start, uint64_t length) { if (length == 0) return; uint64_t end = start + length; lock_.lock(); for (std::list::iterator c = chunks_.begin(); c != chunks_.end();) { if (end <= c->start) break; if ((start <= c->start) && (end >= c->end)) { start = c->end; length = end - start; c = chunks_.erase(c); if (length > 0) continue; break; } if ((start > c->start) && (end < c->end)) { chunk_t chunk; chunk.start = c->start; chunk.end = start; c->start = end; chunks_.insert(c, chunk); break; } if ((start <= c->start) && (end < c->end) && (end > c->start)) { c->start = end; break; } if ((start > c->start) && (start < c->end) && (end >= c->end)) { uint64_t start_ = c->end; c->end = start; start = start_; length = end - start; ++c; if (length > 0) continue; break; } ++c; } lock_.unlock(); } void ChunkControl::Claim(uint64_t start) { Claim(start, UINT64_MAX - start); } void ChunkControl::Unclaim(uint64_t start, uint64_t length) { if (length == 0) return; uint64_t end = start + length; lock_.lock(); for (std::list::iterator c = chunks_.begin(); c != chunks_.end(); ++c) { if ((end >= c->start) && (end <= c->end)) { if (start < c->start) c->start = start; lock_.unlock(); return; } if ((start <= c->end) && (start >= c->start)) { if (end > c->end) { c->end = end; std::list::iterator c_ = c; ++c_; while (c_ != chunks_.end()) if (c->end >= c_->start) { if (c_->end >= c->end) { c->end = c_->end; break; } c_ = chunks_.erase(c_); } else break; } lock_.unlock(); return; } if ((start <= c->start) && (end >= c->end)) { c->start = start; if (end > c->end) { c->end = end; std::list::iterator c_ = c; ++c_; while (c_ != chunks_.end()) if (c->end >= c_->start) { if (c_->end >= c->end) { c->end = 
c_->end; break; } c_ = chunks_.erase(c_); } else break; } lock_.unlock(); return; } if (end < c->start) { chunk_t chunk = { start, end }; chunks_.insert(c, chunk); lock_.unlock(); return; } } chunk_t chunk = { start, end }; chunks_.push_back(chunk); lock_.unlock(); } DataPointHTTP::DataPointHTTP(const URL& url, const UserConfig& usercfg, PluginArgument* parg) : DataPointDirect(url, usercfg, parg), reading(false), writing(false), chunks(NULL), transfers_tofinish(0) {} DataPointHTTP::~DataPointHTTP() { StopReading(); StopWriting(); if (chunks) delete chunks; for(std::multimap::iterator cl = clients.begin(); cl != clients.end(); ++cl) { delete cl->second; }; } Plugin* DataPointHTTP::Instance(PluginArgument *arg) { DataPointPluginArgument *dmcarg = dynamic_cast(arg); if (!dmcarg) return NULL; if (((const URL &)(*dmcarg)).Protocol() != "http" && ((const URL &)(*dmcarg)).Protocol() != "https" && ((const URL &)(*dmcarg)).Protocol() != "httpg" && ((const URL &)(*dmcarg)).Protocol() != "dav" && ((const URL &)(*dmcarg)).Protocol() != "davs") return NULL; return new DataPointHTTP(*dmcarg, *dmcarg, dmcarg); } static bool html2list(const char *html, const URL& base, std::list& files) { for (const char *pos = html;;) { // Looking for tag const char *tag_start = strchr(pos, '<'); if (!tag_start) break; // No more tags // Looking for end of tag const char *tag_end = strchr(tag_start + 1, '>'); if (!tag_end) return false; // Broken html? // 'A' tag? if (strncasecmp(tag_start, " tag_end)) url_end = NULL; } else if ((*url_start) == '\'') { ++url_start; url_end = strchr(url_start, '\''); if ((!url_end) || (url_end > tag_end)) url_end = NULL; } else { url_end = strchr(url_start, ' '); if ((!url_end) || (url_end > tag_end)) url_end = tag_end; } if (!url_end) return false; // Broken HTML std::string url(url_start, url_end - url_start); url = uri_unencode(url); if(url[0] == '/') { url = base.ConnectionURL()+url; } if (url.find("://") != std::string::npos) { URL u(url); std::string b = base.str(); if (b[b.size() - 1] != '/') b += '/'; if (u.str().substr(0, b.size()) == b) url = u.str().substr(b.size()); } if (url[0] != '?' 
&& url[0] != '/') { if (url.find('/') == url.size() - 1) { std::list::iterator f = files.insert(files.end(), url); f->SetType(FileInfo::file_type_dir); } else if (url.find('/') == std::string::npos) { std::list::iterator f = files.insert(files.end(), url); f->SetType(FileInfo::file_type_file); } } } } pos = tag_end + 1; } return true; } DataStatus DataPointHTTP::do_stat_http(URL& rurl, FileInfo& file) { PayloadRaw request; PayloadRawInterface *inbuf = NULL; HTTPClientInfo info; for(int redirects_max = 10;redirects_max>=0;--redirects_max) { std::string path = rurl.FullPathURIEncoded(); info.lastModified = (time_t)(-1); AutoPointer client(acquire_client(rurl)); if (!client) return DataStatus::StatError; // Do HEAD to obtain some metadata MCC_Status r = client->process("HEAD", path, &request, &info, &inbuf); if (inbuf) delete inbuf; inbuf = NULL; if (!r) { // Because there is no reliable way to check if connection // is still alive at this place, we must try again client = acquire_new_client(rurl); if(client) r = client->process("HEAD", path, &request, &info, &inbuf); if (inbuf) delete inbuf; inbuf = NULL; if(!r) return DataStatus(DataStatus::StatError,r.getExplanation()); } release_client(rurl,client.Release()); if (info.code != 200) { if((info.code == 301) || // permanent redirection (info.code == 302) || // temporary redirection (info.code == 303) || // POST to GET redirection (info.code == 304)) { // redirection to cache // 305 - redirection to proxy - unhandled // Recreate connection now to new URL rurl = info.location; logger.msg(VERBOSE,"Redirecting to %s",info.location.str()); continue; } return DataStatus(DataStatus::StatError, http2errno(info.code), info.reason); } // Fetch known metadata std::string type = info.type; std::string::size_type pos = type.find(';'); if (pos != std::string::npos) type = type.substr(0, pos); // Treat every html as potential directory/set of links if(type == "text/html") { file.SetType(FileInfo::file_type_dir); } else { file.SetType(FileInfo::file_type_file); } if(info.size != (uint64_t)(-1)) { file.SetSize(info.size); } if(info.lastModified != (time_t)(-1)) { file.SetModified(info.lastModified); } // Not sure if(info.location) file.AddURL(info.location.str()); return DataStatus::Success; } return DataStatus(DataStatus::StatError,"Too many redirects"); } static unsigned int parse_http_status(const std::string& str) { // HTTP/1.1 200 OK std::vector tokens; tokenize(str, tokens); if(tokens.size() < 2) return 0; unsigned int code; if(!stringto(tokens[1],code)) return 0; return code; } static bool parse_webdav_response(XMLNode response, FileInfo& file, std::string& url) { bool found = false; XMLNode href = response["href"]; XMLNode propstat = response["propstat"]; for(;(bool)propstat;++propstat) { // Multiple propstat for multiple results // Only those with 200 represent real results std::string status = (std::string)propstat["status"]; if(parse_http_status(status) != 200) continue; XMLNode prop = propstat["prop"]; if((bool)prop) { XMLNode creationdate = prop["creationdate"]; XMLNode displayname = prop["displayname"]; XMLNode getcontentlength = prop["getcontentlength"]; XMLNode resourcetype = prop["resourcetype"]; XMLNode getlastmodified = prop["getlastmodified"]; // Fetch known metadata if((bool)resourcetype) { if((bool)resourcetype["collection"]) { file.SetType(FileInfo::file_type_dir); } else { file.SetType(FileInfo::file_type_file); } } uint64_t l = (uint64_t)(-1); if(stringto((std::string)getcontentlength,l)) { file.SetSize(l); } std::string t = 
(std::string)getlastmodified; if(t.empty()) t = (std::string)creationdate; if(!t.empty()) { Time tm(t); if(tm.GetTime() != Time::UNDEFINED) { file.SetModified(tm); } } found = true; } } if(found) { if((bool)href) url = (std::string)href; } return found; } DataStatus DataPointHTTP::do_stat_webdav(URL& rurl, FileInfo& file) { PayloadRaw request; { NS webdav_ns("d","DAV:"); XMLNode propfind(webdav_ns,"d:propfind"); XMLNode props = propfind.NewChild("d:prop"); props.NewChild("d:creationdate"); props.NewChild("d:displayname"); props.NewChild("d:getcontentlength"); props.NewChild("d:resourcetype"); props.NewChild("d:getlastmodified"); std::string s; propfind.GetDoc(s); request.Insert(s.c_str(),0,s.length()); } std::multimap propattr; propattr.insert(std::pair("Depth","0")); PayloadRawInterface *inbuf = NULL; HTTPClientInfo info; for(int redirects_max = 10;redirects_max>=0;--redirects_max) { std::string path = rurl.FullPathURIEncoded(); info.lastModified = (time_t)(-1); AutoPointer client(acquire_client(rurl)); if (!client) return DataStatus::StatError; MCC_Status r = client->process("PROPFIND", path, propattr, &request, &info, &inbuf); if (!r) { if (inbuf) delete inbuf; inbuf = NULL; // Because there is no reliable way to check if connection // is still alive at this place, we must try again client = acquire_new_client(rurl); if(client) r = client->process("PROPFIND", path, propattr, &request, &info, &inbuf); if(!r) { if (inbuf) delete inbuf; inbuf = NULL; return DataStatus(DataStatus::StatError,r.getExplanation()); } } if ((info.code != 200) && (info.code != 207)) { // 207 for multistatus response if (inbuf) delete inbuf; inbuf = NULL; release_client(rurl,client.Release()); if((info.code == 301) || // permanent redirection (info.code == 302) || // temporary redirection (info.code == 303) || // POST to GET redirection (info.code == 304)) { // redirection to cache // 305 - redirection to proxy - unhandled // Recreate connection now to new URL rurl = info.location; logger.msg(VERBOSE,"Redirecting to %s",info.location.str()); continue; } // Possibly following errors can be returned by server // if it does not implement webdav. 
// 405 - method not allowed // 501 - not implemented // 500 - internal error (for simplest servers) if((info.code == 405) || (info.code == 501) || (info.code == 500)) { // Indicating possible failure reason using POSIX error code ENOSYS return DataStatus(DataStatus::StatError, ENOSYS, info.reason); } return DataStatus(DataStatus::StatError, http2errno(info.code), info.reason); } if(inbuf) { XMLNode multistatus(ContentFromPayload(*inbuf)); delete inbuf; inbuf = NULL; release_client(rurl,client.Release()); if(multistatus.Name() == "multistatus") { XMLNode response = multistatus["response"]; if((bool)response) { std::string url; if(parse_webdav_response(response,file,url)) { return DataStatus::Success; } } } } else { release_client(rurl,client.Release()); } return DataStatus(DataStatus::StatError,"Can't process WebDAV response"); } return DataStatus(DataStatus::StatError,"Too many redirects"); } DataStatus DataPointHTTP::do_list_webdav(URL& rurl, std::list& files, DataPointInfoType verb) { PayloadRaw request; { NS webdav_ns("d","DAV:"); XMLNode propfind(webdav_ns,"d:propfind"); XMLNode props = propfind.NewChild("d:prop"); // TODO: verb props.NewChild("d:creationdate"); props.NewChild("d:displayname"); props.NewChild("d:getcontentlength"); props.NewChild("d:resourcetype"); props.NewChild("d:getlastmodified"); std::string s; propfind.GetDoc(s); request.Insert(s.c_str(),0,s.length()); } std::multimap propattr; propattr.insert(std::pair("Depth","1")); // for listing PayloadRawInterface *inbuf = NULL; HTTPClientInfo info; for(int redirects_max = 10;redirects_max>=0;--redirects_max) { std::string path = rurl.FullPathURIEncoded(); info.lastModified = (time_t)(-1); AutoPointer client(acquire_client(rurl)); if (!client) return DataStatus::StatError; MCC_Status r = client->process("PROPFIND", path, propattr, &request, &info, &inbuf); if (!r) { if (inbuf) delete inbuf; inbuf = NULL; // Because there is no reliable way to check if connection // is still alive at this place, we must try again client = acquire_new_client(rurl); if(client) r = client->process("PROPFIND", path, propattr, &request, &info, &inbuf); if(!r) { if (inbuf) delete inbuf; inbuf = NULL; return DataStatus(DataStatus::StatError,r.getExplanation()); } } if ((info.code != 200) && (info.code != 207)) { // 207 for multistatus response if (inbuf) delete inbuf; inbuf = NULL; release_client(rurl,client.Release()); if((info.code == 301) || // permanent redirection (info.code == 302) || // temporary redirection (info.code == 303) || // POST to GET redirection (info.code == 304)) { // redirection to cache // 305 - redirection to proxy - unhandled // Recreate connection now to new URL rurl = info.location; logger.msg(VERBOSE,"Redirecting to %s",info.location.str()); continue; } // Possibly following errors can be returned by server // if it does not implement webdav. 
// 405 - method not allowed // 501 - not implemented // 500 - internal error (for simplest servers) if((info.code == 405) || (info.code == 501) || (info.code == 500)) { // Indicating possible failure reason using POSIX error code ENOSYS return DataStatus(DataStatus::StatError, ENOSYS, info.reason); } return DataStatus(DataStatus::StatError, http2errno(info.code), info.reason); } if(inbuf) { XMLNode multistatus(ContentFromPayload(*inbuf)); delete inbuf; inbuf = NULL; release_client(rurl,client.Release()); if(multistatus.Name() == "multistatus") { XMLNode response = multistatus["response"]; for(;(bool)response;++response) { FileInfo file; std::string url; if(parse_webdav_response(response,file,url)) { // url = uri_unencode(url); ? if(url[0] == '/') { url = rurl.ConnectionURL()+url; } if (url.find("://") != std::string::npos) { URL u(url); std::string b = rurl.str(); if (b[b.size() - 1] != '/') b += '/'; if (u.str().substr(0, b.size()) == b) url = u.str().substr(b.size()); } if(!url.empty()) { // skip requested object file.SetName(url); files.push_back(file); } } } return DataStatus::Success; } } else { release_client(rurl,client.Release()); } return DataStatus(DataStatus::StatError,"Can't process WebDAV response"); } return DataStatus(DataStatus::StatError,"Too many redirects"); } DataStatus DataPointHTTP::Stat(FileInfo& file, DataPointInfoType verb) { // verb is not used URL curl = url; DataStatus r = do_stat_webdav(curl, file); if(!r) { if(r.GetErrno() != ENOSYS) return r; r = do_stat_http(curl, file); if (!r) return r; } std::string name = url.FullPath(); std::string::size_type p = name.rfind('/'); while(p != std::string::npos) { if(p != name.length()-1) { name = name.substr(p+1); break; } name.resize(p); p = name.rfind('/'); } file.SetName(name); if(file.CheckSize()) { size = file.GetSize(); logger.msg(VERBOSE, "Stat: obtained size %llu", size); } if(file.CheckModified()) { modified = file.GetModified(); logger.msg(VERBOSE, "Stat: obtained modification time %s", modified.str()); } return DataStatus::Success; } DataStatus DataPointHTTP::List(std::list& files, DataPointInfoType verb) { if (transfers_started.get() != 0) return DataStatus(DataStatus::ListError, EARCLOGIC, "Currently reading"); URL curl = url; DataStatus r; bool webdav_supported = true; { FileInfo file; r = do_stat_webdav(curl, file); if(!r) { webdav_supported = false; if(r.GetErrno() == ENOSYS) { r = do_stat_http(curl, file); } } if(r) { if(file.CheckSize()) size = file.GetSize(); if(file.CheckModified()) modified = file.GetModified(); if(file.GetType() != FileInfo::file_type_dir) return DataStatus(DataStatus::ListError, ENOTDIR); } } if(webdav_supported) { r = do_list_webdav(curl, files, verb); return r; } // If server has no webdav try to read content and present it as list of links DataBuffer buffer; // Read content of file // TODO: Reuse already redirected URL stored in curl r = StartReading(buffer); if (!r) return DataStatus(DataStatus::ListError, r.GetErrno(), r.GetDesc()); int handle; unsigned int length; unsigned long long int offset; std::string result; unsigned long long int maxsize = (10*1024*1024); // 10MB seems reasonable limit while (buffer.for_write() || !buffer.eof_read()) { if (buffer.for_write(handle, length, offset, true)) { if(offset >= maxsize) { buffer.is_written(handle); break; }; if((offset+length) > maxsize) length = maxsize-offset; if((offset+length) > result.size()) result.resize(offset+length,'\0'); result.replace(offset,length,buffer[handle], length); buffer.is_written(handle); } } r = 
StopReading(); if (!r) return DataStatus(DataStatus::ListError, r.GetErrno(), r.GetDesc()); // Convert obtained HTML into set of links bool is_html = false; bool is_body = false; std::string::size_type tagstart = 0; std::string::size_type tagend = 0; std::string::size_type titlestart = std::string::npos; std::string::size_type titleend = std::string::npos; do { tagstart = result.find('<', tagend); if (tagstart == std::string::npos) break; tagend = result.find('>', tagstart); if (tagend == std::string::npos) break; std::string tag = result.substr(tagstart + 1, tagend - tagstart - 1); std::string::size_type tag_e = tag.find(' '); if (tag_e != std::string::npos) tag.resize(tag_e); if (strcasecmp(tag.c_str(), "title") == 0) titlestart = tagend + 1; else if (strcasecmp(tag.c_str(), "/title") == 0) titleend = tagstart - 1; else if (strcasecmp(tag.c_str(), "html") == 0) is_html = true; else if (strcasecmp(tag.c_str(), "body") == 0) is_body = is_html; } while (!is_body); std::string title; if (titlestart != std::string::npos && titleend != std::string::npos) { title = result.substr(titlestart, titleend - titlestart + 1); } if (is_body) { // If it was redirected then links must be relative to new location. Or not? html2list(result.c_str(), curl, files); if(verb & (INFO_TYPE_TYPE | INFO_TYPE_TIMES | INFO_TYPE_CONTENT)) { for(std::list::iterator f = files.begin(); f != files.end(); ++f) { URL furl(curl.str()+'/'+(f->GetName())); do_stat_http(furl, *f); } } } return DataStatus::Success; } DataStatus DataPointHTTP::StartReading(DataBuffer& buffer) { if (reading) return DataStatus::IsReadingError; if (writing) return DataStatus::IsWritingError; if (transfers_started.get() != 0) return DataStatus(DataStatus::IsReadingError, EARCLOGIC); reading = true; int transfer_streams = 1; strtoint(url.Option("threads"),transfer_streams); if (transfer_streams < 1) transfer_streams = 1; if (transfer_streams > MAX_PARALLEL_STREAMS) transfer_streams = MAX_PARALLEL_STREAMS; DataPointHTTP::buffer = &buffer; if (chunks) delete chunks; chunks = new ChunkControl; transfer_lock.lock(); transfers_tofinish = 0; for (int n = 0; n < transfer_streams; ++n) { HTTPInfo_t *info = new HTTPInfo_t; info->point = this; if (!CreateThreadFunction(&read_thread, info, &transfers_started)) { delete info; } else { ++transfers_tofinish; } } if (transfers_tofinish == 0) { transfer_lock.unlock(); StopReading(); return DataStatus::ReadStartError; } transfer_lock.unlock(); return DataStatus::Success; } DataStatus DataPointHTTP::StopReading() { if (!reading) return DataStatus::ReadStopError; reading = false; if (!buffer) return DataStatus(DataStatus::ReadStopError, EARCLOGIC, "Not reading"); if(!buffer->eof_read()) buffer->error_read(true); while (transfers_started.get()) { transfers_started.wait(10000); // Just in case } if (chunks) delete chunks; chunks = NULL; transfers_tofinish = 0; if (buffer->error_read()) { buffer = NULL; return DataStatus::ReadError; } buffer = NULL; return DataStatus::Success; } DataStatus DataPointHTTP::StartWriting(DataBuffer& buffer, DataCallback*) { if (reading) return DataStatus::IsReadingError; if (writing) return DataStatus::IsWritingError; if (transfers_started.get() != 0) return DataStatus(DataStatus::IsWritingError, EARCLOGIC); writing = true; int transfer_streams = 1; strtoint(url.Option("threads"),transfer_streams); if (transfer_streams < 1) transfer_streams = 1; if (transfer_streams > MAX_PARALLEL_STREAMS) transfer_streams = MAX_PARALLEL_STREAMS; DataPointHTTP::buffer = &buffer; if (chunks) delete chunks; 
chunks = new ChunkControl; transfer_lock.lock(); transfers_tofinish = 0; for (int n = 0; n < transfer_streams; ++n) { HTTPInfo_t *info = new HTTPInfo_t; info->point = this; if (!CreateThreadFunction(&write_thread, info, &transfers_started)) { delete info; } else { ++transfers_tofinish; } } if (transfers_tofinish == 0) { transfer_lock.unlock(); StopWriting(); return DataStatus::WriteStartError; } transfer_lock.unlock(); return DataStatus::Success; } DataStatus DataPointHTTP::StopWriting() { if (!writing) return DataStatus::WriteStopError; writing = false; if (!buffer) return DataStatus(DataStatus::WriteStopError, EARCLOGIC, "Not writing"); if(!buffer->eof_write()) buffer->error_write(true); while (transfers_started.get()) { transfers_started.wait(); // Just in case } if (chunks) { delete chunks; } chunks = NULL; transfers_tofinish = 0; if (buffer->error_write()) { buffer = NULL; return DataStatus::WriteError; } buffer = NULL; return DataStatus::Success; } DataStatus DataPointHTTP::Check(bool check_meta) { PayloadRaw request; PayloadRawInterface *inbuf = NULL; HTTPClientInfo info; AutoPointer client(acquire_client(url)); if (!client) return DataStatus::CheckError; MCC_Status r = client->process("GET", url.FullPathURIEncoded(), 0, 15, &request, &info, &inbuf); PayloadRawInterface::Size_t logsize = 0; if (inbuf){ logsize = inbuf->Size(); delete inbuf; inbuf = NULL; } if (!r) { client = acquire_new_client(url); if(client) r = client->process("GET", url.FullPathURIEncoded(), 0, 15, &request, &info, &inbuf); if (inbuf){ logsize = inbuf->Size(); delete inbuf; inbuf = NULL; } if(!r) return DataStatus(DataStatus::CheckError,r.getExplanation()); } release_client(url,client.Release()); if ((info.code != 200) && (info.code != 206)) return DataStatus(DataStatus::CheckError, http2errno(info.code), info.reason); size = logsize; logger.msg(VERBOSE, "Check: obtained size %llu", size); modified = info.lastModified; logger.msg(VERBOSE, "Check: obtained modification time %s", modified.str()); return DataStatus::Success; } DataStatus DataPointHTTP::Remove() { AutoPointer client(acquire_client(url)); PayloadRaw request; PayloadRawInterface *inbuf = NULL; HTTPClientInfo info; MCC_Status r = client->process("DELETE", url.FullPathURIEncoded(), &request, &info, &inbuf); if (inbuf) delete inbuf; inbuf = NULL; if(!r) { client = acquire_new_client(url); if(client) r = client->process("DELETE", url.FullPathURIEncoded(), &request, &info, &inbuf); if (inbuf) delete inbuf; inbuf = NULL; if(!r) return DataStatus(DataStatus::DeleteError,r.getExplanation()); } release_client(url,client.Release()); if ((info.code != 200) && (info.code != 202) && (info.code != 204)) { return DataStatus(DataStatus::DeleteError, http2errno(info.code), info.reason); } return DataStatus::Success; } DataStatus DataPointHTTP::Rename(const URL& destination) { AutoPointer client(acquire_client(url)); PayloadRaw request; PayloadRawInterface *inbuf = NULL; HTTPClientInfo info; std::multimap attributes; attributes.insert(std::pair("Destination", url.ConnectionURL() + destination.FullPathURIEncoded())); MCC_Status r = client->process("MOVE", url.FullPathURIEncoded(), attributes, &request, &info, &inbuf); if (inbuf) delete inbuf; inbuf = NULL; if(!r) { client = acquire_new_client(url); if(client) r = client->process("MOVE", url.FullPathURIEncoded(), attributes, &request, &info, &inbuf); if (inbuf) delete inbuf; inbuf = NULL; if(!r) return DataStatus(DataStatus::RenameError,r.getExplanation()); } release_client(url,client.Release()); if ((info.code != 201) 
&& (info.code != 204)) { return DataStatus(DataStatus::RenameError, http2errno(info.code), info.reason); } return DataStatus::Success; } bool DataPointHTTP::read_single(void *arg) { HTTPInfo_t& info = *((HTTPInfo_t*)arg); DataPointHTTP& point = *(info.point); URL client_url = point.url; AutoPointer client(point.acquire_client(client_url)); bool transfer_failure = false; int retries = 0; std::string path = point.CurrentLocation().FullPathURIEncoded(); DataStatus failure_code; if (!client) return false; HTTPClientInfo transfer_info; PayloadRaw request; PayloadStreamInterface *instream = NULL; for(;;) { // for retries MCC_Status r = client->process(ClientHTTPAttributes("GET", path), &request, &transfer_info, &instream); if (!r) { if (instream) delete instream; // Failed to transfer - retry. // 10 times in a row seems to be reasonable number // To make it sure all retriable errors are eliminated // connection is also re-established client = NULL; // TODO: mark failure? if ((++retries) > 10) { transfer_failure = true; failure_code = DataStatus(DataStatus::ReadError, r.getExplanation()); break; } // Recreate connection client = point.acquire_new_client(client_url); if(client) continue; transfer_failure = true; break; } if((transfer_info.code == 301) || // permanent redirection (transfer_info.code == 302) || // temporary redirection (transfer_info.code == 303) || // POST to GET redirection (transfer_info.code == 304)) { // redirection to cache // 305 - redirection to proxy - unhandled if (instream) delete instream; // Recreate connection now to new URL point.release_client(client_url,client.Release()); // return client to poll client_url = transfer_info.location; logger.msg(VERBOSE,"Redirecting to %s",transfer_info.location.str()); client = point.acquire_client(client_url); if (client) { path = client_url.FullPathURIEncoded(); continue; } transfer_failure = true; break; } if ((transfer_info.code != 200) && (transfer_info.code != 206)) { // HTTP error - retry? 
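// Only transient server-side statuses (500, 503, 504) are retried below;
// any other status is treated as a permanent read error and reported.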
if (instream) delete instream; if ((transfer_info.code == 500) || (transfer_info.code == 503) || (transfer_info.code == 504)) { if ((++retries) <= 10) continue; } logger.msg(VERBOSE,"HTTP failure %u - %s",transfer_info.code,transfer_info.reason); std::string reason = Arc::tostring(transfer_info.code) + " - " + transfer_info.reason; failure_code = DataStatus(DataStatus::ReadError, point.http2errno(transfer_info.code), reason); transfer_failure = true; break; } if(!instream) { transfer_failure = true; break; } // pick up useful information from HTTP header point.modified = transfer_info.lastModified; retries = 0; // Pull from stream and store in buffer unsigned int transfer_size = 0; int transfer_handle = -1; uint64_t transfer_pos = 0; for(;;) { if (transfer_handle == -1) { if (!point.buffer->for_read(transfer_handle, transfer_size, true)) { // No transfer buffer - must be failure or close initiated // externally break; } } int l = transfer_size; uint64_t pos = instream->Pos(); if(!instream->Get((*point.buffer)[transfer_handle],l)) { // Trying to find out if stream ended due to error if(transfer_pos < instream->Size()) transfer_failure = true; break; } point.buffer->is_read(transfer_handle, l, pos); transfer_handle = -1; transfer_pos = pos + l; } if (transfer_handle != -1) point.buffer->is_read(transfer_handle, 0, 0); if (instream) delete instream; // End of transfer - either success or not retrying transfer of whole body break; } point.release_client(client_url,client.Release()); return !transfer_failure; } void DataPointHTTP::read_thread(void *arg) { HTTPInfo_t& info = *((HTTPInfo_t*)arg); DataPointHTTP& point = *(info.point); point.transfer_lock.lock(); point.transfer_lock.unlock(); URL client_url = point.url; AutoPointer client(point.acquire_client(client_url)); bool transfer_failure = false; int retries = 0; std::string path = point.CurrentLocation().FullPathURIEncoded(); DataStatus failure_code; bool partial_read_allowed = (client_url.Option("httpgetpartial","yes") == "yes"); if(partial_read_allowed) for (;;) { if(client && client->GetClosed()) client = point.acquire_client(client_url); if (!client) { transfer_failure = true; break; } unsigned int transfer_size = 0; int transfer_handle = -1; // get first buffer if (!point.buffer->for_read(transfer_handle, transfer_size, true)) { // No transfer buffer - must be failure or close initiated externally break; } uint64_t transfer_offset = 0; uint64_t chunk_length = 1024*1024; if(transfer_size > chunk_length) chunk_length = transfer_size; if (!(point.chunks->Get(transfer_offset, chunk_length))) { // No more chunks to transfer - quit this thread. point.buffer->is_read(transfer_handle, 0, 0); break; } uint64_t transfer_end = transfer_offset + chunk_length - 1; // Read chunk HTTPClientInfo transfer_info; PayloadRaw request; PayloadRawInterface *inbuf = NULL; MCC_Status r = client->process("GET", path, transfer_offset, transfer_end, &request, &transfer_info, &inbuf); if (!r) { // Return buffer point.buffer->is_read(transfer_handle, 0, 0); point.chunks->Unclaim(transfer_offset, chunk_length); if (inbuf) delete inbuf; client = NULL; // Failed to transfer chunk - retry. // 10 times in a row seems to be reasonable number // TODO: mark failure? 
if ((++retries) > 10) { transfer_failure = true; failure_code = DataStatus(DataStatus::ReadError, r.getExplanation()); break; } // Recreate connection client = point.acquire_new_client(client_url); if(client) continue; transfer_failure = true; break; } if (transfer_info.code == 416) { // EOF point.buffer->is_read(transfer_handle, 0, 0); point.chunks->Unclaim(transfer_offset, chunk_length); if (inbuf) delete inbuf; break; } if((transfer_info.code == 301) || // permanent redirection (transfer_info.code == 302) || // temporary redirection (transfer_info.code == 303) || // POST to GET redirection (transfer_info.code == 304)) { // redirection to cache // 305 - redirection to proxy - unhandled // Return buffer point.buffer->is_read(transfer_handle, 0, 0); point.chunks->Unclaim(transfer_offset, chunk_length); if (inbuf) delete inbuf; // Recreate connection now to new URL point.release_client(client_url,client.Release()); client_url = transfer_info.location; logger.msg(VERBOSE,"Redirecting to %s",transfer_info.location.str()); client = point.acquire_client(client_url); if (client) { path = client_url.FullPathURIEncoded(); continue; } transfer_failure = true; break; } if ((transfer_info.code != 200) && (transfer_info.code != 206)) { // HTTP error - retry? point.buffer->is_read(transfer_handle, 0, 0); point.chunks->Unclaim(transfer_offset, chunk_length); if (inbuf) delete inbuf; if ((transfer_info.code == 500) || (transfer_info.code == 503) || (transfer_info.code == 504)) { // Retriable error codes if ((++retries) <= 10) continue; } logger.msg(VERBOSE,"HTTP failure %u - %s",transfer_info.code,transfer_info.reason); std::string reason = Arc::tostring(transfer_info.code) + " - " + transfer_info.reason; failure_code = DataStatus(DataStatus::ReadError, point.http2errno(transfer_info.code), reason); transfer_failure = true; break; } PayloadStreamInterface* instream = NULL; try { instream = dynamic_cast(dynamic_cast(inbuf)); } catch(std::exception& e) { transfer_failure = true; break; } if(!instream) { transfer_failure = true; break; } // pick up useful information from HTTP header point.modified = transfer_info.lastModified; retries = 0; // Exclude chunks after EOF. Normally that is not needed. // But Apache if asked about out of file range gets confused // and sends *whole* file instead of 416. if(inbuf && (inbuf->Size() > 0)) point.chunks->Claim(inbuf->Size()); bool whole = (inbuf && (((transfer_info.size == inbuf->Size() && (inbuf->BufferPos(0) == 0))) || inbuf->Size() == -1)); point.transfer_lock.lock(); point.chunks->Unclaim(transfer_offset, chunk_length); uint64_t transfer_pos = 0; for(;;) { if (transfer_handle == -1) { point.transfer_lock.unlock(); if (!point.buffer->for_read(transfer_handle, transfer_size, true)) { // No transfer buffer - must be failure or close initiated // externally point.transfer_lock.lock(); break; } point.transfer_lock.lock(); } int l = transfer_size; uint64_t pos = instream->Pos(); if(!instream->Get((*point.buffer)[transfer_handle],l)) { break; } point.buffer->is_read(transfer_handle, l, pos); point.chunks->Claim(pos, l); transfer_handle = -1; transfer_pos = pos + l; } if (transfer_handle != -1) point.buffer->is_read(transfer_handle, 0, 0); if (inbuf) delete inbuf; // If server returned chunk which is not overlaping requested one - seems // like server has nothing to say any more. 
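// transfer_pos is the position just past the last byte actually delivered,
// so a value not beyond transfer_offset means the server returned nothing
// at or after the requested offset.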
if (transfer_pos <= transfer_offset) whole = true; point.transfer_lock.unlock(); if (whole) break; } point.transfer_lock.lock(); --(point.transfers_tofinish); if (transfer_failure) { point.failure_code = failure_code; point.buffer->error_read(true); } if (point.transfers_tofinish == 0) { // TODO: process/report failure? if(!partial_read_allowed) { // Reading in single chunk to be done in single thread if(!read_single(arg)) { transfer_failure = true; point.buffer->error_read(true); } } point.buffer->eof_read(true); } point.release_client(client_url,client.Release()); delete &info; point.transfer_lock.unlock(); } bool DataPointHTTP::write_single(void *arg) { HTTPInfo_t& info = *((HTTPInfo_t*)arg); DataPointHTTP& point = *(info.point); URL client_url = point.url; AutoPointer client(point.acquire_client(client_url)); if (!client) return false; HTTPClientInfo transfer_info; PayloadRawInterface *response = NULL; std::string path = client_url.FullPathURIEncoded(); // TODO: Do ping to *client in order to check if connection is alive. // TODO: But ping itself can destroy connection on 1.0-like servers. // TODO: Hence retry is needed like in other cases. // To allow for redirection from the server without uploading the whole // body we send request header with Expect: 100-continue. Servers // should return either 100 continue or 30x redirection without // waiting for body. bool expect100 = true; for (;;) { std::multimap attrs; if(expect100) { attrs.insert(std::pair("EXPECT", "100-continue")); // Note: there will be no 100 in response because it will be processed // at lower level. } StreamBuffer request(*point.buffer); if (point.CheckSize()) request.Size(point.GetSize()); MCC_Status r = client->process(ClientHTTPAttributes("PUT", path, attrs), &request, &transfer_info, &response); if (response) delete response; response = NULL; if (!r) { // It is not clear how to retry if early chunks are not available anymore. // Let it retry at higher level. point.failure_code = DataStatus(DataStatus::WriteError, r.getExplanation()); client = NULL; return false; } if (transfer_info.code == 301 || // Moved permanently transfer_info.code == 302 || // Found (temp redirection) transfer_info.code == 307) { // Temporary redirection // Got redirection response // Recreate connection now to new URL point.release_client(client_url,client.Release()); client_url = transfer_info.location; logger.msg(VERBOSE,"Redirecting to %s",transfer_info.location.str()); client = point.acquire_client(client_url); if (client) { // TODO: Only one redirection is currently supported. Here we should // try again with 100-continue but there were problems with dCache // where on upload of the real body the server returns 201 Created // immediately and leaves an empty file. We cannot use a new // connection after 100 continue because redirected URLs that dCache // sends are one time use only. // TODO: make it configurable. Maybe through redirection depth. expect100 = false; path = client_url.FullPathURIEncoded(); continue; } // Failed to acquire client for new connection - fail. point.buffer->error_write(true); point.failure_code = DataStatus(DataStatus::WriteError, "Failed to connect to redirected URL "+client_url.fullstr()); return false; } if (transfer_info.code == 417) { // Expectation not supported // Retry without expect: 100 - probably old server expect100 = false; continue; } // RFC2616 says "Many older HTTP/1.0 and HTTP/1.1 applications do not // understand the Expect header". But this is not currently treated very well. 
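// Illustrative outline of the exchange this code aims for when the header is
// honoured (server behaviour varies, hence the fallbacks handled above):
//   C: PUT /path HTTP/1.1
//   C: Expect: 100-continue
//   S: HTTP/1.1 100 Continue   (or a 30x redirect / 417, sent before any body)
//   C: <request body streamed from the DataBuffer>
//   S: HTTP/1.1 201 Created    (200 and 204 are also accepted below)
// Servers that ignore the Expect header simply wait for the body.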
if ((transfer_info.code != 201) && (transfer_info.code != 200) && (transfer_info.code != 204)) { // HTTP error point.release_client(client_url,client.Release()); point.failure_code = DataStatus(DataStatus::WriteError, point.http2errno(transfer_info.code), transfer_info.reason); return false; } // Looks like request passed well break; } point.release_client(client_url,client.Release()); return true; } void DataPointHTTP::write_thread(void *arg) { HTTPInfo_t& info = *((HTTPInfo_t*)arg); DataPointHTTP& point = *(info.point); point.transfer_lock.lock(); point.transfer_lock.unlock(); URL client_url = point.url; AutoPointer client(point.acquire_client(client_url)); bool transfer_failure = false; int retries = 0; std::string path = client_url.FullPathURIEncoded(); bool partial_write_failure = (client_url.Option("httpputpartial") != "yes"); DataStatus failure_code; // Fall through if partial PUT is not allowed if(!partial_write_failure) for (;;) { if(client && client->GetClosed()) client = point.acquire_client(client_url); if (!client) { transfer_failure = true; break; } unsigned int transfer_size = 0; int transfer_handle = -1; unsigned long long int transfer_offset = 0; // get first buffer if (!point.buffer->for_write(transfer_handle, transfer_size, transfer_offset, true)) // No transfer buffer - must be failure or close initiated externally break; //uint64_t transfer_offset = 0; //uint64_t transfer_end = transfer_offset+transfer_size; // Write chunk HTTPClientInfo transfer_info; PayloadMemConst request((*point.buffer)[transfer_handle], transfer_offset, transfer_size, point.CheckSize() ? point.GetSize() : 0); PayloadRawInterface *response = NULL; MCC_Status r = client->process("PUT", path, &request, &transfer_info, &response); if (response) delete response; if (!r) { client = NULL; // Failed to transfer chunk - retry. // 10 times in a row seems to be reasonable number // TODO: mark failure? if ((++retries) > 10) { transfer_failure = true; failure_code = DataStatus(DataStatus::WriteError, r.getExplanation()); break; } // Return buffer point.buffer->is_notwritten(transfer_handle); // Recreate connection client = point.acquire_new_client(client_url); continue; } if ((transfer_info.code != 201) && (transfer_info.code != 200) && (transfer_info.code != 204)) { // HTTP error - retry? point.buffer->is_notwritten(transfer_handle); if ((transfer_info.code == 500) || (transfer_info.code == 503) || (transfer_info.code == 504)) { if ((++retries) <= 10) continue; } if (transfer_info.code == 501) { // Not implemented - probably means server does not accept patial PUT partial_write_failure = true; } else { transfer_failure = true; failure_code = DataStatus(DataStatus::WriteError, point.http2errno(transfer_info.code), transfer_info.reason); } break; } retries = 0; point.buffer->is_written(transfer_handle); } point.transfer_lock.lock(); --(point.transfers_tofinish); if (transfer_failure) { point.failure_code = failure_code; point.buffer->error_write(true); } if (point.transfers_tofinish == 0) { if(partial_write_failure) { // Writing in single chunk to be done in single thread if(!write_single(arg)) { transfer_failure = true; point.buffer->error_write(true); } } // TODO: process/report failure? 
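// The last writer thread to finish signals end-of-file on the buffer and,
// if nothing was transferred at all, may perform the zero-length PUT below.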
point.buffer->eof_write(true); if ((!partial_write_failure) && (!(point.buffer->error())) && (point.buffer->eof_position() == 0)) { // Zero size data was transferred - must send at least one empty packet for (;;) { if (!client) client = point.acquire_client(client_url); if (!client) { point.buffer->error_write(true); break; } HTTPClientInfo transfer_info; PayloadMemConst request(NULL, 0, 0, 0); PayloadRawInterface *response = NULL; MCC_Status r = client->process("PUT", path, &request, &transfer_info, &response); if (response) delete response; if (!r) { client = NULL; if ((++retries) > 10) { point.failure_code = DataStatus(DataStatus::WriteError, r.getExplanation()); point.buffer->error_write(true); break; } // Recreate connection client = point.acquire_new_client(client_url);; continue; } if ((transfer_info.code != 201) && (transfer_info.code != 200) && (transfer_info.code != 204)) { // HTTP error - retry? if ((transfer_info.code == 500) || (transfer_info.code == 503) || (transfer_info.code == 504)) if ((++retries) <= 10) continue; point.buffer->error_write(true); point.failure_code = DataStatus(DataStatus::WriteError, point.http2errno(transfer_info.code), transfer_info.reason); break; } break; } } } point.release_client(client_url,client.Release()); delete &info; point.transfer_lock.unlock(); } bool DataPointHTTP::SetURL(const URL& url) { if(url.Protocol() != this->url.Protocol()) return false; if(url.Host() != this->url.Host()) return false; if(url.Port() != this->url.Port()) return false; this->url = url; if(triesleft < 1) triesleft = 1; ResetMeta(); return true; } ClientHTTP* DataPointHTTP::acquire_client(const URL& curl) { // TODO: lock if(!curl) return NULL; if((curl.Protocol() != "http") && (curl.Protocol() != "https") && (curl.Protocol() != "httpg") && (curl.Protocol() != "dav") && (curl.Protocol() != "davs")) return NULL; ClientHTTP* client = NULL; std::string key = curl.ConnectionURL(); clients_lock.lock(); std::multimap::iterator cl = clients.find(key); if(cl != clients.end()) { client = cl->second; clients.erase(cl); clients_lock.unlock(); } else { clients_lock.unlock(); MCCConfig cfg; usercfg.ApplyToConfig(cfg); client = new ClientHTTP(cfg, curl, usercfg.Timeout()); }; return client; } ClientHTTP* DataPointHTTP::acquire_new_client(const URL& curl) { if(!curl) return NULL; if((curl.Protocol() != "http") && (curl.Protocol() != "https") && (curl.Protocol() != "httpg") && (curl.Protocol() != "dav") && (curl.Protocol() != "davs")) return NULL; MCCConfig cfg; usercfg.ApplyToConfig(cfg); return new ClientHTTP(cfg, curl, usercfg.Timeout()); } void DataPointHTTP::release_client(const URL& curl, ClientHTTP* client) { if(!client) return; if(client->GetClosed()) { delete client; return; } std::string key = curl.ConnectionURL(); //if(!*client) return; clients_lock.lock(); clients.insert(std::pair(key,client)); clients_lock.unlock(); } int DataPointHTTP::http2errno(int http_code) const { // Codes taken from RFC 2616 section 10. Only 4xx and 5xx are treated as errors switch(http_code) { case 400: case 405: case 411: case 413: case 414: case 415: case 416: case 417: return EINVAL; case 401: case 403: case 407: return EACCES; case 404: case 410: return ENOENT; case 406: case 412: return EARCRESINVAL; case 408: return ETIMEDOUT; case 409: // Conflict. Not sure about this one. 
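// (falls through: 409 is grouped with the temporary service errors below)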
case 500: case 502: case 503: case 504: return EARCSVCTMP; case 501: case 505: return EOPNOTSUPP; default: return EARCOTHER; } } } // namespace Arc extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "http", "HED:DMC", "HTTP, HTTP over SSL (https) or DAV(s)", 0, &ArcDMCHTTP::DataPointHTTP::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/dmc/http/PaxHeaders.7502/StreamBuffer.h0000644000000000000000000000012112012153377023002 xustar000000000000000027 mtime=1344853759.361249 26 atime=1513200575.21371 28 ctime=1513200660.7697569 nordugrid-arc-5.4.2/src/hed/dmc/http/StreamBuffer.h0000644000175000002070000000173612012153377023061 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARCDMCHTTP_STREAMBUFFER_H__ #define __ARCDMCHTTP_STREAMBUFFER_H__ #include #include namespace ArcDMCHTTP { using namespace Arc; class StreamBuffer: public PayloadStreamInterface { public: StreamBuffer(DataBuffer& buffer); virtual ~StreamBuffer(void); virtual bool Get(char* buf,int& size); virtual bool Put(const char* buf,Size_t size); virtual operator bool(void); virtual bool operator!(void); virtual int Timeout(void) const; virtual void Timeout(int to); virtual Size_t Pos(void) const; virtual Size_t Size(void) const; virtual Size_t Limit(void) const; void Size(Size_t size); private: DataBuffer& buffer_; int buffer_handle_; unsigned int buffer_length_; unsigned long long int buffer_offset_; unsigned long long int current_offset_; unsigned long long int current_size_; }; } // namespace ArcDMCHTTP #endif // __ARCDMCHTTP_STREAMBUFFER_H__ nordugrid-arc-5.4.2/src/hed/dmc/http/PaxHeaders.7502/README0000644000000000000000000000012311001653037021121 xustar000000000000000027 mtime=1208440351.928622 26 atime=1513200575.21371 30 ctime=1513200660.762756815 nordugrid-arc-5.4.2/src/hed/dmc/http/README0000644000175000002070000000006411001653037021167 0ustar00mockbuildmock00000000000000DMC handles file transfer using http(s):// protocol nordugrid-arc-5.4.2/src/hed/dmc/PaxHeaders.7502/rucio0000644000000000000000000000013213214316024020326 xustar000000000000000030 mtime=1513200660.814757451 30 atime=1513200668.722854169 30 ctime=1513200660.814757451 nordugrid-arc-5.4.2/src/hed/dmc/rucio/0000755000175000002070000000000013214316024020451 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/dmc/rucio/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712311666211022446 xustar000000000000000027 mtime=1395092617.192596 30 atime=1513200595.580959616 30 ctime=1513200660.811757414 nordugrid-arc-5.4.2/src/hed/dmc/rucio/Makefile.am0000644000175000002070000000136312311666211022513 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libdmcrucio.la libdmcrucio_la_SOURCES = DataPointRucio.cpp DataPointRucio.h libdmcrucio_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) $(OPENSSL_CFLAGS) libdmcrucio_la_LIBADD = \ $(top_builddir)/src/external/cJSON/libcjson.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(OPENSSL_LIBS) libdmcrucio_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/dmc/rucio/PaxHeaders.7502/DataPointRucio.h0000644000000000000000000000012312311666211023444 
xustar000000000000000027 mtime=1395092617.192596 26 atime=1513200575.22071 30 ctime=1513200660.814757451 nordugrid-arc-5.4.2/src/hed/dmc/rucio/DataPointRucio.h0000644000175000002070000000712312311666211023515 0ustar00mockbuildmock00000000000000#ifndef __ARC_DATAPOINTRUCIO_H__ #define __ARC_DATAPOINTRUCIO_H__ #include #include #include namespace ArcDMCRucio { /// Store of auth tokens for different accounts. Not thread-safe so locking /// should be applied by the user of this class. class RucioTokenStore { private: /// Token associated to account and with expiry time class RucioToken { public: Arc::Time expirytime; std::string token; }; /// Map of account to RucioToken std::map tokens; static Arc::Logger logger; public: /// Add a token to the store. An existing token with same account will be replaced. void AddToken(const std::string& account, const Arc::Time& expirytime, const std::string& token); /// Get a token from the store. Returns empty string if token is not in the /// store or is expired. std::string GetToken(const std::string& account); }; /** * Rucio is the ATLAS Data Management System. A file in Rucio is represented * by a URL like rucio://rucio.cern.ch/replicas/scope/lfn. Calling GET/POST on * this URL with content-type metalink gives a list of physical locations * along with some metadata. Only reading from Rucio is currently supported. * * Before resolving a URL an auth token is obtained from the Rucio auth * service (currently hard-coded). These tokens are valid for one hour * and are cached to allow the same credentials to use a token many times. */ class DataPointRucio : public Arc::DataPointIndex { public: DataPointRucio(const Arc::URL& url, const Arc::UserConfig& usercfg, Arc::PluginArgument* parg); ~DataPointRucio(); static Plugin* Instance(Arc::PluginArgument *arg); virtual Arc::DataStatus Resolve(bool source); virtual Arc::DataStatus Resolve(bool source, const std::list& urls); virtual Arc::DataStatus Check(bool check_meta); virtual Arc::DataStatus PreRegister(bool replication, bool force = false); virtual Arc::DataStatus PostRegister(bool replication); virtual Arc::DataStatus PreUnregister(bool replication); virtual Arc::DataStatus Unregister(bool all); virtual Arc::DataStatus Stat(Arc::FileInfo& file, Arc::DataPoint::DataPointInfoType verb = INFO_TYPE_ALL); virtual Arc::DataStatus Stat(std::list& files, const std::list& urls, Arc::DataPoint::DataPointInfoType verb = INFO_TYPE_ALL); virtual Arc::DataStatus List(std::list& files, Arc::DataPoint::DataPointInfoType verb = INFO_TYPE_ALL); virtual Arc::DataStatus CreateDirectory(bool with_parents=false); virtual Arc::DataStatus Rename(const Arc::URL& newurl); protected: static Arc::Logger logger; private: /// Rucio account to use for communication with rucio std::string account; /// In-memory cache of auth tokens static RucioTokenStore tokens; /// Lock to protect access to tokens static Glib::Mutex lock; /// Rucio auth url Arc::URL auth_url; /// Length of time for which a token is valid const static Arc::Period token_validity; /// Check if a valid auth token exists in the cache and if not get a new one Arc::DataStatus checkToken(std::string& token); /// Call Rucio to obtain json of replica info Arc::DataStatus queryRucio(std::string& content, const std::string& token) const; /// Parse replica json Arc::DataStatus parseLocations(const std::string& content); }; } // namespace ArcDMCRucio #endif /* __ARC_DATAPOINTRUCIO_H__ */ 
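The RucioTokenStore declared above caches one authentication token per Rucio account and only hands a token back while its expiry time still lies in the future, so the one-hour Rucio tokens can be reused by many requests made with the same credentials. A minimal, self-contained sketch of that caching idea, written against the C++ standard library only and using hypothetical names (SimpleTokenCache, Entry) rather than the real class, might look like the following; the real store additionally depends on external locking (the Glib::Mutex in DataPointRucio) because it is shared between transfer threads.

#include <ctime>
#include <map>
#include <string>

// Hypothetical account -> (expiry, token) cache mirroring the idea of
// RucioTokenStore; not the actual implementation shipped with ARC.
class SimpleTokenCache {
public:
  // Add a token, replacing any token already cached for the account.
  void Add(const std::string& account, std::time_t expiry,
           const std::string& token) {
    Entry e;
    e.expiry = expiry;
    e.token = token;
    tokens_[account] = e;
  }

  // Return the cached token, or an empty string if no token is cached
  // for the account or the cached one has already expired.
  std::string Get(const std::string& account) const {
    std::map<std::string, Entry>::const_iterator it = tokens_.find(account);
    if (it == tokens_.end()) return std::string();
    if (it->second.expiry <= std::time(NULL)) return std::string();
    return it->second.token;
  }

private:
  struct Entry {
    std::time_t expiry;
    std::string token;
  };
  std::map<std::string, Entry> tokens_;
};

A caller would Add() a freshly obtained token together with an expiry of "now plus the token validity period" and call Get() before each Rucio request, requesting a new token whenever an empty string comes back.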
nordugrid-arc-5.4.2/src/hed/dmc/rucio/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315723022455 xustar000000000000000030 mtime=1513200595.624960154 30 atime=1513200649.143614708 30 ctime=1513200660.812757426 nordugrid-arc-5.4.2/src/hed/dmc/rucio/Makefile.in0000644000175000002070000006265413214315723022540 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/dmc/rucio DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libdmcrucio_la_DEPENDENCIES = \ 
$(top_builddir)/src/external/cJSON/libcjson.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) am_libdmcrucio_la_OBJECTS = libdmcrucio_la-DataPointRucio.lo libdmcrucio_la_OBJECTS = $(am_libdmcrucio_la_OBJECTS) libdmcrucio_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libdmcrucio_la_CXXFLAGS) $(CXXFLAGS) \ $(libdmcrucio_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libdmcrucio_la_SOURCES) DIST_SOURCES = $(libdmcrucio_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ 
CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 
= @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ 
top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libdmcrucio.la libdmcrucio_la_SOURCES = DataPointRucio.cpp DataPointRucio.h libdmcrucio_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) $(OPENSSL_CFLAGS) libdmcrucio_la_LIBADD = \ $(top_builddir)/src/external/cJSON/libcjson.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(OPENSSL_LIBS) libdmcrucio_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/dmc/rucio/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/dmc/rucio/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libdmcrucio.la: $(libdmcrucio_la_OBJECTS) $(libdmcrucio_la_DEPENDENCIES) $(libdmcrucio_la_LINK) -rpath $(pkglibdir) $(libdmcrucio_la_OBJECTS) $(libdmcrucio_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f 
*.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdmcrucio_la-DataPointRucio.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libdmcrucio_la-DataPointRucio.lo: DataPointRucio.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcrucio_la_CXXFLAGS) $(CXXFLAGS) -MT libdmcrucio_la-DataPointRucio.lo -MD -MP -MF $(DEPDIR)/libdmcrucio_la-DataPointRucio.Tpo -c -o libdmcrucio_la-DataPointRucio.lo `test -f 'DataPointRucio.cpp' || echo '$(srcdir)/'`DataPointRucio.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdmcrucio_la-DataPointRucio.Tpo $(DEPDIR)/libdmcrucio_la-DataPointRucio.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataPointRucio.cpp' object='libdmcrucio_la-DataPointRucio.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcrucio_la_CXXFLAGS) $(CXXFLAGS) -c -o libdmcrucio_la-DataPointRucio.lo `test -f 'DataPointRucio.cpp' || echo '$(srcdir)/'`DataPointRucio.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) 
$(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/dmc/rucio/PaxHeaders.7502/DataPointRucio.cpp0000644000000000000000000000012312771225157024011 xustar000000000000000027 mtime=1474636399.890904 26 atime=1513200575.22071 30 ctime=1513200660.813757438 nordugrid-arc-5.4.2/src/hed/dmc/rucio/DataPointRucio.cpp0000644000175000002070000004242012771225157024061 0ustar00mockbuildmock00000000000000#include #include #include #include #include #include "DataPointRucio.h" namespace ArcDMCRucio { using namespace Arc; Arc::Logger DataPointRucio::logger(Arc::Logger::getRootLogger(), "DataPoint.Rucio"); RucioTokenStore DataPointRucio::tokens; Glib::Mutex DataPointRucio::lock; const Period DataPointRucio::token_validity(3600); // token lifetime is 1h Arc::Logger RucioTokenStore::logger(Arc::Logger::getRootLogger(), "DataPoint.RucioTokenStore"); void RucioTokenStore::AddToken(const std::string& account, const Time& expirytime, const std::string& token) { // Replace any existing token if (tokens.find(account) != tokens.end()) { logger.msg(VERBOSE, "Replacing existing token for %s in Rucio token cache", account); } // Create new token RucioToken t; t.expirytime = expirytime; t.token = token; tokens[account] = t; } std::string RucioTokenStore::GetToken(const std::string& account) { // Search for account in list std::string token; if (tokens.find(account) != tokens.end()) { logger.msg(VERBOSE, "Found existing token for %s in Rucio token cache with expiry time %s", account, tokens[account].expirytime.str()); // If 5 mins left until expiry time, get new token if (tokens[account].expirytime <= Time()+300) { logger.msg(VERBOSE, "Rucio token for %s has expired or is about to expire", account); } else { token = tokens[account].token; } } return token; } // Copied from DataPointHTTP. Should be put in common place static int http2errno(int http_code) { // Codes taken from RFC 2616 section 10. Only 4xx and 5xx are treated as errors switch(http_code) { case 400: case 405: case 411: case 413: case 414: case 415: case 416: case 417: return EINVAL; case 401: case 403: case 407: return EACCES; case 404: case 410: return ENOENT; case 406: case 412: return EARCRESINVAL; case 408: return ETIMEDOUT; case 409: // Conflict. Not sure about this one. case 500: case 502: case 503: case 504: return EARCSVCTMP; case 501: case 505: return EOPNOTSUPP; default: return EARCOTHER; } } DataPointRucio::DataPointRucio(const URL& url, const UserConfig& usercfg, PluginArgument* parg) : DataPointIndex(url, usercfg, parg) { // Get RUCIO_ACCOUNT, try in order: // - rucioaccount URL option // - RUCIO_ACCOUNT environment variable // - nickname extracted from VOMS proxy account = url.Option("rucioaccount"); if (account.empty()) { account = Arc::GetEnv("RUCIO_ACCOUNT"); } if (account.empty()) { // Extract nickname from voms credential Credential cred(usercfg); account = getCredentialProperty(cred, "voms:nickname"); logger.msg(VERBOSE, "Extracted nickname %s from credentials to use for RUCIO_ACCOUNT", account); } if (account.empty()) { logger.msg(WARNING, "Failed to extract VOMS nickname from proxy"); } logger.msg(VERBOSE, "Using Rucio account %s", account); // Take auth url from env var if available, otherwise use hard-coded one // TODO: specify through url option instead? 
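  // The authentication endpoint is resolved once per DataPointRucio instance:
  // a RUCIO_AUTH_URL environment variable takes precedence, and the hard-coded
  // ATLAS production endpoint below is only a fallback. Tokens obtained from
  // this endpoint are cached per Rucio account in the class-wide RucioTokenStore
  // for token_validity (1 hour), and checkToken() requests a fresh one once less
  // than 5 minutes of lifetime remain. Example override (hypothetical endpoint,
  // for illustration only):
  //   export RUCIO_AUTH_URL=https://rucio-auth.example.org/auth/x509_proxy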
std::string rucio_auth_url(Arc::GetEnv("RUCIO_AUTH_URL")); if (rucio_auth_url.empty()) { rucio_auth_url = "https://voatlasrucio-auth-prod.cern.ch/auth/x509_proxy"; } auth_url = URL(rucio_auth_url); } DataPointRucio::~DataPointRucio() {} Plugin* DataPointRucio::Instance(PluginArgument *arg) { DataPointPluginArgument *dmcarg = dynamic_cast(arg); if (!dmcarg) return NULL; if (((const URL&)(*dmcarg)).Protocol() != "rucio") return NULL; return new DataPointRucio(*dmcarg, *dmcarg, arg); } DataStatus DataPointRucio::Check(bool check_meta) { // Simply check that the file can be resolved DataStatus r = Resolve(true); if (r) return r; return DataStatus(DataStatus::CheckError, r.GetErrno(), r.GetDesc()); } DataStatus DataPointRucio::Resolve(bool source) { // Check token and get new one if necessary std::string token; DataStatus r = checkToken(token); if (!r) return r; bool osresolve = (url.Path().find("/objectstores/") != std::string::npos); // Check if Rucio path is ok: read/write to objectstores and read only from replicas if (!osresolve && !(source && url.Path().find("/replicas/") != std::string::npos)) { logger.msg(ERROR, "Bad path for %s: Rucio supports read/write at /objectstores and read-only at /replicas", url.str()); return DataStatus(source ? DataStatus::ReadResolveError : DataStatus::WriteResolveError, EINVAL, "Bad path for Rucio"); } // Call Rucio to get a signed URL for the location std::string content; r = queryRucio(content, token); if (!r) return r; if (!osresolve) { return parseLocations(content); } // content should be a signed URL URL osurl(content, true); if (!osurl) { logger.msg(ERROR, "Can't handle URL %s", osurl.str()); return DataStatus(source ? DataStatus::ReadResolveError : DataStatus::WriteResolveError, EINVAL, "Bad signed URL returned from Rucio"); } // Add URL options to replicas for (std::map::const_iterator opt = url.CommonLocOptions().begin(); opt != url.CommonLocOptions().end(); opt++) osurl.AddOption(opt->first, opt->second, false); for (std::map::const_iterator opt = url.Options().begin(); opt != url.Options().end(); opt++) osurl.AddOption(opt->first, opt->second, false); // OS doesn't accept absolute URIs osurl.AddOption("relativeuri=yes"); AddLocation(osurl, osurl.Host()); return DataStatus::Success; } DataStatus DataPointRucio::Resolve(bool source, const std::list& urls) { if (!source) return DataStatus(DataStatus::WriteResolveError, ENOTSUP, "Writing to Rucio is not supported"); if (urls.empty()) return DataStatus(DataStatus::ReadResolveError, ENOTSUP, "Bulk resolving is not supported"); // No bulk yet so query in series for (std::list::const_iterator i = urls.begin(); i != urls.end(); ++i) { DataStatus r = (*i)->Resolve(source); if (!r) return r; } return DataStatus::Success; } DataStatus DataPointRucio::Stat(FileInfo& file, DataPoint::DataPointInfoType verb) { std::list files; std::list urls(1, this); DataStatus r = Stat(files, urls, verb); if (!r) { return r; } if (files.empty()) { return DataStatus(DataStatus::StatError, EARCRESINVAL, "No results returned"); } if (!HaveLocations()) { return DataStatus(DataStatus::StatError, ENOENT); } file = files.front(); return DataStatus::Success; } DataStatus DataPointRucio::Stat(std::list& files, const std::list& urls, DataPointInfoType verb) { files.clear(); DataStatus r = Resolve(true, urls); if (!r) { return DataStatus(DataStatus::StatError, r.GetErrno(), r.GetDesc()); } for (std::list::const_iterator f = urls.begin(); f != urls.end(); ++f) { FileInfo info; if (!(*f)->HaveLocations()) { logger.msg(ERROR, "No 
locations found for %s", (*f)->GetURL().str()); } else { info.SetName((*f)->GetURL().Path().substr((*f)->GetURL().Path().rfind('/')+1)); info.SetType(FileInfo::file_type_file); info.SetSize((*f)->GetSize()); info.SetCheckSum((*f)->GetCheckSum()); for (; (*f)->LocationValid(); (*f)->NextLocation()) { info.AddURL((*f)->CurrentLocation()); } } files.push_back(info); } return DataStatus::Success; } DataStatus DataPointRucio::PreRegister(bool replication, bool force) { if (url.Path().find("/objectstores/") == 0) return DataStatus::Success; return DataStatus(DataStatus::PreRegisterError, ENOTSUP, "Writing to Rucio is not supported"); } DataStatus DataPointRucio::PostRegister(bool replication) { if (url.Path().find("/objectstores/") == 0) return DataStatus::Success; return DataStatus(DataStatus::PostRegisterError, ENOTSUP, "Writing to Rucio is not supported"); } DataStatus DataPointRucio::PreUnregister(bool replication) { if (url.Path().find("/objectstores/") == 0) return DataStatus::Success; return DataStatus(DataStatus::UnregisterError, ENOTSUP, "Deleting from Rucio is not supported"); } DataStatus DataPointRucio::Unregister(bool all) { return DataStatus(DataStatus::UnregisterError, ENOTSUP, "Deleting from Rucio is not supported"); } DataStatus DataPointRucio::List(std::list& files, DataPoint::DataPointInfoType verb) { return DataStatus(DataStatus::ListError, ENOTSUP, "Listing in Rucio is not supported"); } DataStatus DataPointRucio::CreateDirectory(bool with_parents) { return DataStatus(DataStatus::CreateDirectoryError, ENOTSUP, "Creating directories in Rucio is not supported"); } DataStatus DataPointRucio::Rename(const URL& newurl) { return DataStatus(DataStatus::RenameError, ENOTSUP, "Renaming in Rucio is not supported"); } DataStatus DataPointRucio::checkToken(std::string& token) { // Locking the entire method prevents multiple concurrent calls to get tokens Glib::Mutex::Lock l(lock); std::string t = tokens.GetToken(account); if (!t.empty()) { token = t; return DataStatus::Success; } // Get a new token MCCConfig cfg; usercfg.ApplyToConfig(cfg); ClientHTTP client(cfg, auth_url, usercfg.Timeout()); std::multimap attrmap; std::string method("GET"); attrmap.insert(std::pair("X-Rucio-Account", account)); ClientHTTPAttributes attrs(method, auth_url.Path(), attrmap); HTTPClientInfo transfer_info; PayloadRaw request; PayloadRawInterface *response = NULL; MCC_Status r = client.process(attrs, &request, &transfer_info, &response); if (!r) { return DataStatus(DataStatus::ReadResolveError, "Failed to contact auth server: " + r.getExplanation()); } if (transfer_info.code != 200) { return DataStatus(DataStatus::ReadResolveError, http2errno(transfer_info.code), "HTTP error when contacting auth server: " + transfer_info.reason); } // Get auth token from header if (transfer_info.headers.find("HTTP:x-rucio-auth-token") == transfer_info.headers.end()) { return DataStatus(DataStatus::ReadResolveError, "Failed to obtain auth token"); } token = transfer_info.headers.find("HTTP:x-rucio-auth-token")->second; tokens.AddToken(account, Time()+token_validity, token); logger.msg(DEBUG, "Acquired auth token for %s: %s", account, token); return DataStatus::Success; } DataStatus DataPointRucio::queryRucio(std::string& content, const std::string& token) const { // SSL error happens if client certificate is specified, so only set CA dir MCCConfig cfg; cfg.AddCADir(usercfg.CACertificatesDirectory()); // Switch rucio protocol to http(s) for lookup URL rucio_url(url); rucio_url.ChangeProtocol(url.Port() == 80 ? 
"http" : "https"); if (rucio_url.Port() == -1) { rucio_url.ChangePort(url.Protocol() == "http" ? 80 : 443); } ClientHTTP client(cfg, rucio_url, usercfg.Timeout()); std::multimap attrmap; std::string method("GET"); attrmap.insert(std::pair("X-Rucio-Auth-Token", token)); // Adding the line below makes rucio return a metalink xml //attrmap.insert(std::pair("Accept", "application/metalink4+xml")); ClientHTTPAttributes attrs(method, url.Path(), attrmap); HTTPClientInfo transfer_info; PayloadRaw request; PayloadRawInterface *response = NULL; MCC_Status r = client.process(attrs, &request, &transfer_info, &response); if (!r) { return DataStatus(DataStatus::ReadResolveError, "Failed to contact server: " + r.getExplanation()); } if (transfer_info.code != 200) { std::string errormsg(transfer_info.reason); // Extract Rucio exception if any if (transfer_info.headers.find("HTTP:exceptionmessage") != transfer_info.headers.end()) { errormsg += ": " + transfer_info.headers.find("HTTP:exceptionmessage")->second; } return DataStatus(DataStatus::ReadResolveError, http2errno(transfer_info.code), "HTTP error when contacting server: " + errormsg); } PayloadStreamInterface* instream = NULL; try { instream = dynamic_cast(dynamic_cast(response)); } catch(std::exception& e) { return DataStatus(DataStatus::ReadResolveError, "Unexpected response from server"); } if (!instream) { return DataStatus(DataStatus::ReadResolveError, "Unexpected response from server"); } std::string buf; while (instream->Get(buf)) content += buf; logger.msg(DEBUG, "Rucio returned %s", content); return DataStatus::Success; } DataStatus DataPointRucio::parseLocations(const std::string& content) { // parse JSON: // {"adler32": "ffa2c799", // "name": "EVNT.545023._000082.pool.root.1", // "replicas": [], // "rses": {"LRZ-LMU_DATADISK": ["srm://lcg-lrz-srm.grid.lrz.de:8443/srm/managerv2?SFN=/pnfs/lrz-muenchen.de/data/atlas/dq2/atlasdatadisk/rucio/mc11_7TeV/6c/13/EVNT.545023._000082.pool.root.1"], // "INFN-FRASCATI_DATADISK": ["srm://atlasse.lnf.infn.it:8446/srm/managerv2?SFN=/dpm/lnf.infn.it/home/atlas/atlasdatadisk/rucio/mc11_7TeV/6c/13/EVNT.545023._000082.pool.root.1"], // "TAIWAN-LCG2_DATADISK": ["srm://f-dpm001.grid.sinica.edu.tw:8446/srm/managerv2?SFN=/dpm/grid.sinica.edu.tw/home/atlas/atlasdatadisk/rucio/mc11_7TeV/6c/13/EVNT.545023._000082.pool.root.1"]}, // "bytes": 69234676, // "space_token": "ATLASDATADISK", // "scope": "mc11_7TeV", // "md5": null} if (content.empty()) { // empty response means no such file return DataStatus(DataStatus::ReadResolveError, ENOENT); } cJSON *root = cJSON_Parse(content.c_str()); if (!root) { logger.msg(ERROR, "Failed to parse Rucio response: %s", content); cJSON_Delete(root); return DataStatus(DataStatus::ReadResolveError, EARCRESINVAL, "Failed to parse Rucio response"); } cJSON *name = cJSON_GetObjectItem(root, "name"); if (!name) { logger.msg(ERROR, "Filename not returned in Rucio response: %s", content); cJSON_Delete(root); return DataStatus(DataStatus::ReadResolveError, EARCRESINVAL, "Failed to parse Rucio response"); } std::string filename(name->valuestring); if (filename != url.Path().substr(url.Path().rfind('/')+1)) { logger.msg(ERROR, "Unexpected name returned in Rucio response: %s", content); cJSON_Delete(root); return DataStatus(DataStatus::ReadResolveError, EARCRESINVAL, "Failed to parse Rucio response"); } cJSON *rses = cJSON_GetObjectItem(root, "rses"); if (!rses) { logger.msg(ERROR, "No RSE information returned in Rucio response: %s", content); cJSON_Delete(root); return 
DataStatus(DataStatus::ReadResolveError, EARCRESINVAL, "Failed to parse Rucio response"); } cJSON *rse = rses->child; while (rse) { cJSON *replicas = rse->child; while(replicas) { URL loc(std::string(replicas->valuestring)); // Add URL options to replicas for (std::map::const_iterator opt = url.CommonLocOptions().begin(); opt != url.CommonLocOptions().end(); opt++) loc.AddOption(opt->first, opt->second, false); for (std::map::const_iterator opt = url.Options().begin(); opt != url.Options().end(); opt++) loc.AddOption(opt->first, opt->second, false); AddLocation(loc, loc.ConnectionURL()); replicas = replicas->next; } rse = rse->next; } cJSON *fsize = cJSON_GetObjectItem(root, "bytes"); if (!fsize || fsize->type == cJSON_NULL) { logger.msg(WARNING, "No filesize information returned in Rucio response for %s", filename); } else { SetSize((unsigned long long int)fsize->valuedouble); logger.msg(DEBUG, "%s: size %llu", filename, GetSize()); } cJSON *csum = cJSON_GetObjectItem(root, "adler32"); if (!csum || csum->type == cJSON_NULL) { logger.msg(WARNING, "No checksum information returned in Rucio response for %s", filename); } else { SetCheckSum(std::string("adler32:") + std::string(csum->valuestring)); logger.msg(DEBUG, "%s: checksum %s", filename, GetCheckSum()); } cJSON_Delete(root); if (!HaveLocations()) { logger.msg(ERROR, "No locations found for %s", url.str()); return DataStatus(DataStatus::ReadResolveError, ENOENT); } return DataStatus::Success; } } // namespace ArcDMCRucio extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "rucio", "HED:DMC", "ATLAS Data Management System", 0, &ArcDMCRucio::DataPointRucio::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/dmc/PaxHeaders.7502/file0000644000000000000000000000013213214316024020124 xustar000000000000000030 mtime=1513200660.792757181 30 atime=1513200668.722854169 30 ctime=1513200660.792757181 nordugrid-arc-5.4.2/src/hed/dmc/file/0000755000175000002070000000000013214316024020247 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/dmc/file/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515022246 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200595.217955176 30 ctime=1513200660.789757145 nordugrid-arc-5.4.2/src/hed/dmc/file/Makefile.am0000644000175000002070000000102412052416515022305 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libdmcfile.la libdmcfile_la_SOURCES = DataPointFile.cpp DataPointFile.h libdmcfile_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libdmcfile_la_LIBADD = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libdmcfile_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/dmc/file/PaxHeaders.7502/DataPointFile.cpp0000644000000000000000000000012313213471711023374 xustar000000000000000027 mtime=1512993737.651264 26 atime=1513200575.19171 30 ctime=1513200660.791757169 nordugrid-arc-5.4.2/src/hed/dmc/file/DataPointFile.cpp0000644000175000002070000007104213213471711023446 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include // NOTE: On Solaris errno is not working properly if cerrno is included first #include #include #include #include #include #include #include #include #include #include #include #include #include 
#include #include #include #include #include "DataPointFile.h" namespace ArcDMCFile { using namespace Arc; static char const * const stdfds[] = { "stdin", "stdout", "stderr" }; Logger DataPointFile::logger(Logger::getRootLogger(), "DataPoint.File"); DataPointFile::DataPointFile(const URL& url, const UserConfig& usercfg, PluginArgument* parg) : DataPointDirect(url, usercfg, parg), reading(false), writing(false), is_channel(false), channel_num(0) { fd = -1; fa = NULL; if (url.Protocol() == "file") { cache = false; is_channel = false; local = true; } else if (url.Protocol() == "stdio") { linkable = false; is_channel = true; } } DataPointFile::~DataPointFile() { StopReading(); StopWriting(); } Plugin* DataPointFile::Instance(PluginArgument *arg) { DataPointPluginArgument *dmcarg = dynamic_cast(arg); if (!dmcarg) return NULL; if (((const URL &)(*dmcarg)).Protocol() != "file" && ((const URL &)(*dmcarg)).Protocol() != "stdio") return NULL; return new DataPointFile(*dmcarg, *dmcarg, dmcarg); } int DataPointFile::open_channel() { // map known channels to strings if (!stringto(url.Path().substr(1, url.Path().length()-1), channel_num)) { // requested channel is not a number if (url.Path() == "/stdin") channel_num = STDIN_FILENO; else if (url.Path() == "/stdout") channel_num = STDOUT_FILENO; else if (url.Path() == "/stderr") channel_num = STDERR_FILENO; else { logger.msg(ERROR, "Unknown channel %s for stdio protocol", url.Path()); fd = -1; return fd; } } int fd = dup(channel_num); if (fd == -1) { if (channel_num < 3) logger.msg(ERROR, "Failed to open stdio channel %s", stdfds[channel_num]); else logger.msg(ERROR, "Failed to open stdio channel %d", channel_num); } return fd; } void DataPointFile::read_file_start(void* arg) { ((DataPointFile*)arg)->read_file(); } void DataPointFile::read_file() { bool limit_length = false; unsigned long long int range_length = 0; unsigned long long int offset = 0; bool do_cksum = true; if (range_end > range_start) { range_length = range_end - range_start; limit_length = true; if(fd != -1) ::lseek(fd, range_start, SEEK_SET); if(fa) fa->fa_lseek(range_start, SEEK_SET); offset = range_start; if(offset > 0) { // Note: checksum calculation not possible do_cksum = false; } } else { if(fd != -1) lseek(fd, 0, SEEK_SET); if(fa) fa->fa_lseek(0, SEEK_SET); } for (;;) { if (limit_length) if (range_length == 0) break; /* read from fd here and push to buffer */ /* 1. claim buffer */ int h; unsigned int l; if (!buffer->for_read(h, l, true)) { /* failed to get buffer - must be error or request to exit */ buffer->error_read(true); break; } if (buffer->error()) { buffer->is_read(h, 0, 0); break; } /* 2. read */ if (limit_length) if (l > range_length) l = range_length; unsigned long long int p = (unsigned long long int)(-1); int ll = -1; if(fd != -1) { p = ::lseek(fd, 0, SEEK_CUR); if (p == (unsigned long long int)(-1)) p = offset; ll = ::read(fd, (*(buffer))[h], l); } if(fa) { p = fa->fa_lseek(0, SEEK_CUR); if (p == (unsigned long long int)(-1)) p = offset; ll = fa->fa_read((*(buffer))[h], l); } if (ll == -1) { /* error */ buffer->is_read(h, 0, 0); buffer->error_read(true); break; } if (ll == 0) { /* eof */ buffer->is_read(h, 0, 0); if(do_cksum) { for(std::list::iterator cksum = checksums.begin(); cksum != checksums.end(); ++cksum) { if(*cksum) (*cksum)->end(); } } break; } if(do_cksum) { for(std::list::iterator cksum = checksums.begin(); cksum != checksums.end(); ++cksum) { if(*cksum) (*cksum)->add((*(buffer))[h], ll); } } /* 3. 
announce */ buffer->is_read(h, ll, p); if (limit_length) { if (ll > range_length) range_length = 0; else range_length -= ll; } offset += ll; // for non-seakable files } if(fd != -1) close(fd); if(fa) fa->fa_close(); buffer->eof_read(true); } void DataPointFile::write_file_start(void* arg) { ((DataPointFile*)arg)->write_file(); } class write_file_chunks { private: typedef struct { unsigned long long int start; unsigned long long int end; } chunk_t; std::list chunks; public: write_file_chunks(void) { } // which is file EOF according to collected information unsigned long long int eof(void) { if(chunks.empty()) return 0; return (--(chunks.end()))->end; } // how far non-interruptive file chunk reaches unsigned long long int extends(void) { if(chunks.empty()) return 0; if(chunks.begin()->start > 0) return 0; return chunks.begin()->end; } void add(unsigned long long int start, unsigned long long int end) { chunk_t c; c.start = start; c.end = end; if(chunks.empty()) { chunks.push_back(c); return; } for(std::list::iterator chunk = chunks.begin(); chunk != chunks.end();++chunk) { if(end < chunk->start) { chunks.insert(chunk,c); return; } if(((start >= chunk->start) && (start <= chunk->end)) || ((end >= chunk->start) && (end <= chunk->end))) { if(chunk->start < start) start = chunk->start; if(chunk->end > end) end = chunk->end; chunks.erase(chunk); add(start,end); return; } } chunks.push_back(c); } }; void DataPointFile::write_file() { unsigned long long int cksum_p = 0; bool do_cksum = (checksums.size() > 0); write_file_chunks cksum_chunks; for (;;) { /* take from buffer and write to fd */ /* 1. claim buffer */ int h; unsigned int l; unsigned long long int p; if (!buffer->for_write(h, l, p, true)) { /* failed to get buffer - must be error or request to exit */ if (!buffer->eof_read()) buffer->error_write(true); buffer->eof_write(true); break; } if (buffer->error()) { buffer->is_written(h); buffer->eof_write(true); break; } /* 2. write */ unsigned int l_ = 0; int ll = -1; if(fd != -1) { off_t coff = lseek(fd, p, SEEK_SET); if(coff == p) { ll = 0; while (l_ < l) { ll = write(fd, (*(buffer))[h] + l_, l - l_); if (ll == -1) break; // error l_ += ll; } } } if(fa) { off_t coff = fa->fa_lseek(p, SEEK_SET); if(coff == p) { ll = 0; while (l_ < l) { ll = fa->fa_write((*(buffer))[h] + l_, l - l_); if (ll == -1) break; // error l_ += ll; } } } if (ll == -1) { // error buffer->is_written(h); buffer->error_write(true); buffer->eof_write(true); break; } /* 2'. checksum */ if(do_cksum) { cksum_chunks.add(p,p+l); if(p == cksum_p) { for(std::list::iterator cksum = checksums.begin(); cksum != checksums.end(); ++cksum) { if(*cksum) (*cksum)->add((*(buffer))[h], l); } cksum_p = p+l; } if(cksum_chunks.extends() > cksum_p) { // from file off_t coff = 0; if(fd != -1) coff = lseek(fd, cksum_p, SEEK_SET); if(fa) coff = fa->fa_lseek(cksum_p, SEEK_SET); if(coff == cksum_p) { const unsigned int tbuf_size = 4096; // too small? char* tbuf = new char[tbuf_size]; for(;cksum_chunks.extends() > cksum_p;) { unsigned int l = tbuf_size; if(l > (cksum_chunks.extends()-cksum_p)) l=cksum_chunks.extends()-cksum_p; int ll = -1; if(fd != -1) ll = read(fd,tbuf,l); if(fa) ll = fa->fa_read(tbuf,l); if(ll < 0) { do_cksum=false; break; } for(std::list::iterator cksum = checksums.begin(); cksum != checksums.end(); ++cksum) { if(*cksum) (*cksum)->add(tbuf, ll); } cksum_p += ll; } delete[] tbuf; } } } /* 3. announce */ buffer->is_written(h); } if (fd != -1) { #ifndef WIN32 // This is for broken filesystems. Specifically for Lustre. 
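    // fsync() forces the written data out to storage so that write errors which
    // such filesystems may only report at sync time are caught here instead of
    // being lost at close(). Any failure (other than EINVAL, returned by special
    // files such as stdout) marks the buffer with error_write(), and
    // StopWriting() removes the partially written file when the buffer reports
    // an error.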
if (fsync(fd) != 0 && errno != EINVAL) { // this error is caused by special files like stdout logger.msg(ERROR, "fsync of file %s failed: %s", url.Path(), StrError(errno)); buffer->error_write(true); } #endif if(close(fd) != 0) { logger.msg(ERROR, "closing file %s failed: %s", url.Path(), StrError(errno)); buffer->error_write(true); } } if (fa) { // Lustre? if(!fa->fa_close()) { logger.msg(ERROR, "closing file %s failed: %s", url.Path(), StrError(errno)); buffer->error_write(true); } } if((do_cksum) && (cksum_chunks.eof() == cksum_p)) { for(std::list::iterator cksum = checksums.begin(); cksum != checksums.end(); ++cksum) { if(*cksum) (*cksum)->end(); } } } DataStatus DataPointFile::Check(bool check_meta) { if (reading) return DataStatus(DataStatus::IsReadingError, EARCLOGIC); if (writing) return DataStatus(DataStatus::IsWritingError, EARCLOGIC); // check_file_access() is not always correctly evaluating permissions. // TODO: redo int res = usercfg.GetUser().check_file_access(url.Path(), O_RDONLY); if (res != 0) { logger.msg(VERBOSE, "File is not accessible: %s", url.Path()); return DataStatus(DataStatus::CheckError, errno, "File is not accesible "+url.Path()); } if (check_meta) { struct stat st; if (!FileStat(url.Path(), &st, usercfg.GetUser().get_uid(), usercfg.GetUser().get_gid(), true)) { logger.msg(VERBOSE, "Can't stat file: %s: %s", url.Path(), StrError(errno)); return DataStatus(DataStatus::CheckError, errno, "Failed to stat file "+url.Path()); } SetSize(st.st_size); SetModified(st.st_mtime); } return DataStatus::Success; } static DataStatus do_stat(const std::string& path, FileInfo& file, DataPoint::DataPointInfoType verb, uid_t uid, gid_t gid) { struct stat st; if (!FileStat(path, &st, uid, gid, true)) { return DataStatus(DataStatus::StatError, errno, "Failed to stat file "+path); } if(S_ISREG(st.st_mode)) { file.SetType(FileInfo::file_type_file); } else if(S_ISDIR(st.st_mode)) { file.SetType(FileInfo::file_type_dir); } else { file.SetType(FileInfo::file_type_unknown); } file.SetSize(st.st_size); file.SetModified(st.st_mtime); file.SetMetaData("atime", (Time(st.st_atime)).str()); file.SetMetaData("ctime", (Time(st.st_ctime)).str()); file.SetMetaData("group", tostring(st.st_gid)); file.SetMetaData("owner", tostring(st.st_uid)); std::string perms; if (st.st_mode & S_IRUSR) perms += 'r'; else perms += '-'; if (st.st_mode & S_IWUSR) perms += 'w'; else perms += '-'; if (st.st_mode & S_IXUSR) perms += 'x'; else perms += '-'; #ifndef WIN32 if (st.st_mode & S_IRGRP) perms += 'r'; else perms += '-'; if (st.st_mode & S_IWGRP) perms += 'w'; else perms += '-'; if (st.st_mode & S_IXGRP) perms += 'x'; else perms += '-'; if (st.st_mode & S_IROTH) perms += 'r'; else perms += '-'; if (st.st_mode & S_IWOTH) perms += 'w'; else perms += '-'; if (st.st_mode & S_IXOTH) perms += 'x'; else perms += '-'; #endif file.SetMetaData("accessperm", perms); return DataStatus::Success; } DataStatus DataPointFile::Stat(FileInfo& file, DataPointInfoType verb) { if(is_channel) { int fd = open_channel(); if (fd == -1){ logger.msg(VERBOSE, "Can't stat stdio channel %s", url.str()); return DataStatus(DataStatus::StatError, EBADF, "Can't stat channel"); } struct stat st; if(::fstat(fd, &st) != 0) { ::close(fd); logger.msg(VERBOSE, "Can't stat stdio channel %s", url.str()); return DataStatus(DataStatus::StatError, EBADF, "Can't stat channel"); } ::close(fd); if (channel_num < 3) file.SetName(stdfds[channel_num]); else file.SetName(tostring(channel_num)); file.SetType(FileInfo::file_type_file); file.SetMetaData("type", 
"device"); file.SetSize(st.st_size); file.SetModified(st.st_mtime); return DataStatus::Success; } std::string name = url.Path(); // to make exact same behaviour for arcls and ngls all // lines down to file.SetName(name) should be removed // (ngls gives full path) std::string::size_type p = name.rfind(G_DIR_SEPARATOR); while(p != std::string::npos) { if(p != (name.length()-1)) { name = name.substr(p); break; } name.resize(p); p = name.rfind(G_DIR_SEPARATOR); } // remove first slash if(name.find_first_of(G_DIR_SEPARATOR) == 0){ name = name.substr(name.find_first_not_of(G_DIR_SEPARATOR), name.length()-1); } file.SetName(name); DataStatus res = do_stat(url.Path(), file, verb, usercfg.GetUser().get_uid(), usercfg.GetUser().get_gid()); if (!res) { logger.msg(VERBOSE, "Can't stat file: %s: %s", url.Path(), std::string(res)); return res; } SetSize(file.GetSize()); SetModified(file.GetModified()); return DataStatus::Success; } DataStatus DataPointFile::List(std::list& files, DataPointInfoType verb) { FileInfo file; DataStatus res = Stat(file, verb); if (!res) { return DataStatus(DataStatus::ListError, res.GetErrno(), res.GetDesc()); } if(file.GetType() != FileInfo::file_type_dir) { logger.msg(WARNING, "%s is not a directory", url.Path()); return DataStatus(DataStatus::ListError, ENOTDIR, url.Path()+" is not a directory"); } try { Glib::Dir dir(url.Path()); std::string file_name; while ((file_name = dir.read_name()) != "") { std::string fname = url.Path() + G_DIR_SEPARATOR_S + file_name; std::list::iterator f = files.insert(files.end(), FileInfo(file_name.c_str())); if (verb & (INFO_TYPE_TYPE | INFO_TYPE_TIMES | INFO_TYPE_CONTENT | INFO_TYPE_ACCESS)) { do_stat(fname, *f, verb, usercfg.GetUser().get_uid(), usercfg.GetUser().get_gid()); } } } catch (Glib::FileError& e) { logger.msg(VERBOSE, "Failed to read object %s: %s", url.Path(), e.what()); return DataStatus(DataStatus::ListError, "Failed to list directory "+url.Path()+": "+e.what()); } return DataStatus::Success; } DataStatus DataPointFile::Remove() { if (reading) return DataStatus(DataStatus::IsReadingError, EARCLOGIC); if (writing) return DataStatus(DataStatus::IsReadingError, EARCLOGIC); std::string path(url.Path()); struct stat st; if(!FileStat(path, &st, usercfg.GetUser().get_uid(), usercfg.GetUser().get_gid(), true)) { logger.msg(VERBOSE, "File is not accessible %s: %s", path, StrError(errno)); return DataStatus(DataStatus::DeleteError, errno, "Failed to stat file "+path); } // path is a directory if(S_ISDIR(st.st_mode)) { if (rmdir(path.c_str()) != 0) { logger.msg(VERBOSE, "Can't delete directory %s: %s", path, StrError(errno)); return DataStatus(DataStatus::DeleteError, errno, "Failed to remove directory "+path); } return DataStatus::Success; } // path is a file if(!FileDelete(path) && errno != ENOENT) { logger.msg(VERBOSE, "Can't delete file %s: %s", path, StrError(errno)); return DataStatus(DataStatus::DeleteError, errno, "Failed to delete file "+path); } return DataStatus::Success; } DataStatus DataPointFile::CreateDirectory(bool with_parents) { std::string dirpath = Glib::path_get_dirname(url.Path()); if(dirpath == ".") dirpath = G_DIR_SEPARATOR_S; logger.msg(VERBOSE, "Creating directory %s", dirpath); if (!DirCreate(dirpath, S_IRWXU, with_parents)) { return DataStatus(DataStatus::CreateDirectoryError, errno, "Failed to create directory "+dirpath); } return DataStatus::Success; } DataStatus DataPointFile::Rename(const URL& newurl) { logger.msg(VERBOSE, "Renaming %s to %s", url.Path(), newurl.Path()); if (rename(url.Path().c_str(), 
newurl.Path().c_str()) != 0) { logger.msg(VERBOSE, "Can't rename file %s: %s", url.Path(), StrError(errno)); return DataStatus(DataStatus::RenameError, errno, "Failed to rename file "+url.Path()); } return DataStatus::Success; } DataStatus DataPointFile::StartReading(DataBuffer& buf) { if (reading) return DataStatus::IsReadingError; if (writing) return DataStatus::IsWritingError; reading = true; /* try to open */ int flags = O_RDONLY; #ifdef WIN32 flags |= O_BINARY; #endif uid_t uid = usercfg.GetUser().get_uid(); gid_t gid = usercfg.GetUser().get_gid(); if (is_channel){ fa = NULL; fd = open_channel(); if (fd == -1) { reading = false; return DataStatus(DataStatus::ReadStartError, EBADF, "Channel number is not defined"); } } else if(((!uid) || (uid == getuid())) && ((!gid) || (gid == getgid()))) { fa = NULL; fd = ::open(url.Path().c_str(), flags); if (fd == -1) { logger.msg(VERBOSE, "Failed to open %s for reading: %s", url.Path(), StrError(errno)); reading = false; return DataStatus(DataStatus::ReadStartError, errno, "Failed to open file "+url.Path()+" for reading"); } /* provide some metadata */ struct stat st; if (::fstat(fd, &st) == 0) { SetSize(st.st_size); SetModified(st.st_mtime); } } else { fd = -1; fa = new FileAccess; if(!fa->fa_setuid(uid,gid)) { delete fa; fa = NULL; logger.msg(VERBOSE, "Failed to switch user id to %d/%d", (unsigned int)uid, (unsigned int)gid); reading = false; return DataStatus(DataStatus::ReadStartError, EARCUIDSWITCH, "Failed to switch user id to "+tostring(uid)+"/"+tostring(gid)); } if(!fa->fa_open(url.Path(), flags, 0)) { delete fa; fa = NULL; logger.msg(VERBOSE, "Failed to create/open file %s: %s", url.Path(), StrError(errno)); reading = false; return DataStatus(DataStatus::ReadStartError, errno, "Failed to open file "+url.Path()+" for reading"); } struct stat st; if(fa->fa_fstat(st)) { SetSize(st.st_size); SetModified(st.st_mtime); } } buffer = &buf; /* create thread to maintain reading */ if(!CreateThreadFunction(&DataPointFile::read_file_start,this,&transfers_started)) { if(fd != -1) ::close(fd); if(fa) { fa->fa_close(); delete fa; } fd = -1; fa = NULL; logger.msg(VERBOSE, "Failed to create thread"); reading = false; return DataStatus(DataStatus::ReadStartError, "Failed to create new thread"); } return DataStatus::Success; } DataStatus DataPointFile::StopReading() { if (!reading) return DataStatus(DataStatus::ReadStopError, EARCLOGIC, "Not reading"); reading = false; if (!buffer->eof_read()) { buffer->error_read(true); /* trigger transfer error */ if(fd != -1) ::close(fd); if(fa) fa->fa_close(); // protect? 
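      // Closing the descriptor here, while the reading thread may still be
      // using it, interrupts the transfer: subsequent read()/lseek() calls in
      // read_file() fail and the thread stops early. transfers_started.wait()
      // below then blocks until read_file() has exited before the FileAccess
      // object is deleted. The "// protect?" note above presumably refers to
      // this early close not being synchronised with the close that
      // read_file() itself performs on normal completion.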
fd = -1; } // buffer->wait_eof_read(); transfers_started.wait(); /* wait till reading thread exited */ delete fa; fa = NULL; // TODO: error description from reading thread if (buffer->error_read()) return DataStatus::ReadError; return DataStatus::Success; } static bool file_allocate(int fd, FileAccess* fa, unsigned long long int& fsize) { if(fa) { off_t nsize = fa->fa_fallocate(fsize); if((fa->geterrno() == 0) || (fa->geterrno() == ENOSPC)) { fsize = nsize; return true; } return false; } #ifdef HAVE_POSIX_FALLOCATE int err = posix_fallocate(fd,0,fsize); if((err == 0) || (err == ENOSPC)) { fsize = lseek(fd,0,SEEK_END); return true; } return false; #else unsigned long long int old_size = lseek(fd, 0, SEEK_END); if(old_size >= fsize) { fsize = old_size; return true; } char buf[65536]; memset(buf, 0xFF, sizeof(buf)); while(old_size < fsize) { size_t l = sizeof(buf); if (l > (fsize - old_size)) l = fsize - old_size; // because filesytem can skip empty blocks do real write if (write(fd, buf, l) == -1) { fsize = old_size; return (errno = ENOSPC); } old_size = lseek(fd, 0, SEEK_END); } fsize = old_size; return true; #endif } DataStatus DataPointFile::StartWriting(DataBuffer& buf, DataCallback *space_cb) { if (reading) return DataStatus::IsReadingError; if (writing) return DataStatus::IsWritingError; writing = true; uid_t uid = usercfg.GetUser().get_uid(); gid_t gid = usercfg.GetUser().get_gid(); /* try to open */ buffer = &buf; if (is_channel) { fd = open_channel(); if (fd == -1) { buffer->error_write(true); buffer->eof_write(true); writing = false; return DataStatus(DataStatus::WriteStartError, EBADF, "Channel number is not defined"); } } else { /* do not check permissions to create anything here - suppose it path was checked at higher level */ /* make directories */ if (url.Path().empty()) { logger.msg(VERBOSE, "Invalid url: %s", url.str()); buffer->error_write(true); buffer->eof_write(true); writing = false; return DataStatus(DataStatus::WriteStartError, EINVAL, "Invalid URL "+url.str()); } std::string dirpath = Glib::path_get_dirname(url.Path()); if(dirpath == ".") dirpath = G_DIR_SEPARATOR_S; // shouldn't happen if (!DirCreate(dirpath, uid, gid, S_IRWXU, true)) { logger.msg(VERBOSE, "Failed to create directory %s: %s", dirpath, StrError(errno)); buffer->error_write(true); buffer->eof_write(true); writing = false; return DataStatus(DataStatus::WriteStartError, errno, "Failed to create directory "+dirpath); } /* try to create file. 
Opening an existing file will cause failure */ int flags = (checksums.size() > 0)?O_RDWR:O_WRONLY; #ifdef WIN32 flags |= O_BINARY; #endif if(((!uid) || (uid == getuid())) && ((!gid) || (gid == getgid()))) { fa = NULL; fd = ::open(url.Path().c_str(), flags | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR); if (fd == -1) { logger.msg(VERBOSE, "Failed to create file %s: %s", url.Path(), StrError(errno)); buffer->error_write(true); buffer->eof_write(true); writing = false; return DataStatus(DataStatus::WriteStartError, errno, "Failed to create file "+url.Path()); } } else { fd = -1; fa = new FileAccess; if(!fa->fa_setuid(uid,gid)) { delete fa; fa = NULL; logger.msg(VERBOSE, "Failed to switch user id to %d/%d", (unsigned int)uid, (unsigned int)gid); buffer->error_write(true); buffer->eof_write(true); writing = false; return DataStatus(DataStatus::WriteStartError, EARCUIDSWITCH, "Failed to switch user id to "+tostring(uid)+"/"+tostring(gid)); } if(!fa->fa_open(url.Path(), flags | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR)) { delete fa; fa = NULL; logger.msg(VERBOSE, "Failed to create file %s: %s", url.Path(), StrError(errno)); buffer->error_write(true); buffer->eof_write(true); writing = false; return DataStatus(DataStatus::WriteStartError, errno, "Failed to create file "+url.Path()); } } /* preallocate space */ buffer->speed.hold(true); if (additional_checks && CheckSize() && GetSize() > 0) { unsigned long long int fsize = GetSize(); logger.msg(INFO, "setting file %s to size %llu", url.Path(), fsize); while(true) { unsigned long long int nsize = fsize; if(file_allocate(fd,fa,nsize)) { if(nsize >= fsize) break; /* out of space */ if (space_cb != NULL) { if (space_cb->cb(fsize-nsize)) continue; } } if(fd != -1) { (lseek(fd, 0, SEEK_SET) == -1); (ftruncate(fd, 0) != 0); (close(fd) != -1); fd = -1; } if(fa) { fa->fa_lseek(0, SEEK_SET); fa->fa_ftruncate(0); fa->fa_close(); delete fa; fa = NULL; } logger.msg(VERBOSE, "Failed to preallocate space for %s", url.Path()); buffer->speed.reset(); buffer->speed.hold(false); buffer->error_write(true); buffer->eof_write(true); writing = false; return DataStatus(DataStatus::WriteStartError, ENOSPC, "Failed to preallocate space for file "+url.Path()); } } } buffer->speed.reset(); buffer->speed.hold(false); /* create thread to maintain writing */ if(!CreateThreadFunction(&DataPointFile::write_file_start,this,&transfers_started)) { if(fd != -1) close(fd); fd = -1; if(fa) fa->fa_close(); delete fa; fa = NULL; buffer->error_write(true); buffer->eof_write(true); writing = false; return DataStatus(DataStatus::WriteStartError, "Failed to create new thread"); } return DataStatus::Success; } DataStatus DataPointFile::StopWriting() { if (!writing) return DataStatus(DataStatus::WriteStopError, EARCLOGIC, "Not writing"); writing = false; if (!buffer->eof_write()) { buffer->error_write(true); /* trigger transfer error */ if(fd != -1) close(fd); if(fa) fa->fa_close(); fd = -1; } // buffer->wait_eof_write(); transfers_started.wait(); /* wait till writing thread exited */ // clean up if transfer failed for any reason if (buffer->error()) { bool err = false; if (fa) err = fa->fa_unlink(url.Path()); else err = FileDelete(url.Path()); if (!err && errno != ENOENT) logger.msg(WARNING, "Failed to clean up file %s: %s", url.Path(), StrError(errno)); } delete fa; fa = NULL; // validate file size, if transfer succeeded if (!buffer->error() && additional_checks && CheckSize() && !is_channel) { struct stat st; std::string path = url.Path(); if (!FileStat(path, &st, usercfg.GetUser().get_uid(), 
usercfg.GetUser().get_gid(), true)) { logger.msg(VERBOSE, "Error during file validation. Can't stat file %s: %s", url.Path(), StrError(errno)); return DataStatus(DataStatus::WriteStopError, errno, "Failed to stat result file "+url.Path()); } if (GetSize() != st.st_size) { logger.msg(VERBOSE, "Error during file validation: Local file size %llu does not match source file size %llu for file %s", st.st_size, GetSize(), url.Path()); return DataStatus(DataStatus::WriteStopError, "Local file size does not match source file for "+url.Path()); } } // TODO: error description from writing thread if (buffer->error_write()) return DataStatus::WriteError; return DataStatus::Success; } bool DataPointFile::WriteOutOfOrder() { if (!url) return false; if (url.Protocol() == "file") return true; return false; } } // namespace Arc extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "file", "HED:DMC", "Regular local file", 0, &ArcDMCFile::DataPointFile::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/dmc/file/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315723022253 xustar000000000000000030 mtime=1513200595.260955702 30 atime=1513200648.990612837 30 ctime=1513200660.790757157 nordugrid-arc-5.4.2/src/hed/dmc/file/Makefile.in0000644000175000002070000006172413214315723022333 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/dmc/file DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libdmcfile_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libdmcfile_la_OBJECTS = libdmcfile_la-DataPointFile.lo libdmcfile_la_OBJECTS = $(am_libdmcfile_la_OBJECTS) libdmcfile_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libdmcfile_la_CXXFLAGS) \ $(CXXFLAGS) $(libdmcfile_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) 
CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libdmcfile_la_SOURCES) DIST_SOURCES = $(libdmcfile_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ 
GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ 
XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libdmcfile.la libdmcfile_la_SOURCES = DataPointFile.cpp DataPointFile.h libdmcfile_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libdmcfile_la_LIBADD = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libdmcfile_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/dmc/file/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/dmc/file/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in 
$(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libdmcfile.la: $(libdmcfile_la_OBJECTS) $(libdmcfile_la_DEPENDENCIES) $(libdmcfile_la_LINK) -rpath $(pkglibdir) $(libdmcfile_la_OBJECTS) $(libdmcfile_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdmcfile_la-DataPointFile.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libdmcfile_la-DataPointFile.lo: DataPointFile.cpp @am__fastdepCXX_TRUE@ 
$(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcfile_la_CXXFLAGS) $(CXXFLAGS) -MT libdmcfile_la-DataPointFile.lo -MD -MP -MF $(DEPDIR)/libdmcfile_la-DataPointFile.Tpo -c -o libdmcfile_la-DataPointFile.lo `test -f 'DataPointFile.cpp' || echo '$(srcdir)/'`DataPointFile.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdmcfile_la-DataPointFile.Tpo $(DEPDIR)/libdmcfile_la-DataPointFile.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataPointFile.cpp' object='libdmcfile_la-DataPointFile.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcfile_la_CXXFLAGS) $(CXXFLAGS) -c -o libdmcfile_la-DataPointFile.lo `test -f 'DataPointFile.cpp' || echo '$(srcdir)/'`DataPointFile.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/dmc/file/PaxHeaders.7502/DataPointFile.h0000644000000000000000000000012313107551075023045 xustar000000000000000027 mtime=1495192125.590019 26 atime=1513200575.19171 30 ctime=1513200660.792757181 nordugrid-arc-5.4.2/src/hed/dmc/file/DataPointFile.h0000644000175000002070000000356213107551075023121 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_DATAPOINTFILE_H__ #define __ARC_DATAPOINTFILE_H__ #include #include #include namespace ArcDMCFile { using namespace Arc; /** * This class allows access to the regular local filesystem through the * same interface as is used for remote storage on the grid. * * This class is a loadable module and cannot be used directly. The DataHandle * class loads modules at runtime and should be used instead of this. */ class DataPointFile : public DataPointDirect { public: DataPointFile(const URL& url, const UserConfig& usercfg, PluginArgument* parg); virtual ~DataPointFile(); static Plugin* Instance(PluginArgument *arg); virtual DataStatus StartReading(DataBuffer& buffer); virtual DataStatus StartWriting(DataBuffer& buffer, DataCallback *space_cb = NULL); virtual DataStatus StopReading(); virtual DataStatus StopWriting(); virtual DataStatus Check(bool check_meta); virtual DataStatus Stat(FileInfo& file, DataPointInfoType verb = INFO_TYPE_ALL); virtual DataStatus List(std::list& files, DataPointInfoType verb = INFO_TYPE_ALL); virtual DataStatus Remove(); virtual DataStatus CreateDirectory(bool with_parents=false); virtual DataStatus Rename(const URL& newurl); virtual bool WriteOutOfOrder(); virtual bool RequiresCredentials() const { return false; }; private: SimpleCounter transfers_started; int open_channel(); static void read_file_start(void* arg); static void write_file_start(void* arg); void read_file(); void write_file(); bool reading; bool writing; int fd; FileAccess* fa; bool is_channel; unsigned int channel_num; static Logger logger; }; } // namespace Arc #endif // __ARC_DATAPOINTFILE_H__ nordugrid-arc-5.4.2/src/hed/dmc/file/PaxHeaders.7502/README0000644000000000000000000000012311001653037021061 xustar000000000000000027 mtime=1208440351.928622 26 atime=1513200575.18771 30 ctime=1513200660.788757133 nordugrid-arc-5.4.2/src/hed/dmc/file/README0000644000175000002070000000004411001653037021125 0ustar00mockbuildmock00000000000000DMC which handles file:// protocol nordugrid-arc-5.4.2/src/hed/dmc/PaxHeaders.7502/gridftp0000644000000000000000000000013213214316024020644 xustar000000000000000030 mtime=1513200660.892758405 30 atime=1513200668.722854169 30 ctime=1513200660.892758405 nordugrid-arc-5.4.2/src/hed/dmc/gridftp/0000755000175000002070000000000013214316024020767 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/dmc/gridftp/PaxHeaders.7502/Lister.cpp0000644000000000000000000000012213107553304022673 xustar000000000000000027 mtime=1495193284.604954 26 atime=1513200575.19871 29 ctime=1513200660.89075838 nordugrid-arc-5.4.2/src/hed/dmc/gridftp/Lister.cpp0000644000175000002070000012354213107553304022751 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #ifdef WIN32 #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include "Lister.h" static char default_ftp_user[] = "anonymous"; static char default_gsiftp_user[] = ":globus-mapping:"; static char default_ftp_pass[] = "dummy"; static char default_gsiftp_pass[] = "user@"; static void dos_to_unix(char 
*s) { if (!s) return; int l = strlen(s); for (; l;) { l--; if ((s[l] == '\r') || (s[l] == '\n')) s[l] = ' '; } } namespace ArcDMCGridFTP { using namespace Arc; static Logger logger(Logger::rootLogger, "Lister"); std::map Lister::callback_args; Glib::Mutex Lister::callback_args_mutex; void* Lister::remember_for_callback(Lister* it) { static void* last_arg = NULL; callback_args_mutex.lock(); void* arg = last_arg; std::map::iterator pos = callback_args.find(last_arg); if(pos != callback_args.end()) { // must be very old stuck communication - too old to keep globus_mutex_t* pos_mutex = &pos->second->mutex; globus_mutex_lock(pos_mutex); callback_args.erase(pos); globus_mutex_unlock(pos_mutex); }; callback_args[last_arg] = it; last_arg = (void*)(((unsigned long int)last_arg) + 1); callback_args_mutex.unlock(); return arg; } Lister* Lister::recall_for_callback(void* arg) { callback_args_mutex.lock(); Lister* it = NULL; std::map::iterator pos = callback_args.find(arg); if(pos != callback_args.end()) { it = pos->second; globus_mutex_lock(&it->mutex); }; callback_args_mutex.unlock(); return it; } void Lister::forget_about_callback(void* arg) { callback_args_mutex.lock(); std::map::iterator pos = callback_args.find(arg); if(pos != callback_args.end()) { globus_mutex_t* pos_mutex = &pos->second->mutex; globus_mutex_lock(pos_mutex); callback_args.erase(pos); globus_mutex_unlock(pos_mutex); }; callback_args_mutex.unlock(); } void SetAttributes(FileInfo& fi, const char *facts) { const char *name; const char *value; const char *p = facts; for (; *p;) { name = p; value = p; if (*p == ' ') break; // end of facts if (*p == ';') { p++; continue; } for (; *p; p++) { if (*p == ' ') break; if (*p == ';') break; if (*p == '=') value = p; } if (name == value) continue; // skip empty names value++; if (value == p) continue; // skip empty values if (((value - name - 1) == 4) && (strncasecmp(name, "type", 4) == 0)) { if (((p - value) == 3) && (strncasecmp(value, "dir", 3) == 0)) fi.SetType(FileInfo::file_type_dir); else if (((p - value) == 4) && (strncasecmp(value, "file", 4) == 0)) fi.SetType(FileInfo::file_type_file); else fi.SetType(FileInfo::file_type_unknown); } else if (((value - name - 1) == 4) && (strncasecmp(name, "size", 4) == 0)) { std::string tmp_s(value, (int)(p - value)); fi.SetSize(stringtoull(tmp_s)); } else if (((value - name - 1) == 6) && (strncasecmp(name, "modify", 6) == 0)) { std::string tmp_s(value, (int)(p - value)); if (tmp_s.size() < 14) fi.SetModified(stringtoi(tmp_s)); // UNIX time else fi.SetModified(tmp_s); // ISO time } } } Lister::callback_status_t Lister::wait_for_callback(unsigned int to) { callback_status_t res; globus_mutex_lock(&mutex); globus_abstime_t timeout; GlobusTimeAbstimeSet(timeout,to,0); while (callback_status == CALLBACK_NOTREADY) { if (globus_cond_timedwait(&cond, &mutex, &timeout) == ETIMEDOUT) { callback_status = CALLBACK_NOTREADY; globus_mutex_unlock(&mutex); return CALLBACK_TIMEDOUT; } } res = callback_status; callback_status = CALLBACK_NOTREADY; globus_mutex_unlock(&mutex); return res; } Lister::callback_status_t Lister::wait_for_data_callback(unsigned int to) { callback_status_t res; globus_mutex_lock(&mutex); globus_abstime_t timeout; GlobusTimeAbstimeSet(timeout,to,0); while (data_callback_status == CALLBACK_NOTREADY) { if (globus_cond_timedwait(&cond, &mutex, &timeout) == ETIMEDOUT) { data_callback_status = CALLBACK_NOTREADY; globus_mutex_unlock(&mutex); return CALLBACK_TIMEDOUT; } } res = data_callback_status; data_callback_status = CALLBACK_NOTREADY; 
globus_mutex_unlock(&mutex); return res; } Lister::callback_status_t Lister::wait_for_close_callback(unsigned int to) { callback_status_t res; globus_mutex_lock(&mutex); globus_abstime_t timeout; GlobusTimeAbstimeSet(timeout,to,0); while (close_callback_status == CALLBACK_NOTREADY) { if (globus_cond_timedwait(&cond, &mutex, &timeout) == ETIMEDOUT) { close_callback_status = CALLBACK_NOTREADY; globus_mutex_unlock(&mutex); return CALLBACK_TIMEDOUT; } } res = close_callback_status; close_callback_status = CALLBACK_NOTREADY; globus_mutex_unlock(&mutex); return res; } void Lister::resp_destroy() { globus_mutex_lock(&mutex); if (resp_n > 0) { globus_ftp_control_response_destroy(resp + (resp_n - 1)); resp_n--; } globus_mutex_unlock(&mutex); } void Lister::resp_callback(void *arg, globus_ftp_control_handle_t*, globus_object_t *error, globus_ftp_control_response_t *response) { Lister *it = recall_for_callback(arg); if(!it) return; Arc::Logger::getRootLogger().setThreadContext(); Arc::Logger::getRootLogger().removeDestinations(); if (error != GLOBUS_SUCCESS) { it->callback_status = CALLBACK_ERROR; logger.msg(INFO, "Failure: %s", globus_object_to_string(error)); if (response) logger.msg(INFO, "Response: %s", response->response_buffer); } else { if (it->resp_n < LISTER_MAX_RESPONSES) { memmove((it->resp) + 1, it->resp, sizeof(globus_ftp_control_response_t) * (it->resp_n)); if (response && response->response_buffer) { globus_ftp_control_response_copy(response, it->resp); } else { // invalid reply causes *_copy to segfault it->resp->response_buffer = (globus_byte_t*)strdup("000 "); it->resp->response_buffer_size = 5; it->resp->response_length = 4; it->resp->code = 0; it->resp->response_class = GLOBUS_FTP_UNKNOWN_REPLY; } (it->resp_n)++; } it->callback_status = CALLBACK_DONE; if(response && response->response_buffer) { dos_to_unix((char*)(response->response_buffer)); logger.msg(VERBOSE, "Response: %s", response->response_buffer); } } globus_cond_signal(&(it->cond)); globus_mutex_unlock(&(it->mutex)); } void Lister::close_callback(void *arg, globus_ftp_control_handle_t*, globus_object_t *error, globus_ftp_control_response_t *response) { Lister *it = recall_for_callback(arg); if(!it) return; Arc::Logger::getRootLogger().setThreadContext(); Arc::Logger::getRootLogger().removeDestinations(); if (error != GLOBUS_SUCCESS) { it->close_callback_status = CALLBACK_ERROR; } else { it->close_callback_status = CALLBACK_DONE; } globus_cond_signal(&(it->cond)); globus_mutex_unlock(&(it->mutex)); return; } void Lister::simple_callback(void *arg, globus_ftp_control_handle_t*, globus_object_t *error) { resp_callback(arg, NULL, error, NULL); } void Lister::list_read_callback(void *arg, globus_ftp_control_handle_t*, globus_object_t *error, globus_byte_t*, globus_size_t length, globus_off_t, globus_bool_t eof) { Lister *it = recall_for_callback(arg); if(!it) return; if(!it->data_activated) { globus_mutex_unlock(&(it->mutex)); return; } length += it->list_shift; if (error != GLOBUS_SUCCESS) { /* no such file or connection error - assume no such file */ logger.msg(INFO, "Error getting list of files (in list)"); logger.msg(INFO, "Failure: %s", globus_object_to_string(error)); logger.msg(INFO, "Assuming - file not found"); it->data_callback_status = CALLBACK_ERROR; globus_cond_signal(&(it->cond)); globus_mutex_unlock(&(it->mutex)); return; } /* parse names and add to list */ /* suppose we are receiving ordered blocks of data (no multiple streams) */ char *name; (it->readbuf)[length] = 0; name = it->readbuf; it->list_shift = 0; 
for (;;) { if ((*name) == 0) break; globus_size_t nlen; nlen = strcspn(name, "\n\r"); name[nlen] = 0; logger.msg(VERBOSE, "list record: %s", name); if (nlen == length) { if (!eof) { memmove(it->readbuf, name, nlen); it->list_shift = nlen; break; } } if (nlen == 0) { // skip empty std::string if (length == 0) break; name++; length--; continue; } char *attrs = name; if (it->facts) { for (; *name;) { nlen--; length--; if (*name == ' ') { name++; break; } name++; } } if(it->free_format) { // assuming it is 'ls -l'-like // a lot of attributes followed by filename // NOTE: it is not possible to reliably distinguish files // with empty spaces. So assuming no such files. char* name_start = strrchr(name,' '); if(name_start) { nlen-=(name_start-name+1); length-=(name_start-name+1); name=name_start+1; }; }; std::list::iterator i = it->fnames.insert(it->fnames.end(), FileInfo(name)); if (it->facts) SetAttributes(*i, attrs); if (nlen == length) break; name += (nlen + 1); length -= (nlen + 1); if (((*name) == '\r') || ((*name) == '\n')) { name++; length--; } } if (!eof) { if (globus_ftp_control_data_read(it->handle, (globus_byte_t*) ((it->readbuf) + (it->list_shift)), sizeof(it->readbuf) - (it->list_shift) - 1, &list_read_callback, arg) != GLOBUS_SUCCESS) { logger.msg(INFO, "Failed reading list of files"); it->data_callback_status = CALLBACK_ERROR; globus_cond_signal(&(it->cond)); } globus_mutex_unlock(&(it->mutex)); return; } it->data_activated = false; it->data_callback_status = CALLBACK_DONE; globus_cond_signal(&(it->cond)); globus_mutex_unlock(&(it->mutex)); return; } void Lister::list_conn_callback(void *arg, globus_ftp_control_handle_t *hctrl, unsigned int, globus_bool_t, globus_object_t *error) { Lister *it = recall_for_callback(arg); if(!it) return; if (error != GLOBUS_SUCCESS) { logger.msg(INFO, "Failure: %s", globus_object_to_string(error)); it->data_callback_status = CALLBACK_ERROR; // if data failed no reason to wait for control it->callback_status = CALLBACK_ERROR; globus_cond_signal(&(it->cond)); globus_mutex_unlock(&(it->mutex)); return; } it->list_shift = 0; it->fnames.clear(); it->data_activated = true; if (globus_ftp_control_data_read(hctrl, (globus_byte_t*)(it->readbuf), sizeof(it->readbuf) - 1, &list_read_callback, arg) != GLOBUS_SUCCESS) { logger.msg(INFO, "Failed reading data"); it->data_callback_status = CALLBACK_ERROR; // if data failed no reason to wait for control it->callback_status = CALLBACK_ERROR; globus_cond_signal(&(it->cond)); } globus_mutex_unlock(&(it->mutex)); return; } globus_ftp_control_response_class_t Lister::send_command(const char *command, const char *arg, bool wait_for_response, char **sresp, int* code, char delim) { char *cmd = NULL; if (sresp) (*sresp) = NULL; if (code) *code = 0; if (command) { /* if no command - waiting for second reply */ globus_mutex_lock(&mutex); for (int i = 0; i < resp_n; i++) { globus_ftp_control_response_destroy(resp + i); } resp_n = 0; callback_status = CALLBACK_NOTREADY; globus_mutex_unlock(&mutex); { std::string cmds(command); if(arg) { cmds += " "; cmds += arg; } logger.msg(VERBOSE, "Command: %s", cmds); cmds += "\r\n"; cmd = (char*)malloc(cmds.length()+1); if (cmd == NULL) { logger.msg(ERROR, "Memory allocation error"); return GLOBUS_FTP_UNKNOWN_REPLY; } strncpy(cmd,cmds.c_str(),cmds.length()+1); cmd[cmds.length()] = 0; } if (globus_ftp_control_send_command(handle, cmd, resp_callback, callback_arg) != GLOBUS_SUCCESS) { logger.msg(VERBOSE, "%s failed", command); if (cmd) free(cmd); return GLOBUS_FTP_UNKNOWN_REPLY; } 
logger.msg(DEBUG, "Command is being sent"); } if (wait_for_response) { globus_mutex_lock(&mutex); while ((callback_status == CALLBACK_NOTREADY) && (resp_n == 0)) { logger.msg(DEBUG, "Waiting for response"); globus_cond_wait(&cond, &mutex); } free(cmd); if (callback_status != CALLBACK_DONE) { logger.msg(DEBUG, "Callback got failure"); callback_status = CALLBACK_NOTREADY; if (resp_n > 0) { globus_ftp_control_response_destroy(resp + (resp_n - 1)); resp_n--; } globus_mutex_unlock(&mutex); return GLOBUS_FTP_UNKNOWN_REPLY; } if ((sresp) && (resp_n > 0)) { if (delim == 0) { (*sresp) = (char*)malloc(resp[resp_n - 1].response_length); if ((*sresp) != NULL) { memcpy(*sresp, (char*)(resp[resp_n - 1].response_buffer + 4), resp[resp_n - 1].response_length - 4); (*sresp)[resp[resp_n - 1].response_length - 4] = 0; logger.msg(VERBOSE, "Response: %s", *sresp); } else logger.msg(ERROR, "Memory allocation error"); } else { /* look for pair of enclosing characters */ logger.msg(VERBOSE, "Response: %s", resp[resp_n - 1].response_buffer); char *s_start = (char*)(resp[resp_n - 1].response_buffer + 4); char *s_end = NULL; int l = 0; s_start = strchr(s_start, delim); if (s_start) { s_start++; if (delim == '(') delim = ')'; else if (delim == '{') delim = '}'; else if (delim == '[') delim = ']'; s_end = strchr(s_start, delim); if (s_end) l = s_end - s_start; } if (l > 0) { (*sresp) = (char*)malloc(l + 1); if ((*sresp) != NULL) { memcpy(*sresp, s_start, l); (*sresp)[l] = 0; logger.msg(VERBOSE, "Response: %s", *sresp); } } } } globus_ftp_control_response_class_t resp_class = GLOBUS_FTP_UNKNOWN_REPLY; int resp_code = 0; if (resp_n > 0) { resp_class = resp[resp_n - 1].response_class; resp_code = resp[resp_n - 1].code; globus_ftp_control_response_destroy(resp + (resp_n - 1)); resp_n--; } if (resp_n == 0) callback_status = CALLBACK_NOTREADY; globus_mutex_unlock(&mutex); if (code) *code = resp_code; return resp_class; } else return GLOBUS_FTP_POSITIVE_COMPLETION_REPLY; /* !!!!!!! Memory LOST - cmd !!!!!!!! 
*/ } Lister::Lister() : inited(false), facts(true), handle(NULL), resp_n(0), callback_status(CALLBACK_NOTREADY), data_callback_status(CALLBACK_NOTREADY), close_callback_status(CALLBACK_NOTREADY), list_shift(0), connected(false), pasv_set(false), data_activated(false), free_format(false), port((unsigned short int)(-1)), credential(NULL) { if (globus_cond_init(&cond, GLOBUS_NULL) != GLOBUS_SUCCESS) { logger.msg(ERROR, "Failed in globus_cond_init"); return; } if (globus_mutex_init(&mutex, GLOBUS_NULL) != GLOBUS_SUCCESS) { logger.msg(ERROR, "Failed in globus_mutex_init"); globus_cond_destroy(&cond); return; } handle = (globus_ftp_control_handle_t*) malloc(sizeof(globus_ftp_control_handle_t)); if (handle == NULL) { logger.msg(ERROR, "Failed allocating memory for handle"); globus_mutex_destroy(&mutex); globus_cond_destroy(&cond); } if (globus_ftp_control_handle_init(handle) != GLOBUS_SUCCESS) { logger.msg(ERROR, "Failed in globus_ftp_control_handle_init"); globus_mutex_destroy(&mutex); globus_cond_destroy(&cond); free(handle); handle = NULL; return; } if (globus_ftp_control_ipv6_allow(handle,GLOBUS_TRUE) != GLOBUS_SUCCESS) { logger.msg(WARNING, "Failed to enable IPv6"); } callback_arg = remember_for_callback(this); inited = true; } void Lister::close_connection() { if (!connected) return; connected = false; bool res = true; close_callback_status = CALLBACK_NOTREADY; logger.msg(VERBOSE, "Closing connection"); if (globus_ftp_control_data_force_close(handle, simple_callback, callback_arg) == GLOBUS_SUCCESS) { // Timeouts are used while waiting for callbacks just in case they never // come. If a timeout happens then the response object is not freed just // in case the callback eventually arrives. callback_status_t cbs = wait_for_callback(60); if (cbs == CALLBACK_TIMEDOUT) { logger.msg(VERBOSE, "Timeout waiting for Globus callback - leaking connection"); return; } if (cbs != CALLBACK_DONE) res = false; } //if (globus_ftp_control_abort(handle, resp_callback, callback_arg) != GLOBUS_SUCCESS) { //} else if (wait_for_callback() != CALLBACK_DONE) { // res = false; //} if(send_command("ABOR", NULL, true, NULL) == GLOBUS_FTP_UNKNOWN_REPLY) { res = false; } if (globus_ftp_control_quit(handle, resp_callback, callback_arg) == GLOBUS_SUCCESS) { callback_status_t cbs = wait_for_callback(60); if (cbs == CALLBACK_TIMEDOUT) { logger.msg(VERBOSE, "Timeout waiting for Globus callback - leaking connection"); return; } if (cbs != CALLBACK_DONE) res = false; } if (globus_ftp_control_force_close(handle, close_callback, callback_arg) == GLOBUS_SUCCESS) { callback_status_t cbs = wait_for_close_callback(); if (cbs != CALLBACK_DONE) res = false; } if (res) { logger.msg(VERBOSE, "Closed successfully"); } else { logger.msg(VERBOSE, "Closing may have failed"); } resp_destroy(); } Lister::~Lister() { close_connection(); if (inited) { inited = false; if(handle) { // Waiting for stalled callbacks // If globus_ftp_control_handle_destroy is called with dc_handle // state not GLOBUS_FTP_DATA_STATE_NONE then handle is messed // and next call causes assertion. So here we are waiting for // proper state. 
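// (Summary of the wait strategy implemented below, added for clarity:) the destructor
// polls the control/data handle state in roughly 0.1 second steps; if the handle has
// not settled after about 60 seconds it is declared stuck, remaining callback
// arguments are nulled to block late callbacks, and destruction proceeds anyway.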
bool first_time = true; time_t start_time = time(NULL); globus_mutex_lock(&(handle->cc_handle.mutex)); while ((handle->dc_handle.state != GLOBUS_FTP_DATA_STATE_NONE) || (handle->cc_handle.cc_state != GLOBUS_FTP_CONTROL_UNCONNECTED)) { // if((handle->cc_handle.cc_state == GLOBUS_FTP_CONTROL_UNCONNECTED) && // (handle->dc_handle.state == GLOBUS_FTP_DATA_STATE_CLOSING)) { // handle->dc_handle.state = GLOBUS_FTP_DATA_STATE_NONE; // break; // }; globus_mutex_unlock(&(handle->cc_handle.mutex)); if(first_time) { logger.msg(VERBOSE, "Waiting for globus handle to settle"); first_time = false; } globus_abstime_t timeout; GlobusTimeAbstimeSet(timeout,0,100000); logger.msg(DEBUG, "Handle is not in proper state %u/%u",handle->cc_handle.cc_state,handle->dc_handle.state); globus_mutex_lock(&mutex); globus_cond_timedwait(&cond, &mutex, &timeout); globus_mutex_unlock(&mutex); globus_mutex_lock(&(handle->cc_handle.mutex)); if(((unsigned int)(time(NULL) - start_time)) > 60) { logger.msg(VERBOSE, "Globus handle is stuck"); first_time = false; break; } } // block callback execution in case anything left handle->cc_handle.close_cb_arg = NULL; handle->cc_handle.accept_cb_arg = NULL; handle->cc_handle.auth_cb_arg = NULL; handle->cc_handle.command_cb_arg = NULL; handle->dc_handle.close_callback_arg = NULL; globus_mutex_unlock(&(handle->cc_handle.mutex)); GlobusResult res; if(!(res=globus_ftp_control_handle_destroy(handle))) { // This situation can't be fixed because call to globus_ftp_control_handle_destroy // makes handle unusable even if it fails. logger.msg(DEBUG, "Failed destroying handle: %s. Can't handle such situation.",res.str()); } else { free(handle); }; handle = NULL; }; forget_about_callback(callback_arg); globus_mutex_destroy(&mutex); globus_cond_destroy(&cond); } } DataStatus Lister::setup_pasv(globus_ftp_control_host_port_t& pasv_addr) { if(pasv_set) return DataStatus::Success; char *sresp; GlobusResult res; DataStatus result = DataStatus::ListError; pasv_addr.port = 0; pasv_addr.hostlen = 0; // Try EPSV first to make it work over IPv6 if (send_command("EPSV", NULL, true, &sresp, NULL, '(') != GLOBUS_FTP_POSITIVE_COMPLETION_REPLY) { if (sresp) { logger.msg(INFO, "EPSV failed: %s", sresp); result.SetDesc("EPSV command failed at "+urlstr+" : "+sresp); free(sresp); } else { logger.msg(INFO, "EPSV failed"); result.SetDesc("EPSV command failed at "+urlstr); } // Now try PASV. It will fail on IPv6 unless server provides IPv4 data channel. if (send_command("PASV", NULL, true, &sresp, NULL, '(') != GLOBUS_FTP_POSITIVE_COMPLETION_REPLY) { if (sresp) { logger.msg(INFO, "PASV failed: %s", sresp); result.SetDesc("PASV command failed at "+urlstr+" : "+sresp); free(sresp); } else { logger.msg(INFO, "PASV failed"); result.SetDesc("PASV command failed at "+urlstr); } return result; } if (sresp) { int port_low, port_high; if (sscanf(sresp, "%i,%i,%i,%i,%i,%i", &(pasv_addr.host[0]), &(pasv_addr.host[1]), &(pasv_addr.host[2]), &(pasv_addr.host[3]), &port_high, &port_low) == 6) { pasv_addr.port = ((port_high & 0x000FF) << 8) | (port_low & 0x000FF); pasv_addr.hostlen = 4; } free(sresp); } } else { // Successful EPSV - response is (|||port|) // Currently more complex responses with protocol and host // are not supported. 
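// Illustrative examples of the two reply formats handled here (values are hypothetical):
// an EPSV text of "|||41366|" yields port 41366, taken from between the last two
// delimiter characters; a PASV text of "192,0,2,10,161,86" yields host 192.0.2.10 and
// port 161*256 + 86 = 41302, matching the ((port_high & 0xFF) << 8) | (port_low & 0xFF)
// computation above.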
if (sresp) { char sep = sresp[0]; char* lsep = NULL; if(sep) { if((sresp[1] == sep) && (sresp[2] == sep) && ((lsep = (char*)strchr(sresp+3,sep)) != NULL)) { *lsep = 0; pasv_addr.port = strtoul(sresp+3,&lsep,10); if(pasv_addr.port != 0) { // Apply control connection address unsigned short local_port; if(!(res = globus_io_tcp_get_remote_address_ex(&(handle->cc_handle.io_handle), pasv_addr.host,&pasv_addr.hostlen,&local_port))) { logger.msg(INFO, "Failed to apply local address to data connection"); std::string globus_err(res.str()); logger.msg(INFO, "Failure: %s", globus_err); result.SetDesc("Failed to apply local address to data connection for "+urlstr+": "+globus_err); free(sresp); return result; } } } } free(sresp); } } if (pasv_addr.hostlen == 0) { logger.msg(INFO, "Can't parse host and/or port in response to EPSV/PASV"); result.SetDesc("Can't parse host and/or port in response to EPSV/PASV from "+urlstr); return result; } if (pasv_addr.hostlen == 4) { logger.msg(VERBOSE, "Data channel: %d.%d.%d.%d:%d", pasv_addr.host[0], pasv_addr.host[1], pasv_addr.host[2], pasv_addr.host[3], pasv_addr.port); } else { char buf[8*5]; snprintf(buf,sizeof(buf),"%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x", pasv_addr.host[0]<<8 | pasv_addr.host[1], pasv_addr.host[2]<<8 | pasv_addr.host[3], pasv_addr.host[4]<<8 | pasv_addr.host[5], pasv_addr.host[6]<<8 | pasv_addr.host[7], pasv_addr.host[8]<<8 | pasv_addr.host[9], pasv_addr.host[10]<<8 | pasv_addr.host[11], pasv_addr.host[12]<<8 | pasv_addr.host[13], pasv_addr.host[14]<<8 | pasv_addr.host[15]); buf[sizeof(buf)-1] = 0; logger.msg(VERBOSE, "Data channel: [%s]:%d", buf, pasv_addr.port); } if (!(res = globus_ftp_control_local_port(handle, &pasv_addr))) { logger.msg(INFO, "Obtained host and address are not acceptable"); std::string globus_err(res.str()); logger.msg(INFO, "Failure: %s", globus_err); result.SetDesc("Host and address obtained from "+urlstr+" are not acceptable: "+globus_err); return result; } /* it looks like _pasv is not enough for connection - start reading immediately */ data_callback_status = (callback_status_t)CALLBACK_NOTREADY; if (globus_ftp_control_data_connect_read(handle, &list_conn_callback, callback_arg) != GLOBUS_SUCCESS) { logger.msg(INFO, "Failed to open data channel"); result.SetDesc("Failed to open data channel to "+urlstr); pasv_set = false; return result; } pasv_set = true; return DataStatus::Success; } DataStatus Lister::handle_connect(const URL& url) { GlobusResult res; DataStatus result = DataStatus::ListError; /* get listing */ fnames.clear(); globus_ftp_control_auth_info_t auth; if ((url.Protocol() != "ftp") && (url.Protocol() != "gsiftp")) { logger.msg(VERBOSE, "Unsupported protocol in url %s", url.str()); result.SetDesc("Unsupported protocol in url " + url.str()); return result; } if (connected) { if ((host == url.Host()) && (port == url.Port()) && (scheme == url.Protocol()) && (username == url.Username()) && (userpass == url.Passwd())) { /* same server - check if connection alive */ logger.msg(VERBOSE, "Reusing connection"); if (send_command("NOOP", NULL, true, NULL) != GLOBUS_FTP_POSITIVE_COMPLETION_REPLY) { // Connection failed, close now - we will connect again in the next step close_connection(); } } } path = url.Path(); urlstr = url.str(); if ((path.length() != 0) && (path[path.length() - 1] == '/')) { path.resize(path.length() - 1); } if (!connected) { pasv_set = false; port = url.Port(); scheme = url.Protocol(); host = url.Host(); username = url.Username(); userpass = url.Passwd(); if (!(res = 
globus_ftp_control_connect(handle, const_cast(host.c_str()), port, &resp_callback, callback_arg))) { logger.msg(VERBOSE, "Failed connecting to server %s:%d", host.c_str(), port); result.SetDesc("Failed connecting to "+urlstr+" : "+res.str()); return result; } if (wait_for_callback() != CALLBACK_DONE) { logger.msg(VERBOSE, "Failed to connect to server %s:%d", host.c_str(), port); // TODO: error description from callback result.SetDesc("Failed to connect to server "+url.str()); resp_destroy(); return result; } connected = true; resp_destroy(); char *username_ = const_cast(username.c_str()); char *userpass_ = const_cast(userpass.c_str()); globus_bool_t use_auth; if (scheme == "gsiftp") { if (username.empty()) username_ = default_gsiftp_user; if (userpass.empty()) userpass_ = default_gsiftp_pass; if (!credential) { logger.msg(VERBOSE, "Missing authentication information"); result.SetDesc("Missing authentication information for "+url.str()); return result; } if (!(res = globus_ftp_control_auth_info_init(&auth, *credential, GLOBUS_TRUE, username_, userpass_, GLOBUS_NULL, GLOBUS_NULL))) { std::string globus_err(res.str()); logger.msg(VERBOSE, "Bad authentication information: %s", globus_err); result.SetDesc("Bad authentication information for "+urlstr+" : "+globus_err); return result; } use_auth = GLOBUS_TRUE; } else { if (username.empty()) username_ = default_ftp_user; if (userpass.empty()) userpass_ = default_ftp_pass; if (!(res = globus_ftp_control_auth_info_init(&auth, GSS_C_NO_CREDENTIAL, GLOBUS_FALSE, username_, userpass_, GLOBUS_NULL, GLOBUS_NULL))) { std::string globus_err(res.str()); logger.msg(VERBOSE, "Bad authentication information: %s", globus_err); result.SetDesc("Bad authentication information for "+urlstr+" : "+globus_err); return result; } use_auth = GLOBUS_FALSE; } if (!(res = globus_ftp_control_authenticate(handle, &auth, use_auth, resp_callback, callback_arg))) { std::string globus_err(res.str()); logger.msg(VERBOSE, "Failed authenticating: %s", globus_err); result.SetDesc("Failed authenticating at "+urlstr+" : "+globus_err); close_connection(); return result; } if (wait_for_callback() != CALLBACK_DONE) { // TODO: error description from callback logger.msg(VERBOSE, "Failed authenticating"); result.SetDesc("Failed authenticating at "+urlstr); resp_destroy(); close_connection(); return result; } for(int n = 0; n < resp_n; ++n) { if(resp[n].response_class != GLOBUS_FTP_POSITIVE_COMPLETION_REPLY) { logger.msg(VERBOSE, "Failed authenticating: %s", resp[n].response_buffer); result.SetDesc("Failed authenticating at "+urlstr+" : "+(char*)(resp[n].response_buffer)); resp_destroy(); close_connection(); return result; } } resp_destroy(); } else { // Calling NOOP to test connection } return DataStatus::Success; } DataStatus Lister::retrieve_file_info(const URL& url,bool names_only) { DataStatus result = DataStatus::StatError; DataStatus con_result = handle_connect(url); if(!con_result) return DataStatus(DataStatus::StatError, con_result.GetErrno(), con_result.GetDesc()); globus_ftp_control_response_class_t cmd_resp; char *sresp = NULL; if (url.Protocol() == "gsiftp") { cmd_resp = send_command("DCAU", "N", true, &sresp, NULL, '"'); if ((cmd_resp != GLOBUS_FTP_POSITIVE_COMPLETION_REPLY) && (cmd_resp != GLOBUS_FTP_PERMANENT_NEGATIVE_COMPLETION_REPLY)) { if (sresp) { logger.msg(INFO, "DCAU failed: %s", sresp); result.SetDesc("DCAU command failed at "+urlstr+" : "+sresp); free(sresp); sresp = NULL; } else { logger.msg(INFO, "DCAU failed"); result.SetDesc("DCAU command failed at "+urlstr); } 
return result; } free(sresp); sresp = NULL; } // default dcau globus_ftp_control_dcau_t dcau; dcau.mode = GLOBUS_FTP_CONTROL_DCAU_NONE; globus_ftp_control_local_dcau(handle, &dcau, GSS_C_NO_CREDENTIAL); globus_ftp_control_host_port_t pasv_addr; facts = true; free_format = false; if(!names_only) { /* try MLST */ int code = 0; cmd_resp = send_command("MLST", path.c_str(), true, &sresp, &code); if (cmd_resp == GLOBUS_FTP_PERMANENT_NEGATIVE_COMPLETION_REPLY) { if (code == 500) { logger.msg(INFO, "MLST is not supported - trying LIST"); free(sresp); sresp = NULL; /* run NLST */ DataStatus pasv_res = setup_pasv(pasv_addr); if (!pasv_res) return pasv_res; facts = false; free_format = true; cmd_resp = send_command("LIST", path.c_str(), true, &sresp); } } else { // MLST replies through control channel // 250 - // information // 250 - if (cmd_resp != GLOBUS_FTP_POSITIVE_COMPLETION_REPLY) { if(sresp) { logger.msg(INFO, "Immediate completion expected: %s", sresp); result.SetDesc("MLST command failed at "+urlstr+" : "+sresp); free(sresp); sresp = NULL; } else { logger.msg(INFO, "Immediate completion expected"); result.SetDesc("MLST command failed at "+urlstr); } return result; } // Try to collect full response char* nresp = sresp?strchr(sresp,'\n'):NULL; if(nresp) { ++nresp; } else { free(sresp); sresp = NULL; cmd_resp = send_command(NULL, NULL, true, &sresp); if(cmd_resp != GLOBUS_FTP_UNKNOWN_REPLY) { logger.msg(INFO, "Missing information in reply: %s", sresp); if(sresp) { result.SetDesc("Missing information in reply from "+urlstr+" : "+sresp); free(sresp); } else { result.SetDesc("Missing information in reply from "+urlstr); } return result; } nresp=sresp; } char* fresp = NULL; if(nresp) { if(*nresp == ' ') ++nresp; fresp=strchr(nresp,'\n'); if(fresp) { // callback *fresp=0; list_shift = 0; fnames.clear(); size_t nlength = strlen(nresp); if(nlength > sizeof(readbuf)) nlength=sizeof(readbuf); memcpy(readbuf,nresp,nlength); data_activated = true; list_read_callback(callback_arg,handle,GLOBUS_SUCCESS, (globus_byte_t*)readbuf,nlength,0,1); } }; if(fresp) { ++fresp; } else { free(sresp); sresp = NULL; cmd_resp = send_command(NULL, NULL, true, &sresp); if(cmd_resp != GLOBUS_FTP_POSITIVE_COMPLETION_REPLY) { logger.msg(INFO, "Missing final reply: %s", sresp); if(sresp) { result.SetDesc("Missing final reply from "+urlstr+" : "+sresp); free(sresp); } else { result.SetDesc("Missing final reply from "+urlstr); } return result; } fresp=sresp; } free(sresp); sresp = NULL; return DataStatus::Success; } } else { DataStatus pasv_res = setup_pasv(pasv_addr); if (!pasv_res) return pasv_res; facts = false; free_format = true; cmd_resp = send_command("LIST", path.c_str(), true, &sresp); } if (cmd_resp == GLOBUS_FTP_POSITIVE_COMPLETION_REPLY) { /* completion is not expected here */ pasv_set = false; logger.msg(INFO, "Unexpected immediate completion: %s", sresp); if(sresp) { result.SetDesc("Unexpected completion reply from "+urlstr+" : "+sresp); free(sresp); sresp = NULL; } else { result.SetDesc("Unexpected completion reply from "+urlstr); } return result; } if ((cmd_resp != GLOBUS_FTP_POSITIVE_PRELIMINARY_REPLY) && (cmd_resp != GLOBUS_FTP_POSITIVE_INTERMEDIATE_REPLY)) { if (sresp) { logger.msg(INFO, "LIST/MLST failed: %s", sresp); result.SetDesc("LIST/MLST command failed at "+urlstr+" : "+sresp); result.SetErrno(globus_error_to_errno(sresp, result.GetErrno())); free(sresp); sresp = NULL; } else { logger.msg(INFO, "LIST/MLST failed"); result.SetDesc("LIST/MLST command failed at "+urlstr); } return result; } free(sresp); 
sresp = NULL; result = transfer_list(); if (!result) result = DataStatus(DataStatus::StatError, result.GetErrno(), result.GetDesc()); return result; } DataStatus Lister::retrieve_dir_info(const URL& url,bool names_only) { DataStatus result = DataStatus::StatError; DataStatus con_result = handle_connect(url); if(!con_result) return con_result; globus_ftp_control_response_class_t cmd_resp; char *sresp = NULL; if (url.Protocol() == "gsiftp") { cmd_resp = send_command("DCAU", "N", true, &sresp, NULL, '"'); if ((cmd_resp != GLOBUS_FTP_POSITIVE_COMPLETION_REPLY) && (cmd_resp != GLOBUS_FTP_PERMANENT_NEGATIVE_COMPLETION_REPLY)) { if (sresp) { logger.msg(INFO, "DCAU failed: %s", sresp); result.SetDesc("DCAU command failed at "+urlstr+" : "+sresp); free(sresp); } else { logger.msg(INFO, "DCAU failed"); result.SetDesc("DCAU command failed at "+urlstr); } return result; } free(sresp); } globus_ftp_control_dcau_t dcau; dcau.mode = GLOBUS_FTP_CONTROL_DCAU_NONE; globus_ftp_control_local_dcau(handle, &dcau, GSS_C_NO_CREDENTIAL); globus_ftp_control_host_port_t pasv_addr; facts = true; free_format = false; DataStatus pasv_res = setup_pasv(pasv_addr); if (!pasv_res) return pasv_res; if (!names_only) { /* try MLSD */ int code = 0; cmd_resp = send_command("MLSD", path.c_str(), true, &sresp, &code); if (cmd_resp == GLOBUS_FTP_PERMANENT_NEGATIVE_COMPLETION_REPLY) { if (code == 500) { logger.msg(INFO, "MLSD is not supported - trying NLST"); free(sresp); /* run NLST */ facts = false; cmd_resp = send_command("NLST", path.c_str(), true, &sresp); } } } else { facts = false; cmd_resp = send_command("NLST", path.c_str(), true, &sresp); } if (cmd_resp == GLOBUS_FTP_POSITIVE_COMPLETION_REPLY) { /* completion is not expected here */ pasv_set = false; logger.msg(INFO, "Immediate completion: %s", sresp?sresp:""); result.SetDesc("Unexpected completion response from "+urlstr+" : "+(sresp?sresp:"")); if (sresp) free(sresp); return result; } if ((cmd_resp != GLOBUS_FTP_POSITIVE_PRELIMINARY_REPLY) && (cmd_resp != GLOBUS_FTP_POSITIVE_INTERMEDIATE_REPLY)) { if (sresp) { logger.msg(INFO, "NLST/MLSD failed: %s", sresp); result.SetDesc("NLST/MLSD command failed at "+urlstr+" : "+sresp); result.SetErrno(globus_error_to_errno(sresp, result.GetErrno())); free(sresp); } else { logger.msg(INFO, "NLST/MLSD failed"); result.SetDesc("NLST/MLSD command failed at "+urlstr); } return result; } free(sresp); return transfer_list(); } DataStatus Lister::transfer_list(void) { DataStatus result = DataStatus::ListError; globus_ftp_control_response_class_t cmd_resp; char* sresp = NULL; /* start transfer */ for (;;) { /* waiting for response received */ cmd_resp = send_command(NULL, NULL, true, &sresp); if (cmd_resp == GLOBUS_FTP_POSITIVE_COMPLETION_REPLY) break; if ((cmd_resp != GLOBUS_FTP_POSITIVE_PRELIMINARY_REPLY) && (cmd_resp != GLOBUS_FTP_POSITIVE_INTERMEDIATE_REPLY)) { if (sresp) { logger.msg(INFO, "Data transfer aborted: %s", sresp); result.SetDesc("Data transfer aborted at "+urlstr+" : "+sresp); free(sresp); } else { logger.msg(INFO, "Data transfer aborted"); result.SetDesc("Data transfer aborted at "+urlstr); } // Destroy data connections here ????????? 
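// Clearing pasv_set below forces setup_pasv() (which returns immediately while
// pasv_set is true) to negotiate a fresh data channel on the next listing attempt,
// since the current one is left in an undefined state after the failed transfer.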
pasv_set = false; return result; } if (sresp) free(sresp); } if (sresp) free(sresp); /* waiting for data ended */ if (wait_for_data_callback() != CALLBACK_DONE) { logger.msg(INFO, "Failed to transfer data"); // TODO: error description from callback result.SetDesc("Failed to transfer data from "+urlstr); pasv_set = false; return result; } pasv_set = false; /* success */ return DataStatus::Success; } } // namespace ArcDMCGridFTP nordugrid-arc-5.4.2/src/hed/dmc/gridftp/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712221032064022755 xustar000000000000000027 mtime=1380201524.819386 30 atime=1513200595.338956656 30 ctime=1513200660.887758343 nordugrid-arc-5.4.2/src/hed/dmc/gridftp/Makefile.am0000644000175000002070000000142712221032064023023 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libdmcgridftp.la libdmcgridftp_la_SOURCES = DataPointGridFTP.cpp Lister.cpp \ DataPointGridFTP.h Lister.h libdmcgridftp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(GLOBUS_FTP_CLIENT_CFLAGS) $(AM_CXXFLAGS) libdmcgridftp_la_LIBADD = \ $(top_builddir)/src/hed/libs/globusutils/libarcglobusutils.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(GLOBUS_FTP_CLIENT_LIBS) \ $(GLOBUS_FTP_CONTROL_LIBS) $(GLOBUS_COMMON_LIBS) $(GLOBUS_IO_LIBS) libdmcgridftp_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/dmc/gridftp/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315723022773 xustar000000000000000030 mtime=1513200595.384957218 30 atime=1513200649.004613008 30 ctime=1513200660.888758356 nordugrid-arc-5.4.2/src/hed/dmc/gridftp/Makefile.in0000644000175000002070000006520413214315723023050 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/dmc/gridftp DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libdmcgridftp_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/globusutils/libarcglobusutils.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libdmcgridftp_la_OBJECTS = libdmcgridftp_la-DataPointGridFTP.lo \ libdmcgridftp_la-Lister.lo libdmcgridftp_la_OBJECTS = $(am_libdmcgridftp_la_OBJECTS) libdmcgridftp_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libdmcgridftp_la_CXXFLAGS) $(CXXFLAGS) \ $(libdmcgridftp_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) 
$(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libdmcgridftp_la_SOURCES) DIST_SOURCES = $(libdmcgridftp_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ 
GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ 
TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libdmcgridftp.la libdmcgridftp_la_SOURCES = DataPointGridFTP.cpp Lister.cpp \ DataPointGridFTP.h Lister.h libdmcgridftp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(GLOBUS_FTP_CLIENT_CFLAGS) $(AM_CXXFLAGS) libdmcgridftp_la_LIBADD = \ $(top_builddir)/src/hed/libs/globusutils/libarcglobusutils.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(GLOBUS_FTP_CLIENT_LIBS) \ $(GLOBUS_FTP_CONTROL_LIBS) $(GLOBUS_COMMON_LIBS) $(GLOBUS_IO_LIBS) libdmcgridftp_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) 
@for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/dmc/gridftp/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/dmc/gridftp/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libdmcgridftp.la: $(libdmcgridftp_la_OBJECTS) $(libdmcgridftp_la_DEPENDENCIES) $(libdmcgridftp_la_LINK) -rpath $(pkglibdir) $(libdmcgridftp_la_OBJECTS) $(libdmcgridftp_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdmcgridftp_la-DataPointGridFTP.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdmcgridftp_la-Lister.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) 
@AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libdmcgridftp_la-DataPointGridFTP.lo: DataPointGridFTP.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcgridftp_la_CXXFLAGS) $(CXXFLAGS) -MT libdmcgridftp_la-DataPointGridFTP.lo -MD -MP -MF $(DEPDIR)/libdmcgridftp_la-DataPointGridFTP.Tpo -c -o libdmcgridftp_la-DataPointGridFTP.lo `test -f 'DataPointGridFTP.cpp' || echo '$(srcdir)/'`DataPointGridFTP.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdmcgridftp_la-DataPointGridFTP.Tpo $(DEPDIR)/libdmcgridftp_la-DataPointGridFTP.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataPointGridFTP.cpp' object='libdmcgridftp_la-DataPointGridFTP.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcgridftp_la_CXXFLAGS) $(CXXFLAGS) -c -o libdmcgridftp_la-DataPointGridFTP.lo `test -f 'DataPointGridFTP.cpp' || echo '$(srcdir)/'`DataPointGridFTP.cpp libdmcgridftp_la-Lister.lo: Lister.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcgridftp_la_CXXFLAGS) $(CXXFLAGS) -MT libdmcgridftp_la-Lister.lo -MD -MP -MF $(DEPDIR)/libdmcgridftp_la-Lister.Tpo -c -o libdmcgridftp_la-Lister.lo `test -f 'Lister.cpp' || echo '$(srcdir)/'`Lister.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdmcgridftp_la-Lister.Tpo $(DEPDIR)/libdmcgridftp_la-Lister.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='Lister.cpp' object='libdmcgridftp_la-Lister.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcgridftp_la_CXXFLAGS) $(CXXFLAGS) -c -o libdmcgridftp_la-Lister.lo `test -f 'Lister.cpp' || echo '$(srcdir)/'`Lister.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if 
test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/dmc/gridftp/PaxHeaders.7502/DataPointGridFTP.h0000644000000000000000000000012312074027430024140 xustar000000000000000027 mtime=1357917976.740806 26 atime=1513200575.19471 30 ctime=1513200660.891758392 nordugrid-arc-5.4.2/src/hed/dmc/gridftp/DataPointGridFTP.h0000644000175000002070000001073512074027430024214 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_DATAPOINTGRIDFTP_H__ #define __ARC_DATAPOINTGRIDFTP_H__ #include #include #include #include #include #include #include #include namespace ArcDMCGridFTP { using namespace Arc; class Lister; /** * GridFTP is essentially the FTP protocol with GSI security. This class * uses libraries from the Globus Toolkit. It can also be used for regular * FTP. * * This class is a loadable module and cannot be used directly. The DataHandle * class loads modules at runtime and should be used instead of this. 
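 *
 * A minimal usage sketch via DataHandle (hedged: the gsiftp URL and the
 * error handling below are illustrative only; DataHandle, DataPoint::Stat
 * and DataStatus are the libarcdata interfaces this plugin implements):
 * @code
 * Arc::UserConfig usercfg;
 * Arc::URL url("gsiftp://example.org/dir/file");
 * Arc::DataHandle handle(url, usercfg); // loads a suitable DMC at runtime
 * if (!handle) {
 *   // no data management component could be loaded for this protocol
 * } else {
 *   Arc::FileInfo info;
 *   Arc::DataStatus res = handle->Stat(info, Arc::DataPoint::INFO_TYPE_ALL);
 *   if (!res) std::cerr << res.GetDesc() << std::endl;
 * }
 * @endcode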
*/ class DataPointGridFTP : public DataPointDirect { private: class CBArg { private: Glib::Mutex lock; DataPointGridFTP* arg; CBArg(void); CBArg(const CBArg&); public: CBArg(DataPointGridFTP* a); ~CBArg(void) {}; DataPointGridFTP* acquire(void); void release(void); void abandon(void); }; static Logger logger; CBArg* cbarg; bool ftp_active; globus_ftp_client_handle_t ftp_handle; globus_ftp_client_operationattr_t ftp_opattr; globus_thread_t ftp_control_thread; int ftp_threads; bool autodir; SimpleCondition cond; DataStatus callback_status; GSSCredential *credential; bool reading; bool writing; bool ftp_eof_flag; int check_received_length; bool data_error; SimpleCounter data_counter; Lister* lister; static void ftp_complete_callback(void *arg, globus_ftp_client_handle_t *handle, globus_object_t *error); static void ftp_get_complete_callback(void *arg, globus_ftp_client_handle_t *handle, globus_object_t *error); static void ftp_put_complete_callback(void *arg, globus_ftp_client_handle_t *handle, globus_object_t *error); static void ftp_read_callback(void *arg, globus_ftp_client_handle_t *handle, globus_object_t *error, globus_byte_t *buffer, globus_size_t length, globus_off_t offset, globus_bool_t eof); static void ftp_check_callback(void *arg, globus_ftp_client_handle_t *handle, globus_object_t *error, globus_byte_t *buffer, globus_size_t length, globus_off_t offset, globus_bool_t eof); static void ftp_write_callback(void *arg, globus_ftp_client_handle_t *handle, globus_object_t *error, globus_byte_t *buffer, globus_size_t length, globus_off_t offset, globus_bool_t eof); static void* ftp_read_thread(void *arg); static void* ftp_write_thread(void *arg); bool mkdir_ftp(); char ftp_buf[16]; bool check_credentials(); void set_attributes(); DataStatus RemoveFile(); DataStatus RemoveDir(); DataStatus do_more_stat(FileInfo& f, DataPointInfoType verb); public: DataPointGridFTP(const URL& url, const UserConfig& usercfg, PluginArgument* parg); virtual ~DataPointGridFTP(); static Plugin* Instance(PluginArgument *arg); virtual bool SetURL(const URL& url); virtual DataStatus StartReading(DataBuffer& buf); virtual DataStatus StartWriting(DataBuffer& buf, DataCallback *space_cb = NULL); virtual DataStatus StopReading(); virtual DataStatus StopWriting(); virtual DataStatus Check(bool check_meta); virtual DataStatus Remove(); virtual DataStatus CreateDirectory(bool with_parents=false); virtual DataStatus Stat(FileInfo& file, DataPointInfoType verb = INFO_TYPE_ALL); virtual DataStatus List(std::list& files, DataPointInfoType verb = INFO_TYPE_ALL); virtual DataStatus Rename(const URL& newurl); virtual bool WriteOutOfOrder(); virtual bool ProvidesMeta() const; virtual const std::string DefaultCheckSum() const; virtual bool RequiresCredentials() const; }; } // namespace ArcDMCGridFTP #endif // __ARC_DATAPOINTGRIDFTP_H__ nordugrid-arc-5.4.2/src/hed/dmc/gridftp/PaxHeaders.7502/DataPointGridFTP.cpp0000644000000000000000000000012313213471662024501 xustar000000000000000027 mtime=1512993714.552508 26 atime=1513200575.20171 30 ctime=1513200660.889758368 nordugrid-arc-5.4.2/src/hed/dmc/gridftp/DataPointGridFTP.cpp0000644000175000002070000015741613213471662024565 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "DataPointGridFTP.h" #include "Lister.h" namespace ArcDMCGridFTP { using namespace Arc; static bool proxy_initialized = false; char dummy_buffer = 0; Logger 
DataPointGridFTP::logger(Logger::getRootLogger(), "DataPoint.GridFTP"); void DataPointGridFTP::ftp_complete_callback(void *arg, globus_ftp_client_handle_t*, globus_object_t *error) { DataPointGridFTP *it = ((CBArg*)arg)->acquire(); if(!it) return; if (error == GLOBUS_SUCCESS) { logger.msg(DEBUG, "ftp_complete_callback: success"); it->callback_status = DataStatus::Success; it->cond.signal(); } else { std::string err(trim(globus_object_to_string(error))); logger.msg(VERBOSE, "ftp_complete_callback: error: %s", err); it->callback_status = DataStatus(DataStatus::GenericError, globus_error_to_errno(err, EARCOTHER), err); it->cond.signal(); } ((CBArg*)arg)->release(); } void DataPointGridFTP::ftp_check_callback(void *arg, globus_ftp_client_handle_t*, globus_object_t *error, globus_byte_t*, globus_size_t length, globus_off_t, globus_bool_t eof) { DataPointGridFTP *it = ((CBArg*)arg)->acquire(); if(!it) return; logger.msg(VERBOSE, "ftp_check_callback"); if (error != GLOBUS_SUCCESS) { logger.msg(VERBOSE, "Globus error: %s", globus_object_to_string(error)); ((CBArg*)arg)->release(); return; } if (eof) { it->ftp_eof_flag = true; ((CBArg*)arg)->release(); return; } if (it->check_received_length > 0) { logger.msg(INFO, "Excessive data received while checking file access"); it->ftp_eof_flag = true; GlobusResult(globus_ftp_client_abort(&(it->ftp_handle))); ((CBArg*)arg)->release(); return; } it->check_received_length += length; ((CBArg*)arg)->release(); GlobusResult res(globus_ftp_client_register_read(&(it->ftp_handle), (globus_byte_t*)(it->ftp_buf), sizeof(it->ftp_buf), &ftp_check_callback, arg)); it = ((CBArg*)arg)->acquire(); if(!it) return; if (!res) { logger.msg(INFO, "Registration of Globus FTP buffer failed - cancel check"); logger.msg(VERBOSE, "Globus error: %s", res.str()); GlobusResult(globus_ftp_client_abort(&(it->ftp_handle))); ((CBArg*)arg)->release(); return; } ((CBArg*)arg)->release(); return; } DataStatus DataPointGridFTP::Check(bool check_meta) { if (!ftp_active) return DataStatus::NotInitializedError; if (reading) return DataStatus::IsReadingError; if (writing) return DataStatus::IsWritingError; GlobusResult res; globus_off_t size = 0; globus_abstime_t gl_modify_time; time_t modify_time; set_attributes(); if (check_meta) { res = globus_ftp_client_size(&ftp_handle, url.plainstr().c_str(), &ftp_opattr, &size, &ftp_complete_callback, cbarg); if (!res) { logger.msg(VERBOSE, "check_ftp: globus_ftp_client_size failed"); logger.msg(INFO, "Globus error: %s", res.str()); } else if (!cond.wait(1000*usercfg.Timeout())) { logger.msg(INFO, "check_ftp: timeout waiting for size"); GlobusResult(globus_ftp_client_abort(&ftp_handle)); cond.wait(); } else if (!callback_status) logger.msg(INFO, "check_ftp: failed to get file's size"); else { SetSize(size); logger.msg(VERBOSE, "check_ftp: obtained size: %lli", GetSize()); } res = globus_ftp_client_modification_time(&ftp_handle, url.plainstr().c_str(), &ftp_opattr, &gl_modify_time, &ftp_complete_callback, cbarg); if (!res) { logger.msg(VERBOSE, "check_ftp: globus_ftp_client_modification_time failed"); logger.msg(INFO, "Globus error: %s", res.str()); } else if (!cond.wait(1000*usercfg.Timeout())) { logger.msg(INFO, "check_ftp: timeout waiting for modification_time"); GlobusResult(globus_ftp_client_abort(&ftp_handle)); cond.wait(); } else if (!callback_status) logger.msg(INFO, "check_ftp: failed to get file's modification time"); else { int modify_utime; GlobusTimeAbstimeGet(gl_modify_time, modify_time, modify_utime); SetModified(modify_time); 
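        // Note: GlobusTimeAbstimeGet() splits the Globus abstime into whole
        // seconds (modify_time) and a sub-second part (modify_utime); only
        // the whole seconds are stored via SetModified(), the sub-second
        // part is discarded.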
logger.msg(VERBOSE, "check_ftp: obtained modification date: %s", GetModified().str()); } } // check if file or directory - can't do a get on a directory FileInfo fileinfo; if (!Stat(fileinfo, INFO_TYPE_TYPE)) return DataStatus::CheckError; if (fileinfo.GetType() != FileInfo::file_type_file) // successful stat is enough to report successful access to a directory return DataStatus::Success; // Do not use partial_get for ordinary ftp. Stupid globus tries to // use non-standard commands anyway. if (is_secure) { res = globus_ftp_client_partial_get(&ftp_handle, url.plainstr().c_str(), &ftp_opattr, GLOBUS_NULL, 0, 1, &ftp_complete_callback, cbarg); if (!res) { std::string globus_err(res.str()); logger.msg(VERBOSE, "check_ftp: globus_ftp_client_get failed"); logger.msg(VERBOSE, globus_err); return DataStatus(DataStatus::CheckError, globus_err); } // use eof_flag to pass result from callback ftp_eof_flag = false; check_received_length = 0; logger.msg(VERBOSE, "check_ftp: globus_ftp_client_register_read"); res = globus_ftp_client_register_read(&ftp_handle, (globus_byte_t*)ftp_buf, sizeof(ftp_buf), &ftp_check_callback, cbarg); if (!res) { GlobusResult(globus_ftp_client_abort(&ftp_handle)); cond.wait(); return DataStatus::CheckError; } if (!cond.wait(1000*usercfg.Timeout())) { logger.msg(VERBOSE, "check_ftp: timeout waiting for partial get"); GlobusResult(globus_ftp_client_abort(&ftp_handle)); cond.wait(); return DataStatus(DataStatus::CheckError, EARCREQUESTTIMEOUT, "timeout waiting for partial get from server: "+url.plainstr()); } if (ftp_eof_flag) return DataStatus::Success; return DataStatus(DataStatus::CheckError, callback_status.GetDesc()); } else { // Do not use it at all. It does not give too much useful // information anyway. But request at least existence of file. if (!CheckSize()) return DataStatus::CheckError; return DataStatus::Success; } } DataStatus DataPointGridFTP::Remove() { if (!ftp_active) return DataStatus::NotInitializedError; if (reading) return DataStatus::IsReadingError; if (writing) return DataStatus::IsWritingError; GlobusResult res; set_attributes(); // Try file delete and then dir delete if that fails. It would be good to // use EISDIR but we cannot rely on that error being detected properly for // all server implementations. 
DataStatus rm_res = RemoveFile(); if (!rm_res && rm_res.GetErrno() != ENOENT && rm_res.GetErrno() != EACCES) { logger.msg(INFO, "File delete failed, attempting directory delete for %s", url.plainstr()); rm_res = RemoveDir(); } return rm_res; } DataStatus DataPointGridFTP::RemoveFile() { GlobusResult res(globus_ftp_client_delete(&ftp_handle, url.plainstr().c_str(), &ftp_opattr, &ftp_complete_callback, cbarg)); if (!res) { logger.msg(VERBOSE, "delete_ftp: globus_ftp_client_delete failed"); std::string globus_err(res.str()); logger.msg(VERBOSE, globus_err); return DataStatus(DataStatus::DeleteError, globus_err); } if (!cond.wait(1000*usercfg.Timeout())) { logger.msg(VERBOSE, "delete_ftp: timeout waiting for delete"); GlobusResult(globus_ftp_client_abort(&ftp_handle)); cond.wait(); return DataStatus(DataStatus::DeleteError, EARCREQUESTTIMEOUT, "Timeout waiting for delete for "+url.plainstr()); } if (!callback_status) { return DataStatus(DataStatus::DeleteError, callback_status.GetErrno(), callback_status.GetDesc()); } return DataStatus::Success; } DataStatus DataPointGridFTP::RemoveDir() { GlobusResult res(globus_ftp_client_rmdir(&ftp_handle, url.plainstr().c_str(), &ftp_opattr, &ftp_complete_callback, cbarg)); if (!res) { logger.msg(VERBOSE, "delete_ftp: globus_ftp_client_rmdir failed"); std::string globus_err(res.str()); logger.msg(VERBOSE, globus_err); return DataStatus(DataStatus::DeleteError, globus_err); } if (!cond.wait(1000*usercfg.Timeout())) { logger.msg(VERBOSE, "delete_ftp: timeout waiting for delete"); GlobusResult(globus_ftp_client_abort(&ftp_handle)); cond.wait(); return DataStatus(DataStatus::DeleteError, EARCREQUESTTIMEOUT, "Timeout waiting for delete of "+url.plainstr()); } if (!callback_status) { return DataStatus(DataStatus::DeleteError, callback_status.GetErrno(), callback_status.GetDesc()); } return DataStatus::Success; } static bool remove_last_dir(std::string& dir) { // dir also contains proto and server std::string::size_type nn = std::string::npos; if (!strncasecmp(dir.c_str(), "ftp://", 6)) nn = dir.find('/', 6); else if (!strncasecmp(dir.c_str(), "gsiftp://", 9)) nn = dir.find('/', 9); if (nn == std::string::npos) return false; std::string::size_type n; if ((n = dir.rfind('/')) == std::string::npos) return false; if (n < nn) return false; dir.resize(n); return true; } static bool add_last_dir(std::string& dir, const std::string& path) { int l = dir.length(); std::string::size_type n = path.find('/', l + 1); if (n == std::string::npos) return false; dir = path; dir.resize(n); return true; } bool DataPointGridFTP::mkdir_ftp() { std::string ftp_dir_path = url.plainstr(); for (;;) if (!remove_last_dir(ftp_dir_path)) break; bool result = true; for (;;) { if (!add_last_dir(ftp_dir_path, url.plainstr())) break; logger.msg(VERBOSE, "mkdir_ftp: making %s", ftp_dir_path); GlobusResult res(globus_ftp_client_mkdir(&ftp_handle, ftp_dir_path.c_str(), &ftp_opattr, &ftp_complete_callback, cbarg)); if (!res) { logger.msg(INFO, "Globus error: %s", res.str()); return false; } if (!cond.wait(1000*usercfg.Timeout())) { logger.msg(INFO, "mkdir_ftp: timeout waiting for mkdir"); /* timeout - have to cancel operation here */ GlobusResult(globus_ftp_client_abort(&ftp_handle)); cond.wait(); return false; } if (!callback_status) result = false; } return result; } DataStatus DataPointGridFTP::CreateDirectory(bool with_parents) { if (!ftp_active) return DataStatus::NotInitializedError; set_attributes(); // if with_parents use standard method used during StartWriting if (with_parents) return 
mkdir_ftp() ? DataStatus::Success : DataStatus::CreateDirectoryError; // the globus mkdir call uses the full URL std::string dirpath = url.plainstr(); // check if file is in root directory if (!remove_last_dir(dirpath)) return DataStatus::Success; logger.msg(VERBOSE, "Creating directory %s", dirpath); GlobusResult res(globus_ftp_client_mkdir(&ftp_handle, dirpath.c_str(), &ftp_opattr, &ftp_complete_callback, cbarg)); if (!res) { std::string err(res.str()); logger.msg(VERBOSE, "Globus error: %s", err); return DataStatus(DataStatus::CreateDirectoryError, err); } if (!cond.wait(1000*usercfg.Timeout())) { logger.msg(VERBOSE, "Timeout waiting for mkdir"); /* timeout - have to cancel operation here */ GlobusResult(globus_ftp_client_abort(&ftp_handle)); cond.wait(); return DataStatus(DataStatus::CreateDirectoryError, EARCREQUESTTIMEOUT, "Timeout waiting for mkdir at "+url.plainstr()); } if (!callback_status) { return DataStatus(DataStatus::CreateDirectoryError, callback_status.GetErrno(), callback_status.GetDesc()); } return DataStatus::Success; } DataStatus DataPointGridFTP::StartReading(DataBuffer& buf) { if (!ftp_active) return DataStatus::NotInitializedError; if (reading) return DataStatus::IsReadingError; if (writing) return DataStatus::IsWritingError; set_attributes(); reading = true; buffer = &buf; bool limit_length = false; unsigned long long int range_length = 0; if (range_end > range_start) { range_length = range_end - range_start; limit_length = true; } logger.msg(VERBOSE, "start_reading_ftp"); ftp_eof_flag = false; GlobusResult(globus_ftp_client_handle_cache_url_state(&ftp_handle, url.plainstr().c_str())); GlobusResult res; logger.msg(VERBOSE, "start_reading_ftp: globus_ftp_client_get"); cond.reset(); if (limit_length) { res = globus_ftp_client_partial_get(&ftp_handle, url.plainstr().c_str(), &ftp_opattr, GLOBUS_NULL, range_start, range_start + range_length + 1, &ftp_get_complete_callback, cbarg); } else { res = globus_ftp_client_get(&ftp_handle, url.plainstr().c_str(), &ftp_opattr, GLOBUS_NULL, &ftp_get_complete_callback, cbarg); } if (!res) { logger.msg(VERBOSE, "start_reading_ftp: globus_ftp_client_get failed"); std::string globus_err(res.str()); logger.msg(VERBOSE, globus_err); GlobusResult(globus_ftp_client_handle_flush_url_state(&ftp_handle, url.plainstr().c_str())); buffer->error_read(true); reading = false; return DataStatus(DataStatus::ReadStartError, globus_err); } if (!GlobusResult(globus_thread_create(&ftp_control_thread, GLOBUS_NULL, &ftp_read_thread, this))) { logger.msg(VERBOSE, "start_reading_ftp: globus_thread_create failed"); GlobusResult(globus_ftp_client_abort(&ftp_handle)); cond.wait(); GlobusResult(globus_ftp_client_handle_flush_url_state(&ftp_handle, url.plainstr().c_str())); buffer->error_read(true); reading = false; return DataStatus(DataStatus::ReadStartError, "Failed to create new thread"); } // make sure globus has thread for handling network/callbacks GlobusResult(globus_thread_blocking_will_block()); return DataStatus::Success; } DataStatus DataPointGridFTP::StopReading() { if (!reading) return DataStatus::ReadStopError; reading = false; // If error in buffer then read thread will already have called abort if (!buffer) return DataStatus::Success; if (!buffer->eof_read() && !buffer->error()) { logger.msg(VERBOSE, "stop_reading_ftp: aborting connection"); GlobusResult res(globus_ftp_client_abort(&ftp_handle)); if(!res) { // This mostly means transfer failed and Globus did not call complete // callback. 
Because it was reported that Globus may call it even // 1 hour after abort initiated here that callback is imitated. std::string globus_err(res.str()); logger.msg(INFO, "Failed to abort transfer of ftp file: %s", globus_err); logger.msg(INFO, "Assuming transfer is already aborted or failed."); cond.lock(); failure_code = DataStatus(DataStatus::ReadStopError, globus_err); cond.unlock(); buffer->error_read(true); } } logger.msg(VERBOSE, "stop_reading_ftp: waiting for transfer to finish"); cond.wait(); logger.msg(VERBOSE, "stop_reading_ftp: exiting: %s", url.plainstr()); //GlobusResult(globus_ftp_client_handle_flush_url_state(&ftp_handle, url.plainstr().c_str())); if (!callback_status) return DataStatus(DataStatus::ReadStopError, callback_status.GetDesc()); return DataStatus::Success; } void* DataPointGridFTP::ftp_read_thread(void *arg) { DataPointGridFTP *it = (DataPointGridFTP*)arg; int h; unsigned int l; GlobusResult res; int registration_failed = 0; it->data_error = false; it->data_counter.set(0); logger.msg(INFO, "ftp_read_thread: get and register buffers"); int n_buffers = 0; for (;;) { if (it->buffer->eof_read()) break; if (!it->buffer->for_read(h, l, true)) { /* eof or error */ if (it->buffer->error()) { /* error -> abort reading */ logger.msg(VERBOSE, "ftp_read_thread: for_read failed - aborting: %s", it->url.plainstr()); GlobusResult(globus_ftp_client_abort(&(it->ftp_handle))); } break; } if (it->data_error) { // This is meant to reduce time window for globus bug. // See comment in ftp_write_thread. it->buffer->is_read(h, 0, 0); logger.msg(VERBOSE, "ftp_read_thread: data callback failed - aborting: %s", it->url.plainstr()); GlobusResult(globus_ftp_client_abort(&(it->ftp_handle))); break; } it->data_counter.inc(); res = globus_ftp_client_register_read(&(it->ftp_handle), (globus_byte_t*)((*(it->buffer))[h]), l, &(it->ftp_read_callback), it->cbarg); if (!res) { it->data_counter.dec(); logger.msg(DEBUG, "ftp_read_thread: Globus error: %s", res.str()); // This can happen if handle can't either yet or already // provide data. In last case there is no reason to retry. if(it->ftp_eof_flag) { it->buffer->is_read(h, 0, 0); break; } registration_failed++; if (registration_failed >= 10) { it->buffer->is_read(h, 0, 0); it->buffer->error_read(true); // can set eof here because no callback will be called (I guess). it->buffer->eof_read(true); logger.msg(DEBUG, "ftp_read_thread: " "too many registration failures - abort: %s", it->url.plainstr()); } else { logger.msg(DEBUG, "ftp_read_thread: " "failed to register Globus buffer - will try later: %s", it->url.plainstr()); it->buffer->is_read(h, 0, 0); // First retry quickly for race condition. // Then slowly for pecularities. if(registration_failed > 2) sleep(1); } } else n_buffers++; } // make sure complete callback is called logger.msg(VERBOSE, "ftp_read_thread: waiting for eof"); it->buffer->wait_eof_read(); // And now make sure all buffers were released in case Globus calls // complete_callback before calling all read_callbacks logger.msg(VERBOSE, "ftp_read_thread: waiting for buffers released"); //if(!it->buffer->wait_for_read(15)) { if(!it->data_counter.wait(15)) { // See comment in ftp_write_thread for explanation. logger.msg(VERBOSE, "ftp_read_thread: failed to release buffers - leaking"); CBArg* cbarg_old = it->cbarg; it->cbarg = new CBArg(it); cbarg_old->abandon(); }; logger.msg(VERBOSE, "ftp_read_thread: exiting"); it->callback_status = it->buffer->error_read() ? 
DataStatus::ReadError : DataStatus::Success; it->cond.signal(); return NULL; } void DataPointGridFTP::ftp_read_callback(void *arg, globus_ftp_client_handle_t*, globus_object_t *error, globus_byte_t *buffer, globus_size_t length, globus_off_t offset, globus_bool_t eof) { DataPointGridFTP *it = ((CBArg*)arg)->acquire(); if(!it) return; if (error != GLOBUS_SUCCESS) { it->data_error = true; logger.msg(VERBOSE, "ftp_read_callback: failure: %s",globus_object_to_string(error)); it->buffer->is_read((char*)buffer, 0, 0); } else { logger.msg(DEBUG, "ftp_read_callback: success"); it->buffer->is_read((char*)buffer, length, offset); if (eof) it->ftp_eof_flag = true; } it->data_counter.dec(); ((CBArg*)arg)->release(); return; } void DataPointGridFTP::ftp_get_complete_callback(void *arg, globus_ftp_client_handle_t*, globus_object_t *error) { DataPointGridFTP *it = ((CBArg*)arg)->acquire(); if(!it) return; /* data transfer finished */ if (error != GLOBUS_SUCCESS) { logger.msg(INFO, "Failed to get ftp file"); std::string err(trim(globus_object_to_string(error))); logger.msg(VERBOSE, "%s", err); it->cond.lock(); it->failure_code = DataStatus(DataStatus::ReadStartError, globus_error_to_errno(err, EARCOTHER), err); it->cond.unlock(); it->buffer->error_read(true); } else { it->buffer->eof_read(true); // This also reports to working threads transfer finished } ((CBArg*)arg)->release(); return; } DataStatus DataPointGridFTP::StartWriting(DataBuffer& buf, DataCallback*) { if (!ftp_active) return DataStatus::NotInitializedError; if (reading) return DataStatus::IsReadingError; if (writing) return DataStatus::IsWritingError; set_attributes(); writing = true; buffer = &buf; /* size of file first */ bool limit_length = false; unsigned long long int range_length = 0; if (range_end > range_start) { range_length = range_end - range_start; limit_length = true; } ftp_eof_flag = false; GlobusResult res; GlobusResult(globus_ftp_client_handle_cache_url_state(&ftp_handle, url.plainstr().c_str())); if (autodir) { logger.msg(VERBOSE, "start_writing_ftp: mkdir"); if (!mkdir_ftp()) logger.msg(VERBOSE, "start_writing_ftp: mkdir failed - still trying to write"); } logger.msg(VERBOSE, "start_writing_ftp: put"); cond.reset(); if (limit_length) { res = globus_ftp_client_partial_put(&ftp_handle, url.plainstr().c_str(), &ftp_opattr, GLOBUS_NULL, range_start, range_start + range_length, &ftp_put_complete_callback, cbarg); } else { res = globus_ftp_client_put(&ftp_handle, url.plainstr().c_str(), &ftp_opattr, GLOBUS_NULL, &ftp_put_complete_callback, cbarg); } if (!res) { logger.msg(VERBOSE, "start_writing_ftp: put failed"); std::string globus_err(res.str()); logger.msg(VERBOSE, globus_err); GlobusResult(globus_ftp_client_handle_flush_url_state(&ftp_handle, url.plainstr().c_str())); buffer->error_write(true); writing = false; return DataStatus(DataStatus::WriteStartError, globus_err); } if (!GlobusResult(globus_thread_create(&ftp_control_thread, GLOBUS_NULL, &ftp_write_thread, this))) { logger.msg(VERBOSE, "start_writing_ftp: globus_thread_create failed"); GlobusResult(globus_ftp_client_handle_flush_url_state(&ftp_handle, url.plainstr().c_str())); buffer->error_write(true); writing = false; return DataStatus(DataStatus::WriteStartError, "Failed to create new thread"); } // make sure globus has thread for handling network/callbacks GlobusResult(globus_thread_blocking_will_block()); return DataStatus::Success; } DataStatus DataPointGridFTP::StopWriting() { if (!writing) return DataStatus::WriteStopError; writing = false; // If error in buffer 
then write thread will already have called abort if (!buffer) return DataStatus::Success; if (!buffer->eof_write() && !buffer->error()) { logger.msg(VERBOSE, "StopWriting: aborting connection"); GlobusResult res(globus_ftp_client_abort(&ftp_handle)); if(!res) { // This mostly means transfer failed and Globus did not call complete // callback. Because it was reported that Globus may call it even // 1 hour after abort initiated here that callback is imitated. std::string globus_err(res.str()); logger.msg(INFO, "Failed to abort transfer of ftp file: %s", globus_err); logger.msg(INFO, "Assuming transfer is already aborted or failed."); cond.lock(); failure_code = DataStatus(DataStatus::WriteStopError, globus_err); cond.unlock(); buffer->error_write(true); } } // Waiting for data transfer thread to finish cond.wait(); // checksum verification const CheckSum * calc_sum = buffer->checksum_object(); if (!buffer->error() && calc_sum && *calc_sum && buffer->checksum_valid()) { char buf[100]; calc_sum->print(buf,100); std::string csum(buf); if (csum.find(':') != std::string::npos && csum.substr(0, csum.find(':')) == DefaultCheckSum()) { logger.msg(VERBOSE, "StopWriting: Calculated checksum %s", csum); if(additional_checks) { // list checksum and compare // note: not all implementations support checksum logger.msg(DEBUG, "StopWriting: " "looking for checksum of %s", url.plainstr()); char cksum[256]; std::string cksumtype(upper(DefaultCheckSum())); GlobusResult res(globus_ftp_client_cksm(&ftp_handle, url.plainstr().c_str(), &ftp_opattr, cksum, (globus_off_t)0, (globus_off_t)-1, cksumtype.c_str(), &ftp_complete_callback, cbarg)); if (!res) { logger.msg(VERBOSE, "list_files_ftp: globus_ftp_client_cksm failed"); logger.msg(VERBOSE, "Globus error: %s", res.str()); } else if (!cond.wait(1000*usercfg.Timeout())) { logger.msg(VERBOSE, "list_files_ftp: timeout waiting for cksum"); GlobusResult(globus_ftp_client_abort(&ftp_handle)); cond.wait(); } else if (!callback_status) { // reset to success since failing to get checksum should not trigger an error callback_status = DataStatus::Success; logger.msg(INFO, "list_files_ftp: no checksum information possible"); } else { logger.msg(VERBOSE, "list_files_ftp: checksum %s", cksum); if (csum.substr(csum.find(':')+1).length() != std::string(cksum).length()) { // Some buggy Globus servers return a different type of checksum to the one requested logger.msg(WARNING, "Checksum type returned by server is different to requested type, cannot compare"); } else if (csum.substr(csum.find(':')+1) == std::string(cksum)) { logger.msg(INFO, "Calculated checksum %s matches checksum reported by server", csum); SetCheckSum(csum); } else { logger.msg(VERBOSE, "Checksum mismatch between calculated checksum %s and checksum reported by server %s", csum, std::string(DefaultCheckSum()+':'+cksum)); return DataStatus(DataStatus::TransferError, EARCCHECKSUM, "Checksum mismatch between calculated and reported checksums"); } } } } } //GlobusResult(globus_ftp_client_handle_flush_url_state(&ftp_handle, url.plainstr().c_str())); if (!callback_status) return DataStatus(DataStatus::WriteStopError, callback_status.GetDesc()); return DataStatus::Success; } void* DataPointGridFTP::ftp_write_thread(void *arg) { DataPointGridFTP *it = (DataPointGridFTP*)arg; int h; unsigned int l; unsigned long long int o; GlobusResult res; globus_bool_t eof = GLOBUS_FALSE; it->data_error = false; it->data_counter.set(0); logger.msg(INFO, "ftp_write_thread: get and register buffers"); for (;;) { if 
(!it->buffer->for_write(h, l, o, true)) { if (it->buffer->error()) { logger.msg(VERBOSE, "ftp_write_thread: for_write failed - aborting"); GlobusResult(globus_ftp_client_abort(&(it->ftp_handle))); break; } // no buffers and no errors - must be pure eof eof = GLOBUS_TRUE; o = it->buffer->eof_position(); res = globus_ftp_client_register_write(&(it->ftp_handle), (globus_byte_t*)(&dummy_buffer), 0, o, eof, &(it->ftp_write_callback), it->cbarg); break; // if(res == GLOBUS_SUCCESS) break; // sleep(1); continue; } if (it->data_error) { // This is meant to reduce time window for globus bug. // See comment below about data_counter. it->buffer->is_notwritten(h); logger.msg(VERBOSE, "ftp_write_thread: data callback failed - aborting"); GlobusResult(globus_ftp_client_abort(&(it->ftp_handle))); break; } it->data_counter.inc(); res = globus_ftp_client_register_write(&(it->ftp_handle), (globus_byte_t*)((*(it->buffer))[h]), l, o, eof, &(it->ftp_write_callback), it->cbarg); if (!res) { it->data_counter.dec(); it->buffer->is_notwritten(h); sleep(1); } } // make sure complete callback is called logger.msg(VERBOSE, "ftp_write_thread: waiting for eof"); it->buffer->wait_eof_write(); // And now make sure all buffers were released in case Globus calls // complete_callback before calling all read_callbacks logger.msg(VERBOSE, "ftp_write_thread: waiting for buffers released"); // if that does not happen quickly that means there are problems. //if(!it->buffer->wait_for_write(15)) { if(!it->data_counter.wait(15000)) { // If buffer registration happens while globus is reporting error // those buffers are lost by globus. But still we can't be sure // callback is never called. So switching to new cbarg to detach // potential callbacks from this object. logger.msg(VERBOSE, "ftp_write_thread: failed to release buffers - leaking"); CBArg* cbarg_old = it->cbarg; it->cbarg = new CBArg(it); cbarg_old->abandon(); }; logger.msg(VERBOSE, "ftp_write_thread: exiting"); it->callback_status = it->buffer->error_write() ? 
DataStatus::WriteError : DataStatus::Success; it->cond.signal(); // Report to control thread that data transfer thread finished return NULL; } void DataPointGridFTP::ftp_write_callback(void *arg, globus_ftp_client_handle_t*, globus_object_t *error, globus_byte_t *buffer, globus_size_t, globus_off_t, globus_bool_t is_eof) { DataPointGridFTP *it = ((CBArg*)arg)->acquire(); if(!it) return; // Filtering out dummy write - doing that to avoid additional check for dummy write complete if(buffer == (globus_byte_t*)(&dummy_buffer)) { ((CBArg*)arg)->release(); return; } if (error != GLOBUS_SUCCESS) { it->data_error = true; logger.msg(VERBOSE, "ftp_write_callback: failure: %s",globus_object_to_string(error)); it->buffer->is_notwritten((char*)buffer); } else { logger.msg(DEBUG, "ftp_write_callback: success %s",is_eof?"eof":" "); it->buffer->is_written((char*)buffer); } it->data_counter.dec(); ((CBArg*)arg)->release(); return; } void DataPointGridFTP::ftp_put_complete_callback(void *arg, globus_ftp_client_handle_t*, globus_object_t *error) { DataPointGridFTP *it = ((CBArg*)arg)->acquire(); if(!it) return; /* data transfer finished */ if (error != GLOBUS_SUCCESS) { logger.msg(INFO, "Failed to store ftp file"); std::string err(trim(globus_object_to_string(error))); logger.msg(VERBOSE, "%s", err); it->cond.lock(); // Protect access to failure_code it->failure_code = DataStatus(DataStatus::WriteStartError, globus_error_to_errno(err, EARCOTHER), err); it->cond.unlock(); it->buffer->error_write(true); } else { logger.msg(DEBUG, "ftp_put_complete_callback: success"); // This also reports to data transfer thread that transfer finished it->buffer->eof_write(true); } ((CBArg*)arg)->release(); return; } DataStatus DataPointGridFTP::do_more_stat(FileInfo& f, DataPointInfoType verb) { DataStatus result = DataStatus::Success; GlobusResult res; globus_off_t size = 0; globus_abstime_t gl_modify_time; time_t modify_time; std::string f_url = url.ConnectionURL() + f.GetName(); if (((verb & INFO_TYPE_CONTENT) == INFO_TYPE_CONTENT) && (!f.CheckSize()) && (f.GetType() != FileInfo::file_type_dir)) { logger.msg(DEBUG, "list_files_ftp: looking for size of %s", f_url); res = globus_ftp_client_size(&ftp_handle, f_url.c_str(), &ftp_opattr, &size, &ftp_complete_callback, cbarg); if (!res) { logger.msg(VERBOSE, "list_files_ftp: globus_ftp_client_size failed"); std::string globus_err(res.str()); logger.msg(INFO, "Globus error: %s", globus_err); result = DataStatus(DataStatus::StatError, globus_err); } else if (!cond.wait(1000*usercfg.Timeout())) { logger.msg(INFO, "list_files_ftp: timeout waiting for size"); logger.msg(INFO, "list_files_ftp: timeout waiting for size"); GlobusResult(globus_ftp_client_abort(&ftp_handle)); cond.wait(); result = DataStatus(DataStatus::StatError, EARCREQUESTTIMEOUT, "timeout waiting for file size"); } else if (!callback_status) { logger.msg(INFO, "list_files_ftp: failed to get file's size"); result = DataStatus(DataStatus::StatError, callback_status.GetDesc()); // Guessing - directories usually have no size f.SetType(FileInfo::file_type_dir); } else { f.SetSize(size); // Guessing - only files usually have size f.SetType(FileInfo::file_type_file); } } if ((verb & INFO_TYPE_TIMES) == INFO_TYPE_TIMES && !f.CheckModified()) { logger.msg(DEBUG, "list_files_ftp: " "looking for modification time of %s", f_url); res = globus_ftp_client_modification_time(&ftp_handle, f_url.c_str(), &ftp_opattr, &gl_modify_time, &ftp_complete_callback, cbarg); if (!res) { logger.msg(VERBOSE, "list_files_ftp: " 
"globus_ftp_client_modification_time failed"); std::string globus_err(res.str()); logger.msg(INFO, "Globus error: %s", globus_err); result = DataStatus(DataStatus::StatError, globus_err); } else if (!cond.wait(1000*usercfg.Timeout())) { logger.msg(INFO, "list_files_ftp: " "timeout waiting for modification_time"); GlobusResult(globus_ftp_client_abort(&ftp_handle)); cond.wait(); result = DataStatus(DataStatus::StatError, EARCREQUESTTIMEOUT, "timeout waiting for file modification time"); } else if (!callback_status) { logger.msg(INFO, "list_files_ftp: " "failed to get file's modification time"); result = DataStatus(DataStatus::StatError, callback_status.GetDesc()); } else { int modify_utime; GlobusTimeAbstimeGet(gl_modify_time, modify_time, modify_utime); f.SetModified(modify_time); } } if ((verb & INFO_TYPE_CONTENT) == INFO_TYPE_CONTENT && !f.CheckCheckSum() && f.GetType() != FileInfo::file_type_dir) { // not all implementations support checksum so failure is not an error logger.msg(DEBUG, "list_files_ftp: " "looking for checksum of %s", f_url); char cksum[256]; std::string cksumtype(upper(DefaultCheckSum()).c_str()); res = globus_ftp_client_cksm(&ftp_handle, f_url.c_str(), &ftp_opattr, cksum, (globus_off_t)0, (globus_off_t)-1, cksumtype.c_str(), &ftp_complete_callback, cbarg); if (!res) { logger.msg(VERBOSE, "list_files_ftp: globus_ftp_client_cksm failed"); logger.msg(VERBOSE, "Globus error: %s", res.str()); } else if (!cond.wait(1000*usercfg.Timeout())) { logger.msg(VERBOSE, "list_files_ftp: timeout waiting for cksum"); GlobusResult(globus_ftp_client_abort(&ftp_handle)); cond.wait(); } else if (!callback_status) { // reset to success since failing to get checksum should not trigger an error callback_status = DataStatus::Success; logger.msg(INFO, "list_files_ftp: no checksum information possible"); } else { logger.msg(VERBOSE, "list_files_ftp: checksum %s", cksum); f.SetCheckSum(DefaultCheckSum() + ':' + std::string(cksum)); } } return result; } DataStatus DataPointGridFTP::Stat(FileInfo& file, DataPoint::DataPointInfoType verb) { if (!ftp_active) return DataStatus::NotInitializedError; if (reading) return DataStatus::IsReadingError; if (writing) return DataStatus::IsWritingError; reading = true; set_attributes(); bool more_info = ((verb | INFO_TYPE_NAME) != INFO_TYPE_NAME); DataStatus lister_res = lister->retrieve_file_info(url,!more_info); if (!lister_res) { logger.msg(VERBOSE, "Failed to obtain stat from FTP: %s", lister_res.GetDesc()); reading = false; return lister_res; } DataStatus result = DataStatus::StatError; if (lister->size() == 0) { logger.msg(VERBOSE, "No results returned from stat"); result.SetDesc("No results found for "+url.plainstr()); reading = false; return result; } if(lister->size() != 1) { logger.msg(VERBOSE, "Wrong number of objects (%i) for stat from ftp: %s", lister->size(), url.plainstr()); // guess - that probably means it is directory file.SetName(FileInfo(url.Path()).GetName()); file.SetType(FileInfo::file_type_dir); reading = false; return DataStatus::Success; } FileInfo lister_info(*(lister->begin())); // does returned path match what we expect? 
// remove trailing slashes from url std::string fname(url.Path()); while (fname.length() > 1 && fname[fname.length()-1] == '/') fname.erase(fname.length()-1); if ((lister_info.GetName().substr(lister_info.GetName().rfind('/')+1)) != (fname.substr(fname.rfind('/')+1))) { logger.msg(VERBOSE, "Unexpected path %s returned from server", lister_info.GetName()); result.SetDesc("Unexpected path returned from server for "+url.plainstr()); reading = false; return result; } result = DataStatus::Success; if (lister_info.GetName()[0] != '/') lister_info.SetName(url.Path()); file.SetName(lister_info.GetName()); if (more_info) { DataStatus r = do_more_stat(lister_info, verb); if(!r) result = r; } file.SetType(lister_info.GetType()); if (lister_info.CheckSize()) { file.SetSize(lister_info.GetSize()); SetSize(lister_info.GetSize()); } if (lister_info.CheckModified()) { file.SetModified(lister_info.GetModified()); SetModified(lister_info.GetModified()); } if (lister_info.CheckCheckSum()) { file.SetCheckSum(lister_info.GetCheckSum()); SetCheckSum(lister_info.GetCheckSum()); } reading = false; return result; } DataStatus DataPointGridFTP::List(std::list& files, DataPoint::DataPointInfoType verb) { if (!ftp_active) return DataStatus::NotInitializedError; if (reading) return DataStatus::IsReadingError; if (writing) return DataStatus::IsWritingError; reading = true; set_attributes(); bool more_info = ((verb | INFO_TYPE_NAME) != INFO_TYPE_NAME); DataStatus lister_res = lister->retrieve_dir_info(url,!more_info); if (!lister_res) { logger.msg(VERBOSE, "Failed to obtain listing from FTP: %s", lister_res.GetDesc()); reading = false; return lister_res; } DataStatus result = DataStatus::Success; for (std::list::iterator i = lister->begin(); i != lister->end(); ++i) { if (i->GetName()[0] != '/') i->SetName(url.Path()+'/'+i->GetName()); std::list::iterator f = files.insert(files.end(), FileInfo(i->GetLastName())); if (more_info) { DataStatus r = do_more_stat(*i, verb); if(!r) { if(r == DataStatus::StatError) r = DataStatus(DataStatus::ListError, r.GetDesc()); result = r; } f->SetType(i->GetType()); } if (i->CheckSize()) f->SetSize(i->GetSize()); if (i->CheckModified()) f->SetModified(i->GetModified()); if (i->CheckCheckSum()) f->SetCheckSum(i->GetCheckSum()); } reading = false; return result; } DataStatus DataPointGridFTP::Rename(const URL& newurl) { if (!ftp_active) return DataStatus::NotInitializedError; if (reading) return DataStatus::IsReadingError; if (writing) return DataStatus::IsWritingError; set_attributes(); GlobusResult res(globus_ftp_client_move(&ftp_handle, url.plainstr().c_str(), newurl.plainstr().c_str(), &ftp_opattr, &ftp_complete_callback, cbarg)); if (!res) { logger.msg(VERBOSE, "Rename: globus_ftp_client_move failed"); std::string err(res.str()); logger.msg(VERBOSE, "Globus error: %s", err); return DataStatus(DataStatus::RenameError, err); } if (!cond.wait(1000*usercfg.Timeout())) { logger.msg(VERBOSE, "Rename: timeout waiting for operation to complete"); GlobusResult(globus_ftp_client_abort(&ftp_handle)); cond.wait(); return DataStatus(DataStatus::RenameError, EARCREQUESTTIMEOUT, "Timeout waiting for rename at "+url.plainstr()); } if (!callback_status) { return DataStatus(DataStatus::RenameError, callback_status.GetErrno(), callback_status.GetDesc()); } return DataStatus::Success; } DataPointGridFTP::DataPointGridFTP(const URL& url, const UserConfig& usercfg, PluginArgument* parg) : DataPointDirect(url, usercfg, parg), cbarg(new CBArg(this)), ftp_active(false), credential(NULL), reading(false), 
writing(false), ftp_eof_flag(false), check_received_length(0), data_error(false), lister(NULL) { //globus_module_activate(GLOBUS_FTP_CLIENT_MODULE); //if (!proxy_initialized) // proxy_initialized = GlobusRecoverProxyOpenSSL(); // Activating globus only once because it looks like // deactivation of GLOBUS_FTP_CONTROL_MODULE is not // handled properly on Windows. This should not cause // problems (except for valgrind) because this plugin // is registered as persistent. if (!proxy_initialized) { #ifdef HAVE_GLOBUS_THREAD_SET_MODEL GlobusResult(globus_thread_set_model("pthread")); #endif GlobusPrepareGSSAPI(); GlobusModuleActivate(GLOBUS_COMMON_MODULE); GlobusModuleActivate(GLOBUS_FTP_CLIENT_MODULE); proxy_initialized = GlobusRecoverProxyOpenSSL(); } is_secure = false; if (url.Protocol() == "gsiftp") is_secure = true; if (!ftp_active) { GlobusResult res; globus_ftp_client_handleattr_t ftp_attr; if (!(res = globus_ftp_client_handleattr_init(&ftp_attr))) { logger.msg(ERROR, "init_handle: globus_ftp_client_handleattr_init failed"); logger.msg(ERROR, "Globus error: %s", res.str()); ftp_active = false; return; } #ifdef HAVE_GLOBUS_FTP_CLIENT_HANDLEATTR_SET_GRIDFTP2 if (!(res = globus_ftp_client_handleattr_set_gridftp2(&ftp_attr, GLOBUS_TRUE))) { GlobusResult(globus_ftp_client_handleattr_destroy(&ftp_attr)); logger.msg(ERROR, "init_handle: " "globus_ftp_client_handleattr_set_gridftp2 failed"); logger.msg(ERROR, "Globus error: %s", res.str()); ftp_active = false; return; } #endif if (!(res = globus_ftp_client_handle_init(&ftp_handle, &ftp_attr))) { GlobusResult(globus_ftp_client_handleattr_destroy(&ftp_attr)); logger.msg(ERROR, "init_handle: globus_ftp_client_handle_init failed"); logger.msg(ERROR, "Globus error: %s", res.str()); ftp_active = false; return; } GlobusResult(globus_ftp_client_handleattr_destroy(&ftp_attr)); if (!(res = globus_ftp_client_operationattr_init(&ftp_opattr))) { logger.msg(ERROR, "init_handle: " "globus_ftp_client_operationattr_init failed"); logger.msg(ERROR, "Globus error: %s", res.str()); GlobusResult(globus_ftp_client_handle_destroy(&ftp_handle)); ftp_active = false; return; } if (!(res = globus_ftp_client_operationattr_set_allow_ipv6(&ftp_opattr, GLOBUS_TRUE))) { logger.msg(WARNING, "init_handle: " "globus_ftp_client_operationattr_set_allow_ipv6 failed"); logger.msg(WARNING, "Globus error: %s", res.str()); } if (!(res = globus_ftp_client_operationattr_set_delayed_pasv(&ftp_opattr, GLOBUS_TRUE))) { logger.msg(WARNING, "init_handle: " "globus_ftp_client_operationattr_set_delayed_pasv failed"); logger.msg(WARNING, "Globus error: %s", res.str()); } } ftp_active = true; ftp_threads = 1; if (allow_out_of_order) { ftp_threads = stringtoi(url.Option("threads")); if (ftp_threads < 1) ftp_threads = 1; if (ftp_threads > MAX_PARALLEL_STREAMS) ftp_threads = MAX_PARALLEL_STREAMS; } autodir = additional_checks; std::string autodir_s = url.Option("autodir"); if(autodir_s == "yes") { autodir = true; } else if(autodir_s == "no") { autodir = false; } lister = new Lister(); } void DataPointGridFTP::set_attributes(void) { globus_ftp_control_parallelism_t paral; if (ftp_threads > 1) { paral.fixed.mode = GLOBUS_FTP_CONTROL_PARALLELISM_FIXED; paral.fixed.size = ftp_threads; } else { paral.fixed.mode = GLOBUS_FTP_CONTROL_PARALLELISM_NONE; paral.fixed.size = 1; } GlobusResult(globus_ftp_client_operationattr_set_parallelism(&ftp_opattr, ¶l)); GlobusResult(globus_ftp_client_operationattr_set_striped(&ftp_opattr, GLOBUS_FALSE)); /* globus_ftp_client_operationattr_set_layout */ /* 
globus_ftp_client_operationattr_set_tcp_buffer */ GlobusResult(globus_ftp_client_operationattr_set_type(&ftp_opattr, GLOBUS_FTP_CONTROL_TYPE_IMAGE)); if (!is_secure) { // plain ftp protocol GlobusResult r(globus_ftp_client_operationattr_set_authorization( &ftp_opattr, GSS_C_NO_CREDENTIAL, url.Username().empty() ? "anonymous" : url.Username().c_str(), url.Passwd().empty() ? NULL : url.Passwd().c_str(), GLOBUS_NULL, GLOBUS_NULL)); if(!r) { logger.msg(VERBOSE, "globus_ftp_client_operationattr_set_authorization: error: %s", r.str()); } GlobusResult(globus_ftp_client_operationattr_set_mode(&ftp_opattr, GLOBUS_FTP_CONTROL_MODE_STREAM)); GlobusResult(globus_ftp_client_operationattr_set_data_protection(&ftp_opattr, GLOBUS_FTP_CONTROL_PROTECTION_CLEAR)); GlobusResult(globus_ftp_client_operationattr_set_control_protection(&ftp_opattr, GLOBUS_FTP_CONTROL_PROTECTION_CLEAR)); // need to set dcau to none in order Globus libraries not to send // it to pure ftp server globus_ftp_control_dcau_t dcau; dcau.mode = GLOBUS_FTP_CONTROL_DCAU_NONE; GlobusResult(globus_ftp_client_operationattr_set_dcau(&ftp_opattr, &dcau)); } else { // gridftp protocol if (!credential){ credential = new GSSCredential(usercfg); } lister->set_credential(credential); GlobusResult r(globus_ftp_client_operationattr_set_authorization( &ftp_opattr, *credential,":globus-mapping:","user@", GLOBUS_NULL,GLOBUS_NULL)); if(!r) { logger.msg(WARNING, "Failed to set credentials for GridFTP transfer"); logger.msg(VERBOSE, "globus_ftp_client_operationattr_set_authorization: error: %s", r.str()); } if (force_secure || (url.Option("secure") == "yes")) { GlobusResult(globus_ftp_client_operationattr_set_data_protection(&ftp_opattr, GLOBUS_FTP_CONTROL_PROTECTION_PRIVATE)); logger.msg(VERBOSE, "Using secure data transfer"); } else { GlobusResult(globus_ftp_client_operationattr_set_data_protection(&ftp_opattr, GLOBUS_FTP_CONTROL_PROTECTION_CLEAR)); logger.msg(VERBOSE, "Using insecure data transfer"); globus_ftp_control_dcau_t dcau; dcau.mode = GLOBUS_FTP_CONTROL_DCAU_NONE; GlobusResult(globus_ftp_client_operationattr_set_dcau(&ftp_opattr, &dcau)); } if (force_passive) { GlobusResult(globus_ftp_client_operationattr_set_mode(&ftp_opattr, GLOBUS_FTP_CONTROL_MODE_STREAM)); } else { GlobusResult(globus_ftp_client_operationattr_set_mode(&ftp_opattr, GLOBUS_FTP_CONTROL_MODE_EXTENDED_BLOCK)); } GlobusResult(globus_ftp_client_operationattr_set_control_protection(&ftp_opattr, GLOBUS_FTP_CONTROL_PROTECTION_PRIVATE)); } /* globus_ftp_client_operationattr_set_dcau */ /* globus_ftp_client_operationattr_set_resume_third_party_transfer */ /* globus_ftp_client_operationattr_set_authorization */ GlobusResult(globus_ftp_client_operationattr_set_append(&ftp_opattr, GLOBUS_FALSE)); } DataPointGridFTP::~DataPointGridFTP() { int destroy_timeout = 15+1; // waiting some reasonable time for globus StopReading(); StopWriting(); if (ftp_active) { logger.msg(DEBUG, "~DataPoint: destroy ftp_handle"); // In case globus is still doing something asynchronously while(!GlobusResult(globus_ftp_client_handle_destroy(&ftp_handle))) { logger.msg(VERBOSE, "~DataPoint: destroy ftp_handle failed - retrying"); if(!(--destroy_timeout)) break; // Unfortunately there is no sutable condition to wait for. // But such situation should happen very rarely if ever. I hope so. // It is also expected Globus will call all pending callbacks here // so it is free to destroy DataPointGridFTP and related objects. 
sleep(1); } if(destroy_timeout) GlobusResult(globus_ftp_client_operationattr_destroy(&ftp_opattr)); } if (credential) delete credential; if (lister) delete lister; cbarg->abandon(); // acquires lock if(destroy_timeout) { delete cbarg; } else { // So globus maybe did not call all callbacks. Keeping // intermediate object. logger.msg(VERBOSE, "~DataPoint: failed to destroy ftp_handle - leaking"); } // Clean all Globus error objects which Globus forgot to properly process. GlobusResult::wipe(); // See activation for description //GlobusResult(globus_module_deactivate(GLOBUS_FTP_CLIENT_MODULE)); } Plugin* DataPointGridFTP::Instance(PluginArgument *arg) { DataPointPluginArgument *dmcarg = dynamic_cast(arg); if (!dmcarg) return NULL; if (((const URL&)(*dmcarg)).Protocol() != "gsiftp" && ((const URL&)(*dmcarg)).Protocol() != "ftp") { return NULL; } // Make this code non-unloadable because both OpenSSL // and Globus have problems with unloading Glib::Module* module = dmcarg->get_module(); PluginsFactory* factory = dmcarg->get_factory(); if(!(factory && module)) { logger.msg(ERROR, "Missing reference to factory and/or module. It is unsafe to use Globus in non-persistent mode - (Grid)FTP code is disabled. Report to developers."); return NULL; } factory->makePersistent(module); OpenSSLInit(); return new DataPointGridFTP(*dmcarg, *dmcarg, dmcarg); } bool DataPointGridFTP::WriteOutOfOrder() { return true; } bool DataPointGridFTP::ProvidesMeta() const { return true; } const std::string DataPointGridFTP::DefaultCheckSum() const { // no way to know which checksum is used for each file, so hard-code adler32 for now return std::string("adler32"); } bool DataPointGridFTP::RequiresCredentials() const { return is_secure; } bool DataPointGridFTP::SetURL(const URL& u) { if ((u.Protocol() != "gsiftp") && (u.Protocol() != "ftp")) { return false; } if (u.Host() != url.Host()) { return false; } // Globus FTP handle allows changing url completely url = u; if(triesleft < 1) triesleft = 1; ResetMeta(); // Cache control connection GlobusResult(globus_ftp_client_handle_cache_url_state(&ftp_handle, url.plainstr().c_str())); return true; } DataPointGridFTP* DataPointGridFTP::CBArg::acquire(void) { lock.lock(); if(!arg) { lock.unlock(); } return arg; } void DataPointGridFTP::CBArg::release(void) { lock.unlock(); } DataPointGridFTP::CBArg::CBArg(DataPointGridFTP* a) { arg = a; } void DataPointGridFTP::CBArg::abandon(void) { lock.lock(); arg = NULL; lock.unlock(); } } // namespace ArcDMCGridFTP extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "gsiftp", "HED:DMC", "FTP or FTP with GSI security", 0, &ArcDMCGridFTP::DataPointGridFTP::Instance }, { NULL, NULL, NULL, 0, NULL } }; extern "C" { void ARC_MODULE_CONSTRUCTOR_NAME(Glib::Module* module, Arc::ModuleManager* manager) { if(manager && module) { manager->makePersistent(module); }; } } nordugrid-arc-5.4.2/src/hed/dmc/gridftp/PaxHeaders.7502/Lister.h0000644000000000000000000000012313107553304022341 xustar000000000000000027 mtime=1495193284.604954 26 atime=1513200575.19271 30 ctime=1513200660.892758405 nordugrid-arc-5.4.2/src/hed/dmc/gridftp/Lister.h0000644000175000002070000000762313107553304022417 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_LISTER_H__ #define __ARC_LISTER_H__ #include #include #include #include #include #include #include #include #define LISTER_MAX_RESPONSES 3 namespace ArcDMCGridFTP { using namespace Arc; class Lister { private: bool inited; bool facts; char readbuf[4096]; globus_cond_t cond; 
globus_mutex_t mutex; globus_ftp_control_handle_t *handle; std::list fnames; globus_ftp_control_response_t resp[LISTER_MAX_RESPONSES]; int resp_n; enum callback_status_t { CALLBACK_NOTREADY = 0, CALLBACK_DONE = 1, CALLBACK_ERROR = 2, CALLBACK_TIMEDOUT = 3 }; callback_status_t callback_status; callback_status_t data_callback_status; callback_status_t close_callback_status; globus_off_t list_shift; bool connected; bool pasv_set; bool data_activated; bool free_format; unsigned short int port; std::string host; std::string username; std::string userpass; std::string path; std::string scheme; std::string urlstr; GSSCredential* credential; static unsigned int const max_timeout = (60*20); // 20 minutes is really long callback_status_t wait_for_callback(unsigned int to = max_timeout); callback_status_t wait_for_data_callback(unsigned int to = max_timeout); callback_status_t wait_for_close_callback(unsigned int to = max_timeout); void resp_destroy(); static void resp_callback(void *arg, globus_ftp_control_handle_t *h, globus_object_t *error, globus_ftp_control_response_t *response); static void close_callback(void *arg, globus_ftp_control_handle_t *h, globus_object_t *error, globus_ftp_control_response_t *response); static void simple_callback(void *arg, globus_ftp_control_handle_t *h, globus_object_t *error); static void list_read_callback(void *arg, globus_ftp_control_handle_t *hctrl, globus_object_t *error, globus_byte_t *buffer, globus_size_t length, globus_off_t offset, globus_bool_t eof); static void list_conn_callback(void *arg, globus_ftp_control_handle_t *hctrl, unsigned int stripe_ndx, globus_bool_t reused, globus_object_t *error); static std::map callback_args; static Glib::Mutex callback_args_mutex; static void* remember_for_callback(Lister* it); static Lister* recall_for_callback(void* arg); static void forget_about_callback(void* arg); void* callback_arg; globus_ftp_control_response_class_t send_command(const char *command, const char *arg, bool wait_for_response, char **sresp = NULL, int *code = NULL, char delim = 0); DataStatus setup_pasv(globus_ftp_control_host_port_t& pasv_addr); DataStatus handle_connect(const URL& url); DataStatus transfer_list(void); void close_connection(); public: Lister(); ~Lister(); void set_credential(GSSCredential* cred) { credential = cred; }; DataStatus retrieve_dir_info(const URL& url,bool names_only = false); DataStatus retrieve_file_info(const URL& url,bool names_only = false); operator bool() { return inited; } std::list::iterator begin() { return fnames.begin(); } std::list::iterator end() { return fnames.end(); } int size() const { return fnames.size(); } }; } // namespace ArcDMCGridFTP #endif // __ARC_LISTER_H__ nordugrid-arc-5.4.2/src/hed/dmc/gridftp/PaxHeaders.7502/README0000644000000000000000000000012311001653037021601 xustar000000000000000027 mtime=1208440351.928622 26 atime=1513200575.19471 30 ctime=1513200660.886758331 nordugrid-arc-5.4.2/src/hed/dmc/gridftp/README0000644000175000002070000000010411001653037021642 0ustar00mockbuildmock00000000000000DMC which handles gridftp protocol. It depends on globus libraries. 
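The gridftp DMC described above, like the other DMCs in this archive, is registered as a plugin for its URL schemes (gsiftp and ftp) and is loaded at runtime rather than linked against directly. As an illustration only (not part of the original sources), the sketch below shows how client code would typically reach such a plugin through the generic Arc::DataHandle wrapper, which picks the DataPoint implementation from the URL scheme. It assumes the standard libarcdata classes (URL, UserConfig, DataHandle, FileInfo, DataStatus) and an Arc::UserConfig prepared by the caller with valid credentials; header paths and signatures may differ slightly between ARC versions.

// Illustrative sketch only - not taken from this archive. Assumes libarcdata
// as shipped with nordugrid-arc and a UserConfig already set up by the caller.
#include <iostream>
#include <string>
#include <arc/URL.h>
#include <arc/UserConfig.h>
#include <arc/data/DataHandle.h>
#include <arc/data/FileInfo.h>

// Stat a remote file through whichever DMC plugin matches the URL scheme
// (for gsiftp:// and ftp:// URLs that would be the gridftp DMC above).
static bool stat_remote(const Arc::UserConfig& usercfg, const std::string& urlstr) {
  Arc::URL url(urlstr);
  Arc::DataHandle handle(url, usercfg);   // loads the matching DMC module at runtime
  if (!handle) return false;              // no plugin registered for this scheme
  Arc::FileInfo info;
  Arc::DataStatus res = handle->Stat(info, Arc::DataPoint::INFO_TYPE_ALL);
  if (!res) {
    std::cerr << "Stat failed: " << res.GetDesc() << std::endl;
    return false;
  }
  std::cout << info.GetName() << " " << info.GetSize() << std::endl;
  return true;
}

The same pattern applies to the S3 DMC further down in this archive, which registers itself for the s3, s3+http and s3+https schemes.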
nordugrid-arc-5.4.2/src/hed/dmc/PaxHeaders.7502/s30000644000000000000000000000013213214316024017532 xustar000000000000000030 mtime=1513200660.852757915 30 atime=1513200668.722854169 30 ctime=1513200660.852757915 nordugrid-arc-5.4.2/src/hed/dmc/s3/0000755000175000002070000000000013214316024017655 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/dmc/s3/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712675602216021662 xustar000000000000000027 mtime=1459029134.924374 30 atime=1513200595.639960337 30 ctime=1513200660.850757891 nordugrid-arc-5.4.2/src/hed/dmc/s3/Makefile.am0000644000175000002070000000115312675602216021724 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libdmcs3.la libdmcs3_la_SOURCES = DataPointS3.cpp DataPointS3.h libdmcs3_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) $(OPENSSL_CFLAGS) $(S3_CFLAGS) libdmcs3_la_LIBADD = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(OPENSSL_LIBS) $(S3_LIBS) libdmcs3_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/dmc/s3/PaxHeaders.7502/DataPointS3.cpp0000644000000000000000000000012413065020025022402 xustar000000000000000027 mtime=1490296853.127766 27 atime=1513200575.258711 30 ctime=1513200660.851757903 nordugrid-arc-5.4.2/src/hed/dmc/s3/DataPointS3.cpp0000644000175000002070000005411513065020025022455 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include //#include #include #include "DataPointS3.h" #if defined(HAVE_S3_TIMEOUT) #define S3_TIMEOUTMS 0 #endif namespace ArcDMCS3 { using namespace Arc; // Static class variables Logger DataPointS3::logger(Logger::getRootLogger(), "DataPoint.S3"); S3Status DataPointS3::request_status = S3Status(0); unsigned long long int DataPointS3::offset = 0; char ArcDMCS3::DataPointS3::error_details[4096] = { 0 }; S3Status DataPointS3::responsePropertiesCallback(const S3ResponseProperties *properties, void *callbackData) { return S3StatusOK; } void DataPointS3::getCompleteCallback(S3Status status, const S3ErrorDetails *error, void *callbackData) { request_status = status; if (status == S3StatusOK) { DataBuffer *buf = (DataBuffer *)callbackData; buf->eof_read(true); } else { int len = 0; if (error && error->message) { len += snprintf(&(error_details[len]), sizeof(error_details) - len, " Message: %s\n", error->message); } if (error && error->resource) { len += snprintf(&(error_details[len]), sizeof(error_details) - len, " Resource: %s\n", error->resource); } if (error && error->furtherDetails) { len += snprintf(&(error_details[len]), sizeof(error_details) - len, " Further Details: %s\n", error->furtherDetails); } if (error && error->extraDetailsCount) { len += snprintf(&(error_details[len]), sizeof(error_details) - len, "%s", " Extra Details:\n"); int i; for (i = 0; i < error->extraDetailsCount; i++) { len += snprintf(&(error_details[len]), sizeof(error_details) - len, " %s: %s\n", error->extraDetails[i].name, error->extraDetails[i].value); } } } } void DataPointS3::putCompleteCallback(S3Status status, const S3ErrorDetails *error, void *callbackData) { request_status = 
status; if (status == S3StatusOK) { DataBuffer *buf = (DataBuffer *)callbackData; buf->eof_write(true); } else { int len = 0; if (error && error->message) { len += snprintf(&(error_details[len]), sizeof(error_details) - len, " Message: %s\n", error->message); } if (error && error->resource) { len += snprintf(&(error_details[len]), sizeof(error_details) - len, " Resource: %s\n", error->resource); } if (error && error->furtherDetails) { len += snprintf(&(error_details[len]), sizeof(error_details) - len, " Further Details: %s\n", error->furtherDetails); } if (error && error->extraDetailsCount) { len += snprintf(&(error_details[len]), sizeof(error_details) - len, "%s", " Extra Details:\n"); int i; for (i = 0; i < error->extraDetailsCount; i++) { len += snprintf(&(error_details[len]), sizeof(error_details) - len, " %s: %s\n", error->extraDetails[i].name, error->extraDetails[i].value); } } } } void DataPointS3::responseCompleteCallback(S3Status status, const S3ErrorDetails *error, void *callbackData) { request_status = status; int len = 0; if (error && error->message) { len += snprintf(&(error_details[len]), sizeof(error_details) - len, " Message: %s\n", error->message); } if (error && error->resource) { len += snprintf(&(error_details[len]), sizeof(error_details) - len, " Resource: %s\n", error->resource); } if (error && error->furtherDetails) { len += snprintf(&(error_details[len]), sizeof(error_details) - len, " Further Details: %s\n", error->furtherDetails); } if (error && error->extraDetailsCount) { len += snprintf(&(error_details[len]), sizeof(error_details) - len, "%s", " Extra Details:\n"); int i; for (i = 0; i < error->extraDetailsCount; i++) { len += snprintf(&(error_details[len]), sizeof(error_details) - len, " %s: %s\n", error->extraDetails[i].name, error->extraDetails[i].value); } } } // get object ---------------------------------------------------------------- S3Status DataPointS3::getObjectDataCallback(int bufferSize, const char *buffer, void *callbackData) { DataBuffer *buf = (DataBuffer *)callbackData; /* 1. claim buffer */ int h; unsigned int l; if (!buf->for_read(h, l, true)) { /* failed to get buffer - must be error or request to exit */ buf->error_read(true); return S3StatusOK; } /* 2. read */ memcpy((*(buf))[h], buffer, bufferSize); /* 3. announce */ buf->is_read(h, bufferSize, DataPointS3::offset); DataPointS3::offset += bufferSize; return S3StatusOK; } static int putObjectDataCallback(int bufferSize, char *buffer, void *callbackData) { DataBuffer *buf = (DataBuffer *)callbackData; /* 1. claim buffer */ int h; unsigned int l; unsigned long long int p; if (!buf->for_write(h, l, p, true)) { // no more data from the buffer, did the other side finished? buf->eof_write(true); return 0; } /* 2. write */ int toCopy = ((l > (unsigned)bufferSize) ? (unsigned)bufferSize : l); memcpy(buffer, (*(buf))[h], toCopy); /* 3. 
announce */ buf->is_written(h); return toCopy; } DataPointS3::DataPointS3(const URL &url, const UserConfig &usercfg, PluginArgument *parg) : DataPointDirect(url, usercfg, parg), fd(-1), reading(false), writing(false) { hostname = url.Host(); access_key = Arc::GetEnv("S3_ACCESS_KEY"); secret_key = Arc::GetEnv("S3_SECRET_KEY"); #if defined(S3_DEFAULT_REGION) auth_region = Arc::GetEnv("S3_AUTH_REGION"); #endif // Extract bucket bucket_name = url.Path(); // Remove leading slash if (bucket_name.find('/') == 0) { bucket_name = bucket_name.substr(1); } // Remove trailing slash if (bucket_name.rfind('/') == bucket_name.length() - 1) { bucket_name = bucket_name.substr(0, bucket_name.length() - 1); } // extract key std::size_t found = bucket_name.find('/'); if (found != std::string::npos) { key_name = bucket_name.substr(found + 1, bucket_name.length() - 1); bucket_name = bucket_name.substr(0, found); } // S3_validate_bucket_name // if / in key_name or bucket_name then Invalid bucket/key name if (bucket_name.find('/') || key_name.find("/")) { } // Check scheme if (url.Protocol() == "s3+https") { protocol = S3ProtocolHTTPS; } else { protocol = S3ProtocolHTTP; } uri_style = S3UriStylePath; S3BucketContext bucket_context = { 0, bucket_name.c_str(), protocol, uri_style, access_key.c_str(), secret_key.c_str(), #if defined(S3_DEFAULT_REGION) NULL, auth_region.c_str() }; #else 0 }; #endif // ToDo: Port support printf ("Port: %d", url.Port()); S3_initialize("s3", S3_INIT_ALL, hostname.c_str()); bufsize = 16384; } DataPointS3::~DataPointS3() { S3_deinitialize(); } Plugin *DataPointS3::Instance(PluginArgument *arg) { DataPointPluginArgument *dmcarg = dynamic_cast(arg); if (!dmcarg) return NULL; if (((const URL &)(*dmcarg)).Protocol() != "s3" && ((const URL &)(*dmcarg)).Protocol() != "s3+http" && ((const URL &)(*dmcarg)).Protocol() != "s3+https") return NULL; return new DataPointS3(*dmcarg, *dmcarg, dmcarg); } DataStatus DataPointS3::Check(bool check_meta) { return DataStatus::Success; } S3Status DataPointS3::headResponsePropertiesCallback( const S3ResponseProperties *properties, void *callbackData) { FileInfo *file = (FileInfo *)callbackData; file->SetType(FileInfo::file_type_file); file->SetSize(properties->contentLength); file->SetModified(properties->lastModified); return S3StatusOK; } DataStatus DataPointS3::Stat(FileInfo &file, DataPointInfoType verb) { if (!bucket_name.empty() && !key_name.empty()) { S3BucketContext bucketContext = { 0, bucket_name.c_str(), protocol, uri_style, access_key.c_str(), secret_key.c_str(), #if defined(S3_DEFAULT_REGION) NULL, auth_region.c_str() }; #else 0 }; #endif S3ResponseHandler responseHandler = { &headResponsePropertiesCallback, &responseCompleteCallback }; file.SetName(key_name); #if defined(S3_TIMEOUTMS) S3_head_object(&bucketContext, key_name.c_str(), NULL, S3_TIMEOUTMS, &responseHandler, #else S3_head_object(&bucketContext, key_name.c_str(), NULL, &responseHandler, #endif &file); if (request_status == S3StatusOK) { return DataStatus::Success; } return DataStatus(DataStatus::StatError, S3_get_status_name(request_status)); } return DataStatus::StatError; } S3Status DataPointS3::listBucketCallback( int isTruncated, const char *nextMarker, int contentsCount, const S3ListBucketContent *contents, int commonPrefixesCount, const char **commonPrefixes, void *callbackData) { std::list *files = (std::list *)callbackData; for (int i = 0; i < contentsCount; i++) { const S3ListBucketContent *content = &(contents[i]); time_t t = (time_t)content->lastModified; FileInfo file = 
FileInfo(content->key); file.SetType(FileInfo::file_type_file); file.SetSize((unsigned long long)content->size); file.SetModified(t); file.SetMetaData("group", content->ownerDisplayName); file.SetMetaData("owner", content->ownerDisplayName); std::list::iterator f = files->insert(files->end(), file); } return S3StatusOK; } S3Status DataPointS3::listServiceCallback(const char *ownerId, const char *ownerDisplayName, const char *bucketName, int64_t creationDate, void *callbackData) { std::list *files = (std::list *)callbackData; FileInfo file = FileInfo(bucketName); file.SetType(FileInfo::file_type_dir); file.SetMetaData("group", ownerDisplayName); file.SetMetaData("owner", ownerDisplayName); file.SetModified(creationDate); std::list::iterator f = files->insert(files->end(), file); return S3StatusOK; } DataStatus DataPointS3::List(std::list &files, DataPointInfoType verb) { if (!bucket_name.empty() && !key_name.empty()) { FileInfo file(key_name); S3BucketContext bucketContext = { 0, bucket_name.c_str(), protocol, uri_style, access_key.c_str(), secret_key.c_str(), #if defined(S3_DEFAULT_REGION) NULL, auth_region.c_str() }; #else 0 }; #endif S3ResponseHandler responseHandler = { &headResponsePropertiesCallback, &responseCompleteCallback }; #if defined(S3_TIMEOUTMS) S3_head_object(&bucketContext, key_name.c_str(), NULL, S3_TIMEOUTMS, &responseHandler, #else S3_head_object(&bucketContext, key_name.c_str(), NULL, &responseHandler, #endif &file); if (request_status == S3StatusOK) { std::list::iterator f = files.insert(files.end(), file); return DataStatus::Success; } return DataStatus(DataStatus::StatError, S3_get_status_name(request_status)); } else if (!bucket_name.empty()) { S3BucketContext bucketContext = { 0, bucket_name.c_str(), protocol, uri_style, access_key.c_str(), secret_key.c_str(), #if defined(S3_DEFAULT_REGION) NULL, auth_region.c_str() }; #else 0 }; #endif S3ListBucketHandler listBucketHandler = { { &responsePropertiesCallback, &responseCompleteCallback }, &listBucketCallback }; S3_list_bucket(&bucketContext, NULL, NULL, NULL, 0, NULL, #if defined(S3_TIMEOUTMS) S3_TIMEOUTMS, #endif &listBucketHandler, &files); } else { S3BucketContext bucketContext = { 0, bucket_name.c_str(), protocol, uri_style, access_key.c_str(), secret_key.c_str(), #if defined(S3_DEFAULT_REGION) NULL, auth_region.c_str() }; #else 0 }; #endif S3ListServiceHandler listServiceHandler = { { &responsePropertiesCallback, &responseCompleteCallback }, &listServiceCallback }; S3_list_service(S3ProtocolHTTP, access_key.c_str(), secret_key.c_str(), 0, #if defined(S3_DEFAULT_REGION) NULL, auth_region.c_str(), NULL, #if defined(S3_TIMEOUTMS) S3_TIMEOUTMS, #endif &listServiceHandler, &files); #else 0, 0, &listServiceHandler, &files); #endif } if (request_status == S3StatusOK) { return DataStatus::Success; } logger.msg(ERROR, "Failed to read object %s: %s", url.Path(), S3_get_status_name(request_status)); return DataStatus(DataStatus::ListError, S3_get_status_name(request_status)); } DataStatus DataPointS3::Remove() { if (key_name.empty()) { S3ResponseHandler responseHandler = { &responsePropertiesCallback, &responseCompleteCallback }; S3_delete_bucket(S3ProtocolHTTP, S3UriStylePath, access_key.c_str(), #if defined(S3_DEFAULT_REGION) secret_key.c_str(), 0, 0, bucket_name.c_str(), auth_region.c_str(), NULL, #else secret_key.c_str(), 0, 0, bucket_name.c_str(), 0, #endif #if defined(S3_TIMEOUTMS) S3_TIMEOUTMS, #endif &responseHandler, 0); } else { S3BucketContext bucketContext = { 0, bucket_name.c_str(), protocol, uri_style, 
access_key.c_str(), secret_key.c_str(), #if defined(S3_DEFAULT_REGION) NULL, auth_region.c_str() }; #else 0 }; #endif S3ResponseHandler responseHandler = { 0, &responseCompleteCallback }; #if defined(S3_TIMEOUTMS) S3_delete_object(&bucketContext, key_name.c_str(), NULL, S3_TIMEOUTMS, &responseHandler, 0); #else S3_delete_object(&bucketContext, key_name.c_str(), 0, &responseHandler, 0); #endif } if (request_status == S3StatusOK) { return DataStatus::Success; } return DataStatus(DataStatus::DeleteError, EINVAL, S3_get_status_name(request_status)); } DataStatus DataPointS3::Rename(const URL &newurl) { return DataStatus(DataStatus::RenameError, ENOTSUP, "Renaming in S3 is not supported"); } DataStatus DataPointS3::CreateDirectory(bool with_parents) { if (!key_name.empty()) { return DataStatus(DataStatus::CreateDirectoryError, EINVAL, "key should not be given"); } S3ResponseHandler responseHandler = { &responsePropertiesCallback, &responseCompleteCallback }; S3CannedAcl cannedAcl = S3CannedAclPrivate; S3_create_bucket(S3ProtocolHTTP, access_key.c_str(), secret_key.c_str(), 0, 0, #if defined(S3_DEFAULT_REGION) bucket_name.c_str(), auth_region.c_str(), cannedAcl, 0, 0, #if defined(S3_TIMEOUTMS) S3_TIMEOUTMS, #endif &responseHandler, 0); #else bucket_name.c_str(), cannedAcl, 0, 0, &responseHandler, 0); #endif if (request_status == S3StatusOK) { return DataStatus::Success; } return DataStatus(DataStatus::CreateDirectoryError, EINVAL, S3_get_status_name(request_status)); } void DataPointS3::read_file_start(void *arg) { ((DataPointS3 *)arg)->read_file(); } void DataPointS3::read_file() { S3GetObjectHandler getObjectHandler = { { &responsePropertiesCallback, &DataPointS3::getCompleteCallback }, &DataPointS3::getObjectDataCallback }; S3BucketContext bucketContext = { 0, bucket_name.c_str(), protocol, uri_style, access_key.c_str(), secret_key.c_str(), #if defined(S3_DEFAULT_REGION) 0, auth_region.c_str() }; #else 0 }; #endif uint64_t startByte = 0, byteCount = 0; S3_get_object(&bucketContext, key_name.c_str(), 0, startByte, byteCount, 0, #if defined(S3_TIMEOUTMS) S3_TIMEOUTMS, #endif &getObjectHandler, buffer); if (request_status != S3StatusOK) { logger.msg(ERROR, "Failed to read object %s: %s", url.Path(), S3_get_status_name(request_status)); buffer->error_read(true); } } DataStatus DataPointS3::StartReading(DataBuffer &buf) { if (reading) return DataStatus::IsReadingError; if (writing) return DataStatus::IsWritingError; reading = true; buffer = &buf; // create thread to maintain reading if (!CreateThreadFunction(&DataPointS3::read_file_start, this, &transfers_started)) { reading = false; buffer = NULL; return DataStatus::ReadStartError; } return DataStatus::Success; } DataStatus DataPointS3::StopReading() { transfers_started.wait(); return DataStatus::Success; } static bool file_allocate(int fd, FileAccess *fa, unsigned long long int &fsize) { return false; } void DataPointS3::write_file_start(void *arg) { ((DataPointS3 *)arg)->write_file(); } void DataPointS3::write_file() { S3BucketContext bucketContext = { 0, bucket_name.c_str(), protocol, uri_style, access_key.c_str(), secret_key.c_str(), #if defined(S3_AUTH_REGION) 0, auth_region.c_str() }; #else 0 }; #endif S3PutObjectHandler putObjectHandler = { { &responsePropertiesCallback, &putCompleteCallback }, &putObjectDataCallback }; uint64_t contentLength = 0; const char *cacheControl = 0, *contentType = 0, *md5 = 0; const char *contentDispositionFilename = 0, *contentEncoding = 0; int64_t expires = -1; S3CannedAcl cannedAcl = S3CannedAclPrivate; int 
metaPropertiesCount = 0; S3NameValue metaProperties[S3_MAX_METADATA_COUNT]; char useServerSideEncryption = 0; S3PutProperties putProperties = { contentType, md5, cacheControl, contentDispositionFilename, contentEncoding, expires, cannedAcl, metaPropertiesCount, metaProperties, useServerSideEncryption }; S3_put_object(&bucketContext, key_name.c_str(), size, &putProperties, NULL, #if defined(S3_TIMEOUTMS) S3_TIMEOUTMS, #endif &putObjectHandler, buffer); if (request_status != S3StatusOK) { logger.msg(ERROR, "Failed to write object %s: %s", url.Path(), S3_get_status_name(request_status)); buffer->error_write(true); } } DataStatus DataPointS3::StartWriting(DataBuffer &buf, DataCallback *space_cb) { if (reading) return DataStatus::IsReadingError; if (writing) return DataStatus::IsWritingError; writing = true; /* Check if size for source is defined */ if (!CheckSize()) { return DataStatus(DataStatus::WriteStartError, "Size of the source file missing. S3 needs to know it."); } /* try to open */ buffer = &buf; buffer->set(NULL, 16384, 3); buffer->speed.reset(); buffer->speed.hold(false); /* create thread to maintain writing */ if (!CreateThreadFunction(&DataPointS3::write_file_start, this, &transfers_started)) { buffer->error_write(true); buffer->eof_write(true); writing = false; return DataStatus(DataStatus::WriteStartError, "Failed to create new thread"); } return DataStatus::Success; } DataStatus DataPointS3::StopWriting() { writing = false; transfers_started.wait(); /* wait till writing thread exited */ buffer = NULL; return DataStatus::Success; } bool DataPointS3::WriteOutOfOrder() { return false; } } // namespace Arc extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "s3", "HED:DMC", "Amazon S3 Store", 0, &ArcDMCS3::DataPointS3::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/dmc/s3/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315723021661 xustar000000000000000030 mtime=1513200595.683960875 30 atime=1513200649.159614904 30 ctime=1513200660.850757891 nordugrid-arc-5.4.2/src/hed/dmc/s3/Makefile.in0000644000175000002070000006176713214315723021750 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/dmc/s3 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libdmcs3_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libdmcs3_la_OBJECTS = libdmcs3_la-DataPointS3.lo libdmcs3_la_OBJECTS = $(am_libdmcs3_la_OBJECTS) libdmcs3_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libdmcs3_la_CXXFLAGS) \ $(CXXFLAGS) $(libdmcs3_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) 
CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libdmcs3_la_SOURCES) DIST_SOURCES = $(libdmcs3_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ 
GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ 
XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libdmcs3.la libdmcs3_la_SOURCES = DataPointS3.cpp DataPointS3.h libdmcs3_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) $(OPENSSL_CFLAGS) $(S3_CFLAGS) libdmcs3_la_LIBADD = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(OPENSSL_LIBS) $(S3_LIBS) libdmcs3_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/dmc/s3/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/dmc/s3/Makefile .PRECIOUS: Makefile 
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libdmcs3.la: $(libdmcs3_la_OBJECTS) $(libdmcs3_la_DEPENDENCIES) $(libdmcs3_la_LINK) -rpath $(pkglibdir) $(libdmcs3_la_OBJECTS) $(libdmcs3_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdmcs3_la-DataPointS3.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libdmcs3_la-DataPointS3.lo: DataPointS3.cpp 
@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcs3_la_CXXFLAGS) $(CXXFLAGS) -MT libdmcs3_la-DataPointS3.lo -MD -MP -MF $(DEPDIR)/libdmcs3_la-DataPointS3.Tpo -c -o libdmcs3_la-DataPointS3.lo `test -f 'DataPointS3.cpp' || echo '$(srcdir)/'`DataPointS3.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdmcs3_la-DataPointS3.Tpo $(DEPDIR)/libdmcs3_la-DataPointS3.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataPointS3.cpp' object='libdmcs3_la-DataPointS3.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcs3_la_CXXFLAGS) $(CXXFLAGS) -c -o libdmcs3_la-DataPointS3.lo `test -f 'DataPointS3.cpp' || echo '$(srcdir)/'`DataPointS3.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/dmc/s3/PaxHeaders.7502/DataPointS3.h0000644000000000000000000000012413065020025022047 xustar000000000000000027 mtime=1490296853.127766 27 atime=1513200575.264711 30 ctime=1513200660.852757915 nordugrid-arc-5.4.2/src/hed/dmc/s3/DataPointS3.h0000644000175000002070000000727613065020025022130 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_DATAPOINTS3_H__ #define __ARC_DATAPOINTS3_H__ #include #include #include #include namespace ArcDMCS3 { using namespace Arc; /** * This class allows access to objects kept in S3-compatible storage (addressed by bucket and key) through the * same interface as is used for other remote storage on the grid. * * This class is a loadable module and cannot be used directly. The DataHandle * class loads modules at runtime and should be used instead of this. */ class DataPointS3 : public DataPointDirect { public: DataPointS3(const URL &url, const UserConfig &usercfg, PluginArgument *parg); virtual ~DataPointS3(); static Plugin *Instance(PluginArgument *arg); virtual DataStatus StartReading(DataBuffer &buffer); virtual DataStatus StartWriting(DataBuffer &buffer, DataCallback *space_cb = NULL); virtual DataStatus StopReading(); virtual DataStatus StopWriting(); virtual DataStatus Check(bool check_meta); virtual DataStatus Stat(FileInfo &file, DataPointInfoType verb = INFO_TYPE_ALL); virtual DataStatus List(std::list<FileInfo> &files, DataPointInfoType verb = INFO_TYPE_ALL); virtual DataStatus Remove(); virtual DataStatus CreateDirectory(bool with_parents = false); virtual DataStatus Rename(const URL &newurl); virtual bool WriteOutOfOrder(); virtual bool RequiresCredentials() const { return false; }; static unsigned long long int offset; private: std::string access_key; std::string secret_key; #if defined(S3_DEFAULT_REGION) std::string auth_region; #endif std::string hostname; std::string bucket_name; std::string key_name; S3Protocol protocol; S3UriStyle uri_style; S3BucketContext bucket_context; int port; bool is_secure; SimpleCounter transfers_started; static void read_file_start(void *arg); static void write_file_start(void *arg); void read_file(); void write_file(); int fd; bool reading; bool writing; static Logger logger; static S3Status request_status; static char error_details[4096]; // Callbacks static S3Status responsePropertiesCallback(const S3ResponseProperties *properties, void *callbackData); static S3Status headResponsePropertiesCallback(const S3ResponseProperties *properties, void *callbackData); static void responseCompleteCallback(S3Status status, const S3ErrorDetails *error, void *callbackData); static void getCompleteCallback(S3Status status, const S3ErrorDetails *error, void *callbackData); static void putCompleteCallback(S3Status status, const S3ErrorDetails *error, void *callbackData); static S3Status listBucketCallback(int isTruncated, const char *nextMarker, int contentsCount, const S3ListBucketContent *contents, int commonPrefixesCount, const char **commonPrefixes, void *callbackData); static S3Status listServiceCallback(const char *ownerId, const char *ownerDisplayName, const char *bucketName, int64_t creationDate, void *callbackData); static S3Status getObjectDataCallback(int bufferSize, const char *buffer, void *callbackData); }; } // namespace ArcDMCS3 #endif // __ARC_DATAPOINTS3_H__ nordugrid-arc-5.4.2/src/hed/dmc/PaxHeaders.7502/srm0000644000000000000000000000013213214316024020006 xustar000000000000000030 mtime=1513200660.915758686 30 atime=1513200668.722854169 30 ctime=1513200660.915758686
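Because DataPointS3, like the other DMC plugins in this tree, is only ever created by the plugin loader, application code does not instantiate it directly; it wraps the URL in a DataHandle and lets the loader pick the matching module. The short sketch below illustrates that pattern only. It is not code from this package: the arc/... include paths, the example s3:// endpoint and the FileInfo accessor names are assumptions based on the public ARC client API, and how the access and secret keys reach the plugin is deliberately left out.

// Sketch: reaching an S3 object through the generic DataHandle interface so
// that the loader resolves the S3 DMC at runtime. Illustrative only; the
// include paths, the URL and the FileInfo accessors below are assumptions,
// and credential handling is omitted.
#include <iostream>
#include <string>

#include <arc/URL.h>
#include <arc/UserConfig.h>
#include <arc/data/DataHandle.h>
#include <arc/data/DataStatus.h>
#include <arc/data/FileInfo.h>

int main() {
  Arc::UserConfig usercfg;                                // a real client would populate this
  Arc::URL url("s3://s3.example.org/mybucket/somekey");   // hypothetical object URL
  Arc::DataHandle dh(url, usercfg);                       // loader picks the matching DataPoint plugin
  if (!dh || !(*dh)) {                                    // no plugin accepted the URL
    std::cerr << "URL not supported by this installation" << std::endl;
    return 1;
  }
  Arc::FileInfo info;
  Arc::DataStatus res = (*dh)->Stat(info);                // Stat() as declared in the header above
  if (!res) {
    std::cerr << "Stat failed: " << res.GetDesc() << std::endl;
    return 1;
  }
  std::cout << info.GetName() << " " << info.GetSize() << " bytes" << std::endl;
  return 0;
}

The same DataHandle pattern is what DataPointSRM below relies on internally when it redirects a transfer to the TURL returned by the SRM service.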
nordugrid-arc-5.4.2/src/hed/dmc/srm/0000755000175000002070000000000013214316024020131 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/dmc/srm/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515022130 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200595.698961059 30 ctime=1513200660.912758649 nordugrid-arc-5.4.2/src/hed/dmc/srm/Makefile.am0000644000175000002070000000120412052416515022167 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libdmcsrm.la SUBDIRS = srmclient libdmcsrm_la_SOURCES = DataPointSRM.cpp DataPointSRM.h libdmcsrm_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libdmcsrm_la_LIBADD = \ srmclient/libsrmclient.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libdmcsrm_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/dmc/srm/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315723022135 xustar000000000000000030 mtime=1513200595.743961609 30 atime=1513200649.048613546 30 ctime=1513200660.913758661 nordugrid-arc-5.4.2/src/hed/dmc/srm/Makefile.in0000644000175000002070000007471113214315723022215 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/dmc/srm DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libdmcsrm_la_DEPENDENCIES = srmclient/libsrmclient.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libdmcsrm_la_OBJECTS = libdmcsrm_la-DataPointSRM.lo libdmcsrm_la_OBJECTS = $(am_libdmcsrm_la_OBJECTS) libdmcsrm_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libdmcsrm_la_CXXFLAGS) \ $(CXXFLAGS) $(libdmcsrm_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libdmcsrm_la_SOURCES) DIST_SOURCES = $(libdmcsrm_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = 
@ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP 
= @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = 
@sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libdmcsrm.la SUBDIRS = srmclient libdmcsrm_la_SOURCES = DataPointSRM.cpp DataPointSRM.h libdmcsrm_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libdmcsrm_la_LIBADD = \ srmclient/libsrmclient.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libdmcsrm_la_LDFLAGS = -no-undefined -avoid-version -module all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/dmc/srm/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/dmc/srm/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libdmcsrm.la: $(libdmcsrm_la_OBJECTS) $(libdmcsrm_la_DEPENDENCIES) $(libdmcsrm_la_LINK) -rpath $(pkglibdir) 
$(libdmcsrm_la_OBJECTS) $(libdmcsrm_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdmcsrm_la-DataPointSRM.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libdmcsrm_la-DataPointSRM.lo: DataPointSRM.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcsrm_la_CXXFLAGS) $(CXXFLAGS) -MT libdmcsrm_la-DataPointSRM.lo -MD -MP -MF $(DEPDIR)/libdmcsrm_la-DataPointSRM.Tpo -c -o libdmcsrm_la-DataPointSRM.lo `test -f 'DataPointSRM.cpp' || echo '$(srcdir)/'`DataPointSRM.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdmcsrm_la-DataPointSRM.Tpo $(DEPDIR)/libdmcsrm_la-DataPointSRM.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataPointSRM.cpp' object='libdmcsrm_la-DataPointSRM.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcsrm_la_CXXFLAGS) $(CXXFLAGS) -c -o libdmcsrm_la-DataPointSRM.lo `test -f 'DataPointSRM.cpp' || echo '$(srcdir)/'`DataPointSRM.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-pkglibLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am \ uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/dmc/srm/PaxHeaders.7502/DataPointSRM.h0000644000000000000000000000012312167545534022521 xustar000000000000000027 mtime=1373555548.255766 26 atime=1513200575.22671 30 ctime=1513200660.914758674 nordugrid-arc-5.4.2/src/hed/dmc/srm/DataPointSRM.h0000644000175000002070000000671712167545534022602 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_DATAPOINTSRM_H__ #define __ARC_DATAPOINTSRM_H__ #include #include #include #include #include "srmclient/SRMClient.h" namespace ArcDMCSRM { using namespace Arc; /** * The Storage Resource Manager (SRM) protocol allows access to data * distributed across physical storage through a unified namespace * and management interface. PrepareReading() or PrepareWriting() must * be used before reading or writing a physical file. * * This class is a loadable module and cannot be used directly. The DataHandle * class loads modules at runtime and should be used instead of this. 
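 *
 * For illustration, a staged read through this interface is expected to follow
 * roughly the sequence sketched below. The sketch uses only the methods
 * declared in this header; the 300-second stage timeout, the sleep()-based
 * polling and the bare "drive the DataBuffer" step are assumptions of the
 * example, not behaviour defined by this class, and real applications would
 * normally leave this sequencing to ARC's higher-level transfer machinery.
 * @code
 * // dp is an Arc::DataPoint* obtained through Arc::DataHandle for an srm:// URL
 * unsigned int wait = 0;
 * Arc::DataStatus res = dp->PrepareReading(300, wait);
 * while (res == Arc::DataStatus::ReadPrepareWait) {
 *   sleep(wait);                            // the SRM service asked us to poll again later
 *   res = dp->PrepareReading(300, wait);    // re-query the outstanding request
 * }
 * if (res) {                                // a usable transfer URL was selected
 *   Arc::DataBuffer buffer;
 *   if (dp->StartReading(buffer)) {
 *     // ... consume blocks from buffer until the transfer completes ...
 *     dp->StopReading();
 *   }
 * }
 * dp->FinishReading(!res);                  // release or abort the SRM request
 * @endcode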
*/ class DataPointSRM : public DataPointDirect { public: DataPointSRM(const URL& url, const UserConfig& usercfg, PluginArgument* arg); virtual ~DataPointSRM(); static Plugin* Instance(PluginArgument *arg); virtual DataStatus PrepareReading(unsigned int timeout, unsigned int& wait_time); virtual DataStatus PrepareWriting(unsigned int timeout, unsigned int& wait_time); virtual DataStatus StartReading(DataBuffer& buffer); virtual DataStatus StartWriting(DataBuffer& buffer, DataCallback *space_cb = NULL); virtual DataStatus StopWriting(); virtual DataStatus StopReading(); virtual DataStatus FinishReading(bool error); virtual DataStatus FinishWriting(bool error); virtual DataStatus Check(bool check_meta); virtual DataStatus Remove(); virtual DataStatus Stat(FileInfo& file, DataPointInfoType verb = INFO_TYPE_ALL); virtual DataStatus Stat(std::list& files, const std::list& urls, DataPointInfoType verb = INFO_TYPE_ALL); virtual DataStatus List(std::list& files, DataPointInfoType verb = INFO_TYPE_ALL); virtual DataStatus CreateDirectory(bool with_parents=false); virtual DataStatus Rename(const URL& newurl); virtual const std::string DefaultCheckSum() const; virtual bool ProvidesMeta() const; virtual bool IsStageable() const; virtual std::vector TransferLocations() const; virtual void ClearTransferLocations(); private: SRMClientRequest *srm_request; /* holds SRM request ID between Prepare* and Finish* */ static Logger logger; std::vector turls; /* TURLs returned from prepare methods */ URL r_url; /* URL used for redirected operations in Start/Stop Reading/Writing */ DataHandle *r_handle; /* handle used for redirected operations in Start/Stop Reading/Writing */ bool reading; bool writing; DataStatus ListFiles(std::list& files, DataPointInfoType verb, int recursion); /** Check protocols given in list can be used, and if not remove them */ void CheckProtocols(std::list& transport_protocols); /** Select transfer protocols from URL option or hard-coded list */ void ChooseTransferProtocols(std::list& transport_protocols); /// Convert URL to SRM style, also URI-encoding path if necessary std::string CanonicSRMURL(const URL& srm_url); /// Convert SRM metadata into FileInfo object, inserting a new object into the list void FillFileInfo(std::list& files, const struct SRMFileMetaData& srm_metadata); }; } // namespace ArcDMCSRM #endif // __ARC_DATAPOINTSRM_H__ nordugrid-arc-5.4.2/src/hed/dmc/srm/PaxHeaders.7502/DataPointSRM.cpp0000644000000000000000000000012312675602216023047 xustar000000000000000027 mtime=1459029134.924374 26 atime=1513200575.24171 30 ctime=1513200660.914758674 nordugrid-arc-5.4.2/src/hed/dmc/srm/DataPointSRM.cpp0000644000175000002070000010714012675602216023120 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "DataPointSRM.h" namespace ArcDMCSRM { using namespace Arc; Logger DataPointSRM::logger(Logger::getRootLogger(), "DataPoint.SRM"); DataPointSRM::DataPointSRM(const URL& url, const UserConfig& usercfg, PluginArgument* parg) : DataPointDirect(url, usercfg, parg), srm_request(NULL), r_handle(NULL), reading(false), writing(false) {} DataPointSRM::~DataPointSRM() { delete r_handle; delete srm_request; } Plugin* DataPointSRM::Instance(PluginArgument *arg) { DataPointPluginArgument *dmcarg = dynamic_cast(arg); if (!dmcarg) return NULL; if (((const URL&)(*dmcarg)).Protocol() != "srm") return NULL; return new DataPointSRM(*dmcarg, *dmcarg, 
dmcarg); } DataStatus DataPointSRM::Check(bool check_meta) { std::string error; SRMClient *client = SRMClient::getInstance(usercfg, url.fullstr(), error); if (!client) { return DataStatus(DataStatus::CheckError, ECONNREFUSED, error); } SRMClientRequest srm_request_tmp(CanonicSRMURL(url)); // first check permissions DataStatus res = client->checkPermissions(srm_request_tmp); if (!res && res.GetErrno() != EOPNOTSUPP) { delete client; return res; } if (check_meta) { logger.msg(VERBOSE, "Check: looking for metadata: %s", CurrentLocation().str()); srm_request_tmp.long_list(true); std::list metadata; res = client->info(srm_request_tmp, metadata); delete client; client = NULL; if (!res) return DataStatus(DataStatus::CheckError, res.GetErrno(), res.GetDesc()); if (metadata.empty()) return DataStatus(DataStatus::CheckError, EARCRESINVAL, "No results returned"); if (metadata.front().size > 0) { logger.msg(INFO, "Check: obtained size: %lli", metadata.front().size); SetSize(metadata.front().size); } if (metadata.front().checkSumValue.length() > 0 && metadata.front().checkSumType.length() > 0) { std::string csum(metadata.front().checkSumType + ":" + metadata.front().checkSumValue); logger.msg(INFO, "Check: obtained checksum: %s", csum); SetCheckSum(csum); } if (metadata.front().lastModificationTime > 0) { logger.msg(INFO, "Check: obtained modification date: %s", Time(metadata.front().lastModificationTime).str()); SetModified(Time(metadata.front().lastModificationTime)); } if (metadata.front().fileLocality == SRM_ONLINE) { logger.msg(INFO, "Check: obtained access latency: low (ONLINE)"); SetAccessLatency(ACCESS_LATENCY_SMALL); } else if (metadata.front().fileLocality == SRM_NEARLINE) { logger.msg(INFO, "Check: obtained access latency: high (NEARLINE)"); SetAccessLatency(ACCESS_LATENCY_LARGE); } } else { delete client; } return DataStatus::Success; } DataStatus DataPointSRM::Remove() { std::string error; SRMClient *client = SRMClient::getInstance(usercfg, url.fullstr(), error); if (!client) { return DataStatus(DataStatus::DeleteError, ECONNREFUSED, error); } // take out options in srm url and encode path SRMClientRequest srm_request_tmp(CanonicSRMURL(url)); logger.msg(VERBOSE, "Remove: deleting: %s", CurrentLocation().str()); DataStatus res = client->remove(srm_request_tmp); delete client; client = NULL; return res; } DataStatus DataPointSRM::CreateDirectory(bool with_parents) { std::string error; SRMClient *client = SRMClient::getInstance(usercfg, url.fullstr(), error); if (!client) { return DataStatus(DataStatus::CreateDirectoryError, ECONNREFUSED, error); } // take out options in srm url and encode path SRMClientRequest request(CanonicSRMURL(url)); logger.msg(VERBOSE, "Creating directory: %s", CanonicSRMURL(url)); DataStatus res = client->mkDir(request); delete client; client = NULL; return res; } DataStatus DataPointSRM::Rename(const URL& newurl) { std::string error; SRMClient *client = SRMClient::getInstance(usercfg, url.fullstr(), error); if (!client) { return DataStatus(DataStatus::RenameError, ECONNREFUSED, error); } // take out options in srm urls and encode paths SRMClientRequest request(CanonicSRMURL(url)); URL canonic_newurl(CanonicSRMURL(newurl)); logger.msg(VERBOSE, "Renaming %s to %s", CanonicSRMURL(url), canonic_newurl.str()); DataStatus res = client->rename(request, canonic_newurl); delete client; client = NULL; return res; } DataStatus DataPointSRM::PrepareReading(unsigned int stage_timeout, unsigned int& wait_time) { if (writing) return DataStatus(DataStatus::IsWritingError, EARCLOGIC, 
"Already writing"); if (reading && r_handle) return DataStatus(DataStatus::IsReadingError, EARCLOGIC, "Already reading"); reading = true; turls.clear(); std::list transport_urls; DataStatus res; std::string error; // choose transfer procotols std::list transport_protocols; ChooseTransferProtocols(transport_protocols); // If the file is NEARLINE (on tape) bringOnline is called // Whether or not to do this should eventually be specified by the user if (access_latency == ACCESS_LATENCY_LARGE) { if (srm_request) { if (srm_request->status() != SRM_REQUEST_ONGOING) { // error, querying a request that was already prepared logger.msg(VERBOSE, "Calling PrepareReading when request was already prepared!"); reading = false; return DataStatus(DataStatus::ReadPrepareError, EARCLOGIC, "File is already prepared"); } SRMClient *client = SRMClient::getInstance(usercfg, url.fullstr(), error); if (!client) { reading = false; return DataStatus(DataStatus::ReadPrepareError, ECONNREFUSED, error); } res = client->requestBringOnlineStatus(*srm_request); delete client; } // if no existing request, make a new request else { SRMClient* client = SRMClient::getInstance(usercfg, url.fullstr(), error); if (!client) { return DataStatus(DataStatus::ReadPrepareError, ECONNREFUSED, error); } // take out options in srm url and encode path delete srm_request; srm_request = new SRMClientRequest(CanonicSRMURL(url)); logger.msg(INFO, "File %s is NEARLINE, will make request to bring online", CanonicSRMURL(url)); srm_request->request_timeout(stage_timeout); res = client->requestBringOnline(*srm_request); delete client; } if (!res) return res; if (srm_request->status() == SRM_REQUEST_ONGOING) { // request is not finished yet wait_time = srm_request->waiting_time(); logger.msg(INFO, "Bring online request %s is still in queue, should wait", srm_request->request_token()); return DataStatus::ReadPrepareWait; } else if (srm_request->status() == SRM_REQUEST_FINISHED_SUCCESS) { // file is staged so go to next step to get TURLs logger.msg(INFO, "Bring online request %s finished successfully, file is now ONLINE", srm_request->request_token()); access_latency = ACCESS_LATENCY_SMALL; delete srm_request; srm_request = NULL; } else { // bad logic - SRM_OK returned but request is not finished or on going logger.msg(VERBOSE, "Bad logic for %s - bringOnline returned ok but SRM request is not finished successfully or on going", url.str()); return DataStatus(DataStatus::ReadPrepareError, EARCLOGIC, "Inconsistent status code from SRM"); } } // Here we assume the file is in an ONLINE state // If a request already exists, query status if (srm_request) { if (srm_request->status() != SRM_REQUEST_ONGOING) { // error, querying a request that was already prepared logger.msg(VERBOSE, "Calling PrepareReading when request was already prepared!"); return DataStatus(DataStatus::ReadPrepareError, EARCLOGIC, "File is already prepared"); } SRMClient *client = SRMClient::getInstance(usercfg, url.fullstr(), error); if (!client) { return DataStatus(DataStatus::ReadPrepareError, ECONNREFUSED, error); } res = client->getTURLsStatus(*srm_request, transport_urls); delete client; } // if no existing request, make a new request else { SRMClient* client = SRMClient::getInstance(usercfg, url.fullstr(), error); if (!client) { return DataStatus(DataStatus::ReadPrepareError, ECONNREFUSED, error); } delete srm_request; CheckProtocols(transport_protocols); if (transport_protocols.empty()) { logger.msg(VERBOSE, "None of the requested transfer protocols are supported"); delete 
client; return DataStatus(DataStatus::ReadPrepareError, EOPNOTSUPP, "None of the requested transfer protocols are supported"); } srm_request = new SRMClientRequest(CanonicSRMURL(url)); srm_request->request_timeout(stage_timeout); srm_request->transport_protocols(transport_protocols); res = client->getTURLs(*srm_request, transport_urls); delete client; } if (!res) return res; if (srm_request->status() == SRM_REQUEST_ONGOING) { // request is not finished yet wait_time = srm_request->waiting_time(); logger.msg(INFO, "Get request %s is still in queue, should wait %i seconds", srm_request->request_token(), wait_time); return DataStatus::ReadPrepareWait; } else if (srm_request->status() == SRM_REQUEST_FINISHED_SUCCESS) { // request finished - deal with TURLs // Add all valid TURLs to list for (std::list::iterator i = transport_urls.begin(); i != transport_urls.end(); ++i) { // Avoid redirection to SRM logger.msg(VERBOSE, "Checking URL returned by SRM: %s", *i); if (strncasecmp(i->c_str(), "srm://", 6) == 0) continue; // Try to use this TURL + old options URL redirected_url(*i); DataHandle redirected_handle(redirected_url, usercfg); // check if url can be handled if (!redirected_handle || !(*redirected_handle)) continue; if (redirected_handle->IsIndex()) continue; redirected_handle->AddURLOptions(url.Options()); turls.push_back(redirected_handle->GetURL()); } if (turls.empty()) { logger.msg(VERBOSE, "SRM returned no useful Transfer URLs: %s", url.str()); srm_request->finished_abort(); return DataStatus(DataStatus::ReadPrepareError, EARCRESINVAL, "No useful transfer URLs returned"); } } else { // bad logic - SRM_OK returned but request is not finished or on going logger.msg(VERBOSE, "Bad logic for %s - getTURLs returned ok but SRM request is not finished successfully or on going", url.str()); return DataStatus(DataStatus::ReadPrepareError, EARCLOGIC, "Inconsistent status code from SRM"); } return DataStatus::Success; } DataStatus DataPointSRM::StartReading(DataBuffer& buf) { logger.msg(VERBOSE, "StartReading"); if (!reading || turls.empty() || !srm_request || r_handle) { logger.msg(VERBOSE, "StartReading: File was not prepared properly"); return DataStatus(DataStatus::ReadStartError, EARCLOGIC, "File was not prepared"); } buffer = &buf; // Choose TURL randomly (validity of TURLs was already checked in Prepare) std::srand(time(NULL)); int n = (int)((std::rand() * ((double)(turls.size() - 1))) / RAND_MAX + 0.25); r_url = turls.at(n); r_handle = new DataHandle(r_url, usercfg); // check if url can be handled if (!(*r_handle)) { delete r_handle; r_handle = NULL; logger.msg(VERBOSE, "TURL %s cannot be handled", r_url.str()); return DataStatus(DataStatus::ReadStartError, EARCRESINVAL, "Transfer URL cannot be handled"); } (*r_handle)->SetAdditionalChecks(false); // checks at higher levels are always done on SRM metadata (*r_handle)->SetSecure(force_secure); (*r_handle)->Passive(force_passive); logger.msg(INFO, "Redirecting to new URL: %s", (*r_handle)->CurrentLocation().str()); DataStatus r = (*r_handle)->StartReading(buf); if(!r) { delete r_handle; r_handle = NULL; } return r; } DataStatus DataPointSRM::StopReading() { if (!reading) return DataStatus::Success; DataStatus r = DataStatus::Success; if (r_handle) { r = (*r_handle)->StopReading(); delete r_handle; r_handle = NULL; } return r; } DataStatus DataPointSRM::FinishReading(bool error) { if (!reading) return DataStatus::Success; StopReading(); reading = false; if (srm_request) { std::string err; SRMClient *client = SRMClient::getInstance(usercfg, 
url.fullstr(), err); // if the request finished with an error there is no need to abort or release request if (client && (srm_request->status() != SRM_REQUEST_FINISHED_ERROR)) { if (error || srm_request->status() == SRM_REQUEST_SHOULD_ABORT) { client->abort(*srm_request, true); } else if (srm_request->status() == SRM_REQUEST_FINISHED_SUCCESS) { client->releaseGet(*srm_request); } } delete client; delete srm_request; srm_request = NULL; } turls.clear(); return DataStatus::Success; } DataStatus DataPointSRM::PrepareWriting(unsigned int stage_timeout, unsigned int& wait_time) { if (reading) return DataStatus(DataStatus::IsReadingError, EARCLOGIC, "Already reading"); if (writing && r_handle) return DataStatus(DataStatus::IsWritingError, EARCLOGIC, "Already writing"); writing = true; turls.clear(); std::list transport_urls; DataStatus res; std::string error; // choose transfer procotols std::list transport_protocols; ChooseTransferProtocols(transport_protocols); // If a request already exists, query status if (srm_request) { if (srm_request->status() != SRM_REQUEST_ONGOING) { // error, querying a request that was already prepared logger.msg(VERBOSE, "Calling PrepareWriting when request was already prepared!"); return DataStatus(DataStatus::WritePrepareError, EARCLOGIC, "File was already prepared"); } SRMClient *client = SRMClient::getInstance(usercfg, url.fullstr(), error); if (!client) { return DataStatus(DataStatus::WritePrepareError, ECONNREFUSED, error); } res = client->putTURLsStatus(*srm_request, transport_urls); delete client; } // if no existing request, make a new request else { SRMClient* client = SRMClient::getInstance(usercfg, url.fullstr(), error); if (!client) { return DataStatus(DataStatus::WritePrepareError, ECONNREFUSED, error); } delete srm_request; CheckProtocols(transport_protocols); if (transport_protocols.empty()) { logger.msg(VERBOSE, "None of the requested transfer protocols are supported"); delete client; return DataStatus(DataStatus::WritePrepareError, EOPNOTSUPP, "None of the requested transfer protocols are supported"); } srm_request = new SRMClientRequest(CanonicSRMURL(url)); // set space token std::string space_token = url.Option("spacetoken"); if (space_token.empty()) { if (client->getVersion().compare("v2.2") == 0) { // only print message if using v2.2 logger.msg(VERBOSE, "No space token specified"); } } else { if (client->getVersion().compare("v2.2") != 0) { // print warning if not using srm2.2 logger.msg(WARNING, "Warning: Using SRM protocol v1 which does not support space tokens"); } else { logger.msg(VERBOSE, "Using space token description %s", space_token); // get token from SRM that matches description // errors with space tokens now cause the transfer to fail - see bug 2061 std::list tokens; DataStatus token_res = client->getSpaceTokens(tokens, space_token); if (!token_res) { logger.msg(VERBOSE, "Error looking up space tokens matching description %s", space_token); delete client; delete srm_request; srm_request = NULL; return DataStatus(DataStatus::WritePrepareError, token_res.GetErrno(), "Error looking up space tokens matching description"); } if (tokens.empty()) { logger.msg(VERBOSE, "No space tokens found matching description %s", space_token); delete client; delete srm_request; srm_request = NULL; return DataStatus(DataStatus::WritePrepareError, EARCRESINVAL, "No space tokens found matching description"); } // take the first one in the list logger.msg(VERBOSE, "Using space token %s", tokens.front()); srm_request->space_token(tokens.front()); } } 
srm_request->request_timeout(stage_timeout); if (CheckSize()) srm_request->total_size(GetSize()); srm_request->transport_protocols(transport_protocols); res = client->putTURLs(*srm_request, transport_urls); delete client; } if (!res) return res; if (srm_request->status() == SRM_REQUEST_ONGOING) { // request is not finished yet wait_time = srm_request->waiting_time(); logger.msg(INFO, "Put request %s is still in queue, should wait %i seconds", srm_request->request_token(), wait_time); return DataStatus::WritePrepareWait; } else if (srm_request->status() == SRM_REQUEST_FINISHED_SUCCESS) { // request finished - deal with TURLs // Add all valid TURLs to list for (std::list::iterator i = transport_urls.begin(); i != transport_urls.end(); ++i) { // Avoid redirection to SRM logger.msg(VERBOSE, "Checking URL returned by SRM: %s", *i); if (strncasecmp(i->c_str(), "srm://", 6) == 0) continue; // Try to use this TURL + old options URL redirected_url(*i); DataHandle redirected_handle(redirected_url, usercfg); // check if url can be handled if (!redirected_handle || !(*redirected_handle)) continue; if (redirected_handle->IsIndex()) continue; redirected_handle->AddURLOptions(url.Options()); turls.push_back(redirected_handle->GetURL()); } if (turls.empty()) { logger.msg(VERBOSE, "SRM returned no useful Transfer URLs: %s", url.str()); srm_request->finished_abort(); return DataStatus(DataStatus::WritePrepareError, EARCRESINVAL, "No useful transfer URLs returned"); } } else { // bad logic - SRM_OK returned but request is not finished or on going logger.msg(VERBOSE, "Bad logic for %s - putTURLs returned ok but SRM request is not finished successfully or on going", url.str()); return DataStatus(DataStatus::WritePrepareError, EARCLOGIC, "Inconsistent status code from SRM"); } return DataStatus::Success; } DataStatus DataPointSRM::StartWriting(DataBuffer& buf, DataCallback *space_cb) { logger.msg(VERBOSE, "StartWriting"); if (!writing || turls.empty() || !srm_request || r_handle) { logger.msg(VERBOSE, "StartWriting: File was not prepared properly"); return DataStatus(DataStatus::WriteStartError, EARCLOGIC, "File was not prepared"); } buffer = &buf; // Choose TURL randomly (validity of TURLs was already checked in Prepare) std::srand(time(NULL)); int n = (int)((std::rand() * ((double)(turls.size() - 1))) / RAND_MAX + 0.25); r_url = turls.at(n); r_handle = new DataHandle(r_url, usercfg); // check if url can be handled if (!(*r_handle)) { delete r_handle; r_handle = NULL; logger.msg(VERBOSE, "TURL %s cannot be handled", r_url.str()); return DataStatus(DataStatus::WriteStartError, EARCRESINVAL, "Transfer URL cannot be handled"); } (*r_handle)->SetAdditionalChecks(false); // checks at higher levels are always done on SRM metadata (*r_handle)->SetSecure(force_secure); (*r_handle)->Passive(force_passive); logger.msg(INFO, "Redirecting to new URL: %s", (*r_handle)->CurrentLocation().str()); DataStatus r = (*r_handle)->StartWriting(buf); if(!r) { delete r_handle; r_handle = NULL; } return r; } DataStatus DataPointSRM::StopWriting() { if (!writing) return DataStatus::Success; DataStatus r = DataStatus::Success; if (r_handle) { r = (*r_handle)->StopWriting(); // check if checksum was verified at lower level if ((*r_handle)->CheckCheckSum()) SetCheckSum((*r_handle)->GetCheckSum()); delete r_handle; r_handle = NULL; } return r; } DataStatus DataPointSRM::FinishWriting(bool error) { if (!writing) return DataStatus::Success; StopWriting(); writing = false; DataStatus r = DataStatus::Success; // if the request finished with an 
error there is no need to abort or release request if (srm_request) { std::string err; SRMClient * client = SRMClient::getInstance(usercfg, url.fullstr(), err); if (client && (srm_request->status() != SRM_REQUEST_FINISHED_ERROR)) { // call abort if failure, or releasePut on success if (error || srm_request->status() == SRM_REQUEST_SHOULD_ABORT) { client->abort(*srm_request, false); // according to the spec the SURL may or may not exist after abort // so remove may fail, however it is not an error client->remove(*srm_request); } else { // checksum verification - if requested and not already done at lower level if (srm_request->status() == SRM_REQUEST_FINISHED_SUCCESS && additional_checks && buffer && !CheckCheckSum()) { const CheckSum * calc_sum = buffer->checksum_object(); if (calc_sum && *calc_sum && buffer->checksum_valid()) { char buf[100]; calc_sum->print(buf,100); std::string csum(buf); if (!csum.empty() && csum.find(':') != std::string::npos) { // get checksum info for checksum verification logger.msg(VERBOSE, "FinishWriting: looking for metadata: %s", url.str()); // create a new request SRMClientRequest list_request(srm_request->surls()); list_request.long_list(true); std::list metadata; DataStatus res = client->info(list_request,metadata); if (!res) { client->abort(*srm_request, false); // if we can't list then we can't remove either delete client; delete srm_request; srm_request = NULL; return DataStatus(DataStatus::WriteFinishError, res.GetErrno(), res.GetDesc()); } if (!metadata.empty()) { if (metadata.front().checkSumValue.length() > 0 && metadata.front().checkSumType.length() > 0) { std::string servercsum(metadata.front().checkSumType+":"+metadata.front().checkSumValue); logger.msg(INFO, "FinishWriting: obtained checksum: %s", servercsum); if (csum.substr(0, csum.find(':')) == metadata.front().checkSumType) { if (csum.substr(csum.find(':')+1) == metadata.front().checkSumValue) { logger.msg(INFO, "Calculated/supplied transfer checksum %s matches checksum reported by SRM destination %s", csum, servercsum); } else { logger.msg(VERBOSE, "Checksum mismatch between calculated/supplied checksum (%s) and checksum reported by SRM destination (%s)", csum, servercsum); r = DataStatus(DataStatus::WriteFinishError, EARCCHECKSUM, "Checksum mismatch between calculated/supplied checksum and reported by SRM destination"); } } else logger.msg(WARNING, "Checksum type of SRM (%s) and calculated/supplied checksum (%s) differ, cannot compare", servercsum, csum); } else logger.msg(WARNING, "No checksum information from server"); } else logger.msg(WARNING, "No checksum information from server"); } else logger.msg(INFO, "No checksum verification possible"); } else logger.msg(INFO, "No checksum verification possible"); } if (r.Passed()) { if (srm_request->status() == SRM_REQUEST_FINISHED_SUCCESS) { DataStatus res = client->releasePut(*srm_request); if (!res) { logger.msg(VERBOSE, "Failed to release completed request"); r = DataStatus(DataStatus::WriteFinishError, res.GetErrno(), "Failed to release completed request"); } } } else { client->abort(*srm_request, false); // according to the spec the SURL may or may not exist after abort // so remove may fail, however it is not an error client->remove(*srm_request); } } } delete client; delete srm_request; srm_request = NULL; } return r; } DataStatus DataPointSRM::Stat(FileInfo& file, DataPointInfoType verb) { std::list files; std::list urls; urls.push_back(const_cast (this)); DataStatus r = Stat(files, urls, verb); if (!r.Passed()) return r; file = 
files.front(); return r; } DataStatus DataPointSRM::Stat(std::list& files, const std::list& urls, DataPointInfoType verb) { if (urls.empty()) return DataStatus::Success; std::string error; SRMClient * client = SRMClient::getInstance(usercfg, url.fullstr(), error); if(!client) { return DataStatus(DataStatus::StatError, ECONNREFUSED, error); } std::list surls; for (std::list::const_iterator i = urls.begin(); i != urls.end(); ++i) { surls.push_back(CanonicSRMURL((*i)->CurrentLocation())); logger.msg(VERBOSE, "ListFiles: looking for metadata: %s", (*i)->CurrentLocation().str()); } SRMClientRequest srm_request_tmp(surls); srm_request_tmp.recursion(-1); if ((verb | INFO_TYPE_NAME) != INFO_TYPE_NAME) srm_request_tmp.long_list(true); std::map > metadata_map; // get info from SRM DataStatus res = client->info(srm_request_tmp, metadata_map); delete client; if (!res) return DataStatus(DataStatus::StatError, res.GetErrno(), res.GetDesc()); for (std::list::const_iterator dp = urls.begin(); dp != urls.end(); ++dp) { std::string surl = CanonicSRMURL((*dp)->CurrentLocation()); if (metadata_map.find(surl) == metadata_map.end()) { // error files.push_back(FileInfo()); continue; } if (metadata_map[surl].size() != 1) { // error files.push_back(FileInfo()); continue; } struct SRMFileMetaData srm_metadata = metadata_map[surl].front(); // set URL attributes for surl requested (file or dir) if(srm_metadata.size > 0) { (*dp)->SetSize(srm_metadata.size); } if(srm_metadata.checkSumType.length() > 0 && srm_metadata.checkSumValue.length() > 0) { std::string csum(srm_metadata.checkSumType+":"+srm_metadata.checkSumValue); (*dp)->SetCheckSum(csum); } if(srm_metadata.lastModificationTime > 0) { (*dp)->SetModified(Time(srm_metadata.lastModificationTime)); } if(srm_metadata.fileLocality == SRM_ONLINE) { (*dp)->SetAccessLatency(ACCESS_LATENCY_SMALL); } else if(srm_metadata.fileLocality == SRM_NEARLINE) { (*dp)->SetAccessLatency(ACCESS_LATENCY_LARGE); } FillFileInfo(files, srm_metadata); } return DataStatus::Success; } DataStatus DataPointSRM::List(std::list& files, DataPointInfoType verb) { return ListFiles(files,verb,0); } DataStatus DataPointSRM::ListFiles(std::list& files, DataPointInfoType verb, int recursion) { // This method does not use any dynamic members of this object. 
Hence // it can be executed even while reading or writing std::string error; SRMClient * client = SRMClient::getInstance(usercfg, url.fullstr(), error); if(!client) { return DataStatus(DataStatus::ListError, ECONNREFUSED, error); } SRMClientRequest srm_request_tmp(CanonicSRMURL(url)); srm_request_tmp.recursion(recursion); logger.msg(VERBOSE, "ListFiles: looking for metadata: %s", CurrentLocation().str()); if ((verb | INFO_TYPE_NAME) != INFO_TYPE_NAME) srm_request_tmp.long_list(true); std::list srm_metadata; // get info from SRM DataStatus res = client->info(srm_request_tmp, srm_metadata); delete client; if (!res) return res; if (srm_metadata.empty()) { return DataStatus::Success; } // set URL attributes for surl requested (file or dir) if(srm_metadata.front().size > 0) { SetSize(srm_metadata.front().size); } if(srm_metadata.front().checkSumType.length() > 0 && srm_metadata.front().checkSumValue.length() > 0) { std::string csum(srm_metadata.front().checkSumType+":"+srm_metadata.front().checkSumValue); SetCheckSum(csum); } if(srm_metadata.front().lastModificationTime > 0) { SetModified(Time(srm_metadata.front().lastModificationTime)); } if(srm_metadata.front().fileLocality == SRM_ONLINE) { SetAccessLatency(ACCESS_LATENCY_SMALL); } else if(srm_metadata.front().fileLocality == SRM_NEARLINE) { SetAccessLatency(ACCESS_LATENCY_LARGE); } // set FileInfo attributes for surl requested and any files within a dir for (std::list::const_iterator i = srm_metadata.begin(); i != srm_metadata.end(); ++i) { FillFileInfo(files, *i); } return DataStatus::Success; } const std::string DataPointSRM::DefaultCheckSum() const { return std::string("adler32"); } bool DataPointSRM::ProvidesMeta() const { return true; } bool DataPointSRM::IsStageable() const { return true; } std::vector DataPointSRM::TransferLocations() const { return turls; } void DataPointSRM::ClearTransferLocations() { turls.clear(); } void DataPointSRM::CheckProtocols(std::list& transport_protocols) { for (std::list::iterator protocol = transport_protocols.begin(); protocol != transport_protocols.end();) { // try to load plugins URL url(*protocol+"://host/path"); DataHandle handle(url, usercfg); if (handle) { ++protocol; } else { logger.msg(WARNING, "plugin for transport protocol %s is not installed", *protocol); protocol = transport_protocols.erase(protocol); } } } void DataPointSRM::ChooseTransferProtocols(std::list& transport_protocols) { std::string option_protocols(url.Option("transferprotocol")); if (option_protocols.empty()) { transport_protocols.push_back("gsiftp"); transport_protocols.push_back("http"); transport_protocols.push_back("https"); transport_protocols.push_back("httpg"); transport_protocols.push_back("ftp"); } else { tokenize(option_protocols, transport_protocols, ","); } } std::string DataPointSRM::CanonicSRMURL(const URL& srm_url) { std::string canonic_url; std::string sfn_path = srm_url.HTTPOption("SFN"); if (!sfn_path.empty()) { while (sfn_path[0] == '/') sfn_path.erase(0,1); canonic_url = srm_url.Protocol() + "://" + srm_url.Host() + "/" + uri_encode(sfn_path, false); } else { // if SFN option is not used, treat everything in the path including // options as part of the path and encode it canonic_url = srm_url.Protocol() + "://" + srm_url.Host() + uri_encode(srm_url.Path(), false); std::string extrapath; for (std::map::const_iterator it = srm_url.HTTPOptions().begin(); it != srm_url.HTTPOptions().end(); it++) { if (it == srm_url.HTTPOptions().begin()) extrapath += '?'; else extrapath += '&'; extrapath += it->first; if 
(!it->second.empty()) extrapath += '=' + it->second; } canonic_url += uri_encode(extrapath, false); } return canonic_url; } void DataPointSRM::FillFileInfo(std::list& files, const struct SRMFileMetaData& srm_metadata) { // set FileInfo attributes std::list::iterator f = files.insert(files.end(), FileInfo(srm_metadata.path)); if (srm_metadata.fileType == SRM_FILE) { f->SetType(FileInfo::file_type_file); } else if (srm_metadata.fileType == SRM_DIRECTORY) { f->SetType(FileInfo::file_type_dir); } if (srm_metadata.size >= 0) { f->SetSize(srm_metadata.size); } if (srm_metadata.lastModificationTime > 0) { f->SetModified(Time(srm_metadata.lastModificationTime)); } if (srm_metadata.checkSumType.length() > 0 && srm_metadata.checkSumValue.length() > 0) { std::string csum(srm_metadata.checkSumType + ":" + srm_metadata.checkSumValue); f->SetCheckSum(csum); } if (srm_metadata.fileLocality == SRM_ONLINE) { f->SetLatency("ONLINE"); } else if (srm_metadata.fileLocality == SRM_NEARLINE) { f->SetLatency("NEARLINE"); } if (srm_metadata.createdAtTime > 0) { f->SetMetaData("ctime", (Time(srm_metadata.createdAtTime)).str()); } if (!srm_metadata.spaceTokens.empty()) { std::string spaceTokens; for (std::list::const_iterator it = srm_metadata.spaceTokens.begin(); it != srm_metadata.spaceTokens.end(); it++) { if (!spaceTokens.empty()) spaceTokens += ','; spaceTokens += *it; } f->SetMetaData("spacetokens", spaceTokens); } if (!srm_metadata.owner.empty()) f->SetMetaData("owner", srm_metadata.owner); if (!srm_metadata.group.empty()) f->SetMetaData("group", srm_metadata.group); if (!srm_metadata.permission.empty()) f->SetMetaData("accessperm", srm_metadata.permission); if (srm_metadata.lifetimeLeft != 0) f->SetMetaData("lifetimeleft", tostring(srm_metadata.lifetimeLeft)); if (srm_metadata.lifetimeAssigned != 0) f->SetMetaData("lifetimeassigned", tostring(srm_metadata.lifetimeAssigned)); if (srm_metadata.retentionPolicy == SRM_REPLICA) f->SetMetaData("retentionpolicy", "REPLICA"); else if (srm_metadata.retentionPolicy == SRM_OUTPUT) f->SetMetaData("retentionpolicy", "OUTPUT"); else if (srm_metadata.retentionPolicy == SRM_CUSTODIAL) f->SetMetaData("retentionpolicy", "CUSTODIAL"); if (srm_metadata.fileStorageType == SRM_VOLATILE) f->SetMetaData("filestoragetype", "VOLATILE"); else if (srm_metadata.fileStorageType == SRM_DURABLE) f->SetMetaData("filestoragetype", "DURABLE"); else if (srm_metadata.fileStorageType == SRM_PERMANENT) f->SetMetaData("filestoragetype", "PERMANENT"); } } // namespace ArcDMCSRM extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "srm", "HED:DMC", "Storage Resource Manager", 0, &ArcDMCSRM::DataPointSRM::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/dmc/srm/PaxHeaders.7502/srmclient0000644000000000000000000000013213214316024022006 xustar000000000000000030 mtime=1513200660.940758992 30 atime=1513200668.722854169 30 ctime=1513200660.940758992 nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/0000755000175000002070000000000013214316024022131 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515024130 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200595.758961793 30 ctime=1513200660.930758869 nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/Makefile.am0000644000175000002070000000073612052416515024200 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libsrmclient.la libsrmclient_la_SOURCES = SRMClient.cpp SRMClient.h \ SRM1Client.cpp SRM1Client.h \ 
SRM22Client.cpp SRM22Client.h \ SRMURL.cpp SRMURL.h \ SRMInfo.cpp SRMInfo.h \ SRMClientRequest.h libsrmclient_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315723024135 xustar000000000000000030 mtime=1513200595.808962404 30 atime=1513200649.063613729 30 ctime=1513200660.931758882 nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/Makefile.in0000644000175000002070000006607513214315723024221 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/dmc/srm/srmclient DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libsrmclient_la_LIBADD = am_libsrmclient_la_OBJECTS = libsrmclient_la-SRMClient.lo \ libsrmclient_la-SRM1Client.lo libsrmclient_la-SRM22Client.lo \ libsrmclient_la-SRMURL.lo libsrmclient_la-SRMInfo.lo libsrmclient_la_OBJECTS = $(am_libsrmclient_la_OBJECTS) libsrmclient_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libsrmclient_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libsrmclient_la_SOURCES) DIST_SOURCES = $(libsrmclient_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ 
GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES 
= @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libsrmclient.la libsrmclient_la_SOURCES = SRMClient.cpp SRMClient.h \ SRM1Client.cpp SRM1Client.h \ SRM22Client.cpp SRM22Client.h \ SRMURL.cpp SRMURL.h \ SRMInfo.cpp SRMInfo.h \ SRMClientRequest.h libsrmclient_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/dmc/srm/srmclient/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/dmc/srm/srmclient/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libsrmclient.la: $(libsrmclient_la_OBJECTS) $(libsrmclient_la_DEPENDENCIES) $(libsrmclient_la_LINK) $(libsrmclient_la_OBJECTS) $(libsrmclient_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libsrmclient_la-SRM1Client.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libsrmclient_la-SRM22Client.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libsrmclient_la-SRMClient.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libsrmclient_la-SRMInfo.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libsrmclient_la-SRMURL.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libsrmclient_la-SRMClient.lo: SRMClient.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libsrmclient_la_CXXFLAGS) $(CXXFLAGS) -MT libsrmclient_la-SRMClient.lo -MD -MP -MF $(DEPDIR)/libsrmclient_la-SRMClient.Tpo -c -o libsrmclient_la-SRMClient.lo `test -f 'SRMClient.cpp' || echo '$(srcdir)/'`SRMClient.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libsrmclient_la-SRMClient.Tpo $(DEPDIR)/libsrmclient_la-SRMClient.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SRMClient.cpp' object='libsrmclient_la-SRMClient.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) 
$(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libsrmclient_la_CXXFLAGS) $(CXXFLAGS) -c -o libsrmclient_la-SRMClient.lo `test -f 'SRMClient.cpp' || echo '$(srcdir)/'`SRMClient.cpp libsrmclient_la-SRM1Client.lo: SRM1Client.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libsrmclient_la_CXXFLAGS) $(CXXFLAGS) -MT libsrmclient_la-SRM1Client.lo -MD -MP -MF $(DEPDIR)/libsrmclient_la-SRM1Client.Tpo -c -o libsrmclient_la-SRM1Client.lo `test -f 'SRM1Client.cpp' || echo '$(srcdir)/'`SRM1Client.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libsrmclient_la-SRM1Client.Tpo $(DEPDIR)/libsrmclient_la-SRM1Client.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SRM1Client.cpp' object='libsrmclient_la-SRM1Client.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libsrmclient_la_CXXFLAGS) $(CXXFLAGS) -c -o libsrmclient_la-SRM1Client.lo `test -f 'SRM1Client.cpp' || echo '$(srcdir)/'`SRM1Client.cpp libsrmclient_la-SRM22Client.lo: SRM22Client.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libsrmclient_la_CXXFLAGS) $(CXXFLAGS) -MT libsrmclient_la-SRM22Client.lo -MD -MP -MF $(DEPDIR)/libsrmclient_la-SRM22Client.Tpo -c -o libsrmclient_la-SRM22Client.lo `test -f 'SRM22Client.cpp' || echo '$(srcdir)/'`SRM22Client.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libsrmclient_la-SRM22Client.Tpo $(DEPDIR)/libsrmclient_la-SRM22Client.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SRM22Client.cpp' object='libsrmclient_la-SRM22Client.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libsrmclient_la_CXXFLAGS) $(CXXFLAGS) -c -o libsrmclient_la-SRM22Client.lo `test -f 'SRM22Client.cpp' || echo '$(srcdir)/'`SRM22Client.cpp libsrmclient_la-SRMURL.lo: SRMURL.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libsrmclient_la_CXXFLAGS) $(CXXFLAGS) -MT libsrmclient_la-SRMURL.lo -MD -MP -MF $(DEPDIR)/libsrmclient_la-SRMURL.Tpo -c -o libsrmclient_la-SRMURL.lo `test -f 'SRMURL.cpp' || echo '$(srcdir)/'`SRMURL.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libsrmclient_la-SRMURL.Tpo $(DEPDIR)/libsrmclient_la-SRMURL.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SRMURL.cpp' object='libsrmclient_la-SRMURL.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libsrmclient_la_CXXFLAGS) $(CXXFLAGS) -c -o libsrmclient_la-SRMURL.lo `test -f 'SRMURL.cpp' || echo '$(srcdir)/'`SRMURL.cpp 
libsrmclient_la-SRMInfo.lo: SRMInfo.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libsrmclient_la_CXXFLAGS) $(CXXFLAGS) -MT libsrmclient_la-SRMInfo.lo -MD -MP -MF $(DEPDIR)/libsrmclient_la-SRMInfo.Tpo -c -o libsrmclient_la-SRMInfo.lo `test -f 'SRMInfo.cpp' || echo '$(srcdir)/'`SRMInfo.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libsrmclient_la-SRMInfo.Tpo $(DEPDIR)/libsrmclient_la-SRMInfo.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SRMInfo.cpp' object='libsrmclient_la-SRMInfo.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libsrmclient_la_CXXFLAGS) $(CXXFLAGS) -c -o libsrmclient_la-SRMInfo.lo `test -f 'SRMInfo.cpp' || echo '$(srcdir)/'`SRMInfo.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/PaxHeaders.7502/SRMClient.cpp0000644000000000000000000000012312100254234024364 xustar000000000000000027 mtime=1359042716.344325 26 atime=1513200575.23971 30 ctime=1513200660.932758894 nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/SRMClient.cpp0000644000175000002070000001352712100254234024442 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include "SRMClient.h" #include "SRM1Client.h" #include "SRM22Client.h" #include "SRMInfo.h" namespace ArcDMCSRM { using namespace Arc; Logger SRMClient::logger(Logger::getRootLogger(), "SRMClient"); SRMClient::SRMClient(const UserConfig& usercfg, const SRMURL& url) : service_endpoint(url.ContactURL()), implementation(SRM_IMPLEMENTATION_UNKNOWN), user_timeout(usercfg.Timeout()) { usercfg.ApplyToConfig(cfg); client = new ClientSOAP(cfg, service_endpoint, usercfg.Timeout()); } SRMClient::~SRMClient() { if (client) delete client; } SRMClient* SRMClient::getInstance(const UserConfig& usercfg, const std::string& url, std::string& error) { SRMURL srm_url(url); if (!srm_url) { error = "Invalid URL"; return NULL; } // can't use ping with srmv1 so just return if (srm_url.SRMVersion() == SRMURL::SRM_URL_VERSION_1) { return new SRM1Client(usercfg, srm_url); } if (usercfg.UtilsDirPath().empty()) { if (srm_url.SRMVersion() == SRMURL::SRM_URL_VERSION_2_2) { return new SRM22Client(usercfg, srm_url); } error = "Unknown SRM version"; return NULL; } DataStatus srm_error; std::string version; SRMInfo info(usercfg.UtilsDirPath()); SRMFileInfo srm_file_info; // lists of ports in the order to try them std::list ports; // if port is specified then only try that one if (srm_url.PortDefined()) { ports.push_back(srm_url.Port()); } // take hints from certain keywords in the url else if (srm_url.Path().find("/dpm/") != std::string::npos) { ports.push_back(8446); ports.push_back(8443); ports.push_back(8444); } else { ports.push_back(8443); ports.push_back(8446); ports.push_back(8444); } srm_file_info.host = srm_url.Host(); srm_file_info.version = srm_url.SRMVersion(); // no info if (!info.getSRMFileInfo(srm_file_info)) { for (std::list::iterator port = ports.begin(); port != ports.end(); ++port) { logger.msg(VERBOSE, "Attempting to contact %s on port %i", srm_url.Host(), *port); srm_url.SetPort(*port); SRMClient *client = new SRM22Client(usercfg, srm_url); if ((srm_error = client->ping(version)).Passed()) { srm_file_info.port = *port; logger.msg(VERBOSE, "Storing port %i for %s", *port, srm_url.Host()); info.putSRMFileInfo(srm_file_info); return client; } delete client; if (srm_error.GetErrno() == ETIMEDOUT) { // probably correct port and service is down // but don't want to risk storing incorrect info error = "Connection timed out"; return NULL; } error = srm_error.GetDesc(); } // if we get here no port has worked logger.msg(VERBOSE, "No port succeeded for %s", srm_url.Host()); } // url agrees with file info else if (srm_file_info == srm_url) { srm_url.SetPort(srm_file_info.port); return new SRM22Client(usercfg, srm_url); } // url disagrees with file info else { // ping and if ok, replace file info logger.msg(VERBOSE, "URL %s disagrees with stored SRM info, testing new info", srm_url.ShortURL()); SRMClient *client = new SRM22Client(usercfg, srm_url); if ((srm_error = client->ping(version)).Passed()) { srm_file_info.port = srm_url.Port(); logger.msg(VERBOSE, "Replacing old SRM info with new for URL %s", srm_url.ShortURL()); info.putSRMFileInfo(srm_file_info); return 
client; } delete client; if (srm_error.GetErrno() == ETIMEDOUT) { // probably correct port and service is down // but don't want to risk storing incorrect info error = "Connection timed out"; } else error = srm_error.GetDesc(); } return NULL; } DataStatus SRMClient::process(const std::string& action, PayloadSOAP *request, PayloadSOAP **response) { if (logger.getThreshold() <= DEBUG) { std::string xml; request->GetXML(xml, true); logger.msg(DEBUG, "SOAP request: %s", xml); } MCC_Status status = client->process(action, request, response); // Try to reconnect in case of failure if (*response && (*response)->IsFault()) { logger.msg(DEBUG, "SOAP fault: %s", (*response)->Fault()->Reason()); logger.msg(DEBUG, "Reconnecting"); delete *response; *response = NULL; delete client; client = new ClientSOAP(cfg, service_endpoint, user_timeout); status = client->process(request, response); } if (!status) { // Currently it is not possible to get the cause of failure from the // lower-level MCC code so all we can report is connection refused logger.msg(VERBOSE, "SRM Client status: %s", (std::string)status); if (*response) delete *response; *response = NULL; return DataStatus(DataStatus::GenericError, ECONNREFUSED, (std::string)status); } if (!(*response)) { logger.msg(VERBOSE, "No SOAP response"); return DataStatus(DataStatus::GenericError, ECONNREFUSED, "No SOAP response"); } if (logger.getThreshold() <= DEBUG) { std::string xml; (*response)->GetXML(xml, true); logger.msg(DEBUG, "SOAP response: %s", xml.substr(0, 10000)); } if ((*response)->IsFault()) { std::string fault((*response)->Fault()->Reason()); logger.msg(VERBOSE, "SOAP fault: %s", fault); delete *response; *response = NULL; return DataStatus(DataStatus::GenericError, EARCSVCTMP, fault); } return DataStatus::Success; } } // namespace ArcDMCSRM nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/PaxHeaders.7502/SRMInfo.cpp0000644000000000000000000000012312074031150024041 xustar000000000000000027 mtime=1357918824.946932 26 atime=1513200575.23371 30 ctime=1513200660.938758967 nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/SRMInfo.cpp0000644000175000002070000001434312074031150024114 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "SRMInfo.h" namespace ArcDMCSRM { Arc::SimpleCondition SRMInfo::lock; std::list SRMInfo::srm_info; Arc::Logger SRMInfo::logger(Arc::Logger::getRootLogger(), "SRMInfo"); SRMFileInfo::SRMFileInfo(const std::string& host_, int port_, const std::string& version_) : host(host_), port(port_) { if (version_ == "1") version = SRMURL::SRM_URL_VERSION_1; else if (version_ == "2.2") version = SRMURL::SRM_URL_VERSION_2_2; else version = SRMURL::SRM_URL_VERSION_UNKNOWN; } SRMFileInfo::SRMFileInfo(): host(""), port(0), version(SRMURL::SRM_URL_VERSION_UNKNOWN) {} bool SRMFileInfo::operator ==(SRMURL srm_url) { if (host == srm_url.Host() && (!srm_url.PortDefined() || port == srm_url.Port()) && version == srm_url.SRMVersion()) { return true; } return false; } std::string SRMFileInfo::versionString() const { switch (version) { case SRMURL::SRM_URL_VERSION_1: { return "1"; }; break; case SRMURL::SRM_URL_VERSION_2_2: { return "2.2"; }; break; default: { return ""; } } return ""; } SRMInfo::SRMInfo(std::string dir) { srm_info_filename = dir + G_DIR_SEPARATOR_S + "srms.conf"; lock.lock(); if (srm_info.empty()) { // read info from file std::list filedata; Arc::FileLock filelock(srm_info_filename); bool acquired = false; for (int tries 
= 10; tries > 0; --tries) { acquired = filelock.acquire(); if (acquired) break; // sleep random time to minimise clashes int sleeptime = rand() % 500000 + 100000; Glib::usleep(sleeptime); } if (!acquired) { logger.msg(Arc::WARNING, "Failed to acquire lock on file %s", srm_info_filename); lock.unlock(); return; } if (!Arc::FileRead(srm_info_filename, filedata)) { if (errno != ENOENT) logger.msg(Arc::WARNING, "Error reading info from file %s:%s", srm_info_filename, Arc::StrError(errno)); filelock.release(); lock.unlock(); return; } filelock.release(); for (std::list::iterator line = filedata.begin(); line != filedata.end(); ++line) { if (line->empty() || (*line)[0] == '#') continue; // split line std::vector fields; Arc::tokenize(*line, fields); if (fields.size() != 3) { logger.msg(Arc::WARNING, "Bad or old format detected in file %s, in line %s", srm_info_filename, *line); continue; } int port; if (!Arc::stringto(fields[1], port)) { logger.msg(Arc::WARNING, "Cannot convert string %s to int in line %s", fields[1], *line); continue; } SRMFileInfo f(fields[0], port, fields[2]); srm_info.push_back(f); } } lock.unlock(); } bool SRMInfo::getSRMFileInfo(SRMFileInfo& srm_file_info) { // look for our combination of host and version lock.lock(); for (std::list::const_iterator i = srm_info.begin(); i != srm_info.end(); ++i) { if (i->host == srm_file_info.host && i->version == srm_file_info.version) { srm_file_info.port = i->port; lock.unlock(); return true; } } lock.unlock(); return false; } void SRMInfo::putSRMFileInfo(const SRMFileInfo& srm_file_info) { // fill info cached in memory lock.lock(); for (std::list::iterator i = srm_info.begin(); i != srm_info.end();) { if (i->host == srm_file_info.host && i->version == srm_file_info.version) { if (i->port == srm_file_info.port) { // this same info was already added lock.unlock(); return; } // the info has changed, so erase existing info i = srm_info.erase(i); } else { ++i; } } srm_info.push_back(srm_file_info); lock.unlock(); // now fill in file std::string header("# This file was automatically generated by ARC for caching SRM information.\n"); header += "# Its format is lines with 3 entries separated by spaces:\n"; header += "# hostname port version\n#\n"; header += "# This file can be freely edited, but it is not advisable while there\n"; header += "# are on-going transfers. 
Comments begin with #\n#"; std::list filedata; Arc::FileLock filelock(srm_info_filename); bool acquired = false; for (int tries = 10; tries > 0; --tries) { acquired = filelock.acquire(); if (acquired) break; // sleep random time to minimise clashes int sleeptime = rand() % 500000 + 100000; Glib::usleep(sleeptime); } if (!acquired) { logger.msg(Arc::WARNING, "Failed to acquire lock on file %s", srm_info_filename); return; } if (!Arc::FileRead(srm_info_filename, filedata)) { // write new file filedata.push_back(header); } std::string lines; for (std::list::iterator line = filedata.begin(); line != filedata.end(); ++line) { if (line->empty()) continue; if ((*line)[0] == '#') { // check for old-style file - if so re-write whole file if (line->find("# Its format is lines with 4 entries separated by spaces:") == 0) { lines = header+'\n'; break; } lines += *line+'\n'; continue; } // split line std::vector fields; Arc::tokenize(*line, fields); if (fields.size() != 3) { if (line->length() > 0) { logger.msg(Arc::WARNING, "Bad or old format detected in file %s, in line %s", srm_info_filename, *line); } continue; } // if any line contains our combination of host and version, ignore it if (fields.at(0) == srm_file_info.host && fields.at(2) == srm_file_info.versionString()) { continue; } lines += *line+'\n'; } // add new info lines += srm_file_info.host + ' ' + Arc::tostring(srm_file_info.port) + ' ' + srm_file_info.versionString() + '\n'; // write everything back to the file if (!Arc::FileCreate(srm_info_filename, lines)) { logger.msg(Arc::WARNING, "Error writing srm info file %s", srm_info_filename); } filelock.release(); } } // namespace ArcDMCSRM { nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/PaxHeaders.7502/SRMClientRequest.h0000644000000000000000000000012312100254234025402 xustar000000000000000027 mtime=1359042716.344325 26 atime=1513200575.23371 30 ctime=1513200660.940758992 nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/SRMClientRequest.h0000644000175000002070000002016012100254234025447 0ustar00mockbuildmock00000000000000#ifndef SRMCLIENTREQUEST_H_ #define SRMCLIENTREQUEST_H_ #include namespace ArcDMCSRM { /// The version of the SRM protocol enum SRMVersion { SRM_V1, SRM_V2_2, SRM_VNULL }; /// Specifies whether file is on disk or only on tape enum SRMFileLocality { SRM_ONLINE, SRM_NEARLINE, SRM_UNKNOWN, SRM_STAGE_ERROR }; /// Quality of retention enum SRMRetentionPolicy { SRM_REPLICA, SRM_OUTPUT, SRM_CUSTODIAL, SRM_RETENTION_UNKNOWN }; /// The lifetime of the file enum SRMFileStorageType { SRM_VOLATILE, SRM_DURABLE, SRM_PERMANENT, SRM_FILE_STORAGE_UNKNOWN }; /// File, directory or link enum SRMFileType { SRM_FILE, SRM_DIRECTORY, SRM_LINK, SRM_FILE_TYPE_UNKNOWN }; /// Implementation of service. Found from srmPing (v2.2 only) enum SRMImplementation { SRM_IMPLEMENTATION_DCACHE, SRM_IMPLEMENTATION_CASTOR, SRM_IMPLEMENTATION_DPM, SRM_IMPLEMENTATION_STORM, SRM_IMPLEMENTATION_UNKNOWN }; /// General exception to represent a bad SRM request class SRMInvalidRequestException : public std::exception {}; /// The status of a request enum SRMRequestStatus { SRM_REQUEST_CREATED, SRM_REQUEST_ONGOING, SRM_REQUEST_FINISHED_SUCCESS, SRM_REQUEST_FINISHED_PARTIAL_SUCCESS, SRM_REQUEST_FINISHED_ERROR, SRM_REQUEST_SHOULD_ABORT, SRM_REQUEST_CANCELLED }; /// Class to represent a SRM request. /** * It may be used for multiple operations, for example calling getTURLs() * sets the request token in the request object (for a v2.2 client) and * then same object is passed to releaseGet(). 
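 *
 * A minimal usage sketch (illustrative only: the SURL is a hypothetical
 * example, "client" is assumed to be an SRMClient obtained from
 * SRMClient::getInstance(), and error handling is omitted):
 *
 *   SRMClientRequest request("srm://srm.example.org/path/to/file");
 *   std::list<std::string> turls;
 *   client->getTURLs(request, turls);  // stores the request token in request
 *   // ... read the data from one of the returned transfer URLs ...
 *   client->releaseGet(request);       // releases using the stored token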
*/ class SRMClientRequest { public: /// Creates a request object with multiple SURLs. /** * The URLs here are in the form * srm://srm.host.org/path/to/file */ SRMClientRequest(const std::list& urls) throw (SRMInvalidRequestException) : _request_id(0), _space_token(""), _waiting_time(1), _status(SRM_REQUEST_CREATED), _request_timeout(60), _total_size(0), _long_list(false), _recursion(0), _offset(0), _count(0) { if (urls.empty()) throw SRMInvalidRequestException(); for (std::list::const_iterator it = urls.begin(); it != urls.end(); ++it) _surls[*it] = SRM_UNKNOWN; }; /// Creates a request object with a single SURL. /** * The URL here is in the form * srm://srm.host.org/path/to/file */ SRMClientRequest(const std::string& url="", const std::string& id="") throw (SRMInvalidRequestException) : _request_id(0), _space_token(""), _waiting_time(1), _status(SRM_REQUEST_CREATED), _request_timeout(60), _total_size(0), _long_list(false), _recursion(0), _offset(0), _count(0) { if (url.empty() && id.empty()) throw SRMInvalidRequestException(); if (!url.empty()) _surls[url] = SRM_UNKNOWN; else _request_token = id; } void request_id(int id) { _request_id = id; } int request_id() const { return _request_id; } void request_token(const std::string& token) { _request_token = token; } std::string request_token() const { return _request_token; } void file_ids(const std::list& ids) { _file_ids = ids; } std::list file_ids() const { return _file_ids; } void space_token(const std::string& token) { _space_token = token; } std::string space_token() const { return _space_token; } /// Returns the first surl in the list std::string surl() const { return _surls.begin()->first; } std::list surls() const { std::list surl_list; for (std::map::const_iterator it = _surls.begin(); it != _surls.end(); ++it) { surl_list.push_back(it->first); } return surl_list; } void surl_statuses(const std::string& surl, SRMFileLocality locality) { _surls[surl] = locality; } std::map surl_statuses() const { return _surls; } void surl_failures(const std::string& surl, const std::string& reason) { _surl_failures[surl] = reason; } std::map surl_failures() const { return _surl_failures; } void waiting_time(int wait_time) { _waiting_time = wait_time; } /// Get waiting time. A waiting time of zero means no estimate was given /// by the remote service. 
int waiting_time() const { return _waiting_time; } /// Set status to SRM_REQUEST_FINISHED_SUCCESS void finished_success() { _status = SRM_REQUEST_FINISHED_SUCCESS; } /// Set status to SRM_REQUEST_FINISHED_PARTIAL_SUCCESS void finished_partial_success() { _status = SRM_REQUEST_FINISHED_PARTIAL_SUCCESS; } /// Set status to SRM_REQUEST_FINISHED_ERROR void finished_error() { _status = SRM_REQUEST_FINISHED_ERROR; } /// Set status to SRM_REQUEST_SHOULD_ABORT void finished_abort() { _status = SRM_REQUEST_SHOULD_ABORT; } /// Set waiting time to t and status to SRM_REQUEST_ONGOING void wait(int t = 0) { _status = SRM_REQUEST_ONGOING; _waiting_time = t; } /// Set status to SRM_REQUEST_CANCELLED void cancelled() { _status = SRM_REQUEST_CANCELLED; } /// Get status SRMRequestStatus status() const { return _status; } void request_timeout(unsigned int timeout) { _request_timeout = timeout; }; unsigned int request_timeout() const { return _request_timeout; }; void total_size(unsigned long long size) { _total_size = size; }; unsigned long long total_size() const { return _total_size; }; void long_list(bool list) { _long_list = list; } bool long_list() const { return _long_list; } void transport_protocols(const std::list& protocols) { _transport_protocols = protocols; } std::list transport_protocols() const { return _transport_protocols; } void recursion(int level) { _recursion = level; } int recursion() const { return _recursion; } void offset(unsigned int no) { _offset = no; } unsigned int offset() const { return _offset; } void count(unsigned int no) { _count = no; } unsigned int count() const { return _count; } private: /// The SURLs of the files involved in the request, mapped to their locality. std::map _surls; /// int ids are used in SRM1 int _request_id; /// string request tokens (eg "-21249586") are used in SRM2.2 std::string _request_token; /// A list of file ids is kept in SRM1 std::list _file_ids; /// The space token associated with a request std::string _space_token; /// A map of SURLs for which requests failed to failure reason. /// Used for bring online requests. std::map _surl_failures; /// Estimated waiting time as returned by the server to wait /// until the next poll of an asychronous request. int _waiting_time; /// Status of request. Only useful for asynchronous requests. SRMRequestStatus _status; /** * For operations like getTURLs and putTURLs _request_timeout specifies * the timeout for these operations to complete. If it is zero then * these operations will act asynchronously, i.e. return and expect * the caller to poll for the status. If it is non-zero the operations * will block until completed or this timeout has been reached. */ unsigned int _request_timeout; /// Total size of all files in request. Can be used when reserving space. 
unsigned long long _total_size; /// Whether a detailed listing is requested bool _long_list; /// List of requested transport protocols std::list _transport_protocols; /// Recursion level (for list or stat requests only) int _recursion; /// Offset at which to start listing (for large directories) unsigned int _offset; /// How many files to list, used with _offset unsigned int _count; }; } // namespace ArcDMCSRM #endif /* SRMCLIENTREQUEST_H_ */ nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/PaxHeaders.7502/SRMURL.h0000644000000000000000000000012312074031150023255 xustar000000000000000027 mtime=1357918824.946932 26 atime=1513200575.23171 30 ctime=1513200660.937758955 nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/SRMURL.h0000644000175000002070000000374312074031150023332 0ustar00mockbuildmock00000000000000#ifndef __HTTPSD_SRM_URL_H__ #define __HTTPSD_SRM_URL_H__ #include namespace ArcDMCSRM { class SRMURL:public Arc::URL { public: enum SRM_URL_VERSION { SRM_URL_VERSION_1, SRM_URL_VERSION_2_2, SRM_URL_VERSION_UNKNOWN }; /** * Examples shown for functions below assume the object was initiated with * srm://srm.ndgf.org/pnfs/ndgf.org/data/atlas/disk/user/user.mlassnig.dataset.1/dummyfile3 */ SRMURL(std::string url); /** * eg /srm/managerv2 */ const std::string& Endpoint(void) const { return Path(); }; /** * Possible values of version are "1" and "2.2" */ void SetSRMVersion(const std::string& version); /** * eg pnfs/ndgf.org/data/atlas/disk/user/user.mlassnig.dataset.1/dummyfile3 */ std::string FileName(void) const { if(!valid) return ""; return filename; }; /** * eg httpg://srm.ndgf.org:8443/srm/managerv2 */ std::string ContactURL(void) const ; /** * eg srm://srm.ndgf.org:8443/srm/managerv2?SFN= */ std::string BaseURL(void) const; /** * eg srm://srm.ndgf.org:8443/pnfs/ndgf.org/data/atlas/disk/user/user.mlassnig.dataset.1/dummyfile3 */ std::string ShortURL(void) const; /** * eg srm://srm.ndgf.org:8443/srm/managerv2?SFN=pnfs/ndgf.org/data/atlas/disk/user/user.mlassnig.dataset.1/dummyfile3 */ std::string FullURL(void) const; enum SRM_URL_VERSION SRMVersion() { return srm_version; }; bool Short(void) const { return isshort; }; void SetPort(int portno) { port = portno; }; /** Was the port number given in the constructor? 
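 * (Illustrative examples: for srm://srm.example.org:8443/path the port was
 * given explicitly and this returns true; for srm://srm.example.org/path
 * the constructor falls back to the default port 8443 and this returns
 * false.)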
*/ bool PortDefined() { return portdefined; }; operator bool(void) { return valid; }; bool operator!(void) { return !valid; }; private: std::string filename; bool isshort; bool valid; bool portdefined; enum SRM_URL_VERSION srm_version; }; } // namespace ArcDMCSRM #endif // __HTTPSD_SRM_URL_H__ nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/PaxHeaders.7502/SRM1Client.h0000644000000000000000000000012312100254234024112 xustar000000000000000027 mtime=1359042716.344325 26 atime=1513200575.22971 30 ctime=1513200660.934758918 nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/SRM1Client.h0000644000175000002070000000622312100254234024163 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __HTTPSD_SRM1_CLIENT_H__ #define __HTTPSD_SRM1_CLIENT_H__ #include "SRMClient.h" namespace ArcDMCSRM { using namespace Arc; class SRM1Client : public SRMClient { private: DataStatus acquire(SRMClientRequest& req, std::list& urls, bool source); public: SRM1Client(const UserConfig& usercfg, const SRMURL& url); ~SRM1Client(); // not supported in v1 DataStatus ping(std::string& /* version */) { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); } // not supported in v1 DataStatus getSpaceTokens(std::list& /* tokens */, const std::string& /* description */ = "") { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); } // not supported in v1 DataStatus getRequestTokens(std::list& /* tokens */, const std::string& /* description */ = "") { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); } // not supported in v1 DataStatus requestBringOnline(SRMClientRequest& /* req */) { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); } // not supported in v1 DataStatus requestBringOnlineStatus(SRMClientRequest& /* req */) { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); } // not supported DataStatus mkDir(SRMClientRequest& /* req */) { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); } // not supported DataStatus rename(SRMClientRequest& /* req */, const URL& /* newurl */) { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); } // not supported DataStatus checkPermissions(SRMClientRequest& /* req */) { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); } // v1 only operates in synchronous mode DataStatus getTURLs(SRMClientRequest& req, std::list& urls); DataStatus getTURLsStatus(SRMClientRequest& req, std::list& urls) { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); } DataStatus putTURLs(SRMClientRequest& req, std::list& urls); DataStatus putTURLsStatus(SRMClientRequest& req, std::list& urls) { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); } DataStatus releaseGet(SRMClientRequest& req); DataStatus releasePut(SRMClientRequest& req); DataStatus release(SRMClientRequest& req, bool source); DataStatus abort(SRMClientRequest& req, bool source); DataStatus info(SRMClientRequest& req, std::map >& metadata); DataStatus info(SRMClientRequest& req, std::list& metadata); DataStatus remove(SRMClientRequest& req); DataStatus copy(SRMClientRequest& req, const std::string& source); }; } // namespace ArcDMCSRM #endif // __HTTPSD_SRM1_CLIENT_H__ nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/PaxHeaders.7502/SRMClient.h0000644000000000000000000000012312100254234024031 xustar000000000000000027 mtime=1359042716.344325 26 atime=1513200575.22971 30 ctime=1513200660.933758906 nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/SRMClient.h0000644000175000002070000003127212100254234024104 
0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __HTTPSD_SRM_CLIENT_H__ #define __HTTPSD_SRM_CLIENT_H__ #include #include #include #include #include #include #include #include #include #include "SRMURL.h" #include "SRMClientRequest.h" namespace ArcDMCSRM { using namespace Arc; /// SRM-related file metadata struct SRMFileMetaData { std::string path; // absolute dir and file path long long int size; Time createdAtTime; Time lastModificationTime; std::string checkSumType; std::string checkSumValue; SRMFileLocality fileLocality; SRMRetentionPolicy retentionPolicy; SRMFileStorageType fileStorageType; SRMFileType fileType; std::list spaceTokens; std::string owner; std::string group; std::string permission; Period lifetimeLeft; // on the SURL Period lifetimeAssigned; }; /** * A client interface to the SRM protocol. Instances of SRM clients * are created by calling the getInstance() factory method. One client * instance can be used to make many requests to the same server (with * the same protocol version), but not multiple servers. */ class SRMClient { protected: /// URL of the service endpoint, eg httpg://srm.host.org:8443/srm/managerv2 /// All SURLs passed to methods must correspond to this endpoint. std::string service_endpoint; /// SOAP configuraton object MCCConfig cfg; /// SOAP client object ClientSOAP *client; /// SOAP namespace NS ns; /// The implementation of the server SRMImplementation implementation; /// Timeout for requests to the SRM service time_t user_timeout; /// The version of the SRM protocol used std::string version; /// Logger static Logger logger; /// Protected constructor SRMClient(const UserConfig& usercfg, const SRMURL& url); /// Process SOAP request DataStatus process(const std::string& action, PayloadSOAP *request, PayloadSOAP **response); public: /** * Create an SRMClient instance. The instance will be a SRM v2.2 client * unless another version is explicitly given in the url. * @param usercfg The user configuration. * @param url A SURL. A client connects to the service host derived from * this SURL. All operations with a client instance must use SURLs with * the same host as this one. * @param error Details of error if one occurred * @returns A pointer to an instance of SRMClient is returned, or NULL if * it was not possible to create one. */ static SRMClient* getInstance(const UserConfig& usercfg, const std::string& url, std::string& error); /** * Destructor */ virtual ~SRMClient(); /** * Returns the version of the SRM protocol used by this instance */ std::string getVersion() const { return version; } /** * Find out the version supported by the server this client * is connected to. Since this method is used to determine * which client version to instantiate, we may not want to * report an error to the user, so setting report_error to * false suppresses the error message. * @param version The version returned by the server * @returns DataStatus specifying outcome of operation */ virtual DataStatus ping(std::string& version) = 0; /** * Find the space tokens available to write to which correspond to * the space token description, if given. The list of tokens is * a list of numbers referring to the SRM internal definition of the * spaces, not user-readable strings. 
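 *
 * Illustrative call (a sketch; "mytokendesc" is just an example
 * description and client/req are assumed to exist already):
 * @code
 *   std::list<std::string> tokens;
 *   if (client->getSpaceTokens(tokens, "mytokendesc") && !tokens.empty())
 *     req.space_token(tokens.front());
 * @endcode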
* @param tokens The list filled by the service * @param description The space token description * @returns DataStatus specifying outcome of operation */ virtual DataStatus getSpaceTokens(std::list& tokens, const std::string& description = "") = 0; /** * Returns a list of request tokens for the user calling the method * which are still active requests, or the tokens corresponding to the * token description, if given. Was used by the old ngstage command but * is currently unused. * @param tokens The list filled by the service * @param description The user request description, which can be specified * when the request is created * @returns DataStatus specifying outcome of operation */ virtual DataStatus getRequestTokens(std::list& tokens, const std::string& description = "") = 0; /** * If the user wishes to copy a file from somewhere, getTURLs() is called * to retrieve the transport URL(s) to copy the file from. It may be used * synchronously or asynchronously, depending on the synchronous property * of the request object. In the former case it will block until * the TURLs are ready, in the latter case it will return after making the * request and getTURLsStatus() must be used to poll the request status if * it was not completed. * @param req The request object * @param urls A list of TURLs filled by the method * @returns DataStatus specifying outcome of operation */ virtual DataStatus getTURLs(SRMClientRequest& req, std::list& urls) = 0; /** * In the case where getTURLs was called asynchronously and the request * was not completed, this method should be called to poll the status of * the request. getTURLs must be called before this method and the request * object must have ongoing request status. * @param req The request object. Status must be ongoing. * @param urls A list of TURLs filled by the method if the request * completed successfully * @returns DataStatus specifying outcome of operation */ virtual DataStatus getTURLsStatus(SRMClientRequest& req, std::list& urls) = 0; /** * Submit a request to bring online files. If the synchronous property * of the request object is false, this operation is asynchronous and * the status of the request can be checked by calling * requestBringOnlineStatus() with the request token in req * which is assigned by this method. If the request is synchronous, this * operation blocks until the file(s) are online or the timeout specified * in the SRMClient constructor has passed. * @param req The request object * @returns DataStatus specifying outcome of operation */ virtual DataStatus requestBringOnline(SRMClientRequest& req) = 0; /** * Query the status of a request to bring files online. The SURLs map * of the request object is updated if the status of any files in the * request has changed. requestBringOnline() but be called before * this method. * @param req The request object to query the status of * @returns DataStatus specifying outcome of operation */ virtual DataStatus requestBringOnlineStatus(SRMClientRequest& req) = 0; /** * If the user wishes to copy a file to somewhere, putTURLs() is called * to retrieve the transport URL(s) to copy the file to. It may be used * synchronously or asynchronously, depending on the synchronous property * of the request object. In the former case it will block until * the TURLs are ready, in the latter case it will return after making the * request and putTURLsStatus() must be used to poll the request status if * it was not completed. 
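 *
 * Asynchronous polling sketch (illustrative only; a real caller should
 * bound the loop and handle errors): a zero request timeout selects
 * asynchronous mode, after which the status is polled with
 * putTURLsStatus() while the request is still ongoing.
 * @code
 *   req.request_timeout(0);                        // 0 => asynchronous mode
 *   std::list<std::string> turls;
 *   DataStatus res = client->putTURLs(req, turls);
 *   while (res && req.status() == SRM_REQUEST_ONGOING) {
 *     sleep(req.waiting_time() > 0 ? req.waiting_time() : 1);
 *     res = client->putTURLsStatus(req, turls);
 *   }
 * @endcode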
* @param req The request object * @param urls A list of TURLs filled by the method * @returns DataStatus specifying outcome of operation */ virtual DataStatus putTURLs(SRMClientRequest& req, std::list& urls) = 0; /** * In the case where putTURLs was called asynchronously and the request * was not completed, this method should be called to poll the status of * the request. putTURLs must be called before this method and the request * object must have ongoing request status. * @param req The request object. Status must be ongoing. * @param urls A list of TURLs filled by the method if the request * completed successfully * @returns DataStatus specifying outcome of operation */ virtual DataStatus putTURLsStatus(SRMClientRequest& req, std::list& urls) = 0; /** * Should be called after a successful copy from SRM storage. * @param req The request object * @returns DataStatus specifying outcome of operation */ virtual DataStatus releaseGet(SRMClientRequest& req) = 0; /** * Should be called after a successful copy to SRM storage. * @param req The request object * @returns DataStatus specifying outcome of operation */ virtual DataStatus releasePut(SRMClientRequest& req) = 0; /** * Used in SRM v1 only. Called to release files after successful transfer. * @param req The request object * @param source Whether source or destination is being released * @returns DataStatus specifying outcome of operation */ virtual DataStatus release(SRMClientRequest& req, bool source) = 0; /** * Called in the case of failure during transfer or releasePut. Releases * all TURLs involved in the transfer. * @param req The request object * @param source Whether source or destination is being aborted * @returns DataStatus specifying outcome of operation */ virtual DataStatus abort(SRMClientRequest& req, bool source) = 0; /** * Returns information on a file or files (v2.2 and higher) stored in SRM, * such as file size, checksum and estimated access latency. If a directory * or directories is listed with recursion >= 1 then the list mapped to * each SURL in metadata will contain the content of the directory or * directories. * @param req The request object * @param metadata A map mapping each SURL in the request to a list of * structs filled with file information. If a SURL is missing from the * map it means there was some problem accessing it. * @returns DataStatus specifying outcome of operation * @see SRMFileMetaData */ virtual DataStatus info(SRMClientRequest& req, std::map >& metadata) = 0; /** * Returns information on a file stored in an SRM, such as file size, * checksum and estimated access latency. If a directory is listed * with recursion >= 1 then the list in metadata will contain the * content of the directory. * @param req The request object * @param metadata A list of structs filled with file information. * @returns DataStatus specifying outcome of operation * @see SRMFileMetaData */ virtual DataStatus info(SRMClientRequest& req, std::list& metadata) = 0; /** * Delete a file physically from storage and the SRM namespace. * @param req The request object * @returns DataStatus specifying outcome of operation */ virtual DataStatus remove(SRMClientRequest& req) = 0; /** * Copy a file between two SRM storages. 
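 *
 * Sketch (illustrative; that the request object holds the destination
 * SURL while the source is passed separately follows the pull-mode
 * behaviour described for SRM22Client, and the URLs are placeholders):
 * @code
 *   SRMClientRequest copyreq("srm://dest.example.org/path/file");
 *   client->copy(copyreq, "srm://source.example.org/path/file");
 * @endcode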
* @param req The request object * @param source The source SURL * @returns DataStatus specifying outcome of operation */ virtual DataStatus copy(SRMClientRequest& req, const std::string& source) = 0; /** * Make required directories for the SURL in the request * @param req The request object * @returns DataStatus specifying outcome of operation */ virtual DataStatus mkDir(SRMClientRequest& req) = 0; /** * Rename the URL in req to newurl. * @oaram req The request object * @param newurl The new URL * @returns DataStatus specifying outcome of operation */ virtual DataStatus rename(SRMClientRequest& req, const URL& newurl) = 0; /** * Check permissions for the SURL in the request using the * current credentials. * @oaram req The request object * @returns DataStatus specifying outcome of operation */ virtual DataStatus checkPermissions(SRMClientRequest& req) = 0; operator bool() const { return client; } bool operator!() const { return !client; } }; } // namespace ArcDMCSRM #endif // __HTTPSD_SRM_CLIENT_H__ nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/PaxHeaders.7502/SRM1Client.cpp0000644000000000000000000000012312100254234024445 xustar000000000000000027 mtime=1359042716.344325 26 atime=1513200575.23971 30 ctime=1513200660.934758918 nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/SRM1Client.cpp0000644000175000002070000004317512100254234024525 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #ifdef WIN32 #include #endif #include "SRM1Client.h" #include #include namespace ArcDMCSRM { using namespace Arc; SRM1Client::SRM1Client(const UserConfig& usercfg, const SRMURL& url) : SRMClient(usercfg, url) { version = "v1"; ns["SRMv1Type"] = "http://www.themindelectric.com/package/diskCacheV111.srm/"; ns["SRMv1Meth"] = "http://tempuri.org/diskCacheV111.srm.server.SRMServerV1"; } SRM1Client::~SRM1Client() {} DataStatus SRM1Client::getTURLs(SRMClientRequest& creq, std::list& urls) { SRMURL srmurl(creq.surls().front()); std::list file_ids; PayloadSOAP request(ns); XMLNode method = request.NewChild("SRMv1Meth:get"); // Source file names XMLNode arg0node = method.NewChild("arg0"); arg0node.NewAttribute("SOAP-ENC:arrayType") = "xsd:string[1]"; arg0node.NewChild("item") = srmurl.FullURL(); // Protocols XMLNode arg1node = method.NewChild("arg1"); arg1node.NewAttribute("SOAP-ENC:arrayType") = "xsd:string[5]"; arg1node.NewChild("item") = "gsiftp"; arg1node.NewChild("item") = "https"; arg1node.NewChild("item") = "httpg"; arg1node.NewChild("item") = "http"; arg1node.NewChild("item") = "ftp"; PayloadSOAP *response = NULL; DataStatus status = process("get", &request, &response); if (!status) return DataStatus(DataStatus::ReadPrepareError, status.GetErrno(), status.GetDesc()); XMLNode result = (*response)["getResponse"]["Result"]; if (!result) { logger.msg(VERBOSE, "SRM did not return any information"); delete response; return DataStatus(DataStatus::ReadPrepareError, EARCRESINVAL, "SRM did not return any information"); } std::string request_state = (std::string)result["state"]; creq.request_id(result["requestId"]); time_t t_start = time(NULL); for (;;) { for (XMLNode n = result["fileStatuses"]["item"]; n; ++n) { if (strcasecmp(((std::string)n["state"]).c_str(), "ready") == 0) { if (n["TURL"]) { urls.push_back(n["TURL"]); file_ids.push_back(stringtoi(n["fileId"])); } } } if (!urls.empty()) break; // Have requested data if (request_state.empty()) break; // No data and no state - fishy if (strcasecmp(request_state.c_str(), "pending") != 0) break; if ((time(NULL) - 
t_start) > creq.request_timeout()) break; int retryDeltaTime = stringtoi(result["retryDeltaTime"]); if (retryDeltaTime < 1) retryDeltaTime = 1; if (retryDeltaTime > 10) retryDeltaTime = 10; sleep(retryDeltaTime); PayloadSOAP request(ns); request.NewChild("SRMv1Meth:getRequestStatus").NewChild("arg0") = tostring(creq.request_id()); delete response; response = NULL; status = process("getRequestStatus", &request, &response); if (!status) return DataStatus(DataStatus::ReadPrepareError, status.GetErrno(), status.GetDesc()); result = (*response)["getRequestStatusResponse"]["Result"]; if (!result) { logger.msg(VERBOSE, "SRM did not return any information"); delete response; return DataStatus(DataStatus::ReadPrepareError, EARCRESINVAL, "SRM did not return any information"); } request_state = (std::string)result["state"]; } creq.file_ids(file_ids); delete response; if (urls.empty()) return DataStatus(DataStatus::ReadPrepareError, EARCRESINVAL, "SRM did not return any TURLs"); return acquire(creq, urls, true); } DataStatus SRM1Client::putTURLs(SRMClientRequest& creq, std::list& urls) { SRMURL srmurl(creq.surls().front()); std::list file_ids; PayloadSOAP request(ns); XMLNode method = request.NewChild("SRMv1Meth:put"); // Source file names XMLNode arg0node = method.NewChild("arg0"); arg0node.NewAttribute("SOAP-ENC:arrayType") = "xsd:string[1]"; arg0node.NewChild("item") = srmurl.FullURL(); // Destination file names XMLNode arg1node = method.NewChild("arg1"); arg1node.NewAttribute("SOAP-ENC:arrayType") = "xsd:string[1]"; arg1node.NewChild("item") = srmurl.FullURL(); // Sizes XMLNode arg2node = method.NewChild("arg2"); arg2node.NewAttribute("SOAP-ENC:arrayType") = "xsd:string[1]"; arg2node.NewChild("item") = tostring(creq.total_size()); // Want Permanent XMLNode arg3node = method.NewChild("arg3"); arg3node.NewAttribute("SOAP-ENC:arrayType") = "xsd:string[1]"; arg3node.NewChild("item") = "true"; // Protocols XMLNode arg4node = method.NewChild("arg4"); arg4node.NewAttribute("SOAP-ENC:arrayType") = "xsd:string[5]"; arg4node.NewChild("item") = "gsiftp"; arg4node.NewChild("item") = "https"; arg4node.NewChild("item") = "httpg"; arg4node.NewChild("item") = "http"; arg4node.NewChild("item") = "ftp"; PayloadSOAP *response = NULL; DataStatus status = process("put", &request, &response); if (!status) return DataStatus(DataStatus::WritePrepareError, status.GetErrno(), status.GetDesc()); XMLNode result = (*response)["putResponse"]["Result"]; if (!result) { logger.msg(VERBOSE, "SRM did not return any information"); delete response; return DataStatus(DataStatus::WritePrepareError, EARCRESINVAL, "SRM did not return any information"); } std::string request_state = (std::string)result["state"]; creq.request_id(result["requestId"]); time_t t_start = time(NULL); for (;;) { for (XMLNode n = result["fileStatuses"]["item"]; n; ++n) { if (strcasecmp(((std::string)n["state"]).c_str(), "ready") == 0) { if (n["TURL"]) { urls.push_back(n["TURL"]); file_ids.push_back(stringtoi(n["fileId"])); } } } if (!urls.empty()) break; // Have requested data if (request_state.empty()) break; // No data and no state - fishy if (strcasecmp(request_state.c_str(), "pending") != 0) break; if ((time(NULL) - t_start) > creq.request_timeout()) break; int retryDeltaTime = stringtoi(result["retryDeltaTime"]); if (retryDeltaTime < 1) retryDeltaTime = 1; if (retryDeltaTime > 10) retryDeltaTime = 10; sleep(retryDeltaTime); PayloadSOAP request(ns); request.NewChild("SRMv1Meth:getRequestStatus").NewChild("arg0") = tostring(creq.request_id()); delete response; 
response = NULL; status = process("getRequestStatus", &request, &response); if (!status) return DataStatus(DataStatus::WritePrepareError, status.GetErrno(), status.GetDesc()); result = (*response)["getRequestStatusResponse"]["Result"]; if (!result) { logger.msg(VERBOSE, "SRM did not return any information"); delete response; return DataStatus(DataStatus::WritePrepareError, EARCRESINVAL, "SRM did not return any information"); } request_state = (std::string)result["state"]; } creq.file_ids(file_ids); delete response; if (urls.empty()) return DataStatus(DataStatus::ReadPrepareError, EARCRESINVAL, "SRM did not return any TURLs"); return acquire(creq, urls, false); } DataStatus SRM1Client::copy(SRMClientRequest& creq, const std::string& source) { SRMURL srmurl(creq.surls().front()); std::list file_ids; PayloadSOAP request(ns); XMLNode method = request.NewChild("SRMv1Meth:copy"); // Source file names XMLNode arg0node = method.NewChild("arg0"); arg0node.NewAttribute("SOAP-ENC:arrayType") = "xsd:string[1]"; arg0node.NewChild("item") = source; // Destination file names XMLNode arg1node = method.NewChild("arg1"); arg1node.NewAttribute("SOAP-ENC:arrayType") = "xsd:string[1]"; arg1node.NewChild("item") = srmurl.FullURL(); // Whatever XMLNode arg2node = method.NewChild("arg2"); arg2node.NewAttribute("SOAP-ENC:arrayType") = "xsd:string[1]"; arg2node.NewChild("item") = "false"; PayloadSOAP *response = NULL; DataStatus status = process("copy", &request, &response); if (status != 0) return status; XMLNode result = (*response)["copyResponse"]["Result"]; if (!result) { logger.msg(VERBOSE, "SRM did not return any information"); delete response; return DataStatus(DataStatus::TransferError, EARCRESINVAL, "SRM did not return any information"); } std::string request_state = (std::string)result["state"]; creq.request_id(result["requestId"]); time_t t_start = time(NULL); for (;;) { for (XMLNode n = result["fileStatuses"]["item"]; n; ++n) { if (strcasecmp(((std::string)n["state"]).c_str(), "ready") == 0) { file_ids.push_back(stringtoi(n["fileId"])); } } if (!file_ids.empty()) break; // Have requested data if (request_state.empty()) break; // No data and no state - fishy if ((strcasecmp(request_state.c_str(), "pending") != 0) && (strcasecmp(request_state.c_str(), "active") != 0)) break; if ((time(NULL) - t_start) > creq.request_timeout()) break; int retryDeltaTime = stringtoi(result["retryDeltaTime"]); if (retryDeltaTime < 1) retryDeltaTime = 1; if (retryDeltaTime > 10) retryDeltaTime = 10; sleep(retryDeltaTime); PayloadSOAP request(ns); request.NewChild("SRMv1Meth:getRequestStatus").NewChild("arg0") = tostring(creq.request_id()); delete response; response = NULL; status = process("getRequestStatus", &request, &response); if (status != 0) return status; result = (*response)["getRequestStatusResponse"]["Result"]; if (!result) { logger.msg(VERBOSE, "SRM did not return any information"); delete response; return DataStatus(DataStatus::TransferError, EARCRESINVAL, "SRM did not return any information"); } request_state = (std::string)result["state"]; } delete response; if (file_ids.empty()) return DataStatus(DataStatus::TransferError, EARCRESINVAL, "SRM did not return any file IDs"); creq.file_ids(file_ids); return release(creq, true); } DataStatus SRM1Client::acquire(SRMClientRequest& creq, std::list& urls, bool source) { std::list file_ids = creq.file_ids(); // Tell server to move files into "Running" state std::list::iterator file_id = file_ids.begin(); std::list::iterator file_url = urls.begin(); while (file_id != 
file_ids.end()) { PayloadSOAP request(ns); XMLNode method = request.NewChild("SRMv1Meth:setFileStatus"); // Request ID XMLNode arg0node = method.NewChild("arg0"); arg0node.NewAttribute("SOAP-ENC:arrayType") = "xsd:string[1]"; arg0node.NewChild("item") = tostring(creq.request_id()); // File ID XMLNode arg1node = method.NewChild("arg1"); arg1node.NewAttribute("SOAP-ENC:arrayType") = "xsd:string[1]"; arg1node.NewChild("item") = tostring(*file_id); // Running XMLNode arg2node = method.NewChild("arg2"); arg2node.NewAttribute("SOAP-ENC:arrayType") = "xsd:string[1]"; arg2node.NewChild("item") = "Running"; PayloadSOAP *response = NULL; DataStatus status = process("setFileStatus", &request, &response); if (status != 0) return status; XMLNode result = (*response)["setFileStatusResponse"]["Result"]; if (!result) { logger.msg(VERBOSE, "SRM did not return any information"); delete response; return DataStatus(source ? DataStatus::ReadPrepareError : DataStatus::WritePrepareError, EARCRESINVAL, "SRM did not return any information"); } for (XMLNode n = result["fileStatuses"]["item"]; n; ++n) { if (stringtoi(n["fileId"]) != *file_id) continue; if (strcasecmp(((std::string)n["state"]).c_str(), "running") == 0) { ++file_id; ++file_url; } else { logger.msg(VERBOSE, "File could not be moved to Running state: %s", *file_url); file_id = file_ids.erase(file_id); file_url = urls.erase(file_url); } } delete response; } creq.file_ids(file_ids); if (urls.empty()) return DataStatus(source ? DataStatus::ReadPrepareError : DataStatus::WritePrepareError, EARCRESINVAL, "SRM did not return any information"); return DataStatus::Success; } DataStatus SRM1Client::remove(SRMClientRequest& creq) { SRMURL srmurl(creq.surls().front()); PayloadSOAP request(ns); XMLNode method = request.NewChild("SRMv1Meth:advisoryDelete"); // File names XMLNode arg0node = method.NewChild("arg0"); arg0node.NewAttribute("SOAP-ENC:arrayType") = "xsd:string[1]"; arg0node.NewChild("item") = srmurl.FullURL(); PayloadSOAP *response = NULL; DataStatus status = process("advisoryDelete", &request, &response); delete response; return status; } DataStatus SRM1Client::info(SRMClientRequest& creq, std::map >& metadata) { SRMURL srmurl(creq.surls().front()); PayloadSOAP request(ns); XMLNode method = request.NewChild("SRMv1Meth:getFileMetaData"); // File names XMLNode arg0node = method.NewChild("arg0"); arg0node.NewAttribute("SOAP-ENC:arrayType") = "xsd:string[1]"; arg0node.NewChild("item") = srmurl.FullURL(); PayloadSOAP *response = NULL; DataStatus status = process("getFileMetaData", &request, &response); if (status != 0) return status; XMLNode result = (*response)["getFileMetaDataResponse"]["Result"]; if (!result) { logger.msg(VERBOSE, "SRM did not return any information"); delete response; return DataStatus(DataStatus::StatError, EARCRESINVAL, "SRM did not return any information"); } XMLNode mdata = result["item"]; if (!mdata) { logger.msg(VERBOSE, "SRM did not return any useful information"); delete response; return DataStatus(DataStatus::StatError, EARCRESINVAL, "SRM did not return any useful information"); } struct SRMFileMetaData md; md.path = srmurl.FileName(); // tidy up path std::string::size_type i = md.path.find("//"); while (i != std::string::npos) { md.path.erase(i, 1); i = md.path.find("//", i); } if (md.path.find("/") != 0) md.path = "/" + md.path; // date, type and locality not supported in v1 md.createdAtTime = (time_t)0; md.fileType = SRM_FILE_TYPE_UNKNOWN; md.fileLocality = SRM_UNKNOWN; md.size = stringtoull(mdata["size"]); if 
(mdata["checksumType"]) md.checkSumType = (std::string)mdata["checksumType"]; if (mdata["checksumValue"]) md.checkSumValue = (std::string)mdata["checksumValue"]; std::list mdlist; mdlist.push_back(md); metadata[creq.surls().front()] = mdlist; delete response; return DataStatus::Success; } DataStatus SRM1Client::info(SRMClientRequest& req, std::list& metadata) { std::map > metadata_map; DataStatus res = info(req, metadata_map); if (!res || metadata_map.find(req.surls().front()) == metadata_map.end()) return res; metadata = metadata_map[req.surls().front()]; return DataStatus::Success; } DataStatus SRM1Client::release(SRMClientRequest& creq, bool source) { std::list file_ids = creq.file_ids(); // Tell server to move files into "Done" state std::list::iterator file_id = file_ids.begin(); while (file_id != file_ids.end()) { PayloadSOAP request(ns); XMLNode method = request.NewChild("SRMv1Meth:setFileStatus"); // Request ID XMLNode arg0node = method.NewChild("arg0"); arg0node.NewAttribute("SOAP-ENC:arrayType") = "xsd:string[1]"; arg0node.NewChild("item") = tostring(creq.request_id()); // File ID XMLNode arg1node = method.NewChild("arg1"); arg1node.NewAttribute("SOAP-ENC:arrayType") = "xsd:string[1]"; arg1node.NewChild("item") = tostring(*file_id); // Done XMLNode arg2node = method.NewChild("arg2"); arg2node.NewAttribute("SOAP-ENC:arrayType") = "xsd:string[1]"; arg2node.NewChild("item") = "Done"; PayloadSOAP *response = NULL; DataStatus status = process("setFileStatus", &request, &response); if (status != 0) return status; XMLNode result = (*response)["setFileStatusResponse"]["Result"]; if (!result) { logger.msg(VERBOSE, "SRM did not return any information"); delete response; return DataStatus(source ? DataStatus::ReadFinishError : DataStatus::WriteFinishError, EARCRESINVAL, "SRM did not return any information"); } for (XMLNode n = result["fileStatuses"]["item"]; n; ++n) { if (stringtoi(n["fileId"]) != *file_id) continue; if (strcasecmp(((std::string)n["state"]).c_str(), "done") == 0) { ++file_id; } else { logger.msg(VERBOSE, "File could not be moved to Done state"); file_id = file_ids.erase(file_id); } } delete response; } creq.file_ids(file_ids); return DataStatus::Success; } DataStatus SRM1Client::releaseGet(SRMClientRequest& creq) { return release(creq, true); } DataStatus SRM1Client::releasePut(SRMClientRequest& creq) { return release(creq, false); } DataStatus SRM1Client::abort(SRMClientRequest& creq, bool source) { return release(creq, source); } } // namespace ArcDMCSRM nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/PaxHeaders.7502/SRM22Client.h0000644000000000000000000000012312100254234024175 xustar000000000000000027 mtime=1359042716.344325 26 atime=1513200575.23171 30 ctime=1513200660.936758943 nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/SRM22Client.h0000644000175000002070000001456512100254234024256 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __HTTPSD_SRM_CLIENT_2_2_H__ #define __HTTPSD_SRM_CLIENT_2_2_H__ #include "SRMClient.h" namespace ArcDMCSRM { using namespace Arc; class SRM22Client : public SRMClient { private: /// Return codes for requests and files as defined in spec enum SRMStatusCode { SRM_SUCCESS, SRM_FAILURE, SRM_AUTHENTICATION_FAILURE, SRM_AUTHORIZATION_FAILURE, SRM_INVALID_REQUEST, SRM_INVALID_PATH, SRM_FILE_LIFETIME_EXPIRED, SRM_SPACE_LIFETIME_EXPIRED, SRM_EXCEED_ALLOCATION, SRM_NO_USER_SPACE, SRM_NO_FREE_SPACE, SRM_DUPLICATION_ERROR, SRM_NON_EMPTY_DIRECTORY, SRM_TOO_MANY_RESULTS, SRM_INTERNAL_ERROR, SRM_FATAL_INTERNAL_ERROR, 
SRM_NOT_SUPPORTED, SRM_REQUEST_QUEUED, SRM_REQUEST_INPROGRESS, SRM_REQUEST_SUSPENDED, SRM_ABORTED, SRM_RELEASED, SRM_FILE_PINNED, SRM_FILE_IN_CACHE, SRM_SPACE_AVAILABLE, SRM_LOWER_SPACE_GRANTED, SRM_DONE, SRM_PARTIAL_SUCCESS, SRM_REQUEST_TIMED_OUT, SRM_LAST_COPY, SRM_FILE_BUSY, SRM_FILE_LOST, SRM_FILE_UNAVAILABLE, SRM_CUSTOM_STATUS }; /// Extract status code and explanation from xml result structure SRMStatusCode GetStatus(XMLNode res, std::string& explanation); /** * Remove a file by srmRm */ DataStatus removeFile(SRMClientRequest& req); /** * Remove a directory by srmRmDir */ DataStatus removeDir(SRMClientRequest& req); /** * Return a metadata struct with values filled from the given details * @param directory Whether these are entries in a directory. Determines * whether the full path is specified (if false) or not (if true) */ SRMFileMetaData fillDetails(XMLNode details, bool directory); /** * Fill out status of files in the request object from the file_statuses */ void fileStatus(SRMClientRequest& req, XMLNode file_statuses); /** * Convert SRM error code to errno. If file-level status is defined that * will be used over request-level status. */ int srm2errno(SRMStatusCode reqstat, SRMStatusCode filestat = SRM_SUCCESS); public: /** * Constructor */ SRM22Client(const UserConfig& usercfg, const SRMURL& url); /** * Destructor */ ~SRM22Client(); /** * Get the server version from srmPing */ DataStatus ping(std::string& version); /** * Use srmGetSpaceTokens to return a list of spaces available */ DataStatus getSpaceTokens(std::list& tokens, const std::string& description = ""); /** * Use srmGetRequestTokens to return a list of spaces available */ DataStatus getRequestTokens(std::list& tokens, const std::string& description = ""); /** * Get a list of TURLs for the given SURL. Uses srmPrepareToGet and waits * until file is ready (online and pinned) if the request is synchronous. * If not it returns after making the request. Although a list is returned, * SRMv2.2 only returns one TURL per SURL. */ DataStatus getTURLs(SRMClientRequest& req, std::list& urls); /** * Uses srmStatusOfGetRequest to query the status of the given request. */ DataStatus getTURLsStatus(SRMClientRequest& req, std::list& urls); /** * Retrieve TURLs which a file can be written to. Uses srmPrepareToPut and * waits until a suitable TURL has been assigned if the request is * synchronous. If not it returns after making the request. Although a * list is returned, SRMv2.2 only returns one TURL per SURL. */ DataStatus putTURLs(SRMClientRequest& req, std::list& urls); /** * Uses srmStatusOfPutRequest to query the status of the given request. */ DataStatus putTURLsStatus(SRMClientRequest& req, std::list& urls); /** * Call srmBringOnline with the SURLs specified in req. */ DataStatus requestBringOnline(SRMClientRequest& req); /** * Call srmStatusOfBringOnlineRequest and update req with any changes. */ DataStatus requestBringOnlineStatus(SRMClientRequest& req); /** * Use srmLs to get info on the given SURLs. Info on each file or content * of directory is put in a list of metadata structs. */ DataStatus info(SRMClientRequest& req, std::map >& metadata); /** * Use srmLs to get info on the given SURL. Info on each file or content * of directory is put in a list of metadata structs */ DataStatus info(SRMClientRequest& req, std::list& metadata); /** * Release files that have been pinned by srmPrepareToGet using * srmReleaseFiles. Called after successful file transfer or * failed prepareToGet. 
*/ DataStatus releaseGet(SRMClientRequest& req); /** * Mark a put request as finished. * Called after successful file transfer or failed prepareToPut. */ DataStatus releasePut(SRMClientRequest& req); /** * Not used in this version of SRM */ DataStatus release(SRMClientRequest& /* req */, bool /* source */) { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); } /** * Abort request. * Called after any failure in the data transfer or putDone calls */ DataStatus abort(SRMClientRequest& req, bool source); /** * Delete by srmRm or srmRmDir */ DataStatus remove(SRMClientRequest& req); /** * Implemented in pull mode, ie the endpoint defined in the * request object performs the copy. */ DataStatus copy(SRMClientRequest& req, const std::string& source); /** * Call srmMkDir */ DataStatus mkDir(SRMClientRequest& req); /** * Call srmMv */ DataStatus rename(SRMClientRequest& req, const URL& newurl); /** * Call srmCheckPermission */ DataStatus checkPermissions(SRMClientRequest& req); }; } // namespace ArcDMCSRM #endif // __HTTPSD_SRM_CLIENT_2_2_H__ nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/PaxHeaders.7502/SRMInfo.h0000644000000000000000000000012312074031150023506 xustar000000000000000027 mtime=1357918824.946932 26 atime=1513200575.24171 30 ctime=1513200660.939758979 nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/SRMInfo.h0000644000175000002070000000172112074031150023555 0ustar00mockbuildmock00000000000000#ifndef SRM_INFO_H_ #define SRM_INFO_H_ #include #include "SRMURL.h" namespace ArcDMCSRM { /** * Info about a particular entry in the SRM info file */ class SRMFileInfo { public: std::string host; int port; enum SRMURL::SRM_URL_VERSION version; SRMFileInfo(const std::string& host, int port, const std::string& version); SRMFileInfo(); bool operator==(SRMURL srm_url); std::string versionString() const; }; /** * Represents SRM info stored in file. A combination of host and SRM * version make a unique entry. 
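 *
 * Usage sketch (illustrative; the cache directory, host, port and the
 * version string "2.2" are assumed example values, and a cached line in
 * the info file would then look like "srm.example.org 8443 2.2"):
 * @code
 *   SRMInfo info("/var/cache/arc");
 *   SRMFileInfo f("srm.example.org", 8443, "2.2");
 *   if (info.getSRMFileInfo(f)) {
 *     // f.port now holds the cached port for this host and version
 *   } else {
 *     info.putSRMFileInfo(f);  // cache the entry once the port is known
 *   }
 * @endcode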
*/ class SRMInfo { public: SRMInfo(std::string dir); bool getSRMFileInfo(SRMFileInfo& srm_file_info); void putSRMFileInfo(const SRMFileInfo& srm_file_info); private: static Arc::SimpleCondition lock; // cached info in memory, to avoid constantly reading file static std::list srm_info; static Arc::Logger logger; std::string srm_info_filename; }; } //namespace ArcDMCSRM #endif /*SRM_INFO_H_*/ nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/PaxHeaders.7502/SRMURL.cpp0000644000000000000000000000012312074031150023610 xustar000000000000000027 mtime=1357918824.946932 26 atime=1513200575.22971 30 ctime=1513200660.936758943 nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/SRMURL.cpp0000644000175000002070000000422612074031150023662 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "SRMURL.h" namespace ArcDMCSRM { SRMURL::SRMURL(std::string url) try: URL(url) { portdefined = false; if(protocol != "srm") { valid=false; return; }; valid=true; if(port <= 0) port=8443; else portdefined = true; srm_version = SRM_URL_VERSION_2_2; // v2.2 unless explicitly made otherwise if(HTTPOption("SFN", "") != "") { filename=HTTPOption("SFN"); isshort=false; path = '/'+path; for(;path.size() > 1;) { if(path[1] != '/') break; path.erase(0,1); }; if (path[path.size()-1] == '1') srm_version = SRM_URL_VERSION_1; } else { if(path.length() > 0) filename=path.c_str()+1; // Skip leading '/' path = "/srm/managerv2"; isshort=true; } } catch (std::exception& e) { valid=false; } void SRMURL::SetSRMVersion(const std::string& version) { if (version.empty()) return; if (version == "1") { srm_version = SRM_URL_VERSION_1; path = "/srm/managerv1"; } else if (version == "2.2") { srm_version = SRM_URL_VERSION_2_2; path = "/srm/managerv2"; } else { srm_version = SRM_URL_VERSION_UNKNOWN; } } std::string SRMURL::ContactURL(void) const { if(!valid) return ""; std::string contact_protocol("httpg"); if((Option("protocol") == "tls") || (Option("protocol") == "ssl")) { contact_protocol = "https"; } if(!Option("protocol").empty()) { return (contact_protocol+"://"+host+":"+Arc::tostring(port)+";protocol="+Option("protocol")+path); } return (contact_protocol+"://"+host+":"+Arc::tostring(port)+path); } std::string SRMURL::BaseURL(void) const { if(!valid) return ""; return (protocol+"://"+host+":"+Arc::tostring(port)+path+"?SFN="); } std::string SRMURL::FullURL(void) const { if(!valid) return ""; return (protocol+"://"+host+":"+Arc::tostring(port)+path+"?SFN="+filename); } std::string SRMURL::ShortURL(void) const { return (protocol+"://"+host+":"+Arc::tostring(port)+"/"+filename); } } // namespace ArcDMCSRM nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/PaxHeaders.7502/SRM22Client.cpp0000644000000000000000000000012212357244634024550 xustar000000000000000027 mtime=1404914076.312585 26 atime=1513200575.22771 29 ctime=1513200660.93575893 nordugrid-arc-5.4.2/src/hed/dmc/srm/srmclient/SRM22Client.cpp0000644000175000002070000017406512357244634024634 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #ifdef WIN32 #include #endif #include "SRM22Client.h" #include #include #include namespace ArcDMCSRM { using namespace Arc; /** * The max number of files returned when listing dirs * current limits are 1000 for dcache, 1024 for castor * info() will be called multiple times for directories * with more entries than max_files_list */ const static unsigned int max_files_list = 999; SRM22Client::SRMStatusCode SRM22Client::GetStatus(XMLNode res, std::string& explanation) { 
std::string statuscode = (std::string)res["statusCode"]; if (res["explanation"]) explanation = (std::string)res["explanation"]; if (statuscode == "SRM_SUCCESS") return SRM_SUCCESS; if (statuscode == "SRM_FAILURE") return SRM_FAILURE; if (statuscode == "SRM_AUTHENTICATION_FAILURE") return SRM_AUTHENTICATION_FAILURE; if (statuscode == "SRM_AUTHORIZATION_FAILURE") return SRM_AUTHORIZATION_FAILURE; if (statuscode == "SRM_INVALID_REQUEST") return SRM_INVALID_REQUEST; if (statuscode == "SRM_INVALID_PATH") return SRM_INVALID_PATH; if (statuscode == "SRM_FILE_LIFETIME_EXPIRED") return SRM_FILE_LIFETIME_EXPIRED; if (statuscode == "SRM_SPACE_LIFETIME_EXPIRED") return SRM_SPACE_LIFETIME_EXPIRED; if (statuscode == "SRM_EXCEED_ALLOCATION") return SRM_EXCEED_ALLOCATION; if (statuscode == "SRM_NO_USER_SPACE") return SRM_NO_USER_SPACE; if (statuscode == "SRM_NO_FREE_SPACE") return SRM_NO_FREE_SPACE; if (statuscode == "SRM_DUPLICATION_ERROR") return SRM_DUPLICATION_ERROR; if (statuscode == "SRM_NON_EMPTY_DIRECTORY") return SRM_NON_EMPTY_DIRECTORY; if (statuscode == "SRM_TOO_MANY_RESULTS") return SRM_TOO_MANY_RESULTS; if (statuscode == "SRM_INTERNAL_ERROR") return SRM_INTERNAL_ERROR; if (statuscode == "SRM_FATAL_INTERNAL_ERROR") return SRM_FATAL_INTERNAL_ERROR; if (statuscode == "SRM_NOT_SUPPORTED") return SRM_NOT_SUPPORTED; if (statuscode == "SRM_REQUEST_QUEUED") return SRM_REQUEST_QUEUED; if (statuscode == "SRM_REQUEST_INPROGRESS") return SRM_REQUEST_INPROGRESS; if (statuscode == "SRM_REQUEST_SUSPENDED") return SRM_REQUEST_SUSPENDED; if (statuscode == "SRM_ABORTED") return SRM_ABORTED; if (statuscode == "SRM_RELEASED") return SRM_RELEASED; if (statuscode == "SRM_FILE_PINNED") return SRM_FILE_PINNED; if (statuscode == "SRM_FILE_IN_CACHE") return SRM_FILE_IN_CACHE; if (statuscode == "SRM_SPACE_AVAILABLE") return SRM_SPACE_AVAILABLE; if (statuscode == "SRM_LOWER_SPACE_GRANTED") return SRM_LOWER_SPACE_GRANTED; if (statuscode == "SRM_DONE") return SRM_DONE; if (statuscode == "SRM_PARTIAL_SUCCESS") return SRM_PARTIAL_SUCCESS; if (statuscode == "SRM_REQUEST_TIMED_OUT") return SRM_REQUEST_TIMED_OUT; if (statuscode == "SRM_LAST_COPY") return SRM_LAST_COPY; if (statuscode == "SRM_FILE_BUSY") return SRM_FILE_BUSY; if (statuscode == "SRM_FILE_LOST") return SRM_FILE_LOST; if (statuscode == "SRM_FILE_UNAVAILABLE") return SRM_FILE_UNAVAILABLE; if (statuscode == "SRM_CUSTOM_STATUS") return SRM_CUSTOM_STATUS; // fallback - should not happen return SRM_FAILURE; } SRM22Client::SRM22Client(const UserConfig& usercfg, const SRMURL& url) : SRMClient(usercfg, url) { version = "v2.2"; ns["SRMv2"] = "http://srm.lbl.gov/StorageResourceManager"; } SRM22Client::~SRM22Client() {} DataStatus SRM22Client::ping(std::string& version) { PayloadSOAP request(ns); request.NewChild("SRMv2:srmPing").NewChild("srmPingRequest"); PayloadSOAP *response = NULL; DataStatus status = process("", &request, &response); if (!status) return status; XMLNode res = (*response)["srmPingResponse"]["srmPingResponse"]; if (!res) { logger.msg(VERBOSE, "Could not determine version of server"); delete response; return DataStatus(DataStatus::GenericError, EARCRESINVAL, "Could not determine version of server"); } version = (std::string)res["versionInfo"]; logger.msg(VERBOSE, "Server SRM version: %s", version); for (XMLNode n = res["otherInfo"]["extraInfoArray"]; n; ++n) { if ((std::string)n["key"] == "backend_type") { std::string value = (std::string)n["value"]; logger.msg(VERBOSE, "Server implementation: %s", value); if (value == "dCache") implementation = 
SRM_IMPLEMENTATION_DCACHE; else if (value == "CASTOR") implementation = SRM_IMPLEMENTATION_CASTOR; else if (value == "DPM") implementation = SRM_IMPLEMENTATION_DPM; else if (value == "StoRM") implementation = SRM_IMPLEMENTATION_STORM; } } delete response; return DataStatus::Success; } DataStatus SRM22Client::getSpaceTokens(std::list& tokens, const std::string& description) { PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmGetSpaceTokens") .NewChild("srmGetSpaceTokensRequest"); if (!description.empty()) req.NewChild("userSpaceTokenDescription") = description; PayloadSOAP *response = NULL; DataStatus status = process("", &request, &response); if (!status) return status; XMLNode res = (*response)["srmGetSpaceTokensResponse"] ["srmGetSpaceTokensResponse"]; std::string explanation; SRMStatusCode statuscode = GetStatus(res["returnStatus"], explanation); if (statuscode != SRM_SUCCESS) { logger.msg(VERBOSE, "%s", explanation); delete response; return DataStatus(DataStatus::WritePrepareError, srm2errno(statuscode), explanation); } for (XMLNode n = res["arrayOfSpaceTokens"]["stringArray"]; n; ++n) { std::string token = (std::string)n; logger.msg(VERBOSE, "Adding space token %s", token); tokens.push_back(token); } delete response; return DataStatus::Success; } DataStatus SRM22Client::getRequestTokens(std::list& tokens, const std::string& description) { PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmGetRequestTokens") .NewChild("srmGetRequestTokensRequest"); if (!description.empty()) req.NewChild("userRequestDescription") = description; PayloadSOAP *response = NULL; DataStatus status = process("", &request, &response); if (!status) return status; XMLNode res = (*response)["srmGetRequestTokensResponse"] ["srmGetRequestTokensResponse"]; std::string explanation; SRMStatusCode statuscode = GetStatus(res["returnStatus"], explanation); if (statuscode == SRM_INVALID_REQUEST) { logger.msg(VERBOSE, "No request tokens found"); delete response; return DataStatus::Success; } if (statuscode != SRM_SUCCESS) { logger.msg(VERBOSE, "%s", explanation); delete response; return DataStatus(DataStatus::GenericError, srm2errno(statuscode), explanation); } for (XMLNode n = res["arrayOfRequestTokens"]["tokenArray"]; n; ++n) { std::string token = (std::string)n["requestToken"]; logger.msg(VERBOSE, "Adding request token %s", token); tokens.push_back(token); } delete response; return DataStatus::Success; } DataStatus SRM22Client::getTURLs(SRMClientRequest& creq, std::list& urls) { // only one file requested at a time PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmPrepareToGet") .NewChild("srmPrepareToGetRequest"); req.NewChild("arrayOfFileRequests").NewChild("requestArray") .NewChild("sourceSURL") = creq.surl(); XMLNode protocols = req.NewChild("transferParameters") .NewChild("arrayOfTransferProtocols"); std::list transport_protocols(creq.transport_protocols()); for (std::list::iterator prot = transport_protocols.begin(); prot != transport_protocols.end(); ++prot) { protocols.NewChild("stringArray") = *prot; } PayloadSOAP *response = NULL; DataStatus status = process("", &request, &response); if (!status) { creq.finished_error(); return status; } XMLNode res = (*response)["srmPrepareToGetResponse"] ["srmPrepareToGetResponse"]; std::string explanation; SRMStatusCode statuscode = GetStatus(res["returnStatus"], explanation); // store the request token in the request object if (res["requestToken"]) creq.request_token(res["requestToken"]); if (statuscode == SRM_REQUEST_QUEUED || 
statuscode == SRM_REQUEST_INPROGRESS) { // file is queued - if asynchronous need to wait and query with returned request token unsigned int sleeptime = 1; if (res["arrayOfFileStatuses"]["statusArray"]["estimatedWaitTime"]) { sleeptime = stringtoi(res["arrayOfFileStatuses"]["statusArray"] ["estimatedWaitTime"]); } if (creq.request_timeout() == 0) { creq.wait(sleeptime); delete response; return DataStatus::Success; } unsigned int request_time = 0; while (request_time < creq.request_timeout()) { // sleep for recommended time (within limits) sleeptime = (sleeptime < 1) ? 1 : sleeptime; sleeptime = (sleeptime > creq.request_timeout() - request_time) ? creq.request_timeout() - request_time : sleeptime; logger.msg(VERBOSE, "%s: File request %s in SRM queue. Sleeping for %i seconds", creq.surl(), creq.request_token(), sleeptime); sleep(sleeptime); request_time += sleeptime; // get status of request DataStatus status_res = getTURLsStatus(creq, urls); if (creq.status() != SRM_REQUEST_ONGOING) { delete response; return status_res; } sleeptime = creq.waiting_time(); } // if we get here it means a timeout occurred std::string err_msg("PrepareToGet request timed out after " + tostring(creq.request_timeout()) + " seconds"); logger.msg(VERBOSE, err_msg); creq.finished_abort(); delete response; return DataStatus(DataStatus::ReadPrepareError, EARCREQUESTTIMEOUT, err_msg); } // if file queued else if (statuscode != SRM_SUCCESS) { // any other return code is a failure std::string file_explanation; SRMStatusCode file_statuscode = GetStatus(res["arrayOfFileStatuses"]["statusArray"]["status"], file_explanation); if (explanation.empty()) explanation = file_explanation; else if (!file_explanation.empty()) explanation += ": " + file_explanation; logger.msg(VERBOSE, explanation); creq.finished_error(); delete response; return DataStatus(DataStatus::ReadPrepareError, srm2errno(statuscode, file_statuscode), explanation); } // the file is ready and pinned - we can get the TURL std::string turl = (std::string)res["arrayOfFileStatuses"]["statusArray"]["transferURL"]; logger.msg(VERBOSE, "File is ready! 
TURL is %s", turl); urls.push_back(turl); creq.finished_success(); delete response; return DataStatus::Success; } DataStatus SRM22Client::getTURLsStatus(SRMClientRequest& creq, std::list& urls) { PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmStatusOfGetRequest") .NewChild("srmStatusOfGetRequestRequest"); req.NewChild("requestToken") = creq.request_token(); PayloadSOAP *response = NULL; DataStatus status = process("", &request, &response); if (!status) { creq.finished_abort(); return status; } XMLNode res = (*response)["srmStatusOfGetRequestResponse"] ["srmStatusOfGetRequestResponse"]; std::string explanation; SRMStatusCode statuscode = GetStatus(res["returnStatus"], explanation); if (statuscode == SRM_REQUEST_QUEUED || statuscode == SRM_REQUEST_INPROGRESS) { // still queued - keep waiting int sleeptime = 1; if (res["arrayOfFileStatuses"]["statusArray"]["estimatedWaitTime"]) { sleeptime = stringtoi(res["arrayOfFileStatuses"]["statusArray"]["estimatedWaitTime"]); } creq.wait(sleeptime); } else if (statuscode != SRM_SUCCESS) { // error std::string file_explanation; SRMStatusCode file_statuscode = GetStatus(res["arrayOfFileStatuses"]["statusArray"]["status"], file_explanation); if (explanation.empty()) explanation = file_explanation; else if (!file_explanation.empty()) explanation += ": " + file_explanation; logger.msg(VERBOSE, explanation); creq.finished_error(); delete response; return DataStatus(DataStatus::ReadPrepareError, srm2errno(statuscode, file_statuscode), explanation); } else { // success, TURL is ready std::string turl = (std::string)res["arrayOfFileStatuses"]["statusArray"]["transferURL"]; logger.msg(VERBOSE, "File is ready! TURL is %s", turl); urls.push_back(std::string(turl)); creq.finished_success(); } delete response; return DataStatus::Success; } DataStatus SRM22Client::requestBringOnline(SRMClientRequest& creq) { PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmBringOnline") .NewChild("srmBringOnlineRequest"); std::list surls = creq.surls(); for (std::list::iterator it = surls.begin(); it != surls.end(); ++it) { req.NewChild("arrayOfFileRequests").NewChild("requestArray") .NewChild("sourceSURL") = *it; } // should not be needed but dcache returns NullPointerException if // it is not given XMLNode protocols = req.NewChild("transferParameters") .NewChild("arrayOfTransferProtocols"); protocols.NewChild("stringArray") = "gsiftp"; protocols.NewChild("stringArray") = "https"; protocols.NewChild("stringArray") = "httpg"; protocols.NewChild("stringArray") = "http"; protocols.NewChild("stringArray") = "ftp"; // store the user id as part of the request, so they can find it later std::string user = User().Name(); if (!user.empty()) { logger.msg(VERBOSE, "Setting userRequestDescription to %s", user); req.NewChild("userRequestDescription") = user; } PayloadSOAP *response = NULL; DataStatus status = process("", &request, &response); if (!status) { creq.finished_error(); return status; } XMLNode res = (*response)["srmBringOnlineResponse"] ["srmBringOnlineResponse"]; std::string explanation; SRMStatusCode statuscode = GetStatus(res["returnStatus"], explanation); // store the request token in the request object if (res["requestToken"]) creq.request_token(res["requestToken"]); if (statuscode == SRM_SUCCESS) { // this means files are all already online for (std::list::iterator it = surls.begin(); it != surls.end(); ++it) { creq.surl_statuses(*it, SRM_ONLINE); } creq.finished_success(); delete response; return DataStatus::Success; } if (statuscode == 
SRM_REQUEST_QUEUED || statuscode == SRM_REQUEST_INPROGRESS) { unsigned int sleeptime = 10; if (statuscode == SRM_REQUEST_INPROGRESS && res["arrayOfFileStatuses"]) { // some files have been queued and some are online. Check each file fileStatus(creq, res["arrayOfFileStatuses"]); } if (res["arrayOfFileStatuses"]["statusArray"]["estimatedWaitTime"]) { sleeptime = stringtoi(res["arrayOfFileStatuses"]["statusArray"]["estimatedWaitTime"]); } if (creq.request_timeout() == 0) { creq.wait(sleeptime); delete response; return DataStatus::Success; } unsigned int request_time = 0; while (request_time < creq.request_timeout()) { // sleep for recommended time (within limits) sleeptime = (sleeptime < 1) ? 1 : sleeptime; sleeptime = (sleeptime > creq.request_timeout() - request_time) ? creq.request_timeout() - request_time : sleeptime; logger.msg(VERBOSE, "%s: Bring online request %s in SRM queue. Sleeping for %i seconds", creq.surl(), creq.request_token(), sleeptime); sleep(sleeptime); request_time += sleeptime; // get status of request DataStatus status_res = requestBringOnlineStatus(creq); if (creq.status() != SRM_REQUEST_ONGOING) { delete response; return status_res; } sleeptime = creq.waiting_time(); } // if we get here it means a timeout occurred std::string err_msg("Bring online request timed out after " + tostring(creq.request_timeout()) + " seconds"); logger.msg(VERBOSE, err_msg); creq.finished_abort(); delete response; return DataStatus(DataStatus::ReadPrepareError, EARCREQUESTTIMEOUT, err_msg); } if (statuscode == SRM_PARTIAL_SUCCESS) { // some files are already online, some failed. check each file fileStatus(creq, res["arrayOfFileStatuses"]); creq.finished_partial_success(); delete response; return DataStatus::Success; } // here means an error code was returned and all files failed std::string file_explanation; SRMStatusCode file_statuscode = GetStatus(res["arrayOfFileStatuses"]["statusArray"]["status"], file_explanation); if (explanation.empty()) explanation = file_explanation; else if (!file_explanation.empty()) explanation += ": " + file_explanation; logger.msg(VERBOSE, explanation); creq.finished_error(); delete response; return DataStatus(DataStatus::ReadPrepareError, srm2errno(statuscode, file_statuscode), explanation); } DataStatus SRM22Client::requestBringOnlineStatus(SRMClientRequest& creq) { if (creq.request_token().empty()) { logger.msg(VERBOSE, "No request token specified!"); creq.finished_abort(); return DataStatus(DataStatus::ReadPrepareError, EINVAL, "No request token specified"); } PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmStatusOfBringOnlineRequest") .NewChild("srmStatusOfBringOnlineRequestRequest"); req.NewChild("requestToken") = creq.request_token(); PayloadSOAP *response = NULL; DataStatus status = process("", &request, &response); if (!status) { creq.finished_abort(); return status; } XMLNode res = (*response)["srmStatusOfBringOnlineRequestResponse"] ["srmStatusOfBringOnlineRequestResponse"]; std::string explanation; SRMStatusCode statuscode = GetStatus(res["returnStatus"], explanation); if (statuscode == SRM_SUCCESS) { // this means files are all online fileStatus(creq, res["arrayOfFileStatuses"]); creq.finished_success(); delete response; return DataStatus::Success; } if (statuscode == SRM_REQUEST_QUEUED) { // all files are in the queue - leave statuses as they are int sleeptime = 1; if (res["arrayOfFileStatuses"]["statusArray"]["estimatedWaitTime"]) { sleeptime = stringtoi(res["arrayOfFileStatuses"]["statusArray"]["estimatedWaitTime"]); } 
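// record the server's recommended wait on the request object; the caller
// reads it back via waiting_time() and polls again after that interval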
creq.wait(sleeptime); delete response; return DataStatus::Success; } if (statuscode == SRM_REQUEST_INPROGRESS) { // some files have been queued and some are online. check each file fileStatus(creq, res["arrayOfFileStatuses"]); int sleeptime = 1; if (res["arrayOfFileStatuses"]["statusArray"]["estimatedWaitTime"]) { sleeptime = stringtoi(res["arrayOfFileStatuses"]["statusArray"]["estimatedWaitTime"]); } creq.wait(sleeptime); delete response; return DataStatus::Success; } if (statuscode == SRM_PARTIAL_SUCCESS) { // some files are online, some failed. check each file fileStatus(creq, res["arrayOfFileStatuses"]); creq.finished_partial_success(); delete response; return DataStatus::Success; } if (statuscode == SRM_ABORTED) { // The request was aborted or finished successfully. dCache reports // SRM_ABORTED after the first time a successful request is queried // so we have to look at the explanation string for the real reason. if (explanation.find("All files are done") != std::string::npos) { logger.msg(VERBOSE, "Request is reported as ABORTED, but all files are done"); creq.finished_success(); delete response; return DataStatus::Success; } else if (explanation.find("Canceled") != std::string::npos) { logger.msg(VERBOSE, "Request is reported as ABORTED, since it was cancelled"); creq.cancelled(); delete response; return DataStatus::Success; } else { logger.msg(VERBOSE, "Request is reported as ABORTED. Reason: %s", explanation); creq.finished_error(); delete response; return DataStatus(DataStatus::ReadPrepareError, srm2errno(statuscode), explanation); } } // here means an error code was returned and all files failed fileStatus(creq, res["arrayOfFileStatuses"]); std::string file_explanation; SRMStatusCode file_statuscode = GetStatus(res["arrayOfFileStatuses"]["statusArray"]["status"], file_explanation); if (explanation.empty()) explanation = file_explanation; else if (!file_explanation.empty()) explanation += ": " + file_explanation; logger.msg(VERBOSE, explanation); creq.finished_error(); delete response; return DataStatus(DataStatus::ReadPrepareError, srm2errno(statuscode, file_statuscode), explanation); } void SRM22Client::fileStatus(SRMClientRequest& creq, XMLNode file_statuses) { int waittime = 0; for (XMLNode n = file_statuses["statusArray"]; n; ++n) { std::string surl = (std::string)n["sourceSURL"]; // store the largest estimated waiting time if (n["estimatedWaitTime"]) { int estimatedWaitTime = stringtoi(n["estimatedWaitTime"]); if (estimatedWaitTime > waittime) waittime = estimatedWaitTime; } std::string explanation; SRMStatusCode filestatus = GetStatus(n["status"], explanation); if (filestatus == SRM_SUCCESS || filestatus == SRM_FILE_IN_CACHE) { creq.surl_statuses(surl, SRM_ONLINE); } else if (filestatus == SRM_REQUEST_QUEUED || filestatus == SRM_REQUEST_INPROGRESS) { creq.surl_statuses(surl, SRM_NEARLINE); } else { creq.surl_statuses(surl, SRM_STAGE_ERROR); creq.surl_failures(surl, explanation); } } creq.waiting_time(waittime); } DataStatus SRM22Client::putTURLs(SRMClientRequest& creq, std::list& urls) { // only one file requested at a time PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmPrepareToPut") .NewChild("srmPrepareToPutRequest"); XMLNode reqarray = req.NewChild("arrayOfFileRequests") .NewChild("requestArray"); reqarray.NewChild("targetSURL") = creq.surl(); reqarray.NewChild("expectedFileSize") = tostring(creq.total_size()); req.NewChild("desiredFileStorageType") = "PERMANENT"; XMLNode protocols = req.NewChild("transferParameters") 
.NewChild("arrayOfTransferProtocols"); std::list transport_protocols(creq.transport_protocols()); for (std::list::iterator prot = transport_protocols.begin(); prot != transport_protocols.end(); ++prot) { protocols.NewChild("stringArray") = *prot; } // set space token if supplied if (!creq.space_token().empty()) req.NewChild("targetSpaceToken") = creq.space_token(); PayloadSOAP *response = NULL; DataStatus status = process("", &request, &response); if (!status) { creq.finished_error(); return status; } XMLNode res = (*response)["srmPrepareToPutResponse"] ["srmPrepareToPutResponse"]; std::string explanation; SRMStatusCode statuscode = GetStatus(res["returnStatus"], explanation); // store the request token in the request object if (res["requestToken"]) creq.request_token(res["requestToken"]); if (statuscode == SRM_REQUEST_QUEUED || statuscode == SRM_REQUEST_INPROGRESS) { // file is queued - need to wait and query with returned request token unsigned int sleeptime = 1; if (res["arrayOfFileStatuses"]["statusArray"]["estimatedWaitTime"]) { sleeptime = stringtoi(res["arrayOfFileStatuses"]["statusArray"]["estimatedWaitTime"]); } unsigned int request_time = 0; if (creq.request_timeout() == 0) { creq.wait(sleeptime); delete response; return DataStatus::Success; }; while (request_time < creq.request_timeout()) { // sleep for recommended time (within limits) sleeptime = (sleeptime < 1) ? 1 : sleeptime; sleeptime = (sleeptime > creq.request_timeout() - request_time) ? creq.request_timeout() - request_time : sleeptime; logger.msg(VERBOSE, "%s: File request %s in SRM queue. Sleeping for %i seconds", creq.surl(), creq.request_token(), sleeptime); sleep(sleeptime); request_time += sleeptime; // get status of request DataStatus status_res = putTURLsStatus(creq, urls); if (creq.status() != SRM_REQUEST_ONGOING) { delete response; return status_res; } sleeptime = creq.waiting_time(); } // if we get here it means a timeout occurred std::string err_msg("PrepareToPut request timed out after " + tostring(creq.request_timeout()) + " seconds"); logger.msg(VERBOSE, err_msg); creq.finished_abort(); delete response; return DataStatus(DataStatus::WritePrepareError, EARCREQUESTTIMEOUT, err_msg); } // if file queued else if (statuscode != SRM_SUCCESS) { std::string file_explanation; SRMStatusCode file_status = GetStatus(res["arrayOfFileStatuses"] ["statusArray"]["status"], file_explanation); if (file_status == SRM_INVALID_PATH) { // make directories logger.msg(VERBOSE, "Path %s is invalid, creating required directories", creq.surl()); DataStatus mkdirres = mkDir(creq); delete response; if (mkdirres.Passed()) return putTURLs(creq, urls); logger.msg(VERBOSE, "Error creating required directories for %s", creq.surl()); creq.finished_error(); return mkdirres; } if (explanation.empty()) explanation = file_explanation; else if (!file_explanation.empty()) explanation += ": " + file_explanation; logger.msg(VERBOSE, explanation); if (file_status == SRM_FILE_BUSY) { // TODO: a previous upload failed to be aborted so kill it } creq.finished_error(); delete response; return DataStatus(DataStatus::WritePrepareError, srm2errno(statuscode, file_status), explanation); } // the file is ready and pinned - we can get the TURL std::string turl = (std::string)res["arrayOfFileStatuses"]["statusArray"]["transferURL"]; logger.msg(VERBOSE, "File is ready! 
TURL is %s", turl); urls.push_back(turl); creq.finished_success(); delete response; return DataStatus::Success; } DataStatus SRM22Client::putTURLsStatus(SRMClientRequest& creq, std::list& urls) { PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmStatusOfPutRequest") .NewChild("srmStatusOfPutRequestRequest"); req.NewChild("requestToken") = creq.request_token(); PayloadSOAP *response = NULL; DataStatus status = process("", &request, &response); if (!status) { creq.finished_abort(); return status; } XMLNode res = (*response)["srmStatusOfPutRequestResponse"] ["srmStatusOfPutRequestResponse"]; std::string explanation; SRMStatusCode statuscode = GetStatus(res["returnStatus"], explanation); if (statuscode == SRM_REQUEST_QUEUED || statuscode == SRM_REQUEST_INPROGRESS) { // still queued - keep waiting int sleeptime = 1; if (res["arrayOfFileStatuses"]["statusArray"]["estimatedWaitTime"]) { sleeptime = stringtoi(res["arrayOfFileStatuses"]["statusArray"]["estimatedWaitTime"]); } creq.wait(sleeptime); } else if (statuscode != SRM_SUCCESS) { // error // check individual file statuses std::string file_explanation; SRMStatusCode file_status = GetStatus(res["arrayOfFileStatuses"] ["statusArray"]["status"], file_explanation); if (file_status == SRM_INVALID_PATH) { // make directories logger.msg(VERBOSE, "Path %s is invalid, creating required directories", creq.surl()); DataStatus mkdirres = mkDir(creq); delete response; if (mkdirres.Passed()) return putTURLs(creq, urls); logger.msg(VERBOSE, "Error creating required directories for %s", creq.surl()); creq.finished_error(); return mkdirres; } if (explanation.empty()) explanation = file_explanation; else if (!file_explanation.empty()) explanation += ": " + file_explanation; logger.msg(VERBOSE, explanation); creq.finished_error(); delete response; return DataStatus(DataStatus::WritePrepareError, srm2errno(statuscode, file_status), explanation); } else { // success, TURL is ready std::string turl = (std::string)res["arrayOfFileStatuses"]["statusArray"]["transferURL"]; logger.msg(VERBOSE, "File is ready! 
TURL is %s", turl); urls.push_back(std::string(turl)); creq.finished_success(); } delete response; return DataStatus::Success; } DataStatus SRM22Client::info(SRMClientRequest& req, std::list& metadata) { std::map > metadata_map; DataStatus res = info(req, metadata_map); if (!res || metadata_map.find(req.surl()) == metadata_map.end()) return res; metadata = metadata_map[req.surl()]; return DataStatus::Success; } DataStatus SRM22Client::info(SRMClientRequest& creq, std::map >& metadata) { PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmLs").NewChild("srmLsRequest"); XMLNode surl_array = req.NewChild("arrayOfSURLs"); std::list surls = creq.surls(); for (std::list::const_iterator surl = surls.begin(); surl != surls.end(); ++surl) { surl_array.NewChild("urlArray") = *surl; } // 0 corresponds to list the directory entry not the files in it // 1 corresponds to list the files in a directory - this is the desired // behaviour of arcls with no recursion, so we add 1 to the -r value req.NewChild("numOfLevels") = tostring(creq.recursion() + 1); // add count and offset options, if set if (creq.offset() != 0) req.NewChild("offset") = tostring(creq.offset()); if (creq.count() != 0) req.NewChild("count") = tostring(creq.count()); if (creq.long_list()) req.NewChild("fullDetailedList") = "true"; PayloadSOAP *response = NULL; DataStatus status = process("", &request, &response); if (!status) return status; XMLNode res = (*response)["srmLsResponse"]["srmLsResponse"]; std::string explanation; SRMStatusCode statuscode = GetStatus(res["returnStatus"], explanation); // store the request token in the request object if (res["requestToken"]) creq.request_token(res["requestToken"]); if (statuscode == SRM_SUCCESS || statuscode == SRM_TOO_MANY_RESULTS || statuscode == SRM_PARTIAL_SUCCESS) { // request is finished - we can get all the details } else if (statuscode == SRM_REQUEST_QUEUED || statuscode == SRM_REQUEST_INPROGRESS) { // file is queued - need to wait and query with returned request token int sleeptime = 1; unsigned int request_time = 0; while (statuscode != SRM_SUCCESS && statuscode != SRM_PARTIAL_SUCCESS && request_time < creq.request_timeout()) { // sleep for some time (no estimated time is given by the server) logger.msg(VERBOSE, "%s: File request %s in SRM queue. 
Sleeping for %i seconds", creq.surl(), creq.request_token(), sleeptime); sleep(sleeptime); request_time += sleeptime; PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmStatusOfLsRequest") .NewChild("srmStatusOfLsRequestRequest"); req.NewChild("requestToken") = creq.request_token(); delete response; response = NULL; status = process("", &request, &response); if (!status) return status; res = (*response)["srmStatusOfLsRequestResponse"] ["srmStatusOfLsRequestResponse"]; statuscode = GetStatus(res["returnStatus"], explanation); if (statuscode == SRM_TOO_MANY_RESULTS) { // we can only handle too many results if a single directory was listed if (surls.size() > 1) { logger.msg(VERBOSE, "Too many files in one request - please try again with fewer files"); return DataStatus(DataStatus::ListError, EARCRESINVAL, "Too many files in one request"); } break; } // loop will exit on success or return false on error if (statuscode != SRM_SUCCESS && statuscode != SRM_PARTIAL_SUCCESS && statuscode != SRM_REQUEST_QUEUED && statuscode != SRM_REQUEST_INPROGRESS) { // check if individual file status gives more info std::string file_explanation; SRMStatusCode file_statuscode = GetStatus(res["details"]["pathDetailArray"]["status"], file_explanation); if (explanation.empty()) explanation = file_explanation; else if (!file_explanation.empty()) explanation += ": " + file_explanation; logger.msg(VERBOSE, explanation); delete response; return DataStatus(DataStatus::ListError, srm2errno(statuscode, file_statuscode), explanation); } } // check for timeout if (request_time >= creq.request_timeout()) { std::string err_msg("Ls request timed out after " + tostring(creq.request_timeout()) + " seconds"); logger.msg(VERBOSE, err_msg); abort(creq, true); delete response; return DataStatus(DataStatus::ListError, EARCREQUESTTIMEOUT, err_msg); } } else { // check if individual file status gives more info std::string file_explanation; SRMStatusCode file_statuscode = GetStatus(res["details"]["pathDetailArray"]["status"], file_explanation); if (explanation.empty()) explanation = file_explanation; else if (!file_explanation.empty()) explanation += ": " + file_explanation; logger.msg(VERBOSE, explanation); delete response; return DataStatus(DataStatus::ListError, srm2errno(statuscode, file_statuscode), explanation); } // the request is ready - collect the details // assuming the result is in the same order as the surls in the request std::list::const_iterator surl = surls.begin(); if (statuscode == SRM_TOO_MANY_RESULTS) { logger.msg(INFO, "Directory size is too large to list in one call, will" " have to call multiple times"); delete response; std::list md; std::list list_metadata; for (int list_no = 0; list_no == 0 || list_metadata.size() == max_files_list; ++list_no) { list_metadata.clear(); // set up new request with offset and count set SRMClientRequest list_req(creq.surl()); list_req.long_list(creq.long_list()); list_req.offset(max_files_list * list_no); list_req.count(max_files_list); DataStatus infores = info(list_req, list_metadata); if (!infores) return infores; // append to metadata for (std::list::iterator it = list_metadata.begin(); it != list_metadata.end(); ++it) { md.push_back(*it); } } // add to the map metadata[*surl] = md; return DataStatus::Success; } for (XMLNode details = res["details"]["pathDetailArray"]; details; ++details, ++surl) { // With Storm and offset and count set files are reported at this level // rather than in arrayOfSubPaths, so check whether we are really listing // a large number of 
surls if (surls.size() < creq.offset() && surl != surls.begin()) --surl; if (surl == surls.end()) { // Something bad happened with the parsing logger.msg(WARNING, "Failure in parsing response from server - " "some information may be inaccurate"); break; } SRMStatusCode filestatuscode = GetStatus(details["status"], explanation); if (filestatuscode != SRM_SUCCESS && filestatuscode != SRM_FILE_BUSY) { logger.msg(VERBOSE, "%s: %s", *surl, explanation); continue; } std::list md; // if we think this entry is a file or we don't want recursion, add it if (!details["type"] || details["type"] != "DIRECTORY" || creq.recursion() < 0) { // it can happen that with multiple calls to info() for large dirs the // last call returns one directory. In this case we want to list it // without the directory structure. if (creq.count() == 0) md.push_back(fillDetails(details, false)); else md.push_back(fillDetails(details, true)); } // look for sub paths (files in a directory) XMLNode subpath = details["arrayOfSubPaths"]["pathDetailArray"]; // if no subpaths or we are not doing recursion, go to next surl if (creq.recursion() < 0 || !subpath) { metadata[*surl] = md; continue; } // sometimes we don't know if we have a file or dir so take out the // entry added above if offset is 0 if (creq.offset() == 0) md.clear(); for (unsigned int i = 0; subpath; ++subpath, ++i) { // some older implementations would return a truncated list rather // than SRM_TOO_MANY_RESULTS. So to be safe, if there are more entries // than max_files_list we call info() multiple times, setting offset // and count. TODO: investigate if this is still necessary if (i == max_files_list) { logger.msg(INFO, "Directory size is larger than %i files, will have " "to call multiple times", max_files_list); std::list list_metadata; // if too many results return code, start from 0 again int list_no = 1; do { list_metadata.clear(); SRMClientRequest list_req(creq.surl()); list_req.long_list(creq.long_list()); list_req.offset(max_files_list * list_no); list_req.count(max_files_list); list_req.recursion(creq.recursion()); DataStatus res = info(list_req, list_metadata); if (!res) { delete response; return res; } list_no++; // append to metadata for (std::list::iterator it = list_metadata.begin(); it != list_metadata.end(); ++it) { md.push_back(*it); } } while (list_metadata.size() == max_files_list); break; } md.push_back(fillDetails(subpath, true)); } // add to the map metadata[*surl] = md; } delete response; return DataStatus::Success; } SRMFileMetaData SRM22Client::fillDetails(XMLNode details, bool directory) { SRMFileMetaData metadata; if (details["path"]) { metadata.path = (std::string)details["path"]; std::string::size_type i = metadata.path.find("//"); while (i != std::string::npos) { metadata.path.erase(i, 1); i = metadata.path.find("//", i); } if (metadata.path.find("/") != 0) metadata.path = "/" + metadata.path; // only use the basename of the path if directory if (directory) metadata.path = metadata.path.substr(metadata.path.rfind("/") + 1); } if (details["size"] && !((std::string)details["size"]).empty()) { metadata.size = stringtoull(details["size"]); } else { metadata.size = -1; } if (details["checkSumType"]) { metadata.checkSumType = lower((std::string)details["checkSumType"]); } else { metadata.checkSumType = ""; } if (details["checkSumValue"]) { metadata.checkSumValue = lower((std::string)details["checkSumValue"]); } else { metadata.checkSumValue = ""; } if (details["createdAtTime"]) { std::string created = (std::string)details["createdAtTime"]; 
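// an empty or missing createdAtTime is mapped to (time_t)0 rather than left unset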
if (!created.empty()) metadata.createdAtTime = created; else metadata.createdAtTime = (time_t)0; } else { metadata.createdAtTime = (time_t)0; } if (details["type"]) { std::string filetype = (std::string)details["type"]; if (filetype == "FILE") metadata.fileType = SRM_FILE; else if (filetype == "DIRECTORY") metadata.fileType = SRM_DIRECTORY; else if (filetype == "LINK") metadata.fileType = SRM_LINK; } else { metadata.fileType = SRM_FILE_TYPE_UNKNOWN; } if (details["fileLocality"]) { std::string filelocality = (std::string)details["fileLocality"]; if (filelocality == "ONLINE" || filelocality == "ONLINE_AND_NEARLINE") metadata.fileLocality = SRM_ONLINE; else if (filelocality == "NEARLINE") metadata.fileLocality = SRM_NEARLINE; } else { metadata.fileLocality = SRM_UNKNOWN; } if (details["arrayOfSpaceTokens"]) { for (XMLNode n = details["arrayOfSpaceTokens"]["stringArray"]; n; ++n) { metadata.spaceTokens.push_back((std::string)n); } } if (details["ownerPermission"] && details["groupPermission"] && details["otherPermission"]) { if (details["ownerPermission"]["userID"]) { metadata.owner = (std::string)details["ownerPermission"]["userID"]; } if (details["groupPermission"]["groupID"]) { metadata.group = (std::string)details["groupPermission"]["groupID"]; } if (details["ownerPermission"]["mode"] && details["groupPermission"]["mode"] && details["otherPermission"]) { std::string perms; std::string uperm = (std::string)details["ownerPermission"]["mode"]; std::string gperm = (std::string)details["groupPermission"]["mode"]; std::string operm = (std::string)details["otherPermission"]; if (uperm.find('R') != std::string::npos) perms += 'r'; else perms += '-'; if (uperm.find('W') != std::string::npos) perms += 'w'; else perms += '-'; if (uperm.find('X') != std::string::npos) perms += 'x'; else perms += '-'; if (gperm.find('R') != std::string::npos) perms += 'r'; else perms += '-'; if (gperm.find('W') != std::string::npos) perms += 'w'; else perms += '-'; if (gperm.find('X') != std::string::npos) perms += 'x'; else perms += '-'; if (operm.find('R') != std::string::npos) perms += 'r'; else perms += '-'; if (operm.find('W') != std::string::npos) perms += 'w'; else perms += '-'; if (operm.find('X') != std::string::npos) perms += 'x'; else perms += '-'; metadata.permission = perms; } } if (details["lastModificationTime"]) { std::string modified = (std::string)details["lastModificationTime"]; if (!modified.empty()) metadata.lastModificationTime = modified; else metadata.lastModificationTime = (time_t)0; } else { metadata.lastModificationTime = (time_t)0; } if (details["lifetimeAssigned"]) { std::string lifetimeassigned = (std::string)details["lifetimeAssigned"]; if (!lifetimeassigned.empty()) metadata.lifetimeAssigned = lifetimeassigned; else metadata.lifetimeAssigned = 0; } else { metadata.lifetimeAssigned = 0; } if (details["lifetimeLeft"]) { std::string lifetimeleft = (std::string)details["lifetimeLeft"]; if (!lifetimeleft.empty()) metadata.lifetimeLeft = lifetimeleft; else metadata.lifetimeLeft = 0; } else { metadata.lifetimeLeft = 0; } if (details["retentionPolicyInfo"]) { std::string policy = (std::string)details["retentionPolicyInfo"]; if (policy == "REPLICA") metadata.retentionPolicy = SRM_REPLICA; else if (policy == "OUTPUT") metadata.retentionPolicy = SRM_OUTPUT; else if (policy == "CUSTODIAL") metadata.retentionPolicy = SRM_CUSTODIAL; else metadata.retentionPolicy = SRM_RETENTION_UNKNOWN; } else { metadata.retentionPolicy = SRM_RETENTION_UNKNOWN; } if (details["fileStorageType"]) { std::string type = 
(std::string)details["fileStorageType"]; if (type == "VOLATILE") metadata.fileStorageType = SRM_VOLATILE; else if (type == "DURABLE") metadata.fileStorageType = SRM_DURABLE; else if (type == "PERMANENT") metadata.fileStorageType = SRM_PERMANENT; else metadata.fileStorageType = SRM_FILE_STORAGE_UNKNOWN; } else { // if any other value, leave undefined metadata.fileStorageType = SRM_FILE_STORAGE_UNKNOWN; } return metadata; } DataStatus SRM22Client::releaseGet(SRMClientRequest& creq) { // Release all pins referred to by the request token in the request object if (creq.request_token().empty()) { logger.msg(VERBOSE, "No request token specified!"); return DataStatus(DataStatus::ReadPrepareError, EINVAL, "No request token specified"); } PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmReleaseFiles") .NewChild("srmReleaseFilesRequest"); req.NewChild("requestToken") = creq.request_token(); PayloadSOAP *response = NULL; DataStatus status = process("", &request, &response); if (!status) return status; XMLNode res = (*response)["srmReleaseFilesResponse"] ["srmReleaseFilesResponse"]; std::string explanation; SRMStatusCode statuscode = GetStatus(res["returnStatus"], explanation); if (statuscode != SRM_SUCCESS) { logger.msg(VERBOSE, explanation); delete response; return DataStatus(DataStatus::ReadPrepareError, srm2errno(statuscode), explanation); } logger.msg(VERBOSE, "Files associated with request token %s released successfully", creq.request_token()); delete response; return DataStatus::Success; } DataStatus SRM22Client::releasePut(SRMClientRequest& creq) { // Set the files referred to by the request token in the request object // which were prepared to put to done if (creq.request_token().empty()) { logger.msg(VERBOSE, "No request token specified!"); return DataStatus(DataStatus::WritePrepareError, EINVAL, "No request token specified"); } PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmPutDone") .NewChild("srmPutDoneRequest"); req.NewChild("requestToken") = creq.request_token(); req.NewChild("arrayOfSURLs").NewChild("urlArray") = creq.surl(); PayloadSOAP *response = NULL; DataStatus status = process("", &request, &response); if (!status) return status; XMLNode res = (*response)["srmPutDoneResponse"]["srmPutDoneResponse"]; std::string explanation; SRMStatusCode statuscode = GetStatus(res["returnStatus"], explanation); if (statuscode != SRM_SUCCESS) { logger.msg(VERBOSE, "%s", explanation); delete response; return DataStatus(DataStatus::WritePrepareError, srm2errno(statuscode), explanation); } logger.msg(VERBOSE, "Files associated with request token %s put done successfully", creq.request_token()); delete response; return DataStatus::Success; } DataStatus SRM22Client::abort(SRMClientRequest& creq, bool source) { // Call srmAbortRequest on the files in the request token if (creq.request_token().empty()) { logger.msg(VERBOSE, "No request token specified!"); return DataStatus(source ? 
DataStatus::ReadFinishError : DataStatus::WriteFinishError, EINVAL, "No request token specified"); } PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmAbortRequest") .NewChild("srmAbortRequestRequest"); req.NewChild("requestToken") = creq.request_token(); PayloadSOAP *response = NULL; DataStatus status = process("", &request, &response); if (!status) return status; XMLNode res = (*response)["srmAbortRequestResponse"]["srmAbortRequestResponse"]; std::string explanation; SRMStatusCode statuscode = GetStatus(res["returnStatus"], explanation); if (statuscode != SRM_SUCCESS) { logger.msg(VERBOSE, "%s", explanation); delete response; return DataStatus(source ? DataStatus::ReadFinishError : DataStatus::WriteFinishError, srm2errno(statuscode), explanation); } logger.msg(VERBOSE, "Files associated with request token %s aborted successfully", creq.request_token()); delete response; return DataStatus::Success; } DataStatus SRM22Client::remove(SRMClientRequest& creq) { // TODO: bulk remove // call info() to find out if we are dealing with a file or directory SRMClientRequest inforeq(creq.surls()); // set recursion to -1, meaning don't list entries in a dir inforeq.recursion(-1); std::list metadata; DataStatus res = info(inforeq, metadata); if (!res) { logger.msg(VERBOSE, "Failed to find metadata info on %s for determining file or directory delete", inforeq.surl()); return res; } if (metadata.front().fileType == SRM_FILE) { logger.msg(VERBOSE, "Type is file, calling srmRm"); return removeFile(creq); } if (metadata.front().fileType == SRM_DIRECTORY) { logger.msg(VERBOSE, "Type is dir, calling srmRmDir"); return removeDir(creq); } logger.msg(WARNING, "File type is not available, attempting file delete"); if (removeFile(creq).Passed()) return DataStatus::Success; logger.msg(WARNING, "File delete failed, attempting directory delete"); return removeDir(creq); } DataStatus SRM22Client::removeFile(SRMClientRequest& creq) { // only one file requested at a time PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmRm").NewChild("srmRmRequest"); req.NewChild("arrayOfSURLs").NewChild("urlArray") = creq.surl(); PayloadSOAP *response = NULL; DataStatus status = process("", &request, &response); if (!status) return status; XMLNode res = (*response)["srmRmResponse"]["srmRmResponse"]; std::string explanation; SRMStatusCode statuscode = GetStatus(res["returnStatus"], explanation); if (statuscode != SRM_SUCCESS) { logger.msg(VERBOSE, explanation); delete response; return DataStatus(DataStatus::DeleteError, srm2errno(statuscode), explanation); } logger.msg(VERBOSE, "File %s removed successfully", creq.surl()); delete response; return DataStatus::Success; } DataStatus SRM22Client::removeDir(SRMClientRequest& creq) { // only one file requested at a time PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmRmdir") .NewChild("srmRmdirRequest"); req.NewChild("SURL") = creq.surl(); PayloadSOAP *response = NULL; DataStatus status = process("", &request, &response); if (!status) return status; XMLNode res = (*response)["srmRmdirResponse"]["srmRmdirResponse"]; std::string explanation; SRMStatusCode statuscode = GetStatus(res["returnStatus"], explanation); if (statuscode != SRM_SUCCESS) { logger.msg(VERBOSE, explanation); delete response; return DataStatus(DataStatus::DeleteError, srm2errno(statuscode), explanation); } logger.msg(VERBOSE, "Directory %s removed successfully", creq.surl()); delete response; return DataStatus::Success; } DataStatus SRM22Client::copy(SRMClientRequest& creq, 
const std::string& source) { PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmCopy").NewChild("srmCopyRequest"); XMLNode reqarray = req.NewChild("arrayOfFileRequests") .NewChild("requestArray"); reqarray.NewChild("sourceSURL") = source; reqarray.NewChild("targetSURL") = creq.surl(); if (!creq.space_token().empty()) req.NewChild("targetSpaceToken") = creq.space_token(); PayloadSOAP *response = NULL; DataStatus status = process("", &request, &response); if (!status) return status; XMLNode res = (*response)["srmCopyResponse"]["srmCopyResponse"]; std::string explanation; SRMStatusCode statuscode = GetStatus(res["returnStatus"], explanation); // store the request token in the request object if (res["requestToken"]) creq.request_token(res["requestToken"]); // set timeout for copying. Since we don't know the progress of the // transfer we hard code a value 10 x the request timeout time_t copy_timeout = creq.request_timeout() * 10; if (statuscode == SRM_REQUEST_QUEUED || statuscode == SRM_REQUEST_INPROGRESS) { // request is queued - need to wait and query with returned request token int sleeptime = 1; if (res["arrayOfFileStatuses"]["statusArray"]["estimatedWaitTime"]) { sleeptime = stringtoi(res["arrayOfFileStatuses"]["statusArray"] ["estimatedWaitTime"]); } int request_time = 0; while (statuscode != SRM_SUCCESS && request_time < copy_timeout) { // sleep for recommended time (within limits) sleeptime = (sleeptime < 1) ? 1 : sleeptime; sleeptime = (sleeptime > 10) ? 10 : sleeptime; logger.msg(VERBOSE, "%s: File request %s in SRM queue. Sleeping for %i seconds", creq.surl(), creq.request_token(), sleeptime); sleep(sleeptime); request_time += sleeptime; PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmStatusOfCopyRequest") .NewChild("srmStatusOfCopyRequestRequest"); req.NewChild("requestToken") = creq.request_token(); delete response; response = NULL; status = process("", &request, &response); if (!status) return status; res = (*response)["srmStatusOfCopyRequestResponse"] ["srmStatusOfCopyRequestResponse"]; statuscode = GetStatus(res["returnStatus"], explanation); // loop will exit on success or return false on error if (statuscode == SRM_REQUEST_QUEUED || statuscode == SRM_REQUEST_INPROGRESS) { // still queued - keep waiting if (res["arrayOfFileStatuses"]["statusArray"]["estimatedWaitTime"]) { sleeptime = stringtoi(res["arrayOfFileStatuses"]["statusArray"]["estimatedWaitTime"]); } } else if (statuscode != SRM_SUCCESS) { logger.msg(VERBOSE, explanation); delete response; return DataStatus(DataStatus::TransferError, srm2errno(statuscode), explanation); } } // check for timeout if (request_time >= copy_timeout) { std::string err_msg("copy request timed out after " + tostring(creq.request_timeout()) + " seconds"); logger.msg(VERBOSE, err_msg); creq.finished_abort(); delete response; return DataStatus(DataStatus::TransferError, EARCREQUESTTIMEOUT, err_msg); } } else if (statuscode != SRM_SUCCESS) { logger.msg(VERBOSE, "%s", explanation); delete response; return DataStatus(DataStatus::TransferError, srm2errno(statuscode), explanation); } creq.finished_success(); delete response; return DataStatus::Success; } DataStatus SRM22Client::mkDir(SRMClientRequest& creq) { std::string surl = creq.surl(); std::string::size_type slashpos = surl.find('/', 6); slashpos = surl.find('/', slashpos + 1); // don't create root dir bool keeplisting = true; // whether to keep checking dir exists SRMStatusCode parent_status = SRM_SUCCESS; // reason for failing to create parent dirs std::string 
parent_explanation; // detailed reason for failing to create parent dirs while (slashpos != std::string::npos) { std::string dirname = surl.substr(0, slashpos); // list dir to see if it exists SRMClientRequest listreq(dirname); listreq.recursion(-1); std::list metadata; if (keeplisting) { logger.msg(VERBOSE, "Checking for existence of %s", dirname); if (info(listreq, metadata).Passed()) { if (metadata.front().fileType == SRM_FILE) { logger.msg(VERBOSE, "File already exists: %s", dirname); return DataStatus(DataStatus::CreateDirectoryError, ENOTDIR, "File already exists"); } slashpos = surl.find("/", slashpos + 1); continue; } } logger.msg(VERBOSE, "Creating directory %s", dirname); PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmMkdir") .NewChild("srmMkdirRequest"); req.NewChild("SURL") = dirname; PayloadSOAP *response = NULL; DataStatus status = process("", &request, &response); if (!status) return status; XMLNode res = (*response)["srmMkdirResponse"]["srmMkdirResponse"]; std::string explanation; SRMStatusCode statuscode = GetStatus(res["returnStatus"], explanation); slashpos = surl.find("/", slashpos + 1); // there can be undetectable errors in creating dirs that already exist // so only report error on creating the final dir if (statuscode == SRM_SUCCESS || statuscode == SRM_DUPLICATION_ERROR) { keeplisting = false; } else if (slashpos == std::string::npos) { if (statuscode == SRM_INVALID_PATH && parent_status != SRM_SUCCESS) { statuscode = parent_status; explanation = parent_explanation; } logger.msg(VERBOSE, "Error creating directory %s: %s", dirname, explanation); delete response; return DataStatus(DataStatus::CreateDirectoryError, srm2errno(statuscode), explanation); } else if (statuscode != SRM_INVALID_PATH) { // remember high-level error so as not to report ENOENT when final dir // fails due to failing to create parent dirs parent_status = statuscode; parent_explanation = explanation; } delete response; } return DataStatus::Success; } DataStatus SRM22Client::rename(SRMClientRequest& creq, const URL& newurl) { PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmMv") .NewChild("srmMvRequest"); req.NewChild("fromSURL") = creq.surl(); req.NewChild("toSURL") = newurl.plainstr(); PayloadSOAP *response = NULL; DataStatus status = process("", &request, &response); if (!status) return status; XMLNode res = (*response)["srmMvResponse"]["srmMvResponse"]; std::string explanation; SRMStatusCode statuscode = GetStatus(res["returnStatus"], explanation); if (statuscode == SRM_SUCCESS) return DataStatus::Success; logger.msg(VERBOSE, explanation); return DataStatus(DataStatus::RenameError, srm2errno(statuscode), explanation); } DataStatus SRM22Client::checkPermissions(SRMClientRequest& creq) { PayloadSOAP request(ns); XMLNode req = request.NewChild("SRMv2:srmCheckPermission") .NewChild("srmCheckPermissionRequest"); req.NewChild("arrayOfSURLs").NewChild("urlArray") = creq.surl(); PayloadSOAP *response = NULL; DataStatus status = process("", &request, &response); if (!status) return status; XMLNode res = (*response)["srmCheckPermissionResponse"]["srmCheckPermissionResponse"]; std::string explanation; SRMStatusCode statuscode = GetStatus(res["returnStatus"], explanation); if (statuscode != SRM_SUCCESS) { logger.msg(VERBOSE, explanation); delete response; return DataStatus(DataStatus::CheckError, srm2errno(statuscode), explanation); } // check if 'r' bit is set if (std::string(res["arrayOfPermissions"]["surlPermissionArray"]["permission"]).find('R') != std::string::npos) 
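// an 'R' in the returned permission string means the caller has read access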
{ delete response; return DataStatus::Success; } return DataStatus(DataStatus::CheckError, EACCES); } int SRM22Client::srm2errno(SRMStatusCode reqstat, SRMStatusCode filestat) { // Try file-level code first, and if no detailed status is given use // request-level code SRMStatusCode stat = filestat; if (stat == SRM_SUCCESS || stat == SRM_FAILURE) stat = reqstat; switch(stat) { case SRM_INVALID_PATH: case SRM_FILE_LOST: return ENOENT; case SRM_AUTHENTICATION_FAILURE: case SRM_AUTHORIZATION_FAILURE: return EACCES; case SRM_INVALID_REQUEST: return EINVAL; case SRM_NON_EMPTY_DIRECTORY: return ENOTEMPTY; case SRM_SPACE_LIFETIME_EXPIRED: case SRM_EXCEED_ALLOCATION: case SRM_NO_USER_SPACE: case SRM_NO_FREE_SPACE: return ENOSPC; case SRM_DUPLICATION_ERROR: return EEXIST; case SRM_TOO_MANY_RESULTS: return EARCRESINVAL; case SRM_INTERNAL_ERROR: case SRM_FILE_UNAVAILABLE: return EAGAIN; case SRM_FILE_BUSY: return EBUSY; case SRM_FATAL_INTERNAL_ERROR: return EARCSVCPERM; case SRM_NOT_SUPPORTED: return EOPNOTSUPP; case SRM_REQUEST_TIMED_OUT: return ETIMEDOUT; default: // other codes are not errors or are generic failure codes return EARCOTHER; } } } // namespace ArcDMCSRM nordugrid-arc-5.4.2/src/hed/dmc/srm/PaxHeaders.7502/README0000644000000000000000000000012311100321430020730 xustar000000000000000027 mtime=1224844056.031355 26 atime=1513200575.22671 30 ctime=1513200660.911758637 nordugrid-arc-5.4.2/src/hed/dmc/srm/README0000644000175000002070000000004311100321430020773 0ustar00mockbuildmock00000000000000DMC which handles srm:// protocol nordugrid-arc-5.4.2/src/hed/dmc/PaxHeaders.7502/gfal0000644000000000000000000000013213214316024020116 xustar000000000000000030 mtime=1513200660.990759603 30 atime=1513200668.722854169 30 ctime=1513200660.990759603 nordugrid-arc-5.4.2/src/hed/dmc/gfal/0000755000175000002070000000000013214316024020241 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/dmc/gfal/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612457657712022256 xustar000000000000000027 mtime=1421828042.423936 30 atime=1513200595.275955885 29 ctime=1513200660.98475953 nordugrid-arc-5.4.2/src/hed/dmc/gfal/Makefile.am0000644000175000002070000000126012457657712022320 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libdmcgfal.la libdmcgfal_la_SOURCES = DataPointGFAL.cpp DataPointGFAL.h \ GFALTransfer3rdParty.cpp GFALTransfer3rdParty.h \ GFALUtils.cpp GFALUtils.h libdmcgfal_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(GFAL2_CFLAGS) $(AM_CXXFLAGS) libdmcgfal_la_LIBADD = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(GFAL2_LIBS) libdmcgfal_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/dmc/gfal/PaxHeaders.7502/DataPointGFAL.h0000644000000000000000000000012412623100034022655 xustar000000000000000027 mtime=1447854108.789058 27 atime=1513200575.269711 30 ctime=1513200660.987759566 nordugrid-arc-5.4.2/src/hed/dmc/gfal/DataPointGFAL.h0000644000175000002070000000501212623100034022720 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_DATAPOINTGFAL_H__ #define __ARC_DATAPOINTGFAL_H__ #include #include #include namespace ArcDMCGFAL { using namespace Arc; /** * Provides access to the gLite Grid File Access Library through ARC's API. 
* The following protocols are supported: lfc, srm, root, gsiftp, rfio, dcap * and gsidcap. * * Notes on env variables: * - If SRM is used LCG_GFAL_INFOSYS must be set to BDII host:port unless * the full URL with port and web service path is given. * * This class is a loadable module and cannot be used directly. The DataHandle * class loads modules at runtime and should be used instead of this. */ class DataPointGFAL : public DataPointDirect { public: DataPointGFAL(const URL& url, const UserConfig& usercfg, PluginArgument* parg); virtual ~DataPointGFAL(); static Plugin* Instance(PluginArgument *arg); virtual DataStatus StartReading(DataBuffer& buffer); virtual DataStatus StopReading(); virtual DataStatus StartWriting(DataBuffer& buffer, DataCallback *space_cb = NULL); virtual DataStatus StopWriting(); virtual DataStatus Check(bool check_meta); virtual DataStatus Stat(FileInfo& file, DataPointInfoType verb = INFO_TYPE_ALL); virtual DataStatus List(std::list& files, DataPointInfoType verb = INFO_TYPE_ALL); virtual DataStatus Remove(); virtual DataStatus CreateDirectory(bool with_parents=false); virtual DataStatus Rename(const URL& newurl); virtual bool RequiresCredentialsInFile() const; // Even though this is not a DataPointIndex, it still needs to handle // locations so Resolve and AddLocation must be implemented virtual DataStatus Resolve(bool source = true); virtual DataStatus AddLocation(const URL& url, const std::string& meta); protected: // 3rd party transfer (destination pulls from source) virtual DataStatus Transfer3rdParty(const URL& source, const URL& destination, DataPoint::Callback3rdParty callback = NULL); private: DataStatus do_stat(const URL& stat_url, FileInfo& file, DataPointInfoType verb); static void read_file_start(void *object); void read_file(); static void write_file_start(void *object); void write_file(); static Logger logger; int fd; bool reading; bool writing; SimpleCounter transfer_condition; std::string lfc_host; std::list locations; }; } // namespace ArcDMCGFAL #endif // __ARC_DATAPOINTGFAL_H__ nordugrid-arc-5.4.2/src/hed/dmc/gfal/PaxHeaders.7502/GFALTransfer3rdParty.h0000644000000000000000000000012412623077675024237 xustar000000000000000027 mtime=1447854013.304045 27 atime=1513200575.267711 30 ctime=1513200660.989759591 nordugrid-arc-5.4.2/src/hed/dmc/gfal/GFALTransfer3rdParty.h0000644000175000002070000000157512623077675024314 0ustar00mockbuildmock00000000000000#ifndef TRANSFER3RDPARTY_H_ #define TRANSFER3RDPARTY_H_ #include #include namespace ArcDMCGFAL { using namespace Arc; /// Class to interact with GFAL2 to do third-party transfer class GFALTransfer3rdParty { public: /// Constructor GFALTransfer3rdParty(const URL& source, const URL& dest, const Arc::UserConfig& cfg, DataPoint::Callback3rdParty cb); /// Start transfer DataStatus Transfer(); private: URL source; URL destination; int transfer_timeout; DataPoint::Callback3rdParty callback; static Logger logger; /// Callback that is passed to GFAL2. 
It calls our Callback3rdParty callback static void gfal_3rd_party_callback(gfalt_transfer_status_t h, const char* src, const char* dst, gpointer user_data); }; } // namespace ArcDMCGFAL #endif /* TRANSFER3RDPARTY_H_ */ nordugrid-arc-5.4.2/src/hed/dmc/gfal/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315723022245 xustar000000000000000030 mtime=1513200595.323956472 30 atime=1513200649.079613925 30 ctime=1513200660.985759542 nordugrid-arc-5.4.2/src/hed/dmc/gfal/Makefile.in0000644000175000002070000006712613214315723022327 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/dmc/gfal DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed 
'$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libdmcgfal_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) am_libdmcgfal_la_OBJECTS = libdmcgfal_la-DataPointGFAL.lo \ libdmcgfal_la-GFALTransfer3rdParty.lo \ libdmcgfal_la-GFALUtils.lo libdmcgfal_la_OBJECTS = $(am_libdmcgfal_la_OBJECTS) libdmcgfal_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libdmcgfal_la_CXXFLAGS) \ $(CXXFLAGS) $(libdmcgfal_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libdmcgfal_la_SOURCES) DIST_SOURCES = $(libdmcgfal_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ 
CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = 
@OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = 
@sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libdmcgfal.la libdmcgfal_la_SOURCES = DataPointGFAL.cpp DataPointGFAL.h \ GFALTransfer3rdParty.cpp GFALTransfer3rdParty.h \ GFALUtils.cpp GFALUtils.h libdmcgfal_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(GFAL2_CFLAGS) $(AM_CXXFLAGS) libdmcgfal_la_LIBADD = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(GFAL2_LIBS) libdmcgfal_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/dmc/gfal/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/dmc/gfal/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libdmcgfal.la: $(libdmcgfal_la_OBJECTS) $(libdmcgfal_la_DEPENDENCIES) $(libdmcgfal_la_LINK) -rpath $(pkglibdir) $(libdmcgfal_la_OBJECTS) $(libdmcgfal_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) 
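# The variables and link rule above build libdmcgfal as a loadable libtool
# module (-no-undefined -avoid-version -module) installed into $(pkglibdir)
# rather than as a regular versioned library. A minimal sketch of rebuilding
# and staging only this plugin with the standard automake targets, assuming a
# configured top-level build tree (the staging directory is hypothetical):
#   make -C src/hed/dmc/gfal
#   make -C src/hed/dmc/gfal install DESTDIR=/tmp/arc-stage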
distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdmcgfal_la-DataPointGFAL.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdmcgfal_la-GFALTransfer3rdParty.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdmcgfal_la-GFALUtils.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libdmcgfal_la-DataPointGFAL.lo: DataPointGFAL.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcgfal_la_CXXFLAGS) $(CXXFLAGS) -MT libdmcgfal_la-DataPointGFAL.lo -MD -MP -MF $(DEPDIR)/libdmcgfal_la-DataPointGFAL.Tpo -c -o libdmcgfal_la-DataPointGFAL.lo `test -f 'DataPointGFAL.cpp' || echo '$(srcdir)/'`DataPointGFAL.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdmcgfal_la-DataPointGFAL.Tpo $(DEPDIR)/libdmcgfal_la-DataPointGFAL.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataPointGFAL.cpp' object='libdmcgfal_la-DataPointGFAL.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcgfal_la_CXXFLAGS) $(CXXFLAGS) -c -o libdmcgfal_la-DataPointGFAL.lo `test -f 'DataPointGFAL.cpp' || echo '$(srcdir)/'`DataPointGFAL.cpp libdmcgfal_la-GFALTransfer3rdParty.lo: GFALTransfer3rdParty.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcgfal_la_CXXFLAGS) $(CXXFLAGS) -MT libdmcgfal_la-GFALTransfer3rdParty.lo -MD -MP -MF $(DEPDIR)/libdmcgfal_la-GFALTransfer3rdParty.Tpo -c -o libdmcgfal_la-GFALTransfer3rdParty.lo `test -f 'GFALTransfer3rdParty.cpp' || echo '$(srcdir)/'`GFALTransfer3rdParty.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdmcgfal_la-GFALTransfer3rdParty.Tpo $(DEPDIR)/libdmcgfal_la-GFALTransfer3rdParty.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='GFALTransfer3rdParty.cpp' object='libdmcgfal_la-GFALTransfer3rdParty.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) 
$(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcgfal_la_CXXFLAGS) $(CXXFLAGS) -c -o libdmcgfal_la-GFALTransfer3rdParty.lo `test -f 'GFALTransfer3rdParty.cpp' || echo '$(srcdir)/'`GFALTransfer3rdParty.cpp libdmcgfal_la-GFALUtils.lo: GFALUtils.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcgfal_la_CXXFLAGS) $(CXXFLAGS) -MT libdmcgfal_la-GFALUtils.lo -MD -MP -MF $(DEPDIR)/libdmcgfal_la-GFALUtils.Tpo -c -o libdmcgfal_la-GFALUtils.lo `test -f 'GFALUtils.cpp' || echo '$(srcdir)/'`GFALUtils.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdmcgfal_la-GFALUtils.Tpo $(DEPDIR)/libdmcgfal_la-GFALUtils.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='GFALUtils.cpp' object='libdmcgfal_la-GFALUtils.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcgfal_la_CXXFLAGS) $(CXXFLAGS) -c -o libdmcgfal_la-GFALUtils.lo `test -f 'GFALUtils.cpp' || echo '$(srcdir)/'`GFALUtils.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d 
$$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/dmc/gfal/PaxHeaders.7502/GFALUtils.h0000644000000000000000000000012412074027430022102 xustar000000000000000027 mtime=1357917976.740806 27 atime=1513200575.269711 30 ctime=1513200660.990759603 nordugrid-arc-5.4.2/src/hed/dmc/gfal/GFALUtils.h0000644000175000002070000000076312074027430022155 0ustar00mockbuildmock00000000000000#ifndef GFALUTILS_H_ #define GFALUTILS_H_ #include #include namespace ArcDMCGFAL { using namespace Arc; /// Utility functions for GFAL2 class GFALUtils { public: /// Convert a URL into GFAL URL (using lfn: or guid: for LFC). static std::string GFALURL(const URL& u); /// Log GFAL message, clear internal GFAL error and return error number. static int HandleGFALError(Logger& logger); }; } // namespace ArcDMCGFAL #endif /* GFALUTILS_H_ */ nordugrid-arc-5.4.2/src/hed/dmc/gfal/PaxHeaders.7502/GFALTransfer3rdParty.cpp0000644000000000000000000000012412623077675024572 xustar000000000000000027 mtime=1447854013.304045 27 atime=1513200575.265711 30 ctime=1513200660.988759579 nordugrid-arc-5.4.2/src/hed/dmc/gfal/GFALTransfer3rdParty.cpp0000644000175000002070000000715412623077675024646 0ustar00mockbuildmock00000000000000#include "GFALUtils.h" #include "GFALTransfer3rdParty.h" namespace ArcDMCGFAL { using namespace Arc; Logger GFALTransfer3rdParty::logger(Logger::getRootLogger(), "Transfer3rdParty"); // Callback passed to gfal. It calls DataPoint callback and fills relevant // info. DataPoint pointer is stored in user_data. void GFALTransfer3rdParty::gfal_3rd_party_callback(gfalt_transfer_status_t h, const char* src, const char* dst, gpointer user_data) { DataPoint::Callback3rdParty* cb = (DataPoint::Callback3rdParty*)user_data; if (cb && *cb) { GError * err = NULL; size_t bytes = gfalt_copy_get_bytes_transfered(h, &err); if (err != NULL) { logger.msg(WARNING, "Failed to obtain bytes transferred: %s", err->message); g_error_free(err); return; } (*(*cb))(bytes); } } GFALTransfer3rdParty::GFALTransfer3rdParty(const URL& src, const URL& dest, const Arc::UserConfig& cfg, DataPoint::Callback3rdParty cb) : source(src), destination(dest), transfer_timeout(cfg.Timeout()), callback(cb) {}; DataStatus GFALTransfer3rdParty::Transfer() { if (!source) return DataStatus(DataStatus::TransferError, EINVAL, "Invalid source URL"); if (!destination) return DataStatus(DataStatus::TransferError, EINVAL, "Invalid destination URL"); GError * err = NULL; int error_no = EARCOTHER; // Set up parameters and options gfalt_params_t params = gfalt_params_handle_new(&err); if (err != NULL) { logger.msg(VERBOSE, "Failed to get initiate GFAL2 parameter handle: %s", err->message); g_error_free(err); return DataStatus(DataStatus::TransferError, error_no, "Failed to get initiate GFAL2 parameter handle"); } gfal2_context_t ctx = gfal2_context_new(&err); if (err != NULL) { logger.msg(VERBOSE, "Failed to get initiate new GFAL2 context: %s", err->message); g_error_free(err); return DataStatus(DataStatus::TransferError, error_no, "Failed to get initiate new GFAL2 context"); } gfalt_add_monitor_callback(params, &gfal_3rd_party_callback, (gpointer)(&callback), NULL, &err); if (err != NULL) { logger.msg(VERBOSE, "Failed to set GFAL2 monitor callback: %s", err->message); g_error_free(err); return DataStatus(DataStatus::TransferError, error_no, "Failed to set GFAL2 monitor callback"); } // Set replace according to overwrite option gfalt_set_replace_existing_file(params, (destination.Option("overwrite") == "yes"), &err); if (err != NULL) { logger.msg(VERBOSE, "Failed to set overwrite option in 
GFAL2: %s", err->message); g_error_free(err); return DataStatus(DataStatus::TransferError, error_no, "Failed to set overwrite option in GFAL2"); } // Set transfer timeout gfalt_set_timeout(params, transfer_timeout, &err); if (err != NULL) { logger.msg(WARNING, "Failed to set GFAL2 transfer timeout, will use default: %s", err->message); g_error_free(err); err = NULL; } // Do the copy int res = gfalt_copy_file(ctx, params, GFALUtils::GFALURL(source).c_str(), GFALUtils::GFALURL(destination).c_str(), &err); gfal2_context_free(ctx); gfalt_params_handle_delete(params, NULL); if (res != 0) { logger.msg(VERBOSE, "Transfer failed"); if (err != NULL) { logger.msg(VERBOSE, err->message); error_no = err->code; g_error_free(err); } return DataStatus(DataStatus::TransferError, error_no); } logger.msg(INFO, "Transfer succeeded"); return DataStatus::Success; } } // namespace ArcDMCGFAL nordugrid-arc-5.4.2/src/hed/dmc/gfal/PaxHeaders.7502/DataPointGFAL.cpp0000644000000000000000000000012412675602216023230 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.267711 30 ctime=1513200660.986759554 nordugrid-arc-5.4.2/src/hed/dmc/gfal/DataPointGFAL.cpp0000644000175000002070000005501212675602216023300 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include // NOTE: On Solaris errno is not working properly if cerrno is included first #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "DataPointGFAL.h" #include "GFALTransfer3rdParty.h" #include "GFALUtils.h" namespace ArcDMCGFAL { using namespace Arc; /// Class for locking environment while calling gfal functions. class GFALEnvLocker: public CertEnvLocker { public: static Logger logger; GFALEnvLocker(const UserConfig& usercfg, const std::string& lfc_host): CertEnvLocker(usercfg) { EnvLockUnwrap(false); // if root, we have to set X509_USER_CERT and X509_USER_KEY to // X509_USER_PROXY to force GFAL to use the proxy. If they are undefined // it uses the host cert and key. 
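    // As a rough illustration of the effect (the proxy path below is
    // hypothetical): when running as root with X509_USER_PROXY=/tmp/x509up_u0
    // set, the assignments that follow are equivalent to exporting
    //   X509_USER_KEY=/tmp/x509up_u0
    //   X509_USER_CERT=/tmp/x509up_u0
    // before GFAL2 is invoked, so the proxy is used instead of the host
    // certificate and key.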
if (getuid() == 0 && !GetEnv("X509_USER_PROXY").empty()) { SetEnv("X509_USER_KEY", GetEnv("X509_USER_PROXY"), true); SetEnv("X509_USER_CERT", GetEnv("X509_USER_PROXY"), true); } logger.msg(DEBUG, "Using proxy %s", GetEnv("X509_USER_PROXY")); logger.msg(DEBUG, "Using key %s", GetEnv("X509_USER_KEY")); logger.msg(DEBUG, "Using cert %s", GetEnv("X509_USER_CERT")); if (!lfc_host.empty()) { // set LFC retry env variables (don't overwrite if set already) // connection timeout SetEnv("LFC_CONNTIMEOUT", "30", false); // number of retries SetEnv("LFC_CONRETRY", "1", false); // interval between retries SetEnv("LFC_CONRETRYINT", "10", false); // set host name env var SetEnv("LFC_HOST", lfc_host); } EnvLockWrap(false); } }; Logger GFALEnvLocker::logger(Logger::getRootLogger(), "GFALEnvLocker"); Logger DataPointGFAL::logger(Logger::getRootLogger(), "DataPoint.GFAL"); DataPointGFAL::DataPointGFAL(const URL& u, const UserConfig& usercfg, PluginArgument* parg) : DataPointDirect(u, usercfg, parg), fd(-1), reading(false), writing(false) { LogLevel loglevel = logger.getThreshold(); if (loglevel == DEBUG) gfal2_log_set_level (G_LOG_LEVEL_DEBUG); if (loglevel == VERBOSE) gfal2_log_set_level (G_LOG_LEVEL_INFO); if (url.Protocol() == "lfc") lfc_host = url.Host(); } DataPointGFAL::~DataPointGFAL() { StopReading(); StopWriting(); } Plugin* DataPointGFAL::Instance(PluginArgument *arg) { DataPointPluginArgument *dmcarg = dynamic_cast(arg); if (!dmcarg) return NULL; if (((const URL &)(*dmcarg)).Protocol() != "rfio" && ((const URL &)(*dmcarg)).Protocol() != "dcap" && ((const URL &)(*dmcarg)).Protocol() != "gsidcap" && ((const URL &)(*dmcarg)).Protocol() != "lfc" && // gfal protocol is used in 3rd party transfer to load this DMC ((const URL &)(*dmcarg)).Protocol() != "gfal") return NULL; return new DataPointGFAL(*dmcarg, *dmcarg, dmcarg); } DataStatus DataPointGFAL::Resolve(bool source) { // Here we just deal with getting locations from destination if (source || (url.Protocol() != "lfn" && url.Protocol() != "guid")) return DataStatus::Success; if (url.Locations().size() == 0 && locations.empty()) { logger.msg(ERROR, "Locations are missing in destination LFC URL"); return DataStatus(DataStatus::WriteResolveError, EINVAL, "No locations specified"); } for (std::list::const_iterator u = url.Locations().begin(); u != url.Locations().end(); ++u) { if (AddLocation(*u, url.ConnectionURL()) == DataStatus::LocationAlreadyExistsError) { logger.msg(WARNING, "Duplicate replica found in LFC: %s", u->plainstr()); } else { logger.msg(VERBOSE, "Adding location: %s - %s", url.ConnectionURL(), u->plainstr()); } } return DataStatus::Success; } DataStatus DataPointGFAL::AddLocation(const URL& url, const std::string& meta) { logger.msg(DEBUG, "Add location: url: %s", url.str()); logger.msg(DEBUG, "Add location: metadata: %s", meta); for (std::list::iterator i = locations.begin(); i != locations.end(); ++i) if ((i->Name() == meta) && (url == (*i))) return DataStatus::LocationAlreadyExistsError; locations.push_back(URLLocation(url, meta)); return DataStatus::Success; } DataStatus DataPointGFAL::StartReading(DataBuffer& buf) { if (reading) return DataStatus::IsReadingError; if (writing) return DataStatus::IsWritingError; reading = true; // Open the file { GFALEnvLocker gfal_lock(usercfg, lfc_host); fd = gfal_open(GFALUtils::GFALURL(url).c_str(), O_RDONLY, 0); } if (fd < 0) { logger.msg(VERBOSE, "gfal_open failed: %s", StrError(errno)); int error_no = GFALUtils::HandleGFALError(logger); reading = false; return 
DataStatus(DataStatus::ReadStartError, error_no); } // Remember the DataBuffer we got: the separate reading thread will use it buffer = &buf; // StopReading will wait for this condition, // which will be signalled by the separate reading thread // Create the separate reading thread if (!CreateThreadFunction(&DataPointGFAL::read_file_start, this, &transfer_condition)) { if (fd != -1 && gfal_close(fd) < 0) { logger.msg(WARNING, "gfal_close failed: %s", StrError(gfal_posix_code_error())); } reading = false; return DataStatus(DataStatus::ReadStartError, "Failed to create reading thread"); } return DataStatus::Success; } void DataPointGFAL::read_file_start(void *object) { ((DataPointGFAL*)object)->read_file(); } void DataPointGFAL::read_file() { int handle; unsigned int length; unsigned long long int offset = 0; ssize_t bytes_read = 0; for (;;) { // Ask the DataBuffer for a buffer to read into if (!buffer->for_read(handle, length, true)) { buffer->error_read(true); break; } // Read into the buffer { GFALEnvLocker gfal_lock(usercfg, lfc_host); bytes_read = gfal_read(fd, (*(buffer))[handle], length); } // If there was an error if (bytes_read < 0) { logger.msg(VERBOSE, "gfal_read failed: %s", StrError(errno)); GFALUtils::HandleGFALError(logger); buffer->error_read(true); break; } // If there was no more to read if (bytes_read == 0) { buffer->is_read(handle, 0, offset); break; } // Tell the DataBuffer that we read something into it buffer->is_read(handle, bytes_read, offset); // Keep track of where we are in the file offset += bytes_read; } // We got out of the loop, which means we read the whole file // or there was an error, either case the reading is finished buffer->eof_read(true); // Close the file if (fd != -1) { int r; { GFALEnvLocker gfal_lock(usercfg, lfc_host); r = gfal_close(fd); } if (r < 0) { logger.msg(WARNING, "gfal_close failed: %s", StrError(gfal_posix_code_error())); } fd = -1; } } DataStatus DataPointGFAL::StopReading() { if (!reading) return DataStatus(DataStatus::ReadStopError, EARCLOGIC, "Not reading"); reading = false; if (!buffer) return DataStatus(DataStatus::ReadStopError, EARCLOGIC, "Not reading"); // If the reading is not finished yet trigger reading error if (!buffer->eof_read()) buffer->error_read(true); // Wait for the reading thread to finish logger.msg(DEBUG, "StopReading starts waiting for transfer_condition."); transfer_condition.wait(); logger.msg(DEBUG, "StopReading finished waiting for transfer_condition."); // Close the file if not already done if (fd != -1) { int r; { GFALEnvLocker gfal_lock(usercfg, lfc_host); r = gfal_close(fd); } if (r < 0) { logger.msg(WARNING, "gfal_close failed: %s", StrError(gfal_posix_code_error())); } fd = -1; } // If there was an error (maybe we triggered it) if (buffer->error_read()) { buffer = NULL; return DataStatus::ReadError; } // If there was no error (the reading already finished) buffer = NULL; return DataStatus::Success; } DataStatus DataPointGFAL::StartWriting(DataBuffer& buf, DataCallback *space_cb) { if (reading) return DataStatus(DataStatus::IsReadingError, EARCLOGIC); if (writing) return DataStatus(DataStatus::IsWritingError, EARCLOGIC); writing = true; // if index service (eg LFC) then set the replica with extended attrs if (url.Protocol() == "lfn" || url.Protocol() == "guid") { if (locations.empty()) { logger.msg(ERROR, "No locations defined for %s", url.str()); writing = false; return DataStatus(DataStatus::WriteStartError, EINVAL, "No locations defined"); } // choose first location std::string 
location(locations.begin()->plainstr()); if (gfal_setxattr(GFALUtils::GFALURL(url).c_str(), "user.replicas", location.c_str(), location.length(), 0) != 0) { logger.msg(VERBOSE, "Failed to set LFC replicas: %s", StrError(gfal_posix_code_error())); int error_no = GFALUtils::HandleGFALError(logger); writing = false; return DataStatus(DataStatus::WriteStartError, error_no, "Failed to set LFC replicas"); } } { GFALEnvLocker gfal_lock(usercfg, lfc_host); // Open the file fd = gfal_open(GFALUtils::GFALURL(url).c_str(), O_WRONLY | O_CREAT, 0600); } if (fd < 0) { // If no entry, try to create parent directories if (errno == ENOENT) { URL parent_url = URL(url.plainstr()); // For SRM the path can be given as SFN HTTP option if ((url.Protocol() == "srm" && !url.HTTPOption("SFN").empty())) { parent_url.AddHTTPOption("SFN", Glib::path_get_dirname(url.HTTPOption("SFN")), true); } else { parent_url.ChangePath(Glib::path_get_dirname(url.Path())); } { GFALEnvLocker gfal_lock(usercfg, lfc_host); // gfal_mkdir is always recursive if (gfal_mkdir(GFALUtils::GFALURL(parent_url).c_str(), 0700) != 0 && gfal_posix_code_error() != EEXIST) { logger.msg(INFO, "gfal_mkdir failed (%s), trying to write anyway", StrError(gfal_posix_code_error())); } fd = gfal_open(GFALUtils::GFALURL(url).c_str(), O_WRONLY | O_CREAT, 0600); } } if (fd < 0) { logger.msg(VERBOSE, "gfal_open failed: %s", StrError(gfal_posix_code_error())); int error_no = GFALUtils::HandleGFALError(logger); writing = false; return DataStatus(DataStatus::WriteStartError, error_no); } } // Remember the DataBuffer we got: the separate writing thread will use it buffer = &buf; // StopWriting will wait for this condition, // which will be signalled by the separate writing thread // Create the separate writing thread if (!CreateThreadFunction(&DataPointGFAL::write_file_start, this, &transfer_condition)) { if (fd != -1 && gfal_close(fd) < 0) { logger.msg(WARNING, "gfal_close failed: %s", StrError(gfal_posix_code_error())); } writing = false; return DataStatus(DataStatus::WriteStartError, "Failed to create writing thread"); } return DataStatus::Success; } void DataPointGFAL::write_file_start(void *object) { ((DataPointGFAL*)object)->write_file(); } void DataPointGFAL::write_file() { int handle; unsigned int length; unsigned long long int position; unsigned long long int offset = 0; ssize_t bytes_written = 0; unsigned int chunk_offset; for (;;) { // Ask the DataBuffer for a buffer with data to write, // and the length and position where to write to if (!buffer->for_write(handle, length, position, true)) { // no more data from the buffer, has the other side finished? 
if (!buffer->eof_read()) { // the other side hasn't finished yet, must be an error buffer->error_write(true); } break; } // if the buffer gives a different position than we are currently in the // destination, then we have to seek there if (position != offset) { logger.msg(DEBUG, "DataPointGFAL::write_file got position %d and offset %d, has to seek", position, offset); { GFALEnvLocker gfal_lock(usercfg, lfc_host); gfal_lseek(fd, position, SEEK_SET); } offset = position; } // we want to write the chunk we got from the buffer, // but we may not be able to write it in one shot chunk_offset = 0; while (chunk_offset < length) { { GFALEnvLocker gfal_lock(usercfg, lfc_host); bytes_written = gfal_write(fd, (*(buffer))[handle] + chunk_offset, length - chunk_offset); } if (bytes_written < 0) break; // there was an error // calculate how far we got into the chunk chunk_offset += bytes_written; // if the new chunk_offset is still less than the length of the chunk, // we have to continue writing } // we finished with writing (even if there was an error) buffer->is_written(handle); offset += length; // if there was an error during writing if (bytes_written < 0) { logger.msg(VERBOSE, "gfal_write failed: %s", StrError(gfal_posix_code_error())); GFALUtils::HandleGFALError(logger); buffer->error_write(true); break; } } buffer->eof_write(true); // Close the file if (fd != -1) { int r; { GFALEnvLocker gfal_lock(usercfg, lfc_host); r = gfal_close(fd); } if (r < 0) { logger.msg(WARNING, "gfal_close failed: %s", StrError(gfal_posix_code_error())); } fd = -1; } } DataStatus DataPointGFAL::StopWriting() { if (!writing) return DataStatus(DataStatus::WriteStopError, EARCLOGIC, "Not writing"); writing = false; if (!buffer) return DataStatus(DataStatus::WriteStopError, EARCLOGIC, "Not writing"); // If the writing is not finished, trigger writing error if (!buffer->eof_write()) buffer->error_write(true); // Wait until the writing thread finishes logger.msg(DEBUG, "StopWriting starts waiting for transfer_condition."); transfer_condition.wait(); logger.msg(DEBUG, "StopWriting finished waiting for transfer_condition."); // Close the file if not done already if (fd != -1) { int r; { GFALEnvLocker gfal_lock(usercfg, lfc_host); r = gfal_close(fd); } if (r < 0) { logger.msg(WARNING, "gfal_close failed: %s", StrError(gfal_posix_code_error())); } fd = -1; } // If there was an error (maybe we triggered it) if (buffer->error_write()) { buffer = NULL; return DataStatus::WriteError; } buffer = NULL; return DataStatus::Success; } DataStatus DataPointGFAL::do_stat(const URL& stat_url, FileInfo& file, DataPointInfoType verb) { struct stat st; int res; { GFALEnvLocker gfal_lock(usercfg, lfc_host); res = gfal_stat(GFALUtils::GFALURL(stat_url).c_str(), &st); } if (res < 0) { logger.msg(VERBOSE, "gfal_stat failed: %s", StrError(gfal_posix_code_error())); int error_no = GFALUtils::HandleGFALError(logger); return DataStatus(DataStatus::StatError, error_no); } if(S_ISREG(st.st_mode)) { file.SetType(FileInfo::file_type_file); } else if(S_ISDIR(st.st_mode)) { file.SetType(FileInfo::file_type_dir); } else { file.SetType(FileInfo::file_type_unknown); } std::string path = stat_url.Path(); // For SRM the path can be given as SFN HTTP Option if ((stat_url.Protocol() == "srm" && !stat_url.HTTPOption("SFN").empty())) path = stat_url.HTTPOption("SFN"); std::string name = Glib::path_get_basename(path); file.SetName(name); file.SetSize(st.st_size); file.SetModified(st.st_mtime); file.SetMetaData("atime", (Time(st.st_atime)).str()); 
file.SetMetaData("ctime", (Time(st.st_ctime)).str()); std::string perms; if (st.st_mode & S_IRUSR) perms += 'r'; else perms += '-'; if (st.st_mode & S_IWUSR) perms += 'w'; else perms += '-'; if (st.st_mode & S_IXUSR) perms += 'x'; else perms += '-'; if (st.st_mode & S_IRGRP) perms += 'r'; else perms += '-'; if (st.st_mode & S_IWGRP) perms += 'w'; else perms += '-'; if (st.st_mode & S_IXGRP) perms += 'x'; else perms += '-'; if (st.st_mode & S_IROTH) perms += 'r'; else perms += '-'; if (st.st_mode & S_IWOTH) perms += 'w'; else perms += '-'; if (st.st_mode & S_IXOTH) perms += 'x'; else perms += '-'; file.SetMetaData("accessperm", perms); if (verb & INFO_TYPE_STRUCT) { char replicas[65536]; ssize_t r; { GFALEnvLocker gfal_lock(usercfg, lfc_host); r = gfal_getxattr(GFALUtils::GFALURL(stat_url).c_str(), "user.replicas", replicas, sizeof(replicas)); } if (r < 0) { logger.msg(VERBOSE, "gfal_getxattr failed, no replica information can be obtained: %s", StrError(gfal_posix_code_error())); } else { std::vector<std::string> reps; tokenize(replicas, reps, "\n"); for (std::vector<std::string>::const_iterator u = reps.begin(); u != reps.end(); ++u) { file.AddURL(URL(*u)); } } } return DataStatus::Success; } DataStatus DataPointGFAL::Check(bool check_meta) { if (reading) return DataStatus(DataStatus::IsReadingError, EARCLOGIC); if (writing) return DataStatus(DataStatus::IsWritingError, EARCLOGIC); FileInfo file; DataStatus status_from_stat = do_stat(url, file, (DataPointInfoType)(INFO_TYPE_ACCESS | INFO_TYPE_CONTENT)); if (!status_from_stat) { return DataStatus(DataStatus::CheckError, status_from_stat.GetErrno()); } SetSize(file.GetSize()); SetModified(file.GetModified()); return DataStatus::Success; } DataStatus DataPointGFAL::Stat(FileInfo& file, DataPointInfoType verb) { return do_stat(url, file, verb); } DataStatus DataPointGFAL::List(std::list<FileInfo>& files, DataPointInfoType verb) { // Open the directory struct dirent *d; DIR *dir; { GFALEnvLocker gfal_lock(usercfg, lfc_host); dir = gfal_opendir(GFALUtils::GFALURL(url).c_str()); } if (!dir) { logger.msg(VERBOSE, "gfal_opendir failed: %s", StrError(gfal_posix_code_error())); int error_no = GFALUtils::HandleGFALError(logger); return DataStatus(DataStatus::ListError, error_no); } // Loop over the content of the directory while ((d = gfal_readdir (dir))) { // Create a new FileInfo object and add it to the list of files std::list<FileInfo>::iterator f = files.insert(files.end(), FileInfo(d->d_name)); // If information about times, type or access was also requested, do a stat if (verb & (INFO_TYPE_TIMES | INFO_TYPE_ACCESS | INFO_TYPE_TYPE)) { URL child_url = URL(url.plainstr() + '/' + d->d_name); logger.msg(DEBUG, "List will stat the URL %s", child_url.plainstr()); do_stat(child_url, *f, verb); } } // Then close the dir if (gfal_closedir (dir) < 0) { logger.msg(WARNING, "gfal_closedir failed: %s", StrError(gfal_posix_code_error())); int error_no = GFALUtils::HandleGFALError(logger); return DataStatus(DataStatus::ListError, error_no); } return DataStatus::Success; } DataStatus DataPointGFAL::Remove() { if (reading) return DataStatus(DataStatus::IsReadingError, EARCLOGIC); if (writing) return DataStatus(DataStatus::IsWritingError, EARCLOGIC); FileInfo file; DataStatus status_from_stat = do_stat(url, file, (DataPointInfoType)(INFO_TYPE_TYPE)); if (!status_from_stat) return DataStatus(DataStatus::DeleteError, status_from_stat.GetErrno()); int res; { GFALEnvLocker gfal_lock(usercfg, lfc_host); if (file.GetType() == FileInfo::file_type_dir) { res = gfal_rmdir(GFALUtils::GFALURL(url).c_str()); } else { 
res = gfal_unlink(GFALUtils::GFALURL(url).c_str()); } } if (res < 0) { if (file.GetType() == FileInfo::file_type_dir) { logger.msg(VERBOSE, "gfal_rmdir failed: %s", StrError(gfal_posix_code_error())); } else { logger.msg(VERBOSE, "gfal_unlink failed: %s", StrError(gfal_posix_code_error())); } int error_no = GFALUtils::HandleGFALError(logger); return DataStatus(DataStatus::DeleteError, error_no); } return DataStatus::Success; } DataStatus DataPointGFAL::CreateDirectory(bool with_parents) { int res; { GFALEnvLocker gfal_lock(usercfg, lfc_host); // gfal_mkdir is always recursive res = gfal_mkdir(GFALUtils::GFALURL(url).c_str(), 0700); } if (res < 0) { logger.msg(VERBOSE, "gfal_mkdir failed: %s", StrError(gfal_posix_code_error())); int error_no = GFALUtils::HandleGFALError(logger); return DataStatus(DataStatus::CreateDirectoryError, error_no); } return DataStatus::Success; } DataStatus DataPointGFAL::Rename(const URL& newurl) { int res; { GFALEnvLocker gfal_lock(usercfg, lfc_host); res = gfal_rename(GFALUtils::GFALURL(url).c_str(), GFALUtils::GFALURL(newurl).c_str()); } if (res < 0) { logger.msg(VERBOSE, "gfal_rename failed: %s", StrError(gfal_posix_code_error())); int error_no = GFALUtils::HandleGFALError(logger); return DataStatus(DataStatus::RenameError, error_no); } return DataStatus::Success; } DataStatus DataPointGFAL::Transfer3rdParty(const URL& source, const URL& destination, DataPoint::Callback3rdParty callback) { if (source.Protocol() == "lfc") lfc_host = source.Host(); GFALEnvLocker gfal_lock(usercfg, lfc_host); GFALTransfer3rdParty transfer(source, destination, usercfg, callback); return transfer.Transfer(); } bool DataPointGFAL::RequiresCredentialsInFile() const { return true; } } // namespace ArcDMCGFAL extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "gfal2", "HED:DMC", "Grid File Access Library 2", 0, &ArcDMCGFAL::DataPointGFAL::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/dmc/gfal/PaxHeaders.7502/README0000644000000000000000000000012411652253425021065 xustar000000000000000027 mtime=1319720725.443975 27 atime=1513200575.267711 30 ctime=1513200660.983759518 nordugrid-arc-5.4.2/src/hed/dmc/gfal/README0000644000175000002070000000004111652253425021125 0ustar00mockbuildmock00000000000000DMC which handles GFAL protocols nordugrid-arc-5.4.2/src/hed/dmc/gfal/PaxHeaders.7502/GFALUtils.cpp0000644000000000000000000000012412623077675022455 xustar000000000000000027 mtime=1447854013.304045 27 atime=1513200575.266711 30 ctime=1513200660.990759603 nordugrid-arc-5.4.2/src/hed/dmc/gfal/GFALUtils.cpp0000644000175000002070000000144412623077675022525 0ustar00mockbuildmock00000000000000#include #include "GFALUtils.h" namespace ArcDMCGFAL { using namespace Arc; std::string GFALUtils::GFALURL(const URL& u) { // LFC URLs must be converted to lfn:/path or guid:abcd... 
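    // A sketch of the mapping implemented below (example URLs are hypothetical):
    //   lfc://lfc.example.org/grid/data/file1          -> lfn:/grid/data/file1
    //   the same URL with its "guid" metadata option
    //   set to abcd1234                                -> guid:abcd1234
    //   any non-LFC URL                                -> passed through unchanged
    //                                                     via u.plainstr()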
std::string gfalurl; if (u.Protocol() != "lfc") gfalurl = u.plainstr(); else if (u.MetaDataOption("guid").empty()) gfalurl = "lfn:" + u.Path(); else gfalurl = "guid:" + u.MetaDataOption("guid"); return gfalurl; } // return error number int GFALUtils::HandleGFALError(Logger& logger) { // Set errno before error is cleared from gfal int error_no = gfal_posix_code_error(); char errbuf[2048]; gfal_posix_strerror_r(errbuf, sizeof(errbuf)); logger.msg(VERBOSE, errbuf); gfal_posix_clear_error(); return error_no; } } // namespace ArcDMCGFAL nordugrid-arc-5.4.2/src/hed/dmc/PaxHeaders.7502/ldap0000644000000000000000000000013213214316024020125 xustar000000000000000030 mtime=1513200660.871758148 30 atime=1513200668.722854169 30 ctime=1513200660.871758148 nordugrid-arc-5.4.2/src/hed/dmc/ldap/0000755000175000002070000000000013214316024020250 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/dmc/ldap/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515022247 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200595.460958148 30 ctime=1513200660.866758087 nordugrid-arc-5.4.2/src/hed/dmc/ldap/Makefile.am0000644000175000002070000000123112052416515022306 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libdmcldap.la libdmcldap_la_SOURCES = DataPointLDAP.cpp LDAPQuery.cpp \ DataPointLDAP.h LDAPQuery.h libdmcldap_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) if WIN32 THR_LIBS = else THR_LIBS = -lpthread endif libdmcldap_la_LIBADD = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(LDAP_LIBS) $(THR_LIBS) libdmcldap_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/dmc/ldap/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315723022254 xustar000000000000000030 mtime=1513200595.506958711 30 atime=1513200649.033613362 30 ctime=1513200660.867758099 nordugrid-arc-5.4.2/src/hed/dmc/ldap/Makefile.in0000644000175000002070000006443313214315723022334 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
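# The @...@ tokens below are substitution placeholders that ./config.status
# fills in when this Makefile is generated; for instance @LDAP_LIBS@ becomes
# whatever LDAP linker flags configure detected (system-dependent, mentioned
# here only as an assumption), and the @WIN32_TRUE@/@WIN32_FALSE@ prefixes
# select the THR_LIBS value (-lpthread on non-Windows builds) declared in the
# corresponding Makefile.am.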
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/dmc/ldap DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libdmcldap_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libdmcldap_la_OBJECTS = libdmcldap_la-DataPointLDAP.lo \ libdmcldap_la-LDAPQuery.lo libdmcldap_la_OBJECTS = $(am_libdmcldap_la_OBJECTS) libdmcldap_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libdmcldap_la_CXXFLAGS) \ $(CXXFLAGS) $(libdmcldap_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ 
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libdmcldap_la_SOURCES) DIST_SOURCES = $(libdmcldap_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ 
GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = 
@XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libdmcldap.la libdmcldap_la_SOURCES = DataPointLDAP.cpp LDAPQuery.cpp \ DataPointLDAP.h LDAPQuery.h libdmcldap_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) @WIN32_FALSE@THR_LIBS = -lpthread @WIN32_TRUE@THR_LIBS = libdmcldap_la_LIBADD = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(LDAP_LIBS) $(THR_LIBS) libdmcldap_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign 
src/hed/dmc/ldap/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/dmc/ldap/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libdmcldap.la: $(libdmcldap_la_OBJECTS) $(libdmcldap_la_DEPENDENCIES) $(libdmcldap_la_LINK) -rpath $(pkglibdir) $(libdmcldap_la_OBJECTS) $(libdmcldap_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdmcldap_la-DataPointLDAP.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdmcldap_la-LDAPQuery.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' 
object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libdmcldap_la-DataPointLDAP.lo: DataPointLDAP.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcldap_la_CXXFLAGS) $(CXXFLAGS) -MT libdmcldap_la-DataPointLDAP.lo -MD -MP -MF $(DEPDIR)/libdmcldap_la-DataPointLDAP.Tpo -c -o libdmcldap_la-DataPointLDAP.lo `test -f 'DataPointLDAP.cpp' || echo '$(srcdir)/'`DataPointLDAP.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdmcldap_la-DataPointLDAP.Tpo $(DEPDIR)/libdmcldap_la-DataPointLDAP.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataPointLDAP.cpp' object='libdmcldap_la-DataPointLDAP.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcldap_la_CXXFLAGS) $(CXXFLAGS) -c -o libdmcldap_la-DataPointLDAP.lo `test -f 'DataPointLDAP.cpp' || echo '$(srcdir)/'`DataPointLDAP.cpp libdmcldap_la-LDAPQuery.lo: LDAPQuery.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcldap_la_CXXFLAGS) $(CXXFLAGS) -MT libdmcldap_la-LDAPQuery.lo -MD -MP -MF $(DEPDIR)/libdmcldap_la-LDAPQuery.Tpo -c -o libdmcldap_la-LDAPQuery.lo `test -f 'LDAPQuery.cpp' || echo '$(srcdir)/'`LDAPQuery.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdmcldap_la-LDAPQuery.Tpo $(DEPDIR)/libdmcldap_la-LDAPQuery.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='LDAPQuery.cpp' object='libdmcldap_la-LDAPQuery.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcldap_la_CXXFLAGS) $(CXXFLAGS) -c -o libdmcldap_la-LDAPQuery.lo `test -f 'LDAPQuery.cpp' || echo '$(srcdir)/'`LDAPQuery.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ 
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/dmc/ldap/PaxHeaders.7502/DataPointLDAP.h0000644000000000000000000000012312074027430022702 xustar000000000000000027 mtime=1357917976.740806 26 atime=1513200575.21971 30 ctime=1513200660.870758135 nordugrid-arc-5.4.2/src/hed/dmc/ldap/DataPointLDAP.h0000644000175000002070000000406612074027430022756 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_DATAPOINTLDAP_H__ #define __ARC_DATAPOINTLDAP_H__ #include #include #include #include #include #include namespace ArcDMCLDAP { using namespace Arc; /** * LDAP is used in grids mainly to store information about grid services * or resources rather than to store data itself. This class allows access * to LDAP data through the same interface as other grid resources. * * This class is a loadable module and cannot be used directly. The DataHandle * class loads modules at runtime and should be used instead of this. 
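 *
 * A minimal usage sketch through that generic interface (illustrative only:
 * the host, port and base DN are placeholders, includes and error handling
 * are omitted, and the buffer-draining loop assumes the usual DataBuffer
 * for_write()/is_written() consumer calls):
 * @code
 * Arc::UserConfig usercfg;
 * Arc::URL url("ldap://index.example.org:2135/o=grid??sub?(objectclass=*)");
 * Arc::DataHandle handle(url, usercfg);  // loads this DMC at runtime
 * Arc::DataBuffer buffer;
 * if (handle && handle->StartReading(buffer)) {
 *   int h; unsigned int length; unsigned long long int offset;
 *   while (buffer.for_write(h, length, offset, true)) {
 *     std::cout.write(buffer[h], length);  // the result arrives as one XML document
 *     buffer.is_written(h);
 *   }
 *   handle->StopReading();
 * }
 * @endcode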
*/ class DataPointLDAP : public DataPointDirect { public: DataPointLDAP(const URL& url, const UserConfig& usercfg, PluginArgument* parg); virtual ~DataPointLDAP(); static Plugin* Instance(PluginArgument *arg); virtual DataStatus StartReading(DataBuffer& buffer); virtual DataStatus StartWriting(DataBuffer& buffer, DataCallback *space_cb = NULL); virtual DataStatus StopReading(); virtual DataStatus StopWriting(); virtual DataStatus Check(bool check_meta); virtual DataStatus Remove(); virtual DataStatus CreateDirectory(bool with_parents=false) { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); }; virtual DataStatus Rename(const URL& newurl) { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); }; virtual DataStatus Stat(FileInfo& file, DataPoint::DataPointInfoType verb); virtual DataStatus List(std::list& file, DataPoint::DataPointInfoType verb); virtual bool RequiresCredentials() const { return false; }; private: XMLNode node; XMLNode entry; std::map dn_cache; SimpleCounter thread_cnt; static void CallBack(const std::string& attr, const std::string& value, void *arg); static void ReadThread(void *arg); static Logger logger; }; } // namespace ArcDMCLDAP #endif // __ARC_DATAPOINTLDAP_H__ nordugrid-arc-5.4.2/src/hed/dmc/ldap/PaxHeaders.7502/LDAPQuery.h0000644000000000000000000000012312074027430022124 xustar000000000000000027 mtime=1357917976.740806 26 atime=1513200575.22071 30 ctime=1513200660.871758148 nordugrid-arc-5.4.2/src/hed/dmc/ldap/LDAPQuery.h0000644000175000002070000000456112074027430022200 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_LDAPQUERY_H__ #define __ARC_LDAPQUERY_H__ #include #include #ifdef WIN32 #include #endif #ifdef USE_WIN32_LDAP_API #include #else #include #endif #include #include #define SASLMECH "GSI-GSSAPI" /** * LDAP callback type. Your ldap callbacks should be of same structure. */ typedef void (*ldap_callback)(const std::string& attr, const std::string& value, void *ref); namespace ArcDMCLDAP { using namespace Arc; /** * LDAPQuery class; querying of LDAP servers. */ class LDAPQuery { public: /** * Constructs a new LDAPQuery object and sets connection options. * The connection is first established when calling Query. */ LDAPQuery(const std::string& ldaphost, int ldapport, int timeout, bool anonymous = true, const std::string& usersn = ""); /** * Destructor. Will disconnect from the ldapserver if still connected. */ ~LDAPQuery(); /** * Queries the ldap server. * @return 0: success, 1: timeout, -1: other error */ int Query(const std::string& base, const std::string& filter = "(objectclass=*)", const std::list& attributes = std::list(), URL::Scope scope = URL::subtree); /** * Retrieves the result of the query from the ldap-server. 
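 *
 * A typical call sequence, as a brief sketch (host, port, base DN and
 * timeout are placeholders; headers omitted):
 * @code
 * void print_attr(const std::string& attr, const std::string& value, void*) {
 *   std::cout << attr << ": " << value << std::endl;
 * }
 *
 * LDAPQuery q("ldap.example.org", 2135, 20);
 * if (q.Query("o=grid") == 0)      // default filter and scope
 *   q.Result(&print_attr, NULL);   // called for the DN and every attribute value
 * @endcode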
* @return 0: success, 1: timeout, -1: other error */ int Result(ldap_callback callback, void *ref); private: int Connect(); bool SetConnectionOptions(int version); int HandleResult(ldap_callback callback, void *ref); void HandleSearchEntry(LDAPMessage *msg, ldap_callback callback, void *ref); std::string host; int port; bool anonymous; std::string usersn; int timeout; ldap *connection; #ifdef USE_WIN32_LDAP_API ULONG messageid; #else int messageid; #endif static Logger logger; /* * Note that the last pointer is holding allocating memory * that must be freed */ friend int my_sasl_interact(ldap*, unsigned int, void*, void*); }; } // end namespace #endif // __ARC_LDAPQUERY_H__ nordugrid-arc-5.4.2/src/hed/dmc/ldap/PaxHeaders.7502/LDAPQuery.cpp0000644000000000000000000000012312311561662022463 xustar000000000000000027 mtime=1395057586.980478 26 atime=1513200575.21971 30 ctime=1513200660.869758123 nordugrid-arc-5.4.2/src/hed/dmc/ldap/LDAPQuery.cpp0000644000175000002070000003777512311561662022554 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include "config.h" #endif #include #include #include #include #ifdef USE_WIN32_LDAP_API #include #include #define timeval l_timeval # ifndef ldap_msgtype # define ldap_msgtype(m) ((m)->lm_msgtype) # endif # ifndef ldap_first_message # define ldap_first_message ldap_first_entry # endif # ifndef ldap_next_message # define ldap_next_message ldap_next_entry # endif # ifndef ldap_unbind_ext # define ldap_unbind_ext(l,s,c) ldap_unbind(l) # endif #else #include #endif #include #include #ifdef HAVE_SASL_H #include #endif #ifdef HAVE_SASL_SASL_H #include #endif #ifndef LDAP_SASL_QUIET #define LDAP_SASL_QUIET 0 /* Does not exist in Solaris LDAP */ #endif #ifndef LDAP_OPT_SUCCESS #define LDAP_OPT_SUCCESS LDAP_SUCCESS #endif #include #include #include #include "LDAPQuery.h" namespace ArcDMCLDAP { using namespace Arc; Logger LDAPQuery::logger(Logger::rootLogger, "LDAPQuery"); class ldap_bind_arg { public: LDAP *connection; LogLevel loglevel; SimpleCondition cond; bool valid; bool anonymous; std::string usersn; ldap_bind_arg(void):connection(NULL), loglevel(WARNING), valid(false), anonymous(true), count(2) { }; bool release(void) { bool freeit = false; cond.lock(); freeit = ((--count) <= 0); cond.unlock(); if(freeit) { if(connection) ldap_unbind_ext(connection,NULL,NULL); delete this; } return freeit; }; private: int count; ~ldap_bind_arg(void) { }; }; static void ldap_bind_with_timeout(void *arg); #if defined (HAVE_SASL_H) || defined (HAVE_SASL_SASL_H) class sasl_defaults { public: sasl_defaults(ldap *ld, const std::string& mech, const std::string& realm, const std::string& authcid, const std::string& authzid, const std::string& passwd); ~sasl_defaults() {} private: std::string p_mech; std::string p_realm; std::string p_authcid; std::string p_authzid; std::string p_passwd; friend int my_sasl_interact(ldap *ld, unsigned int flags, void *defaults_, void *interact_); }; sasl_defaults::sasl_defaults(ldap *ld, const std::string& mech, const std::string& realm, const std::string& authcid, const std::string& authzid, const std::string& passwd) : p_mech(mech), p_realm(realm), p_authcid(authcid), p_authzid(authzid), p_passwd(passwd) { if (p_mech.empty()) { char *temp; ldap_get_option(ld, LDAP_OPT_X_SASL_MECH, &temp); if (temp) { p_mech = temp; free(temp); } } if (p_realm.empty()) { char *temp; ldap_get_option(ld, LDAP_OPT_X_SASL_REALM, &temp); if (temp) { p_realm = temp; free(temp); } } if (p_authcid.empty()) { char *temp; 
ldap_get_option(ld, LDAP_OPT_X_SASL_AUTHCID, &temp); if (temp) { p_authcid = temp; free(temp); } } if (p_authzid.empty()) { char *temp; ldap_get_option(ld, LDAP_OPT_X_SASL_AUTHZID, &temp); if (temp) { p_authzid = temp; free(temp); } } } int my_sasl_interact(ldap*, unsigned int flags, void *defaults_, void *interact_) { sasl_interact_t *interact = (sasl_interact_t*)interact_; sasl_defaults *defaults = (sasl_defaults*)defaults_; if (flags == LDAP_SASL_INTERACTIVE) LDAPQuery::logger.msg(DEBUG, "SASL Interaction"); while (interact->id != SASL_CB_LIST_END) { bool noecho = false; bool challenge = false; bool use_default = false; switch (interact->id) { case SASL_CB_GETREALM: if (defaults && !defaults->p_realm.empty()) interact->defresult = strdup(defaults->p_realm.c_str()); break; case SASL_CB_AUTHNAME: if (defaults && !defaults->p_authcid.empty()) interact->defresult = strdup(defaults->p_authcid.c_str()); break; case SASL_CB_USER: if (defaults && !defaults->p_authzid.empty()) interact->defresult = strdup(defaults->p_authzid.c_str()); break; case SASL_CB_PASS: if (defaults && !defaults->p_passwd.empty()) interact->defresult = strdup(defaults->p_passwd.c_str()); noecho = true; break; case SASL_CB_NOECHOPROMPT: noecho = true; challenge = true; break; case SASL_CB_ECHOPROMPT: challenge = true; break; } if (flags != LDAP_SASL_INTERACTIVE && (interact->defresult || interact->id == SASL_CB_USER)) use_default = true; else { if (flags == LDAP_SASL_QUIET) return 1; if (challenge && interact->challenge) LDAPQuery::logger.msg(DEBUG, "Challenge: %s", interact->challenge); if (interact->defresult) LDAPQuery::logger.msg(DEBUG, "Default: %s", interact->defresult); std::string prompt; std::string input; prompt = interact->prompt ? std::string(interact->prompt) + ": " : "Interact: "; if (noecho) input = getpass(prompt.c_str()); else { std::cout << prompt; std::cin >> input; } if (input.empty()) use_default = true; else { interact->result = strdup(input.c_str()); interact->len = input.length(); } } if (use_default) { interact->result = strdup(interact->defresult ? interact->defresult : ""); interact->len = strlen((char*)interact->result); } if (defaults && interact->id == SASL_CB_PASS) // clear default password after first use defaults->p_passwd = ""; interact++; } return 0; } #endif LDAPQuery::LDAPQuery(const std::string& ldaphost, int ldapport, int timeout, bool anonymous, const std::string& usersn) : host(ldaphost), port(ldapport), anonymous(anonymous), usersn(usersn), timeout(timeout), connection(NULL), messageid(0) {} LDAPQuery::~LDAPQuery() { if (connection) { ldap_unbind_ext(connection, NULL, NULL); connection = NULL; } } // Lock to protect thread-unsafe OpenLDAP functions - // currently ldap_initialize. // TODO: investigate if OpenLDAP unloads cleanly and if // not make this plugin persistent. 
static Glib::Mutex* ldap_lock(void) { static Glib::Mutex* lock = new Glib::Mutex; return lock; } int LDAPQuery::Connect() { const int version = LDAP_VERSION3; logger.msg(VERBOSE, "LDAPQuery: Initializing connection to %s:%d", host, port); if (connection) { logger.msg(ERROR, "LDAP connection already open to %s", host); return -1; } ldap_lock()->lock(); #ifdef HAVE_LDAP_INITIALIZE ldap_initialize(&connection, ("ldap://" + host + ':' + tostring(port)).c_str()); #else #ifdef USE_WIN32_LDAP_API connection = ldap_init(const_cast(host.c_str()), port); #else connection = ldap_init(host.c_str(), port); #endif #endif ldap_lock()->unlock(); if (!connection) { logger.msg(ERROR, "Could not open LDAP connection to %s", host); return -1; } if (!SetConnectionOptions(version)) { ldap_unbind_ext(connection, NULL, NULL); connection = NULL; return -1; } ldap_bind_arg* arg = new ldap_bind_arg; arg->connection = connection; arg->loglevel = logger.getThreshold(); arg->valid = true; arg->anonymous = anonymous; arg->usersn = usersn; if (!Arc::CreateThreadFunction(&ldap_bind_with_timeout, arg)) { arg->release(); arg->release(); connection = NULL; logger.msg(ERROR, "Failed to create ldap bind thread (%s)", host); return -1; } if (!arg->cond.wait(1000 * (timeout + 1))) { arg->release(); connection = NULL; logger.msg(ERROR, "Ldap bind timeout (%s)", host); return 1; } if (!arg->valid) { arg->release(); connection = NULL; logger.msg(VERBOSE, "Failed to bind to ldap server (%s)", host); return -1; } arg->connection = NULL; // keep connection up arg->release(); return 0; } bool LDAPQuery::SetConnectionOptions(int version) { timeval tout; tout.tv_sec = timeout; tout.tv_usec = 0; #ifdef LDAP_OPT_NETWORK_TIMEOUT // solaris does not have LDAP_OPT_NETWORK_TIMEOUT if (ldap_set_option(connection, LDAP_OPT_NETWORK_TIMEOUT, &tout) != LDAP_OPT_SUCCESS) { logger.msg(ERROR, "Could not set LDAP network timeout (%s)", host); return false; } #endif if (ldap_set_option(connection, LDAP_OPT_TIMELIMIT, &timeout) != LDAP_OPT_SUCCESS) { logger.msg(ERROR, "Could not set LDAP timelimit (%s)", host); return false; } if (ldap_set_option(connection, LDAP_OPT_PROTOCOL_VERSION, &version) != LDAP_OPT_SUCCESS) { logger.msg(ERROR, "Could not set LDAP protocol version (%s)", host); return false; } return true; } static void ldap_bind_with_timeout(void *arg_) { ldap_bind_arg *arg = (ldap_bind_arg*)arg_; int ldresult = 0; if (arg->anonymous) { #ifdef USE_WIN32_LDAP_API ldresult = ldap_simple_bind_s(arg->connection, NULL, NULL); #else BerValue cred = { 0, const_cast("") }; ldresult = ldap_sasl_bind_s(arg->connection, NULL, LDAP_SASL_SIMPLE, &cred, NULL, NULL, NULL); #endif } else { #if defined (HAVE_SASL_H) || defined (HAVE_SASL_SASL_H) int ldapflag = LDAP_SASL_QUIET; #ifdef LDAP_SASL_AUTOMATIC // solaris does not have LDAP_SASL_AUTOMATIC if (arg->loglevel >= VERBOSE) ldapflag = LDAP_SASL_AUTOMATIC; #endif sasl_defaults defaults = sasl_defaults(arg->connection, SASLMECH, "", "", arg->usersn, ""); ldresult = ldap_sasl_interactive_bind_s(arg->connection, NULL, SASLMECH, NULL, NULL, ldapflag, my_sasl_interact, &defaults); #else #ifdef USE_WIN32_LDAP_API ldresult = ldap_simple_bind_s(arg->connection, NULL, NULL); #else BerValue cred = { 0, const_cast("") }; ldresult = ldap_sasl_bind_s(arg->connection, NULL, LDAP_SASL_SIMPLE, &cred, NULL, NULL, NULL); #endif #endif } if (ldresult != LDAP_SUCCESS) { arg->valid = false; } else { arg->valid = true; } arg->cond.signal(); arg->release(); } int LDAPQuery::Query(const std::string& base, const std::string& filter, 
const std::list& attributes, URL::Scope scope) { int res = Connect(); if (res != 0) return res; logger.msg(VERBOSE, "LDAPQuery: Querying %s", host); logger.msg(DEBUG, " base dn: %s", base); if (!filter.empty()) logger.msg(DEBUG, " filter: %s", filter); if (!attributes.empty()) { logger.msg(DEBUG, " attributes:"); for (std::list::const_iterator vs = attributes.begin(); vs != attributes.end(); vs++) logger.msg(DEBUG, " %s", *vs); } timeval tout; tout.tv_sec = timeout; tout.tv_usec = 0; char *filt = (char*)filter.c_str(); char **attrs; if (attributes.empty()) attrs = NULL; else { attrs = new char*[attributes.size() + 1]; int i = 0; for (std::list::const_iterator vs = attributes.begin(); vs != attributes.end(); vs++, i++) attrs[i] = (char*)vs->c_str(); attrs[i] = NULL; } int ldresult = ldap_search_ext(connection, #ifdef USE_WIN32_LDAP_API const_cast(base.c_str()), #else base.c_str(), #endif scope, filt, attrs, 0, NULL, NULL, #ifdef USE_WIN32_LDAP_API timeout, #else &tout, #endif 0, &messageid); if (attrs) delete[] attrs; if (ldresult != LDAP_SUCCESS) { logger.msg(ERROR, "%s (%s)", ldap_err2string(ldresult), host); ldap_unbind_ext(connection, NULL, NULL); connection = NULL; return -1; } return 0; } int LDAPQuery::Result(ldap_callback callback, void *ref) { int result = HandleResult(callback, ref); ldap_unbind_ext(connection, NULL, NULL); connection = NULL; messageid = 0; return result; } int LDAPQuery::HandleResult(ldap_callback callback, void *ref) { logger.msg(VERBOSE, "LDAPQuery: Getting results from %s", host); if (!messageid) { logger.msg(ERROR, "Error: no LDAP query started to %s", host); return -1; } timeval tout; tout.tv_sec = timeout; tout.tv_usec = 0; bool done = false; int ldresult = 0; LDAPMessage *res = NULL; while (!done && (ldresult = ldap_result(connection, messageid, LDAP_MSG_ONE, &tout, &res)) > 0) { #ifdef USE_WIN32_LDAP_API if (ldap_count_entries(connection, res) == 0) { done = true; continue; } #endif for (LDAPMessage *msg = ldap_first_message(connection, res); msg; msg = ldap_next_message(connection, msg)) { switch (ldap_msgtype(msg)) { case LDAP_RES_SEARCH_ENTRY: HandleSearchEntry(msg, callback, ref); break; case LDAP_RES_SEARCH_RESULT: done = true; break; } // switch } // for ldap_msgfree(res); } if (ldresult == 0) { logger.msg(ERROR, "LDAP query timed out: %s", host); return 1; } if (ldresult == -1) { logger.msg(ERROR, "%s (%s)", ldap_err2string(ldresult), host); return -1; } return 0; } void LDAPQuery::HandleSearchEntry(LDAPMessage *msg, ldap_callback callback, void *ref) { char *dn = ldap_get_dn(connection, msg); callback("dn", dn, ref); if (dn) ldap_memfree(dn); BerElement *ber = NULL; for (char *attr = ldap_first_attribute(connection, msg, &ber); attr; attr = ldap_next_attribute(connection, msg, ber)) { BerValue **bval; if ((bval = ldap_get_values_len(connection, msg, attr))) { for (int i = 0; bval[i]; i++) callback(attr, (bval[i]->bv_val ? 
bval[i]->bv_val : ""), ref); ber_bvecfree(bval); } ldap_memfree(attr); } if (ber) ber_free(ber, 0); } } // namespace Arc nordugrid-arc-5.4.2/src/hed/dmc/ldap/PaxHeaders.7502/DataPointLDAP.cpp0000644000000000000000000000012312675602216023245 xustar000000000000000027 mtime=1459029134.924374 26 atime=1513200575.21671 30 ctime=1513200660.868758111 nordugrid-arc-5.4.2/src/hed/dmc/ldap/DataPointLDAP.cpp0000644000175000002070000001363312675602216023321 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "DataPointLDAP.h" #include "LDAPQuery.h" namespace ArcDMCLDAP { using namespace Arc; Logger DataPointLDAP::logger(Logger::getRootLogger(), "DataPoint.LDAP"); DataPointLDAP::DataPointLDAP(const URL& url, const UserConfig& usercfg, PluginArgument* parg) : DataPointDirect(url, usercfg, parg) {} DataPointLDAP::~DataPointLDAP() { StopReading(); StopWriting(); } Plugin* DataPointLDAP::Instance(PluginArgument *arg) { DataPointPluginArgument *dmcarg = dynamic_cast(arg); if (!dmcarg) return NULL; if (((const URL&)(*dmcarg)).Protocol() != "ldap") return NULL; Glib::Module* module = dmcarg->get_module(); PluginsFactory* factory = dmcarg->get_factory(); if(!(factory && module)) { logger.msg(ERROR, "Missing reference to factory and/or module. Currently safe unloading of LDAP DMC is not supported. Report to developers."); return NULL; } // It looks like this DMC can't ensure all started threads are stopped // by its destructor. So current hackish solution is to keep code in // memory. // TODO: handle threads properly // TODO: provide generic solution for holding plugin in memory as long // as plugins/related obbjects are still active. factory->makePersistent(module); return new DataPointLDAP(*dmcarg, *dmcarg, dmcarg); } DataStatus DataPointLDAP::Check(bool check_meta) { return DataStatus::Success; } DataStatus DataPointLDAP::Remove() { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); } void DataPointLDAP::CallBack(const std::string& attr, const std::string& value, void *ref) { DataPointLDAP& point = *(DataPointLDAP*)ref; if (attr == "dn") { point.entry = point.node; std::string path = ""; std::string attr_val = ""; std::string::size_type pos_o = value.size(); while (pos_o != std::string::npos) { std::string::size_type pos_n = (pos_o>0)?value.rfind(',', pos_o-1):std::string::npos; if(pos_n == std::string::npos) { attr_val = value.substr(0,pos_o); } else { attr_val = value.substr(pos_n+1,pos_o-pos_n-1); } pos_o = pos_n; attr_val = trim(attr_val, " "); path+=attr_val+","; std::map::iterator c_path = point.dn_cache.find(path); if (c_path != point.dn_cache.end()) { point.entry = c_path->second; } else { std::string::size_type pos_eq = attr_val.find('='); if(pos_eq != std::string::npos) { point.entry = point.entry.NewChild(trim(attr_val.substr(0, pos_eq), " ")) = trim(attr_val.substr(pos_eq + 1), " "); } else { point.entry = point.entry.NewChild(trim(attr_val, " ")); }; point.dn_cache.insert(std::make_pair(path, point.entry)); } } } else { point.entry.NewChild(attr) = value; } } DataStatus DataPointLDAP::StartReading(DataBuffer& buf) { if (buffer) return DataStatus::IsReadingError; buffer = &buf; LDAPQuery q(url.Host(), url.Port(), usercfg.Timeout()); int res = q.Query(url.Path(), url.LDAPFilter(), url.LDAPAttributes(), url.LDAPScope()); if (res != 0) { buffer = NULL; return DataStatus(DataStatus::ReadStartError, (res == 1) ? 
ETIMEDOUT : ECONNREFUSED); } NS ns; XMLNode(ns, "LDAPQueryResult").New(node); res = q.Result(CallBack, this); if (res != 0) { buffer = NULL; return DataStatus(DataStatus::ReadStartError, (res == 1) ? ETIMEDOUT : ECONNREFUSED); } if(!CreateThreadFunction(&ReadThread, this, &thread_cnt)) { buffer = NULL; return DataStatus(DataStatus::ReadStartError); } return DataStatus::Success; } DataStatus DataPointLDAP::StopReading() { if (!buffer) return DataStatus::ReadStopError; if(!buffer->eof_read()) buffer->error_read(true); buffer = NULL; thread_cnt.wait(); return DataStatus::Success; } DataStatus DataPointLDAP::StartWriting(DataBuffer&, DataCallback*) { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); } DataStatus DataPointLDAP::StopWriting() { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); } DataStatus DataPointLDAP::Stat(FileInfo& file, DataPoint::DataPointInfoType verb) { return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); } DataStatus DataPointLDAP::List(std::list& file, DataPoint::DataPointInfoType verb) { // TODO: Implement through Read return DataStatus(DataStatus::UnimplementedError, EOPNOTSUPP); } void DataPointLDAP::ReadThread(void *arg) { DataPointLDAP& point = *(DataPointLDAP*)arg; std::string text; point.node.GetDoc(text); std::string::size_type length = text.size(); unsigned long long int pos = 0; int transfer_handle = -1; do { unsigned int transfer_size = 0; if(!point.buffer->for_read(transfer_handle, transfer_size, true)) break; if (length < transfer_size) transfer_size = length; memcpy((*point.buffer)[transfer_handle], &text[pos], transfer_size); point.buffer->is_read(transfer_handle, transfer_size, pos); length -= transfer_size; pos += transfer_size; } while (length > 0); point.buffer->eof_read(true); } } // namespace ArcDMCLDAP extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "ldap", "HED:DMC", "Lightweight Directory Access Protocol", 0, &ArcDMCLDAP::DataPointLDAP::Instance }, { NULL, NULL, NULL, 0, NULL } }; extern "C" { void ARC_MODULE_CONSTRUCTOR_NAME(Glib::Module* module, Arc::ModuleManager* manager) { if(manager && module) { manager->makePersistent(module); }; } } nordugrid-arc-5.4.2/src/hed/dmc/ldap/PaxHeaders.7502/README0000644000000000000000000000012311001653037021062 xustar000000000000000027 mtime=1208440351.928622 26 atime=1513200575.22071 30 ctime=1513200660.865758074 nordugrid-arc-5.4.2/src/hed/dmc/ldap/README0000644000175000002070000000006411001653037021130 0ustar00mockbuildmock00000000000000DMC handles queries against LDAP (ldap:// protocol) nordugrid-arc-5.4.2/src/hed/dmc/PaxHeaders.7502/xrootd0000644000000000000000000000013213214316024020524 xustar000000000000000030 mtime=1513200660.963759273 30 atime=1513200668.722854169 30 ctime=1513200660.963759273 nordugrid-arc-5.4.2/src/hed/dmc/xrootd/0000755000175000002070000000000013214316024020647 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/dmc/xrootd/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515022646 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200595.823962588 30 ctime=1513200660.959759224 nordugrid-arc-5.4.2/src/hed/dmc/xrootd/Makefile.am0000644000175000002070000000111612052416515022707 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libdmcxrootd.la libdmcxrootd_la_SOURCES = DataPointXrootd.cpp DataPointXrootd.h libdmcxrootd_la_CXXFLAGS = -I$(top_srcdir)/include $(XROOTD_CPPFLAGS) \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libdmcxrootd_la_LIBADD = \ 
$(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) \ $(XROOTD_LIBS) libdmcxrootd_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/dmc/xrootd/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315723022653 xustar000000000000000030 mtime=1513200595.867963126 30 atime=1513200649.095614121 30 ctime=1513200660.961759248 nordugrid-arc-5.4.2/src/hed/dmc/xrootd/Makefile.in0000644000175000002070000006220613214315723022727 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/dmc/xrootd DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; 
n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libdmcxrootd_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) am_libdmcxrootd_la_OBJECTS = libdmcxrootd_la-DataPointXrootd.lo libdmcxrootd_la_OBJECTS = $(am_libdmcxrootd_la_OBJECTS) libdmcxrootd_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libdmcxrootd_la_CXXFLAGS) $(CXXFLAGS) \ $(libdmcxrootd_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libdmcxrootd_la_SOURCES) DIST_SOURCES = $(libdmcxrootd_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ 
AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ 
OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ 
sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libdmcxrootd.la libdmcxrootd_la_SOURCES = DataPointXrootd.cpp DataPointXrootd.h libdmcxrootd_la_CXXFLAGS = -I$(top_srcdir)/include $(XROOTD_CPPFLAGS) \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) libdmcxrootd_la_LIBADD = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) \ $(XROOTD_LIBS) libdmcxrootd_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/dmc/xrootd/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/dmc/xrootd/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libdmcxrootd.la: $(libdmcxrootd_la_OBJECTS) $(libdmcxrootd_la_DEPENDENCIES) $(libdmcxrootd_la_LINK) -rpath $(pkglibdir) $(libdmcxrootd_la_OBJECTS) $(libdmcxrootd_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f 
*.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdmcxrootd_la-DataPointXrootd.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libdmcxrootd_la-DataPointXrootd.lo: DataPointXrootd.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcxrootd_la_CXXFLAGS) $(CXXFLAGS) -MT libdmcxrootd_la-DataPointXrootd.lo -MD -MP -MF $(DEPDIR)/libdmcxrootd_la-DataPointXrootd.Tpo -c -o libdmcxrootd_la-DataPointXrootd.lo `test -f 'DataPointXrootd.cpp' || echo '$(srcdir)/'`DataPointXrootd.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdmcxrootd_la-DataPointXrootd.Tpo $(DEPDIR)/libdmcxrootd_la-DataPointXrootd.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DataPointXrootd.cpp' object='libdmcxrootd_la-DataPointXrootd.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdmcxrootd_la_CXXFLAGS) $(CXXFLAGS) -c -o libdmcxrootd_la-DataPointXrootd.lo `test -f 'DataPointXrootd.cpp' || echo '$(srcdir)/'`DataPointXrootd.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: 
CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/dmc/xrootd/PaxHeaders.7502/DataPointXrootd.h0000644000000000000000000000012312133524305024037 xustar000000000000000027 mtime=1366206661.683381 26 atime=1513200575.21371 30 ctime=1513200660.963759273 nordugrid-arc-5.4.2/src/hed/dmc/xrootd/DataPointXrootd.h0000644000175000002070000000411212133524305024103 0ustar00mockbuildmock00000000000000#ifndef __ARC_DATAPOINTXROOTD_H__ #define __ARC_DATAPOINTXROOTD_H__ #include #include #include namespace ArcDMCXrootd { using namespace Arc; /** * xrootd is a protocol for data access across large scale storage clusters. * More information can be found at http://xrootd.slac.stanford.edu/ * * This class is a loadable module and cannot be used directly. The DataHandle * class loads modules at runtime and should be used instead of this. 
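 *
 * As with the other DMCs, access goes through DataHandle; a brief
 * illustrative sketch (the redirector host and file path are placeholders,
 * includes and error handling omitted):
 * @code
 * Arc::UserConfig usercfg;
 * Arc::URL url("root://redirector.example.org:1094//store/data/file.root");
 * Arc::DataHandle handle(url, usercfg);  // dispatched to this plugin for root://
 * Arc::FileInfo info;
 * if (handle && handle->Stat(info, Arc::DataPoint::INFO_TYPE_ALL))
 *   std::cout << info.GetName() << " " << info.GetSize() << std::endl;
 * @endcode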
*/ class DataPointXrootd : public DataPointDirect { public: DataPointXrootd(const URL& url, const UserConfig& usercfg, PluginArgument* parg); virtual ~DataPointXrootd(); static Plugin* Instance(PluginArgument *arg); virtual DataStatus StartReading(DataBuffer& buffer); virtual DataStatus StartWriting(DataBuffer& buffer, DataCallback *space_cb = NULL); virtual DataStatus StopReading(); virtual DataStatus StopWriting(); virtual DataStatus Check(bool check_meta); virtual DataStatus Stat(FileInfo& file, DataPointInfoType verb = INFO_TYPE_ALL); virtual DataStatus List(std::list& files, DataPointInfoType verb = INFO_TYPE_ALL); virtual DataStatus Remove(); virtual DataStatus CreateDirectory(bool with_parents=false); virtual DataStatus Rename(const URL& newurl); virtual bool RequiresCredentialsInFile() const; private: /// thread functions for async read/write static void read_file_start(void* arg); static void write_file_start(void* arg); void read_file(); void write_file(); /// must be called everytime a new XrdClient is created void set_log_level(); /// Internal stat() DataStatus do_stat(const URL& url, FileInfo& file, DataPointInfoType verb); int fd; SimpleCondition transfer_cond; bool reading; bool writing; static Logger logger; // There must be one instance of this object per executable static XrdPosixXrootd xrdposix; }; } // namespace ArcDMCXrootd #endif /* __ARC_DATAPOINTXROOTD_H__ */ nordugrid-arc-5.4.2/src/hed/dmc/xrootd/PaxHeaders.7502/DataPointXrootd.cpp0000644000000000000000000000012312675602216024403 xustar000000000000000027 mtime=1459029134.924374 26 atime=1513200575.21471 30 ctime=1513200660.962759261 nordugrid-arc-5.4.2/src/hed/dmc/xrootd/DataPointXrootd.cpp0000644000175000002070000004055512675602216024462 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "DataPointXrootd.h" namespace ArcDMCXrootd { using namespace Arc; Logger DataPointXrootd::logger(Logger::getRootLogger(), "DataPoint.Xrootd"); XrdPosixXrootd DataPointXrootd::xrdposix; DataPointXrootd::DataPointXrootd(const URL& url, const UserConfig& usercfg, PluginArgument* parg) : DataPointDirect(url, usercfg, parg), fd(-1), reading(false), writing(false){ // set xrootd log level set_log_level(); // xrootd requires 2 slashes at the start of the URL path if (url.Path().find("//") != 0) { this->url.ChangePath(std::string("/"+url.Path())); } } DataPointXrootd::~DataPointXrootd() { StopReading(); StopWriting(); } Plugin* DataPointXrootd::Instance(PluginArgument *arg) { DataPointPluginArgument *dmcarg = dynamic_cast(arg); if (!dmcarg) return NULL; if (((const URL &)(*dmcarg)).Protocol() != "root") return NULL; return new DataPointXrootd(*dmcarg, *dmcarg, dmcarg); } void DataPointXrootd::read_file_start(void* arg) { ((DataPointXrootd*)arg)->read_file(); } void DataPointXrootd::read_file() { // TODO range reads bool do_cksum = true; unsigned long long int offset = 0; bool eof = false; for (;;) { // 1. claim buffer int h; unsigned int l; if (!buffer->for_read(h, l, true)) { // failed to get buffer - must be error or request to exit buffer->error_read(true); break; } if (buffer->error()) { buffer->is_read(h, 0, 0); break; } if (eof) { buffer->is_read(h, 0, 0); if(do_cksum) { for(std::list::iterator cksum = checksums.begin(); cksum != checksums.end(); ++cksum) { if(*cksum) (*cksum)->end(); } } break; } // 2. read, making sure not to read past eof if (size - offset < l) { l = size - offset; eof = true; if (l == 0) { // don't try to read zero bytes! 
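// announce the claimed block as empty instead; the eof flag set above ends the read loop on the next pass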
buffer->is_read(h, 0, 0); continue; } } logger.msg(DEBUG, "Reading %u bytes from byte %llu", l, offset); int res = XrdPosixXrootd::Read(fd, (*(buffer))[h], l); logger.msg(DEBUG, "Read %i bytes", res); if (res <= 0) { // error buffer->is_read(h, 0, 0); buffer->error_read(true); break; } if(do_cksum) { for(std::list::iterator cksum = checksums.begin(); cksum != checksums.end(); ++cksum) { if(*cksum) (*cksum)->add((*(buffer))[h], res); } } // 3. announce buffer->is_read(h, res, offset); offset += res; } XrdPosixXrootd::Close(fd); buffer->eof_read(true); transfer_cond.signal(); } DataStatus DataPointXrootd::StartReading(DataBuffer& buf) { if (reading) return DataStatus::IsReadingError; if (writing) return DataStatus::IsWritingError; reading = true; { CertEnvLocker env(usercfg); fd = XrdPosixXrootd::Open(url.plainstr().c_str(), O_RDONLY); if (fd == -1) { logger.msg(VERBOSE, "Could not open file %s for reading: %s", url.plainstr(), StrError(errno)); reading = false; return DataStatus(DataStatus::ReadStartError, errno); } } // It is an error to read past EOF, so we need the file size if not known if (!CheckSize()) { FileInfo f; DataStatus res = Stat(f, INFO_TYPE_CONTENT); if (!res) { reading = false; return DataStatus(DataStatus::ReadStartError, res.GetErrno(), res.GetDesc()); } if (!CheckSize()) { logger.msg(VERBOSE, "Unable to find file size of %s", url.plainstr()); reading = false; return DataStatus(DataStatus::ReadStartError, std::string("Unable to obtain file size")); } } buffer = &buf; transfer_cond.reset(); // create thread to maintain reading if(!CreateThreadFunction(&DataPointXrootd::read_file_start, this)) { XrdPosixXrootd::Close(fd); reading = false; buffer = NULL; return DataStatus::ReadStartError; } return DataStatus::Success; } DataStatus DataPointXrootd::StopReading() { if (!reading) return DataStatus::ReadStopError; reading = false; if (!buffer) return DataStatus(DataStatus::ReadStopError, EARCLOGIC, "Not reading"); if (!buffer->eof_read()) { buffer->error_read(true); /* trigger transfer error */ if (fd != -1) XrdPosixXrootd::Close(fd); fd = -1; } transfer_cond.wait(); /* wait till reading thread exited */ if (buffer->error_read()) { buffer = NULL; return DataStatus::ReadError; } buffer = NULL; return DataStatus::Success; } void DataPointXrootd::write_file_start(void *object) { ((DataPointXrootd*)object)->write_file(); } void DataPointXrootd::write_file() { int handle; unsigned int length; unsigned long long int position; unsigned long long int offset = 0; ssize_t bytes_written = 0; unsigned int chunk_offset; for (;;) { // Ask the DataBuffer for a buffer with data to write, // and the length and position where to write to if (!buffer->for_write(handle, length, position, true)) { // no more data from the buffer, has the other side finished? 
if (!buffer->eof_read()) { // the other side hasn't finished yet, must be an error buffer->error_write(true); } break; } // if the buffer gives a different position than where we currently are in the // destination, then we have to seek there if (position != offset) { logger.msg(DEBUG, "DataPointXrootd::write_file got position %d and offset %d, has to seek", position, offset); XrdPosixXrootd::Lseek(fd, position, SEEK_SET); offset = position; } // we want to write the chunk we got from the buffer, // but we may not be able to write it in one shot chunk_offset = 0; while (chunk_offset < length) { bytes_written = XrdPosixXrootd::Write(fd, (*(buffer))[handle] + chunk_offset, length - chunk_offset); if (bytes_written < 0) break; // there was an error // calculate how far we got into the chunk chunk_offset += bytes_written; // if the new chunk_offset is still less than the length of the chunk, // we have to continue writing } // we finished with writing (even if there was an error) buffer->is_written(handle); offset += length; // if there was an error during writing if (bytes_written < 0) { logger.msg(VERBOSE, "xrootd write failed: %s", StrError(errno)); buffer->error_write(true); break; } } buffer->eof_write(true); // Close the file if (fd != -1) { if (XrdPosixXrootd::Close(fd) < 0) { logger.msg(WARNING, "xrootd close failed: %s", StrError(errno)); } fd = -1; } transfer_cond.signal(); } DataStatus DataPointXrootd::StartWriting(DataBuffer& buf, DataCallback *space_cb) { if (reading) return DataStatus::IsReadingError; if (writing) return DataStatus::IsWritingError; writing = true; { CertEnvLocker env(usercfg); // Open the file fd = XrdPosixXrootd::Open(url.plainstr().c_str(), O_WRONLY | O_CREAT, 0600); } if (fd < 0) { // If the entry does not exist, try to create parent directories if (errno == ENOENT) { logger.msg(VERBOSE, "Failed to open %s, trying to create parent directories", url.plainstr()); std::string original_path(url.Path()); url.ChangePath(Glib::path_get_dirname(url.Path())); DataStatus r = CreateDirectory(true); url.ChangePath(original_path); if (!r) return r; { // Try to open again CertEnvLocker env(usercfg); fd = XrdPosixXrootd::Open(url.plainstr().c_str(), O_WRONLY | O_CREAT, 0600); } } if (fd < 0) { logger.msg(VERBOSE, "xrootd open failed: %s", StrError(errno)); writing = false; return DataStatus(DataStatus::WriteStartError, errno); } } // Remember the DataBuffer we got, the separate writing thread will use it buffer = &buf; transfer_cond.reset(); // StopWriting will wait for this condition, // which will be signalled by the separate writing thread // Create the separate writing thread if (!CreateThreadFunction(&DataPointXrootd::write_file_start, this)) { if (fd != -1 && XrdPosixXrootd::Close(fd) < 0) { logger.msg(WARNING, "close failed: %s", StrError(errno)); } writing = false; return DataStatus(DataStatus::WriteStartError, "Failed to create writing thread"); } return DataStatus::Success; } DataStatus DataPointXrootd::StopWriting() { if (!writing) return DataStatus(DataStatus::WriteStopError, EARCLOGIC, "Not writing"); writing = false; if (!buffer) return DataStatus(DataStatus::WriteStopError, EARCLOGIC, "Not writing"); // If the writing is not finished, trigger writing error if (!buffer->eof_write()) buffer->error_write(true); // Wait until the writing thread finishes logger.msg(DEBUG, "StopWriting starts waiting for transfer_condition."); transfer_cond.wait(); logger.msg(DEBUG, "StopWriting finished waiting for transfer_condition."); // Close the file if not done already if (fd != -1) { if 
(XrdPosixXrootd::Close(fd) < 0) { logger.msg(WARNING, "xrootd close failed: %s", StrError(errno)); } fd = -1; } // If there was an error (maybe we triggered it) if (buffer->error_write()) { buffer = NULL; return DataStatus::WriteError; } buffer = NULL; return DataStatus::Success; } DataStatus DataPointXrootd::Check(bool check_meta) { { CertEnvLocker env(usercfg); if (XrdPosixXrootd::Access(url.plainstr().c_str(), R_OK) != 0) { logger.msg(VERBOSE, "Read access not allowed for %s: %s", url.plainstr(), StrError(errno)); return DataStatus(DataStatus::CheckError, errno); } } if (check_meta) { FileInfo f; return do_stat(url, f, INFO_TYPE_CONTENT); } return DataStatus::Success; } DataStatus DataPointXrootd::do_stat(const URL& u, FileInfo& file, DataPointInfoType verb) { struct stat st; { CertEnvLocker env(usercfg); // When used against dcache stat returns 0 even if file does not exist // so check inode number if (XrdPosixXrootd::Stat(u.plainstr().c_str(), &st) != 0 || st.st_ino == (unsigned long long int)(-1)) { logger.msg(VERBOSE, "Could not stat file %s: %s", u.plainstr(), StrError(errno)); return DataStatus(DataStatus::StatError, errno); } } file.SetName(u.Path()); file.SetSize(st.st_size); file.SetModified(st.st_mtime); if(S_ISREG(st.st_mode)) { file.SetType(FileInfo::file_type_file); } else if(S_ISDIR(st.st_mode)) { file.SetType(FileInfo::file_type_dir); } else { file.SetType(FileInfo::file_type_unknown); } SetSize(file.GetSize()); SetModified(file.GetModified()); return DataStatus::Success; } DataStatus DataPointXrootd::Stat(FileInfo& file, DataPointInfoType verb) { return do_stat(url, file, verb); } DataStatus DataPointXrootd::List(std::list& files, DataPointInfoType verb) { DIR* dir = NULL; { CertEnvLocker env(usercfg); dir = XrdPosixXrootd::Opendir(url.plainstr().c_str()); } if (!dir) { logger.msg(VERBOSE, "Failed to open directory %s: %s", url.plainstr(), StrError(errno)); return DataStatus(DataStatus::ListError, errno); } struct dirent* entry; while ((entry = XrdPosixXrootd::Readdir(dir))) { FileInfo f; if (verb > INFO_TYPE_NAME) { std::string path = url.plainstr() + '/' + entry->d_name; do_stat(path, f, verb); } f.SetName(entry->d_name); files.push_back(f); } if (errno != 0) { logger.msg(VERBOSE, "Error while reading dir %s: %s", url.plainstr(), StrError(errno)); return DataStatus(DataStatus::ListError, errno); } XrdPosixXrootd::Closedir(dir); return DataStatus::Success; } DataStatus DataPointXrootd::Remove() { if (reading) return DataStatus(DataStatus::IsReadingError, EARCLOGIC); if (writing) return DataStatus(DataStatus::IsReadingError, EARCLOGIC); struct stat st; CertEnvLocker env(usercfg); if (XrdPosixXrootd::Stat(url.plainstr().c_str(), &st) != 0) { if (errno == ENOENT) return DataStatus::Success; logger.msg(VERBOSE, "File is not accessible %s: %s", url.plainstr(), StrError(errno)); return DataStatus(DataStatus::DeleteError, errno, "Failed to stat file "+url.plainstr()); } // path is a directory if (S_ISDIR(st.st_mode)) { if (XrdPosixXrootd::Rmdir(url.plainstr().c_str()) != 0) { logger.msg(VERBOSE, "Can't delete directory %s: %s", url.plainstr(), StrError(errno)); return DataStatus(DataStatus::DeleteError, errno, "Failed to delete directory "+url.plainstr()); } return DataStatus::Success; } // path is a file if (XrdPosixXrootd::Unlink(url.plainstr().c_str()) != 0) { logger.msg(VERBOSE, "Can't delete file %s: %s", url.plainstr(), StrError(errno)); return DataStatus(DataStatus::DeleteError, errno, "Failed to delete file "+url.plainstr()); } return DataStatus::Success; } DataStatus 
DataPointXrootd::CreateDirectory(bool with_parents) { std::string::size_type slashpos = url.Path().find("/", 1); // don't create root dir int r; URL dir(url); if (!with_parents) { dir.ChangePath(url.Path().substr(0, url.Path().rfind("/"))); if (dir.Path().empty() || dir == url.Path()) return DataStatus::Success; logger.msg(VERBOSE, "Creating directory %s", dir.plainstr()); CertEnvLocker env(usercfg); r = XrdPosixXrootd::Mkdir(dir.plainstr().c_str(), 0775); if (r == 0 || errno == EEXIST) return DataStatus::Success; logger.msg(VERBOSE, "Error creating required dirs: %s", StrError(errno)); return DataStatus(DataStatus::CreateDirectoryError, errno, StrError(errno)); } while (slashpos != std::string::npos) { dir.ChangePath(url.Path().substr(0, slashpos)); // stat dir to see if it exists struct stat st; CertEnvLocker env(usercfg); r = XrdPosixXrootd::Stat(dir.plainstr().c_str(), &st); if (r == 0) { slashpos = url.Path().find("/", slashpos + 1); continue; } logger.msg(VERBOSE, "Creating directory %s", dir.plainstr()); r = XrdPosixXrootd::Mkdir(dir.plainstr().c_str(), 0775); if (r != 0) { if (errno != EEXIST) { logger.msg(VERBOSE, "Error creating required dirs: %s", StrError(errno)); return DataStatus(DataStatus::CreateDirectoryError, errno, StrError(errno)); } } slashpos = url.Path().find("/", slashpos + 1); } return DataStatus::Success; } DataStatus DataPointXrootd::Rename(const URL& newurl) { logger.msg(VERBOSE, "Renaming %s to %s", url.plainstr(), newurl.plainstr()); URL tmpurl(newurl); // xrootd requires 2 slashes at the start of the URL path if (tmpurl.Path().find("//") != 0) { tmpurl.ChangePath(std::string("/"+tmpurl.Path())); } if (XrdPosixXrootd::Rename(url.plainstr().c_str(), tmpurl.plainstr().c_str()) != 0) { logger.msg(VERBOSE, "Can't rename file %s: %s", url.plainstr(), StrError(errno)); return DataStatus(DataStatus::RenameError, errno, "Failed to rename file "+url.plainstr()); } return DataStatus::Success; } bool DataPointXrootd::RequiresCredentialsInFile() const { return true; } void DataPointXrootd::set_log_level() { // TODO xrootd lib logs to stderr - need to redirect to log file for DTR // Level 1 enables some messages which go to stdout - which messes up // communication in DTR so better to use no debugging if (logger.getThreshold() == DEBUG) XrdPosixXrootd::setDebug(1); else XrdPosixXrootd::setDebug(0); } } // namespace ArcDMCXrootd extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "root", "HED:DMC", "XRootd", 0, &ArcDMCXrootd::DataPointXrootd::Instance }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/dmc/xrootd/PaxHeaders.7502/README0000644000000000000000000000012311556316311021467 xustar000000000000000027 mtime=1304009929.383826 26 atime=1513200575.21471 30 ctime=1513200660.958759212 nordugrid-arc-5.4.2/src/hed/dmc/xrootd/README0000644000175000002070000000011011556316311021525 0ustar00mockbuildmock00000000000000DMC which handles root:// protocol. 
See http://xrootd.slac.stanford.edu/nordugrid-arc-5.4.2/src/hed/dmc/PaxHeaders.7502/README0000644000000000000000000000012411001640373020142 xustar000000000000000027 mtime=1208434939.937032 27 atime=1513200575.279711 30 ctime=1513200660.729756411 nordugrid-arc-5.4.2/src/hed/dmc/README0000644000175000002070000000004011001640373020201 0ustar00mockbuildmock00000000000000dmc = Data Management Component nordugrid-arc-5.4.2/src/hed/PaxHeaders.7502/shc0000644000000000000000000000013213214316025017220 xustar000000000000000030 mtime=1513200661.031760104 30 atime=1513200668.722854169 30 ctime=1513200661.031760104 nordugrid-arc-5.4.2/src/hed/shc/0000755000175000002070000000000013214316025017343 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/allowpdp0000644000000000000000000000013213214316025021042 xustar000000000000000030 mtime=1513200661.048760312 30 atime=1513200668.722854169 30 ctime=1513200661.048760312 nordugrid-arc-5.4.2/src/hed/shc/allowpdp/0000755000175000002070000000000013214316025021165 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/allowpdp/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515023163 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200599.829011571 30 ctime=1513200661.046760288 nordugrid-arc-5.4.2/src/hed/shc/allowpdp/Makefile.am0000644000175000002070000000050512052416515023225 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = liballowpdp.la liballowpdp_la_SOURCES = AllowPDP.cpp AllowPDP.h liballowpdp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) liballowpdp_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la nordugrid-arc-5.4.2/src/hed/shc/allowpdp/PaxHeaders.7502/Makefile.in0000644000000000000000000000013013214315727023172 xustar000000000000000030 mtime=1513200599.872012097 30 atime=1513200649.358617338 28 ctime=1513200661.0477603 nordugrid-arc-5.4.2/src/hed/shc/allowpdp/Makefile.in0000644000175000002070000005456013214315727023254 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/allowpdp DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) liballowpdp_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_liballowpdp_la_OBJECTS = liballowpdp_la-AllowPDP.lo liballowpdp_la_OBJECTS = $(am_liballowpdp_la_OBJECTS) liballowpdp_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(liballowpdp_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(liballowpdp_la_SOURCES) DIST_SOURCES = $(liballowpdp_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = 
@ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = 
@JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ 
host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = liballowpdp.la liballowpdp_la_SOURCES = AllowPDP.cpp AllowPDP.h liballowpdp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) liballowpdp_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/allowpdp/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/allowpdp/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done liballowpdp.la: $(liballowpdp_la_OBJECTS) $(liballowpdp_la_DEPENDENCIES) $(liballowpdp_la_LINK) $(liballowpdp_la_OBJECTS) $(liballowpdp_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liballowpdp_la-AllowPDP.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< liballowpdp_la-AllowPDP.lo: AllowPDP.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(liballowpdp_la_CXXFLAGS) $(CXXFLAGS) -MT liballowpdp_la-AllowPDP.lo -MD -MP -MF $(DEPDIR)/liballowpdp_la-AllowPDP.Tpo -c -o liballowpdp_la-AllowPDP.lo `test -f 'AllowPDP.cpp' || echo '$(srcdir)/'`AllowPDP.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/liballowpdp_la-AllowPDP.Tpo $(DEPDIR)/liballowpdp_la-AllowPDP.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='AllowPDP.cpp' object='liballowpdp_la-AllowPDP.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(liballowpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o liballowpdp_la-AllowPDP.lo `test -f 'AllowPDP.cpp' || echo '$(srcdir)/'`AllowPDP.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: 
-rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/allowpdp/PaxHeaders.7502/AllowPDP.cpp0000644000000000000000000000012412110410653023242 xustar000000000000000027 mtime=1361187243.181034 27 atime=1513200575.017708 30 ctime=1513200661.048760312 nordugrid-arc-5.4.2/src/hed/shc/allowpdp/AllowPDP.cpp0000644000175000002070000000104712110410653023311 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "AllowPDP.h" using namespace Arc; namespace ArcSec { Plugin* AllowPDP::get_allow_pdp(PluginArgument *arg) { PDPPluginArgument* pdparg = arg?dynamic_cast(arg):NULL; if(!pdparg) return NULL; return new AllowPDP((Config*)(*pdparg),pdparg); } AllowPDP::AllowPDP(Config* cfg, PluginArgument* parg):PDP(cfg,parg){ } PDPStatus AllowPDP::isPermitted(Message*) const { return true; } } // namespace ArcSec nordugrid-arc-5.4.2/src/hed/shc/allowpdp/PaxHeaders.7502/AllowPDP.h0000644000000000000000000000012412110410653022707 xustar000000000000000027 mtime=1361187243.181034 27 atime=1513200575.017708 30 ctime=1513200661.048760312 nordugrid-arc-5.4.2/src/hed/shc/allowpdp/AllowPDP.h0000644000175000002070000000077412110410653022764 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ALLOWPDP_H__ #define __ARC_SEC_ALLOWPDP_H__ #include #include #include namespace ArcSec { /// This PDP always return true (allow) class AllowPDP : public PDP { public: static Arc::Plugin* get_allow_pdp(Arc::PluginArgument *arg); AllowPDP(Arc::Config* cfg, Arc::PluginArgument* parg); virtual ~AllowPDP() {}; virtual PDPStatus isPermitted(Arc::Message *msg) const; }; } // namespace ArcSec #endif /* __ARC_SEC_ALLOWPDP_H__ */ nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515021341 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200599.719010226 30 ctime=1513200661.018759946 nordugrid-arc-5.4.2/src/hed/shc/Makefile.am0000644000175000002070000000656512052416515021417 0ustar00mockbuildmock00000000000000if XMLSEC_ENABLED LIBS_WITH_XMLSEC = \ x509tokensh/libx509tokensh.la \ samltokensh/libsamltokensh.la \ saml2sso_assertionconsumersh/libsaml2ssoassertionconsumersh.la SUBDIRS_XMLSEC = x509tokensh samltokensh saml2sso_assertionconsumersh else LIBS_WITH_XMLSEC = SUBDIRS_XMLSEC = endif SUBDIRS = allowpdp denypdp simplelistpdp arcpdp xacmlpdp \ pdpserviceinvoker arcauthzsh delegationpdp usernametokensh gaclpdp \ $(SUBDIRS_XMLSEC) delegationsh legacy DIST_SUBDIRS = allowpdp denypdp simplelistpdp arcpdp xacmlpdp \ pdpserviceinvoker arcauthzsh delegationpdp usernametokensh gaclpdp \ x509tokensh samltokensh saml2sso_assertionconsumersh delegationsh legacy noinst_PROGRAMS = test testinterface_arc testinterface_xacml pkglib_LTLIBRARIES = libarcshc.la libarcshc_la_SOURCES = SecHandlerPlugin.cpp libarcshc_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcshc_la_LIBADD = \ allowpdp/liballowpdp.la denypdp/libdenypdp.la \ simplelistpdp/libsimplelistpdp.la arcpdp/libarcpdp.la \ xacmlpdp/libxacmlpdp.la gaclpdp/libgaclpdp.la \ pdpserviceinvoker/libarcpdpserviceinvoker.la \ delegationpdp/libdelegationpdp.la \ arcauthzsh/libarcauthzsh.la usernametokensh/libusernametokensh.la \ $(LIBS_WITH_XMLSEC) \ delegationsh/libdelegationsh.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ 
$(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) libarcshc_la_LDFLAGS = -no-undefined -avoid-version -module test_SOURCES = test.cpp test_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) test_LDADD = \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) testinterface_arc_SOURCES = testinterface_arc.cpp testinterface_arc_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) testinterface_arc_LDADD = \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) testinterface_xacml_SOURCES = testinterface_xacml.cpp testinterface_xacml_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) testinterface_xacml_LDADD = \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) #classload_test_SOURCES = classload_test.cpp #classload_test_CXXFLAGS = -I$(top_srcdir)/include \ # $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) #classload_test_LDADD = \ # $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ # $(top_builddir)/src/hed/libs/message/libarcmessage.la \ # $(top_builddir)/src/hed/libs/loader/libarcloader.la \ # $(top_builddir)/src/hed/libs/common/libarccommon.la \ # $(GLIBMM_LIBS) $(LIBXML2_LIBS) nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/arcauthzsh0000644000000000000000000000013213214316025021374 xustar000000000000000030 mtime=1513200661.320763639 30 atime=1513200668.722854169 30 ctime=1513200661.320763639 nordugrid-arc-5.4.2/src/hed/shc/arcauthzsh/0000755000175000002070000000000013214316025021517 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/arcauthzsh/PaxHeaders.7502/ArcAuthZ.cpp0000644000000000000000000000012412110401544023632 xustar000000000000000027 mtime=1361183588.521754 27 atime=1513200575.113709 30 ctime=1513200661.317763602 nordugrid-arc-5.4.2/src/hed/shc/arcauthzsh/ArcAuthZ.cpp0000644000175000002070000000602712110401544023704 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "ArcAuthZ.h" using namespace Arc; namespace ArcSec { Plugin* ArcAuthZ::get_sechandler(PluginArgument* arg) { SecHandlerPluginArgument* shcarg = arg?dynamic_cast(arg):NULL; if(!shcarg) return NULL; ArcAuthZ* plugin = new ArcAuthZ((Config*)(*shcarg),(Arc::ChainContext*)(*shcarg),arg); if(!plugin) return NULL; if(!(*plugin)) { delete plugin; plugin = NULL; }; return plugin; } ArcAuthZ::PDPDesc::PDPDesc(const std::string& action_,const std::string& id_,PDP* pdp_):pdp(pdp_),action(breakOnDeny),id(id_) { if(strcasecmp("breakOnAllow",action_.c_str()) == 0) { action=breakOnAllow; } else if(strcasecmp("breakOnDeny",action_.c_str()) == 0) { action=breakOnDeny; } else if(strcasecmp("breakAlways",action_.c_str()) == 0) { action=breakAlways; } else if(strcasecmp("breakNever",action_.c_str()) == 0) { action=breakNever; }; } ArcAuthZ::ArcAuthZ(Config *cfg,ChainContext* ctx,Arc::PluginArgument* parg):SecHandler(cfg,parg),valid_(false) 
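// Pre-loads any PDP plugin modules named under <Plugins>, then instantiates the PDPs configured as <PDP> elements via MakePDPs()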
{ pdp_factory = (PluginsFactory*)(*ctx); if(pdp_factory) { for(int n = 0;;++n) { XMLNode p = (*cfg)["Plugins"][n]; if(!p) break; std::string name = (*cfg)["Plugins"][n]["Name"]; if(name.empty()) continue; // Nameless plugin? pdp_factory->load(name,PDPPluginKind); }; }; if(!MakePDPs(*cfg)) { for(pdp_container_t::iterator p = pdps_.begin();p!=pdps_.end();) { if(p->pdp) delete p->pdp; p = pdps_.erase(p); }; logger.msg(ERROR, "ArcAuthZ: failed to initiate all PDPs - this instance will be non-functional"); }; valid_ = true; } ArcAuthZ::~ArcAuthZ() { for(pdp_container_t::iterator p = pdps_.begin();p!=pdps_.end();) { if(p->pdp) delete p->pdp; p = pdps_.erase(p); }; } /**Producing PDPs */ bool ArcAuthZ::MakePDPs(XMLNode cfg) { /**Creating the PDP plugins*/ XMLNode cn; cn=cfg["PDP"]; //need some polishing for(;cn;++cn) { if(!cn) break; Arc::Config cfg_(cn); std::string name = cn.Attribute("name"); if(name.empty()) { logger.msg(ERROR, "PDP: missing name attribute"); return false; }; std::string id = cn.Attribute("id"); logger.msg(VERBOSE, "PDP: %s (%s)", name, id); PDP* pdp = NULL; PDPPluginArgument arg(&cfg_); pdp = pdp_factory->GetInstance(PDPPluginKind,name,&arg); if(!pdp) { logger.msg(ERROR, "PDP: %s (%s) can not be loaded", name, id); return false; }; pdps_.push_back(PDPDesc(cn.Attribute("action"),id,pdp)); } return true; } SecHandlerStatus ArcAuthZ::Handle(Arc::Message* msg) const { pdp_container_t::const_iterator it; bool r = false; for(it=pdps_.begin();it!=pdps_.end();it++){ r = it->pdp->isPermitted(msg); if((r == true) && (it->action == PDPDesc::breakOnAllow)) break; if((r == false) && (it->action == PDPDesc::breakOnDeny)) break; if(it->action == PDPDesc::breakAlways) break; } return r; } } // namespace ArcSec nordugrid-arc-5.4.2/src/hed/shc/arcauthzsh/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515023515 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200599.888012293 30 ctime=1513200661.315763578 nordugrid-arc-5.4.2/src/hed/shc/arcauthzsh/Makefile.am0000644000175000002070000000053712052416515023564 0ustar00mockbuildmock00000000000000SUBDIRS = schema noinst_LTLIBRARIES = libarcauthzsh.la libarcauthzsh_la_SOURCES = ArcAuthZ.cpp ArcAuthZ.h libarcauthzsh_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcauthzsh_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la nordugrid-arc-5.4.2/src/hed/shc/arcauthzsh/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315727023525 xustar000000000000000030 mtime=1513200599.932012831 30 atime=1513200649.418618071 29 ctime=1513200661.31676359 nordugrid-arc-5.4.2/src/hed/shc/arcauthzsh/Makefile.in0000644000175000002070000006743113214315727023607 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/arcauthzsh DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libarcauthzsh_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libarcauthzsh_la_OBJECTS = libarcauthzsh_la-ArcAuthZ.lo libarcauthzsh_la_OBJECTS = $(am_libarcauthzsh_la_OBJECTS) libarcauthzsh_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarcauthzsh_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcauthzsh_la_SOURCES) DIST_SOURCES = $(libarcauthzsh_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive 
maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = 
@GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = 
@TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = schema noinst_LTLIBRARIES = libarcauthzsh.la libarcauthzsh_la_SOURCES = ArcAuthZ.cpp ArcAuthZ.h libarcauthzsh_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcauthzsh_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/arcauthzsh/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/arcauthzsh/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in 
$(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcauthzsh.la: $(libarcauthzsh_la_OBJECTS) $(libarcauthzsh_la_DEPENDENCIES) $(libarcauthzsh_la_LINK) $(libarcauthzsh_la_OBJECTS) $(libarcauthzsh_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcauthzsh_la-ArcAuthZ.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcauthzsh_la-ArcAuthZ.lo: ArcAuthZ.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcauthzsh_la_CXXFLAGS) $(CXXFLAGS) -MT libarcauthzsh_la-ArcAuthZ.lo -MD -MP -MF $(DEPDIR)/libarcauthzsh_la-ArcAuthZ.Tpo -c -o libarcauthzsh_la-ArcAuthZ.lo `test -f 'ArcAuthZ.cpp' || echo '$(srcdir)/'`ArcAuthZ.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcauthzsh_la-ArcAuthZ.Tpo $(DEPDIR)/libarcauthzsh_la-ArcAuthZ.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArcAuthZ.cpp' object='libarcauthzsh_la-ArcAuthZ.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcauthzsh_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcauthzsh_la-ArcAuthZ.lo `test -f 'ArcAuthZ.cpp' || echo 
'$(srcdir)/'`ArcAuthZ.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-noinstLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/arcauthzsh/PaxHeaders.7502/schema0000644000000000000000000000013213214316025022634 xustar000000000000000030 mtime=1513200661.337763847 30 atime=1513200668.722854169 30 ctime=1513200661.337763847 nordugrid-arc-5.4.2/src/hed/shc/arcauthzsh/schema/0000755000175000002070000000000013214316025022757 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/arcauthzsh/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711255700321024750 xustar000000000000000027 mtime=1253540049.444682 30 atime=1513200599.947013014 30 ctime=1513200661.335763823 nordugrid-arc-5.4.2/src/hed/shc/arcauthzsh/schema/Makefile.am0000644000175000002070000000014111255700321025006 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = ArcAuthZ.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/shc/arcauthzsh/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315727024766 xustar000000000000000030 mtime=1513200599.978013393 30 atime=1513200649.432618242 30 ctime=1513200661.336763835 nordugrid-arc-5.4.2/src/hed/shc/arcauthzsh/schema/Makefile.in0000644000175000002070000004353213214315727025043 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/arcauthzsh/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ 
ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = 
@LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ 
ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = ArcAuthZ.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/arcauthzsh/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/arcauthzsh/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/arcauthzsh/schema/PaxHeaders.7502/ArcAuthZ.xsd0000644000000000000000000000012411531447156025125 xustar000000000000000027 mtime=1298550382.323251 27 atime=1513200575.113709 30 ctime=1513200661.337763847 nordugrid-arc-5.4.2/src/hed/shc/arcauthzsh/schema/ArcAuthZ.xsd0000644000175000002070000000601311531447156025172 0ustar00mockbuildmock00000000000000 nordugrid-arc-5.4.2/src/hed/shc/arcauthzsh/PaxHeaders.7502/ArcAuthZ.h0000644000000000000000000000012412110401544023277 xustar000000000000000027 mtime=1361183588.521754 27 atime=1513200575.111709 30 ctime=1513200661.318763615 nordugrid-arc-5.4.2/src/hed/shc/arcauthzsh/ArcAuthZ.h0000644000175000002070000000323612110401544023350 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ARCAUTHZ_H__ #define __ARC_SEC_ARCAUTHZ_H__ #include #include #include #include #include #include namespace ArcSec { /// Tests message against list of PDPs /** This class implements SecHandler interface. It's Handle() method runs provided Message instance against all PDPs specified in configuration. If any of PDPs returns positive result Handle() return true, otherwise false. This class is the main entry for configuring authorization, and could include different PDP configured inside. 
*/ class ArcAuthZ : public SecHandler { private: class PDPDesc { public: PDP* pdp; enum { breakOnAllow, breakOnDeny, breakAlways, breakNever } action; std::string id; PDPDesc(const std::string& action,const std::string& id,PDP* pdp); }; typedef std::list pdp_container_t; /** Link to Factory responsible for loading and creation of PDP objects */ Arc::PluginsFactory *pdp_factory; /** One Handler can include few PDP */ pdp_container_t pdps_; bool valid_; protected: /** Create PDP according to conf info */ bool MakePDPs(Arc::XMLNode cfg); public: ArcAuthZ(Arc::Config *cfg, Arc::ChainContext* ctx, Arc::PluginArgument* parg); virtual ~ArcAuthZ(void); static Plugin* get_sechandler(Arc::PluginArgument* arg); /** Get authorization decision*/ virtual SecHandlerStatus Handle(Arc::Message* msg) const; operator bool(void) { return valid_; }; bool operator!(void) { return !valid_; }; }; } // namespace ArcSec #endif /* __ARC_SEC_ARCAUTHZ_H__ */ nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315727021352 xustar000000000000000030 mtime=1513200599.813011375 30 atime=1513200649.196615356 30 ctime=1513200661.019759958 nordugrid-arc-5.4.2/src/hed/shc/Makefile.in0000644000175000002070000012517413214315727021432 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ noinst_PROGRAMS = test$(EXEEXT) testinterface_arc$(EXEEXT) \ testinterface_xacml$(EXEEXT) subdir = src/hed/shc DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) @XMLSEC_ENABLED_TRUE@am__DEPENDENCIES_1 = \ @XMLSEC_ENABLED_TRUE@ x509tokensh/libx509tokensh.la \ @XMLSEC_ENABLED_TRUE@ samltokensh/libsamltokensh.la \ @XMLSEC_ENABLED_TRUE@ saml2sso_assertionconsumersh/libsaml2ssoassertionconsumersh.la am__DEPENDENCIES_2 = libarcshc_la_DEPENDENCIES = allowpdp/liballowpdp.la \ denypdp/libdenypdp.la simplelistpdp/libsimplelistpdp.la \ arcpdp/libarcpdp.la xacmlpdp/libxacmlpdp.la \ gaclpdp/libgaclpdp.la \ pdpserviceinvoker/libarcpdpserviceinvoker.la \ delegationpdp/libdelegationpdp.la arcauthzsh/libarcauthzsh.la \ usernametokensh/libusernametokensh.la $(am__DEPENDENCIES_1) \ delegationsh/libdelegationsh.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ 
$(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_2) am_libarcshc_la_OBJECTS = libarcshc_la-SecHandlerPlugin.lo libarcshc_la_OBJECTS = $(am_libarcshc_la_OBJECTS) libarcshc_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libarcshc_la_CXXFLAGS) \ $(CXXFLAGS) $(libarcshc_la_LDFLAGS) $(LDFLAGS) -o $@ PROGRAMS = $(noinst_PROGRAMS) am_test_OBJECTS = test-test.$(OBJEXT) test_OBJECTS = $(am_test_OBJECTS) test_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_2) $(am__DEPENDENCIES_2) test_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(test_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_testinterface_arc_OBJECTS = \ testinterface_arc-testinterface_arc.$(OBJEXT) testinterface_arc_OBJECTS = $(am_testinterface_arc_OBJECTS) testinterface_arc_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_2) $(am__DEPENDENCIES_2) testinterface_arc_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(testinterface_arc_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_testinterface_xacml_OBJECTS = \ testinterface_xacml-testinterface_xacml.$(OBJEXT) testinterface_xacml_OBJECTS = $(am_testinterface_xacml_OBJECTS) testinterface_xacml_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_2) $(am__DEPENDENCIES_2) testinterface_xacml_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(testinterface_xacml_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcshc_la_SOURCES) $(test_SOURCES) \ $(testinterface_arc_SOURCES) $(testinterface_xacml_SOURCES) DIST_SOURCES = $(libarcshc_la_SOURCES) $(test_SOURCES) \ $(testinterface_arc_SOURCES) $(testinterface_xacml_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ 
$(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = 
@GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = 
@XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @XMLSEC_ENABLED_FALSE@LIBS_WITH_XMLSEC = @XMLSEC_ENABLED_TRUE@LIBS_WITH_XMLSEC = \ @XMLSEC_ENABLED_TRUE@ x509tokensh/libx509tokensh.la \ @XMLSEC_ENABLED_TRUE@ samltokensh/libsamltokensh.la \ @XMLSEC_ENABLED_TRUE@ saml2sso_assertionconsumersh/libsaml2ssoassertionconsumersh.la @XMLSEC_ENABLED_FALSE@SUBDIRS_XMLSEC = @XMLSEC_ENABLED_TRUE@SUBDIRS_XMLSEC = x509tokensh samltokensh saml2sso_assertionconsumersh SUBDIRS = allowpdp denypdp simplelistpdp arcpdp xacmlpdp \ pdpserviceinvoker arcauthzsh delegationpdp usernametokensh gaclpdp \ $(SUBDIRS_XMLSEC) delegationsh legacy DIST_SUBDIRS = allowpdp denypdp simplelistpdp arcpdp xacmlpdp \ pdpserviceinvoker arcauthzsh delegationpdp usernametokensh gaclpdp \ x509tokensh samltokensh saml2sso_assertionconsumersh delegationsh legacy pkglib_LTLIBRARIES = libarcshc.la libarcshc_la_SOURCES = SecHandlerPlugin.cpp libarcshc_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcshc_la_LIBADD = \ allowpdp/liballowpdp.la 
denypdp/libdenypdp.la \ simplelistpdp/libsimplelistpdp.la arcpdp/libarcpdp.la \ xacmlpdp/libxacmlpdp.la gaclpdp/libgaclpdp.la \ pdpserviceinvoker/libarcpdpserviceinvoker.la \ delegationpdp/libdelegationpdp.la \ arcauthzsh/libarcauthzsh.la usernametokensh/libusernametokensh.la \ $(LIBS_WITH_XMLSEC) \ delegationsh/libdelegationsh.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) libarcshc_la_LDFLAGS = -no-undefined -avoid-version -module test_SOURCES = test.cpp test_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) test_LDADD = \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) testinterface_arc_SOURCES = testinterface_arc.cpp testinterface_arc_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) testinterface_arc_LDADD = \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) testinterface_xacml_SOURCES = testinterface_xacml.cpp testinterface_xacml_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) testinterface_xacml_LDADD = \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcshc.la: $(libarcshc_la_OBJECTS) $(libarcshc_la_DEPENDENCIES) $(libarcshc_la_LINK) -rpath $(pkglibdir) $(libarcshc_la_OBJECTS) $(libarcshc_la_LIBADD) $(LIBS) clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list test$(EXEEXT): $(test_OBJECTS) $(test_DEPENDENCIES) @rm -f test$(EXEEXT) $(test_LINK) $(test_OBJECTS) $(test_LDADD) $(LIBS) testinterface_arc$(EXEEXT): $(testinterface_arc_OBJECTS) $(testinterface_arc_DEPENDENCIES) @rm -f testinterface_arc$(EXEEXT) $(testinterface_arc_LINK) $(testinterface_arc_OBJECTS) $(testinterface_arc_LDADD) $(LIBS) testinterface_xacml$(EXEEXT): $(testinterface_xacml_OBJECTS) $(testinterface_xacml_DEPENDENCIES) @rm -f testinterface_xacml$(EXEEXT) $(testinterface_xacml_LINK) $(testinterface_xacml_OBJECTS) $(testinterface_xacml_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcshc_la-SecHandlerPlugin.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/testinterface_arc-testinterface_arc.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/testinterface_xacml-testinterface_xacml.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' 
object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcshc_la-SecHandlerPlugin.lo: SecHandlerPlugin.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshc_la_CXXFLAGS) $(CXXFLAGS) -MT libarcshc_la-SecHandlerPlugin.lo -MD -MP -MF $(DEPDIR)/libarcshc_la-SecHandlerPlugin.Tpo -c -o libarcshc_la-SecHandlerPlugin.lo `test -f 'SecHandlerPlugin.cpp' || echo '$(srcdir)/'`SecHandlerPlugin.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcshc_la-SecHandlerPlugin.Tpo $(DEPDIR)/libarcshc_la-SecHandlerPlugin.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SecHandlerPlugin.cpp' object='libarcshc_la-SecHandlerPlugin.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshc_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcshc_la-SecHandlerPlugin.lo `test -f 'SecHandlerPlugin.cpp' || echo '$(srcdir)/'`SecHandlerPlugin.cpp test-test.o: test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -MT test-test.o -MD -MP -MF $(DEPDIR)/test-test.Tpo -c -o test-test.o `test -f 'test.cpp' || echo '$(srcdir)/'`test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test-test.Tpo $(DEPDIR)/test-test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test.cpp' object='test-test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -c -o test-test.o `test -f 'test.cpp' || echo '$(srcdir)/'`test.cpp test-test.obj: test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -MT test-test.obj -MD -MP -MF $(DEPDIR)/test-test.Tpo -c -o test-test.obj `if test -f 'test.cpp'; then $(CYGPATH_W) 'test.cpp'; else $(CYGPATH_W) '$(srcdir)/test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/test-test.Tpo $(DEPDIR)/test-test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='test.cpp' object='test-test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) 
$(CPPFLAGS) $(test_CXXFLAGS) $(CXXFLAGS) -c -o test-test.obj `if test -f 'test.cpp'; then $(CYGPATH_W) 'test.cpp'; else $(CYGPATH_W) '$(srcdir)/test.cpp'; fi` testinterface_arc-testinterface_arc.o: testinterface_arc.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(testinterface_arc_CXXFLAGS) $(CXXFLAGS) -MT testinterface_arc-testinterface_arc.o -MD -MP -MF $(DEPDIR)/testinterface_arc-testinterface_arc.Tpo -c -o testinterface_arc-testinterface_arc.o `test -f 'testinterface_arc.cpp' || echo '$(srcdir)/'`testinterface_arc.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/testinterface_arc-testinterface_arc.Tpo $(DEPDIR)/testinterface_arc-testinterface_arc.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='testinterface_arc.cpp' object='testinterface_arc-testinterface_arc.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(testinterface_arc_CXXFLAGS) $(CXXFLAGS) -c -o testinterface_arc-testinterface_arc.o `test -f 'testinterface_arc.cpp' || echo '$(srcdir)/'`testinterface_arc.cpp testinterface_arc-testinterface_arc.obj: testinterface_arc.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(testinterface_arc_CXXFLAGS) $(CXXFLAGS) -MT testinterface_arc-testinterface_arc.obj -MD -MP -MF $(DEPDIR)/testinterface_arc-testinterface_arc.Tpo -c -o testinterface_arc-testinterface_arc.obj `if test -f 'testinterface_arc.cpp'; then $(CYGPATH_W) 'testinterface_arc.cpp'; else $(CYGPATH_W) '$(srcdir)/testinterface_arc.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/testinterface_arc-testinterface_arc.Tpo $(DEPDIR)/testinterface_arc-testinterface_arc.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='testinterface_arc.cpp' object='testinterface_arc-testinterface_arc.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(testinterface_arc_CXXFLAGS) $(CXXFLAGS) -c -o testinterface_arc-testinterface_arc.obj `if test -f 'testinterface_arc.cpp'; then $(CYGPATH_W) 'testinterface_arc.cpp'; else $(CYGPATH_W) '$(srcdir)/testinterface_arc.cpp'; fi` testinterface_xacml-testinterface_xacml.o: testinterface_xacml.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(testinterface_xacml_CXXFLAGS) $(CXXFLAGS) -MT testinterface_xacml-testinterface_xacml.o -MD -MP -MF $(DEPDIR)/testinterface_xacml-testinterface_xacml.Tpo -c -o testinterface_xacml-testinterface_xacml.o `test -f 'testinterface_xacml.cpp' || echo '$(srcdir)/'`testinterface_xacml.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/testinterface_xacml-testinterface_xacml.Tpo $(DEPDIR)/testinterface_xacml-testinterface_xacml.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='testinterface_xacml.cpp' object='testinterface_xacml-testinterface_xacml.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(testinterface_xacml_CXXFLAGS) $(CXXFLAGS) -c -o testinterface_xacml-testinterface_xacml.o `test -f 'testinterface_xacml.cpp' || echo '$(srcdir)/'`testinterface_xacml.cpp testinterface_xacml-testinterface_xacml.obj: testinterface_xacml.cpp 
@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(testinterface_xacml_CXXFLAGS) $(CXXFLAGS) -MT testinterface_xacml-testinterface_xacml.obj -MD -MP -MF $(DEPDIR)/testinterface_xacml-testinterface_xacml.Tpo -c -o testinterface_xacml-testinterface_xacml.obj `if test -f 'testinterface_xacml.cpp'; then $(CYGPATH_W) 'testinterface_xacml.cpp'; else $(CYGPATH_W) '$(srcdir)/testinterface_xacml.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/testinterface_xacml-testinterface_xacml.Tpo $(DEPDIR)/testinterface_xacml-testinterface_xacml.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='testinterface_xacml.cpp' object='testinterface_xacml-testinterface_xacml.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(testinterface_xacml_CXXFLAGS) $(CXXFLAGS) -c -o testinterface_xacml-testinterface_xacml.obj `if test -f 'testinterface_xacml.cpp'; then $(CYGPATH_W) 'testinterface_xacml.cpp'; else $(CYGPATH_W) '$(srcdir)/testinterface_xacml.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . 
|| ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstPROGRAMS \ clean-pkglibLTLIBRARIES mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-noinstPROGRAMS clean-pkglibLTLIBRARIES ctags \ ctags-recursive distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-pkglibLTLIBRARIES install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am uninstall-pkglibLTLIBRARIES #classload_test_SOURCES = classload_test.cpp #classload_test_CXXFLAGS = -I$(top_srcdir)/include \ # $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) #classload_test_LDADD = \ # $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ # $(top_builddir)/src/hed/libs/message/libarcmessage.la \ # $(top_builddir)/src/hed/libs/loader/libarcloader.la \ # $(top_builddir)/src/hed/libs/common/libarccommon.la \ # $(GLIBMM_LIBS) $(LIBXML2_LIBS) # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/xacmlpdp0000644000000000000000000000013213214316025021030 xustar000000000000000030 mtime=1513200661.215762355 30 atime=1513200668.722854169 30 ctime=1513200661.215762355 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/0000755000175000002070000000000013214316025021153 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515023151 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200601.204028388 30 ctime=1513200661.181761939 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/Makefile.am0000644000175000002070000000163312052416515023216 0ustar00mockbuildmock00000000000000SUBDIRS = schema noinst_LTLIBRARIES = libxacmlpdp.la libxacmlpdp_la_SOURCES = XACMLEvaluationCtx.cpp XACMLEvaluationCtx.h \ XACMLEvaluator.cpp XACMLEvaluator.h XACMLRequest.cpp XACMLRequest.h \ XACMLPolicy.cpp XACMLPolicy.h XACMLRule.cpp XACMLRule.h \ XACMLTarget.cpp XACMLTarget.h AttributeDesignator.cpp \ AttributeDesignator.h AttributeSelector.cpp AttributeSelector.h \ XACMLApply.cpp XACMLApply.h XACMLCondition.cpp XACMLCondition.h \ XACMLAttributeProxy.h XACMLAttributeFactory.cpp \ XACMLAttributeFactory.h XACMLFnFactory.cpp XACMLFnFactory.h \ XACMLAlgFactory.cpp XACMLAlgFactory.h XACMLPDP.cpp XACMLPDP.h libxacmlpdp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libxacmlpdp_la_LIBADD = \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLEvaluator.cpp0000644000000000000000000000012411730411253024341 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.072708 30 ctime=1513200661.185761988 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLEvaluator.cpp0000644000175000002070000002214011730411253024405 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "XACMLEvaluator.h" #include "XACMLEvaluationCtx.h" Arc::Plugin* ArcSec::XACMLEvaluator::get_evaluator(Arc::PluginArgument* arg) { Arc::ClassLoaderPluginArgument* clarg = arg?dynamic_cast(arg):NULL; if(!clarg) return NULL; return new ArcSec::XACMLEvaluator((Arc::XMLNode*)(*clarg),arg); } using namespace Arc; using namespace ArcSec; Arc::Logger ArcSec::XACMLEvaluator::logger(Arc::Logger::rootLogger, "XACMLEvaluator"); void XACMLEvaluator::parsecfg(Arc::XMLNode& cfg){ std::string policystore, policylocation, functionfactory, attributefactory, combingalgfactory; XMLNode nd; Arc::NS nsList; std::list res; nsList.insert(std::pair("pdp","http://www.nordugrid.org/schemas/pdp/Config")); //Get the name of "PolicyStore" class //res = cfg.XPathLookup("//pdp:PolicyStore", nsList); //presently, there can be only one PolicyStore //if(!(res.empty())){ // nd = *(res.begin()); // policystore = (std::string)(nd.Attribute("name")); // policylocation = (std::string)(nd.Attribute("location")); //} //else if (res.empty()){ // logger.msg(ERROR, "No any policy exists, the policy engine can not be loaded"); // exit(1); //} //Get the name of "FunctionFactory" class res = cfg.XPathLookup("//pdp:FunctionFactory", nsList); if(!(res.empty())){ nd = *(res.begin()); functionfactory = (std::string)(nd.Attribute("name")); } else { logger.msg(ERROR, "Can not parse classname for FunctionFactory from configuration"); return;} //Get the name of "AttributeFactory" 
class res = cfg.XPathLookup("//pdp:AttributeFactory", nsList); if(!(res.empty())){ nd = *(res.begin()); attributefactory = (std::string)(nd.Attribute("name")); } else { logger.msg(ERROR, "Can not parse classname for AttributeFactory from configuration"); return;} //Get the name of "CombiningAlgorithmFactory" class res = cfg.XPathLookup("//pdp:CombingAlgorithmFactory", nsList); if(!(res.empty())){ nd = *(res.begin()); combingalgfactory = (std::string)(nd.Attribute("name")); } else { logger.msg(ERROR, "Can not parse classname for CombiningAlgorithmFactory from configuration"); return;} //Get the name of the "Request" class res = m_cfg->XPathLookup("//pdp:Request", nsList); if(!(res.empty())){ nd = *(res.begin()); request_classname = (std::string)(nd.Attribute("name")); } else { logger.msg(ERROR, "Can not parse classname for Request from configuration"); return;} //Get the name of the "Policy" class std::string policy_classname; res = m_cfg->XPathLookup("//pdp:Policy", nsList); if(!(res.empty())){ nd = *(res.begin()); policy_classname = (std::string)(nd.Attribute("name")); } else { logger.msg(ERROR, "Can not parse classname for Policy from configuration"); return;} //Get the ClassLoader object; The object which loads this XACMLEvaluator should have //constructed ClassLoader by using ClassLoader(cfg), and putting the configuration //information into it; meanwhile ClassLoader is designed as a Singleton, so here //we don't need to intialte ClassLoader by using ClassLoader(cfg); ClassLoader* classloader; classloader=ClassLoader::getClassLoader(); attrfactory=NULL; attrfactory = (AttributeFactory*)(classloader->Instance(attributefactory)); if(attrfactory == NULL) logger.msg(ERROR, "Can not dynamically produce AttributeFactory"); fnfactory=NULL; fnfactory = (FnFactory*)(classloader->Instance(functionfactory)); if(fnfactory == NULL) logger.msg(ERROR, "Can not dynamically produce FnFactory"); algfactory=NULL; algfactory = (AlgFactory*)(classloader->Instance(combingalgfactory)); if(algfactory == NULL) logger.msg(ERROR, "Can not dynamically produce AlgFacroty"); //Create the EvaluatorContext for the usage of creating Policy context = new EvaluatorContext(this); std::string alg("Permit-Overrides"); //std::list filelist; //filelist.push_back(policylocation); //plstore = new PolicyStore(filelist, alg, policy_classname, context); plstore = new PolicyStore(alg, policy_classname, context); if(plstore == NULL) logger.msg(ERROR, "Can not create PolicyStore object"); } XACMLEvaluator::XACMLEvaluator(Arc::XMLNode* cfg, Arc::PluginArgument* parg) : Evaluator(cfg,parg), m_cfg(cfg) { plstore = NULL;; fnfactory = NULL; attrfactory = NULL; algfactory = NULL; combining_alg = EvaluatorFailsOnDeny; combining_alg_ex = NULL; context = NULL; parsecfg(*m_cfg); } XACMLEvaluator::XACMLEvaluator(const char * cfgfile, Arc::PluginArgument* parg) : Evaluator(cfgfile,parg){ combining_alg = EvaluatorFailsOnDeny; combining_alg_ex = NULL; std::string str; std::string xml_str = ""; std::ifstream f(cfgfile); while (f >> str) { xml_str.append(str); xml_str.append(" "); } f.close(); Arc::XMLNode node(xml_str); parsecfg(node); } void XACMLEvaluator::setCombiningAlg(EvaluatorCombiningAlg alg) { combining_alg = alg; } void XACMLEvaluator::setCombiningAlg(CombiningAlg* alg) { combining_alg_ex = alg; } Request* XACMLEvaluator::make_reqobj(XMLNode& reqnode){ Request* request = NULL; std::string requestor; Arc::ClassLoader* classloader = NULL; //Since the configuration information for loader has been got before (when create XACMLEvaluator), 
//it is not necessary to get once more here classloader = ClassLoader::getClassLoader(); //Load the Request object request = (ArcSec::Request*)(classloader->Instance(request_classname,&reqnode)); if(request == NULL) logger.msg(Arc::ERROR, "Can not dynamically produce Request"); return request; } Response* XACMLEvaluator::evaluate(Request* request){ Request* req = request; req->setAttributeFactory(attrfactory); //req->make_request(); EvaluationCtx * evalctx = NULL; evalctx = new XACMLEvaluationCtx(req); //evaluate the request based on policy if(evalctx) return evaluate(evalctx); return NULL; } Response* XACMLEvaluator::evaluate(const Source& req){ //0.Prepare request for evaluation Arc::XMLNode node = req.Get(); NS ns; ns["ra"]="http://www.nordugrid.org/schemas/request-arc"; node.Namespaces(ns); //1.Create the request object according to the configuration Request* request = NULL; request = make_reqobj(node); //2.Pre-process the Request object request->setAttributeFactory(attrfactory); //request->make_request(); EvaluationCtx * evalctx = NULL; evalctx = new XACMLEvaluationCtx(request); //3.evaluate the request based on policy Response* resp = NULL; if(evalctx) resp = evaluate(evalctx); if(request) delete request; return resp; } Response* XACMLEvaluator::evaluate(EvaluationCtx* evl_ctx){ //Split request into tuples XACMLEvaluationCtx* ctx = dynamic_cast(evl_ctx); std::list policies; std::list::iterator policyit; Response* resp = new Response(); policies = plstore->findPolicy(ctx); std::list permitset; //Combining algorithm should be present if there are mutiple std::list plist; // Preparing list of policies to evaluate for(policyit = policies.begin(); policyit != policies.end(); policyit++){ plist.push_back((Policy*)(*policyit)); }; Result result; if(plist.size() == 1) result = ((Policy*)(*(policies.begin())))->eval(ctx); else result = combining_alg_ex->combine(ctx,plist); ResponseItem* item = new ResponseItem; item->res = result; resp->addResponseItem(item); if(ctx) delete ctx; return resp; } Response* XACMLEvaluator::evaluate(Request* request, const Source& policy) { plstore->removePolicies(); plstore->addPolicy(policy, context, ""); Response* resp = evaluate(request); plstore->removePolicies(); return resp; } Response* XACMLEvaluator::evaluate(const Source& request, const Source& policy) { plstore->removePolicies(); plstore->addPolicy(policy, context, ""); Response* resp = evaluate(request); plstore->removePolicies(); return resp; } Response* XACMLEvaluator::evaluate(Request* request, Policy* policyobj) { plstore->removePolicies(); plstore->addPolicy(policyobj, context, ""); Response* resp = evaluate(request); plstore->releasePolicies(); return resp; } Response* XACMLEvaluator::evaluate(const Source& request, Policy* policyobj) { plstore->removePolicies(); plstore->addPolicy(policyobj, context, ""); Response* resp = evaluate(request); plstore->releasePolicies(); return resp; } const char* XACMLEvaluator::getName(void) const { return "xacml.evaluator"; } XACMLEvaluator::~XACMLEvaluator(){ //TODO delete all the object if(plstore) delete plstore; if(context) delete context; if(fnfactory) delete fnfactory; if(attrfactory) delete attrfactory; if(algfactory) delete algfactory; } nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315731023155 xustar000000000000000030 mtime=1513200601.275029256 30 atime=1513200649.244615943 30 ctime=1513200661.182761951 
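A note on the combining step in XACMLEvaluator::evaluate() above: when the policy store returns more than one matching policy, the configured combining algorithm (here "Permit-Overrides") decides the final result. What follows is a minimal standalone sketch of that idea only; the enum, the function and all names below are local to the sketch and are not the ARC CombiningAlg/Result API, and the semantics are simplified rather than strictly XACML-conformant.

// Illustrative sketch only -- not part of the nordugrid-arc archive above.
// The Result values and the helper are local simplifications of the
// "Permit-Overrides" idea used by XACMLEvaluator when several policies match;
// the real combining algorithms live in ARC's AlgFactory/CombiningAlg plugins.
#include <iostream>
#include <vector>

enum SketchResult {
  RESULT_PERMIT,
  RESULT_DENY,
  RESULT_INDETERMINATE,
  RESULT_NOT_APPLICABLE
};

// Simplified permit-overrides: a single Permit wins outright; otherwise a Deny
// beats the non-decisions, and Indeterminate beats Not-Applicable.
SketchResult permit_overrides(const std::vector<SketchResult>& results) {
  SketchResult combined = RESULT_NOT_APPLICABLE;
  for (std::vector<SketchResult>::const_iterator it = results.begin();
       it != results.end(); ++it) {
    if (*it == RESULT_PERMIT) return RESULT_PERMIT;
    if (*it == RESULT_DENY) combined = RESULT_DENY;
    else if (*it == RESULT_INDETERMINATE && combined == RESULT_NOT_APPLICABLE)
      combined = RESULT_INDETERMINATE;
  }
  return combined;
}

int main() {
  std::vector<SketchResult> per_policy;
  per_policy.push_back(RESULT_DENY);
  per_policy.push_back(RESULT_PERMIT);  // one permitting policy decides the outcome
  std::cout << (permit_overrides(per_policy) == RESULT_PERMIT ? "Permit" : "Not permit")
            << std::endl;
  return 0;
}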
nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/Makefile.in0000644000175000002070000013053513214315731023232 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/xacmlpdp DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libxacmlpdp_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libxacmlpdp_la_OBJECTS = libxacmlpdp_la-XACMLEvaluationCtx.lo \ libxacmlpdp_la-XACMLEvaluator.lo \ libxacmlpdp_la-XACMLRequest.lo libxacmlpdp_la-XACMLPolicy.lo \ libxacmlpdp_la-XACMLRule.lo libxacmlpdp_la-XACMLTarget.lo \ libxacmlpdp_la-AttributeDesignator.lo \ libxacmlpdp_la-AttributeSelector.lo \ libxacmlpdp_la-XACMLApply.lo libxacmlpdp_la-XACMLCondition.lo \ libxacmlpdp_la-XACMLAttributeFactory.lo \ libxacmlpdp_la-XACMLFnFactory.lo \ libxacmlpdp_la-XACMLAlgFactory.lo libxacmlpdp_la-XACMLPDP.lo libxacmlpdp_la_OBJECTS = $(am_libxacmlpdp_la_OBJECTS) libxacmlpdp_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) 
$(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libxacmlpdp_la_SOURCES) DIST_SOURCES = $(libxacmlpdp_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS 
= @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = 
@NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = 
@psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = schema noinst_LTLIBRARIES = libxacmlpdp.la libxacmlpdp_la_SOURCES = XACMLEvaluationCtx.cpp XACMLEvaluationCtx.h \ XACMLEvaluator.cpp XACMLEvaluator.h XACMLRequest.cpp XACMLRequest.h \ XACMLPolicy.cpp XACMLPolicy.h XACMLRule.cpp XACMLRule.h \ XACMLTarget.cpp XACMLTarget.h AttributeDesignator.cpp \ AttributeDesignator.h AttributeSelector.cpp AttributeSelector.h \ XACMLApply.cpp XACMLApply.h XACMLCondition.cpp XACMLCondition.h \ XACMLAttributeProxy.h XACMLAttributeFactory.cpp \ XACMLAttributeFactory.h XACMLFnFactory.cpp XACMLFnFactory.h \ XACMLAlgFactory.cpp XACMLAlgFactory.h XACMLPDP.cpp XACMLPDP.h libxacmlpdp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libxacmlpdp_la_LIBADD = \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/xacmlpdp/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/xacmlpdp/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libxacmlpdp.la: $(libxacmlpdp_la_OBJECTS) $(libxacmlpdp_la_DEPENDENCIES) $(libxacmlpdp_la_LINK) $(libxacmlpdp_la_OBJECTS) $(libxacmlpdp_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libxacmlpdp_la-AttributeDesignator.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libxacmlpdp_la-AttributeSelector.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libxacmlpdp_la-XACMLAlgFactory.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libxacmlpdp_la-XACMLApply.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libxacmlpdp_la-XACMLAttributeFactory.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libxacmlpdp_la-XACMLCondition.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libxacmlpdp_la-XACMLEvaluationCtx.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libxacmlpdp_la-XACMLEvaluator.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libxacmlpdp_la-XACMLFnFactory.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libxacmlpdp_la-XACMLPDP.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libxacmlpdp_la-XACMLPolicy.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libxacmlpdp_la-XACMLRequest.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libxacmlpdp_la-XACMLRule.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libxacmlpdp_la-XACMLTarget.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libxacmlpdp_la-XACMLEvaluationCtx.lo: XACMLEvaluationCtx.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libxacmlpdp_la-XACMLEvaluationCtx.lo -MD -MP -MF $(DEPDIR)/libxacmlpdp_la-XACMLEvaluationCtx.Tpo -c -o libxacmlpdp_la-XACMLEvaluationCtx.lo `test -f 'XACMLEvaluationCtx.cpp' || echo '$(srcdir)/'`XACMLEvaluationCtx.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libxacmlpdp_la-XACMLEvaluationCtx.Tpo $(DEPDIR)/libxacmlpdp_la-XACMLEvaluationCtx.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XACMLEvaluationCtx.cpp' object='libxacmlpdp_la-XACMLEvaluationCtx.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libxacmlpdp_la-XACMLEvaluationCtx.lo `test -f 'XACMLEvaluationCtx.cpp' || echo '$(srcdir)/'`XACMLEvaluationCtx.cpp libxacmlpdp_la-XACMLEvaluator.lo: XACMLEvaluator.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libxacmlpdp_la-XACMLEvaluator.lo -MD -MP -MF $(DEPDIR)/libxacmlpdp_la-XACMLEvaluator.Tpo -c -o libxacmlpdp_la-XACMLEvaluator.lo `test -f 'XACMLEvaluator.cpp' || echo '$(srcdir)/'`XACMLEvaluator.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libxacmlpdp_la-XACMLEvaluator.Tpo $(DEPDIR)/libxacmlpdp_la-XACMLEvaluator.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XACMLEvaluator.cpp' object='libxacmlpdp_la-XACMLEvaluator.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libxacmlpdp_la-XACMLEvaluator.lo `test -f 'XACMLEvaluator.cpp' || echo '$(srcdir)/'`XACMLEvaluator.cpp libxacmlpdp_la-XACMLRequest.lo: XACMLRequest.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libxacmlpdp_la-XACMLRequest.lo -MD -MP -MF $(DEPDIR)/libxacmlpdp_la-XACMLRequest.Tpo -c -o libxacmlpdp_la-XACMLRequest.lo `test -f 'XACMLRequest.cpp' || echo '$(srcdir)/'`XACMLRequest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libxacmlpdp_la-XACMLRequest.Tpo $(DEPDIR)/libxacmlpdp_la-XACMLRequest.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XACMLRequest.cpp' object='libxacmlpdp_la-XACMLRequest.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libxacmlpdp_la-XACMLRequest.lo `test -f 'XACMLRequest.cpp' || echo '$(srcdir)/'`XACMLRequest.cpp 
libxacmlpdp_la-XACMLPolicy.lo: XACMLPolicy.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libxacmlpdp_la-XACMLPolicy.lo -MD -MP -MF $(DEPDIR)/libxacmlpdp_la-XACMLPolicy.Tpo -c -o libxacmlpdp_la-XACMLPolicy.lo `test -f 'XACMLPolicy.cpp' || echo '$(srcdir)/'`XACMLPolicy.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libxacmlpdp_la-XACMLPolicy.Tpo $(DEPDIR)/libxacmlpdp_la-XACMLPolicy.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XACMLPolicy.cpp' object='libxacmlpdp_la-XACMLPolicy.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libxacmlpdp_la-XACMLPolicy.lo `test -f 'XACMLPolicy.cpp' || echo '$(srcdir)/'`XACMLPolicy.cpp libxacmlpdp_la-XACMLRule.lo: XACMLRule.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libxacmlpdp_la-XACMLRule.lo -MD -MP -MF $(DEPDIR)/libxacmlpdp_la-XACMLRule.Tpo -c -o libxacmlpdp_la-XACMLRule.lo `test -f 'XACMLRule.cpp' || echo '$(srcdir)/'`XACMLRule.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libxacmlpdp_la-XACMLRule.Tpo $(DEPDIR)/libxacmlpdp_la-XACMLRule.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XACMLRule.cpp' object='libxacmlpdp_la-XACMLRule.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libxacmlpdp_la-XACMLRule.lo `test -f 'XACMLRule.cpp' || echo '$(srcdir)/'`XACMLRule.cpp libxacmlpdp_la-XACMLTarget.lo: XACMLTarget.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libxacmlpdp_la-XACMLTarget.lo -MD -MP -MF $(DEPDIR)/libxacmlpdp_la-XACMLTarget.Tpo -c -o libxacmlpdp_la-XACMLTarget.lo `test -f 'XACMLTarget.cpp' || echo '$(srcdir)/'`XACMLTarget.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libxacmlpdp_la-XACMLTarget.Tpo $(DEPDIR)/libxacmlpdp_la-XACMLTarget.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XACMLTarget.cpp' object='libxacmlpdp_la-XACMLTarget.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libxacmlpdp_la-XACMLTarget.lo `test -f 'XACMLTarget.cpp' || echo '$(srcdir)/'`XACMLTarget.cpp libxacmlpdp_la-AttributeDesignator.lo: AttributeDesignator.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libxacmlpdp_la-AttributeDesignator.lo -MD -MP -MF 
$(DEPDIR)/libxacmlpdp_la-AttributeDesignator.Tpo -c -o libxacmlpdp_la-AttributeDesignator.lo `test -f 'AttributeDesignator.cpp' || echo '$(srcdir)/'`AttributeDesignator.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libxacmlpdp_la-AttributeDesignator.Tpo $(DEPDIR)/libxacmlpdp_la-AttributeDesignator.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='AttributeDesignator.cpp' object='libxacmlpdp_la-AttributeDesignator.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libxacmlpdp_la-AttributeDesignator.lo `test -f 'AttributeDesignator.cpp' || echo '$(srcdir)/'`AttributeDesignator.cpp libxacmlpdp_la-AttributeSelector.lo: AttributeSelector.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libxacmlpdp_la-AttributeSelector.lo -MD -MP -MF $(DEPDIR)/libxacmlpdp_la-AttributeSelector.Tpo -c -o libxacmlpdp_la-AttributeSelector.lo `test -f 'AttributeSelector.cpp' || echo '$(srcdir)/'`AttributeSelector.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libxacmlpdp_la-AttributeSelector.Tpo $(DEPDIR)/libxacmlpdp_la-AttributeSelector.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='AttributeSelector.cpp' object='libxacmlpdp_la-AttributeSelector.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libxacmlpdp_la-AttributeSelector.lo `test -f 'AttributeSelector.cpp' || echo '$(srcdir)/'`AttributeSelector.cpp libxacmlpdp_la-XACMLApply.lo: XACMLApply.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libxacmlpdp_la-XACMLApply.lo -MD -MP -MF $(DEPDIR)/libxacmlpdp_la-XACMLApply.Tpo -c -o libxacmlpdp_la-XACMLApply.lo `test -f 'XACMLApply.cpp' || echo '$(srcdir)/'`XACMLApply.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libxacmlpdp_la-XACMLApply.Tpo $(DEPDIR)/libxacmlpdp_la-XACMLApply.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XACMLApply.cpp' object='libxacmlpdp_la-XACMLApply.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libxacmlpdp_la-XACMLApply.lo `test -f 'XACMLApply.cpp' || echo '$(srcdir)/'`XACMLApply.cpp libxacmlpdp_la-XACMLCondition.lo: XACMLCondition.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libxacmlpdp_la-XACMLCondition.lo -MD -MP -MF $(DEPDIR)/libxacmlpdp_la-XACMLCondition.Tpo -c -o libxacmlpdp_la-XACMLCondition.lo `test -f 'XACMLCondition.cpp' || echo 
'$(srcdir)/'`XACMLCondition.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libxacmlpdp_la-XACMLCondition.Tpo $(DEPDIR)/libxacmlpdp_la-XACMLCondition.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XACMLCondition.cpp' object='libxacmlpdp_la-XACMLCondition.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libxacmlpdp_la-XACMLCondition.lo `test -f 'XACMLCondition.cpp' || echo '$(srcdir)/'`XACMLCondition.cpp libxacmlpdp_la-XACMLAttributeFactory.lo: XACMLAttributeFactory.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libxacmlpdp_la-XACMLAttributeFactory.lo -MD -MP -MF $(DEPDIR)/libxacmlpdp_la-XACMLAttributeFactory.Tpo -c -o libxacmlpdp_la-XACMLAttributeFactory.lo `test -f 'XACMLAttributeFactory.cpp' || echo '$(srcdir)/'`XACMLAttributeFactory.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libxacmlpdp_la-XACMLAttributeFactory.Tpo $(DEPDIR)/libxacmlpdp_la-XACMLAttributeFactory.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XACMLAttributeFactory.cpp' object='libxacmlpdp_la-XACMLAttributeFactory.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libxacmlpdp_la-XACMLAttributeFactory.lo `test -f 'XACMLAttributeFactory.cpp' || echo '$(srcdir)/'`XACMLAttributeFactory.cpp libxacmlpdp_la-XACMLFnFactory.lo: XACMLFnFactory.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libxacmlpdp_la-XACMLFnFactory.lo -MD -MP -MF $(DEPDIR)/libxacmlpdp_la-XACMLFnFactory.Tpo -c -o libxacmlpdp_la-XACMLFnFactory.lo `test -f 'XACMLFnFactory.cpp' || echo '$(srcdir)/'`XACMLFnFactory.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libxacmlpdp_la-XACMLFnFactory.Tpo $(DEPDIR)/libxacmlpdp_la-XACMLFnFactory.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XACMLFnFactory.cpp' object='libxacmlpdp_la-XACMLFnFactory.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libxacmlpdp_la-XACMLFnFactory.lo `test -f 'XACMLFnFactory.cpp' || echo '$(srcdir)/'`XACMLFnFactory.cpp libxacmlpdp_la-XACMLAlgFactory.lo: XACMLAlgFactory.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libxacmlpdp_la-XACMLAlgFactory.lo -MD -MP -MF $(DEPDIR)/libxacmlpdp_la-XACMLAlgFactory.Tpo -c -o libxacmlpdp_la-XACMLAlgFactory.lo `test -f 'XACMLAlgFactory.cpp' || echo '$(srcdir)/'`XACMLAlgFactory.cpp @am__fastdepCXX_TRUE@ $(am__mv) 
$(DEPDIR)/libxacmlpdp_la-XACMLAlgFactory.Tpo $(DEPDIR)/libxacmlpdp_la-XACMLAlgFactory.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XACMLAlgFactory.cpp' object='libxacmlpdp_la-XACMLAlgFactory.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libxacmlpdp_la-XACMLAlgFactory.lo `test -f 'XACMLAlgFactory.cpp' || echo '$(srcdir)/'`XACMLAlgFactory.cpp libxacmlpdp_la-XACMLPDP.lo: XACMLPDP.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libxacmlpdp_la-XACMLPDP.lo -MD -MP -MF $(DEPDIR)/libxacmlpdp_la-XACMLPDP.Tpo -c -o libxacmlpdp_la-XACMLPDP.lo `test -f 'XACMLPDP.cpp' || echo '$(srcdir)/'`XACMLPDP.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libxacmlpdp_la-XACMLPDP.Tpo $(DEPDIR)/libxacmlpdp_la-XACMLPDP.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='XACMLPDP.cpp' object='libxacmlpdp_la-XACMLPDP.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libxacmlpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libxacmlpdp_la-XACMLPDP.lo `test -f 'XACMLPDP.cpp' || echo '$(srcdir)/'`XACMLPDP.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
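# Illustrative examples only (added comment, not part of the generated rules):
#   make CXXFLAGS='-O2 -g'            # pass a variable value on the command line
#   cd src/hed/shc/xacmlpdp && make   # rebuild just this subdirectory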
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-noinstLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLRequest.cpp0000644000000000000000000000012412044527530024034 xustar000000000000000027 mtime=1351790424.395655 27 atime=1513200575.081708 30 ctime=1513200661.187762013 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLRequest.cpp0000644000175000002070000000544412044527530024110 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "XACMLRequest.h" Arc::Logger ArcSec::XACMLRequest::logger(Arc::Logger::rootLogger, "XACMLRequest"); static Arc::NS reqns("request", "urn:oasis:names:tc:xacml:2.0:context:schema:os"); /** get_request (in charge of class-loading of XACMLRequest) can only accept two types of argument: NULL, XMLNode*/ Arc::Plugin* ArcSec::XACMLRequest::get_request(Arc::PluginArgument* arg) { if(arg==NULL) return NULL; else { Arc::ClassLoaderPluginArgument* clarg = arg?dynamic_cast(arg):NULL; if(!clarg) return NULL; Arc::XMLNode* xarg = (Arc::XMLNode*)(*clarg); if(xarg==NULL) { return new ArcSec::XACMLRequest(arg); } // ??? 
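    // Otherwise the supplied XMLNode is wrapped in a Source object and used
    // to construct the request below.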
ArcSec::Source source(*xarg); return new ArcSec::XACMLRequest(source,arg); } } using namespace Arc; using namespace ArcSec; void XACMLRequest::make_request(){ //Parse the XMLNode structure, and generate the RequestAttribute object if((!reqnode) || (reqnode.Size() == 0)) { logger.msg(ERROR,"Request is empty"); return; } std::list r = reqnode.XPathLookup("//request:Request", reqns); if(r.empty()) { logger.msg(ERROR,"Can not find element with proper namespace"); return; } XMLNode node = *(r.begin()); XMLNode nd, cnd; //Parse the part //"Catagory" need to be processed std::string type; int i; nd = node["Subject"]; if(!nd) {std::cerr<<"There is no subject element in request"< part nd = node["Resource"]; Resource res; for(i = 0;; i++) { cnd = nd["Attribute"][i]; if(!cnd) break; res.push_back(new RequestAttribute(cnd, attrfactory)); } //Parse the part nd = node["Action"]; Action act; for(i = 0;; i++) { cnd = nd["Attribute"][i]; if(!cnd) break; act.push_back(new RequestAttribute(cnd, attrfactory)); } //Parse the part nd = node["Environment"]; Context env; for(i = 0;; i++) { cnd = nd["Attribute"][i]; if(!cnd) break; env.push_back(new RequestAttribute(cnd, attrfactory)); } } XACMLRequest::XACMLRequest (const Source& req, Arc::PluginArgument* parg) : Request(req,parg), attrfactory(NULL) { req.Get().New(reqnode); NS ns; ns["ra"]="urn:oasis:names:tc:xacml:2.0:context:schema:os"; reqnode.Namespaces(ns); } XACMLRequest::XACMLRequest (Arc::PluginArgument* parg) : Request(parg), attrfactory(NULL) { NS ns; ns["ra"]="urn:oasis:names:tc:xacml:2.0:context:schema:os"; XMLNode request(ns,"ra:Request"); request.New(reqnode); } XACMLRequest::~XACMLRequest(){ } nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLAttributeFactory.cpp0000644000000000000000000000012411730411253025672 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.084708 30 ctime=1513200661.205762233 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLAttributeFactory.cpp0000644000175000002070000000673411730411253025751 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "XACMLAttributeProxy.h" #include "XACMLAttributeFactory.h" using namespace Arc; namespace ArcSec { Arc::Plugin* get_xacmlpdp_attr_factory (Arc::PluginArgument* arg) { return new ArcSec::XACMLAttributeFactory(arg); } void XACMLAttributeFactory::initDatatypes(){ //Some XACML specified attribute types apmap.insert(std::pair(StringAttribute::getIdentifier(), new XACMLAttributeProxy)); apmap.insert(std::pair(DateTimeAttribute::getIdentifier(), new XACMLAttributeProxy)); apmap.insert(std::pair(DateAttribute::getIdentifier(), new XACMLAttributeProxy)); apmap.insert(std::pair(TimeAttribute::getIdentifier(), new XACMLAttributeProxy)); apmap.insert(std::pair(DurationAttribute::getIdentifier(), new XACMLAttributeProxy)); apmap.insert(std::pair(PeriodAttribute::getIdentifier(), new XACMLAttributeProxy)); apmap.insert(std::pair(X500NameAttribute::getIdentifier(), new XACMLAttributeProxy)); apmap.insert(std::pair(AnyURIAttribute::getIdentifier(), new XACMLAttributeProxy)); apmap.insert(std::pair(GenericAttribute::getIdentifier(), new XACMLAttributeProxy)); /** TODO: other datatype............. 
*/ } XACMLAttributeFactory::XACMLAttributeFactory(Arc::PluginArgument* parg) : AttributeFactory(parg) { initDatatypes(); } AttributeValue* XACMLAttributeFactory::createValue(const XMLNode& node, const std::string& type){ AttrProxyMap::iterator it; if((it=apmap.find(type)) != apmap.end()) return ((*it).second)->getAttribute(node); #if 0 // This may look like hack, but generic attribute needs special treatment std::string value; if((bool)(const_cast(node).Child())) { value = (std::string)(const_cast(node).Child()); //abc } else { value = (std::string)node; } //abc std::size_t start; start = value.find_first_not_of(" \n\r\t"); value = value.substr(start); std::size_t end; end = value.find_last_not_of(" \n\r\t"); value = value.substr(0, end+1); GenericAttribute* attr = new GenericAttribute(value, (std::string)(const_cast(node).Attribute("AttributeId"))); attr->setType(type); return attr; #endif //For generic attributes, treat them as string if((it=apmap.find("string")) != apmap.end()) return ((*it).second)->getAttribute(node); return NULL; } XACMLAttributeFactory::~XACMLAttributeFactory(){ AttrProxyMap::iterator it; for(it = apmap.begin(); it != apmap.end(); it = apmap.begin()){ AttributeProxy* attrproxy = (*it).second; apmap.erase(it); if(attrproxy) delete attrproxy; } } } // namespace ArcSec nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/AttributeSelector.cpp0000644000000000000000000000012411231311237025253 xustar000000000000000027 mtime=1248170655.118622 27 atime=1513200575.076708 30 ctime=1513200661.197762135 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/AttributeSelector.cpp0000644000175000002070000000303711231311237025323 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "XACMLEvaluationCtx.h" #include "AttributeSelector.h" using namespace Arc; using namespace ArcSec; AttributeSelector::AttributeSelector(Arc::XMLNode& node, AttributeFactory* attr_factory) : present(false), attrfactory(attr_factory) { std::string tp = (std::string)(node.Attribute("DataType")); if(tp.empty()) {std::cerr<<"Required DataType does not exist in AttributeSelector"< node //TODO: need to duplicate node maybe policyroot = node.GetRoot(); } AttributeSelector::~AttributeSelector() { } std::list AttributeSelector::evaluate(EvaluationCtx* ctx) { std::list res; res = ctx->getAttributes(reqctxpath, policyroot, type, attrfactory); if(present && (res.size() ==0)) { //std::cerr<<"AttributeSelector requires at least one attributes from request's"< #endif #include #include #include #include #include "XACMLPolicy.h" #include "XACMLRule.h" #include "XACMLTarget.h" static Arc::Logger logger(Arc::Logger::rootLogger, "XACMLTarget"); using namespace Arc; using namespace ArcSec; XACMLTargetMatch::XACMLTargetMatch(XMLNode& node, EvaluatorContext* ctx) : matchnode(node), attrval(NULL), function(NULL), designator(NULL), selector(NULL) { attrfactory = (AttributeFactory*)(*ctx); fnfactory = (FnFactory*)(*ctx); matchId = (std::string)(node.Attribute("MatchId")); //get the suffix of xacml-formated matchId, like //"urn:oasis:names:tc:xacml:1.0:function:string-equal", //and use it as the function name std::size_t found = matchId.find_last_of(":"); std::string funcname = matchId.substr(found+1); //If matchId does not exist, compose the DataType and "equal" function //e.g. 
if the DataType of inside this is "string", then //suppose the match function is "string-equal" std::string datatype = (std::string)(node["AttributeValue"].Attribute("DataType")); if(funcname.empty()) funcname = EqualFunction::getFunctionName(datatype); //create the Function based on the function name function = fnfactory->createFn(funcname); if(!function) { logger.msg(ERROR, "Can not create function %s", funcname); return; } //create the AttributeValue, AttributeDesignator and AttributeSelector XMLNode cnd; XMLNode attrval_nd; std::string attrval_id; std::string attrval_type; for(int i = 0;;i++ ) { cnd = node.Child(i); if(!cnd) break; std::string name = cnd.Name(); if(name.find("AttributeValue") != std::string::npos) { std::string data_type = cnd.Attribute("DataType"); // // http://www.med.example.com/schemas/record.xsd // attrval_nd = cnd; std::size_t f = data_type.find_last_of("#"); //http://www.w3.org/2001/XMLSchema#string if(f!=std::string::npos) { attrval_type = data_type.substr(f+1); } else { f=data_type.find_last_of(":"); //urn:oasis:names:tc:xacml:1.0:data-type:rfc822Name attrval_type = data_type.substr(f+1); } } else if(name.find("AttributeSelector") != std::string::npos) { selector = new AttributeSelector(cnd, attrfactory); attrval_id = (std::string)(cnd.Attribute("AttributeId")); } else if(name.find("AttributeDesignator") != std::string::npos) { designator = new AttributeDesignator(cnd, attrfactory); attrval_id = (std::string)(cnd.Attribute("AttributeId")); } } //kind of hack here. Because in xacml, (the policy side) //normally xml attribute "AttributeId" is absent, but in our implementation //about comparing two attribute, "AttributeId" is required. attrval_nd.NewAttribute("AttributeId") = attrval_id; attrval = attrfactory->createValue(attrval_nd, attrval_type); } XACMLTargetMatch::~XACMLTargetMatch() { if(attrval != NULL) delete attrval; if(selector != NULL) delete selector; if(designator != NULL) delete designator; } MatchResult XACMLTargetMatch::match(EvaluationCtx* ctx) { std::list attrlist; if(selector != NULL) attrlist = selector->evaluate(ctx); else if(designator != NULL) attrlist = designator->evaluate(ctx); AttributeValue* evalres = NULL; std::list::iterator i; for(i = attrlist.begin(); i != attrlist.end(); i++) { std::cout<<"Request side: "<<(*i)->encode()<<" Policy side: "<encode()<evaluate(attrval, (*i), false); BooleanAttribute bool_attr(true); if((evalres != NULL) && (evalres->equal(&bool_attr))) { std::cout<<"Matched!"<::iterator i; for(i = matches.begin(); i!= matches.end(); i++) { res = (*i)->match(ctx); if(res == MATCH) break; } return res; } XACMLTargetSection::XACMLTargetSection(Arc::XMLNode& node, EvaluatorContext* ctx) : sectionnode(node) { XMLNode cnd; std::string name; for(int i = 0;;i++ ) { cnd = node.Child(i); if(!cnd) break; name = cnd.Name(); if(name == "Subject" || name == "Resource" || name == "Action" || name == "Environment" || name == "AnySubject" || name == "AnyResource" || name == "AnyAction" || name == "AnyEnvironment") { groups.push_back(new XACMLTargetMatchGroup(cnd, ctx)); } if(name == "AnySubject" || name == "AnyResource" || name == "AnyAction" || name == "AnyEnvironment") break; } } XACMLTargetSection::~XACMLTargetSection() { while(!(groups.empty())) { XACMLTargetMatchGroup* grp = groups.back(); groups.pop_back(); delete grp; } } MatchResult XACMLTargetSection::match(EvaluationCtx* ctx) { MatchResult res = NO_MATCH; std::list::iterator i; for(i = groups.begin(); i!= groups.end(); i++) { res = (*i)->match(ctx); if(res == MATCH) break; } 
return res; } XACMLTarget::XACMLTarget(Arc::XMLNode& node, EvaluatorContext* ctx) : targetnode(node) { XMLNode cnd; std::string name; for(int i = 0;;i++ ) { cnd = node.Child(i); if(!cnd) break; name = cnd.Name(); if(name == "Subjects" || name == "Resources" || name == "Actions" || name == "Environments") { sections.push_back(new XACMLTargetSection(cnd, ctx)); } } } XACMLTarget::~XACMLTarget() { while(!(sections.empty())) { XACMLTargetSection* section = sections.back(); sections.pop_back(); delete section; } } MatchResult XACMLTarget::match(EvaluationCtx* ctx) { MatchResult res = NO_MATCH; std::list::iterator i; for(i = sections.begin(); i!= sections.end(); i++) { res = (*i)->match(ctx); if(res != MATCH) break; } return res; } nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLAlgFactory.cpp0000644000000000000000000000012411730411253024432 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.084708 30 ctime=1513200661.210762294 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLAlgFactory.cpp0000644000175000002070000000606211730411253024503 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "XACMLAlgFactory.h" using namespace Arc; namespace ArcSec { Arc::Plugin* get_xacmlpdp_alg_factory (Arc::PluginArgument* arg) { return new ArcSec::XACMLAlgFactory(arg); } void XACMLAlgFactory::initCombiningAlg(CombiningAlg* alg) { if(alg) algmap[alg->getalgId()]=alg; } void XACMLAlgFactory::initCombiningAlgs(){ //Some XACML specified algorithm types initCombiningAlg(new DenyOverridesCombiningAlg); initCombiningAlg(new PermitOverridesCombiningAlg); initCombiningAlg(new PermitDenyIndeterminateNotApplicableCombiningAlg); initCombiningAlg(new PermitDenyNotApplicableIndeterminateCombiningAlg); initCombiningAlg(new PermitIndeterminateDenyNotApplicableCombiningAlg); initCombiningAlg(new PermitIndeterminateNotApplicableDenyCombiningAlg); initCombiningAlg(new PermitNotApplicableDenyIndeterminateCombiningAlg); initCombiningAlg(new PermitNotApplicableIndeterminateDenyCombiningAlg); initCombiningAlg(new DenyPermitIndeterminateNotApplicableCombiningAlg); initCombiningAlg(new DenyPermitNotApplicableIndeterminateCombiningAlg); initCombiningAlg(new DenyIndeterminatePermitNotApplicableCombiningAlg); initCombiningAlg(new DenyIndeterminateNotApplicablePermitCombiningAlg); initCombiningAlg(new DenyNotApplicablePermitIndeterminateCombiningAlg); initCombiningAlg(new DenyNotApplicableIndeterminatePermitCombiningAlg); initCombiningAlg(new IndeterminatePermitDenyNotApplicableCombiningAlg); initCombiningAlg(new IndeterminatePermitNotApplicableDenyCombiningAlg); initCombiningAlg(new IndeterminateDenyPermitNotApplicableCombiningAlg); initCombiningAlg(new IndeterminateDenyNotApplicablePermitCombiningAlg); initCombiningAlg(new IndeterminateNotApplicablePermitDenyCombiningAlg); initCombiningAlg(new IndeterminateNotApplicableDenyPermitCombiningAlg); initCombiningAlg(new NotApplicablePermitDenyIndeterminateCombiningAlg); initCombiningAlg(new NotApplicablePermitIndeterminateDenyCombiningAlg); initCombiningAlg(new NotApplicableDenyPermitIndeterminateCombiningAlg); initCombiningAlg(new NotApplicableDenyIndeterminatePermitCombiningAlg); initCombiningAlg(new NotApplicableIndeterminatePermitDenyCombiningAlg); initCombiningAlg(new NotApplicableIndeterminateDenyPermitCombiningAlg); /** TODO: other algorithm type............. 
*/ } XACMLAlgFactory::XACMLAlgFactory(Arc::PluginArgument* parg) : AlgFactory(parg) { initCombiningAlgs(); } CombiningAlg* XACMLAlgFactory::createAlg(const std::string& type){ AlgMap::iterator it; if((it=algmap.find(type)) != algmap.end()){ return (*it).second; } else return NULL; } XACMLAlgFactory::~XACMLAlgFactory(){ AlgMap::iterator it; for(it = algmap.begin(); it != algmap.end(); it = algmap.begin()){ CombiningAlg * alg = (*it).second; algmap.erase(it); if(alg) delete alg; } } } // namespace ArcSec nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLPDP.cpp0000644000000000000000000000012412110410653023016 xustar000000000000000027 mtime=1361187243.181034 27 atime=1513200575.076708 30 ctime=1513200661.213762331 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLPDP.cpp0000644000175000002070000001407612110410653023073 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "XACMLPDP.h" Arc::Logger ArcSec::XACMLPDP::logger(Arc::Logger::getRootLogger(), "ArcSec.XACMLPDP"); using namespace Arc; namespace ArcSec { Plugin* XACMLPDP::get_xacml_pdp(PluginArgument* arg) { ArcSec::PDPPluginArgument* pdparg = arg?dynamic_cast(arg):NULL; if(!pdparg) return NULL; return new XACMLPDP((Arc::Config*)(*pdparg),arg); } // This class is used to store Evaluator per connection class XACMLPDPContext:public Arc::MessageContextElement { friend class XACMLPDP; private: Evaluator* eval; public: XACMLPDPContext(Evaluator* e); XACMLPDPContext(void); virtual ~XACMLPDPContext(void); }; XACMLPDPContext::~XACMLPDPContext(void) { if(eval) delete eval; } XACMLPDPContext::XACMLPDPContext(Evaluator* e):eval(e) { } XACMLPDPContext::XACMLPDPContext(void):eval(NULL) { std::string evaluator = "xacml.evaluator"; EvaluatorLoader eval_loader; eval = eval_loader.getEvaluator(evaluator); } XACMLPDP::XACMLPDP(Config* cfg, Arc::PluginArgument* parg):PDP(cfg,parg) /*, eval(NULL)*/ { XMLNode pdp_node(*cfg); XMLNode filter = (*cfg)["Filter"]; if((bool)filter) { XMLNode select_attr = filter["Select"]; XMLNode reject_attr = filter["Reject"]; for(;(bool)select_attr;++select_attr) select_attrs.push_back((std::string)select_attr); for(;(bool)reject_attr;++reject_attr) reject_attrs.push_back((std::string)reject_attr); }; XMLNode policy_store = (*cfg)["PolicyStore"]; XMLNode policy_location = policy_store["Location"]; for(;(bool)policy_location;++policy_location) policy_locations.push_back((std::string)policy_location); XMLNode policy = (*cfg)["Policy"]; for(;(bool)policy;++policy) policies.AddNew(policy); policy_combining_alg = (std::string)((*cfg)["PolicyCombiningAlg"]); } PDPStatus XACMLPDP::isPermitted(Message *msg) const { //Compose Request based on the information inside message, the Request will be //compatible to xacml request schema Evaluator* eval = NULL; const std::string ctxid = "arcsec.xacmlpdp"; try { Arc::MessageContextElement* mctx = (*(msg->Context()))[ctxid]; if(mctx) { XACMLPDPContext* pdpctx = dynamic_cast(mctx); if(pdpctx) { eval=pdpctx->eval; } else { logger.msg(INFO, "Can not find XACMLPDPContext"); } }; } catch(std::exception& e) { }; if(!eval) { XACMLPDPContext* pdpctx = new XACMLPDPContext(); if(pdpctx) { eval=pdpctx->eval; if(eval) { //for(Arc::AttributeIterator it = (msg->Attributes())->getAll("PDP:POLICYLOCATION"); it.hasMore(); it++) { // eval->addPolicy(SourceFile(*it)); //} for(std::list::const_iterator it = policy_locations.begin(); it!= policy_locations.end(); it++) { 
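            // Load every policy file listed under PolicyStore/Location in the
            // configuration into the evaluator.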
eval->addPolicy(SourceFile(*it)); } for(int n = 0;naddPolicy(Source(const_cast(policies)[n])); } if(!policy_combining_alg.empty()) { if(policy_combining_alg == "EvaluatorFailsOnDeny") { eval->setCombiningAlg(EvaluatorFailsOnDeny); } else if(policy_combining_alg == "EvaluatorStopsOnDeny") { eval->setCombiningAlg(EvaluatorStopsOnDeny); } else if(policy_combining_alg == "EvaluatorStopsOnPermit") { eval->setCombiningAlg(EvaluatorStopsOnPermit); } else if(policy_combining_alg == "EvaluatorStopsNever") { eval->setCombiningAlg(EvaluatorStopsNever); } else { AlgFactory* factory = eval->getAlgFactory(); if(!factory) { logger.msg(WARNING, "Evaluator does not support loadable Combining Algorithms"); } else { CombiningAlg* algorithm = factory->createAlg(policy_combining_alg); if(!algorithm) { logger.msg(ERROR, "Evaluator does not support specified Combining Algorithm - %s",policy_combining_alg); } else { eval->setCombiningAlg(algorithm); }; }; }; }; msg->Context()->Add(ctxid, pdpctx); } else { delete pdpctx; } } if(!eval) logger.msg(ERROR, "Can not dynamically produce Evaluator"); } if(!eval) { logger.msg(ERROR,"Evaluator for XACMLPDP was not loaded"); return false; }; MessageAuth* mauth = msg->Auth()->Filter(select_attrs,reject_attrs); MessageAuth* cauth = msg->AuthContext()->Filter(select_attrs,reject_attrs); if((!mauth) && (!cauth)) { logger.msg(ERROR,"Missing security object in message"); return false; }; NS ns; XMLNode requestxml(ns,""); if(mauth) { if(!mauth->Export(SecAttr::XACML,requestxml)) { delete mauth; logger.msg(ERROR,"Failed to convert security information to XACML request"); return false; }; delete mauth; }; if(cauth) { if(!cauth->Export(SecAttr::XACML,requestxml)) { delete mauth; logger.msg(ERROR,"Failed to convert security information to XACML request"); return false; }; delete cauth; }; { std::string s; requestxml.GetXML(s); logger.msg(DEBUG,"XACML request: %s",s); }; if(requestxml.Size() <= 0) { logger.msg(ERROR,"No requested security information was collected"); return false; }; //Call the evaluation functionality inside Evaluator Response *resp = eval->evaluate(requestxml); ArcSec::ResponseList rlist = resp->getResponseItems(); std::cout<res<res == DECISION_PERMIT) { logger.msg(INFO, "Authorized from xacml.pdp"); result = true; } else logger.msg(ERROR, "UnAuthorized from xacml.pdp"); if(resp) delete resp; return result; } XACMLPDP::~XACMLPDP(){ } } // namespace ArcSec nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLPolicy.h0000644000000000000000000000012411730411253023303 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.074708 30 ctime=1513200661.190762049 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLPolicy.h0000644000175000002070000000417011730411253023352 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_XACMLPOLICY_H__ #define __ARC_SEC_XACMLPOLICY_H__ #include #include #include #include #include #include #include "XACMLTarget.h" namespace ArcSec { ///XACMLPolicy class to parse and operate XACML specific node class XACMLPolicy : public Policy { public: /**Constructor*/ XACMLPolicy(Arc::PluginArgument* parg); /**Constructor*/ XACMLPolicy(const Arc::XMLNode node, Arc::PluginArgument* parg); /**Constructor - */ XACMLPolicy(const Arc::XMLNode node, EvaluatorContext* ctx, Arc::PluginArgument* parg); virtual ~XACMLPolicy(); virtual operator bool(void) const { return (bool)policynode; }; virtual Result eval(EvaluationCtx* ctx); virtual void setEvaluatorContext(EvaluatorContext* evaluatorcontext) { evaluatorctx = evaluatorcontext; }; /**Parse 
XMLNode, and construct the low-level Rule object*/ virtual void make_policy(); virtual MatchResult match(EvaluationCtx* ctx); virtual std::string getEffect() const { return "Not_applicable";}; virtual EvalResult& getEvalResult(); virtual void setEvalResult(EvalResult& res) { evalres = res; }; const char* getEvalName() const { return "xacml.evaluator"; }; const char* getName() const { return "xacml.policy"; }; static Arc::Plugin* get_policy(Arc::PluginArgument* arg); private: //std::list rules; std::string id; std::string version; /**The combining algorithm between lower-lever element, */ CombiningAlg *comalg; std::string description; /**Evaluator Context which contains factory object*/ EvaluatorContext* evaluatorctx; /**Algorithm factory*/ AlgFactory *algfactory; EvalResult evalres; /**Corresponding node*/ Arc::XMLNode policynode; /**Top element of policy tree*/ Arc::XMLNode policytop; /**The object for containing information*/ XACMLTarget* target; protected: static Arc::Logger logger; }; } // namespace ArcSec #endif /* __ARC_SEC_XACMLPOLICY_H__ */ nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLTarget.h0000644000000000000000000000012411213674533023302 xustar000000000000000027 mtime=1244625243.682748 27 atime=1513200575.083708 30 ctime=1513200661.194762098 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLTarget.h0000644000175000002070000000415311213674533023352 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_XACMLTARGET_H__ #define __ARC_SEC_XACMLTARGET_H__ #include #include #include #include #include #include #include #include "./AttributeSelector.h" #include "./AttributeDesignator.h" namespace ArcSec { // , or class XACMLTargetMatch { public: XACMLTargetMatch(Arc::XMLNode& node, EvaluatorContext* ctx); virtual ~XACMLTargetMatch(); virtual MatchResult match(EvaluationCtx* ctx); private: AttributeFactory* attrfactory; FnFactory* fnfactory; Arc::XMLNode matchnode; std::string matchId; AttributeValue* attrval; Function* function; AttributeDesignator* designator; AttributeSelector* selector; }; //node in higher level of above one, , or class XACMLTargetMatchGroup { public: XACMLTargetMatchGroup(Arc::XMLNode& node, EvaluatorContext* ctx); virtual ~XACMLTargetMatchGroup(); virtual MatchResult match(EvaluationCtx* ctx); private: Arc::XMLNode matchgrpnode; std::list matches; }; //node in higher level of above one, , or class XACMLTargetSection { public: XACMLTargetSection(Arc::XMLNode& node, EvaluatorContext* ctx); virtual ~XACMLTargetSection(); virtual MatchResult match(EvaluationCtx* ctx); private: Arc::XMLNode sectionnode; std::list groups; }; ///XACMLTarget class to parse and operate XACML specific node //node in higher level of above one, class XACMLTarget { public: /**Constructor - */ XACMLTarget(Arc::XMLNode& node, EvaluatorContext* ctx); virtual ~XACMLTarget(); virtual MatchResult match(EvaluationCtx* ctx); private: Arc::XMLNode targetnode; //std::list sections; std::list sections; }; } // namespace ArcSec #endif /* __ARC_SEC_XACMLTARGET_H__ */ nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLCondition.cpp0000644000000000000000000000012411231633622024327 xustar000000000000000027 mtime=1248278418.671274 27 atime=1513200575.079708 30 ctime=1513200661.202762196 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLCondition.cpp0000644000175000002070000000234111231633622024374 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "XACMLPolicy.h" #include "XACMLRule.h" #include "XACMLCondition.h" static Arc::Logger 
logger(Arc::Logger::rootLogger, "XACMLCondition"); using namespace Arc; using namespace ArcSec; XACMLCondition::XACMLCondition(Arc::XMLNode& node, EvaluatorContext* ctx) : condition_node(node) { XMLNode cnd; std::string name; for(int i = 0;;i++ ) { cnd = node.Child(i); if(!cnd) break; name = cnd.Name(); if(name == "Apply") { apply_list.push_back(new XACMLApply(cnd, ctx)); } } } XACMLCondition::~XACMLCondition() { while(!(apply_list.empty())) { XACMLApply* apply = apply_list.back(); apply_list.pop_back(); delete apply; } } std::list XACMLCondition::evaluate(EvaluationCtx* ctx) { std::list res_list; std::list::iterator i; for(i = apply_list.begin(); i!= apply_list.end(); i++) { res_list = (*i)->evaluate(ctx); if(!res_list.empty()) break; //Suppose only one exists under //TODO: process the situation about more than one exist under } return res_list; } nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/AttributeSelector.h0000644000000000000000000000012411213674533024733 xustar000000000000000027 mtime=1244625243.682748 27 atime=1513200575.077708 30 ctime=1513200661.199762159 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/AttributeSelector.h0000644000175000002070000000170311213674533025001 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ATTRIBUTESELECTOR_H__ #define __ARC_SEC_ATTRIBUTESELECTOR_H__ #include #include #include #include #include #include namespace ArcSec { //AttibuteSelector is for the element in xacml policy schema, and //in charge of getting attribute value from the request, by using xpath class AttributeSelector { public: AttributeSelector(Arc::XMLNode& node, AttributeFactory* attr_factory); virtual ~AttributeSelector(); virtual std::list evaluate(EvaluationCtx* ctx); private: std::string type; std::string reqctxpath; //The node from which the xpath searchs Arc::XMLNode policyroot; std::string xpathver; bool present; AttributeFactory* attrfactory; }; } // namespace ArcSec #endif /* __ARC_SEC_ATTRIBUTESELECTOR_H__ */ nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLRule.h0000644000000000000000000000012411231633622022755 xustar000000000000000027 mtime=1248278418.671274 27 atime=1513200575.076708 30 ctime=1513200661.192762074 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLRule.h0000644000175000002070000000272211231633622023025 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ARCRULE_H__ #define __ARC_SEC_ARCRULE_H__ #include #include #include #include #include #include #include #include #include "./XACMLTarget.h" #include "./XACMLCondition.h" namespace ArcSec { ///XACMLRule class to parse XACML specific node class XACMLRule : public Policy { public: XACMLRule(Arc::XMLNode& node, EvaluatorContext* ctx); virtual std::string getEffect(); virtual Result eval(EvaluationCtx* ctx); virtual MatchResult match(EvaluationCtx* ctx); virtual ~XACMLRule(); virtual operator bool(void) const { return true; }; virtual std::string getEffect() const { return effect; }; virtual EvalResult& getEvalResult(); virtual void setEvalResult(EvalResult& res) { evalres = res; }; const char* getEvalName() const { return "xacml.evaluator"; }; const char* getName() const { return "xacml.rule"; }; private: std::string effect; std::string id; std::string version; std::string description; AttributeFactory* attrfactory; FnFactory* fnfactory; EvalResult evalres; Arc::XMLNode rulenode; XACMLTarget* target; XACMLCondition* condition; protected: static Arc::Logger logger; }; } // namespace ArcSec #endif /* __ARC_SEC_ARCRULE_H__ */ 
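// Illustrative summary (added comment, not part of the original header):
// XACMLRule::eval() in XACMLRule.cpp of this directory evaluates a rule
// roughly as follows:
//   - if a Target is present and does not match, the rule is not applicable;
//     an indeterminate Target match makes the whole rule indeterminate;
//   - if a Condition is present and does not evaluate to boolean "true",
//     this implementation likewise returns an indeterminate result;
//   - otherwise the rule yields its Effect, i.e. Permit or Deny.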
nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/AttributeDesignator.h0000644000000000000000000000012411231311237025237 xustar000000000000000027 mtime=1248170655.118622 27 atime=1513200575.074708 30 ctime=1513200661.196762123 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/AttributeDesignator.h0000644000175000002070000000154211231311237025306 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ATTRIBUTEDESGINATOR_H__ #define __ARC_SEC_ATTRIBUTEDESGINATOR_H__ #include #include #include #include #include namespace ArcSec { //AttibuteDesignator is for the element in xacml policy schema, and //in charge of getting attribute value from the request class AttributeDesignator { public: AttributeDesignator(Arc::XMLNode& node, AttributeFactory* attr_factory); virtual ~AttributeDesignator(); virtual std::list evaluate(EvaluationCtx* ctx); private: std::string target; std::string id; std::string type; std::string category; std::string issuer; bool present; AttributeFactory* attrfactory; }; } // namespace ArcSec #endif /* __ARC_SEC_ATTRIBUTEDESGINATOR_H__ */ nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLAttributeProxy.h0000644000000000000000000000012211327673573025070 xustar000000000000000026 mtime=1264547707.86997 27 atime=1513200575.076708 29 ctime=1513200661.20476222 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLAttributeProxy.h0000644000175000002070000000247411327673573025146 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_XACMLATTRIBUTEPROXY_H__ #define __ARC_SEC_XACMLATTRIBUTEPROXY_H__ #include #include #include #include #include #include namespace ArcSec { ///XACML specific AttributeProxy class template class XACMLAttributeProxy : public AttributeProxy { public: XACMLAttributeProxy(){}; virtual ~XACMLAttributeProxy(){}; public: virtual AttributeValue* getAttribute(const Arc::XMLNode& node); }; ///Implementation of getAttribute method template AttributeValue* XACMLAttributeProxy::getAttribute(const Arc::XMLNode& node){ Arc::XMLNode x; std::string value; if((bool)(const_cast(node).Child())) { x=const_cast(node).Child(0); } else { x=node; } value = (std::string)x; std::string attrid = (std::string)(const_cast(node).Attribute("AttributeId")); std::size_t start; start = value.find_first_not_of(" \n\r\t"); value = value.substr(start); std::size_t end; end = value.find_last_not_of(" \n\r\t"); value = value.substr(0, end+1); return new TheAttribute(value, attrid); } } // namespace ArcSec #endif /* __ARC_SEC_XACMLATTRIBUTEPROXY_H__ */ nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLApply.h0000644000000000000000000000012411231633622023133 xustar000000000000000027 mtime=1248278418.671274 27 atime=1513200575.083708 30 ctime=1513200661.201762184 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLApply.h0000644000175000002070000000230311231633622023176 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_XACMLAPPLY_H__ #define __ARC_SEC_XACMLAPPLY_H__ #include #include #include #include #include #include #include #include "./AttributeSelector.h" #include "./AttributeDesignator.h" namespace ArcSec { // class XACMLApply { public: XACMLApply(Arc::XMLNode& node, EvaluatorContext* ctx); virtual ~XACMLApply(); virtual std::list evaluate(EvaluationCtx* ctx); private: Arc::XMLNode applynode; std::string functionId; AttributeFactory* attrfactory; FnFactory* fnfactory; Function* function; /**Sub *, the first value of map is the apperance sequence *in this , because sequance should be counted in case of function *such as "less-or-equal" */ std::map attrval_list; std::map sub_apply_list; 
std::map designator_list; std::map selector_list; }; } // namespace ArcSec #endif /* __ARC_SEC_XACMLAPPLY_H__ */ nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLRule.cpp0000644000000000000000000000012411730411253023306 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.074708 30 ctime=1513200661.191762061 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLRule.cpp0000644000175000002070000000520611730411253023356 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "XACMLRule.h" #include #include Arc::Logger ArcSec::XACMLRule::logger(Arc::Logger::rootLogger, "XACMLRule"); using namespace Arc; using namespace ArcSec; XACMLRule::XACMLRule(XMLNode& node, EvaluatorContext* ctx) : Policy(node,NULL), target(NULL), condition(NULL) { rulenode = node; evalres.node = node; evalres.effect = "Not_applicable"; attrfactory = (AttributeFactory*)(*ctx); fnfactory = (FnFactory*)(*ctx); id = (std::string)(node.Attribute("RuleId")); description = (std::string)(node["Description"]); if((std::string)(node.Attribute("Effect"))=="Permit") effect="Permit"; else if((std::string)(node.Attribute("Effect"))=="Deny") effect="Deny"; else logger.msg(Arc::ERROR, "Invalid Effect"); XMLNode targetnode = node["Target"]; if(((bool)targetnode) && ((bool)(targetnode.Child()))) target = new XACMLTarget(targetnode, ctx); XMLNode conditionnode = node["Condition"]; if((bool)conditionnode) condition = new XACMLCondition(conditionnode, ctx); } MatchResult XACMLRule::match(EvaluationCtx* ctx){ MatchResult res; if(target != NULL) res = target->match(ctx); else { logger.msg(Arc::ERROR, "No target available inside the rule"); res = INDETERMINATE; } return res; } Result XACMLRule::eval(EvaluationCtx* ctx){ Result result = DECISION_NOT_APPLICABLE; if(target != NULL) { MatchResult matchres = target->match(ctx); if(matchres == NO_MATCH) return result; else if(matchres == INDETERMINATE) {result = DECISION_INDETERMINATE; return result;} } //evaluate the "Condition" bool cond_res = false; if(condition != NULL) { std::list res_list = condition->evaluate(ctx); AttributeValue* attrval = *(res_list.begin()); //Suppose only one "bool" attribute value in the evaluation result. 
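  // The first returned value is compared against boolean "true"; if the
  // condition is not true, this implementation treats the rule as indeterminate.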
BooleanAttribute bool_attr(true); if(attrval->equal(&bool_attr)) cond_res = true; if(attrval) delete attrval; if(!cond_res) { result = DECISION_INDETERMINATE; return result; } } if (effect == "Permit") { result = DECISION_PERMIT; evalres.effect = "Permit"; } else if (effect == "Deny") { result = DECISION_DENY; evalres.effect = "Deny"; } return result; } std::string XACMLRule::getEffect(){ return effect; } EvalResult& XACMLRule::getEvalResult(){ return evalres; } XACMLRule::~XACMLRule(){ if(target != NULL) delete target; if(condition != NULL) delete condition; } nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLFnFactory.cpp0000644000000000000000000000012411730411253024272 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.083708 30 ctime=1513200661.208762269 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLFnFactory.cpp0000644000175000002070000001034311730411253024340 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include "XACMLFnFactory.h" using namespace Arc; namespace ArcSec { Arc::Plugin* get_xacmlpdp_fn_factory (Arc::PluginArgument* arg) { return new ArcSec::XACMLFnFactory(arg); } void XACMLFnFactory::initFunctions(){ //EqualFunctions std::string fnName = EqualFunction::getFunctionName(StringAttribute::getIdentifier()); std::string argType = StringAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new EqualFunction(fnName, argType))); fnName = EqualFunction::getFunctionName(DateTimeAttribute::getIdentifier()); argType = DateTimeAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new EqualFunction(fnName, argType))); fnName = EqualFunction::getFunctionName(DateAttribute::getIdentifier()); argType = DateAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new EqualFunction(fnName, argType))); fnName = EqualFunction::getFunctionName(TimeAttribute::getIdentifier()); argType = TimeAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new EqualFunction(fnName, argType))); fnName = EqualFunction::getFunctionName(DurationAttribute::getIdentifier()); argType = DurationAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new EqualFunction(fnName, argType))); fnName = EqualFunction::getFunctionName(PeriodAttribute::getIdentifier()); argType = PeriodAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new EqualFunction(fnName, argType))); fnName = EqualFunction::getFunctionName(X500NameAttribute::getIdentifier()); argType = X500NameAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new EqualFunction(fnName, argType))); fnName = EqualFunction::getFunctionName(AnyURIAttribute::getIdentifier()); argType = AnyURIAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new EqualFunction(fnName, argType))); //MatchFunctions fnName = MatchFunction::getFunctionName(StringAttribute::getIdentifier()); argType = StringAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new MatchFunction(fnName, argType))); fnName = MatchFunction::getFunctionName(X500NameAttribute::getIdentifier()); argType = X500NameAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new MatchFunction(fnName, argType))); fnName = MatchFunction::getFunctionName(AnyURIAttribute::getIdentifier()); argType = AnyURIAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new MatchFunction(fnName, argType))); //InRangeFunctions fnName = InRangeFunction::getFunctionName(StringAttribute::getIdentifier()); argType = StringAttribute::getIdentifier(); 
fnmap.insert(std::pair(fnName, new InRangeFunction(fnName, argType))); fnName = InRangeFunction::getFunctionName(PeriodAttribute::getIdentifier()); argType = PeriodAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new InRangeFunction(fnName, argType))); /** TODO: other function type............. */ } XACMLFnFactory::XACMLFnFactory(Arc::PluginArgument* parg) : FnFactory(parg) { initFunctions(); } Function* XACMLFnFactory::createFn(const std::string& type){ FnMap::iterator it; if((it=fnmap.find(type)) != fnmap.end()) return (*it).second; else { //Default: string-equal std::string tp("string-equal"); if((it=fnmap.find(tp)) != fnmap.end()) return (*it).second; } return NULL; } XACMLFnFactory::~XACMLFnFactory(){ FnMap::iterator it; for(it = fnmap.begin(); it != fnmap.end(); it = fnmap.begin()){ Function* fn = (*it).second; fnmap.erase(it); if(fn) delete fn; } } } // namespace ArcSec nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLApply.cpp0000644000000000000000000000012413213445240023465 xustar000000000000000027 mtime=1512983200.815191 27 atime=1513200575.078708 30 ctime=1513200661.200762171 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLApply.cpp0000644000175000002070000001371213213445240023536 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "XACMLPolicy.h" #include "XACMLRule.h" #include "XACMLApply.h" static Arc::Logger logger(Arc::Logger::rootLogger, "XACMLApply"); using namespace Arc; using namespace ArcSec; XACMLApply::XACMLApply(XMLNode& node, EvaluatorContext* ctx) : applynode(node), function(NULL) { attrfactory = (AttributeFactory*)(*ctx); fnfactory = (FnFactory*)(*ctx); functionId = (std::string)(node.Attribute("FunctionId")); //get the suffix of xacml-formated FunctionId, like //"urn:oasis:names:tc:xacml:1.0:function:and", //and use it as the function name std::size_t found = functionId.find_last_of(":"); std::string funcname = functionId.substr(found+1); if(funcname.empty()) { logger.msg(ERROR, "Can not create function: FunctionId does not exist"); return; }; //create the Function based on the function name function = fnfactory->createFn(funcname); if(!function) { logger.msg(ERROR, "Can not create function %s", funcname); return; } //create the AttributeValue, AttributeDesignator and AttributeSelector XMLNode cnd; XMLNode attrval_nd; std::string attrval_id; std::string attrval_type; for(int i = 0;;i++ ) { cnd = node.Child(i); if(!cnd) break; std::string name = cnd.Name(); if(name.find("AttributeValue") != std::string::npos) { std::string data_type = cnd.Attribute("DataType"); // // http://www.med.example.com/schemas/record.xsd // attrval_nd = cnd; std::size_t f = data_type.find_last_of("#"); //http://www.w3.org/2001/XMLSchema#string if(f!=std::string::npos) { attrval_type = data_type.substr(f+1); } else { f=data_type.find_last_of(":"); //urn:oasis:names:tc:xacml:1.0:data-type:rfc822Name attrval_type = data_type.substr(f+1); } AttributeValue* attrval = attrfactory->createValue(attrval_nd, attrval_type); attrval_list[i] = attrval; } else if(name.find("AttributeSelector") != std::string::npos) { AttributeSelector* selector = new AttributeSelector(cnd, attrfactory); selector_list[i] = selector; } else if(name.find("AttributeDesignator") != std::string::npos) { AttributeDesignator* designator = new AttributeDesignator(cnd, attrfactory); designator_list[i] = designator; } else if(name == "Apply") { XACMLApply* apply = new XACMLApply(cnd, ctx); sub_apply_list[i] = apply; } } } XACMLApply::~XACMLApply() { 
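  //The destructor releases everything the constructor parsed out of the
  //Apply element: the literal AttributeValues, the AttributeSelectors, the
  //AttributeDesignators and any nested Apply objects are erased from their
  //maps and deleted here.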
std::map::iterator attrval_it; std::map::iterator selector_it; std::map::iterator designator_it; std::map::iterator apply_it; attrval_it = attrval_list.begin(); selector_it = selector_list.begin(); designator_it = designator_list.begin(); apply_it = sub_apply_list.begin(); for(;attrval_it != attrval_list.end();) { AttributeValue* attrval = (*attrval_it).second; attrval_list.erase(attrval_it); delete attrval; attrval_it = attrval_list.begin(); } for(;selector_it != selector_list.end();) { AttributeSelector* selector = (*selector_it).second; selector_list.erase(selector_it); delete selector; selector_it = selector_list.begin(); } for(;designator_it != designator_list.end();) { AttributeDesignator* designator = (*designator_it).second; designator_list.erase(designator_it); delete designator; designator_it = designator_list.begin(); } for(;apply_it != sub_apply_list.end();) { XACMLApply* apply = (*apply_it).second; sub_apply_list.erase(apply_it); delete apply; apply_it = sub_apply_list.begin(); } } std::list XACMLApply::evaluate(EvaluationCtx* ctx) { std::list list; std::list attrlist; std::list attrlist_to_remove; std::map::iterator attrval_it; std::map::iterator selector_it; std::map::iterator designator_it; std::map::iterator apply_it; for(int i=0;;i++) { attrval_it = attrval_list.find(i); selector_it = selector_list.find(i); designator_it = designator_list.find(i); apply_it = sub_apply_list.find(i); if((attrval_it == attrval_list.end()) && (selector_it == selector_list.end()) && (designator_it == designator_list.end()) && (apply_it == sub_apply_list.end())) break; if(attrval_it != attrval_list.end()) { attrlist.push_back((*attrval_it).second); } if(selector_it != selector_list.end()) { list = (*selector_it).second->evaluate(ctx); attrlist.insert(attrlist.end(), list.begin(), list.end()); attrlist_to_remove.insert(attrlist_to_remove.end(), list.begin(), list.end()); } if(designator_it != designator_list.end()) { list = (*designator_it).second->evaluate(ctx); attrlist.insert(attrlist.end(), list.begin(), list.end()); attrlist_to_remove.insert(attrlist_to_remove.end(), list.begin(), list.end()); } if(apply_it != sub_apply_list.end()) { list = (*apply_it).second->evaluate(ctx); attrlist.insert(attrlist.end(), list.begin(), list.end()); attrlist_to_remove.insert(attrlist_to_remove.end(), list.begin(), list.end()); } } //Evaluate std::list res; try{ std::cout<<"There are "<evaluate(attrlist, false); } catch(std::exception&) { }; while(!(attrlist_to_remove.empty())) { //Note that the attributes which are directly parsed //from policy should not be deleted here. 
//Instead, they should be deleted by deconstructor of XACMLApply AttributeValue* val = attrlist_to_remove.back(); attrlist_to_remove.pop_back(); delete val; } return res; } nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLAttributeFactory.h0000644000000000000000000000012411730411253025337 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.071708 30 ctime=1513200661.207762257 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLAttributeFactory.h0000644000175000002070000000156111730411253025407 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_XACMLATTRIBUTEFACTORY_H__ #define __ARC_SEC_XACMLATTRIBUTEFACTORY_H__ #include #include #include #include #include namespace ArcSec { /// Attribute factory class for XACML specified attributes class XACMLAttributeFactory : public AttributeFactory { public: XACMLAttributeFactory(Arc::PluginArgument* parg); virtual ~XACMLAttributeFactory(); public: /**creat a AttributeValue according to the value in the XML node and the type; It should be the caller to release the AttributeValue Object*/ virtual AttributeValue* createValue(const Arc::XMLNode& node, const std::string& type); private: void initDatatypes(); }; Arc::Plugin* get_xacmlpdp_attr_factory (Arc::PluginArgument*); } // namespace ArcSec #endif /* __ARC_SEC_XACMLATTRIBUTEFACTORY_H__ */ nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLEvaluationCtx.h0000644000000000000000000000012411231311237024627 xustar000000000000000027 mtime=1248170655.118622 27 atime=1513200575.086708 30 ctime=1513200661.184761976 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLEvaluationCtx.h0000644000175000002070000000434111231311237024676 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_XACMLEVALUATIONCTX_H__ #define __ARC_SEC_XACMLEVALUATIONCTX_H__ #include #include #include #include #include #include #include namespace ArcSec { #if 0 ///RequestTuple, container which includes the class ArcRequestTuple : public RequestTuple { public: RequestTuple* duplicate(const RequestTuple*); //virtual Arc::XMLNode& getNode() { return tuple; }; ArcRequestTuple(); virtual ~ArcRequestTuple(); virtual void erase(); }; #endif ///EvaluationCtx, in charge of storing some context information for evaluation, including Request, current time, etc. 
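///A minimal usage sketch (illustrative; it assumes a Request* "req" produced
///by the XACML request plugin, an AttributeFactory* "attrfactory" taken from
///the evaluator context, and that the DataType is passed in the form it
///appears in the request document):
///
///  ArcSec::XACMLEvaluationCtx ctx(req);
///  std::string id   = "urn:oasis:names:tc:xacml:1.0:subject:subject-id";
///  std::string type = "http://www.w3.org/2001/XMLSchema#string";
///  std::string issuer, category;
///  std::list<ArcSec::AttributeValue*> vals =
///      ctx.getSubjectAttributes(id, type, issuer, category, attrfactory);
///  //the returned AttributeValue objects are created through the factory
///  //and are released by the caller (see XACMLApply::evaluate above)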
class XACMLEvaluationCtx : public EvaluationCtx { public: /**Construct a new EvaluationCtx based on the given request */ XACMLEvaluationCtx (Request* request); virtual ~XACMLEvaluationCtx(); virtual Request* getRequest() const; virtual std::list getSubjectAttributes(std::string& id, std::string& type, std::string& issuer, std::string& category, AttributeFactory* attrfactory); virtual std::list getResourceAttributes(std::string& id, std::string& type, std::string& issuer, AttributeFactory* attrfactory); virtual std::list getActionAttributes(std::string& id, std::string& type, std::string& issuer, AttributeFactory* attrfactory); virtual std::list getContextAttributes(std::string& id, std::string& type, std::string& issuer, AttributeFactory* attrfactory); virtual std::list getAttributes(std::string& reqctxpath, Arc::XMLNode& policy, std::string& data_type, AttributeFactory* attrfactory); private: std::list getAttributesHelper(std::string& id, std::string& type, std::string& issuer, AttributeFactory* attrfactory, const std::string& target_class); private: static Arc::Logger logger; Request* req; std::map subjects; std::map resources; std::map actions; std::map enviornments; }; } // namespace ArcSec #endif /* __ARC_SEC_XACMLEVALUATIONCTX_H__ */ nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLFnFactory.h0000644000000000000000000000012411730411253023737 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.071708 30 ctime=1513200661.209762282 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLFnFactory.h0000644000175000002070000000136511730411253024011 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_XACMLFUNCTIONFACTORY_H__ #define __ARC_SEC_XACMLFUNCTIONFACTORY_H__ #include #include #include namespace ArcSec { /// Function factory class for XACML specified attributes class XACMLFnFactory : public FnFactory { public: XACMLFnFactory(Arc::PluginArgument* parg); virtual ~XACMLFnFactory(); public: /**return a Function object according to the "Function" attribute in the XML node; The XACMLFnFactory itself will release the Function objects*/ virtual Function* createFn(const std::string& type); private: void initFunctions(); }; Arc::Plugin* get_xacmlpdp_fn_factory (Arc::PluginArgument*); } // namespace ArcSec #endif /* __ARC_SEC_XACMLFUNCTIONFACTORY_H__ */ nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLCondition.h0000644000000000000000000000012411231633622023774 xustar000000000000000027 mtime=1248278418.671274 27 atime=1513200575.085708 30 ctime=1513200661.203762208 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLCondition.h0000644000175000002070000000161311231633622024042 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_XACMLCONDITION_H__ #define __ARC_SEC_XACMLCONDITION_H__ #include #include #include #include #include #include #include #include "./AttributeSelector.h" #include "./AttributeDesignator.h" #include "./XACMLApply.h" namespace ArcSec { ///XACMLCondition class to parse and operate XACML specific node. 
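///The class is constructed from the Condition node of a rule and keeps the
///Apply elements found there; XACMLRule::eval (earlier in this listing)
///expects its evaluate() to yield a single boolean AttributeValue.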
class XACMLCondition { public: /**Constructor - */ XACMLCondition(Arc::XMLNode& node, EvaluatorContext* ctx); virtual ~XACMLCondition(); std::list evaluate(EvaluationCtx* ctx); private: Arc::XMLNode condition_node; std::list apply_list; }; } // namespace ArcSec #endif /* __ARC_SEC_XACMLCONDITION_H__ */ nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/AttributeDesignator.cpp0000644000000000000000000000012311231311237025571 xustar000000000000000027 mtime=1248170655.118622 27 atime=1513200575.086708 29 ctime=1513200661.19576211 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/AttributeDesignator.cpp0000644000175000002070000000402311231311237025636 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "AttributeDesignator.h" using namespace Arc; using namespace ArcSec; AttributeDesignator::AttributeDesignator(Arc::XMLNode& node, AttributeFactory* attr_factory) : present(false), attrfactory(attr_factory) { std::string name = node.Name(); size_t found = name.find("AttributeDesignator"); target = name.substr(0, found); id = (std::string)(node.Attribute("AttributeId")); if(id.empty()) {std::cerr<<"Required AttributeId does not exist in AttributeDesignator"< AttributeDesignator::evaluate(EvaluationCtx* ctx) { std::list res; if(target == "Subject") { res = ctx->getSubjectAttributes(id, type, issuer, category, attrfactory); } else if(target == "Resource") { res = ctx->getResourceAttributes(id, type, issuer, attrfactory); } else if(target == "Action") { res = ctx->getActionAttributes(id, type, issuer, attrfactory); } else if(target == "Environment") { res = ctx->getContextAttributes(id, type, issuer, attrfactory); } if(present && (res.size() ==0)) { std::cerr<<"AttributeDesignator requires at least one attributes from request's"< #include #include #include #include #include #include #include #include #include #include namespace ArcSec { ///Execute the policy evaluation, based on the request and policy class XACMLEvaluator : public Evaluator { friend class EvaluatorContext; private: static Arc::Logger logger; PolicyStore *plstore; FnFactory* fnfactory; AttributeFactory* attrfactory; AlgFactory* algfactory; EvaluatorContext* context; Arc::XMLNode* m_cfg; std::string request_classname; EvaluatorCombiningAlg combining_alg; CombiningAlg* combining_alg_ex; public: XACMLEvaluator (Arc::XMLNode* cfg, Arc::PluginArgument* parg); XACMLEvaluator (const char * cfgfile, Arc::PluginArgument* parg); virtual ~XACMLEvaluator(); /**Evaluate the request based on the policy information inside PolicyStore*/ virtual Response* evaluate(Request* request); virtual Response* evaluate(const Source& request); virtual Response* evaluate(Request* request, const Source& policy); virtual Response* evaluate(const Source& request, const Source& policy); virtual Response* evaluate(Request* request, Policy* policyobj); virtual Response* evaluate(const Source& request, Policy* policyobj); virtual AttributeFactory* getAttrFactory () { return attrfactory;}; virtual FnFactory* getFnFactory () { return fnfactory; }; virtual AlgFactory* getAlgFactory () { return algfactory; }; virtual void addPolicy(const Source& policy,const std::string& id = "") { plstore->addPolicy(policy, context, id); }; virtual void addPolicy(Policy* policy,const std::string& id = "") { plstore->addPolicy(policy, context, id); }; virtual void removePolicies(void) { plstore->removePolicies(); }; virtual void setCombiningAlg(EvaluatorCombiningAlg alg); virtual void setCombiningAlg(CombiningAlg* alg); virtual const char* getName(void) const; 
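  /**A hedged usage sketch for this interface: it assumes the evaluator
   * instance was obtained through the plugin mechanism, that "policy_doc"
   * and "request_doc" are Arc::XMLNode trees already parsed from XACML
   * policy and request documents, and that a Source can be constructed
   * around each of them:
   *
   *   ArcSec::Source policy_src(policy_doc);
   *   ArcSec::Source request_src(request_doc);
   *   evaluator->addPolicy(policy_src);
   *   ArcSec::Response* resp = evaluator->evaluate(request_src);
   *   //the Response is inspected and finally deleted by the caller
   */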
static Arc::Plugin* get_evaluator(Arc::PluginArgument* arg); protected: virtual Response* evaluate(EvaluationCtx* ctx); private: virtual void parsecfg(Arc::XMLNode& cfg); virtual Request* make_reqobj(Arc::XMLNode& reqnode); }; } // namespace ArcSec #endif /* __ARC_SEC_XACMLEVALUATOR_H__ */ nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLRequest.h0000644000000000000000000000012411730411253023474 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.086708 30 ctime=1513200661.188762025 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLRequest.h0000644000175000002070000000314011730411253023537 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_XACMLREQUEST_H__ #define __ARC_SEC_XACMLREQUEST_H__ #include #include #include #include #include //#include "ArcEvaluator.h" ///XACMLRequest, Parsing the xacml request format namespace ArcSec { class XACMLRequest : public Request { //friend class ArcEvaluator; public: /**Get the name of corresponding evaulator*/ virtual const char* getEvalName() const { return NULL; }; /**Get the name of this request*/ virtual const char* getName() const { return NULL; }; virtual Arc::XMLNode& getReqNode() { return reqnode; }; static Arc::Plugin* get_request(Arc::PluginArgument* arg); //**Set the attribute factory for the usage of Request*/ virtual void setAttributeFactory(AttributeFactory* attributefactory) { attrfactory = attributefactory; }; //**Default constructor*/ XACMLRequest (Arc::PluginArgument* parg); //**Parse request information from external source*/ XACMLRequest (const Source& source, Arc::PluginArgument* parg); virtual ~XACMLRequest(); //**Create the objects included in Request according to the node attached to the Request object*/ virtual void make_request(); private: //**AttributeFactory which is in charge of producing Attribute*/ AttributeFactory * attrfactory; //**A XMLNode structure which includes the xml structure of a request*/ Arc::XMLNode reqnode; Subject sub; Resource res; Action act; Context env; protected: static Arc::Logger logger; }; } // namespace ArcSec #endif /* __ARC_SEC_XACMLREQUEST_H__ */ nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLEvaluationCtx.cpp0000644000000000000000000000012411455353323025174 xustar000000000000000027 mtime=1286985427.270721 27 atime=1513200575.081708 30 ctime=1513200661.183761964 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLEvaluationCtx.cpp0000644000175000002070000001436411455353323025251 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "XACMLEvaluationCtx.h" using namespace Arc; using namespace ArcSec; Logger XACMLEvaluationCtx::logger(Arc::Logger::rootLogger, "XACMLEvaluationCtx"); XACMLEvaluationCtx::XACMLEvaluationCtx(Request* request) : EvaluationCtx(request), req(NULL) { req = request; XMLNode req_node = req->getReqNode(); } XACMLEvaluationCtx::~XACMLEvaluationCtx(){ } Request* XACMLEvaluationCtx::getRequest () const{ return req; } std::list XACMLEvaluationCtx::getAttributes(std::string& reqctxpath, Arc::XMLNode& namespaceNode, std::string& data_type, AttributeFactory* attrfactory) { std::list attrlist; XMLNode req_node = req->getReqNode(); NS nsList; nsList = namespaceNode.Namespaces(); std::string path; //If the xPath string is a relative one if(reqctxpath.find_first_of("/") != 0) { std::string name = req_node.Name(); std::string name_space = req_node.Namespace(); if(name_space.empty()) //no namespace in the request path = "//" + name + "/"; else { //namespaces are used in the request. 
lookup the correct prefix //for using in the search string for (NS::const_iterator ns = nsList.begin(); ns != nsList.end(); ++ns) { std::string ns_val = ns->second; if(ns_val == name_space) { std::string ns_name = ns->first; if(ns_name.empty()) path = "//"; else path = "//" + ns_name; path = path + ":" + name + "/"; break; } } if(path.empty()) std::cout<<"Failed to map a namespace into an XPath expression"< list = req_node.XPathLookup(path, nsList); std::list::iterator it; for (it = list.begin(); it != list.end(); it++) { std::cout << (*it).FullName() << ":" << (std::string)(*it) << std::endl; AttributeValue* attr = NULL; std::string type; std::size_t f = data_type.find_last_of("#"); //http://www.w3.org/2001/XMLSchema#string if(f!=std::string::npos) { type = data_type.substr(f+1); } else { f=data_type.find_last_of(":"); //urn:oasis:names:tc:xacml:1.0:data-type:rfc822Name type = data_type.substr(f+1); } attr = attrfactory->createValue((*it), type); attrlist.push_back(attr); } return attrlist; } std::list XACMLEvaluationCtx::getSubjectAttributes(std::string& id, std::string& type, std::string& issuer, std::string& category, AttributeFactory* attrfactory) { std::list attrlist; XMLNode req_node = req->getReqNode(); for(int i = 0;; i++) { XMLNode attr_nd = req_node["Subject"]["Attribute"][i]; std::string sub_category = req_node["Subject"].Attribute("SubjectCategory"); if(sub_category.empty()) sub_category = "urn:oasis:names:tc:xacml:1.0:subject-category:access-subject"; if(!attr_nd) break; std::string attr_id = attr_nd.Attribute("AttributeId"); std::string attr_type = attr_nd.Attribute("DataType"); std::string attr_issuer = attr_nd.Attribute("Issuer"); std::cout<createValue(attr_nd, tp); attrlist.push_back(attr); } } return attrlist; } std::list XACMLEvaluationCtx::getAttributesHelper(std::string& id, std::string& type, std::string& issuer, AttributeFactory* attrfactory, const std::string& target_class) { std::list attrlist; XMLNode req_node = req->getReqNode(); for(int i = 0;; i++) { XMLNode attr_nd = req_node[target_class]["Attribute"][i]; if(!attr_nd) break; std::string attr_id = attr_nd.Attribute("AttributeId"); std::string attr_type = attr_nd.Attribute("DataType"); std::string attr_issuer = attr_nd.Attribute("Issuer"); std::cout<createValue(attr_nd, tp); attrlist.push_back(attr); } } return attrlist; } std::list XACMLEvaluationCtx::getResourceAttributes(std::string& id, std::string& type, std::string& issuer, AttributeFactory* attrfactory) { return getAttributesHelper(id, type, issuer, attrfactory, std::string("Resource")); } std::list XACMLEvaluationCtx::getActionAttributes(std::string& id, std::string& type, std::string& issuer, AttributeFactory* attrfactory) { return getAttributesHelper(id, type, issuer, attrfactory, std::string("Action")); } std::list XACMLEvaluationCtx::getContextAttributes(std::string& id, std::string& type, std::string& issuer, AttributeFactory* attrfactory) { return getAttributesHelper(id, type, issuer, attrfactory, std::string("Environment")); } nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLAlgFactory.h0000644000000000000000000000012411730411253024077 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.071708 30 ctime=1513200661.211762306 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLAlgFactory.h0000644000175000002070000000144411730411253024147 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_XACMLALGFACTORY_H__ #define __ARC_SEC_XACMLALGFACTORY_H__ #include #include #include #include namespace ArcSec { ///Algorithm factory 
class for XACML class XACMLAlgFactory : public AlgFactory { public: XACMLAlgFactory(Arc::PluginArgument* parg); virtual ~XACMLAlgFactory(); public: /**return a Alg object according to the "CombiningAlg" attribute in the node; The XACMLAlgFactory itself will release the Alg objects*/ virtual CombiningAlg* createAlg(const std::string& type); private: void initCombiningAlg(CombiningAlg* alg); void initCombiningAlgs(); }; Arc::Plugin* get_xacmlpdp_alg_factory (Arc::PluginArgument*); } // namespace ArcSec #endif /* __ARC_SEC_XACMLALGFACTORY_H__ */ nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLPolicy.cpp0000644000000000000000000000012412046764073023652 xustar000000000000000027 mtime=1352394811.725688 27 atime=1513200575.086708 30 ctime=1513200661.189762037 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLPolicy.cpp0000644000175000002070000001222212046764073023716 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "XACMLPolicy.h" #include "XACMLRule.h" Arc::Logger ArcSec::XACMLPolicy::logger(Arc::Logger::rootLogger, "XACMLPolicy"); static Arc::NS policyns("policy", "urn:oasis:names:tc:xacml:2.0:policy:schema:os"); /** get_policy (in charge of class-loading of XACMLPolicy) can only * accept one type of argument--XMLNode */ Arc::Plugin* ArcSec::XACMLPolicy::get_policy(Arc::PluginArgument* arg) { //std::cout<<"Argument type of XACMLPolicy:"<(arg):NULL; if(!clarg) return NULL; // Check if empty or valid policy is supplied Arc::XMLNode* doc = (Arc::XMLNode*)(*clarg); if(doc==NULL) { std::cerr<<"XACMLPolicy creation requires XMLNode as argument"< res = policynode.XPathLookup("//policy:Policy",policyns); if(res.empty()) { logger.msg(ERROR,"Can not find element with proper namespace"); policynode.Destroy(); return; } policytop = *(res.begin()); } XACMLPolicy::XACMLPolicy(const XMLNode node, EvaluatorContext* ctx, Arc::PluginArgument* parg) : Policy(node,parg), comalg(NULL), target(NULL) { if((!node) || (node.Size() == 0)) { logger.msg(ERROR,"Policy is empty"); return; } node.New(policynode); std::list res = policynode.XPathLookup("//policy:Policy",policyns); if(res.empty()) { policynode.Destroy(); return; } policytop = *(res.begin()); setEvaluatorContext(ctx); make_policy(); } void XACMLPolicy::make_policy() { //EvalResult.node record the policy(in XMLNode) information about evaluation result. //According to the developer's requirement, //EvalResult.node can include rules(in XMLNode) that "Permit" or "Deny" //the request tuple. In the existing code, it include all //the original rules. 
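  //make_policy() walks the parsed Policy element: it records PolicyId,
  //selects the rule-combining algorithm from the suffix of the
  //RuleCombiningAlgId attribute (defaulting to Deny-Overrides), builds an
  //optional XACMLTarget from the Target child, and creates one XACMLRule
  //per Rule child.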
if(!policynode) return; if(!policytop) return; evalres.node = policynode; evalres.effect = "Not_applicable"; XACMLRule *rule; //Get AlgFactory from EvaluatorContext algfactory = (AlgFactory*)(*evaluatorctx); XMLNode nd = policytop; XMLNode rnd; if((bool)nd){ nd = policytop; id = (std::string)(nd.Attribute("PolicyId")); //Setup the rules' combining algorithm inside one policy, according to the "RuleCombiningAlgId" name if(nd.Attribute("RuleCombiningAlgId")) { std::string tmpstr = (std::string)(nd.Attribute("RuleCombiningAlgId")); size_t found = tmpstr.find_last_of(":"); std::string algstr = tmpstr.substr(found+1); if(algstr == "deny-overrides") algstr = "Deny-Overrides"; else if(algstr == "permit-overrides") algstr = "Permit-Overrides"; comalg = algfactory->createAlg(algstr); } else comalg = algfactory->createAlg("Deny-Overrides"); description = (std::string)(nd["Description"]); } logger.msg(INFO, "PolicyId: %s Alg inside this policy is:-- %s", id, comalg?(comalg->getalgId()):""); XMLNode targetnode = nd["Target"]; if(((bool)targetnode) && ((bool)(targetnode.Child()))) target = new XACMLTarget(targetnode, evaluatorctx); for ( int i=0;; i++ ){ rnd = nd["Rule"][i]; if(!rnd) break; rule = new XACMLRule(rnd, evaluatorctx); subelements.push_back(rule); } } MatchResult XACMLPolicy::match(EvaluationCtx* ctx){ MatchResult res; if(target != NULL) res = target->match(ctx); else { logger.msg(Arc::INFO, "No target available inside the policy"); res = INDETERMINATE; } return res; } Result XACMLPolicy::eval(EvaluationCtx* ctx){ Result result = DECISION_NOT_APPLICABLE; if(target != NULL) { MatchResult matchres = target->match(ctx); if(matchres == NO_MATCH) return result; else if(matchres == INDETERMINATE) {result = DECISION_INDETERMINATE; return result;} } result = comalg?comalg->combine(ctx, subelements):DECISION_INDETERMINATE; if(result == DECISION_PERMIT) evalres.effect = "Permit"; else if(result == DECISION_DENY) evalres.effect = "Deny"; else if(result == DECISION_INDETERMINATE) evalres.effect = "Indeterminate"; return result; } EvalResult& XACMLPolicy::getEvalResult(){ return evalres; } XACMLPolicy::~XACMLPolicy(){ while(!(subelements.empty())){ delete subelements.back(); subelements.pop_back(); } if(target != NULL) delete target; } nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/schema0000644000000000000000000000013213214316025022270 xustar000000000000000030 mtime=1513200661.239762649 30 atime=1513200668.722854169 30 ctime=1513200661.239762649 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/schema/0000755000175000002070000000000013214316025022413 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012611255700321024403 xustar000000000000000027 mtime=1253540049.444682 29 atime=1513200601.29002944 30 ctime=1513200661.236762612 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/schema/Makefile.am0000644000175000002070000000014111255700321024442 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = XACMLPDP.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315731024415 xustar000000000000000030 mtime=1513200601.322029831 30 atime=1513200649.261616151 30 ctime=1513200661.238762636 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/schema/Makefile.in0000644000175000002070000004352413214315731024473 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. 
# @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/xacmlpdp/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ 
AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = 
@JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ 
htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = XACMLPDP.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/xacmlpdp/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/xacmlpdp/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/schema/PaxHeaders.7502/XACMLPDP.xsd0000644000000000000000000000012411260401050024266 xustar000000000000000027 mtime=1254228520.829514 27 atime=1513200575.081708 30 ctime=1513200661.239762649 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/schema/XACMLPDP.xsd0000644000175000002070000000250211260401050024332 0ustar00mockbuildmock00000000000000 This element specifies file containing policy document. There could be multiple sub elements . This attribute is to specify the type of policy soure which will be used when parsing policy. 
nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/PaxHeaders.7502/XACMLPDP.h0000644000000000000000000000012412110410653022463 xustar000000000000000027 mtime=1361187243.181034 27 atime=1513200575.081708 30 ctime=1513200661.214762343 nordugrid-arc-5.4.2/src/hed/shc/xacmlpdp/XACMLPDP.h0000644000175000002070000000164012110410653022531 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_XACMLPDP_H__ #define __ARC_SEC_XACMLPDP_H__ #include //#include #include #include #include namespace ArcSec { ///XACMLPDP - PDP which can handle the XACML specific request and policy schema class XACMLPDP : public PDP { public: static Arc::Plugin* get_xacml_pdp(Arc::PluginArgument* arg); XACMLPDP(Arc::Config* cfg, Arc::PluginArgument* parg); virtual ~XACMLPDP(); virtual PDPStatus isPermitted(Arc::Message *msg) const; private: // Evaluator *eval; // Arc::ClassLoader* classloader; std::list select_attrs; std::list reject_attrs; std::list policy_locations; Arc::XMLNodeContainer policies; std::string policy_combining_alg; protected: static Arc::Logger logger; }; } // namespace ArcSec #endif /* __ARC_SEC_XACMLPDP_H__ */ nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/delegationsh0000644000000000000000000000013213214316025021666 xustar000000000000000030 mtime=1513200661.743768813 30 atime=1513200668.722854169 30 ctime=1513200661.743768813 nordugrid-arc-5.4.2/src/hed/shc/delegationsh/0000755000175000002070000000000013214316025022011 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/delegationsh/PaxHeaders.7502/DelegationSH.h0000644000000000000000000000012412110401544024416 xustar000000000000000027 mtime=1361183588.521754 27 atime=1513200575.009708 30 ctime=1513200661.740768776 nordugrid-arc-5.4.2/src/hed/shc/delegationsh/DelegationSH.h0000644000175000002070000000441012110401544024462 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_DELEGATIONSH_H__ #define __ARC_SEC_DELEGATIONSH_H__ #include #include #include #include namespace ArcSec { /// class DelegationContext; class DelegationSH : public SecHandler { private: enum { delegation_delegator, delegation_delegatee } delegation_role_; enum { delegation_x509, delegation_saml } delegation_type_; std::string ds_endpoint_; //endpoint of delegation service, // to which this Sec handler will // create a delegation credential std::string peers_endpoint_; //endpoint of the peer service, to which //the real service invokation will be called. //This variable is only valid for the delegator //role Delegation handler. std::string delegation_id_; //The delegation ID which is used to //be send to the peer service side. The //variable is only valid for the delegator role //Delegation handler. //The delegator role delegation handler is only need //to be set if it is configured in a client. //If the delegator role Delegation handler is configured //in a service, then delegation_id_ delegation_id //does not need to set. 
std::string delegation_cred_identity_; std::string cert_file_; std::string key_file_; std::string proxy_file_; std::string ca_file_; std::string ca_dir_; Arc::MessageContextElement* mcontext_; bool valid_; protected: static Arc::Logger logger; private: DelegationContext* get_delegcontext(Arc::Message& msg) const; public: DelegationSH(Arc::Config *cfg, Arc::ChainContext* ctx, Arc::PluginArgument* parg); virtual ~DelegationSH(void); static Arc::Plugin* get_sechandler(Arc::PluginArgument* arg); virtual SecHandlerStatus Handle(Arc::Message* msg) const; operator bool(void) { return valid_; }; bool operator!(void) { return !valid_; }; }; } // namespace ArcSec #endif /* __ARC_SEC_DELEGATIONSH_H__ */ nordugrid-arc-5.4.2/src/hed/shc/delegationsh/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515024007 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200600.228016451 30 ctime=1513200661.737768739 nordugrid-arc-5.4.2/src/hed/shc/delegationsh/Makefile.am0000644000175000002070000000073112052416515024052 0ustar00mockbuildmock00000000000000SUBDIRS = schema noinst_LTLIBRARIES = libdelegationsh.la libdelegationsh_la_SOURCES = DelegationSH.cpp DelegationSH.h libdelegationsh_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libdelegationsh_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(OPENSSL_LIBS) nordugrid-arc-5.4.2/src/hed/shc/delegationsh/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315730024011 xustar000000000000000030 mtime=1513200600.273017001 29 atime=1513200649.55261971 30 ctime=1513200661.738768752 nordugrid-arc-5.4.2/src/hed/shc/delegationsh/Makefile.in0000644000175000002070000007020013214315730024057 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/delegationsh DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) am__DEPENDENCIES_1 = libdelegationsh_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(am__DEPENDENCIES_1) am_libdelegationsh_la_OBJECTS = libdelegationsh_la-DelegationSH.lo libdelegationsh_la_OBJECTS = $(am_libdelegationsh_la_OBJECTS) libdelegationsh_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libdelegationsh_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libdelegationsh_la_SOURCES) DIST_SOURCES = $(libdelegationsh_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive 
installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = 
@GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = 
@SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = schema noinst_LTLIBRARIES = libdelegationsh.la libdelegationsh_la_SOURCES = DelegationSH.cpp DelegationSH.h libdelegationsh_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libdelegationsh_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(OPENSSL_LIBS) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; 
then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/delegationsh/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/delegationsh/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libdelegationsh.la: $(libdelegationsh_la_OBJECTS) $(libdelegationsh_la_DEPENDENCIES) $(libdelegationsh_la_LINK) $(libdelegationsh_la_OBJECTS) $(libdelegationsh_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdelegationsh_la-DelegationSH.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libdelegationsh_la-DelegationSH.lo: DelegationSH.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegationsh_la_CXXFLAGS) $(CXXFLAGS) -MT libdelegationsh_la-DelegationSH.lo -MD -MP -MF $(DEPDIR)/libdelegationsh_la-DelegationSH.Tpo -c -o libdelegationsh_la-DelegationSH.lo `test -f 'DelegationSH.cpp' || echo '$(srcdir)/'`DelegationSH.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdelegationsh_la-DelegationSH.Tpo $(DEPDIR)/libdelegationsh_la-DelegationSH.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DelegationSH.cpp' object='libdelegationsh_la-DelegationSH.lo' libtool=yes @AMDEPBACKSLASH@ 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegationsh_la_CXXFLAGS) $(CXXFLAGS) -c -o libdelegationsh_la-DelegationSH.lo `test -f 'DelegationSH.cpp' || echo '$(srcdir)/'`DelegationSH.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
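# Example invocation (illustrative only, not part of the generated rules): the
# install-strip target above is normally driven from the top-level build tree,
# e.g.
#   make DESTDIR=/tmp/arc-stage install-strip
# which installs into a staging directory while substituting
# $(INSTALL_STRIP_PROGRAM) for $(INSTALL_PROGRAM) so installed binaries are
# stripped; the DESTDIR path here is just a placeholder.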
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-noinstLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/delegationsh/PaxHeaders.7502/DelegationSH.cpp0000644000000000000000000000012412110401544024751 xustar000000000000000027 mtime=1361183588.521754 27 atime=1513200575.007707 30 ctime=1513200661.739768764 nordugrid-arc-5.4.2/src/hed/shc/delegationsh/DelegationSH.cpp0000644000175000002070000003763012110401544025027 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "DelegationSH.h" namespace ArcSec { using namespace Arc; static Logger logger(Logger::rootLogger, "DelegationSH"); Logger DelegationSH::logger(Logger::rootLogger,"DelegationSH"); Plugin* DelegationSH::get_sechandler(PluginArgument* arg) { SecHandlerPluginArgument* shcarg = arg?dynamic_cast(arg):NULL; if(!shcarg) return NULL; DelegationSH* plugin = new DelegationSH((Config*)(*shcarg),(ChainContext*)(*shcarg),arg); if(!plugin) return NULL; if(!(*plugin)) { delete plugin; plugin = NULL; }; return plugin; } /* sechandler_descriptors ARC_SECHANDLER_LOADER = { { "delegation.handler", 0, &get_sechandler}, { NULL, 0, NULL } }; */ class DelegationContext:public Arc::MessageContextElement{ public: bool have_delegated_; DelegationContext(void){ have_delegated_ = false; }; virtual ~DelegationContext(void) { }; }; DelegationSH::DelegationSH(Config *cfg,ChainContext*,Arc::PluginArgument* parg):SecHandler(cfg,parg),valid_(false) { std::string delegation_type = (std::string)((*cfg)["Type"]); std::string delegation_role = (std::string)((*cfg)["Role"]); ds_endpoint_ = (std::string)((*cfg)["DelegationServiceEndpoint"]); peers_endpoint_ = (std::string)((*cfg)["PeerServiceEndpoint"]);// And this value will be parsed from main chain later delegation_id_ = (std::string)((*cfg)["DelegationID"]); delegation_cred_identity_ = (std::string)((*cfg)["DelegationCredIdentity"]); if(delegation_type.empty()) delegation_type = "x509"; if(delegation_type == "x509") { proxy_file_=(std::string)((*cfg)["ProxyPath"]); cert_file_=(std::string)((*cfg)["CertificatePath"]); if(cert_file_.empty()&&proxy_file_.empty()&&delegation_cred_identity_.empty()) { logger.msg(ERROR,"Missing CertificatePath element or ProxyPath element, or is missing"); return; }; key_file_=(std::string)((*cfg)["KeyPath"]); if(key_file_.empty()&&proxy_file_.empty()&&delegation_cred_identity_.empty()) { logger.msg(ERROR,"Missing or empty KeyPath element, or is missing"); return; }; ca_file_=(std::string)((*cfg)["CACertificatePath"]); ca_dir_=(std::string)((*cfg)["CACertificatesDir"]); if(ca_file_.empty() && ca_dir_.empty()) { logger.msg(ERROR,"Missing or empty CertificatePath or CACertificatesDir element"); return; } delegation_type_=delegation_x509; if(delegation_role == "delegator") delegation_role_ = delegation_delegator; else if(delegation_role == "delegatee") delegation_role_ = delegation_delegatee; else { logger.msg(ERROR,"Delegation role not supported: %s",delegation_role); return; } } else if(delegation_type == "saml") { //TODO: delegation_type_=delegation_saml; } else { logger.msg(ERROR,"Delegation type not supported: %s",delegation_type); return; } mcontext_ = new DelegationContext(); valid_ = true; } DelegationSH::~DelegationSH() { delete mcontext_; } DelegationContext* DelegationSH::get_delegcontext(Arc::Message& msg) const { DelegationContext* deleg_ctx=NULL; Arc::MessageContextElement* mcontext = (*msg.Context())["deleg.context"]; if(mcontext) { try { deleg_ctx = dynamic_cast(mcontext); } catch(std::exception& e) { }; }; if(deleg_ctx) return 
deleg_ctx; deleg_ctx = new DelegationContext(); if(deleg_ctx) { msg.Context()->Add("deleg.context",deleg_ctx); } else { logger.msg(Arc::ERROR, "Failed to acquire delegation context"); } return deleg_ctx; } //Generate hash value for a string static unsigned long string_hash(const std::string& value){ unsigned long ret=0; unsigned char md[16]; MD5((unsigned char *)(value.c_str()),value.length(),&(md[0])); ret=(((unsigned long)md[0])|((unsigned long)md[1]<<8L)| ((unsigned long)md[2]<<16L)|((unsigned long)md[3]<<24L) )&0xffffffffL; return ret; } SecHandlerStatus DelegationSH::Handle(Arc::Message* msg) const { if(delegation_type_ == delegation_x509) { try { dynamic_cast(msg->Payload()); if(delegation_role_ == delegation_delegatee) { //Try to get the delegation service and delegation ID //information from incoming message //Store delegation context into message context DelegationContext* deleg_ctx = get_delegcontext(*msg); //dynamic_cast(mcontext_); if(!deleg_ctx) { logger.msg(Arc::ERROR, "Can't create delegation context"); return false; } //Credential delegation will only be triggered once for each connection if(deleg_ctx->have_delegated_) return true; logger.msg(Arc::INFO,"Delegation handler with delegatee role starts to process"); std::string method = (*msg).Attributes()->get("HTTP:METHOD"); if(method == "POST") { logger.msg(Arc::VERBOSE, "process: POST"); // Extracting payload Arc::PayloadSOAP* inpayload = NULL; try { inpayload = dynamic_cast(msg->Payload()); } catch(std::exception& e) { }; if(!inpayload) { logger.msg(Arc::ERROR, "input is not SOAP"); return false; }; // Analyzing request std::string ds_endpoint = (std::string)((*inpayload)["DelegationService"]); std::string delegation_id = (std::string)((*inpayload)["DelegationID"]); if(!ds_endpoint.empty()) { logger.msg(Arc::INFO, "Delegation service: %s",ds_endpoint.c_str()); Arc::MCCConfig ds_client_cfg; //Use service's credential to acquire delegation credential which //will be used by this service to act on behalf of the original //EEC credential's holder. if(!cert_file_.empty())ds_client_cfg.AddCertificate(cert_file_); if(!key_file_.empty())ds_client_cfg.AddPrivateKey(key_file_); if(!proxy_file_.empty())ds_client_cfg.AddProxy(proxy_file_); if(!ca_dir_.empty()) ds_client_cfg.AddCADir(ca_dir_); if(!ca_file_.empty())ds_client_cfg.AddCAFile(ca_file_); Arc::URL ds_url(ds_endpoint); Arc::ClientX509Delegation client_deleg(ds_client_cfg,ds_url); std::string delegation_cred; if(!delegation_id.empty()) { if(!client_deleg.acquireDelegation(DELEG_ARC,delegation_cred,delegation_id)) { logger.msg(ERROR,"Can not get the delegation credential: %s from delegation service: %s",delegation_id.c_str(),ds_endpoint.c_str()); return false; }; } else { std::string cred_identity = msg->Attributes()->get("TLS:IDENTITYDN"); std::string cred_delegator_ip = msg->Attributes()->get("TCP:REMOTEHOST"); if(!client_deleg.acquireDelegation(DELEG_ARC,delegation_cred,delegation_id,cred_identity,cred_delegator_ip)) { logger.msg(ERROR,"Can not get the delegation credential: %s from delegation service: %s",delegation_id.c_str(),ds_endpoint.c_str()); return false; }; } std::string cred_identity = msg->Attributes()->get("TLS:IDENTITYDN"); unsigned long hash_value = string_hash(cred_identity); //Store the delegated credential (got from delegation service) //into local temporary path; this delegated credential will be //used by the client functionality in this service (intemidiate //service) to contact the next service. 
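            // Illustration (the DN and path below are hypothetical, not values
            // produced verbatim by this handler): for an identity DN such as
            //   /DC=org/DC=example/CN=demo user
            // string_hash() above runs MD5 over the DN and packs the first four
            // digest bytes into a 32-bit value; the code below formats that
            // value as 8 hex digits, so the delegated proxy is written to a
            // path of the form
            //   /tmp/1a2b3c4d.pem
            // The delegator-role branch later rebuilds the same path from the
            // same DN (see the DelegationCredIdentity handling further down),
            // which is how the two handler roles find the shared credential.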
std::string deleg_cred_path="/tmp/"; char text[20]; snprintf(text, sizeof(text), "%08lx", hash_value); deleg_cred_path.append(text).append(".pem"); logger.msg(INFO,"Delegated credential identity: %s",cred_identity.c_str()); logger.msg(INFO,"The delegated credential got from delegation service is stored into path: %s",deleg_cred_path.c_str()); std::ofstream proxy_f(deleg_cred_path.c_str()); proxy_f.write(delegation_cred.c_str(),delegation_cred.size()); proxy_f.close(); //Remove the delegation information inside the payload //since this information ('DelegationService' and 'DelegationID') //is only supposed to be consumer by this security handler, not //the hosted service itself. if((*inpayload)["DelegationService"]) ((*inpayload)["DelegationService"]).Destroy(); if((*inpayload)["DelegationID"]) ((*inpayload)["DelegationID"]).Destroy(); } else { logger.msg(ERROR,"The endpoint of delgation service should be configured"); return false; } }; //Set the 'have_delegated_' value of DelegationContext to //be true, so that the delegation process will only be triggered //once for each communication. deleg_ctx->have_delegated_=true; logger.msg(Arc::INFO,"Delegation handler with delegatee role ends"); return true; } else if(delegation_role_ == delegation_delegator) { //Create one more level of delegation Arc::MCCConfig ds_client_cfg; //Use delegation credential (one option is to use the one got and stored //in the delegation handler with 'delegatee' delegation role, note in this //case the service implementation should configure the client interface //(the client interface which is called by the service implementation to //contact another service) with the 'Identity' of the credential on which //this service will act on behalf, then the delegation handler with 'delegator' //delegation role (configured in this client interface's configuration) will //get the delegated credential from local temporary path (this path is //decided according to the 'Identity'); the other option is cofigure the credential //(EEC credential or delegated credential) in this 'delegator' role delegation //handler's configuration. What can be concluded here is: the former option //applies to intermediate service; the later option applies to the client //utilities. 
//By creating one more level of delegation, the delegated credential //will be used by the next intermediate service to act on behalf of //the EEC credential's holder //Store delegation context into message context DelegationContext* deleg_ctx = dynamic_cast(mcontext_); //get_delegcontext(*msg); if(!deleg_ctx) { logger.msg(Arc::ERROR, "Can't create delegation context"); return false; } //Credential delegation will only be triggered once for each connection if(deleg_ctx->have_delegated_) return true; logger.msg(Arc::INFO,"Delegation handler with delegator role starts to process"); std::string proxy_path; if(!delegation_cred_identity_.empty()) { unsigned long hash_value = string_hash(delegation_cred_identity_); proxy_path="/tmp/"; char text[20]; snprintf(text, sizeof(text), "%08lx", hash_value); proxy_path.append(text).append(".pem"); logger.msg(INFO,"Delegated credential identity: %s",delegation_cred_identity_.c_str()); logger.msg(INFO,"The delegated credential got from path: %s",proxy_path.c_str()); ds_client_cfg.AddProxy(proxy_path); }else if(!proxy_file_.empty()) { ds_client_cfg.AddProxy(proxy_file_); }else if(!cert_file_.empty()&&!key_file_.empty()) { ds_client_cfg.AddCertificate(cert_file_); ds_client_cfg.AddPrivateKey(key_file_); } if(!ca_dir_.empty()) ds_client_cfg.AddCADir(ca_dir_); if(!ca_file_.empty())ds_client_cfg.AddCAFile(ca_file_); //Note here the delegation service endpoint to which this service will //create the delegation credential ('n+1' level delegation) could be //different with the delegation service endpoint from which this service //acquire the delegation credential ('n' level delegation) Arc::URL ds_url(ds_endpoint_); Arc::ClientX509Delegation client_deleg(ds_client_cfg,ds_url); std::string delegation_id; //delegation_id will be used later to interact //with the next intermediate service. if(!client_deleg.createDelegation(DELEG_ARC,delegation_id)) { logger.msg(ERROR,"Can not create delegation crendential to delegation service: %s",ds_endpoint_.c_str()); return false; }; //Send the endpoint of delegation service and delegation ID to //the peer service side, on which the delegation handler with //'delegation_delegatee' role will get the endpoint of delegation //service and delegation ID to aquire delegation credential. // //The delegation service and delegation ID //information will be sent to the service side by the //client side. If the client functionality is hosted/called by //some intermediate service, this handler (delegation handler with //delegator role) should be configured into the 'incoming' message of //the hosted service; if the client functionality is called by //some independent client utility, this handler should be configured //into the 'incoming' message of the client itself. // //The credential used to send delegation service and delegation ID //information is the same credential which is used to interact with //the peer service. And the target service is the peer service. 
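          // Illustration (endpoint and ID shown here are placeholders, not
          // values produced by this code): after the NewChild() calls further
          // down, the outgoing SOAP payload carries two extra out-of-band
          // elements in the urn:delegation namespace, roughly
          //   <deleg:DelegationService>https://example.org/delegationservice</deleg:DelegationService>
          //   <deleg:DelegationID>abcd-1234</deleg:DelegationID>
          // The delegatee-role branch above reads exactly these two elements
          // and removes them again, so they never reach the hosted service
          // itself.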
Arc::MCCConfig peers_client_cfg; if(!cert_file_.empty())peers_client_cfg.AddCertificate(cert_file_); if(!key_file_.empty())peers_client_cfg.AddPrivateKey(key_file_); if(!proxy_file_.empty())peers_client_cfg.AddProxy(proxy_file_); if(!ca_dir_.empty()) peers_client_cfg.AddCADir(ca_dir_); if(!ca_file_.empty())peers_client_cfg.AddCAFile(ca_file_); Arc::URL peers_url(peers_endpoint_); Arc::ClientSOAP client_peer(peers_client_cfg,peers_url,60); Arc::NS delegation_ns; delegation_ns["deleg"]="urn:delegation"; // //Treat the delegation information as 'out-of-bound' information //to SOAP message Arc::PayloadSOAP* outpayload = NULL; try { outpayload = dynamic_cast(msg->Payload()); } catch(std::exception& e) { }; if(!outpayload) { logger.msg(Arc::ERROR, "output is not SOAP"); return false; }; outpayload->NewChild("deleg:DelegationService")=ds_endpoint_; outpayload->NewChild("deleg:DelegationID")=delegation_id; //Set the 'have_delegated_' value of DelegationContext to //be true, so that the delegation process will only be triggered //once for each communication. deleg_ctx->have_delegated_=true; logger.msg(Arc::INFO, "Succeeded to send DelegationService: %s and DelegationID: %s info to peer service",ds_endpoint_.c_str(),delegation_id.c_str()); logger.msg(Arc::INFO,"Delegation handler with delegatee role ends"); return true; } } catch(std::exception&) { logger.msg(ERROR,"Incoming Message is not SOAP"); return false; } } else if(delegation_type_ == delegation_saml) { try { dynamic_cast(msg->Payload()); } catch(std::exception&) { logger.msg(ERROR,"Outgoing Message is not SOAP"); return false; } } else { logger.msg(ERROR,"Delegation handler is not configured"); return false; } return true; } } nordugrid-arc-5.4.2/src/hed/shc/delegationsh/PaxHeaders.7502/schema0000644000000000000000000000013213214316025023126 xustar000000000000000030 mtime=1513200661.761769033 30 atime=1513200668.722854169 30 ctime=1513200661.761769033 nordugrid-arc-5.4.2/src/hed/shc/delegationsh/schema/0000755000175000002070000000000013214316025023251 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/delegationsh/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711255700321025242 xustar000000000000000027 mtime=1253540049.444682 30 atime=1513200600.288017185 30 ctime=1513200661.759769008 nordugrid-arc-5.4.2/src/hed/shc/delegationsh/schema/Makefile.am0000644000175000002070000000014511255700321025304 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = DelegationSH.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/shc/delegationsh/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315730025252 xustar000000000000000030 mtime=1513200600.320017576 30 atime=1513200649.569619918 30 ctime=1513200661.760769021 nordugrid-arc-5.4.2/src/hed/shc/delegationsh/schema/Makefile.in0000644000175000002070000004354413214315730025332 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/delegationsh/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ 
ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = 
@LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = 
@lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = DelegationSH.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/delegationsh/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/delegationsh/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | 
\ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/delegationsh/schema/PaxHeaders.7502/DelegationSH.xsd0000644000000000000000000000012411457664434026253 xustar000000000000000027 mtime=1287612700.761119 27 atime=1513200575.009708 30 ctime=1513200661.761769033 nordugrid-arc-5.4.2/src/hed/shc/delegationsh/schema/DelegationSH.xsd0000644000175000002070000000764411457664434026333 0ustar00mockbuildmock00000000000000 nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/legacy0000644000000000000000000000013213214316025020464 xustar000000000000000030 mtime=1513200661.812769657 30 atime=1513200668.722854169 30 ctime=1513200661.812769657 nordugrid-arc-5.4.2/src/hed/shc/legacy/0000755000175000002070000000000013214316025020607 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/LegacyPDP.cpp0000644000000000000000000000012412675602216023030 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.069708 30 ctime=1513200661.802769534 nordugrid-arc-5.4.2/src/hed/shc/legacy/LegacyPDP.cpp0000644000175000002070000001653312675602216023105 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "LegacySecAttr.h" #include "ConfigParser.h" #include "auth.h" #include "LegacyPDP.h" namespace ArcSHCLegacy { Arc::Plugin* LegacyPDP::get_pdp(Arc::PluginArgument *arg) { ArcSec::PDPPluginArgument* pdparg = arg?dynamic_cast(arg):NULL; if(!pdparg) return NULL; return new LegacyPDP((Arc::Config*)(*pdparg),arg); } static bool match_lists(const std::list& list1, const std::list& list2, std::string& matched, Arc::Logger& logger) { for(std::list::const_iterator l1 = list1.begin(); l1 != list1.end(); ++l1) { for(std::list::const_iterator l2 = list2.begin(); l2 != list2.end(); ++l2) { if((*l1) == (*l2)) { matched = *l1; return true; }; }; }; return false; } class LegacyPDPCP: public ConfigParser { public: LegacyPDPCP(LegacyPDP::cfgfile& file, Arc::Logger& logger):ConfigParser(file.filename,logger),file_(file) { }; virtual ~LegacyPDPCP(void) { }; protected: virtual bool BlockStart(const std::string& id, const std::string& name) { std::string bname = id; if(!name.empty()) bname = bname+"/"+name; for(std::list::iterator block = file_.blocks.begin(); block != file_.blocks.end();++block) { if(block->name == bname) { block->exists = true; }; }; return true; }; virtual bool BlockEnd(const std::string& id, const std::string& name) { return true; }; virtual bool ConfigLine(const std::string& id, const std::string& name, const std::string& cmd, const std::string& line) { //if(group_matched_) return true; if(cmd != "groupcfg") return true; std::string bname = id; if(!name.empty()) bname = bname+"/"+name; for(std::list::iterator block = file_.blocks.begin(); block != file_.blocks.end();++block) { if(block->name == bname) { block->limited = true; std::list groups; Arc::tokenize(line,groups," \t","\"","\""); block->groups.insert(block->groups.end(),groups.begin(),groups.end()); }; }; return true; }; private: LegacyPDP::cfgfile& file_; }; LegacyPDP::LegacyPDP(Arc::Config* cfg,Arc::PluginArgument* parg):PDP(cfg,parg) { any_ = false; Arc::XMLNode group = (*cfg)["Group"]; while((bool)group) { groups_.push_back((std::string)group); ++group; }; Arc::XMLNode vo = (*cfg)["VO"]; while((bool)vo) { vos_.push_back((std::string)vo); ++vo; }; Arc::XMLNode block = (*cfg)["ConfigBlock"]; while((bool)block) { std::string filename = (std::string)(block["ConfigFile"]); if(filename.empty()) { logger.msg(Arc::ERROR, "Configuration file not specified in ConfigBlock"); //blocks_.clear(); return; 
}; cfgfile file(filename); Arc::XMLNode name = block["BlockName"]; while((bool)name) { std::string blockname = (std::string)name; if(blockname.empty()) { logger.msg(Arc::ERROR, "BlockName is empty"); //blocks_.clear(); return; }; //file.blocknames.push_back(blockname); file.blocks.push_back(blockname); ++name; }; LegacyPDPCP parser(file,logger); if((!parser) || (!parser.Parse())) { logger.msg(Arc::ERROR, "Failed to parse configuration file %s",filename); return; }; for(std::list::const_iterator b = file.blocks.begin(); b != file.blocks.end(); ++b) { if(!(b->exists)) { logger.msg(Arc::ERROR, "Block %s not found in configuration file %s",b->name,filename); return; }; if(!(b->limited)) { any_ = true; } else { groups_.insert(groups_.end(),b->groups.begin(),b->groups.end()); }; }; //blocks_.push_back(file); ++block; }; } LegacyPDP::~LegacyPDP() { } class LegacyPDPAttr: public Arc::SecAttr { public: LegacyPDPAttr(bool decision):decision_(decision) { }; LegacyPDPAttr(bool decision, const std::list& mvoms, const std::list& mvo): decision_(decision), voms(mvoms), vo(mvo) { }; virtual ~LegacyPDPAttr(void); // Common interface virtual operator bool(void) const; virtual bool Export(Arc::SecAttrFormat format,Arc::XMLNode &val) const; virtual std::string get(const std::string& id) const; virtual std::list getAll(const std::string& id) const; // Specific interface bool GetDecision(void) const { return decision_; }; protected: bool decision_; virtual bool equal(const SecAttr &b) const; std::list voms; std::list vo; }; LegacyPDPAttr::~LegacyPDPAttr(void) { } LegacyPDPAttr::operator bool(void) const { return true; } bool LegacyPDPAttr::Export(Arc::SecAttrFormat format,Arc::XMLNode &val) const { return true; } std::string LegacyPDPAttr::get(const std::string& id) const { if(id == "VOMS") { if(!voms.empty()) return *voms.begin(); } else if(id == "VO") { if(!vo.empty()) return *vo.begin(); } return ""; } std::list LegacyPDPAttr::getAll(const std::string& id) const { if(id == "VOMS") return voms; if(id == "VO") return vo; return std::list(); } bool LegacyPDPAttr::equal(const SecAttr &b) const { const LegacyPDPAttr& a = dynamic_cast(b); if (!a) return false; return (decision_ == a.decision_); } ArcSec::PDPStatus LegacyPDP::isPermitted(Arc::Message *msg) const { if(any_) return true; // No need to perform anything if everyone is allowed Arc::SecAttr* sattr = msg->Auth()->get("ARCLEGACY"); if(!sattr) { // Only if information collection is done per context. // Check if decision is already made. Arc::SecAttr* dattr = msg->AuthContext()->get("ARCLEGACYPDP"); if(dattr) { LegacyPDPAttr* pattr = dynamic_cast(dattr); if(pattr) { // Decision is already made in this context return pattr->GetDecision(); }; }; }; if(!sattr) sattr = msg->AuthContext()->get("ARCLEGACY"); if(!sattr) { logger.msg(Arc::ERROR, "LegacyPDP: there is no ARCLEGACY Sec Attribute defined. 
nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/Makefile.am0000644000000000000000000000012613213471407022605 xustar000000000000000027 mtime=1512993543.496042 30 atime=1513200600.459019276 29 ctime=1513200661.78276929 nordugrid-arc-5.4.2/src/hed/shc/legacy/Makefile.am0000644000175000002070000000517013213471407022653 0ustar00mockbuildmock00000000000000
SUBDIRS = schema
DIST_SUBDIRS = schema

pkglib_LTLIBRARIES = libarcshclegacy.la

if GLOBUSUTILS_ENABLED
pkglibexec_PROGRAMS = arc-lcas arc-lcmaps
endif

libarcshclegacy_la_SOURCES = auth_file.cpp auth_subject.cpp \
  auth_plugin.cpp auth_lcas.cpp auth_ldap.cpp \
  auth_voms.cpp auth.cpp auth.h \
  simplemap.cpp simplemap.h \
  unixmap_lcmaps.cpp unixmap.cpp unixmap.h \
  ConfigParser.cpp ConfigParser.h \
  LegacySecAttr.cpp LegacySecAttr.h \
  LegacySecHandler.cpp LegacySecHandler.h \
  LegacyPDP.cpp LegacyPDP.h \
  LegacyMap.cpp LegacyMap.h \
  plugin.cpp
libarcshclegacy_la_CXXFLAGS = -I$(top_srcdir)/include \
  $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS)
libarcshclegacy_la_LIBADD = \
  $(top_builddir)/src/hed/libs/compute/libarccompute.la \
  $(top_builddir)/src/hed/libs/security/libarcsecurity.la \
  $(top_builddir)/src/hed/libs/credential/libarccredential.la \
  $(top_builddir)/src/hed/libs/message/libarcmessage.la \
  $(top_builddir)/src/hed/libs/loader/libarcloader.la \
  $(top_builddir)/src/hed/libs/common/libarccommon.la \
  $(LIBXML2_LIBS) $(GLIBMM_LIBS)
libarcshclegacy_la_LDFLAGS = -no-undefined -avoid-version -module

arc_lcas_SOURCES = arc_lcas.cpp cert_util.cpp cert_util.h
arc_lcas_CXXFLAGS = -I$(top_srcdir)/include \
  $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(GTHREAD_CFLAGS) \
  $(OPENSSL_CFLAGS) $(LCAS_CFLAGS) \
  $(GLOBUS_GSSAPI_GSI_CFLAGS) $(GLOBUS_GSI_CREDENTIAL_CFLAGS) \
  $(GLOBUS_GSI_CERT_UTILS_CFLAGS) $(AM_CXXFLAGS)
arc_lcas_LDADD = \
  $(top_builddir)/src/hed/libs/globusutils/libarcglobusutils.la \
  $(top_builddir)/src/hed/libs/common/libarccommon.la \
  $(GLIBMM_LIBS) $(OPENSSL_LIBS) \
  $(GLOBUS_COMMON_LIBS) \
  $(GLOBUS_GSSAPI_GSI_LIBS) $(GLOBUS_GSI_CREDENTIAL_LIBS) \
  $(GLOBUS_GSI_CERT_UTILS_LIBS)

arc_lcmaps_SOURCES = arc_lcmaps.cpp cert_util.cpp cert_util.h
arc_lcmaps_CXXFLAGS = -I$(top_srcdir)/include \
  $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(GTHREAD_CFLAGS) \
  $(OPENSSL_CFLAGS) $(LCMAPS_CFLAGS) \
  $(GLOBUS_GSSAPI_GSI_CFLAGS) $(GLOBUS_GSI_CREDENTIAL_CFLAGS) \
  $(GLOBUS_GSI_CERT_UTILS_CFLAGS) $(AM_CXXFLAGS)
arc_lcmaps_LDADD = \
  $(top_builddir)/src/hed/libs/globusutils/libarcglobusutils.la \
  $(top_builddir)/src/hed/libs/common/libarccommon.la \
  $(GLIBMM_LIBS) $(OPENSSL_LIBS) \
  $(GLOBUS_COMMON_LIBS) \
  $(GLOBUS_GSSAPI_GSI_LIBS) $(GLOBUS_GSI_CREDENTIAL_LIBS) \
  $(GLOBUS_GSI_CERT_UTILS_LIBS)
nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/simplemap.h0000644000000000000000000000012411611503620022700 xustar000000000000000027 mtime=1311147920.204773 27 atime=1513200575.059708 30 ctime=1513200661.792769412 nordugrid-arc-5.4.2/src/hed/shc/legacy/simplemap.h0000644000175000002070000000067311611503620022753 0ustar00mockbuildmock00000000000000
#include <string>

#define SELFUNMAP_TIME (10*24*60*60)

namespace ArcSHCLegacy {

class SimpleMap {
 private:
  std::string dir_;
  int pool_handle_;
 public:
  SimpleMap(const char* dir);
  ~SimpleMap(void);
  std::string map(const char* subject);
  bool unmap(const char* subject);
  operator bool(void) const { return (pool_handle_ != -1); };
  bool operator!(void) const { return (pool_handle_ == -1); };
};

} // namespace ArcSHCLegacy
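SimpleMap above is the pool-account mapper: it is constructed on a pool directory, map() hands out a local account for a certificate subject, and unmap() releases the assignment again; the SELFUNMAP_TIME constant suggests stale entries are also expired after ten days. A hedged usage sketch follows; it is not part of the ARC sources, the pool path and subject string are invented, and the empty-string-on-failure convention assumed for map() is an inference from its return type.

// Usage sketch only (not ARC code); the pool directory and subject are invented.
#include <iostream>
#include <string>

#include "simplemap.h"

int main() {
  // The constructor takes the directory holding the pool-account state (assumed path).
  ArcSHCLegacy::SimpleMap pool("/etc/grid-security/pool");
  if (!pool) {
    std::cerr << "failed to open pool directory" << std::endl;
    return 1;
  }
  // map() returns the local account assigned to the subject
  // (assumed: an empty string signals that no account could be assigned).
  std::string account = pool.map("/DC=org/DC=example/CN=Jane Doe");
  if (account.empty()) {
    std::cerr << "no pool account available" << std::endl;
    return 1;
  }
  std::cout << "mapped to " << account << std::endl;
  // unmap() releases the assignment explicitly.
  pool.unmap("/DC=org/DC=example/CN=Jane Doe");
  return 0;
}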
nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/LegacyPDP.h0000644000000000000000000000012412675602216022475 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.033708 30 ctime=1513200661.803769547 nordugrid-arc-5.4.2/src/hed/shc/legacy/LegacyPDP.h0000644000175000002070000000230512675602216022542 0ustar00mockbuildmock00000000000000
#include
#include
#include
#include
#include
#include

/**
 * Match authorization information collected by ArcLegacyHandler and
 * stored in the ARCLEGACY sec. attribute, and provide a decision based on
 * the configuration. The decision is stored in the message context sec.
 * attribute under the ARCLEGACYPDP tag. The LegacyPDPAttr class is used
 * for storing the decision.
 */

namespace ArcSHCLegacy {

class LegacyPDP : public ArcSec::PDP {
 friend class LegacyPDPCP;
 private:
  class cfgblock {
   public:
    std::string name;
    std::list<std::string> groups;
    bool exists;
    bool limited;
    cfgblock(const std::string& n):name(n),exists(false),limited(false) { };
  };
  class cfgfile {
   public:
    std::string filename;
    std::list<cfgblock> blocks;
    cfgfile(const std::string& fname):filename(fname) {};
  };
  bool any_;
  std::list<std::string> groups_;
  std::list<std::string> vos_;
 public:
  static Arc::Plugin* get_pdp(Arc::PluginArgument *arg);
  LegacyPDP(Arc::Config* cfg, Arc::PluginArgument* parg);
  virtual ~LegacyPDP();
  virtual ArcSec::PDPStatus isPermitted(Arc::Message *msg) const;
};

} // namespace ArcSHCLegacy
nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315730022607 xustar000000000000000030 mtime=1513200600.548020365 30 atime=1513200649.585620114 29 ctime=1513200661.78276929 nordugrid-arc-5.4.2/src/hed/shc/legacy/Makefile.in0000644000175000002070000017374013214315730022662 0ustar00mockbuildmock00000000000000
# Makefile.in generated by automake 1.11.1 from Makefile.am.
# @configure_input@

# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
# Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ @GLOBUSUTILS_ENABLED_TRUE@pkglibexec_PROGRAMS = arc-lcas$(EXEEXT) \ @GLOBUSUTILS_ENABLED_TRUE@ arc-lcmaps$(EXEEXT) subdir = src/hed/shc/legacy DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" \ "$(DESTDIR)$(pkglibexecdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libarcshclegacy_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libarcshclegacy_la_OBJECTS = libarcshclegacy_la-auth_file.lo \ libarcshclegacy_la-auth_subject.lo \ libarcshclegacy_la-auth_plugin.lo \ libarcshclegacy_la-auth_lcas.lo \ libarcshclegacy_la-auth_ldap.lo \ libarcshclegacy_la-auth_voms.lo libarcshclegacy_la-auth.lo \ libarcshclegacy_la-simplemap.lo \ libarcshclegacy_la-unixmap_lcmaps.lo \ libarcshclegacy_la-unixmap.lo \ libarcshclegacy_la-ConfigParser.lo \ libarcshclegacy_la-LegacySecAttr.lo \ 
libarcshclegacy_la-LegacySecHandler.lo \ libarcshclegacy_la-LegacyPDP.lo \ libarcshclegacy_la-LegacyMap.lo libarcshclegacy_la-plugin.lo libarcshclegacy_la_OBJECTS = $(am_libarcshclegacy_la_OBJECTS) libarcshclegacy_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) \ $(libarcshclegacy_la_LDFLAGS) $(LDFLAGS) -o $@ PROGRAMS = $(pkglibexec_PROGRAMS) am_arc_lcas_OBJECTS = arc_lcas-arc_lcas.$(OBJEXT) \ arc_lcas-cert_util.$(OBJEXT) arc_lcas_OBJECTS = $(am_arc_lcas_OBJECTS) arc_lcas_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/globusutils/libarcglobusutils.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) arc_lcas_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arc_lcas_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arc_lcmaps_OBJECTS = arc_lcmaps-arc_lcmaps.$(OBJEXT) \ arc_lcmaps-cert_util.$(OBJEXT) arc_lcmaps_OBJECTS = $(am_arc_lcmaps_OBJECTS) arc_lcmaps_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/globusutils/libarcglobusutils.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) arc_lcmaps_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arc_lcmaps_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcshclegacy_la_SOURCES) $(arc_lcas_SOURCES) \ $(arc_lcmaps_SOURCES) DIST_SOURCES = $(libarcshclegacy_la_SOURCES) $(arc_lcas_SOURCES) \ $(arc_lcmaps_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ 
sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ 
GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ 
XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = schema DIST_SUBDIRS = schema pkglib_LTLIBRARIES = libarcshclegacy.la libarcshclegacy_la_SOURCES = auth_file.cpp auth_subject.cpp \ auth_plugin.cpp auth_lcas.cpp auth_ldap.cpp \ auth_voms.cpp auth.cpp auth.h \ simplemap.cpp simplemap.h \ unixmap_lcmaps.cpp unixmap.cpp unixmap.h \ ConfigParser.cpp ConfigParser.h \ LegacySecAttr.cpp LegacySecAttr.h \ LegacySecHandler.cpp LegacySecHandler.h \ LegacyPDP.cpp LegacyPDP.h \ LegacyMap.cpp LegacyMap.h \ plugin.cpp libarcshclegacy_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcshclegacy_la_LIBADD = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libarcshclegacy_la_LDFLAGS = -no-undefined -avoid-version -module arc_lcas_SOURCES = arc_lcas.cpp cert_util.cpp cert_util.h arc_lcas_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(GTHREAD_CFLAGS) \ 
$(OPENSSL_CFLAGS) $(LCAS_CFLAGS) \ $(GLOBUS_GSSAPI_GSI_CFLAGS) $(GLOBUS_GSI_CREDENTIAL_CFLAGS) \ $(GLOBUS_GSI_CERT_UTILS_CFLAGS) $(AM_CXXFLAGS) arc_lcas_LDADD = \ $(top_builddir)/src/hed/libs/globusutils/libarcglobusutils.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(OPENSSL_LIBS) \ $(GLOBUS_COMMON_LIBS) \ $(GLOBUS_GSSAPI_GSI_LIBS) $(GLOBUS_GSI_CREDENTIAL_LIBS) \ $(GLOBUS_GSI_CERT_UTILS_LIBS) arc_lcmaps_SOURCES = arc_lcmaps.cpp cert_util.cpp cert_util.h arc_lcmaps_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(GTHREAD_CFLAGS) \ $(OPENSSL_CFLAGS) $(LCMAPS_CFLAGS) \ $(GLOBUS_GSSAPI_GSI_CFLAGS) $(GLOBUS_GSI_CREDENTIAL_CFLAGS) \ $(GLOBUS_GSI_CERT_UTILS_CFLAGS) $(AM_CXXFLAGS) arc_lcmaps_LDADD = \ $(top_builddir)/src/hed/libs/globusutils/libarcglobusutils.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(OPENSSL_LIBS) \ $(GLOBUS_COMMON_LIBS) \ $(GLOBUS_GSSAPI_GSI_LIBS) $(GLOBUS_GSI_CREDENTIAL_LIBS) \ $(GLOBUS_GSI_CERT_UTILS_LIBS) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/legacy/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/legacy/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcshclegacy.la: 
$(libarcshclegacy_la_OBJECTS) $(libarcshclegacy_la_DEPENDENCIES) $(libarcshclegacy_la_LINK) -rpath $(pkglibdir) $(libarcshclegacy_la_OBJECTS) $(libarcshclegacy_la_LIBADD) $(LIBS) install-pkglibexecPROGRAMS: $(pkglibexec_PROGRAMS) @$(NORMAL_INSTALL) test -z "$(pkglibexecdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibexecdir)" @list='$(pkglibexec_PROGRAMS)'; test -n "$(pkglibexecdir)" || list=; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p || test -f $$p1; \ then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(pkglibexecdir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(pkglibexecdir)$$dir" || exit $$?; \ } \ ; done uninstall-pkglibexecPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(pkglibexec_PROGRAMS)'; test -n "$(pkglibexecdir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkglibexecdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkglibexecdir)" && rm -f $$files clean-pkglibexecPROGRAMS: @list='$(pkglibexec_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list arc-lcas$(EXEEXT): $(arc_lcas_OBJECTS) $(arc_lcas_DEPENDENCIES) @rm -f arc-lcas$(EXEEXT) $(arc_lcas_LINK) $(arc_lcas_OBJECTS) $(arc_lcas_LDADD) $(LIBS) arc-lcmaps$(EXEEXT): $(arc_lcmaps_OBJECTS) $(arc_lcmaps_DEPENDENCIES) @rm -f arc-lcmaps$(EXEEXT) $(arc_lcmaps_LINK) $(arc_lcmaps_OBJECTS) $(arc_lcmaps_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arc_lcas-arc_lcas.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arc_lcas-cert_util.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arc_lcmaps-arc_lcmaps.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arc_lcmaps-cert_util.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcshclegacy_la-ConfigParser.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcshclegacy_la-LegacyMap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcshclegacy_la-LegacyPDP.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcshclegacy_la-LegacySecAttr.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcshclegacy_la-LegacySecHandler.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcshclegacy_la-auth.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcshclegacy_la-auth_file.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcshclegacy_la-auth_lcas.Plo@am__quote@ 
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcshclegacy_la-auth_ldap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcshclegacy_la-auth_plugin.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcshclegacy_la-auth_subject.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcshclegacy_la-auth_voms.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcshclegacy_la-plugin.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcshclegacy_la-simplemap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcshclegacy_la-unixmap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcshclegacy_la-unixmap_lcmaps.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcshclegacy_la-auth_file.lo: auth_file.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -MT libarcshclegacy_la-auth_file.lo -MD -MP -MF $(DEPDIR)/libarcshclegacy_la-auth_file.Tpo -c -o libarcshclegacy_la-auth_file.lo `test -f 'auth_file.cpp' || echo '$(srcdir)/'`auth_file.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcshclegacy_la-auth_file.Tpo $(DEPDIR)/libarcshclegacy_la-auth_file.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='auth_file.cpp' object='libarcshclegacy_la-auth_file.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcshclegacy_la-auth_file.lo `test -f 'auth_file.cpp' || echo '$(srcdir)/'`auth_file.cpp libarcshclegacy_la-auth_subject.lo: auth_subject.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -MT libarcshclegacy_la-auth_subject.lo -MD -MP -MF $(DEPDIR)/libarcshclegacy_la-auth_subject.Tpo -c -o libarcshclegacy_la-auth_subject.lo `test -f 'auth_subject.cpp' || echo '$(srcdir)/'`auth_subject.cpp @am__fastdepCXX_TRUE@ $(am__mv) 
$(DEPDIR)/libarcshclegacy_la-auth_subject.Tpo $(DEPDIR)/libarcshclegacy_la-auth_subject.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='auth_subject.cpp' object='libarcshclegacy_la-auth_subject.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcshclegacy_la-auth_subject.lo `test -f 'auth_subject.cpp' || echo '$(srcdir)/'`auth_subject.cpp libarcshclegacy_la-auth_plugin.lo: auth_plugin.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -MT libarcshclegacy_la-auth_plugin.lo -MD -MP -MF $(DEPDIR)/libarcshclegacy_la-auth_plugin.Tpo -c -o libarcshclegacy_la-auth_plugin.lo `test -f 'auth_plugin.cpp' || echo '$(srcdir)/'`auth_plugin.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcshclegacy_la-auth_plugin.Tpo $(DEPDIR)/libarcshclegacy_la-auth_plugin.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='auth_plugin.cpp' object='libarcshclegacy_la-auth_plugin.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcshclegacy_la-auth_plugin.lo `test -f 'auth_plugin.cpp' || echo '$(srcdir)/'`auth_plugin.cpp libarcshclegacy_la-auth_lcas.lo: auth_lcas.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -MT libarcshclegacy_la-auth_lcas.lo -MD -MP -MF $(DEPDIR)/libarcshclegacy_la-auth_lcas.Tpo -c -o libarcshclegacy_la-auth_lcas.lo `test -f 'auth_lcas.cpp' || echo '$(srcdir)/'`auth_lcas.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcshclegacy_la-auth_lcas.Tpo $(DEPDIR)/libarcshclegacy_la-auth_lcas.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='auth_lcas.cpp' object='libarcshclegacy_la-auth_lcas.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcshclegacy_la-auth_lcas.lo `test -f 'auth_lcas.cpp' || echo '$(srcdir)/'`auth_lcas.cpp libarcshclegacy_la-auth_ldap.lo: auth_ldap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -MT libarcshclegacy_la-auth_ldap.lo -MD -MP -MF $(DEPDIR)/libarcshclegacy_la-auth_ldap.Tpo -c -o libarcshclegacy_la-auth_ldap.lo `test -f 'auth_ldap.cpp' || echo '$(srcdir)/'`auth_ldap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcshclegacy_la-auth_ldap.Tpo $(DEPDIR)/libarcshclegacy_la-auth_ldap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='auth_ldap.cpp' object='libarcshclegacy_la-auth_ldap.lo' libtool=yes @AMDEPBACKSLASH@ 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcshclegacy_la-auth_ldap.lo `test -f 'auth_ldap.cpp' || echo '$(srcdir)/'`auth_ldap.cpp libarcshclegacy_la-auth_voms.lo: auth_voms.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -MT libarcshclegacy_la-auth_voms.lo -MD -MP -MF $(DEPDIR)/libarcshclegacy_la-auth_voms.Tpo -c -o libarcshclegacy_la-auth_voms.lo `test -f 'auth_voms.cpp' || echo '$(srcdir)/'`auth_voms.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcshclegacy_la-auth_voms.Tpo $(DEPDIR)/libarcshclegacy_la-auth_voms.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='auth_voms.cpp' object='libarcshclegacy_la-auth_voms.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcshclegacy_la-auth_voms.lo `test -f 'auth_voms.cpp' || echo '$(srcdir)/'`auth_voms.cpp libarcshclegacy_la-auth.lo: auth.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -MT libarcshclegacy_la-auth.lo -MD -MP -MF $(DEPDIR)/libarcshclegacy_la-auth.Tpo -c -o libarcshclegacy_la-auth.lo `test -f 'auth.cpp' || echo '$(srcdir)/'`auth.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcshclegacy_la-auth.Tpo $(DEPDIR)/libarcshclegacy_la-auth.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='auth.cpp' object='libarcshclegacy_la-auth.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcshclegacy_la-auth.lo `test -f 'auth.cpp' || echo '$(srcdir)/'`auth.cpp libarcshclegacy_la-simplemap.lo: simplemap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -MT libarcshclegacy_la-simplemap.lo -MD -MP -MF $(DEPDIR)/libarcshclegacy_la-simplemap.Tpo -c -o libarcshclegacy_la-simplemap.lo `test -f 'simplemap.cpp' || echo '$(srcdir)/'`simplemap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcshclegacy_la-simplemap.Tpo $(DEPDIR)/libarcshclegacy_la-simplemap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='simplemap.cpp' object='libarcshclegacy_la-simplemap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -c -o 
libarcshclegacy_la-simplemap.lo `test -f 'simplemap.cpp' || echo '$(srcdir)/'`simplemap.cpp libarcshclegacy_la-unixmap_lcmaps.lo: unixmap_lcmaps.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -MT libarcshclegacy_la-unixmap_lcmaps.lo -MD -MP -MF $(DEPDIR)/libarcshclegacy_la-unixmap_lcmaps.Tpo -c -o libarcshclegacy_la-unixmap_lcmaps.lo `test -f 'unixmap_lcmaps.cpp' || echo '$(srcdir)/'`unixmap_lcmaps.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcshclegacy_la-unixmap_lcmaps.Tpo $(DEPDIR)/libarcshclegacy_la-unixmap_lcmaps.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='unixmap_lcmaps.cpp' object='libarcshclegacy_la-unixmap_lcmaps.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcshclegacy_la-unixmap_lcmaps.lo `test -f 'unixmap_lcmaps.cpp' || echo '$(srcdir)/'`unixmap_lcmaps.cpp libarcshclegacy_la-unixmap.lo: unixmap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -MT libarcshclegacy_la-unixmap.lo -MD -MP -MF $(DEPDIR)/libarcshclegacy_la-unixmap.Tpo -c -o libarcshclegacy_la-unixmap.lo `test -f 'unixmap.cpp' || echo '$(srcdir)/'`unixmap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcshclegacy_la-unixmap.Tpo $(DEPDIR)/libarcshclegacy_la-unixmap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='unixmap.cpp' object='libarcshclegacy_la-unixmap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcshclegacy_la-unixmap.lo `test -f 'unixmap.cpp' || echo '$(srcdir)/'`unixmap.cpp libarcshclegacy_la-ConfigParser.lo: ConfigParser.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -MT libarcshclegacy_la-ConfigParser.lo -MD -MP -MF $(DEPDIR)/libarcshclegacy_la-ConfigParser.Tpo -c -o libarcshclegacy_la-ConfigParser.lo `test -f 'ConfigParser.cpp' || echo '$(srcdir)/'`ConfigParser.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcshclegacy_la-ConfigParser.Tpo $(DEPDIR)/libarcshclegacy_la-ConfigParser.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ConfigParser.cpp' object='libarcshclegacy_la-ConfigParser.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcshclegacy_la-ConfigParser.lo `test -f 'ConfigParser.cpp' || echo '$(srcdir)/'`ConfigParser.cpp libarcshclegacy_la-LegacySecAttr.lo: LegacySecAttr.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) 
--tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -MT libarcshclegacy_la-LegacySecAttr.lo -MD -MP -MF $(DEPDIR)/libarcshclegacy_la-LegacySecAttr.Tpo -c -o libarcshclegacy_la-LegacySecAttr.lo `test -f 'LegacySecAttr.cpp' || echo '$(srcdir)/'`LegacySecAttr.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcshclegacy_la-LegacySecAttr.Tpo $(DEPDIR)/libarcshclegacy_la-LegacySecAttr.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='LegacySecAttr.cpp' object='libarcshclegacy_la-LegacySecAttr.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcshclegacy_la-LegacySecAttr.lo `test -f 'LegacySecAttr.cpp' || echo '$(srcdir)/'`LegacySecAttr.cpp libarcshclegacy_la-LegacySecHandler.lo: LegacySecHandler.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -MT libarcshclegacy_la-LegacySecHandler.lo -MD -MP -MF $(DEPDIR)/libarcshclegacy_la-LegacySecHandler.Tpo -c -o libarcshclegacy_la-LegacySecHandler.lo `test -f 'LegacySecHandler.cpp' || echo '$(srcdir)/'`LegacySecHandler.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcshclegacy_la-LegacySecHandler.Tpo $(DEPDIR)/libarcshclegacy_la-LegacySecHandler.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='LegacySecHandler.cpp' object='libarcshclegacy_la-LegacySecHandler.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcshclegacy_la-LegacySecHandler.lo `test -f 'LegacySecHandler.cpp' || echo '$(srcdir)/'`LegacySecHandler.cpp libarcshclegacy_la-LegacyPDP.lo: LegacyPDP.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -MT libarcshclegacy_la-LegacyPDP.lo -MD -MP -MF $(DEPDIR)/libarcshclegacy_la-LegacyPDP.Tpo -c -o libarcshclegacy_la-LegacyPDP.lo `test -f 'LegacyPDP.cpp' || echo '$(srcdir)/'`LegacyPDP.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcshclegacy_la-LegacyPDP.Tpo $(DEPDIR)/libarcshclegacy_la-LegacyPDP.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='LegacyPDP.cpp' object='libarcshclegacy_la-LegacyPDP.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcshclegacy_la-LegacyPDP.lo `test -f 'LegacyPDP.cpp' || echo '$(srcdir)/'`LegacyPDP.cpp libarcshclegacy_la-LegacyMap.lo: LegacyMap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) 
$(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -MT libarcshclegacy_la-LegacyMap.lo -MD -MP -MF $(DEPDIR)/libarcshclegacy_la-LegacyMap.Tpo -c -o libarcshclegacy_la-LegacyMap.lo `test -f 'LegacyMap.cpp' || echo '$(srcdir)/'`LegacyMap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcshclegacy_la-LegacyMap.Tpo $(DEPDIR)/libarcshclegacy_la-LegacyMap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='LegacyMap.cpp' object='libarcshclegacy_la-LegacyMap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcshclegacy_la-LegacyMap.lo `test -f 'LegacyMap.cpp' || echo '$(srcdir)/'`LegacyMap.cpp libarcshclegacy_la-plugin.lo: plugin.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -MT libarcshclegacy_la-plugin.lo -MD -MP -MF $(DEPDIR)/libarcshclegacy_la-plugin.Tpo -c -o libarcshclegacy_la-plugin.lo `test -f 'plugin.cpp' || echo '$(srcdir)/'`plugin.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcshclegacy_la-plugin.Tpo $(DEPDIR)/libarcshclegacy_la-plugin.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='plugin.cpp' object='libarcshclegacy_la-plugin.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcshclegacy_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcshclegacy_la-plugin.lo `test -f 'plugin.cpp' || echo '$(srcdir)/'`plugin.cpp arc_lcas-arc_lcas.o: arc_lcas.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_lcas_CXXFLAGS) $(CXXFLAGS) -MT arc_lcas-arc_lcas.o -MD -MP -MF $(DEPDIR)/arc_lcas-arc_lcas.Tpo -c -o arc_lcas-arc_lcas.o `test -f 'arc_lcas.cpp' || echo '$(srcdir)/'`arc_lcas.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arc_lcas-arc_lcas.Tpo $(DEPDIR)/arc_lcas-arc_lcas.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arc_lcas.cpp' object='arc_lcas-arc_lcas.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_lcas_CXXFLAGS) $(CXXFLAGS) -c -o arc_lcas-arc_lcas.o `test -f 'arc_lcas.cpp' || echo '$(srcdir)/'`arc_lcas.cpp arc_lcas-arc_lcas.obj: arc_lcas.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_lcas_CXXFLAGS) $(CXXFLAGS) -MT arc_lcas-arc_lcas.obj -MD -MP -MF $(DEPDIR)/arc_lcas-arc_lcas.Tpo -c -o arc_lcas-arc_lcas.obj `if test -f 'arc_lcas.cpp'; then $(CYGPATH_W) 'arc_lcas.cpp'; else $(CYGPATH_W) '$(srcdir)/arc_lcas.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arc_lcas-arc_lcas.Tpo $(DEPDIR)/arc_lcas-arc_lcas.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arc_lcas.cpp' object='arc_lcas-arc_lcas.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(arc_lcas_CXXFLAGS) $(CXXFLAGS) -c -o arc_lcas-arc_lcas.obj `if test -f 'arc_lcas.cpp'; then $(CYGPATH_W) 'arc_lcas.cpp'; else $(CYGPATH_W) '$(srcdir)/arc_lcas.cpp'; fi` arc_lcas-cert_util.o: cert_util.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_lcas_CXXFLAGS) $(CXXFLAGS) -MT arc_lcas-cert_util.o -MD -MP -MF $(DEPDIR)/arc_lcas-cert_util.Tpo -c -o arc_lcas-cert_util.o `test -f 'cert_util.cpp' || echo '$(srcdir)/'`cert_util.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arc_lcas-cert_util.Tpo $(DEPDIR)/arc_lcas-cert_util.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='cert_util.cpp' object='arc_lcas-cert_util.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_lcas_CXXFLAGS) $(CXXFLAGS) -c -o arc_lcas-cert_util.o `test -f 'cert_util.cpp' || echo '$(srcdir)/'`cert_util.cpp arc_lcas-cert_util.obj: cert_util.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_lcas_CXXFLAGS) $(CXXFLAGS) -MT arc_lcas-cert_util.obj -MD -MP -MF $(DEPDIR)/arc_lcas-cert_util.Tpo -c -o arc_lcas-cert_util.obj `if test -f 'cert_util.cpp'; then $(CYGPATH_W) 'cert_util.cpp'; else $(CYGPATH_W) '$(srcdir)/cert_util.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arc_lcas-cert_util.Tpo $(DEPDIR)/arc_lcas-cert_util.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='cert_util.cpp' object='arc_lcas-cert_util.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_lcas_CXXFLAGS) $(CXXFLAGS) -c -o arc_lcas-cert_util.obj `if test -f 'cert_util.cpp'; then $(CYGPATH_W) 'cert_util.cpp'; else $(CYGPATH_W) '$(srcdir)/cert_util.cpp'; fi` arc_lcmaps-arc_lcmaps.o: arc_lcmaps.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_lcmaps_CXXFLAGS) $(CXXFLAGS) -MT arc_lcmaps-arc_lcmaps.o -MD -MP -MF $(DEPDIR)/arc_lcmaps-arc_lcmaps.Tpo -c -o arc_lcmaps-arc_lcmaps.o `test -f 'arc_lcmaps.cpp' || echo '$(srcdir)/'`arc_lcmaps.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arc_lcmaps-arc_lcmaps.Tpo $(DEPDIR)/arc_lcmaps-arc_lcmaps.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arc_lcmaps.cpp' object='arc_lcmaps-arc_lcmaps.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_lcmaps_CXXFLAGS) $(CXXFLAGS) -c -o arc_lcmaps-arc_lcmaps.o `test -f 'arc_lcmaps.cpp' || echo '$(srcdir)/'`arc_lcmaps.cpp arc_lcmaps-arc_lcmaps.obj: arc_lcmaps.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_lcmaps_CXXFLAGS) $(CXXFLAGS) -MT arc_lcmaps-arc_lcmaps.obj -MD -MP -MF $(DEPDIR)/arc_lcmaps-arc_lcmaps.Tpo -c -o arc_lcmaps-arc_lcmaps.obj `if test -f 'arc_lcmaps.cpp'; then $(CYGPATH_W) 'arc_lcmaps.cpp'; else $(CYGPATH_W) '$(srcdir)/arc_lcmaps.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arc_lcmaps-arc_lcmaps.Tpo $(DEPDIR)/arc_lcmaps-arc_lcmaps.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arc_lcmaps.cpp' object='arc_lcmaps-arc_lcmaps.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) 
$(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_lcmaps_CXXFLAGS) $(CXXFLAGS) -c -o arc_lcmaps-arc_lcmaps.obj `if test -f 'arc_lcmaps.cpp'; then $(CYGPATH_W) 'arc_lcmaps.cpp'; else $(CYGPATH_W) '$(srcdir)/arc_lcmaps.cpp'; fi` arc_lcmaps-cert_util.o: cert_util.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_lcmaps_CXXFLAGS) $(CXXFLAGS) -MT arc_lcmaps-cert_util.o -MD -MP -MF $(DEPDIR)/arc_lcmaps-cert_util.Tpo -c -o arc_lcmaps-cert_util.o `test -f 'cert_util.cpp' || echo '$(srcdir)/'`cert_util.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arc_lcmaps-cert_util.Tpo $(DEPDIR)/arc_lcmaps-cert_util.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='cert_util.cpp' object='arc_lcmaps-cert_util.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_lcmaps_CXXFLAGS) $(CXXFLAGS) -c -o arc_lcmaps-cert_util.o `test -f 'cert_util.cpp' || echo '$(srcdir)/'`cert_util.cpp arc_lcmaps-cert_util.obj: cert_util.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_lcmaps_CXXFLAGS) $(CXXFLAGS) -MT arc_lcmaps-cert_util.obj -MD -MP -MF $(DEPDIR)/arc_lcmaps-cert_util.Tpo -c -o arc_lcmaps-cert_util.obj `if test -f 'cert_util.cpp'; then $(CYGPATH_W) 'cert_util.cpp'; else $(CYGPATH_W) '$(srcdir)/cert_util.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/arc_lcmaps-cert_util.Tpo $(DEPDIR)/arc_lcmaps-cert_util.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='cert_util.cpp' object='arc_lcmaps-cert_util.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_lcmaps_CXXFLAGS) $(CXXFLAGS) -c -o arc_lcmaps-cert_util.obj `if test -f 'cert_util.cpp'; then $(CYGPATH_W) 'cert_util.cpp'; else $(CYGPATH_W) '$(srcdir)/cert_util.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(pkglibexecdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ clean-pkglibexecPROGRAMS mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-pkglibexecPROGRAMS install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES uninstall-pkglibexecPROGRAMS .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-pkglibLTLIBRARIES clean-pkglibexecPROGRAMS ctags \ ctags-recursive distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-pkglibLTLIBRARIES install-pkglibexecPROGRAMS \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am \ uninstall-pkglibLTLIBRARIES uninstall-pkglibexecPROGRAMS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/LegacySecAttr.cpp0000644000000000000000000000012412675602216023752 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.038708 30 ctime=1513200661.798769485 nordugrid-arc-5.4.2/src/hed/shc/legacy/LegacySecAttr.cpp0000644000175000002070000000420312675602216024016 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "LegacySecAttr.h" namespace ArcSHCLegacy { static std::string empty_string; static std::list empty_list; LegacySecAttr::LegacySecAttr(Arc::Logger& logger):logger_(logger) { } LegacySecAttr::~LegacySecAttr(void) { } std::string LegacySecAttr::get(const std::string& id) const { if(id == "GROUP") { if(groups_.size() > 0) return *groups_.begin(); return ""; }; if(id == "VO") { if(VOs_.size() > 0) return *VOs_.begin(); return ""; }; return ""; } std::list LegacySecAttr::getAll(const std::string& id) const { if(id == "GROUP") return groups_; if(id == "VO") return VOs_; return std::list(); } bool LegacySecAttr::Export(Arc::SecAttrFormat format,Arc::XMLNode &val) const { // No need to export information yet. 
return true; } bool LegacySecAttr::equal(const SecAttr &b) const { try { const LegacySecAttr& a = dynamic_cast(b); if (!a) return false; // ... return false; } catch(std::exception&) { }; return false; } LegacySecAttr::operator bool(void) const { return true; } const std::list& LegacySecAttr::GetGroupVO(const std::string& group) const { std::list< std::list >::const_iterator vo = groupsVO_.begin(); for(std::list::const_iterator grp = groups_.begin(); grp != groups_.end(); ++grp) { if(vo == groupsVO_.end()) break; if(*grp == group) return *vo; ++vo; }; return empty_list; } const std::list& LegacySecAttr::GetGroupVOMS(const std::string& group) const { std::list< std::list >::const_iterator voms = groupsVOMS_.begin(); for(std::list::const_iterator grp = groups_.begin(); grp != groups_.end(); ++grp) { if(voms == groupsVOMS_.end()) break; if(*grp == group) return *voms; ++voms; }; return empty_list; } void LegacySecAttr::AddGroup(const std::string& group, const std::list& vo, const std::list& voms) { groups_.push_back(group); groupsVO_.push_back(vo); groupsVOMS_.push_back(voms); } } // namespace ArcSHCLegacy nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/arc_lcmaps.cpp0000644000000000000000000000012413213471407023357 xustar000000000000000027 mtime=1512993543.496042 27 atime=1513200575.064708 30 ctime=1513200661.810769632 nordugrid-arc-5.4.2/src/hed/shc/legacy/arc_lcmaps.cpp0000644000175000002070000002611113213471407023425 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #ifndef WIN32 #include #include #endif #include #include #include #include #include #include #include #include #include #include "cert_util.h" #include "unixmap.h" static Arc::Logger logger(Arc::Logger::getRootLogger(),"LCMAPS"); #ifdef HAVE_LCMAPS #include #define ALLOW_EMPTY_CREDENTIALS 1 #define LCMAPS_GSI_MODE 1 extern "C" { #define extern typedef #define lcmaps_init (*lcmaps_init_t) #define lcmaps_run_and_return_username (*lcmaps_run_and_return_username_t) #define lcmaps_run (*lcmaps_run_t) #define lcmaps_term (*lcmaps_term_t) #define getCredentialData (*getCredentialData_t) #include #include #undef lcmaps_init #undef lcmaps_run #undef lcmaps_run_and_return_username #undef lcmaps_term #undef getCredentialData #undef extern } #else //#warning Using hardcoded definition of LCMAPS functions - software will break during runtime if interface changed. 
Otherwise compilation will break :) extern "C" { typedef char* lcmaps_request_t; typedef int (*lcmaps_init_t)(FILE *fp); typedef int (*lcmaps_run_and_return_username_t)(char *user_dn_tmp,gss_cred_id_t user_cred,lcmaps_request_t request,char **usernamep,int npols,char **policynames); typedef int (*lcmaps_run_t)(char *user_dn_tmp,gss_cred_id_t user_cred,lcmaps_request_t request); typedef int (*lcmaps_term_t)(void); typedef void* (*getCredentialData_t)(int datatype, int *count); } #define UID (10) #define PRI_GID (20) #endif extern "C" { // Definitions taken from gssapi_openssl.h typedef struct gss_cred_id_desc_struct { globus_gsi_cred_handle_t cred_handle; gss_name_t globusid; gss_cred_usage_t cred_usage; SSL_CTX * ssl_context; } gss_cred_id_desc; extern gss_OID_desc * gss_nt_x509; #define GLOBUS_GSS_C_NT_X509 gss_nt_x509 }; static Arc::SimpleCondition lcmaps_global_lock; static std::string lcmaps_db_file_old; static std::string lcmaps_dir_old; void set_lcmaps_env(const std::string& lcmaps_db_file,const std::string& lcmaps_dir) { lcmaps_global_lock.lock(); lcmaps_db_file_old=Arc::GetEnv("LCMAPS_DB_FILE"); if(lcmaps_db_file.length() != 0) Arc::SetEnv("LCMAPS_DB_FILE",lcmaps_db_file,true); lcmaps_dir_old=Arc::GetEnv("LCMAPS_DIR"); if(lcmaps_dir.length() != 0) Arc::SetEnv("LCMAPS_DIR",lcmaps_dir,true); } void recover_lcmaps_env(void) { if(lcmaps_db_file_old.length() == 0) { Arc::UnsetEnv("LCMAPS_DB_FILE"); } else { Arc::SetEnv("LCMAPS_DB_FILE",lcmaps_db_file_old,true); }; if(lcmaps_dir_old.length() == 0) { Arc::UnsetEnv("LCMAPS_DIR"); } else { Arc::SetEnv("LCMAPS_DIR",lcmaps_dir_old,true); }; lcmaps_global_lock.unlock(); } gss_cred_id_t read_globus_credentials(const std::string& filename) { X509* cert = NULL; STACK_OF(X509)* cchain = NULL; EVP_PKEY* key = NULL; LoadCertificateFile(filename, cert, cchain); LoadKeyFile(filename, key); globus_gsi_cred_handle_t chandle; Arc::GlobusResult(globus_gsi_cred_handle_init(&chandle, NULL)); if(cert) Arc::GlobusResult(globus_gsi_cred_set_cert(chandle, cert)); if(key) Arc::GlobusResult(globus_gsi_cred_set_key(chandle, key)); if(cchain) Arc::GlobusResult(globus_gsi_cred_set_cert_chain(chandle, cchain)); gss_cred_id_desc* ccred = (gss_cred_id_desc*)::malloc(sizeof(gss_cred_id_desc)); if(ccred) { ::memset(ccred,0,sizeof(gss_cred_id_desc)); ccred->cred_handle = chandle; chandle = NULL; // cred_usage // ssl_context X509* identity_cert = NULL; if(cert) { globus_gsi_cert_utils_cert_type_t ctype = GLOBUS_GSI_CERT_UTILS_TYPE_DEFAULT; Arc::GlobusResult(globus_gsi_cert_utils_get_cert_type(cert,&ctype)); if(ctype == GLOBUS_GSI_CERT_UTILS_TYPE_EEC) { identity_cert = cert; }; }; if(!identity_cert && cchain) { // For compatibility with older globus not using //Arc::GlobusResult(globus_gsi_cert_utils_get_identity_cert(cchain,&identity_cert)); for(int n = 0; n < sk_X509_num(cchain); ++n) { X509* tmp_cert = sk_X509_value(cchain, n); if(tmp_cert) { globus_gsi_cert_utils_cert_type_t ctype = GLOBUS_GSI_CERT_UTILS_TYPE_DEFAULT; Arc::GlobusResult(globus_gsi_cert_utils_get_cert_type(tmp_cert,&ctype)); if(ctype == GLOBUS_GSI_CERT_UTILS_TYPE_EEC) { identity_cert = tmp_cert; break; }; }; }; }; gss_buffer_desc peer_buffer; #if GLOBUS_GSSAPI_GSI_OLD_OPENSSL peer_buffer.value = identity_cert; peer_buffer.length = identity_cert?sizeof(X509*):0; // Globus expects this size despite stored value is X509, not X509* #else peer_buffer.value = identity_cert; peer_buffer.length = identity_cert?sizeof(X509*):0; #endif OM_uint32 majstat, minstat; majstat = gss_import_name(&minstat, &peer_buffer, 
identity_cert?GLOBUS_GSS_C_NT_X509:GSS_C_NT_ANONYMOUS, &ccred->globusid); if (GSS_ERROR(majstat)) { logger.msg(Arc::ERROR, "Failed to convert GSI credential to " "GSS credential (major: %d, minor: %d)", majstat, minstat); majstat = gss_release_cred(&minstat, &ccred); }; } else { ccred = GSS_C_NO_CREDENTIAL; }; if(cert) X509_free(cert); if(key) EVP_PKEY_free(key); if(cchain) sk_X509_pop_free(cchain, X509_free); if(chandle) Arc::GlobusResult(globus_gsi_cred_handle_destroy(chandle)); return ccred; } int main(int argc,char* argv[]) { Arc::LogStream err(std::cerr); err.setFormat(Arc::EmptyFormat); Arc::Logger::rootLogger.addDestination(err); std::string lcmaps_library; std::string lcmaps_db_file; std::string lcmaps_dir; std::string subject; std::string filename; if(argc > 1) subject = argv[1]; if(subject.empty()) { logger.msg(Arc::ERROR, "Missing subject name"); return -1; }; if(argc > 2) filename = argv[2]; if(filename.empty()) { logger.msg(Arc::ERROR, "Missing path of credentials file"); return -1; }; if(argc > 3) lcmaps_library = argv[3]; if(lcmaps_library.empty()) { logger.msg(Arc::ERROR, "Missing name of LCMAPS library"); return -1; }; if(argc > 4) lcmaps_dir = argv[4]; if(argc > 5) lcmaps_db_file = argv[5]; if(lcmaps_dir == "*") lcmaps_dir.resize(0); if(lcmaps_db_file == "*") lcmaps_db_file.resize(0); if((lcmaps_library[0] != G_DIR_SEPARATOR) && (lcmaps_library[0] != '.')) { if(lcmaps_dir.length() != 0) lcmaps_library=lcmaps_dir+G_DIR_SEPARATOR_S+lcmaps_library; }; if(argc <= 6) { logger.msg(Arc::ERROR, "Can't read policy names"); return -1; }; char** policynames = argv+6; int npols = 0; for(;policynames[npols];npols++) { }; set_lcmaps_env(lcmaps_db_file,lcmaps_dir); Glib::Module lcmaps_handle(lcmaps_library,Glib::ModuleFlags(0)); if(!lcmaps_handle) { recover_lcmaps_env(); logger.msg(Arc::ERROR, "Can't load LCMAPS library %s: %s", lcmaps_library, Glib::Module::get_last_error()); return -1; }; void *lcmaps_init_p = NULL; void *lcmaps_run_and_return_username_p = NULL; void *lcmaps_run_p = NULL; void *lcmaps_term_p = NULL; void *getCredentialData_p = NULL; if((!lcmaps_handle.get_symbol("lcmaps_init",lcmaps_init_p)) || (!lcmaps_handle.get_symbol("lcmaps_run_and_return_username",lcmaps_run_and_return_username_p)) || (!lcmaps_handle.get_symbol("lcmaps_term",lcmaps_term_p))) { recover_lcmaps_env(); logger.msg(Arc::ERROR, "Can't find LCMAPS functions in a library %s", lcmaps_library); return -1; }; lcmaps_handle.get_symbol("lcmaps_run",lcmaps_run_p); lcmaps_handle.get_symbol("getCredentialData",getCredentialData_p); lcmaps_init_t lcmaps_init_f = (lcmaps_init_t)lcmaps_init_p; lcmaps_run_and_return_username_t lcmaps_run_and_return_username_f = (lcmaps_run_and_return_username_t)lcmaps_run_and_return_username_p; lcmaps_run_t lcmaps_run_f = (lcmaps_run_t)lcmaps_run_p; lcmaps_term_t lcmaps_term_f = (lcmaps_term_t)lcmaps_term_p; getCredentialData_t getCredentialData_f = (getCredentialData_t)getCredentialData_p; if(lcmaps_run_f) logger.msg(Arc::ERROR,"LCMAPS has lcmaps_run"); if(getCredentialData_f) logger.msg(Arc::ERROR,"LCMAPS has getCredentialData"); FILE* lcmaps_log = fdopen(STDERR_FILENO,"a"); if((*lcmaps_init_f)(lcmaps_log) != 0) { recover_lcmaps_env(); logger.msg(Arc::ERROR, "Failed to initialize LCMAPS"); return -1; }; // In case anything is not initialized yet Arc::GlobusResult(globus_module_activate(GLOBUS_GSI_GSSAPI_MODULE)); Arc::GlobusResult(globus_module_activate(GLOBUS_GSI_CREDENTIAL_MODULE)); Arc::GlobusResult(globus_module_activate(GLOBUS_GSI_CERT_UTILS_MODULE)); // User without 
credentials is useless for LCMAPS ? //Arc::GSSCredential cred(filename,"",""); gss_cred_id_t cred = read_globus_credentials(filename); char* username = NULL; int res = 1; if((!getCredentialData_f) || (!lcmaps_run_f)) { if((*lcmaps_run_and_return_username_f)( (char*)(subject.c_str()),cred,(char*)"",&username,npols,policynames ) == 0) { if(username != NULL) { res=0; std::cout< 0) { uid_t uid = uids[0]; gid_t gid = (gid_t)(-1); gid_t* gids = (gid_t*)(*getCredentialData_f)(PRI_GID,&cnt); if(cnt > 0) gid = gids[0]; struct passwd* pw = getpwuid(uid); if(pw) { username = pw->pw_name; if(!username.empty()) { if(gid != (gid_t)(-1)) { struct group* gr = getgrgid(gid); if(gr) { username += std::string(":") + gr->gr_name; } else { logger.msg(Arc::ERROR,"LCMAPS returned invalid GID: %u",(unsigned int)gid); }; } else { logger.msg(Arc::WARNING,"LCMAPS did not return any GID"); }; } else { logger.msg(Arc::ERROR,"LCMAPS returned UID which has no username: %u",(unsigned int)uid); }; } else { logger.msg(Arc::ERROR,"LCMAPS returned invalid UID: %u",(unsigned int)uid); }; } else { logger.msg(Arc::ERROR,"LCMAPS did not return any UID"); }; }; if(!username.empty()) { res=0; std::cout< #endif #include #include #include "LegacySecAttr.h" #include "unixmap.h" #include "ConfigParser.h" #include "LegacyMap.h" namespace ArcSHCLegacy { Arc::Plugin* LegacyMap::get_sechandler(Arc::PluginArgument* arg) { ArcSec::SecHandlerPluginArgument* shcarg = arg?dynamic_cast(arg):NULL; if(!shcarg) return NULL; LegacyMap* plugin = new LegacyMap((Arc::Config*)(*shcarg),(Arc::ChainContext*)(*shcarg),arg); if(!plugin) return NULL; if(!(*plugin)) { delete plugin; plugin = NULL; }; return plugin; } LegacyMap::LegacyMap(Arc::Config *cfg,Arc::ChainContext* ctx,Arc::PluginArgument* parg):SecHandler(cfg,parg) { Arc::XMLNode block = (*cfg)["ConfigBlock"]; while((bool)block) { std::string filename = (std::string)(block["ConfigFile"]); if(filename.empty()) { logger.msg(Arc::ERROR, "Configuration file not specified in ConfigBlock"); blocks_.clear(); return; }; cfgfile file(filename); Arc::XMLNode name = block["BlockName"]; while((bool)name) { std::string blockname = (std::string)name; if(blockname.empty()) { logger.msg(Arc::ERROR, "BlockName is empty"); blocks_.clear(); return; }; file.blocknames.push_back(blockname); ++name; }; blocks_.push_back(file); ++block; }; } LegacyMap::~LegacyMap(void) { } class LegacyMapCP: public ConfigParser { public: LegacyMapCP(const LegacyMap::cfgfile& file, Arc::Logger& logger, AuthUser& auth):ConfigParser(file.filename,logger),file_(file),map_(auth),is_block_(false) { }; virtual ~LegacyMapCP(void) { }; std::string LocalID(void) { if(!map_) return ""; return map_.unix_name(); }; protected: virtual bool BlockStart(const std::string& id, const std::string& name) { if(map_) return true; // already mapped std::string bname = id; if(!name.empty()) bname = bname+"/"+name; if(file_.blocknames.empty()) { is_block_ = true; return true; }; for(std::list::const_iterator block = file_.blocknames.begin(); block != file_.blocknames.end();++block) { if(*block == bname) { is_block_ = true; break; }; }; return true; }; virtual bool BlockEnd(const std::string& id, const std::string& name) { is_block_ = false; return true; }; virtual bool ConfigLine(const std::string& id, const std::string& name, const std::string& cmd, const std::string& line) { if(!is_block_) return true; if(map_) return true; // already mapped if(cmd == "unixmap") { //# unixmap [unixname][:unixgroup] rule //unixmap="nobody:nogroup all" 
if(map_.mapname(line.c_str()) == AAA_FAILURE) { logger_.msg(Arc::ERROR, "Failed processing user mapping command: unixmap %s", line); return false; }; } else if(cmd == "unixgroup") { //# unixgroup group rule //unixgroup="users simplepool /etc/grid-security/pool/users" if(map_.mapgroup(line.c_str()) == AAA_FAILURE) { logger_.msg(Arc::ERROR, "Failed processing user mapping command: unixgroup %s", line); return false; }; } else if(cmd == "unixvo") { //# unixvo vo rule //unixvo="ATLAS unixuser atlas:atlas" if(map_.mapvo(line.c_str()) == AAA_FAILURE) { logger_.msg(Arc::ERROR, "Failed processing user mapping command: unixvo %s", line); return false; }; }; return true; }; private: const LegacyMap::cfgfile& file_; //AuthUser& auth_; UnixMap map_; bool is_block_; }; class LegacyMapAttr: public Arc::SecAttr { public: LegacyMapAttr(const std::string& id):id_(id) { }; virtual ~LegacyMapAttr(void); // Common interface virtual operator bool(void) const; virtual bool Export(Arc::SecAttrFormat format,Arc::XMLNode &val) const; virtual std::string get(const std::string& id) const; virtual std::list getAll(const std::string& id) const; // Specific interface const std::string GetID(void) const { return id_; }; protected: std::string id_; virtual bool equal(const SecAttr &b) const; }; LegacyMapAttr::~LegacyMapAttr(void) { } LegacyMapAttr::operator bool(void) const { return true; } bool LegacyMapAttr::Export(Arc::SecAttrFormat format,Arc::XMLNode &val) const { return true; } std::string LegacyMapAttr::get(const std::string& id) const { return ""; } std::list LegacyMapAttr::getAll(const std::string& id) const { return std::list(); } bool LegacyMapAttr::equal(const SecAttr &b) const { const LegacyMapAttr& a = dynamic_cast(b); if (!a) return false; return (id_ == a.id_); } ArcSec::SecHandlerStatus LegacyMap::Handle(Arc::Message* msg) const { if(blocks_.size()<=0) { logger.msg(Arc::ERROR, "LegacyMap: no configurations blocks defined"); return false; }; Arc::SecAttr* sattr = msg->Auth()->get("ARCLEGACY"); if(!sattr) { // Only if information collection is done per context. // Check if decision is already made. Arc::SecAttr* dattr = msg->AuthContext()->get("ARCLEGACYMAP"); if(dattr) { LegacyMapAttr* mattr = dynamic_cast(dattr); if(mattr) { // Mapping already was done in this context std::string id = mattr->GetID(); if(!id.empty()) { msg->Attributes()->set("SEC:LOCALID",id); }; return true; }; }; }; if(!sattr) sattr = msg->AuthContext()->get("ARCLEGACY"); if(!sattr) { logger.msg(Arc::ERROR, "LegacyPDP: there is no ARCLEGACY Sec Attribute defined. 
Probably ARC Legacy Sec Handler is not configured or failed."); return false; }; LegacySecAttr* lattr = dynamic_cast(sattr); if(!lattr) { logger.msg(Arc::ERROR, "LegacyPDP: ARC Legacy Sec Attribute not recognized."); return false; }; // Populate with collected info AuthUser auth(*msg); auth.add_groups(lattr->GetGroups()); auth.add_vos(lattr->GetVOs()); std::string id; for(std::list::const_iterator block = blocks_.begin(); block!=blocks_.end();++block) { LegacyMapCP parser(*block,logger,auth); if(!parser) return false; if(!parser.Parse()) return false; id = parser.LocalID(); if(!id.empty()) { logger.msg(Arc::INFO,"Grid identity is mapped to local identity '%s'",id); msg->Attributes()->set("SEC:LOCALID",id); break; }; }; // Store decision even if no id was selected msg->AuthContext()->set("ARCLEGACYMAP",new LegacyMapAttr(id)); return true; } } // namespace ArcSHCLegacy nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/ConfigParser.h0000644000000000000000000000012411766073413023310 xustar000000000000000027 mtime=1339586315.829356 27 atime=1513200575.037708 30 ctime=1513200661.797769473 nordugrid-arc-5.4.2/src/hed/shc/legacy/ConfigParser.h0000644000175000002070000000134611766073413023361 0ustar00mockbuildmock00000000000000#include #include namespace ArcSHCLegacy { class ConfigParser { public: ConfigParser(const std::string& filename, Arc::Logger& logger); virtual ~ConfigParser(void); bool Parse(void); operator bool(void) { return (bool)f_; }; bool operator!(void) { return !(bool)f_; }; protected: virtual bool BlockStart(const std::string& id, const std::string& name) = 0; virtual bool BlockEnd(const std::string& id, const std::string& name) = 0; virtual bool ConfigLine(const std::string& id, const std::string& name, const std::string& cmd, const std::string& line) = 0; Arc::Logger& logger_; private: std::string block_id_; std::string block_name_; std::ifstream f_; }; } // namespace ArcSHCLegacy nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/ConfigParser.cpp0000644000000000000000000000012412123101751023624 xustar000000000000000027 mtime=1363969001.473645 27 atime=1513200575.035708 30 ctime=1513200661.796769461 nordugrid-arc-5.4.2/src/hed/shc/legacy/ConfigParser.cpp0000644000175000002070000000570012123101751023673 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include namespace ArcSHCLegacy { ConfigParser::ConfigParser(const std::string& filename, Arc::Logger& logger):logger_(logger) { if(filename.empty()) { logger_.msg(Arc::ERROR, "Configuration file not specified"); return; }; f_.open(filename.c_str()); if(!f_) { logger_.msg(Arc::ERROR, "Configuration file can not be read"); return; }; } ConfigParser::~ConfigParser(void) { } static bool is_nested(const std::string& line) { if(line.length() < 2) return false; if(line[0] != '"') return false; if(line[line.length()-1] != '"') return false; std::string::size_type p = 1; for(;p= line.length()-1) break; if(line[p] == '"') { for(++p;p= line.length()-1) return false; ++p; } else { for(++p;p= line.length()-1) break; }; }; return true; } bool ConfigParser::Parse(void) { if(!f_) { logger_.msg(Arc::ERROR, "Configuration file can not be read"); return false; }; while(f_.good()) { if(!f_) { logger_.msg(Arc::ERROR, "Configuration file can not be read"); return false; }; std::string line; getline(f_,line); line = Arc::trim(line); if(line.empty()) continue; if(line[0] == '#') continue; if(line[0] == '[') { if(line.length() < 2) { logger_.msg(Arc::ERROR, "Configuration file is broken - block name is too short: 
%s",line); return false; }; if(!block_id_.empty()) { if(!BlockEnd(block_id_,block_name_)) { return false; }; }; line = line.substr(1,line.length()-2); block_id_ = ""; block_name_ = ""; std::string::size_type ps = line.find('/'); if(ps != std::string::npos) { block_name_ = Arc::trim(line.substr(ps+1)); line.resize(ps); }; line = Arc::trim(line); block_id_ = line; if(!BlockStart(block_id_,block_name_)) { return false; }; continue; }; std::string cmd; std::string::size_type p = line.find('='); if(p == std::string::npos) { cmd = Arc::trim(line); line = ""; } else { cmd = Arc::trim(line.substr(0,p)); line = Arc::trim(line.substr(p+1)); if(is_nested(line)) line=line.substr(1,line.length()-2); }; if(cmd == "name") { if(p != std::string::npos) { block_name_ = Arc::trim(Arc::trim(line),"\""); }; continue; }; if(!ConfigLine(block_id_,block_name_,cmd,line)) { return false; }; }; if(!block_id_.empty()) { if(!BlockEnd(block_id_,block_name_)) return false; }; return true; } } // namespace ArcSHCLegacy nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/auth.h0000644000000000000000000000012412675602216021666 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.067708 30 ctime=1513200661.790769388 nordugrid-arc-5.4.2/src/hed/shc/legacy/auth.h0000644000175000002070000001353512675602216021742 0ustar00mockbuildmock00000000000000#include #include #include #include #include #include //#include namespace ArcSHCLegacy { enum AuthResult { AAA_POSITIVE_MATCH = 1, AAA_NEGATIVE_MATCH = -1, AAA_NO_MATCH = 0, AAA_FAILURE = 2 }; class AuthVO; /** VOMS FQAN split into elements */ struct voms_fqan_t { std::string group; // including root group which is always same as VO std::string role; // role associated to group - for each role there is one voms_fqan_t std::string capability; // deprecated but must keep itt void str(std::string& str) const; // convert to string (minimal variation) }; /** VOMS data */ struct voms_t { std::string server; /*!< The VOMS server hostname */ std::string voname; /*!< The name of the VO to which the VOMS belongs */ std::vector fqans; /*!< Processed FQANs of user */ }; class AuthUser { private: typedef AuthResult (AuthUser:: * match_func_t)(const char* line); typedef struct { const char* cmd; match_func_t func; } source_t; class group_t { public: std::string name; // const char* vo; // local VO which caused authorization of this group struct voms_t voms; // VOMS attributes which caused authorization of this group group_t(const std::string& name_,const char* vo_,const struct voms_t& voms_):name(name_),vo(vo_?vo_:""),voms(voms_) { }; }; struct voms_t default_voms_; const char* default_vo_; const char* default_group_; // Attributes of user std::string subject_; // DN of certificate std::vector voms_data_; // VOMS information extracted from message // Old attributes - remove or convert std::string from; // Remote hostname std::string filename; // Delegated proxy stored in this file bool proxy_file_was_created; // If proxy file was created by this object bool has_delegation; // If proxy contains delegation // Matching methods static source_t sources[]; // Supported evaluation sources AuthResult match_all(const char* line); AuthResult match_group(const char* line); AuthResult match_subject(const char* line); AuthResult match_file(const char* line); AuthResult match_ldap(const char* line); AuthResult match_voms(const char* line); AuthResult match_vo(const char* line); AuthResult match_lcas(const char *); AuthResult match_plugin(const char* line); const group_t* find_group(const 
char* grp) const { if(grp == NULL) return NULL; for(std::list::const_iterator i=groups_.begin();i!=groups_.end();++i) { if(i->name == grp) return &(*i); }; return NULL; }; const group_t* find_group(const std::string& grp) const { return find_group(grp.c_str());}; // bool voms_extracted; // Evaluation results std::list groups_; // Groups which user matched (internal names) std::list vos_; // VOs to which user belongs (external names) // References to related/source data Arc::Message& message_; public: AuthUser(const AuthUser&); // Constructor AuthUser(Arc::Message& message); // subject - subject/DN of user // filename - file with (delegated) credentials //AuthUser(const char* subject = NULL,const char* filename = NULL); ~AuthUser(void); // void operator=(const AuthUser&); // Reassign user with supplied credentials //void operator=(gss_cred_id_t cred); //void operator=(gss_ctx_id_t ctx); //void set(const char* subject,const char* hostname = NULL); // void set(const char* subject,gss_ctx_id_t ctx,gss_cred_id_t cred,const char* hostname = NULL); //void set(const char* s,STACK_OF(X509)* cred,const char* hostname = NULL); // Evaluate authentication rules AuthResult evaluate(const char* line); const char* DN(void) const { return subject_.c_str(); }; const char* proxy(void) const { (const_cast(this))->store_credentials(); return filename.c_str(); }; bool is_proxy(void) const { return has_delegation; }; const char* hostname(void) const { return from.c_str(); }; // Remember this user belongs to group 'grp' void add_group(const std::string& grp); void add_groups(const std::list& grps); // Mark this user as belonging to no no groups void clear_groups(void) { groups_.clear(); default_group_=NULL; }; // Returns true if user belongs to specified group 'grp' bool check_group(const std::string& grp) const { for(std::list::const_iterator i=groups_.begin();i!=groups_.end();++i) { if(i->name == grp) return true; }; return false; }; void get_groups(std::list& groups) const; void add_vo(const std::string& vo); void add_vos(const std::list& vos); void clear_vos(void) { vos_.clear(); }; bool check_vo(const std::string& vo) const { for(std::list::const_iterator i=vos_.begin();i!=vos_.end();++i) { if(*i == vo) return true; }; return false; }; const std::vector& voms(void); const std::list& VOs(void); const struct voms_t* get_group_voms(const std::string& grp) const { const group_t* group = find_group(grp); return (group == NULL)?NULL:&(group->voms); }; const char* get_group_vo(const std::string& grp) const { const group_t* group = find_group(grp); return (group == NULL)?NULL:group->vo; }; // convert ARC list into voms structure static std::vector arc_to_voms(const std::list& attributes); void subst(std::string& str); bool store_credentials(void); }; class AuthVO { friend class AuthUser; private: std::string name; std::string file; public: AuthVO(const char* vo,const char* filename):name(vo),file(filename) { }; AuthVO(const std::string& vo,const std::string& filename):name(vo.c_str()),file(filename.c_str()) { }; ~AuthVO(void) { }; }; } // namespace ArcSHCLegacy nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/LegacyMap.h0000644000000000000000000000012412110401544022550 xustar000000000000000027 mtime=1361183588.521754 27 atime=1513200575.067708 30 ctime=1513200661.805769571 nordugrid-arc-5.4.2/src/hed/shc/legacy/LegacyMap.h0000644000175000002070000000153612110401544022622 0ustar00mockbuildmock00000000000000#include #include #include #include #include #include #include namespace ArcSHCLegacy { class LegacyMap : 
public ArcSec::SecHandler { friend class LegacyMapCP; private: class cfgfile { public: std::string filename; std::list blocknames; cfgfile(const std::string& fname):filename(fname) {}; }; std::list blocks_; public: LegacyMap(Arc::Config *cfg, Arc::ChainContext* ctx, Arc::PluginArgument* parg); virtual ~LegacyMap(void); static Arc::Plugin* get_sechandler(Arc::PluginArgument* arg); virtual ArcSec::SecHandlerStatus Handle(Arc::Message* msg) const; operator bool(void) { return (blocks_.size()>0); }; bool operator!(void) { return (blocks_.size()<=0); }; }; } // namespace ArcSHCLegacy nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/unixmap.h0000644000000000000000000000012412675602216022406 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.035708 30 ctime=1513200661.795769449 nordugrid-arc-5.4.2/src/hed/shc/legacy/unixmap.h0000644000175000002070000000330212675602216022451 0ustar00mockbuildmock00000000000000#include #include "auth.h" namespace ArcSHCLegacy { class UnixMap { private: class unix_user_t { public: std::string name; std::string group; unix_user_t(void) { }; }; typedef AuthResult (UnixMap:: * map_func_t)(const AuthUser& user,unix_user_t& unix_user,const char* line); typedef struct { const char* cmd; map_func_t map; } source_t; static source_t sources[]; // Supported evaluation sources // Unix user obtained after mapping unix_user_t unix_user_; // Associated user AuthUser& user_; // Identity of mapping request. std::string map_id_; // Mapping was done bool mapped_; AuthResult map_mapfile(const AuthUser& user,unix_user_t& unix_user,const char* line); AuthResult map_simplepool(const AuthUser& user,unix_user_t& unix_user,const char* line); AuthResult map_unixuser(const AuthUser& user,unix_user_t& unix_user,const char* line); AuthResult map_lcmaps(const AuthUser& user,unix_user_t& unix_user,const char* line); AuthResult map_mapplugin(const AuthUser& user,unix_user_t& unix_user,const char* line); public: // Constructor - links to grid user UnixMap(AuthUser& user,const std::string& id = ""); ~UnixMap(void); // Properties const char* id(void) const { return map_id_.c_str(); }; operator bool(void) const { return mapped_; }; bool operator!(void) const { return !mapped_; }; const std::string& unix_name(void) const { return unix_user_.name; }; const std::string& unix_group(void) const { return unix_user_.group; }; AuthUser& user(void) { return user_; }; // Map AuthResult mapname(const char* line); AuthResult mapgroup(const char* line); AuthResult mapvo(const char* line); }; } // namespace ArcSHCLegacy nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/auth.cpp0000644000000000000000000000012412771225230022213 xustar000000000000000027 mtime=1474636440.725253 27 atime=1513200575.062708 30 ctime=1513200661.789769375 nordugrid-arc-5.4.2/src/hed/shc/legacy/auth.cpp0000644000175000002070000002204612771225230022264 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "LegacySecAttr.h" #include "auth.h" namespace ArcSHCLegacy { static Arc::Logger logger(Arc::Logger::getRootLogger(),"AuthUser"); void voms_fqan_t::str(std::string& str) const { str = group; if(!role.empty()) str += "/Role="+role; if(!capability.empty()) str += "/Capability="+capability; } AuthResult AuthUser::match_all(const char* /* line */) { default_voms_=voms_t(); default_vo_=NULL; default_group_=NULL; return AAA_POSITIVE_MATCH; } AuthResult AuthUser::match_group(const char* line) { std::string::size_type n = 0; for(;;) { if(n == std::string::npos) break; 
std::string s(""); n = Arc::get_token(s,line,n," ","\"","\""); if(s.empty()) continue; for(std::list::iterator i = groups_.begin();i!=groups_.end();++i) { if(s == i->name) { default_voms_=voms_t(); default_vo_=i->vo; default_group_=i->name.c_str(); return AAA_POSITIVE_MATCH; }; }; }; return AAA_NO_MATCH; } AuthResult AuthUser::match_vo(const char* line) { std::string::size_type n = 0; for(;;) { if(n == std::string::npos) break; std::string s(""); n = Arc::get_token(s,line,n," ","\"","\""); if(s.empty()) continue; for(std::list::iterator i = vos_.begin();i!=vos_.end();++i) { if(s == *i) { default_voms_=voms_t(); default_vo_=i->c_str(); default_group_=NULL; return AAA_POSITIVE_MATCH; }; }; }; return AAA_NO_MATCH; } AuthUser::source_t AuthUser::sources[] = { { "all", &AuthUser::match_all }, { "group", &AuthUser::match_group }, { "subject", &AuthUser::match_subject }, { "file", &AuthUser::match_file }, { "remote", &AuthUser::match_ldap }, { "voms", &AuthUser::match_voms }, { "vo", &AuthUser::match_vo }, { "lcas", &AuthUser::match_lcas }, { "plugin", &AuthUser::match_plugin }, { NULL, NULL } }; AuthUser::AuthUser(const AuthUser& a):message_(a.message_) { subject_ = a.subject_; voms_data_ = a.voms_data_; from = a.from; filename=a.filename; has_delegation=a.has_delegation; proxy_file_was_created=false; // process_voms(); default_voms_=voms_t(); default_vo_=NULL; default_group_=NULL; groups_ = a.groups_; vos_ = a.vos_; } AuthUser::AuthUser(Arc::Message& message): default_voms_(), default_vo_(NULL), default_group_(NULL), proxy_file_was_created(false), has_delegation(false), message_(message) { subject_ = message.Attributes()->get("TLS:IDENTITYDN"); // Fetch VOMS attributes std::list voms_attrs; Arc::SecAttr* sattr = NULL; sattr = message_.Auth()->get("TLS"); if(sattr) { std::list vomses = sattr->getAll("VOMS"); voms_attrs.splice(voms_attrs.end(),vomses); }; sattr = message_.AuthContext()->get("TLS"); if(sattr) { std::list vomses = sattr->getAll("VOMS"); voms_attrs.splice(voms_attrs.end(),vomses); }; voms_data_ = arc_to_voms(voms_attrs); } // The attributes passed to this method are of "extended fqan" kind with every field // made of key=value pair. Also each attribute has /VO=voname prepended. // Special ARC attribute /voname=voname/hostname=hostname is used for assigning // server host name to VO. std::vector AuthUser::arc_to_voms(const std::list& attributes) { std::vector voms_list; struct voms_t voms_item; for(std::list::const_iterator v = attributes.begin(); v != attributes.end(); ++v) { std::list elements; Arc::tokenize(*v, elements, "/"); // Handle first element which contains VO name std::list::iterator i = elements.begin(); if(i == elements.end()) continue; // empty attribute? 
// Handle first element which contains VO name std::vector keyvalue; Arc::tokenize(*i, keyvalue, "="); if (keyvalue.size() != 2) continue; // improper record if (keyvalue[0] == "voname") { // VO to hostname association if(voms_item.voname != keyvalue[1]) { if(!voms_item.voname.empty()) { voms_list.push_back(voms_item); }; voms_item = voms_t(); voms_item.voname = keyvalue[1]; }; ++i; if(i != elements.end()) { Arc::tokenize(*i, keyvalue, "="); if (keyvalue.size() == 2) { if (keyvalue[0] == "hostname") { voms_item.server = keyvalue[1]; }; }; }; continue; } else if(keyvalue[0] == "VO") { if(voms_item.voname != keyvalue[1]) { if(!voms_item.voname.empty()) { voms_list.push_back(voms_item); }; voms_item = voms_t(); voms_item.voname = keyvalue[1]; }; } else { // Skip unknown record continue; } ++i; voms_fqan_t fqan; for (; i != elements.end(); ++i) { std::vector keyvalue; Arc::tokenize(*i, keyvalue, "="); // /Group=mygroup/Role=myrole // Ignoring unrecognized records if (keyvalue.size() == 2) { if (keyvalue[0] == "Group") { fqan.group += "/"+keyvalue[1]; } else if (keyvalue[0] == "Role") { fqan.role = keyvalue[1]; } else if (keyvalue[0] == "Capability") { fqan.capability = keyvalue[1]; }; }; }; voms_item.fqans.push_back(fqan); }; if(!voms_item.voname.empty()) { voms_list.push_back(voms_item); }; return voms_list; } AuthUser::~AuthUser(void) { if(filename.length()) Arc::FileDelete(filename); } AuthResult AuthUser::evaluate(const char* line) { bool invert = false; bool no_match = false; const char* command = "subject"; size_t command_len = 7; if(subject_.empty()) return AAA_NO_MATCH; // ?? if(!line) return AAA_NO_MATCH; for(;*line;line++) if(!isspace(*line)) break; if(*line == 0) return AAA_NO_MATCH; if(*line == '#') return AAA_NO_MATCH; if(*line == '-') { line++; invert=true; } else if(*line == '+') { line++; }; if(*line == '!') { no_match=true; line++; }; if((*line != '/') && (*line != '"')) { command=line; for(;*line;line++) if(isspace(*line)) break; command_len=line-command; for(;*line;line++) if(!isspace(*line)) break; }; for(source_t* s = sources;s->cmd;s++) { if((strncmp(s->cmd,command,command_len) == 0) && (strlen(s->cmd) == command_len)) { AuthResult res=(this->*(s->func))(line); if(res == AAA_FAILURE) return res; if(no_match) { if(res==AAA_NO_MATCH) { res=AAA_POSITIVE_MATCH; } else { res=AAA_NO_MATCH; }; }; if(invert) { switch(res) { case AAA_POSITIVE_MATCH: res = AAA_NEGATIVE_MATCH; break; case AAA_NEGATIVE_MATCH: res = AAA_POSITIVE_MATCH; break; }; }; return res; }; }; return AAA_FAILURE; } const std::list& AuthUser::VOs(void) { return vos_; } void AuthUser::get_groups(std::list& groups) const { for(std::list::const_iterator g = groups_.begin();g!=groups_.end();++g) { groups.push_back(g->name); }; } void AuthUser::subst(std::string& str) { int l = str.length(); // Substitutions: %D, %P for(int i=0;iget("TLS"); std::string cred; if(sattr) { cred = sattr->get("CERTIFICATE"); }; if(cred.empty()) { sattr = message_.AuthContext()->get("TLS"); if(sattr) { cred = sattr->get("CERTIFICATE"); }; }; if(!cred.empty()) { cred+=sattr->get("CERTIFICATECHAIN"); std::string tmpfile; if(Arc::TmpFileCreate(tmpfile,cred)) { filename = tmpfile; logger.msg(Arc::VERBOSE,"Credentials stored in temporary file %s",filename); return true; }; }; return false; } void AuthUser::add_group(const std::string& grp) { groups_.push_back(group_t(grp,default_vo_,default_voms_)); logger.msg(Arc::VERBOSE,"Assigned to authorization group %s",grp); }; void AuthUser::add_vo(const std::string& vo) { vos_.push_back(vo); 
logger.msg(Arc::VERBOSE,"Assigned to VO %s",vo); } void AuthUser::add_groups(const std::list& grps) { for(std::list::const_iterator grp = grps.begin(); grp != grps.end(); ++grp) { add_group(*grp); } } void AuthUser::add_vos(const std::list& vos) { for(std::list::const_iterator vo = vos.begin(); vo != vos.end(); ++vo) { add_vo(*vo); } } } // namespace ArcSHCLegacy nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/auth_subject.cpp0000644000000000000000000000012412675602216023740 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.035708 30 ctime=1513200661.784769314 nordugrid-arc-5.4.2/src/hed/shc/legacy/auth_subject.cpp0000644000175000002070000000261012675602216024004 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "auth.h" namespace ArcSHCLegacy { AuthResult AuthUser::match_subject(const char* line) { // A bit of hacking here to properly split DNs with spaces std::string line_(line); std::string subj; std::string::size_type pos = line_.find_first_not_of(" \t"); if(pos == std::string::npos) return AAA_NO_MATCH; bool enc = (line_[pos] == '"'); pos = Arc::get_token(subj,line_,pos," \t", "\"", "\""); while(true) { if(subj.empty() && (pos == std::string::npos)) break; if((!enc) && (!subj.empty()) && (pos != std::string::npos)) { std::string subj_; std::string::size_type pos_ = line_.find_first_not_of(" \t",pos); if(pos_ != std::string::npos) { bool enc_ = (line_[pos_] == '"'); if(!enc_) { pos_ = Arc::get_token(subj_,line_,pos_," \t", "\"", "\""); if(subj_[0] != '/') { // Merge tokens subj=subj+line_.substr(pos,pos_-pos); pos=pos_; continue; }; }; }; }; if(subject_ == subj) { return AAA_POSITIVE_MATCH; }; pos = line_.find_first_not_of(" \t",pos); if(pos == std::string::npos) break; enc = (line_[pos] == '"'); pos = Arc::get_token(subj,line_,pos," \t", "\"", "\""); }; return AAA_NO_MATCH; } } // namespace ArcSHCLegacy nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/auth_ldap.cpp0000644000000000000000000000012412675602216023221 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.062708 30 ctime=1513200661.787769351 nordugrid-arc-5.4.2/src/hed/shc/legacy/auth_ldap.cpp0000644000175000002070000000101312675602216023261 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "auth.h" namespace ArcSHCLegacy { #define LDAP_CONNECT_TIMEOUT 10 #define LDAP_QUERY_TIMEOUT 20 #define LDAP_RESULT_TIMEOUT 60 static Arc::Logger logger(Arc::Logger::getRootLogger(),"AuthUserLDAP"); AuthResult AuthUser::match_ldap(const char* line) { logger.msg(Arc::ERROR, "LDAP authorization is not supported anymore"); return AAA_FAILURE; } } // namespace ArcSHCLegacy nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/auth_voms.cpp0000644000000000000000000000012412675602216023265 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.066708 30 ctime=1513200661.788769363 nordugrid-arc-5.4.2/src/hed/shc/legacy/auth_voms.cpp0000644000175000002070000000561512675602216023341 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "auth.h" namespace ArcSHCLegacy { static Arc::Logger logger(Arc::Logger::getRootLogger(),"AuthUserVOMS"); static bool match_value(const std::string& value, const std::vector& seq) { for(std::vector::const_iterator v = seq.begin(); v != seq.end(); ++v) { if(*v == value) { return true; } }; return false; } AuthResult AuthUser::match_voms(const char* line) { // No need to process 
anything if no VOMS extensions are present if(voms_data_.empty()) return AAA_NO_MATCH; // parse line std::string vo(""); std::string group(""); std::string role(""); std::string capabilities(""); std::string auto_c(""); std::string::size_type n = 0; n=Arc::get_token(vo,line,n," ","\"","\""); if((n == std::string::npos) && (vo.empty())) { logger.msg(Arc::ERROR, "Missing VO in configuration"); return AAA_FAILURE; }; n=Arc::get_token(group,line,n," ","\"","\""); if((n == std::string::npos) && (group.empty())) { logger.msg(Arc::ERROR, "Missing group in configuration"); return AAA_FAILURE; }; n=Arc::get_token(role,line,n," ","\"","\""); if((n == std::string::npos) && (role.empty())) { logger.msg(Arc::ERROR, "Missing role in configuration"); return AAA_FAILURE; }; n=Arc::get_token(capabilities,line,n," ","\"","\""); if((n == std::string::npos) && (capabilities.empty())) { logger.msg(Arc::ERROR, "Missing capabilities in configuration"); return AAA_FAILURE; }; n=Arc::get_token(auto_c,line,n," ","\"","\""); logger.msg(Arc::VERBOSE, "Rule: vo: %s", vo); logger.msg(Arc::VERBOSE, "Rule: group: %s", group); logger.msg(Arc::VERBOSE, "Rule: role: %s", role); logger.msg(Arc::VERBOSE, "Rule: capabilities: %s", capabilities); // analyse permissions for(std::vector::iterator v = voms_data_.begin();v!=voms_data_.end();++v) { logger.msg(Arc::DEBUG, "Match vo: %s", v->voname); if((vo == "*") || (vo == v->voname)) { bool matched = false; for(std::vector::iterator f = v->fqans.begin(); f != v->fqans.end(); ++f) { if(((group == "*") || (group == f->group)) && ((role == "*") || (role == f->role)) && ((capabilities == "*") || (capabilities == f->capability))) { logger.msg(Arc::VERBOSE, "Matched: %s %s %s %s",v->voname,f->group,f->role,f->capability); if(!matched) { default_voms_ = voms_t(); default_voms_.voname = v->voname; default_voms_.server = v->server; matched = true; }; default_voms_.fqans.push_back(*f); }; }; if(matched) { return AAA_POSITIVE_MATCH; }; }; }; logger.msg(Arc::VERBOSE, "Matched nothing"); return AAA_NO_MATCH; } } // namespace ArcSHCLegacy nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/arc_lcas.cpp0000644000000000000000000000012413213471407023022 xustar000000000000000027 mtime=1512993543.496042 27 atime=1513200575.067708 30 ctime=1513200661.807769595 nordugrid-arc-5.4.2/src/hed/shc/legacy/arc_lcas.cpp0000644000175000002070000002040613213471407023071 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "cert_util.h" static Arc::Logger logger(Arc::Logger::getRootLogger(),"LCAS"); #ifdef HAVE_LCAS #include #define ALLOW_EMPTY_CREDENTIALS 1 extern "C" { #define extern typedef #define getMajorVersionNumber lcas_getMajorVersionNumber #define getMinorVersionNumber lcas_getMinorVersionNumber #define getPatchVersionNumber lcas_getPatchVersionNumber #define lcas_init (*lcas_init_t) #define lcas_get_fabric_authorization (*lcas_get_fabric_authorization_t) #define lcas_term (*lcas_term_t) #include #undef lcas_init #undef lcas_get_fabric_authorization #undef lcas_term #undef getMajorVersionNumber #undef getMinorVersionNumber #undef getPatchVersionNumber #undef extern }; #else //#warning Using hardcoded definition of LCAS functions - software will break if interface changed extern "C" { typedef char* lcas_request_t; typedef int (*lcas_init_t)(FILE *fp); typedef int (*lcas_term_t)(void); typedef int (*lcas_get_fabric_authorization_t)(char *user_dn_tmp,gss_cred_id_t user_cred,lcas_request_t 
request); } #endif extern "C" { // Definitions taken from gssapi_openssl.h typedef struct gss_cred_id_desc_struct { globus_gsi_cred_handle_t cred_handle; gss_name_t globusid; gss_cred_usage_t cred_usage; SSL_CTX * ssl_context; } gss_cred_id_desc; extern gss_OID_desc * gss_nt_x509; #define GLOBUS_GSS_C_NT_X509 gss_nt_x509 }; // extern "C" static std::string lcas_db_file_old; static std::string lcas_dir_old; void set_lcas_env(const std::string& lcas_db_file,const std::string& lcas_dir) { lcas_db_file_old=Arc::GetEnv("LCAS_DB_FILE"); if(lcas_db_file.length() != 0) Arc::SetEnv("LCAS_DB_FILE",lcas_db_file,true); lcas_dir_old=Arc::GetEnv("LCAS_DIR"); if(lcas_dir.length() != 0) Arc::SetEnv("LCAS_DIR",lcas_dir,true); } void recover_lcas_env(void) { if(lcas_db_file_old.length() == 0) { Arc::UnsetEnv("LCAS_DB_FILE"); } else { Arc::SetEnv("LCAS_DB_FILE",lcas_db_file_old,true); }; if(lcas_dir_old.length() == 0) { Arc::UnsetEnv("LCAS_DIR"); } else { Arc::SetEnv("LCAS_DIR",lcas_dir_old,true); }; } gss_cred_id_t read_globus_credentials(const std::string& filename) { X509* cert = NULL; STACK_OF(X509)* cchain = NULL; EVP_PKEY* key = NULL; LoadCertificateFile(filename, cert, cchain); LoadKeyFile(filename, key); globus_gsi_cred_handle_t chandle; Arc::GlobusResult(globus_gsi_cred_handle_init(&chandle, NULL)); if(cert) Arc::GlobusResult(globus_gsi_cred_set_cert(chandle, cert)); if(key) Arc::GlobusResult(globus_gsi_cred_set_key(chandle, key)); if(cchain) Arc::GlobusResult(globus_gsi_cred_set_cert_chain(chandle, cchain)); gss_cred_id_desc* ccred = (gss_cred_id_desc*)::malloc(sizeof(gss_cred_id_desc)); if(ccred) { ::memset(ccred,0,sizeof(gss_cred_id_desc)); ccred->cred_handle = chandle; chandle = NULL; // cred_usage // ssl_context X509* identity_cert = NULL; if(cert) { globus_gsi_cert_utils_cert_type_t ctype = GLOBUS_GSI_CERT_UTILS_TYPE_DEFAULT; Arc::GlobusResult(globus_gsi_cert_utils_get_cert_type(cert,&ctype)); if(ctype == GLOBUS_GSI_CERT_UTILS_TYPE_EEC) { identity_cert = cert; }; }; if(!identity_cert && cchain) { // For compatibility with older globus not using //Arc::GlobusResult(globus_gsi_cert_utils_get_identity_cert(cchain,&identity_cert)); for(int n = 0; n < sk_X509_num(cchain); ++n) { X509* tmp_cert = sk_X509_value(cchain, n); if(tmp_cert) { globus_gsi_cert_utils_cert_type_t ctype = GLOBUS_GSI_CERT_UTILS_TYPE_DEFAULT; Arc::GlobusResult(globus_gsi_cert_utils_get_cert_type(tmp_cert,&ctype)); if(ctype == GLOBUS_GSI_CERT_UTILS_TYPE_EEC) { identity_cert = tmp_cert; break; }; }; }; } gss_buffer_desc peer_buffer; #if GLOBUS_GSSAPI_GSI_OLD_OPENSSL peer_buffer.value = identity_cert; peer_buffer.length = identity_cert?sizeof(X509*):0; // Globus expects this size despite strored value is X509, not X509* #else peer_buffer.value = identity_cert; peer_buffer.length = identity_cert?sizeof(X509*):0; #endif OM_uint32 majstat, minstat; majstat = gss_import_name(&minstat, &peer_buffer, identity_cert?GLOBUS_GSS_C_NT_X509:GSS_C_NT_ANONYMOUS, &ccred->globusid); if (GSS_ERROR(majstat)) { logger.msg(Arc::ERROR, "Failed to convert GSI credential to " "GSS credential (major: %d, minor: %d)", majstat, minstat); majstat = gss_release_cred(&minstat, &ccred); }; } else { ccred = GSS_C_NO_CREDENTIAL; }; if(cert) X509_free(cert); if(key) EVP_PKEY_free(key); if(cchain) sk_X509_pop_free(cchain, X509_free); if(chandle) Arc::GlobusResult(globus_gsi_cred_handle_destroy(chandle)); return ccred; } int main(int argc,char* argv[]) { Arc::LogStream err(std::cerr); err.setFormat(Arc::EmptyFormat); Arc::Logger::rootLogger.addDestination(err); 
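// Sketch of the positional arguments consumed below (the library name shown
// is illustrative only):
//   argv[1]  subject DN of the user
//   argv[2]  path to the credentials (proxy) file
//   argv[3]  name or path of the LCAS library, e.g. a hypothetical liblcas.so
//   argv[4]  LCAS directory, or "*" to leave LCAS_DIR untouched
//   argv[5]  LCAS database file, or "*" to leave LCAS_DB_FILE untouched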
std::string lcas_library; std::string lcas_db_file; std::string lcas_dir; std::string subject; std::string filename; if(argc > 1) subject = argv[1]; if(subject.empty()) { logger.msg(Arc::ERROR, "Missing subject name"); return -1; }; if(argc > 2) filename = argv[2]; if(filename.empty()) { logger.msg(Arc::ERROR, "Missing path of credentials file"); return -1; }; if(argc > 3) lcas_library = argv[3]; if(lcas_library.empty()) { logger.msg(Arc::ERROR, "Missing name of LCAS library"); return -1; }; if(argc > 4) lcas_dir = argv[4]; if(argc > 5) lcas_db_file = argv[5]; if(lcas_dir == "*") lcas_dir.resize(0); if(lcas_db_file == "*") lcas_db_file.resize(0); if((lcas_library[0] != G_DIR_SEPARATOR) && (lcas_library[0] != '.')) { if(lcas_dir.length() != 0) lcas_library=lcas_dir+G_DIR_SEPARATOR_S+lcas_library; }; set_lcas_env(lcas_db_file,lcas_dir); Glib::Module lcas_handle(lcas_library,Glib::ModuleFlags(0)); if(!lcas_handle) { recover_lcas_env(); logger.msg(Arc::ERROR, "Can't load LCAS library %s: %s", lcas_library, Glib::Module::get_last_error()); return -1; }; void *lcas_init_p = NULL; void *lcas_get_fabric_authorization_p = NULL; void *lcas_term_p = NULL; if((!lcas_handle.get_symbol("lcas_init",lcas_init_p)) || (!lcas_handle.get_symbol("lcas_get_fabric_authorization",lcas_get_fabric_authorization_p)) || (!lcas_handle.get_symbol("lcas_term",lcas_term_p))) { recover_lcas_env(); logger.msg(Arc::ERROR, "Can't find LCAS functions in a library %s", lcas_library); return -1; }; lcas_init_t lcas_init_f = (lcas_init_t)lcas_init_p; lcas_get_fabric_authorization_t lcas_get_fabric_authorization_f = (lcas_get_fabric_authorization_t)lcas_get_fabric_authorization_p; lcas_term_t lcas_term_f = (lcas_term_t)lcas_term_p; FILE* lcas_log = fdopen(STDERR_FILENO,"a"); if((*lcas_init_f)(lcas_log) != 0) { recover_lcas_env(); logger.msg(Arc::ERROR, "Failed to initialize LCAS"); return -1; }; // In case anything is not initialized yet Arc::GlobusResult(globus_module_activate(GLOBUS_GSI_GSSAPI_MODULE)); Arc::GlobusResult(globus_module_activate(GLOBUS_GSI_CREDENTIAL_MODULE)); Arc::GlobusResult(globus_module_activate(GLOBUS_GSI_CERT_UTILS_MODULE)); // User without credentials is useless for LCAS ? //Arc::GSSCredential cred(filename,"",""); gss_cred_id_t cred = read_globus_credentials(filename); int res = 1; if((*lcas_get_fabric_authorization_f)((char*)(subject.c_str()),(gss_cred_id_t)cred,(char*)"") == 0) { res=0; }; if((*lcas_term_f)() != 0) { logger.msg(Arc::WARNING, "Failed to terminate LCAS"); }; if(cred != GSS_C_NO_CREDENTIAL) { OM_uint32 majstat, minstat; majstat = gss_release_cred(&minstat, &cred); }; recover_lcas_env(); Arc::GlobusResult(globus_module_deactivate(GLOBUS_GSI_CERT_UTILS_MODULE)); Arc::GlobusResult(globus_module_deactivate(GLOBUS_GSI_CREDENTIAL_MODULE)); Arc::GlobusResult(globus_module_deactivate(GLOBUS_GSI_GSSAPI_MODULE)); return res; } nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/LegacySecHandler.h0000644000000000000000000000012412675602216024062 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.064708 30 ctime=1513200661.801769522 nordugrid-arc-5.4.2/src/hed/shc/legacy/LegacySecHandler.h0000644000175000002070000000161212675602216024127 0ustar00mockbuildmock00000000000000#include #include #include #include #include #include #include namespace ArcSHCLegacy { /** Processes configuration and evaluates groups to which requestor belongs. Obtained result is stored in message context as LegacySecAttr security attribute under ARCLEGACY tag. 
*/ class LegacySecHandler : public ArcSec::SecHandler { private: std::list conf_files_; public: LegacySecHandler(Arc::Config *cfg, Arc::ChainContext* ctx, Arc::PluginArgument* parg); virtual ~LegacySecHandler(void); static Arc::Plugin* get_sechandler(Arc::PluginArgument* arg); virtual ArcSec::SecHandlerStatus Handle(Arc::Message* msg) const; operator bool(void) { return (conf_files_.size() > 0); }; bool operator!(void) { return (conf_files_.size() <= 0); }; }; } // namespace ArcSHCLegacy nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/cert_util.cpp0000644000000000000000000000012412675602216023252 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.059708 30 ctime=1513200661.808769608 nordugrid-arc-5.4.2/src/hed/shc/legacy/cert_util.cpp0000644000175000002070000000333212675602216023320 0ustar00mockbuildmock00000000000000#include #include #include "cert_util.h" static BIO* OpenFileBIO(const std::string& file) { if(!Glib::file_test(file,Glib::FILE_TEST_IS_REGULAR)) return NULL; return BIO_new_file(file.c_str(), "r"); } static int passwordcb(char *buf, int bufsiz, int verify, void *cb_tmp) { return 0; } bool LoadCertificateFile(const std::string& certfile, X509* &x509, STACK_OF(X509)* &certchain) { BIO* certbio = OpenFileBIO(certfile); if(!certbio) return false; //Read certificate if(!(PEM_read_bio_X509(certbio, &x509, NULL, NULL))) { BIO_set_close(certbio,BIO_CLOSE); BIO_free_all(certbio); return false; }; //Get the issuer chain certchain = sk_X509_new_null(); int n = 0; while(!BIO_eof(certbio)){ X509* tmp = NULL; if(!(PEM_read_bio_X509(certbio, &tmp, NULL, NULL))){ ERR_clear_error(); break; }; if(sk_X509_insert(certchain, tmp, n) == 0) { X509_free(tmp); X509_free(x509); x509 = NULL; sk_X509_pop_free(certchain, X509_free); certchain = NULL; BIO_set_close(certbio,BIO_CLOSE); BIO_free_all(certbio); return false; }; ++n; }; BIO_set_close(certbio,BIO_CLOSE); BIO_free_all(certbio); return true; } // Only private keys without password bool LoadKeyFile(const std::string& keyfile, EVP_PKEY* &pkey) { BIO* keybio = OpenFileBIO(keyfile); if(!keybio) return false; pkey = PEM_read_bio_PrivateKey(keybio, NULL, passwordcb, NULL); if(!pkey) { BIO_set_close(keybio,BIO_CLOSE); BIO_free_all(keybio); return false; }; BIO_set_close(keybio,BIO_CLOSE); BIO_free_all(keybio); return true; } nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/auth_plugin.cpp0000644000000000000000000000012412675602216023577 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.033708 30 ctime=1513200661.785769326 nordugrid-arc-5.4.2/src/hed/shc/legacy/auth_plugin.cpp0000644000175000002070000000320312675602216023642 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "auth.h" namespace ArcSHCLegacy { static Arc::Logger logger(Arc::Logger::getRootLogger(),"AuthUser"); AuthResult AuthUser::match_plugin(const char* line) { if(!line) return AAA_NO_MATCH; for(;*line;line++) if(!isspace(*line)) break; if(*line == 0) return AAA_NO_MATCH; char* p; long int to = strtol(line,&p,0); if(p == line) return AAA_NO_MATCH; if(to < 0) return AAA_NO_MATCH; line=p; for(;*line;line++) if(!isspace(*line)) break; if(*line == 0) return AAA_NO_MATCH; std::list args; Arc::tokenize(line,args," ","\"","\""); if(args.size() <= 0) return AAA_NO_MATCH; for(std::list::iterator arg = args.begin(); arg != args.end();++arg) { subst(*arg); }; std::string stdout_str; std::string stderr_str; Arc::Run run(args); run.AssignStdout(stdout_str); run.AssignStderr(stderr_str); 
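  // Added descriptive note: the rule line has the form "<timeout> <command> <args...>".
  // The external command is started below with captured stdout/stderr; exit code 0
  // yields AAA_POSITIVE_MATCH, while a non-zero exit code, exceeding the timeout or
  // failing to start all end in AAA_NO_MATCH (the captured output is only logged).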
if(run.Start()) { if(run.Wait(to)) { if(run.Result() == 0) { return AAA_POSITIVE_MATCH; } else { logger.msg(Arc::ERROR,"Plugin %s returned: %u",args.front(),run.Result()); }; } else { run.Kill(1); logger.msg(Arc::ERROR,"Plugin %s timeout after %u seconds",args.front(),to); }; } else { logger.msg(Arc::ERROR,"Plugin %s failed to start",args.front()); }; if(!stdout_str.empty()) logger.msg(Arc::INFO,"Plugin %s printed: %s",args.front(),stdout_str); if(!stderr_str.empty()) logger.msg(Arc::ERROR,"Plugin %s error: %s",args.front(),stderr_str); return AAA_NO_MATCH; // ?? } } // namespace ArcSHCLegacy nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/cert_util.h0000644000000000000000000000012312675602216022716 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.061708 29 ctime=1513200661.80976962 nordugrid-arc-5.4.2/src/hed/shc/legacy/cert_util.h0000644000175000002070000000031412675602216022762 0ustar00mockbuildmock00000000000000#include #include bool LoadCertificateFile(const std::string& certfile, X509* &x509, STACK_OF(X509)* &certchain); bool LoadKeyFile(const std::string& keyfile, EVP_PKEY* &pkey); nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/LegacySecAttr.h0000644000000000000000000000012412675602216023417 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.060708 30 ctime=1513200661.799769498 nordugrid-arc-5.4.2/src/hed/shc/legacy/LegacySecAttr.h0000644000175000002070000000300412675602216023461 0ustar00mockbuildmock00000000000000#include #include #include #include namespace ArcSHCLegacy { /** * Container for athorization evaluation result. * It stores authorized VOs, groups and VOMS+VO * attributes associated with groups. * TODO: Merge with AuthUser. */ class LegacySecAttr: public Arc::SecAttr { public: LegacySecAttr(Arc::Logger& logger); virtual ~LegacySecAttr(void); // Common interface virtual operator bool(void) const; virtual bool Export(Arc::SecAttrFormat format,Arc::XMLNode &val) const; virtual std::string get(const std::string& id) const; virtual std::list getAll(const std::string& id) const; // Specific interface void AddGroup(const std::string& group, const std::list& vo, const std::list& voms); const std::list GetGroups(void) const { return groups_; }; const std::list& GetGroupVO(const std::string& group) const; const std::list& GetGroupVOMS(const std::string& group) const; void AddVO(const std::string& vo) { VOs_.push_back(vo); }; const std::list GetVOs(void) const { return VOs_; }; protected: Arc::Logger& logger_; std::list groups_; std::list VOs_; std::list< std::list > groupsVO_; // synchronized with groups_ std::list< std::list > groupsVOMS_; // synchronized with groups_ virtual bool equal(const SecAttr &b) const; }; } // namespace ArcSHCLegacy nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/unixmap_lcmaps.cpp0000644000000000000000000000012412675602216024300 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.033708 30 ctime=1513200661.793769424 nordugrid-arc-5.4.2/src/hed/shc/legacy/unixmap_lcmaps.cpp0000644000175000002070000000166112675602216024351 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include //#include #include //#include "../misc/escaped.h" //#include "../misc/proxy.h" //#include "../run/run_plugin.h" #include "unixmap.h" namespace ArcSHCLegacy { static Arc::Logger logger(Arc::Logger::getRootLogger(),"UnixMap"); AuthResult UnixMap::map_lcmaps(const AuthUser& user,unix_user_t& unix_user,const char* line) { // TODO: escape // TODO: hardcoded 
60s timeout for lcmaps std::string lcmaps_plugin = "60 \""+ Arc::ArcLocation::Get()+G_DIR_SEPARATOR_S+PKGLIBEXECSUBDIR+ G_DIR_SEPARATOR_S+"arc-lcmaps\" "; lcmaps_plugin+=std::string("\"")+user_.DN()+"\" "; lcmaps_plugin+=std::string("\"")+user_.proxy()+"\" "; lcmaps_plugin+=line; AuthResult res = map_mapplugin(user,unix_user,lcmaps_plugin.c_str()); return res; } } // namespace ArcSHCLegacy nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/simplemap.cpp0000644000000000000000000000012212123101751023227 xustar000000000000000027 mtime=1363969001.473645 27 atime=1513200575.037708 28 ctime=1513200661.7917694 nordugrid-arc-5.4.2/src/hed/shc/legacy/simplemap.cpp0000644000175000002070000001273212123101751023303 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "simplemap.h" namespace ArcSHCLegacy { static Arc::Logger logger(Arc::Logger::getRootLogger(),"SimpleMap"); class FileLock { private: int h_; #ifndef WIN32 struct flock l_; #endif public: FileLock(int h):h_(h) { if(h_ == -1) return; #ifndef WIN32 l_.l_type=F_WRLCK; l_.l_whence=SEEK_SET; l_.l_start=0; l_.l_len=0; for(;;) { if(fcntl(h_,F_SETLKW,&l_) == 0) break; if(errno != EINTR) { h_=-1; return; }; }; #endif }; ~FileLock(void) { if(h_ == -1) return; #ifndef WIN32 l_.l_type=F_UNLCK; fcntl(h_,F_SETLKW,&l_); #endif }; operator bool(void) { return (h_ != -1); }; bool operator!(void) { return (h_ == -1); }; }; SimpleMap::SimpleMap(const char* dir):dir_(dir) { if((dir_.length() == 0) || (dir_[dir_.length()-1] != '/')) dir_+="/"; if(dir_[0] != '/') dir_=Glib::get_current_dir()+"/"+dir_; pool_handle_=open((dir_+"pool").c_str(),O_RDWR); } SimpleMap::~SimpleMap(void) { if(pool_handle_ != -1) close(pool_handle_); pool_handle_=-1; } #define failure(S) { \ logger.msg(Arc::ERROR, "SimpleMap: %s", (S)); \ return ""; \ } #define info(S) { \ logger.msg(Arc::INFO, "SimpleMap: %s", (S)); \ } std::string SimpleMap::map(const char* subject) { if(pool_handle_ == -1) failure("not initialized"); if(!subject) failure("missing subject"); std::string filename(subject); for(std::string::size_type i = filename.find('/');i!=std::string::npos; i=filename.find('/',i+1)) filename[i]='_'; filename=dir_+filename; FileLock lock(pool_handle_); if(!lock) failure("failed to lock pool file"); // Check for existing mapping struct stat st; if(stat(filename.c_str(),&st) == 0) { if(!S_ISREG(st.st_mode)) failure("mapping is not a regular file"); std::ifstream f(filename.c_str()); if(!f.is_open()) failure("can't open mapping file"); std::string buf; getline(f,buf); utime(filename.c_str(),NULL); return buf; }; // Look for unused names // Get full list first. std::list names; { std::ifstream f((dir_+"pool").c_str()); if(!f.is_open()) failure("can't open pool file") std::string buf; while(getline(f,buf)) { if(buf.empty()) continue; names.push_back(buf); }; }; if(!names.size()) failure("pool is empty"); // Remove all used names from list. Also find oldest maping. 
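  // Added descriptive note: the scan below walks the pool directory, where every
  // regular file except "pool" represents an existing subject-to-name mapping
  // (file name = escaped subject, first line = assigned unix name).  Names that
  // are already taken are removed from the candidate list read from "pool",
  // stale files not listed in "pool" are unlinked after SELFUNMAP_TIME, and the
  // least recently used mapping is remembered in oldmap_* (presumably so that it
  // can be recycled when no unused name remains).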
time_t oldmap_time = 0; std::string oldmap_name; std::string oldmap_subject; { #ifdef HAVE_READDIR_R struct dirent file_; #endif struct dirent *file; DIR *dir=opendir(dir_.c_str()); if(dir == NULL) failure("can't list pool directory"); for(;;) { #ifdef HAVE_READDIR_R readdir_r(dir,&file_,&file); #else file = readdir(dir); #endif if(file == NULL) break; if(std::string(file->d_name) == ".") continue; if(std::string(file->d_name) == "..") continue; if(std::string(file->d_name) == "pool") continue; std::string filename = dir_+file->d_name; struct stat st; if(stat(filename.c_str(),&st) != 0) continue; if(!S_ISREG(st.st_mode)) continue; std::ifstream f(filename.c_str()); if(!f.is_open()) { // trash in directory closedir(dir); failure("can't open one of mapping files"); }; std::string buf; getline(f,buf); // find this name in list std::list::iterator i = names.begin(); for(;i!=names.end();++i) if(*i == buf) break; if(i == names.end()) { // Always try to destroy old mappings without corresponding // entry in the pool file if(((unsigned int)(time(NULL) - st.st_mtime)) >= SELFUNMAP_TIME) { unlink(filename.c_str()); }; } else { names.erase(i); if( (oldmap_name.length() == 0) || (((int)(oldmap_time - st.st_mtime)) > 0) ) { oldmap_name=buf; oldmap_subject=file->d_name; oldmap_time=st.st_mtime; }; }; }; closedir(dir); }; if(names.size()) { // Claim one of unused names std::ofstream f(filename.c_str()); if(!f.is_open()) failure("can't create mapping file"); f<<*(names.begin())< #endif #include #include "LegacySecHandler.h" #include "LegacyPDP.h" #include "LegacyMap.h" extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "arclegacy.handler", "HED:SHC", NULL, 0, &ArcSHCLegacy::LegacySecHandler::get_sechandler}, { "arclegacy.pdp", "HED:PDP", NULL, 0, &ArcSHCLegacy::LegacyPDP::get_pdp}, { "arclegacy.map", "HED:SHC", NULL, 0, &ArcSHCLegacy::LegacyMap::get_sechandler}, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/auth_lcas.cpp0000644000000000000000000000012412675602216023223 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.064708 30 ctime=1513200661.786769339 nordugrid-arc-5.4.2/src/hed/shc/legacy/auth_lcas.cpp0000644000175000002070000000117712675602216023276 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "auth.h" namespace ArcSHCLegacy { AuthResult AuthUser::match_lcas(const char* line) { // TODO: escape // TODO: hardcoded 60s timeout std::string lcas_plugin = "60 \""+ Arc::ArcLocation::Get()+G_DIR_SEPARATOR_S+PKGLIBEXECSUBDIR+ G_DIR_SEPARATOR_S+"arc-lcas\" "; lcas_plugin+=std::string("\"")+DN()+"\" "; lcas_plugin+=std::string("\"")+proxy()+"\" "; lcas_plugin+=line; AuthResult res = match_plugin(lcas_plugin.c_str()); return res; } } // namespace ArcSHCLegacy nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/auth_file.cpp0000644000000000000000000000012412675602216023220 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.061708 30 ctime=1513200661.783769302 nordugrid-arc-5.4.2/src/hed/shc/legacy/auth_file.cpp0000644000175000002070000000162012675602216023264 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "auth.h" namespace ArcSHCLegacy { static Arc::Logger logger(Arc::Logger::getRootLogger(),"AuthUser"); AuthResult AuthUser::match_file(const char* line) { std::list tokens; Arc::tokenize(line, tokens, " ", "\"", "\""); for(std::list::iterator s = 
tokens.begin();s!=tokens.end();++s) { std::ifstream f(s->c_str()); if(!f.is_open()) { logger.msg(Arc::ERROR, "Failed to read file %s", *s); return AAA_FAILURE; }; for(;f.good();) { std::string buf; getline(f,buf); AuthResult res = evaluate(buf.c_str()); if(res != AAA_NO_MATCH) { f.close(); return res; }; }; f.close(); }; return AAA_NO_MATCH; } } // namespace ArcSHCLegacy nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/LegacySecHandler.cpp0000644000000000000000000000012312675602216024414 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.059708 29 ctime=1513200661.80076951 nordugrid-arc-5.4.2/src/hed/shc/legacy/LegacySecHandler.cpp0000644000175000002070000001156112675602216024466 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "LegacySecAttr.h" #include "auth.h" #include "ConfigParser.h" #include "LegacySecHandler.h" namespace ArcSHCLegacy { Arc::Plugin* LegacySecHandler::get_sechandler(Arc::PluginArgument* arg) { ArcSec::SecHandlerPluginArgument* shcarg = arg?dynamic_cast(arg):NULL; if(!shcarg) return NULL; LegacySecHandler* plugin = new LegacySecHandler((Arc::Config*)(*shcarg),(Arc::ChainContext*)(*shcarg),arg); if(!plugin) return NULL; if(!(*plugin)) { delete plugin; plugin = NULL; }; return plugin; } LegacySecHandler::LegacySecHandler(Arc::Config *cfg,Arc::ChainContext* ctx,Arc::PluginArgument* parg):SecHandler(cfg,parg) { Arc::XMLNode conf_file = (*cfg)["ConfigFile"]; while((bool)conf_file) { std::string filename = (std::string)conf_file; if(!filename.empty()) { conf_files_.push_back(filename); }; ++conf_file; }; if(conf_files_.size() <= 0) { logger.msg(Arc::ERROR, "LegacySecHandler: configuration file not specified"); }; } LegacySecHandler::~LegacySecHandler(void) { } class LegacySHCP: public ConfigParser { public: LegacySHCP(const std::string& filename, Arc::Logger& logger, AuthUser& auth/*, LegacySecAttr& sattr*/): ConfigParser(filename,logger),auth_(auth)/*,sattr_(sattr)*/,group_match_(0),vo_match_(false) { }; virtual ~LegacySHCP(void) { }; protected: virtual bool BlockStart(const std::string& id, const std::string& name) { group_match_ = AAA_NO_MATCH; group_name_ = ""; vo_match_ = false; vo_name_ = ""; return true; }; virtual bool BlockEnd(const std::string& id, const std::string& name) { if(id == "group") { if(group_name_.empty()) group_name_ = name; if((group_match_ == AAA_POSITIVE_MATCH) && !group_name_.empty()) { auth_.add_group(group_name_); }; } else if(id == "vo") { if(vo_name_.empty()) vo_name_ = name; if(vo_match_ && !vo_name_.empty()) { auth_.add_vo(vo_name_); }; }; return true; }; virtual bool ConfigLine(const std::string& id, const std::string& name, const std::string& cmd, const std::string& line) { if(id == "group") { if(group_match_ == AAA_NO_MATCH) { group_match_ = auth_.evaluate((cmd + " " + line).c_str()); }; } else if(id == "vo") { if(!vo_match_) { if(cmd == "file") { if(!line.empty()) { // Because file=filename looks exactly like // matching rule evaluate() can be used int r = auth_.evaluate((cmd + " " + line).c_str()); vo_match_ = (r == AAA_POSITIVE_MATCH); }; } else if(cmd == "vo") { vo_name_ = line; }; }; }; return true; }; private: AuthUser& auth_; /* LegacySecAttr& sattr_; */ int group_match_; std::string group_name_; bool vo_match_; std::string vo_name_; }; ArcSec::SecHandlerStatus LegacySecHandler::Handle(Arc::Message* msg) const { if(conf_files_.size() <= 0) { logger.msg(Arc::ERROR, "LegacySecHandler: configuration file not specified"); return false; }; Arc::SecAttr* attr = 
msg->AuthContext()->get("ARCLEGACY"); if(attr) { LegacySecAttr* lattr = dynamic_cast(attr); if(lattr) { // Information already collected return true; }; }; AuthUser auth(*msg); Arc::AutoPointer sattr(new LegacySecAttr(logger)); for(std::list::const_iterator conf_file = conf_files_.begin(); conf_file != conf_files_.end();++conf_file) { LegacySHCP parser(*conf_file,logger,auth /*,*sattr*/); if(!parser) return false; if(!parser.Parse()) return false; }; // Pass all matched groups and VOs to LegacySecAttr { const std::list& vos = auth.VOs(); for(std::list::const_iterator vo = vos.begin(); vo != vos.end(); ++vo) sattr->AddVO(*vo); }; { std::list groups; auth.get_groups(groups); for(std::list::const_iterator grp = groups.begin(); grp != groups.end(); ++grp) { const char* vo = auth.get_group_vo(*grp); const voms_t* voms = auth.get_group_voms(*grp); std::list vos; std::list vomss; if((vo != NULL) && (*vo != '\0')) vos.push_back(vo); if(voms != NULL) { for(std::vector::const_iterator f = voms->fqans.begin(); f != voms->fqans.end(); ++f) { std::string fqan; f->str(fqan); vomss.push_back(fqan); }; }; sattr->AddGroup(*grp, vos, vomss); }; }; // Pass all matched groups and VOs to Message in SecAttr msg->AuthContext()->set("ARCLEGACY",sattr.Release()); return true; } } // namespace ArcSHCLegacy nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/unixmap.cpp0000644000000000000000000000012412675602216022741 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.035708 30 ctime=1513200661.794769437 nordugrid-arc-5.4.2/src/hed/shc/legacy/unixmap.cpp0000644000175000002070000002510712675602216023013 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "simplemap.h" #include "unixmap.h" namespace ArcSHCLegacy { static Arc::Logger logger(Arc::Logger::getRootLogger(),"UnixMap"); UnixMap::source_t UnixMap::sources[] = { { "mapfile", &UnixMap::map_mapfile }, { "simplepool", &UnixMap::map_simplepool }, { "unixuser", &UnixMap::map_unixuser }, { "lcmaps", &UnixMap::map_lcmaps }, { "mapplugin", &UnixMap::map_mapplugin }, { NULL, NULL } }; UnixMap::UnixMap(AuthUser& user,const std::string& id): user_(user),map_id_(id),mapped_(false) { } UnixMap::~UnixMap(void) { } void split_unixname(std::string& unixname,std::string& unixgroup) { std::string::size_type p = unixname.find(':'); if(p != std::string::npos) { unixgroup=unixname.c_str()+p+1; unixname.resize(p); }; if(unixname[0] == '*') unixname.resize(0); if(unixgroup[0] == '*') unixgroup.resize(0); } AuthResult UnixMap::mapgroup(const char* line) { mapped_=false; if(!line) { logger.msg(Arc::ERROR,"User name mapping command is empty"); return AAA_FAILURE; }; for(;*line;line++) if(!isspace(*line)) break; if(*line == 0) { logger.msg(Arc::ERROR,"User name mapping command is empty"); return AAA_FAILURE; }; const char* groupname = line; for(;*line;line++) if(isspace(*line)) break; int groupname_len = line-groupname; if(groupname_len == 0) { logger.msg(Arc::ERROR,"User name mapping has empty group: %s", groupname); return AAA_FAILURE; }; if(!user_.check_group(std::string(groupname,groupname_len))) return AAA_NO_MATCH; unix_user_.name.resize(0); unix_user_.group.resize(0); for(;*line;line++) if(!isspace(*line)) break; const char* command = line; for(;*line;line++) if(isspace(*line)) break; size_t command_len = line-command; if(command_len == 0) { logger.msg(Arc::ERROR,"User name mapping has empty command: %s", command); return AAA_FAILURE; }; for(;*line;line++) if(!isspace(*line)) break; 
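  // Added descriptive note: at this point the rule has been parsed as
  // "<group> <command> <args...>" and 'line' points at the arguments.  The loop
  // below dispatches on <command> to one of the handlers registered in sources[]
  // (mapfile, simplepool, unixuser, lcmaps or mapplugin); mapvo() and mapname()
  // repeat the same dispatch for VO-based and fixed-name rules.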
for(source_t* s = sources;s->cmd;s++) { if((strncmp(s->cmd,command,command_len) == 0) && (strlen(s->cmd) == command_len)) { AuthResult res=(this->*(s->map))(user_,unix_user_,line); if(res == AAA_POSITIVE_MATCH) { mapped_=true; return AAA_POSITIVE_MATCH; }; if(res == AAA_FAILURE) { // Processing failure cause immediate error return AAA_FAILURE; }; // Paranoid about negative match return AAA_NO_MATCH; }; }; return AAA_FAILURE; } AuthResult UnixMap::mapvo(const char* line) { mapped_=false; if(!line) { logger.msg(Arc::ERROR,"User name mapping command is empty"); return AAA_FAILURE; }; for(;*line;line++) if(!isspace(*line)) break; if(*line == 0) { logger.msg(Arc::ERROR,"User name mapping command is empty"); return AAA_FAILURE; }; const char* voname = line; for(;*line;line++) if(isspace(*line)) break; int voname_len = line-voname; if(voname_len == 0) { logger.msg(Arc::ERROR,"User name mapping has empty VO: %s", voname); return AAA_FAILURE; }; if(!user_.check_vo(std::string(voname,voname_len))) return AAA_NO_MATCH; unix_user_.name.resize(0); unix_user_.group.resize(0); for(;*line;line++) if(!isspace(*line)) break; const char* command = line; for(;*line;line++) if(isspace(*line)) break; size_t command_len = line-command; if(command_len == 0) { logger.msg(Arc::ERROR,"User name mapping has empty command: %s", command); return AAA_FAILURE; }; for(;*line;line++) if(!isspace(*line)) break; for(source_t* s = sources;s->cmd;s++) { if((strncmp(s->cmd,command,command_len) == 0) && (strlen(s->cmd) == command_len)) { AuthResult res=(this->*(s->map))(user_,unix_user_,line); if(res == AAA_POSITIVE_MATCH) { mapped_=true; return AAA_POSITIVE_MATCH; }; if(res == AAA_FAILURE) { // Processing failure cause immediate error return AAA_FAILURE; }; // Paranoid about negative match return AAA_NO_MATCH; }; }; return AAA_FAILURE; } AuthResult UnixMap::mapname(const char* line) { mapped_=false; if(!line) { logger.msg(Arc::ERROR,"User name mapping command is empty"); return AAA_FAILURE; }; for(;*line;line++) if(!isspace(*line)) break; if(*line == 0) { logger.msg(Arc::ERROR,"User name mapping command is empty"); return AAA_FAILURE; }; const char* unixname = line; for(;*line;line++) if(isspace(*line)) break; int unixname_len = line-unixname; if(unixname_len == 0) { logger.msg(Arc::ERROR,"User name mapping has empty name: %s", unixname); return AAA_FAILURE; }; unix_user_.name.assign(unixname,unixname_len); split_unixname(unix_user_.name,unix_user_.group); for(;*line;line++) if(!isspace(*line)) break; const char* command = line; for(;*line;line++) if(isspace(*line)) break; size_t command_len = line-command; if(command_len == 0) { logger.msg(Arc::ERROR,"User name mapping has empty command: %s", command); return AAA_FAILURE; } for(;*line;line++) if(!isspace(*line)) break; for(source_t* s = sources;s->cmd;s++) { if((strncmp(s->cmd,command,command_len) == 0) && (strlen(s->cmd) == command_len)) { AuthResult res=(this->*(s->map))(user_,unix_user_,line); if(res == AAA_POSITIVE_MATCH) { mapped_=true; return AAA_POSITIVE_MATCH; }; if(res == AAA_FAILURE) { // Processing failure cause immediate error return AAA_FAILURE; }; // Paranoid about negative match return AAA_NO_MATCH; }; }; if(unix_user_.name.length() != 0) { // Try authorization rules if username is predefined AuthResult decision = user_.evaluate(command); if(decision == AAA_POSITIVE_MATCH) { mapped_=true; return AAA_POSITIVE_MATCH; }; return decision; // propagate failure information }; // If user name is not defined then it was supposed to be // mapping rule. 
And if not then we failed. return AAA_FAILURE; } // ----------------------------------------------------------- AuthResult UnixMap::map_mapplugin(const AuthUser& /* user */ ,unix_user_t& unix_user,const char* line) { // timeout path arg ... if(!line) { logger.msg(Arc::ERROR,"Plugin (user mapping) command is empty"); return AAA_FAILURE; }; for(;*line;line++) if(!isspace(*line)) break; if(*line == 0) { logger.msg(Arc::ERROR,"Plugin (user mapping) command is empty"); return AAA_FAILURE; }; char* p; long int to = strtol(line,&p,0); if(p == line) { logger.msg(Arc::ERROR,"Plugin (user mapping) timeout is not a number: %s", line); return AAA_FAILURE; }; if(to < 0) { logger.msg(Arc::ERROR,"Plugin (user mapping) timeout is wrong number: %s", line); return AAA_FAILURE; }; line=p; for(;*line;line++) if(!isspace(*line)) break; if(*line == 0) { logger.msg(Arc::ERROR,"Plugin (user mapping) command is empty"); return AAA_FAILURE; }; std::list args; Arc::tokenize(line,args," ","\"","\""); if(args.size() <= 0) { logger.msg(Arc::ERROR,"Plugin (user mapping) command is empty"); return AAA_FAILURE; }; for(std::list::iterator arg = args.begin(); arg != args.end();++arg) { user_.subst(*arg); }; std::string stdout_channel; std::string stderr_channel; Arc::Run run(args); run.AssignStdout(stdout_channel); run.AssignStderr(stderr_channel); if(run.Start()) { if(run.Wait(to)) { if(run.Result() == 0) { if(stdout_channel.length() <= 512) { // sane name // Plugin should print user[:group] at stdout or nothing if no suitable mapping found unix_user.name = stdout_channel; split_unixname(unix_user.name,unix_user.group); if(unix_user.name.empty()) return AAA_NO_MATCH; // success but no match return AAA_POSITIVE_MATCH; } else { logger.msg(Arc::ERROR,"Plugin %s returned too much: %s",args.front(),stdout_channel); }; } else { logger.msg(Arc::ERROR,"Plugin %s returned: %u",args.front(),run.Result()); }; } else { run.Kill(1); logger.msg(Arc::ERROR,"Plugin %s timeout after %u seconds",args.front(),to); }; } else { logger.msg(Arc::ERROR,"Plugin %s failed to start",args.front()); }; if(!stdout_channel.empty()) logger.msg(Arc::INFO,"Plugin %s printed: %s",args.front(),stdout_channel); if(!stderr_channel.empty()) logger.msg(Arc::ERROR,"Plugin %s error: %s",args.front(),stderr_channel); return AAA_FAILURE; } AuthResult UnixMap::map_mapfile(const AuthUser& user,unix_user_t& unix_user,const char* line) { // This is just grid-mapfile std::ifstream f(line); if(user.DN()[0] == 0) { logger.msg(Arc::ERROR, "User subject match is missing user subject."); return AAA_NO_MATCH; }; if(!f.is_open() ) { logger.msg(Arc::ERROR, "Mapfile at %s can't be opened.", line); return AAA_FAILURE; }; for(;f.good();) { std::string buf; getline(f,buf); std::string::size_type p = 0; for(;p=buf.length()) continue; std::string val; p = Arc::get_token(val,buf,p," ","\"","\""); if(val != user.DN()) continue; p = Arc::get_token(unix_user.name,buf,p," ","\"","\""); f.close(); return AAA_POSITIVE_MATCH; }; f.close(); return AAA_NO_MATCH; } AuthResult UnixMap::map_simplepool(const AuthUser& user,unix_user_t& unix_user,const char* line) { if(user.DN()[0] == 0) { logger.msg(Arc::ERROR, "User pool call is missing user subject."); return AAA_NO_MATCH; }; SimpleMap pool(line); if(!pool) { logger.msg(Arc::ERROR, "User pool at %s can't be opened.", line); return AAA_FAILURE; }; unix_user.name=pool.map(user.DN()); if(unix_user.name.empty()) { logger.msg(Arc::ERROR, "User pool at %s failed to perform user mapping.", line); return AAA_FAILURE; }; 
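  // Added descriptive note: the pool may hand back either "name" or "name:group";
  // split_unixname() below separates the two parts and treats a leading '*' as unset.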
split_unixname(unix_user.name,unix_user.group); return AAA_POSITIVE_MATCH; } AuthResult UnixMap::map_unixuser(const AuthUser& /* user */,unix_user_t& unix_user,const char* line) { // Maping is always positive - just fill specified username std::string unixname(line); std::string unixgroup; std::string::size_type p = unixname.find(':'); if(p != std::string::npos) { unixgroup=unixname.c_str()+p+1; unixname.resize(p); }; if(unixname.empty()) { logger.msg(Arc::ERROR, "User name direct mapping is missing user name: %s.", line); return AAA_FAILURE; }; unix_user.name=unixname; unix_user.group=unixgroup; return AAA_POSITIVE_MATCH; } } // namespace ArcSHCLegacy nordugrid-arc-5.4.2/src/hed/shc/legacy/PaxHeaders.7502/schema0000644000000000000000000000013213214316025021724 xustar000000000000000030 mtime=1513200661.838769975 30 atime=1513200668.722854169 30 ctime=1513200661.838769975 nordugrid-arc-5.4.2/src/hed/shc/legacy/schema/0000755000175000002070000000000013214316025022047 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/legacy/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012611561331634024046 xustar000000000000000027 mtime=1304802204.481834 29 atime=1513200600.56402056 30 ctime=1513200661.835769938 nordugrid-arc-5.4.2/src/hed/shc/legacy/schema/Makefile.am0000644000175000002070000000014511561331634024111 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = ARCSHCLegacy.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/shc/legacy/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315730024050 xustar000000000000000030 mtime=1513200600.596020952 30 atime=1513200649.600620297 30 ctime=1513200661.837769962 nordugrid-arc-5.4.2/src/hed/shc/legacy/schema/Makefile.in0000644000175000002070000004352213214315730024124 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/legacy/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = 
@ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = 
@LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ 
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = ARCSHCLegacy.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/legacy/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/legacy/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" 
| sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/legacy/schema/PaxHeaders.7502/ARCSHCLegacy.xsd0000644000000000000000000000012411565303021024611 xustar000000000000000027 mtime=1305839121.576139 27 atime=1513200575.033708 30 ctime=1513200661.838769975 nordugrid-arc-5.4.2/src/hed/shc/legacy/schema/ARCSHCLegacy.xsd0000644000175000002070000000536411565303021024666 0ustar00mockbuildmock00000000000000 This element defines configuration file path for ARCLegacy SecHandler. 
That file contains [group] and [vo] which wil be proessed by that SecHandler. This elemnt is mandatory. This element defines allowed group for ARCLegacy PDP. If client belongs to that group PDP returns positive result. There may be multiple such elements. This element defines allowed VO for ARCLegacy PDP. If client belongs to that VO PDP returns positive result. There may be multiple such elements. This element defines configuration file and configuration blocks for processing in ARCLegacy Identity Mapper and PDP. There may be multiple such elements. nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/testinterface_xacml.cpp0000644000000000000000000000012411570717263024042 xustar000000000000000027 mtime=1306762931.686321 27 atime=1513200575.006707 30 ctime=1513200661.022759994 nordugrid-arc-5.4.2/src/hed/shc/testinterface_xacml.cpp0000644000175000002070000000446611570717263024121 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include //#include #include #include int main(void){ signal(SIGTTOU,SIG_IGN); signal(SIGTTIN,SIG_IGN); signal(SIGPIPE,SIG_IGN); Arc::Logger logger(Arc::Logger::rootLogger, "PDPTest"); Arc::LogStream logcerr(std::cerr); Arc::Logger::rootLogger.addDestination(logcerr); logger.msg(Arc::INFO, "Start test"); ArcSec::EvaluatorLoader eval_loader; //TEST: XACMLEvaluator, XACMLPolicy, XACMLRequest //Load the Evaluator ArcSec::Evaluator* eval = NULL; std::string evaluator = "xacml.evaluator"; eval = eval_loader.getEvaluator(evaluator); if(eval == NULL) { logger.msg(Arc::ERROR, "Can not dynamically produce Evaluator"); return 0; } ArcSec::Policy* policy = NULL; std::string policyclassname = "xacml.policy"; ArcSec::SourceFile policy_source("XACML_Policy.xml"); policy = eval_loader.getPolicy(policyclassname, policy_source); if(policy == NULL) logger.msg(Arc::ERROR, "Can not dynamically produce Policy"); ArcSec::Request* request = NULL; std::string requestclassname = "xacml.request"; ArcSec::SourceFile request_source("XACML_Request.xml"); request = eval_loader.getRequest(requestclassname, request_source); if(request == NULL) logger.msg(Arc::ERROR, "Can not dynamically produce Request"); /**Two options to add policy into evaluator*/ //eval->addPolicy(policy_source); eval->addPolicy(policy); ArcSec::Response *resp = NULL; /**Feed evaluator with request to execute evaluation*/ resp = eval->evaluate(request_source); //resp = eval->evaluate(request_source, policy); //resp = eval->evaluate(request, policy_source); //resp = eval->evaluate(request_source, policy_source); //resp = eval->evaluate(request, policy); /**Get the response*/ ArcSec::ResponseList rlist = resp->getResponseItems(); std::cout<res</dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-noinstLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/x509tokensh/PaxHeaders.7502/X509TokenSH.cpp0000644000000000000000000000012412110401544023777 xustar000000000000000027 mtime=1361183588.521754 27 atime=1513200575.028708 30 ctime=1513200661.473765511 nordugrid-arc-5.4.2/src/hed/shc/x509tokensh/X509TokenSH.cpp0000644000175000002070000000737212110401544024055 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "X509TokenSH.h" static Arc::Logger logger(Arc::Logger::rootLogger, "X509TokenSH"); Arc::Plugin* ArcSec::X509TokenSH::get_sechandler(Arc::PluginArgument* arg) { ArcSec::SecHandlerPluginArgument* shcarg = arg?dynamic_cast<ArcSec::SecHandlerPluginArgument*>(arg):NULL; if(!shcarg) return NULL; ArcSec::X509TokenSH* plugin = new ArcSec::X509TokenSH((Arc::Config*)(*shcarg),(Arc::ChainContext*)(*shcarg),arg); if(!plugin) return NULL; if(!(*plugin)) { delete plugin; plugin = NULL; }; return plugin; } /* sechandler_descriptors ARC_SECHANDLER_LOADER = { { "x509token.creator", 0, &get_sechandler}, { NULL, 0, NULL } }; */ namespace ArcSec { using namespace Arc; X509TokenSH::X509TokenSH(Config *cfg,ChainContext*,Arc::PluginArgument* parg):SecHandler(cfg,parg),valid_(false){ if(!init_xmlsec()) return; process_type_=process_none; std::string process_type = (std::string)((*cfg)["Process"]); if(process_type == "generate") { cert_file_=(std::string)((*cfg)["CertificatePath"]); if(cert_file_.empty()) { logger.msg(ERROR,"Missing or empty CertificatePath element"); return; }; key_file_=(std::string)((*cfg)["KeyPath"]); if(key_file_.empty()) { logger.msg(ERROR,"Missing or empty KeyPath element"); return; }; process_type_=process_generate; } else if(process_type == "extract") { //If the CA file does not exist, we can only verify the signature by //using the certificate in the incoming WS-Security header; we can not authenticate //the message because we can not check the certificate chain without a //trusted CA.
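//CACertificatePath points at a single trusted CA certificate file and
//CACertificatesDir at a directory of trusted CA certificates (see the
//accompanying X509TokenSH.xsd); if both are left empty the handler still
//verifies the XML signature against the certificate carried in the message,
//but does not validate that certificate against a trusted CA.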
ca_file_=(std::string)((*cfg)["CACertificatePath"]); ca_dir_=(std::string)((*cfg)["CACertificatesDir"]); if(ca_file_.empty() && ca_dir_.empty()) { logger.msg(INFO,"Missing or empty CertificatePath or CACertificatesDir element; will only check the signature, will not do message authentication"); }; process_type_=process_extract; } else { logger.msg(ERROR,"Processing type not supported: %s",process_type); return; }; valid_ = true; } X509TokenSH::~X509TokenSH() { final_xmlsec(); } SecHandlerStatus X509TokenSH::Handle(Arc::Message* msg) const { if(process_type_ == process_extract) { try { PayloadSOAP* soap = dynamic_cast(msg->Payload()); X509Token xt(*soap); if(!xt) { logger.msg(ERROR,"Failed to parse X509 Token from incoming SOAP"); return false; }; if(!xt.Authenticate()) { logger.msg(ERROR, "Failed to verify X509 Token inside the incoming SOAP"); return false; }; if((!ca_file_.empty() || !ca_dir_.empty()) && !xt.Authenticate(ca_file_, ca_dir_)) { logger.msg(ERROR, "Failed to authenticate X509 Token inside the incoming SOAP"); return false; }; logger.msg(INFO, "Succeeded to authenticate X509Token"); } catch(std::exception&) { logger.msg(ERROR,"Incoming Message is not SOAP"); return false; } } else if(process_type_ == process_generate) { try { PayloadSOAP* soap = dynamic_cast(msg->Payload()); X509Token xt(*soap, cert_file_, key_file_); if(!xt) { logger.msg(ERROR,"Failed to generate X509 Token for outgoing SOAP"); return false; }; //Reset the soap message (*soap) = xt; } catch(std::exception&) { logger.msg(ERROR,"Outgoing Message is not SOAP"); return false; } } else { logger.msg(ERROR,"X509 Token handler is not configured"); return false; } return true; } } nordugrid-arc-5.4.2/src/hed/shc/x509tokensh/PaxHeaders.7502/X509TokenSH.h0000644000000000000000000000012412110401544023444 xustar000000000000000027 mtime=1361183588.521754 27 atime=1513200575.029708 30 ctime=1513200661.474765523 nordugrid-arc-5.4.2/src/hed/shc/x509tokensh/X509TokenSH.h0000644000175000002070000000161512110401544023514 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_X509TOKENSH_H__ #define __ARC_SEC_X509TOKENSH_H__ #include #include #include #include namespace ArcSec { /// Adds WS-Security X509 Token into SOAP Header class X509TokenSH : public SecHandler { private: enum { process_none, process_extract, process_generate } process_type_; std::string cert_file_; std::string key_file_; std::string ca_file_; std::string ca_dir_; bool valid_; public: X509TokenSH(Arc::Config *cfg, Arc::ChainContext* ctx, Arc::PluginArgument* parg); virtual ~X509TokenSH(void); static Arc::Plugin* get_sechandler(Arc::PluginArgument* arg); virtual SecHandlerStatus Handle(Arc::Message* msg) const; operator bool(void) { return valid_; }; bool operator!(void) { return !valid_; }; }; } // namespace ArcSec #endif /* __ARC_SEC_X509TOKENSH_H__ */ nordugrid-arc-5.4.2/src/hed/shc/x509tokensh/PaxHeaders.7502/schema0000644000000000000000000000013213214316025022561 xustar000000000000000030 mtime=1513200661.503765877 30 atime=1513200668.722854169 30 ctime=1513200661.503765877 nordugrid-arc-5.4.2/src/hed/shc/x509tokensh/schema/0000755000175000002070000000000013214316025022704 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/x509tokensh/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711255700321024675 xustar000000000000000027 mtime=1253540049.444682 30 atime=1513200601.158027825 30 ctime=1513200661.500765841 nordugrid-arc-5.4.2/src/hed/shc/x509tokensh/schema/Makefile.am0000644000175000002070000000014411255700321024736 
0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = X509TokenSH.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/shc/x509tokensh/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315731024706 xustar000000000000000030 mtime=1513200601.189028204 30 atime=1513200649.487618915 30 ctime=1513200661.501765853 nordugrid-arc-5.4.2/src/hed/shc/x509tokensh/schema/Makefile.in0000644000175000002070000004354013214315731024762 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/x509tokensh/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed 
'$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = 
@GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = 
@arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = X509TokenSH.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/x509tokensh/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/x509tokensh/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/x509tokensh/schema/PaxHeaders.7502/X509TokenSH.xsd0000644000000000000000000000012411457664434025301 xustar000000000000000027 mtime=1287612700.761119 27 atime=1513200575.028708 30 ctime=1513200661.503765877 nordugrid-arc-5.4.2/src/hed/shc/x509tokensh/schema/X509TokenSH.xsd0000644000175000002070000000647211457664434025357 0ustar00mockbuildmock00000000000000 This element defines either X509 Token is extracted from SOAP header or generated using other configuration elements. Type of the processing of X509 Token to SOAP message: extract or generate . Default is none. The usage of the X509 Token: signature or encryption. Default is signature. The location of private key which is used to sign the SOAP message, only needed by the client side. Default is none. The location of certificate, the certificate is used to be as one part of X509 Token, only needed by the client side. Default is none. The location of the file of trusted CA certificate, the certificate is used for verifying the signature to SOAP message. Only needed by the service side. Default is none. The location of the directory that contains trusted CA certificates, the certificates are used for verifying the signature to SOAP message. Only needed by the service side. Default is "/etc/grid-security/certificates". 
nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/gaclpdp0000644000000000000000000000013213214316025020632 xustar000000000000000030 mtime=1513200661.449765217 30 atime=1513200668.722854169 30 ctime=1513200661.449765217 nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/0000755000175000002070000000000013214316025020755 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515022753 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200600.394018481 30 ctime=1513200661.440765107 nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/Makefile.am0000644000175000002070000000055012052416515023015 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libgaclpdp.la libgaclpdp_la_SOURCES = GACLRequest.cpp GACLRequest.h \ GACLPolicy.cpp GACLPolicy.h \ GACLEvaluator.cpp GACLEvaluator.h \ GACLPDP.cpp GACLPDP.h libgaclpdp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libgaclpdp_la_LIBADD = $(top_builddir)/src/hed/libs/common/libarccommon.la nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/PaxHeaders.7502/GACLPDP.cpp0000644000000000000000000000012412110410653022462 xustar000000000000000027 mtime=1361187243.181034 27 atime=1513200575.109709 30 ctime=1513200661.448765205 nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/GACLPDP.cpp0000644000175000002070000001155012110410653022531 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include /* #include #include #include #include #include #include #include #include #include #include #include */ #include "GACLPDP.h" Arc::Logger ArcSec::GACLPDP::logger(Arc::Logger::getRootLogger(), "ArcSec.GACLPDP"); Arc::SecAttrFormat ArcSec::GACLPDP::GACL("gacl"); /* static ArcSec::PDP* get_pdp(Arc::Config *cfg,Arc::ChainContext *ctx) { return new ArcSec::ArcPDP(cfg); } pdp_descriptors ARC_PDP_LOADER = { { "gacl.pdp", 0, &get_pdp}, { NULL, 0, NULL } }; */ using namespace Arc; namespace ArcSec { Plugin* GACLPDP::get_gacl_pdp(PluginArgument* arg) { PDPPluginArgument* pdparg = arg?dynamic_cast(arg):NULL; if(!pdparg) return NULL; return new GACLPDP((Config*)(*pdparg),arg); } class GACLPDPContext:public Arc::MessageContextElement { friend class GACLPDP; private: Evaluator* eval; public: GACLPDPContext(Evaluator* e); GACLPDPContext(void); virtual ~GACLPDPContext(void); }; GACLPDPContext::~GACLPDPContext(void) { if(eval) delete eval; } GACLPDPContext::GACLPDPContext(Evaluator* e):eval(e) { } GACLPDPContext::GACLPDPContext(void):eval(NULL) { EvaluatorLoader eval_loader; eval = eval_loader.getEvaluator(std::string("gacl.evaluator")); } GACLPDP::GACLPDP(Config* cfg, Arc::PluginArgument* parg):PDP(cfg,parg) { XMLNode pdp_node(*cfg); XMLNode filter = (*cfg)["Filter"]; if((bool)filter) { XMLNode select_attr = filter["Select"]; XMLNode reject_attr = filter["Reject"]; for(;(bool)select_attr;++select_attr) select_attrs.push_back((std::string)select_attr); for(;(bool)reject_attr;++reject_attr) reject_attrs.push_back((std::string)reject_attr); }; XMLNode policy_store = (*cfg)["PolicyStore"]; XMLNode policy_location = policy_store["Location"]; for(;(bool)policy_location;++policy_location) policy_locations.push_back((std::string)policy_location); XMLNode policy_doc = policy_store["Policy"]; for(;(bool)policy_doc;++policy_doc) policy_docs.AddNew(policy_doc); } PDPStatus GACLPDP::isPermitted(Message *msg) const{ Evaluator* eval = NULL; std::string ctxid = "arcsec.gaclpdp"; try { Arc::MessageContextElement* mctx = (*(msg->Context()))[ctxid]; if(mctx) { GACLPDPContext* 
pdpctx = dynamic_cast(mctx); if(pdpctx) { eval=pdpctx->eval; }; }; } catch(std::exception& e) { }; if(!eval) { GACLPDPContext* pdpctx = new GACLPDPContext(); if(pdpctx) { eval=pdpctx->eval; if(eval) { for(std::list::const_iterator it = policy_locations.begin(); it!= policy_locations.end(); it++) { eval->addPolicy(SourceFile(*it)); } for(int n = 0;naddPolicy(Source(const_cast(policy_docs)[n])); } msg->Context()->Add(ctxid, pdpctx); } else { delete pdpctx; } } if(!eval) logger.msg(ERROR, "Can not dynamically produce Evaluator"); } if(!eval) { logger.msg(ERROR,"Evaluator for GACLPDP was not loaded"); return false; }; MessageAuth* mauth = msg->Auth()->Filter(select_attrs,reject_attrs); MessageAuth* cauth = msg->AuthContext()->Filter(select_attrs,reject_attrs); if((!mauth) && (!cauth)) { logger.msg(ERROR,"Missing security object in message"); return false; }; NS ns; XMLNode requestxml(ns,""); if(mauth) { if(!mauth->Export(GACL,requestxml)) { delete mauth; logger.msg(ERROR,"Failed to convert security information to ARC request"); return false; }; delete mauth; }; if(cauth) { if(!cauth->Export(GACL,requestxml)) { delete mauth; logger.msg(ERROR,"Failed to convert security information to ARC request"); return false; }; delete cauth; }; if(DEBUG >= logger.getThreshold()) { std::string s; requestxml.GetXML(s); logger.msg(DEBUG,"GACL Auth. request: %s",s); }; if(requestxml.Size() <= 0) { logger.msg(ERROR,"No requested security information was collected"); return false; }; //Call the evaluation functionality inside Evaluator Response *resp = eval->evaluate(requestxml); if(!resp) return false; ResponseList rlist = resp->getResponseItems(); // Current implementation of GACL Evaluator returns only one item // and only PERMIT/DENY results. if(rlist.size() <= 0) { delete resp; return false; }; ResponseItem* item = rlist[0]; if(item->res != DECISION_PERMIT) { delete resp; return false; }; delete resp; return true; } GACLPDP::~GACLPDP(){ } } // namespace ArcSec nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315730022755 xustar000000000000000029 mtime=1513200600.44301908 30 atime=1513200649.309616738 30 ctime=1513200661.441765119 nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/Makefile.in0000644000175000002070000006350113214315730023031 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/gaclpdp DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libgaclpdp_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libgaclpdp_la_OBJECTS = libgaclpdp_la-GACLRequest.lo \ libgaclpdp_la-GACLPolicy.lo libgaclpdp_la-GACLEvaluator.lo \ libgaclpdp_la-GACLPDP.lo libgaclpdp_la_OBJECTS = $(am_libgaclpdp_la_OBJECTS) libgaclpdp_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libgaclpdp_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libgaclpdp_la_SOURCES) DIST_SOURCES = $(libgaclpdp_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ 
ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = 
@INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = 
@host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libgaclpdp.la libgaclpdp_la_SOURCES = GACLRequest.cpp GACLRequest.h \ GACLPolicy.cpp GACLPolicy.h \ GACLEvaluator.cpp GACLEvaluator.h \ GACLPDP.cpp GACLPDP.h libgaclpdp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libgaclpdp_la_LIBADD = $(top_builddir)/src/hed/libs/common/libarccommon.la all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/gaclpdp/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/gaclpdp/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libgaclpdp.la: $(libgaclpdp_la_OBJECTS) $(libgaclpdp_la_DEPENDENCIES) $(libgaclpdp_la_LINK) $(libgaclpdp_la_OBJECTS) $(libgaclpdp_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libgaclpdp_la-GACLEvaluator.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libgaclpdp_la-GACLPDP.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libgaclpdp_la-GACLPolicy.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libgaclpdp_la-GACLRequest.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libgaclpdp_la-GACLRequest.lo: GACLRequest.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libgaclpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libgaclpdp_la-GACLRequest.lo -MD -MP -MF $(DEPDIR)/libgaclpdp_la-GACLRequest.Tpo -c -o libgaclpdp_la-GACLRequest.lo `test -f 'GACLRequest.cpp' || echo '$(srcdir)/'`GACLRequest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libgaclpdp_la-GACLRequest.Tpo $(DEPDIR)/libgaclpdp_la-GACLRequest.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='GACLRequest.cpp' object='libgaclpdp_la-GACLRequest.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX 
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libgaclpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libgaclpdp_la-GACLRequest.lo `test -f 'GACLRequest.cpp' || echo '$(srcdir)/'`GACLRequest.cpp libgaclpdp_la-GACLPolicy.lo: GACLPolicy.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libgaclpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libgaclpdp_la-GACLPolicy.lo -MD -MP -MF $(DEPDIR)/libgaclpdp_la-GACLPolicy.Tpo -c -o libgaclpdp_la-GACLPolicy.lo `test -f 'GACLPolicy.cpp' || echo '$(srcdir)/'`GACLPolicy.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libgaclpdp_la-GACLPolicy.Tpo $(DEPDIR)/libgaclpdp_la-GACLPolicy.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='GACLPolicy.cpp' object='libgaclpdp_la-GACLPolicy.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libgaclpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libgaclpdp_la-GACLPolicy.lo `test -f 'GACLPolicy.cpp' || echo '$(srcdir)/'`GACLPolicy.cpp libgaclpdp_la-GACLEvaluator.lo: GACLEvaluator.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libgaclpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libgaclpdp_la-GACLEvaluator.lo -MD -MP -MF $(DEPDIR)/libgaclpdp_la-GACLEvaluator.Tpo -c -o libgaclpdp_la-GACLEvaluator.lo `test -f 'GACLEvaluator.cpp' || echo '$(srcdir)/'`GACLEvaluator.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libgaclpdp_la-GACLEvaluator.Tpo $(DEPDIR)/libgaclpdp_la-GACLEvaluator.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='GACLEvaluator.cpp' object='libgaclpdp_la-GACLEvaluator.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libgaclpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libgaclpdp_la-GACLEvaluator.lo `test -f 'GACLEvaluator.cpp' || echo '$(srcdir)/'`GACLEvaluator.cpp libgaclpdp_la-GACLPDP.lo: GACLPDP.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libgaclpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libgaclpdp_la-GACLPDP.lo -MD -MP -MF $(DEPDIR)/libgaclpdp_la-GACLPDP.Tpo -c -o libgaclpdp_la-GACLPDP.lo `test -f 'GACLPDP.cpp' || echo '$(srcdir)/'`GACLPDP.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libgaclpdp_la-GACLPDP.Tpo $(DEPDIR)/libgaclpdp_la-GACLPDP.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='GACLPDP.cpp' object='libgaclpdp_la-GACLPDP.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libgaclpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libgaclpdp_la-GACLPDP.lo `test -f 'GACLPDP.cpp' || echo '$(srcdir)/'`GACLPDP.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) 
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/PaxHeaders.7502/GACLRequest.cpp0000644000000000000000000000012411730411253023473 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.109709 30 ctime=1513200661.442765131 nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/GACLRequest.cpp0000644000175000002070000000206311730411253023541 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "GACLRequest.h" /** get_request (in charge of class-loading of ArcRequest) can only accept two types of argument: NULL, XMLNode*/ Arc::Plugin* ArcSec::GACLRequest::get_request(Arc::PluginArgument* arg) { if(arg==NULL) return NULL; Arc::ClassLoaderPluginArgument* clarg = arg?dynamic_cast(arg):NULL; if(!clarg) return NULL; Arc::XMLNode* xarg = (Arc::XMLNode*)(*clarg); if(xarg == NULL) return new ArcSec::GACLRequest(arg); ArcSec::Source source(*xarg); return new ArcSec::GACLRequest(source,arg); } //loader_descriptors __arc_request_modules__ = { // { "gacl.request", 0, &ArcSec::GACLRequest::get_request }, // { NULL, 0, NULL } //}; using namespace Arc; using namespace ArcSec; GACLRequest::GACLRequest (const Source& req, Arc::PluginArgument* parg) : Request(req,parg) { req.Get().New(reqnode); } GACLRequest::GACLRequest (Arc::PluginArgument* parg) : Request(parg) { } GACLRequest::~GACLRequest(){ } nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/PaxHeaders.7502/GACLPolicy.h0000644000000000000000000000012411730411253022747 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.109709 30 ctime=1513200661.445765168 nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/GACLPolicy.h0000644000175000002070000000215611730411253023020 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_GACLPOLICY_H__ #define __ARC_SEC_GACLPOLICY_H__ #include namespace ArcSec { class 
GACLPolicy : public Policy { public: GACLPolicy(Arc::PluginArgument* parg); GACLPolicy(const Source& source, Arc::PluginArgument* parg); GACLPolicy(const Arc::XMLNode source, Arc::PluginArgument* parg); virtual ~GACLPolicy(); virtual operator bool(void) const { return (bool)policynode; }; virtual Result eval(EvaluationCtx* ctx); virtual MatchResult match(EvaluationCtx* /* ctx */) { return NO_MATCH; }; virtual std::string getEffect() const { return ""; }; virtual EvalResult& getEvalResult(); virtual void setEvalResult(EvalResult& res); Arc::XMLNode getXML(void) { return policynode; }; virtual const char* getEvalName() const { return "gacl.evaluator"; }; virtual const char* getName() const { return "gacl.policy"; }; static Arc::Plugin* get_policy(Arc::PluginArgument* arg); private: EvalResult evalres; Arc::XMLNode policynode; protected: static Arc::Logger logger; }; } // namespace ArcSec #endif /* __ARC_SEC_GACLPOLICY_H__ */ nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/PaxHeaders.7502/GACLEvaluator.cpp0000644000000000000000000000012311730411253024004 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.107709 29 ctime=1513200661.44676518 nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/GACLEvaluator.cpp0000644000175000002070000001033511730411253024054 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif //#include #include "GACLPolicy.h" #include "GACLRequest.h" #include "GACLEvaluator.h" Arc::Plugin* ArcSec::GACLEvaluator::get_evaluator(Arc::PluginArgument* arg) { Arc::ClassLoaderPluginArgument* clarg = arg?dynamic_cast(arg):NULL; if(!clarg) return NULL; return new ArcSec::GACLEvaluator((Arc::XMLNode*)(*clarg),arg); } //loader_descriptors __arc_evaluator_modules__ = { // { "gacl.evaluator", 0, &ArcSec::GACLEvaluator::get_evaluator }, // { NULL, 0, NULL } //}; using namespace Arc; using namespace ArcSec; Arc::Logger ArcSec::GACLEvaluator::logger(Arc::Logger::rootLogger, "GACLEvaluator"); GACLEvaluator::GACLEvaluator(Arc::XMLNode* cfg, Arc::PluginArgument* parg) : Evaluator(cfg,parg), plstore(NULL) { plstore = new PolicyStore("", "gacl.policy", NULL); if(!plstore) logger.msg(ERROR, "Can not create PolicyStore object"); combining_alg = EvaluatorFailsOnDeny; } GACLEvaluator::GACLEvaluator(const char * cfgfile, Arc::PluginArgument* parg) : Evaluator(cfgfile,parg){ plstore = new PolicyStore("", "gacl.policy", NULL); if(!plstore) logger.msg(ERROR, "Can not create PolicyStore object"); combining_alg = EvaluatorFailsOnDeny; } Response* GACLEvaluator::evaluate(Request* request, Policy* policyobj) { GACLPolicy* gpol = dynamic_cast(policyobj); if(!gpol) return NULL; GACLRequest* greq = dynamic_cast(request); if(!greq) return NULL; EvaluationCtx ctx(greq); ResponseItem* ritem = new ResponseItem; if(!ritem) return NULL; Response* resp = new Response(); if(!resp) { delete ritem; return NULL; }; resp->setRequestSize(0); ritem->reqtp = NULL; ritem->res = gpol->eval(&ctx); //greq->getXML().New(ritem->reqxml); //ritem->plsxml.push_back(gpol->getXML()); //ritem->pls.push_back(gpol); resp->addResponseItem(ritem); return resp; } Response* GACLEvaluator::evaluate(const Source& request, const Source& policy) { GACLRequest greq(request,NULL); GACLPolicy gpol(policy,NULL); return evaluate(&greq,&gpol); } Response* GACLEvaluator::evaluate(Request* request, const Source& policy) { GACLPolicy gpol(policy,NULL); return evaluate(request,&gpol); } Response* GACLEvaluator::evaluate(const Source& request, Policy* policyobj) { GACLRequest greq(request,NULL); return evaluate(&greq,policyobj); } Response* 
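/* Evaluate the request against every policy currently held in the PolicyStore.
   Each stored policy is evaluated in turn and the individual results are
   combined according to combining_alg: EvaluatorStopsOnPermit stops at the
   first PERMIT, while EvaluatorStopsOnDeny and EvaluatorFailsOnDeny stop at
   the first DENY; otherwise all policies are visited.  The final decision is
   chosen with the precedence PERMIT > DENY > INDETERMINATE > NOT_APPLICABLE,
   defaulting to DENY when no policy produced any result. */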
GACLEvaluator::evaluate(Request* request) { if(!plstore) return NULL; GACLRequest* greq = dynamic_cast(request); if(!greq) return NULL; EvaluationCtx ctx(greq); ResponseItem* ritem = new ResponseItem; if(!ritem) return NULL; Response* resp = new Response(); if(!resp) { delete ritem; return NULL; }; Result result = DECISION_DENY; std::list policies = plstore->findPolicy(&ctx); std::list::iterator policyit; bool have_permit = false; bool have_deny = false; bool have_indeterminate = false; bool have_notapplicable = false; for(policyit = policies.begin(); policyit != policies.end(); policyit++){ Result res = ((Policy*)(*policyit))->eval(&ctx); if(res == DECISION_PERMIT){ have_permit=true; if(combining_alg == EvaluatorStopsOnPermit) break; } else if(res == DECISION_DENY) { have_deny=true; if(combining_alg == EvaluatorStopsOnDeny) break; if(combining_alg == EvaluatorFailsOnDeny) break; } else if(res == DECISION_INDETERMINATE) { have_indeterminate=true; } else if(res == DECISION_NOT_APPLICABLE) { have_notapplicable=true; }; }; if(have_permit) { result = DECISION_PERMIT; } else if(have_deny) { result = DECISION_DENY; } else if(have_indeterminate) { result = DECISION_INDETERMINATE; } else if(have_notapplicable) { result = DECISION_NOT_APPLICABLE; }; resp->setRequestSize(0); ritem->reqtp = NULL; ritem->res = result; //greq->getXML().New(ritem->reqxml); //ritem->plsxml.push_back(gpol->getXML()); //ritem->pls.push_back(gpol); resp->addResponseItem(ritem); return resp; } Response* GACLEvaluator::evaluate(const Source& request) { GACLRequest greq(request,NULL); return evaluate(&greq); } GACLEvaluator::~GACLEvaluator(){ if(plstore) delete plstore; } nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/PaxHeaders.7502/GACLPDP.h0000644000000000000000000000012412110410653022127 xustar000000000000000027 mtime=1361187243.181034 27 atime=1513200575.109709 30 ctime=1513200661.449765217 nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/GACLPDP.h0000644000175000002070000000154312110410653022177 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_GACLPDP_H__ #define __ARC_SEC_GACLPDP_H__ #include #include #include #include #include /* #include #include #include */ namespace ArcSec { class GACLPDP: public PDP { public: static Arc::SecAttrFormat GACL; static Arc::Plugin* get_gacl_pdp(Arc::PluginArgument *arg); GACLPDP(Arc::Config* cfg, Arc::PluginArgument* parg); virtual ~GACLPDP(); virtual PDPStatus isPermitted(Arc::Message *msg) const; private: std::list select_attrs; std::list reject_attrs; std::list policy_locations; Arc::XMLNodeContainer policy_docs; protected: static Arc::Logger logger; }; } // namespace ArcSec #endif /* __ARC_SEC_GACLPDP_H__ */ nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/PaxHeaders.7502/GACLPolicy.cpp0000644000000000000000000000012412311664431023306 xustar000000000000000027 mtime=1395091737.913805 27 atime=1513200575.111709 30 ctime=1513200661.444765156 nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/GACLPolicy.cpp0000644000175000002070000001166312311664431023362 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "GACLPolicy.h" #include "GACLRequest.h" Arc::Logger ArcSec::GACLPolicy::logger(Arc::Logger::rootLogger, "GACLPolicy"); Arc::Plugin* ArcSec::GACLPolicy::get_policy(Arc::PluginArgument* arg) { if(arg==NULL) return NULL; Arc::ClassLoaderPluginArgument* clarg = arg?dynamic_cast(arg):NULL; if(!clarg) return NULL; Arc::XMLNode* doc = (Arc::XMLNode*)(*clarg); if(doc==NULL) { std::cerr<<"GACLPolicy creation needs XMLNode as argument"< 0) { for(int n=0;;++n) { XMLNode pitem = 
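/* child-by-child comparison: every child element of the policy identity must
   have a matching child of the same name in the request identity (matched
   recursively); identities without child elements are instead compared by
   their plain string content */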
pid.Child(n); if(!pitem) break; XMLNode ritem = rid[pitem.Name()]; for(;(bool)ritem;++ritem) { if(CompareIdentity(pitem,ritem)) break; }; if(!ritem) return false; }; return true; }; return (((std::string)pid) == ((std::string)rid)); } static void CollectActions(XMLNode actions,std::list& actions_list) { for(int n = 0;;++n) { XMLNode action = actions.Child(n); if(!action) break; actions_list.push_back(action.Name()); }; } static bool FindAction(const std::string& action,const std::list& actions) { for(std::list::const_iterator act = actions.begin();act!=actions.end();++act) { if((*act) == action) return true; }; return false; } Result GACLPolicy::eval(EvaluationCtx* ctx){ if(!ctx) return DECISION_INDETERMINATE; Request* req = ctx->getRequest(); if(!req) return DECISION_INDETERMINATE; GACLRequest* greq = dynamic_cast(req); if(!greq) return DECISION_INDETERMINATE; // Although it is possible to split GACL request and policy to // attributes current implementation simply evaluates XMLs directly. // Doing it "right way" is TODO. //Result result = DECISION_DENY; XMLNode requestentry = greq->getXML(); if(requestentry.Name() == "gacl") requestentry=requestentry["entry"]; if(requestentry.Name() != "entry") return DECISION_INDETERMINATE; for(;(bool)requestentry;++requestentry) { XMLNode policyentry = policynode["entry"]; std::list allow; std::list deny; for(;(bool)policyentry;++policyentry) { bool matched = false; for(int n = 0;;++n) { XMLNode pid = policyentry.Child(n); if(!pid) break; if(pid.Name() == "allow") continue; if(pid.Name() == "deny") continue; if(pid.Name() == "any-user") { matched=true; break; }; // TODO: somehow check if user really authenticated if(pid.Name() == "auth-user") { matched=true; break; }; XMLNode rid = requestentry[pid.Name()]; for(;(bool)rid;++rid) { if(CompareIdentity(pid,rid)) break; }; if((bool)rid) { matched=true; break; }; }; if(matched) { XMLNode pallow = policyentry["allow"]; XMLNode pdeny = policyentry["deny"]; CollectActions(pallow,allow); CollectActions(pdeny,deny); }; }; allow.sort(); allow.unique(); deny.sort(); deny.unique(); if(allow.empty()) return DECISION_DENY; std::list rallow; CollectActions(requestentry["allow"],rallow); if(rallow.empty()) return DECISION_DENY; // Unlikely to happen std::list::iterator act = rallow.begin(); for(;act!=rallow.end();++act) { if(!FindAction(*act,allow)) break; if(FindAction(*act,deny)) break; }; if(act != rallow.end()) return DECISION_DENY; //if(act == rallow.end()) result=DECISION_PERMIT; }; return DECISION_PERMIT; //return result; } EvalResult& GACLPolicy::getEvalResult(){ return evalres; } void GACLPolicy::setEvalResult(EvalResult& res){ evalres = res; } GACLPolicy::~GACLPolicy(){ } nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/PaxHeaders.7502/GACLEvaluator.h0000644000000000000000000000012411730411253023452 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.109709 30 ctime=1513200661.447765193 nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/GACLEvaluator.h0000644000175000002070000000445011730411253023522 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_GACLEVALUATOR_H__ #define __ARC_SEC_GACLEVALUATOR_H__ #include #include /* #include #include #include #include #include #include #include #include #include */ namespace ArcSec { class GACLEvaluator : public Evaluator { private: static Arc::Logger logger; PolicyStore *plstore; public: GACLEvaluator (Arc::XMLNode* cfg, Arc::PluginArgument* parg); GACLEvaluator (const char * cfgfile, Arc::PluginArgument* parg); virtual ~GACLEvaluator(); /**Evaluate the request based 
on the policy information inside PolicyStore*/ virtual Response* evaluate(Request* request); virtual Response* evaluate(const Source& request); virtual Response* evaluate(Request* request, const Source& policy); virtual Response* evaluate(const Source& request, const Source& policy); virtual Response* evaluate(Request* request, Policy* policyobj); virtual Response* evaluate(const Source& request, Policy* policyobj); virtual AttributeFactory* getAttrFactory () { return NULL; /*attrfactory;*/ }; virtual FnFactory* getFnFactory () { return NULL; /*fnfactory;*/ }; virtual AlgFactory* getAlgFactory () { return NULL; /*algfactory;*/ }; virtual void addPolicy(const Source& policy,const std::string& id = "") { plstore->addPolicy(policy, NULL /* context */, id); }; virtual void addPolicy(Policy* policy,const std::string& id = "") { plstore->addPolicy(policy, NULL /* context */, id); }; virtual void removePolicies(void) { plstore->removePolicies(); }; virtual void setCombiningAlg(EvaluatorCombiningAlg alg) { combining_alg = alg; } ; virtual void setCombiningAlg(CombiningAlg* /* alg */) { } ; virtual const char* getName() const { return "gacl.evaluator"; }; static Arc::Plugin* get_evaluator(Arc::PluginArgument* arg); protected: virtual Response* evaluate(EvaluationCtx* /* ctx */) { return NULL; }; private: virtual void parsecfg(Arc::XMLNode& /* cfg */) { }; EvaluatorCombiningAlg combining_alg; }; } // namespace ArcSec #endif /* __ARC_SEC_GACLEVALUATOR_H__ */ nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/PaxHeaders.7502/GACLRequest.h0000644000000000000000000000012411730411253023140 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.111709 30 ctime=1513200661.443765144 nordugrid-arc-5.4.2/src/hed/shc/gaclpdp/GACLRequest.h0000644000175000002070000000212311730411253023203 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_GACLREQUEST_H__ #define __ARC_SEC_GACLREQUEST_H__ #include #include namespace ArcSec { class GACLRequest : public Request { public: virtual ReqItemList getRequestItems () const { return rlist; }; virtual void setRequestItems (ReqItemList /* sl */) { }; virtual void addRequestItem(Attrs& /* sub */, Attrs& /* res */, Attrs& /* act */, Attrs& /* ctx */) { }; virtual void setAttributeFactory(AttributeFactory* /* attributefactory */) { }; virtual void make_request() { }; GACLRequest (Arc::PluginArgument* parg); GACLRequest (const Source& source, Arc::PluginArgument* parg); virtual ~GACLRequest(); Arc::XMLNode getXML(void) { return reqnode; }; virtual const char* getEvalName() const { return "gacl.evaluator"; }; virtual const char* getName() const { return "gacl.request"; }; static Arc::Plugin* get_request(Arc::PluginArgument* arg); virtual Arc::XMLNode& getReqNode() { return reqnode; }; private: Arc::XMLNode reqnode; }; } // namespace ArcSec #endif /* __ARC_SEC_GACLREQUEST_H__ */ nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/pdpserviceinvoker0000644000000000000000000000013213214316025022762 xustar000000000000000030 mtime=1513200661.267762991 30 atime=1513200668.722854169 30 ctime=1513200661.267762991 nordugrid-arc-5.4.2/src/hed/shc/pdpserviceinvoker/0000755000175000002070000000000013214316025023105 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/pdpserviceinvoker/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515025103 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200600.611021135 30 ctime=1513200661.261762918 nordugrid-arc-5.4.2/src/hed/shc/pdpserviceinvoker/Makefile.am0000644000175000002070000000076012052416515025150 
0ustar00mockbuildmock00000000000000SUBDIRS = schema noinst_LTLIBRARIES = libarcpdpserviceinvoker.la libarcpdpserviceinvoker_la_SOURCES = PDPServiceInvoker.cpp PDPServiceInvoker.h libarcpdpserviceinvoker_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libarcpdpserviceinvoker_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la nordugrid-arc-5.4.2/src/hed/shc/pdpserviceinvoker/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315730025105 xustar000000000000000030 mtime=1513200600.655021673 30 atime=1513200649.326616946 29 ctime=1513200661.26276293 nordugrid-arc-5.4.2/src/hed/shc/pdpserviceinvoker/Makefile.in0000644000175000002070000007064013214315730025163 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/pdpserviceinvoker DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libarcpdpserviceinvoker_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la am_libarcpdpserviceinvoker_la_OBJECTS = \ libarcpdpserviceinvoker_la-PDPServiceInvoker.lo libarcpdpserviceinvoker_la_OBJECTS = \ $(am_libarcpdpserviceinvoker_la_OBJECTS) libarcpdpserviceinvoker_la_LINK = $(LIBTOOL) --tag=CXX \ $(AM_LIBTOOLFLAGS) 
$(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarcpdpserviceinvoker_la_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcpdpserviceinvoker_la_SOURCES) DIST_SOURCES = $(libarcpdpserviceinvoker_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = 
@ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV 
= @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ 
pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = schema noinst_LTLIBRARIES = libarcpdpserviceinvoker.la libarcpdpserviceinvoker_la_SOURCES = PDPServiceInvoker.cpp PDPServiceInvoker.h libarcpdpserviceinvoker_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libarcpdpserviceinvoker_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/pdpserviceinvoker/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/pdpserviceinvoker/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcpdpserviceinvoker.la: $(libarcpdpserviceinvoker_la_OBJECTS) $(libarcpdpserviceinvoker_la_DEPENDENCIES) $(libarcpdpserviceinvoker_la_LINK) $(libarcpdpserviceinvoker_la_OBJECTS) $(libarcpdpserviceinvoker_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcpdpserviceinvoker_la-PDPServiceInvoker.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcpdpserviceinvoker_la-PDPServiceInvoker.lo: PDPServiceInvoker.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdpserviceinvoker_la_CXXFLAGS) $(CXXFLAGS) -MT libarcpdpserviceinvoker_la-PDPServiceInvoker.lo -MD -MP -MF $(DEPDIR)/libarcpdpserviceinvoker_la-PDPServiceInvoker.Tpo -c -o libarcpdpserviceinvoker_la-PDPServiceInvoker.lo `test -f 'PDPServiceInvoker.cpp' || echo '$(srcdir)/'`PDPServiceInvoker.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcpdpserviceinvoker_la-PDPServiceInvoker.Tpo $(DEPDIR)/libarcpdpserviceinvoker_la-PDPServiceInvoker.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='PDPServiceInvoker.cpp' object='libarcpdpserviceinvoker_la-PDPServiceInvoker.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX 
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdpserviceinvoker_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcpdpserviceinvoker_la-PDPServiceInvoker.lo `test -f 'PDPServiceInvoker.cpp' || echo '$(srcdir)/'`PDPServiceInvoker.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-noinstLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/pdpserviceinvoker/PaxHeaders.7502/PDPServiceInvoker.h0000644000000000000000000000012412110410653026507 xustar000000000000000027 mtime=1361187243.181034 27 atime=1513200575.002707 30 ctime=1513200661.265762967 nordugrid-arc-5.4.2/src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.h0000644000175000002070000000214112110410653026552 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_PDPSERVICEINVOKER_H__ #define __ARC_SEC_PDPSERVICEINVOKER_H__ #include #include #include #include #include namespace ArcSec { ///PDPServiceInvoker - client which will invoke pdpservice class PDPServiceInvoker : public PDP { public: static Arc::Plugin* get_pdpservice_invoker(Arc::PluginArgument* arg); PDPServiceInvoker(Arc::Config* cfg, Arc::PluginArgument* parg); virtual ~PDPServiceInvoker(); virtual PDPStatus isPermitted(Arc::Message *msg) const; private: Arc::ClientSOAP* client; std::string proxy_path; std::string cert_path; std::string key_path; std::string ca_dir; std::string ca_file; std::list select_attrs; std::list reject_attrs; std::list policy_locations; bool is_xacml; //If the policy decision request is with XACML format bool is_saml; //If the "SAML2.0 profile of XACML v2.0" is used protected: static Arc::Logger logger; }; } // namespace ArcSec #endif /* __ARC_SEC_PDPSERVICEINVOKER_H__ */ nordugrid-arc-5.4.2/src/hed/shc/pdpserviceinvoker/PaxHeaders.7502/PDPServiceInvoker.cpp0000644000000000000000000000012413213445240027047 xustar000000000000000027 mtime=1512983200.815191 27 atime=1513200575.001707 30 ctime=1513200661.264762954 nordugrid-arc-5.4.2/src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp0000644000175000002070000001614113213445240027117 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include //#include //#include #include "PDPServiceInvoker.h" Arc::Logger ArcSec::PDPServiceInvoker::logger(Arc::Logger::getRootLogger(), "ArcSec.PDPServiceInvoker"); #define SAML_NAMESPACE "urn:oasis:names:tc:SAML:2.0:assertion" #define SAMLP_NAMESPACE "urn:oasis:names:tc:SAML:2.0:protocol" #define XACML_SAMLP_NAMESPACE "urn:oasis:xacml:2.0:saml:protocol:schema:os" using namespace Arc; namespace ArcSec { Plugin* PDPServiceInvoker::get_pdpservice_invoker(PluginArgument* arg) { PDPPluginArgument* pdparg = arg?dynamic_cast(arg):NULL; if(!pdparg) return NULL; return new PDPServiceInvoker((Config*)(*pdparg),arg); } PDPServiceInvoker::PDPServiceInvoker(Config* cfg,Arc::PluginArgument* parg):PDP(cfg,parg), client(NULL), is_xacml(false), is_saml(false) { XMLNode filter = (*cfg)["Filter"]; if((bool)filter) { XMLNode select_attr = filter["Select"]; XMLNode reject_attr = filter["Reject"]; for(;(bool)select_attr;++select_attr) select_attrs.push_back((std::string)select_attr); for(;(bool)reject_attr;++reject_attr) reject_attrs.push_back((std::string)reject_attr); }; //Create a SOAP client logger.msg(Arc::INFO, "Creating a pdpservice client"); std::string url_str; url_str = (std::string)((*cfg)["Endpoint"]); Arc::URL url(url_str); std::cout<<"URL: "<Auth()->Filter(select_attrs,reject_attrs); MessageAuth* cauth = msg->AuthContext()->Filter(select_attrs,reject_attrs); if((!mauth) && (!cauth)) { logger.msg(ERROR,"Missing security object in message"); return false; }; NS ns; XMLNode requestxml(ns,""); if(mauth) { if(!mauth->Export(is_xacml? 
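/* export the collected security attributes as an XACML request context when
   is_xacml is set, otherwise as the native ARC authorization request */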
SecAttr::XACML : SecAttr::ARCAuth,requestxml)) { delete mauth; logger.msg(ERROR,"Failed to convert security information to ARC request"); return false; }; delete mauth; }; if(cauth) { if(!cauth->Export(is_xacml? SecAttr::XACML : SecAttr::ARCAuth,requestxml)) { delete cauth; logger.msg(ERROR,"Failed to convert security information to ARC request"); return false; }; delete cauth; }; { std::string s; requestxml.GetXML(s); logger.msg(DEBUG,"ARC Auth. request: %s",s); }; if(requestxml.Size() <= 0) { logger.msg(ERROR,"No requested security information was collected"); return false; }; //Invoke the remote pdp service if(is_saml) { Arc::NS ns; ns["saml"] = SAML_NAMESPACE; ns["samlp"] = SAMLP_NAMESPACE; ns["xacml-samlp"] = XACML_SAMLP_NAMESPACE; Arc::XMLNode authz_query(ns, "xacml-samlp:XACMLAuthzDecisionQuery"); std::string query_id = Arc::UUID(); authz_query.NewAttribute("ID") = query_id; Arc::Time t; std::string current_time = t.str(Arc::UTCTime); authz_query.NewAttribute("IssueInstant") = current_time; authz_query.NewAttribute("Version") = std::string("2.0"); Arc::Credential cred(cert_path.empty() ? proxy_path : cert_path, cert_path.empty() ? proxy_path : key_path, ca_dir, ca_file); std::string local_dn_str = cred.GetDN(); std::string local_dn = Arc::convert_to_rdn(local_dn_str); std::string issuer_name = local_dn; authz_query.NewChild("saml:Issuer") = issuer_name; authz_query.NewAttribute("InputContextOnly") = std::string("false"); authz_query.NewAttribute("ReturnContext") = std::string("true"); authz_query.NewChild(requestxml); Arc::NS req_ns; Arc::SOAPEnvelope req_env(req_ns); req_env.NewChild(authz_query); Arc::PayloadSOAP req(req_env); Arc::PayloadSOAP* resp = NULL; if(client) { Arc::MCC_Status status = client->process(&req,&resp); if(!status) { logger.msg(Arc::ERROR, "Policy Decision Service invocation failed"); } if(resp == NULL) { logger.msg(Arc::ERROR,"There was no SOAP response"); } } std::string authz_res; if(resp) { std::string str; resp->GetXML(str); logger.msg(Arc::INFO, "Response: %s", str); std::string authz_res = (std::string)((*resp)["samlp:Response"]["saml:Assertion"]["xacml-saml:XACMLAuthzDecisionStatement"]["xacml-context:Response"]["xacml-context:Result"]["xacml-context:Decision"]); delete resp; } if(authz_res == "Permit") { logger.msg(Arc::INFO,"Authorized from remote pdp service"); return true; } else { logger.msg(Arc::INFO,"Unauthorized from remote pdp service"); return false; } } else { Arc::NS req_ns; //req_ns["ra"] = "http://www.nordugrid.org/schemas/request-arc"; req_ns["pdp"] = "http://www.nordugrid.org/schemas/pdp"; Arc::PayloadSOAP req(req_ns); Arc::XMLNode reqnode = req.NewChild("pdp:GetPolicyDecisionRequest"); reqnode.NewChild(requestxml); Arc::PayloadSOAP* resp = NULL; if(client) { Arc::MCC_Status status = client->process(&req,&resp); if(!status) { logger.msg(Arc::ERROR, "Policy Decision Service invocation failed"); } if(resp == NULL) { logger.msg(Arc::ERROR,"There was no SOAP response"); } } std::string authz_res; if(resp) { std::string str; resp->GetXML(str); logger.msg(Arc::INFO, "Response: %s", str); // TODO: Fix namespaces authz_res=(std::string)((*resp)["pdp:GetPolicyDecisionResponse"]["response:Response"]["response:AuthZResult"]); delete resp; } if(authz_res == "PERMIT") { logger.msg(Arc::INFO,"Authorized from remote pdp service"); return true; } else { logger.msg(Arc::INFO,"Unauthorized from remote pdp service"); return false; } } } PDPServiceInvoker::~PDPServiceInvoker(){ if(client != NULL) delete client; } } // namespace ArcSec 
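A minimal usage sketch for the configuration read by the PDPServiceInvoker constructor above. The element names ("Endpoint", "Filter", "Select", "Reject") are taken from that constructor; the PDP element name, the endpoint URL and the attribute identifiers are purely illustrative, and the snippet assumes the installed nordugrid-arc development headers (arc/XMLNode.h) and that Arc::XMLNode can be constructed directly from an XML string, as is done elsewhere in the ARC code base.

#include <iostream>
#include <string>
#include <arc/XMLNode.h>

int main() {
  // Hypothetical PDP configuration fragment containing the elements the
  // PDPServiceInvoker constructor looks up.
  const std::string cfg =
    "<PDP name=\"pdpservice.invoker\">"
    "  <Endpoint>https://pdp.example.org:60000/pdpservice</Endpoint>"
    "  <Filter>"
    "    <Select>TLS</Select>"
    "    <Reject>DelegationPolicy</Reject>"
    "  </Filter>"
    "</PDP>";
  Arc::XMLNode node(cfg);
  // The same accesses the constructor performs: cast element content to
  // std::string and iterate repeated elements with operator bool / operator++.
  std::cout << "Endpoint: " << (std::string)node["Endpoint"] << std::endl;
  for (Arc::XMLNode sel = node["Filter"]["Select"]; (bool)sel; ++sel)
    std::cout << "Select: " << (std::string)sel << std::endl;
  for (Arc::XMLNode rej = node["Filter"]["Reject"]; (bool)rej; ++rej)
    std::cout << "Reject: " << (std::string)rej << std::endl;
  return 0;
}

Beyond these element names the sketch makes no claims about the plugin's behaviour; how the endpoint is contacted and how the decision is extracted is shown in PDPServiceInvoker.cpp above.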
nordugrid-arc-5.4.2/src/hed/shc/pdpserviceinvoker/PaxHeaders.7502/schema0000644000000000000000000000013213214316025024222 xustar000000000000000030 mtime=1513200661.292763297 30 atime=1513200668.722854169 30 ctime=1513200661.292763297 nordugrid-arc-5.4.2/src/hed/shc/pdpserviceinvoker/schema/0000755000175000002070000000000013214316025024345 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/pdpserviceinvoker/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012611255705373026351 xustar000000000000000027 mtime=1253542651.230162 30 atime=1513200600.671021869 29 ctime=1513200661.28976326 nordugrid-arc-5.4.2/src/hed/shc/pdpserviceinvoker/schema/Makefile.am0000644000175000002070000000015211255705373026412 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = PDPServiceInvoker.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/shc/pdpserviceinvoker/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315730026346 xustar000000000000000030 mtime=1513200600.702022248 30 atime=1513200649.343617154 30 ctime=1513200661.291763284 nordugrid-arc-5.4.2/src/hed/shc/pdpserviceinvoker/schema/Makefile.in0000644000175000002070000004357013214315730026425 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/pdpserviceinvoker/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS 
= @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = 
@LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ 
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = PDPServiceInvoker.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/pdpserviceinvoker/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/pdpserviceinvoker/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; 
then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/pdpserviceinvoker/schema/PaxHeaders.7502/PDPServiceInvoker.xsd0000644000000000000000000000012411457664434030343 xustar000000000000000027 mtime=1287612700.761119 27 atime=1513200575.004707 30 ctime=1513200661.292763297 nordugrid-arc-5.4.2/src/hed/shc/pdpserviceinvoker/schema/PDPServiceInvoker.xsd0000644000175000002070000001203611457664434030412 0ustar00mockbuildmock00000000000000 This element defines Security Attributes to select and reject. If there are no Select elements all Attributes are used except those listed in Reject elements. This element is to specify endpoint about remote pdpservice. It will be configured under the This element is to specified the format of request. Two options are recognized: xacml, arc. Default is "arc". This element is to specified the protocol for transfering request. Default is the arc specific protocol; if "SAML" is specified, then the "SAML2.0 profile of the XACML v2.0" will be used for carrying request. If RequestFormat is specified to XACML, and Transfer is specified to SAML, then this pdpservice invoker is able to interact with third-party pdp service, such as the GLite authorization service. Two options are recognized: saml, arc. Default is "arc". Location of private key. Default is $HOME/.globus/userkey.pem. Location of public certificate. Default is $HOME/.globus/usercert.pem. Location of proxy credentials - includes certificates, key and chain of involved certificates. Overwrites elements KeyPath and CertificatePath. Default is /tmp/hash{userid}.0 Location of certificate of CA. Default is none. Directory containing certificates of accepted CAs. Default is /etc/grid-security/certificates nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/test.cpp0000644000000000000000000000012411570717263020775 xustar000000000000000027 mtime=1306762931.686321 27 atime=1513200574.998707 30 ctime=1513200661.021759982 nordugrid-arc-5.4.2/src/hed/shc/test.cpp0000644000175000002070000001617411570717263021053 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include //#include #include #include int main(void){ signal(SIGTTOU,SIG_IGN); signal(SIGTTIN,SIG_IGN); signal(SIGPIPE,SIG_IGN); Arc::Logger logger(Arc::Logger::rootLogger, "PDPTest"); //Arc::LogStream logcerr(std::cerr); //Arc::Logger::rootLogger.addDestination(logcerr); logger.msg(Arc::INFO, "Start test"); // NOTE: ClassLoader can't distiguish between object of different kinds of classes // As result if constructor of some kind of object is not clever enough to distinguish // that it is supplied with configuration of different type method Instance() may return // unexpected object type. Because of that we need separate loaders for different types // of classes. 
ArcSec::EvaluatorLoader eval_loader; ArcSec::Evaluator* eval = NULL; ArcSec::Policy* policy = NULL; // Load policy dinamically detecting it's type // Load evaluator by policy type { ArcSec::SourceFile source1("policy1.xml"); if(!source1) { std::cerr<<"policy1.xml failed to load"<getEvalName()<getEvalName()<Instance(evaluator, (void**)(void*)&modulecfg)); #endif std::string evaluator = "arc.evaluator"; eval = eval_loader.getEvaluator(evaluator); if(eval == NULL) { logger.msg(Arc::ERROR, "Can not dynamically produce Evaluator"); return 0; } ArcSec::Response *resp = NULL; //Input request from a file: Request.xml logger.msg(Arc::INFO, "Input request from a file: Request.xml"); //Evaluate the request std::ifstream f("Request.xml"); ArcSec::Source source(f); resp = eval->evaluate(source); //Get the response logger.msg(Arc::INFO, "There is %d subjects, which satisfy at least one policy", (resp->getResponseItems()).size()); ArcSec::ResponseList rlist = resp->getResponseItems(); int size = rlist.size(); for(int i = 0; i< size; i++){ ArcSec::ResponseItem* respitem = rlist[i]; ArcSec::RequestTuple* tp = respitem->reqtp; ArcSec::Subject::iterator it; ArcSec::Subject subject = tp->sub; for (it = subject.begin(); it!= subject.end(); it++){ ArcSec::AttributeValue *attrval; ArcSec::RequestAttribute *attr; attr = dynamic_cast(*it); if(attr){ attrval = (*it)->getAttributeValue(); if(attrval) logger.msg(Arc::INFO,"Attribute Value (1): %s", attrval->encode()); } } } if(resp){ delete resp; resp = NULL; } //Input/Set request from code logger.msg(Arc::INFO, "Input request from code"); //Request example /* /O=NorduGrid/OU=UIO/CN=test file://home/test read copy 2007-09-10T20:30:20/P1Y1M */ //Data Structure to compose a request /* typedef struct{ std::string value; std::string type; } Attr; typedef std::list Attrs; */ ArcSec::Attr subject_attr1, subject_attr2, resource_attr1, action_attr1, action_attr2, context_attr1; ArcSec::Attrs sub, res, act, ctx; subject_attr1.type = "string"; subject_attr1.value = "/O=NorduGrid/OU=UIO/CN=test"; sub.addItem(subject_attr1); resource_attr1.type = "string"; resource_attr1.value = "file://home/test"; res.addItem(resource_attr1); action_attr1.type = "string"; action_attr1.value = "read"; act.addItem(action_attr1); action_attr2.type = "string"; action_attr2.value = "copy"; act.addItem(action_attr2); context_attr1.type = "period"; context_attr1.value = "2007-09-10T20:30:20/P1Y1M"; ctx.addItem(context_attr1); ArcSec::Request* request = NULL; std::string requestor = "arc.request"; Arc::ClassLoader* classloader = NULL; classloader = Arc::ClassLoader::getClassLoader(); request = (ArcSec::Request*)(classloader->Instance(requestor)); if(request == NULL) logger.msg(Arc::ERROR, "Can not dynamically produce Request"); //Add the request information into Request object request->addRequestItem(sub, res, act, ctx); //Evaluate the request //resp = eval->evaluate(request); //Evalute the request with policy argument std::ifstream f1("Policy_Example.xml"); ArcSec::Source source1(f1); resp = eval->evaluate(request, source1); //Get the response logger.msg(Arc::INFO, "There is %d subjects, which satisfy at least one policy", (resp->getResponseItems()).size()); rlist = resp->getResponseItems(); size = rlist.size(); for(int i = 0; i < size; i++){ ArcSec::ResponseItem* respitem = rlist[i]; ArcSec::RequestTuple* tp = respitem->reqtp; ArcSec::Subject::iterator it; ArcSec::Subject subject = tp->sub; for (it = subject.begin(); it!= subject.end(); it++){ ArcSec::AttributeValue *attrval; ArcSec::RequestAttribute 
*attr; attr = dynamic_cast(*it); if(attr){ attrval = (*it)->getAttributeValue(); if(attrval) logger.msg(Arc::INFO,"Attribute Value (2): %s", attrval->encode()); } } } if(resp){ delete resp; resp = NULL; } delete eval; delete request; return 0; } nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/denypdp0000644000000000000000000000013213214316025020663 xustar000000000000000030 mtime=1513200661.068760557 30 atime=1513200668.722854169 30 ctime=1513200661.068760557 nordugrid-arc-5.4.2/src/hed/shc/denypdp/0000755000175000002070000000000013214316025021006 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/denypdp/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612052416515023003 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200600.335017759 29 ctime=1513200661.06576052 nordugrid-arc-5.4.2/src/hed/shc/denypdp/Makefile.am0000644000175000002070000000047712052416515023056 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libdenypdp.la libdenypdp_la_SOURCES = DenyPDP.cpp DenyPDP.h libdenypdp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libdenypdp_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la nordugrid-arc-5.4.2/src/hed/shc/denypdp/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315730023007 xustar000000000000000030 mtime=1513200600.378018286 30 atime=1513200649.373617521 30 ctime=1513200661.066760533 nordugrid-arc-5.4.2/src/hed/shc/denypdp/Makefile.in0000644000175000002070000005447213214315730023071 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/denypdp DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libdenypdp_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libdenypdp_la_OBJECTS = libdenypdp_la-DenyPDP.lo libdenypdp_la_OBJECTS = $(am_libdenypdp_la_OBJECTS) libdenypdp_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libdenypdp_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libdenypdp_la_SOURCES) DIST_SOURCES = $(libdenypdp_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ 
AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = 
@JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ 
htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ noinst_LTLIBRARIES = libdenypdp.la libdenypdp_la_SOURCES = DenyPDP.cpp DenyPDP.h libdenypdp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libdenypdp_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/denypdp/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/denypdp/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libdenypdp.la: $(libdenypdp_la_OBJECTS) $(libdenypdp_la_DEPENDENCIES) $(libdenypdp_la_LINK) $(libdenypdp_la_OBJECTS) $(libdenypdp_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdenypdp_la-DenyPDP.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libdenypdp_la-DenyPDP.lo: DenyPDP.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdenypdp_la_CXXFLAGS) $(CXXFLAGS) -MT libdenypdp_la-DenyPDP.lo -MD -MP -MF $(DEPDIR)/libdenypdp_la-DenyPDP.Tpo -c -o libdenypdp_la-DenyPDP.lo `test -f 'DenyPDP.cpp' || echo '$(srcdir)/'`DenyPDP.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdenypdp_la-DenyPDP.Tpo $(DEPDIR)/libdenypdp_la-DenyPDP.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DenyPDP.cpp' object='libdenypdp_la-DenyPDP.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdenypdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libdenypdp_la-DenyPDP.lo `test -f 'DenyPDP.cpp' || echo '$(srcdir)/'`DenyPDP.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) 
$(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/denypdp/PaxHeaders.7502/DenyPDP.cpp0000644000000000000000000000012412110410653022704 xustar000000000000000027 mtime=1361187243.181034 27 atime=1513200575.030708 30 ctime=1513200661.067760545 nordugrid-arc-5.4.2/src/hed/shc/denypdp/DenyPDP.cpp0000644000175000002070000000103712110410653022752 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "DenyPDP.h" using namespace Arc; using namespace ArcSec; Plugin* DenyPDP::get_deny_pdp(PluginArgument* arg) { ArcSec::PDPPluginArgument* pdparg = arg?dynamic_cast(arg):NULL; if(!pdparg) return NULL; return new DenyPDP((Arc::Config*)(*pdparg),arg); } DenyPDP::DenyPDP(Config* cfg,PluginArgument* parg):PDP(cfg,parg){ } PDPStatus DenyPDP::isPermitted(Message*) const { return false; } nordugrid-arc-5.4.2/src/hed/shc/denypdp/PaxHeaders.7502/DenyPDP.h0000644000000000000000000000012412110410653022351 xustar000000000000000027 mtime=1361187243.181034 27 atime=1513200575.030708 30 ctime=1513200661.068760557 nordugrid-arc-5.4.2/src/hed/shc/denypdp/DenyPDP.h0000644000175000002070000000076512110410653022426 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_DENYPDP_H__ #define __ARC_SEC_DENYPDP_H__ #include #include #include namespace ArcSec { /// This PDP always returns false (deny) class DenyPDP : public PDP { public: static Arc::Plugin* get_deny_pdp(Arc::PluginArgument* arg); DenyPDP(Arc::Config* cfg,Arc::PluginArgument* parg); virtual ~DenyPDP() {}; virtual PDPStatus isPermitted(Arc::Message *msg) const; }; } // namespace ArcSec #endif /* __ARC_SEC_DENYPDP_H__ */ nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/SecHandlerPlugin.cpp0000644000000000000000000000012312675602216023202 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.088708 29 ctime=1513200661.02075997 nordugrid-arc-5.4.2/src/hed/shc/SecHandlerPlugin.cpp0000644000175000002070000001042412675602216023251 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "simplelistpdp/SimpleListPDP.h" #include "pdpserviceinvoker/PDPServiceInvoker.h" #include "delegationpdp/DelegationPDP.h" #include "arcpdp/ArcPDP.h" #include "xacmlpdp/XACMLPDP.h" #include "allowpdp/AllowPDP.h" #include "denypdp/DenyPDP.h" #include "arcauthzsh/ArcAuthZ.h" #include "usernametokensh/UsernameTokenSH.h" #include "x509tokensh/X509TokenSH.h" #include "samltokensh/SAMLTokenSH.h" #include "saml2sso_assertionconsumersh/SAML2SSO_AssertionConsumerSH.h" #include "delegationsh/DelegationSH.h" #include "arcpdp/ArcPolicy.h" #include "xacmlpdp/XACMLPolicy.h" #include "gaclpdp/GACLPolicy.h" #include "arcpdp/ArcEvaluator.h" #include "xacmlpdp/XACMLEvaluator.h" #include "gaclpdp/GACLEvaluator.h" #include "arcpdp/ArcRequest.h" #include "xacmlpdp/XACMLRequest.h" #include "gaclpdp/GACLRequest.h" #include "arcpdp/ArcAttributeFactory.h" #include "arcpdp/ArcAlgFactory.h" #include "arcpdp/ArcFnFactory.h" #include "xacmlpdp/XACMLAttributeFactory.h" #include "xacmlpdp/XACMLAlgFactory.h" #include "xacmlpdp/XACMLFnFactory.h" using namespace ArcSec; extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "simplelist.pdp", "HED:PDP", NULL, 0, &ArcSec::SimpleListPDP::get_simplelist_pdp}, { "arc.pdp", "HED:PDP", NULL, 0, &ArcSec::ArcPDP::get_arc_pdp}, { "xacml.pdp", "HED:PDP", NULL, 0, &ArcSec::XACMLPDP::get_xacml_pdp}, { "pdpservice.invoker", "HED:PDP", NULL, 0, &ArcSec::PDPServiceInvoker::get_pdpservice_invoker}, { "delegation.pdp", "HED:PDP", NULL, 0, &ArcSec::DelegationPDP::get_delegation_pdp}, { "allow.pdp", 
"HED:PDP", NULL, 0, &ArcSec::AllowPDP::get_allow_pdp}, { "deny.pdp", "HED:PDP", NULL, 0, &ArcSec::DenyPDP::get_deny_pdp}, { "arc.authz", "HED:SHC", NULL, 0, &ArcSec::ArcAuthZ::get_sechandler}, { "usernametoken.handler", "HED:SHC", NULL, 0, &ArcSec::UsernameTokenSH::get_sechandler}, #ifdef HAVE_XMLSEC { "x509token.handler", "HED:SHC", NULL, 0, &ArcSec::X509TokenSH::get_sechandler}, { "samltoken.handler", "HED:SHC", NULL, 0, &ArcSec::SAMLTokenSH::get_sechandler}, { "saml2ssoassertionconsumer.handler", "HED:SHC", NULL, 0, &ArcSec::SAML2SSO_AssertionConsumerSH::get_sechandler}, #endif { "delegation.handler", "HED:SHC", NULL, 0, &ArcSec::DelegationSH::get_sechandler}, { "arc.policy", "__arc_policy_modules__", NULL, 0, &ArcSec::ArcPolicy::get_policy }, { "xacml.policy", "__arc_policy_modules__", NULL, 0, &ArcSec::XACMLPolicy::get_policy }, { "gacl.policy", "__arc_policy_modules__", NULL, 0, //__gacl_policy_modules__ --> __arc_policy_modules__ &ArcSec::GACLPolicy::get_policy }, { "arc.evaluator", "__arc_evaluator_modules__", NULL, 0, &ArcSec::ArcEvaluator::get_evaluator }, { "xacml.evaluator", "__arc_evaluator_modules__", NULL, 0, &ArcSec::XACMLEvaluator::get_evaluator }, { "gacl.evaluator", "__arc_evaluator_modules__", NULL, 0, &ArcSec::GACLEvaluator::get_evaluator }, { "arc.request", "__arc_request_modules__", NULL, 0, &ArcSec::ArcRequest::get_request }, { "xacml.request", "__arc_request_modules__", NULL, 0, &ArcSec::XACMLRequest::get_request }, { "gacl.request", "__arc_request_modules__", NULL, 0, &ArcSec::GACLRequest::get_request }, { "arc.attrfactory", "__arc_attrfactory_modules__", NULL, 0, &get_arcpdp_attr_factory }, { "arc.algfactory", "__arc_algfactory_modules__", NULL, 0, &get_arcpdp_alg_factory }, { "arc.fnfactory", "__arc_fnfactory_modules__", NULL, 0, &get_arcpdp_fn_factory }, { "xacml.attrfactory", "__arc_attrfactory_modules__", NULL, 0, &get_xacmlpdp_attr_factory }, { "xacml.algfactory", "__arc_algfactory_modules__", NULL, 0, &get_xacmlpdp_alg_factory }, { "xacml.fnfactory", "__arc_fnfactory_modules__", NULL, 0, &get_xacmlpdp_fn_factory }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/usernametokensh0000644000000000000000000000013213214316025022433 xustar000000000000000030 mtime=1513200661.407764703 30 atime=1513200668.722854169 30 ctime=1513200661.407764703 nordugrid-arc-5.4.2/src/hed/shc/usernametokensh/0000755000175000002070000000000013214316025022556 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/usernametokensh/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515024554 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200600.991025783 30 ctime=1513200661.402764642 nordugrid-arc-5.4.2/src/hed/shc/usernametokensh/Makefile.am0000644000175000002070000000070112052416515024614 0ustar00mockbuildmock00000000000000SUBDIRS = schema noinst_LTLIBRARIES = libusernametokensh.la libusernametokensh_la_SOURCES = UsernameTokenSH.cpp UsernameTokenSH.h libusernametokensh_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libusernametokensh_la_LIBADD = \ $(top_builddir)/src/hed/libs/ws-security/libarcwssecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la nordugrid-arc-5.4.2/src/hed/shc/usernametokensh/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315731024560 xustar000000000000000030 mtime=1513200601.035026321 30 atime=1513200649.446618414 30 ctime=1513200661.403764654 
nordugrid-arc-5.4.2/src/hed/shc/usernametokensh/Makefile.in0000644000175000002070000007030013214315731024626 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/usernametokensh DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libusernametokensh_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/ws-security/libarcwssecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libusernametokensh_la_OBJECTS = \ libusernametokensh_la-UsernameTokenSH.lo libusernametokensh_la_OBJECTS = $(am_libusernametokensh_la_OBJECTS) libusernametokensh_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libusernametokensh_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) 
$(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libusernametokensh_la_SOURCES) DIST_SOURCES = $(libusernametokensh_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = 
@CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ 
PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = 
@ws_monitor_prefix@ SUBDIRS = schema noinst_LTLIBRARIES = libusernametokensh.la libusernametokensh_la_SOURCES = UsernameTokenSH.cpp UsernameTokenSH.h libusernametokensh_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libusernametokensh_la_LIBADD = \ $(top_builddir)/src/hed/libs/ws-security/libarcwssecurity.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/usernametokensh/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/usernametokensh/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libusernametokensh.la: $(libusernametokensh_la_OBJECTS) $(libusernametokensh_la_DEPENDENCIES) $(libusernametokensh_la_LINK) $(libusernametokensh_la_OBJECTS) $(libusernametokensh_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libusernametokensh_la-UsernameTokenSH.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ 
@am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libusernametokensh_la-UsernameTokenSH.lo: UsernameTokenSH.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libusernametokensh_la_CXXFLAGS) $(CXXFLAGS) -MT libusernametokensh_la-UsernameTokenSH.lo -MD -MP -MF $(DEPDIR)/libusernametokensh_la-UsernameTokenSH.Tpo -c -o libusernametokensh_la-UsernameTokenSH.lo `test -f 'UsernameTokenSH.cpp' || echo '$(srcdir)/'`UsernameTokenSH.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libusernametokensh_la-UsernameTokenSH.Tpo $(DEPDIR)/libusernametokensh_la-UsernameTokenSH.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='UsernameTokenSH.cpp' object='libusernametokensh_la-UsernameTokenSH.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libusernametokensh_la_CXXFLAGS) $(CXXFLAGS) -c -o libusernametokensh_la-UsernameTokenSH.lo `test -f 'UsernameTokenSH.cpp' || echo '$(srcdir)/'`UsernameTokenSH.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . 
|| ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-noinstLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/usernametokensh/PaxHeaders.7502/UsernameTokenSH.cpp0000644000000000000000000000012412110401544026223 xustar000000000000000027 mtime=1361183588.521754 27 atime=1513200575.012708 30 ctime=1513200661.404764666 nordugrid-arc-5.4.2/src/hed/shc/usernametokensh/UsernameTokenSH.cpp0000644000175000002070000001032112110401544026265 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "UsernameTokenSH.h" static Arc::Logger logger(Arc::Logger::rootLogger, "UsernameTokenSH"); Arc::Plugin* ArcSec::UsernameTokenSH::get_sechandler(Arc::PluginArgument* arg) { ArcSec::SecHandlerPluginArgument* shcarg = arg?dynamic_cast(arg):NULL; if(!shcarg) return NULL; ArcSec::UsernameTokenSH* plugin = new ArcSec::UsernameTokenSH((Arc::Config*)(*shcarg),(Arc::ChainContext*)(*shcarg),arg); if(!plugin) return NULL; if(!(*plugin)) { delete plugin; plugin = NULL; }; return plugin; } /* sechandler_descriptors ARC_SECHANDLER_LOADER = { { "usernametoken.creator", 0, &get_sechandler}, { NULL, 0, NULL } }; */ namespace ArcSec { using namespace Arc; UsernameTokenSH::UsernameTokenSH(Config *cfg,ChainContext*,Arc::PluginArgument* parg):SecHandler(cfg,parg),valid_(false){ process_type_=process_none; std::string process_type = (std::string)((*cfg)["Process"]); if(process_type == "extract") { password_source_=(std::string)((*cfg)["PasswordSource"]); if(password_source_.empty()) { logger.msg(ERROR,"Missing or empty PasswordSource element"); return; }; process_type_=process_extract; } else if(process_type == "generate") { std::string pwd_encoding = (std::string)((*cfg)["PasswordEncoding"]); if(pwd_encoding == "digest") { password_type_=password_digest; } else if((pwd_encoding == "text") || pwd_encoding.empty()) { password_type_=password_text; } else { logger.msg(ERROR,"Password encoding type not supported: %s",pwd_encoding); return; }; username_=(std::string)((*cfg)["Username"]); if(username_.empty()) { logger.msg(ERROR,"Missing or empty Username element"); return; }; password_=(std::string)((*cfg)["Password"]); process_type_=process_generate; } else { logger.msg(ERROR,"Processing type not supported: %s",process_type); return; }; valid_ = true; } UsernameTokenSH::~UsernameTokenSH() { } SecHandlerStatus UsernameTokenSH::Handle(Arc::Message* msg) const { if(process_type_ == process_extract) { try { MessagePayload* payload = msg->Payload(); if(!payload) { logger.msg(ERROR,"The payload of incoming message is empty"); return false; } PayloadSOAP* soap = dynamic_cast(payload); if(!soap) { logger.msg(ERROR,"Failed to cast PayloadSOAP from incoming payload"); return false; } UsernameToken ut(*soap); if(!ut) { logger.msg(ERROR,"Failed to parse Username Token from incoming SOAP"); return false; }; std::string derived_key; std::ifstream stream(password_source_.c_str()); if(!ut.Authenticate(stream, derived_key)) { logger.msg(ERROR, "Failed to authenticate Username Token inside the incoming SOAP"); stream.close(); return false; }; logger.msg(INFO, "Succeeded to authenticate UsernameToken"); stream.close(); } catch(std::exception&) { logger.msg(ERROR,"Incoming Message is not SOAP"); return false; } } else if(process_type_ == process_generate) { try { MessagePayload* payload = msg->Payload(); if(!payload) { logger.msg(ERROR,"The payload of outgoing message is empty"); return false; } PayloadSOAP* soap = dynamic_cast(payload); if(!soap) { logger.msg(ERROR,"Failed to cast PayloadSOAP from outgoing payload"); return false; } 
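        // Generate mode: the UsernameToken constructor call below adds a WS-Security
        // UsernameToken element to the header of the outgoing SOAP message, using the
        // Username and Password values read from the handler configuration and the
        // configured password encoding (digest or plain text).
        //
        // Illustrative configuration sketch (the child element names are the ones read
        // by the constructor of this handler; the enclosing SecHandler element and its
        // attributes are assumed for illustration, not taken from this file):
        //   <SecHandler ...>
        //     <Process>generate</Process>
        //     <PasswordEncoding>digest</PasswordEncoding>
        //     <Username>user1</Username>
        //     <Password>password1</Password>
        //   </SecHandler>
        // With <Process>extract</Process> only the PasswordSource element (pointing to
        // a file of "username, password" lines) is read instead.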
UsernameToken ut(*soap,username_,password_,std::string(""), (password_type_==password_digest)?(UsernameToken::PasswordDigest):(UsernameToken::PasswordText)); if(!ut) { logger.msg(ERROR,"Failed to generate Username Token for outgoing SOAP"); return false; }; } catch(std::exception&) { logger.msg(ERROR,"Outgoing Message is not SOAP"); return false; } } else { logger.msg(ERROR,"Username Token handler is not configured"); return false; } return true; } } nordugrid-arc-5.4.2/src/hed/shc/usernametokensh/PaxHeaders.7502/UsernameTokenSH.h0000644000000000000000000000012412110401544025670 xustar000000000000000027 mtime=1361183588.521754 27 atime=1513200575.014708 30 ctime=1513200661.405764679 nordugrid-arc-5.4.2/src/hed/shc/usernametokensh/UsernameTokenSH.h0000644000175000002070000000173512110401544025743 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_USERNAMETOKENSH_H__ #define __ARC_SEC_USERNAMETOKENSH_H__ #include #include #include #include namespace ArcSec { /// Adds WS-Security Username Token into SOAP Header class UsernameTokenSH : public SecHandler { private: enum { process_none, process_extract, process_generate } process_type_; enum { password_text, password_digest } password_type_; std::string username_; std::string password_; std::string password_source_; bool valid_; public: UsernameTokenSH(Arc::Config *cfg, Arc::ChainContext* ctx, Arc::PluginArgument* parg); virtual ~UsernameTokenSH(void); static Arc::Plugin* get_sechandler(Arc::PluginArgument* arg); virtual SecHandlerStatus Handle(Arc::Message* msg) const; operator bool(void) { return valid_; }; bool operator!(void) { return !valid_; }; }; } // namespace ArcSec #endif /* __ARC_SEC_USERNAMETOKENSH_H__ */ nordugrid-arc-5.4.2/src/hed/shc/usernametokensh/PaxHeaders.7502/schema0000644000000000000000000000013213214316025023673 xustar000000000000000030 mtime=1513200661.424764911 30 atime=1513200668.722854169 30 ctime=1513200661.424764911 nordugrid-arc-5.4.2/src/hed/shc/usernametokensh/schema/0000755000175000002070000000000013214316025024016 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/usernametokensh/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711255700321026007 xustar000000000000000027 mtime=1253540049.444682 30 atime=1513200601.051026517 30 ctime=1513200661.422764887 nordugrid-arc-5.4.2/src/hed/shc/usernametokensh/schema/Makefile.am0000644000175000002070000000015011255700321026045 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = UsernameTokenSH.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/shc/usernametokensh/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315731026020 xustar000000000000000030 mtime=1513200601.082026896 30 atime=1513200649.459618573 30 ctime=1513200661.423764899 nordugrid-arc-5.4.2/src/hed/shc/usernametokensh/schema/Makefile.in0000644000175000002070000004356013214315731026076 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/usernametokensh/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ 
ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = 
@LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = 
@lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = UsernameTokenSH.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/usernametokensh/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/usernametokensh/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo 
"$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/usernametokensh/schema/PaxHeaders.7502/UsernameTokenSH.xsd0000644000000000000000000000012411457664434027525 xustar000000000000000027 mtime=1287612700.761119 27 atime=1513200575.012708 30 ctime=1513200661.424764911 nordugrid-arc-5.4.2/src/hed/shc/usernametokensh/schema/UsernameTokenSH.xsd0000644000175000002070000000577411457664434027607 0ustar00mockbuildmock00000000000000 This element defines either Username Token is extracted from SOAP header or generated using other configuration elements. Type of the processing of Username Token to SOAP message: extract or generate. It is needed for both client and service side. Default is none. The encoding type of the password (one part of UsernameToken): text or digest. only needed for client side. Default is none. The Username element of the token. only needed for client side. Default is none. The Password element of the token. only needed for client side. Default is none. Location of external source for password to read from. The content of the file should be like: user1, password1 user2, password2 only needed for service side. Default is none. nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/simplelistpdp0000644000000000000000000000013213214316025022111 xustar000000000000000030 mtime=1513200661.088760802 30 atime=1513200668.722854169 30 ctime=1513200661.088760802 nordugrid-arc-5.4.2/src/hed/shc/simplelistpdp/0000755000175000002070000000000013214316025022234 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/simplelistpdp/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515024232 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200600.884024474 30 ctime=1513200661.083760741 nordugrid-arc-5.4.2/src/hed/shc/simplelistpdp/Makefile.am0000644000175000002070000000056512052416515024302 0ustar00mockbuildmock00000000000000SUBDIRS = schema noinst_LTLIBRARIES = libsimplelistpdp.la libsimplelistpdp_la_SOURCES = SimpleListPDP.cpp SimpleListPDP.h libsimplelistpdp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libsimplelistpdp_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la nordugrid-arc-5.4.2/src/hed/shc/simplelistpdp/PaxHeaders.7502/SimpleListPDP.cpp0000644000000000000000000000012412123101751025320 xustar000000000000000027 mtime=1363969001.473645 27 atime=1513200575.024708 30 ctime=1513200661.085760765 nordugrid-arc-5.4.2/src/hed/shc/simplelistpdp/SimpleListPDP.cpp0000644000175000002070000000515212123101751025370 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "SimpleListPDP.h" Arc::Logger ArcSec::SimpleListPDP::logger(Arc::Logger::rootLogger, "SimpleListPDP"); using namespace Arc; using namespace ArcSec; Plugin* SimpleListPDP::get_simplelist_pdp(PluginArgument* arg) { ArcSec::PDPPluginArgument* pdparg = arg?dynamic_cast(arg):NULL; if(!pdparg) return NULL; return new SimpleListPDP((Arc::Config*)(*pdparg),arg); } SimpleListPDP::SimpleListPDP(Config* cfg, Arc::PluginArgument* parg):PDP(cfg,parg){ location = (std::string)(cfg->Attribute("location")); logger.msg(VERBOSE, "Access list location: %s", location); for(XMLNode dn = (*cfg)["DN"];(bool)dn;++dn) { dns.push_back((std::string)dn); } } PDPStatus SimpleListPDP::isPermitted(Message *msg) const { std::string subject=msg->Attributes()->get("TLS:IDENTITYDN"); std::string line; if(location.empty() && dns.empty()) { 
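    // Neither a "location" attribute (path to an access list file of allowed subject
    // DNs) nor any inline DN elements were found in the PDP configuration, so there
    // is no policy to match the client identity against; refuse authorization.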
logger.msg(ERROR, "No policy file or DNs specified for simplelist.pdp, please set location attribute or at least one DN element for simplelist PDP node in configuration."); return false; } logger.msg(DEBUG, "Subject to match: %s", subject); for(std::list::const_iterator dn = dns.begin(); dn != dns.end();++dn) { logger.msg(DEBUG, "Policy subject: %s", *dn); if((*dn) == subject) { logger.msg(VERBOSE, "Authorized from simplelist.pdp: %s", subject); return true; } } if(location.empty()) return false; std::ifstream fs(location.c_str()); if(fs.fail()) { logger.msg(ERROR, "The policy file setup for simplelist.pdp does not exist, please check location attribute for simplelist PDP node in service configuration"); return false; } while (fs.good()) { std::string::size_type p; getline (fs, line); logger.msg(DEBUG, "Policy line: %s", line); p=line.find_first_not_of(" \t"); line.erase(0,p); p=line.find_last_not_of(" \t"); if(p != std::string::npos) line.erase(p+1); if(!line.empty()) { if(line[0] == '"') { std::string::size_type p = line.find('"',1); if(p != std::string::npos) line=line.substr(1,p-1); }; }; if(!line.empty()) { if(!(line.compare(subject))) { fs.close(); logger.msg(VERBOSE, "Authorized from simplelist.pdp: %s", subject); return true; } } } fs.close(); logger.msg(ERROR, "Not authorized from simplelist.pdp: %s", subject); return false; } nordugrid-arc-5.4.2/src/hed/shc/simplelistpdp/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315730024235 xustar000000000000000030 mtime=1513200600.928025012 30 atime=1513200649.387617692 30 ctime=1513200661.084760753 nordugrid-arc-5.4.2/src/hed/shc/simplelistpdp/Makefile.in0000644000175000002070000006772613214315730024325 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/simplelistpdp DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libsimplelistpdp_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libsimplelistpdp_la_OBJECTS = libsimplelistpdp_la-SimpleListPDP.lo libsimplelistpdp_la_OBJECTS = $(am_libsimplelistpdp_la_OBJECTS) libsimplelistpdp_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libsimplelistpdp_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libsimplelistpdp_la_SOURCES) DIST_SOURCES = $(libsimplelistpdp_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive 
clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = 
@GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING 
= @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = schema noinst_LTLIBRARIES = libsimplelistpdp.la libsimplelistpdp_la_SOURCES = SimpleListPDP.cpp SimpleListPDP.h libsimplelistpdp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libsimplelistpdp_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/simplelistpdp/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign 
src/hed/shc/simplelistpdp/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libsimplelistpdp.la: $(libsimplelistpdp_la_OBJECTS) $(libsimplelistpdp_la_DEPENDENCIES) $(libsimplelistpdp_la_LINK) $(libsimplelistpdp_la_OBJECTS) $(libsimplelistpdp_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libsimplelistpdp_la-SimpleListPDP.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libsimplelistpdp_la-SimpleListPDP.lo: SimpleListPDP.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libsimplelistpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libsimplelistpdp_la-SimpleListPDP.lo -MD -MP -MF $(DEPDIR)/libsimplelistpdp_la-SimpleListPDP.Tpo -c -o libsimplelistpdp_la-SimpleListPDP.lo `test -f 'SimpleListPDP.cpp' || echo '$(srcdir)/'`SimpleListPDP.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libsimplelistpdp_la-SimpleListPDP.Tpo $(DEPDIR)/libsimplelistpdp_la-SimpleListPDP.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SimpleListPDP.cpp' object='libsimplelistpdp_la-SimpleListPDP.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) 
--mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libsimplelistpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libsimplelistpdp_la-SimpleListPDP.lo `test -f 'SimpleListPDP.cpp' || echo '$(srcdir)/'`SimpleListPDP.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-noinstLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/simplelistpdp/PaxHeaders.7502/SimpleListPDP.h0000644000000000000000000000012412110410653024765 xustar000000000000000027 mtime=1361187243.181034 27 atime=1513200575.025708 30 ctime=1513200661.086760777 nordugrid-arc-5.4.2/src/hed/shc/simplelistpdp/SimpleListPDP.h0000644000175000002070000000174412110410653025040 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_SIMPLEPDP_H__ #define __ARC_SEC_SIMPLEPDP_H__ #include #include #include namespace ArcSec { /// Tests X509 subject against list of subjects in file /** This class implements the PDP interface. Its isPermitted() method compares the X509 subject of the requestor, obtained from the TLS layer (TLS:PEERDN), to a list of subjects (one per line) in an external file. The location of the file is defined by the 'location' attribute of the PDP configuration. Returns true if the subject is present in the list, otherwise false.
*/ class SimpleListPDP : public PDP { public: static Arc::Plugin* get_simplelist_pdp(Arc::PluginArgument *arg); SimpleListPDP(Arc::Config* cfg, Arc::PluginArgument* parg); virtual ~SimpleListPDP() {}; virtual PDPStatus isPermitted(Arc::Message *msg) const; private: std::string location; std::list dns; protected: static Arc::Logger logger; }; } // namespace ArcSec #endif /* __ARC_SEC_SIMPLELISTPDP_H__ */ nordugrid-arc-5.4.2/src/hed/shc/simplelistpdp/PaxHeaders.7502/schema0000644000000000000000000000013213214316025023351 xustar000000000000000030 mtime=1513200661.103760985 30 atime=1513200668.722854169 30 ctime=1513200661.103760985 nordugrid-arc-5.4.2/src/hed/shc/simplelistpdp/schema/0000755000175000002070000000000013214316025023474 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/simplelistpdp/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711255700321025465 xustar000000000000000027 mtime=1253540049.444682 30 atime=1513200600.944025208 30 ctime=1513200661.102760973 nordugrid-arc-5.4.2/src/hed/shc/simplelistpdp/schema/Makefile.am0000644000175000002070000000014611255700321025530 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = SimpleListPDP.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/shc/simplelistpdp/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315730025475 xustar000000000000000030 mtime=1513200600.975025587 30 atime=1513200649.403617888 30 ctime=1513200661.102760973 nordugrid-arc-5.4.2/src/hed/shc/simplelistpdp/schema/Makefile.in0000644000175000002070000004355013214315730025552 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
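The SimpleListPDP.h comment above describes the whole decision rule: take the requestor's X.509 subject from the TLS layer (TLS:PEERDN) and look it up, line by line, in the file named by the 'location' attribute. Below is a minimal standalone C++ sketch of that lookup, for illustration only; it is not the ARC implementation, and the helper name subject_in_list is hypothetical.

// Sketch of the matching rule described in SimpleListPDP.h: return true
// if 'subject' appears as one of the lines of the file at 'location'
// (one X.509 subject per line), false otherwise.
#include <fstream>
#include <string>

static bool subject_in_list(const std::string& location,
                            const std::string& subject) {
  std::ifstream list(location.c_str());
  std::string line;
  while (std::getline(list, line)) {
    // Drop a trailing carriage return, if present, and skip blank lines.
    if (!line.empty() && line[line.size() - 1] == '\r')
      line.erase(line.size() - 1);
    if (line.empty()) continue;
    if (line == subject) return true;  // permit: subject found in the list
  }
  return false;                        // deny: subject not listed
}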
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/simplelistpdp/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = 
@ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = 
@LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ 
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = SimpleListPDP.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/simplelistpdp/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/simplelistpdp/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ 
dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/simplelistpdp/schema/PaxHeaders.7502/SimpleListPDP.xsd0000644000000000000000000000012411457664434026611 xustar000000000000000027 mtime=1287612700.761119 27 atime=1513200575.025708 30 ctime=1513200661.103760985 nordugrid-arc-5.4.2/src/hed/shc/simplelistpdp/schema/SimpleListPDP.xsd0000644000175000002070000000304711457664434026662 0ustar00mockbuildmock00000000000000 This attribute is to be used in the top (and only) element of the PDP configuration. It specifies the full path to a file with the list of identities to be matched. Default is none. The list of DNs that are directly specified by this PDP. Default is none. nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/samltokensh0000644000000000000000000000013213214316025021550 xustar000000000000000030 mtime=1513200661.533766244 30 atime=1513200668.722854169 30 ctime=1513200661.533766244 nordugrid-arc-5.4.2/src/hed/shc/samltokensh/0000755000175000002070000000000013214316025021673 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/samltokensh/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515023671 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200600.777023166 30 ctime=1513200661.527766171 nordugrid-arc-5.4.2/src/hed/shc/samltokensh/Makefile.am0000644000175000002070000000127312052416515023736 0ustar00mockbuildmock00000000000000SUBDIRS = schema noinst_LTLIBRARIES = libsamltokensh.la libsamltokensh_la_SOURCES = SAMLTokenSH.cpp SAMLTokenSH.h libsamltokensh_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) \ $(XMLSEC_OPENSSL_CFLAGS) $(XMLSEC_CFLAGS) $(AM_CXXFLAGS) libsamltokensh_la_LIBADD = \ $(top_builddir)/src/hed/libs/ws-security/libarcwssecurity.la \ $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(XMLSEC_OPENSSL_LIBS) $(XMLSEC_LIBS) nordugrid-arc-5.4.2/src/hed/shc/samltokensh/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315730023674 xustar000000000000000030 mtime=1513200600.822023716 30 atime=1513200649.502619099 30 ctime=1513200661.529766195 nordugrid-arc-5.4.2/src/hed/shc/samltokensh/Makefile.in0000644000175000002070000007100013214315730023740 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE.
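The SimpleListPDP.xsd annotations above describe two sources of identities: the file named by the 'location' attribute and DN entries given directly in the PDP configuration, which corresponds to the dns member of SimpleListPDP. The short C++ sketch below illustrates only the second source, the check against directly configured DNs; it is an illustration of the described behaviour with an assumed function name, not the ARC code, and in the described design it would be combined with the file lookup sketched earlier.

// Check the DNs given directly in the PDP configuration (the second
// identity source described by the schema annotations).  Illustrative
// helper only; combine with the subject-file lookup for a full decision.
#include <list>
#include <string>

static bool subject_in_configured_dns(const std::list<std::string>& dns,
                                      const std::string& subject) {
  for (std::list<std::string>::const_iterator dn = dns.begin();
       dn != dns.end(); ++dn) {
    if (*dn == subject) return true;   // matched a directly configured DN
  }
  return false;                        // no directly configured DN matched
}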
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/samltokensh DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) am__DEPENDENCIES_1 = libsamltokensh_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/ws-security/libarcwssecurity.la \ $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libsamltokensh_la_OBJECTS = libsamltokensh_la-SAMLTokenSH.lo libsamltokensh_la_OBJECTS = $(am_libsamltokensh_la_OBJECTS) libsamltokensh_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libsamltokensh_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libsamltokensh_la_SOURCES) DIST_SOURCES = $(libsamltokensh_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ 
install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ 
GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = 
@REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = schema noinst_LTLIBRARIES = libsamltokensh.la libsamltokensh_la_SOURCES = SAMLTokenSH.cpp SAMLTokenSH.h libsamltokensh_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) \ $(XMLSEC_OPENSSL_CFLAGS) $(XMLSEC_CFLAGS) $(AM_CXXFLAGS) libsamltokensh_la_LIBADD = \ $(top_builddir)/src/hed/libs/ws-security/libarcwssecurity.la \ $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ 
$(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(XMLSEC_OPENSSL_LIBS) $(XMLSEC_LIBS) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/samltokensh/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/samltokensh/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libsamltokensh.la: $(libsamltokensh_la_OBJECTS) $(libsamltokensh_la_DEPENDENCIES) $(libsamltokensh_la_LINK) $(libsamltokensh_la_OBJECTS) $(libsamltokensh_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libsamltokensh_la-SAMLTokenSH.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libsamltokensh_la-SAMLTokenSH.lo: SAMLTokenSH.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libsamltokensh_la_CXXFLAGS) $(CXXFLAGS) -MT libsamltokensh_la-SAMLTokenSH.lo -MD -MP -MF 
$(DEPDIR)/libsamltokensh_la-SAMLTokenSH.Tpo -c -o libsamltokensh_la-SAMLTokenSH.lo `test -f 'SAMLTokenSH.cpp' || echo '$(srcdir)/'`SAMLTokenSH.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libsamltokensh_la-SAMLTokenSH.Tpo $(DEPDIR)/libsamltokensh_la-SAMLTokenSH.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SAMLTokenSH.cpp' object='libsamltokensh_la-SAMLTokenSH.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libsamltokensh_la_CXXFLAGS) $(CXXFLAGS) -c -o libsamltokensh_la-SAMLTokenSH.lo `test -f 'SAMLTokenSH.cpp' || echo '$(srcdir)/'`SAMLTokenSH.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . 
|| ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-noinstLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/samltokensh/PaxHeaders.7502/SAMLTokenSH.h0000644000000000000000000000012312110401544023761 xustar000000000000000027 mtime=1361183588.521754 27 atime=1513200575.020708 29 ctime=1513200661.53176622 nordugrid-arc-5.4.2/src/hed/shc/samltokensh/SAMLTokenSH.h0000644000175000002070000000175112110401544024033 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_SAMLTOKENSH_H__ #define __ARC_SEC_SAMLTOKENSH_H__ #include #include #include #include namespace ArcSec { /// Adds WS-Security SAML Token into SOAP Header class SAMLTokenSH : public SecHandler { private: enum { process_none, process_extract, process_generate } process_type_; std::string cert_file_; std::string key_file_; std::string ca_file_; std::string ca_dir_; std::string local_dn_; std::string aa_service_; mutable Arc::XMLNode saml_assertion_; bool valid_; public: SAMLTokenSH(Arc::Config *cfg, Arc::ChainContext* ctx, Arc::PluginArgument* parg); virtual ~SAMLTokenSH(void); static Arc::Plugin* get_sechandler(Arc::PluginArgument* arg); virtual SecHandlerStatus Handle(Arc::Message* msg) const; operator bool(void) { return valid_; }; bool operator!(void) { return !valid_; }; }; } // namespace ArcSec #endif /* __ARC_SEC_SAMLTOKENSH_H__ */ nordugrid-arc-5.4.2/src/hed/shc/samltokensh/PaxHeaders.7502/SAMLTokenSH.cpp0000644000000000000000000000012412110401544024315 xustar000000000000000027 mtime=1361183588.521754 27 atime=1513200575.022708 30 ctime=1513200661.530766208 nordugrid-arc-5.4.2/src/hed/shc/samltokensh/SAMLTokenSH.cpp0000644000175000002070000003123412110401544024365 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include "SAMLTokenSH.h" static Arc::Logger logger(Arc::Logger::rootLogger, "SAMLTokenSH"); Arc::Plugin* ArcSec::SAMLTokenSH::get_sechandler(Arc::PluginArgument* arg) { ArcSec::SecHandlerPluginArgument* shcarg = arg?dynamic_cast(arg):NULL; if(!shcarg) return NULL; ArcSec::SAMLTokenSH* plugin = new ArcSec::SAMLTokenSH((Arc::Config*)(*shcarg),(Arc::ChainContext*)(*shcarg),arg); if(!plugin) return NULL; if(!(*plugin)) { delete plugin; plugin = NULL; }; return plugin; } /* sechandler_descriptors ARC_SECHANDLER_LOADER = { { "samltoken.creator", 0, &get_sechandler}, { NULL, 0, NULL } }; */ namespace ArcSec { using namespace Arc; class SAMLAssertionSecAttr: public Arc::SecAttr { public: SAMLAssertionSecAttr(XMLNode& node); SAMLAssertionSecAttr(std::string& str); virtual ~SAMLAssertionSecAttr(void); virtual operator bool(void) const; virtual bool Export(SecAttrFormat format,XMLNode &val) const; virtual bool Import(SecAttrFormat format, const XMLNode& val); virtual std::string get(const std::string& id) const; protected: virtual bool equal(const SecAttr &b) const; private: XMLNode saml_assertion_node_; }; SAMLAssertionSecAttr::SAMLAssertionSecAttr(XMLNode& node) { Import(SAML, node); } SAMLAssertionSecAttr::SAMLAssertionSecAttr(std::string& node_str) { Import(SAML, node_str); } SAMLAssertionSecAttr::~SAMLAssertionSecAttr(){} std::string SAMLAssertionSecAttr::get(const std::string& id) const { // TODO: do some dissection of saml_assertion_node_ return ""; } bool SAMLAssertionSecAttr::equal(const SecAttr& b) const { try { const SAMLAssertionSecAttr& a = dynamic_cast(b); if (!a) return false; // ... 
return false; } catch(std::exception&) { }; return false; } SAMLAssertionSecAttr::operator bool() const { return true; } static void add_subject_attribute(XMLNode item,const std::string& subject,const char* id) { XMLNode attr = item.NewChild("ra:SubjectAttribute"); attr=subject; attr.NewAttribute("Type")="string"; attr.NewAttribute("AttributeId")=id; } bool SAMLAssertionSecAttr::Export(Arc::SecAttrFormat format, XMLNode& val) const { if(format == UNDEFINED) { } else if(format == SAML) { saml_assertion_node_.New(val); return true; } else if(format == ARCAuth) { //Parse the attributes inside saml assertion, //and compose it into Arc request NS ns; ns["ra"]="http://www.nordugrid.org/schemas/request-arc"; val.Namespaces(ns); val.Name("ra:Request"); XMLNode item = val.NewChild("ra:RequestItem"); XMLNode subj = item.NewChild("ra:Subject"); Arc::XMLNode subject_nd = saml_assertion_node_["Subject"]["NameID"]; add_subject_attribute(subj,subject_nd,"http://www.nordugrid.org/schemas/policy-arc/types/wss-saml/subject"); Arc::XMLNode issuer_nd = saml_assertion_node_["Issuer"]; add_subject_attribute(subj,issuer_nd,"http://www.nordugrid.org/schemas/policy-arc/types/wss-saml/issuer"); Arc::XMLNode attr_statement = saml_assertion_node_["AttributeStatement"]; Arc::XMLNode attr_nd; for(int i=0;;i++) { attr_nd = attr_statement["Attribute"][i]; if(!attr_nd) break; std::string attr_name = attr_nd.Attribute("Name"); //std::string attr_nameformat = attr_nd.Attribute("NameFormat"); //std::string attr_friendname = attribute.Attribute("FriendlyName"); Arc::XMLNode attrval_nd; for(int j=0;;j++) { attrval_nd = attr_nd["AttributeValue"][j]; if(!attrval_nd) break; std::string tmp = "http://www.nordugrid.org/schemas/policy-arc/types/wss-saml/"+attr_name; add_subject_attribute(subj,attrval_nd,tmp.c_str()); } } } else {}; return true; } bool SAMLAssertionSecAttr::Import(Arc::SecAttrFormat format, const XMLNode& val) { if(format == UNDEFINED) { } else if(format == SAML) { val.New(saml_assertion_node_); return true; } else {}; return false; } SAMLTokenSH::SAMLTokenSH(Config *cfg,ChainContext*,Arc::PluginArgument* parg):SecHandler(cfg,parg),valid_(false){ if(!init_xmlsec()) return; process_type_=process_none; std::string process_type = (std::string)((*cfg)["Process"]); if(process_type == "generate") { cert_file_=(std::string)((*cfg)["CertificatePath"]); if(cert_file_.empty()) { logger.msg(ERROR,"Missing or empty CertificatePath element"); return; }; key_file_=(std::string)((*cfg)["KeyPath"]); if(key_file_.empty()) { logger.msg(ERROR,"Missing or empty KeyPath element"); return; }; ca_file_=(std::string)((*cfg)["CACertificatePath"]); ca_dir_=(std::string)((*cfg)["CACertificatesDir"]); if(ca_file_.empty() && ca_dir_.empty()) { logger.msg(WARNING,"Both of CACertificatePath and CACertificatesDir elements missing or empty"); }; aa_service_ = (std::string)((*cfg)["AAService"]); process_type_=process_generate; } else if(process_type == "extract") { //If ca file does not exist, we can only verify the signature by //using the certificate in the incoming wssecurity; we can not authenticate //the the message because we can not check the certificate chain without //trusted ca. 
ca_file_=(std::string)((*cfg)["CACertificatePath"]); ca_dir_=(std::string)((*cfg)["CACertificatesDir"]); if(ca_file_.empty() && ca_dir_.empty()) { logger.msg(INFO,"Missing or empty CertificatePath or CACertificatesDir element; will only check the signature, will not do message authentication"); }; process_type_=process_extract; } else { logger.msg(ERROR,"Processing type not supported: %s",process_type); return; }; if(!cert_file_.empty()) { Arc::Credential cred(cert_file_, key_file_, ca_dir_, ca_file_); local_dn_ = convert_to_rdn(cred.GetDN()); } valid_ = true; } SAMLTokenSH::~SAMLTokenSH() { final_xmlsec(); } SecHandlerStatus SAMLTokenSH::Handle(Arc::Message* msg) const { if(process_type_ == process_extract) { try { PayloadSOAP* soap = dynamic_cast(msg->Payload()); SAMLToken st(*soap); if(!st) { logger.msg(ERROR,"Failed to parse SAML Token from incoming SOAP"); return false; }; /* if(!st.Authenticate()) { logger.msg(ERROR, "Failed to verify SAML Token inside the incoming SOAP"); return false; }; */ if((!ca_file_.empty() || !ca_dir_.empty()) && !st.Authenticate(ca_file_, ca_dir_)) { logger.msg(ERROR, "Failed to authenticate SAML Token inside the incoming SOAP"); return false; }; logger.msg(INFO, "Succeeded to authenticate SAMLToken"); //Store the saml assertion into message context Arc::XMLNode assertion_nd = st["Assertion"]; SAMLAssertionSecAttr* sattr = new SAMLAssertionSecAttr(assertion_nd); msg->Auth()->set("SAMLAssertion", sattr); } catch(std::exception&) { logger.msg(ERROR,"Incoming Message is not SOAP"); return false; } } else if(process_type_ == process_generate) { try { if(!saml_assertion_) { //Contact the AA service to get the saml assertion //Compose Arc::NS ns; ns["saml"] = "urn:oasis:names:tc:SAML:2.0:assertion"; ns["samlp"] = "urn:oasis:names:tc:SAML:2.0:protocol"; Arc::XMLNode attr_query(ns, "samlp:AttributeQuery"); std::string query_id = Arc::UUID(); attr_query.NewAttribute("ID") = query_id; Arc::Time t; std::string current_time = t.str(Arc::UTCTime); attr_query.NewAttribute("IssueInstant") = current_time; attr_query.NewAttribute("Version") = std::string("2.0"); attr_query.NewChild("saml:Issuer") = local_dn_; Arc::XMLNode subject = attr_query.NewChild("saml:Subject"); Arc::XMLNode name_id = subject.NewChild("saml:NameID"); name_id.NewAttribute("Format")=std::string("urn:oasis:names:tc:SAML:1.1:nameid-format:X509SubjectName"); name_id = local_dn_; Arc::XMLNode attribute = attr_query.NewChild("saml:Attribute"); attribute.NewAttribute("Name")=std::string("urn:oid:1.3.6.1.4.1.5923.1.1.1.6"); attribute.NewAttribute("NameFormat")=std::string("urn:oasis:names:tc:SAML:2.0:attrname-format:uri"); attribute.NewAttribute("FriendlyName")=std::string("eduPersonPrincipalName"); Arc::XMLSecNode attr_query_secnd(attr_query); std::string attr_query_idname("ID"); attr_query_secnd.AddSignatureTemplate(attr_query_idname, Arc::XMLSecNode::RSA_SHA1); if(attr_query_secnd.SignNode(key_file_, cert_file_)) { std::cout<<"Succeeded to sign the signature under "< under response soap message:"); std::string tmp; attr_resp.GetXML(tmp); logger.msg(Arc::ERROR, "%s", tmp.c_str()); return false; } std::string resp_idname = "ID"; Arc::XMLSecNode attr_resp_secnode(attr_resp); if(attr_resp_secnode.VerifyNode(resp_idname, ca_file_, ca_dir_)) { logger.msg(Arc::INFO, "Succeeded to verify the signature under "); } else { logger.msg(Arc::ERROR, "Failed to verify the signature under "); delete response; return false; } std::string responseto_id = (std::string)(attr_resp.Attribute("InResponseTo")); if(query_id != 
responseto_id) { logger.msg(Arc::INFO, "The Response is not going to this end"); delete response; return false; } std::string resp_time = attr_resp.Attribute("IssueInstant"); std::string statuscode_value = attr_resp["samlp:Status"]["samlp:StatusCode"]; if(statuscode_value == "urn:oasis:names:tc:SAML:2.0:status:Success") logger.msg(Arc::INFO, "The StatusCode is Success"); Arc::XMLNode assertion = attr_resp["saml:Assertion"]; std::string assertion_idname = "ID"; Arc::XMLSecNode assertion_secnode(assertion); if(assertion_secnode.VerifyNode(assertion_idname, ca_file_, ca_dir_)) { logger.msg(Arc::INFO, "Succeeded to verify the signature under "); } else { logger.msg(Arc::ERROR, "Failed to verify the signature under "); delete response; return false; } assertion.New(saml_assertion_); delete response; } //Protect the SOAP message with SAML assertion PayloadSOAP* soap = dynamic_cast(msg->Payload()); SAMLToken st(*soap, cert_file_, key_file_, SAMLToken::SAML2, saml_assertion_); if(!st) { logger.msg(ERROR,"Failed to generate SAML Token for outgoing SOAP"); return false; }; //Reset the soap message (*soap) = st; } catch(std::exception&) { logger.msg(ERROR,"Outgoing Message is not SOAP"); return false; } } else { logger.msg(ERROR,"SAML Token handler is not configured"); return false; } return true; } } nordugrid-arc-5.4.2/src/hed/shc/samltokensh/PaxHeaders.7502/schema0000644000000000000000000000013213214316025023010 xustar000000000000000030 mtime=1513200661.695768226 30 atime=1513200668.722854169 30 ctime=1513200661.695768226 nordugrid-arc-5.4.2/src/hed/shc/samltokensh/schema/0000755000175000002070000000000013214316025023133 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/samltokensh/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711255700321025124 xustar000000000000000027 mtime=1253540049.444682 30 atime=1513200600.837023899 30 ctime=1513200661.692768189 nordugrid-arc-5.4.2/src/hed/shc/samltokensh/schema/Makefile.am0000644000175000002070000000014411255700321025165 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = SAMLTokenSH.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/shc/samltokensh/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315730025133 xustar000000000000000029 mtime=1513200600.86902429 30 atime=1513200649.519619307 30 ctime=1513200661.693768201 nordugrid-arc-5.4.2/src/hed/shc/samltokensh/schema/Makefile.in0000644000175000002070000004354013214315730025210 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/samltokensh/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = 
@ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = 
@LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ 
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = SAMLTokenSH.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/samltokensh/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/samltokensh/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo 
"/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/samltokensh/schema/PaxHeaders.7502/SAMLTokenSH.xsd0000644000000000000000000000012411457664434025617 xustar000000000000000027 mtime=1287612700.761119 27 atime=1513200575.020708 30 ctime=1513200661.695768226 nordugrid-arc-5.4.2/src/hed/shc/samltokensh/schema/SAMLTokenSH.xsd0000644000175000002070000000770111457664434025671 0ustar00mockbuildmock00000000000000 This element defines either SAML Token is extracted from SOAP header or generated using other configuration elements. Type of the processing of SAML Token to SOAP message: extract or generate. It is needed for both client and service side. Default is none. The location of private key which is used to sign the SOAP message, only needed by the client side. Default is none. The location of certificate, the public key parsed from certificate is used to be as one part of SAML Token: public key Only needed by the client side. Default is none. The location of the file of trusted CA certificate, the certificate is used for verifying the signature to SOAP message. Needed by client and service side. Default is none. The location of the directory that contains trusted CA certificates, the certificates are used for verifying the signature to SOAP message. Needed by client and service side. Default is "/etc/grid-security/certificates". Endpoint of the attribute authority service. AA (attribute authority) service is an external third-party service that is used for authenticate the requestor(client) and signing SAML Token with requestor's attributes embedded. Needed by client side. Default is none. nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/arcpdp0000644000000000000000000000013213214316025020471 xustar000000000000000030 mtime=1513200661.139761425 30 atime=1513200668.722854169 30 ctime=1513200661.139761425 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/0000755000175000002070000000000013214316025020614 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712350510553022610 xustar000000000000000027 mtime=1403162987.293169 30 atime=1513200599.994013589 30 ctime=1513200661.117761156 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/Makefile.am0000644000175000002070000000135012350510553022651 0ustar00mockbuildmock00000000000000SUBDIRS = schema noinst_LTLIBRARIES = libarcpdp.la libarcpdp_la_SOURCES = \ ArcPDP.cpp ArcPDP.h ArcEvaluationCtx.cpp ArcEvaluator.cpp \ ArcRequest.cpp ArcRequestItem.cpp ArcAttributeFactory.cpp \ ArcPolicy.cpp ArcRule.cpp ArcFnFactory.cpp ArcAlgFactory.cpp \ ArcEvaluationCtx.h ArcEvaluator.h ArcRequest.h ArcRequestItem.h \ ArcAttributeFactory.h ArcPolicy.h ArcRule.h ArcFnFactory.h \ ArcAlgFactory.h ArcAttributeProxy.h libarcpdp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcpdp_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la noinst_DATA = Policy.xsd Request.xsd Response.xsd EXTRA_DIST = $(noinst_DATA) nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/Request.xsd0000644000000000000000000000012411231304743022720 xustar000000000000000027 mtime=1248168419.114661 27 atime=1513200575.106709 30 ctime=1513200661.137761401 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/Request.xsd0000644000175000002070000001463711231304743023000 0ustar00mockbuildmock00000000000000 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcRule.h0000644000000000000000000000012311100362210022241 xustar000000000000000027 
mtime=1224860808.620442 27 atime=1513200575.106709 29 ctime=1513200661.13276134 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcRule.h0000644000175000002070000000763311100362210022320 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ARCRULE_H__ #define __ARC_SEC_ARCRULE_H__ #include #include #include #include #include #include #include #include namespace ArcSec { ///Pair Match include the AttributeValue object in and the Function which is used to handle the AttributeValue, ///default function is "Equal", if some other function is used, it should be explicitly specified, e.g. ////vo.knowarc/usergroupA typedef std::pair Match; /** example inside : /O=NorduGrid/OU=UIO/CN=test /vo.knowarc/usergroupA /O=Grid/OU=KnowARC/CN=XYZ urn:mace:shibboleth:examples subgrpexample1 */ /** "And" relationship means the request should satisfy all of the items /O=Grid/OU=KnowARC/CN=XYZ urn:mace:shibboleth:examples */ /** "Or" relationship meand the request should satisfy any of the items /O=Grid/OU=KnowARC/CN=ABC /vo.knowarc/usergroupA /O=Grid/OU=KnowARC/CN=XYZ urn:mace:shibboleth:examples subgrpexample1 */ ///AndList - include items inside one (or ) typedef std::list AndList; ///OrList - include items inside one (or ) typedef std::list OrList; enum Id_MatchResult { //The "id" of all the s under a (or other type) is matched //by s under in ID_MATCH = 0, //Part "id" is matched ID_PARTIAL_MATCH = 1, //Any "id" of the s is not matched ID_NO_MATCH = 2 }; ///ArcRule class to parse Arc specific node class ArcRule : public Policy { public: ArcRule(const Arc::XMLNode node, EvaluatorContext* ctx); virtual std::string getEffect() const; virtual Result eval(EvaluationCtx* ctx); virtual MatchResult match(EvaluationCtx* ctx); virtual ~ArcRule(); virtual operator bool(void) const ; virtual EvalResult& getEvalResult(); virtual void setEvalResult(EvalResult& res); const char* getEvalName() const; const char* getName() const; private: /**Parse the inside one Can also refer to the other source by using , the attribute is the location of the refered file the value "subgrpexample" is the index for searching in the refered file */ void getItemlist(Arc::XMLNode& nd, OrList& items, const std::string& itemtype, const std::string& type_attr, const std::string& function_attr); private: std::string effect; std::string id; std::string version; std::string description; OrList subjects; OrList resources; OrList actions; OrList conditions; AttributeFactory* attrfactory; FnFactory* fnfactory; EvalResult evalres; Arc::XMLNode rulenode; Id_MatchResult sub_idmatched; Id_MatchResult res_idmatched; Id_MatchResult act_idmatched; Id_MatchResult ctx_idmatched; protected: static Arc::Logger logger; }; } // namespace ArcSec #endif /* __ARC_SEC_ARCRULE_H__ */ nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315730022615 xustar000000000000000030 mtime=1513200600.058014372 30 atime=1513200649.213615564 30 ctime=1513200661.118761169 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/Makefile.in0000644000175000002070000011524013214315730022666 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/arcpdp DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libarcpdp_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libarcpdp_la_OBJECTS = libarcpdp_la-ArcPDP.lo \ libarcpdp_la-ArcEvaluationCtx.lo libarcpdp_la-ArcEvaluator.lo \ libarcpdp_la-ArcRequest.lo libarcpdp_la-ArcRequestItem.lo \ libarcpdp_la-ArcAttributeFactory.lo libarcpdp_la-ArcPolicy.lo \ libarcpdp_la-ArcRule.lo libarcpdp_la-ArcFnFactory.lo \ libarcpdp_la-ArcAlgFactory.lo libarcpdp_la_OBJECTS = $(am_libarcpdp_la_OBJECTS) libarcpdp_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libarcpdp_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libarcpdp_la_SOURCES) DIST_SOURCES = $(libarcpdp_la_SOURCES) 
RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive DATA = $(noinst_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN 
= @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = 
@PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = schema noinst_LTLIBRARIES = libarcpdp.la libarcpdp_la_SOURCES = \ ArcPDP.cpp ArcPDP.h ArcEvaluationCtx.cpp ArcEvaluator.cpp \ ArcRequest.cpp ArcRequestItem.cpp ArcAttributeFactory.cpp \ ArcPolicy.cpp ArcRule.cpp ArcFnFactory.cpp ArcAlgFactory.cpp \ ArcEvaluationCtx.h ArcEvaluator.h ArcRequest.h ArcRequestItem.h \ ArcAttributeFactory.h ArcPolicy.h ArcRule.h ArcFnFactory.h \ ArcAlgFactory.h 
ArcAttributeProxy.h libarcpdp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libarcpdp_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la noinst_DATA = Policy.xsd Request.xsd Response.xsd EXTRA_DIST = $(noinst_DATA) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/arcpdp/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/arcpdp/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libarcpdp.la: $(libarcpdp_la_OBJECTS) $(libarcpdp_la_DEPENDENCIES) $(libarcpdp_la_LINK) $(libarcpdp_la_OBJECTS) $(libarcpdp_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcpdp_la-ArcAlgFactory.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcpdp_la-ArcAttributeFactory.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcpdp_la-ArcEvaluationCtx.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcpdp_la-ArcEvaluator.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcpdp_la-ArcFnFactory.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcpdp_la-ArcPDP.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcpdp_la-ArcPolicy.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcpdp_la-ArcRequest.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcpdp_la-ArcRequestItem.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcpdp_la-ArcRule.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libarcpdp_la-ArcPDP.lo: ArcPDP.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libarcpdp_la-ArcPDP.lo -MD -MP -MF $(DEPDIR)/libarcpdp_la-ArcPDP.Tpo -c -o libarcpdp_la-ArcPDP.lo `test -f 'ArcPDP.cpp' || echo '$(srcdir)/'`ArcPDP.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcpdp_la-ArcPDP.Tpo $(DEPDIR)/libarcpdp_la-ArcPDP.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArcPDP.cpp' object='libarcpdp_la-ArcPDP.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcpdp_la-ArcPDP.lo `test -f 'ArcPDP.cpp' || echo '$(srcdir)/'`ArcPDP.cpp libarcpdp_la-ArcEvaluationCtx.lo: ArcEvaluationCtx.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libarcpdp_la-ArcEvaluationCtx.lo -MD -MP -MF $(DEPDIR)/libarcpdp_la-ArcEvaluationCtx.Tpo -c -o libarcpdp_la-ArcEvaluationCtx.lo `test -f 'ArcEvaluationCtx.cpp' || echo '$(srcdir)/'`ArcEvaluationCtx.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcpdp_la-ArcEvaluationCtx.Tpo $(DEPDIR)/libarcpdp_la-ArcEvaluationCtx.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArcEvaluationCtx.cpp' object='libarcpdp_la-ArcEvaluationCtx.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcpdp_la-ArcEvaluationCtx.lo `test -f 'ArcEvaluationCtx.cpp' || echo '$(srcdir)/'`ArcEvaluationCtx.cpp libarcpdp_la-ArcEvaluator.lo: ArcEvaluator.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libarcpdp_la-ArcEvaluator.lo -MD -MP -MF $(DEPDIR)/libarcpdp_la-ArcEvaluator.Tpo -c -o libarcpdp_la-ArcEvaluator.lo `test -f 'ArcEvaluator.cpp' || echo '$(srcdir)/'`ArcEvaluator.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcpdp_la-ArcEvaluator.Tpo $(DEPDIR)/libarcpdp_la-ArcEvaluator.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArcEvaluator.cpp' object='libarcpdp_la-ArcEvaluator.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ 
@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcpdp_la-ArcEvaluator.lo `test -f 'ArcEvaluator.cpp' || echo '$(srcdir)/'`ArcEvaluator.cpp libarcpdp_la-ArcRequest.lo: ArcRequest.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libarcpdp_la-ArcRequest.lo -MD -MP -MF $(DEPDIR)/libarcpdp_la-ArcRequest.Tpo -c -o libarcpdp_la-ArcRequest.lo `test -f 'ArcRequest.cpp' || echo '$(srcdir)/'`ArcRequest.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcpdp_la-ArcRequest.Tpo $(DEPDIR)/libarcpdp_la-ArcRequest.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArcRequest.cpp' object='libarcpdp_la-ArcRequest.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcpdp_la-ArcRequest.lo `test -f 'ArcRequest.cpp' || echo '$(srcdir)/'`ArcRequest.cpp libarcpdp_la-ArcRequestItem.lo: ArcRequestItem.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libarcpdp_la-ArcRequestItem.lo -MD -MP -MF $(DEPDIR)/libarcpdp_la-ArcRequestItem.Tpo -c -o libarcpdp_la-ArcRequestItem.lo `test -f 'ArcRequestItem.cpp' || echo '$(srcdir)/'`ArcRequestItem.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcpdp_la-ArcRequestItem.Tpo $(DEPDIR)/libarcpdp_la-ArcRequestItem.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArcRequestItem.cpp' object='libarcpdp_la-ArcRequestItem.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcpdp_la-ArcRequestItem.lo `test -f 'ArcRequestItem.cpp' || echo '$(srcdir)/'`ArcRequestItem.cpp libarcpdp_la-ArcAttributeFactory.lo: ArcAttributeFactory.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libarcpdp_la-ArcAttributeFactory.lo -MD -MP -MF $(DEPDIR)/libarcpdp_la-ArcAttributeFactory.Tpo -c -o libarcpdp_la-ArcAttributeFactory.lo `test -f 'ArcAttributeFactory.cpp' || echo '$(srcdir)/'`ArcAttributeFactory.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcpdp_la-ArcAttributeFactory.Tpo $(DEPDIR)/libarcpdp_la-ArcAttributeFactory.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArcAttributeFactory.cpp' object='libarcpdp_la-ArcAttributeFactory.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcpdp_la-ArcAttributeFactory.lo 
`test -f 'ArcAttributeFactory.cpp' || echo '$(srcdir)/'`ArcAttributeFactory.cpp libarcpdp_la-ArcPolicy.lo: ArcPolicy.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libarcpdp_la-ArcPolicy.lo -MD -MP -MF $(DEPDIR)/libarcpdp_la-ArcPolicy.Tpo -c -o libarcpdp_la-ArcPolicy.lo `test -f 'ArcPolicy.cpp' || echo '$(srcdir)/'`ArcPolicy.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcpdp_la-ArcPolicy.Tpo $(DEPDIR)/libarcpdp_la-ArcPolicy.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArcPolicy.cpp' object='libarcpdp_la-ArcPolicy.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcpdp_la-ArcPolicy.lo `test -f 'ArcPolicy.cpp' || echo '$(srcdir)/'`ArcPolicy.cpp libarcpdp_la-ArcRule.lo: ArcRule.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libarcpdp_la-ArcRule.lo -MD -MP -MF $(DEPDIR)/libarcpdp_la-ArcRule.Tpo -c -o libarcpdp_la-ArcRule.lo `test -f 'ArcRule.cpp' || echo '$(srcdir)/'`ArcRule.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcpdp_la-ArcRule.Tpo $(DEPDIR)/libarcpdp_la-ArcRule.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArcRule.cpp' object='libarcpdp_la-ArcRule.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcpdp_la-ArcRule.lo `test -f 'ArcRule.cpp' || echo '$(srcdir)/'`ArcRule.cpp libarcpdp_la-ArcFnFactory.lo: ArcFnFactory.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libarcpdp_la-ArcFnFactory.lo -MD -MP -MF $(DEPDIR)/libarcpdp_la-ArcFnFactory.Tpo -c -o libarcpdp_la-ArcFnFactory.lo `test -f 'ArcFnFactory.cpp' || echo '$(srcdir)/'`ArcFnFactory.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcpdp_la-ArcFnFactory.Tpo $(DEPDIR)/libarcpdp_la-ArcFnFactory.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArcFnFactory.cpp' object='libarcpdp_la-ArcFnFactory.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcpdp_la-ArcFnFactory.lo `test -f 'ArcFnFactory.cpp' || echo '$(srcdir)/'`ArcFnFactory.cpp libarcpdp_la-ArcAlgFactory.lo: ArcAlgFactory.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libarcpdp_la-ArcAlgFactory.lo -MD -MP -MF $(DEPDIR)/libarcpdp_la-ArcAlgFactory.Tpo -c -o 
libarcpdp_la-ArcAlgFactory.lo `test -f 'ArcAlgFactory.cpp' || echo '$(srcdir)/'`ArcAlgFactory.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libarcpdp_la-ArcAlgFactory.Tpo $(DEPDIR)/libarcpdp_la-ArcAlgFactory.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ArcAlgFactory.cpp' object='libarcpdp_la-ArcAlgFactory.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcpdp_la-ArcAlgFactory.lo `test -f 'ArcAlgFactory.cpp' || echo '$(srcdir)/'`ArcAlgFactory.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . 
|| ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(DATA) installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-noinstLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcAttributeProxy.h0000644000000000000000000000012411056763077024372 xustar000000000000000027 mtime=1220273727.447932 27 atime=1513200575.104709 30 ctime=1513200661.135761377 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcAttributeProxy.h0000644000175000002070000000210511056763077024435 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ARCATTRIBUTEPROXY_H__ #define __ARC_SEC_ARCATTRIBUTEPROXY_H__ #include #include #include #include #include #include namespace ArcSec { ///Arc specific AttributeProxy class template class ArcAttributeProxy : public AttributeProxy { public: ArcAttributeProxy(){}; virtual ~ArcAttributeProxy(){}; public: virtual AttributeValue* getAttribute(const Arc::XMLNode& node); }; ///Implementation of getAttribute method template AttributeValue* ArcAttributeProxy::getAttribute(const Arc::XMLNode& node){ Arc::XMLNode x = node; std::string value = (std::string)x; if(value.empty()) x=x.Child(0); // ??? 
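// Apparent intent of the fallback above (hedged reading of the code, not documented upstream): when the
// element carries no text content of its own, descend to its first child and re-read the value, and the
// AttributeId/Id, from that child instead.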
value = (std::string)x; std::string attrid = (std::string)(x.Attribute("AttributeId")); if(attrid.empty()) attrid = (std::string)(x.Attribute("Id")); return new TheAttribute(value, attrid); } } // namespace ArcSec #endif /* __ARC_SEC_ARCATTRIBUTEPROXY_H__ */ nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcEvaluator.cpp0000644000000000000000000000012411730411253023643 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.099709 30 ctime=1513200661.121761205 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcEvaluator.cpp0000644000175000002070000003511611730411253023716 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "ArcEvaluator.h" #include "ArcEvaluationCtx.h" Arc::Plugin* ArcSec::ArcEvaluator::get_evaluator(Arc::PluginArgument* arg) { Arc::ClassLoaderPluginArgument* clarg = arg?dynamic_cast(arg):NULL; if(!clarg) return NULL; return new ArcSec::ArcEvaluator((Arc::XMLNode*)(*clarg),arg); } //loader_descriptors __arc_evaluator_modules__ = { // { "arc.evaluator", 0, &ArcSec::ArcEvaluator::get_evaluator }, // { NULL, 0, NULL } //}; using namespace Arc; using namespace ArcSec; Arc::Logger ArcSec::ArcEvaluator::logger(Arc::Logger::rootLogger, "ArcEvaluator"); void ArcEvaluator::parsecfg(Arc::XMLNode& cfg){ std::string policystore, policylocation, functionfactory, attributefactory, combingalgfactory; XMLNode nd; Arc::NS nsList; std::list res; nsList.insert(std::pair("pdp","http://www.nordugrid.org/schemas/pdp/Config")); //Get the name of "PolicyStore" class //res = cfg.XPathLookup("//pdp:PolicyStore", nsList); //presently, there can be only one PolicyStore //if(!(res.empty())){ // nd = *(res.begin()); // policystore = (std::string)(nd.Attribute("name")); // policylocation = (std::string)(nd.Attribute("location")); //} //else if (res.empty()){ // logger.msg(ERROR, "No any policy exists, the policy engine can not be loaded"); // exit(1); //} //Get the name of "FunctionFactory" class res = cfg.XPathLookup("//pdp:FunctionFactory", nsList); if(!(res.empty())){ nd = *(res.begin()); functionfactory = (std::string)(nd.Attribute("name")); } else { logger.msg(ERROR, "Can not parse classname for FunctionFactory from configuration"); return;} //Get the name of "AttributeFactory" class res = cfg.XPathLookup("//pdp:AttributeFactory", nsList); if(!(res.empty())){ nd = *(res.begin()); attributefactory = (std::string)(nd.Attribute("name")); } else { logger.msg(ERROR, "Can not parse classname for AttributeFactory from configuration"); return;} //Get the name of "CombiningAlgorithmFactory" class res = cfg.XPathLookup("//pdp:CombingAlgorithmFactory", nsList); if(!(res.empty())){ nd = *(res.begin()); combingalgfactory = (std::string)(nd.Attribute("name")); } else { logger.msg(ERROR, "Can not parse classname for CombiningAlgorithmFactory from configuration"); return;} //Get the name of the "Request" class res = m_cfg->XPathLookup("//pdp:Request", nsList); if(!(res.empty())){ nd = *(res.begin()); request_classname = (std::string)(nd.Attribute("name")); } else { logger.msg(ERROR, "Can not parse classname for Request from configuration"); return;} //Get the name of the "Policy" class std::string policy_classname; res = m_cfg->XPathLookup("//pdp:Policy", nsList); if(!(res.empty())){ nd = *(res.begin()); policy_classname = (std::string)(nd.Attribute("name")); } else { logger.msg(ERROR, "Can not parse classname for Policy from configuration"); return;} //Get the ClassLoader object; The object which loads this ArcEvaluator 
should have //constructed ClassLoader by using ClassLoader(cfg), and putting the configuration //information into it; meanwhile ClassLoader is designed as a Singleton, so here //we don't need to intialte ClassLoader by using ClassLoader(cfg); ClassLoader* classloader; classloader=ClassLoader::getClassLoader(); attrfactory=NULL; attrfactory = (AttributeFactory*)(classloader->Instance(attributefactory)); if(attrfactory == NULL) logger.msg(ERROR, "Can not dynamically produce AttributeFactory"); fnfactory=NULL; fnfactory = (FnFactory*)(classloader->Instance(functionfactory)); if(fnfactory == NULL) logger.msg(ERROR, "Can not dynamically produce FnFactory"); algfactory=NULL; algfactory = (AlgFactory*)(classloader->Instance(combingalgfactory)); if(algfactory == NULL) logger.msg(ERROR, "Can not dynamically produce AlgFacroty"); //Create the EvaluatorContext for the usage of creating Policy context = new EvaluatorContext(this); std::string alg("Permit-Overrides"); //std::list filelist; //filelist.push_back(policylocation); //plstore = new PolicyStore(filelist, alg, policy_classname, context); plstore = new PolicyStore(alg, policy_classname, context); if(plstore == NULL) logger.msg(ERROR, "Can not create PolicyStore object"); } ArcEvaluator::ArcEvaluator(Arc::XMLNode* cfg,Arc::PluginArgument* parg) : Evaluator(cfg,parg), m_cfg(cfg) { plstore = NULL;; fnfactory = NULL; attrfactory = NULL; algfactory = NULL; combining_alg = EvaluatorFailsOnDeny; combining_alg_ex = NULL; context = NULL; parsecfg(*m_cfg); } ArcEvaluator::ArcEvaluator(const char * cfgfile,Arc::PluginArgument* parg) : Evaluator(cfgfile,parg){ combining_alg = EvaluatorFailsOnDeny; combining_alg_ex = NULL; std::string str; std::string xml_str = ""; std::ifstream f(cfgfile); while (f >> str) { xml_str.append(str); xml_str.append(" "); } f.close(); Arc::XMLNode node(xml_str); parsecfg(node); } void ArcEvaluator::setCombiningAlg(EvaluatorCombiningAlg alg) { combining_alg = alg; } void ArcEvaluator::setCombiningAlg(CombiningAlg* alg) { combining_alg_ex = alg; } Request* ArcEvaluator::make_reqobj(XMLNode& reqnode){ Request* request = NULL; std::string requestor; Arc::ClassLoader* classloader = NULL; //Since the configuration information for loader has been got before (when create ArcEvaluator), //it is not necessary to get once more here classloader = ClassLoader::getClassLoader(); //Load the Request object request = (ArcSec::Request*)(classloader->Instance(request_classname,&reqnode)); if(request == NULL) logger.msg(Arc::ERROR, "Can not dynamically produce Request"); return request; } Response* ArcEvaluator::evaluate(Request* request){ Request* req = request; req->setAttributeFactory(attrfactory); req->make_request(); EvaluationCtx * evalctx = NULL; evalctx = new ArcEvaluationCtx(req); //evaluate the request based on policy if(evalctx) return evaluate(evalctx); return NULL; } Response* ArcEvaluator::evaluate(const Source& req){ //0.Prepare request for evaluation Arc::XMLNode node = req.Get(); NS ns; ns["ra"]="http://www.nordugrid.org/schemas/request-arc"; node.Namespaces(ns); //1.Create the request object according to the configuration Request* request = make_reqobj(node); if(request == NULL) return NULL; //2.Pre-process the Request object request->setAttributeFactory(attrfactory); request->make_request(); EvaluationCtx * evalctx = new ArcEvaluationCtx(request); //3.evaluate the request based on policy Response* resp = evaluate(evalctx); delete request; return resp; } // NOTE: This method deletes passed context on exit Response* 
ArcEvaluator::evaluate(EvaluationCtx* evl_ctx){ if(!evl_ctx) return NULL; //Split request into tuples ArcEvaluationCtx* ctx = dynamic_cast(evl_ctx); if(!ctx) { delete evl_ctx; return NULL; } ctx->split(); std::list policies; std::list::iterator policyit; std::list reqtuples = ctx->getRequestTuples(); std::list::iterator it; Response* resp = new Response(); resp->setRequestSize(reqtuples.size()); for(it = reqtuples.begin(); it != reqtuples.end(); it++){ //set the current RequestTuple for evaluation //RequestTuple will be evaluated one by one ctx->setEvalTuple(*it); policies = plstore->findPolicy(ctx); std::list permitset; if(!combining_alg) { bool atleast_onepermit = false; bool atleast_onedeny = false; bool atleast_onenotapplicable = false; bool atleast_oneindeterminate = false; Result result = DECISION_NOT_APPLICABLE; //Each policy evaluates the present RequestTuple, using default combiningalg between s: PERMIT-OVERRIDES for(policyit = policies.begin(); policyit != policies.end(); policyit++){ Result res = ((Policy*)(*policyit))->eval(ctx); logger.msg(VERBOSE,"Result value (0=Permit, 1=Deny, 2=Indeterminate, 3=Not_Applicable): %d", res); if(combining_alg == EvaluatorStopsOnDeny) { if(res == DECISION_PERMIT){ permitset.push_back(*policyit); atleast_onepermit = true; } else if(res == DECISION_DENY) { atleast_onedeny = true; break; } else if(res == DECISION_INDETERMINATE) atleast_oneindeterminate = true; else if(res == DECISION_NOT_APPLICABLE) atleast_onenotapplicable = true; } else if(combining_alg == EvaluatorStopsOnPermit) { if(res == DECISION_PERMIT){ permitset.push_back(*policyit); atleast_onepermit = true; break; } else if(res == DECISION_DENY) atleast_onedeny = true; else if(res == DECISION_INDETERMINATE) atleast_oneindeterminate = true; else if(res == DECISION_NOT_APPLICABLE) atleast_onenotapplicable = true; } else if(combining_alg == EvaluatorStopsNever) { if(res == DECISION_PERMIT){ permitset.push_back(*policyit); atleast_onepermit = true; } else if(res == DECISION_DENY) atleast_onedeny = true; else if(res == DECISION_INDETERMINATE) atleast_oneindeterminate = true; else if(res == DECISION_NOT_APPLICABLE) atleast_onenotapplicable = true; } else { // EvaluatorFailsOnDeny //If there is one policy gives negative evaluation result, then jump out //For RequestTuple which is denied, we will not feedback any information so far if(res == DECISION_PERMIT){ permitset.push_back(*policyit); atleast_onepermit = true; } else if (res == DECISION_DENY) { atleast_onedeny = true; permitset.clear(); break; } else if(res == DECISION_INDETERMINATE) atleast_oneindeterminate = true; else if(res == DECISION_NOT_APPLICABLE) atleast_onenotapplicable = true; }; } //The decision for this RequestTuple is recorded. 
Here the algorithm is Permit-Overides, //if any policy gives "Permit", the result is "Permit"; //if no policy gives "Permit", and any policy gives "Deny", the result is "Deny"; //if no policy gives "Permit", no policy gives "Deny", if(atleast_onepermit == true) result = DECISION_PERMIT; else if(atleast_onepermit == false && atleast_onedeny ==true) result = DECISION_DENY; else if(atleast_onepermit == false && atleast_onedeny ==false && atleast_oneindeterminate == true) result = DECISION_INDETERMINATE; else if(atleast_onepermit == false && atleast_onedeny ==false && atleast_oneindeterminate == false && atleast_onenotapplicable == true) result = DECISION_NOT_APPLICABLE; ResponseItem* item = new ResponseItem; ArcRequestTuple* reqtuple = new ArcRequestTuple; reqtuple->duplicate(*it); item->reqtp = reqtuple; item->reqxml = reqtuple->getNode(); item->res = result; //For RequestTuple that passes the evaluation check, fill the information into ResponseItem if(atleast_onepermit){ std::list::iterator permit_it; for(permit_it = permitset.begin(); permit_it != permitset.end(); permit_it++){ item->pls.push_back((Policy*)(*permit_it)); EvalResult evalres = ((Policy*)(*permit_it))->getEvalResult(); //TODO, handle policyset XMLNode policyxml = evalres.node; (item->plsxml).push_back(policyxml); } } //Store the ResponseItem resp->addResponseItem(item); } else { // if(combining_alg_ex) // Now if real combining algorithm defined use it instead // of hardcoded mess above. std::list plist; // Preparing list of policies to evaluate for(policyit = policies.begin(); policyit != policies.end(); policyit++){ plist.push_back((Policy*)(*policyit)); }; // Running request tuple and policies through evaluator // and combining results // TODO: record permitset (is it really needed?) 
Result result = combining_alg_ex->combine(ctx,plist); ResponseItem* item = new ResponseItem; ArcRequestTuple* reqtuple = new ArcRequestTuple; // reqtuple->duplicate((ArcRequestTuple*)(*it)); reqtuple->duplicate(*it); item->reqtp = reqtuple; item->reqxml = reqtuple->getNode(); item->res = result; // Recording positive response - not implemented yet //if(result == DECISION_PERMIT) { // std::list::iterator permit_it; // for(pit = permitset.begin(); pit != permitset.end(); pit++){ // item->pls.push_back((Policy*)(*pit)); // EvalResult evalres = ((Policy*)(*pit))->getEvalResult(); // //TODO, handle policyset // XMLNode policyxml = evalres.node; // (item->plsxml).push_back(policyxml); // } //} //Store the ResponseItem resp->addResponseItem(item); } } delete evl_ctx; return resp; } Response* ArcEvaluator::evaluate(Request* request, const Source& policy) { plstore->removePolicies(); plstore->addPolicy(policy, context, ""); Response* resp = evaluate(request); plstore->removePolicies(); return resp; } Response* ArcEvaluator::evaluate(const Source& request, const Source& policy) { plstore->removePolicies(); plstore->addPolicy(policy, context, ""); Response* resp = evaluate(request); plstore->removePolicies(); return resp; } Response* ArcEvaluator::evaluate(Request* request, Policy* policyobj) { plstore->removePolicies(); plstore->addPolicy(policyobj, context, ""); Response* resp = evaluate(request); plstore->releasePolicies(); return resp; } Response* ArcEvaluator::evaluate(const Source& request, Policy* policyobj) { plstore->removePolicies(); plstore->addPolicy(policyobj, context, ""); Response* resp = evaluate(request); plstore->releasePolicies(); return resp; } const char* ArcEvaluator::getName(void) const { return "arc.evaluator"; } ArcEvaluator::~ArcEvaluator(){ //TODO delete all the object if(plstore) delete plstore; if(context) delete context; if(fnfactory) delete fnfactory; if(attrfactory) delete attrfactory; if(algfactory) delete algfactory; } nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcEvaluationCtx.cpp0000644000000000000000000000012412044527530024474 xustar000000000000000027 mtime=1351790424.395655 27 atime=1513200575.091709 30 ctime=1513200661.120761193 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcEvaluationCtx.cpp0000644000175000002070000002672712044527530024557 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "ArcEvaluationCtx.h" using namespace Arc; using namespace ArcSec; Logger ArcEvaluationCtx::logger(Arc::Logger::rootLogger, "ArcEvaluationCtx"); ArcRequestTuple::ArcRequestTuple() : RequestTuple() { NS ns; ns["ra"]="http://www.nordugrid.org/schemas/request-arc"; XMLNode tupledoc(ns,"ra:RequestItem"); tupledoc.New(tuple); } RequestTuple* ArcRequestTuple::duplicate(const RequestTuple* req_tpl) { XMLNode root = tuple; //ArcRequestTuple* tpl = dynamic_cast(req_tpl); //Reconstruct the XMLNode by using the information inside req_tpl //Reconstruct subject Subject::iterator sit; Subject req_sub = req_tpl->sub; XMLNode subject; if(!req_sub.empty()) subject = root.NewChild("ra:Subject"); for(sit = req_sub.begin(); sit != req_sub.end(); sit++){ //Record the object of the Attribute RequestAttribute* attr = new RequestAttribute; attr->duplicate(*(*sit)); sub.push_back(attr); //Record the xml node of the Attribute XMLNode subjectattr = subject.NewChild("ra:Attribute"); subjectattr = ((*sit)->getAttributeValue())->encode(); XMLNode subjectattr_attr = subjectattr.NewAttribute("ra:Type"); subjectattr_attr = ((*sit)->getAttributeValue())->getType(); 
subjectattr_attr = subjectattr.NewAttribute("ra:AttributeId"); subjectattr_attr = ((*sit)->getAttributeValue())->getId(); /* AttributeValue *attrval; attrval = (*sit)->getAttributeValue(); if(attrval) std::cout<< "Attribute Value:"<< (attrval->encode()).c_str() << std::endl; */ } //Reconstruct resource Resource::iterator rit; Resource req_res = req_tpl->res; XMLNode resource; if(!req_res.empty()) resource = root.NewChild("ra:Resource"); for(rit = req_res.begin(); rit != req_res.end(); rit++){ RequestAttribute* attr = new RequestAttribute; attr->duplicate(*(*rit)); res.push_back(attr); XMLNode resourceattr = resource.NewChild("ra:Attribute"); resourceattr = ((*rit)->getAttributeValue())->encode(); XMLNode resourceattr_attr = resourceattr.NewAttribute("ra:Type"); resourceattr_attr = ((*rit)->getAttributeValue())->getType(); resourceattr_attr = resourceattr.NewAttribute("ra:AttributeId"); resourceattr_attr = ((*rit)->getAttributeValue())->getId(); } //Reconstruct action Action::iterator ait; Action req_act = req_tpl->act; XMLNode action; if(!req_act.empty()) action = root.NewChild("ra:Action"); for(ait = req_act.begin(); ait != req_act.end(); ait++){ RequestAttribute* attr = new RequestAttribute; attr->duplicate(*(*ait)); act.push_back(attr); XMLNode actionattr = action.NewChild("ra:Attribute"); actionattr = ((*ait)->getAttributeValue())->encode(); XMLNode actionattr_attr = actionattr.NewAttribute("ra:Type"); actionattr_attr = ((*ait)->getAttributeValue())->getType(); actionattr_attr = actionattr.NewAttribute("ra:AttributeId"); actionattr_attr = ((*ait)->getAttributeValue())->getId(); } //Reconstruct context Context::iterator cit; Context req_ctx = req_tpl->ctx; XMLNode context; if(!req_ctx.empty()) context = root.NewChild("ra:Context"); for(cit = req_ctx.begin(); cit != req_ctx.end(); cit++){ RequestAttribute* attr = new RequestAttribute; attr->duplicate(*(*cit)); ctx.push_back(attr); XMLNode contextattr = context.NewChild("ra:Attribute"); contextattr = ((*cit)->getAttributeValue())->encode(); XMLNode contextattr_attr = contextattr.NewAttribute("ra:Type"); contextattr_attr = ((*cit)->getAttributeValue())->getType(); contextattr_attr = contextattr.NewAttribute("ra:AttributeId"); contextattr_attr = ((*cit)->getAttributeValue())->getId(); } return this; } void ArcRequestTuple::erase() { while(!(sub.empty())){ delete sub.back(); sub.pop_back(); } while(!(res.empty())){ delete res.back(); res.pop_back(); } while(!(act.empty())){ delete act.back(); act.pop_back(); } while(!(ctx.empty())){ delete ctx.back(); ctx.pop_back(); } } ArcRequestTuple::~ArcRequestTuple() { while(!(sub.empty())){ sub.pop_back(); } while(!(res.empty())){ res.pop_back(); } while(!(act.empty())){ act.pop_back(); } while(!(ctx.empty())){ ctx.pop_back(); } } ArcEvaluationCtx::ArcEvaluationCtx(Request* request) : EvaluationCtx(req), req(NULL), reqitem(NULL), evaltuple(NULL) { req = request; } ArcEvaluationCtx::~ArcEvaluationCtx(){ //if(req) // delete req; while(!(reqtuples.empty())) { delete reqtuples.back(); reqtuples.pop_back(); } } Request* ArcEvaluationCtx::getRequest () const{ return req; } /* ArrtibuteValue * EvaluationCtx::getSubjectAttribute(){ } ArrtibuteValue * EvaluationCtx::getResourceAttribute(){ } ArrtibuteValue * EvaluationCtx::getActionAttribute(){ } ArrtibuteValue * EvaluationCtx::getContextAttribute(){ } */ static void add_tuple(std::list& reqtuples,Subject* subject,Resource* resource,Action* action,Context* context) { if(subject || resource || action || context) { ArcRequestTuple* reqtuple = new 
ArcRequestTuple; if(subject) reqtuple->sub = *subject; if(resource) reqtuple->res = *resource; if(action) reqtuple->act = *action; if(context) reqtuple->ctx = *context; reqtuples.push_back(reqtuple); }; } static void add_contexts(std::list& reqtuples,Subject* subject,Resource* resource,Action* action,CtxList& contexts) { if(contexts.empty()) { add_tuple(reqtuples,subject,resource,action,NULL); return; } CtxList::iterator cit = contexts.begin(); for(;cit != contexts.end();++cit) { add_tuple(reqtuples,subject,resource,action,&(*cit)); } } static void add_actions(std::list& reqtuples,Subject* subject,Resource* resource,ActList& actions,CtxList& contexts) { if(actions.empty()) { add_contexts(reqtuples,subject,resource,NULL,contexts); return; } ActList::iterator ait = actions.begin(); for(;ait != actions.end();++ait) { add_contexts(reqtuples,subject,resource,&(*ait),contexts); } } static void add_resources(std::list& reqtuples,Subject* subject,ResList& resources,ActList& actions,CtxList& contexts) { if(resources.empty()) { add_actions(reqtuples,subject,NULL,actions,contexts); return; } ResList::iterator rit = resources.begin(); for(;rit != resources.end();++rit) { add_actions(reqtuples,subject,&(*rit),actions,contexts); } } static void add_subjects(std::list& reqtuples,SubList& subjects,ResList& resources,ActList& actions,CtxList& contexts) { if(subjects.empty()) { add_resources(reqtuples,NULL,resources,actions,contexts); return; } SubList::iterator sit = subjects.begin(); for(;sit != subjects.end();++sit) { add_resources(reqtuples,&(*sit),resources,actions,contexts); } } void ArcEvaluationCtx::split(){ while(!reqtuples.empty()) { delete reqtuples.back(); reqtuples.pop_back(); } ReqItemList reqlist = req->getRequestItems(); logger.msg(VERBOSE,"There are %d RequestItems", reqlist.size()); std::list::iterator it; for (it = reqlist.begin(); it != reqlist.end(); it++) { SubList subjects = (*it)->getSubjects(); SubList::iterator sit; ResList resources = (*it)->getResources(); ResList::iterator rit; ActList actions = (*it)->getActions(); ActList::iterator ait; CtxList contexts = (*it)->getContexts(); CtxList::iterator cit; //Scan subjects, resources, actions and contexts inside one RequestItem object //to split subjects, resources, actions or contexts into some tuple with one subject, one resource, one action and context //See more descrioption in inteface RequestItem.h add_subjects(reqtuples,subjects,resources,actions,contexts); /* for(sit = subjects.begin(); sit != subjects.end(); sit++) { //The subject part will never be empty if(!resources.empty()) { for(rit = resources.begin(); rit != resources.end(); rit++){ if(!actions.empty()){ for(ait = actions.begin(); ait != actions.end(); ait++){ if(!contexts.empty()){ for(cit = contexts.begin(); cit != contexts.end(); cit++){ RequestTuple* reqtuple = new RequestTuple; reqtuple->sub = *sit; reqtuple->res = *rit; reqtuple->act = *ait; reqtuple->ctx = *cit; reqtuples.push_back(reqtuple); } } else { RequestTuple* reqtuple = new RequestTuple; reqtuple->sub = *sit; reqtuple->res = *rit; reqtuple->act = *ait; reqtuples.push_back(reqtuple); } } } else { if(!contexts.empty()){ for(cit = contexts.begin(); cit != contexts.end(); cit++){ RequestTuple* reqtuple = new RequestTuple; reqtuple->sub = *sit; reqtuple->res = *rit; reqtuple->ctx = *cit; reqtuples.push_back(reqtuple); } } else { RequestTuple* reqtuple = new RequestTuple; reqtuple->sub = *sit; reqtuple->res = *rit; reqtuples.push_back(reqtuple); } } } } else{ if(!actions.empty()){ for(ait = actions.begin(); 
ait != actions.end(); ait++){ if(!contexts.empty()){ for(cit = contexts.begin(); cit != contexts.end(); cit++){ RequestTuple* reqtuple = new RequestTuple; reqtuple->sub = *sit; reqtuple->act = *ait; reqtuple->ctx = *cit; reqtuples.push_back(reqtuple); } } else { RequestTuple* reqtuple = new RequestTuple; reqtuple->sub = *sit; reqtuple->act = *ait; reqtuples.push_back(reqtuple); } } } else { if(!contexts.empty()){ for(cit = contexts.begin(); cit != contexts.end(); cit++){ RequestTuple* reqtuple = new RequestTuple; reqtuple->sub = *sit; reqtuple->ctx = *cit; reqtuples.push_back(reqtuple); } } else { RequestTuple* reqtuple = new RequestTuple; reqtuple->sub = *sit; reqtuples.push_back(reqtuple); } } } } */ /* for(sit = subjects.begin(); sit != subjects.end(); sit++) { for(rit = resources.begin(); rit != resources.end(); rit++){ for(ait = actions.begin(); ait != actions.end(); ait++){ for(cit = contexts.begin(); cit != contexts.end(); cit++){ RequestTuple* reqtuple = new RequestTuple; reqtuple->sub = *sit; reqtuple->res = *rit; reqtuple->act = *ait; reqtuple->ctx = *cit; reqtuples.push_back(reqtuple); logger.msg(INFO, "Subject size: %d", (*sit).size()); Arc::Subject::iterator it; for (it = (*sit).begin(); it!= (*sit).end(); it++){ AttributeValue *attr; attr = (*it)->getAttributeValue(); if(attr!=NULL) logger.msg(INFO, "%s", attr->encode()); } } } } }*/ } } nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcAlgFactory.h0000644000000000000000000000012411730411253023401 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.099709 30 ctime=1513200661.134761364 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcAlgFactory.h0000644000175000002070000000142211730411253023445 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ARCALGFACTORY_H__ #define __ARC_SEC_ARCALGFACTORY_H__ #include #include #include #include namespace ArcSec { ///Algorithm factory class for Arc class ArcAlgFactory : public AlgFactory { public: ArcAlgFactory(Arc::PluginArgument* parg); virtual ~ArcAlgFactory(); public: /**return a Alg object according to the "CombiningAlg" attribute in the node; The ArcAlgFactory itself will release the Alg objects*/ virtual CombiningAlg* createAlg(const std::string& type); private: void initCombiningAlg(CombiningAlg* alg); void initCombiningAlgs(); }; Arc::Plugin* get_arcpdp_alg_factory (Arc::PluginArgument*); } // namespace ArcSec #endif /* __ARC_SEC_ARCALGFACTORY_H__ */ nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcRule.cpp0000644000000000000000000000012411730411253022610 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.104709 30 ctime=1513200661.124761242 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcRule.cpp0000644000175000002070000003301111730411253022653 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "ArcEvaluationCtx.h" #include "ArcRule.h" Arc::Logger ArcSec::ArcRule::logger(Arc::Logger::rootLogger, "ArcRule"); using namespace Arc; using namespace ArcSec; #define DEFAULT_ATTRIBUTE_TYPE "string" void ArcRule::getItemlist(XMLNode& nd, OrList& items, const std::string& itemtype, const std::string& type_attr, const std::string& function_attr){ XMLNode tnd; for(int i=0; icreateValue(tnd, type), fnfactory->createFn(function))); items.push_back(item); } else if((type.empty())&&(tnd.Size()>0)){ AndList item; int size = tnd.Size(); for(int k=0; k Element.Attribute("Function") > Subelement.Attribute("Type") + "equal" 
if(!((std::string)(snd.Attribute("Function"))).empty()) funcname = (std::string)(snd.Attribute("Function")); std::string function; if(funcname.empty()) function = EqualFunction::getFunctionName(type); else if(funcname == "Match" || funcname == "match" || funcname == "MATCH") function = MatchFunction::getFunctionName(type); else if(funcname == "InRange" || funcname == "inrange" || funcname == "INRANGE" || funcname == "Inrange") function = InRangeFunction::getFunctionName(type); else std::cout<<"Function Name is wrong"<createValue(snd, type), fnfactory->createFn(function))); } items.push_back(item); } else if(!(type.empty())&&(tnd.Size()>0)){ AndList item; int size = tnd.Size(); for(int k=0; kcreateValue(snd, type), fnfactory->createFn(function))); } items.push_back(item); } else{ //std::cerr <<"Error definition in policy"<> str) { xml_str.append(str); xml_str.append(" "); } f.close(); XMLNode root(xml_str); XMLNode subref = root.Child(); XMLNode snd; std::string itemgrouptype = itemtype + "Group"; for(int k=0;;k++){ snd = subref[itemgrouptype][k]; if(!snd) break; //If the reference ID in the policy file matches the ID in the external file, //try to get the subject information from the external file if((std::string)(snd.Attribute("GroupId")) == (std::string)tnd){ getItemlist(snd, items, itemtype, type_attr, function_attr); } } } } return; } ArcRule::ArcRule(const XMLNode node, EvaluatorContext* ctx) : Policy(node,NULL) { rulenode = node; evalres.node = rulenode; evalres.effect = "Not_applicable"; attrfactory = (AttributeFactory*)(*ctx); fnfactory = (FnFactory*)(*ctx); XMLNode nd, tnd; id = (std::string)(rulenode.Attribute("RuleId")); description = (std::string)(rulenode["Description"]); if((std::string)(rulenode.Attribute("Effect"))=="Permit") effect="Permit"; else if((std::string)(rulenode.Attribute("Effect"))=="Deny") effect="Deny"; //else //std::cerr<< "Invalid Effect" < req, Id_MatchResult& idmatched){ ArcSec::OrList::iterator orit; ArcSec::AndList::iterator andit; std::list::iterator reqit; bool indeterminate = true; idmatched = ID_NO_MATCH; //Go through each or under // or //For example, go through each element under in a rule, //once one element is satisfied, skip out. for( orit = items.begin(); orit != items.end(); orit++ ){ int all_fraction_matched = 0; int all_id_matched = 0; //For example, go through each element in one , //all of the elements should be satisfied for(andit = (*orit).begin(); andit != (*orit).end(); andit++){ bool one_req_matched = false; bool one_id_matched = false; //go through each element in one in Request.xml, //all of the should be satisfied. for(reqit = req.begin(); reqit != req.end(); reqit++){ //evaluate two "AttributeValue*" based on "Function" definition in "Rule" AttributeValue* res = NULL; try{ res = ((*andit).second)->evaluate((*andit).first, (*reqit)->getAttributeValue()); } catch(std::exception&) { }; BooleanAttribute bool_attr(true); if(res->equal(&bool_attr)) one_req_matched = true; if(res) delete res; //distinguish whether the "id" of the two s (from request and policy) are matched //here we distinguish three kinds of situation: //1. All the id under one (or other type) in the policy side is matched by // id under one in the request side; //2. Part of id is matched; //3. None of id is matched at all. if( ((*andit).first)->getId() == ((*reqit)->getAttributeValue())->getId() ) one_id_matched = true; } // if any of the in one Request's matches one of the // Rule.Subjects.Subject.Attribute, count the match number. 
Later if all of the // s under Rule.Subjects.Subject are matched, then the Rule.Subjects.Subject // is mathed. if(one_req_matched) all_fraction_matched +=1; //Similar to above, except only "id" is considered, not including the "value" of if(one_id_matched) all_id_matched +=1; } //One Rule.Subjects.Subject is satisfied (all of the Attribute value and Attribute Id are matched) //by the RequestTuple.Subject if(all_fraction_matched == int((*orit).size())){ idmatched = ID_MATCH; return MATCH; } else if(all_id_matched == int((*orit).size())) { idmatched = ID_MATCH; indeterminate = false; /*break;*/ } //else if(all_id_matched > 0) { idmatched = ID_PARTIAL_MATCH; } } if(indeterminate) return INDETERMINATE; return NO_MATCH; } MatchResult ArcRule::match(EvaluationCtx* eval_ctx){ ArcEvaluationCtx* ctx = dynamic_cast(eval_ctx); ArcRequestTuple* evaltuple = dynamic_cast(ctx->getEvalTuple()); //Reset the value for id matching, since the Rule object could be //used a number of times for match-making sub_idmatched = ID_NO_MATCH; res_idmatched = ID_NO_MATCH; act_idmatched = ID_NO_MATCH; ctx_idmatched = ID_NO_MATCH; MatchResult sub_matched, res_matched, act_matched, ctx_matched; sub_matched = itemMatch(subjects, evaltuple->sub, sub_idmatched); res_matched = itemMatch(resources, evaltuple->res, res_idmatched); act_matched = itemMatch(actions, evaltuple->act, act_idmatched); ctx_matched = itemMatch(conditions, evaltuple->ctx, ctx_idmatched); if( ( subjects.empty() || sub_matched==MATCH) && ( resources.empty() || res_matched==MATCH) && ( actions.empty() || act_matched==MATCH) && ( conditions.empty() || ctx_matched==MATCH) ) return MATCH; else if ( ( !(subjects.empty()) && sub_matched==INDETERMINATE ) || ( !(resources.empty()) &&res_matched==INDETERMINATE ) || ( !(actions.empty()) && act_matched==INDETERMINATE ) || ( !(conditions.empty()) && ctx_matched==INDETERMINATE) ) return INDETERMINATE; else return NO_MATCH; } Result ArcRule::eval(EvaluationCtx* ctx){ Result result = DECISION_NOT_APPLICABLE; MatchResult match_res = match(ctx); if(match_res == MATCH) { if(effect == "Permit") { result = DECISION_PERMIT; evalres.effect = "Permit"; } else if(effect == "Deny") { result = DECISION_DENY; evalres.effect = "Deny"; } return result; } else if(match_res == INDETERMINATE) { if(effect == "Permit") evalres.effect = "Permit"; else if(effect == "Deny") evalres.effect = "Deny"; return DECISION_INDETERMINATE; } else if(match_res == NO_MATCH){ if(effect == "Permit") evalres.effect = "Permit"; else if(effect == "Deny") evalres.effect = "Deny"; return DECISION_NOT_APPLICABLE; } return DECISION_NOT_APPLICABLE; } std::string ArcRule::getEffect() const { return effect; } EvalResult& ArcRule::getEvalResult() { return evalres; } void ArcRule::setEvalResult(EvalResult& res){ evalres = res; } ArcRule::operator bool(void) const { return true; } const char* ArcRule::getEvalName() const{ return "arc.evaluator"; } const char* ArcRule::getName() const{ return "arc.rule"; } ArcRule::~ArcRule(){ while(!(subjects.empty())){ AndList list = subjects.back(); while(!(list.empty())){ Match match = list.back(); if(match.first){ delete match.first; } list.pop_back(); } subjects.pop_back(); } while(!(resources.empty())){ AndList list = resources.back(); while(!(list.empty())){ Match match = list.back(); if(match.first) delete match.first; list.pop_back(); } resources.pop_back(); } while(!(actions.empty())){ AndList list = actions.back(); while(!(list.empty())){ Match match = list.back(); if(match.first) delete match.first; list.pop_back(); } 
actions.pop_back(); } while(!(conditions.empty())){ AndList list = conditions.back(); while(!(list.empty())){ Match match = list.back(); if(match.first) delete match.first; list.pop_back(); } conditions.pop_back(); } } nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcAttributeFactory.cpp0000644000000000000000000000012311730411253025173 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.096709 29 ctime=1513200661.12376123 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcAttributeFactory.cpp0000644000175000002070000000567511730411253025256 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "ArcAttributeFactory.h" #include #include #include #include #include #include #include "ArcAttributeProxy.h" /* loader_descriptors __arc_attrfactory_modules__ = { { "attr.factory", 0, &get_attr_factory }, { NULL, 0, NULL } }; */ using namespace Arc; namespace ArcSec { Arc::Plugin* get_arcpdp_attr_factory (Arc::PluginArgument* arg) { return new ArcSec::ArcAttributeFactory(arg); } void ArcAttributeFactory::initDatatypes(){ //Some Arc specified attribute types apmap.insert(std::pair(StringAttribute::getIdentifier(), new ArcAttributeProxy)); apmap.insert(std::pair(DateTimeAttribute::getIdentifier(), new ArcAttributeProxy)); apmap.insert(std::pair(DateAttribute::getIdentifier(), new ArcAttributeProxy)); apmap.insert(std::pair(TimeAttribute::getIdentifier(), new ArcAttributeProxy)); apmap.insert(std::pair(DurationAttribute::getIdentifier(), new ArcAttributeProxy)); apmap.insert(std::pair(PeriodAttribute::getIdentifier(), new ArcAttributeProxy)); apmap.insert(std::pair(X500NameAttribute::getIdentifier(), new ArcAttributeProxy)); apmap.insert(std::pair(AnyURIAttribute::getIdentifier(), new ArcAttributeProxy)); apmap.insert(std::pair(GenericAttribute::getIdentifier(), new ArcAttributeProxy)); /** TODO: other datatype............. 
*/ } ArcAttributeFactory::ArcAttributeFactory(Arc::PluginArgument* parg) : AttributeFactory(parg) { initDatatypes(); } AttributeValue* ArcAttributeFactory::createValue(const XMLNode& node, const std::string& type){ AttrProxyMap::iterator it; if((it=apmap.find(type)) != apmap.end()) return ((*it).second)->getAttribute(node); // This may look like hack, but generic attribute needs special treatment GenericAttribute* attr = new GenericAttribute( (std::string)const_cast(node), (std::string)(const_cast(node).Attribute("AttributeId"))); attr->setType(type); return attr; // return NULL; } ArcAttributeFactory::~ArcAttributeFactory(){ AttrProxyMap::iterator it; for(it = apmap.begin(); it != apmap.end(); it = apmap.begin()){ AttributeProxy* attrproxy = (*it).second; apmap.erase(it); if(attrproxy) delete attrproxy; } } } // namespace ArcSec nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcAlgFactory.cpp0000644000000000000000000000012411730411253023734 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.097709 30 ctime=1513200661.126761267 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcAlgFactory.cpp0000644000175000002070000000623211730411253024004 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "ArcAlgFactory.h" #include #include #include /* loader_descriptors __arc_algfactory_modules__ = { { "alg.factory", 0, &get_alg_factory }, { NULL, 0, NULL } }; */ using namespace Arc; namespace ArcSec { Arc::Plugin* get_arcpdp_alg_factory (Arc::PluginArgument* arg) { return new ArcSec::ArcAlgFactory(arg); } void ArcAlgFactory::initCombiningAlg(CombiningAlg* alg) { if(alg) algmap[alg->getalgId()]=alg; } void ArcAlgFactory::initCombiningAlgs(){ //Some Arc specified algorithm types initCombiningAlg(new DenyOverridesCombiningAlg); initCombiningAlg(new PermitOverridesCombiningAlg); initCombiningAlg(new PermitDenyIndeterminateNotApplicableCombiningAlg); initCombiningAlg(new PermitDenyNotApplicableIndeterminateCombiningAlg); initCombiningAlg(new PermitIndeterminateDenyNotApplicableCombiningAlg); initCombiningAlg(new PermitIndeterminateNotApplicableDenyCombiningAlg); initCombiningAlg(new PermitNotApplicableDenyIndeterminateCombiningAlg); initCombiningAlg(new PermitNotApplicableIndeterminateDenyCombiningAlg); initCombiningAlg(new DenyPermitIndeterminateNotApplicableCombiningAlg); initCombiningAlg(new DenyPermitNotApplicableIndeterminateCombiningAlg); initCombiningAlg(new DenyIndeterminatePermitNotApplicableCombiningAlg); initCombiningAlg(new DenyIndeterminateNotApplicablePermitCombiningAlg); initCombiningAlg(new DenyNotApplicablePermitIndeterminateCombiningAlg); initCombiningAlg(new DenyNotApplicableIndeterminatePermitCombiningAlg); initCombiningAlg(new IndeterminatePermitDenyNotApplicableCombiningAlg); initCombiningAlg(new IndeterminatePermitNotApplicableDenyCombiningAlg); initCombiningAlg(new IndeterminateDenyPermitNotApplicableCombiningAlg); initCombiningAlg(new IndeterminateDenyNotApplicablePermitCombiningAlg); initCombiningAlg(new IndeterminateNotApplicablePermitDenyCombiningAlg); initCombiningAlg(new IndeterminateNotApplicableDenyPermitCombiningAlg); initCombiningAlg(new NotApplicablePermitDenyIndeterminateCombiningAlg); initCombiningAlg(new NotApplicablePermitIndeterminateDenyCombiningAlg); initCombiningAlg(new NotApplicableDenyPermitIndeterminateCombiningAlg); initCombiningAlg(new NotApplicableDenyIndeterminatePermitCombiningAlg); initCombiningAlg(new NotApplicableIndeterminatePermitDenyCombiningAlg); initCombiningAlg(new 
NotApplicableIndeterminateDenyPermitCombiningAlg); /** TODO: other algorithm type............. */ } ArcAlgFactory::ArcAlgFactory(Arc::PluginArgument* parg) : AlgFactory(parg) { initCombiningAlgs(); } CombiningAlg* ArcAlgFactory::createAlg(const std::string& type){ AlgMap::iterator it; if((it=algmap.find(type)) != algmap.end()){ return (*it).second; } else return NULL; } ArcAlgFactory::~ArcAlgFactory(){ AlgMap::iterator it; for(it = algmap.begin(); it != algmap.end(); it = algmap.begin()){ CombiningAlg * alg = (*it).second; algmap.erase(it); if(alg) delete alg; } } } // namespace ArcSec nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcPDP.h0000644000000000000000000000012412110410653021765 xustar000000000000000027 mtime=1361187243.181034 27 atime=1513200575.094709 30 ctime=1513200661.119761181 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcPDP.h0000644000175000002070000000162612110410653022037 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ARCPDP_H__ #define __ARC_SEC_ARCPDP_H__ #include //#include #include #include #include namespace ArcSec { ///ArcPDP - PDP which can handle the Arc specific request and policy schema class ArcPDP : public PDP { public: static Arc::Plugin* get_arc_pdp(Arc::PluginArgument* arg); ArcPDP(Arc::Config* cfg, Arc::PluginArgument* parg); virtual ~ArcPDP(); /***/ virtual PDPStatus isPermitted(Arc::Message *msg) const; private: // Evaluator *eval; // Arc::ClassLoader* classloader; std::list select_attrs; std::list reject_attrs; std::list policy_locations; Arc::XMLNodeContainer policies; std::string policy_combining_alg; protected: static Arc::Logger logger; }; } // namespace ArcSec #endif /* __ARC_SEC_ARCPDP_H__ */ nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcPolicy.h0000644000000000000000000000012411730411253022605 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.102709 30 ctime=1513200661.131761328 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcPolicy.h0000644000175000002070000000364611730411253022663 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ARCPOLICY_H__ #define __ARC_SEC_ARCPOLICY_H__ #include #include #include #include #include #include namespace ArcSec { ///ArcPolicy class to parse and operate Arc specific node class ArcPolicy : public Policy { public: /**Constructor*/ ArcPolicy(Arc::PluginArgument* parg); /**Constructor*/ ArcPolicy(const Arc::XMLNode node, Arc::PluginArgument* parg); /**Constructor*/ ArcPolicy(const Arc::XMLNode node, EvaluatorContext* ctx, Arc::PluginArgument* parg); virtual ~ArcPolicy(); virtual operator bool(void) const { return (bool)policynode; }; virtual Result eval(EvaluationCtx* ctx); virtual void setEvaluatorContext(EvaluatorContext* evaluatorcontext) { evaluatorctx = evaluatorcontext; }; /**Parse XMLNode, and construct the low-level Rule object*/ virtual void make_policy(); virtual MatchResult match(EvaluationCtx* ctx); virtual std::string getEffect() const { return "Not_applicable";}; virtual EvalResult& getEvalResult(); virtual void setEvalResult(EvalResult& res); const char* getEvalName() const; const char* getName() const; static Arc::Plugin* get_policy(Arc::PluginArgument* arg); private: //std::list rules; std::string id; std::string version; /**The combining algorithm between lower-lever element, */ CombiningAlg *comalg; std::string description; /**Evaluator Context which contains factory object*/ EvaluatorContext* evaluatorctx; /**Algorithm factory*/ AlgFactory *algfactory; EvalResult evalres; /**Corresponding node*/ Arc::XMLNode policynode; /**Top element of policy tree*/ 
Arc::XMLNode policytop; protected: static Arc::Logger logger; }; } // namespace ArcSec #endif /* __ARC_SEC_ARCPOLICY_H__ */ nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/Response.xsd0000644000000000000000000000012411051650026023064 xustar000000000000000027 mtime=1218924566.337573 27 atime=1513200575.091709 30 ctime=1513200661.138761413 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/Response.xsd0000644000175000002070000000242511051650026023134 0ustar00mockbuildmock00000000000000 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcRequestItem.cpp0000644000000000000000000000012411011435655024154 xustar000000000000000027 mtime=1210465197.186908 27 atime=1513200575.102709 30 ctime=1513200661.122761218 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcRequestItem.cpp0000644000175000002070000001732611011435655024232 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include "ArcRequestItem.h" #include "ArcAttributeFactory.h" #include using namespace Arc; using namespace ArcSec; ArcRequestItem::ArcRequestItem(XMLNode& node, AttributeFactory* attrfactory) : RequestItem(node, attrfactory) { //Parse the XMLNode structure, and generate the RequestAttribute object XMLNode nd; //Parse the part for ( int i=0;; i++ ){ std::string type; nd = node["Subject"][i]; if(!nd) break; if(!((std::string)(nd.Attribute("Type"))).empty()) type = (std::string)(nd.Attribute("Type")); //if the "Subject" is like this: ///O=NorduGrid/OU=UIO/CN=test if(!(type.empty())&&(nd.Size()==0)){ Subject sub; sub.push_back(new RequestAttribute(nd, attrfactory)); subjects.push_back(sub); } //else if like this: /* administrator /O=NorduGrid/OU=UIO/CN=admin */ else if((type.empty())&&(nd.Size()>0)){ Subject sub; for(int j=0;;j++){ XMLNode tnd = nd.Child(j); if(!tnd) break; sub.push_back(new RequestAttribute(tnd, attrfactory)); } subjects.push_back(sub); } //else if like this: /* administrator /O=NorduGrid/OU=UIO/CN=admin */ else if(!(type.empty())&&(nd.Size()>0)){ Subject sub; for(int j=0;;j++){ XMLNode tnd = nd.Child(j); if(!tnd) break; std::string type_fullname = (nd.Attribute("Type")).Prefix(); type_fullname = type_fullname + ":Type"; XMLNode type_prop = tnd.NewAttribute(type_fullname.c_str()); type_prop = type; sub.push_back(new RequestAttribute(tnd, attrfactory)); } subjects.push_back(sub); } //else if like this: else if((type.empty()) && (nd.Size()==0) && (((std::string)nd).empty())) {} else {std::cerr <<"Error definition in RequestItem:Subject"< for ( int i=0;; i++ ){ std::string type; nd = node["Resource"][i]; if(!nd) break; if(!((std::string)(nd.Attribute("Type"))).empty()) type = (std::string)(nd.Attribute("Type")); if(!(type.empty())&&(nd.Size()==0)){ Resource res; res.push_back(new RequestAttribute(nd, attrfactory)); resources.push_back(res); } else if((type.empty())&&(nd.Size()>0)){ Resource res; for(int j=0;;j++){ XMLNode tnd = nd.Child(j); if(!tnd) break; res.push_back(new RequestAttribute(tnd, attrfactory)); } resources.push_back(res); } else if(!(type.empty())&&(nd.Size()>0)){ Resource res; for(int j=0;;j++){ XMLNode tnd = nd.Child(j); if(!tnd) break; std::string type_fullname = (nd.Attribute("Type")).Prefix(); type_fullname = type_fullname + ":Type"; XMLNode type_prop = tnd.NewAttribute(type_fullname.c_str()); type_prop = type; res.push_back(new RequestAttribute(tnd, attrfactory)); } resources.push_back(res); } else if((type.empty()) && (nd.Size()==0) && (((std::string)nd).empty())) {} else {std::cerr <<"Error definition in RequestItem:Resource"< part for ( int i=0;; i++ ){ std::string type; nd = 
node["Action"][i]; if(!nd) break; if(!((std::string)(nd.Attribute("Type"))).empty()) type = (std::string)(nd.Attribute("Type")); if(!(type.empty())&&(nd.Size()==0)){ Action act; act.push_back(new RequestAttribute(nd, attrfactory)); actions.push_back(act); } else if((type.empty())&&(nd.Size()>0)){ Action act; for(int j=0;;j++){ XMLNode tnd = nd.Child(j); if(!tnd) break; act.push_back(new RequestAttribute(tnd, attrfactory)); } actions.push_back(act); } else if(!(type.empty())&&(nd.Size()>0)){ Action act; for(int j=0;;j++){ XMLNode tnd = nd.Child(j); if(!tnd) break; std::string type_fullname = (nd.Attribute("Type")).Prefix(); type_fullname = type_fullname + ":Type"; XMLNode type_prop = tnd.NewAttribute(type_fullname.c_str()); type_prop = type; act.push_back(new RequestAttribute(tnd, attrfactory)); } actions.push_back(act); } else if((type.empty()) && (nd.Size()==0) && (((std::string)nd).empty())) {} else {std::cerr <<"Error definition in RequestItem:Action"<0)){ Context ctx; for(int j=0;;j++){ XMLNode tnd = nd.Child(j); if(!tnd) break; ctx.push_back(new RequestAttribute(tnd, attrfactory)); } contexts.push_back(ctx); } else if(!(type.empty())&&(nd.Size()>0)){ Context ctx; for(int j=0;;j++){ XMLNode tnd = nd.Child(j); if(!tnd) break; std::string type_fullname = (nd.Attribute("Type")).Prefix(); type_fullname = type_fullname + ":Type"; XMLNode type_prop = tnd.NewAttribute(type_fullname.c_str()); type_prop = type; ctx.push_back(new RequestAttribute(tnd, attrfactory)); } contexts.push_back(ctx); } else if((type.empty()) && (nd.Size()==0) && (((std::string)nd).empty())) {} else {std::cerr <<"Error definition in RequestItem:Context"< #include #include namespace ArcSec { /// Function factory class for Arc specified attributes class ArcFnFactory : public FnFactory { public: ArcFnFactory(Arc::PluginArgument* parg); virtual ~ArcFnFactory(); public: /**return a Function object according to the "Function" attribute in the XML node; The ArcFnFactory itself will release the Function objects*/ virtual Function* createFn(const std::string& type); private: void initFunctions(); }; Arc::Plugin* get_arcpdp_fn_factory (Arc::PluginArgument*); } // namespace ArcSec #endif /* __ARC_SEC_ARCFUNCTIONFACTORY_H__ */ nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcPolicy.cpp0000644000000000000000000000012312046764073023153 xustar000000000000000027 mtime=1352394811.725688 27 atime=1513200575.097709 29 ctime=1513200661.12376123 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcPolicy.cpp0000644000175000002070000001164412046764073023227 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "ArcPolicy.h" #include "ArcRule.h" Arc::Logger ArcSec::ArcPolicy::logger(Arc::Logger::rootLogger, "ArcPolicy"); static Arc::NS policyns("policy", "http://www.nordugrid.org/schemas/policy-arc"); /** get_policy (in charge of class-loading of ArcPolicy) can only accept one type of argument--XMLNode */ Arc::Plugin* ArcSec::ArcPolicy::get_policy(Arc::PluginArgument* arg) { //std::cout<<"Argument type of ArcPolicy:"<(arg):NULL; if(!clarg) return NULL; // Check if empty or valid policy is supplied Arc::XMLNode* doc = (Arc::XMLNode*)(*clarg); if(doc==NULL) { std::cerr<<"ArcPolicy creation requires XMLNode as argument"< res = policynode.XPathLookup("//policy:Policy",policyns); if(res.empty()) { policynode.Destroy(); return; } policytop = *(res.begin()); } ArcPolicy::ArcPolicy(const XMLNode node, EvaluatorContext* ctx, Arc::PluginArgument* parg) : Policy(node,parg), comalg(NULL) { if((!node) || 
(node.Size() == 0)) { logger.msg(WARNING,"Policy is empty"); return; } node.New(policynode); std::list res = policynode.XPathLookup("//policy:Policy",policyns); if(res.empty()) { policynode.Destroy(); return; } policytop = *(res.begin()); setEvaluatorContext(ctx); make_policy(); } void ArcPolicy::make_policy() { //EvalResult.node record the policy(in XMLNode) information about evaluation result. //According to the developer's requirement, EvalResult.node can include rules(in XMLNode) //that "Permit" or "Deny" the request tuple. In the existing code, it include all //the original rules. if(!policynode) return; if(!policytop) return; evalres.node = policynode; evalres.effect = "Not_applicable"; ArcRule *rule; //Get AlgFactory from EvaluatorContext algfactory = (AlgFactory*)(*evaluatorctx); XMLNode nd = policytop; XMLNode rnd; if((bool)nd){ nd = policytop; id = (std::string)(nd.Attribute("PolicyId")); //Setup the rules' combining algorithm inside one policy, according to the "CombiningAlg" name if(nd.Attribute("CombiningAlg")) comalg = algfactory->createAlg((std::string)(nd.Attribute("CombiningAlg"))); else comalg = algfactory->createAlg("Deny-Overrides"); description = (std::string)(nd["Description"]); } logger.msg(VERBOSE, "PolicyId: %s Alg inside this policy is:-- %s", id, comalg?(comalg->getalgId()):""); for ( int i=0;; i++ ){ rnd = nd["Rule"][i]; if(!rnd) break; rule = new ArcRule(rnd, evaluatorctx); subelements.push_back(rule); } } MatchResult ArcPolicy::match(EvaluationCtx*){// ctx){ //RequestTuple* evaltuple = ctx->getEvalTuple(); //Because ArcPolicy definition has no any directly; //All the s are only in ArcRule. //So the function always return "Match" return MATCH; } Result ArcPolicy::eval(EvaluationCtx* ctx){ Result result = comalg?comalg->combine(ctx, subelements):DECISION_INDETERMINATE; if(result == DECISION_PERMIT) evalres.effect = "Permit"; else if(result == DECISION_DENY) evalres.effect = "Deny"; else if(result == DECISION_INDETERMINATE) evalres.effect = "Indeterminate"; else if(result == DECISION_NOT_APPLICABLE) evalres.effect = "Not_Applicable"; return result; } EvalResult& ArcPolicy::getEvalResult() { return evalres; } void ArcPolicy::setEvalResult(EvalResult& res){ evalres = res; } const char* ArcPolicy::getEvalName() const{ return "arc.evaluator"; } const char* ArcPolicy::getName() const{ return "arc.policy"; } ArcPolicy::~ArcPolicy(){ while(!(subelements.empty())){ delete subelements.back(); subelements.pop_back(); } } nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcRequestItem.h0000644000000000000000000000012411011425766023623 xustar000000000000000027 mtime=1210461174.650189 27 atime=1513200575.104709 30 ctime=1513200661.129761303 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcRequestItem.h0000644000175000002070000000173711011425766023700 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ARCREQUESTITEM_H__ #define __ARC_SEC_ARCREQUESTITEM_H__ #include #include namespace ArcSec { ///Container, tuple /**Specified ArcRequestItem which can parse Arc request formate*/ class ArcRequestItem : public RequestItem{ public: ArcRequestItem(Arc::XMLNode& node, AttributeFactory* attrfactory); virtual ~ArcRequestItem(); public: virtual SubList getSubjects () const; virtual void setSubjects (const SubList& sl); virtual ResList getResources () const; virtual void setResources (const ResList& rl); virtual ActList getActions () const; virtual void setActions (const ActList& actions); virtual CtxList getContexts () const; virtual void setContexts (const CtxList& ctx); private: void 
removeSubjects (); void removeResources (); void removeActions (); void removeContexts (); }; } // namespace ArcSec #endif /* __ARC_SEC_ARCREQUESTITEM_H__ */ nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcEvaluationCtx.h0000644000000000000000000000012411213674373024146 xustar000000000000000027 mtime=1244625147.968885 27 atime=1513200575.096709 30 ctime=1513200661.127761279 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcEvaluationCtx.h0000644000175000002070000000373111213674373024217 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ARCEVALUATIONCTX_H__ #define __ARC_SEC_ARCEVALUATIONCTX_H__ #include #include #include #include #include #include #include namespace ArcSec { ///RequestTuple, container which includes the class ArcRequestTuple : public RequestTuple { public: RequestTuple* duplicate(const RequestTuple*); //virtual Arc::XMLNode& getNode() { return tuple; }; ArcRequestTuple(); virtual ~ArcRequestTuple(); virtual void erase(); }; ///EvaluationCtx, in charge of storing some context information for evaluation, including Request, current time, etc. class ArcEvaluationCtx : public EvaluationCtx { public: /**Construct a new EvaluationCtx based on the given request */ ArcEvaluationCtx (Request* request); virtual ~ArcEvaluationCtx(); virtual Request* getRequest() const; virtual void setRequestItem(RequestItem* reqit){reqitem = reqit;}; virtual RequestItem* getRequestItem() const {return reqitem;}; /**Convert/split one RequestItem ( one tuple ) into a few tuples. The purpose is for evaluation. The evaluator will evaluate each RequestTuple one by one, not the RequestItem because it includes some independent s and the evaluator should deal with them independently. */ virtual void split(); virtual std::list getRequestTuples() const { return reqtuples; }; virtual void setEvalTuple(RequestTuple* tuple){ evaltuple = tuple; }; virtual RequestTuple* getEvalTuple()const { return evaltuple; }; private: static Arc::Logger logger; Request* req; RequestItem* reqitem; std::list reqtuples; /**The RequestTuple for evaluation at present*/ RequestTuple* evaltuple; }; } // namespace ArcSec #endif /* __ARC_SEC_EVALUATIONCTX_H__ */ nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcRequest.cpp0000644000000000000000000000012412044527530023336 xustar000000000000000027 mtime=1351790424.395655 27 atime=1513200575.093709 30 ctime=1513200661.122761218 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcRequest.cpp0000644000175000002070000001123512044527530023405 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "ArcRequest.h" #include "ArcRequestItem.h" /** get_request (in charge of class-loading of ArcRequest) can only accept two types of argument: NULL, XMLNode*/ Arc::Plugin* ArcSec::ArcRequest::get_request(Arc::PluginArgument* arg) { //std::cout<<"Argument type of ArcRequest:"<GetXML(xml); std::cout<<"node inside ArcRequest:"<(arg):NULL; if(!clarg) return NULL; Arc::XMLNode* xarg = (Arc::XMLNode*)(*clarg); if(xarg==NULL) { return new ArcSec::ArcRequest(arg); } // ??? 
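// The XMLNode supplied through the plugin argument is wrapped in a Source here
// so that the Source-based constructor can take its own copy of the request
// document (ArcRequest(const Source&, ...) calls req.Get().New(reqnode));
// when no XMLNode is supplied, an empty "ra:Request" document is created
// instead via the default constructor further down in this file.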
ArcSec::Source source(*xarg); return new ArcSec::ArcRequest(source,arg); } } //loader_descriptors __arc_request_modules__ = { // { "arc.request", 0, &ArcSec::ArcRequest::get_request }, // { NULL, 0, NULL } //}; using namespace Arc; using namespace ArcSec; ReqItemList ArcRequest::getRequestItems () const { return rlist; } void ArcRequest::setRequestItems (ReqItemList sl){ rlist = sl; } void ArcRequest::addRequestItem(Attrs& sub, Attrs& res, Attrs& act, Attrs& ctx){ //Add a new RequestItem into reqnode XMLNode request = reqnode; XMLNode requestitem = request.NewChild("ra:RequestItem"); XMLNode subject = requestitem.NewChild("ra:Subject"); int i; int size = sub.size(); if(size>1) { for(i = 0; i < size; i++ ) { XMLNode subjectattr = subject.NewChild("ra:Attribute"); subjectattr = sub[i].value; XMLNode subjectattr_attr = subjectattr.NewAttribute("ra:Type"); subjectattr_attr = sub[i].type; } } else{ XMLNode subject_attr = subject.NewAttribute("ra:Type"); subject = sub[0].value; subject_attr = sub[0].type; } XMLNode resource = requestitem.NewChild("ra:Resource"); size = res.size(); if(size>1) { for(i = 0; i< size; i++) { XMLNode resourceattr = resource.NewChild("ra:Attribute"); resourceattr = res[i].value; XMLNode resourceattr_attr = resourceattr.NewAttribute("ra:Type"); resourceattr_attr = res[i].type; } } else{ XMLNode resource_attr = resource.NewAttribute("ra:Type"); resource = res[0].value; resource_attr = res[0].type; } XMLNode action = requestitem.NewChild("ra:Action"); size = act.size(); if(size>1) { for(i = 0; i < size; i++) { XMLNode actionattr = action.NewChild("ra:Attribute"); actionattr = act[i].value; XMLNode actionattr_attr = actionattr.NewAttribute("ra:Type"); actionattr_attr = act[i].type; } } else{ XMLNode action_attr = action.NewAttribute("ra:Type"); action = act[0].value; action_attr = act[0].type; } XMLNode context = requestitem.NewChild("ra:Context"); size = ctx.size(); if(size>1) { for(i = 0; i < size; i++) { XMLNode contextattr = context.NewChild("ra:Attribute"); contextattr = ctx[i].value; XMLNode contextattr_attr = contextattr.NewAttribute("ra:Type"); contextattr_attr = ctx[i].type; } } else{ XMLNode context_attr = context.NewAttribute("ra:Type"); context = ctx[0].value; context_attr = ctx[0].type; } std::string xml; reqnode.GetDoc(xml); std::cout<("request","http://www.nordugrid.org/schemas/request-arc")); std::list::iterator itemit; std::list itemlist = reqnode.XPathLookup("//request:RequestItem", nsList); for ( itemit=itemlist.begin() ; itemit != itemlist.end(); itemit++ ){ XMLNode itemnd=*itemit; //Generate a new ArcRequestItem, which will generate RequestAttribute object rlist.push_back(new ArcRequestItem(itemnd, attrfactory)); } } const char* ArcRequest::getEvalName() const{ return "arc.evaluator"; } const char* ArcRequest::getName() const{ return "arc.request"; } ArcRequest::ArcRequest (const Source& req,Arc::PluginArgument* parg) : Request(req,parg), attrfactory(NULL) { req.Get().New(reqnode); NS ns; ns["ra"]="http://www.nordugrid.org/schemas/request-arc"; reqnode.Namespaces(ns); } ArcRequest::ArcRequest (Arc::PluginArgument* parg) : Request(parg), attrfactory(NULL) { NS ns; ns["ra"]="http://www.nordugrid.org/schemas/request-arc"; XMLNode request(ns,"ra:Request"); request.New(reqnode); } ArcRequest::~ArcRequest(){ while(!(rlist.empty())){ delete rlist.back(); rlist.pop_back(); } } nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/Policy.xsd0000644000000000000000000000012411231304743022527 xustar000000000000000027 mtime=1248168419.114661 27 
atime=1513200575.091709 30 ctime=1513200661.136761389 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/Policy.xsd0000644000175000002070000003123111231304743022574 0ustar00mockbuildmock00000000000000 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcPDP.cpp0000644000000000000000000000012412110410653022320 xustar000000000000000027 mtime=1361187243.181034 27 atime=1513200575.089709 30 ctime=1513200661.119761181 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcPDP.cpp0000644000175000002070000002025312110410653022367 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "ArcPDP.h" Arc::Logger ArcSec::ArcPDP::logger(Arc::Logger::getRootLogger(), "ArcSec.ArcPDP"); /* static ArcSec::PDP* get_pdp(Arc::Config *cfg,Arc::ChainContext *ctx) { return new ArcSec::ArcPDP(cfg); } pdp_descriptors ARC_PDP_LOADER = { { "arc.pdp", 0, &get_pdp}, { NULL, 0, NULL } }; */ using namespace Arc; namespace ArcSec { Plugin* ArcPDP::get_arc_pdp(PluginArgument* arg) { ArcSec::PDPPluginArgument* pdparg = arg?dynamic_cast(arg):NULL; if(!pdparg) return NULL; return new ArcPDP((Arc::Config*)(*pdparg),arg); } // This class is used to store Evaluator per connection class ArcPDPContext:public Arc::MessageContextElement { friend class ArcPDP; private: Evaluator* eval; public: ArcPDPContext(Evaluator* e); ArcPDPContext(void); virtual ~ArcPDPContext(void); }; ArcPDPContext::~ArcPDPContext(void) { if(eval) delete eval; } ArcPDPContext::ArcPDPContext(Evaluator* e):eval(e) { } ArcPDPContext::ArcPDPContext(void):eval(NULL) { std::string evaluator = "arc.evaluator"; EvaluatorLoader eval_loader; eval = eval_loader.getEvaluator(evaluator); } ArcPDP::ArcPDP(Config* cfg,Arc::PluginArgument* parg):PDP(cfg,parg) /*, eval(NULL)*/ { XMLNode pdp_node(*cfg); XMLNode filter = (*cfg)["Filter"]; if((bool)filter) { XMLNode select_attr = filter["Select"]; XMLNode reject_attr = filter["Reject"]; for(;(bool)select_attr;++select_attr) select_attrs.push_back((std::string)select_attr); for(;(bool)reject_attr;++reject_attr) reject_attrs.push_back((std::string)reject_attr); }; XMLNode policy_store = (*cfg)["PolicyStore"]; for(;(bool)policy_store;++policy_store) { XMLNode policy_location = policy_store["Location"]; policy_locations.push_back((std::string)policy_location); }; XMLNode policy = (*cfg)["Policy"]; for(;(bool)policy;++policy) policies.AddNew(policy); policy_combining_alg = (std::string)((*cfg)["PolicyCombiningAlg"]); } PDPStatus ArcPDP::isPermitted(Message *msg) const { //Compose Request based on the information inside message, the Request will be like below: /* 123.45.67.89 /O=NorduGrid/OU=UIO/CN=test GET */ Evaluator* eval = NULL; std::string ctxid = "arcsec.arcpdp"; try { Arc::MessageContextElement* mctx = (*(msg->Context()))[ctxid]; if(mctx) { ArcPDPContext* pdpctx = dynamic_cast(mctx); if(pdpctx) { eval=pdpctx->eval; } else { logger.msg(INFO, "Can not find ArcPDPContext"); } }; } catch(std::exception& e) { }; if(!eval) { ArcPDPContext* pdpctx = new ArcPDPContext(); if(pdpctx) { eval=pdpctx->eval; if(eval) { //for(Arc::AttributeIterator it = (msg->Attributes())->getAll("PDP:POLICYLOCATION"); it.hasMore(); it++) { // eval->addPolicy(SourceFile(*it)); //} for(std::list::const_iterator it = policy_locations.begin(); it!= policy_locations.end(); it++) { eval->addPolicy(SourceFile(*it)); } for(int n = 0;naddPolicy(Source(const_cast(policies)[n])); } if(!policy_combining_alg.empty()) { if(policy_combining_alg == "EvaluatorFailsOnDeny") { 
eval->setCombiningAlg(EvaluatorFailsOnDeny); } else if(policy_combining_alg == "EvaluatorStopsOnDeny") { eval->setCombiningAlg(EvaluatorStopsOnDeny); } else if(policy_combining_alg == "EvaluatorStopsOnPermit") { eval->setCombiningAlg(EvaluatorStopsOnPermit); } else if(policy_combining_alg == "EvaluatorStopsNever") { eval->setCombiningAlg(EvaluatorStopsNever); } else { AlgFactory* factory = eval->getAlgFactory(); if(!factory) { logger.msg(WARNING, "Evaluator does not support loadable Combining Algorithms"); } else { CombiningAlg* algorithm = factory->createAlg(policy_combining_alg); if(!algorithm) { logger.msg(ERROR, "Evaluator does not support specified Combining Algorithm - %s",policy_combining_alg); } else { eval->setCombiningAlg(algorithm); }; }; }; }; msg->Context()->Add(ctxid, pdpctx); } else { delete pdpctx; } } if(!eval) logger.msg(ERROR, "Can not dynamically produce Evaluator"); } if(!eval) { logger.msg(ERROR,"Evaluator for ArcPDP was not loaded"); return false; }; MessageAuth* mauth = msg->Auth()->Filter(select_attrs,reject_attrs); MessageAuth* cauth = msg->AuthContext()->Filter(select_attrs,reject_attrs); if((!mauth) && (!cauth)) { logger.msg(ERROR,"Missing security object in message"); return false; }; NS ns; XMLNode requestxml(ns,""); if(mauth) { if(!mauth->Export(SecAttr::ARCAuth,requestxml)) { delete mauth; logger.msg(ERROR,"Failed to convert security information to ARC request"); return false; }; delete mauth; }; if(cauth) { if(!cauth->Export(SecAttr::ARCAuth,requestxml)) { delete mauth; logger.msg(ERROR,"Failed to convert security information to ARC request"); return false; }; delete cauth; }; { std::string s; requestxml.GetXML(s); logger.msg(DEBUG,"ARC Auth. request: %s",s); }; if(requestxml.Size() <= 0) { logger.msg(ERROR,"No requested security information was collected"); return false; }; //Call the evaluation functionality inside Evaluator Response *resp = eval->evaluate(requestxml); if(!resp) { logger.msg(ERROR, "Not authorized from arc.pdp - failed to get reponse from Evaluator"); return false; }; ResponseList rlist = resp->getResponseItems(); int size = rlist.size(); // The current ArcPDP is supposed to be used as policy decision point for Arc1 HED components, and // those services which are based on HED. // Each message/session comes with one unique (with a number of s), // and different and elements (possibly plus ). // The results from all tuples are combined using following decision algorithm: // 1. If any of tuples made of , , and gets "DENY" // then final result is negative (false). // 2. Otherwise if any of tuples gets "PERMIT" then final result is positive (true). // 3. Otherwise result is negative (false). 
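/* The three rules above can be expressed independently of the Response and
   ResponseItem types.  A minimal sketch, assuming a plain std::list<Result> of
   per-tuple decisions (the helper name combineDecisions is hypothetical):

     bool combineDecisions(const std::list<Result>& decisions) {
       bool any_permit = false;
       for(std::list<Result>::const_iterator it = decisions.begin();
           it != decisions.end(); ++it) {
         if(*it == DECISION_DENY) return false;   // rule 1: any DENY makes the final result negative
         if(*it == DECISION_PERMIT) any_permit = true;
       }
       return any_permit;                         // rule 2: any PERMIT -> true, otherwise rule 3: false
     }

   The loop below applies the same rules over the ResponseItem list, but keeps
   iterating so that it can also log the subject attributes of every tuple. */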
bool atleast_onedeny = false; bool atleast_onepermit = false; for(int i = 0; i < size; i++) { ResponseItem* item = rlist[i]; RequestTuple* tp = item->reqtp; if(item->res == DECISION_DENY) atleast_onedeny = true; if(item->res == DECISION_PERMIT) atleast_onepermit = true; Subject::iterator it; Subject subject = tp->sub; for (it = subject.begin(); it!= subject.end(); it++){ AttributeValue *attrval; RequestAttribute *attr; attr = dynamic_cast(*it); if(attr){ attrval = (*it)->getAttributeValue(); if(attrval) logger.msg(DEBUG, "%s", attrval->encode()); } } } bool result = false; if(atleast_onedeny) result = false; else if(atleast_onepermit) result = true; else result = false; if(result) logger.msg(VERBOSE, "Authorized by arc.pdp"); else logger.msg(INFO, "Not authorized by arc.pdp - some of the RequestItem elements do not satisfy Policy"); if(resp) delete resp; return result; } ArcPDP::~ArcPDP(){ //if(eval) // delete eval; //eval = NULL; } } // namespace ArcSec nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcFnFactory.cpp0000644000000000000000000000012411730411253023574 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.094709 30 ctime=1513200661.125761254 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcFnFactory.cpp0000644000175000002070000001065211730411253023645 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "ArcFnFactory.h" #include #include #include #include #include #include #include /* loader_descriptors __arc_fnfactory_modules__ = { { "fn.factory", 0, &get_fn_factory }, { NULL, 0, NULL } }; */ using namespace Arc; namespace ArcSec { Arc::Plugin* get_arcpdp_fn_factory (Arc::PluginArgument* arg) { return new ArcSec::ArcFnFactory(arg); } void ArcFnFactory::initFunctions(){ /**Some Arc specified function types*/ //fnmap.insert(pair(StringFunction.identify, new StringFunction)); //fnmap.insert(pair(DateMathFunction.identify, new DateMathFunction)); /** TODO: other function type............. 
*/ //EqualFunctions std::string fnName = EqualFunction::getFunctionName(StringAttribute::getIdentifier()); std::string argType = StringAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new EqualFunction(fnName, argType))); fnName = EqualFunction::getFunctionName(DateTimeAttribute::getIdentifier()); argType = DateTimeAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new EqualFunction(fnName, argType))); fnName = EqualFunction::getFunctionName(DateAttribute::getIdentifier()); argType = DateAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new EqualFunction(fnName, argType))); fnName = EqualFunction::getFunctionName(TimeAttribute::getIdentifier()); argType = TimeAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new EqualFunction(fnName, argType))); fnName = EqualFunction::getFunctionName(DurationAttribute::getIdentifier()); argType = DurationAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new EqualFunction(fnName, argType))); fnName = EqualFunction::getFunctionName(PeriodAttribute::getIdentifier()); argType = PeriodAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new EqualFunction(fnName, argType))); fnName = EqualFunction::getFunctionName(X500NameAttribute::getIdentifier()); argType = X500NameAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new EqualFunction(fnName, argType))); fnName = EqualFunction::getFunctionName(AnyURIAttribute::getIdentifier()); argType = AnyURIAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new EqualFunction(fnName, argType))); //MatchFunctions fnName = MatchFunction::getFunctionName(StringAttribute::getIdentifier()); argType = StringAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new MatchFunction(fnName, argType))); fnName = MatchFunction::getFunctionName(X500NameAttribute::getIdentifier()); argType = X500NameAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new MatchFunction(fnName, argType))); fnName = MatchFunction::getFunctionName(AnyURIAttribute::getIdentifier()); argType = AnyURIAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new MatchFunction(fnName, argType))); //InRangeFunctions fnName = InRangeFunction::getFunctionName(StringAttribute::getIdentifier()); argType = StringAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new InRangeFunction(fnName, argType))); fnName = InRangeFunction::getFunctionName(PeriodAttribute::getIdentifier()); argType = PeriodAttribute::getIdentifier(); fnmap.insert(std::pair(fnName, new InRangeFunction(fnName, argType))); } ArcFnFactory::ArcFnFactory(Arc::PluginArgument* parg): FnFactory(parg) { initFunctions(); } Function* ArcFnFactory::createFn(const std::string& type){ FnMap::iterator it; if((it=fnmap.find(type)) != fnmap.end()) return (*it).second; else return NULL; } ArcFnFactory::~ArcFnFactory(){ FnMap::iterator it; for(it = fnmap.begin(); it != fnmap.end(); it = fnmap.begin()){ Function* fn = (*it).second; fnmap.erase(it); if(fn) delete fn; } } } // namespace ArcSec nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcRequest.h0000644000000000000000000000012411730411253022776 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.097709 30 ctime=1513200661.128761291 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcRequest.h0000644000175000002070000000330411730411253023043 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ARCREQUEST_H__ #define __ARC_SEC_ARCREQUEST_H__ #include #include #include #include #include #include "ArcEvaluator.h" ///ArcRequest, Parsing the specified Arc request format 
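/* Typical construction of a request through this interface, based on the
   member functions declared below and their implementations in ArcRequest.cpp:
   the default constructor creates an empty <ra:Request> document,
   addRequestItem() appends one tuple per call (each Attrs argument is a
   container of {value, type} entries, as the .value/.type accesses in
   ArcRequest.cpp show), and make_request() parses the accumulated document
   into RequestItem objects once an AttributeFactory has been installed.
   A rough usage sketch -- the variable names and the way the Attrs containers
   are filled are placeholders, not part of this API description:

     ArcSec::ArcRequest req(parg);             // empty ra:Request document
     req.setAttributeFactory(attrfactory);     // e.g. an ArcAttributeFactory instance
     req.addRequestItem(sub, res, act, ctx);   // sub/res/act/ctx: pre-filled Attrs
     req.make_request();                       // builds the RequestItem objects
     Arc::XMLNode& xml = req.getReqNode();     // underlying XML of the request
*/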
namespace ArcSec { class ArcRequest : public Request { friend class ArcEvaluator; public: //**Get all the RequestItem inside RequestItem container */ virtual ReqItemList getRequestItems () const; //**Set the content of the container*/ virtual void setRequestItems (ReqItemList sl); //**Add request tuple from non-XMLNode*/ virtual void addRequestItem(Attrs& sub, Attrs& res, Attrs& act, Attrs& ctx); //**Set the attribute factory for the usage of Request*/ virtual void setAttributeFactory(AttributeFactory* attributefactory) { attrfactory = attributefactory; }; //**Default constructor*/ ArcRequest (Arc::PluginArgument* parg); //**Parse request information from external source*/ ArcRequest (const Source& source,Arc::PluginArgument* parg); virtual ~ArcRequest(); //**Create the objects included in Request according to the node attached to the Request object*/ virtual void make_request(); virtual const char* getEvalName() const; virtual const char* getName() const; virtual Arc::XMLNode& getReqNode() { return reqnode; }; static Arc::Plugin* get_request(Arc::PluginArgument* arg); private: //**AttributeFactory which is in charge of producing Attribute*/ AttributeFactory * attrfactory; //**A XMLNode structure which includes the xml structure of a request*/ Arc::XMLNode reqnode; }; } // namespace ArcSec #endif /* __ARC_SEC_ARCREQUEST_H__ */ nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcAttributeFactory.h0000644000000000000000000000012411730411253024641 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.099709 30 ctime=1513200661.130761315 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcAttributeFactory.h0000644000175000002070000000154111730411253024707 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ARCATTRIBUTEFACTORY_H__ #define __ARC_SEC_ARCATTRIBUTEFACTORY_H__ #include #include #include #include #include namespace ArcSec { /// Attribute factory class for Arc specified attributes class ArcAttributeFactory : public AttributeFactory { public: ArcAttributeFactory(Arc::PluginArgument* parg); virtual ~ArcAttributeFactory(); public: /**creat a AttributeValue according to the value in the XML node and the type; It should be the caller to release the AttributeValue Object*/ virtual AttributeValue* createValue(const Arc::XMLNode& node, const std::string& type); private: void initDatatypes(); }; Arc::Plugin* get_arcpdp_attr_factory (Arc::PluginArgument*); } // namespace ArcSec #endif /* __ARC_SEC_ARCATTRIBUTEFACTORY_H__ */ nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/schema0000644000000000000000000000013013214316025021727 xustar000000000000000029 mtime=1513200661.15976167 30 atime=1513200668.723854182 29 ctime=1513200661.15976167 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/schema/0000755000175000002070000000000013214316025022054 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/arcpdp/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711255700321024045 xustar000000000000000027 mtime=1253540049.444682 30 atime=1513200600.074014568 30 ctime=1513200661.157761646 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/schema/Makefile.am0000644000175000002070000000013711255700321024110 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = ArcPDP.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/shc/arcpdp/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315730024054 xustar000000000000000030 mtime=1513200600.106014959 29 atime=1513200649.22961576 30 ctime=1513200661.158761658 
nordugrid-arc-5.4.2/src/hed/shc/arcpdp/schema/Makefile.in0000644000175000002070000004351413214315730024132 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/arcpdp/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = 
@ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ 
INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = 
@exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = ArcPDP.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/arcpdp/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/arcpdp/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/arcpdp/schema/PaxHeaders.7502/ArcPDP.xsd0000644000000000000000000000012311667515041023610 xustar000000000000000027 mtime=1323211297.642332 27 atime=1513200575.094709 29 ctime=1513200661.15976167 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/schema/ArcPDP.xsd0000644000175000002070000000542611667515041023665 0ustar00mockbuildmock00000000000000 This element defines Security Attributes to select and reject. If there are no Select elements all Attributes are used except those listed in Reject elements. This element specifies the file containing a policy document. There can be multiple such elements. The content of the Type element is not defined yet. This element contains a policy to be processed. There may be multiple such elements. Combining Algorithm for Policies. For supported names please check the documentation. In addition to those there are also a few legacy algorithms provided: EvaluatorFailsOnDeny, EvaluatorStopsOnDeny, EvaluatorStopsOnPermit, EvaluatorStopsNever. Unfortunately their behavior is not well defined. 
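For orientation, the arc.evaluator plugin configured through the schema above is driven through the ArcSec::Evaluator interface declared in ArcEvaluator.h (the next archive member) and exercised by testinterface_arc.cpp further down in this archive. Below is a minimal sketch condensed from that test program; it is illustrative only, and the #include paths are assumptions (the include targets were stripped from the copies of these sources in this dump).

#include <iostream>
#include <string>
#include <arc/security/ArcPDP/EvaluatorLoader.h>   // assumed header path
#include <arc/security/ArcPDP/Evaluator.h>         // assumed header path
#include <arc/security/ArcPDP/Source.h>            // assumed header path
#include <arc/security/ArcPDP/Response.h>          // assumed header path

int main(void) {
  ArcSec::EvaluatorLoader eval_loader;
  // "arc.evaluator" is the plugin name used by testinterface_arc.cpp.
  ArcSec::Evaluator* eval = eval_loader.getEvaluator(std::string("arc.evaluator"));
  if(!eval) return 1;

  // Policy and request documents read from files, as in the test program.
  ArcSec::SourceFile policy_source("Policy_Example.xml");
  ArcSec::SourceFile request_source("Request.xml");

  eval->addPolicy(policy_source);                     // pre-load the policy store
  ArcSec::Response* resp = eval->evaluate(request_source);
  if(resp) {
    // Each ResponseItem corresponds to a request tuple that satisfied the policy.
    std::cout << (resp->getResponseItems()).size()
              << " request tuple(s) passed the policy evaluation" << std::endl;
    delete resp;
  }
  delete eval;
  return 0;
}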
nordugrid-arc-5.4.2/src/hed/shc/arcpdp/PaxHeaders.7502/ArcEvaluator.h0000644000000000000000000000012411730411253023310 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.102709 30 ctime=1513200661.127761279 nordugrid-arc-5.4.2/src/hed/shc/arcpdp/ArcEvaluator.h0000644000175000002070000000476111730411253023365 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_ARCEVALUATOR_H__ #define __ARC_SEC_ARCEVALUATOR_H__ #include #include #include #include #include #include #include #include #include #include #include namespace ArcSec { ///Execute the policy evaluation, based on the request and policy class ArcEvaluator : public Evaluator { friend class EvaluatorContext; private: static Arc::Logger logger; PolicyStore *plstore; FnFactory* fnfactory; AttributeFactory* attrfactory; AlgFactory* algfactory; EvaluatorContext* context; Arc::XMLNode* m_cfg; std::string request_classname; EvaluatorCombiningAlg combining_alg; CombiningAlg* combining_alg_ex; public: ArcEvaluator (Arc::XMLNode* cfg, Arc::PluginArgument* parg); ArcEvaluator (const char * cfgfile, Arc::PluginArgument* parg); virtual ~ArcEvaluator(); /**Evaluate the request based on the policy information inside PolicyStore*/ virtual Response* evaluate(Request* request); virtual Response* evaluate(const Source& request); virtual Response* evaluate(Request* request, const Source& policy); virtual Response* evaluate(const Source& request, const Source& policy); virtual Response* evaluate(Request* request, Policy* policyobj); virtual Response* evaluate(const Source& request, Policy* policyobj); virtual AttributeFactory* getAttrFactory () { return attrfactory;}; virtual FnFactory* getFnFactory () { return fnfactory; }; virtual AlgFactory* getAlgFactory () { return algfactory; }; virtual void addPolicy(const Source& policy,const std::string& id = "") { plstore->addPolicy(policy, context, id); }; virtual void addPolicy(Policy* policy,const std::string& id = "") { plstore->addPolicy(policy, context, id); }; virtual void removePolicies(void) { plstore->removePolicies(); }; virtual void setCombiningAlg(EvaluatorCombiningAlg alg); virtual void setCombiningAlg(CombiningAlg* alg); virtual const char* getName(void) const; static Arc::Plugin* get_evaluator(Arc::PluginArgument* arg); protected: virtual Response* evaluate(EvaluationCtx* ctx); private: virtual void parsecfg(Arc::XMLNode& cfg); virtual Request* make_reqobj(Arc::XMLNode& reqnode); }; } // namespace ArcSec #endif /* __ARC_SEC_ARCEVALUATOR_H__ */ nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/saml2sso_assertionconsumersh0000644000000000000000000000013213214316025025161 xustar000000000000000030 mtime=1513200661.718768507 30 atime=1513200668.723854182 30 ctime=1513200661.718768507 nordugrid-arc-5.4.2/src/hed/shc/saml2sso_assertionconsumersh/0000755000175000002070000000000013214316025025304 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/saml2sso_assertionconsumersh/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515027302 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200600.718022444 30 ctime=1513200661.714768458 nordugrid-arc-5.4.2/src/hed/shc/saml2sso_assertionconsumersh/Makefile.am0000644000175000002070000000226112052416515027345 0ustar00mockbuildmock00000000000000#pkglib_LTLIBRARIES = libsaml2sp.la noinst_LTLIBRARIES = libsaml2ssoassertionconsumersh.la #libsaml2sp_la_SOURCES = SPService.cpp SPService.h #libsaml2sp_la_CXXFLAGS = -I$(top_srcdir)/include \ # $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) \ # $(XMLSEC_OPENSSL_CFLAGS) 
$(XMLSEC_CFLAGS) $(AM_CXXFLAGS) #libsaml2sp_la_LIBADD = \ # $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la \ # $(top_builddir)/src/hed/libs/message/libarcmessage.la \ # $(top_builddir)/src/hed/libs/common/libarccommon.la \ # $(XMLSEC_OPENSSL_LIBS) $(XMLSEC_LIBS) $(LIBXML2_LIBS) #libsaml2sp_la_LDFLAGS = -no-undefined -avoid-version -module libsaml2ssoassertionconsumersh_la_SOURCES = \ SAML2SSO_AssertionConsumerSH.cpp SAML2SSO_AssertionConsumerSH.h libsaml2ssoassertionconsumersh_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) \ $(XMLSEC_OPENSSL_CFLAGS) $(XMLSEC_CFLAGS) $(AM_CXXFLAGS) libsaml2ssoassertionconsumersh_la_LIBADD = \ $(top_builddir)/src/hed/libs/ws-security/libarcwssecurity.la \ $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(XMLSEC_OPENSSL_LIBS) $(XMLSEC_LIBS) nordugrid-arc-5.4.2/src/hed/shc/saml2sso_assertionconsumersh/PaxHeaders.7502/Makefile.in0000644000000000000000000000013013214315730027303 xustar000000000000000029 mtime=1513200600.76102297 30 atime=1513200649.535619502 29 ctime=1513200661.71576847 nordugrid-arc-5.4.2/src/hed/shc/saml2sso_assertionconsumersh/Makefile.in0000644000175000002070000006040713214315730027362 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/saml2sso_assertionconsumersh DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) am__DEPENDENCIES_1 = libsaml2ssoassertionconsumersh_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/ws-security/libarcwssecurity.la \ $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libsaml2ssoassertionconsumersh_la_OBJECTS = libsaml2ssoassertionconsumersh_la-SAML2SSO_AssertionConsumerSH.lo libsaml2ssoassertionconsumersh_la_OBJECTS = \ $(am_libsaml2ssoassertionconsumersh_la_OBJECTS) libsaml2ssoassertionconsumersh_la_LINK = $(LIBTOOL) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libsaml2ssoassertionconsumersh_la_CXXFLAGS) $(CXXFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libsaml2ssoassertionconsumersh_la_SOURCES) DIST_SOURCES = $(libsaml2ssoassertionconsumersh_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) 
$(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = 
@GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ 
build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ #pkglib_LTLIBRARIES = libsaml2sp.la noinst_LTLIBRARIES = libsaml2ssoassertionconsumersh.la #libsaml2sp_la_SOURCES = SPService.cpp SPService.h #libsaml2sp_la_CXXFLAGS = -I$(top_srcdir)/include \ # $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) \ # $(XMLSEC_OPENSSL_CFLAGS) $(XMLSEC_CFLAGS) $(AM_CXXFLAGS) #libsaml2sp_la_LIBADD = \ # $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la \ # $(top_builddir)/src/hed/libs/message/libarcmessage.la \ # $(top_builddir)/src/hed/libs/common/libarccommon.la \ # $(XMLSEC_OPENSSL_LIBS) $(XMLSEC_LIBS) $(LIBXML2_LIBS) #libsaml2sp_la_LDFLAGS = -no-undefined -avoid-version -module libsaml2ssoassertionconsumersh_la_SOURCES = \ SAML2SSO_AssertionConsumerSH.cpp SAML2SSO_AssertionConsumerSH.h libsaml2ssoassertionconsumersh_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) \ $(XMLSEC_OPENSSL_CFLAGS) $(XMLSEC_CFLAGS) $(AM_CXXFLAGS) libsaml2ssoassertionconsumersh_la_LIBADD = \ $(top_builddir)/src/hed/libs/ws-security/libarcwssecurity.la \ $(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(XMLSEC_OPENSSL_LIBS) $(XMLSEC_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/saml2sso_assertionconsumersh/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/saml2sso_assertionconsumersh/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libsaml2ssoassertionconsumersh.la: $(libsaml2ssoassertionconsumersh_la_OBJECTS) $(libsaml2ssoassertionconsumersh_la_DEPENDENCIES) $(libsaml2ssoassertionconsumersh_la_LINK) $(libsaml2ssoassertionconsumersh_la_OBJECTS) $(libsaml2ssoassertionconsumersh_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libsaml2ssoassertionconsumersh_la-SAML2SSO_AssertionConsumerSH.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libsaml2ssoassertionconsumersh_la-SAML2SSO_AssertionConsumerSH.lo: SAML2SSO_AssertionConsumerSH.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libsaml2ssoassertionconsumersh_la_CXXFLAGS) $(CXXFLAGS) -MT libsaml2ssoassertionconsumersh_la-SAML2SSO_AssertionConsumerSH.lo -MD -MP -MF $(DEPDIR)/libsaml2ssoassertionconsumersh_la-SAML2SSO_AssertionConsumerSH.Tpo -c -o libsaml2ssoassertionconsumersh_la-SAML2SSO_AssertionConsumerSH.lo `test -f 'SAML2SSO_AssertionConsumerSH.cpp' || echo '$(srcdir)/'`SAML2SSO_AssertionConsumerSH.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libsaml2ssoassertionconsumersh_la-SAML2SSO_AssertionConsumerSH.Tpo $(DEPDIR)/libsaml2ssoassertionconsumersh_la-SAML2SSO_AssertionConsumerSH.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SAML2SSO_AssertionConsumerSH.cpp' 
object='libsaml2ssoassertionconsumersh_la-SAML2SSO_AssertionConsumerSH.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libsaml2ssoassertionconsumersh_la_CXXFLAGS) $(CXXFLAGS) -c -o libsaml2ssoassertionconsumersh_la-SAML2SSO_AssertionConsumerSH.lo `test -f 'SAML2SSO_AssertionConsumerSH.cpp' || echo '$(srcdir)/'`SAML2SSO_AssertionConsumerSH.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/saml2sso_assertionconsumersh/PaxHeaders.7502/SAML2SSO_AssertionConsu0000644000000000000000000000012412110401544031417 xustar000000000000000027 mtime=1361183588.521754 27 atime=1513200574.999707 30 ctime=1513200661.717768495 nordugrid-arc-5.4.2/src/hed/shc/saml2sso_assertionconsumersh/SAML2SSO_AssertionConsumerSH.cpp0000644000175000002070000000634112110401544033210 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "SAML2SSO_AssertionConsumerSH.h" namespace ArcSec { using namespace Arc; static Logger logger(Logger::rootLogger, "SAMLSSO_AssertionConsumerSH"); Plugin* SAML2SSO_AssertionConsumerSH::get_sechandler(PluginArgument* arg) { SecHandlerPluginArgument* shcarg = arg?dynamic_cast<SecHandlerPluginArgument*>(arg):NULL; if(!shcarg) return NULL; SAML2SSO_AssertionConsumerSH* plugin = new SAML2SSO_AssertionConsumerSH((Config*)(*shcarg),(ChainContext*)(*shcarg),arg); if(!plugin) return NULL; if(!(*plugin)) { delete plugin; plugin = NULL; }; return plugin; } /* sechandler_descriptors ARC_SECHANDLER_LOADER = { { "saml2ssoassertionconsumer.handler", 0, &get_sechandler}, { NULL, 0, NULL } }; */ SAML2SSO_AssertionConsumerSH::SAML2SSO_AssertionConsumerSH(Config *cfg,ChainContext*,Arc::PluginArgument* parg):SecHandler(cfg,parg), SP_service_loader(NULL), valid_(false) { if(!init_xmlsec()) return; valid_ = true; } SAML2SSO_AssertionConsumerSH::~SAML2SSO_AssertionConsumerSH() { final_xmlsec(); if(SP_service_loader) delete SP_service_loader; } SecHandlerStatus SAML2SSO_AssertionConsumerSH::Handle(Arc::Message* msg) const { //Explanation: The SPService checks the authentication result (generated by IdP and sent by //user agent) and records those successful authentication results into "SAMLAssertion". Since //the real client (client to normal service) and user agent (client to SP service) share the //same tcp/tls session (this is why only one copy of base class ClientTCP is needed in the //ClientSAMLInterface), we can use the authentication result from user-agent/SPService for //the decision-making in normal Client/Service interaction. // //Here the message which includes the endpoint "/saml2sp" is avoided, because //this specific endpoint is supposed to participate in the SAML2 SSO profile, check //the authentication result from the IdP and record the authentication information //for the later saml2sso_serviceprovider handler, and itself should not have //the saml2sso_serviceprovider handler enforced on it. std::string http_endpoint = msg->Attributes()->get("HTTP:ENDPOINT"); std::size_t pos = http_endpoint.find("saml2sp"); if(pos == std::string::npos) { SecAttr* sattr = msg->Auth()->get("SAMLAssertion"); if(!sattr) { logger.msg(ERROR, "Can not get SAMLAssertion SecAttr from message context"); return false; } std::string str; XMLNode saml_assertion_nd; if(!(sattr->Export(SecAttr::SAML, saml_assertion_nd))) return false; saml_assertion_nd.GetXML(str); std::cout<<"SAML Assertion parsed by SP service: "< #include #include #include #include #include namespace ArcSec { /// Implement the functionality of the Service Provider in the SAML2 SSO profile //1.Launch a service (called SP Service) which will compose AuthnRequest according //to the IdP information sent from client side/user agent. So the SAML2SSO_ServiceProviderSH //handler and SP Service together compose the functionality of the Service Provider in the //SAML2 SSO profile //2.Consume the saml assertion from client side/user agent (Push model): //a. 
assertion inside soap message as WS-Security SAML token; //b. assertion inside x509 certificate as exention. we need to parse the peer //x509 certificate from transport level and take out the saml assertion. //Or contact the IdP and get back the saml assertion related to the client(Pull model) class SAML2SSO_AssertionConsumerSH : public SecHandler { private: std::string cert_file_; std::string key_file_; std::string ca_file_; std::string ca_dir_; Arc::MCCLoader* SP_service_loader; bool valid_; public: SAML2SSO_AssertionConsumerSH(Arc::Config *cfg, Arc::ChainContext* ctx, Arc::PluginArgument* parg); virtual ~SAML2SSO_AssertionConsumerSH(void); static Arc::Plugin* get_sechandler(Arc::PluginArgument* arg); virtual SecHandlerStatus Handle(Arc::Message* msg) const; operator bool(void) { return valid_; }; bool operator!(void) { return !valid_; }; }; } // namespace ArcSec #endif /* __ARC_SEC_SAML2SSO_ASSERTIONCONSUMERSH_H__ */ nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/testinterface_arc.cpp0000644000000000000000000000012411570717263023503 xustar000000000000000027 mtime=1306762931.686321 27 atime=1513200575.070708 30 ctime=1513200661.021759982 nordugrid-arc-5.4.2/src/hed/shc/testinterface_arc.cpp0000644000175000002070000001333711570717263023557 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include //#include #include #include int main(void){ signal(SIGTTOU,SIG_IGN); signal(SIGTTIN,SIG_IGN); signal(SIGPIPE,SIG_IGN); Arc::Logger logger(Arc::Logger::rootLogger, "PDPTest"); Arc::LogStream logcerr(std::cerr); Arc::Logger::rootLogger.addDestination(logcerr); logger.msg(Arc::INFO, "Start test"); ArcSec::EvaluatorLoader eval_loader; //TEST: ArcEvaluator, ArcPolicy, ArcRequest //Load the Evaluator ArcSec::Evaluator* eval = NULL; std::string evaluator = "arc.evaluator"; eval = eval_loader.getEvaluator(evaluator); if(eval == NULL) { logger.msg(Arc::ERROR, "Can not dynamically produce Evaluator"); return 0; } Arc::XMLNode policynode("\ \ \ \ /O=NorduGrid/OU=UIO/CN=test\ /vo.knowarc/usergroupA\ \ /O=Grid/OU=KnowARC/CN=XYZ\ urn:mace:shibboleth:examples\ \ \ \ file://home/test\ \ \ read\ stat\ list\ \ \ 2007-09-10T20:30:20/P1Y1M\ \ \ "); std::string policy_str; policynode.GetXML(policy_str); ArcSec::Policy* policy = NULL; std::string policyclassname = "arc.policy"; /**Three options to create policy object*/ ArcSec::SourceFile policy_source("Policy_Example.xml"); //ArcSec::Source policy_source(policy_str); //ArcSec::Source policy_source(policynode); policy = eval_loader.getPolicy(policyclassname, policy_source); if(policy == NULL) logger.msg(Arc::ERROR, "Can not dynamically produce Policy"); Arc::XMLNode reqnode("\ \ \ \ /O=NorduGrid/OU=UIO/CN=test\ /O=NorduGrid/OU=UIO/CN=admin\ \ file://home/test\ \ read\ \ 2007-09-10T20:30:20/P1Y1M\ \ "); std::string request_str; reqnode.GetXML(request_str); ArcSec::Request* request = NULL; std::string requestclassname = "arc.request"; /**Three options to create request object*/ //ArcSec::Source request_source(reqnode); ArcSec::Source request_source(request_str); //ArcSec::SourceFile request_source("Request.xml"); request = eval_loader.getRequest(requestclassname, request_source); if(request == NULL) logger.msg(Arc::ERROR, "Can not dynamically produce Request"); /**Two options to add policy into evaluator*/ eval->addPolicy(policy_source); //eval->addPolicy(policy); ArcSec::Response *resp = NULL; /**Feed evaluator with request to execute evaluation*/ resp = 
eval->evaluate(request_source); /**Evaluate request against policy. Both request and policy are passed as arguments to the evaluator. The pre-stored policy *inside the evaluator will be deleted and will not affect the evaluation. *The request argument can be either of two options: object or Source; *The policy argument can also be either of the above two options */ //resp = eval->evaluate(request_source, policy); //resp = eval->evaluate(request, policy_source); //resp = eval->evaluate(request_source, policy_source); //resp = eval->evaluate(request, policy); /**Get the response*/ logger.msg(Arc::INFO, "There is %d subjects, which satisfy at least one policy", (resp->getResponseItems()).size()); ArcSec::ResponseList rlist = resp->getResponseItems(); int size = rlist.size(); for(int i = 0; i< size; i++){ ArcSec::ResponseItem* respitem = rlist[i]; ArcSec::RequestTuple* tp = respitem->reqtp; ArcSec::Subject::iterator it; ArcSec::Subject subject = tp->sub; for (it = subject.begin(); it!= subject.end(); it++){ ArcSec::AttributeValue *attrval; ArcSec::RequestAttribute *attr; attr = dynamic_cast<ArcSec::RequestAttribute*>(*it); if(attr){ attrval = (*it)->getAttributeValue(); if(attrval) logger.msg(Arc::INFO,"Attribute Value inside Subject: %s", attrval->encode()); } } /**Return "yes" or "no"*/ //Scan each <RequestItem> (since the original request has been split, //here there is only one <Subject>, <Resource>, <Action>, <Context> under <RequestItem>), //then scan each <Attribute> under <Subject>. Since we only return the <RequestItem> //which has satisfied the policy, and <Subject> is a must element for <RequestItem>, if //there is a <Subject>, we can say the <RequestItem> satisfies the policy. if(subject.size()>0) logger.msg(Arc::INFO, "The request has passed the policy evaluation"); } if(resp){ delete resp; resp = NULL; } delete eval; delete request; return 0; } nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/README0000644000000000000000000000012411121445203020152 xustar000000000000000027 mtime=1229343363.681734 27 atime=1513200575.027708 30 ctime=1513200661.018759946 nordugrid-arc-5.4.2/src/hed/shc/README0000644000175000002070000000321111121445203020216 0ustar00mockbuildmock00000000000000pdc = Policy Decision Component Some description about the ArcPDP directory: test.cpp: test file which calls the existing ArcPDP API. It can be merged into any PDP (policy decision point). classload_test.cpp: test file which is just to test the functionality of the class ClassLoad. ArcRequest.cpp, ArcRequestItem: parse the specific request.xml which is specified by the request.xsd schema. Evaluator.cpp: central class which accepts request.xml and policy.xml, and makes the decision. EvaluationCtx.cpp: context class which includes some information related to each request.xml. attr directory: includes all the XYZAttribute classes which can understand different types of datatype, and the AttributeFactory which will produce the XYZAttributes. So far, the XYZAttributes are statically related to the XXAttributeFactory, which means if you want to support a new type of Attribute, you need to modify the XXAttributeFactory, or write a new AttributeFactory. The solution is to make the attribute support dynamically parsed from the configuration file, but so far it has not been implemented. The XXAttributeFactory can be dynamically loaded by parsing the configuration file. fn directory: includes all the functions which operate on the attributes, and the FnFactory which will produce the Functions. The XXFnAttributeFactory can be dynamically loaded. alg directory: includes all the algorithms which can define the composition relationship between the rules or policies. 
policy directory: includes all the classes which can parse rule or policy, and evaluate each request against policy or rule according to the datatype and function definition in the policy or rule. nordugrid-arc-5.4.2/src/hed/shc/PaxHeaders.7502/delegationpdp0000644000000000000000000000013213214316025022037 xustar000000000000000030 mtime=1513200661.361764141 30 atime=1513200668.723854182 30 ctime=1513200661.361764141 nordugrid-arc-5.4.2/src/hed/shc/delegationpdp/0000755000175000002070000000000013214316025022162 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/delegationpdp/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612052416515024157 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200600.121015142 29 ctime=1513200661.35676408 nordugrid-arc-5.4.2/src/hed/shc/delegationpdp/Makefile.am0000644000175000002070000000056512052416515024230 0ustar00mockbuildmock00000000000000SUBDIRS = schema noinst_LTLIBRARIES = libdelegationpdp.la libdelegationpdp_la_SOURCES = DelegationPDP.cpp DelegationPDP.h libdelegationpdp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libdelegationpdp_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la nordugrid-arc-5.4.2/src/hed/shc/delegationpdp/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315730024163 xustar000000000000000030 mtime=1513200600.166015692 30 atime=1513200649.276616335 30 ctime=1513200661.357764092 nordugrid-arc-5.4.2/src/hed/shc/delegationpdp/Makefile.in0000644000175000002070000006772613214315730024243 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/delegationpdp DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libdelegationpdp_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libdelegationpdp_la_OBJECTS = libdelegationpdp_la-DelegationPDP.lo libdelegationpdp_la_OBJECTS = $(am_libdelegationpdp_la_OBJECTS) libdelegationpdp_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libdelegationpdp_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libdelegationpdp_la_SOURCES) DIST_SOURCES = $(libdelegationpdp_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive 
clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = 
@GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING 
= @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = schema noinst_LTLIBRARIES = libdelegationpdp.la libdelegationpdp_la_SOURCES = DelegationPDP.cpp DelegationPDP.h libdelegationpdp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libdelegationpdp_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/delegationpdp/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign 
src/hed/shc/delegationpdp/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libdelegationpdp.la: $(libdelegationpdp_la_OBJECTS) $(libdelegationpdp_la_DEPENDENCIES) $(libdelegationpdp_la_LINK) $(libdelegationpdp_la_OBJECTS) $(libdelegationpdp_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdelegationpdp_la-DelegationPDP.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libdelegationpdp_la-DelegationPDP.lo: DelegationPDP.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegationpdp_la_CXXFLAGS) $(CXXFLAGS) -MT libdelegationpdp_la-DelegationPDP.lo -MD -MP -MF $(DEPDIR)/libdelegationpdp_la-DelegationPDP.Tpo -c -o libdelegationpdp_la-DelegationPDP.lo `test -f 'DelegationPDP.cpp' || echo '$(srcdir)/'`DelegationPDP.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libdelegationpdp_la-DelegationPDP.Tpo $(DEPDIR)/libdelegationpdp_la-DelegationPDP.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DelegationPDP.cpp' object='libdelegationpdp_la-DelegationPDP.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) 
--mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegationpdp_la_CXXFLAGS) $(CXXFLAGS) -c -o libdelegationpdp_la-DelegationPDP.lo `test -f 'DelegationPDP.cpp' || echo '$(srcdir)/'`DelegationPDP.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-noinstLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/delegationpdp/PaxHeaders.7502/DelegationPDP.h0000644000000000000000000000012412110410653024701 xustar000000000000000027 mtime=1361187243.181034 27 atime=1513200575.017708 30 ctime=1513200661.359764116 nordugrid-arc-5.4.2/src/hed/shc/delegationpdp/DelegationPDP.h0000644000175000002070000000143112110410653024745 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_DELEGATIONPDP_H__ #define __ARC_SEC_DELEGATIONPDP_H__ #include #include #include #include namespace ArcSec { ///DelegationPDP - PDP which can handle the ARC-specific request and policy /// provided as an identity delegation policy.
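/// Configuration sketch (inferred from the constructor in DelegationPDP.cpp, which
/// reads an optional Filter element with one or more Select/Reject children; the
/// attribute names shown here are illustrative placeholders, only the
/// Filter/Select/Reject structure is taken from that code):
///   <Filter>
///     <Select>some.security.attribute</Select>
///     <Reject>some.other.attribute</Reject>
///   </Filter>
/// The collected names end up in select_attrs/reject_attrs and are passed to the
/// Filter() call on the message authentication objects in isPermitted().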
class DelegationPDP : public PDP { public: static Arc::Plugin* get_delegation_pdp(Arc::PluginArgument *arg); DelegationPDP(Arc::Config* cfg, Arc::PluginArgument* parg); virtual ~DelegationPDP(); virtual PDPStatus isPermitted(Arc::Message *msg) const; private: std::list select_attrs; std::list reject_attrs; protected: static Arc::Logger logger; }; } // namespace ArcSec #endif /* __ARC_SEC_DELEGATIONPDP_H__ */ nordugrid-arc-5.4.2/src/hed/shc/delegationpdp/PaxHeaders.7502/DelegationPDP.cpp0000644000000000000000000000012412110410653025234 xustar000000000000000027 mtime=1361187243.181034 27 atime=1513200575.014708 30 ctime=1513200661.358764104 nordugrid-arc-5.4.2/src/hed/shc/delegationpdp/DelegationPDP.cpp0000644000175000002070000001464712110410653025305 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "DelegationPDP.h" // NOTE: using factories/attributes provided by ArcPDP // This PDP is mostly the same as ArcPDP. The difference // is that it takes both request and policies from SecAttr. // Also, for flexibility, it currently performs every evaluation // per request. Later it should become cleverer and distinguish // whether the delegated policy comes per request or per session. Arc::Logger ArcSec::DelegationPDP::logger(Arc::Logger::getRootLogger(), "ArcSec.DelegationPDP"); using namespace Arc; namespace ArcSec { Plugin* DelegationPDP::get_delegation_pdp(PluginArgument* arg) { PDPPluginArgument* pdparg = arg?dynamic_cast(arg):NULL; if(!pdparg) return NULL; return new DelegationPDP((Config*)(*pdparg),arg); } DelegationPDP::DelegationPDP(Config* cfg, Arc::PluginArgument* parg):PDP(cfg,parg) { XMLNode pdp_node(*cfg); XMLNode filter = (*cfg)["Filter"]; if((bool)filter) { XMLNode select_attr = filter["Select"]; XMLNode reject_attr = filter["Reject"]; for(;(bool)select_attr;++select_attr) select_attrs.push_back((std::string)select_attr); for(;(bool)reject_attr;++reject_attr) reject_attrs.push_back((std::string)reject_attr); }; } DelegationPDP::~DelegationPDP(){ } PDPStatus DelegationPDP::isPermitted(Message *msg) const { MessageAuth* mauth = msg->Auth()->Filter(select_attrs,reject_attrs); MessageAuth* cauth = msg->AuthContext()->Filter(select_attrs,reject_attrs); if((!mauth) && (!cauth)) { logger.msg(ERROR,"Missing security object in message"); return false; }; // Extract policies // TODO: Probably make MessageAuth do it or there should be some other way // to avoid multiple extraction of the same object. // Currently delegated policies are simply stored under the special name "DELEGATION POLICY". // To have multiple policies in the same object the MultiSecAttr class may be used. Then // policies are concatenated under the top-level element "Policies". // Otherwise, in the case of a single policy (current implementation), the top-level element is "Policy".
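// In outline (summarising the code below): delegation policies are taken from the
// "DELEGATION POLICY" security attributes of both the message and its context,
// exported as ARC policy XML and fed to an "arc.evaluator" instance configured with
// the EvaluatorFailsOnDeny combining algorithm. A top-level "Policy" element is
// added as a single policy, while a "Policies" element has each of its "Policy"
// children added separately. If no policy is found the request is passed through;
// otherwise the final decision is permit only if at least one PERMIT and no DENY
// is returned.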
bool result = false; Evaluator* eval = NULL; try { SecAttr* mpolicy_attr = mauth?(*mauth)["DELEGATION POLICY"]:NULL; SecAttr* cpolicy_attr = cauth?(*cauth)["DELEGATION POLICY"]:NULL; if((cpolicy_attr == NULL) && (mpolicy_attr == NULL)) { logger.msg(INFO,"No delegation policies in this context and message - passing through"); result=true; throw std::exception(); }; // Create evaluator std::string evaluator = "arc.evaluator"; EvaluatorLoader eval_loader; eval = eval_loader.getEvaluator(evaluator); if(!eval) { logger.msg(ERROR, "Can not dynamically produce Evaluator"); throw std::exception(); }; // Just make sure algorithm is proper one eval->setCombiningAlg(EvaluatorFailsOnDeny); // Add policies to evaluator int policies_num = 0; if(mpolicy_attr) { NS ns; XMLNode policyxml(ns,""); if(!mpolicy_attr->Export(SecAttr::ARCAuth,policyxml)) { logger.msg(ERROR,"Failed to convert security information to ARC policy"); throw std::exception(); }; if(policyxml.Name() == "Policy") { eval->addPolicy(policyxml); ++policies_num; } else if(policyxml.Name() == "Policies") { for(XMLNode p = policyxml["Policy"];(bool)p;++p) { eval->addPolicy(p); ++policies_num; }; }; }; if(cpolicy_attr) { NS ns; XMLNode policyxml(ns,""); if(!cpolicy_attr->Export(SecAttr::ARCAuth,policyxml)) { logger.msg(ERROR,"Failed to convert security information to ARC policy"); throw std::exception(); }; if(policyxml.Name() == "Policy") { eval->addPolicy(policyxml); ++policies_num; { std::string s; policyxml.GetXML(s); logger.msg(DEBUG,"ARC delegation policy: %s",s); }; } else if(policyxml.Name() == "Policies") { for(XMLNode p = policyxml["Policy"];(bool)p;++p) { eval->addPolicy(p); ++policies_num; { std::string s; policyxml.GetXML(s); logger.msg(DEBUG,"ARC delegation policy: %s",s); }; }; }; }; if(policies_num == 0) { logger.msg(INFO,"No delegation policies in this context and message - passing through"); result=true; throw std::exception(); }; // Generate request NS ns; XMLNode requestxml(ns,""); if(mauth) { if(!mauth->Export(SecAttr::ARCAuth,requestxml)) { logger.msg(ERROR,"Failed to convert security information to ARC request"); throw std::exception(); }; }; if(cauth) { if(!cauth->Export(SecAttr::ARCAuth,requestxml)) { logger.msg(ERROR,"Failed to convert security information to ARC request"); throw std::exception(); }; }; { std::string s; requestxml.GetXML(s); logger.msg(DEBUG,"ARC Auth. 
request: %s",s); }; if(requestxml.Size() <= 0) { logger.msg(ERROR,"No requested security information was collected"); throw std::exception(); }; //Call the evaluation functionality inside Evaluator Response *resp = eval->evaluate(requestxml); if(!resp) { logger.msg(ERROR,"No authorization response was returned"); throw std::exception(); }; logger.msg(INFO, "There are %d requests, which satisfy at least one policy", (resp->getResponseItems()).size()); bool atleast_onedeny = false; bool atleast_onepermit = false; ResponseList rlist = resp->getResponseItems(); int size = rlist.size(); for(int i = 0; i < size; i++) { ResponseItem* item = rlist[i]; if(item->res == DECISION_DENY) atleast_onedeny = true; if(item->res == DECISION_PERMIT) atleast_onepermit = true; } delete resp; if(atleast_onepermit) result = true; if(atleast_onedeny) result = false; } catch(std::exception&) { }; if(result) { logger.msg(INFO, "Delegation authorization passed"); } else { logger.msg(INFO, "Delegation authorization failed"); }; if(mauth) delete mauth; if(cauth) delete cauth; if(eval) delete eval; return result; } } // namespace ArcSec nordugrid-arc-5.4.2/src/hed/shc/delegationpdp/PaxHeaders.7502/schema0000644000000000000000000000013213214316025023277 xustar000000000000000030 mtime=1513200661.384764422 30 atime=1513200668.723854182 30 ctime=1513200661.384764422 nordugrid-arc-5.4.2/src/hed/shc/delegationpdp/schema/0000755000175000002070000000000013214316025023422 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/shc/delegationpdp/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711255700321025413 xustar000000000000000027 mtime=1253540049.444682 30 atime=1513200600.181015876 30 ctime=1513200661.382764398 nordugrid-arc-5.4.2/src/hed/shc/delegationpdp/schema/Makefile.am0000644000175000002070000000014611255700321025456 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = DelegationPDP.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/shc/delegationpdp/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315730025422 xustar000000000000000030 mtime=1513200600.213016267 30 atime=1513200649.293616542 29 ctime=1513200661.38376441 nordugrid-arc-5.4.2/src/hed/shc/delegationpdp/schema/Makefile.in0000644000175000002070000004355013214315730025500 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/shc/delegationpdp/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = 
@ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = 
@LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ 
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = DelegationPDP.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/shc/delegationpdp/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/shc/delegationpdp/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ 
dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/shc/delegationpdp/schema/PaxHeaders.7502/DelegationPDP.xsd0000644000000000000000000000012411457664434026535 xustar000000000000000027 mtime=1287612700.761119 27 atime=1513200575.017708 30 ctime=1513200661.384764422 nordugrid-arc-5.4.2/src/hed/shc/delegationpdp/schema/DelegationPDP.xsd0000644000175000002070000000151211457664434026601 0ustar00mockbuildmock00000000000000 nordugrid-arc-5.4.2/src/hed/PaxHeaders.7502/profiles0000644000000000000000000000013213214316022020263 xustar000000000000000030 mtime=1513200658.578730103 30 atime=1513200668.723854182 30 ctime=1513200658.578730103 nordugrid-arc-5.4.2/src/hed/profiles/0000755000175000002070000000000013214316022020406 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/profiles/PaxHeaders.7502/general0000644000000000000000000000013213214316022021700 xustar000000000000000030 mtime=1513200658.609730482 30 atime=1513200668.723854182 30 ctime=1513200658.609730482 nordugrid-arc-5.4.2/src/hed/profiles/general/0000755000175000002070000000000013214316022022023 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/profiles/general/PaxHeaders.7502/general.xml.in0000644000000000000000000000012712574532370024543 xustar000000000000000027 mtime=1441969400.372727 30 atime=1513200649.175615099 30 ctime=1513200658.609730482 nordugrid-arc-5.4.2/src/hed/profiles/general/general.xml.in0000644000175000002070000026316312574532370024620 0ustar00mockbuildmock00000000000000 mcctcp mcctls mcchttp mccsoap arcshc identitymap identitymap arex isis echo .* file POST GET HEAD PUT extract extract fork fork pbs pbs sge sge condor condor lsf lsf ll ll slurm slurm yes file file extract extract file extract extract echo_python.EchoService.EchoService file extract extract EchoService file extract extract storage.bartender.bartender.BartenderService storage.bartender.gateway.gateway.Gateway file extract extract storage.ahash.ahash.AHashService storage.ahash.ahash.CentralAHash arcom.store.cachedpicklestore.CachedPickleStore storage.ahash.replicatedahash.ReplicatedAHash file extract extract storage.librarian.librarian.LibrarianService file extract extract storage.shepherd.shepherd.ShepherdService arcom.store.cachedpicklestore.CachedPickleStore storage.shepherd.hardlinkingbackend.HopiBackend file extract extract 1 file extract extract Allow only the following IP address to access service /datadeliveryservice POST GET datadeliveryservice nordugrid-arc-5.4.2/src/hed/PaxHeaders.7502/mcc0000644000000000000000000000013213214316024017204 xustar000000000000000030 mtime=1513200660.513753769 30 atime=1513200668.723854182 30 ctime=1513200660.513753769 nordugrid-arc-5.4.2/src/hed/mcc/0000755000175000002070000000000013214316024017327 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/mcc/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712441403527021327 xustar000000000000000027 mtime=1418069847.362374 30 atime=1513200599.152003291 30 ctime=1513200660.507753696 nordugrid-arc-5.4.2/src/hed/mcc/Makefile.am0000644000175000002070000000012712441403527021371 0ustar00mockbuildmock00000000000000SUBDIRS = tcp http soap tls msgvalidator DIST_SUBDIRS = tcp http soap tls msgvalidator nordugrid-arc-5.4.2/src/hed/mcc/PaxHeaders.7502/tls0000644000000000000000000000013213214316024020006 xustar000000000000000030 mtime=1513200660.663755604 30 atime=1513200668.723854182 30 ctime=1513200660.663755604 nordugrid-arc-5.4.2/src/hed/mcc/tls/0000755000175000002070000000000013214316024020131 
5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/PayloadTLSStream.h0000644000000000000000000000012312206662137023375 xustar000000000000000027 mtime=1377526879.651921 27 atime=1513200575.157709 29 ctime=1513200660.65775553 nordugrid-arc-5.4.2/src/hed/mcc/tls/PayloadTLSStream.h0000644000175000002070000000456612206662137023446 0ustar00mockbuildmock00000000000000#ifndef __ARC_PAYLOADTLSSTREAM_H__ #define __ARC_PAYLOADTLSSTREAM_H__ #include #include #include #include #include #include namespace ArcMCCTLS { using namespace Arc; /** Implementation of PayloadStreamInterface for an SSL handle. */ class PayloadTLSStream: public PayloadStreamInterface { protected: int timeout_; /** Timeout for read/write operations */ SSL* ssl_; Logger& logger_; public: /** Constructor. Attaches to an already open handle. The handle is not managed by this class and must be closed by external code. */ PayloadTLSStream(Logger& logger,SSL* ssl=NULL); PayloadTLSStream(PayloadTLSStream& stream); /** Destructor. */ virtual ~PayloadTLSStream(void); //void HandleError(int code = SSL_ERROR_NONE); //static void HandleError(Logger& logger,int code = SSL_ERROR_NONE); //void ClearError(void); virtual bool Get(char* buf,int& size); virtual bool Get(std::string& buf); virtual std::string Get(void) { std::string buf; Get(buf); return buf; }; virtual bool Put(const char* buf,Size_t size); virtual bool Put(const std::string& buf) { return Put(buf.c_str(),buf.length()); }; virtual bool Put(const char* buf) { return Put(buf,buf?strlen(buf):0); }; virtual operator bool(void) { return (ssl_ != NULL); }; virtual bool operator!(void) { return (ssl_ == NULL); }; virtual int Timeout(void) const { return timeout_; }; virtual void Timeout(int to) { timeout_=to; }; virtual Size_t Pos(void) const { return 0; }; virtual Size_t Size(void) const { return 0; }; virtual Size_t Limit(void) const { return 0; }; virtual void SetFailure(const std::string& err); virtual void SetFailure(int code = SSL_ERROR_NONE); /** Get the peer certificate from the established SSL object. The obtained X509 object is owned by this instance and becomes invalid after its destruction. The obtained object must nevertheless be freed at the end of usage. */ X509* GetPeerCert(void); /** Get the chain of peer certificates from the established SSL object. The obtained X509 objects are owned by this instance and become invalid after its destruction. */ STACK_OF(X509)* GetPeerChain(void); /** Get the local certificate from the associated SSL object. The obtained X509 object is owned by this instance and becomes invalid after its destruction.
*/ X509* GetCert(void); }; } #endif /* __ARC_PAYLOADTLSSTREAM_H__ */ nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/Makefile.am0000644000000000000000000000012713065017103022123 xustar000000000000000027 mtime=1490296387.698578 30 atime=1513200599.595008709 30 ctime=1513200660.650755445 nordugrid-arc-5.4.2/src/hed/mcc/tls/Makefile.am0000644000175000002070000000220513065017103022164 0ustar00mockbuildmock00000000000000SUBDIRS = schema pkglib_LTLIBRARIES = libmcctls.la libmcctls_la_SOURCES = PayloadTLSStream.cpp MCCTLS.cpp \ ConfigTLSMCC.cpp PayloadTLSMCC.cpp \ GlobusSigningPolicy.cpp DelegationSecAttr.cpp \ DelegationCollector.cpp \ BIOMCC.cpp BIOGSIMCC.cpp \ PayloadTLSStream.h MCCTLS.h \ ConfigTLSMCC.h PayloadTLSMCC.h \ GlobusSigningPolicy.h DelegationSecAttr.h \ DelegationCollector.h \ BIOMCC.h BIOGSIMCC.h libmcctls_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libmcctls_la_LIBADD = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) libmcctls_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/BIOMCC.h0000644000000000000000000000012412206663345021204 xustar000000000000000027 mtime=1377527525.531159 27 atime=1513200575.160709 30 ctime=1513200660.662755592 nordugrid-arc-5.4.2/src/hed/mcc/tls/BIOMCC.h0000644000175000002070000000056712206663345021261 0ustar00mockbuildmock00000000000000#ifndef __ARC_BIOMCC_H__ #define __ARC_BIOMCC_H__ #include namespace Arc { class MCCInterface; class PayloadStreamInterface; } namespace ArcMCCTLS { using namespace Arc; BIO* BIO_new_MCC(MCCInterface* mcc); BIO* BIO_new_MCC(PayloadStreamInterface* stream); bool BIO_MCC_failure(BIO* bio, MCC_Status& s); } // namespace Arc #endif // __ARC_BIOMCC_H__ nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315727022141 xustar000000000000000030 mtime=1513200599.657009467 30 atime=1513200648.640608556 30 ctime=1513200660.650755445 nordugrid-arc-5.4.2/src/hed/mcc/tls/Makefile.in0000644000175000002070000012074413214315727022217 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/mcc/tls DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libmcctls_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) am_libmcctls_la_OBJECTS = libmcctls_la-PayloadTLSStream.lo \ libmcctls_la-MCCTLS.lo libmcctls_la-ConfigTLSMCC.lo \ libmcctls_la-PayloadTLSMCC.lo \ libmcctls_la-GlobusSigningPolicy.lo \ libmcctls_la-DelegationSecAttr.lo \ libmcctls_la-DelegationCollector.lo libmcctls_la-BIOMCC.lo \ libmcctls_la-BIOGSIMCC.lo libmcctls_la_OBJECTS = $(am_libmcctls_la_OBJECTS) libmcctls_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libmcctls_la_CXXFLAGS) \ $(CXXFLAGS) $(libmcctls_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = 
mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libmcctls_la_SOURCES) DIST_SOURCES = $(libmcctls_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = 
@ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = 
@MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = 
@pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = schema pkglib_LTLIBRARIES = libmcctls.la libmcctls_la_SOURCES = PayloadTLSStream.cpp MCCTLS.cpp \ ConfigTLSMCC.cpp PayloadTLSMCC.cpp \ GlobusSigningPolicy.cpp DelegationSecAttr.cpp \ DelegationCollector.cpp \ BIOMCC.cpp BIOGSIMCC.cpp \ PayloadTLSStream.h MCCTLS.h \ ConfigTLSMCC.h PayloadTLSMCC.h \ GlobusSigningPolicy.h DelegationSecAttr.h \ DelegationCollector.h \ BIOMCC.h BIOGSIMCC.h libmcctls_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libmcctls_la_LIBADD = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) libmcctls_la_LDFLAGS = -no-undefined -avoid-version -module all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/mcc/tls/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/mcc/tls/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libmcctls.la: $(libmcctls_la_OBJECTS) $(libmcctls_la_DEPENDENCIES) $(libmcctls_la_LINK) -rpath $(pkglibdir) $(libmcctls_la_OBJECTS) $(libmcctls_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmcctls_la-BIOGSIMCC.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmcctls_la-BIOMCC.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmcctls_la-ConfigTLSMCC.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmcctls_la-DelegationCollector.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmcctls_la-DelegationSecAttr.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmcctls_la-GlobusSigningPolicy.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmcctls_la-MCCTLS.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmcctls_la-PayloadTLSMCC.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmcctls_la-PayloadTLSStream.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libmcctls_la-PayloadTLSStream.lo: PayloadTLSStream.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctls_la_CXXFLAGS) $(CXXFLAGS) -MT libmcctls_la-PayloadTLSStream.lo -MD -MP -MF $(DEPDIR)/libmcctls_la-PayloadTLSStream.Tpo -c -o libmcctls_la-PayloadTLSStream.lo `test -f 'PayloadTLSStream.cpp' || echo '$(srcdir)/'`PayloadTLSStream.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmcctls_la-PayloadTLSStream.Tpo $(DEPDIR)/libmcctls_la-PayloadTLSStream.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='PayloadTLSStream.cpp' object='libmcctls_la-PayloadTLSStream.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctls_la_CXXFLAGS) $(CXXFLAGS) -c -o libmcctls_la-PayloadTLSStream.lo `test -f 'PayloadTLSStream.cpp' || echo '$(srcdir)/'`PayloadTLSStream.cpp libmcctls_la-MCCTLS.lo: MCCTLS.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctls_la_CXXFLAGS) $(CXXFLAGS) -MT libmcctls_la-MCCTLS.lo -MD -MP -MF $(DEPDIR)/libmcctls_la-MCCTLS.Tpo -c -o libmcctls_la-MCCTLS.lo `test -f 'MCCTLS.cpp' || echo '$(srcdir)/'`MCCTLS.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmcctls_la-MCCTLS.Tpo $(DEPDIR)/libmcctls_la-MCCTLS.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='MCCTLS.cpp' object='libmcctls_la-MCCTLS.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctls_la_CXXFLAGS) $(CXXFLAGS) -c -o libmcctls_la-MCCTLS.lo `test -f 'MCCTLS.cpp' || echo '$(srcdir)/'`MCCTLS.cpp libmcctls_la-ConfigTLSMCC.lo: ConfigTLSMCC.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctls_la_CXXFLAGS) $(CXXFLAGS) -MT libmcctls_la-ConfigTLSMCC.lo -MD -MP -MF $(DEPDIR)/libmcctls_la-ConfigTLSMCC.Tpo -c -o libmcctls_la-ConfigTLSMCC.lo `test -f 'ConfigTLSMCC.cpp' || echo '$(srcdir)/'`ConfigTLSMCC.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmcctls_la-ConfigTLSMCC.Tpo $(DEPDIR)/libmcctls_la-ConfigTLSMCC.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ConfigTLSMCC.cpp' object='libmcctls_la-ConfigTLSMCC.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ 
@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctls_la_CXXFLAGS) $(CXXFLAGS) -c -o libmcctls_la-ConfigTLSMCC.lo `test -f 'ConfigTLSMCC.cpp' || echo '$(srcdir)/'`ConfigTLSMCC.cpp libmcctls_la-PayloadTLSMCC.lo: PayloadTLSMCC.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctls_la_CXXFLAGS) $(CXXFLAGS) -MT libmcctls_la-PayloadTLSMCC.lo -MD -MP -MF $(DEPDIR)/libmcctls_la-PayloadTLSMCC.Tpo -c -o libmcctls_la-PayloadTLSMCC.lo `test -f 'PayloadTLSMCC.cpp' || echo '$(srcdir)/'`PayloadTLSMCC.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmcctls_la-PayloadTLSMCC.Tpo $(DEPDIR)/libmcctls_la-PayloadTLSMCC.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='PayloadTLSMCC.cpp' object='libmcctls_la-PayloadTLSMCC.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctls_la_CXXFLAGS) $(CXXFLAGS) -c -o libmcctls_la-PayloadTLSMCC.lo `test -f 'PayloadTLSMCC.cpp' || echo '$(srcdir)/'`PayloadTLSMCC.cpp libmcctls_la-GlobusSigningPolicy.lo: GlobusSigningPolicy.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctls_la_CXXFLAGS) $(CXXFLAGS) -MT libmcctls_la-GlobusSigningPolicy.lo -MD -MP -MF $(DEPDIR)/libmcctls_la-GlobusSigningPolicy.Tpo -c -o libmcctls_la-GlobusSigningPolicy.lo `test -f 'GlobusSigningPolicy.cpp' || echo '$(srcdir)/'`GlobusSigningPolicy.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmcctls_la-GlobusSigningPolicy.Tpo $(DEPDIR)/libmcctls_la-GlobusSigningPolicy.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='GlobusSigningPolicy.cpp' object='libmcctls_la-GlobusSigningPolicy.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctls_la_CXXFLAGS) $(CXXFLAGS) -c -o libmcctls_la-GlobusSigningPolicy.lo `test -f 'GlobusSigningPolicy.cpp' || echo '$(srcdir)/'`GlobusSigningPolicy.cpp libmcctls_la-DelegationSecAttr.lo: DelegationSecAttr.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctls_la_CXXFLAGS) $(CXXFLAGS) -MT libmcctls_la-DelegationSecAttr.lo -MD -MP -MF $(DEPDIR)/libmcctls_la-DelegationSecAttr.Tpo -c -o libmcctls_la-DelegationSecAttr.lo `test -f 'DelegationSecAttr.cpp' || echo '$(srcdir)/'`DelegationSecAttr.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmcctls_la-DelegationSecAttr.Tpo $(DEPDIR)/libmcctls_la-DelegationSecAttr.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DelegationSecAttr.cpp' object='libmcctls_la-DelegationSecAttr.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) 
$(CPPFLAGS) $(libmcctls_la_CXXFLAGS) $(CXXFLAGS) -c -o libmcctls_la-DelegationSecAttr.lo `test -f 'DelegationSecAttr.cpp' || echo '$(srcdir)/'`DelegationSecAttr.cpp libmcctls_la-DelegationCollector.lo: DelegationCollector.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctls_la_CXXFLAGS) $(CXXFLAGS) -MT libmcctls_la-DelegationCollector.lo -MD -MP -MF $(DEPDIR)/libmcctls_la-DelegationCollector.Tpo -c -o libmcctls_la-DelegationCollector.lo `test -f 'DelegationCollector.cpp' || echo '$(srcdir)/'`DelegationCollector.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmcctls_la-DelegationCollector.Tpo $(DEPDIR)/libmcctls_la-DelegationCollector.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='DelegationCollector.cpp' object='libmcctls_la-DelegationCollector.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctls_la_CXXFLAGS) $(CXXFLAGS) -c -o libmcctls_la-DelegationCollector.lo `test -f 'DelegationCollector.cpp' || echo '$(srcdir)/'`DelegationCollector.cpp libmcctls_la-BIOMCC.lo: BIOMCC.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctls_la_CXXFLAGS) $(CXXFLAGS) -MT libmcctls_la-BIOMCC.lo -MD -MP -MF $(DEPDIR)/libmcctls_la-BIOMCC.Tpo -c -o libmcctls_la-BIOMCC.lo `test -f 'BIOMCC.cpp' || echo '$(srcdir)/'`BIOMCC.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmcctls_la-BIOMCC.Tpo $(DEPDIR)/libmcctls_la-BIOMCC.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BIOMCC.cpp' object='libmcctls_la-BIOMCC.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctls_la_CXXFLAGS) $(CXXFLAGS) -c -o libmcctls_la-BIOMCC.lo `test -f 'BIOMCC.cpp' || echo '$(srcdir)/'`BIOMCC.cpp libmcctls_la-BIOGSIMCC.lo: BIOGSIMCC.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctls_la_CXXFLAGS) $(CXXFLAGS) -MT libmcctls_la-BIOGSIMCC.lo -MD -MP -MF $(DEPDIR)/libmcctls_la-BIOGSIMCC.Tpo -c -o libmcctls_la-BIOGSIMCC.lo `test -f 'BIOGSIMCC.cpp' || echo '$(srcdir)/'`BIOGSIMCC.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmcctls_la-BIOGSIMCC.Tpo $(DEPDIR)/libmcctls_la-BIOGSIMCC.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BIOGSIMCC.cpp' object='libmcctls_la-BIOGSIMCC.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctls_la_CXXFLAGS) $(CXXFLAGS) -c -o libmcctls_la-BIOGSIMCC.lo `test -f 'BIOGSIMCC.cpp' || echo '$(srcdir)/'`BIOGSIMCC.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through 
this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-pkglibLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am \ uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/PayloadTLSMCC.h0000644000000000000000000000012412206663345022547 xustar000000000000000027 mtime=1377527525.531159 27 atime=1513200575.161709 30 ctime=1513200660.659755555 nordugrid-arc-5.4.2/src/hed/mcc/tls/PayloadTLSMCC.h0000644000175000002070000000406712206663345022623 0ustar00mockbuildmock00000000000000#ifndef __ARC_PAYLOADTLSMCC_H__ #define __ARC_PAYLOADTLSMCC_H__ #include #include #include #include #include #include "BIOMCC.h" #include "BIOGSIMCC.h" #include "PayloadTLSStream.h" #include "ConfigTLSMCC.h" namespace ArcMCCTLS { // This class extends PayloadTLSStream with initialization procedure to // connect it to next MCC or Stream interface. class PayloadTLSMCC: public PayloadTLSStream { private: /** Specifies if this object owns internal SSL objects */ bool master_; /** SSL context */ SSL_CTX* sslctx_; BIO* bio_; static int ex_data_index_; //PayloadTLSMCC(PayloadTLSMCC& stream); ConfigTLSMCC config_; bool StoreInstance(void); bool ClearInstance(void); // Generic purpose bit flags unsigned long flags_; public: /** Constructor - creates ssl object which is bound to next MCC. This instance must be used on client side. It obtains Stream interface from next MCC dynamically. */ PayloadTLSMCC(MCCInterface* mcc, const ConfigTLSMCC& cfg, Logger& logger); /** Constructor - creates ssl object which is bound to stream. 
This constructor to be used on server side. Provided stream is NOT destroyed in destructor. */ PayloadTLSMCC(PayloadStreamInterface* stream, const ConfigTLSMCC& cfg, Logger& logger); /** Copy constructor with new logger. Created object shares same SSL objects but does not destroy them in destructor. Main instance must be destroyed after all copied ones. */ PayloadTLSMCC(PayloadTLSMCC& stream); virtual ~PayloadTLSMCC(void); const ConfigTLSMCC& Config(void) { return config_; }; static PayloadTLSMCC* RetrieveInstance(X509_STORE_CTX* container); unsigned long Flags(void) { return flags_; }; void Flags(unsigned long flags) { flags=flags_; }; void SetFailure(const std::string& err); void SetFailure(int code = SSL_ERROR_NONE); operator bool(void) { return (sslctx_ != NULL); }; bool operator!(void) { return (sslctx_ == NULL); }; }; } // namespace Arc #endif /* __ARC_PAYLOADTLSMCC_H__ */ nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/PayloadTLSStream.cpp0000644000000000000000000000012412206662137023731 xustar000000000000000027 mtime=1377526879.651921 27 atime=1513200575.154709 30 ctime=1513200660.651755457 nordugrid-arc-5.4.2/src/hed/mcc/tls/PayloadTLSStream.cpp0000644000175000002070000000564112206662137024004 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #ifndef WIN32 #include #else #define NOGDI #include #include #endif #include #include #include #include #include #include "ConfigTLSMCC.h" #include "PayloadTLSStream.h" namespace ArcMCCTLS { PayloadTLSStream::PayloadTLSStream(Logger& logger,SSL* ssl):timeout_(0),ssl_(ssl),logger_(logger) { //initialize something return; } PayloadTLSStream::PayloadTLSStream(PayloadTLSStream& stream):timeout_(stream.timeout_),ssl_(stream.ssl_),logger_(stream.logger_) { } PayloadTLSStream::~PayloadTLSStream(void) { ConfigTLSMCC::ClearError(); } bool PayloadTLSStream::Get(char* buf,int& size) { if(ssl_ == NULL) return false; //ssl read ssize_t l=size; size=0; l=SSL_read(ssl_,buf,l); if(l <= 0){ SetFailure(SSL_get_error(ssl_,l)); return false; } size=l; return true; } bool PayloadTLSStream::Get(std::string& buf) { char tbuf[1024]; int l = sizeof(tbuf); bool result = Get(tbuf,l); buf.assign(tbuf,l); return result; } bool PayloadTLSStream::Put(const char* buf,Size_t size) { //ssl write ssize_t l; if(ssl_ == NULL) return false; for(;size;){ l=SSL_write(ssl_,buf,size); if(l <= 0){ SetFailure(SSL_get_error(ssl_,l)); return false; } buf+=l; size-=l; } return true; } X509* PayloadTLSStream::GetPeerCert(void){ X509* peercert; int err; if(ssl_ == NULL) return NULL; if((err=SSL_get_verify_result(ssl_)) == X509_V_OK){ peercert=SSL_get_peer_certificate (ssl_); if(peercert!=NULL) return peercert; SetFailure("Peer certificate cannot be extracted\n"+ConfigTLSMCC::HandleError()); } else{ SetFailure(std::string("Peer cert verification failed: ")+X509_verify_cert_error_string(err)+"\n"+ConfigTLSMCC::HandleError(err)); } return NULL; } X509* PayloadTLSStream::GetCert(void){ X509* cert; if(ssl_ == NULL) return NULL; cert=SSL_get_certificate (ssl_); if(cert!=NULL) return cert; SetFailure("Peer certificate cannot be extracted\n"+ConfigTLSMCC::HandleError()); return NULL; } STACK_OF(X509)* PayloadTLSStream::GetPeerChain(void){ STACK_OF(X509)* peerchain; int err; if(ssl_ == NULL) return NULL; if((err=SSL_get_verify_result(ssl_)) == X509_V_OK){ peerchain=SSL_get_peer_cert_chain (ssl_); if(peerchain!=NULL) return peerchain; SetFailure("Peer certificate chain cannot be extracted\n"+ConfigTLSMCC::HandleError()); } else { SetFailure(std::string("Peer cert 
verification failed: ")+X509_verify_cert_error_string(err)+"\n"+ConfigTLSMCC::HandleError(err)); } return NULL; } void PayloadTLSStream::SetFailure(const std::string& err) { failure_ = MCC_Status(GENERIC_ERROR,"TLS",err); } void PayloadTLSStream::SetFailure(int code) { failure_ = MCC_Status(GENERIC_ERROR,"TLS", ConfigTLSMCC::HandleError(code)); } } // namespace ArcMCCTLS nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/BIOGSIMCC.cpp0000644000000000000000000000012413065017103022067 xustar000000000000000027 mtime=1490296387.698578 27 atime=1513200575.159709 30 ctime=1513200660.656755518 nordugrid-arc-5.4.2/src/hed/mcc/tls/BIOGSIMCC.cpp0000644000175000002070000001637313065017103022146 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "BIOGSIMCC.h" namespace ArcMCCTLS { using namespace Arc; #if (OPENSSL_VERSION_NUMBER < 0x10100000L) static BIO_METHOD *BIO_meth_new(int type, const char *name) { BIO_METHOD *biom = (BIO_METHOD*)std::malloc(sizeof(BIO_METHOD)); if (biom) std::memset(biom,0,sizeof(biom)); return biom; } static void BIO_meth_free(BIO_METHOD *biom) { std::free(biom); } static void BIO_set_data(BIO *a, void *ptr) { a->ptr = ptr; } static void *BIO_get_data(BIO *a) { return a->ptr; } static void BIO_set_init(BIO *a, int init) { a->init = init; } #endif class BIOGSIMCC { private: PayloadStreamInterface* stream_; MCCInterface* next_; unsigned int header_; unsigned int token_; MCC_Status result_; BIO_METHOD* biom_; BIO* bio_; // not owned by this object public: BIOGSIMCC(MCCInterface* next):result_(STATUS_OK) { next_=NULL; stream_=NULL; bio_ = NULL; if(MakeMethod()) { bio_ = BIO_new(biom_); if(bio_) { next_=next; BIO_set_data(bio_,this); } } }; BIOGSIMCC(PayloadStreamInterface* stream):result_(STATUS_OK) { next_=NULL; stream_=NULL; bio_ = NULL; if(MakeMethod()) { bio_ = BIO_new(biom_); if(bio_) { stream_=stream; BIO_set_data(bio_,this); } } }; ~BIOGSIMCC(void) { if(stream_ && next_) delete stream_; if(biom_) BIO_meth_free(biom_); }; BIO* GetBIO() const { return bio_; }; PayloadStreamInterface* Stream() const { return stream_; }; void Stream(PayloadStreamInterface* stream) { stream_=stream; /*free ??*/ }; MCCInterface* Next(void) const { return next_; }; void MCC(MCCInterface* next) { next_=next; }; int Header(void) const { return header_; }; void Header(int v) { header_=v; }; int Token(void) const { return token_; }; void Token(int v) { token_=v; }; const MCC_Status& Result(void) { return result_; }; private: static int mcc_write(BIO *h, const char *buf, int num); static int mcc_read(BIO *h, char *buf, int size); static int mcc_puts(BIO *h, const char *str); static long mcc_ctrl(BIO *h, int cmd, long arg1, void *arg2); static int mcc_new(BIO *h); static int mcc_free(BIO *data); bool MakeMethod(void) { biom_ = BIO_meth_new(BIO_TYPE_FD,"Message Chain Component"); if(biom_) { #if (OPENSSL_VERSION_NUMBER < 0x10100000L) biom_->bwrite = &BIOGSIMCC::mcc_write; biom_->bread = &BIOGSIMCC::mcc_read; biom_->bputs = &BIOGSIMCC::mcc_puts; biom_->ctrl = &BIOGSIMCC::mcc_ctrl; biom_->create = &BIOGSIMCC::mcc_new; biom_->destroy = &BIOGSIMCC::mcc_free; #else (void)BIO_meth_set_write(biom_,&BIOGSIMCC::mcc_write); (void)BIO_meth_set_read(biom_,&BIOGSIMCC::mcc_read); (void)BIO_meth_set_puts(biom_,&BIOGSIMCC::mcc_puts); (void)BIO_meth_set_ctrl(biom_,&BIOGSIMCC::mcc_ctrl); (void)BIO_meth_set_create(biom_,&BIOGSIMCC::mcc_new); (void)BIO_meth_set_destroy(biom_,&BIOGSIMCC::mcc_free); #endif } return (biom_ != NULL); 
}; }; BIO *BIO_new_GSIMCC(MCCInterface* mcc) { BIOGSIMCC* biomcc = new BIOGSIMCC(mcc); BIO* bio = biomcc->GetBIO(); if(!bio) delete biomcc; return bio; } BIO* BIO_new_GSIMCC(PayloadStreamInterface* stream) { BIOGSIMCC* biomcc = new BIOGSIMCC(stream); BIO* bio = biomcc->GetBIO(); if(!bio) delete biomcc; return bio; } int BIOGSIMCC::mcc_new(BIO *bi) { BIO_set_data(bi,NULL); BIO_set_init(bi,1); return(1); } int BIOGSIMCC::mcc_free(BIO *b) { if(b == NULL) return(0); BIOGSIMCC* biomcc = (BIOGSIMCC*)(BIO_get_data(b)); BIO_set_data(b,NULL); if(biomcc) delete biomcc; return(1); } int BIOGSIMCC::mcc_read(BIO *b, char *out,int outl) { int ret=0; if (out == NULL) return(ret); if(b == NULL) return(ret); BIOGSIMCC* biomcc = (BIOGSIMCC*)(BIO_get_data(b)); if(biomcc == NULL) return(ret); PayloadStreamInterface* stream = biomcc->Stream(); if(stream == NULL) return ret; bool r = true; if(biomcc->Header()) { unsigned char header[4]; int l = biomcc->Header(); r = stream->Get((char*)(header+(4-l)),l); if(r) { for(int n = (4-biomcc->Header());n<(4-biomcc->Header()+l);++n) { biomcc->Token(biomcc->Token() | (header[n] << ((3-n)*8))); }; biomcc->Header(biomcc->Header()-l); }; }; if(r) { if(biomcc->Header() == 0) { if(biomcc->Token()) { int l = biomcc->Token(); if(l > outl) l=outl; r = stream->Get(out,l); if(r) { biomcc->Token(biomcc->Token() - l); outl = l; }; } else { outl=0; }; if(biomcc->Token() == 0) biomcc->Header(4); }; }; //clear_sys_error(); BIO_clear_retry_flags(b); if(r) { ret=outl; } else { ret=-1; }; return ret; } int BIOGSIMCC::mcc_write(BIO *b, const char *in, int inl) { int ret = 0; //clear_sys_error(); if(in == NULL) return(ret); if(b == NULL) return(ret); if(BIO_get_data(b) == NULL) return(ret); BIOGSIMCC* biomcc = (BIOGSIMCC*)(BIO_get_data(b)); if(biomcc == NULL) return(ret); unsigned char header[4]; header[0]=(inl>>24)&0xff; header[1]=(inl>>16)&0xff; header[2]=(inl>>8)&0xff; header[3]=(inl>>0)&0xff; PayloadStreamInterface* stream = biomcc->Stream(); if(stream != NULL) { // If available just use stream directly // TODO: check if stream has changed ??? bool r; r = stream->Put((const char*)header,4); if(r) r = stream->Put(in,inl); BIO_clear_retry_flags(b); if(r) { ret=inl; } else { ret=-1; }; return ret; }; MCCInterface* next = biomcc->Next(); if(next == NULL) return(ret); PayloadRaw nextpayload; nextpayload.Insert((const char*)header,0,4); nextpayload.Insert(in,4,inl); // Not efficient !!!! 
Message nextinmsg; nextinmsg.Payload(&nextpayload); Message nextoutmsg; MCC_Status mccret = next->process(nextinmsg,nextoutmsg); BIO_clear_retry_flags(b); if(mccret) { if(nextoutmsg.Payload()) { PayloadStreamInterface* retpayload = NULL; try { retpayload=dynamic_cast(nextoutmsg.Payload()); } catch(std::exception& e) { }; if(retpayload) { biomcc->Stream(retpayload); } else { delete nextoutmsg.Payload(); }; }; ret=inl; } else { if(nextoutmsg.Payload()) delete nextoutmsg.Payload(); ret=-1; }; return(ret); } long BIOGSIMCC::mcc_ctrl(BIO*, int cmd, long, void*) { long ret=0; switch (cmd) { case BIO_CTRL_RESET: case BIO_CTRL_SET_CLOSE: case BIO_CTRL_SET: case BIO_CTRL_EOF: case BIO_CTRL_FLUSH: case BIO_CTRL_DUP: ret=1; break; }; return(ret); } int BIOGSIMCC::mcc_puts(BIO *bp, const char *str) { int n,ret; n=strlen(str); ret=mcc_write(bp,str,n); return(ret); } bool BIO_GSIMCC_failure(BIO* bio, MCC_Status& s) { if(!bio) return false; BIOGSIMCC* b = (BIOGSIMCC*)(BIO_get_data(bio)); if(!b || b->Result()) return false; s = b->Result(); return true; } } // namespace Arc nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/DelegationSecAttr.cpp0000644000000000000000000000012311715026661024141 xustar000000000000000026 mtime=1328819633.84179 27 atime=1513200575.160709 30 ctime=1513200660.654755494 nordugrid-arc-5.4.2/src/hed/mcc/tls/DelegationSecAttr.cpp0000644000175000002070000000353611715026661024216 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "DelegationSecAttr.h" namespace ArcMCCTLSSec { using namespace Arc; DelegationSecAttr::DelegationSecAttr(void) { } DelegationSecAttr::DelegationSecAttr(const char* policy_str,int policy_size) { if(policy_str == NULL) return; XMLNode policy(policy_str,policy_size); // Only XML policies are accepted if(!policy) return; NS ns; ns["pa"]="http://www.nordugrid.org/schemas/policy-arc"; policy.Namespaces(ns); // Only ARC Policy is supported so far if(!MatchXMLName(policy,"pa:Policy")) return; policy.New(policy_doc_); } DelegationSecAttr::~DelegationSecAttr(void) { } DelegationSecAttr::operator bool(void) const { return (bool)policy_doc_; } bool DelegationSecAttr::equal(const SecAttr &b) const { try { const DelegationSecAttr& a = dynamic_cast(b); if (!a) return false; // ... 
return false; } catch(std::exception&) { }; return false; } bool DelegationSecAttr::Export(SecAttrFormat format,XMLNode &val) const { if(format == UNDEFINED) { } else if(format == ARCAuth) { policy_doc_.New(val); return true; } else { }; return false; } DelegationMultiSecAttr::DelegationMultiSecAttr(void) { } DelegationMultiSecAttr::~DelegationMultiSecAttr(void) { } bool DelegationMultiSecAttr::Add(const char* policy_str,int policy_size) { SecAttr* sattr = new DelegationSecAttr(policy_str,policy_size); if(!sattr) return false; if(!(*sattr)) { delete sattr; return false; }; attrs_.push_back(sattr); return true; } bool DelegationMultiSecAttr::Export(SecAttrFormat format,XMLNode &val) const { if(attrs_.size() == 0) return true; if(attrs_.size() == 1) return (*attrs_.begin())->Export(format,val); if(!MultiSecAttr::Export(format,val)) return false; val.Name("Policies"); return true; } } nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/MCCTLS.cpp0000644000000000000000000000012413111074301021550 xustar000000000000000027 mtime=1495562433.805447 27 atime=1513200575.150709 30 ctime=1513200660.652755469 nordugrid-arc-5.4.2/src/hed/mcc/tls/MCCTLS.cpp0000644000175000002070000005511113111074301021620 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #ifndef WIN32 #include #include #else #define NOGDI #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "GlobusSigningPolicy.h" #include "PayloadTLSStream.h" #include "PayloadTLSMCC.h" #include "DelegationCollector.h" #include "MCCTLS.h" namespace ArcMCCTLS { using namespace Arc; bool x509_to_string(X509* cert,std::string& str) { BIO *out = BIO_new(BIO_s_mem()); if(!out) return false; if(!PEM_write_bio_X509(out,cert)) { BIO_free_all(out); return false; }; for(;;) { char s[256]; int l = BIO_read(out,s,sizeof(s)); if(l <= 0) break; str.append(s,l);; }; BIO_free_all(out); return true; } class TLSSecAttr: public SecAttr { friend class MCC_TLS_Service; friend class MCC_TLS_Client; public: TLSSecAttr(PayloadTLSStream&, ConfigTLSMCC& config, Logger& logger); virtual ~TLSSecAttr(void); virtual operator bool(void) const; virtual bool Export(SecAttrFormat format,XMLNode &val) const; std::string Identity(void) { return identity_; }; std::string Subject(void) const { if(subjects_.size() <= 0) return ""; return *(--(subjects_.end())); }; std::string CA(void) const { if(subjects_.size() <= 0) return ""; return *(subjects_.begin()); }; std::string X509Str(void) const { return x509str_; }; std::string X509ChainStr(void) const { return x509chainstr_; }; virtual std::string get(const std::string& id) const { if(id == "IDENTITY") return identity_; if(id == "SUBJECT") return Subject(); if(id == "CA") return CA(); if(id == "CERTIFICATE") return x509str_; if(id == "CERTIFICATECHAIN") return x509chainstr_; if(id == "LOCALSUBJECT") return target_; if((id == "VOMS") || (id == "VO")) { std::list items = getAll(id); if(!items.empty()) return *items.begin(); return ""; }; return ""; }; virtual std::list getAll(const std::string& id) const { std::list items; if(id == "VOMS") { for(std::vector::const_iterator v = voms_attributes_.begin(); v != voms_attributes_.end();++v) { for(std::vector::const_iterator a = v->attributes.begin(); a != v->attributes.end();++a) { std::string vattr = VOMSFQANToFull(v->voname,*a); items.push_back(vattr); }; }; } else if(id == "VO") { for(std::vector::const_iterator v = voms_attributes_.begin(); v != voms_attributes_.end();++v) { 
items.push_back(v->voname); }; } else { return SecAttr::getAll(id); }; return items; }; protected: std::string identity_; // Subject of last non-proxy certificate std::list subjects_; // Subjects of all certificates in chain std::vector voms_attributes_; // VOMS attributes from the VOMS extension of proxy std::string target_; // Subject of host certificate std::string x509str_; // The last certificate (in string format). TODO: extract on demand. std::string x509chainstr_; // Other certificates (in string format). TODO: extract on demand. bool processing_failed_; virtual bool equal(const SecAttr &b) const; }; #define SELFSIGNED(cert) (X509_NAME_cmp(X509_get_issuer_name(cert),X509_get_subject_name(cert)) == 0) TLSSecAttr::TLSSecAttr(PayloadTLSStream& payload, ConfigTLSMCC& config, Logger& logger) { std::string subject; processing_failed_ = false; STACK_OF(X509)* peerchain = payload.GetPeerChain(); voms_attributes_.clear(); if(peerchain != NULL) { for(int idx = 0;;++idx) { if(idx >= sk_X509_num(peerchain)) break; X509* cert = sk_X509_value(peerchain,sk_X509_num(peerchain)-idx-1); if(idx == 0) { // Obtain CA subject // Sometimes certificates chain contains CA certificate. if(!SELFSIGNED(cert)) { char* buf = X509_NAME_oneline(X509_get_issuer_name(cert),NULL,0); if(buf) { subject=buf; subjects_.push_back(subject); OPENSSL_free(buf); }; }; }; { char* buf = X509_NAME_oneline(X509_get_subject_name(cert),NULL,0); if(buf) { subject=buf; subjects_.push_back(subject); OPENSSL_free(buf); }; }; std::string certstr; x509_to_string(cert, certstr); x509chainstr_=certstr+x509chainstr_; if(X509_get_ext_by_NID(cert,NID_proxyCertInfo,-1) < 0) { identity_=subject; }; // Parse VOMS attributes from each certificate of the peer chain. Arc::VOMSTrustList trust_list(config.VOMSCertTrustDN()); bool res = parseVOMSAC(cert, config.CADir(), config.CAFile(), config.VOMSDir(), trust_list, voms_attributes_, true, true); if(!res) { logger.msg(ERROR,"VOMS attribute parsing failed"); }; }; }; X509* peercert = payload.GetPeerCert(); if (peercert != NULL) { if(subjects_.size() <= 0) { // Obtain CA subject if not obtained yet // Check for CA certificate used for connection - overprotection if(!SELFSIGNED(peercert)) { char* buf = X509_NAME_oneline(X509_get_issuer_name(peercert),NULL,0); if(buf) { subject=buf; subjects_.push_back(subject); OPENSSL_free(buf); }; }; }; { char* buf = X509_NAME_oneline(X509_get_subject_name(peercert),NULL,0); if(buf) { subject=buf; //logger.msg(VERBOSE, "Peer name: %s", peer_dn); subjects_.push_back(subject); OPENSSL_free(buf); }; }; if(X509_get_ext_by_NID(peercert,NID_proxyCertInfo,-1) < 0) { identity_=subject; }; // Parse VOMS attributes from peer certificate Arc::VOMSTrustList trust_list(config.VOMSCertTrustDN()); bool res = parseVOMSAC(peercert, config.CADir(), config.CAFile(), config.VOMSDir(), trust_list, voms_attributes_, true, true); if(!res) { logger.msg(ERROR,"VOMS attribute parsing failed"); }; // Convert the x509 cert into string format x509_to_string(peercert, x509str_); X509_free(peercert); }; if(identity_.empty()) identity_=subject; X509* hostcert = payload.GetCert(); if (hostcert != NULL) { char* buf = X509_NAME_oneline(X509_get_subject_name(hostcert),NULL,0); if(buf) { target_=buf; //logger.msg(VERBOSE, "Host name: %s", peer_dn); OPENSSL_free(buf); }; }; // Cleaning collected VOMS attributes for(std::vector::iterator v = voms_attributes_.begin(); v != voms_attributes_.end();) { if(v->status & VOMSACInfo::Error) { if(config.IfCheckVOMSCritical() && (v->status & VOMSACInfo::IsCritical)) 
{ processing_failed_ = true; logger.msg(ERROR,"Critical VOMS attribute processing failed"); }; if(config.IfFailOnVOMSParsing() && (v->status & VOMSACInfo::ParsingError)) { processing_failed_ = true; logger.msg(ERROR,"VOMS attribute parsing failed"); }; if(config.IfFailOnVOMSInvalid() && (v->status & VOMSACInfo::ValidationError)) { processing_failed_ = true; logger.msg(ERROR,"VOMS attribute validation failed"); }; logger.msg(ERROR,"VOMS attribute is ignored due to processing/validation error"); v = voms_attributes_.erase(v); } else { ++v; }; }; } TLSSecAttr::~TLSSecAttr(void) { } TLSSecAttr::operator bool(void) const { return !processing_failed_; } bool TLSSecAttr::equal(const SecAttr &b) const { try { const TLSSecAttr& a = dynamic_cast(b); if (!a) return false; // ... return false; } catch(std::exception&) { }; return false; } static void add_arc_subject_attribute(XMLNode item,const std::string& subject,const char* id) { XMLNode attr = item.NewChild("ra:SubjectAttribute"); attr=subject; attr.NewAttribute("Type")="string"; attr.NewAttribute("AttributeId")=id; } static void add_xacml_subject_attribute(XMLNode item,const std::string& subject,const char* id) { XMLNode attr = item.NewChild("ra:Attribute"); attr.NewAttribute("DataType")="xs:string"; attr.NewAttribute("AttributeId")=id; attr.NewChild("ra:AttributeValue") = subject; } bool TLSSecAttr::Export(SecAttrFormat format,XMLNode &val) const { if(format == UNDEFINED) { } else if(format == ARCAuth) { NS ns; ns["ra"]="http://www.nordugrid.org/schemas/request-arc"; val.Namespaces(ns); val.Name("ra:Request"); XMLNode item = val.NewChild("ra:RequestItem"); XMLNode subj = item.NewChild("ra:Subject"); std::list::const_iterator s = subjects_.begin(); std::string subject; if(s != subjects_.end()) { subject=*s; add_arc_subject_attribute(subj,subject,"http://www.nordugrid.org/schemas/policy-arc/types/tls/ca"); for(;s != subjects_.end();++s) { subject=*s; add_arc_subject_attribute(subj,subject,"http://www.nordugrid.org/schemas/policy-arc/types/tls/chain"); }; add_arc_subject_attribute(subj,subject,"http://www.nordugrid.org/schemas/policy-arc/types/tls/subject"); }; if(!identity_.empty()) { add_arc_subject_attribute(subj,identity_,"http://www.nordugrid.org/schemas/policy-arc/types/tls/identity"); }; if(!voms_attributes_.empty()) { for(int k=0; k < voms_attributes_.size(); k++) { for(int n=0; n < voms_attributes_[k].attributes.size(); n++) { std::string vattr = VOMSFQANToFull(voms_attributes_[k].voname,voms_attributes_[k].attributes[n]); add_arc_subject_attribute(subj,vattr,"http://www.nordugrid.org/schemas/policy-arc/types/tls/vomsattribute"); }; }; }; if(!target_.empty()) { XMLNode resource = item.NewChild("ra:Resource"); resource=target_; resource.NewAttribute("Type")="string"; resource.NewAttribute("AttributeId")="http://www.nordugrid.org/schemas/policy-arc/types/tls/hostidentity"; // Following is agreed to not be use till all use cases are clarified (Bern agreement) //hostidentity should be SubjectAttribute, because hostidentity is be constrained to access //the peer delegation identity, or some resource which is attached to the peer delegation identity. //The constrant is defined in delegation policy. 
//add_arc_subject_attribute(subj,target_,"http://www.nordugrid.org/schemas/policy-arc/types/tls/hostidentity"); }; return true; } else if(format == XACML) { NS ns; ns["ra"]="urn:oasis:names:tc:xacml:2.0:context:schema:os"; val.Namespaces(ns); val.Name("ra:Request"); XMLNode subj = val.NewChild("ra:Subject"); std::list::const_iterator s = subjects_.begin(); std::string subject; if(s != subjects_.end()) { subject=*s; add_xacml_subject_attribute(subj,subject,"http://www.nordugrid.org/schemas/policy-arc/types/tls/ca"); for(;s != subjects_.end();++s) { subject=*s; add_xacml_subject_attribute(subj,subject,"http://www.nordugrid.org/schemas/policy-arc/types/tls/chain"); }; add_xacml_subject_attribute(subj,subject,"http://www.nordugrid.org/schemas/policy-arc/types/tls/subject"); }; if(!identity_.empty()) { add_xacml_subject_attribute(subj,identity_,"http://www.nordugrid.org/schemas/policy-arc/types/tls/identity"); }; if(!voms_attributes_.empty()) { for(int k=0; k < voms_attributes_.size(); k++) { for(int n=0; n < voms_attributes_[k].attributes.size(); n++) { std::string vattr = VOMSFQANToFull(voms_attributes_[k].voname,voms_attributes_[k].attributes[n]); add_xacml_subject_attribute(subj,vattr,"http://www.nordugrid.org/schemas/policy-arc/types/tls/vomsattribute"); }; }; }; if(!target_.empty()) { XMLNode resource = val.NewChild("ra:Resource"); XMLNode attr = resource.NewChild("ra:Attribute"); attr.NewChild("ra:AttributeValue") = target_; attr.NewAttribute("DataType")="xs:string"; attr.NewAttribute("AttributeId")="http://www.nordugrid.org/schemas/policy-arc/types/tls/hostidentity"; // Following is agreed to not be use till all use cases are clarified (Bern agreement) //hostidentity should be SubjectAttribute, because hostidentity is be constrained to access //the peer delegation identity, or some resource which is attached to the peer delegation identity. //The constrant is defined in delegation policy. //add_xacml_subject_attribute(subj,target_,"http://www.nordugrid.org/schemas/policy-arc/types/tls/hostidentity"); }; return true; } else if(format == GACL) { NS ns; val.Namespaces(ns); val.Name("gacl"); XMLNode entry = val.NewChild("entry"); if(!identity_.empty()) entry.NewChild("person").NewChild("dn") = identity_; XMLNode voms; for(std::vector::const_iterator v = voms_attributes_.begin(); v != voms_attributes_.end();++v) { for(std::vector::const_iterator a = v->attributes.begin(); a != v->attributes.end();++a) { if(!voms) voms = entry.NewChild("voms"); voms.NewChild("fqan") = *a; }; voms = XMLNode(); // ?? }; return true; } else { }; return false; } class MCC_TLS_Context:public MessageContextElement { public: PayloadTLSMCC* stream; MCC_TLS_Context(PayloadTLSMCC* s = NULL):stream(s) { }; virtual ~MCC_TLS_Context(void) { if(stream) delete stream; }; }; /* The main functionality of the constructor method is to initialize SSL layer. 
*/ MCC_TLS_Service::MCC_TLS_Service(Config& cfg,PluginArgument* parg):MCC_TLS(cfg,false,parg) { if(!OpenSSLInit()) return; } MCC_TLS_Service::~MCC_TLS_Service(void) { // SSL deinit not needed } MCC_Status MCC_TLS_Service::process(Message& inmsg,Message& outmsg) { // Accepted payload is StreamInterface // Returned payload is undefined - currently holds no information // TODO: probably some other credentials check is needed //if(!sslctx_) return MCC_Status(); // Obtaining underlying stream if(!inmsg.Payload()) return MCC_Status(); PayloadStreamInterface* inpayload = NULL; try { inpayload = dynamic_cast(inmsg.Payload()); } catch(std::exception& e) { }; if(!inpayload) return MCC_Status(); // Obtaining previously created stream context or creating a new one MCC_TLS_Context* context = NULL; { MessageContextElement* mcontext = (*inmsg.Context())["tls.service"]; if(mcontext) { try { context = dynamic_cast(mcontext); } catch(std::exception& e) { }; }; }; PayloadTLSMCC *stream = NULL; if(context) { // Old connection - using available SSL stream stream=context->stream; } else { // Creating new SSL object bound to stream of previous MCC // TODO: renew stream because it may be recreated by TCP MCC stream = new PayloadTLSMCC(inpayload,config_,logger); // Check for established connection if(!*stream) { logger.msg(ERROR, "Failed to establish connection: %s", stream->Failure().operator std::string()); delete stream; return MCC_Status(); } context=new MCC_TLS_Context(stream); inmsg.Context()->Add("tls.service",context); }; // Creating message to pass to next MCC Message nextinmsg = inmsg; nextinmsg.Payload(stream); Message nextoutmsg = outmsg; nextoutmsg.Payload(NULL); PayloadTLSStream* tstream = dynamic_cast(stream); // Filling security attributes if(tstream && (config_.IfClientAuthn())) { TLSSecAttr* sattr = new TLSSecAttr(*tstream, config_, logger); //Getting the subject name of peer(client) certificate logger.msg(VERBOSE, "Peer name: %s", sattr->Subject()); nextinmsg.Attributes()->set("TLS:PEERDN",sattr->Subject()); logger.msg(VERBOSE, "Identity name: %s", sattr->Identity()); nextinmsg.Attributes()->set("TLS:IDENTITYDN",sattr->Identity()); logger.msg(VERBOSE, "CA name: %s", sattr->CA()); nextinmsg.Attributes()->set("TLS:CADN",sattr->CA()); if(!((sattr->target_).empty())) { nextinmsg.Attributes()->set("TLS:LOCALDN",sattr->target_); } if(!*sattr) { logger.msg(ERROR, "Failed to process security attributes in TLS MCC for incoming message"); delete sattr; return MCC_Status(); } nextinmsg.Auth()->set("TLS",sattr); } // Checking authentication and authorization; if(!ProcessSecHandlers(nextinmsg,"incoming")) { logger.msg(ERROR, "Security check failed in TLS MCC for incoming message"); return MCC_Status(); }; // Call next MCC MCCInterface* next = Next(); if(!next) return MCC_Status(); MCC_Status ret = next->process(nextinmsg,nextoutmsg); // TODO: If next MCC returns anything redirect it to stream if(nextoutmsg.Payload()) { delete nextoutmsg.Payload(); nextoutmsg.Payload(NULL); }; if(!ret) return ret; // For nextoutmsg, nothing to do for payload of msg, but // transfer some attributes of msg outmsg = nextoutmsg; return MCC_Status(STATUS_OK); } MCC_TLS_Client::MCC_TLS_Client(Config& cfg,PluginArgument* parg):MCC_TLS(cfg,true,parg){ stream_=NULL; if(!OpenSSLInit()) return; /* Get DN from certificate, and put it into message's attribute */ } MCC_TLS_Client::~MCC_TLS_Client(void) { if(stream_) delete stream_; // SSL deinit not needed } MCC_Status MCC_TLS_Client::process(Message& inmsg,Message& outmsg) { // Accepted 
payload is Raw and Stream // Returned payload is Stream // Extracting payload if(!inmsg.Payload()) { return MCC_Status(GENERIC_ERROR,"TLS","Internal error: missing payload for outgoing TLS message"); } if(!stream_) { return MCC_Status(GENERIC_ERROR,"TLS","Internal error: communication stream not initialised"); } if(!*stream_) { if(!stream_->Failure()) return stream_->Failure(); return MCC_Status(GENERIC_ERROR,"TLS","Internal error: communication stream not initialised"); } PayloadRawInterface* rinpayload = NULL; PayloadStreamInterface* sinpayload = NULL; try { rinpayload = dynamic_cast(inmsg.Payload()); } catch(std::exception& e) { }; try { sinpayload = dynamic_cast(inmsg.Payload()); } catch(std::exception& e) { }; if((!rinpayload) && (!sinpayload)) { return MCC_Status(GENERIC_ERROR,"TLS","Internal error: unsupported payload for outgoing TLS message"); } // Collecting security attributes // TODO: keep them or redo same for incoming message PayloadTLSStream* tstream = dynamic_cast(stream_); if(tstream) { TLSSecAttr* sattr = new TLSSecAttr(*tstream, config_, logger); inmsg.Auth()->set("TLS",sattr); //Getting the subject name of peer(client) certificate logger.msg(VERBOSE, "Peer name: %s", sattr->Subject()); inmsg.Attributes()->set("TLS:PEERDN",sattr->Subject()); logger.msg(VERBOSE, "Identity name: %s", sattr->Identity()); inmsg.Attributes()->set("TLS:IDENTITYDN",sattr->Identity()); logger.msg(VERBOSE, "CA name: %s", sattr->CA()); inmsg.Attributes()->set("TLS:CADN",sattr->CA()); } //Checking authentication and authorization; if(!ProcessSecHandlers(inmsg,"outgoing")) { logger.msg(VERBOSE, "Security check failed for outgoing TLS message"); // TODO: propagate message from SecHandlers return MCC_Status(GENERIC_ERROR,"TLS","Security check failed for outgoing TLS message"); }; // Sending payload if(rinpayload) { for(int n=0;;++n) { char* buf = rinpayload->Buffer(n); if(!buf) break; int bufsize = rinpayload->BufferSize(n); if(!(stream_->Put(buf,bufsize))) { logger.msg(VERBOSE, "Failed to send content of buffer"); if(!stream_->Failure()) return stream_->Failure(); return MCC_Status(GENERIC_ERROR,"TLS","Failed to send content of buffer"); }; }; } else { int size = -1; if(!sinpayload->Get(*stream_,size)) { // Currently false may also mean that stream finihsed. // Hence it can't be used to indicate real failure. 
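// TODO: could consult stream_->Failure() here, as the raw-buffer branch
// above does, to tell a genuine transport error apart from a normal
// end of stream.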
// logger.msg(INFO, "Failed to transfer content of stream"); // return MCC_Status(); }; }; outmsg.Payload(new PayloadTLSMCC(*stream_)); //outmsg.Attributes(inmsg.Attributes()); //outmsg.Context(inmsg.Context()); if(!ProcessSecHandlers(outmsg,"incoming")) { logger.msg(VERBOSE, "Security check failed for incoming TLS message"); delete outmsg.Payload(NULL); // TODO: propagate message from SecHandlers return MCC_Status(GENERIC_ERROR,"TLS","Security check failed for incoming TLS message"); }; return MCC_Status(STATUS_OK); } void MCC_TLS_Client::Next(MCCInterface* next,const std::string& label) { if(label.empty()) { if(stream_) delete stream_; stream_=NULL; stream_=new PayloadTLSMCC(next,config_,logger); }; MCC::Next(next,label); } } // namespace ArcMCCTLS //Glib::Mutex Arc::MCC_TLS::lock_; Arc::Logger ArcMCCTLS::MCC_TLS::logger(Arc::Logger::getRootLogger(), "MCC.TLS"); ArcMCCTLS::MCC_TLS::MCC_TLS(Arc::Config& cfg,bool client,PluginArgument* parg) : Arc::MCC(&cfg,parg), config_(cfg,client) { } static Arc::Plugin* get_mcc_service(Arc::PluginArgument* arg) { Arc::MCCPluginArgument* mccarg = arg?dynamic_cast(arg):NULL; if(!mccarg) return NULL; return new ArcMCCTLS::MCC_TLS_Service(*(Arc::Config*)(*mccarg),mccarg); } static Arc::Plugin* get_mcc_client(Arc::PluginArgument* arg) { Arc::MCCPluginArgument* mccarg = arg?dynamic_cast(arg):NULL; if(!mccarg) return NULL; return new ArcMCCTLS::MCC_TLS_Client(*(Arc::Config*)(*mccarg),mccarg); } extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "tls.service", "HED:MCC", NULL, 0, &get_mcc_service }, { "tls.client", "HED:MCC", NULL, 0, &get_mcc_client }, { "delegation.collector", "HED:SHC", NULL, 0, &ArcMCCTLSSec::DelegationCollector::get_sechandler}, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/DelegationCollector.cpp0000644000000000000000000000012413213445240024514 xustar000000000000000027 mtime=1512983200.815191 27 atime=1513200575.149709 30 ctime=1513200660.655755506 nordugrid-arc-5.4.2/src/hed/mcc/tls/DelegationCollector.cpp0000644000175000002070000001034313213445240024562 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "PayloadTLSStream.h" #include "DelegationSecAttr.h" #include "DelegationCollector.h" namespace ArcMCCTLSSec { using namespace Arc; using namespace ArcMCCTLS; static Arc::Logger logger(Arc::Logger::getRootLogger(),"DelegationCollector"); DelegationCollector::DelegationCollector(Config *cfg,PluginArgument* parg):SecHandler(cfg,parg) { } DelegationCollector::~DelegationCollector(void) { } // TODO: In a future accept and store ALL policies. Let it fail at PDP level. // Currently code fails immediately if policy not recognized. // Alternatively behavior may be configurable. static bool get_proxy_policy(X509* cert,DelegationMultiSecAttr* sattr) { bool result = false; PROXY_CERT_INFO_EXTENSION *pci = (PROXY_CERT_INFO_EXTENSION*)X509_get_ext_d2i(cert,NID_proxyCertInfo,NULL,NULL); if(!pci) return true; // No proxy switch (OBJ_obj2nid(pci->proxyPolicy->policyLanguage)) { case NID_Independent: { // No rights granted // Either such situation should be disallowed or // policy should be generated which grants no right for anything. // First option is easier to implement so using it at least yet. logger.msg(DEBUG,"Independent proxy - no rights granted"); }; break; case NID_id_ppl_inheritAll: { // All right granted. No additional policies should be enforced. 
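// NID_id_ppl_inheritAll is the RFC 3820 "inheritAll" policy language:
// such a proxy carries the full rights of its issuer, so there is no
// embedded policy document to collect.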
logger.msg(DEBUG,"Proxy with all rights inherited"); result=true; }; break; case NID_id_ppl_anyLanguage: { // Here we store ARC policy // Either this is ARC policy is determined by examining content const char* policy_str = (const char *)(pci->proxyPolicy->policy->data); int policy_length = pci->proxyPolicy->policy->length; if((policy_str == NULL) || (policy_length <= 0)) { logger.msg(DEBUG,"Proxy with empty policy - fail on unrecognized policy"); break; }; { std::string s(policy_str,policy_length); logger.msg(DEBUG,"Proxy with specific policy: %s",s); }; result = sattr->Add(policy_str,policy_length); if(result) { logger.msg(DEBUG,"Proxy with ARC Policy"); } else { logger.msg(DEBUG,"Proxy with unknown policy - fail on unrecognized policy"); }; }; break; default: { // Unsupported policy - fail }; break; }; PROXY_CERT_INFO_EXTENSION_free(pci); return result; } SecHandlerStatus DelegationCollector::Handle(Arc::Message* msg) const { SecAttr* sattr_ = NULL; DelegationMultiSecAttr* sattr = NULL; try { MessagePayload* mpayload = msg->Payload(); if(!mpayload) return false; // No payload in this message PayloadTLSStream* tstream = dynamic_cast(msg->Payload()); // Currently only TLS payloads are supported if(!tstream) return false; sattr_ = msg->Auth()->get("DELEGATION POLICY"); if(sattr_) sattr = dynamic_cast(sattr_); if(!sattr) { sattr_ = NULL; sattr = new DelegationMultiSecAttr; } X509* cert = tstream->GetPeerCert(); if (cert != NULL) { if(!get_proxy_policy(cert,sattr)) { X509_free(cert); throw std::exception(); }; X509_free(cert); }; STACK_OF(X509)* peerchain = tstream->GetPeerChain(); if(peerchain != NULL) { for(int idx = 0;;++idx) { if(idx >= sk_X509_num(peerchain)) break; X509* cert = sk_X509_value(peerchain,idx); if(cert) { if(!get_proxy_policy(cert,sattr)) throw std::exception(); }; }; }; if(!sattr_) msg->Auth()->set("DELEGATION POLICY",sattr); sattr = NULL; return true; } catch(std::exception&) { }; if(!sattr_) delete sattr; return false; } Arc::Plugin* DelegationCollector::get_sechandler(Arc::PluginArgument* arg) { ArcSec::SecHandlerPluginArgument* shcarg = arg?dynamic_cast(arg):NULL; if(!shcarg) return NULL; return new DelegationCollector((Arc::Config*)(*shcarg),shcarg); } } //extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { // { "delegation.collector", "HED:SHC", NULL, 0, &ArcSec::DelegationCollector::get_sechandler}, // { NULL, NULL, NULL, 0, NULL } //}; nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/PayloadTLSMCC.cpp0000644000000000000000000000012413153455031023073 xustar000000000000000027 mtime=1504598553.682268 27 atime=1513200575.157709 30 ctime=1513200660.653755481 nordugrid-arc-5.4.2/src/hed/mcc/tls/PayloadTLSMCC.cpp0000644000175000002070000004332013153455031023142 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "GlobusSigningPolicy.h" #include "PayloadTLSMCC.h" #include #include #include #include #include namespace ArcMCCTLS { #if (OPENSSL_VERSION_NUMBER < 0x10100000L) #define X509_getm_notAfter X509_get_notAfter #define X509_getm_notBefore X509_get_notBefore #define X509_set1_notAfter X509_set_notAfter #define X509_set1_notBefore X509_set_notBefore #endif #if (OPENSSL_VERSION_NUMBER < 0x10002000L) static X509_VERIFY_PARAM *SSL_CTX_get0_param(SSL_CTX *ctx) { return ctx->param; } #endif static const char * ex_data_id = "ARC_MCC_Payload_TLS"; int PayloadTLSMCC::ex_data_index_ = -1; Time asn1_to_utctime(const ASN1_UTCTIME *s) { std::string t_str; if(!s) return Time(); if(s->type == V_ASN1_UTCTIME) { t_str.append("20"); 
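// V_ASN1_UTCTIME encodes only a two-digit year, so prefix "20" to obtain a
// four-digit year for Time() (this assumes dates in 2000-2099).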
t_str.append((char*)(s->data)); } else {//V_ASN1_GENERALIZEDTIME t_str.append((char*)(s->data)); } return Time(t_str); } // This callback implements additional verification // algorithms not present in OpenSSL static int verify_callback(int ok,X509_STORE_CTX *sctx) { PayloadTLSMCC* it = PayloadTLSMCC::RetrieveInstance(sctx); //std::cerr<<"+++ verify_callback: ok = "<SetFailure((std::string)X509_verify_cert_error_string(err)); } else { Logger::getRootLogger().msg(ERROR,"%s",X509_verify_cert_error_string(err)); } }; break; }; }; if(ok == 1) { //std::cerr<<"+++ additional verification"<Config().GlobusPolicy()<<" - "<Config().CADir()<Config().GlobusPolicy()) && (!(it->Config().CADir().empty()))) { //std::cerr<<"+++ additional verification: check signing policy"<Config().CADir())) { //std::cerr<<"+++ additional verification: policy is open"<SetFailure(std::string("Certificate ")+subject_name+" failed Globus signing policy"); //std::cerr<<"+++ additional verification: failed: "< #include #include #include namespace ArcMCCTLS { class GlobusSigningPolicy { public: GlobusSigningPolicy(): stream_(NULL) { }; ~GlobusSigningPolicy() { close(); }; bool open(const X509_NAME* issuer_subject,const std::string& ca_path); void close() { delete stream_; stream_ = NULL; }; bool match(const X509_NAME* issuer_subject,const X509_NAME* subject); private: GlobusSigningPolicy(GlobusSigningPolicy const &); GlobusSigningPolicy& operator=(GlobusSigningPolicy const &); std::istream* stream_; }; } // namespace ArcMCCTLS nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/ConfigTLSMCC.h0000644000000000000000000000012412771225446022366 xustar000000000000000027 mtime=1474636582.341115 27 atime=1513200575.149709 30 ctime=1513200660.658755542 nordugrid-arc-5.4.2/src/hed/mcc/tls/ConfigTLSMCC.h0000644000175000002070000000545512771225446022444 0ustar00mockbuildmock00000000000000#ifndef __ARC_CONFIGTLSMCC_H__ #define __ARC_CONFIGTLSMCC_H__ #include #include #include #include namespace ArcMCCTLS { using namespace Arc; class ConfigTLSMCC { private: std::string ca_dir_; std::string ca_file_; std::string voms_dir_; std::string proxy_file_; std::string cert_file_; std::string key_file_; std::string credential_; bool client_authn_; bool globus_policy_; bool globus_gsi_; bool globusio_gsi_; enum { tls_handshake, // default ssl3_handshake, tls10_handshake, tls11_handshake, tls12_handshake, dtls_handshake, dtls10_handshake, dtls12_handshake, } handshake_; enum { relaxed_voms, standard_voms, strict_voms, noerrors_voms } voms_processing_; std::vector vomscert_trust_dn_; std::string cipher_list_; std::string failure_; ConfigTLSMCC(void); public: ConfigTLSMCC(XMLNode cfg,bool client = false); const std::string& CADir(void) const { return ca_dir_; }; const std::string& CAFile(void) const { return ca_file_; }; const std::string& VOMSDir(void) const { return voms_dir_; }; const std::string& ProxyFile(void) const { return proxy_file_; }; const std::string& CertFile(void) const { return cert_file_; }; const std::string& KeyFile(void) const { return key_file_; }; bool GlobusPolicy(void) const { return globus_policy_; }; bool GlobusGSI(void) const { return globus_gsi_; }; bool GlobusIOGSI(void) const { return globusio_gsi_; }; const std::vector& VOMSCertTrustDN(void) { return vomscert_trust_dn_; }; bool Set(SSL_CTX* sslctx); bool IfClientAuthn(void) const { return client_authn_; }; bool IfTLSHandshake(void) const { return handshake_ == tls_handshake; }; bool IfSSLv3Handshake(void) const { return handshake_ == ssl3_handshake; }; bool 
IfTLSv1Handshake(void) const { return handshake_ == tls10_handshake; }; bool IfTLSv11Handshake(void) const { return handshake_ == tls11_handshake; }; bool IfTLSv12Handshake(void) const { return handshake_ == tls12_handshake; }; bool IfDTLSHandshake(void) const { return handshake_ == dtls_handshake; }; bool IfDTLSv1Handshake(void) const { return handshake_ == dtls10_handshake; }; bool IfDTLSv12Handshake(void) const { return handshake_ == dtls12_handshake; }; bool IfCheckVOMSCritical(void) const { return (voms_processing_ != relaxed_voms); }; bool IfFailOnVOMSParsing(void) const { return (voms_processing_ == noerrors_voms) || (voms_processing_ == strict_voms); }; bool IfFailOnVOMSInvalid(void) const { return (voms_processing_ == noerrors_voms); }; const std::string& Failure(void) { return failure_; }; static std::string HandleError(int code = SSL_ERROR_NONE); static void ClearError(void); }; } // namespace Arc #endif /* __ARC_CONFIGTLSMCC_H__ */ nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/ConfigTLSMCC.cpp0000644000000000000000000000012412574532370022717 xustar000000000000000027 mtime=1441969400.372727 27 atime=1513200575.161709 30 ctime=1513200660.652755469 nordugrid-arc-5.4.2/src/hed/mcc/tls/ConfigTLSMCC.cpp0000644000175000002070000002146312574532370022772 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "PayloadTLSStream.h" #include "ConfigTLSMCC.h" namespace ArcMCCTLS { using namespace Arc; static void config_VOMS_add(XMLNode cfg,std::vector& vomscert_trust_dn) { XMLNode nd = cfg["VOMSCertTrustDNChain"]; for(;(bool)nd;++nd) { XMLNode cnd = nd["VOMSCertTrustDN"]; if((bool)cnd) { for(;(bool)cnd;++cnd) { vomscert_trust_dn.push_back((std::string)cnd); } vomscert_trust_dn.push_back("----NEXT CHAIN----"); } else { XMLNode rnd = nd["VOMSCertTrustRegex"]; if(rnd) { std::string rgx = (std::string)rnd; if(rgx[0] != '^') rgx.insert(0,"^"); if(rgx[rgx.length()-1] != '$') rgx+="$"; vomscert_trust_dn.push_back(rgx); vomscert_trust_dn.push_back("----NEXT CHAIN----"); } } } } // This class is collection of configuration information ConfigTLSMCC::ConfigTLSMCC(XMLNode cfg,bool client) { client_authn_ = true; cert_file_ = (std::string)(cfg["CertificatePath"]); key_file_ = (std::string)(cfg["KeyPath"]); ca_file_ = (std::string)(cfg["CACertificatePath"]); ca_dir_ = (std::string)(cfg["CACertificatesDir"]); voms_dir_ = (std::string)(cfg["VOMSDir"]); globus_policy_ = (((std::string)(cfg["CACertificatesDir"].Attribute("PolicyGlobus"))) == "true"); globus_gsi_ = (((std::string)(cfg["GSI"])) == "globus"); globusio_gsi_ = (((std::string)(cfg["GSI"])) == "globusio"); handshake_ = (cfg["Handshake"] == "SSLv3")?ssl3_handshake:tls_handshake; proxy_file_ = (std::string)(cfg["ProxyPath"]); credential_ = (std::string)(cfg["Credential"]); if(client) { // Client is using safest setup by default cipher_list_ = "TLSv1:SSLv3:!eNULL:!aNULL"; } else { // Server allows client to choose. But requires authentication. 
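// (The optional Encryption element, handled just below, overrides whichever
// default cipher list is picked here:
//   required  - keep the default list
//   preferred - also accept NULL ciphers, with lowest priority
//   optional  - prefer NULL ciphers, i.e. no encryption when possible
//   off       - NULL ciphers only, encryption disabled)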
cipher_list_ = "TLSv1:SSLv3:eNULL:!aNULL"; } if(cfg["Encryption"] == "required") { } else if(cfg["Encryption"] == "preferred") { cipher_list_ = "TLSv1:SSLv3:eNULL:!aNULL"; } else if(cfg["Encryption"] == "optional") { cipher_list_ = "eNULL:TLSv1:SSLv3:!aNULL"; } else if(cfg["Encryption"] == "off") { cipher_list_ = "eNULL:!aNULL"; } std::vector gridSecDir (2); gridSecDir[0] = G_DIR_SEPARATOR_S + std::string("etc"); gridSecDir[1] = "grid-security"; std::string gridSecurityDir = Glib::build_path(G_DIR_SEPARATOR_S, gridSecDir); if(!client) { if(cert_file_.empty()) cert_file_= Glib::build_filename(gridSecurityDir, "hostcert.pem"); if(key_file_.empty()) key_file_= Glib::build_filename(gridSecurityDir, "hostkey.pem"); // Use VOMS trust DN of server certificates specified in configuration config_VOMS_add(cfg,vomscert_trust_dn_); // Look for those configured in separate files // TODO: should those file be reread on every connection XMLNode locnd = cfg["VOMSCertTrustDNChainsLocation"]; for(;(bool)locnd;++locnd) { std::string filename = (std::string)locnd; std::ifstream file(filename.c_str()); if (!file) { failure_ = "Can not read file "+filename+" with list of trusted VOMS DNs"; continue; }; XMLNode node; file >> node; config_VOMS_add(node,vomscert_trust_dn_); }; std::string vproc = cfg["VOMSProcessing"]; if(vproc == "relaxed") { voms_processing_ = relaxed_voms; } else if(vproc == "standard") { voms_processing_ = standard_voms; } else if(vproc == "strict") { voms_processing_ = strict_voms; } else if(vproc == "noerrors") { voms_processing_ = noerrors_voms; } else { voms_processing_ = standard_voms; } //If ClientAuthn is explicitly set to be "false" in configuration, //then client/authentication is not required, which means client //side does not need to provide certificate and key in its configuration. //The default value of ClientAuthn is "true" if (((std::string)((cfg)["ClientAuthn"])) == "false") client_authn_ = false; } else { //If both CertificatePath and ProxyPath have not beed configured, //client side can not provide certificate for server side. 
Then server //side should not require client authentication if(cert_file_.empty() && proxy_file_.empty()) client_authn_ = false; }; if(ca_dir_.empty() && ca_file_.empty()) ca_dir_= gridSecurityDir + G_DIR_SEPARATOR_S + "certificates"; if(voms_dir_.empty()) voms_dir_= gridSecurityDir + G_DIR_SEPARATOR_S + "vomsdir"; if(!proxy_file_.empty()) { key_file_=proxy_file_; cert_file_=proxy_file_; }; } bool ConfigTLSMCC::Set(SSL_CTX* sslctx) { if((!ca_file_.empty()) || (!ca_dir_.empty())) { if(!SSL_CTX_load_verify_locations(sslctx, ca_file_.empty()?NULL:ca_file_.c_str(), ca_dir_.empty()?NULL:ca_dir_.c_str())) { failure_ = "Can not assign CA location - "+(ca_dir_.empty()?ca_file_:ca_dir_)+"\n"; failure_ += HandleError(); return false; }; }; if(!credential_.empty()) { // First try to use in-memory credential Credential cred(credential_, credential_, ca_dir_, ca_file_, Credential::NoPassword(), false); if (!cred.IsValid()) { failure_ = "Failed to read in-memory credentials"; return false; } EVP_PKEY* key = cred.GetPrivKey(); if (SSL_CTX_use_PrivateKey(sslctx, key) != 1) { failure_ = "Can not load key from in-memory credentials\n"; failure_ += HandleError(); EVP_PKEY_free(key); return false; } EVP_PKEY_free(key); X509* cert = cred.GetCert(); if (SSL_CTX_use_certificate(sslctx, cert) != 1) { failure_ = "Can not load certificate from in-memory credentials\n"; failure_ += HandleError(); X509_free(cert); return false; } X509_free(cert); // Add certificate chain STACK_OF(X509)* chain = cred.GetCertChain(); int res = 1; for (int id = 0; id < sk_X509_num(chain) && res == 1; ++id) { X509* cert = sk_X509_value(chain,id); res = SSL_CTX_add_extra_chain_cert(sslctx, cert); } if (res != 1) { failure_ = "Can not construct certificate chain from in-memory credentials\n"; failure_ += HandleError(); return false; } } else { if(!cert_file_.empty()) { // Try to load proxy then PEM and then ASN1 certificate if((SSL_CTX_use_certificate_chain_file(sslctx,cert_file_.c_str()) != 1) && (SSL_CTX_use_certificate_file(sslctx,cert_file_.c_str(),SSL_FILETYPE_PEM) != 1) && (SSL_CTX_use_certificate_file(sslctx,cert_file_.c_str(),SSL_FILETYPE_ASN1) != 1)) { failure_ = "Can not load certificate file - "+cert_file_+"\n"; failure_ += HandleError(); return false; }; }; if(!key_file_.empty()) { if((SSL_CTX_use_PrivateKey_file(sslctx,key_file_.c_str(),SSL_FILETYPE_PEM) != 1) && (SSL_CTX_use_PrivateKey_file(sslctx,key_file_.c_str(),SSL_FILETYPE_ASN1) != 1)) { failure_ = "Can not load key file - "+key_file_+"\n"; failure_ += HandleError(); return false; }; }; if((!key_file_.empty()) && (!cert_file_.empty())) { if(!(SSL_CTX_check_private_key(sslctx))) { failure_ = "Private key "+key_file_+" does not match certificate "+cert_file_+"\n"; failure_ += HandleError(); return false; }; }; }; if(!cipher_list_.empty()) { if(!SSL_CTX_set_cipher_list(sslctx,cipher_list_.c_str())) { failure_ = "No ciphers found to satisfy requested encryption level. 
" "Check if OpenSSL supports ciphers '"+cipher_list_+"'\n"; failure_ += HandleError(); return false; }; }; return true; } std::string ConfigTLSMCC::HandleError(int code) { std::string errstr; unsigned long e = (code==SSL_ERROR_NONE)?ERR_get_error():code; while(e != SSL_ERROR_NONE) { if(e == SSL_ERROR_SYSCALL) { // Hiding system errors // int err = errno; // logger.msg(DEBUG, "SSL error: system call failed: %d, %s",err,StrError(err)); } else { const char* lib = ERR_lib_error_string(e); const char* func = ERR_func_error_string(e); const char* reason = ERR_reason_error_string(e); const char* alert = SSL_alert_desc_string_long(e); // Ignore unknown errors if (reason || func || lib || std::string(alert) != "unknown") { if(!errstr.empty()) errstr += "\n"; errstr += "SSL error"; if (reason) errstr += ", \"" + std::string(reason) + "\""; if (func) errstr += ", in \"" + std::string(func) + "\" function"; if (lib) errstr += ", at \"" + std::string(lib) + "\" library"; if (alert) errstr += ", with \"" + std::string(alert) + "\" alert"; //logger.msg(DEBUG, errstr); } }; e = ERR_get_error(); } return errstr; } void ConfigTLSMCC::ClearError(void) { int e = ERR_get_error(); while(e != 0) { e = ERR_get_error(); } } } // namespace Arc nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/BIOGSIMCC.h0000644000000000000000000000012412206663345021547 xustar000000000000000027 mtime=1377527525.531159 27 atime=1513200575.161709 30 ctime=1513200660.662755592 nordugrid-arc-5.4.2/src/hed/mcc/tls/BIOGSIMCC.h0000644000175000002070000000060612206663345021616 0ustar00mockbuildmock00000000000000#ifndef __ARC_BIOGSIMCC_H__ #define __ARC_BIOGSIMCC_H__ #include namespace Arc { class MCCInterface; class PayloadStreamInterface; } namespace ArcMCCTLS { using namespace Arc; BIO* BIO_new_GSIMCC(MCCInterface* mcc); BIO* BIO_new_GSIMCC(PayloadStreamInterface* stream); bool BIO_GSIMCC_failure(BIO* bio, MCC_Status& s); } // namespace Arc #endif // __ARC_BIOMCC_H__ nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/MCCTLS.h0000644000000000000000000000012411730411253021223 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.157709 30 ctime=1513200660.658755542 nordugrid-arc-5.4.2/src/hed/mcc/tls/MCCTLS.h0000644000175000002070000000521311730411253021271 0ustar00mockbuildmock00000000000000#ifndef __ARC_MCCTLS_H__ #define __ARC_MCCTLS_H__ #include #include #include namespace ArcMCCTLS { //! A base class for TLS client and service MCCs. /*! This is a base class for TLS client and service MCCs. It provides some common functionality for them. */ class MCC_TLS : public MCC { public: MCC_TLS(Config& cfg,bool client,PluginArgument* parg); protected: //bool tls_random_seed(std::string filename, long n); static unsigned int ssl_initialized_; static Glib::Mutex lock_; static Glib::Mutex* ssl_locks_; static int ssl_locks_num_; static Logger logger; static void ssl_locking_cb(int mode, int n, const char *file, int line); static unsigned long ssl_id_cb(void); //static void* ssl_idptr_cb(void); ConfigTLSMCC config_; }; /** This MCC implements TLS server side functionality. Upon creation this object creats SSL_CTX object and configures SSL_CTX object with some environment information about credential. 
Because we cannot know the "socket" when the creation of MCC_TLS_Service/MCC_TLS_Client object (not like MCC_TCP_Client, which can creat socket in the constructor method by using information in configuration file), we can only creat "ssl" object which is binded to specified "socket", when MCC_HTTP_Client calls the process() method of MCC_TLS_Client object, or MCC_TCP_Service calls the process() method of MCC_TLS_Service object. The "ssl" object is embeded in a payload called PayloadTLSSocket. The process() method of MCC_TLS_Service is passed payload implementing PayloadStreamInterface and the method returns empty PayloadRaw payload in "outmsg". The ssl object is created and bound to Stream payload when constructing the PayloadTLSSocket in the process() method. During processing of message this MCC generates attribute TLS:PEERDN which contains Distinguished Name of remoote peer. */ class MCC_TLS_Service: public MCC_TLS { private: //SSL_CTX* sslctx_; public: MCC_TLS_Service(Config& cfg,PluginArgument* parg); virtual ~MCC_TLS_Service(void); virtual MCC_Status process(Message&,Message&); }; class PayloadTLSMCC; /** This class is MCC implementing TLS client. */ class MCC_TLS_Client: public MCC_TLS { private: //SSL_CTX* sslctx_; PayloadTLSMCC* stream_; public: MCC_TLS_Client(Config& cfg,PluginArgument* parg); virtual ~MCC_TLS_Client(void); virtual MCC_Status process(Message&,Message&); virtual void Next(MCCInterface* next,const std::string& label = ""); }; } // namespace Arc #endif /* __ARC_MCCTLS_H__ */ nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/BIOMCC.cpp0000644000000000000000000000012413065017103021524 xustar000000000000000027 mtime=1490296387.698578 27 atime=1513200575.157709 30 ctime=1513200660.656755518 nordugrid-arc-5.4.2/src/hed/mcc/tls/BIOMCC.cpp0000644000175000002070000001423513065017103021576 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "BIOMCC.h" namespace ArcMCCTLS { using namespace Arc; #if (OPENSSL_VERSION_NUMBER < 0x10100000L) static BIO_METHOD *BIO_meth_new(int type, const char *name) { BIO_METHOD *biom = (BIO_METHOD*)std::malloc(sizeof(BIO_METHOD)); if (biom) std::memset(biom,0,sizeof(biom)); return biom; } static void BIO_meth_free(BIO_METHOD *biom) { std::free(biom); } static void BIO_set_data(BIO *a, void *ptr) { a->ptr = ptr; } static void *BIO_get_data(BIO *a) { return a->ptr; } static void BIO_set_init(BIO *a, int init) { a->init = init; } #endif class BIOMCC { private: PayloadStreamInterface* stream_; MCCInterface* next_; MCC_Status result_; BIO_METHOD* biom_; BIO* bio_; // not owned by this object public: BIOMCC(MCCInterface* next):result_(STATUS_OK) { next_=NULL; stream_=NULL; bio_ = NULL; if(MakeMethod()) { bio_ = BIO_new(biom_); if(bio_) { next_=next; BIO_set_data(bio_,this); } } }; BIOMCC(PayloadStreamInterface* stream):result_(STATUS_OK) { next_=NULL; stream_=NULL; bio_ = NULL; if(MakeMethod()) { bio_ = BIO_new(biom_); if(bio_) { stream_=stream; BIO_set_data(bio_,this); } } }; ~BIOMCC(void) { if(stream_ && next_) delete stream_; if(biom_) BIO_meth_free(biom_); }; BIO* GetBIO() const { return bio_; }; PayloadStreamInterface* Stream() const { return stream_; }; void Stream(PayloadStreamInterface* stream) { stream_=stream; /*free ??*/ }; MCCInterface* Next(void) const { return next_; }; void MCC(MCCInterface* next) { next_=next; }; const MCC_Status& Result(void) { return result_; }; private: static int mcc_write(BIO *h, const char *buf, int num); static int 
mcc_read(BIO *h, char *buf, int size); static int mcc_puts(BIO *h, const char *str); static long mcc_ctrl(BIO *h, int cmd, long arg1, void *arg2); static int mcc_new(BIO *h); static int mcc_free(BIO *data); bool MakeMethod(void) { biom_ = BIO_meth_new(BIO_TYPE_FD,"Message Chain Component"); if(biom_) { #if (OPENSSL_VERSION_NUMBER < 0x10100000L) biom_->bwrite = &BIOMCC::mcc_write; biom_->bread = &BIOMCC::mcc_read; biom_->bputs = &BIOMCC::mcc_puts; biom_->ctrl = &BIOMCC::mcc_ctrl; biom_->create = &BIOMCC::mcc_new; biom_->destroy = &BIOMCC::mcc_free; #else (void)BIO_meth_set_write(biom_,&BIOMCC::mcc_write); (void)BIO_meth_set_read(biom_,&BIOMCC::mcc_read); (void)BIO_meth_set_puts(biom_,&BIOMCC::mcc_puts); (void)BIO_meth_set_ctrl(biom_,&BIOMCC::mcc_ctrl); (void)BIO_meth_set_create(biom_,&BIOMCC::mcc_new); (void)BIO_meth_set_destroy(biom_,&BIOMCC::mcc_free); #endif } return (biom_ != NULL); }; }; BIO *BIO_new_MCC(MCCInterface* mcc) { BIOMCC* biomcc = new BIOMCC(mcc); BIO* bio = biomcc->GetBIO(); if(!bio) delete biomcc; return bio; } BIO* BIO_new_MCC(PayloadStreamInterface* stream) { BIOMCC* biomcc = new BIOMCC(stream); BIO* bio = biomcc->GetBIO(); if(!bio) delete biomcc; return bio; } int BIOMCC::mcc_new(BIO *bi) { BIO_set_data(bi,NULL); BIO_set_init(bi,1); return(1); } int BIOMCC::mcc_free(BIO *b) { if(b == NULL) return(0); BIOMCC* biomcc = (BIOMCC*)(BIO_get_data(b)); BIO_set_data(b,NULL); if(biomcc) delete biomcc; return(1); } int BIOMCC::mcc_read(BIO *b, char *out,int outl) { int ret=0; if (out == NULL) return(ret); if(b == NULL) return(ret); BIOMCC* biomcc = (BIOMCC*)(BIO_get_data(b)); if(biomcc == NULL) return(ret); PayloadStreamInterface* stream = biomcc->Stream(); if(stream == NULL) return ret; //clear_sys_error(); bool r = stream->Get(out,outl); BIO_clear_retry_flags(b); if(r) { ret=outl; } else { ret=-1; biomcc->result_ = stream->Failure(); }; return ret; } int BIOMCC::mcc_write(BIO *b, const char *in, int inl) { int ret = 0; //clear_sys_error(); if(in == NULL) return(ret); if(b == NULL) return(ret); if(BIO_get_data(b) == NULL) return(ret); BIOMCC* biomcc = (BIOMCC*)(BIO_get_data(b)); if(biomcc == NULL) return(ret); PayloadStreamInterface* stream = biomcc->Stream(); if(stream != NULL) { // If available just use stream directly // TODO: check if stream has changed ??? bool r = stream->Put(in,inl); BIO_clear_retry_flags(b); if(r) { ret=inl; } else { ret=-1; biomcc->result_ = stream->Failure(); }; return ret; }; MCCInterface* next = biomcc->Next(); if(next == NULL) return(ret); PayloadRaw nextpayload; nextpayload.Insert(in,0,inl); // Not efficient !!!! 
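// Write path when no stream is cached yet: wrap the outgoing bytes in a
// PayloadRaw message, push it through the next MCC, and if the reply carries
// a stream payload keep it so later reads and writes go to that stream
// directly.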
Message nextinmsg; nextinmsg.Payload(&nextpayload); Message nextoutmsg; MCC_Status mccret = next->process(nextinmsg,nextoutmsg); BIO_clear_retry_flags(b); if(mccret) { if(nextoutmsg.Payload()) { PayloadStreamInterface* retpayload = NULL; try { retpayload=dynamic_cast(nextoutmsg.Payload()); } catch(std::exception& e) { }; if(retpayload) { biomcc->Stream(retpayload); } else { delete nextoutmsg.Payload(); }; }; ret=inl; } else { biomcc->result_ = mccret; if(nextoutmsg.Payload()) delete nextoutmsg.Payload(); ret=-1; }; return(ret); } long BIOMCC::mcc_ctrl(BIO*, int cmd, long, void*) { long ret=0; switch (cmd) { case BIO_CTRL_RESET: case BIO_CTRL_SET_CLOSE: case BIO_CTRL_SET: case BIO_CTRL_EOF: case BIO_CTRL_FLUSH: case BIO_CTRL_DUP: ret=1; break; }; return(ret); } int BIOMCC::mcc_puts(BIO *bp, const char *str) { int n,ret; n=strlen(str); ret=mcc_write(bp,str,n); return(ret); } bool BIO_MCC_failure(BIO* bio, MCC_Status& s) { if(!bio) return false; BIOMCC* b = (BIOMCC*)(BIO_get_data(bio)); if(!b || b->Result()) return false; s = b->Result(); return true; } } // namespace ArcMCCTLS nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/DelegationCollector.h0000644000000000000000000000012412110401544024153 xustar000000000000000027 mtime=1361183588.521754 27 atime=1513200575.154709 30 ctime=1513200660.661755579 nordugrid-arc-5.4.2/src/hed/mcc/tls/DelegationCollector.h0000644000175000002070000000114512110401544024221 0ustar00mockbuildmock00000000000000#ifndef __ARC_SEC_DELEGATIONCOLLECTOR_H__ #define __ARC_SEC_DELEGATIONCOLLECTOR_H__ #include #include #include #include namespace ArcMCCTLSSec { using namespace ArcSec; class DelegationCollector : public SecHandler { public: DelegationCollector(Arc::Config *cfg,Arc::PluginArgument* parg); virtual ~DelegationCollector(void); virtual SecHandlerStatus Handle(Arc::Message* msg) const; static Arc::Plugin* get_sechandler(Arc::PluginArgument* arg); }; } // namespace ArcSec #endif /* __ARC_SEC_DELEGATIONCOLLECTOR_H__ */ nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/GlobusSigningPolicy.cpp0000644000000000000000000000012413153455222024530 xustar000000000000000027 mtime=1504598674.017612 27 atime=1513200575.154709 30 ctime=1513200660.654755494 nordugrid-arc-5.4.2/src/hed/mcc/tls/GlobusSigningPolicy.cpp0000644000175000002070000001643513153455222024606 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "GlobusSigningPolicy.h" namespace ArcMCCTLS { using namespace Arc; static Logger& logger = Logger::getRootLogger(); static const char access_id[] = "access_id_"; static const char positive_rights[] = "pos_rights"; static const char negative_rights[] = "neg_rights"; static const char globus_id[] = "globus"; static const char sign_id[] = "CA:sign"; static const char conditions_id[] = "cond_"; static const char policy_suffix[] = ".signing_policy"; static void get_line(std::istream& in,std::string& s) { for(;;) { s.resize(0); if(in.fail() || in.eof()) return; getline(in,s); std::string::size_type p; p=s.find_first_not_of(" \t"); if(p != std::string::npos) s=s.substr(p); p=s.find_last_not_of(" \t\r\n"); // also trim CRLF if(p == std::string::npos) s.resize(0); else s.resize(p+1); if((!s.empty()) && (s[0] != '#')) break; }; return; } static void get_word(std::string& s,std::string& word) { std::string::size_type w_s; std::string::size_type w_e; word.resize(0); w_s=s.find_first_not_of(" \t"); if(w_s == std::string::npos) { s.resize(0); return; }; if(s[w_s] == '\'') { 
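// Word is quoted with single quotes - take everything up to the matching quote.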
w_e=s.find('\'',++w_s); } else if(s[w_s] == '"') { w_e=s.find('"',++w_s); } else { w_e=s.find_first_of(" \t",w_s); }; if(w_e == std::string::npos) w_e=s.length(); word=s.substr(w_s,w_e-w_s); if((s[w_e] == '\'') || (s[w_e] == '"')) ++w_e; w_s=s.find_first_not_of(" \t",w_e); if(w_s == std::string::npos) w_s=w_e; s=s.substr(w_s); return; } static bool get_id(std::string& s,std::string& ca_subject) { std::string id; ca_subject.resize(0); get_word(s,id); if(id.empty()) return true; if(id.compare(0,strlen(access_id),access_id) != 0) { logger.msg(WARNING,"Was expecting %s at the beginning of \"%s\"",access_id,id); return false; }; id=id.substr(strlen(access_id)); if(id != "CA") { logger.msg(WARNING,"We only support CAs in Globus signing policy - %s is not supported",id); return false; }; get_word(s,id); if(id != "X509") { logger.msg(WARNING,"We only support X509 CAs in Globus signing policy - %s is not supported",id); return false; }; get_word(s,ca_subject); if(ca_subject.empty()) { logger.msg(WARNING,"Missing CA subject in Globus signing policy"); return false; }; return true; } static bool get_rights(std::string& s) { std::string id; get_word(s,id); if(id == negative_rights) { logger.msg(WARNING,"Negative rights are not supported in Globus signing policy"); return false; }; if(id != positive_rights) { logger.msg(WARNING,"Unknown rights in Globus signing policy - %s",id); return false; }; get_word(s,id); if(id != globus_id) { logger.msg(WARNING,"Only globus rights are supported in Globus signing policy - %s is not supported",id); return false; }; get_word(s,id); if(id != sign_id) { logger.msg(WARNING,"Only signing rights are supported in Globus signing policy - %s is not supported",id); return false; }; return true; } static bool get_conditions(std::string s,std::list& patterns) { std::string id; patterns.resize(0); get_word(s,id); if(id.empty()) return true; if(id.compare(0,strlen(conditions_id),conditions_id) != 0) { logger.msg(WARNING,"Was expecting %s at the beginning of \"%s\"",conditions_id,id); return false; }; id=id.substr(strlen(conditions_id)); if(id != "subjects") { logger.msg(WARNING,"We only support subjects conditions in Globus signing policy - %s is not supported",id); return false; }; get_word(s,id); if(id != globus_id) { logger.msg(WARNING,"We only support globus conditions in Globus signing policy - %s is not supported",id); return false; }; std::string subjects; get_word(s,subjects); if(subjects.empty()) { logger.msg(WARNING,"Missing condition subjects in Globus signing policy"); return false; }; std::string subject; for(;;) { get_word(subjects,subject); if(subject.empty()) break; patterns.push_back(subject); }; return true; } static bool match_all(const std::string& issuer_subject,const std::string& subject,const std::string& policy_ca_subject,std::list policy_patterns) { if(issuer_subject == policy_ca_subject) { std::list::iterator pattern = policy_patterns.begin(); for(;pattern!=policy_patterns.end();++pattern) { std::string::size_type p = 0; for(;;) { p=pattern->find('*',p); if(p == std::string::npos) break; pattern->insert(p,"."); p+=2; }; (*pattern)="^"+(*pattern)+"$"; RegularExpression re(*pattern); bool r = re.match(subject); if(r) return true; }; }; return false; } static void X509_NAME_to_string(std::string& str,const X509_NAME* name) { str.resize(0); if(name == NULL) return; char* s = X509_NAME_oneline((X509_NAME*)name,NULL,0); if(s) { str=s; OPENSSL_free(s); }; return; } bool GlobusSigningPolicy::match(const X509_NAME* issuer_subject,const X509_NAME* subject) { 
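// The files parsed here are the classic Globus <hash>.signing_policy files
// located next to the CA certificates (see open() below). A made-up example
// of the kind of entry this parser accepts:
//
//   access_id_CA   X509   '/DC=org/DC=example/CN=Example CA'
//   pos_rights     globus  CA:sign
//   cond_subjects  globus  '"/DC=org/DC=example/*"'
//
// match() returns true when issuer_subject equals the access_id_CA subject
// and subject matches one of the cond_subjects patterns ('*' is treated as
// a wildcard).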
if(!stream_) return false; std::istream& in(*stream_); std::string issuer_subject_str; std::string subject_str; std::string s; std::string policy_ca_subject; std::list policy_patterns; bool rights_defined = false; bool failure = false; X509_NAME_to_string(issuer_subject_str,issuer_subject); X509_NAME_to_string(subject_str,subject); for(;;) { get_line(in,s); if(s.empty()) break; if(s.compare(0,strlen(access_id),access_id) == 0) { if((!policy_ca_subject.empty()) && (rights_defined) && (!failure)) { bool r = match_all(issuer_subject_str,subject_str,policy_ca_subject,policy_patterns); if(r) return true; }; policy_ca_subject.resize(0); policy_patterns.resize(0); failure=false; rights_defined=false; if(!get_id(s,policy_ca_subject)) failure=true; } else if((s.compare(0,strlen(positive_rights),positive_rights) == 0) || (s.compare(0,strlen(positive_rights),negative_rights) == 0)) { if(!get_rights(s)) { failure=true; } else { rights_defined=true; }; } else if(s.compare(0,strlen(conditions_id),conditions_id) == 0) { if(!get_conditions(s,policy_patterns)) failure=true; } else { logger.msg(WARNING,"Unknown element in Globus signing policy"); failure=true; }; }; if((!policy_ca_subject.empty()) && (rights_defined) && (!failure)) { bool r = match_all(issuer_subject_str,subject_str,policy_ca_subject,policy_patterns); if(r) return true; }; return false; } bool GlobusSigningPolicy::open(const X509_NAME* issuer_subject,const std::string& ca_path) { close(); //std::string issuer_subject_str; //X509_NAME_to_string(issuer_subject_str,issuer_subject); unsigned long hash = X509_NAME_hash((X509_NAME*)issuer_subject); char hash_str[32]; snprintf(hash_str,sizeof(hash_str)-1,"%08lx",hash); hash_str[sizeof(hash_str)-1]=0; std::string fname = ca_path+"/"+hash_str+policy_suffix; std::ifstream* f = new std::ifstream(fname.c_str()); if(!(*f)) { delete f; return false; }; stream_ = f; return true; } } nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/DelegationSecAttr.h0000644000000000000000000000012311715026661023606 xustar000000000000000026 mtime=1328819633.84179 27 atime=1513200575.161709 30 ctime=1513200660.660755567 nordugrid-arc-5.4.2/src/hed/mcc/tls/DelegationSecAttr.h0000644000175000002070000000137211715026661023657 0ustar00mockbuildmock00000000000000#include namespace ArcMCCTLSSec { class DelegationSecAttr: public Arc::SecAttr { public: DelegationSecAttr(void); DelegationSecAttr(const char* policy_str,int policy_size = -1); virtual ~DelegationSecAttr(void); virtual operator bool(void) const; virtual bool Export(Arc::SecAttrFormat format,Arc::XMLNode &val) const; protected: Arc::XMLNode policy_doc_; virtual bool equal(const Arc::SecAttr &b) const; }; class DelegationMultiSecAttr: public Arc::MultiSecAttr { public: DelegationMultiSecAttr(void); virtual ~DelegationMultiSecAttr(void); //virtual operator bool() const; virtual bool Export(Arc::SecAttrFormat format,Arc::XMLNode &val) const; bool Add(const char* policy_str,int policy_size = -1); }; } nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/schema0000644000000000000000000000013213214316024021246 xustar000000000000000030 mtime=1513200660.677755775 30 atime=1513200668.723854182 30 ctime=1513200660.677755775 nordugrid-arc-5.4.2/src/hed/mcc/tls/schema/0000755000175000002070000000000013214316024021371 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/mcc/tls/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711255700321023363 xustar000000000000000027 mtime=1253540049.444682 30 atime=1513200599.673009663 30 ctime=1513200660.676755763 
nordugrid-arc-5.4.2/src/hed/mcc/tls/schema/Makefile.am0000644000175000002070000000013411255700321023423 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = tls.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/mcc/tls/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315727023401 xustar000000000000000030 mtime=1513200599.704010042 30 atime=1513200648.654608727 30 ctime=1513200660.677755775 nordugrid-arc-5.4.2/src/hed/mcc/tls/schema/Makefile.in0000644000175000002070000004350013214315727023451 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/mcc/tls/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print 
dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = 
@GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ 
am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = tls.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/mcc/tls/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/mcc/tls/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/mcc/tls/schema/PaxHeaders.7502/tls.xsd0000644000000000000000000000012412574532370022662 xustar000000000000000027 mtime=1441969400.372727 27 atime=1513200575.164709 30 ctime=1513200660.677755775 nordugrid-arc-5.4.2/src/hed/mcc/tls/schema/tls.xsd0000644000175000002070000001675512574532370022745 0ustar00mockbuildmock00000000000000 Location of private key. Default is /etc/grid-security/hostkey.pem for service and $HOME/.globus/userkey.pem for client. Location of public certificate. Default is /etc/grid-security/hostcert.pem for service and $HOME/.globus/usercert.pem for client. Location of proxy credentials - includes certificates, key and chain of involved certificates. Overwrites elements KeyPath and CertificatePath. Default is /tmp/hash{userid}.0 for client, and none for service. String representation of credentials. It may simply be the contents of the proxy file or a string representing any other kind of credential. Location of certificate of CA. Default is none. Directory containing certificates of accepted CAs. Default is /etc/grid-security/certificates Directory containing per-VO subdirectories each containing per-host *.lsc VOMS configuration files. Default is /etc/grid-security/vomsdir Whether checking client certificate. Only needed for service side. Default is "true" The DN list of the trusted voms server credential; in the AC part of voms proxy certificate, voms proxy certificate comes with the server certificate which is used to sign the AC. So when verifying the AC on the AC-consuming side (in ARC1, it is the MCCTLS which will consumes the AC), the server certificate will be checked against a trusted DN list. 
The AC can be trusted only if the DN and issuer's DN of the server certificate exactly match the DN list configured under the TLS component. DN list in an external file, which is in the same format as VOMSCertTrustDNChain How to behave if failure happens during VOMS processing. relaxed - use everything that passed validation. standard - same as relaxed but fail if parsing errors took place and VOMS extension is marked as critical. This is the default. strict - fail if any parsing error was discovered. noerrors - fail if any parsing or validation error happened. Type of handshake applied. Default is TLS. GSI features to use. Currently only supports Globus without delegation. Encryption level to use. Can choose from required - only ciphers with encryption are allowed (default for client), preferred - ciphers with encryption preferred and without encryption allowed (default for server), optional - ciphers with encryption allowed but without encryption preferred, off - only ciphers without encryption are allowed. nordugrid-arc-5.4.2/src/hed/mcc/tls/PaxHeaders.7502/README0000644000000000000000000000012411037472457020762 xustar000000000000000027 mtime=1216247087.350292 27 atime=1513200575.164709 30 ctime=1513200660.649755432 nordugrid-arc-5.4.2/src/hed/mcc/tls/README0000644000175000002070000000011011037472457021017 0ustar00mockbuildmock00000000000000MCC that handles TLS based channel security on server and client sides. nordugrid-arc-5.4.2/src/hed/mcc/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315727021337 xustar000000000000000030 mtime=1513200599.184003682 30 atime=1513200648.557607541 30 ctime=1513200660.507753696 nordugrid-arc-5.4.2/src/hed/mcc/Makefile.in0000644000175000002070000005552613214315727021412 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
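# The tls.xsd annotations above describe the configuration elements accepted by
# the TLS MCC: key/certificate/proxy locations, CA and VOMS directories, client
# authentication, VOMS processing mode, handshake type and encryption level.
# The XML markup of the schema is not preserved in this dump, so the sketch
# below is only an illustration: the element names, the <Component> wrapper and
# the file name tls-client.xml are assumptions reconstructed from the annotation
# text and its documented defaults, not a verbatim excerpt of the schema.

cat > tls-client.xml <<'EOF'
<Component name="tls.client" id="tls">
  <!-- client credentials; the paths below are the documented client defaults -->
  <KeyPath>$HOME/.globus/userkey.pem</KeyPath>
  <CertificatePath>$HOME/.globus/usercert.pem</CertificatePath>
  <!-- directory with certificates of accepted CAs (documented default) -->
  <CACertificatesDir>/etc/grid-security/certificates</CACertificatesDir>
  <!-- per-VO subdirectories with *.lsc files for VOMS AC validation (documented default) -->
  <VOMSDir>/etc/grid-security/vomsdir</VOMSDir>
  <!-- relaxed | standard (default) | strict | noerrors -->
  <VOMSProcessing>standard</VOMSProcessing>
  <!-- handshake defaults to TLS; encryption is "required" by default on the client -->
  <Handshake>TLS</Handshake>
  <Encryption>required</Encryption>
</Component>
EOF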
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/mcc DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ 
ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ 
LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = 
@libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = tcp http soap tls msgvalidator DIST_SUBDIRS = tcp http soap tls msgvalidator all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/mcc/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/mcc/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
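# The comment above, together with the $(RECURSIVE_TARGETS) rule that follows,
# is how automake drives the SUBDIRS of this Makefile (tcp http soap tls
# msgvalidator). A shell sketch of the usual ways to work with these mostly
# independent subdirectories from an already configured build tree; the
# directory name comes from SUBDIRS and the flag values are only examples:

cd src/hed/mcc/tls && make      # build a single MCC without the parent Makefile

make CXXFLAGS='-O2 -g'          # override a make variable for one invocation
                                # instead of editing the generated Makefiles

make -k                         # keep building the remaining subdirectories even
                                # if one fails; the recursive rule spots -k in
                                # $MAKEFLAGS and uses fail=yes instead of exit 1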
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
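# The targets that follow implement the standard automake cleaning hierarchy,
# each level removing everything the previous one does plus a bit more. A quick
# usage sketch, run from a configured build directory:

make mostlyclean       # drop objects and libtool *.lo files
make clean             # also drop built libraries, programs and the .libs dirs
make distclean         # also drop configure output (Makefile, config.h, ...)
make maintainer-clean  # also drop files that need the autotools to regenerate,
                       # which is why maintainer-clean-generic prints a warning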
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/mcc/PaxHeaders.7502/http0000644000000000000000000000013013214316024020161 xustar000000000000000029 mtime=1513200660.58575465 30 atime=1513200668.723854182 29 ctime=1513200660.58575465 nordugrid-arc-5.4.2/src/hed/mcc/http/0000755000175000002070000000000013214316024020306 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/mcc/http/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515022305 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200599.199003866 30 ctime=1513200660.579754576 nordugrid-arc-5.4.2/src/hed/mcc/http/Makefile.am0000644000175000002070000000240612052416515022351 0ustar00mockbuildmock00000000000000SUBDIRS = schema pkglib_LTLIBRARIES = libmcchttp.la noinst_PROGRAMS = http_test http_test_withtls libmcchttp_la_SOURCES = PayloadHTTP.cpp MCCHTTP.cpp PayloadHTTP.h MCCHTTP.h libmcchttp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libmcchttp_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la libmcchttp_la_LDFLAGS = $(LIBXML2_LIBS) -no-undefined -avoid-version -module http_test_SOURCES = http_test.cpp http_test_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) http_test_LDADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) http_test_withtls_SOURCES = http_test_withtls.cpp http_test_withtls_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) http_test_withtls_LDADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(OPENSSL_LIBS) nordugrid-arc-5.4.2/src/hed/mcc/http/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315727022316 xustar000000000000000030 mtime=1513200599.255004551 30 atime=1513200648.613608226 30 ctime=1513200660.580754589 nordugrid-arc-5.4.2/src/hed/mcc/http/Makefile.in0000644000175000002070000011403513214315727022370 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
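# The Makefile.am above builds libmcchttp.la as a libtool plugin rather than a
# regular shared library: "-module -avoid-version" produces an unversioned,
# dlopen-able object, "-no-undefined" requires all symbols to resolve at link
# time, and pkglib_LTLIBRARIES installs it under $(pkglibdir) (substituted from
# configure as @pkglibdir@). A sketch of building and inspecting it from a
# configured tree; the staging path /tmp/arc-stage and the .so suffix are
# assumptions (the module suffix is platform dependent):

cd src/hed/mcc/http && make            # builds libmcchttp.la plus the two
                                       # noinst_PROGRAMS test binaries
ls .libs/                              # the real plugin object lives here,
                                       # e.g. libmcchttp.so
make install DESTDIR=/tmp/arc-stage    # libtool --mode=install copies the
                                       # module into $(DESTDIR)$(pkglibdir)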
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ noinst_PROGRAMS = http_test$(EXEEXT) http_test_withtls$(EXEEXT) subdir = src/hed/mcc/http DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) libmcchttp_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libmcchttp_la_OBJECTS = libmcchttp_la-PayloadHTTP.lo \ libmcchttp_la-MCCHTTP.lo libmcchttp_la_OBJECTS = $(am_libmcchttp_la_OBJECTS) libmcchttp_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libmcchttp_la_CXXFLAGS) \ $(CXXFLAGS) $(libmcchttp_la_LDFLAGS) $(LDFLAGS) -o $@ PROGRAMS = $(noinst_PROGRAMS) am_http_test_OBJECTS = http_test-http_test.$(OBJEXT) http_test_OBJECTS = $(am_http_test_OBJECTS) am__DEPENDENCIES_1 = http_test_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) http_test_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(http_test_CXXFLAGS) \ $(CXXFLAGS) 
$(AM_LDFLAGS) $(LDFLAGS) -o $@ am_http_test_withtls_OBJECTS = \ http_test_withtls-http_test_withtls.$(OBJEXT) http_test_withtls_OBJECTS = $(am_http_test_withtls_OBJECTS) http_test_withtls_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) http_test_withtls_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(http_test_withtls_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libmcchttp_la_SOURCES) $(http_test_SOURCES) \ $(http_test_withtls_SOURCES) DIST_SOURCES = $(libmcchttp_la_SOURCES) $(http_test_SOURCES) \ $(http_test_withtls_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = 
@ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ 
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ 
host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = schema pkglib_LTLIBRARIES = libmcchttp.la libmcchttp_la_SOURCES = PayloadHTTP.cpp MCCHTTP.cpp PayloadHTTP.h MCCHTTP.h libmcchttp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libmcchttp_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la libmcchttp_la_LDFLAGS = $(LIBXML2_LIBS) -no-undefined -avoid-version -module http_test_SOURCES = http_test.cpp http_test_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) http_test_LDADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) http_test_withtls_SOURCES = http_test_withtls.cpp http_test_withtls_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) http_test_withtls_LDADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(OPENSSL_LIBS) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/mcc/http/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/mcc/http/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libmcchttp.la: $(libmcchttp_la_OBJECTS) $(libmcchttp_la_DEPENDENCIES) $(libmcchttp_la_LINK) -rpath $(pkglibdir) $(libmcchttp_la_OBJECTS) $(libmcchttp_la_LIBADD) $(LIBS) clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list http_test$(EXEEXT): $(http_test_OBJECTS) $(http_test_DEPENDENCIES) @rm -f http_test$(EXEEXT) $(http_test_LINK) $(http_test_OBJECTS) $(http_test_LDADD) $(LIBS) http_test_withtls$(EXEEXT): $(http_test_withtls_OBJECTS) $(http_test_withtls_DEPENDENCIES) @rm -f http_test_withtls$(EXEEXT) $(http_test_withtls_LINK) $(http_test_withtls_OBJECTS) $(http_test_withtls_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/http_test-http_test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/http_test_withtls-http_test_withtls.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmcchttp_la-MCCHTTP.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmcchttp_la-PayloadHTTP.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: 
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libmcchttp_la-PayloadHTTP.lo: PayloadHTTP.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcchttp_la_CXXFLAGS) $(CXXFLAGS) -MT libmcchttp_la-PayloadHTTP.lo -MD -MP -MF $(DEPDIR)/libmcchttp_la-PayloadHTTP.Tpo -c -o libmcchttp_la-PayloadHTTP.lo `test -f 'PayloadHTTP.cpp' || echo '$(srcdir)/'`PayloadHTTP.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmcchttp_la-PayloadHTTP.Tpo $(DEPDIR)/libmcchttp_la-PayloadHTTP.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='PayloadHTTP.cpp' object='libmcchttp_la-PayloadHTTP.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcchttp_la_CXXFLAGS) $(CXXFLAGS) -c -o libmcchttp_la-PayloadHTTP.lo `test -f 'PayloadHTTP.cpp' || echo '$(srcdir)/'`PayloadHTTP.cpp libmcchttp_la-MCCHTTP.lo: MCCHTTP.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcchttp_la_CXXFLAGS) $(CXXFLAGS) -MT libmcchttp_la-MCCHTTP.lo -MD -MP -MF $(DEPDIR)/libmcchttp_la-MCCHTTP.Tpo -c -o libmcchttp_la-MCCHTTP.lo `test -f 'MCCHTTP.cpp' || echo '$(srcdir)/'`MCCHTTP.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmcchttp_la-MCCHTTP.Tpo $(DEPDIR)/libmcchttp_la-MCCHTTP.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='MCCHTTP.cpp' object='libmcchttp_la-MCCHTTP.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcchttp_la_CXXFLAGS) $(CXXFLAGS) -c -o libmcchttp_la-MCCHTTP.lo `test -f 'MCCHTTP.cpp' || echo '$(srcdir)/'`MCCHTTP.cpp http_test-http_test.o: http_test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(http_test_CXXFLAGS) $(CXXFLAGS) -MT http_test-http_test.o -MD -MP -MF $(DEPDIR)/http_test-http_test.Tpo -c -o http_test-http_test.o `test -f 'http_test.cpp' || echo '$(srcdir)/'`http_test.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/http_test-http_test.Tpo $(DEPDIR)/http_test-http_test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='http_test.cpp' object='http_test-http_test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ 
@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(http_test_CXXFLAGS) $(CXXFLAGS) -c -o http_test-http_test.o `test -f 'http_test.cpp' || echo '$(srcdir)/'`http_test.cpp http_test-http_test.obj: http_test.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(http_test_CXXFLAGS) $(CXXFLAGS) -MT http_test-http_test.obj -MD -MP -MF $(DEPDIR)/http_test-http_test.Tpo -c -o http_test-http_test.obj `if test -f 'http_test.cpp'; then $(CYGPATH_W) 'http_test.cpp'; else $(CYGPATH_W) '$(srcdir)/http_test.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/http_test-http_test.Tpo $(DEPDIR)/http_test-http_test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='http_test.cpp' object='http_test-http_test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(http_test_CXXFLAGS) $(CXXFLAGS) -c -o http_test-http_test.obj `if test -f 'http_test.cpp'; then $(CYGPATH_W) 'http_test.cpp'; else $(CYGPATH_W) '$(srcdir)/http_test.cpp'; fi` http_test_withtls-http_test_withtls.o: http_test_withtls.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(http_test_withtls_CXXFLAGS) $(CXXFLAGS) -MT http_test_withtls-http_test_withtls.o -MD -MP -MF $(DEPDIR)/http_test_withtls-http_test_withtls.Tpo -c -o http_test_withtls-http_test_withtls.o `test -f 'http_test_withtls.cpp' || echo '$(srcdir)/'`http_test_withtls.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/http_test_withtls-http_test_withtls.Tpo $(DEPDIR)/http_test_withtls-http_test_withtls.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='http_test_withtls.cpp' object='http_test_withtls-http_test_withtls.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(http_test_withtls_CXXFLAGS) $(CXXFLAGS) -c -o http_test_withtls-http_test_withtls.o `test -f 'http_test_withtls.cpp' || echo '$(srcdir)/'`http_test_withtls.cpp http_test_withtls-http_test_withtls.obj: http_test_withtls.cpp @am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(http_test_withtls_CXXFLAGS) $(CXXFLAGS) -MT http_test_withtls-http_test_withtls.obj -MD -MP -MF $(DEPDIR)/http_test_withtls-http_test_withtls.Tpo -c -o http_test_withtls-http_test_withtls.obj `if test -f 'http_test_withtls.cpp'; then $(CYGPATH_W) 'http_test_withtls.cpp'; else $(CYGPATH_W) '$(srcdir)/http_test_withtls.cpp'; fi` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/http_test_withtls-http_test_withtls.Tpo $(DEPDIR)/http_test_withtls-http_test_withtls.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='http_test_withtls.cpp' object='http_test_withtls-http_test_withtls.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(http_test_withtls_CXXFLAGS) $(CXXFLAGS) -c -o http_test_withtls-http_test_withtls.obj `if test -f 'http_test_withtls.cpp'; then $(CYGPATH_W) 'http_test_withtls.cpp'; else $(CYGPATH_W) '$(srcdir)/http_test_withtls.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can 
cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstPROGRAMS \ clean-pkglibLTLIBRARIES mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-noinstPROGRAMS clean-pkglibLTLIBRARIES ctags \ ctags-recursive distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-pkglibLTLIBRARIES install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/mcc/http/PaxHeaders.7502/http_test_withtls.cpp0000644000000000000000000000012411114777117024554 xustar000000000000000027 mtime=1228144207.795496 27 atime=1513200575.140709 30 ctime=1513200660.584754638 nordugrid-arc-5.4.2/src/hed/mcc/http/http_test_withtls.cpp0000644000175000002070000000574111114777117024630 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include void test1(void) { std::cout<<"------ Testing simple file download ------"<process(request,response); std::cout<<"*** RESPONSE ***"<(*response.Payload()); for(int n = 0;n namespace ArcMCCHTTP { using namespace Arc; //! A base class for HTTP client and service MCCs. /*! This is a base class for HTTP client and service MCCs. It provides some common functionality for them, i.e. so far only a logger. */ class MCC_HTTP : public MCC { public: MCC_HTTP(Config *cfg,PluginArgument* parg); protected: static Logger logger; }; /** This class implements MCC to processes HTTP request. On input payload with PayloadStreamInterface is expected. HTTP message is read from stream ans it's body is converted into PayloadRaw and passed to next MCC. Returned payload of PayloadRawInterface type is treated as body part of returning PayloadHTTP. Generated HTTP response is sent though stream passed in input payload. 
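For illustration (hypothetical values): a request line such as
"GET /storage/file1 HTTP/1.1" carrying a header "Range: bytes=0-1023" would
be exposed to the next MCC as HTTP:METHOD=GET, HTTP:ENDPOINT=/storage/file1,
HTTP:RANGESTART=0 and HTTP:RANGEEND=1023, following the attribute list below.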
During processing of request/input message following attributes are generated: HTTP:METHOD - HTTP method e.g. GET, PUT, POST, etc. HTTP:ENDPOINT - URL taken from HTTP request ENDPOINT - global attribute equal to HTTP:ENDPOINT HTTP:RANGESTART - start of requested byte range HTTP:RANGEEND - end of requested byte range (inclusive) HTTP:name - all 'name' attributes of HTTP header. Attributes of response message of HTTP:name type are translated into HTTP header with corresponding 'name's. */ class MCC_HTTP_Service: public MCC_HTTP { public: MCC_HTTP_Service(Config *cfg,PluginArgument* parg); virtual ~MCC_HTTP_Service(void); virtual MCC_Status process(Message&,Message&); }; /** This class is a client part of HTTP MCC. It accepts PayloadRawInterface payload and uses it as body to generate HTTP request. Request is passed to next MCC as PayloadRawInterface type of payload. Returned PayloadStreamInterface payload is parsed into HTTP response and it's body is passed back to calling MCC as PayloadRawInerface. Attributes of request/input message of type HTTP:name are translated into HTTP header with corresponding 'name's. Special attributes HTTP:METHOD and HTTP:ENDPOINT specify method and URL in HTTP request. If not present meathod and URL are taken from configuration. In output/response message following attributes are present: HTTP:CODE - response code of HTTP HTTP:REASON - reason string of HTTP response HTTP:name - all 'name' attributes of HTTP header. */ class MCC_HTTP_Client: public MCC_HTTP { protected: std::string method_; std::string endpoint_; public: MCC_HTTP_Client(Config *cfg,PluginArgument* parg); virtual ~MCC_HTTP_Client(void); virtual MCC_Status process(Message&,Message&); }; } // namespace ArcMCCHTTP #endif /* __ARC_MCCSOAP_H__ */ nordugrid-arc-5.4.2/src/hed/mcc/http/PaxHeaders.7502/http_test.cpp0000644000000000000000000000012411114777117022776 xustar000000000000000027 mtime=1228144207.795496 27 atime=1513200575.143709 30 ctime=1513200660.583754625 nordugrid-arc-5.4.2/src/hed/mcc/http/http_test.cpp0000644000175000002070000000474411114777117023054 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include void test1(void) { std::cout<<"------ Testing simple file download ------"<process(request,response); std::cout<<"*** RESPONSE ***"<(*response.Payload()); for(int n = 0;n #endif #include #include #include #include #include #include #include "PayloadHTTP.h" #include "MCCHTTP.h" Arc::Logger ArcMCCHTTP::MCC_HTTP::logger(Arc::Logger::getRootLogger(), "MCC.HTTP"); ArcMCCHTTP::MCC_HTTP::MCC_HTTP(Arc::Config *cfg,Arc::PluginArgument* parg) : Arc::MCC(cfg,parg) { } static Arc::Plugin* get_mcc_service(Arc::PluginArgument* arg) { Arc::MCCPluginArgument* mccarg = arg?dynamic_cast(arg):NULL; if(!mccarg) return NULL; return new ArcMCCHTTP::MCC_HTTP_Service((Arc::Config*)(*mccarg),mccarg); } static Arc::Plugin* get_mcc_client(Arc::PluginArgument* arg) { Arc::MCCPluginArgument* mccarg = arg?dynamic_cast(arg):NULL; if(!mccarg) return NULL; return new ArcMCCHTTP::MCC_HTTP_Client((Arc::Config*)(*mccarg),mccarg); } extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "http.service", "HED:MCC", NULL, 0, &get_mcc_service }, { "http.client", "HED:MCC", NULL, 0, &get_mcc_client }, { NULL, NULL, NULL, 0, NULL } }; namespace ArcMCCHTTP { using namespace Arc; class HTTPSecAttr: public SecAttr { friend class MCC_HTTP_Service; friend class MCC_HTTP_Client; public: HTTPSecAttr(PayloadHTTPIn& payload); virtual 
~HTTPSecAttr(void); virtual operator bool(void) const; virtual bool Export(SecAttrFormat format,XMLNode &val) const; virtual std::string get(const std::string& id) const; protected: std::string action_; std::string object_; virtual bool equal(const SecAttr &b) const; }; HTTPSecAttr::HTTPSecAttr(PayloadHTTPIn& payload) { action_=payload.Method(); std::string path = payload.Endpoint(); // Remove service, port and protocol - those will be provided by // another layer std::string::size_type p = path.find("://"); if(p != std::string::npos) { p=path.find('/',p+3); if(p != std::string::npos) { path.erase(0,p); }; }; object_=path; } HTTPSecAttr::~HTTPSecAttr(void) { } HTTPSecAttr::operator bool(void) const { return true; } std::string HTTPSecAttr::get(const std::string& id) const { if(id == "ACTION") return action_; if(id == "OBJECT") return object_; return ""; } bool HTTPSecAttr::equal(const SecAttr &b) const { try { const HTTPSecAttr& a = (const HTTPSecAttr&)b; return ((action_ == a.action_) && (object_ == a.object_)); } catch(std::exception&) { }; return false; } bool HTTPSecAttr::Export(SecAttrFormat format,XMLNode &val) const { if(format == UNDEFINED) { } else if(format == ARCAuth) { NS ns; ns["ra"]="http://www.nordugrid.org/schemas/request-arc"; val.Namespaces(ns); val.Name("ra:Request"); XMLNode item = val.NewChild("ra:RequestItem"); if(!object_.empty()) { XMLNode object = item.NewChild("ra:Resource"); object=object_; object.NewAttribute("Type")="string"; object.NewAttribute("AttributeId")="http://www.nordugrid.org/schemas/policy-arc/types/http/path"; }; if(!action_.empty()) { XMLNode action = item.NewChild("ra:Action"); action=action_; action.NewAttribute("Type")="string"; action.NewAttribute("AttributeId")="http://www.nordugrid.org/schemas/policy-arc/types/http/method"; }; return true; } else if(format == XACML) { NS ns; ns["ra"]="urn:oasis:names:tc:xacml:2.0:context:schema:os"; val.Namespaces(ns); val.Name("ra:Request"); if(!object_.empty()) { XMLNode object = val.NewChild("ra:Resource"); XMLNode attr = object.NewChild("ra:Attribute"); attr.NewChild("ra:AttributeValue") = object_; attr.NewAttribute("DataType")="xs:string"; attr.NewAttribute("AttributeId")="http://www.nordugrid.org/schemas/policy-arc/types/http/path"; }; if(!action_.empty()) { XMLNode action = val.NewChild("ra:Action"); XMLNode attr = action.NewChild("ra:Attribute"); attr.NewChild("ra:AttributeValue") = action_; attr.NewAttribute("DataType")="xs:string"; attr.NewAttribute("AttributeId")="http://www.nordugrid.org/schemas/policy-arc/types/http/method"; }; return true; } else { }; return false; } MCC_HTTP_Service::MCC_HTTP_Service(Config *cfg,PluginArgument* parg):MCC_HTTP(cfg,parg) { } MCC_HTTP_Service::~MCC_HTTP_Service(void) { } static MCC_Status make_http_fault(Logger& logger, PayloadHTTPIn &inpayload, PayloadStreamInterface& stream, Message& outmsg, int code, const char* desc = NULL) { if((desc == NULL) || (*desc == 0)) { switch(code) { case HTTP_BAD_REQUEST: desc="Bad Request"; break; case HTTP_NOT_FOUND: desc="Not Found"; break; case HTTP_INTERNAL_ERR: desc="Internal error"; break; case HTTP_NOT_IMPLEMENTED: desc="Not Implemented"; break; default: desc="Something went wrong"; break; }; }; logger.msg(WARNING, "HTTP Error: %d %s",code,desc); PayloadHTTPOut outpayload(code,desc); bool keep_alive = (!inpayload)?false:inpayload.KeepAlive(); outpayload.KeepAlive(keep_alive); if(!outpayload.Flush(stream)) return MCC_Status(); // Returning empty payload because response is already sent outmsg.Payload(new PayloadRaw); 
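// Illustrative sketch (hypothetical trace, not a verbatim dump of what
// PayloadHTTPOut writes): a call with HTTP_NOT_FOUND and no description
// flushes approximately
//   HTTP/1.1 404 Not Found
// plus the Connection/Content-Length headers generated by PayloadHTTPOut,
// with an empty body, since only code and reason are supplied here.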
if(!keep_alive) return MCC_Status(SESSION_CLOSE); // If connection is supposed to be kept any unused body must be ignored if(!inpayload) return MCC_Status(SESSION_CLOSE); if(!inpayload.Sync()) return MCC_Status(SESSION_CLOSE); return MCC_Status(STATUS_OK); } static MCC_Status make_raw_fault(Message& outmsg,const char* desc = NULL) { PayloadRaw* outpayload = new PayloadRaw; if(desc) outpayload->Insert(desc,0); outmsg.Payload(outpayload); if(desc) return MCC_Status(GENERIC_ERROR,"HTTP",desc); return MCC_Status(GENERIC_ERROR,"HTTP"); } static MCC_Status make_raw_fault(Message& outmsg,const std::string desc) { return make_raw_fault(outmsg,desc.c_str()); } static MCC_Status make_raw_fault(Message& outmsg,const MCC_Status& desc) { PayloadRaw* outpayload = new PayloadRaw; std::string errstr = (std::string)desc; if(!errstr.empty()) outpayload->Insert(errstr.c_str(),0); outmsg.Payload(outpayload); return desc; } static void parse_http_range(PayloadHTTP& http,Message& msg) { std::string http_range = http.Attribute("range"); if(http_range.empty()) return; if(strncasecmp(http_range.c_str(),"bytes=",6) != 0) return; std::string::size_type p = http_range.find(',',6); if(p != std::string::npos) { http_range=http_range.substr(6,p-6); } else { http_range=http_range.substr(6); }; p=http_range.find('-'); std::string val; if(p != std::string::npos) { val=http_range.substr(0,p); if(!val.empty()) msg.Attributes()->set("HTTP:RANGESTART",val); val=http_range.substr(p+1); if(!val.empty()) msg.Attributes()->set("HTTP:RANGEEND",val); }; } MCC_Status MCC_HTTP_Service::process(Message& inmsg,Message& outmsg) { // Extracting payload if(!inmsg.Payload()) return MCC_Status(); PayloadStreamInterface* inpayload = NULL; try { inpayload = dynamic_cast(inmsg.Payload()); } catch(std::exception& e) { }; if(!inpayload) return MCC_Status(); // Converting stream payload to HTTP which implements raw and stream interfaces PayloadHTTPIn nextpayload(*inpayload); if(!nextpayload) { logger.msg(WARNING, "Cannot create http payload"); return make_http_fault(logger,nextpayload,*inpayload,outmsg,HTTP_BAD_REQUEST); }; if(nextpayload.Method() == "END") { return MCC_Status(SESSION_CLOSE); }; // By now PayloadHTTPIn only parsed header of incoming message. // If header contains Expect: 100-continue then intermediate // response must be returned to client followed by real response. if(nextpayload.AttributeMatch("expect", "100-continue")) { // For this request intermediate 100 response must be sent // TODO: maybe use PayloadHTTPOut for sending header. std::string oheader = "HTTP/1.1 100 CONTINUE\r\n\r\n"; // 100-continue happens only in version 1.1. if(!inpayload->Put(oheader)) { // Failed to send intermediate response. // Most probbaly connection was closed. return MCC_Status(SESSION_CLOSE); } // Now client will send body of message and it will become // available through PayloadHTTPIn } bool keep_alive = nextpayload.KeepAlive(); // Creating message to pass to next MCC and setting new payload. 
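// Note on the "Expect: 100-continue" handling above: the client first sends
// only the request header and waits for the intermediate
// "HTTP/1.1 100 Continue" reply before transmitting the body; the real
// response follows once the body has been processed by the chain below.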
Message nextinmsg = inmsg; nextinmsg.Payload(&nextpayload); // Creating attributes // Endpoints must be URL-like so make sure HTTP path is // converted to HTTP URL std::string endpoint = nextpayload.Endpoint(); { std::string::size_type p = endpoint.find("://"); if(p == std::string::npos) { // TODO: Use Host attribute of HTTP std::string oendpoint = nextinmsg.Attributes()->get("ENDPOINT"); p=oendpoint.find("://"); if(p != std::string::npos) { oendpoint.erase(0,p+3); }; // Assuming we have host:port here if(oendpoint.empty() || (oendpoint[oendpoint.length()-1] != '/')) { if(endpoint[0] != '/') oendpoint+="/"; }; // TODO: HTTPS detection endpoint="http://"+oendpoint+endpoint; }; }; nextinmsg.Attributes()->set("ENDPOINT",endpoint); nextinmsg.Attributes()->set("HTTP:ENDPOINT",nextpayload.Endpoint()); nextinmsg.Attributes()->set("HTTP:METHOD",nextpayload.Method()); bool request_is_head = (upper(nextpayload.Method()) == "HEAD"); // Filling security attributes HTTPSecAttr* sattr = new HTTPSecAttr(nextpayload); nextinmsg.Auth()->set("HTTP",sattr); parse_http_range(nextpayload,nextinmsg); // Reason ? for(std::multimap::const_iterator i = nextpayload.Attributes().begin();i!=nextpayload.Attributes().end();++i) { nextinmsg.Attributes()->add("HTTP:"+i->first,i->second); }; if(!ProcessSecHandlers(nextinmsg,"incoming")) { return make_http_fault(logger,nextpayload,*inpayload,outmsg,HTTP_BAD_REQUEST); // Maybe not 400 ? }; // Call next MCC MCCInterface* next = Next(nextpayload.Method()); if(!next) { logger.msg(WARNING, "No next element in the chain"); // Here selection is on method name. So failure result is "not supported" return make_http_fault(logger,nextpayload,*inpayload,outmsg,HTTP_NOT_IMPLEMENTED); } Message nextoutmsg = outmsg; nextoutmsg.Payload(NULL); MCC_Status ret = next->process(nextinmsg,nextoutmsg); // Do checks and extract raw response if(!ret) { if(nextoutmsg.Payload()) delete nextoutmsg.Payload(); logger.msg(WARNING, "next element of the chain returned error status"); int http_code = (ret.getKind() == UNKNOWN_SERVICE_ERROR)?HTTP_NOT_FOUND:HTTP_INTERNAL_ERR; // Check if next chain provided error code and reason std::string http_code_s = nextoutmsg.Attributes()->get("HTTP:CODE"); std::string http_resp = nextoutmsg.Attributes()->get("HTTP:REASON"); if (!http_code_s.empty()) stringto(http_code_s, http_code); return make_http_fault(logger,nextpayload,*inpayload,outmsg,http_code,http_resp.c_str()); } if(!nextoutmsg.Payload()) { logger.msg(WARNING, "next element of the chain returned no payload"); return make_http_fault(logger,nextpayload,*inpayload,outmsg,HTTP_INTERNAL_ERR); } PayloadRawInterface* retpayload = NULL; PayloadStreamInterface* strpayload = NULL; try { retpayload = dynamic_cast(nextoutmsg.Payload()); } catch(std::exception& e) { }; if(!retpayload) try { strpayload = dynamic_cast(nextoutmsg.Payload()); } catch(std::exception& e) { }; if((!retpayload) && (!strpayload)) { logger.msg(WARNING, "next element of the chain returned invalid/unsupported payload"); delete nextoutmsg.Payload(); return make_http_fault(logger,nextpayload,*inpayload,outmsg,HTTP_INTERNAL_ERR); }; if(!ProcessSecHandlers(nextinmsg,"outgoing")) { delete nextoutmsg.Payload(); return make_http_fault(logger,nextpayload,*inpayload,outmsg,HTTP_BAD_REQUEST); // Maybe not 400 ? 
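// Worked example for the ENDPOINT reconstruction earlier in this function
// (hypothetical values): a request path "/arex/job1" combined with a
// transport-level ENDPOINT attribute such as "tcp://host.example.org:2010"
// yields "http://host.example.org:2010/arex/job1"; HTTPS detection is still
// a TODO above.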
}; // Create HTTP response from raw body content // Use stream payload of inmsg to send HTTP response int http_code = HTTP_OK; std::string http_code_s = nextoutmsg.Attributes()->get("HTTP:CODE"); std::string http_resp = nextoutmsg.Attributes()->get("HTTP:REASON"); if(http_resp.empty()) http_resp = "OK"; if(!http_code_s.empty()) stringto(http_code_s,http_code); nextoutmsg.Attributes()->removeAll("HTTP:CODE"); nextoutmsg.Attributes()->removeAll("HTTP:REASON"); /* int l = 0; if(retpayload) { if(retpayload->BufferPos(0) != 0) { http_code=HTTP_PARTIAL; http_resp="Partial content"; } else { for(int i = 0;;++i) { if(retpayload->Buffer(i) == NULL) break; l=retpayload->BufferPos(i) + retpayload->BufferSize(i); }; if(l != retpayload->Size()) { http_code=HTTP_PARTIAL; http_resp="Partial content"; }; }; } else { if((strpayload->Pos() != 0) || (strpayload->Limit() != strpayload->Size())) { http_code=HTTP_PARTIAL; http_resp="Partial content"; }; }; */ PayloadHTTPOut* outpayload = NULL; PayloadHTTPOutRaw* routpayload = NULL; PayloadHTTPOutStream* soutpayload = NULL; if(retpayload) { routpayload = new PayloadHTTPOutRaw(http_code,http_resp,request_is_head); outpayload = routpayload; } else { soutpayload = new PayloadHTTPOutStream(http_code,http_resp,request_is_head); outpayload = soutpayload; }; // Use attributes which higher level MCC may have produced for HTTP for(AttributeIterator i = nextoutmsg.Attributes()->getAll();i.hasMore();++i) { const char* key = i.key().c_str(); if(strncmp("HTTP:",key,5) == 0) { key+=5; // TODO: check for special attributes: method, code, reason, endpoint, etc. outpayload->Attribute(std::string(key),*i); }; }; outpayload->KeepAlive(keep_alive); if(retpayload) { routpayload->Body(*retpayload); } else { soutpayload->Body(*strpayload); } bool flush_r = outpayload->Flush(*inpayload); delete outpayload; outmsg = nextoutmsg; // Returning empty payload because response is already sent through Flush // TODO: add support for non-stream communication through chain. outmsg.Payload(new PayloadRaw); if(!flush_r) { // If flush failed then we can't know if anything HTTPish was // already sent. Hence we are just making lower level close // connection. logger.msg(WARNING, "Error to flush output payload"); return MCC_Status(SESSION_CLOSE); }; if(!keep_alive) return MCC_Status(SESSION_CLOSE); // Make sure whole body sent to us was fetch from input stream. if(!nextpayload.Sync()) return MCC_Status(SESSION_CLOSE); return MCC_Status(STATUS_OK); } MCC_HTTP_Client::MCC_HTTP_Client(Config *cfg,PluginArgument* parg):MCC_HTTP(cfg,parg) { endpoint_=(std::string)((*cfg)["Endpoint"]); method_=(std::string)((*cfg)["Method"]); } MCC_HTTP_Client::~MCC_HTTP_Client(void) { } static MCC_Status extract_http_response(Message& nextoutmsg, Message& outmsg, bool is_head, PayloadHTTPIn * & outpayload) { // Do checks and process response - supported response so far is stream // Generated result is HTTP payload with Raw and Stream interfaces // Check if any payload in message if(!nextoutmsg.Payload()) { return make_raw_fault(outmsg,"No response received by HTTP layer"); }; // Check if payload is stream (currently no other payload kinds are supported) PayloadStreamInterface* retpayload = NULL; try { retpayload = dynamic_cast(nextoutmsg.Payload()); } catch(std::exception& e) { }; if(!retpayload) { delete nextoutmsg.Payload(); return make_raw_fault(outmsg,"HTTP layer got something that is not stream"); }; // Try to parse payload. At least header part. 
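// (PayloadHTTPIn parses the response header eagerly in its constructor; the
// body stays in the underlying stream and is delivered on demand through the
// Raw/Stream interfaces, so large responses are not buffered at this point.)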
outpayload = new PayloadHTTPIn(*retpayload,true,is_head); if(!outpayload) { delete retpayload; return make_raw_fault(outmsg,"Returned payload is not recognized as HTTP"); }; if(!(*outpayload)) { std::string errstr = "Returned payload is not recognized as HTTP: "+outpayload->GetError(); delete outpayload; outpayload = NULL; return make_raw_fault(outmsg,errstr.c_str()); }; // Check for closed connection during response - not suitable in client mode if(outpayload->Method() == "END") { delete outpayload; outpayload = NULL; return make_raw_fault(outmsg,"Connection was closed"); }; return MCC_Status(STATUS_OK); } MCC_Status MCC_HTTP_Client::process(Message& inmsg,Message& outmsg) { // Take payload, add HTTP stuf by using PayloadHTTPOut and // pass further through chain. // Extracting payload if(!inmsg.Payload()) return make_raw_fault(outmsg,"Nothing to send"); PayloadRawInterface* inrpayload = NULL; PayloadStreamInterface* inspayload = NULL; try { inrpayload = dynamic_cast(inmsg.Payload()); } catch(std::exception& e) { }; try { inspayload = dynamic_cast(inmsg.Payload()); } catch(std::exception& e) { }; if((!inrpayload) && (!inspayload)) return make_raw_fault(outmsg,"Notihing to send"); // Making HTTP request // Use attributes which higher level MCC may have produced for HTTP std::string http_method = inmsg.Attributes()->get("HTTP:METHOD"); std::string http_endpoint = inmsg.Attributes()->get("HTTP:ENDPOINT"); if(http_method.empty()) http_method=method_; if(http_endpoint.empty()) http_endpoint=endpoint_; AutoPointer nextrpayload(inrpayload?new PayloadHTTPOutRaw(http_method,http_endpoint):NULL); AutoPointer nextspayload(inspayload?new PayloadHTTPOutStream(http_method,http_endpoint):NULL); PayloadHTTPOut* nextpayload(inrpayload? dynamic_cast(nextrpayload.Ptr()): dynamic_cast(nextspayload.Ptr()) ); bool expect100 = false; for(AttributeIterator i = inmsg.Attributes()->getAll();i.hasMore();++i) { const char* key = i.key().c_str(); if(strncmp("HTTP:",key,5) == 0) { key+=5; // TODO: check for special attributes: method, code, reason, endpoint, etc. if(strcasecmp(key,"METHOD") == 0) continue; if(strcasecmp(key,"ENDPOINT") == 0) continue; if(strcasecmp(key,"EXPECT") == 0) { if(Arc::lower(*i) == "100-continue") expect100 = true; } nextpayload->Attribute(std::string(key),*i); }; }; nextpayload->Attribute("User-Agent","ARC"); bool request_is_head = (upper(http_method) == "HEAD"); // Creating message to pass to next MCC and setting new payload.. Message nextinmsg = inmsg; if(inrpayload) { nextrpayload->Body(*inrpayload,false); nextinmsg.Payload(nextrpayload.Ptr()); } else { nextspayload->Body(*inspayload,false); nextinmsg.Payload(nextspayload.Ptr()); }; // Call next MCC MCCInterface* next = Next(); if(!next) return make_raw_fault(outmsg,"Chain has no continuation"); Message nextoutmsg = outmsg; nextoutmsg.Payload(NULL); MCC_Status ret; PayloadHTTPIn* outpayload = NULL; if(!expect100) { // Simple request and response ret = next->process(nextinmsg,nextoutmsg); if(!ret) { delete nextoutmsg.Payload(); return make_raw_fault(outmsg,ret); }; ret = extract_http_response(nextoutmsg, outmsg, request_is_head, outpayload); if(!ret) return ret; // TODO: handle 100 response sent by server just in case } else { // Header and body must be sent separately with intermediate server // response fetched after header is sent. 
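// (The point of this two-step exchange is to avoid streaming a potentially
// large request body to a server that is going to reject the request anyway;
// at worst only the header round-trip is wasted.)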
// Turning body off and sending header only nextpayload->ResetOutput(true,false); ret = next->process(nextinmsg,nextoutmsg); if(!ret) { delete nextoutmsg.Payload(); return make_raw_fault(outmsg,ret); }; // Parse response and check if it is 100 ret = extract_http_response(nextoutmsg, outmsg, request_is_head, outpayload); if(!ret) return ret; int resp_code = outpayload->Code(); if(resp_code == HTTP_CONTINUE) { // So continue with body delete outpayload; outpayload = NULL; nextpayload->ResetOutput(false,true); ret = next->process(nextinmsg,nextoutmsg); if(!ret) { delete nextoutmsg.Payload(); return make_raw_fault(outmsg,ret); }; ret = extract_http_response(nextoutmsg, outmsg, request_is_head, outpayload); if(!ret) return ret; } else { // Any other response should mean server can't accept request. // But just in case server responded with 2xx this can fool our caller. if(HTTP_CODE_IS_GOOD(resp_code)) { // Convert positive response into something bad std::string reason = outpayload->Reason(); delete outpayload; outpayload = NULL; return make_raw_fault(outmsg,"Unexpected positive response received: "+ Arc::tostring(resp_code)+" "+reason); } } } // Here outpayload should contain real response outmsg = nextoutmsg; // Payload returned by next.process is not destroyed here because // it is now owned by outpayload. outmsg.Payload(outpayload); outmsg.Attributes()->set("HTTP:CODE",tostring(outpayload->Code())); outmsg.Attributes()->set("HTTP:REASON",outpayload->Reason()); outmsg.Attributes()->set("HTTP:KEEPALIVE",outpayload->KeepAlive()?"TRUE":"FALSE"); for(std::map::const_iterator i = outpayload->Attributes().begin();i!=outpayload->Attributes().end();++i) { outmsg.Attributes()->add("HTTP:"+i->first,i->second); }; return MCC_Status(STATUS_OK); } } // namespace ArcMCCHTTP nordugrid-arc-5.4.2/src/hed/mcc/http/PaxHeaders.7502/PayloadHTTP.h0000644000000000000000000000012412304202374022504 xustar000000000000000027 mtime=1393624316.401844 27 atime=1513200575.141709 30 ctime=1513200660.582754613 nordugrid-arc-5.4.2/src/hed/mcc/http/PayloadHTTP.h0000644000175000002070000003240212304202374022552 0ustar00mockbuildmock00000000000000#ifndef __ARC_PAYLOADHTTP_H__ #define __ARC_PAYLOADHTTP_H__ #include #include #ifdef HAVE_STDINT_H #include #endif #include #include #include #define HTTP_OK (200) #define HTTP_BAD_REQUEST (400) #define HTTP_NOT_FOUND (404) #define HTTP_PARTIAL (206) #define HTTP_RANGE_NOT_SATISFIABLE (416) #define HTTP_INTERNAL_ERR (500) #define HTTP_NOT_IMPLEMENTED (501) #define HTTP_CONTINUE (100) #define HTTP_CODE_IS_GOOD(CODE) (((CODE)>=200) && ((CODE)<=300)) namespace ArcMCCHTTP { using namespace Arc; /* PayloadHTTP PayloadHTTPIn Stream - body Raw - body PayloadHTTPOut Stream - whole Raw - whole */ /** These classes implement parsing and generation of HTTP messages. They implement only subset of HTTP/1.1 and also provides PayloadRawInterface and PayloadStreamInterface for including as payload into Message passed through MCC chains. 
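 A minimal usage sketch (illustrative only; 'stream' stands for any connected
 PayloadStreamInterface, e.g. one obtained from a lower transport MCC):

   PayloadHTTPOutRaw request("GET","/index.html");
   request.Attribute("Host","example.org");
   request.Flush(stream);             // sends request line and headers
   PayloadHTTPIn response(stream);    // parses the response header
   if(response && (response.Code() == HTTP_OK)) {
     const char* body = response.Content(); // body is fetched lazily here
   }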
*/ class PayloadHTTP { protected: static Arc::Logger logger; bool valid_; /** set to true if object is valid */ std::string uri_; /** URI being contacted */ int version_major_; /** major number of HTTP version - must be 1 */ int version_minor_; /** minor number of HTTP version - must be 0 or 1 */ std::string method_; /** HTTP method being used or requested (if request) */ int code_; /** HTTP code being sent or supplied (if response) */ std::string reason_; /** HTTP reason being sent or supplied (if response) */ int64_t length_; /** Content-length of HTTP message */ int64_t offset_; /** Logical beginning of content computed from Content-Range */ int64_t size_; /** Logical size of content obtained from Content-Range */ int64_t end_; /** Logical end of content computed from Content-Range */ bool keep_alive_; /** true if conection should not be closed after response */ std::multimap attributes_; /* All HTTP attributes */ std::string error_; /** Textual description of error which happened */ public: /** Constructor - creates empty object */ PayloadHTTP(void); /** Constructor - creates HTTP request. */ PayloadHTTP(const std::string& method,const std::string& url); /** Constructor - creates HTTP response. */ PayloadHTTP(int code,const std::string& reason); virtual ~PayloadHTTP(void); virtual operator bool(void) { return valid_; }; virtual bool operator!(void) { return !valid_; }; /** Returns HTTP header attribute with specified name. Empty string if no such attribute. */ virtual const std::string& Attribute(const std::string& name) const; /** Returns all HTTP header attributes. */ virtual const std::multimap& Attributes(void) const; /** Returns HTTP header attributes with specified name. */ virtual const std::list Attributes(const std::string& name) const; /** Returns true if there is attribute with specified name and value. Matching is case-insensitive with empty space trimed. */ virtual bool AttributeMatch(const std::string& name, const std::string& value) const; /** Returns textual description of last error */ std::string GetError() { return error_; }; }; class PayloadHTTPIn: public PayloadHTTP, public PayloadRawInterface, public PayloadStreamInterface { protected: typedef enum { CHUNKED_NONE = 0, CHUNKED_START, CHUNKED_CHUNK, CHUNKED_END, CHUNKED_EOF, CHUNKED_ERROR } chunked_t; typedef enum { MULTIPART_NONE = 0, MULTIPART_START, MULTIPART_BODY, MULTIPART_END, MULTIPART_EOF, MULTIPART_ERROR } multipart_t; bool head_response_; /** true if HTTP response for HEAD request is expected */ chunked_t chunked_; /** chunked encoding parsing state */ int64_t chunk_size_; multipart_t multipart_; std::string multipart_tag_; std::string multipart_buf_; PayloadStreamInterface* stream_; /** stream used to pull HTTP data from */ uint64_t stream_offset_; /** amount of data read from stream_ */ bool stream_own_; /** if true stream_ is owned by this */ bool fetched_; /** true if whole content of HTTP body was fetched and stored in internal buffers. Otherwise only header and part of body in tbuf_ was fetched and rest is to be read through stream_. 
*/ bool header_read_; /** true if whole header was read from stream_ */ bool body_read_; /** true if whole body was read from stream_ */ char tbuf_[1024]; /** intermediate buffer for reading header lines */ int tbuflen_; /** amount of data stored in tbuf */ char* body_; int64_t body_size_; bool readtbuf(void); /** Read from stream_ till \r\n */ bool readline(std::string& line); bool readline_chunked(std::string& line); bool readline_multipart(std::string& line); /** Read up to 'size' bytes from stream_ */ bool read(char* buf,int64_t& size); bool read_chunked(char* buf,int64_t& size); bool flush_chunked(void); char* find_multipart(char* buf,int64_t size); bool read_multipart(char* buf,int64_t& size); bool flush_multipart(void); /** Read HTTP header and fill internal variables */ bool read_header(void); bool parse_header(void); /** Read Body of HTTP message and store it into internal buffer. Avoid using it unless really needed because it can read a lot of data. TODO: maybe using on disk buffer can help with GB sized bodies. */ bool get_body(void); public: /** Constructor - creates object by parsing HTTP request or response from 'stream'. Supplied stream is associated with object for later use. If 'own' is set to true then stream will be deleted in destructor. Because stream can be used by this object during whole lifetime it is important not to destroy stream till this object is deleted. */ PayloadHTTPIn(PayloadStreamInterface& stream,bool own = false,bool head_response = false); virtual ~PayloadHTTPIn(void); virtual operator bool(void) { return valid_; }; virtual bool operator!(void) { return !valid_; }; virtual std::string Method(void) { return method_; }; virtual std::string Endpoint(void) { return uri_; }; virtual std::string Reason(void) { return reason_; }; virtual int Code(void) { return code_; }; virtual bool KeepAlive(void) { return keep_alive_; }; // Fetch anything what is left of current request from input stream // to sync for next request. 
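// (Needed for HTTP keep-alive: if the message body was not fully consumed,
// the unread bytes must be drained so that the next message on the same
// connection starts at a clean boundary.)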
virtual bool Sync(void); // PayloadRawInterface implemented methods virtual char operator[](PayloadRawInterface::Size_t pos) const; virtual char* Content(PayloadRawInterface::Size_t pos = -1); virtual PayloadRawInterface::Size_t Size(void) const; virtual char* Insert(PayloadRawInterface::Size_t pos = 0,PayloadRawInterface::Size_t size = 0); virtual char* Insert(const char* s,PayloadRawInterface::Size_t pos = 0,PayloadRawInterface::Size_t size = -1); virtual char* Buffer(unsigned int num = 0); virtual PayloadRawInterface::Size_t BufferSize(unsigned int num = 0) const; virtual PayloadRawInterface::Size_t BufferPos(unsigned int num = 0) const; virtual bool Truncate(PayloadRawInterface::Size_t size); // PayloadStreamInterface implemented methods virtual bool Get(char* buf,int& size); virtual bool Put(const char* buf,PayloadStreamInterface::Size_t size); virtual int Timeout(void) const; virtual void Timeout(int to); virtual PayloadStreamInterface::Size_t Pos(void) const; virtual PayloadStreamInterface::Size_t Limit(void) const; }; class PayloadHTTPOut: public PayloadHTTP { protected: bool head_response_; /** true if HTTP response for HEAD request to be generated */ PayloadRawInterface* rbody_; /** associated HTTP Body buffer if any (to avoid copying to own buffer) */ PayloadStreamInterface* sbody_; /** associated HTTP Body stream if any (to avoid copying to own buffer) */ PayloadStreamInterface::Size_t sbody_size_; bool body_own_; /** if true rbody_ and sbody_ is owned by this */ std::string header_; /** Header to be prepended to body */ bool to_stream_; /** Header was generated for streaming data */ bool use_chunked_transfer_; /** Chunked transfer to be used */ uint64_t stream_offset_; /** Amount of data read read through Stream interface */ bool stream_finished_; /** Set to true when reading through Stream interface considered to be done */ bool enable_header_out_; /** set to false to disable HTTP header in output */ bool enable_body_out_; /** set to false to disable HTTP body in output */ bool make_header(bool to_stream); bool remake_header(bool to_stream); PayloadRawInterface::Size_t body_size(void) const; PayloadRawInterface::Size_t data_size(void) const; public: /** Constructor - creates HTTP request to be sent. */ PayloadHTTPOut(const std::string& method,const std::string& url); /** Constructor - creates HTTP response to be sent. If 'head_response' is set to true then response is generated as if it is result of HEAD request. */ PayloadHTTPOut(int code,const std::string& reason,bool head_response = false); virtual ~PayloadHTTPOut(void); /** Resets state of Stream and Raw interfaces so that this object can be used again for providing all HTTP elements */ void ResetOutput(bool enable_header, bool enable_body); /** Adds HTTP header attribute 'name' = 'value' */ virtual void Attribute(const std::string& name,const std::string& value); virtual void KeepAlive(bool keep_alive) { keep_alive_=keep_alive; }; /** Send header of created object through provided stream. */ virtual bool FlushHeader(PayloadStreamInterface& stream); /** Send body of created object through provided stream. */ /* After this call associated stream body object usually is positioned at its end of data. 
*/ virtual bool FlushBody(PayloadStreamInterface& stream); /** Shortcut for FlushHeader() && FlushBody() */ virtual bool Flush(PayloadStreamInterface& stream); }; class PayloadHTTPOutStream: public PayloadHTTPOut, public PayloadStreamInterface { protected: //int chunk_size_get(char* buf,int size,int l,uint64_t chunk_size); //std::string chunk_size_str_; /** Buffer to store chunk size */ //std::string::size_type chunk_size_offset_; /** How much of chunk_size_str_ is sent */ public: PayloadHTTPOutStream(const std::string& method,const std::string& url); PayloadHTTPOutStream(int code,const std::string& reason,bool head_response = false); virtual ~PayloadHTTPOutStream(void); virtual operator bool(void) { return valid_; }; virtual bool operator!(void) { return !valid_; }; /** Assign HTTP body. Assigned object is not copied. Instead it is remembered and made available through Raw and Stream interfaces. Previously attached body is discarded. If 'ownership' is true then passed object is treated as being owned by this instance and destroyed in destructor or when discarded. */ virtual void Body(PayloadStreamInterface& body,bool ownership = true); // PayloadStreamInterface implemented methods virtual PayloadStreamInterface::Size_t Size(void) const; virtual bool Get(char* buf,int& size); virtual bool Get(PayloadStreamInterface& dest,int& size); virtual bool Put(const char* buf,PayloadStreamInterface::Size_t size); virtual int Timeout(void) const; virtual void Timeout(int to); virtual PayloadStreamInterface::Size_t Pos(void) const; virtual PayloadStreamInterface::Size_t Limit(void) const; }; class PayloadHTTPOutRaw: public PayloadHTTPOut, public PayloadRawInterface { public: PayloadHTTPOutRaw(const std::string& method,const std::string& url); PayloadHTTPOutRaw(int code,const std::string& reason,bool head_response = false); virtual ~PayloadHTTPOutRaw(void); virtual operator bool(void) { return valid_; }; virtual bool operator!(void) { return !valid_; }; /** Assign HTTP body. Assigned object is not copied. Instead it is remembered and made available through Raw and Stream interfaces. Previously attached body is discarded. If 'ownership' is true then passed object is treated as being owned by this instance and destroyed in destructor or when discarded. */ virtual void Body(PayloadRawInterface& body,bool ownership = true); // PayloadRawInterface implemented methods virtual char operator[](PayloadRawInterface::Size_t pos) const; virtual char* Content(PayloadRawInterface::Size_t pos = -1); virtual PayloadRawInterface::Size_t Size(void) const; virtual char* Insert(PayloadRawInterface::Size_t pos = 0,PayloadRawInterface::Size_t size = 0); virtual char* Insert(const char* s,PayloadRawInterface::Size_t pos = 0,PayloadRawInterface::Size_t size = -1); virtual char* Buffer(unsigned int num = 0); virtual PayloadRawInterface::Size_t BufferSize(unsigned int num = 0) const; virtual PayloadRawInterface::Size_t BufferPos(unsigned int num = 0) const; virtual bool Truncate(PayloadRawInterface::Size_t size); }; } // namespace ArcMCCHTTP #endif /* __ARC_PAYLOADHTTP_H__ */ nordugrid-arc-5.4.2/src/hed/mcc/http/PaxHeaders.7502/PayloadHTTP.cpp0000644000000000000000000000012413213445322023041 xustar000000000000000027 mtime=1512983250.864845 27 atime=1513200575.138709 30 ctime=1513200660.581754601 nordugrid-arc-5.4.2/src/hed/mcc/http/PayloadHTTP.cpp0000644000175000002070000012435113213445322023114 0ustar00mockbuildmock00000000000000// The code in this file is quate a mess. 
It is subject to cleaning // and simplification as soon as possible. Functions must be simplified // and functionality to be split into more functions. // Some methods of this class must be called in proper order to have // it function properly. A lot of proptections to be added. #ifdef HAVE_CONFIG_H #include #endif #include #include "PayloadHTTP.h" #include namespace ArcMCCHTTP { using namespace Arc; Arc::Logger PayloadHTTP::logger(Arc::Logger::getRootLogger(), "MCC.HTTP"); static std::string empty_string(""); static bool ParseHTTPVersion(const std::string& s,int& major,int& minor) { major=0; minor=0; const char* p = s.c_str(); if(strncasecmp(p,"HTTP/",5) != 0) return false; p+=5; char* e; major=strtol(p,&e,10); if(*e != '.') { major=0; return false; }; p=e+1; minor=strtol(p,&e,10); if(*e != 0) { major=0; minor=0; return false; }; return true; } // -------------------- PayloadHTTP ----------------------------- const std::string& PayloadHTTP::Attribute(const std::string& name) const { std::multimap::const_iterator it = attributes_.find(name); if(it == attributes_.end()) return empty_string; return it->second; } const std::list PayloadHTTP::Attributes(const std::string& name) const { std::list attrs; for(std::multimap::const_iterator attr = attributes_.begin(); attr != attributes_.end(); ++attr) { if(attr->first == name) attrs.push_back(attr->second); }; return attrs; } const std::multimap& PayloadHTTP::Attributes(void) const { return attributes_; } bool PayloadHTTP::AttributeMatch(const std::string& name, const std::string& value) const { std::multimap::const_iterator attr = attributes_.begin(); for(;attr != attributes_.end();++attr) { if(attr->first == name) { std::string sattr = Arc::lower(Arc::trim(attr->second," \r\n")); if(sattr == value) return true; }; }; return false; } PayloadHTTP::PayloadHTTP(void): valid_(false),version_major_(1),version_minor_(1), code_(0),length_(0),offset_(0),size_(0),end_(0),keep_alive_(true) { } PayloadHTTP::PayloadHTTP(const std::string& method,const std::string& url): valid_(false), uri_(url),version_major_(1),version_minor_(1),method_(method), code_(0),length_(0),offset_(0),size_(0),end_(0),keep_alive_(true) { // TODO: encode URI properly } PayloadHTTP::PayloadHTTP(int code,const std::string& reason): valid_(false), version_major_(1),version_minor_(1), code_(code),reason_(reason), length_(0),offset_(0),size_(0),end_(0),keep_alive_(true) { if(reason_.empty()) reason_="OK"; } PayloadHTTP::~PayloadHTTP(void) { } // ------------------- PayloadHTTPIn ---------------------------- bool PayloadHTTPIn::readtbuf(void) { int l = (sizeof(tbuf_)-1) - tbuflen_; if(l > 0) { if(stream_->Get(tbuf_+tbuflen_,l)) { tbuflen_ += l; tbuf_[tbuflen_]=0; } } return (tbuflen_>0); } bool PayloadHTTPIn::readline(std::string& line) { line.resize(0); for(;line.length()<4096;) { // keeping line size sane char* p = (char*)memchr(tbuf_,'\n',tbuflen_); if(p) { *p=0; line.append(tbuf_,p-tbuf_); tbuflen_-=(p-tbuf_)+1; memmove(tbuf_,p+1,tbuflen_+1); if((!line.empty()) && (line[line.length()-1] == '\r')) line.resize(line.length()-1); return true; }; line.append(tbuf_,tbuflen_); tbuflen_=0; if(!readtbuf()) break; }; tbuf_[tbuflen_]=0; return false; } bool PayloadHTTPIn::read(char* buf,int64_t& size) { char* sbuf = buf; if(tbuflen_ >= size) { memcpy(buf,tbuf_,size); memmove(tbuf_,tbuf_+size,tbuflen_-size+1); tbuflen_-=size; } else { memcpy(buf,tbuf_,tbuflen_); buf+=tbuflen_; int64_t l = size-tbuflen_; size=tbuflen_; tbuflen_=0; tbuf_[0]=0; for(;l;) { int l_ = (l>INT_MAX)?INT_MAX:l; 
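// (Get() takes an int size, so a 64-bit request is issued in INT_MAX-sized pieces.)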
if(!stream_->Get(buf,l_)) return (size>0); size+=l_; buf+=l_; l-=l_; }; }; return true; } bool PayloadHTTPIn::readline_chunked(std::string& line) { if(!chunked_) return readline(line); line.resize(0); for(;line.length()<4096;) { // keeping line size sane if(tbuflen_ <= 0) { if(!readtbuf()) break; }; char c; int64_t l = 1; if(!read_chunked(&c,l)) break; if(c == '\n') { if((!line.empty()) && (line[line.length()-1] == '\r')) line.resize(line.length()-1); return true; }; line.append(&c,1); // suboptimal }; return false; } bool PayloadHTTPIn::read_chunked(char* buf,int64_t& size) { if(!chunked_) return read(buf,size); int64_t bufsize = size; size = 0; if(chunked_ == CHUNKED_ERROR) return false; for(;bufsize>0;) { if(chunked_ == CHUNKED_EOF) break; if(chunked_ == CHUNKED_START) { // reading chunk size std::string line; chunked_ = CHUNKED_ERROR; if(!readline(line)) break; char* e; chunk_size_ = strtoll(line.c_str(),&e,16); if((*e != ';') && (*e != 0)) break; if(e == line.c_str()) break; if(chunk_size_ == 0) { chunked_ = CHUNKED_EOF; } else { chunked_ = CHUNKED_CHUNK; }; }; if(chunked_ == CHUNKED_CHUNK) { // reading chunk data int64_t l = bufsize; if(chunk_size_ < l) l = chunk_size_; chunked_ = CHUNKED_ERROR; if(!read(buf,l)) break; chunk_size_ -= l; size += l; bufsize -= l; buf += l; if(chunk_size_ <= 0) { chunked_ = CHUNKED_END; } else { chunked_ = CHUNKED_CHUNK; }; }; if(chunked_ == CHUNKED_END) { // reading CRLF at end of chunk std::string line; chunked_ = CHUNKED_ERROR; if(!readline(line)) break; if(!line.empty()) break; chunked_ = CHUNKED_START; }; }; return (size>0); } bool PayloadHTTPIn::flush_chunked(void) { if(!chunked_) return true; if(chunked_ == CHUNKED_EOF) return true; if(chunked_ == CHUNKED_ERROR) return false; const int bufsize = 1024; char* buf = new char[bufsize]; for(;;) { if(chunked_ == CHUNKED_EOF) break; if(chunked_ == CHUNKED_ERROR) break; int64_t l = bufsize; if(!read_chunked(buf,l)) break; }; delete[] buf; return (chunked_ == CHUNKED_EOF); } char* PayloadHTTPIn::find_multipart(char* buf,int64_t size) { char* p = buf; for(;;++p) { p = (char*)memchr(p,'\r',size-(p-buf)); if(!p) break; // no tag found int64_t l = (multipart_tag_.length()+2) - (size-(p-buf)); if(l > 0) { // filling buffer with necessary amount of information if(l > multipart_buf_.length()) { int64_t ll = multipart_buf_.length(); multipart_buf_.resize(l); l = l-ll; if(!read_chunked((char*)(multipart_buf_.c_str()+ll),l)) { p = NULL; break; }; multipart_buf_.resize(ll+l); } } int64_t pos = p-buf; ++pos; char c = '\0'; if(pos < size) { c = buf[pos]; } else if((pos-size) < multipart_buf_.length()) { c = multipart_buf_[pos-size]; }; if(c != '\n') continue; int tpos = 0; for(;tpos= multipart_tag_.length()) break; // tag found } return p; } bool PayloadHTTPIn::read_multipart(char* buf,int64_t& size) { if(!multipart_) return read_chunked(buf,size); if(multipart_ == MULTIPART_END) return false; if(multipart_ == MULTIPART_EOF) return false; int64_t bufsize = size; size = 0; if(!multipart_buf_.empty()) { // pick up previously loaded data if(bufsize >= multipart_buf_.length()) { memcpy(buf,multipart_buf_.c_str(),multipart_buf_.length()); size = multipart_buf_.length(); multipart_buf_.resize(0); } else { memcpy(buf,multipart_buf_.c_str(),bufsize); size = bufsize; multipart_buf_.erase(0,bufsize); }; } // read more data if needed if(size < bufsize) { int64_t l = bufsize - size; if(!read_chunked(buf+size,l)) return false; size += l; } // looking for tag const char* p = find_multipart(buf,size); if(p) { // tag found // hope 
nothing sends GBs in multipart multipart_buf_.insert(0,p,size-(p-buf)); size = (p-buf); // TODO: check if it is last tag multipart_ = MULTIPART_END; }; logger.msg(Arc::DEBUG,"<< %s",std::string(buf,size)); return true; } bool PayloadHTTPIn::flush_multipart(void) { // TODO: protect against insame length of body if(!multipart_) return true; if(multipart_ == MULTIPART_ERROR) return false; std::string::size_type pos = 0; for(;multipart_ != MULTIPART_EOF;) { pos = multipart_buf_.find('\r',pos); if(pos == std::string::npos) { pos = 0; // read just enough int64_t l = multipart_tag_.length()+4; multipart_buf_.resize(l); if(!read_chunked((char*)(multipart_buf_.c_str()),l)) return false; multipart_buf_.resize(l); continue; } multipart_buf_.erase(0,pos); // suboptimal pos = 0; int64_t l = multipart_tag_.length()+4; if(l > multipart_buf_.length()) { int64_t ll = multipart_buf_.length(); multipart_buf_.resize(l); l = l - ll; if(!read_chunked((char*)(multipart_buf_.c_str()+ll),l)) return false; ll += l; if(ll < multipart_buf_.length()) return false; // can't read enough data }; ++pos; if(multipart_buf_[pos] != '\n') continue; ++pos; if(strncmp(multipart_buf_.c_str()+pos,multipart_tag_.c_str(),multipart_tag_.length()) != 0) continue; pos+=multipart_tag_.length(); if(multipart_buf_[pos] != '-') continue; ++pos; if(multipart_buf_[pos] != '-') continue; // end tag found multipart_ = MULTIPART_EOF; }; return true; } bool PayloadHTTPIn::read_header(void) { std::string line; for(;readline_chunked(line) && (!line.empty());) { logger.msg(Arc::DEBUG,"< %s",line); std::string::size_type pos = line.find(':'); if(pos == std::string::npos) continue; std::string name = line.substr(0,pos); for(++pos;pos(lower(name),line.substr(pos))); } else { attributes_.insert(std::pair(lower(name),"")); }; }; std::map::iterator it; it=attributes_.find("content-length"); if(it != attributes_.end()) { length_=strtoll(it->second.c_str(),NULL,10); }; it=attributes_.find("content-range"); if(it != attributes_.end()) { const char* token = it->second.c_str(); const char* p = token; for(;*p;p++) if(isspace(*p)) break; int64_t range_start,range_end,entity_size; if(strncasecmp("bytes",token,p-token) == 0) { for(;*p;p++) if(!isspace(*p)) break; char *e; range_start=strtoull(p,&e,10); if((*e) == '-') { p=e+1; range_end=strtoull(p,&e,10); p=e; if(((*e) == '/') || ((*e) == 0)) { if(range_start <= range_end) { offset_=range_start; end_=range_end+1; }; if((*p) == '/') { p++; entity_size=strtoull(p,&e,10); if((*e) == 0) { size_=entity_size; }; }; }; }; }; }; it=attributes_.find("transfer-encoding"); if(it != attributes_.end()) { if(strcasecmp(it->second.c_str(),"chunked") != 0) { // Non-implemented encoding return false; }; chunked_= CHUNKED_START; }; it=attributes_.find("connection"); if(it != attributes_.end()) { if(strcasecmp(it->second.c_str(),"keep-alive") == 0) { keep_alive_=true; } else { keep_alive_=false; }; }; it=attributes_.find("content-type"); if(it != attributes_.end()) { if(strncasecmp(it->second.c_str(),"multipart/",10) == 0) { // TODO: more bulletproof approach is needed std::string lline = lower(it->second); const char* boundary = strstr(lline.c_str()+10,"boundary="); if(!boundary) return false; boundary = it->second.c_str()+(boundary-lline.c_str()); //const char* boundary = strcasestr(it->second.c_str()+10,"boundary="); const char* tag_start = strchr(boundary,'"'); const char* tag_end = NULL; if(!tag_start) { tag_start = boundary + 9; tag_end = strchr(tag_start,' '); if(!tag_end) tag_end = tag_start + strlen(tag_start); } else { 
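// boundary value is quoted: skip the opening quote and look for the closing one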
++tag_start; tag_end = strchr(tag_start,'"'); } if(!tag_end) return false; multipart_tag_ = std::string(tag_start,tag_end-tag_start); if(multipart_tag_.empty()) return false; multipart_ = MULTIPART_START; multipart_tag_.insert(0,"--"); multipart_buf_.resize(0); } } return true; } bool PayloadHTTPIn::parse_header(void) { method_.resize(0); code_=0; keep_alive_=false; multipart_ = MULTIPART_NONE; multipart_tag_ = ""; chunked_=CHUNKED_NONE; // Skip empty lines std::string line; for(;line.empty();) if(!readline(line)) { method_="END"; // Special name to represent closed connection length_=0; return true; }; logger.msg(Arc::DEBUG,"< %s",line); // Parse request/response line std::string::size_type pos2 = line.find(' '); if(pos2 == std::string::npos) return false; std::string word1 = line.substr(0,pos2); // Identify request/response if(ParseHTTPVersion(line.substr(0,pos2),version_major_,version_minor_)) { // Response std::string::size_type pos3 = line.find(' ',pos2+1); if(pos3 == std::string::npos) return false; code_=strtol(line.c_str()+pos2+1,NULL,10); reason_=line.substr(pos3+1); if(code_ == 100) { // TODO: skip 100 response } } else { // Request std::string::size_type pos3 = line.rfind(' '); if((pos3 == pos2) || (pos2 == std::string::npos)) return false; if(!ParseHTTPVersion(line.substr(pos3+1),version_major_,version_minor_)) return false; method_=line.substr(0,pos2); uri_=line.substr(pos2+1,pos3-pos2-1); }; if((version_major_ > 1) || ((version_major_ == 1) && (version_minor_ >= 1))) { keep_alive_=true; }; // Parse header lines length_=-1; chunked_=CHUNKED_NONE; if(!read_header()) return false; if(multipart_ == MULTIPART_START) { attributes_.erase("content-type"); // looking for multipart std::string line; for(;;) { if(!readline_chunked(line)) return false; if(line.length() == multipart_tag_.length()) { if(strncmp(line.c_str(),multipart_tag_.c_str(),multipart_tag_.length()) == 0) { multipart_ = MULTIPART_BODY; break; }; }; }; // reading additional header lines chunked_t chunked = chunked_; if(!read_header()) return false; if(multipart_ != MULTIPART_BODY) return false; // nested multipart if(chunked_ != chunked) return false; // can't change transfer encoding per part // TODO: check if content-length can be defined here // now real body follows } // In case of keep_alive (HTTP1.1) there must be length specified if(keep_alive_ && (!chunked_) && (length_ == -1)) length_=0; // If size of object was not reported then try to deduce it. 
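// --- Editor's note: standalone illustration, not part of the original source. ---
// The deduction below combines fields filled in read_header() earlier:
// offset_/end_/size_ come from "Content-Range: bytes <start>-<end>/<total>"
// and length_ from "Content-Length".  A simplified version of that mapping,
// shown as a hypothetical free function (it would live outside parse_header();
// assumes <cstdio> and <stdint.h> are available, and unlike the parser above
// it requires the "/<total>" part to be present):
static bool example_parse_content_range(const char* value,
                                        int64_t& offset, int64_t& end, int64_t& total) {
  // "bytes 0-1023/4096" -> offset=0, end=1024 (exclusive), total=4096
  long long s = 0, e = 0, t = 0;
  if(sscanf(value, "bytes %lld-%lld/%lld", &s, &e, &t) != 3) return false;
  if(s > e) return false;
  offset = s; end = e + 1; total = t;
  return true;
}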
if((size_ == 0) && (length_ != -1)) size_=offset_+length_; return true; } bool PayloadHTTPIn::get_body(void) { if(fetched_) return true; // Already fetched body fetched_=true; // Even attempt counts valid_=false; // But object is invalid till whole body is available if(body_) free(body_); body_ = NULL; body_size_ = 0; if(head_response_ && (code_ == 200)) { // Successful response to HEAD contains no body valid_=true; flush_multipart(); flush_chunked(); body_read_=true; return true; }; char* result = NULL; int64_t result_size = 0; if(length_ == 0) { valid_=true; body_read_=true; return true; } else if(length_ > 0) { // TODO: combination of chunked and defined length is probably impossible // TODO: protect against insane length_ result=(char*)malloc(length_+1); if(!read_multipart(result,length_)) { free(result); return false; }; result_size=length_; } else { // length undefined // Read till connection closed or some logic reports eof for(;;) { int64_t chunk_size = 4096; char* new_result = (char*)realloc(result,result_size+chunk_size+1); if(new_result == NULL) { free(result); return false; }; result=new_result; if(!read_multipart(result+result_size,chunk_size)) break; // TODO: logical size is not always same as end of body // TODO: protect against insane length of body result_size+=chunk_size; }; }; if (result == NULL) { return false; } result[result_size]=0; // Attach result to buffer exposed to user body_ = result; body_size_ = result_size; // If size of object was not reported then try to deduce it. if(size_ == 0) size_=offset_+result_size; valid_=true; // allign to end of message flush_multipart(); flush_chunked(); body_read_=true; return true; } PayloadHTTPIn::PayloadHTTPIn(PayloadStreamInterface& stream,bool own,bool head_response): head_response_(head_response),chunked_(CHUNKED_NONE),chunk_size_(0), multipart_(MULTIPART_NONE),stream_(&stream),stream_offset_(0), stream_own_(own),fetched_(false),header_read_(false),body_read_(false), body_(NULL),body_size_(0) { tbuf_[0]=0; tbuflen_=0; if(!parse_header()) { error_ = IString("Failed to parse HTTP header").str(); return; } header_read_=true; valid_=true; } PayloadHTTPIn::~PayloadHTTPIn(void) { // allign to end of message (maybe not needed with Sync() exposed) flush_multipart(); flush_chunked(); if(stream_ && stream_own_) delete stream_; if(body_) ::free(body_); } char PayloadHTTPIn::operator[](PayloadRawInterface::Size_t pos) const { if(!((PayloadHTTPIn*)this)->get_body()) return 0; if(!body_) return 0; if(pos == -1) pos = offset_; if(pos < offset_) return 0; pos -= offset_; if(pos >= body_size_) return 0; return body_[pos]; } char* PayloadHTTPIn::Content(PayloadRawInterface::Size_t pos) { if(!get_body()) return NULL; if(!body_) return 0; if(pos == -1) pos = offset_; if(pos < offset_) return NULL; pos -= offset_; if(pos >= body_size_) return NULL; return body_+pos; } PayloadRawInterface::Size_t PayloadHTTPIn::Size(void) const { if(!valid_) return 0; PayloadRawInterface::Size_t size = 0; if(size_ > 0) { size = size_; } else if(end_ > 0) { size = end_; } else if(length_ >= 0) { size = offset_ + length_; } else { // Only do it if no other way of determining size worked. 
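// Editor's note (illustration only, not part of the original source): Size()
// above prefers, in this order, the total from Content-Range, then the
// exclusive end of the advertised range, then offset plus Content-Length,
// and the fallback just below fetches the body.  The same precedence as a
// hypothetical standalone helper:
static int64_t example_size_precedence(int64_t total, int64_t range_end,
                                       int64_t offset, int64_t length,
                                       int64_t body_size) {
  if(total > 0) return total;              // "Content-Range: bytes .../<total>"
  if(range_end > 0) return range_end;      // exclusive end of the range
  if(length >= 0) return offset + length;  // "Content-Length" relative to offset
  return body_size;                        // last resort: body actually read
}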
if(((PayloadHTTPIn*)this)->get_body()) size = body_size_; } return size; } char* PayloadHTTPIn::Insert(PayloadRawInterface::Size_t pos,PayloadRawInterface::Size_t size) { return NULL; } char* PayloadHTTPIn::Insert(const char* s,PayloadRawInterface::Size_t pos,PayloadRawInterface::Size_t size) { return NULL; } char* PayloadHTTPIn::Buffer(unsigned int num) { if(num != 0) return NULL; if(!get_body()) return NULL; return body_; } PayloadRawInterface::Size_t PayloadHTTPIn::BufferSize(unsigned int num) const { if(num != 0) return 0; if(!((PayloadHTTPIn*)this)->get_body()) return 0; return body_size_; } PayloadRawInterface::Size_t PayloadHTTPIn::BufferPos(unsigned int num) const { if(num != 0) return 0; return offset_; } bool PayloadHTTPIn::Truncate(PayloadRawInterface::Size_t size) { if(!get_body()) return false; if(size <= offset_) { if(body_) free(body_); body_ = NULL; body_size_ = 0; }; if((size-offset_) <= body_size_) { body_size_ = (size-offset_); return true; }; return false; } bool PayloadHTTPIn::Get(char* buf,int& size) { if(!valid_) return false; if(fetched_) { // Read from buffer if(stream_offset_ < body_size_) { uint64_t l = body_size_ - stream_offset_; if(l>size) l=size; ::memcpy(buf,body_+stream_offset_,l); size=l; stream_offset_+=l; return true; }; return false; }; // Read directly from stream // TODO: Check for methods and responses which can't have body if(length_ == 0) { // No body size=0; body_read_=true; return false; }; if(length_ > 0) { // Ordinary stream with known length int64_t bs = length_-stream_offset_; if(bs == 0) { size=0; return false; }; // End of content if(bs > size) bs=size; if(!read_multipart(buf,bs)) { valid_=false; // This is not expected, hence invalidate object size=bs; return false; }; size=bs; stream_offset_+=bs; if(stream_offset_ >= length_) body_read_=true; return true; }; // Ordinary stream with no length known int64_t tsize = size; bool r = read_multipart(buf,tsize); if(r) stream_offset_+=tsize; if(!r) body_read_=true; size=tsize; // TODO: adjust logical parameters of buffers return r; } // Stream interface is meant to be used only // for reading HTTP body. bool PayloadHTTPIn::Put(const char* /* buf */,PayloadStreamInterface::Size_t /* size */) { return false; } int PayloadHTTPIn::Timeout(void) const { if(!stream_) return 0; return stream_->Timeout(); } void PayloadHTTPIn::Timeout(int to) { if(stream_) stream_->Timeout(to); } PayloadStreamInterface::Size_t PayloadHTTPIn::Pos(void) const { if(!stream_) return 0; return offset_+stream_offset_; } PayloadStreamInterface::Size_t PayloadHTTPIn::Limit(void) const { if(length_ >= 0) return (offset_ + length_); return (offset_ + body_size_); } bool PayloadHTTPIn::Sync(void) { if(!valid_) return false; if(!header_read_) return false; if(fetched_) return true; // For multipart data - read till end tag // If data is chunked then it is enough to just read till // chunks are over. 
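// --- Editor's note: standalone illustration, not part of the original source. ---
// The chunked framing consumed by read_chunked()/flush_chunked() above looks
// like "4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n": a hexadecimal size line, the
// chunk payload, a trailing CRLF, repeated until a zero-size chunk ends the
// body.  A minimal in-memory decoder (hypothetical helper; assumes <string>
// and <cstdlib>; real traffic is decoded incrementally from the stream, and
// trailer headers after the last chunk are not handled here):
static std::string example_dechunk(const std::string& wire) {
  std::string out;
  std::string::size_type p = 0;
  for(;;) {
    std::string::size_type eol = wire.find("\r\n", p);
    if(eol == std::string::npos) break;                    // malformed input
    long long len = strtoll(wire.c_str() + p, NULL, 16);   // hex chunk size
    if(len <= 0) break;                                    // "0" chunk terminates body
    out.append(wire, eol + 2, (std::string::size_type)len);
    p = eol + 2 + (std::string::size_type)len + 2;         // skip payload + CRLF
  }
  // e.g. example_dechunk("4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n") == "Wikipedia"
  return out;
}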
if(multipart_ || chunked_) { bool r = true; if(!flush_multipart()) r=false; // not really neaded but keeps variables ok if(!flush_chunked()) r=false; if(r) body_read_ = true; return r; }; // For data without any tags just read till end reached for(;!body_read_;) { char buf[1024]; int size = sizeof(buf); bool r = Get(buf,size); if(!r) break; }; if(body_read_) return true; return false; } // ------------------- PayloadHTTPOut --------------------------- void PayloadHTTPOut::Attribute(const std::string& name,const std::string& value) { attributes_.insert(std::pair(lower(name),value)); } PayloadHTTPOut::PayloadHTTPOut(const std::string& method,const std::string& url): PayloadHTTP(method,url), head_response_(false),rbody_(NULL),sbody_(NULL),sbody_size_(0), body_own_(false),to_stream_(false),use_chunked_transfer_(false), stream_offset_(0),stream_finished_(false), enable_header_out_(true), enable_body_out_(true) { valid_ = true; } PayloadHTTPOut::PayloadHTTPOut(int code,const std::string& reason,bool head_response): PayloadHTTP(code,reason), head_response_(head_response),rbody_(NULL),sbody_(NULL),sbody_size_(0), body_own_(false),to_stream_(false),use_chunked_transfer_(false), stream_offset_(0), stream_finished_(false), enable_header_out_(true), enable_body_out_(true) { valid_ = true; } PayloadHTTPOut::~PayloadHTTPOut(void) { if(rbody_ && body_own_) delete rbody_; if(sbody_ && body_own_) delete sbody_; } PayloadHTTPOutStream::PayloadHTTPOutStream(const std::string& method,const std::string& url): PayloadHTTPOut(method,url) /*,chunk_size_offset_(0)*/ { } PayloadHTTPOutStream::PayloadHTTPOutStream(int code,const std::string& reason,bool head_response): PayloadHTTPOut(code,reason,head_response) /*,chunk_size_offset_(0)*/ { } PayloadHTTPOutStream::~PayloadHTTPOutStream(void) { } PayloadHTTPOutRaw::PayloadHTTPOutRaw(const std::string& method,const std::string& url): PayloadHTTPOut(method,url) { } PayloadHTTPOutRaw::PayloadHTTPOutRaw(int code,const std::string& reason,bool head_response): PayloadHTTPOut(code,reason,head_response) { } PayloadHTTPOutRaw::~PayloadHTTPOutRaw(void) { } PayloadRawInterface::Size_t PayloadHTTPOut::body_size(void) const { if(rbody_) { PayloadRawInterface::Size_t size = 0; for(int n = 0;rbody_->Buffer(n);++n) { size += rbody_->BufferSize(n); }; return size; } else if(sbody_) { return sbody_size_; }; return 0; } PayloadRawInterface::Size_t PayloadHTTPOut::data_size(void) const { if(rbody_) { return (rbody_->Size()); }; if(sbody_) { return (sbody_->Size()); }; return 0; } bool PayloadHTTPOut::make_header(bool to_stream) { header_.resize(0); std::string header; if(method_.empty() && (code_ == 0)) { error_ = IString("Invalid HTTP object can't produce result").str(); return false; }; // Computing length of Body part int64_t length = 0; std::string range_header; use_chunked_transfer_ = false; if((method_ != "GET") && (method_ != "HEAD")) { int64_t start = 0; if(head_response_ && (code_==HTTP_OK)) { length = data_size(); } else { if(sbody_) { if(sbody_->Limit() > sbody_->Pos()) { length = sbody_->Limit() - sbody_->Pos(); }; start = sbody_->Pos(); } else if(rbody_) { for(int n=0;;++n) { if(rbody_->Buffer(n) == NULL) break; length+=rbody_->BufferSize(n); }; start = rbody_->BufferPos(0); } else { length = 0; start = 0; }; }; if(length != data_size()) { // Add range definition if Body represents part of logical buffer size // and adjust HTTP code accordingly int64_t end = start+length; std::string length_str; std::string range_str; if(end <= data_size()) { length_str=tostring(data_size()); 
} else { length_str="*"; }; if(end > start) { range_str=tostring(start)+"-"+tostring(end-1); if(code_ == HTTP_OK) { code_=HTTP_PARTIAL; reason_="Partial content"; }; } else { range_str="*"; if(code_ == HTTP_OK) { code_=HTTP_RANGE_NOT_SATISFIABLE; reason_="Range not satisfiable"; }; }; range_header="Content-Range: bytes "+range_str+"/"+length_str+"\r\n"; }; if(length > 0 || !sbody_) { range_header+="Content-Length: "+tostring(length)+"\r\n"; } else { // If computed length is 0 for stream source that may also mean it is // not known in advance. In this case either connection closing or // chunked encoding may be used. Chunked is better because it // allows to avoid reconnection. // But chunked can only be used if writing to stream right now. // TODO: extend it to support chunked in Raw buffer mode. It // is possible by inserting small buffers with lengths. if(to_stream) { range_header+="Transfer-Encoding: chunked\r\n"; use_chunked_transfer_ = true; } else { // As last resort try to force connection close. // Although I have no idea how it shol dwork for PUT keep_alive_ = false; }; }; }; // Starting header if(!method_.empty()) { header=method_+" "+uri_+ " HTTP/"+tostring(version_major_)+"."+tostring(version_minor_)+"\r\n"; } else if(code_ != 0) { header="HTTP/"+tostring(version_major_)+"."+tostring(version_minor_)+" "+ tostring(code_)+" "+reason_+"\r\n"; } else { return false; }; if((version_major_ == 1) && (version_minor_ == 1) && (!method_.empty())) { // Adding Host attribute to request if not present. std::map::iterator it = attributes_.find("host"); if(it == attributes_.end()) { std::string host; if(!uri_.empty()) { std::string::size_type p1 = uri_.find("://"); if(p1 != std::string::npos) { std::string::size_type p2 = uri_.find('/',p1+3); if(p2 == std::string::npos) p2 = uri_.length(); host=uri_.substr(p1+3,p2-p1-3); }; }; header+="Host: "+host+"\r\n"; }; }; // Adding previously generated range specifier header+=range_header; bool keep_alive = false; if((version_major_ == 1) && (version_minor_ == 1)) keep_alive=keep_alive_; if(keep_alive) { header+="Connection: keep-alive\r\n"; } else { header+="Connection: close\r\n"; }; for(std::map::iterator a = attributes_.begin();a!=attributes_.end();++a) { header+=(a->first)+": "+(a->second)+"\r\n"; }; header+="\r\n"; logger.msg(Arc::DEBUG,"> %s",header); header_ = header; to_stream_ = to_stream; length_ = length; return true; } bool PayloadHTTPOut::remake_header(bool to_stream) { if(header_.empty() || (to_stream_ != to_stream)) return make_header(to_stream); return true; } bool PayloadHTTPOut::Flush(PayloadStreamInterface& stream) { if(enable_header_out_) { if(!FlushHeader(stream)) return false; } if(enable_body_out_) { if(!FlushBody(stream)) return false; } return true; } bool PayloadHTTPOut::FlushHeader(PayloadStreamInterface& stream) { if(!make_header(true)) return false; if(!stream.Put(header_)) { error_ = IString("Failed to write header to output stream").str(); return false; }; return true; } bool PayloadHTTPOut::FlushBody(PayloadStreamInterface& stream) { // TODO: process 100 request/response if((length_ > 0) || (use_chunked_transfer_)) { if(sbody_) { // stream to stream transfer // TODO: choose optimal buffer size // TODO: parallel read and write for better performance int tbufsize = ((length_ <= 0) || (length_>1024*1024))?(1024*1024):length_; char* tbuf = new char[tbufsize]; if(!tbuf) { error_ = IString("Memory allocation error").str(); return false; }; for(;;) { int lbuf = tbufsize; if(!sbody_->Get(tbuf,lbuf)) break; if(lbuf == 0) 
continue; if(use_chunked_transfer_) { if(!stream.Put(inttostr(lbuf,16)+"\r\n")) { error_ = IString("Failed to write body to output stream").str(); delete[] tbuf; return false; }; }; if(!stream.Put(tbuf,lbuf)) { error_ = IString("Failed to write body to output stream").str(); delete[] tbuf; return false; }; if(use_chunked_transfer_) { if(!stream.Put("\r\n")) { error_ = IString("Failed to write body to output stream").str(); delete[] tbuf; return false; }; }; }; delete[] tbuf; tbuf = NULL; if(use_chunked_transfer_) { if(!stream.Put("0\r\n\r\n")) { error_ = IString("Failed to write body to output stream").str(); return false; }; }; } else if(rbody_) { for(int n=0;;++n) { char* tbuf = rbody_->Buffer(n); if(tbuf == NULL) break; int64_t lbuf = rbody_->BufferSize(n); if(lbuf > 0) { if(use_chunked_transfer_) { if(!stream.Put(inttostr(lbuf,16)+"\r\n")) { error_ = IString("Failed to write body to output stream").str(); return false; }; }; if(!stream.Put(tbuf,lbuf)) { error_ = IString("Failed to write body to output stream").str(); return false; }; if(use_chunked_transfer_) { if(!stream.Put("\r\n")) { error_ = IString("Failed to write body to output stream").str(); return false; }; }; }; }; if(use_chunked_transfer_) { if(!stream.Put("0\r\n\r\n")) { error_ = IString("Failed to write body to output stream").str(); return false; }; }; } else { if(use_chunked_transfer_) { if(!stream.Put("0\r\n\r\n")) { error_ = IString("Failed to write body to output stream").str(); return false; }; }; }; }; return true; } void PayloadHTTPOutRaw::Body(PayloadRawInterface& body,bool ownership) { if(rbody_ && body_own_) delete rbody_; if(sbody_ && body_own_) delete sbody_; sbody_ = NULL; rbody_=&body; body_own_=ownership; } void PayloadHTTPOutStream::Body(PayloadStreamInterface& body,bool ownership) { if(rbody_ && body_own_) delete rbody_; if(sbody_ && body_own_) delete sbody_; rbody_ = NULL; sbody_=&body; body_own_=ownership; sbody_size_ = 0; PayloadStreamInterface::Size_t pos = sbody_->Pos(); PayloadStreamInterface::Size_t size = sbody_->Size(); PayloadStreamInterface::Size_t limit = sbody_->Limit(); if((size == 0) || (size > limit)) size = limit; if(pos < size) sbody_size_ = (size-pos); } void PayloadHTTPOut::ResetOutput(bool enable_header, bool enable_body) { stream_offset_ = 0; stream_finished_ = false; // Because we can't/do not want to reset state // body stream then actual body size need to be // recomputed. 
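// Editor's note (illustration only, not part of the original source): the
// recomputation below repeats the logic of PayloadHTTPOutStream::Body():
// the remaining body is whatever lies between the stream's current position
// and the smaller of its logical size and its limit.  As a hypothetical
// standalone helper:
static int64_t example_stream_body_size(int64_t pos, int64_t size, int64_t limit) {
  if((size == 0) || (size > limit)) size = limit;   // size unknown or capped by limit
  return (pos < size) ? (size - pos) : 0;           // e.g. pos=100, limit=600 -> 500
}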
sbody_size_ = 0; if(sbody_) { PayloadStreamInterface::Size_t pos = sbody_->Pos(); PayloadStreamInterface::Size_t size = sbody_->Size(); PayloadStreamInterface::Size_t limit = sbody_->Limit(); if((size == 0) || (size > limit)) size = limit; if(pos < size) sbody_size_ = (size-pos); } enable_header_out_ = enable_header; enable_body_out_ = enable_body; } char PayloadHTTPOutRaw::operator[](PayloadRawInterface::Size_t pos) const { if(!((PayloadHTTPOutRaw&)(*this)).remake_header(false)) return 0; if(pos == -1) pos = 0; if(pos < 0) return 0; if(pos < header_.length()) { return header_[pos]; }; pos -= header_.length(); if(rbody_) { return rbody_->operator[](pos); }; if(sbody_) { // Not supporting direct read from stream body }; return 0; } char* PayloadHTTPOutRaw::Content(PayloadRawInterface::Size_t pos) { if(!remake_header(false)) return NULL; if(pos == -1) pos = 0; if(pos < 0) return NULL; if(pos < header_.length()) { return (char*)(header_.c_str()+pos); }; pos -= header_.length(); if(rbody_) { return rbody_->Content(pos); }; if(sbody_) { // Not supporting content from stream body }; return NULL; } PayloadRawInterface::Size_t PayloadHTTPOutRaw::Size(void) const { if(!valid_) return 0; // Here Size() of Stream and Raw are conflicting. Currently we just // hope that nothing will be interested in logical size of stream. // TODO: either separate Size()s or implement chunked for Raw. if(!((PayloadHTTPOutRaw&)(*this)).remake_header(false)) return 0; return header_.length()+body_size(); } char* PayloadHTTPOutRaw::Insert(PayloadRawInterface::Size_t pos,PayloadRawInterface::Size_t size) { // Not allowing to manipulate body content through this interface return NULL; } char* PayloadHTTPOutRaw::Insert(const char* s,PayloadRawInterface::Size_t pos,PayloadRawInterface::Size_t size) { return NULL; } char* PayloadHTTPOutRaw::Buffer(unsigned int num) { if(!remake_header(false)) return NULL; if(num == 0) { return (char*)(header_.c_str()); }; --num; if(rbody_) { return rbody_->Buffer(num); }; if(sbody_) { // Not supporting buffer access to stream body }; return NULL; } PayloadRawInterface::Size_t PayloadHTTPOutRaw::BufferSize(unsigned int num) const { if(!((PayloadHTTPOutRaw&)(*this)).remake_header(false)) return 0; if(num == 0) { return header_.length(); }; --num; if(rbody_) { return rbody_->BufferSize(num); }; if(sbody_) { // Not supporting buffer access to stream body }; return 0; } PayloadRawInterface::Size_t PayloadHTTPOutRaw::BufferPos(unsigned int num) const { if(num == 0) { return 0; }; if(!((PayloadHTTPOutRaw&)(*this)).remake_header(false)) return 0; PayloadRawInterface::Size_t pos = header_.length(); if(rbody_) { --num; int n = 0; for(n=0;nBuffer(n)) break; pos += rbody_->BufferSize(n); }; return pos; }; if(sbody_) { // Not supporting buffer access to stream body }; return pos; } bool PayloadHTTPOutRaw::Truncate(PayloadRawInterface::Size_t size) { // TODO: Review it later. Truncate may be acting logically on *body_. if(!remake_header(false)) return false; if(size <= header_.length()) { if(rbody_ && body_own_) delete rbody_; if(sbody_ && body_own_) delete sbody_; rbody_=NULL; sbody_=NULL; header_.resize(size); return true; }; if(rbody_) { return rbody_->Truncate(size-header_.length()); }; if(sbody_) { // Stream body does not support Truncate yet }; return false; } PayloadRawInterface::Size_t PayloadHTTPOutStream::Size(void) const { if(!valid_) return 0; // Here Size() of Stream and Raw are conflicting. Currently we just // hope that nothing will be interested in logical size of stream. 
// TODO: either separate Size()s or implement chunked for Raw. if(!((PayloadHTTPOutStream&)(*this)).remake_header(true)) return 0; return header_.length()+body_size(); } /* int PayloadHTTPOutStream::chunk_size_get(char* buf,int size,int l,uint64_t chunk_size) { if (chunk_size_str_.empty()) { // Generating new chunk size chunk_size_str_ = inttostr(chunk_size,16)+"\r\n"; chunk_size_offset_ = 0; }; if(chunk_size_offset_ < chunk_size_str_.length()) { // copy chunk size std::string::size_type cs = chunk_size_str_.length() - chunk_size_offset_; if(cs>(size-l)) cs=(size-l); ::memcpy(buf+l,chunk_size_str_.c_str()+chunk_size_offset_,cs); l+=cs; chunk_size_offset_+=cs; }; return l; } */ bool PayloadHTTPOutStream::Get(char* buf,int& size) { if(!valid_) return false; if(!remake_header(true)) return false; if(stream_finished_) return false; // Read header uint64_t bo = 0; // buf offset uint64_t bs = enable_header_out_?header_.length():0; // buf size int l = 0; if(l >= size) { size = l; return true; }; if((bo+bs) > stream_offset_) { const char* p = header_.c_str(); p+=(stream_offset_-bo); bs-=(stream_offset_-bo); if(bs>(size-l)) bs=(size-l); ::memcpy(buf+l,p,bs); l+=bs; stream_offset_+=bs; //chunk_size_str_ = ""; chunk_size_offset_ = 0; }; bo+=bs; if(l >= size) { size = l; return true; }; // buffer is full // Read data if(rbody_ && enable_body_out_) { /* This code is only needed if stream and raw are mixed. Currently it is not possible hence it is not needed. But code is kept for future use. Code is not tested. for(unsigned int num = 0;;++num) { if(l >= size) { size = l; return true; }; // buffer is full const char* p = rbody_->Buffer(num); if(!p) { // No more buffers if(use_chunked_transfer_) { l = chunk_size_get(buf,size,l,0); }; break; }; bs = rbody_->BufferSize(num); if(bs <= 0) continue; // Protection against empty buffers if((bo+bs) > stream_offset_) { if((use_chunked_transfer_) && (bo == stream_offset_)) { l = chunk_size_get(buf,size,l,bs); if(l >= size) { size = l; return true; }; // buffer is full }; p+=(stream_offset_-bo); bs-=(stream_offset_-bo); if(bs>(size-l)) bs=(size-l); ::memcpy(buf+l,p,bs); l+=bs; stream_offset_+=bs; chunk_size_str_ = ""; chunk_size_offset_ = 0; }; bo+=bs; }; size = l; if(l > 0) return true; return false; */ size = 0; return false; }; if(sbody_ && enable_body_out_) { if(use_chunked_transfer_) { // It is impossible to know size of chunk // in advance. So first prelimnary size is // is generated and later adjusted. // The problem is that if supplied buffer is // not enough for size and at least one // byte of data that can cause infinite loop. // To avoid that false is returned. // So in case of very short buffer transmission // will fail. std::string chunk_size_str = inttostr(size,16)+"\r\n"; std::string::size_type cs = chunk_size_str.length(); if((cs+2+1) > (size-l)) { // too small buffer size = l; return (l > 0); }; int s = size-l-cs-2; if(sbody_->Get(buf+l+cs,s)) { if(s > 0) { chunk_size_str = inttostr(s,16)+"\r\n"; if(chunk_size_str.length() > cs) { // paranoic size = 0; return false; }; ::memset(buf+l,'0',cs); ::memcpy(buf+l+(cs-chunk_size_str.length()),chunk_size_str.c_str(),chunk_size_str.length()); ::memcpy(buf+l+cs+s,"\r\n",2); stream_offset_+=s; l+=(cs+s+2); }; size = l; return true; }; // Write 0 chunk size first time. Any later request must just fail. 
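// --- Editor's note: standalone illustration, not part of the original source. ---
// The chunked output produced in this class and in FlushBody() wraps every
// buffer as "<hex length>\r\n<data>\r\n" and terminates the body with the
// empty chunk "0\r\n\r\n" written just below.  A minimal encoder for one
// buffer (hypothetical helper; assumes <string> and <cstdio>):
static std::string example_make_chunk(const char* data, int len) {
  if(len <= 0) return "0\r\n\r\n";   // final, empty chunk ends the body
  char size_line[32];
  snprintf(size_line, sizeof(size_line), "%x\r\n", (unsigned int)len);  // hex size line
  return std::string(size_line) + std::string(data, len) + "\r\n";
}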
if(5 > (size-l)) { // too small buffer size = l; return (l > 0); }; ::memcpy(buf+l,"0\r\n\r\n",5); l+=5; size = l; stream_finished_ = true; return true; } else { int s = size-l; if(sbody_->Get(buf+l,s)) { stream_offset_+=s; l+=s; size = l; return true; }; stream_finished_ = true; }; size = l; return false; }; size = l; if(l > 0) return true; return false; } bool PayloadHTTPOutStream::Get(PayloadStreamInterface& dest,int& size) { if((stream_offset_ > 0) || (size >= 0)) { // If it is not first call or if size control is requested // then convenience method PayloadStreamInterface::Get is // used, which finally calls PayloadHTTPOutStream::Get. return PayloadStreamInterface::Get(dest,size); } // But if whole content is requested at once we use faster Flush* methods Flush(dest); return false; // stream finished } // Stream interface is meant to be used only for reading. bool PayloadHTTPOutStream::Put(const char* /* buf */,PayloadStreamInterface::Size_t /* size */) { return false; } int PayloadHTTPOutStream::Timeout(void) const { if(!sbody_) return 0; return sbody_->Timeout(); } void PayloadHTTPOutStream::Timeout(int to) { if(sbody_) sbody_->Timeout(to); } PayloadStreamInterface::Size_t PayloadHTTPOutStream::Pos(void) const { return stream_offset_; } PayloadStreamInterface::Size_t PayloadHTTPOutStream::Limit(void) const { if(!((PayloadHTTPOutStream&)(*this)).remake_header(true)) return 0; PayloadStreamInterface::Size_t limit = 0; if(enable_header_out_) limit += header_.length(); if(enable_body_out_) limit += body_size(); return limit; } //------------------------------------------------------------------- } // namespace ArcMCCHTTP nordugrid-arc-5.4.2/src/hed/mcc/http/PaxHeaders.7502/schema0000644000000000000000000000013013214316024021421 xustar000000000000000029 mtime=1513200660.60375487 30 atime=1513200668.723854182 29 ctime=1513200660.60375487 nordugrid-arc-5.4.2/src/hed/mcc/http/schema/0000755000175000002070000000000013214316024021546 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/mcc/http/schema/PaxHeaders.7502/http.xsd0000644000000000000000000000012311457664434023221 xustar000000000000000027 mtime=1287612700.761119 27 atime=1513200575.143709 29 ctime=1513200660.60375487 nordugrid-arc-5.4.2/src/hed/mcc/http/schema/http.xsd0000644000175000002070000000273211457664434023273 0ustar00mockbuildmock00000000000000 Endpoint (URL) for HTTP request. This configuration parameter may be overwritted by HTTP:ENDPOINT attribute of message. Method to use for HTTP request. May be overwritten by HTTP:METHOD attribute of message. nordugrid-arc-5.4.2/src/hed/mcc/http/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711255700321023540 xustar000000000000000027 mtime=1253540049.444682 30 atime=1513200599.271004746 30 ctime=1513200660.601754846 nordugrid-arc-5.4.2/src/hed/mcc/http/schema/Makefile.am0000644000175000002070000000013511255700321023601 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = http.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/mcc/http/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315727023556 xustar000000000000000030 mtime=1513200599.302005125 30 atime=1513200648.626608385 30 ctime=1513200660.602754858 nordugrid-arc-5.4.2/src/hed/mcc/http/schema/Makefile.in0000644000175000002070000004350413214315727023632 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. 
# @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/mcc/http/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = 
@AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = 
@JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ 
htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = http.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/mcc/http/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/mcc/http/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/mcc/http/PaxHeaders.7502/README0000644000000000000000000000012411037472457021137 xustar000000000000000027 mtime=1216247087.350292 27 atime=1513200575.140709 30 ctime=1513200660.578754564 nordugrid-arc-5.4.2/src/hed/mcc/http/README0000644000175000002070000000007311037472457021204 0ustar00mockbuildmock00000000000000MCC that handles HTTP messages on server and client sides. 
nordugrid-arc-5.4.2/src/hed/mcc/PaxHeaders.7502/msgvalidator0000644000000000000000000000013213214316024021700 xustar000000000000000030 mtime=1513200660.695755995 30 atime=1513200668.723854182 30 ctime=1513200660.695755995 nordugrid-arc-5.4.2/src/hed/mcc/msgvalidator/0000755000175000002070000000000013214316024022023 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/mcc/msgvalidator/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515024022 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200599.317005309 30 ctime=1513200660.691755946 nordugrid-arc-5.4.2/src/hed/mcc/msgvalidator/Makefile.am0000644000175000002070000000111712052416515024064 0ustar00mockbuildmock00000000000000SUBDIRS = schema pkglib_LTLIBRARIES = libmccmsgvalidator.la libmccmsgvalidator_la_SOURCES = MCCMsgValidator.cpp MCCMsgValidator.h libmccmsgvalidator_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libmccmsgvalidator_la_LIBADD = \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libmccmsgvalidator_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/mcc/msgvalidator/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315727024033 xustar000000000000000030 mtime=1513200599.363005872 30 atime=1513200648.667608886 30 ctime=1513200660.692755958 nordugrid-arc-5.4.2/src/hed/mcc/msgvalidator/Makefile.in0000644000175000002070000007516613214315727024120 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/mcc/msgvalidator DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libmccmsgvalidator_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libmccmsgvalidator_la_OBJECTS = \ libmccmsgvalidator_la-MCCMsgValidator.lo libmccmsgvalidator_la_OBJECTS = $(am_libmccmsgvalidator_la_OBJECTS) libmccmsgvalidator_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libmccmsgvalidator_la_CXXFLAGS) $(CXXFLAGS) \ $(libmccmsgvalidator_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ 
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libmccmsgvalidator_la_SOURCES) DIST_SOURCES = $(libmccmsgvalidator_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = 
@ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP 
= @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = 
@sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = schema pkglib_LTLIBRARIES = libmccmsgvalidator.la libmccmsgvalidator_la_SOURCES = MCCMsgValidator.cpp MCCMsgValidator.h libmccmsgvalidator_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libmccmsgvalidator_la_LIBADD = \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) $(GLIBMM_LIBS) libmccmsgvalidator_la_LDFLAGS = -no-undefined -avoid-version -module all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/mcc/msgvalidator/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/mcc/msgvalidator/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libmccmsgvalidator.la: $(libmccmsgvalidator_la_OBJECTS) $(libmccmsgvalidator_la_DEPENDENCIES) $(libmccmsgvalidator_la_LINK) -rpath $(pkglibdir) 
$(libmccmsgvalidator_la_OBJECTS) $(libmccmsgvalidator_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmccmsgvalidator_la-MCCMsgValidator.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libmccmsgvalidator_la-MCCMsgValidator.lo: MCCMsgValidator.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmccmsgvalidator_la_CXXFLAGS) $(CXXFLAGS) -MT libmccmsgvalidator_la-MCCMsgValidator.lo -MD -MP -MF $(DEPDIR)/libmccmsgvalidator_la-MCCMsgValidator.Tpo -c -o libmccmsgvalidator_la-MCCMsgValidator.lo `test -f 'MCCMsgValidator.cpp' || echo '$(srcdir)/'`MCCMsgValidator.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmccmsgvalidator_la-MCCMsgValidator.Tpo $(DEPDIR)/libmccmsgvalidator_la-MCCMsgValidator.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='MCCMsgValidator.cpp' object='libmccmsgvalidator_la-MCCMsgValidator.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmccmsgvalidator_la_CXXFLAGS) $(CXXFLAGS) -c -o libmccmsgvalidator_la-MCCMsgValidator.lo `test -f 'MCCMsgValidator.cpp' || echo '$(srcdir)/'`MCCMsgValidator.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-pkglibLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am \ uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/mcc/msgvalidator/PaxHeaders.7502/MCCMsgValidator.cpp0000644000000000000000000000012412675602216025414 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.125709 30 ctime=1513200660.693755971 nordugrid-arc-5.4.2/src/hed/mcc/msgvalidator/MCCMsgValidator.cpp0000644000175000002070000002136112675602216025464 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include <config.h> #endif #include "MCCMsgValidator.h" #include #include #include #include #include #include #include #include #include #include //#include Arc::Logger ArcMCCMsgValidator::MCC_MsgValidator::logger(Arc::Logger::getRootLogger(), "MCC.MsgValidator"); ArcMCCMsgValidator::MCC_MsgValidator::MCC_MsgValidator(Arc::Config *cfg,PluginArgument* parg) : Arc::MCC(cfg,parg) { // Collect services to be validated for(int i = 0;;++i) { Arc::XMLNode n = (*cfg)["ValidatedService"][i]; if(!n) break; std::string servicepath = n["ServicePath"]; if(servicepath.empty()) { //missing path logger.msg(Arc::WARNING, "Skipping service: no ServicePath found!"); continue; }; std::string schemapath = n["SchemaPath"]; if(schemapath.empty()) { //missing path logger.msg(Arc::WARNING, "Skipping service: no SchemaPath found!"); continue; }; // register schema path with service schemas[servicepath] = schemapath; } } static Arc::Plugin* get_mcc_service(Arc::PluginArgument* arg) { Arc::MCCPluginArgument* mccarg = arg?dynamic_cast<Arc::MCCPluginArgument*>(arg):NULL; if(!mccarg) return NULL; return new ArcMCCMsgValidator::MCC_MsgValidator_Service((Arc::Config*)(*mccarg),mccarg); } extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "msg.validator.service", "HED:MCC", NULL, 0, &get_mcc_service }, { "msg.validator.client", "HED:MCC", NULL, 0, &get_mcc_service }, { NULL, NULL, NULL, 0, NULL } }; namespace ArcMCCMsgValidator { using namespace Arc; MCC_MsgValidator_Service::MCC_MsgValidator_Service(Config *cfg,PluginArgument* parg):MCC_MsgValidator(cfg,parg) { } MCC_MsgValidator_Service::~MCC_MsgValidator_Service(void) { } std::string MCC_MsgValidator::getSchemaPath(std::string servicePath) { // Look for servicePath in the map. // Using a const_iterator since we are not going to change the values.
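// Note: 'schemas' holds the (ServicePath -> SchemaPath) pairs collected from the ValidatedService configuration elements in the constructor above.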
for(std::map<std::string,std::string>::const_iterator iter = schemas.begin(); iter != schemas.end(); ++iter) { if(iter->first == servicePath) { // found servicePath; returning schemaPath return iter->second; } } // nothing found; returning empty path return ""; } bool MCC_MsgValidator::validateMessage(Message& msg, std::string schemaPath){ // create parser ctxt for schema accessible on schemaPath xmlSchemaParserCtxtPtr schemaParserP = xmlSchemaNewParserCtxt(schemaPath.c_str()); if(!schemaParserP) { // could not create context logger.msg(ERROR, "Parser Context creation failed!"); return false; } // parse schema xmlSchemaPtr schemaP = xmlSchemaParse(schemaParserP); if(!schemaP) { // could not parse schema logger.msg(ERROR, "Cannot parse schema!"); // have to free parser ctxt xmlSchemaFreeParserCtxt(schemaParserP); return false; } // we do not need schemaParserP any more, so it can be freed xmlSchemaFreeParserCtxt(schemaParserP); // Extracting payload MessagePayload* payload = msg.Payload(); if(!payload) { logger.msg(ERROR, "Empty payload!"); return false; } // Converting payload to SOAP PayloadSOAP* plsp = NULL; plsp = dynamic_cast<PayloadSOAP*>(payload); if(!plsp) { // cast failed logger.msg(ERROR, "Could not convert payload!"); return false; } PayloadSOAP soapPL(*plsp); if(!soapPL) { logger.msg(ERROR, "Could not create PayloadSOAP!"); return false; } std::string arcPSstr; // get SOAP payload as string soapPL.GetXML(arcPSstr); // parse string into libxml2 xmlDoc xmlDocPtr lxdocP = xmlParseDoc(xmlCharStrdup(arcPSstr.c_str())); // create XPath context; later, we will have to free it! xmlXPathContextPtr xpCtxtP = xmlXPathNewContext(lxdocP); // content is the first child _element_ of SOAP Body std::string exprstr = "//*[local-name()='Body' and namespace-uri()='http://schemas.xmlsoap.org/soap/envelope/'][1]/*[1]"; // result is a xmlXPathObjectPtr; later, we will have to free it!
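// Note: the namespace URI in the expression above is the SOAP 1.1 envelope namespace; a SOAP 1.2 Body (namespace http://www.w3.org/2003/05/soap-envelope) would not match this expression.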
xmlXPathObjectPtr xpObP = xmlXPathEval(xmlCharStrdup(exprstr.c_str()),xpCtxtP); // xnsP is the set of result nodes xmlNodeSetPtr xnsP = xpObP->nodesetval; // the set has only one member (content) - see above (exprstr) xmlNodePtr content = xnsP->nodeTab[0]; // create a new xmlDoc for content xmlDocPtr doc = xmlNewDoc(xmlCharStrdup("1.0")); // create schema validation context xmlSchemaValidCtxtPtr validity_ctxt_ptr = xmlSchemaNewValidCtxt(schemaP); // copy & add content to doc as a child xmlNodePtr tmpNode = xmlDocCopyNode(content, doc, 1); xmlAddChild((xmlNodePtr)doc, tmpNode); // validate against schema bool result = (xmlSchemaValidateDoc(validity_ctxt_ptr, doc) == 0); // free resources and return result xmlSchemaFreeValidCtxt(validity_ctxt_ptr); xmlSchemaFree(schemaP); xmlFreeDoc(doc); xmlFreeDoc(lxdocP); xmlXPathFreeContext(xpCtxtP); xmlXPathFreeObject(xpObP); return result; } static MCC_Status make_raw_fault(Message& outmsg,const char* = NULL) { NS ns; SOAPEnvelope soap(ns,true); soap.Fault()->Code(SOAPFault::Receiver); std::string xml; soap.GetXML(xml); PayloadRaw* payload = new PayloadRaw; payload->Insert(xml.c_str()); outmsg.Payload(payload); return MCC_Status(GENERIC_ERROR); } MCC_Status MCC_MsgValidator_Service::process(Message& inmsg,Message& outmsg) { // Extracting payload MessagePayload* inpayload = inmsg.Payload(); if(!inpayload) { logger.msg(WARNING, "Empty input payload!"); return make_raw_fault(outmsg); } // Converting payload to SOAP PayloadSOAP* plsp = NULL; plsp = dynamic_cast<PayloadSOAP*>(inpayload); if(!plsp) { // cast failed logger.msg(ERROR, "Could not convert incoming payload!"); return make_raw_fault(outmsg); } PayloadSOAP nextpayload(*plsp); if(!nextpayload) { logger.msg(ERROR, "Could not create PayloadSOAP!"); return make_raw_fault(outmsg); } // Creating message to pass to next MCC and setting new payload.. // Using separate message. But could also use same inmsg. // Just trying to keep it intact as much as possible. Message nextinmsg = inmsg; nextinmsg.Payload(&nextpayload); std::string endpoint_attr = inmsg.Attributes()->get("ENDPOINT"); // extract service path std::string servicePath = getPath(endpoint_attr); // check config for corresponding service std::string schemaPath = getSchemaPath(servicePath); if("" == schemaPath) { // missing schema logger.msg(WARNING, "Missing schema!
Skipping validation..."); } else { // try to validate message against service schema if(!validateMessage(nextinmsg,schemaPath)) { // message validation failed for some reason logger.msg(ERROR, "Could not validate message!"); return make_raw_fault(outmsg); } } // Call next MCC MCCInterface* next = Next(); if(!next) { logger.msg(WARNING, "empty next chain element"); return make_raw_fault(outmsg); } Message nextoutmsg = outmsg; nextoutmsg.Payload(NULL); MCC_Status ret = next->process(nextinmsg,nextoutmsg); // Do checks and extract SOAP response if(!ret) { if(nextoutmsg.Payload()) delete nextoutmsg.Payload(); logger.msg(WARNING, "next element of the chain returned error status"); return make_raw_fault(outmsg); } if(!nextoutmsg.Payload()) { logger.msg(WARNING, "next element of the chain returned empty payload"); return make_raw_fault(outmsg); } PayloadSOAP* retpayload = NULL; try { retpayload = dynamic_cast<PayloadSOAP*>(nextoutmsg.Payload()); } catch(std::exception& e) { }; if(!retpayload) { logger.msg(WARNING, "next element of the chain returned invalid payload"); delete nextoutmsg.Payload(); return make_raw_fault(outmsg); }; if(!(*retpayload)) { delete retpayload; return make_raw_fault(outmsg); }; // replace old payload with retpayload // then delete old payload delete outmsg.Payload(retpayload); return MCC_Status(STATUS_OK); } std::string MCC_MsgValidator_Service::getPath(std::string url){ std::string::size_type ds, ps; ds=url.find("//"); if (ds==std::string::npos) ps=url.find("/"); else ps=url.find("/", ds+2); if (ps==std::string::npos) return ""; else return url.substr(ps); } } // namespace ArcMCCMsgValidator nordugrid-arc-5.4.2/src/hed/mcc/msgvalidator/PaxHeaders.7502/MCCMsgValidator.h0000644000000000000000000000012411730411253025047 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.128709 30 ctime=1513200660.693755971 nordugrid-arc-5.4.2/src/hed/mcc/msgvalidator/MCCMsgValidator.h0000644000175000002070000000215511730411253025117 0ustar00mockbuildmock00000000000000#ifndef __ARC_MCCMSGVALIDATOR_H__ #define __ARC_MCCMSGVALIDATOR_H__ #include namespace ArcMCCMsgValidator { using namespace Arc; // This is a base class for Message Validator client and service MCCs. class MCC_MsgValidator : public MCC { public: MCC_MsgValidator(Config *cfg,PluginArgument* parg); protected: static Logger logger; std::map<std::string,std::string> schemas; bool validateMessage(Message&,std::string); std::string getSchemaPath(std::string serviceName); }; /* This MCC validates messages against XML schemas. It accepts and produces (i.e. inmsg/outmsg) PayloadSOAP kind of payloads in its process() method. */ class MCC_MsgValidator_Service: public MCC_MsgValidator { public: /* Constructor takes configuration of MCC.
*/ MCC_MsgValidator_Service(Config *cfg,PluginArgument* parg); virtual ~MCC_MsgValidator_Service(void); virtual MCC_Status process(Message&,Message&); private: static std::string getPath(std::string url); }; } // namespace ArcMCCMsgValidator #endif /* __ARC_MCCMSGVALIDATOR_H__ */ nordugrid-arc-5.4.2/src/hed/mcc/msgvalidator/PaxHeaders.7502/schema0000644000000000000000000000013213214316024023140 xustar000000000000000030 mtime=1513200660.711756191 30 atime=1513200668.723854182 30 ctime=1513200660.711756191 nordugrid-arc-5.4.2/src/hed/mcc/msgvalidator/schema/0000755000175000002070000000000013214316024023263 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/mcc/msgvalidator/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711255700321025255 xustar000000000000000027 mtime=1253540049.444682 30 atime=1513200599.378006055 30 ctime=1513200660.709756166 nordugrid-arc-5.4.2/src/hed/mcc/msgvalidator/schema/Makefile.am0000644000175000002070000000014511255700321025317 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = msgvalidator.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/mcc/msgvalidator/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315727025273 xustar000000000000000030 mtime=1513200599.409006434 30 atime=1513200648.681609057 30 ctime=1513200660.710756179 nordugrid-arc-5.4.2/src/hed/mcc/msgvalidator/schema/Makefile.in0000644000175000002070000004354413214315727025353 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/mcc/msgvalidator/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = 
@ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = 
@LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ 
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = msgvalidator.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/mcc/msgvalidator/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/mcc/msgvalidator/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ 
dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/mcc/msgvalidator/schema/PaxHeaders.7502/msgvalidator.xsd0000644000000000000000000000012411255700321026432 xustar000000000000000027 mtime=1253540049.444682 27 atime=1513200575.128709 30 ctime=1513200660.711756191 nordugrid-arc-5.4.2/src/hed/mcc/msgvalidator/schema/msgvalidator.xsd0000644000175000002070000000327211255700321026503 0ustar00mockbuildmock00000000000000 Node for a Service to be validated. 
There may be multiple ValidatedService elements. Relative path of the service to be validated e.g.: "http://example.org:50000/PythonServiceDemo" --> "/PythonServiceDemo" If omitted, validation of this Service will be skipped Path of the schema to validate with. If omitted, validation of this Service will be skipped nordugrid-arc-5.4.2/src/hed/mcc/msgvalidator/PaxHeaders.7502/README0000644000000000000000000000012311101311305022623 xustar000000000000000026 mtime=1225102021.95547 27 atime=1513200575.125709 30 ctime=1513200660.690755934 nordugrid-arc-5.4.2/src/hed/mcc/msgvalidator/README0000644000175000002070000000374111101311305022676 0ustar00mockbuildmock00000000000000############################## # MCCMsgValidator # # a Message Chain Component # # for validation of incoming # # SOAP service requests # ############################## This MCC validates incoming SOAP requests on service side, i.e. it checks wheter the content of SOAP Body is valid (according to the XML schema of the service). This MCC accepts payloads that could be converted into PayloadSOAP. Next MCC is called with a Message containing this PayloadSOAP. Payload of the next MCC's response Message is converted to PayloadSOAP. Finally, an outgoing Message is created with this PayloadSOAP. Steps taken: - extract SOAP payload - validate request (found in SOAP Body) against schema - call next MCC - extract SOAP response and then return it ################### # Configuration # # of # # MCCMsgValidator # ################### --> new namespace added xmlns:vsrv="http://www.nordugrid.org/schemas/ArcMCCMSGVALIDATOR/2008" new Plugin --> mccmsgvalidator MCCMsgValidator should be placed after MCCSOAP: new Component --> /PythonServiceDemo /usr/share/doc/arc/schemas/PythonServiceDemo.xsd MCCMsgValidator should be placed right before Plexer --> ValidatedService --> node for a Service to be validated ServicePath --> relative path of the service to be validated e.g.: "http://example.org:50000/PythonServiceDemo" --> "/PythonServiceDemo" if omitted, validation of this Service will be skipped SchemaPath --> path of the schema to validate with if omitted, validation of this Service will be skipped nordugrid-arc-5.4.2/src/hed/mcc/PaxHeaders.7502/tcp0000644000000000000000000000013213214316024017772 xustar000000000000000030 mtime=1513200660.539754087 30 atime=1513200668.723854182 30 ctime=1513200660.539754087 nordugrid-arc-5.4.2/src/hed/mcc/tcp/0000755000175000002070000000000013214316024020115 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/mcc/tcp/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712052416515022114 xustar000000000000000027 mtime=1353325901.850498 30 atime=1513200599.485007364 30 ctime=1513200660.534754026 nordugrid-arc-5.4.2/src/hed/mcc/tcp/Makefile.am0000644000175000002070000000106312052416515022156 0ustar00mockbuildmock00000000000000SUBDIRS = schema pkglib_LTLIBRARIES = libmcctcp.la libmcctcp_la_SOURCES = MCCTCP.cpp PayloadTCPSocket.cpp \ MCCTCP.h PayloadTCPSocket.h libmcctcp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libmcctcp_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(SOCKET_LIBS) libmcctcp_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/mcc/tcp/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315727022125 xustar000000000000000030 
mtime=1513200599.532007938 30 atime=1513200648.587607908 30 ctime=1513200660.534754026 nordugrid-arc-5.4.2/src/hed/mcc/tcp/Makefile.in0000644000175000002070000007662213214315727022210 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/mcc/tcp DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libmcctcp_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ 
$(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) am_libmcctcp_la_OBJECTS = libmcctcp_la-MCCTCP.lo \ libmcctcp_la-PayloadTCPSocket.lo libmcctcp_la_OBJECTS = $(am_libmcctcp_la_OBJECTS) libmcctcp_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libmcctcp_la_CXXFLAGS) \ $(CXXFLAGS) $(libmcctcp_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libmcctcp_la_SOURCES) DIST_SOURCES = $(libmcctcp_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ 
ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = 
@LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ 
ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = schema pkglib_LTLIBRARIES = libmcctcp.la libmcctcp_la_SOURCES = MCCTCP.cpp PayloadTCPSocket.cpp \ MCCTCP.h PayloadTCPSocket.h libmcctcp_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libmcctcp_la_LIBADD = \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(SOCKET_LIBS) libmcctcp_la_LDFLAGS = -no-undefined -avoid-version -module all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/mcc/tcp/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/mcc/tcp/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libmcctcp.la: $(libmcctcp_la_OBJECTS) $(libmcctcp_la_DEPENDENCIES) $(libmcctcp_la_LINK) -rpath $(pkglibdir) $(libmcctcp_la_OBJECTS) $(libmcctcp_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmcctcp_la-MCCTCP.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmcctcp_la-PayloadTCPSocket.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libmcctcp_la-MCCTCP.lo: 
MCCTCP.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctcp_la_CXXFLAGS) $(CXXFLAGS) -MT libmcctcp_la-MCCTCP.lo -MD -MP -MF $(DEPDIR)/libmcctcp_la-MCCTCP.Tpo -c -o libmcctcp_la-MCCTCP.lo `test -f 'MCCTCP.cpp' || echo '$(srcdir)/'`MCCTCP.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmcctcp_la-MCCTCP.Tpo $(DEPDIR)/libmcctcp_la-MCCTCP.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='MCCTCP.cpp' object='libmcctcp_la-MCCTCP.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctcp_la_CXXFLAGS) $(CXXFLAGS) -c -o libmcctcp_la-MCCTCP.lo `test -f 'MCCTCP.cpp' || echo '$(srcdir)/'`MCCTCP.cpp libmcctcp_la-PayloadTCPSocket.lo: PayloadTCPSocket.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctcp_la_CXXFLAGS) $(CXXFLAGS) -MT libmcctcp_la-PayloadTCPSocket.lo -MD -MP -MF $(DEPDIR)/libmcctcp_la-PayloadTCPSocket.Tpo -c -o libmcctcp_la-PayloadTCPSocket.lo `test -f 'PayloadTCPSocket.cpp' || echo '$(srcdir)/'`PayloadTCPSocket.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmcctcp_la-PayloadTCPSocket.Tpo $(DEPDIR)/libmcctcp_la-PayloadTCPSocket.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='PayloadTCPSocket.cpp' object='libmcctcp_la-PayloadTCPSocket.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmcctcp_la_CXXFLAGS) $(CXXFLAGS) -c -o libmcctcp_la-PayloadTCPSocket.lo `test -f 'PayloadTCPSocket.cpp' || echo '$(srcdir)/'`PayloadTCPSocket.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-pkglibLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am \ uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/mcc/tcp/PaxHeaders.7502/MCCTCP.h0000644000000000000000000000012313107551057021201 xustar000000000000000026 mtime=1495192111.08484 27 atime=1513200575.121709 30 ctime=1513200660.537754063 nordugrid-arc-5.4.2/src/hed/mcc/tcp/MCCTCP.h0000644000175000002070000000762313107551057021257 0ustar00mockbuildmock00000000000000#ifndef __ARC_MCCTCP_H__ #define __ARC_MCCTCP_H__ #include #include #include "PayloadTCPSocket.h" namespace ArcMCCTCP { using namespace Arc; //! A base class for TCP client and service MCCs. /*! This is a base class for TCP client and service MCCs. It provides some common functionality for them, i.e. so far only a logger. */ class MCC_TCP : public MCC { public: MCC_TCP(Config *cfg, PluginArgument* parg); protected: static Logger logger; friend class PayloadTCPSocket; }; /** This class is MCC implementing TCP server. Upon creation this object binds to specified TCP ports and listens for incoming TCP connections on dedicated thread. Each connection is accepted and dedicated thread is created. Then that thread is used to call process() method of next MCC in chain. That method is passed payload implementing PayloadStreamInterface. On response payload with PayloadRawInterface is expected. Alternatively called MCC may use provided PayloadStreamInterface to send it's response back directly. 
During processing of request this MCC generates following attributes: TCP:HOST - IP address of interface to which local TCP socket is bound TCP:PORT - port number to which local TCP socket is bound TCP:REMOTEHOST - IP address from which connection is accepted TCP:REMOTEPORT - TCP port from which connection is accepted TCP:ENDPOINT - URL-like representation of remote connection - ://HOST:PORT ENDPOINT - global attribute equal to TCP:ENDPOINT */ class MCC_TCP_Service: public MCC_TCP { friend class mcc_tcp_exec_t; private: class mcc_tcp_exec_t { public: MCC_TCP_Service* obj; int handle; bool no_delay; int timeout; mcc_tcp_exec_t(MCC_TCP_Service* o,int h,int t, bool nd = false); operator bool(void) { return (handle != -1); }; }; class mcc_tcp_handle_t { public: int handle; bool no_delay; int timeout; mcc_tcp_handle_t(int h, int t, bool nd = false):handle(h),no_delay(nd),timeout(t) { }; operator int(void) { return handle; }; }; bool valid_; std::list handles_; /** listening sockets */ std::list executers_; /** active connections and associated threads */ int max_executers_; bool max_executers_drop_; /* pthread_t listen_th_; ** thread listening for incoming connections */ Glib::Mutex lock_; /** lock for safe operations in internal lists */ Glib::Cond cond_; static void listener(void *); /** executing function for listening thread */ static void executer(void *); /** executing function for connection thread */ public: MCC_TCP_Service(Config *cfg, PluginArgument* parg); virtual ~MCC_TCP_Service(void); virtual MCC_Status process(Message&,Message&); operator bool(void) { return valid_; }; bool operator!(void) { return !valid_; }; }; /** This class is MCC implementing TCP client. Upon creation it connects to specified TCP post at specified host. process() method accepts PayloadRawInterface type of payload. Content of payload is sent over TCP socket. It returns PayloadStreamInterface payload for previous MCC to read response. */ class MCC_TCP_Client: public MCC_TCP { private: /** Socket object connected to remote site. It contains NULL if connectino failed. */ PayloadTCPSocket* s_; public: MCC_TCP_Client(Config *cfg, PluginArgument* parg); virtual ~MCC_TCP_Client(void); virtual MCC_Status process(Message&,Message&); operator bool(void) { return (s_ != NULL); }; bool operator!(void) { return (s_ == NULL); }; }; } // namespace Arc #endif /* __ARC_MCCTCP_H__ */ nordugrid-arc-5.4.2/src/hed/mcc/tcp/PaxHeaders.7502/PayloadTCPSocket.h0000644000000000000000000000012412052517370023340 xustar000000000000000027 mtime=1353359096.401407 27 atime=1513200575.121709 30 ctime=1513200660.537754063 nordugrid-arc-5.4.2/src/hed/mcc/tcp/PayloadTCPSocket.h0000644000175000002070000000462312052517370023412 0ustar00mockbuildmock00000000000000#ifndef __ARC_PAYLOADTCPSOCKET_H__ #define __ARC_PAYLOADTCPSOCKET_H__ #include #include #include namespace ArcMCCTCP { using namespace Arc; /** This class extends PayloadStream with TCP socket specific features */ class PayloadTCPSocket: public PayloadStreamInterface { private: int connect_socket(const char* hostname,int port); int handle_; bool acquired_; int timeout_; std::string error_; Logger& logger; public: /** Constructor - connects to TCP server at specified hostname:port */ PayloadTCPSocket(const char* hostname,int port, int timeout, Logger& logger); /** Constructor - connects to TCP server at specified endpoint - hostname:port */ PayloadTCPSocket(const std::string& endpoint,int timeout, Logger& logger); /** Constructor - creates object of already connected socket. 
Socket is NOT closed in destructor. */ PayloadTCPSocket(int s, int timeout, Logger& logger): handle_(s),acquired_(false),timeout_(timeout),logger(logger) { }; /** Copy constructor - inherits socket of copied object. Socket is NOT closed in destructor. */ PayloadTCPSocket(PayloadTCPSocket& s): handle_(s.handle_),acquired_(false),timeout_(s.timeout_),logger(s.logger) { }; /** Copy constructor - inherits handle of copied object. Handle is NOT closed in destructor. */ PayloadTCPSocket(PayloadTCPSocket& s,Logger& logger): handle_(s.handle_),acquired_(false),timeout_(s.timeout_),logger(logger) { }; virtual ~PayloadTCPSocket(void); virtual bool Get(char* buf,int& size); virtual bool Get(std::string& buf); virtual std::string Get(void) { std::string buf; Get(buf); return buf; }; virtual bool Put(const char* buf,Size_t size); virtual bool Put(const std::string& buf) { return Put(buf.c_str(),buf.length()); }; virtual bool Put(const char* buf) { return Put(buf,buf?strlen(buf):0); }; virtual operator bool(void) { return (handle_ != -1); }; virtual bool operator!(void) { return (handle_ == -1); }; virtual int Timeout(void) const { return timeout_; }; virtual void Timeout(int to) { timeout_=to; }; virtual Size_t Pos(void) const { return 0; }; virtual Size_t Size(void) const { return 0; }; virtual Size_t Limit(void) const { return 0; }; int GetHandle() { return handle_; }; std::string GetError() { return error_; }; void NoDelay(bool val); }; } // namespace Arc #endif /* __ARC_PAYLOADTCPSOCKET_H__ */ nordugrid-arc-5.4.2/src/hed/mcc/tcp/PaxHeaders.7502/PayloadTCPSocket.cpp0000644000000000000000000000012313214315176023673 xustar000000000000000027 mtime=1513200254.761812 27 atime=1513200575.121709 29 ctime=1513200660.53675405 nordugrid-arc-5.4.2/src/hed/mcc/tcp/PayloadTCPSocket.cpp0000644000175000002070000002012113214315176023735 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #ifndef WIN32 #include #include #include #include #include #include #include #endif #include #include #include #include #include "PayloadTCPSocket.h" #define USE_REMOTE_HOSTNAME_RESOLVER 1 namespace ArcMCCTCP { using namespace Arc; #ifndef WIN32 static int spoll(int h, int timeout, unsigned int& events) { int r; // Second resolution is enough time_t c_time = time(NULL); time_t f_time = c_time + timeout; struct pollfd fd; for(;;) { fd.fd=h; fd.events=events; fd.revents=0; r = ::poll(&fd,1,(f_time-c_time)*1000); if(r != -1) break; // success or timeout // Checking for operation interrupted by signal if(errno != EINTR) break; time_t n_time = time(NULL); // Protection against time jumping backward if(((int)(n_time - c_time)) < 0) f_time -= (c_time - n_time); c_time = n_time; // If over time, make one more try with 0 timeout if(((int)(f_time - c_time)) < 0) c_time = f_time; } events = fd.revents; return r; } #endif int PayloadTCPSocket::connect_socket(const char* hostname,int port) { std::string port_str = tostring(port); #ifndef USE_REMOTE_HOSTNAME_RESOLVER struct addrinfo hint; memset(&hint, 0, sizeof(hint)); hint.ai_family = AF_UNSPEC; hint.ai_socktype = SOCK_STREAM; hint.ai_protocol = IPPROTO_TCP; struct addrinfo *info = NULL; int ret = getaddrinfo(hostname, port_str.c_str(), &hint, &info); if ((ret != 0) || (!info)) { std::string err_str = gai_strerror(ret); error_ = IString("Failed to resolve %s (%s)", hostname, err_str).str(); logger.msg(VERBOSE, "%s", error_); return -1; } int s = -1; for(struct addrinfo *info_ = info;info_;info_=info_->ai_next) { int family = 
info_->ai_family; socklen_t slen = info_->ai_addrlen; struct sockaddr* saddr = info_->ai_addr; #else HostnameResolver* hr = HostnameResolver::Acquire(); std::list info; int ret = hr->hr_resolve(hostname, port_str, false, info); HostnameResolver::Release(hr); if ((ret != 0) || (info.empty())) { std::string err_str = gai_strerror(ret); error_ = IString("Failed to resolve %s (%s)", hostname, err_str).str(); logger.msg(VERBOSE, "%s", error_); return -1; } int s = -1; for(std::list::iterator info_ = info.begin(); info_ != info.end(); ++info_) { int family = info_->Family(); socklen_t slen = info_->Length(); struct sockaddr const* saddr = info_->Addr(); #endif logger.msg(VERBOSE,"Trying to connect %s(%s):%d", hostname,family==AF_INET6?"IPv6":"IPv4",port); s = ::socket(family, SOCK_STREAM, IPPROTO_TCP); if(s == -1) { error_ = IString("Failed to create socket for connecting to %s(%s):%d - %s", hostname,family==AF_INET6?"IPv6":"IPv4",port, Arc::StrError(errno)).str(); logger.msg(VERBOSE, "%s", error_); continue; } #ifndef WIN32 // In *NIX we can use non-blocking socket because poll() will // be used for waiting. int s_flags = ::fcntl(s, F_GETFL, 0); if(s_flags != -1) { ::fcntl(s, F_SETFL, s_flags | O_NONBLOCK); } else { logger.msg(VERBOSE, "Failed to get TCP socket options for connection" " to %s(%s):%d - timeout won't work - %s", hostname,family==AF_INET6?"IPv6":"IPv4",port, Arc::StrError(errno)); } if(::connect(s, saddr, slen) == -1) { if(errno != EINPROGRESS) { error_ = IString("Failed to connect to %s(%s):%i - %s", hostname,family==AF_INET6?"IPv6":"IPv4",port, Arc::StrError(errno)).str(); logger.msg(VERBOSE, "%s", error_); close(s); s = -1; continue; } unsigned int events = POLLOUT | POLLPRI; int pres = spoll(s,timeout_,events); if(pres == 0) { error_ = IString("Timeout connecting to %s(%s):%i - %i s", hostname,family==AF_INET6?"IPv6":"IPv4",port, timeout_).str(); logger.msg(VERBOSE, "%s", error_); close(s); s = -1; continue; } if(pres != 1) { error_ = IString("Failed while waiting for connection to %s(%s):%i - %s", hostname,family==AF_INET6?"IPv6":"IPv4",port, Arc::StrError(errno)).str(); logger.msg(VERBOSE, "%s", error_); close(s); s = -1; continue; } // man connect says one has to check SO_ERROR, but poll() returns // POLLERR and POLLHUP so we can use them directly. 
if(events & (POLLERR | POLLHUP)) { error_ = IString("Failed to connect to %s(%s):%i", hostname,family==AF_INET6?"IPv6":"IPv4",port).str(); logger.msg(VERBOSE, "%s", error_); close(s); s = -1; continue; } } #else if(::connect(s, info_->ai_addr, info_->ai_addrlen) == -1) { error_ = IString("Failed to connect to %s(%s):%i", hostname,family==AF_INET6?"IPv6":"IPv4",port).str(); logger.msg(VERBOSE, "%s", error_); close(s); s = -1; continue; }; #endif break; }; #ifndef USE_REMOTE_HOSTNAME_RESOLVER freeaddrinfo(info); #endif if(s != -1) error_ = ""; return s; } PayloadTCPSocket::PayloadTCPSocket(const char* hostname, int port, int timeout, Logger& logger) : logger(logger) { timeout_=timeout; handle_=connect_socket(hostname,port); acquired_=true; } PayloadTCPSocket::PayloadTCPSocket(const std::string& endpoint, int timeout, Logger& logger) : logger(logger) { handle_ = -1; acquired_=false; std::string hostname = endpoint; std::string::size_type p = hostname.find(':'); if(p == std::string::npos) return; int port = atoi(hostname.c_str()+p+1); hostname.resize(p); timeout_=timeout; handle_=connect_socket(hostname.c_str(),port); acquired_=true; } PayloadTCPSocket::~PayloadTCPSocket(void) { if(acquired_ && (handle_ != -1)) { shutdown(handle_,2); close(handle_); }; } bool PayloadTCPSocket::Get(char* buf,int& size) { ssize_t l = size; size=0; if(handle_ == -1) return false; int flags = 0; #ifndef WIN32 unsigned int events = POLLIN | POLLPRI | POLLERR; if(spoll(handle_,timeout_,events) != 1) return false; if(!(events & (POLLIN | POLLPRI))) return false; // Probably never happens if((events & POLLPRI) && !(events & POLLIN)) { logger.msg(ERROR, "Received message out-of-band (not critical, ERROR level is just for debugging purposes)"); flags = MSG_OOB; } #endif l=::recv(handle_,buf,l,flags); if(flags & MSG_OOB) { size = 0; return true; } if(l == -1) return false; size=l; if(l == 0) return false; return true; } bool PayloadTCPSocket::Get(std::string& buf) { char tbuf[1024]; int l = sizeof(tbuf); bool result = Get(tbuf,l); buf.assign(tbuf,l); return result; } bool PayloadTCPSocket::Put(const char* buf,Size_t size) { ssize_t l; if(handle_ == -1) return false; time_t start = time(NULL); for(;size;) { #ifndef WIN32 unsigned int events = POLLOUT | POLLERR; int to = timeout_-(unsigned int)(time(NULL)-start); if(to < 0) to = 0; if(spoll(handle_,to,events) != 1) return false; if(!(events & POLLOUT)) return false; #endif l=::send(handle_, buf, size, 0); if(l == -1) return false; buf+=l; size-=l; #ifdef WIN32 int to = timeout_-(unsigned int)(time(NULL)-start); if(to < 0) return false; #endif }; return true; } void PayloadTCPSocket::NoDelay(bool val) { if(handle_ == -1) return; int flag = val?1:0; #ifdef WIN32 ::setsockopt(handle_,IPPROTO_TCP,TCP_NODELAY,(const char*)(&flag),sizeof(flag)); #else ::setsockopt(handle_,IPPROTO_TCP,TCP_NODELAY,&flag,sizeof(flag)); #endif } } // namespace Arc nordugrid-arc-5.4.2/src/hed/mcc/tcp/PaxHeaders.7502/schema0000644000000000000000000000013213214316024021232 xustar000000000000000030 mtime=1513200660.557754307 30 atime=1513200668.723854182 30 ctime=1513200660.557754307 nordugrid-arc-5.4.2/src/hed/mcc/tcp/schema/0000755000175000002070000000000013214316024021355 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/mcc/tcp/schema/PaxHeaders.7502/Makefile.am0000644000000000000000000000012711255700321023347 xustar000000000000000027 mtime=1253540049.444682 30 atime=1513200599.548008134 30 ctime=1513200660.555754283 
nordugrid-arc-5.4.2/src/hed/mcc/tcp/schema/Makefile.am0000644000175000002070000000013411255700321023407 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = tcp.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-5.4.2/src/hed/mcc/tcp/schema/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315727023365 xustar000000000000000030 mtime=1513200599.579008513 30 atime=1513200648.599608054 30 ctime=1513200660.556754295 nordugrid-arc-5.4.2/src/hed/mcc/tcp/schema/Makefile.in0000644000175000002070000004350013214315727023435 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/mcc/tcp/schema DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print 
dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = 
@GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ 
am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = tcp.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/mcc/tcp/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/mcc/tcp/schema/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) test -z "$(arcschemadir)" || $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(arcschemadir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(arcschemadir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-arcschemaDATA install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-arcschemaDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/src/hed/mcc/tcp/schema/PaxHeaders.7502/tcp.xsd0000644000000000000000000000012411643341511022621 xustar000000000000000027 mtime=1317913417.571798 27 atime=1513200575.123709 30 ctime=1513200660.557754307 nordugrid-arc-5.4.2/src/hed/mcc/tcp/schema/tcp.xsd0000644000175000002070000001143011643341511022665 0ustar00mockbuildmock00000000000000 This element defines listening TCP socket. If interface is missing socket is bound to all local interfaces (not supported). There may be multiple Listen elements. Listen network interface. Listen network port. This element defines TCP/IP protocol version. If not specified both versions will be used whenether possible. Apply no delay socket option. If set to true TCP packets will be proceesd as soon as possible without waiting for more data to become available. Timeout for socket level operations If set to true and MCC fails to bind all addresses of specified interface then such situation is considered failure. In case value set to false it is enough to bind at least one address of interface. This element defines TCP connection to be established to specified Host at specified Port. If LocalPort is defined TCP socket will be bound to this port number (not supported). This element defines upper limit for number of simultaneous active TCP connections. Only positive numbers are meaningful. If attribute "drop" is specified and is set to true then connections over specified limit will be dropped. Otherwise they will be put on hold. 
nordugrid-arc-5.4.2/src/hed/mcc/tcp/PaxHeaders.7502/MCCTCP.cpp0000644000000000000000000000012413214315176021534 xustar000000000000000027 mtime=1513200254.761812 27 atime=1513200575.119709 30 ctime=1513200660.535754038 nordugrid-arc-5.4.2/src/hed/mcc/tcp/MCCTCP.cpp0000644000175000002070000006746513214315176021623 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #ifdef WIN32 #include typedef int socklen_t; #define ErrNo WSAGetLastError() #include // There is no inet_ntop on WIN32 inline const char *inet_ntop(int af, const void *__restrict src, char *__restrict dest, socklen_t size) { // IPV6 not supported (yet?) if(AF_INET!=af) { printf("inet_ntop is only implemented for AF_INET address family on win32/msvc8"); abort(); } // Format address char *s=inet_ntoa(*reinterpret_cast(src)); if(!s) return 0; // Copy to given buffer socklen_t len=(socklen_t)strlen(s); if(len>=size) return 0; return strncpy(dest, s, len); } #else // UNIX // NOTE: On Solaris errno is not working properly if cerrno is included first #include #include #include #include #include #include #include #define ErrNo errno #endif #include #include #include #include #include #include #include #include "MCCTCP.h" #define PROTO_NAME(ADDR) ((ADDR->ai_family==AF_INET6)?"IPv6":"IPv4") Arc::Logger ArcMCCTCP::MCC_TCP::logger(Arc::Logger::getRootLogger(), "MCC.TCP"); ArcMCCTCP::MCC_TCP::MCC_TCP(Arc::Config *cfg, PluginArgument* parg) : Arc::MCC(cfg, parg) { } static Arc::Plugin* get_mcc_service(Arc::PluginArgument* arg) { Arc::MCCPluginArgument* mccarg = arg?dynamic_cast(arg):NULL; if(!mccarg) return NULL; ArcMCCTCP::MCC_TCP_Service* plugin = new ArcMCCTCP::MCC_TCP_Service((Arc::Config*)(*mccarg),mccarg); if(!(*plugin)) { delete plugin; return NULL; }; return plugin; } static Arc::Plugin* get_mcc_client(Arc::PluginArgument* arg) { Arc::MCCPluginArgument* mccarg = arg?dynamic_cast(arg):NULL; if(!mccarg) return NULL; ArcMCCTCP::MCC_TCP_Client* plugin = new ArcMCCTCP::MCC_TCP_Client((Arc::Config*)(*mccarg),mccarg); if(!(*plugin)) { delete plugin; return NULL; }; return plugin; } extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "tcp.service", "HED:MCC", NULL, 0, &get_mcc_service }, { "tcp.client", "HED:MCC", NULL, 0, &get_mcc_client }, { NULL, NULL, NULL, 0, NULL } }; namespace ArcMCCTCP { using namespace Arc; MCC_TCP_Service::MCC_TCP_Service(Config *cfg, PluginArgument* parg):MCC_TCP(cfg,parg),valid_(false),max_executers_(-1),max_executers_drop_(false) { #ifdef WIN32 WSADATA wsadata; if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) { logger.msg(ERROR, "Cannot initialize winsock library"); return; } #endif for(int i = 0;;++i) { struct addrinfo hint; struct addrinfo *info = NULL; memset(&hint, 0, sizeof(hint)); hint.ai_socktype = SOCK_STREAM; hint.ai_protocol = IPPROTO_TCP; // ? 
hint.ai_flags = AI_PASSIVE; XMLNode l = (*cfg)["Listen"][i]; if(!l) break; std::string port_s = l["Port"]; if(port_s.empty()) { logger.msg(ERROR, "Missing Port in Listen element"); return; }; std::string interface_s = l["Interface"]; std::string version_s = l["Version"]; if(!version_s.empty()) { if(version_s == "4") { hint.ai_family = AF_INET; } else if(version_s == "6") { hint.ai_family = AF_INET6; } else { logger.msg(ERROR, "Version in Listen element can't be recognized"); return; }; }; int ret = getaddrinfo(interface_s.empty()?NULL:interface_s.c_str(), port_s.c_str(), &hint, &info); if (ret != 0) { std::string err_str = gai_strerror(ret); if(interface_s.empty()) { logger.msg(ERROR, "Failed to obtain local address for port %s - %s", port_s, err_str); } else { logger.msg(ERROR, "Failed to obtain local address for %s:%s - %s", interface_s, port_s, err_str); }; return; }; bool bound = false; for(struct addrinfo *info_ = info;info_;info_=info_->ai_next) { if(interface_s.empty()) { logger.msg(VERBOSE, "Trying to listen on TCP port %s(%s)", port_s, PROTO_NAME(info_)); } else { logger.msg(VERBOSE, "Trying to listen on %s:%s(%s)", interface_s, port_s, PROTO_NAME(info_)); }; int s = ::socket(info_->ai_family,info_->ai_socktype,info_->ai_protocol); if(s == -1) { std::string e = StrError(errno); if(interface_s.empty()) { logger.msg(ERROR, "Failed to create socket for for listening at TCP port %s(%s): %s", port_s, PROTO_NAME(info_),e); } else { logger.msg(ERROR, "Failed to create socket for for listening at %s:%s(%s): %s", interface_s, port_s, PROTO_NAME(info_),e); }; continue; }; // Set REUSEADDR so that after crash re-binding works despite TIME_WAIT sockets int resuseaddr_arg = 1; setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &resuseaddr_arg, sizeof(resuseaddr_arg)); #ifdef IPV6_V6ONLY if(info_->ai_family == AF_INET6) { int v = 1; // Some systems (Linux for example) make v6 support v4 too // by default. Some don't. Make it same for everyone - // separate sockets for v4 and v6. 
if(setsockopt(s,IPPROTO_IPV6,IPV6_V6ONLY,(const char*)(&v),sizeof(v)) != 0) { if(interface_s.empty()) { logger.msg(ERROR, "Failed to limit socket to IPv6 at TCP port %s - may cause errors for IPv4 at same port", port_s); } else { logger.msg(ERROR, "Failed to limit socket to IPv6 at %s:%s - may cause errors for IPv4 at same port", interface_s, port_s); }; }; }; #endif if(::bind(s,info_->ai_addr,info_->ai_addrlen) == -1) { std::string e = StrError(errno); if(interface_s.empty()) { logger.msg(ERROR, "Failed to bind socket for TCP port %s(%s): %s", port_s, PROTO_NAME(info_),e); } else { logger.msg(ERROR, "Failed to bind socket for %s:%s(%s): %s", interface_s, port_s, PROTO_NAME(info_),e); }; close(s); if(l["AllAddresses"]) { std::string v = l["AllAddresses"]; if((v == "false") || (v == "0")) { continue; }; }; bound = false; break; }; if(::listen(s,-1) == -1) { std::string e = StrError(errno); if(interface_s.empty()) { logger.msg(WARNING, "Failed to listen at TCP port %s(%s): %s", port_s, PROTO_NAME(info_),e); } else { logger.msg(WARNING, "Failed to listen at %s:%s(%s): %s", interface_s, port_s, PROTO_NAME(info_),e); }; close(s); continue; }; bool no_delay = false; if(l["NoDelay"]) { std::string v = l["NoDelay"]; if((v == "true") || (v == "1")) no_delay=true; } int timeout = 60; if (l["Timeout"]) { std::string v = l["Timeout"]; timeout = atoi(v.c_str()); } handles_.push_back(mcc_tcp_handle_t(s,timeout,no_delay)); if(interface_s.empty()) { logger.msg(INFO, "Listening on TCP port %s(%s)", port_s, PROTO_NAME(info_)); } else { logger.msg(INFO, "Listening on %s:%s(%s)", interface_s, port_s, PROTO_NAME(info_)); }; bound = true; }; freeaddrinfo(info); if(!bound) { if(version_s.empty()) { logger.msg(ERROR, "Failed to start listening on any address for %s:%s", interface_s, port_s); } else { logger.msg(ERROR, "Failed to start listening on any address for %s:%s(IPv%s)", interface_s, port_s, version_s); }; return; }; }; if(handles_.size() == 0) { logger.msg(ERROR, "No listening ports initiated"); return; }; if((*cfg)["Limit"]) { std::string v = (*cfg)["Limit"]; max_executers_ = atoi(v.c_str()); v=(std::string)((*cfg)["Limit"].Attribute("drop")); if((v == "yes") || (v == "true") || (v == "1")) { max_executers_drop_=true; }; if(max_executers_ > 0) { logger.msg(INFO, "Setting connections limit to %i, connections over limit will be %s",max_executers_,max_executers_drop_?"dropped":"put on hold"); }; }; if(!CreateThreadFunction(&listener,this)) { logger.msg(ERROR, "Failed to start thread for listening"); for(std::list::iterator i = handles_.begin();i!=handles_.end();i=handles_.erase(i)) ::close(i->handle); }; valid_ = true; } MCC_TCP_Service::~MCC_TCP_Service(void) { //logger.msg(VERBOSE, "TCP_Service destroy"); lock_.lock(); for(std::list::iterator i = handles_.begin();i!=handles_.end();++i) { ::close(i->handle); i->handle=-1; }; for(std::list::iterator e = executers_.begin();e != executers_.end();++e) { ::shutdown(e->handle,2); }; if(!valid_) { for(std::list::iterator i = handles_.begin();i!=handles_.end();i=handles_.erase(i)) { }; }; // Wait for threads to exit while(executers_.size() > 0) { lock_.unlock(); sleep(1); lock_.lock(); }; while(handles_.size() > 0) { lock_.unlock(); sleep(1); lock_.lock(); }; lock_.unlock(); #ifdef WIN32 WSACleanup(); #endif } MCC_TCP_Service::mcc_tcp_exec_t::mcc_tcp_exec_t(MCC_TCP_Service* o,int h,int t,bool nd):obj(o),handle(h),no_delay(nd),timeout(t) { if(handle == -1) return; // list is locked externally std::list::iterator e = o->executers_.insert(o->executers_.end(),*this); 
if(!CreateThreadFunction(&MCC_TCP_Service::executer,&(*e))) { logger.msg(ERROR, "Failed to start thread for communication"); ::shutdown(handle,2); #ifdef WIN32 ::closesocket(handle); handle=-1; o->executers_.erase(e); #else ::close(handle); handle=-1; o->executers_.erase(e); #endif }; } void MCC_TCP_Service::listener(void* arg) { MCC_TCP_Service& it = *((MCC_TCP_Service*)arg); for(;;) { int max_s = -1; fd_set readfds; FD_ZERO(&readfds); it.lock_.lock(); for(std::list::iterator i = it.handles_.begin();i!=it.handles_.end();) { int s = i->handle; if(s == -1) { i=it.handles_.erase(i); continue; }; FD_SET(s,&readfds); if(s > max_s) max_s = s; ++i; }; it.lock_.unlock(); if(max_s == -1) break; struct timeval tv; tv.tv_sec = 2; tv.tv_usec = 0; int n = select(max_s+1,&readfds,NULL,NULL,&tv); if(n < 0) { if(ErrNo != EINTR) { logger.msg(ERROR, "Failed while waiting for connection request"); it.lock_.lock(); for(std::list::iterator i = it.handles_.begin();i!=it.handles_.end();) { int s = i->handle; ::close(s); i=it.handles_.erase(i); }; it.lock_.unlock(); return; }; continue; } else if(n == 0) continue; it.lock_.lock(); for(std::list::iterator i = it.handles_.begin();i!=it.handles_.end();++i) { int s = i->handle; if(s == -1) continue; if(FD_ISSET(s,&readfds)) { it.lock_.unlock(); struct sockaddr addr; socklen_t addrlen = sizeof(addr); int h = ::accept(s,&addr,&addrlen); if(h == -1) { logger.msg(ERROR, "Failed to accept connection request"); it.lock_.lock(); } else { it.lock_.lock(); bool rejected = false; bool first_time = true; while((it.max_executers_ > 0) && (it.executers_.size() >= (size_t) it.max_executers_)) { if(it.max_executers_drop_) { logger.msg(WARNING, "Too many connections - dropping new one"); ::shutdown(h,2); ::close(h); rejected = true; break; } else { if(first_time) logger.msg(WARNING, "Too many connections - waiting for old to close"); Glib::TimeVal etime; etime.assign_current_time(); etime.add_milliseconds(10000); // 10 s it.cond_.timed_wait(it.lock_,etime); first_time = false; }; }; if(!rejected) { mcc_tcp_exec_t t(&it,h,i->timeout,i->no_delay); }; }; }; }; it.lock_.unlock(); }; return; } class TCPSecAttr: public SecAttr { friend class MCC_TCP_Service; public: TCPSecAttr(const std::string& remote_ip, const std::string &remote_port, const std::string& local_ip, const std::string& local_port); virtual ~TCPSecAttr(void); virtual operator bool(void); virtual bool Export(SecAttrFormat format,XMLNode &val) const; virtual std::string get(const std::string& id) const; protected: std::string local_ip_; std::string local_port_; std::string remote_ip_; std::string remote_port_; virtual bool equal(const SecAttr &b) const; }; TCPSecAttr::TCPSecAttr(const std::string& remote_ip, const std::string &remote_port, const std::string& local_ip, const std::string& local_port) : local_ip_(local_ip), local_port_(local_port), remote_ip_(remote_ip), remote_port_(remote_port) { } TCPSecAttr::~TCPSecAttr(void) { } TCPSecAttr::operator bool(void) { return true; } std::string TCPSecAttr::get(const std::string& id) const { if(id == "LOCALIP") return local_ip_; if(id == "LOCALPORT") return local_port_; if(id == "REMOTEIP") return remote_ip_; if(id == "REMOTEPORT") return remote_port_; return ""; } bool TCPSecAttr::equal(const SecAttr &b) const { try { const TCPSecAttr& a = (const TCPSecAttr&)b; if((!local_ip_.empty()) && (!a.local_ip_.empty()) && (local_ip_ != a.local_ip_)) return false; if((!local_port_.empty()) && (!a.local_port_.empty()) && (local_port_ != a.local_port_)) return false; 
if((!remote_ip_.empty()) && (!a.remote_ip_.empty()) && (remote_ip_ != a.remote_ip_)) return false; if((!remote_port_.empty()) && (!a.remote_port_.empty()) && (remote_port_ != a.remote_port_)) return false; return true; } catch(std::exception&) { }; return false; } static void fill_arc_string_attribute(XMLNode object,std::string value,const char* id) { object=value; object.NewAttribute("Type")="string"; object.NewAttribute("AttributeId")=id; } static void fill_xacml_string_attribute(XMLNode object,std::string value,const char* id) { object.NewChild("ra:AttributeValue")=value; object.NewAttribute("DataType")="xs:string"; object.NewAttribute("AttributeId")=id; } #define TCP_SECATTR_REMOTE_NS "http://www.nordugrid.org/schemas/policy-arc/types/tcp/remoteendpoint" #define TCP_SECATTR_LOCAL_NS "http://www.nordugrid.org/schemas/policy-arc/types/tcp/localendpoint" bool TCPSecAttr::Export(SecAttrFormat format,XMLNode &val) const { if(format == UNDEFINED) { } else if(format == ARCAuth) { NS ns; ns["ra"]="http://www.nordugrid.org/schemas/request-arc"; val.Namespaces(ns); val.Name("ra:Request"); XMLNode item = val.NewChild("ra:RequestItem"); if(!local_port_.empty()) { fill_arc_string_attribute(item.NewChild("ra:Resource"),local_ip_+":"+local_port_,TCP_SECATTR_LOCAL_NS); } else if(!local_ip_.empty()) { fill_arc_string_attribute(item.NewChild("ra:Resource"),local_ip_,TCP_SECATTR_LOCAL_NS); }; if(!remote_port_.empty()) { fill_arc_string_attribute(item.NewChild("ra:Subject").NewChild("ra:SubjectAttribute"),remote_ip_+":"+remote_port_,TCP_SECATTR_REMOTE_NS); } else if(!remote_ip_.empty()) { fill_arc_string_attribute(item.NewChild("ra:Subject").NewChild("ra:SubjectAttribute"),remote_ip_,TCP_SECATTR_REMOTE_NS); }; return true; } else if(format == XACML) { NS ns; ns["ra"]="urn:oasis:names:tc:xacml:2.0:context:schema:os"; val.Namespaces(ns); val.Name("ra:Request"); if(!local_port_.empty()) { fill_xacml_string_attribute(val.NewChild("ra:Resource").NewChild("ra:Attribute"),local_ip_+":"+local_port_,TCP_SECATTR_LOCAL_NS); } else if(!local_ip_.empty()) { fill_xacml_string_attribute(val.NewChild("ra:Resource").NewChild("ra:Attribute"),local_ip_,TCP_SECATTR_LOCAL_NS); }; if(!remote_port_.empty()) { fill_xacml_string_attribute(val.NewChild("ra:Subject").NewChild("ra:Attribute"),remote_ip_+":"+remote_port_,TCP_SECATTR_REMOTE_NS); } else if(!remote_ip_.empty()) { fill_xacml_string_attribute(val.NewChild("ra:Subject").NewChild("ra:Attribute"),remote_ip_,"http://www.nordugrid.org/schemas/policy-arc/types/tcp/remoteiendpoint"); }; return true; } else { }; return false; } static bool get_host_port(struct sockaddr_storage *addr, std::string &host, std::string &port) { char buf[INET6_ADDRSTRLEN]; memset(buf,0,sizeof(buf)); const char *ret = NULL; switch (addr->ss_family) { case AF_INET: { struct sockaddr_in *sin = (struct sockaddr_in *)addr; ret = inet_ntop(AF_INET, &(sin->sin_addr), buf, sizeof(buf)-1); if (ret != NULL) { port = tostring(ntohs(sin->sin_port)); } break; } case AF_INET6: { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr; if (!IN6_IS_ADDR_V4MAPPED(&(sin6->sin6_addr))) { ret = inet_ntop(AF_INET6, &(sin6->sin6_addr), buf, sizeof(buf)-1); } else { // ipv4 address mapped to ipv6 so resolve as ipv4 address struct sockaddr_in sin; memset(&sin, 0, sizeof(struct sockaddr_in)); sin.sin_family = AF_INET; sin.sin_port = sin6->sin6_port; sin.sin_addr.s_addr = ((uint32_t *)&sin6->sin6_addr)[3]; memcpy(addr, &sin, sizeof(struct sockaddr_in)); ret = inet_ntop(AF_INET, &(sin.sin_addr), buf, sizeof(buf)-1); } if (ret != 
NULL) { port = tostring(ntohs(sin6->sin6_port)); } break; } default: return false; break; } if (ret != NULL) { buf[sizeof(buf)-1] = 0; host = buf; } else { return false; } return true; } void MCC_TCP_Service::executer(void* arg) { MCC_TCP_Service& it = *(((mcc_tcp_exec_t*)arg)->obj); int s = ((mcc_tcp_exec_t*)arg)->handle; int no_delay = ((mcc_tcp_exec_t*)arg)->no_delay; int timeout = ((mcc_tcp_exec_t*)arg)->timeout; std::string host_attr,port_attr; std::string remotehost_attr,remoteport_attr; std::string endpoint_attr; // Extract useful attributes { struct sockaddr_storage addr; socklen_t addrlen; addrlen=sizeof(addr); if(getsockname(s, (struct sockaddr*)(&addr), &addrlen) == 0) { if (get_host_port(&addr, host_attr, port_attr) == true) { endpoint_attr = "://"+host_attr+":"+port_attr; } } if(getpeername(s, (struct sockaddr*)&addr, &addrlen) == 0) { get_host_port(&addr, remotehost_attr, remoteport_attr); } // SESSIONID }; // Creating stream payload PayloadTCPSocket stream(s, timeout, logger); stream.NoDelay(no_delay); MessageContext context; MessageAuthContext auth_context; for(;;) { // TODO: Check state of socket here and leave immediately if not connected anymore. // Preparing Message objects for chain MessageAttributes attributes_in; MessageAttributes attributes_out; MessageAuth auth_in; MessageAuth auth_out; Message nextinmsg; Message nextoutmsg; nextinmsg.Payload(&stream); nextinmsg.Attributes(&attributes_in); nextinmsg.Attributes()->set("TCP:HOST",host_attr); nextinmsg.Attributes()->set("TCP:PORT",port_attr); nextinmsg.Attributes()->set("TCP:REMOTEHOST",remotehost_attr); nextinmsg.Attributes()->set("TCP:REMOTEPORT",remoteport_attr); nextinmsg.Attributes()->set("TCP:ENDPOINT",endpoint_attr); nextinmsg.Attributes()->set("ENDPOINT",endpoint_attr); nextinmsg.Context(&context); nextinmsg.Auth(&auth_in); TCPSecAttr* tattr = new TCPSecAttr(remotehost_attr, remoteport_attr, host_attr, port_attr); nextinmsg.Auth()->set("TCP",tattr); nextinmsg.AuthContext(&auth_context); nextoutmsg.Attributes(&attributes_out); nextoutmsg.Context(&context); nextoutmsg.Auth(&auth_out); nextoutmsg.AuthContext(&auth_context); if(!it.ProcessSecHandlers(nextinmsg,"incoming")) break; // Call next MCC MCCInterface* next = it.Next(); if(!next) break; logger.msg(VERBOSE, "next chain element called"); MCC_Status ret = next->process(nextinmsg,nextoutmsg); if(!it.ProcessSecHandlers(nextoutmsg,"outgoing")) { if(nextoutmsg.Payload()) delete nextoutmsg.Payload(); break; }; // If nextoutmsg contains some useful payload send it here. 
// So far only buffer payload is supported // Extracting payload if(nextoutmsg.Payload()) { PayloadRawInterface* outpayload = NULL; try { outpayload = dynamic_cast(nextoutmsg.Payload()); } catch(std::exception& e) { }; if(!outpayload) { logger.msg(WARNING, "Only Raw Buffer payload is supported for output"); } else { // Sending payload for(int n=0;;++n) { char* buf = outpayload->Buffer(n); if(!buf) break; int bufsize = outpayload->BufferSize(n); if(!(stream.Put(buf,bufsize))) { logger.msg(ERROR, "Failed to send content of buffer"); break; }; }; }; delete nextoutmsg.Payload(); }; if(!ret) break; }; it.lock_.lock(); for(std::list::iterator e = it.executers_.begin();e != it.executers_.end();++e) { if((mcc_tcp_exec_t*)arg == &(*e)) { logger.msg(VERBOSE, "TCP executor is removed"); if(s != e->handle) logger.msg(ERROR, "Sockets do not match on exit %i != %i",s,e->handle); it.executers_.erase(e); break; }; }; ::shutdown(s,2); ::close(s); it.cond_.signal(); it.lock_.unlock(); return; } MCC_Status MCC_TCP_Service::process(Message&,Message&) { // Service is not really processing messages because there // are no lower lelel MCCs in chain. return MCC_Status(); } MCC_TCP_Client::MCC_TCP_Client(Config *cfg, PluginArgument* parg):MCC_TCP(cfg,parg),s_(NULL) { #ifdef WIN32 WSADATA wsadata; if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) { logger.msg(ERROR, "Cannot initialize winsock library"); return; } #endif XMLNode c = (*cfg)["Connect"][0]; if(!c) { logger.msg(ERROR,"No Connect element specified"); return; }; std::string port_s = c["Port"]; if(port_s.empty()) { logger.msg(ERROR,"Missing Port in Connect element"); return; }; std::string host_s = c["Host"]; if(host_s.empty()) { logger.msg(ERROR,"Missing Host in Connect element"); return; }; int port = atoi(port_s.c_str()); std::string timeout_s = c["Timeout"]; int timeout = 60; if (!timeout_s.empty()) { timeout = atoi(timeout_s.c_str()); } s_ = new PayloadTCPSocket(host_s.c_str(),port,timeout,logger); if(!(*s_)) { // Connection error is reported in process() } else { std::string v = c["NoDelay"]; s_->NoDelay((v == "true") || (v == "1")); }; } MCC_TCP_Client::~MCC_TCP_Client(void) { if(s_) delete(s_); #ifdef WIN32 WSACleanup(); #endif } MCC_Status MCC_TCP_Client::process(Message& inmsg,Message& outmsg) { // Accepted payload is Raw and Stream // Returned payload is Stream logger.msg(DEBUG, "TCP client process called"); //outmsg.Attributes(inmsg.Attributes()); //outmsg.Context(inmsg.Context()); if(!s_) return MCC_Status(GENERIC_ERROR,"TCP","Not connected"); if(!*s_) return MCC_Status(GENERIC_ERROR,"TCP",s_->GetError()); // Extracting payload if(!inmsg.Payload()) return MCC_Status(GENERIC_ERROR); PayloadRawInterface* rinpayload = NULL; PayloadStreamInterface* sinpayload = NULL; try { rinpayload = dynamic_cast(inmsg.Payload()); } catch(std::exception& e) { }; try { sinpayload = dynamic_cast(inmsg.Payload()); } catch(std::exception& e) { }; if((!rinpayload) && (!sinpayload)) return MCC_Status(GENERIC_ERROR); if(!ProcessSecHandlers(inmsg,"outgoing")) return MCC_Status(GENERIC_ERROR,"TCP","Auth processing failed"); // Sending payload if(rinpayload) { for(int n=0;;++n) { char* buf = rinpayload->Buffer(n); if(!buf) break; int bufsize = rinpayload->BufferSize(n); if(!(s_->Put(buf,bufsize))) { logger.msg(INFO, "Failed to send content of buffer"); return MCC_Status(GENERIC_ERROR,"TCP",s_->GetError()); }; }; } else { int size = -1; if(!sinpayload->Get(*s_,size)) { // Currently false may also mean that stream finihsed. // Hence it can't be used to indicate real failure. 
// logger.msg(INFO, "Failed to transfer content of stream"); // return MCC_Status(GENERIC_ERROR,"TCP",s_->GetError()); }; }; std::string host_attr,port_attr; std::string remotehost_attr,remoteport_attr; std::string endpoint_attr; // Extract useful attributes { struct sockaddr_storage addr; socklen_t addrlen; addrlen=sizeof(addr); if (getsockname(s_->GetHandle(), (struct sockaddr*)&addr, &addrlen) == 0) get_host_port(&addr, host_attr, port_attr); addrlen=sizeof(addr); if (getpeername(s_->GetHandle(), (struct sockaddr*)&addr, &addrlen) == 0) if (get_host_port(&addr, remotehost_attr, remoteport_attr)) endpoint_attr = "://"+remotehost_attr+":"+remoteport_attr; } outmsg.Payload(new PayloadTCPSocket(*s_)); outmsg.Attributes()->set("TCP:HOST",host_attr); outmsg.Attributes()->set("TCP:PORT",port_attr); outmsg.Attributes()->set("TCP:REMOTEHOST",remotehost_attr); outmsg.Attributes()->set("TCP:REMOTEPORT",remoteport_attr); outmsg.Attributes()->set("TCP:ENDPOINT",endpoint_attr); outmsg.Attributes()->set("ENDPOINT",endpoint_attr); if(!ProcessSecHandlers(outmsg,"incoming")) return MCC_Status(GENERIC_ERROR,"TCP","Auth processing failed"); return MCC_Status(STATUS_OK); } } // namespace ArcMCCTCP nordugrid-arc-5.4.2/src/hed/mcc/tcp/PaxHeaders.7502/README0000644000000000000000000000012411037472457020746 xustar000000000000000027 mtime=1216247087.350292 27 atime=1513200575.121709 30 ctime=1513200660.533754014 nordugrid-arc-5.4.2/src/hed/mcc/tcp/README0000644000175000002070000000007211037472457021012 0ustar00mockbuildmock00000000000000MCC that handles TCP messages on server and client sides. nordugrid-arc-5.4.2/src/hed/mcc/PaxHeaders.7502/soap0000644000000000000000000000012613214316024020151 xustar000000000000000028 mtime=1513200660.6307552 30 atime=1513200668.723854182 28 ctime=1513200660.6307552 nordugrid-arc-5.4.2/src/hed/mcc/soap/0000755000175000002070000000000013214316024020271 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/src/hed/mcc/soap/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612052416515022267 xustar000000000000000027 mtime=1353325901.850498 29 atime=1513200599.42500663 30 ctime=1513200660.626755151 nordugrid-arc-5.4.2/src/hed/mcc/soap/Makefile.am0000644000175000002070000000076612052416515022343 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libmccsoap.la libmccsoap_la_SOURCES = MCCSOAP.cpp MCCSOAP.h libmccsoap_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libmccsoap_la_LIBADD = \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) libmccsoap_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-5.4.2/src/hed/mcc/soap/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315727022301 xustar000000000000000030 mtime=1513200599.469007168 30 atime=1513200648.573607736 30 ctime=1513200660.627755163 nordugrid-arc-5.4.2/src/hed/mcc/soap/Makefile.in0000644000175000002070000006147313214315727022362 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/hed/mcc/soap DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libmccsoap_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) am_libmccsoap_la_OBJECTS = libmccsoap_la-MCCSOAP.lo libmccsoap_la_OBJECTS = $(am_libmccsoap_la_OBJECTS) libmccsoap_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libmccsoap_la_CXXFLAGS) \ $(CXXFLAGS) $(libmccsoap_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) 
$(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libmccsoap_la_SOURCES) DIST_SOURCES = $(libmccsoap_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = 
@GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ 
SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ pkglib_LTLIBRARIES = libmccsoap.la libmccsoap_la_SOURCES = MCCSOAP.cpp MCCSOAP.h libmccsoap_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libmccsoap_la_LIBADD = \ $(top_builddir)/src/hed/libs/ws/libarcws.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(LIBXML2_LIBS) libmccsoap_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && 
$(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/hed/mcc/soap/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/hed/mcc/soap/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done libmccsoap.la: $(libmccsoap_la_OBJECTS) $(libmccsoap_la_DEPENDENCIES) $(libmccsoap_la_LINK) -rpath $(pkglibdir) $(libmccsoap_la_OBJECTS) $(libmccsoap_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmccsoap_la-MCCSOAP.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) 
$(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< libmccsoap_la-MCCSOAP.lo: MCCSOAP.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmccsoap_la_CXXFLAGS) $(CXXFLAGS) -MT libmccsoap_la-MCCSOAP.lo -MD -MP -MF $(DEPDIR)/libmccsoap_la-MCCSOAP.Tpo -c -o libmccsoap_la-MCCSOAP.lo `test -f 'MCCSOAP.cpp' || echo '$(srcdir)/'`MCCSOAP.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libmccsoap_la-MCCSOAP.Tpo $(DEPDIR)/libmccsoap_la-MCCSOAP.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='MCCSOAP.cpp' object='libmccsoap_la-MCCSOAP.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmccsoap_la_CXXFLAGS) $(CXXFLAGS) -c -o libmccsoap_la-MCCSOAP.lo `test -f 'MCCSOAP.cpp' || echo '$(srcdir)/'`MCCSOAP.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo 
"/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/src/hed/mcc/soap/PaxHeaders.7502/MCCSOAP.cpp0000644000000000000000000000012412675602216022030 xustar000000000000000027 mtime=1459029134.924374 27 atime=1513200575.145709 30 ctime=1513200660.628755176 nordugrid-arc-5.4.2/src/hed/mcc/soap/MCCSOAP.cpp0000644000175000002070000003504712675602216022106 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "MCCSOAP.h" Arc::Logger ArcMCCSOAP::MCC_SOAP::logger(Arc::Logger::getRootLogger(), "MCC.SOAP"); ArcMCCSOAP::MCC_SOAP::MCC_SOAP(Arc::Config *cfg,PluginArgument* parg) : Arc::MCC(cfg,parg) { } static Arc::Plugin* get_mcc_service(Arc::PluginArgument* arg) { Arc::MCCPluginArgument* mccarg = arg?dynamic_cast(arg):NULL; if(!mccarg) return NULL; return new ArcMCCSOAP::MCC_SOAP_Service((Arc::Config*)(*mccarg),mccarg); } static Arc::Plugin* get_mcc_client(Arc::PluginArgument* arg) { Arc::MCCPluginArgument* mccarg = arg?dynamic_cast(arg):NULL; if(!mccarg) return NULL; return new ArcMCCSOAP::MCC_SOAP_Client((Arc::Config*)(*mccarg),mccarg); } extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "soap.service", "HED:MCC", NULL, 0, &get_mcc_service }, { "soap.client", "HED:MCC", NULL, 0, &get_mcc_client }, { NULL, NULL, NULL, 0, NULL } }; namespace ArcMCCSOAP { using namespace Arc; class SOAPSecAttr: public SecAttr { friend class MCC_SOAP_Service; friend class MCC_SOAP_Client; public: SOAPSecAttr(PayloadSOAP& payload); virtual ~SOAPSecAttr(void); virtual operator bool(void) const; virtual bool Export(SecAttrFormat format,XMLNode &val) const; virtual std::string get(const std::string& id) const; protected: std::string action_; std::string object_; std::string context_; virtual bool equal(const SecAttr &b) const; }; SOAPSecAttr::SOAPSecAttr(PayloadSOAP& payload) { action_=payload.Child().Name(); context_=payload.Child().Namespace(); if(WSAHeader::Check(payload)) object_ = WSAHeader(payload).To(); } SOAPSecAttr::~SOAPSecAttr(void) { } SOAPSecAttr::operator bool(void) const { return !action_.empty(); } std::string SOAPSecAttr::get(const std::string& id) const { if(id == "ACTION") return action_; if(id == "OBJECT") return object_; if(id == "CONTEXT") return context_; return ""; } bool SOAPSecAttr::equal(const SecAttr &b) const { try { const SOAPSecAttr& a = (const SOAPSecAttr&)b; return ((action_ == a.action_) && (context_ == a.context_)); } catch(std::exception&) { }; return false; } bool SOAPSecAttr::Export(SecAttrFormat format,XMLNode &val) const { if(format == UNDEFINED) { } else if(format == ARCAuth) { NS ns; ns["ra"]="http://www.nordugrid.org/schemas/request-arc"; val.Namespaces(ns); val.Name("ra:Request"); XMLNode item = val.NewChild("ra:RequestItem"); if(!object_.empty()) { XMLNode object = item.NewChild("ra:Resource"); object=object_; object.NewAttribute("Type")="string"; object.NewAttribute("AttributeId")="http://www.nordugrid.org/schemas/policy-arc/types/soap/endpoint"; }; if(!action_.empty()) { XMLNode action = item.NewChild("ra:Action"); action=action_; action.NewAttribute("Type")="string"; action.NewAttribute("AttributeId")="http://www.nordugrid.org/schemas/policy-arc/types/soap/operation"; }; if(!context_.empty()) { XMLNode context = item.NewChild("ra:Context").NewChild("ra:ContextAttribute"); context=context_; context.NewAttribute("Type")="string"; context.NewAttribute("AttributeId")="http://www.nordugrid.org/schemas/policy-arc/types/soap/namespace"; }; return true; } else if(format == XACML) { NS ns; 
ns["ra"]="urn:oasis:names:tc:xacml:2.0:context:schema:os"; val.Namespaces(ns); val.Name("ra:Request"); if(!object_.empty()) { XMLNode object = val.NewChild("ra:Resource"); XMLNode attr = object.NewChild("ra:Attribute"); attr.NewChild("ra:AttributeValue") = object_; attr.NewAttribute("DataType")="xs:string"; attr.NewAttribute("AttributeId")="http://www.nordugrid.org/schemas/policy-arc/types/soap/endpoint"; }; if(!action_.empty()) { XMLNode action = val.NewChild("ra:Action"); XMLNode attr = action.NewChild("ra:Attribute"); attr.NewChild("ra:AttributeValue") = action_; attr.NewAttribute("DataType")="xs:string"; attr.NewAttribute("AttributeId")="http://www.nordugrid.org/schemas/policy-arc/types/soap/operation"; }; if(!context_.empty()) { XMLNode context = val.NewChild("ra:Environment"); XMLNode attr = context.NewChild("ra:Attribute"); attr.NewChild("ra:AttributeValue") = context_; attr.NewAttribute("DataType")="xs:string"; attr.NewAttribute("AttributeId")="http://www.nordugrid.org/schemas/policy-arc/types/soap/namespace"; }; return true; } else { }; return false; } MCC_SOAP_Service::MCC_SOAP_Service(Config *cfg,PluginArgument* parg):MCC_SOAP(cfg,parg) { } MCC_SOAP_Service::~MCC_SOAP_Service(void) { } MCC_SOAP_Client::MCC_SOAP_Client(Config *cfg,PluginArgument* parg):MCC_SOAP(cfg,parg) { } MCC_SOAP_Client::~MCC_SOAP_Client(void) { } static MCC_Status make_raw_fault(Message& outmsg,const char* reason = NULL) { NS ns; SOAPEnvelope soap(ns,true); soap.Fault()->Code(SOAPFault::Receiver); if(reason != NULL) soap.Fault()->Reason(0, reason); std::string xml; soap.GetXML(xml); PayloadRaw* payload = new PayloadRaw; payload->Insert(xml.c_str()); outmsg.Payload(payload); return MCC_Status(STATUS_OK); } static MCC_Status make_soap_fault(Message& outmsg,bool senderfault,const char* reason = NULL) { PayloadSOAP* soap = new PayloadSOAP(NS(),true); soap->Fault()->Code(senderfault?SOAPFault::Sender:SOAPFault::Receiver); if(reason != NULL) soap->Fault()->Reason(0, reason); outmsg.Payload(soap); return MCC_Status(STATUS_OK); } static MCC_Status make_soap_fault(Message& outmsg,Message& oldmsg,bool senderfault,const char* reason = NULL) { if(oldmsg.Payload()) { delete oldmsg.Payload(); oldmsg.Payload(NULL); }; return make_soap_fault(outmsg,senderfault,reason); } MCC_Status MCC_SOAP_Service::process(Message& inmsg,Message& outmsg) { // Extracting payload MessagePayload* inpayload = inmsg.Payload(); if(!inpayload) { logger.msg(WARNING, "empty input payload"); return make_raw_fault(outmsg,"Missing incoming request"); } // Converting payload to SOAP PayloadSOAP nextpayload(*inpayload); if(!nextpayload) { logger.msg(WARNING, "incoming message is not SOAP"); return make_raw_fault(outmsg,"Incoming request is not SOAP"); } // Creating message to pass to next MCC and setting new payload.. // Using separate message. But could also use same inmsg. // Just trying to keep it intact as much as possible. 
Message nextinmsg = inmsg; nextinmsg.Payload(&nextpayload); if(WSAHeader::Check(nextpayload)) { std::string endpoint_attr = WSAHeader(nextpayload).To(); nextinmsg.Attributes()->set("SOAP:ENDPOINT",endpoint_attr); nextinmsg.Attributes()->set("ENDPOINT",endpoint_attr); }; SOAPSecAttr* sattr = new SOAPSecAttr(nextpayload); nextinmsg.Auth()->set("SOAP",sattr); // Checking authentication and authorization; if(!ProcessSecHandlers(nextinmsg,"incoming")) { logger.msg(ERROR, "Security check failed in SOAP MCC for incoming message"); return make_raw_fault(outmsg, "Security check failed for SOAP request"); }; // TODO: pass SOAP action from HTTP header to SOAP:ACTION attribute // Call next MCC MCCInterface* next = Next(); if(!next) { logger.msg(WARNING, "empty next chain element"); return make_raw_fault(outmsg,"Internal error"); } Message nextoutmsg = outmsg; nextoutmsg.Payload(NULL); MCC_Status ret = next->process(nextinmsg,nextoutmsg); // Do checks and extract SOAP response if(!ret) { if(nextoutmsg.Payload()) delete nextoutmsg.Payload(); logger.msg(WARNING, "next element of the chain returned error status: %s",(std::string)ret); if(ret.getKind() == UNKNOWN_SERVICE_ERROR) { return make_raw_fault(outmsg,"No requested service found"); } else { return make_raw_fault(outmsg,"Internal error"); } } if(!nextoutmsg.Payload()) { logger.msg(WARNING, "next element of the chain returned empty payload"); return make_raw_fault(outmsg,"There is no response"); } PayloadSOAP* retpayload = NULL; try { retpayload = dynamic_cast(nextoutmsg.Payload()); } catch(std::exception& e) { }; if(!retpayload) { // There is a chance that next MCC decided to return pre-rendered SOAP // or otherwise valid non-SOAP response. For that case we simply pass // it back to previous MCC and let it decide. 
logger.msg(INFO, "next element of the chain returned unknown payload - passing through"); //Checking authentication and authorization; if(!ProcessSecHandlers(nextoutmsg,"outgoing")) { logger.msg(ERROR, "Security check failed in SOAP MCC for outgoing message"); delete nextoutmsg.Payload(); return make_raw_fault(outmsg,"Security check failed for SOAP response"); }; outmsg = nextoutmsg; return MCC_Status(STATUS_OK); }; if(!(*retpayload)) { delete retpayload; return make_raw_fault(outmsg,"There is no valid SOAP response"); }; //Checking authentication and authorization; if(!ProcessSecHandlers(nextoutmsg,"outgoing")) { logger.msg(ERROR, "Security check failed in SOAP MCC for outgoing message"); delete retpayload; return make_raw_fault(outmsg,"Security check failed for SOAP response"); }; // Convert to Raw - TODO: more efficient conversion PayloadRaw* outpayload = new PayloadRaw; std::string xml; retpayload->GetXML(xml); outpayload->Insert(xml.c_str()); outmsg = nextoutmsg; outmsg.Payload(NULL); // Specifying attributes for binding to underlying protocols - HTTP so far std::string soap_action; bool soap_action_defined = nextoutmsg.Attributes()->count("SOAP:ACTION") > 0; if(soap_action_defined) { soap_action=nextoutmsg.Attributes()->get("SOAP:ACTION"); } else { soap_action_defined=WSAHeader(*retpayload).hasAction(); soap_action=WSAHeader(*retpayload).Action(); }; if(retpayload->Version() == SOAPEnvelope::Version_1_2) { // TODO: For SOAP 1.2 Content-Type is not sent in case of error - probably harmless std::string mime_type("application/soap+xml"); if(soap_action_defined) mime_type+=" ;action=\""+soap_action+"\""; outmsg.Attributes()->set("HTTP:Content-Type",mime_type); } else { outmsg.Attributes()->set("HTTP:Content-Type","text/xml"); if(soap_action_defined) outmsg.Attributes()->set("HTTP:SOAPAction",soap_action); }; if(retpayload->Fault() != NULL) { // Maybe MCC_Status should be used instead ? outmsg.Attributes()->set("HTTP:CODE","500"); // TODO: For SOAP 1.2 :Sender fault must generate 400 outmsg.Attributes()->set("HTTP:REASON","SOAP FAULT"); // CONFUSED: SOAP 1.2 says that SOAP message is sent in response only if // HTTP code is 200 "Only if status line is 200, the SOAP message serialized according // to the rules for carrying SOAP messages in the media type given by the Content-Type // header field ...". But it also associates SOAP faults with HTTP error codes. So it // looks like in case of SOAP fault SOAP fault messages is not sent. That sounds // stupid - not implementing. }; delete retpayload; outmsg.Payload(outpayload); return MCC_Status(STATUS_OK); } MCC_Status MCC_SOAP_Client::process(Message& inmsg,Message& outmsg) { // Extracting payload if(!inmsg.Payload()) return make_soap_fault(outmsg,true,"No message to send"); PayloadSOAP* inpayload = NULL; try { inpayload = dynamic_cast(inmsg.Payload()); } catch(std::exception& e) { }; if(!inpayload) return make_soap_fault(outmsg,true,"No SOAP message to send"); //Checking authentication and authorization; if(!ProcessSecHandlers(inmsg,"outgoing")) { logger.msg(ERROR, "Security check failed in SOAP MCC for outgoing message"); return make_soap_fault(outmsg,true,"Security check failed for outgoing SOAP message"); }; // Converting payload to Raw PayloadRaw nextpayload; std::string xml; inpayload->GetXML(xml); nextpayload.Insert(xml.c_str()); // Creating message to pass to next MCC and setting new payload.. 
Message nextinmsg = inmsg; nextinmsg.Payload(&nextpayload); // Specifying attributes for binding to underlying protocols - HTTP so far std::string soap_action; bool soap_action_defined = inmsg.Attributes()->count("SOAP:ACTION") > 0; if(soap_action_defined) { soap_action=inmsg.Attributes()->get("SOAP:ACTION"); } else { soap_action_defined=true; //WSAHeader(*inpayload).hasAction(); - SOAPAction must be always present soap_action=WSAHeader(*inpayload).Action(); }; if(inpayload->Version() == SOAPEnvelope::Version_1_2) { std::string mime_type("application/soap+xml"); if(soap_action_defined) mime_type+=" ;action=\""+soap_action+"\""; nextinmsg.Attributes()->set("HTTP:Content-Type",mime_type); } else { nextinmsg.Attributes()->set("HTTP:Content-Type","text/xml"); if(soap_action_defined) nextinmsg.Attributes()->set("HTTP:SOAPAction","\""+soap_action+"\""); }; // Call next MCC MCCInterface* next = Next(); if(!next) return make_soap_fault(outmsg,true,"Internal chain failure: no next component"); Message nextoutmsg = outmsg; nextoutmsg.Payload(NULL); MCC_Status ret = next->process(nextinmsg,nextoutmsg); // Do checks and create SOAP response // TODO: pass SOAP action from HTTP header to SOAP:ACTION attribute if(!ret) { std::string errstr = "Failed to send SOAP message: "+(std::string)ret; return make_soap_fault(outmsg,nextoutmsg,false,errstr.c_str()); }; if(!nextoutmsg.Payload()) { return make_soap_fault(outmsg,nextoutmsg,false,"No response for SOAP message received"); }; MessagePayload* retpayload = nextoutmsg.Payload(); if(!retpayload) return make_soap_fault(outmsg,nextoutmsg,false,"No valid response for SOAP message received"); PayloadSOAP* outpayload = new PayloadSOAP(*retpayload); if(!outpayload) { return make_soap_fault(outmsg,nextoutmsg,false,"Response is not SOAP"); }; if(!(*outpayload)) { delete outpayload; return make_soap_fault(outmsg,nextoutmsg,false,"Response is not valid SOAP"); }; outmsg = nextoutmsg; outmsg.Payload(outpayload); delete nextoutmsg.Payload(); nextoutmsg.Payload(NULL); //Checking authentication and authorization; if(!ProcessSecHandlers(outmsg,"incoming")) { logger.msg(ERROR, "Security check failed in SOAP MCC for incoming message"); delete outpayload; return make_soap_fault(outmsg,false,"Security check failed for incoming SOAP message"); }; return MCC_Status(STATUS_OK); } } // namespace ArcMCCSOAP nordugrid-arc-5.4.2/src/hed/mcc/soap/PaxHeaders.7502/MCCSOAP.h0000644000000000000000000000012211730411253021461 xustar000000000000000027 mtime=1331827371.967902 27 atime=1513200575.149709 28 ctime=1513200660.6307552 nordugrid-arc-5.4.2/src/hed/mcc/soap/MCCSOAP.h0000644000175000002070000000323511730411253021533 0ustar00mockbuildmock00000000000000#ifndef __ARC_MCCSOAP_H__ #define __ARC_MCCSOAP_H__ #include namespace ArcMCCSOAP { using namespace Arc; //! A base class for SOAP client and service MCCs. /*! This is a base class for SOAP client and service MCCs. It provides some common functionality for them, i.e. so far only a logger. */ class MCC_SOAP : public MCC { public: MCC_SOAP(Config *cfg,PluginArgument* parg); protected: static Logger logger; }; /** This MCC parses SOAP message from input payload. On input payload with PayloadRawInterface is expected. It's converted into PayloadSOAP and passed next MCC. Returned PayloadSOAP is converted into PayloadRaw and returned to calling MCC. */ class MCC_SOAP_Service: public MCC_SOAP { public: /* Constructor takes configuration of MCC. 
Currently there are no configuration parameters for this MCC */ MCC_SOAP_Service(Config *cfg,PluginArgument* parg); virtual ~MCC_SOAP_Service(void); virtual MCC_Status process(Message&,Message&); }; /* This is client side of SOAP processing MCC. It accepts PayloadSOAP kind of payloads as incoming messages and produces same type as outgoing message. Communication to next MCC is done using payloads implementing PayloadRawInterface. */ class MCC_SOAP_Client: public MCC_SOAP { public: /* Constructor takes configuration of MCC. Currently there are no configuration parameters for this MCC */ MCC_SOAP_Client(Config *cfg,PluginArgument* parg); virtual ~MCC_SOAP_Client(void); virtual MCC_Status process(Message&,Message&); }; } // namespace ArcMCCSOAP #endif /* __ARC_MCCSOAP_H__ */ nordugrid-arc-5.4.2/src/hed/mcc/soap/PaxHeaders.7502/README0000644000000000000000000000012411037472457021122 xustar000000000000000027 mtime=1216247087.350292 27 atime=1513200575.149709 30 ctime=1513200660.624755127 nordugrid-arc-5.4.2/src/hed/mcc/soap/README0000644000175000002070000000007311037472457021167 0ustar00mockbuildmock00000000000000MCC that handles SOAP messages on server and client sides. nordugrid-arc-5.4.2/src/hed/mcc/PaxHeaders.7502/README0000644000000000000000000000012411037472457020160 xustar000000000000000027 mtime=1216247087.350292 27 atime=1513200575.128709 30 ctime=1513200660.506753683 nordugrid-arc-5.4.2/src/hed/mcc/README0000644000175000002070000000024411037472457020225 0ustar00mockbuildmock00000000000000Message Chain Components (MCC) http - handles HTTP messages soap - handles SOAP messages tcp - handles TCP messages tls - handles TLS based channel security nordugrid-arc-5.4.2/src/hed/PaxHeaders.7502/README0000644000000000000000000000012411016534143017402 xustar000000000000000027 mtime=1211807843.180089 27 atime=1513200574.997707 30 ctime=1513200658.748732182 nordugrid-arc-5.4.2/src/hed/README0000644000175000002070000000015211016534143017445 0ustar00mockbuildmock00000000000000This directory contains libraries and plugins which make basic low level functionality of ARC a.k.a. HED. nordugrid-arc-5.4.2/src/PaxHeaders.7502/README0000644000000000000000000000012412267746601016657 xustar000000000000000027 mtime=1390398849.863325 27 atime=1513200574.174697 30 ctime=1513200658.650730984 nordugrid-arc-5.4.2/src/README0000644000175000002070000000063312267746601016726 0ustar00mockbuildmock00000000000000Main source tree clients - ARC clients doxygen - Documentation genereated from code external - External software compiled from source hed - Libraries and plugins which make up the basic low-level functionality of ARC - the Hosting Environment Daemon (HED) libs - Other ARC libraries services - ARC services tests - Testing playground (?) 
utils - Utilities for HED code development nordugrid-arc-5.4.2/PaxHeaders.7502/VERSION0000644000000000000000000000012713214315410016242 xustar000000000000000027 mtime=1513200392.861335 30 atime=1513200579.469762569 30 ctime=1513200658.598730348 nordugrid-arc-5.4.2/VERSION0000644000175000002070000000000613214315410016300 0ustar00mockbuildmock000000000000005.4.2 nordugrid-arc-5.4.2/PaxHeaders.7502/arcbase.pc.in0000644000000000000000000000012711214143113017520 xustar000000000000000027 mtime=1244710475.851959 30 atime=1513200653.041662383 30 ctime=1513200658.603730409 nordugrid-arc-5.4.2/arcbase.pc.in0000644000175000002070000000047411214143113017567 0ustar00mockbuildmock00000000000000prefix=@prefix@ exec_prefix=${prefix} libdir=${exec_prefix}/lib includedir=${prefix}/include Name: ARC Base Description: Base libraries of Advaced Resource Connection - Grid Middleware Requires: glibmm-2.4 libxml-2.0 Version: @VERSION@ Libs: -L${libdir} -larccommon -larcloader -larcmessage Cflags: -I${includedir} nordugrid-arc-5.4.2/PaxHeaders.7502/mingw-nordugrid-arc.spec0000644000000000000000000000013213214316014021722 xustar000000000000000030 mtime=1513200652.971661526 30 atime=1513200658.421728183 30 ctime=1513200658.624730666 nordugrid-arc-5.4.2/mingw-nordugrid-arc.spec0000644000175000002070000005024413214316014021775 0ustar00mockbuildmock00000000000000%{?mingw_package_header} %global mingw_pkg_name nordugrid-arc # Cross-compilation platforms supported %global mingw_build_win32 1 %global mingw_build_win64 1 # External (currently unofficial) dependencies) %define with_mingw32_python 0 %define with_mingw32_xmlsec1 1 %define with_mingw32_globus 0 %define with_mingw64_python 0 %define with_mingw64_xmlsec1 1 %define with_mingw64_globus 0 %define pkgdir arc Name: mingw-%{mingw_pkg_name} Version: 5.4.2 Release: 1%{?dist} Summary: ARC Group: System Environment/Daemons License: ASL 2.0 URL: http://www.nordugrid.org/ Source: http://download.nordugrid.org/packages/nordugrid-arc/releases/%{version}/src/%{mingw_pkg_name}-%{version}.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) BuildArch: noarch # mingw32 %if %{mingw_build_win32} BuildRequires: mingw32-filesystem >= 65 BuildRequires: mingw32-binutils BuildRequires: mingw32-gcc BuildRequires: mingw32-gcc-c++ BuildRequires: mingw32-runtime BuildRequires: mingw32-headers BuildRequires: mingw32-dlfcn BuildRequires: mingw32-gettext %if %{with_mingw32_python} BuildRequires: mingw32-python %endif BuildRequires: mingw32-glibmm24 BuildRequires: mingw32-glib2 BuildRequires: mingw32-libxml2 BuildRequires: mingw32-openssl BuildRequires: mingw32-libgnurx %if %{with_mingw32_xmlsec1} BuildRequires: mingw32-xmlsec1 %endif BuildRequires: mingw32-cppunit BuildRequires: mingw32-libdb BuildRequires: mingw32-canl-c++ %if %{with_mingw32_globus} BuildRequires: mingw32-globus-common BuildRequires: mingw32-globus-ftp-client BuildRequires: mingw32-globus-ftp-control %endif %endif # mingw64 %if %{mingw_build_win64} BuildRequires: mingw64-filesystem >= 65 BuildRequires: mingw64-binutils BuildRequires: mingw64-gcc BuildRequires: mingw64-gcc-c++ BuildRequires: mingw64-runtime BuildRequires: mingw64-headers BuildRequires: mingw64-dlfcn BuildRequires: mingw64-gettext %if %{with_mingw64_python} BuildRequires: mingw64-python %endif BuildRequires: mingw64-glibmm24 BuildRequires: mingw64-glib2 BuildRequires: mingw64-libxml2 BuildRequires: mingw64-openssl BuildRequires: mingw64-libgnurx %if %{with_mingw64_xmlsec1} BuildRequires: mingw64-xmlsec1 %endif BuildRequires: 
mingw64-cppunit BuildRequires: mingw64-libdb BuildRequires: mingw64-canl-c++ %if %{with_mingw64_globus} BuildRequires: mingw64-globus-common BuildRequires: mingw64-globus-ftp-client BuildRequires: mingw64-globus-ftp-control %endif %endif BuildRequires: pkgconfig BuildRequires: swig %description NorduGrid ARC %if %{mingw_build_win32} %package -n mingw32-%{mingw_pkg_name} Summary: ARC core libraries Group: System Environment/Libraries Requires: mingw32-openssl %description -n mingw32-%{mingw_pkg_name} NorduGrid ARC core libraries %package -n mingw32-%{mingw_pkg_name}-client Summary: ARC command line interface Group: Applications/Internet Requires: mingw32-%{mingw_pkg_name} = %{version} Requires: mingw32-%{mingw_pkg_name}-plugins-needed = %{version} %description -n mingw32-%{mingw_pkg_name}-client ARC command line interface. %package -n mingw32-%{mingw_pkg_name}-hed Summary: ARC Hosting Environment Daemon Group: System Environment/Libraries Requires: mingw32-%{mingw_pkg_name} = %{version} %description -n mingw32-%{mingw_pkg_name}-hed ARC Hosting Environment Daemon (HED). %package -n mingw32-%{mingw_pkg_name}-plugins-needed Summary: ARC base plugins Group: System Environment/Libraries Requires: mingw32-%{mingw_pkg_name} = %{version} %description -n mingw32-%{mingw_pkg_name}-plugins-needed ARC base plugins. This includes the Message Chain Components (MCCs) and Data Manager Components (DMCs). %if %{with_mingw32_globus} %package -n mingw32-%{mingw_pkg_name}-plugins-globus Summary: ARC Globus plugins Group: System Environment/Libraries Requires: mingw32-%{mingw_pkg_name} = %{version} Requires: globus_common Requires: globus_ftp_client Requires: globus_ftp_control %description -n mingw32-%{mingw_pkg_name}-plugins-globus ARC Globus plugins. This includes the Globus dependent Data Manager Components (DMCs): libdmcgridftp.so %endif %package -n mingw32-%{mingw_pkg_name}-devel Summary: ARC development files Group: Development/Libraries Requires: mingw32-%{mingw_pkg_name} = %{version} Requires: mingw32-glibmm24 Requires: mingw32-glib2 Requires: mingw32-libxml2 Requires: mingw32-openssl %description -n mingw32-%{mingw_pkg_name}-devel Development files for ARC %package -n mingw32-%{mingw_pkg_name}-misc-utils Summary: NorduGrid misc tools Group: Applications/Internet Requires: mingw32-%{mingw_pkg_name} = %{version}-%{release} Requires: mingw32-%{mingw_pkg_name}-plugins-needed = %{version}-%{release} %description -n mingw32-%{mingw_pkg_name}-misc-utils NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains various for various tasks including testing. The package is usually not required by users or sysadmins but mainly for developers. 
%if %{with_mingw32_python} %package -n mingw32-%{mingw_pkg_name}-python Summary: ARC Python wrapper Group: Development/Libraries Requires: mingw32-%{mingw_pkg_name} = %{version} Requires: python %description -n mingw32-%{mingw_pkg_name}-python Python wrapper for ARC %endif %endif # mingw64 %if %{mingw_build_win64} %package -n mingw64-%{mingw_pkg_name} Summary: ARC core libraries Group: System Environment/Libraries Requires: mingw32-openssl %description -n mingw64-%{mingw_pkg_name} NorduGrid ARC core libraries %package -n mingw64-%{mingw_pkg_name}-client Summary: ARC command line interface Group: Applications/Internet Requires: mingw64-%{mingw_pkg_name} = %{version} Requires: mingw64-%{mingw_pkg_name}-plugins-needed = %{version} %description -n mingw64-%{mingw_pkg_name}-client ARC command line interface. %package -n mingw64-%{mingw_pkg_name}-hed Summary: ARC Hosting Environment Daemon Group: System Environment/Libraries Requires: mingw64-%{mingw_pkg_name} = %{version} %description -n mingw64-%{mingw_pkg_name}-hed ARC Hosting Environment Daemon (HED). %package -n mingw64-%{mingw_pkg_name}-plugins-needed Summary: ARC base plugins Group: System Environment/Libraries Requires: mingw64-%{mingw_pkg_name} = %{version} %description -n mingw64-%{mingw_pkg_name}-plugins-needed ARC base plugins. This includes the Message Chain Components (MCCs) and Data Manager Components (DMCs). %if %{with_mingw64_globus} %package -n mingw64-%{mingw_pkg_name}-plugins-globus Summary: ARC Globus plugins Group: System Environment/Libraries Requires: mingw64-%{mingw_pkg_name} = %{version} Requires: globus_common Requires: globus_ftp_client Requires: globus_ftp_control %description -n mingw64-%{mingw_pkg_name}-plugins-globus ARC Globus plugins. This includes the Globus dependent Data Manager Components (DMCs): libdmcgridftp.so %endif %package -n mingw64-%{mingw_pkg_name}-devel Summary: ARC development files Group: Development/Libraries Requires: mingw64-%{mingw_pkg_name} = %{version} Requires: mingw64-glibmm24 Requires: mingw64-glib2 Requires: mingw64-libxml2 Requires: mingw64-openssl %description -n mingw64-%{mingw_pkg_name}-devel Development files for ARC %package -n mingw64-%{mingw_pkg_name}-misc-utils Summary: NorduGrid misc tools Group: Applications/Internet Requires: mingw64-%{mingw_pkg_name} = %{version}-%{release} Requires: mingw64-%{mingw_pkg_name}-plugins-needed = %{version}-%{release} %description -n mingw64-%{mingw_pkg_name}-misc-utils NorduGrid is a collaboration aiming at development, maintenance and support of the free Grid middleware, known as the Advanced Resource Connector (ARC). This package contains various for various tasks including testing. The package is usually not required by users or sysadmins but mainly for developers. 
%if %{with_mingw64_python} %package -n mingw64-%{mingw_pkg_name}-python Summary: ARC Python wrapper Group: Development/Libraries Requires: mingw64-%{mingw_pkg_name} = %{version} Requires: python %description -n mingw64-%{mingw_pkg_name}-python Python wrapper for ARC %endif %endif %{?mingw_debug_package} %prep %setup -q -n %{mingw_pkg_name}-%{version} %build %mingw_configure \ --enable-canlxx \ --disable-java \ --disable-doc \ --disable-ws-monitor \ --disable-ldap-monitor \ --disable-acix \ --disable-static LDFLAGS=-Wl,--enable-auto-import %mingw_make %{?_smp_mflags} #make check %install rm -rf $RPM_BUILD_ROOT %mingw_make_install DESTDIR=$RPM_BUILD_ROOT find $RPM_BUILD_ROOT -type f -name \*.la -exec rm -fv '{}' ';' #mkdir -p $RPM_BUILD_ROOT/etc/init.d #cp -p src/hed/daemon/scripts/arched.redhat $RPM_BUILD_ROOT/etc/init.d/arched #chmod +x $RPM_BUILD_ROOT/etc/init.d/arched # RPM does it's own doc handling rm -fr $RPM_BUILD_ROOT%{mingw32_datadir}/doc/%{mingw_pkg_name}/ rm -fr $RPM_BUILD_ROOT%{mingw64_datadir}/doc/%{mingw_pkg_name}/ rm -fr $RPM_BUILD_ROOT%{mingw32_datadir}/arc/examples/sdk/*.py rm -fr $RPM_BUILD_ROOT%{mingw64_datadir}/arc/examples/sdk/*.py %mingw_find_lang %{mingw_pkg_name} %clean rm -rf $RPM_BUILD_ROOT %if %{mingw_build_win32} %files -n mingw32-%{mingw_pkg_name} -f mingw32-%{mingw_pkg_name}.lang %defattr(-,root,root,-) %doc README AUTHORS LICENSE NOTICE ChangeLog %{mingw32_bindir}/lib*.dll %{mingw32_libdir}/%{pkgdir}/libmodcrypto.dll* %{mingw32_libdir}/%{pkgdir}/libmodcredential.dll* %{mingw32_datadir}/%{pkgdir}/schema %{mingw32_libdir}/%{pkgdir}/arc-file-access.exe %{mingw32_libdir}/%{pkgdir}/DataStagingDelivery.exe %dir %{mingw32_datadir}/%{pkgdir}/test-jobs %{mingw32_datadir}/%{pkgdir}/test-jobs/test-job-* %files -n mingw32-%{mingw_pkg_name}-client %defattr(-,root,root,-) %{mingw32_datadir}/%{pkgdir}/examples/client.conf # # Executables # %{mingw32_bindir}/arcecho.exe %{mingw32_bindir}/arcinfo.exe %{mingw32_bindir}/arcproxy.exe %{mingw32_bindir}/arcproxyalt.exe %{mingw32_bindir}/arccat.exe %{mingw32_bindir}/arccp.exe %{mingw32_bindir}/arcls.exe %{mingw32_bindir}/arcrm.exe %{mingw32_bindir}/arcmkdir.exe %{mingw32_bindir}/arcrename.exe %if %{with_mingw32_xmlsec1} %endif %{mingw32_bindir}/arcstat.exe %{mingw32_bindir}/arcsub.exe %{mingw32_bindir}/arcsync.exe %{mingw32_bindir}/arcresub.exe %{mingw32_bindir}/arcget.exe %{mingw32_bindir}/arcclean.exe %{mingw32_bindir}/arckill.exe %{mingw32_bindir}/arcrenew.exe %{mingw32_bindir}/arcresume.exe %{mingw32_bindir}/arctest.exe # %{mingw32_sysconfdir}/arc/client.conf # # Man pages # %doc %{mingw32_mandir}/man1/arcecho.1* %doc %{mingw32_mandir}/man1/arcinfo.1* %doc %{mingw32_mandir}/man1/arcproxy.1* %if %{with_mingw32_xmlsec1} %endif %doc %{mingw32_mandir}/man1/arccat.1* %doc %{mingw32_mandir}/man1/arccp.1* %doc %{mingw32_mandir}/man1/arcls.1* %doc %{mingw32_mandir}/man1/arcrm.1* %doc %{mingw32_mandir}/man1/arcmkdir.1* %doc %{mingw32_mandir}/man1/arcrename.1* %doc %{mingw32_mandir}/man1/arcstat.1* %doc %{mingw32_mandir}/man1/arcsub.1* %doc %{mingw32_mandir}/man1/arcsync.1* %doc %{mingw32_mandir}/man1/arcresub.1* %doc %{mingw32_mandir}/man1/arcget.1* %doc %{mingw32_mandir}/man1/arcclean.1* %doc %{mingw32_mandir}/man1/arckill.1* %doc %{mingw32_mandir}/man1/arcrenew.1* %doc %{mingw32_mandir}/man1/arcresume.1* %doc %{mingw32_mandir}/man1/arctest.1* %doc %{mingw32_datadir}/%{pkgdir}/examples/echo/echo.wsdl %files -n mingw32-%{mingw_pkg_name}-hed %defattr(-,root,root,-) %{mingw32_sbindir}/arched.exe %doc %{mingw32_mandir}/man8/arched.8* %doc 
%{mingw32_mandir}/man5/arc.conf.5* %{mingw32_datadir}/%{pkgdir}/profiles %{mingw32_datadir}/%{pkgdir}/examples/config %{mingw32_datadir}/%{pkgdir}/examples/arc.conf.reference %doc %{mingw32_datadir}/%{pkgdir}/examples/echo/echo_service.xml.example %{mingw32_libdir}/%{pkgdir}/libecho.dll* %files -n mingw32-%{mingw_pkg_name}-devel %defattr(-,root,root,-) %{mingw32_includedir}/%{pkgdir} %{mingw32_libdir}/lib*.dll.a %{mingw32_bindir}/wsdl2hed.exe %doc %{mingw32_mandir}/man1/wsdl2hed.1* %{mingw32_bindir}/arcplugin.exe %doc %{mingw32_mandir}/man1/arcplugin.1* %{mingw32_libdir}/pkgconfig/arcbase.pc %doc %{mingw32_datadir}/%{pkgdir}/examples/sdk/*.cpp %doc %{mingw32_datadir}/%{pkgdir}/examples/sdk/*.h %files -n mingw32-%{mingw_pkg_name}-plugins-needed %defattr(-,root,root,-) %{mingw32_libdir}/%{pkgdir}/libmcchttp.dll* %{mingw32_libdir}/%{pkgdir}/libmccmsgvalidator.dll* %{mingw32_libdir}/%{pkgdir}/libmccsoap.dll* %{mingw32_libdir}/%{pkgdir}/libmcctcp.dll* %{mingw32_libdir}/%{pkgdir}/libmcctls.dll* %{mingw32_libdir}/%{pkgdir}/libdmcfile.dll* %{mingw32_libdir}/%{pkgdir}/libdmchttp.dll* %{mingw32_libdir}/%{pkgdir}/libdmcldap.dll* %{mingw32_libdir}/%{pkgdir}/libdmcsrm.dll* %{mingw32_libdir}/%{pkgdir}/libdmcrucio.dll* %{mingw32_libdir}/%{pkgdir}/libdmcacix.dll* %{mingw32_libdir}/%{pkgdir}/libarcshc.dll* %{mingw32_libdir}/%{pkgdir}/libarcshclegacy.dll* %{mingw32_libdir}/%{pkgdir}/libidentitymap.dll* %{mingw32_libdir}/%{pkgdir}/libarguspdpclient.dll* %{mingw32_libdir}/%{pkgdir}/libaccARC1.dll* %{mingw32_libdir}/%{pkgdir}/libaccBroker.dll* %{mingw32_libdir}/%{pkgdir}/libaccCREAM.dll* %{mingw32_libdir}/%{pkgdir}/libaccEMIES.dll* %{mingw32_libdir}/%{pkgdir}/libaccSER.dll* %{mingw32_libdir}/%{pkgdir}/libaccldap.dll* #%{mingw32_libdir}/%{pkgdir}/libaccUNICORE.dll* %{mingw32_libdir}/%{pkgdir}/libaccJobDescriptionParser.dll* %{mingw32_libdir}/%{pkgdir}/test/libaccTEST.dll* %if %{with_mingw32_globus} %files -n mingw32-%{mingw_pkg_name}-plugins-globus %defattr(-,root,root,-) %{mingw32_libdir}/%{pkgdir}/libdmcgridftp.dll* %{mingw32_libdir}/%{pkgdir}/libaccARC0.dll* %{mingw32_libexecdir}/%{pkgdir}/arc-lcas.exe %{mingw32_libexecdir}/%{pkgdir}/arc-lcmaps.exe %endif %files -n mingw32-%{mingw_pkg_name}-misc-utils %defattr(-,root,root,-) %{mingw32_bindir}/arcemiestest.exe %{mingw32_bindir}/arcperftest.exe %{mingw32_bindir}/arcwsrf.exe %doc %{mingw32_mandir}/man1/arcemiestest.1* %doc %{mingw32_mandir}/man1/arcperftest.1* %doc %{mingw32_mandir}/man1/arcwsrf.1* %if %{with_mingw32_xmlsec1} %{mingw32_bindir}/saml_assertion_init.exe %doc %{mingw32_mandir}/man1/saml_assertion_init.1* %endif %if %{with_mingw32_python} %files -n mingw32-%{mingw_pkg_name}-python %defattr(-,root,root,-) %{mingw32_libdir}/python?.?/site-packages/_arc*.dll* %{mingw32_libdir}/python?.?/site-packages/arc/__init__.py* %{mingw32_libdir}/python?.?/site-packages/arc/common.py* %{mingw32_libdir}/python?.?/site-packages/arc/loader.py* %{mingw32_libdir}/python?.?/site-packages/arc/message.py* %{mingw32_libdir}/python?.?/site-packages/arc/communication.py* %{mingw32_libdir}/python?.?/site-packages/arc/compute.py* %{mingw32_libdir}/python?.?/site-packages/arc/credential.py* %{mingw32_libdir}/python?.?/site-packages/arc/data.py* %{mingw32_libdir}/python?.?/site-packages/arc/delegation.py* %{mingw32_libdir}/python?.?/site-packages/arc/security.py* %{mingw32_libdir}/%{pkgdir}/libpythonservice.dll* %{mingw32_libdir}/%{pkgdir}/libaccPythonBroker.dll* %doc %{mingw32_datadir}/%{pkgdir}/examples/PythonBroker/ACIXBroker.py* %doc 
%{mingw32_datadir}/%{pkgdir}/examples/PythonBroker/SampleBroker.py* %doc %{mingw32_datadir}/%{pkgdir}/examples/sdk/*.py* %doc %{mingw32_datadir}/%{pkgdir}/examples/echo_python/* %endif %endif # # mingw64 # %if %{mingw_build_win64} %files -n mingw64-%{mingw_pkg_name} -f mingw64-%{mingw_pkg_name}.lang %defattr(-,root,root,-) %doc README AUTHORS LICENSE NOTICE ChangeLog %{mingw64_bindir}/lib*.dll %{mingw64_libdir}/%{pkgdir}/libmodcrypto.dll* %{mingw64_libdir}/%{pkgdir}/libmodcredential.dll* %{mingw64_datadir}/%{pkgdir}/schema %{mingw64_libdir}/%{pkgdir}/arc-file-access.exe %{mingw64_libdir}/%{pkgdir}/DataStagingDelivery.exe %dir %{mingw64_datadir}/%{pkgdir}/test-jobs %{mingw64_datadir}/%{pkgdir}/test-jobs/test-job-* %files -n mingw64-%{mingw_pkg_name}-client %defattr(-,root,root,-) %{mingw64_datadir}/%{pkgdir}/examples/client.conf # # Executables # %{mingw64_bindir}/arcecho.exe %{mingw64_bindir}/arcinfo.exe %{mingw64_bindir}/arcproxy.exe %{mingw64_bindir}/arcproxyalt.exe %{mingw64_bindir}/arccat.exe %{mingw64_bindir}/arccp.exe %{mingw64_bindir}/arcls.exe %{mingw64_bindir}/arcrm.exe %{mingw64_bindir}/arcmkdir.exe %{mingw64_bindir}/arcrename.exe %if %{with_mingw64_xmlsec1} %endif %{mingw64_bindir}/arcstat.exe %{mingw64_bindir}/arcsub.exe %{mingw64_bindir}/arcsync.exe %{mingw64_bindir}/arcresub.exe %{mingw64_bindir}/arcget.exe %{mingw64_bindir}/arcclean.exe %{mingw64_bindir}/arckill.exe %{mingw64_bindir}/arcrenew.exe %{mingw64_bindir}/arcresume.exe %{mingw64_bindir}/arctest.exe # %{mingw64_sysconfdir}/arc/client.conf # # Man pages # %doc %{mingw64_mandir}/man1/arcecho.1* %doc %{mingw64_mandir}/man1/arcinfo.1* %doc %{mingw64_mandir}/man1/arcproxy.1* %if %{with_mingw64_xmlsec1} %endif %doc %{mingw64_mandir}/man1/arccat.1* %doc %{mingw64_mandir}/man1/arccp.1* %doc %{mingw64_mandir}/man1/arcls.1* %doc %{mingw64_mandir}/man1/arcrm.1* %doc %{mingw64_mandir}/man1/arcmkdir.1* %doc %{mingw64_mandir}/man1/arcrename.1* %doc %{mingw64_mandir}/man1/arcstat.1* %doc %{mingw64_mandir}/man1/arcsub.1* %doc %{mingw64_mandir}/man1/arcsync.1* %doc %{mingw64_mandir}/man1/arcresub.1* %doc %{mingw64_mandir}/man1/arcget.1* %doc %{mingw64_mandir}/man1/arcclean.1* %doc %{mingw64_mandir}/man1/arckill.1* %doc %{mingw64_mandir}/man1/arcrenew.1* %doc %{mingw64_mandir}/man1/arcresume.1* %doc %{mingw64_mandir}/man1/arctest.1* %doc %{mingw64_datadir}/%{pkgdir}/examples/echo/echo.wsdl %files -n mingw64-%{mingw_pkg_name}-hed %defattr(-,root,root,-) %{mingw64_sbindir}/arched.exe %doc %{mingw64_mandir}/man8/arched.8* %doc %{mingw64_mandir}/man5/arc.conf.5* %{mingw64_datadir}/%{pkgdir}/profiles %{mingw64_datadir}/%{pkgdir}/examples/config %{mingw64_datadir}/%{pkgdir}/examples/arc.conf.reference %doc %{mingw64_datadir}/%{pkgdir}/examples/echo/echo_service.xml.example %{mingw64_libdir}/%{pkgdir}/libecho.dll* %files -n mingw64-%{mingw_pkg_name}-devel %defattr(-,root,root,-) %{mingw64_includedir}/%{pkgdir} %{mingw64_libdir}/lib*.dll.a %{mingw64_bindir}/wsdl2hed.exe %doc %{mingw64_mandir}/man1/wsdl2hed.1* %{mingw64_bindir}/arcplugin.exe %doc %{mingw64_mandir}/man1/arcplugin.1* %{mingw64_libdir}/pkgconfig/arcbase.pc %doc %{mingw64_datadir}/%{pkgdir}/examples/sdk/*.cpp %doc %{mingw64_datadir}/%{pkgdir}/examples/sdk/*.h %files -n mingw64-%{mingw_pkg_name}-plugins-needed %defattr(-,root,root,-) %{mingw64_libdir}/%{pkgdir}/libmcchttp.dll* %{mingw64_libdir}/%{pkgdir}/libmccmsgvalidator.dll* %{mingw64_libdir}/%{pkgdir}/libmccsoap.dll* %{mingw64_libdir}/%{pkgdir}/libmcctcp.dll* %{mingw64_libdir}/%{pkgdir}/libmcctls.dll* 
%{mingw64_libdir}/%{pkgdir}/libdmcfile.dll* %{mingw64_libdir}/%{pkgdir}/libdmchttp.dll* %{mingw64_libdir}/%{pkgdir}/libdmcldap.dll* %{mingw64_libdir}/%{pkgdir}/libdmcsrm.dll* %{mingw64_libdir}/%{pkgdir}/libdmcrucio.dll* %{mingw64_libdir}/%{pkgdir}/libdmcacix.dll* %{mingw64_libdir}/%{pkgdir}/libarcshc.dll* %{mingw64_libdir}/%{pkgdir}/libarcshclegacy.dll* %{mingw64_libdir}/%{pkgdir}/libidentitymap.dll* %{mingw64_libdir}/%{pkgdir}/libarguspdpclient.dll* %{mingw64_libdir}/%{pkgdir}/libaccARC1.dll* %{mingw64_libdir}/%{pkgdir}/libaccBroker.dll* %{mingw64_libdir}/%{pkgdir}/libaccCREAM.dll* %{mingw64_libdir}/%{pkgdir}/libaccEMIES.dll* %{mingw64_libdir}/%{pkgdir}/libaccSER.dll* %{mingw64_libdir}/%{pkgdir}/libaccldap.dll* #%{mingw64_libdir}/%{pkgdir}/libaccUNICORE.dll* %{mingw64_libdir}/%{pkgdir}/libaccJobDescriptionParser.dll* %{mingw64_libdir}/%{pkgdir}/test/libaccTEST.dll* %if %{with_mingw64_globus} %files -n mingw64-%{mingw_pkg_name}-plugins-globus %defattr(-,root,root,-) %{mingw64_libdir}/%{pkgdir}/libdmcgridftp.dll* %{mingw64_libdir}/%{pkgdir}/libaccARC0.dll* %{mingw64_libexecdir}/%{pkgdir}/arc-lcas.exe %{mingw64_libexecdir}/%{pkgdir}/arc-lcmaps.exe %endif %files -n mingw64-%{mingw_pkg_name}-misc-utils %defattr(-,root,root,-) %{mingw64_bindir}/arcemiestest.exe %{mingw64_bindir}/arcperftest.exe %{mingw64_bindir}/arcwsrf.exe %doc %{mingw64_mandir}/man1/arcemiestest.1* %doc %{mingw64_mandir}/man1/arcperftest.1* %doc %{mingw64_mandir}/man1/arcwsrf.1* %if %{with_mingw64_xmlsec1} %{mingw64_bindir}/saml_assertion_init.exe %doc %{mingw64_mandir}/man1/saml_assertion_init.1* %endif %if %{with_mingw64_python} %files -n mingw64-%{mingw_pkg_name}-python %defattr(-,root,root,-) %{mingw64_libdir}/python?.?/site-packages/_arc.dll* %{mingw64_libdir}/python?.?/site-packages/arc.py* %{mingw64_libdir}/%{pkgdir}/libpythonservice.dll* %{mingw64_libdir}/%{pkgdir}/libaccPythonBroker.dll* %doc %{mingw64_datadir}/%{pkgdir}/examples/PythonBroker/SampleBroker.py* %doc %{mingw64_datadir}/%{pkgdir}/examples/sdk/*.py* %doc %{mingw64_datadir}/%{pkgdir}/examples/echo_python/* %endif %endif %changelog * Wed Dec 13 2017 Anders Waananen - 5.4.2-1 - Initial release nordugrid-arc-5.4.2/PaxHeaders.7502/swig0000644000000000000000000000013213214316031016062 xustar000000000000000030 mtime=1513200665.020808892 30 atime=1513200668.723854182 30 ctime=1513200665.020808892 nordugrid-arc-5.4.2/swig/0000755000175000002070000000000013214316031016205 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/swig/PaxHeaders.7502/Arc.i0000644000000000000000000000012212315137015017021 xustar000000000000000027 mtime=1395965453.851694 26 atime=1513200576.85373 29 ctime=1513200665.01080877 nordugrid-arc-5.4.2/swig/Arc.i0000644000175000002070000004322012315137015017071 0ustar00mockbuildmock00000000000000/** * Note that the order of the "%include" statements are important! If a * "%include" depends on other "%include"s, it should be placed after these * "%include" dependencies. */ /* For the Java bindings the approach of one single SWIG generated C++ file is * used, while for Python one SWIG C++ file is generated for each mapped ARC * library. Therefore for Python each of the specialised SWIG files (.i) is * passed to SWIG, while for Java only this file is passed. */ #ifdef SWIGJAVA %module(directors="1") arc #endif %include %include #define DEPRECATED(X) X #ifdef SWIGPYTHON %include %include #ifdef PYDOXYGEN %include "pydoxygen.i" #endif namespace Arc { /** * Python cannot deal with string references since strings in Python * are immutable. 
Therefore ignore the string reference argument and * store a reference in a temporary variable. Here it is assumed that * the string reference does not contain any input to the function being * called. **/ %typemap(in, numinputs=0) std::string& TUPLEOUTPUTSTRING (std::string str) { $1 = &str; } /** * Return the original return value and the temporary string reference * combined in a Python tuple. **/ %typemap(argout) std::string& TUPLEOUTPUTSTRING { $result = PyTuple_Pack(2, $result, SWIG_From_std_string(*$1)); } } %pythoncode %{ import warnings def deprecated(method): """This decorator is used to mark python methods as deprecated, _not_ functions. It will result in a warning being emmitted when the method is used.""" def newMethod(*args, **kwargs): warnings.warn("Call to deprecated method 'arc.%s.%s'." % (args[0].__class__.__name__, method.__name__), category = DeprecationWarning, stacklevel = 2) return method(*args, **kwargs) newMethod.__name__ = method.__name__ newMethod.__doc__ = method.__doc__ newMethod.__dict__.update(method.__dict__) return newMethod %} %rename(__nonzero__) operator bool; %rename(__str__) operator std::string; %pythoncode %{ class StaticPropertyWrapper(object): def __init__(self, wrapped_class): object.__setattr__(self, "wrapped_class", wrapped_class) def __getattr__(self, name): orig_attr = getattr(self.wrapped_class, name) if isinstance(orig_attr, property): return orig_attr.fget() else: return orig_attr def __setattr__(self, name, value): orig_attr = getattr(self.wrapped_class, name) if isinstance(orig_attr, property): orig_attr.fset(value) else: setattr(self.wrapped_class, name, value) %} #endif #ifdef SWIGJAVA %include %include %{ #include #include #include #include %} %pragma(java) jniclasscode=%{ static { try { System.loadLibrary("jarc"); } catch (UnsatisfiedLinkError e1) { try { System.load("/usr/lib64/arc/libjarc.so"); } catch (UnsatisfiedLinkError e2) { try { System.load("/usr/lib/arc/libjarc.so"); } catch (UnsatisfiedLinkError e3) { System.err.println("Unable to load native code library (jarc), which provides Java interface to the ARC C++ libraries."); System.exit(1); } } } } %} /* Swig does not offer any bindings of the std::list template class, so * below a implementation is done which offers basic looping and element * access support, i.e. through the std::list and listiteratorhandler * classes. 
*/ template class listiterator { private: typename std::list::iterator it; const std::list& origList; public: listiterator(typename std::list::iterator it, const std::list& origList); T next(); bool hasNext(); }; template class setiterator { private: typename std::set::iterator it; public: setiterator(typename std::set::iterator it); T pointer(); void next(); bool equal(const setiterator& ita); }; namespace std { template class list { public: typedef size_t size_type; typedef T value_type; typedef const value_type& const_reference; list(); list(size_type n); %extend { int size() const { return (int)self->size(); } } %rename(isEmpty) empty; bool empty() const; void clear(); %rename(add) push_back; void push_back(const value_type& x); %extend { listiterator iterator() { return listiterator(self->begin(), *self); } listiterator begin() { return listiterator(self->begin(), *self); } listiterator end() { return listiterator(self->end(), *self); } } }; template class set { public: typedef size_t size_type; typedef T key_type; typedef T value_type; typedef const value_type& const_reference; set(); %extend { int size() const { return (int)self->size(); } } %rename(isEmpty) empty; bool empty() const; void clear(); void insert(const value_type& x); %extend { int count(const key_type& k) const { return (int)self->count(k); } setiterator begin() { return setiterator(self->begin()); } setiterator end() { return setiterator(self->end()); } } }; } %{ #include template class listiterator { private: typename std::list::iterator it; const std::list& origList; public: listiterator(typename std::list::iterator it, const std::list& origList) : it(it), origList(origList) {} T next() throw (std::out_of_range) { if (!hasNext()) { throw std::out_of_range(""); }; return (it++).operator*(); }; bool hasNext() { return it != origList.end(); }; }; template class setiterator { private: typename std::set::iterator it; public: setiterator(typename std::set::iterator it) : it(it) {} T pointer() { return it.operator*(); }; void next() { it.operator++(); }; bool equal(const setiterator& ita) { return it.operator==(ita.it); }; }; %} #ifdef JAVA_IS_15_OR_ABOVE %typemap(javaimports) listiterator "import java.util.Iterator; import java.util.NoSuchElementException;" #else %typemap(javaimports) listiterator "import java.util.NoSuchElementException;" #endif %typemap(javacode) listiterator %{ // Copied verbatim from '%typemape(javacode) SWIGTYPE'. private Object objectManagingMyMemory; protected void setMemoryManager(Object r) { objectManagingMyMemory = r; } // %typemap(javacode) SWIGTYPE - End public void remove() throws UnsupportedOperationException { throw new UnsupportedOperationException(); } %} %javaexception("java.util.NoSuchElementException") listiterator::next { try { $action } catch (std::out_of_range &e) { jclass clazz = jenv->FindClass("java/util/NoSuchElementException"); jenv->ThrowNew(clazz, "Range error"); return $null; } } /* For non-static methods in the ARC library which returns a reference to an * internal object, a workaround is needed for Java. The reason for this is * the following: * // In C++ let A::get() return a reference to an internal (private) object. * const R& A::get() const; // C++ * // In Java the A::get() method is wrapped to call the C++ A::get(). * A a = new A(); // Java * R r = a->get(); // Java * // The memory of object r is managed by object a. When a is garbage * // collected, which means in C++ terms that the a object is deleted, the r * // object becomes invalid since a no longer exist. 
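/* Summary of the helper templates above: listiterator gives the generated
 * Java proxies a forward-only, java.util.Iterator-style view of a wrapped
 * std::list (hasNext()/next(), with std::out_of_range translated to
 * NoSuchElementException below), while setiterator exposes the raw
 * pointer()/next()/equal() triple used to emulate begin()/end() loops over a
 * wrapped std::set. */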
* // In C++ a will exist through out the scope which a is defined in, but in * // Java the object a might garbage collected when it is not in use any more, * // which means that it might be garbage collected before a goes out of scope. * ... * // Do some something not involving the a object. * ... * // Now still in scope a might have been garbage collected and thus the * // following statement might cause a segmentation fault. * System.out.println("r = " + r.toString()); * * Therefore when returning a C++ reference, the Java object holding the * C++ reference must have a Java reference to the Java object which returned * the C++ reference. In that way the garbage collector will not garbage collect * the Java object actually managing the memory of the referenced C++ object. * * See * for more info. */ /* Add member to any Java proxy class which is able to hold a reference to * an object managing its memory. * Add method which sets managing object. * Since typemaps overrides other typemaps which are less specific in the * matching and since the $typemap macro is not working in older versions of * swig (e.g. 1.3.29) the code in the below typemap is copied verbatim in this * and other swig interface files (.i). Look for '%typemap(javacode) SWIGTYPE' * comment. */ %typemap(javacode) SWIGTYPE %{ private Object objectManagingMyMemory; protected void setMemoryManager(Object r) { objectManagingMyMemory = r; } %} /* Make sure that when a C++ reference is returned that the corresponding Java * proxy class will call the setMemoryManager method to indicate that the memory * is maintained by this object. */ %typemap(javaout) SWIGTYPE & { long cPtr = $jnicall; $javaclassname ret = null; if (cPtr != 0) { ret = new $javaclassname(cPtr, $owner); ret.setMemoryManager(this); } return ret; } /* With Swig 1.3.29 the memory management introduced above is applied to the get * method in the wrapped std::map template classes where the value parameter is * a "basic" type (int, double, string, etc.), however the management is not * applied to the 'SWIGTYPE_p_' wrapped class. So don't memory * management to these std::map classes. */ %typemap(javaout) double& get { return new $javaclassname($jnicall, $owner); } %typemap(javaout) int& get { return new $javaclassname($jnicall, $owner); } %typemap(javaout) std::string& get { return new $javaclassname($jnicall, $owner); } %rename(toBool) operator bool; %rename(toString) operator std::string; %rename(equals) operator==; %ignore *::operator!=; %ignore *::operator<; %ignore *::operator>; %ignore *::operator<=; %ignore *::operator>=; /* The std::cout object will always exist, so do not set any references. See * comments in Arc.i. */ %typemap(javaout) std::ostream& getStdout { return new $javaclassname($jnicall, $owner); } /* Provide method of getting std::cout as a std::ostream object, usable * with the LogDestination class. */ %inline %{ #include std::ostream& getStdout() { return std::cout; } %} /* Swig doesn't provide a direct way to wrap an abstract class with no concrete * members to a Java interface. Instead the following workaround is made in * order to make the EntityConsumer interface work correctly in the Java * bindings. * The '%prewrapentityconsumerinterface' macro should be invoked * before the C++ class definition is included (%include). * Some of the code is taken from: */ %define %prewrapentityconsumerinterface(X, Y...) 
%feature("director") Arc::EntityConsumer< Y >; %rename(Native ## X ## Consumer) Arc::EntityConsumer< Y >; %typemap(jstype) const Arc::EntityConsumer< Y >& %{X ## Consumer%}; %typemap(jstype) Arc::EntityConsumer< Y >& %{X ## Consumer%}; %typemap(javainterfaces) Arc::EntityConsumer< Y > %{X ## Consumer%}; #if SWIG_VERSION > 0x010331 /* Make sure every Java consumer interface instance is converted to an instance * which inherits from the concrete native consumer class which wraps the C++ * consumer abstract class. */ %typemap(javain, pgcppname="n", pre= " $javaclassname n = $module.makeNative($javainput);") const Arc::EntityConsumer< Y >& %{ Native ## X ## Consumer.getCPtr(n) %}; %typemap(javain, pgcppname="n", pre= " $javaclassname n = $module.makeNative($javainput);") Arc::EntityConsumer< Y >& %{ Native ## X ## Consumer.getCPtr(n) %}; /* For C++ classes holding an instance of the consumer object, the java wrapped * class also need to hold a reference to the native consumer. Otherwise it will * go out of scope, while being used. Note that a Map (HashMap) must be added * to the particular Java class, corresponding the C++ in question. This must be * done at before '%include'ing the corresponding C++ header. E.g.: %typemap(javacode) Arc::Example %{ private java.util.HashMap consumers = new java.util.HashMap(); %} * Overrides the 'javain' typemaps above since those below are more specific due * to the use of the 'addConsumer_consumer' and 'removeConsumer_consumer' * matching. */ %typemap(javain, pgcppname="n", pre= " $javaclassname n = $module.makeNative($javainput); consumers.put($javainput, n);") Arc::EntityConsumer< Y >& addConsumer_consumer %{Native ## X ## Consumer.getCPtr(n)%}; %typemap(javain, pgcppname="n", pre= " if (!consumers.containsKey($javainput)) return; $javaclassname n = ($javaclassname)consumers.get($javainput);", post = " consumers.remove($javainput);") Arc::EntityConsumer< Y >& removeConsumer_consumer %{Native ## X ## Consumer.getCPtr(n)%}; %typemap(javain, pgcppname="n", pre= " if (!consumers.containsKey($javainput)) return; $javaclassname n = ($javaclassname)consumers.get($javainput);", post = " consumers.remove($javainput);") const Arc::EntityConsumer< Y >& removeConsumer_consumer %{Native ## X ## Consumer.getCPtr(n)%}; #else /* Workaround for older swig versions. The pgcppname, pre and post attributes * to the javain typemap doesn't work in swig 1.3.31 and lower. Additional * workarounds is found in the other swig interface files (e.g. compute.i). */ %typemap(javain) const Arc::EntityConsumer< Y >& %{Native ## X ## Consumer.getCPtr(n)%}; %typemap(javain) Arc::EntityConsumer< Y >& %{Native ## X ## Consumer.getCPtr(n)%}; #endif %pragma(java) modulecode=%{ private static class Native ## X ## ConsumerProxy extends Native ## X ## Consumer { private X ## Consumer delegate; public Native ## X ## ConsumerProxy(X ## Consumer i) { delegate = i; } public Native ## X ## ConsumerProxy(X ## Consumer i, long cPtr) { super(cPtr, false); delegate = i; } public void addEntity(X e) { delegate.addEntity(e); } } // No access modifier. Classes in this package need to access the method. static Native ## X ## Consumer makeNative(X ## Consumer i) { if (i instanceof Native ## X ## Consumer) { // If it already *is* a Native ## X ## Consumer don't bother wrapping it again return (Native ## X ## Consumer)i; } /* Not all Swig wrapped classes which inherits (C++) from EntityConsumer is * a native consumer. 
Check if the instance have a 'getCPtr' method and if * so instantiate native consumer proxy with the value returned by * 'getCPtr'. */ try { Class[] types = new Class[1]; types[0] = i.getClass(); Object[] params = new Object[1]; params[0] = i; return new Native ## X ## ConsumerProxy(i, ((Long)i.getClass().getMethod("getCPtr", types).invoke(null, params)).longValue()); } catch (Exception e) { return new Native ## X ## ConsumerProxy(i); } } %} %enddef #endif #ifdef SWIGPYTHON // Make typemap for time_t - otherwise it will be a useless proxy object. // Solution taken from: http://stackoverflow.com/questions/2816046/simple-typemap-example-in-swig-java %typemap(in) time_t { if (PyLong_Check($input)) $1 = (time_t) PyLong_AsLong($input); else if (PyInt_Check($input)) $1 = (time_t) PyInt_AsLong($input); else if (PyFloat_Check($input)) $1 = (time_t) PyFloat_AsDouble($input); else { PyErr_SetString(PyExc_TypeError,"Expected a large type"); return NULL; } } %typemap(typecheck) time_t = long; %typemap(out) time_t { $result = PyLong_FromLong((long)$1); } %typemap(in) uint32_t { if (PyInt_Check($input)) $1 = (uint32_t) PyInt_AsLong($input); else if (PyFloat_Check($input)) $1 = (uint32_t) PyFloat_AsDouble($input); else { PyErr_SetString(PyExc_TypeError,"Unable to convert type to 32bit number (int/float)"); return NULL; } } %typemap(typecheck) uint32_t = int; // It seems there is no need for an out typemap for uint32_t... // Methods returning an uint32_t type will in python return a long. #endif #ifdef SWIGJAVA // Make typemap for time_t - otherwise it will be a useless proxy object. // Solution taken from: http://stackoverflow.com/questions/2816046/simple-typemap-example-in-swig-java %typemap(in) time_t { $1 = (time_t)$input; } %typemap(javain) time_t "$javainput" %typemap(out) time_t %{ $result = (jlong)$1; %} %typemap(javaout) time_t { return $jnicall; } %typemap(jni) time_t "jlong" %typemap(jtype) time_t "long" %typemap(jstype) time_t "long" %typemap(in) uint32_t { $1 = (uint32_t)$input; } %typemap(javain) uint32_t "$javainput" %typemap(out) uint32_t %{ $result = (jint)$1; %} %typemap(javaout) uint32_t { return $jnicall; } %typemap(jni) uint32_t "jint" %typemap(jtype) uint32_t "int" %typemap(jstype) uint32_t "int" #endif #ifndef SWIGJAVA %define %wraplist(X, Y...) %template(X ## List) std::list< Y >; %enddef #else #ifdef JAVA_IS_15_OR_ABOVE %define %wraplist(X, Y...) %typemap(javainterfaces) listiterator< Y > %{Iterator< X >%} %typemap(javainterfaces) std::list< Y > %{Iterable< X >%} %template(X ## List) std::list< Y >; %template(X ## ListIterator) listiterator< Y >; %enddef #else %define %wraplist(X, Y...) 
%template(X ## List) std::list< Y >; %template(X ## ListIterator) listiterator< Y >; %enddef #endif #endif #ifdef SWIGJAVA %include "common.i" %include "loader.i" %include "message.i" %include "communication.i" %include "compute.i" %include "credential.i" %include "data.i" %include "delegation.i" #endif nordugrid-arc-5.4.2/swig/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712045235201020177 xustar000000000000000027 mtime=1351957121.246634 30 atime=1513200606.373091608 30 ctime=1513200665.008808745 nordugrid-arc-5.4.2/swig/Makefile.am0000644000175000002070000000016312045235201020241 0ustar00mockbuildmock00000000000000EXTRA_DIST=Arc.i common.i loader.i message.i communication.i compute.i credential.i data.i delegation.i security.i nordugrid-arc-5.4.2/swig/PaxHeaders.7502/compute.i0000644000000000000000000000012312771222411017772 xustar000000000000000027 mtime=1474635017.400882 26 atime=1513200576.85173 30 ctime=1513200665.016808843 nordugrid-arc-5.4.2/swig/compute.i0000644000175000002070000007561712771222411020060 0ustar00mockbuildmock00000000000000#ifdef SWIGPYTHON %module(directors="1") compute // In case of exceptions, try to get error message and then exit python. %feature("director:except") { if( $error != NULL ) { PyObject *ptype, *pvalue, *ptraceback; PyErr_Fetch( &ptype, &pvalue, &ptraceback ); PyErr_Restore( ptype, pvalue, ptraceback ); PyErr_Print(); Py_Exit(1); } } %include "Arc.i" // Import common module in order to access common template types. %import(module="common") "common.i" // (module="..") is needed for inheritance from those classes to work in python %import "../src/hed/libs/common/ArcConfig.h" %import(module="common") "../src/hed/libs/common/URL.h" %import "../src/hed/libs/common/XMLNode.h" %import "../src/hed/libs/common/DateTime.h" %import "../src/hed/libs/common/Thread.h" %import "../src/hed/libs/message/PayloadSOAP.h" %import "../src/hed/libs/message/PayloadRaw.h" %import "../src/hed/libs/message/PayloadStream.h" %import "../src/hed/libs/message/MCC_Status.h" %import "../src/hed/libs/message/Message.h" %ignore Arc::AutoPointer::operator!; %warnfilter(SWIGWARN_PARSE_NAMED_NESTED_CLASS) Arc::CountedPointer::Base; %ignore Arc::CountedPointer::operator!; %ignore Arc::CountedPointer::operator=(T*); %ignore Arc::CountedPointer::operator=(const CountedPointer&); // Ignoring functions from Utils.h since swig thinks they are methods of the CountedPointer class, and thus compilation fails. %ignore Arc::GetEnv; %ignore Arc::SetEnv; %ignore Arc::UnsetEnv; %ignore Arc::EnvLockAcquire; %ignore Arc::EnvLockRelease; %ignore Arc::EnvLockWrap; %ignore Arc::EnvLockUnwrap; %ignore Arc::EnvLockUnwrapComplete; %ignore Arc::EnvLockWrapper; %ignore Arc::InterruptGuard; %ignore Arc::StrError; %ignore PersistentLibraryInit; /* Swig tries to create functions which return a new CountedPointer object. * Those functions takes no arguments, and since there is no default * constructor for the CountedPointer compilation fails. * Adding a "constructor" (used as a function in the cpp file) which * returns a new CountedPointer object with newed T object created * Ts default constructor. Thus if T has no default constructor, * another workaround is needed in order to map that CountedPointer * wrapped class with swig. 
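 * (As a concrete illustration - "Foo" is a placeholder, not an ARC type: for
 * a CountedPointer wrapping a Foo that only has a Foo(int) constructor, the
 * generic %extend just below would not compile, because it news a T with the
 * default constructor; a hand-written constructor supplying the required int
 * would have to be provided for that particular instantiation instead.)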
*/ %extend Arc::CountedPointer { CountedPointer() { return new Arc::CountedPointer(new T());} } %{ #include %} %include "../src/hed/libs/common/Utils.h" #endif // Wrap contents of $(top_srcdir)/src/hed/libs/compute/Software.h %{ #include %} %ignore Arc::SoftwareRequirement::operator=(const SoftwareRequirement&); %ignore Arc::Software::convert(const ComparisonOperatorEnum& co); %ignore Arc::Software::toString(ComparisonOperator co); %ignore operator<<(std::ostream&, const Software&); %ignore Arc::SoftwareRequirement::SoftwareRequirement(const Software& sw, Software::ComparisonOperator swComOp); %ignore Arc::SoftwareRequirement::add(const Software& sw, Software::ComparisonOperator swComOp); %wraplist(Software, Arc::Software); %wraplist(SoftwareRequirement, Arc::SoftwareRequirement); #ifdef SWIGJAVA %ignore Arc::Software::operator(); %ignore Arc::SoftwareRequirement::getComparisonOperatorList() const; #endif %include "../src/hed/libs/compute/Software.h" // Wrap contents of $(top_srcdir)/src/hed/libs/compute/Endpoint.h %{ #include %} %ignore Arc::Endpoint::operator=(const ConfigEndpoint&); %wraplist(Endpoint, Arc::Endpoint); %include "../src/hed/libs/compute/Endpoint.h" // Wrap contents of $(top_srcdir)/src/hed/libs/compute/JobState.h %{ #include %} %ignore Arc::JobState::operator!; %ignore Arc::JobState::operator=(const JobState&); %rename(GetType) Arc::JobState::operator StateType; // works with swig 1.3.40, and higher... %rename(GetType) Arc::JobState::operator Arc::JobState::StateType; // works with swig 1.3.29 %rename(GetNativeState) Arc::JobState::operator(); %wraplist(JobState, Arc::JobState); %include "../src/hed/libs/compute/JobState.h" // Wrap contents of $(top_srcdir)/src/hed/libs/compute/Job.h %{ #include %} %ignore Arc::Job::operator=(XMLNode); %ignore Arc::Job::operator=(const Job&); %wraplist(Job, Arc::Job); #ifdef SWIGPYTHON %ignore Arc::Job::WriteJobIDsToFile(const std::list&, const std::string&, unsigned = 10, unsigned = 500000); // Clash. It is sufficient to wrap only WriteJobIDsToFile(cosnt std::list&, ...); #endif %include "../src/hed/libs/compute/Job.h" // Wrap contents of $(top_srcdir)/src/hed/libs/compute/JobControllerPlugin.h %{ #include %} %ignore Arc::JobControllerPluginArgument::operator const UserConfig&; // works with swig 1.3.40, and higher... 
%ignore Arc::JobControllerPluginArgument::operator const Arc::UserConfig&; // works with swig 1.3.29 %wraplist(JobControllerPlugin, Arc::JobControllerPlugin *); %template(JobControllerPluginMap) std::map; #ifdef SWIGPYTHON %apply std::string& TUPLEOUTPUTSTRING { std::string& desc_str }; /* Applies to: * bool JobControllerPlugin::GetJobDescription(const Job&, std::string& desc_str) const; */ #endif %include "../src/hed/libs/compute/JobControllerPlugin.h" #ifdef SWIGPYTHON %clear std::string& desc_str; #endif // Wrap contents of $(top_srcdir)/src/hed/libs/compute/EndpointQueryingStatus.h %{ #include %} %ignore Arc::EndpointQueryingStatus::operator!; %ignore Arc::EndpointQueryingStatus::operator=(EndpointQueryingStatusType); %ignore Arc::EndpointQueryingStatus::operator=(const EndpointQueryingStatus&); %include "../src/hed/libs/compute/EndpointQueryingStatus.h" // Wrap contents of $(top_srcdir)/src/hed/libs/compute/JobDescription.h %{ #include %} %ignore Arc::JobDescriptionResult::operator!; %ignore Arc::Range::operator int; %ignore Arc::OptIn::operator=(const OptIn&); %ignore Arc::OptIn::operator=(const T&); %ignore Arc::Range::operator=(const Range&); %ignore Arc::Range::operator=(const T&); %ignore Arc::SourceType::operator=(const URL&); %ignore Arc::SourceType::operator=(const std::string&); %ignore Arc::JobDescription::operator=(const JobDescription&); #ifdef SWIGPYTHON %apply std::string& TUPLEOUTPUTSTRING { std::string& product }; /* Applies to: * JobDescriptionResult JobDescription::UnParse(std::string& product, std::string language, const std::string& dialect = "") const; */ #endif #ifdef SWIGJAVA %ignore Arc::JobDescription::GetAlternatives() const; #endif %include "../src/hed/libs/compute/JobDescription.h" %wraplist(JobDescription, Arc::JobDescription); %template(JobDescriptionConstList) std::list< Arc::JobDescription const * >; #ifdef SWIGJAVA %template(JobDescriptionConstListIterator) listiterator< Arc::JobDescription const * >; #ifdef JAVA_IS_15_OR_ABOVE %typemap(javainterfaces) listiterator< Arc::JobDescription const * > %{Iterator< JobDescription >%} %typemap(javainterfaces) std::list< Arc::JobDescription const * > %{Iterable< JobDescription >%} #endif #endif %wraplist(StringPair, std::pair); %wraplist(ExecutableType, Arc::ExecutableType); %wraplist(RemoteLoggingType, Arc::RemoteLoggingType); %wraplist(NotificationType, Arc::NotificationType); %wraplist(InputFileType, Arc::InputFileType); %wraplist(OutputFileType, Arc::OutputFileType); %wraplist(SourceType, Arc::SourceType); %wraplist(TargetType, Arc::TargetType); %template(ScalableTimeInt) Arc::ScalableTime; %template(RangeInt) Arc::Range; %template(StringOptIn) Arc::OptIn; %template(BoolIntPair) std::pair; #ifdef SWIGPYTHON %clear std::string& product; #endif %template(StringDoublePair) std::pair; // Wrap contents of $(top_srcdir)/src/hed/libs/compute/SubmissionStatus.h %{ #include %} %include "../src/hed/libs/compute/SubmissionStatus.h" // Wrap contents of $(top_srcdir)/src/hed/libs/compute/ExecutionTarget.h %{ #include %} %ignore Arc::ApplicationEnvironment::operator=(const Software&); %ignore Arc::ExecutionTarget::operator=(const ExecutionTarget&); %ignore Arc::GLUE2Entity::operator->() const; %ignore Arc::GLUE2Entity::operator->() const; %ignore Arc::GLUE2Entity::operator->() const; %ignore Arc::GLUE2Entity::operator->() const; %ignore Arc::GLUE2Entity::operator->() const; %ignore Arc::GLUE2Entity::operator->() const; %ignore Arc::GLUE2Entity::operator->() const; %ignore Arc::GLUE2Entity::operator->() const; #ifdef 
SWIGPYTHON /* When instantiating a template of the form * Arc::CountedPointer< T > two __nonzero__ * python methods are created in the generated python module file which * causes a swig error. The two __nonzero__ methods probably stem from * the "operator bool" and "operator ->" methods. At least in the Arc.i * file the "operator bool" method is renamed to "__nonzero__". In * order to avoid that name clash the following "operator bool" methods * are ignored. */ %ignore Arc::CountedPointer< std::map >::operator bool; %ignore Arc::CountedPointer< std::list >::operator bool; %{ #include %} %ignore ::operator<<(std::ostream&, const LocationAttributes&); %extend Arc::LocationAttributes { std::string __str__() { std::ostringstream oss; oss << *self; return oss.str(); } } %ignore ::operator<<(std::ostream&, const AdminDomainAttributes&); %extend Arc::AdminDomainAttributes { std::string __str__() { std::ostringstream oss; oss << *self; return oss.str(); } } %ignore ::operator<<(std::ostream&, const ExecutionEnvironmentAttributes&); %extend Arc::ExecutionEnvironmentAttributes { std::string __str__() { std::ostringstream oss; oss << *self; return oss.str(); } } %ignore ::operator<<(std::ostream&, const MappingPolicyAttributes&); %extend Arc::MappingPolicyAttributes { std::string __str__() { std::ostringstream oss; oss << *self; return oss.str(); } } %ignore ::operator<<(std::ostream&, const ComputingManagerAttributes&); %extend Arc::ComputingManagerAttributes { std::string __str__() { std::ostringstream oss; oss << *self; return oss.str(); } } %ignore ::operator<<(std::ostream&, const ComputingShareAttributes&); %extend Arc::ComputingShareAttributes { std::string __str__() { std::ostringstream oss; oss << *self; return oss.str(); } } %ignore ::operator<<(std::ostream&, const ComputingEndpointAttributes&); %extend Arc::ComputingEndpointAttributes { std::string __str__() { std::ostringstream oss; oss << *self; return oss.str(); } } %ignore ::operator<<(std::ostream&, const ComputingServiceAttributes&); %extend Arc::ComputingServiceAttributes { std::string __str__() { std::ostringstream oss; oss << *self; return oss.str(); } } %ignore ::operator<<(std::ostream&, const ComputingServiceType&); %extend Arc::ComputingServiceType { std::string __str__() { std::ostringstream oss; oss << *self; return oss.str(); } } %ignore ::operator<<(std::ostream&, const ExecutionTarget&); %extend Arc::ExecutionTarget { std::string __str__() { std::ostringstream oss; oss << *self; return oss.str(); } } #endif // SWIGPYTHON #ifdef SWIGJAVA %{ #include %} %ignore Arc::GLUE2Entity::operator*() const; %ignore ::operator<<(std::ostream&, const LocationAttributes&); %extend Arc::LocationAttributes { std::string toString() { std::ostringstream oss; oss << *self; return oss.str(); } } %ignore Arc::GLUE2Entity::operator*() const; %ignore ::operator<<(std::ostream&, const AdminDomainAttributes&); %extend Arc::AdminDomainAttributes { std::string toString() { std::ostringstream oss; oss << *self; return oss.str(); } } %ignore Arc::GLUE2Entity::operator*() const; %ignore ::operator<<(std::ostream&, const ExecutionEnvironmentAttributes&); %extend Arc::ExecutionEnvironmentAttributes { std::string toString() { std::ostringstream oss; oss << *self; return oss.str(); } } %ignore Arc::GLUE2Entity::operator*() const; %ignore ::operator<<(std::ostream&, const MappingPolicyAttributes&); %extend Arc::MappingPolicyAttributes { std::string toString() { std::ostringstream oss; oss << *self; return oss.str(); } } %ignore
Arc::GLUE2Entity::operator*() const; %ignore ::operator<<(std::ostream&, const ComputingManagerAttributes&); %extend Arc::ComputingManagerAttributes { std::string toString() { std::ostringstream oss; oss << *self; return oss.str(); } } %ignore Arc::GLUE2Entity::operator*() const; %ignore ::operator<<(std::ostream&, const ComputingShareAttributes&); %extend Arc::ComputingShareAttributes { std::string toString() { std::ostringstream oss; oss << *self; return oss.str(); } } %ignore Arc::GLUE2Entity::operator*() const; %ignore ::operator<<(std::ostream&, const ComputingEndpointAttributes&); %extend Arc::ComputingEndpointAttributes { std::string toString() { std::ostringstream oss; oss << *self; return oss.str(); } } %ignore Arc::GLUE2Entity::operator*() const; %ignore ::operator<<(std::ostream&, const ComputingServiceAttributes&); %extend Arc::ComputingServiceAttributes { std::string toString() { std::ostringstream oss; oss << *self; return oss.str(); } } %ignore ::operator<<(std::ostream&, const ComputingServiceType&); %extend Arc::ComputingServiceType { std::string toString() { std::ostringstream oss; oss << *self; return oss.str(); } } %ignore ::operator<<(std::ostream&, const ExecutionTarget&); %extend Arc::ExecutionTarget { std::string toString() { std::ostringstream oss; oss << *self; return oss.str(); } } #endif %include "../src/hed/libs/compute/GLUE2Entity.h" // Contains declaration of the GLUE2Entity template, used in ExecutionTarget.h file. %wraplist(ApplicationEnvironment, Arc::ApplicationEnvironment); %wraplist(ExecutionTarget, Arc::ExecutionTarget); %wraplist(ComputingServiceType, Arc::ComputingServiceType); %wraplist(CPComputingEndpointAttributes, Arc::CountedPointer); %template(PeriodIntMap) std::map; %template(ComputingEndpointMap) std::map; %template(ComputingShareMap) std::map; %template(ComputingManagerMap) std::map; %template(ExecutionEnvironmentMap) std::map; %template(MappingPolicyMap) std::map; %template(StringDoubleMap) std::map; %template(SharedBenchmarkMap) Arc::CountedPointer< std::map >; %template(SharedApplicationEnvironmentList) Arc::CountedPointer< std::list >; %template(GLUE2EntityLocationAttributes) Arc::GLUE2Entity; %template(CPLocationAttributes) Arc::CountedPointer; %template(GLUE2EntityAdminDomainAttributes) Arc::GLUE2Entity; %template(CPAdminDomainAttributes) Arc::CountedPointer; %template(GLUE2EntityExecutionEnvironmentAttributes) Arc::GLUE2Entity; %template(CPExecutionEnvironmentAttributes) Arc::CountedPointer; %template(GLUE2EntityMappingPolicyAttributes) Arc::GLUE2Entity; %template(CPMappingPolicyAttributes) Arc::CountedPointer; %template(GLUE2EntityComputingManagerAttributes) Arc::GLUE2Entity; %template(CPComputingManagerAttributes) Arc::CountedPointer; %template(GLUE2EntityComputingShareAttributes) Arc::GLUE2Entity; %template(CPComputingShareAttributes) Arc::CountedPointer; %template(GLUE2EntityComputingEndpointAttributes) Arc::GLUE2Entity; %template(CPComputingEndpointAttributes) Arc::CountedPointer; %template(GLUE2EntityComputingServiceAttributes) Arc::GLUE2Entity; %template(CPComputingServiceAttributes) Arc::CountedPointer; %template(IntSet) std::set; #ifdef SWIGJAVA %template(IntSetIterator) setiterator; #endif %include "../src/hed/libs/compute/ExecutionTarget.h" %extend Arc::ComputingServiceType { %template(GetExecutionTargetsFromList) GetExecutionTargets< std::list >; }; // Wrap contents of $(top_srcdir)/src/hed/libs/compute/EntityRetrieverPlugin.h %{ #include %} %include "../src/hed/libs/compute/EntityRetrieverPlugin.h" 
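/* The %extend blocks above only add convenience formatting to the generated
 * bindings: on the Python side every GLUE2 *Attributes class, as well as
 * ComputingServiceType and ExecutionTarget, gains a __str__ that reuses the
 * C++ operator<<, while the Java side gets an equivalent toString. A minimal,
 * hypothetical Python sketch of the intended effect (the module name 'arc'
 * and the origin of the targets are assumed, not defined here):
 *
 *   import arc
 *
 *   def dump_targets(targets):        # targets: iterable of arc.ExecutionTarget
 *       for et in targets:
 *           # str(et) calls the %extend __str__ wrapper, which streams the
 *           # target through operator<< and returns the formatted text.
 *           print(str(et))
 */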
%template(ServiceEndpointQueryOptions) Arc::EndpointQueryOptions; %template(ComputingServiceQueryOptions) Arc::EndpointQueryOptions; %template(JobListQueryOptions) Arc::EndpointQueryOptions; // Wrap contents of $(top_srcdir)/src/hed/libs/compute/EntityRetriever.h %{ #include %} #ifdef SWIGJAVA %rename(_wait) Arc::EntityRetriever::wait; %prewrapentityconsumerinterface(Endpoint, Arc::Endpoint); %prewrapentityconsumerinterface(Job, Arc::Job); %prewrapentityconsumerinterface(ComputingServiceType, Arc::ComputingServiceType); %typemap(javainterfaces) Arc::EntityContainer "EndpointConsumer"; %typemap(javainterfaces) Arc::EntityContainer "JobConsumer"; %typemap(javainterfaces) Arc::EntityContainer "ComputingServiceTypeConsumer"; %typemap(javacode,noblock=1) Arc::EntityRetriever { // Copied verbatim from '%typemape(javacode) SWIGTYPE'. private Object objectManagingMyMemory; protected void setMemoryManager(Object r) { objectManagingMyMemory = r; } // %typemap(javacode) SWIGTYPE - End #ifdef JAVA_IS_15_OR_ABOVE private java.util.HashMap consumers = new java.util.HashMap(); #else private java.util.HashMap consumers = new java.util.HashMap(); #endif } %typemap(javacode,noblock=1) Arc::EntityRetriever { // Copied verbatim from '%typemape(javacode) SWIGTYPE'. private Object objectManagingMyMemory; protected void setMemoryManager(Object r) { objectManagingMyMemory = r; } // %typemap(javacode) SWIGTYPE - End #ifdef JAVA_IS_15_OR_ABOVE private java.util.HashMap consumers = new java.util.HashMap(); #else private java.util.HashMap consumers = new java.util.HashMap(); #endif } %typemap(javacode,noblock=1) Arc::EntityRetriever { // Copied verbatim from '%typemape(javacode) SWIGTYPE'. private Object objectManagingMyMemory; protected void setMemoryManager(Object r) { objectManagingMyMemory = r; } // %typemap(javacode) SWIGTYPE - End #ifdef JAVA_IS_15_OR_ABOVE private java.util.HashMap consumers = new java.util.HashMap(); #else private java.util.HashMap consumers = new java.util.HashMap(); #endif } #endif %include "../src/hed/libs/compute/EntityRetriever.h" #ifdef SWIGPYTHON %template(EndpointConsumer) Arc::EntityConsumer; %template(ComputingServiceConsumer) Arc::EntityConsumer; %template(JobConsumer) Arc::EntityConsumer; #endif #if SWIG_VERSION <= 0x010331 && defined(SWIGJAVA) // Workaround for older swig versions wrt. the EntityConsumer interface. See Arc.i. 
%typemap(javaout) void Arc::EntityRetriever::addConsumer { NativeJobConsumer n = $module.makeNative(addConsumer_consumer); consumers.put(addConsumer_consumer, n); $jnicall; } %typemap(javaout) void Arc::EntityRetriever::removeConsumer { if (!consumers.containsKey(removeConsumer_consumer)) return; NativeJobConsumer n = (NativeJobConsumer)consumers.get(removeConsumer_consumer); $jnicall; consumers.remove(removeConsumer_consumer); } %typemap(javaout) void Arc::EntityRetriever::addConsumer { NativeComputingServiceTypeConsumer n = $module.makeNative(addConsumer_consumer); consumers.put(addConsumer_consumer, n); $jnicall; } %typemap(javaout) void Arc::EntityRetriever::removeConsumer { if (!consumers.containsKey(removeConsumer_consumer)) return; NativeComputingServiceTypeConsumer n = (NativeComputingServiceTypeConsumer)consumers.get(removeConsumer_consumer); $jnicall; consumers.remove(removeConsumer_consumer); } %typemap(javaout) void Arc::EntityRetriever::addConsumer { NativeEndpointConsumer n = $module.makeNative(addConsumer_consumer); consumers.put(addConsumer_consumer, n); $jnicall; } %typemap(javaout) void Arc::EntityRetriever::removeConsumer { if (!consumers.containsKey(removeConsumer_consumer)) return; NativeEndpointConsumer n = (NativeEndpointConsumer)consumers.get(removeConsumer_consumer); $jnicall; consumers.remove(removeConsumer_consumer); } #endif #ifdef SWIGJAVA %template(NativeEndpointConsumer) Arc::EntityConsumer; %template(NativeComputingServiceTypeConsumer) Arc::EntityConsumer; %template(NativeJobConsumer) Arc::EntityConsumer; %warnfilter(SWIGWARN_JAVA_MULTIPLE_INHERITANCE) Arc::EntityContainer; %warnfilter(SWIGWARN_JAVA_MULTIPLE_INHERITANCE) Arc::EntityContainer; %warnfilter(SWIGWARN_JAVA_MULTIPLE_INHERITANCE) Arc::EntityContainer; #endif %template(EndpointContainer) Arc::EntityContainer; %template(ServiceEndpointRetriever) Arc::EntityRetriever; %template(ComputingServiceContainer) Arc::EntityContainer; %template(TargetInformationRetriever) Arc::EntityRetriever; %template(JobContainer) Arc::EntityContainer; %template(JobListRetriever) Arc::EntityRetriever; // Wrap contents of $(top_srcdir)/src/hed/libs/compute/SubmitterPlugin.h %{ #include %} #if SWIG_VERSION <= 0x010331 && defined(SWIGJAVA) // Workaround for older swig versions wrt. the EntityConsumer interface. See Arc.i. %typemap(javaout) Arc::SubmissionStatus Arc::SubmitterPlugin::Submit { NativeJobConsumer n = $module.makeNative(jc); return new SubmissionStatus($jnicall, $owner); } #endif %ignore Arc::SubmitterPluginArgument::operator const UserConfig&; // works with swig 1.3.40, and higher... %ignore Arc::SubmitterPluginArgument::operator const Arc::UserConfig&; // works with swig 1.3.29 %wraplist(SubmitterPlugin, Arc::SubmitterPlugin*); %include "../src/hed/libs/compute/SubmitterPlugin.h" // Wrap contents of $(top_srcdir)/src/hed/libs/compute/Submitter.h %{ #include %} #ifdef SWIGPYTHON #if SWIG_VERSION <= 0x010329 /* Swig version 1.3.29 cannot handle mapping a template of a "const *" type, so * adding a traits_from::from method taking a "const *" as taken from * swig-1.3.31 makes it possible to handle such types. */ %{ namespace swig { template struct traits_from { static PyObject *from(const Type* val) { return traits_from_ptr::from(const_cast(val), 0); } }; } %} #endif #endif #if SWIG_VERSION <= 0x010331 && defined(SWIGJAVA) // Workaround for older swig versions wrt. the EntityConsumer interface. See Arc.i. 
%typemap(javaout) void Arc::Submitter::addConsumer { NativeJobConsumer n = $module.makeNative(addConsumer_consumer); consumers.put(addConsumer_consumer, n); $jnicall; } %typemap(javaout) void Arc::Submitter::removeConsumer { if (!consumers.containsKey(removeConsumer_consumer)) return; NativeJobConsumer n = (NativeJobConsumer)consumers.get(removeConsumer_consumer); $jnicall; consumers.remove(removeConsumer_consumer); } #endif #ifdef SWIGJAVA %typemap(javacode,noblock=1) Arc::Submitter { // Copied verbatim from '%typemap(javacode) SWIGTYPE'. private Object objectManagingMyMemory; protected void setMemoryManager(Object r) { objectManagingMyMemory = r; } // %typemap(javacode) SWIGTYPE - End #ifdef JAVA_IS_15_OR_ABOVE private java.util.HashMap consumers = new java.util.HashMap(); #else private java.util.HashMap consumers = new java.util.HashMap(); #endif } #endif %template(EndpointQueryingStatusMap) std::map; %template(EndpointSubmissionStatusMap) std::map; %include "../src/hed/libs/compute/Submitter.h" // Wrap contents of $(top_srcdir)/src/hed/libs/compute/ComputingServiceRetriever.h %{ #include %} #if SWIG_VERSION <= 0x010331 && defined(SWIGJAVA) // Workaround for older swig versions wrt. the EntityConsumer interface. See Arc.i. %typemap(javaout) void Arc::ComputingServiceRetriever::addConsumer { NativeComputingServiceTypeConsumer n = $module.makeNative(addConsumer_consumer); consumers.put(addConsumer_consumer, n); $jnicall; } %typemap(javaout) void Arc::ComputingServiceRetriever::removeConsumer { if (!consumers.containsKey(removeConsumer_consumer)) return; NativeComputingServiceTypeConsumer n = (NativeComputingServiceTypeConsumer)consumers.get(removeConsumer_consumer); $jnicall; consumers.remove(removeConsumer_consumer); } #endif #ifdef SWIGJAVA %typemap(javacode,noblock=1) Arc::ComputingServiceRetriever { // Copied verbatim from '%typemap(javacode) SWIGTYPE'. private Object objectManagingMyMemory; protected void setMemoryManager(Object r) { objectManagingMyMemory = r; } // %typemap(javacode) SWIGTYPE - End #ifdef JAVA_IS_15_OR_ABOVE private java.util.HashMap consumers = new java.util.HashMap(); #else private java.util.HashMap consumers = new java.util.HashMap(); #endif } %rename(_wait) Arc::ComputingServiceRetriever::wait; %warnfilter(SWIGWARN_JAVA_MULTIPLE_INHERITANCE) Arc::ComputingServiceRetriever; %typemap(javainterfaces) Arc::ComputingServiceRetriever "EndpointConsumer"; #endif #ifdef SWIGPYTHON %extend Arc::ComputingServiceRetriever { const std::list& getResults() { return *self; } %insert("python") %{ def __iter__(self): return self.getResults().__iter__() %} } /* This typemap tells SWIG that we don't want to use the * 'std::list& etList' argument from the target language, * but we need a temporary variable for internal use, and we want this * argument to point to this temporary variable */ %typemap(in, numinputs=0) std::list& etList (std::list temp) { $1 = &temp; } /* This typemap tells SWIG what to do with the * 'std::list& etList' argument after the method has finished. * This typemap applies to * 'void GetExecutionTargets(std::list& etList)' and since it * does not return anything we simply return the argument list.
*/ %typemap(argout) std::list& etList { PyObject *o = PyList_New(0); for (std::list::iterator it = (*$1).begin(); it != (*$1).end(); ++it) { PyList_Append(o, SWIG_NewPointerObj(new Arc::ExecutionTarget(*it), SWIGTYPE_p_Arc__ExecutionTarget, SWIG_POINTER_OWN | 0 )); } $result = o; } /* applies to: * void GetExecutionTargets(std::list& etList) */ #endif %include "../src/hed/libs/compute/ComputingServiceRetriever.h" #ifdef SWIGPYTHON %clear std::list& etList; #endif // Wrap contents of $(top_srcdir)/src/hed/libs/compute/BrokerPlugin.h %{ #include %} %ignore Arc::BrokerPluginArgument::operator const UserConfig&; // works with swig 1.3.40, and higher... %ignore Arc::BrokerPluginArgument::operator const Arc::UserConfig&; // works with swig 1.3.29 #ifdef SWIGJAVA %rename(compare) Arc::BrokerPlugin::operator()(const ExecutionTarget&, const ExecutionTarget&) const; #endif %include "../src/hed/libs/compute/BrokerPlugin.h" // Wrap contents of $(top_srcdir)/src/hed/libs/compute/Broker.h %{ #include %} %ignore Arc::Broker::operator=(const Broker&); #ifdef SWIGJAVA %rename(compare) Arc::Broker::operator()(const ExecutionTarget&, const ExecutionTarget&) const; #endif %include "../src/hed/libs/compute/Broker.h" // Wrap contents of $(top_srcdir)/src/hed/libs/compute/JobSupervisor.h %feature("director") Arc::JobSelector; %{ #include %} %include "../src/hed/libs/compute/JobSupervisor.h" // Wrap contents of $(top_srcdir)/src/hed/libs/compute/TestACCControl.h %{ #include %} #ifdef SWIGPYTHON %warnfilter(SWIGWARN_TYPEMAP_SWIGTYPELEAK) Arc::SubmitterPluginTestACCControl::submitJob; %warnfilter(SWIGWARN_TYPEMAP_SWIGTYPELEAK) Arc::SubmitterPluginTestACCControl::migrateJob; %rename(_BrokerPluginTestACCControl) Arc::BrokerPluginTestACCControl; %rename(_JobDescriptionParserPluginTestACCControl) Arc::JobDescriptionParserPluginTestACCControl; %rename(_JobControllerPluginTestACCControl) Arc::JobControllerPluginTestACCControl; %rename(_SubmitterPluginTestACCControl) Arc::SubmitterPluginTestACCControl; %rename(_ServiceEndpointRetrieverPluginTESTControl) Arc::ServiceEndpointRetrieverPluginTESTControl; %rename(_TargetInformationRetrieverPluginTESTControl) Arc::TargetInformationRetrieverPluginTESTControl; #endif %include "../src/hed/libs/compute/TestACCControl.h" %template(EndpointListList) std::list< std::list >; %template(EndpointQueryingStatusList) std::list; #ifdef SWIGPYTHON %pythoncode %{ BrokerPluginTestACCControl = StaticPropertyWrapper(_BrokerPluginTestACCControl) JobDescriptionParserPluginTestACCControl = StaticPropertyWrapper(_JobDescriptionParserPluginTestACCControl) JobControllerPluginTestACCControl = StaticPropertyWrapper(_JobControllerPluginTestACCControl) SubmitterPluginTestACCControl = StaticPropertyWrapper(_SubmitterPluginTestACCControl) ServiceEndpointRetrieverPluginTESTControl = StaticPropertyWrapper(_ServiceEndpointRetrieverPluginTESTControl) TargetInformationRetrieverPluginTESTControl = StaticPropertyWrapper(_TargetInformationRetrieverPluginTESTControl) %} #endif // Wrap contents of $(top_srcdir)/src/hed/libs/compute/JobInformationStorage.h %{ #include %} %include "../src/hed/libs/compute/JobInformationStorage.h" // Wrap contents of $(top_srcdir)/src/hed/libs/compute/JobInformationStorageXML.h %{ #include %} %include "../src/hed/libs/compute/JobInformationStorageXML.h" #ifdef DBJSTORE_ENABLED // Wrap contents of $(top_srcdir)/src/hed/libs/compute/JobInformationStorageBDB.h %{ #include %} %include "../src/hed/libs/compute/JobInformationStorageBDB.h" #endif // DBJSTORE_ENABLED 
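/* Taken together, the ComputingServiceRetriever typemaps and %extend code
 * earlier in this file change how the class is used from Python:
 * GetExecutionTargets() no longer fills a std::list out-parameter but simply
 * returns a Python list, and the retriever itself is iterable through
 * getResults()/__iter__. A rough, hypothetical usage sketch; the module name
 * 'arc', the constructor arguments, the Endpoint.REGISTRY constant and the
 * registry host are assumptions for illustration only:
 *
 *   import arc
 *
 *   uc = arc.UserConfig()                      # user credentials/configuration
 *   registry = arc.Endpoint("index1.nordugrid.org", arc.Endpoint.REGISTRY)
 *   retriever = arc.ComputingServiceRetriever(uc, [registry])
 *   retriever.wait()                           # block until querying finishes
 *
 *   # The argout typemap turns the etList out-parameter into a return value.
 *   for target in retriever.GetExecutionTargets():
 *       print(str(target))                     # __str__ added via %extend
 */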
nordugrid-arc-5.4.2/swig/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315736020217 xustar000000000000000030 mtime=1513200606.404091987 30 atime=1513200652.651657613 30 ctime=1513200665.009808758 nordugrid-arc-5.4.2/swig/Makefile.in0000644000175000002070000003745313214315736020301 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = swig DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS 
= @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = 
@LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = 
@pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ EXTRA_DIST = Arc.i common.i loader.i message.i communication.i compute.i credential.i data.i delegation.i security.i all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign swig/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign swig/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/swig/PaxHeaders.7502/loader.i0000644000000000000000000000012312065074022017563 xustar000000000000000027 mtime=1356101650.095815 26 atime=1513200576.84173 30 ctime=1513200665.013808807 nordugrid-arc-5.4.2/swig/loader.i0000644000175000002070000000323312065074022017632 0ustar00mockbuildmock00000000000000#ifdef SWIGPYTHON %module loader %include "Arc.i" %import "../src/hed/libs/common/XMLNode.h" %import "../src/hed/libs/common/Logger.h" #endif // Wrap contents of $(top_srcdir)/src/hed/libs/loader/ModuleManager.h /* The 'operator Glib::Module*' method cannot be wrapped. If it is * needed in the bindings, it should be renamed. 
*/ %ignore Arc::LoadableModuleDescription::operator Glib::Module*; %ignore Arc::LoadableModuleDescription::operator!; %ignore Arc::LoadableModuleDescription::operator=(Glib::Module*); %ignore Arc::LoadableModuleDescription::operator=(const LoadableModuleDescription&); %{ #include %} %include "../src/hed/libs/loader/ModuleManager.h" // Wrap contents of $(top_srcdir)/src/hed/libs/loader/Plugin.h /* Suppress warnings about potential possibility of memory leak, when * using Arc::plugins_table_name, and * Arc::PluginDescriptor::{name,kind,description} */ %warnfilter(SWIGWARN_TYPEMAP_CHARLEAK) Arc::plugins_table_name; %warnfilter(SWIGWARN_TYPEMAP_CHARLEAK) Arc::PluginDescriptor; %{ #include %} #ifdef SWIGJAVA /* Make garbage collector destroy all plugin objects _before_ destroying loader */ %typemap(javaout) Arc::Plugin * get_instance { long cPtr = $jnicall; $javaclassname ret = null; if (cPtr != 0) { ret = new $javaclassname(cPtr, $owner); ret.setMemoryManager(this); } return ret; } #endif %include "../src/hed/libs/loader/Plugin.h" %wraplist(ModuleDesc, Arc::ModuleDesc); %wraplist(PluginDesc, Arc::PluginDesc); // Wrap contents of $(top_srcdir)/src/hed/libs/loader/Loader.h %{ #include %} %include "../src/hed/libs/loader/Loader.h" nordugrid-arc-5.4.2/swig/PaxHeaders.7502/common.i0000644000000000000000000000012312675602216017616 xustar000000000000000027 mtime=1459029134.924374 26 atime=1513200576.84173 30 ctime=1513200665.011808782 nordugrid-arc-5.4.2/swig/common.i0000644000175000002070000002250412675602216017667 0ustar00mockbuildmock00000000000000#ifdef SWIGPYTHON %module common %include "Arc.i" #endif /* Python: Avoid creating a new SWIG types for each module, for types that * are general for different modules. E.g. StringPair - put it in common, and * use the one from common in the other modules. 
*/ %template(StringPair) std::pair; %wraplist(String, std::string) %template(StringSet) std::set; #ifdef SWIGJAVA %template(StringSetIterator) setiterator; #endif %template(StringVector) std::vector; %template(StringStringMap) std::map; #ifndef SWIGIMPORTED // Wrap contents of $(top_srcdir)/src/hed/libs/common/XMLNode.h %{ #include %} %ignore Arc::MatchXMLName; %ignore Arc::MatchXMLNamespace; %ignore Arc::XMLNode::operator!; %ignore Arc::XMLNode::operator[](const char *) const; %ignore Arc::XMLNode::operator[](const std::string&) const; %ignore Arc::XMLNode::operator[](int) const; %ignore Arc::XMLNode::operator=(const char *); %ignore Arc::XMLNode::operator=(const std::string&); %ignore Arc::XMLNode::operator=(const XMLNode&); %ignore Arc::XMLNode::operator++(); %ignore Arc::XMLNode::operator--(); %ignore Arc::XMLNodeContainer::operator[](int); %ignore Arc::XMLNodeContainer::operator=(const XMLNodeContainer&); %ignore operator<<(std::ostream&, const XMLNode&); %ignore operator>>(std::istream&, XMLNode&); #ifdef SWIGPYTHON %include %apply std::string& OUTPUT { std::string& out_xml_str }; #endif #ifdef SWIGJAVA %ignore Arc::XMLNode::XMLNode(const char*); %ignore Arc::XMLNode::XMLNode(const char*, int); %ignore Arc::XMLNode::NewChild(const std::string&); %ignore Arc::XMLNode::NewChild(const std::string&, int); %ignore Arc::XMLNode::NewChild(const std::string&, NS const&, int); %ignore Arc::XMLNode::NewChild(const std::string&, NS const&); %ignore Arc::XMLNode::Name(const char*); %ignore Arc::XMLNode::Attribute(const char*); %ignore Arc::XMLNode::NewAttribute(const char*); %ignore Arc::XMLNode::NewChild(const char*, int, bool); %ignore Arc::XMLNode::NewChild(const char*, const NS&, int, bool); %ignore Arc::XMLNode::operator==(const char*); // Arc::XMLNode::operator==(const std::string&) is wrapped instead which is equivalent to this. 
#endif %include "../src/hed/libs/common/XMLNode.h" %wraplist(XMLNode, Arc::XMLNode); #ifdef SWIGPYTHON %clear std::string& out_xml_str; #endif // Wrap contents of $(top_srcdir)/src/hed/libs/common/ArcConfig.h %rename(_print) Arc::Config::print; #ifdef SWIGJAVA %ignore Arc::Config::Config(const char*); #endif %{ #include %} %include "../src/hed/libs/common/ArcConfig.h" // Wrap contents of $(top_srcdir)/src/hed/libs/common/ArcLocation.h %{ #include %} %include "../src/hed/libs/common/ArcLocation.h" // Wrap contents of $(top_srcdir)/src/hed/libs/common/ArcVersion.h %{ #include %} %include "../src/hed/libs/common/ArcVersion.h" // Wrap contents of $(top_srcdir)/src/hed/libs/common/IString.h %{ #include %} %ignore Arc::IString::operator=(const IString&); %ignore operator<<(std::ostream&, const IString&); %include "../src/hed/libs/common/IString.h" // Wrap contents of $(top_srcdir)/src/hed/libs/common/Logger.h %{ #include %} %rename(LogStream_ostream) Arc::LogStream; %ignore Arc::LogFile::operator!; %ignore operator<<(std::ostream&, const LoggerFormat&); %ignore operator<<(std::ostream&, LogLevel); %ignore operator<<(std::ostream&, const LogMessage&); #ifdef SWIGPYTHON // Suppress warnings about unknown classes std::streambuf and std::ostream %warnfilter(SWIGWARN_TYPE_UNDEFINED_CLASS) CPyOutbuf; %warnfilter(SWIGWARN_TYPE_UNDEFINED_CLASS) CPyOstream; // code from: http://www.nabble.com/Using-std%3A%3Aistream-from-Python-ts7920114.html#a7923253 %inline %{ class CPyOutbuf : public std::streambuf { public: CPyOutbuf(PyObject* obj) : m_PyObj(obj) { Py_INCREF(m_PyObj); } ~CPyOutbuf() { Py_DECREF(m_PyObj); } protected: int_type overflow(int_type c) { // Call to PyGILState_Ensure ensures there is Python // thread state created/assigned. PyGILState_STATE gstate = PyGILState_Ensure(); PyObject_CallMethod(m_PyObj, (char*) "write", (char*) "c", c); PyGILState_Release(gstate); return c; } std::streamsize xsputn(const char* s, std::streamsize count) { // Call to PyGILState_Ensure ensures there is Python // thread state created/assigned. PyGILState_STATE gstate = PyGILState_Ensure(); PyObject_CallMethod(m_PyObj, (char*) "write", (char*) "s#", s, int(count)); PyGILState_Release(gstate); return count; } PyObject* m_PyObj; }; class CPyOstream : public std::ostream { public: CPyOstream(PyObject* obj) : std::ostream(&m_Buf), m_Buf(obj) {} private: CPyOutbuf m_Buf; }; %} %pythoncode %{ def LogStream(file): os = CPyOstream(file) os.thisown = False ls = LogStream_ostream(os) ls.thisown = False return ls %} #endif #ifdef SWIGJAVA /* The static logger object will always exist, so do not set any references. See * comments in Arc.i. 
*/ %typemap(javaout) Arc::Logger& Arc::Logger::getRootLogger() { return new $javaclassname($jnicall, $owner); } #endif %include "../src/hed/libs/common/Logger.h" %wraplist(LogDestination, Arc::LogDestination*); // Wrap contents of $(top_srcdir)/src/hed/libs/common/DateTime.h %{ #include %} %ignore Arc::Time::UNDEFINED; %ignore Arc::Time::operator=(time_t); %ignore Arc::Time::operator=(const Time&); %ignore Arc::Time::operator=(const char*); %ignore Arc::Time::operator=(const std::string&); %ignore operator<<(std::ostream&, const Time&); %ignore Arc::Period::operator=(time_t); %ignore Arc::Period::operator=(const Period&); %ignore operator<<(std::ostream&, const Period&); #ifdef SWIGJAVA %rename(add) Arc::Time::operator+(const Period&) const; %rename(sub) Arc::Time::operator-(const Period&) const; %rename(sub) Arc::Time::operator-(const Time&) const; #endif %include "../src/hed/libs/common/DateTime.h" // Wrap contents of $(top_srcdir)/src/hed/libs/common/URL.h %{ #include %} %ignore Arc::URL::operator!; %ignore Arc::PathIterator::operator++(); %ignore Arc::PathIterator::operator--(); %ignore operator<<(std::ostream&, const URL&); %include "../src/hed/libs/common/URL.h" %wraplist(URL, Arc::URL); %template(URLVector) std::vector; %template(URLListMap) std::map< std::string, std::list >; %wraplist(URLLocation, Arc::URLLocation); // Wrap contents of $(top_srcdir)/src/hed/libs/common/Utils.h %ignore Arc::AutoPointer::operator!; %warnfilter(SWIGWARN_PARSE_NAMED_NESTED_CLASS) Arc::CountedPointer::Base; %ignore Arc::CountedPointer::operator!; %ignore Arc::CountedPointer::operator=(T*); %ignore Arc::CountedPointer::operator=(const CountedPointer&); // Ignoring functions from Utils.h since swig thinks they are methods of the CountedPointer class, and thus compilation fails. %ignore Arc::GetEnv; %ignore Arc::SetEnv; %ignore Arc::UnsetEnv; %ignore Arc::EnvLockAcquire; %ignore Arc::EnvLockRelease; %ignore Arc::EnvLockWrap; %ignore Arc::EnvLockUnwrap; %ignore Arc::EnvLockUnwrapComplete; %ignore Arc::EnvLockWrapper; %ignore Arc::InterruptGuard; %ignore Arc::StrError; %ignore PersistentLibraryInit; /* Swig tries to create functions which return a new CountedPointer object. * Those functions take no arguments, and since there is no default * constructor for the CountedPointer compilation fails. * Adding a "constructor" (used as a function in the cpp file) which * returns a new CountedPointer object with a newed T object created * using T's default constructor. Thus if T has no default constructor, * another workaround is needed in order to map that CountedPointer * wrapped class with swig. */ %extend Arc::CountedPointer { CountedPointer() { return new Arc::CountedPointer(new T());} } %{ #include %} %include "../src/hed/libs/common/Utils.h" // Wrap contents of $(top_srcdir)/src/hed/libs/common/User.h %{ #include %} %ignore Arc::User::operator!; %include "../src/hed/libs/common/User.h" // Wrap contents of $(top_srcdir)/src/hed/libs/common/UserConfig.h %rename(toValue) Arc::initializeCredentialsType::operator initializeType; // works with swig 1.3.40, and higher...
%rename(toValue) Arc::initializeCredentialsType::operator Arc::initializeCredentialsType::initializeType; // works with swig 1.3.29 %ignore Arc::UserConfig::operator!; %ignore Arc::ConfigEndpoint::operator!; %{ #include %} %wraplist(ConfigEndpoint, Arc::ConfigEndpoint); %include "../src/hed/libs/common/UserConfig.h" %template(ConfigEndpointMap) std::map; // Wrap contents of $(top_srcdir)/src/hed/libs/common/GUID.h %{ #include %} %include "../src/hed/libs/common/GUID.h" // Wrap contents of $(top_srcdir)/src/hed/libs/common/CheckSum.h %{ #include %} %include "../src/hed/libs/common/CheckSum.h" // Wrap contents of $(top_srcdir)/src/hed/libs/common/JobPerfLog.h %{ #include %} %include "../src/hed/libs/common/JobPerfLog.h" // Wrap contents of $(top_srcdir)/src/hed/libs/common/Thread.h %{ #include %} %ignore Arc::ThreadId; #ifdef SWIGJAVA %rename(_wait) Arc::SimpleCondition::wait; %rename(_wait) Arc::SimpleCounter::wait; %rename(_wait) Arc::ThreadedPointerBase::wait; #endif %include "../src/hed/libs/common/Thread.h" %template(SimpleConditionList) std::list; #endif // SWIGIMPORTED nordugrid-arc-5.4.2/swig/PaxHeaders.7502/message.i0000644000000000000000000000012312106711376017747 xustar000000000000000027 mtime=1360761598.532184 26 atime=1513200576.85373 30 ctime=1513200665.014808819 nordugrid-arc-5.4.2/swig/message.i0000644000175000002070000001326312106711376020022 0ustar00mockbuildmock00000000000000#ifdef SWIGPYTHON %module message %include "Arc.i" // (module="..") is needed for inheritance from those classes to work in python %import(module="common") "../src/hed/libs/common/XMLNode.h" %import(module="common") "../src/hed/libs/common/ArcConfig.h" %import(module="loader") "../src/hed/libs/loader/Plugin.h" #endif // Wrap contents of $(top_srcdir)/src/hed/libs/message/MCC_Status.h %{ #include %} %ignore Arc::MCC_Status::operator!; %include "../src/hed/libs/message/MCC_Status.h" // Wrap contents of $(top_srcdir)/src/hed/libs/message/MessageAttributes.h %rename(next) Arc::AttributeIterator::operator++; #ifdef SWIGPYTHON %pythonappend Arc::MessageAttributes::getAll %{ d = dict() while val.hasMore(): d[val.key()] = val.__ref__() val.next() return d %} #endif %{ #include %} %include "../src/hed/libs/message/MessageAttributes.h" // Wrap contents of $(top_srcdir)/src/hed/libs/message/SecAttr.h %{ #include %} %ignore Arc::SecAttrFormat::operator=(SecAttrFormat); %ignore Arc::SecAttrFormat::operator=(const char*); #ifdef SWIGPYTHON %include %apply std::string& OUTPUT { std::string &val }; #endif #ifdef SWIGJAVA %ignore Arc::SecAttr::equal; #endif %include "../src/hed/libs/message/SecAttr.h" #ifdef SWIGPYTHON %clear std::string &val; #endif // Wrap contents of $(top_srcdir)/src/hed/libs/message/MessageAuth.h %{ #include %} %ignore Arc::MessageAuth::operator[](const std::string&); #ifdef SWIGPYTHON %pythonprepend Arc::MessageAuth::Export %{ x = XMLNode("") args = args[:-1] + (args[-1].fget(), x) %} %pythonappend Arc::MessageAuth::Export %{ return x %} #endif %include "../src/hed/libs/message/MessageAuth.h" // Wrap contents of $(top_srcdir)/src/hed/libs/message/Message.h %{ #include %} %ignore Arc::MessageContext::operator[](const std::string&); %ignore Arc::Message::operator=(Message&); %include "../src/hed/libs/message/Message.h" // Wrap contents of $(top_srcdir)/src/hed/libs/message/MCC.h /* The 'operator Config*' and 'operator ChainContext*' methods cannot be * wrapped. If they are needed in the bindings, they should be renamed. 
*/ %ignore Arc::MCCPluginArgument::operator Config*; // works with swig 1.3.40, and higher... %ignore Arc::MCCPluginArgument::operator Arc::Config*; // works with swig 1.3.29 %ignore Arc::MCCPluginArgument::operator ChainContext*; // works with swig 1.3.40, and higher... %ignore Arc::MCCPluginArgument::operator Arc::ChainContext*; // works with swig 1.3.29 %{ #include %} %include "../src/hed/libs/message/MCC.h" // Wrap contents of $(top_srcdir)/src/hed/libs/message/PayloadRaw.h %{ #include %} %ignore Arc::PayloadRawInterface::operator[](Size_t) const; %ignore Arc::PayloadRaw::operator[](Size_t) const; %include "../src/hed/libs/message/PayloadRaw.h" // Wrap contents of $(top_srcdir)/src/hed/libs/message/SOAPEnvelope.h %{ #include %} %ignore Arc::SOAPEnvelope::operator=(const SOAPEnvelope&); /* The 'operator XMLNode' method cannot be wrapped. If it is needed in * the bindings, it should be renamed. */ %ignore Arc::SOAPFault::operator XMLNode; // works with swig 1.3.40, and higher... %ignore Arc::SOAPFault::operator Arc::XMLNode; // works with swig 1.3.29 #ifdef SWIGPYTHON %apply std::string& OUTPUT { std::string& out_xml_str }; #endif #ifdef SWIGJAVA %ignore Arc::SOAPFault::Reason(const std::string&); // Reason(const char*) is wrapped instead which is equivalent to this one. %ignore Arc::SOAPEnvelope::SOAPEnvelope(const char*); // SOAPEnvelope(const std::string&) is wrapped instead which is equivalent to this one. %ignore Arc::SOAPEnvelope::SOAPEnvelope(const char*, int); // SOAPEnvelope(const std::string& xml) is wrapped instead which is equivalent to this one. #endif %include "../src/hed/libs/message/SOAPEnvelope.h" #ifdef SWIGPYTHON %clear std::string& out_xml_str; #endif // Wrap contents of $(top_srcdir)/src/hed/libs/message/SOAPMessage.h %{ #include %} %include "../src/hed/libs/message/SOAPMessage.h" // Wrap contents of $(top_srcdir)/src/hed/libs/message/PayloadSOAP.h #ifdef SWIGJAVA /* Multiple inheritance is not supported in Java, so the PayloadSOAP * class might not be fully functional. If needed further investigations * must be done. Suppress warning for now. */ %warnfilter(SWIGWARN_JAVA_MULTIPLE_INHERITANCE) Arc::PayloadSOAP; #endif %{ #include %} %include "../src/hed/libs/message/PayloadSOAP.h" // Wrap contents of $(top_srcdir)/src/hed/libs/message/PayloadStream.h %{ #include %} %ignore Arc::PayloadStreamInterface::operator!; %ignore Arc::PayloadStream::operator!; #ifdef SWIGJAVA %ignore Arc::PayloadStreamInterface::Put(const char*); // Put(const std::string&) is wrapped instead which is equivalent to this one. #endif %include "../src/hed/libs/message/PayloadStream.h" // Wrap contents of $(top_srcdir)/src/hed/libs/message/Service.h %{ #include %} %ignore Arc::Service::operator!; /* The 'operator Config*' and 'operator ChainContext*' methods cannot be * wrapped. If they are needed in the bindings, they should be renamed. */ %ignore Arc::ServicePluginArgument::operator Config*; // works with swig 1.3.40, and higher... %ignore Arc::ServicePluginArgument::operator Arc::Config*; // works with swig 1.3.29 %ignore Arc::ServicePluginArgument::operator ChainContext*; // works with swig 1.3.40, and higher... 
%ignore Arc::ServicePluginArgument::operator Arc::ChainContext*; // works with swig 1.3.29 %include "../src/hed/libs/message/Service.h" nordugrid-arc-5.4.2/swig/PaxHeaders.7502/delegation.i0000644000000000000000000000012212044731630020431 xustar000000000000000027 mtime=1351857048.487543 26 atime=1513200576.85073 29 ctime=1513200665.01980888 nordugrid-arc-5.4.2/swig/delegation.i0000644000175000002070000000423512044731630020504 0ustar00mockbuildmock00000000000000#ifdef SWIGPYTHON %module delegation %include "Arc.i" %import "../src/hed/libs/common/XMLNode.h" %import "../src/hed/libs/message/SOAPEnvelope.h" %import "../src/hed/libs/message/MCC.h" %import "../src/hed/libs/message/Message.h" %import "../src/hed/libs/message/MessageAttributes.h" #endif // Wrap contents of $(top_srcdir)/src/hed/libs/delegation/DelegationInterface.h %{ #include %} %ignore Arc::DelegationConsumer::operator!; %ignore Arc::DelegationProvider::operator!; #ifdef SWIGPYTHON %ignore Arc::DelegationConsumer::Acquire(std::string&, std::string&); %ignore Arc::DelegationConsumerSOAP::UpdateCredentials(std::string&, std::string&, const SOAPEnvelope&, SOAPEnvelope&); %ignore Arc::DelegationConsumerSOAP::DelegatedToken(std::string&, std::string&, XMLNode); %ignore Arc::DelegationContainerSOAP::DelegatedToken(std::string&, std::string&, XMLNode); %ignore Arc::DelegationContainerSOAP::DelegatedToken(std::string&, std::string&, XMLNode, std::string const&); %ignore Arc::DelegationContainerSOAP::Process(std::string&, const SOAPEnvelope&, SOAPEnvelope&); %ignore Arc::DelegationContainerSOAP::Process(std::string&, const SOAPEnvelope&, SOAPEnvelope&, const std::string&); %apply std::string& TUPLEOUTPUTSTRING { std::string& credentials }; /* Applies to: * bool DelegationConsumerSOAP::UpdateCredentials(std::string& credentials, const SOAPEnvelope& in, SOAPEnvelope& out); * bool DelegationConsumerSOAP::DelegatedToken(std::string& credentials, XMLNode token); * bool DelegationContainerSOAP::DelegatedToken(std::string& credentials, XMLNode token, const std::string& client = ""); * bool DelegationContainerSOAP::Process(std::string& credentials, const SOAPEnvelope& in, SOAPEnvelope& out, const std::string& client = ""); */ %apply std::string& TUPLEOUTPUTSTRING { std::string& content }; /* Applies to: * bool DelegationConsumer::Backup(std::string& content); * bool DelegationConsumer::Request(std::string& content); * bool DelegationConsumer::Acquire(std::string& content); */ #endif %include "../src/hed/libs/delegation/DelegationInterface.h" #ifdef SWIGPYTHON %clear std::string& credentials; %clear std::string& content; #endif nordugrid-arc-5.4.2/swig/PaxHeaders.7502/credential.i0000644000000000000000000000012312315137015020427 xustar000000000000000027 mtime=1395965453.851694 26 atime=1513200576.85073 30 ctime=1513200665.017808855 nordugrid-arc-5.4.2/swig/credential.i0000644000175000002070000000405612315137015020502 0ustar00mockbuildmock00000000000000#ifdef SWIGPYTHON %module credential %include "Arc.i" // Import common module in order to access common template types. %import(module="common") "common.i" %import "../src/hed/libs/common/UserConfig.h" %import "../src/hed/libs/common/DateTime.h" %import "../src/hed/libs/common/URL.h" %import "../src/hed/libs/common/Logger.h" #endif /* STACK_OF, which is used in 'Credential.h' and 'VOMSUtil.h' is not * known to swig. Providing definition below. If definition is not * provided swig encounters a syntax error. 
*/ #ifdef STACK_OF #undef STACK_OF #endif #define STACK_OF( A ) void // Wrap contents of $(top_srcdir)/src/hed/libs/crypto/OpenSSL.h %{ #include %} %include "../src/hed/libs/crypto/OpenSSL.h" // Wrap contents of $(top_srcdir)/src/hed/libs/credential/PasswordSource.h %{ #include %} %include "../src/hed/libs/credential/PasswordSource.h" // Wrap contents of $(top_srcdir)/src/hed/libs/credential/Credential.h #ifdef SWIGJAVA // Suppress warning about unknown class std::runtime_error %warnfilter(SWIGWARN_TYPE_UNDEFINED_CLASS) Arc::CredentialError; #endif %{ #include %} %include "../src/hed/libs/credential/Credential.h" // Wrap contents of $(top_srcdir)/src/hed/libs/common/ArcRegex.h %{ #include %} %ignore Arc::RegularExpression::operator=(const RegularExpression&); %include "../src/hed/libs/common/ArcRegex.h" // Wrap contents of $(top_srcdir)/src/hed/libs/credential/VOMSUtil.h #ifdef SWIGPYTHON /* 'from' is a python keyword, renaming to _from. */ %rename(_from) Arc::VOMSACInfo::from; #endif %{ #include %} %include "../src/hed/libs/credential/VOMSUtil.h" %template(VOMSACInfoVector) std::vector; %template(StringVectorVector) std::vector< std::vector >; // Wrap contents of $(top_srcdir)/src/hed/libs/credentialstore/CredentialStore.h %{ #include %} %ignore Arc::CredentialStore::operator!; %include "../src/hed/libs/credentialstore/CredentialStore.h" nordugrid-arc-5.4.2/swig/PaxHeaders.7502/data.i0000644000000000000000000000012212117633662017236 xustar000000000000000027 mtime=1363097522.461274 26 atime=1513200576.85073 29 ctime=1513200665.01980888 nordugrid-arc-5.4.2/swig/data.i0000644000175000002070000002100512117633662017303 0ustar00mockbuildmock00000000000000#ifdef SWIGPYTHON %module data %include "Arc.i" %import "../src/hed/libs/common/CheckSum.h" %import "../src/hed/libs/common/DateTime.h" %import "../src/hed/libs/common/URL.h" %import "../src/hed/libs/common/UserConfig.h" #endif // Wrap contents of $(top_srcdir)/src/hed/libs/data/DataStatus.h %{ #include %} %ignore Arc::DataStatus::operator!; %ignore Arc::DataStatus::operator=(const DataStatusType&); %ignore operator<<(std::ostream&, const DataStatus&); %include "../src/hed/libs/data/DataStatus.h" // Wrap contents of $(top_srcdir)/src/hed/libs/data/FileInfo.h %{ #include %} %ignore Arc::FileInfo::operator!; %include "../src/hed/libs/data/FileInfo.h" // Wrap contents of $(top_srcdir)/src/hed/libs/data/URLMap.h %{ #include %} %ignore Arc::URLMap::operator!; %include "../src/hed/libs/data/URLMap.h" // Wrap contents of $(top_srcdir)/src/hed/libs/data/DataPoint.h %{ #include %} %ignore Arc::DataPoint::operator!; %ignore Arc::DataPointPluginArgument::operator const URL&; // works with swig 1.3.40, and higher... %ignore Arc::DataPointPluginArgument::operator const Arc::URL&; // works with swig 1.3.29 %ignore Arc::DataPointPluginArgument::operator const UserConfig&; // works with swig 1.3.40, and higher... %ignore Arc::DataPointPluginArgument::operator const Arc::UserConfig&; // works with swig 1.3.29 #ifdef SWIGPYTHON /* This typemap tells SWIG that we don't want to use the * 'std::list& files' argument from the target language, but * we need a temporary variable for internal use, and we want this * argument to point to this temporary variable */ %typemap(in, numinputs=0) std::list& files (std::list temp) { $1 = &temp; } /* This typemap tells SWIG what we want to do with the * 'std::list & files' argument after the method finished: * first we create a python list from the std::list (it should be a * better way to do this...) 
then we want to return a tuple with two * members, the first member will be the newly created list, and the * second member is the original return value, the DataStatus. */ %typemap(argout) std::list& files { PyObject *o = PyList_New(0); std::list::iterator it; for (it = (*$1).begin(); it != (*$1).end(); ++it) { PyList_Append(o, SWIG_NewPointerObj(new Arc::FileInfo(*it), SWIGTYPE_p_Arc__FileInfo, SWIG_POINTER_OWN | 0 )); } $result = PyTuple_Pack(2, o, $result); } /* applies to: * virtual DataStatus DataPoint::Stat(std::list& files, const std::list& urls, DataPointInfoType verb = INFO_TYPE_ALL) * virtual DataStatus DataPoint::List(std::list& files, DataPointInfoType verb = INFO_TYPE_ALL) */ #endif %include "../src/hed/libs/data/DataPoint.h" #ifdef SWIGPYTHON %clear std::list& files; #endif %wraplist(FileInfo, Arc::FileInfo); %wraplist(DataPoint, Arc::DataPoint*); // Wrap contents of $(top_srcdir)/src/hed/libs/data/DataHandle.h %{ #include %} %ignore Arc::DataHandle::operator!; %ignore Arc::DataHandle::operator->; #ifdef SWIGJAVA %ignore Arc::DataHandle::operator*; #endif %include "../src/hed/libs/data/DataHandle.h" // Wrap contents of $(top_srcdir)/src/hed/libs/data/DataSpeed.h %{ #include %} %include "../src/hed/libs/data/DataSpeed.h" // Wrap contents of $(top_srcdir)/src/hed/libs/data/DataBuffer.h %{ #include %} %ignore Arc::DataBuffer::operator[](int); #ifdef SWIGPYTHON %{ namespace Arc { typedef struct { bool result; int handle; unsigned int size; unsigned long long int offset; char* buffer; } DataBufferForWriteResult; typedef struct { bool result; int handle; unsigned int size; } DataBufferForReadResult; } // namespace Arc %} %typemap(out) Arc::DataBufferForWriteResult { $result = PyTuple_New(5); PyTuple_SetItem($result,0,PyInt_FromLong($1.result)); PyTuple_SetItem($result,1,PyInt_FromLong($1.handle)); PyTuple_SetItem($result,2,PyInt_FromLong($1.size)); PyTuple_SetItem($result,3,PyInt_FromLong($1.offset)); %#if PY_VERSION_HEX>=0x03000000 PyTuple_SetItem($result,4,$1.buffer?PyUnicode_FromStringAndSize($1.buffer,$1.size):Py_None); %#else PyTuple_SetItem($result,4,$1.buffer?PyString_FromStringAndSize($1.buffer,$1.size):Py_None); %#endif } %typemap(out) Arc::DataBufferForReadResult { $result = PyTuple_Pack(3, PyInt_FromLong($1.result), PyInt_FromLong($1.handle), PyInt_FromLong($1.size)); } %typemap(in) (char* DataBufferIsReadBuf, unsigned int DataBufferIsReadSize) { %#if PY_VERSION_HEX>=0x03000000 $input = PyUnicode_AsUTF8String($input); $1 = PyBytes_AsString($input); $2 = ($1)?PyBytes_Size($input):0; %#else $1 = PyString_AsString($input); $2 = ($1)?PyString_Size($input):0; %#endif } %extend Arc::DataBuffer { Arc::DataBufferForWriteResult for_write(bool wait) { Arc::DataBufferForWriteResult r; r.result = self->for_write(r.handle, r.size, r.offset, wait); r.buffer = r.result?(self->operator[](r.handle)):NULL; return r; } Arc::DataBufferForReadResult for_read(bool wait) { Arc::DataBufferForReadResult r; r.result = self->for_read(r.handle, r.size, wait); return r; } bool is_read(int handle, char* DataBufferIsReadBuf, unsigned int DataBufferIsReadSize, unsigned long long int offset) { char* buf = self->operator[](handle); if(!buf) return false; if(DataBufferIsReadSize > self->buffer_size()) return false; memcpy(buf, DataBufferIsReadBuf, DataBufferIsReadSize); return self->is_read(handle, DataBufferIsReadSize, offset); } }; %ignore Arc::DataBuffer::for_write(int&,unsigned int&,unsigned long long int&,bool); %ignore Arc::DataBuffer::for_read(int&,unsigned int&,bool); %ignore 
Arc::DataBuffer::is_read(int,unsigned int,unsigned long long int); %ignore Arc::DataBuffer::is_read(char*,unsigned int,unsigned long long int); %ignore Arc::DataBuffer::is_written(char*); %ignore Arc::DataBuffer::is_notwritten(char*); #endif %include "../src/hed/libs/data/DataBuffer.h" // Wrap contents of $(top_srcdir)/src/hed/libs/data/FileCache.h %{ #ifdef WIN32 #ifndef uid_t #define uid_t int #endif #ifndef gid_t #define gid_t int #endif #endif %} typedef int uid_t; typedef int gid_t; %{ #include %} %include "../src/hed/libs/data/FileCache.h" // Wrap contents of $(top_srcdir)/src/hed/libs/data/DataMover.h %{ #include %} %include "../src/hed/libs/data/DataMover.h" // Wrap contents of $(top_srcdir)/src/hed/libs/data-staging/DTRStatus.h %{ #include %} %ignore DataStaging::DTRStatus::operator=(const DTRStatusType&); %ignore DataStaging::DTRErrorStatus::operator=(const DTRErrorStatusType&); %include "../src/libs/data-staging/DTRStatus.h" %template(DTRStatusTypeVector) std::vector; // Wrap contents of $(top_srcdir)/src/hed/libs/data-staging/DTR.h %{ #include %} %ignore DataStaging::DTR::operator!; %include "../src/libs/data-staging/DTR.h" %wraplist(DTRCallback, DataStaging::DTRCallback*); // Wrap contents of $(top_srcdir)/src/hed/libs/data-staging/TransferShares.h %{ #include %} %include "../src/libs/data-staging/TransferShares.h" %template(StringIntMap) std::map; // Wrap contents of $(top_srcdir)/src/hed/libs/data-staging/Scheduler.h #ifdef SWIGJAVA // Enable a Java class to inherit from Scheduler (and DTRCallback) so that // its receiveDTR() is called. DTRCallback cannot be derived from directly // because it is pure virtual and swig does not create a constructor. %feature("director") Scheduler; #endif %{ #include %} %include "../src/libs/data-staging/Scheduler.h" %ignore Arc::ThreadedPointer::operator=(const ThreadedPointer&); %ignore Arc::ThreadedPointer::operator=(T*); %ignore Arc::ThreadedPointer::operator!; %import "../src/hed/libs/common/Thread.h" %ignore Arc::ThreadedPointer::operator bool; // Clash between "operator bool" in DTR and ThreadedPointer (smart pointer wrapping). 
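// Illustrative note (not part of the original interface file): with the
// typemaps and %extend wrappers for DataBuffer defined earlier in this file,
// the class is expected to be usable from Python roughly as in the sketch
// below. Only the tuple shapes of for_read()/for_write() and the
// is_read(handle, data, offset) call follow from those wrappers; the
// no-argument construction of DataBuffer is an assumption.
//
//   buf = arc.DataBuffer()
//   ok, handle, size = buf.for_read(True)             # claim a block to fill
//   if ok:
//       buf.is_read(handle, "payload bytes", 0)       # store data, mark block as read
//   ok, handle, size, offset, data = buf.for_write(True)  # claim a filled block for writing out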
%template(DTRPointer) Arc::ThreadedPointer; %template(DTRLogger) Arc::ThreadedPointer; #ifdef SWIGPYTHON %pythoncode %{ import arc def datapoint_from_url(url_string, usercfg=arc.UserConfig()): url = arc.URL(url_string) data_handle = DataHandle(url, usercfg) data_point = data_handle.__ref__() url.thisown = False data_handle.thisown = False return data_point %} #endif nordugrid-arc-5.4.2/swig/PaxHeaders.7502/communication.i0000644000000000000000000000012312106711376021170 xustar000000000000000027 mtime=1360761598.532184 26 atime=1513200576.85473 30 ctime=1513200665.015808831 nordugrid-arc-5.4.2/swig/communication.i0000644000175000002070000000524112106711376021240 0ustar00mockbuildmock00000000000000#ifdef SWIGPYTHON %module communication %include "Arc.i" // (module="..") is needed for inheritance from those classes to work in python %import "../src/hed/libs/common/ArcConfig.h" %import "../src/hed/libs/common/DateTime.h" %import "../src/hed/libs/common/URL.h" %import(module="common") "../src/hed/libs/common/XMLNode.h" %import "../src/hed/libs/message/Message.h" %import "../src/hed/libs/message/PayloadRaw.h" %import "../src/hed/libs/message/PayloadSOAP.h" %import "../src/hed/libs/message/PayloadStream.h" %import "../src/hed/libs/message/MCC_Status.h" #endif // Wrap contents of $(top_srcdir)/src/hed/libs/compute/ClientInterface.h %{ #include %} #ifdef SWIGPYTHON /* These typemaps tells SWIG that we don't want to use the * 'PayloadSOAP ** response' argument from the target language, but we * need a temporary PayloadSOAP for internal use, and we want this * argument to point to this temporary 'PayloadSOAP *' variable (in). * Then after the method finished: we want to return a python tuple (a * list) with two members, the first member will be the '*response' and * the second member is the original return value, the MCC_Status * (argout). */ %typemap(in, numinputs=0) Arc::PayloadSOAP ** response (Arc::PayloadSOAP *temp) { $1 = &temp; } %typemap(argout) Arc::PayloadSOAP ** response { $result = PyTuple_Pack(2, SWIG_NewPointerObj(*$1, SWIGTYPE_p_Arc__PayloadSOAP, SWIG_POINTER_OWN | 0 ), $result); } /* applies to: * MCC_Status ClientSOAP::process(PayloadSOAP *request, PayloadSOAP **response); * MCC_Status ClientSOAP::process(const std::string& action, PayloadSOAP *request, PayloadSOAP **response); */ #endif %include "../src/hed/libs/communication/ClientInterface.h" #ifdef SWIGPYTHON %clear Arc::PayloadSOAP ** response; #endif // Wrap contents of $(top_srcdir)/src/hed/libs/compute/ClientX509Delegation.h %{ #include %} #ifdef SWIGPYTHON %apply std::string& TUPLEOUTPUTSTRING { std::string& delegation_id }; /* Currently applies to: * bool ClientX509Delegation::createDelegation(DelegationType deleg, std::string& delegation_id); * bool ClientX509Delegation::acquireDelegation(DelegationType deleg, std::string& delegation_cred, std::string& delegation_id, const std::string cred_identity = "", const std::string cred_delegator_ip = "", const std::string username = "", const std::string password = ""); * Look in Arc.i for a description of the OUTPUT typemap. 
*/ #endif %include "../src/hed/libs/communication/ClientX509Delegation.h" #ifdef SWIGPYTHON %clear std::string& delegation_id; #endif nordugrid-arc-5.4.2/swig/PaxHeaders.7502/security.i0000644000000000000000000000012312044731630020166 xustar000000000000000027 mtime=1351857048.487543 26 atime=1513200576.84173 30 ctime=1513200665.020808892 nordugrid-arc-5.4.2/swig/security.i0000644000175000002070000000562712044731630020246 0ustar00mockbuildmock00000000000000#ifdef SWIGPYTHON %module security %include "Arc.i" #endif /** * Note that the order of the "%include" statements are important! If a * "%include" depends on other "%include"s, it should be placed after these * "%include" dependencies. */ %{ #include #include #include #include #include #include #include %} %ignore ArcSec::ResponseList::operator[](int); namespace ArcSec { %nodefaultctor Policy; class Policy {}; %nodefaultctor Request; class Request {}; typedef enum { DECISION_PERMIT = 0, DECISION_DENY = 1, DECISION_INDETERMINATE = 2, DECISION_NOT_APPLICABLE = 3 } Result; typedef struct { Result res; } ResponseItem; class ResponseList { public: int size() ; ResponseItem* getItem(int n); ResponseItem* operator[](int n); bool empty(); }; class Response { public: ResponseList& getResponseItems (); }; class Source { public: Source(const Source& s):node(s.node); Source(Arc::XMLNode& xml); Source(std::istream& stream); Source(Arc::URL& url); Source(const std::string& str); }; class SourceFile: public Source { public: SourceFile(const SourceFile& s):Source(s),stream(NULL); SourceFile(const char* name); SourceFile(const std::string& name); }; class SourceURL: public Source { public: SourceURL(const SourceURL& s):Source(s),url(NULL); SourceURL(const char* url); SourceURL(const std::string& url); }; %nodefaultctor Evaluator; %newobject Evaluator::evaluate; class Evaluator { public: void addPolicy(const Source& policy,const std::string& id = ""); %apply SWIGTYPE *DISOWN {Policy *policy}; void addPolicy(Policy* policy,const std::string& id = ""); %clear Policy *policy; Response* evaluate(Request* request); Response* evaluate(const Source& request); Response* evaluate(const Source& request, const Source& policy); Response* evaluate(const Source& request, Policy* policyobj); Response* evaluate(Request* request, Policy* policyobj); Response* evaluate(Request* request, const Source& policy); }; %newobject EvaluatorLoader::getEvaluator; %newobject EvaluatorLoader::getRequest; %newobject EvaluatorLoader::getPolicy; class EvaluatorLoader { public: EvaluatorLoader(); Evaluator* getEvaluator(const std::string& classname); Request* getRequest(const std::string& classname, const Source& requestsource); Policy* getPolicy(const std::string& classname, const Source& policysource); }; } //namespace ArcSec nordugrid-arc-5.4.2/swig/PaxHeaders.7502/README0000644000000000000000000000012311037472457017037 xustar000000000000000027 mtime=1216247087.350292 26 atime=1513200576.84173 30 ctime=1513200665.006808721 nordugrid-arc-5.4.2/swig/README0000644000175000002070000000007211037472457017104 0ustar00mockbuildmock00000000000000This directory collects the Swig wrapper defintion files. 
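As an illustration of what these wrapper definitions produce: the 'PayloadSOAP ** response' typemap in communication.i turns the two-argument C++ call ClientSOAP::process(request, &response) into a Python call returning a (response, status) pair, with the response object first. The sketch below is only a rough example; the endpoint URL, timeout and constructor arguments are placeholders and are not taken from the interface files themselves.

    import arc

    cfg = arc.MCCConfig()
    # URL and timeout are placeholders
    client = arc.ClientSOAP(cfg, arc.URL("https://localhost:60000/echo"), 60)

    request = arc.PayloadSOAP(arc.NS())
    request.NewChild("echo")          # hypothetical operation element

    # the 'PayloadSOAP ** response' argument becomes the first tuple member
    response, status = client.process(request)
    if response is not None:
        pass  # inspect the response XML here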
nordugrid-arc-5.4.2/PaxHeaders.7502/README0000644000000000000000000000012312442324223016052 xustar000000000000000027 mtime=1418307731.508846 26 atime=1513200576.83173 30 ctime=1513200658.579730115 nordugrid-arc-5.4.2/README0000644000175000002070000003456312442324223016133 0ustar00mockbuildmock00000000000000ARC Middleware ============== The Advanced Resource Connector (ARC) middleware, developed by the NorduGrid Collaboration (www.nordugrid.org), is an open source software solution enabling computing grid infrastructures with emphasis on processing of large data volumes. ARC is being used to enable national and international e-infrastructures since its first release in 2002. Dependencies ============ The core part of middleware is written in C/C++. Building the software from source or installing a pre-compiled binary requires different external packages, furthermore the client and server packages have different dependencies too. Below a list of the explicit requirements is shown: Mandatory dependencies ---------------------- Build: o GNU make, autotools (autoconf>=2.56) (automake>=1.8) o CVS o m4 o GNU gettext o C++ compiler and library o libtool o pkg-config o doxygen Build & runtime: o e2fsprogs o gthread-2.0 version 2.4.7 or later o glibmm-2.4 version 2.4.7 or later o libxml-2.0 version 2.4.0 or later o openssl version 0.9.7a or later If you are using LDAP based infosys: o open-ldap server o bdii version 5 or later o glue-schema Optional dependencies --------------------- Build: o CppUnit for unit testing o Grid Packaging Tools (GPT) (compute client) o swig version 1.3.28 or later (bindings) Build & runtime: o open-ldap client (LDAP DMC) o python 2.4 or higher (bindings, APEL publisher by Jura, ACIX) o java sdk 1.4 or later (bindings) o globus-common 4 (compute client) o globus-gssapi-gsi 4 (compute client) o globus-ftp-client 4 (compute client) o globus-ftp-control 4 (compute client) o globus-io 4 (compute client) o globus-openssl (compute client) o xrootd (xrootd DMC) o GFAL 2 (GFAL DMC) o Berkeley DB C++ interface (Delegation) o xmlsec1 1.2.4 or higher (Security) o NSS 3 (credential) Runtime dependencies: o Perl, libxml-simple-perl, perl-Digest-SHA1 (A-rex) o Perl, perl-SOAP-Lite, perl-Crypt-OpenSSL-X509 (nordugridmap) o perl-DBI (Boinc backend) o GNU time (A-rex) o VOMS o Stomppy, m2Crypto (APEL publisher by Jura) o pyOpenSSL, python-twisted-web, python-twisted-core, python-simplejson, (Python 2.4 only) python-hashlib (ACIX) Please note that depending on operating system distribution to build ARC you may need to install development versions of mentioned packages. Getting the software ==================== The middleware is free to deploy anywhere by anybody. Binary packages are available from Ubuntu, Fedora and EPEL repositories, as well as from NorduGrid's own repositories, as described at http://download.nordugrid.org The software code is released under the Apache 2.0 License (see the LICENSE file). It is available from the NorduGrid's Subversion repository, see http://svn.nordugrid.org for more details. The necessary 3rd party libraries are usually available in standard Linux distributions; in rare cases NorduGrid repositories offer them. There are also nightly code snapshots and package builds available at http://download.nordugrid.org/nightlies/packages/nordugrid-arc/trunk/ . Building & Installation ======================= The recommended way to install ARC is from repositories. 
If you want to build it yourself and downloaded the tarball, unpack it and cd into the created directory (below, replace 1.0.0 with the desired tag): tar -zxvf nordugrid-arc-1.0.0.tar.gz cd nordugrid-arc-1.0.0 If you obtained the code from the Subversion repository, use the 'tags/1.0.0' directory. svn co http://svn.nordugrid.org/repos/nordugrid/arc1/tags/1.0.0 nordugrid-arc cd nordugrid-arc Now configure the obtained code with ./autogen.sh ./configure --prefix=PLACE_TO_INSTALL_ARC Choose the installation prefix wisely and according to the requirements of your OS and personal preferences. ARC should function properly from any location. By default installation goes into /usr/local if you omit the '--prefix' option. If you install into another directory than /usr/local you may need to set up an environment variable after installation: export ARC_LOCATION=PLACE_TO_INSTALL_ARC On some systems 'autogen.sh' may produce few warnings. Ignore them as long as 'configure' passes without errors. But in case of problems during configure or compilation, collect them and present while reporting problems. If the previous commands finish without errors, do touch src/doxygen/*.pdf in order to get around an issue with timestamps and then compile and install ARC: make make install If you have already installed ARC libraries in the system default location such as /usr/lib you may need to use the following installation command instead in order to override installed pkgconfig files and/or libtool archives which contain -L/usr/lib: make LDFLAGS="-L/lib" install On some systems you may need to use gmake instead of make. Depending on the chosen installation location you may need to run the last command from a root account. That should install the following components: sbin/arched - server executable etc/init.d - daemon init scripts bin/ - user tools and command line clients lib/ - common libraries used by clients, server and plugins lib/arc/ - plugins implementing Message Chain, Service and Security components include/arc/ - C++ headers for application development libexec/ - additional modules used by ARC services - currently only A-REX share/arc - configuration examples, templates etc share/doc/nordugrid-arc-* - documentation share/locale - internationalization files - curently very limited support share/man - manual pages for various utilities X509 Certificates ================= All ARC services use HTTPS or GridFTP as transport protocol so they require proper setup of an X509 security infrastructure. Minimal requirements are: * Host certificate aka public key in PEM format * Corresponding private key * Certificate of the Certification Authority (CA) which was used to sign the host certificate * Certificates of CAs of clients which are going to send requests to services, unless of course clients use the same CA as the server. More information about X509 certificates and their usage in Grid environment can be found on http://www.nordugrid.org/documents/certificate_howto.html and http://www.nordugrid.org/documents/arc-server-install.html#security For testing purposes you can use pre-generated certificates and keys available at: http://svn.nordugrid.org/trac/nordugrid/browser/doc/trunk/tech_doc/sec/TestCA Alternatively, you may choose to use KnowARC Instant CA service available at https://arc-emi.grid.upjs.sk/instantCA/instantCA . It is especially useful when testing installations consisting of multiple hosts. Please remember that it is not safe to use such instant keys in publicly accessible installations of ARC. 
Make sure that even the generated CA certificate is removed before making your services available to the outside world. You can put host certificates and private keys anywhere. Common locations for servers running from root account are /etc/grid-security/hostcert.pem and /etc/grid-security/hostkey.pem, respectively. The content of the private key must not be encrypted nor protected by a password since a service has no way to ask a password. Therefore it must be properly protected by means of file system permissions (some services enforce that the private key is only readable by the user running the service). It is possible to configure the ARC server to accept either a single CA certificate or multiple CA certificates located in the specified directory. The latter option is recommended. The common location is /etc/grid-security/certificates/ . In that case the names of the certificate files have to follow the hash values of the certificates. These are obtainable by running the command openssl x509 -hash -noout -in path_to_certificate The corresponding file name for the certificate should be .0 . The value for the pre-generated CA certificate is 4457e417. For the ARC client tools you may use the pre-generated user certificate and key located at the same place above. Generally the key and certificate are not used directly but a passphraseless proxy certificate is generated and used instead. ARC comes with a proxy generation utility arcproxy - see 'man arcproxy' for usage and options. Locations of the credentials are provided to the client tools via the client configuration file. The set of pre-generated keys and certificates also includes a user certificate in PKCS12 format which you can import into your browser for accessing ARC services capable of producing HTML output. IMPORTANT: If during the configuration stage you see a message "OpenSSL contains no support for proxy credentials" that means you won't be able to use proxy credentials generated by utilities like grid-proxy-init, voms-proxy-init or arcproxy. Because of that all user private keys must be kept unencrypted. ARC Server Setup & Configuration ================================ The configuration of the ARC server is specified in a file which by default is at /etc/arc.conf. A different location can be specified by the ARC_CONFIG environment variable. For configuration details and examples please refer to the reference in share/arc/arc.conf.reference or the service manual of the particular services you wish to run. The A-REX Service ================= ARC comes with an OGSA BES compliant Grid job management service called A-REX. To deploy A-REX refer to "ARC Computing Element: System Administrator Guide" (NORDUGRID-MANUAL-20) which contains extensive information on set up and configuration of A-REX. Testing and Using A-REX (clients) ================================= Instructions below refer to the Web Service interface; similar tests can be done for the original GridFTP-based interface (gsiftp:// protocol). Now you may use the command line utility 'arcinfo' to obtain a service description. You can do something like arcinfo -c https://localhost:60000/arex -l This should produce a description list of the resources A-REX represents. Below you can see a truncated example of proper output. 
--- Cluster: localhost Health State: ok Location information: Domain information: Service information: Service Name: MINIMAL Computing Element Service Type: org.nordugrid.execution.arex Endpoint information: URL: https://localhost:60000/arex Capabilities: executionmanagement.jobexecution Technology: webservice Interface Name: OGSA-BES Supported Profiles: WS-I 1.0 HPC-BP Implementor: NorduGrid Implementation Name: A-REX Implementation Version: 0.9 QualityLevel: development Health State: ok Serving State: production Issuer CA: /O=Grid/O=NorduGrid/CN=NorduGrid Certification Authority Trusted CAs: /C=BE/O=BELNET/OU=BEGrid/CN=BEGrid CA/emailAddress=gridca@belnet.be /C=FR/O=CNRS/CN=CNRS2-Projets /DC=org/DC=ugrid/CN=UGRID CA [...] Staging: staginginout Job Descriptions: ogf:jsdl:1.0 Queue information: Mapping Queue: default Max Total Jobs: 100 Max Running Jobs: 10 Max Waiting Jobs: 99 Max Pre LRMS Waiting Jobs: 0 Max User Running Jobs: 5 Max Slots Per Job: 1 Doesn't Support Preemption Total Jobs: 0 Running Jobs: 0 Waiting Jobs: 0 Suspended Jobs: 0 Staging Jobs: 0 Pre-LRMS Waiting Jobs: 0 Free Slots: 10 Free Slots With Duration: P68Y1M5DT3H14M7S: 10 Used Slots: 0 Requested Slots: 0 Manager information: Resource Manager: torque Doesn't Support Advance Reservations Doesn't Support Bulk Submission Total Physical CPUs: 10 Total Logical CPUs: 10 Total Slots: 10 Non-homogeneous Resource Working area is nor shared among jobs Working Area Total Size: 15 Working Area Free Size: 4 Working Area Life Time: P7D Cache Area Total Size: 15 Cache Area Free Size: 4 Execution Environment information: Execution environment is a physical machine Execution environment does not support inbound connections Execution environment does not support outbound connections --- A-REX accepts jobs described in XRSL, which is described in "Extended Resource Specification Language: Reference Manual for ARC versions 0.8 and above" (NORDUGRID-MANUAL-4). To submit a job to the A-REX service one may use the 'arcsub' command: arcsub -c https://localhost:60000/arex -f simple.xrsl If everything goes fine, somewhere in its output there should be a message "Job submitted", and a job identifier is obtained which will be stored locally in a client job store. One can then query job state with the 'arcstat' utility: arcstat State: Running arcstat State: Finished For more information on these and other arc* job and data management commands please see the man pages of those utilities or "ARC Clients: User Manual for ARC 11.05 (client versions 1.0.0) and above" (NORDUGRID-MANUAL-13). Contributing ============ The open source development of the ARC middleware is coordinated by the NorduGrid Collaboration which is always open to new members. Contributions from the community to the software and the documentation is welcomed. Sources can be downloaded from the software repository at download.nordugrid.org or the Subversion code repository at svn.nordugrid.org. The technical coordination group defines outstanding issues that have to be addressed in the framework of ARC development. Feature requests and enhancement proposals are recorded in the Bugzilla bug tracking system at bugzilla.nordugrid.org. For a more detailed description, write access to the code repository and further questions, write to the nordugrid-discuss mailing list (see www.nordugrid.org for details). Ongoing and completed Grid research projects and student assignments related to the middleware are listed on the NorduGrid web site as well. 
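In addition to the command line clients shown above, the Python bindings built from this source tree (see the swig/ and python/ subdirectories) can be used to query registries and compute services directly. The following minimal sketch is modelled on the bundled Python test suite; the registry host name is only a placeholder:

    import arc

    uc = arc.UserConfig()
    retriever = arc.ServiceEndpointRetriever(uc)
    container = arc.EndpointContainer()
    retriever.addConsumer(container)
    # placeholder registry endpoint
    retriever.addEndpoint(arc.Endpoint("index1.nordugrid.org", arc.Endpoint.REGISTRY))
    retriever.wait()
    for endpoint in container:
        print(endpoint.URLString)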
Support, documentation, mailing lists, contact ============================================== User support and site installation assistance is provided via the request tracking system available at nordugrid-support@nordugrid.org. In addition, the NorduGrid runs several mailing lists, among which the nordugrid-discuss mailing list is a general forum for all kind of issues related to the ARC middleware. The Bugzilla problem tracking system (bugzilla.nordugrid.org) accepts requests for features or enhancements, and is the prime medium to track and report problems. Research papers, overview talks, reference manuals, user guides, installation instructions, conference presentations, FAQ and even tutorial materials can be fetched from the documentation section of www.nordugrid.org Contact information is kept updated on the www.nordugrid.org web site. nordugrid-arc-5.4.2/PaxHeaders.7502/python0000644000000000000000000000013213214316031016432 xustar000000000000000030 mtime=1513200665.132810262 30 atime=1513200668.723854182 30 ctime=1513200665.132810262 nordugrid-arc-5.4.2/python/0000755000175000002070000000000013214316031016555 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/python/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712242371471020560 xustar000000000000000027 mtime=1384772409.509397 30 atime=1513200592.403920759 30 ctime=1513200665.120810115 nordugrid-arc-5.4.2/python/Makefile.am0000644000175000002070000000044612242371471020626 0ustar00mockbuildmock00000000000000if ALTPYTHON_ENABLED ALTPYTHON_DIR = altpython else ALTPYTHON_DIR = endif DIST_SUBDIRS = python altpython test examples SUBDIRS = python $(ALTPYTHON_DIR) $(TEST_DIR) examples EXTRA_DIST = Doxyfile.api.in doxy2swig.py __init__.py common.mk \ swigmodulesinit_wrap.cpp arc_init.cpp nordugrid-arc-5.4.2/python/PaxHeaders.7502/arc_init.cpp0000644000000000000000000000012312134602144021006 xustar000000000000000027 mtime=1366492260.756585 26 atime=1513200576.86073 30 ctime=1513200665.127810201 nordugrid-arc-5.4.2/python/arc_init.cpp0000644000175000002070000000545212134602144021062 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include static bool __arc_init_try_path(const std::string& modulepath) { Glib::Module *module = NULL; #ifdef HAVE_GLIBMM_BIND_LOCAL module = new Glib::Module(modulepath,Glib::ModuleFlags(0)); #else module = new Glib::Module(modulepath); #endif if(module == NULL) return false; if(*module) return true; delete module; return false; } __attribute__((constructor)) void __arc_init(void) { /* char* Py_GetPath () Return the default module search path; this is computed from the program name (set by Py_SetProgramName() above) and some environment variables. The returned string consists of a series of directory names separated by a platform dependent delimiter character. The delimiter character is ":" on Unix, ";" on DOS/Windows, and "\n" (the ASCII newline character) on Macintosh. The returned string points into static storage; the caller should not modify its value. The value is available to Python code as the list sys.path, which may be modified to change the future search path for loaded modules. Note: it seems like Python is hiding site-packages part of path. Maybe it is hardcoded inside Python somewhere. But at least part till site-packages seems to be present. 
*/ #if PY_MAJOR_VERSION >= 3 std::wstring pythonwpath = Py_GetPath(); std::string pythonpath(pythonwpath.begin(), pythonwpath.end()); #else std::string pythonpath = Py_GetPath(); #endif std::string::size_type start = 0; std::string::size_type end = pythonpath.find_first_of(";:\n"); if(end == std::string::npos) end=pythonpath.length(); for(;start/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/python/PaxHeaders.7502/test0000644000000000000000000000013213214316031017411 xustar000000000000000030 mtime=1513200665.287812158 30 atime=1513200668.723854182 30 ctime=1513200665.287812158 nordugrid-arc-5.4.2/python/test/0000755000175000002070000000000013214316031017534 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/python/test/PaxHeaders.7502/ServiceEndpointRetrieverTest.py0000644000000000000000000000012312051450410025667 xustar000000000000000027 mtime=1353077000.196319 27 atime=1513200576.889731 29 ctime=1513200665.27981206 nordugrid-arc-5.4.2/python/test/ServiceEndpointRetrieverTest.py0000644000175000002070000004035612051450410025745 0ustar00mockbuildmock00000000000000import testutils, arc, unittest, time class ServiceEndpointRetrieverTest(testutils.ARCClientTestCase): def setUp(self): self.usercfg = arc.UserConfig(arc.initializeCredentialsType(arc.initializeCredentialsType.SkipCredentials)) arc.ServiceEndpointRetrieverPluginTESTControl.endpoints.clear() arc.ServiceEndpointRetrieverPluginTESTControl.status.clear() arc.ServiceEndpointRetrieverPluginTESTControl.condition.clear() def tearDown(self): try: self.condition.signal() del self.condition except AttributeError: pass try: self.retriever.wait() del self.retriever except AttributeError: pass def test_the_class_exists(self): self.expect(arc.ServiceEndpointRetriever).to_be_an_instance_of(type) def test_the_constructor(self): self.retriever = arc.ServiceEndpointRetriever(self.usercfg) self.expect(self.retriever).to_be_an_instance_of(arc.ServiceEndpointRetriever) def test_getting_the_endpoints(self): arc.ServiceEndpointRetrieverPluginTESTControl.endpoints.push_back([arc.Endpoint()]) arc.ServiceEndpointRetrieverPluginTESTControl.status.push_back(arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL)) self.retriever = arc.ServiceEndpointRetriever(self.usercfg) container = arc.EndpointContainer() self.retriever.addConsumer(container) self.expect(container).to_be_empty() registry = arc.Endpoint("test.nordugrid.org", arc.Endpoint.REGISTRY, "org.nordugrid.sertest") self.retriever.addEndpoint(registry) self.retriever.wait() self.expect(container).to_have(1).endpoint() def test_getting_status(self): arc.ServiceEndpointRetrieverPluginTESTControl.endpoints.push_back([arc.Endpoint()]) arc.ServiceEndpointRetrieverPluginTESTControl.status.push_back(arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.FAILED)) self.retriever = arc.ServiceEndpointRetriever(self.usercfg) container = arc.EndpointContainer() self.retriever.addConsumer(container) registry = arc.Endpoint("test.nordugrid.org", arc.Endpoint.REGISTRY, "org.nordugrid.sertest") self.retriever.addEndpoint(registry) self.retriever.wait() status = self.retriever.getStatusOfEndpoint(registry) self.expect(status).to_be_an_instance_of(arc.EndpointQueryingStatus) self.expect(status).to_be(arc.EndpointQueryingStatus.FAILED) def test_the_status_is_started_first(self): arc.ServiceEndpointRetrieverPluginTESTControl.endpoints.push_back([arc.Endpoint()]) arc.ServiceEndpointRetrieverPluginTESTControl.status.push_back(arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL)) self.condition = arc.SimpleCondition() arc.ServiceEndpointRetrieverPluginTESTControl.condition.push_back(self.condition) self.retriever = arc.ServiceEndpointRetriever(self.usercfg) container = arc.EndpointContainer() self.retriever.addConsumer(container) registry = arc.Endpoint("test.nordugrid.org", arc.Endpoint.REGISTRY, "org.nordugrid.sertest") 
self.retriever.addEndpoint(registry) status = self.retriever.getStatusOfEndpoint(registry) self.expect(status).to_be(arc.EndpointQueryingStatus.STARTED) self.condition.signal() self.retriever.wait() status = self.retriever.getStatusOfEndpoint(registry) self.expect(status).to_be(arc.EndpointQueryingStatus.SUCCESSFUL) def test_constructor_returns_immediately(self): arc.ServiceEndpointRetrieverPluginTESTControl.endpoints.push_back([arc.Endpoint()]) arc.ServiceEndpointRetrieverPluginTESTControl.status.push_back(arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL)) self.condition = arc.SimpleCondition() arc.ServiceEndpointRetrieverPluginTESTControl.condition.push_back(self.condition) self.retriever = arc.ServiceEndpointRetriever(self.usercfg) container = arc.EndpointContainer() self.retriever.addConsumer(container) registry = arc.Endpoint("test.nordugrid.org", arc.Endpoint.REGISTRY, "org.nordugrid.sertest") self.retriever.addEndpoint(registry) # the endpoint should not arrive yet self.expect(container).to_have(0).endpoints() self.condition.signal() # we are not interested in it anymore self.retriever.removeConsumer(container) # we must wait until self.retriever is done otherwise 'condition' will go out of scope while being used. self.retriever.wait() def test_same_endpoint_is_not_queried_twice(self): arc.ServiceEndpointRetrieverPluginTESTControl.endpoints.push_back([arc.Endpoint()]) arc.ServiceEndpointRetrieverPluginTESTControl.endpoints.push_back([arc.Endpoint()]) arc.ServiceEndpointRetrieverPluginTESTControl.status.push_back(arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL)) arc.ServiceEndpointRetrieverPluginTESTControl.status.push_back(arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL)) self.retriever = arc.ServiceEndpointRetriever(self.usercfg) container = arc.EndpointContainer() self.retriever.addConsumer(container) registry = arc.Endpoint("test.nordugrid.org", arc.Endpoint.REGISTRY, "org.nordugrid.sertest") self.retriever.addEndpoint(registry) self.retriever.addEndpoint(registry) self.retriever.wait() self.expect(container).to_have(1).endpoint() def test_filtering(self): arc.ServiceEndpointRetrieverPluginTESTControl.endpoints.push_back([ arc.Endpoint("test1.nordugrid.org", ["cap1","cap2"]), arc.Endpoint("test2.nordugrid.org", ["cap3","cap4"]), arc.Endpoint("test3.nordugrid.org", ["cap1","cap3"]) ]) arc.ServiceEndpointRetrieverPluginTESTControl.endpoints.push_back([ arc.Endpoint("test1.nordugrid.org", ["cap1","cap2"]), arc.Endpoint("test2.nordugrid.org", ["cap3","cap4"]), arc.Endpoint("test3.nordugrid.org", ["cap1","cap3"]) ]) arc.ServiceEndpointRetrieverPluginTESTControl.endpoints.push_back([ arc.Endpoint("test1.nordugrid.org", ["cap1","cap2"]), arc.Endpoint("test2.nordugrid.org", ["cap3","cap4"]), arc.Endpoint("test3.nordugrid.org", ["cap1","cap3"]) ]) arc.ServiceEndpointRetrieverPluginTESTControl.status.push_back(arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL)) arc.ServiceEndpointRetrieverPluginTESTControl.status.push_back(arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL)) arc.ServiceEndpointRetrieverPluginTESTControl.status.push_back(arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL)) registry = arc.Endpoint("test.nordugrid.org", arc.Endpoint.REGISTRY, "org.nordugrid.sertest") options = arc.ServiceEndpointQueryOptions(False, ["cap1"]) self.retriever = arc.ServiceEndpointRetriever(self.usercfg, options) container = arc.EndpointContainer() self.retriever.addConsumer(container) 
self.retriever.addEndpoint(registry) self.retriever.wait() self.expect(container).to_have(2).endpoints() options = arc.ServiceEndpointQueryOptions(False, ["cap2"]) self.retriever = arc.ServiceEndpointRetriever(self.usercfg, options) container = arc.EndpointContainer() self.retriever.addConsumer(container) self.retriever.addEndpoint(registry) self.retriever.wait() self.expect(container).to_have(1).endpoint() options = arc.ServiceEndpointQueryOptions(False, ["cap5"]) self.retriever = arc.ServiceEndpointRetriever(self.usercfg, options) container = arc.EndpointContainer() self.retriever.addConsumer(container) self.retriever.addEndpoint(registry) self.retriever.wait() self.expect(container).to_have(0).endpoints() def test_recursivity(self): arc.ServiceEndpointRetrieverPluginTESTControl.endpoints.push_back([ arc.Endpoint("emir.nordugrid.org", arc.Endpoint.REGISTRY, "org.nordugrid.sertest"), arc.Endpoint("ce.nordugrid.org", arc.Endpoint.COMPUTINGINFO, "org.ogf.glue.emies.resourceinfo"), ]) arc.ServiceEndpointRetrieverPluginTESTControl.status.push_back(arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL)) arc.ServiceEndpointRetrieverPluginTESTControl.endpoints.push_back([ arc.Endpoint("emir.nordugrid.org", arc.Endpoint.REGISTRY, "org.nordugrid.sertest"), arc.Endpoint("ce.nordugrid.org", arc.Endpoint.COMPUTINGINFO, "org.ogf.glue.emies.resourceinfo"), ]) arc.ServiceEndpointRetrieverPluginTESTControl.status.push_back(arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL)) options = arc.ServiceEndpointQueryOptions(True) self.retriever = arc.ServiceEndpointRetriever(self.usercfg, options) container = arc.EndpointContainer() self.retriever.addConsumer(container) registry = arc.Endpoint("test.nordugrid.org", arc.Endpoint.REGISTRY, "org.nordugrid.sertest") self.retriever.addEndpoint(registry) self.retriever.wait() # expect to get both service endpoints twice # once from test.nordugrid.org, once from emir.nordugrid.org self.expect(container).to_have(4).endpoints() emirs = [endpoint for endpoint in container if "emir" in endpoint.URLString] ces = [endpoint for endpoint in container if "ce" in endpoint.URLString] self.expect(emirs).to_have(2).endpoints() self.expect(ces).to_have(2).endpoints() def test_recursivity_with_filtering(self): arc.ServiceEndpointRetrieverPluginTESTControl.endpoints.push_back([ arc.Endpoint("emir.nordugrid.org", arc.Endpoint.REGISTRY, "org.nordugrid.sertest"), arc.Endpoint("ce.nordugrid.org", arc.Endpoint.COMPUTINGINFO, "org.ogf.glue.emies.resourceinfo"), ]) arc.ServiceEndpointRetrieverPluginTESTControl.status.push_back(arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL)) arc.ServiceEndpointRetrieverPluginTESTControl.endpoints.push_back([ arc.Endpoint("emir.nordugrid.org", arc.Endpoint.REGISTRY, "org.nordugrid.sertest"), arc.Endpoint("ce.nordugrid.org", arc.Endpoint.COMPUTINGINFO, "org.ogf.glue.emies.resourceinfo"), ]) arc.ServiceEndpointRetrieverPluginTESTControl.status.push_back(arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL)) options = arc.ServiceEndpointQueryOptions(True, ["information.discovery.resource"]) self.retriever = arc.ServiceEndpointRetriever(self.usercfg, options) container = arc.EndpointContainer() self.retriever.addConsumer(container) registry = arc.Endpoint("test.nordugrid.org", arc.Endpoint.REGISTRY, "org.nordugrid.sertest") self.retriever.addEndpoint(registry) self.retriever.wait() # expect to only get the ce.nordugrid.org, but that will be there twice # once from test.nordugrid.org, once from 
emir.nordugrid.org self.expect(container).to_have(2).endpoints() emirs = [endpoint for endpoint in container if "emir" in endpoint.URLString] ces = [endpoint for endpoint in container if "ce" in endpoint.URLString] self.expect(emirs).to_have(0).endpoints() self.expect(ces).to_have(2).endpoints() def test_rejected_services(self): rejected = "http://test.nordugrid.org" not_rejected = "http://test2.nordugrid.org" arc.ServiceEndpointRetrieverPluginTESTControl.endpoints.push_back([ arc.Endpoint(rejected), arc.Endpoint(not_rejected) ]) arc.ServiceEndpointRetrieverPluginTESTControl.status.push_back(arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL)) options = arc.ServiceEndpointQueryOptions(False, [], [rejected]) self.retriever = arc.ServiceEndpointRetriever(self.usercfg, options) container = arc.EndpointContainer() self.retriever.addConsumer(container) registry = arc.Endpoint("registry.nordugrid.org", arc.Endpoint.REGISTRY) self.retriever.addEndpoint(registry) self.retriever.wait() self.expect(container).to_have(1).endpoint() self.expect(container[0].URLString).to_be(not_rejected) def test_empty_registry_type(self): arc.ServiceEndpointRetrieverPluginTESTControl.endpoints.push_back(arc.EndpointList()) arc.ServiceEndpointRetrieverPluginTESTControl.status.push_back(arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL)) self.retriever = arc.ServiceEndpointRetriever(self.usercfg) container = arc.EndpointContainer() self.retriever.addConsumer(container) registry = arc.Endpoint("test.nordugrid.org", arc.Endpoint.REGISTRY) self.retriever.addEndpoint(registry) self.retriever.wait() # it should fill the empty type with the available plugins: # among them the TEST plugin which doesn't return any endpoint self.expect(container).to_have(0).endpoint() def test_status_of_typeless_registry(self): arc.ServiceEndpointRetrieverPluginTESTControl.endpoints.push_back(arc.EndpointList()) arc.ServiceEndpointRetrieverPluginTESTControl.status.push_back(arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL)) self.retriever = arc.ServiceEndpointRetriever(self.usercfg) container = arc.EndpointContainer() self.retriever.addConsumer(container) registry = arc.Endpoint("test.nordugrid.org", arc.Endpoint.REGISTRY) self.retriever.addEndpoint(registry) self.retriever.wait() status = self.retriever.getStatusOfEndpoint(registry) self.expect(status).to_be(arc.EndpointQueryingStatus.SUCCESSFUL) def test_deleting_the_consumer_before_the_retriever(self): arc.ServiceEndpointRetrieverPluginTESTControl.endpoints.push_back(arc.EndpointList()) arc.ServiceEndpointRetrieverPluginTESTControl.status.push_back(arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL)) self.retriever = arc.ServiceEndpointRetriever(self.usercfg) container = arc.EndpointContainer() self.retriever.addConsumer(container) registry = arc.Endpoint("test.nordugrid.org", arc.Endpoint.REGISTRY, "org.nordugrid.sertest") self.retriever.addEndpoint(registry) self.retriever.removeConsumer(container) del container self.retriever.wait() # expect it not to crash def test_works_without_consumer(self): self.retriever = arc.ServiceEndpointRetriever(self.usercfg) registry = arc.Endpoint("test.nordugrid.org", arc.Endpoint.REGISTRY, "org.nordugrid.sertest") self.retriever.addEndpoint(registry) self.retriever.wait() # expect it not to crash def test_removing_consumer(self): self.retriever = arc.ServiceEndpointRetriever(self.usercfg) container = arc.EndpointContainer() self.retriever.addConsumer(container) 
self.retriever.removeConsumer(container) registry = arc.Endpoint("test.nordugrid.org", arc.Endpoint.REGISTRY, "org.nordugrid.sertest") self.retriever.addEndpoint(registry) self.retriever.wait() self.expect(container).to_have(0).endpoints() def test_two_consumers(self): arc.ServiceEndpointRetrieverPluginTESTControl.endpoints.push_back([arc.Endpoint()]) arc.ServiceEndpointRetrieverPluginTESTControl.status.push_back(arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL)) self.retriever = arc.ServiceEndpointRetriever(self.usercfg) container1 = arc.EndpointContainer() container2 = arc.EndpointContainer() self.retriever.addConsumer(container1) self.retriever.addConsumer(container2) registry = arc.Endpoint("test.nordugrid.org", arc.Endpoint.REGISTRY, "org.nordugrid.sertest") self.retriever.addEndpoint(registry) self.retriever.wait() self.expect(container1).to_have(1).endpoint() self.expect(container2).to_have(1).endpoint() if __name__ == '__main__': unittest.main() nordugrid-arc-5.4.2/python/test/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712240743250021533 xustar000000000000000027 mtime=1384367784.271138 30 atime=1513200592.774925297 30 ctime=1513200665.269811938 nordugrid-arc-5.4.2/python/test/Makefile.am0000644000175000002070000000032212240743250021572 0ustar00mockbuildmock00000000000000if ALTPYTHON_ENABLED ALTPYTHON_DIR = altpython else ALTPYTHON_DIR = endif DIST_SUBDIRS = python altpython SUBDIRS = python $(ALTPYTHON_DIR) include tests.mk EXTRA_DIST = $(TESTSCRIPTS) $(AUXFILES) common.mk nordugrid-arc-5.4.2/python/test/PaxHeaders.7502/Makefile.in0000644000000000000000000000013113214315720021536 xustar000000000000000030 mtime=1513200592.806925688 30 atime=1513200652.825659741 29 ctime=1513200665.27081195 nordugrid-arc-5.4.2/python/test/Makefile.in0000644000175000002070000005715513214315720021622 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/tests.mk subdir = python/test ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = 
@ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = 
@LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ 
libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @ALTPYTHON_ENABLED_FALSE@ALTPYTHON_DIR = @ALTPYTHON_ENABLED_TRUE@ALTPYTHON_DIR = altpython DIST_SUBDIRS = python altpython SUBDIRS = python $(ALTPYTHON_DIR) @PYTHON_SWIG_ENABLED_TRUE@TESTSCRIPTS = \ @PYTHON_SWIG_ENABLED_TRUE@ DeletingSwigIteratorObtainedFromPublicListRegTest.py \ @PYTHON_SWIG_ENABLED_TRUE@ UsingTheNextMethodOnASwigIteratorRegTest.py \ @PYTHON_SWIG_ENABLED_TRUE@ JobSupervisorTest.py EndpointContainerTest.py \ @PYTHON_SWIG_ENABLED_TRUE@ EndpointTest.py ServiceEndpointRetrieverTest.py \ @PYTHON_SWIG_ENABLED_TRUE@ TargetInformationRetrieverTest.py \ @PYTHON_SWIG_ENABLED_TRUE@ ComputingServiceRetrieverTest.py \ @PYTHON_SWIG_ENABLED_TRUE@ MappingOf_time_t_and_uint32_t_CTypesToPythonRegressionTest.py AUXFILES = testutils.py EXTRA_DIST = $(TESTSCRIPTS) $(AUXFILES) common.mk all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(srcdir)/tests.mk $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign python/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign python/test/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. 
# To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
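# `make check` in this directory only recurses into the SUBDIRS listed above
# (python and, when enabled, altpython); the actual TESTS are supplied by
# ../common.mk and ../tests.mk in those subdirectories.  With GNU make the
# recursion can be narrowed from the command line, e.g. (illustrative only,
# assuming an in-tree build):
#
#   make check SUBDIRS=python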
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/python/test/PaxHeaders.7502/DeletingSwigIteratorObtainedFromPublicListRegTest.py0000644000000000000000000000012411710546233031733 xustar000000000000000027 mtime=1327680667.638419 27 atime=1513200576.896731 30 ctime=1513200665.273811986 nordugrid-arc-5.4.2/python/test/DeletingSwigIteratorObtainedFromPublicListRegTest.py0000644000175000002070000000037311710546233032003 0ustar00mockbuildmock00000000000000''' This regression test tests whether deleting a Swig iterator obtained from an ARC C++ public member std::list object generates a segmentation fault. That issue was reported in bug 2473. 
''' import arc j = arc.Job() for i in j.Error: continue nordugrid-arc-5.4.2/python/test/PaxHeaders.7502/testutils.py0000644000000000000000000000012412540540276022114 xustar000000000000000027 mtime=1434632382.644008 27 atime=1513200576.891731 30 ctime=1513200665.283812109 nordugrid-arc-5.4.2/python/test/testutils.py0000644000175000002070000001024712540540276022165 0ustar00mockbuildmock00000000000000import unittest, arc class ExpectationalTestCase(unittest.TestCase): class Expectation(object): def __init__(self, actual, testcase): self.actual = actual self.testcase = testcase self.number = None def to_be(self, expected, message = None): if message is None: message = "%s was expected to be %s" % (self.actual, expected) self.testcase.assertEqual(self.actual, expected, message) def not_to_be(self, expected, message = None): if message is None: message = "%s was expected not to be %s" % (self.actual, expected) self.testcase.assertNotEqual(self.actual, expected, message) def to_be_empty(self): self.testcase.assertEqual(len(self.actual), 0, "%s was expected to be empty" % (self.actual,)) def not_to_be_empty(self): self.testcase.assertNotEqual(len(self.actual), 0, "%s was expected not to be empty" % (self.actual,)) def to_be_an_instance_of(self, class_): self.testcase.assertTrue(isinstance(self.actual, class_), "%s was expected to be an instance of %s" % (self.actual, class_)) def to_not_throw(self, exception = Exception): try: self.actual except exception: exc = sys.exc_info()[1] self.testcase.fail("%s was expected not to raise an exception, but it did: %s" % (self.actual, exc)) def to_contain(self, *items): for item in items: self.testcase.assertTrue(item in self.actual, "%s was expected to contain %s" % (self.actual, item)) def to_have(self, number): self.number = number return self def _test_having(self): self.testcase.assertEqual(len(self.actual), self.number, "%s was expected to have %s %s, but it has %s instead" % (self.actual, self.number, self.item_name, len(self.actual))) def __getattr__(self, name): if self.number is not None: self.item_name = name return self._test_having else: raise AttributeError def expect(self, actual): return self.Expectation(actual, self) def with_logging(function): def logging_func(self): import sys arc.Logger.getRootLogger().addDestination(arc.LogStream(sys.stdout)) arc.Logger.getRootLogger().setThreshold(arc.DEBUG) result = function(self) arc.Logger.getRootLogger().removeDestinations() return result return logging_func class ARCClientTestCase(ExpectationalTestCase): def create_test_target(self, url = "http://test.nordugrid.org"): cs = arc.ComputingServiceType() cs.ComputingEndpoint[0] = arc.ComputingEndpointType() cs.ComputingEndpoint[0].URLString = url cs.ComputingEndpoint[0].InterfaceName = "org.nordugrid.test" cs.ComputingEndpoint[0].HealthState = "ok" cs.ComputingEndpoint[0].Capability.append(arc.Endpoint_GetStringForCapability(arc.Endpoint.JOBCREATION)) cs.ComputingShare[0] = arc.ComputingShareType() cs.ComputingManager[0] = arc.ComputingManagerType() cs.ComputingManager[0].ExecutionEnvironment[0] = arc.ExecutionEnvironmentType() return cs def create_test_job(self, job_id = "http://test.nordugrid.org/testid", cluster = "http://test.nordugrid.org", state = arc.JobState.RUNNING, state_text = None, job_description = "non-empty"): job = arc.Job() job.JobID = job_id job.ServiceInformationInterfaceName = job.JobStatusInterfaceName = job.JobManagementInterfaceName = "org.nordugrid.test" job.ServiceInformationURL = job.JobStatusURL = job.JobManagementURL = 
arc.URL(cluster) if state_text is None: job.State = arc.JobStateTEST(state) else: job.State = arc.JobStateTEST(state, state_text) job.JobDescriptionDocument = job_description return job nordugrid-arc-5.4.2/python/test/PaxHeaders.7502/common.mk0000644000000000000000000000012712265607317021332 xustar000000000000000027 mtime=1389825743.144177 30 atime=1513200592.822925884 30 ctime=1513200665.284812121 nordugrid-arc-5.4.2/python/test/common.mk0000644000175000002070000000203512265607317021374 0ustar00mockbuildmock00000000000000if MACOSX DYLD_LIBRARY_PATH = "$(top_builddir)/src/hed/libs/credentialstore/.libs:$(top_builddir)/src/hed/libs/communication/.libs:$(top_builddir)/src/hed/libs/compute/.libs:$(top_builddir)/src/hed/libs/data/.libs:$(top_builddir)/src/hed/libs/security/.libs:$(top_builddir)/src/hed/libs/credential/.libs:$(top_builddir)/src/hed/libs/crypto/.libs:$(top_builddir)/src/hed/libs/message/.libs:$(top_builddir)/src/hed/libs/loader/.libs:$(top_builddir)/src/hed/libs/common/.libs:$(top_builddir)/src/libs/data-staging/.libs" else DYLD_LIBRARY_PATH = endif include $(srcdir)/../tests.mk $(TESTSCRIPTS) testutils.py: $(addprefix ../, $(TESTSCRIPTS) testutils.py) $(TESTSCRIPTS) testutils.py: %: testutils.py cp -p $(srcdir)/../$* $@ CLEANFILES = $(TESTSCRIPTS) testutils.py* clean-local: -rm -rf __pycache__ if PYTHON_ENABLED TESTS_ENVIRONMENT = \ ARC_PLUGIN_PATH=$(top_builddir)/src/hed/acc/TEST/.libs \ DYLD_LIBRARY_PATH="$(DYLD_LIBRARY_PATH)" \ PYTHONPATH=$(PYTHONPATH) $(PYTHON) TESTS = $(TESTSCRIPTS) else TESTS = endif check_SCRIPTS = $(TESTS) nordugrid-arc-5.4.2/python/test/PaxHeaders.7502/UsingTheNextMethodOnASwigIteratorRegTest.py0000644000000000000000000000012411710546563030045 xustar000000000000000027 mtime=1327680883.537473 27 atime=1513200576.896731 30 ctime=1513200665.274811999 nordugrid-arc-5.4.2/python/test/UsingTheNextMethodOnASwigIteratorRegTest.py0000644000175000002070000000045711710546563030120 0ustar00mockbuildmock00000000000000''' This regression test tests whether invoking the next method on a Swig iterator obtained from a std::list of ARC C++ objects generates a segmentation fault. That issue was reported in bug 2683. 
''' import arc jobs = arc.JobList() jobs.push_back(arc.Job()) itJobs = jobs.__iter__() itJobs.next() nordugrid-arc-5.4.2/python/test/PaxHeaders.7502/JobSupervisorTest.py0000644000000000000000000000012412051675267023536 xustar000000000000000027 mtime=1353153207.099019 27 atime=1513200576.894731 30 ctime=1513200665.275812011 nordugrid-arc-5.4.2/python/test/JobSupervisorTest.py0000644000175000002070000001665412051675267023617 0ustar00mockbuildmock00000000000000import testutils, arc, unittest class JobSupervisorTest(testutils.ARCClientTestCase): def setUp(self): self.usercfg = arc.UserConfig(arc.initializeCredentialsType(arc.initializeCredentialsType.SkipCredentials)) def test_constructor(self): id1 = "http://test.nordugrid.org/1234567890test1" id2 = "http://test.nordugrid.org/1234567890test2" js = arc.JobSupervisor(self.usercfg, [ self.create_test_job(job_id = id1), self.create_test_job(job_id = id2) ]); self.expect(js.GetAllJobs()).not_to_be_empty() jobs = js.GetAllJobs() self.expect(jobs).to_have(2).jobs() self.expect(jobs[0].JobID).to_be(id1) self.expect(jobs[1].JobID).to_be(id2) def test_add_job(self): js = arc.JobSupervisor(self.usercfg, arc.JobList()) self.expect(js.GetAllJobs()).to_be_empty() job = self.create_test_job(job_id = "http://test.nordugrid.org/1234567890test1") self.expect(js.AddJob(job)).to_be(True, message = "AddJob was expected to return True") self.expect(js.GetAllJobs()).not_to_be_empty() job.JobManagementInterfaceName = "" self.expect(js.AddJob(job)).to_be(False, message = "AddJob was expected to return False") self.expect(js.GetAllJobs()).to_have(1).job() job.JobManagementInterfaceName = "non.existent.interface" self.expect(js.AddJob(job)).to_be(False, message = "AddJob was expected to return False") self.expect(js.GetAllJobs()).to_have(1).job() def test_resubmit(self): self.usercfg.Broker("TEST") arc.TargetInformationRetrieverPluginTESTControl.targets = [self.create_test_target("http://test2.nordugrid.org")] arc.TargetInformationRetrieverPluginTESTControl.status = arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL) js = arc.JobSupervisor(self.usercfg, [ self.create_test_job(job_id = "http://test.nordugrid.org/1234567890test1", state = arc.JobState.FAILED), self.create_test_job(job_id = "http://test.nordugrid.org/1234567890test2", state = arc.JobState.RUNNING) ]) self.expect(js.GetAllJobs()).to_have(2).jobs() endpoints = [arc.Endpoint("http://test2.nordugrid.org", arc.Endpoint.COMPUTINGINFO, "org.nordugrid.tirtest")] resubmitted = arc.JobList() result = js.Resubmit(0, endpoints, resubmitted) # TODO: When using the wrapped arc.TargetInformationRetrieverPluginTESTControl.targets static variable, the bindings sometimes segfaults. # Particular when accessing member of the arc.TargetInformationRetrieverPluginTESTControl.targets[].ComputingManager map, e.g. 
arc.TargetInformationRetrieverPluginTESTControl.targets[].ComputingManager["some-key"] #self.expect(result).to_be(True) #self.expect(resubmitted).to_have(2).jobs() def test_cancel(self): id1 = "http://test.nordugrid.org/1234567890test1" id2 = "http://test.nordugrid.org/1234567890test2" id3 = "http://test.nordugrid.org/1234567890test3" id4 = "http://test.nordugrid.org/1234567890test4" js = arc.JobSupervisor(self.usercfg, [ self.create_test_job(job_id = id1, state = arc.JobState.RUNNING), self.create_test_job(job_id = id2, state = arc.JobState.FINISHED), self.create_test_job(job_id = id3, state = arc.JobState.UNDEFINED) ]) arc.JobControllerPluginTestACCControl.cancelStatus = True self.expect(js.Cancel()).to_be(True, message = "Cancel was expected to return True") self.expect(js.GetIDsProcessed()).to_have(1).ID() self.expect(js.GetIDsProcessed()[0]).to_be(id1) self.expect(js.GetIDsNotProcessed()).to_have(2).IDs() self.expect(js.GetIDsNotProcessed()[0]).to_be(id2) self.expect(js.GetIDsNotProcessed()[1]).to_be(id3) js.ClearSelection() arc.JobControllerPluginTestACCControl.cancelStatus = False self.expect(js.Cancel()).to_be(False, message = "Cancel was expected to return False") self.expect(js.GetIDsProcessed()).to_have(0).IDs() self.expect(js.GetIDsNotProcessed()).to_have(3).IDs() self.expect(js.GetIDsNotProcessed()[0]).to_be(id1) self.expect(js.GetIDsNotProcessed()[1]).to_be(id2) self.expect(js.GetIDsNotProcessed()[2]).to_be(id3) js.ClearSelection() job = self.create_test_job(job_id = id4, state = arc.JobState.ACCEPTED, state_text = "Accepted") self.expect(js.AddJob(job)).to_be(True, message = "AddJob was expected to return True") arc.JobControllerPluginTestACCControl.cancelStatus = True js.SelectByStatus(["Accepted"]) self.expect(js.Cancel()).to_be(True, message = "Cancel was expected to return False") self.expect(js.GetIDsProcessed()).to_have(1).ID() self.expect(js.GetIDsProcessed()[0]).to_be(id4) self.expect(js.GetIDsNotProcessed()).to_have(0).IDs() js.ClearSelection() arc.JobControllerPluginTestACCControl.cancelStatus = False js.SelectByStatus(["Accepted"]) self.expect(js.Cancel()).to_be(False, message = "Cancel was expected to return False") self.expect(js.GetIDsProcessed()).to_have(0).IDs() self.expect(js.GetIDsNotProcessed()).to_have(1).ID() self.expect(js.GetIDsNotProcessed()[0]).to_be(id4) js.ClearSelection() def test_clean(self): id1 = "http://test.nordugrid.org/1234567890test1" id2 = "http://test.nordugrid.org/1234567890test2" js = arc.JobSupervisor(self.usercfg, [ self.create_test_job(job_id = id1, state = arc.JobState.FINISHED, state_text = "Finished"), self.create_test_job(job_id = id2, state = arc.JobState.UNDEFINED) ]) self.expect(js.GetAllJobs()).to_have(2).jobs() arc.JobControllerPluginTestACCControl.cleanStatus = True self.expect(js.Clean()).to_be(True, message = "Clean was expected to return True") self.expect(js.GetIDsProcessed()).to_have(1).ID() self.expect(js.GetIDsProcessed()[0]).to_be(id1) self.expect(js.GetIDsNotProcessed()).to_have(1).ID() self.expect(js.GetIDsNotProcessed()[0]).to_be(id2) js.ClearSelection() arc.JobControllerPluginTestACCControl.cleanStatus = False self.expect(js.Clean()).to_be(False, message = "Clean was expected to return False") self.expect(js.GetIDsProcessed()).to_have(0).IDs() self.expect(js.GetIDsNotProcessed()).to_have(2).IDs() self.expect(js.GetIDsNotProcessed()[0]).to_be(id1) self.expect(js.GetIDsNotProcessed()[1]).to_be(id2) js.ClearSelection() arc.JobControllerPluginTestACCControl.cleanStatus = True js.SelectByStatus(["Finished"]) 
self.expect(js.Clean()).to_be(True, message = "Clean was expected to return True") self.expect(js.GetIDsProcessed()).to_have(1).ID() self.expect(js.GetIDsProcessed()[0]).to_be(id1) self.expect(js.GetIDsNotProcessed()).to_have(0).IDs() js.ClearSelection() arc.JobControllerPluginTestACCControl.cleanStatus = False js.SelectByStatus(["Finished"]) self.expect(js.Clean()).to_be(False, message = "Clean was expected to return False") self.expect(js.GetIDsProcessed()).to_have(0).IDs() self.expect(js.GetIDsNotProcessed()).to_have(1).ID() self.expect(js.GetIDsNotProcessed()[0]).to_be(id1) js.ClearSelection() if __name__ == '__main__': unittest.main() nordugrid-arc-5.4.2/python/test/PaxHeaders.7502/tests.mk0000644000000000000000000000012512540540140021164 xustar000000000000000025 mtime=1434632288.9341 30 atime=1513200592.774925297 30 ctime=1513200665.271811962 nordugrid-arc-5.4.2/python/test/tests.mk0000644000175000002070000000071212540540140021230 0ustar00mockbuildmock00000000000000if PYTHON_SWIG_ENABLED TESTSCRIPTS = \ DeletingSwigIteratorObtainedFromPublicListRegTest.py \ UsingTheNextMethodOnASwigIteratorRegTest.py \ JobSupervisorTest.py EndpointContainerTest.py \ EndpointTest.py ServiceEndpointRetrieverTest.py \ TargetInformationRetrieverTest.py \ ComputingServiceRetrieverTest.py \ MappingOf_time_t_and_uint32_t_CTypesToPythonRegressionTest.py endif AUXFILES = testutils.py nordugrid-arc-5.4.2/python/test/PaxHeaders.7502/ComputingServiceRetrieverTest.py0000644000000000000000000000012412540540276026072 xustar000000000000000027 mtime=1434632382.644008 27 atime=1513200576.894731 30 ctime=1513200665.281812084 nordugrid-arc-5.4.2/python/test/ComputingServiceRetrieverTest.py0000644000175000002070000000270712540540276026145 0ustar00mockbuildmock00000000000000import testutils, arc, unittest, time class ComputingServiceRetrieverTest(testutils.ARCClientTestCase): def setUp(self): self.usercfg = arc.UserConfig(arc.initializeCredentialsType(arc.initializeCredentialsType.SkipCredentials)) self.ce = arc.Endpoint() self.ce.URLString = "test.nordugrid.org" self.ce.InterfaceName = "org.nordugrid.tirtest" self.ce.Capability.append(arc.Endpoint_GetStringForCapability(arc.Endpoint.COMPUTINGINFO)) arc.TargetInformationRetrieverPluginTESTControl.delay = 0 arc.TargetInformationRetrieverPluginTESTControl.targets = [self.create_test_target()] arc.TargetInformationRetrieverPluginTESTControl.status = arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL) def test_the_class_exists(self): self.expect(arc.ComputingServiceRetriever).to_be_an_instance_of(type) def test_the_constructor(self): retriever = arc.ComputingServiceRetriever(self.usercfg) self.expect(retriever).to_be_an_instance_of(arc.ComputingServiceRetriever) def test_getting_a_target(self): retriever = arc.ComputingServiceRetriever(self.usercfg) self.expect(retriever).to_be_empty() retriever.addEndpoint(self.ce) retriever.wait() self.expect(retriever).to_have(1).target() etlist = retriever.GetExecutionTargets() self.expect(etlist).to_have(1).target() if __name__ == '__main__': unittest.main() nordugrid-arc-5.4.2/python/test/PaxHeaders.7502/EndpointTest.py0000644000000000000000000000012412042771650022473 xustar000000000000000027 mtime=1351349160.334943 27 atime=1513200576.894731 30 ctime=1513200665.277812035 nordugrid-arc-5.4.2/python/test/EndpointTest.py0000644000175000002070000000462012042771650022542 0ustar00mockbuildmock00000000000000import testutils, arc, unittest class EndpointTest(testutils.ARCClientTestCase): def test_the_class_exists(self): 
self.expect(arc.Endpoint).to_be_an_instance_of(type) def test_the_constructor(self): registry = arc.Endpoint() self.expect(registry).to_be_an_instance_of(arc.Endpoint) def test_default_attributes_are_empty(self): registry = arc.Endpoint() self.expect(registry.URLString).to_be("") self.expect(registry.Capability).to_have(0).capabilities() self.expect(registry.InterfaceName).to_be("") def test_constructor_with_values(self): registry = arc.Endpoint("test.nordugrid.org", ["information.discovery.registry"], "org.nordugrid.sertest") self.expect(registry.URLString).to_be("test.nordugrid.org") self.expect(registry.Capability[0]).to_be("information.discovery.registry") self.expect(registry.InterfaceName).to_be("org.nordugrid.sertest") def test_constructor_with_enum_registry(self): registry = arc.Endpoint("test.nordugrid.org", arc.Endpoint.REGISTRY) self.expect(registry.URLString).to_be("test.nordugrid.org") self.expect(registry.Capability[0]).to_be("information.discovery.registry") self.expect(registry.InterfaceName).to_be("") def test_constructor_with_enum_computing(self): endpoint = arc.Endpoint("test.nordugrid.org", arc.Endpoint.COMPUTINGINFO) self.expect(endpoint.URLString).to_be("test.nordugrid.org") self.expect(endpoint.Capability[0]).to_be("information.discovery.resource") self.expect(endpoint.InterfaceName).to_be("") def test_has_capability_with_enum(self): registry = arc.Endpoint("test.nordugrid.org", arc.Endpoint.REGISTRY) self.expect(registry.HasCapability(arc.Endpoint.REGISTRY)).to_be(True); def test_has_capability_with_string(self): registry = arc.Endpoint("test.nordugrid.org", arc.Endpoint.REGISTRY) self.expect(registry.HasCapability("information.discovery.registry")).to_be(True); def test_string_representation(self): registry = arc.Endpoint("test.nordugrid.org", ["information.discovery.registry"], "org.nordugrid.sertest") self.expect(registry.str()).to_be("test.nordugrid.org (org.nordugrid.sertest, capabilities: information.discovery.registry)") if __name__ == '__main__': unittest.main()nordugrid-arc-5.4.2/python/test/PaxHeaders.7502/TargetInformationRetrieverTest.py0000644000000000000000000000012412036025767026243 xustar000000000000000027 mtime=1350052855.106361 27 atime=1513200576.896731 30 ctime=1513200665.280812072 nordugrid-arc-5.4.2/python/test/TargetInformationRetrieverTest.py0000644000175000002070000001245412036025767026316 0ustar00mockbuildmock00000000000000import testutils, arc, unittest, time class TargetInformationRetrieverTest(testutils.ARCClientTestCase): def setUp(self): self.usercfg = arc.UserConfig(arc.initializeCredentialsType(arc.initializeCredentialsType.SkipCredentials)) self.ce = arc.Endpoint() self.ce.URLString = "test.nordugrid.org" self.ce.InterfaceName = "org.nordugrid.tirtest" arc.TargetInformationRetrieverPluginTESTControl.delay = 0 arc.TargetInformationRetrieverPluginTESTControl.targets = [arc.ComputingServiceType()] arc.TargetInformationRetrieverPluginTESTControl.status = arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL) def test_the_class_exists(self): self.expect(arc.TargetInformationRetriever).to_be_an_instance_of(type) def test_the_constructor(self): retriever = arc.TargetInformationRetriever(self.usercfg) self.expect(retriever).to_be_an_instance_of(arc.TargetInformationRetriever) def test_getting_a_target(self): retriever = arc.TargetInformationRetriever(self.usercfg) container = arc.ComputingServiceContainer() retriever.addConsumer(container) self.expect(container).to_be_empty() retriever.addEndpoint(self.ce) retriever.wait() 
self.expect(container).to_have(1).target() def test_getting_a_target_without_interfacename_specified(self): retriever = arc.TargetInformationRetriever(self.usercfg) container = arc.ComputingServiceContainer() retriever.addConsumer(container) self.expect(container).to_be_empty() self.ce.InterfaceName = "" retriever.addEndpoint(self.ce) retriever.wait() self.expect(container).to_have(1).target() def test_getting_status(self): retriever = arc.TargetInformationRetriever(self.usercfg) container = arc.ComputingServiceContainer() retriever.addConsumer(container) arc.TargetInformationRetrieverPluginTESTControl.status = arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL, "TEST") retriever.addEndpoint(self.ce) retriever.wait() status = retriever.getStatusOfEndpoint(self.ce) self.expect(status).to_be_an_instance_of(arc.EndpointQueryingStatus) self.expect(status).to_be(arc.EndpointQueryingStatus.SUCCESSFUL) self.expect(status.getDescription()).to_be("TEST") def test_getting_status_without_interfacename_specified(self): retriever = arc.TargetInformationRetriever(self.usercfg) container = arc.ComputingServiceContainer() retriever.addConsumer(container) arc.TargetInformationRetrieverPluginTESTControl.status = arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL, "TEST") self.ce.InterfaceName = "" retriever.addEndpoint(self.ce) retriever.wait() status = retriever.getStatusOfEndpoint(self.ce) self.expect(status).to_be(arc.EndpointQueryingStatus.SUCCESSFUL) self.expect(status.getDescription()).to_be("TEST") def test_the_status_is_started_first(self): retriever = arc.TargetInformationRetriever(self.usercfg) container = arc.ComputingServiceContainer() retriever.addConsumer(container) arc.TargetInformationRetrieverPluginTESTControl.delay = 0.1 retriever.addEndpoint(self.ce) status = retriever.getStatusOfEndpoint(self.ce) self.expect(status).to_be(arc.EndpointQueryingStatus.STARTED) retriever.wait() status = retriever.getStatusOfEndpoint(self.ce) self.expect(status).to_be(arc.EndpointQueryingStatus.SUCCESSFUL) def test_same_endpoint_is_not_queried_twice(self): retriever = arc.TargetInformationRetriever(self.usercfg) container = arc.ComputingServiceContainer() retriever.addConsumer(container) retriever.addEndpoint(self.ce) retriever.addEndpoint(self.ce) retriever.wait() self.expect(container).to_have(1).target() def test_removing_the_consumer(self): retriever = arc.TargetInformationRetriever(self.usercfg) container = arc.ComputingServiceContainer() retriever.addConsumer(container) arc.TargetInformationRetrieverPluginTESTControl.delay = 0.1 retriever.addEndpoint(self.ce) retriever.removeConsumer(container) retriever.wait() self.expect(container).to_have(0).targets() def test_deleting_the_consumer_before_the_retriever(self): retriever = arc.TargetInformationRetriever(self.usercfg) container = arc.ComputingServiceContainer() retriever.addConsumer(container) arc.TargetInformationRetrieverPluginTESTControl.delay = 0.1 retriever.addEndpoint(self.ce) retriever.removeConsumer(container) del container retriever.wait() # expect it not to crash def test_two_consumers(self): retriever = arc.TargetInformationRetriever(self.usercfg) container1 = arc.ComputingServiceContainer() container2 = arc.ComputingServiceContainer() retriever.addConsumer(container1) retriever.addConsumer(container2) retriever.addEndpoint(self.ce) retriever.wait() self.expect(container1).to_have(1).target() self.expect(container2).to_have(1).target() if __name__ == '__main__': unittest.main() 
nordugrid-arc-5.4.2/python/test/PaxHeaders.7502/EndpointContainerTest.py0000644000000000000000000000012412032552550024331 xustar000000000000000027 mtime=1349178728.953173 27 atime=1513200576.896731 30 ctime=1513200665.276812023 nordugrid-arc-5.4.2/python/test/EndpointContainerTest.py0000644000175000002070000000127612032552550024404 0ustar00mockbuildmock00000000000000import testutils, arc, unittest class EndpointContainerTest(testutils.ARCClientTestCase): def test_the_class_exists(self): self.expect(arc.EndpointContainer).to_be_an_instance_of(type) def test_the_constructor(self): container = arc.EndpointContainer() self.expect(container).to_be_an_instance_of(arc.EndpointContainer) def test_adding_endpoints(self): container = arc.EndpointContainer() endpoint1 = arc.Endpoint() endpoint2 = arc.Endpoint() container.addEntity(endpoint1) container.addEntity(endpoint2) self.expect(container).to_have(2).endpoints() if __name__ == '__main__': unittest.main()nordugrid-arc-5.4.2/python/test/PaxHeaders.7502/python0000644000000000000000000000013213214316031020732 xustar000000000000000030 mtime=1513200665.308812414 30 atime=1513200668.723854182 30 ctime=1513200665.308812414 nordugrid-arc-5.4.2/python/test/python/0000755000175000002070000000000013214316031021055 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/python/test/python/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712240743250023054 xustar000000000000000027 mtime=1384367784.271138 30 atime=1513200592.870926471 30 ctime=1513200665.307812402 nordugrid-arc-5.4.2/python/test/python/Makefile.am0000644000175000002070000000012412240743250023113 0ustar00mockbuildmock00000000000000PYTHON=@PYTHON@ PYTHONPATH=../../python:../../python/arc/.libs include ../common.mk nordugrid-arc-5.4.2/python/test/python/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315720023060 xustar000000000000000030 mtime=1513200592.903926875 30 atime=1513200652.839659912 30 ctime=1513200665.308812414 nordugrid-arc-5.4.2/python/test/python/Makefile.in0000644000175000002070000005067513214315720023143 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
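# The rules below run the wrapped Python binding tests listed in ../tests.mk
# through the TESTS_ENVIRONMENT assembled from ../common.mk (ARC_PLUGIN_PATH,
# DYLD_LIBRARY_PATH on Mac OS X, PYTHONPATH and the configured $(PYTHON)).
# A single test can be run by hand with roughly the same environment, e.g.
# (illustrative only; paths assume a finished in-tree build and that the test
# scripts have already been copied into this directory by `make check`):
#
#   ARC_PLUGIN_PATH=$(top_builddir)/src/hed/acc/TEST/.libs \
#   PYTHONPATH=../../python:../../python/arc/.libs \
#   python EndpointTest.py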
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ DIST_COMMON = $(srcdir)/../common.mk $(srcdir)/../tests.mk \ $(srcdir)/Makefile.am $(srcdir)/Makefile.in @PYTHON_ENABLED_TRUE@TESTS = $(TESTSCRIPTS) subdir = python/test/python ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__tty_colors = \ red=; grn=; lgn=; blu=; std= DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER 
= @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = 
@PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ PYTHONPATH = ../../python:../../python/arc/.libs @MACOSX_FALSE@DYLD_LIBRARY_PATH = @MACOSX_TRUE@DYLD_LIBRARY_PATH = 
"$(top_builddir)/src/hed/libs/credentialstore/.libs:$(top_builddir)/src/hed/libs/communication/.libs:$(top_builddir)/src/hed/libs/compute/.libs:$(top_builddir)/src/hed/libs/data/.libs:$(top_builddir)/src/hed/libs/security/.libs:$(top_builddir)/src/hed/libs/credential/.libs:$(top_builddir)/src/hed/libs/crypto/.libs:$(top_builddir)/src/hed/libs/message/.libs:$(top_builddir)/src/hed/libs/loader/.libs:$(top_builddir)/src/hed/libs/common/.libs:$(top_builddir)/src/libs/data-staging/.libs" @PYTHON_SWIG_ENABLED_TRUE@TESTSCRIPTS = \ @PYTHON_SWIG_ENABLED_TRUE@ DeletingSwigIteratorObtainedFromPublicListRegTest.py \ @PYTHON_SWIG_ENABLED_TRUE@ UsingTheNextMethodOnASwigIteratorRegTest.py \ @PYTHON_SWIG_ENABLED_TRUE@ JobSupervisorTest.py EndpointContainerTest.py \ @PYTHON_SWIG_ENABLED_TRUE@ EndpointTest.py ServiceEndpointRetrieverTest.py \ @PYTHON_SWIG_ENABLED_TRUE@ TargetInformationRetrieverTest.py \ @PYTHON_SWIG_ENABLED_TRUE@ ComputingServiceRetrieverTest.py \ @PYTHON_SWIG_ENABLED_TRUE@ MappingOf_time_t_and_uint32_t_CTypesToPythonRegressionTest.py AUXFILES = testutils.py CLEANFILES = $(TESTSCRIPTS) testutils.py* @PYTHON_ENABLED_TRUE@TESTS_ENVIRONMENT = \ @PYTHON_ENABLED_TRUE@ ARC_PLUGIN_PATH=$(top_builddir)/src/hed/acc/TEST/.libs \ @PYTHON_ENABLED_TRUE@ DYLD_LIBRARY_PATH="$(DYLD_LIBRARY_PATH)" \ @PYTHON_ENABLED_TRUE@ PYTHONPATH=$(PYTHONPATH) $(PYTHON) check_SCRIPTS = $(TESTS) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(srcdir)/../common.mk $(srcdir)/../tests.mk $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign python/test/python/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign python/test/python/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags: TAGS TAGS: ctags: CTAGS CTAGS: check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_SCRIPTS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-local mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: all all-am check check-TESTS check-am clean clean-generic \ clean-libtool clean-local distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am $(TESTSCRIPTS) testutils.py: $(addprefix ../, $(TESTSCRIPTS) testutils.py) $(TESTSCRIPTS) testutils.py: %: testutils.py cp -p $(srcdir)/../$* $@ clean-local: -rm -rf __pycache__ # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/python/test/PaxHeaders.7502/MappingOf_time_t_and_uint32_t_CTypesToPythonRegressi0000644000000000000000000000012212240426324031707 xustar000000000000000025 mtime=1384262868.6645 27 atime=1513200576.897731 30 ctime=1513200665.282812097 nordugrid-arc-5.4.2/python/test/MappingOf_time_t_and_uint32_t_CTypesToPythonRegressionTest.py0000644000175000002070000000231012240426324033576 0ustar00mockbuildmock00000000000000''' Regression test Checks whether the time_t and uint32_t types are mapped to a number (long, int, float). 
''' import testutils, arc, unittest, time, sys if sys.version_info[0] >= 3: long = int number = long(123456) class MappingOf_time_t_and_uint32_t_CTypesToPythonRegressionTest(testutils.ARCClientTestCase): def setUp(self): pass def tearDown(self): pass def test_checkMappedTypeOf_time_t_CType(self): self.expect(arc.Time().GetTime()).to_be_an_instance_of((long, int, float)) self.expect(arc.Time(number)).to_not_throw(NotImplementedError) self.expect(arc.Period().GetPeriod()).to_be_an_instance_of((long, int, float)) self.expect(arc.Period(number)).to_not_throw(NotImplementedError) def test_checkMappedTypeOf_uint32_t_CType(self): self.expect(arc.Time().GetTimeNanoseconds()).to_be_an_instance_of((long, int, float)) self.expect(arc.Time(number, 123456)).to_not_throw(NotImplementedError) self.expect(arc.Period().GetPeriodNanoseconds()).to_be_an_instance_of((long, int, float)) self.expect(arc.Period(number, 123456)).to_not_throw(NotImplementedError) if __name__ == '__main__': unittest.main() nordugrid-arc-5.4.2/python/test/PaxHeaders.7502/altpython0000644000000000000000000000013213214316031021433 xustar000000000000000030 mtime=1513200665.326812635 30 atime=1513200668.723854182 30 ctime=1513200665.326812635 nordugrid-arc-5.4.2/python/test/altpython/0000755000175000002070000000000013214316031021556 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/python/test/altpython/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712240743250023555 xustar000000000000000027 mtime=1384367784.271138 30 atime=1513200592.821925872 30 ctime=1513200665.325812622 nordugrid-arc-5.4.2/python/test/altpython/Makefile.am0000644000175000002070000000013512240743250023616 0ustar00mockbuildmock00000000000000PYTHON=@ALTPYTHON@ PYTHONPATH=../../altpython:../../altpython/arc/.libs include ../common.mk nordugrid-arc-5.4.2/python/test/altpython/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315720023561 xustar000000000000000030 mtime=1513200592.855926287 30 atime=1513200652.853660083 30 ctime=1513200665.326812635 nordugrid-arc-5.4.2/python/test/altpython/Makefile.in0000644000175000002070000005071713214315720023641 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ DIST_COMMON = $(srcdir)/../common.mk $(srcdir)/../tests.mk \ $(srcdir)/Makefile.am $(srcdir)/Makefile.in @PYTHON_ENABLED_TRUE@TESTS = $(TESTSCRIPTS) subdir = python/test/altpython ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__tty_colors = \ red=; grn=; lgn=; blu=; std= DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ 
DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ 
PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @ALTPYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ PYTHONPATH = ../../altpython:../../altpython/arc/.libs @MACOSX_FALSE@DYLD_LIBRARY_PATH = @MACOSX_TRUE@DYLD_LIBRARY_PATH = 
"$(top_builddir)/src/hed/libs/credentialstore/.libs:$(top_builddir)/src/hed/libs/communication/.libs:$(top_builddir)/src/hed/libs/compute/.libs:$(top_builddir)/src/hed/libs/data/.libs:$(top_builddir)/src/hed/libs/security/.libs:$(top_builddir)/src/hed/libs/credential/.libs:$(top_builddir)/src/hed/libs/crypto/.libs:$(top_builddir)/src/hed/libs/message/.libs:$(top_builddir)/src/hed/libs/loader/.libs:$(top_builddir)/src/hed/libs/common/.libs:$(top_builddir)/src/libs/data-staging/.libs" @PYTHON_SWIG_ENABLED_TRUE@TESTSCRIPTS = \ @PYTHON_SWIG_ENABLED_TRUE@ DeletingSwigIteratorObtainedFromPublicListRegTest.py \ @PYTHON_SWIG_ENABLED_TRUE@ UsingTheNextMethodOnASwigIteratorRegTest.py \ @PYTHON_SWIG_ENABLED_TRUE@ JobSupervisorTest.py EndpointContainerTest.py \ @PYTHON_SWIG_ENABLED_TRUE@ EndpointTest.py ServiceEndpointRetrieverTest.py \ @PYTHON_SWIG_ENABLED_TRUE@ TargetInformationRetrieverTest.py \ @PYTHON_SWIG_ENABLED_TRUE@ ComputingServiceRetrieverTest.py \ @PYTHON_SWIG_ENABLED_TRUE@ MappingOf_time_t_and_uint32_t_CTypesToPythonRegressionTest.py AUXFILES = testutils.py CLEANFILES = $(TESTSCRIPTS) testutils.py* @PYTHON_ENABLED_TRUE@TESTS_ENVIRONMENT = \ @PYTHON_ENABLED_TRUE@ ARC_PLUGIN_PATH=$(top_builddir)/src/hed/acc/TEST/.libs \ @PYTHON_ENABLED_TRUE@ DYLD_LIBRARY_PATH="$(DYLD_LIBRARY_PATH)" \ @PYTHON_ENABLED_TRUE@ PYTHONPATH=$(PYTHONPATH) $(PYTHON) check_SCRIPTS = $(TESTS) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(srcdir)/../common.mk $(srcdir)/../tests.mk $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign python/test/altpython/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign python/test/altpython/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags: TAGS TAGS: ctags: CTAGS CTAGS: check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_SCRIPTS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-local mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: all all-am check check-TESTS check-am clean clean-generic \ clean-libtool clean-local distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am $(TESTSCRIPTS) testutils.py: $(addprefix ../, $(TESTSCRIPTS) testutils.py) $(TESTSCRIPTS) testutils.py: %: testutils.py cp -p $(srcdir)/../$* $@ clean-local: -rm -rf __pycache__ # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/python/PaxHeaders.7502/__init__.py0000644000000000000000000000012412240743250020626 xustar000000000000000027 mtime=1384367784.271138 27 atime=1513200576.904731 30 ctime=1513200665.124810164 nordugrid-arc-5.4.2/python/__init__.py0000644000175000002070000000060212240743250020671 0ustar00mockbuildmock00000000000000# Import/initialise all the swig modules (low level wrappers) import _arc # Import the high level wrappers (proxy classes) into this namespace from arc.common import * from arc.loader import * from arc.message import * from arc.compute import * from arc.communication import * from arc.credential import * from arc.data import * from arc.delegation import * from arc.security import * nordugrid-arc-5.4.2/python/PaxHeaders.7502/common.mk0000644000000000000000000000012713024224743020343 xustar000000000000000027 mtime=1481714147.644344 30 atime=1513200592.498921921 30 ctime=1513200665.125810176 nordugrid-arc-5.4.2/python/common.mk0000644000175000002070000001316713024224743020415 0ustar00mockbuildmock00000000000000pkgpythondir = $(PYTHON_SITE_ARCH)/arc pyexecdir = $(PYTHON_SITE_ARCH) ARCSWIGLIBS = common loader message communication compute credential data delegation security ARCPYLIBS = $(ARCSWIGLIBS:=.py) ARCWRAPPERS = $(ARCSWIGLIBS:=_wrap.cpp) ARCWRAPHDRS = $(ARCSWIGLIBS:=_wrap.h) ARCWRAPDEPS = $(foreach module, $(ARCSWIGLIBS), ./$(DEPDIR)/$(module)_wrap.deps) ARCSWIGINIT = swigmodulesinit_wrap.cpp arc_init.cpp BUILT_SOURCES = $(ARCSWIGINIT) __init__.py $(ARCSWIGINIT) __init__.py: %: $(top_srcdir)/python/% cp $< $@ if PYTHON_SWIG_ENABLED nodist_pkgpython_PYTHON = __init__.py $(ARCPYLIBS) pyexec_LTLIBRARIES = _arc.la endif if WIN32 AM_CPPFLAGS = -DWIN32 -DWINNT endif if PYDOXYGEN PYDOXFLAGS = -DPYDOXYGEN PYDOXFILE = pydoxygen.i else PYDOXFLAGS = PYDOXFILE = endif # Only remake index.xml if it is unavailable. # It really depends on Doxyfile.api, but this file changes whenever # configure is run, and we do not want to remake index.xml all # the time since doxygen is currently not required to be present. 
#index.xml: $(top_srcdir)/python/Doxyfile.api index.xml: $(DOXYGEN) $(top_builddir)/python/Doxyfile.api cp -p api/xml/index.xml $@ rm -rf api pydoxygen.i: index.xml $(PYTHON) $(top_srcdir)/python/doxy2swig.py $< $@ EXTRA_DIST = index.xml MAINTAINERCLEANFILES = index.xml if DBJSTORE_ENABLED SWIG_IS_DBJSTORE_ENABLED = -DDBJSTORE_ENABLED else SWIG_IS_DBJSTORE_ENABLED = endif ARCLIBS = \ $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la nodist__arc_la_SOURCES = $(ARCSWIGINIT) $(ARCWRAPPERS) _arc_la_CXXFLAGS = -include $(top_builddir)/config.h -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(PYTHON_CFLAGS) $(ZLIB_CFLAGS) $(DBCXX_CPPFLAGS) \ -fno-strict-aliasing -DSWIG_COBJECT_TYPES $(AM_CXXFLAGS) _arc_la_LIBADD = \ $(ARCLIBS) $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(PYTHON_LIBS) $(ZLIB_LIBS) $(DBCXX_LIBS) _arc_la_LDFLAGS = -no-undefined -avoid-version -module CLEANFILES = $(ARCWRAPPERS) $(ARCWRAPHDRS) $(ARCPYLIBS) $(BUILT_SOURCES) pydoxygen.i $(ARCPYLIBS:.py=.pyc) clean-local: -rm -rf __pycache__ @AMDEP_TRUE@-include $(ARCWRAPDEPS) $(ARCPYLIBS): %.py: %_wrap.cpp $(ARCWRAPPERS): %_wrap.cpp: $(top_srcdir)/swig/%.i $(top_srcdir)/swig/Arc.i $(PYDOXFILE) mkdir -p $(DEPDIR) grep -h '^#' $< $(top_srcdir)/swig/Arc.i $(PYDOXFILE) | \ $(CXXCOMPILE) $(_arc_la_CXXFLAGS) -M -MT $*_wrap.cpp -MT arc_$*.py -MP -MF "$(DEPDIR)/$*_wrap.deps" -x c++ - $(SWIG) -v -c++ -python $(SWIG_PY3) -threads -o $*_wrap.cpp \ -I/usr/include -I$(top_srcdir)/include \ $(PYDOXFLAGS) $(SWIG_IS_DBJSTORE_ENABLED) \ $(AM_CPPFLAGS) $(OPENSSL_CFLAGS) $(top_srcdir)/swig/$*.i # Workaround for RHEL5 swig + EPEL5 python26 sed 's/\(^\s*char \*.*\) = \(.*ml_doc\)/\1 = (char *)\2/' $*_wrap.cpp > $*_wrap.cpp.new mv $*_wrap.cpp.new $*_wrap.cpp # Ditto - for 64 bit sed 's/^\(\s*char \*cstr;\) int len;/#if PY_VERSION_HEX < 0x02050000 \&\& !defined(PY_SSIZE_T_MIN)\n&\n#else\n\1 Py_ssize_t len;\n#endif/' $*_wrap.cpp > $*_wrap.cpp.new mv $*_wrap.cpp.new $*_wrap.cpp # Don't allow threading when deleting SwigPyIterator objects sed '/*_wrap_delete_@SWIG_PYTHON_NAMING@Iterator/,/SWIG_PYTHON_THREAD_END/ s/.*SWIG_PYTHON_THREAD_[A-Z]*_ALLOW.*//' $*_wrap.cpp > $*_wrap.cpp.new mv $*_wrap.cpp.new $*_wrap.cpp # Don't allow threading when handling SWIG Python iterators (see bug # 2683). Fixed in SWIG version 2. if test "x@SWIG2@" != "xyes"; then \ sed '/*_wrap_@SWIG_PYTHON_NAMING@Iterator_/,/SWIG_PYTHON_THREAD_END/ s/.*SWIG_PYTHON_THREAD_[A-Z]*_ALLOW.*//' $*_wrap.cpp > $*_wrap.cpp.new; \ mv $*_wrap.cpp.new $*_wrap.cpp; \ fi # When mapping a template with a template class no space is inserted # between the two right angle brackets. sed 's/>>(new/> >(new/g' $*_wrap.cpp > $*_wrap.cpp.new mv $*_wrap.cpp.new $*_wrap.cpp # When mapping a template with another template class as argument, and # that template class takes two classes as argument, then older swigs # put parentheses around the two class arguments, e.g. T<(A,B)>, not # valid syntax, should be T<A,B> instead. 
sed 's/<(\([,:[:alnum:]]*\))>/<\1>/g' $*_wrap.cpp > $*_wrap.cpp.tmp mv $*_wrap.cpp.tmp $*_wrap.cpp # Fix python3 relative import problem sed "s/import _$*/from arc &/" < $*.py > $*.py.new mv $*.py.new $*.py # In swig 2.0.12 the erase method of the SharedBenchmarkMap type (swig template) uses a wrong name for the wrapped std::map::erase method. sed 's/std_map_Sl_std_string_Sc_double_Sc_std_less_Sl_std_string_Sg__Sc_std_allocator_Sl_std_pair_Sl_std_string_SS_const_Sc_double_Sg__Sg__Sg/std_map_Sl_std_string_Sc_double_Sg/g' $*_wrap.cpp > $*_wrap.cpp.tmp mv $*_wrap.cpp.tmp $*_wrap.cpp # Fix python3 relative import problem if the module imports other submodules through %import(module= in the *.i files for i in $(ARCSWIGLIBS); do\ if grep -q "^import $$i" $*.py; then \ sed "s/import $$i/from arc &/" < $*.py > $*.py.new ;\ mv $*.py.new $*.py; \ fi;\ done install-data-hook: if test -n "$(PYTHON_SOABI)" ; then \ mv $(DESTDIR)$(pyexecdir)/_arc.so \ $(DESTDIR)$(pyexecdir)/_arc.$(PYTHON_SOABI).so ; \ fi .NOTPARALLEL: %.lo %.o nordugrid-arc-5.4.2/python/PaxHeaders.7502/doxy2swig.py0000644000000000000000000000012411520402643021024 xustar000000000000000027 mtime=1296172451.364075 27 atime=1513200576.904731 30 ctime=1513200665.123810152 nordugrid-arc-5.4.2/python/doxy2swig.py0000644000175000002070000003554311520402643021103 0ustar00mockbuildmock00000000000000#!/usr/bin/env python """Doxygen XML to SWIG docstring converter. Usage: doxy2swig.py [options] input.xml output.i Converts Doxygen generated XML files into a file containing docstrings that can be used by SWIG-1.3.x. Note that you need to get SWIG version > 1.3.23 or use Robin Dunn's docstring patch to be able to use the resulting output. input.xml is your doxygen generated XML file and output.i is where the output will be written (the file will be clobbered). """ ###################################################################### # # This code is implemented using Mark Pilgrim's code as a guideline: # http://www.faqs.org/docs/diveintopython/kgp_divein.html # # Author: Prabhu Ramachandran # License: BSD style # # Thanks: # Johan Hake: the include_function_definition feature # Bill Spotz: bug reports and testing. # ###################################################################### from xml.dom import minidom import re import textwrap import sys import types import os.path import optparse def my_open_read(source): if hasattr(source, "read"): return source else: return open(source) def my_open_write(dest): if hasattr(dest, "write"): return dest else: return open(dest, 'w') class Doxy2SWIG: """Converts Doxygen generated XML files into a file containing docstrings that can be used by SWIG-1.3.x that have support for feature("docstring"). Once the data is parsed it is stored in self.pieces. """ def __init__(self, src, include_function_definition=True, quiet=False): """Initialize the instance given a source object. `src` can be a file or filename. If you do not want to include function definitions from doxygen then set `include_function_definition` to `False`. This is handy since this allows you to use the swig generated function definition using %feature("autodoc", [0,1]). 
""" f = my_open_read(src) self.my_dir = os.path.dirname(f.name) self.xmldoc = minidom.parse(f).documentElement f.close() self.pieces = [] self.pieces.append('\n// File: %s\n'%\ os.path.basename(f.name)) self.space_re = re.compile(r'\s+') self.lead_spc = re.compile(r'^(%feature\S+\s+\S+\s*?)"\s+(\S)') self.multi = 0 self.ignores = ['inheritancegraph', 'param', 'listofallmembers', 'innerclass', 'name', 'declname', 'incdepgraph', 'invincdepgraph', 'programlisting', 'type', 'references', 'referencedby', 'location', 'collaborationgraph', 'reimplements', 'reimplementedby', 'derivedcompoundref', 'basecompoundref'] #self.generics = [] self.include_function_definition = include_function_definition if not include_function_definition: self.ignores.append('argsstring') self.quiet = quiet def generate(self): """Parses the file set in the initialization. The resulting data is stored in `self.pieces`. """ self.parse(self.xmldoc) def parse(self, node): """Parse a given node. This function in turn calls the `parse_` functions which handle the respective nodes. """ pm = getattr(self, "parse_%s"%node.__class__.__name__) pm(node) def parse_Document(self, node): self.parse(node.documentElement) def parse_Text(self, node): txt = node.data txt = txt.replace('\\', r'\\\\') txt = txt.replace('"', r'\"') # ignore pure whitespace m = self.space_re.match(txt) if m and len(m.group()) == len(txt): pass else: self.add_text(textwrap.fill(txt, break_long_words=False)) def parse_Element(self, node): """Parse an `ELEMENT_NODE`. This calls specific `do_` handers for different elements. If no handler is available the `generic_parse` method is called. All tagNames specified in `self.ignores` are simply ignored. """ name = node.tagName ignores = self.ignores if name in ignores: return attr = "do_%s" % name if hasattr(self, attr): handlerMethod = getattr(self, attr) handlerMethod(node) else: self.generic_parse(node) #if name not in self.generics: self.generics.append(name) def parse_Comment(self, node): """Parse a `COMMENT_NODE`. This does nothing for now.""" return def add_text(self, value): """Adds text corresponding to `value` into `self.pieces`.""" if type(value) in (list, tuple): self.pieces.extend(value) else: self.pieces.append(value) def get_specific_nodes(self, node, names): """Given a node and a sequence of strings in `names`, return a dictionary containing the names as keys and child `ELEMENT_NODEs`, that have a `tagName` equal to the name. """ nodes = [(x.tagName, x) for x in node.childNodes \ if x.nodeType == x.ELEMENT_NODE and \ x.tagName in names] return dict(nodes) def generic_parse(self, node, pad=0): """A Generic parser for arbitrary tags in a node. Parameters: - node: A node in the DOM. - pad: `int` (default: 0) If 0 the node data is not padded with newlines. If 1 it appends a newline after parsing the childNodes. If 2 it pads before and after the nodes are processed. Defaults to 0. 
""" npiece = 0 if pad: npiece = len(self.pieces) if pad == 2: self.add_text('\n') for n in node.childNodes: self.parse(n) if pad: if len(self.pieces) > npiece: self.add_text('\n') def space_parse(self, node): self.add_text(' ') self.generic_parse(node) do_ref = space_parse do_emphasis = space_parse do_bold = space_parse do_computeroutput = space_parse do_formula = space_parse def do_compoundname(self, node): self.add_text('\n\n') data = node.firstChild.data self.add_text('%%feature("docstring") %s "\n'%data) def do_compounddef(self, node): kind = node.attributes['kind'].value if kind in ('class', 'struct'): prot = node.attributes['prot'].value if prot != 'public': return names = ('compoundname', 'briefdescription', 'detaileddescription', 'includes') first = self.get_specific_nodes(node, names) for n in names: if n in first: self.parse(first[n]) self.add_text(['";','\n']) for n in node.childNodes: if n not in first.values(): self.parse(n) elif kind in ('file', 'namespace'): nodes = node.getElementsByTagName('sectiondef') for n in nodes: self.parse(n) def do_includes(self, node): self.add_text('C++ includes: ') self.generic_parse(node, pad=1) def do_parameterlist(self, node): text='unknown' for key, val in node.attributes.items(): if key == 'kind': if val == 'param': text = 'Parameters' elif val == 'exception': text = 'Exceptions' else: text = val break self.add_text(['\n', '\n', text, ':', '\n']) self.generic_parse(node, pad=1) def do_para(self, node): self.add_text('\n') self.generic_parse(node, pad=1) def do_parametername(self, node): self.add_text('\n') try: data=node.firstChild.data except AttributeError: # perhaps a tag in it data=node.firstChild.firstChild.data if data.find('Exception') != -1: self.add_text(data) else: self.add_text("%s: "%data) def do_parameterdefinition(self, node): self.generic_parse(node, pad=1) def do_detaileddescription(self, node): self.generic_parse(node, pad=1) def do_briefdescription(self, node): self.generic_parse(node, pad=1) def do_memberdef(self, node): prot = node.attributes['prot'].value id = node.attributes['id'].value kind = node.attributes['kind'].value tmp = node.parentNode.parentNode.parentNode compdef = tmp.getElementsByTagName('compounddef')[0] cdef_kind = compdef.attributes['kind'].value if prot == 'public': first = self.get_specific_nodes(node, ('definition', 'name')) name = first['name'].firstChild.data if name[:8] == 'operator': # Don't handle operators yet. return if 'definition' not in first or \ kind in ['variable', 'typedef']: return if self.include_function_definition: defn = first['definition'].firstChild.data else: defn = "" self.add_text('\n') self.add_text('%feature("docstring") ') anc = node.parentNode.parentNode if cdef_kind in ('file', 'namespace'): ns_node = anc.getElementsByTagName('innernamespace') if not ns_node and cdef_kind == 'namespace': ns_node = anc.getElementsByTagName('compoundname') if ns_node: ns = ns_node[0].firstChild.data self.add_text(' %s::%s "\n%s'%(ns, name, defn)) else: self.add_text(' %s "\n%s'%(name, defn)) elif cdef_kind in ('class', 'struct'): # Get the full function name. 
anc_node = anc.getElementsByTagName('compoundname') cname = anc_node[0].firstChild.data self.add_text(' %s::%s "\n%s'%(cname, name, defn)) for n in node.childNodes: if n not in first.values(): self.parse(n) self.add_text(['";', '\n']) def do_definition(self, node): data = node.firstChild.data self.add_text('%s "\n%s'%(data, data)) def do_sectiondef(self, node): kind = node.attributes['kind'].value if kind in ('public-func', 'func', 'user-defined', ''): self.generic_parse(node) def do_header(self, node): """For a user defined section def a header field is present which should not be printed as such, so we comment it in the output.""" data = node.firstChild.data self.add_text('\n/*\n %s \n*/\n'%data) # If our immediate sibling is a 'description' node then we # should comment that out also and remove it from the parent # node's children. parent = node.parentNode idx = parent.childNodes.index(node) if len(parent.childNodes) >= idx + 2: nd = parent.childNodes[idx+2] if nd.nodeName == 'description': nd = parent.removeChild(nd) self.add_text('\n/*') self.generic_parse(nd) self.add_text('\n*/\n') def do_simplesect(self, node): kind = node.attributes['kind'].value if kind in ('date', 'rcs', 'version'): pass elif kind == 'warning': self.add_text(['\n', 'WARNING: ']) self.generic_parse(node) elif kind == 'see': self.add_text('\n') self.add_text('See: ') self.generic_parse(node) else: self.generic_parse(node) def do_argsstring(self, node): self.generic_parse(node, pad=1) def do_member(self, node): kind = node.attributes['kind'].value refid = node.attributes['refid'].value if kind == 'function' and refid[:9] == 'namespace': self.generic_parse(node) def do_doxygenindex(self, node): self.multi = 1 comps = node.getElementsByTagName('compound') for c in comps: refid = c.attributes['refid'].value fname = refid + '.xml' if not os.path.exists(fname): fname = os.path.join(self.my_dir, fname) if not self.quiet: print("parsing file: ", fname) p = Doxy2SWIG(fname, self.include_function_definition, self.quiet) p.generate() self.pieces.extend(self.clean_pieces(p.pieces)) def write(self, fname): o = my_open_write(fname) if self.multi: o.write("".join(self.pieces)) else: o.write("".join(self.clean_pieces(self.pieces))) o.close() def clean_pieces(self, pieces): """Cleans the list of strings given as `pieces`. It replaces multiple newlines by a maximum of 2 and returns a new list. It also wraps the paragraphs nicely. """ ret = [] count = 0 for i in pieces: if i == '\n': count = count + 1 else: if i == '";': if count: ret.append('\n') elif count > 2: ret.append('\n\n') elif count: ret.append('\n'*count) count = 0 ret.append(i) _data = "".join(ret) ret = [] for i in _data.split('\n\n'): if i == 'Parameters:' or i == 'Exceptions:': ret.extend([i, '\n-----------', '\n\n']) elif i.find('// File:') > -1: # leave comments alone. 
ret.extend([i, '\n']) else: _tmp = textwrap.fill(i.strip(), break_long_words=False) _tmp = self.lead_spc.sub(r'\1"\2', _tmp) ret.extend([_tmp, '\n\n']) return ret def convert(input, output, include_function_definition=True, quiet=False): p = Doxy2SWIG(input, include_function_definition, quiet) p.generate() p.write(output) def main(): usage = __doc__ parser = optparse.OptionParser(usage) parser.add_option("-n", '--no-function-definition', action='store_true', default=False, dest='func_def', help='do not include doxygen function definitions') parser.add_option("-q", '--quiet', action='store_true', default=False, dest='quiet', help='be quiet and minimise output') options, args = parser.parse_args() if len(args) != 2: parser.error("error: no input and output specified") convert(args[0], args[1], not options.func_def, options.quiet) if __name__ == '__main__': main() nordugrid-arc-5.4.2/python/PaxHeaders.7502/swigmodulesinit_wrap.cpp0000644000000000000000000000012412734740172023510 xustar000000000000000027 mtime=1467203706.320865 27 atime=1513200576.902731 30 ctime=1513200665.126810189 nordugrid-arc-5.4.2/python/swigmodulesinit_wrap.cpp0000644000175000002070000001005712734740172023556 0ustar00mockbuildmock00000000000000#ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif #include <Python.h> // Python 2vs3 differences #if PY_MAJOR_VERSION >= 3 #define SWIG_init(NAME) PyInit__##NAME #define PyMOD_RETURN(NAME) return NAME #define PyMODVAL PyObject* #else #define SWIG_init(NAME) init_##NAME #define PyMOD_RETURN(NAME) return #define PyMODVAL void #endif PyMODINIT_FUNC SWIG_init(common)(void); PyMODINIT_FUNC SWIG_init(loader)(void); PyMODINIT_FUNC SWIG_init(message)(void); PyMODINIT_FUNC SWIG_init(communication)(void); PyMODINIT_FUNC SWIG_init(compute)(void); PyMODINIT_FUNC SWIG_init(credential)(void); PyMODINIT_FUNC SWIG_init(data)(void); PyMODINIT_FUNC SWIG_init(delegation)(void); PyMODINIT_FUNC SWIG_init(security)(void); static PyMODVAL init_extension_module(PyObject* package, const char *modulename, PyMODVAL (*initfunction)(void)) { #if PY_MAJOR_VERSION >= 3 PyObject *module = initfunction(); #else initfunction(); PyObject *module = PyImport_AddModule((char *)modulename); #endif if(!module) { fprintf(stderr, "Failed initialising Python module '%s', through Python C API\n", modulename); PyMOD_RETURN(NULL); } if(PyModule_AddObject(package, (char *)modulename, module)) { fprintf(stderr, "Failed adding Python module '%s' to package 'arc', through Python C API\n", modulename); PyMOD_RETURN(NULL); } PyObject *sys_modules = PyImport_GetModuleDict(); if (!sys_modules) { fprintf(stderr, "Failed to locate sys.modules.\n"); PyMOD_RETURN(NULL); } if (PyMapping_SetItemString(sys_modules, const_cast<char *>(modulename), module) == -1) { fprintf(stderr, "Failed to add %s to sys.modules.\n", modulename); PyMOD_RETURN(NULL); } Py_INCREF(module); PyMOD_RETURN(module); } #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "_arc", /* m_name */ NULL, /* m_doc */ -1, /* m_size */ NULL, /* m_methods */ NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL, /* m_free */ }; #endif // We can probably change // extern "C" SWIGEXPORT to PyMODINIT_FUNC // and thus remove SWIGEXPORT 
since it is no longer used and PyMODINIT_FUNC // does most of what SWIGEXPORT does. One thing however would be missing: // __attribute__ ((visibility("default"))) // but that seems not to have any effect since -fvisibility* // is not used during compilation. // extern "C" SWIGEXPORT PyMODVAL SWIG_init(arc)(void) { // Initialise this module #if PY_MAJOR_VERSION >= 3 PyObject* module = PyModule_Create(&moduledef); #else PyObject* module = Py_InitModule("_arc", NULL); // NULL only works for Python >= 2.3 #endif if(!module) { fprintf(stderr, "initialisation failed\n"); PyMOD_RETURN(NULL); } // Initialise all the SWIG low level modules PyObject *package = PyImport_AddModule((char *)"arc"); // a means to get a handle to the package, not sure if this is a great idea but it works if(!package) { fprintf(stderr, "initialisation failed\n"); PyMOD_RETURN(NULL); } init_extension_module(package, "_common", SWIG_init(common)); init_extension_module(package, "_loader", SWIG_init(loader)); init_extension_module(package, "_message", SWIG_init(message)); init_extension_module(package, "_communication", SWIG_init(communication)); init_extension_module(package, "_compute", SWIG_init(compute)); init_extension_module(package, "_credential", SWIG_init(credential)); init_extension_module(package, "_data", SWIG_init(data)); init_extension_module(package, "_delegation", SWIG_init(delegation)); init_extension_module(package, "_security", SWIG_init(security)); Py_INCREF(module); PyMOD_RETURN(module); } nordugrid-arc-5.4.2/python/PaxHeaders.7502/examples0000644000000000000000000000013213214316031020250 xustar000000000000000030 mtime=1513200665.355812989 30 atime=1513200668.723854182 30 ctime=1513200665.355812989 nordugrid-arc-5.4.2/python/examples/0000755000175000002070000000000013214316031020373 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/python/examples/PaxHeaders.7502/dtr_generator.py0000644000000000000000000000012312540535212023543 xustar000000000000000027 mtime=1434630794.591176 26 atime=1513200576.88473 30 ctime=1513200665.354812977 nordugrid-arc-5.4.2/python/examples/dtr_generator.py0000644000175000002070000000511612540535212023614 0ustar00mockbuildmock00000000000000#!/usr/bin/python # The nordugrid-arc-python package is required. As stated in the comments, the # main missing piece in Python when compared to C++ is the ability to get # callbacks to the Generator when the DTR has finished. To run: # # python dtr_generator.py /bin/ls /tmp/dtrtest # # If nordugrid-arc-python is installed to a non-standard location, PYTHONPATH # may need to be set. import os import sys import time import arc class DTRGenerator(arc.DTRCallback): def __init__(self): super(DTRGenerator, self).__init__() # Set up logging self.root_logger = arc.Logger_getRootLogger() self.stream = arc.LogStream(sys.stdout) self.root_logger.addDestination(self.stream) self.root_logger.setThreshold(arc.DEBUG) self.cfg = arc.UserConfig('', '') self.id = '1' arc.DTR.LOG_LEVEL = self.root_logger.getThreshold() # Start the Scheduler self.scheduler = arc.Scheduler() self.scheduler.start() def __del__(self): # Stop Scheduler when Generator is finished self.scheduler.stop() def add(self, source, dest): # Logger object, wrapped in smart pointer. The Logger object can only be accessed # by explicitly deferencing the smart pointer. 
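        # (A brief sketch of the pattern used below, assuming the SWIG-generated
        # bindings shipped with ARC: arc.createDTRLogger() returns a smart-pointer
        # wrapper whose __deref__() method yields the wrapped arc.Logger, so
        # addDestination() and setThreshold() are invoked on the Logger itself
        # rather than on the pointer object.)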
dtrlog = arc.createDTRLogger(self.root_logger, "DTR") dtrlog.__deref__().addDestination(self.stream) dtrlog.__deref__().setThreshold(arc.DEBUG) # Create DTR (also wrapped in smart pointer) dtrptr = arc.createDTRPtr(source, dest, self.cfg, self.id, os.getuid(), dtrlog) # The ability to register 'this' as a callback object is not available yet #dtrptr.registerCallback(self, arc.GENERATOR) # Register the scheduler callback so we can push the DTR to it dtrptr.registerCallback(self.scheduler, arc.SCHEDULER) # Send the DTR to the Scheduler arc.DTR.push(dtrptr, arc.SCHEDULER) # Since the callback is not available, wait until the transfer reaches a final state while dtrptr.get_status() != arc.DTRStatus.ERROR and dtrptr.get_status() != arc.DTRStatus.DONE: time.sleep(1) sys.stdout.write("%s\n"%dtrptr.get_status().str()) # This is never called in the current version def receiveDTR(self, dtr): sys.stdout.write('Received back DTR %s\n'%str(dtr.get_id())) def main(args): if len(args) != 3: sys.stdout.write("Usage: python dtr_generator.py source destination\n") sys.exit(1) generator = DTRGenerator() generator.add(args[1], args[2]) if __name__ == '__main__': main(sys.argv[0:]) nordugrid-arc-5.4.2/python/examples/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712771222411022372 xustar000000000000000027 mtime=1474635017.400882 30 atime=1513200592.591923059 30 ctime=1513200665.345812867 nordugrid-arc-5.4.2/python/examples/Makefile.am0000644000175000002070000000235312771222411022437 0ustar00mockbuildmock00000000000000if MACOSX DYLD_LIBRARY_PATH = "$(top_builddir)/src/hed/libs/credentialstore/.libs:$(top_builddir)/src/hed/libs/communication/.libs:$(top_builddir)/src/hed/libs/compute/.libs:$(top_builddir)/src/hed/libs/data/.libs:$(top_builddir)/src/hed/libs/security/.libs:$(top_builddir)/src/hed/libs/credential/.libs:$(top_builddir)/src/hed/libs/crypto/.libs:$(top_builddir)/src/hed/libs/message/.libs:$(top_builddir)/src/hed/libs/loader/.libs:$(top_builddir)/src/hed/libs/common/.libs:$(top_builddir)/src/libs/data-staging/.libs" else DYLD_LIBRARY_PATH = endif EXAMPLES = \ basic_job_submission.py partial_copy.py copy_file.py job_filtering.py \ joblist_retrieval.py job_status.py job_submission.py retrieving_results.py \ service_discovery.py dtr_generator.py job_selector.py if PYLINT_ENABLED # Any options to arguments to pylint should be defined and checked in configure. TESTS_ENVIRONMENT = \ ARC_PLUGIN_PATH=$(top_builddir)/src/hed/acc/TEST/.libs \ DYLD_LIBRARY_PATH="$(DYLD_LIBRARY_PATH)" \ PYTHONPATH="../python:../python/arc/.libs" \ $(PYLINT) $(PYLINT_ARGS) $(PYLINT_ARGS_ARGUMENTS_DIFFER) TESTS = $(EXAMPLES) else TESTS = endif check_SCRIPTS = $(EXAMPLES) exampledir = $(pkgdatadir)/examples/sdk example_DATA = $(EXAMPLES) EXTRA_DIST = $(EXAMPLES) nordugrid-arc-5.4.2/python/examples/PaxHeaders.7502/job_status.py0000644000000000000000000000012312464624616023075 xustar000000000000000027 mtime=1423124878.700368 26 atime=1513200576.88273 30 ctime=1513200665.350812928 nordugrid-arc-5.4.2/python/examples/job_status.py0000755000175000002070000000270112464624616023146 0ustar00mockbuildmock00000000000000#! 
/usr/bin/env python import arc import sys import os def example(): # Creating a UserConfig object with the user's proxy # and the path of the trusted CA certificates uc = arc.UserConfig() uc.ProxyPath("/tmp/x509up_u%s" % os.getuid()) uc.CACertificatesDirectory("/etc/grid-security/certificates") # Create a new job object with a given JobID job = arc.Job() job.JobID = "https://piff.hep.lu.se:443/arex/1QuMDmRwvUfn5h5iWqkutBwoABFKDmABFKDmIpHKDmXBFKDmIuAean" job.Flavour = "ARC1" job.JobManagementURL = arc.URL("https://piff.hep.lu.se:443/arex") job.JobStatusURL = arc.URL("https://piff.hep.lu.se:443/arex") sys.stdout.write("Job object before update:\n") job.SaveToStream(arc.CPyOstream(sys.stdout), True) job_supervisor = arc.JobSupervisor(uc, [job]) # Update the states of jobs within this JobSupervisor job_supervisor.Update() # Get our updated job from the JobSupervisor jobs = job_supervisor.GetAllJobs() job = jobs[0] sys.stdout.write("Job object after update:\n") job.SaveToStream(arc.CPyOstream(sys.stdout), True) # wait for all the background threads to finish before we destroy the objects they may use import atexit @atexit.register def wait_exit(): arc.ThreadInitializer().waitExit() # arc.Logger.getRootLogger().addDestination(arc.LogStream(sys.stderr)) # arc.Logger.getRootLogger().setThreshold(arc.DEBUG) # run the example example() nordugrid-arc-5.4.2/python/examples/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315720022376 xustar000000000000000030 mtime=1513200592.625923474 30 atime=1513200652.867660254 30 ctime=1513200665.345812867 nordugrid-arc-5.4.2/python/examples/Makefile.in0000644000175000002070000005347213214315720022457 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ @PYLINT_ENABLED_TRUE@TESTS = $(EXAMPLES) subdir = python/examples DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(exampledir)" DATA = $(example_DATA) am__tty_colors = \ red=; grn=; lgn=; blu=; std= DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = 
@ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV 
= @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = 
@pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @MACOSX_FALSE@DYLD_LIBRARY_PATH = @MACOSX_TRUE@DYLD_LIBRARY_PATH = "$(top_builddir)/src/hed/libs/credentialstore/.libs:$(top_builddir)/src/hed/libs/communication/.libs:$(top_builddir)/src/hed/libs/compute/.libs:$(top_builddir)/src/hed/libs/data/.libs:$(top_builddir)/src/hed/libs/security/.libs:$(top_builddir)/src/hed/libs/credential/.libs:$(top_builddir)/src/hed/libs/crypto/.libs:$(top_builddir)/src/hed/libs/message/.libs:$(top_builddir)/src/hed/libs/loader/.libs:$(top_builddir)/src/hed/libs/common/.libs:$(top_builddir)/src/libs/data-staging/.libs" EXAMPLES = \ basic_job_submission.py partial_copy.py copy_file.py job_filtering.py \ joblist_retrieval.py job_status.py job_submission.py retrieving_results.py \ service_discovery.py dtr_generator.py job_selector.py # Any options to arguments to pylint should be defined and checked in configure. @PYLINT_ENABLED_TRUE@TESTS_ENVIRONMENT = \ @PYLINT_ENABLED_TRUE@ ARC_PLUGIN_PATH=$(top_builddir)/src/hed/acc/TEST/.libs \ @PYLINT_ENABLED_TRUE@ DYLD_LIBRARY_PATH="$(DYLD_LIBRARY_PATH)" \ @PYLINT_ENABLED_TRUE@ PYTHONPATH="../python:../python/arc/.libs" \ @PYLINT_ENABLED_TRUE@ $(PYLINT) $(PYLINT_ARGS) $(PYLINT_ARGS_ARGUMENTS_DIFFER) check_SCRIPTS = $(EXAMPLES) exampledir = $(pkgdatadir)/examples/sdk example_DATA = $(EXAMPLES) EXTRA_DIST = $(EXAMPLES) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign python/examples/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign python/examples/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-exampleDATA: $(example_DATA) @$(NORMAL_INSTALL) test -z "$(exampledir)" || $(MKDIR_P) "$(DESTDIR)$(exampledir)" @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(exampledir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(exampledir)" || exit $$?; \ done uninstall-exampleDATA: @$(NORMAL_UNINSTALL) @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(exampledir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(exampledir)" && rm -f $$files tags: TAGS TAGS: ctags: CTAGS CTAGS: check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_SCRIPTS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(exampledir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-exampleDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-exampleDATA .MAKE: check-am install-am install-strip .PHONY: all all-am check check-TESTS check-am clean clean-generic \ clean-libtool distclean distclean-generic distclean-libtool \ distdir dvi dvi-am html html-am info info-am install \ install-am install-data install-data-am install-dvi \ install-dvi-am install-exampleDATA install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \ uninstall-exampleDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/python/examples/PaxHeaders.7502/copy_file.py0000644000000000000000000000012312464624616022671 xustar000000000000000027 mtime=1423124878.700368 26 atime=1513200576.88773 30 ctime=1513200665.347812892 nordugrid-arc-5.4.2/python/examples/copy_file.py0000644000175000002070000000263512464624616022745 0ustar00mockbuildmock00000000000000#!/usr/bin/env python import sys import arc # Copies a file from source to destination # Wait for all the background threads to exit before exiting import atexit @atexit.register def wait_exit(): arc.ThreadInitializer().waitExit() def usage(): sys.stdout.write(' Usage: copy_file.py source destination\n') if len(sys.argv) != 3: usage() sys.exit(1) # Logging to stdout root_logger = arc.Logger_getRootLogger() stream = arc.LogStream(sys.stdout) root_logger.addDestination(stream) # Set threshold to VERBOSE or DEBUG for more information root_logger.setThreshold(arc.ERROR) # User configuration - paths to proxy certificates etc can be set here # With no arguments default values are used cfg = arc.UserConfig() # Convert the arguments to DataPoint objects source = arc.datapoint_from_url(sys.argv[1], cfg) if source is None: root_logger.msg(arc.ERROR, "Can't handle source "+sys.argv[1]) sys.exit(1) destination = arc.datapoint_from_url(sys.argv[2], cfg) if destination is None: root_logger.msg(arc.ERROR, "Can't handle destination "+sys.argv[2]) sys.exit(1) # DataMover does the transfer mover = arc.DataMover() # Show transfer progress mover.verbose(True) # Don't attempt to retry on error mover.retry(False) # Do the transfer status = mover.Transfer(source, destination, arc.FileCache(), arc.URLMap()) # Print the exit status of the transfer sys.stdout.write("%s\n"%str(status)) nordugrid-arc-5.4.2/python/examples/PaxHeaders.7502/basic_job_submission.py0000644000000000000000000000012312721014565025076 xustar000000000000000027 mtime=1464080757.755156 26 atime=1513200576.88673 30 ctime=1513200665.346812879 nordugrid-arc-5.4.2/python/examples/basic_job_submission.py0000644000175000002070000000342012721014565025143 0ustar00mockbuildmock00000000000000import arc import sys # Set up logging to stderr with level VERBOSE (a lot of output will be shown) logstdout = arc.LogStream(sys.stdout) logstdout.setFormat(arc.ShortFormat) arc.Logger_getRootLogger().addDestination(logstdout) arc.Logger_getRootLogger().setThreshold(arc.VERBOSE) logger = arc.Logger(arc.Logger_getRootLogger(), "jobsubmit") # UserConfig contains information on credentials and default services to use. # This form of the constructor is necessary to initialise the local job list. usercfg = arc.UserConfig("", "") # Simple job description which outputs hostname to stdout jobdescstring = "&(executable=/bin/hostname)(stdout=stdout)" # Parse job description jobdescs = arc.JobDescriptionList() if not arc.JobDescription_Parse(jobdescstring, jobdescs): logger.msg(arc.ERROR, "Invalid job description") sys.exit(1) # Use 'arc.JobDescription_ParseFromFile("helloworld.xrsl", jobdescs)' # to parse job description from file. 
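# A minimal commented-out sketch of that file-based variant; it assumes a job
# description file called "helloworld.xrsl" (the name is only an example)
# exists in the current directory:
#
#   filedescs = arc.JobDescriptionList()
#   if not arc.JobDescription_ParseFromFile("helloworld.xrsl", filedescs):
#       logger.msg(arc.ERROR, "Invalid job description file")
#       sys.exit(1)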
# Use top-level NorduGrid information index to find resources index = arc.Endpoint("ldap://index1.nordugrid.org:2135/Mds-Vo-name=NorduGrid,o=grid", arc.Endpoint.REGISTRY, "org.nordugrid.ldapegiis") services = arc.EndpointList(1, index) # Do the submission jobs = arc.JobList() submitter = arc.Submitter(usercfg) if submitter.BrokeredSubmit(services, jobdescs, jobs) != arc.SubmissionStatus.NONE: logger.msg(arc.ERROR, "Failed to submit job") sys.exit(1) # Write information on submitted job to local job list (~/.arc/jobs.xml) jobList = arc.JobInformationStorageXML(usercfg.JobListFile()) if not jobList.Write(jobs): logger.msg(arc.WARNING, "Failed to write to local job list %s", usercfg.JobListFile()) # Job submitted ok sys.stdout.write("Job submitted with job id %s\n" % str(jobs.front().JobID)) nordugrid-arc-5.4.2/python/examples/PaxHeaders.7502/partial_copy.py0000644000000000000000000000012312540535274023402 xustar000000000000000027 mtime=1434630844.136964 26 atime=1513200576.88673 30 ctime=1513200665.347812892 nordugrid-arc-5.4.2/python/examples/partial_copy.py0000644000175000002070000000152512540535274023453 0ustar00mockbuildmock00000000000000import arc import sys if len(sys.argv) != 2: sys.stdout.write("Usage: python partial_copy.py filename\n") sys.exit(1) desired_size = 512 usercfg = arc.UserConfig() url = arc.URL(sys.argv[1]) handle = arc.DataHandle(url,usercfg) point = handle.__ref__() point.SetSecure(False) # GridFTP servers generally do not have encrypted data channel info = arc.FileInfo("") point.Stat(info) sys.stdout.write("Name: %s\n"%str(info.GetName())) fsize = info.GetSize() if fsize > desired_size: point.Range(fsize-desired_size,fsize-1) databuffer = arc.DataBuffer() point.StartReading(databuffer) while True: n = 0 length = 0 offset = 0 ( r, n, length, offset, buf) = databuffer.for_write(True) if not r: break sys.stdout.write("BUFFER: %d : %d : %s\n"%(offset, length, str(buf))) databuffer.is_written(n) point.StopReading() nordugrid-arc-5.4.2/python/examples/PaxHeaders.7502/job_selector.py0000644000000000000000000000012413067150476023371 xustar000000000000000027 mtime=1490866494.007435 27 atime=1513200576.889731 30 ctime=1513200665.355812989 nordugrid-arc-5.4.2/python/examples/job_selector.py0000644000175000002070000000364313067150476023444 0ustar00mockbuildmock00000000000000#!/usr/bin/python ''' Create a JobSelector class in order to specify a custom selection to be used with the JobSupervisor class. ''' import arc, sys # Extend the arc.compute.JobSelector class and the select method. class ThreeDaysOldJobSelector(arc.compute.JobSelector): def __init__(self): super(ThreeDaysOldJobSelector, self).__init__() self.now = arc.common.Time() self.three_days = arc.common.Period(60*60*24*3) #self.three_days = arc.common.Period("P3D") # ISO duration #self.three_days = arc.common.Period(3*arc.common.Time.DAY) # The select method recieves a arc.compute.Job instance and must return a # boolean, indicating whether the job should be selected or rejected. # All attributes of the arc.compute.Job object can be used in this method. 
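    # For illustration only (not part of the original example): a variant
    # Select could combine the job age with other Job attributes, e.g. select
    # only failed jobs older than three days, using
    # job.State.GetGeneralState() in the same way as job_filtering.py does:
    #
    #   def Select(self, job):
    #       return (self.now - job.EndTime) > self.three_days and \
    #              job.State.GetGeneralState() == "Failed"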
def Select(self, job): return (self.now - job.EndTime) > self.three_days uc = arc.common.UserConfig() arc.common.Logger_getRootLogger().addDestination(arc.common.LogStream(sys.stderr)) arc.common.Logger_getRootLogger().setThreshold(arc.common.VERBOSE) j = arc.compute.Job() j.JobManagementInterfaceName = "org.ogf.glue.emies.activitymanagement" j.JobManagementURL = arc.common.URL("https://localhost") j.JobStatusInterfaceName = "org.ogf.glue.emies.activitymanagement" j.JobStatusURL = arc.common.URL("https://localhost") js = arc.compute.JobSupervisor(uc) j.JobID = "test-job-1-day-old" j.EndTime = arc.common.Time()-arc.common.Period("P1D") js.AddJob(j) j.JobID = "test-job-2-days-old" j.EndTime = arc.common.Time()-arc.common.Period("P2D") js.AddJob(j) j.JobID = "test-job-3-days-old" j.EndTime = arc.common.Time()-arc.common.Period("P3D") js.AddJob(j) j.JobID = "test-job-4-days-old" j.EndTime = arc.common.Time()-arc.common.Period("P4D") js.AddJob(j) selector = ThreeDaysOldJobSelector() js.Select(selector) for j in js.GetSelectedJobs(): print (j.JobID) # Make operation on selected jobs. E.g.: #js.Clean() nordugrid-arc-5.4.2/python/examples/PaxHeaders.7502/joblist_retrieval.py0000644000000000000000000000012312464624616024443 xustar000000000000000027 mtime=1423124878.700368 26 atime=1513200576.88473 30 ctime=1513200665.349812916 nordugrid-arc-5.4.2/python/examples/joblist_retrieval.py0000755000175000002070000000271612464624616024522 0ustar00mockbuildmock00000000000000#! /usr/bin/env python import arc import sys import os def example(): # Creating a UserConfig object with the user's proxy # and the path of the trusted CA certificates uc = arc.UserConfig() uc.ProxyPath("/tmp/x509up_u%s" % os.getuid()) uc.CACertificatesDirectory("/etc/grid-security/certificates") # Creating an endpoint for a Computing Element endpoint = arc.Endpoint("piff.hep.lu.se:443/arex", arc.Endpoint.COMPUTINGINFO) # Creating a container which will store the retrieved jobs jobs = arc.JobContainer() # Create a job list retriever retriever = arc.JobListRetriever(uc) # Add our container as the consumer of this retriever, so it will get the results retriever.addConsumer(jobs) # Add our endpoint to the retriever, which starts querying it retriever.addEndpoint(endpoint) # Wait until it finishes retriever.wait() # Get the status of the retrieval sys.stdout.write("%s\n"%retriever.getStatusOfEndpoint(endpoint).str()) sys.stdout.write("Number of jobs found: %d\n"%len(jobs)) for job in jobs: job.SaveToStream(arc.CPyOstream(sys.stdout), True) # wait for all the background threads to finish before we destroy the objects they may use import atexit @atexit.register def wait_exit(): arc.ThreadInitializer().waitExit() # arc.Logger.getRootLogger().addDestination(arc.LogStream(sys.stderr)) # arc.Logger.getRootLogger().setThreshold(arc.DEBUG) # run the example example() nordugrid-arc-5.4.2/python/examples/PaxHeaders.7502/service_discovery.py0000644000000000000000000000012312540535134024436 xustar000000000000000027 mtime=1434630748.197552 26 atime=1513200576.88773 30 ctime=1513200665.353812965 nordugrid-arc-5.4.2/python/examples/service_discovery.py0000755000175000002070000000654612540535134024522 0ustar00mockbuildmock00000000000000#! 
/usr/bin/env python import arc import sys import os def retrieve(uc, endpoints): # The ComputingServiceRetriever needs the UserConfig to know which credentials # to use in case of HTTPS connections retriever = arc.ComputingServiceRetriever(uc, endpoints) # the constructor of the ComputingServiceRetriever returns immediately sys.stdout.write('\n') sys.stdout.write("ComputingServiceRetriever created with the following endpoints:\n") for endpoint in endpoints: sys.stdout.write("- %s\n"%endpoint.str()) # here we want to wait until all the results arrive sys.stdout.write("Waiting for the results...\n") retriever.wait() return retriever def example(): # Creating a UserConfig object with the user's proxy # and the path of the trusted CA certificates uc = arc.UserConfig() uc.ProxyPath("/tmp/x509up_u%s" % os.getuid()) uc.CACertificatesDirectory("/etc/grid-security/certificates") # Query two registries (index servers) for Computing Services registries = [ # for the index1, we specify that it is an EGIIS service arc.Endpoint("index1.nordugrid.org:2135/Mds-Vo-name=NorduGrid,o=grid", arc.Endpoint.REGISTRY, "org.nordugrid.ldapegiis"), # for the arc-emi.grid.upjs.sk, we don't specify the type (the InterfaceName) # we let the system to try all possibilities arc.Endpoint("arc-emi.grid.upjs.sk/O=Grid/Mds-Vo-Name=ARC-EMI", arc.Endpoint.REGISTRY) ] retriever = retrieve(uc, registries) # The retriever acts as a list containing all the discovered ComputingServices: sys.stdout.write("Discovered ComputingServices: %s\n"%(", ".join([service.Name for service in retriever]))) # Get all the ExecutionTargets on these ComputingServices targets = retriever.GetExecutionTargets() sys.stdout.write("Number of ExecutionTargets on these ComputingServices: %d\n"%len(targets)) # Query the local infosys (COMPUTINGINFO) of computing elements computing_elements = [ # for piff, we specify that we want to query the LDAP GLUE2 tree arc.Endpoint("piff.hep.lu.se", arc.Endpoint.COMPUTINGINFO, "org.nordugrid.ldapglue2"), # for pgs03, we don't specify the interface, we let the system try all possibilities arc.Endpoint("pgs03.grid.upjs.sk", arc.Endpoint.COMPUTINGINFO) ] retriever2 = retrieve(uc, computing_elements) # Get all the ExecutionTargets on these ComputingServices targets2 = retriever2.GetExecutionTargets() sys.stdout.write("The discovered ExecutionTargets:\n") for target in targets2: sys.stdout.write("%s\n"%str(target)) # Query both registries and computing elements at the same time: endpoints = [ arc.Endpoint("arc-emi.grid.upjs.sk/O=Grid/Mds-Vo-Name=ARC-EMI", arc.Endpoint.REGISTRY), arc.Endpoint("piff.hep.lu.se", arc.Endpoint.COMPUTINGINFO, "org.nordugrid.ldapglue2") ] retriever3 = retrieve(uc, endpoints) sys.stdout.write("Discovered ComputingServices: %s\n"%(", ".join([service.Name for service in retriever3]))) # wait for all the background threads to finish before we destroy the objects they may use import atexit @atexit.register def wait_exit(): arc.ThreadInitializer().waitExit() # arc.Logger.getRootLogger().addDestination(arc.LogStream(sys.stderr)) # arc.Logger.getRootLogger().setThreshold(arc.DEBUG) # run the example example() nordugrid-arc-5.4.2/python/examples/PaxHeaders.7502/retrieving_results.py0000644000000000000000000000012312540535134024646 xustar000000000000000027 mtime=1434630748.197552 26 atime=1513200576.88473 30 ctime=1513200665.352812953 nordugrid-arc-5.4.2/python/examples/retrieving_results.py0000755000175000002070000000435312540535134024724 0ustar00mockbuildmock00000000000000#! 
/usr/bin/env python import arc import sys import os def example(): # Creating a UserConfig object with the user's proxy # and the path of the trusted CA certificates uc = arc.UserConfig() uc.ProxyPath("/tmp/x509up_u%s" % os.getuid()) uc.CACertificatesDirectory("/etc/grid-security/certificates") # Create a new job object with a given JobID job = arc.Job() job.JobID = "https://piff.hep.lu.se:443/arex/hYDLDmyxvUfn5h5iWqkutBwoABFKDmABFKDmIpHKDmYBFKDmtRy9En" job.Flavour = "ARC1" job.ServiceInformationURL = job.JobStatusURL = job.JobManagementURL = arc.URL("https://piff.hep.lu.se:443/arex") sys.stdout.write("Get job information from the computing element...\n") # Put the job into a JobSupervisor and update its information job_supervisor = arc.JobSupervisor(uc, [job]) job_supervisor.Update() sys.stdout.write("Downloading results...\n") # Prepare a list for storing the directories for the downloaded job results (if there would be more jobs) downloadeddirectories = arc.StringList() # Start retrieving results of all the selected jobs # into the "/tmp" directory (first argument) # using the jobid and not the jobname as the name of the subdirectory (second argument, usejobname = False) # do not overwrite existing directories with the same name (third argument: force = False) # collect the downloaded directories into the variable "downloadeddirectories" (forth argument) success = job_supervisor.Retrieve("/tmp", False, False, downloadeddirectories) if not success: sys.stdout.write("Downloading results failed.\n") for downloadeddirectory in downloadeddirectories: sys.stdout.write("Job results were downloaded to %s\n"%str(downloadeddirectory)) sys.stdout.write("Contents of the directory:\n") for filename in os.listdir(downloadeddirectory): sys.stdout.write(" %s\n"%filename) # wait for all the background threads to finish before we destroy the objects they may use import atexit @atexit.register def wait_exit(): arc.ThreadInitializer().waitExit() # arc.Logger.getRootLogger().addDestination(arc.LogStream(sys.stderr)) # arc.Logger.getRootLogger().setThreshold(arc.DEBUG) # run the example example() nordugrid-arc-5.4.2/python/examples/PaxHeaders.7502/job_filtering.py0000644000000000000000000000012312540535356023532 xustar000000000000000027 mtime=1434630894.424996 26 atime=1513200576.88173 30 ctime=1513200665.348812904 nordugrid-arc-5.4.2/python/examples/job_filtering.py0000755000175000002070000000272212540535356023606 0ustar00mockbuildmock00000000000000#! 
/usr/bin/env python import arc import sys def example(): uc = arc.UserConfig() # Create a JobSupervisor to handle all the jobs job_supervisor = arc.JobSupervisor(uc) # Retrieve all the jobs from this computing element endpoint = arc.Endpoint("https://piff.hep.lu.se:443/arex", arc.Endpoint.JOBLIST) sys.stdout.write("Querying %s for jobs...\n" % endpoint.str()) retriever = arc.JobListRetriever(uc) retriever.addConsumer(job_supervisor) retriever.addEndpoint(endpoint) retriever.wait() sys.stdout.write("%s jobs found\n" % len(job_supervisor.GetAllJobs())) sys.stdout.write("Getting job states...\n") # Update the states of the jobs job_supervisor.Update() # Print state of updated jobs sys.stdout.write("The jobs have the following states: %s\n"%(", ".join([job.State.GetGeneralState() for job in job_supervisor.GetAllJobs()]))) # Select failed jobs job_supervisor.SelectByStatus(["Failed"]) failed_jobs = job_supervisor.GetSelectedJobs() sys.stdout.write("The failed jobs:\n") for job in failed_jobs: job.SaveToStream(arc.CPyOstream(sys.stdout), True) # wait for all the background threads to finish before we destroy the objects they may use import atexit @atexit.register def wait_exit(): arc.ThreadInitializer().waitExit() # arc.Logger.getRootLogger().addDestination(arc.LogStream(sys.stderr)) # arc.Logger.getRootLogger().setThreshold(arc.DEBUG) # run the example example() nordugrid-arc-5.4.2/python/examples/PaxHeaders.7502/job_submission.py0000644000000000000000000000012312540535243023735 xustar000000000000000027 mtime=1434630819.230982 26 atime=1513200576.88673 30 ctime=1513200665.351812941 nordugrid-arc-5.4.2/python/examples/job_submission.py0000755000175000002070000000376312540535243024017 0ustar00mockbuildmock00000000000000#! /usr/bin/env python import arc import sys import os import random def example(): # Creating a UserConfig object with the user's proxy # and the path of the trusted CA certificates uc = arc.UserConfig() uc.ProxyPath("/tmp/x509up_u%s" % os.getuid()) uc.CACertificatesDirectory("/etc/grid-security/certificates") # Creating an endpoint for a Computing Element endpoint = arc.Endpoint("piff.hep.lu.se", arc.Endpoint.COMPUTINGINFO, "org.nordugrid.ldapglue2") # Get the ExecutionTargets of this ComputingElement retriever = arc.ComputingServiceRetriever(uc, [endpoint]) retriever.wait() targets = retriever.GetExecutionTargets() # Shuffle the targets to simulate a random broker targets = list(targets) random.shuffle(targets) # Create a JobDescription jobdesc = arc.JobDescription() jobdesc.Application.Executable.Path = "/bin/hostname" jobdesc.Application.Output = "stdout.txt" # create an empty job object which will contain our submitted job job = arc.Job() success = False # Submit job directly to the execution targets, without a broker for target in targets: sys.stdout.write("Trying to submit to %s (%s) ... 
"%(target.ComputingEndpoint.URLString, target.ComputingEndpoint.InterfaceName)) sys.stdout.flush() success = target.Submit(uc, jobdesc, job) if success: sys.stdout.write("succeeded!\n") break else: sys.stdout.write("failed!\n") if success: sys.stdout.write("Job was submitted:\n") job.SaveToStream(arc.CPyOstream(sys.stdout), False) else: sys.stdout.write("Job submission failed\n") # wait for all the background threads to finish before we destroy the objects they may use import atexit @atexit.register def wait_exit(): arc.ThreadInitializer().waitExit() # arc.Logger.getRootLogger().addDestination(arc.LogStream(sys.stderr)) # arc.Logger.getRootLogger().setThreshold(arc.DEBUG) # run the example example() nordugrid-arc-5.4.2/python/PaxHeaders.7502/Doxyfile.api.in0000644000000000000000000000012712242371471021407 xustar000000000000000027 mtime=1384772409.509397 30 atime=1513200652.746658775 30 ctime=1513200665.119810103 nordugrid-arc-5.4.2/python/Doxyfile.api.in0000644000175000002070000011534712242371471021464 0ustar00mockbuildmock00000000000000# Doxyfile 1.3.5 PROJECT_NAME = "Hosting Environment (Daemon)" PROJECT_NUMBER = OUTPUT_DIRECTORY = api OUTPUT_LANGUAGE = English USE_WINDOWS_ENCODING = NO BRIEF_MEMBER_DESC = NO REPEAT_BRIEF = YES ABBREVIATE_BRIEF = ALWAYS_DETAILED_SEC = NO INLINE_INHERITED_MEMB = NO FULL_PATH_NAMES = NO STRIP_FROM_PATH = SHORT_NAMES = NO JAVADOC_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the DETAILS_AT_TOP tag is set to YES then Doxygen # will output the detailed description near the top, like JavaDoc. # If set to NO, the detailed description appears after the member # documentation. DETAILS_AT_TOP = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources # only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java sources # only. 
Doxygen will then generate output that is more tailored for Java. # For instance, namespaces will be presented as packages, qualified scopes # will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. # Allowing the documentation of undocumented members will produce # a latex error with doxygen 1.5.8-1~exp1 #HIDE_UNDOC_MEMBERS = NO HIDE_UNDOC_MEMBERS = YES # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # users are advised to set this option to NO. 
CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. 
WARN_IF_DOC_ERROR = YES # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = @top_srcdir@/src/hed/libs @top_srcdir@/src/libs # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp # *.h++ *.idl *.odl *.cs *.php *.php3 *.inc FILE_PATTERNS = *.h # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used select whether or not files or directories # that are symbolic links (a Unix filesystem feature) are excluded from the input. EXCLUDE_SYMLINKS = YES # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. EXCLUDE_PATTERNS = */.svn* */test* */.libs* */.deps* # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. 
Doxygen will then use the output that the filter program writes # to standard output. INPUT_FILTER = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES (the default) # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = YES # If the REFERENCES_RELATION tag is set to YES (the default) # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = YES # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = NO # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = NO # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. 
HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compressed HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # If the GENERATE_TREEVIEW tag is set to YES, a side panel will be # generated containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, # Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are # probably better off using the HTML help feature. GENERATE_TREEVIEW = NO # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. 
TREEVIEW_WIDTH = 250 #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = NO # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. 
This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = YES # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. 
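# Illustrative aside (not part of the shipped file): GENERATE_XML = YES above is
# the output that the Python binding build consumes. As encoded further down in
# python/python/arc/Makefile.in, the XML index is turned into SWIG docstrings
# roughly like this (a sketch only; paths follow that Makefile):
#   doxygen Doxyfile.api              # writes api/xml/index.xml
#   cp -p api/xml/index.xml index.xml
#   python doxy2swig.py index.xml pydoxygen.i
# End of aside; the shipped AutoGen section continues below.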
Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. This is useful # if you want to understand what is going on. On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_PREDEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. 
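# Illustrative aside (not part of the shipped file): a sketch of the
# preprocessor tags described above, using hypothetical macro names in the
# "name" / "name=definition" form. The lines are commented out so the shipped
# values below stay in effect.
#   MACRO_EXPANSION    = YES
#   EXPAND_ONLY_PREDEF = YES
#   PREDEFINED         = HAVE_FEATURE_X EXPORT_API=
# End of aside; the shipped values continue below.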
EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. Such # function macros are typically used for boiler-plate code, and will confuse the # parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::addtions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base or # super classes. Setting the tag to NO turns the diagrams off. Note that this # option is superseded by the HAVE_DOT option below. This is only a fallback. It is # recommended to install and use dot, since it yields more powerful graphs. CLASS_DIAGRAMS = YES # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. 
CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will # generate a call dependency graph for every global function or class method. # Note that enabling this option will significantly increase the time of a run. # So in most cases it will be better to enable call graphs for selected # functions only using the \callgraph command. CALL_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found on the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width # (in pixels) of the graphs generated by dot. If a graph becomes larger than # this value, doxygen will try to truncate the graph, so that it fits within # the specified constraint. Beware that most browsers cannot cope with very # large images. MAX_DOT_GRAPH_WIDTH = 1024 # The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allows height # (in pixels) of the graphs generated by dot. If a graph becomes larger than # this value, doxygen will try to truncate the graph, so that it fits within # the specified constraint. Beware that most browsers cannot cope with very # large images. MAX_DOT_GRAPH_HEIGHT = 1024 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes that # lay further from the root node will be omitted. Note that setting this option to # 1 or 2 may greatly reduce the computation time needed for large code bases. Also # note that a graph may be further truncated if the graph's image dimensions are # not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH and MAX_DOT_GRAPH_HEIGHT). 
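# Illustrative aside (not part of the shipped file): if Graphviz were available,
# the dot-based graphs described above could be switched on roughly like this
# (example values only, shown commented out so the shipped settings stay active):
#   HAVE_DOT         = YES
#   DOT_PATH         = /usr/bin
#   DOT_IMAGE_FORMAT = png
#   CALL_GRAPH       = NO    # left off; call graphs are costly on large code bases
# End of aside; the shipped depth-limit discussion continues below.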
# If 0 is used for the depth value (the default), the graph is not depth-constrained. MAX_DOT_GRAPH_DEPTH = 0 # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES #--------------------------------------------------------------------------- # Configuration::addtions related to the search engine #--------------------------------------------------------------------------- # The SEARCHENGINE tag specifies whether or not a search engine should be # used. If set to NO the values of all tags below this one will be ignored. SEARCHENGINE = NO nordugrid-arc-5.4.2/python/PaxHeaders.7502/README0000644000000000000000000000012311001653037017371 xustar000000000000000027 mtime=1208440351.928622 26 atime=1513200576.88173 30 ctime=1513200665.118810091 nordugrid-arc-5.4.2/python/README0000644000175000002070000000004611001653037017437 0ustar00mockbuildmock00000000000000Python wrapper of main arc libraries. nordugrid-arc-5.4.2/python/PaxHeaders.7502/python0000644000000000000000000000013213214316031017753 xustar000000000000000030 mtime=1513200665.156810556 30 atime=1513200668.723854182 30 ctime=1513200665.156810556 nordugrid-arc-5.4.2/python/python/0000755000175000002070000000000013214316031020076 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/python/python/PaxHeaders.7502/arc0000644000000000000000000000013013214316031020516 xustar000000000000000029 mtime=1513200665.19481102 30 atime=1513200668.723854182 29 ctime=1513200665.19481102 nordugrid-arc-5.4.2/python/python/arc/0000755000175000002070000000000013214316031020643 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/python/python/arc/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712240743250022642 xustar000000000000000027 mtime=1384367784.271138 30 atime=1513200592.687924233 30 ctime=1513200665.192810996 nordugrid-arc-5.4.2/python/python/arc/Makefile.am0000644000175000002070000000013112240743250022677 0ustar00mockbuildmock00000000000000if PYTHON3 SWIG_PY3 = -py3 else SWIG_PY3 = endif include $(top_srcdir)/python/common.mk nordugrid-arc-5.4.2/python/python/arc/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315720022646 xustar000000000000000030 mtime=1513200592.758925101 30 atime=1513200652.776659141 30 ctime=1513200665.193811008 nordugrid-arc-5.4.2/python/python/arc/Makefile.in0000644000175000002070000012636313214315720022727 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
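# Illustrative aside (not part of the generated file): the SWIG_PY3 variable set
# in Makefile.am above (empty, or -py3 when the PYTHON3 conditional holds) is
# passed straight to the swig command in the wrapper rule further down in this
# Makefile, roughly:
#   $(SWIG) -v -c++ -python $(SWIG_PY3) -threads -o common_wrap.cpp ... swig/common.i
# so a Python 3 build gets "-py3" appended while a Python 2 build does not.
# End of aside; the generated Makefile.in continues below.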
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(top_srcdir)/python/common.mk subdir = python/python/arc ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pyexecdir)" "$(DESTDIR)$(pkgpythondir)" LTLIBRARIES = $(pyexec_LTLIBRARIES) am__DEPENDENCIES_1 = _arc_la_DEPENDENCIES = $(ARCLIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am__objects_1 = _arc_la-swigmodulesinit_wrap.lo _arc_la-arc_init.lo am__objects_2 = _arc_la-common_wrap.lo _arc_la-loader_wrap.lo \ _arc_la-message_wrap.lo _arc_la-communication_wrap.lo \ _arc_la-compute_wrap.lo _arc_la-credential_wrap.lo \ _arc_la-data_wrap.lo _arc_la-delegation_wrap.lo \ _arc_la-security_wrap.lo am__objects_3 = $(am__objects_2) nodist__arc_la_OBJECTS = $(am__objects_1) $(am__objects_3) _arc_la_OBJECTS = $(nodist__arc_la_OBJECTS) _arc_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(_arc_la_CXXFLAGS) $(CXXFLAGS) \ $(_arc_la_LDFLAGS) $(LDFLAGS) -o $@ @PYTHON_SWIG_ENABLED_TRUE@am__arc_la_rpath = -rpath $(pyexecdir) DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) 
$(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(nodist__arc_la_SOURCES) DIST_SOURCES = py_compile = $(top_srcdir)/py-compile ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ 
GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ 
XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ @PYTHON3_FALSE@SWIG_PY3 = @PYTHON3_TRUE@SWIG_PY3 = -py3 pkgpythondir = $(PYTHON_SITE_ARCH)/arc pyexecdir = $(PYTHON_SITE_ARCH) ARCSWIGLIBS = common loader message communication compute credential data delegation security ARCPYLIBS = $(ARCSWIGLIBS:=.py) ARCWRAPPERS = $(ARCSWIGLIBS:=_wrap.cpp) ARCWRAPHDRS = $(ARCSWIGLIBS:=_wrap.h) ARCWRAPDEPS = $(foreach module, $(ARCSWIGLIBS), ./$(DEPDIR)/$(module)_wrap.deps) ARCSWIGINIT = swigmodulesinit_wrap.cpp arc_init.cpp BUILT_SOURCES = $(ARCSWIGINIT) __init__.py @PYTHON_SWIG_ENABLED_TRUE@nodist_pkgpython_PYTHON = __init__.py $(ARCPYLIBS) @PYTHON_SWIG_ENABLED_TRUE@pyexec_LTLIBRARIES = _arc.la @WIN32_TRUE@AM_CPPFLAGS = -DWIN32 -DWINNT @PYDOXYGEN_FALSE@PYDOXFLAGS = @PYDOXYGEN_TRUE@PYDOXFLAGS = -DPYDOXYGEN @PYDOXYGEN_FALSE@PYDOXFILE = @PYDOXYGEN_TRUE@PYDOXFILE = pydoxygen.i EXTRA_DIST = index.xml MAINTAINERCLEANFILES = index.xml @DBJSTORE_ENABLED_FALSE@SWIG_IS_DBJSTORE_ENABLED = @DBJSTORE_ENABLED_TRUE@SWIG_IS_DBJSTORE_ENABLED = -DDBJSTORE_ENABLED ARCLIBS = \ $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ 
$(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la nodist__arc_la_SOURCES = $(ARCSWIGINIT) $(ARCWRAPPERS) _arc_la_CXXFLAGS = -include $(top_builddir)/config.h -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(PYTHON_CFLAGS) $(ZLIB_CFLAGS) $(DBCXX_CPPFLAGS) \ -fno-strict-aliasing -DSWIG_COBJECT_TYPES $(AM_CXXFLAGS) _arc_la_LIBADD = \ $(ARCLIBS) $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(PYTHON_LIBS) $(ZLIB_LIBS) $(DBCXX_LIBS) _arc_la_LDFLAGS = -no-undefined -avoid-version -module CLEANFILES = $(ARCWRAPPERS) $(ARCWRAPHDRS) $(ARCPYLIBS) $(BUILT_SOURCES) pydoxygen.i $(ARCPYLIBS:.py=.pyc) all: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(top_srcdir)/python/common.mk $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign python/python/arc/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign python/python/arc/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pyexecLTLIBRARIES: $(pyexec_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pyexecdir)" || $(MKDIR_P) "$(DESTDIR)$(pyexecdir)" @list='$(pyexec_LTLIBRARIES)'; test -n "$(pyexecdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pyexecdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pyexecdir)"; \ } uninstall-pyexecLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pyexec_LTLIBRARIES)'; test -n "$(pyexecdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pyexecdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pyexecdir)/$$f"; \ done clean-pyexecLTLIBRARIES: -test -z "$(pyexec_LTLIBRARIES)" || rm -f $(pyexec_LTLIBRARIES) @list='$(pyexec_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done _arc.la: 
$(_arc_la_OBJECTS) $(_arc_la_DEPENDENCIES) $(_arc_la_LINK) $(am__arc_la_rpath) $(_arc_la_OBJECTS) $(_arc_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-arc_init.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-common_wrap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-communication_wrap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-compute_wrap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-credential_wrap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-data_wrap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-delegation_wrap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-loader_wrap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-message_wrap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-security_wrap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-swigmodulesinit_wrap.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< _arc_la-swigmodulesinit_wrap.lo: swigmodulesinit_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-swigmodulesinit_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-swigmodulesinit_wrap.Tpo -c -o _arc_la-swigmodulesinit_wrap.lo `test -f 'swigmodulesinit_wrap.cpp' || echo '$(srcdir)/'`swigmodulesinit_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-swigmodulesinit_wrap.Tpo $(DEPDIR)/_arc_la-swigmodulesinit_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='swigmodulesinit_wrap.cpp' object='_arc_la-swigmodulesinit_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-swigmodulesinit_wrap.lo `test -f 'swigmodulesinit_wrap.cpp' || echo '$(srcdir)/'`swigmodulesinit_wrap.cpp _arc_la-arc_init.lo: arc_init.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) 
--tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-arc_init.lo -MD -MP -MF $(DEPDIR)/_arc_la-arc_init.Tpo -c -o _arc_la-arc_init.lo `test -f 'arc_init.cpp' || echo '$(srcdir)/'`arc_init.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-arc_init.Tpo $(DEPDIR)/_arc_la-arc_init.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arc_init.cpp' object='_arc_la-arc_init.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-arc_init.lo `test -f 'arc_init.cpp' || echo '$(srcdir)/'`arc_init.cpp _arc_la-common_wrap.lo: common_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-common_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-common_wrap.Tpo -c -o _arc_la-common_wrap.lo `test -f 'common_wrap.cpp' || echo '$(srcdir)/'`common_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-common_wrap.Tpo $(DEPDIR)/_arc_la-common_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='common_wrap.cpp' object='_arc_la-common_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-common_wrap.lo `test -f 'common_wrap.cpp' || echo '$(srcdir)/'`common_wrap.cpp _arc_la-loader_wrap.lo: loader_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-loader_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-loader_wrap.Tpo -c -o _arc_la-loader_wrap.lo `test -f 'loader_wrap.cpp' || echo '$(srcdir)/'`loader_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-loader_wrap.Tpo $(DEPDIR)/_arc_la-loader_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='loader_wrap.cpp' object='_arc_la-loader_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-loader_wrap.lo `test -f 'loader_wrap.cpp' || echo '$(srcdir)/'`loader_wrap.cpp _arc_la-message_wrap.lo: message_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-message_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-message_wrap.Tpo -c -o _arc_la-message_wrap.lo `test -f 'message_wrap.cpp' || echo '$(srcdir)/'`message_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-message_wrap.Tpo $(DEPDIR)/_arc_la-message_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='message_wrap.cpp' object='_arc_la-message_wrap.lo' libtool=yes 
@AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-message_wrap.lo `test -f 'message_wrap.cpp' || echo '$(srcdir)/'`message_wrap.cpp _arc_la-communication_wrap.lo: communication_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-communication_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-communication_wrap.Tpo -c -o _arc_la-communication_wrap.lo `test -f 'communication_wrap.cpp' || echo '$(srcdir)/'`communication_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-communication_wrap.Tpo $(DEPDIR)/_arc_la-communication_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='communication_wrap.cpp' object='_arc_la-communication_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-communication_wrap.lo `test -f 'communication_wrap.cpp' || echo '$(srcdir)/'`communication_wrap.cpp _arc_la-compute_wrap.lo: compute_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-compute_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-compute_wrap.Tpo -c -o _arc_la-compute_wrap.lo `test -f 'compute_wrap.cpp' || echo '$(srcdir)/'`compute_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-compute_wrap.Tpo $(DEPDIR)/_arc_la-compute_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='compute_wrap.cpp' object='_arc_la-compute_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-compute_wrap.lo `test -f 'compute_wrap.cpp' || echo '$(srcdir)/'`compute_wrap.cpp _arc_la-credential_wrap.lo: credential_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-credential_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-credential_wrap.Tpo -c -o _arc_la-credential_wrap.lo `test -f 'credential_wrap.cpp' || echo '$(srcdir)/'`credential_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-credential_wrap.Tpo $(DEPDIR)/_arc_la-credential_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='credential_wrap.cpp' object='_arc_la-credential_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-credential_wrap.lo `test -f 
'credential_wrap.cpp' || echo '$(srcdir)/'`credential_wrap.cpp _arc_la-data_wrap.lo: data_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-data_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-data_wrap.Tpo -c -o _arc_la-data_wrap.lo `test -f 'data_wrap.cpp' || echo '$(srcdir)/'`data_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-data_wrap.Tpo $(DEPDIR)/_arc_la-data_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='data_wrap.cpp' object='_arc_la-data_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-data_wrap.lo `test -f 'data_wrap.cpp' || echo '$(srcdir)/'`data_wrap.cpp _arc_la-delegation_wrap.lo: delegation_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-delegation_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-delegation_wrap.Tpo -c -o _arc_la-delegation_wrap.lo `test -f 'delegation_wrap.cpp' || echo '$(srcdir)/'`delegation_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-delegation_wrap.Tpo $(DEPDIR)/_arc_la-delegation_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='delegation_wrap.cpp' object='_arc_la-delegation_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-delegation_wrap.lo `test -f 'delegation_wrap.cpp' || echo '$(srcdir)/'`delegation_wrap.cpp _arc_la-security_wrap.lo: security_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-security_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-security_wrap.Tpo -c -o _arc_la-security_wrap.lo `test -f 'security_wrap.cpp' || echo '$(srcdir)/'`security_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-security_wrap.Tpo $(DEPDIR)/_arc_la-security_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='security_wrap.cpp' object='_arc_la-security_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-security_wrap.lo `test -f 'security_wrap.cpp' || echo '$(srcdir)/'`security_wrap.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-nodist_pkgpythonPYTHON: $(nodist_pkgpython_PYTHON) @$(NORMAL_INSTALL) test -z "$(pkgpythondir)" || $(MKDIR_P) "$(DESTDIR)$(pkgpythondir)" @list='$(nodist_pkgpython_PYTHON)'; dlist=; list2=; test -n "$(pkgpythondir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then b=; else b="$(srcdir)/"; fi; \ if test -f 
$$b$$p; then \ $(am__strip_dir) \ dlist="$$dlist $$f"; \ list2="$$list2 $$b$$p"; \ else :; fi; \ done; \ for file in $$list2; do echo $$file; done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgpythondir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgpythondir)" || exit $$?; \ done || exit $$?; \ if test -n "$$dlist"; then \ if test -z "$(DESTDIR)"; then \ PYTHON=$(PYTHON) $(py_compile) --basedir "$(pkgpythondir)" $$dlist; \ else \ PYTHON=$(PYTHON) $(py_compile) --destdir "$(DESTDIR)" --basedir "$(pkgpythondir)" $$dlist; \ fi; \ else :; fi uninstall-nodist_pkgpythonPYTHON: @$(NORMAL_UNINSTALL) @list='$(nodist_pkgpython_PYTHON)'; test -n "$(pkgpythondir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ filesc=`echo "$$files" | sed 's|$$|c|'`; \ fileso=`echo "$$files" | sed 's|$$|o|'`; \ echo " ( cd '$(DESTDIR)$(pkgpythondir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgpythondir)" && rm -f $$files || exit $$?; \ echo " ( cd '$(DESTDIR)$(pkgpythondir)' && rm -f" $$filesc ")"; \ cd "$(DESTDIR)$(pkgpythondir)" && rm -f $$filesc || exit $$?; \ echo " ( cd '$(DESTDIR)$(pkgpythondir)' && rm -f" $$fileso ")"; \ cd "$(DESTDIR)$(pkgpythondir)" && rm -f $$fileso ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ 
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pyexecdir)" "$(DESTDIR)$(pkgpythondir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." -test -z "$(BUILT_SOURCES)" || rm -f $(BUILT_SOURCES) -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) clean: clean-am clean-am: clean-generic clean-libtool clean-local \ clean-pyexecLTLIBRARIES mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-nodist_pkgpythonPYTHON @$(NORMAL_INSTALL) $(MAKE) $(AM_MAKEFLAGS) install-data-hook install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pyexecLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-nodist_pkgpythonPYTHON \ uninstall-pyexecLTLIBRARIES .MAKE: all check install install-am install-data-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-local clean-pyexecLTLIBRARIES ctags \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-data-hook install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man \ install-nodist_pkgpythonPYTHON install-pdf install-pdf-am \ install-ps install-ps-am install-pyexecLTLIBRARIES \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic 
mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am \ uninstall-nodist_pkgpythonPYTHON uninstall-pyexecLTLIBRARIES $(ARCSWIGINIT) __init__.py: %: $(top_srcdir)/python/% cp $< $@ # Only remake index.xml if it is unavailable. # It really depends on Doxyfile.api, but this file changes whenever # configure is run, and we do not want to remake index.xml all # the time since doxygen is currently not required to be present. #index.xml: $(top_srcdir)/python/Doxyfile.api index.xml: $(DOXYGEN) $(top_builddir)/python/Doxyfile.api cp -p api/xml/index.xml $@ rm -rf api pydoxygen.i: index.xml $(PYTHON) $(top_srcdir)/python/doxy2swig.py $< $@ clean-local: -rm -rf __pycache__ @AMDEP_TRUE@-include $(ARCWRAPDEPS) $(ARCPYLIBS): %.py: %_wrap.cpp $(ARCWRAPPERS): %_wrap.cpp: $(top_srcdir)/swig/%.i $(top_srcdir)/swig/Arc.i $(PYDOXFILE) mkdir -p $(DEPDIR) grep -h '^#' $< $(top_srcdir)/swig/Arc.i $(PYDOXFILE) | \ $(CXXCOMPILE) $(_arc_la_CXXFLAGS) -M -MT $*_wrap.cpp -MT arc_$*.py -MP -MF "$(DEPDIR)/$*_wrap.deps" -x c++ - $(SWIG) -v -c++ -python $(SWIG_PY3) -threads -o $*_wrap.cpp \ -I/usr/include -I$(top_srcdir)/include \ $(PYDOXFLAGS) $(SWIG_IS_DBJSTORE_ENABLED) \ $(AM_CPPFLAGS) $(OPENSSL_CFLAGS) $(top_srcdir)/swig/$*.i # Workaround for RHEL5 swig + EPEL5 python26 sed 's/\(^\s*char \*.*\) = \(.*ml_doc\)/\1 = (char *)\2/' $*_wrap.cpp > $*_wrap.cpp.new mv $*_wrap.cpp.new $*_wrap.cpp # Ditto - for 64 bit sed 's/^\(\s*char \*cstr;\) int len;/#if PY_VERSION_HEX < 0x02050000 \&\& !defined(PY_SSIZE_T_MIN)\n&\n#else\n\1 Py_ssize_t len;\n#endif/' $*_wrap.cpp > $*_wrap.cpp.new mv $*_wrap.cpp.new $*_wrap.cpp # Dont allow threading when deleting SwigPyIterator objects sed '/*_wrap_delete_@SWIG_PYTHON_NAMING@Iterator/,/SWIG_PYTHON_THREAD_END/ s/.*SWIG_PYTHON_THREAD_[A-Z]*_ALLOW.*//' $*_wrap.cpp > $*_wrap.cpp.new mv $*_wrap.cpp.new $*_wrap.cpp # Dont allow threading when handling SWIG Python iterators (see bug # 2683). Fixed in SWIG version 2. if test "x@SWIG2@" != "xyes"; then \ sed '/*_wrap_@SWIG_PYTHON_NAMING@Iterator_/,/SWIG_PYTHON_THREAD_END/ s/.*SWIG_PYTHON_THREAD_[A-Z]*_ALLOW.*//' $*_wrap.cpp > $*_wrap.cpp.new; \ mv $*_wrap.cpp.new $*_wrap.cpp; \ fi # When mapping a template with a template class no space is inserted # between the two right angle brackets. sed 's/>>(new/> >(new/g' $*_wrap.cpp > $*_wrap.cpp.new mv $*_wrap.cpp.new $*_wrap.cpp # When mapping a template with another template class as argument, and # that template class takes two classes as argument, then older swigs # put parentheses around the two class arguments, e.g. T<(A,B)>, not # valid syntax should be T instead. sed 's/<(\([,:[:alnum:]]*\))>/<\1>/g' $*_wrap.cpp > $*_wrap.cpp.tmp mv $*_wrap.cpp.tmp $*_wrap.cpp # Fix python3 relative import problem sed "s/import _$*/from arc &/" < $*.py > $*.py.new mv $*.py.new $*.py # In swig 2.0.12 the erase method of the SharedBenchmarkMap type (swig template) uses a wrong name for the wrapped std::map::erase method. 
sed 's/std_map_Sl_std_string_Sc_double_Sc_std_less_Sl_std_string_Sg__Sc_std_allocator_Sl_std_pair_Sl_std_string_SS_const_Sc_double_Sg__Sg__Sg/std_map_Sl_std_string_Sc_double_Sg/g' $*_wrap.cpp > $*_wrap.cpp.tmp mv $*_wrap.cpp.tmp $*_wrap.cpp # Fix python3 relative import problem if the module imports other submodules through %import(module= in the *.i files for i in $(ARCSWIGLIBS); do\ if grep -q "^import $$i" $*.py; then \ sed "s/import $$i/from arc &/" < $*.py > $*.py.new ;\ mv $*.py.new $*.py; \ fi;\ done install-data-hook: if test -n "$(PYTHON_SOABI)" ; then \ mv $(DESTDIR)$(pyexecdir)/_arc.so \ $(DESTDIR)$(pyexecdir)/_arc.$(PYTHON_SOABI).so ; \ fi .NOTPARALLEL: %.lo %.o # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/python/python/arc/PaxHeaders.7502/index.xml0000644000000000000000000000013113214316031022425 xustar000000000000000030 mtime=1513200665.180810849 30 atime=1513200665.180810849 29 ctime=1513200665.19481102 nordugrid-arc-5.4.2/python/python/arc/index.xml0000644000175000002070000000030613214316031022473 0ustar00mockbuildmock00000000000000 nordugrid-arc-5.4.2/python/python/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612240743250022074 xustar000000000000000027 mtime=1384367784.271138 29 atime=1513200592.64192367 30 ctime=1513200665.153810519 nordugrid-arc-5.4.2/python/python/Makefile.am0000644000175000002070000000001612240743250022134 0ustar00mockbuildmock00000000000000SUBDIRS = arc nordugrid-arc-5.4.2/python/python/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315720022101 xustar000000000000000030 mtime=1513200592.671924037 30 atime=1513200652.763658983 30 ctime=1513200665.154810531 nordugrid-arc-5.4.2/python/python/Makefile.in0000644000175000002070000005544613214315720022165 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
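A minimal stand-alone illustration of the Python 3 import fix applied by the wrapper recipe above, assuming the stem is common so that SWIG emits "import _common" at the top of the generated common.py; inside the installed arc package that bare import no longer resolves under Python 3 (implicit relative imports were removed), so the recipe rewrites it into an absolute package import:
# illustration only: reproduce the `from arc &' rewrite on a sample line
printf 'import _common\n' | sed 's/import _common/from arc &/'
# prints: from arc import _common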
@SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = python/python DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = 
@ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = 
@LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ 
libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = arc all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign python/python/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign python/python/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
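As the generated comment above notes, make variables are overridden rather than the Makefiles edited; a brief illustration of option (2), with arbitrary example flags:
# option (2): pass the desired value on the `make' command line
make CXXFLAGS='-O2 -g'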
$(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-5.4.2/python/PaxHeaders.7502/altpython0000644000000000000000000000013213214316031020454 xustar000000000000000030 mtime=1513200665.215811277 30 atime=1513200668.723854182 30 ctime=1513200665.215811277 nordugrid-arc-5.4.2/python/altpython/0000755000175000002070000000000013214316031020577 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/python/altpython/PaxHeaders.7502/arc0000644000000000000000000000013213214316031021221 xustar000000000000000030 mtime=1513200665.251811718 30 atime=1513200668.723854182 30 ctime=1513200665.251811718 nordugrid-arc-5.4.2/python/altpython/arc/0000755000175000002070000000000013214316031021344 5ustar00mockbuildmock00000000000000nordugrid-arc-5.4.2/python/altpython/arc/PaxHeaders.7502/Makefile.am0000644000000000000000000000012712240743250023343 xustar000000000000000027 mtime=1384367784.271138 30 atime=1513200592.497921909 30 ctime=1513200665.249811693 nordugrid-arc-5.4.2/python/altpython/arc/Makefile.am0000644000175000002070000000076612240743250023416 0ustar00mockbuildmock00000000000000# Set PYTHON variables from the alternative python PYTHON = @ALTPYTHON@ PYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ PYTHON_EXEC_PREFIX = @ALTPYTHON_EXEC_PREFIX@ PYTHON_LIBS = @ALTPYTHON_LIBS@ PYTHON_PLATFORM = @ALTPYTHON_PLATFORM@ PYTHON_PREFIX = @ALTPYTHON_PREFIX@ PYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ PYTHON_SOABI = @ALTPYTHON_SOABI@ PYTHON_VERSION = @ALTPYTHON_VERSION@ if ALTPYTHON3 SWIG_PY3 = -py3 else SWIG_PY3 = endif include $(top_srcdir)/python/common.mk nordugrid-arc-5.4.2/python/altpython/arc/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315720023347 xustar000000000000000030 mtime=1513200592.575922863 30 atime=1513200652.808659533 30 ctime=1513200665.250811705 nordugrid-arc-5.4.2/python/altpython/arc/Makefile.in0000644000175000002070000012670313214315720023426 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
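The altpython Makefile.am above rebuilds the same bindings against the alternative interpreter by overriding the PYTHON_* variables with their ALTPYTHON counterparts and passing -py3 to SWIG only under the ALTPYTHON3 conditional, i.e. when that interpreter is a Python 3. A hedged sketch of the kind of check the conditional encodes, assuming the shell variable ALTPYTHON holds the interpreter path (in the real build this comes from the @ALTPYTHON@ configure substitution):
# prints the interpreter's major version; 3 is what warrants the -py3 flag
"$ALTPYTHON" -c 'import sys; print(sys.version_info[0])'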
@SET_MAKE@ # Set PYTHON variables from the alternative python VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(top_srcdir)/python/common.mk subdir = python/altpython/arc ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pyexecdir)" "$(DESTDIR)$(pkgpythondir)" LTLIBRARIES = $(pyexec_LTLIBRARIES) am__DEPENDENCIES_1 = _arc_la_DEPENDENCIES = $(ARCLIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am__objects_1 = _arc_la-swigmodulesinit_wrap.lo _arc_la-arc_init.lo am__objects_2 = _arc_la-common_wrap.lo _arc_la-loader_wrap.lo \ _arc_la-message_wrap.lo _arc_la-communication_wrap.lo \ _arc_la-compute_wrap.lo _arc_la-credential_wrap.lo \ _arc_la-data_wrap.lo _arc_la-delegation_wrap.lo \ _arc_la-security_wrap.lo am__objects_3 = $(am__objects_2) nodist__arc_la_OBJECTS = $(am__objects_1) $(am__objects_3) _arc_la_OBJECTS = $(nodist__arc_la_OBJECTS) _arc_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(_arc_la_CXXFLAGS) $(CXXFLAGS) \ $(_arc_la_LDFLAGS) $(LDFLAGS) -o $@ @PYTHON_SWIG_ENABLED_TRUE@am__arc_la_rpath = -rpath $(pyexecdir) DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp 
am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) CXXLD = $(CXX) CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(nodist__arc_la_SOURCES) DIST_SOURCES = py_compile = $(top_srcdir)/py-compile ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ 
GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @ALTPYTHON@ PYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ PYTHON_LIBS = @ALTPYTHON_LIBS@ PYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ PYTHON_SOABI = @ALTPYTHON_SOABI@ PYTHON_VERSION = @ALTPYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ 
XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ PYTHON_EXEC_PREFIX = @ALTPYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @ALTPYTHON_PLATFORM@ PYTHON_PREFIX = @ALTPYTHON_PREFIX@ @ALTPYTHON3_FALSE@SWIG_PY3 = @ALTPYTHON3_TRUE@SWIG_PY3 = -py3 pkgpythondir = $(PYTHON_SITE_ARCH)/arc pyexecdir = $(PYTHON_SITE_ARCH) ARCSWIGLIBS = common loader message communication compute credential data delegation security ARCPYLIBS = $(ARCSWIGLIBS:=.py) ARCWRAPPERS = $(ARCSWIGLIBS:=_wrap.cpp) ARCWRAPHDRS = $(ARCSWIGLIBS:=_wrap.h) ARCWRAPDEPS = $(foreach module, $(ARCSWIGLIBS), ./$(DEPDIR)/$(module)_wrap.deps) ARCSWIGINIT = swigmodulesinit_wrap.cpp arc_init.cpp BUILT_SOURCES = $(ARCSWIGINIT) __init__.py @PYTHON_SWIG_ENABLED_TRUE@nodist_pkgpython_PYTHON = __init__.py $(ARCPYLIBS) @PYTHON_SWIG_ENABLED_TRUE@pyexec_LTLIBRARIES = _arc.la @WIN32_TRUE@AM_CPPFLAGS = -DWIN32 -DWINNT @PYDOXYGEN_FALSE@PYDOXFLAGS = @PYDOXYGEN_TRUE@PYDOXFLAGS = -DPYDOXYGEN @PYDOXYGEN_FALSE@PYDOXFILE = @PYDOXYGEN_TRUE@PYDOXFILE = pydoxygen.i EXTRA_DIST = index.xml MAINTAINERCLEANFILES = index.xml @DBJSTORE_ENABLED_FALSE@SWIG_IS_DBJSTORE_ENABLED = @DBJSTORE_ENABLED_TRUE@SWIG_IS_DBJSTORE_ENABLED = -DDBJSTORE_ENABLED 
ARCLIBS = \ $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la nodist__arc_la_SOURCES = $(ARCSWIGINIT) $(ARCWRAPPERS) _arc_la_CXXFLAGS = -include $(top_builddir)/config.h -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(PYTHON_CFLAGS) $(ZLIB_CFLAGS) $(DBCXX_CPPFLAGS) \ -fno-strict-aliasing -DSWIG_COBJECT_TYPES $(AM_CXXFLAGS) _arc_la_LIBADD = \ $(ARCLIBS) $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(PYTHON_LIBS) $(ZLIB_LIBS) $(DBCXX_LIBS) _arc_la_LDFLAGS = -no-undefined -avoid-version -module CLEANFILES = $(ARCWRAPPERS) $(ARCWRAPHDRS) $(ARCPYLIBS) $(BUILT_SOURCES) pydoxygen.i $(ARCPYLIBS:.py=.pyc) all: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(top_srcdir)/python/common.mk $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign python/altpython/arc/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign python/altpython/arc/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pyexecLTLIBRARIES: $(pyexec_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pyexecdir)" || $(MKDIR_P) "$(DESTDIR)$(pyexecdir)" @list='$(pyexec_LTLIBRARIES)'; test -n "$(pyexecdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pyexecdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pyexecdir)"; \ } uninstall-pyexecLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pyexec_LTLIBRARIES)'; test -n "$(pyexecdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pyexecdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pyexecdir)/$$f"; \ done clean-pyexecLTLIBRARIES: -test -z "$(pyexec_LTLIBRARIES)" || rm -f $(pyexec_LTLIBRARIES) @list='$(pyexec_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done _arc.la: $(_arc_la_OBJECTS) $(_arc_la_DEPENDENCIES) $(_arc_la_LINK) $(am__arc_la_rpath) $(_arc_la_OBJECTS) $(_arc_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-arc_init.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-common_wrap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-communication_wrap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-compute_wrap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-credential_wrap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-data_wrap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-delegation_wrap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-loader_wrap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-message_wrap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-security_wrap.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_arc_la-swigmodulesinit_wrap.Plo@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` 
@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< _arc_la-swigmodulesinit_wrap.lo: swigmodulesinit_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-swigmodulesinit_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-swigmodulesinit_wrap.Tpo -c -o _arc_la-swigmodulesinit_wrap.lo `test -f 'swigmodulesinit_wrap.cpp' || echo '$(srcdir)/'`swigmodulesinit_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-swigmodulesinit_wrap.Tpo $(DEPDIR)/_arc_la-swigmodulesinit_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='swigmodulesinit_wrap.cpp' object='_arc_la-swigmodulesinit_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-swigmodulesinit_wrap.lo `test -f 'swigmodulesinit_wrap.cpp' || echo '$(srcdir)/'`swigmodulesinit_wrap.cpp _arc_la-arc_init.lo: arc_init.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-arc_init.lo -MD -MP -MF $(DEPDIR)/_arc_la-arc_init.Tpo -c -o _arc_la-arc_init.lo `test -f 'arc_init.cpp' || echo '$(srcdir)/'`arc_init.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-arc_init.Tpo $(DEPDIR)/_arc_la-arc_init.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='arc_init.cpp' object='_arc_la-arc_init.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-arc_init.lo `test -f 'arc_init.cpp' || echo '$(srcdir)/'`arc_init.cpp _arc_la-common_wrap.lo: common_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-common_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-common_wrap.Tpo -c -o _arc_la-common_wrap.lo `test -f 'common_wrap.cpp' || echo '$(srcdir)/'`common_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-common_wrap.Tpo $(DEPDIR)/_arc_la-common_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='common_wrap.cpp' object='_arc_la-common_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ 
@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-common_wrap.lo `test -f 'common_wrap.cpp' || echo '$(srcdir)/'`common_wrap.cpp _arc_la-loader_wrap.lo: loader_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-loader_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-loader_wrap.Tpo -c -o _arc_la-loader_wrap.lo `test -f 'loader_wrap.cpp' || echo '$(srcdir)/'`loader_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-loader_wrap.Tpo $(DEPDIR)/_arc_la-loader_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='loader_wrap.cpp' object='_arc_la-loader_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-loader_wrap.lo `test -f 'loader_wrap.cpp' || echo '$(srcdir)/'`loader_wrap.cpp _arc_la-message_wrap.lo: message_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-message_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-message_wrap.Tpo -c -o _arc_la-message_wrap.lo `test -f 'message_wrap.cpp' || echo '$(srcdir)/'`message_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-message_wrap.Tpo $(DEPDIR)/_arc_la-message_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='message_wrap.cpp' object='_arc_la-message_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-message_wrap.lo `test -f 'message_wrap.cpp' || echo '$(srcdir)/'`message_wrap.cpp _arc_la-communication_wrap.lo: communication_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-communication_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-communication_wrap.Tpo -c -o _arc_la-communication_wrap.lo `test -f 'communication_wrap.cpp' || echo '$(srcdir)/'`communication_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-communication_wrap.Tpo $(DEPDIR)/_arc_la-communication_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='communication_wrap.cpp' object='_arc_la-communication_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-communication_wrap.lo `test -f 'communication_wrap.cpp' || echo '$(srcdir)/'`communication_wrap.cpp _arc_la-compute_wrap.lo: compute_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) 
$(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-compute_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-compute_wrap.Tpo -c -o _arc_la-compute_wrap.lo `test -f 'compute_wrap.cpp' || echo '$(srcdir)/'`compute_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-compute_wrap.Tpo $(DEPDIR)/_arc_la-compute_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='compute_wrap.cpp' object='_arc_la-compute_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-compute_wrap.lo `test -f 'compute_wrap.cpp' || echo '$(srcdir)/'`compute_wrap.cpp _arc_la-credential_wrap.lo: credential_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-credential_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-credential_wrap.Tpo -c -o _arc_la-credential_wrap.lo `test -f 'credential_wrap.cpp' || echo '$(srcdir)/'`credential_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-credential_wrap.Tpo $(DEPDIR)/_arc_la-credential_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='credential_wrap.cpp' object='_arc_la-credential_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-credential_wrap.lo `test -f 'credential_wrap.cpp' || echo '$(srcdir)/'`credential_wrap.cpp _arc_la-data_wrap.lo: data_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-data_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-data_wrap.Tpo -c -o _arc_la-data_wrap.lo `test -f 'data_wrap.cpp' || echo '$(srcdir)/'`data_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-data_wrap.Tpo $(DEPDIR)/_arc_la-data_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='data_wrap.cpp' object='_arc_la-data_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-data_wrap.lo `test -f 'data_wrap.cpp' || echo '$(srcdir)/'`data_wrap.cpp _arc_la-delegation_wrap.lo: delegation_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-delegation_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-delegation_wrap.Tpo -c -o _arc_la-delegation_wrap.lo `test -f 'delegation_wrap.cpp' || echo '$(srcdir)/'`delegation_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-delegation_wrap.Tpo $(DEPDIR)/_arc_la-delegation_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ 
source='delegation_wrap.cpp' object='_arc_la-delegation_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-delegation_wrap.lo `test -f 'delegation_wrap.cpp' || echo '$(srcdir)/'`delegation_wrap.cpp _arc_la-security_wrap.lo: security_wrap.cpp @am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -MT _arc_la-security_wrap.lo -MD -MP -MF $(DEPDIR)/_arc_la-security_wrap.Tpo -c -o _arc_la-security_wrap.lo `test -f 'security_wrap.cpp' || echo '$(srcdir)/'`security_wrap.cpp @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/_arc_la-security_wrap.Tpo $(DEPDIR)/_arc_la-security_wrap.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='security_wrap.cpp' object='_arc_la-security_wrap.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_arc_la_CXXFLAGS) $(CXXFLAGS) -c -o _arc_la-security_wrap.lo `test -f 'security_wrap.cpp' || echo '$(srcdir)/'`security_wrap.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-nodist_pkgpythonPYTHON: $(nodist_pkgpython_PYTHON) @$(NORMAL_INSTALL) test -z "$(pkgpythondir)" || $(MKDIR_P) "$(DESTDIR)$(pkgpythondir)" @list='$(nodist_pkgpython_PYTHON)'; dlist=; list2=; test -n "$(pkgpythondir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then b=; else b="$(srcdir)/"; fi; \ if test -f $$b$$p; then \ $(am__strip_dir) \ dlist="$$dlist $$f"; \ list2="$$list2 $$b$$p"; \ else :; fi; \ done; \ for file in $$list2; do echo $$file; done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgpythondir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgpythondir)" || exit $$?; \ done || exit $$?; \ if test -n "$$dlist"; then \ if test -z "$(DESTDIR)"; then \ PYTHON=$(PYTHON) $(py_compile) --basedir "$(pkgpythondir)" $$dlist; \ else \ PYTHON=$(PYTHON) $(py_compile) --destdir "$(DESTDIR)" --basedir "$(pkgpythondir)" $$dlist; \ fi; \ else :; fi uninstall-nodist_pkgpythonPYTHON: @$(NORMAL_UNINSTALL) @list='$(nodist_pkgpython_PYTHON)'; test -n "$(pkgpythondir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ filesc=`echo "$$files" | sed 's|$$|c|'`; \ fileso=`echo "$$files" | sed 's|$$|o|'`; \ echo " ( cd '$(DESTDIR)$(pkgpythondir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkgpythondir)" && rm -f $$files || exit $$?; \ echo " ( cd '$(DESTDIR)$(pkgpythondir)' && rm -f" $$filesc ")"; \ cd "$(DESTDIR)$(pkgpythondir)" && rm -f $$filesc || exit $$?; \ echo " ( cd '$(DESTDIR)$(pkgpythondir)' && rm -f" $$fileso ")"; \ cd "$(DESTDIR)$(pkgpythondir)" && rm -f $$fileso ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) 
$(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pyexecdir)" "$(DESTDIR)$(pkgpythondir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
-test -z "$(BUILT_SOURCES)" || rm -f $(BUILT_SOURCES) -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) clean: clean-am clean-am: clean-generic clean-libtool clean-local \ clean-pyexecLTLIBRARIES mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-nodist_pkgpythonPYTHON @$(NORMAL_INSTALL) $(MAKE) $(AM_MAKEFLAGS) install-data-hook install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pyexecLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-nodist_pkgpythonPYTHON \ uninstall-pyexecLTLIBRARIES .MAKE: all check install install-am install-data-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool clean-local clean-pyexecLTLIBRARIES ctags \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-data-hook install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man \ install-nodist_pkgpythonPYTHON install-pdf install-pdf-am \ install-ps install-ps-am install-pyexecLTLIBRARIES \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags uninstall uninstall-am \ uninstall-nodist_pkgpythonPYTHON uninstall-pyexecLTLIBRARIES $(ARCSWIGINIT) __init__.py: %: $(top_srcdir)/python/% cp $< $@ # Only remake index.xml if it is unavailable. # It really depends on Doxyfile.api, but this file changes whenever # configure is run, and we do not want to remake index.xml all # the time since doxygen is currently not required to be present. 
#index.xml: $(top_srcdir)/python/Doxyfile.api index.xml: $(DOXYGEN) $(top_builddir)/python/Doxyfile.api cp -p api/xml/index.xml $@ rm -rf api pydoxygen.i: index.xml $(PYTHON) $(top_srcdir)/python/doxy2swig.py $< $@ clean-local: -rm -rf __pycache__ @AMDEP_TRUE@-include $(ARCWRAPDEPS) $(ARCPYLIBS): %.py: %_wrap.cpp $(ARCWRAPPERS): %_wrap.cpp: $(top_srcdir)/swig/%.i $(top_srcdir)/swig/Arc.i $(PYDOXFILE) mkdir -p $(DEPDIR) grep -h '^#' $< $(top_srcdir)/swig/Arc.i $(PYDOXFILE) | \ $(CXXCOMPILE) $(_arc_la_CXXFLAGS) -M -MT $*_wrap.cpp -MT arc_$*.py -MP -MF "$(DEPDIR)/$*_wrap.deps" -x c++ - $(SWIG) -v -c++ -python $(SWIG_PY3) -threads -o $*_wrap.cpp \ -I/usr/include -I$(top_srcdir)/include \ $(PYDOXFLAGS) $(SWIG_IS_DBJSTORE_ENABLED) \ $(AM_CPPFLAGS) $(OPENSSL_CFLAGS) $(top_srcdir)/swig/$*.i # Workaround for RHEL5 swig + EPEL5 python26 sed 's/\(^\s*char \*.*\) = \(.*ml_doc\)/\1 = (char *)\2/' $*_wrap.cpp > $*_wrap.cpp.new mv $*_wrap.cpp.new $*_wrap.cpp # Ditto - for 64 bit sed 's/^\(\s*char \*cstr;\) int len;/#if PY_VERSION_HEX < 0x02050000 \&\& !defined(PY_SSIZE_T_MIN)\n&\n#else\n\1 Py_ssize_t len;\n#endif/' $*_wrap.cpp > $*_wrap.cpp.new mv $*_wrap.cpp.new $*_wrap.cpp # Dont allow threading when deleting SwigPyIterator objects sed '/*_wrap_delete_@SWIG_PYTHON_NAMING@Iterator/,/SWIG_PYTHON_THREAD_END/ s/.*SWIG_PYTHON_THREAD_[A-Z]*_ALLOW.*//' $*_wrap.cpp > $*_wrap.cpp.new mv $*_wrap.cpp.new $*_wrap.cpp # Dont allow threading when handling SWIG Python iterators (see bug # 2683). Fixed in SWIG version 2. if test "x@SWIG2@" != "xyes"; then \ sed '/*_wrap_@SWIG_PYTHON_NAMING@Iterator_/,/SWIG_PYTHON_THREAD_END/ s/.*SWIG_PYTHON_THREAD_[A-Z]*_ALLOW.*//' $*_wrap.cpp > $*_wrap.cpp.new; \ mv $*_wrap.cpp.new $*_wrap.cpp; \ fi # When mapping a template with a template class no space is inserted # between the two right angle brackets. sed 's/>>(new/> >(new/g' $*_wrap.cpp > $*_wrap.cpp.new mv $*_wrap.cpp.new $*_wrap.cpp # When mapping a template with another template class as argument, and # that template class takes two classes as argument, then older swigs # put parentheses around the two class arguments, e.g. T<(A,B)>, not # valid syntax, should be T<A,B> instead. sed 's/<(\([,:[:alnum:]]*\))>/<\1>/g' $*_wrap.cpp > $*_wrap.cpp.tmp mv $*_wrap.cpp.tmp $*_wrap.cpp # Fix python3 relative import problem sed "s/import _$*/from arc &/" < $*.py > $*.py.new mv $*.py.new $*.py # In swig 2.0.12 the erase method of the SharedBenchmarkMap type (swig template) uses a wrong name for the wrapped std::map::erase method. sed 's/std_map_Sl_std_string_Sc_double_Sc_std_less_Sl_std_string_Sg__Sc_std_allocator_Sl_std_pair_Sl_std_string_SS_const_Sc_double_Sg__Sg__Sg/std_map_Sl_std_string_Sc_double_Sg/g' $*_wrap.cpp > $*_wrap.cpp.tmp mv $*_wrap.cpp.tmp $*_wrap.cpp # Fix python3 relative import problem if the module imports other submodules through %import(module= in the *.i files for i in $(ARCSWIGLIBS); do\ if grep -q "^import $$i" $*.py; then \ sed "s/import $$i/from arc &/" < $*.py > $*.py.new ;\ mv $*.py.new $*.py; \ fi;\ done install-data-hook: if test -n "$(PYTHON_SOABI)" ; then \ mv $(DESTDIR)$(pyexecdir)/_arc.so \ $(DESTDIR)$(pyexecdir)/_arc.$(PYTHON_SOABI).so ; \ fi .NOTPARALLEL: %.lo %.o # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT: nordugrid-arc-5.4.2/python/altpython/arc/PaxHeaders.7502/index.xml0000644000000000000000000000013213214316031023127 xustar000000000000000030 mtime=1513200665.238811558 30 atime=1513200665.238811558 30 ctime=1513200665.251811718 nordugrid-arc-5.4.2/python/altpython/arc/index.xml0000644000175000002070000000030613214316031023174 0ustar00mockbuildmock00000000000000 nordugrid-arc-5.4.2/python/altpython/PaxHeaders.7502/Makefile.am0000644000000000000000000000012612240743250022575 xustar000000000000000027 mtime=1384367784.271138 30 atime=1513200592.450921334 29 ctime=1513200665.21281124 nordugrid-arc-5.4.2/python/altpython/Makefile.am0000644000175000002070000000001612240743250022635 0ustar00mockbuildmock00000000000000SUBDIRS = arc nordugrid-arc-5.4.2/python/altpython/PaxHeaders.7502/Makefile.in0000644000000000000000000000013213214315720022602 xustar000000000000000030 mtime=1513200592.482921725 30 atime=1513200652.792659337 30 ctime=1513200665.213811253 nordugrid-arc-5.4.2/python/altpython/Makefile.in0000644000175000002070000005545713214315720022670 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = python/altpython DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = \ $(top_srcdir)/m4/ac_cxx_have_dbdeadlockexception.m4 \ $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/po.m4 \ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/VERSION \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive 
installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_SOABI = @ALTPYTHON_SOABI@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCWS_CFLAGS = @ARCWS_CFLAGS@ ARCWS_LIBS = @ARCWS_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARGUS_CFLAGS = @ARGUS_CFLAGS@ ARGUS_LIBS = @ARGUS_LIBS@ AS = @AS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CANLXX_CFLAGS = @CANLXX_CFLAGS@ CANLXX_LIBS = @CANLXX_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DBCXX_CPPFLAGS = @DBCXX_CPPFLAGS@ DBCXX_LIBS = @DBCXX_LIBS@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ EXTRA_LIBS = @EXTRA_LIBS@ FGREP = @FGREP@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = 
@GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ GTHREAD_CFLAGS = @GTHREAD_CFLAGS@ GTHREAD_LIBS = @GTHREAD_LIBS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ JAR = @JAR@ JAR_JFLAGS = @JAR_JFLAGS@ JAVA = @JAVA@ JAVAC = @JAVAC@ JAVAC_FLAGS = @JAVAC_FLAGS@ JAVA_FLAGS = @JAVA_FLAGS@ JDK_CFLAGS = @JDK_CFLAGS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ MKINSTALLDIRS = @MKINSTALLDIRS@ MSGFMT = @MSGFMT@ MSGMERGE = @MSGMERGE@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_1_1_CFLAGS = @OPENSSL_1_1_CFLAGS@ OPENSSL_1_1_LIBS = @OPENSSL_1_1_LIBS@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_SOABI = @PYTHON_SOABI@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ REGEX_LIBS = @REGEX_LIBS@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SOCKET_LIBS = @SOCKET_LIBS@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = 
@SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ TEST_DIR = @TEST_DIR@ TRIAL = @TRIAL@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ WINDRES = @WINDRES@ XGETTEXT = @XGETTEXT@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ jnidir = @jnidir@ jninativedir = @jninativedir@ ldap_monitor_prefix = @ldap_monitor_prefix@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ nodename = @nodename@ oldincludedir = @oldincludedir@ pbs_bin_path = @pbs_bin_path@ pbs_log_path = @pbs_log_path@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ws_monitor_prefix = @ws_monitor_prefix@ SUBDIRS = arc all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign python/altpython/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign python/altpython/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-5.4.2/PaxHeaders.7502/install-sh0000644000000000000000000000013113214315717017200 xustar000000000000000030 mtime=1513200591.899914595 30 atime=1513200591.877914326 29 ctime=1513200658.61773058 nordugrid-arc-5.4.2/install-sh0000755000175000002070000003253713214315717017264 0ustar00mockbuildmock00000000000000#!/bin/sh # install - install a program, script, or datafile scriptversion=2009-04-28.21; # UTC # This originates from X11R5 (mit/util/scripts/install.sh), which was # later released in X11R6 (xc/config/util/install.sh) with the # following copyright and license. # # Copyright (C) 1994 X Consortium # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN # AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- # TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
# # Except as contained in this notice, the name of the X Consortium shall not # be used in advertising or otherwise to promote the sale, use or other deal- # ings in this Software without prior written authorization from the X Consor- # tium. # # # FSF changes to this file are in the public domain. # # Calling this script install-sh is preferred over install.sh, to prevent # `make' implicit rules from creating a file called install from it # when there is no Makefile. # # This script is compatible with the BSD install script, but was written # from scratch. nl=' ' IFS=" "" $nl" # set DOITPROG to echo to test this script # Don't use :- since 4.3BSD and earlier shells don't like it. doit=${DOITPROG-} if test -z "$doit"; then doit_exec=exec else doit_exec=$doit fi # Put in absolute file names if you don't have them in your path; # or use environment vars. chgrpprog=${CHGRPPROG-chgrp} chmodprog=${CHMODPROG-chmod} chownprog=${CHOWNPROG-chown} cmpprog=${CMPPROG-cmp} cpprog=${CPPROG-cp} mkdirprog=${MKDIRPROG-mkdir} mvprog=${MVPROG-mv} rmprog=${RMPROG-rm} stripprog=${STRIPPROG-strip} posix_glob='?' initialize_posix_glob=' test "$posix_glob" != "?" || { if (set -f) 2>/dev/null; then posix_glob= else posix_glob=: fi } ' posix_mkdir= # Desired mode of installed file. mode=0755 chgrpcmd= chmodcmd=$chmodprog chowncmd= mvcmd=$mvprog rmcmd="$rmprog -f" stripcmd= src= dst= dir_arg= dst_arg= copy_on_change=false no_target_directory= usage="\ Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE or: $0 [OPTION]... SRCFILES... DIRECTORY or: $0 [OPTION]... -t DIRECTORY SRCFILES... or: $0 [OPTION]... -d DIRECTORIES... In the 1st form, copy SRCFILE to DSTFILE. In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. In the 4th, create DIRECTORIES. Options: --help display this help and exit. --version display version info and exit. -c (ignored) -C install only if different (preserve the last data modification time) -d create directories instead of installing files. -g GROUP $chgrpprog installed files to GROUP. -m MODE $chmodprog installed files to MODE. -o USER $chownprog installed files to USER. -s $stripprog installed files. -t DIRECTORY install into DIRECTORY. -T report an error if DSTFILE is a directory. Environment variables override the default commands: CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG " while test $# -ne 0; do case $1 in -c) ;; -C) copy_on_change=true;; -d) dir_arg=true;; -g) chgrpcmd="$chgrpprog $2" shift;; --help) echo "$usage"; exit $?;; -m) mode=$2 case $mode in *' '* | *' '* | *' '* | *'*'* | *'?'* | *'['*) echo "$0: invalid mode: $mode" >&2 exit 1;; esac shift;; -o) chowncmd="$chownprog $2" shift;; -s) stripcmd=$stripprog;; -t) dst_arg=$2 shift;; -T) no_target_directory=true;; --version) echo "$0 $scriptversion"; exit $?;; --) shift break;; -*) echo "$0: invalid option: $1" >&2 exit 1;; *) break;; esac shift done if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then # When -d is used, all remaining arguments are directories to create. # When -t is used, the destination is already specified. # Otherwise, the last argument is the destination. Remove it from $@. for arg do if test -n "$dst_arg"; then # $@ is not empty: it contains at least $arg. set fnord "$@" "$dst_arg" shift # fnord fi shift # arg dst_arg=$arg done fi if test $# -eq 0; then if test -z "$dir_arg"; then echo "$0: no input file specified." >&2 exit 1 fi # It's OK to call `install-sh -d' without argument. # This can happen when creating conditional directories. 
exit 0 fi if test -z "$dir_arg"; then trap '(exit $?); exit' 1 2 13 15 # Set umask so as not to create temps with too-generous modes. # However, 'strip' requires both read and write access to temps. case $mode in # Optimize common cases. *644) cp_umask=133;; *755) cp_umask=22;; *[0-7]) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw='% 200' fi cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; *) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw=,u+rw fi cp_umask=$mode$u_plus_rw;; esac fi for src do # Protect names starting with `-'. case $src in -*) src=./$src;; esac if test -n "$dir_arg"; then dst=$src dstdir=$dst test -d "$dstdir" dstdir_status=$? else # Waiting for this to be detected by the "$cpprog $src $dsttmp" command # might cause directories to be created, which would be especially bad # if $src (and thus $dsttmp) contains '*'. if test ! -f "$src" && test ! -d "$src"; then echo "$0: $src does not exist." >&2 exit 1 fi if test -z "$dst_arg"; then echo "$0: no destination specified." >&2 exit 1 fi dst=$dst_arg # Protect names starting with `-'. case $dst in -*) dst=./$dst;; esac # If destination is a directory, append the input filename; won't work # if double slashes aren't ignored. if test -d "$dst"; then if test -n "$no_target_directory"; then echo "$0: $dst_arg: Is a directory" >&2 exit 1 fi dstdir=$dst dst=$dstdir/`basename "$src"` dstdir_status=0 else # Prefer dirname, but fall back on a substitute if dirname fails. dstdir=` (dirname "$dst") 2>/dev/null || expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$dst" : 'X\(//\)[^/]' \| \ X"$dst" : 'X\(//\)$' \| \ X"$dst" : 'X\(/\)' \| . 2>/dev/null || echo X"$dst" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q' ` test -d "$dstdir" dstdir_status=$? fi fi obsolete_mkdir_used=false if test $dstdir_status != 0; then case $posix_mkdir in '') # Create intermediate dirs using mode 755 as modified by the umask. # This is like FreeBSD 'install' as of 1997-10-28. umask=`umask` case $stripcmd.$umask in # Optimize common cases. *[2367][2367]) mkdir_umask=$umask;; .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; *[0-7]) mkdir_umask=`expr $umask + 22 \ - $umask % 100 % 40 + $umask % 20 \ - $umask % 10 % 4 + $umask % 2 `;; *) mkdir_umask=$umask,go-w;; esac # With -d, create the new directory with the user-specified mode. # Otherwise, rely on $mkdir_umask. if test -n "$dir_arg"; then mkdir_mode=-m$mode else mkdir_mode= fi posix_mkdir=false case $umask in *[123567][0-7][0-7]) # POSIX mkdir -p sets u+wx bits regardless of umask, which # is incompatible with FreeBSD 'install' when (umask & 300) != 0. ;; *) tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 if (umask $mkdir_umask && exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 then if test -z "$dir_arg" || { # Check for POSIX incompatibilities with -m. # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or # other-writeable bit of parent directory when it shouldn't. # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. ls_ld_tmpdir=`ls -ld "$tmpdir"` case $ls_ld_tmpdir in d????-?r-*) different_mode=700;; d????-?--*) different_mode=755;; *) false;; esac && $mkdirprog -m$different_mode -p -- "$tmpdir" && { ls_ld_tmpdir_1=`ls -ld "$tmpdir"` test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" } } then posix_mkdir=: fi rmdir "$tmpdir/d" "$tmpdir" else # Remove any dirs left behind by ancient mkdir implementations. 
rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null fi trap '' 0;; esac;; esac if $posix_mkdir && ( umask $mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" ) then : else # The umask is ridiculous, or mkdir does not conform to POSIX, # or it failed possibly due to a race condition. Create the # directory the slow way, step by step, checking for races as we go. case $dstdir in /*) prefix='/';; -*) prefix='./';; *) prefix='';; esac eval "$initialize_posix_glob" oIFS=$IFS IFS=/ $posix_glob set -f set fnord $dstdir shift $posix_glob set +f IFS=$oIFS prefixes= for d do test -z "$d" && continue prefix=$prefix$d if test -d "$prefix"; then prefixes= else if $posix_mkdir; then (umask=$mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break # Don't fail if two instances are running concurrently. test -d "$prefix" || exit 1 else case $prefix in *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; *) qprefix=$prefix;; esac prefixes="$prefixes '$qprefix'" fi fi prefix=$prefix/ done if test -n "$prefixes"; then # Don't fail if two instances are running concurrently. (umask $mkdir_umask && eval "\$doit_exec \$mkdirprog $prefixes") || test -d "$dstdir" || exit 1 obsolete_mkdir_used=true fi fi fi if test -n "$dir_arg"; then { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 else # Make a couple of temp file names in the proper directory. dsttmp=$dstdir/_inst.$$_ rmtmp=$dstdir/_rm.$$_ # Trap to clean up those temp files at exit. trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 # Copy the file name to the temp name. (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && # and set any options; do chmod last to preserve setuid bits. # # If any of these fail, we abort the whole thing. If we want to # ignore errors from any of these, just make sure not to ignore # errors from the above "$doit $cpprog $src $dsttmp" command. # { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && # If -C, don't bother to copy if it wouldn't change the file. if $copy_on_change && old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && eval "$initialize_posix_glob" && $posix_glob set -f && set X $old && old=:$2:$4:$5:$6 && set X $new && new=:$2:$4:$5:$6 && $posix_glob set +f && test "$old" = "$new" && $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 then rm -f "$dsttmp" else # Rename the file to the real destination. $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || # The rename failed, perhaps because mv can't rename something else # to itself, or perhaps because mv is so ancient that it does not # support -f. { # Now remove or move aside any old file at destination location. # We try this two ways since rm can't unlink itself on some # systems and the destination file might be busy for other # reasons. In this case, the final cleanup might fail but the new # file should still install successfully. { test ! -f "$dst" || $doit $rmcmd -f "$dst" 2>/dev/null || { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } } || { echo "$0: cannot unlink or rename $dst" >&2 (exit 1); exit 1 } } && # Now rename the file to the real destination. 
$doit $mvcmd "$dsttmp" "$dst" } fi || exit 1 trap '' 0 fi done # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: nordugrid-arc-5.4.2/PaxHeaders.7502/ChangeLog0000644000000000000000000000012413213474355016756 xustar000000000000000027 mtime=1512995053.469043 27 atime=1513200577.119733 30 ctime=1513200658.612730519 nordugrid-arc-5.4.2/ChangeLog0000644000175000002070000011006613213474355017027 0ustar00mockbuildmock000000000000002017-12-08 Christian Ulrik Søttrup * Turned on multicore single node scratch by default Revs: {33327} Type: bugfix 2017-11-21 Florido Paganelli * Second patch for negative numbers in GLUE2 VO ComputingShares (bug 3686) Revs: {33316} Type: bugfix 2017-09-13 Florido Paganelli * Patch for negative numbers in GLUE2 VO ComputingShares Revs: {33017} Type: bugfix 2017-05-26 Aleksandr Konstantinov * Fix for multiple file transfers cross-affecting transferred sizes Revs: {32506} Type: bugfix 2017-05-26 Aleksandr Konstantinov * File handle leaks and improper usage fixes Revs: {32468,32469,32471,32472,32473,32474,32507} Type: bugfix 2017-05-26 Aleksandr Konstantinov * Loadable modules loading speed-up and possible bottleneck removal Revs: {32455,32578} Type: bugfix 2017-05-26 Aleksandr Konstantinov * Replacing unreliable Glib process spawning with pure fork Revs: {32410,32441} Type: bugfix 2017-05-26 Aleksandr Konstantinov * SQLite delegation storage speed-up Revs: {32400,32401} Type: bugfix 2017-05-26 Aleksandr Konstantinov * Numerous memory leaks and corruptions fixed. Revs: {32357,32358,32359,32360,32363,32364,32365,32366,32367,32368,32370,32372,32372,32373,32374,32395,32469,32462} Type: bugfix 2017-05-12 Andrii Salnikov * ARCHERY client implementation Type: new feature 2017-03-28 Maiken Pedersen * Arex-ganglia integration is now working. New histograms have also been included. Feature is configurable using enable_ganglia="yes" and ganglialocation="" (usually /usr/bin). Revs:{31856,31859,31870,31875,31876,31877,31880} also linked with Aleksandr Konstantinov's commits Revs:{31864,31879,31899} 2017-03-03 Aleksandr Konstantinov * Ganglia support for job state changes integrated directly into A-Rex. Revs: {31864,31879,31899} Type: enhancement 2017-01-28 Aleksandr Konstantinov * Handle unexpected callback in GridFTP related code. Improves stability of GridFTP DMC. Revs: {31889, 31890, 31894, 31895} Type: bugfix 2017-01-25 Aleksandr Konstantinov * Introduced support for sqlite as delegation database. To replace unstable BDB. Revs: {31777,31779,31784} Type: bugfix 2017-02-24 Florido Paganelli * Removal of heartbeat between A-REX and infoproviders. (bugs 2036,3573) A consequence of these fixes is that A-REX should be more stable, but infoproviders might take more time. In particular: - The default infoproviders timeout is changed to 3 hours - The default bdii timeout is changed to 6 hours Revs:{31857,31858,31871,31873} DocRevs:{31872,31874} Type:bug * Removal of leftover code from old infoproviders, namely support for gridftp storage. (bug 3286) ARC can no longer publish gridftp Storage Element information.
Revs:{31866,31867,31868,31869} Type: bug 2017-02-24 David Cameron * Add new option to allow all data transfer log messages to be collected in a central log (bug 2598) Revs:{31885} Type:minor 2016-12-15 Aleksandr Konstantinov <@> * Fixed bug 3599 Revs:{31454} Type:bug 2016-12-13 Aleksandr Konstantinov <@> * Fixed bug 3622 Revs:{31651} Type:bug * Fixed bug reported on mailing list Revs:{31653} Type:bug 2016-12-13 Florido Paganelli * Reduced frequency of infoproviders performance data collection to half an hour. This solves bug #3616. Users are encouraged to clean up the folder where the NYTProf performance collection is stored, called perl_nytprof. Default location is /var/log/arc/perfdata/perl_nytprof. It is safe to run rm -f /var/log/arc/perfdata/perl_nytprof/* while A-REX is running. Revs:{31600,31649} Type:bug 2016-12-09 Christian Ulrik Søttrup * Added support for lowercase slurm in arc.conf to match other LRMS Revs:{31580} Type:minor 2016-12-09 Christian Ulrik Søttrup * Fixed bug 3603 Revs:{31566,31567} Type:bug * Fixed problem in condor info backend reported by Frederic Schaer Revs:{31569} Type:bug 2016-12-08 Florido Paganelli * Fixed issues with paths and SLURM support in PerfData.pl Revs:{31557,31558,31576} Type:bug * Data collection in perferator is now done using PerfData.pl instead of nytprofmerge (attempts to ease issues in bug #3616) Revs:{31578,31579} Type:minor 2016-12-02 Martin Skou Andersen * Support output redirection from shell in arccat (fixes bug 3613) [bugfix;{31582}] 2016-11-30 Aleksandr Konstantinov <@> * Fixed bug 3538 Revs:{31577} Type:bug 2016-11-24 Aleksandr Konstantinov <@> * Fixed bug Revs:{31568} Type:bug 2016-11-03 Aleksandr Konstantinov <@> * Fixed bug 3596 Revs:{31516} Type:bug 2016-10-31 Andrii Salnikov * [ARC 6] New RunTimeEnvironment handling approach without sharing a scripts directory * Introducing system-defined RTE location in /usr/share/arc/rte/ in addition to user-defined in arc.conf * ENV/PROXY and ENV/RTE scripts to be packaged 2016-10-29 Aleksandr Konstantinov <@> * Partially fixed bug 3557 Revs:{31512} Type:bug 2016-10-24 Aleksandr Konstantinov <@> * Fixed bug 3610 Revs:{31473} Type:bug 2016-10-20 Aleksandr Konstantinov <@> * Fixed bug 3604, 3586, 3579 Revs:{31460, 31461, 31462} Type:bug 2016-10-03 Florido Paganelli * Added PerfData.pl, an offline script to generate ARC performance data collected from a NYTProf database file generated by infoproviders. {31117,31347} 2016-08-25 David Cameron * Added new configuration options hostname and port in the acix/cacheserver block 2016-08-09 Aleksandr Konstantinov <@> * Introduced tool for converting delegation database Revs:{31122} Type:user friendliness 2016-07-07 Florido Paganelli * Added support for VO in the GLUE2 schema. Every authorizedvo will generate a different ComputingShare with statistics about the defined VO. For minimum backward compatibility there is a Share without any VO that presents information the same way ARC did before this change, but doesn't list all the VOs as in the previous version. This breaks backward compatibility with EMIES clients.
{29933,30067,30077,30078,30288,30582,30583,31498} 2016-07-05 Aleksandr Konstantinov <@> * Support for faking VOMS assignment of user on cluster side Revs:{31020} Type:flexibility enhancement 2016-07-04 David Cameron * Added support for using Rucio as a proxy to read/write to objectstores 2016-07-04 David Cameron * Added new URL option "relativeURI" to force HTTP operations to use relative paths 2016-06-13 David Cameron * Changed the third parameter of cacheaccess to allow regular expressions 2016-05-24 Aleksandr Konstantinov <@> * Fixed bug 3561 Revs:{30285} Type:bug 2016-05-22 Aleksandr Konstantinov <@> * Fixed bug 3560, 3558 Revs:{30266} Type:bug 2016-04-19 David Cameron * Added new URL option "failureallowed" - if set then a failed transfer does not fail the job 2016-03-30 David Cameron * Changed behaviour of gridftp jobplugin so it does not restart failed jobs automatically when proxy is renewed. 2016-03-18 David Cameron * Add new options for performance reporting: enable_perflog_reporting and perflogdir 2016-02-29 Aleksandr Konstantinov <@> * Extended authorization to WS interfaces Revs:{30064} Type:bug 2016-01-29 Aleksandr Konstantinov <@> * Fixed conflict of internal code with VOMS libraries Revs:{29976} Type:bug 2016-01-19 Andrii Salnikov * Nordugridmap code changes to keep log records format the same across all ARC components * Review of log messages text in nordugridmap * Logging to file now enabled by default in nordugridmap 2015-11-17 Gabor Szigeti * Add file transfer statistics information to SGAS accounting record. (fixes bug #3519) 2015-09-10 Anders Waananen * Remove uploader and downloader binaries 2015-06-18 Christian Ulrik Søttrup * Bugfix: fixed file permission filter in submit-condor-job 2015-06-18 Gabor Szigeti * Add accounting records re-report functionality by JURA. (fixes bug #3458) 2015-06-10 Martin Skou Andersen * [JobSupervisor] Add method for selecting jobs using custom selector class (fixes bug 3484). * [Python examples] Better checking of python examples. 2015-06-04 Andrii Salnikov * Added options to allow setting ownership/permissions on generated gridmapfile 2015-06-02 Gabor Szigeti * Extend (with timestamp) JURA's log to long format. (fixes bug #3480) 2015-04-28 Christian Ulrik Søttrup * Changed priority calculation for SGE jobs to only negative priorities. * Fixed comment typos. 2015-03-28 Gabor Roczei * Bugfix: adding correct free slot number to SLURM partition. This can be useful in a multicore system where the jobs can use more than one core 2015-03-23 Christian Ulrik Søttrup * Changed scan-ll-job to use Luca's method. Should solve 3395 for Rod and improve performance. * Added Luca to AUTHORS 2015-03-19 Christian Ulrik Søttrup * Fixed a possible double output of nodename to diag in PBS and SLURM * Added a baseline nodecount to diag file 2015-03-18 Christian Ulrik Søttrup * Added infosystem support for new format of exec_host in PBS backend 2015-03-10 Christian Ulrik Søttrup * Fixed wrong debug output in Condor.pm * Added option to use sacct in slurm backend 2015-01-26 Christian Ulrik Søttrup * Added a defined check to LL.pm for hosts and walltime to get rid of undefined warnings when running perl with strict. 2015-01-22 Christian Ulrik Søttrup * Added a check for the availability of qacct to the sge backend, * Changed RegExp to work for several versions of llstatus in LL.pm 2015-01-22 David Cameron * Add new arc.conf option cachespacetool to allow a user-specified tool for the cache cleaner to find file system space information.
2015-01-20 Christian Ulrik Søttrup
  * Change submit_common.sh to use the compat config parser.
  * Added warning to backend log when requesting less than 0 cpu/wall time
2015-01-16 Christian Ulrik Søttrup
  * Added support for job priority in backends: Condor, LL, LSF, PBS, SGE, SLURM
2015-01-15 Christian Ulrik Søttrup
  * Added support for recognizing suspended jobs in newer condor versions
  * Added support for per-job history file in the condor backend
2015-01-14 David Cameron
  * Add new arc.conf option cacheshared to enable better management of caches on filesystems shared with other data.
2015-01-12 Gabor Szigeti
  * Accounting message publisher update to SSM 2.1.5.
2014-12-19 Christian Ulrik Søttrup
  * Removed bashisms in condor and boinc backends.
2014-12-16 Christian Ulrik Søttrup
  * Changed "ExecutionUnits" default in submit common to "Processors" to match accounting
2014-12-11 Anders Waananen
  * Remove gridsite dependency and therefore gridftpd gacl file support
  * Remove arcacl client utility
2014-12-11 David Cameron
  * Remove old data-staging related code. Move all data-staging options in arc.conf to the [data-staging] section.
2014-12-05 Jon Kerr Nilsen
  * Remove confusa (moved to obsolete)
  * Move echo services to examples dir
  * Remove arcmigrate
2014-12-02 Anders Waananen
  * Removed LFC and DQ2 DMCs
2014-11-25 Gabor Roczei
  * Adding ComputingShare->WaitingJobs GLUE2 attribute to ARC1 target retriever, because FastestQueueBroker requires it
2014-11-17 Gabor Szigeti
  * Replace the mainnode with the headnode URL in the recordID. (fixes bug #3435)
2014-10-20 Gabor Szigeti
  * Set extra VO Group attribute on every generated CAR accounting record if the proxy does not contain VOMS attributes. (fixes bug #3404)
2014-09-02 Gabor Szigeti
  * Set extra VO attribute on every generated CAR accounting record. (fixes bug #3404)
2014-08-08 Gabor Szigeti
  * Accounting message publisher update to SSM 2.1.3.
2014-08-07 Gabor Roczei
  * Bug fixed: SGE BDII integration has problems with long hostnames (#3306)
2014-08-06 Gabor Szigeti
  * Add new configuration option to set the frequency of accounting message publishing. (fixing bug #3399)
2014-07-28 Gabor Roczei
  * Bug fixed: down nodes cause SGEmod to fail (#3303)
2014-05-09 David Cameron
  * Add writing/deleting capability to xrootd DMC
2014-05-09 Jon Kerr Nilsen
  * Add Rerun option as nordugrid extension to ADL - needed to enable arcresume for EMI-ES (fixes bug 3266).
2014-03-05 Martin Skou Andersen
  * Only parse second part of the inputfiles xRSL attribute value as file size and checksum for GM input (fixes bug 3334). Also simplify parsing of this attribute.
2014-03-04 Martin Skou Andersen
  * Fix invalid C++ code generated by swig 2.0.12.
2014-02-27 Martin Skou Andersen
  * arctest: Print job description, client version and name of the computing service the test job is submitted to (fixes bug 2947).
2014-02-26 Martin Skou Andersen
  * Print summary for arcstat (fixes bug 3145).
  * Improve parsing of the JSDL RangeValue_Type type (fixes bug 2352).
2014-02-26 David Cameron
  * Add Rucio DMC for access to new ATLAS data management system
2014-02-25 Martin Skou Andersen
  * Make it possible in arcstat to print jobs whose status information is not available (fixes bug 3240). Add information to man pages. Print status information in Job::SaveToStream method even if state is undefined.
  * Print warning when attempting to retrieve job with no output files, also do not count such jobs as retrieved in arcget summary (fixes bug 2946).
  * Quote job name when sending notification email (fixes bug 3339). Add some more logging information.
2014-02-25 Martin Skou Andersen
  * Only list specific state for job in long output (fixes bug 3178).
2014-02-21 Martin Skou Andersen
  * Make path to Berkeley DB absolute (fixes bug 3340).
2014-02-20 Anders Waananen
  * Add arc-blahp-logger
2014-02-20 Anders Waananen
  * Remove obsolete (not used) perl module LRMS.pm
2014-02-19 Martin Skou Andersen
  * Add verbose log message which shows the invoked command for job management CLI.
  * Add function which converts a string case-insensitively to a LogLevel object.
  * Treat log levels specified to commands, in configuration etc. as case-insensitive.
2014-02-18 Martin Skou Andersen
  * Add default port and path arguments to URL constructor.
  * Use URL constructor with defaults for constructing URLs in ACCs (fixes issue with IPv6 addresses, bug 3331).
2014-02-17 Martin Skou Andersen
  * Add default port and path arguments to URL constructor.
  * Use URL constructor with defaults for constructing URLs in ACCs (fixes issue with IPv6 addresses, bug 3331).
2014-01-31 David Cameron
  * Add acix_endpoint option to arc.conf to allow A-REX to download cached replicas of input files from other sites
2014-01-22 David Cameron
  * Add ACIX DMC to allow discovery of locations of cached copies of data
2013-12-26 Anders Waananen
  * Package Boinc lrms
2013-12-26 Wenjing Wu
  * Add Boinc lrms
2013-12-06 David Cameron
  * Use 'anonymous' username by default for ftp listing (fixing bug #3312)
2013-11-27 Gabor Szigeti
  * Add VO filter to the jobreporter. (fixing bug #3302)
2013-11-19 Gabor Szigeti
  * Do not create a new element in the aggregation record for every record. (fixing bug #3301)
2013-11-15 Anders Waananen
  * Remove the arc-ur-logger since jura is now a drop-in replacement
2013-11-13 Anders Waananen
  * Remove the old ngclient2arc utility
2013-11-13 Anders Waananen
  * Support unit tests for the alternative python
  * Support unit tests for python 3
  * Reorganise build tree layout for python to make unit tests simpler
  * Add -py3 option to swig if using python3
  * Move python shared library (_arc.*) to
2013-11-13 David Cameron
  * Remove option to use old downloader and uploader for data staging
  * Add ability to expose cache content through A-REX web interface
2013-11-12 Anders Waananen
  * Remove the HED cache client
2013-11-12 Anders Waananen
  * Remove the Chelonia DMC (libdmcarc.so) which handles the arc:// protocol
2013-11-11 Gabor Szigeti
  * Rename the RuntimeEnvironment element in the UR that is sent to SGAS. (fixing bug #3297)
2013-11-05 Gabor Szigeti
  * Add "JURA-ARC-" prefix for an Infrastructure's description in a CAR. Requested by APEL.
  * Extend the LocalJobId with the endtime in UTC time format in the SGAS URs. (fixing bug #3288)
2013-11-05 Anders Waananen
  * Add support for python3 bindings. (fixing bug #3276)
2013-10-31 Gabor Szigeti
  * Remove the unnecessary "s" from the reported Processors value. (fixing bug #3289)
2013-10-30 Florido Paganelli
  * Added support for cluster_alias option in GLUE2, as ServiceName.
  * Default Validity of GLUE2 information is set to 10 minutes.
  * AdminDomain Name is now validated by 'service a-rex validate' to avoid the infosystem crashing at startup
  * LDAP DNs containing characters that need to be base64 encoded are now skipped and not published. This mostly affects NorduGrid schema nordugrid-authuser-name objects.
2013-10-18 Gabor Szigeti
  * Collecting processor count information from the backend (SGE, SLURM) into the accounting record.
2013-10-17 David Cameron
  * Add DQ2 DMC to allow download of ATLAS files using dataset and LFN. The URL convention is dq2://atlddmcat-reader.cern.ch/dataset/lfn
2013-10-08 Gabor Szigeti
  * Collecting information for the aggregation records during the accounting message sending procedure.
  * Send APEL sync record after every successfully sent CAR message. Partly fixes bug 3269.
2013-08-30 Martin Skou Andersen
  * Enable client job management tools to use a Berkeley database for storing information about jobs.
2013-08-21 Martin Skou Andersen
  * EMI-ES: Make use of bulk operations when retrieving list of jobs from service. Performance improvement to arcsync. On the way to fixing bug 3099.
2013-08-20 Martin Skou Andersen
  * arcsync: Pass timeout value specified on the command line to the UserConfig object used. Previously arcsync ignored the specified timeout value. Fixes bug 3243 - a consequence of an unintentional omission in revision 12298.
2013-08-05 Martin Skou Andersen
  * EMI-ES: Make use of bulk operations when submitting jobs directly to a submission endpoint (i.e. using the '--direct' flag with arcsub). On the way to fixing bug 3099.
2013-07-17 Gabor Szigeti
  * Add new (use_ssl) configuration option that can be used during communication with a production accounting server.
2013-06-26 Gabor Szigeti
  * Fixed ServiceLevel value and type in the accounting records.
2013-06-10 Gabor Szigeti
  * Option for setting benchmark parameters in the CAR from arc.conf.
2013-06-05 Dmytro Karpenko
  * Support for job exclusive execution in Condor.
2013-05-10 Gabor Szigeti
  * Better VO group and role parsing mechanism for the CAR.
2013-05-02 Gabor Szigeti
  * Accounting message publisher update to SSM 2.1.0.
2013-04-23 David Cameron
  * Split log for A-REX WS-interface operations into a separate file, specified by wslogfile in arc.conf. Default is /var/log/arc/ws-interface.log.
2013-04-22 Gabor Szigeti
  * Fixed wrong VO attribute with a leading '/' character in the CAR. Required for APEL. (fixes bug #3152)
2013-04-17 David Cameron
  * Add support for in-memory credentials for http and gsiftp-based protocols and use in data staging to avoid the need for creating temporary proxies.
2013-04-16 Martin Skou Andersen
  * EMI-ES: Make use of bulk queries (vector operations in EMI ES terminology) when retrieving job information. Partly fixes bug 3099.
2013-04-12 Gabor Roczei
  * SGE bugfix (#2714), the SGE backend will detect the MainMemorySize
  * SGE bugfix (#2716), correct handling of the Eqw SGE state
2013-03-19 Dmytro Karpenko
  * Condor configuration options in arc.conf have been revised. Condor users are strongly advised to check the updated Condor configuration manual http://www.nordugrid.org/documents/condor-config.html
2013-03-19 Gabor Szigeti
  * Configurable Site attribute from arc.conf. Required for APEL. (fixes bug #3126)
2013-02-06 Martin Skou Andersen
  * Adding support in library for storing job information in a Berkeley DB.
2013-01-31 Florido Paganelli, Mattias Ellert
  * Improved bdii-update automatic configuration. BDII configuration commands are no longer needed in the [infosys] configuration block.
2013-01-29 Dmytro Karpenko
  * SLURMmod.pm plugin with native ARC1 API is introduced.
2013-01-29 Anders Waananen
  * Make arc-ur-logger use the ARC python api to retrieve information in the proxy instead of relying on voms-proxy-info (fixes bug #2597).
2013-01-29 Gabor Szigeti
  * Use 2.0.0 version of SSM for message sending.
2013-01-25 Anders Waananen
  * Rename perftest to arcperftest and add a man page.
  * Move some test and misc tools to a new sub package nordugrid-arc-misc-utils (fixes bug #3048): arcwsrf, arcemiestest, arcperftest, saml_assertion_init
2013-01-21 Gabor Roczei
  * Several exclusive execution related bug fixes
2013-01-22 Gabor Szigeti
  * Add VO attributes to the reported accounting record. (fixes bug #3003)
  * Use different filename for the different archiving record (UR or CAR).
2013-01-15 Martin Skou Andersen
  * Improved backwards compatibility when reading job list files (client side). Job records from 1.x and 2.x are supported; 1.x and 2.x do not support the format used in 3.x. Support for the NOX job record format is dropped. As always, 'arcsync' can be used to recover the job list.
2013-01-15 Florido Paganelli
  * Added infosys support for openldap2.4 on SLX distributions
2012-12-30 Martin Skou Andersen
  * Extending xRSL parser to parse "countpernode" and "exclusiveexecution" attributes used for expressing parallel setup requirements.
2012-12-20 Martin Skou Andersen
  * Extending arctest to print DNs of installed CA-certificates.
2012-12-19 Anders Waananen
  * Remove RLS support
2012-12-19 Martin Skou Andersen
  * [Java] List classes in the ARC Java bindings now implement the Iterable Java interface. That makes it possible to use list objects in the Java foreach statement. Adapting the listiterator classes in order to comply with the Java Iterator interface which those classes now implement.
2012-12-12 David Cameron
  * Add ability to run the DataDelivery service without the need for a host certificate. See documentation for configuration details.
2012-12-12 Christian Søttrup
  * Removed GridFactory back-end
  * Added GRAMI support for EMI-ES parallel specification
  * Added parallel support in the LoadLeveler back-end
  * Fixed bug in DGBridge back-end
2012-12-12 Dmytro Karpenko
  * Added support for GLUE2 in SLURM infoprovider (fixes bug 2904)
2012-12-06 Martin Skou Andersen
  * Significant fixes and cleanup in the CREAM job plugin.
  * Added convenience submit methods to the Submitter class which only take endpoints and job descriptions as arguments.
  * Extended arcsub to be able to submit directly to submission endpoints (no resource discovery and matchmaking).
2012-11-26 David Cameron
  * Change recursion options for arccp and arcls. -r specifies infinite recursion and -D is used for a specific level of recursion.
  * Change A-REX default data staging mechanism to DTR. The old system can still be used by setting enable_dtr="no" in arc.conf.
  * Change DataPoint API - Check() now takes a bool parameter to determine whether metadata information should be obtained during the permission check.
2012-11-20 Aleksandr Konstantinov
  * Support for EMI ES interface version 1.16 implemented in A-REX service.
2012-11-17 Zsombor Nagy
  * Changing the main attributes of the Job class, also in the jobs.xml: remove the Cluster and the InterfaceName attributes, change the JobID from URL to string, add ServiceInformation, JobManagement and JobStatus URL and InterfaceName attributes, and StageIn, StageOut and SessionDir attributes. (Hopefully it can still understand the previous versions of the jobs.xml.)
2012-11-14 Gabor Szigeti
  * [ldap-monitor] Add new column to the Queue table for GLUE2 rendering.
2012-11-14 Florido Paganelli
  * Finalized GLUE2 rendering:
  * Removed ARIS service. All endpoints are now shown inside the A-REX org.nordugrid.arex ComputingService. This makes the GLUE2 rendering backward incompatible with previous versions of ARC.
  * Removed ComputingActivities from LDAP rendering. Can be enabled with the new infosys_glue2_ldap_showactivities option.
  * Finalized EMI-ES rendering, one Endpoint per port-type including data capabilities. This obsoletes the experimental 12.05 EMI-ES GLUE2 rendering and therefore makes this change backward incompatible.
  * Added infosys_glue2_service_qualitylevel option for the sysadmin to manually set the GLUE2 QualityLevel, useful for operations within EGI.
  * Implemented a solution for the client to match the GLUE2 Service ID even if looking at data from the nordugrid schema.
  * Implemented a solution for the client to know which interface the job was submitted to, both in the GLUE2 and NorduGrid schema.
  * Introduced a relay backend in the ldap configuration for interoperability with the GLUE2 top-bdii. Now an ARC CE can directly be aggregated by a top-bdii if the ARIS ldap URL is configured correctly in GOCDB.
  * Changed the top element of the glue1 schema to match the needs of the Glue1 top-bdii; now an ARC CE can directly be aggregated by a top-bdii if the ARIS ldap URL is configured correctly in GOCDB
  * Added several fixes to the Glue1 schema thanks to contributions from Finnish partners: localse, queue names.
2012-11-13 Aleksandr Konstantinov
  * EMI ES interface implemented in ARC client tools. See documentation and man pages for new interface names (an example command follows after this block).
2012-11-10 Gabor Szigeti
  * Update the CAR accounting message version to 1.2.
2012-11-09 Aleksandr Konstantinov
  * Communication with ARGUS PDP is implemented and integrated into A-REX. See documentation for new configuration options.
2012-11-08 Gabor Szigeti
  * Set the non-standard compilation prefix by ssm_master location.
2012-11-08 Gabor Szigeti
  * Send CAR 1.1 accounting messages to the APEL destination. See documentation for configuration options.
2012-11-08 David Cameron
  * Remove support for per-user control directories in A-REX. Add support for substitutions in configuration based on the dynamically mapped local user. See documentation for changes in configuration options.
2012-11-07 Aleksandr Konstantinov
  * Administrator utility gm-jobs is extended to provide information about delegated tokens and filter jobs by identifier. See documentation and man pages for new options.
2012-11-03 Martin Skou Andersen
  * Moving ClientInterface classes to new libarccommunication library. Renaming libarcclient library to libarccompute.
2012-11-02 Martin Skou Andersen
  * [Python] Splitting python bindings into multiple modules (fixes bug 2934).
2012-10-31 Martin Skou Andersen
  * [Java] Transparently load the native ARC JNI-C++ library when using the ARC Java bindings, thus making it redundant to call 'System.loadLibrary("jarc")'.
2012-10-31 Anders Waananen
  * Add NSS as a dependency. Currently used only by arcproxy
2012-10-29 Gabor Szigeti
  * Fixed the logger URL for the APEL destination. (fixes bug 2971)
2012-10-19 Gabor Szigeti
  * Fixed the published MachineName attribute by JURA. (fixes bug 2959)
2012-10-18 Gabor Szigeti
  * Update the regexp for a new type of JobID. (fixes bug 2958)
2012-10-18 Gabor Szigeti
  * Enabled GLUE2 schema rendering together with the old nordugrid schema in the ldap-monitor.
2012-10-17 Anders Waananen
  * Obsolete the python utility module arcom
2012-10-09 David Cameron
  * Move xrootd DMC out of the plugins-globus package and into a new package nordugrid-arc-plugins-xrootd
2012-10-08 Weizhong Qiang
  * Security plugin (Argus PDP Client) supports contacting the Argus PDP server to obtain access control decisions. Argus library dependency is not required for this plugin.
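  As a pointer for the 2012-11-13 client-tools entry above: submission interfaces are selected by name on the client side. The command below is only a sketch; the endpoint is a placeholder, and the interface name shown is the one commonly listed in later arcsub(1) man pages for EMI-ES, so it should be verified against the documentation of the installed release.

    # Placeholder CE; interface name to be checked against 'man arcsub'
    arcsub -c ce.example.org -S org.ogf.glue.emies.activitycreation job.xrsl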
2012-10-05 David Cameron
  * Add new package nordugrid-arc-plugins-gfal which contains the GFAL DMC and depends on gfal2 (RHEL/Fedora only)
2012-10-03 David Cameron
  * Rename ngconfigtest to arc-config-check, install to libexec instead of bin and package with A-REX instead of the common package. Add new "validate" option to the a-rex init script which calls arc-config-check.
2012-10-01 David Cameron
  * Change configuration option "newdatastaging" to "enable_dtr"
2012-09-26 Mattias Ellert
  * Rename libarcdata2 to libarcdata
2012-09-26 Gabor Szigeti
  * Parse the configured (optional) topic value from the server configuration.
2012-09-25 Gabor Szigeti
  * Send CAR records to APEL.
2012-11-19 Aleksandr Konstantinov
  * HTTP implementation in data plugin enhanced to support redirection and chunked transfers for all functions.
2012-09-19 Gabor Szigeti
  * Use 1.2-2 version of SSM for message sending.
2012-09-11 Aleksandr Konstantinov
  * Administrator utility gm-jobs is extended to cancel and clean jobs. See documentation and man pages for new options.
2012-09-08 Weizhong Qiang
  * arcproxy supports detection of the nss db from the default locations of firefox, seamonkey and thunderbird.
2012-09-06 David Cameron
  * Remove ability to set locale per LogDestination
2012-09-04 Weizhong Qiang
  * arcproxy supports proxy retrieval from a myproxy server with VOMS AC included
  * arcproxy supports proxy generation with VOMS AC included, when using a credential source from the nss db
2012-09-04 Anders Waananen
  * Remove arcsrmping (fixes bug #2902)
2012-08-17 Martin Skou Andersen
  * Introducing new class ExecutionTargetSorter for sorting ExecutionTarget objects. It is to replace the ExecutionTargetSet class which isn't working as intended.
2012-08-14 Martin Skou Andersen
  * Submitter class extended with capability of job submission relying on resource discovery and brokering, previously part of arcsub
  * arcsub adapted to use the rebased submission functionality
2012-08-07 David Cameron
  * Add ACIX (ARC Cache Index) to distribution (RHEL>=5, Fedora>=7, Debian>=4). See documentation for configuration options.
2012-08-02 Martin Skou Andersen
  * Adding possibility to format the CE specific job state string (fixes bug 2847)
2012-07-17 David Cameron
  * Implement 3rd party transfer in data library (new method DataPoint::Transfer3rdParty()) and arccp (new option -3) using the GFAL DMC (see the example commands after this block)
2012-07-13 David Cameron
  * Add errno-like field to DataStatus class
2012-07-03 Aleksandr Konstantinov
  * Increase performance of client file staging to/from the HTTPS interface of the A-REX service by turning off encryption.
2012-06-28 David Cameron
  * Improved formatting of arcls -l by aligning columns
2012-06-27 Aleksandr Konstantinov
  * Support for plugin priorities added.
2012-06-20 David Cameron
  * Add arcrename command and DataPoint::Rename(). See documentation and man pages for functionality and possible options.
2012-06-08 David Cameron
  * Cache service can run in the same arched as A-REX with a shared DTR instance
2012-06-05 Aleksandr Konstantinov
  * Janitor component removed from source tree.
2012-06-05 Aleksandr Konstantinov
  * Client file stage-in performance enhanced by decreasing the number of reconnections during one submission cycle.
2012-06-01 Weizhong Qiang
  * arcproxy supports contacting the voms server via the RESTful interface. See documentation and man pages for options.
2012-06-01 Anders Wäänänen
  * Remove ISIS, Janitor, Chelonia and Hopi
2012-05-25 Aleksandr Konstantinov
  * Watchdog implemented for the arched container and used in A-REX. See documentation for configuration options.
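  To illustrate the arccp -3 and arcrename entries above: the commands below are only a sketch with placeholder endpoints and file names; supported protocols and the exact form of the new-name argument are described in the arccp(1) and arcrename(1) man pages.

    # Placeholder endpoints; third-party copy between two storage elements
    arccp -3 gsiftp://se1.example.org/data/file1 gsiftp://se2.example.org/data/file1
    # Rename a file on a storage element (new-name argument as documented in arcrename(1))
    arcrename gsiftp://se1.example.org/data/file1 file1.old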
2012-04-23 Andrii Salnikov
  * nordugridmap updated to version 2.0. See documentation for configuration options.
2012-04-02 Aleksandr Konstantinov
  * Handle out-of-band data at TCP layer.
2012-03-30 Aleksandr Konstantinov
  * Communication with ARGUS PEP is implemented and integrated into A-REX. See documentation for configuration options.
